author     Steven Whitehouse <swhiteho@redhat.com>   2006-04-03 09:08:57 -0400
committer  Steven Whitehouse <swhiteho@redhat.com>   2006-04-03 09:08:57 -0400
commit     76467874b83835129dc454e3a7a8e5d1186101b0 (patch)
tree       162129f0c36c35be4aa323cf00626db0e804c3fc
parent     8628de0583504138551a05ad44ca388467f0f552 (diff)
parent     6246b6128bbe34d0752f119cf7c5111c85fe481d (diff)

Merge branch 'master'
-rw-r--r--Documentation/DocBook/Makefile2
-rw-r--r--Documentation/DocBook/kernel-api.tmpl1
-rw-r--r--Documentation/acpi-hotkey.txt2
-rw-r--r--Documentation/feature-removal-schedule.txt19
-rw-r--r--Documentation/fujitsu/frv/kernel-ABI.txt192
-rw-r--r--Documentation/input/joystick-parport.txt11
-rw-r--r--Documentation/kernel-parameters.txt34
-rw-r--r--Documentation/leds-class.txt71
-rw-r--r--Documentation/memory-barriers.txt1913
-rw-r--r--Documentation/networking/packet_mmap.txt2
-rw-r--r--Documentation/networking/tuntap.txt2
-rw-r--r--Documentation/pcmcia/driver-changes.txt6
-rw-r--r--Documentation/sound/alsa/ALSA-Configuration.txt69
-rw-r--r--Documentation/video4linux/CARDLIST.saa71345
-rw-r--r--Documentation/video4linux/et61x251.txt (renamed from Documentation/usb/et61x251.txt)0
-rw-r--r--Documentation/video4linux/ibmcam.txt (renamed from Documentation/usb/ibmcam.txt)2
-rw-r--r--Documentation/video4linux/ov511.txt (renamed from Documentation/usb/ov511.txt)11
-rw-r--r--Documentation/video4linux/se401.txt (renamed from Documentation/usb/se401.txt)0
-rw-r--r--Documentation/video4linux/sn9c102.txt (renamed from Documentation/usb/sn9c102.txt)16
-rw-r--r--Documentation/video4linux/stv680.txt (renamed from Documentation/usb/stv680.txt)26
-rw-r--r--Documentation/video4linux/w9968cf.txt (renamed from Documentation/usb/w9968cf.txt)36
-rw-r--r--Documentation/video4linux/zc0301.txt (renamed from Documentation/usb/zc0301.txt)0
-rw-r--r--MAINTAINERS6
-rw-r--r--Makefile4
-rw-r--r--arch/alpha/kernel/alpha_ksyms.c2
-rw-r--r--arch/alpha/kernel/core_marvel.c2
-rw-r--r--arch/alpha/kernel/setup.c18
-rw-r--r--arch/arm/Kconfig10
-rw-r--r--arch/arm/Kconfig-nommu44
-rw-r--r--arch/arm/Makefile9
-rw-r--r--arch/arm/boot/compressed/head.S106
-rw-r--r--arch/arm/common/sharpsl_pm.c10
-rw-r--r--arch/arm/configs/at91rm9200dk_defconfig2
-rw-r--r--arch/arm/configs/at91rm9200ek_defconfig2
-rw-r--r--arch/arm/kernel/armksyms.c13
-rw-r--r--arch/arm/kernel/entry-armv.S2
-rw-r--r--arch/arm/kernel/head-common.S217
-rw-r--r--arch/arm/kernel/head-nommu.S83
-rw-r--r--arch/arm/kernel/head.S207
-rw-r--r--arch/arm/kernel/process.c1
-rw-r--r--arch/arm/kernel/setup.c3
-rw-r--r--arch/arm/kernel/signal.h2
-rw-r--r--arch/arm/kernel/traps.c9
-rw-r--r--arch/arm/mach-at91rm9200/Makefile9
-rw-r--r--arch/arm/mach-at91rm9200/board-csb337.c3
-rw-r--r--arch/arm/mach-at91rm9200/board-csb637.c3
-rw-r--r--arch/arm/mach-at91rm9200/board-dk.c10
-rw-r--r--arch/arm/mach-at91rm9200/board-ek.c10
-rw-r--r--arch/arm/mach-at91rm9200/devices.c154
-rw-r--r--arch/arm/mach-at91rm9200/leds.c100
-rw-r--r--arch/arm/mach-ep93xx/core.c10
-rw-r--r--arch/arm/mach-ep93xx/ts72xx.c39
-rw-r--r--arch/arm/mach-imx/dma.c511
-rw-r--r--arch/arm/mach-imx/generic.c13
-rw-r--r--arch/arm/mach-imx/mx1ads.c18
-rw-r--r--arch/arm/mach-ixp23xx/espresso.c9
-rw-r--r--arch/arm/mach-ixp23xx/pci.c18
-rw-r--r--arch/arm/mach-omap1/Kconfig20
-rw-r--r--arch/arm/mach-omap1/Makefile11
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c116
-rw-r--r--arch/arm/mach-omap1/board-generic.c2
-rw-r--r--arch/arm/mach-omap1/board-h2.c200
-rw-r--r--arch/arm/mach-omap1/board-h3.c277
-rw-r--r--arch/arm/mach-omap1/board-innovator.c56
-rw-r--r--arch/arm/mach-omap1/board-netstar.c160
-rw-r--r--arch/arm/mach-omap1/board-nokia770.c268
-rw-r--r--arch/arm/mach-omap1/board-osk.c95
-rw-r--r--arch/arm/mach-omap1/board-palmte.c12
-rw-r--r--arch/arm/mach-omap1/board-perseus2.c123
-rw-r--r--arch/arm/mach-omap1/board-voiceblue.c8
-rw-r--r--arch/arm/mach-omap1/clock.c9
-rw-r--r--arch/arm/mach-omap1/clock.h91
-rw-r--r--arch/arm/mach-omap1/devices.c40
-rw-r--r--arch/arm/mach-omap1/io.c4
-rw-r--r--arch/arm/mach-omap1/irq.c18
-rw-r--r--arch/arm/mach-omap1/mux.c30
-rw-r--r--arch/arm/mach-omap1/pm.c770
-rw-r--r--arch/arm/mach-omap1/serial.c6
-rw-r--r--arch/arm/mach-omap1/sleep.S (renamed from arch/arm/plat-omap/sleep.S)111
-rw-r--r--arch/arm/mach-omap1/time.c197
-rw-r--r--arch/arm/mach-omap2/Kconfig3
-rw-r--r--arch/arm/mach-omap2/Makefile6
-rw-r--r--arch/arm/mach-omap2/board-apollon.c285
-rw-r--r--arch/arm/mach-omap2/board-h4.c174
-rw-r--r--arch/arm/mach-omap2/clock.c79
-rw-r--r--arch/arm/mach-omap2/clock.h37
-rw-r--r--arch/arm/mach-omap2/devices.c42
-rw-r--r--arch/arm/mach-omap2/io.c21
-rw-r--r--arch/arm/mach-omap2/memory.c102
-rw-r--r--arch/arm/mach-omap2/memory.h34
-rw-r--r--arch/arm/mach-omap2/mux.c45
-rw-r--r--arch/arm/mach-omap2/pm.c149
-rw-r--r--arch/arm/mach-omap2/prcm-regs.h (renamed from arch/arm/mach-omap2/prcm.h)188
-rw-r--r--arch/arm/mach-omap2/prcm.c40
-rw-r--r--arch/arm/mach-omap2/sleep.S144
-rw-r--r--arch/arm/mach-omap2/sram-fn.S4
-rw-r--r--arch/arm/mach-pxa/corgi.c11
-rw-r--r--arch/arm/mach-pxa/poodle.c4
-rw-r--r--arch/arm/mach-pxa/spitz.c11
-rw-r--r--arch/arm/mach-pxa/tosa.c9
-rw-r--r--arch/arm/mach-s3c2410/Kconfig7
-rw-r--r--arch/arm/mach-s3c2410/Makefile2
-rw-r--r--arch/arm/mach-s3c2410/clock.c2
-rw-r--r--arch/arm/mach-s3c2410/common-smdk.c134
-rw-r--r--arch/arm/mach-s3c2410/common-smdk.h15
-rw-r--r--arch/arm/mach-s3c2410/mach-anubis.c4
-rw-r--r--arch/arm/mach-s3c2410/mach-rx3715.c35
-rw-r--r--arch/arm/mach-s3c2410/mach-smdk2410.c13
-rw-r--r--arch/arm/mach-s3c2410/mach-smdk2440.c17
-rw-r--r--arch/arm/mach-sa1100/collie.c72
-rw-r--r--arch/arm/mm/consistent.c17
-rw-r--r--arch/arm/mm/mm-armv.c11
-rw-r--r--arch/arm/mm/proc-xsc3.S3
-rw-r--r--arch/arm/plat-omap/Kconfig4
-rw-r--r--arch/arm/plat-omap/Makefile6
-rw-r--r--arch/arm/plat-omap/clock.c67
-rw-r--r--arch/arm/plat-omap/devices.c143
-rw-r--r--arch/arm/plat-omap/dma.c6
-rw-r--r--arch/arm/plat-omap/dmtimer.c26
-rw-r--r--arch/arm/plat-omap/fb.c80
-rw-r--r--arch/arm/plat-omap/gpio.c86
-rw-r--r--arch/arm/plat-omap/mcbsp.c345
-rw-r--r--arch/arm/plat-omap/ocpi.c3
-rw-r--r--arch/arm/plat-omap/pm.c1
-rw-r--r--arch/arm/plat-omap/sram.c143
-rw-r--r--arch/arm/plat-omap/timer32k.c325
-rw-r--r--arch/arm26/kernel/armksyms.c2
-rw-r--r--arch/frv/kernel/frv_ksyms.c2
-rw-r--r--arch/h8300/kernel/h8300_ksyms.c2
-rw-r--r--arch/i386/kernel/apic.c22
-rw-r--r--arch/i386/kernel/cpu/mcheck/mce.c4
-rw-r--r--arch/i386/kernel/crash.c2
-rw-r--r--arch/i386/kernel/io_apic.c2
-rw-r--r--arch/i386/kernel/process.c1
-rw-r--r--arch/i386/kernel/setup.c18
-rw-r--r--arch/i386/kernel/syscall_table.S1
-rw-r--r--arch/i386/kernel/traps.c2
-rw-r--r--arch/i386/kernel/vsyscall-sigreturn.S2
-rw-r--r--arch/ia64/kernel/palinfo.c8
-rw-r--r--arch/ia64/kernel/time.c2
-rw-r--r--arch/ia64/kernel/topology.c367
-rw-r--r--arch/m68k/kernel/m68k_ksyms.c1
-rw-r--r--arch/m68knommu/kernel/m68k_ksyms.c2
-rw-r--r--arch/mips/Kconfig6
-rw-r--r--arch/mips/kernel/Makefile2
-rw-r--r--arch/mips/kernel/i8253.c28
-rw-r--r--arch/mips/kernel/process.c1
-rw-r--r--arch/powerpc/kernel/crash_dump.c4
-rw-r--r--arch/powerpc/kernel/lparcfg.c31
-rw-r--r--arch/powerpc/kernel/process.c1
-rw-r--r--arch/powerpc/kernel/rtas.c12
-rw-r--r--arch/powerpc/kernel/setup-common.c24
-rw-r--r--arch/powerpc/kernel/setup_32.c6
-rw-r--r--arch/powerpc/kernel/setup_64.c10
-rw-r--r--arch/powerpc/kernel/systbl.S1
-rw-r--r--arch/powerpc/kernel/traps.c9
-rw-r--r--arch/powerpc/kernel/vdso32/sigtramp.S2
-rw-r--r--arch/powerpc/kernel/vdso64/sigtramp.S2
-rw-r--r--arch/powerpc/mm/fault.c6
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_sys.c40
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_ads.c40
-rw-r--r--arch/powerpc/platforms/cell/spu_callbacks.c1
-rw-r--r--arch/powerpc/platforms/cell/spufs/run.c1
-rw-r--r--arch/powerpc/platforms/pseries/eeh.c62
-rw-r--r--arch/powerpc/platforms/pseries/eeh_driver.c19
-rw-r--r--arch/powerpc/platforms/pseries/eeh_event.c30
-rw-r--r--arch/powerpc/platforms/pseries/hvCall.S100
-rw-r--r--arch/powerpc/platforms/pseries/hvconsole.c6
-rw-r--r--arch/powerpc/platforms/pseries/hvcserver.c22
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c31
-rw-r--r--arch/powerpc/platforms/pseries/setup.c2
-rw-r--r--arch/powerpc/platforms/pseries/vio.c4
-rw-r--r--arch/powerpc/platforms/pseries/xics.c8
-rw-r--r--arch/s390/kernel/smp.c6
-rw-r--r--arch/sh/kernel/cpu/init.c2
-rw-r--r--arch/sh/kernel/setup.c2
-rw-r--r--arch/sparc/kernel/systbls.S4
-rw-r--r--arch/sparc64/defconfig14
-rw-r--r--arch/sparc64/kernel/smp.c9
-rw-r--r--arch/sparc64/kernel/sys32.S2
-rw-r--r--arch/sparc64/kernel/sys_sparc32.c8
-rw-r--r--arch/sparc64/kernel/systbls.S8
-rw-r--r--arch/sparc64/mm/fault.c6
-rw-r--r--arch/sparc64/mm/hugetlbpage.c7
-rw-r--r--arch/um/Kconfig3
-rw-r--r--arch/um/Makefile7
-rw-r--r--arch/um/Makefile-x86_642
-rw-r--r--arch/um/drivers/daemon_kern.c13
-rw-r--r--arch/um/drivers/harddog_kern.c8
-rw-r--r--arch/um/drivers/hostaudio_kern.c10
-rw-r--r--arch/um/drivers/mcast_kern.c13
-rw-r--r--arch/um/drivers/mconsole_kern.c140
-rw-r--r--arch/um/drivers/pcap_kern.c13
-rw-r--r--arch/um/drivers/slip_kern.c13
-rw-r--r--arch/um/drivers/slirp_kern.c15
-rw-r--r--arch/um/drivers/ubd_kern.c2
-rw-r--r--arch/um/include/kern_util.h6
-rw-r--r--arch/um/include/line.h18
-rw-r--r--arch/um/include/mem_user.h1
-rw-r--r--arch/um/include/os.h10
-rw-r--r--arch/um/include/sysdep-i386/checksum.h5
-rw-r--r--arch/um/include/sysdep-i386/ptrace.h5
-rw-r--r--arch/um/include/sysdep-i386/tls.h32
-rw-r--r--arch/um/include/sysdep-x86_64/tls.h29
-rw-r--r--arch/um/include/user_util.h5
-rw-r--r--arch/um/kernel/exec_kern.c16
-rw-r--r--arch/um/kernel/mem.c2
-rw-r--r--arch/um/kernel/process_kern.c26
-rw-r--r--arch/um/kernel/ptrace.c44
-rw-r--r--arch/um/kernel/skas/process_kern.c11
-rw-r--r--arch/um/kernel/syscall_kern.c4
-rw-r--r--arch/um/kernel/trap_kern.c8
-rw-r--r--arch/um/kernel/tt/process_kern.c10
-rw-r--r--arch/um/os-Linux/Makefile7
-rw-r--r--arch/um/os-Linux/drivers/ethertap_kern.c13
-rw-r--r--arch/um/os-Linux/drivers/tuntap_kern.c13
-rw-r--r--arch/um/os-Linux/mem.c27
-rw-r--r--arch/um/os-Linux/process.c44
-rw-r--r--arch/um/os-Linux/start_up.c20
-rw-r--r--arch/um/os-Linux/sys-i386/Makefile2
-rw-r--r--arch/um/os-Linux/sys-i386/tls.c33
-rw-r--r--arch/um/os-Linux/tls.c76
-rw-r--r--arch/um/scripts/Makefile.rules26
-rw-r--r--arch/um/scripts/Makefile.unmap22
-rw-r--r--arch/um/sys-i386/Makefile23
-rw-r--r--arch/um/sys-i386/ptrace.c45
-rw-r--r--arch/um/sys-i386/ptrace_user.c10
-rw-r--r--arch/um/sys-i386/signal.c48
-rw-r--r--arch/um/sys-i386/sys_call_table.S2
-rw-r--r--arch/um/sys-i386/syscalls.c16
-rw-r--r--arch/um/sys-i386/tls.c384
-rw-r--r--arch/um/sys-x86_64/Makefile34
-rw-r--r--arch/um/sys-x86_64/tls.c14
-rw-r--r--arch/x86_64/ia32/vsyscall-sigreturn.S23
-rw-r--r--arch/x86_64/kernel/apic.c14
-rw-r--r--arch/x86_64/kernel/early_printk.c2
-rw-r--r--arch/x86_64/kernel/mce.c4
-rw-r--r--arch/x86_64/kernel/pmtimer.c2
-rw-r--r--arch/x86_64/kernel/setup.c2
-rw-r--r--arch/x86_64/kernel/setup64.c4
-rw-r--r--arch/x86_64/kernel/smpboot.c2
-rw-r--r--arch/x86_64/kernel/time.c4
-rw-r--r--arch/x86_64/kernel/traps.c4
-rw-r--r--arch/x86_64/kernel/x8664_ksyms.c2
-rw-r--r--arch/x86_64/mm/fault.c2
-rw-r--r--arch/xtensa/kernel/xtensa_ksyms.c2
-rw-r--r--block/Kconfig8
-rw-r--r--block/elevator.c2
-rw-r--r--block/genhd.c103
-rw-r--r--block/ll_rw_blk.c2
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile6
-rw-r--r--drivers/acpi/ec.c4
-rw-r--r--drivers/block/amiflop.c1
-rw-r--r--drivers/bluetooth/bluecard_cs.c119
-rw-r--r--drivers/bluetooth/bt3c_cs.c130
-rw-r--r--drivers/bluetooth/btuart_cs.c130
-rw-r--r--drivers/bluetooth/dtl1_cs.c120
-rw-r--r--drivers/char/hvcs.c2
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c18
-rw-r--r--drivers/char/ipmi/ipmi_kcs_sm.c8
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c80
-rw-r--r--drivers/char/ipmi/ipmi_poweroff.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c85
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c61
-rw-r--r--drivers/char/istallion.c32
-rw-r--r--drivers/char/keyboard.c118
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c121
-rw-r--r--drivers/char/pcmcia/cm4040_cs.c133
-rw-r--r--drivers/char/pcmcia/synclink_cs.c116
-rw-r--r--drivers/char/stallion.c46
-rw-r--r--drivers/char/tty_io.c2
-rw-r--r--drivers/char/vt.c4
-rw-r--r--drivers/char/watchdog/Kconfig7
-rw-r--r--drivers/char/watchdog/Makefile1
-rw-r--r--drivers/char/watchdog/at91_wdt.c228
-rw-r--r--drivers/char/watchdog/pcwd.c137
-rw-r--r--drivers/char/watchdog/pcwd_usb.c3
-rw-r--r--drivers/edac/Kconfig2
-rw-r--r--drivers/hwmon/hdaps.c37
-rw-r--r--drivers/ide/ide-disk.c3
-rw-r--r--drivers/ide/ide-taskfile.c8
-rw-r--r--drivers/ide/legacy/ide-cs.c127
-rw-r--r--drivers/ieee1394/sbp2.c32
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/Makefile1
-rw-r--r--drivers/infiniband/hw/ipath/Kconfig16
-rw-r--r--drivers/infiniband/hw/ipath/Makefile36
-rw-r--r--drivers/infiniband/hw/ipath/ipath_common.h616
-rw-r--r--drivers/infiniband/hw/ipath/ipath_cq.c295
-rw-r--r--drivers/infiniband/hw/ipath/ipath_debug.h96
-rw-r--r--drivers/infiniband/hw/ipath/ipath_diag.c379
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c1983
-rw-r--r--drivers/infiniband/hw/ipath/ipath_eeprom.c613
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c1910
-rw-r--r--drivers/infiniband/hw/ipath/ipath_fs.c605
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ht400.c1586
-rw-r--r--drivers/infiniband/hw/ipath/ipath_init_chip.c951
-rw-r--r--drivers/infiniband/hw/ipath/ipath_intr.c841
-rw-r--r--drivers/infiniband/hw/ipath/ipath_kernel.h884
-rw-r--r--drivers/infiniband/hw/ipath/ipath_keys.c236
-rw-r--r--drivers/infiniband/hw/ipath/ipath_layer.c1515
-rw-r--r--drivers/infiniband/hw/ipath/ipath_layer.h181
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mad.c1352
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mr.c383
-rw-r--r--drivers/infiniband/hw/ipath/ipath_pe800.c1247
-rw-r--r--drivers/infiniband/hw/ipath/ipath_qp.c913
-rw-r--r--drivers/infiniband/hw/ipath/ipath_rc.c1857
-rw-r--r--drivers/infiniband/hw/ipath/ipath_registers.h446
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ruc.c552
-rw-r--r--drivers/infiniband/hw/ipath/ipath_srq.c273
-rw-r--r--drivers/infiniband/hw/ipath/ipath_stats.c303
-rw-r--r--drivers/infiniband/hw/ipath/ipath_sysfs.c778
-rw-r--r--drivers/infiniband/hw/ipath/ipath_uc.c645
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ud.c621
-rw-r--r--drivers/infiniband/hw/ipath/ipath_user_pages.c207
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.c1222
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.h697
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs_mcast.c333
-rw-r--r--drivers/infiniband/hw/ipath/ipath_wc_x86_64.c157
-rw-r--r--drivers/infiniband/hw/ipath/ips_common.h263
-rw-r--r--drivers/infiniband/hw/ipath/verbs_debug.h107
-rw-r--r--drivers/input/evbug.c3
-rw-r--r--drivers/input/evdev.c6
-rw-r--r--drivers/input/gameport/gameport.c30
-rw-r--r--drivers/input/gameport/ns558.c13
-rw-r--r--drivers/input/input.c422
-rw-r--r--drivers/input/joydev.c6
-rw-r--r--drivers/input/joystick/amijoy.c11
-rw-r--r--drivers/input/joystick/db9.c13
-rw-r--r--drivers/input/joystick/gamecon.c96
-rw-r--r--drivers/input/joystick/iforce/iforce-ff.c24
-rw-r--r--drivers/input/joystick/iforce/iforce-main.c2
-rw-r--r--drivers/input/joystick/iforce/iforce.h5
-rw-r--r--drivers/input/joystick/turbografx.c13
-rw-r--r--drivers/input/keyboard/Kconfig2
-rw-r--r--drivers/input/keyboard/atkbd.c24
-rw-r--r--drivers/input/keyboard/corgikbd.c35
-rw-r--r--drivers/input/keyboard/hil_kbd.c9
-rw-r--r--drivers/input/keyboard/spitzkbd.c10
-rw-r--r--drivers/input/misc/pcspkr.c27
-rw-r--r--drivers/input/misc/uinput.c14
-rw-r--r--drivers/input/mouse/hil_ptr.c7
-rw-r--r--drivers/input/mouse/psmouse-base.c38
-rw-r--r--drivers/input/mouse/synaptics.c18
-rw-r--r--drivers/input/mousedev.c6
-rw-r--r--drivers/input/power.c3
-rw-r--r--drivers/input/serio/hil_mlc.c3
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h26
-rw-r--r--drivers/input/serio/libps2.c10
-rw-r--r--drivers/input/serio/parkbd.c3
-rw-r--r--drivers/input/serio/rpckbd.c3
-rw-r--r--drivers/input/serio/serio.c48
-rw-r--r--drivers/input/serio/serio_raw.c29
-rw-r--r--drivers/input/tsdev.c6
-rw-r--r--drivers/isdn/hardware/avm/avm_cs.c185
-rw-r--r--drivers/isdn/hisax/avma1_cs.c182
-rw-r--r--drivers/isdn/hisax/elsa_cs.c112
-rw-r--r--drivers/isdn/hisax/sedlbauer_cs.c143
-rw-r--r--drivers/isdn/hisax/teles_cs.c121
-rw-r--r--drivers/isdn/sc/ioctl.c9
-rw-r--r--drivers/leds/Kconfig77
-rw-r--r--drivers/leds/Makefile16
-rw-r--r--drivers/leds/led-class.c167
-rw-r--r--drivers/leds/led-core.c25
-rw-r--r--drivers/leds/led-triggers.c239
-rw-r--r--drivers/leds/leds-corgi.c121
-rw-r--r--drivers/leds/leds-ixp4xx-gpio.c215
-rw-r--r--drivers/leds/leds-locomo.c95
-rw-r--r--drivers/leds/leds-spitz.c125
-rw-r--r--drivers/leds/leds-tosa.c131
-rw-r--r--drivers/leds/leds.h44
-rw-r--r--drivers/leds/ledtrig-ide-disk.c62
-rw-r--r--drivers/leds/ledtrig-timer.c170
-rw-r--r--drivers/md/dm-target.c3
-rw-r--r--drivers/md/md.c8
-rw-r--r--drivers/md/raid1.c19
-rw-r--r--drivers/md/raid10.c6
-rw-r--r--drivers/md/raid5.c34
-rw-r--r--drivers/md/raid6main.c31
-rw-r--r--drivers/media/Kconfig24
-rw-r--r--drivers/media/dvb/bt8xx/Kconfig1
-rw-r--r--drivers/media/dvb/dvb-core/dmxdev.c12
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c18
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.h2
-rw-r--r--drivers/media/dvb/dvb-usb/cxusb.c54
-rw-r--r--drivers/media/dvb/dvb-usb/dtt200u.c47
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h2
-rw-r--r--drivers/media/dvb/dvb-usb/vp702x-fe.c5
-rw-r--r--drivers/media/dvb/frontends/Kconfig12
-rw-r--r--drivers/media/dvb/frontends/tda1004x.c12
-rw-r--r--drivers/media/dvb/ttpci/av7110.c8
-rw-r--r--drivers/media/dvb/ttpci/av7110_av.c2
-rw-r--r--drivers/media/dvb/ttpci/budget-av.c13
-rw-r--r--drivers/media/dvb/ttpci/budget-core.c78
-rw-r--r--drivers/media/dvb/ttpci/budget-patch.c24
-rw-r--r--drivers/media/dvb/ttpci/budget.h13
-rw-r--r--drivers/media/video/Kconfig233
-rw-r--r--drivers/media/video/Makefile6
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c170
-rw-r--r--drivers/media/video/bt8xx/bttv-vbi.c2
-rw-r--r--drivers/media/video/cpia.c13
-rw-r--r--drivers/media/video/cpia2/cpia2.h2
-rw-r--r--drivers/media/video/cpia_pp.c2
-rw-r--r--drivers/media/video/cx25840/cx25840-audio.c3
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c24
-rw-r--r--drivers/media/video/cx25840/cx25840-core.h (renamed from drivers/media/video/cx25840/cx25840.h)46
-rw-r--r--drivers/media/video/cx25840/cx25840-firmware.c15
-rw-r--r--drivers/media/video/cx25840/cx25840-vbi.c9
-rw-r--r--drivers/media/video/cx88/Kconfig15
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c4
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c33
-rw-r--r--drivers/media/video/et61x251/Kconfig14
-rw-r--r--drivers/media/video/ir-kbd-i2c.c3
-rw-r--r--drivers/media/video/msp3400-driver.c91
-rw-r--r--drivers/media/video/msp3400-driver.h6
-rw-r--r--drivers/media/video/msp3400-kthreads.c121
-rw-r--r--drivers/media/video/pwc/Kconfig28
-rw-r--r--drivers/media/video/saa7115.c65
-rw-r--r--drivers/media/video/saa7127.c43
-rw-r--r--drivers/media/video/saa7134/Kconfig1
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c66
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c4
-rw-r--r--drivers/media/video/saa7134/saa7134.h1
-rw-r--r--drivers/media/video/sn9c102/Kconfig11
-rw-r--r--drivers/media/video/tuner-core.c12
-rw-r--r--drivers/media/video/tvaudio.c15
-rw-r--r--drivers/media/video/tveeprom.c6
-rw-r--r--drivers/media/video/tvp5150.c140
-rw-r--r--drivers/media/video/upd64031a.c286
-rw-r--r--drivers/media/video/upd64083.c262
-rw-r--r--drivers/media/video/usbvideo/Kconfig38
-rw-r--r--drivers/media/video/usbvideo/Makefile8
-rw-r--r--drivers/media/video/v4l2-common.c8
-rw-r--r--drivers/media/video/video-buf.c14
-rw-r--r--drivers/media/video/wm8739.c355
-rw-r--r--drivers/media/video/zc0301/Kconfig11
-rw-r--r--drivers/mmc/Kconfig29
-rw-r--r--drivers/mmc/Makefile7
-rw-r--r--drivers/mmc/at91_mci.c988
-rw-r--r--drivers/mmc/au1xmmc.c19
-rw-r--r--drivers/mmc/imxmmc.c1096
-rw-r--r--drivers/mmc/imxmmc.h67
-rw-r--r--drivers/mmc/mmc.c19
-rw-r--r--drivers/mmc/mmci.c4
-rw-r--r--drivers/mmc/omap.c1226
-rw-r--r--drivers/mmc/omap.h55
-rw-r--r--drivers/mmc/pxamci.c24
-rw-r--r--drivers/mmc/sdhci.c6
-rw-r--r--drivers/mmc/wbsd.c9
-rw-r--r--drivers/mtd/chips/Kconfig21
-rw-r--r--drivers/mtd/chips/amd_flash.c4
-rw-r--r--drivers/mtd/chips/jedec_probe.c19
-rw-r--r--drivers/mtd/chips/sharp.c7
-rw-r--r--drivers/mtd/cmdlinepart.c7
-rw-r--r--drivers/mtd/devices/blkmtd.c13
-rw-r--r--drivers/mtd/devices/block2mtd.c13
-rw-r--r--drivers/mtd/devices/doc2000.c37
-rw-r--r--drivers/mtd/devices/lart.c10
-rw-r--r--drivers/mtd/devices/m25p80.c2
-rw-r--r--drivers/mtd/devices/ms02-nv.c2
-rw-r--r--drivers/mtd/inftlcore.c7
-rw-r--r--drivers/mtd/maps/alchemy-flash.c4
-rw-r--r--drivers/mtd/maps/cfi_flagadm.c2
-rw-r--r--drivers/mtd/maps/dbox2-flash.c2
-rw-r--r--drivers/mtd/maps/dilnetpc.c4
-rw-r--r--drivers/mtd/maps/dmv182.c2
-rw-r--r--drivers/mtd/maps/h720x-flash.c2
-rw-r--r--drivers/mtd/maps/netsc520.c4
-rw-r--r--drivers/mtd/maps/nettel.c3
-rw-r--r--drivers/mtd/maps/ocotea.c6
-rw-r--r--drivers/mtd/maps/pci.c3
-rw-r--r--drivers/mtd/maps/pcmciamtd.c117
-rw-r--r--drivers/mtd/maps/redwood.c3
-rw-r--r--drivers/mtd/maps/sbc8240.c8
-rw-r--r--drivers/mtd/maps/sc520cdp.c2
-rw-r--r--drivers/mtd/maps/scx200_docflash.c2
-rw-r--r--drivers/mtd/maps/sharpsl-flash.c4
-rw-r--r--drivers/mtd/maps/ts5500_flash.c2
-rw-r--r--drivers/mtd/maps/uclinux.c2
-rw-r--r--drivers/mtd/maps/vmax301.c2
-rw-r--r--drivers/mtd/mtd_blkdevs.c32
-rw-r--r--drivers/mtd/mtdblock.c14
-rw-r--r--drivers/mtd/mtdcore.c45
-rw-r--r--drivers/mtd/nand/Kconfig17
-rw-r--r--drivers/mtd/nand/au1550nd.c4
-rw-r--r--drivers/mtd/nand/nand_base.c26
-rw-r--r--drivers/mtd/redboot.c6
-rw-r--r--drivers/net/3c59x.c33
-rw-r--r--drivers/net/8139cp.c12
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/arcnet/arcnet.c3
-rw-r--r--drivers/net/arcnet/com90xx.c4
-rw-r--r--drivers/net/b44.c3
-rw-r--r--drivers/net/chelsio/sge.c3
-rw-r--r--drivers/net/e1000/e1000_main.c3
-rw-r--r--drivers/net/eql.c3
-rw-r--r--drivers/net/ibmveth.c30
-rw-r--r--drivers/net/irda/sa1100_ir.c3
-rw-r--r--drivers/net/ne2k-pci.c4
-rw-r--r--drivers/net/netconsole.c2
-rw-r--r--drivers/net/ns83820.c3
-rw-r--r--drivers/net/pcmcia/3c574_cs.c115
-rw-r--r--drivers/net/pcmcia/3c589_cs.c122
-rw-r--r--drivers/net/pcmcia/axnet_cs.c126
-rw-r--r--drivers/net/pcmcia/com20020_cs.c127
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c166
-rw-r--r--drivers/net/pcmcia/ibmtr_cs.c121
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c126
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c161
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c235
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c187
-rw-r--r--drivers/net/starfire.c3
-rw-r--r--drivers/net/tg3.c72
-rw-r--r--drivers/net/tokenring/Kconfig2
-rw-r--r--drivers/net/tokenring/abyss.c3
-rw-r--r--drivers/net/tokenring/madgemc.c3
-rw-r--r--drivers/net/wireless/Kconfig2
-rw-r--r--drivers/net/wireless/airo_cs.c158
-rw-r--r--drivers/net/wireless/atmel_cs.c162
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c198
-rw-r--r--drivers/net/wireless/ipw2200.c9
-rw-r--r--drivers/net/wireless/netwave_cs.c127
-rw-r--r--drivers/net/wireless/orinoco_cs.c187
-rw-r--r--drivers/net/wireless/ray_cs.c279
-rw-r--r--drivers/net/wireless/ray_cs.h2
-rw-r--r--drivers/net/wireless/spectrum_cs.c173
-rw-r--r--drivers/net/wireless/wavelan_cs.c189
-rw-r--r--drivers/net/wireless/wavelan_cs.p.h6
-rw-r--r--drivers/net/wireless/wl3501.h1
-rw-r--r--drivers/net/wireless/wl3501_cs.c178
-rw-r--r--drivers/net/yellowfin.c3
-rw-r--r--drivers/parport/parport_cs.c129
-rw-r--r--drivers/pcmcia/Kconfig7
-rw-r--r--drivers/pcmcia/Makefile3
-rw-r--r--drivers/pcmcia/at91_cf.c365
-rw-r--r--drivers/pcmcia/cistpl.c1
-rw-r--r--drivers/pcmcia/cs.c43
-rw-r--r--drivers/pcmcia/cs_internal.h19
-rw-r--r--drivers/pcmcia/ds.c249
-rw-r--r--drivers/pcmcia/ds_internal.h4
-rw-r--r--drivers/pcmcia/i82092.c1
-rw-r--r--drivers/pcmcia/i82365.c1
-rw-r--r--drivers/pcmcia/pcmcia_compat.c65
-rw-r--r--drivers/pcmcia/pcmcia_ioctl.c81
-rw-r--r--drivers/pcmcia/pcmcia_resource.c228
-rw-r--r--drivers/pcmcia/pd6729.c1
-rw-r--r--drivers/pcmcia/rsrc_mgr.c5
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c41
-rw-r--r--drivers/pcmcia/sa1100_cerf.c1
-rw-r--r--drivers/pcmcia/socket_sysfs.c10
-rw-r--r--drivers/pcmcia/ti113x.h1
-rw-r--r--drivers/pcmcia/vrc4171_card.c12
-rw-r--r--drivers/pcmcia/vrc4173_cardu.c8
-rw-r--r--drivers/s390/block/dasd_erp.c8
-rw-r--r--drivers/s390/char/sclp_rw.c2
-rw-r--r--drivers/s390/char/tape_block.c13
-rw-r--r--drivers/s390/net/lcs.c13
-rw-r--r--drivers/scsi/ahci.c4
-rw-r--r--drivers/scsi/aic7xxx/Kconfig.aic7xxx2
-rw-r--r--drivers/scsi/ata_piix.c4
-rw-r--r--drivers/scsi/ibmmca.c2
-rw-r--r--drivers/scsi/ibmvscsi/rpa_vscsi.c10
-rw-r--r--drivers/scsi/libata-core.c28
-rw-r--r--drivers/scsi/libata-scsi.c8
-rw-r--r--drivers/scsi/libata.h2
-rw-r--r--drivers/scsi/pcmcia/aha152x_stub.c112
-rw-r--r--drivers/scsi/pcmcia/fdomain_stub.c155
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c136
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.h8
-rw-r--r--drivers/scsi/pcmcia/qlogic_stub.c127
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c124
-rw-r--r--drivers/serial/Kconfig27
-rw-r--r--drivers/serial/Makefile12
-rw-r--r--drivers/serial/jsm/jsm.h2
-rw-r--r--drivers/serial/jsm/jsm_driver.c2
-rw-r--r--drivers/serial/jsm/jsm_neo.c2
-rw-r--r--drivers/serial/jsm/jsm_tty.c29
-rw-r--r--drivers/serial/serial_cs.c229
-rw-r--r--drivers/telephony/ixj_pcmcia.c119
-rw-r--r--drivers/usb/host/sl811_cs.c119
-rw-r--r--drivers/usb/input/hid-input.c2
-rw-r--r--drivers/video/Kconfig12
-rw-r--r--drivers/video/Makefile1
-rw-r--r--drivers/video/backlight/Kconfig4
-rw-r--r--drivers/video/backlight/backlight.c84
-rw-r--r--drivers/video/backlight/corgi_bl.c124
-rw-r--r--drivers/video/backlight/hp680_bl.c139
-rw-r--r--drivers/video/cfbimgblt.c2
-rw-r--r--drivers/video/console/fbcon.c11
-rw-r--r--drivers/video/console/sticore.c4
-rw-r--r--drivers/video/fbmem.c2
-rw-r--r--drivers/video/pxafb.c8
-rw-r--r--drivers/video/radeonfb.c3167
-rw-r--r--drivers/video/stifb.c4
-rw-r--r--drivers/video/w100fb.c162
-rw-r--r--drivers/video/w100fb.h748
-rw-r--r--fs/Makefile2
-rw-r--r--fs/char_dev.c87
-rw-r--r--fs/cifs/CHANGES18
-rw-r--r--fs/cifs/Makefile2
-rw-r--r--fs/cifs/README7
-rw-r--r--fs/cifs/cifsencrypt.c42
-rw-r--r--fs/cifs/cifsfs.c5
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h11
-rw-r--r--fs/cifs/cifspdu.h13
-rw-r--r--fs/cifs/cifsproto.h15
-rw-r--r--fs/cifs/cifssmb.c135
-rw-r--r--fs/cifs/connect.c99
-rw-r--r--fs/cifs/dir.c7
-rw-r--r--fs/cifs/file.c94
-rw-r--r--fs/cifs/inode.c22
-rw-r--r--fs/cifs/link.c2
-rw-r--r--fs/cifs/misc.c46
-rw-r--r--fs/cifs/ntlmssp.c129
-rw-r--r--fs/cifs/ntlmssp.h2
-rw-r--r--fs/cifs/readdir.c7
-rw-r--r--fs/cifs/transport.c22
-rw-r--r--fs/dcache.c50
-rw-r--r--fs/direct-io.c3
-rw-r--r--fs/dquot.c6
-rw-r--r--fs/exec.c2
-rw-r--r--fs/fcntl.c3
-rw-r--r--fs/freevxfs/vxfs_olt.c9
-rw-r--r--fs/hfsplus/bnode.c6
-rw-r--r--fs/hfsplus/btree.c3
-rw-r--r--fs/hppfs/hppfs_kern.c14
-rw-r--r--fs/inode.c15
-rw-r--r--fs/jffs2/background.c3
-rw-r--r--fs/locks.c45
-rw-r--r--fs/msdos/namei.c15
-rw-r--r--fs/namei.c3
-rw-r--r--fs/pipe.c4
-rw-r--r--fs/proc/base.c13
-rw-r--r--fs/proc/proc_misc.c163
-rw-r--r--fs/select.c8
-rw-r--r--fs/smbfs/file.c6
-rw-r--r--fs/splice.c202
-rw-r--r--fs/sync.c164
-rw-r--r--fs/sysfs/dir.c2
-rw-r--r--fs/sysfs/file.c2
-rw-r--r--fs/sysfs/inode.c3
-rw-r--r--fs/sysv/dir.c6
-rw-r--r--fs/udf/inode.c6
-rw-r--r--fs/vfat/namei.c18
-rw-r--r--fs/xfs/linux-2.6/xfs_file.c113
-rw-r--r--fs/xfs/linux-2.6/xfs_linux.h1
-rw-r--r--fs/xfs/linux-2.6/xfs_lrw.c120
-rw-r--r--fs/xfs/linux-2.6/xfs_lrw.h11
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c11
-rw-r--r--fs/xfs/linux-2.6/xfs_vnode.h12
-rw-r--r--fs/xfs/quota/xfs_qm.c17
-rw-r--r--fs/xfs/quota/xfs_trans_dquot.c68
-rw-r--r--fs/xfs/xfs_bmap.c11
-rw-r--r--fs/xfs/xfs_bmap.h9
-rw-r--r--fs/xfs/xfs_clnt.h1
-rw-r--r--fs/xfs/xfs_error.h3
-rw-r--r--fs/xfs/xfs_mount.c71
-rw-r--r--fs/xfs/xfs_mount.h5
-rw-r--r--fs/xfs/xfs_quota.h5
-rw-r--r--fs/xfs/xfs_vfsops.c10
-rw-r--r--fs/xfs/xfs_vnodeops.c4
-rw-r--r--include/asm-arm/arch-at91rm9200/at91rm9200_mci.h104
-rw-r--r--include/asm-arm/arch-at91rm9200/board.h24
-rw-r--r--include/asm-arm/arch-at91rm9200/hardware.h3
-rw-r--r--include/asm-arm/arch-ep93xx/ts72xx.h11
-rw-r--r--include/asm-arm/arch-imx/dma.h17
-rw-r--r--include/asm-arm/arch-imx/imx-dma.h90
-rw-r--r--include/asm-arm/arch-imx/mmc.h12
-rw-r--r--include/asm-arm/arch-ixp23xx/memory.h17
-rw-r--r--include/asm-arm/arch-ixp23xx/platform.h1
-rw-r--r--include/asm-arm/arch-ixp23xx/uncompress.h11
-rw-r--r--include/asm-arm/arch-omap/aic23.h4
-rw-r--r--include/asm-arm/arch-omap/board-ams-delta.h65
-rw-r--r--include/asm-arm/arch-omap/board-apollon.h45
-rw-r--r--include/asm-arm/arch-omap/board-h2.h4
-rw-r--r--include/asm-arm/arch-omap/board-h3.h4
-rw-r--r--include/asm-arm/arch-omap/board-h4.h8
-rw-r--r--include/asm-arm/arch-omap/board-netstar.h19
-rw-r--r--include/asm-arm/arch-omap/board-nokia.h54
-rw-r--r--include/asm-arm/arch-omap/board-perseus2.h4
-rw-r--r--include/asm-arm/arch-omap/board.h30
-rw-r--r--include/asm-arm/arch-omap/clock.h13
-rw-r--r--include/asm-arm/arch-omap/dma.h1
-rw-r--r--include/asm-arm/arch-omap/dmtimer.h1
-rw-r--r--include/asm-arm/arch-omap/dsp.h6
-rw-r--r--include/asm-arm/arch-omap/dsp_common.h13
-rw-r--r--include/asm-arm/arch-omap/gpioexpander.h24
-rw-r--r--include/asm-arm/arch-omap/hardware.h8
-rw-r--r--include/asm-arm/arch-omap/irda.h36
-rw-r--r--include/asm-arm/arch-omap/irqs.h5
-rw-r--r--include/asm-arm/arch-omap/keypad.h36
-rw-r--r--include/asm-arm/arch-omap/lcd_lph8923.h14
-rw-r--r--include/asm-arm/arch-omap/mcbsp.h65
-rw-r--r--include/asm-arm/arch-omap/mcspi.h16
-rw-r--r--include/asm-arm/arch-omap/menelaus.h2
-rw-r--r--include/asm-arm/arch-omap/mux.h54
-rw-r--r--include/asm-arm/arch-omap/omap-alsa.h124
-rw-r--r--include/asm-arm/arch-omap/omapfb.h98
-rw-r--r--include/asm-arm/arch-omap/param.h8
-rw-r--r--include/asm-arm/arch-omap/pm.h81
-rw-r--r--include/asm-arm/arch-omap/prcm.h404
-rw-r--r--include/asm-arm/arch-omap/sram.h2
-rw-r--r--include/asm-arm/arch-omap/system.h17
-rw-r--r--include/asm-arm/arch-pxa/pxa-regs.h2
-rw-r--r--include/asm-arm/arch-pxa/sharpsl.h2
-rw-r--r--include/asm-arm/arch-s3c2410/entry-macro.S167
-rw-r--r--include/asm-arm/dma-mapping.h22
-rw-r--r--include/asm-arm/memory.h8
-rw-r--r--include/asm-arm/pgtable-hwdef.h1
-rw-r--r--include/asm-arm/pgtable.h1
-rw-r--r--include/asm-arm/unistd.h11
-rw-r--r--include/asm-generic/local.h13
-rw-r--r--include/asm-generic/mutex-dec.h30
-rw-r--r--include/asm-generic/mutex-xchg.h33
-rw-r--r--include/asm-i386/apicdef.h1
-rw-r--r--include/asm-i386/floppy.h34
-rw-r--r--include/asm-i386/local.h6
-rw-r--r--include/asm-i386/unistd.h3
-rw-r--r--include/asm-ia64/pal.h34
-rw-r--r--include/asm-powerpc/eeh.h20
-rw-r--r--include/asm-powerpc/hvcall.h185
-rw-r--r--include/asm-powerpc/system.h5
-rw-r--r--include/asm-s390/percpu.h2
-rw-r--r--include/asm-sparc/unistd.h6
-rw-r--r--include/asm-sparc64/unistd.h4
-rw-r--r--include/asm-um/desc.h12
-rw-r--r--include/asm-um/host_ldt-i386.h34
-rw-r--r--include/asm-um/host_ldt-x86_64.h (renamed from include/asm-um/ldt-x86_64.h)39
-rw-r--r--include/asm-um/ldt-i386.h69
-rw-r--r--include/asm-um/ldt.h41
-rw-r--r--include/asm-um/processor-i386.h35
-rw-r--r--include/asm-um/processor-x86_64.h9
-rw-r--r--include/asm-um/ptrace-generic.h16
-rw-r--r--include/asm-um/ptrace-i386.h41
-rw-r--r--include/asm-um/ptrace-x86_64.h35
-rw-r--r--include/asm-um/segment.h6
-rw-r--r--include/asm-um/thread_info.h16
-rw-r--r--include/asm-um/uaccess.h2
-rw-r--r--include/asm-x86_64/local.h10
-rw-r--r--include/linux/backlight.h25
-rw-r--r--include/linux/dcache.h1
-rw-r--r--include/linux/fadvise.h6
-rw-r--r--include/linux/fb.h2
-rw-r--r--include/linux/fs.h24
-rw-r--r--include/linux/gameport.h7
-rw-r--r--include/linux/hrtimer.h18
-rw-r--r--include/linux/input.h23
-rw-r--r--include/linux/ipmi_smi.h16
-rw-r--r--include/linux/kbd_kern.h2
-rw-r--r--include/linux/keyboard.h13
-rw-r--r--include/linux/leds.h111
-rw-r--r--include/linux/libps2.h2
-rw-r--r--include/linux/migrate.h5
-rw-r--r--include/linux/mtd/blktrans.h4
-rw-r--r--include/linux/mtd/doc2000.h4
-rw-r--r--include/linux/mtd/inftl.h5
-rw-r--r--include/linux/namei.h1
-rw-r--r--include/linux/netdevice.h55
-rw-r--r--include/linux/netfilter/x_tables.h67
-rw-r--r--include/linux/netfilter/xt_esp.h14
-rw-r--r--include/linux/netfilter/xt_multiport.h30
-rw-r--r--include/linux/netfilter_ipv4/ip_tables.h18
-rw-r--r--include/linux/netfilter_ipv4/ipt_esp.h14
-rw-r--r--include/linux/netfilter_ipv4/ipt_multiport.h31
-rw-r--r--include/linux/netfilter_ipv6/ip6t_esp.h12
-rw-r--r--include/linux/netfilter_ipv6/ip6t_multiport.h25
-rw-r--r--include/linux/pagemap.h4
-rw-r--r--include/linux/pid.h96
-rw-r--r--include/linux/pipe_fs_i.h9
-rw-r--r--include/linux/sched.h18
-rw-r--r--include/linux/serio.h9
-rw-r--r--include/linux/skbuff.h29
-rw-r--r--include/linux/syscalls.h2
-rw-r--r--include/linux/timer.h8
-rw-r--r--include/linux/tiocl.h1
-rw-r--r--include/linux/uinput.h4
-rw-r--r--include/linux/videodev2.h65
-rw-r--r--include/media/cx25840.h64
-rw-r--r--include/media/msp3400.h60
-rw-r--r--include/media/saa7115.h37
-rw-r--r--include/media/saa7127.h41
-rw-r--r--include/media/upd64031a.h40
-rw-r--r--include/media/upd64083.h58
-rw-r--r--include/net/tcp.h3
-rw-r--r--include/net/xfrm.h19
-rw-r--r--include/pcmcia/bulkmem.h4
-rw-r--r--include/pcmcia/ciscode.h5
-rw-r--r--include/pcmcia/cistpl.h21
-rw-r--r--include/pcmcia/cs.h34
-rw-r--r--include/pcmcia/ds.h80
-rw-r--r--include/pcmcia/ss.h11
-rw-r--r--include/sound/core.h4
-rw-r--r--include/sound/pcm.h15
-rw-r--r--include/sound/pcm_oss.h3
-rw-r--r--ipc/shm.c15
-rw-r--r--ipc/util.c6
-rw-r--r--kernel/acct.c12
-rw-r--r--kernel/audit.c2
-rw-r--r--kernel/cpuset.c69
-rw-r--r--kernel/exit.c7
-rw-r--r--kernel/fork.c28
-rw-r--r--kernel/futex.c4
-rw-r--r--kernel/futex_compat.c4
-rw-r--r--kernel/hrtimer.c49
-rw-r--r--kernel/module.c1
-rw-r--r--kernel/pid.c212
-rw-r--r--kernel/power/Kconfig2
-rw-r--r--kernel/power/process.c3
-rw-r--r--kernel/printk.c6
-rw-r--r--kernel/ptrace.c3
-rw-r--r--kernel/sched.c84
-rw-r--r--kernel/signal.c7
-rw-r--r--kernel/sys.c19
-rw-r--r--kernel/time.c8
-rw-r--r--kernel/timer.c95
-rw-r--r--mm/fadvise.c20
-rw-r--r--mm/highmem.c15
-rw-r--r--mm/hugetlb.c6
-rw-r--r--mm/memory.c2
-rw-r--r--mm/mmap.c9
-rw-r--r--mm/page-writeback.c2
-rw-r--r--mm/slab.c18
-rw-r--r--mm/swap_state.c3
-rw-r--r--mm/swapfile.c14
-rw-r--r--mm/vmalloc.c3
-rw-r--r--net/compat.c3
-rw-r--r--net/core/dev.c64
-rw-r--r--net/core/sock.c16
-rw-r--r--net/dccp/feat.c6
-rw-r--r--net/decnet/dn_dev.c2
-rw-r--r--net/ipv4/ah4.c2
-rw-r--r--net/ipv4/esp4.c5
-rw-r--r--net/ipv4/ipcomp.c3
-rw-r--r--net/ipv4/netfilter/Kconfig18
-rw-r--r--net/ipv4/netfilter/Makefile3
-rw-r--r--net/ipv4/netfilter/ip_conntrack_netlink.c2
-rw-r--r--net/ipv4/netfilter/ip_tables.c1138
-rw-r--r--net/ipv4/netfilter/ipt_multiport.c195
-rw-r--r--net/ipv4/xfrm4_input.c15
-rw-r--r--net/ipv4/xfrm4_tunnel.c2
-rw-r--r--net/ipv6/ah6.c2
-rw-r--r--net/ipv6/esp6.c2
-rw-r--r--net/ipv6/ipcomp6.c2
-rw-r--r--net/ipv6/netfilter/Kconfig16
-rw-r--r--net/ipv6/netfilter/Makefile3
-rw-r--r--net/ipv6/netfilter/ip6t_esp.c115
-rw-r--r--net/ipv6/netfilter/ip6t_multiport.c125
-rw-r--r--net/ipv6/xfrm6_input.c11
-rw-r--r--net/ipv6/xfrm6_tunnel.c2
-rw-r--r--net/netfilter/Kconfig19
-rw-r--r--net/netfilter/Makefile2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c6
-rw-r--r--net/netfilter/x_tables.c113
-rw-r--r--net/netfilter/xt_esp.c (renamed from net/ipv4/netfilter/ipt_esp.c)81
-rw-r--r--net/netfilter/xt_multiport.c314
-rw-r--r--net/netfilter/xt_policy.c2
-rw-r--r--net/socket.c7
-rw-r--r--net/xfrm/xfrm_input.c4
-rw-r--r--net/xfrm/xfrm_policy.c10
-rw-r--r--sound/core/Kconfig5
-rw-r--r--sound/core/control.c6
-rw-r--r--sound/core/control_compat.c6
-rw-r--r--sound/core/init.c9
-rw-r--r--sound/core/oss/pcm_oss.c288
-rw-r--r--sound/core/pcm.c14
-rw-r--r--sound/core/pcm_lib.c49
-rw-r--r--sound/core/pcm_native.c157
-rw-r--r--sound/isa/Kconfig23
-rw-r--r--sound/isa/Makefile2
-rw-r--r--sound/isa/adlib.c161
-rw-r--r--sound/isa/cmi8330.c4
-rw-r--r--sound/isa/opti9xx/Makefile2
-rw-r--r--sound/isa/opti9xx/miro.c1455
-rw-r--r--sound/isa/opti9xx/miro.h73
-rw-r--r--sound/pci/Kconfig30
-rw-r--r--sound/pci/Makefile3
-rw-r--r--sound/pci/als300.c866
-rw-r--r--sound/pci/cs4281.c28
-rw-r--r--sound/pci/hda/hda_codec.c2
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/pci/hda/patch_analog.c9
-rw-r--r--sound/pci/hda/patch_realtek.c298
-rw-r--r--sound/pci/hda/patch_sigmatel.c53
-rw-r--r--sound/pci/ice1712/aureon.c163
-rw-r--r--sound/pci/ice1712/ice1712.c2
-rw-r--r--sound/pci/ice1712/ice1712.h1
-rw-r--r--sound/pci/maestro3.c57
-rw-r--r--sound/pci/pcxhr/pcxhr_core.c9
-rw-r--r--sound/pci/riptide/Makefile3
-rw-r--r--sound/pci/riptide/riptide.c2223
-rw-r--r--sound/pci/via82xx.c1
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf.c86
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf.h2
-rw-r--r--sound/pcmcia/vx/vxpocket.c94
-rw-r--r--sound/pcmcia/vx/vxpocket.h2
-rw-r--r--sound/usb/usbmixer.c37
898 files changed, 58984 insertions, 15524 deletions
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 7d87dd73cbe4..5a2882d275ba 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -2,7 +2,7 @@
 # This makefile is used to generate the kernel documentation,
 # primarily based on in-line comments in various source files.
 # See Documentation/kernel-doc-nano-HOWTO.txt for instruction in how
-# to ducument the SRC - and how to read it.
+# to document the SRC - and how to read it.
 # To add a new book the only step required is to add the book to the
 # list of DOCBOOKS.
 
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index 8c9c6704e85b..ca02e04a906c 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -322,7 +322,6 @@ X!Earch/i386/kernel/mca.c
  <chapter id="sysfs">
     <title>The Filesystem for Exporting Kernel Objects</title>
 !Efs/sysfs/file.c
-!Efs/sysfs/dir.c
 !Efs/sysfs/symlink.c
 !Efs/sysfs/bin.c
  </chapter>
diff --git a/Documentation/acpi-hotkey.txt b/Documentation/acpi-hotkey.txt
index 744f1aec6553..38040fa37649 100644
--- a/Documentation/acpi-hotkey.txt
+++ b/Documentation/acpi-hotkey.txt
@@ -30,7 +30,7 @@ specific hotkey(event))
 echo "event_num:event_type:event_argument" >
 	/proc/acpi/hotkey/action.
 The result of the execution of this aml method is
-attached to /proc/acpi/hotkey/poll_method, which is dnyamically
+attached to /proc/acpi/hotkey/poll_method, which is dynamically
 created. Please use command "cat /proc/acpi/hotkey/polling_method"
 to retrieve it.
 
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 495858b236b6..59d0c74c79c9 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -127,13 +127,6 @@ Who: Christoph Hellwig <hch@lst.de>
 
 ---------------------------
 
-What:	EXPORT_SYMBOL(lookup_hash)
-When:	January 2006
-Why:	Too low-level interface. Use lookup_one_len or lookup_create instead.
-Who:	Christoph Hellwig <hch@lst.de>
-
----------------------------
-
 What:	CONFIG_FORCED_INLINING
 When:	June 2006
 Why:	Config option is there to see if gcc is good enough. (in january
@@ -241,3 +234,15 @@ Why: The USB subsystem has changed a lot over time, and it has been
 Who:	Greg Kroah-Hartman <gregkh@suse.de>
 
 ---------------------------
+
+What:	find_trylock_page
+When:	January 2007
+Why:	The interface no longer has any callers left in the kernel. It
+	is an odd interface (compared with other find_*_page functions), in
+	that it does not take a refcount to the page, only the page lock.
+	It should be replaced with find_get_page or find_lock_page if possible.
+	This feature removal can be reevaluated if users of the interface
+	cannot cleanly use something else.
+Who:	Nick Piggin <npiggin@suse.de>
+
+---------------------------
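
The entry just added asks callers of find_trylock_page() to migrate to find_get_page() or find_lock_page(). As a rough illustration of that migration (not part of this merge; the function, mapping and index names are hypothetical, and the signatures assume the 2.6-era pagecache API), a converted caller might look like this:

/*
 * Illustrative sketch only (hypothetical caller): converting a
 * find_trylock_page() user to find_lock_page(), as the removal entry
 * above suggests.  find_trylock_page() returned a locked page without
 * taking a reference; find_lock_page() returns a locked page with a
 * reference held, so the caller must also drop that reference.
 */
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/errno.h>

static int example_touch_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page;

	/* Old style (interface scheduled for removal):
	 *	page = find_trylock_page(mapping, index);
	 */
	page = find_lock_page(mapping, index);	/* locked + referenced */
	if (!page)
		return -ENOENT;

	/* ... operate on the locked, referenced page here ... */

	unlock_page(page);
	page_cache_release(page);	/* drop the reference we now hold */
	return 0;
}
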
diff --git a/Documentation/fujitsu/frv/kernel-ABI.txt b/Documentation/fujitsu/frv/kernel-ABI.txt
index 0ed9b0a779bc..8b0a5fc8bfd9 100644
--- a/Documentation/fujitsu/frv/kernel-ABI.txt
+++ b/Documentation/fujitsu/frv/kernel-ABI.txt
@@ -1,17 +1,19 @@
 =================================
 INTERNAL KERNEL ABI FOR FR-V ARCH
 =================================
 
-The internal FRV kernel ABI is not quite the same as the userspace ABI. A number of the registers
-are used for special purposed, and the ABI is not consistent between modules vs core, and MMU vs
-no-MMU.
+The internal FRV kernel ABI is not quite the same as the userspace ABI. A
+number of the registers are used for special purposed, and the ABI is not
+consistent between modules vs core, and MMU vs no-MMU.
 
-This partly stems from the fact that FRV CPUs do not have a separate supervisor stack pointer, and
-most of them do not have any scratch registers, thus requiring at least one general purpose
-register to be clobbered in such an event. Also, within the kernel core, it is possible to simply
-jump or call directly between functions using a relative offset. This cannot be extended to modules
-for the displacement is likely to be too far. Thus in modules the address of a function to call
-must be calculated in a register and then used, requiring two extra instructions.
+This partly stems from the fact that FRV CPUs do not have a separate
+supervisor stack pointer, and most of them do not have any scratch
+registers, thus requiring at least one general purpose register to be
+clobbered in such an event. Also, within the kernel core, it is possible to
+simply jump or call directly between functions using a relative offset.
+This cannot be extended to modules for the displacement is likely to be too
+far. Thus in modules the address of a function to call must be calculated
+in a register and then used, requiring two extra instructions.
 
 This document has the following sections:
 
@@ -39,7 +41,8 @@ When a system call is made, the following registers are effective:
 CPU OPERATING MODES
 ===================
 
-The FR-V CPU has three basic operating modes. In order of increasing capability:
+The FR-V CPU has three basic operating modes. In order of increasing
+capability:
 
  (1) User mode.
 
@@ -47,42 +50,46 @@ The FR-V CPU has three basic operating modes. In order of increasing capability:
 
  (2) Kernel mode.
 
-     Normal kernel mode. There are many additional control registers available that may be
-     accessed in this mode, in addition to all the stuff available to user mode. This has two
-     submodes:
+     Normal kernel mode. There are many additional control registers
+     available that may be accessed in this mode, in addition to all the
+     stuff available to user mode. This has two submodes:
 
      (a) Exceptions enabled (PSR.T == 1).
 
-         Exceptions will invoke the appropriate normal kernel mode handler. On entry to the
-         handler, the PSR.T bit will be cleared.
+         Exceptions will invoke the appropriate normal kernel mode
+         handler. On entry to the handler, the PSR.T bit will be cleared.
 
      (b) Exceptions disabled (PSR.T == 0).
 
-         No exceptions or interrupts may happen. Any mandatory exceptions will cause the CPU to
-         halt unless the CPU is told to jump into debug mode instead.
+         No exceptions or interrupts may happen. Any mandatory exceptions
+         will cause the CPU to halt unless the CPU is told to jump into
+         debug mode instead.
 
  (3) Debug mode.
 
-     No exceptions may happen in this mode. Memory protection and management exceptions will be
-     flagged for later consideration, but the exception handler won't be invoked. Debugging traps
-     such as hardware breakpoints and watchpoints will be ignored. This mode is entered only by
-     debugging events obtained from the other two modes.
+     No exceptions may happen in this mode. Memory protection and
+     management exceptions will be flagged for later consideration, but
+     the exception handler won't be invoked. Debugging traps such as
+     hardware breakpoints and watchpoints will be ignored. This mode is
+     entered only by debugging events obtained from the other two modes.
 
-     All kernel mode registers may be accessed, plus a few extra debugging specific registers.
+     All kernel mode registers may be accessed, plus a few extra debugging
+     specific registers.
 
 
 =================================
 INTERNAL KERNEL-MODE REGISTER ABI
 =================================
 
-There are a number of permanent register assignments that are set up by entry.S in the exception
-prologue. Note that there is a complete set of exception prologues for each of user->kernel
-transition and kernel->kernel transition. There are also user->debug and kernel->debug mode
-transition prologues.
+There are a number of permanent register assignments that are set up by
+entry.S in the exception prologue. Note that there is a complete set of
+exception prologues for each of user->kernel transition and kernel->kernel
+transition. There are also user->debug and kernel->debug mode transition
+prologues.
 
 
  REGISTER        FLAVOUR USE
- =============== ======= ====================================================
+ =============== ======= ==============================================
  GR1                     Supervisor stack pointer
  GR15                    Current thread info pointer
  GR16                    GP-Rel base register for small data
@@ -92,10 +99,12 @@ transition prologues.
  GR31            NOMMU   Destroyed by debug mode entry
  GR31            MMU     Destroyed by TLB miss kernel mode entry
  CCR.ICC2                Virtual interrupt disablement tracking
- CCCR.CC3                Cleared by exception prologue (atomic op emulation)
+ CCCR.CC3                Cleared by exception prologue
+                         (atomic op emulation)
  SCR0            MMU     See mmu-layout.txt.
  SCR1            MMU     See mmu-layout.txt.
- SCR2            MMU     Save for EAR0 (destroyed by icache insns in debug mode)
+ SCR2            MMU     Save for EAR0 (destroyed by icache insns
+                         in debug mode)
  SCR3            MMU     Save for GR31 during debug exceptions
  DAMR/IAMR       NOMMU   Fixed memory protection layout.
  DAMR/IAMR       MMU     See mmu-layout.txt.
@@ -104,18 +113,21 @@ transition prologues.
 Certain registers are also used or modified across function calls:
 
  REGISTER        CALL                            RETURN
- =============== =============================== ===============================
+ =============== =============================== ======================
  GR0             Fixed Zero                      -
  GR2             Function call frame pointer
  GR3             Special                         Preserved
  GR3-GR7         -                               Clobbered
- GR8             Function call arg #1            Return value (or clobbered)
- GR9             Function call arg #2            Return value MSW (or clobbered)
+ GR8             Function call arg #1            Return value
+                                                 (or clobbered)
+ GR9             Function call arg #2            Return value MSW
+                                                 (or clobbered)
  GR10-GR13       Function call arg #3-#6         Clobbered
  GR14            -                               Clobbered
  GR15-GR16       Special                         Preserved
  GR17-GR27       -                               Preserved
- GR28-GR31       Special                         Only accessed explicitly
+ GR28-GR31       Special                         Only accessed
+                                                 explicitly
  LR              Return address after CALL       Clobbered
  CCR/CCCR        -                               Mostly Clobbered
 
@@ -124,46 +136,53 @@ Certain registers are also used or modified across function calls:
 INTERNAL DEBUG-MODE REGISTER ABI
 ================================
 
-This is the same as the kernel-mode register ABI for functions calls. The difference is that in
-debug-mode there's a different stack and a different exception frame. Almost all the global
-registers from kernel-mode (including the stack pointer) may be changed.
+This is the same as the kernel-mode register ABI for functions calls. The
+difference is that in debug-mode there's a different stack and a different
+exception frame. Almost all the global registers from kernel-mode
+(including the stack pointer) may be changed.
 
  REGISTER        FLAVOUR USE
- =============== ======= ====================================================
+ =============== ======= ==============================================
  GR1                     Debug stack pointer
  GR16                    GP-Rel base register for small data
- GR31                    Current debug exception frame pointer (__debug_frame)
+ GR31                    Current debug exception frame pointer
+                         (__debug_frame)
  SCR3            MMU     Saved value of GR31
 
 
-Note that debug mode is able to interfere with the kernel's emulated atomic ops, so it must be
-exceedingly careful not to do any that would interact with the main kernel in this regard. Hence
-the debug mode code (gdbstub) is almost completely self-contained. The only external code used is
-the sprintf family of functions.
+Note that debug mode is able to interfere with the kernel's emulated atomic
+ops, so it must be exceedingly careful not to do any that would interact
+with the main kernel in this regard. Hence the debug mode code (gdbstub) is
+almost completely self-contained. The only external code used is the
+sprintf family of functions.
 
-Futhermore, break.S is so complicated because single-step mode does not switch off on entry to an
-exception. That means unless manually disabled, single-stepping will blithely go on stepping into
-things like interrupts. See gdbstub.txt for more information.
+Futhermore, break.S is so complicated because single-step mode does not
+switch off on entry to an exception. That means unless manually disabled,
+single-stepping will blithely go on stepping into things like interrupts.
+See gdbstub.txt for more information.
 
 
 ==========================
 VIRTUAL INTERRUPT HANDLING
 ==========================
 
-Because accesses to the PSR is so slow, and to disable interrupts we have to access it twice (once
-to read and once to write), we don't actually disable interrupts at all if we don't have to. What
-we do instead is use the ICC2 condition code flags to note virtual disablement, such that if we
-then do take an interrupt, we note the flag, really disable interrupts, set another flag and resume
-execution at the point the interrupt happened. Setting condition flags as a side effect of an
-arithmetic or logical instruction is really fast. This use of the ICC2 only occurs within the
+Because accesses to the PSR is so slow, and to disable interrupts we have
+to access it twice (once to read and once to write), we don't actually
+disable interrupts at all if we don't have to. What we do instead is use
+the ICC2 condition code flags to note virtual disablement, such that if we
+then do take an interrupt, we note the flag, really disable interrupts, set
+another flag and resume execution at the point the interrupt happened.
+Setting condition flags as a side effect of an arithmetic or logical
+instruction is really fast. This use of the ICC2 only occurs within the
 kernel - it does not affect userspace.
 
 The flags we use are:
 
  (*) CCR.ICC2.Z [Zero flag]
 
-     Set to virtually disable interrupts, clear when interrupts are virtually enabled. Can be
-     modified by logical instructions without affecting the Carry flag.
+     Set to virtually disable interrupts, clear when interrupts are
+     virtually enabled. Can be modified by logical instructions without
+     affecting the Carry flag.
 
  (*) CCR.ICC2.C [Carry flag]
 
@@ -176,8 +195,9 @@ What happens is this:
 
      ICC2.Z is 0, ICC2.C is 1.
 
- (2) An interrupt occurs. The exception prologue examines ICC2.Z and determines that nothing needs
-     doing. This is done simply with an unlikely BEQ instruction.
+ (2) An interrupt occurs. The exception prologue examines ICC2.Z and
+     determines that nothing needs doing. This is done simply with an
+     unlikely BEQ instruction.
 
  (3) The interrupts are disabled (local_irq_disable)
 
@@ -187,48 +207,56 @@ What happens is this:
 
      ICC2.Z would be set to 0.
 
-     A TIHI #2 instruction (trap #2 if condition HI - Z==0 && C==0) would be used to trap if
-     interrupts were now virtually enabled, but physically disabled - which they're not, so the
-     trap isn't taken. The kernel would then be back to state (1).
+     A TIHI #2 instruction (trap #2 if condition HI - Z==0 && C==0) would
+     be used to trap if interrupts were now virtually enabled, but
+     physically disabled - which they're not, so the trap isn't taken. The
+     kernel would then be back to state (1).
 
- (5) An interrupt occurs. The exception prologue examines ICC2.Z and determines that the interrupt
-     shouldn't actually have happened. It jumps aside, and there disabled interrupts by setting
-     PSR.PIL to 14 and then it clears ICC2.C.
+ (5) An interrupt occurs. The exception prologue examines ICC2.Z and
+     determines that the interrupt shouldn't actually have happened. It
+     jumps aside, and there disabled interrupts by setting PSR.PIL to 14
+     and then it clears ICC2.C.
 
  (6) If interrupts were then saved and disabled again (local_irq_save):
 
-     ICC2.Z would be shifted into the save variable and masked off (giving a 1).
+     ICC2.Z would be shifted into the save variable and masked off
+     (giving a 1).
 
-     ICC2.Z would then be set to 1 (thus unchanged), and ICC2.C would be unaffected (ie: 0).
+     ICC2.Z would then be set to 1 (thus unchanged), and ICC2.C would be
+     unaffected (ie: 0).
 
  (7) If interrupts were then restored from state (6) (local_irq_restore):
 
-     ICC2.Z would be set to indicate the result of XOR'ing the saved value (ie: 1) with 1, which
-     gives a result of 0 - thus leaving ICC2.Z set.
+     ICC2.Z would be set to indicate the result of XOR'ing the saved
+     value (ie: 1) with 1, which gives a result of 0 - thus leaving
+     ICC2.Z set.
 
      ICC2.C would remain unaffected (ie: 0).
 
-     A TIHI #2 instruction would be used to again assay the current state, but this would do
-     nothing as Z==1.
+     A TIHI #2 instruction would be used to again assay the current state,
+     but this would do nothing as Z==1.
 
  (8) If interrupts were then enabled (local_irq_enable):
 
-     ICC2.Z would be cleared. ICC2.C would be left unaffected. Both flags would now be 0.
+     ICC2.Z would be cleared. ICC2.C would be left unaffected. Both
+     flags would now be 0.
 
-     A TIHI #2 instruction again issued to assay the current state would then trap as both Z==0
-     [interrupts virtually enabled] and C==0 [interrupts really disabled] would then be true.
+     A TIHI #2 instruction again issued to assay the current state would
+     then trap as both Z==0 [interrupts virtually enabled] and C==0
+     [interrupts really disabled] would then be true.
 
- (9) The trap #2 handler would simply enable hardware interrupts (set PSR.PIL to 0), set ICC2.C to
-     1 and return.
+ (9) The trap #2 handler would simply enable hardware interrupts
+     (set PSR.PIL to 0), set ICC2.C to 1 and return.
 
 (10) Immediately upon returning, the pending interrupt would be taken.
 
-(11) The interrupt handler would take the path of actually processing the interrupt (ICC2.Z is
-     clear, BEQ fails as per step (2)).
+(11) The interrupt handler would take the path of actually processing the
+     interrupt (ICC2.Z is clear, BEQ fails as per step (2)).
 
-(12) The interrupt handler would then set ICC2.C to 1 since hardware interrupts are definitely
-     enabled - or else the kernel wouldn't be here.
+(12) The interrupt handler would then set ICC2.C to 1 since hardware
+     interrupts are definitely enabled - or else the kernel wouldn't be here.
 
 (13) On return from the interrupt handler, things would be back to state (1).
 
-This trap (#2) is only available in kernel mode. In user mode it will result in SIGILL.
+This trap (#2) is only available in kernel mode. In user mode it will
+result in SIGILL.
diff --git a/Documentation/input/joystick-parport.txt b/Documentation/input/joystick-parport.txt
index 88a011c9f985..d537c48cc6d0 100644
--- a/Documentation/input/joystick-parport.txt
+++ b/Documentation/input/joystick-parport.txt
@@ -36,12 +36,12 @@ with them.

  All NES and SNES use the same synchronous serial protocol, clocked from
the computer's side (and thus timing insensitive). To allow up to 5 NES
and/or SNES gamepads and/or SNES mice connected to the parallel port at once,
the output lines of the parallel port are shared, while one of 5 available
input lines is assigned to each gamepad.

  This protocol is handled by the gamecon.c driver, so that's the one
you'll use for NES, SNES gamepads and SNES mice.

  The main problem with PC parallel ports is that they don't have +5V power
source on any of their pins. So, if you want a reliable source of power
@@ -106,7 +106,7 @@ A, Turbo B, Select and Start, and is connected through 5 wires, then it is
either a NES or NES clone and will work with this connection. SNES gamepads
also use 5 wires, but have more buttons. They will work as well, of course.

Pinout for NES gamepads                 Pinout for SNES gamepads and mice

111 +----> Power +-----------------------\ 111 +----> Power +-----------------------\
112 | 7 | o o o o | x x o | 1 112 | 7 | o o o o | x x o | 1
@@ -454,6 +454,7 @@ uses the following kernel/module command line:
          6  | N64 pad
          7  | Sony PSX controller
          8  | Sony PSX DDR controller
          9  | SNES mouse

  The exact type of the PSX controller is autoprobed when used, so
hot swapping should work (but is not recommended).
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index f8cb55c30b0f..b3a6187e5305 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1,4 +1,4 @@
                          Kernel Parameters
                          ~~~~~~~~~~~~~~~~~

The following is a consolidated list of the kernel parameters as implemented
@@ -17,9 +17,17 @@ are specified on the kernel command line with the module name plus

	usbcore.blinkenlights=1

This document may not be entirely up to date and comprehensive. The command
"modinfo -p ${modulename}" shows a current list of all parameters of a loadable
module. Loadable modules, after being loaded into the running kernel, also
reveal their parameters in /sys/module/${modulename}/parameters/. Some of these
parameters may be changed at runtime by the command
"echo -n ${value} > /sys/module/${modulename}/parameters/${parm}".

The parameters listed below are only valid if certain kernel build options were
enabled and if respective hardware is present. The text in square brackets at
the beginning of each description states the restrictions within which a
parameter is applicable:

	ACPI	ACPI support is enabled.
	ALSA	ALSA sound support is enabled.
@@ -1046,10 +1054,10 @@ running once the system is up.
	noltlbs		[PPC] Do not use large page/tlb entries for kernel
			lowmem mapping on PPC40x.

	nomca		[IA-64] Disable machine check abort handling

	nomce		[IA-32] Machine Check Exception

	noresidual	[PPC] Don't use residual data on PReP machines.

	noresume	[SWSUSP] Disables resume and restores original swap
@@ -1682,20 +1690,6 @@ running once the system is up.


______________________________________________________________________
1685Changelog:
1686
16872000-06-?? Mr. Unknown
1688 The last known update (for 2.4.0) - the changelog was not kept before.
1689
16902002-11-24 Petr Baudis <pasky@ucw.cz>
1691 Randy Dunlap <randy.dunlap@verizon.net>
1692 Update for 2.5.49, description for most of the options introduced,
1693 references to other documentation (C files, READMEs, ..), added S390,
1694 PPC, SPARC, MTD, ALSA and OSS category. Minor corrections and
1695 reformatting.
1696
16972005-10-19 Randy Dunlap <rdunlap@xenotime.net>
1698 Lots of typos, whitespace, some reformatting.

TODO:

diff --git a/Documentation/leds-class.txt b/Documentation/leds-class.txt
new file mode 100644
index 000000000000..8c35c0426110
--- /dev/null
+++ b/Documentation/leds-class.txt
@@ -0,0 +1,71 @@
1LED handling under Linux
2========================
3
4If you're reading this and thinking about keyboard LEDs, these are
5handled by the input subsystem and the LED class is *not* needed.
6
7In its simplest form, the LED class just allows control of LEDs from
8userspace. LEDs appear in /sys/class/leds/. The brightness file will
9set the brightness of the LED (taking a value 0-255). Most LEDs don't
10have hardware brightness support so will just be turned on for non-zero
11brightness settings.
12
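To make the sysfs interface concrete, here is a minimal userspace sketch that
sets an LED to full brightness; the LED name "myled:green" is purely
illustrative and error handling is kept to a minimum:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical LED - substitute a name found under /sys/class/leds/ */
		FILE *f = fopen("/sys/class/leds/myled:green/brightness", "w");

		if (!f)
			return 1;
		fprintf(f, "255\n");	/* any non-zero value turns most LEDs fully on */
		fclose(f);
		return 0;
	}
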
13The class also introduces the optional concept of an LED trigger. A trigger
14is a kernel based source of led events. Triggers can either be simple or
15complex. A simple trigger isn't configurable and is designed to slot into
16existing subsystems with minimal additional code. Examples are the ide-disk,
17nand-disk and sharpsl-charge triggers. With led triggers disabled, the code
18optimises away.
19
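As a rough sketch of how a driver might feed such a simple trigger (modelled
on the existing ide-disk trigger, and assuming the simple-trigger helpers
DEFINE_LED_TRIGGER(), led_trigger_register_simple() and led_trigger_event();
the trigger and function names below are made up):

	#include <linux/leds.h>

	DEFINE_LED_TRIGGER(mydev_activity);

	static int __init mydev_led_init(void)
	{
		led_trigger_register_simple("mydev-activity", &mydev_activity);
		return 0;
	}

	static void mydev_note_activity(void)
	{
		/* any LED bound to this trigger lights up */
		led_trigger_event(mydev_activity, LED_FULL);
	}
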
20Complex triggers, whilst available to all LEDs, have LED specific
21parameters and work on a per LED basis. The timer trigger is an example.
22
23You can change triggers in a similar manner to the way an IO scheduler
24is chosen (via /sys/class/leds/<device>/trigger). Trigger specific
25parameters can appear in /sys/class/leds/<device> once a given trigger is
26selected.
27
28
29Design Philosophy
30=================
31
32The underlying design philosophy is simplicity. LEDs are simple devices
33and the aim is to keep a small amount of code giving as much functionality
34as possible. Please keep this in mind when suggesting enhancements.
35
36
37LED Device Naming
38=================
39
40LED device names are currently of the form:
41
42"devicename:colour"
43
44There have been calls for LED properties such as colour to be exported as
45individual led class attributes. As a solution which doesn't incur as much
46overhead, I suggest these become part of the device name. The naming scheme
47above leaves scope for further attributes should they be needed.
48
49
50Known Issues
51============
52
53The LED Trigger core cannot be a module as the simple trigger functions
54would cause nightmare dependency issues. I see this as a minor issue
55compared to the benefits the simple trigger functionality brings. The
56rest of the LED subsystem can be modular.
57
58Some LEDs can be programmed to flash in hardware. As this isn't a generic
59LED device property, this should be exported as a device specific sysfs
60attribute rather than part of the class if this functionality is required.
61
62
63Future Development
64==================
65
66At the moment, a trigger can't be created specifically for a single LED.
67There are a number of cases where a trigger might only be mappable to a
68particular LED (ACPI?). The addition of triggers provided by the LED driver
69should cover this option and be possible to add without breaking the
70current interface.
71
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
new file mode 100644
index 000000000000..f8550310a6d5
--- /dev/null
+++ b/Documentation/memory-barriers.txt
@@ -0,0 +1,1913 @@
1 ============================
2 LINUX KERNEL MEMORY BARRIERS
3 ============================
4
5By: David Howells <dhowells@redhat.com>
6
7Contents:
8
9 (*) Abstract memory access model.
10
11 - Device operations.
12 - Guarantees.
13
14 (*) What are memory barriers?
15
16 - Varieties of memory barrier.
17 - What may not be assumed about memory barriers?
18 - Data dependency barriers.
19 - Control dependencies.
20 - SMP barrier pairing.
21 - Examples of memory barrier sequences.
22
23 (*) Explicit kernel barriers.
24
25 - Compiler barrier.
26 - The CPU memory barriers.
27 - MMIO write barrier.
28
29 (*) Implicit kernel memory barriers.
30
31 - Locking functions.
32 - Interrupt disabling functions.
33 - Miscellaneous functions.
34
35 (*) Inter-CPU locking barrier effects.
36
37 - Locks vs memory accesses.
38 - Locks vs I/O accesses.
39
40 (*) Where are memory barriers needed?
41
42 - Interprocessor interaction.
43 - Atomic operations.
44 - Accessing devices.
45 - Interrupts.
46
47 (*) Kernel I/O barrier effects.
48
49 (*) Assumed minimum execution ordering model.
50
51 (*) The effects of the cpu cache.
52
53 - Cache coherency.
54 - Cache coherency vs DMA.
55 - Cache coherency vs MMIO.
56
57 (*) The things CPUs get up to.
58
59 - And then there's the Alpha.
60
61 (*) References.
62
63
64============================
65ABSTRACT MEMORY ACCESS MODEL
66============================
67
68Consider the following abstract model of the system:
69
70 : :
71 : :
72 : :
73 +-------+ : +--------+ : +-------+
74 | | : | | : | |
75 | | : | | : | |
76 | CPU 1 |<----->| Memory |<----->| CPU 2 |
77 | | : | | : | |
78 | | : | | : | |
79 +-------+ : +--------+ : +-------+
80 ^ : ^ : ^
81 | : | : |
82 | : | : |
83 | : v : |
84 | : +--------+ : |
85 | : | | : |
86 | : | | : |
87 +---------->| Device |<----------+
88 : | | :
89 : | | :
90 : +--------+ :
91 : :
92
93Each CPU executes a program that generates memory access operations. In the
94abstract CPU, memory operation ordering is very relaxed, and a CPU may actually
95perform the memory operations in any order it likes, provided program causality
96appears to be maintained. Similarly, the compiler may also arrange the
97instructions it emits in any order it likes, provided it doesn't affect the
98apparent operation of the program.
99
100So in the above diagram, the effects of the memory operations performed by a
101CPU are perceived by the rest of the system as the operations cross the
102interface between the CPU and rest of the system (the dotted lines).
103
104
105For example, consider the following sequence of events:
106
107 CPU 1 CPU 2
108 =============== ===============
109 { A == 1; B == 2 }
110 A = 3; x = A;
111 B = 4; y = B;
112
113The set of accesses as seen by the memory system in the middle can be arranged
114in 24 different combinations:
115
116 STORE A=3, STORE B=4, x=LOAD A->3, y=LOAD B->4
117 STORE A=3, STORE B=4, y=LOAD B->4, x=LOAD A->3
118 STORE A=3, x=LOAD A->3, STORE B=4, y=LOAD B->4
119 STORE A=3, x=LOAD A->3, y=LOAD B->2, STORE B=4
120 STORE A=3, y=LOAD B->2, STORE B=4, x=LOAD A->3
121 STORE A=3, y=LOAD B->2, x=LOAD A->3, STORE B=4
122 STORE B=4, STORE A=3, x=LOAD A->3, y=LOAD B->4
123 STORE B=4, ...
124 ...
125
126and can thus result in four different combinations of values:
127
128 x == 1, y == 2
129 x == 1, y == 4
130 x == 3, y == 2
131 x == 3, y == 4
132
133
134Furthermore, the stores committed by a CPU to the memory system may not be
135perceived by the loads made by another CPU in the same order as the stores were
136committed.
137
138
139As a further example, consider this sequence of events:
140
141 CPU 1 CPU 2
142 =============== ===============
143 { A == 1, B == 2, C == 3, P == &A, Q == &C }
144 B = 4; Q = P;
145 P = &B D = *Q;
146
147There is an obvious data dependency here, as the value loaded into D depends on
148the address retrieved from P by CPU 2. At the end of the sequence, any of the
149following results are possible:
150
151 (Q == &A) and (D == 1)
152 (Q == &B) and (D == 2)
153 (Q == &B) and (D == 4)
154
155Note that CPU 2 will never try and load C into D because the CPU will load P
156into Q before issuing the load of *Q.
157
158
159DEVICE OPERATIONS
160-----------------
161
162Some devices present their control interfaces as collections of memory
163locations, but the order in which the control registers are accessed is very
164important. For instance, imagine an ethernet card with a set of internal
165registers that are accessed through an address port register (A) and a data
166port register (D). To read internal register 5, the following code might then
167be used:
168
169 *A = 5;
170 x = *D;
171
172but this might show up as either of the following two sequences:
173
174 STORE *A = 5, x = LOAD *D
175 x = LOAD *D, STORE *A = 5
176
177the second of which will almost certainly result in a malfunction, since it sets
178the address _after_ attempting to read the register.
179
180
181GUARANTEES
182----------
183
184There are some minimal guarantees that may be expected of a CPU:
185
186 (*) On any given CPU, dependent memory accesses will be issued in order, with
187 respect to itself. This means that for:
188
189 Q = P; D = *Q;
190
191 the CPU will issue the following memory operations:
192
193 Q = LOAD P, D = LOAD *Q
194
195 and always in that order.
196
197 (*) Overlapping loads and stores within a particular CPU will appear to be
198 ordered within that CPU. This means that for:
199
200 a = *X; *X = b;
201
202 the CPU will only issue the following sequence of memory operations:
203
204 a = LOAD *X, STORE *X = b
205
206 And for:
207
208 *X = c; d = *X;
209
210 the CPU will only issue:
211
212 STORE *X = c, d = LOAD *X
213
214 (Loads and stores overlap if they are targeted at overlapping pieces of
215 memory).
216
217And there are a number of things that _must_ or _must_not_ be assumed:
218
219 (*) It _must_not_ be assumed that independent loads and stores will be issued
220 in the order given. This means that for:
221
222 X = *A; Y = *B; *D = Z;
223
224 we may get any of the following sequences:
225
226 X = LOAD *A, Y = LOAD *B, STORE *D = Z
227 X = LOAD *A, STORE *D = Z, Y = LOAD *B
228 Y = LOAD *B, X = LOAD *A, STORE *D = Z
229 Y = LOAD *B, STORE *D = Z, X = LOAD *A
230 STORE *D = Z, X = LOAD *A, Y = LOAD *B
231 STORE *D = Z, Y = LOAD *B, X = LOAD *A
232
233 (*) It _must_ be assumed that overlapping memory accesses may be merged or
234 discarded. This means that for:
235
236 X = *A; Y = *(A + 4);
237
238 we may get any one of the following sequences:
239
240 X = LOAD *A; Y = LOAD *(A + 4);
241 Y = LOAD *(A + 4); X = LOAD *A;
242 {X, Y} = LOAD {*A, *(A + 4) };
243
244 And for:
245
246 *A = X; Y = *A;
247
248 we may get either of:
249
250 STORE *A = X; Y = LOAD *A;
251 STORE *A = Y = X;
252
253
254=========================
255WHAT ARE MEMORY BARRIERS?
256=========================
257
258As can be seen above, independent memory operations are effectively performed
259in random order, but this can be a problem for CPU-CPU interaction and for I/O.
260What is required is some way of intervening to instruct the compiler and the
261CPU to restrict the order.
262
263Memory barriers are such interventions. They impose a perceived partial
264ordering between the memory operations specified on either side of the barrier.
265They request that the sequence of memory events generated appears to other
266parts of the system as if the barrier is effective on that CPU.
267
268
269VARIETIES OF MEMORY BARRIER
270---------------------------
271
272Memory barriers come in four basic varieties:
273
274 (1) Write (or store) memory barriers.
275
276 A write memory barrier gives a guarantee that all the STORE operations
277 specified before the barrier will appear to happen before all the STORE
278 operations specified after the barrier with respect to the other
279 components of the system.
280
281 A write barrier is a partial ordering on stores only; it is not required
282 to have any effect on loads.
283
284 A CPU can be viewed as committing a sequence of store operations to the
285 memory system as time progresses. All stores before a write barrier will
286 occur in the sequence _before_ all the stores after the write barrier.
287
288 [!] Note that write barriers should normally be paired with read or data
289 dependency barriers; see the "SMP barrier pairing" subsection.
290
291
292 (2) Data dependency barriers.
293
294 A data dependency barrier is a weaker form of read barrier. In the case
295 where two loads are performed such that the second depends on the result
296 of the first (eg: the first load retrieves the address to which the second
297 load will be directed), a data dependency barrier would be required to
298 make sure that the target of the second load is updated before the address
299 obtained by the first load is accessed.
300
301 A data dependency barrier is a partial ordering on interdependent loads
302 only; it is not required to have any effect on stores, independent loads
303 or overlapping loads.
304
305 As mentioned in (1), the other CPUs in the system can be viewed as
306 committing sequences of stores to the memory system that the CPU being
307 considered can then perceive. A data dependency barrier issued by the CPU
308 under consideration guarantees that for any load preceding it, if that
309 load touches one of a sequence of stores from another CPU, then by the
310 time the barrier completes, the effects of all the stores prior to that
311 touched by the load will be perceptible to any loads issued after the data
312 dependency barrier.
313
314 See the "Examples of memory barrier sequences" subsection for diagrams
315 showing the ordering constraints.
316
317 [!] Note that the first load really has to have a _data_ dependency and
318 not a control dependency. If the address for the second load is dependent
319 on the first load, but the dependency is through a conditional rather than
320 actually loading the address itself, then it's a _control_ dependency and
321 a full read barrier or better is required. See the "Control dependencies"
322 subsection for more information.
323
324 [!] Note that data dependency barriers should normally be paired with
325 write barriers; see the "SMP barrier pairing" subsection.
326
327
328 (3) Read (or load) memory barriers.
329
330 A read barrier is a data dependency barrier plus a guarantee that all the
331 LOAD operations specified before the barrier will appear to happen before
332 all the LOAD operations specified after the barrier with respect to the
333 other components of the system.
334
335 A read barrier is a partial ordering on loads only; it is not required to
336 have any effect on stores.
337
338 Read memory barriers imply data dependency barriers, and so can substitute
339 for them.
340
341 [!] Note that read barriers should normally be paired with write barriers;
342 see the "SMP barrier pairing" subsection.
343
344
345 (4) General memory barriers.
346
347 A general memory barrier is a combination of both a read memory barrier
348 and a write memory barrier. It is a partial ordering over both loads and
349 stores.
350
351 General memory barriers imply both read and write memory barriers, and so
352 can substitute for either.
353
354
355And a couple of implicit varieties:
356
357 (5) LOCK operations.
358
359 This acts as a one-way permeable barrier. It guarantees that all memory
360 operations after the LOCK operation will appear to happen after the LOCK
361 operation with respect to the other components of the system.
362
363 Memory operations that occur before a LOCK operation may appear to happen
364 after it completes.
365
366 A LOCK operation should almost always be paired with an UNLOCK operation.
367
368
369 (6) UNLOCK operations.
370
371 This also acts as a one-way permeable barrier. It guarantees that all
372 memory operations before the UNLOCK operation will appear to happen before
373 the UNLOCK operation with respect to the other components of the system.
374
375 Memory operations that occur after an UNLOCK operation may appear to
376 happen before it completes.
377
378 LOCK and UNLOCK operations are guaranteed to appear with respect to each
379 other strictly in the order specified.
380
381 The use of LOCK and UNLOCK operations generally precludes the need for
382 other sorts of memory barrier (but note the exceptions mentioned in the
383 subsection "MMIO write barrier").
384
385
386Memory barriers are only required where there's a possibility of interaction
387between two CPUs or between a CPU and a device. If it can be guaranteed that
388there won't be any such interaction in any particular piece of code, then
389memory barriers are unnecessary in that piece of code.
390
391
392Note that these are the _minimum_ guarantees. Different architectures may give
393more substantial guarantees, but they may _not_ be relied upon outside of arch
394specific code.
395
396
397WHAT MAY NOT BE ASSUMED ABOUT MEMORY BARRIERS?
398----------------------------------------------
399
400There are certain things that the Linux kernel memory barriers do not guarantee:
401
402 (*) There is no guarantee that any of the memory accesses specified before a
403 memory barrier will be _complete_ by the completion of a memory barrier
404 instruction; the barrier can be considered to draw a line in that CPU's
405 access queue that accesses of the appropriate type may not cross.
406
407 (*) There is no guarantee that issuing a memory barrier on one CPU will have
408 any direct effect on another CPU or any other hardware in the system. The
409 indirect effect will be the order in which the second CPU sees the effects
410 of the first CPU's accesses occur, but see the next point:
411
412 (*) There is no guarantee that a CPU will see the correct order of effects
413 from a second CPU's accesses, even _if_ the second CPU uses a memory
414 barrier, unless the first CPU _also_ uses a matching memory barrier (see
415 the subsection on "SMP Barrier Pairing").
416
417 (*) There is no guarantee that some intervening piece of off-the-CPU
418 hardware[*] will not reorder the memory accesses. CPU cache coherency
419 mechanisms should propagate the indirect effects of a memory barrier
420 between CPUs, but might not do so in order.
421
422 [*] For information on bus mastering DMA and coherency please read:
423
424 Documentation/pci.txt
425 Documentation/DMA-mapping.txt
426 Documentation/DMA-API.txt
427
428
429DATA DEPENDENCY BARRIERS
430------------------------
431
432The usage requirements of data dependency barriers are a little subtle, and
433it's not always obvious that they're needed. To illustrate, consider the
434following sequence of events:
435
436 CPU 1 CPU 2
437 =============== ===============
438 { A == 1, B == 2, C == 3, P == &A, Q == &C }
439 B = 4;
440 <write barrier>
441 P = &B
442 Q = P;
443 D = *Q;
444
445There's a clear data dependency here, and it would seem that by the end of the
446sequence, Q must be either &A or &B, and that:
447
448 (Q == &A) implies (D == 1)
449 (Q == &B) implies (D == 4)
450
451But! CPU 2's perception of P may be updated _before_ its perception of B, thus
452leading to the following situation:
453
454 (Q == &B) and (D == 2) ????
455
456Whilst this may seem like a failure of coherency or causality maintenance, it
457isn't, and this behaviour can be observed on certain real CPUs (such as the DEC
458Alpha).
459
460To deal with this, a data dependency barrier must be inserted between the
461address load and the data load:
462
463 CPU 1 CPU 2
464 =============== ===============
465 { A == 1, B == 2, C == 3, P == &A, Q == &C }
466 B = 4;
467 <write barrier>
468 P = &B
469 Q = P;
470 <data dependency barrier>
471 D = *Q;
472
473This enforces the occurrence of one of the two implications, and prevents the
474third possibility from arising.
475
476[!] Note that this extremely counterintuitive situation arises most easily on
477machines with split caches, so that, for example, one cache bank processes
478even-numbered cache lines and the other bank processes odd-numbered cache
479lines. The pointer P might be stored in an odd-numbered cache line, and the
480variable B might be stored in an even-numbered cache line. Then, if the
481even-numbered bank of the reading CPU's cache is extremely busy while the
482odd-numbered bank is idle, one can see the new value of the pointer P (&B),
483but the old value of the variable B (1).
484
485
486Another example of where data dependency barriers might be required is where a
487number is read from memory and then used to calculate the index for an array
488access:
489
490 CPU 1 CPU 2
491 =============== ===============
492 { M[0] == 1, M[1] == 2, M[3] == 3, P == 0, Q == 3 }
493 M[1] = 4;
494 <write barrier>
495 P = 1
496 Q = P;
497 <data dependency barrier>
498 D = M[Q];
499
500
501The data dependency barrier is very important to the RCU system, for example.
502See rcu_dereference() in include/linux/rcupdate.h. This permits the current
503target of an RCU'd pointer to be replaced with a new modified target, without
504the replacement target appearing to be incompletely initialised.
505
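A hedged sketch of the publish/subscribe pattern this supports (simplified;
this is not the actual rcu_dereference() implementation, and the structure and
variable names are made up):

	struct foo { int a; int b; };
	struct foo *global_ptr;			/* shared, initially NULL */

	void publish(struct foo *p)		/* runs on CPU 1 */
	{
		p->a = 1;
		p->b = 2;
		smp_wmb();			/* commit the contents before the pointer */
		global_ptr = p;
	}

	int subscribe(void)			/* runs on CPU 2 */
	{
		struct foo *q = global_ptr;

		if (!q)
			return -1;
		smp_read_barrier_depends();	/* pairs with the smp_wmb() above */
		return q->a;			/* sees 1, not pre-initialisation junk */
	}
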
506See also the subsection on "Cache Coherency" for a more thorough example.
507
508
509CONTROL DEPENDENCIES
510--------------------
511
512A control dependency requires a full read memory barrier, not simply a data
513dependency barrier to make it work correctly. Consider the following bit of
514code:
515
516 q = &a;
517 if (p)
518 q = &b;
519 <data dependency barrier>
520 x = *q;
521
522This will not have the desired effect because there is no actual data
523dependency, but rather a control dependency that the CPU may short-circuit by
524attempting to predict the outcome in advance. In such a case what's actually
525required is:
526
527 q = &a;
528 if (p)
529 q = &b;
530 <read barrier>
531 x = *q;
532
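Expressed with the kernel's primitives, a minimal sketch of the corrected
sequence would therefore use smp_rmb() (or a stronger barrier) rather than
smp_read_barrier_depends():

	q = &a;
	if (p)			/* control dependency only, not a data dependency */
		q = &b;
	smp_rmb();		/* a full read barrier is needed here */
	x = *q;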
533
534SMP BARRIER PAIRING
535-------------------
536
537When dealing with CPU-CPU interactions, certain types of memory barrier should
538always be paired. A lack of appropriate pairing is almost certainly an error.
539
540A write barrier should always be paired with a data dependency barrier or read
541barrier, though a general barrier would also be viable. Similarly a read
542barrier or a data dependency barrier should always be paired with at least an
543write barrier, though, again, a general barrier is viable:
544
545 CPU 1 CPU 2
546 =============== ===============
547 a = 1;
548 <write barrier>
549 b = 2; x = a;
550 <read barrier>
551 y = b;
552
553Or:
554
555 CPU 1 CPU 2
556 =============== ===============================
557 a = 1;
558 <write barrier>
559 b = &a; x = b;
560 <data dependency barrier>
561 y = *x;
562
563Basically, the read barrier always has to be there, even though it can be of
564the "weaker" type.
565
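A common concrete shape of such pairing is a flag-guarded handover of data; a
minimal sketch (the variables and consume() are placeholders):

	/* CPU 1 - producer */
	shared_data = 42;		/* the payload */
	smp_wmb();			/* order the data before the flag */
	data_ready = 1;

	/* CPU 2 - consumer */
	if (data_ready) {
		smp_rmb();		/* pairs with the smp_wmb() above */
		consume(shared_data);	/* guaranteed to see 42 */
	}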
566
567EXAMPLES OF MEMORY BARRIER SEQUENCES
568------------------------------------
569
570Firstly, write barriers act as partial orderings on store operations.
571Consider the following sequence of events:
572
573 CPU 1
574 =======================
575 STORE A = 1
576 STORE B = 2
577 STORE C = 3
578 <write barrier>
579 STORE D = 4
580 STORE E = 5
581
582This sequence of events is committed to the memory coherence system in an order
583that the rest of the system might perceive as the unordered set of { STORE A,
584STORE B, STORE C } all occurring before the unordered set of { STORE D, STORE E
585}:
586
587 +-------+ : :
588 | | +------+
589 | |------>| C=3 | } /\
590 | | : +------+ }----- \ -----> Events perceptible
591 | | : | A=1 | } \/ to rest of system
592 | | : +------+ }
593 | CPU 1 | : | B=2 | }
594 | | +------+ }
595 | | wwwwwwwwwwwwwwww } <--- At this point the write barrier
596 | | +------+ } requires all stores prior to the
597 | | : | E=5 | } barrier to be committed before
598 | | : +------+ } further stores may take place.
599 | |------>| D=4 | }
600 | | +------+
601 +-------+ : :
602 |
603 | Sequence in which stores committed to memory system
604 | by CPU 1
605 V
606
607
608Secondly, data dependency barriers act as partial orderings on data-dependent
609loads. Consider the following sequence of events:
610
611 CPU 1 CPU 2
612 ======================= =======================
613 STORE A = 1
614 STORE B = 2
615 <write barrier>
616 STORE C = &B LOAD X
617 STORE D = 4 LOAD C (gets &B)
618 LOAD *C (reads B)
619
620Without intervention, CPU 2 may perceive the events on CPU 1 in some
621effectively random order, despite the write barrier issued by CPU 1:
622
623 +-------+ : : : :
624 | | +------+ +-------+ | Sequence of update
625 | |------>| B=2 |----- --->| Y->8 | | of perception on
626 | | : +------+ \ +-------+ | CPU 2
627 | CPU 1 | : | A=1 | \ --->| C->&Y | V
628 | | +------+ | +-------+
629 | | wwwwwwwwwwwwwwww | : :
630 | | +------+ | : :
631 | | : | C=&B |--- | : : +-------+
632 | | : +------+ \ | +-------+ | |
633 | |------>| D=4 | ----------->| C->&B |------>| |
634 | | +------+ | +-------+ | |
635 +-------+ : : | : : | |
636 | : : | |
637 | : : | CPU 2 |
638 | +-------+ | |
639 Apparently incorrect ---> | | B->7 |------>| |
640 perception of B (!) | +-------+ | |
641 | : : | |
642 | +-------+ | |
643 The load of X holds ---> \ | X->9 |------>| |
644 up the maintenance \ +-------+ | |
645 of coherence of B ----->| B->2 | +-------+
646 +-------+
647 : :
648
649
650In the above example, CPU 2 perceives that B is 7, despite the load of *C
651(which would be B) coming after the LOAD of C.
652
653If, however, a data dependency barrier were to be placed between the load of C
654and the load of *C (ie: B) on CPU 2, then the following will occur:
655
656 +-------+ : : : :
657 | | +------+ +-------+
658 | |------>| B=2 |----- --->| Y->8 |
659 | | : +------+ \ +-------+
660 | CPU 1 | : | A=1 | \ --->| C->&Y |
661 | | +------+ | +-------+
662 | | wwwwwwwwwwwwwwww | : :
663 | | +------+ | : :
664 | | : | C=&B |--- | : : +-------+
665 | | : +------+ \ | +-------+ | |
666 | |------>| D=4 | ----------->| C->&B |------>| |
667 | | +------+ | +-------+ | |
668 +-------+ : : | : : | |
669 | : : | |
670 | : : | CPU 2 |
671 | +-------+ | |
672 \ | X->9 |------>| |
673 \ +-------+ | |
674 ----->| B->2 | | |
675 +-------+ | |
676 Makes sure all effects ---> ddddddddddddddddd | |
677 prior to the store of C +-------+ | |
678 are perceptible to | B->2 |------>| |
679 successive loads +-------+ | |
680 : : +-------+
681
682
683And thirdly, a read barrier acts as a partial order on loads. Consider the
684following sequence of events:
685
686 CPU 1 CPU 2
687 ======================= =======================
688 STORE A=1
689 STORE B=2
690 STORE C=3
691 <write barrier>
692 STORE D=4
693 STORE E=5
694 LOAD A
695 LOAD B
696 LOAD C
697 LOAD D
698 LOAD E
699
700Without intervention, CPU 2 may then choose to perceive the events on CPU 1 in
701some effectively random order, despite the write barrier issued by CPU 1:
702
703 +-------+ : :
704 | | +------+
705 | |------>| C=3 | }
706 | | : +------+ }
707 | | : | A=1 | }
708 | | : +------+ }
709 | CPU 1 | : | B=2 | }---
710 | | +------+ } \
711 | | wwwwwwwwwwwww} \
712 | | +------+ } \ : : +-------+
713 | | : | E=5 | } \ +-------+ | |
714 | | : +------+ } \ { | C->3 |------>| |
715 | |------>| D=4 | } \ { +-------+ : | |
716 | | +------+ \ { | E->5 | : | |
717 +-------+ : : \ { +-------+ : | |
718 Transfer -->{ | A->1 | : | CPU 2 |
719 from CPU 1 { +-------+ : | |
720 to CPU 2 { | D->4 | : | |
721 { +-------+ : | |
722 { | B->2 |------>| |
723 +-------+ | |
724 : : +-------+
725
726
727If, however, a read barrier were to be placed between the load of C and the
728load of D on CPU 2, then the partial ordering imposed by CPU 1 will be
729perceived correctly by CPU 2.
730
731 +-------+ : :
732 | | +------+
733 | |------>| C=3 | }
734 | | : +------+ }
735 | | : | A=1 | }---
736 | | : +------+ } \
737 | CPU 1 | : | B=2 | } \
738 | | +------+ \
739 | | wwwwwwwwwwwwwwww \
740 | | +------+ \ : : +-------+
741 | | : | E=5 | } \ +-------+ | |
742 | | : +------+ }--- \ { | C->3 |------>| |
743 | |------>| D=4 | } \ \ { +-------+ : | |
744 | | +------+ \ -->{ | B->2 | : | |
745 +-------+ : : \ { +-------+ : | |
746 \ { | A->1 | : | CPU 2 |
747 \ +-------+ | |
748 At this point the read ----> \ rrrrrrrrrrrrrrrrr | |
749 barrier causes all effects \ +-------+ | |
750 prior to the storage of C \ { | E->5 | : | |
751 to be perceptible to CPU 2 -->{ +-------+ : | |
752 { | D->4 |------>| |
753 +-------+ | |
754 : : +-------+
755
756
757========================
758EXPLICIT KERNEL BARRIERS
759========================
760
761The Linux kernel has a variety of different barriers that act at different
762levels:
763
764 (*) Compiler barrier.
765
766 (*) CPU memory barriers.
767
768 (*) MMIO write barrier.
769
770
771COMPILER BARRIER
772----------------
773
774The Linux kernel has an explicit compiler barrier function that prevents the
775compiler from moving the memory accesses either side of it to the other side:
776
777 barrier();
778
779This is a general barrier - lesser varieties of compiler barrier do not exist.
780
781The compiler barrier has no direct effect on the CPU, which may then reorder
782things however it wishes.
783
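For example, a flag polled in a loop may need barrier() to stop the compiler
caching the value in a register across iterations (a sketch; the flag is
hypothetical and, as noted above, this does nothing about CPU reordering):

	while (!data_ready) {
		barrier();	/* force data_ready to be re-read from memory */
	}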
784
785CPU MEMORY BARRIERS
786-------------------
787
788The Linux kernel has eight basic CPU memory barriers:
789
790 TYPE MANDATORY SMP CONDITIONAL
791 =============== ======================= ===========================
792 GENERAL mb() smp_mb()
793 WRITE wmb() smp_wmb()
794 READ rmb() smp_rmb()
795 DATA DEPENDENCY read_barrier_depends() smp_read_barrier_depends()
796
797
798All CPU memory barriers unconditionally imply compiler barriers.
799
800SMP memory barriers are reduced to compiler barriers on uniprocessor compiled
801systems because it is assumed that a CPU will appear to be self-consistent,
802and will order overlapping accesses correctly with respect to itself.
803
804[!] Note that SMP memory barriers _must_ be used to control the ordering of
805references to shared memory on SMP systems, though the use of locking instead
806is sufficient.
807
808Mandatory barriers should not be used to control SMP effects, since mandatory
809barriers unnecessarily impose overhead on UP systems. They may, however, be
810used to control MMIO effects on accesses through relaxed memory I/O windows.
811These are required even on non-SMP systems as they affect the order in which
812memory operations appear to a device by prohibiting both the compiler and the
813CPU from reordering them.
814
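For instance, when a device buffer is mapped through a relaxed (e.g.
write-combining) window, a mandatory barrier may be needed between filling the
buffer and ringing the doorbell, even on a UP kernel; a hedged sketch with a
made-up register layout:

	memcpy_toio(dev->wc_buf, buf, len);	/* hypothetical relaxed-ordered window */
	wmb();					/* mandatory barrier, not smp_wmb() */
	writel(len, dev->regs + TX_DOORBELL);	/* hypothetical doorbell register */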
815
816There are some more advanced barrier functions:
817
818 (*) set_mb(var, value)
819 (*) set_wmb(var, value)
820
821 These assign the value to the variable and then insert at least a write
822 barrier after it, depending on the function. They aren't guaranteed to
823 insert anything more than a compiler barrier in a UP compilation.
824
825
826 (*) smp_mb__before_atomic_dec();
827 (*) smp_mb__after_atomic_dec();
828 (*) smp_mb__before_atomic_inc();
829 (*) smp_mb__after_atomic_inc();
830
831 These are for use with atomic add, subtract, increment and decrement
832 functions, especially when used for reference counting. These functions
833 do not imply memory barriers.
834
835 As an example, consider a piece of code that marks an object as being dead
836 and then decrements the object's reference count:
837
838 obj->dead = 1;
839 smp_mb__before_atomic_dec();
840 atomic_dec(&obj->ref_count);
841
842 This makes sure that the death mark on the object is perceived to be set
843 *before* the reference counter is decremented.
844
845 See Documentation/atomic_ops.txt for more information. See the "Atomic
846 operations" subsection for information on where to use these.
847
848
849 (*) smp_mb__before_clear_bit(void);
850 (*) smp_mb__after_clear_bit(void);
851
852 These are for use similar to the atomic inc/dec barriers. These are
853 typically used for bitwise unlocking operations, so care must be taken as
854 there are no implicit memory barriers here either.
855
856 Consider implementing an unlock operation of some nature by clearing a
857 locking bit. The clear_bit() would then need to be barriered like this:
858
859 smp_mb__before_clear_bit();
860 clear_bit( ... );
861
862 This prevents memory operations before the clear leaking to after it. See
863 the subsection on "Locking Functions" with reference to UNLOCK operation
864 implications.
865
866 See Documentation/atomic_ops.txt for more information. See the "Atomic
867 operations" subsection for information on where to use these.
868
869
870MMIO WRITE BARRIER
871------------------
872
873The Linux kernel also has a special barrier for use with memory-mapped I/O
874writes:
875
876 mmiowb();
877
878This is a variation on the mandatory write barrier that causes writes to weakly
879ordered I/O regions to be partially ordered. Its effects may go beyond the
880CPU->Hardware interface and actually affect the hardware at some level.
881
882See the subsection "Locks vs I/O accesses" for more information.
883
884
885===============================
886IMPLICIT KERNEL MEMORY BARRIERS
887===============================
888
889Some of the other functions in the linux kernel imply memory barriers, amongst
890which are locking, scheduling and memory allocation functions.
891
892This specification is a _minimum_ guarantee; any particular architecture may
893provide more substantial guarantees, but these may not be relied upon outside
894of arch specific code.
895
896
897LOCKING FUNCTIONS
898-----------------
899
900The Linux kernel has a number of locking constructs:
901
902 (*) spin locks
903 (*) R/W spin locks
904 (*) mutexes
905 (*) semaphores
906 (*) R/W semaphores
907 (*) RCU
908
909In all cases there are variants on "LOCK" operations and "UNLOCK" operations
910for each construct. These operations all imply certain barriers:
911
912 (1) LOCK operation implication:
913
914 Memory operations issued after the LOCK will be completed after the LOCK
915 operation has completed.
916
917 Memory operations issued before the LOCK may be completed after the LOCK
918 operation has completed.
919
920 (2) UNLOCK operation implication:
921
922 Memory operations issued before the UNLOCK will be completed before the
923 UNLOCK operation has completed.
924
925 Memory operations issued after the UNLOCK may be completed before the
926 UNLOCK operation has completed.
927
928 (3) LOCK vs LOCK implication:
929
930 All LOCK operations issued before another LOCK operation will be completed
931 before that LOCK operation.
932
933 (4) LOCK vs UNLOCK implication:
934
935 All LOCK operations issued before an UNLOCK operation will be completed
936 before the UNLOCK operation.
937
938 All UNLOCK operations issued before a LOCK operation will be completed
939 before the LOCK operation.
940
941 (5) Failed conditional LOCK implication:
942
943 Certain variants of the LOCK operation may fail, either due to being
944 unable to get the lock immediately, or due to receiving an unblocked
945 signal whilst asleep waiting for the lock to become available. Failed
946 locks do not imply any sort of barrier.
947
948Therefore, from (1), (2) and (4) an UNLOCK followed by an unconditional LOCK is
949equivalent to a full barrier, but a LOCK followed by an UNLOCK is not.
950
951[!] Note: one of the consequences of LOCKs and UNLOCKs being only one-way
952 barriers is that the effects of instructions outside of a critical section may
953 seep into the inside of the critical section.
954
955Locks and semaphores may not provide any guarantee of ordering on UP compiled
956systems, and so cannot be counted on in such a situation to actually achieve
957anything at all - especially with respect to I/O accesses - unless combined
958with interrupt disabling operations.
959
960See also the section on "Inter-CPU locking barrier effects".
961
962
963As an example, consider the following:
964
965 *A = a;
966 *B = b;
967 LOCK
968 *C = c;
969 *D = d;
970 UNLOCK
971 *E = e;
972 *F = f;
973
974The following sequence of events is acceptable:
975
976 LOCK, {*F,*A}, *E, {*C,*D}, *B, UNLOCK
977
978 [+] Note that {*F,*A} indicates a combined access.
979
980But none of the following are:
981
982 {*F,*A}, *B, LOCK, *C, *D, UNLOCK, *E
983 *A, *B, *C, LOCK, *D, UNLOCK, *E, *F
984 *A, *B, LOCK, *C, UNLOCK, *D, *E, *F
985 *B, LOCK, *C, *D, UNLOCK, {*F,*A}, *E
986
987
988
989INTERRUPT DISABLING FUNCTIONS
990-----------------------------
991
992Functions that disable interrupts (LOCK equivalent) and enable interrupts
993(UNLOCK equivalent) will act as compiler barriers only. So if memory or I/O
994barriers are required in such a situation, they must be provided from some
995other means.
996
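So, for example, something along these lines still needs an explicit barrier if
device ordering matters inside the interrupt-disabled region (a sketch, written
in the same notation as the interrupt example later in this document; ADDR and
DATA are the hypothetical device registers used there):

	unsigned long flags;

	local_irq_save(flags);		/* acts as a compiler barrier only */
	writew(ADDR, 3);
	writew(DATA, y);
	mmiowb();			/* ordering must be supplied explicitly */
	local_irq_restore(flags);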
997
998MISCELLANEOUS FUNCTIONS
999-----------------------
1000
1001Other functions that imply barriers:
1002
1003 (*) schedule() and similar imply full memory barriers.
1004
1005 (*) Memory allocation and release functions imply full memory barriers.
1006
1007
1008=================================
1009INTER-CPU LOCKING BARRIER EFFECTS
1010=================================
1011
1012On SMP systems locking primitives give a more substantial form of barrier: one
1013that does affect memory access ordering on other CPUs, within the context of
1014conflict on any particular lock.
1015
1016
1017LOCKS VS MEMORY ACCESSES
1018------------------------
1019
1020Consider the following: the system has a pair of spinlocks (M) and (Q), and
1021three CPUs; then should the following sequence of events occur:
1022
1023 CPU 1 CPU 2
1024 =============================== ===============================
1025 *A = a; *E = e;
1026 LOCK M LOCK Q
1027 *B = b; *F = f;
1028 *C = c; *G = g;
1029 UNLOCK M UNLOCK Q
1030 *D = d; *H = h;
1031
1032Then there is no guarantee as to what order CPU #3 will see the accesses to *A
1033through *H occur in, other than the constraints imposed by the separate locks
1034on the separate CPUs. It might, for example, see:
1035
1036 *E, LOCK M, LOCK Q, *G, *C, *F, *A, *B, UNLOCK Q, *D, *H, UNLOCK M
1037
1038But it won't see any of:
1039
1040 *B, *C or *D preceding LOCK M
1041 *A, *B or *C following UNLOCK M
1042 *F, *G or *H preceding LOCK Q
1043 *E, *F or *G following UNLOCK Q
1044
1045
1046However, if the following occurs:
1047
1048 CPU 1 CPU 2
1049 =============================== ===============================
1050 *A = a;
1051 LOCK M [1]
1052 *B = b;
1053 *C = c;
1054 UNLOCK M [1]
1055 *D = d; *E = e;
1056 LOCK M [2]
1057 *F = f;
1058 *G = g;
1059 UNLOCK M [2]
1060 *H = h;
1061
1062CPU #3 might see:
1063
1064 *E, LOCK M [1], *C, *B, *A, UNLOCK M [1],
1065 LOCK M [2], *H, *F, *G, UNLOCK M [2], *D
1066
1067But assuming CPU #1 gets the lock first, it won't see any of:
1068
1069 *B, *C, *D, *F, *G or *H preceding LOCK M [1]
1070 *A, *B or *C following UNLOCK M [1]
1071 *F, *G or *H preceding LOCK M [2]
1072 *A, *B, *C, *E, *F or *G following UNLOCK M [2]
1073
1074
1075LOCKS VS I/O ACCESSES
1076---------------------
1077
1078Under certain circumstances (especially involving NUMA), I/O accesses within
1079two spinlocked sections on two different CPUs may be seen as interleaved by the
1080PCI bridge, because the PCI bridge does not necessarily participate in the
1081cache-coherence protocol, and is therefore incapable of issuing the required
1082read memory barriers.
1083
1084For example:
1085
1086 CPU 1 CPU 2
1087 =============================== ===============================
1088 spin_lock(Q)
1089 writel(0, ADDR)
1090 writel(1, DATA);
1091 spin_unlock(Q);
1092 spin_lock(Q);
1093 writel(4, ADDR);
1094 writel(5, DATA);
1095 spin_unlock(Q);
1096
1097may be seen by the PCI bridge as follows:
1098
1099 STORE *ADDR = 0, STORE *ADDR = 4, STORE *DATA = 1, STORE *DATA = 5
1100
1101which would probably cause the hardware to malfunction.
1102
1103
1104What is necessary here is to intervene with an mmiowb() before dropping the
1105spinlock, for example:
1106
1107 CPU 1 CPU 2
1108 =============================== ===============================
1109 spin_lock(Q)
1110 writel(0, ADDR)
1111 writel(1, DATA);
1112 mmiowb();
1113 spin_unlock(Q);
1114 spin_lock(Q);
1115 writel(4, ADDR);
1116 writel(5, DATA);
1117 mmiowb();
1118 spin_unlock(Q);
1119
1120this will ensure that the two stores issued on CPU #1 appear at the PCI bridge
1121before either of the stores issued on CPU #2.
1122
1123
1124Furthermore, following a store by a load to the same device obviates the need
1125for an mmiowb(), because the load forces the store to complete before the load
1126is performed:
1127
1128 CPU 1 CPU 2
1129 =============================== ===============================
1130 spin_lock(Q)
1131 writel(0, ADDR)
1132 a = readl(DATA);
1133 spin_unlock(Q);
1134 spin_lock(Q);
1135 writel(4, ADDR);
1136 b = readl(DATA);
1137 spin_unlock(Q);
1138
1139
1140See Documentation/DocBook/deviceiobook.tmpl for more information.
1141
1142
1143=================================
1144WHERE ARE MEMORY BARRIERS NEEDED?
1145=================================
1146
1147Under normal operation, memory operation reordering is generally not going to
1148be a problem as a single-threaded linear piece of code will still appear to
1149work correctly, even if it's in an SMP kernel. There are, however, three
1150circumstances in which reordering definitely _could_ be a problem:
1151
1152 (*) Interprocessor interaction.
1153
1154 (*) Atomic operations.
1155
1156 (*) Accessing devices (I/O).
1157
1158 (*) Interrupts.
1159
1160
1161INTERPROCESSOR INTERACTION
1162--------------------------
1163
1164When there's a system with more than one processor, more than one CPU in the
1165system may be working on the same data set at the same time. This can cause
1166synchronisation problems, and the usual way of dealing with them is to use
1167locks. Locks, however, are quite expensive, and so it may be preferable to
1168operate without the use of a lock if at all possible. In such a case
1169operations that affect both CPUs may have to be carefully ordered to prevent
1170a malfunction.
1171
1172Consider, for example, the R/W semaphore slow path. Here a waiting process is
1173queued on the semaphore, by virtue of it having a piece of its stack linked to
1174the semaphore's list of waiting processes:
1175
1176 struct rw_semaphore {
1177 ...
1178 spinlock_t lock;
1179 struct list_head waiters;
1180 };
1181
1182 struct rwsem_waiter {
1183 struct list_head list;
1184 struct task_struct *task;
1185 };
1186
1187To wake up a particular waiter, the up_read() or up_write() functions have to:
1188
1189 (1) read the next pointer from this waiter's record to know as to where the
1190 next waiter record is;
1191
1192 (2) read the pointer to the waiter's task structure;
1193
1194 (3) clear the task pointer to tell the waiter it has been given the semaphore;
1195
1196 (4) call wake_up_process() on the task; and
1197
1198 (5) release the reference held on the waiter's task struct.
1199
1200In other words, it has to perform this sequence of events:
1201
1202 LOAD waiter->list.next;
1203 LOAD waiter->task;
1204 STORE waiter->task;
1205 CALL wakeup
1206 RELEASE task
1207
1208and if any of these steps occur out of order, then the whole thing may
1209malfunction.
1210
1211Once it has queued itself and dropped the semaphore lock, the waiter does not
1212get the lock again; it instead just waits for its task pointer to be cleared
1213before proceeding. Since the record is on the waiter's stack, this means that
1214if the task pointer is cleared _before_ the next pointer in the list is read,
1215another CPU might start processing the waiter and might clobber the waiter's
1216stack before the up*() function has a chance to read the next pointer.
1217
1218Consider then what might happen to the above sequence of events:
1219
1220 CPU 1 CPU 2
1221 =============================== ===============================
1222 down_xxx()
1223 Queue waiter
1224 Sleep
1225 up_yyy()
1226 LOAD waiter->task;
1227 STORE waiter->task;
1228 Woken up by other event
1229 <preempt>
1230 Resume processing
1231 down_xxx() returns
1232 call foo()
1233 foo() clobbers *waiter
1234 </preempt>
1235 LOAD waiter->list.next;
1236 --- OOPS ---
1237
1238This could be dealt with using the semaphore lock, but then the down_xxx()
1239function has to needlessly get the spinlock again after being woken up.
1240
1241The way to deal with this is to insert a general SMP memory barrier:
1242
1243 LOAD waiter->list.next;
1244 LOAD waiter->task;
1245 smp_mb();
1246 STORE waiter->task;
1247 CALL wakeup
1248 RELEASE task
1249
1250In this case, the barrier makes a guarantee that all memory accesses before the
1251barrier will appear to happen before all the memory accesses after the barrier
1252with respect to the other CPUs on the system. It does _not_ guarantee that all
1253the memory accesses before the barrier will be complete by the time the barrier
1254instruction itself is complete.
1255
1256On a UP system - where this wouldn't be a problem - the smp_mb() is just a
1257compiler barrier, thus making sure the compiler emits the instructions in the
1258right order without actually intervening in the CPU. Since there's only
1259one CPU, that CPU's dependency ordering logic will take care of everything
1260else.
1261
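Putting steps (1)-(5) together, a simplified C sketch of the waker side
(illustrative only, not the exact lib/rwsem.c code) might read:

	struct rwsem_waiter *waiter =
		list_entry(sem->waiters.next, struct rwsem_waiter, list);
	struct list_head *next;
	struct task_struct *tsk;

	next = waiter->list.next;	/* (1) */
	tsk = waiter->task;		/* (2) */
	smp_mb();			/* make (1) and (2) perceptible first */
	waiter->task = NULL;		/* (3) waiter may now proceed and go away */
	wake_up_process(tsk);		/* (4) */
	put_task_struct(tsk);		/* (5) */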
1262
1263ATOMIC OPERATIONS
1264-----------------
1265
1266Though they are technically interprocessor interaction considerations, atomic
1267operations are noted specially as they do _not_ generally imply memory
1268barriers. The possible offenders include:
1269
1270 xchg();
1271 cmpxchg();
1272 test_and_set_bit();
1273 test_and_clear_bit();
1274 test_and_change_bit();
1275 atomic_cmpxchg();
1276 atomic_inc_return();
1277 atomic_dec_return();
1278 atomic_add_return();
1279 atomic_sub_return();
1280 atomic_inc_and_test();
1281 atomic_dec_and_test();
1282 atomic_sub_and_test();
1283 atomic_add_negative();
1284 atomic_add_unless();
1285
1286These may be used for such things as implementing LOCK operations or controlling
1287the lifetime of objects by decreasing their reference counts. In such cases
1288they need preceding memory barriers.
1289
1290The following may also be possible offenders as they may be used as UNLOCK
1291operations.
1292
1293 set_bit();
1294 clear_bit();
1295 change_bit();
1296 atomic_set();
1297
1298
1299The following are a little tricky:
1300
1301 atomic_add();
1302 atomic_sub();
1303 atomic_inc();
1304 atomic_dec();
1305
1306If they're used for statistics generation, then they probably don't need memory
1307barriers, unless there's a coupling between statistical data.
1308
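For example, plain event counters typically need no barriers at all (a sketch;
the counters are hypothetical):

	atomic_inc(&stats.rx_packets);		/* no ordering against anything else */
	atomic_add(len, &stats.rx_bytes);	/* so no barrier is required */
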
1309If they're used for reference counting on an object to control its lifetime,
1310they probably don't need memory barriers because either the reference count
1311will be adjusted inside a locked section, or the caller will already hold
1312sufficient references to make the lock, and thus a memory barrier, unnecessary.
1313
1314If they're used for constructing a lock of some description, then they probably
1315do need memory barriers as a lock primitive generally has to do things in a
1316specific order.
1317
1318
1319Basically, each usage case has to be carefully considered as to whether memory
1320barriers are needed or not. The simplest rule is probably: if the atomic
1321operation is protected by a lock, then it does not require a barrier unless
1322there's another operation within the critical section with respect to which an
1323ordering must be maintained.
1324
1325See Documentation/atomic_ops.txt for more information.
1326
1327
1328ACCESSING DEVICES
1329-----------------
1330
1331Many devices can be memory mapped, and so appear to the CPU as if they're just
1332a set of memory locations. To control such a device, the driver usually has to
1333make the right memory accesses in exactly the right order.
1334
1335However, having a clever CPU or a clever compiler creates a potential problem
1336in that the carefully sequenced accesses in the driver code won't reach the
1337device in the requisite order if the CPU or the compiler thinks it is more
1338efficient to reorder, combine or merge accesses - something that would cause
1339the device to malfunction.
1340
1341Inside of the Linux kernel, I/O should be done through the appropriate accessor
1342routines - such as inb() or writel() - which know how to make such accesses
1343appropriately sequential. Whilst this, for the most part, renders the explicit
1344use of memory barriers unnecessary, there are a couple of situations where they
1345might be needed:
1346
1347 (1) On some systems, I/O stores are not strongly ordered across all CPUs, and
1348 so for _all_ general drivers locks should be used and mmiowb() must be
1349 issued prior to unlocking the critical section.
1350
1351 (2) If the accessor functions are used to refer to an I/O memory window with
1352 relaxed memory access properties, then _mandatory_ memory barriers are
1353 required to enforce ordering.
1354
1355See Documentation/DocBook/deviceiobook.tmpl for more information.
1356
1357
1358INTERRUPTS
1359----------
1360
1361A driver may be interrupted by its own interrupt service routine, and thus the
1362two parts of the driver may interfere with each other's attempts to control or
1363access the device.
1364
1365This may be alleviated - at least in part - by disabling local interrupts (a
1366form of locking), such that the critical operations are all contained within
1367the interrupt-disabled section in the driver. Whilst the driver's interrupt
1368routine is executing, the driver's core may not run on the same CPU, and its
1369interrupt is not permitted to happen again until the current interrupt has been
1370handled; thus the interrupt handler does not need to lock against that.
1371
1372However, consider a driver that was talking to an ethernet card that sports an
1373address register and a data register. If that driver's core talks to the card
1374under interrupt-disablement and then the driver's interrupt handler is invoked:
1375
1376 LOCAL IRQ DISABLE
1377 writew(ADDR, 3);
1378 writew(DATA, y);
1379 LOCAL IRQ ENABLE
1380 <interrupt>
1381 writew(ADDR, 4);
1382 q = readw(DATA);
1383 </interrupt>
1384
1385The store to the data register might happen after the second store to the
1386address register if ordering rules are sufficiently relaxed:
1387
1388 STORE *ADDR = 3, STORE *ADDR = 4, STORE *DATA = y, q = LOAD *DATA
1389
1390
1391If ordering rules are relaxed, it must be assumed that accesses done inside an
1392interrupt-disabled section may leak outside of it and may interleave with
1393accesses performed in an interrupt - and vice versa - unless implicit or
1394explicit barriers are used.
1395
1396Normally this won't be a problem because the I/O accesses done inside such
1397sections will include synchronous load operations on strictly ordered I/O
1398registers that form implicit I/O barriers. If this isn't sufficient then an
1399mmiowb() may need to be used explicitly.
1400
1401
1402A similar situation may occur between an interrupt routine and two routines
1403running on separate CPUs that communicate with each other. If such a case is
1404likely, then interrupt-disabling locks should be used to guarantee ordering.
1405
1406
1407==========================
1408KERNEL I/O BARRIER EFFECTS
1409==========================
1410
1411When accessing I/O memory, drivers should use the appropriate accessor
1412functions:
1413
1414 (*) inX(), outX():
1415
1416 These are intended to talk to I/O space rather than memory space, but
1417 that's primarily a CPU-specific concept. The i386 and x86_64 processors do
1418 indeed have special I/O space access cycles and instructions, but many
1419 CPUs don't have such a concept.
1420
1421     The PCI bus, amongst others, defines an I/O space concept - which on CPUs
1422     such as i386 and x86_64 readily maps to the CPU's concept of I/O
1423     space. However, it may also be mapped as a virtual I/O space in the CPU's
1424     memory map, particularly on those CPUs that don't support alternate
1425     I/O spaces.
1426
1427 Accesses to this space may be fully synchronous (as on i386), but
1428 intermediary bridges (such as the PCI host bridge) may not fully honour
1429 that.
1430
1431 They are guaranteed to be fully ordered with respect to each other.
1432
1433 They are not guaranteed to be fully ordered with respect to other types of
1434 memory and I/O operation.
1435
1436 (*) readX(), writeX():
1437
1438 Whether these are guaranteed to be fully ordered and uncombined with
1439 respect to each other on the issuing CPU depends on the characteristics
1440 defined for the memory window through which they're accessing. On later
1441 i386 architecture machines, for example, this is controlled by way of the
1442 MTRR registers.
1443
1444     Ordinarily, these will be guaranteed to be fully ordered and uncombined,
1445 provided they're not accessing a prefetchable device.
1446
1447 However, intermediary hardware (such as a PCI bridge) may indulge in
1448 deferral if it so wishes; to flush a store, a load from the same location
1449 is preferred[*], but a load from the same device or from configuration
1450 space should suffice for PCI.
1451
1452 [*] NOTE! attempting to load from the same location as was written to may
1453 cause a malfunction - consider the 16550 Rx/Tx serial registers for
1454 example.
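
     For example (the register offsets here are hypothetical), a deferred store
     can be flushed by reading back from a different register of the same
     device:

	writel(val, dev_base + DATA_REG);	/* may be posted by a bridge */
	(void) readl(dev_base + STATUS_REG);	/* forces the store to complete */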
1455
1456 Used with prefetchable I/O memory, an mmiowb() barrier may be required to
1457 force stores to be ordered.
1458
1459 Please refer to the PCI specification for more information on interactions
1460 between PCI transactions.
1461
1462 (*) readX_relaxed()
1463
1464 These are similar to readX(), but are not guaranteed to be ordered in any
1465 way. Be aware that there is no I/O read barrier available.
1466
1467 (*) ioreadX(), iowriteX()
1468
1469 These will perform as appropriate for the type of access they're actually
1470 doing, be it inX()/outX() or readX()/writeX().
1471
1472
1473========================================
1474ASSUMED MINIMUM EXECUTION ORDERING MODEL
1475========================================
1476
1477It has to be assumed that the conceptual CPU is weakly-ordered but that it will
1478maintain the appearance of program causality with respect to itself. Some CPUs
1479(such as i386 or x86_64) are more constrained than others (such as powerpc or
1480frv), and so the most relaxed case (namely DEC Alpha) must be assumed outside
1481of arch-specific code.
1482
1483This means that it must be considered that the CPU will execute its instruction
1484stream in any order it feels like - or even in parallel - provided that if an
1485instruction in the stream depends on an earlier instruction, then that
1486earlier instruction must be sufficiently complete[*] before the later
1487instruction may proceed; in other words: provided that the appearance of
1488causality is maintained.
1489
1490 [*] Some instructions have more than one effect - such as changing the
1491 condition codes, changing registers or changing memory - and different
1492 instructions may depend on different effects.
1493
1494A CPU may also discard any instruction sequence that winds up having no
1495ultimate effect. For example, if two adjacent instructions both load an
1496immediate value into the same register, the first may be discarded.
1497
1498
1499Similarly, it has to be assumed that the compiler might reorder the instruction
1500stream in any way it sees fit, again provided the appearance of causality is
1501maintained.
1502
1503
1504============================
1505THE EFFECTS OF THE CPU CACHE
1506============================
1507
1508The way cached memory operations are perceived across the system is affected to
1509a certain extent by the caches that lie between CPUs and memory, and by the
1510memory coherence system that maintains the consistency of state in the system.
1511
1512As far as the way a CPU interacts with another part of the system through the
1513caches goes, the memory system has to include the CPU's caches, and memory
1514barriers for the most part act at the interface between the CPU and its cache
1515(memory barriers logically act on the dotted line in the following diagram):
1516
1517 <--- CPU ---> : <----------- Memory ----------->
1518 :
1519 +--------+ +--------+ : +--------+ +-----------+
1520 | | | | : | | | | +--------+
1521 | CPU | | Memory | : | CPU | | | | |
1522 | Core |--->| Access |----->| Cache |<-->| | | |
1523 | | | Queue | : | | | |--->| Memory |
1524 | | | | : | | | | | |
1525 +--------+ +--------+ : +--------+ | | | |
1526 : | Cache | +--------+
1527 : | Coherency |
1528 : | Mechanism | +--------+
1529 +--------+ +--------+ : +--------+ | | | |
1530 | | | | : | | | | | |
1531 | CPU | | Memory | : | CPU | | |--->| Device |
1532 | Core |--->| Access |----->| Cache |<-->| | | |
1533 | | | Queue | : | | | | | |
1534 | | | | : | | | | +--------+
1535 +--------+ +--------+ : +--------+ +-----------+
1536 :
1537 :
1538
1539Although any particular load or store may not actually appear outside of the
1540CPU that issued it since it may have been satisfied within the CPU's own cache,
1541it will still appear as if the full memory access had taken place as far as the
1542other CPUs are concerned since the cache coherency mechanisms will migrate the
1543cacheline over to the accessing CPU and propagate the effects upon conflict.
1544
1545The CPU core may execute instructions in any order it deems fit, provided the
1546expected program causality appears to be maintained. Some of the instructions
1547generate load and store operations which then go into the queue of memory
1548accesses to be performed. The core may place these in the queue in any order
1549it wishes, and continue execution until it is forced to wait for an instruction
1550to complete.
1551
1552What memory barriers are concerned with is controlling the order in which
1553accesses cross from the CPU side of things to the memory side of things, and
1554the order in which the effects are perceived to happen by the other observers
1555in the system.
1556
1557[!] Memory barriers are _not_ needed within a given CPU, as CPUs always see
1558their own loads and stores as if they had happened in program order.
1559
1560[!] MMIO or other device accesses may bypass the cache system. This depends on
1561the properties of the memory window through which devices are accessed and/or
1562the use of any special device communication instructions the CPU may have.
1563
1564
1565CACHE COHERENCY
1566---------------
1567
1568Life isn't quite as simple as it may appear above, however: for while the
1569caches are expected to be coherent, there's no guarantee that that coherency
1570will be ordered. This means that whilst changes made on one CPU will
1571eventually become visible on all CPUs, there's no guarantee that they will
1572become apparent in the same order on those other CPUs.
1573
1574
1575Consider dealing with a system that has a pair of CPUs (1 & 2), each of which has
1576a pair of parallel data caches (CPU 1 has A/B, and CPU 2 has C/D):
1577
1578 :
1579 : +--------+
1580 : +---------+ | |
1581 +--------+ : +--->| Cache A |<------->| |
1582 | | : | +---------+ | |
1583 | CPU 1 |<---+ | |
1584 | | : | +---------+ | |
1585 +--------+ : +--->| Cache B |<------->| |
1586 : +---------+ | |
1587 : | Memory |
1588 : +---------+ | System |
1589 +--------+ : +--->| Cache C |<------->| |
1590 | | : | +---------+ | |
1591 | CPU 2 |<---+ | |
1592 | | : | +---------+ | |
1593 +--------+ : +--->| Cache D |<------->| |
1594 : +---------+ | |
1595 : +--------+
1596 :
1597
1598Imagine the system has the following properties:
1599
1600 (*) an odd-numbered cache line may be in cache A, cache C or it may still be
1601 resident in memory;
1602
1603 (*) an even-numbered cache line may be in cache B, cache D or it may still be
1604 resident in memory;
1605
1606 (*) whilst the CPU core is interrogating one cache, the other cache may be
1607 making use of the bus to access the rest of the system - perhaps to
1608 displace a dirty cacheline or to do a speculative load;
1609
1610 (*) each cache has a queue of operations that need to be applied to that cache
1611 to maintain coherency with the rest of the system;
1612
1613 (*) the coherency queue is not flushed by normal loads to lines already
1614 present in the cache, even though the contents of the queue may
1615     potentially affect those loads.
1616
1617Imagine, then, that two writes are made on the first CPU, with a write barrier
1618between them to guarantee that they will appear to reach that CPU's caches in
1619the requisite order:
1620
1621 CPU 1 CPU 2 COMMENT
1622 =============== =============== =======================================
1623 u == 0, v == 1 and p == &u, q == &u
1624 v = 2;
1625 smp_wmb(); Make sure change to v visible before
1626 change to p
1627 <A:modify v=2> v is now in cache A exclusively
1628 p = &v;
1629 <B:modify p=&v> p is now in cache B exclusively
1630
1631The write memory barrier forces the other CPUs in the system to perceive that
1632the local CPU's caches have apparently been updated in the correct order. But
1633now imagine that the second CPU wants to read those values:
1634
1635 CPU 1 CPU 2 COMMENT
1636 =============== =============== =======================================
1637 ...
1638 q = p;
1639 x = *q;
1640
1641The above pair of reads may then fail to happen in the expected order, as the
1642cacheline holding p may get updated in one of the second CPU's caches whilst
1643the update to the cacheline holding v is delayed in the other of the second
1644CPU's caches by some other cache event:
1645
1646 CPU 1 CPU 2 COMMENT
1647 =============== =============== =======================================
1648 u == 0, v == 1 and p == &u, q == &u
1649 v = 2;
1650 smp_wmb();
1651 <A:modify v=2> <C:busy>
1652 <C:queue v=2>
1653	p = &v;		q = p;
1654 <D:request p>
1655 <B:modify p=&v> <D:commit p=&v>
1656 <D:read p>
1657 x = *q;
1658 <C:read *q> Reads from v before v updated in cache
1659 <C:unbusy>
1660 <C:commit v=2>
1661
1662Basically, whilst both cachelines will be updated on CPU 2 eventually, there's
1663no guarantee that, without intervention, the order of update will be the same
1664as that committed on CPU 1.
1665
1666
1667To intervene, we need to interpolate a data dependency barrier or a read
1668barrier between the loads. This will force the cache to commit its coherency
1669queue before processing any further requests:
1670
1671 CPU 1 CPU 2 COMMENT
1672 =============== =============== =======================================
1673 u == 0, v == 1 and p == &u, q == &u
1674 v = 2;
1675 smp_wmb();
1676 <A:modify v=2> <C:busy>
1677 <C:queue v=2>
1678	p = &v;		q = p;
1679 <D:request p>
1680 <B:modify p=&v> <D:commit p=&v>
1681 <D:read p>
1682 smp_read_barrier_depends()
1683 <C:unbusy>
1684 <C:commit v=2>
1685 x = *q;
1686 <C:read *q> Reads from v after v updated in cache
1687
1688
1689This sort of problem can be encountered on DEC Alpha processors as they have a
1690split cache that improves performance by making better use of the data bus.
1691Whilst most CPUs do imply a data dependency barrier on the read when a memory
1692access depends on a read, not all do, so it may not be relied on.
1693
1694Other CPUs may also have split caches, but must coordinate between the various
1695cachelets for normal memory accesses. The semantics of the Alpha removes the
1696need for coordination in the absence of memory barriers.
1697
1698
1699CACHE COHERENCY VS DMA
1700----------------------
1701
1702Not all systems maintain cache coherency with respect to devices doing DMA. In
1703such cases, a device attempting DMA may obtain stale data from RAM because
1704dirty cache lines may be resident in the caches of various CPUs, and may not
1705have been written back to RAM yet. To deal with this, the appropriate part of
1706the kernel must flush the overlapping bits of cache on each CPU (and maybe
1707invalidate them as well).
1708
1709In addition, the data DMA'd to RAM by a device may be overwritten by dirty
1710cache lines being written back to RAM from a CPU's cache after the device has
1711installed its own data, or cache lines present in a CPU's cache may
1712simply obscure the fact that RAM has been updated, until such time as the
1713cacheline is discarded from the CPU's cache and reloaded. To deal with this,
1714the appropriate part of the kernel must invalidate the overlapping bits of the
1715cache on each CPU.
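
In practice, drivers typically leave this cache maintenance to the DMA mapping
API. As a hedged sketch (the device, buffer and length are hypothetical):

	/* Flush/invalidate the CPU caches covering the buffer, as required by
	 * the architecture, before the device reads it. */
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* ... point the device at 'handle' and let it perform the DMA ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);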
1716
1717See Documentation/cachetlb.txt for more information on cache management.
1718
1719
1720CACHE COHERENCY VS MMIO
1721-----------------------
1722
1723Memory mapped I/O usually takes place through memory locations that are part of
1724a window in the CPU's memory space that has different properties assigned than
1725the usual RAM-directed window.
1726
1727Amongst these properties is usually the fact that such accesses bypass the
1728caching entirely and go directly to the device buses. This means MMIO accesses
1729may, in effect, overtake accesses to cached memory that were emitted earlier.
1730A memory barrier isn't sufficient in such a case, but rather the cache must be
1731flushed between the cached memory write and the MMIO access if the two are in
1732any way dependent.
1733
1734
1735=========================
1736THE THINGS CPUS GET UP TO
1737=========================
1738
1739A programmer might take it for granted that the CPU will perform memory
1740operations in exactly the order specified, so that if a CPU is, for example,
1741given the following piece of code to execute:
1742
1743 a = *A;
1744 *B = b;
1745 c = *C;
1746 d = *D;
1747 *E = e;
1748
1749They would then expect that the CPU will complete the memory operation for each
1750instruction before moving on to the next one, leading to a definite sequence of
1751operations as seen by external observers in the system:
1752
1753 LOAD *A, STORE *B, LOAD *C, LOAD *D, STORE *E.
1754
1755
1756Reality is, of course, much messier. With many CPUs and compilers, the above
1757assumption doesn't hold because:
1758
1759 (*) loads are more likely to need to be completed immediately to permit
1760 execution progress, whereas stores can often be deferred without a
1761 problem;
1762
1763 (*) loads may be done speculatively, and the result discarded should it prove
1764 to have been unnecessary;
1765
1766 (*) loads may be done speculatively, leading to the result having been
1767 fetched at the wrong time in the expected sequence of events;
1768
1769 (*) the order of the memory accesses may be rearranged to promote better use
1770 of the CPU buses and caches;
1771
1772 (*) loads and stores may be combined to improve performance when talking to
1773 memory or I/O hardware that can do batched accesses of adjacent locations,
1774 thus cutting down on transaction setup costs (memory and PCI devices may
1775 both be able to do this); and
1776
1777 (*) the CPU's data cache may affect the ordering, and whilst cache-coherency
1778 mechanisms may alleviate this - once the store has actually hit the cache
1779 - there's no guarantee that the coherency management will be propagated in
1780 order to other CPUs.
1781
1782So what another CPU, say, might actually observe from the above piece of code
1783is:
1784
1785 LOAD *A, ..., LOAD {*C,*D}, STORE *E, STORE *B
1786
1787 (Where "LOAD {*C,*D}" is a combined load)
1788
1789
1790However, it is guaranteed that a CPU will be self-consistent: it will see its
1791_own_ accesses appear to be correctly ordered, without the need for a memory
1792barrier. For instance with the following code:
1793
1794 U = *A;
1795 *A = V;
1796 *A = W;
1797 X = *A;
1798 *A = Y;
1799 Z = *A;
1800
1801and assuming no intervention by an external influence, it can be assumed that
1802the final result will appear to be:
1803
1804 U == the original value of *A
1805 X == W
1806 Z == Y
1807 *A == Y
1808
1809The code above may cause the CPU to generate the full sequence of memory
1810accesses:
1811
1812 U=LOAD *A, STORE *A=V, STORE *A=W, X=LOAD *A, STORE *A=Y, Z=LOAD *A
1813
1814in that order, but, without intervention, the sequence may have almost any
1815combination of elements combined or discarded, provided the program's view of
1816the world remains consistent.
1817
1818The compiler may also combine, discard or defer elements of the sequence before
1819the CPU even sees them.
1820
1821For instance:
1822
1823 *A = V;
1824 *A = W;
1825
1826may be reduced to:
1827
1828 *A = W;
1829
1830since, without a write barrier, it can be assumed that the effect of the
1831storage of V to *A is lost. Similarly:
1832
1833 *A = Y;
1834 Z = *A;
1835
1836may, without a memory barrier, be reduced to:
1837
1838 *A = Y;
1839 Z = Y;
1840
1841and the LOAD operation need never appear outside of the CPU.
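
If the stores and the load really must all be emitted, the compiler can be
prevented from merging or eliding them with barrier() - a sketch only; note
that barrier() constrains the compiler, not the CPU:

	*A = V;
	barrier();		/* compiler may not merge the two stores */
	*A = W;
	barrier();		/* compiler must reload *A for the LOAD below */
	Z = *A;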
1842
1843
1844AND THEN THERE'S THE ALPHA
1845--------------------------
1846
1847The DEC Alpha CPU is one of the most relaxed CPUs there is. Not only that,
1848some versions of the Alpha CPU have a split data cache, permitting them to have
1849two semantically related cache lines updating at separate times. This is where
1850the data dependency barrier really becomes necessary as this synchronises both
1851caches with the memory coherence system, thus making it seem like pointer
1852changes and the new data they point to become visible in the right order.
1853
1854The Alpha defines the Linux kernel's memory barrier model.
1855
1856See the subsection on "Cache Coherency" above.
1857
1858
1859==========
1860REFERENCES
1861==========
1862
1863Alpha AXP Architecture Reference Manual, Second Edition (Sites & Witek,
1864Digital Press)
1865 Chapter 5.2: Physical Address Space Characteristics
1866 Chapter 5.4: Caches and Write Buffers
1867 Chapter 5.5: Data Sharing
1868 Chapter 5.6: Read/Write Ordering
1869
1870AMD64 Architecture Programmer's Manual Volume 2: System Programming
1871 Chapter 7.1: Memory-Access Ordering
1872 Chapter 7.4: Buffering and Combining Memory Writes
1873
1874IA-32 Intel Architecture Software Developer's Manual, Volume 3:
1875System Programming Guide
1876 Chapter 7.1: Locked Atomic Operations
1877 Chapter 7.2: Memory Ordering
1878 Chapter 7.4: Serializing Instructions
1879
1880The SPARC Architecture Manual, Version 9
1881 Chapter 8: Memory Models
1882 Appendix D: Formal Specification of the Memory Models
1883 Appendix J: Programming with the Memory Models
1884
1885UltraSPARC Programmer Reference Manual
1886 Chapter 5: Memory Accesses and Cacheability
1887 Chapter 15: Sparc-V9 Memory Models
1888
1889UltraSPARC III Cu User's Manual
1890 Chapter 9: Memory Models
1891
1892UltraSPARC IIIi Processor User's Manual
1893 Chapter 8: Memory Models
1894
1895UltraSPARC Architecture 2005
1896 Chapter 9: Memory
1897 Appendix D: Formal Specifications of the Memory Models
1898
1899UltraSPARC T1 Supplement to the UltraSPARC Architecture 2005
1900 Chapter 8: Memory Models
1901 Appendix F: Caches and Cache Coherency
1902
1903Solaris Internals, Core Kernel Architecture, p63-68:
1904 Chapter 3.3: Hardware Considerations for Locks and
1905 Synchronization
1906
1907Unix Systems for Modern Architectures, Symmetric Multiprocessing and Caching
1908for Kernel Programmers:
1909 Chapter 13: Other Memory Models
1910
1911Intel Itanium Architecture Software Developer's Manual: Volume 1:
1912 Section 2.6: Speculation
1913 Section 4.4: Memory Access
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 4fc8e9874320..aaf99d5f0dad 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -254,7 +254,7 @@ and, the number of frames be
254 254
255 <block number> * <block size> / <frame size> 255 <block number> * <block size> / <frame size>
256 256
257Suposse the following parameters, which apply for 2.6 kernel and an 257Suppose the following parameters, which apply for 2.6 kernel and an
258i386 architecture: 258i386 architecture:
259 259
260 <size-max> = 131072 bytes 260 <size-max> = 131072 bytes
diff --git a/Documentation/networking/tuntap.txt b/Documentation/networking/tuntap.txt
index ec3d109d787a..76750fb9151a 100644
--- a/Documentation/networking/tuntap.txt
+++ b/Documentation/networking/tuntap.txt
@@ -138,7 +138,7 @@ This means that you have to read/write IP packets when you are using tun and
138ethernet frames when using tap. 138ethernet frames when using tap.
139 139
1405. What is the difference between BPF and TUN/TAP driver? 1405. What is the difference between BPF and TUN/TAP driver?
141BFP is an advanced packet filter. It can be attached to existing 141BPF is an advanced packet filter. It can be attached to existing
142network interface. It does not provide a virtual network interface. 142network interface. It does not provide a virtual network interface.
143A TUN/TAP driver does provide a virtual network interface and it is possible 143A TUN/TAP driver does provide a virtual network interface and it is possible
144to attach BPF to this interface. 144to attach BPF to this interface.
diff --git a/Documentation/pcmcia/driver-changes.txt b/Documentation/pcmcia/driver-changes.txt
index 97420f08c786..4739c5c3face 100644
--- a/Documentation/pcmcia/driver-changes.txt
+++ b/Documentation/pcmcia/driver-changes.txt
@@ -1,5 +1,11 @@
1This file details changes in 2.6 which affect PCMCIA card driver authors: 1This file details changes in 2.6 which affect PCMCIA card driver authors:
2 2
3* New release helper (as of 2.6.17)
4 Instead of calling pcmcia_release_{configuration,io,irq,win}, all that's
5 necessary now is calling pcmcia_disable_device. As there is no valid
6 reason left to call pcmcia_release_io and pcmcia_release_irq, the
7 exports for them were removed.
8
3* Unify detach and REMOVAL event code, as well as attach and INSERTION 9* Unify detach and REMOVAL event code, as well as attach and INSERTION
4 code (as of 2.6.16) 10 code (as of 2.6.16)
5 void (*remove) (struct pcmcia_device *dev); 11 void (*remove) (struct pcmcia_device *dev);
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index 1def6049784c..0ee2c7dfc482 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -120,6 +120,34 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
120 enable - enable card 120 enable - enable card
121 - Default: enabled, for PCI and ISA PnP cards 121 - Default: enabled, for PCI and ISA PnP cards
122 122
123 Module snd-adlib
124 ----------------
125
126 Module for AdLib FM cards.
127
128 port - port # for OPL chip
129
130 This module supports multiple cards. It does not support autoprobe, so
131 the port must be specified. For actual AdLib FM cards it will be 0x388.
132 Note that this card does not have PCM support and no mixer; only FM
133 synthesis.
134
135 Make sure you have "sbiload" from the alsa-tools package available and,
136 after loading the module, find out the assigned ALSA sequencer port
137 number through "sbiload -l". Example output:
138
139 Port Client name Port name
140 64:0 OPL2 FM synth OPL2 FM Port
141
142 Load the std.sb and drums.sb patches also supplied by sbiload:
143
144 sbiload -p 64:0 std.sb drums.sb
145
146 If you use this driver to drive an OPL3, you can use std.o3 and drums.o3
147 instead. To have the card produce sound, use aplaymidi from alsa-utils:
148
149 aplaymidi -p 64:0 foo.mid
150
123 Module snd-ad1816a 151 Module snd-ad1816a
124 ------------------ 152 ------------------
125 153
@@ -190,6 +218,15 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
190 218
191 The power-management is supported. 219 The power-management is supported.
192 220
221 Module snd-als300
222 -----------------
223
224 Module for Avance Logic ALS300 and ALS300+
225
226 This module supports multiple cards.
227
228 The power-management is supported.
229
193 Module snd-als4000 230 Module snd-als4000
194 ------------------ 231 ------------------
195 232
@@ -701,6 +738,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
701 uniwill 3-jack 738 uniwill 3-jack
702 F1734 2-jack 739 F1734 2-jack
703 lg LG laptop (m1 express dual) 740 lg LG laptop (m1 express dual)
741 lg-lw LG LW20 laptop
704 test for testing/debugging purpose, almost all controls can be 742 test for testing/debugging purpose, almost all controls can be
705 adjusted. Appearing only when compiled with 743 adjusted. Appearing only when compiled with
706 $CONFIG_SND_DEBUG=y 744 $CONFIG_SND_DEBUG=y
@@ -1013,6 +1051,23 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
1013 1051
1014 The power-management is supported. 1052 The power-management is supported.
1015 1053
1054 Module snd-miro
1055 ---------------
1056
1057 Module for Miro soundcards: miroSOUND PCM 1 pro,
1058 miroSOUND PCM 12,
1059 miroSOUND PCM 20 Radio.
1060
1061 port - Port # (0x530,0x604,0xe80,0xf40)
1062 irq - IRQ # (5,7,9,10,11)
1063 dma1 - 1st dma # (0,1,3)
1064 dma2 - 2nd dma # (0,1)
1065 mpu_port - MPU-401 port # (0x300,0x310,0x320,0x330)
1066 mpu_irq - MPU-401 irq # (5,7,9,10)
1067 fm_port - FM Port # (0x388)
1068 wss - enable WSS mode
1069 ide - enable onboard ide support
1070
1016 Module snd-mixart 1071 Module snd-mixart
1017 ----------------- 1072 -----------------
1018 1073
@@ -1202,6 +1257,20 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
1202 1257
1203 The power-management is supported. 1258 The power-management is supported.
1204 1259
1260 Module snd-riptide
1261 ------------------
1262
1263 Module for Conexant Riptide chip
1264
1265 joystick_port - Joystick port # (default: 0x200)
1266 mpu_port - MPU401 port # (default: 0x330)
1267 opl3_port - OPL3 port # (default: 0x388)
1268
1269 This module supports multiple cards.
1270 The driver requires the firmware loader support on kernel.
1271 You need to install the firmware file "riptide.hex" to the standard
1272 firmware path (e.g. /lib/firmware).
1273
1205 Module snd-rme32 1274 Module snd-rme32
1206 ---------------- 1275 ----------------
1207 1276
diff --git a/Documentation/video4linux/CARDLIST.saa7134 b/Documentation/video4linux/CARDLIST.saa7134
index 8c7195455963..bca50903233f 100644
--- a/Documentation/video4linux/CARDLIST.saa7134
+++ b/Documentation/video4linux/CARDLIST.saa7134
@@ -52,7 +52,7 @@
52 51 -> ProVideo PV952 [1540:9524] 52 51 -> ProVideo PV952 [1540:9524]
53 52 -> AverMedia AverTV/305 [1461:2108] 53 52 -> AverMedia AverTV/305 [1461:2108]
54 53 -> ASUS TV-FM 7135 [1043:4845] 54 53 -> ASUS TV-FM 7135 [1043:4845]
55 54 -> LifeView FlyTV Platinum FM [5168:0214,1489:0214] 55 54 -> LifeView FlyTV Platinum FM / Gold [5168:0214,1489:0214,5168:0304]
56 55 -> LifeView FlyDVB-T DUO [5168:0306] 56 55 -> LifeView FlyDVB-T DUO [5168:0306]
57 56 -> Avermedia AVerTV 307 [1461:a70a] 57 56 -> Avermedia AVerTV 307 [1461:a70a]
58 57 -> Avermedia AVerTV GO 007 FM [1461:f31f] 58 57 -> Avermedia AVerTV GO 007 FM [1461:f31f]
@@ -84,7 +84,7 @@
84 83 -> Terratec Cinergy 250 PCI TV [153b:1160] 84 83 -> Terratec Cinergy 250 PCI TV [153b:1160]
85 84 -> LifeView FlyDVB Trio [5168:0319] 85 84 -> LifeView FlyDVB Trio [5168:0319]
86 85 -> AverTV DVB-T 777 [1461:2c05] 86 85 -> AverTV DVB-T 777 [1461:2c05]
87 86 -> LifeView FlyDVB-T [5168:0301] 87 86 -> LifeView FlyDVB-T / Genius VideoWonder DVB-T [5168:0301,1489:0301]
88 87 -> ADS Instant TV Duo Cardbus PTV331 [0331:1421] 88 87 -> ADS Instant TV Duo Cardbus PTV331 [0331:1421]
89 88 -> Tevion/KWorld DVB-T 220RF [17de:7201] 89 88 -> Tevion/KWorld DVB-T 220RF [17de:7201]
90 89 -> ELSA EX-VISION 700TV [1048:226c] 90 89 -> ELSA EX-VISION 700TV [1048:226c]
@@ -92,3 +92,4 @@
92 91 -> AVerMedia A169 B [1461:7360] 92 91 -> AVerMedia A169 B [1461:7360]
93 92 -> AVerMedia A169 B1 [1461:6360] 93 92 -> AVerMedia A169 B1 [1461:6360]
94 93 -> Medion 7134 Bridge #2 [16be:0005] 94 93 -> Medion 7134 Bridge #2 [16be:0005]
95 94 -> LifeView FlyDVB-T Hybrid Cardbus [5168:3306,5168:3502]
diff --git a/Documentation/usb/et61x251.txt b/Documentation/video4linux/et61x251.txt
index 29340282ab5f..29340282ab5f 100644
--- a/Documentation/usb/et61x251.txt
+++ b/Documentation/video4linux/et61x251.txt
diff --git a/Documentation/usb/ibmcam.txt b/Documentation/video4linux/ibmcam.txt
index c25003644131..4a40a2e99451 100644
--- a/Documentation/usb/ibmcam.txt
+++ b/Documentation/video4linux/ibmcam.txt
@@ -122,7 +122,7 @@ WHAT YOU NEED:
122- A Linux box with USB support (2.3/2.4; 2.2 w/backport may work) 122- A Linux box with USB support (2.3/2.4; 2.2 w/backport may work)
123 123
124- A Video4Linux compatible frame grabber program such as xawtv. 124- A Video4Linux compatible frame grabber program such as xawtv.
125 125
126HOW TO COMPILE THE DRIVER: 126HOW TO COMPILE THE DRIVER:
127 127
128You need to compile the driver only if you are a developer 128You need to compile the driver only if you are a developer
diff --git a/Documentation/usb/ov511.txt b/Documentation/video4linux/ov511.txt
index a7fc0432bff1..142741e3c578 100644
--- a/Documentation/usb/ov511.txt
+++ b/Documentation/video4linux/ov511.txt
@@ -9,7 +9,7 @@ INTRODUCTION:
9 9
10This is a driver for the OV511, a USB-only chip used in many "webcam" devices. 10This is a driver for the OV511, a USB-only chip used in many "webcam" devices.
11Any camera using the OV511/OV511+ and the OV6620/OV7610/20/20AE should work. 11Any camera using the OV511/OV511+ and the OV6620/OV7610/20/20AE should work.
12Video capture devices that use the Philips SAA7111A decoder also work. It 12Video capture devices that use the Philips SAA7111A decoder also work. It
13supports streaming and capture of color or monochrome video via the Video4Linux 13supports streaming and capture of color or monochrome video via the Video4Linux
14API. Most V4L apps are compatible with it. Most resolutions with a width and 14API. Most V4L apps are compatible with it. Most resolutions with a width and
15height that are a multiple of 8 are supported. 15height that are a multiple of 8 are supported.
@@ -52,15 +52,15 @@ from it:
52 52
53 chmod 666 /dev/video 53 chmod 666 /dev/video
54 chmod 666 /dev/video0 (if necessary) 54 chmod 666 /dev/video0 (if necessary)
55 55
56Now you are ready to run a video app! Both vidcat and xawtv work well for me 56Now you are ready to run a video app! Both vidcat and xawtv work well for me
57at 640x480. 57at 640x480.
58 58
59[Using vidcat:] 59[Using vidcat:]
60 60
61 vidcat -s 640x480 -p c > test.jpg 61 vidcat -s 640x480 -p c > test.jpg
62 xview test.jpg 62 xview test.jpg
63 63
64[Using xawtv:] 64[Using xawtv:]
65 65
66From the main xawtv directory: 66From the main xawtv directory:
@@ -70,7 +70,7 @@ From the main xawtv directory:
70 make 70 make
71 make install 71 make install
72 72
73Now you should be able to run xawtv. Right click for the options dialog. 73Now you should be able to run xawtv. Right click for the options dialog.
74 74
75MODULE PARAMETERS: 75MODULE PARAMETERS:
76 76
@@ -286,4 +286,3 @@ Randy Dunlap, and others. Big thanks to them for their pioneering work on that
286and the USB stack. Thanks to Bret Wallach for getting camera reg IO, ISOC, and 286and the USB stack. Thanks to Bret Wallach for getting camera reg IO, ISOC, and
287image capture working. Thanks to Orion Sky Lawlor, Kevin Moore, and Claudio 287image capture working. Thanks to Orion Sky Lawlor, Kevin Moore, and Claudio
288Matsuoka for their work as well. 288Matsuoka for their work as well.
289
diff --git a/Documentation/usb/se401.txt b/Documentation/video4linux/se401.txt
index 7b9d1c960a10..7b9d1c960a10 100644
--- a/Documentation/usb/se401.txt
+++ b/Documentation/video4linux/se401.txt
diff --git a/Documentation/usb/sn9c102.txt b/Documentation/video4linux/sn9c102.txt
index b957beae5607..142920bc011f 100644
--- a/Documentation/usb/sn9c102.txt
+++ b/Documentation/video4linux/sn9c102.txt
@@ -174,7 +174,7 @@ Module parameters are listed below:
174------------------------------------------------------------------------------- 174-------------------------------------------------------------------------------
175Name: video_nr 175Name: video_nr
176Type: short array (min = 0, max = 64) 176Type: short array (min = 0, max = 64)
177Syntax: <-1|n[,...]> 177Syntax: <-1|n[,...]>
178Description: Specify V4L2 minor mode number: 178Description: Specify V4L2 minor mode number:
179 -1 = use next available 179 -1 = use next available
180 n = use minor number n 180 n = use minor number n
@@ -187,7 +187,7 @@ Default: -1
187------------------------------------------------------------------------------- 187-------------------------------------------------------------------------------
188Name: force_munmap 188Name: force_munmap
189Type: bool array (min = 0, max = 64) 189Type: bool array (min = 0, max = 64)
190Syntax: <0|1[,...]> 190Syntax: <0|1[,...]>
191Description: Force the application to unmap previously mapped buffer memory 191Description: Force the application to unmap previously mapped buffer memory
192 before calling any VIDIOC_S_CROP or VIDIOC_S_FMT ioctl's. Not 192 before calling any VIDIOC_S_CROP or VIDIOC_S_FMT ioctl's. Not
193 all the applications support this feature. This parameter is 193 all the applications support this feature. This parameter is
@@ -206,7 +206,7 @@ Default: 2
206------------------------------------------------------------------------------- 206-------------------------------------------------------------------------------
207Name: debug 207Name: debug
208Type: ushort 208Type: ushort
209Syntax: <n> 209Syntax: <n>
210Description: Debugging information level, from 0 to 3: 210Description: Debugging information level, from 0 to 3:
211 0 = none (use carefully) 211 0 = none (use carefully)
212 1 = critical errors 212 1 = critical errors
@@ -267,7 +267,7 @@ The sysfs interface also provides the "frame_header" entry, which exports the
267frame header of the most recent requested and captured video frame. The header 267frame header of the most recent requested and captured video frame. The header
268is always 18-bytes long and is appended to every video frame by the SN9C10x 268is always 18-bytes long and is appended to every video frame by the SN9C10x
269controllers. As an example, this additional information can be used by the user 269controllers. As an example, this additional information can be used by the user
270application for implementing auto-exposure features via software. 270application for implementing auto-exposure features via software.
271 271
272The following table describes the frame header: 272The following table describes the frame header:
273 273
@@ -441,7 +441,7 @@ blue pixels in one video frame. Each pixel is associated with a 8-bit long
441value and is disposed in memory according to the pattern shown below: 441value and is disposed in memory according to the pattern shown below:
442 442
443B[0] G[1] B[2] G[3] ... B[m-2] G[m-1] 443B[0] G[1] B[2] G[3] ... B[m-2] G[m-1]
444G[m] R[m+1] G[m+2] R[m+2] ... G[2m-2] R[2m-1] 444G[m] R[m+1] G[m+2] R[m+2] ... G[2m-2] R[2m-1]
445... 445...
446... B[(n-1)(m-2)] G[(n-1)(m-1)] 446... B[(n-1)(m-2)] G[(n-1)(m-1)]
447... G[n(m-2)] R[n(m-1)] 447... G[n(m-2)] R[n(m-1)]
@@ -472,12 +472,12 @@ The pixel reference value is calculated as follows:
472The algorithm purely describes the conversion from compressed Bayer code used 472The algorithm purely describes the conversion from compressed Bayer code used
473in the SN9C10x chips to uncompressed Bayer. Additional steps are required to 473in the SN9C10x chips to uncompressed Bayer. Additional steps are required to
474convert this to a color image (i.e. a color interpolation algorithm). 474convert this to a color image (i.e. a color interpolation algorithm).
475 475
476The following Huffman codes have been found: 476The following Huffman codes have been found:
4770: +0 (relative to reference pixel value) 4770: +0 (relative to reference pixel value)
478100: +4 478100: +4
479101: -4? 479101: -4?
4801110xxxx: set absolute value to xxxx.0000 4801110xxxx: set absolute value to xxxx.0000
4811101: +11 4811101: +11
4821111: -11 4821111: -11
48311001: +20 48311001: +20
diff --git a/Documentation/usb/stv680.txt b/Documentation/video4linux/stv680.txt
index 6448041e7a37..4f8946f32f51 100644
--- a/Documentation/usb/stv680.txt
+++ b/Documentation/video4linux/stv680.txt
@@ -5,15 +5,15 @@ Copyright, 2001, Kevin Sisson
5 5
6INTRODUCTION: 6INTRODUCTION:
7 7
8STMicroelectronics produces the STV0680B chip, which comes in two 8STMicroelectronics produces the STV0680B chip, which comes in two
9types, -001 and -003. The -003 version allows the recording and downloading 9types, -001 and -003. The -003 version allows the recording and downloading
10of sound clips from the camera, and allows a flash attachment. Otherwise, 10of sound clips from the camera, and allows a flash attachment. Otherwise,
11it uses the same commands as the -001 version. Both versions support a 11it uses the same commands as the -001 version. Both versions support a
12variety of SDRAM sizes and sensors, allowing for a maximum of 26 VGA or 20 12variety of SDRAM sizes and sensors, allowing for a maximum of 26 VGA or 20
13CIF pictures. The STV0680 supports either a serial or a usb interface, and 13CIF pictures. The STV0680 supports either a serial or a usb interface, and
14video is possible through the usb interface. 14video is possible through the usb interface.
15 15
16The following cameras are known to work with this driver, although any 16The following cameras are known to work with this driver, although any
17camera with Vendor/Product codes of 0553/0202 should work: 17camera with Vendor/Product codes of 0553/0202 should work:
18 18
19Aiptek Pencam (various models) 19Aiptek Pencam (various models)
@@ -34,15 +34,15 @@ http://www.linux-usb.org
34MODULE OPTIONS: 34MODULE OPTIONS:
35 35
36When the driver is compiled as a module, you can set a "swapRGB=1" 36When the driver is compiled as a module, you can set a "swapRGB=1"
37option, if necessary, for those applications that require it 37option, if necessary, for those applications that require it
38(such as xawtv). However, the driver should detect and set this 38(such as xawtv). However, the driver should detect and set this
39automatically, so this option should not normally be used. 39automatically, so this option should not normally be used.
40 40
41 41
42KNOWN PROBLEMS: 42KNOWN PROBLEMS:
43 43
44The driver seems to work better with the usb-ohci than the usb-uhci host 44The driver seems to work better with the usb-ohci than the usb-uhci host
45controller driver. 45controller driver.
46 46
47HELP: 47HELP:
48 48
@@ -50,6 +50,4 @@ The latest info on this driver can be found at:
50http://personal.clt.bellsouth.net/~kjsisson or at 50http://personal.clt.bellsouth.net/~kjsisson or at
51http://stv0680-usb.sourceforge.net 51http://stv0680-usb.sourceforge.net
52 52
53Any questions to me can be send to: kjsisson@bellsouth.net 53Any questions to me can be send to: kjsisson@bellsouth.net \ No newline at end of file
54
55
diff --git a/Documentation/usb/w9968cf.txt b/Documentation/video4linux/w9968cf.txt
index 9d46cd0b19e3..3b704f2aae6d 100644
--- a/Documentation/usb/w9968cf.txt
+++ b/Documentation/video4linux/w9968cf.txt
@@ -1,5 +1,5 @@
1 1
2 W996[87]CF JPEG USB Dual Mode Camera Chip 2 W996[87]CF JPEG USB Dual Mode Camera Chip
3 Driver for Linux 2.6 (basic version) 3 Driver for Linux 2.6 (basic version)
4 ========================================= 4 =========================================
5 5
@@ -115,7 +115,7 @@ additional testing and full support, would be much appreciated.
115====================== 115======================
116For it to work properly, the driver needs kernel support for Video4Linux, USB 116For it to work properly, the driver needs kernel support for Video4Linux, USB
117and I2C, and the "ovcamchip" module for the image sensor. Make sure you are not 117and I2C, and the "ovcamchip" module for the image sensor. Make sure you are not
118actually using any external "ovcamchip" module, given that the W996[87]CF 118actually using any external "ovcamchip" module, given that the W996[87]CF
119driver depends on the version of the module present in the official kernels. 119driver depends on the version of the module present in the official kernels.
120 120
121The following options of the kernel configuration file must be enabled and 121The following options of the kernel configuration file must be enabled and
@@ -197,16 +197,16 @@ Note: The kernel must be compiled with the CONFIG_KMOD option
197 enabled for the 'ovcamchip' module to be loaded and for 197 enabled for the 'ovcamchip' module to be loaded and for
198 this parameter to be present. 198 this parameter to be present.
199------------------------------------------------------------------------------- 199-------------------------------------------------------------------------------
200Name: simcams 200Name: simcams
201Type: int 201Type: int
202Syntax: <n> 202Syntax: <n>
203Description: Number of cameras allowed to stream simultaneously. 203Description: Number of cameras allowed to stream simultaneously.
204 n may vary from 0 to 32. 204 n may vary from 0 to 32.
205Default: 32 205Default: 32
206------------------------------------------------------------------------------- 206-------------------------------------------------------------------------------
207Name: video_nr 207Name: video_nr
208Type: int array (min = 0, max = 32) 208Type: int array (min = 0, max = 32)
209Syntax: <-1|n[,...]> 209Syntax: <-1|n[,...]>
210Description: Specify V4L minor mode number. 210Description: Specify V4L minor mode number.
211 -1 = use next available 211 -1 = use next available
212 n = use minor number n 212 n = use minor number n
@@ -219,7 +219,7 @@ Default: -1
219------------------------------------------------------------------------------- 219-------------------------------------------------------------------------------
220Name: packet_size 220Name: packet_size
221Type: int array (min = 0, max = 32) 221Type: int array (min = 0, max = 32)
222Syntax: <n[,...]> 222Syntax: <n[,...]>
223Description: Specify the maximum data payload size in bytes for alternate 223Description: Specify the maximum data payload size in bytes for alternate
224 settings, for each device. n is scaled between 63 and 1023. 224 settings, for each device. n is scaled between 63 and 1023.
225Default: 1023 225Default: 1023
@@ -234,7 +234,7 @@ Default: 2
234------------------------------------------------------------------------------- 234-------------------------------------------------------------------------------
235Name: double_buffer 235Name: double_buffer
236Type: bool array (min = 0, max = 32) 236Type: bool array (min = 0, max = 32)
237Syntax: <0|1[,...]> 237Syntax: <0|1[,...]>
238Description: Hardware double buffering: 0 disabled, 1 enabled. 238Description: Hardware double buffering: 0 disabled, 1 enabled.
239 It should be enabled if you want smooth video output: if you 239 It should be enabled if you want smooth video output: if you
240 obtain out of sync. video, disable it, or try to 240 obtain out of sync. video, disable it, or try to
@@ -243,13 +243,13 @@ Default: 1 for every device.
243------------------------------------------------------------------------------- 243-------------------------------------------------------------------------------
244Name: clamping 244Name: clamping
245Type: bool array (min = 0, max = 32) 245Type: bool array (min = 0, max = 32)
246Syntax: <0|1[,...]> 246Syntax: <0|1[,...]>
247Description: Video data clamping: 0 disabled, 1 enabled. 247Description: Video data clamping: 0 disabled, 1 enabled.
248Default: 0 for every device. 248Default: 0 for every device.
249------------------------------------------------------------------------------- 249-------------------------------------------------------------------------------
250Name: filter_type 250Name: filter_type
251Type: int array (min = 0, max = 32) 251Type: int array (min = 0, max = 32)
252Syntax: <0|1|2[,...]> 252Syntax: <0|1|2[,...]>
253Description: Video filter type. 253Description: Video filter type.
254 0 none, 1 (1-2-1) 3-tap filter, 2 (2-3-6-3-2) 5-tap filter. 254 0 none, 1 (1-2-1) 3-tap filter, 2 (2-3-6-3-2) 5-tap filter.
255 The filter is used to reduce noise and aliasing artifacts 255 The filter is used to reduce noise and aliasing artifacts
@@ -258,13 +258,13 @@ Default: 0 for every device.
258------------------------------------------------------------------------------- 258-------------------------------------------------------------------------------
259Name: largeview 259Name: largeview
260Type: bool array (min = 0, max = 32) 260Type: bool array (min = 0, max = 32)
261Syntax: <0|1[,...]> 261Syntax: <0|1[,...]>
262Description: Large view: 0 disabled, 1 enabled. 262Description: Large view: 0 disabled, 1 enabled.
263Default: 1 for every device. 263Default: 1 for every device.
264------------------------------------------------------------------------------- 264-------------------------------------------------------------------------------
265Name: upscaling 265Name: upscaling
266Type: bool array (min = 0, max = 32) 266Type: bool array (min = 0, max = 32)
267Syntax: <0|1[,...]> 267Syntax: <0|1[,...]>
268Description: Software scaling (for non-compressed video only): 268Description: Software scaling (for non-compressed video only):
269 0 disabled, 1 enabled. 269 0 disabled, 1 enabled.
270 Disable it if you have a slow CPU or you don't have enough 270 Disable it if you have a slow CPU or you don't have enough
@@ -341,8 +341,8 @@ Default: 50 for every device.
341------------------------------------------------------------------------------- 341-------------------------------------------------------------------------------
342Name: bandingfilter 342Name: bandingfilter
343Type: bool array (min = 0, max = 32) 343Type: bool array (min = 0, max = 32)
344Syntax: <0|1[,...]> 344Syntax: <0|1[,...]>
345Description: Banding filter to reduce effects of fluorescent 345Description: Banding filter to reduce effects of fluorescent
346 lighting: 346 lighting:
347 0 disabled, 1 enabled. 347 0 disabled, 1 enabled.
348 This filter tries to reduce the pattern of horizontal 348 This filter tries to reduce the pattern of horizontal
@@ -374,7 +374,7 @@ Default: 0 for every device.
374------------------------------------------------------------------------------- 374-------------------------------------------------------------------------------
375Name: monochrome 375Name: monochrome
376Type: bool array (min = 0, max = 32) 376Type: bool array (min = 0, max = 32)
377Syntax: <0|1[,...]> 377Syntax: <0|1[,...]>
378Description: The image sensor is monochrome: 378Description: The image sensor is monochrome:
379 0 = no, 1 = yes 379 0 = no, 1 = yes
380Default: 0 for every device. 380Default: 0 for every device.
@@ -400,19 +400,19 @@ Default: 32768 for every device.
400------------------------------------------------------------------------------- 400-------------------------------------------------------------------------------
401Name: contrast 401Name: contrast
402Type: long array (min = 0, max = 32) 402Type: long array (min = 0, max = 32)
403Syntax: <n[,...]> 403Syntax: <n[,...]>
404Description: Set picture contrast (0-65535). 404Description: Set picture contrast (0-65535).
405Default: 50000 for every device. 405Default: 50000 for every device.
406------------------------------------------------------------------------------- 406-------------------------------------------------------------------------------
407Name: whiteness 407Name: whiteness
408Type: long array (min = 0, max = 32) 408Type: long array (min = 0, max = 32)
409Syntax: <n[,...]> 409Syntax: <n[,...]>
410Description: Set picture whiteness (0-65535). 410Description: Set picture whiteness (0-65535).
411Default: 32768 for every device. 411Default: 32768 for every device.
412------------------------------------------------------------------------------- 412-------------------------------------------------------------------------------
413Name: debug 413Name: debug
414Type: int 414Type: int
415Syntax: <n> 415Syntax: <n>
416Description: Debugging information level, from 0 to 6: 416Description: Debugging information level, from 0 to 6:
417 0 = none (use carefully) 417 0 = none (use carefully)
418 1 = critical errors 418 1 = critical errors
diff --git a/Documentation/usb/zc0301.txt b/Documentation/video4linux/zc0301.txt
index f55262c6733b..f55262c6733b 100644
--- a/Documentation/usb/zc0301.txt
+++ b/Documentation/video4linux/zc0301.txt
diff --git a/MAINTAINERS b/MAINTAINERS
index c9465811addc..f97657b7e2c7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1451,6 +1451,12 @@ P: Juanjo Ciarlante
1451M: jjciarla@raiz.uncu.edu.ar 1451M: jjciarla@raiz.uncu.edu.ar
1452S: Maintained 1452S: Maintained
1453 1453
1454IPATH DRIVER:
1455P: Bryan O'Sullivan
1456M: support@pathscale.com
1457L: openib-general@openib.org
1458S: Supported
1459
1454IPX NETWORK LAYER 1460IPX NETWORK LAYER
1455P: Arnaldo Carvalho de Melo 1461P: Arnaldo Carvalho de Melo
1456M: acme@conectiva.com.br 1462M: acme@conectiva.com.br
diff --git a/Makefile b/Makefile
index af6210d48836..b4019426fa25 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 16 3SUBLEVEL = 17
4EXTRAVERSION = 4EXTRAVERSION =-rc1
5NAME=Sliding Snow Leopard 5NAME=Sliding Snow Leopard
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index 1898ea79d0e2..9d6186d50245 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -216,8 +216,6 @@ EXPORT_SYMBOL(memcpy);
216EXPORT_SYMBOL(memset); 216EXPORT_SYMBOL(memset);
217EXPORT_SYMBOL(memchr); 217EXPORT_SYMBOL(memchr);
218 218
219EXPORT_SYMBOL(get_wchan);
220
221#ifdef CONFIG_ALPHA_IRONGATE 219#ifdef CONFIG_ALPHA_IRONGATE
222EXPORT_SYMBOL(irongate_ioremap); 220EXPORT_SYMBOL(irongate_ioremap);
223EXPORT_SYMBOL(irongate_iounmap); 221EXPORT_SYMBOL(irongate_iounmap);
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index 44866cb26a80..7f6a98455e74 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -435,7 +435,7 @@ marvel_specify_io7(char *str)
435 str = pchar; 435 str = pchar;
436 } while(*str); 436 } while(*str);
437 437
438 return 0; 438 return 1;
439} 439}
440__setup("io7=", marvel_specify_io7); 440__setup("io7=", marvel_specify_io7);
441 441
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index dd8769670596..a15e18a00258 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -28,6 +28,7 @@
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/string.h> 29#include <linux/string.h>
30#include <linux/ioport.h> 30#include <linux/ioport.h>
31#include <linux/platform_device.h>
31#include <linux/bootmem.h> 32#include <linux/bootmem.h>
32#include <linux/pci.h> 33#include <linux/pci.h>
33#include <linux/seq_file.h> 34#include <linux/seq_file.h>
@@ -1478,3 +1479,20 @@ alpha_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
1478#endif 1479#endif
1479 return NOTIFY_DONE; 1480 return NOTIFY_DONE;
1480} 1481}
1482
1483static __init int add_pcspkr(void)
1484{
1485 struct platform_device *pd;
1486 int ret;
1487
1488 pd = platform_device_alloc("pcspkr", -1);
1489 if (!pd)
1490 return -ENOMEM;
1491
1492 ret = platform_device_add(pd);
1493 if (ret)
1494 platform_device_put(pd);
1495
1496 return ret;
1497}
1498device_initcall(add_pcspkr);
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ba46d779ede7..dc5a9332c915 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -77,6 +77,14 @@ config FIQ
77config ARCH_MTD_XIP 77config ARCH_MTD_XIP
78 bool 78 bool
79 79
80config VECTORS_BASE
81 hex
82 default 0xffff0000 if MMU
83 default DRAM_BASE if REMAP_VECTORS_TO_RAM
84 default 0x00000000
85 help
86 The base address of exception vectors.
87
80source "init/Kconfig" 88source "init/Kconfig"
81 89
82menu "System Type" 90menu "System Type"
@@ -839,6 +847,8 @@ source "drivers/misc/Kconfig"
839 847
840source "drivers/mfd/Kconfig" 848source "drivers/mfd/Kconfig"
841 849
850source "drivers/leds/Kconfig"
851
842source "drivers/media/Kconfig" 852source "drivers/media/Kconfig"
843 853
844source "drivers/video/Kconfig" 854source "drivers/video/Kconfig"
diff --git a/arch/arm/Kconfig-nommu b/arch/arm/Kconfig-nommu
new file mode 100644
index 000000000000..e1574be2ded6
--- /dev/null
+++ b/arch/arm/Kconfig-nommu
@@ -0,0 +1,44 @@
1#
2# Kconfig for uClinux(non-paged MM) depend configurations
3# Hyok S. Choi <hyok.choi@samsung.com>
4#
5
6config SET_MEM_PARAM
7 bool "Set flash/sdram size and base addr"
8 help
9 Say Y to manually set the base addresses and sizes.
10 otherwise, the default values are assigned.
11
12config DRAM_BASE
13 hex '(S)DRAM Base Address' if SET_MEM_PARAM
14 default 0x00800000
15
16config DRAM_SIZE
17 hex '(S)DRAM SIZE' if SET_MEM_PARAM
18 default 0x00800000
19
20config FLASH_MEM_BASE
21 hex 'FLASH Base Address' if SET_MEM_PARAM
22 default 0x00400000
23
24config FLASH_SIZE
25 hex 'FLASH Size' if SET_MEM_PARAM
26 default 0x00400000
27
28config REMAP_VECTORS_TO_RAM
29 bool 'Install vectors to the begining of RAM' if DRAM_BASE
30 depends on DRAM_BASE
31 help
32 The kernel needs to change the hardware exception vectors.
33 In nommu mode, the hardware exception vectors are normally
34 placed at address 0x00000000. However, this region may be
35 occupied by read-only memory depending on H/W design.
36
37 If the region contains read-write memory, say 'n' here.
38
39 If your CPU provides a remap facility which allows the exception
40 vectors to be mapped to writable memory, say 'n' here.
41
42 Otherwise, say 'y' here. In this case, the kernel will require
43 external support to redirect the hardware exception vectors to
44 the writable versions located at DRAM_BASE.
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index ce3e804ea0f3..95a96275f88a 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -20,6 +20,11 @@ GZFLAGS :=-9
20# Select a platform tht is kept up-to-date 20# Select a platform tht is kept up-to-date
21KBUILD_DEFCONFIG := versatile_defconfig 21KBUILD_DEFCONFIG := versatile_defconfig
22 22
23# defines filename extension depending memory manement type.
24ifeq ($(CONFIG_MMU),)
25MMUEXT := -nommu
26endif
27
23ifeq ($(CONFIG_FRAME_POINTER),y) 28ifeq ($(CONFIG_FRAME_POINTER),y)
24CFLAGS +=-fno-omit-frame-pointer -mapcs -mno-sched-prolog 29CFLAGS +=-fno-omit-frame-pointer -mapcs -mno-sched-prolog
25endif 30endif
@@ -73,7 +78,7 @@ AFLAGS +=$(CFLAGS_ABI) $(arch-y) $(tune-y) -msoft-float
73CHECKFLAGS += -D__arm__ 78CHECKFLAGS += -D__arm__
74 79
75#Default value 80#Default value
76head-y := arch/arm/kernel/head.o arch/arm/kernel/init_task.o 81head-y := arch/arm/kernel/head$(MMUEXT).o arch/arm/kernel/init_task.o
77textofs-y := 0x00008000 82textofs-y := 0x00008000
78 83
79 machine-$(CONFIG_ARCH_RPC) := rpc 84 machine-$(CONFIG_ARCH_RPC) := rpc
@@ -133,7 +138,7 @@ else
133MACHINE := 138MACHINE :=
134endif 139endif
135 140
136export TEXT_OFFSET GZFLAGS 141export TEXT_OFFSET GZFLAGS MMUEXT
137 142
138# Do we have FASTFPE? 143# Do we have FASTFPE?
139FASTFPE :=arch/arm/fastfpe 144FASTFPE :=arch/arm/fastfpe
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 491c7e4c9ac6..b56f5e691d65 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -2,6 +2,7 @@
2 * linux/arch/arm/boot/compressed/head.S 2 * linux/arch/arm/boot/compressed/head.S
3 * 3 *
4 * Copyright (C) 1996-2002 Russell King 4 * Copyright (C) 1996-2002 Russell King
5 * Copyright (C) 2004 Hyok S. Choi (MPU support)
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -320,6 +321,62 @@ params: ldr r0, =params_phys
320cache_on: mov r3, #8 @ cache_on function 321cache_on: mov r3, #8 @ cache_on function
321 b call_cache_fn 322 b call_cache_fn
322 323
324/*
325 * Initialize the highest priority protection region, PR7
326 * to cover all 32bit address and cacheable and bufferable.
327 */
328__armv4_mpu_cache_on:
329 mov r0, #0x3f @ 4G, the whole
330 mcr p15, 0, r0, c6, c7, 0 @ PR7 Area Setting
331 mcr p15, 0, r0, c6, c7, 1
332
333 mov r0, #0x80 @ PR7
334 mcr p15, 0, r0, c2, c0, 0 @ D-cache on
335 mcr p15, 0, r0, c2, c0, 1 @ I-cache on
336 mcr p15, 0, r0, c3, c0, 0 @ write-buffer on
337
338 mov r0, #0xc000
339 mcr p15, 0, r0, c5, c0, 1 @ I-access permission
340 mcr p15, 0, r0, c5, c0, 0 @ D-access permission
341
342 mov r0, #0
343 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
344 mcr p15, 0, r0, c7, c5, 0 @ flush(inval) I-Cache
345 mcr p15, 0, r0, c7, c6, 0 @ flush(inval) D-Cache
346 mrc p15, 0, r0, c1, c0, 0 @ read control reg
347 @ ...I .... ..D. WC.M
348 orr r0, r0, #0x002d @ .... .... ..1. 11.1
349 orr r0, r0, #0x1000 @ ...1 .... .... ....
350
351 mcr p15, 0, r0, c1, c0, 0 @ write control reg
352
353 mov r0, #0
354 mcr p15, 0, r0, c7, c5, 0 @ flush(inval) I-Cache
355 mcr p15, 0, r0, c7, c6, 0 @ flush(inval) D-Cache
356 mov pc, lr
357
358__armv3_mpu_cache_on:
359 mov r0, #0x3f @ 4G, the whole
360 mcr p15, 0, r0, c6, c7, 0 @ PR7 Area Setting
361
362 mov r0, #0x80 @ PR7
363 mcr p15, 0, r0, c2, c0, 0 @ cache on
364 mcr p15, 0, r0, c3, c0, 0 @ write-buffer on
365
366 mov r0, #0xc000
367 mcr p15, 0, r0, c5, c0, 0 @ access permission
368
369 mov r0, #0
370 mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
371 mrc p15, 0, r0, c1, c0, 0 @ read control reg
372 @ .... .... .... WC.M
373 orr r0, r0, #0x000d @ .... .... .... 11.1
374 mov r0, #0
375 mcr p15, 0, r0, c1, c0, 0 @ write control reg
376
377 mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
378 mov pc, lr
379
323__setup_mmu: sub r3, r4, #16384 @ Page directory size 380__setup_mmu: sub r3, r4, #16384 @ Page directory size
324 bic r3, r3, #0xff @ Align the pointer 381 bic r3, r3, #0xff @ Align the pointer
325 bic r3, r3, #0x3f00 382 bic r3, r3, #0x3f00
@@ -496,6 +553,18 @@ proc_types:
496 b __armv4_mmu_cache_off 553 b __armv4_mmu_cache_off
497 mov pc, lr 554 mov pc, lr
498 555
556 .word 0x41007400 @ ARM74x
557 .word 0xff00ff00
558 b __armv3_mpu_cache_on
559 b __armv3_mpu_cache_off
560 b __armv3_mpu_cache_flush
561
562 .word 0x41009400 @ ARM94x
563 .word 0xff00ff00
564 b __armv4_mpu_cache_on
565 b __armv4_mpu_cache_off
566 b __armv4_mpu_cache_flush
567
499 .word 0x00007000 @ ARM7 IDs 568 .word 0x00007000 @ ARM7 IDs
500 .word 0x0000f000 569 .word 0x0000f000
501 mov pc, lr 570 mov pc, lr
@@ -562,6 +631,24 @@ proc_types:
562cache_off: mov r3, #12 @ cache_off function 631cache_off: mov r3, #12 @ cache_off function
563 b call_cache_fn 632 b call_cache_fn
564 633
634__armv4_mpu_cache_off:
635 mrc p15, 0, r0, c1, c0
636 bic r0, r0, #0x000d
637 mcr p15, 0, r0, c1, c0 @ turn MPU and cache off
638 mov r0, #0
639 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
640 mcr p15, 0, r0, c7, c6, 0 @ flush D-Cache
641 mcr p15, 0, r0, c7, c5, 0 @ flush I-Cache
642 mov pc, lr
643
644__armv3_mpu_cache_off:
645 mrc p15, 0, r0, c1, c0
646 bic r0, r0, #0x000d
647 mcr p15, 0, r0, c1, c0, 0 @ turn MPU and cache off
648 mov r0, #0
649 mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
650 mov pc, lr
651
565__armv4_mmu_cache_off: 652__armv4_mmu_cache_off:
566 mrc p15, 0, r0, c1, c0 653 mrc p15, 0, r0, c1, c0
567 bic r0, r0, #0x000d 654 bic r0, r0, #0x000d
@@ -601,6 +688,24 @@ cache_clean_flush:
601 mov r3, #16 688 mov r3, #16
602 b call_cache_fn 689 b call_cache_fn
603 690
691__armv4_mpu_cache_flush:
692 mov r2, #1
693 mov r3, #0
694 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
695 mov r1, #7 << 5 @ 8 segments
6961: orr r3, r1, #63 << 26 @ 64 entries
6972: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index
698 subs r3, r3, #1 << 26
699 bcs 2b @ entries 63 to 0
700 subs r1, r1, #1 << 5
701 bcs 1b @ segments 7 to 0
702
703 teq r2, #0
704 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
705 mcr p15, 0, ip, c7, c10, 4 @ drain WB
706 mov pc, lr
707
708
604__armv6_mmu_cache_flush: 709__armv6_mmu_cache_flush:
605 mov r1, #0 710 mov r1, #0
606 mcr p15, 0, r1, c7, c14, 0 @ clean+invalidate D 711 mcr p15, 0, r1, c7, c14, 0 @ clean+invalidate D
@@ -638,6 +743,7 @@ no_cache_id:
638 mov pc, lr 743 mov pc, lr
639 744
640__armv3_mmu_cache_flush: 745__armv3_mmu_cache_flush:
746__armv3_mpu_cache_flush:
641 mov r1, #0 747 mov r1, #0
642 mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3 748 mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
643 mov pc, lr 749 mov pc, lr
diff --git a/arch/arm/common/sharpsl_pm.c b/arch/arm/common/sharpsl_pm.c
index 978d32e82d39..3cd8c9ee4510 100644
--- a/arch/arm/common/sharpsl_pm.c
+++ b/arch/arm/common/sharpsl_pm.c
@@ -22,6 +22,7 @@
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/leds.h>
25 26
26#include <asm/hardware.h> 27#include <asm/hardware.h>
27#include <asm/mach-types.h> 28#include <asm/mach-types.h>
@@ -75,6 +76,7 @@ static void sharpsl_battery_thread(void *private_);
75struct sharpsl_pm_status sharpsl_pm; 76struct sharpsl_pm_status sharpsl_pm;
76DECLARE_WORK(toggle_charger, sharpsl_charge_toggle, NULL); 77DECLARE_WORK(toggle_charger, sharpsl_charge_toggle, NULL);
77DECLARE_WORK(sharpsl_bat, sharpsl_battery_thread, NULL); 78DECLARE_WORK(sharpsl_bat, sharpsl_battery_thread, NULL);
79DEFINE_LED_TRIGGER(sharpsl_charge_led_trigger);
78 80
79 81
80static int get_percentage(int voltage) 82static int get_percentage(int voltage)
@@ -190,10 +192,10 @@ void sharpsl_pm_led(int val)
190 dev_err(sharpsl_pm.dev, "Charging Error!\n"); 192 dev_err(sharpsl_pm.dev, "Charging Error!\n");
191 } else if (val == SHARPSL_LED_ON) { 193 } else if (val == SHARPSL_LED_ON) {
192 dev_dbg(sharpsl_pm.dev, "Charge LED On\n"); 194 dev_dbg(sharpsl_pm.dev, "Charge LED On\n");
193 195 led_trigger_event(sharpsl_charge_led_trigger, LED_FULL);
194 } else { 196 } else {
195 dev_dbg(sharpsl_pm.dev, "Charge LED Off\n"); 197 dev_dbg(sharpsl_pm.dev, "Charge LED Off\n");
196 198 led_trigger_event(sharpsl_charge_led_trigger, LED_OFF);
197 } 199 }
198} 200}
199 201
@@ -786,6 +788,8 @@ static int __init sharpsl_pm_probe(struct platform_device *pdev)
786 init_timer(&sharpsl_pm.chrg_full_timer); 788 init_timer(&sharpsl_pm.chrg_full_timer);
787 sharpsl_pm.chrg_full_timer.function = sharpsl_chrg_full_timer; 789 sharpsl_pm.chrg_full_timer.function = sharpsl_chrg_full_timer;
788 790
791 led_trigger_register_simple("sharpsl-charge", &sharpsl_charge_led_trigger);
792
789 sharpsl_pm.machinfo->init(); 793 sharpsl_pm.machinfo->init();
790 794
791 device_create_file(&pdev->dev, &dev_attr_battery_percentage); 795 device_create_file(&pdev->dev, &dev_attr_battery_percentage);
@@ -807,6 +811,8 @@ static int sharpsl_pm_remove(struct platform_device *pdev)
807 device_remove_file(&pdev->dev, &dev_attr_battery_percentage); 811 device_remove_file(&pdev->dev, &dev_attr_battery_percentage);
808 device_remove_file(&pdev->dev, &dev_attr_battery_voltage); 812 device_remove_file(&pdev->dev, &dev_attr_battery_voltage);
809 813
814 led_trigger_unregister_simple(sharpsl_charge_led_trigger);
815
810 sharpsl_pm.machinfo->exit(); 816 sharpsl_pm.machinfo->exit();
811 817
812 del_timer_sync(&sharpsl_pm.chrg_full_timer); 818 del_timer_sync(&sharpsl_pm.chrg_full_timer);
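
The sharpsl_pm.c changes above hook the charge LED up to the LED trigger interface provided by the new LED class: define a trigger, register it by name, fire LED_FULL/LED_OFF events as state changes, and unregister on removal. A minimal, hedged sketch of the same pattern in a hypothetical driver (all names below are illustrative):

#include <linux/leds.h>

DEFINE_LED_TRIGGER(example_charge_trigger);

static void example_register(void)
{
	/* Make the trigger visible so LEDs can be bound to it via sysfs. */
	led_trigger_register_simple("example-charge", &example_charge_trigger);
}

static void example_set_charging(int on)
{
	/* Drive every LED bound to the trigger fully on or off. */
	led_trigger_event(example_charge_trigger, on ? LED_FULL : LED_OFF);
}

static void example_unregister(void)
{
	led_trigger_unregister_simple(example_charge_trigger);
}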
diff --git a/arch/arm/configs/at91rm9200dk_defconfig b/arch/arm/configs/at91rm9200dk_defconfig
index 1fe73d198888..9e1c1cceb735 100644
--- a/arch/arm/configs/at91rm9200dk_defconfig
+++ b/arch/arm/configs/at91rm9200dk_defconfig
@@ -379,7 +379,7 @@ CONFIG_MTD_PHYSMAP_BANKWIDTH=2
379# CONFIG_MTD_DOC2001 is not set 379# CONFIG_MTD_DOC2001 is not set
380# CONFIG_MTD_DOC2001PLUS is not set 380# CONFIG_MTD_DOC2001PLUS is not set
381CONFIG_MTD_AT91_DATAFLASH=y 381CONFIG_MTD_AT91_DATAFLASH=y
382CONFIG_MTD_AT91_DATAFLASH_CARD=y 382# CONFIG_MTD_AT91_DATAFLASH_CARD is not set
383 383
384# 384#
385# NAND Flash Device Drivers 385# NAND Flash Device Drivers
diff --git a/arch/arm/configs/at91rm9200ek_defconfig b/arch/arm/configs/at91rm9200ek_defconfig
index b7d934cdb1b7..6e0805a971d7 100644
--- a/arch/arm/configs/at91rm9200ek_defconfig
+++ b/arch/arm/configs/at91rm9200ek_defconfig
@@ -370,7 +370,7 @@ CONFIG_MTD_PHYSMAP_BANKWIDTH=2
370# CONFIG_MTD_DOC2001 is not set 370# CONFIG_MTD_DOC2001 is not set
371# CONFIG_MTD_DOC2001PLUS is not set 371# CONFIG_MTD_DOC2001PLUS is not set
372CONFIG_MTD_AT91_DATAFLASH=y 372CONFIG_MTD_AT91_DATAFLASH=y
373CONFIG_MTD_AT91_DATAFLASH_CARD=y 373# CONFIG_MTD_AT91_DATAFLASH_CARD is not set
374 374
375# 375#
376# NAND Flash Device Drivers 376# NAND Flash Device Drivers
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 1574941ebfe1..ee083b3f0522 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -100,23 +100,12 @@ EXPORT_SYMBOL(__raw_writesl);
100#endif 100#endif
101 101
102 /* string / mem functions */ 102 /* string / mem functions */
103EXPORT_SYMBOL(strcpy);
104EXPORT_SYMBOL(strncpy);
105EXPORT_SYMBOL(strcat);
106EXPORT_SYMBOL(strncat);
107EXPORT_SYMBOL(strcmp);
108EXPORT_SYMBOL(strncmp);
109EXPORT_SYMBOL(strchr); 103EXPORT_SYMBOL(strchr);
110EXPORT_SYMBOL(strlen);
111EXPORT_SYMBOL(strnlen);
112EXPORT_SYMBOL(strpbrk); 104EXPORT_SYMBOL(strpbrk);
113EXPORT_SYMBOL(strrchr); 105EXPORT_SYMBOL(strrchr);
114EXPORT_SYMBOL(strstr);
115EXPORT_SYMBOL(memset); 106EXPORT_SYMBOL(memset);
116EXPORT_SYMBOL(memcpy); 107EXPORT_SYMBOL(memcpy);
117EXPORT_SYMBOL(memmove); 108EXPORT_SYMBOL(memmove);
118EXPORT_SYMBOL(memcmp);
119EXPORT_SYMBOL(memscan);
120EXPORT_SYMBOL(memchr); 109EXPORT_SYMBOL(memchr);
121EXPORT_SYMBOL(__memzero); 110EXPORT_SYMBOL(__memzero);
122 111
@@ -190,8 +179,6 @@ EXPORT_SYMBOL(_find_next_bit_be);
190 179
191 /* syscalls */ 180 /* syscalls */
192EXPORT_SYMBOL(sys_write); 181EXPORT_SYMBOL(sys_write);
193EXPORT_SYMBOL(sys_read);
194EXPORT_SYMBOL(sys_lseek); 182EXPORT_SYMBOL(sys_lseek);
195EXPORT_SYMBOL(sys_open);
196EXPORT_SYMBOL(sys_exit); 183EXPORT_SYMBOL(sys_exit);
197EXPORT_SYMBOL(sys_wait4); 184EXPORT_SYMBOL(sys_wait4);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 355914ffb192..ab8e600c18c8 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -666,7 +666,7 @@ __kuser_helper_start:
666 * 666 *
667 * #define __kernel_dmb() \ 667 * #define __kernel_dmb() \
668 * asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \ 668 * asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
669 * : : : "lr","cc" ) 669 * : : : "r0", "lr","cc" )
670 */ 670 */
671 671
672__kuser_memory_barrier: @ 0xffff0fa0 672__kuser_memory_barrier: @ 0xffff0fa0
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
new file mode 100644
index 000000000000..a52da0ddb43d
--- /dev/null
+++ b/arch/arm/kernel/head-common.S
@@ -0,0 +1,217 @@
1/*
2 * linux/arch/arm/kernel/head-common.S
3 *
4 * Copyright (C) 1994-2002 Russell King
5 * Copyright (c) 2003 ARM Limited
6 * All Rights Reserved
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14 .type __switch_data, %object
15__switch_data:
16 .long __mmap_switched
17 .long __data_loc @ r4
18 .long __data_start @ r5
19 .long __bss_start @ r6
20 .long _end @ r7
21 .long processor_id @ r4
22 .long __machine_arch_type @ r5
23 .long cr_alignment @ r6
24 .long init_thread_union + THREAD_START_SP @ sp
25
26/*
27 * The following fragment of code is executed with the MMU on in MMU mode,
28 * and uses absolute addresses; this is not position independent.
29 *
30 * r0 = cp#15 control register
31 * r1 = machine ID
32 * r9 = processor ID
33 */
34 .type __mmap_switched, %function
35__mmap_switched:
36 adr r3, __switch_data + 4
37
38 ldmia r3!, {r4, r5, r6, r7}
39 cmp r4, r5 @ Copy data segment if needed
401: cmpne r5, r6
41 ldrne fp, [r4], #4
42 strne fp, [r5], #4
43 bne 1b
44
45 mov fp, #0 @ Clear BSS (and zero fp)
461: cmp r6, r7
47 strcc fp, [r6],#4
48 bcc 1b
49
50 ldmia r3, {r4, r5, r6, sp}
51 str r9, [r4] @ Save processor ID
52 str r1, [r5] @ Save machine type
53 bic r4, r0, #CR_A @ Clear 'A' bit
54 stmia r6, {r0, r4} @ Save control register values
55 b start_kernel
56
57/*
58 * Exception handling. Something went wrong and we can't proceed. We
59 * ought to tell the user, but since we don't have any guarantee that
60 * we're even running on the right architecture, we do virtually nothing.
61 *
62 * If CONFIG_DEBUG_LL is set we try to print out something about the error
63 * and hope for the best (useful if bootloader fails to pass a proper
64 * machine ID for example).
65 */
66
67 .type __error_p, %function
68__error_p:
69#ifdef CONFIG_DEBUG_LL
70 adr r0, str_p1
71 bl printascii
72 b __error
73str_p1: .asciz "\nError: unrecognized/unsupported processor variant.\n"
74 .align
75#endif
76
77 .type __error_a, %function
78__error_a:
79#ifdef CONFIG_DEBUG_LL
80 mov r4, r1 @ preserve machine ID
81 adr r0, str_a1
82 bl printascii
83 mov r0, r4
84 bl printhex8
85 adr r0, str_a2
86 bl printascii
87 adr r3, 3f
88 ldmia r3, {r4, r5, r6} @ get machine desc list
89 sub r4, r3, r4 @ get offset between virt&phys
90 add r5, r5, r4 @ convert virt addresses to
91 add r6, r6, r4 @ physical address space
921: ldr r0, [r5, #MACHINFO_TYPE] @ get machine type
93 bl printhex8
94 mov r0, #'\t'
95 bl printch
96 ldr r0, [r5, #MACHINFO_NAME] @ get machine name
97 add r0, r0, r4
98 bl printascii
99 mov r0, #'\n'
100 bl printch
101 add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
102 cmp r5, r6
103 blo 1b
104 adr r0, str_a3
105 bl printascii
106 b __error
107str_a1: .asciz "\nError: unrecognized/unsupported machine ID (r1 = 0x"
108str_a2: .asciz ").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
109str_a3: .asciz "\nPlease check your kernel config and/or bootloader.\n"
110 .align
111#endif
112
113 .type __error, %function
114__error:
115#ifdef CONFIG_ARCH_RPC
116/*
117 * Turn the screen red on a error - RiscPC only.
118 */
119 mov r0, #0x02000000
120 mov r3, #0x11
121 orr r3, r3, r3, lsl #8
122 orr r3, r3, r3, lsl #16
123 str r3, [r0], #4
124 str r3, [r0], #4
125 str r3, [r0], #4
126 str r3, [r0], #4
127#endif
1281: mov r0, r0
129 b 1b
130
131
132/*
133 * Read processor ID register (CP#15, CR0), and look up in the linker-built
134 * supported processor list. Note that we can't use the absolute addresses
135 * for the __proc_info lists since we aren't running with the MMU on
136 * (and therefore, we are not in the correct address space). We have to
137 * calculate the offset.
138 *
139 * r9 = cpuid
140 * Returns:
141 * r3, r4, r6 corrupted
142 * r5 = proc_info pointer in physical address space
143 * r9 = cpuid (preserved)
144 */
145 .type __lookup_processor_type, %function
146__lookup_processor_type:
147 adr r3, 3f
148 ldmda r3, {r5 - r7}
149 sub r3, r3, r7 @ get offset between virt&phys
150 add r5, r5, r3 @ convert virt addresses to
151 add r6, r6, r3 @ physical address space
1521: ldmia r5, {r3, r4} @ value, mask
153 and r4, r4, r9 @ mask wanted bits
154 teq r3, r4
155 beq 2f
156 add r5, r5, #PROC_INFO_SZ @ sizeof(proc_info_list)
157 cmp r5, r6
158 blo 1b
159 mov r5, #0 @ unknown processor
1602: mov pc, lr
161
162/*
163 * This provides a C-API version of the above function.
164 */
165ENTRY(lookup_processor_type)
166 stmfd sp!, {r4 - r7, r9, lr}
167 mov r9, r0
168 bl __lookup_processor_type
169 mov r0, r5
170 ldmfd sp!, {r4 - r7, r9, pc}
171
172/*
173 * Look in include/asm-arm/procinfo.h and arch/arm/kernel/arch.[ch] for
174 * more information about the __proc_info and __arch_info structures.
175 */
176 .long __proc_info_begin
177 .long __proc_info_end
1783: .long .
179 .long __arch_info_begin
180 .long __arch_info_end
181
182/*
183 * Lookup machine architecture in the linker-build list of architectures.
184 * Note that we can't use the absolute addresses for the __arch_info
185 * lists since we aren't running with the MMU on (and therefore, we are
186 * not in the correct address space). We have to calculate the offset.
187 *
188 * r1 = machine architecture number
189 * Returns:
190 * r3, r4, r6 corrupted
191 * r5 = mach_info pointer in physical address space
192 */
193 .type __lookup_machine_type, %function
194__lookup_machine_type:
195 adr r3, 3b
196 ldmia r3, {r4, r5, r6}
197 sub r3, r3, r4 @ get offset between virt&phys
198 add r5, r5, r3 @ convert virt addresses to
199 add r6, r6, r3 @ physical address space
2001: ldr r3, [r5, #MACHINFO_TYPE] @ get machine type
201 teq r3, r1 @ matches loader number?
202 beq 2f @ found
203 add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
204 cmp r5, r6
205 blo 1b
206 mov r5, #0 @ unknown machine
2072: mov pc, lr
208
209/*
210 * This provides a C-API version of the above function.
211 */
212ENTRY(lookup_machine_type)
213 stmfd sp!, {r4 - r6, lr}
214 mov r1, r0
215 bl __lookup_machine_type
216 mov r0, r5
217 ldmfd sp!, {r4 - r6, pc}
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
new file mode 100644
index 000000000000..b093ab8738b5
--- /dev/null
+++ b/arch/arm/kernel/head-nommu.S
@@ -0,0 +1,83 @@
1/*
2 * linux/arch/arm/kernel/head-nommu.S
3 *
4 * Copyright (C) 1994-2002 Russell King
5 * Copyright (C) 2003-2006 Hyok S. Choi
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Common kernel startup code (non-paged MM)
12 * for 32-bit CPUs which have a processor ID register (CP15).
13 *
14 */
15#include <linux/config.h>
16#include <linux/linkage.h>
17#include <linux/init.h>
18
19#include <asm/assembler.h>
20#include <asm/mach-types.h>
21#include <asm/procinfo.h>
22#include <asm/ptrace.h>
23#include <asm/constants.h>
24#include <asm/system.h>
25
26#define PROCINFO_INITFUNC 12
27
28/*
29 * Kernel startup entry point.
30 * ---------------------------
31 *
32 * This is normally called from the decompressor code. The requirements
33 * are: MMU = off, D-cache = off, I-cache = don't care, r0 = 0,
34 * r1 = machine nr.
35 *
36 * See linux/arch/arm/tools/mach-types for the complete list of machine
37 * numbers for r1.
38 *
39 */
40 __INIT
41 .type stext, %function
42ENTRY(stext)
43 msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC @ ensure svc mode
44 @ and irqs disabled
45 mrc p15, 0, r9, c0, c0 @ get processor id
46 bl __lookup_processor_type @ r5=procinfo r9=cpuid
47 movs r10, r5 @ invalid processor (r5=0)?
48 beq __error_p @ yes, error 'p'
49 bl __lookup_machine_type @ r5=machinfo
50 movs r8, r5 @ invalid machine (r5=0)?
51 beq __error_a @ yes, error 'a'
52
53 ldr r13, __switch_data @ address to jump to after
54 @ the initialization is done
55 adr lr, __after_proc_init @ return (PIC) address
56 add pc, r10, #PROCINFO_INITFUNC
57
58/*
59 * Set the Control Register and Read the process ID.
60 */
61 .type __after_proc_init, %function
62__after_proc_init:
63 mrc p15, 0, r0, c1, c0, 0 @ read control reg
64#ifdef CONFIG_ALIGNMENT_TRAP
65 orr r0, r0, #CR_A
66#else
67 bic r0, r0, #CR_A
68#endif
69#ifdef CONFIG_CPU_DCACHE_DISABLE
70 bic r0, r0, #CR_C
71#endif
72#ifdef CONFIG_CPU_BPREDICT_DISABLE
73 bic r0, r0, #CR_Z
74#endif
75#ifdef CONFIG_CPU_ICACHE_DISABLE
76 bic r0, r0, #CR_I
77#endif
78 mcr p15, 0, r0, c1, c0, 0 @ write control reg
79
80 mov pc, r13 @ clear the BSS and jump
81 @ to start_kernel
82
83#include "head-common.S"
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 53b6901f70a6..04b66a9328ef 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -102,49 +102,6 @@ ENTRY(stext)
102 adr lr, __enable_mmu @ return (PIC) address 102 adr lr, __enable_mmu @ return (PIC) address
103 add pc, r10, #PROCINFO_INITFUNC 103 add pc, r10, #PROCINFO_INITFUNC
104 104
105 .type __switch_data, %object
106__switch_data:
107 .long __mmap_switched
108 .long __data_loc @ r4
109 .long __data_start @ r5
110 .long __bss_start @ r6
111 .long _end @ r7
112 .long processor_id @ r4
113 .long __machine_arch_type @ r5
114 .long cr_alignment @ r6
115 .long init_thread_union + THREAD_START_SP @ sp
116
117/*
118 * The following fragment of code is executed with the MMU on, and uses
119 * absolute addresses; this is not position independent.
120 *
121 * r0 = cp#15 control register
122 * r1 = machine ID
123 * r9 = processor ID
124 */
125 .type __mmap_switched, %function
126__mmap_switched:
127 adr r3, __switch_data + 4
128
129 ldmia r3!, {r4, r5, r6, r7}
130 cmp r4, r5 @ Copy data segment if needed
1311: cmpne r5, r6
132 ldrne fp, [r4], #4
133 strne fp, [r5], #4
134 bne 1b
135
136 mov fp, #0 @ Clear BSS (and zero fp)
1371: cmp r6, r7
138 strcc fp, [r6],#4
139 bcc 1b
140
141 ldmia r3, {r4, r5, r6, sp}
142 str r9, [r4] @ Save processor ID
143 str r1, [r5] @ Save machine type
144 bic r4, r0, #CR_A @ Clear 'A' bit
145 stmia r6, {r0, r4} @ Save control register values
146 b start_kernel
147
148#if defined(CONFIG_SMP) 105#if defined(CONFIG_SMP)
149 .type secondary_startup, #function 106 .type secondary_startup, #function
150ENTRY(secondary_startup) 107ENTRY(secondary_startup)
@@ -367,166 +324,4 @@ __create_page_tables:
367 mov pc, lr 324 mov pc, lr
368 .ltorg 325 .ltorg
369 326
370 327#include "head-common.S"
371
372/*
373 * Exception handling. Something went wrong and we can't proceed. We
374 * ought to tell the user, but since we don't have any guarantee that
375 * we're even running on the right architecture, we do virtually nothing.
376 *
377 * If CONFIG_DEBUG_LL is set we try to print out something about the error
378 * and hope for the best (useful if bootloader fails to pass a proper
379 * machine ID for example).
380 */
381
382 .type __error_p, %function
383__error_p:
384#ifdef CONFIG_DEBUG_LL
385 adr r0, str_p1
386 bl printascii
387 b __error
388str_p1: .asciz "\nError: unrecognized/unsupported processor variant.\n"
389 .align
390#endif
391
392 .type __error_a, %function
393__error_a:
394#ifdef CONFIG_DEBUG_LL
395 mov r4, r1 @ preserve machine ID
396 adr r0, str_a1
397 bl printascii
398 mov r0, r4
399 bl printhex8
400 adr r0, str_a2
401 bl printascii
402 adr r3, 3f
403 ldmia r3, {r4, r5, r6} @ get machine desc list
404 sub r4, r3, r4 @ get offset between virt&phys
405 add r5, r5, r4 @ convert virt addresses to
406 add r6, r6, r4 @ physical address space
4071: ldr r0, [r5, #MACHINFO_TYPE] @ get machine type
408 bl printhex8
409 mov r0, #'\t'
410 bl printch
411 ldr r0, [r5, #MACHINFO_NAME] @ get machine name
412 add r0, r0, r4
413 bl printascii
414 mov r0, #'\n'
415 bl printch
416 add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
417 cmp r5, r6
418 blo 1b
419 adr r0, str_a3
420 bl printascii
421 b __error
422str_a1: .asciz "\nError: unrecognized/unsupported machine ID (r1 = 0x"
423str_a2: .asciz ").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
424str_a3: .asciz "\nPlease check your kernel config and/or bootloader.\n"
425 .align
426#endif
427
428 .type __error, %function
429__error:
430#ifdef CONFIG_ARCH_RPC
431/*
432 * Turn the screen red on a error - RiscPC only.
433 */
434 mov r0, #0x02000000
435 mov r3, #0x11
436 orr r3, r3, r3, lsl #8
437 orr r3, r3, r3, lsl #16
438 str r3, [r0], #4
439 str r3, [r0], #4
440 str r3, [r0], #4
441 str r3, [r0], #4
442#endif
4431: mov r0, r0
444 b 1b
445
446
447/*
448 * Read processor ID register (CP#15, CR0), and look up in the linker-built
449 * supported processor list. Note that we can't use the absolute addresses
450 * for the __proc_info lists since we aren't running with the MMU on
451 * (and therefore, we are not in the correct address space). We have to
452 * calculate the offset.
453 *
454 * r9 = cpuid
455 * Returns:
456 * r3, r4, r6 corrupted
457 * r5 = proc_info pointer in physical address space
458 * r9 = cpuid (preserved)
459 */
460 .type __lookup_processor_type, %function
461__lookup_processor_type:
462 adr r3, 3f
463 ldmda r3, {r5 - r7}
464 sub r3, r3, r7 @ get offset between virt&phys
465 add r5, r5, r3 @ convert virt addresses to
466 add r6, r6, r3 @ physical address space
4671: ldmia r5, {r3, r4} @ value, mask
468 and r4, r4, r9 @ mask wanted bits
469 teq r3, r4
470 beq 2f
471 add r5, r5, #PROC_INFO_SZ @ sizeof(proc_info_list)
472 cmp r5, r6
473 blo 1b
474 mov r5, #0 @ unknown processor
4752: mov pc, lr
476
477/*
478 * This provides a C-API version of the above function.
479 */
480ENTRY(lookup_processor_type)
481 stmfd sp!, {r4 - r7, r9, lr}
482 mov r9, r0
483 bl __lookup_processor_type
484 mov r0, r5
485 ldmfd sp!, {r4 - r7, r9, pc}
486
487/*
488 * Look in include/asm-arm/procinfo.h and arch/arm/kernel/arch.[ch] for
489 * more information about the __proc_info and __arch_info structures.
490 */
491 .long __proc_info_begin
492 .long __proc_info_end
4933: .long .
494 .long __arch_info_begin
495 .long __arch_info_end
496
497/*
498 * Lookup machine architecture in the linker-build list of architectures.
499 * Note that we can't use the absolute addresses for the __arch_info
500 * lists since we aren't running with the MMU on (and therefore, we are
501 * not in the correct address space). We have to calculate the offset.
502 *
503 * r1 = machine architecture number
504 * Returns:
505 * r3, r4, r6 corrupted
506 * r5 = mach_info pointer in physical address space
507 */
508 .type __lookup_machine_type, %function
509__lookup_machine_type:
510 adr r3, 3b
511 ldmia r3, {r4, r5, r6}
512 sub r3, r3, r4 @ get offset between virt&phys
513 add r5, r5, r3 @ convert virt addresses to
514 add r6, r6, r3 @ physical address space
5151: ldr r3, [r5, #MACHINFO_TYPE] @ get machine type
516 teq r3, r1 @ matches loader number?
517 beq 2f @ found
518 add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
519 cmp r5, r6
520 blo 1b
521 mov r5, #0 @ unknown machine
5222: mov pc, lr
523
524/*
525 * This provides a C-API version of the above function.
526 */
527ENTRY(lookup_machine_type)
528 stmfd sp!, {r4 - r6, lr}
529 mov r1, r0
530 bl __lookup_machine_type
531 mov r0, r5
532 ldmfd sp!, {r4 - r6, pc}
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 489c069e5c3e..1ff75cee4b0d 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -474,4 +474,3 @@ unsigned long get_wchan(struct task_struct *p)
474 } while (count ++ < 16); 474 } while (count ++ < 16);
475 return 0; 475 return 0;
476} 476}
477EXPORT_SYMBOL(get_wchan);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index b7cd280bfd63..437528403959 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -252,6 +252,9 @@ static void __init dump_cpu_info(int cpu)
252 dump_cache("cache", cpu, CACHE_ISIZE(info)); 252 dump_cache("cache", cpu, CACHE_ISIZE(info));
253 } 253 }
254 } 254 }
255
256 if (arch_is_coherent())
257 printk("Cache coherency enabled\n");
255} 258}
256 259
257int cpu_architecture(void) 260int cpu_architecture(void)
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
index 9991049c522d..27beece15502 100644
--- a/arch/arm/kernel/signal.h
+++ b/arch/arm/kernel/signal.h
@@ -7,6 +7,6 @@
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10#define KERN_SIGRETURN_CODE 0xffff0500 10#define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500)
11 11
12extern const unsigned long sigreturn_codes[7]; 12extern const unsigned long sigreturn_codes[7];
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index d566d5f4574d..35230a060108 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -688,6 +688,7 @@ EXPORT_SYMBOL(abort);
688 688
689void __init trap_init(void) 689void __init trap_init(void)
690{ 690{
691 unsigned long vectors = CONFIG_VECTORS_BASE;
691 extern char __stubs_start[], __stubs_end[]; 692 extern char __stubs_start[], __stubs_end[];
692 extern char __vectors_start[], __vectors_end[]; 693 extern char __vectors_start[], __vectors_end[];
693 extern char __kuser_helper_start[], __kuser_helper_end[]; 694 extern char __kuser_helper_start[], __kuser_helper_end[];
@@ -698,9 +699,9 @@ void __init trap_init(void)
698 * into the vector page, mapped at 0xffff0000, and ensure these 699 * into the vector page, mapped at 0xffff0000, and ensure these
699 * are visible to the instruction stream. 700 * are visible to the instruction stream.
700 */ 701 */
701 memcpy((void *)0xffff0000, __vectors_start, __vectors_end - __vectors_start); 702 memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
702 memcpy((void *)0xffff0200, __stubs_start, __stubs_end - __stubs_start); 703 memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
703 memcpy((void *)0xffff1000 - kuser_sz, __kuser_helper_start, kuser_sz); 704 memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
704 705
705 /* 706 /*
706 * Copy signal return handlers into the vector page, and 707 * Copy signal return handlers into the vector page, and
@@ -709,6 +710,6 @@ void __init trap_init(void)
709 memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes, 710 memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes,
710 sizeof(sigreturn_codes)); 711 sizeof(sigreturn_codes));
711 712
712 flush_icache_range(0xffff0000, 0xffff0000 + PAGE_SIZE); 713 flush_icache_range(vectors, vectors + PAGE_SIZE);
713 modify_domain(DOMAIN_USER, DOMAIN_CLIENT); 714 modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
714} 715}
diff --git a/arch/arm/mach-at91rm9200/Makefile b/arch/arm/mach-at91rm9200/Makefile
index 75e6ee318ded..ef88c4128edc 100644
--- a/arch/arm/mach-at91rm9200/Makefile
+++ b/arch/arm/mach-at91rm9200/Makefile
@@ -16,11 +16,12 @@ obj-$(CONFIG_MACH_CSB637) += board-csb637.o
16#obj-$(CONFIG_MACH_KB9200) += board-kb9202.o 16#obj-$(CONFIG_MACH_KB9200) += board-kb9202.o
17 17
18# LEDs support 18# LEDs support
19#led-$(CONFIG_ARCH_AT91RM9200DK) += leds.o 19led-$(CONFIG_ARCH_AT91RM9200DK) += leds.o
20#led-$(CONFIG_MACH_AT91RM9200EK) += leds.o 20led-$(CONFIG_MACH_AT91RM9200EK) += leds.o
21#led-$(CONFIG_MACH_CSB337) += leds.o 21led-$(CONFIG_MACH_CSB337) += leds.o
22#led-$(CONFIG_MACH_CSB637) += leds.o 22led-$(CONFIG_MACH_CSB637) += leds.o
23#led-$(CONFIG_MACH_KB9200) += leds.o 23#led-$(CONFIG_MACH_KB9200) += leds.o
24#led-$(CONFIG_MACH_KAFA) += leds.o
24obj-$(CONFIG_LEDS) += $(led-y) 25obj-$(CONFIG_LEDS) += $(led-y)
25 26
26# VGA support 27# VGA support
diff --git a/arch/arm/mach-at91rm9200/board-csb337.c b/arch/arm/mach-at91rm9200/board-csb337.c
index 54022e58d50d..f45104ceea8f 100644
--- a/arch/arm/mach-at91rm9200/board-csb337.c
+++ b/arch/arm/mach-at91rm9200/board-csb337.c
@@ -67,6 +67,9 @@ static void __init csb337_map_io(void)
67 /* Initialize clocks: 3.6864 MHz crystal */ 67 /* Initialize clocks: 3.6864 MHz crystal */
68 at91_clock_init(3686400); 68 at91_clock_init(3686400);
69 69
70 /* Setup the LEDs */
71 at91_init_leds(AT91_PIN_PB2, AT91_PIN_PB2);
72
70#ifdef CONFIG_SERIAL_AT91 73#ifdef CONFIG_SERIAL_AT91
71 at91_console_port = CSB337_SERIAL_CONSOLE; 74 at91_console_port = CSB337_SERIAL_CONSOLE;
72 memcpy(at91_serial_map, serial, sizeof(serial)); 75 memcpy(at91_serial_map, serial, sizeof(serial));
diff --git a/arch/arm/mach-at91rm9200/board-csb637.c b/arch/arm/mach-at91rm9200/board-csb637.c
index 8195f9d919ea..f2c2d6e79bc6 100644
--- a/arch/arm/mach-at91rm9200/board-csb637.c
+++ b/arch/arm/mach-at91rm9200/board-csb637.c
@@ -67,6 +67,9 @@ static void __init csb637_map_io(void)
67 /* Initialize clocks: 3.6864 MHz crystal */ 67 /* Initialize clocks: 3.6864 MHz crystal */
68 at91_clock_init(3686400); 68 at91_clock_init(3686400);
69 69
70 /* Setup the LEDs */
71 at91_init_leds(AT91_PIN_PB2, AT91_PIN_PB2);
72
70#ifdef CONFIG_SERIAL_AT91 73#ifdef CONFIG_SERIAL_AT91
71 at91_console_port = CSB637_SERIAL_CONSOLE; 74 at91_console_port = CSB637_SERIAL_CONSOLE;
72 memcpy(at91_serial_map, serial, sizeof(serial)); 75 memcpy(at91_serial_map, serial, sizeof(serial));
diff --git a/arch/arm/mach-at91rm9200/board-dk.c b/arch/arm/mach-at91rm9200/board-dk.c
index 8a783368366e..2d7200ed66ed 100644
--- a/arch/arm/mach-at91rm9200/board-dk.c
+++ b/arch/arm/mach-at91rm9200/board-dk.c
@@ -70,6 +70,9 @@ static void __init dk_map_io(void)
70 /* Initialize clocks: 18.432 MHz crystal */ 70 /* Initialize clocks: 18.432 MHz crystal */
71 at91_clock_init(18432000); 71 at91_clock_init(18432000);
72 72
73 /* Setup the LEDs */
74 at91_init_leds(AT91_PIN_PB2, AT91_PIN_PB2);
75
73#ifdef CONFIG_SERIAL_AT91 76#ifdef CONFIG_SERIAL_AT91
74 at91_console_port = DK_SERIAL_CONSOLE; 77 at91_console_port = DK_SERIAL_CONSOLE;
75 memcpy(at91_serial_map, serial, sizeof(serial)); 78 memcpy(at91_serial_map, serial, sizeof(serial));
@@ -118,9 +121,14 @@ static void __init dk_board_init(void)
118 at91_add_device_udc(&dk_udc_data); 121 at91_add_device_udc(&dk_udc_data);
119 /* Compact Flash */ 122 /* Compact Flash */
120 at91_add_device_cf(&dk_cf_data); 123 at91_add_device_cf(&dk_cf_data);
124#ifdef CONFIG_MTD_AT91_DATAFLASH_CARD
125 /* DataFlash card */
126 at91_set_gpio_output(AT91_PIN_PB7, 0);
127#else
121 /* MMC */ 128 /* MMC */
122 at91_set_gpio_output(AT91_PIN_PB7, 1); /* this MMC card slot can optionally use SPI signaling (CS3). default: MMC */ 129 at91_set_gpio_output(AT91_PIN_PB7, 1); /* this MMC card slot can optionally use SPI signaling (CS3). */
123 at91_add_device_mmc(&dk_mmc_data); 130 at91_add_device_mmc(&dk_mmc_data);
131#endif
124 /* VGA */ 132 /* VGA */
125// dk_add_device_video(); 133// dk_add_device_video();
126} 134}
diff --git a/arch/arm/mach-at91rm9200/board-ek.c b/arch/arm/mach-at91rm9200/board-ek.c
index fd0752eba897..80d90f5135a1 100644
--- a/arch/arm/mach-at91rm9200/board-ek.c
+++ b/arch/arm/mach-at91rm9200/board-ek.c
@@ -70,6 +70,9 @@ static void __init ek_map_io(void)
70 /* Initialize clocks: 18.432 MHz crystal */ 70 /* Initialize clocks: 18.432 MHz crystal */
71 at91_clock_init(18432000); 71 at91_clock_init(18432000);
72 72
73 /* Setup the LEDs */
74 at91_init_leds(AT91_PIN_PB1, AT91_PIN_PB2);
75
73#ifdef CONFIG_SERIAL_AT91 76#ifdef CONFIG_SERIAL_AT91
74 at91_console_port = EK_SERIAL_CONSOLE; 77 at91_console_port = EK_SERIAL_CONSOLE;
75 memcpy(at91_serial_map, serial, sizeof(serial)); 78 memcpy(at91_serial_map, serial, sizeof(serial));
@@ -111,9 +114,14 @@ static void __init ek_board_init(void)
111 at91_add_device_usbh(&ek_usbh_data); 114 at91_add_device_usbh(&ek_usbh_data);
112 /* USB Device */ 115 /* USB Device */
113 at91_add_device_udc(&ek_udc_data); 116 at91_add_device_udc(&ek_udc_data);
117#ifdef CONFIG_MTD_AT91_DATAFLASH_CARD
118 /* DataFlash card */
119 at91_set_gpio_output(AT91_PIN_PB22, 0);
120#else
114 /* MMC */ 121 /* MMC */
115 at91_set_gpio_output(AT91_PIN_PB22, 1); /* this MMC card slot can optionally use SPI signaling (CS3). default: MMC */ 122 at91_set_gpio_output(AT91_PIN_PB22, 1); /* this MMC card slot can optionally use SPI signaling (CS3). */
116 at91_add_device_mmc(&ek_mmc_data); 123 at91_add_device_mmc(&ek_mmc_data);
124#endif
117 /* VGA */ 125 /* VGA */
118// ek_add_device_video(); 126// ek_add_device_video();
119} 127}
diff --git a/arch/arm/mach-at91rm9200/devices.c b/arch/arm/mach-at91rm9200/devices.c
index 57eedd5beaf6..1781b8f342c4 100644
--- a/arch/arm/mach-at91rm9200/devices.c
+++ b/arch/arm/mach-at91rm9200/devices.c
@@ -28,10 +28,10 @@
28static u64 ohci_dmamask = 0xffffffffUL; 28static u64 ohci_dmamask = 0xffffffffUL;
29static struct at91_usbh_data usbh_data; 29static struct at91_usbh_data usbh_data;
30 30
31static struct resource at91rm9200_usbh_resource[] = { 31static struct resource at91_usbh_resource[] = {
32 [0] = { 32 [0] = {
33 .start = AT91_UHP_BASE, 33 .start = AT91_UHP_BASE,
34 .end = AT91_UHP_BASE + SZ_1M -1, 34 .end = AT91_UHP_BASE + SZ_1M - 1,
35 .flags = IORESOURCE_MEM, 35 .flags = IORESOURCE_MEM,
36 }, 36 },
37 [1] = { 37 [1] = {
@@ -49,8 +49,8 @@ static struct platform_device at91rm9200_usbh_device = {
49 .coherent_dma_mask = 0xffffffff, 49 .coherent_dma_mask = 0xffffffff,
50 .platform_data = &usbh_data, 50 .platform_data = &usbh_data,
51 }, 51 },
52 .resource = at91rm9200_usbh_resource, 52 .resource = at91_usbh_resource,
53 .num_resources = ARRAY_SIZE(at91rm9200_usbh_resource), 53 .num_resources = ARRAY_SIZE(at91_usbh_resource),
54}; 54};
55 55
56void __init at91_add_device_usbh(struct at91_usbh_data *data) 56void __init at91_add_device_usbh(struct at91_usbh_data *data)
@@ -121,6 +121,19 @@ void __init at91_add_device_udc(struct at91_udc_data *data) {}
121static u64 eth_dmamask = 0xffffffffUL; 121static u64 eth_dmamask = 0xffffffffUL;
122static struct at91_eth_data eth_data; 122static struct at91_eth_data eth_data;
123 123
124static struct resource at91_eth_resources[] = {
125 [0] = {
126 .start = AT91_BASE_EMAC,
127 .end = AT91_BASE_EMAC + SZ_16K - 1,
128 .flags = IORESOURCE_MEM,
129 },
130 [1] = {
131 .start = AT91_ID_EMAC,
132 .end = AT91_ID_EMAC,
133 .flags = IORESOURCE_IRQ,
134 },
135};
136
124static struct platform_device at91rm9200_eth_device = { 137static struct platform_device at91rm9200_eth_device = {
125 .name = "at91_ether", 138 .name = "at91_ether",
126 .id = -1, 139 .id = -1,
@@ -129,7 +142,8 @@ static struct platform_device at91rm9200_eth_device = {
129 .coherent_dma_mask = 0xffffffff, 142 .coherent_dma_mask = 0xffffffff,
130 .platform_data = &eth_data, 143 .platform_data = &eth_data,
131 }, 144 },
132 .num_resources = 0, 145 .resource = at91_eth_resources,
146 .num_resources = ARRAY_SIZE(at91_eth_resources),
133}; 147};
134 148
135void __init at91_add_device_eth(struct at91_eth_data *data) 149void __init at91_add_device_eth(struct at91_eth_data *data)
@@ -224,15 +238,20 @@ static u64 mmc_dmamask = 0xffffffffUL;
224static struct at91_mmc_data mmc_data; 238static struct at91_mmc_data mmc_data;
225 239
226static struct resource at91_mmc_resources[] = { 240static struct resource at91_mmc_resources[] = {
227 { 241 [0] = {
228 .start = AT91_BASE_MCI, 242 .start = AT91_BASE_MCI,
229 .end = AT91_BASE_MCI + SZ_16K - 1, 243 .end = AT91_BASE_MCI + SZ_16K - 1,
230 .flags = IORESOURCE_MEM, 244 .flags = IORESOURCE_MEM,
231 } 245 },
246 [1] = {
247 .start = AT91_ID_MCI,
248 .end = AT91_ID_MCI,
249 .flags = IORESOURCE_IRQ,
250 },
232}; 251};
233 252
234static struct platform_device at91rm9200_mmc_device = { 253static struct platform_device at91rm9200_mmc_device = {
235 .name = "at91rm9200_mci", 254 .name = "at91_mci",
236 .id = -1, 255 .id = -1,
237 .dev = { 256 .dev = {
238 .dma_mask = &mmc_dmamask, 257 .dma_mask = &mmc_dmamask,
@@ -290,4 +309,123 @@ void __init at91_add_device_mmc(struct at91_mmc_data *data)
290void __init at91_add_device_mmc(struct at91_mmc_data *data) {} 309void __init at91_add_device_mmc(struct at91_mmc_data *data) {}
291#endif 310#endif
292 311
312/* --------------------------------------------------------------------
313 * NAND / SmartMedia
314 * -------------------------------------------------------------------- */
315
316#if defined(CONFIG_MTD_NAND_AT91) || defined(CONFIG_MTD_NAND_AT91_MODULE)
317static struct at91_nand_data nand_data;
318
319static struct resource at91_nand_resources[] = {
320 {
321 .start = AT91_SMARTMEDIA_BASE,
322 .end = AT91_SMARTMEDIA_BASE + SZ_8M - 1,
323 .flags = IORESOURCE_MEM,
324 }
325};
326
327static struct platform_device at91_nand_device = {
328 .name = "at91_nand",
329 .id = -1,
330 .dev = {
331 .platform_data = &nand_data,
332 },
333 .resource = at91_nand_resources,
334 .num_resources = ARRAY_SIZE(at91_nand_resources),
335};
336
337void __init at91_add_device_nand(struct at91_nand_data *data)
338{
339 if (!data)
340 return;
341
342 /* enable pin */
343 if (data->enable_pin)
344 at91_set_gpio_output(data->enable_pin, 1);
345
346 /* ready/busy pin */
347 if (data->rdy_pin)
348 at91_set_gpio_input(data->rdy_pin, 1);
349
350 /* card detect pin */
351 if (data->det_pin)
352 at91_set_gpio_input(data->det_pin, 1);
353
354 at91_set_A_periph(AT91_PIN_PC1, 0); /* SMOE */
355 at91_set_A_periph(AT91_PIN_PC3, 0); /* SMWE */
356
357 nand_data = *data;
358 platform_device_register(&at91_nand_device);
359}
360#else
361void __init at91_add_device_nand(struct at91_nand_data *data) {}
362#endif
363
364
365/* --------------------------------------------------------------------
366 * TWI (i2c)
367 * -------------------------------------------------------------------- */
368
369#if defined(CONFIG_I2C_AT91) || defined(CONFIG_I2C_AT91_MODULE)
370static struct platform_device at91rm9200_twi_device = {
371 .name = "at91_i2c",
372 .id = -1,
373 .num_resources = 0,
374};
375
376void __init at91_add_device_i2c(void)
377{
378 /* pins used for TWI interface */
379 at91_set_A_periph(AT91_PIN_PA25, 0); /* TWD */
380 at91_set_multi_drive(AT91_PIN_PA25, 1);
381
382 at91_set_A_periph(AT91_PIN_PA26, 0); /* TWCK */
383 at91_set_multi_drive(AT91_PIN_PA26, 1);
384
385 platform_device_register(&at91rm9200_twi_device);
386}
387#else
388void __init at91_add_device_i2c(void) {}
389#endif
390
391
392/* --------------------------------------------------------------------
393 * RTC
394 * -------------------------------------------------------------------- */
395
396#if defined(CONFIG_AT91_RTC) || defined(CONFIG_AT91_RTC_MODULE)
397static struct platform_device at91rm9200_rtc_device = {
398 .name = "at91_rtc",
399 .id = -1,
400 .num_resources = 0,
401};
402
403void __init at91_add_device_rtc(void)
404{
405 platform_device_register(&at91rm9200_rtc_device);
406}
407#else
408void __init at91_add_device_rtc(void) {}
409#endif
410
411
412/* --------------------------------------------------------------------
413 * LEDs
414 * -------------------------------------------------------------------- */
415
416#if defined(CONFIG_LEDS)
417u8 at91_leds_cpu;
418u8 at91_leds_timer;
419
420void __init at91_init_leds(u8 cpu_led, u8 timer_led)
421{
422 at91_leds_cpu = cpu_led;
423 at91_leds_timer = timer_led;
424}
425
426#else
427void __init at91_init_leds(u8 cpu_led, u8 timer_led) {}
428#endif
429
430
293/* -------------------------------------------------------------------- */ 431/* -------------------------------------------------------------------- */
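
The devices.c additions above give AT91 board files helpers for NAND, TWI, RTC and the LED pins. A hedged sketch of how a board setup file might consume them; the board name and pin assignments are illustrative, the at91_nand_data fields are the ones referenced in the hunk, and the prototypes are assumed to come from <asm/arch/board.h> as the other AT91 files in this diff suggest:

#include <asm/arch/board.h>
#include <asm/arch/gpio.h>

static struct at91_nand_data example_nand_data = {
	.det_pin	= AT91_PIN_PB0,		/* card detect (optional) */
	.rdy_pin	= AT91_PIN_PC2,		/* ready/busy (optional) */
	.enable_pin	= AT91_PIN_PC0,		/* chip enable */
};

static void __init example_board_init(void)
{
	/* CPU and timer LEDs (the in-tree boards call this from map_io()). */
	at91_init_leds(AT91_PIN_PB1, AT91_PIN_PB2);

	/* Registers the "at91_nand" platform device with the data above. */
	at91_add_device_nand(&example_nand_data);

	/* TWI and RTC helpers take no board-specific data. */
	at91_add_device_i2c();
	at91_add_device_rtc();
}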
diff --git a/arch/arm/mach-at91rm9200/leds.c b/arch/arm/mach-at91rm9200/leds.c
new file mode 100644
index 000000000000..28150e8905ba
--- /dev/null
+++ b/arch/arm/mach-at91rm9200/leds.c
@@ -0,0 +1,100 @@
1/*
2 * LED driver for Atmel AT91-based boards.
3 *
4 * Copyright (C) SAN People (Pty) Ltd
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10*/
11
12#include <linux/config.h>
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/init.h>
16
17#include <asm/mach-types.h>
18#include <asm/leds.h>
19#include <asm/arch/board.h>
20#include <asm/arch/gpio.h>
21
22
23static inline void at91_led_on(unsigned int led)
24{
25 at91_set_gpio_value(led, 0);
26}
27
28static inline void at91_led_off(unsigned int led)
29{
30 at91_set_gpio_value(led, 1);
31}
32
33static inline void at91_led_toggle(unsigned int led)
34{
35 unsigned long is_off = at91_get_gpio_value(led);
36 if (is_off)
37 at91_led_on(led);
38 else
39 at91_led_off(led);
40}
41
42
43/*
44 * Handle LED events.
45 */
46static void at91_leds_event(led_event_t evt)
47{
48 unsigned long flags;
49
50 local_irq_save(flags);
51
52 switch(evt) {
53 case led_start: /* System startup */
54 at91_led_on(at91_leds_cpu);
55 break;
56
57 case led_stop: /* System stop / suspend */
58 at91_led_off(at91_leds_cpu);
59 break;
60
61#ifdef CONFIG_LEDS_TIMER
62 case led_timer: /* Every 50 timer ticks */
63 at91_led_toggle(at91_leds_timer);
64 break;
65#endif
66
67#ifdef CONFIG_LEDS_CPU
68 case led_idle_start: /* Entering idle state */
69 at91_led_off(at91_leds_cpu);
70 break;
71
72 case led_idle_end: /* Exit idle state */
73 at91_led_on(at91_leds_cpu);
74 break;
75#endif
76
77 default:
78 break;
79 }
80
81 local_irq_restore(flags);
82}
83
84
85static int __init leds_init(void)
86{
87 if (!at91_leds_timer || !at91_leds_cpu)
88 return -ENODEV;
89
90 /* Enable PIO to access the LEDs */
91 at91_set_gpio_output(at91_leds_timer, 1);
92 at91_set_gpio_output(at91_leds_cpu, 1);
93
94 leds_event = at91_leds_event;
95
96 leds_event(led_start);
97 return 0;
98}
99
100__initcall(leds_init);
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 2d892e4daa07..dcd417625389 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -424,6 +424,14 @@ static struct amba_device uart3_device = {
424 .periphid = 0x00041010, 424 .periphid = 0x00041010,
425}; 425};
426 426
427
428static struct platform_device ep93xx_rtc_device = {
429 .name = "ep93xx-rtc",
430 .id = -1,
431 .num_resources = 0,
432};
433
434
427void __init ep93xx_init_devices(void) 435void __init ep93xx_init_devices(void)
428{ 436{
429 unsigned int v; 437 unsigned int v;
@@ -439,4 +447,6 @@ void __init ep93xx_init_devices(void)
439 amba_device_register(&uart1_device, &iomem_resource); 447 amba_device_register(&uart1_device, &iomem_resource);
440 amba_device_register(&uart2_device, &iomem_resource); 448 amba_device_register(&uart2_device, &iomem_resource);
441 amba_device_register(&uart3_device, &iomem_resource); 449 amba_device_register(&uart3_device, &iomem_resource);
450
451 platform_device_register(&ep93xx_rtc_device);
442} 452}
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index 777e75daa8a5..9be01b0c3f48 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -17,6 +17,8 @@
17#include <linux/sched.h> 17#include <linux/sched.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/mtd/physmap.h> 19#include <linux/mtd/physmap.h>
20#include <linux/platform_device.h>
21#include <linux/m48t86.h>
20#include <asm/io.h> 22#include <asm/io.h>
21#include <asm/hardware.h> 23#include <asm/hardware.h>
22#include <asm/mach-types.h> 24#include <asm/mach-types.h>
@@ -39,6 +41,16 @@ static struct map_desc ts72xx_io_desc[] __initdata = {
39 .pfn = __phys_to_pfn(TS72XX_OPTIONS2_PHYS_BASE), 41 .pfn = __phys_to_pfn(TS72XX_OPTIONS2_PHYS_BASE),
40 .length = TS72XX_OPTIONS2_SIZE, 42 .length = TS72XX_OPTIONS2_SIZE,
41 .type = MT_DEVICE, 43 .type = MT_DEVICE,
44 }, {
45 .virtual = TS72XX_RTC_INDEX_VIRT_BASE,
46 .pfn = __phys_to_pfn(TS72XX_RTC_INDEX_PHYS_BASE),
47 .length = TS72XX_RTC_INDEX_SIZE,
48 .type = MT_DEVICE,
49 }, {
50 .virtual = TS72XX_RTC_DATA_VIRT_BASE,
51 .pfn = __phys_to_pfn(TS72XX_RTC_DATA_PHYS_BASE),
52 .length = TS72XX_RTC_DATA_SIZE,
53 .type = MT_DEVICE,
42 } 54 }
43}; 55};
44 56
@@ -99,11 +111,38 @@ static void __init ts72xx_map_io(void)
99 } 111 }
100} 112}
101 113
114static unsigned char ts72xx_rtc_readb(unsigned long addr)
115{
116 __raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
117 return __raw_readb(TS72XX_RTC_DATA_VIRT_BASE);
118}
119
120static void ts72xx_rtc_writeb(unsigned char value, unsigned long addr)
121{
122 __raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
123 __raw_writeb(value, TS72XX_RTC_DATA_VIRT_BASE);
124}
125
126static struct m48t86_ops ts72xx_rtc_ops = {
127 .readb = ts72xx_rtc_readb,
128 .writeb = ts72xx_rtc_writeb,
129};
130
131static struct platform_device ts72xx_rtc_device = {
132 .name = "rtc-m48t86",
133 .id = -1,
134 .dev = {
135 .platform_data = &ts72xx_rtc_ops,
136 },
137 .num_resources = 0,
138};
139
102static void __init ts72xx_init_machine(void) 140static void __init ts72xx_init_machine(void)
103{ 141{
104 ep93xx_init_devices(); 142 ep93xx_init_devices();
105 if (board_is_ts7200()) 143 if (board_is_ts7200())
106 physmap_configure(TS72XX_NOR_PHYS_BASE, 0x01000000, 1, NULL); 144 physmap_configure(TS72XX_NOR_PHYS_BASE, 0x01000000, 1, NULL);
145 platform_device_register(&ts72xx_rtc_device);
107} 146}
108 147
109MACHINE_START(TS72XX, "Technologic Systems TS-72xx SBC") 148MACHINE_START(TS72XX, "Technologic Systems TS-72xx SBC")
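
The ts72xx.c hunk above maps the RTC index/data windows and hands a pair of indexed byte accessors to the new rtc-m48t86 driver through platform_data. A hedged sketch of how the driver side would consume those ops; the helper below is an assumption for illustration, only the m48t86_ops layout comes from the hunk:

#include <linux/platform_device.h>
#include <linux/m48t86.h>

/* Illustrative only: read one RTC register through the board-supplied ops. */
static unsigned char example_rtc_read_reg(struct platform_device *pdev,
					   unsigned long reg)
{
	struct m48t86_ops *ops = pdev->dev.platform_data;

	return ops->readb(reg);
}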
diff --git a/arch/arm/mach-imx/dma.c b/arch/arm/mach-imx/dma.c
index 71a59e196166..4ca51dcf13ac 100644
--- a/arch/arm/mach-imx/dma.c
+++ b/arch/arm/mach-imx/dma.c
@@ -7,11 +7,18 @@
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 * 9 *
10 * 03/03/2004 Sascha Hauer <sascha@saschahauer.de> 10 * 2004-03-03 Sascha Hauer <sascha@saschahauer.de>
11 * initial version heavily inspired by 11 * initial version heavily inspired by
12 * linux/arch/arm/mach-pxa/dma.c 12 * linux/arch/arm/mach-pxa/dma.c
13 *
14 * 2005-04-17 Pavel Pisa <pisa@cmp.felk.cvut.cz>
15 * Changed to support scatter gather DMA
16 * by taking Russell's code from RiscPC
17 *
13 */ 18 */
14 19
20#undef DEBUG
21
15#include <linux/module.h> 22#include <linux/module.h>
16#include <linux/init.h> 23#include <linux/init.h>
17#include <linux/kernel.h> 24#include <linux/kernel.h>
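
The rest of this file's diff (the hunk below) reworks the i.MX DMA layer into an emulated scatter-gather API and documents the caller-side register setup it expects. A minimal, hedged usage sketch based on that kerneldoc; the channel is assumed to be already acquired, and the request-line value and 32-bit device port size are illustrative:

#include <asm/dma.h>
#include <asm/arch/imx-dma.h>

/*
 * Sketch of one linear DMA_MODE_READ transfer, per the kerneldoc below.
 * "ch" must already be a valid, acquired channel; "dma_req" is the
 * DMA_REQ_xxx request line for the device; "fifo_addr" is the device
 * FIFO's physical address.
 */
static int example_start_read(imx_dmach_t ch, dma_addr_t buf,
			      unsigned int len, unsigned int fifo_addr,
			      unsigned int dma_req)
{
	int ret;

	/* Program source (device FIFO), destination (memory) and byte count. */
	ret = imx_dma_setup_single(ch, buf, len, fifo_addr, DMA_MODE_READ);
	if (ret)
		return ret;

	/* Caller-side register setup required before the channel is enabled. */
	BLR(ch) = 0;				/* burst length: 0 means 64 bytes */
	RSSR(ch) = dma_req;			/* DMA request line source */
	CCR(ch) = CCR_DMOD_LINEAR | CCR_DSIZ_32 |
		  CCR_SMOD_FIFO | CCR_SSIZ_32;	/* typical DMA_MODE_READ setup */

	imx_dma_enable(ch);
	return 0;
}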
@@ -22,69 +29,368 @@
22#include <asm/irq.h> 29#include <asm/irq.h>
23#include <asm/hardware.h> 30#include <asm/hardware.h>
24#include <asm/dma.h> 31#include <asm/dma.h>
32#include <asm/arch/imx-dma.h>
33
34struct imx_dma_channel imx_dma_channels[IMX_DMA_CHANNELS];
35
36/*
37 * imx_dma_sg_next - prepare next chunk for scatter-gather DMA emulation
38 * @dma_ch: i.MX DMA channel number
39 * @lastcount: number of bytes transferred during last transfer
40 *
41 * The function prepares the DMA controller for the next sg data chunk transfer.
42 * The @lastcount argument tells the function how many bytes were transferred
43 * during the last block; pass zero for @lastcount to set up DMA
44 * for the first chunk.
45 */
46static inline int imx_dma_sg_next(imx_dmach_t dma_ch, unsigned int lastcount)
47{
48 struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];
49 unsigned int nextcount;
50 unsigned int nextaddr;
51
52 if (!imxdma->name) {
53 printk(KERN_CRIT "%s: called for not allocated channel %d\n",
54 __FUNCTION__, dma_ch);
55 return 0;
56 }
57
58 imxdma->resbytes -= lastcount;
59
60 if (!imxdma->sg) {
61 pr_debug("imxdma%d: no sg data\n", dma_ch);
62 return 0;
63 }
64
65 imxdma->sgbc += lastcount;
66 if ((imxdma->sgbc >= imxdma->sg->length) || !imxdma->resbytes) {
67 if ((imxdma->sgcount <= 1) || !imxdma->resbytes) {
68 pr_debug("imxdma%d: sg transfer limit reached\n",
69 dma_ch);
70 imxdma->sgcount=0;
71 imxdma->sg = NULL;
72 return 0;
73 } else {
74 imxdma->sgcount--;
75 imxdma->sg++;
76 imxdma->sgbc = 0;
77 }
78 }
79 nextcount = imxdma->sg->length - imxdma->sgbc;
80 nextaddr = imxdma->sg->dma_address + imxdma->sgbc;
25 81
26static struct dma_channel { 82 if(imxdma->resbytes < nextcount)
27 char *name; 83 nextcount = imxdma->resbytes;
28 void (*irq_handler) (int, void *, struct pt_regs *);
29 void (*err_handler) (int, void *, struct pt_regs *);
30 void *data;
31} dma_channels[11];
32 84
33/* set err_handler to NULL to have the standard info-only error handler */ 85 if ((imxdma->dma_mode & DMA_MODE_MASK) == DMA_MODE_READ)
86 DAR(dma_ch) = nextaddr;
87 else
88 SAR(dma_ch) = nextaddr;
89
90 CNTR(dma_ch) = nextcount;
91 pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, size 0x%08x\n",
92 dma_ch, DAR(dma_ch), SAR(dma_ch), CNTR(dma_ch));
93
94 return nextcount;
95}
96
97/*
98 * imx_dma_setup_sg_base - scatter-gather DMA emulation
99 * @dma_ch: i.MX DMA channel number
100 * @sg: pointer to the scatter-gather list/vector
101 * @sgcount: number of entries in the scatter-gather list
102 *
103 * The function sets up the i.MX DMA state for an emulated scatter-gather transfer
104 * and programs the channel registers ready for the first chunk
105 */
106static int
107imx_dma_setup_sg_base(imx_dmach_t dma_ch,
108 struct scatterlist *sg, unsigned int sgcount)
109{
110 struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];
111
112 imxdma->sg = sg;
113 imxdma->sgcount = sgcount;
114 imxdma->sgbc = 0;
115 return imx_dma_sg_next(dma_ch, 0);
116}
117
118/**
119 * imx_dma_setup_single - setup i.MX DMA channel for linear memory to/from device transfer
120 * @dma_ch: i.MX DMA channel number
121 * @dma_address: the DMA/physical memory address of the linear data block
122 * to transfer
123 * @dma_length: length of the data block in bytes
124 * @dev_addr: physical device port address
125 * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to the memory
126 * or %DMA_MODE_WRITE from memory to the device
127 *
128 * The function sets up the DMA channel source and destination addresses for the
129 * transfer specified by the provided parameters. The scatter-gather emulation is
130 * disabled, because a linear data block
131 * from the physical address range is transferred.
132 * Return value: if incorrect parameters are provided -%EINVAL.
133 * Zero indicates success.
134 */
34int 135int
35imx_request_dma(char *name, imx_dma_prio prio, 136imx_dma_setup_single(imx_dmach_t dma_ch, dma_addr_t dma_address,
36 void (*irq_handler) (int, void *, struct pt_regs *), 137 unsigned int dma_length, unsigned int dev_addr,
37 void (*err_handler) (int, void *, struct pt_regs *), void *data) 138 dmamode_t dmamode)
38{ 139{
39 unsigned long flags; 140 struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];
40 int i, found = 0;
41 141
42 /* basic sanity checks */ 142 imxdma->sg = NULL;
43 if (!name || !irq_handler) 143 imxdma->sgcount = 0;
144 imxdma->dma_mode = dmamode;
145 imxdma->resbytes = dma_length;
146
147 if (!dma_address) {
148 printk(KERN_ERR "imxdma%d: imx_dma_setup_single null address\n",
149 dma_ch);
44 return -EINVAL; 150 return -EINVAL;
151 }
45 152
46 local_irq_save(flags); 153 if (!dma_length) {
154 printk(KERN_ERR "imxdma%d: imx_dma_setup_single zero length\n",
155 dma_ch);
156 return -EINVAL;
157 }
47 158
48 /* try grabbing a DMA channel with the requested priority */ 159 if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
49 for (i = prio; i < prio + (prio == DMA_PRIO_LOW) ? 8 : 4; i++) { 160 pr_debug("imxdma%d: imx_dma_setup_single2dev dma_address=0x%08x dma_length=%d dev_addr=0x%08x for read\n",
50 if (!dma_channels[i].name) { 161 dma_ch, (unsigned int)dma_address, dma_length,
51 found = 1; 162 dev_addr);
52 break; 163 SAR(dma_ch) = dev_addr;
53 } 164 DAR(dma_ch) = (unsigned int)dma_address;
165 } else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
166 pr_debug("imxdma%d: imx_dma_setup_single2dev dma_address=0x%08x dma_length=%d dev_addr=0x%08x for write\n",
167 dma_ch, (unsigned int)dma_address, dma_length,
168 dev_addr);
169 SAR(dma_ch) = (unsigned int)dma_address;
170 DAR(dma_ch) = dev_addr;
171 } else {
172 printk(KERN_ERR "imxdma%d: imx_dma_setup_single bad dmamode\n",
173 dma_ch);
174 return -EINVAL;
54 } 175 }
55 176
56 if (!found) { 177 CNTR(dma_ch) = dma_length;
57 /* requested prio group is full, try hier priorities */ 178
58 for (i = prio - 1; i >= 0; i--) { 179 return 0;
59 if (!dma_channels[i].name) { 180}
60 found = 1; 181
61 break; 182/**
62 } 183 * imx_dma_setup_sg - setup i.MX DMA channel SG list to/from device transfer
63 } 184 * @dma_ch: i.MX DMA channel number
185 * @sg: pointer to the scatter-gather list/vector
186 * @sgcount: scatter-gather list hungs count
187 * @dma_length: total length of the transfer request in bytes
188 * @dev_addr: physical device port address
189 * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to the memory
190 * or %DMA_MODE_WRITE from memory to the device
191 *
192 * The function sets up the DMA channel state and registers to be ready for the
193 * transfer specified by the provided parameters. The scatter-gather emulation is
194 * set up according to the parameters.
195 *
196 * Full preparation of the transfer requires the caller to set up additional
197 * registers before imx_dma_enable() can be called.
198 *
199 * %BLR(dma_ch) holds transfer burst length in bytes, 0 means 64 bytes
200 *
201 * %RSSR(dma_ch) has to be set to the DMA request line source %DMA_REQ_xxx
202 *
203 * %CCR(dma_ch) has to specify the transfer parameters; the following setting is typical
204 * for linear or simple scatter-gather transfers when %DMA_MODE_READ is specified
205 *
206 * %CCR_DMOD_LINEAR | %CCR_DSIZ_32 | %CCR_SMOD_FIFO | %CCR_SSIZ_x
207 *
208 * The typical setup for %DMA_MODE_WRITE uses the following combination of options
209 *
210 * %CCR_SMOD_LINEAR | %CCR_SSIZ_32 | %CCR_DMOD_FIFO | %CCR_DSIZ_x
211 *
212 * Be careful here and do not mistakenly mix the source and target device
213 * port size constants; they are really different:
214 * %CCR_SSIZ_8, %CCR_SSIZ_16, %CCR_SSIZ_32,
215 * %CCR_DSIZ_8, %CCR_DSIZ_16, %CCR_DSIZ_32
216 *
217 * Return value: -%EINVAL if incorrect parameters are provided,
218 * zero on success.
219 */
220int
221imx_dma_setup_sg(imx_dmach_t dma_ch,
222 struct scatterlist *sg, unsigned int sgcount, unsigned int dma_length,
223 unsigned int dev_addr, dmamode_t dmamode)
224{
225 int res;
226 struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];
227
228 imxdma->sg = NULL;
229 imxdma->sgcount = 0;
230 imxdma->dma_mode = dmamode;
231 imxdma->resbytes = dma_length;
232
233 if (!sg || !sgcount) {
234 printk(KERN_ERR "imxdma%d: imx_dma_setup_sg empty sg list\n",
235 dma_ch);
236 return -EINVAL;
237 }
238
239 if (!sg->length) {
240 printk(KERN_ERR "imxdma%d: imx_dma_setup_sg zero length\n",
241 dma_ch);
242 return -EINVAL;
64 } 243 }
65 244
66 if (found) { 245 if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
67 DIMR &= ~(1 << i); 246 pr_debug("imxdma%d: mx_dma_setup_sg2dev sg=%p sgcount=%d total length=%d dev_addr=0x%08x for read\n",
68 dma_channels[i].name = name; 247 dma_ch, sg, sgcount, dma_length, dev_addr);
69 dma_channels[i].irq_handler = irq_handler; 248 SAR(dma_ch) = dev_addr;
70 dma_channels[i].err_handler = err_handler; 249 } else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
71 dma_channels[i].data = data; 250 pr_debug("imxdma%d: mx_dma_setup_sg2dev sg=%p sgcount=%d total length=%d dev_addr=0x%08x for write\n",
251 dma_ch, sg, sgcount, dma_length, dev_addr);
252 DAR(dma_ch) = dev_addr;
72 } else { 253 } else {
73 printk(KERN_WARNING "No more available DMA channels for %s\n", 254 printk(KERN_ERR "imxdma%d: imx_dma_setup_sg bad dmamode\n",
74 name); 255 dma_ch);
75 i = -ENODEV; 256 return -EINVAL;
257 }
258
259 res = imx_dma_setup_sg_base(dma_ch, sg, sgcount);
260 if (res <= 0) {
261 printk(KERN_ERR "imxdma%d: no sg chunk ready\n", dma_ch);
262 return -EINVAL;
263 }
264
265 return 0;
266}
267
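For the scatter-gather path, a minimal usage sketch (not part of this patch) may help. It assumes the channel was already allocated and the sg list built and DMA-mapped by the caller; the burst length, request line and CCR mode bits below are illustrative values for a hypothetical peripheral read from a 16-bit device FIFO.

	/* hedged sketch: start an SG read from a device FIFO on an already requested channel */
	static int example_start_sg_read(imx_dmach_t ch, struct scatterlist *sg,
					 unsigned int sgcount, unsigned int total,
					 unsigned int fifo_addr, unsigned int req_line)
	{
		int ret;

		ret = imx_dma_setup_sg(ch, sg, sgcount, total, fifo_addr, DMA_MODE_READ);
		if (ret)
			return ret;

		BLR(ch) = 16;			/* 16-byte bursts; 0 would mean 64 bytes */
		RSSR(ch) = req_line;		/* the peripheral's %DMA_REQ_xxx request line */
		CCR(ch) = CCR_DMOD_LINEAR | CCR_DSIZ_32 |	/* memory side */
			  CCR_SMOD_FIFO | CCR_SSIZ_16;		/* device FIFO side */

		imx_dma_enable(ch);
		return 0;
	}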
268/**
269 * imx_dma_setup_handlers - setup i.MX DMA channel end and error notification handlers
270 * @dma_ch: i.MX DMA channel number
271 * @irq_handler: the pointer to the function called if the transfer
272 * ends successfully
273 * @err_handler: the pointer to the function called if the transfer
274 * ends prematurely because of an error
275 * @data: user specified value to be passed to the handlers
276 */
277int
278imx_dma_setup_handlers(imx_dmach_t dma_ch,
279 void (*irq_handler) (int, void *, struct pt_regs *),
280 void (*err_handler) (int, void *, struct pt_regs *),
281 void *data)
282{
283 struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];
284 unsigned long flags;
285
286 if (!imxdma->name) {
287 printk(KERN_CRIT "%s: called for not allocated channel %d\n",
288 __FUNCTION__, dma_ch);
289 return -ENODEV;
290 }
291
292 local_irq_save(flags);
293 DISR = (1 << dma_ch);
294 imxdma->irq_handler = irq_handler;
295 imxdma->err_handler = err_handler;
296 imxdma->data = data;
297 local_irq_restore(flags);
298 return 0;
299}
300
301/**
302 * imx_dma_enable - function to start i.MX DMA channel operation
303 * @dma_ch: i.MX DMA channel number
304 *
305 * The channel has to be allocated by the driver through the imx_dma_request()
306 * or imx_dma_request_by_prio() function.
307 * The transfer parameters have to be written to the channel registers through
308 * a call to the imx_dma_setup_single() or imx_dma_setup_sg() function,
309 * and the registers %BLR(dma_ch), %RSSR(dma_ch) and %CCR(dma_ch) have to
310 * be set by the channel user prior to this function call.
311 */
312void imx_dma_enable(imx_dmach_t dma_ch)
313{
314 struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];
315 unsigned long flags;
316
317 pr_debug("imxdma%d: imx_dma_enable\n", dma_ch);
318
319 if (!imxdma->name) {
320 printk(KERN_CRIT "%s: called for not allocated channel %d\n",
321 __FUNCTION__, dma_ch);
322 return;
323 }
324
325 local_irq_save(flags);
326 DISR = (1 << dma_ch);
327 DIMR &= ~(1 << dma_ch);
328 CCR(dma_ch) |= CCR_CEN;
329 local_irq_restore(flags);
330}
331
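Putting the pieces together, a hedged sketch of the call order the comment above requires before imx_dma_enable(): request a channel, install handlers, program the transfer, set %BLR, %RSSR and %CCR, then enable. The handler names, the FIFO address and the request line are assumptions for illustration, not symbols introduced by this patch.

	static void example_done(int dma_ch, void *data, struct pt_regs *regs)
	{
		/* transfer finished successfully */
	}

	static void example_err(int dma_ch, void *data, struct pt_regs *regs)
	{
		/* transfer aborted by a DMA error */
	}

	static int example_write_block(dma_addr_t buf, unsigned int len,
				       unsigned int fifo_addr, unsigned int req_line)
	{
		imx_dmach_t ch;
		int ret;

		ret = imx_dma_request_by_prio(&ch, "example", DMA_PRIO_MEDIUM);
		if (ret)
			return ret;

		/* cannot fail here, the channel was just allocated above */
		imx_dma_setup_handlers(ch, example_done, example_err, NULL);

		ret = imx_dma_setup_single(ch, buf, len, fifo_addr, DMA_MODE_WRITE);
		if (ret) {
			imx_dma_free(ch);
			return ret;
		}

		BLR(ch) = 0;			/* 0 means 64-byte bursts */
		RSSR(ch) = req_line;		/* peripheral %DMA_REQ_xxx line, board specific */
		CCR(ch) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16;

		imx_dma_enable(ch);
		return 0;
	}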
332/**
333 * imx_dma_disable - stop, finish i.MX DMA channel operation
334 * @dma_ch: i.MX DMA channel number
335 */
336void imx_dma_disable(imx_dmach_t dma_ch)
337{
338 unsigned long flags;
339
340 pr_debug("imxdma%d: imx_dma_disable\n", dma_ch);
341
342 local_irq_save(flags);
343 DIMR |= (1 << dma_ch);
344 CCR(dma_ch) &= ~CCR_CEN;
345 DISR = (1 << dma_ch);
346 local_irq_restore(flags);
347}
348
349/**
350 * imx_dma_request - request/allocate specified channel number
351 * @dma_ch: i.MX DMA channel number
352 * @name: the driver's/caller's own non-%NULL identification
353 */
354int imx_dma_request(imx_dmach_t dma_ch, const char *name)
355{
356 struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];
357 unsigned long flags;
358
359 /* basic sanity checks */
360 if (!name)
361 return -EINVAL;
362
363 if (dma_ch >= IMX_DMA_CHANNELS) {
364 printk(KERN_CRIT "%s: called for non-existent channel %d\n",
365 __FUNCTION__, dma_ch);
366 return -EINVAL;
76 } 367 }
77 368
369 local_irq_save(flags);
370 if (imxdma->name) {
371 local_irq_restore(flags);
372 return -ENODEV;
373 }
374
375 imxdma->name = name;
376 imxdma->irq_handler = NULL;
377 imxdma->err_handler = NULL;
378 imxdma->data = NULL;
379 imxdma->sg = NULL;
78 local_irq_restore(flags); 380 local_irq_restore(flags);
79 return i; 381 return 0;
80} 382}
81 383
82void 384/**
83imx_free_dma(int dma_ch) 385 * imx_dma_free - release previously acquired channel
386 * @dma_ch: i.MX DMA channel number
387 */
388void imx_dma_free(imx_dmach_t dma_ch)
84{ 389{
85 unsigned long flags; 390 unsigned long flags;
391 struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];
86 392
87 if (!dma_channels[dma_ch].name) { 393 if (!imxdma->name) {
88 printk(KERN_CRIT 394 printk(KERN_CRIT
89 "%s: trying to free channel %d which is already freed\n", 395 "%s: trying to free channel %d which is already freed\n",
90 __FUNCTION__, dma_ch); 396 __FUNCTION__, dma_ch);
@@ -92,27 +398,84 @@ imx_free_dma(int dma_ch)
92 } 398 }
93 399
94 local_irq_save(flags); 400 local_irq_save(flags);
95 DIMR &= ~(1 << dma_ch); 401 /* Disable interrupts */
96 dma_channels[dma_ch].name = NULL; 402 DIMR |= (1 << dma_ch);
403 CCR(dma_ch) &= ~CCR_CEN;
404 imxdma->name = NULL;
97 local_irq_restore(flags); 405 local_irq_restore(flags);
98} 406}
99 407
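When a driver is tied to one particular channel (for example because of a fixed request-line mapping), it can claim that channel directly with imx_dma_request() and must pair it with imx_dma_free(); a speculative sketch, with channel 2 chosen arbitrarily:

	static int example_claim_fixed_channel(void)
	{
		int ret = imx_dma_request(2, "example-fixed");

		if (ret)	/* -EINVAL for bad arguments, -ENODEV if already taken */
			return ret;
		/* ... set up and run transfers on channel 2 ... */
		imx_dma_free(2);
		return 0;
	}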
100static irqreturn_t 408/**
101dma_err_handler(int irq, void *dev_id, struct pt_regs *regs) 409 * imx_dma_request_by_prio - find and request one of the free channels best suiting the requested priority
410 * @dma_ch: i.MX DMA channel number
411 * @name: the driver's/caller's own non-%NULL identification
412 * @prio: one of the hardware distinguished priority level:
413 * %DMA_PRIO_HIGH, %DMA_PRIO_MEDIUM, %DMA_PRIO_LOW
414 *
415 * This function tries to find a free channel in the specified priority group;
416 * if none is available there, it looks for a free channel
417 * in the higher and then in the lower priority groups.
418 *
419 * Return value: If there is no free channel to allocate, -%ENODEV is returned.
420 * Zero value indicates successful channel allocation.
421 */
422int
423imx_dma_request_by_prio(imx_dmach_t * pdma_ch, const char *name,
424 imx_dma_prio prio)
425{
426 int i;
427 int best;
428
429 switch (prio) {
430 case (DMA_PRIO_HIGH):
431 best = 8;
432 break;
433 case (DMA_PRIO_MEDIUM):
434 best = 4;
435 break;
436 case (DMA_PRIO_LOW):
437 default:
438 best = 0;
439 break;
440 }
441
442 for (i = best; i < IMX_DMA_CHANNELS; i++) {
443 if (!imx_dma_request(i, name)) {
444 *pdma_ch = i;
445 return 0;
446 }
447 }
448
449 for (i = best - 1; i >= 0; i--) {
450 if (!imx_dma_request(i, name)) {
451 *pdma_ch = i;
452 return 0;
453 }
454 }
455
456 printk(KERN_ERR "%s: no free DMA channel found\n", __FUNCTION__);
457
458 return -ENODEV;
459}
460
461static irqreturn_t dma_err_handler(int irq, void *dev_id, struct pt_regs *regs)
102{ 462{
103 int i, disr = DISR; 463 int i, disr = DISR;
104 struct dma_channel *channel; 464 struct imx_dma_channel *channel;
105 unsigned int err_mask = DBTOSR | DRTOSR | DSESR | DBOSR; 465 unsigned int err_mask = DBTOSR | DRTOSR | DSESR | DBOSR;
106 466
107 DISR = disr; 467 DISR = disr;
108 for (i = 0; i < 11; i++) { 468 for (i = 0; i < IMX_DMA_CHANNELS; i++) {
109 channel = &dma_channels[i]; 469 channel = &imx_dma_channels[i];
110 470
111 if ( (err_mask & 1<<i) && channel->name && channel->err_handler) { 471 if ((err_mask & 1 << i) && channel->name
472 && channel->err_handler) {
112 channel->err_handler(i, channel->data, regs); 473 channel->err_handler(i, channel->data, regs);
113 continue; 474 continue;
114 } 475 }
115 476
477 imx_dma_channels[i].sg = NULL;
478
116 if (DBTOSR & (1 << i)) { 479 if (DBTOSR & (1 << i)) {
117 printk(KERN_WARNING 480 printk(KERN_WARNING
118 "Burst timeout on channel %d (%s)\n", 481 "Burst timeout on channel %d (%s)\n",
@@ -141,17 +504,27 @@ dma_err_handler(int irq, void *dev_id, struct pt_regs *regs)
141 return IRQ_HANDLED; 504 return IRQ_HANDLED;
142} 505}
143 506
144static irqreturn_t 507static irqreturn_t dma_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
145dma_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
146{ 508{
147 int i, disr = DISR; 509 int i, disr = DISR;
148 510
511 pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n",
512 disr);
513
149 DISR = disr; 514 DISR = disr;
150 for (i = 0; i < 11; i++) { 515 for (i = 0; i < IMX_DMA_CHANNELS; i++) {
151 if (disr & (1 << i)) { 516 if (disr & (1 << i)) {
152 struct dma_channel *channel = &dma_channels[i]; 517 struct imx_dma_channel *channel = &imx_dma_channels[i];
153 if (channel->name && channel->irq_handler) { 518 if (channel->name) {
154 channel->irq_handler(i, channel->data, regs); 519 if (imx_dma_sg_next(i, CNTR(i))) {
520 CCR(i) &= ~CCR_CEN;
521 mb();
522 CCR(i) |= CCR_CEN;
523 } else {
524 if (channel->irq_handler)
525 channel->irq_handler(i,
526 channel->data, regs);
527 }
155 } else { 528 } else {
156 /* 529 /*
157 * IRQ for an unregistered DMA channel: 530 * IRQ for an unregistered DMA channel:
@@ -165,10 +538,10 @@ dma_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
165 return IRQ_HANDLED; 538 return IRQ_HANDLED;
166} 539}
167 540
168static int __init 541static int __init imx_dma_init(void)
169imx_dma_init(void)
170{ 542{
171 int ret; 543 int ret;
544 int i;
172 545
173 /* reset DMA module */ 546 /* reset DMA module */
174 DCR = DCR_DRST; 547 DCR = DCR_DRST;
@@ -189,15 +562,27 @@ imx_dma_init(void)
189 DCR = DCR_DEN; 562 DCR = DCR_DEN;
190 563
191 /* clear all interrupts */ 564 /* clear all interrupts */
192 DISR = 0x3ff; 565 DISR = (1 << IMX_DMA_CHANNELS) - 1;
193 566
194 /* enable interrupts */ 567 /* enable interrupts */
195 DIMR = 0; 568 DIMR = (1 << IMX_DMA_CHANNELS) - 1;
569
570 for (i = 0; i < IMX_DMA_CHANNELS; i++) {
571 imx_dma_channels[i].sg = NULL;
572 imx_dma_channels[i].dma_num = i;
573 }
196 574
197 return ret; 575 return ret;
198} 576}
199 577
200arch_initcall(imx_dma_init); 578arch_initcall(imx_dma_init);
201 579
202EXPORT_SYMBOL(imx_request_dma); 580EXPORT_SYMBOL(imx_dma_setup_single);
203EXPORT_SYMBOL(imx_free_dma); 581EXPORT_SYMBOL(imx_dma_setup_sg);
582EXPORT_SYMBOL(imx_dma_setup_handlers);
583EXPORT_SYMBOL(imx_dma_enable);
584EXPORT_SYMBOL(imx_dma_disable);
585EXPORT_SYMBOL(imx_dma_request);
586EXPORT_SYMBOL(imx_dma_free);
587EXPORT_SYMBOL(imx_dma_request_by_prio);
588EXPORT_SYMBOL(imx_dma_channels);
diff --git a/arch/arm/mach-imx/generic.c b/arch/arm/mach-imx/generic.c
index 37613ad68366..9d8331be2b58 100644
--- a/arch/arm/mach-imx/generic.c
+++ b/arch/arm/mach-imx/generic.c
@@ -33,6 +33,7 @@
33#include <asm/arch/imx-regs.h> 33#include <asm/arch/imx-regs.h>
34 34
35#include <asm/mach/map.h> 35#include <asm/mach/map.h>
36#include <asm/arch/mmc.h>
36 37
37void imx_gpio_mode(int gpio_mode) 38void imx_gpio_mode(int gpio_mode)
38{ 39{
@@ -175,13 +176,25 @@ static struct resource imx_mmc_resources[] = {
175 }, 176 },
176}; 177};
177 178
179static u64 imxmmmc_dmamask = 0xffffffffUL;
180
178static struct platform_device imx_mmc_device = { 181static struct platform_device imx_mmc_device = {
179 .name = "imx-mmc", 182 .name = "imx-mmc",
180 .id = 0, 183 .id = 0,
184 .dev = {
185 .dma_mask = &imxmmmc_dmamask,
186 .coherent_dma_mask = 0xffffffff,
187 },
181 .num_resources = ARRAY_SIZE(imx_mmc_resources), 188 .num_resources = ARRAY_SIZE(imx_mmc_resources),
182 .resource = imx_mmc_resources, 189 .resource = imx_mmc_resources,
183}; 190};
184 191
192void __init imx_set_mmc_info(struct imxmmc_platform_data *info)
193{
194 imx_mmc_device.dev.platform_data = info;
195}
196EXPORT_SYMBOL(imx_set_mmc_info);
197
185static struct resource imx_uart1_resources[] = { 198static struct resource imx_uart1_resources[] = {
186 [0] = { 199 [0] = {
187 .start = 0x00206000, 200 .start = 0x00206000,
diff --git a/arch/arm/mach-imx/mx1ads.c b/arch/arm/mach-imx/mx1ads.c
index 8ab1b040288c..e34d0df90aed 100644
--- a/arch/arm/mach-imx/mx1ads.c
+++ b/arch/arm/mach-imx/mx1ads.c
@@ -25,6 +25,7 @@
25#include <asm/mach-types.h> 25#include <asm/mach-types.h>
26 26
27#include <asm/mach/arch.h> 27#include <asm/mach/arch.h>
28#include <asm/arch/mmc.h>
28#include <linux/interrupt.h> 29#include <linux/interrupt.h>
29#include "generic.h" 30#include "generic.h"
30 31
@@ -51,12 +52,29 @@ static struct platform_device *devices[] __initdata = {
51 &cs89x0_device, 52 &cs89x0_device,
52}; 53};
53 54
55#ifdef CONFIG_MMC_IMX
56static int mx1ads_mmc_card_present(void)
57{
58 /* MMC/SD Card Detect is PB 20 on MX1ADS V1.0.7 */
59 return (SSR(1) & (1 << 20) ? 0 : 1);
60}
61
62static struct imxmmc_platform_data mx1ads_mmc_info = {
63 .card_present = mx1ads_mmc_card_present,
64};
65#endif
66
54static void __init 67static void __init
55mx1ads_init(void) 68mx1ads_init(void)
56{ 69{
57#ifdef CONFIG_LEDS 70#ifdef CONFIG_LEDS
58 imx_gpio_mode(GPIO_PORTA | GPIO_OUT | 2); 71 imx_gpio_mode(GPIO_PORTA | GPIO_OUT | 2);
59#endif 72#endif
73#ifdef CONFIG_MMC_IMX
74 /* SD/MMC card detect */
75 imx_gpio_mode(GPIO_PORTB | GPIO_GIUS | GPIO_IN | 20);
76 imx_set_mmc_info(&mx1ads_mmc_info);
77#endif
60 platform_add_devices(devices, ARRAY_SIZE(devices)); 78 platform_add_devices(devices, ARRAY_SIZE(devices));
61} 79}
62 80
diff --git a/arch/arm/mach-ixp23xx/espresso.c b/arch/arm/mach-ixp23xx/espresso.c
index 2327c9790416..bf688c128630 100644
--- a/arch/arm/mach-ixp23xx/espresso.c
+++ b/arch/arm/mach-ixp23xx/espresso.c
@@ -44,6 +44,15 @@
44#include <asm/mach/irq.h> 44#include <asm/mach/irq.h>
45#include <asm/mach/pci.h> 45#include <asm/mach/pci.h>
46 46
47static int __init espresso_pci_init(void)
48{
49 if (machine_is_espresso())
50 ixp23xx_pci_slave_init();
51
52 return 0;
53};
54subsys_initcall(espresso_pci_init);
55
47static void __init espresso_init(void) 56static void __init espresso_init(void)
48{ 57{
49 physmap_configure(0x90000000, 0x02000000, 2, NULL); 58 physmap_configure(0x90000000, 0x02000000, 2, NULL);
diff --git a/arch/arm/mach-ixp23xx/pci.c b/arch/arm/mach-ixp23xx/pci.c
index 5330ad78c1bb..ac72f94c5b4d 100644
--- a/arch/arm/mach-ixp23xx/pci.c
+++ b/arch/arm/mach-ixp23xx/pci.c
@@ -201,7 +201,7 @@ int clear_master_aborts(void)
201 return 0; 201 return 0;
202} 202}
203 203
204void __init ixp23xx_pci_preinit(void) 204static void __init ixp23xx_pci_common_init(void)
205{ 205{
206#ifdef __ARMEB__ 206#ifdef __ARMEB__
207 *IXP23XX_PCI_CONTROL |= 0x20000; /* set I/O swapping */ 207 *IXP23XX_PCI_CONTROL |= 0x20000; /* set I/O swapping */
@@ -219,7 +219,18 @@ void __init ixp23xx_pci_preinit(void)
219 *IXP23XX_PCI_CPP_ADDR_BITS &= ~(1 << 1); 219 *IXP23XX_PCI_CPP_ADDR_BITS &= ~(1 << 1);
220 } else { 220 } else {
221 *IXP23XX_PCI_CPP_ADDR_BITS |= (1 << 1); 221 *IXP23XX_PCI_CPP_ADDR_BITS |= (1 << 1);
222
223 /*
224 * Enable coherency on A2 silicon.
225 */
226 if (arch_is_coherent())
227 *IXP23XX_CPP2XSI_CURR_XFER_REG3 &= ~IXP23XX_CPP2XSI_COH_OFF;
222 } 228 }
229}
230
231void __init ixp23xx_pci_preinit(void)
232{
233 ixp23xx_pci_common_init();
223 234
224 hook_fault_code(16+6, ixp23xx_pci_abort_handler, SIGBUS, 235 hook_fault_code(16+6, ixp23xx_pci_abort_handler, SIGBUS,
225 "PCI config cycle to non-existent device"); 236 "PCI config cycle to non-existent device");
@@ -273,3 +284,8 @@ int ixp23xx_pci_setup(int nr, struct pci_sys_data *sys)
273 284
274 return 1; 285 return 1;
275} 286}
287
288void ixp23xx_pci_slave_init(void)
289{
290 ixp23xx_pci_common_init();
291}
diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
index 86a0f0d14345..f8d716ccc1df 100644
--- a/arch/arm/mach-omap1/Kconfig
+++ b/arch/arm/mach-omap1/Kconfig
@@ -69,12 +69,6 @@ config MACH_VOICEBLUE
69 Support for Voiceblue GSM/VoIP gateway. Say Y here if you have 69 Support for Voiceblue GSM/VoIP gateway. Say Y here if you have
70 such a board. 70 such a board.
71 71
72config MACH_NETSTAR
73 bool "NetStar"
74 depends on ARCH_OMAP1 && ARCH_OMAP15XX
75 help
76 Support for NetStar PBX. Say Y here if you have such a board.
77
78config MACH_OMAP_PALMTE 72config MACH_OMAP_PALMTE
79 bool "Palm Tungsten E" 73 bool "Palm Tungsten E"
80 depends on ARCH_OMAP1 && ARCH_OMAP15XX 74 depends on ARCH_OMAP1 && ARCH_OMAP15XX
@@ -85,6 +79,20 @@ config MACH_OMAP_PALMTE
85 informations. 79 informations.
86 Say Y here if you have such a PDA, say NO otherwise. 80 Say Y here if you have such a PDA, say NO otherwise.
87 81
82config MACH_NOKIA770
83 bool "Nokia 770"
84 depends on ARCH_OMAP1 && ARCH_OMAP16XX
85 help
86 Support for the Nokia 770 Internet Tablet. Say Y here if you
87 have such a device.
88
89config MACH_AMS_DELTA
90 bool "Amstrad E3 (Delta)"
91 depends on ARCH_OMAP1 && ARCH_OMAP15XX
92 help
93 Support for the Amstrad E3 (codename Delta) videophone. Say Y here
94 if you have such a device.
95
88config MACH_OMAP_GENERIC 96config MACH_OMAP_GENERIC
89 bool "Generic OMAP board" 97 bool "Generic OMAP board"
90 depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX) 98 depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX)
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
index b0b00156faae..9ea719550ad3 100644
--- a/arch/arm/mach-omap1/Makefile
+++ b/arch/arm/mach-omap1/Makefile
@@ -3,7 +3,13 @@
3# 3#
4 4
5# Common support 5# Common support
6obj-y := io.o id.o clock.o irq.o time.o mux.o serial.o devices.o 6obj-y := io.o id.o clock.o irq.o mux.o serial.o devices.o
7
8obj-$(CONFIG_OMAP_MPU_TIMER) += time.o
9
10# Power Management
11obj-$(CONFIG_PM) += pm.o sleep.o
12
7led-y := leds.o 13led-y := leds.o
8 14
9# Specific board support 15# Specific board support
@@ -14,8 +20,9 @@ obj-$(CONFIG_MACH_OMAP_PERSEUS2) += board-perseus2.o
14obj-$(CONFIG_MACH_OMAP_OSK) += board-osk.o 20obj-$(CONFIG_MACH_OMAP_OSK) += board-osk.o
15obj-$(CONFIG_MACH_OMAP_H3) += board-h3.o 21obj-$(CONFIG_MACH_OMAP_H3) += board-h3.o
16obj-$(CONFIG_MACH_VOICEBLUE) += board-voiceblue.o 22obj-$(CONFIG_MACH_VOICEBLUE) += board-voiceblue.o
17obj-$(CONFIG_MACH_NETSTAR) += board-netstar.o
18obj-$(CONFIG_MACH_OMAP_PALMTE) += board-palmte.o 23obj-$(CONFIG_MACH_OMAP_PALMTE) += board-palmte.o
24obj-$(CONFIG_MACH_NOKIA770) += board-nokia770.o
25obj-$(CONFIG_MACH_AMS_DELTA) += board-ams-delta.o
19 26
20ifeq ($(CONFIG_ARCH_OMAP15XX),y) 27ifeq ($(CONFIG_ARCH_OMAP15XX),y)
21# Innovator-1510 FPGA 28# Innovator-1510 FPGA
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
new file mode 100644
index 000000000000..6178f046f128
--- /dev/null
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -0,0 +1,116 @@
1/*
2 * linux/arch/arm/mach-omap1/board-ams-delta.c
3 *
4 * Modified from board-generic.c
5 *
6 * Board specific inits for the Amstrad E3 (codename Delta) videophone
7 *
8 * Copyright (C) 2006 Jonathan McDowell <noodles@earth.li>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/platform_device.h>
18
19#include <asm/hardware.h>
20#include <asm/mach-types.h>
21#include <asm/mach/arch.h>
22#include <asm/mach/map.h>
23
24#include <asm/arch/board-ams-delta.h>
25#include <asm/arch/gpio.h>
26#include <asm/arch/mux.h>
27#include <asm/arch/usb.h>
28#include <asm/arch/board.h>
29#include <asm/arch/common.h>
30
31static u8 ams_delta_latch1_reg;
32static u16 ams_delta_latch2_reg;
33
34void ams_delta_latch1_write(u8 mask, u8 value)
35{
36 ams_delta_latch1_reg &= ~mask;
37 ams_delta_latch1_reg |= value;
38 *(volatile __u8 *) AMS_DELTA_LATCH1_VIRT = ams_delta_latch1_reg;
39}
40
41void ams_delta_latch2_write(u16 mask, u16 value)
42{
43 ams_delta_latch2_reg &= ~mask;
44 ams_delta_latch2_reg |= value;
45 *(volatile __u16 *) AMS_DELTA_LATCH2_VIRT = ams_delta_latch2_reg;
46}
47
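The two latch helpers above do a read-modify-write of a shadow copy and then push it to the memory-mapped latch, so callers pass a mask of the bits they own together with the new value for those bits. A hedged sketch of a caller, using a made-up bit mask rather than a real AMS_DELTA_LATCH2_* define:

	/* EXAMPLE_LATCH2_BIT is hypothetical; real callers use a mask from board-ams-delta.h */
	#define EXAMPLE_LATCH2_BIT	(1 << 5)

	static void example_set_latch2_bit(int on)
	{
		/* first argument selects the bits to change, second gives their new state */
		ams_delta_latch2_write(EXAMPLE_LATCH2_BIT, on ? EXAMPLE_LATCH2_BIT : 0);
	}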
48static void __init ams_delta_init_irq(void)
49{
50 omap1_init_common_hw();
51 omap_init_irq();
52 omap_gpio_init();
53}
54
55static struct map_desc ams_delta_io_desc[] __initdata = {
56 // AMS_DELTA_LATCH1
57 {
58 .virtual = AMS_DELTA_LATCH1_VIRT,
59 .pfn = __phys_to_pfn(AMS_DELTA_LATCH1_PHYS),
60 .length = 0x01000000,
61 .type = MT_DEVICE
62 },
63 // AMS_DELTA_LATCH2
64 {
65 .virtual = AMS_DELTA_LATCH2_VIRT,
66 .pfn = __phys_to_pfn(AMS_DELTA_LATCH2_PHYS),
67 .length = 0x01000000,
68 .type = MT_DEVICE
69 },
70 // AMS_DELTA_MODEM
71 {
72 .virtual = AMS_DELTA_MODEM_VIRT,
73 .pfn = __phys_to_pfn(AMS_DELTA_MODEM_PHYS),
74 .length = 0x01000000,
75 .type = MT_DEVICE
76 }
77};
78
79static struct omap_uart_config ams_delta_uart_config __initdata = {
80 .enabled_uarts = 1,
81};
82
83static struct omap_board_config_kernel ams_delta_config[] = {
84 { OMAP_TAG_UART, &ams_delta_uart_config },
85};
86
87static void __init ams_delta_init(void)
88{
89 iotable_init(ams_delta_io_desc, ARRAY_SIZE(ams_delta_io_desc));
90
91 omap_board_config = ams_delta_config;
92 omap_board_config_size = ARRAY_SIZE(ams_delta_config);
93 omap_serial_init();
94
95 /* Clear latch2 (NAND, LCD, modem enable) */
96 ams_delta_latch2_write(~0, 0);
97}
98
99static void __init ams_delta_map_io(void)
100{
101 omap1_map_common_io();
102}
103
104MACHINE_START(AMS_DELTA, "Amstrad E3 (Delta)")
105 /* Maintainer: Jonathan McDowell <noodles@earth.li> */
106 .phys_io = 0xfff00000,
107 .io_pg_offst = ((0xfef00000) >> 18) & 0xfffc,
108 .boot_params = 0x10000100,
109 .map_io = ams_delta_map_io,
110 .init_irq = ams_delta_init_irq,
111 .init_machine = ams_delta_init,
112 .timer = &omap_timer,
113MACHINE_END
114
115EXPORT_SYMBOL(ams_delta_latch1_write);
116EXPORT_SYMBOL(ams_delta_latch2_write);
diff --git a/arch/arm/mach-omap1/board-generic.c b/arch/arm/mach-omap1/board-generic.c
index a177e78b2b87..33d01adab1ed 100644
--- a/arch/arm/mach-omap1/board-generic.c
+++ b/arch/arm/mach-omap1/board-generic.c
@@ -88,7 +88,7 @@ static struct omap_board_config_kernel generic_config[] = {
88static void __init omap_generic_init(void) 88static void __init omap_generic_init(void)
89{ 89{
90#ifdef CONFIG_ARCH_OMAP15XX 90#ifdef CONFIG_ARCH_OMAP15XX
91 if (cpu_is_omap1510()) { 91 if (cpu_is_omap15xx()) {
92 generic_config[0].data = &generic1510_usb_config; 92 generic_config[0].data = &generic1510_usb_config;
93 } 93 }
94#endif 94#endif
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index 89f0cc74a519..cd3a06dfc0a8 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -24,7 +24,9 @@
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/mtd/mtd.h> 26#include <linux/mtd/mtd.h>
27#include <linux/mtd/nand.h>
27#include <linux/mtd/partitions.h> 28#include <linux/mtd/partitions.h>
29#include <linux/input.h>
28 30
29#include <asm/hardware.h> 31#include <asm/hardware.h>
30#include <asm/mach-types.h> 32#include <asm/mach-types.h>
@@ -35,12 +37,55 @@
35#include <asm/arch/gpio.h> 37#include <asm/arch/gpio.h>
36#include <asm/arch/mux.h> 38#include <asm/arch/mux.h>
37#include <asm/arch/tc.h> 39#include <asm/arch/tc.h>
40#include <asm/arch/irda.h>
38#include <asm/arch/usb.h> 41#include <asm/arch/usb.h>
42#include <asm/arch/keypad.h>
39#include <asm/arch/common.h> 43#include <asm/arch/common.h>
44#include <asm/arch/mcbsp.h>
45#include <asm/arch/omap-alsa.h>
40 46
41extern int omap_gpio_init(void); 47extern int omap_gpio_init(void);
42 48
43static struct mtd_partition h2_partitions[] = { 49static int h2_keymap[] = {
50 KEY(0, 0, KEY_LEFT),
51 KEY(0, 1, KEY_RIGHT),
52 KEY(0, 2, KEY_3),
53 KEY(0, 3, KEY_F10),
54 KEY(0, 4, KEY_F5),
55 KEY(0, 5, KEY_9),
56 KEY(1, 0, KEY_DOWN),
57 KEY(1, 1, KEY_UP),
58 KEY(1, 2, KEY_2),
59 KEY(1, 3, KEY_F9),
60 KEY(1, 4, KEY_F7),
61 KEY(1, 5, KEY_0),
62 KEY(2, 0, KEY_ENTER),
63 KEY(2, 1, KEY_6),
64 KEY(2, 2, KEY_1),
65 KEY(2, 3, KEY_F2),
66 KEY(2, 4, KEY_F6),
67 KEY(2, 5, KEY_HOME),
68 KEY(3, 0, KEY_8),
69 KEY(3, 1, KEY_5),
70 KEY(3, 2, KEY_F12),
71 KEY(3, 3, KEY_F3),
72 KEY(3, 4, KEY_F8),
73 KEY(3, 5, KEY_END),
74 KEY(4, 0, KEY_7),
75 KEY(4, 1, KEY_4),
76 KEY(4, 2, KEY_F11),
77 KEY(4, 3, KEY_F1),
78 KEY(4, 4, KEY_F4),
79 KEY(4, 5, KEY_ESC),
80 KEY(5, 0, KEY_F13),
81 KEY(5, 1, KEY_F14),
82 KEY(5, 2, KEY_F15),
83 KEY(5, 3, KEY_F16),
84 KEY(5, 4, KEY_SLEEP),
85 0
86};
87
88static struct mtd_partition h2_nor_partitions[] = {
44 /* bootloader (U-Boot, etc) in first sector */ 89 /* bootloader (U-Boot, etc) in first sector */
45 { 90 {
46 .name = "bootloader", 91 .name = "bootloader",
@@ -71,26 +116,26 @@ static struct mtd_partition h2_partitions[] = {
71 } 116 }
72}; 117};
73 118
74static struct flash_platform_data h2_flash_data = { 119static struct flash_platform_data h2_nor_data = {
75 .map_name = "cfi_probe", 120 .map_name = "cfi_probe",
76 .width = 2, 121 .width = 2,
77 .parts = h2_partitions, 122 .parts = h2_nor_partitions,
78 .nr_parts = ARRAY_SIZE(h2_partitions), 123 .nr_parts = ARRAY_SIZE(h2_nor_partitions),
79}; 124};
80 125
81static struct resource h2_flash_resource = { 126static struct resource h2_nor_resource = {
82 /* This is on CS3, wherever it's mapped */ 127 /* This is on CS3, wherever it's mapped */
83 .flags = IORESOURCE_MEM, 128 .flags = IORESOURCE_MEM,
84}; 129};
85 130
86static struct platform_device h2_flash_device = { 131static struct platform_device h2_nor_device = {
87 .name = "omapflash", 132 .name = "omapflash",
88 .id = 0, 133 .id = 0,
89 .dev = { 134 .dev = {
90 .platform_data = &h2_flash_data, 135 .platform_data = &h2_nor_data,
91 }, 136 },
92 .num_resources = 1, 137 .num_resources = 1,
93 .resource = &h2_flash_resource, 138 .resource = &h2_nor_resource,
94}; 139};
95 140
96static struct resource h2_smc91x_resources[] = { 141static struct resource h2_smc91x_resources[] = {
@@ -113,9 +158,119 @@ static struct platform_device h2_smc91x_device = {
113 .resource = h2_smc91x_resources, 158 .resource = h2_smc91x_resources,
114}; 159};
115 160
161static struct resource h2_kp_resources[] = {
162 [0] = {
163 .start = INT_KEYBOARD,
164 .end = INT_KEYBOARD,
165 .flags = IORESOURCE_IRQ,
166 },
167};
168
169static struct omap_kp_platform_data h2_kp_data = {
170 .rows = 8,
171 .cols = 8,
172 .keymap = h2_keymap,
173 .rep = 1,
174};
175
176static struct platform_device h2_kp_device = {
177 .name = "omap-keypad",
178 .id = -1,
179 .dev = {
180 .platform_data = &h2_kp_data,
181 },
182 .num_resources = ARRAY_SIZE(h2_kp_resources),
183 .resource = h2_kp_resources,
184};
185
186#define H2_IRDA_FIRSEL_GPIO_PIN 17
187
188#if defined(CONFIG_OMAP_IR) || defined(CONFIG_OMAP_IR_MODULE)
189static int h2_transceiver_mode(struct device *dev, int state)
190{
191 if (state & IR_SIRMODE)
192 omap_set_gpio_dataout(H2_IRDA_FIRSEL_GPIO_PIN, 0);
193 else /* MIR/FIR */
194 omap_set_gpio_dataout(H2_IRDA_FIRSEL_GPIO_PIN, 1);
195
196 return 0;
197}
198#endif
199
200static struct omap_irda_config h2_irda_data = {
201 .transceiver_cap = IR_SIRMODE | IR_MIRMODE | IR_FIRMODE,
202 .rx_channel = OMAP_DMA_UART3_RX,
203 .tx_channel = OMAP_DMA_UART3_TX,
204 .dest_start = UART3_THR,
205 .src_start = UART3_RHR,
206 .tx_trigger = 0,
207 .rx_trigger = 0,
208};
209
210static struct resource h2_irda_resources[] = {
211 [0] = {
212 .start = INT_UART3,
213 .end = INT_UART3,
214 .flags = IORESOURCE_IRQ,
215 },
216};
217static struct platform_device h2_irda_device = {
218 .name = "omapirda",
219 .id = 0,
220 .dev = {
221 .platform_data = &h2_irda_data,
222 },
223 .num_resources = ARRAY_SIZE(h2_irda_resources),
224 .resource = h2_irda_resources,
225};
226
227static struct platform_device h2_lcd_device = {
228 .name = "lcd_h2",
229 .id = -1,
230};
231
232static struct omap_mcbsp_reg_cfg mcbsp_regs = {
233 .spcr2 = FREE | FRST | GRST | XRST | XINTM(3),
234 .spcr1 = RINTM(3) | RRST,
235 .rcr2 = RPHASE | RFRLEN2(OMAP_MCBSP_WORD_8) |
236 RWDLEN2(OMAP_MCBSP_WORD_16) | RDATDLY(1),
237 .rcr1 = RFRLEN1(OMAP_MCBSP_WORD_8) | RWDLEN1(OMAP_MCBSP_WORD_16),
238 .xcr2 = XPHASE | XFRLEN2(OMAP_MCBSP_WORD_8) |
239 XWDLEN2(OMAP_MCBSP_WORD_16) | XDATDLY(1) | XFIG,
240 .xcr1 = XFRLEN1(OMAP_MCBSP_WORD_8) | XWDLEN1(OMAP_MCBSP_WORD_16),
241 .srgr1 = FWID(15),
242 .srgr2 = GSYNC | CLKSP | FSGM | FPER(31),
243
244 .pcr0 = CLKXM | CLKRM | FSXP | FSRP | CLKXP | CLKRP,
245 //.pcr0 = CLKXP | CLKRP, /* mcbsp: slave */
246};
247
248static struct omap_alsa_codec_config alsa_config = {
249 .name = "H2 TSC2101",
250 .mcbsp_regs_alsa = &mcbsp_regs,
251 .codec_configure_dev = NULL, // tsc2101_configure,
252 .codec_set_samplerate = NULL, // tsc2101_set_samplerate,
253 .codec_clock_setup = NULL, // tsc2101_clock_setup,
254 .codec_clock_on = NULL, // tsc2101_clock_on,
255 .codec_clock_off = NULL, // tsc2101_clock_off,
256 .get_default_samplerate = NULL, // tsc2101_get_default_samplerate,
257};
258
259static struct platform_device h2_mcbsp1_device = {
260 .name = "omap_alsa_mcbsp",
261 .id = 1,
262 .dev = {
263 .platform_data = &alsa_config,
264 },
265};
266
116static struct platform_device *h2_devices[] __initdata = { 267static struct platform_device *h2_devices[] __initdata = {
117 &h2_flash_device, 268 &h2_nor_device,
118 &h2_smc91x_device, 269 &h2_smc91x_device,
270 &h2_irda_device,
271 &h2_kp_device,
272 &h2_lcd_device,
273 &h2_mcbsp1_device,
119}; 274};
120 275
121static void __init h2_init_smc91x(void) 276static void __init h2_init_smc91x(void)
@@ -164,7 +319,6 @@ static struct omap_uart_config h2_uart_config __initdata = {
164}; 319};
165 320
166static struct omap_lcd_config h2_lcd_config __initdata = { 321static struct omap_lcd_config h2_lcd_config __initdata = {
167 .panel_name = "h2",
168 .ctrl_name = "internal", 322 .ctrl_name = "internal",
169}; 323};
170 324
@@ -177,16 +331,34 @@ static struct omap_board_config_kernel h2_config[] = {
177 331
178static void __init h2_init(void) 332static void __init h2_init(void)
179{ 333{
180 /* NOTE: revC boards support NAND-boot, which can put NOR on CS2B 334 /* Here we assume the NOR boot config: NOR on CS3 (possibly swapped
181 * and NAND (either 16bit or 8bit) on CS3. 335 * to address 0 by a dip switch), NAND on CS2B. The NAND driver will
336 * notice whether a NAND chip is enabled at probe time.
337 *
338 * FIXME revC boards (and H3) support NAND-boot, with a dip switch to
339 * put NOR on CS2B and NAND (which on H2 may be 16bit) on CS3. Try
340 * detecting that in code here, to avoid probing every possible flash
341 * configuration...
182 */ 342 */
183 h2_flash_resource.end = h2_flash_resource.start = omap_cs3_phys(); 343 h2_nor_resource.end = h2_nor_resource.start = omap_cs3_phys();
184 h2_flash_resource.end += SZ_32M - 1; 344 h2_nor_resource.end += SZ_32M - 1;
345
346 omap_cfg_reg(L3_1610_FLASH_CS2B_OE);
347 omap_cfg_reg(M8_1610_FLASH_CS2B_WE);
185 348
186 /* MMC: card detect and WP */ 349 /* MMC: card detect and WP */
187 // omap_cfg_reg(U19_ARMIO1); /* CD */ 350 // omap_cfg_reg(U19_ARMIO1); /* CD */
188 omap_cfg_reg(BALLOUT_V8_ARMIO3); /* WP */ 351 omap_cfg_reg(BALLOUT_V8_ARMIO3); /* WP */
189 352
353 /* Irda */
354#if defined(CONFIG_OMAP_IR) || defined(CONFIG_OMAP_IR_MODULE)
355 omap_writel(omap_readl(FUNC_MUX_CTRL_A) | 7, FUNC_MUX_CTRL_A);
356 if (!(omap_request_gpio(H2_IRDA_FIRSEL_GPIO_PIN))) {
357 omap_set_gpio_direction(H2_IRDA_FIRSEL_GPIO_PIN, 0);
358 h2_irda_data.transceiver_mode = h2_transceiver_mode;
359 }
360#endif
361
190 platform_add_devices(h2_devices, ARRAY_SIZE(h2_devices)); 362 platform_add_devices(h2_devices, ARRAY_SIZE(h2_devices));
191 omap_board_config = h2_config; 363 omap_board_config = h2_config;
192 omap_board_config_size = ARRAY_SIZE(h2_config); 364 omap_board_config_size = ARRAY_SIZE(h2_config);
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index d9f386265996..4b8d0ec73cb7 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -21,8 +21,11 @@
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <linux/errno.h> 23#include <linux/errno.h>
24#include <linux/workqueue.h>
24#include <linux/mtd/mtd.h> 25#include <linux/mtd/mtd.h>
26#include <linux/mtd/nand.h>
25#include <linux/mtd/partitions.h> 27#include <linux/mtd/partitions.h>
28#include <linux/input.h>
26 29
27#include <asm/setup.h> 30#include <asm/setup.h>
28#include <asm/page.h> 31#include <asm/page.h>
@@ -33,15 +36,59 @@
33#include <asm/mach/map.h> 36#include <asm/mach/map.h>
34 37
35#include <asm/arch/gpio.h> 38#include <asm/arch/gpio.h>
39#include <asm/arch/gpioexpander.h>
36#include <asm/arch/irqs.h> 40#include <asm/arch/irqs.h>
37#include <asm/arch/mux.h> 41#include <asm/arch/mux.h>
38#include <asm/arch/tc.h> 42#include <asm/arch/tc.h>
43#include <asm/arch/irda.h>
39#include <asm/arch/usb.h> 44#include <asm/arch/usb.h>
45#include <asm/arch/keypad.h>
46#include <asm/arch/dma.h>
40#include <asm/arch/common.h> 47#include <asm/arch/common.h>
41 48
42extern int omap_gpio_init(void); 49extern int omap_gpio_init(void);
43 50
44static struct mtd_partition h3_partitions[] = { 51static int h3_keymap[] = {
52 KEY(0, 0, KEY_LEFT),
53 KEY(0, 1, KEY_RIGHT),
54 KEY(0, 2, KEY_3),
55 KEY(0, 3, KEY_F10),
56 KEY(0, 4, KEY_F5),
57 KEY(0, 5, KEY_9),
58 KEY(1, 0, KEY_DOWN),
59 KEY(1, 1, KEY_UP),
60 KEY(1, 2, KEY_2),
61 KEY(1, 3, KEY_F9),
62 KEY(1, 4, KEY_F7),
63 KEY(1, 5, KEY_0),
64 KEY(2, 0, KEY_ENTER),
65 KEY(2, 1, KEY_6),
66 KEY(2, 2, KEY_1),
67 KEY(2, 3, KEY_F2),
68 KEY(2, 4, KEY_F6),
69 KEY(2, 5, KEY_HOME),
70 KEY(3, 0, KEY_8),
71 KEY(3, 1, KEY_5),
72 KEY(3, 2, KEY_F12),
73 KEY(3, 3, KEY_F3),
74 KEY(3, 4, KEY_F8),
75 KEY(3, 5, KEY_END),
76 KEY(4, 0, KEY_7),
77 KEY(4, 1, KEY_4),
78 KEY(4, 2, KEY_F11),
79 KEY(4, 3, KEY_F1),
80 KEY(4, 4, KEY_F4),
81 KEY(4, 5, KEY_ESC),
82 KEY(5, 0, KEY_F13),
83 KEY(5, 1, KEY_F14),
84 KEY(5, 2, KEY_F15),
85 KEY(5, 3, KEY_F16),
86 KEY(5, 4, KEY_SLEEP),
87 0
88};
89
90
91static struct mtd_partition nor_partitions[] = {
45 /* bootloader (U-Boot, etc) in first sector */ 92 /* bootloader (U-Boot, etc) in first sector */
46 { 93 {
47 .name = "bootloader", 94 .name = "bootloader",
@@ -72,26 +119,80 @@ static struct mtd_partition h3_partitions[] = {
72 } 119 }
73}; 120};
74 121
75static struct flash_platform_data h3_flash_data = { 122static struct flash_platform_data nor_data = {
76 .map_name = "cfi_probe", 123 .map_name = "cfi_probe",
77 .width = 2, 124 .width = 2,
78 .parts = h3_partitions, 125 .parts = nor_partitions,
79 .nr_parts = ARRAY_SIZE(h3_partitions), 126 .nr_parts = ARRAY_SIZE(nor_partitions),
80}; 127};
81 128
82static struct resource h3_flash_resource = { 129static struct resource nor_resource = {
83 /* This is on CS3, wherever it's mapped */ 130 /* This is on CS3, wherever it's mapped */
84 .flags = IORESOURCE_MEM, 131 .flags = IORESOURCE_MEM,
85}; 132};
86 133
87static struct platform_device flash_device = { 134static struct platform_device nor_device = {
88 .name = "omapflash", 135 .name = "omapflash",
89 .id = 0, 136 .id = 0,
90 .dev = { 137 .dev = {
91 .platform_data = &h3_flash_data, 138 .platform_data = &nor_data,
139 },
140 .num_resources = 1,
141 .resource = &nor_resource,
142};
143
144static struct mtd_partition nand_partitions[] = {
145#if 0
146 /* REVISIT: enable these partitions if you make NAND BOOT work */
147 {
148 .name = "xloader",
149 .offset = 0,
150 .size = 64 * 1024,
151 .mask_flags = MTD_WRITEABLE, /* force read-only */
152 },
153 {
154 .name = "bootloader",
155 .offset = MTDPART_OFS_APPEND,
156 .size = 256 * 1024,
157 .mask_flags = MTD_WRITEABLE, /* force read-only */
158 },
159 {
160 .name = "params",
161 .offset = MTDPART_OFS_APPEND,
162 .size = 192 * 1024,
163 },
164 {
165 .name = "kernel",
166 .offset = MTDPART_OFS_APPEND,
167 .size = 2 * SZ_1M,
168 },
169#endif
170 {
171 .name = "filesystem",
172 .size = MTDPART_SIZ_FULL,
173 .offset = MTDPART_OFS_APPEND,
174 },
175};
176
177/* dip switches control NAND chip access: 8 bit, 16 bit, or neither */
178static struct nand_platform_data nand_data = {
179 .options = NAND_SAMSUNG_LP_OPTIONS,
180 .parts = nand_partitions,
181 .nr_parts = ARRAY_SIZE(nand_partitions),
182};
183
184static struct resource nand_resource = {
185 .flags = IORESOURCE_MEM,
186};
187
188static struct platform_device nand_device = {
189 .name = "omapnand",
190 .id = 0,
191 .dev = {
192 .platform_data = &nand_data,
92 }, 193 },
93 .num_resources = 1, 194 .num_resources = 1,
94 .resource = &h3_flash_resource, 195 .resource = &nand_resource,
95}; 196};
96 197
97static struct resource smc91x_resources[] = { 198static struct resource smc91x_resources[] = {
@@ -138,10 +239,136 @@ static struct platform_device intlat_device = {
138 .resource = intlat_resources, 239 .resource = intlat_resources,
139}; 240};
140 241
242static struct resource h3_kp_resources[] = {
243 [0] = {
244 .start = INT_KEYBOARD,
245 .end = INT_KEYBOARD,
246 .flags = IORESOURCE_IRQ,
247 },
248};
249
250static struct omap_kp_platform_data h3_kp_data = {
251 .rows = 8,
252 .cols = 8,
253 .keymap = h3_keymap,
254 .rep = 1,
255};
256
257static struct platform_device h3_kp_device = {
258 .name = "omap-keypad",
259 .id = -1,
260 .dev = {
261 .platform_data = &h3_kp_data,
262 },
263 .num_resources = ARRAY_SIZE(h3_kp_resources),
264 .resource = h3_kp_resources,
265};
266
267
268/* Select between the IrDA and aGPS module
269 */
270static int h3_select_irda(struct device *dev, int state)
271{
272 unsigned char expa;
273 int err = 0;
274
275 if ((err = read_gpio_expa(&expa, 0x26))) {
276 printk(KERN_ERR "Error reading from I/O EXPANDER \n");
277 return err;
278 }
279
280 /* 'P6' enable/disable IRDA_TX and IRDA_RX */
281 if (state & IR_SEL) { /* IrDA */
282 if ((err = write_gpio_expa(expa | 0x40, 0x26))) {
283 printk(KERN_ERR "Error writing to I/O EXPANDER \n");
284 return err;
285 }
286 } else {
287 if ((err = write_gpio_expa(expa & ~0x40, 0x26))) {
288 printk(KERN_ERR "Error writing to I/O EXPANDER \n");
289 return err;
290 }
291 }
292 return err;
293}
294
295static void set_trans_mode(void *data)
296{
297 int *mode = data;
298 unsigned char expa;
299 int err = 0;
300
301 if ((err = read_gpio_expa(&expa, 0x27)) != 0) {
302 printk(KERN_ERR "Error reading from I/O expander\n");
303 }
304
305 expa &= ~0x03;
306
307 if (*mode & IR_SIRMODE) {
308 expa |= 0x01;
309 } else { /* MIR/FIR */
310 expa |= 0x03;
311 }
312
313 if ((err = write_gpio_expa(expa, 0x27)) != 0) {
314 printk(KERN_ERR "Error writing to I/O expander\n");
315 }
316}
317
318static int h3_transceiver_mode(struct device *dev, int mode)
319{
320 struct omap_irda_config *irda_config = dev->platform_data;
321
322 cancel_delayed_work(&irda_config->gpio_expa);
323 PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
324 schedule_work(&irda_config->gpio_expa);
325
326 return 0;
327}
328
329static struct omap_irda_config h3_irda_data = {
330 .transceiver_cap = IR_SIRMODE | IR_MIRMODE | IR_FIRMODE,
331 .transceiver_mode = h3_transceiver_mode,
332 .select_irda = h3_select_irda,
333 .rx_channel = OMAP_DMA_UART3_RX,
334 .tx_channel = OMAP_DMA_UART3_TX,
335 .dest_start = UART3_THR,
336 .src_start = UART3_RHR,
337 .tx_trigger = 0,
338 .rx_trigger = 0,
339};
340
341static struct resource h3_irda_resources[] = {
342 [0] = {
343 .start = INT_UART3,
344 .end = INT_UART3,
345 .flags = IORESOURCE_IRQ,
346 },
347};
348
349static struct platform_device h3_irda_device = {
350 .name = "omapirda",
351 .id = 0,
352 .dev = {
353 .platform_data = &h3_irda_data,
354 },
355 .num_resources = ARRAY_SIZE(h3_irda_resources),
356 .resource = h3_irda_resources,
357};
358
359static struct platform_device h3_lcd_device = {
360 .name = "lcd_h3",
361 .id = -1,
362};
363
141static struct platform_device *devices[] __initdata = { 364static struct platform_device *devices[] __initdata = {
142 &flash_device, 365 &nor_device,
366 &nand_device,
143 &smc91x_device, 367 &smc91x_device,
144 &intlat_device, 368 &intlat_device,
369 &h3_irda_device,
370 &h3_kp_device,
371 &h3_lcd_device,
145}; 372};
146 373
147static struct omap_usb_config h3_usb_config __initdata = { 374static struct omap_usb_config h3_usb_config __initdata = {
@@ -171,7 +398,6 @@ static struct omap_uart_config h3_uart_config __initdata = {
171}; 398};
172 399
173static struct omap_lcd_config h3_lcd_config __initdata = { 400static struct omap_lcd_config h3_lcd_config __initdata = {
174 .panel_name = "h3",
175 .ctrl_name = "internal", 401 .ctrl_name = "internal",
176}; 402};
177 403
@@ -182,11 +408,36 @@ static struct omap_board_config_kernel h3_config[] = {
182 { OMAP_TAG_LCD, &h3_lcd_config }, 408 { OMAP_TAG_LCD, &h3_lcd_config },
183}; 409};
184 410
411#define H3_NAND_RB_GPIO_PIN 10
412
413static int nand_dev_ready(struct nand_platform_data *data)
414{
415 return omap_get_gpio_datain(H3_NAND_RB_GPIO_PIN);
416}
417
185static void __init h3_init(void) 418static void __init h3_init(void)
186{ 419{
187 h3_flash_resource.end = h3_flash_resource.start = omap_cs3_phys(); 420 /* Here we assume the NOR boot config: NOR on CS3 (possibly swapped
188 h3_flash_resource.end += OMAP_CS3_SIZE - 1; 421 * to address 0 by a dip switch), NAND on CS2B. The NAND driver will
189 (void) platform_add_devices(devices, ARRAY_SIZE(devices)); 422 * notice whether a NAND chip is enabled at probe time.
423 *
424 * H3 support NAND-boot, with a dip switch to put NOR on CS2B and NAND
425 * (which on H2 may be 16bit) on CS3. Try detecting that in code here,
426 * to avoid probing every possible flash configuration...
427 */
428 nor_resource.end = nor_resource.start = omap_cs3_phys();
429 nor_resource.end += SZ_32M - 1;
430
431 nand_resource.end = nand_resource.start = OMAP_CS2B_PHYS;
432 nand_resource.end += SZ_4K - 1;
433 if (!(omap_request_gpio(H3_NAND_RB_GPIO_PIN)))
434 nand_data.dev_ready = nand_dev_ready;
435
436 /* GPIO10 Func_MUX_CTRL reg bit 29:27, Configure V2 to mode1 as GPIO */
437 /* GPIO10 pullup/down register, Enable pullup on GPIO10 */
438 omap_cfg_reg(V2_1710_GPIO10);
439
440 platform_add_devices(devices, ARRAY_SIZE(devices));
190 omap_board_config = h3_config; 441 omap_board_config = h3_config;
191 omap_board_config_size = ARRAY_SIZE(h3_config); 442 omap_board_config_size = ARRAY_SIZE(h3_config);
192 omap_serial_init(); 443 omap_serial_init();
diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
index a04e4332915e..e90c137a4cf3 100644
--- a/arch/arm/mach-omap1/board-innovator.c
+++ b/arch/arm/mach-omap1/board-innovator.c
@@ -22,6 +22,7 @@
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/mtd/mtd.h> 23#include <linux/mtd/mtd.h>
24#include <linux/mtd/partitions.h> 24#include <linux/mtd/partitions.h>
25#include <linux/input.h>
25 26
26#include <asm/hardware.h> 27#include <asm/hardware.h>
27#include <asm/mach-types.h> 28#include <asm/mach-types.h>
@@ -34,8 +35,22 @@
34#include <asm/arch/gpio.h> 35#include <asm/arch/gpio.h>
35#include <asm/arch/tc.h> 36#include <asm/arch/tc.h>
36#include <asm/arch/usb.h> 37#include <asm/arch/usb.h>
38#include <asm/arch/keypad.h>
37#include <asm/arch/common.h> 39#include <asm/arch/common.h>
38 40
41static int innovator_keymap[] = {
42 KEY(0, 0, KEY_F1),
43 KEY(0, 3, KEY_DOWN),
44 KEY(1, 1, KEY_F2),
45 KEY(1, 2, KEY_RIGHT),
46 KEY(2, 0, KEY_F3),
47 KEY(2, 1, KEY_F4),
48 KEY(2, 2, KEY_UP),
49 KEY(3, 2, KEY_ENTER),
50 KEY(3, 3, KEY_LEFT),
51 0
52};
53
39static struct mtd_partition innovator_partitions[] = { 54static struct mtd_partition innovator_partitions[] = {
40 /* bootloader (U-Boot, etc) in first sector */ 55 /* bootloader (U-Boot, etc) in first sector */
41 { 56 {
@@ -97,6 +112,31 @@ static struct platform_device innovator_flash_device = {
97 .resource = &innovator_flash_resource, 112 .resource = &innovator_flash_resource,
98}; 113};
99 114
115static struct resource innovator_kp_resources[] = {
116 [0] = {
117 .start = INT_KEYBOARD,
118 .end = INT_KEYBOARD,
119 .flags = IORESOURCE_IRQ,
120 },
121};
122
123static struct omap_kp_platform_data innovator_kp_data = {
124 .rows = 8,
125 .cols = 8,
126 .keymap = innovator_keymap,
127};
128
129static struct platform_device innovator_kp_device = {
130 .name = "omap-keypad",
131 .id = -1,
132 .dev = {
133 .platform_data = &innovator_kp_data,
134 },
135 .num_resources = ARRAY_SIZE(innovator_kp_resources),
136 .resource = innovator_kp_resources,
137};
138
139
100#ifdef CONFIG_ARCH_OMAP15XX 140#ifdef CONFIG_ARCH_OMAP15XX
101 141
102/* Only FPGA needs to be mapped here. All others are done with ioremap */ 142/* Only FPGA needs to be mapped here. All others are done with ioremap */
@@ -129,9 +169,16 @@ static struct platform_device innovator1510_smc91x_device = {
129 .resource = innovator1510_smc91x_resources, 169 .resource = innovator1510_smc91x_resources,
130}; 170};
131 171
172static struct platform_device innovator1510_lcd_device = {
173 .name = "lcd_inn1510",
174 .id = -1,
175};
176
132static struct platform_device *innovator1510_devices[] __initdata = { 177static struct platform_device *innovator1510_devices[] __initdata = {
133 &innovator_flash_device, 178 &innovator_flash_device,
134 &innovator1510_smc91x_device, 179 &innovator1510_smc91x_device,
180 &innovator_kp_device,
181 &innovator1510_lcd_device,
135}; 182};
136 183
137#endif /* CONFIG_ARCH_OMAP15XX */ 184#endif /* CONFIG_ARCH_OMAP15XX */
@@ -158,9 +205,16 @@ static struct platform_device innovator1610_smc91x_device = {
158 .resource = innovator1610_smc91x_resources, 205 .resource = innovator1610_smc91x_resources,
159}; 206};
160 207
208static struct platform_device innovator1610_lcd_device = {
209 .name = "inn1610_lcd",
210 .id = -1,
211};
212
161static struct platform_device *innovator1610_devices[] __initdata = { 213static struct platform_device *innovator1610_devices[] __initdata = {
162 &innovator_flash_device, 214 &innovator_flash_device,
163 &innovator1610_smc91x_device, 215 &innovator1610_smc91x_device,
216 &innovator_kp_device,
217 &innovator1610_lcd_device,
164}; 218};
165 219
166#endif /* CONFIG_ARCH_OMAP16XX */ 220#endif /* CONFIG_ARCH_OMAP16XX */
@@ -206,7 +260,6 @@ static struct omap_usb_config innovator1510_usb_config __initdata = {
206}; 260};
207 261
208static struct omap_lcd_config innovator1510_lcd_config __initdata = { 262static struct omap_lcd_config innovator1510_lcd_config __initdata = {
209 .panel_name = "inn1510",
210 .ctrl_name = "internal", 263 .ctrl_name = "internal",
211}; 264};
212#endif 265#endif
@@ -228,7 +281,6 @@ static struct omap_usb_config h2_usb_config __initdata = {
228}; 281};
229 282
230static struct omap_lcd_config innovator1610_lcd_config __initdata = { 283static struct omap_lcd_config innovator1610_lcd_config __initdata = {
231 .panel_name = "inn1610",
232 .ctrl_name = "internal", 284 .ctrl_name = "internal",
233}; 285};
234#endif 286#endif
diff --git a/arch/arm/mach-omap1/board-netstar.c b/arch/arm/mach-omap1/board-netstar.c
deleted file mode 100644
index 7520e602d7a2..000000000000
--- a/arch/arm/mach-omap1/board-netstar.c
+++ /dev/null
@@ -1,160 +0,0 @@
1/*
2 * Modified from board-generic.c
3 *
4 * Copyright (C) 2004 2N Telekomunikace, Ladislav Michl <michl@2n.cz>
5 *
6 * Code for Netstar OMAP board.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/delay.h>
14#include <linux/platform_device.h>
15#include <linux/interrupt.h>
16#include <linux/init.h>
17#include <linux/kernel.h>
18#include <linux/notifier.h>
19#include <linux/reboot.h>
20
21#include <asm/hardware.h>
22#include <asm/mach-types.h>
23#include <asm/mach/arch.h>
24#include <asm/mach/map.h>
25
26#include <asm/arch/gpio.h>
27#include <asm/arch/mux.h>
28#include <asm/arch/usb.h>
29#include <asm/arch/common.h>
30
31extern void __init omap_init_time(void);
32extern int omap_gpio_init(void);
33
34static struct resource netstar_smc91x_resources[] = {
35 [0] = {
36 .start = OMAP_CS1_PHYS + 0x300,
37 .end = OMAP_CS1_PHYS + 0x300 + 16,
38 .flags = IORESOURCE_MEM,
39 },
40 [1] = {
41 .start = OMAP_GPIO_IRQ(8),
42 .end = OMAP_GPIO_IRQ(8),
43 .flags = IORESOURCE_IRQ,
44 },
45};
46
47static struct platform_device netstar_smc91x_device = {
48 .name = "smc91x",
49 .id = 0,
50 .num_resources = ARRAY_SIZE(netstar_smc91x_resources),
51 .resource = netstar_smc91x_resources,
52};
53
54static struct platform_device *netstar_devices[] __initdata = {
55 &netstar_smc91x_device,
56};
57
58static struct omap_uart_config netstar_uart_config __initdata = {
59 .enabled_uarts = ((1 << 0) | (1 << 1) | (1 << 2)),
60};
61
62static struct omap_board_config_kernel netstar_config[] = {
63 { OMAP_TAG_UART, &netstar_uart_config },
64};
65
66static void __init netstar_init_irq(void)
67{
68 omap1_init_common_hw();
69 omap_init_irq();
70 omap_gpio_init();
71}
72
73static void __init netstar_init(void)
74{
75 /* green LED */
76 omap_request_gpio(4);
77 omap_set_gpio_direction(4, 0);
78 /* smc91x reset */
79 omap_request_gpio(7);
80 omap_set_gpio_direction(7, 0);
81 omap_set_gpio_dataout(7, 1);
82 udelay(2); /* wait at least 100ns */
83 omap_set_gpio_dataout(7, 0);
84 mdelay(50); /* 50ms until PHY ready */
85 /* smc91x interrupt pin */
86 omap_request_gpio(8);
87
88 omap_request_gpio(12);
89 omap_request_gpio(13);
90 omap_request_gpio(14);
91 omap_request_gpio(15);
92 set_irq_type(OMAP_GPIO_IRQ(12), IRQT_FALLING);
93 set_irq_type(OMAP_GPIO_IRQ(13), IRQT_FALLING);
94 set_irq_type(OMAP_GPIO_IRQ(14), IRQT_FALLING);
95 set_irq_type(OMAP_GPIO_IRQ(15), IRQT_FALLING);
96
97 platform_add_devices(netstar_devices, ARRAY_SIZE(netstar_devices));
98
99 /* Switch on green LED */
100 omap_set_gpio_dataout(4, 0);
101 /* Switch off red LED */
102 omap_writeb(0x00, OMAP_LPG1_PMR); /* Disable clock */
103 omap_writeb(0x80, OMAP_LPG1_LCR);
104
105 omap_board_config = netstar_config;
106 omap_board_config_size = ARRAY_SIZE(netstar_config);
107 omap_serial_init();
108}
109
110static void __init netstar_map_io(void)
111{
112 omap1_map_common_io();
113}
114
115#define MACHINE_PANICED 1
116#define MACHINE_REBOOTING 2
117#define MACHINE_REBOOT 4
118static unsigned long machine_state;
119
120static int panic_event(struct notifier_block *this, unsigned long event,
121 void *ptr)
122{
123 if (test_and_set_bit(MACHINE_PANICED, &machine_state))
124 return NOTIFY_DONE;
125
126 /* Switch off green LED */
127 omap_set_gpio_dataout(4, 1);
128 /* Flash red LED */
129 omap_writeb(0x78, OMAP_LPG1_LCR);
130 omap_writeb(0x01, OMAP_LPG1_PMR); /* Enable clock */
131
132 return NOTIFY_DONE;
133}
134
135static struct notifier_block panic_block = {
136 .notifier_call = panic_event,
137};
138
139static int __init netstar_late_init(void)
140{
141 /* TODO: Setup front panel switch here */
142
143 /* Setup panic notifier */
144 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
145
146 return 0;
147}
148
149postcore_initcall(netstar_late_init);
150
151MACHINE_START(NETSTAR, "NetStar OMAP5910")
152 /* Maintainer: Ladislav Michl <michl@2n.cz> */
153 .phys_io = 0xfff00000,
154 .io_pg_offst = ((0xfef00000) >> 18) & 0xfffc,
155 .boot_params = 0x10000100,
156 .map_io = netstar_map_io,
157 .init_irq = netstar_init_irq,
158 .init_machine = netstar_init,
159 .timer = &omap_timer,
160MACHINE_END
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
new file mode 100644
index 000000000000..02b980d77b12
--- /dev/null
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -0,0 +1,268 @@
1/*
2 * linux/arch/arm/mach-omap1/board-nokia770.c
3 *
4 * Modified from board-generic.c
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/platform_device.h>
14#include <linux/input.h>
15#include <linux/clk.h>
16
17#include <linux/spi/spi.h>
18#include <linux/spi/ads7846.h>
19
20#include <asm/hardware.h>
21#include <asm/mach-types.h>
22#include <asm/mach/arch.h>
23#include <asm/mach/map.h>
24
25#include <asm/arch/gpio.h>
26#include <asm/arch/mux.h>
27#include <asm/arch/usb.h>
28#include <asm/arch/board.h>
29#include <asm/arch/keypad.h>
30#include <asm/arch/common.h>
31#include <asm/arch/dsp_common.h>
32#include <asm/arch/aic23.h>
33#include <asm/arch/gpio.h>
34
35static void __init omap_nokia770_init_irq(void)
36{
37 /* On Nokia 770, the SleepX signal is masked with an
38 * MPUIO line by default. It has to be unmasked for it
39 * to become functional */
40
41 /* SleepX mask direction */
42 omap_writew((omap_readw(0xfffb5008) & ~2), 0xfffb5008);
43 /* Unmask SleepX signal */
44 omap_writew((omap_readw(0xfffb5004) & ~2), 0xfffb5004);
45
46 omap1_init_common_hw();
47 omap_init_irq();
48}
49
50static int nokia770_keymap[] = {
51 KEY(0, 1, GROUP_0 | KEY_UP),
52 KEY(0, 2, GROUP_1 | KEY_F5),
53 KEY(1, 0, GROUP_0 | KEY_LEFT),
54 KEY(1, 1, GROUP_0 | KEY_ENTER),
55 KEY(1, 2, GROUP_0 | KEY_RIGHT),
56 KEY(2, 0, GROUP_1 | KEY_ESC),
57 KEY(2, 1, GROUP_0 | KEY_DOWN),
58 KEY(2, 2, GROUP_1 | KEY_F4),
59 KEY(3, 0, GROUP_2 | KEY_F7),
60 KEY(3, 1, GROUP_2 | KEY_F8),
61 KEY(3, 2, GROUP_2 | KEY_F6),
62 0
63};
64
65static struct resource nokia770_kp_resources[] = {
66 [0] = {
67 .start = INT_KEYBOARD,
68 .end = INT_KEYBOARD,
69 .flags = IORESOURCE_IRQ,
70 },
71};
72
73static struct omap_kp_platform_data nokia770_kp_data = {
74 .rows = 8,
75 .cols = 8,
76 .keymap = nokia770_keymap
77};
78
79static struct platform_device nokia770_kp_device = {
80 .name = "omap-keypad",
81 .id = -1,
82 .dev = {
83 .platform_data = &nokia770_kp_data,
84 },
85 .num_resources = ARRAY_SIZE(nokia770_kp_resources),
86 .resource = nokia770_kp_resources,
87};
88
89static struct platform_device *nokia770_devices[] __initdata = {
90 &nokia770_kp_device,
91};
92
93static struct ads7846_platform_data nokia770_ads7846_platform_data __initdata = {
94 .x_max = 0x0fff,
95 .y_max = 0x0fff,
96 .x_plate_ohms = 180,
97 .pressure_max = 255,
98 .debounce_max = 10,
99 .debounce_tol = 3,
100};
101
102static struct spi_board_info nokia770_spi_board_info[] __initdata = {
103 [0] = {
104 .modalias = "lcd_lph8923",
105 .bus_num = 2,
106 .chip_select = 3,
107 .max_speed_hz = 12000000,
108 },
109 [1] = {
110 .modalias = "ads7846",
111 .bus_num = 2,
112 .chip_select = 0,
113 .max_speed_hz = 2500000,
114 .irq = OMAP_GPIO_IRQ(15),
115 .platform_data = &nokia770_ads7846_platform_data,
116 },
117};
118
119
120/* assume no Mini-AB port */
121
122static struct omap_usb_config nokia770_usb_config __initdata = {
123 .otg = 1,
124 .register_host = 1,
125 .register_dev = 1,
126 .hmc_mode = 16,
127 .pins[0] = 6,
128};
129
130static struct omap_mmc_config nokia770_mmc_config __initdata = {
131 .mmc[0] = {
132 .enabled = 0,
133 .wire4 = 0,
134 .wp_pin = -1,
135 .power_pin = -1,
136 .switch_pin = -1,
137 },
138 .mmc[1] = {
139 .enabled = 0,
140 .wire4 = 0,
141 .wp_pin = -1,
142 .power_pin = -1,
143 .switch_pin = -1,
144 },
145};
146
147static struct omap_board_config_kernel nokia770_config[] = {
148 { OMAP_TAG_USB, NULL },
149 { OMAP_TAG_MMC, &nokia770_mmc_config },
150};
151
152/*
153 * audio power control
154 */
155#define HEADPHONE_GPIO 14
156#define AMPLIFIER_CTRL_GPIO 58
157
158static struct clk *dspxor_ck;
159static DECLARE_MUTEX(audio_pwr_sem);
160/*
161 * audio_pwr_state
162 * +--+-------------------------+---------------------------------------+
163 * |-1|down |power-up request -> 0 |
164 * +--+-------------------------+---------------------------------------+
165 * | 0|up |power-down(1) request -> 1 |
166 * | | |power-down(2) request -> (ignore) |
167 * +--+-------------------------+---------------------------------------+
168 * | 1|up, |power-up request -> 0 |
169 * | |received down(1) request |power-down(2) request -> -1 |
170 * +--+-------------------------+---------------------------------------+
171 */
172static int audio_pwr_state = -1;
173
174/*
175 * audio_pwr_up / down should be called under audio_pwr_sem
176 */
177static void nokia770_audio_pwr_up(void)
178{
179 clk_enable(dspxor_ck);
180
181 /* Turn on codec */
182 tlv320aic23_power_up();
183
184 if (omap_get_gpio_datain(HEADPHONE_GPIO))
185 /* HP not connected, turn on amplifier */
186 omap_set_gpio_dataout(AMPLIFIER_CTRL_GPIO, 1);
187 else
188 /* HP connected, do not turn on amplifier */
189 printk("HP connected\n");
190}
191
192static void codec_delayed_power_down(void *arg)
193{
194 down(&audio_pwr_sem);
195 if (audio_pwr_state == -1)
196 tlv320aic23_power_down();
197 clk_disable(dspxor_ck);
198 up(&audio_pwr_sem);
199}
200
201static DECLARE_WORK(codec_power_down_work, codec_delayed_power_down, NULL);
202
203static void nokia770_audio_pwr_down(void)
204{
205 /* Turn off amplifier */
206 omap_set_gpio_dataout(AMPLIFIER_CTRL_GPIO, 0);
207
208 /* Turn off codec: schedule delayed work */
209 schedule_delayed_work(&codec_power_down_work, HZ / 20); /* 50ms */
210}
211
212void nokia770_audio_pwr_up_request(int stage)
213{
214 down(&audio_pwr_sem);
215 if (audio_pwr_state == -1)
216 nokia770_audio_pwr_up();
217 /* force audio_pwr_state = 0, even if it was 1. */
218 audio_pwr_state = 0;
219 up(&audio_pwr_sem);
220}
221
222void nokia770_audio_pwr_down_request(int stage)
223{
224 down(&audio_pwr_sem);
225 switch (stage) {
226 case 1:
227 if (audio_pwr_state == 0)
228 audio_pwr_state = 1;
229 break;
230 case 2:
231 if (audio_pwr_state == 1) {
232 nokia770_audio_pwr_down();
233 audio_pwr_state = -1;
234 }
235 break;
236 }
237 up(&audio_pwr_sem);
238}
239
240static void __init omap_nokia770_init(void)
241{
242 nokia770_config[0].data = &nokia770_usb_config;
243
244 platform_add_devices(nokia770_devices, ARRAY_SIZE(nokia770_devices));
245 spi_register_board_info(nokia770_spi_board_info,
246 ARRAY_SIZE(nokia770_spi_board_info));
247 omap_board_config = nokia770_config;
248 omap_board_config_size = ARRAY_SIZE(nokia770_config);
249 omap_serial_init();
250 omap_dsp_audio_pwr_up_request = nokia770_audio_pwr_up_request;
251 omap_dsp_audio_pwr_down_request = nokia770_audio_pwr_down_request;
252 dspxor_ck = clk_get(0, "dspxor_ck");
253}
254
255static void __init omap_nokia770_map_io(void)
256{
257 omap1_map_common_io();
258}
259
260MACHINE_START(NOKIA770, "Nokia 770")
261 .phys_io = 0xfff00000,
262 .io_pg_offst = ((0xfef00000) >> 18) & 0xfffc,
263 .boot_params = 0x10000100,
264 .map_io = omap_nokia770_map_io,
265 .init_irq = omap_nokia770_init_irq,
266 .init_machine = omap_nokia770_init,
267 .timer = &omap_timer,
268MACHINE_END
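
The audio_pwr_state table above encodes a small three-state machine: -1 means powered down, 0 means powered up, and 1 means "still up, but a stage-1 power-down request has been seen". A standalone C sketch of those transitions, using stand-in power_up()/power_down() functions instead of the real codec, clock and GPIO calls (illustrative only, not part of the patch):

#include <stdio.h>

/* Mirrors the audio_pwr_state table from board-nokia770.c:
 * -1 = powered down, 0 = powered up, 1 = up but a stage-1 down request seen. */
static int audio_pwr_state = -1;

static void power_up(void)   { puts("codec + amplifier on");  }  /* stand-in */
static void power_down(void) { puts("codec + amplifier off"); }  /* stand-in */

static void pwr_up_request(void)
{
	if (audio_pwr_state == -1)
		power_up();
	audio_pwr_state = 0;          /* force to "up", even from state 1 */
}

static void pwr_down_request(int stage)
{
	switch (stage) {
	case 1:                       /* first half of the shutdown handshake */
		if (audio_pwr_state == 0)
			audio_pwr_state = 1;
		break;
	case 2:                       /* second half: actually power down */
		if (audio_pwr_state == 1) {
			power_down();
			audio_pwr_state = -1;
		}
		break;
	}
}

int main(void)
{
	pwr_up_request();             /* -1 -> 0 */
	pwr_down_request(1);          /*  0 -> 1 */
	pwr_up_request();             /*  1 -> 0, the pending down(1) is cancelled */
	pwr_down_request(1);          /*  0 -> 1 */
	pwr_down_request(2);          /*  1 -> -1, power really goes off */
	return 0;
}

The board code additionally serializes these transitions with audio_pwr_sem and defers the actual codec shutdown by roughly 50 ms through schedule_delayed_work(), as the hunk above shows.
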
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index 543fa136106d..1160093e8ef6 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -33,6 +33,7 @@
33 33
34#include <linux/mtd/mtd.h> 34#include <linux/mtd/mtd.h>
35#include <linux/mtd/partitions.h> 35#include <linux/mtd/partitions.h>
36#include <linux/input.h>
36 37
37#include <asm/hardware.h> 38#include <asm/hardware.h>
38#include <asm/mach-types.h> 39#include <asm/mach-types.h>
@@ -44,7 +45,24 @@
44#include <asm/arch/usb.h> 45#include <asm/arch/usb.h>
45#include <asm/arch/mux.h> 46#include <asm/arch/mux.h>
46#include <asm/arch/tc.h> 47#include <asm/arch/tc.h>
48#include <asm/arch/keypad.h>
47#include <asm/arch/common.h> 49#include <asm/arch/common.h>
50#include <asm/arch/mcbsp.h>
51#include <asm/arch/omap-alsa.h>
52
53static int osk_keymap[] = {
54 KEY(0, 0, KEY_F1),
55 KEY(0, 3, KEY_UP),
56 KEY(1, 1, KEY_LEFTCTRL),
57 KEY(1, 2, KEY_LEFT),
58 KEY(2, 0, KEY_SPACE),
59 KEY(2, 1, KEY_ESC),
60 KEY(2, 2, KEY_DOWN),
61 KEY(3, 2, KEY_ENTER),
62 KEY(3, 3, KEY_RIGHT),
63 0
64};
65
48 66
49static struct mtd_partition osk_partitions[] = { 67static struct mtd_partition osk_partitions[] = {
50 /* bootloader (U-Boot, etc) in first sector */ 68 /* bootloader (U-Boot, etc) in first sector */
@@ -133,9 +151,69 @@ static struct platform_device osk5912_cf_device = {
133 .resource = osk5912_cf_resources, 151 .resource = osk5912_cf_resources,
134}; 152};
135 153
154#define DEFAULT_BITPERSAMPLE 16
155
156static struct omap_mcbsp_reg_cfg mcbsp_regs = {
157 .spcr2 = FREE | FRST | GRST | XRST | XINTM(3),
158 .spcr1 = RINTM(3) | RRST,
159 .rcr2 = RPHASE | RFRLEN2(OMAP_MCBSP_WORD_8) |
160 RWDLEN2(OMAP_MCBSP_WORD_16) | RDATDLY(0),
161 .rcr1 = RFRLEN1(OMAP_MCBSP_WORD_8) | RWDLEN1(OMAP_MCBSP_WORD_16),
162 .xcr2 = XPHASE | XFRLEN2(OMAP_MCBSP_WORD_8) |
163 XWDLEN2(OMAP_MCBSP_WORD_16) | XDATDLY(0) | XFIG,
164 .xcr1 = XFRLEN1(OMAP_MCBSP_WORD_8) | XWDLEN1(OMAP_MCBSP_WORD_16),
165 .srgr1 = FWID(DEFAULT_BITPERSAMPLE - 1),
166 .srgr2 = GSYNC | CLKSP | FSGM | FPER(DEFAULT_BITPERSAMPLE * 2 - 1),
167 /*.pcr0 = FSXM | FSRM | CLKXM | CLKRM | CLKXP | CLKRP,*/ /* mcbsp: master */
168 .pcr0 = CLKXP | CLKRP, /* mcbsp: slave */
169};
170
171static struct omap_alsa_codec_config alsa_config = {
172 .name = "OSK AIC23",
173 .mcbsp_regs_alsa = &mcbsp_regs,
174 .codec_configure_dev = NULL, // aic23_configure,
175 .codec_set_samplerate = NULL, // aic23_set_samplerate,
176 .codec_clock_setup = NULL, // aic23_clock_setup,
177 .codec_clock_on = NULL, // aic23_clock_on,
178 .codec_clock_off = NULL, // aic23_clock_off,
179 .get_default_samplerate = NULL, // aic23_get_default_samplerate,
180};
181
136static struct platform_device osk5912_mcbsp1_device = { 182static struct platform_device osk5912_mcbsp1_device = {
137 .name = "omap_mcbsp", 183 .name = "omap_alsa_mcbsp",
138 .id = 1, 184 .id = 1,
185 .dev = {
186 .platform_data = &alsa_config,
187 },
188};
189
190static struct resource osk5912_kp_resources[] = {
191 [0] = {
192 .start = INT_KEYBOARD,
193 .end = INT_KEYBOARD,
194 .flags = IORESOURCE_IRQ,
195 },
196};
197
198static struct omap_kp_platform_data osk_kp_data = {
199 .rows = 8,
200 .cols = 8,
201 .keymap = osk_keymap,
202};
203
204static struct platform_device osk5912_kp_device = {
205 .name = "omap-keypad",
206 .id = -1,
207 .dev = {
208 .platform_data = &osk_kp_data,
209 },
210 .num_resources = ARRAY_SIZE(osk5912_kp_resources),
211 .resource = osk5912_kp_resources,
212};
213
214static struct platform_device osk5912_lcd_device = {
215 .name = "lcd_osk",
216 .id = -1,
139}; 217};
140 218
141static struct platform_device *osk5912_devices[] __initdata = { 219static struct platform_device *osk5912_devices[] __initdata = {
@@ -143,6 +221,8 @@ static struct platform_device *osk5912_devices[] __initdata = {
143 &osk5912_smc91x_device, 221 &osk5912_smc91x_device,
144 &osk5912_cf_device, 222 &osk5912_cf_device,
145 &osk5912_mcbsp1_device, 223 &osk5912_mcbsp1_device,
224 &osk5912_kp_device,
225 &osk5912_lcd_device,
146}; 226};
147 227
148static void __init osk_init_smc91x(void) 228static void __init osk_init_smc91x(void)
@@ -197,7 +277,6 @@ static struct omap_uart_config osk_uart_config __initdata = {
197}; 277};
198 278
199static struct omap_lcd_config osk_lcd_config __initdata = { 279static struct omap_lcd_config osk_lcd_config __initdata = {
200 .panel_name = "osk",
201 .ctrl_name = "internal", 280 .ctrl_name = "internal",
202}; 281};
203 282
@@ -255,8 +334,18 @@ static void __init osk_mistral_init(void)
255static void __init osk_mistral_init(void) { } 334static void __init osk_mistral_init(void) { }
256#endif 335#endif
257 336
337#define EMIFS_CS3_VAL (0x88013141)
338
258static void __init osk_init(void) 339static void __init osk_init(void)
259{ 340{
341 /* Workaround for wrong CS3 (NOR flash) timing
342 * There are some U-Boot versions out there which configure
343 * wrong CS3 memory timings. This mainly leads to CRC
344	 * or similar errors if you use NOR flash (e.g. with JFFS2)
345 */
346 if (EMIFS_CCS(3) != EMIFS_CS3_VAL)
347 EMIFS_CCS(3) = EMIFS_CS3_VAL;
348
260 osk_flash_resource.end = osk_flash_resource.start = omap_cs3_phys(); 349 osk_flash_resource.end = osk_flash_resource.start = omap_cs3_phys();
261 osk_flash_resource.end += SZ_32M - 1; 350 osk_flash_resource.end += SZ_32M - 1;
262 platform_add_devices(osk5912_devices, ARRAY_SIZE(osk5912_devices)); 351 platform_add_devices(osk5912_devices, ARRAY_SIZE(osk5912_devices));
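
The OSK hunk above attaches an omap_alsa_codec_config to the McBSP platform device but leaves every codec callback NULL for now; the intended aic23_* helpers are only named in comments. The general shape is a table of optional function pointers that the consumer must check before calling. A minimal standalone C sketch of that pattern, with hypothetical field names and a stub callback (none of this is the kernel's API):

#include <stdio.h>

/* Loosely models the omap_alsa_codec_config idea: a named table of optional
 * codec callbacks; entries left NULL (as in the patch) are simply skipped. */
struct codec_config {
	const char *name;
	void (*clock_on)(void);
	void (*clock_off)(void);
};

static void aic23_clock_on_stub(void) { puts("codec clock on"); }  /* stand-in */

static void codec_start(const struct codec_config *cfg)
{
	printf("starting %s\n", cfg->name);
	if (cfg->clock_on)            /* guard: the callback may be left NULL */
		cfg->clock_on();
}

int main(void)
{
	struct codec_config cfg = {
		.name     = "OSK AIC23",
		.clock_on = aic23_clock_on_stub,
		/* .clock_off intentionally left NULL, like the placeholders above */
	};

	codec_start(&cfg);
	return 0;
}
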
diff --git a/arch/arm/mach-omap1/board-palmte.c b/arch/arm/mach-omap1/board-palmte.c
index e488f7236775..4bc8a62909b9 100644
--- a/arch/arm/mach-omap1/board-palmte.c
+++ b/arch/arm/mach-omap1/board-palmte.c
@@ -38,6 +38,15 @@ static void __init omap_generic_init_irq(void)
38 omap_init_irq(); 38 omap_init_irq();
39} 39}
40 40
41static struct platform_device palmte_lcd_device = {
42 .name = "lcd_palmte",
43 .id = -1,
44};
45
46static struct platform_device *devices[] __initdata = {
47 &palmte_lcd_device,
48};
49
41static struct omap_usb_config palmte_usb_config __initdata = { 50static struct omap_usb_config palmte_usb_config __initdata = {
42 .register_dev = 1, 51 .register_dev = 1,
43 .hmc_mode = 0, 52 .hmc_mode = 0,
@@ -55,7 +64,6 @@ static struct omap_mmc_config palmte_mmc_config __initdata = {
55}; 64};
56 65
57static struct omap_lcd_config palmte_lcd_config __initdata = { 66static struct omap_lcd_config palmte_lcd_config __initdata = {
58 .panel_name = "palmte",
59 .ctrl_name = "internal", 67 .ctrl_name = "internal",
60}; 68};
61 69
@@ -69,6 +77,8 @@ static void __init omap_generic_init(void)
69{ 77{
70 omap_board_config = palmte_config; 78 omap_board_config = palmte_config;
71 omap_board_config_size = ARRAY_SIZE(palmte_config); 79 omap_board_config_size = ARRAY_SIZE(palmte_config);
80
81 platform_add_devices(devices, ARRAY_SIZE(devices));
72} 82}
73 83
74static void __init omap_generic_map_io(void) 84static void __init omap_generic_map_io(void)
diff --git a/arch/arm/mach-omap1/board-perseus2.c b/arch/arm/mach-omap1/board-perseus2.c
index 3913a3cc0ce6..64b45d8ae357 100644
--- a/arch/arm/mach-omap1/board-perseus2.c
+++ b/arch/arm/mach-omap1/board-perseus2.c
@@ -16,7 +16,9 @@
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/mtd/mtd.h> 18#include <linux/mtd/mtd.h>
19#include <linux/mtd/nand.h>
19#include <linux/mtd/partitions.h> 20#include <linux/mtd/partitions.h>
21#include <linux/input.h>
20 22
21#include <asm/hardware.h> 23#include <asm/hardware.h>
22#include <asm/mach-types.h> 24#include <asm/mach-types.h>
@@ -28,9 +30,44 @@
28#include <asm/arch/gpio.h> 30#include <asm/arch/gpio.h>
29#include <asm/arch/mux.h> 31#include <asm/arch/mux.h>
30#include <asm/arch/fpga.h> 32#include <asm/arch/fpga.h>
33#include <asm/arch/keypad.h>
31#include <asm/arch/common.h> 34#include <asm/arch/common.h>
32#include <asm/arch/board.h> 35#include <asm/arch/board.h>
33 36
37static int p2_keymap[] = {
38 KEY(0,0,KEY_UP),
39 KEY(0,1,KEY_RIGHT),
40 KEY(0,2,KEY_LEFT),
41 KEY(0,3,KEY_DOWN),
42 KEY(0,4,KEY_CENTER),
43 KEY(0,5,KEY_0_5),
44 KEY(1,0,KEY_SOFT2),
45 KEY(1,1,KEY_SEND),
46 KEY(1,2,KEY_END),
47 KEY(1,3,KEY_VOLUMEDOWN),
48 KEY(1,4,KEY_VOLUMEUP),
49 KEY(1,5,KEY_RECORD),
50 KEY(2,0,KEY_SOFT1),
51 KEY(2,1,KEY_3),
52 KEY(2,2,KEY_6),
53 KEY(2,3,KEY_9),
54 KEY(2,4,KEY_SHARP),
55 KEY(2,5,KEY_2_5),
56 KEY(3,0,KEY_BACK),
57 KEY(3,1,KEY_2),
58 KEY(3,2,KEY_5),
59 KEY(3,3,KEY_8),
60 KEY(3,4,KEY_0),
61 KEY(3,5,KEY_HEADSETHOOK),
62 KEY(4,0,KEY_HOME),
63 KEY(4,1,KEY_1),
64 KEY(4,2,KEY_4),
65 KEY(4,3,KEY_7),
66 KEY(4,4,KEY_STAR),
67 KEY(4,5,KEY_POWER),
68 0
69};
70
34static struct resource smc91x_resources[] = { 71static struct resource smc91x_resources[] = {
35 [0] = { 72 [0] = {
36 .start = H2P2_DBG_FPGA_ETHR_START, /* Physical */ 73 .start = H2P2_DBG_FPGA_ETHR_START, /* Physical */
@@ -44,7 +81,7 @@ static struct resource smc91x_resources[] = {
44 }, 81 },
45}; 82};
46 83
47static struct mtd_partition p2_partitions[] = { 84static struct mtd_partition nor_partitions[] = {
48 /* bootloader (U-Boot, etc) in first sector */ 85 /* bootloader (U-Boot, etc) in first sector */
49 { 86 {
50 .name = "bootloader", 87 .name = "bootloader",
@@ -75,27 +112,47 @@ static struct mtd_partition p2_partitions[] = {
75 }, 112 },
76}; 113};
77 114
78static struct flash_platform_data p2_flash_data = { 115static struct flash_platform_data nor_data = {
79 .map_name = "cfi_probe", 116 .map_name = "cfi_probe",
80 .width = 2, 117 .width = 2,
81 .parts = p2_partitions, 118 .parts = nor_partitions,
82 .nr_parts = ARRAY_SIZE(p2_partitions), 119 .nr_parts = ARRAY_SIZE(nor_partitions),
83}; 120};
84 121
85static struct resource p2_flash_resource = { 122static struct resource nor_resource = {
86 .start = OMAP_CS0_PHYS, 123 .start = OMAP_CS0_PHYS,
87 .end = OMAP_CS0_PHYS + SZ_32M - 1, 124 .end = OMAP_CS0_PHYS + SZ_32M - 1,
88 .flags = IORESOURCE_MEM, 125 .flags = IORESOURCE_MEM,
89}; 126};
90 127
91static struct platform_device p2_flash_device = { 128static struct platform_device nor_device = {
92 .name = "omapflash", 129 .name = "omapflash",
93 .id = 0, 130 .id = 0,
94 .dev = { 131 .dev = {
95 .platform_data = &p2_flash_data, 132 .platform_data = &nor_data,
133 },
134 .num_resources = 1,
135 .resource = &nor_resource,
136};
137
138static struct nand_platform_data nand_data = {
139 .options = NAND_SAMSUNG_LP_OPTIONS,
140};
141
142static struct resource nand_resource = {
143 .start = OMAP_CS3_PHYS,
144 .end = OMAP_CS3_PHYS + SZ_4K - 1,
145 .flags = IORESOURCE_MEM,
146};
147
148static struct platform_device nand_device = {
149 .name = "omapnand",
150 .id = 0,
151 .dev = {
152 .platform_data = &nand_data,
96 }, 153 },
97 .num_resources = 1, 154 .num_resources = 1,
98 .resource = &p2_flash_resource, 155 .resource = &nand_resource,
99}; 156};
100 157
101static struct platform_device smc91x_device = { 158static struct platform_device smc91x_device = {
@@ -105,17 +162,55 @@ static struct platform_device smc91x_device = {
105 .resource = smc91x_resources, 162 .resource = smc91x_resources,
106}; 163};
107 164
165static struct resource kp_resources[] = {
166 [0] = {
167 .start = INT_730_MPUIO_KEYPAD,
168 .end = INT_730_MPUIO_KEYPAD,
169 .flags = IORESOURCE_IRQ,
170 },
171};
172
173static struct omap_kp_platform_data kp_data = {
174 .rows = 8,
175 .cols = 8,
176 .keymap = p2_keymap,
177};
178
179static struct platform_device kp_device = {
180 .name = "omap-keypad",
181 .id = -1,
182 .dev = {
183 .platform_data = &kp_data,
184 },
185 .num_resources = ARRAY_SIZE(kp_resources),
186 .resource = kp_resources,
187};
188
189static struct platform_device lcd_device = {
190 .name = "lcd_p2",
191 .id = -1,
192};
193
108static struct platform_device *devices[] __initdata = { 194static struct platform_device *devices[] __initdata = {
109 &p2_flash_device, 195 &nor_device,
196 &nand_device,
110 &smc91x_device, 197 &smc91x_device,
198 &kp_device,
199 &lcd_device,
111}; 200};
112 201
202#define P2_NAND_RB_GPIO_PIN 62
203
204static int nand_dev_ready(struct nand_platform_data *data)
205{
206 return omap_get_gpio_datain(P2_NAND_RB_GPIO_PIN);
207}
208
113static struct omap_uart_config perseus2_uart_config __initdata = { 209static struct omap_uart_config perseus2_uart_config __initdata = {
114 .enabled_uarts = ((1 << 0) | (1 << 1)), 210 .enabled_uarts = ((1 << 0) | (1 << 1)),
115}; 211};
116 212
117static struct omap_lcd_config perseus2_lcd_config __initdata = { 213static struct omap_lcd_config perseus2_lcd_config __initdata = {
118 .panel_name = "p2",
119 .ctrl_name = "internal", 214 .ctrl_name = "internal",
120}; 215};
121 216
@@ -126,7 +221,13 @@ static struct omap_board_config_kernel perseus2_config[] = {
126 221
127static void __init omap_perseus2_init(void) 222static void __init omap_perseus2_init(void)
128{ 223{
129 (void) platform_add_devices(devices, ARRAY_SIZE(devices)); 224 if (!(omap_request_gpio(P2_NAND_RB_GPIO_PIN)))
225 nand_data.dev_ready = nand_dev_ready;
226
227 omap_cfg_reg(L3_1610_FLASH_CS2B_OE);
228 omap_cfg_reg(M8_1610_FLASH_CS2B_WE);
229
230 platform_add_devices(devices, ARRAY_SIZE(devices));
130 231
131 omap_board_config = perseus2_config; 232 omap_board_config = perseus2_config;
132 omap_board_config_size = ARRAY_SIZE(perseus2_config); 233 omap_board_config_size = ARRAY_SIZE(perseus2_config);
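
Both keypad hunks (Nokia 770 earlier and Perseus 2 here) hand the omap-keypad driver a flat, zero-terminated array of KEY(...) entries plus a rows/cols count through omap_kp_platform_data. The real bit layout of KEY() comes from <asm/arch/keypad.h>; the standalone sketch below invents its own packing purely to show how such a table can be searched, so the KEY macro, the accessors and lookup() are hypothetical:

#include <stdio.h>

/* Hypothetical packing: row in bits 28..31, column in bits 24..27, keycode in
 * the low bits. The real layout is defined by the OMAP keypad header. */
#define KEY(row, col, code)  (((row) << 28) | ((col) << 24) | (code))
#define KEY_ROW(k)           (((k) >> 28) & 0xf)
#define KEY_COL(k)           (((k) >> 24) & 0xf)
#define KEY_CODE(k)          ((k) & 0x00ffffff)

/* A few entries from the Perseus 2 map, re-encoded with the sketch macro
 * (103 = KEY_UP, 106 = KEY_RIGHT, 116 = KEY_POWER in the input layer). */
static const unsigned int keymap[] = {
	KEY(0, 0, 103),
	KEY(0, 1, 106),
	KEY(4, 5, 116),
	0                             /* zero-terminated, as in the patch */
};

/* Return the keycode for a scanned (row, col), or 0 if that position is unused. */
static unsigned int lookup(unsigned int row, unsigned int col)
{
	for (const unsigned int *k = keymap; *k; k++)
		if (KEY_ROW(*k) == row && KEY_COL(*k) == col)
			return KEY_CODE(*k);
	return 0;
}

int main(void)
{
	printf("(0,1) -> keycode %u\n", lookup(0, 1));
	printf("(7,7) -> keycode %u\n", lookup(7, 7));
	return 0;
}

The zero sentinel is what lets a driver walk the table without a separate length field, which is why every keymap in this patch ends with a bare 0 entry.
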
diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c
index 52e4a9d69642..447a586eb334 100644
--- a/arch/arm/mach-omap1/board-voiceblue.c
+++ b/arch/arm/mach-omap1/board-voiceblue.c
@@ -155,9 +155,9 @@ static struct omap_uart_config voiceblue_uart_config __initdata = {
155}; 155};
156 156
157static struct omap_board_config_kernel voiceblue_config[] = { 157static struct omap_board_config_kernel voiceblue_config[] = {
158 { OMAP_TAG_USB, &voiceblue_usb_config }, 158 { OMAP_TAG_USB, &voiceblue_usb_config },
159 { OMAP_TAG_MMC, &voiceblue_mmc_config }, 159 { OMAP_TAG_MMC, &voiceblue_mmc_config },
160 { OMAP_TAG_UART, &voiceblue_uart_config }, 160 { OMAP_TAG_UART, &voiceblue_uart_config },
161}; 161};
162 162
163static void __init voiceblue_init_irq(void) 163static void __init voiceblue_init_irq(void)
@@ -235,7 +235,7 @@ static struct notifier_block panic_block = {
235static int __init voiceblue_setup(void) 235static int __init voiceblue_setup(void)
236{ 236{
237 /* Setup panic notifier */ 237 /* Setup panic notifier */
238 atomic_notifier_chain_register(&panic_notifier_list, &panic_block); 238 notifier_chain_register(&panic_notifier_list, &panic_block);
239 239
240 return 0; 240 return 0;
241} 241}
diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c
index 75110ba10424..619db18144ea 100644
--- a/arch/arm/mach-omap1/clock.c
+++ b/arch/arm/mach-omap1/clock.c
@@ -345,7 +345,7 @@ static unsigned calc_ext_dsor(unsigned long rate)
345 */ 345 */
346 for (dsor = 2; dsor < 96; ++dsor) { 346 for (dsor = 2; dsor < 96; ++dsor) {
347 if ((dsor & 1) && dsor > 8) 347 if ((dsor & 1) && dsor > 8)
348 continue; 348 continue;
349 if (rate >= 96000000 / dsor) 349 if (rate >= 96000000 / dsor)
350 break; 350 break;
351 } 351 }
@@ -687,6 +687,11 @@ int __init omap1_clk_init(void)
687 clk_register(*clkp); 687 clk_register(*clkp);
688 continue; 688 continue;
689 } 689 }
690
691		if (((*clkp)->flags & CLOCK_IN_OMAP310) && cpu_is_omap310()) {
692 clk_register(*clkp);
693 continue;
694 }
690 } 695 }
691 696
692 info = omap_get_config(OMAP_TAG_CLOCK, struct omap_clock_config); 697 info = omap_get_config(OMAP_TAG_CLOCK, struct omap_clock_config);
@@ -784,7 +789,7 @@ int __init omap1_clk_init(void)
784 clk_enable(&armxor_ck.clk); 789 clk_enable(&armxor_ck.clk);
785 clk_enable(&armtim_ck.clk); /* This should be done by timer code */ 790 clk_enable(&armtim_ck.clk); /* This should be done by timer code */
786 791
787 if (cpu_is_omap1510()) 792 if (cpu_is_omap15xx())
788 clk_enable(&arm_gpio_ck); 793 clk_enable(&arm_gpio_ck);
789 794
790 return 0; 795 return 0;
diff --git a/arch/arm/mach-omap1/clock.h b/arch/arm/mach-omap1/clock.h
index 4f18d1b94449..b7c68819c4e7 100644
--- a/arch/arm/mach-omap1/clock.h
+++ b/arch/arm/mach-omap1/clock.h
@@ -151,7 +151,7 @@ static struct clk ck_ref = {
151 .name = "ck_ref", 151 .name = "ck_ref",
152 .rate = 12000000, 152 .rate = 12000000,
153 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | 153 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
154 ALWAYS_ENABLED, 154 CLOCK_IN_OMAP310 | ALWAYS_ENABLED,
155 .enable = &omap1_clk_enable_generic, 155 .enable = &omap1_clk_enable_generic,
156 .disable = &omap1_clk_disable_generic, 156 .disable = &omap1_clk_disable_generic,
157}; 157};
@@ -160,7 +160,7 @@ static struct clk ck_dpll1 = {
160 .name = "ck_dpll1", 160 .name = "ck_dpll1",
161 .parent = &ck_ref, 161 .parent = &ck_ref,
162 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | 162 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
163 RATE_PROPAGATES | ALWAYS_ENABLED, 163 CLOCK_IN_OMAP310 | RATE_PROPAGATES | ALWAYS_ENABLED,
164 .enable = &omap1_clk_enable_generic, 164 .enable = &omap1_clk_enable_generic,
165 .disable = &omap1_clk_disable_generic, 165 .disable = &omap1_clk_disable_generic,
166}; 166};
@@ -183,7 +183,8 @@ static struct clk arm_ck = {
183 .name = "arm_ck", 183 .name = "arm_ck",
184 .parent = &ck_dpll1, 184 .parent = &ck_dpll1,
185 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | 185 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
186 RATE_CKCTL | RATE_PROPAGATES | ALWAYS_ENABLED, 186 CLOCK_IN_OMAP310 | RATE_CKCTL | RATE_PROPAGATES |
187 ALWAYS_ENABLED,
187 .rate_offset = CKCTL_ARMDIV_OFFSET, 188 .rate_offset = CKCTL_ARMDIV_OFFSET,
188 .recalc = &omap1_ckctl_recalc, 189 .recalc = &omap1_ckctl_recalc,
189 .enable = &omap1_clk_enable_generic, 190 .enable = &omap1_clk_enable_generic,
@@ -195,7 +196,8 @@ static struct arm_idlect1_clk armper_ck = {
195 .name = "armper_ck", 196 .name = "armper_ck",
196 .parent = &ck_dpll1, 197 .parent = &ck_dpll1,
197 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | 198 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
198 RATE_CKCTL | CLOCK_IDLE_CONTROL, 199 CLOCK_IN_OMAP310 | RATE_CKCTL |
200 CLOCK_IDLE_CONTROL,
199 .enable_reg = (void __iomem *)ARM_IDLECT2, 201 .enable_reg = (void __iomem *)ARM_IDLECT2,
200 .enable_bit = EN_PERCK, 202 .enable_bit = EN_PERCK,
201 .rate_offset = CKCTL_PERDIV_OFFSET, 203 .rate_offset = CKCTL_PERDIV_OFFSET,
@@ -209,7 +211,7 @@ static struct arm_idlect1_clk armper_ck = {
209static struct clk arm_gpio_ck = { 211static struct clk arm_gpio_ck = {
210 .name = "arm_gpio_ck", 212 .name = "arm_gpio_ck",
211 .parent = &ck_dpll1, 213 .parent = &ck_dpll1,
212 .flags = CLOCK_IN_OMAP1510, 214 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310,
213 .enable_reg = (void __iomem *)ARM_IDLECT2, 215 .enable_reg = (void __iomem *)ARM_IDLECT2,
214 .enable_bit = EN_GPIOCK, 216 .enable_bit = EN_GPIOCK,
215 .recalc = &followparent_recalc, 217 .recalc = &followparent_recalc,
@@ -222,7 +224,7 @@ static struct arm_idlect1_clk armxor_ck = {
222 .name = "armxor_ck", 224 .name = "armxor_ck",
223 .parent = &ck_ref, 225 .parent = &ck_ref,
224 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | 226 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
225 CLOCK_IDLE_CONTROL, 227 CLOCK_IN_OMAP310 | CLOCK_IDLE_CONTROL,
226 .enable_reg = (void __iomem *)ARM_IDLECT2, 228 .enable_reg = (void __iomem *)ARM_IDLECT2,
227 .enable_bit = EN_XORPCK, 229 .enable_bit = EN_XORPCK,
228 .recalc = &followparent_recalc, 230 .recalc = &followparent_recalc,
@@ -237,7 +239,7 @@ static struct arm_idlect1_clk armtim_ck = {
237 .name = "armtim_ck", 239 .name = "armtim_ck",
238 .parent = &ck_ref, 240 .parent = &ck_ref,
239 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | 241 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
240 CLOCK_IDLE_CONTROL, 242 CLOCK_IN_OMAP310 | CLOCK_IDLE_CONTROL,
241 .enable_reg = (void __iomem *)ARM_IDLECT2, 243 .enable_reg = (void __iomem *)ARM_IDLECT2,
242 .enable_bit = EN_TIMCK, 244 .enable_bit = EN_TIMCK,
243 .recalc = &followparent_recalc, 245 .recalc = &followparent_recalc,
@@ -252,7 +254,7 @@ static struct arm_idlect1_clk armwdt_ck = {
252 .name = "armwdt_ck", 254 .name = "armwdt_ck",
253 .parent = &ck_ref, 255 .parent = &ck_ref,
254 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | 256 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
255 CLOCK_IDLE_CONTROL, 257 CLOCK_IN_OMAP310 | CLOCK_IDLE_CONTROL,
256 .enable_reg = (void __iomem *)ARM_IDLECT2, 258 .enable_reg = (void __iomem *)ARM_IDLECT2,
257 .enable_bit = EN_WDTCK, 259 .enable_bit = EN_WDTCK,
258 .recalc = &omap1_watchdog_recalc, 260 .recalc = &omap1_watchdog_recalc,
@@ -344,9 +346,9 @@ static struct arm_idlect1_clk tc_ck = {
344 .name = "tc_ck", 346 .name = "tc_ck",
345 .parent = &ck_dpll1, 347 .parent = &ck_dpll1,
346 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | 348 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
347 CLOCK_IN_OMAP730 | RATE_CKCTL | 349 CLOCK_IN_OMAP730 | CLOCK_IN_OMAP310 |
348 RATE_PROPAGATES | ALWAYS_ENABLED | 350 RATE_CKCTL | RATE_PROPAGATES |
349 CLOCK_IDLE_CONTROL, 351 ALWAYS_ENABLED | CLOCK_IDLE_CONTROL,
350 .rate_offset = CKCTL_TCDIV_OFFSET, 352 .rate_offset = CKCTL_TCDIV_OFFSET,
351 .recalc = &omap1_ckctl_recalc, 353 .recalc = &omap1_ckctl_recalc,
352 .enable = &omap1_clk_enable_generic, 354 .enable = &omap1_clk_enable_generic,
@@ -358,7 +360,8 @@ static struct arm_idlect1_clk tc_ck = {
358static struct clk arminth_ck1510 = { 360static struct clk arminth_ck1510 = {
359 .name = "arminth_ck", 361 .name = "arminth_ck",
360 .parent = &tc_ck.clk, 362 .parent = &tc_ck.clk,
361 .flags = CLOCK_IN_OMAP1510 | ALWAYS_ENABLED, 363 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 |
364 ALWAYS_ENABLED,
362 .recalc = &followparent_recalc, 365 .recalc = &followparent_recalc,
363 /* Note: On 1510 the frequency follows TC_CK 366 /* Note: On 1510 the frequency follows TC_CK
364 * 367 *
@@ -372,7 +375,8 @@ static struct clk tipb_ck = {
372 /* No-idle controlled by "tc_ck" */ 375 /* No-idle controlled by "tc_ck" */
373 .name = "tibp_ck", 376 .name = "tibp_ck",
374 .parent = &tc_ck.clk, 377 .parent = &tc_ck.clk,
375 .flags = CLOCK_IN_OMAP1510 | ALWAYS_ENABLED, 378 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 |
379 ALWAYS_ENABLED,
376 .recalc = &followparent_recalc, 380 .recalc = &followparent_recalc,
377 .enable = &omap1_clk_enable_generic, 381 .enable = &omap1_clk_enable_generic,
378 .disable = &omap1_clk_disable_generic, 382 .disable = &omap1_clk_disable_generic,
@@ -417,7 +421,7 @@ static struct clk dma_ck = {
417 .name = "dma_ck", 421 .name = "dma_ck",
418 .parent = &tc_ck.clk, 422 .parent = &tc_ck.clk,
419 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | 423 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
420 ALWAYS_ENABLED, 424 CLOCK_IN_OMAP310 | ALWAYS_ENABLED,
421 .recalc = &followparent_recalc, 425 .recalc = &followparent_recalc,
422 .enable = &omap1_clk_enable_generic, 426 .enable = &omap1_clk_enable_generic,
423 .disable = &omap1_clk_disable_generic, 427 .disable = &omap1_clk_disable_generic,
@@ -437,7 +441,7 @@ static struct arm_idlect1_clk api_ck = {
437 .name = "api_ck", 441 .name = "api_ck",
438 .parent = &tc_ck.clk, 442 .parent = &tc_ck.clk,
439 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | 443 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
440 CLOCK_IDLE_CONTROL, 444 CLOCK_IN_OMAP310 | CLOCK_IDLE_CONTROL,
441 .enable_reg = (void __iomem *)ARM_IDLECT2, 445 .enable_reg = (void __iomem *)ARM_IDLECT2,
442 .enable_bit = EN_APICK, 446 .enable_bit = EN_APICK,
443 .recalc = &followparent_recalc, 447 .recalc = &followparent_recalc,
@@ -451,7 +455,8 @@ static struct arm_idlect1_clk lb_ck = {
451 .clk = { 455 .clk = {
452 .name = "lb_ck", 456 .name = "lb_ck",
453 .parent = &tc_ck.clk, 457 .parent = &tc_ck.clk,
454 .flags = CLOCK_IN_OMAP1510 | CLOCK_IDLE_CONTROL, 458 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 |
459 CLOCK_IDLE_CONTROL,
455 .enable_reg = (void __iomem *)ARM_IDLECT2, 460 .enable_reg = (void __iomem *)ARM_IDLECT2,
456 .enable_bit = EN_LBCK, 461 .enable_bit = EN_LBCK,
457 .recalc = &followparent_recalc, 462 .recalc = &followparent_recalc,
@@ -495,8 +500,8 @@ static struct arm_idlect1_clk lcd_ck_1510 = {
495 .clk = { 500 .clk = {
496 .name = "lcd_ck", 501 .name = "lcd_ck",
497 .parent = &ck_dpll1, 502 .parent = &ck_dpll1,
498 .flags = CLOCK_IN_OMAP1510 | RATE_CKCTL | 503 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 |
499 CLOCK_IDLE_CONTROL, 504 RATE_CKCTL | CLOCK_IDLE_CONTROL,
500 .enable_reg = (void __iomem *)ARM_IDLECT2, 505 .enable_reg = (void __iomem *)ARM_IDLECT2,
501 .enable_bit = EN_LCDCK, 506 .enable_bit = EN_LCDCK,
502 .rate_offset = CKCTL_LCDDIV_OFFSET, 507 .rate_offset = CKCTL_LCDDIV_OFFSET,
@@ -512,8 +517,9 @@ static struct clk uart1_1510 = {
512 /* Direct from ULPD, no real parent */ 517 /* Direct from ULPD, no real parent */
513 .parent = &armper_ck.clk, 518 .parent = &armper_ck.clk,
514 .rate = 12000000, 519 .rate = 12000000,
515 .flags = CLOCK_IN_OMAP1510 | ENABLE_REG_32BIT | 520 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 |
516 ALWAYS_ENABLED | CLOCK_NO_IDLE_PARENT, 521 ENABLE_REG_32BIT | ALWAYS_ENABLED |
522 CLOCK_NO_IDLE_PARENT,
517 .enable_reg = (void __iomem *)MOD_CONF_CTRL_0, 523 .enable_reg = (void __iomem *)MOD_CONF_CTRL_0,
518 .enable_bit = 29, /* Chooses between 12MHz and 48MHz */ 524 .enable_bit = 29, /* Chooses between 12MHz and 48MHz */
519 .set_rate = &omap1_set_uart_rate, 525 .set_rate = &omap1_set_uart_rate,
@@ -544,8 +550,8 @@ static struct clk uart2_ck = {
544 .parent = &armper_ck.clk, 550 .parent = &armper_ck.clk,
545 .rate = 12000000, 551 .rate = 12000000,
546 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | 552 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
547 ENABLE_REG_32BIT | ALWAYS_ENABLED | 553 CLOCK_IN_OMAP310 | ENABLE_REG_32BIT |
548 CLOCK_NO_IDLE_PARENT, 554 ALWAYS_ENABLED | CLOCK_NO_IDLE_PARENT,
549 .enable_reg = (void __iomem *)MOD_CONF_CTRL_0, 555 .enable_reg = (void __iomem *)MOD_CONF_CTRL_0,
550 .enable_bit = 30, /* Chooses between 12MHz and 48MHz */ 556 .enable_bit = 30, /* Chooses between 12MHz and 48MHz */
551 .set_rate = &omap1_set_uart_rate, 557 .set_rate = &omap1_set_uart_rate,
@@ -559,8 +565,9 @@ static struct clk uart3_1510 = {
559 /* Direct from ULPD, no real parent */ 565 /* Direct from ULPD, no real parent */
560 .parent = &armper_ck.clk, 566 .parent = &armper_ck.clk,
561 .rate = 12000000, 567 .rate = 12000000,
562 .flags = CLOCK_IN_OMAP1510 | ENABLE_REG_32BIT | 568 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 |
563 ALWAYS_ENABLED | CLOCK_NO_IDLE_PARENT, 569 ENABLE_REG_32BIT | ALWAYS_ENABLED |
570 CLOCK_NO_IDLE_PARENT,
564 .enable_reg = (void __iomem *)MOD_CONF_CTRL_0, 571 .enable_reg = (void __iomem *)MOD_CONF_CTRL_0,
565 .enable_bit = 31, /* Chooses between 12MHz and 48MHz */ 572 .enable_bit = 31, /* Chooses between 12MHz and 48MHz */
566 .set_rate = &omap1_set_uart_rate, 573 .set_rate = &omap1_set_uart_rate,
@@ -590,7 +597,7 @@ static struct clk usb_clko = { /* 6 MHz output on W4_USB_CLKO */
590 /* Direct from ULPD, no parent */ 597 /* Direct from ULPD, no parent */
591 .rate = 6000000, 598 .rate = 6000000,
592 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | 599 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
593 RATE_FIXED | ENABLE_REG_32BIT, 600 CLOCK_IN_OMAP310 | RATE_FIXED | ENABLE_REG_32BIT,
594 .enable_reg = (void __iomem *)ULPD_CLOCK_CTRL, 601 .enable_reg = (void __iomem *)ULPD_CLOCK_CTRL,
595 .enable_bit = USB_MCLK_EN_BIT, 602 .enable_bit = USB_MCLK_EN_BIT,
596 .enable = &omap1_clk_enable_generic, 603 .enable = &omap1_clk_enable_generic,
@@ -601,7 +608,7 @@ static struct clk usb_hhc_ck1510 = {
601 .name = "usb_hhc_ck", 608 .name = "usb_hhc_ck",
602 /* Direct from ULPD, no parent */ 609 /* Direct from ULPD, no parent */
603 .rate = 48000000, /* Actually 2 clocks, 12MHz and 48MHz */ 610 .rate = 48000000, /* Actually 2 clocks, 12MHz and 48MHz */
604 .flags = CLOCK_IN_OMAP1510 | 611 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 |
605 RATE_FIXED | ENABLE_REG_32BIT, 612 RATE_FIXED | ENABLE_REG_32BIT,
606 .enable_reg = (void __iomem *)MOD_CONF_CTRL_0, 613 .enable_reg = (void __iomem *)MOD_CONF_CTRL_0,
607 .enable_bit = USB_HOST_HHC_UHOST_EN, 614 .enable_bit = USB_HOST_HHC_UHOST_EN,
@@ -637,7 +644,9 @@ static struct clk mclk_1510 = {
637 .name = "mclk", 644 .name = "mclk",
638 /* Direct from ULPD, no parent. May be enabled by ext hardware. */ 645 /* Direct from ULPD, no parent. May be enabled by ext hardware. */
639 .rate = 12000000, 646 .rate = 12000000,
640 .flags = CLOCK_IN_OMAP1510 | RATE_FIXED, 647 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | RATE_FIXED,
648 .enable_reg = (void __iomem *)SOFT_REQ_REG,
649 .enable_bit = 6,
641 .enable = &omap1_clk_enable_generic, 650 .enable = &omap1_clk_enable_generic,
642 .disable = &omap1_clk_disable_generic, 651 .disable = &omap1_clk_disable_generic,
643}; 652};
@@ -659,7 +668,7 @@ static struct clk bclk_1510 = {
659 .name = "bclk", 668 .name = "bclk",
660 /* Direct from ULPD, no parent. May be enabled by ext hardware. */ 669 /* Direct from ULPD, no parent. May be enabled by ext hardware. */
661 .rate = 12000000, 670 .rate = 12000000,
662 .flags = CLOCK_IN_OMAP1510 | RATE_FIXED, 671 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | RATE_FIXED,
663 .enable = &omap1_clk_enable_generic, 672 .enable = &omap1_clk_enable_generic,
664 .disable = &omap1_clk_disable_generic, 673 .disable = &omap1_clk_disable_generic,
665}; 674};
@@ -678,12 +687,14 @@ static struct clk bclk_16xx = {
678}; 687};
679 688
680static struct clk mmc1_ck = { 689static struct clk mmc1_ck = {
681 .name = "mmc1_ck", 690 .name = "mmc_ck",
691 .id = 1,
682 /* Functional clock is direct from ULPD, interface clock is ARMPER */ 692 /* Functional clock is direct from ULPD, interface clock is ARMPER */
683 .parent = &armper_ck.clk, 693 .parent = &armper_ck.clk,
684 .rate = 48000000, 694 .rate = 48000000,
685 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | 695 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
686 RATE_FIXED | ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT, 696 CLOCK_IN_OMAP310 | RATE_FIXED | ENABLE_REG_32BIT |
697 CLOCK_NO_IDLE_PARENT,
687 .enable_reg = (void __iomem *)MOD_CONF_CTRL_0, 698 .enable_reg = (void __iomem *)MOD_CONF_CTRL_0,
688 .enable_bit = 23, 699 .enable_bit = 23,
689 .enable = &omap1_clk_enable_generic, 700 .enable = &omap1_clk_enable_generic,
@@ -691,7 +702,8 @@ static struct clk mmc1_ck = {
691}; 702};
692 703
693static struct clk mmc2_ck = { 704static struct clk mmc2_ck = {
694 .name = "mmc2_ck", 705 .name = "mmc_ck",
706 .id = 2,
695 /* Functional clock is direct from ULPD, interface clock is ARMPER */ 707 /* Functional clock is direct from ULPD, interface clock is ARMPER */
696 .parent = &armper_ck.clk, 708 .parent = &armper_ck.clk,
697 .rate = 48000000, 709 .rate = 48000000,
@@ -706,7 +718,7 @@ static struct clk mmc2_ck = {
706static struct clk virtual_ck_mpu = { 718static struct clk virtual_ck_mpu = {
707 .name = "mpu", 719 .name = "mpu",
708 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | 720 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
709 VIRTUAL_CLOCK | ALWAYS_ENABLED, 721 CLOCK_IN_OMAP310 | VIRTUAL_CLOCK | ALWAYS_ENABLED,
710 .parent = &arm_ck, /* Is smarter alias for */ 722 .parent = &arm_ck, /* Is smarter alias for */
711 .recalc = &followparent_recalc, 723 .recalc = &followparent_recalc,
712 .set_rate = &omap1_select_table_rate, 724 .set_rate = &omap1_select_table_rate,
@@ -715,6 +727,20 @@ static struct clk virtual_ck_mpu = {
715 .disable = &omap1_clk_disable_generic, 727 .disable = &omap1_clk_disable_generic,
716}; 728};
717 729
730/* Virtual functional clock domain for I2C. Just for making sure that ARMXOR_CK
731 * remains active during MPU idle whenever this is enabled. */
732static struct clk i2c_fck = {
733 .name = "i2c_fck",
734 .id = 1,
735 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
736 VIRTUAL_CLOCK | CLOCK_NO_IDLE_PARENT |
737 ALWAYS_ENABLED,
738 .parent = &armxor_ck.clk,
739 .recalc = &followparent_recalc,
740 .enable = &omap1_clk_enable_generic,
741 .disable = &omap1_clk_disable_generic,
742};
743
718static struct clk * onchip_clks[] = { 744static struct clk * onchip_clks[] = {
719 /* non-ULPD clocks */ 745 /* non-ULPD clocks */
720 &ck_ref, 746 &ck_ref,
@@ -763,6 +789,7 @@ static struct clk * onchip_clks[] = {
763 &mmc2_ck, 789 &mmc2_ck,
764 /* Virtual clocks */ 790 /* Virtual clocks */
765 &virtual_ck_mpu, 791 &virtual_ck_mpu,
792 &i2c_fck,
766}; 793};
767 794
768#endif 795#endif
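
The clock.c and clock.h changes all follow one pattern: every struct clk carries CLOCK_IN_* family flags, and omap1_clk_init() registers a clock only when one of those flags matches the CPU probed at runtime; the hunks above add CLOCK_IN_OMAP310 as a third family alongside the 1510 and 16xx ones. A standalone C sketch of that flag-gated registration, with stand-in cpu probes and a toy clock list (not the kernel's implementation):

#include <stdio.h>

#define CLOCK_IN_OMAP1510  (1 << 0)
#define CLOCK_IN_OMAP16XX  (1 << 1)
#define CLOCK_IN_OMAP310   (1 << 2)   /* the family added by these hunks */

struct clk {
	const char   *name;
	unsigned int  flags;
};

static struct clk ck_ref    = { "ck_ref",    CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 };
static struct clk bclk_16xx = { "bclk_16xx", CLOCK_IN_OMAP16XX };
static struct clk *onchip_clks[] = { &ck_ref, &bclk_16xx };

/* Stand-ins for the kernel's cpu_is_omap*() probes. */
static int cpu_is_omap1510(void) { return 0; }
static int cpu_is_omap16xx(void) { return 0; }
static int cpu_is_omap310(void)  { return 1; }

static void clk_register(struct clk *clk) { printf("registered %s\n", clk->name); }

int main(void)
{
	for (unsigned int i = 0; i < sizeof(onchip_clks) / sizeof(onchip_clks[0]); i++) {
		struct clk *clkp = onchip_clks[i];

		/* Register the clock only for the SoC families it is flagged for,
		 * mirroring the if/continue chain in omap1_clk_init(). */
		if ((clkp->flags & CLOCK_IN_OMAP1510) && cpu_is_omap1510())
			clk_register(clkp);
		else if ((clkp->flags & CLOCK_IN_OMAP16XX) && cpu_is_omap16xx())
			clk_register(clkp);
		else if ((clkp->flags & CLOCK_IN_OMAP310) && cpu_is_omap310())
			clk_register(clkp);
	}
	return 0;
}

On an OMAP310 this registers ck_ref but skips bclk_16xx, which is exactly the effect of sprinkling CLOCK_IN_OMAP310 over the shared clocks in clock.h while leaving the 16xx-only ones alone.
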
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
index ecbc47514adc..876c38da14f7 100644
--- a/arch/arm/mach-omap1/devices.c
+++ b/arch/arm/mach-omap1/devices.c
@@ -99,6 +99,45 @@ static void omap_init_rtc(void)
99static inline void omap_init_rtc(void) {} 99static inline void omap_init_rtc(void) {}
100#endif 100#endif
101 101
102#if defined(CONFIG_OMAP_STI)
103
104#define OMAP1_STI_BASE IO_ADDRESS(0xfffea000)
105#define OMAP1_STI_CHANNEL_BASE (OMAP1_STI_BASE + 0x400)
106
107static struct resource sti_resources[] = {
108 {
109 .start = OMAP1_STI_BASE,
110 .end = OMAP1_STI_BASE + SZ_1K - 1,
111 .flags = IORESOURCE_MEM,
112 },
113 {
114 .start = OMAP1_STI_CHANNEL_BASE,
115 .end = OMAP1_STI_CHANNEL_BASE + SZ_1K - 1,
116 .flags = IORESOURCE_MEM,
117 },
118 {
119 .start = INT_1610_STI,
120 .flags = IORESOURCE_IRQ,
121 }
122};
123
124static struct platform_device sti_device = {
125 .name = "sti",
126 .id = -1,
127 .dev = {
128 .release = omap_nop_release,
129 },
130 .num_resources = ARRAY_SIZE(sti_resources),
131 .resource = sti_resources,
132};
133
134static inline void omap_init_sti(void)
135{
136 platform_device_register(&sti_device);
137}
138#else
139static inline void omap_init_sti(void) {}
140#endif
102 141
103/*-------------------------------------------------------------------------*/ 142/*-------------------------------------------------------------------------*/
104 143
@@ -129,6 +168,7 @@ static int __init omap1_init_devices(void)
129 */ 168 */
130 omap_init_irda(); 169 omap_init_irda();
131 omap_init_rtc(); 170 omap_init_rtc();
171 omap_init_sti();
132 172
133 return 0; 173 return 0;
134} 174}
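
The STI addition in devices.c uses the usual compile-time stubbing idiom: when CONFIG_OMAP_STI is not set, omap_init_sti() becomes an empty static inline, so omap1_init_devices() can call it unconditionally without an #ifdef at the call site. A tiny standalone sketch of that idiom (register_device() here is just a stand-in for platform_device_register()):

#include <stdio.h>

#define CONFIG_OMAP_STI 1   /* comment this out to build the no-op stub instead */

static void register_device(const char *name) { printf("register %s\n", name); }

#ifdef CONFIG_OMAP_STI
static inline void omap_init_sti(void) { register_device("sti"); }
#else
static inline void omap_init_sti(void) { /* compiles away when STI is off */ }
#endif

int main(void)
{
	omap_init_sti();   /* the caller never needs an #ifdef of its own */
	return 0;
}
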
diff --git a/arch/arm/mach-omap1/io.c b/arch/arm/mach-omap1/io.c
index 82d556be79c5..be3a2a4ee2b8 100644
--- a/arch/arm/mach-omap1/io.c
+++ b/arch/arm/mach-omap1/io.c
@@ -18,6 +18,7 @@
18#include <asm/io.h> 18#include <asm/io.h>
19#include <asm/arch/mux.h> 19#include <asm/arch/mux.h>
20#include <asm/arch/tc.h> 20#include <asm/arch/tc.h>
21#include <asm/arch/omapfb.h>
21 22
22extern int omap1_clk_init(void); 23extern int omap1_clk_init(void);
23extern void omap_check_revision(void); 24extern void omap_check_revision(void);
@@ -110,7 +111,7 @@ void __init omap1_map_common_io(void)
110 } 111 }
111#endif 112#endif
112#ifdef CONFIG_ARCH_OMAP15XX 113#ifdef CONFIG_ARCH_OMAP15XX
113 if (cpu_is_omap1510()) { 114 if (cpu_is_omap15xx()) {
114 iotable_init(omap1510_io_desc, ARRAY_SIZE(omap1510_io_desc)); 115 iotable_init(omap1510_io_desc, ARRAY_SIZE(omap1510_io_desc));
115 } 116 }
116#endif 117#endif
@@ -121,6 +122,7 @@ void __init omap1_map_common_io(void)
121#endif 122#endif
122 123
123 omap_sram_init(); 124 omap_sram_init();
125 omapfb_reserve_mem();
124} 126}
125 127
126/* 128/*
diff --git a/arch/arm/mach-omap1/irq.c b/arch/arm/mach-omap1/irq.c
index ed65a7d2e941..a0431c00fa81 100644
--- a/arch/arm/mach-omap1/irq.c
+++ b/arch/arm/mach-omap1/irq.c
@@ -60,7 +60,7 @@ struct omap_irq_bank {
60 unsigned long wake_enable; 60 unsigned long wake_enable;
61}; 61};
62 62
63static unsigned int irq_bank_count = 0; 63static unsigned int irq_bank_count;
64static struct omap_irq_bank *irq_banks; 64static struct omap_irq_bank *irq_banks;
65 65
66static inline unsigned int irq_bank_readl(int bank, int offset) 66static inline unsigned int irq_bank_readl(int bank, int offset)
@@ -142,28 +142,28 @@ static void omap_irq_set_cfg(int irq, int fiq, int priority, int trigger)
142 142
143#ifdef CONFIG_ARCH_OMAP730 143#ifdef CONFIG_ARCH_OMAP730
144static struct omap_irq_bank omap730_irq_banks[] = { 144static struct omap_irq_bank omap730_irq_banks[] = {
145 { .base_reg = OMAP_IH1_BASE, .trigger_map = 0xb3f8e22f }, 145 { .base_reg = OMAP_IH1_BASE, .trigger_map = 0xb3f8e22f },
146 { .base_reg = OMAP_IH2_BASE, .trigger_map = 0xfdb9c1f2 }, 146 { .base_reg = OMAP_IH2_BASE, .trigger_map = 0xfdb9c1f2 },
147 { .base_reg = OMAP_IH2_BASE + 0x100, .trigger_map = 0x800040f3 }, 147 { .base_reg = OMAP_IH2_BASE + 0x100, .trigger_map = 0x800040f3 },
148}; 148};
149#endif 149#endif
150 150
151#ifdef CONFIG_ARCH_OMAP15XX 151#ifdef CONFIG_ARCH_OMAP15XX
152static struct omap_irq_bank omap1510_irq_banks[] = { 152static struct omap_irq_bank omap1510_irq_banks[] = {
153 { .base_reg = OMAP_IH1_BASE, .trigger_map = 0xb3febfff }, 153 { .base_reg = OMAP_IH1_BASE, .trigger_map = 0xb3febfff },
154 { .base_reg = OMAP_IH2_BASE, .trigger_map = 0xffbfffed }, 154 { .base_reg = OMAP_IH2_BASE, .trigger_map = 0xffbfffed },
155}; 155};
156static struct omap_irq_bank omap310_irq_banks[] = { 156static struct omap_irq_bank omap310_irq_banks[] = {
157 { .base_reg = OMAP_IH1_BASE, .trigger_map = 0xb3faefc3 }, 157 { .base_reg = OMAP_IH1_BASE, .trigger_map = 0xb3faefc3 },
158 { .base_reg = OMAP_IH2_BASE, .trigger_map = 0x65b3c061 }, 158 { .base_reg = OMAP_IH2_BASE, .trigger_map = 0x65b3c061 },
159}; 159};
160#endif 160#endif
161 161
162#if defined(CONFIG_ARCH_OMAP16XX) 162#if defined(CONFIG_ARCH_OMAP16XX)
163 163
164static struct omap_irq_bank omap1610_irq_banks[] = { 164static struct omap_irq_bank omap1610_irq_banks[] = {
165 { .base_reg = OMAP_IH1_BASE, .trigger_map = 0xb3fefe8f }, 165 { .base_reg = OMAP_IH1_BASE, .trigger_map = 0xb3fefe8f },
166 { .base_reg = OMAP_IH2_BASE, .trigger_map = 0xfdb7c1fd }, 166 { .base_reg = OMAP_IH2_BASE, .trigger_map = 0xfdb7c1fd },
167 { .base_reg = OMAP_IH2_BASE + 0x100, .trigger_map = 0xffffb7ff }, 167 { .base_reg = OMAP_IH2_BASE + 0x100, .trigger_map = 0xffffb7ff },
168 { .base_reg = OMAP_IH2_BASE + 0x200, .trigger_map = 0xffffffff }, 168 { .base_reg = OMAP_IH2_BASE + 0x200, .trigger_map = 0xffffffff },
169}; 169};
diff --git a/arch/arm/mach-omap1/mux.c b/arch/arm/mach-omap1/mux.c
index d4b8d624e742..10fe0b3efcac 100644
--- a/arch/arm/mach-omap1/mux.c
+++ b/arch/arm/mach-omap1/mux.c
@@ -35,16 +35,20 @@
35 35
36#ifdef CONFIG_ARCH_OMAP730 36#ifdef CONFIG_ARCH_OMAP730
37struct pin_config __initdata_or_module omap730_pins[] = { 37struct pin_config __initdata_or_module omap730_pins[] = {
38MUX_CFG_730("E2_730_KBR0", 12, 21, 0, 0, 20, 1, NA, 0, 0) 38MUX_CFG_730("E2_730_KBR0", 12, 21, 0, 20, 1, 0)
39MUX_CFG_730("J7_730_KBR1", 12, 25, 0, 0, 24, 1, NA, 0, 0) 39MUX_CFG_730("J7_730_KBR1", 12, 25, 0, 24, 1, 0)
40MUX_CFG_730("E1_730_KBR2", 12, 29, 0, 0, 28, 1, NA, 0, 0) 40MUX_CFG_730("E1_730_KBR2", 12, 29, 0, 28, 1, 0)
41MUX_CFG_730("F3_730_KBR3", 13, 1, 0, 0, 0, 1, NA, 0, 0) 41MUX_CFG_730("F3_730_KBR3", 13, 1, 0, 0, 1, 0)
42MUX_CFG_730("D2_730_KBR4", 13, 5, 0, 0, 4, 1, NA, 0, 0) 42MUX_CFG_730("D2_730_KBR4", 13, 5, 0, 4, 1, 0)
43MUX_CFG_730("C2_730_KBC0", 13, 9, 0, 0, 8, 1, NA, 0, 0) 43MUX_CFG_730("C2_730_KBC0", 13, 9, 0, 8, 1, 0)
44MUX_CFG_730("D3_730_KBC1", 13, 13, 0, 0, 12, 1, NA, 0, 0) 44MUX_CFG_730("D3_730_KBC1", 13, 13, 0, 12, 1, 0)
45MUX_CFG_730("E4_730_KBC2", 13, 17, 0, 0, 16, 1, NA, 0, 0) 45MUX_CFG_730("E4_730_KBC2", 13, 17, 0, 16, 1, 0)
46MUX_CFG_730("F4_730_KBC3", 13, 21, 0, 0, 20, 1, NA, 0, 0) 46MUX_CFG_730("F4_730_KBC3", 13, 21, 0, 20, 1, 0)
47MUX_CFG_730("E3_730_KBC4", 13, 25, 0, 0, 24, 1, NA, 0, 0) 47MUX_CFG_730("E3_730_KBC4", 13, 25, 0, 24, 1, 0)
48
49MUX_CFG_730("AA17_730_USB_DM", 2, 21, 0, 20, 0, 0)
50MUX_CFG_730("W16_730_USB_PU_EN", 2, 25, 0, 24, 0, 0)
51MUX_CFG_730("W17_730_USB_VBUSI", 2, 29, 0, 28, 0, 0)
48}; 52};
49#endif 53#endif
50 54
@@ -73,8 +77,8 @@ MUX_CFG("UART3_BCLK", A, 0, 0, 2, 6, 0, NA, 0, 0)
73MUX_CFG("Y15_1610_UART3_RTS", A, 0, 1, 2, 6, 0, NA, 0, 0) 77MUX_CFG("Y15_1610_UART3_RTS", A, 0, 1, 2, 6, 0, NA, 0, 0)
74 78
75/* PWT & PWL, conflicts with UART3 */ 79/* PWT & PWL, conflicts with UART3 */
76MUX_CFG("PWT", 6, 0, 2, 0, 30, 0, NA, 0, 0) 80MUX_CFG("PWT", 6, 0, 2, 0, 30, 0, NA, 0, 0)
77MUX_CFG("PWL", 6, 3, 1, 0, 31, 1, NA, 0, 0) 81MUX_CFG("PWL", 6, 3, 1, 0, 31, 1, NA, 0, 0)
78 82
79/* USB internal master generic */ 83/* USB internal master generic */
80MUX_CFG("R18_USB_VBUS", 7, 9, 2, 1, 11, 0, NA, 0, 1) 84MUX_CFG("R18_USB_VBUS", 7, 9, 2, 1, 11, 0, NA, 0, 1)
@@ -151,7 +155,7 @@ MUX_CFG("MCBSP3_CLKX", 9, 3, 1, 1, 29, 0, NA, 0, 1)
151 155
152/* Misc ballouts */ 156/* Misc ballouts */
153MUX_CFG("BALLOUT_V8_ARMIO3", B, 18, 0, 2, 25, 1, NA, 0, 1) 157MUX_CFG("BALLOUT_V8_ARMIO3", B, 18, 0, 2, 25, 1, NA, 0, 1)
154MUX_CFG("N20_HDQ", 6, 18, 1, 1, 4, 0, 1, 4, 0) 158MUX_CFG("N20_HDQ", 6, 18, 1, 1, 4, 0, 1, 4, 0)
155 159
156/* OMAP-1610 MMC2 */ 160/* OMAP-1610 MMC2 */
157MUX_CFG("W8_1610_MMC2_DAT0", B, 21, 6, 2, 23, 1, 2, 1, 1) 161MUX_CFG("W8_1610_MMC2_DAT0", B, 21, 6, 2, 23, 1, 2, 1, 1)
diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
new file mode 100644
index 000000000000..ddf6b07dc9c7
--- /dev/null
+++ b/arch/arm/mach-omap1/pm.c
@@ -0,0 +1,770 @@
1/*
2 * linux/arch/arm/mach-omap1/pm.c
3 *
4 * OMAP Power Management Routines
5 *
6 * Original code for the SA11x0:
7 * Copyright (c) 2001 Cliff Brake <cbrake@accelent.com>
8 *
9 * Modified for the PXA250 by Nicolas Pitre:
10 * Copyright (c) 2002 Monta Vista Software, Inc.
11 *
12 * Modified for the OMAP1510 by David Singleton:
13 * Copyright (c) 2002 Monta Vista Software, Inc.
14 *
15 * Cleanup 2004 for OMAP1510/1610 by Dirk Behme <dirk.behme@de.bosch.com>
16 *
17 * This program is free software; you can redistribute it and/or modify it
18 * under the terms of the GNU General Public License as published by the
19 * Free Software Foundation; either version 2 of the License, or (at your
20 * option) any later version.
21 *
22 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 * You should have received a copy of the GNU General Public License along
34 * with this program; if not, write to the Free Software Foundation, Inc.,
35 * 675 Mass Ave, Cambridge, MA 02139, USA.
36 */
37
38#include <linux/pm.h>
39#include <linux/sched.h>
40#include <linux/proc_fs.h>
41#include <linux/pm.h>
42#include <linux/interrupt.h>
43#include <linux/sysfs.h>
44#include <linux/module.h>
45
46#include <asm/io.h>
47#include <asm/irq.h>
48#include <asm/atomic.h>
49#include <asm/mach/time.h>
50#include <asm/mach/irq.h>
51#include <asm/mach-types.h>
52
53#include <asm/arch/irqs.h>
54#include <asm/arch/clock.h>
55#include <asm/arch/sram.h>
56#include <asm/arch/tc.h>
57#include <asm/arch/pm.h>
58#include <asm/arch/mux.h>
59#include <asm/arch/tps65010.h>
60#include <asm/arch/dma.h>
61#include <asm/arch/dsp_common.h>
62#include <asm/arch/dmtimer.h>
63
64static unsigned int arm_sleep_save[ARM_SLEEP_SAVE_SIZE];
65static unsigned short dsp_sleep_save[DSP_SLEEP_SAVE_SIZE];
66static unsigned short ulpd_sleep_save[ULPD_SLEEP_SAVE_SIZE];
67static unsigned int mpui730_sleep_save[MPUI730_SLEEP_SAVE_SIZE];
68static unsigned int mpui1510_sleep_save[MPUI1510_SLEEP_SAVE_SIZE];
69static unsigned int mpui1610_sleep_save[MPUI1610_SLEEP_SAVE_SIZE];
70
71static unsigned short enable_dyn_sleep = 1;
72
73static ssize_t omap_pm_sleep_while_idle_show(struct subsystem * subsys, char *buf)
74{
75 return sprintf(buf, "%hu\n", enable_dyn_sleep);
76}
77
78static ssize_t omap_pm_sleep_while_idle_store(struct subsystem * subsys,
79 const char * buf,
80 size_t n)
81{
82 unsigned short value;
83 if (sscanf(buf, "%hu", &value) != 1 ||
84 (value != 0 && value != 1)) {
85 printk(KERN_ERR "idle_sleep_store: Invalid value\n");
86 return -EINVAL;
87 }
88 enable_dyn_sleep = value;
89 return n;
90}
91
92static struct subsys_attribute sleep_while_idle_attr = {
93 .attr = {
94 .name = __stringify(sleep_while_idle),
95 .mode = 0644,
96 },
97 .show = omap_pm_sleep_while_idle_show,
98 .store = omap_pm_sleep_while_idle_store,
99};
100
101extern struct subsystem power_subsys;
102static void (*omap_sram_idle)(void) = NULL;
103static void (*omap_sram_suspend)(unsigned long r0, unsigned long r1) = NULL;
104
105/*
106 * Let's power down on idle, but only if we are really
107 * idle, because once we start down the path of
108 * going idle we continue to do idle even if we get
109 * a clock tick interrupt.
110 */
111void omap_pm_idle(void)
112{
113 extern __u32 arm_idlect1_mask;
114 __u32 use_idlect1 = arm_idlect1_mask;
115#ifndef CONFIG_OMAP_MPU_TIMER
116 int do_sleep;
117#endif
118
119 local_irq_disable();
120 local_fiq_disable();
121 if (need_resched()) {
122 local_fiq_enable();
123 local_irq_enable();
124 return;
125 }
126
127 /*
128 * Since an interrupt may set up a timer, we don't want to
129 * reprogram the hardware timer with interrupts enabled.
130 * Re-enable interrupts only after returning from idle.
131 */
132 timer_dyn_reprogram();
133
134#ifdef CONFIG_OMAP_MPU_TIMER
135#warning Enable 32kHz OS timer in order to allow sleep states in idle
136 use_idlect1 = use_idlect1 & ~(1 << 9);
137#else
138
139 do_sleep = 0;
140 while (enable_dyn_sleep) {
141
142#ifdef CONFIG_CBUS_TAHVO_USB
143 extern int vbus_active;
144 /* Clock requirements? */
145 if (vbus_active)
146 break;
147#endif
148 do_sleep = 1;
149 break;
150 }
151
152#ifdef CONFIG_OMAP_DM_TIMER
153 use_idlect1 = omap_dm_timer_modify_idlect_mask(use_idlect1);
154#endif
155
156 if (omap_dma_running()) {
157 use_idlect1 &= ~(1 << 6);
158 if (omap_lcd_dma_ext_running())
159 use_idlect1 &= ~(1 << 12);
160 }
161
162 /* We should be able to remove the do_sleep variable and multiple
163 * tests above as soon as drivers, timer and DMA code have been fixed.
164 * Even the sleep block count should become obsolete. */
165 if ((use_idlect1 != ~0) || !do_sleep) {
166
167 __u32 saved_idlect1 = omap_readl(ARM_IDLECT1);
168 if (cpu_is_omap15xx())
169 use_idlect1 &= OMAP1510_BIG_SLEEP_REQUEST;
170 else
171 use_idlect1 &= OMAP1610_IDLECT1_SLEEP_VAL;
172 omap_writel(use_idlect1, ARM_IDLECT1);
173 __asm__ volatile ("mcr p15, 0, r0, c7, c0, 4");
174 omap_writel(saved_idlect1, ARM_IDLECT1);
175
176 local_fiq_enable();
177 local_irq_enable();
178 return;
179 }
180 omap_sram_suspend(omap_readl(ARM_IDLECT1),
181 omap_readl(ARM_IDLECT2));
182#endif
183
184 local_fiq_enable();
185 local_irq_enable();
186}
187
188/*
189 * Configuration of the wakeup event is board specific. For the
190 * moment we put it into this helper function. Later it may move
191 * to board specific files.
192 */
193static void omap_pm_wakeup_setup(void)
194{
195 u32 level1_wake = 0;
196 u32 level2_wake = OMAP_IRQ_BIT(INT_UART2);
197
198 /*
199 * Turn off all interrupts except GPIO bank 1, L1-2nd level cascade,
200 * and the L2 wakeup interrupts: keypad and UART2. Note that the
201 * drivers must still separately call omap_set_gpio_wakeup() to
202 * wake up to a GPIO interrupt.
203 */
204 if (cpu_is_omap730())
205 level1_wake = OMAP_IRQ_BIT(INT_730_GPIO_BANK1) |
206 OMAP_IRQ_BIT(INT_730_IH2_IRQ);
207 else if (cpu_is_omap15xx())
208 level1_wake = OMAP_IRQ_BIT(INT_GPIO_BANK1) |
209 OMAP_IRQ_BIT(INT_1510_IH2_IRQ);
210 else if (cpu_is_omap16xx())
211 level1_wake = OMAP_IRQ_BIT(INT_GPIO_BANK1) |
212 OMAP_IRQ_BIT(INT_1610_IH2_IRQ);
213
214 omap_writel(~level1_wake, OMAP_IH1_MIR);
215
216 if (cpu_is_omap730()) {
217 omap_writel(~level2_wake, OMAP_IH2_0_MIR);
218 omap_writel(~(OMAP_IRQ_BIT(INT_730_WAKE_UP_REQ) |
219 OMAP_IRQ_BIT(INT_730_MPUIO_KEYPAD)),
220 OMAP_IH2_1_MIR);
221 } else if (cpu_is_omap15xx()) {
222 level2_wake |= OMAP_IRQ_BIT(INT_KEYBOARD);
223 omap_writel(~level2_wake, OMAP_IH2_MIR);
224 } else if (cpu_is_omap16xx()) {
225 level2_wake |= OMAP_IRQ_BIT(INT_KEYBOARD);
226 omap_writel(~level2_wake, OMAP_IH2_0_MIR);
227
228 /* INT_1610_WAKE_UP_REQ is needed for GPIO wakeup... */
229 omap_writel(~OMAP_IRQ_BIT(INT_1610_WAKE_UP_REQ),
230 OMAP_IH2_1_MIR);
231 omap_writel(~0x0, OMAP_IH2_2_MIR);
232 omap_writel(~0x0, OMAP_IH2_3_MIR);
233 }
234
235 /* New IRQ agreement, recalculate in cascade order */
236 omap_writel(1, OMAP_IH2_CONTROL);
237 omap_writel(1, OMAP_IH1_CONTROL);
238}
239
240#define EN_DSPCK 13 /* ARM_CKCTL */
241#define EN_APICK 6 /* ARM_IDLECT2 */
242#define DSP_EN 1 /* ARM_RSTCT1 */
243
244void omap_pm_suspend(void)
245{
246 unsigned long arg0 = 0, arg1 = 0;
247
248 printk("PM: OMAP%x is trying to enter deep sleep...\n", system_rev);
249
250 omap_serial_wake_trigger(1);
251
252 if (machine_is_omap_osk()) {
253 /* Stop LED1 (D9) blink */
254 tps65010_set_led(LED1, OFF);
255 }
256
257 omap_writew(0xffff, ULPD_SOFT_DISABLE_REQ_REG);
258
259 /*
260 * Step 1: turn off interrupts (FIXME: NOTE: already disabled)
261 */
262
263 local_irq_disable();
264 local_fiq_disable();
265
266 /*
267 * Step 2: save registers
268 *
269 * The omap is a strange/beautiful device. The caches, memory
270 * and register state are preserved across power saves.
271 * We have to save and restore very little register state to
272 * idle the omap.
273 *
274 * Save interrupt, MPUI, ARM and UPLD control registers.
275 */
276
277 if (cpu_is_omap730()) {
278 MPUI730_SAVE(OMAP_IH1_MIR);
279 MPUI730_SAVE(OMAP_IH2_0_MIR);
280 MPUI730_SAVE(OMAP_IH2_1_MIR);
281 MPUI730_SAVE(MPUI_CTRL);
282 MPUI730_SAVE(MPUI_DSP_BOOT_CONFIG);
283 MPUI730_SAVE(MPUI_DSP_API_CONFIG);
284 MPUI730_SAVE(EMIFS_CONFIG);
285 MPUI730_SAVE(EMIFF_SDRAM_CONFIG);
286
287 } else if (cpu_is_omap15xx()) {
288 MPUI1510_SAVE(OMAP_IH1_MIR);
289 MPUI1510_SAVE(OMAP_IH2_MIR);
290 MPUI1510_SAVE(MPUI_CTRL);
291 MPUI1510_SAVE(MPUI_DSP_BOOT_CONFIG);
292 MPUI1510_SAVE(MPUI_DSP_API_CONFIG);
293 MPUI1510_SAVE(EMIFS_CONFIG);
294 MPUI1510_SAVE(EMIFF_SDRAM_CONFIG);
295 } else if (cpu_is_omap16xx()) {
296 MPUI1610_SAVE(OMAP_IH1_MIR);
297 MPUI1610_SAVE(OMAP_IH2_0_MIR);
298 MPUI1610_SAVE(OMAP_IH2_1_MIR);
299 MPUI1610_SAVE(OMAP_IH2_2_MIR);
300 MPUI1610_SAVE(OMAP_IH2_3_MIR);
301 MPUI1610_SAVE(MPUI_CTRL);
302 MPUI1610_SAVE(MPUI_DSP_BOOT_CONFIG);
303 MPUI1610_SAVE(MPUI_DSP_API_CONFIG);
304 MPUI1610_SAVE(EMIFS_CONFIG);
305 MPUI1610_SAVE(EMIFF_SDRAM_CONFIG);
306 }
307
308 ARM_SAVE(ARM_CKCTL);
309 ARM_SAVE(ARM_IDLECT1);
310 ARM_SAVE(ARM_IDLECT2);
311 if (!(cpu_is_omap15xx()))
312 ARM_SAVE(ARM_IDLECT3);
313 ARM_SAVE(ARM_EWUPCT);
314 ARM_SAVE(ARM_RSTCT1);
315 ARM_SAVE(ARM_RSTCT2);
316 ARM_SAVE(ARM_SYSST);
317 ULPD_SAVE(ULPD_CLOCK_CTRL);
318 ULPD_SAVE(ULPD_STATUS_REQ);
319
320 /* (Step 3 removed - we now allow deep sleep by default) */
321
322 /*
323 * Step 4: OMAP DSP Shutdown
324 */
325
326 /* stop DSP */
327 omap_writew(omap_readw(ARM_RSTCT1) & ~(1 << DSP_EN), ARM_RSTCT1);
328
329 /* shut down dsp_ck */
330 omap_writew(omap_readw(ARM_CKCTL) & ~(1 << EN_DSPCK), ARM_CKCTL);
331
332 /* temporarily enabling api_ck to access DSP registers */
333 omap_writew(omap_readw(ARM_IDLECT2) | 1 << EN_APICK, ARM_IDLECT2);
334
335 /* save DSP registers */
336 DSP_SAVE(DSP_IDLECT2);
337
338 /* Stop all DSP domain clocks */
339 __raw_writew(0, DSP_IDLECT2);
340
341 /*
342 * Step 5: Wakeup Event Setup
343 */
344
345 omap_pm_wakeup_setup();
346
347 /*
348 * Step 6: ARM and Traffic controller shutdown
349 */
350
351 /* disable ARM watchdog */
352 omap_writel(0x00F5, OMAP_WDT_TIMER_MODE);
353 omap_writel(0x00A0, OMAP_WDT_TIMER_MODE);
354
355 /*
356 * Step 6b: ARM and Traffic controller shutdown
357 *
358 * Step 6 continues here. Prepare jump to power management
359 * assembly code in internal SRAM.
360 *
361 * Since the omap_cpu_suspend routine has been copied to
362 * SRAM, we'll do an indirect procedure call to it and pass the
363 * contents of arm_idlect1 and arm_idlect2 so it can restore
364 * them when it wakes up and it will return.
365 */
366
367 arg0 = arm_sleep_save[ARM_SLEEP_SAVE_ARM_IDLECT1];
368 arg1 = arm_sleep_save[ARM_SLEEP_SAVE_ARM_IDLECT2];
369
370 /*
371 * Step 6c: ARM and Traffic controller shutdown
372 *
373 * Jump to assembly code. The processor will stay there
374 * until wake up.
375 */
376 omap_sram_suspend(arg0, arg1);
377
378 /*
379 * If we are here, processor is woken up!
380 */
381
382 /*
383 * Restore DSP clocks
384 */
385
386 /* again temporarily enabling api_ck to access DSP registers */
387 omap_writew(omap_readw(ARM_IDLECT2) | 1 << EN_APICK, ARM_IDLECT2);
388
389 /* Restore DSP domain clocks */
390 DSP_RESTORE(DSP_IDLECT2);
391
392 /*
393 * Restore ARM state, except ARM_IDLECT1/2 which omap_cpu_suspend did
394 */
395
396 if (!(cpu_is_omap15xx()))
397 ARM_RESTORE(ARM_IDLECT3);
398 ARM_RESTORE(ARM_CKCTL);
399 ARM_RESTORE(ARM_EWUPCT);
400 ARM_RESTORE(ARM_RSTCT1);
401 ARM_RESTORE(ARM_RSTCT2);
402 ARM_RESTORE(ARM_SYSST);
403 ULPD_RESTORE(ULPD_CLOCK_CTRL);
404 ULPD_RESTORE(ULPD_STATUS_REQ);
405
406 if (cpu_is_omap730()) {
407 MPUI730_RESTORE(EMIFS_CONFIG);
408 MPUI730_RESTORE(EMIFF_SDRAM_CONFIG);
409 MPUI730_RESTORE(OMAP_IH1_MIR);
410 MPUI730_RESTORE(OMAP_IH2_0_MIR);
411 MPUI730_RESTORE(OMAP_IH2_1_MIR);
412 } else if (cpu_is_omap15xx()) {
413 MPUI1510_RESTORE(MPUI_CTRL);
414 MPUI1510_RESTORE(MPUI_DSP_BOOT_CONFIG);
415 MPUI1510_RESTORE(MPUI_DSP_API_CONFIG);
416 MPUI1510_RESTORE(EMIFS_CONFIG);
417 MPUI1510_RESTORE(EMIFF_SDRAM_CONFIG);
418 MPUI1510_RESTORE(OMAP_IH1_MIR);
419 MPUI1510_RESTORE(OMAP_IH2_MIR);
420 } else if (cpu_is_omap16xx()) {
421 MPUI1610_RESTORE(MPUI_CTRL);
422 MPUI1610_RESTORE(MPUI_DSP_BOOT_CONFIG);
423 MPUI1610_RESTORE(MPUI_DSP_API_CONFIG);
424 MPUI1610_RESTORE(EMIFS_CONFIG);
425 MPUI1610_RESTORE(EMIFF_SDRAM_CONFIG);
426
427 MPUI1610_RESTORE(OMAP_IH1_MIR);
428 MPUI1610_RESTORE(OMAP_IH2_0_MIR);
429 MPUI1610_RESTORE(OMAP_IH2_1_MIR);
430 MPUI1610_RESTORE(OMAP_IH2_2_MIR);
431 MPUI1610_RESTORE(OMAP_IH2_3_MIR);
432 }
433
434 omap_writew(0, ULPD_SOFT_DISABLE_REQ_REG);
435
436 /*
437 * Reenable interrupts
438 */
439
440 local_irq_enable();
441 local_fiq_enable();
442
443 omap_serial_wake_trigger(0);
444
445 printk("PM: OMAP%x is re-starting from deep sleep...\n", system_rev);
446
447 if (machine_is_omap_osk()) {
448 /* Let LED1 (D9) blink again */
449 tps65010_set_led(LED1, BLINK);
450 }
451}
452
453#if defined(DEBUG) && defined(CONFIG_PROC_FS)
454static int g_read_completed;
455
456/*
457 * Read system PM registers for debugging
458 */
459static int omap_pm_read_proc(
460 char *page_buffer,
461 char **my_first_byte,
462 off_t virtual_start,
463 int length,
464 int *eof,
465 void *data)
466{
467 int my_buffer_offset = 0;
468 char * const my_base = page_buffer;
469
470 ARM_SAVE(ARM_CKCTL);
471 ARM_SAVE(ARM_IDLECT1);
472 ARM_SAVE(ARM_IDLECT2);
473 if (!(cpu_is_omap15xx()))
474 ARM_SAVE(ARM_IDLECT3);
475 ARM_SAVE(ARM_EWUPCT);
476 ARM_SAVE(ARM_RSTCT1);
477 ARM_SAVE(ARM_RSTCT2);
478 ARM_SAVE(ARM_SYSST);
479
480 ULPD_SAVE(ULPD_IT_STATUS);
481 ULPD_SAVE(ULPD_CLOCK_CTRL);
482 ULPD_SAVE(ULPD_SOFT_REQ);
483 ULPD_SAVE(ULPD_STATUS_REQ);
484 ULPD_SAVE(ULPD_DPLL_CTRL);
485 ULPD_SAVE(ULPD_POWER_CTRL);
486
487 if (cpu_is_omap730()) {
488 MPUI730_SAVE(MPUI_CTRL);
489 MPUI730_SAVE(MPUI_DSP_STATUS);
490 MPUI730_SAVE(MPUI_DSP_BOOT_CONFIG);
491 MPUI730_SAVE(MPUI_DSP_API_CONFIG);
492 MPUI730_SAVE(EMIFF_SDRAM_CONFIG);
493 MPUI730_SAVE(EMIFS_CONFIG);
494 } else if (cpu_is_omap15xx()) {
495 MPUI1510_SAVE(MPUI_CTRL);
496 MPUI1510_SAVE(MPUI_DSP_STATUS);
497 MPUI1510_SAVE(MPUI_DSP_BOOT_CONFIG);
498 MPUI1510_SAVE(MPUI_DSP_API_CONFIG);
499 MPUI1510_SAVE(EMIFF_SDRAM_CONFIG);
500 MPUI1510_SAVE(EMIFS_CONFIG);
501 } else if (cpu_is_omap16xx()) {
502 MPUI1610_SAVE(MPUI_CTRL);
503 MPUI1610_SAVE(MPUI_DSP_STATUS);
504 MPUI1610_SAVE(MPUI_DSP_BOOT_CONFIG);
505 MPUI1610_SAVE(MPUI_DSP_API_CONFIG);
506 MPUI1610_SAVE(EMIFF_SDRAM_CONFIG);
507 MPUI1610_SAVE(EMIFS_CONFIG);
508 }
509
510 if (virtual_start == 0) {
511 g_read_completed = 0;
512
513 my_buffer_offset += sprintf(my_base + my_buffer_offset,
514 "ARM_CKCTL_REG: 0x%-8x \n"
515 "ARM_IDLECT1_REG: 0x%-8x \n"
516 "ARM_IDLECT2_REG: 0x%-8x \n"
517 "ARM_IDLECT3_REG: 0x%-8x \n"
518 "ARM_EWUPCT_REG: 0x%-8x \n"
519 "ARM_RSTCT1_REG: 0x%-8x \n"
520 "ARM_RSTCT2_REG: 0x%-8x \n"
521 "ARM_SYSST_REG: 0x%-8x \n"
522 "ULPD_IT_STATUS_REG: 0x%-4x \n"
523 "ULPD_CLOCK_CTRL_REG: 0x%-4x \n"
524 "ULPD_SOFT_REQ_REG: 0x%-4x \n"
525 "ULPD_DPLL_CTRL_REG: 0x%-4x \n"
526 "ULPD_STATUS_REQ_REG: 0x%-4x \n"
527 "ULPD_POWER_CTRL_REG: 0x%-4x \n",
528 ARM_SHOW(ARM_CKCTL),
529 ARM_SHOW(ARM_IDLECT1),
530 ARM_SHOW(ARM_IDLECT2),
531 ARM_SHOW(ARM_IDLECT3),
532 ARM_SHOW(ARM_EWUPCT),
533 ARM_SHOW(ARM_RSTCT1),
534 ARM_SHOW(ARM_RSTCT2),
535 ARM_SHOW(ARM_SYSST),
536 ULPD_SHOW(ULPD_IT_STATUS),
537 ULPD_SHOW(ULPD_CLOCK_CTRL),
538 ULPD_SHOW(ULPD_SOFT_REQ),
539 ULPD_SHOW(ULPD_DPLL_CTRL),
540 ULPD_SHOW(ULPD_STATUS_REQ),
541 ULPD_SHOW(ULPD_POWER_CTRL));
542
543 if (cpu_is_omap730()) {
544 my_buffer_offset += sprintf(my_base + my_buffer_offset,
545 "MPUI730_CTRL_REG 0x%-8x \n"
546 "MPUI730_DSP_STATUS_REG: 0x%-8x \n"
547 "MPUI730_DSP_BOOT_CONFIG_REG: 0x%-8x \n"
548 "MPUI730_DSP_API_CONFIG_REG: 0x%-8x \n"
549 "MPUI730_SDRAM_CONFIG_REG: 0x%-8x \n"
550 "MPUI730_EMIFS_CONFIG_REG: 0x%-8x \n",
551 MPUI730_SHOW(MPUI_CTRL),
552 MPUI730_SHOW(MPUI_DSP_STATUS),
553 MPUI730_SHOW(MPUI_DSP_BOOT_CONFIG),
554 MPUI730_SHOW(MPUI_DSP_API_CONFIG),
555 MPUI730_SHOW(EMIFF_SDRAM_CONFIG),
556 MPUI730_SHOW(EMIFS_CONFIG));
557 } else if (cpu_is_omap15xx()) {
558 my_buffer_offset += sprintf(my_base + my_buffer_offset,
559 "MPUI1510_CTRL_REG 0x%-8x \n"
560 "MPUI1510_DSP_STATUS_REG: 0x%-8x \n"
561 "MPUI1510_DSP_BOOT_CONFIG_REG: 0x%-8x \n"
562 "MPUI1510_DSP_API_CONFIG_REG: 0x%-8x \n"
563 "MPUI1510_SDRAM_CONFIG_REG: 0x%-8x \n"
564 "MPUI1510_EMIFS_CONFIG_REG: 0x%-8x \n",
565 MPUI1510_SHOW(MPUI_CTRL),
566 MPUI1510_SHOW(MPUI_DSP_STATUS),
567 MPUI1510_SHOW(MPUI_DSP_BOOT_CONFIG),
568 MPUI1510_SHOW(MPUI_DSP_API_CONFIG),
569 MPUI1510_SHOW(EMIFF_SDRAM_CONFIG),
570 MPUI1510_SHOW(EMIFS_CONFIG));
571 } else if (cpu_is_omap16xx()) {
572 my_buffer_offset += sprintf(my_base + my_buffer_offset,
573 "MPUI1610_CTRL_REG 0x%-8x \n"
574 "MPUI1610_DSP_STATUS_REG: 0x%-8x \n"
575 "MPUI1610_DSP_BOOT_CONFIG_REG: 0x%-8x \n"
576 "MPUI1610_DSP_API_CONFIG_REG: 0x%-8x \n"
577 "MPUI1610_SDRAM_CONFIG_REG: 0x%-8x \n"
578 "MPUI1610_EMIFS_CONFIG_REG: 0x%-8x \n",
579 MPUI1610_SHOW(MPUI_CTRL),
580 MPUI1610_SHOW(MPUI_DSP_STATUS),
581 MPUI1610_SHOW(MPUI_DSP_BOOT_CONFIG),
582 MPUI1610_SHOW(MPUI_DSP_API_CONFIG),
583 MPUI1610_SHOW(EMIFF_SDRAM_CONFIG),
584 MPUI1610_SHOW(EMIFS_CONFIG));
585 }
586
587 g_read_completed++;
588 } else if (g_read_completed >= 1) {
589 *eof = 1;
590 return 0;
591 }
592 g_read_completed++;
593
594 *my_first_byte = page_buffer;
595 return my_buffer_offset;
596}
597
598static void omap_pm_init_proc(void)
599{
600 struct proc_dir_entry *entry;
601
602 entry = create_proc_read_entry("driver/omap_pm",
603 S_IWUSR | S_IRUGO, NULL,
604 omap_pm_read_proc, NULL);
605}
606
607#endif /* DEBUG && CONFIG_PROC_FS */
608
609static void (*saved_idle)(void) = NULL;
610
611/*
612 * omap_pm_prepare - Do preliminary suspend work.
613 * @state: suspend state we're entering.
614 *
615 */
616static int omap_pm_prepare(suspend_state_t state)
617{
618 int error = 0;
619
620 /* We cannot sleep in idle until we have resumed */
621 saved_idle = pm_idle;
622 pm_idle = NULL;
623
624 switch (state)
625 {
626 case PM_SUSPEND_STANDBY:
627 case PM_SUSPEND_MEM:
628 break;
629
630 case PM_SUSPEND_DISK:
631 return -ENOTSUPP;
632
633 default:
634 return -EINVAL;
635 }
636
637 return error;
638}
639
640
641/*
642 * omap_pm_enter - Actually enter a sleep state.
643 * @state: State we're entering.
644 *
645 */
646
647static int omap_pm_enter(suspend_state_t state)
648{
649 switch (state)
650 {
651 case PM_SUSPEND_STANDBY:
652 case PM_SUSPEND_MEM:
653 omap_pm_suspend();
654 break;
655
656 case PM_SUSPEND_DISK:
657 return -ENOTSUPP;
658
659 default:
660 return -EINVAL;
661 }
662
663 return 0;
664}
665
666
667/**
668 * omap_pm_finish - Finish up suspend sequence.
669 * @state: State we're coming out of.
670 *
671 * This is called after we wake back up (or if entering the sleep state
672 * failed).
673 */
674
675static int omap_pm_finish(suspend_state_t state)
676{
677 pm_idle = saved_idle;
678 return 0;
679}
680
681
682static irqreturn_t omap_wakeup_interrupt(int irq, void * dev,
683 struct pt_regs * regs)
684{
685 return IRQ_HANDLED;
686}
687
688static struct irqaction omap_wakeup_irq = {
689 .name = "peripheral wakeup",
690 .flags = SA_INTERRUPT,
691 .handler = omap_wakeup_interrupt
692};
693
694
695
696static struct pm_ops omap_pm_ops ={
697 .pm_disk_mode = 0,
698 .prepare = omap_pm_prepare,
699 .enter = omap_pm_enter,
700 .finish = omap_pm_finish,
701};
702
703static int __init omap_pm_init(void)
704{
705 printk("Power Management for TI OMAP.\n");
706
707 /*
708 * We copy the assembler sleep/wakeup routines to SRAM.
709 * These routines need to be in SRAM as that's the only
710 * memory the MPU can see when it wakes up.
711 */
712 if (cpu_is_omap730()) {
713 omap_sram_idle = omap_sram_push(omap730_idle_loop_suspend,
714 omap730_idle_loop_suspend_sz);
715 omap_sram_suspend = omap_sram_push(omap730_cpu_suspend,
716 omap730_cpu_suspend_sz);
717 } else if (cpu_is_omap15xx()) {
718 omap_sram_idle = omap_sram_push(omap1510_idle_loop_suspend,
719 omap1510_idle_loop_suspend_sz);
720 omap_sram_suspend = omap_sram_push(omap1510_cpu_suspend,
721 omap1510_cpu_suspend_sz);
722 } else if (cpu_is_omap16xx()) {
723 omap_sram_idle = omap_sram_push(omap1610_idle_loop_suspend,
724 omap1610_idle_loop_suspend_sz);
725 omap_sram_suspend = omap_sram_push(omap1610_cpu_suspend,
726 omap1610_cpu_suspend_sz);
727 }
728
729 if (omap_sram_idle == NULL || omap_sram_suspend == NULL) {
730 printk(KERN_ERR "PM not initialized: Missing SRAM support\n");
731 return -ENODEV;
732 }
733
734 pm_idle = omap_pm_idle;
735
736 if (cpu_is_omap730())
737 setup_irq(INT_730_WAKE_UP_REQ, &omap_wakeup_irq);
738 else if (cpu_is_omap16xx())
739 setup_irq(INT_1610_WAKE_UP_REQ, &omap_wakeup_irq);
740
741 /* Program new power ramp-up time
742 * (0 for most boards since we don't lower voltage when in deep sleep)
743 */
744 omap_writew(ULPD_SETUP_ANALOG_CELL_3_VAL, ULPD_SETUP_ANALOG_CELL_3);
745
746 /* Setup ULPD POWER_CTRL_REG - enter deep sleep whenever possible */
747 omap_writew(ULPD_POWER_CTRL_REG_VAL, ULPD_POWER_CTRL);
748
749 /* Configure IDLECT3 */
750 if (cpu_is_omap730())
751 omap_writel(OMAP730_IDLECT3_VAL, OMAP730_IDLECT3);
752 else if (cpu_is_omap16xx())
753 omap_writel(OMAP1610_IDLECT3_VAL, OMAP1610_IDLECT3);
754
755 pm_set_ops(&omap_pm_ops);
756
757#if defined(DEBUG) && defined(CONFIG_PROC_FS)
758 omap_pm_init_proc();
759#endif
760
761 subsys_create_file(&power_subsys, &sleep_while_idle_attr);
762
763 if (cpu_is_omap16xx()) {
764 /* configure LOW_PWR pin */
765 omap_cfg_reg(T20_1610_LOW_PWR);
766 }
767
768 return 0;
769}
770__initcall(omap_pm_init);
diff --git a/arch/arm/mach-omap1/serial.c b/arch/arm/mach-omap1/serial.c
index e924e0c6a4ce..9b4cd698bec8 100644
--- a/arch/arm/mach-omap1/serial.c
+++ b/arch/arm/mach-omap1/serial.c
@@ -30,9 +30,9 @@
30#include <asm/arch/pm.h> 30#include <asm/arch/pm.h>
31#endif 31#endif
32 32
33static struct clk * uart1_ck = NULL; 33static struct clk * uart1_ck;
34static struct clk * uart2_ck = NULL; 34static struct clk * uart2_ck;
35static struct clk * uart3_ck = NULL; 35static struct clk * uart3_ck;
36 36
37static inline unsigned int omap_serial_in(struct plat_serial8250_port *up, 37static inline unsigned int omap_serial_in(struct plat_serial8250_port *up,
38 int offset) 38 int offset)
diff --git a/arch/arm/plat-omap/sleep.S b/arch/arm/mach-omap1/sleep.S
index 4cd7d292f854..e58295e2d3b2 100644
--- a/arch/arm/plat-omap/sleep.S
+++ b/arch/arm/mach-omap1/sleep.S
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/arch/arm/plat-omap/sleep.S 2 * linux/arch/arm/mach-omap1/sleep.S
3 * 3 *
4 * Low-level OMAP730/1510/1610 sleep/wakeUp support 4 * Low-level OMAP730/1510/1610 sleep/wakeUp support
5 * 5 *
@@ -383,60 +383,133 @@ ENTRY(omap1610_cpu_suspend)
383 mcr p15, 0, r0, c7, c10, 4 383 mcr p15, 0, r0, c7, c10, 4
384 nop 384 nop
385 385
386 @ load base address of Traffic Controller 386 @ Load base address of Traffic Controller
387 mov r6, #TCMIF_ASM_BASE & 0xff000000 387 mov r6, #TCMIF_ASM_BASE & 0xff000000
388 orr r6, r6, #TCMIF_ASM_BASE & 0x00ff0000 388 orr r6, r6, #TCMIF_ASM_BASE & 0x00ff0000
389 orr r6, r6, #TCMIF_ASM_BASE & 0x0000ff00 389 orr r6, r6, #TCMIF_ASM_BASE & 0x0000ff00
390 390
391 @ prepare to put SDRAM into self-refresh manually 391 @ Prepare to put SDRAM into self-refresh manually
392 ldr r7, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff] 392 ldr r7, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
393 orr r9, r7, #SELF_REFRESH_MODE & 0xff000000 393 orr r9, r7, #SELF_REFRESH_MODE & 0xff000000
394 orr r9, r9, #SELF_REFRESH_MODE & 0x000000ff 394 orr r9, r9, #SELF_REFRESH_MODE & 0x000000ff
395 str r9, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff] 395 str r9, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
396 396
397 @ prepare to put EMIFS to Sleep 397 @ Prepare to put EMIFS to Sleep
398 ldr r8, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff] 398 ldr r8, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
399 orr r9, r8, #IDLE_EMIFS_REQUEST & 0xff 399 orr r9, r8, #IDLE_EMIFS_REQUEST & 0xff
400 str r9, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff] 400 str r9, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
401 401
402 @ load base address of ARM_IDLECT1 and ARM_IDLECT2 402 @ Load base address of ARM_IDLECT1 and ARM_IDLECT2
403 mov r4, #CLKGEN_REG_ASM_BASE & 0xff000000 403 mov r4, #CLKGEN_REG_ASM_BASE & 0xff000000
404 orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x00ff0000 404 orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x00ff0000
405 orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x0000ff00 405 orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x0000ff00
406 406
407 @ turn off clock domains 407 @ Turn off clock domains
408 @ do not disable PERCK (0x04) 408 @ Do not disable PERCK (0x04)
409 mov r5, #OMAP1610_IDLECT2_SLEEP_VAL & 0xff 409 mov r5, #OMAP1610_IDLECT2_SLEEP_VAL & 0xff
410 orr r5, r5, #OMAP1610_IDLECT2_SLEEP_VAL & 0xff00 410 orr r5, r5, #OMAP1610_IDLECT2_SLEEP_VAL & 0xff00
411 strh r5, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff] 411 strh r5, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
412 412
413 @ request ARM idle 413 @ Request ARM idle
414 mov r3, #OMAP1610_IDLECT1_SLEEP_VAL & 0xff 414 mov r3, #OMAP1610_IDLECT1_SLEEP_VAL & 0xff
415 orr r3, r3, #OMAP1610_IDLECT1_SLEEP_VAL & 0xff00 415 orr r3, r3, #OMAP1610_IDLECT1_SLEEP_VAL & 0xff00
416 strh r3, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff] 416 strh r3, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
417 417
418 @ disable instruction cache
419 mrc p15, 0, r9, c1, c0, 0
420 bic r2, r9, #0x1000
421 mcr p15, 0, r2, c1, c0, 0
422 nop
423
424/* 418/*
425 * Let's wait for the next wake up event to wake us up. r0 can't be 419 * Let's wait for the next wake up event to wake us up. r0 can't be
426 * used here because r0 holds ARM_IDLECT1 420 * used here because r0 holds ARM_IDLECT1
427 */ 421 */
428 mov r2, #0 422 mov r2, #0
429 mcr p15, 0, r2, c7, c0, 4 @ wait for interrupt 423 mcr p15, 0, r2, c7, c0, 4 @ wait for interrupt
424
425 @ Errata (HEL3SU467, section 1.4.4) specifies nop-instructions
426 @ according to this formula:
427 @ 2 + (4*DPLL_MULT)/DPLL_DIV/ARMDIV
428 @ Max DPLL_MULT = 18
429 @ DPLL_DIV = 1
430 @ ARMDIV = 1
431 @ => 74 nop-instructions
432 nop
433 nop
434 nop
435 nop
436 nop
437 nop
438 nop
439 nop
440 nop
441 nop @10
442 nop
443 nop
444 nop
445 nop
446 nop
447 nop
448 nop
449 nop
450 nop
451 nop @20
452 nop
453 nop
454 nop
455 nop
456 nop
457 nop
458 nop
459 nop
460 nop
461 nop @30
462 nop
463 nop
464 nop
465 nop
466 nop
467 nop
468 nop
469 nop
470 nop
471 nop @40
472 nop
473 nop
474 nop
475 nop
476 nop
477 nop
478 nop
479 nop
480 nop
481 nop @50
482 nop
483 nop
484 nop
485 nop
486 nop
487 nop
488 nop
489 nop
490 nop
491 nop @60
492 nop
493 nop
494 nop
495 nop
496 nop
497 nop
498 nop
499 nop
500 nop
501 nop @70
502 nop
503 nop
504 nop
505 nop @74
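As a sanity check of the figure in the errata comment above, the worst-case numbers quoted there (DPLL_MULT = 18, DPLL_DIV = 1, ARMDIV = 1) do give 74. A throwaway user-space sketch of the same arithmetic, not kernel code:

#include <stdio.h>

int main(void)
{
	/* Worst-case values assumed in the errata comment above. */
	int dpll_mult = 18, dpll_div = 1, armdiv = 1;
	int nops = 2 + (4 * dpll_mult) / dpll_div / armdiv;

	printf("nop instructions required after WFI: %d\n", nops);	/* prints 74 */
	return 0;
}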
430/* 506/*
431 * omap1610_cpu_suspend()'s resume point. 507 * omap1610_cpu_suspend()'s resume point.
432 * 508 *
433 * It will just start executing here, so we'll restore stuff from the 509 * It will just start executing here, so we'll restore stuff from the
434 * stack. 510 * stack.
435 */ 511 */
436 @ re-enable Icache 512 @ Restore the ARM_IDLECT1 and ARM_IDLECT2.
437 mcr p15, 0, r9, c1, c0, 0
438
439 @ reset the ARM_IDLECT1 and ARM_IDLECT2.
440 strh r1, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff] 513 strh r1, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
441 strh r0, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff] 514 strh r0, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
442 515
@@ -444,7 +517,7 @@ ENTRY(omap1610_cpu_suspend)
444 str r7, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff] 517 str r7, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
445 str r8, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff] 518 str r8, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
446 519
447 @ restore regs and return 520 @ Restore regs and return
448 ldmfd sp!, {r0 - r12, pc} 521 ldmfd sp!, {r0 - r12, pc}
449 522
450ENTRY(omap1610_cpu_suspend_sz) 523ENTRY(omap1610_cpu_suspend_sz)
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index cdbf4d7620c6..a85fe6066bc4 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -51,8 +51,6 @@
51 51
52struct sys_timer omap_timer; 52struct sys_timer omap_timer;
53 53
54#ifdef CONFIG_OMAP_MPU_TIMER
55
56/* 54/*
57 * --------------------------------------------------------------------------- 55 * ---------------------------------------------------------------------------
58 * MPU timer 56 * MPU timer
@@ -222,195 +220,6 @@ unsigned long long sched_clock(void)
222 220
223 return cycles_2_ns(ticks64); 221 return cycles_2_ns(ticks64);
224} 222}
225#endif /* CONFIG_OMAP_MPU_TIMER */
226
227#ifdef CONFIG_OMAP_32K_TIMER
228
229#ifdef CONFIG_ARCH_OMAP15XX
230#error OMAP 32KHz timer does not currently work on 15XX!
231#endif
232
233/*
234 * ---------------------------------------------------------------------------
235 * 32KHz OS timer
236 *
237 * This currently works only on 16xx, as 1510 does not have the continuous
238 * 32KHz synchronous timer. The 32KHz synchronous timer is used to keep track
239 * of time in addition to the 32KHz OS timer. Using only the 32KHz OS timer
240 * on 1510 would be possible, but the timer would not be as accurate as
241 * with the 32KHz synchronized timer.
242 * ---------------------------------------------------------------------------
243 */
244#define OMAP_32K_TIMER_BASE 0xfffb9000
245#define OMAP_32K_TIMER_CR 0x08
246#define OMAP_32K_TIMER_TVR 0x00
247#define OMAP_32K_TIMER_TCR 0x04
248
249#define OMAP_32K_TICKS_PER_HZ (32768 / HZ)
250
251/*
252 * TRM says 1 / HZ = (TVR + 1) / 32768, so TVR = (32768 / HZ) - 1;
253 * with HZ = 100, TVR = 326.68 (the macro below truncates this to 326).
254 */
255#define OMAP_32K_TIMER_TICK_PERIOD ((32768 / HZ) - 1)
256#define TIMER_32K_SYNCHRONIZED 0xfffbc410
257
258#define JIFFIES_TO_HW_TICKS(nr_jiffies, clock_rate) \
259 (((nr_jiffies) * (clock_rate)) / HZ)
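For reference, a stand-alone sketch of the TVR arithmetic from the comment above, assuming HZ = 100 as in that comment (not part of the patch):

#include <stdio.h>

#define HZ 100

int main(void)
{
	/* TRM: 1/HZ = (TVR + 1)/32768  =>  TVR = 32768/HZ - 1 */
	double ideal_tvr = 32768.0 / HZ - 1.0;		/* 326.68 */
	int programmed = (32768 / HZ) - 1;		/* integer-truncated macro value: 326 */

	printf("ideal TVR = %.2f, programmed = %d, tick = %.3f ms\n",
	       ideal_tvr, programmed, (programmed + 1) * 1000.0 / 32768.0);
	return 0;
}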
260
261static inline void omap_32k_timer_write(int val, int reg)
262{
263 omap_writew(val, reg + OMAP_32K_TIMER_BASE);
264}
265
266static inline unsigned long omap_32k_timer_read(int reg)
267{
268 return omap_readl(reg + OMAP_32K_TIMER_BASE) & 0xffffff;
269}
270
271/*
272 * The 32KHz synchronized timer is an additional timer on 16xx.
273 * It is always running.
274 */
275static inline unsigned long omap_32k_sync_timer_read(void)
276{
277 return omap_readl(TIMER_32K_SYNCHRONIZED);
278}
279
280static inline void omap_32k_timer_start(unsigned long load_val)
281{
282 omap_32k_timer_write(load_val, OMAP_32K_TIMER_TVR);
283 omap_32k_timer_write(0x0f, OMAP_32K_TIMER_CR);
284}
285
286static inline void omap_32k_timer_stop(void)
287{
288 omap_32k_timer_write(0x0, OMAP_32K_TIMER_CR);
289}
290
291/*
292 * Rounds down to nearest usec. Note that this will overflow for larger values.
293 */
294static inline unsigned long omap_32k_ticks_to_usecs(unsigned long ticks_32k)
295{
296 return (ticks_32k * 5*5*5*5*5*5) >> 9;
297}
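The 5*5*5*5*5*5 >> 9 expression above is ticks * 10^6 / 2^15 with the common factor of 2^6 cancelled (10^6 = 2^6 * 5^6, 2^15 / 2^6 = 2^9). A minimal stand-alone check of that identity, not kernel code:

#include <stdio.h>

int main(void)
{
	unsigned long ticks = 32768;	/* one second of 32 kHz ticks */

	/* Same expression as omap_32k_ticks_to_usecs(): ticks * 15625 / 512 */
	unsigned long usecs = (ticks * 5*5*5*5*5*5) >> 9;

	printf("%lu ticks -> %lu us (expect 1000000)\n", ticks, usecs);
	return 0;
}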
298
299/*
300 * Rounds down to nearest nsec.
301 */
302static inline unsigned long long
303omap_32k_ticks_to_nsecs(unsigned long ticks_32k)
304{
305 return (unsigned long long) ticks_32k * 1000 * 5*5*5*5*5*5 >> 9;
306}
307
308static unsigned long omap_32k_last_tick = 0;
309
310/*
311 * Returns elapsed usecs since last 32k timer interrupt
312 */
313static unsigned long omap_32k_timer_gettimeoffset(void)
314{
315 unsigned long now = omap_32k_sync_timer_read();
316 return omap_32k_ticks_to_usecs(now - omap_32k_last_tick);
317}
318
319/*
320 * Returns current time from boot in nsecs. It's OK for this to wrap
321 * around for now, as it's just a relative time stamp.
322 */
323unsigned long long sched_clock(void)
324{
325 return omap_32k_ticks_to_nsecs(omap_32k_sync_timer_read());
326}
327
328/*
329 * Timer interrupt for 32KHz timer. When dynamic tick is enabled, this
330 * function is also called from other interrupts to remove latency
331 * issues with dynamic tick. In the dynamic tick case, we need to lock
332 * with irqsave.
333 */
334static irqreturn_t omap_32k_timer_interrupt(int irq, void *dev_id,
335 struct pt_regs *regs)
336{
337 unsigned long flags;
338 unsigned long now;
339
340 write_seqlock_irqsave(&xtime_lock, flags);
341 now = omap_32k_sync_timer_read();
342
343 while (now - omap_32k_last_tick >= OMAP_32K_TICKS_PER_HZ) {
344 omap_32k_last_tick += OMAP_32K_TICKS_PER_HZ;
345 timer_tick(regs);
346 }
347
348 /* Restart timer so we don't drift off due to modulo or dynamic tick.
349 * By default we program the next timer to be continuous to avoid
350 * latencies during high system load. During dynamic tick operation the
351 * continuous timer can be overridden from pm_idle to be longer.
352 */
353 omap_32k_timer_start(omap_32k_last_tick + OMAP_32K_TICKS_PER_HZ - now);
354 write_sequnlock_irqrestore(&xtime_lock, flags);
355
356 return IRQ_HANDLED;
357}
358
359#ifdef CONFIG_NO_IDLE_HZ
360/*
361 * Programs the next timer interrupt needed. Called when dynamic tick is
362 * enabled, and to reprogram the ticks to skip from pm_idle. Note that
363 * we can keep the timer continuous, and don't need to set it to run in
364 * one-shot mode. This is because the timer will get reprogrammed again
365 * after next interrupt.
366 */
367void omap_32k_timer_reprogram(unsigned long next_tick)
368{
369 omap_32k_timer_start(JIFFIES_TO_HW_TICKS(next_tick, 32768) + 1);
370}
371
372static struct irqaction omap_32k_timer_irq;
373extern struct timer_update_handler timer_update;
374
375static int omap_32k_timer_enable_dyn_tick(void)
376{
377 /* No need to reprogram timer, just use the next interrupt */
378 return 0;
379}
380
381static int omap_32k_timer_disable_dyn_tick(void)
382{
383 omap_32k_timer_start(OMAP_32K_TIMER_TICK_PERIOD);
384 return 0;
385}
386
387static struct dyn_tick_timer omap_dyn_tick_timer = {
388 .enable = omap_32k_timer_enable_dyn_tick,
389 .disable = omap_32k_timer_disable_dyn_tick,
390 .reprogram = omap_32k_timer_reprogram,
391 .handler = omap_32k_timer_interrupt,
392};
393#endif /* CONFIG_NO_IDLE_HZ */
394
395static struct irqaction omap_32k_timer_irq = {
396 .name = "32KHz timer",
397 .flags = SA_INTERRUPT | SA_TIMER,
398 .handler = omap_32k_timer_interrupt,
399};
400
401static __init void omap_init_32k_timer(void)
402{
403
404#ifdef CONFIG_NO_IDLE_HZ
405 omap_timer.dyn_tick = &omap_dyn_tick_timer;
406#endif
407
408 setup_irq(INT_OS_TIMER, &omap_32k_timer_irq);
409 omap_timer.offset = omap_32k_timer_gettimeoffset;
410 omap_32k_last_tick = omap_32k_sync_timer_read();
411 omap_32k_timer_start(OMAP_32K_TIMER_TICK_PERIOD);
412}
413#endif /* CONFIG_OMAP_32K_TIMER */
414 223
415/* 224/*
416 * --------------------------------------------------------------------------- 225 * ---------------------------------------------------------------------------
@@ -419,13 +228,7 @@ static __init void omap_init_32k_timer(void)
419 */ 228 */
420static void __init omap_timer_init(void) 229static void __init omap_timer_init(void)
421{ 230{
422#if defined(CONFIG_OMAP_MPU_TIMER)
423 omap_init_mpu_timer(); 231 omap_init_mpu_timer();
424#elif defined(CONFIG_OMAP_32K_TIMER)
425 omap_init_32k_timer();
426#else
427#error No system timer selected in Kconfig!
428#endif
429} 232}
430 233
431struct sys_timer omap_timer = { 234struct sys_timer omap_timer = {
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 578880943cf2..537dd2e6d380 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -20,3 +20,6 @@ config MACH_OMAP_H4
20 bool "OMAP 2420 H4 board" 20 bool "OMAP 2420 H4 board"
21 depends on ARCH_OMAP2 && ARCH_OMAP24XX 21 depends on ARCH_OMAP2 && ARCH_OMAP24XX
22 22
23config MACH_OMAP_APOLLON
24 bool "OMAP 2420 Apollon board"
25 depends on ARCH_OMAP2 && ARCH_OMAP24XX
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 42041166435c..111eaa64258f 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -3,11 +3,15 @@
3# 3#
4 4
5# Common support 5# Common support
6obj-y := irq.o id.o io.o sram-fn.o clock.o mux.o devices.o serial.o 6obj-y := irq.o id.o io.o sram-fn.o memory.o prcm.o clock.o mux.o devices.o serial.o
7 7
8obj-$(CONFIG_OMAP_MPU_TIMER) += timer-gp.o 8obj-$(CONFIG_OMAP_MPU_TIMER) += timer-gp.o
9 9
10# Power Management
11obj-$(CONFIG_PM) += pm.o sleep.o
12
10# Specific board support 13# Specific board support
11obj-$(CONFIG_MACH_OMAP_GENERIC) += board-generic.o 14obj-$(CONFIG_MACH_OMAP_GENERIC) += board-generic.o
12obj-$(CONFIG_MACH_OMAP_H4) += board-h4.o 15obj-$(CONFIG_MACH_OMAP_H4) += board-h4.o
16obj-$(CONFIG_MACH_OMAP_APOLLON) += board-apollon.o
13 17
diff --git a/arch/arm/mach-omap2/board-apollon.c b/arch/arm/mach-omap2/board-apollon.c
new file mode 100644
index 000000000000..6c6ba172cdf6
--- /dev/null
+++ b/arch/arm/mach-omap2/board-apollon.c
@@ -0,0 +1,285 @@
1/*
2 * linux/arch/arm/mach-omap/omap2/board-apollon.c
3 *
4 * Copyright (C) 2005,2006 Samsung Electronics
5 * Author: Kyungmin Park <kyungmin.park@samsung.com>
6 *
7 * Modified from mach-omap/omap2/board-h4.c
8 *
9 * Code for apollon OMAP2 board. Should work on many OMAP2 systems where
10 * the bootloader passes the board-specific data to the kernel.
11 * Do not put any board-specific code in this file; create a new machine
12 * type if you need custom low-level initializations.
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation.
17 */
18
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/platform_device.h>
22#include <linux/mtd/mtd.h>
23#include <linux/mtd/partitions.h>
24#include <linux/mtd/onenand.h>
25#include <linux/interrupt.h>
26#include <linux/delay.h>
27
28#include <asm/hardware.h>
29#include <asm/mach-types.h>
30#include <asm/mach/arch.h>
31#include <asm/mach/flash.h>
32
33#include <asm/arch/gpio.h>
34#include <asm/arch/mux.h>
35#include <asm/arch/usb.h>
36#include <asm/arch/board.h>
37#include <asm/arch/common.h>
38#include "prcm-regs.h"
39
40/* LED & Switch macros */
41#define LED0_GPIO13 13
42#define LED1_GPIO14 14
43#define LED2_GPIO15 15
44#define SW_ENTER_GPIO16 16
45#define SW_UP_GPIO17 17
46#define SW_DOWN_GPIO58 58
47
48static struct mtd_partition apollon_partitions[] = {
49 {
50 .name = "X-Loader + U-Boot",
51 .offset = 0,
52 .size = SZ_128K,
53 .mask_flags = MTD_WRITEABLE,
54 },
55 {
56 .name = "params",
57 .offset = MTDPART_OFS_APPEND,
58 .size = SZ_128K,
59 },
60 {
61 .name = "kernel",
62 .offset = MTDPART_OFS_APPEND,
63 .size = SZ_2M,
64 },
65 {
66 .name = "rootfs",
67 .offset = MTDPART_OFS_APPEND,
68 .size = SZ_16M,
69 },
70 {
71 .name = "filesystem00",
72 .offset = MTDPART_OFS_APPEND,
73 .size = SZ_32M,
74 },
75 {
76 .name = "filesystem01",
77 .offset = MTDPART_OFS_APPEND,
78 .size = MTDPART_SIZ_FULL,
79 },
80};
81
82static struct flash_platform_data apollon_flash_data = {
83 .parts = apollon_partitions,
84 .nr_parts = ARRAY_SIZE(apollon_partitions),
85};
86
87static struct resource apollon_flash_resource = {
88 .start = APOLLON_CS0_BASE,
89 .end = APOLLON_CS0_BASE + SZ_128K,
90 .flags = IORESOURCE_MEM,
91};
92
93static struct platform_device apollon_onenand_device = {
94 .name = "onenand",
95 .id = -1,
96 .dev = {
97 .platform_data = &apollon_flash_data,
98 },
99 .num_resources = 1, /* apollon_flash_resource is a single struct resource */
100 .resource = &apollon_flash_resource,
101};
102
103static struct resource apollon_smc91x_resources[] = {
104 [0] = {
105 .start = APOLLON_ETHR_START, /* Physical */
106 .end = APOLLON_ETHR_START + 0xf,
107 .flags = IORESOURCE_MEM,
108 },
109 [1] = {
110 .start = OMAP_GPIO_IRQ(APOLLON_ETHR_GPIO_IRQ),
111 .end = OMAP_GPIO_IRQ(APOLLON_ETHR_GPIO_IRQ),
112 .flags = IORESOURCE_IRQ,
113 },
114};
115
116static struct platform_device apollon_smc91x_device = {
117 .name = "smc91x",
118 .id = -1,
119 .num_resources = ARRAY_SIZE(apollon_smc91x_resources),
120 .resource = apollon_smc91x_resources,
121};
122
123static struct platform_device apollon_lcd_device = {
124 .name = "apollon_lcd",
125 .id = -1,
126};
127
128static struct platform_device *apollon_devices[] __initdata = {
129 &apollon_onenand_device,
130 &apollon_smc91x_device,
131 &apollon_lcd_device,
132};
133
134static inline void __init apollon_init_smc91x(void)
135{
136 /* Make sure CS1 timings are correct */
137 GPMC_CONFIG1_1 = 0x00011203;
138 GPMC_CONFIG2_1 = 0x001f1f01;
139 GPMC_CONFIG3_1 = 0x00080803;
140 GPMC_CONFIG4_1 = 0x1c091c09;
141 GPMC_CONFIG5_1 = 0x041f1f1f;
142 GPMC_CONFIG6_1 = 0x000004c4;
143 GPMC_CONFIG7_1 = 0x00000f40 | (APOLLON_CS1_BASE >> 24);
144 udelay(100);
145
146 omap_cfg_reg(W4__24XX_GPIO74);
147 if (omap_request_gpio(APOLLON_ETHR_GPIO_IRQ) < 0) {
148 printk(KERN_ERR "Failed to request GPIO%d for smc91x IRQ\n",
149 APOLLON_ETHR_GPIO_IRQ);
150 return;
151 }
152 omap_set_gpio_direction(APOLLON_ETHR_GPIO_IRQ, 1);
153}
154
155static void __init omap_apollon_init_irq(void)
156{
157 omap2_init_common_hw();
158 omap_init_irq();
159 omap_gpio_init();
160 apollon_init_smc91x();
161}
162
163static struct omap_uart_config apollon_uart_config __initdata = {
164 .enabled_uarts = (1 << 0) | (0 << 1) | (0 << 2),
165};
166
167static struct omap_mmc_config apollon_mmc_config __initdata = {
168 .mmc [0] = {
169 .enabled = 0,
170 .wire4 = 0,
171 .wp_pin = -1,
172 .power_pin = -1,
173 .switch_pin = -1,
174 },
175};
176
177static struct omap_lcd_config apollon_lcd_config __initdata = {
178 .ctrl_name = "internal",
179};
180
181static struct omap_board_config_kernel apollon_config[] = {
182 { OMAP_TAG_UART, &apollon_uart_config },
183 { OMAP_TAG_MMC, &apollon_mmc_config },
184 { OMAP_TAG_LCD, &apollon_lcd_config },
185};
186
187static void __init apollon_led_init(void)
188{
189 /* LED0 - AA10 */
190 omap_cfg_reg(AA10_242X_GPIO13);
191 omap_request_gpio(LED0_GPIO13);
192 omap_set_gpio_direction(LED0_GPIO13, 0);
193 omap_set_gpio_dataout(LED0_GPIO13, 0);
194 /* LED1 - AA6 */
195 omap_cfg_reg(AA6_242X_GPIO14);
196 omap_request_gpio(LED1_GPIO14);
197 omap_set_gpio_direction(LED1_GPIO14, 0);
198 omap_set_gpio_dataout(LED1_GPIO14, 0);
199 /* LED2 - AA4 */
200 omap_cfg_reg(AA4_242X_GPIO15);
201 omap_request_gpio(LED2_GPIO15);
202 omap_set_gpio_direction(LED2_GPIO15, 0);
203 omap_set_gpio_dataout(LED2_GPIO15, 0);
204}
205
206static irqreturn_t apollon_sw_interrupt(int irq, void *ignored, struct pt_regs *regs)
207{
208 static unsigned int led0, led1, led2;
209
210 if (irq == OMAP_GPIO_IRQ(SW_ENTER_GPIO16))
211 omap_set_gpio_dataout(LED0_GPIO13, led0 ^= 1);
212 else if (irq == OMAP_GPIO_IRQ(SW_UP_GPIO17))
213 omap_set_gpio_dataout(LED1_GPIO14, led1 ^= 1);
214 else if (irq == OMAP_GPIO_IRQ(SW_DOWN_GPIO58))
215 omap_set_gpio_dataout(LED2_GPIO15, led2 ^= 1);
216
217 return IRQ_HANDLED;
218}
219
220static void __init apollon_sw_init(void)
221{
222 /* Enter SW - Y11 */
223 omap_cfg_reg(Y11_242X_GPIO16);
224 omap_request_gpio(SW_ENTER_GPIO16);
225 omap_set_gpio_direction(SW_ENTER_GPIO16, 1);
226 /* Up SW - AA12 */
227 omap_cfg_reg(AA12_242X_GPIO17);
228 omap_request_gpio(SW_UP_GPIO17);
229 omap_set_gpio_direction(SW_UP_GPIO17, 1);
230 /* Down SW - AA8 */
231 omap_cfg_reg(AA8_242X_GPIO58);
232 omap_request_gpio(SW_DOWN_GPIO58);
233 omap_set_gpio_direction(SW_DOWN_GPIO58, 1);
234
235 set_irq_type(OMAP_GPIO_IRQ(SW_ENTER_GPIO16), IRQT_RISING);
236 if (request_irq(OMAP_GPIO_IRQ(SW_ENTER_GPIO16), &apollon_sw_interrupt,
237 SA_SHIRQ, "enter sw",
238 &apollon_sw_interrupt))
239 return;
240 set_irq_type(OMAP_GPIO_IRQ(SW_UP_GPIO17), IRQT_RISING);
241 if (request_irq(OMAP_GPIO_IRQ(SW_UP_GPIO17), &apollon_sw_interrupt,
242 SA_SHIRQ, "up sw",
243 &apollon_sw_interrupt))
244 return;
245 set_irq_type(OMAP_GPIO_IRQ(SW_DOWN_GPIO58), IRQT_RISING);
246 if (request_irq(OMAP_GPIO_IRQ(SW_DOWN_GPIO58), &apollon_sw_interrupt,
247 SA_SHIRQ, "down sw",
248 &apollon_sw_interrupt))
249 return;
250}
251
252static void __init omap_apollon_init(void)
253{
254 apollon_led_init();
255 apollon_sw_init();
256
257 /* REVISIT: where's the correct place */
258 omap_cfg_reg(W19_24XX_SYS_NIRQ);
259
260 /*
261 * Make sure the serial ports are muxed on at this point.
262 * You have to mux them off in device drivers later on
263 * if not needed.
264 */
265 platform_add_devices(apollon_devices, ARRAY_SIZE(apollon_devices));
266 omap_board_config = apollon_config;
267 omap_board_config_size = ARRAY_SIZE(apollon_config);
268 omap_serial_init();
269}
270
271static void __init omap_apollon_map_io(void)
272{
273 omap2_map_common_io();
274}
275
276MACHINE_START(OMAP_APOLLON, "OMAP24xx Apollon")
277 /* Maintainer: Kyungmin Park <kyungmin.park@samsung.com> */
278 .phys_io = 0x48000000,
279 .io_pg_offst = ((0xd8000000) >> 18) & 0xfffc,
280 .boot_params = 0x80000100,
281 .map_io = omap_apollon_map_io,
282 .init_irq = omap_apollon_init_irq,
283 .init_machine = omap_apollon_init,
284 .timer = &omap_timer,
285MACHINE_END
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index a300d634d8a5..4933fce766c8 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -17,6 +17,8 @@
17#include <linux/mtd/mtd.h> 17#include <linux/mtd/mtd.h>
18#include <linux/mtd/partitions.h> 18#include <linux/mtd/partitions.h>
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/workqueue.h>
21#include <linux/input.h>
20 22
21#include <asm/hardware.h> 23#include <asm/hardware.h>
22#include <asm/mach-types.h> 24#include <asm/mach-types.h>
@@ -25,15 +27,57 @@
25#include <asm/mach/flash.h> 27#include <asm/mach/flash.h>
26 28
27#include <asm/arch/gpio.h> 29#include <asm/arch/gpio.h>
30#include <asm/arch/gpioexpander.h>
28#include <asm/arch/mux.h> 31#include <asm/arch/mux.h>
29#include <asm/arch/usb.h> 32#include <asm/arch/usb.h>
33#include <asm/arch/irda.h>
30#include <asm/arch/board.h> 34#include <asm/arch/board.h>
31#include <asm/arch/common.h> 35#include <asm/arch/common.h>
32#include <asm/arch/prcm.h> 36#include <asm/arch/keypad.h>
37#include <asm/arch/menelaus.h>
38#include <asm/arch/dma.h>
39#include "prcm-regs.h"
33 40
34#include <asm/io.h> 41#include <asm/io.h>
35#include <asm/delay.h> 42#include <asm/delay.h>
36 43
44static unsigned int row_gpios[6] = { 88, 89, 124, 11, 6, 96 };
45static unsigned int col_gpios[7] = { 90, 91, 100, 36, 12, 97, 98 };
46
47static int h4_keymap[] = {
48 KEY(0, 0, KEY_LEFT),
49 KEY(0, 1, KEY_RIGHT),
50 KEY(0, 2, KEY_A),
51 KEY(0, 3, KEY_B),
52 KEY(0, 4, KEY_C),
53 KEY(1, 0, KEY_DOWN),
54 KEY(1, 1, KEY_UP),
55 KEY(1, 2, KEY_E),
56 KEY(1, 3, KEY_F),
57 KEY(1, 4, KEY_G),
58 KEY(2, 0, KEY_ENTER),
59 KEY(2, 1, KEY_I),
60 KEY(2, 2, KEY_J),
61 KEY(2, 3, KEY_K),
62 KEY(2, 4, KEY_3),
63 KEY(3, 0, KEY_M),
64 KEY(3, 1, KEY_N),
65 KEY(3, 2, KEY_O),
66 KEY(3, 3, KEY_P),
67 KEY(3, 4, KEY_Q),
68 KEY(4, 0, KEY_R),
69 KEY(4, 1, KEY_4),
70 KEY(4, 2, KEY_T),
71 KEY(4, 3, KEY_U),
72 KEY(4, 4, KEY_ENTER),
73 KEY(5, 0, KEY_V),
74 KEY(5, 1, KEY_W),
75 KEY(5, 2, KEY_L),
76 KEY(5, 3, KEY_S),
77 KEY(5, 4, KEY_ENTER),
78 0
79};
80
37static struct mtd_partition h4_partitions[] = { 81static struct mtd_partition h4_partitions[] = {
38 /* bootloader (U-Boot, etc) in first sector */ 82 /* bootloader (U-Boot, etc) in first sector */
39 { 83 {
@@ -108,9 +152,123 @@ static struct platform_device h4_smc91x_device = {
108 .resource = h4_smc91x_resources, 152 .resource = h4_smc91x_resources,
109}; 153};
110 154
155/* Select between the IrDA and aGPS module
156 */
157static int h4_select_irda(struct device *dev, int state)
158{
159 unsigned char expa;
160 int err = 0;
161
162 if ((err = read_gpio_expa(&expa, 0x21))) {
163 printk(KERN_ERR "Error reading from I/O expander\n");
164 return err;
165 }
166
167 /* 'P6' enable/disable IRDA_TX and IRDA_RX */
168 if (state & IR_SEL) { /* IrDa */
169 if ((err = write_gpio_expa(expa | 0x01, 0x21))) {
170 printk(KERN_ERR "Error writing to I/O expander\n");
171 return err;
172 }
173 } else {
174 if ((err = write_gpio_expa(expa & ~0x01, 0x21))) {
175 printk(KERN_ERR "Error writing to I/O expander\n");
176 return err;
177 }
178 }
179 return err;
180}
181
182static void set_trans_mode(void *data)
183{
184 int *mode = data;
185 unsigned char expa;
186 int err = 0;
187
188 if ((err = read_gpio_expa(&expa, 0x20)) != 0) {
189 printk(KERN_ERR "Error reading from I/O expander\n");
190 }
191
192 expa &= ~0x01;
193
194 if (!(*mode & IR_SIRMODE)) { /* MIR/FIR */
195 expa |= 0x01;
196 }
197
198 if ((err = write_gpio_expa(expa, 0x20)) != 0) {
199 printk(KERN_ERR "Error writing to I/O expander\n");
200 }
201}
202
203static int h4_transceiver_mode(struct device *dev, int mode)
204{
205 struct omap_irda_config *irda_config = dev->platform_data;
206
207 cancel_delayed_work(&irda_config->gpio_expa);
208 PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
209 schedule_work(&irda_config->gpio_expa);
210
211 return 0;
212}
213
214static struct omap_irda_config h4_irda_data = {
215 .transceiver_cap = IR_SIRMODE | IR_MIRMODE | IR_FIRMODE,
216 .transceiver_mode = h4_transceiver_mode,
217 .select_irda = h4_select_irda,
218 .rx_channel = OMAP24XX_DMA_UART3_RX,
219 .tx_channel = OMAP24XX_DMA_UART3_TX,
220 .dest_start = OMAP_UART3_BASE,
221 .src_start = OMAP_UART3_BASE,
222 .tx_trigger = OMAP24XX_DMA_UART3_TX,
223 .rx_trigger = OMAP24XX_DMA_UART3_RX,
224};
225
226static struct resource h4_irda_resources[] = {
227 [0] = {
228 .start = INT_24XX_UART3_IRQ,
229 .end = INT_24XX_UART3_IRQ,
230 .flags = IORESOURCE_IRQ,
231 },
232};
233
234static struct platform_device h4_irda_device = {
235 .name = "omapirda",
236 .id = -1,
237 .dev = {
238 .platform_data = &h4_irda_data,
239 },
240 .num_resources = 1,
241 .resource = h4_irda_resources,
242};
243
244static struct omap_kp_platform_data h4_kp_data = {
245 .rows = 6,
246 .cols = 7,
247 .keymap = h4_keymap,
248 .rep = 1,
249 .row_gpios = row_gpios,
250 .col_gpios = col_gpios,
251};
252
253static struct platform_device h4_kp_device = {
254 .name = "omap-keypad",
255 .id = -1,
256 .dev = {
257 .platform_data = &h4_kp_data,
258 },
259};
260
261static struct platform_device h4_lcd_device = {
262 .name = "lcd_h4",
263 .id = -1,
264};
265
111static struct platform_device *h4_devices[] __initdata = { 266static struct platform_device *h4_devices[] __initdata = {
112 &h4_smc91x_device, 267 &h4_smc91x_device,
113 &h4_flash_device, 268 &h4_flash_device,
269 &h4_irda_device,
270 &h4_kp_device,
271 &h4_lcd_device,
114}; 272};
115 273
116static inline void __init h4_init_smc91x(void) 274static inline void __init h4_init_smc91x(void)
@@ -157,7 +315,6 @@ static struct omap_mmc_config h4_mmc_config __initdata = {
157}; 315};
158 316
159static struct omap_lcd_config h4_lcd_config __initdata = { 317static struct omap_lcd_config h4_lcd_config __initdata = {
160 .panel_name = "h4",
161 .ctrl_name = "internal", 318 .ctrl_name = "internal",
162}; 319};
163 320
@@ -174,6 +331,19 @@ static void __init omap_h4_init(void)
174 * You have to mux them off in device drivers later on 331 * You have to mux them off in device drivers later on
175 * if not needed. 332 * if not needed.
176 */ 333 */
334#if defined(CONFIG_OMAP_IR) || defined(CONFIG_OMAP_IR_MODULE)
335 omap_cfg_reg(K15_24XX_UART3_TX);
336 omap_cfg_reg(K14_24XX_UART3_RX);
337#endif
338
339#if defined(CONFIG_KEYBOARD_OMAP) || defined(CONFIG_KEYBOARD_OMAP_MODULE)
340 if (omap_has_menelaus()) {
341 row_gpios[5] = 0;
342 col_gpios[2] = 15;
343 col_gpios[6] = 18;
344 }
345#endif
346
177 platform_add_devices(h4_devices, ARRAY_SIZE(h4_devices)); 347 platform_add_devices(h4_devices, ARRAY_SIZE(h4_devices));
178 omap_board_config = h4_config; 348 omap_board_config = h4_config;
179 omap_board_config_size = ARRAY_SIZE(h4_config); 349 omap_board_config_size = ARRAY_SIZE(h4_config);
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index 180f675c9064..72eb4bf571ac 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -28,14 +28,14 @@
28 28
29#include <asm/arch/clock.h> 29#include <asm/arch/clock.h>
30#include <asm/arch/sram.h> 30#include <asm/arch/sram.h>
31#include <asm/arch/prcm.h>
32 31
32#include "prcm-regs.h"
33#include "memory.h"
33#include "clock.h" 34#include "clock.h"
34 35
35//#define DOWN_VARIABLE_DPLL 1 /* Experimental */ 36//#define DOWN_VARIABLE_DPLL 1 /* Experimental */
36 37
37static struct prcm_config *curr_prcm_set; 38static struct prcm_config *curr_prcm_set;
38static struct memory_timings mem_timings;
39static u32 curr_perf_level = PRCM_FULL_SPEED; 39static u32 curr_perf_level = PRCM_FULL_SPEED;
40 40
41/*------------------------------------------------------------------------- 41/*-------------------------------------------------------------------------
@@ -54,11 +54,13 @@ static void omap2_sys_clk_recalc(struct clk * clk)
54 54
55static u32 omap2_get_dpll_rate(struct clk * tclk) 55static u32 omap2_get_dpll_rate(struct clk * tclk)
56{ 56{
57 int dpll_clk, dpll_mult, dpll_div, amult; 57 long long dpll_clk;
58 int dpll_mult, dpll_div, amult;
58 59
59 dpll_mult = (CM_CLKSEL1_PLL >> 12) & 0x03ff; /* 10 bits */ 60 dpll_mult = (CM_CLKSEL1_PLL >> 12) & 0x03ff; /* 10 bits */
60 dpll_div = (CM_CLKSEL1_PLL >> 8) & 0x0f; /* 4 bits */ 61 dpll_div = (CM_CLKSEL1_PLL >> 8) & 0x0f; /* 4 bits */
61 dpll_clk = (tclk->parent->rate * dpll_mult) / (dpll_div + 1); 62 dpll_clk = (long long)tclk->parent->rate * dpll_mult;
63 do_div(dpll_clk, dpll_div + 1);
62 amult = CM_CLKSEL2_PLL & 0x3; 64 amult = CM_CLKSEL2_PLL & 0x3;
63 dpll_clk *= amult; 65 dpll_clk *= amult;
64 66
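The switch above to a 64-bit intermediate with do_div() is needed because parent->rate * dpll_mult can exceed 32 bits. The values below are only illustrative assumptions, not taken from the patch:

#include <stdio.h>

int main(void)
{
	unsigned int parent = 13000000;		/* e.g. a 13 MHz reference clock */
	unsigned int mult = 340, div = 0;	/* plausible value for the 10-bit multiplier */

	unsigned int bad = parent * mult / (div + 1);			/* wraps past 2^32 */
	unsigned long long good = (unsigned long long)parent * mult / (div + 1);

	printf("32-bit result: %u, 64-bit result: %llu\n", bad, good);
	return 0;
}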
@@ -385,75 +387,23 @@ static u32 omap2_dll_force_needed(void)
385 return 0; 387 return 0;
386} 388}
387 389
388static void omap2_init_memory_params(u32 force_lock_to_unlock_mode)
389{
390 unsigned long dll_cnt;
391 u32 fast_dll = 0;
392
393 mem_timings.m_type = !((SDRC_MR_0 & 0x3) == 0x1); /* DDR = 1, SDR = 0 */
394
395 /* 2422 es2.05 and beyond has a single SIP DDR instead of 2 like others.
396 * In the case of 2422, its ok to use CS1 instead of CS0.
397 */
398
399#if 0 /* FIXME: Enable after 24xx cpu detection works */
400 ctype = get_cpu_type();
401 if (cpu_is_omap2422())
402 mem_timings.base_cs = 1;
403 else
404#endif
405 mem_timings.base_cs = 0;
406
407 if (mem_timings.m_type != M_DDR)
408 return;
409
410 /* With DDR we need to determine the low frequency DLL value */
411 if (((mem_timings.fast_dll_ctrl & (1 << 2)) == M_LOCK_CTRL))
412 mem_timings.dll_mode = M_UNLOCK;
413 else
414 mem_timings.dll_mode = M_LOCK;
415
416 if (mem_timings.base_cs == 0) {
417 fast_dll = SDRC_DLLA_CTRL;
418 dll_cnt = SDRC_DLLA_STATUS & 0xff00;
419 } else {
420 fast_dll = SDRC_DLLB_CTRL;
421 dll_cnt = SDRC_DLLB_STATUS & 0xff00;
422 }
423 if (force_lock_to_unlock_mode) {
424 fast_dll &= ~0xff00;
425 fast_dll |= dll_cnt; /* Current lock mode */
426 }
427 mem_timings.fast_dll_ctrl = fast_dll;
428
429 /* No disruptions, DDR will be offline & C-ABI not followed */
430 omap2_sram_ddr_init(&mem_timings.slow_dll_ctrl,
431 mem_timings.fast_dll_ctrl,
432 mem_timings.base_cs,
433 force_lock_to_unlock_mode);
434 mem_timings.slow_dll_ctrl &= 0xff00; /* Keep lock value */
435
436 /* Turn status into unlock ctrl */
437 mem_timings.slow_dll_ctrl |=
438 ((mem_timings.fast_dll_ctrl & 0xF) | (1 << 2));
439
440 /* 90 degree phase for anything below 133Mhz */
441 mem_timings.slow_dll_ctrl |= (1 << 1);
442}
443
444static u32 omap2_reprogram_sdrc(u32 level, u32 force) 390static u32 omap2_reprogram_sdrc(u32 level, u32 force)
445{ 391{
392 u32 slow_dll_ctrl, fast_dll_ctrl, m_type;
446 u32 prev = curr_perf_level, flags; 393 u32 prev = curr_perf_level, flags;
447 394
448 if ((curr_perf_level == level) && !force) 395 if ((curr_perf_level == level) && !force)
449 return prev; 396 return prev;
450 397
398 m_type = omap2_memory_get_type();
399 slow_dll_ctrl = omap2_memory_get_slow_dll_ctrl();
400 fast_dll_ctrl = omap2_memory_get_fast_dll_ctrl();
401
451 if (level == PRCM_HALF_SPEED) { 402 if (level == PRCM_HALF_SPEED) {
452 local_irq_save(flags); 403 local_irq_save(flags);
453 PRCM_VOLTSETUP = 0xffff; 404 PRCM_VOLTSETUP = 0xffff;
454 omap2_sram_reprogram_sdrc(PRCM_HALF_SPEED, 405 omap2_sram_reprogram_sdrc(PRCM_HALF_SPEED,
455 mem_timings.slow_dll_ctrl, 406 slow_dll_ctrl, m_type);
456 mem_timings.m_type);
457 curr_perf_level = PRCM_HALF_SPEED; 407 curr_perf_level = PRCM_HALF_SPEED;
458 local_irq_restore(flags); 408 local_irq_restore(flags);
459 } 409 }
@@ -461,8 +411,7 @@ static u32 omap2_reprogram_sdrc(u32 level, u32 force)
461 local_irq_save(flags); 411 local_irq_save(flags);
462 PRCM_VOLTSETUP = 0xffff; 412 PRCM_VOLTSETUP = 0xffff;
463 omap2_sram_reprogram_sdrc(PRCM_FULL_SPEED, 413 omap2_sram_reprogram_sdrc(PRCM_FULL_SPEED,
464 mem_timings.fast_dll_ctrl, 414 fast_dll_ctrl, m_type);
465 mem_timings.m_type);
466 curr_perf_level = PRCM_FULL_SPEED; 415 curr_perf_level = PRCM_FULL_SPEED;
467 local_irq_restore(flags); 416 local_irq_restore(flags);
468 } 417 }
@@ -650,7 +599,7 @@ static u32 omap2_get_clksel(u32 *div_sel, u32 *field_mask,
650 case 13: /* dss2 */ 599 case 13: /* dss2 */
651 mask = 0x1; break; 600 mask = 0x1; break;
652 case 25: /* usb */ 601 case 25: /* usb */
653 mask = 0xf; break; 602 mask = 0x7; break;
654 } 603 }
655 } 604 }
656 605
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index 6cab20b1d3c1..6c78d471fab7 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -33,20 +33,6 @@ static u32 omap2_clksel_get_divisor(struct clk *clk);
33#define RATE_IN_242X (1 << 0) 33#define RATE_IN_242X (1 << 0)
34#define RATE_IN_243X (1 << 1) 34#define RATE_IN_243X (1 << 1)
35 35
36/* Memory timings */
37#define M_DDR 1
38#define M_LOCK_CTRL (1 << 2)
39#define M_UNLOCK 0
40#define M_LOCK 1
41
42struct memory_timings {
43 u32 m_type; /* ddr = 1, sdr = 0 */
44 u32 dll_mode; /* use lock mode = 1, unlock mode = 0 */
45 u32 slow_dll_ctrl; /* unlock mode, dll value for slow speed */
46 u32 fast_dll_ctrl; /* unlock mode, dll value for fast speed */
47 u32 base_cs; /* base chip select to use for calculations */
48};
49
50/* Key dividers which make up a PRCM set. Ratio's for a PRCM are mandated. 36/* Key dividers which make up a PRCM set. Ratio's for a PRCM are mandated.
51 * xtal_speed, dpll_speed, mpu_speed, CM_CLKSEL_MPU,CM_CLKSEL_DSP 37 * xtal_speed, dpll_speed, mpu_speed, CM_CLKSEL_MPU,CM_CLKSEL_DSP
52 * CM_CLKSEL_GFX, CM_CLKSEL1_CORE, CM_CLKSEL1_PLL CM_CLKSEL2_PLL, CM_CLKSEL_MDM 38 * CM_CLKSEL_GFX, CM_CLKSEL1_CORE, CM_CLKSEL1_PLL CM_CLKSEL2_PLL, CM_CLKSEL_MDM
@@ -731,6 +717,16 @@ static struct clk sys_clkout2 = {
731 .recalc = &omap2_clksel_recalc, 717 .recalc = &omap2_clksel_recalc,
732}; 718};
733 719
720static struct clk emul_ck = {
721 .name = "emul_ck",
722 .parent = &func_54m_ck,
723 .flags = CLOCK_IN_OMAP242X,
724 .enable_reg = (void __iomem *)&PRCM_CLKEMUL_CTRL,
725 .enable_bit = 0,
726 .recalc = &omap2_propagate_rate,
727
728};
729
734/* 730/*
735 * MPU clock domain 731 * MPU clock domain
736 * Clocks: 732 * Clocks:
@@ -1702,7 +1698,8 @@ static struct clk hdq_fck = {
1702}; 1698};
1703 1699
1704static struct clk i2c2_ick = { 1700static struct clk i2c2_ick = {
1705 .name = "i2c2_ick", 1701 .name = "i2c_ick",
1702 .id = 2,
1706 .parent = &l4_ck, 1703 .parent = &l4_ck,
1707 .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, 1704 .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
1708 .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE, 1705 .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE,
@@ -1711,7 +1708,8 @@ static struct clk i2c2_ick = {
1711}; 1708};
1712 1709
1713static struct clk i2c2_fck = { 1710static struct clk i2c2_fck = {
1714 .name = "i2c2_fck", 1711 .name = "i2c_fck",
1712 .id = 2,
1715 .parent = &func_12m_ck, 1713 .parent = &func_12m_ck,
1716 .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, 1714 .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
1717 .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE, 1715 .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
@@ -1729,7 +1727,8 @@ static struct clk i2chs2_fck = {
1729}; 1727};
1730 1728
1731static struct clk i2c1_ick = { 1729static struct clk i2c1_ick = {
1732 .name = "i2c1_ick", 1730 .name = "i2c_ick",
1731 .id = 1,
1733 .parent = &l4_ck, 1732 .parent = &l4_ck,
1734 .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, 1733 .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
1735 .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE, 1734 .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE,
@@ -1738,7 +1737,8 @@ static struct clk i2c1_ick = {
1738}; 1737};
1739 1738
1740static struct clk i2c1_fck = { 1739static struct clk i2c1_fck = {
1741 .name = "i2c1_fck", 1740 .name = "i2c_fck",
1741 .id = 1,
1742 .parent = &func_12m_ck, 1742 .parent = &func_12m_ck,
1743 .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, 1743 .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
1744 .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE, 1744 .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
@@ -1971,6 +1971,7 @@ static struct clk *onchip_clks[] = {
1971 &wdt1_osc_ck, 1971 &wdt1_osc_ck,
1972 &sys_clkout, 1972 &sys_clkout,
1973 &sys_clkout2, 1973 &sys_clkout2,
1974 &emul_ck,
1974 /* mpu domain clocks */ 1975 /* mpu domain clocks */
1975 &mpu_ck, 1976 &mpu_ck,
1976 /* dsp domain clocks */ 1977 /* dsp domain clocks */
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 7181edb89352..def9e5370edf 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -74,6 +74,47 @@ static void omap_init_i2c(void) {}
74 74
75#endif 75#endif
76 76
77#if defined(CONFIG_OMAP_STI)
78
79#define OMAP2_STI_BASE IO_ADDRESS(0x48068000)
80#define OMAP2_STI_CHANNEL_BASE 0x54000000
81#define OMAP2_STI_IRQ 4
82
83static struct resource sti_resources[] = {
84 {
85 .start = OMAP2_STI_BASE,
86 .end = OMAP2_STI_BASE + 0x7ff,
87 .flags = IORESOURCE_MEM,
88 },
89 {
90 .start = OMAP2_STI_CHANNEL_BASE,
91 .end = OMAP2_STI_CHANNEL_BASE + SZ_64K - 1,
92 .flags = IORESOURCE_MEM,
93 },
94 {
95 .start = OMAP2_STI_IRQ,
96 .flags = IORESOURCE_IRQ,
97 }
98};
99
100static struct platform_device sti_device = {
101 .name = "sti",
102 .id = -1,
103 .dev = {
104 .release = omap_nop_release,
105 },
106 .num_resources = ARRAY_SIZE(sti_resources),
107 .resource = sti_resources,
108};
109
110static inline void omap_init_sti(void)
111{
112 platform_device_register(&sti_device);
113}
114#else
115static inline void omap_init_sti(void) {}
116#endif
117
77/*-------------------------------------------------------------------------*/ 118/*-------------------------------------------------------------------------*/
78 119
79static int __init omap2_init_devices(void) 120static int __init omap2_init_devices(void)
@@ -82,6 +123,7 @@ static int __init omap2_init_devices(void)
82 * in alphabetical order so they're easier to sort through. 123 * in alphabetical order so they're easier to sort through.
83 */ 124 */
84 omap_init_i2c(); 125 omap_init_i2c();
126 omap_init_sti();
85 127
86 return 0; 128 return 0;
87} 129}
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 8ea67bf196a5..7d5711611f2f 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -16,9 +16,13 @@
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/init.h> 17#include <linux/init.h>
18 18
19#include <asm/mach/map.h> 19#include <asm/tlb.h>
20#include <asm/io.h> 20#include <asm/io.h>
21
22#include <asm/mach/map.h>
23
21#include <asm/arch/mux.h> 24#include <asm/arch/mux.h>
25#include <asm/arch/omapfb.h>
22 26
23extern void omap_sram_init(void); 27extern void omap_sram_init(void);
24extern int omap2_clk_init(void); 28extern int omap2_clk_init(void);
@@ -43,11 +47,24 @@ static struct map_desc omap2_io_desc[] __initdata = {
43 } 47 }
44}; 48};
45 49
46void __init omap_map_common_io(void) 50void __init omap2_map_common_io(void)
47{ 51{
48 iotable_init(omap2_io_desc, ARRAY_SIZE(omap2_io_desc)); 52 iotable_init(omap2_io_desc, ARRAY_SIZE(omap2_io_desc));
53
54 /* Normally devicemaps_init() would flush caches and tlb after
55 * mdesc->map_io(), but we must also do it here because of the CPU
56 * revision check below.
57 */
58 local_flush_tlb_all();
59 flush_cache_all();
60
49 omap2_check_revision(); 61 omap2_check_revision();
50 omap_sram_init(); 62 omap_sram_init();
63 omapfb_reserve_mem();
64}
65
66void __init omap2_init_common_hw(void)
67{
51 omap2_mux_init(); 68 omap2_mux_init();
52 omap2_clk_init(); 69 omap2_clk_init();
53} 70}
diff --git a/arch/arm/mach-omap2/memory.c b/arch/arm/mach-omap2/memory.c
new file mode 100644
index 000000000000..1d925d69fc35
--- /dev/null
+++ b/arch/arm/mach-omap2/memory.c
@@ -0,0 +1,102 @@
1/*
2 * linux/arch/arm/mach-omap2/memory.c
3 *
4 * Memory timing related functions for OMAP24XX
5 *
6 * Copyright (C) 2005 Texas Instruments Inc.
7 * Richard Woodruff <r-woodruff2@ti.com>
8 *
9 * Copyright (C) 2005 Nokia Corporation
10 * Tony Lindgren <tony@atomide.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17#include <linux/config.h>
18#include <linux/module.h>
19#include <linux/kernel.h>
20#include <linux/device.h>
21#include <linux/list.h>
22#include <linux/errno.h>
23#include <linux/delay.h>
24#include <linux/clk.h>
25
26#include <asm/io.h>
27
28#include <asm/arch/clock.h>
29#include <asm/arch/sram.h>
30
31#include "prcm-regs.h"
32#include "memory.h"
33
34static struct memory_timings mem_timings;
35
36u32 omap2_memory_get_slow_dll_ctrl(void)
37{
38 return mem_timings.slow_dll_ctrl;
39}
40
41u32 omap2_memory_get_fast_dll_ctrl(void)
42{
43 return mem_timings.fast_dll_ctrl;
44}
45
46u32 omap2_memory_get_type(void)
47{
48 return mem_timings.m_type;
49}
50
51void omap2_init_memory_params(u32 force_lock_to_unlock_mode)
52{
53 unsigned long dll_cnt;
54 u32 fast_dll = 0;
55
56 mem_timings.m_type = !((SDRC_MR_0 & 0x3) == 0x1); /* DDR = 1, SDR = 0 */
57
58 /* 2422 es2.05 and beyond has a single SIP DDR instead of 2 like others.
59 * In the case of 2422, its ok to use CS1 instead of CS0.
60 */
61 if (cpu_is_omap2422())
62 mem_timings.base_cs = 1;
63 else
64 mem_timings.base_cs = 0;
65
66 if (mem_timings.m_type != M_DDR)
67 return;
68
69 /* With DDR we need to determine the low frequency DLL value */
70 if (((mem_timings.fast_dll_ctrl & (1 << 2)) == M_LOCK_CTRL))
71 mem_timings.dll_mode = M_UNLOCK;
72 else
73 mem_timings.dll_mode = M_LOCK;
74
75 if (mem_timings.base_cs == 0) {
76 fast_dll = SDRC_DLLA_CTRL;
77 dll_cnt = SDRC_DLLA_STATUS & 0xff00;
78 } else {
79 fast_dll = SDRC_DLLB_CTRL;
80 dll_cnt = SDRC_DLLB_STATUS & 0xff00;
81 }
82 if (force_lock_to_unlock_mode) {
83 fast_dll &= ~0xff00;
84 fast_dll |= dll_cnt; /* Current lock mode */
85 }
86 /* set fast timings with DLL filter disabled */
87 mem_timings.fast_dll_ctrl = (fast_dll | (3 << 8));
88
89 /* No disruptions, DDR will be offline & C-ABI not followed */
90 omap2_sram_ddr_init(&mem_timings.slow_dll_ctrl,
91 mem_timings.fast_dll_ctrl,
92 mem_timings.base_cs,
93 force_lock_to_unlock_mode);
94 mem_timings.slow_dll_ctrl &= 0xff00; /* Keep lock value */
95
96 /* Turn status into unlock ctrl */
97 mem_timings.slow_dll_ctrl |=
98 ((mem_timings.fast_dll_ctrl & 0xF) | (1 << 2));
99
100 /* 90 degree phase for anything below 133Mhz + disable DLL filter */
101 mem_timings.slow_dll_ctrl |= ((1 << 1) | (3 << 8));
102}
diff --git a/arch/arm/mach-omap2/memory.h b/arch/arm/mach-omap2/memory.h
new file mode 100644
index 000000000000..d212eea83a05
--- /dev/null
+++ b/arch/arm/mach-omap2/memory.h
@@ -0,0 +1,34 @@
1/*
2 * linux/arch/arm/mach-omap2/memory.h
3 *
4 * Interface for memory timing related functions for OMAP24XX
5 *
6 * Copyright (C) 2005 Texas Instruments Inc.
7 * Richard Woodruff <r-woodruff2@ti.com>
8 *
9 * Copyright (C) 2005 Nokia Corporation
10 * Tony Lindgren <tony@atomide.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17/* Memory timings */
18#define M_DDR 1
19#define M_LOCK_CTRL (1 << 2)
20#define M_UNLOCK 0
21#define M_LOCK 1
22
23struct memory_timings {
24 u32 m_type; /* ddr = 1, sdr = 0 */
25 u32 dll_mode; /* use lock mode = 1, unlock mode = 0 */
26 u32 slow_dll_ctrl; /* unlock mode, dll value for slow speed */
27 u32 fast_dll_ctrl; /* unlock mode, dll value for fast speed */
28 u32 base_cs; /* base chip select to use for calculations */
29};
30
31extern void omap2_init_memory_params(u32 force_lock_to_unlock_mode);
32extern u32 omap2_memory_get_slow_dll_ctrl(void);
33extern u32 omap2_memory_get_fast_dll_ctrl(void);
34extern u32 omap2_memory_get_type(void);
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index ea4654815dd1..1197dc38c20a 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -50,9 +50,54 @@ MUX_CFG_24XX("H19_24XX_I2C2_SDA", 0x114, 0, 0, 0, 1)
50/* Menelaus interrupt */ 50/* Menelaus interrupt */
51MUX_CFG_24XX("W19_24XX_SYS_NIRQ", 0x12c, 0, 1, 1, 1) 51MUX_CFG_24XX("W19_24XX_SYS_NIRQ", 0x12c, 0, 1, 1, 1)
52 52
53/* 24xx clocks */
54MUX_CFG_24XX("W14_24XX_SYS_CLKOUT", 0x137, 0, 1, 1, 1)
55
56/* 24xx McBSP */
57MUX_CFG_24XX("Y15_24XX_MCBSP2_CLKX", 0x124, 1, 1, 0, 1)
58MUX_CFG_24XX("R14_24XX_MCBSP2_FSX", 0x125, 1, 1, 0, 1)
59MUX_CFG_24XX("W15_24XX_MCBSP2_DR", 0x126, 1, 1, 0, 1)
60MUX_CFG_24XX("V15_24XX_MCBSP2_DX", 0x127, 1, 1, 0, 1)
61
53/* 24xx GPIO */ 62/* 24xx GPIO */
63MUX_CFG_24XX("M21_242X_GPIO11", 0x0c9, 3, 1, 1, 1)
64MUX_CFG_24XX("AA10_242X_GPIO13", 0x0e5, 3, 0, 0, 1)
65MUX_CFG_24XX("AA6_242X_GPIO14", 0x0e6, 3, 0, 0, 1)
66MUX_CFG_24XX("AA4_242X_GPIO15", 0x0e7, 3, 0, 0, 1)
67MUX_CFG_24XX("Y11_242X_GPIO16", 0x0e8, 3, 0, 0, 1)
68MUX_CFG_24XX("AA12_242X_GPIO17", 0x0e9, 3, 0, 0, 1)
69MUX_CFG_24XX("AA8_242X_GPIO58", 0x0ea, 3, 0, 0, 1)
54MUX_CFG_24XX("Y20_24XX_GPIO60", 0x12c, 3, 0, 0, 1) 70MUX_CFG_24XX("Y20_24XX_GPIO60", 0x12c, 3, 0, 0, 1)
71MUX_CFG_24XX("W4__24XX_GPIO74", 0x0f2, 3, 0, 0, 1)
55MUX_CFG_24XX("M15_24XX_GPIO92", 0x10a, 3, 0, 0, 1) 72MUX_CFG_24XX("M15_24XX_GPIO92", 0x10a, 3, 0, 0, 1)
73MUX_CFG_24XX("V14_24XX_GPIO117", 0x128, 3, 1, 0, 1)
74
75/* TSC IRQ */
76MUX_CFG_24XX("P20_24XX_TSC_IRQ", 0x108, 0, 0, 0, 1)
77
78/* UART3 */
79MUX_CFG_24XX("K15_24XX_UART3_TX", 0x118, 0, 0, 0, 1)
80MUX_CFG_24XX("K14_24XX_UART3_RX", 0x119, 0, 0, 0, 1)
81
82/* Keypad GPIO*/
83MUX_CFG_24XX("T19_24XX_KBR0", 0x106, 3, 1, 1, 1)
84MUX_CFG_24XX("R19_24XX_KBR1", 0x107, 3, 1, 1, 1)
85MUX_CFG_24XX("V18_24XX_KBR2", 0x139, 3, 1, 1, 1)
86MUX_CFG_24XX("M21_24XX_KBR3", 0xc9, 3, 1, 1, 1)
87MUX_CFG_24XX("E5__24XX_KBR4", 0x138, 3, 1, 1, 1)
88MUX_CFG_24XX("M18_24XX_KBR5", 0x10e, 3, 1, 1, 1)
89MUX_CFG_24XX("R20_24XX_KBC0", 0x108, 3, 0, 0, 1)
90MUX_CFG_24XX("M14_24XX_KBC1", 0x109, 3, 0, 0, 1)
91MUX_CFG_24XX("H19_24XX_KBC2", 0x114, 3, 0, 0, 1)
92MUX_CFG_24XX("V17_24XX_KBC3", 0x135, 3, 0, 0, 1)
93MUX_CFG_24XX("P21_24XX_KBC4", 0xca, 3, 0, 0, 1)
94MUX_CFG_24XX("L14_24XX_KBC5", 0x10f, 3, 0, 0, 1)
95MUX_CFG_24XX("N19_24XX_KBC6", 0x110, 3, 0, 0, 1)
96
97/* 24xx Menelaus Keypad GPIO */
98MUX_CFG_24XX("B3__24XX_KBR5", 0x30, 3, 1, 1, 1)
99MUX_CFG_24XX("AA4_24XX_KBC2", 0xe7, 3, 0, 0, 1)
100MUX_CFG_24XX("B13_24XX_KBC6", 0x110, 3, 0, 0, 1)
56 101
57}; 102};
58 103
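Board files do not touch these tables directly; they go through the generic OMAP mux call. A minimal sketch, assuming the enum identifiers generated for the strings above (Y15_24XX_MCBSP2_CLKX and friends) are visible via asm/arch/mux.h as usual, and with a made-up board function name:

#include <asm/arch/mux.h>

/* Hypothetical board init fragment: route McBSP2 pins and one keypad row/column. */
static void __init example_board_mux_init(void)
{
	omap_cfg_reg(Y15_24XX_MCBSP2_CLKX);
	omap_cfg_reg(R14_24XX_MCBSP2_FSX);
	omap_cfg_reg(T19_24XX_KBR0);
	omap_cfg_reg(R20_24XX_KBC0);
}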
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
new file mode 100644
index 000000000000..562168fa2b16
--- /dev/null
+++ b/arch/arm/mach-omap2/pm.c
@@ -0,0 +1,149 @@
1/*
2 * linux/arch/arm/mach-omap2/pm.c
3 *
4 * OMAP2 Power Management Routines
5 *
6 * Copyright (C) 2006 Nokia Corporation
7 * Tony Lindgren <tony@atomide.com>
8 *
9 * Copyright (C) 2005 Texas Instruments, Inc.
10 * Richard Woodruff <r-woodruff2@ti.com>
11 *
12 * Based on pm.c for omap1
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation.
17 */
18
19#include <linux/pm.h>
20#include <linux/sched.h>
21#include <linux/proc_fs.h>
22#include <linux/pm.h>
23#include <linux/interrupt.h>
24#include <linux/sysfs.h>
25#include <linux/module.h>
26
27#include <asm/io.h>
28#include <asm/irq.h>
29#include <asm/atomic.h>
30#include <asm/mach/time.h>
31#include <asm/mach/irq.h>
32#include <asm/mach-types.h>
33
34#include <asm/arch/irqs.h>
35#include <asm/arch/clock.h>
36#include <asm/arch/sram.h>
37#include <asm/arch/pm.h>
38
39static struct clk *vclk;
40static void (*omap2_sram_idle)(void);
41static void (*omap2_sram_suspend)(int dllctrl, int cpu_rev);
42static void (*saved_idle)(void);
43
44void omap2_pm_idle(void)
45{
46 local_irq_disable();
47 local_fiq_disable();
48 if (need_resched()) {
49 local_fiq_enable();
50 local_irq_enable();
51 return;
52 }
53
54 /*
55 * Since an interrupt may set up a timer, we don't want to
56 * reprogram the hardware timer with interrupts enabled.
57 * Re-enable interrupts only after returning from idle.
58 */
59 timer_dyn_reprogram();
60
61 omap2_sram_idle();
62 local_fiq_enable();
63 local_irq_enable();
64}
65
66static int omap2_pm_prepare(suspend_state_t state)
67{
68 int error = 0;
69
70 /* We cannot sleep in idle until we have resumed */
71 saved_idle = pm_idle;
72 pm_idle = NULL;
73
74 switch (state)
75 {
76 case PM_SUSPEND_STANDBY:
77 case PM_SUSPEND_MEM:
78 break;
79
80 case PM_SUSPEND_DISK:
81 return -ENOTSUPP;
82
83 default:
84 return -EINVAL;
85 }
86
87 return error;
88}
89
90static int omap2_pm_enter(suspend_state_t state)
91{
92 switch (state)
93 {
94 case PM_SUSPEND_STANDBY:
95 case PM_SUSPEND_MEM:
96 /* FIXME: Add suspend */
97 break;
98
99 case PM_SUSPEND_DISK:
100 return -ENOTSUPP;
101
102 default:
103 return -EINVAL;
104 }
105
106 return 0;
107}
108
109static int omap2_pm_finish(suspend_state_t state)
110{
111 pm_idle = saved_idle;
112 return 0;
113}
114
115static struct pm_ops omap_pm_ops = {
116 .pm_disk_mode = 0,
117 .prepare = omap2_pm_prepare,
118 .enter = omap2_pm_enter,
119 .finish = omap2_pm_finish,
120};
121
122int __init omap2_pm_init(void)
123{
124 printk("Power Management for TI OMAP.\n");
125
126 vclk = clk_get(NULL, "virt_prcm_set");
127 if (IS_ERR(vclk)) {
128 printk(KERN_ERR "Could not get PM vclk\n");
129 return -ENODEV;
130 }
131
132 /*
133 * We copy the assembler sleep/wakeup routines to SRAM.
134 * These routines need to be in SRAM as that's the only
135 * memory the MPU can see when it wakes up.
136 */
137 omap2_sram_idle = omap_sram_push(omap24xx_idle_loop_suspend,
138 omap24xx_idle_loop_suspend_sz);
139
140 omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend,
141 omap24xx_cpu_suspend_sz);
142
143 pm_set_ops(&omap_pm_ops);
144 pm_idle = omap2_pm_idle;
145
146 return 0;
147}
148
149__initcall(omap2_pm_init);
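For orientation, the three hooks registered in omap_pm_ops are driven by the generic suspend core in a fixed prepare/enter/finish order; the sketch below only restates that ordering (it is not the real core code, and device suspend plus error paths are omitted):

/* Simplified sketch of what a mem-suspend request amounts to with the ops above. */
static int example_suspend_mem(void)
{
	int error;

	error = omap2_pm_prepare(PM_SUSPEND_MEM);	/* disables pm_idle */
	if (error)
		return error;

	error = omap2_pm_enter(PM_SUSPEND_MEM);		/* FIXME above: real suspend goes here */
	omap2_pm_finish(PM_SUSPEND_MEM);		/* restores pm_idle */

	return error;
}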
diff --git a/arch/arm/mach-omap2/prcm.h b/arch/arm/mach-omap2/prcm-regs.h
index 2eb89b936c83..22ac7be4f782 100644
--- a/arch/arm/mach-omap2/prcm.h
+++ b/arch/arm/mach-omap2/prcm-regs.h
@@ -1,5 +1,7 @@
1/* 1/*
2 * prcm.h - Access definations for use in OMAP24XX clock and power management 2 * linux/arch/arm/mach-omap2/prcm-regs.h
3 *
4 * OMAP24XX Power Reset and Clock Management (PRCM) registers
3 * 5 *
4 * Copyright (C) 2005 Texas Instruments, Inc. 6 * Copyright (C) 2005 Texas Instruments, Inc.
5 * 7 *
@@ -18,8 +20,8 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */ 21 */
20 22
21#ifndef __ASM_ARM_ARCH_DPM_PRCM_H 23#ifndef __ARCH_ARM_MACH_OMAP2_PRCM_H
22#define __ASM_ARM_ARCH_DPM_PRCM_H 24#define __ARCH_ARM_MACH_OMAP2_PRCM_H
23 25
24/* SET_PERFORMANCE_LEVEL PARAMETERS */ 26/* SET_PERFORMANCE_LEVEL PARAMETERS */
25#define PRCM_HALF_SPEED 1 27#define PRCM_HALF_SPEED 1
@@ -159,54 +161,63 @@
159#define CM_FCLKEN_MDM PRCM_REG32(0xC00) 161#define CM_FCLKEN_MDM PRCM_REG32(0xC00)
160#define CM_ICLKEN_MDM PRCM_REG32(0xC10) 162#define CM_ICLKEN_MDM PRCM_REG32(0xC10)
161#define CM_IDLEST_MDM PRCM_REG32(0xC20) 163#define CM_IDLEST_MDM PRCM_REG32(0xC20)
164#define CM_AUTOIDLE_MDM PRCM_REG32(0xC30)
162#define CM_CLKSEL_MDM PRCM_REG32(0xC40) 165#define CM_CLKSEL_MDM PRCM_REG32(0xC40)
163 166#define CM_CLKSTCTRL_MDM PRCM_REG32(0xC48)
164/* FIXME: Move to header for 2430 */ 167#define RM_RSTCTRL_MDM PRCM_REG32(0xC50)
165#define DISP_BASE (OMAP24XX_L4_IO_BASE+0x50000) 168#define RM_RSTST_MDM PRCM_REG32(0xC58)
169#define PM_WKEN_MDM PRCM_REG32(0xCA0)
170#define PM_WKST_MDM PRCM_REG32(0xCB0)
171#define PM_WKDEP_MDM PRCM_REG32(0xCC8)
172#define PM_PWSTCTRL_MDM PRCM_REG32(0xCE0)
173#define PM_PWSTST_MDM PRCM_REG32(0xCE4)
174
175#define OMAP24XX_L4_IO_BASE 0x48000000
176
177#define DISP_BASE (OMAP24XX_L4_IO_BASE + 0x50000)
166#define DISP_REG32(offset) __REG32(DISP_BASE + (offset)) 178#define DISP_REG32(offset) __REG32(DISP_BASE + (offset))
167 179
168#define GPMC_BASE (OMAP24XX_GPMC_BASE) 180#define OMAP24XX_GPMC_BASE (L3_24XX_BASE + 0xa000)
169#define GPMC_REG32(offset) __REG32(GPMC_BASE + (offset)) 181#define GPMC_REG32(offset) __REG32(OMAP24XX_GPMC_BASE + (offset))
170 182
171#define GPT1_BASE (OMAP24XX_GPT1) 183/* FIXME: Move these to timer code */
184#define GPT1_BASE (0x48028000)
172#define GPT1_REG32(offset) __REG32(GPT1_BASE + (offset)) 185#define GPT1_REG32(offset) __REG32(GPT1_BASE + (offset))
173 186
174/* Misc sysconfig */ 187/* Misc sysconfig */
175#define DISPC_SYSCONFIG DISP_REG32(0x410) 188#define DISPC_SYSCONFIG DISP_REG32(0x410)
176#define SPI_BASE (OMAP24XX_L4_IO_BASE+0x98000) 189#define SPI_BASE (OMAP24XX_L4_IO_BASE + 0x98000)
177#define MCSPI1_SYSCONFIG __REG32(SPI_BASE + 0x10) 190#define MCSPI1_SYSCONFIG __REG32(SPI_BASE + 0x10)
178#define MCSPI2_SYSCONFIG __REG32(SPI_BASE+0x2000 + 0x10) 191#define MCSPI2_SYSCONFIG __REG32(SPI_BASE + 0x2000 + 0x10)
179 192#define MCSPI3_SYSCONFIG __REG32(OMAP24XX_L4_IO_BASE + 0xb8010)
180//#define DSP_MMU_SYSCONFIG 0x5A000010 193
181#define CAMERA_MMU_SYSCONFIG __REG32(DISP_BASE+0x2C10) 194#define CAMERA_MMU_SYSCONFIG __REG32(DISP_BASE + 0x2C10)
182//#define IVA_MMU_SYSCONFIG 0x5D000010 195#define CAMERA_DMA_SYSCONFIG __REG32(DISP_BASE + 0x282C)
183//#define DSP_DMA_SYSCONFIG 0x00FCC02C 196#define SYSTEM_DMA_SYSCONFIG __REG32(DISP_BASE + 0x602C)
184#define CAMERA_DMA_SYSCONFIG __REG32(DISP_BASE+0x282C)
185#define SYSTEM_DMA_SYSCONFIG __REG32(DISP_BASE+0x602C)
186#define GPMC_SYSCONFIG GPMC_REG32(0x010) 197#define GPMC_SYSCONFIG GPMC_REG32(0x010)
187#define MAILBOXES_SYSCONFIG __REG32(OMAP24XX_L4_IO_BASE+0x94010) 198#define MAILBOXES_SYSCONFIG __REG32(OMAP24XX_L4_IO_BASE + 0x94010)
188#define UART1_SYSCONFIG __REG32(OMAP24XX_L4_IO_BASE+0x6A054) 199#define UART1_SYSCONFIG __REG32(OMAP24XX_L4_IO_BASE + 0x6A054)
189#define UART2_SYSCONFIG __REG32(OMAP24XX_L4_IO_BASE+0x6C054) 200#define UART2_SYSCONFIG __REG32(OMAP24XX_L4_IO_BASE + 0x6C054)
190#define UART3_SYSCONFIG __REG32(OMAP24XX_L4_IO_BASE+0x6E054) 201#define UART3_SYSCONFIG __REG32(OMAP24XX_L4_IO_BASE + 0x6E054)
191//#define IVA_SYSCONFIG 0x5C060010 202#define SDRC_SYSCONFIG __REG32(OMAP24XX_SDRC_BASE + 0x10)
192#define SDRC_SYSCONFIG __REG32(OMAP24XX_SDRC_BASE+0x10) 203#define OMAP24XX_SMS_BASE (L3_24XX_BASE + 0x8000)
193#define SMS_SYSCONFIG __REG32(OMAP24XX_SMS_BASE+0x10) 204#define SMS_SYSCONFIG __REG32(OMAP24XX_SMS_BASE + 0x10)
194#define SSI_SYSCONFIG __REG32(DISP_BASE+0x8010) 205#define SSI_SYSCONFIG __REG32(DISP_BASE + 0x8010)
195//#define VLYNQ_SYSCONFIG 0x67FFFE10
196 206
197/* rkw - good cannidates for PM_ to start what nm was trying */ 207/* rkw - good cannidates for PM_ to start what nm was trying */
198#define OMAP24XX_GPT2 (OMAP24XX_L4_IO_BASE+0x2A000) 208#define OMAP24XX_GPT2 (OMAP24XX_L4_IO_BASE + 0x2A000)
199#define OMAP24XX_GPT3 (OMAP24XX_L4_IO_BASE+0x78000) 209#define OMAP24XX_GPT3 (OMAP24XX_L4_IO_BASE + 0x78000)
200#define OMAP24XX_GPT4 (OMAP24XX_L4_IO_BASE+0x7A000) 210#define OMAP24XX_GPT4 (OMAP24XX_L4_IO_BASE + 0x7A000)
201#define OMAP24XX_GPT5 (OMAP24XX_L4_IO_BASE+0x7C000) 211#define OMAP24XX_GPT5 (OMAP24XX_L4_IO_BASE + 0x7C000)
202#define OMAP24XX_GPT6 (OMAP24XX_L4_IO_BASE+0x7E000) 212#define OMAP24XX_GPT6 (OMAP24XX_L4_IO_BASE + 0x7E000)
203#define OMAP24XX_GPT7 (OMAP24XX_L4_IO_BASE+0x80000) 213#define OMAP24XX_GPT7 (OMAP24XX_L4_IO_BASE + 0x80000)
204#define OMAP24XX_GPT8 (OMAP24XX_L4_IO_BASE+0x82000) 214#define OMAP24XX_GPT8 (OMAP24XX_L4_IO_BASE + 0x82000)
205#define OMAP24XX_GPT9 (OMAP24XX_L4_IO_BASE+0x84000) 215#define OMAP24XX_GPT9 (OMAP24XX_L4_IO_BASE + 0x84000)
206#define OMAP24XX_GPT10 (OMAP24XX_L4_IO_BASE+0x86000) 216#define OMAP24XX_GPT10 (OMAP24XX_L4_IO_BASE + 0x86000)
207#define OMAP24XX_GPT11 (OMAP24XX_L4_IO_BASE+0x88000) 217#define OMAP24XX_GPT11 (OMAP24XX_L4_IO_BASE + 0x88000)
208#define OMAP24XX_GPT12 (OMAP24XX_L4_IO_BASE+0x8A000) 218#define OMAP24XX_GPT12 (OMAP24XX_L4_IO_BASE + 0x8A000)
209 219
220/* FIXME: Move these to timer code */
210#define GPTIMER1_SYSCONFIG GPT1_REG32(0x010) 221#define GPTIMER1_SYSCONFIG GPT1_REG32(0x010)
211#define GPTIMER2_SYSCONFIG __REG32(OMAP24XX_GPT2 + 0x10) 222#define GPTIMER2_SYSCONFIG __REG32(OMAP24XX_GPT2 + 0x10)
212#define GPTIMER3_SYSCONFIG __REG32(OMAP24XX_GPT3 + 0x10) 223#define GPTIMER3_SYSCONFIG __REG32(OMAP24XX_GPT3 + 0x10)
@@ -220,12 +231,18 @@
220#define GPTIMER11_SYSCONFIG __REG32(OMAP24XX_GPT11 + 0x10) 231#define GPTIMER11_SYSCONFIG __REG32(OMAP24XX_GPT11 + 0x10)
221#define GPTIMER12_SYSCONFIG __REG32(OMAP24XX_GPT12 + 0x10) 232#define GPTIMER12_SYSCONFIG __REG32(OMAP24XX_GPT12 + 0x10)
222 233
223#define GPIOX_BASE(X) (OMAP24XX_GPIO_BASE+(0x2000*((X)-1))) 234/* FIXME: Move these to gpio code */
235#define OMAP24XX_GPIO_BASE 0x48018000
236#define GPIOX_BASE(X) (OMAP24XX_GPIO_BASE + (0x2000 * ((X) - 1)))
237
238#define GPIO1_SYSCONFIG __REG32((GPIOX_BASE(1) + 0x10))
239#define GPIO2_SYSCONFIG __REG32((GPIOX_BASE(2) + 0x10))
240#define GPIO3_SYSCONFIG __REG32((GPIOX_BASE(3) + 0x10))
241#define GPIO4_SYSCONFIG __REG32((GPIOX_BASE(4) + 0x10))
224 242
225#define GPIO1_SYSCONFIG __REG32((GPIOX_BASE(1)+0x10)) 243#if defined(CONFIG_ARCH_OMAP243X)
226#define GPIO2_SYSCONFIG __REG32((GPIOX_BASE(2)+0x10)) 244#define GPIO5_SYSCONFIG __REG32((OMAP24XX_GPIO5_BASE + 0x10))
227#define GPIO3_SYSCONFIG __REG32((GPIOX_BASE(3)+0x10)) 245#endif
228#define GPIO4_SYSCONFIG __REG32((GPIOX_BASE(4)+0x10))
229 246
230/* GP TIMER 1 */ 247/* GP TIMER 1 */
231#define GPTIMER1_TISTAT GPT1_REG32(0x014) 248#define GPTIMER1_TISTAT GPT1_REG32(0x014)
@@ -243,15 +260,15 @@
243#define GPTIMER1_TCAR2 GPT1_REG32(0x044) 260#define GPTIMER1_TCAR2 GPT1_REG32(0x044)
244 261
245/* rkw -- base fix up please... */ 262/* rkw -- base fix up please... */
246#define GPTIMER3_TISR __REG32(OMAP24XX_L4_IO_BASE+0x78018) 263#define GPTIMER3_TISR __REG32(OMAP24XX_L4_IO_BASE + 0x78018)
247 264
248/* SDRC */ 265/* SDRC */
249#define SDRC_DLLA_CTRL __REG32(OMAP24XX_SDRC_BASE+0x060) 266#define SDRC_DLLA_CTRL __REG32(OMAP24XX_SDRC_BASE + 0x060)
250#define SDRC_DLLA_STATUS __REG32(OMAP24XX_SDRC_BASE+0x064) 267#define SDRC_DLLA_STATUS __REG32(OMAP24XX_SDRC_BASE + 0x064)
251#define SDRC_DLLB_CTRL __REG32(OMAP24XX_SDRC_BASE+0x068) 268#define SDRC_DLLB_CTRL __REG32(OMAP24XX_SDRC_BASE + 0x068)
252#define SDRC_DLLB_STATUS __REG32(OMAP24XX_SDRC_BASE+0x06C) 269#define SDRC_DLLB_STATUS __REG32(OMAP24XX_SDRC_BASE + 0x06C)
253#define SDRC_POWER __REG32(OMAP24XX_SDRC_BASE+0x070) 270#define SDRC_POWER __REG32(OMAP24XX_SDRC_BASE + 0x070)
254#define SDRC_MR_0 __REG32(OMAP24XX_SDRC_BASE+0x084) 271#define SDRC_MR_0 __REG32(OMAP24XX_SDRC_BASE + 0x084)
255 272
256/* GPIO 1 */ 273/* GPIO 1 */
257#define GPIO1_BASE GPIOX_BASE(1) 274#define GPIO1_BASE GPIOX_BASE(1)
@@ -278,6 +295,8 @@
278#define GPIO2_DATAIN GPIO2_REG32(0x038) 295#define GPIO2_DATAIN GPIO2_REG32(0x038)
279#define GPIO2_OE GPIO2_REG32(0x034) 296#define GPIO2_OE GPIO2_REG32(0x034)
280#define GPIO2_DATAOUT GPIO2_REG32(0x03C) 297#define GPIO2_DATAOUT GPIO2_REG32(0x03C)
298#define GPIO2_DEBOUNCENABLE GPIO2_REG32(0x050)
299#define GPIO2_DEBOUNCINGTIME GPIO2_REG32(0x054)
281 300
282/* GPIO 3 */ 301/* GPIO 3 */
283#define GPIO3_BASE GPIOX_BASE(3) 302#define GPIO3_BASE GPIOX_BASE(3)
@@ -294,6 +313,8 @@
294#define GPIO3_DATAOUT GPIO3_REG32(0x03C) 313#define GPIO3_DATAOUT GPIO3_REG32(0x03C)
295#define GPIO3_DEBOUNCENABLE GPIO3_REG32(0x050) 314#define GPIO3_DEBOUNCENABLE GPIO3_REG32(0x050)
296#define GPIO3_DEBOUNCINGTIME GPIO3_REG32(0x054) 315#define GPIO3_DEBOUNCINGTIME GPIO3_REG32(0x054)
316#define GPIO3_DEBOUNCENABLE GPIO3_REG32(0x050)
317#define GPIO3_DEBOUNCINGTIME GPIO3_REG32(0x054)
297 318
298/* GPIO 4 */ 319/* GPIO 4 */
299#define GPIO4_BASE GPIOX_BASE(4) 320#define GPIO4_BASE GPIOX_BASE(4)
@@ -311,10 +332,26 @@
311#define GPIO4_DEBOUNCENABLE GPIO4_REG32(0x050) 332#define GPIO4_DEBOUNCENABLE GPIO4_REG32(0x050)
312#define GPIO4_DEBOUNCINGTIME GPIO4_REG32(0x054) 333#define GPIO4_DEBOUNCINGTIME GPIO4_REG32(0x054)
313 334
335#if defined(CONFIG_ARCH_OMAP243X)
336/* GPIO 5 */
337#define GPIO5_REG32(offset) __REG32((OMAP24XX_GPIO5_BASE + (offset)))
338#define GPIO5_IRQENABLE1 GPIO5_REG32(0x01C)
339#define GPIO5_IRQSTATUS1 GPIO5_REG32(0x018)
340#define GPIO5_IRQENABLE2 GPIO5_REG32(0x02C)
341#define GPIO5_IRQSTATUS2 GPIO5_REG32(0x028)
342#define GPIO5_WAKEUPENABLE GPIO5_REG32(0x020)
343#define GPIO5_RISINGDETECT GPIO5_REG32(0x048)
344#define GPIO5_FALLINGDETECT GPIO5_REG32(0x04C)
345#define GPIO5_DATAIN GPIO5_REG32(0x038)
346#define GPIO5_OE GPIO5_REG32(0x034)
347#define GPIO5_DATAOUT GPIO5_REG32(0x03C)
348#define GPIO5_DEBOUNCENABLE GPIO5_REG32(0x050)
349#define GPIO5_DEBOUNCINGTIME GPIO5_REG32(0x054)
350#endif
314 351
315/* IO CONFIG */ 352/* IO CONFIG */
316#define CONTROL_BASE (OMAP24XX_CTRL_BASE) 353#define OMAP24XX_CTRL_BASE (L4_24XX_BASE)
317#define CONTROL_REG32(offset) __REG32(CONTROL_BASE + (offset)) 354#define CONTROL_REG32(offset) __REG32(OMAP24XX_CTRL_BASE + (offset))
318 355
319#define CONTROL_PADCONF_SPI1_NCS2 CONTROL_REG32(0x104) 356#define CONTROL_PADCONF_SPI1_NCS2 CONTROL_REG32(0x104)
320#define CONTROL_PADCONF_SYS_XTALOUT CONTROL_REG32(0x134) 357#define CONTROL_PADCONF_SYS_XTALOUT CONTROL_REG32(0x134)
@@ -322,15 +359,18 @@
322#define CONTROL_PADCONF_MCBSP1_DX CONTROL_REG32(0x10C) 359#define CONTROL_PADCONF_MCBSP1_DX CONTROL_REG32(0x10C)
323#define CONTROL_PADCONF_GPMC_NCS4 CONTROL_REG32(0x090) 360#define CONTROL_PADCONF_GPMC_NCS4 CONTROL_REG32(0x090)
324#define CONTROL_PADCONF_DSS_D5 CONTROL_REG32(0x0B8) 361#define CONTROL_PADCONF_DSS_D5 CONTROL_REG32(0x0B8)
325#define CONTROL_PADCONF_DSS_D9 CONTROL_REG32(0x0BC) 362#define CONTROL_PADCONF_DSS_D9 CONTROL_REG32(0x0BC) /* 2420 */
326#define CONTROL_PADCONF_DSS_D13 CONTROL_REG32(0x0C0) 363#define CONTROL_PADCONF_DSS_D13 CONTROL_REG32(0x0C0)
327#define CONTROL_PADCONF_DSS_VSYNC CONTROL_REG32(0x0CC) 364#define CONTROL_PADCONF_DSS_VSYNC CONTROL_REG32(0x0CC)
365#define CONTROL_PADCONF_SYS_NIRQW0 CONTROL_REG32(0x0BC) /* 2430 */
366#define CONTROL_PADCONF_SSI1_FLAG_TX CONTROL_REG32(0x108) /* 2430 */
328 367
329/* CONTROL */ 368/* CONTROL */
330#define CONTROL_DEVCONF CONTROL_REG32(0x274) 369#define CONTROL_DEVCONF CONTROL_REG32(0x274)
370#define CONTROL_DEVCONF1 CONTROL_REG32(0x2E8)
331 371
332/* INTERRUPT CONTROLLER */ 372/* INTERRUPT CONTROLLER */
333#define INTC_BASE (OMAP24XX_L4_IO_BASE+0xfe000) 373#define INTC_BASE ((L4_24XX_BASE) + 0xfe000)
334#define INTC_REG32(offset) __REG32(INTC_BASE + (offset)) 374#define INTC_REG32(offset) __REG32(INTC_BASE + (offset))
335 375
336#define INTC1_U_BASE INTC_REG32(0x000) 376#define INTC1_U_BASE INTC_REG32(0x000)
@@ -348,10 +388,12 @@
348#define INTC_ISR_CLEAR2 INTC_REG32(0x0D4) 388#define INTC_ISR_CLEAR2 INTC_REG32(0x0D4)
349#define INTC_SIR_IRQ INTC_REG32(0x040) 389#define INTC_SIR_IRQ INTC_REG32(0x040)
350#define INTC_CONTROL INTC_REG32(0x048) 390#define INTC_CONTROL INTC_REG32(0x048)
351#define INTC_ILR11 INTC_REG32(0x12C) 391#define INTC_ILR11 INTC_REG32(0x12C) /* PRCM on MPU PIC */
392#define INTC_ILR30 INTC_REG32(0x178)
393#define INTC_ILR31 INTC_REG32(0x17C)
352#define INTC_ILR32 INTC_REG32(0x180) 394#define INTC_ILR32 INTC_REG32(0x180)
353#define INTC_ILR37 INTC_REG32(0x194) 395#define INTC_ILR37 INTC_REG32(0x194) /* GPIO4 on MPU PIC */
354#define INTC_SYSCONFIG INTC_REG32(0x010) 396#define INTC_SYSCONFIG INTC_REG32(0x010) /* GPT1 on MPU PIC */
355 397
356/* RAM FIREWALL */ 398/* RAM FIREWALL */
357#define RAMFW_BASE (0x68005000) 399#define RAMFW_BASE (0x68005000)
@@ -373,6 +415,24 @@
373#define GPMC_CONFIG6_0 GPMC_REG32(0x074) 415#define GPMC_CONFIG6_0 GPMC_REG32(0x074)
374#define GPMC_CONFIG7_0 GPMC_REG32(0x078) 416#define GPMC_CONFIG7_0 GPMC_REG32(0x078)
375 417
418/* GPMC CS1 */
419#define GPMC_CONFIG1_1 GPMC_REG32(0x090)
420#define GPMC_CONFIG2_1 GPMC_REG32(0x094)
421#define GPMC_CONFIG3_1 GPMC_REG32(0x098)
422#define GPMC_CONFIG4_1 GPMC_REG32(0x09C)
423#define GPMC_CONFIG5_1 GPMC_REG32(0x0a0)
424#define GPMC_CONFIG6_1 GPMC_REG32(0x0a4)
425#define GPMC_CONFIG7_1 GPMC_REG32(0x0a8)
426
427/* GPMC CS3 */
428#define GPMC_CONFIG1_3 GPMC_REG32(0x0F0)
429#define GPMC_CONFIG2_3 GPMC_REG32(0x0F4)
430#define GPMC_CONFIG3_3 GPMC_REG32(0x0F8)
431#define GPMC_CONFIG4_3 GPMC_REG32(0x0FC)
432#define GPMC_CONFIG5_3 GPMC_REG32(0x100)
433#define GPMC_CONFIG6_3 GPMC_REG32(0x104)
434#define GPMC_CONFIG7_3 GPMC_REG32(0x108)
435
376/* DSS */ 436/* DSS */
377#define DSS_CONTROL DISP_REG32(0x040) 437#define DSS_CONTROL DISP_REG32(0x040)
378#define DISPC_CONTROL DISP_REG32(0x440) 438#define DISPC_CONTROL DISP_REG32(0x440)
@@ -405,11 +465,15 @@
405#define DISPC_DATA_CYCLE2 DISP_REG32(0x5D8) 465#define DISPC_DATA_CYCLE2 DISP_REG32(0x5D8)
406#define DISPC_DATA_CYCLE3 DISP_REG32(0x5DC) 466#define DISPC_DATA_CYCLE3 DISP_REG32(0x5DC)
407 467
408/* Wake up define for board */ 468/* HSUSB Suspend */
409#define GPIO97 (1 << 1) 469#define HSUSB_CTRL __REG8(0x480AC001)
410#define GPIO88 (1 << 24) 470#define USBOTG_POWER __REG32(0x480AC000)
471
472/* HS MMC */
473#define MMCHS1_SYSCONFIG __REG32(0x4809C010)
474#define MMCHS2_SYSCONFIG __REG32(0x480b4010)
411 475
412#endif /* __ASSEMBLER__ */ 476#endif /* __ASSEMBLER__ */
413 477
414#endif 478#endif
415 479
diff --git a/arch/arm/mach-omap2/prcm.c b/arch/arm/mach-omap2/prcm.c
new file mode 100644
index 000000000000..8893479dc7e0
--- /dev/null
+++ b/arch/arm/mach-omap2/prcm.c
@@ -0,0 +1,40 @@
1/*
2 * linux/arch/arm/mach-omap2/prcm.c
3 *
4 * OMAP 24xx Power Reset and Clock Management (PRCM) functions
5 *
6 * Copyright (C) 2005 Nokia Corporation
7 *
8 * Written by Tony Lindgren <tony.lindgren@nokia.com>
9 *
10 * Some pieces of code Copyright (C) 2005 Texas Instruments, Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16#include <linux/config.h>
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/clk.h>
20
21#include "prcm-regs.h"
22
23u32 omap_prcm_get_reset_sources(void)
24{
25 return RM_RSTST_WKUP & 0x7f;
26}
27EXPORT_SYMBOL(omap_prcm_get_reset_sources);
28
29/* Resets clock rates and reboots the system. Only called from system.h */
30void omap_prcm_arch_reset(char mode)
31{
32 u32 rate;
33 struct clk *vclk, *sclk;
34
35 vclk = clk_get(NULL, "virt_prcm_set");
36 sclk = clk_get(NULL, "sys_ck");
37 rate = clk_get_rate(sclk);
38 clk_set_rate(vclk, rate); /* go to bypass for OMAP limitation */
39 RM_RSTCTRL_WKUP |= 2;
40}
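Since omap_prcm_get_reset_sources() is exported, drivers or board code can log why the last reset happened. A minimal sketch, assuming only the function above; the bit meanings inside the 0x7f mask are not defined by this patch, so only the raw value is printed:

#include <linux/kernel.h>
#include <linux/init.h>

extern u32 omap_prcm_get_reset_sources(void);	/* normally picked up from a prcm header */

static int __init example_report_reset_sources(void)
{
	printk(KERN_INFO "PRCM: last reset sources 0x%02x\n",
	       omap_prcm_get_reset_sources());
	return 0;
}
__initcall(example_report_reset_sources);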
diff --git a/arch/arm/mach-omap2/sleep.S b/arch/arm/mach-omap2/sleep.S
new file mode 100644
index 000000000000..00299cbeb911
--- /dev/null
+++ b/arch/arm/mach-omap2/sleep.S
@@ -0,0 +1,144 @@
1/*
2 * linux/arch/arm/mach-omap2/sleep.S
3 *
4 * (C) Copyright 2004
5 * Texas Instruments, <www.ti.com>
6 * Richard Woodruff <r-woodruff2@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of
11 * the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
21 * MA 02111-1307 USA
22 */
23
24#include <linux/config.h>
25#include <linux/linkage.h>
26#include <asm/assembler.h>
27#include <asm/arch/io.h>
28#include <asm/arch/pm.h>
29
30#define A_32KSYNC_CR_V IO_ADDRESS(OMAP_TIMER32K_BASE+0x10)
31#define A_PRCM_VOLTCTRL_V IO_ADDRESS(OMAP24XX_PRCM_BASE+0x50)
32#define A_PRCM_CLKCFG_CTRL_V IO_ADDRESS(OMAP24XX_PRCM_BASE+0x80)
33#define A_CM_CLKEN_PLL_V IO_ADDRESS(OMAP24XX_PRCM_BASE+0x500)
34#define A_CM_IDLEST_CKGEN_V IO_ADDRESS(OMAP24XX_PRCM_BASE+0x520)
35#define A_CM_CLKSEL1_PLL_V IO_ADDRESS(OMAP24XX_PRCM_BASE+0x540)
36#define A_CM_CLKSEL2_PLL_V IO_ADDRESS(OMAP24XX_PRCM_BASE+0x544)
37
38#define A_SDRC_DLLA_CTRL_V IO_ADDRESS(OMAP24XX_SDRC_BASE+0x60)
39#define A_SDRC_POWER_V IO_ADDRESS(OMAP24XX_SDRC_BASE+0x70)
40#define A_SDRC_RFR_CTRL_V IO_ADDRESS(OMAP24XX_SDRC_BASE+0xA4)
41#define A_SDRC0_V (0xC0000000)
42#define A_SDRC_MANUAL_V IO_ADDRESS(OMAP24XX_SDRC_BASE+0xA8)
43
44 .text
45
46/*
47 * Forces OMAP into idle state
48 *
49 * omap24xx_idle_loop_suspend() - This bit of code just executes the WFI
50 * for normal idles.
51 *
52 * Note: This code gets copied to internal SRAM at boot. When the OMAP
53 * wakes up it continues execution at the point it went to sleep.
54 */
55ENTRY(omap24xx_idle_loop_suspend)
56 stmfd sp!, {r0, lr} @ save registers on stack
57 mov r0, #0 @ clear for mcr setup
58 mcr p15, 0, r0, c7, c0, 4 @ wait for interrupt
59 ldmfd sp!, {r0, pc} @ restore regs and return
60
61ENTRY(omap24xx_idle_loop_suspend_sz)
62 .word . - omap24xx_idle_loop_suspend
63
64/*
65 * omap24xx_cpu_suspend() - Forces OMAP into deep sleep state by completing
66 * SDRC shutdown then ARM shutdown. Upon wake MPU is back on so just restore
67 * SDRC.
68 *
69 * Input:
70 * R0 : DLL ctrl value pre-Sleep
71 * R1 : Processor+Revision
72 * 2420: 0x21 = 242xES1, 0x26 = 242xES2.2
73 * 2430: 0x31 = 2430ES1, 0x32 = 2430ES2
74 *
75 * This matters if the DPLL is going to AutoIdle: the DPLL may be back on
76 * when we get called, but the DLL probably isn't. We will wait a bit more in
77 * case the DPLL isn't quite there yet. The code will wait on DLL for DDR even
78 * if in unlocked mode.
79 *
80 * For less than 242x-ES2.2 upon wake from a sleep mode where the external
81 * oscillator was stopped, a timing bug exists where a non-stabilized 12MHz
82 * clock can pass into the PRCM and cause problems at the DSP and IVA.
83 * To work around this the code will switch to the 32kHz source prior to sleep.
84 * Post sleep we will shift back to using the DPLL. Apparently,
85 * CM_IDLEST_CLKGEN does not reflect the full clock change so you need to wait
86 * 3x12MHz + 3x32kHz clocks for a full switch.
87 *
88 * The DLL load value is not kept in RETENTION or OFF. It needs to be restored
89 * at wake.
90 */
91ENTRY(omap24xx_cpu_suspend)
92 stmfd sp!, {r0 - r12, lr} @ save registers on stack
93 mov r3, #0x0 @ clear for mrc call
94 mcr p15, 0, r3, c7, c10, 4 @ memory barrier, hope SDR/DDR finished
95 nop
96 nop
97 ldr r3, A_SDRC_POWER @ addr of sdrc power
98 ldr r4, [r3] @ value of sdrc power
99 orr r4, r4, #0x40 @ enable self refresh on idle req
100 mov r5, #0x2000 @ set delay (DPLL relock + DLL relock)
101 str r4, [r3] @ make it so
102 mov r2, #0
103 nop
104 mcr p15, 0, r2, c7, c0, 4 @ wait for interrupt
105 nop
106loop:
107 subs r5, r5, #0x1 @ awake, wait just a bit
108 bne loop
109
110 /* The DPLL has to be on before we take the DDR out of self refresh */
111 bic r4, r4, #0x40 @ now clear self refresh bit.
112 str r4, [r3] @ put value back.
113 ldr r4, A_SDRC0 @ make a clock happen
114 ldr r4, [r4]
115 nop @ start auto refresh only after clk ok
116 movs r0, r0 @ see if DDR or SDR
117 ldrne r1, A_SDRC_DLLA_CTRL_S @ get addr of DLL ctrl
118 strne r0, [r1] @ rewrite DLLA to force DLL reload
119 addne r1, r1, #0x8 @ move to DLLB
120 strne r0, [r1] @ rewrite DLLB to force DLL reload
121
122 mov r5, #0x1000
123loop2:
124 subs r5, r5, #0x1
125 bne loop2
126 /* resume*/
127 ldmfd sp!, {r0 - r12, pc} @ restore regs and return
128
129A_SDRC_POWER:
130 .word A_SDRC_POWER_V
131A_SDRC0:
132 .word A_SDRC0_V
133A_CM_CLKSEL2_PLL_S:
134 .word A_CM_CLKSEL2_PLL_V
135A_CM_CLKEN_PLL:
136 .word A_CM_CLKEN_PLL_V
137A_SDRC_DLLA_CTRL_S:
138 .word A_SDRC_DLLA_CTRL_V
139A_SDRC_MANUAL_S:
140 .word A_SDRC_MANUAL_V
141
142ENTRY(omap24xx_cpu_suspend_sz)
143 .word . - omap24xx_cpu_suspend
144
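Tying this back to pm.c: the SRAM copy of omap24xx_cpu_suspend is reached through the omap2_sram_suspend function pointer, with R0 and R1 filled in as the block comment above describes. A hedged sketch of that eventual call; the wrapper name, the cpu_rev constant and the use of the memory.c getters are assumptions, only the argument meaning comes from this patch:

/* Hypothetical glue for the FIXME in omap2_pm_enter(). */
static void example_enter_deep_sleep(void (*sram_suspend)(int dllctrl, int cpu_rev))
{
	int dllctrl = 0;
	int cpu_rev = 0x26;			/* e.g. 242x ES2.2, per the table above */

	if (omap2_memory_get_type())		/* DDR: DLL value must be restored at wake */
		dllctrl = omap2_memory_get_slow_dll_ctrl();

	sram_suspend(dllctrl, cpu_rev);		/* R0 = dllctrl (0 means SDR), R1 = cpu_rev */
}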
diff --git a/arch/arm/mach-omap2/sram-fn.S b/arch/arm/mach-omap2/sram-fn.S
index 2a869e203342..d261e4ff4d9b 100644
--- a/arch/arm/mach-omap2/sram-fn.S
+++ b/arch/arm/mach-omap2/sram-fn.S
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/arch/arm/mach-omap1/sram.S 2 * linux/arch/arm/mach-omap2/sram.S
3 * 3 *
4 * Omap2 specific functions that need to be run in internal SRAM 4 * Omap2 specific functions that need to be run in internal SRAM
5 * 5 *
@@ -28,7 +28,7 @@
28#include <asm/arch/io.h> 28#include <asm/arch/io.h>
29#include <asm/hardware.h> 29#include <asm/hardware.h>
30 30
31#include <asm/arch/prcm.h> 31#include "prcm-regs.h"
32 32
33#define TIMER_32KSYNCT_CR_V IO_ADDRESS(OMAP24XX_32KSYNCT_BASE + 0x010) 33#define TIMER_32KSYNCT_CR_V IO_ADDRESS(OMAP24XX_32KSYNCT_BASE + 0x010)
34 34
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index 68923b1d2b62..d6d726036361 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -141,6 +141,8 @@ struct corgissp_machinfo corgi_ssp_machinfo = {
141 */ 141 */
142static struct corgibl_machinfo corgi_bl_machinfo = { 142static struct corgibl_machinfo corgi_bl_machinfo = {
143 .max_intensity = 0x2f, 143 .max_intensity = 0x2f,
144 .default_intensity = 0x1f,
145 .limit_mask = 0x0b,
144 .set_bl_intensity = corgi_bl_set_intensity, 146 .set_bl_intensity = corgi_bl_set_intensity,
145}; 147};
146 148
@@ -164,6 +166,14 @@ static struct platform_device corgikbd_device = {
164 166
165 167
166/* 168/*
169 * Corgi LEDs
170 */
171static struct platform_device corgiled_device = {
172 .name = "corgi-led",
173 .id = -1,
174};
175
176/*
167 * Corgi Touch Screen Device 177 * Corgi Touch Screen Device
168 */ 178 */
169static struct resource corgits_resources[] = { 179static struct resource corgits_resources[] = {
@@ -297,6 +307,7 @@ static struct platform_device *devices[] __initdata = {
297 &corgikbd_device, 307 &corgikbd_device,
298 &corgibl_device, 308 &corgibl_device,
299 &corgits_device, 309 &corgits_device,
310 &corgiled_device,
300}; 311};
301 312
302static void __init corgi_init(void) 313static void __init corgi_init(void)
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index b45560a8f6c4..a042473deedd 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -307,6 +307,10 @@ static void __init fixup_poodle(struct machine_desc *desc,
307 struct tag *tags, char **cmdline, struct meminfo *mi) 307 struct tag *tags, char **cmdline, struct meminfo *mi)
308{ 308{
309 sharpsl_save_param(); 309 sharpsl_save_param();
310 mi->nr_banks = 1;
311 mi->bank[0].start = 0xa0000000;
312 mi->bank[0].node = 0;
313 mi->bank[0].size = (32*1024*1024);
310} 314}
311 315
312MACHINE_START(POODLE, "SHARP Poodle") 316MACHINE_START(POODLE, "SHARP Poodle")
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 0dbb079ecd25..19b372df544a 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -220,6 +220,8 @@ struct corgissp_machinfo spitz_ssp_machinfo = {
220 * Spitz Backlight Device 220 * Spitz Backlight Device
221 */ 221 */
222static struct corgibl_machinfo spitz_bl_machinfo = { 222static struct corgibl_machinfo spitz_bl_machinfo = {
223 .default_intensity = 0x1f,
224 .limit_mask = 0x0b,
223 .max_intensity = 0x2f, 225 .max_intensity = 0x2f,
224}; 226};
225 227
@@ -242,6 +244,14 @@ static struct platform_device spitzkbd_device = {
242 244
243 245
244/* 246/*
247 * Spitz LEDs
248 */
249static struct platform_device spitzled_device = {
250 .name = "spitz-led",
251 .id = -1,
252};
253
254/*
245 * Spitz Touch Screen Device 255 * Spitz Touch Screen Device
246 */ 256 */
247static struct resource spitzts_resources[] = { 257static struct resource spitzts_resources[] = {
@@ -418,6 +428,7 @@ static struct platform_device *devices[] __initdata = {
418 &spitzkbd_device, 428 &spitzkbd_device,
419 &spitzts_device, 429 &spitzts_device,
420 &spitzbl_device, 430 &spitzbl_device,
431 &spitzled_device,
421}; 432};
422 433
423static void __init common_init(void) 434static void __init common_init(void)
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index 66ec71756d0f..76c0e7f0a219 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -251,10 +251,19 @@ static struct platform_device tosakbd_device = {
251 .id = -1, 251 .id = -1,
252}; 252};
253 253
254/*
255 * Tosa LEDs
256 */
257static struct platform_device tosaled_device = {
258 .name = "tosa-led",
259 .id = -1,
260};
261
254static struct platform_device *devices[] __initdata = { 262static struct platform_device *devices[] __initdata = {
255 &tosascoop_device, 263 &tosascoop_device,
256 &tosascoop_jc_device, 264 &tosascoop_jc_device,
257 &tosakbd_device, 265 &tosakbd_device,
266 &tosaled_device,
258}; 267};
259 268
260static void __init tosa_init(void) 269static void __init tosa_init(void)
diff --git a/arch/arm/mach-s3c2410/Kconfig b/arch/arm/mach-s3c2410/Kconfig
index ed07c4149d82..ce7d81000695 100644
--- a/arch/arm/mach-s3c2410/Kconfig
+++ b/arch/arm/mach-s3c2410/Kconfig
@@ -50,9 +50,15 @@ config MACH_N30
50 50
51 <http://zoo.weinigel.se/n30>. 51 <http://zoo.weinigel.se/n30>.
52 52
53config MACH_SMDK
54 bool
55 help
56 Common machine code for SMDK2410 and SMDK2440
57
53config ARCH_SMDK2410 58config ARCH_SMDK2410
54 bool "SMDK2410/A9M2410" 59 bool "SMDK2410/A9M2410"
55 select CPU_S3C2410 60 select CPU_S3C2410
61 select MACH_SMDK
56 help 62 help
57 Say Y here if you are using the SMDK2410 or the derived module A9M2410 63 Say Y here if you are using the SMDK2410 or the derived module A9M2410
58 <http://www.fsforth.de> 64 <http://www.fsforth.de>
@@ -60,6 +66,7 @@ config ARCH_SMDK2410
60config ARCH_S3C2440 66config ARCH_S3C2440
61 bool "SMDK2440" 67 bool "SMDK2440"
62 select CPU_S3C2440 68 select CPU_S3C2440
69 select MACH_SMDK
63 help 70 help
64 Say Y here if you are using the SMDK2440. 71 Say Y here if you are using the SMDK2440.
65 72
diff --git a/arch/arm/mach-s3c2410/Makefile b/arch/arm/mach-s3c2410/Makefile
index 1b3b476e5637..3e5712db6b52 100644
--- a/arch/arm/mach-s3c2410/Makefile
+++ b/arch/arm/mach-s3c2410/Makefile
@@ -48,3 +48,5 @@ obj-$(CONFIG_MACH_VR1000) += mach-vr1000.o usb-simtec.o
48obj-$(CONFIG_MACH_RX3715) += mach-rx3715.o 48obj-$(CONFIG_MACH_RX3715) += mach-rx3715.o
49obj-$(CONFIG_MACH_OTOM) += mach-otom.o 49obj-$(CONFIG_MACH_OTOM) += mach-otom.o
50obj-$(CONFIG_MACH_NEXCODER_2440) += mach-nexcoder.o 50obj-$(CONFIG_MACH_NEXCODER_2440) += mach-nexcoder.o
51
52obj-$(CONFIG_MACH_SMDK) += common-smdk.o
\ No newline at end of file
diff --git a/arch/arm/mach-s3c2410/clock.c b/arch/arm/mach-s3c2410/clock.c
index fec02c92f95f..b7f85e6d6b76 100644
--- a/arch/arm/mach-s3c2410/clock.c
+++ b/arch/arm/mach-s3c2410/clock.c
@@ -249,7 +249,7 @@ static int s3c24xx_upll_enable(struct clk *clk, int enable)
249 249
250 /* if we started the UPLL, then allow to settle */ 250 /* if we started the UPLL, then allow to settle */
251 251
252 if (enable && !(orig & S3C2410_CLKSLOW_UCLK_OFF)) 252 if (enable && (orig & S3C2410_CLKSLOW_UCLK_OFF))
253 udelay(200); 253 udelay(200);
254 254
255 return 0; 255 return 0;
diff --git a/arch/arm/mach-s3c2410/common-smdk.c b/arch/arm/mach-s3c2410/common-smdk.c
new file mode 100644
index 000000000000..36b8291b5e03
--- /dev/null
+++ b/arch/arm/mach-s3c2410/common-smdk.c
@@ -0,0 +1,134 @@
1/* linux/arch/arm/mach-s3c2410/common-smdk.c
2 *
3 * Copyright (c) 2006 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * Common code for SMDK2410 and SMDK2440 boards
7 *
8 * http://www.fluff.org/ben/smdk2440/
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/interrupt.h>
18#include <linux/list.h>
19#include <linux/timer.h>
20#include <linux/init.h>
21#include <linux/platform_device.h>
22
23#include <linux/mtd/mtd.h>
24#include <linux/mtd/nand.h>
25#include <linux/mtd/nand_ecc.h>
26#include <linux/mtd/partitions.h>
27
28#include <asm/mach/arch.h>
29#include <asm/mach/map.h>
30#include <asm/mach/irq.h>
31
32#include <asm/hardware.h>
33#include <asm/io.h>
34#include <asm/irq.h>
35
36#include <asm/arch/regs-gpio.h>
37
38#include <asm/arch/nand.h>
39
40#include "devs.h"
41#include "pm.h"
42
43/* NAND partition from 2.4.18-swl5 */
44
45static struct mtd_partition smdk_default_nand_part[] = {
46 [0] = {
47 .name = "Boot Agent",
48 .size = SZ_16K,
49 .offset = 0,
50 },
51 [1] = {
52 .name = "S3C2410 flash parition 1",
53 .offset = 0,
54 .size = SZ_2M,
55 },
56 [2] = {
57 .name = "S3C2410 flash partition 2",
58 .offset = SZ_4M,
59 .size = SZ_4M,
60 },
61 [3] = {
62 .name = "S3C2410 flash partition 3",
63 .offset = SZ_8M,
64 .size = SZ_2M,
65 },
66 [4] = {
67 .name = "S3C2410 flash partition 4",
68 .offset = SZ_1M * 10,
69 .size = SZ_4M,
70 },
71 [5] = {
72 .name = "S3C2410 flash partition 5",
73 .offset = SZ_1M * 14,
74 .size = SZ_1M * 10,
75 },
76 [6] = {
77 .name = "S3C2410 flash partition 6",
78 .offset = SZ_1M * 24,
79 .size = SZ_1M * 24,
80 },
81 [7] = {
82 .name = "S3C2410 flash partition 7",
83 .offset = SZ_1M * 48,
84 .size = SZ_16M,
85 }
86};
87
88static struct s3c2410_nand_set smdk_nand_sets[] = {
89 [0] = {
90 .name = "NAND",
91 .nr_chips = 1,
92 .nr_partitions = ARRAY_SIZE(smdk_default_nand_part),
93 .partitions = smdk_default_nand_part,
94 },
95};
96
97/* choose a set of timings which should suit most 512Mbit
98 * chips and beyond.
99*/
100
101static struct s3c2410_platform_nand smdk_nand_info = {
102 .tacls = 20,
103 .twrph0 = 60,
104 .twrph1 = 20,
105 .nr_sets = ARRAY_SIZE(smdk_nand_sets),
106 .sets = smdk_nand_sets,
107};
108
109/* devices we initialise */
110
111static struct platform_device __initdata *smdk_devs[] = {
112 &s3c_device_nand,
113};
114
115void __init smdk_machine_init(void)
116{
117 /* Configure the LEDs (even if we have no LED support)*/
118
119 s3c2410_gpio_cfgpin(S3C2410_GPF4, S3C2410_GPF4_OUTP);
120 s3c2410_gpio_cfgpin(S3C2410_GPF5, S3C2410_GPF5_OUTP);
121 s3c2410_gpio_cfgpin(S3C2410_GPF6, S3C2410_GPF6_OUTP);
122 s3c2410_gpio_cfgpin(S3C2410_GPF7, S3C2410_GPF7_OUTP);
123
124 s3c2410_gpio_setpin(S3C2410_GPF4, 1);
125 s3c2410_gpio_setpin(S3C2410_GPF5, 1);
126 s3c2410_gpio_setpin(S3C2410_GPF6, 1);
127 s3c2410_gpio_setpin(S3C2410_GPF7, 1);
128
129 s3c_device_nand.dev.platform_data = &smdk_nand_info;
130
131 platform_add_devices(smdk_devs, ARRAY_SIZE(smdk_devs));
132
133 s3c2410_pm_init();
134}
diff --git a/arch/arm/mach-s3c2410/common-smdk.h b/arch/arm/mach-s3c2410/common-smdk.h
new file mode 100644
index 000000000000..0e3a3be330a3
--- /dev/null
+++ b/arch/arm/mach-s3c2410/common-smdk.h
@@ -0,0 +1,15 @@
1/* linux/arch/arm/mach-s3c2410/common-smdk.h
2 *
3 * Copyright (c) 2006 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * Common code for SMDK2410 and SMDK2440 boards
7 *
8 * http://www.fluff.org/ben/smdk2440/
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15extern void smdk_machine_init(void);
diff --git a/arch/arm/mach-s3c2410/mach-anubis.c b/arch/arm/mach-s3c2410/mach-anubis.c
index 3e327b8e46be..cc97fbf66291 100644
--- a/arch/arm/mach-s3c2410/mach-anubis.c
+++ b/arch/arm/mach-s3c2410/mach-anubis.c
@@ -232,8 +232,8 @@ static void anubis_nand_select(struct s3c2410_nand_set *set, int slot)
232 232
233static struct s3c2410_platform_nand anubis_nand_info = { 233static struct s3c2410_platform_nand anubis_nand_info = {
234 .tacls = 25, 234 .tacls = 25,
235 .twrph0 = 80, 235 .twrph0 = 55,
236 .twrph1 = 80, 236 .twrph1 = 40,
237 .nr_sets = ARRAY_SIZE(anubis_nand_sets), 237 .nr_sets = ARRAY_SIZE(anubis_nand_sets),
238 .sets = anubis_nand_sets, 238 .sets = anubis_nand_sets,
239 .select_chip = anubis_nand_select, 239 .select_chip = anubis_nand_select,
diff --git a/arch/arm/mach-s3c2410/mach-rx3715.c b/arch/arm/mach-s3c2410/mach-rx3715.c
index 0260ed5ab946..306afc1d7cd3 100644
--- a/arch/arm/mach-s3c2410/mach-rx3715.c
+++ b/arch/arm/mach-s3c2410/mach-rx3715.c
@@ -32,6 +32,11 @@
32#include <linux/serial_core.h> 32#include <linux/serial_core.h>
33#include <linux/serial.h> 33#include <linux/serial.h>
34 34
35#include <linux/mtd/mtd.h>
36#include <linux/mtd/nand.h>
37#include <linux/mtd/nand_ecc.h>
38#include <linux/mtd/partitions.h>
39
35#include <asm/mach/arch.h> 40#include <asm/mach/arch.h>
36#include <asm/mach/map.h> 41#include <asm/mach/map.h>
37#include <asm/mach/irq.h> 42#include <asm/mach/irq.h>
@@ -46,6 +51,7 @@
46#include <asm/arch/regs-gpio.h> 51#include <asm/arch/regs-gpio.h>
47#include <asm/arch/regs-lcd.h> 52#include <asm/arch/regs-lcd.h>
48 53
54#include <asm/arch/nand.h>
49#include <asm/arch/fb.h> 55#include <asm/arch/fb.h>
50 56
51#include "clock.h" 57#include "clock.h"
@@ -170,12 +176,39 @@ static struct s3c2410fb_mach_info rx3715_lcdcfg __initdata = {
170 }, 176 },
171}; 177};
172 178
179static struct mtd_partition rx3715_nand_part[] = {
180 [0] = {
181 .name = "Whole Flash",
182 .offset = 0,
183 .size = MTDPART_SIZ_FULL,
184 .mask_flags = MTD_WRITEABLE,
185 }
186};
187
188static struct s3c2410_nand_set rx3715_nand_sets[] = {
189 [0] = {
190 .name = "Internal",
191 .nr_chips = 1,
192 .nr_partitions = ARRAY_SIZE(rx3715_nand_part),
193 .partitions = rx3715_nand_part,
194 },
195};
196
197static struct s3c2410_platform_nand rx3715_nand_info = {
198 .tacls = 25,
199 .twrph0 = 50,
200 .twrph1 = 15,
201 .nr_sets = ARRAY_SIZE(rx3715_nand_sets),
202 .sets = rx3715_nand_sets,
203};
204
173static struct platform_device *rx3715_devices[] __initdata = { 205static struct platform_device *rx3715_devices[] __initdata = {
174 &s3c_device_usb, 206 &s3c_device_usb,
175 &s3c_device_lcd, 207 &s3c_device_lcd,
176 &s3c_device_wdt, 208 &s3c_device_wdt,
177 &s3c_device_i2c, 209 &s3c_device_i2c,
178 &s3c_device_iis, 210 &s3c_device_iis,
211 &s3c_device_nand,
179}; 212};
180 213
181static struct s3c24xx_board rx3715_board __initdata = { 214static struct s3c24xx_board rx3715_board __initdata = {
@@ -185,6 +218,8 @@ static struct s3c24xx_board rx3715_board __initdata = {
185 218
186static void __init rx3715_map_io(void) 219static void __init rx3715_map_io(void)
187{ 220{
221 s3c_device_nand.dev.platform_data = &rx3715_nand_info;
222
188 s3c24xx_init_io(rx3715_iodesc, ARRAY_SIZE(rx3715_iodesc)); 223 s3c24xx_init_io(rx3715_iodesc, ARRAY_SIZE(rx3715_iodesc));
189 s3c24xx_init_clocks(16934000); 224 s3c24xx_init_clocks(16934000);
190 s3c24xx_init_uarts(rx3715_uartcfgs, ARRAY_SIZE(rx3715_uartcfgs)); 225 s3c24xx_init_uarts(rx3715_uartcfgs, ARRAY_SIZE(rx3715_uartcfgs));
diff --git a/arch/arm/mach-s3c2410/mach-smdk2410.c b/arch/arm/mach-s3c2410/mach-smdk2410.c
index 1e76e1fdfcea..2db932d72c5a 100644
--- a/arch/arm/mach-s3c2410/mach-smdk2410.c
+++ b/arch/arm/mach-s3c2410/mach-smdk2410.c
@@ -28,7 +28,8 @@
28 * Ben Dooks <ben@simtec.co.uk> 28 * Ben Dooks <ben@simtec.co.uk>
29 * 29 *
30 * 10-Mar-2005 LCVR Changed S3C2410_VA to S3C24XX_VA 30 * 10-Mar-2005 LCVR Changed S3C2410_VA to S3C24XX_VA
31 * 20-Sep-2005 BJD Added static to non-exported items 31 * 20-Sep-2005 BJD Added static to non-exported items
32 * 01-Apr-2006 BJD Moved init code to common smdk
32 * 33 *
33 ***********************************************************************/ 34 ***********************************************************************/
34 35
@@ -54,6 +55,8 @@
54#include "devs.h" 55#include "devs.h"
55#include "cpu.h" 56#include "cpu.h"
56 57
58#include "common-smdk.h"
59
57static struct map_desc smdk2410_iodesc[] __initdata = { 60static struct map_desc smdk2410_iodesc[] __initdata = {
58 /* nothing here yet */ 61 /* nothing here yet */
59}; 62};
@@ -107,11 +110,6 @@ static void __init smdk2410_map_io(void)
107 s3c24xx_set_board(&smdk2410_board); 110 s3c24xx_set_board(&smdk2410_board);
108} 111}
109 112
110static void __init smdk2410_init_irq(void)
111{
112 s3c24xx_init_irq();
113}
114
115MACHINE_START(SMDK2410, "SMDK2410") /* @TODO: request a new identifier and switch 113MACHINE_START(SMDK2410, "SMDK2410") /* @TODO: request a new identifier and switch
116 * to SMDK2410 */ 114 * to SMDK2410 */
117 /* Maintainer: Jonas Dietsche */ 115 /* Maintainer: Jonas Dietsche */
@@ -119,7 +117,8 @@ MACHINE_START(SMDK2410, "SMDK2410") /* @TODO: request a new identifier and switc
119 .io_pg_offst = (((u32)S3C24XX_VA_UART) >> 18) & 0xfffc, 117 .io_pg_offst = (((u32)S3C24XX_VA_UART) >> 18) & 0xfffc,
120 .boot_params = S3C2410_SDRAM_PA + 0x100, 118 .boot_params = S3C2410_SDRAM_PA + 0x100,
121 .map_io = smdk2410_map_io, 119 .map_io = smdk2410_map_io,
122 .init_irq = smdk2410_init_irq, 120 .init_irq = s3c24xx_init_irq,
121 .init_machine = smdk_machine_init,
123 .timer = &s3c24xx_timer, 122 .timer = &s3c24xx_timer,
124MACHINE_END 123MACHINE_END
125 124
diff --git a/arch/arm/mach-s3c2410/mach-smdk2440.c b/arch/arm/mach-s3c2410/mach-smdk2440.c
index f4315721c3b8..5fffd1d51047 100644
--- a/arch/arm/mach-s3c2410/mach-smdk2440.c
+++ b/arch/arm/mach-s3c2410/mach-smdk2440.c
@@ -53,7 +53,8 @@
53#include "clock.h" 53#include "clock.h"
54#include "devs.h" 54#include "devs.h"
55#include "cpu.h" 55#include "cpu.h"
56#include "pm.h" 56
57#include "common-smdk.h"
57 58
58static struct map_desc smdk2440_iodesc[] __initdata = { 59static struct map_desc smdk2440_iodesc[] __initdata = {
59 /* ISA IO Space map (memory space selected by A24) */ 60 /* ISA IO Space map (memory space selected by A24) */
@@ -197,21 +198,9 @@ static void __init smdk2440_map_io(void)
197 198
198static void __init smdk2440_machine_init(void) 199static void __init smdk2440_machine_init(void)
199{ 200{
200 /* Configure the LEDs (even if we have no LED support)*/
201
202 s3c2410_gpio_cfgpin(S3C2410_GPF4, S3C2410_GPF4_OUTP);
203 s3c2410_gpio_cfgpin(S3C2410_GPF5, S3C2410_GPF5_OUTP);
204 s3c2410_gpio_cfgpin(S3C2410_GPF6, S3C2410_GPF6_OUTP);
205 s3c2410_gpio_cfgpin(S3C2410_GPF7, S3C2410_GPF7_OUTP);
206
207 s3c2410_gpio_setpin(S3C2410_GPF4, 0);
208 s3c2410_gpio_setpin(S3C2410_GPF5, 0);
209 s3c2410_gpio_setpin(S3C2410_GPF6, 0);
210 s3c2410_gpio_setpin(S3C2410_GPF7, 0);
211
212 s3c24xx_fb_set_platdata(&smdk2440_lcd_cfg); 201 s3c24xx_fb_set_platdata(&smdk2440_lcd_cfg);
213 202
214 s3c2410_pm_init(); 203 smdk_machine_init();
215} 204}
216 205
217MACHINE_START(S3C2440, "SMDK2440") 206MACHINE_START(S3C2440, "SMDK2440")
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c
index 102454082474..676b5c5b75bb 100644
--- a/arch/arm/mach-sa1100/collie.c
+++ b/arch/arm/mach-sa1100/collie.c
@@ -11,7 +11,8 @@
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 * 12 *
13 * ChangeLog: 13 * ChangeLog:
14 * 03-06-2004 John Lenz <jelenz@wisc.edu> 14 * 2006 Pavel Machek <pavel@suse.cz>
15 * 03-06-2004 John Lenz <lenz@cs.wisc.edu>
15 * 06-04-2002 Chris Larson <kergoth@digitalnemesis.net> 16 * 06-04-2002 Chris Larson <kergoth@digitalnemesis.net>
16 * 04-16-2001 Lineo Japan,Inc. ... 17 * 04-16-2001 Lineo Japan,Inc. ...
17 */ 18 */
@@ -87,12 +88,75 @@ static struct mcp_plat_data collie_mcp_data = {
87 .sclk_rate = 11981000, 88 .sclk_rate = 11981000,
88}; 89};
89 90
91#ifdef CONFIG_SHARP_LOCOMO
92/*
93 * low-level UART features.
94 */
95static struct locomo_dev *uart_dev = NULL;
96
97static void collie_uart_set_mctrl(struct uart_port *port, u_int mctrl)
98{
99 if (!uart_dev) return;
100
101 if (mctrl & TIOCM_RTS)
102 locomo_gpio_write(uart_dev, LOCOMO_GPIO_RTS, 0);
103 else
104 locomo_gpio_write(uart_dev, LOCOMO_GPIO_RTS, 1);
105
106 if (mctrl & TIOCM_DTR)
107 locomo_gpio_write(uart_dev, LOCOMO_GPIO_DTR, 0);
108 else
109 locomo_gpio_write(uart_dev, LOCOMO_GPIO_DTR, 1);
110}
111
112static u_int collie_uart_get_mctrl(struct uart_port *port)
113{
114 int ret = TIOCM_CD;
115 unsigned int r;
116 if (!uart_dev) return ret;
117
118 r = locomo_gpio_read_output(uart_dev, LOCOMO_GPIO_CTS | LOCOMO_GPIO_DSR);
119 if (r & LOCOMO_GPIO_CTS)
120 ret |= TIOCM_CTS;
121 if (r & LOCOMO_GPIO_DSR)
122 ret |= TIOCM_DSR;
123
124 return ret;
125}
90 126
91static struct sa1100_port_fns collie_port_fns __initdata = { 127static struct sa1100_port_fns collie_port_fns __initdata = {
92 .set_mctrl = collie_uart_set_mctrl, 128 .set_mctrl = collie_uart_set_mctrl,
93 .get_mctrl = collie_uart_get_mctrl, 129 .get_mctrl = collie_uart_get_mctrl,
94}; 130};
95 131
132static int collie_uart_probe(struct locomo_dev *dev)
133{
134 uart_dev = dev;
135 return 0;
136}
137
138static int collie_uart_remove(struct locomo_dev *dev)
139{
140 uart_dev = NULL;
141 return 0;
142}
143
144static struct locomo_driver collie_uart_driver = {
145 .drv = {
146 .name = "collie_uart",
147 },
148 .devid = LOCOMO_DEVID_UART,
149 .probe = collie_uart_probe,
150 .remove = collie_uart_remove,
151};
152
153static int __init collie_uart_init(void) {
154 return locomo_driver_register(&collie_uart_driver);
155}
156device_initcall(collie_uart_init);
157
158#endif
159
96 160
97static struct resource locomo_resources[] = { 161static struct resource locomo_resources[] = {
98 [0] = { 162 [0] = {
@@ -218,6 +282,12 @@ static void __init collie_map_io(void)
218{ 282{
219 sa1100_map_io(); 283 sa1100_map_io();
220 iotable_init(collie_io_desc, ARRAY_SIZE(collie_io_desc)); 284 iotable_init(collie_io_desc, ARRAY_SIZE(collie_io_desc));
285
286#ifdef CONFIG_SHARP_LOCOMO
287 sa1100_register_uart_fns(&collie_port_fns);
288#endif
289 sa1100_register_uart(0, 3);
290 sa1100_register_uart(1, 1);
221} 291}
222 292
223MACHINE_START(COLLIE, "Sharp-Collie") 293MACHINE_START(COLLIE, "Sharp-Collie")
diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c
index 8a1bfcd50087..50e6b6bfb2e2 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/consistent.c
@@ -18,6 +18,7 @@
18#include <linux/device.h> 18#include <linux/device.h>
19#include <linux/dma-mapping.h> 19#include <linux/dma-mapping.h>
20 20
21#include <asm/memory.h>
21#include <asm/cacheflush.h> 22#include <asm/cacheflush.h>
22#include <asm/tlbflush.h> 23#include <asm/tlbflush.h>
23#include <asm/sizes.h> 24#include <asm/sizes.h>
@@ -272,6 +273,17 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
272void * 273void *
273dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) 274dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
274{ 275{
276 if (arch_is_coherent()) {
277 void *virt;
278
279 virt = kmalloc(size, gfp);
280 if (!virt)
281 return NULL;
282 *handle = virt_to_dma(dev, virt);
283
284 return virt;
285 }
286
275 return __dma_alloc(dev, size, handle, gfp, 287 return __dma_alloc(dev, size, handle, gfp,
276 pgprot_noncached(pgprot_kernel)); 288 pgprot_noncached(pgprot_kernel));
277} 289}
@@ -350,6 +362,11 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
350 362
351 WARN_ON(irqs_disabled()); 363 WARN_ON(irqs_disabled());
352 364
365 if (arch_is_coherent()) {
366 kfree(cpu_addr);
367 return;
368 }
369
353 size = PAGE_ALIGN(size); 370 size = PAGE_ALIGN(size);
354 371
355 spin_lock_irqsave(&consistent_lock, flags); 372 spin_lock_irqsave(&consistent_lock, flags);
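From a driver's point of view nothing changes with the arch_is_coherent() shortcut: the same allocation pair is used, it just comes from kmalloc() on fully coherent systems instead of the remapped uncached pool. A short sketch, with the device pointer, function names and size as placeholders:

#include <linux/dma-mapping.h>

static void *example_alloc_desc_ring(struct device *dev, dma_addr_t *dma)
{
	/* kmalloc()-backed on arch_is_coherent() systems, __dma_alloc() otherwise */
	return dma_alloc_coherent(dev, 4096, dma, GFP_KERNEL);
}

static void example_free_desc_ring(struct device *dev, void *cpu, dma_addr_t dma)
{
	dma_free_coherent(dev, 4096, cpu, dma);
}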
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 5e5d05bcad50..f14b2d0f3690 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -389,6 +389,17 @@ void __init build_mem_type_table(void)
389 kern_pgprot = user_pgprot = cp->pte; 389 kern_pgprot = user_pgprot = cp->pte;
390 390
391 /* 391 /*
392 * Enable CPU-specific coherency if supported.
393 * (Only available on XSC3 at the moment.)
394 */
395 if (arch_is_coherent()) {
396 if (cpu_is_xsc3()) {
397 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
398 mem_types[MT_MEMORY].prot_pte |= L_PTE_COHERENT;
399 }
400 }
401
402 /*
392 * ARMv6 and above have extended page tables. 403 * ARMv6 and above have extended page tables.
393 */ 404 */
394 if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) { 405 if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index f90513e9af0c..80873b36c3f7 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -30,6 +30,7 @@
30#include <asm/procinfo.h> 30#include <asm/procinfo.h>
31#include <asm/hardware.h> 31#include <asm/hardware.h>
32#include <asm/pgtable.h> 32#include <asm/pgtable.h>
33#include <asm/pgtable-hwdef.h>
33#include <asm/page.h> 34#include <asm/page.h>
34#include <asm/ptrace.h> 35#include <asm/ptrace.h>
35#include "proc-macros.S" 36#include "proc-macros.S"
@@ -370,7 +371,7 @@ ENTRY(cpu_xsc3_switch_mm)
370ENTRY(cpu_xsc3_set_pte) 371ENTRY(cpu_xsc3_set_pte)
371 str r1, [r0], #-2048 @ linux version 372 str r1, [r0], #-2048 @ linux version
372 373
373 bic r2, r1, #0xff0 374 bic r2, r1, #0xdf0 @ Keep C, B, coherency bits
374 orr r2, r2, #PTE_TYPE_EXT @ extended page 375 orr r2, r2, #PTE_TYPE_EXT @ extended page
375 376
376 eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY 377 eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index 0887bb2a2551..ec49495e651e 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -70,13 +70,13 @@ config OMAP_MPU_TIMER
70 70
71config OMAP_32K_TIMER 71config OMAP_32K_TIMER
72 bool "Use 32KHz timer" 72 bool "Use 32KHz timer"
73 depends on ARCH_OMAP16XX 73 depends on ARCH_OMAP16XX || ARCH_OMAP24XX
74 help 74 help
75 Select this option if you want to enable the OMAP 32KHz timer. 75 Select this option if you want to enable the OMAP 32KHz timer.
76 This timer saves power compared to the OMAP_MPU_TIMER, and has 76 This timer saves power compared to the OMAP_MPU_TIMER, and has
77 support for no tick during idle. The 32KHz timer provides less 77 support for no tick during idle. The 32KHz timer provides less
78 intra-tick resolution than OMAP_MPU_TIMER. The 32KHz timer is 78 intra-tick resolution than OMAP_MPU_TIMER. The 32KHz timer is
79 currently only available for OMAP-16xx. 79 currently only available for OMAP16XX and 24XX.
80 80
81endchoice 81endchoice
82 82
diff --git a/arch/arm/plat-omap/Makefile b/arch/arm/plat-omap/Makefile
index 9ccf1943fc94..2896b4546411 100644
--- a/arch/arm/plat-omap/Makefile
+++ b/arch/arm/plat-omap/Makefile
@@ -3,16 +3,16 @@
3# 3#
4 4
5# Common support 5# Common support
6obj-y := common.o sram.o sram-fn.o clock.o devices.o dma.o mux.o gpio.o mcbsp.o usb.o 6obj-y := common.o sram.o sram-fn.o clock.o devices.o dma.o mux.o gpio.o mcbsp.o usb.o fb.o
7obj-m := 7obj-m :=
8obj-n := 8obj-n :=
9obj- := 9obj- :=
10 10
11obj-$(CONFIG_OMAP_32K_TIMER) += timer32k.o
12
11# OCPI interconnect support for 1710, 1610 and 5912 13# OCPI interconnect support for 1710, 1610 and 5912
12obj-$(CONFIG_ARCH_OMAP16XX) += ocpi.o 14obj-$(CONFIG_ARCH_OMAP16XX) += ocpi.o
13 15
14# Power Management
15obj-$(CONFIG_PM) += pm.o sleep.o
16 16
17obj-$(CONFIG_CPU_FREQ) += cpu-omap.o 17obj-$(CONFIG_CPU_FREQ) += cpu-omap.o
18obj-$(CONFIG_OMAP_DM_TIMER) += dmtimer.o 18obj-$(CONFIG_OMAP_DM_TIMER) += dmtimer.o
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index 3c2bfc0efdaf..06485c193ee3 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -21,6 +21,7 @@
21#include <linux/string.h> 21#include <linux/string.h>
22#include <linux/clk.h> 22#include <linux/clk.h>
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/platform_device.h>
24 25
25#include <asm/io.h> 26#include <asm/io.h>
26#include <asm/semaphore.h> 27#include <asm/semaphore.h>
@@ -37,17 +38,37 @@ static struct clk_functions *arch_clock;
37 * Standard clock functions defined in include/linux/clk.h 38 * Standard clock functions defined in include/linux/clk.h
38 *-------------------------------------------------------------------------*/ 39 *-------------------------------------------------------------------------*/
39 40
41/*
42 * Returns a clock. Note that we first try to use device id on the bus
43 * and clock name. If this fails, we try to use clock name only.
44 */
40struct clk * clk_get(struct device *dev, const char *id) 45struct clk * clk_get(struct device *dev, const char *id)
41{ 46{
42 struct clk *p, *clk = ERR_PTR(-ENOENT); 47 struct clk *p, *clk = ERR_PTR(-ENOENT);
48 int idno;
49
50 if (dev == NULL || dev->bus != &platform_bus_type)
51 idno = -1;
52 else
53 idno = to_platform_device(dev)->id;
43 54
44 mutex_lock(&clocks_mutex); 55 mutex_lock(&clocks_mutex);
56
57 list_for_each_entry(p, &clocks, node) {
58 if (p->id == idno &&
59 strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
60 clk = p;
61 break;
62 }
63 }
64
45 list_for_each_entry(p, &clocks, node) { 65 list_for_each_entry(p, &clocks, node) {
46 if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) { 66 if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
47 clk = p; 67 clk = p;
48 break; 68 break;
49 } 69 }
50 } 70 }
71
51 mutex_unlock(&clocks_mutex); 72 mutex_unlock(&clocks_mutex);
52 73
53 return clk; 74 return clk;
@@ -59,6 +80,9 @@ int clk_enable(struct clk *clk)
59 unsigned long flags; 80 unsigned long flags;
60 int ret = 0; 81 int ret = 0;
61 82
83 if (clk == NULL || IS_ERR(clk))
84 return -EINVAL;
85
62 spin_lock_irqsave(&clockfw_lock, flags); 86 spin_lock_irqsave(&clockfw_lock, flags);
63 if (arch_clock->clk_enable) 87 if (arch_clock->clk_enable)
64 ret = arch_clock->clk_enable(clk); 88 ret = arch_clock->clk_enable(clk);
@@ -72,6 +96,9 @@ void clk_disable(struct clk *clk)
72{ 96{
73 unsigned long flags; 97 unsigned long flags;
74 98
99 if (clk == NULL || IS_ERR(clk))
100 return;
101
75 spin_lock_irqsave(&clockfw_lock, flags); 102 spin_lock_irqsave(&clockfw_lock, flags);
76 if (arch_clock->clk_disable) 103 if (arch_clock->clk_disable)
77 arch_clock->clk_disable(clk); 104 arch_clock->clk_disable(clk);
@@ -84,6 +111,9 @@ int clk_get_usecount(struct clk *clk)
84 unsigned long flags; 111 unsigned long flags;
85 int ret = 0; 112 int ret = 0;
86 113
114 if (clk == NULL || IS_ERR(clk))
115 return 0;
116
87 spin_lock_irqsave(&clockfw_lock, flags); 117 spin_lock_irqsave(&clockfw_lock, flags);
88 ret = clk->usecount; 118 ret = clk->usecount;
89 spin_unlock_irqrestore(&clockfw_lock, flags); 119 spin_unlock_irqrestore(&clockfw_lock, flags);
@@ -97,6 +127,9 @@ unsigned long clk_get_rate(struct clk *clk)
97 unsigned long flags; 127 unsigned long flags;
98 unsigned long ret = 0; 128 unsigned long ret = 0;
99 129
130 if (clk == NULL || IS_ERR(clk))
131 return 0;
132
100 spin_lock_irqsave(&clockfw_lock, flags); 133 spin_lock_irqsave(&clockfw_lock, flags);
101 ret = clk->rate; 134 ret = clk->rate;
102 spin_unlock_irqrestore(&clockfw_lock, flags); 135 spin_unlock_irqrestore(&clockfw_lock, flags);
@@ -121,6 +154,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
121 unsigned long flags; 154 unsigned long flags;
122 long ret = 0; 155 long ret = 0;
123 156
157 if (clk == NULL || IS_ERR(clk))
158 return ret;
159
124 spin_lock_irqsave(&clockfw_lock, flags); 160 spin_lock_irqsave(&clockfw_lock, flags);
125 if (arch_clock->clk_round_rate) 161 if (arch_clock->clk_round_rate)
126 ret = arch_clock->clk_round_rate(clk, rate); 162 ret = arch_clock->clk_round_rate(clk, rate);
@@ -133,7 +169,10 @@ EXPORT_SYMBOL(clk_round_rate);
133int clk_set_rate(struct clk *clk, unsigned long rate) 169int clk_set_rate(struct clk *clk, unsigned long rate)
134{ 170{
135 unsigned long flags; 171 unsigned long flags;
136 int ret = 0; 172 int ret = -EINVAL;
173
174 if (clk == NULL || IS_ERR(clk))
175 return ret;
137 176
138 spin_lock_irqsave(&clockfw_lock, flags); 177 spin_lock_irqsave(&clockfw_lock, flags);
139 if (arch_clock->clk_set_rate) 178 if (arch_clock->clk_set_rate)
@@ -147,7 +186,10 @@ EXPORT_SYMBOL(clk_set_rate);
147int clk_set_parent(struct clk *clk, struct clk *parent) 186int clk_set_parent(struct clk *clk, struct clk *parent)
148{ 187{
149 unsigned long flags; 188 unsigned long flags;
150 int ret = 0; 189 int ret = -EINVAL;
190
191 if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent))
192 return ret;
151 193
152 spin_lock_irqsave(&clockfw_lock, flags); 194 spin_lock_irqsave(&clockfw_lock, flags);
153 if (arch_clock->clk_set_parent) 195 if (arch_clock->clk_set_parent)
@@ -163,6 +205,9 @@ struct clk *clk_get_parent(struct clk *clk)
163 unsigned long flags; 205 unsigned long flags;
164 struct clk * ret = NULL; 206 struct clk * ret = NULL;
165 207
208 if (clk == NULL || IS_ERR(clk))
209 return ret;
210
166 spin_lock_irqsave(&clockfw_lock, flags); 211 spin_lock_irqsave(&clockfw_lock, flags);
167 if (arch_clock->clk_get_parent) 212 if (arch_clock->clk_get_parent)
168 ret = arch_clock->clk_get_parent(clk); 213 ret = arch_clock->clk_get_parent(clk);
@@ -199,6 +244,9 @@ __setup("mpurate=", omap_clk_setup);
199/* Used for clocks that always have same value as the parent clock */ 244/* Used for clocks that always have same value as the parent clock */
200void followparent_recalc(struct clk *clk) 245void followparent_recalc(struct clk *clk)
201{ 246{
247 if (clk == NULL || IS_ERR(clk))
248 return;
249
202 clk->rate = clk->parent->rate; 250 clk->rate = clk->parent->rate;
203} 251}
204 252
@@ -207,6 +255,9 @@ void propagate_rate(struct clk * tclk)
207{ 255{
208 struct clk *clkp; 256 struct clk *clkp;
209 257
258 if (tclk == NULL || IS_ERR(tclk))
259 return;
260
210 list_for_each_entry(clkp, &clocks, node) { 261 list_for_each_entry(clkp, &clocks, node) {
211 if (likely(clkp->parent != tclk)) 262 if (likely(clkp->parent != tclk))
212 continue; 263 continue;
@@ -217,6 +268,9 @@ void propagate_rate(struct clk * tclk)
217 268
218int clk_register(struct clk *clk) 269int clk_register(struct clk *clk)
219{ 270{
271 if (clk == NULL || IS_ERR(clk))
272 return -EINVAL;
273
220 mutex_lock(&clocks_mutex); 274 mutex_lock(&clocks_mutex);
221 list_add(&clk->node, &clocks); 275 list_add(&clk->node, &clocks);
222 if (clk->init) 276 if (clk->init)
@@ -229,6 +283,9 @@ EXPORT_SYMBOL(clk_register);
229 283
230void clk_unregister(struct clk *clk) 284void clk_unregister(struct clk *clk)
231{ 285{
286 if (clk == NULL || IS_ERR(clk))
287 return;
288
232 mutex_lock(&clocks_mutex); 289 mutex_lock(&clocks_mutex);
233 list_del(&clk->node); 290 list_del(&clk->node);
234 mutex_unlock(&clocks_mutex); 291 mutex_unlock(&clocks_mutex);
@@ -239,6 +296,9 @@ void clk_deny_idle(struct clk *clk)
239{ 296{
240 unsigned long flags; 297 unsigned long flags;
241 298
299 if (clk == NULL || IS_ERR(clk))
300 return;
301
242 spin_lock_irqsave(&clockfw_lock, flags); 302 spin_lock_irqsave(&clockfw_lock, flags);
243 if (arch_clock->clk_deny_idle) 303 if (arch_clock->clk_deny_idle)
244 arch_clock->clk_deny_idle(clk); 304 arch_clock->clk_deny_idle(clk);
@@ -250,6 +310,9 @@ void clk_allow_idle(struct clk *clk)
250{ 310{
251 unsigned long flags; 311 unsigned long flags;
252 312
313 if (clk == NULL || IS_ERR(clk))
314 return;
315
253 spin_lock_irqsave(&clockfw_lock, flags); 316 spin_lock_irqsave(&clockfw_lock, flags);
254 if (arch_clock->clk_allow_idle) 317 if (arch_clock->clk_allow_idle)
255 arch_clock->clk_allow_idle(clk); 318 arch_clock->clk_allow_idle(clk);
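The clk_get() change above first matches a clock by platform device id plus name and only then falls back to a name-only lookup, while the other entry points now reject NULL or ERR_PTR cookies instead of dereferencing them. A minimal consumer sketch, assuming a hypothetical driver and clock name ("my_fck" is not defined by this patch):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int my_driver_probe(struct platform_device *pdev)
{
	struct clk *fck;

	/* Matched first against a clock with ->id == pdev->id and the
	 * name "my_fck", then against any clock named "my_fck". */
	fck = clk_get(&pdev->dev, "my_fck");
	if (IS_ERR(fck))
		return PTR_ERR(fck);

	clk_enable(fck);
	dev_info(&pdev->dev, "clock runs at %lu Hz\n", clk_get_rate(fck));
	clk_disable(fck);
	clk_put(fck);

	return 0;
}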
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
index 9dcce904b608..079b67deac0f 100644
--- a/arch/arm/plat-omap/devices.c
+++ b/arch/arm/plat-omap/devices.c
@@ -24,6 +24,7 @@
24#include <asm/arch/board.h> 24#include <asm/arch/board.h>
25#include <asm/arch/mux.h> 25#include <asm/arch/mux.h>
26#include <asm/arch/gpio.h> 26#include <asm/arch/gpio.h>
27#include <asm/arch/menelaus.h>
27 28
28 29
29void omap_nop_release(struct device *dev) 30void omap_nop_release(struct device *dev)
@@ -98,6 +99,62 @@ static inline void omap_init_i2c(void) {}
98#endif 99#endif
99 100
100/*-------------------------------------------------------------------------*/ 101/*-------------------------------------------------------------------------*/
102#if defined(CONFIG_KEYBOARD_OMAP) || defined(CONFIG_KEYBOARD_OMAP_MODULE)
103
104static void omap_init_kp(void)
105{
106 if (machine_is_omap_h2() || machine_is_omap_h3()) {
107 omap_cfg_reg(F18_1610_KBC0);
108 omap_cfg_reg(D20_1610_KBC1);
109 omap_cfg_reg(D19_1610_KBC2);
110 omap_cfg_reg(E18_1610_KBC3);
111 omap_cfg_reg(C21_1610_KBC4);
112
113 omap_cfg_reg(G18_1610_KBR0);
114 omap_cfg_reg(F19_1610_KBR1);
115 omap_cfg_reg(H14_1610_KBR2);
116 omap_cfg_reg(E20_1610_KBR3);
117 omap_cfg_reg(E19_1610_KBR4);
118 omap_cfg_reg(N19_1610_KBR5);
119 } else if (machine_is_omap_perseus2()) {
120 omap_cfg_reg(E2_730_KBR0);
121 omap_cfg_reg(J7_730_KBR1);
122 omap_cfg_reg(E1_730_KBR2);
123 omap_cfg_reg(F3_730_KBR3);
124 omap_cfg_reg(D2_730_KBR4);
125
126 omap_cfg_reg(C2_730_KBC0);
127 omap_cfg_reg(D3_730_KBC1);
128 omap_cfg_reg(E4_730_KBC2);
129 omap_cfg_reg(F4_730_KBC3);
130 omap_cfg_reg(E3_730_KBC4);
131 } else if (machine_is_omap_h4()) {
132 omap_cfg_reg(T19_24XX_KBR0);
133 omap_cfg_reg(R19_24XX_KBR1);
134 omap_cfg_reg(V18_24XX_KBR2);
135 omap_cfg_reg(M21_24XX_KBR3);
136 omap_cfg_reg(E5__24XX_KBR4);
137 if (omap_has_menelaus()) {
138 omap_cfg_reg(B3__24XX_KBR5);
139 omap_cfg_reg(AA4_24XX_KBC2);
140 omap_cfg_reg(B13_24XX_KBC6);
141 } else {
142 omap_cfg_reg(M18_24XX_KBR5);
143 omap_cfg_reg(H19_24XX_KBC2);
144 omap_cfg_reg(N19_24XX_KBC6);
145 }
146 omap_cfg_reg(R20_24XX_KBC0);
147 omap_cfg_reg(M14_24XX_KBC1);
148 omap_cfg_reg(V17_24XX_KBC3);
149 omap_cfg_reg(P21_24XX_KBC4);
150 omap_cfg_reg(L14_24XX_KBC5);
151 }
152}
153#else
154static inline void omap_init_kp(void) {}
155#endif
156
157/*-------------------------------------------------------------------------*/
101 158
102#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) 159#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
103 160
@@ -240,6 +297,55 @@ static void __init omap_init_mmc(void)
240static inline void omap_init_mmc(void) {} 297static inline void omap_init_mmc(void) {}
241#endif 298#endif
242 299
300/*-------------------------------------------------------------------------*/
301
302/* Numbering for the SPI-capable controllers when used for SPI:
303 * spi = 1
304 * uwire = 2
305 * mmc1..2 = 3..4
306 * mcbsp1..3 = 5..7
307 */
308
309#if defined(CONFIG_SPI_OMAP_UWIRE) || defined(CONFIG_SPI_OMAP_UWIRE_MODULE)
310
311#define OMAP_UWIRE_BASE 0xfffb3000
312
313static struct resource uwire_resources[] = {
314 {
315 .start = OMAP_UWIRE_BASE,
316 .end = OMAP_UWIRE_BASE + 0x20,
317 .flags = IORESOURCE_MEM,
318 },
319};
320
321static struct platform_device omap_uwire_device = {
322 .name = "omap_uwire",
323 .id = -1,
324 .dev = {
325 .release = omap_nop_release,
326 },
327 .num_resources = ARRAY_SIZE(uwire_resources),
328 .resource = uwire_resources,
329};
330
331static void omap_init_uwire(void)
332{
333 /* FIXME define and use a boot tag; not all boards will be hooking
334 * up devices to the microwire controller, and multi-board configs
335 * mean that CONFIG_SPI_OMAP_UWIRE may be configured anyway...
336 */
337
338 /* board-specific code must configure chipselects (only a few
339 * are normally used) and SCLK/SDI/SDO (each has two choices).
340 */
341 (void) platform_device_register(&omap_uwire_device);
342}
343#else
344static inline void omap_init_uwire(void) {}
345#endif
346
347/*-------------------------------------------------------------------------*/
348
243#if defined(CONFIG_OMAP_WATCHDOG) || defined(CONFIG_OMAP_WATCHDOG_MODULE) 349#if defined(CONFIG_OMAP_WATCHDOG) || defined(CONFIG_OMAP_WATCHDOG_MODULE)
244 350
245#ifdef CONFIG_ARCH_OMAP24XX 351#ifdef CONFIG_ARCH_OMAP24XX
@@ -310,40 +416,6 @@ static void omap_init_rng(void)
310static inline void omap_init_rng(void) {} 416static inline void omap_init_rng(void) {}
311#endif 417#endif
312 418
313#if defined(CONFIG_FB_OMAP) || defined(CONFIG_FB_OMAP_MODULE)
314
315static struct omap_lcd_config omap_fb_conf;
316
317static u64 omap_fb_dma_mask = ~(u32)0;
318
319static struct platform_device omap_fb_device = {
320 .name = "omapfb",
321 .id = -1,
322 .dev = {
323 .release = omap_nop_release,
324 .dma_mask = &omap_fb_dma_mask,
325 .coherent_dma_mask = ~(u32)0,
326 .platform_data = &omap_fb_conf,
327 },
328 .num_resources = 0,
329};
330
331static inline void omap_init_fb(void)
332{
333 const struct omap_lcd_config *conf;
334
335 conf = omap_get_config(OMAP_TAG_LCD, struct omap_lcd_config);
336 if (conf != NULL)
337 omap_fb_conf = *conf;
338 platform_device_register(&omap_fb_device);
339}
340
341#else
342
343static inline void omap_init_fb(void) {}
344
345#endif
346
347/* 419/*
348 * This gets called after board-specific INIT_MACHINE, and initializes most 420 * This gets called after board-specific INIT_MACHINE, and initializes most
349 * on-chip peripherals accessible on this board (except for few like USB): 421 * on-chip peripherals accessible on this board (except for few like USB):
@@ -369,9 +441,10 @@ static int __init omap_init_devices(void)
369 /* please keep these calls, and their implementations above, 441 /* please keep these calls, and their implementations above,
370 * in alphabetical order so they're easier to sort through. 442 * in alphabetical order so they're easier to sort through.
371 */ 443 */
372 omap_init_fb();
373 omap_init_i2c(); 444 omap_init_i2c();
445 omap_init_kp();
374 omap_init_mmc(); 446 omap_init_mmc();
447 omap_init_uwire();
375 omap_init_wdt(); 448 omap_init_wdt();
376 omap_init_rng(); 449 omap_init_rng();
377 450
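omap_init_uwire() above only registers a fixed platform_device for the MicroWire controller; the matching driver binds to it by name. A hedged skeleton of that driver side (only the "omap_uwire" name is taken from the patch, the rest is illustrative):

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int uwire_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res)
		return -ENODEV;
	/* map res->start..res->end and configure chipselects here */
	return 0;
}

static struct platform_driver uwire_driver = {
	.probe	= uwire_probe,
	.driver	= {
		.name	= "omap_uwire",	/* must match the device registered above */
	},
};

static int __init uwire_init(void)
{
	return platform_driver_register(&uwire_driver);
}
module_init(uwire_init);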
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index a4e5ac77f6df..5dac4230360d 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -1258,6 +1258,11 @@ void omap_stop_lcd_dma(void)
1258 omap_writew(w, OMAP1610_DMA_LCD_CTRL); 1258 omap_writew(w, OMAP1610_DMA_LCD_CTRL);
1259} 1259}
1260 1260
1261int omap_lcd_dma_ext_running(void)
1262{
1263 return lcd_dma.ext_ctrl && lcd_dma.active;
1264}
1265
1261/*----------------------------------------------------------------------------*/ 1266/*----------------------------------------------------------------------------*/
1262 1267
1263static int __init omap_init_dma(void) 1268static int __init omap_init_dma(void)
@@ -1389,6 +1394,7 @@ EXPORT_SYMBOL(omap_free_lcd_dma);
1389EXPORT_SYMBOL(omap_enable_lcd_dma); 1394EXPORT_SYMBOL(omap_enable_lcd_dma);
1390EXPORT_SYMBOL(omap_setup_lcd_dma); 1395EXPORT_SYMBOL(omap_setup_lcd_dma);
1391EXPORT_SYMBOL(omap_stop_lcd_dma); 1396EXPORT_SYMBOL(omap_stop_lcd_dma);
1397EXPORT_SYMBOL(omap_lcd_dma_ext_running);
1392EXPORT_SYMBOL(omap_set_lcd_dma_b1); 1398EXPORT_SYMBOL(omap_set_lcd_dma_b1);
1393EXPORT_SYMBOL(omap_set_lcd_dma_single_transfer); 1399EXPORT_SYMBOL(omap_set_lcd_dma_single_transfer);
1394EXPORT_SYMBOL(omap_set_lcd_dma_ext_controller); 1400EXPORT_SYMBOL(omap_set_lcd_dma_ext_controller);
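omap_lcd_dma_ext_running() just reports whether LCD DMA is active under the external controller; a plausible (hypothetical) caller would use it to avoid reprogramming the channel mid-transfer:

#include <asm/arch/dma.h>	/* assumed location of the LCD DMA helpers */

static int my_lcd_reconfigure(void)
{
	/* Leave the registers alone while the external controller is
	 * driving an active transfer. */
	if (omap_lcd_dma_ext_running())
		return -EBUSY;

	omap_setup_lcd_dma();
	omap_enable_lcd_dma();
	return 0;
}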
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index 38d7ebf87920..eba3cb52ad87 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -97,6 +97,32 @@ int omap_dm_timers_active(void)
97} 97}
98 98
99 99
100/**
101 * omap_dm_timer_modify_idlect_mask - Check if any running timers use ARMXOR
102 * @inputmask: current value of idlect mask
103 */
104__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
105{
106 int n;
107
108 /* If ARMXOR cannot be idled this function call is unnecessary */
109 if (!(inputmask & (1 << 1)))
110 return inputmask;
111
112 /* If any active timer is using ARMXOR return modified mask */
113 for (n = 0; dm_timers[n].base; ++n)
114 if (omap_dm_timer_read_reg(&dm_timers[n], OMAP_TIMER_CTRL_REG)&
115 OMAP_TIMER_CTRL_ST) {
116 if (((omap_readl(MOD_CONF_CTRL_1)>>(n*2)) & 0x03) == 0)
117 inputmask &= ~(1 << 1);
118 else
119 inputmask &= ~(1 << 2);
120 }
121
122 return inputmask;
123}
124
125
100void omap_dm_timer_set_source(struct omap_dm_timer *timer, int source) 126void omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
101{ 127{
102 int n = (timer - dm_timers) << 1; 128 int n = (timer - dm_timers) << 1;
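omap_dm_timer_modify_idlect_mask() strips the clock-idle bits out of a candidate mask whenever a running dual-mode timer still depends on that clock. A hedged sketch of the intended caller on the OMAP1 power-management path (the register write is shown for illustration only):

#include <asm/arch/dmtimer.h>	/* assumed location of the prototype */

static void my_prepare_idle(void)
{
	__u32 mask = ~0;	/* start from "everything may idle" */

	/* Clear the bits for clocks that active dm timers still need
	 * before the value is committed to ARM_IDLECT1. */
	mask = omap_dm_timer_modify_idlect_mask(mask);
	omap_writew(mask, ARM_IDLECT1);
}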
diff --git a/arch/arm/plat-omap/fb.c b/arch/arm/plat-omap/fb.c
new file mode 100644
index 000000000000..305e9b990b71
--- /dev/null
+++ b/arch/arm/plat-omap/fb.c
@@ -0,0 +1,80 @@
1#include <linux/config.h>
2#include <linux/module.h>
3#include <linux/kernel.h>
4#include <linux/init.h>
5#include <linux/platform_device.h>
6#include <linux/bootmem.h>
7
8#include <asm/hardware.h>
9#include <asm/io.h>
10#include <asm/mach-types.h>
11#include <asm/mach/map.h>
12
13#include <asm/arch/board.h>
14#include <asm/arch/sram.h>
15#include <asm/arch/omapfb.h>
16
17#if defined(CONFIG_FB_OMAP) || defined(CONFIG_FB_OMAP_MODULE)
18
19static struct omapfb_platform_data omapfb_config;
20
21static u64 omap_fb_dma_mask = ~(u32)0;
22
23static struct platform_device omap_fb_device = {
24 .name = "omapfb",
25 .id = -1,
26 .dev = {
27 .dma_mask = &omap_fb_dma_mask,
28 .coherent_dma_mask = ~(u32)0,
29 .platform_data = &omapfb_config,
30 },
31 .num_resources = 0,
32};
33
34/* called from map_io */
35void omapfb_reserve_mem(void)
36{
37 const struct omap_fbmem_config *fbmem_conf;
38
39 omapfb_config.fbmem.fb_sram_start = omap_fb_sram_start;
40 omapfb_config.fbmem.fb_sram_size = omap_fb_sram_size;
41
42 fbmem_conf = omap_get_config(OMAP_TAG_FBMEM, struct omap_fbmem_config);
43
44 if (fbmem_conf != NULL) {
45 /* indicate that the bootloader already initialized the
46 * fb device, so we'll skip that part in the fb driver
47 */
48 omapfb_config.fbmem.fb_sdram_start = fbmem_conf->fb_sdram_start;
49 omapfb_config.fbmem.fb_sdram_size = fbmem_conf->fb_sdram_size;
50 if (fbmem_conf->fb_sdram_size) {
51 pr_info("Reserving %u bytes SDRAM for frame buffer\n",
52 fbmem_conf->fb_sdram_size);
53 reserve_bootmem(fbmem_conf->fb_sdram_start,
54 fbmem_conf->fb_sdram_size);
55 }
56 }
57}
58
59static inline int omap_init_fb(void)
60{
61 const struct omap_lcd_config *conf;
62
63 conf = omap_get_config(OMAP_TAG_LCD, struct omap_lcd_config);
64 if (conf == NULL)
65 return 0;
66
67 omapfb_config.lcd = *conf;
68
69 return platform_device_register(&omap_fb_device);
70}
71
72arch_initcall(omap_init_fb);
73
74#else
75
76void omapfb_reserve_mem(void) {}
77
78#endif
79
80
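The new fb.c reads the LCD and frame-buffer memory layout from board configuration tags and registers the "omapfb" device from an arch_initcall. A rough sketch of how a board file could supply the LCD tag; the struct field value and board names below are assumptions for illustration, not taken from this patch:

#include <linux/kernel.h>
#include <asm/arch/board.h>

static struct omap_lcd_config myboard_lcd_config = {
	.ctrl_name	= "internal",		/* assumed field/value */
};

static struct omap_board_config_kernel myboard_config[] = {
	{ OMAP_TAG_LCD, &myboard_lcd_config },
};

static void __init myboard_init(void)
{
	omap_board_config = myboard_config;
	omap_board_config_size = ARRAY_SIZE(myboard_config);
}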
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index b4d5b9e4bfce..d3c8ea7eecfd 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -174,7 +174,7 @@ static int gpio_bank_count;
174static inline struct gpio_bank *get_gpio_bank(int gpio) 174static inline struct gpio_bank *get_gpio_bank(int gpio)
175{ 175{
176#ifdef CONFIG_ARCH_OMAP15XX 176#ifdef CONFIG_ARCH_OMAP15XX
177 if (cpu_is_omap1510()) { 177 if (cpu_is_omap15xx()) {
178 if (OMAP_GPIO_IS_MPUIO(gpio)) 178 if (OMAP_GPIO_IS_MPUIO(gpio))
179 return &gpio_bank[0]; 179 return &gpio_bank[0];
180 return &gpio_bank[1]; 180 return &gpio_bank[1];
@@ -223,7 +223,7 @@ static inline int gpio_valid(int gpio)
223 return 0; 223 return 0;
224 } 224 }
225#ifdef CONFIG_ARCH_OMAP15XX 225#ifdef CONFIG_ARCH_OMAP15XX
226 if (cpu_is_omap1510() && gpio < 16) 226 if (cpu_is_omap15xx() && gpio < 16)
227 return 0; 227 return 0;
228#endif 228#endif
229#if defined(CONFIG_ARCH_OMAP16XX) 229#if defined(CONFIG_ARCH_OMAP16XX)
@@ -402,13 +402,13 @@ static inline void set_24xx_gpio_triggering(void __iomem *base, int gpio, int tr
402 u32 gpio_bit = 1 << gpio; 402 u32 gpio_bit = 1 << gpio;
403 403
404 MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT0, gpio_bit, 404 MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT0, gpio_bit,
405 trigger & IRQT_LOW); 405 trigger & __IRQT_LOWLVL);
406 MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT1, gpio_bit, 406 MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT1, gpio_bit,
407 trigger & IRQT_HIGH); 407 trigger & __IRQT_HIGHLVL);
408 MOD_REG_BIT(OMAP24XX_GPIO_RISINGDETECT, gpio_bit, 408 MOD_REG_BIT(OMAP24XX_GPIO_RISINGDETECT, gpio_bit,
409 trigger & IRQT_RISING); 409 trigger & __IRQT_RISEDGE);
410 MOD_REG_BIT(OMAP24XX_GPIO_FALLINGDETECT, gpio_bit, 410 MOD_REG_BIT(OMAP24XX_GPIO_FALLINGDETECT, gpio_bit,
411 trigger & IRQT_FALLING); 411 trigger & __IRQT_FALEDGE);
412 /* FIXME: Possibly do 'set_irq_handler(j, do_level_IRQ)' if only level 412 /* FIXME: Possibly do 'set_irq_handler(j, do_level_IRQ)' if only level
413 * triggering requested. */ 413 * triggering requested. */
414} 414}
@@ -422,9 +422,9 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
422 case METHOD_MPUIO: 422 case METHOD_MPUIO:
423 reg += OMAP_MPUIO_GPIO_INT_EDGE; 423 reg += OMAP_MPUIO_GPIO_INT_EDGE;
424 l = __raw_readl(reg); 424 l = __raw_readl(reg);
425 if (trigger == IRQT_RISING) 425 if (trigger & __IRQT_RISEDGE)
426 l |= 1 << gpio; 426 l |= 1 << gpio;
427 else if (trigger == IRQT_FALLING) 427 else if (trigger & __IRQT_FALEDGE)
428 l &= ~(1 << gpio); 428 l &= ~(1 << gpio);
429 else 429 else
430 goto bad; 430 goto bad;
@@ -432,9 +432,9 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
432 case METHOD_GPIO_1510: 432 case METHOD_GPIO_1510:
433 reg += OMAP1510_GPIO_INT_CONTROL; 433 reg += OMAP1510_GPIO_INT_CONTROL;
434 l = __raw_readl(reg); 434 l = __raw_readl(reg);
435 if (trigger == IRQT_RISING) 435 if (trigger & __IRQT_RISEDGE)
436 l |= 1 << gpio; 436 l |= 1 << gpio;
437 else if (trigger == IRQT_FALLING) 437 else if (trigger & __IRQT_FALEDGE)
438 l &= ~(1 << gpio); 438 l &= ~(1 << gpio);
439 else 439 else
440 goto bad; 440 goto bad;
@@ -446,20 +446,21 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
446 reg += OMAP1610_GPIO_EDGE_CTRL1; 446 reg += OMAP1610_GPIO_EDGE_CTRL1;
447 gpio &= 0x07; 447 gpio &= 0x07;
448 /* We allow only edge triggering, i.e. two lowest bits */ 448 /* We allow only edge triggering, i.e. two lowest bits */
449 if (trigger & ~IRQT_BOTHEDGE) 449 if (trigger & (__IRQT_LOWLVL | __IRQT_HIGHLVL))
450 BUG(); 450 BUG();
451 /* NOTE: knows __IRQT_{FAL,RIS}EDGE match OMAP hardware */
452 trigger &= 0x03;
453 l = __raw_readl(reg); 451 l = __raw_readl(reg);
454 l &= ~(3 << (gpio << 1)); 452 l &= ~(3 << (gpio << 1));
455 l |= trigger << (gpio << 1); 453 if (trigger & __IRQT_RISEDGE)
454 l |= 2 << (gpio << 1);
455 if (trigger & __IRQT_FALEDGE)
456 l |= 1 << (gpio << 1);
456 break; 457 break;
457 case METHOD_GPIO_730: 458 case METHOD_GPIO_730:
458 reg += OMAP730_GPIO_INT_CONTROL; 459 reg += OMAP730_GPIO_INT_CONTROL;
459 l = __raw_readl(reg); 460 l = __raw_readl(reg);
460 if (trigger == IRQT_RISING) 461 if (trigger & __IRQT_RISEDGE)
461 l |= 1 << gpio; 462 l |= 1 << gpio;
462 else if (trigger == IRQT_FALLING) 463 else if (trigger & __IRQT_FALEDGE)
463 l &= ~(1 << gpio); 464 l &= ~(1 << gpio);
464 else 465 else
465 goto bad; 466 goto bad;
@@ -491,7 +492,9 @@ static int gpio_irq_type(unsigned irq, unsigned type)
491 if (check_gpio(gpio) < 0) 492 if (check_gpio(gpio) < 0)
492 return -EINVAL; 493 return -EINVAL;
493 494
494 if (type & (__IRQT_LOWLVL|__IRQT_HIGHLVL|IRQT_PROBE)) 495 if (type & IRQT_PROBE)
496 return -EINVAL;
497 if (!cpu_is_omap24xx() && (type & (__IRQT_LOWLVL|__IRQT_HIGHLVL)))
495 return -EINVAL; 498 return -EINVAL;
496 499
497 bank = get_gpio_bank(gpio); 500 bank = get_gpio_bank(gpio);
@@ -755,13 +758,32 @@ static void gpio_irq_handler(unsigned int irq, struct irqdesc *desc,
755 if (bank->method == METHOD_GPIO_24XX) 758 if (bank->method == METHOD_GPIO_24XX)
756 isr_reg = bank->base + OMAP24XX_GPIO_IRQSTATUS1; 759 isr_reg = bank->base + OMAP24XX_GPIO_IRQSTATUS1;
757#endif 760#endif
758
759 while(1) { 761 while(1) {
760 isr = __raw_readl(isr_reg); 762 u32 isr_saved, level_mask = 0;
761 _enable_gpio_irqbank(bank, isr, 0); 763
762 _clear_gpio_irqbank(bank, isr); 764 isr_saved = isr = __raw_readl(isr_reg);
763 _enable_gpio_irqbank(bank, isr, 1); 765
764 desc->chip->unmask(irq); 766 if (cpu_is_omap15xx() && (bank->method == METHOD_MPUIO))
767 isr &= 0x0000ffff;
768
769 if (cpu_is_omap24xx())
770 level_mask =
771 __raw_readl(bank->base +
772 OMAP24XX_GPIO_LEVELDETECT0) |
773 __raw_readl(bank->base +
774 OMAP24XX_GPIO_LEVELDETECT1);
775
776 /* clear edge sensitive interrupts before handler(s) are
 777		called so that we don't miss any interrupt that occurs while
778 executing them */
779 _enable_gpio_irqbank(bank, isr_saved & ~level_mask, 0);
780 _clear_gpio_irqbank(bank, isr_saved & ~level_mask);
781 _enable_gpio_irqbank(bank, isr_saved & ~level_mask, 1);
782
 783		/* if only edge sensitive GPIO pin interrupts are
 784		   configured, we can unmask the GPIO bank interrupt immediately */
785 if (!level_mask)
786 desc->chip->unmask(irq);
765 787
766 if (!isr) 788 if (!isr)
767 break; 789 break;
@@ -774,6 +796,20 @@ static void gpio_irq_handler(unsigned int irq, struct irqdesc *desc,
774 d = irq_desc + gpio_irq; 796 d = irq_desc + gpio_irq;
775 desc_handle_irq(gpio_irq, d, regs); 797 desc_handle_irq(gpio_irq, d, regs);
776 } 798 }
799
800 if (cpu_is_omap24xx()) {
801 /* clear level sensitive interrupts after handler(s) */
802 _enable_gpio_irqbank(bank, isr_saved & level_mask, 0);
803 _clear_gpio_irqbank(bank, isr_saved & level_mask);
804 _enable_gpio_irqbank(bank, isr_saved & level_mask, 1);
805 }
806
807 /* if bank has any level sensitive GPIO pin interrupt
808 configured, we must unmask the bank interrupt only after
809 handler(s) are executed in order to avoid spurious bank
810 interrupt */
811 if (level_mask)
812 desc->chip->unmask(irq);
777 } 813 }
778} 814}
779 815
@@ -848,7 +884,7 @@ static int __init _omap_gpio_init(void)
848 884
849 initialized = 1; 885 initialized = 1;
850 886
851 if (cpu_is_omap1510()) { 887 if (cpu_is_omap15xx()) {
852 gpio_ick = clk_get(NULL, "arm_gpio_ck"); 888 gpio_ick = clk_get(NULL, "arm_gpio_ck");
853 if (IS_ERR(gpio_ick)) 889 if (IS_ERR(gpio_ick))
854 printk("Could not get arm_gpio_ck\n"); 890 printk("Could not get arm_gpio_ck\n");
@@ -869,7 +905,7 @@ static int __init _omap_gpio_init(void)
869 } 905 }
870 906
871#ifdef CONFIG_ARCH_OMAP15XX 907#ifdef CONFIG_ARCH_OMAP15XX
872 if (cpu_is_omap1510()) { 908 if (cpu_is_omap15xx()) {
873 printk(KERN_INFO "OMAP1510 GPIO hardware\n"); 909 printk(KERN_INFO "OMAP1510 GPIO hardware\n");
874 gpio_bank_count = 2; 910 gpio_bank_count = 2;
875 gpio_bank = gpio_bank_1510; 911 gpio_bank = gpio_bank_1510;
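The trigger handling above now works on the __IRQT_* bitmask form, allows level triggers only on 24xx, and acks level-sensitive interrupts after their handlers to avoid spurious bank interrupts. A hedged usage sketch for a 24xx board (pin number, names and flags are illustrative):

#include <linux/interrupt.h>
#include <asm/irq.h>
#include <asm/arch/gpio.h>

static irqreturn_t my_gpio_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	return IRQ_HANDLED;
}

static int my_gpio_irq_setup(void)
{
	int gpio = 117;				/* illustrative pin */
	int irq = OMAP_GPIO_IRQ(gpio);

	if (omap_request_gpio(gpio) < 0)
		return -EBUSY;
	omap_set_gpio_direction(gpio, 1);	/* input */

	/* Level triggers are accepted on 24xx only; on OMAP1 this
	 * request would now fail with -EINVAL. */
	set_irq_type(irq, IRQT_LOW);

	return request_irq(irq, my_gpio_handler, 0, "my_gpio", NULL);
}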
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index 1cd2cace7e1b..196aac3ac329 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -34,7 +34,7 @@
34#ifdef CONFIG_MCBSP_DEBUG 34#ifdef CONFIG_MCBSP_DEBUG
35#define DBG(x...) printk(x) 35#define DBG(x...) printk(x)
36#else 36#else
37#define DBG(x...) do { } while (0) 37#define DBG(x...) do { } while (0)
38#endif 38#endif
39 39
40struct omap_mcbsp { 40struct omap_mcbsp {
@@ -44,6 +44,7 @@ struct omap_mcbsp {
44 omap_mcbsp_word_length rx_word_length; 44 omap_mcbsp_word_length rx_word_length;
45 omap_mcbsp_word_length tx_word_length; 45 omap_mcbsp_word_length tx_word_length;
46 46
47 omap_mcbsp_io_type_t io_type; /* IRQ or poll */
47 /* IRQ based TX/RX */ 48 /* IRQ based TX/RX */
48 int rx_irq; 49 int rx_irq;
49 int tx_irq; 50 int tx_irq;
@@ -64,10 +65,19 @@ struct omap_mcbsp {
64}; 65};
65 66
66static struct omap_mcbsp mcbsp[OMAP_MAX_MCBSP_COUNT]; 67static struct omap_mcbsp mcbsp[OMAP_MAX_MCBSP_COUNT];
68#ifdef CONFIG_ARCH_OMAP1
67static struct clk *mcbsp_dsp_ck = 0; 69static struct clk *mcbsp_dsp_ck = 0;
68static struct clk *mcbsp_api_ck = 0; 70static struct clk *mcbsp_api_ck = 0;
69static struct clk *mcbsp_dspxor_ck = 0; 71static struct clk *mcbsp_dspxor_ck = 0;
70 72#endif
73#ifdef CONFIG_ARCH_OMAP2
74static struct clk *mcbsp1_ick = 0;
75static struct clk *mcbsp1_fck = 0;
76static struct clk *mcbsp2_ick = 0;
77static struct clk *mcbsp2_fck = 0;
78static struct clk *sys_ck = 0;
79static struct clk *sys_clkout = 0;
80#endif
71 81
72static void omap_mcbsp_dump_reg(u8 id) 82static void omap_mcbsp_dump_reg(u8 id)
73{ 83{
@@ -88,7 +98,6 @@ static void omap_mcbsp_dump_reg(u8 id)
88 DBG("***********************\n"); 98 DBG("***********************\n");
89} 99}
90 100
91
92static irqreturn_t omap_mcbsp_tx_irq_handler(int irq, void *dev_id, struct pt_regs *regs) 101static irqreturn_t omap_mcbsp_tx_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
93{ 102{
94 struct omap_mcbsp * mcbsp_tx = (struct omap_mcbsp *)(dev_id); 103 struct omap_mcbsp * mcbsp_tx = (struct omap_mcbsp *)(dev_id);
@@ -109,7 +118,6 @@ static irqreturn_t omap_mcbsp_rx_irq_handler(int irq, void *dev_id, struct pt_re
109 return IRQ_HANDLED; 118 return IRQ_HANDLED;
110} 119}
111 120
112
113static void omap_mcbsp_tx_dma_callback(int lch, u16 ch_status, void *data) 121static void omap_mcbsp_tx_dma_callback(int lch, u16 ch_status, void *data)
114{ 122{
115 struct omap_mcbsp * mcbsp_dma_tx = (struct omap_mcbsp *)(data); 123 struct omap_mcbsp * mcbsp_dma_tx = (struct omap_mcbsp *)(data);
@@ -176,7 +184,7 @@ static int omap_mcbsp_check(unsigned int id)
176 return 0; 184 return 0;
177 } 185 }
178 186
179 if (cpu_is_omap1510() || cpu_is_omap16xx()) { 187 if (cpu_is_omap15xx() || cpu_is_omap16xx() || cpu_is_omap24xx()) {
180 if (id > OMAP_MAX_MCBSP_COUNT) { 188 if (id > OMAP_MAX_MCBSP_COUNT) {
181 printk(KERN_ERR "OMAP-McBSP: McBSP%d doesn't exist\n", id + 1); 189 printk(KERN_ERR "OMAP-McBSP: McBSP%d doesn't exist\n", id + 1);
182 return -1; 190 return -1;
@@ -187,9 +195,10 @@ static int omap_mcbsp_check(unsigned int id)
187 return -1; 195 return -1;
188} 196}
189 197
198#ifdef CONFIG_ARCH_OMAP1
190static void omap_mcbsp_dsp_request(void) 199static void omap_mcbsp_dsp_request(void)
191{ 200{
192 if (cpu_is_omap1510() || cpu_is_omap16xx()) { 201 if (cpu_is_omap15xx() || cpu_is_omap16xx()) {
193 clk_enable(mcbsp_dsp_ck); 202 clk_enable(mcbsp_dsp_ck);
194 clk_enable(mcbsp_api_ck); 203 clk_enable(mcbsp_api_ck);
195 204
@@ -207,12 +216,49 @@ static void omap_mcbsp_dsp_request(void)
207 216
208static void omap_mcbsp_dsp_free(void) 217static void omap_mcbsp_dsp_free(void)
209{ 218{
210 if (cpu_is_omap1510() || cpu_is_omap16xx()) { 219 if (cpu_is_omap15xx() || cpu_is_omap16xx()) {
211 clk_disable(mcbsp_dspxor_ck); 220 clk_disable(mcbsp_dspxor_ck);
212 clk_disable(mcbsp_dsp_ck); 221 clk_disable(mcbsp_dsp_ck);
213 clk_disable(mcbsp_api_ck); 222 clk_disable(mcbsp_api_ck);
214 } 223 }
215} 224}
225#endif
226
227#ifdef CONFIG_ARCH_OMAP2
228static void omap2_mcbsp2_mux_setup(void)
229{
230 omap_cfg_reg(Y15_24XX_MCBSP2_CLKX);
231 omap_cfg_reg(R14_24XX_MCBSP2_FSX);
232 omap_cfg_reg(W15_24XX_MCBSP2_DR);
233 omap_cfg_reg(V15_24XX_MCBSP2_DX);
234 omap_cfg_reg(V14_24XX_GPIO117);
235 omap_cfg_reg(W14_24XX_SYS_CLKOUT);
236}
237#endif
238
239/*
240 * We can choose between IRQ based or polled IO.
241 * This needs to be called before omap_mcbsp_request().
242 */
243int omap_mcbsp_set_io_type(unsigned int id, omap_mcbsp_io_type_t io_type)
244{
245 if (omap_mcbsp_check(id) < 0)
246 return -EINVAL;
247
248 spin_lock(&mcbsp[id].lock);
249
250 if (!mcbsp[id].free) {
251 printk (KERN_ERR "OMAP-McBSP: McBSP%d is currently in use\n", id + 1);
252 spin_unlock(&mcbsp[id].lock);
253 return -EINVAL;
254 }
255
256 mcbsp[id].io_type = io_type;
257
258 spin_unlock(&mcbsp[id].lock);
259
260 return 0;
261}
216 262
217int omap_mcbsp_request(unsigned int id) 263int omap_mcbsp_request(unsigned int id)
218{ 264{
@@ -221,12 +267,26 @@ int omap_mcbsp_request(unsigned int id)
221 if (omap_mcbsp_check(id) < 0) 267 if (omap_mcbsp_check(id) < 0)
222 return -EINVAL; 268 return -EINVAL;
223 269
270#ifdef CONFIG_ARCH_OMAP1
224 /* 271 /*
225 * On 1510, 1610 and 1710, McBSP1 and McBSP3 272 * On 1510, 1610 and 1710, McBSP1 and McBSP3
226 * are DSP public peripherals. 273 * are DSP public peripherals.
227 */ 274 */
228 if (id == OMAP_MCBSP1 || id == OMAP_MCBSP3) 275 if (id == OMAP_MCBSP1 || id == OMAP_MCBSP3)
229 omap_mcbsp_dsp_request(); 276 omap_mcbsp_dsp_request();
277#endif
278
279#ifdef CONFIG_ARCH_OMAP2
280 if (cpu_is_omap24xx()) {
281 if (id == OMAP_MCBSP1) {
282 clk_enable(mcbsp1_ick);
283 clk_enable(mcbsp1_fck);
284 } else {
285 clk_enable(mcbsp2_ick);
286 clk_enable(mcbsp2_fck);
287 }
288 }
289#endif
230 290
231 spin_lock(&mcbsp[id].lock); 291 spin_lock(&mcbsp[id].lock);
232 if (!mcbsp[id].free) { 292 if (!mcbsp[id].free) {
@@ -238,30 +298,33 @@ int omap_mcbsp_request(unsigned int id)
238 mcbsp[id].free = 0; 298 mcbsp[id].free = 0;
239 spin_unlock(&mcbsp[id].lock); 299 spin_unlock(&mcbsp[id].lock);
240 300
241 /* We need to get IRQs here */ 301 if (mcbsp[id].io_type == OMAP_MCBSP_IRQ_IO) {
242 err = request_irq(mcbsp[id].tx_irq, omap_mcbsp_tx_irq_handler, 0, 302 /* We need to get IRQs here */
243 "McBSP", 303 err = request_irq(mcbsp[id].tx_irq, omap_mcbsp_tx_irq_handler, 0,
244 (void *) (&mcbsp[id])); 304 "McBSP",
245 if (err != 0) { 305 (void *) (&mcbsp[id]));
246 printk(KERN_ERR "OMAP-McBSP: Unable to request TX IRQ %d for McBSP%d\n", 306 if (err != 0) {
247 mcbsp[id].tx_irq, mcbsp[id].id); 307 printk(KERN_ERR "OMAP-McBSP: Unable to request TX IRQ %d for McBSP%d\n",
248 return err; 308 mcbsp[id].tx_irq, mcbsp[id].id);
249 } 309 return err;
310 }
250 311
251 init_completion(&(mcbsp[id].tx_irq_completion)); 312 init_completion(&(mcbsp[id].tx_irq_completion));
252 313
253 314
254 err = request_irq(mcbsp[id].rx_irq, omap_mcbsp_rx_irq_handler, 0, 315 err = request_irq(mcbsp[id].rx_irq, omap_mcbsp_rx_irq_handler, 0,
255 "McBSP", 316 "McBSP",
256 (void *) (&mcbsp[id])); 317 (void *) (&mcbsp[id]));
257 if (err != 0) { 318 if (err != 0) {
258 printk(KERN_ERR "OMAP-McBSP: Unable to request RX IRQ %d for McBSP%d\n", 319 printk(KERN_ERR "OMAP-McBSP: Unable to request RX IRQ %d for McBSP%d\n",
259 mcbsp[id].rx_irq, mcbsp[id].id); 320 mcbsp[id].rx_irq, mcbsp[id].id);
260 free_irq(mcbsp[id].tx_irq, (void *) (&mcbsp[id])); 321 free_irq(mcbsp[id].tx_irq, (void *) (&mcbsp[id]));
261 return err; 322 return err;
323 }
324
325 init_completion(&(mcbsp[id].rx_irq_completion));
262 } 326 }
263 327
264 init_completion(&(mcbsp[id].rx_irq_completion));
265 return 0; 328 return 0;
266 329
267} 330}
@@ -271,8 +334,24 @@ void omap_mcbsp_free(unsigned int id)
271 if (omap_mcbsp_check(id) < 0) 334 if (omap_mcbsp_check(id) < 0)
272 return; 335 return;
273 336
274 if (id == OMAP_MCBSP1 || id == OMAP_MCBSP3) 337#ifdef CONFIG_ARCH_OMAP1
275 omap_mcbsp_dsp_free(); 338 if (cpu_class_is_omap1()) {
339 if (id == OMAP_MCBSP1 || id == OMAP_MCBSP3)
340 omap_mcbsp_dsp_free();
341 }
342#endif
343
344#ifdef CONFIG_ARCH_OMAP2
345 if (cpu_is_omap24xx()) {
346 if (id == OMAP_MCBSP1) {
347 clk_disable(mcbsp1_ick);
348 clk_disable(mcbsp1_fck);
349 } else {
350 clk_disable(mcbsp2_ick);
351 clk_disable(mcbsp2_fck);
352 }
353 }
354#endif
276 355
277 spin_lock(&mcbsp[id].lock); 356 spin_lock(&mcbsp[id].lock);
278 if (mcbsp[id].free) { 357 if (mcbsp[id].free) {
@@ -284,9 +363,11 @@ void omap_mcbsp_free(unsigned int id)
284 mcbsp[id].free = 1; 363 mcbsp[id].free = 1;
285 spin_unlock(&mcbsp[id].lock); 364 spin_unlock(&mcbsp[id].lock);
286 365
287 /* Free IRQs */ 366 if (mcbsp[id].io_type == OMAP_MCBSP_IRQ_IO) {
288 free_irq(mcbsp[id].rx_irq, (void *) (&mcbsp[id])); 367 /* Free IRQs */
289 free_irq(mcbsp[id].tx_irq, (void *) (&mcbsp[id])); 368 free_irq(mcbsp[id].rx_irq, (void *) (&mcbsp[id]));
369 free_irq(mcbsp[id].tx_irq, (void *) (&mcbsp[id]));
370 }
290} 371}
291 372
292/* 373/*
@@ -461,6 +542,115 @@ u32 omap_mcbsp_recv_word(unsigned int id)
461} 542}
462 543
463 544
545int omap_mcbsp_spi_master_xmit_word_poll(unsigned int id, u32 word)
546{
547 u32 io_base = mcbsp[id].io_base;
548 omap_mcbsp_word_length tx_word_length = mcbsp[id].tx_word_length;
549 omap_mcbsp_word_length rx_word_length = mcbsp[id].rx_word_length;
550 u16 spcr2, spcr1, attempts = 0, word_lsb, word_msb = 0;
551
552 if (tx_word_length != rx_word_length)
553 return -EINVAL;
554
555 /* First we wait for the transmitter to be ready */
556 spcr2 = OMAP_MCBSP_READ(io_base, SPCR2);
557 while (!(spcr2 & XRDY)) {
558 spcr2 = OMAP_MCBSP_READ(io_base, SPCR2);
559 if (attempts++ > 1000) {
560 /* We must reset the transmitter */
561 OMAP_MCBSP_WRITE(io_base, SPCR2, spcr2 & (~XRST));
562 udelay(10);
563 OMAP_MCBSP_WRITE(io_base, SPCR2, spcr2 | XRST);
564 udelay(10);
565 printk("McBSP transmitter not ready\n");
566 return -EAGAIN;
567 }
568 }
569
570 /* Now we can push the data */
571 if (tx_word_length > OMAP_MCBSP_WORD_16)
572 OMAP_MCBSP_WRITE(io_base, DXR2, word >> 16);
573 OMAP_MCBSP_WRITE(io_base, DXR1, word & 0xffff);
574
575 /* We wait for the receiver to be ready */
576 spcr1 = OMAP_MCBSP_READ(io_base, SPCR1);
577 while (!(spcr1 & RRDY)) {
578 spcr1 = OMAP_MCBSP_READ(io_base, SPCR1);
579 if (attempts++ > 1000) {
580 /* We must reset the receiver */
581 OMAP_MCBSP_WRITE(io_base, SPCR1, spcr1 & (~RRST));
582 udelay(10);
583 OMAP_MCBSP_WRITE(io_base, SPCR1, spcr1 | RRST);
584 udelay(10);
585 printk("McBSP receiver not ready\n");
586 return -EAGAIN;
587 }
588 }
589
590 /* Receiver is ready, let's read the dummy data */
591 if (rx_word_length > OMAP_MCBSP_WORD_16)
592 word_msb = OMAP_MCBSP_READ(io_base, DRR2);
593 word_lsb = OMAP_MCBSP_READ(io_base, DRR1);
594
595 return 0;
596}
597
598int omap_mcbsp_spi_master_recv_word_poll(unsigned int id, u32 * word)
599{
600 u32 io_base = mcbsp[id].io_base, clock_word = 0;
601 omap_mcbsp_word_length tx_word_length = mcbsp[id].tx_word_length;
602 omap_mcbsp_word_length rx_word_length = mcbsp[id].rx_word_length;
603 u16 spcr2, spcr1, attempts = 0, word_lsb, word_msb = 0;
604
605 if (tx_word_length != rx_word_length)
606 return -EINVAL;
607
608 /* First we wait for the transmitter to be ready */
609 spcr2 = OMAP_MCBSP_READ(io_base, SPCR2);
610 while (!(spcr2 & XRDY)) {
611 spcr2 = OMAP_MCBSP_READ(io_base, SPCR2);
612 if (attempts++ > 1000) {
613 /* We must reset the transmitter */
614 OMAP_MCBSP_WRITE(io_base, SPCR2, spcr2 & (~XRST));
615 udelay(10);
616 OMAP_MCBSP_WRITE(io_base, SPCR2, spcr2 | XRST);
617 udelay(10);
618 printk("McBSP transmitter not ready\n");
619 return -EAGAIN;
620 }
621 }
622
623 /* We first need to enable the bus clock */
624 if (tx_word_length > OMAP_MCBSP_WORD_16)
625 OMAP_MCBSP_WRITE(io_base, DXR2, clock_word >> 16);
626 OMAP_MCBSP_WRITE(io_base, DXR1, clock_word & 0xffff);
627
628 /* We wait for the receiver to be ready */
629 spcr1 = OMAP_MCBSP_READ(io_base, SPCR1);
630 while (!(spcr1 & RRDY)) {
631 spcr1 = OMAP_MCBSP_READ(io_base, SPCR1);
632 if (attempts++ > 1000) {
633 /* We must reset the receiver */
634 OMAP_MCBSP_WRITE(io_base, SPCR1, spcr1 & (~RRST));
635 udelay(10);
636 OMAP_MCBSP_WRITE(io_base, SPCR1, spcr1 | RRST);
637 udelay(10);
638 printk("McBSP receiver not ready\n");
639 return -EAGAIN;
640 }
641 }
642
643 /* Receiver is ready, there is something for us */
644 if (rx_word_length > OMAP_MCBSP_WORD_16)
645 word_msb = OMAP_MCBSP_READ(io_base, DRR2);
646 word_lsb = OMAP_MCBSP_READ(io_base, DRR1);
647
648 word[0] = (word_lsb | (word_msb << 16));
649
650 return 0;
651}
652
653
464/* 654/*
465 * Simple DMA based buffer rx/tx routines. 655 * Simple DMA based buffer rx/tx routines.
466 * Nothing fancy, just a single buffer tx/rx through DMA. 656 * Nothing fancy, just a single buffer tx/rx through DMA.
@@ -471,6 +661,9 @@ u32 omap_mcbsp_recv_word(unsigned int id)
471int omap_mcbsp_xmit_buffer(unsigned int id, dma_addr_t buffer, unsigned int length) 661int omap_mcbsp_xmit_buffer(unsigned int id, dma_addr_t buffer, unsigned int length)
472{ 662{
473 int dma_tx_ch; 663 int dma_tx_ch;
664 int src_port = 0;
665 int dest_port = 0;
666 int sync_dev = 0;
474 667
475 if (omap_mcbsp_check(id) < 0) 668 if (omap_mcbsp_check(id) < 0)
476 return -EINVAL; 669 return -EINVAL;
@@ -487,20 +680,27 @@ int omap_mcbsp_xmit_buffer(unsigned int id, dma_addr_t buffer, unsigned int leng
487 680
488 init_completion(&(mcbsp[id].tx_dma_completion)); 681 init_completion(&(mcbsp[id].tx_dma_completion));
489 682
683 if (cpu_class_is_omap1()) {
684 src_port = OMAP_DMA_PORT_TIPB;
685 dest_port = OMAP_DMA_PORT_EMIFF;
686 }
687 if (cpu_is_omap24xx())
688 sync_dev = mcbsp[id].dma_tx_sync;
689
490 omap_set_dma_transfer_params(mcbsp[id].dma_tx_lch, 690 omap_set_dma_transfer_params(mcbsp[id].dma_tx_lch,
491 OMAP_DMA_DATA_TYPE_S16, 691 OMAP_DMA_DATA_TYPE_S16,
492 length >> 1, 1, 692 length >> 1, 1,
493 OMAP_DMA_SYNC_ELEMENT, 693 OMAP_DMA_SYNC_ELEMENT,
494 0, 0); 694 sync_dev, 0);
495 695
496 omap_set_dma_dest_params(mcbsp[id].dma_tx_lch, 696 omap_set_dma_dest_params(mcbsp[id].dma_tx_lch,
497 OMAP_DMA_PORT_TIPB, 697 src_port,
498 OMAP_DMA_AMODE_CONSTANT, 698 OMAP_DMA_AMODE_CONSTANT,
499 mcbsp[id].io_base + OMAP_MCBSP_REG_DXR1, 699 mcbsp[id].io_base + OMAP_MCBSP_REG_DXR1,
500 0, 0); 700 0, 0);
501 701
502 omap_set_dma_src_params(mcbsp[id].dma_tx_lch, 702 omap_set_dma_src_params(mcbsp[id].dma_tx_lch,
503 OMAP_DMA_PORT_EMIFF, 703 dest_port,
504 OMAP_DMA_AMODE_POST_INC, 704 OMAP_DMA_AMODE_POST_INC,
505 buffer, 705 buffer,
506 0, 0); 706 0, 0);
@@ -514,6 +714,9 @@ int omap_mcbsp_xmit_buffer(unsigned int id, dma_addr_t buffer, unsigned int leng
514int omap_mcbsp_recv_buffer(unsigned int id, dma_addr_t buffer, unsigned int length) 714int omap_mcbsp_recv_buffer(unsigned int id, dma_addr_t buffer, unsigned int length)
515{ 715{
516 int dma_rx_ch; 716 int dma_rx_ch;
717 int src_port = 0;
718 int dest_port = 0;
719 int sync_dev = 0;
517 720
518 if (omap_mcbsp_check(id) < 0) 721 if (omap_mcbsp_check(id) < 0)
519 return -EINVAL; 722 return -EINVAL;
@@ -530,20 +733,27 @@ int omap_mcbsp_recv_buffer(unsigned int id, dma_addr_t buffer, unsigned int leng
530 733
531 init_completion(&(mcbsp[id].rx_dma_completion)); 734 init_completion(&(mcbsp[id].rx_dma_completion));
532 735
736 if (cpu_class_is_omap1()) {
737 src_port = OMAP_DMA_PORT_TIPB;
738 dest_port = OMAP_DMA_PORT_EMIFF;
739 }
740 if (cpu_is_omap24xx())
741 sync_dev = mcbsp[id].dma_rx_sync;
742
533 omap_set_dma_transfer_params(mcbsp[id].dma_rx_lch, 743 omap_set_dma_transfer_params(mcbsp[id].dma_rx_lch,
534 OMAP_DMA_DATA_TYPE_S16, 744 OMAP_DMA_DATA_TYPE_S16,
535 length >> 1, 1, 745 length >> 1, 1,
536 OMAP_DMA_SYNC_ELEMENT, 746 OMAP_DMA_SYNC_ELEMENT,
537 0, 0); 747 sync_dev, 0);
538 748
539 omap_set_dma_src_params(mcbsp[id].dma_rx_lch, 749 omap_set_dma_src_params(mcbsp[id].dma_rx_lch,
540 OMAP_DMA_PORT_TIPB, 750 src_port,
541 OMAP_DMA_AMODE_CONSTANT, 751 OMAP_DMA_AMODE_CONSTANT,
542 mcbsp[id].io_base + OMAP_MCBSP_REG_DRR1, 752 mcbsp[id].io_base + OMAP_MCBSP_REG_DRR1,
543 0, 0); 753 0, 0);
544 754
545 omap_set_dma_dest_params(mcbsp[id].dma_rx_lch, 755 omap_set_dma_dest_params(mcbsp[id].dma_rx_lch,
546 OMAP_DMA_PORT_EMIFF, 756 dest_port,
547 OMAP_DMA_AMODE_POST_INC, 757 OMAP_DMA_AMODE_POST_INC,
548 buffer, 758 buffer,
549 0, 0); 759 0, 0);
@@ -688,6 +898,23 @@ static const struct omap_mcbsp_info mcbsp_1610[] = {
688}; 898};
689#endif 899#endif
690 900
901#if defined(CONFIG_ARCH_OMAP24XX)
902static const struct omap_mcbsp_info mcbsp_24xx[] = {
903 [0] = { .virt_base = IO_ADDRESS(OMAP24XX_MCBSP1_BASE),
904 .dma_rx_sync = OMAP24XX_DMA_MCBSP1_RX,
905 .dma_tx_sync = OMAP24XX_DMA_MCBSP1_TX,
906 .rx_irq = INT_24XX_MCBSP1_IRQ_RX,
907 .tx_irq = INT_24XX_MCBSP1_IRQ_TX,
908 },
909 [1] = { .virt_base = IO_ADDRESS(OMAP24XX_MCBSP2_BASE),
910 .dma_rx_sync = OMAP24XX_DMA_MCBSP2_RX,
911 .dma_tx_sync = OMAP24XX_DMA_MCBSP2_TX,
912 .rx_irq = INT_24XX_MCBSP2_IRQ_RX,
913 .tx_irq = INT_24XX_MCBSP2_IRQ_TX,
914 },
915};
916#endif
917
691static int __init omap_mcbsp_init(void) 918static int __init omap_mcbsp_init(void)
692{ 919{
693 int mcbsp_count = 0, i; 920 int mcbsp_count = 0, i;
@@ -695,6 +922,7 @@ static int __init omap_mcbsp_init(void)
695 922
696 printk("Initializing OMAP McBSP system\n"); 923 printk("Initializing OMAP McBSP system\n");
697 924
925#ifdef CONFIG_ARCH_OMAP1
698 mcbsp_dsp_ck = clk_get(0, "dsp_ck"); 926 mcbsp_dsp_ck = clk_get(0, "dsp_ck");
699 if (IS_ERR(mcbsp_dsp_ck)) { 927 if (IS_ERR(mcbsp_dsp_ck)) {
700 printk(KERN_ERR "mcbsp: could not acquire dsp_ck handle.\n"); 928 printk(KERN_ERR "mcbsp: could not acquire dsp_ck handle.\n");
@@ -710,6 +938,29 @@ static int __init omap_mcbsp_init(void)
710 printk(KERN_ERR "mcbsp: could not acquire dspxor_ck handle.\n"); 938 printk(KERN_ERR "mcbsp: could not acquire dspxor_ck handle.\n");
711 return PTR_ERR(mcbsp_dspxor_ck); 939 return PTR_ERR(mcbsp_dspxor_ck);
712 } 940 }
941#endif
942#ifdef CONFIG_ARCH_OMAP2
943 mcbsp1_ick = clk_get(0, "mcbsp1_ick");
944 if (IS_ERR(mcbsp1_ick)) {
945 printk(KERN_ERR "mcbsp: could not acquire mcbsp1_ick handle.\n");
946 return PTR_ERR(mcbsp1_ick);
947 }
948 mcbsp1_fck = clk_get(0, "mcbsp1_fck");
949 if (IS_ERR(mcbsp1_fck)) {
950 printk(KERN_ERR "mcbsp: could not acquire mcbsp1_fck handle.\n");
951 return PTR_ERR(mcbsp1_fck);
952 }
953 mcbsp2_ick = clk_get(0, "mcbsp2_ick");
954 if (IS_ERR(mcbsp2_ick)) {
955 printk(KERN_ERR "mcbsp: could not acquire mcbsp2_ick handle.\n");
956 return PTR_ERR(mcbsp2_ick);
957 }
958 mcbsp2_fck = clk_get(0, "mcbsp2_fck");
959 if (IS_ERR(mcbsp2_fck)) {
960 printk(KERN_ERR "mcbsp: could not acquire mcbsp2_fck handle.\n");
961 return PTR_ERR(mcbsp2_fck);
962 }
963#endif
713 964
714#ifdef CONFIG_ARCH_OMAP730 965#ifdef CONFIG_ARCH_OMAP730
715 if (cpu_is_omap730()) { 966 if (cpu_is_omap730()) {
@@ -718,7 +969,7 @@ static int __init omap_mcbsp_init(void)
718 } 969 }
719#endif 970#endif
720#ifdef CONFIG_ARCH_OMAP15XX 971#ifdef CONFIG_ARCH_OMAP15XX
721 if (cpu_is_omap1510()) { 972 if (cpu_is_omap15xx()) {
722 mcbsp_info = mcbsp_1510; 973 mcbsp_info = mcbsp_1510;
723 mcbsp_count = ARRAY_SIZE(mcbsp_1510); 974 mcbsp_count = ARRAY_SIZE(mcbsp_1510);
724 } 975 }
@@ -729,6 +980,19 @@ static int __init omap_mcbsp_init(void)
729 mcbsp_count = ARRAY_SIZE(mcbsp_1610); 980 mcbsp_count = ARRAY_SIZE(mcbsp_1610);
730 } 981 }
731#endif 982#endif
983#if defined(CONFIG_ARCH_OMAP24XX)
984 if (cpu_is_omap24xx()) {
985 mcbsp_info = mcbsp_24xx;
986 mcbsp_count = ARRAY_SIZE(mcbsp_24xx);
987
988 /* REVISIT: where's the right place? */
989 omap2_mcbsp2_mux_setup();
990 sys_ck = clk_get(0, "sys_ck");
991 sys_clkout = clk_get(0, "sys_clkout");
992 clk_set_parent(sys_clkout, sys_ck);
993 clk_enable(sys_clkout);
994 }
995#endif
732 for (i = 0; i < OMAP_MAX_MCBSP_COUNT ; i++) { 996 for (i = 0; i < OMAP_MAX_MCBSP_COUNT ; i++) {
733 if (i >= mcbsp_count) { 997 if (i >= mcbsp_count) {
734 mcbsp[i].io_base = 0; 998 mcbsp[i].io_base = 0;
@@ -741,6 +1005,7 @@ static int __init omap_mcbsp_init(void)
741 mcbsp[i].dma_rx_lch = -1; 1005 mcbsp[i].dma_rx_lch = -1;
742 1006
743 mcbsp[i].io_base = mcbsp_info[i].virt_base; 1007 mcbsp[i].io_base = mcbsp_info[i].virt_base;
1008 mcbsp[i].io_type = OMAP_MCBSP_IRQ_IO; /* Default I/O is IRQ based */
744 mcbsp[i].tx_irq = mcbsp_info[i].tx_irq; 1009 mcbsp[i].tx_irq = mcbsp_info[i].tx_irq;
745 mcbsp[i].rx_irq = mcbsp_info[i].rx_irq; 1010 mcbsp[i].rx_irq = mcbsp_info[i].rx_irq;
746 mcbsp[i].dma_rx_sync = mcbsp_info[i].dma_rx_sync; 1011 mcbsp[i].dma_rx_sync = mcbsp_info[i].dma_rx_sync;
@@ -751,11 +1016,11 @@ static int __init omap_mcbsp_init(void)
751 return 0; 1016 return 0;
752} 1017}
753 1018
754
755arch_initcall(omap_mcbsp_init); 1019arch_initcall(omap_mcbsp_init);
756 1020
757EXPORT_SYMBOL(omap_mcbsp_config); 1021EXPORT_SYMBOL(omap_mcbsp_config);
758EXPORT_SYMBOL(omap_mcbsp_request); 1022EXPORT_SYMBOL(omap_mcbsp_request);
1023EXPORT_SYMBOL(omap_mcbsp_set_io_type);
759EXPORT_SYMBOL(omap_mcbsp_free); 1024EXPORT_SYMBOL(omap_mcbsp_free);
760EXPORT_SYMBOL(omap_mcbsp_start); 1025EXPORT_SYMBOL(omap_mcbsp_start);
761EXPORT_SYMBOL(omap_mcbsp_stop); 1026EXPORT_SYMBOL(omap_mcbsp_stop);
@@ -763,4 +1028,6 @@ EXPORT_SYMBOL(omap_mcbsp_xmit_word);
763EXPORT_SYMBOL(omap_mcbsp_recv_word); 1028EXPORT_SYMBOL(omap_mcbsp_recv_word);
764EXPORT_SYMBOL(omap_mcbsp_xmit_buffer); 1029EXPORT_SYMBOL(omap_mcbsp_xmit_buffer);
765EXPORT_SYMBOL(omap_mcbsp_recv_buffer); 1030EXPORT_SYMBOL(omap_mcbsp_recv_buffer);
1031EXPORT_SYMBOL(omap_mcbsp_spi_master_xmit_word_poll);
1032EXPORT_SYMBOL(omap_mcbsp_spi_master_recv_word_poll);
766EXPORT_SYMBOL(omap_mcbsp_set_spi_mode); 1033EXPORT_SYMBOL(omap_mcbsp_set_spi_mode);
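With omap_mcbsp_set_io_type() and the polled SPI helpers exported above, a simple SPI-style transfer can run entirely without McBSP interrupts. A hedged sketch (OMAP_MCBSP_POLL_IO is assumed to be the polled counterpart of OMAP_MCBSP_IRQ_IO in the McBSP header, and the data word is arbitrary):

#include <asm/arch/mcbsp.h>

static int my_spi_poll_xfer(u32 *rx)
{
	/* The I/O type must be chosen before omap_mcbsp_request(). */
	if (omap_mcbsp_set_io_type(OMAP_MCBSP1, OMAP_MCBSP_POLL_IO) < 0)
		return -EINVAL;
	if (omap_mcbsp_request(OMAP_MCBSP1) < 0)
		return -EBUSY;

	/* SPI clock/format setup via omap_mcbsp_set_spi_mode() omitted. */
	omap_mcbsp_start(OMAP_MCBSP1);

	omap_mcbsp_spi_master_xmit_word_poll(OMAP_MCBSP1, 0xa5a5);
	omap_mcbsp_spi_master_recv_word_poll(OMAP_MCBSP1, rx);

	omap_mcbsp_stop(OMAP_MCBSP1);
	omap_mcbsp_free(OMAP_MCBSP1);
	return 0;
}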
diff --git a/arch/arm/plat-omap/ocpi.c b/arch/arm/plat-omap/ocpi.c
index 5cc6775c789c..37792d43738b 100644
--- a/arch/arm/plat-omap/ocpi.c
+++ b/arch/arm/plat-omap/ocpi.c
@@ -62,9 +62,6 @@ int ocpi_enable(void)
62 if (!cpu_is_omap16xx()) 62 if (!cpu_is_omap16xx())
63 return -ENODEV; 63 return -ENODEV;
64 64
65 /* Make sure there's clock for OCPI */
66 clk_enable(ocpi_ck);
67
68 /* Enable access for OHCI in OCPI */ 65 /* Enable access for OHCI in OCPI */
69 val = omap_readl(OCPI_PROT); 66 val = omap_readl(OCPI_PROT);
70 val &= ~0xff; 67 val &= ~0xff;
diff --git a/arch/arm/plat-omap/pm.c b/arch/arm/plat-omap/pm.c
index 093efd786f21..1a24e2c10714 100644
--- a/arch/arm/plat-omap/pm.c
+++ b/arch/arm/plat-omap/pm.c
@@ -38,6 +38,7 @@
38#include <linux/pm.h> 38#include <linux/pm.h>
39#include <linux/sched.h> 39#include <linux/sched.h>
40#include <linux/proc_fs.h> 40#include <linux/proc_fs.h>
41#include <linux/pm.h>
41#include <linux/interrupt.h> 42#include <linux/interrupt.h>
42 43
43#include <asm/io.h> 44#include <asm/io.h>
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index ee82763b02b8..b7bf09b1b412 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -16,24 +16,94 @@
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/init.h> 17#include <linux/init.h>
18 18
19#include <asm/mach/map.h>
20#include <asm/tlb.h> 19#include <asm/tlb.h>
21#include <asm/io.h> 20#include <asm/io.h>
22#include <asm/cacheflush.h> 21#include <asm/cacheflush.h>
23 22
23#include <asm/mach/map.h>
24
24#include <asm/arch/sram.h> 25#include <asm/arch/sram.h>
26#include <asm/arch/board.h>
25 27
26#define OMAP1_SRAM_PA 0x20000000 28#define OMAP1_SRAM_PA 0x20000000
27#define OMAP1_SRAM_VA 0xd0000000 29#define OMAP1_SRAM_VA 0xd0000000
28#define OMAP2_SRAM_PA 0x40200000 30#define OMAP2_SRAM_PA 0x40200000
31#define OMAP2_SRAM_PUB_PA 0x4020f800
29#define OMAP2_SRAM_VA 0xd0000000 32#define OMAP2_SRAM_VA 0xd0000000
33#define OMAP2_SRAM_PUB_VA 0xd0000800
30 34
35#if defined(CONFIG_ARCH_OMAP24XX)
36#define SRAM_BOOTLOADER_SZ 0x00
37#else
31#define SRAM_BOOTLOADER_SZ 0x80 38#define SRAM_BOOTLOADER_SZ 0x80
39#endif
40
41#define VA_REQINFOPERM0 IO_ADDRESS(0x68005048)
42#define VA_READPERM0 IO_ADDRESS(0x68005050)
43#define VA_WRITEPERM0 IO_ADDRESS(0x68005058)
44#define VA_CONTROL_STAT IO_ADDRESS(0x480002F8)
45#define GP_DEVICE 0x300
46#define TYPE_MASK 0x700
47
48#define ROUND_DOWN(value,boundary) ((value) & (~((boundary)-1)))
32 49
33static unsigned long omap_sram_base; 50static unsigned long omap_sram_base;
34static unsigned long omap_sram_size; 51static unsigned long omap_sram_size;
35static unsigned long omap_sram_ceil; 52static unsigned long omap_sram_ceil;
36 53
54unsigned long omap_fb_sram_start;
55unsigned long omap_fb_sram_size;
56
57/* Depending on the target RAMFS firewall setup, the public usable amount of
 58 * SRAM varies. The default accessible size for all device types is 2k. A GP
 59 * device allows ARM11 but not other initiators for full size. This
60 * functionality seems ok until some nice security API happens.
61 */
62static int is_sram_locked(void)
63{
64 int type = 0;
65
66 if (cpu_is_omap242x())
67 type = __raw_readl(VA_CONTROL_STAT) & TYPE_MASK;
68
69 if (type == GP_DEVICE) {
 70		/* RAMFW: R/W access to all initiators for all qualifier sets */
71 if (cpu_is_omap242x()) {
72 __raw_writel(0xFF, VA_REQINFOPERM0); /* all q-vects */
73 __raw_writel(0xCFDE, VA_READPERM0); /* all i-read */
74 __raw_writel(0xCFDE, VA_WRITEPERM0); /* all i-write */
75 }
76 return 0;
77 } else
78 return 1; /* assume locked with no PPA or security driver */
79}
80
81void get_fb_sram_conf(unsigned long start_avail, unsigned size_avail,
82 unsigned long *start, unsigned long *size)
83{
84 const struct omap_fbmem_config *fbmem_conf;
85
86 fbmem_conf = omap_get_config(OMAP_TAG_FBMEM, struct omap_fbmem_config);
87 if (fbmem_conf != NULL) {
88 *start = fbmem_conf->fb_sram_start;
89 *size = fbmem_conf->fb_sram_size;
90 } else {
91 *size = 0;
92 *start = 0;
93 }
94
95 if (*size && (
96 *start < start_avail ||
97 *start + *size > start_avail + size_avail)) {
98 printk(KERN_ERR "invalid FB SRAM configuration\n");
99 *start = start_avail;
100 *size = size_avail;
101 }
102
103 if (*size)
104 pr_info("Reserving %lu bytes SRAM for frame buffer\n", *size);
105}
106
37/* 107/*
38 * The amount of SRAM depends on the core type. 108 * The amount of SRAM depends on the core type.
39 * Note that we cannot try to test for SRAM here because writes 109 * Note that we cannot try to test for SRAM here because writes
@@ -42,26 +112,45 @@ static unsigned long omap_sram_ceil;
42 */ 112 */
43void __init omap_detect_sram(void) 113void __init omap_detect_sram(void)
44{ 114{
45 if (!cpu_is_omap24xx()) 115 unsigned long sram_start;
116
117 if (cpu_is_omap24xx()) {
118 if (is_sram_locked()) {
119 omap_sram_base = OMAP2_SRAM_PUB_VA;
120 sram_start = OMAP2_SRAM_PUB_PA;
121 omap_sram_size = 0x800; /* 2K */
122 } else {
123 omap_sram_base = OMAP2_SRAM_VA;
124 sram_start = OMAP2_SRAM_PA;
125 if (cpu_is_omap242x())
126 omap_sram_size = 0xa0000; /* 640K */
127 else if (cpu_is_omap243x())
128 omap_sram_size = 0x10000; /* 64K */
129 }
130 } else {
46 omap_sram_base = OMAP1_SRAM_VA; 131 omap_sram_base = OMAP1_SRAM_VA;
47 else 132 sram_start = OMAP1_SRAM_PA;
48 omap_sram_base = OMAP2_SRAM_VA; 133
49 134 if (cpu_is_omap730())
50 if (cpu_is_omap730()) 135 omap_sram_size = 0x32000; /* 200K */
51 omap_sram_size = 0x32000; /* 200K */ 136 else if (cpu_is_omap15xx())
52 else if (cpu_is_omap15xx()) 137 omap_sram_size = 0x30000; /* 192K */
53 omap_sram_size = 0x30000; /* 192K */ 138 else if (cpu_is_omap1610() || cpu_is_omap1621() ||
54 else if (cpu_is_omap1610() || cpu_is_omap1621() || cpu_is_omap1710()) 139 cpu_is_omap1710())
55 omap_sram_size = 0x4000; /* 16K */ 140 omap_sram_size = 0x4000; /* 16K */
56 else if (cpu_is_omap1611()) 141 else if (cpu_is_omap1611())
57 omap_sram_size = 0x3e800; /* 250K */ 142 omap_sram_size = 0x3e800; /* 250K */
58 else if (cpu_is_omap2420()) 143 else {
59 omap_sram_size = 0xa0014; /* 640K */ 144 printk(KERN_ERR "Could not detect SRAM size\n");
60 else { 145 omap_sram_size = 0x4000;
61 printk(KERN_ERR "Could not detect SRAM size\n"); 146 }
62 omap_sram_size = 0x4000;
63 } 147 }
64 148 get_fb_sram_conf(sram_start + SRAM_BOOTLOADER_SZ,
149 omap_sram_size - SRAM_BOOTLOADER_SZ,
150 &omap_fb_sram_start, &omap_fb_sram_size);
151 if (omap_fb_sram_size)
152 omap_sram_size -= sram_start + omap_sram_size -
153 omap_fb_sram_start;
65 omap_sram_ceil = omap_sram_base + omap_sram_size; 154 omap_sram_ceil = omap_sram_base + omap_sram_size;
66} 155}
67 156
@@ -80,12 +169,20 @@ static struct map_desc omap_sram_io_desc[] __initdata = {
80 */ 169 */
81void __init omap_map_sram(void) 170void __init omap_map_sram(void)
82{ 171{
172 unsigned long base;
173
83 if (omap_sram_size == 0) 174 if (omap_sram_size == 0)
84 return; 175 return;
85 176
86 if (cpu_is_omap24xx()) { 177 if (cpu_is_omap24xx()) {
87 omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA; 178 omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA;
88 omap_sram_io_desc[0].pfn = __phys_to_pfn(OMAP2_SRAM_PA); 179
180 if (is_sram_locked())
181 base = OMAP2_SRAM_PUB_PA;
182 else
183 base = OMAP2_SRAM_PA;
184 base = ROUND_DOWN(base, PAGE_SIZE);
185 omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
89 } 186 }
90 187
91 omap_sram_io_desc[0].length = (omap_sram_size + PAGE_SIZE-1)/PAGE_SIZE; 188 omap_sram_io_desc[0].length = (omap_sram_size + PAGE_SIZE-1)/PAGE_SIZE;
@@ -93,7 +190,8 @@ void __init omap_map_sram(void)
93 iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc)); 190 iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
94 191
95 printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n", 192 printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",
96 omap_sram_io_desc[0].pfn, omap_sram_io_desc[0].virtual, 193 __pfn_to_phys(omap_sram_io_desc[0].pfn),
194 omap_sram_io_desc[0].virtual,
97 omap_sram_io_desc[0].length); 195 omap_sram_io_desc[0].length);
98 196
99 /* 197 /*
@@ -118,8 +216,9 @@ void * omap_sram_push(void * start, unsigned long size)
118 printk(KERN_ERR "Not enough space in SRAM\n"); 216 printk(KERN_ERR "Not enough space in SRAM\n");
119 return NULL; 217 return NULL;
120 } 218 }
219
121 omap_sram_ceil -= size; 220 omap_sram_ceil -= size;
122 omap_sram_ceil &= ~0x3; 221 omap_sram_ceil = ROUND_DOWN(omap_sram_ceil, sizeof(void *));
123 memcpy((void *)omap_sram_ceil, start, size); 222 memcpy((void *)omap_sram_ceil, start, size);
124 223
125 return (void *)omap_sram_ceil; 224 return (void *)omap_sram_ceil;
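omap_detect_sram() now carves any frame-buffer SRAM out of the usable pool, and omap_sram_push() rounds each allocation down to pointer alignment before copying code into SRAM. A sketch of the usual omap_sram_push() pattern, with illustrative names for the SRAM-resident routine and its size symbol:

#include <asm/arch/sram.h>

extern void sram_do_magic(void);		/* hypothetical asm routine */
extern unsigned long sram_do_magic_sz;		/* hypothetical size symbol */

static void (*do_magic_in_sram)(void);

static int __init my_sram_setup(void)
{
	do_magic_in_sram = (void (*)(void))
		omap_sram_push(sram_do_magic, sram_do_magic_sz);
	if (!do_magic_in_sram)
		return -ENOMEM;

	do_magic_in_sram();	/* now runs from the SRAM copy */
	return 0;
}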
diff --git a/arch/arm/plat-omap/timer32k.c b/arch/arm/plat-omap/timer32k.c
new file mode 100644
index 000000000000..b2a943bf11ef
--- /dev/null
+++ b/arch/arm/plat-omap/timer32k.c
@@ -0,0 +1,325 @@
1/*
2 * linux/arch/arm/plat-omap/timer32k.c
3 *
4 * OMAP 32K Timer
5 *
6 * Copyright (C) 2004 - 2005 Nokia Corporation
7 * Partial timer rewrite and additional dynamic tick timer support by
8 * Tony Lindgen <tony@atomide.com> and
9 * Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
10 *
11 * MPU timer code based on the older MPU timer code for OMAP
12 * Copyright (C) 2000 RidgeRun, Inc.
13 * Author: Greg Lonnon <glonnon@ridgerun.com>
14 *
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 of the License, or (at your
18 * option) any later version.
19 *
20 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
21 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
22 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
23 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
26 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
27 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 * You should have received a copy of the GNU General Public License along
32 * with this program; if not, write to the Free Software Foundation, Inc.,
33 * 675 Mass Ave, Cambridge, MA 02139, USA.
34 */
35
36#include <linux/config.h>
37#include <linux/kernel.h>
38#include <linux/init.h>
39#include <linux/delay.h>
40#include <linux/interrupt.h>
41#include <linux/sched.h>
42#include <linux/spinlock.h>
43#include <linux/err.h>
44#include <linux/clk.h>
45
46#include <asm/system.h>
47#include <asm/hardware.h>
48#include <asm/io.h>
49#include <asm/leds.h>
50#include <asm/irq.h>
51#include <asm/mach/irq.h>
52#include <asm/mach/time.h>
53
54struct sys_timer omap_timer;
55
56/*
57 * ---------------------------------------------------------------------------
58 * 32KHz OS timer
59 *
60 * This currently works only on 16xx, as 1510 does not have the continuous
61 * 32KHz synchronous timer. The 32KHz synchronous timer is used to keep track
62 * of time in addition to the 32KHz OS timer. Using only the 32KHz OS timer
63 * on 1510 would be possible, but the timer would not be as accurate as
64 * with the 32KHz synchronized timer.
65 * ---------------------------------------------------------------------------
66 */
67
68#if defined(CONFIG_ARCH_OMAP16XX)
69#define TIMER_32K_SYNCHRONIZED 0xfffbc410
70#elif defined(CONFIG_ARCH_OMAP24XX)
71#define TIMER_32K_SYNCHRONIZED 0x48004010
72#else
73#error OMAP 32KHz timer does not currently work on 15XX!
74#endif
75
76/* 16xx specific defines */
77#define OMAP1_32K_TIMER_BASE 0xfffb9000
78#define OMAP1_32K_TIMER_CR 0x08
79#define OMAP1_32K_TIMER_TVR 0x00
80#define OMAP1_32K_TIMER_TCR 0x04
81
82/* 24xx specific defines */
83#define OMAP2_GP_TIMER_BASE 0x48028000
84#define CM_CLKSEL_WKUP 0x48008440
85#define GP_TIMER_TIDR 0x00
86#define GP_TIMER_TISR 0x18
87#define GP_TIMER_TIER 0x1c
88#define GP_TIMER_TCLR 0x24
89#define GP_TIMER_TCRR 0x28
90#define GP_TIMER_TLDR 0x2c
91#define GP_TIMER_TTGR 0x30
92#define GP_TIMER_TSICR 0x40
93
94#define OMAP_32K_TICKS_PER_HZ (32768 / HZ)
95
96/*
 97 * TRM says 1 / HZ = (TVR + 1) / 32768, so TVR = (32768 / HZ) - 1
98 * so with HZ = 128, TVR = 255.
99 */
100#define OMAP_32K_TIMER_TICK_PERIOD ((32768 / HZ) - 1)
101
102#define JIFFIES_TO_HW_TICKS(nr_jiffies, clock_rate) \
103 (((nr_jiffies) * (clock_rate)) / HZ)
104
105static inline void omap_32k_timer_write(int val, int reg)
106{
107 if (cpu_class_is_omap1())
108 omap_writew(val, OMAP1_32K_TIMER_BASE + reg);
109
110 if (cpu_is_omap24xx())
111 omap_writel(val, OMAP2_GP_TIMER_BASE + reg);
112}
113
114static inline unsigned long omap_32k_timer_read(int reg)
115{
116 if (cpu_class_is_omap1())
117 return omap_readl(OMAP1_32K_TIMER_BASE + reg) & 0xffffff;
118
119 if (cpu_is_omap24xx())
120 return omap_readl(OMAP2_GP_TIMER_BASE + reg);
121}
122
123/*
124 * The 32KHz synchronized timer is an additional timer on 16xx.
125 * It is always running.
126 */
127static inline unsigned long omap_32k_sync_timer_read(void)
128{
129 return omap_readl(TIMER_32K_SYNCHRONIZED);
130}
131
132static inline void omap_32k_timer_start(unsigned long load_val)
133{
134 if (cpu_class_is_omap1()) {
135 omap_32k_timer_write(load_val, OMAP1_32K_TIMER_TVR);
136 omap_32k_timer_write(0x0f, OMAP1_32K_TIMER_CR);
137 }
138
139 if (cpu_is_omap24xx()) {
140 omap_32k_timer_write(0xffffffff - load_val, GP_TIMER_TCRR);
141 omap_32k_timer_write((1 << 1), GP_TIMER_TIER);
142 omap_32k_timer_write((1 << 1) | 1, GP_TIMER_TCLR);
143 }
144}
145
146static inline void omap_32k_timer_stop(void)
147{
148 if (cpu_class_is_omap1())
149 omap_32k_timer_write(0x0, OMAP1_32K_TIMER_CR);
150
151 if (cpu_is_omap24xx())
152 omap_32k_timer_write(0x0, GP_TIMER_TCLR);
153}
154
155/*
156 * Rounds down to nearest usec. Note that this will overflow for larger values.
157 */
158static inline unsigned long omap_32k_ticks_to_usecs(unsigned long ticks_32k)
159{
160 return (ticks_32k * 5*5*5*5*5*5) >> 9;
161}
162
163/*
164 * Rounds down to nearest nsec.
165 */
166static inline unsigned long long
167omap_32k_ticks_to_nsecs(unsigned long ticks_32k)
168{
169 return (unsigned long long) ticks_32k * 1000 * 5*5*5*5*5*5 >> 9;
170}
171
172static unsigned long omap_32k_last_tick = 0;
173
174/*
175 * Returns elapsed usecs since last 32k timer interrupt
176 */
177static unsigned long omap_32k_timer_gettimeoffset(void)
178{
179 unsigned long now = omap_32k_sync_timer_read();
180 return omap_32k_ticks_to_usecs(now - omap_32k_last_tick);
181}
182
183/*
184 * Returns current time from boot in nsecs. It's OK for this to wrap
185 * around for now, as it's just a relative time stamp.
186 */
187unsigned long long sched_clock(void)
188{
189 return omap_32k_ticks_to_nsecs(omap_32k_sync_timer_read());
190}
191
192/*
193 * Timer interrupt for 32KHz timer. When dynamic tick is enabled, this
194 * function is also called from other interrupts to remove latency
195 * issues with dynamic tick. In the dynamic tick case, we need to lock
196 * with irqsave.
197 */
198static irqreturn_t omap_32k_timer_interrupt(int irq, void *dev_id,
199 struct pt_regs *regs)
200{
201 unsigned long flags;
202 unsigned long now;
203
204 write_seqlock_irqsave(&xtime_lock, flags);
205
206 if (cpu_is_omap24xx()) {
207 u32 status = omap_32k_timer_read(GP_TIMER_TISR);
208 omap_32k_timer_write(status, GP_TIMER_TISR);
209 }
210
211 now = omap_32k_sync_timer_read();
212
213 while (now - omap_32k_last_tick >= OMAP_32K_TICKS_PER_HZ) {
214 omap_32k_last_tick += OMAP_32K_TICKS_PER_HZ;
215 timer_tick(regs);
216 }
217
218 /* Restart timer so we don't drift off due to modulo or dynamic tick.
219 * By default we program the next timer to be continuous to avoid
220 * latencies during high system load. During dynamic tick operation the
221 * continuous timer can be overridden from pm_idle to be longer.
222 */
223 omap_32k_timer_start(omap_32k_last_tick + OMAP_32K_TICKS_PER_HZ - now);
224 write_sequnlock_irqrestore(&xtime_lock, flags);
225
226 return IRQ_HANDLED;
227}
228
229#ifdef CONFIG_NO_IDLE_HZ
230/*
231 * Programs the next timer interrupt needed. Called when dynamic tick is
232 * enabled, and to reprogram the ticks to skip from pm_idle. Note that
233 * we can keep the timer continuous, and don't need to set it to run in
234 * one-shot mode. This is because the timer will get reprogrammed again
 235 * after the next interrupt.
236 */
237void omap_32k_timer_reprogram(unsigned long next_tick)
238{
239 omap_32k_timer_start(JIFFIES_TO_HW_TICKS(next_tick, 32768) + 1);
240}
241
242static struct irqaction omap_32k_timer_irq;
243extern struct timer_update_handler timer_update;
244
245static int omap_32k_timer_enable_dyn_tick(void)
246{
247 /* No need to reprogram timer, just use the next interrupt */
248 return 0;
249}
250
251static int omap_32k_timer_disable_dyn_tick(void)
252{
253 omap_32k_timer_start(OMAP_32K_TIMER_TICK_PERIOD);
254 return 0;
255}
256
257static struct dyn_tick_timer omap_dyn_tick_timer = {
258 .enable = omap_32k_timer_enable_dyn_tick,
259 .disable = omap_32k_timer_disable_dyn_tick,
260 .reprogram = omap_32k_timer_reprogram,
261 .handler = omap_32k_timer_interrupt,
262};
263#endif /* CONFIG_NO_IDLE_HZ */
264
265static struct irqaction omap_32k_timer_irq = {
266 .name = "32KHz timer",
267 .flags = SA_INTERRUPT | SA_TIMER,
268 .handler = omap_32k_timer_interrupt,
269};
270
271static struct clk * gpt1_ick;
272static struct clk * gpt1_fck;
273
274static __init void omap_init_32k_timer(void)
275{
276#ifdef CONFIG_NO_IDLE_HZ
277 omap_timer.dyn_tick = &omap_dyn_tick_timer;
278#endif
279
280 if (cpu_class_is_omap1())
281 setup_irq(INT_OS_TIMER, &omap_32k_timer_irq);
282 if (cpu_is_omap24xx())
283 setup_irq(37, &omap_32k_timer_irq);
284 omap_timer.offset = omap_32k_timer_gettimeoffset;
285 omap_32k_last_tick = omap_32k_sync_timer_read();
286
287 /* REVISIT: Check 24xx TIOCP_CFG settings after idle works */
288 if (cpu_is_omap24xx()) {
289 omap_32k_timer_write(0, GP_TIMER_TCLR);
290 omap_writel(0, CM_CLKSEL_WKUP); /* 32KHz clock source */
291
292 gpt1_ick = clk_get(NULL, "gpt1_ick");
293 if (IS_ERR(gpt1_ick))
294 printk(KERN_ERR "Could not get gpt1_ick\n");
295 else
296 clk_enable(gpt1_ick);
297
298 gpt1_fck = clk_get(NULL, "gpt1_fck");
299 if (IS_ERR(gpt1_fck))
300 printk(KERN_ERR "Could not get gpt1_fck\n");
301 else
302 clk_enable(gpt1_fck);
303
304 mdelay(100); /* Wait for clocks to stabilize */
305
306 omap_32k_timer_write(0x7, GP_TIMER_TISR);
307 }
308
309 omap_32k_timer_start(OMAP_32K_TIMER_TICK_PERIOD);
310}
311
312/*
313 * ---------------------------------------------------------------------------
314 * Timer initialization
315 * ---------------------------------------------------------------------------
316 */
317static void __init omap_timer_init(void)
318{
319 omap_init_32k_timer();
320}
321
322struct sys_timer omap_timer = {
323 .init = omap_timer_init,
324 .offset = NULL, /* Initialized later */
325};
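The helpers above convert 32 kHz ticks to wall-clock units without a 64-bit division by exploiting 1000000 / 32768 = 15625 / 512 = 5^6 / 2^9, which is why omap_32k_ticks_to_usecs() multiplies by 5*5*5*5*5*5 and shifts right by 9 (and why it overflows for large tick counts). A small standalone check of that identity, not kernel code:

#include <stdio.h>

/* Same arithmetic as omap_32k_ticks_to_usecs(): usecs = ticks * 1000000 / 32768,
 * rewritten as (ticks * 5^6) >> 9 to stay within an integer multiply and shift. */
static unsigned long ticks_to_usecs(unsigned long ticks_32k)
{
	return (ticks_32k * 5*5*5*5*5*5) >> 9;
}

int main(void)
{
	unsigned long ticks = 32768;	/* one second's worth of 32 kHz ticks */

	printf("shift form : %lu usecs\n", ticks_to_usecs(ticks));
	printf("direct form: %llu usecs\n",
	       (unsigned long long)ticks * 1000000ULL / 32768);
	return 0;
}

Both forms print 1000000 for one second of ticks; the shifted form keeps the intermediate product small enough for 32-bit arithmetic at HZ-scale values.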
diff --git a/arch/arm26/kernel/armksyms.c b/arch/arm26/kernel/armksyms.c
index 811a6376c624..a6a1b3373444 100644
--- a/arch/arm26/kernel/armksyms.c
+++ b/arch/arm26/kernel/armksyms.c
@@ -212,8 +212,6 @@ EXPORT_SYMBOL(sys_open);
212EXPORT_SYMBOL(sys_exit); 212EXPORT_SYMBOL(sys_exit);
213EXPORT_SYMBOL(sys_wait4); 213EXPORT_SYMBOL(sys_wait4);
214 214
215EXPORT_SYMBOL(get_wchan);
216
217#ifdef CONFIG_PREEMPT 215#ifdef CONFIG_PREEMPT
218EXPORT_SYMBOL(kernel_flag); 216EXPORT_SYMBOL(kernel_flag);
219#endif 217#endif
diff --git a/arch/frv/kernel/frv_ksyms.c b/arch/frv/kernel/frv_ksyms.c
index aa6b7d0a2109..07c8ffa0dd39 100644
--- a/arch/frv/kernel/frv_ksyms.c
+++ b/arch/frv/kernel/frv_ksyms.c
@@ -79,8 +79,6 @@ EXPORT_SYMBOL(memmove);
79EXPORT_SYMBOL(__outsl_ns); 79EXPORT_SYMBOL(__outsl_ns);
80EXPORT_SYMBOL(__insl_ns); 80EXPORT_SYMBOL(__insl_ns);
81 81
82EXPORT_SYMBOL(get_wchan);
83
84#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS 82#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
85EXPORT_SYMBOL(atomic_test_and_ANDNOT_mask); 83EXPORT_SYMBOL(atomic_test_and_ANDNOT_mask);
86EXPORT_SYMBOL(atomic_test_and_OR_mask); 84EXPORT_SYMBOL(atomic_test_and_OR_mask);
diff --git a/arch/h8300/kernel/h8300_ksyms.c b/arch/h8300/kernel/h8300_ksyms.c
index 69d6ad32d56c..b6cd78c972bb 100644
--- a/arch/h8300/kernel/h8300_ksyms.c
+++ b/arch/h8300/kernel/h8300_ksyms.c
@@ -55,8 +55,6 @@ EXPORT_SYMBOL(memcmp);
55EXPORT_SYMBOL(memscan); 55EXPORT_SYMBOL(memscan);
56EXPORT_SYMBOL(memmove); 56EXPORT_SYMBOL(memmove);
57 57
58EXPORT_SYMBOL(get_wchan);
59
60/* 58/*
61 * libgcc functions - functions that are used internally by the 59 * libgcc functions - functions that are used internally by the
62 * compiler... (prototypes are not correct though, but that 60 * compiler... (prototypes are not correct though, but that
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index eb5279d23b7f..6273bf74c203 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -415,6 +415,7 @@ void __init init_bsp_APIC(void)
415void __devinit setup_local_APIC(void) 415void __devinit setup_local_APIC(void)
416{ 416{
417 unsigned long oldvalue, value, ver, maxlvt; 417 unsigned long oldvalue, value, ver, maxlvt;
418 int i, j;
418 419
419 /* Pound the ESR really hard over the head with a big hammer - mbligh */ 420 /* Pound the ESR really hard over the head with a big hammer - mbligh */
420 if (esr_disable) { 421 if (esr_disable) {
@@ -452,6 +453,25 @@ void __devinit setup_local_APIC(void)
452 apic_write_around(APIC_TASKPRI, value); 453 apic_write_around(APIC_TASKPRI, value);
453 454
454 /* 455 /*
456 * After a crash, we no longer service the interrupts and a pending
 457 * interrupt from the previous kernel might still have its ISR bit set.
 458 *
 459 * Most probably by now the CPU has serviced that pending interrupt and
 460 * it might not have done the ack_APIC_irq() because it thought the
 461 * interrupt came from i8259 as ExtInt. The LAPIC did not get an EOI so it
 462 * does not clear the ISR bit, and the CPU thinks it has already serviced
463 * the interrupt. Hence a vector might get locked. It was noticed
464 * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
465 */
466 for (i = APIC_ISR_NR - 1; i >= 0; i--) {
467 value = apic_read(APIC_ISR + i*0x10);
468 for (j = 31; j >= 0; j--) {
469 if (value & (1<<j))
470 ack_APIC_irq();
471 }
472 }
473
474 /*
455 * Now that we are all set up, enable the APIC 475 * Now that we are all set up, enable the APIC
456 */ 476 */
457 value = apic_read(APIC_SPIV); 477 value = apic_read(APIC_SPIV);
@@ -732,7 +752,7 @@ static int __init apic_set_verbosity(char *str)
732 printk(KERN_WARNING "APIC Verbosity level %s not recognised" 752 printk(KERN_WARNING "APIC Verbosity level %s not recognised"
733 " use apic=verbose or apic=debug\n", str); 753 " use apic=verbose or apic=debug\n", str);
734 754
735 return 0; 755 return 1;
736} 756}
737 757
738__setup("apic=", apic_set_verbosity); 758__setup("apic=", apic_set_verbosity);
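The return 0 → return 1 changes in this hunk and in the mce, io_apic, traps and crash_dump hunks below all follow the __setup() convention: the handler returns 1 once it has consumed its option, and 0 only when the option should be treated as unhandled (and thus possibly reported as an unknown boot parameter). A minimal sketch of a handler following that convention, with a hypothetical option name and kernel context assumed:

#include <linux/init.h>

/* Hypothetical "quiet_example" boot option, shown only to illustrate the
 * __setup() return convention used by the fixes above. */
static int quiet_example;

static int __init quiet_example_setup(char *str)
{
	quiet_example = 1;
	return 1;	/* option handled; don't fall through as unknown */
}
__setup("quiet_example", quiet_example_setup);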
diff --git a/arch/i386/kernel/cpu/mcheck/mce.c b/arch/i386/kernel/cpu/mcheck/mce.c
index 6170af3c271a..afa0888f9a1e 100644
--- a/arch/i386/kernel/cpu/mcheck/mce.c
+++ b/arch/i386/kernel/cpu/mcheck/mce.c
@@ -64,13 +64,13 @@ void mcheck_init(struct cpuinfo_x86 *c)
64static int __init mcheck_disable(char *str) 64static int __init mcheck_disable(char *str)
65{ 65{
66 mce_disabled = 1; 66 mce_disabled = 1;
67 return 0; 67 return 1;
68} 68}
69 69
70static int __init mcheck_enable(char *str) 70static int __init mcheck_enable(char *str)
71{ 71{
72 mce_disabled = -1; 72 mce_disabled = -1;
73 return 0; 73 return 1;
74} 74}
75 75
76__setup("nomce", mcheck_disable); 76__setup("nomce", mcheck_disable);
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index e3c5fca0aa8a..2b0cfce24a61 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -69,7 +69,7 @@ static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
69 * for the data I pass, and I need tags 69 * for the data I pass, and I need tags
70 * on the data to indicate what information I have 70 * on the data to indicate what information I have
71 * squirrelled away. ELF notes happen to provide 71 * squirrelled away. ELF notes happen to provide
72 * all of that that no need to invent something new. 72 * all of that, so there is no need to invent something new.
73 */ 73 */
74 buf = (u32*)per_cpu_ptr(crash_notes, cpu); 74 buf = (u32*)per_cpu_ptr(crash_notes, cpu);
75 if (!buf) 75 if (!buf)
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 3b329af4afc5..f8f132aa5472 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -644,7 +644,7 @@ failed:
644int __init irqbalance_disable(char *str) 644int __init irqbalance_disable(char *str)
645{ 645{
646 irqbalance_disabled = 1; 646 irqbalance_disabled = 1;
647 return 0; 647 return 1;
648} 648}
649 649
650__setup("noirqbalance", irqbalance_disable); 650__setup("noirqbalance", irqbalance_disable);
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 24b3e745478b..6259afea46d1 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -781,7 +781,6 @@ unsigned long get_wchan(struct task_struct *p)
781 } while (count++ < 16); 781 } while (count++ < 16);
782 return 0; 782 return 0;
783} 783}
784EXPORT_SYMBOL(get_wchan);
785 784
786/* 785/*
787 * sys_alloc_thread_area: get a yet unused TLS descriptor index. 786 * sys_alloc_thread_area: get a yet unused TLS descriptor index.
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 8c08660b4e5d..eacc3f0a2ea4 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -34,6 +34,7 @@
34#include <linux/initrd.h> 34#include <linux/initrd.h>
35#include <linux/bootmem.h> 35#include <linux/bootmem.h>
36#include <linux/seq_file.h> 36#include <linux/seq_file.h>
37#include <linux/platform_device.h>
37#include <linux/console.h> 38#include <linux/console.h>
38#include <linux/mca.h> 39#include <linux/mca.h>
39#include <linux/root_dev.h> 40#include <linux/root_dev.h>
@@ -1547,6 +1548,23 @@ void __init setup_arch(char **cmdline_p)
1547#endif 1548#endif
1548} 1549}
1549 1550
1551static __init int add_pcspkr(void)
1552{
1553 struct platform_device *pd;
1554 int ret;
1555
1556 pd = platform_device_alloc("pcspkr", -1);
1557 if (!pd)
1558 return -ENOMEM;
1559
1560 ret = platform_device_add(pd);
1561 if (ret)
1562 platform_device_put(pd);
1563
1564 return ret;
1565}
1566device_initcall(add_pcspkr);
1567
1550#include "setup_arch_post.h" 1568#include "setup_arch_post.h"
1551/* 1569/*
1552 * Local Variables: 1570 * Local Variables:
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index ce3ef4fa0551..4f58b9c0efe3 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -313,3 +313,4 @@ ENTRY(sys_call_table)
313 .long sys_set_robust_list 313 .long sys_set_robust_list
314 .long sys_get_robust_list 314 .long sys_get_robust_list
315 .long sys_splice 315 .long sys_splice
316 .long sys_sync_file_range
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 6b63a5aa1e46..e38527994590 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -1193,6 +1193,6 @@ void __init trap_init(void)
1193static int __init kstack_setup(char *s) 1193static int __init kstack_setup(char *s)
1194{ 1194{
1195 kstack_depth_to_print = simple_strtoul(s, NULL, 0); 1195 kstack_depth_to_print = simple_strtoul(s, NULL, 0);
1196 return 0; 1196 return 1;
1197} 1197}
1198__setup("kstack=", kstack_setup); 1198__setup("kstack=", kstack_setup);
diff --git a/arch/i386/kernel/vsyscall-sigreturn.S b/arch/i386/kernel/vsyscall-sigreturn.S
index fadb5bc3c374..a92262f41659 100644
--- a/arch/i386/kernel/vsyscall-sigreturn.S
+++ b/arch/i386/kernel/vsyscall-sigreturn.S
@@ -44,7 +44,7 @@ __kernel_rt_sigreturn:
44.LSTARTCIEDLSI1: 44.LSTARTCIEDLSI1:
45 .long 0 /* CIE ID */ 45 .long 0 /* CIE ID */
46 .byte 1 /* Version number */ 46 .byte 1 /* Version number */
47 .string "zR" /* NUL-terminated augmentation string */ 47 .string "zRS" /* NUL-terminated augmentation string */
48 .uleb128 1 /* Code alignment factor */ 48 .uleb128 1 /* Code alignment factor */
49 .sleb128 -4 /* Data alignment factor */ 49 .sleb128 -4 /* Data alignment factor */
50 .byte 8 /* Return address register column */ 50 .byte 8 /* Return address register column */
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 89faa603c6be..6386f63c413e 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -240,7 +240,7 @@ cache_info(char *page)
240 } 240 }
241 p += sprintf(p, 241 p += sprintf(p,
242 "%s Cache level %lu:\n" 242 "%s Cache level %lu:\n"
243 "\tSize : %lu bytes\n" 243 "\tSize : %u bytes\n"
244 "\tAttributes : ", 244 "\tAttributes : ",
245 cache_types[j+cci.pcci_unified], i+1, 245 cache_types[j+cci.pcci_unified], i+1,
246 cci.pcci_cache_size); 246 cci.pcci_cache_size);
@@ -648,9 +648,9 @@ frequency_info(char *page)
648 if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0; 648 if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0;
649 649
650 p += sprintf(p, 650 p += sprintf(p,
651 "Processor/Clock ratio : %ld/%ld\n" 651 "Processor/Clock ratio : %d/%d\n"
652 "Bus/Clock ratio : %ld/%ld\n" 652 "Bus/Clock ratio : %d/%d\n"
653 "ITC/Clock ratio : %ld/%ld\n", 653 "ITC/Clock ratio : %d/%d\n",
654 proc.num, proc.den, bus.num, bus.den, itc.num, itc.den); 654 proc.num, proc.den, bus.num, bus.den, itc.num, itc.den);
655 655
656 return p - page; 656 return p - page;
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index ac167436e936..49958904045b 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -188,7 +188,7 @@ ia64_init_itm (void)
188 itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den; 188 itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;
189 189
190 local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ; 190 local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
191 printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%lu/%lu, " 191 printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
192 "ITC freq=%lu.%03luMHz", smp_processor_id(), 192 "ITC freq=%lu.%03luMHz", smp_processor_id(),
193 platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000, 193 platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
194 itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000); 194 itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 3b6fd798c4d6..b47476d655f1 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -9,6 +9,8 @@
9 * 2002/08/07 Erich Focht <efocht@ess.nec.de> 9 * 2002/08/07 Erich Focht <efocht@ess.nec.de>
10 * Populate cpu entries in sysfs for non-numa systems as well 10 * Populate cpu entries in sysfs for non-numa systems as well
11 * Intel Corporation - Ashok Raj 11 * Intel Corporation - Ashok Raj
12 * 02/27/2006 Zhang, Yanmin
13 * Populate cpu cache entries in sysfs for cpu cache info
12 */ 14 */
13 15
14#include <linux/config.h> 16#include <linux/config.h>
@@ -19,6 +21,7 @@
19#include <linux/init.h> 21#include <linux/init.h>
20#include <linux/bootmem.h> 22#include <linux/bootmem.h>
21#include <linux/nodemask.h> 23#include <linux/nodemask.h>
24#include <linux/notifier.h>
22#include <asm/mmzone.h> 25#include <asm/mmzone.h>
23#include <asm/numa.h> 26#include <asm/numa.h>
24#include <asm/cpu.h> 27#include <asm/cpu.h>
@@ -101,3 +104,367 @@ out:
101} 104}
102 105
103subsys_initcall(topology_init); 106subsys_initcall(topology_init);
107
108
109/*
110 * Export cpu cache information through sysfs
111 */
112
113/*
114 * A bunch of string array to get pretty printing
115 */
116static const char *cache_types[] = {
117 "", /* not used */
118 "Instruction",
119 "Data",
120 "Unified" /* unified */
121};
122
123static const char *cache_mattrib[]={
124 "WriteThrough",
125 "WriteBack",
126 "", /* reserved */
127 "" /* reserved */
128};
129
130struct cache_info {
131 pal_cache_config_info_t cci;
132 cpumask_t shared_cpu_map;
133 int level;
134 int type;
135 struct kobject kobj;
136};
137
138struct cpu_cache_info {
139 struct cache_info *cache_leaves;
140 int num_cache_leaves;
141 struct kobject kobj;
142};
143
144static struct cpu_cache_info all_cpu_cache_info[NR_CPUS];
145#define LEAF_KOBJECT_PTR(x,y) (&all_cpu_cache_info[x].cache_leaves[y])
146
147#ifdef CONFIG_SMP
148static void cache_shared_cpu_map_setup( unsigned int cpu,
149 struct cache_info * this_leaf)
150{
151 pal_cache_shared_info_t csi;
152 int num_shared, i = 0;
153 unsigned int j;
154
155 if (cpu_data(cpu)->threads_per_core <= 1 &&
156 cpu_data(cpu)->cores_per_socket <= 1) {
157 cpu_set(cpu, this_leaf->shared_cpu_map);
158 return;
159 }
160
161 if (ia64_pal_cache_shared_info(this_leaf->level,
162 this_leaf->type,
163 0,
164 &csi) != PAL_STATUS_SUCCESS)
165 return;
166
167 num_shared = (int) csi.num_shared;
168 do {
169 for_each_cpu(j)
170 if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
171 && cpu_data(j)->core_id == csi.log1_cid
172 && cpu_data(j)->thread_id == csi.log1_tid)
173 cpu_set(j, this_leaf->shared_cpu_map);
174
175 i++;
176 } while (i < num_shared &&
177 ia64_pal_cache_shared_info(this_leaf->level,
178 this_leaf->type,
179 i,
180 &csi) == PAL_STATUS_SUCCESS);
181}
182#else
183static void cache_shared_cpu_map_setup(unsigned int cpu,
184 struct cache_info * this_leaf)
185{
186 cpu_set(cpu, this_leaf->shared_cpu_map);
187 return;
188}
189#endif
190
191static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
192 char *buf)
193{
194 return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
195}
196
197static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
198 char *buf)
199{
200 return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
201}
202
203static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
204{
205 return sprintf(buf,
206 "%s\n",
207 cache_mattrib[this_leaf->cci.pcci_cache_attr]);
208}
209
210static ssize_t show_size(struct cache_info *this_leaf, char *buf)
211{
212 return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
213}
214
215static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
216{
217 unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
218 number_of_sets /= this_leaf->cci.pcci_assoc;
219 number_of_sets /= 1 << this_leaf->cci.pcci_line_size;
220
221 return sprintf(buf, "%u\n", number_of_sets);
222}
223
224static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
225{
226 ssize_t len;
227 cpumask_t shared_cpu_map;
228
229 cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
230 len = cpumask_scnprintf(buf, NR_CPUS+1, shared_cpu_map);
231 len += sprintf(buf+len, "\n");
232 return len;
233}
234
235static ssize_t show_type(struct cache_info *this_leaf, char *buf)
236{
237 int type = this_leaf->type + this_leaf->cci.pcci_unified;
238 return sprintf(buf, "%s\n", cache_types[type]);
239}
240
241static ssize_t show_level(struct cache_info *this_leaf, char *buf)
242{
243 return sprintf(buf, "%u\n", this_leaf->level);
244}
245
246struct cache_attr {
247 struct attribute attr;
248 ssize_t (*show)(struct cache_info *, char *);
249 ssize_t (*store)(struct cache_info *, const char *, size_t count);
250};
251
252#ifdef define_one_ro
253 #undef define_one_ro
254#endif
255#define define_one_ro(_name) \
256 static struct cache_attr _name = \
257__ATTR(_name, 0444, show_##_name, NULL)
258
259define_one_ro(level);
260define_one_ro(type);
261define_one_ro(coherency_line_size);
262define_one_ro(ways_of_associativity);
263define_one_ro(size);
264define_one_ro(number_of_sets);
265define_one_ro(shared_cpu_map);
266define_one_ro(attributes);
267
268static struct attribute * cache_default_attrs[] = {
269 &type.attr,
270 &level.attr,
271 &coherency_line_size.attr,
272 &ways_of_associativity.attr,
273 &attributes.attr,
274 &size.attr,
275 &number_of_sets.attr,
276 &shared_cpu_map.attr,
277 NULL
278};
279
280#define to_object(k) container_of(k, struct cache_info, kobj)
281#define to_attr(a) container_of(a, struct cache_attr, attr)
282
283static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char * buf)
284{
285 struct cache_attr *fattr = to_attr(attr);
286 struct cache_info *this_leaf = to_object(kobj);
287 ssize_t ret;
288
289 ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
290 return ret;
291}
292
293static struct sysfs_ops cache_sysfs_ops = {
294 .show = cache_show
295};
296
297static struct kobj_type cache_ktype = {
298 .sysfs_ops = &cache_sysfs_ops,
299 .default_attrs = cache_default_attrs,
300};
301
302static struct kobj_type cache_ktype_percpu_entry = {
303 .sysfs_ops = &cache_sysfs_ops,
304};
305
306static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
307{
308 if (all_cpu_cache_info[cpu].cache_leaves) {
309 kfree(all_cpu_cache_info[cpu].cache_leaves);
310 all_cpu_cache_info[cpu].cache_leaves = NULL;
311 }
312 all_cpu_cache_info[cpu].num_cache_leaves = 0;
313 memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
314
315 return;
316}
317
318static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
319{
320 u64 i, levels, unique_caches;
321 pal_cache_config_info_t cci;
322 int j;
323 s64 status;
324 struct cache_info *this_cache;
325 int num_cache_leaves = 0;
326
327 if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
328 printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
329 return -1;
330 }
331
332 this_cache=kzalloc(sizeof(struct cache_info)*unique_caches,
333 GFP_KERNEL);
334 if (this_cache == NULL)
335 return -ENOMEM;
336
337 for (i=0; i < levels; i++) {
338 for (j=2; j >0 ; j--) {
339 if ((status=ia64_pal_cache_config_info(i,j, &cci)) !=
340 PAL_STATUS_SUCCESS)
341 continue;
342
343 this_cache[num_cache_leaves].cci = cci;
344 this_cache[num_cache_leaves].level = i + 1;
345 this_cache[num_cache_leaves].type = j;
346
347 cache_shared_cpu_map_setup(cpu,
348 &this_cache[num_cache_leaves]);
349 num_cache_leaves ++;
350 }
351 }
352
353 all_cpu_cache_info[cpu].cache_leaves = this_cache;
354 all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;
355
356 memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
357
358 return 0;
359}
360
361/* Add cache interface for CPU device */
362static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
363{
364 unsigned int cpu = sys_dev->id;
365 unsigned long i, j;
366 struct cache_info *this_object;
367 int retval = 0;
368 cpumask_t oldmask;
369
370 if (all_cpu_cache_info[cpu].kobj.parent)
371 return 0;
372
373 oldmask = current->cpus_allowed;
374 retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
375 if (unlikely(retval))
376 return retval;
377
378 retval = cpu_cache_sysfs_init(cpu);
379 set_cpus_allowed(current, oldmask);
380 if (unlikely(retval < 0))
381 return retval;
382
383 all_cpu_cache_info[cpu].kobj.parent = &sys_dev->kobj;
384 kobject_set_name(&all_cpu_cache_info[cpu].kobj, "%s", "cache");
385 all_cpu_cache_info[cpu].kobj.ktype = &cache_ktype_percpu_entry;
386 retval = kobject_register(&all_cpu_cache_info[cpu].kobj);
387
388 for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
389 this_object = LEAF_KOBJECT_PTR(cpu,i);
390 this_object->kobj.parent = &all_cpu_cache_info[cpu].kobj;
391 kobject_set_name(&(this_object->kobj), "index%1lu", i);
392 this_object->kobj.ktype = &cache_ktype;
393 retval = kobject_register(&(this_object->kobj));
394 if (unlikely(retval)) {
395 for (j = 0; j < i; j++) {
396 kobject_unregister(
397 &(LEAF_KOBJECT_PTR(cpu,j)->kobj));
398 }
399 kobject_unregister(&all_cpu_cache_info[cpu].kobj);
400 cpu_cache_sysfs_exit(cpu);
401 break;
402 }
403 }
404 return retval;
405}
406
407/* Remove cache interface for CPU device */
408static int __cpuinit cache_remove_dev(struct sys_device * sys_dev)
409{
410 unsigned int cpu = sys_dev->id;
411 unsigned long i;
412
413 for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
414 kobject_unregister(&(LEAF_KOBJECT_PTR(cpu,i)->kobj));
415
416 if (all_cpu_cache_info[cpu].kobj.parent) {
417 kobject_unregister(&all_cpu_cache_info[cpu].kobj);
418 memset(&all_cpu_cache_info[cpu].kobj,
419 0,
420 sizeof(struct kobject));
421 }
422
423 cpu_cache_sysfs_exit(cpu);
424
425 return 0;
426}
427
428/*
 429 * When a cpu is hot-plugged, do a check and initialize the
 430 * cache kobject if necessary
431 */
432static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
433 unsigned long action, void *hcpu)
434{
435 unsigned int cpu = (unsigned long)hcpu;
436 struct sys_device *sys_dev;
437
438 sys_dev = get_cpu_sysdev(cpu);
439 switch (action) {
440 case CPU_ONLINE:
441 cache_add_dev(sys_dev);
442 break;
443 case CPU_DEAD:
444 cache_remove_dev(sys_dev);
445 break;
446 }
447 return NOTIFY_OK;
448}
449
450static struct notifier_block cache_cpu_notifier =
451{
452 .notifier_call = cache_cpu_callback
453};
454
455static int __cpuinit cache_sysfs_init(void)
456{
457 int i;
458
459 for_each_online_cpu(i) {
460 cache_cpu_callback(&cache_cpu_notifier, CPU_ONLINE,
461 (void *)(long)i);
462 }
463
464 register_cpu_notifier(&cache_cpu_notifier);
465
466 return 0;
467}
468
469device_initcall(cache_sysfs_init);
470
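With this in place each online CPU gains /sys/devices/system/cpu/cpuN/cache/indexM/ entries exposing level, type, size, associativity and so on. The number_of_sets attribute is derived from the PAL-reported geometry as size / (associativity * line size), with the line size stored as a power of two in pcci_line_size. A standalone check of that arithmetic with made-up field values:

#include <stdio.h>

int main(void)
{
	/* Hypothetical PAL cache descriptor values, for illustration only. */
	unsigned int cache_size = 256 * 1024;	/* pcci_cache_size: 256 KB       */
	unsigned int assoc = 8;			/* pcci_assoc: 8-way             */
	unsigned int line_size_log2 = 6;	/* pcci_line_size: 64-byte lines */

	unsigned int sets = cache_size / assoc / (1u << line_size_log2);

	printf("number_of_sets = %u\n", sets);	/* prints 512 */
	return 0;
}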
diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c
index 3d7f2000b714..c3319514a85e 100644
--- a/arch/m68k/kernel/m68k_ksyms.c
+++ b/arch/m68k/kernel/m68k_ksyms.c
@@ -79,4 +79,3 @@ EXPORT_SYMBOL(__down_failed_interruptible);
79EXPORT_SYMBOL(__down_failed_trylock); 79EXPORT_SYMBOL(__down_failed_trylock);
80EXPORT_SYMBOL(__up_wakeup); 80EXPORT_SYMBOL(__up_wakeup);
81 81
82EXPORT_SYMBOL(get_wchan);
diff --git a/arch/m68knommu/kernel/m68k_ksyms.c b/arch/m68knommu/kernel/m68k_ksyms.c
index d844c755945a..f9b4ea16c099 100644
--- a/arch/m68knommu/kernel/m68k_ksyms.c
+++ b/arch/m68knommu/kernel/m68k_ksyms.c
@@ -57,8 +57,6 @@ EXPORT_SYMBOL(__down_failed_interruptible);
57EXPORT_SYMBOL(__down_failed_trylock); 57EXPORT_SYMBOL(__down_failed_trylock);
58EXPORT_SYMBOL(__up_wakeup); 58EXPORT_SYMBOL(__up_wakeup);
59 59
60EXPORT_SYMBOL(get_wchan);
61
62/* 60/*
63 * libgcc functions - functions that are used internally by the 61 * libgcc functions - functions that are used internally by the
64 * compiler... (prototypes are not correct though, but that 62 * compiler... (prototypes are not correct though, but that
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 5080ea1799a4..e15709ce8866 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -233,6 +233,7 @@ config MACH_JAZZ
233 select ARC32 233 select ARC32
234 select ARCH_MAY_HAVE_PC_FDC 234 select ARCH_MAY_HAVE_PC_FDC
235 select GENERIC_ISA_DMA 235 select GENERIC_ISA_DMA
236 select I8253
236 select I8259 237 select I8259
237 select ISA 238 select ISA
238 select SYS_HAS_CPU_R4X00 239 select SYS_HAS_CPU_R4X00
@@ -530,6 +531,7 @@ config QEMU
530 select DMA_COHERENT 531 select DMA_COHERENT
531 select GENERIC_ISA_DMA 532 select GENERIC_ISA_DMA
532 select HAVE_STD_PC_SERIAL_PORT 533 select HAVE_STD_PC_SERIAL_PORT
534 select I8253
533 select I8259 535 select I8259
534 select ISA 536 select ISA
535 select SWAP_IO_SPACE 537 select SWAP_IO_SPACE
@@ -714,6 +716,7 @@ config SNI_RM200_PCI
714 select HAVE_STD_PC_SERIAL_PORT 716 select HAVE_STD_PC_SERIAL_PORT
715 select HW_HAS_EISA 717 select HW_HAS_EISA
716 select HW_HAS_PCI 718 select HW_HAS_PCI
719 select I8253
717 select I8259 720 select I8259
718 select ISA 721 select ISA
719 select SYS_HAS_CPU_R4X00 722 select SYS_HAS_CPU_R4X00
@@ -1721,6 +1724,9 @@ config MMU
1721 bool 1724 bool
1722 default y 1725 default y
1723 1726
1727config I8253
1728 bool
1729
1724source "drivers/pcmcia/Kconfig" 1730source "drivers/pcmcia/Kconfig"
1725 1731
1726source "drivers/pci/hotplug/Kconfig" 1732source "drivers/pci/hotplug/Kconfig"
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index f36c4f20ee8a..309d54cceda3 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -59,6 +59,8 @@ obj-$(CONFIG_PROC_FS) += proc.o
59 59
60obj-$(CONFIG_64BIT) += cpu-bugs64.o 60obj-$(CONFIG_64BIT) += cpu-bugs64.o
61 61
62obj-$(CONFIG_I8253) += i8253.o
63
62CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) 64CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
63 65
64EXTRA_AFLAGS := $(CFLAGS) 66EXTRA_AFLAGS := $(CFLAGS)
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
new file mode 100644
index 000000000000..475df6904219
--- /dev/null
+++ b/arch/mips/kernel/i8253.c
@@ -0,0 +1,28 @@
1/*
2 * Copyright (C) 2006 IBM Corporation
3 *
4 * Implements device information for i8253 timer chip
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation
9 */
10
11#include <linux/platform_device.h>
12
13static __init int add_pcspkr(void)
14{
15 struct platform_device *pd;
16 int ret;
17
18 pd = platform_device_alloc("pcspkr", -1);
19 if (!pd)
20 return -ENOMEM;
21
22 ret = platform_device_add(pd);
23 if (ret)
24 platform_device_put(pd);
25
26 return ret;
27}
28device_initcall(add_pcspkr);
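This is the same pc-speaker registration pattern added to the i386 and powerpc setup code in this merge: allocate the platform device, add it, and drop the reference if the add fails. Where no extra failure handling is needed, the same registration could also be written with platform_device_register_simple(), which wraps the alloc/add/put sequence; a sketch of that alternative (not what the patch uses):

#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/init.h>

/* Alternative one-step registration of the "pcspkr" platform device;
 * shown only as a sketch, the patch above keeps the explicit form. */
static __init int add_pcspkr_simple(void)
{
	struct platform_device *pd;

	pd = platform_device_register_simple("pcspkr", -1, NULL, 0);

	return IS_ERR(pd) ? PTR_ERR(pd) : 0;
}
device_initcall(add_pcspkr_simple);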
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index a8f435d82940..c66db5e5ab62 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -419,4 +419,3 @@ unsigned long get_wchan(struct task_struct *p)
419 return pc; 419 return pc;
420} 420}
421 421
422EXPORT_SYMBOL(get_wchan);
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 211d72653ea6..764d07329716 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -61,7 +61,7 @@ static int __init parse_elfcorehdr(char *p)
61 if (p) 61 if (p)
62 elfcorehdr_addr = memparse(p, &p); 62 elfcorehdr_addr = memparse(p, &p);
63 63
64 return 0; 64 return 1;
65} 65}
66__setup("elfcorehdr=", parse_elfcorehdr); 66__setup("elfcorehdr=", parse_elfcorehdr);
67#endif 67#endif
@@ -71,7 +71,7 @@ static int __init parse_savemaxmem(char *p)
71 if (p) 71 if (p)
72 saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1; 72 saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1;
73 73
74 return 0; 74 return 1;
75} 75}
76__setup("savemaxmem=", parse_savemaxmem); 76__setup("savemaxmem=", parse_savemaxmem);
77 77
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 1b73508ecb2b..2cbde865d4f5 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -37,7 +37,7 @@
37#include <asm/prom.h> 37#include <asm/prom.h>
38#include <asm/vdso_datapage.h> 38#include <asm/vdso_datapage.h>
39 39
40#define MODULE_VERS "1.6" 40#define MODULE_VERS "1.7"
41#define MODULE_NAME "lparcfg" 41#define MODULE_NAME "lparcfg"
42 42
43/* #define LPARCFG_DEBUG */ 43/* #define LPARCFG_DEBUG */
@@ -149,17 +149,17 @@ static void log_plpar_hcall_return(unsigned long rc, char *tag)
149 if (rc == 0) /* success, return */ 149 if (rc == 0) /* success, return */
150 return; 150 return;
151/* check for null tag ? */ 151/* check for null tag ? */
152 if (rc == H_Hardware) 152 if (rc == H_HARDWARE)
153 printk(KERN_INFO 153 printk(KERN_INFO
154 "plpar-hcall (%s) failed with hardware fault\n", tag); 154 "plpar-hcall (%s) failed with hardware fault\n", tag);
155 else if (rc == H_Function) 155 else if (rc == H_FUNCTION)
156 printk(KERN_INFO 156 printk(KERN_INFO
157 "plpar-hcall (%s) failed; function not allowed\n", tag); 157 "plpar-hcall (%s) failed; function not allowed\n", tag);
158 else if (rc == H_Authority) 158 else if (rc == H_AUTHORITY)
159 printk(KERN_INFO 159 printk(KERN_INFO
160 "plpar-hcall (%s) failed; not authorized to this function\n", 160 "plpar-hcall (%s) failed; not authorized to this"
161 tag); 161 " function\n", tag);
162 else if (rc == H_Parameter) 162 else if (rc == H_PARAMETER)
163 printk(KERN_INFO "plpar-hcall (%s) failed; Bad parameter(s)\n", 163 printk(KERN_INFO "plpar-hcall (%s) failed; Bad parameter(s)\n",
164 tag); 164 tag);
165 else 165 else
@@ -209,7 +209,7 @@ static void h_pic(unsigned long *pool_idle_time, unsigned long *num_procs)
209 unsigned long dummy; 209 unsigned long dummy;
210 rc = plpar_hcall(H_PIC, 0, 0, 0, 0, pool_idle_time, num_procs, &dummy); 210 rc = plpar_hcall(H_PIC, 0, 0, 0, 0, pool_idle_time, num_procs, &dummy);
211 211
212 if (rc != H_Authority) 212 if (rc != H_AUTHORITY)
213 log_plpar_hcall_return(rc, "H_PIC"); 213 log_plpar_hcall_return(rc, "H_PIC");
214} 214}
215 215
@@ -242,7 +242,7 @@ static void parse_system_parameter_string(struct seq_file *m)
242{ 242{
243 int call_status; 243 int call_status;
244 244
245 char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL); 245 unsigned char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
246 if (!local_buffer) { 246 if (!local_buffer) {
247 printk(KERN_ERR "%s %s kmalloc failure at line %d \n", 247 printk(KERN_ERR "%s %s kmalloc failure at line %d \n",
248 __FILE__, __FUNCTION__, __LINE__); 248 __FILE__, __FUNCTION__, __LINE__);
@@ -254,7 +254,8 @@ static void parse_system_parameter_string(struct seq_file *m)
254 call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1, 254 call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
255 NULL, 255 NULL,
256 SPLPAR_CHARACTERISTICS_TOKEN, 256 SPLPAR_CHARACTERISTICS_TOKEN,
257 __pa(rtas_data_buf)); 257 __pa(rtas_data_buf),
258 RTAS_DATA_BUF_SIZE);
258 memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH); 259 memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH);
259 spin_unlock(&rtas_data_buf_lock); 260 spin_unlock(&rtas_data_buf_lock);
260 261
@@ -275,7 +276,7 @@ static void parse_system_parameter_string(struct seq_file *m)
275#ifdef LPARCFG_DEBUG 276#ifdef LPARCFG_DEBUG
276 printk(KERN_INFO "success calling get-system-parameter \n"); 277 printk(KERN_INFO "success calling get-system-parameter \n");
277#endif 278#endif
278 splpar_strlen = local_buffer[0] * 16 + local_buffer[1]; 279 splpar_strlen = local_buffer[0] * 256 + local_buffer[1];
279 local_buffer += 2; /* step over strlen value */ 280 local_buffer += 2; /* step over strlen value */
280 281
281 memset(workbuffer, 0, SPLPAR_MAXLENGTH); 282 memset(workbuffer, 0, SPLPAR_MAXLENGTH);
@@ -529,13 +530,13 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
529 retval = plpar_hcall_norets(H_SET_PPP, *new_entitled_ptr, 530 retval = plpar_hcall_norets(H_SET_PPP, *new_entitled_ptr,
530 *new_weight_ptr); 531 *new_weight_ptr);
531 532
532 if (retval == H_Success || retval == H_Constrained) { 533 if (retval == H_SUCCESS || retval == H_CONSTRAINED) {
533 retval = count; 534 retval = count;
534 } else if (retval == H_Busy) { 535 } else if (retval == H_BUSY) {
535 retval = -EBUSY; 536 retval = -EBUSY;
536 } else if (retval == H_Hardware) { 537 } else if (retval == H_HARDWARE) {
537 retval = -EIO; 538 retval = -EIO;
538 } else if (retval == H_Parameter) { 539 } else if (retval == H_PARAMETER) {
539 retval = -EINVAL; 540 retval = -EINVAL;
540 } else { 541 } else {
541 printk(KERN_WARNING "%s: received unknown hv return code %ld", 542 printk(KERN_WARNING "%s: received unknown hv return code %ld",
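The splpar_strlen change above corrects how the length prefix returned by ibm,get-system-parameter is decoded: the first two bytes evidently form a 16-bit big-endian length, so the value is high byte * 256 + low byte, not * 16; switching local_buffer to unsigned char also keeps bytes above 0x7f from sign-extending. A tiny standalone illustration of the difference:

#include <stdio.h>

int main(void)
{
	/* First two bytes of the RTAS buffer: a big-endian 16-bit length.
	 * 0x01 0x2c encodes a 300-byte parameter string. */
	unsigned char buf[2] = { 0x01, 0x2c };

	unsigned int old_len = buf[0] * 16 + buf[1];	/* old code:  60 */
	unsigned int new_len = buf[0] * 256 + buf[1];	/* fixed:    300 */

	printf("old=%u fixed=%u\n", old_len, new_len);
	return 0;
}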
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 706090c99f47..2dd47d2dd998 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -834,7 +834,6 @@ unsigned long get_wchan(struct task_struct *p)
834 } while (count++ < 16); 834 } while (count++ < 16);
835 return 0; 835 return 0;
836} 836}
837EXPORT_SYMBOL(get_wchan);
838 837
839static int kstack_depth_to_print = 64; 838static int kstack_depth_to_print = 64;
840 839
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 06636c927a7e..0112318213ab 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -578,18 +578,18 @@ static void rtas_percpu_suspend_me(void *info)
578 * We use "waiting" to indicate our state. As long 578 * We use "waiting" to indicate our state. As long
579 * as it is >0, we are still trying to all join up. 579 * as it is >0, we are still trying to all join up.
580 * If it goes to 0, we have successfully joined up and 580 * If it goes to 0, we have successfully joined up and
581 * one thread got H_Continue. If any error happens, 581 * one thread got H_CONTINUE. If any error happens,
582 * we set it to <0. 582 * we set it to <0.
583 */ 583 */
584 local_irq_save(flags); 584 local_irq_save(flags);
585 do { 585 do {
586 rc = plpar_hcall_norets(H_JOIN); 586 rc = plpar_hcall_norets(H_JOIN);
587 smp_rmb(); 587 smp_rmb();
588 } while (rc == H_Success && data->waiting > 0); 588 } while (rc == H_SUCCESS && data->waiting > 0);
589 if (rc == H_Success) 589 if (rc == H_SUCCESS)
590 goto out; 590 goto out;
591 591
592 if (rc == H_Continue) { 592 if (rc == H_CONTINUE) {
593 data->waiting = 0; 593 data->waiting = 0;
594 data->args->args[data->args->nargs] = 594 data->args->args[data->args->nargs] =
595 rtas_call(ibm_suspend_me_token, 0, 1, NULL); 595 rtas_call(ibm_suspend_me_token, 0, 1, NULL);
@@ -597,7 +597,7 @@ static void rtas_percpu_suspend_me(void *info)
597 plpar_hcall_norets(H_PROD,i); 597 plpar_hcall_norets(H_PROD,i);
598 } else { 598 } else {
599 data->waiting = -EBUSY; 599 data->waiting = -EBUSY;
600 printk(KERN_ERR "Error on H_Join hypervisor call\n"); 600 printk(KERN_ERR "Error on H_JOIN hypervisor call\n");
601 } 601 }
602 602
603out: 603out:
@@ -624,7 +624,7 @@ static int rtas_ibm_suspend_me(struct rtas_args *args)
624 printk(KERN_ERR "Error doing global join\n"); 624 printk(KERN_ERR "Error doing global join\n");
625 625
626 /* Prod each CPU. This won't hurt, and will wake 626 /* Prod each CPU. This won't hurt, and will wake
627 * anyone we successfully put to sleep with H_Join 627 * anyone we successfully put to sleep with H_JOIN.
628 */ 628 */
629 for_each_possible_cpu(i) 629 for_each_possible_cpu(i)
630 plpar_hcall_norets(H_PROD, i); 630 plpar_hcall_norets(H_PROD, i);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index c607f3b9ca17..1d93e73a7003 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -21,6 +21,7 @@
21#include <linux/reboot.h> 21#include <linux/reboot.h>
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/initrd.h> 23#include <linux/initrd.h>
24#include <linux/platform_device.h>
24#include <linux/ide.h> 25#include <linux/ide.h>
25#include <linux/seq_file.h> 26#include <linux/seq_file.h>
26#include <linux/ioport.h> 27#include <linux/ioport.h>
@@ -462,6 +463,29 @@ static int __init early_xmon(char *p)
462early_param("xmon", early_xmon); 463early_param("xmon", early_xmon);
463#endif 464#endif
464 465
466static __init int add_pcspkr(void)
467{
468 struct device_node *np;
469 struct platform_device *pd;
470 int ret;
471
472 np = of_find_compatible_node(NULL, NULL, "pnpPNP,100");
473 of_node_put(np);
474 if (!np)
475 return -ENODEV;
476
477 pd = platform_device_alloc("pcspkr", -1);
478 if (!pd)
479 return -ENOMEM;
480
481 ret = platform_device_add(pd);
482 if (ret)
483 platform_device_put(pd);
484
485 return ret;
486}
487device_initcall(add_pcspkr);
488
465void probe_machine(void) 489void probe_machine(void)
466{ 490{
467 extern struct machdep_calls __machine_desc_start; 491 extern struct machdep_calls __machine_desc_start;
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index a72bf5dceeee..69ac25701344 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -50,7 +50,6 @@
50#include <asm/kgdb.h> 50#include <asm/kgdb.h>
51#endif 51#endif
52 52
53extern void platform_init(void);
54extern void bootx_init(unsigned long r4, unsigned long phys); 53extern void bootx_init(unsigned long r4, unsigned long phys);
55 54
56boot_infos_t *boot_infos; 55boot_infos_t *boot_infos;
@@ -138,12 +137,7 @@ void __init machine_init(unsigned long dt_ptr, unsigned long phys)
138 strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line)); 137 strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line));
139#endif /* CONFIG_CMDLINE */ 138#endif /* CONFIG_CMDLINE */
140 139
141#ifdef CONFIG_PPC_MULTIPLATFORM
142 probe_machine(); 140 probe_machine();
143#else
144 /* Base init based on machine type. Obsolete, please kill! */
145 platform_init();
146#endif
147 141
148#ifdef CONFIG_6xx 142#ifdef CONFIG_6xx
149 if (cpu_has_feature(CPU_FTR_CAN_DOZE) || 143 if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 59aa92cd6fa4..13e91c4d70a8 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -215,12 +215,10 @@ void __init early_setup(unsigned long dt_ptr)
215 /* 215 /*
216 * Initialize stab / SLB management except on iSeries 216 * Initialize stab / SLB management except on iSeries
217 */ 217 */
218 if (!firmware_has_feature(FW_FEATURE_ISERIES)) { 218 if (cpu_has_feature(CPU_FTR_SLB))
219 if (cpu_has_feature(CPU_FTR_SLB)) 219 slb_initialize();
220 slb_initialize(); 220 else if (!firmware_has_feature(FW_FEATURE_ISERIES))
221 else 221 stab_initialize(get_paca()->stab_real);
222 stab_initialize(get_paca()->stab_real);
223 }
224 222
225 DBG(" <- early_setup()\n"); 223 DBG(" <- early_setup()\n");
226} 224}
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 1ad55f0466fd..1424eab450ee 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -322,3 +322,4 @@ SYSCALL(spu_create)
322COMPAT_SYS(pselect6) 322COMPAT_SYS(pselect6)
323COMPAT_SYS(ppoll) 323COMPAT_SYS(ppoll)
324SYSCALL(unshare) 324SYSCALL(unshare)
325SYSCALL(splice)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 4cbde211eb69..064a52564692 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -228,7 +228,7 @@ void system_reset_exception(struct pt_regs *regs)
228 */ 228 */
229static inline int check_io_access(struct pt_regs *regs) 229static inline int check_io_access(struct pt_regs *regs)
230{ 230{
231#ifdef CONFIG_PPC_PMAC 231#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
232 unsigned long msr = regs->msr; 232 unsigned long msr = regs->msr;
233 const struct exception_table_entry *entry; 233 const struct exception_table_entry *entry;
234 unsigned int *nip = (unsigned int *)regs->nip; 234 unsigned int *nip = (unsigned int *)regs->nip;
@@ -261,7 +261,7 @@ static inline int check_io_access(struct pt_regs *regs)
261 return 1; 261 return 1;
262 } 262 }
263 } 263 }
264#endif /* CONFIG_PPC_PMAC */ 264#endif /* CONFIG_PPC_PMAC && CONFIG_PPC32 */
265 return 0; 265 return 0;
266} 266}
267 267
@@ -308,8 +308,8 @@ platform_machine_check(struct pt_regs *regs)
308 308
309void machine_check_exception(struct pt_regs *regs) 309void machine_check_exception(struct pt_regs *regs)
310{ 310{
311#ifdef CONFIG_PPC64
312 int recover = 0; 311 int recover = 0;
312 unsigned long reason = get_mc_reason(regs);
313 313
314 /* See if any machine dependent calls */ 314 /* See if any machine dependent calls */
315 if (ppc_md.machine_check_exception) 315 if (ppc_md.machine_check_exception)
@@ -317,8 +317,6 @@ void machine_check_exception(struct pt_regs *regs)
317 317
318 if (recover) 318 if (recover)
319 return; 319 return;
320#else
321 unsigned long reason = get_mc_reason(regs);
322 320
323 if (user_mode(regs)) { 321 if (user_mode(regs)) {
324 regs->msr |= MSR_RI; 322 regs->msr |= MSR_RI;
@@ -462,7 +460,6 @@ void machine_check_exception(struct pt_regs *regs)
462 * additional info, e.g. bus error registers. 460 * additional info, e.g. bus error registers.
463 */ 461 */
464 platform_machine_check(regs); 462 platform_machine_check(regs);
465#endif /* CONFIG_PPC64 */
466 463
467 if (debugger_fault_handler(regs)) 464 if (debugger_fault_handler(regs))
468 return; 465 return;
diff --git a/arch/powerpc/kernel/vdso32/sigtramp.S b/arch/powerpc/kernel/vdso32/sigtramp.S
index e04642781917..0c6a37b29dde 100644
--- a/arch/powerpc/kernel/vdso32/sigtramp.S
+++ b/arch/powerpc/kernel/vdso32/sigtramp.S
@@ -261,7 +261,7 @@ V_FUNCTION_END(__kernel_sigtramp_rt32)
261.Lcie_start: 261.Lcie_start:
262 .long 0 /* CIE ID */ 262 .long 0 /* CIE ID */
263 .byte 1 /* Version number */ 263 .byte 1 /* Version number */
264 .string "zR" /* NUL-terminated augmentation string */ 264 .string "zRS" /* NUL-terminated augmentation string */
265 .uleb128 4 /* Code alignment factor */ 265 .uleb128 4 /* Code alignment factor */
266 .sleb128 -4 /* Data alignment factor */ 266 .sleb128 -4 /* Data alignment factor */
267 .byte 67 /* Return address register column, ap */ 267 .byte 67 /* Return address register column, ap */
diff --git a/arch/powerpc/kernel/vdso64/sigtramp.S b/arch/powerpc/kernel/vdso64/sigtramp.S
index 31b604ab56de..7479edb101b8 100644
--- a/arch/powerpc/kernel/vdso64/sigtramp.S
+++ b/arch/powerpc/kernel/vdso64/sigtramp.S
@@ -263,7 +263,7 @@ V_FUNCTION_END(__kernel_sigtramp_rt64)
263.Lcie_start: 263.Lcie_start:
264 .long 0 /* CIE ID */ 264 .long 0 /* CIE ID */
265 .byte 1 /* Version number */ 265 .byte 1 /* Version number */
266 .string "zR" /* NUL-terminated augmentation string */ 266 .string "zRS" /* NUL-terminated augmentation string */
267 .uleb128 4 /* Code alignment factor */ 267 .uleb128 4 /* Code alignment factor */
268 .sleb128 -8 /* Data alignment factor */ 268 .sleb128 -8 /* Data alignment factor */
269 .byte 67 /* Return address register column, ap */ 269 .byte 67 /* Return address register column, ap */
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 5aea0909a5ec..fdbba4206d59 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -177,15 +177,15 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
177 177
178 /* When running in the kernel we expect faults to occur only to 178 /* When running in the kernel we expect faults to occur only to
179 * addresses in user space. All other faults represent errors in the 179 * addresses in user space. All other faults represent errors in the
180 * kernel and should generate an OOPS. Unfortunatly, in the case of an 180 * kernel and should generate an OOPS. Unfortunately, in the case of an
181 * erroneous fault occuring in a code path which already holds mmap_sem 181 * erroneous fault occurring in a code path which already holds mmap_sem
182 * we will deadlock attempting to validate the fault against the 182 * we will deadlock attempting to validate the fault against the
183 * address space. Luckily the kernel only validly references user 183 * address space. Luckily the kernel only validly references user
184 * space from well defined areas of code, which are listed in the 184 * space from well defined areas of code, which are listed in the
185 * exceptions table. 185 * exceptions table.
186 * 186 *
187 * As the vast majority of faults will be valid we will only perform 187 * As the vast majority of faults will be valid we will only perform
188 * the source reference check when there is a possibilty of a deadlock. 188 * the source reference check when there is a possibility of a deadlock.
189 * Attempt to lock the address space, if we cannot we then validate the 189 * Attempt to lock the address space, if we cannot we then validate the
190 * source. If this is invalid we can skip the address space check, 190 * source. If this is invalid we can skip the address space check,
191 * thus avoiding the deadlock. 191 * thus avoiding the deadlock.
diff --git a/arch/powerpc/platforms/83xx/mpc834x_sys.c b/arch/powerpc/platforms/83xx/mpc834x_sys.c
index 7c18b4cd5db4..7e789d2420ba 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_sys.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_sys.c
@@ -158,25 +158,25 @@ static int __init mpc834x_rtc_hookup(void)
158late_initcall(mpc834x_rtc_hookup); 158late_initcall(mpc834x_rtc_hookup);
159#endif 159#endif
160 160
161void __init platform_init(void) 161/*
162 * Called very early, MMU is off, device-tree isn't unflattened
163 */
164static int __init mpc834x_sys_probe(void)
162{ 165{
163 /* setup the PowerPC module struct */ 166 /* We always match for now, eventually we should look at the flat
164 ppc_md.setup_arch = mpc834x_sys_setup_arch; 167 dev tree to ensure this is the board we are supposed to run on
165 168 */
166 ppc_md.init_IRQ = mpc834x_sys_init_IRQ; 169 return 1;
167 ppc_md.get_irq = ipic_get_irq;
168
169 ppc_md.restart = mpc83xx_restart;
170
171 ppc_md.time_init = mpc83xx_time_init;
172 ppc_md.set_rtc_time = NULL;
173 ppc_md.get_rtc_time = NULL;
174 ppc_md.calibrate_decr = generic_calibrate_decr;
175
176 ppc_md.progress = udbg_progress;
177
178 if (ppc_md.progress)
179 ppc_md.progress("mpc834x_sys_init(): exit", 0);
180
181 return;
182} 170}
171
172define_machine(mpc834x_sys) {
173 .name = "MPC834x SYS",
174 .probe = mpc834x_sys_probe,
175 .setup_arch = mpc834x_sys_setup_arch,
176 .init_IRQ = mpc834x_sys_init_IRQ,
177 .get_irq = ipic_get_irq,
178 .restart = mpc83xx_restart,
179 .time_init = mpc83xx_time_init,
180 .calibrate_decr = generic_calibrate_decr,
181 .progress = udbg_progress,
182};
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index b7821dbae00d..5eeff370f5fc 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -220,25 +220,25 @@ void mpc85xx_ads_show_cpuinfo(struct seq_file *m)
220 seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024)); 220 seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
221} 221}
222 222
223void __init platform_init(void) 223/*
224 * Called very early, device-tree isn't unflattened
225 */
226static int __init mpc85xx_ads_probe(void)
224{ 227{
225 ppc_md.setup_arch = mpc85xx_ads_setup_arch; 228 /* We always match for now, eventually we should look at the flat
226 ppc_md.show_cpuinfo = mpc85xx_ads_show_cpuinfo; 229 dev tree to ensure this is the board we are supposed to run on
227 230 */
228 ppc_md.init_IRQ = mpc85xx_ads_pic_init; 231 return 1;
229 ppc_md.get_irq = mpic_get_irq;
230
231 ppc_md.restart = mpc85xx_restart;
232 ppc_md.power_off = NULL;
233 ppc_md.halt = NULL;
234
235 ppc_md.time_init = NULL;
236 ppc_md.set_rtc_time = NULL;
237 ppc_md.get_rtc_time = NULL;
238 ppc_md.calibrate_decr = generic_calibrate_decr;
239
240 ppc_md.progress = udbg_progress;
241
242 if (ppc_md.progress)
243 ppc_md.progress("mpc85xx_ads platform_init(): exit", 0);
244} 232}
233
234define_machine(mpc85xx_ads) {
235 .name = "MPC85xx ADS",
236 .probe = mpc85xx_ads_probe,
237 .setup_arch = mpc85xx_ads_setup_arch,
238 .init_IRQ = mpc85xx_ads_pic_init,
239 .show_cpuinfo = mpc85xx_ads_show_cpuinfo,
240 .get_irq = mpic_get_irq,
241 .restart = mpc85xx_restart,
242 .calibrate_decr = generic_calibrate_decr,
243 .progress = udbg_progress,
244};
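Both board ports now hook into the generic probe_machine() path through define_machine(), with probe() stubbed to always return 1; as the comments note, the probe should eventually match against the flattened device tree. A hedged sketch of what such a check might look like, using the flat-device-tree helpers, with "MPC85xxADS" as an assumed model string purely for illustration:

#include <asm/prom.h>

/* Hypothetical device-tree-based probe, replacing the unconditional
 * "return 1" above; the model/compatible string is assumed. */
static int __init mpc85xx_ads_probe(void)
{
	unsigned long root = of_get_flat_dt_root();

	return of_flat_dt_is_compatible(root, "MPC85xxADS");
}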
diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c
index 3a4245c926ad..6594bec73882 100644
--- a/arch/powerpc/platforms/cell/spu_callbacks.c
+++ b/arch/powerpc/platforms/cell/spu_callbacks.c
@@ -316,6 +316,7 @@ void *spu_syscall_table[] = {
316 [__NR_pselect6] sys_ni_syscall, /* sys_pselect */ 316 [__NR_pselect6] sys_ni_syscall, /* sys_pselect */
317 [__NR_ppoll] sys_ni_syscall, /* sys_ppoll */ 317 [__NR_ppoll] sys_ni_syscall, /* sys_ppoll */
318 [__NR_unshare] sys_unshare, 318 [__NR_unshare] sys_unshare,
319 [__NR_splice] sys_splice,
319}; 320};
320 321
321long spu_sys_callback(struct spu_syscall_block *s) 322long spu_sys_callback(struct spu_syscall_block *s)
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index c04e078c0fe5..483c8b76232c 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -2,6 +2,7 @@
2#include <linux/ptrace.h> 2#include <linux/ptrace.h>
3 3
4#include <asm/spu.h> 4#include <asm/spu.h>
5#include <asm/unistd.h>
5 6
6#include "spufs.h" 7#include "spufs.h"
7 8
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 9b2b1cb117b3..780fb27a0099 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -865,7 +865,7 @@ void __init eeh_init(void)
865 * on the CEC architecture, type of the device, on earlier boot 865 * on the CEC architecture, type of the device, on earlier boot
866 * command-line arguments & etc. 866 * command-line arguments & etc.
867 */ 867 */
868void eeh_add_device_early(struct device_node *dn) 868static void eeh_add_device_early(struct device_node *dn)
869{ 869{
870 struct pci_controller *phb; 870 struct pci_controller *phb;
871 struct eeh_early_enable_info info; 871 struct eeh_early_enable_info info;
@@ -882,7 +882,6 @@ void eeh_add_device_early(struct device_node *dn)
882 info.buid_lo = BUID_LO(phb->buid); 882 info.buid_lo = BUID_LO(phb->buid);
883 early_enable_eeh(dn, &info); 883 early_enable_eeh(dn, &info);
884} 884}
885EXPORT_SYMBOL_GPL(eeh_add_device_early);
886 885
887void eeh_add_device_tree_early(struct device_node *dn) 886void eeh_add_device_tree_early(struct device_node *dn)
888{ 887{
@@ -893,20 +892,6 @@ void eeh_add_device_tree_early(struct device_node *dn)
893} 892}
894EXPORT_SYMBOL_GPL(eeh_add_device_tree_early); 893EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
895 894
896void eeh_add_device_tree_late(struct pci_bus *bus)
897{
898 struct pci_dev *dev;
899
900 list_for_each_entry(dev, &bus->devices, bus_list) {
901 eeh_add_device_late(dev);
902 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
903 struct pci_bus *subbus = dev->subordinate;
904 if (subbus)
905 eeh_add_device_tree_late(subbus);
906 }
907 }
908}
909
910/** 895/**
911 * eeh_add_device_late - perform EEH initialization for the indicated pci device 896 * eeh_add_device_late - perform EEH initialization for the indicated pci device
912 * @dev: pci device for which to set up EEH 897 * @dev: pci device for which to set up EEH
@@ -914,7 +899,7 @@ void eeh_add_device_tree_late(struct pci_bus *bus)
914 * This routine must be used to complete EEH initialization for PCI 899 * This routine must be used to complete EEH initialization for PCI
915 * devices that were added after system boot (e.g. hotplug, dlpar). 900 * devices that were added after system boot (e.g. hotplug, dlpar).
916 */ 901 */
917void eeh_add_device_late(struct pci_dev *dev) 902static void eeh_add_device_late(struct pci_dev *dev)
918{ 903{
919 struct device_node *dn; 904 struct device_node *dn;
920 struct pci_dn *pdn; 905 struct pci_dn *pdn;
@@ -933,16 +918,33 @@ void eeh_add_device_late(struct pci_dev *dev)
933 918
934 pci_addr_cache_insert_device (dev); 919 pci_addr_cache_insert_device (dev);
935} 920}
936EXPORT_SYMBOL_GPL(eeh_add_device_late); 921
922void eeh_add_device_tree_late(struct pci_bus *bus)
923{
924 struct pci_dev *dev;
925
926 list_for_each_entry(dev, &bus->devices, bus_list) {
927 eeh_add_device_late(dev);
928 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
929 struct pci_bus *subbus = dev->subordinate;
930 if (subbus)
931 eeh_add_device_tree_late(subbus);
932 }
933 }
934}
935EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
937 936
938/** 937/**
939 * eeh_remove_device - undo EEH setup for the indicated pci device 938 * eeh_remove_device - undo EEH setup for the indicated pci device
940 * @dev: pci device to be removed 939 * @dev: pci device to be removed
941 * 940 *
942 * This routine should be when a device is removed from a running 941 * This routine should be called when a device is removed from
943 * system (e.g. by hotplug or dlpar). 942 * a running system (e.g. by hotplug or dlpar). It unregisters
943 * the PCI device from the EEH subsystem. I/O errors affecting
944 * this device will no longer be detected after this call; thus,
945 * I/O errors affecting this slot may leave this device unusable.
944 */ 946 */
945void eeh_remove_device(struct pci_dev *dev) 947static void eeh_remove_device(struct pci_dev *dev)
946{ 948{
947 struct device_node *dn; 949 struct device_node *dn;
948 if (!dev || !eeh_subsystem_enabled) 950 if (!dev || !eeh_subsystem_enabled)
@@ -958,21 +960,17 @@ void eeh_remove_device(struct pci_dev *dev)
958 PCI_DN(dn)->pcidev = NULL; 960 PCI_DN(dn)->pcidev = NULL;
959 pci_dev_put (dev); 961 pci_dev_put (dev);
960} 962}
961EXPORT_SYMBOL_GPL(eeh_remove_device);
962 963
963void eeh_remove_bus_device(struct pci_dev *dev) 964void eeh_remove_bus_device(struct pci_dev *dev)
964{ 965{
966 struct pci_bus *bus = dev->subordinate;
967 struct pci_dev *child, *tmp;
968
965 eeh_remove_device(dev); 969 eeh_remove_device(dev);
966 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { 970
967 struct pci_bus *bus = dev->subordinate; 971 if (bus && dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
968 struct list_head *ln; 972 list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
969 if (!bus) 973 eeh_remove_bus_device(child);
970 return;
971 for (ln = bus->devices.next; ln != &bus->devices; ln = ln->next) {
972 struct pci_dev *pdev = pci_dev_b(ln);
973 if (pdev)
974 eeh_remove_bus_device(pdev);
975 }
976 } 974 }
977} 975}
978EXPORT_SYMBOL_GPL(eeh_remove_bus_device); 976EXPORT_SYMBOL_GPL(eeh_remove_bus_device);
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index cc2495a0cdd5..1fba695e32e8 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -293,15 +293,16 @@ void handle_eeh_events (struct eeh_event *event)
293 frozen_pdn = PCI_DN(frozen_dn); 293 frozen_pdn = PCI_DN(frozen_dn);
294 frozen_pdn->eeh_freeze_count++; 294 frozen_pdn->eeh_freeze_count++;
295 295
296 pci_str = pci_name (frozen_pdn->pcidev); 296 if (frozen_pdn->pcidev) {
297 drv_str = pcid_name (frozen_pdn->pcidev); 297 pci_str = pci_name (frozen_pdn->pcidev);
298 if (!pci_str) { 298 drv_str = pcid_name (frozen_pdn->pcidev);
299 } else {
299 pci_str = pci_name (event->dev); 300 pci_str = pci_name (event->dev);
300 drv_str = pcid_name (event->dev); 301 drv_str = pcid_name (event->dev);
301 } 302 }
302 303
303 if (frozen_pdn->eeh_freeze_count > EEH_MAX_ALLOWED_FREEZES) 304 if (frozen_pdn->eeh_freeze_count > EEH_MAX_ALLOWED_FREEZES)
304 goto hard_fail; 305 goto excess_failures;
305 306
306 /* If the reset state is a '5' and the time to reset is 0 (infinity) 307 /* If the reset state is a '5' and the time to reset is 0 (infinity)
307 * or is more than 15 seconds, then mark this as a permanent failure. 308 * or is more than 15 seconds, then mark this as a permanent failure.
@@ -356,7 +357,7 @@ void handle_eeh_events (struct eeh_event *event)
356 357
357 return; 358 return;
358 359
359hard_fail: 360excess_failures:
360 /* 361 /*
361 * About 90% of all real-life EEH failures in the field 362 * About 90% of all real-life EEH failures in the field
362 * are due to poorly seated PCI cards. Only 10% or so are 363 * are due to poorly seated PCI cards. Only 10% or so are
@@ -367,7 +368,15 @@ hard_fail:
367 "and has been permanently disabled. Please try reseating\n" 368 "and has been permanently disabled. Please try reseating\n"
368 "this device or replacing it.\n", 369 "this device or replacing it.\n",
369 drv_str, pci_str, frozen_pdn->eeh_freeze_count); 370 drv_str, pci_str, frozen_pdn->eeh_freeze_count);
371 goto perm_error;
372
373hard_fail:
374 printk(KERN_ERR
375 "EEH: Unable to recover from failure of PCI device %s - %s\n"
376 "Please try reseating this device or replacing it.\n",
377 drv_str, pci_str);
370 378
379perm_error:
371 eeh_slot_error_detail(frozen_pdn, 2 /* Permanent Error */); 380 eeh_slot_error_detail(frozen_pdn, 2 /* Permanent Error */);
372 381
373 /* Notify all devices that they're about to go down. */ 382 /* Notify all devices that they're about to go down. */
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index 9a9961f27480..a1bda6f96fd1 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -19,7 +19,9 @@
19 */ 19 */
20 20
21#include <linux/list.h> 21#include <linux/list.h>
22#include <linux/mutex.h>
22#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/workqueue.h>
23#include <asm/eeh_event.h> 25#include <asm/eeh_event.h>
24#include <asm/ppc-pci.h> 26#include <asm/ppc-pci.h>
25 27
@@ -37,14 +39,18 @@ LIST_HEAD(eeh_eventlist);
37static void eeh_thread_launcher(void *); 39static void eeh_thread_launcher(void *);
38DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL); 40DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);
39 41
42/* Serialize reset sequences for a given pci device */
43DEFINE_MUTEX(eeh_event_mutex);
44
40/** 45/**
41 * eeh_event_handler - dispatch EEH events. The detection of a frozen 46 * eeh_event_handler - dispatch EEH events.
42 * slot can occur inside an interrupt, where it can be hard to do
43 * anything about it. The goal of this routine is to pull these
44 * detection events out of the context of the interrupt handler, and
45 * re-dispatch them for processing at a later time in a normal context.
46 *
47 * @dummy - unused 47 * @dummy - unused
48 *
49 * The detection of a frozen slot can occur inside an interrupt,
50 * where it can be hard to do anything about it. The goal of this
51 * routine is to pull these detection events out of the context
52 * of the interrupt handler, and re-dispatch them for processing
53 * at a later time in a normal context.
48 */ 54 */
49static int eeh_event_handler(void * dummy) 55static int eeh_event_handler(void * dummy)
50{ 56{
@@ -64,23 +70,24 @@ static int eeh_event_handler(void * dummy)
64 event = list_entry(eeh_eventlist.next, struct eeh_event, list); 70 event = list_entry(eeh_eventlist.next, struct eeh_event, list);
65 list_del(&event->list); 71 list_del(&event->list);
66 } 72 }
67
68 if (event)
69 eeh_mark_slot(event->dn, EEH_MODE_RECOVERING);
70
71 spin_unlock_irqrestore(&eeh_eventlist_lock, flags); 73 spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
74
72 if (event == NULL) 75 if (event == NULL)
73 break; 76 break;
74 77
78 /* Serialize processing of EEH events */
79 mutex_lock(&eeh_event_mutex);
80 eeh_mark_slot(event->dn, EEH_MODE_RECOVERING);
81
75 printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n", 82 printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n",
76 pci_name(event->dev)); 83 pci_name(event->dev));
77 84
78 handle_eeh_events(event); 85 handle_eeh_events(event);
79 86
80 eeh_clear_slot(event->dn, EEH_MODE_RECOVERING); 87 eeh_clear_slot(event->dn, EEH_MODE_RECOVERING);
81
82 pci_dev_put(event->dev); 88 pci_dev_put(event->dev);
83 kfree(event); 89 kfree(event);
90 mutex_unlock(&eeh_event_mutex);
84 } 91 }
85 92
86 return 0; 93 return 0;
@@ -88,7 +95,6 @@ static int eeh_event_handler(void * dummy)
88 95
89/** 96/**
90 * eeh_thread_launcher 97 * eeh_thread_launcher
91 *
92 * @dummy - unused 98 * @dummy - unused
93 */ 99 */
94static void eeh_thread_launcher(void *dummy) 100static void eeh_thread_launcher(void *dummy)
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index db7c19fe9297..c9ff547f9d25 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -127,3 +127,103 @@ _GLOBAL(plpar_hcall_4out)
127 127
128 mtcrf 0xff,r0 128 mtcrf 0xff,r0
129 blr /* return r3 = status */ 129 blr /* return r3 = status */
130
131/* plpar_hcall_7arg_7ret(unsigned long opcode, R3
132 unsigned long arg1, R4
133 unsigned long arg2, R5
134 unsigned long arg3, R6
135 unsigned long arg4, R7
136 unsigned long arg5, R8
137 unsigned long arg6, R9
138 unsigned long arg7, R10
139 unsigned long *out1, 112(R1)
140 unsigned long *out2, 110(R1)
141 unsigned long *out3, 108(R1)
142 unsigned long *out4, 106(R1)
143 unsigned long *out5, 104(R1)
144 unsigned long *out6, 102(R1)
145 unsigned long *out7); 100(R1)
146*/
147_GLOBAL(plpar_hcall_7arg_7ret)
148 HMT_MEDIUM
149
150 mfcr r0
151 stw r0,8(r1)
152
153 HVSC /* invoke the hypervisor */
154
155 lwz r0,8(r1)
156
157 ld r11,STK_PARM(r11)(r1) /* Fetch r4 ret arg */
158 std r4,0(r11)
159 ld r11,STK_PARM(r12)(r1) /* Fetch r5 ret arg */
160 std r5,0(r11)
161 ld r11,STK_PARM(r13)(r1) /* Fetch r6 ret arg */
162 std r6,0(r11)
163 ld r11,STK_PARM(r14)(r1) /* Fetch r7 ret arg */
164 std r7,0(r11)
165 ld r11,STK_PARM(r15)(r1) /* Fetch r8 ret arg */
166 std r8,0(r11)
167 ld r11,STK_PARM(r16)(r1) /* Fetch r9 ret arg */
168 std r9,0(r11)
169 ld r11,STK_PARM(r17)(r1) /* Fetch r10 ret arg */
170 std r10,0(r11)
171
172 mtcrf 0xff,r0
173
174 blr /* return r3 = status */
175
176/* plpar_hcall_9arg_9ret(unsigned long opcode, R3
177 unsigned long arg1, R4
178 unsigned long arg2, R5
179 unsigned long arg3, R6
180 unsigned long arg4, R7
181 unsigned long arg5, R8
182 unsigned long arg6, R9
183 unsigned long arg7, R10
184 unsigned long arg8, 112(R1)
185 unsigned long arg9, 110(R1)
186 unsigned long *out1, 108(R1)
187 unsigned long *out2, 106(R1)
188 unsigned long *out3, 104(R1)
189 unsigned long *out4, 102(R1)
190 unsigned long *out5, 100(R1)
191 unsigned long *out6, 98(R1)
192 unsigned long *out7, 96(R1)
193 unsigned long *out8, 94(R1)
194 unsigned long *out9); 92(R1)
195*/
196_GLOBAL(plpar_hcall_9arg_9ret)
197 HMT_MEDIUM
198
199 mfcr r0
200 stw r0,8(r1)
201
202 ld r11,STK_PARM(r11)(r1) /* put arg8 in R11 */
203 ld r12,STK_PARM(r12)(r1) /* put arg9 in R12 */
204
205 HVSC /* invoke the hypervisor */
206
207 ld r0,STK_PARM(r13)(r1) /* Fetch r4 ret arg */
208 stdx r4,r0,r0
209 ld r0,STK_PARM(r14)(r1) /* Fetch r5 ret arg */
210 stdx r5,r0,r0
211 ld r0,STK_PARM(r15)(r1) /* Fetch r6 ret arg */
212 stdx r6,r0,r0
213 ld r0,STK_PARM(r16)(r1) /* Fetch r7 ret arg */
214 stdx r7,r0,r0
215 ld r0,STK_PARM(r17)(r1) /* Fetch r8 ret arg */
216 stdx r8,r0,r0
217 ld r0,STK_PARM(r18)(r1) /* Fetch r9 ret arg */
218 stdx r9,r0,r0
219 ld r0,STK_PARM(r19)(r1) /* Fetch r10 ret arg */
220 stdx r10,r0,r0
221 ld r0,STK_PARM(r20)(r1) /* Fetch r11 ret arg */
222 stdx r11,r0,r0
223 ld r0,STK_PARM(r21)(r1) /* Fetch r12 ret arg */
224 stdx r12,r0,r0
225
226 lwz r0,8(r1)
227 mtcrf 0xff,r0
228
229 blr /* return r3 = status */
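
For reference, the register maps in the comments above imply C prototypes along the following lines. This is only a sketch inferred from those comments: the authoritative declarations live in the hvcall header, which this diff does not show, and the long return type is assumed to match the existing plpar_hcall variants.

/* Sketch only: prototypes inferred from the register-map comments in
 * hvCall.S above; the real declarations belong in the hvcall header. */
long plpar_hcall_7arg_7ret(unsigned long opcode,
                           unsigned long arg1, unsigned long arg2,
                           unsigned long arg3, unsigned long arg4,
                           unsigned long arg5, unsigned long arg6,
                           unsigned long arg7,
                           unsigned long *out1, unsigned long *out2,
                           unsigned long *out3, unsigned long *out4,
                           unsigned long *out5, unsigned long *out6,
                           unsigned long *out7);

long plpar_hcall_9arg_9ret(unsigned long opcode,
                           unsigned long arg1, unsigned long arg2,
                           unsigned long arg3, unsigned long arg4,
                           unsigned long arg5, unsigned long arg6,
                           unsigned long arg7, unsigned long arg8,
                           unsigned long arg9,
                           unsigned long *out1, unsigned long *out2,
                           unsigned long *out3, unsigned long *out4,
                           unsigned long *out5, unsigned long *out6,
                           unsigned long *out7, unsigned long *out8,
                           unsigned long *out9);

The lpar.c hunk later in this patch exports both symbols, which is consistent with declarations of this shape being visible to other pseries code.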
diff --git a/arch/powerpc/platforms/pseries/hvconsole.c b/arch/powerpc/platforms/pseries/hvconsole.c
index ba6befd96636..a72a987f1d4d 100644
--- a/arch/powerpc/platforms/pseries/hvconsole.c
+++ b/arch/powerpc/platforms/pseries/hvconsole.c
@@ -41,7 +41,7 @@ int hvc_get_chars(uint32_t vtermno, char *buf, int count)
41 unsigned long got; 41 unsigned long got;
42 42
43 if (plpar_hcall(H_GET_TERM_CHAR, vtermno, 0, 0, 0, &got, 43 if (plpar_hcall(H_GET_TERM_CHAR, vtermno, 0, 0, 0, &got,
44 (unsigned long *)buf, (unsigned long *)buf+1) == H_Success) 44 (unsigned long *)buf, (unsigned long *)buf+1) == H_SUCCESS)
45 return got; 45 return got;
46 return 0; 46 return 0;
47} 47}
@@ -69,9 +69,9 @@ int hvc_put_chars(uint32_t vtermno, const char *buf, int count)
69 69
70 ret = plpar_hcall_norets(H_PUT_TERM_CHAR, vtermno, count, lbuf[0], 70 ret = plpar_hcall_norets(H_PUT_TERM_CHAR, vtermno, count, lbuf[0],
71 lbuf[1]); 71 lbuf[1]);
72 if (ret == H_Success) 72 if (ret == H_SUCCESS)
73 return count; 73 return count;
74 if (ret == H_Busy) 74 if (ret == H_BUSY)
75 return 0; 75 return 0;
76 return -EIO; 76 return -EIO;
77} 77}
diff --git a/arch/powerpc/platforms/pseries/hvcserver.c b/arch/powerpc/platforms/pseries/hvcserver.c
index 22bfb5c89db9..fcf4b4cbeaf3 100644
--- a/arch/powerpc/platforms/pseries/hvcserver.c
+++ b/arch/powerpc/platforms/pseries/hvcserver.c
@@ -43,21 +43,21 @@ MODULE_VERSION(HVCS_ARCH_VERSION);
43static int hvcs_convert(long to_convert) 43static int hvcs_convert(long to_convert)
44{ 44{
45 switch (to_convert) { 45 switch (to_convert) {
46 case H_Success: 46 case H_SUCCESS:
47 return 0; 47 return 0;
48 case H_Parameter: 48 case H_PARAMETER:
49 return -EINVAL; 49 return -EINVAL;
50 case H_Hardware: 50 case H_HARDWARE:
51 return -EIO; 51 return -EIO;
52 case H_Busy: 52 case H_BUSY:
53 case H_LongBusyOrder1msec: 53 case H_LONG_BUSY_ORDER_1_MSEC:
54 case H_LongBusyOrder10msec: 54 case H_LONG_BUSY_ORDER_10_MSEC:
55 case H_LongBusyOrder100msec: 55 case H_LONG_BUSY_ORDER_100_MSEC:
56 case H_LongBusyOrder1sec: 56 case H_LONG_BUSY_ORDER_1_SEC:
57 case H_LongBusyOrder10sec: 57 case H_LONG_BUSY_ORDER_10_SEC:
58 case H_LongBusyOrder100sec: 58 case H_LONG_BUSY_ORDER_100_SEC:
59 return -EBUSY; 59 return -EBUSY;
60 case H_Function: /* fall through */ 60 case H_FUNCTION: /* fall through */
61 default: 61 default:
62 return -EPERM; 62 return -EPERM;
63 } 63 }
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 8952528d31ac..634b7d06d3cc 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -54,7 +54,8 @@ EXPORT_SYMBOL(plpar_hcall);
54EXPORT_SYMBOL(plpar_hcall_4out); 54EXPORT_SYMBOL(plpar_hcall_4out);
55EXPORT_SYMBOL(plpar_hcall_norets); 55EXPORT_SYMBOL(plpar_hcall_norets);
56EXPORT_SYMBOL(plpar_hcall_8arg_2ret); 56EXPORT_SYMBOL(plpar_hcall_8arg_2ret);
57 57EXPORT_SYMBOL(plpar_hcall_7arg_7ret);
58EXPORT_SYMBOL(plpar_hcall_9arg_9ret);
58extern void pSeries_find_serial_port(void); 59extern void pSeries_find_serial_port(void);
59 60
60 61
@@ -72,7 +73,7 @@ static void udbg_hvsi_putc(char c)
72 73
73 do { 74 do {
74 rc = plpar_put_term_char(vtermno, sizeof(packet), packet); 75 rc = plpar_put_term_char(vtermno, sizeof(packet), packet);
75 } while (rc == H_Busy); 76 } while (rc == H_BUSY);
76} 77}
77 78
78static long hvsi_udbg_buf_len; 79static long hvsi_udbg_buf_len;
@@ -85,7 +86,7 @@ static int udbg_hvsi_getc_poll(void)
85 86
86 if (hvsi_udbg_buf_len == 0) { 87 if (hvsi_udbg_buf_len == 0) {
87 rc = plpar_get_term_char(vtermno, &hvsi_udbg_buf_len, hvsi_udbg_buf); 88 rc = plpar_get_term_char(vtermno, &hvsi_udbg_buf_len, hvsi_udbg_buf);
88 if (rc != H_Success || hvsi_udbg_buf[0] != 0xff) { 89 if (rc != H_SUCCESS || hvsi_udbg_buf[0] != 0xff) {
89 /* bad read or non-data packet */ 90 /* bad read or non-data packet */
90 hvsi_udbg_buf_len = 0; 91 hvsi_udbg_buf_len = 0;
91 } else { 92 } else {
@@ -139,7 +140,7 @@ static void udbg_putcLP(char c)
139 buf[0] = c; 140 buf[0] = c;
140 do { 141 do {
141 rc = plpar_put_term_char(vtermno, 1, buf); 142 rc = plpar_put_term_char(vtermno, 1, buf);
142 } while(rc == H_Busy); 143 } while(rc == H_BUSY);
143} 144}
144 145
145/* Buffered chars getc */ 146/* Buffered chars getc */
@@ -158,7 +159,7 @@ static int udbg_getc_pollLP(void)
158 /* get some more chars. */ 159 /* get some more chars. */
159 inbuflen = 0; 160 inbuflen = 0;
160 rc = plpar_get_term_char(vtermno, &inbuflen, buf); 161 rc = plpar_get_term_char(vtermno, &inbuflen, buf);
161 if (rc != H_Success) 162 if (rc != H_SUCCESS)
162 inbuflen = 0; /* otherwise inbuflen is garbage */ 163 inbuflen = 0; /* otherwise inbuflen is garbage */
163 } 164 }
164 if (inbuflen <= 0 || inbuflen > 16) { 165 if (inbuflen <= 0 || inbuflen > 16) {
@@ -304,7 +305,7 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
304 305
305 lpar_rc = plpar_hcall(H_ENTER, flags, hpte_group, hpte_v, 306 lpar_rc = plpar_hcall(H_ENTER, flags, hpte_group, hpte_v,
306 hpte_r, &slot, &dummy0, &dummy1); 307 hpte_r, &slot, &dummy0, &dummy1);
307 if (unlikely(lpar_rc == H_PTEG_Full)) { 308 if (unlikely(lpar_rc == H_PTEG_FULL)) {
308 if (!(vflags & HPTE_V_BOLTED)) 309 if (!(vflags & HPTE_V_BOLTED))
309 DBG_LOW(" full\n"); 310 DBG_LOW(" full\n");
310 return -1; 311 return -1;
@@ -315,7 +316,7 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
315 * will fail. However we must catch the failure in hash_page 316 * will fail. However we must catch the failure in hash_page
316 * or we will loop forever, so return -2 in this case. 317 * or we will loop forever, so return -2 in this case.
317 */ 318 */
318 if (unlikely(lpar_rc != H_Success)) { 319 if (unlikely(lpar_rc != H_SUCCESS)) {
319 if (!(vflags & HPTE_V_BOLTED)) 320 if (!(vflags & HPTE_V_BOLTED))
320 DBG_LOW(" lpar err %d\n", lpar_rc); 321 DBG_LOW(" lpar err %d\n", lpar_rc);
321 return -2; 322 return -2;
@@ -346,9 +347,9 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
346 /* don't remove a bolted entry */ 347 /* don't remove a bolted entry */
347 lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset, 348 lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
348 (0x1UL << 4), &dummy1, &dummy2); 349 (0x1UL << 4), &dummy1, &dummy2);
349 if (lpar_rc == H_Success) 350 if (lpar_rc == H_SUCCESS)
350 return i; 351 return i;
351 BUG_ON(lpar_rc != H_Not_Found); 352 BUG_ON(lpar_rc != H_NOT_FOUND);
352 353
353 slot_offset++; 354 slot_offset++;
354 slot_offset &= 0x7; 355 slot_offset &= 0x7;
@@ -391,14 +392,14 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot,
391 392
392 lpar_rc = plpar_pte_protect(flags, slot, want_v & HPTE_V_AVPN); 393 lpar_rc = plpar_pte_protect(flags, slot, want_v & HPTE_V_AVPN);
393 394
394 if (lpar_rc == H_Not_Found) { 395 if (lpar_rc == H_NOT_FOUND) {
395 DBG_LOW("not found !\n"); 396 DBG_LOW("not found !\n");
396 return -1; 397 return -1;
397 } 398 }
398 399
399 DBG_LOW("ok\n"); 400 DBG_LOW("ok\n");
400 401
401 BUG_ON(lpar_rc != H_Success); 402 BUG_ON(lpar_rc != H_SUCCESS);
402 403
403 return 0; 404 return 0;
404} 405}
@@ -417,7 +418,7 @@ static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
417 418
418 lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1); 419 lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1);
419 420
420 BUG_ON(lpar_rc != H_Success); 421 BUG_ON(lpar_rc != H_SUCCESS);
421 422
422 return dword0; 423 return dword0;
423} 424}
@@ -468,7 +469,7 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
468 flags = newpp & 7; 469 flags = newpp & 7;
469 lpar_rc = plpar_pte_protect(flags, slot, 0); 470 lpar_rc = plpar_pte_protect(flags, slot, 0);
470 471
471 BUG_ON(lpar_rc != H_Success); 472 BUG_ON(lpar_rc != H_SUCCESS);
472} 473}
473 474
474static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va, 475static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
@@ -484,10 +485,10 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
484 want_v = hpte_encode_v(va, psize); 485 want_v = hpte_encode_v(va, psize);
485 lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v & HPTE_V_AVPN, 486 lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v & HPTE_V_AVPN,
486 &dummy1, &dummy2); 487 &dummy1, &dummy2);
487 if (lpar_rc == H_Not_Found) 488 if (lpar_rc == H_NOT_FOUND)
488 return; 489 return;
489 490
490 BUG_ON(lpar_rc != H_Success); 491 BUG_ON(lpar_rc != H_SUCCESS);
491} 492}
492 493
493/* 494/*
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index b2fbf8ba8fbb..5eb55ef1c91c 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -463,7 +463,7 @@ static void pseries_dedicated_idle_sleep(void)
463 * very low priority. The cede enables interrupts, which 463 * very low priority. The cede enables interrupts, which
464 * doesn't matter here. 464 * doesn't matter here.
465 */ 465 */
466 if (!lppaca[cpu ^ 1].idle || poll_pending() == H_Pending) 466 if (!lppaca[cpu ^ 1].idle || poll_pending() == H_PENDING)
467 cede_processor(); 467 cede_processor();
468 468
469out: 469out:
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index 866379b80c09..8e53e04ada8b 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -258,7 +258,7 @@ EXPORT_SYMBOL(vio_find_node);
258int vio_enable_interrupts(struct vio_dev *dev) 258int vio_enable_interrupts(struct vio_dev *dev)
259{ 259{
260 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE); 260 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
261 if (rc != H_Success) 261 if (rc != H_SUCCESS)
262 printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc); 262 printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
263 return rc; 263 return rc;
264} 264}
@@ -267,7 +267,7 @@ EXPORT_SYMBOL(vio_enable_interrupts);
267int vio_disable_interrupts(struct vio_dev *dev) 267int vio_disable_interrupts(struct vio_dev *dev)
268{ 268{
269 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE); 269 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
270 if (rc != H_Success) 270 if (rc != H_SUCCESS)
271 printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc); 271 printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
272 return rc; 272 return rc;
273} 273}
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 4864cb32be25..2d60ea30fed6 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -168,7 +168,7 @@ static int pSeriesLP_xirr_info_get(int n_cpu)
168 unsigned long return_value; 168 unsigned long return_value;
169 169
170 lpar_rc = plpar_xirr(&return_value); 170 lpar_rc = plpar_xirr(&return_value);
171 if (lpar_rc != H_Success) 171 if (lpar_rc != H_SUCCESS)
172 panic(" bad return code xirr - rc = %lx \n", lpar_rc); 172 panic(" bad return code xirr - rc = %lx \n", lpar_rc);
173 return (int)return_value; 173 return (int)return_value;
174} 174}
@@ -179,7 +179,7 @@ static void pSeriesLP_xirr_info_set(int n_cpu, int value)
179 unsigned long val64 = value & 0xffffffff; 179 unsigned long val64 = value & 0xffffffff;
180 180
181 lpar_rc = plpar_eoi(val64); 181 lpar_rc = plpar_eoi(val64);
182 if (lpar_rc != H_Success) 182 if (lpar_rc != H_SUCCESS)
183 panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc, 183 panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc,
184 val64); 184 val64);
185} 185}
@@ -189,7 +189,7 @@ void pSeriesLP_cppr_info(int n_cpu, u8 value)
189 unsigned long lpar_rc; 189 unsigned long lpar_rc;
190 190
191 lpar_rc = plpar_cppr(value); 191 lpar_rc = plpar_cppr(value);
192 if (lpar_rc != H_Success) 192 if (lpar_rc != H_SUCCESS)
193 panic("bad return code cppr - rc = %lx\n", lpar_rc); 193 panic("bad return code cppr - rc = %lx\n", lpar_rc);
194} 194}
195 195
@@ -198,7 +198,7 @@ static void pSeriesLP_qirr_info(int n_cpu , u8 value)
198 unsigned long lpar_rc; 198 unsigned long lpar_rc;
199 199
200 lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value); 200 lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
201 if (lpar_rc != H_Success) 201 if (lpar_rc != H_SUCCESS)
202 panic("bad return code qirr - rc = %lx\n", lpar_rc); 202 panic("bad return code qirr - rc = %lx\n", lpar_rc);
203} 203}
204 204
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2b8841f85534..343120c9223d 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -801,7 +801,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
801 */ 801 */
802 print_cpu_info(&S390_lowcore.cpu_data); 802 print_cpu_info(&S390_lowcore.cpu_data);
803 803
804 for_each_cpu(i) { 804 for_each_possible_cpu(i) {
805 lowcore_ptr[i] = (struct _lowcore *) 805 lowcore_ptr[i] = (struct _lowcore *)
806 __get_free_pages(GFP_KERNEL|GFP_DMA, 806 __get_free_pages(GFP_KERNEL|GFP_DMA,
807 sizeof(void*) == 8 ? 1 : 0); 807 sizeof(void*) == 8 ? 1 : 0);
@@ -831,7 +831,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
831#endif 831#endif
832 set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]); 832 set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);
833 833
834 for_each_cpu(cpu) 834 for_each_possible_cpu(cpu)
835 if (cpu != smp_processor_id()) 835 if (cpu != smp_processor_id())
836 smp_create_idle(cpu); 836 smp_create_idle(cpu);
837} 837}
@@ -868,7 +868,7 @@ static int __init topology_init(void)
868 int cpu; 868 int cpu;
869 int ret; 869 int ret;
870 870
871 for_each_cpu(cpu) { 871 for_each_possible_cpu(cpu) {
872 ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL); 872 ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
873 if (ret) 873 if (ret)
874 printk(KERN_WARNING "topology_init: register_cpu %d " 874 printk(KERN_WARNING "topology_init: register_cpu %d "
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index cf94e8ef17c5..868e68b28880 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -30,7 +30,7 @@ static int x##_disabled __initdata = 0; \
30static int __init x##_setup(char *opts) \ 30static int __init x##_setup(char *opts) \
31{ \ 31{ \
32 x##_disabled = 1; \ 32 x##_disabled = 1; \
33 return 0; \ 33 return 1; \
34} \ 34} \
35__setup("no" __stringify(x), x##_setup); 35__setup("no" __stringify(x), x##_setup);
36 36
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 7ee4ca203616..bb229ef030f3 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -401,7 +401,7 @@ static int __init topology_init(void)
401{ 401{
402 int cpu_id; 402 int cpu_id;
403 403
404 for_each_cpu(cpu_id) 404 for_each_possible_cpu(cpu_id)
405 register_cpu(&cpu[cpu_id], cpu_id, NULL); 405 register_cpu(&cpu[cpu_id], cpu_id, NULL);
406 406
407 return 0; 407 return 0;
diff --git a/arch/sparc/kernel/systbls.S b/arch/sparc/kernel/systbls.S
index 768de64b371f..fbbec5e761c6 100644
--- a/arch/sparc/kernel/systbls.S
+++ b/arch/sparc/kernel/systbls.S
@@ -64,13 +64,13 @@ sys_call_table:
64/*215*/ .long sys_ipc, sys_sigreturn, sys_clone, sys_ioprio_get, sys_adjtimex 64/*215*/ .long sys_ipc, sys_sigreturn, sys_clone, sys_ioprio_get, sys_adjtimex
65/*220*/ .long sys_sigprocmask, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid 65/*220*/ .long sys_sigprocmask, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid
66/*225*/ .long sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid16, sys_setfsgid16 66/*225*/ .long sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid16, sys_setfsgid16
67/*230*/ .long sys_select, sys_time, sys_nis_syscall, sys_stime, sys_statfs64 67/*230*/ .long sys_select, sys_time, sys_splice, sys_stime, sys_statfs64
68 /* "We are the Knights of the Forest of Ni!!" */ 68 /* "We are the Knights of the Forest of Ni!!" */
69/*235*/ .long sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall 69/*235*/ .long sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
70/*240*/ .long sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler 70/*240*/ .long sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
71/*245*/ .long sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep 71/*245*/ .long sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
72/*250*/ .long sparc_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl 72/*250*/ .long sparc_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
73/*255*/ .long sys_nis_syscall, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep 73/*255*/ .long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
74/*260*/ .long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun 74/*260*/ .long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
75/*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy 75/*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy
76/*270*/ .long sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink 76/*270*/ .long sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 900fb0b940d8..30389085a359 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.16 3# Linux kernel version: 2.6.16
4# Sun Mar 26 14:58:11 2006 4# Fri Mar 31 01:40:57 2006
5# 5#
6CONFIG_SPARC=y 6CONFIG_SPARC=y
7CONFIG_SPARC64=y 7CONFIG_SPARC64=y
@@ -180,6 +180,7 @@ CONFIG_SYN_COOKIES=y
180CONFIG_INET_AH=y 180CONFIG_INET_AH=y
181CONFIG_INET_ESP=y 181CONFIG_INET_ESP=y
182CONFIG_INET_IPCOMP=y 182CONFIG_INET_IPCOMP=y
183CONFIG_INET_XFRM_TUNNEL=y
183CONFIG_INET_TUNNEL=y 184CONFIG_INET_TUNNEL=y
184CONFIG_INET_DIAG=y 185CONFIG_INET_DIAG=y
185CONFIG_INET_TCP_DIAG=y 186CONFIG_INET_TCP_DIAG=y
@@ -203,6 +204,7 @@ CONFIG_IPV6_ROUTE_INFO=y
203CONFIG_INET6_AH=m 204CONFIG_INET6_AH=m
204CONFIG_INET6_ESP=m 205CONFIG_INET6_ESP=m
205CONFIG_INET6_IPCOMP=m 206CONFIG_INET6_IPCOMP=m
207CONFIG_INET6_XFRM_TUNNEL=m
206CONFIG_INET6_TUNNEL=m 208CONFIG_INET6_TUNNEL=m
207CONFIG_IPV6_TUNNEL=m 209CONFIG_IPV6_TUNNEL=m
208# CONFIG_NETFILTER is not set 210# CONFIG_NETFILTER is not set
@@ -308,7 +310,6 @@ CONFIG_BLK_DEV_NBD=m
308# CONFIG_BLK_DEV_SX8 is not set 310# CONFIG_BLK_DEV_SX8 is not set
309CONFIG_BLK_DEV_UB=m 311CONFIG_BLK_DEV_UB=m
310# CONFIG_BLK_DEV_RAM is not set 312# CONFIG_BLK_DEV_RAM is not set
311CONFIG_BLK_DEV_RAM_COUNT=16
312# CONFIG_BLK_DEV_INITRD is not set 313# CONFIG_BLK_DEV_INITRD is not set
313CONFIG_CDROM_PKTCDVD=m 314CONFIG_CDROM_PKTCDVD=m
314CONFIG_CDROM_PKTCDVD_BUFFERS=8 315CONFIG_CDROM_PKTCDVD_BUFFERS=8
@@ -449,6 +450,7 @@ CONFIG_MD_RAID0=m
449CONFIG_MD_RAID1=m 450CONFIG_MD_RAID1=m
450CONFIG_MD_RAID10=m 451CONFIG_MD_RAID10=m
451CONFIG_MD_RAID5=m 452CONFIG_MD_RAID5=m
453# CONFIG_MD_RAID5_RESHAPE is not set
452CONFIG_MD_RAID6=m 454CONFIG_MD_RAID6=m
453CONFIG_MD_MULTIPATH=m 455CONFIG_MD_MULTIPATH=m
454# CONFIG_MD_FAULTY is not set 456# CONFIG_MD_FAULTY is not set
@@ -741,9 +743,7 @@ CONFIG_I2C_ALGOBIT=y
741# CONFIG_SENSORS_PCF8574 is not set 743# CONFIG_SENSORS_PCF8574 is not set
742# CONFIG_SENSORS_PCA9539 is not set 744# CONFIG_SENSORS_PCA9539 is not set
743# CONFIG_SENSORS_PCF8591 is not set 745# CONFIG_SENSORS_PCF8591 is not set
744# CONFIG_SENSORS_RTC8564 is not set
745# CONFIG_SENSORS_MAX6875 is not set 746# CONFIG_SENSORS_MAX6875 is not set
746# CONFIG_RTC_X1205_I2C is not set
747# CONFIG_I2C_DEBUG_CORE is not set 747# CONFIG_I2C_DEBUG_CORE is not set
748# CONFIG_I2C_DEBUG_ALGO is not set 748# CONFIG_I2C_DEBUG_ALGO is not set
749# CONFIG_I2C_DEBUG_BUS is not set 749# CONFIG_I2C_DEBUG_BUS is not set
@@ -826,6 +826,7 @@ CONFIG_FB_CFB_FILLRECT=y
826CONFIG_FB_CFB_COPYAREA=y 826CONFIG_FB_CFB_COPYAREA=y
827CONFIG_FB_CFB_IMAGEBLIT=y 827CONFIG_FB_CFB_IMAGEBLIT=y
828# CONFIG_FB_MACMODES is not set 828# CONFIG_FB_MACMODES is not set
829# CONFIG_FB_FIRMWARE_EDID is not set
829CONFIG_FB_MODE_HELPERS=y 830CONFIG_FB_MODE_HELPERS=y
830CONFIG_FB_TILEBLITTING=y 831CONFIG_FB_TILEBLITTING=y
831# CONFIG_FB_CIRRUS is not set 832# CONFIG_FB_CIRRUS is not set
@@ -1117,6 +1118,11 @@ CONFIG_USB_HIDDEV=y
1117# 1118#
1118 1119
1119# 1120#
1121# Real Time Clock
1122#
1123# CONFIG_RTC_CLASS is not set
1124
1125#
1120# Misc Linux/SPARC drivers 1126# Misc Linux/SPARC drivers
1121# 1127#
1122CONFIG_SUN_OPENPROMIO=m 1128CONFIG_SUN_OPENPROMIO=m
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 7dc28a484268..8175a6968c6b 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -830,9 +830,16 @@ void smp_call_function_client(int irq, struct pt_regs *regs)
830 830
831static void tsb_sync(void *info) 831static void tsb_sync(void *info)
832{ 832{
833 struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
833 struct mm_struct *mm = info; 834 struct mm_struct *mm = info;
834 835
835 if (current->active_mm == mm) 836 /* It is not valid to test "current->active_mm == mm" here.
837 *
838 * The value of "current" is not changed atomically with
839 * switch_mm(). But that's OK, we just need to check the
840 * current cpu's trap block PGD physical address.
841 */
842 if (tp->pgd_paddr == __pa(mm->pgd))
836 tsb_context_switch(mm); 843 tsb_context_switch(mm);
837} 844}
838 845
diff --git a/arch/sparc64/kernel/sys32.S b/arch/sparc64/kernel/sys32.S
index c4a1cef4b1e5..86dd5cb81e09 100644
--- a/arch/sparc64/kernel/sys32.S
+++ b/arch/sparc64/kernel/sys32.S
@@ -136,6 +136,8 @@ SIGN1(sys32_getpeername, sys_getpeername, %o0)
136SIGN1(sys32_getsockname, sys_getsockname, %o0) 136SIGN1(sys32_getsockname, sys_getsockname, %o0)
137SIGN2(sys32_ioprio_get, sys_ioprio_get, %o0, %o1) 137SIGN2(sys32_ioprio_get, sys_ioprio_get, %o0, %o1)
138SIGN3(sys32_ioprio_set, sys_ioprio_set, %o0, %o1, %o2) 138SIGN3(sys32_ioprio_set, sys_ioprio_set, %o0, %o1, %o2)
139SIGN2(sys32_splice, sys_splice, %o0, %o1)
140SIGN2(sys32_sync_file_range, compat_sync_file_range, %o0, %o5)
139 141
140 .globl sys32_mmap2 142 .globl sys32_mmap2
141sys32_mmap2: 143sys32_mmap2:
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
index 2e906bad56fa..31030bf00f1a 100644
--- a/arch/sparc64/kernel/sys_sparc32.c
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -1069,3 +1069,11 @@ long sys32_lookup_dcookie(unsigned long cookie_high,
1069 return sys_lookup_dcookie((cookie_high << 32) | cookie_low, 1069 return sys_lookup_dcookie((cookie_high << 32) | cookie_low,
1070 buf, len); 1070 buf, len);
1071} 1071}
1072
1073long compat_sync_file_range(int fd, unsigned long off_high, unsigned long off_low, unsigned long nb_high, unsigned long nb_low, int flags)
1074{
1075 return sys_sync_file_range(fd,
1076 (off_high << 32) | off_low,
1077 (nb_high << 32) | nb_low,
1078 flags);
1079}
diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S
index 3b250f2318fd..857b82c82875 100644
--- a/arch/sparc64/kernel/systbls.S
+++ b/arch/sparc64/kernel/systbls.S
@@ -66,12 +66,12 @@ sys_call_table32:
66 .word sys32_ipc, sys32_sigreturn, sys_clone, sys32_ioprio_get, compat_sys_adjtimex 66 .word sys32_ipc, sys32_sigreturn, sys_clone, sys32_ioprio_get, compat_sys_adjtimex
67/*220*/ .word sys32_sigprocmask, sys_ni_syscall, sys32_delete_module, sys_ni_syscall, sys32_getpgid 67/*220*/ .word sys32_sigprocmask, sys_ni_syscall, sys32_delete_module, sys_ni_syscall, sys32_getpgid
68 .word sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys32_setfsuid16, sys32_setfsgid16 68 .word sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys32_setfsuid16, sys32_setfsgid16
69/*230*/ .word sys32_select, compat_sys_time, sys_nis_syscall, compat_sys_stime, compat_sys_statfs64 69/*230*/ .word sys32_select, compat_sys_time, sys32_splice, compat_sys_stime, compat_sys_statfs64
70 .word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys32_mlockall 70 .word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys32_mlockall
71/*240*/ .word sys_munlockall, sys32_sched_setparam, sys32_sched_getparam, sys32_sched_setscheduler, sys32_sched_getscheduler 71/*240*/ .word sys_munlockall, sys32_sched_setparam, sys32_sched_getparam, sys32_sched_setscheduler, sys32_sched_getscheduler
72 .word sys_sched_yield, sys32_sched_get_priority_max, sys32_sched_get_priority_min, sys32_sched_rr_get_interval, compat_sys_nanosleep 72 .word sys_sched_yield, sys32_sched_get_priority_max, sys32_sched_get_priority_min, sys32_sched_rr_get_interval, compat_sys_nanosleep
73/*250*/ .word sys32_mremap, sys32_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl 73/*250*/ .word sys32_mremap, sys32_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl
74 .word sys_ni_syscall, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep 74 .word sys32_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
75/*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun 75/*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
76 .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy 76 .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
77/*270*/ .word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink 77/*270*/ .word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink
@@ -135,12 +135,12 @@ sys_call_table:
135 .word sys_ipc, sys_nis_syscall, sys_clone, sys_ioprio_get, sys_adjtimex 135 .word sys_ipc, sys_nis_syscall, sys_clone, sys_ioprio_get, sys_adjtimex
136/*220*/ .word sys_nis_syscall, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid 136/*220*/ .word sys_nis_syscall, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid
137 .word sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid, sys_setfsgid 137 .word sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid, sys_setfsgid
138/*230*/ .word sys_select, sys_nis_syscall, sys_nis_syscall, sys_stime, sys_statfs64 138/*230*/ .word sys_select, sys_nis_syscall, sys_splice, sys_stime, sys_statfs64
139 .word sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall 139 .word sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
140/*240*/ .word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler 140/*240*/ .word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
141 .word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep 141 .word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
142/*250*/ .word sys64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl 142/*250*/ .word sys64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
143 .word sys_ni_syscall, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep 143 .word sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
144/*260*/ .word sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun 144/*260*/ .word sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
145 .word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy 145 .word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy
146/*270*/ .word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink 146/*270*/ .word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 0db2f7d9fab5..6e002aacb961 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -327,8 +327,12 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
327 insn = get_fault_insn(regs, 0); 327 insn = get_fault_insn(regs, 0);
328 if (!insn) 328 if (!insn)
329 goto continue_fault; 329 goto continue_fault;
330 /* All loads, stores and atomics have bits 30 and 31 both set
331 * in the instruction. Bit 21 is set in all stores, but we
332 * have to avoid prefetches which also have bit 21 set.
333 */
330 if ((insn & 0xc0200000) == 0xc0200000 && 334 if ((insn & 0xc0200000) == 0xc0200000 &&
331 (insn & 0x1780000) != 0x1680000) { 335 (insn & 0x01780000) != 0x01680000) {
332 /* Don't bother updating thread struct value, 336 /* Don't bother updating thread struct value,
333 * because update_mmu_cache only cares which tlb 337 * because update_mmu_cache only cares which tlb
334 * the access came from. 338 * the access came from.
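
As a worked illustration of the masks in the new comment (not part of the patch; the instruction words below are hand-assembled and purely illustrative), a store matches both tests while a PREFETCH, which also carries bit 21, is filtered out by the second test.

/* Illustration only: decoding the fault-path masks above.
 * Loads/stores/atomics use op = 3 (bits 31-30 set); bit 21 is op3 bit 2,
 * set in stores but also in PREFETCH, which the second mask rejects. */
#include <assert.h>

int main(void)
{
        unsigned int stx      = 0xc2708000;     /* stx %g1, [%g2 + %g0] (hand-encoded) */
        unsigned int prefetch = 0xc1688000;     /* prefetch [%g2 + %g0], 0 (hand-encoded) */

        /* The store matches both tests and is treated as a memory access. */
        assert((stx & 0xc0200000) == 0xc0200000);
        assert((stx & 0x01780000) != 0x01680000);

        /* The prefetch matches the first test but is excluded by the second. */
        assert((prefetch & 0xc0200000) == 0xc0200000);
        assert((prefetch & 0x01780000) == 0x01680000);

        return 0;
}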
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index 074620d413d4..fbbbebbad8a4 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -198,6 +198,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
198 pmd_t *pmd; 198 pmd_t *pmd;
199 pte_t *pte = NULL; 199 pte_t *pte = NULL;
200 200
201 /* We must align the address, because our caller will run
202 * set_huge_pte_at() on whatever we return, which writes out
203 * all of the sub-ptes for the hugepage range. So we have
204 * to give it the first such sub-pte.
205 */
206 addr &= HPAGE_MASK;
207
201 pgd = pgd_offset(mm, addr); 208 pgd = pgd_offset(mm, addr);
202 pud = pud_alloc(mm, pgd, addr); 209 pud = pud_alloc(mm, pgd, addr);
203 if (pud) { 210 if (pud) {
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 5982fe2753e0..05fbb20636cb 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -22,6 +22,9 @@ config SBUS
22config PCI 22config PCI
23 bool 23 bool
24 24
25config PCMCIA
26 bool
27
25config GENERIC_CALIBRATE_DELAY 28config GENERIC_CALIBRATE_DELAY
26 bool 29 bool
27 default y 30 default y
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 8d14c7a831be..24790bed2054 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -20,7 +20,7 @@ core-y += $(ARCH_DIR)/kernel/ \
20 20
21# Have to precede the include because the included Makefiles reference them. 21# Have to precede the include because the included Makefiles reference them.
22SYMLINK_HEADERS := archparam.h system.h sigcontext.h processor.h ptrace.h \ 22SYMLINK_HEADERS := archparam.h system.h sigcontext.h processor.h ptrace.h \
23 module.h vm-flags.h elf.h ldt.h 23 module.h vm-flags.h elf.h host_ldt.h
24SYMLINK_HEADERS := $(foreach header,$(SYMLINK_HEADERS),include/asm-um/$(header)) 24SYMLINK_HEADERS := $(foreach header,$(SYMLINK_HEADERS),include/asm-um/$(header))
25 25
26# XXX: The "os" symlink is only used by arch/um/include/os.h, which includes 26# XXX: The "os" symlink is only used by arch/um/include/os.h, which includes
@@ -129,7 +129,7 @@ CPPFLAGS_vmlinux.lds = -U$(SUBARCH) \
129 -DSTART=$(START) -DELF_ARCH=$(ELF_ARCH) \ 129 -DSTART=$(START) -DELF_ARCH=$(ELF_ARCH) \
130 -DELF_FORMAT="$(ELF_FORMAT)" $(CPP_MODE-y) \ 130 -DELF_FORMAT="$(ELF_FORMAT)" $(CPP_MODE-y) \
131 -DKERNEL_STACK_SIZE=$(STACK_SIZE) \ 131 -DKERNEL_STACK_SIZE=$(STACK_SIZE) \
132 -DUNMAP_PATH=arch/um/sys-$(SUBARCH)/unmap_fin.o 132 -DUNMAP_PATH=arch/um/sys-$(SUBARCH)/unmap.o
133 133
134#The wrappers will select whether using "malloc" or the kernel allocator. 134#The wrappers will select whether using "malloc" or the kernel allocator.
135LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc 135LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc
@@ -150,8 +150,7 @@ CLEAN_FILES += linux x.i gmon.out $(ARCH_DIR)/include/uml-config.h \
150 $(ARCH_DIR)/include/user_constants.h \ 150 $(ARCH_DIR)/include/user_constants.h \
151 $(ARCH_DIR)/include/kern_constants.h $(ARCH_DIR)/Kconfig.arch 151 $(ARCH_DIR)/include/kern_constants.h $(ARCH_DIR)/Kconfig.arch
152 152
153MRPROPER_FILES += $(SYMLINK_HEADERS) $(ARCH_SYMLINKS) \ 153MRPROPER_FILES += $(ARCH_SYMLINKS)
154 $(addprefix $(ARCH_DIR)/kernel/,$(KERN_SYMLINKS)) $(ARCH_DIR)/os
155 154
156archclean: 155archclean:
157 @find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \ 156 @find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \
diff --git a/arch/um/Makefile-x86_64 b/arch/um/Makefile-x86_64
index 38df311e75dc..dfd88b652fbe 100644
--- a/arch/um/Makefile-x86_64
+++ b/arch/um/Makefile-x86_64
@@ -1,7 +1,7 @@
1# Copyright 2003 - 2004 Pathscale, Inc 1# Copyright 2003 - 2004 Pathscale, Inc
2# Released under the GPL 2# Released under the GPL
3 3
4libs-y += arch/um/sys-x86_64/ 4core-y += arch/um/sys-x86_64/
5START := 0x60000000 5START := 0x60000000
6 6
7#We #undef __x86_64__ for kernelspace, not for userspace where 7#We #undef __x86_64__ for kernelspace, not for userspace where
diff --git a/arch/um/drivers/daemon_kern.c b/arch/um/drivers/daemon_kern.c
index a61b7b46bc02..53d09ed78b42 100644
--- a/arch/um/drivers/daemon_kern.c
+++ b/arch/um/drivers/daemon_kern.c
@@ -95,18 +95,7 @@ static struct transport daemon_transport = {
95static int register_daemon(void) 95static int register_daemon(void)
96{ 96{
97 register_transport(&daemon_transport); 97 register_transport(&daemon_transport);
98 return(1); 98 return 0;
99} 99}
100 100
101__initcall(register_daemon); 101__initcall(register_daemon);
102
103/*
104 * Overrides for Emacs so that we follow Linus's tabbing style.
105 * Emacs will notice this stuff at the end of the file and automatically
106 * adjust the settings for this buffer only. This must remain at the end
107 * of the file.
108 * ---------------------------------------------------------------------------
109 * Local variables:
110 * c-file-style: "linux"
111 * End:
112 */
diff --git a/arch/um/drivers/harddog_kern.c b/arch/um/drivers/harddog_kern.c
index 49acb2badf32..d18a974735e6 100644
--- a/arch/um/drivers/harddog_kern.c
+++ b/arch/um/drivers/harddog_kern.c
@@ -104,7 +104,7 @@ static int harddog_release(struct inode *inode, struct file *file)
104 104
105extern int ping_watchdog(int fd); 105extern int ping_watchdog(int fd);
106 106
107static ssize_t harddog_write(struct file *file, const char *data, size_t len, 107static ssize_t harddog_write(struct file *file, const char __user *data, size_t len,
108 loff_t *ppos) 108 loff_t *ppos)
109{ 109{
110 /* 110 /*
@@ -118,6 +118,7 @@ static ssize_t harddog_write(struct file *file, const char *data, size_t len,
118static int harddog_ioctl(struct inode *inode, struct file *file, 118static int harddog_ioctl(struct inode *inode, struct file *file,
119 unsigned int cmd, unsigned long arg) 119 unsigned int cmd, unsigned long arg)
120{ 120{
121 void __user *argp= (void __user *)arg;
121 static struct watchdog_info ident = { 122 static struct watchdog_info ident = {
122 WDIOC_SETTIMEOUT, 123 WDIOC_SETTIMEOUT,
123 0, 124 0,
@@ -127,13 +128,12 @@ static int harddog_ioctl(struct inode *inode, struct file *file,
127 default: 128 default:
128 return -ENOTTY; 129 return -ENOTTY;
129 case WDIOC_GETSUPPORT: 130 case WDIOC_GETSUPPORT:
130 if(copy_to_user((struct harddog_info *)arg, &ident, 131 if(copy_to_user(argp, &ident, sizeof(ident)))
131 sizeof(ident)))
132 return -EFAULT; 132 return -EFAULT;
133 return 0; 133 return 0;
134 case WDIOC_GETSTATUS: 134 case WDIOC_GETSTATUS:
135 case WDIOC_GETBOOTSTATUS: 135 case WDIOC_GETBOOTSTATUS:
136 return put_user(0,(int *)arg); 136 return put_user(0,(int __user *)argp);
137 case WDIOC_KEEPALIVE: 137 case WDIOC_KEEPALIVE:
138 return(ping_watchdog(harddog_out_fd)); 138 return(ping_watchdog(harddog_out_fd));
139 } 139 }
diff --git a/arch/um/drivers/hostaudio_kern.c b/arch/um/drivers/hostaudio_kern.c
index 59602b81b240..37232f908cd7 100644
--- a/arch/um/drivers/hostaudio_kern.c
+++ b/arch/um/drivers/hostaudio_kern.c
@@ -67,8 +67,8 @@ MODULE_PARM_DESC(mixer, MIXER_HELP);
67 67
68/* /dev/dsp file operations */ 68/* /dev/dsp file operations */
69 69
70static ssize_t hostaudio_read(struct file *file, char *buffer, size_t count, 70static ssize_t hostaudio_read(struct file *file, char __user *buffer,
71 loff_t *ppos) 71 size_t count, loff_t *ppos)
72{ 72{
73 struct hostaudio_state *state = file->private_data; 73 struct hostaudio_state *state = file->private_data;
74 void *kbuf; 74 void *kbuf;
@@ -94,7 +94,7 @@ static ssize_t hostaudio_read(struct file *file, char *buffer, size_t count,
94 return(err); 94 return(err);
95} 95}
96 96
97static ssize_t hostaudio_write(struct file *file, const char *buffer, 97static ssize_t hostaudio_write(struct file *file, const char __user *buffer,
98 size_t count, loff_t *ppos) 98 size_t count, loff_t *ppos)
99{ 99{
100 struct hostaudio_state *state = file->private_data; 100 struct hostaudio_state *state = file->private_data;
@@ -152,7 +152,7 @@ static int hostaudio_ioctl(struct inode *inode, struct file *file,
152 case SNDCTL_DSP_CHANNELS: 152 case SNDCTL_DSP_CHANNELS:
153 case SNDCTL_DSP_SUBDIVIDE: 153 case SNDCTL_DSP_SUBDIVIDE:
154 case SNDCTL_DSP_SETFRAGMENT: 154 case SNDCTL_DSP_SETFRAGMENT:
155 if(get_user(data, (int *) arg)) 155 if(get_user(data, (int __user *) arg))
156 return(-EFAULT); 156 return(-EFAULT);
157 break; 157 break;
158 default: 158 default:
@@ -168,7 +168,7 @@ static int hostaudio_ioctl(struct inode *inode, struct file *file,
168 case SNDCTL_DSP_CHANNELS: 168 case SNDCTL_DSP_CHANNELS:
169 case SNDCTL_DSP_SUBDIVIDE: 169 case SNDCTL_DSP_SUBDIVIDE:
170 case SNDCTL_DSP_SETFRAGMENT: 170 case SNDCTL_DSP_SETFRAGMENT:
171 if(put_user(data, (int *) arg)) 171 if(put_user(data, (int __user *) arg))
172 return(-EFAULT); 172 return(-EFAULT);
173 break; 173 break;
174 default: 174 default:
diff --git a/arch/um/drivers/mcast_kern.c b/arch/um/drivers/mcast_kern.c
index c9b078fba03e..3a7af18cf944 100644
--- a/arch/um/drivers/mcast_kern.c
+++ b/arch/um/drivers/mcast_kern.c
@@ -124,18 +124,7 @@ static struct transport mcast_transport = {
124static int register_mcast(void) 124static int register_mcast(void)
125{ 125{
126 register_transport(&mcast_transport); 126 register_transport(&mcast_transport);
127 return(1); 127 return 0;
128} 128}
129 129
130__initcall(register_mcast); 130__initcall(register_mcast);
131
132/*
133 * Overrides for Emacs so that we follow Linus's tabbing style.
134 * Emacs will notice this stuff at the end of the file and automatically
135 * adjust the settings for this buffer only. This must remain at the end
136 * of the file.
137 * ---------------------------------------------------------------------------
138 * Local variables:
139 * c-file-style: "linux"
140 * End:
141 */
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 1488816588ea..28e3760e8b98 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -20,6 +20,8 @@
20#include "linux/namei.h" 20#include "linux/namei.h"
21#include "linux/proc_fs.h" 21#include "linux/proc_fs.h"
22#include "linux/syscalls.h" 22#include "linux/syscalls.h"
23#include "linux/list.h"
24#include "linux/mm.h"
23#include "linux/console.h" 25#include "linux/console.h"
24#include "asm/irq.h" 26#include "asm/irq.h"
25#include "asm/uaccess.h" 27#include "asm/uaccess.h"
@@ -347,6 +349,142 @@ static struct mc_device *mconsole_find_dev(char *name)
347 return(NULL); 349 return(NULL);
348} 350}
349 351
352#define UNPLUGGED_PER_PAGE \
353 ((PAGE_SIZE - sizeof(struct list_head)) / sizeof(unsigned long))
354
355struct unplugged_pages {
356 struct list_head list;
357 void *pages[UNPLUGGED_PER_PAGE];
358};
359
360static unsigned long long unplugged_pages_count = 0;
361static struct list_head unplugged_pages = LIST_HEAD_INIT(unplugged_pages);
362static int unplug_index = UNPLUGGED_PER_PAGE;
363
364static int mem_config(char *str)
365{
366 unsigned long long diff;
367 int err = -EINVAL, i, add;
368 char *ret;
369
370 if(str[0] != '=')
371 goto out;
372
373 str++;
374 if(str[0] == '-')
375 add = 0;
376 else if(str[0] == '+'){
377 add = 1;
378 }
379 else goto out;
380
381 str++;
382 diff = memparse(str, &ret);
383 if(*ret != '\0')
384 goto out;
385
386 diff /= PAGE_SIZE;
387
388 for(i = 0; i < diff; i++){
389 struct unplugged_pages *unplugged;
390 void *addr;
391
392 if(add){
393 if(list_empty(&unplugged_pages))
394 break;
395
396 unplugged = list_entry(unplugged_pages.next,
397 struct unplugged_pages, list);
398 if(unplug_index > 0)
399 addr = unplugged->pages[--unplug_index];
400 else {
401 list_del(&unplugged->list);
402 addr = unplugged;
403 unplug_index = UNPLUGGED_PER_PAGE;
404 }
405
406 free_page((unsigned long) addr);
407 unplugged_pages_count--;
408 }
409 else {
410 struct page *page;
411
412 page = alloc_page(GFP_ATOMIC);
413 if(page == NULL)
414 break;
415
416 unplugged = page_address(page);
417 if(unplug_index == UNPLUGGED_PER_PAGE){
418 INIT_LIST_HEAD(&unplugged->list);
419 list_add(&unplugged->list, &unplugged_pages);
420 unplug_index = 0;
421 }
422 else {
423 struct list_head *entry = unplugged_pages.next;
424 addr = unplugged;
425
426 unplugged = list_entry(entry,
427 struct unplugged_pages,
428 list);
429 unplugged->pages[unplug_index++] = addr;
430 err = os_drop_memory(addr, PAGE_SIZE);
431 if(err)
432 printk("Failed to release memory - "
433 "errno = %d\n", err);
434 }
435
436 unplugged_pages_count++;
437 }
438 }
439
440 err = 0;
441out:
442 return err;
443}
444
445static int mem_get_config(char *name, char *str, int size, char **error_out)
446{
447 char buf[sizeof("18446744073709551615")];
448 int len = 0;
449
450 sprintf(buf, "%ld", uml_physmem);
451 CONFIG_CHUNK(str, size, len, buf, 1);
452
453 return len;
454}
455
456static int mem_id(char **str, int *start_out, int *end_out)
457{
458 *start_out = 0;
459 *end_out = 0;
460
461 return 0;
462}
463
464static int mem_remove(int n)
465{
466 return -EBUSY;
467}
468
469static struct mc_device mem_mc = {
470 .name = "mem",
471 .config = mem_config,
472 .get_config = mem_get_config,
473 .id = mem_id,
474 .remove = mem_remove,
475};
476
477static int mem_mc_init(void)
478{
479 if(can_drop_memory())
480 mconsole_register_dev(&mem_mc);
481 else printk("Can't release memory to the host - memory hotplug won't "
482 "be supported\n");
483 return 0;
484}
485
486__initcall(mem_mc_init);
487
350#define CONFIG_BUF_SIZE 64 488#define CONFIG_BUF_SIZE 64
351 489
352static void mconsole_get_config(int (*get_config)(char *, char *, int, 490static void mconsole_get_config(int (*get_config)(char *, char *, int,
@@ -478,7 +616,7 @@ static void console_write(struct console *console, const char *string,
478 return; 616 return;
479 617
480 while(1){ 618 while(1){
481 n = min(len, ARRAY_SIZE(console_buf) - console_index); 619 n = min((size_t)len, ARRAY_SIZE(console_buf) - console_index);
482 strncpy(&console_buf[console_index], string, n); 620 strncpy(&console_buf[console_index], string, n);
483 console_index += n; 621 console_index += n;
484 string += n; 622 string += n;
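
The new "mem" mconsole device accepts a "mem=+<size>" or "mem=-<size>" config string, parsed by mem_config() via memparse(), and it remembers unplugged pages in index pages carved out of the unplugged pool itself, so the bookkeeping consumes no extra memory. An illustrative userspace model of that bookkeeping (not the driver code itself), with malloc()/free() standing in for alloc_page()/free_page() and an assumed 4 KiB page size:

#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL
#define PER_PAGE ((PAGE_SIZE - sizeof(void *)) / sizeof(void *))

struct holder {                         /* a released page reused as an index page */
        struct holder *next;
        void *pages[PER_PAGE];
};

static struct holder *head;
static unsigned long fill = PER_PAGE;   /* used slots in the current index page */

static void unplug_one(void)
{
        void *page = malloc(PAGE_SIZE); /* stands in for alloc_page(GFP_ATOMIC) */

        if (page == NULL)
                return;
        if (fill == PER_PAGE) {         /* index page full: this page becomes the new one */
                struct holder *h = page;

                memset(h, 0, sizeof(*h));
                h->next = head;
                head = h;
                fill = 0;
        } else {
                head->pages[fill++] = page;     /* record it; the real driver then
                                                 * calls os_drop_memory() on it */
        }
}

From the host, the corresponding command is expected to look like "uml_mconsole <umid> config mem=-64M" to release memory to the host and "config mem=+64M" to give it back.
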
diff --git a/arch/um/drivers/pcap_kern.c b/arch/um/drivers/pcap_kern.c
index 07c80f2156ef..466ff2c2f918 100644
--- a/arch/um/drivers/pcap_kern.c
+++ b/arch/um/drivers/pcap_kern.c
@@ -106,18 +106,7 @@ static struct transport pcap_transport = {
106static int register_pcap(void) 106static int register_pcap(void)
107{ 107{
108 register_transport(&pcap_transport); 108 register_transport(&pcap_transport);
109 return(1); 109 return 0;
110} 110}
111 111
112__initcall(register_pcap); 112__initcall(register_pcap);
113
114/*
115 * Overrides for Emacs so that we follow Linus's tabbing style.
116 * Emacs will notice this stuff at the end of the file and automatically
117 * adjust the settings for this buffer only. This must remain at the end
118 * of the file.
119 * ---------------------------------------------------------------------------
120 * Local variables:
121 * c-file-style: "linux"
122 * End:
123 */
diff --git a/arch/um/drivers/slip_kern.c b/arch/um/drivers/slip_kern.c
index a62f5ef445cf..163ee0d5f75e 100644
--- a/arch/um/drivers/slip_kern.c
+++ b/arch/um/drivers/slip_kern.c
@@ -93,18 +93,7 @@ static struct transport slip_transport = {
93static int register_slip(void) 93static int register_slip(void)
94{ 94{
95 register_transport(&slip_transport); 95 register_transport(&slip_transport);
96 return(1); 96 return 0;
97} 97}
98 98
99__initcall(register_slip); 99__initcall(register_slip);
100
101/*
102 * Overrides for Emacs so that we follow Linus's tabbing style.
103 * Emacs will notice this stuff at the end of the file and automatically
104 * adjust the settings for this buffer only. This must remain at the end
105 * of the file.
106 * ---------------------------------------------------------------------------
107 * Local variables:
108 * c-file-style: "linux"
109 * End:
110 */
diff --git a/arch/um/drivers/slirp_kern.c b/arch/um/drivers/slirp_kern.c
index 33d7982be5d3..95e50c943e14 100644
--- a/arch/um/drivers/slirp_kern.c
+++ b/arch/um/drivers/slirp_kern.c
@@ -77,7 +77,7 @@ static int slirp_setup(char *str, char **mac_out, void *data)
77 int i=0; 77 int i=0;
78 78
79 *init = ((struct slirp_init) 79 *init = ((struct slirp_init)
80 { argw : { { "slirp", NULL } } }); 80 { .argw = { { "slirp", NULL } } });
81 81
82 str = split_if_spec(str, mac_out, NULL); 82 str = split_if_spec(str, mac_out, NULL);
83 83
@@ -116,18 +116,7 @@ static struct transport slirp_transport = {
116static int register_slirp(void) 116static int register_slirp(void)
117{ 117{
118 register_transport(&slirp_transport); 118 register_transport(&slirp_transport);
119 return(1); 119 return 0;
120} 120}
121 121
122__initcall(register_slirp); 122__initcall(register_slirp);
123
124/*
125 * Overrides for Emacs so that we follow Linus's tabbing style.
126 * Emacs will notice this stuff at the end of the file and automatically
127 * adjust the settings for this buffer only. This must remain at the end
128 * of the file.
129 * ---------------------------------------------------------------------------
130 * Local variables:
131 * c-file-style: "linux"
132 * End:
133 */
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 0336575d2448..0897852b09a3 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -891,7 +891,7 @@ int ubd_driver_init(void){
891 SA_INTERRUPT, "ubd", ubd_dev); 891 SA_INTERRUPT, "ubd", ubd_dev);
892 if(err != 0) 892 if(err != 0)
893 printk(KERN_ERR "um_request_irq failed - errno = %d\n", -err); 893 printk(KERN_ERR "um_request_irq failed - errno = %d\n", -err);
894 return(err); 894 return 0;
895} 895}
896 896
897device_initcall(ubd_driver_init); 897device_initcall(ubd_driver_init);
diff --git a/arch/um/include/kern_util.h b/arch/um/include/kern_util.h
index 07176d92e1c9..42557130a408 100644
--- a/arch/um/include/kern_util.h
+++ b/arch/um/include/kern_util.h
@@ -116,7 +116,11 @@ extern void *get_current(void);
116extern struct task_struct *get_task(int pid, int require); 116extern struct task_struct *get_task(int pid, int require);
117extern void machine_halt(void); 117extern void machine_halt(void);
118extern int is_syscall(unsigned long addr); 118extern int is_syscall(unsigned long addr);
119extern void arch_switch(void); 119
120extern void arch_switch_to_tt(struct task_struct *from, struct task_struct *to);
121
122extern void arch_switch_to_skas(struct task_struct *from, struct task_struct *to);
123
120extern void free_irq(unsigned int, void *); 124extern void free_irq(unsigned int, void *);
121extern int cpu(void); 125extern int cpu(void);
122 126
diff --git a/arch/um/include/line.h b/arch/um/include/line.h
index 6f4d680dc1d4..6ac0f8252e21 100644
--- a/arch/um/include/line.h
+++ b/arch/um/include/line.h
@@ -58,23 +58,17 @@ struct line {
58}; 58};
59 59
60#define LINE_INIT(str, d) \ 60#define LINE_INIT(str, d) \
61 { init_str : str, \ 61 { .init_str = str, \
62 init_pri : INIT_STATIC, \ 62 .init_pri = INIT_STATIC, \
63 valid : 1, \ 63 .valid = 1, \
64 throttled : 0, \ 64 .lock = SPIN_LOCK_UNLOCKED, \
65 lock : SPIN_LOCK_UNLOCKED, \ 65 .driver = d }
66 buffer : NULL, \
67 head : NULL, \
68 tail : NULL, \
69 sigio : 0, \
70 driver : d, \
71 have_irq : 0 }
72 66
73struct lines { 67struct lines {
74 int num; 68 int num;
75}; 69};
76 70
77#define LINES_INIT(n) { num : n } 71#define LINES_INIT(n) { .num = n }
78 72
79extern void line_close(struct tty_struct *tty, struct file * filp); 73extern void line_close(struct tty_struct *tty, struct file * filp);
80extern int line_open(struct line *lines, struct tty_struct *tty); 74extern int line_open(struct line *lines, struct tty_struct *tty);
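
LINE_INIT() and LINES_INIT() switch from the old GCC-specific "field: value" initializer syntax to standard C99 designated initializers, and drop members that static initialization already zeroes. The two spellings, shown on a throwaway struct purely for illustration:

struct point { int x, y; };

static struct point a = { x: 1, y: 2 };         /* legacy GCC extension */
static struct point b = { .x = 1, .y = 2 };     /* standard C99 form */
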
diff --git a/arch/um/include/mem_user.h b/arch/um/include/mem_user.h
index a1064c5823bf..a54514d2cc3a 100644
--- a/arch/um/include/mem_user.h
+++ b/arch/um/include/mem_user.h
@@ -49,7 +49,6 @@ extern int iomem_size;
49extern unsigned long host_task_size; 49extern unsigned long host_task_size;
50extern unsigned long task_size; 50extern unsigned long task_size;
51 51
52extern void check_devanon(void);
53extern int init_mem_user(void); 52extern int init_mem_user(void);
54extern void setup_memory(void *entry); 53extern void setup_memory(void *entry);
55extern unsigned long find_iomem(char *driver, unsigned long *len_out); 54extern unsigned long find_iomem(char *driver, unsigned long *len_out);
diff --git a/arch/um/include/os.h b/arch/um/include/os.h
index d3d1bc6074ef..f88856c28a66 100644
--- a/arch/um/include/os.h
+++ b/arch/um/include/os.h
@@ -13,6 +13,7 @@
13#include "kern_util.h" 13#include "kern_util.h"
14#include "skas/mm_id.h" 14#include "skas/mm_id.h"
15#include "irq_user.h" 15#include "irq_user.h"
16#include "sysdep/tls.h"
16 17
17#define OS_TYPE_FILE 1 18#define OS_TYPE_FILE 1
18#define OS_TYPE_DIR 2 19#define OS_TYPE_DIR 2
@@ -172,6 +173,7 @@ extern int os_fchange_dir(int fd);
172extern void os_early_checks(void); 173extern void os_early_checks(void);
173extern int can_do_skas(void); 174extern int can_do_skas(void);
174extern void os_check_bugs(void); 175extern void os_check_bugs(void);
176extern void check_host_supports_tls(int *supports_tls, int *tls_min);
175 177
176/* Make sure they are clear when running in TT mode. Required by 178/* Make sure they are clear when running in TT mode. Required by
177 * SEGV_MAYBE_FIXABLE */ 179 * SEGV_MAYBE_FIXABLE */
@@ -205,6 +207,8 @@ extern int os_map_memory(void *virt, int fd, unsigned long long off,
205extern int os_protect_memory(void *addr, unsigned long len, 207extern int os_protect_memory(void *addr, unsigned long len,
206 int r, int w, int x); 208 int r, int w, int x);
207extern int os_unmap_memory(void *addr, int len); 209extern int os_unmap_memory(void *addr, int len);
210extern int os_drop_memory(void *addr, int length);
211extern int can_drop_memory(void);
208extern void os_flush_stdout(void); 212extern void os_flush_stdout(void);
209 213
210/* tt.c 214/* tt.c
@@ -234,8 +238,12 @@ extern int run_helper_thread(int (*proc)(void *), void *arg,
234 int stack_order); 238 int stack_order);
235extern int helper_wait(int pid); 239extern int helper_wait(int pid);
236 240
237/* umid.c */
238 241
242/* tls.c */
243extern int os_set_thread_area(user_desc_t *info, int pid);
244extern int os_get_thread_area(user_desc_t *info, int pid);
245
246/* umid.c */
239extern int umid_file_name(char *name, char *buf, int len); 247extern int umid_file_name(char *name, char *buf, int len);
240extern int set_umid(char *name); 248extern int set_umid(char *name);
241extern char *get_umid(void); 249extern char *get_umid(void);
diff --git a/arch/um/include/sysdep-i386/checksum.h b/arch/um/include/sysdep-i386/checksum.h
index 7d3d202d7fff..052bb061a978 100644
--- a/arch/um/include/sysdep-i386/checksum.h
+++ b/arch/um/include/sysdep-i386/checksum.h
@@ -48,7 +48,8 @@ unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *
48 */ 48 */
49 49
50static __inline__ 50static __inline__
51unsigned int csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst, 51unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
52 unsigned char *dst,
52 int len, int sum, int *err_ptr) 53 int len, int sum, int *err_ptr)
53{ 54{
54 if(copy_from_user(dst, src, len)){ 55 if(copy_from_user(dst, src, len)){
@@ -192,7 +193,7 @@ static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
192 */ 193 */
193#define HAVE_CSUM_COPY_USER 194#define HAVE_CSUM_COPY_USER
194static __inline__ unsigned int csum_and_copy_to_user(const unsigned char *src, 195static __inline__ unsigned int csum_and_copy_to_user(const unsigned char *src,
195 unsigned char *dst, 196 unsigned char __user *dst,
196 int len, int sum, int *err_ptr) 197 int len, int sum, int *err_ptr)
197{ 198{
198 if (access_ok(VERIFY_WRITE, dst, len)){ 199 if (access_ok(VERIFY_WRITE, dst, len)){
diff --git a/arch/um/include/sysdep-i386/ptrace.h b/arch/um/include/sysdep-i386/ptrace.h
index c8ee9559f3ab..6670cc992ecb 100644
--- a/arch/um/include/sysdep-i386/ptrace.h
+++ b/arch/um/include/sysdep-i386/ptrace.h
@@ -14,7 +14,12 @@
14#define MAX_REG_NR (UM_FRAME_SIZE / sizeof(unsigned long)) 14#define MAX_REG_NR (UM_FRAME_SIZE / sizeof(unsigned long))
15#define MAX_REG_OFFSET (UM_FRAME_SIZE) 15#define MAX_REG_OFFSET (UM_FRAME_SIZE)
16 16
17#ifdef UML_CONFIG_PT_PROXY
17extern void update_debugregs(int seq); 18extern void update_debugregs(int seq);
19#else
20static inline void update_debugregs(int seq) {}
21#endif
22
18 23
19/* syscall emulation path in ptrace */ 24/* syscall emulation path in ptrace */
20 25
diff --git a/arch/um/include/sysdep-i386/tls.h b/arch/um/include/sysdep-i386/tls.h
new file mode 100644
index 000000000000..918fd3c5ff9c
--- /dev/null
+++ b/arch/um/include/sysdep-i386/tls.h
@@ -0,0 +1,32 @@
1#ifndef _SYSDEP_TLS_H
2#define _SYSDEP_TLS_H
3
4# ifndef __KERNEL__
5
6/* Change name to avoid conflicts with the original one from <asm/ldt.h>, which
7 * may be named user_desc (but in 2.4 and in header matching its API was named
8 * modify_ldt_ldt_s). */
9
10typedef struct um_dup_user_desc {
11 unsigned int entry_number;
12 unsigned int base_addr;
13 unsigned int limit;
14 unsigned int seg_32bit:1;
15 unsigned int contents:2;
16 unsigned int read_exec_only:1;
17 unsigned int limit_in_pages:1;
18 unsigned int seg_not_present:1;
19 unsigned int useable:1;
20} user_desc_t;
21
22# else /* __KERNEL__ */
23
24# include <asm/ldt.h>
25typedef struct user_desc user_desc_t;
26
27# endif /* __KERNEL__ */
28
29#define GDT_ENTRY_TLS_MIN_I386 6
30#define GDT_ENTRY_TLS_MIN_X86_64 12
31
32#endif /* _SYSDEP_TLS_H */
diff --git a/arch/um/include/sysdep-x86_64/tls.h b/arch/um/include/sysdep-x86_64/tls.h
new file mode 100644
index 000000000000..35f19f25bd3b
--- /dev/null
+++ b/arch/um/include/sysdep-x86_64/tls.h
@@ -0,0 +1,29 @@
1#ifndef _SYSDEP_TLS_H
2#define _SYSDEP_TLS_H
3
4# ifndef __KERNEL__
5
6/* Change name to avoid conflicts with the original one from <asm/ldt.h>, which
7 * may be named user_desc (but in 2.4 and in header matching its API was named
8 * modify_ldt_ldt_s). */
9
10typedef struct um_dup_user_desc {
11 unsigned int entry_number;
12 unsigned int base_addr;
13 unsigned int limit;
14 unsigned int seg_32bit:1;
15 unsigned int contents:2;
16 unsigned int read_exec_only:1;
17 unsigned int limit_in_pages:1;
18 unsigned int seg_not_present:1;
19 unsigned int useable:1;
20 unsigned int lm:1;
21} user_desc_t;
22
23# else /* __KERNEL__ */
24
25# include <asm/ldt.h>
26typedef struct user_desc user_desc_t;
27
28# endif /* __KERNEL__ */
29#endif /* _SYSDEP_TLS_H */
diff --git a/arch/um/include/user_util.h b/arch/um/include/user_util.h
index 992a7e1e0fca..fe0c29b5144d 100644
--- a/arch/um/include/user_util.h
+++ b/arch/um/include/user_util.h
@@ -8,6 +8,9 @@
8 8
9#include "sysdep/ptrace.h" 9#include "sysdep/ptrace.h"
10 10
11/* Copied from kernel.h */
12#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
13
11#define CATCH_EINTR(expr) while ((errno = 0, ((expr) < 0)) && (errno == EINTR)) 14#define CATCH_EINTR(expr) while ((errno = 0, ((expr) < 0)) && (errno == EINTR))
12 15
13extern int mode_tt; 16extern int mode_tt;
@@ -31,7 +34,7 @@ extern unsigned long uml_physmem;
31extern unsigned long uml_reserved; 34extern unsigned long uml_reserved;
32extern unsigned long end_vm; 35extern unsigned long end_vm;
33extern unsigned long start_vm; 36extern unsigned long start_vm;
34extern unsigned long highmem; 37extern unsigned long long highmem;
35 38
36extern char host_info[]; 39extern char host_info[];
37 40
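
user_util.h now carries a private copy of kernel.h's ARRAY_SIZE() so the userspace-built parts of UML (such as the TLS probing code added later in this patch) can size fixed tables without hardcoding element counts. A trivial illustration; the array contents are made up:

#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

int main(void)
{
        int candidates[] = { 6, 12 };   /* hypothetical table */

        /* only valid on real arrays, never on pointers */
        printf("%zu entries\n", ARRAY_SIZE(candidates));
        return 0;
}
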
diff --git a/arch/um/kernel/exec_kern.c b/arch/um/kernel/exec_kern.c
index 1ca84319317d..c0cb627bf594 100644
--- a/arch/um/kernel/exec_kern.c
+++ b/arch/um/kernel/exec_kern.c
@@ -22,6 +22,7 @@
22 22
23void flush_thread(void) 23void flush_thread(void)
24{ 24{
25 arch_flush_thread(&current->thread.arch);
25 CHOOSE_MODE(flush_thread_tt(), flush_thread_skas()); 26 CHOOSE_MODE(flush_thread_tt(), flush_thread_skas());
26} 27}
27 28
@@ -58,14 +59,14 @@ long um_execve(char *file, char __user *__user *argv, char __user *__user *env)
58 return(err); 59 return(err);
59} 60}
60 61
61long sys_execve(char *file, char __user *__user *argv, 62long sys_execve(char __user *file, char __user *__user *argv,
62 char __user *__user *env) 63 char __user *__user *env)
63{ 64{
64 long error; 65 long error;
65 char *filename; 66 char *filename;
66 67
67 lock_kernel(); 68 lock_kernel();
68 filename = getname((char __user *) file); 69 filename = getname(file);
69 error = PTR_ERR(filename); 70 error = PTR_ERR(filename);
70 if (IS_ERR(filename)) goto out; 71 if (IS_ERR(filename)) goto out;
71 error = execve1(filename, argv, env); 72 error = execve1(filename, argv, env);
@@ -74,14 +75,3 @@ long sys_execve(char *file, char __user *__user *argv,
74 unlock_kernel(); 75 unlock_kernel();
75 return(error); 76 return(error);
76} 77}
77
78/*
79 * Overrides for Emacs so that we follow Linus's tabbing style.
80 * Emacs will notice this stuff at the end of the file and automatically
81 * adjust the settings for this buffer only. This must remain at the end
82 * of the file.
83 * ---------------------------------------------------------------------------
84 * Local variables:
85 * c-file-style: "linux"
86 * End:
87 */
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 92cce96b5e24..44e41a35f000 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -30,7 +30,7 @@ extern char __binary_start;
30unsigned long *empty_zero_page = NULL; 30unsigned long *empty_zero_page = NULL;
31unsigned long *empty_bad_page = NULL; 31unsigned long *empty_bad_page = NULL;
32pgd_t swapper_pg_dir[PTRS_PER_PGD]; 32pgd_t swapper_pg_dir[PTRS_PER_PGD];
33unsigned long highmem; 33unsigned long long highmem;
34int kmalloc_ok = 0; 34int kmalloc_ok = 0;
35 35
36static unsigned long brk_end; 36static unsigned long brk_end;
diff --git a/arch/um/kernel/process_kern.c b/arch/um/kernel/process_kern.c
index 3113cab8675e..f6a5a502120b 100644
--- a/arch/um/kernel/process_kern.c
+++ b/arch/um/kernel/process_kern.c
@@ -156,9 +156,25 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
156 unsigned long stack_top, struct task_struct * p, 156 unsigned long stack_top, struct task_struct * p,
157 struct pt_regs *regs) 157 struct pt_regs *regs)
158{ 158{
159 int ret;
160
159 p->thread = (struct thread_struct) INIT_THREAD; 161 p->thread = (struct thread_struct) INIT_THREAD;
160 return(CHOOSE_MODE_PROC(copy_thread_tt, copy_thread_skas, nr, 162 ret = CHOOSE_MODE_PROC(copy_thread_tt, copy_thread_skas, nr,
161 clone_flags, sp, stack_top, p, regs)); 163 clone_flags, sp, stack_top, p, regs);
164
165 if (ret || !current->thread.forking)
166 goto out;
167
168 clear_flushed_tls(p);
169
170 /*
171 * Set a new TLS for the child thread?
172 */
173 if (clone_flags & CLONE_SETTLS)
174 ret = arch_copy_tls(p);
175
176out:
177 return ret;
162} 178}
163 179
164void initial_thread_cb(void (*proc)(void *), void *arg) 180void initial_thread_cb(void (*proc)(void *), void *arg)
@@ -185,10 +201,6 @@ void default_idle(void)
185{ 201{
186 CHOOSE_MODE(uml_idle_timer(), (void) 0); 202 CHOOSE_MODE(uml_idle_timer(), (void) 0);
187 203
188 atomic_inc(&init_mm.mm_count);
189 current->mm = &init_mm;
190 current->active_mm = &init_mm;
191
192 while(1){ 204 while(1){
193 /* endless idle loop with no priority at all */ 205 /* endless idle loop with no priority at all */
194 206
@@ -407,7 +419,7 @@ static int proc_read_sysemu(char *buf, char **start, off_t offset, int size,int
407 return strlen(buf); 419 return strlen(buf);
408} 420}
409 421
410static int proc_write_sysemu(struct file *file,const char *buf, unsigned long count,void *data) 422static int proc_write_sysemu(struct file *file,const char __user *buf, unsigned long count,void *data)
411{ 423{
412 char tmp[2]; 424 char tmp[2];
413 425
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
index 98e09395c093..60d2eda995c1 100644
--- a/arch/um/kernel/ptrace.c
+++ b/arch/um/kernel/ptrace.c
@@ -46,6 +46,7 @@ extern int poke_user(struct task_struct * child, long addr, long data);
46long arch_ptrace(struct task_struct *child, long request, long addr, long data) 46long arch_ptrace(struct task_struct *child, long request, long addr, long data)
47{ 47{
48 int i, ret; 48 int i, ret;
49 unsigned long __user *p = (void __user *)(unsigned long)data;
49 50
50 switch (request) { 51 switch (request) {
51 /* when I and D space are separate, these will need to be fixed. */ 52 /* when I and D space are separate, these will need to be fixed. */
@@ -58,7 +59,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
58 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); 59 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
59 if (copied != sizeof(tmp)) 60 if (copied != sizeof(tmp))
60 break; 61 break;
61 ret = put_user(tmp, (unsigned long __user *) data); 62 ret = put_user(tmp, p);
62 break; 63 break;
63 } 64 }
64 65
@@ -136,15 +137,13 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
136 137
137#ifdef PTRACE_GETREGS 138#ifdef PTRACE_GETREGS
138 case PTRACE_GETREGS: { /* Get all gp regs from the child. */ 139 case PTRACE_GETREGS: { /* Get all gp regs from the child. */
139 if (!access_ok(VERIFY_WRITE, (unsigned long *)data, 140 if (!access_ok(VERIFY_WRITE, p, MAX_REG_OFFSET)) {
140 MAX_REG_OFFSET)) {
141 ret = -EIO; 141 ret = -EIO;
142 break; 142 break;
143 } 143 }
144 for ( i = 0; i < MAX_REG_OFFSET; i += sizeof(long) ) { 144 for ( i = 0; i < MAX_REG_OFFSET; i += sizeof(long) ) {
145 __put_user(getreg(child, i), 145 __put_user(getreg(child, i), p);
146 (unsigned long __user *) data); 146 p++;
147 data += sizeof(long);
148 } 147 }
149 ret = 0; 148 ret = 0;
150 break; 149 break;
@@ -153,15 +152,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
153#ifdef PTRACE_SETREGS 152#ifdef PTRACE_SETREGS
154 case PTRACE_SETREGS: { /* Set all gp regs in the child. */ 153 case PTRACE_SETREGS: { /* Set all gp regs in the child. */
155 unsigned long tmp = 0; 154 unsigned long tmp = 0;
156 if (!access_ok(VERIFY_READ, (unsigned *)data, 155 if (!access_ok(VERIFY_READ, p, MAX_REG_OFFSET)) {
157 MAX_REG_OFFSET)) {
158 ret = -EIO; 156 ret = -EIO;
159 break; 157 break;
160 } 158 }
161 for ( i = 0; i < MAX_REG_OFFSET; i += sizeof(long) ) { 159 for ( i = 0; i < MAX_REG_OFFSET; i += sizeof(long) ) {
162 __get_user(tmp, (unsigned long __user *) data); 160 __get_user(tmp, p);
163 putreg(child, i, tmp); 161 putreg(child, i, tmp);
164 data += sizeof(long); 162 p++;
165 } 163 }
166 ret = 0; 164 ret = 0;
167 break; 165 break;
@@ -187,14 +185,23 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
187 ret = set_fpxregs(data, child); 185 ret = set_fpxregs(data, child);
188 break; 186 break;
189#endif 187#endif
188 case PTRACE_GET_THREAD_AREA:
189 ret = ptrace_get_thread_area(child, addr,
190 (struct user_desc __user *) data);
191 break;
192
193 case PTRACE_SET_THREAD_AREA:
194 ret = ptrace_set_thread_area(child, addr,
195 (struct user_desc __user *) data);
196 break;
197
190 case PTRACE_FAULTINFO: { 198 case PTRACE_FAULTINFO: {
191 /* Take the info from thread->arch->faultinfo, 199 /* Take the info from thread->arch->faultinfo,
192 * but transfer max. sizeof(struct ptrace_faultinfo). 200 * but transfer max. sizeof(struct ptrace_faultinfo).
193 * On i386, ptrace_faultinfo is smaller! 201 * On i386, ptrace_faultinfo is smaller!
194 */ 202 */
195 ret = copy_to_user((unsigned long __user *) data, 203 ret = copy_to_user(p, &child->thread.arch.faultinfo,
196 &child->thread.arch.faultinfo, 204 sizeof(struct ptrace_faultinfo));
197 sizeof(struct ptrace_faultinfo));
198 if(ret) 205 if(ret)
199 break; 206 break;
200 break; 207 break;
@@ -204,8 +211,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
204 case PTRACE_LDT: { 211 case PTRACE_LDT: {
205 struct ptrace_ldt ldt; 212 struct ptrace_ldt ldt;
206 213
207 if(copy_from_user(&ldt, (unsigned long __user *) data, 214 if(copy_from_user(&ldt, p, sizeof(ldt))){
208 sizeof(ldt))){
209 ret = -EIO; 215 ret = -EIO;
210 break; 216 break;
211 } 217 }
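
The new PTRACE_GET_THREAD_AREA / PTRACE_SET_THREAD_AREA cases follow the host convention also used by the os-Linux/tls.c helpers below: addr carries the TLS entry number and data points to a struct user_desc. A hedged sketch of a tracer dumping one entry of a stopped, traced child; entry number 6 (the i386 GDT_ENTRY_TLS_MIN) is only an example value:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ldt.h>            /* struct user_desc */

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

static int dump_tls_entry(pid_t child, int entry)
{
        struct user_desc desc;

        /* addr selects the entry, data receives the descriptor */
        if (ptrace(PTRACE_GET_THREAD_AREA, child,
                   (void *) (long) entry, &desc) < 0) {
                perror("PTRACE_GET_THREAD_AREA");
                return -1;
        }
        printf("entry %u: base 0x%x, limit 0x%x\n",
               desc.entry_number, desc.base_addr, desc.limit);
        return 0;
}
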
diff --git a/arch/um/kernel/skas/process_kern.c b/arch/um/kernel/skas/process_kern.c
index 3f70a2e12f06..2135eaf98a93 100644
--- a/arch/um/kernel/skas/process_kern.c
+++ b/arch/um/kernel/skas/process_kern.c
@@ -35,6 +35,8 @@ void switch_to_skas(void *prev, void *next)
35 switch_threads(&from->thread.mode.skas.switch_buf, 35 switch_threads(&from->thread.mode.skas.switch_buf,
36 to->thread.mode.skas.switch_buf); 36 to->thread.mode.skas.switch_buf);
37 37
38 arch_switch_to_skas(current->thread.prev_sched, current);
39
38 if(current->pid == 0) 40 if(current->pid == 0)
39 switch_timers(1); 41 switch_timers(1);
40} 42}
@@ -89,10 +91,17 @@ void fork_handler(int sig)
89 panic("blech"); 91 panic("blech");
90 92
91 schedule_tail(current->thread.prev_sched); 93 schedule_tail(current->thread.prev_sched);
94
95 /* XXX: if interrupt_end() calls schedule, this call to
96 * arch_switch_to_skas isn't needed. We could want to apply this to
97 * improve performance. -bb */
98 arch_switch_to_skas(current->thread.prev_sched, current);
99
92 current->thread.prev_sched = NULL; 100 current->thread.prev_sched = NULL;
93 101
94/* Handle any immediate reschedules or signals */ 102/* Handle any immediate reschedules or signals */
95 interrupt_end(); 103 interrupt_end();
104
96 userspace(&current->thread.regs.regs); 105 userspace(&current->thread.regs.regs);
97} 106}
98 107
@@ -109,6 +118,8 @@ int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
109 if(sp != 0) REGS_SP(p->thread.regs.regs.skas.regs) = sp; 118 if(sp != 0) REGS_SP(p->thread.regs.regs.skas.regs) = sp;
110 119
111 handler = fork_handler; 120 handler = fork_handler;
121
122 arch_copy_thread(&current->thread.arch, &p->thread.arch);
112 } 123 }
113 else { 124 else {
114 init_thread_registers(&p->thread.regs.regs); 125 init_thread_registers(&p->thread.regs.regs);
diff --git a/arch/um/kernel/syscall_kern.c b/arch/um/kernel/syscall_kern.c
index 8e1a3501ff46..37d3978337d8 100644
--- a/arch/um/kernel/syscall_kern.c
+++ b/arch/um/kernel/syscall_kern.c
@@ -104,7 +104,7 @@ long sys_pipe(unsigned long __user * fildes)
104} 104}
105 105
106 106
107long sys_uname(struct old_utsname * name) 107long sys_uname(struct old_utsname __user * name)
108{ 108{
109 long err; 109 long err;
110 if (!name) 110 if (!name)
@@ -115,7 +115,7 @@ long sys_uname(struct old_utsname * name)
115 return err?-EFAULT:0; 115 return err?-EFAULT:0;
116} 116}
117 117
118long sys_olduname(struct oldold_utsname * name) 118long sys_olduname(struct oldold_utsname __user * name)
119{ 119{
120 long error; 120 long error;
121 121
diff --git a/arch/um/kernel/trap_kern.c b/arch/um/kernel/trap_kern.c
index d56046c2aba2..02f6d4d8dc3a 100644
--- a/arch/um/kernel/trap_kern.c
+++ b/arch/um/kernel/trap_kern.c
@@ -198,7 +198,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, void *sc)
198 si.si_signo = SIGBUS; 198 si.si_signo = SIGBUS;
199 si.si_errno = 0; 199 si.si_errno = 0;
200 si.si_code = BUS_ADRERR; 200 si.si_code = BUS_ADRERR;
201 si.si_addr = (void *)address; 201 si.si_addr = (void __user *)address;
202 current->thread.arch.faultinfo = fi; 202 current->thread.arch.faultinfo = fi;
203 force_sig_info(SIGBUS, &si, current); 203 force_sig_info(SIGBUS, &si, current);
204 } else if (err == -ENOMEM) { 204 } else if (err == -ENOMEM) {
@@ -207,7 +207,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, void *sc)
207 } else { 207 } else {
208 BUG_ON(err != -EFAULT); 208 BUG_ON(err != -EFAULT);
209 si.si_signo = SIGSEGV; 209 si.si_signo = SIGSEGV;
210 si.si_addr = (void *) address; 210 si.si_addr = (void __user *) address;
211 current->thread.arch.faultinfo = fi; 211 current->thread.arch.faultinfo = fi;
212 force_sig_info(SIGSEGV, &si, current); 212 force_sig_info(SIGSEGV, &si, current);
213 } 213 }
@@ -220,8 +220,8 @@ void bad_segv(struct faultinfo fi, unsigned long ip)
220 220
221 si.si_signo = SIGSEGV; 221 si.si_signo = SIGSEGV;
222 si.si_code = SEGV_ACCERR; 222 si.si_code = SEGV_ACCERR;
223 si.si_addr = (void *) FAULT_ADDRESS(fi); 223 si.si_addr = (void __user *) FAULT_ADDRESS(fi);
224 current->thread.arch.faultinfo = fi; 224 current->thread.arch.faultinfo = fi;
225 force_sig_info(SIGSEGV, &si, current); 225 force_sig_info(SIGSEGV, &si, current);
226} 226}
227 227
diff --git a/arch/um/kernel/tt/process_kern.c b/arch/um/kernel/tt/process_kern.c
index 295c1ac817b3..a9c1443fc548 100644
--- a/arch/um/kernel/tt/process_kern.c
+++ b/arch/um/kernel/tt/process_kern.c
@@ -51,6 +51,13 @@ void switch_to_tt(void *prev, void *next)
51 51
52 c = 0; 52 c = 0;
53 53
54 /* Notice that here we "up" the semaphore on which "to" is waiting, and
55 * below (the read) we wait on this semaphore (which is implemented by
56 * switch_pipe) and go sleeping. Thus, after that, we have resumed in
57 * "to", and can't use any more the value of "from" (which is outdated),
58 * nor the value in "to" (since it was the task which stole us the CPU,
59 * which we don't care about). */
60
54 err = os_write_file(to->thread.mode.tt.switch_pipe[1], &c, sizeof(c)); 61 err = os_write_file(to->thread.mode.tt.switch_pipe[1], &c, sizeof(c));
55 if(err != sizeof(c)) 62 if(err != sizeof(c))
56 panic("write of switch_pipe failed, err = %d", -err); 63 panic("write of switch_pipe failed, err = %d", -err);
@@ -77,7 +84,7 @@ void switch_to_tt(void *prev, void *next)
77 change_sig(SIGALRM, alrm); 84 change_sig(SIGALRM, alrm);
78 change_sig(SIGPROF, prof); 85 change_sig(SIGPROF, prof);
79 86
80 arch_switch(); 87 arch_switch_to_tt(prev_sched, current);
81 88
82 flush_tlb_all(); 89 flush_tlb_all();
83 local_irq_restore(flags); 90 local_irq_restore(flags);
@@ -141,7 +148,6 @@ static void new_thread_handler(int sig)
141 set_cmdline("(kernel thread)"); 148 set_cmdline("(kernel thread)");
142 149
143 change_sig(SIGUSR1, 1); 150 change_sig(SIGUSR1, 1);
144 change_sig(SIGVTALRM, 1);
145 change_sig(SIGPROF, 1); 151 change_sig(SIGPROF, 1);
146 local_irq_enable(); 152 local_irq_enable();
147 if(!run_kernel_thread(fn, arg, &current->thread.exec_buf)) 153 if(!run_kernel_thread(fn, arg, &current->thread.exec_buf))
diff --git a/arch/um/os-Linux/Makefile b/arch/um/os-Linux/Makefile
index 1659386b42bb..f4bfc4c7ccac 100644
--- a/arch/um/os-Linux/Makefile
+++ b/arch/um/os-Linux/Makefile
@@ -4,7 +4,7 @@
4# 4#
5 5
6obj-y = aio.o elf_aux.o file.o helper.o irq.o main.o mem.o process.o sigio.o \ 6obj-y = aio.o elf_aux.o file.o helper.o irq.o main.o mem.o process.o sigio.o \
7 signal.o start_up.o time.o trap.o tt.o tty.o uaccess.o umid.o \ 7 signal.o start_up.o time.o trap.o tt.o tty.o uaccess.o umid.o tls.o \
8 user_syms.o util.o drivers/ sys-$(SUBARCH)/ 8 user_syms.o util.o drivers/ sys-$(SUBARCH)/
9 9
10obj-$(CONFIG_MODE_SKAS) += skas/ 10obj-$(CONFIG_MODE_SKAS) += skas/
@@ -12,12 +12,9 @@ obj-$(CONFIG_TTY_LOG) += tty_log.o
12user-objs-$(CONFIG_TTY_LOG) += tty_log.o 12user-objs-$(CONFIG_TTY_LOG) += tty_log.o
13 13
14USER_OBJS := $(user-objs-y) aio.o elf_aux.o file.o helper.o irq.o main.o mem.o \ 14USER_OBJS := $(user-objs-y) aio.o elf_aux.o file.o helper.o irq.o main.o mem.o \
15 process.o sigio.o signal.o start_up.o time.o trap.o tt.o tty.o \ 15 process.o sigio.o signal.o start_up.o time.o trap.o tt.o tty.o tls.o \
16 uaccess.o umid.o util.o 16 uaccess.o umid.o util.o
17 17
18elf_aux.o: $(ARCH_DIR)/kernel-offsets.h
19CFLAGS_elf_aux.o += -I$(objtree)/arch/um
20
21CFLAGS_user_syms.o += -DSUBARCH_$(SUBARCH) 18CFLAGS_user_syms.o += -DSUBARCH_$(SUBARCH)
22 19
23HAVE_AIO_ABI := $(shell [ -r /usr/include/linux/aio_abi.h ] && \ 20HAVE_AIO_ABI := $(shell [ -r /usr/include/linux/aio_abi.h ] && \
diff --git a/arch/um/os-Linux/drivers/ethertap_kern.c b/arch/um/os-Linux/drivers/ethertap_kern.c
index 6ae4b19d9f50..768606bec233 100644
--- a/arch/um/os-Linux/drivers/ethertap_kern.c
+++ b/arch/um/os-Linux/drivers/ethertap_kern.c
@@ -102,18 +102,7 @@ static struct transport ethertap_transport = {
102static int register_ethertap(void) 102static int register_ethertap(void)
103{ 103{
104 register_transport(&ethertap_transport); 104 register_transport(&ethertap_transport);
105 return(1); 105 return 0;
106} 106}
107 107
108__initcall(register_ethertap); 108__initcall(register_ethertap);
109
110/*
111 * Overrides for Emacs so that we follow Linus's tabbing style.
112 * Emacs will notice this stuff at the end of the file and automatically
113 * adjust the settings for this buffer only. This must remain at the end
114 * of the file.
115 * ---------------------------------------------------------------------------
116 * Local variables:
117 * c-file-style: "linux"
118 * End:
119 */
diff --git a/arch/um/os-Linux/drivers/tuntap_kern.c b/arch/um/os-Linux/drivers/tuntap_kern.c
index 4202b9ebad4c..190009a6f89c 100644
--- a/arch/um/os-Linux/drivers/tuntap_kern.c
+++ b/arch/um/os-Linux/drivers/tuntap_kern.c
@@ -87,18 +87,7 @@ static struct transport tuntap_transport = {
87static int register_tuntap(void) 87static int register_tuntap(void)
88{ 88{
89 register_transport(&tuntap_transport); 89 register_transport(&tuntap_transport);
90 return(1); 90 return 0;
91} 91}
92 92
93__initcall(register_tuntap); 93__initcall(register_tuntap);
94
95/*
96 * Overrides for Emacs so that we follow Linus's tabbing style.
97 * Emacs will notice this stuff at the end of the file and automatically
98 * adjust the settings for this buffer only. This must remain at the end
99 * of the file.
100 * ---------------------------------------------------------------------------
101 * Local variables:
102 * c-file-style: "linux"
103 * End:
104 */
diff --git a/arch/um/os-Linux/mem.c b/arch/um/os-Linux/mem.c
index 9d7d69a523bb..6ab372da9657 100644
--- a/arch/um/os-Linux/mem.c
+++ b/arch/um/os-Linux/mem.c
@@ -121,36 +121,11 @@ int create_tmp_file(unsigned long long len)
121 return(fd); 121 return(fd);
122} 122}
123 123
124static int create_anon_file(unsigned long long len)
125{
126 void *addr;
127 int fd;
128
129 fd = open("/dev/anon", O_RDWR);
130 if(fd < 0) {
131 perror("opening /dev/anon");
132 exit(1);
133 }
134
135 addr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
136 if(addr == MAP_FAILED){
137 perror("mapping physmem file");
138 exit(1);
139 }
140 munmap(addr, len);
141
142 return(fd);
143}
144
145extern int have_devanon;
146
147int create_mem_file(unsigned long long len) 124int create_mem_file(unsigned long long len)
148{ 125{
149 int err, fd; 126 int err, fd;
150 127
151 if(have_devanon) 128 fd = create_tmp_file(len);
152 fd = create_anon_file(len);
153 else fd = create_tmp_file(len);
154 129
155 err = os_set_exec_close(fd, 1); 130 err = os_set_exec_close(fd, 1);
156 if(err < 0){ 131 if(err < 0){
diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
index d261888f39c4..8176b0b52047 100644
--- a/arch/um/os-Linux/process.c
+++ b/arch/um/os-Linux/process.c
@@ -11,6 +11,7 @@
11#include <linux/unistd.h> 11#include <linux/unistd.h>
12#include <sys/mman.h> 12#include <sys/mman.h>
13#include <sys/wait.h> 13#include <sys/wait.h>
14#include <sys/mman.h>
14#include "ptrace_user.h" 15#include "ptrace_user.h"
15#include "os.h" 16#include "os.h"
16#include "user.h" 17#include "user.h"
@@ -20,6 +21,7 @@
20#include "kern_util.h" 21#include "kern_util.h"
21#include "longjmp.h" 22#include "longjmp.h"
22#include "skas_ptrace.h" 23#include "skas_ptrace.h"
24#include "kern_constants.h"
23 25
24#define ARBITRARY_ADDR -1 26#define ARBITRARY_ADDR -1
25#define FAILURE_PID -1 27#define FAILURE_PID -1
@@ -187,6 +189,48 @@ int os_unmap_memory(void *addr, int len)
187 return(0); 189 return(0);
188} 190}
189 191
192#ifndef MADV_REMOVE
193#define MADV_REMOVE 0x5 /* remove these pages & resources */
194#endif
195
196int os_drop_memory(void *addr, int length)
197{
198 int err;
199
200 err = madvise(addr, length, MADV_REMOVE);
201 if(err < 0)
202 err = -errno;
203 return err;
204}
205
206int can_drop_memory(void)
207{
208 void *addr;
209 int fd;
210
211 printk("Checking host MADV_REMOVE support...");
212 fd = create_mem_file(UM_KERN_PAGE_SIZE);
213 if(fd < 0){
214 printk("Creating test memory file failed, err = %d\n", -fd);
215 return 0;
216 }
217
218 addr = mmap64(NULL, UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
219 MAP_PRIVATE, fd, 0);
220 if(addr == MAP_FAILED){
221 printk("Mapping test memory file failed, err = %d\n", -errno);
222 return 0;
223 }
224
225 if(madvise(addr, UM_KERN_PAGE_SIZE, MADV_REMOVE) != 0){
226 printk("MADV_REMOVE failed, err = %d\n", -errno);
227 return 0;
228 }
229
230 printk("OK\n");
231 return 1;
232}
233
190void init_new_thread_stack(void *sig_stack, void (*usr1_handler)(int)) 234void init_new_thread_stack(void *sig_stack, void (*usr1_handler)(int))
191{ 235{
192 int flags = 0, pages; 236 int flags = 0, pages;
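
os_drop_memory() returns pages to the host by punching them out of the backing file with madvise(MADV_REMOVE), and can_drop_memory() probes whether the host kernel (tmpfs grew this support around 2.6.16) actually honours it. A standalone approximation of that probe; it uses a plain temporary file as the backing object for simplicity, so the result depends on where that file lands:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>

int main(void)
{
#ifdef MADV_REMOVE
        long page = sysconf(_SC_PAGESIZE);
        FILE *f = tmpfile();
        void *addr;

        if (f == NULL || ftruncate(fileno(f), page) != 0) {
                perror("backing file");
                return 1;
        }
        addr = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fileno(f), 0);
        if (addr == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        if (madvise(addr, page, MADV_REMOVE) != 0)
                printf("MADV_REMOVE not usable here: %s\n", strerror(errno));
        else
                printf("MADV_REMOVE OK\n");
        return 0;
#else
        printf("MADV_REMOVE not defined in this host's headers\n");
        return 0;
#endif
}
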
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index 32753131f8d8..387e26af301a 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -470,25 +470,6 @@ int can_do_skas(void)
470} 470}
471#endif 471#endif
472 472
473int have_devanon = 0;
474
475/* Runs on boot kernel stack - already safe to use printk. */
476
477void check_devanon(void)
478{
479 int fd;
480
481 printk("Checking for /dev/anon on the host...");
482 fd = open("/dev/anon", O_RDWR);
483 if(fd < 0){
484 printk("Not available (open failed with errno %d)\n", errno);
485 return;
486 }
487
488 printk("OK\n");
489 have_devanon = 1;
490}
491
492int __init parse_iomem(char *str, int *add) 473int __init parse_iomem(char *str, int *add)
493{ 474{
494 struct iomem_region *new; 475 struct iomem_region *new;
@@ -664,6 +645,5 @@ void os_check_bugs(void)
664{ 645{
665 check_ptrace(); 646 check_ptrace();
666 check_sigio(); 647 check_sigio();
667 check_devanon();
668} 648}
669 649
diff --git a/arch/um/os-Linux/sys-i386/Makefile b/arch/um/os-Linux/sys-i386/Makefile
index 340ef26f5944..b3213613c41c 100644
--- a/arch/um/os-Linux/sys-i386/Makefile
+++ b/arch/um/os-Linux/sys-i386/Makefile
@@ -3,7 +3,7 @@
3# Licensed under the GPL 3# Licensed under the GPL
4# 4#
5 5
6obj-$(CONFIG_MODE_SKAS) = registers.o 6obj-$(CONFIG_MODE_SKAS) = registers.o tls.o
7 7
8USER_OBJS := $(obj-y) 8USER_OBJS := $(obj-y)
9 9
diff --git a/arch/um/os-Linux/sys-i386/tls.c b/arch/um/os-Linux/sys-i386/tls.c
new file mode 100644
index 000000000000..ba21f0e04a2f
--- /dev/null
+++ b/arch/um/os-Linux/sys-i386/tls.c
@@ -0,0 +1,33 @@
1#include <linux/unistd.h>
2#include "sysdep/tls.h"
3#include "user_util.h"
4
5static _syscall1(int, get_thread_area, user_desc_t *, u_info);
6
7/* Checks whether host supports TLS, and sets *tls_min according to the value
8 * valid on the host.
9 * i386 host have it == 6; x86_64 host have it == 12, for i386 emulation. */
10void check_host_supports_tls(int *supports_tls, int *tls_min) {
11 /* Values for x86 and x86_64.*/
12 int val[] = {GDT_ENTRY_TLS_MIN_I386, GDT_ENTRY_TLS_MIN_X86_64};
13 int i;
14
15 for (i = 0; i < ARRAY_SIZE(val); i++) {
16 user_desc_t info;
17 info.entry_number = val[i];
18
19 if (get_thread_area(&info) == 0) {
20 *tls_min = val[i];
21 *supports_tls = 1;
22 return;
23 } else {
24 if (errno == EINVAL)
25 continue;
26 else if (errno == ENOSYS)
27 *supports_tls = 0;
28 return;
29 }
30 }
31
32 *supports_tls = 0;
33}
diff --git a/arch/um/os-Linux/tls.c b/arch/um/os-Linux/tls.c
new file mode 100644
index 000000000000..9cb09a45546b
--- /dev/null
+++ b/arch/um/os-Linux/tls.c
@@ -0,0 +1,76 @@
1#include <errno.h>
2#include <sys/ptrace.h>
3#include <asm/ldt.h>
4#include "sysdep/tls.h"
5#include "uml-config.h"
6
7/* TLS support - we basically rely on the host's one.*/
8
9/* In TT mode, this should be called only by the tracing thread, and makes sense
10 * only for PTRACE_SET_THREAD_AREA. In SKAS mode, it's used normally.
11 *
12 */
13
14#ifndef PTRACE_GET_THREAD_AREA
15#define PTRACE_GET_THREAD_AREA 25
16#endif
17
18#ifndef PTRACE_SET_THREAD_AREA
19#define PTRACE_SET_THREAD_AREA 26
20#endif
21
22int os_set_thread_area(user_desc_t *info, int pid)
23{
24 int ret;
25
26 ret = ptrace(PTRACE_SET_THREAD_AREA, pid, info->entry_number,
27 (unsigned long) info);
28 if (ret < 0)
29 ret = -errno;
30 return ret;
31}
32
33#ifdef UML_CONFIG_MODE_SKAS
34
35int os_get_thread_area(user_desc_t *info, int pid)
36{
37 int ret;
38
39 ret = ptrace(PTRACE_GET_THREAD_AREA, pid, info->entry_number,
40 (unsigned long) info);
41 if (ret < 0)
42 ret = -errno;
43 return ret;
44}
45
46#endif
47
48#ifdef UML_CONFIG_MODE_TT
49#include "linux/unistd.h"
50
51static _syscall1(int, get_thread_area, user_desc_t *, u_info);
52static _syscall1(int, set_thread_area, user_desc_t *, u_info);
53
54int do_set_thread_area_tt(user_desc_t *info)
55{
56 int ret;
57
58 ret = set_thread_area(info);
59 if (ret < 0) {
60 ret = -errno;
61 }
62 return ret;
63}
64
65int do_get_thread_area_tt(user_desc_t *info)
66{
67 int ret;
68
69 ret = get_thread_area(info);
70 if (ret < 0) {
71 ret = -errno;
72 }
73 return ret;
74}
75
76#endif /* UML_CONFIG_MODE_TT */
diff --git a/arch/um/scripts/Makefile.rules b/arch/um/scripts/Makefile.rules
index 2e41cabd3d93..b696b451774c 100644
--- a/arch/um/scripts/Makefile.rules
+++ b/arch/um/scripts/Makefile.rules
@@ -20,25 +20,7 @@ define unprofile
20 $(patsubst -pg,,$(patsubst -fprofile-arcs -ftest-coverage,,$(1))) 20 $(patsubst -pg,,$(patsubst -fprofile-arcs -ftest-coverage,,$(1)))
21endef 21endef
22 22
23 23ifdef subarch-obj-y
24# cmd_make_link checks to see if the $(foo-dir) variable starts with a /. If 24obj-y += subarch.o
25# so, it's considered to be a path relative to $(srcdir) rather than 25subarch-y = $(addprefix ../../$(SUBARCH)/,$(subarch-obj-y))
26# $(srcdir)/arch/$(SUBARCH). This is because x86_64 wants to get ldt.c from 26endif
27# arch/um/sys-i386 rather than arch/i386 like the other borrowed files. So,
28# it sets $(ldt.c-dir) to /arch/um/sys-i386.
29quiet_cmd_make_link = SYMLINK $@
30cmd_make_link = rm -f $@; ln -sf $(srctree)$(if $(filter-out /%,$($(notdir $@)-dir)),/arch/$(SUBARCH))/$($(notdir $@)-dir)/$(notdir $@) $@
31
32# this needs to be before the foreach, because targets does not accept
33# complete paths like $(obj)/$(f). To make sure this works, use a := assignment
34# or we will get $(obj)/$(f) in the "targets" value.
35# Also, this forces you to use the := syntax when assigning to targets.
36# Otherwise the line below will cause an infinite loop (if you don't know why,
37# just do it).
38
39targets := $(targets) $(SYMLINKS)
40
41SYMLINKS := $(foreach f,$(SYMLINKS),$(obj)/$(f))
42
43$(SYMLINKS): FORCE
44 $(call if_changed,make_link)
diff --git a/arch/um/scripts/Makefile.unmap b/arch/um/scripts/Makefile.unmap
deleted file mode 100644
index b2165188d942..000000000000
--- a/arch/um/scripts/Makefile.unmap
+++ /dev/null
@@ -1,22 +0,0 @@
1clean-files += unmap_tmp.o unmap_fin.o unmap.o
2
3ifdef CONFIG_MODE_TT
4
5#Always build unmap_fin.o
6extra-y += unmap_fin.o
7#Do dependency tracking for unmap.o (it will be always built, but won't get the tracking unless we use this).
8targets += unmap.o
9
10#XXX: partially copied from arch/um/scripts/Makefile.rules
11$(obj)/unmap.o: _c_flags = $(call unprofile,$(CFLAGS))
12
13quiet_cmd_wrapld = LD $@
14define cmd_wrapld
15 $(LD) $(LDFLAGS) -r -o $(obj)/unmap_tmp.o $< ; \
16 $(OBJCOPY) $(UML_OBJCOPYFLAGS) $(obj)/unmap_tmp.o $@ -G switcheroo
17endef
18
19$(obj)/unmap_fin.o : $(obj)/unmap.o FORCE
20 $(call if_changed,wrapld)
21
22endif
diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile
index f5fd5b0156d0..98b20b7bba4f 100644
--- a/arch/um/sys-i386/Makefile
+++ b/arch/um/sys-i386/Makefile
@@ -1,23 +1,18 @@
1obj-y := bitops.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \ 1obj-y = bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \
2 ptrace_user.o semaphore.o signal.o sigcontext.o syscalls.o sysrq.o \ 2 ptrace_user.o signal.o sigcontext.o syscalls.o sysrq.o \
3 sys_call_table.o 3 sys_call_table.o tls.o
4 4
5obj-$(CONFIG_MODE_SKAS) += stub.o stub_segv.o 5obj-$(CONFIG_MODE_SKAS) += stub.o stub_segv.o
6 6
7obj-$(CONFIG_HIGHMEM) += highmem.o 7subarch-obj-y = lib/bitops.o kernel/semaphore.o
8obj-$(CONFIG_MODULES) += module.o 8subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem.o
9subarch-obj-$(CONFIG_MODULES) += kernel/module.o
9 10
10USER_OBJS := bugs.o ptrace_user.o sigcontext.o fault.o stub_segv.o 11USER_OBJS := bugs.o ptrace_user.o sigcontext.o fault.o stub_segv.o
11 12
12SYMLINKS = bitops.c semaphore.c highmem.c module.c
13
14include arch/um/scripts/Makefile.rules 13include arch/um/scripts/Makefile.rules
15 14
16bitops.c-dir = lib 15extra-$(CONFIG_MODE_TT) += unmap.o
17semaphore.c-dir = kernel
18highmem.c-dir = mm
19module.c-dir = kernel
20
21$(obj)/stub_segv.o : _c_flags = $(call unprofile,$(CFLAGS))
22 16
23include arch/um/scripts/Makefile.unmap 17$(obj)/stub_segv.o $(obj)/unmap.o: \
18 _c_flags = $(call unprofile,$(CFLAGS))
diff --git a/arch/um/sys-i386/ptrace.c b/arch/um/sys-i386/ptrace.c
index 8032a105949a..6028bc7cc01b 100644
--- a/arch/um/sys-i386/ptrace.c
+++ b/arch/um/sys-i386/ptrace.c
@@ -15,9 +15,22 @@
15#include "sysdep/sigcontext.h" 15#include "sysdep/sigcontext.h"
16#include "sysdep/sc.h" 16#include "sysdep/sc.h"
17 17
18void arch_switch(void) 18void arch_switch_to_tt(struct task_struct *from, struct task_struct *to)
19{ 19{
20 update_debugregs(current->thread.arch.debugregs_seq); 20 update_debugregs(to->thread.arch.debugregs_seq);
21 arch_switch_tls_tt(from, to);
22}
23
24void arch_switch_to_skas(struct task_struct *from, struct task_struct *to)
25{
26 int err = arch_switch_tls_skas(from, to);
27 if (!err)
28 return;
29
30 if (err != -EINVAL)
31 printk(KERN_WARNING "arch_switch_tls_skas failed, errno %d, not EINVAL\n", -err);
32 else
33 printk(KERN_WARNING "arch_switch_tls_skas failed, errno = EINVAL\n");
21} 34}
22 35
23int is_syscall(unsigned long addr) 36int is_syscall(unsigned long addr)
@@ -124,22 +137,22 @@ unsigned long getreg(struct task_struct *child, int regno)
124int peek_user(struct task_struct *child, long addr, long data) 137int peek_user(struct task_struct *child, long addr, long data)
125{ 138{
126/* read the word at location addr in the USER area. */ 139/* read the word at location addr in the USER area. */
127 unsigned long tmp; 140 unsigned long tmp;
128 141
129 if ((addr & 3) || addr < 0) 142 if ((addr & 3) || addr < 0)
130 return -EIO; 143 return -EIO;
131 144
132 tmp = 0; /* Default return condition */ 145 tmp = 0; /* Default return condition */
133 if(addr < MAX_REG_OFFSET){ 146 if(addr < MAX_REG_OFFSET){
134 tmp = getreg(child, addr); 147 tmp = getreg(child, addr);
135 } 148 }
136 else if((addr >= offsetof(struct user, u_debugreg[0])) && 149 else if((addr >= offsetof(struct user, u_debugreg[0])) &&
137 (addr <= offsetof(struct user, u_debugreg[7]))){ 150 (addr <= offsetof(struct user, u_debugreg[7]))){
138 addr -= offsetof(struct user, u_debugreg[0]); 151 addr -= offsetof(struct user, u_debugreg[0]);
139 addr = addr >> 2; 152 addr = addr >> 2;
140 tmp = child->thread.arch.debugregs[addr]; 153 tmp = child->thread.arch.debugregs[addr];
141 } 154 }
142 return put_user(tmp, (unsigned long *) data); 155 return put_user(tmp, (unsigned long __user *) data);
143} 156}
144 157
145struct i387_fxsave_struct { 158struct i387_fxsave_struct {
diff --git a/arch/um/sys-i386/ptrace_user.c b/arch/um/sys-i386/ptrace_user.c
index 7c376c95de50..9f3bd8ed78f5 100644
--- a/arch/um/sys-i386/ptrace_user.c
+++ b/arch/um/sys-i386/ptrace_user.c
@@ -14,6 +14,7 @@
14#include "sysdep/thread.h" 14#include "sysdep/thread.h"
15#include "user.h" 15#include "user.h"
16#include "os.h" 16#include "os.h"
17#include "uml-config.h"
17 18
18int ptrace_getregs(long pid, unsigned long *regs_out) 19int ptrace_getregs(long pid, unsigned long *regs_out)
19{ 20{
@@ -43,6 +44,7 @@ int ptrace_setfpregs(long pid, unsigned long *regs)
43 return 0; 44 return 0;
44} 45}
45 46
47/* All the below stuff is of interest for TT mode only */
46static void write_debugregs(int pid, unsigned long *regs) 48static void write_debugregs(int pid, unsigned long *regs)
47{ 49{
48 struct user *dummy; 50 struct user *dummy;
@@ -75,7 +77,6 @@ static void read_debugregs(int pid, unsigned long *regs)
75 77
76/* Accessed only by the tracing thread */ 78/* Accessed only by the tracing thread */
77static unsigned long kernel_debugregs[8] = { [ 0 ... 7 ] = 0 }; 79static unsigned long kernel_debugregs[8] = { [ 0 ... 7 ] = 0 };
78static int debugregs_seq = 0;
79 80
80void arch_enter_kernel(void *task, int pid) 81void arch_enter_kernel(void *task, int pid)
81{ 82{
@@ -89,6 +90,11 @@ void arch_leave_kernel(void *task, int pid)
89 write_debugregs(pid, TASK_DEBUGREGS(task)); 90 write_debugregs(pid, TASK_DEBUGREGS(task));
90} 91}
91 92
93#ifdef UML_CONFIG_PT_PROXY
94/* Accessed only by the tracing thread */
95static int debugregs_seq;
96
97/* Only called by the ptrace proxy */
92void ptrace_pokeuser(unsigned long addr, unsigned long data) 98void ptrace_pokeuser(unsigned long addr, unsigned long data)
93{ 99{
94 if((addr < offsetof(struct user, u_debugreg[0])) || 100 if((addr < offsetof(struct user, u_debugreg[0])) ||
@@ -109,6 +115,7 @@ static void update_debugregs_cb(void *arg)
109 write_debugregs(pid, kernel_debugregs); 115 write_debugregs(pid, kernel_debugregs);
110} 116}
111 117
118/* Optimized out in its header when not defined */
112void update_debugregs(int seq) 119void update_debugregs(int seq)
113{ 120{
114 int me; 121 int me;
@@ -118,6 +125,7 @@ void update_debugregs(int seq)
118 me = os_getpid(); 125 me = os_getpid();
119 initial_thread_cb(update_debugregs_cb, &me); 126 initial_thread_cb(update_debugregs_cb, &me);
120} 127}
128#endif
121 129
122/* 130/*
123 * Overrides for Emacs so that we follow Linus's tabbing style. 131 * Overrides for Emacs so that we follow Linus's tabbing style.
diff --git a/arch/um/sys-i386/signal.c b/arch/um/sys-i386/signal.c
index 33a40f5ef0d2..f5d0e1c37ea2 100644
--- a/arch/um/sys-i386/signal.c
+++ b/arch/um/sys-i386/signal.c
@@ -19,7 +19,7 @@
19#include "skas.h" 19#include "skas.h"
20 20
21static int copy_sc_from_user_skas(struct pt_regs *regs, 21static int copy_sc_from_user_skas(struct pt_regs *regs,
22 struct sigcontext *from) 22 struct sigcontext __user *from)
23{ 23{
24 struct sigcontext sc; 24 struct sigcontext sc;
25 unsigned long fpregs[HOST_FP_SIZE]; 25 unsigned long fpregs[HOST_FP_SIZE];
@@ -57,7 +57,7 @@ static int copy_sc_from_user_skas(struct pt_regs *regs,
57 return(0); 57 return(0);
58} 58}
59 59
60int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp, 60int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate __user *to_fp,
61 struct pt_regs *regs, unsigned long sp) 61 struct pt_regs *regs, unsigned long sp)
62{ 62{
63 struct sigcontext sc; 63 struct sigcontext sc;
@@ -92,7 +92,7 @@ int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp,
92 "errno = %d\n", err); 92 "errno = %d\n", err);
93 return(1); 93 return(1);
94 } 94 }
95 to_fp = (to_fp ? to_fp : (struct _fpstate *) (to + 1)); 95 to_fp = (to_fp ? to_fp : (struct _fpstate __user *) (to + 1));
96 sc.fpstate = to_fp; 96 sc.fpstate = to_fp;
97 97
98 if(err) 98 if(err)
@@ -113,10 +113,11 @@ int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp,
113 * saved pointer is in the kernel, but the sigcontext is in userspace, so we 113 * saved pointer is in the kernel, but the sigcontext is in userspace, so we
114 * copy_to_user it. 114 * copy_to_user it.
115 */ 115 */
116int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext *from, 116int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext __user *from,
117 int fpsize) 117 int fpsize)
118{ 118{
119 struct _fpstate *to_fp, *from_fp; 119 struct _fpstate *to_fp;
120 struct _fpstate __user *from_fp;
120 unsigned long sigs; 121 unsigned long sigs;
121 int err; 122 int err;
122 123
@@ -131,13 +132,14 @@ int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext *from,
131 return(err); 132 return(err);
132} 133}
133 134
134int copy_sc_to_user_tt(struct sigcontext *to, struct _fpstate *fp, 135int copy_sc_to_user_tt(struct sigcontext *to, struct _fpstate __user *fp,
135 struct sigcontext *from, int fpsize, unsigned long sp) 136 struct sigcontext *from, int fpsize, unsigned long sp)
136{ 137{
137 struct _fpstate *to_fp, *from_fp; 138 struct _fpstate __user *to_fp;
139 struct _fpstate *from_fp;
138 int err; 140 int err;
139 141
140 to_fp = (fp ? fp : (struct _fpstate *) (to + 1)); 142 to_fp = (fp ? fp : (struct _fpstate __user *) (to + 1));
141 from_fp = from->fpstate; 143 from_fp = from->fpstate;
142 err = copy_to_user(to, from, sizeof(*to)); 144 err = copy_to_user(to, from, sizeof(*to));
143 145
@@ -165,7 +167,7 @@ static int copy_sc_from_user(struct pt_regs *to, void __user *from)
165 return(ret); 167 return(ret);
166} 168}
167 169
168static int copy_sc_to_user(struct sigcontext *to, struct _fpstate *fp, 170static int copy_sc_to_user(struct sigcontext *to, struct _fpstate __user *fp,
169 struct pt_regs *from, unsigned long sp) 171 struct pt_regs *from, unsigned long sp)
170{ 172{
171 return(CHOOSE_MODE(copy_sc_to_user_tt(to, fp, UPT_SC(&from->regs), 173 return(CHOOSE_MODE(copy_sc_to_user_tt(to, fp, UPT_SC(&from->regs),
@@ -173,7 +175,7 @@ static int copy_sc_to_user(struct sigcontext *to, struct _fpstate *fp,
173 copy_sc_to_user_skas(to, fp, from, sp))); 175 copy_sc_to_user_skas(to, fp, from, sp)));
174} 176}
175 177
176static int copy_ucontext_to_user(struct ucontext *uc, struct _fpstate *fp, 178static int copy_ucontext_to_user(struct ucontext __user *uc, struct _fpstate __user *fp,
177 sigset_t *set, unsigned long sp) 179 sigset_t *set, unsigned long sp)
178{ 180{
179 int err = 0; 181 int err = 0;
@@ -188,7 +190,7 @@ static int copy_ucontext_to_user(struct ucontext *uc, struct _fpstate *fp,
188 190
189struct sigframe 191struct sigframe
190{ 192{
191 char *pretcode; 193 char __user *pretcode;
192 int sig; 194 int sig;
193 struct sigcontext sc; 195 struct sigcontext sc;
194 struct _fpstate fpstate; 196 struct _fpstate fpstate;
@@ -198,10 +200,10 @@ struct sigframe
198 200
199struct rt_sigframe 201struct rt_sigframe
200{ 202{
201 char *pretcode; 203 char __user *pretcode;
202 int sig; 204 int sig;
203 struct siginfo *pinfo; 205 struct siginfo __user *pinfo;
204 void *puc; 206 void __user *puc;
205 struct siginfo info; 207 struct siginfo info;
206 struct ucontext uc; 208 struct ucontext uc;
207 struct _fpstate fpstate; 209 struct _fpstate fpstate;
@@ -213,16 +215,16 @@ int setup_signal_stack_sc(unsigned long stack_top, int sig,
213 sigset_t *mask) 215 sigset_t *mask)
214{ 216{
215 struct sigframe __user *frame; 217 struct sigframe __user *frame;
216 void *restorer; 218 void __user *restorer;
217 unsigned long save_sp = PT_REGS_SP(regs); 219 unsigned long save_sp = PT_REGS_SP(regs);
218 int err = 0; 220 int err = 0;
219 221
220 stack_top &= -8UL; 222 stack_top &= -8UL;
221 frame = (struct sigframe *) stack_top - 1; 223 frame = (struct sigframe __user *) stack_top - 1;
222 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 224 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
223 return 1; 225 return 1;
224 226
225 restorer = (void *) frame->retcode; 227 restorer = frame->retcode;
226 if(ka->sa.sa_flags & SA_RESTORER) 228 if(ka->sa.sa_flags & SA_RESTORER)
227 restorer = ka->sa.sa_restorer; 229 restorer = ka->sa.sa_restorer;
228 230
@@ -278,16 +280,16 @@ int setup_signal_stack_si(unsigned long stack_top, int sig,
278 siginfo_t *info, sigset_t *mask) 280 siginfo_t *info, sigset_t *mask)
279{ 281{
280 struct rt_sigframe __user *frame; 282 struct rt_sigframe __user *frame;
281 void *restorer; 283 void __user *restorer;
282 unsigned long save_sp = PT_REGS_SP(regs); 284 unsigned long save_sp = PT_REGS_SP(regs);
283 int err = 0; 285 int err = 0;
284 286
285 stack_top &= -8UL; 287 stack_top &= -8UL;
286 frame = (struct rt_sigframe *) stack_top - 1; 288 frame = (struct rt_sigframe __user *) stack_top - 1;
287 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 289 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
288 return 1; 290 return 1;
289 291
290 restorer = (void *) frame->retcode; 292 restorer = frame->retcode;
291 if(ka->sa.sa_flags & SA_RESTORER) 293 if(ka->sa.sa_flags & SA_RESTORER)
292 restorer = ka->sa.sa_restorer; 294 restorer = ka->sa.sa_restorer;
293 295
@@ -333,7 +335,7 @@ err:
333long sys_sigreturn(struct pt_regs regs) 335long sys_sigreturn(struct pt_regs regs)
334{ 336{
335 unsigned long sp = PT_REGS_SP(&current->thread.regs); 337 unsigned long sp = PT_REGS_SP(&current->thread.regs);
336 struct sigframe __user *frame = (struct sigframe *)(sp - 8); 338 struct sigframe __user *frame = (struct sigframe __user *)(sp - 8);
337 sigset_t set; 339 sigset_t set;
338 struct sigcontext __user *sc = &frame->sc; 340 struct sigcontext __user *sc = &frame->sc;
339 unsigned long __user *oldmask = &sc->oldmask; 341 unsigned long __user *oldmask = &sc->oldmask;
@@ -365,8 +367,8 @@ long sys_sigreturn(struct pt_regs regs)
365 367
366long sys_rt_sigreturn(struct pt_regs regs) 368long sys_rt_sigreturn(struct pt_regs regs)
367{ 369{
368 unsigned long __user sp = PT_REGS_SP(&current->thread.regs); 370 unsigned long sp = PT_REGS_SP(&current->thread.regs);
369 struct rt_sigframe __user *frame = (struct rt_sigframe *) (sp - 4); 371 struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (sp - 4);
370 sigset_t set; 372 sigset_t set;
371 struct ucontext __user *uc = &frame->uc; 373 struct ucontext __user *uc = &frame->uc;
372 int sig_size = _NSIG_WORDS * sizeof(unsigned long); 374 int sig_size = _NSIG_WORDS * sizeof(unsigned long);
diff --git a/arch/um/sys-i386/sys_call_table.S b/arch/um/sys-i386/sys_call_table.S
index ad75c27afe38..1ff61474b25c 100644
--- a/arch/um/sys-i386/sys_call_table.S
+++ b/arch/um/sys-i386/sys_call_table.S
@@ -6,8 +6,6 @@
6 6
7#define sys_vm86old sys_ni_syscall 7#define sys_vm86old sys_ni_syscall
8#define sys_vm86 sys_ni_syscall 8#define sys_vm86 sys_ni_syscall
9#define sys_set_thread_area sys_ni_syscall
10#define sys_get_thread_area sys_ni_syscall
11 9
12#define sys_stime um_stime 10#define sys_stime um_stime
13#define sys_time um_time 11#define sys_time um_time
diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
index 83e9be820a86..749dd1bfe60f 100644
--- a/arch/um/sys-i386/syscalls.c
+++ b/arch/um/sys-i386/syscalls.c
@@ -61,21 +61,27 @@ long old_select(struct sel_arg_struct __user *arg)
61 return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp); 61 return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
62} 62}
63 63
64/* The i386 version skips reading from %esi, the fourth argument. So we must do 64/*
65 * this, too. 65 * The prototype on i386 is:
66 *
67 * int clone(int flags, void * child_stack, int * parent_tidptr, struct user_desc * newtls, int * child_tidptr)
68 *
69 * and the "newtls" arg. on i386 is read by copy_thread directly from the
70 * register saved on the stack.
66 */ 71 */
67long sys_clone(unsigned long clone_flags, unsigned long newsp, 72long sys_clone(unsigned long clone_flags, unsigned long newsp,
68 int __user *parent_tid, int unused, int __user *child_tid) 73 int __user *parent_tid, void *newtls, int __user *child_tid)
69{ 74{
70 long ret; 75 long ret;
71 76
72 if (!newsp) 77 if (!newsp)
73 newsp = UPT_SP(&current->thread.regs.regs); 78 newsp = UPT_SP(&current->thread.regs.regs);
79
74 current->thread.forking = 1; 80 current->thread.forking = 1;
75 ret = do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid, 81 ret = do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
76 child_tid); 82 child_tid);
77 current->thread.forking = 0; 83 current->thread.forking = 0;
78 return(ret); 84 return ret;
79} 85}
80 86
81/* 87/*
@@ -104,7 +110,7 @@ long sys_ipc (uint call, int first, int second,
104 union semun fourth; 110 union semun fourth;
105 if (!ptr) 111 if (!ptr)
106 return -EINVAL; 112 return -EINVAL;
107 if (get_user(fourth.__pad, (void **) ptr)) 113 if (get_user(fourth.__pad, (void __user * __user *) ptr))
108 return -EFAULT; 114 return -EFAULT;
109 return sys_semctl (first, second, third, fourth); 115 return sys_semctl (first, second, third, fourth);
110 } 116 }
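For context on the rewritten comment above: the fourth clone() argument on i386 is the new TLS descriptor, and this wrapper does not consume it at all; copy_thread later reads it from the register image saved on the stack (see the new arch_copy_tls below, which pulls it out of UPT_ESI). A hedged userspace-side sketch of that raw calling convention (raw_clone is invented for illustration and would still need a properly prepared child stack before being called):

#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>		/* struct user_desc */

/* Raw i386 argument order, matching the prototype quoted in the comment:
 * clone(flags, child_stack, parent_tidptr, newtls, child_tidptr). */
static long raw_clone(unsigned long flags, void *child_stack,
		      int *parent_tidptr, struct user_desc *newtls,
		      int *child_tidptr)
{
	return syscall(SYS_clone, flags, child_stack, parent_tidptr,
		       newtls, child_tidptr);
}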
diff --git a/arch/um/sys-i386/tls.c b/arch/um/sys-i386/tls.c
new file mode 100644
index 000000000000..a3188e861cc7
--- /dev/null
+++ b/arch/um/sys-i386/tls.c
@@ -0,0 +1,384 @@
1/*
2 * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
3 * Licensed under the GPL
4 */
5
6#include "linux/config.h"
7#include "linux/kernel.h"
8#include "linux/sched.h"
9#include "linux/slab.h"
10#include "linux/types.h"
11#include "asm/uaccess.h"
12#include "asm/ptrace.h"
13#include "asm/segment.h"
14#include "asm/smp.h"
15#include "asm/desc.h"
16#include "choose-mode.h"
17#include "kern.h"
18#include "kern_util.h"
19#include "mode_kern.h"
20#include "os.h"
21#include "mode.h"
22
23#ifdef CONFIG_MODE_SKAS
24#include "skas.h"
25#endif
26
27/* If needed we can detect when it's uninitialized. */
28static int host_supports_tls = -1;
29int host_gdt_entry_tls_min = -1;
30
31#ifdef CONFIG_MODE_SKAS
32int do_set_thread_area_skas(struct user_desc *info)
33{
34 int ret;
35 u32 cpu;
36
37 cpu = get_cpu();
38 ret = os_set_thread_area(info, userspace_pid[cpu]);
39 put_cpu();
40 return ret;
41}
42
43int do_get_thread_area_skas(struct user_desc *info)
44{
45 int ret;
46 u32 cpu;
47
48 cpu = get_cpu();
49 ret = os_get_thread_area(info, userspace_pid[cpu]);
50 put_cpu();
51 return ret;
52}
53#endif
54
55/*
 56 * sys_get_thread_area: get an as-yet-unused TLS descriptor index.
 57 * XXX: Consider leaving one free slot for glibc usage in the first place. This must
58 * be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else.
59 *
 60 * Also, this must be tested when compiling in SKAS mode with dynamic linking
61 * and running against NPTL.
62 */
63static int get_free_idx(struct task_struct* task)
64{
65 struct thread_struct *t = &task->thread;
66 int idx;
67
68 if (!t->arch.tls_array)
69 return GDT_ENTRY_TLS_MIN;
70
71 for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
72 if (!t->arch.tls_array[idx].present)
73 return idx + GDT_ENTRY_TLS_MIN;
74 return -ESRCH;
75}
76
77static inline void clear_user_desc(struct user_desc* info)
78{
79 /* Postcondition: LDT_empty(info) returns true. */
80 memset(info, 0, sizeof(*info));
81
 82 /* Check the LDT_empty or the i386 sys_get_thread_area code - this does
 83 * indeed yield an empty user_desc.
84 */
85 info->read_exec_only = 1;
86 info->seg_not_present = 1;
87}
88
89#define O_FORCE 1
90
91static int load_TLS(int flags, struct task_struct *to)
92{
93 int ret = 0;
94 int idx;
95
96 for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
97 struct uml_tls_struct* curr = &to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];
98
 99 /* Actually, if it wasn't flushed it now gets cleared and
 100 * flushed to the host, which will clear it. */
101 if (!curr->present) {
102 if (!curr->flushed) {
103 clear_user_desc(&curr->tls);
104 curr->tls.entry_number = idx;
105 } else {
106 WARN_ON(!LDT_empty(&curr->tls));
107 continue;
108 }
109 }
110
111 if (!(flags & O_FORCE) && curr->flushed)
112 continue;
113
114 ret = do_set_thread_area(&curr->tls);
115 if (ret)
116 goto out;
117
118 curr->flushed = 1;
119 }
120out:
121 return ret;
122}
123
 124/* Check whether we need to do a flush for the new process, i.e. whether there
 125 * are any present descriptors that haven't been flushed yet.
126 */
127static inline int needs_TLS_update(struct task_struct *task)
128{
129 int i;
130 int ret = 0;
131
132 for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
133 struct uml_tls_struct* curr = &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
134
135 /* Can't test curr->present, we may need to clear a descriptor
136 * which had a value. */
137 if (curr->flushed)
138 continue;
139 ret = 1;
140 break;
141 }
142 return ret;
143}
144
145/* On a newly forked process, the TLS descriptors haven't yet been flushed. So
146 * we mark them as such and the first switch_to will do the job.
147 */
148void clear_flushed_tls(struct task_struct *task)
149{
150 int i;
151
152 for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
153 struct uml_tls_struct* curr = &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
154
 155 /* Still correct to do this: if it wasn't present on the host, it
 156 * will remain as flushed as it was. */
157 if (!curr->present)
158 continue;
159
160 curr->flushed = 0;
161 }
162}
163
164/* In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have a
165 * common host process. So this is needed in SKAS0 too.
166 *
 167 * However, if each thread had a different host process (and this was discussed
 168 * for SMP support) this wouldn't be needed.
 169 *
 170 * And this will not need to be used when (and if) we add support to the host
 171 * SKAS patch. */
172
173int arch_switch_tls_skas(struct task_struct *from, struct task_struct *to)
174{
175 if (!host_supports_tls)
176 return 0;
177
 178 /* We have no need whatsoever to switch TLS for kernel threads; beyond
 179 * that, doing so would also result in us calling os_set_thread_area with
 180 * userspace_pid[cpu] == 0, which gives an error. */
181 if (likely(to->mm))
182 return load_TLS(O_FORCE, to);
183
184 return 0;
185}
186
187int arch_switch_tls_tt(struct task_struct *from, struct task_struct *to)
188{
189 if (!host_supports_tls)
190 return 0;
191
192 if (needs_TLS_update(to))
193 return load_TLS(0, to);
194
195 return 0;
196}
197
198static int set_tls_entry(struct task_struct* task, struct user_desc *info,
199 int idx, int flushed)
200{
201 struct thread_struct *t = &task->thread;
202
203 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
204 return -EINVAL;
205
206 t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
207 t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
208 t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;
209
210 return 0;
211}
212
213int arch_copy_tls(struct task_struct *new)
214{
215 struct user_desc info;
216 int idx, ret = -EFAULT;
217
218 if (copy_from_user(&info,
219 (void __user *) UPT_ESI(&new->thread.regs.regs),
220 sizeof(info)))
221 goto out;
222
223 ret = -EINVAL;
224 if (LDT_empty(&info))
225 goto out;
226
227 idx = info.entry_number;
228
229 ret = set_tls_entry(new, &info, idx, 0);
230out:
231 return ret;
232}
233
234/* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */
235static int get_tls_entry(struct task_struct* task, struct user_desc *info, int idx)
236{
237 struct thread_struct *t = &task->thread;
238
239 if (!t->arch.tls_array)
240 goto clear;
241
242 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
243 return -EINVAL;
244
245 if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
246 goto clear;
247
248 *info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;
249
250out:
251 /* Temporary debugging check, to make sure that things have been
252 * flushed. This could be triggered if load_TLS() failed.
253 */
254 if (unlikely(task == current && !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
255 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
256 "without flushed TLS.", current->pid);
257 }
258
259 return 0;
260clear:
 261 /* When the TLS entry has not been set, the values read back to userspace
 262 * from the tls_array are 0 (because it's cleared at boot, see
263 * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
264 */
265 clear_user_desc(info);
266 info->entry_number = idx;
267 goto out;
268}
269
270asmlinkage int sys_set_thread_area(struct user_desc __user *user_desc)
271{
272 struct user_desc info;
273 int idx, ret;
274
275 if (!host_supports_tls)
276 return -ENOSYS;
277
278 if (copy_from_user(&info, user_desc, sizeof(info)))
279 return -EFAULT;
280
281 idx = info.entry_number;
282
283 if (idx == -1) {
284 idx = get_free_idx(current);
285 if (idx < 0)
286 return idx;
287 info.entry_number = idx;
288 /* Tell the user which slot we chose for him.*/
289 if (put_user(idx, &user_desc->entry_number))
290 return -EFAULT;
291 }
292
293 ret = CHOOSE_MODE_PROC(do_set_thread_area_tt, do_set_thread_area_skas, &info);
294 if (ret)
295 return ret;
296 return set_tls_entry(current, &info, idx, 1);
297}
298
299/*
300 * Perform set_thread_area on behalf of the traced child.
 301 * Note: error handling is not done on the deferred load, and this differs from
 302 * i386. However, the only possible errors are caused by bugs.
303 */
304int ptrace_set_thread_area(struct task_struct *child, int idx,
305 struct user_desc __user *user_desc)
306{
307 struct user_desc info;
308
309 if (!host_supports_tls)
310 return -EIO;
311
312 if (copy_from_user(&info, user_desc, sizeof(info)))
313 return -EFAULT;
314
315 return set_tls_entry(child, &info, idx, 0);
316}
317
318asmlinkage int sys_get_thread_area(struct user_desc __user *user_desc)
319{
320 struct user_desc info;
321 int idx, ret;
322
323 if (!host_supports_tls)
324 return -ENOSYS;
325
326 if (get_user(idx, &user_desc->entry_number))
327 return -EFAULT;
328
329 ret = get_tls_entry(current, &info, idx);
330 if (ret < 0)
331 goto out;
332
333 if (copy_to_user(user_desc, &info, sizeof(info)))
334 ret = -EFAULT;
335
336out:
337 return ret;
338}
339
340/*
341 * Perform get_thread_area on behalf of the traced child.
342 */
343int ptrace_get_thread_area(struct task_struct *child, int idx,
344 struct user_desc __user *user_desc)
345{
346 struct user_desc info;
347 int ret;
348
349 if (!host_supports_tls)
350 return -EIO;
351
352 ret = get_tls_entry(child, &info, idx);
353 if (ret < 0)
354 goto out;
355
356 if (copy_to_user(user_desc, &info, sizeof(info)))
357 ret = -EFAULT;
358out:
359 return ret;
360}
361
362
363/* XXX: This part is probably common to i386 and x86-64. Don't create a common
364 * file for now, do that when implementing x86-64 support.*/
365static int __init __setup_host_supports_tls(void) {
366 check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
367 if (host_supports_tls) {
368 printk(KERN_INFO "Host TLS support detected\n");
369 printk(KERN_INFO "Detected host type: ");
370 switch (host_gdt_entry_tls_min) {
371 case GDT_ENTRY_TLS_MIN_I386:
372 printk("i386\n");
373 break;
374 case GDT_ENTRY_TLS_MIN_X86_64:
375 printk("x86_64\n");
376 break;
377 }
378 } else
379 printk(KERN_ERR " Host TLS support NOT detected! "
380 "TLS support inside UML will not work\n");
381 return 1;
382}
383
384__initcall(__setup_host_supports_tls);
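The sys_set_thread_area() implementation above follows the usual i386 convention: when the caller passes entry_number == -1, the kernel picks a free GDT TLS slot via get_free_idx() and writes the chosen index back into the user's descriptor. A short userspace-side sketch of that convention, assuming an i386 UML guest (install_tls and the field values are illustrative only, not taken from the patch):

#include <unistd.h>
#include <sys/syscall.h>
#include <string.h>
#include <asm/ldt.h>		/* struct user_desc */

static int install_tls(void *base)
{
	struct user_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number = -1;			/* ask the kernel for a free slot */
	desc.base_addr = (unsigned long)base;
	desc.limit = 0xfffff;
	desc.seg_32bit = 1;
	desc.limit_in_pages = 1;
	desc.useable = 1;

	if (syscall(SYS_set_thread_area, &desc) != 0)
		return -1;

	return desc.entry_number;		/* kernel wrote back the chosen slot */
}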
diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile
index a351091fbd99..b5fc22babddf 100644
--- a/arch/um/sys-x86_64/Makefile
+++ b/arch/um/sys-x86_64/Makefile
@@ -4,31 +4,23 @@
4# Licensed under the GPL 4# Licensed under the GPL
5# 5#
6 6
7#XXX: why into lib-y? 7obj-y = bugs.o delay.o fault.o ldt.o mem.o ptrace.o ptrace_user.o \
8lib-y = bitops.o bugs.o csum-partial.o delay.o fault.o ldt.o mem.o memcpy.o \ 8 sigcontext.o signal.o syscalls.o syscall_table.o sysrq.o ksyms.o \
9 ptrace.o ptrace_user.o sigcontext.o signal.o syscalls.o \ 9 tls.o
10 syscall_table.o sysrq.o thunk.o
11lib-$(CONFIG_MODE_SKAS) += stub.o stub_segv.o
12 10
13obj-y := ksyms.o 11obj-$(CONFIG_MODE_SKAS) += stub.o stub_segv.o
14obj-$(CONFIG_MODULES) += module.o um_module.o 12obj-$(CONFIG_MODULES) += um_module.o
15 13
16USER_OBJS := ptrace_user.o sigcontext.o stub_segv.o 14subarch-obj-y = lib/bitops.o lib/csum-partial.o lib/memcpy.o lib/thunk.o
15subarch-obj-$(CONFIG_MODULES) += kernel/module.o
17 16
18SYMLINKS = bitops.c csum-copy.S csum-partial.c csum-wrappers.c ldt.c memcpy.S \ 17ldt-y = ../sys-i386/ldt.o
19 thunk.S module.c
20 18
21include arch/um/scripts/Makefile.rules 19USER_OBJS := ptrace_user.o sigcontext.o stub_segv.o
22 20
23bitops.c-dir = lib 21include arch/um/scripts/Makefile.rules
24csum-copy.S-dir = lib
25csum-partial.c-dir = lib
26csum-wrappers.c-dir = lib
27ldt.c-dir = /arch/um/sys-i386
28memcpy.S-dir = lib
29thunk.S-dir = lib
30module.c-dir = kernel
31 22
32$(obj)/stub_segv.o: _c_flags = $(call unprofile,$(CFLAGS)) 23extra-$(CONFIG_MODE_TT) += unmap.o
33 24
34include arch/um/scripts/Makefile.unmap 25$(obj)/stub_segv.o $(obj)/unmap.o: \
26 _c_flags = $(call unprofile,$(CFLAGS))
diff --git a/arch/um/sys-x86_64/tls.c b/arch/um/sys-x86_64/tls.c
new file mode 100644
index 000000000000..ce1bf1b81c43
--- /dev/null
+++ b/arch/um/sys-x86_64/tls.c
@@ -0,0 +1,14 @@
1#include "linux/sched.h"
2
3void debug_arch_force_load_TLS(void)
4{
5}
6
7void clear_flushed_tls(struct task_struct *task)
8{
9}
10
11int arch_copy_tls(struct task_struct *t)
12{
13 return 0;
14}
diff --git a/arch/x86_64/ia32/vsyscall-sigreturn.S b/arch/x86_64/ia32/vsyscall-sigreturn.S
index d90321fe9bba..1384367cdbe1 100644
--- a/arch/x86_64/ia32/vsyscall-sigreturn.S
+++ b/arch/x86_64/ia32/vsyscall-sigreturn.S
@@ -32,9 +32,28 @@ __kernel_rt_sigreturn:
32 .size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn 32 .size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
33 33
34 .section .eh_frame,"a",@progbits 34 .section .eh_frame,"a",@progbits
35.LSTARTFRAMES:
36 .long .LENDCIES-.LSTARTCIES
37.LSTARTCIES:
38 .long 0 /* CIE ID */
39 .byte 1 /* Version number */
40 .string "zRS" /* NUL-terminated augmentation string */
41 .uleb128 1 /* Code alignment factor */
42 .sleb128 -4 /* Data alignment factor */
43 .byte 8 /* Return address register column */
44 .uleb128 1 /* Augmentation value length */
45 .byte 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
46 .byte 0x0c /* DW_CFA_def_cfa */
47 .uleb128 4
48 .uleb128 4
49 .byte 0x88 /* DW_CFA_offset, column 0x8 */
50 .uleb128 1
51 .align 4
52.LENDCIES:
53
35 .long .LENDFDE2-.LSTARTFDE2 /* Length FDE */ 54 .long .LENDFDE2-.LSTARTFDE2 /* Length FDE */
36.LSTARTFDE2: 55.LSTARTFDE2:
37 .long .LSTARTFDE2-.LSTARTFRAME /* CIE pointer */ 56 .long .LSTARTFDE2-.LSTARTFRAMES /* CIE pointer */
38 /* HACK: The dwarf2 unwind routines will subtract 1 from the 57 /* HACK: The dwarf2 unwind routines will subtract 1 from the
39 return address to get an address in the middle of the 58 return address to get an address in the middle of the
40 presumed call instruction. Since we didn't get here via 59 presumed call instruction. Since we didn't get here via
@@ -97,7 +116,7 @@ __kernel_rt_sigreturn:
97 116
98 .long .LENDFDE3-.LSTARTFDE3 /* Length FDE */ 117 .long .LENDFDE3-.LSTARTFDE3 /* Length FDE */
99.LSTARTFDE3: 118.LSTARTFDE3:
100 .long .LSTARTFDE3-.LSTARTFRAME /* CIE pointer */ 119 .long .LSTARTFDE3-.LSTARTFRAMES /* CIE pointer */
101 /* HACK: See above wrt unwind library assumptions. */ 120 /* HACK: See above wrt unwind library assumptions. */
102 .long .LSTART_rt_sigreturn-1-. /* PC-relative start address */ 121 .long .LSTART_rt_sigreturn-1-. /* PC-relative start address */
103 .long .LEND_rt_sigreturn-.LSTART_rt_sigreturn+1 122 .long .LEND_rt_sigreturn-.LSTART_rt_sigreturn+1
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index d54620147e8e..100a30c40044 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -615,7 +615,7 @@ static int __init apic_set_verbosity(char *str)
615 printk(KERN_WARNING "APIC Verbosity level %s not recognised" 615 printk(KERN_WARNING "APIC Verbosity level %s not recognised"
616 " use apic=verbose or apic=debug", str); 616 " use apic=verbose or apic=debug", str);
617 617
618 return 0; 618 return 1;
619} 619}
620 620
621__setup("apic=", apic_set_verbosity); 621__setup("apic=", apic_set_verbosity);
@@ -1137,35 +1137,35 @@ int __init APIC_init_uniprocessor (void)
1137static __init int setup_disableapic(char *str) 1137static __init int setup_disableapic(char *str)
1138{ 1138{
1139 disable_apic = 1; 1139 disable_apic = 1;
1140 return 0; 1140 return 1;
1141} 1141}
1142 1142
1143static __init int setup_nolapic(char *str) 1143static __init int setup_nolapic(char *str)
1144{ 1144{
1145 disable_apic = 1; 1145 disable_apic = 1;
1146 return 0; 1146 return 1;
1147} 1147}
1148 1148
1149static __init int setup_noapictimer(char *str) 1149static __init int setup_noapictimer(char *str)
1150{ 1150{
1151 if (str[0] != ' ' && str[0] != 0) 1151 if (str[0] != ' ' && str[0] != 0)
1152 return -1; 1152 return 0;
1153 disable_apic_timer = 1; 1153 disable_apic_timer = 1;
1154 return 0; 1154 return 1;
1155} 1155}
1156 1156
1157static __init int setup_apicmaintimer(char *str) 1157static __init int setup_apicmaintimer(char *str)
1158{ 1158{
1159 apic_runs_main_timer = 1; 1159 apic_runs_main_timer = 1;
1160 nohpet = 1; 1160 nohpet = 1;
1161 return 0; 1161 return 1;
1162} 1162}
1163__setup("apicmaintimer", setup_apicmaintimer); 1163__setup("apicmaintimer", setup_apicmaintimer);
1164 1164
1165static __init int setup_noapicmaintimer(char *str) 1165static __init int setup_noapicmaintimer(char *str)
1166{ 1166{
1167 apic_runs_main_timer = -1; 1167 apic_runs_main_timer = -1;
1168 return 0; 1168 return 1;
1169} 1169}
1170__setup("noapicmaintimer", setup_noapicmaintimer); 1170__setup("noapicmaintimer", setup_noapicmaintimer);
1171 1171
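The return-value changes in this file and in the several files that follow all apply the same rule (stated here as my reading of the series, not as text from the patch): a __setup() handler is expected to return non-zero once it has consumed the boot option, while returning 0 makes the option-parsing code treat it as unrecognised and pass it along, ultimately to init. A minimal hedged sketch with an invented option name:

static int foo_force;	/* hypothetical flag, not from any of these patches */

static int __init foo_setup(char *str)
{
	foo_force = 1;
	return 1;	/* option consumed; don't hand "foo" on to init */
}
__setup("foo", foo_setup);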
diff --git a/arch/x86_64/kernel/early_printk.c b/arch/x86_64/kernel/early_printk.c
index 13af920b6594..b93ef5b51980 100644
--- a/arch/x86_64/kernel/early_printk.c
+++ b/arch/x86_64/kernel/early_printk.c
@@ -221,7 +221,7 @@ int __init setup_early_printk(char *opt)
221 char buf[256]; 221 char buf[256];
222 222
223 if (early_console_initialized) 223 if (early_console_initialized)
224 return -1; 224 return 1;
225 225
226 strlcpy(buf,opt,sizeof(buf)); 226 strlcpy(buf,opt,sizeof(buf));
227 space = strchr(buf, ' '); 227 space = strchr(buf, ' ');
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 04282ef9fbd4..10b3e348fc99 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -501,7 +501,7 @@ static struct miscdevice mce_log_device = {
501static int __init mcheck_disable(char *str) 501static int __init mcheck_disable(char *str)
502{ 502{
503 mce_dont_init = 1; 503 mce_dont_init = 1;
504 return 0; 504 return 1;
505} 505}
506 506
507/* mce=off disables machine check. Note you can reenable it later 507/* mce=off disables machine check. Note you can reenable it later
@@ -521,7 +521,7 @@ static int __init mcheck_enable(char *str)
521 get_option(&str, &tolerant); 521 get_option(&str, &tolerant);
522 else 522 else
523 printk("mce= argument %s ignored. Please use /sys", str); 523 printk("mce= argument %s ignored. Please use /sys", str);
524 return 0; 524 return 1;
525} 525}
526 526
527__setup("nomce", mcheck_disable); 527__setup("nomce", mcheck_disable);
diff --git a/arch/x86_64/kernel/pmtimer.c b/arch/x86_64/kernel/pmtimer.c
index ee5ee4891f3d..b0444a415bd6 100644
--- a/arch/x86_64/kernel/pmtimer.c
+++ b/arch/x86_64/kernel/pmtimer.c
@@ -121,7 +121,7 @@ unsigned int do_gettimeoffset_pm(void)
121static int __init nopmtimer_setup(char *s) 121static int __init nopmtimer_setup(char *s)
122{ 122{
123 pmtmr_ioport = 0; 123 pmtmr_ioport = 0;
124 return 0; 124 return 1;
125} 125}
126 126
127__setup("nopmtimer", nopmtimer_setup); 127__setup("nopmtimer", nopmtimer_setup);
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index d1f3e9272c05..0856ad444f90 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -540,7 +540,7 @@ void __init alternative_instructions(void)
540static int __init noreplacement_setup(char *s) 540static int __init noreplacement_setup(char *s)
541{ 541{
542 no_replacement = 1; 542 no_replacement = 1;
543 return 0; 543 return 1;
544} 544}
545 545
546__setup("noreplacement", noreplacement_setup); 546__setup("noreplacement", noreplacement_setup);
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index eabdb63fec31..8a691fa6d393 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -55,7 +55,7 @@ int __init nonx_setup(char *str)
55 do_not_nx = 1; 55 do_not_nx = 1;
56 __supported_pte_mask &= ~_PAGE_NX; 56 __supported_pte_mask &= ~_PAGE_NX;
57 } 57 }
58 return 0; 58 return 1;
59} 59}
60__setup("noexec=", nonx_setup); /* parsed early actually */ 60__setup("noexec=", nonx_setup); /* parsed early actually */
61 61
@@ -74,7 +74,7 @@ static int __init nonx32_setup(char *str)
74 force_personality32 &= ~READ_IMPLIES_EXEC; 74 force_personality32 &= ~READ_IMPLIES_EXEC;
75 else if (!strcmp(str, "off")) 75 else if (!strcmp(str, "off"))
76 force_personality32 |= READ_IMPLIES_EXEC; 76 force_personality32 |= READ_IMPLIES_EXEC;
77 return 0; 77 return 1;
78} 78}
79__setup("noexec32=", nonx32_setup); 79__setup("noexec32=", nonx32_setup);
80 80
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index ea48fa638070..71a7222cf9ce 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -353,7 +353,7 @@ static void __cpuinit tsc_sync_wait(void)
353static __init int notscsync_setup(char *s) 353static __init int notscsync_setup(char *s)
354{ 354{
355 notscsync = 1; 355 notscsync = 1;
356 return 0; 356 return 1;
357} 357}
358__setup("notscsync", notscsync_setup); 358__setup("notscsync", notscsync_setup);
359 359
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 473b514b66e4..ef8bc46dc140 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -1306,7 +1306,7 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1306static int __init nohpet_setup(char *s) 1306static int __init nohpet_setup(char *s)
1307{ 1307{
1308 nohpet = 1; 1308 nohpet = 1;
1309 return 0; 1309 return 1;
1310} 1310}
1311 1311
1312__setup("nohpet", nohpet_setup); 1312__setup("nohpet", nohpet_setup);
@@ -1314,7 +1314,7 @@ __setup("nohpet", nohpet_setup);
1314int __init notsc_setup(char *s) 1314int __init notsc_setup(char *s)
1315{ 1315{
1316 notsc = 1; 1316 notsc = 1;
1317 return 0; 1317 return 1;
1318} 1318}
1319 1319
1320__setup("notsc", notsc_setup); 1320__setup("notsc", notsc_setup);
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index edaa9fe654dc..6bda322d3caf 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -973,14 +973,14 @@ void __init trap_init(void)
973static int __init oops_dummy(char *s) 973static int __init oops_dummy(char *s)
974{ 974{
975 panic_on_oops = 1; 975 panic_on_oops = 1;
976 return -1; 976 return 1;
977} 977}
978__setup("oops=", oops_dummy); 978__setup("oops=", oops_dummy);
979 979
980static int __init kstack_setup(char *s) 980static int __init kstack_setup(char *s)
981{ 981{
982 kstack_depth_to_print = simple_strtoul(s,NULL,0); 982 kstack_depth_to_print = simple_strtoul(s,NULL,0);
983 return 0; 983 return 1;
984} 984}
985__setup("kstack=", kstack_setup); 985__setup("kstack=", kstack_setup);
986 986
diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
index d96a9348e5a2..d78f46056bda 100644
--- a/arch/x86_64/kernel/x8664_ksyms.c
+++ b/arch/x86_64/kernel/x8664_ksyms.c
@@ -102,8 +102,6 @@ EXPORT_SYMBOL(cpu_callout_map);
102EXPORT_SYMBOL(screen_info); 102EXPORT_SYMBOL(screen_info);
103#endif 103#endif
104 104
105EXPORT_SYMBOL(get_wchan);
106
107EXPORT_SYMBOL(rtc_lock); 105EXPORT_SYMBOL(rtc_lock);
108 106
109EXPORT_SYMBOL_GPL(set_nmi_callback); 107EXPORT_SYMBOL_GPL(set_nmi_callback);
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 316c53de47bd..55250593d8c9 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -623,6 +623,6 @@ void vmalloc_sync_all(void)
623static int __init enable_pagefaulttrace(char *str) 623static int __init enable_pagefaulttrace(char *str)
624{ 624{
625 page_fault_trace = 1; 625 page_fault_trace = 1;
626 return 0; 626 return 1;
627} 627}
628__setup("pagefaulttrace", enable_pagefaulttrace); 628__setup("pagefaulttrace", enable_pagefaulttrace);
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index efae56a51475..152b9370789b 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -113,8 +113,6 @@ EXPORT_SYMBOL(__xtensa_copy_user);
113// FIXME EXPORT_SYMBOL(screen_info); 113// FIXME EXPORT_SYMBOL(screen_info);
114#endif 114#endif
115 115
116EXPORT_SYMBOL(get_wchan);
117
118EXPORT_SYMBOL(outsb); 116EXPORT_SYMBOL(outsb);
119EXPORT_SYMBOL(outsw); 117EXPORT_SYMBOL(outsw);
120EXPORT_SYMBOL(outsl); 118EXPORT_SYMBOL(outsl);
diff --git a/block/Kconfig b/block/Kconfig
index 5536839886ff..b6f5f0a79655 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -27,10 +27,10 @@ config BLK_DEV_IO_TRACE
27config LSF 27config LSF
28 bool "Support for Large Single Files" 28 bool "Support for Large Single Files"
29 depends on X86 || (MIPS && 32BIT) || PPC32 || ARCH_S390_31 || SUPERH || UML 29 depends on X86 || (MIPS && 32BIT) || PPC32 || ARCH_S390_31 || SUPERH || UML
30 default n
31 help 30 help
32 When CONFIG_LBD is disabled, say Y here if you want to 31 Say Y here if you want to be able to handle very large files (bigger
33 handle large file(bigger than 2TB), otherwise say N. 32 than 2TB), otherwise say N.
34 When CONFIG_LBD is enabled, Y is set automatically. 33
34 If unsure, say Y.
35 35
36source block/Kconfig.iosched 36source block/Kconfig.iosched
diff --git a/block/elevator.c b/block/elevator.c
index 56c2ed06a9e2..0d6be03d929e 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -145,7 +145,7 @@ static int __init elevator_setup(char *str)
145 strcpy(chosen_elevator, "anticipatory"); 145 strcpy(chosen_elevator, "anticipatory");
146 else 146 else
147 strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1); 147 strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
148 return 0; 148 return 1;
149} 149}
150 150
151__setup("elevator=", elevator_setup); 151__setup("elevator=", elevator_setup);
diff --git a/block/genhd.c b/block/genhd.c
index db4c60c802d6..5a8d3bf02f17 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -17,8 +17,6 @@
17#include <linux/buffer_head.h> 17#include <linux/buffer_head.h>
18#include <linux/mutex.h> 18#include <linux/mutex.h>
19 19
20#define MAX_PROBE_HASH 255 /* random */
21
22static struct subsystem block_subsys; 20static struct subsystem block_subsys;
23 21
24static DEFINE_MUTEX(block_subsys_lock); 22static DEFINE_MUTEX(block_subsys_lock);
@@ -31,108 +29,29 @@ static struct blk_major_name {
31 struct blk_major_name *next; 29 struct blk_major_name *next;
32 int major; 30 int major;
33 char name[16]; 31 char name[16];
34} *major_names[MAX_PROBE_HASH]; 32} *major_names[BLKDEV_MAJOR_HASH_SIZE];
35 33
36/* index in the above - for now: assume no multimajor ranges */ 34/* index in the above - for now: assume no multimajor ranges */
37static inline int major_to_index(int major) 35static inline int major_to_index(int major)
38{ 36{
39 return major % MAX_PROBE_HASH; 37 return major % BLKDEV_MAJOR_HASH_SIZE;
40}
41
42struct blkdev_info {
43 int index;
44 struct blk_major_name *bd;
45};
46
47/*
48 * iterate over a list of blkdev_info structures. allows
49 * the major_names array to be iterated over from outside this file
50 * must be called with the block_subsys_lock held
51 */
52void *get_next_blkdev(void *dev)
53{
54 struct blkdev_info *info;
55
56 if (dev == NULL) {
57 info = kmalloc(sizeof(*info), GFP_KERNEL);
58 if (!info)
59 goto out;
60 info->index=0;
61 info->bd = major_names[info->index];
62 if (info->bd)
63 goto out;
64 } else {
65 info = dev;
66 }
67
68 while (info->index < ARRAY_SIZE(major_names)) {
69 if (info->bd)
70 info->bd = info->bd->next;
71 if (info->bd)
72 goto out;
73 /*
74 * No devices on this chain, move to the next
75 */
76 info->index++;
77 info->bd = (info->index < ARRAY_SIZE(major_names)) ?
78 major_names[info->index] : NULL;
79 if (info->bd)
80 goto out;
81 }
82
83out:
84 return info;
85}
86
87void *acquire_blkdev_list(void)
88{
89 mutex_lock(&block_subsys_lock);
90 return get_next_blkdev(NULL);
91}
92
93void release_blkdev_list(void *dev)
94{
95 mutex_unlock(&block_subsys_lock);
96 kfree(dev);
97} 38}
98 39
40#ifdef CONFIG_PROC_FS
99 41
100/* 42void blkdev_show(struct seq_file *f, off_t offset)
101 * Count the number of records in the blkdev_list.
102 * must be called with the block_subsys_lock held
103 */
104int count_blkdev_list(void)
105{ 43{
106 struct blk_major_name *n; 44 struct blk_major_name *dp;
107 int i, count;
108 45
109 count = 0; 46 if (offset < BLKDEV_MAJOR_HASH_SIZE) {
110 47 mutex_lock(&block_subsys_lock);
111 for (i = 0; i < ARRAY_SIZE(major_names); i++) { 48 for (dp = major_names[offset]; dp; dp = dp->next)
112 for (n = major_names[i]; n; n = n->next) 49 seq_printf(f, "%3d %s\n", dp->major, dp->name);
113 count++; 50 mutex_unlock(&block_subsys_lock);
114 } 51 }
115
116 return count;
117}
118
119/*
120 * extract the major and name values from a blkdev_info struct
121 * passed in as a void to *dev. Must be called with
122 * block_subsys_lock held
123 */
124int get_blkdev_info(void *dev, int *major, char **name)
125{
126 struct blkdev_info *info = dev;
127
128 if (info->bd == NULL)
129 return 1;
130
131 *major = info->bd->major;
132 *name = info->bd->name;
133 return 0;
134} 52}
135 53
54#endif /* CONFIG_PROC_FS */
136 55
137int register_blkdev(unsigned int major, const char *name) 56int register_blkdev(unsigned int major, const char *name)
138{ 57{
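The genhd.c hunk above replaces the old acquire/get_next/count/get_blkdev_info iterator API with a single blkdev_show(), which prints one major_names hash bucket per call under block_subsys_lock and is presumably driven bucket by bucket from the /proc code. A hypothetical helper (not in the patch) showing how a caller could walk every bucket:

/* Hypothetical: dump every registered block-device major via the new hook. */
static void dump_blkdev_majors(struct seq_file *f)
{
	off_t bucket;

	for (bucket = 0; bucket < BLKDEV_MAJOR_HASH_SIZE; bucket++)
		blkdev_show(f, bucket);		/* emits "%3d %s\n" per major */
}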
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 5b26af8597f3..e112d1a5dab6 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1740,7 +1740,7 @@ EXPORT_SYMBOL(blk_run_queue);
1740 1740
1741/** 1741/**
1742 * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed 1742 * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
 1743 * @q: the request queue to be released 1743 * @kobj: the kobj belonging to the request queue to be released
1744 * 1744 *
1745 * Description: 1745 * Description:
1746 * blk_cleanup_queue is the pair to blk_init_queue() or 1746 * blk_cleanup_queue is the pair to blk_init_queue() or
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 9f5c0da57c90..5c91d6afb117 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -64,6 +64,8 @@ source "drivers/usb/Kconfig"
64 64
65source "drivers/mmc/Kconfig" 65source "drivers/mmc/Kconfig"
66 66
67source "drivers/leds/Kconfig"
68
67source "drivers/infiniband/Kconfig" 69source "drivers/infiniband/Kconfig"
68 70
69source "drivers/sn/Kconfig" 71source "drivers/sn/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 424955274e60..447d8e68887a 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -25,9 +25,6 @@ obj-$(CONFIG_CONNECTOR) += connector/
25obj-$(CONFIG_FB_I810) += video/i810/ 25obj-$(CONFIG_FB_I810) += video/i810/
26obj-$(CONFIG_FB_INTEL) += video/intelfb/ 26obj-$(CONFIG_FB_INTEL) += video/intelfb/
27 27
28# we also need input/serio early so serio bus is initialized by the time
29# serial drivers start registering their serio ports
30obj-$(CONFIG_SERIO) += input/serio/
31obj-y += serial/ 28obj-y += serial/
32obj-$(CONFIG_PARPORT) += parport/ 29obj-$(CONFIG_PARPORT) += parport/
33obj-y += base/ block/ misc/ mfd/ net/ media/ 30obj-y += base/ block/ misc/ mfd/ net/ media/
@@ -53,6 +50,7 @@ obj-$(CONFIG_TC) += tc/
53obj-$(CONFIG_USB) += usb/ 50obj-$(CONFIG_USB) += usb/
54obj-$(CONFIG_PCI) += usb/ 51obj-$(CONFIG_PCI) += usb/
55obj-$(CONFIG_USB_GADGET) += usb/gadget/ 52obj-$(CONFIG_USB_GADGET) += usb/gadget/
53obj-$(CONFIG_SERIO) += input/serio/
56obj-$(CONFIG_GAMEPORT) += input/gameport/ 54obj-$(CONFIG_GAMEPORT) += input/gameport/
57obj-$(CONFIG_INPUT) += input/ 55obj-$(CONFIG_INPUT) += input/
58obj-$(CONFIG_I2O) += message/ 56obj-$(CONFIG_I2O) += message/
@@ -69,7 +67,9 @@ obj-$(CONFIG_MCA) += mca/
69obj-$(CONFIG_EISA) += eisa/ 67obj-$(CONFIG_EISA) += eisa/
70obj-$(CONFIG_CPU_FREQ) += cpufreq/ 68obj-$(CONFIG_CPU_FREQ) += cpufreq/
71obj-$(CONFIG_MMC) += mmc/ 69obj-$(CONFIG_MMC) += mmc/
70obj-$(CONFIG_NEW_LEDS) += leds/
72obj-$(CONFIG_INFINIBAND) += infiniband/ 71obj-$(CONFIG_INFINIBAND) += infiniband/
72obj-$(CONFIG_IPATH_CORE) += infiniband/
73obj-$(CONFIG_SGI_SN) += sn/ 73obj-$(CONFIG_SGI_SN) += sn/
74obj-y += firmware/ 74obj-y += firmware/
75obj-$(CONFIG_CRYPTO) += crypto/ 75obj-$(CONFIG_CRYPTO) += crypto/
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 79b09d76c180..eee0864ba300 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1572,7 +1572,7 @@ static void __exit acpi_ec_exit(void)
1572static int __init acpi_fake_ecdt_setup(char *str) 1572static int __init acpi_fake_ecdt_setup(char *str)
1573{ 1573{
1574 acpi_fake_ecdt_enabled = 1; 1574 acpi_fake_ecdt_enabled = 1;
1575 return 0; 1575 return 1;
1576} 1576}
1577 1577
1578__setup("acpi_fake_ecdt", acpi_fake_ecdt_setup); 1578__setup("acpi_fake_ecdt", acpi_fake_ecdt_setup);
@@ -1591,7 +1591,7 @@ static int __init acpi_ec_set_intr_mode(char *str)
1591 acpi_ec_driver.ops.add = acpi_ec_poll_add; 1591 acpi_ec_driver.ops.add = acpi_ec_poll_add;
1592 } 1592 }
1593 printk(KERN_INFO PREFIX "EC %s mode.\n", intr ? "interrupt" : "polling"); 1593 printk(KERN_INFO PREFIX "EC %s mode.\n", intr ? "interrupt" : "polling");
1594 return 0; 1594 return 1;
1595} 1595}
1596 1596
1597__setup("ec_intr=", acpi_ec_set_intr_mode); 1597__setup("ec_intr=", acpi_ec_set_intr_mode);
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index b6e290956214..2a8af685926f 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1850,6 +1850,7 @@ static int __init amiga_floppy_setup (char *str)
1850 return 0; 1850 return 0;
1851 printk (KERN_INFO "amiflop: Setting default df0 to %x\n", n); 1851 printk (KERN_INFO "amiflop: Setting default df0 to %x\n", n);
1852 fd_def_df0 = n; 1852 fd_def_df0 = n;
1853 return 1;
1853} 1854}
1854 1855
1855__setup("floppy=", amiga_floppy_setup); 1856__setup("floppy=", amiga_floppy_setup);
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 9888bc151755..473a13b22b29 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -65,7 +65,7 @@ MODULE_LICENSE("GPL");
65 65
66 66
67typedef struct bluecard_info_t { 67typedef struct bluecard_info_t {
68 dev_link_t link; 68 struct pcmcia_device *p_dev;
69 dev_node_t node; 69 dev_node_t node;
70 70
71 struct hci_dev *hdev; 71 struct hci_dev *hdev;
@@ -85,8 +85,8 @@ typedef struct bluecard_info_t {
85} bluecard_info_t; 85} bluecard_info_t;
86 86
87 87
88static void bluecard_config(dev_link_t *link); 88static int bluecard_config(struct pcmcia_device *link);
89static void bluecard_release(dev_link_t *link); 89static void bluecard_release(struct pcmcia_device *link);
90 90
91static void bluecard_detach(struct pcmcia_device *p_dev); 91static void bluecard_detach(struct pcmcia_device *p_dev);
92 92
@@ -162,7 +162,7 @@ static void bluecard_detach(struct pcmcia_device *p_dev);
162static void bluecard_activity_led_timeout(u_long arg) 162static void bluecard_activity_led_timeout(u_long arg)
163{ 163{
164 bluecard_info_t *info = (bluecard_info_t *)arg; 164 bluecard_info_t *info = (bluecard_info_t *)arg;
165 unsigned int iobase = info->link.io.BasePort1; 165 unsigned int iobase = info->p_dev->io.BasePort1;
166 166
167 if (!test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) 167 if (!test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
168 return; 168 return;
@@ -179,7 +179,7 @@ static void bluecard_activity_led_timeout(u_long arg)
179 179
180static void bluecard_enable_activity_led(bluecard_info_t *info) 180static void bluecard_enable_activity_led(bluecard_info_t *info)
181{ 181{
182 unsigned int iobase = info->link.io.BasePort1; 182 unsigned int iobase = info->p_dev->io.BasePort1;
183 183
184 if (!test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) 184 if (!test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
185 return; 185 return;
@@ -235,7 +235,7 @@ static void bluecard_write_wakeup(bluecard_info_t *info)
235 } 235 }
236 236
237 do { 237 do {
238 register unsigned int iobase = info->link.io.BasePort1; 238 register unsigned int iobase = info->p_dev->io.BasePort1;
239 register unsigned int offset; 239 register unsigned int offset;
240 register unsigned char command; 240 register unsigned char command;
241 register unsigned long ready_bit; 241 register unsigned long ready_bit;
@@ -244,7 +244,7 @@ static void bluecard_write_wakeup(bluecard_info_t *info)
244 244
245 clear_bit(XMIT_WAKEUP, &(info->tx_state)); 245 clear_bit(XMIT_WAKEUP, &(info->tx_state));
246 246
247 if (!(info->link.state & DEV_PRESENT)) 247 if (!pcmcia_dev_present(info->p_dev))
248 return; 248 return;
249 249
250 if (test_bit(XMIT_BUFFER_NUMBER, &(info->tx_state))) { 250 if (test_bit(XMIT_BUFFER_NUMBER, &(info->tx_state))) {
@@ -382,7 +382,7 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
382 return; 382 return;
383 } 383 }
384 384
385 iobase = info->link.io.BasePort1; 385 iobase = info->p_dev->io.BasePort1;
386 386
387 if (test_bit(XMIT_SENDING_READY, &(info->tx_state))) 387 if (test_bit(XMIT_SENDING_READY, &(info->tx_state)))
388 bluecard_enable_activity_led(info); 388 bluecard_enable_activity_led(info);
@@ -512,7 +512,7 @@ static irqreturn_t bluecard_interrupt(int irq, void *dev_inst, struct pt_regs *r
512 if (!test_bit(CARD_READY, &(info->hw_state))) 512 if (!test_bit(CARD_READY, &(info->hw_state)))
513 return IRQ_HANDLED; 513 return IRQ_HANDLED;
514 514
515 iobase = info->link.io.BasePort1; 515 iobase = info->p_dev->io.BasePort1;
516 516
517 spin_lock(&(info->lock)); 517 spin_lock(&(info->lock));
518 518
@@ -626,7 +626,7 @@ static int bluecard_hci_flush(struct hci_dev *hdev)
626static int bluecard_hci_open(struct hci_dev *hdev) 626static int bluecard_hci_open(struct hci_dev *hdev)
627{ 627{
628 bluecard_info_t *info = (bluecard_info_t *)(hdev->driver_data); 628 bluecard_info_t *info = (bluecard_info_t *)(hdev->driver_data);
629 unsigned int iobase = info->link.io.BasePort1; 629 unsigned int iobase = info->p_dev->io.BasePort1;
630 630
631 if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) 631 if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
632 bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE); 632 bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE);
@@ -646,7 +646,7 @@ static int bluecard_hci_open(struct hci_dev *hdev)
646static int bluecard_hci_close(struct hci_dev *hdev) 646static int bluecard_hci_close(struct hci_dev *hdev)
647{ 647{
648 bluecard_info_t *info = (bluecard_info_t *)(hdev->driver_data); 648 bluecard_info_t *info = (bluecard_info_t *)(hdev->driver_data);
649 unsigned int iobase = info->link.io.BasePort1; 649 unsigned int iobase = info->p_dev->io.BasePort1;
650 650
651 if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags))) 651 if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags)))
652 return 0; 652 return 0;
@@ -713,7 +713,7 @@ static int bluecard_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned l
713 713
714static int bluecard_open(bluecard_info_t *info) 714static int bluecard_open(bluecard_info_t *info)
715{ 715{
716 unsigned int iobase = info->link.io.BasePort1; 716 unsigned int iobase = info->p_dev->io.BasePort1;
717 struct hci_dev *hdev; 717 struct hci_dev *hdev;
718 unsigned char id; 718 unsigned char id;
719 719
@@ -831,7 +831,7 @@ static int bluecard_open(bluecard_info_t *info)
831 831
832static int bluecard_close(bluecard_info_t *info) 832static int bluecard_close(bluecard_info_t *info)
833{ 833{
834 unsigned int iobase = info->link.io.BasePort1; 834 unsigned int iobase = info->p_dev->io.BasePort1;
835 struct hci_dev *hdev = info->hdev; 835 struct hci_dev *hdev = info->hdev;
836 836
837 if (!hdev) 837 if (!hdev)
@@ -856,17 +856,16 @@ static int bluecard_close(bluecard_info_t *info)
856 return 0; 856 return 0;
857} 857}
858 858
859static int bluecard_attach(struct pcmcia_device *p_dev) 859static int bluecard_probe(struct pcmcia_device *link)
860{ 860{
861 bluecard_info_t *info; 861 bluecard_info_t *info;
862 dev_link_t *link;
863 862
864 /* Create new info device */ 863 /* Create new info device */
865 info = kzalloc(sizeof(*info), GFP_KERNEL); 864 info = kzalloc(sizeof(*info), GFP_KERNEL);
866 if (!info) 865 if (!info)
867 return -ENOMEM; 866 return -ENOMEM;
868 867
869 link = &info->link; 868 info->p_dev = link;
870 link->priv = info; 869 link->priv = info;
871 870
872 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 871 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
@@ -878,32 +877,22 @@ static int bluecard_attach(struct pcmcia_device *p_dev)
878 link->irq.Instance = info; 877 link->irq.Instance = info;
879 878
880 link->conf.Attributes = CONF_ENABLE_IRQ; 879 link->conf.Attributes = CONF_ENABLE_IRQ;
881 link->conf.Vcc = 50;
882 link->conf.IntType = INT_MEMORY_AND_IO; 880 link->conf.IntType = INT_MEMORY_AND_IO;
883 881
884 link->handle = p_dev; 882 return bluecard_config(link);
885 p_dev->instance = link;
886
887 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
888 bluecard_config(link);
889
890 return 0;
891} 883}
892 884
893 885
894static void bluecard_detach(struct pcmcia_device *p_dev) 886static void bluecard_detach(struct pcmcia_device *link)
895{ 887{
896 dev_link_t *link = dev_to_instance(p_dev);
897 bluecard_info_t *info = link->priv; 888 bluecard_info_t *info = link->priv;
898 889
899 if (link->state & DEV_CONFIG) 890 bluecard_release(link);
900 bluecard_release(link);
901
902 kfree(info); 891 kfree(info);
903} 892}
904 893
905 894
906static int first_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse) 895static int first_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
907{ 896{
908 int i; 897 int i;
909 898
@@ -918,14 +907,12 @@ static int first_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse
918 return pcmcia_parse_tuple(handle, tuple, parse); 907 return pcmcia_parse_tuple(handle, tuple, parse);
919} 908}
920 909
921static void bluecard_config(dev_link_t *link) 910static int bluecard_config(struct pcmcia_device *link)
922{ 911{
923 client_handle_t handle = link->handle;
924 bluecard_info_t *info = link->priv; 912 bluecard_info_t *info = link->priv;
925 tuple_t tuple; 913 tuple_t tuple;
926 u_short buf[256]; 914 u_short buf[256];
927 cisparse_t parse; 915 cisparse_t parse;
928 config_info_t config;
929 int i, n, last_ret, last_fn; 916 int i, n, last_ret, last_fn;
930 917
931 tuple.TupleData = (cisdata_t *)buf; 918 tuple.TupleData = (cisdata_t *)buf;
@@ -935,7 +922,7 @@ static void bluecard_config(dev_link_t *link)
935 922
936 /* Get configuration register information */ 923 /* Get configuration register information */
937 tuple.DesiredTuple = CISTPL_CONFIG; 924 tuple.DesiredTuple = CISTPL_CONFIG;
938 last_ret = first_tuple(handle, &tuple, &parse); 925 last_ret = first_tuple(link, &tuple, &parse);
939 if (last_ret != CS_SUCCESS) { 926 if (last_ret != CS_SUCCESS) {
940 last_fn = ParseTuple; 927 last_fn = ParseTuple;
941 goto cs_failed; 928 goto cs_failed;
@@ -943,36 +930,31 @@ static void bluecard_config(dev_link_t *link)
943 link->conf.ConfigBase = parse.config.base; 930 link->conf.ConfigBase = parse.config.base;
944 link->conf.Present = parse.config.rmask[0]; 931 link->conf.Present = parse.config.rmask[0];
945 932
946 /* Configure card */
947 link->state |= DEV_CONFIG;
948 i = pcmcia_get_configuration_info(handle, &config);
949 link->conf.Vcc = config.Vcc;
950
951 link->conf.ConfigIndex = 0x20; 933 link->conf.ConfigIndex = 0x20;
952 link->io.NumPorts1 = 64; 934 link->io.NumPorts1 = 64;
953 link->io.IOAddrLines = 6; 935 link->io.IOAddrLines = 6;
954 936
955 for (n = 0; n < 0x400; n += 0x40) { 937 for (n = 0; n < 0x400; n += 0x40) {
956 link->io.BasePort1 = n ^ 0x300; 938 link->io.BasePort1 = n ^ 0x300;
957 i = pcmcia_request_io(link->handle, &link->io); 939 i = pcmcia_request_io(link, &link->io);
958 if (i == CS_SUCCESS) 940 if (i == CS_SUCCESS)
959 break; 941 break;
960 } 942 }
961 943
962 if (i != CS_SUCCESS) { 944 if (i != CS_SUCCESS) {
963 cs_error(link->handle, RequestIO, i); 945 cs_error(link, RequestIO, i);
964 goto failed; 946 goto failed;
965 } 947 }
966 948
967 i = pcmcia_request_irq(link->handle, &link->irq); 949 i = pcmcia_request_irq(link, &link->irq);
968 if (i != CS_SUCCESS) { 950 if (i != CS_SUCCESS) {
969 cs_error(link->handle, RequestIRQ, i); 951 cs_error(link, RequestIRQ, i);
970 link->irq.AssignedIRQ = 0; 952 link->irq.AssignedIRQ = 0;
971 } 953 }
972 954
973 i = pcmcia_request_configuration(link->handle, &link->conf); 955 i = pcmcia_request_configuration(link, &link->conf);
974 if (i != CS_SUCCESS) { 956 if (i != CS_SUCCESS) {
975 cs_error(link->handle, RequestConfiguration, i); 957 cs_error(link, RequestConfiguration, i);
976 goto failed; 958 goto failed;
977 } 959 }
978 960
@@ -980,57 +962,28 @@ static void bluecard_config(dev_link_t *link)
980 goto failed; 962 goto failed;
981 963
982 strcpy(info->node.dev_name, info->hdev->name); 964 strcpy(info->node.dev_name, info->hdev->name);
983 link->dev = &info->node; 965 link->dev_node = &info->node;
984 link->state &= ~DEV_CONFIG_PENDING;
985 966
986 return; 967 return 0;
987 968
988cs_failed: 969cs_failed:
989 cs_error(link->handle, last_fn, last_ret); 970 cs_error(link, last_fn, last_ret);
990 971
991failed: 972failed:
992 bluecard_release(link); 973 bluecard_release(link);
974 return -ENODEV;
993} 975}
994 976
995 977
996static void bluecard_release(dev_link_t *link) 978static void bluecard_release(struct pcmcia_device *link)
997{ 979{
998 bluecard_info_t *info = link->priv; 980 bluecard_info_t *info = link->priv;
999 981
1000 if (link->state & DEV_PRESENT) 982 bluecard_close(info);
1001 bluecard_close(info);
1002 983
1003 del_timer(&(info->timer)); 984 del_timer(&(info->timer));
1004 985
1005 link->dev = NULL; 986 pcmcia_disable_device(link);
1006
1007 pcmcia_release_configuration(link->handle);
1008 pcmcia_release_io(link->handle, &link->io);
1009 pcmcia_release_irq(link->handle, &link->irq);
1010
1011 link->state &= ~DEV_CONFIG;
1012}
1013
1014static int bluecard_suspend(struct pcmcia_device *dev)
1015{
1016 dev_link_t *link = dev_to_instance(dev);
1017
1018 link->state |= DEV_SUSPEND;
1019 if (link->state & DEV_CONFIG)
1020 pcmcia_release_configuration(link->handle);
1021
1022 return 0;
1023}
1024
1025static int bluecard_resume(struct pcmcia_device *dev)
1026{
1027 dev_link_t *link = dev_to_instance(dev);
1028
1029 link->state &= ~DEV_SUSPEND;
1030 if (DEV_OK(link))
1031 pcmcia_request_configuration(link->handle, &link->conf);
1032
1033 return 0;
1034} 987}
1035 988
1036static struct pcmcia_device_id bluecard_ids[] = { 989static struct pcmcia_device_id bluecard_ids[] = {
@@ -1046,11 +999,9 @@ static struct pcmcia_driver bluecard_driver = {
1046 .drv = { 999 .drv = {
1047 .name = "bluecard_cs", 1000 .name = "bluecard_cs",
1048 }, 1001 },
1049 .probe = bluecard_attach, 1002 .probe = bluecard_probe,
1050 .remove = bluecard_detach, 1003 .remove = bluecard_detach,
1051 .id_table = bluecard_ids, 1004 .id_table = bluecard_ids,
1052 .suspend = bluecard_suspend,
1053 .resume = bluecard_resume,
1054}; 1005};
1055 1006
1056static int __init init_bluecard_cs(void) 1007static int __init init_bluecard_cs(void)
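bluecard_cs.c above and bt3c_cs.c below undergo the same PCMCIA API conversion: the embedded dev_link_t is dropped in favour of the struct pcmcia_device pointer itself, probe() now returns the result of the config step, DEV_PRESENT tests become pcmcia_dev_present(), and teardown collapses into pcmcia_disable_device(); the suspend/resume hooks go away, apparently because the core now handles re-configuration. A condensed sketch of the resulting driver shape (the demo_* names are invented for illustration):

typedef struct demo_info_t {
	struct pcmcia_device *p_dev;	/* replaces the embedded dev_link_t */
	dev_node_t node;
} demo_info_t;

static int demo_config(struct pcmcia_device *link);

static int demo_probe(struct pcmcia_device *link)
{
	demo_info_t *info = kzalloc(sizeof(*info), GFP_KERNEL);

	if (!info)
		return -ENOMEM;

	info->p_dev = link;
	link->priv = info;
	link->conf.Attributes = CONF_ENABLE_IRQ;
	link->conf.IntType = INT_MEMORY_AND_IO;

	return demo_config(link);	/* config failures now propagate from probe */
}

static void demo_detach(struct pcmcia_device *link)
{
	pcmcia_disable_device(link);	/* replaces the manual release_* calls */
	kfree(link->priv);
}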
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 7e21b1ff27c4..b94ac2f9f7ba 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -72,7 +72,7 @@ MODULE_LICENSE("GPL");
72 72
73 73
74typedef struct bt3c_info_t { 74typedef struct bt3c_info_t {
75 dev_link_t link; 75 struct pcmcia_device *p_dev;
76 dev_node_t node; 76 dev_node_t node;
77 77
78 struct hci_dev *hdev; 78 struct hci_dev *hdev;
@@ -88,8 +88,8 @@ typedef struct bt3c_info_t {
88} bt3c_info_t; 88} bt3c_info_t;
89 89
90 90
91static void bt3c_config(dev_link_t *link); 91static int bt3c_config(struct pcmcia_device *link);
92static void bt3c_release(dev_link_t *link); 92static void bt3c_release(struct pcmcia_device *link);
93 93
94static void bt3c_detach(struct pcmcia_device *p_dev); 94static void bt3c_detach(struct pcmcia_device *p_dev);
95 95
@@ -191,11 +191,11 @@ static void bt3c_write_wakeup(bt3c_info_t *info)
191 return; 191 return;
192 192
193 do { 193 do {
194 register unsigned int iobase = info->link.io.BasePort1; 194 register unsigned int iobase = info->p_dev->io.BasePort1;
195 register struct sk_buff *skb; 195 register struct sk_buff *skb;
196 register int len; 196 register int len;
197 197
198 if (!(info->link.state & DEV_PRESENT)) 198 if (!pcmcia_dev_present(info->p_dev))
199 break; 199 break;
200 200
201 201
@@ -229,7 +229,7 @@ static void bt3c_receive(bt3c_info_t *info)
229 return; 229 return;
230 } 230 }
231 231
232 iobase = info->link.io.BasePort1; 232 iobase = info->p_dev->io.BasePort1;
233 233
234 avail = bt3c_read(iobase, 0x7006); 234 avail = bt3c_read(iobase, 0x7006);
235 //printk("bt3c_cs: receiving %d bytes\n", avail); 235 //printk("bt3c_cs: receiving %d bytes\n", avail);
@@ -350,7 +350,7 @@ static irqreturn_t bt3c_interrupt(int irq, void *dev_inst, struct pt_regs *regs)
350 return IRQ_NONE; 350 return IRQ_NONE;
351 } 351 }
352 352
353 iobase = info->link.io.BasePort1; 353 iobase = info->p_dev->io.BasePort1;
354 354
355 spin_lock(&(info->lock)); 355 spin_lock(&(info->lock));
356 356
@@ -481,7 +481,7 @@ static int bt3c_load_firmware(bt3c_info_t *info, unsigned char *firmware, int co
481 unsigned int iobase, size, addr, fcs, tmp; 481 unsigned int iobase, size, addr, fcs, tmp;
482 int i, err = 0; 482 int i, err = 0;
483 483
484 iobase = info->link.io.BasePort1; 484 iobase = info->p_dev->io.BasePort1;
485 485
486 /* Reset */ 486 /* Reset */
487 bt3c_io_write(iobase, 0x8040, 0x0404); 487 bt3c_io_write(iobase, 0x8040, 0x0404);
@@ -562,7 +562,6 @@ static int bt3c_open(bt3c_info_t *info)
562{ 562{
563 const struct firmware *firmware; 563 const struct firmware *firmware;
564 struct hci_dev *hdev; 564 struct hci_dev *hdev;
565 client_handle_t handle;
566 int err; 565 int err;
567 566
568 spin_lock_init(&(info->lock)); 567 spin_lock_init(&(info->lock));
@@ -594,10 +593,8 @@ static int bt3c_open(bt3c_info_t *info)
594 593
595 hdev->owner = THIS_MODULE; 594 hdev->owner = THIS_MODULE;
596 595
597 handle = info->link.handle;
598
599 /* Load firmware */ 596 /* Load firmware */
600 err = request_firmware(&firmware, "BT3CPCC.bin", &handle_to_dev(handle)); 597 err = request_firmware(&firmware, "BT3CPCC.bin", &info->p_dev->dev);
601 if (err < 0) { 598 if (err < 0) {
602 BT_ERR("Firmware request failed"); 599 BT_ERR("Firmware request failed");
603 goto error; 600 goto error;
@@ -648,17 +645,16 @@ static int bt3c_close(bt3c_info_t *info)
648 return 0; 645 return 0;
649} 646}
650 647
651static int bt3c_attach(struct pcmcia_device *p_dev) 648static int bt3c_probe(struct pcmcia_device *link)
652{ 649{
653 bt3c_info_t *info; 650 bt3c_info_t *info;
654 dev_link_t *link;
655 651
656 /* Create new info device */ 652 /* Create new info device */
657 info = kzalloc(sizeof(*info), GFP_KERNEL); 653 info = kzalloc(sizeof(*info), GFP_KERNEL);
658 if (!info) 654 if (!info)
659 return -ENOMEM; 655 return -ENOMEM;
660 656
661 link = &info->link; 657 info->p_dev = link;
662 link->priv = info; 658 link->priv = info;
663 659
664 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 660 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
@@ -670,31 +666,21 @@ static int bt3c_attach(struct pcmcia_device *p_dev)
670 link->irq.Instance = info; 666 link->irq.Instance = info;
671 667
672 link->conf.Attributes = CONF_ENABLE_IRQ; 668 link->conf.Attributes = CONF_ENABLE_IRQ;
673 link->conf.Vcc = 50;
674 link->conf.IntType = INT_MEMORY_AND_IO; 669 link->conf.IntType = INT_MEMORY_AND_IO;
675 670
676 link->handle = p_dev; 671 return bt3c_config(link);
677 p_dev->instance = link;
678
679 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
680 bt3c_config(link);
681
682 return 0;
683} 672}
684 673
685 674
686static void bt3c_detach(struct pcmcia_device *p_dev) 675static void bt3c_detach(struct pcmcia_device *link)
687{ 676{
688 dev_link_t *link = dev_to_instance(p_dev);
689 bt3c_info_t *info = link->priv; 677 bt3c_info_t *info = link->priv;
690 678
691 if (link->state & DEV_CONFIG) 679 bt3c_release(link);
692 bt3c_release(link);
693
694 kfree(info); 680 kfree(info);
695} 681}
696 682
697static int get_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse) 683static int get_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
698{ 684{
699 int i; 685 int i;
700 686
@@ -705,30 +691,28 @@ static int get_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
705 return pcmcia_parse_tuple(handle, tuple, parse); 691 return pcmcia_parse_tuple(handle, tuple, parse);
706} 692}
707 693
708static int first_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse) 694static int first_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
709{ 695{
710 if (pcmcia_get_first_tuple(handle, tuple) != CS_SUCCESS) 696 if (pcmcia_get_first_tuple(handle, tuple) != CS_SUCCESS)
711 return CS_NO_MORE_ITEMS; 697 return CS_NO_MORE_ITEMS;
712 return get_tuple(handle, tuple, parse); 698 return get_tuple(handle, tuple, parse);
713} 699}
714 700
715static int next_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse) 701static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
716{ 702{
717 if (pcmcia_get_next_tuple(handle, tuple) != CS_SUCCESS) 703 if (pcmcia_get_next_tuple(handle, tuple) != CS_SUCCESS)
718 return CS_NO_MORE_ITEMS; 704 return CS_NO_MORE_ITEMS;
719 return get_tuple(handle, tuple, parse); 705 return get_tuple(handle, tuple, parse);
720} 706}
721 707
722static void bt3c_config(dev_link_t *link) 708static int bt3c_config(struct pcmcia_device *link)
723{ 709{
724 static kio_addr_t base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; 710 static kio_addr_t base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
725 client_handle_t handle = link->handle;
726 bt3c_info_t *info = link->priv; 711 bt3c_info_t *info = link->priv;
727 tuple_t tuple; 712 tuple_t tuple;
728 u_short buf[256]; 713 u_short buf[256];
729 cisparse_t parse; 714 cisparse_t parse;
730 cistpl_cftable_entry_t *cf = &parse.cftable_entry; 715 cistpl_cftable_entry_t *cf = &parse.cftable_entry;
731 config_info_t config;
732 int i, j, try, last_ret, last_fn; 716 int i, j, try, last_ret, last_fn;
733 717
734 tuple.TupleData = (cisdata_t *)buf; 718 tuple.TupleData = (cisdata_t *)buf;
@@ -738,7 +722,7 @@ static void bt3c_config(dev_link_t *link)
738 722
739 /* Get configuration register information */ 723 /* Get configuration register information */
740 tuple.DesiredTuple = CISTPL_CONFIG; 724 tuple.DesiredTuple = CISTPL_CONFIG;
741 last_ret = first_tuple(handle, &tuple, &parse); 725 last_ret = first_tuple(link, &tuple, &parse);
742 if (last_ret != CS_SUCCESS) { 726 if (last_ret != CS_SUCCESS) {
743 last_fn = ParseTuple; 727 last_fn = ParseTuple;
744 goto cs_failed; 728 goto cs_failed;
@@ -746,11 +730,6 @@ static void bt3c_config(dev_link_t *link)
746 link->conf.ConfigBase = parse.config.base; 730 link->conf.ConfigBase = parse.config.base;
747 link->conf.Present = parse.config.rmask[0]; 731 link->conf.Present = parse.config.rmask[0];
748 732
749 /* Configure card */
750 link->state |= DEV_CONFIG;
751 i = pcmcia_get_configuration_info(handle, &config);
752 link->conf.Vcc = config.Vcc;
753
754 /* First pass: look for a config entry that looks normal. */ 733 /* First pass: look for a config entry that looks normal. */
755 tuple.TupleData = (cisdata_t *)buf; 734 tuple.TupleData = (cisdata_t *)buf;
756 tuple.TupleOffset = 0; 735 tuple.TupleOffset = 0;
@@ -759,59 +738,59 @@ static void bt3c_config(dev_link_t *link)
759 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 738 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
760 /* Two tries: without IO aliases, then with aliases */ 739 /* Two tries: without IO aliases, then with aliases */
761 for (try = 0; try < 2; try++) { 740 for (try = 0; try < 2; try++) {
762 i = first_tuple(handle, &tuple, &parse); 741 i = first_tuple(link, &tuple, &parse);
763 while (i != CS_NO_MORE_ITEMS) { 742 while (i != CS_NO_MORE_ITEMS) {
764 if (i != CS_SUCCESS) 743 if (i != CS_SUCCESS)
765 goto next_entry; 744 goto next_entry;
766 if (cf->vpp1.present & (1 << CISTPL_POWER_VNOM)) 745 if (cf->vpp1.present & (1 << CISTPL_POWER_VNOM))
767 link->conf.Vpp1 = link->conf.Vpp2 = cf->vpp1.param[CISTPL_POWER_VNOM] / 10000; 746 link->conf.Vpp = cf->vpp1.param[CISTPL_POWER_VNOM] / 10000;
768 if ((cf->io.nwin > 0) && (cf->io.win[0].len == 8) && (cf->io.win[0].base != 0)) { 747 if ((cf->io.nwin > 0) && (cf->io.win[0].len == 8) && (cf->io.win[0].base != 0)) {
769 link->conf.ConfigIndex = cf->index; 748 link->conf.ConfigIndex = cf->index;
770 link->io.BasePort1 = cf->io.win[0].base; 749 link->io.BasePort1 = cf->io.win[0].base;
771 link->io.IOAddrLines = (try == 0) ? 16 : cf->io.flags & CISTPL_IO_LINES_MASK; 750 link->io.IOAddrLines = (try == 0) ? 16 : cf->io.flags & CISTPL_IO_LINES_MASK;
772 i = pcmcia_request_io(link->handle, &link->io); 751 i = pcmcia_request_io(link, &link->io);
773 if (i == CS_SUCCESS) 752 if (i == CS_SUCCESS)
774 goto found_port; 753 goto found_port;
775 } 754 }
776next_entry: 755next_entry:
777 i = next_tuple(handle, &tuple, &parse); 756 i = next_tuple(link, &tuple, &parse);
778 } 757 }
779 } 758 }
780 759
781 /* Second pass: try to find an entry that isn't picky about 760 /* Second pass: try to find an entry that isn't picky about
782 its base address, then try to grab any standard serial port 761 its base address, then try to grab any standard serial port
783 address, and finally try to get any free port. */ 762 address, and finally try to get any free port. */
784 i = first_tuple(handle, &tuple, &parse); 763 i = first_tuple(link, &tuple, &parse);
785 while (i != CS_NO_MORE_ITEMS) { 764 while (i != CS_NO_MORE_ITEMS) {
786 if ((i == CS_SUCCESS) && (cf->io.nwin > 0) && ((cf->io.flags & CISTPL_IO_LINES_MASK) <= 3)) { 765 if ((i == CS_SUCCESS) && (cf->io.nwin > 0) && ((cf->io.flags & CISTPL_IO_LINES_MASK) <= 3)) {
787 link->conf.ConfigIndex = cf->index; 766 link->conf.ConfigIndex = cf->index;
788 for (j = 0; j < 5; j++) { 767 for (j = 0; j < 5; j++) {
789 link->io.BasePort1 = base[j]; 768 link->io.BasePort1 = base[j];
790 link->io.IOAddrLines = base[j] ? 16 : 3; 769 link->io.IOAddrLines = base[j] ? 16 : 3;
791 i = pcmcia_request_io(link->handle, &link->io); 770 i = pcmcia_request_io(link, &link->io);
792 if (i == CS_SUCCESS) 771 if (i == CS_SUCCESS)
793 goto found_port; 772 goto found_port;
794 } 773 }
795 } 774 }
796 i = next_tuple(handle, &tuple, &parse); 775 i = next_tuple(link, &tuple, &parse);
797 } 776 }
798 777
799found_port: 778found_port:
800 if (i != CS_SUCCESS) { 779 if (i != CS_SUCCESS) {
801 BT_ERR("No usable port range found"); 780 BT_ERR("No usable port range found");
802 cs_error(link->handle, RequestIO, i); 781 cs_error(link, RequestIO, i);
803 goto failed; 782 goto failed;
804 } 783 }
805 784
806 i = pcmcia_request_irq(link->handle, &link->irq); 785 i = pcmcia_request_irq(link, &link->irq);
807 if (i != CS_SUCCESS) { 786 if (i != CS_SUCCESS) {
808 cs_error(link->handle, RequestIRQ, i); 787 cs_error(link, RequestIRQ, i);
809 link->irq.AssignedIRQ = 0; 788 link->irq.AssignedIRQ = 0;
810 } 789 }
811 790
812 i = pcmcia_request_configuration(link->handle, &link->conf); 791 i = pcmcia_request_configuration(link, &link->conf);
813 if (i != CS_SUCCESS) { 792 if (i != CS_SUCCESS) {
814 cs_error(link->handle, RequestConfiguration, i); 793 cs_error(link, RequestConfiguration, i);
815 goto failed; 794 goto failed;
816 } 795 }
817 796
@@ -819,55 +798,26 @@ found_port:
819 goto failed; 798 goto failed;
820 799
821 strcpy(info->node.dev_name, info->hdev->name); 800 strcpy(info->node.dev_name, info->hdev->name);
822 link->dev = &info->node; 801 link->dev_node = &info->node;
823 link->state &= ~DEV_CONFIG_PENDING;
824 802
825 return; 803 return 0;
826 804
827cs_failed: 805cs_failed:
828 cs_error(link->handle, last_fn, last_ret); 806 cs_error(link, last_fn, last_ret);
829 807
830failed: 808failed:
831 bt3c_release(link); 809 bt3c_release(link);
810 return -ENODEV;
832} 811}
833 812
834 813
835static void bt3c_release(dev_link_t *link) 814static void bt3c_release(struct pcmcia_device *link)
836{ 815{
837 bt3c_info_t *info = link->priv; 816 bt3c_info_t *info = link->priv;
838 817
839 if (link->state & DEV_PRESENT) 818 bt3c_close(info);
840 bt3c_close(info);
841
842 link->dev = NULL;
843
844 pcmcia_release_configuration(link->handle);
845 pcmcia_release_io(link->handle, &link->io);
846 pcmcia_release_irq(link->handle, &link->irq);
847
848 link->state &= ~DEV_CONFIG;
849}
850
851static int bt3c_suspend(struct pcmcia_device *dev)
852{
853 dev_link_t *link = dev_to_instance(dev);
854 819
855 link->state |= DEV_SUSPEND; 820 pcmcia_disable_device(link);
856 if (link->state & DEV_CONFIG)
857 pcmcia_release_configuration(link->handle);
858
859 return 0;
860}
861
862static int bt3c_resume(struct pcmcia_device *dev)
863{
864 dev_link_t *link = dev_to_instance(dev);
865
866 link->state &= ~DEV_SUSPEND;
867 if (DEV_OK(link))
868 pcmcia_request_configuration(link->handle, &link->conf);
869
870 return 0;
871} 821}
872 822
873 823
@@ -882,11 +832,9 @@ static struct pcmcia_driver bt3c_driver = {
882 .drv = { 832 .drv = {
883 .name = "bt3c_cs", 833 .name = "bt3c_cs",
884 }, 834 },
885 .probe = bt3c_attach, 835 .probe = bt3c_probe,
886 .remove = bt3c_detach, 836 .remove = bt3c_detach,
887 .id_table = bt3c_ids, 837 .id_table = bt3c_ids,
888 .suspend = bt3c_suspend,
889 .resume = bt3c_resume,
890}; 838};
891 839
892static int __init init_bt3c_cs(void) 840static int __init init_bt3c_cs(void)
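The bt3c_cs changes above follow the tree-wide PCMCIA update: the embedded dev_link_t and its DEV_PRESENT/DEV_CONFIG state flags disappear, probe and config operate directly on struct pcmcia_device and report errors synchronously, and the release path collapses into pcmcia_disable_device(). Below is a minimal sketch of the resulting client skeleton, assuming the 2006-era PCMCIA interfaces visible in this diff; the foo_* names are placeholders rather than symbols from the patch, and the CIS/IO probing loop is elided.

/* Sketch only: post-conversion PCMCIA client skeleton modelled on the
 * bt3c_cs hunks above.  foo_* identifiers are illustrative. */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

typedef struct foo_info_t {
	struct pcmcia_device *p_dev;		/* was: dev_link_t link */
	dev_node_t node;
} foo_info_t;

static void foo_release(struct pcmcia_device *link)
{
	/* Replaces the release_configuration/io/irq triple and the
	 * DEV_CONFIG/DEV_PRESENT flag bookkeeping of the old code. */
	pcmcia_disable_device(link);
}

static int foo_config(struct pcmcia_device *link)
{
	foo_info_t *info = link->priv;
	int ret;

	/* CIS parsing and pcmcia_request_io()/irq() would happen here. */
	ret = pcmcia_request_configuration(link, &link->conf);
	if (ret != CS_SUCCESS) {
		cs_error(link, RequestConfiguration, ret);
		foo_release(link);
		return -ENODEV;
	}

	strcpy(info->node.dev_name, "foo0");
	link->dev_node = &info->node;		/* was: link->dev */
	return 0;
}

static int foo_probe(struct pcmcia_device *link)
{
	foo_info_t *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	info->p_dev = link;
	link->priv = info;

	link->conf.Attributes = CONF_ENABLE_IRQ;	/* no Vcc field any more */
	link->conf.IntType = INT_MEMORY_AND_IO;

	/* Configuration is synchronous; failures propagate to the core. */
	return foo_config(link);
}

static void foo_detach(struct pcmcia_device *link)
{
	foo_info_t *info = link->priv;

	foo_release(link);			/* no DEV_CONFIG test needed */
	kfree(info);
}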
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 7b4bff4cfa2d..9ce4c93467e5 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -68,7 +68,7 @@ MODULE_LICENSE("GPL");
68 68
69 69
70typedef struct btuart_info_t { 70typedef struct btuart_info_t {
71 dev_link_t link; 71 struct pcmcia_device *p_dev;
72 dev_node_t node; 72 dev_node_t node;
73 73
74 struct hci_dev *hdev; 74 struct hci_dev *hdev;
@@ -84,8 +84,8 @@ typedef struct btuart_info_t {
84} btuart_info_t; 84} btuart_info_t;
85 85
86 86
87static void btuart_config(dev_link_t *link); 87static int btuart_config(struct pcmcia_device *link);
88static void btuart_release(dev_link_t *link); 88static void btuart_release(struct pcmcia_device *link);
89 89
90static void btuart_detach(struct pcmcia_device *p_dev); 90static void btuart_detach(struct pcmcia_device *p_dev);
91 91
@@ -146,13 +146,13 @@ static void btuart_write_wakeup(btuart_info_t *info)
146 } 146 }
147 147
148 do { 148 do {
149 register unsigned int iobase = info->link.io.BasePort1; 149 register unsigned int iobase = info->p_dev->io.BasePort1;
150 register struct sk_buff *skb; 150 register struct sk_buff *skb;
151 register int len; 151 register int len;
152 152
153 clear_bit(XMIT_WAKEUP, &(info->tx_state)); 153 clear_bit(XMIT_WAKEUP, &(info->tx_state));
154 154
155 if (!(info->link.state & DEV_PRESENT)) 155 if (!pcmcia_dev_present(info->p_dev))
156 return; 156 return;
157 157
158 if (!(skb = skb_dequeue(&(info->txq)))) 158 if (!(skb = skb_dequeue(&(info->txq))))
@@ -187,7 +187,7 @@ static void btuart_receive(btuart_info_t *info)
187 return; 187 return;
188 } 188 }
189 189
190 iobase = info->link.io.BasePort1; 190 iobase = info->p_dev->io.BasePort1;
191 191
192 do { 192 do {
193 info->hdev->stat.byte_rx++; 193 info->hdev->stat.byte_rx++;
@@ -301,7 +301,7 @@ static irqreturn_t btuart_interrupt(int irq, void *dev_inst, struct pt_regs *reg
301 return IRQ_NONE; 301 return IRQ_NONE;
302 } 302 }
303 303
304 iobase = info->link.io.BasePort1; 304 iobase = info->p_dev->io.BasePort1;
305 305
306 spin_lock(&(info->lock)); 306 spin_lock(&(info->lock));
307 307
@@ -357,7 +357,7 @@ static void btuart_change_speed(btuart_info_t *info, unsigned int speed)
357 return; 357 return;
358 } 358 }
359 359
360 iobase = info->link.io.BasePort1; 360 iobase = info->p_dev->io.BasePort1;
361 361
362 spin_lock_irqsave(&(info->lock), flags); 362 spin_lock_irqsave(&(info->lock), flags);
363 363
@@ -481,7 +481,7 @@ static int btuart_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned lon
481static int btuart_open(btuart_info_t *info) 481static int btuart_open(btuart_info_t *info)
482{ 482{
483 unsigned long flags; 483 unsigned long flags;
484 unsigned int iobase = info->link.io.BasePort1; 484 unsigned int iobase = info->p_dev->io.BasePort1;
485 struct hci_dev *hdev; 485 struct hci_dev *hdev;
486 486
487 spin_lock_init(&(info->lock)); 487 spin_lock_init(&(info->lock));
@@ -550,7 +550,7 @@ static int btuart_open(btuart_info_t *info)
550static int btuart_close(btuart_info_t *info) 550static int btuart_close(btuart_info_t *info)
551{ 551{
552 unsigned long flags; 552 unsigned long flags;
553 unsigned int iobase = info->link.io.BasePort1; 553 unsigned int iobase = info->p_dev->io.BasePort1;
554 struct hci_dev *hdev = info->hdev; 554 struct hci_dev *hdev = info->hdev;
555 555
556 if (!hdev) 556 if (!hdev)
@@ -576,17 +576,16 @@ static int btuart_close(btuart_info_t *info)
576 return 0; 576 return 0;
577} 577}
578 578
579static int btuart_attach(struct pcmcia_device *p_dev) 579static int btuart_probe(struct pcmcia_device *link)
580{ 580{
581 btuart_info_t *info; 581 btuart_info_t *info;
582 dev_link_t *link;
583 582
584 /* Create new info device */ 583 /* Create new info device */
585 info = kzalloc(sizeof(*info), GFP_KERNEL); 584 info = kzalloc(sizeof(*info), GFP_KERNEL);
586 if (!info) 585 if (!info)
587 return -ENOMEM; 586 return -ENOMEM;
588 587
589 link = &info->link; 588 info->p_dev = link;
590 link->priv = info; 589 link->priv = info;
591 590
592 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 591 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
@@ -598,31 +597,21 @@ static int btuart_attach(struct pcmcia_device *p_dev)
598 link->irq.Instance = info; 597 link->irq.Instance = info;
599 598
600 link->conf.Attributes = CONF_ENABLE_IRQ; 599 link->conf.Attributes = CONF_ENABLE_IRQ;
601 link->conf.Vcc = 50;
602 link->conf.IntType = INT_MEMORY_AND_IO; 600 link->conf.IntType = INT_MEMORY_AND_IO;
603 601
604 link->handle = p_dev; 602 return btuart_config(link);
605 p_dev->instance = link;
606
607 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
608 btuart_config(link);
609
610 return 0;
611} 603}
612 604
613 605
614static void btuart_detach(struct pcmcia_device *p_dev) 606static void btuart_detach(struct pcmcia_device *link)
615{ 607{
616 dev_link_t *link = dev_to_instance(p_dev);
617 btuart_info_t *info = link->priv; 608 btuart_info_t *info = link->priv;
618 609
619 if (link->state & DEV_CONFIG) 610 btuart_release(link);
620 btuart_release(link);
621
622 kfree(info); 611 kfree(info);
623} 612}
624 613
625static int get_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse) 614static int get_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
626{ 615{
627 int i; 616 int i;
628 617
@@ -633,30 +622,28 @@ static int get_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
633 return pcmcia_parse_tuple(handle, tuple, parse); 622 return pcmcia_parse_tuple(handle, tuple, parse);
634} 623}
635 624
636static int first_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse) 625static int first_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
637{ 626{
638 if (pcmcia_get_first_tuple(handle, tuple) != CS_SUCCESS) 627 if (pcmcia_get_first_tuple(handle, tuple) != CS_SUCCESS)
639 return CS_NO_MORE_ITEMS; 628 return CS_NO_MORE_ITEMS;
640 return get_tuple(handle, tuple, parse); 629 return get_tuple(handle, tuple, parse);
641} 630}
642 631
643static int next_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse) 632static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
644{ 633{
645 if (pcmcia_get_next_tuple(handle, tuple) != CS_SUCCESS) 634 if (pcmcia_get_next_tuple(handle, tuple) != CS_SUCCESS)
646 return CS_NO_MORE_ITEMS; 635 return CS_NO_MORE_ITEMS;
647 return get_tuple(handle, tuple, parse); 636 return get_tuple(handle, tuple, parse);
648} 637}
649 638
650static void btuart_config(dev_link_t *link) 639static int btuart_config(struct pcmcia_device *link)
651{ 640{
652 static kio_addr_t base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; 641 static kio_addr_t base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
653 client_handle_t handle = link->handle;
654 btuart_info_t *info = link->priv; 642 btuart_info_t *info = link->priv;
655 tuple_t tuple; 643 tuple_t tuple;
656 u_short buf[256]; 644 u_short buf[256];
657 cisparse_t parse; 645 cisparse_t parse;
658 cistpl_cftable_entry_t *cf = &parse.cftable_entry; 646 cistpl_cftable_entry_t *cf = &parse.cftable_entry;
659 config_info_t config;
660 int i, j, try, last_ret, last_fn; 647 int i, j, try, last_ret, last_fn;
661 648
662 tuple.TupleData = (cisdata_t *)buf; 649 tuple.TupleData = (cisdata_t *)buf;
@@ -666,7 +653,7 @@ static void btuart_config(dev_link_t *link)
666 653
667 /* Get configuration register information */ 654 /* Get configuration register information */
668 tuple.DesiredTuple = CISTPL_CONFIG; 655 tuple.DesiredTuple = CISTPL_CONFIG;
669 last_ret = first_tuple(handle, &tuple, &parse); 656 last_ret = first_tuple(link, &tuple, &parse);
670 if (last_ret != CS_SUCCESS) { 657 if (last_ret != CS_SUCCESS) {
671 last_fn = ParseTuple; 658 last_fn = ParseTuple;
672 goto cs_failed; 659 goto cs_failed;
@@ -674,11 +661,6 @@ static void btuart_config(dev_link_t *link)
674 link->conf.ConfigBase = parse.config.base; 661 link->conf.ConfigBase = parse.config.base;
675 link->conf.Present = parse.config.rmask[0]; 662 link->conf.Present = parse.config.rmask[0];
676 663
677 /* Configure card */
678 link->state |= DEV_CONFIG;
679 i = pcmcia_get_configuration_info(handle, &config);
680 link->conf.Vcc = config.Vcc;
681
682 /* First pass: look for a config entry that looks normal. */ 664 /* First pass: look for a config entry that looks normal. */
683 tuple.TupleData = (cisdata_t *) buf; 665 tuple.TupleData = (cisdata_t *) buf;
684 tuple.TupleOffset = 0; 666 tuple.TupleOffset = 0;
@@ -687,29 +669,29 @@ static void btuart_config(dev_link_t *link)
687 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 669 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
688 /* Two tries: without IO aliases, then with aliases */ 670 /* Two tries: without IO aliases, then with aliases */
689 for (try = 0; try < 2; try++) { 671 for (try = 0; try < 2; try++) {
690 i = first_tuple(handle, &tuple, &parse); 672 i = first_tuple(link, &tuple, &parse);
691 while (i != CS_NO_MORE_ITEMS) { 673 while (i != CS_NO_MORE_ITEMS) {
692 if (i != CS_SUCCESS) 674 if (i != CS_SUCCESS)
693 goto next_entry; 675 goto next_entry;
694 if (cf->vpp1.present & (1 << CISTPL_POWER_VNOM)) 676 if (cf->vpp1.present & (1 << CISTPL_POWER_VNOM))
695 link->conf.Vpp1 = link->conf.Vpp2 = cf->vpp1.param[CISTPL_POWER_VNOM] / 10000; 677 link->conf.Vpp = cf->vpp1.param[CISTPL_POWER_VNOM] / 10000;
696 if ((cf->io.nwin > 0) && (cf->io.win[0].len == 8) && (cf->io.win[0].base != 0)) { 678 if ((cf->io.nwin > 0) && (cf->io.win[0].len == 8) && (cf->io.win[0].base != 0)) {
697 link->conf.ConfigIndex = cf->index; 679 link->conf.ConfigIndex = cf->index;
698 link->io.BasePort1 = cf->io.win[0].base; 680 link->io.BasePort1 = cf->io.win[0].base;
699 link->io.IOAddrLines = (try == 0) ? 16 : cf->io.flags & CISTPL_IO_LINES_MASK; 681 link->io.IOAddrLines = (try == 0) ? 16 : cf->io.flags & CISTPL_IO_LINES_MASK;
700 i = pcmcia_request_io(link->handle, &link->io); 682 i = pcmcia_request_io(link, &link->io);
701 if (i == CS_SUCCESS) 683 if (i == CS_SUCCESS)
702 goto found_port; 684 goto found_port;
703 } 685 }
704next_entry: 686next_entry:
705 i = next_tuple(handle, &tuple, &parse); 687 i = next_tuple(link, &tuple, &parse);
706 } 688 }
707 } 689 }
708 690
709 /* Second pass: try to find an entry that isn't picky about 691 /* Second pass: try to find an entry that isn't picky about
710 its base address, then try to grab any standard serial port 692 its base address, then try to grab any standard serial port
711 address, and finally try to get any free port. */ 693 address, and finally try to get any free port. */
712 i = first_tuple(handle, &tuple, &parse); 694 i = first_tuple(link, &tuple, &parse);
713 while (i != CS_NO_MORE_ITEMS) { 695 while (i != CS_NO_MORE_ITEMS) {
714 if ((i == CS_SUCCESS) && (cf->io.nwin > 0) 696 if ((i == CS_SUCCESS) && (cf->io.nwin > 0)
715 && ((cf->io.flags & CISTPL_IO_LINES_MASK) <= 3)) { 697 && ((cf->io.flags & CISTPL_IO_LINES_MASK) <= 3)) {
@@ -717,30 +699,30 @@ next_entry:
717 for (j = 0; j < 5; j++) { 699 for (j = 0; j < 5; j++) {
718 link->io.BasePort1 = base[j]; 700 link->io.BasePort1 = base[j];
719 link->io.IOAddrLines = base[j] ? 16 : 3; 701 link->io.IOAddrLines = base[j] ? 16 : 3;
720 i = pcmcia_request_io(link->handle, &link->io); 702 i = pcmcia_request_io(link, &link->io);
721 if (i == CS_SUCCESS) 703 if (i == CS_SUCCESS)
722 goto found_port; 704 goto found_port;
723 } 705 }
724 } 706 }
725 i = next_tuple(handle, &tuple, &parse); 707 i = next_tuple(link, &tuple, &parse);
726 } 708 }
727 709
728found_port: 710found_port:
729 if (i != CS_SUCCESS) { 711 if (i != CS_SUCCESS) {
730 BT_ERR("No usable port range found"); 712 BT_ERR("No usable port range found");
731 cs_error(link->handle, RequestIO, i); 713 cs_error(link, RequestIO, i);
732 goto failed; 714 goto failed;
733 } 715 }
734 716
735 i = pcmcia_request_irq(link->handle, &link->irq); 717 i = pcmcia_request_irq(link, &link->irq);
736 if (i != CS_SUCCESS) { 718 if (i != CS_SUCCESS) {
737 cs_error(link->handle, RequestIRQ, i); 719 cs_error(link, RequestIRQ, i);
738 link->irq.AssignedIRQ = 0; 720 link->irq.AssignedIRQ = 0;
739 } 721 }
740 722
741 i = pcmcia_request_configuration(link->handle, &link->conf); 723 i = pcmcia_request_configuration(link, &link->conf);
742 if (i != CS_SUCCESS) { 724 if (i != CS_SUCCESS) {
743 cs_error(link->handle, RequestConfiguration, i); 725 cs_error(link, RequestConfiguration, i);
744 goto failed; 726 goto failed;
745 } 727 }
746 728
@@ -748,58 +730,28 @@ found_port:
748 goto failed; 730 goto failed;
749 731
750 strcpy(info->node.dev_name, info->hdev->name); 732 strcpy(info->node.dev_name, info->hdev->name);
751 link->dev = &info->node; 733 link->dev_node = &info->node;
752 link->state &= ~DEV_CONFIG_PENDING;
753 734
754 return; 735 return 0;
755 736
756cs_failed: 737cs_failed:
757 cs_error(link->handle, last_fn, last_ret); 738 cs_error(link, last_fn, last_ret);
758 739
759failed: 740failed:
760 btuart_release(link); 741 btuart_release(link);
742 return -ENODEV;
761} 743}
762 744
763 745
764static void btuart_release(dev_link_t *link) 746static void btuart_release(struct pcmcia_device *link)
765{ 747{
766 btuart_info_t *info = link->priv; 748 btuart_info_t *info = link->priv;
767 749
768 if (link->state & DEV_PRESENT) 750 btuart_close(info);
769 btuart_close(info);
770
771 link->dev = NULL;
772
773 pcmcia_release_configuration(link->handle);
774 pcmcia_release_io(link->handle, &link->io);
775 pcmcia_release_irq(link->handle, &link->irq);
776
777 link->state &= ~DEV_CONFIG;
778}
779
780static int btuart_suspend(struct pcmcia_device *dev)
781{
782 dev_link_t *link = dev_to_instance(dev);
783
784 link->state |= DEV_SUSPEND;
785 if (link->state & DEV_CONFIG)
786 pcmcia_release_configuration(link->handle);
787 751
788 return 0; 752 pcmcia_disable_device(link);
789} 753}
790 754
791static int btuart_resume(struct pcmcia_device *dev)
792{
793 dev_link_t *link = dev_to_instance(dev);
794
795 link->state &= ~DEV_SUSPEND;
796 if (DEV_OK(link))
797 pcmcia_request_configuration(link->handle, &link->conf);
798
799 return 0;
800}
801
802
803static struct pcmcia_device_id btuart_ids[] = { 755static struct pcmcia_device_id btuart_ids[] = {
804 /* don't use this driver. Use serial_cs + hci_uart instead */ 756 /* don't use this driver. Use serial_cs + hci_uart instead */
805 PCMCIA_DEVICE_NULL 757 PCMCIA_DEVICE_NULL
@@ -811,11 +763,9 @@ static struct pcmcia_driver btuart_driver = {
811 .drv = { 763 .drv = {
812 .name = "btuart_cs", 764 .name = "btuart_cs",
813 }, 765 },
814 .probe = btuart_attach, 766 .probe = btuart_probe,
815 .remove = btuart_detach, 767 .remove = btuart_detach,
816 .id_table = btuart_ids, 768 .id_table = btuart_ids,
817 .suspend = btuart_suspend,
818 .resume = btuart_resume,
819}; 769};
820 770
821static int __init init_btuart_cs(void) 771static int __init init_btuart_cs(void)
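btuart_cs gets the identical probe/config/release conversion, so the pattern worth calling out here is in the hot paths: with no embedded dev_link_t, card presence is tested with pcmcia_dev_present() and the I/O base is reached through the stored struct pcmcia_device pointer. A hedged sketch of that idiom follows; foo_info_t only mirrors the fields of btuart_info_t that the loop touches, and the actual register I/O is elided.

/* Sketch only: post-conversion transmit-wakeup loop.  XMIT_WAKEUP and the
 * struct layout are illustrative stand-ins for the driver's own. */
#include <linux/bitops.h>
#include <linux/skbuff.h>
#include <pcmcia/ds.h>

#define XMIT_WAKEUP	1

typedef struct foo_info_t {
	struct pcmcia_device *p_dev;	/* replaces the embedded dev_link_t */
	unsigned long tx_state;
	struct sk_buff_head txq;
} foo_info_t;

static void foo_write_wakeup(foo_info_t *info)
{
	do {
		unsigned int iobase = info->p_dev->io.BasePort1;
		struct sk_buff *skb;

		clear_bit(XMIT_WAKEUP, &info->tx_state);

		/* was: if (!(info->link.state & DEV_PRESENT)) return; */
		if (!pcmcia_dev_present(info->p_dev))
			return;

		skb = skb_dequeue(&info->txq);
		if (!skb)
			break;

		/* ... outb() the payload at iobase, update tx stats ... */
		(void)iobase;
		kfree_skb(skb);
	} while (test_bit(XMIT_WAKEUP, &info->tx_state));
}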
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 0449bc45ae5e..a71a240611e0 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -68,7 +68,7 @@ MODULE_LICENSE("GPL");
68 68
69 69
70typedef struct dtl1_info_t { 70typedef struct dtl1_info_t {
71 dev_link_t link; 71 struct pcmcia_device *p_dev;
72 dev_node_t node; 72 dev_node_t node;
73 73
74 struct hci_dev *hdev; 74 struct hci_dev *hdev;
@@ -87,8 +87,8 @@ typedef struct dtl1_info_t {
87} dtl1_info_t; 87} dtl1_info_t;
88 88
89 89
90static void dtl1_config(dev_link_t *link); 90static int dtl1_config(struct pcmcia_device *link);
91static void dtl1_release(dev_link_t *link); 91static void dtl1_release(struct pcmcia_device *link);
92 92
93static void dtl1_detach(struct pcmcia_device *p_dev); 93static void dtl1_detach(struct pcmcia_device *p_dev);
94 94
@@ -153,13 +153,13 @@ static void dtl1_write_wakeup(dtl1_info_t *info)
153 } 153 }
154 154
155 do { 155 do {
156 register unsigned int iobase = info->link.io.BasePort1; 156 register unsigned int iobase = info->p_dev->io.BasePort1;
157 register struct sk_buff *skb; 157 register struct sk_buff *skb;
158 register int len; 158 register int len;
159 159
160 clear_bit(XMIT_WAKEUP, &(info->tx_state)); 160 clear_bit(XMIT_WAKEUP, &(info->tx_state));
161 161
162 if (!(info->link.state & DEV_PRESENT)) 162 if (!pcmcia_dev_present(info->p_dev))
163 return; 163 return;
164 164
165 if (!(skb = skb_dequeue(&(info->txq)))) 165 if (!(skb = skb_dequeue(&(info->txq))))
@@ -218,7 +218,7 @@ static void dtl1_receive(dtl1_info_t *info)
218 return; 218 return;
219 } 219 }
220 220
221 iobase = info->link.io.BasePort1; 221 iobase = info->p_dev->io.BasePort1;
222 222
223 do { 223 do {
224 info->hdev->stat.byte_rx++; 224 info->hdev->stat.byte_rx++;
@@ -305,7 +305,7 @@ static irqreturn_t dtl1_interrupt(int irq, void *dev_inst, struct pt_regs *regs)
305 return IRQ_NONE; 305 return IRQ_NONE;
306 } 306 }
307 307
308 iobase = info->link.io.BasePort1; 308 iobase = info->p_dev->io.BasePort1;
309 309
310 spin_lock(&(info->lock)); 310 spin_lock(&(info->lock));
311 311
@@ -458,7 +458,7 @@ static int dtl1_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long
458static int dtl1_open(dtl1_info_t *info) 458static int dtl1_open(dtl1_info_t *info)
459{ 459{
460 unsigned long flags; 460 unsigned long flags;
461 unsigned int iobase = info->link.io.BasePort1; 461 unsigned int iobase = info->p_dev->io.BasePort1;
462 struct hci_dev *hdev; 462 struct hci_dev *hdev;
463 463
464 spin_lock_init(&(info->lock)); 464 spin_lock_init(&(info->lock));
@@ -504,7 +504,7 @@ static int dtl1_open(dtl1_info_t *info)
504 outb(UART_LCR_WLEN8, iobase + UART_LCR); /* Reset DLAB */ 504 outb(UART_LCR_WLEN8, iobase + UART_LCR); /* Reset DLAB */
505 outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase + UART_MCR); 505 outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase + UART_MCR);
506 506
507 info->ri_latch = inb(info->link.io.BasePort1 + UART_MSR) & UART_MSR_RI; 507 info->ri_latch = inb(info->p_dev->io.BasePort1 + UART_MSR) & UART_MSR_RI;
508 508
509 /* Turn on interrupts */ 509 /* Turn on interrupts */
510 outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER); 510 outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER);
@@ -529,7 +529,7 @@ static int dtl1_open(dtl1_info_t *info)
529static int dtl1_close(dtl1_info_t *info) 529static int dtl1_close(dtl1_info_t *info)
530{ 530{
531 unsigned long flags; 531 unsigned long flags;
532 unsigned int iobase = info->link.io.BasePort1; 532 unsigned int iobase = info->p_dev->io.BasePort1;
533 struct hci_dev *hdev = info->hdev; 533 struct hci_dev *hdev = info->hdev;
534 534
535 if (!hdev) 535 if (!hdev)
@@ -555,17 +555,16 @@ static int dtl1_close(dtl1_info_t *info)
555 return 0; 555 return 0;
556} 556}
557 557
558static int dtl1_attach(struct pcmcia_device *p_dev) 558static int dtl1_probe(struct pcmcia_device *link)
559{ 559{
560 dtl1_info_t *info; 560 dtl1_info_t *info;
561 dev_link_t *link;
562 561
563 /* Create new info device */ 562 /* Create new info device */
564 info = kzalloc(sizeof(*info), GFP_KERNEL); 563 info = kzalloc(sizeof(*info), GFP_KERNEL);
565 if (!info) 564 if (!info)
566 return -ENOMEM; 565 return -ENOMEM;
567 566
568 link = &info->link; 567 info->p_dev = link;
569 link->priv = info; 568 link->priv = info;
570 569
571 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 570 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
@@ -577,31 +576,22 @@ static int dtl1_attach(struct pcmcia_device *p_dev)
577 link->irq.Instance = info; 576 link->irq.Instance = info;
578 577
579 link->conf.Attributes = CONF_ENABLE_IRQ; 578 link->conf.Attributes = CONF_ENABLE_IRQ;
580 link->conf.Vcc = 50;
581 link->conf.IntType = INT_MEMORY_AND_IO; 579 link->conf.IntType = INT_MEMORY_AND_IO;
582 580
583 link->handle = p_dev; 581 return dtl1_config(link);
584 p_dev->instance = link;
585
586 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
587 dtl1_config(link);
588
589 return 0;
590} 582}
591 583
592 584
593static void dtl1_detach(struct pcmcia_device *p_dev) 585static void dtl1_detach(struct pcmcia_device *link)
594{ 586{
595 dev_link_t *link = dev_to_instance(p_dev);
596 dtl1_info_t *info = link->priv; 587 dtl1_info_t *info = link->priv;
597 588
598 if (link->state & DEV_CONFIG) 589 dtl1_release(link);
599 dtl1_release(link);
600 590
601 kfree(info); 591 kfree(info);
602} 592}
603 593
604static int get_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse) 594static int get_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
605{ 595{
606 int i; 596 int i;
607 597
@@ -612,29 +602,27 @@ static int get_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
612 return pcmcia_parse_tuple(handle, tuple, parse); 602 return pcmcia_parse_tuple(handle, tuple, parse);
613} 603}
614 604
615static int first_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse) 605static int first_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
616{ 606{
617 if (pcmcia_get_first_tuple(handle, tuple) != CS_SUCCESS) 607 if (pcmcia_get_first_tuple(handle, tuple) != CS_SUCCESS)
618 return CS_NO_MORE_ITEMS; 608 return CS_NO_MORE_ITEMS;
619 return get_tuple(handle, tuple, parse); 609 return get_tuple(handle, tuple, parse);
620} 610}
621 611
622static int next_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse) 612static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
623{ 613{
624 if (pcmcia_get_next_tuple(handle, tuple) != CS_SUCCESS) 614 if (pcmcia_get_next_tuple(handle, tuple) != CS_SUCCESS)
625 return CS_NO_MORE_ITEMS; 615 return CS_NO_MORE_ITEMS;
626 return get_tuple(handle, tuple, parse); 616 return get_tuple(handle, tuple, parse);
627} 617}
628 618
629static void dtl1_config(dev_link_t *link) 619static int dtl1_config(struct pcmcia_device *link)
630{ 620{
631 client_handle_t handle = link->handle;
632 dtl1_info_t *info = link->priv; 621 dtl1_info_t *info = link->priv;
633 tuple_t tuple; 622 tuple_t tuple;
634 u_short buf[256]; 623 u_short buf[256];
635 cisparse_t parse; 624 cisparse_t parse;
636 cistpl_cftable_entry_t *cf = &parse.cftable_entry; 625 cistpl_cftable_entry_t *cf = &parse.cftable_entry;
637 config_info_t config;
638 int i, last_ret, last_fn; 626 int i, last_ret, last_fn;
639 627
640 tuple.TupleData = (cisdata_t *)buf; 628 tuple.TupleData = (cisdata_t *)buf;
@@ -644,7 +632,7 @@ static void dtl1_config(dev_link_t *link)
644 632
645 /* Get configuration register information */ 633 /* Get configuration register information */
646 tuple.DesiredTuple = CISTPL_CONFIG; 634 tuple.DesiredTuple = CISTPL_CONFIG;
647 last_ret = first_tuple(handle, &tuple, &parse); 635 last_ret = first_tuple(link, &tuple, &parse);
648 if (last_ret != CS_SUCCESS) { 636 if (last_ret != CS_SUCCESS) {
649 last_fn = ParseTuple; 637 last_fn = ParseTuple;
650 goto cs_failed; 638 goto cs_failed;
@@ -652,11 +640,6 @@ static void dtl1_config(dev_link_t *link)
652 link->conf.ConfigBase = parse.config.base; 640 link->conf.ConfigBase = parse.config.base;
653 link->conf.Present = parse.config.rmask[0]; 641 link->conf.Present = parse.config.rmask[0];
654 642
655 /* Configure card */
656 link->state |= DEV_CONFIG;
657 i = pcmcia_get_configuration_info(handle, &config);
658 link->conf.Vcc = config.Vcc;
659
660 tuple.TupleData = (cisdata_t *)buf; 643 tuple.TupleData = (cisdata_t *)buf;
661 tuple.TupleOffset = 0; 644 tuple.TupleOffset = 0;
662 tuple.TupleDataMax = 255; 645 tuple.TupleDataMax = 255;
@@ -665,34 +648,34 @@ static void dtl1_config(dev_link_t *link)
665 648
666 /* Look for a generic full-sized window */ 649 /* Look for a generic full-sized window */
667 link->io.NumPorts1 = 8; 650 link->io.NumPorts1 = 8;
668 i = first_tuple(handle, &tuple, &parse); 651 i = first_tuple(link, &tuple, &parse);
669 while (i != CS_NO_MORE_ITEMS) { 652 while (i != CS_NO_MORE_ITEMS) {
670 if ((i == CS_SUCCESS) && (cf->io.nwin == 1) && (cf->io.win[0].len > 8)) { 653 if ((i == CS_SUCCESS) && (cf->io.nwin == 1) && (cf->io.win[0].len > 8)) {
671 link->conf.ConfigIndex = cf->index; 654 link->conf.ConfigIndex = cf->index;
672 link->io.BasePort1 = cf->io.win[0].base; 655 link->io.BasePort1 = cf->io.win[0].base;
673 link->io.NumPorts1 = cf->io.win[0].len; /*yo */ 656 link->io.NumPorts1 = cf->io.win[0].len; /*yo */
674 link->io.IOAddrLines = cf->io.flags & CISTPL_IO_LINES_MASK; 657 link->io.IOAddrLines = cf->io.flags & CISTPL_IO_LINES_MASK;
675 i = pcmcia_request_io(link->handle, &link->io); 658 i = pcmcia_request_io(link, &link->io);
676 if (i == CS_SUCCESS) 659 if (i == CS_SUCCESS)
677 break; 660 break;
678 } 661 }
679 i = next_tuple(handle, &tuple, &parse); 662 i = next_tuple(link, &tuple, &parse);
680 } 663 }
681 664
682 if (i != CS_SUCCESS) { 665 if (i != CS_SUCCESS) {
683 cs_error(link->handle, RequestIO, i); 666 cs_error(link, RequestIO, i);
684 goto failed; 667 goto failed;
685 } 668 }
686 669
687 i = pcmcia_request_irq(link->handle, &link->irq); 670 i = pcmcia_request_irq(link, &link->irq);
688 if (i != CS_SUCCESS) { 671 if (i != CS_SUCCESS) {
689 cs_error(link->handle, RequestIRQ, i); 672 cs_error(link, RequestIRQ, i);
690 link->irq.AssignedIRQ = 0; 673 link->irq.AssignedIRQ = 0;
691 } 674 }
692 675
693 i = pcmcia_request_configuration(link->handle, &link->conf); 676 i = pcmcia_request_configuration(link, &link->conf);
694 if (i != CS_SUCCESS) { 677 if (i != CS_SUCCESS) {
695 cs_error(link->handle, RequestConfiguration, i); 678 cs_error(link, RequestConfiguration, i);
696 goto failed; 679 goto failed;
697 } 680 }
698 681
@@ -700,55 +683,26 @@ static void dtl1_config(dev_link_t *link)
700 goto failed; 683 goto failed;
701 684
702 strcpy(info->node.dev_name, info->hdev->name); 685 strcpy(info->node.dev_name, info->hdev->name);
703 link->dev = &info->node; 686 link->dev_node = &info->node;
704 link->state &= ~DEV_CONFIG_PENDING;
705 687
706 return; 688 return 0;
707 689
708cs_failed: 690cs_failed:
709 cs_error(link->handle, last_fn, last_ret); 691 cs_error(link, last_fn, last_ret);
710 692
711failed: 693failed:
712 dtl1_release(link); 694 dtl1_release(link);
695 return -ENODEV;
713} 696}
714 697
715 698
716static void dtl1_release(dev_link_t *link) 699static void dtl1_release(struct pcmcia_device *link)
717{ 700{
718 dtl1_info_t *info = link->priv; 701 dtl1_info_t *info = link->priv;
719 702
720 if (link->state & DEV_PRESENT) 703 dtl1_close(info);
721 dtl1_close(info);
722
723 link->dev = NULL;
724
725 pcmcia_release_configuration(link->handle);
726 pcmcia_release_io(link->handle, &link->io);
727 pcmcia_release_irq(link->handle, &link->irq);
728
729 link->state &= ~DEV_CONFIG;
730}
731
732static int dtl1_suspend(struct pcmcia_device *dev)
733{
734 dev_link_t *link = dev_to_instance(dev);
735
736 link->state |= DEV_SUSPEND;
737 if (link->state & DEV_CONFIG)
738 pcmcia_release_configuration(link->handle);
739
740 return 0;
741}
742
743static int dtl1_resume(struct pcmcia_device *dev)
744{
745 dev_link_t *link = dev_to_instance(dev);
746 704
747 link->state &= ~DEV_SUSPEND; 705 pcmcia_disable_device(link);
748 if (DEV_OK(link))
749 pcmcia_request_configuration(link->handle, &link->conf);
750
751 return 0;
752} 706}
753 707
754 708
@@ -765,11 +719,9 @@ static struct pcmcia_driver dtl1_driver = {
765 .drv = { 719 .drv = {
766 .name = "dtl1_cs", 720 .name = "dtl1_cs",
767 }, 721 },
768 .probe = dtl1_attach, 722 .probe = dtl1_probe,
769 .remove = dtl1_detach, 723 .remove = dtl1_detach,
770 .id_table = dtl1_ids, 724 .id_table = dtl1_ids,
771 .suspend = dtl1_suspend,
772 .resume = dtl1_resume,
773}; 725};
774 726
775static int __init init_dtl1_cs(void) 727static int __init init_dtl1_cs(void)
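dtl1_cs repeats the same conversion, and all three Bluetooth drivers also lose their suspend/resume methods: the old handlers only released the configuration on suspend and re-requested it on resume, which the PCMCIA core now does generically, so the .suspend/.resume entries vanish from struct pcmcia_driver. A sketch of the trimmed registration boilerplate, reusing the illustrative foo_probe()/foo_detach() callbacks from the earlier sketch; the ID table is left empty here.

/* Sketch only: driver registration after the conversion.  foo_* names are
 * placeholders, not symbols from the patch. */
static struct pcmcia_device_id foo_ids[] = {
	PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, foo_ids);

static struct pcmcia_driver foo_driver = {
	.owner		= THIS_MODULE,
	.drv		= {
		.name	= "foo_cs",
	},
	.probe		= foo_probe,
	.remove		= foo_detach,
	.id_table	= foo_ids,
	/* .suspend / .resume intentionally absent: the core releases and
	 * restores the configuration across suspend itself. */
};

static int __init foo_init(void)
{
	return pcmcia_register_driver(&foo_driver);
}

static void __exit foo_exit(void)
{
	pcmcia_unregister_driver(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);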
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index 327b00c3c45e..8d97b3911293 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -904,7 +904,7 @@ static int hvcs_enable_device(struct hvcs_struct *hvcsd, uint32_t unit_address,
904 * It is possible the vty-server was removed after the irq was 904 * It is possible the vty-server was removed after the irq was
905 * requested but before we have time to enable interrupts. 905 * requested but before we have time to enable interrupts.
906 */ 906 */
907 if (vio_enable_interrupts(vdev) == H_Success) 907 if (vio_enable_interrupts(vdev) == H_SUCCESS)
908 return 0; 908 return 0;
909 else { 909 else {
910 printk(KERN_ERR "HVCS: int enable failed for" 910 printk(KERN_ERR "HVCS: int enable failed for"
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 932feedda262..e1c95374984c 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -42,7 +42,7 @@
42#include <linux/slab.h> 42#include <linux/slab.h>
43#include <linux/devfs_fs_kernel.h> 43#include <linux/devfs_fs_kernel.h>
44#include <linux/ipmi.h> 44#include <linux/ipmi.h>
45#include <asm/semaphore.h> 45#include <linux/mutex.h>
46#include <linux/init.h> 46#include <linux/init.h>
47#include <linux/device.h> 47#include <linux/device.h>
48#include <linux/compat.h> 48#include <linux/compat.h>
@@ -55,7 +55,7 @@ struct ipmi_file_private
55 struct file *file; 55 struct file *file;
56 struct fasync_struct *fasync_queue; 56 struct fasync_struct *fasync_queue;
57 wait_queue_head_t wait; 57 wait_queue_head_t wait;
58 struct semaphore recv_sem; 58 struct mutex recv_mutex;
59 int default_retries; 59 int default_retries;
60 unsigned int default_retry_time_ms; 60 unsigned int default_retry_time_ms;
61}; 61};
@@ -141,7 +141,7 @@ static int ipmi_open(struct inode *inode, struct file *file)
141 INIT_LIST_HEAD(&(priv->recv_msgs)); 141 INIT_LIST_HEAD(&(priv->recv_msgs));
142 init_waitqueue_head(&priv->wait); 142 init_waitqueue_head(&priv->wait);
143 priv->fasync_queue = NULL; 143 priv->fasync_queue = NULL;
144 sema_init(&(priv->recv_sem), 1); 144 mutex_init(&priv->recv_mutex);
145 145
146 /* Use the low-level defaults. */ 146 /* Use the low-level defaults. */
147 priv->default_retries = -1; 147 priv->default_retries = -1;
@@ -285,15 +285,15 @@ static int ipmi_ioctl(struct inode *inode,
285 break; 285 break;
286 } 286 }
287 287
288 /* We claim a semaphore because we don't want two 288 /* We claim a mutex because we don't want two
289 users getting something from the queue at a time. 289 users getting something from the queue at a time.
290 Since we have to release the spinlock before we can 290 Since we have to release the spinlock before we can
291 copy the data to the user, it's possible another 291 copy the data to the user, it's possible another
292 user will grab something from the queue, too. Then 292 user will grab something from the queue, too. Then
293 the messages might get out of order if something 293 the messages might get out of order if something
294 fails and the message gets put back onto the 294 fails and the message gets put back onto the
295 queue. This semaphore prevents that problem. */ 295 queue. This mutex prevents that problem. */
296 down(&(priv->recv_sem)); 296 mutex_lock(&priv->recv_mutex);
297 297
298 /* Grab the message off the list. */ 298 /* Grab the message off the list. */
299 spin_lock_irqsave(&(priv->recv_msg_lock), flags); 299 spin_lock_irqsave(&(priv->recv_msg_lock), flags);
@@ -352,7 +352,7 @@ static int ipmi_ioctl(struct inode *inode,
352 goto recv_putback_on_err; 352 goto recv_putback_on_err;
353 } 353 }
354 354
355 up(&(priv->recv_sem)); 355 mutex_unlock(&priv->recv_mutex);
356 ipmi_free_recv_msg(msg); 356 ipmi_free_recv_msg(msg);
357 break; 357 break;
358 358
@@ -362,11 +362,11 @@ static int ipmi_ioctl(struct inode *inode,
362 spin_lock_irqsave(&(priv->recv_msg_lock), flags); 362 spin_lock_irqsave(&(priv->recv_msg_lock), flags);
363 list_add(entry, &(priv->recv_msgs)); 363 list_add(entry, &(priv->recv_msgs));
364 spin_unlock_irqrestore(&(priv->recv_msg_lock), flags); 364 spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
365 up(&(priv->recv_sem)); 365 mutex_unlock(&priv->recv_mutex);
366 break; 366 break;
367 367
368 recv_err: 368 recv_err:
369 up(&(priv->recv_sem)); 369 mutex_unlock(&priv->recv_mutex);
370 break; 370 break;
371 } 371 }
372 372
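The ipmi_devintf change is a straight semaphore-to-mutex conversion: recv_sem only ever serialized readers pulling messages off the receive queue, so it becomes a struct mutex and the down()/up() pairs become mutex_lock()/mutex_unlock(). A small sketch of the pattern under illustrative names; the real code interleaves this with copy_to_user() and error re-queueing, which is elided here.

/* Sketch only: one reader at a time drains a spinlock-protected queue;
 * the mutex replaces a binary semaphore used purely for mutual exclusion. */
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/list.h>

struct foo_private {
	struct mutex	 recv_mutex;	/* was: struct semaphore recv_sem */
	spinlock_t	 recv_msg_lock;
	struct list_head recv_msgs;
};

static void foo_init_private(struct foo_private *priv)
{
	mutex_init(&priv->recv_mutex);	/* was: sema_init(&priv->recv_sem, 1) */
	spin_lock_init(&priv->recv_msg_lock);
	INIT_LIST_HEAD(&priv->recv_msgs);
}

static struct list_head *foo_take_one(struct foo_private *priv)
{
	struct list_head *entry = NULL;
	unsigned long flags;

	/* Only one reader may pull from the queue at a time, so the copy
	 * to user space cannot be overtaken and reordered by another. */
	mutex_lock(&priv->recv_mutex);		/* was: down(&priv->recv_sem) */

	spin_lock_irqsave(&priv->recv_msg_lock, flags);
	if (!list_empty(&priv->recv_msgs)) {
		entry = priv->recv_msgs.next;
		list_del_init(entry);
	}
	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

	mutex_unlock(&priv->recv_mutex);	/* was: up(&priv->recv_sem) */
	return entry;
}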
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index da1554194d3d..2062675f9e99 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -227,7 +227,7 @@ static inline int check_ibf(struct si_sm_data *kcs, unsigned char status,
227static inline int check_obf(struct si_sm_data *kcs, unsigned char status, 227static inline int check_obf(struct si_sm_data *kcs, unsigned char status,
228 long time) 228 long time)
229{ 229{
230 if (! GET_STATUS_OBF(status)) { 230 if (!GET_STATUS_OBF(status)) {
231 kcs->obf_timeout -= time; 231 kcs->obf_timeout -= time;
232 if (kcs->obf_timeout < 0) { 232 if (kcs->obf_timeout < 0) {
233 start_error_recovery(kcs, "OBF not ready in time"); 233 start_error_recovery(kcs, "OBF not ready in time");
@@ -407,7 +407,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
407 } 407 }
408 408
409 if (state == KCS_READ_STATE) { 409 if (state == KCS_READ_STATE) {
410 if (! check_obf(kcs, status, time)) 410 if (!check_obf(kcs, status, time))
411 return SI_SM_CALL_WITH_DELAY; 411 return SI_SM_CALL_WITH_DELAY;
412 read_next_byte(kcs); 412 read_next_byte(kcs);
413 } else { 413 } else {
@@ -447,7 +447,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
447 "Not in read state for error2"); 447 "Not in read state for error2");
448 break; 448 break;
449 } 449 }
450 if (! check_obf(kcs, status, time)) 450 if (!check_obf(kcs, status, time))
451 return SI_SM_CALL_WITH_DELAY; 451 return SI_SM_CALL_WITH_DELAY;
452 452
453 clear_obf(kcs, status); 453 clear_obf(kcs, status);
@@ -462,7 +462,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
462 break; 462 break;
463 } 463 }
464 464
465 if (! check_obf(kcs, status, time)) 465 if (!check_obf(kcs, status, time))
466 return SI_SM_CALL_WITH_DELAY; 466 return SI_SM_CALL_WITH_DELAY;
467 467
468 clear_obf(kcs, status); 468 clear_obf(kcs, status);
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 40eb005b9d77..0ded046d5aa8 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -38,6 +38,7 @@
38#include <linux/sched.h> 38#include <linux/sched.h>
39#include <linux/poll.h> 39#include <linux/poll.h>
40#include <linux/spinlock.h> 40#include <linux/spinlock.h>
41#include <linux/mutex.h>
41#include <linux/slab.h> 42#include <linux/slab.h>
42#include <linux/ipmi.h> 43#include <linux/ipmi.h>
43#include <linux/ipmi_smi.h> 44#include <linux/ipmi_smi.h>
@@ -234,7 +235,7 @@ struct ipmi_smi
234 235
235 /* The list of command receivers that are registered for commands 236 /* The list of command receivers that are registered for commands
236 on this interface. */ 237 on this interface. */
237 struct semaphore cmd_rcvrs_lock; 238 struct mutex cmd_rcvrs_mutex;
238 struct list_head cmd_rcvrs; 239 struct list_head cmd_rcvrs;
239 240
240 /* Events that were queues because no one was there to receive 241 /* Events that were queues because no one was there to receive
@@ -387,10 +388,10 @@ static void clean_up_interface_data(ipmi_smi_t intf)
387 388
388 /* Wholesale remove all the entries from the list in the 389 /* Wholesale remove all the entries from the list in the
389 * interface and wait for RCU to know that none are in use. */ 390 * interface and wait for RCU to know that none are in use. */
390 down(&intf->cmd_rcvrs_lock); 391 mutex_lock(&intf->cmd_rcvrs_mutex);
391 list_add_rcu(&list, &intf->cmd_rcvrs); 392 list_add_rcu(&list, &intf->cmd_rcvrs);
392 list_del_rcu(&intf->cmd_rcvrs); 393 list_del_rcu(&intf->cmd_rcvrs);
393 up(&intf->cmd_rcvrs_lock); 394 mutex_unlock(&intf->cmd_rcvrs_mutex);
394 synchronize_rcu(); 395 synchronize_rcu();
395 396
396 list_for_each_entry_safe(rcvr, rcvr2, &list, link) 397 list_for_each_entry_safe(rcvr, rcvr2, &list, link)
@@ -557,7 +558,7 @@ unsigned int ipmi_addr_length(int addr_type)
557 558
558static void deliver_response(struct ipmi_recv_msg *msg) 559static void deliver_response(struct ipmi_recv_msg *msg)
559{ 560{
560 if (! msg->user) { 561 if (!msg->user) {
561 ipmi_smi_t intf = msg->user_msg_data; 562 ipmi_smi_t intf = msg->user_msg_data;
562 unsigned long flags; 563 unsigned long flags;
563 564
@@ -598,11 +599,11 @@ static int intf_next_seq(ipmi_smi_t intf,
598 (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq; 599 (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
599 i = (i+1)%IPMI_IPMB_NUM_SEQ) 600 i = (i+1)%IPMI_IPMB_NUM_SEQ)
600 { 601 {
601 if (! intf->seq_table[i].inuse) 602 if (!intf->seq_table[i].inuse)
602 break; 603 break;
603 } 604 }
604 605
605 if (! intf->seq_table[i].inuse) { 606 if (!intf->seq_table[i].inuse) {
606 intf->seq_table[i].recv_msg = recv_msg; 607 intf->seq_table[i].recv_msg = recv_msg;
607 608
608 /* Start with the maximum timeout, when the send response 609 /* Start with the maximum timeout, when the send response
@@ -763,7 +764,7 @@ int ipmi_create_user(unsigned int if_num,
763 } 764 }
764 765
765 new_user = kmalloc(sizeof(*new_user), GFP_KERNEL); 766 new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
766 if (! new_user) 767 if (!new_user)
767 return -ENOMEM; 768 return -ENOMEM;
768 769
769 spin_lock_irqsave(&interfaces_lock, flags); 770 spin_lock_irqsave(&interfaces_lock, flags);
@@ -819,14 +820,13 @@ static void free_user(struct kref *ref)
819 820
820int ipmi_destroy_user(ipmi_user_t user) 821int ipmi_destroy_user(ipmi_user_t user)
821{ 822{
822 int rv = -ENODEV;
823 ipmi_smi_t intf = user->intf; 823 ipmi_smi_t intf = user->intf;
824 int i; 824 int i;
825 unsigned long flags; 825 unsigned long flags;
826 struct cmd_rcvr *rcvr; 826 struct cmd_rcvr *rcvr;
827 struct cmd_rcvr *rcvrs = NULL; 827 struct cmd_rcvr *rcvrs = NULL;
828 828
829 user->valid = 1; 829 user->valid = 0;
830 830
831 /* Remove the user from the interface's sequence table. */ 831 /* Remove the user from the interface's sequence table. */
832 spin_lock_irqsave(&intf->seq_lock, flags); 832 spin_lock_irqsave(&intf->seq_lock, flags);
@@ -847,7 +847,7 @@ int ipmi_destroy_user(ipmi_user_t user)
847 * since other things may be using it till we do 847 * since other things may be using it till we do
848 * synchronize_rcu()) then free everything in that list. 848 * synchronize_rcu()) then free everything in that list.
849 */ 849 */
850 down(&intf->cmd_rcvrs_lock); 850 mutex_lock(&intf->cmd_rcvrs_mutex);
851 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) { 851 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
852 if (rcvr->user == user) { 852 if (rcvr->user == user) {
853 list_del_rcu(&rcvr->link); 853 list_del_rcu(&rcvr->link);
@@ -855,7 +855,7 @@ int ipmi_destroy_user(ipmi_user_t user)
855 rcvrs = rcvr; 855 rcvrs = rcvr;
856 } 856 }
857 } 857 }
858 up(&intf->cmd_rcvrs_lock); 858 mutex_unlock(&intf->cmd_rcvrs_mutex);
859 synchronize_rcu(); 859 synchronize_rcu();
860 while (rcvrs) { 860 while (rcvrs) {
861 rcvr = rcvrs; 861 rcvr = rcvrs;
@@ -871,7 +871,7 @@ int ipmi_destroy_user(ipmi_user_t user)
871 871
872 kref_put(&user->refcount, free_user); 872 kref_put(&user->refcount, free_user);
873 873
874 return rv; 874 return 0;
875} 875}
876 876
877void ipmi_get_version(ipmi_user_t user, 877void ipmi_get_version(ipmi_user_t user,
@@ -936,7 +936,8 @@ int ipmi_set_gets_events(ipmi_user_t user, int val)
936 936
937 if (val) { 937 if (val) {
938 /* Deliver any queued events. */ 938 /* Deliver any queued events. */
939 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) { 939 list_for_each_entry_safe(msg, msg2, &intf->waiting_events,
940 link) {
940 list_del(&msg->link); 941 list_del(&msg->link);
941 list_add_tail(&msg->link, &msgs); 942 list_add_tail(&msg->link, &msgs);
942 } 943 }
@@ -978,13 +979,13 @@ int ipmi_register_for_cmd(ipmi_user_t user,
978 979
979 980
980 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL); 981 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
981 if (! rcvr) 982 if (!rcvr)
982 return -ENOMEM; 983 return -ENOMEM;
983 rcvr->cmd = cmd; 984 rcvr->cmd = cmd;
984 rcvr->netfn = netfn; 985 rcvr->netfn = netfn;
985 rcvr->user = user; 986 rcvr->user = user;
986 987
987 down(&intf->cmd_rcvrs_lock); 988 mutex_lock(&intf->cmd_rcvrs_mutex);
988 /* Make sure the command/netfn is not already registered. */ 989 /* Make sure the command/netfn is not already registered. */
989 entry = find_cmd_rcvr(intf, netfn, cmd); 990 entry = find_cmd_rcvr(intf, netfn, cmd);
990 if (entry) { 991 if (entry) {
@@ -995,7 +996,7 @@ int ipmi_register_for_cmd(ipmi_user_t user,
995 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs); 996 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
996 997
997 out_unlock: 998 out_unlock:
998 up(&intf->cmd_rcvrs_lock); 999 mutex_unlock(&intf->cmd_rcvrs_mutex);
999 if (rv) 1000 if (rv)
1000 kfree(rcvr); 1001 kfree(rcvr);
1001 1002
@@ -1009,17 +1010,17 @@ int ipmi_unregister_for_cmd(ipmi_user_t user,
1009 ipmi_smi_t intf = user->intf; 1010 ipmi_smi_t intf = user->intf;
1010 struct cmd_rcvr *rcvr; 1011 struct cmd_rcvr *rcvr;
1011 1012
1012 down(&intf->cmd_rcvrs_lock); 1013 mutex_lock(&intf->cmd_rcvrs_mutex);
1013 /* Make sure the command/netfn is not already registered. */ 1014 /* Make sure the command/netfn is not already registered. */
1014 rcvr = find_cmd_rcvr(intf, netfn, cmd); 1015 rcvr = find_cmd_rcvr(intf, netfn, cmd);
1015 if ((rcvr) && (rcvr->user == user)) { 1016 if ((rcvr) && (rcvr->user == user)) {
1016 list_del_rcu(&rcvr->link); 1017 list_del_rcu(&rcvr->link);
1017 up(&intf->cmd_rcvrs_lock); 1018 mutex_unlock(&intf->cmd_rcvrs_mutex);
1018 synchronize_rcu(); 1019 synchronize_rcu();
1019 kfree(rcvr); 1020 kfree(rcvr);
1020 return 0; 1021 return 0;
1021 } else { 1022 } else {
1022 up(&intf->cmd_rcvrs_lock); 1023 mutex_unlock(&intf->cmd_rcvrs_mutex);
1023 return -ENOENT; 1024 return -ENOENT;
1024 } 1025 }
1025} 1026}
@@ -1514,7 +1515,7 @@ int ipmi_request_settime(ipmi_user_t user,
1514 unsigned char saddr, lun; 1515 unsigned char saddr, lun;
1515 int rv; 1516 int rv;
1516 1517
1517 if (! user) 1518 if (!user)
1518 return -EINVAL; 1519 return -EINVAL;
1519 rv = check_addr(user->intf, addr, &saddr, &lun); 1520 rv = check_addr(user->intf, addr, &saddr, &lun);
1520 if (rv) 1521 if (rv)
@@ -1545,7 +1546,7 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
1545 unsigned char saddr, lun; 1546 unsigned char saddr, lun;
1546 int rv; 1547 int rv;
1547 1548
1548 if (! user) 1549 if (!user)
1549 return -EINVAL; 1550 return -EINVAL;
1550 rv = check_addr(user->intf, addr, &saddr, &lun); 1551 rv = check_addr(user->intf, addr, &saddr, &lun);
1551 if (rv) 1552 if (rv)
@@ -1570,7 +1571,7 @@ static int ipmb_file_read_proc(char *page, char **start, off_t off,
1570 char *out = (char *) page; 1571 char *out = (char *) page;
1571 ipmi_smi_t intf = data; 1572 ipmi_smi_t intf = data;
1572 int i; 1573 int i;
1573 int rv= 0; 1574 int rv = 0;
1574 1575
1575 for (i = 0; i < IPMI_MAX_CHANNELS; i++) 1576 for (i = 0; i < IPMI_MAX_CHANNELS; i++)
1576 rv += sprintf(out+rv, "%x ", intf->channels[i].address); 1577 rv += sprintf(out+rv, "%x ", intf->channels[i].address);
@@ -1989,7 +1990,7 @@ static int ipmi_bmc_register(ipmi_smi_t intf)
1989 } else { 1990 } else {
1990 bmc->dev = platform_device_alloc("ipmi_bmc", 1991 bmc->dev = platform_device_alloc("ipmi_bmc",
1991 bmc->id.device_id); 1992 bmc->id.device_id);
1992 if (! bmc->dev) { 1993 if (!bmc->dev) {
1993 printk(KERN_ERR 1994 printk(KERN_ERR
1994 "ipmi_msghandler:" 1995 "ipmi_msghandler:"
1995 " Unable to allocate platform device\n"); 1996 " Unable to allocate platform device\n");
@@ -2305,8 +2306,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2305 void *send_info, 2306 void *send_info,
2306 struct ipmi_device_id *device_id, 2307 struct ipmi_device_id *device_id,
2307 struct device *si_dev, 2308 struct device *si_dev,
2308 unsigned char slave_addr, 2309 unsigned char slave_addr)
2309 ipmi_smi_t *new_intf)
2310{ 2310{
2311 int i, j; 2311 int i, j;
2312 int rv; 2312 int rv;
@@ -2366,7 +2366,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2366 spin_lock_init(&intf->events_lock); 2366 spin_lock_init(&intf->events_lock);
2367 INIT_LIST_HEAD(&intf->waiting_events); 2367 INIT_LIST_HEAD(&intf->waiting_events);
2368 intf->waiting_events_count = 0; 2368 intf->waiting_events_count = 0;
2369 init_MUTEX(&intf->cmd_rcvrs_lock); 2369 mutex_init(&intf->cmd_rcvrs_mutex);
2370 INIT_LIST_HEAD(&intf->cmd_rcvrs); 2370 INIT_LIST_HEAD(&intf->cmd_rcvrs);
2371 init_waitqueue_head(&intf->waitq); 2371 init_waitqueue_head(&intf->waitq);
2372 2372
@@ -2388,9 +2388,9 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2388 if (rv) 2388 if (rv)
2389 goto out; 2389 goto out;
2390 2390
2391 /* FIXME - this is an ugly kludge, this sets the intf for the 2391 rv = handlers->start_processing(send_info, intf);
2392 caller before sending any messages with it. */ 2392 if (rv)
2393 *new_intf = intf; 2393 goto out;
2394 2394
2395 get_guid(intf); 2395 get_guid(intf);
2396 2396
@@ -2622,7 +2622,7 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
2622 spin_unlock_irqrestore(&intf->counter_lock, flags); 2622 spin_unlock_irqrestore(&intf->counter_lock, flags);
2623 2623
2624 recv_msg = ipmi_alloc_recv_msg(); 2624 recv_msg = ipmi_alloc_recv_msg();
2625 if (! recv_msg) { 2625 if (!recv_msg) {
2626 /* We couldn't allocate memory for the 2626 /* We couldn't allocate memory for the
2627 message, so requeue it for handling 2627 message, so requeue it for handling
2628 later. */ 2628 later. */
@@ -2777,7 +2777,7 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
2777 spin_unlock_irqrestore(&intf->counter_lock, flags); 2777 spin_unlock_irqrestore(&intf->counter_lock, flags);
2778 2778
2779 recv_msg = ipmi_alloc_recv_msg(); 2779 recv_msg = ipmi_alloc_recv_msg();
2780 if (! recv_msg) { 2780 if (!recv_msg) {
2781 /* We couldn't allocate memory for the 2781 /* We couldn't allocate memory for the
2782 message, so requeue it for handling 2782 message, so requeue it for handling
2783 later. */ 2783 later. */
@@ -2869,13 +2869,14 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
2869 events. */ 2869 events. */
2870 rcu_read_lock(); 2870 rcu_read_lock();
2871 list_for_each_entry_rcu(user, &intf->users, link) { 2871 list_for_each_entry_rcu(user, &intf->users, link) {
2872 if (! user->gets_events) 2872 if (!user->gets_events)
2873 continue; 2873 continue;
2874 2874
2875 recv_msg = ipmi_alloc_recv_msg(); 2875 recv_msg = ipmi_alloc_recv_msg();
2876 if (! recv_msg) { 2876 if (!recv_msg) {
2877 rcu_read_unlock(); 2877 rcu_read_unlock();
2878 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) { 2878 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
2879 link) {
2879 list_del(&recv_msg->link); 2880 list_del(&recv_msg->link);
2880 ipmi_free_recv_msg(recv_msg); 2881 ipmi_free_recv_msg(recv_msg);
2881 } 2882 }
@@ -2905,7 +2906,7 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
2905 /* No one to receive the message, put it in queue if there's 2906 /* No one to receive the message, put it in queue if there's
2906 not already too many things in the queue. */ 2907 not already too many things in the queue. */
2907 recv_msg = ipmi_alloc_recv_msg(); 2908 recv_msg = ipmi_alloc_recv_msg();
2908 if (! recv_msg) { 2909 if (!recv_msg) {
2909 /* We couldn't allocate memory for the 2910 /* We couldn't allocate memory for the
2910 message, so requeue it for handling 2911 message, so requeue it for handling
2911 later. */ 2912 later. */
@@ -3190,7 +3191,7 @@ void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3190 3191
3191 rcu_read_lock(); 3192 rcu_read_lock();
3192 list_for_each_entry_rcu(user, &intf->users, link) { 3193 list_for_each_entry_rcu(user, &intf->users, link) {
3193 if (! user->handler->ipmi_watchdog_pretimeout) 3194 if (!user->handler->ipmi_watchdog_pretimeout)
3194 continue; 3195 continue;
3195 3196
3196 user->handler->ipmi_watchdog_pretimeout(user->handler_data); 3197 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
@@ -3278,7 +3279,7 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3278 3279
3279 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot, 3280 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
3280 ent->seqid); 3281 ent->seqid);
3281 if (! smi_msg) 3282 if (!smi_msg)
3282 return; 3283 return;
3283 3284
3284 spin_unlock_irqrestore(&intf->seq_lock, *flags); 3285 spin_unlock_irqrestore(&intf->seq_lock, *flags);
@@ -3314,8 +3315,9 @@ static void ipmi_timeout_handler(long timeout_period)
3314 3315
3315 /* See if any waiting messages need to be processed. */ 3316 /* See if any waiting messages need to be processed. */
3316 spin_lock_irqsave(&intf->waiting_msgs_lock, flags); 3317 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3317 list_for_each_entry_safe(smi_msg, smi_msg2, &intf->waiting_msgs, link) { 3318 list_for_each_entry_safe(smi_msg, smi_msg2,
3318 if (! handle_new_recv_msg(intf, smi_msg)) { 3319 &intf->waiting_msgs, link) {
3320 if (!handle_new_recv_msg(intf, smi_msg)) {
3319 list_del(&smi_msg->link); 3321 list_del(&smi_msg->link);
3320 ipmi_free_smi_msg(smi_msg); 3322 ipmi_free_smi_msg(smi_msg);
3321 } else { 3323 } else {
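Besides the same semaphore-to-mutex swap for cmd_rcvrs_lock (which guards an RCU-managed receiver list) and an ipmi_destroy_user() fix (the user is now marked invalid with user->valid = 0 and the function returns 0 instead of a stale -ENODEV), ipmi_msghandler drops the old out-parameter from ipmi_register_smi(): instead of exposing a half-initialized interface through *new_intf, it calls the driver's new start_processing() handler once the interface is ready. A sketch of that handshake, with names taken from the diff but the surrounding scaffolding purely illustrative:

/* Sketch only: the registration side of the new handshake. */
struct ipmi_smi;			/* opaque interface handle */
typedef struct ipmi_smi *ipmi_smi_t;

struct foo_smi_handlers {
	int (*start_processing)(void *send_info, ipmi_smi_t new_intf);
	/* ... sender, request_events, set_run_to_completion, ... */
};

static int foo_register_smi(struct foo_smi_handlers *handlers, void *send_info,
			    ipmi_smi_t intf)
{
	int rv;

	/* Old code published the pointer early ("*new_intf = intf", the
	 * FIXME kludge); now the low-level driver is told explicitly when
	 * it may start its timer/thread, so no message can arrive before
	 * the interface is usable. */
	rv = handlers->start_processing(send_info, intf);
	if (rv)
		return rv;		/* caller unwinds the interface */

	/* get_guid(), channel scanning, proc entries, etc. follow. */
	return 0;
}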
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index 786a2802ca34..d0b5c08e7b4e 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -346,7 +346,7 @@ static int ipmi_dell_chassis_detect (ipmi_user_t user)
346{ 346{
347 const char ipmi_version_major = ipmi_version & 0xF; 347 const char ipmi_version_major = ipmi_version & 0xF;
348 const char ipmi_version_minor = (ipmi_version >> 4) & 0xF; 348 const char ipmi_version_minor = (ipmi_version >> 4) & 0xF;
349 const char mfr[3]=DELL_IANA_MFR_ID; 349 const char mfr[3] = DELL_IANA_MFR_ID;
350 if (!memcmp(mfr, &mfg_id, sizeof(mfr)) && 350 if (!memcmp(mfr, &mfg_id, sizeof(mfr)) &&
351 ipmi_version_major <= 1 && 351 ipmi_version_major <= 1 &&
352 ipmi_version_minor < 5) 352 ipmi_version_minor < 5)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 35fbd4d8ed4b..a86c0f29953e 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -803,7 +803,7 @@ static int ipmi_thread(void *data)
803 set_user_nice(current, 19); 803 set_user_nice(current, 19);
804 while (!kthread_should_stop()) { 804 while (!kthread_should_stop()) {
805 spin_lock_irqsave(&(smi_info->si_lock), flags); 805 spin_lock_irqsave(&(smi_info->si_lock), flags);
806 smi_result=smi_event_handler(smi_info, 0); 806 smi_result = smi_event_handler(smi_info, 0);
807 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 807 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
808 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { 808 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
809 /* do nothing */ 809 /* do nothing */
@@ -972,10 +972,37 @@ static irqreturn_t si_bt_irq_handler(int irq, void *data, struct pt_regs *regs)
972 return si_irq_handler(irq, data, regs); 972 return si_irq_handler(irq, data, regs);
973} 973}
974 974
975static int smi_start_processing(void *send_info,
976 ipmi_smi_t intf)
977{
978 struct smi_info *new_smi = send_info;
979
980 new_smi->intf = intf;
981
982 /* Set up the timer that drives the interface. */
983 setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
984 new_smi->last_timeout_jiffies = jiffies;
985 mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
986
987 if (new_smi->si_type != SI_BT) {
988 new_smi->thread = kthread_run(ipmi_thread, new_smi,
989 "kipmi%d", new_smi->intf_num);
990 if (IS_ERR(new_smi->thread)) {
991 printk(KERN_NOTICE "ipmi_si_intf: Could not start"
992 " kernel thread due to error %ld, only using"
993 " timers to drive the interface\n",
994 PTR_ERR(new_smi->thread));
995 new_smi->thread = NULL;
996 }
997 }
998
999 return 0;
1000}
975 1001
976static struct ipmi_smi_handlers handlers = 1002static struct ipmi_smi_handlers handlers =
977{ 1003{
978 .owner = THIS_MODULE, 1004 .owner = THIS_MODULE,
1005 .start_processing = smi_start_processing,
979 .sender = sender, 1006 .sender = sender,
980 .request_events = request_events, 1007 .request_events = request_events,
981 .set_run_to_completion = set_run_to_completion, 1008 .set_run_to_completion = set_run_to_completion,
@@ -987,7 +1014,7 @@ static struct ipmi_smi_handlers handlers =
987 1014
988#define SI_MAX_PARMS 4 1015#define SI_MAX_PARMS 4
989static LIST_HEAD(smi_infos); 1016static LIST_HEAD(smi_infos);
990static DECLARE_MUTEX(smi_infos_lock); 1017static DEFINE_MUTEX(smi_infos_lock);
991static int smi_num; /* Used to sequence the SMIs */ 1018static int smi_num; /* Used to sequence the SMIs */
992 1019
993#define DEFAULT_REGSPACING 1 1020#define DEFAULT_REGSPACING 1
@@ -2162,9 +2189,13 @@ static void setup_xaction_handlers(struct smi_info *smi_info)
2162 2189
2163static inline void wait_for_timer_and_thread(struct smi_info *smi_info) 2190static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2164{ 2191{
2165 if (smi_info->thread != NULL && smi_info->thread != ERR_PTR(-ENOMEM)) 2192 if (smi_info->intf) {
2166 kthread_stop(smi_info->thread); 2193 /* The timer and thread are only running if the
2167 del_timer_sync(&smi_info->si_timer); 2194 interface has been started up and registered. */
2195 if (smi_info->thread != NULL)
2196 kthread_stop(smi_info->thread);
2197 del_timer_sync(&smi_info->si_timer);
2198 }
2168} 2199}
2169 2200
2170static struct ipmi_default_vals 2201static struct ipmi_default_vals
@@ -2245,7 +2276,7 @@ static int try_smi_init(struct smi_info *new_smi)
2245 new_smi->slave_addr, new_smi->irq); 2276 new_smi->slave_addr, new_smi->irq);
2246 } 2277 }
2247 2278
2248 down(&smi_infos_lock); 2279 mutex_lock(&smi_infos_lock);
2249 if (!is_new_interface(new_smi)) { 2280 if (!is_new_interface(new_smi)) {
2250 printk(KERN_WARNING "ipmi_si: duplicate interface\n"); 2281 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2251 rv = -EBUSY; 2282 rv = -EBUSY;
@@ -2341,21 +2372,6 @@ static int try_smi_init(struct smi_info *new_smi)
2341 if (new_smi->irq) 2372 if (new_smi->irq)
2342 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ; 2373 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2343 2374
2344 /* The ipmi_register_smi() code does some operations to
2345 determine the channel information, so we must be ready to
2346 handle operations before it is called. This means we have
2347 to stop the timer if we get an error after this point. */
2348 init_timer(&(new_smi->si_timer));
2349 new_smi->si_timer.data = (long) new_smi;
2350 new_smi->si_timer.function = smi_timeout;
2351 new_smi->last_timeout_jiffies = jiffies;
2352 new_smi->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
2353
2354 add_timer(&(new_smi->si_timer));
2355 if (new_smi->si_type != SI_BT)
2356 new_smi->thread = kthread_run(ipmi_thread, new_smi,
2357 "kipmi%d", new_smi->intf_num);
2358
2359 if (!new_smi->dev) { 2375 if (!new_smi->dev) {
2360 /* If we don't already have a device from something 2376 /* If we don't already have a device from something
2361 * else (like PCI), then register a new one. */ 2377 * else (like PCI), then register a new one. */
@@ -2365,7 +2381,7 @@ static int try_smi_init(struct smi_info *new_smi)
2365 printk(KERN_ERR 2381 printk(KERN_ERR
2366 "ipmi_si_intf:" 2382 "ipmi_si_intf:"
2367 " Unable to allocate platform device\n"); 2383 " Unable to allocate platform device\n");
2368 goto out_err_stop_timer; 2384 goto out_err;
2369 } 2385 }
2370 new_smi->dev = &new_smi->pdev->dev; 2386 new_smi->dev = &new_smi->pdev->dev;
2371 new_smi->dev->driver = &ipmi_driver; 2387 new_smi->dev->driver = &ipmi_driver;
@@ -2377,7 +2393,7 @@ static int try_smi_init(struct smi_info *new_smi)
2377 " Unable to register system interface device:" 2393 " Unable to register system interface device:"
2378 " %d\n", 2394 " %d\n",
2379 rv); 2395 rv);
2380 goto out_err_stop_timer; 2396 goto out_err;
2381 } 2397 }
2382 new_smi->dev_registered = 1; 2398 new_smi->dev_registered = 1;
2383 } 2399 }
@@ -2386,8 +2402,7 @@ static int try_smi_init(struct smi_info *new_smi)
2386 new_smi, 2402 new_smi,
2387 &new_smi->device_id, 2403 &new_smi->device_id,
2388 new_smi->dev, 2404 new_smi->dev,
2389 new_smi->slave_addr, 2405 new_smi->slave_addr);
2390 &(new_smi->intf));
2391 if (rv) { 2406 if (rv) {
2392 printk(KERN_ERR 2407 printk(KERN_ERR
2393 "ipmi_si: Unable to register device: error %d\n", 2408 "ipmi_si: Unable to register device: error %d\n",
@@ -2417,7 +2432,7 @@ static int try_smi_init(struct smi_info *new_smi)
2417 2432
2418 list_add_tail(&new_smi->link, &smi_infos); 2433 list_add_tail(&new_smi->link, &smi_infos);
2419 2434
2420 up(&smi_infos_lock); 2435 mutex_unlock(&smi_infos_lock);
2421 2436
2422 printk(" IPMI %s interface initialized\n",si_to_str[new_smi->si_type]); 2437 printk(" IPMI %s interface initialized\n",si_to_str[new_smi->si_type]);
2423 2438
@@ -2454,7 +2469,7 @@ static int try_smi_init(struct smi_info *new_smi)
2454 2469
2455 kfree(new_smi); 2470 kfree(new_smi);
2456 2471
2457 up(&smi_infos_lock); 2472 mutex_unlock(&smi_infos_lock);
2458 2473
2459 return rv; 2474 return rv;
2460} 2475}
@@ -2512,26 +2527,26 @@ static __devinit int init_ipmi_si(void)
2512#endif 2527#endif
2513 2528
2514 if (si_trydefaults) { 2529 if (si_trydefaults) {
2515 down(&smi_infos_lock); 2530 mutex_lock(&smi_infos_lock);
2516 if (list_empty(&smi_infos)) { 2531 if (list_empty(&smi_infos)) {
2517 /* No BMC was found, try defaults. */ 2532 /* No BMC was found, try defaults. */
2518 up(&smi_infos_lock); 2533 mutex_unlock(&smi_infos_lock);
2519 default_find_bmc(); 2534 default_find_bmc();
2520 } else { 2535 } else {
2521 up(&smi_infos_lock); 2536 mutex_unlock(&smi_infos_lock);
2522 } 2537 }
2523 } 2538 }
2524 2539
2525 down(&smi_infos_lock); 2540 mutex_lock(&smi_infos_lock);
2526 if (list_empty(&smi_infos)) { 2541 if (list_empty(&smi_infos)) {
2527 up(&smi_infos_lock); 2542 mutex_unlock(&smi_infos_lock);
2528#ifdef CONFIG_PCI 2543#ifdef CONFIG_PCI
2529 pci_unregister_driver(&ipmi_pci_driver); 2544 pci_unregister_driver(&ipmi_pci_driver);
2530#endif 2545#endif
2531 printk("ipmi_si: Unable to find any System Interface(s)\n"); 2546 printk("ipmi_si: Unable to find any System Interface(s)\n");
2532 return -ENODEV; 2547 return -ENODEV;
2533 } else { 2548 } else {
2534 up(&smi_infos_lock); 2549 mutex_unlock(&smi_infos_lock);
2535 return 0; 2550 return 0;
2536 } 2551 }
2537} 2552}
@@ -2607,10 +2622,10 @@ static __exit void cleanup_ipmi_si(void)
2607 pci_unregister_driver(&ipmi_pci_driver); 2622 pci_unregister_driver(&ipmi_pci_driver);
2608#endif 2623#endif
2609 2624
2610 down(&smi_infos_lock); 2625 mutex_lock(&smi_infos_lock);
2611 list_for_each_entry_safe(e, tmp_e, &smi_infos, link) 2626 list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
2612 cleanup_one_si(e); 2627 cleanup_one_si(e);
2613 up(&smi_infos_lock); 2628 mutex_unlock(&smi_infos_lock);
2614 2629
2615 driver_unregister(&ipmi_driver); 2630 driver_unregister(&ipmi_driver);
2616} 2631}
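
The new smi_start_processing() callback and the reworked wait_for_timer_and_thread() come down to one pattern: arm a periodic timer, optionally start a helper kthread, fall back to timer-only operation when the thread cannot be created, and tear both down in order. Here is a compact sketch of that shape with invented names, using the same pre-timer_setup() timer calls the driver itself uses; it is an illustration, not the driver's code.

/* Sketch only: periodic timer plus optional worker thread. */
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timer.h>

#define POLL_JIFFIES    (HZ / 10)

static struct timer_list poll_timer;
static struct task_struct *poll_thread;

static void poll_timeout(unsigned long data)
{
        /* ... low-rate polling would happen here ... */
        mod_timer(&poll_timer, jiffies + POLL_JIFFIES);
}

static int poll_fn(void *data)
{
        while (!kthread_should_stop()) {
                /* ... higher-rate polling would happen here ... */
                schedule_timeout_interruptible(1);
        }
        return 0;
}

static int __init poll_demo_init(void)
{
        setup_timer(&poll_timer, poll_timeout, 0);
        mod_timer(&poll_timer, jiffies + POLL_JIFFIES);

        poll_thread = kthread_run(poll_fn, NULL, "poll_demo");
        if (IS_ERR(poll_thread)) {
                /* Degrade gracefully: keep driving things from the timer. */
                printk(KERN_NOTICE "poll_demo: no thread (%ld), timer only\n",
                       PTR_ERR(poll_thread));
                poll_thread = NULL;
        }
        return 0;
}

static void __exit poll_demo_exit(void)
{
        if (poll_thread)
                kthread_stop(poll_thread);
        del_timer_sync(&poll_timer);
}

module_init(poll_demo_init);
module_exit(poll_demo_exit);
MODULE_LICENSE("GPL");
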
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 7ece9f3c8f70..2d11ddd99e55 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -39,6 +39,7 @@
39#include <linux/watchdog.h> 39#include <linux/watchdog.h>
40#include <linux/miscdevice.h> 40#include <linux/miscdevice.h>
41#include <linux/init.h> 41#include <linux/init.h>
42#include <linux/completion.h>
42#include <linux/rwsem.h> 43#include <linux/rwsem.h>
43#include <linux/errno.h> 44#include <linux/errno.h>
44#include <asm/uaccess.h> 45#include <asm/uaccess.h>
@@ -303,21 +304,22 @@ static int ipmi_heartbeat(void);
303static void panic_halt_ipmi_heartbeat(void); 304static void panic_halt_ipmi_heartbeat(void);
304 305
305 306
306/* We use a semaphore to make sure that only one thing can send a set 307/* We use a mutex to make sure that only one thing can send a set
307 timeout at one time, because we only have one copy of the data. 308 timeout at one time, because we only have one copy of the data.
308 The semaphore is claimed when the set_timeout is sent and freed 309 The mutex is claimed when the set_timeout is sent and freed
309 when both messages are free. */ 310 when both messages are free. */
310static atomic_t set_timeout_tofree = ATOMIC_INIT(0); 311static atomic_t set_timeout_tofree = ATOMIC_INIT(0);
311static DECLARE_MUTEX(set_timeout_lock); 312static DEFINE_MUTEX(set_timeout_lock);
313static DECLARE_COMPLETION(set_timeout_wait);
312static void set_timeout_free_smi(struct ipmi_smi_msg *msg) 314static void set_timeout_free_smi(struct ipmi_smi_msg *msg)
313{ 315{
314 if (atomic_dec_and_test(&set_timeout_tofree)) 316 if (atomic_dec_and_test(&set_timeout_tofree))
315 up(&set_timeout_lock); 317 complete(&set_timeout_wait);
316} 318}
317static void set_timeout_free_recv(struct ipmi_recv_msg *msg) 319static void set_timeout_free_recv(struct ipmi_recv_msg *msg)
318{ 320{
319 if (atomic_dec_and_test(&set_timeout_tofree)) 321 if (atomic_dec_and_test(&set_timeout_tofree))
320 up(&set_timeout_lock); 322 complete(&set_timeout_wait);
321} 323}
322static struct ipmi_smi_msg set_timeout_smi_msg = 324static struct ipmi_smi_msg set_timeout_smi_msg =
323{ 325{
@@ -399,7 +401,7 @@ static int ipmi_set_timeout(int do_heartbeat)
399 401
400 402
401 /* We can only send one of these at a time. */ 403 /* We can only send one of these at a time. */
402 down(&set_timeout_lock); 404 mutex_lock(&set_timeout_lock);
403 405
404 atomic_set(&set_timeout_tofree, 2); 406 atomic_set(&set_timeout_tofree, 2);
405 407
@@ -407,16 +409,21 @@ static int ipmi_set_timeout(int do_heartbeat)
407 &set_timeout_recv_msg, 409 &set_timeout_recv_msg,
408 &send_heartbeat_now); 410 &send_heartbeat_now);
409 if (rv) { 411 if (rv) {
410 up(&set_timeout_lock); 412 mutex_unlock(&set_timeout_lock);
411 } else { 413 goto out;
412 if ((do_heartbeat == IPMI_SET_TIMEOUT_FORCE_HB)
413 || ((send_heartbeat_now)
414 && (do_heartbeat == IPMI_SET_TIMEOUT_HB_IF_NECESSARY)))
415 {
416 rv = ipmi_heartbeat();
417 }
418 } 414 }
419 415
416 wait_for_completion(&set_timeout_wait);
417
418 if ((do_heartbeat == IPMI_SET_TIMEOUT_FORCE_HB)
419 || ((send_heartbeat_now)
420 && (do_heartbeat == IPMI_SET_TIMEOUT_HB_IF_NECESSARY)))
421 {
422 rv = ipmi_heartbeat();
423 }
424 mutex_unlock(&set_timeout_lock);
425
426out:
420 return rv; 427 return rv;
421} 428}
422 429
@@ -458,17 +465,17 @@ static void panic_halt_ipmi_set_timeout(void)
458 The semaphore is claimed when the set_timeout is sent and freed 465 The semaphore is claimed when the set_timeout is sent and freed
459 when both messages are free. */ 466 when both messages are free. */
460static atomic_t heartbeat_tofree = ATOMIC_INIT(0); 467static atomic_t heartbeat_tofree = ATOMIC_INIT(0);
461static DECLARE_MUTEX(heartbeat_lock); 468static DEFINE_MUTEX(heartbeat_lock);
462static DECLARE_MUTEX_LOCKED(heartbeat_wait_lock); 469static DECLARE_COMPLETION(heartbeat_wait);
463static void heartbeat_free_smi(struct ipmi_smi_msg *msg) 470static void heartbeat_free_smi(struct ipmi_smi_msg *msg)
464{ 471{
465 if (atomic_dec_and_test(&heartbeat_tofree)) 472 if (atomic_dec_and_test(&heartbeat_tofree))
466 up(&heartbeat_wait_lock); 473 complete(&heartbeat_wait);
467} 474}
468static void heartbeat_free_recv(struct ipmi_recv_msg *msg) 475static void heartbeat_free_recv(struct ipmi_recv_msg *msg)
469{ 476{
470 if (atomic_dec_and_test(&heartbeat_tofree)) 477 if (atomic_dec_and_test(&heartbeat_tofree))
471 up(&heartbeat_wait_lock); 478 complete(&heartbeat_wait);
472} 479}
473static struct ipmi_smi_msg heartbeat_smi_msg = 480static struct ipmi_smi_msg heartbeat_smi_msg =
474{ 481{
@@ -511,14 +518,14 @@ static int ipmi_heartbeat(void)
511 return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); 518 return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
512 } 519 }
513 520
514 down(&heartbeat_lock); 521 mutex_lock(&heartbeat_lock);
515 522
516 atomic_set(&heartbeat_tofree, 2); 523 atomic_set(&heartbeat_tofree, 2);
517 524
518 /* Don't reset the timer if we have the timer turned off, that 525 /* Don't reset the timer if we have the timer turned off, that
519 re-enables the watchdog. */ 526 re-enables the watchdog. */
520 if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) { 527 if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) {
521 up(&heartbeat_lock); 528 mutex_unlock(&heartbeat_lock);
522 return 0; 529 return 0;
523 } 530 }
524 531
@@ -539,14 +546,14 @@ static int ipmi_heartbeat(void)
539 &heartbeat_recv_msg, 546 &heartbeat_recv_msg,
540 1); 547 1);
541 if (rv) { 548 if (rv) {
542 up(&heartbeat_lock); 549 mutex_unlock(&heartbeat_lock);
543 printk(KERN_WARNING PFX "heartbeat failure: %d\n", 550 printk(KERN_WARNING PFX "heartbeat failure: %d\n",
544 rv); 551 rv);
545 return rv; 552 return rv;
546 } 553 }
547 554
548 /* Wait for the heartbeat to be sent. */ 555 /* Wait for the heartbeat to be sent. */
549 down(&heartbeat_wait_lock); 556 wait_for_completion(&heartbeat_wait);
550 557
551 if (heartbeat_recv_msg.msg.data[0] != 0) { 558 if (heartbeat_recv_msg.msg.data[0] != 0) {
552 /* Got an error in the heartbeat response. It was already 559 /* Got an error in the heartbeat response. It was already
@@ -555,7 +562,7 @@ static int ipmi_heartbeat(void)
555 rv = -EINVAL; 562 rv = -EINVAL;
556 } 563 }
557 564
558 up(&heartbeat_lock); 565 mutex_unlock(&heartbeat_lock);
559 566
560 return rv; 567 return rv;
561} 568}
@@ -589,7 +596,7 @@ static void panic_halt_ipmi_heartbeat(void)
589 1); 596 1);
590} 597}
591 598
592static struct watchdog_info ident= 599static struct watchdog_info ident =
593{ 600{
594 .options = 0, /* WDIOF_SETTIMEOUT, */ 601 .options = 0, /* WDIOF_SETTIMEOUT, */
595 .firmware_version = 1, 602 .firmware_version = 1,
@@ -790,13 +797,13 @@ static int ipmi_fasync(int fd, struct file *file, int on)
790 797
791static int ipmi_close(struct inode *ino, struct file *filep) 798static int ipmi_close(struct inode *ino, struct file *filep)
792{ 799{
793 if (iminor(ino)==WATCHDOG_MINOR) 800 if (iminor(ino) == WATCHDOG_MINOR) {
794 {
795 if (expect_close == 42) { 801 if (expect_close == 42) {
796 ipmi_watchdog_state = WDOG_TIMEOUT_NONE; 802 ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
797 ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); 803 ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
798 } else { 804 } else {
799 printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n"); 805 printk(KERN_CRIT PFX
806 "Unexpected close, not stopping watchdog!\n");
800 ipmi_heartbeat(); 807 ipmi_heartbeat();
801 } 808 }
802 clear_bit(0, &ipmi_wdog_open); 809 clear_bit(0, &ipmi_wdog_open);
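
The ipmi_watchdog.c conversion replaces the old locked semaphores with a mutex plus a completion: the mutex serializes senders, an atomic_t counts the two in-flight message buffers, and whichever free callback drops the count to zero calls complete() to wake the waiter. The sketch below reproduces just that handshake, with two kthreads standing in for the SMI free callbacks; all names are invented.

/* Sketch only: "last release wakes the waiter" with mutex + completion. */
#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(send_lock);
static DECLARE_COMPLETION(both_released);
static atomic_t to_release = ATOMIC_INIT(0);

static void release_one(void)
{
        /* Whichever caller drops the count to zero wakes the sender. */
        if (atomic_dec_and_test(&to_release))
                complete(&both_released);
}

static int releaser(void *data)
{
        msleep(10);             /* pretend the message completes later */
        release_one();
        return 0;
}

static int __init cpl_demo_init(void)
{
        struct task_struct *t;
        int i;

        mutex_lock(&send_lock);         /* one "set timeout" in flight at a time */
        atomic_set(&to_release, 2);     /* the smi msg and the recv msg */

        for (i = 0; i < 2; i++) {
                t = kthread_run(releaser, NULL, "cpl_demo%d", i);
                if (IS_ERR(t))
                        release_one();  /* never leave the waiter hanging */
        }

        wait_for_completion(&both_released);
        mutex_unlock(&send_lock);

        printk(KERN_INFO "cpl_demo: both buffers released\n");
        return 0;
}

static void __exit cpl_demo_exit(void)
{
}

module_init(cpl_demo_init);
module_exit(cpl_demo_exit);
MODULE_LICENSE("GPL");
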
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index e5247f85a446..ef20c1fc9c4c 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -706,7 +706,6 @@ static int stli_portcmdstats(stliport_t *portp);
706static int stli_clrportstats(stliport_t *portp, comstats_t __user *cp); 706static int stli_clrportstats(stliport_t *portp, comstats_t __user *cp);
707static int stli_getportstruct(stliport_t __user *arg); 707static int stli_getportstruct(stliport_t __user *arg);
708static int stli_getbrdstruct(stlibrd_t __user *arg); 708static int stli_getbrdstruct(stlibrd_t __user *arg);
709static void *stli_memalloc(int len);
710static stlibrd_t *stli_allocbrd(void); 709static stlibrd_t *stli_allocbrd(void);
711 710
712static void stli_ecpinit(stlibrd_t *brdp); 711static void stli_ecpinit(stlibrd_t *brdp);
@@ -997,17 +996,6 @@ static int stli_parsebrd(stlconf_t *confp, char **argp)
997 996
998/*****************************************************************************/ 997/*****************************************************************************/
999 998
1000/*
1001 * Local driver kernel malloc routine.
1002 */
1003
1004static void *stli_memalloc(int len)
1005{
1006 return((void *) kmalloc(len, GFP_KERNEL));
1007}
1008
1009/*****************************************************************************/
1010
1011static int stli_open(struct tty_struct *tty, struct file *filp) 999static int stli_open(struct tty_struct *tty, struct file *filp)
1012{ 1000{
1013 stlibrd_t *brdp; 1001 stlibrd_t *brdp;
@@ -3227,13 +3215,12 @@ static int stli_initports(stlibrd_t *brdp)
3227#endif 3215#endif
3228 3216
3229 for (i = 0, panelnr = 0, panelport = 0; (i < brdp->nrports); i++) { 3217 for (i = 0, panelnr = 0, panelport = 0; (i < brdp->nrports); i++) {
3230 portp = (stliport_t *) stli_memalloc(sizeof(stliport_t)); 3218 portp = kzalloc(sizeof(stliport_t), GFP_KERNEL);
3231 if (portp == (stliport_t *) NULL) { 3219 if (!portp) {
3232 printk("STALLION: failed to allocate port structure\n"); 3220 printk("STALLION: failed to allocate port structure\n");
3233 continue; 3221 continue;
3234 } 3222 }
3235 3223
3236 memset(portp, 0, sizeof(stliport_t));
3237 portp->magic = STLI_PORTMAGIC; 3224 portp->magic = STLI_PORTMAGIC;
3238 portp->portnr = i; 3225 portp->portnr = i;
3239 portp->brdnr = brdp->brdnr; 3226 portp->brdnr = brdp->brdnr;
@@ -4610,14 +4597,13 @@ static stlibrd_t *stli_allocbrd(void)
4610{ 4597{
4611 stlibrd_t *brdp; 4598 stlibrd_t *brdp;
4612 4599
4613 brdp = (stlibrd_t *) stli_memalloc(sizeof(stlibrd_t)); 4600 brdp = kzalloc(sizeof(stlibrd_t), GFP_KERNEL);
4614 if (brdp == (stlibrd_t *) NULL) { 4601 if (!brdp) {
4615 printk(KERN_ERR "STALLION: failed to allocate memory " 4602 printk(KERN_ERR "STALLION: failed to allocate memory "
4616 "(size=%d)\n", sizeof(stlibrd_t)); 4603 "(size=%d)\n", sizeof(stlibrd_t));
4617 return((stlibrd_t *) NULL); 4604 return NULL;
4618 } 4605 }
4619 4606
4620 memset(brdp, 0, sizeof(stlibrd_t));
4621 brdp->magic = STLI_BOARDMAGIC; 4607 brdp->magic = STLI_BOARDMAGIC;
4622 return(brdp); 4608 return(brdp);
4623} 4609}
@@ -5210,12 +5196,12 @@ int __init stli_init(void)
5210/* 5196/*
5211 * Allocate a temporary write buffer. 5197 * Allocate a temporary write buffer.
5212 */ 5198 */
5213 stli_tmpwritebuf = (char *) stli_memalloc(STLI_TXBUFSIZE); 5199 stli_tmpwritebuf = kmalloc(STLI_TXBUFSIZE, GFP_KERNEL);
5214 if (stli_tmpwritebuf == (char *) NULL) 5200 if (!stli_tmpwritebuf)
5215 printk(KERN_ERR "STALLION: failed to allocate memory " 5201 printk(KERN_ERR "STALLION: failed to allocate memory "
5216 "(size=%d)\n", STLI_TXBUFSIZE); 5202 "(size=%d)\n", STLI_TXBUFSIZE);
5217 stli_txcookbuf = stli_memalloc(STLI_TXBUFSIZE); 5203 stli_txcookbuf = kmalloc(STLI_TXBUFSIZE, GFP_KERNEL);
5218 if (stli_txcookbuf == (char *) NULL) 5204 if (!stli_txcookbuf)
5219 printk(KERN_ERR "STALLION: failed to allocate memory " 5205 printk(KERN_ERR "STALLION: failed to allocate memory "
5220 "(size=%d)\n", STLI_TXBUFSIZE); 5206 "(size=%d)\n", STLI_TXBUFSIZE);
5221 5207
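
The istallion.c changes are a plain allocator cleanup: the driver-private stli_memalloc() wrapper followed by memset() collapses into a single kzalloc() call, and the casts around NULL go away. A minimal sketch with a made-up structure:

/* Sketch: one kzalloc() replaces a malloc wrapper plus memset(). */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>

struct demo_port {
        unsigned int magic;
        unsigned int portnr;
};

static struct demo_port *demo_port_alloc(unsigned int nr)
{
        /* Zeroed on return, so only the non-zero fields need initialising. */
        struct demo_port *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return NULL;
        p->magic = 0x12345678;
        p->portnr = nr;
        return p;
}

static int __init kz_demo_init(void)
{
        struct demo_port *p = demo_port_alloc(0);

        if (!p)
                return -ENOMEM;
        kfree(p);
        return 0;
}

static void __exit kz_demo_exit(void)
{
}

module_init(kz_demo_init);
module_exit(kz_demo_exit);
MODULE_LICENSE("GPL");
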
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
index 8b603b2d1c42..935670a3cd98 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/char/keyboard.c
@@ -74,7 +74,7 @@ void compute_shiftstate(void);
74 k_self, k_fn, k_spec, k_pad,\ 74 k_self, k_fn, k_spec, k_pad,\
75 k_dead, k_cons, k_cur, k_shift,\ 75 k_dead, k_cons, k_cur, k_shift,\
76 k_meta, k_ascii, k_lock, k_lowercase,\ 76 k_meta, k_ascii, k_lock, k_lowercase,\
77 k_slock, k_dead2, k_ignore, k_ignore 77 k_slock, k_dead2, k_brl, k_ignore
78 78
79typedef void (k_handler_fn)(struct vc_data *vc, unsigned char value, 79typedef void (k_handler_fn)(struct vc_data *vc, unsigned char value,
80 char up_flag, struct pt_regs *regs); 80 char up_flag, struct pt_regs *regs);
@@ -100,7 +100,7 @@ static fn_handler_fn *fn_handler[] = { FN_HANDLERS };
100const int max_vals[] = { 100const int max_vals[] = {
101 255, ARRAY_SIZE(func_table) - 1, ARRAY_SIZE(fn_handler) - 1, NR_PAD - 1, 101 255, ARRAY_SIZE(func_table) - 1, ARRAY_SIZE(fn_handler) - 1, NR_PAD - 1,
102 NR_DEAD - 1, 255, 3, NR_SHIFT - 1, 255, NR_ASCII - 1, NR_LOCK - 1, 102 NR_DEAD - 1, 255, 3, NR_SHIFT - 1, 255, NR_ASCII - 1, NR_LOCK - 1,
103 255, NR_LOCK - 1, 255 103 255, NR_LOCK - 1, 255, NR_BRL - 1
104}; 104};
105 105
106const int NR_TYPES = ARRAY_SIZE(max_vals); 106const int NR_TYPES = ARRAY_SIZE(max_vals);
@@ -126,7 +126,7 @@ static unsigned long key_down[NBITS(KEY_MAX)]; /* keyboard key bitmap */
126static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. */ 126static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. */
127static int dead_key_next; 127static int dead_key_next;
128static int npadch = -1; /* -1 or number assembled on pad */ 128static int npadch = -1; /* -1 or number assembled on pad */
129static unsigned char diacr; 129static unsigned int diacr;
130static char rep; /* flag telling character repeat */ 130static char rep; /* flag telling character repeat */
131 131
132static unsigned char ledstate = 0xff; /* undefined */ 132static unsigned char ledstate = 0xff; /* undefined */
@@ -394,22 +394,30 @@ void compute_shiftstate(void)
394 * Otherwise, conclude that DIACR was not combining after all, 394 * Otherwise, conclude that DIACR was not combining after all,
395 * queue it and return CH. 395 * queue it and return CH.
396 */ 396 */
397static unsigned char handle_diacr(struct vc_data *vc, unsigned char ch) 397static unsigned int handle_diacr(struct vc_data *vc, unsigned int ch)
398{ 398{
399 int d = diacr; 399 unsigned int d = diacr;
400 unsigned int i; 400 unsigned int i;
401 401
402 diacr = 0; 402 diacr = 0;
403 403
404 for (i = 0; i < accent_table_size; i++) { 404 if ((d & ~0xff) == BRL_UC_ROW) {
405 if (accent_table[i].diacr == d && accent_table[i].base == ch) 405 if ((ch & ~0xff) == BRL_UC_ROW)
406 return accent_table[i].result; 406 return d | ch;
407 } else {
408 for (i = 0; i < accent_table_size; i++)
409 if (accent_table[i].diacr == d && accent_table[i].base == ch)
410 return accent_table[i].result;
407 } 411 }
408 412
409 if (ch == ' ' || ch == d) 413 if (ch == ' ' || ch == (BRL_UC_ROW|0) || ch == d)
410 return d; 414 return d;
411 415
412 put_queue(vc, d); 416 if (kbd->kbdmode == VC_UNICODE)
417 to_utf8(vc, d);
418 else if (d < 0x100)
419 put_queue(vc, d);
420
413 return ch; 421 return ch;
414} 422}
415 423
@@ -419,7 +427,10 @@ static unsigned char handle_diacr(struct vc_data *vc, unsigned char ch)
419static void fn_enter(struct vc_data *vc, struct pt_regs *regs) 427static void fn_enter(struct vc_data *vc, struct pt_regs *regs)
420{ 428{
421 if (diacr) { 429 if (diacr) {
422 put_queue(vc, diacr); 430 if (kbd->kbdmode == VC_UNICODE)
431 to_utf8(vc, diacr);
432 else if (diacr < 0x100)
433 put_queue(vc, diacr);
423 diacr = 0; 434 diacr = 0;
424 } 435 }
425 put_queue(vc, 13); 436 put_queue(vc, 13);
@@ -615,7 +626,7 @@ static void k_lowercase(struct vc_data *vc, unsigned char value, char up_flag, s
615 printk(KERN_ERR "keyboard.c: k_lowercase was called - impossible\n"); 626 printk(KERN_ERR "keyboard.c: k_lowercase was called - impossible\n");
616} 627}
617 628
618static void k_self(struct vc_data *vc, unsigned char value, char up_flag, struct pt_regs *regs) 629static void k_unicode(struct vc_data *vc, unsigned int value, char up_flag, struct pt_regs *regs)
619{ 630{
620 if (up_flag) 631 if (up_flag)
621 return; /* no action, if this is a key release */ 632 return; /* no action, if this is a key release */
@@ -628,7 +639,10 @@ static void k_self(struct vc_data *vc, unsigned char value, char up_flag, struct
628 diacr = value; 639 diacr = value;
629 return; 640 return;
630 } 641 }
631 put_queue(vc, value); 642 if (kbd->kbdmode == VC_UNICODE)
643 to_utf8(vc, value);
644 else if (value < 0x100)
645 put_queue(vc, value);
632} 646}
633 647
634/* 648/*
@@ -636,13 +650,23 @@ static void k_self(struct vc_data *vc, unsigned char value, char up_flag, struct
636 * dead keys modifying the same character. Very useful 650 * dead keys modifying the same character. Very useful
637 * for Vietnamese. 651 * for Vietnamese.
638 */ 652 */
639static void k_dead2(struct vc_data *vc, unsigned char value, char up_flag, struct pt_regs *regs) 653static void k_deadunicode(struct vc_data *vc, unsigned int value, char up_flag, struct pt_regs *regs)
640{ 654{
641 if (up_flag) 655 if (up_flag)
642 return; 656 return;
643 diacr = (diacr ? handle_diacr(vc, value) : value); 657 diacr = (diacr ? handle_diacr(vc, value) : value);
644} 658}
645 659
660static void k_self(struct vc_data *vc, unsigned char value, char up_flag, struct pt_regs *regs)
661{
662 k_unicode(vc, value, up_flag, regs);
663}
664
665static void k_dead2(struct vc_data *vc, unsigned char value, char up_flag, struct pt_regs *regs)
666{
667 k_deadunicode(vc, value, up_flag, regs);
668}
669
646/* 670/*
647 * Obsolete - for backwards compatibility only 671 * Obsolete - for backwards compatibility only
648 */ 672 */
@@ -650,7 +674,7 @@ static void k_dead(struct vc_data *vc, unsigned char value, char up_flag, struct
650{ 674{
651 static unsigned char ret_diacr[NR_DEAD] = {'`', '\'', '^', '~', '"', ',' }; 675 static unsigned char ret_diacr[NR_DEAD] = {'`', '\'', '^', '~', '"', ',' };
652 value = ret_diacr[value]; 676 value = ret_diacr[value];
653 k_dead2(vc, value, up_flag, regs); 677 k_deadunicode(vc, value, up_flag, regs);
654} 678}
655 679
656static void k_cons(struct vc_data *vc, unsigned char value, char up_flag, struct pt_regs *regs) 680static void k_cons(struct vc_data *vc, unsigned char value, char up_flag, struct pt_regs *regs)
@@ -835,6 +859,62 @@ static void k_slock(struct vc_data *vc, unsigned char value, char up_flag, struc
835 } 859 }
836} 860}
837 861
862/* by default, 300ms interval for combination release */
863static long brl_timeout = 300;
864MODULE_PARM_DESC(brl_timeout, "Braille keys release delay in ms (0 for combination on first release, < 0 for dead characters)");
865module_param(brl_timeout, long, 0644);
866static void k_brl(struct vc_data *vc, unsigned char value, char up_flag, struct pt_regs *regs)
867{
868 static unsigned pressed,committing;
869 static unsigned long releasestart;
870
871 if (kbd->kbdmode != VC_UNICODE) {
872 if (!up_flag)
873 printk("keyboard mode must be unicode for braille patterns\n");
874 return;
875 }
876
877 if (!value) {
878 k_unicode(vc, BRL_UC_ROW, up_flag, regs);
879 return;
880 }
881
882 if (value > 8)
883 return;
884
885 if (brl_timeout < 0) {
886 k_deadunicode(vc, BRL_UC_ROW | (1 << (value - 1)), up_flag, regs);
887 return;
888 }
889
890 if (up_flag) {
891 if (brl_timeout) {
892 if (!committing ||
893 jiffies - releasestart > (brl_timeout * HZ) / 1000) {
894 committing = pressed;
895 releasestart = jiffies;
896 }
897 pressed &= ~(1 << (value - 1));
898 if (!pressed) {
899 if (committing) {
900 k_unicode(vc, BRL_UC_ROW | committing, 0, regs);
901 committing = 0;
902 }
903 }
904 } else {
905 if (committing) {
906 k_unicode(vc, BRL_UC_ROW | committing, 0, regs);
907 committing = 0;
908 }
909 pressed &= ~(1 << (value - 1));
910 }
911 } else {
912 pressed |= 1 << (value - 1);
913 if (!brl_timeout)
914 committing = pressed;
915 }
916}
917
838/* 918/*
839 * The leds display either (i) the status of NumLock, CapsLock, ScrollLock, 919 * The leds display either (i) the status of NumLock, CapsLock, ScrollLock,
840 * or (ii) whatever pattern of lights people want to show using KDSETLED, 920 * or (ii) whatever pattern of lights people want to show using KDSETLED,
@@ -1125,9 +1205,13 @@ static void kbd_keycode(unsigned int keycode, int down,
1125 } 1205 }
1126 1206
1127 if (keycode > NR_KEYS) 1207 if (keycode > NR_KEYS)
1128 return; 1208 if (keycode >= KEY_BRL_DOT1 && keycode <= KEY_BRL_DOT8)
1209 keysym = K(KT_BRL, keycode - KEY_BRL_DOT1 + 1);
1210 else
1211 return;
1212 else
1213 keysym = key_map[keycode];
1129 1214
1130 keysym = key_map[keycode];
1131 type = KTYP(keysym); 1215 type = KTYP(keysym);
1132 1216
1133 if (type < 0xf0) { 1217 if (type < 0xf0) {
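
The braille support added to keyboard.c accumulates pressed dots into a bitmask (dot N sets bit N-1) and, once the chord is released, emits a single character offset by BRL_UC_ROW. The sketch below assumes BRL_UC_ROW is the start of the Unicode braille-patterns block, U+2800, and shows how a chord maps to a code point and its UTF-8 bytes; the chord itself is arbitrary.

/* Sketch: map a braille chord (dot bitmask) to a Unicode braille pattern. */
#include <stdio.h>

#define BRL_UC_ROW 0x2800u      /* assumed value: first Unicode braille pattern */

static void put_utf8(unsigned int cp)
{
        /* U+2800..U+28FF always encode as three UTF-8 bytes. */
        putchar(0xE0 | (cp >> 12));
        putchar(0x80 | ((cp >> 6) & 0x3F));
        putchar(0x80 | (cp & 0x3F));
}

int main(void)
{
        unsigned int pressed = 0;
        int dots[] = { 1, 2, 5 };       /* an arbitrary chord */
        int i;

        for (i = 0; i < 3; i++)
                pressed |= 1u << (dots[i] - 1);         /* dot N -> bit N-1 */

        /* Commit the chord once every dot has been released. */
        put_utf8(BRL_UC_ROW | pressed);
        putchar('\n');
        printf("pattern mask 0x%02x -> U+%04X\n",
               pressed, BRL_UC_ROW | pressed);
        return 0;
}
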
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 5fdf18515433..02114a0bd0d9 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -46,7 +46,7 @@
46/* #define ATR_CSUM */ 46/* #define ATR_CSUM */
47 47
48#ifdef PCMCIA_DEBUG 48#ifdef PCMCIA_DEBUG
49#define reader_to_dev(x) (&handle_to_dev(x->link.handle)) 49#define reader_to_dev(x) (&handle_to_dev(x->p_dev->handle))
50static int pc_debug = PCMCIA_DEBUG; 50static int pc_debug = PCMCIA_DEBUG;
51module_param(pc_debug, int, 0600); 51module_param(pc_debug, int, 0600);
52#define DEBUGP(n, rdr, x, args...) do { \ 52#define DEBUGP(n, rdr, x, args...) do { \
@@ -67,7 +67,7 @@ static char *version = "cm4000_cs.c v2.4.0gm6 - All bugs added by Harald Welte";
67#define T_100MSEC msecs_to_jiffies(100) 67#define T_100MSEC msecs_to_jiffies(100)
68#define T_500MSEC msecs_to_jiffies(500) 68#define T_500MSEC msecs_to_jiffies(500)
69 69
70static void cm4000_release(dev_link_t *link); 70static void cm4000_release(struct pcmcia_device *link);
71 71
72static int major; /* major number we get from the kernel */ 72static int major; /* major number we get from the kernel */
73 73
@@ -106,7 +106,7 @@ static int major; /* major number we get from the kernel */
106#define REG_STOPBITS(x) (x + 7) 106#define REG_STOPBITS(x) (x + 7)
107 107
108struct cm4000_dev { 108struct cm4000_dev {
109 dev_link_t link; /* pcmcia link */ 109 struct pcmcia_device *p_dev;
110 dev_node_t node; /* OS node (major,minor) */ 110 dev_node_t node; /* OS node (major,minor) */
111 111
112 unsigned char atr[MAX_ATR]; 112 unsigned char atr[MAX_ATR];
@@ -149,14 +149,14 @@ struct cm4000_dev {
149#define ZERO_DEV(dev) \ 149#define ZERO_DEV(dev) \
150 memset(&dev->atr_csum,0, \ 150 memset(&dev->atr_csum,0, \
151 sizeof(struct cm4000_dev) - \ 151 sizeof(struct cm4000_dev) - \
152 /*link*/ sizeof(dev_link_t) - \ 152 /*link*/ sizeof(struct pcmcia_device) - \
153 /*node*/ sizeof(dev_node_t) - \ 153 /*node*/ sizeof(dev_node_t) - \
154 /*atr*/ MAX_ATR*sizeof(char) - \ 154 /*atr*/ MAX_ATR*sizeof(char) - \
155 /*rbuf*/ 512*sizeof(char) - \ 155 /*rbuf*/ 512*sizeof(char) - \
156 /*sbuf*/ 512*sizeof(char) - \ 156 /*sbuf*/ 512*sizeof(char) - \
157 /*queue*/ 4*sizeof(wait_queue_head_t)) 157 /*queue*/ 4*sizeof(wait_queue_head_t))
158 158
159static dev_link_t *dev_table[CM4000_MAX_DEV]; 159static struct pcmcia_device *dev_table[CM4000_MAX_DEV];
160static struct class *cmm_class; 160static struct class *cmm_class;
161 161
162/* This table doesn't use spaces after the comma between fields and thus 162/* This table doesn't use spaces after the comma between fields and thus
@@ -454,7 +454,7 @@ static struct card_fixup card_fixups[] = {
454static void set_cardparameter(struct cm4000_dev *dev) 454static void set_cardparameter(struct cm4000_dev *dev)
455{ 455{
456 int i; 456 int i;
457 ioaddr_t iobase = dev->link.io.BasePort1; 457 ioaddr_t iobase = dev->p_dev->io.BasePort1;
458 u_int8_t stopbits = 0x02; /* ISO default */ 458 u_int8_t stopbits = 0x02; /* ISO default */
459 459
460 DEBUGP(3, dev, "-> set_cardparameter\n"); 460 DEBUGP(3, dev, "-> set_cardparameter\n");
@@ -487,7 +487,7 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
487 unsigned short num_bytes_read; 487 unsigned short num_bytes_read;
488 unsigned char pts_reply[4]; 488 unsigned char pts_reply[4];
489 ssize_t rc; 489 ssize_t rc;
490 ioaddr_t iobase = dev->link.io.BasePort1; 490 ioaddr_t iobase = dev->p_dev->io.BasePort1;
491 491
492 rc = 0; 492 rc = 0;
493 493
@@ -699,7 +699,7 @@ static void terminate_monitor(struct cm4000_dev *dev)
699static void monitor_card(unsigned long p) 699static void monitor_card(unsigned long p)
700{ 700{
701 struct cm4000_dev *dev = (struct cm4000_dev *) p; 701 struct cm4000_dev *dev = (struct cm4000_dev *) p;
702 ioaddr_t iobase = dev->link.io.BasePort1; 702 ioaddr_t iobase = dev->p_dev->io.BasePort1;
703 unsigned short s; 703 unsigned short s;
704 struct ptsreq ptsreq; 704 struct ptsreq ptsreq;
705 int i, atrc; 705 int i, atrc;
@@ -962,7 +962,7 @@ static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count,
962 loff_t *ppos) 962 loff_t *ppos)
963{ 963{
964 struct cm4000_dev *dev = filp->private_data; 964 struct cm4000_dev *dev = filp->private_data;
965 ioaddr_t iobase = dev->link.io.BasePort1; 965 ioaddr_t iobase = dev->p_dev->io.BasePort1;
966 ssize_t rc; 966 ssize_t rc;
967 int i, j, k; 967 int i, j, k;
968 968
@@ -971,7 +971,7 @@ static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count,
971 if (count == 0) /* according to manpage */ 971 if (count == 0) /* according to manpage */
972 return 0; 972 return 0;
973 973
974 if ((dev->link.state & DEV_PRESENT) == 0 || /* socket removed */ 974 if (!pcmcia_dev_present(dev->p_dev) || /* device removed */
975 test_bit(IS_CMM_ABSENT, &dev->flags)) 975 test_bit(IS_CMM_ABSENT, &dev->flags))
976 return -ENODEV; 976 return -ENODEV;
977 977
@@ -1083,7 +1083,7 @@ static ssize_t cmm_write(struct file *filp, const char __user *buf,
1083 size_t count, loff_t *ppos) 1083 size_t count, loff_t *ppos)
1084{ 1084{
1085 struct cm4000_dev *dev = (struct cm4000_dev *) filp->private_data; 1085 struct cm4000_dev *dev = (struct cm4000_dev *) filp->private_data;
1086 ioaddr_t iobase = dev->link.io.BasePort1; 1086 ioaddr_t iobase = dev->p_dev->io.BasePort1;
1087 unsigned short s; 1087 unsigned short s;
1088 unsigned char tmp; 1088 unsigned char tmp;
1089 unsigned char infolen; 1089 unsigned char infolen;
@@ -1108,7 +1108,7 @@ static ssize_t cmm_write(struct file *filp, const char __user *buf,
1108 1108
1109 sendT0 = dev->proto ? 0 : nr > 5 ? 0x08 : 0; 1109 sendT0 = dev->proto ? 0 : nr > 5 ? 0x08 : 0;
1110 1110
1111 if ((dev->link.state & DEV_PRESENT) == 0 || /* socket removed */ 1111 if (!pcmcia_dev_present(dev->p_dev) || /* device removed */
1112 test_bit(IS_CMM_ABSENT, &dev->flags)) 1112 test_bit(IS_CMM_ABSENT, &dev->flags))
1113 return -ENODEV; 1113 return -ENODEV;
1114 1114
@@ -1440,8 +1440,8 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1440 unsigned long arg) 1440 unsigned long arg)
1441{ 1441{
1442 struct cm4000_dev *dev = filp->private_data; 1442 struct cm4000_dev *dev = filp->private_data;
1443 ioaddr_t iobase = dev->link.io.BasePort1; 1443 ioaddr_t iobase = dev->p_dev->io.BasePort1;
1444 dev_link_t *link; 1444 struct pcmcia_device *link;
1445 int size; 1445 int size;
1446 int rc; 1446 int rc;
1447 void __user *argp = (void __user *)arg; 1447 void __user *argp = (void __user *)arg;
@@ -1458,7 +1458,7 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1458 iminor(inode), ioctl_names[_IOC_NR(cmd)]); 1458 iminor(inode), ioctl_names[_IOC_NR(cmd)]);
1459 1459
1460 link = dev_table[iminor(inode)]; 1460 link = dev_table[iminor(inode)];
1461 if (!(DEV_OK(link))) { 1461 if (!pcmcia_dev_present(link)) {
1462 DEBUGP(4, dev, "DEV_OK false\n"); 1462 DEBUGP(4, dev, "DEV_OK false\n");
1463 return -ENODEV; 1463 return -ENODEV;
1464 } 1464 }
@@ -1660,14 +1660,14 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1660static int cmm_open(struct inode *inode, struct file *filp) 1660static int cmm_open(struct inode *inode, struct file *filp)
1661{ 1661{
1662 struct cm4000_dev *dev; 1662 struct cm4000_dev *dev;
1663 dev_link_t *link; 1663 struct pcmcia_device *link;
1664 int rc, minor = iminor(inode); 1664 int rc, minor = iminor(inode);
1665 1665
1666 if (minor >= CM4000_MAX_DEV) 1666 if (minor >= CM4000_MAX_DEV)
1667 return -ENODEV; 1667 return -ENODEV;
1668 1668
1669 link = dev_table[minor]; 1669 link = dev_table[minor];
1670 if (link == NULL || !(DEV_OK(link))) 1670 if (link == NULL || !pcmcia_dev_present(link))
1671 return -ENODEV; 1671 return -ENODEV;
1672 1672
1673 if (link->open) 1673 if (link->open)
@@ -1709,7 +1709,7 @@ static int cmm_open(struct inode *inode, struct file *filp)
1709static int cmm_close(struct inode *inode, struct file *filp) 1709static int cmm_close(struct inode *inode, struct file *filp)
1710{ 1710{
1711 struct cm4000_dev *dev; 1711 struct cm4000_dev *dev;
1712 dev_link_t *link; 1712 struct pcmcia_device *link;
1713 int minor = iminor(inode); 1713 int minor = iminor(inode);
1714 1714
1715 if (minor >= CM4000_MAX_DEV) 1715 if (minor >= CM4000_MAX_DEV)
@@ -1735,7 +1735,7 @@ static int cmm_close(struct inode *inode, struct file *filp)
1735 return 0; 1735 return 0;
1736} 1736}
1737 1737
1738static void cmm_cm4000_release(dev_link_t * link) 1738static void cmm_cm4000_release(struct pcmcia_device * link)
1739{ 1739{
1740 struct cm4000_dev *dev = link->priv; 1740 struct cm4000_dev *dev = link->priv;
1741 1741
@@ -1759,13 +1759,11 @@ static void cmm_cm4000_release(dev_link_t * link)
1759 1759
1760/*==== Interface to PCMCIA Layer =======================================*/ 1760/*==== Interface to PCMCIA Layer =======================================*/
1761 1761
1762static void cm4000_config(dev_link_t * link, int devno) 1762static int cm4000_config(struct pcmcia_device * link, int devno)
1763{ 1763{
1764 client_handle_t handle = link->handle;
1765 struct cm4000_dev *dev; 1764 struct cm4000_dev *dev;
1766 tuple_t tuple; 1765 tuple_t tuple;
1767 cisparse_t parse; 1766 cisparse_t parse;
1768 config_info_t conf;
1769 u_char buf[64]; 1767 u_char buf[64];
1770 int fail_fn, fail_rc; 1768 int fail_fn, fail_rc;
1771 int rc; 1769 int rc;
@@ -1777,41 +1775,34 @@ static void cm4000_config(dev_link_t * link, int devno)
1777 tuple.TupleDataMax = sizeof(buf); 1775 tuple.TupleDataMax = sizeof(buf);
1778 tuple.TupleOffset = 0; 1776 tuple.TupleOffset = 0;
1779 1777
1780 if ((fail_rc = pcmcia_get_first_tuple(handle, &tuple)) != CS_SUCCESS) { 1778 if ((fail_rc = pcmcia_get_first_tuple(link, &tuple)) != CS_SUCCESS) {
1781 fail_fn = GetFirstTuple; 1779 fail_fn = GetFirstTuple;
1782 goto cs_failed; 1780 goto cs_failed;
1783 } 1781 }
1784 if ((fail_rc = pcmcia_get_tuple_data(handle, &tuple)) != CS_SUCCESS) { 1782 if ((fail_rc = pcmcia_get_tuple_data(link, &tuple)) != CS_SUCCESS) {
1785 fail_fn = GetTupleData; 1783 fail_fn = GetTupleData;
1786 goto cs_failed; 1784 goto cs_failed;
1787 } 1785 }
1788 if ((fail_rc = 1786 if ((fail_rc =
1789 pcmcia_parse_tuple(handle, &tuple, &parse)) != CS_SUCCESS) { 1787 pcmcia_parse_tuple(link, &tuple, &parse)) != CS_SUCCESS) {
1790 fail_fn = ParseTuple; 1788 fail_fn = ParseTuple;
1791 goto cs_failed; 1789 goto cs_failed;
1792 } 1790 }
1793 if ((fail_rc =
1794 pcmcia_get_configuration_info(handle, &conf)) != CS_SUCCESS) {
1795 fail_fn = GetConfigurationInfo;
1796 goto cs_failed;
1797 }
1798 1791
1799 link->state |= DEV_CONFIG;
1800 link->conf.ConfigBase = parse.config.base; 1792 link->conf.ConfigBase = parse.config.base;
1801 link->conf.Present = parse.config.rmask[0]; 1793 link->conf.Present = parse.config.rmask[0];
1802 link->conf.Vcc = conf.Vcc;
1803 1794
1804 link->io.BasePort2 = 0; 1795 link->io.BasePort2 = 0;
1805 link->io.NumPorts2 = 0; 1796 link->io.NumPorts2 = 0;
1806 link->io.Attributes2 = 0; 1797 link->io.Attributes2 = 0;
1807 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 1798 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
1808 for (rc = pcmcia_get_first_tuple(handle, &tuple); 1799 for (rc = pcmcia_get_first_tuple(link, &tuple);
1809 rc == CS_SUCCESS; rc = pcmcia_get_next_tuple(handle, &tuple)) { 1800 rc == CS_SUCCESS; rc = pcmcia_get_next_tuple(link, &tuple)) {
1810 1801
1811 rc = pcmcia_get_tuple_data(handle, &tuple); 1802 rc = pcmcia_get_tuple_data(link, &tuple);
1812 if (rc != CS_SUCCESS) 1803 if (rc != CS_SUCCESS)
1813 continue; 1804 continue;
1814 rc = pcmcia_parse_tuple(handle, &tuple, &parse); 1805 rc = pcmcia_parse_tuple(link, &tuple, &parse);
1815 if (rc != CS_SUCCESS) 1806 if (rc != CS_SUCCESS)
1816 continue; 1807 continue;
1817 1808
@@ -1831,7 +1822,7 @@ static void cm4000_config(dev_link_t * link, int devno)
1831 link->io.IOAddrLines = parse.cftable_entry.io.flags 1822 link->io.IOAddrLines = parse.cftable_entry.io.flags
1832 & CISTPL_IO_LINES_MASK; 1823 & CISTPL_IO_LINES_MASK;
1833 1824
1834 rc = pcmcia_request_io(handle, &link->io); 1825 rc = pcmcia_request_io(link, &link->io);
1835 if (rc == CS_SUCCESS) 1826 if (rc == CS_SUCCESS)
1836 break; /* we are done */ 1827 break; /* we are done */
1837 } 1828 }
@@ -1841,7 +1832,7 @@ static void cm4000_config(dev_link_t * link, int devno)
1841 link->conf.IntType = 00000002; 1832 link->conf.IntType = 00000002;
1842 1833
1843 if ((fail_rc = 1834 if ((fail_rc =
1844 pcmcia_request_configuration(handle, &link->conf)) != CS_SUCCESS) { 1835 pcmcia_request_configuration(link, &link->conf)) != CS_SUCCESS) {
1845 fail_fn = RequestConfiguration; 1836 fail_fn = RequestConfiguration;
1846 goto cs_release; 1837 goto cs_release;
1847 } 1838 }
@@ -1851,63 +1842,48 @@ static void cm4000_config(dev_link_t * link, int devno)
1851 dev->node.major = major; 1842 dev->node.major = major;
1852 dev->node.minor = devno; 1843 dev->node.minor = devno;
1853 dev->node.next = NULL; 1844 dev->node.next = NULL;
1854 link->dev = &dev->node; 1845 link->dev_node = &dev->node;
1855 link->state &= ~DEV_CONFIG_PENDING;
1856 1846
1857 return; 1847 return 0;
1858 1848
1859cs_failed: 1849cs_failed:
1860 cs_error(handle, fail_fn, fail_rc); 1850 cs_error(link, fail_fn, fail_rc);
1861cs_release: 1851cs_release:
1862 cm4000_release(link); 1852 cm4000_release(link);
1863 1853 return -ENODEV;
1864 link->state &= ~DEV_CONFIG_PENDING;
1865} 1854}
1866 1855
1867static int cm4000_suspend(struct pcmcia_device *p_dev) 1856static int cm4000_suspend(struct pcmcia_device *link)
1868{ 1857{
1869 dev_link_t *link = dev_to_instance(p_dev);
1870 struct cm4000_dev *dev; 1858 struct cm4000_dev *dev;
1871 1859
1872 dev = link->priv; 1860 dev = link->priv;
1873
1874 link->state |= DEV_SUSPEND;
1875 if (link->state & DEV_CONFIG)
1876 pcmcia_release_configuration(link->handle);
1877 stop_monitor(dev); 1861 stop_monitor(dev);
1878 1862
1879 return 0; 1863 return 0;
1880} 1864}
1881 1865
1882static int cm4000_resume(struct pcmcia_device *p_dev) 1866static int cm4000_resume(struct pcmcia_device *link)
1883{ 1867{
1884 dev_link_t *link = dev_to_instance(p_dev);
1885 struct cm4000_dev *dev; 1868 struct cm4000_dev *dev;
1886 1869
1887 dev = link->priv; 1870 dev = link->priv;
1888
1889 link->state &= ~DEV_SUSPEND;
1890 if (link->state & DEV_CONFIG)
1891 pcmcia_request_configuration(link->handle, &link->conf);
1892
1893 if (link->open) 1871 if (link->open)
1894 start_monitor(dev); 1872 start_monitor(dev);
1895 1873
1896 return 0; 1874 return 0;
1897} 1875}
1898 1876
1899static void cm4000_release(dev_link_t *link) 1877static void cm4000_release(struct pcmcia_device *link)
1900{ 1878{
1901 cmm_cm4000_release(link->priv); /* delay release until device closed */ 1879 cmm_cm4000_release(link->priv); /* delay release until device closed */
1902 pcmcia_release_configuration(link->handle); 1880 pcmcia_disable_device(link);
1903 pcmcia_release_io(link->handle, &link->io);
1904} 1881}
1905 1882
1906static int cm4000_attach(struct pcmcia_device *p_dev) 1883static int cm4000_probe(struct pcmcia_device *link)
1907{ 1884{
1908 struct cm4000_dev *dev; 1885 struct cm4000_dev *dev;
1909 dev_link_t *link; 1886 int i, ret;
1910 int i;
1911 1887
1912 for (i = 0; i < CM4000_MAX_DEV; i++) 1888 for (i = 0; i < CM4000_MAX_DEV; i++)
1913 if (dev_table[i] == NULL) 1889 if (dev_table[i] == NULL)
@@ -1923,7 +1899,7 @@ static int cm4000_attach(struct pcmcia_device *p_dev)
1923 if (dev == NULL) 1899 if (dev == NULL)
1924 return -ENOMEM; 1900 return -ENOMEM;
1925 1901
1926 link = &dev->link; 1902 dev->p_dev = link;
1927 link->priv = dev; 1903 link->priv = dev;
1928 link->conf.IntType = INT_MEMORY_AND_IO; 1904 link->conf.IntType = INT_MEMORY_AND_IO;
1929 dev_table[i] = link; 1905 dev_table[i] = link;
@@ -1933,11 +1909,9 @@ static int cm4000_attach(struct pcmcia_device *p_dev)
1933 init_waitqueue_head(&dev->atrq); 1909 init_waitqueue_head(&dev->atrq);
1934 init_waitqueue_head(&dev->readq); 1910 init_waitqueue_head(&dev->readq);
1935 1911
1936 link->handle = p_dev; 1912 ret = cm4000_config(link, i);
1937 p_dev->instance = link; 1913 if (ret)
1938 1914 return ret;
1939 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
1940 cm4000_config(link, i);
1941 1915
1942 class_device_create(cmm_class, NULL, MKDEV(major, i), NULL, 1916 class_device_create(cmm_class, NULL, MKDEV(major, i), NULL,
1943 "cmm%d", i); 1917 "cmm%d", i);
@@ -1945,9 +1919,8 @@ static int cm4000_attach(struct pcmcia_device *p_dev)
1945 return 0; 1919 return 0;
1946} 1920}
1947 1921
1948static void cm4000_detach(struct pcmcia_device *p_dev) 1922static void cm4000_detach(struct pcmcia_device *link)
1949{ 1923{
1950 dev_link_t *link = dev_to_instance(p_dev);
1951 struct cm4000_dev *dev = link->priv; 1924 struct cm4000_dev *dev = link->priv;
1952 int devno; 1925 int devno;
1953 1926
@@ -1958,11 +1931,9 @@ static void cm4000_detach(struct pcmcia_device *p_dev)
1958 if (devno == CM4000_MAX_DEV) 1931 if (devno == CM4000_MAX_DEV)
1959 return; 1932 return;
1960 1933
1961 link->state &= ~DEV_PRESENT;
1962 stop_monitor(dev); 1934 stop_monitor(dev);
1963 1935
1964 if (link->state & DEV_CONFIG) 1936 cm4000_release(link);
1965 cm4000_release(link);
1966 1937
1967 dev_table[devno] = NULL; 1938 dev_table[devno] = NULL;
1968 kfree(dev); 1939 kfree(dev);
@@ -1993,7 +1964,7 @@ static struct pcmcia_driver cm4000_driver = {
1993 .drv = { 1964 .drv = {
1994 .name = "cm4000_cs", 1965 .name = "cm4000_cs",
1995 }, 1966 },
1996 .probe = cm4000_attach, 1967 .probe = cm4000_probe,
1997 .remove = cm4000_detach, 1968 .remove = cm4000_detach,
1998 .suspend = cm4000_suspend, 1969 .suspend = cm4000_suspend,
1999 .resume = cm4000_resume, 1970 .resume = cm4000_resume,
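
The cm4000_cs.c rework follows one shape throughout: the per-driver structure stops embedding the link object and keeps only a back-pointer to the bus device, the scattered state-flag tests become a single presence helper, and the config routine returns an error code for probe to propagate. The sketch below reduces the bus layer to a stub to show just that shape; none of it is the real PCMCIA API.

/* Sketch only: back-pointer plus presence helper, bus layer stubbed out. */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the bus core's device object. */
struct bus_device {
        bool present;
};

static bool bus_dev_present(struct bus_device *d)
{
        return d->present;
}

/* Driver state: a pointer back to the bus object, not an embedded copy. */
struct reader_dev {
        struct bus_device *p_dev;
        int opened;
};

static int reader_read(struct reader_dev *dev)
{
        if (!bus_dev_present(dev->p_dev))       /* was: link.state & DEV_PRESENT */
                return -1;
        return 0;
}

int main(void)
{
        struct bus_device bus = { .present = true };
        struct reader_dev dev = { .p_dev = &bus };

        printf("read while present: %d\n", reader_read(&dev));
        bus.present = false;                    /* card ejected */
        printf("read after eject:   %d\n", reader_read(&dev));
        return 0;
}
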
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index 466e33bab029..29efa64580a8 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -41,7 +41,7 @@
41 41
42 42
43#ifdef PCMCIA_DEBUG 43#ifdef PCMCIA_DEBUG
44#define reader_to_dev(x) (&handle_to_dev(x->link.handle)) 44#define reader_to_dev(x) (&handle_to_dev(x->p_dev->handle))
45static int pc_debug = PCMCIA_DEBUG; 45static int pc_debug = PCMCIA_DEBUG;
46module_param(pc_debug, int, 0600); 46module_param(pc_debug, int, 0600);
47#define DEBUGP(n, rdr, x, args...) do { \ 47#define DEBUGP(n, rdr, x, args...) do { \
@@ -65,7 +65,7 @@ static char *version =
65/* how often to poll for fifo status change */ 65/* how often to poll for fifo status change */
66#define POLL_PERIOD msecs_to_jiffies(10) 66#define POLL_PERIOD msecs_to_jiffies(10)
67 67
68static void reader_release(dev_link_t *link); 68static void reader_release(struct pcmcia_device *link);
69 69
70static int major; 70static int major;
71static struct class *cmx_class; 71static struct class *cmx_class;
@@ -74,7 +74,7 @@ static struct class *cmx_class;
74#define BS_WRITABLE 0x02 74#define BS_WRITABLE 0x02
75 75
76struct reader_dev { 76struct reader_dev {
77 dev_link_t link; 77 struct pcmcia_device *p_dev;
78 dev_node_t node; 78 dev_node_t node;
79 wait_queue_head_t devq; 79 wait_queue_head_t devq;
80 wait_queue_head_t poll_wait; 80 wait_queue_head_t poll_wait;
@@ -87,7 +87,7 @@ struct reader_dev {
87 struct timer_list poll_timer; 87 struct timer_list poll_timer;
88}; 88};
89 89
90static dev_link_t *dev_table[CM_MAX_DEV]; 90static struct pcmcia_device *dev_table[CM_MAX_DEV];
91 91
92#ifndef PCMCIA_DEBUG 92#ifndef PCMCIA_DEBUG
93#define xoutb outb 93#define xoutb outb
@@ -116,7 +116,7 @@ static inline unsigned char xinb(unsigned short port)
116static void cm4040_do_poll(unsigned long dummy) 116static void cm4040_do_poll(unsigned long dummy)
117{ 117{
118 struct reader_dev *dev = (struct reader_dev *) dummy; 118 struct reader_dev *dev = (struct reader_dev *) dummy;
119 unsigned int obs = xinb(dev->link.io.BasePort1 119 unsigned int obs = xinb(dev->p_dev->io.BasePort1
120 + REG_OFFSET_BUFFER_STATUS); 120 + REG_OFFSET_BUFFER_STATUS);
121 121
122 if ((obs & BSR_BULK_IN_FULL)) { 122 if ((obs & BSR_BULK_IN_FULL)) {
@@ -147,7 +147,7 @@ static void cm4040_stop_poll(struct reader_dev *dev)
147static int wait_for_bulk_out_ready(struct reader_dev *dev) 147static int wait_for_bulk_out_ready(struct reader_dev *dev)
148{ 148{
149 int i, rc; 149 int i, rc;
150 int iobase = dev->link.io.BasePort1; 150 int iobase = dev->p_dev->io.BasePort1;
151 151
152 for (i = 0; i < POLL_LOOP_COUNT; i++) { 152 for (i = 0; i < POLL_LOOP_COUNT; i++) {
153 if ((xinb(iobase + REG_OFFSET_BUFFER_STATUS) 153 if ((xinb(iobase + REG_OFFSET_BUFFER_STATUS)
@@ -177,7 +177,7 @@ static int wait_for_bulk_out_ready(struct reader_dev *dev)
177/* Write to Sync Control Register */ 177/* Write to Sync Control Register */
178static int write_sync_reg(unsigned char val, struct reader_dev *dev) 178static int write_sync_reg(unsigned char val, struct reader_dev *dev)
179{ 179{
180 int iobase = dev->link.io.BasePort1; 180 int iobase = dev->p_dev->io.BasePort1;
181 int rc; 181 int rc;
182 182
183 rc = wait_for_bulk_out_ready(dev); 183 rc = wait_for_bulk_out_ready(dev);
@@ -195,7 +195,7 @@ static int write_sync_reg(unsigned char val, struct reader_dev *dev)
195static int wait_for_bulk_in_ready(struct reader_dev *dev) 195static int wait_for_bulk_in_ready(struct reader_dev *dev)
196{ 196{
197 int i, rc; 197 int i, rc;
198 int iobase = dev->link.io.BasePort1; 198 int iobase = dev->p_dev->io.BasePort1;
199 199
200 for (i = 0; i < POLL_LOOP_COUNT; i++) { 200 for (i = 0; i < POLL_LOOP_COUNT; i++) {
201 if ((xinb(iobase + REG_OFFSET_BUFFER_STATUS) 201 if ((xinb(iobase + REG_OFFSET_BUFFER_STATUS)
@@ -225,7 +225,7 @@ static ssize_t cm4040_read(struct file *filp, char __user *buf,
225 size_t count, loff_t *ppos) 225 size_t count, loff_t *ppos)
226{ 226{
227 struct reader_dev *dev = filp->private_data; 227 struct reader_dev *dev = filp->private_data;
228 int iobase = dev->link.io.BasePort1; 228 int iobase = dev->p_dev->io.BasePort1;
229 size_t bytes_to_read; 229 size_t bytes_to_read;
230 unsigned long i; 230 unsigned long i;
231 size_t min_bytes_to_read; 231 size_t min_bytes_to_read;
@@ -246,7 +246,7 @@ static ssize_t cm4040_read(struct file *filp, char __user *buf,
246 return -EAGAIN; 246 return -EAGAIN;
247 } 247 }
248 248
249 if ((dev->link.state & DEV_PRESENT)==0) 249 if (!pcmcia_dev_present(dev->p_dev))
250 return -ENODEV; 250 return -ENODEV;
251 251
252 for (i = 0; i < 5; i++) { 252 for (i = 0; i < 5; i++) {
@@ -328,7 +328,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
328 size_t count, loff_t *ppos) 328 size_t count, loff_t *ppos)
329{ 329{
330 struct reader_dev *dev = filp->private_data; 330 struct reader_dev *dev = filp->private_data;
331 int iobase = dev->link.io.BasePort1; 331 int iobase = dev->p_dev->io.BasePort1;
332 ssize_t rc; 332 ssize_t rc;
333 int i; 333 int i;
334 unsigned int bytes_to_write; 334 unsigned int bytes_to_write;
@@ -351,7 +351,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
351 return -EAGAIN; 351 return -EAGAIN;
352 } 352 }
353 353
354 if ((dev->link.state & DEV_PRESENT) == 0) 354 if (!pcmcia_dev_present(dev->p_dev))
355 return -ENODEV; 355 return -ENODEV;
356 356
357 bytes_to_write = count; 357 bytes_to_write = count;
@@ -445,14 +445,14 @@ static unsigned int cm4040_poll(struct file *filp, poll_table *wait)
445static int cm4040_open(struct inode *inode, struct file *filp) 445static int cm4040_open(struct inode *inode, struct file *filp)
446{ 446{
447 struct reader_dev *dev; 447 struct reader_dev *dev;
448 dev_link_t *link; 448 struct pcmcia_device *link;
449 int minor = iminor(inode); 449 int minor = iminor(inode);
450 450
451 if (minor >= CM_MAX_DEV) 451 if (minor >= CM_MAX_DEV)
452 return -ENODEV; 452 return -ENODEV;
453 453
454 link = dev_table[minor]; 454 link = dev_table[minor];
455 if (link == NULL || !(DEV_OK(link))) 455 if (link == NULL || !pcmcia_dev_present(link))
456 return -ENODEV; 456 return -ENODEV;
457 457
458 if (link->open) 458 if (link->open)
@@ -478,7 +478,7 @@ static int cm4040_open(struct inode *inode, struct file *filp)
478static int cm4040_close(struct inode *inode, struct file *filp) 478static int cm4040_close(struct inode *inode, struct file *filp)
479{ 479{
480 struct reader_dev *dev = filp->private_data; 480 struct reader_dev *dev = filp->private_data;
481 dev_link_t *link; 481 struct pcmcia_device *link;
482 int minor = iminor(inode); 482 int minor = iminor(inode);
483 483
484 DEBUGP(2, dev, "-> cm4040_close(maj/min=%d.%d)\n", imajor(inode), 484 DEBUGP(2, dev, "-> cm4040_close(maj/min=%d.%d)\n", imajor(inode),
@@ -500,7 +500,7 @@ static int cm4040_close(struct inode *inode, struct file *filp)
500 return 0; 500 return 0;
501} 501}
502 502
503static void cm4040_reader_release(dev_link_t *link) 503static void cm4040_reader_release(struct pcmcia_device *link)
504{ 504{
505 struct reader_dev *dev = link->priv; 505 struct reader_dev *dev = link->priv;
506 506
@@ -514,60 +514,49 @@ static void cm4040_reader_release(dev_link_t *link)
514 return; 514 return;
515} 515}
516 516
517static void reader_config(dev_link_t *link, int devno) 517static int reader_config(struct pcmcia_device *link, int devno)
518{ 518{
519 client_handle_t handle;
520 struct reader_dev *dev; 519 struct reader_dev *dev;
521 tuple_t tuple; 520 tuple_t tuple;
522 cisparse_t parse; 521 cisparse_t parse;
523 config_info_t conf;
524 u_char buf[64]; 522 u_char buf[64];
525 int fail_fn, fail_rc; 523 int fail_fn, fail_rc;
526 int rc; 524 int rc;
527 525
528 handle = link->handle;
529
530 tuple.DesiredTuple = CISTPL_CONFIG; 526 tuple.DesiredTuple = CISTPL_CONFIG;
531 tuple.Attributes = 0; 527 tuple.Attributes = 0;
532 tuple.TupleData = buf; 528 tuple.TupleData = buf;
533 tuple.TupleDataMax = sizeof(buf); 529 tuple.TupleDataMax = sizeof(buf);
534 tuple.TupleOffset = 0; 530 tuple.TupleOffset = 0;
535 531
536 if ((fail_rc = pcmcia_get_first_tuple(handle, &tuple)) != CS_SUCCESS) { 532 if ((fail_rc = pcmcia_get_first_tuple(link, &tuple)) != CS_SUCCESS) {
537 fail_fn = GetFirstTuple; 533 fail_fn = GetFirstTuple;
538 goto cs_failed; 534 goto cs_failed;
539 } 535 }
540 if ((fail_rc = pcmcia_get_tuple_data(handle, &tuple)) != CS_SUCCESS) { 536 if ((fail_rc = pcmcia_get_tuple_data(link, &tuple)) != CS_SUCCESS) {
541 fail_fn = GetTupleData; 537 fail_fn = GetTupleData;
542 goto cs_failed; 538 goto cs_failed;
543 } 539 }
544 if ((fail_rc = pcmcia_parse_tuple(handle, &tuple, &parse)) 540 if ((fail_rc = pcmcia_parse_tuple(link, &tuple, &parse))
545 != CS_SUCCESS) { 541 != CS_SUCCESS) {
546 fail_fn = ParseTuple; 542 fail_fn = ParseTuple;
547 goto cs_failed; 543 goto cs_failed;
548 } 544 }
549 if ((fail_rc = pcmcia_get_configuration_info(handle, &conf))
550 != CS_SUCCESS) {
551 fail_fn = GetConfigurationInfo;
552 goto cs_failed;
553 }
554 545
555 link->state |= DEV_CONFIG;
556 link->conf.ConfigBase = parse.config.base; 546 link->conf.ConfigBase = parse.config.base;
557 link->conf.Present = parse.config.rmask[0]; 547 link->conf.Present = parse.config.rmask[0];
558 link->conf.Vcc = conf.Vcc;
559 548
560 link->io.BasePort2 = 0; 549 link->io.BasePort2 = 0;
561 link->io.NumPorts2 = 0; 550 link->io.NumPorts2 = 0;
562 link->io.Attributes2 = 0; 551 link->io.Attributes2 = 0;
563 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 552 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
564 for (rc = pcmcia_get_first_tuple(handle, &tuple); 553 for (rc = pcmcia_get_first_tuple(link, &tuple);
565 rc == CS_SUCCESS; 554 rc == CS_SUCCESS;
566 rc = pcmcia_get_next_tuple(handle, &tuple)) { 555 rc = pcmcia_get_next_tuple(link, &tuple)) {
567 rc = pcmcia_get_tuple_data(handle, &tuple); 556 rc = pcmcia_get_tuple_data(link, &tuple);
568 if (rc != CS_SUCCESS) 557 if (rc != CS_SUCCESS)
569 continue; 558 continue;
570 rc = pcmcia_parse_tuple(handle, &tuple, &parse); 559 rc = pcmcia_parse_tuple(link, &tuple, &parse);
571 if (rc != CS_SUCCESS) 560 if (rc != CS_SUCCESS)
572 continue; 561 continue;
573 562
@@ -585,13 +574,13 @@ static void reader_config(dev_link_t *link, int devno)
585 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 574 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
586 link->io.IOAddrLines = parse.cftable_entry.io.flags 575 link->io.IOAddrLines = parse.cftable_entry.io.flags
587 & CISTPL_IO_LINES_MASK; 576 & CISTPL_IO_LINES_MASK;
588 rc = pcmcia_request_io(handle, &link->io); 577 rc = pcmcia_request_io(link, &link->io);
589 578
590 dev_printk(KERN_INFO, &handle_to_dev(handle), "foo"); 579 dev_printk(KERN_INFO, &handle_to_dev(link), "foo");
591 if (rc == CS_SUCCESS) 580 if (rc == CS_SUCCESS)
592 break; 581 break;
593 else 582 else
594 dev_printk(KERN_INFO, &handle_to_dev(handle), 583 dev_printk(KERN_INFO, &handle_to_dev(link),
595 "pcmcia_request_io failed 0x%x\n", rc); 584 "pcmcia_request_io failed 0x%x\n", rc);
596 } 585 }
597 if (rc != CS_SUCCESS) 586 if (rc != CS_SUCCESS)
@@ -599,10 +588,10 @@ static void reader_config(dev_link_t *link, int devno)
599 588
600 link->conf.IntType = 00000002; 589 link->conf.IntType = 00000002;
601 590
602 if ((fail_rc = pcmcia_request_configuration(handle,&link->conf)) 591 if ((fail_rc = pcmcia_request_configuration(link,&link->conf))
603 !=CS_SUCCESS) { 592 !=CS_SUCCESS) {
604 fail_fn = RequestConfiguration; 593 fail_fn = RequestConfiguration;
605 dev_printk(KERN_INFO, &handle_to_dev(handle), 594 dev_printk(KERN_INFO, &handle_to_dev(link),
606 "pcmcia_request_configuration failed 0x%x\n", 595 "pcmcia_request_configuration failed 0x%x\n",
607 fail_rc); 596 fail_rc);
608 goto cs_release; 597 goto cs_release;
@@ -612,57 +601,31 @@ static void reader_config(dev_link_t *link, int devno)
612 sprintf(dev->node.dev_name, DEVICE_NAME "%d", devno); 601 sprintf(dev->node.dev_name, DEVICE_NAME "%d", devno);
613 dev->node.major = major; 602 dev->node.major = major;
614 dev->node.minor = devno; 603 dev->node.minor = devno;
615 dev->node.next = NULL; 604 dev->node.next = &dev->node;
616 link->dev = &dev->node;
617 link->state &= ~DEV_CONFIG_PENDING;
618 605
619 DEBUGP(2, dev, "device " DEVICE_NAME "%d at 0x%.4x-0x%.4x\n", devno, 606 DEBUGP(2, dev, "device " DEVICE_NAME "%d at 0x%.4x-0x%.4x\n", devno,
620 link->io.BasePort1, link->io.BasePort1+link->io.NumPorts1); 607 link->io.BasePort1, link->io.BasePort1+link->io.NumPorts1);
621 DEBUGP(2, dev, "<- reader_config (succ)\n"); 608 DEBUGP(2, dev, "<- reader_config (succ)\n");
622 609
623 return; 610 return 0;
624 611
625cs_failed: 612cs_failed:
626 cs_error(handle, fail_fn, fail_rc); 613 cs_error(link, fail_fn, fail_rc);
627cs_release: 614cs_release:
628 reader_release(link); 615 reader_release(link);
629 link->state &= ~DEV_CONFIG_PENDING; 616 return -ENODEV;
630}
631
632static int reader_suspend(struct pcmcia_device *p_dev)
633{
634 dev_link_t *link = dev_to_instance(p_dev);
635
636 link->state |= DEV_SUSPEND;
637 if (link->state & DEV_CONFIG)
638 pcmcia_release_configuration(link->handle);
639
640 return 0;
641} 617}
642 618
643static int reader_resume(struct pcmcia_device *p_dev) 619static void reader_release(struct pcmcia_device *link)
644{
645 dev_link_t *link = dev_to_instance(p_dev);
646
647 link->state &= ~DEV_SUSPEND;
648 if (link->state & DEV_CONFIG)
649 pcmcia_request_configuration(link->handle, &link->conf);
650
651 return 0;
652}
653
654static void reader_release(dev_link_t *link)
655{ 620{
656 cm4040_reader_release(link->priv); 621 cm4040_reader_release(link->priv);
657 pcmcia_release_configuration(link->handle); 622 pcmcia_disable_device(link);
658 pcmcia_release_io(link->handle, &link->io);
659} 623}
660 624
661static int reader_attach(struct pcmcia_device *p_dev) 625static int reader_probe(struct pcmcia_device *link)
662{ 626{
663 struct reader_dev *dev; 627 struct reader_dev *dev;
664 dev_link_t *link; 628 int i, ret;
665 int i;
666 629
667 for (i = 0; i < CM_MAX_DEV; i++) { 630 for (i = 0; i < CM_MAX_DEV; i++) {
668 if (dev_table[i] == NULL) 631 if (dev_table[i] == NULL)
@@ -679,8 +642,8 @@ static int reader_attach(struct pcmcia_device *p_dev)
679 dev->timeout = CCID_DRIVER_MINIMUM_TIMEOUT; 642 dev->timeout = CCID_DRIVER_MINIMUM_TIMEOUT;
680 dev->buffer_status = 0; 643 dev->buffer_status = 0;
681 644
682 link = &dev->link;
683 link->priv = dev; 645 link->priv = dev;
646 dev->p_dev = link;
684 647
685 link->conf.IntType = INT_MEMORY_AND_IO; 648 link->conf.IntType = INT_MEMORY_AND_IO;
686 dev_table[i] = link; 649 dev_table[i] = link;
@@ -692,11 +655,9 @@ static int reader_attach(struct pcmcia_device *p_dev)
692 init_timer(&dev->poll_timer); 655 init_timer(&dev->poll_timer);
693 dev->poll_timer.function = &cm4040_do_poll; 656 dev->poll_timer.function = &cm4040_do_poll;
694 657
695 link->handle = p_dev; 658 ret = reader_config(link, i);
696 p_dev->instance = link; 659 if (ret)
697 660 return ret;
698 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
699 reader_config(link, i);
700 661
701 class_device_create(cmx_class, NULL, MKDEV(major, i), NULL, 662 class_device_create(cmx_class, NULL, MKDEV(major, i), NULL,
702 "cmx%d", i); 663 "cmx%d", i);
@@ -704,9 +665,8 @@ static int reader_attach(struct pcmcia_device *p_dev)
704 return 0; 665 return 0;
705} 666}
706 667
707static void reader_detach(struct pcmcia_device *p_dev) 668static void reader_detach(struct pcmcia_device *link)
708{ 669{
709 dev_link_t *link = dev_to_instance(p_dev);
710 struct reader_dev *dev = link->priv; 670 struct reader_dev *dev = link->priv;
711 int devno; 671 int devno;
712 672
@@ -718,10 +678,7 @@ static void reader_detach(struct pcmcia_device *p_dev)
718 if (devno == CM_MAX_DEV) 678 if (devno == CM_MAX_DEV)
719 return; 679 return;
720 680
721 link->state &= ~DEV_PRESENT; 681 reader_release(link);
722
723 if (link->state & DEV_CONFIG)
724 reader_release(link);
725 682
726 dev_table[devno] = NULL; 683 dev_table[devno] = NULL;
727 kfree(dev); 684 kfree(dev);
@@ -753,10 +710,8 @@ static struct pcmcia_driver reader_driver = {
753 .drv = { 710 .drv = {
754 .name = "cm4040_cs", 711 .name = "cm4040_cs",
755 }, 712 },
756 .probe = reader_attach, 713 .probe = reader_probe,
757 .remove = reader_detach, 714 .remove = reader_detach,
758 .suspend = reader_suspend,
759 .resume = reader_resume,
760 .id_table = cm4040_ids, 715 .id_table = cm4040_ids,
761}; 716};
762 717
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index e6b714b6390d..07213454c458 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -228,7 +228,7 @@ typedef struct _mgslpc_info {
228 struct _input_signal_events input_signal_events; 228 struct _input_signal_events input_signal_events;
229 229
230 /* PCMCIA support */ 230 /* PCMCIA support */
231 dev_link_t link; 231 struct pcmcia_device *p_dev;
232 dev_node_t node; 232 dev_node_t node;
233 int stop; 233 int stop;
234 234
@@ -484,7 +484,7 @@ static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout);
484 484
485/* PCMCIA prototypes */ 485/* PCMCIA prototypes */
486 486
487static void mgslpc_config(dev_link_t *link); 487static int mgslpc_config(struct pcmcia_device *link);
488static void mgslpc_release(u_long arg); 488static void mgslpc_release(u_long arg);
489static void mgslpc_detach(struct pcmcia_device *p_dev); 489static void mgslpc_detach(struct pcmcia_device *p_dev);
490 490
@@ -533,14 +533,14 @@ static void ldisc_receive_buf(struct tty_struct *tty,
533 } 533 }
534} 534}
535 535
536static int mgslpc_attach(struct pcmcia_device *p_dev) 536static int mgslpc_probe(struct pcmcia_device *link)
537{ 537{
538 MGSLPC_INFO *info; 538 MGSLPC_INFO *info;
539 dev_link_t *link; 539 int ret;
540 540
541 if (debug_level >= DEBUG_LEVEL_INFO) 541 if (debug_level >= DEBUG_LEVEL_INFO)
542 printk("mgslpc_attach\n"); 542 printk("mgslpc_attach\n");
543 543
544 info = (MGSLPC_INFO *)kmalloc(sizeof(MGSLPC_INFO), GFP_KERNEL); 544 info = (MGSLPC_INFO *)kmalloc(sizeof(MGSLPC_INFO), GFP_KERNEL);
545 if (!info) { 545 if (!info) {
546 printk("Error can't allocate device instance data\n"); 546 printk("Error can't allocate device instance data\n");
@@ -565,25 +565,22 @@ static int mgslpc_attach(struct pcmcia_device *p_dev)
565 info->imrb_value = 0xffff; 565 info->imrb_value = 0xffff;
566 info->pim_value = 0xff; 566 info->pim_value = 0xff;
567 567
568 link = &info->link; 568 info->p_dev = link;
569 link->priv = info; 569 link->priv = info;
570 570
571 /* Initialize the dev_link_t structure */ 571 /* Initialize the struct pcmcia_device structure */
572 572
573 /* Interrupt setup */ 573 /* Interrupt setup */
574 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 574 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
575 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 575 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
576 link->irq.Handler = NULL; 576 link->irq.Handler = NULL;
577 577
578 link->conf.Attributes = 0; 578 link->conf.Attributes = 0;
579 link->conf.Vcc = 50;
580 link->conf.IntType = INT_MEMORY_AND_IO; 579 link->conf.IntType = INT_MEMORY_AND_IO;
581 580
582 link->handle = p_dev; 581 ret = mgslpc_config(link);
583 p_dev->instance = link; 582 if (ret)
584 583 return ret;
585 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
586 mgslpc_config(link);
587 584
588 mgslpc_add_device(info); 585 mgslpc_add_device(info);
589 586
@@ -596,15 +593,13 @@ static int mgslpc_attach(struct pcmcia_device *p_dev)
596#define CS_CHECK(fn, ret) \ 593#define CS_CHECK(fn, ret) \
597do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 594do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
598 595
599static void mgslpc_config(dev_link_t *link) 596static int mgslpc_config(struct pcmcia_device *link)
600{ 597{
601 client_handle_t handle = link->handle;
602 MGSLPC_INFO *info = link->priv; 598 MGSLPC_INFO *info = link->priv;
603 tuple_t tuple; 599 tuple_t tuple;
604 cisparse_t parse; 600 cisparse_t parse;
605 int last_fn, last_ret; 601 int last_fn, last_ret;
606 u_char buf[64]; 602 u_char buf[64];
607 config_info_t conf;
608 cistpl_cftable_entry_t dflt = { 0 }; 603 cistpl_cftable_entry_t dflt = { 0 };
609 cistpl_cftable_entry_t *cfg; 604 cistpl_cftable_entry_t *cfg;
610 605
@@ -617,27 +612,20 @@ static void mgslpc_config(dev_link_t *link)
617 tuple.TupleData = buf; 612 tuple.TupleData = buf;
618 tuple.TupleDataMax = sizeof(buf); 613 tuple.TupleDataMax = sizeof(buf);
619 tuple.TupleOffset = 0; 614 tuple.TupleOffset = 0;
620 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 615 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
621 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 616 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
622 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 617 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
623 link->conf.ConfigBase = parse.config.base; 618 link->conf.ConfigBase = parse.config.base;
624 link->conf.Present = parse.config.rmask[0]; 619 link->conf.Present = parse.config.rmask[0];
625
626 /* Configure card */
627 link->state |= DEV_CONFIG;
628
629 /* Look up the current Vcc */
630 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(handle, &conf));
631 link->conf.Vcc = conf.Vcc;
632 620
633 /* get CIS configuration entry */ 621 /* get CIS configuration entry */
634 622
635 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 623 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
636 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 624 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
637 625
638 cfg = &(parse.cftable_entry); 626 cfg = &(parse.cftable_entry);
639 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 627 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
640 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 628 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
641 629
642 if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg; 630 if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg;
643 if (cfg->index == 0) 631 if (cfg->index == 0)
@@ -658,11 +646,10 @@ static void mgslpc_config(dev_link_t *link)
658 link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK; 646 link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
659 link->io.BasePort1 = io->win[0].base; 647 link->io.BasePort1 = io->win[0].base;
660 link->io.NumPorts1 = io->win[0].len; 648 link->io.NumPorts1 = io->win[0].len;
661 CS_CHECK(RequestIO, pcmcia_request_io(link->handle, &link->io)); 649 CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
662 } 650 }
663 651
664 link->conf.Attributes = CONF_ENABLE_IRQ; 652 link->conf.Attributes = CONF_ENABLE_IRQ;
665 link->conf.Vcc = 50;
666 link->conf.IntType = INT_MEMORY_AND_IO; 653 link->conf.IntType = INT_MEMORY_AND_IO;
667 link->conf.ConfigIndex = 8; 654 link->conf.ConfigIndex = 8;
668 link->conf.Present = PRESENT_OPTION; 655 link->conf.Present = PRESENT_OPTION;
@@ -670,9 +657,9 @@ static void mgslpc_config(dev_link_t *link)
670 link->irq.Attributes |= IRQ_HANDLE_PRESENT; 657 link->irq.Attributes |= IRQ_HANDLE_PRESENT;
671 link->irq.Handler = mgslpc_isr; 658 link->irq.Handler = mgslpc_isr;
672 link->irq.Instance = info; 659 link->irq.Instance = info;
673 CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq)); 660 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
674 661
675 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf)); 662 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
676 663
677 info->io_base = link->io.BasePort1; 664 info->io_base = link->io.BasePort1;
678 info->irq_level = link->irq.AssignedIRQ; 665 info->irq_level = link->irq.AssignedIRQ;
@@ -680,7 +667,7 @@ static void mgslpc_config(dev_link_t *link)
680 /* add to linked list of devices */ 667 /* add to linked list of devices */
681 sprintf(info->node.dev_name, "mgslpc0"); 668 sprintf(info->node.dev_name, "mgslpc0");
682 info->node.major = info->node.minor = 0; 669 info->node.major = info->node.minor = 0;
683 link->dev = &info->node; 670 link->dev_node = &info->node;
684 671
685 printk(KERN_INFO "%s: index 0x%02x:", 672 printk(KERN_INFO "%s: index 0x%02x:",
686 info->node.dev_name, link->conf.ConfigIndex); 673 info->node.dev_name, link->conf.ConfigIndex);
@@ -690,13 +677,12 @@ static void mgslpc_config(dev_link_t *link)
690 printk(", io 0x%04x-0x%04x", link->io.BasePort1, 677 printk(", io 0x%04x-0x%04x", link->io.BasePort1,
691 link->io.BasePort1+link->io.NumPorts1-1); 678 link->io.BasePort1+link->io.NumPorts1-1);
692 printk("\n"); 679 printk("\n");
693 680 return 0;
694 link->state &= ~DEV_CONFIG_PENDING;
695 return;
696 681
697cs_failed: 682cs_failed:
698 cs_error(link->handle, last_fn, last_ret); 683 cs_error(link, last_fn, last_ret);
699 mgslpc_release((u_long)link); 684 mgslpc_release((u_long)link);
685 return -ENODEV;
700} 686}
701 687
702/* Card has been removed. 688/* Card has been removed.
@@ -705,58 +691,38 @@ cs_failed:
705 */ 691 */
706static void mgslpc_release(u_long arg) 692static void mgslpc_release(u_long arg)
707{ 693{
708 dev_link_t *link = (dev_link_t *)arg; 694 struct pcmcia_device *link = (struct pcmcia_device *)arg;
709 695
710 if (debug_level >= DEBUG_LEVEL_INFO) 696 if (debug_level >= DEBUG_LEVEL_INFO)
711 printk("mgslpc_release(0x%p)\n", link); 697 printk("mgslpc_release(0x%p)\n", link);
712
713 /* Unlink the device chain */
714 link->dev = NULL;
715 link->state &= ~DEV_CONFIG;
716 698
717 pcmcia_release_configuration(link->handle); 699 pcmcia_disable_device(link);
718 if (link->io.NumPorts1)
719 pcmcia_release_io(link->handle, &link->io);
720 if (link->irq.AssignedIRQ)
721 pcmcia_release_irq(link->handle, &link->irq);
722} 700}
723 701
724static void mgslpc_detach(struct pcmcia_device *p_dev) 702static void mgslpc_detach(struct pcmcia_device *link)
725{ 703{
726 dev_link_t *link = dev_to_instance(p_dev); 704 if (debug_level >= DEBUG_LEVEL_INFO)
727 705 printk("mgslpc_detach(0x%p)\n", link);
728 if (debug_level >= DEBUG_LEVEL_INFO)
729 printk("mgslpc_detach(0x%p)\n", link);
730 706
731 if (link->state & DEV_CONFIG) { 707 ((MGSLPC_INFO *)link->priv)->stop = 1;
732 ((MGSLPC_INFO *)link->priv)->stop = 1; 708 mgslpc_release((u_long)link);
733 mgslpc_release((u_long)link);
734 }
735 709
736 mgslpc_remove_device((MGSLPC_INFO *)link->priv); 710 mgslpc_remove_device((MGSLPC_INFO *)link->priv);
737} 711}
738 712
739static int mgslpc_suspend(struct pcmcia_device *dev) 713static int mgslpc_suspend(struct pcmcia_device *link)
740{ 714{
741 dev_link_t *link = dev_to_instance(dev);
742 MGSLPC_INFO *info = link->priv; 715 MGSLPC_INFO *info = link->priv;
743 716
744 link->state |= DEV_SUSPEND;
745 info->stop = 1; 717 info->stop = 1;
746 if (link->state & DEV_CONFIG)
747 pcmcia_release_configuration(link->handle);
748 718
749 return 0; 719 return 0;
750} 720}
751 721
752static int mgslpc_resume(struct pcmcia_device *dev) 722static int mgslpc_resume(struct pcmcia_device *link)
753{ 723{
754 dev_link_t *link = dev_to_instance(dev);
755 MGSLPC_INFO *info = link->priv; 724 MGSLPC_INFO *info = link->priv;
756 725
757 link->state &= ~DEV_SUSPEND;
758 if (link->state & DEV_CONFIG)
759 pcmcia_request_configuration(link->handle, &link->conf);
760 info->stop = 0; 726 info->stop = 0;
761 727
762 return 0; 728 return 0;
@@ -1280,7 +1246,7 @@ static irqreturn_t mgslpc_isr(int irq, void *dev_id, struct pt_regs * regs)
1280 if (!info) 1246 if (!info)
1281 return IRQ_NONE; 1247 return IRQ_NONE;
1282 1248
1283 if (!(info->link.state & DEV_CONFIG)) 1249 if (!(info->p_dev->_locked))
1284 return IRQ_HANDLED; 1250 return IRQ_HANDLED;
1285 1251
1286 spin_lock(&info->lock); 1252 spin_lock(&info->lock);
@@ -3033,7 +2999,7 @@ static struct pcmcia_driver mgslpc_driver = {
3033 .drv = { 2999 .drv = {
3034 .name = "synclink_cs", 3000 .name = "synclink_cs",
3035 }, 3001 },
3036 .probe = mgslpc_attach, 3002 .probe = mgslpc_probe,
3037 .remove = mgslpc_detach, 3003 .remove = mgslpc_detach,
3038 .id_table = mgslpc_ids, 3004 .id_table = mgslpc_ids,
3039 .suspend = mgslpc_suspend, 3005 .suspend = mgslpc_suspend,
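mgslpc_config() keeps the CS_CHECK() macro defined just above it, but the error path now returns -ENODEV instead of leaving the device half-configured. Roughly, the idiom expands to the following fragment (it assumes the PCMCIA headers and declarations already present in the file; the function body is a sketch, not the driver's):

    #define CS_CHECK(fn, ret) \
            do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)

    static int demo_config(struct pcmcia_device *link)
    {
            int last_fn, last_ret;

            CS_CHECK(RequestIRQ,
                     pcmcia_request_irq(link, &link->irq));
            CS_CHECK(RequestConfiguration,
                     pcmcia_request_configuration(link, &link->conf));
            return 0;

    cs_failed:
            cs_error(link, last_fn, last_ret);      /* report which call failed and why */
            pcmcia_disable_device(link);
            return -ENODEV;
    }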
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index 3f5d6077f39c..a9c5a7230f89 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -504,7 +504,6 @@ static int stl_echmcaintr(stlbrd_t *brdp);
504static int stl_echpciintr(stlbrd_t *brdp); 504static int stl_echpciintr(stlbrd_t *brdp);
505static int stl_echpci64intr(stlbrd_t *brdp); 505static int stl_echpci64intr(stlbrd_t *brdp);
506static void stl_offintr(void *private); 506static void stl_offintr(void *private);
507static void *stl_memalloc(int len);
508static stlbrd_t *stl_allocbrd(void); 507static stlbrd_t *stl_allocbrd(void);
509static stlport_t *stl_getport(int brdnr, int panelnr, int portnr); 508static stlport_t *stl_getport(int brdnr, int panelnr, int portnr);
510 509
@@ -940,17 +939,6 @@ static int stl_parsebrd(stlconf_t *confp, char **argp)
940/*****************************************************************************/ 939/*****************************************************************************/
941 940
942/* 941/*
943 * Local driver kernel memory allocation routine.
944 */
945
946static void *stl_memalloc(int len)
947{
948 return (void *) kmalloc(len, GFP_KERNEL);
949}
950
951/*****************************************************************************/
952
953/*
954 * Allocate a new board structure. Fill out the basic info in it. 942 * Allocate a new board structure. Fill out the basic info in it.
955 */ 943 */
956 944
@@ -958,14 +946,13 @@ static stlbrd_t *stl_allocbrd(void)
958{ 946{
959 stlbrd_t *brdp; 947 stlbrd_t *brdp;
960 948
961 brdp = (stlbrd_t *) stl_memalloc(sizeof(stlbrd_t)); 949 brdp = kzalloc(sizeof(stlbrd_t), GFP_KERNEL);
962 if (brdp == (stlbrd_t *) NULL) { 950 if (!brdp) {
963 printk("STALLION: failed to allocate memory (size=%d)\n", 951 printk("STALLION: failed to allocate memory (size=%d)\n",
964 sizeof(stlbrd_t)); 952 sizeof(stlbrd_t));
965 return (stlbrd_t *) NULL; 953 return NULL;
966 } 954 }
967 955
968 memset(brdp, 0, sizeof(stlbrd_t));
969 brdp->magic = STL_BOARDMAGIC; 956 brdp->magic = STL_BOARDMAGIC;
970 return brdp; 957 return brdp;
971} 958}
@@ -1017,9 +1004,9 @@ static int stl_open(struct tty_struct *tty, struct file *filp)
1017 portp->refcount++; 1004 portp->refcount++;
1018 1005
1019 if ((portp->flags & ASYNC_INITIALIZED) == 0) { 1006 if ((portp->flags & ASYNC_INITIALIZED) == 0) {
1020 if (portp->tx.buf == (char *) NULL) { 1007 if (!portp->tx.buf) {
1021 portp->tx.buf = (char *) stl_memalloc(STL_TXBUFSIZE); 1008 portp->tx.buf = kmalloc(STL_TXBUFSIZE, GFP_KERNEL);
1022 if (portp->tx.buf == (char *) NULL) 1009 if (!portp->tx.buf)
1023 return -ENOMEM; 1010 return -ENOMEM;
1024 portp->tx.head = portp->tx.buf; 1011 portp->tx.head = portp->tx.buf;
1025 portp->tx.tail = portp->tx.buf; 1012 portp->tx.tail = portp->tx.buf;
@@ -2178,13 +2165,12 @@ static int __init stl_initports(stlbrd_t *brdp, stlpanel_t *panelp)
2178 * each ports data structures. 2165 * each ports data structures.
2179 */ 2166 */
2180 for (i = 0; (i < panelp->nrports); i++) { 2167 for (i = 0; (i < panelp->nrports); i++) {
2181 portp = (stlport_t *) stl_memalloc(sizeof(stlport_t)); 2168 portp = kzalloc(sizeof(stlport_t), GFP_KERNEL);
2182 if (portp == (stlport_t *) NULL) { 2169 if (!portp) {
2183 printk("STALLION: failed to allocate memory " 2170 printk("STALLION: failed to allocate memory "
2184 "(size=%d)\n", sizeof(stlport_t)); 2171 "(size=%d)\n", sizeof(stlport_t));
2185 break; 2172 break;
2186 } 2173 }
2187 memset(portp, 0, sizeof(stlport_t));
2188 2174
2189 portp->magic = STL_PORTMAGIC; 2175 portp->magic = STL_PORTMAGIC;
2190 portp->portnr = i; 2176 portp->portnr = i;
@@ -2315,13 +2301,12 @@ static inline int stl_initeio(stlbrd_t *brdp)
2315 * can complete the setup. 2301 * can complete the setup.
2316 */ 2302 */
2317 2303
2318 panelp = (stlpanel_t *) stl_memalloc(sizeof(stlpanel_t)); 2304 panelp = kzalloc(sizeof(stlpanel_t), GFP_KERNEL);
2319 if (panelp == (stlpanel_t *) NULL) { 2305 if (!panelp) {
2320 printk(KERN_WARNING "STALLION: failed to allocate memory " 2306 printk(KERN_WARNING "STALLION: failed to allocate memory "
2321 "(size=%d)\n", sizeof(stlpanel_t)); 2307 "(size=%d)\n", sizeof(stlpanel_t));
2322 return(-ENOMEM); 2308 return -ENOMEM;
2323 } 2309 }
2324 memset(panelp, 0, sizeof(stlpanel_t));
2325 2310
2326 panelp->magic = STL_PANELMAGIC; 2311 panelp->magic = STL_PANELMAGIC;
2327 panelp->brdnr = brdp->brdnr; 2312 panelp->brdnr = brdp->brdnr;
@@ -2490,13 +2475,12 @@ static inline int stl_initech(stlbrd_t *brdp)
2490 status = inb(ioaddr + ECH_PNLSTATUS); 2475 status = inb(ioaddr + ECH_PNLSTATUS);
2491 if ((status & ECH_PNLIDMASK) != nxtid) 2476 if ((status & ECH_PNLIDMASK) != nxtid)
2492 break; 2477 break;
2493 panelp = (stlpanel_t *) stl_memalloc(sizeof(stlpanel_t)); 2478 panelp = kzalloc(sizeof(stlpanel_t), GFP_KERNEL);
2494 if (panelp == (stlpanel_t *) NULL) { 2479 if (!panelp) {
2495 printk("STALLION: failed to allocate memory " 2480 printk("STALLION: failed to allocate memory "
2496 "(size=%d)\n", sizeof(stlpanel_t)); 2481 "(size=%d)\n", sizeof(stlpanel_t));
2497 break; 2482 break;
2498 } 2483 }
2499 memset(panelp, 0, sizeof(stlpanel_t));
2500 panelp->magic = STL_PANELMAGIC; 2484 panelp->magic = STL_PANELMAGIC;
2501 panelp->brdnr = brdp->brdnr; 2485 panelp->brdnr = brdp->brdnr;
2502 panelp->panelnr = panelnr; 2486 panelp->panelnr = panelnr;
@@ -3074,8 +3058,8 @@ static int __init stl_init(void)
3074/* 3058/*
3075 * Allocate a temporary write buffer. 3059 * Allocate a temporary write buffer.
3076 */ 3060 */
3077 stl_tmpwritebuf = (char *) stl_memalloc(STL_TXBUFSIZE); 3061 stl_tmpwritebuf = kmalloc(STL_TXBUFSIZE, GFP_KERNEL);
3078 if (stl_tmpwritebuf == (char *) NULL) 3062 if (!stl_tmpwritebuf)
3079 printk("STALLION: failed to allocate memory (size=%d)\n", 3063 printk("STALLION: failed to allocate memory (size=%d)\n",
3080 STL_TXBUFSIZE); 3064 STL_TXBUFSIZE);
3081 3065
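The stallion.c changes are a mechanical allocation cleanup: the driver-private stl_memalloc() wrapper and the follow-up memset() are replaced by kzalloc(), which allocates and zeroes in one step. Schematically (before/after fragment, not a complete function):

    /* before */
    brdp = (stlbrd_t *) stl_memalloc(sizeof(stlbrd_t));
    if (brdp == (stlbrd_t *) NULL)
            return NULL;
    memset(brdp, 0, sizeof(stlbrd_t));

    /* after */
    brdp = kzalloc(sizeof(*brdp), GFP_KERNEL);      /* <linux/slab.h> */
    if (!brdp)
            return NULL;

stl_tmpwritebuf and the per-port transmit buffers keep plain kmalloc(), since they are fully overwritten before use.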
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 0bfd1b63662e..98b126c2ded8 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -376,7 +376,7 @@ int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars, s
376 return copied; 376 return copied;
377} 377}
378 378
379EXPORT_SYMBOL_GPL(tty_insert_flip_string); 379EXPORT_SYMBOL(tty_insert_flip_string);
380 380
381int tty_insert_flip_string_flags(struct tty_struct *tty, const unsigned char *chars, const char *flags, size_t size) 381int tty_insert_flip_string_flags(struct tty_struct *tty, const unsigned char *chars, const char *flags, size_t size)
382{ 382{
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index ca4844c527da..acc5d47844eb 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -2328,6 +2328,10 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
2328 case TIOCL_SETVESABLANK: 2328 case TIOCL_SETVESABLANK:
2329 set_vesa_blanking(p); 2329 set_vesa_blanking(p);
2330 break; 2330 break;
2331 case TIOCL_GETKMSGREDIRECT:
2332 data = kmsg_redirect;
2333 ret = __put_user(data, p);
2334 break;
2331 case TIOCL_SETKMSGREDIRECT: 2335 case TIOCL_SETKMSGREDIRECT:
2332 if (!capable(CAP_SYS_ADMIN)) { 2336 if (!capable(CAP_SYS_ADMIN)) {
2333 ret = -EPERM; 2337 ret = -EPERM;
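The vt.c hunk adds a read-back counterpart to TIOCL_SETKMSGREDIRECT. A hedged userspace sketch of querying it, assuming the usual TIOCLINUX convention that byte 0 of the argument selects the subcommand and, for get-style subcommands, receives the result (must be run against a virtual console):

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/tiocl.h>

    int main(void)
    {
            char arg = TIOCL_GETKMSGREDIRECT;

            if (ioctl(0, TIOCLINUX, &arg) == 0)     /* stdin must be a VT */
                    printf("kmsg redirected to console %d (0 = disabled)\n", arg);
            else
                    perror("TIOCLINUX");
            return 0;
    }

The TIOCL_GETKMSGREDIRECT constant is expected to be defined in <linux/tiocl.h> alongside this handler.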
diff --git a/drivers/char/watchdog/Kconfig b/drivers/char/watchdog/Kconfig
index 16e99db2e12d..d53f664a4dd8 100644
--- a/drivers/char/watchdog/Kconfig
+++ b/drivers/char/watchdog/Kconfig
@@ -60,6 +60,13 @@ config SOFT_WATCHDOG
60 60
61# ARM Architecture 61# ARM Architecture
62 62
63config AT91_WATCHDOG
64 tristate "AT91RM9200 watchdog"
65 depends on WATCHDOG && ARCH_AT91RM9200
66 help
67 Watchdog timer embedded into AT91RM9200 chips. This will reboot your
68 system when the timeout is reached.
69
63config 21285_WATCHDOG 70config 21285_WATCHDOG
64 tristate "DC21285 watchdog" 71 tristate "DC21285 watchdog"
65 depends on WATCHDOG && FOOTBRIDGE 72 depends on WATCHDOG && FOOTBRIDGE
diff --git a/drivers/char/watchdog/Makefile b/drivers/char/watchdog/Makefile
index d6f27fde9905..6ab77b61a643 100644
--- a/drivers/char/watchdog/Makefile
+++ b/drivers/char/watchdog/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_WDTPCI) += wdt_pci.o
23obj-$(CONFIG_USBPCWATCHDOG) += pcwd_usb.o 23obj-$(CONFIG_USBPCWATCHDOG) += pcwd_usb.o
24 24
25# ARM Architecture 25# ARM Architecture
26obj-$(CONFIG_AT91_WATCHDOG) += at91_wdt.o
26obj-$(CONFIG_21285_WATCHDOG) += wdt285.o 27obj-$(CONFIG_21285_WATCHDOG) += wdt285.o
27obj-$(CONFIG_977_WATCHDOG) += wdt977.o 28obj-$(CONFIG_977_WATCHDOG) += wdt977.o
28obj-$(CONFIG_IXP2000_WATCHDOG) += ixp2000_wdt.o 29obj-$(CONFIG_IXP2000_WATCHDOG) += ixp2000_wdt.o
diff --git a/drivers/char/watchdog/at91_wdt.c b/drivers/char/watchdog/at91_wdt.c
new file mode 100644
index 000000000000..ac83bc4b019a
--- /dev/null
+++ b/drivers/char/watchdog/at91_wdt.c
@@ -0,0 +1,228 @@
1/*
2 * Watchdog driver for Atmel AT91RM9200 (Thunder)
3 *
4 * Copyright (C) 2003 SAN People (Pty) Ltd
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/errno.h>
14#include <linux/fs.h>
15#include <linux/init.h>
16#include <linux/kernel.h>
17#include <linux/miscdevice.h>
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/types.h>
21#include <linux/watchdog.h>
22#include <asm/bitops.h>
23#include <asm/uaccess.h>
24
25
26#define WDT_DEFAULT_TIME 5 /* 5 seconds */
27#define WDT_MAX_TIME 256 /* 256 seconds */
28
29static int wdt_time = WDT_DEFAULT_TIME;
30static int nowayout = WATCHDOG_NOWAYOUT;
31
32module_param(wdt_time, int, 0);
33MODULE_PARM_DESC(wdt_time, "Watchdog time in seconds. (default="__MODULE_STRING(WDT_DEFAULT_TIME) ")");
34
35module_param(nowayout, int, 0);
36MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
37
38
39static unsigned long at91wdt_busy;
40
41/* ......................................................................... */
42
43/*
44 * Disable the watchdog.
45 */
46static void inline at91_wdt_stop(void)
47{
48 at91_sys_write(AT91_ST_WDMR, AT91_ST_EXTEN);
49}
50
51/*
52 * Enable and reset the watchdog.
53 */
54static void inline at91_wdt_start(void)
55{
56 at91_sys_write(AT91_ST_WDMR, AT91_ST_EXTEN | AT91_ST_RSTEN | (((65536 * wdt_time) >> 8) & AT91_ST_WDV));
57 at91_sys_write(AT91_ST_CR, AT91_ST_WDRST);
58}
59
60/*
61 * Reload the watchdog timer. (ie, pat the watchdog)
62 */
63static void inline at91_wdt_reload(void)
64{
65 at91_sys_write(AT91_ST_CR, AT91_ST_WDRST);
66}
67
68/* ......................................................................... */
69
70/*
71 * Watchdog device is opened, and watchdog starts running.
72 */
73static int at91_wdt_open(struct inode *inode, struct file *file)
74{
75 if (test_and_set_bit(0, &at91wdt_busy))
76 return -EBUSY;
77
78 at91_wdt_start();
79 return nonseekable_open(inode, file);
80}
81
82/*
83 * Close the watchdog device.
84 * If CONFIG_WATCHDOG_NOWAYOUT is NOT defined then the watchdog is also
85 * disabled.
86 */
87static int at91_wdt_close(struct inode *inode, struct file *file)
88{
89 if (!nowayout)
90 at91_wdt_stop(); /* Disable the watchdog when file is closed */
91
92 clear_bit(0, &at91wdt_busy);
93 return 0;
94}
95
96/*
97 * Change the watchdog time interval.
98 */
99static int at91_wdt_settimeout(int new_time)
100{
101 /*
102 * All counting occurs at SLOW_CLOCK / 128 = 256 Hz
103 *
104 * Since WDV is a 16-bit counter, the maximum period is
105 * 65536 / 256 = 256 seconds.
106 */
107 if ((new_time <= 0) || (new_time > WDT_MAX_TIME))
108 return -EINVAL;
109
110 /* Set new watchdog time. It will be used when at91_wdt_start() is called. */
111 wdt_time = new_time;
112 return 0;
113}
114
115static struct watchdog_info at91_wdt_info = {
116 .identity = "at91 watchdog",
117 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
118};
119
120/*
121 * Handle commands from user-space.
122 */
123static int at91_wdt_ioctl(struct inode *inode, struct file *file,
124 unsigned int cmd, unsigned long arg)
125{
126 void __user *argp = (void __user *)arg;
127 int __user *p = argp;
128 int new_value;
129
130 switch(cmd) {
131 case WDIOC_KEEPALIVE:
132 at91_wdt_reload(); /* pat the watchdog */
133 return 0;
134
135 case WDIOC_GETSUPPORT:
136 return copy_to_user(argp, &at91_wdt_info, sizeof(at91_wdt_info)) ? -EFAULT : 0;
137
138 case WDIOC_SETTIMEOUT:
139 if (get_user(new_value, p))
140 return -EFAULT;
141
142 if (at91_wdt_settimeout(new_value))
143 return -EINVAL;
144
145 /* Enable new time value */
146 at91_wdt_start();
147
148 /* Return current value */
149 return put_user(wdt_time, p);
150
151 case WDIOC_GETTIMEOUT:
152 return put_user(wdt_time, p);
153
154 case WDIOC_GETSTATUS:
155 case WDIOC_GETBOOTSTATUS:
156 return put_user(0, p);
157
158 case WDIOC_SETOPTIONS:
159 if (get_user(new_value, p))
160 return -EFAULT;
161
162 if (new_value & WDIOS_DISABLECARD)
163 at91_wdt_stop();
164 if (new_value & WDIOS_ENABLECARD)
165 at91_wdt_start();
166 return 0;
167
168 default:
169 return -ENOIOCTLCMD;
170 }
171}
172
173/*
174 * Pat the watchdog whenever device is written to.
175 */
176static ssize_t at91_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos)
177{
178 at91_wdt_reload(); /* pat the watchdog */
179 return len;
180}
181
182/* ......................................................................... */
183
184static struct file_operations at91wdt_fops = {
185 .owner = THIS_MODULE,
186 .llseek = no_llseek,
187 .ioctl = at91_wdt_ioctl,
188 .open = at91_wdt_open,
189 .release = at91_wdt_close,
190 .write = at91_wdt_write,
191};
192
193static struct miscdevice at91wdt_miscdev = {
194 .minor = WATCHDOG_MINOR,
195 .name = "watchdog",
196 .fops = &at91wdt_fops,
197};
198
199static int __init at91_wdt_init(void)
200{
201 int res;
202
203 /* Check that the heartbeat value is within range; if not reset to the default */
204 if (at91_wdt_settimeout(wdt_time)) {
205 at91_wdt_settimeout(WDT_DEFAULT_TIME);
206 printk(KERN_INFO "at91_wdt: wdt_time value must be 1 <= wdt_time <= 256, using %d\n", wdt_time);
207 }
208
209 res = misc_register(&at91wdt_miscdev);
210 if (res)
211 return res;
212
213 printk("AT91 Watchdog Timer enabled (%d seconds, nowayout=%d)\n", wdt_time, nowayout);
214 return 0;
215}
216
217static void __exit at91_wdt_exit(void)
218{
219 misc_deregister(&at91wdt_miscdev);
220}
221
222module_init(at91_wdt_init);
223module_exit(at91_wdt_exit);
224
225MODULE_AUTHOR("Andrew Victor");
226MODULE_DESCRIPTION("Watchdog driver for Atmel AT91RM9200");
227MODULE_LICENSE("GPL");
228MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
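at91_wdt_start() loads the 16-bit WDV field with (65536 * wdt_time) >> 8 = 256 * wdt_time counts, i.e. the counter ticks at SLCK / 128 = 256 Hz, which is why the timeout tops out at 65536 / 256 = 256 seconds. From userspace the driver is exercised through the standard watchdog character-device API; a hedged sketch (device path and timeout value are illustrative):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/watchdog.h>

    int main(void)
    {
            int fd = open("/dev/watchdog", O_WRONLY);
            int timeout = 30;

            if (fd < 0)
                    return 1;

            ioctl(fd, WDIOC_SETTIMEOUT, &timeout);  /* driver accepts 1..256 s */
            for (;;) {
                    ioctl(fd, WDIOC_KEEPALIVE, 0);  /* or: write(fd, "\0", 1) */
                    sleep(timeout / 2);
            }
    }

Note that the driver has no magic-close handling: with nowayout=0 any close stops the hardware timer, with nowayout=1 it keeps running regardless.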
diff --git a/drivers/char/watchdog/pcwd.c b/drivers/char/watchdog/pcwd.c
index 8d6b249ad66b..6d44ca68312d 100644
--- a/drivers/char/watchdog/pcwd.c
+++ b/drivers/char/watchdog/pcwd.c
@@ -66,15 +66,13 @@
66#include <linux/fs.h> /* For file operations */ 66#include <linux/fs.h> /* For file operations */
67#include <linux/ioport.h> /* For io-port access */ 67#include <linux/ioport.h> /* For io-port access */
68#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */ 68#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */
69#include <linux/sched.h> /* TASK_INTERRUPTIBLE, set_current_state() and friends */
70#include <linux/slab.h> /* For kmalloc */
71 69
72#include <asm/uaccess.h> /* For copy_to_user/put_user/... */ 70#include <asm/uaccess.h> /* For copy_to_user/put_user/... */
73#include <asm/io.h> /* For inb/outb/... */ 71#include <asm/io.h> /* For inb/outb/... */
74 72
75/* Module and version information */ 73/* Module and version information */
76#define WATCHDOG_VERSION "1.16" 74#define WATCHDOG_VERSION "1.17"
77#define WATCHDOG_DATE "03 Jan 2006" 75#define WATCHDOG_DATE "12 Feb 2006"
78#define WATCHDOG_DRIVER_NAME "ISA-PC Watchdog" 76#define WATCHDOG_DRIVER_NAME "ISA-PC Watchdog"
79#define WATCHDOG_NAME "pcwd" 77#define WATCHDOG_NAME "pcwd"
80#define PFX WATCHDOG_NAME ": " 78#define PFX WATCHDOG_NAME ": "
@@ -96,15 +94,19 @@
96 * PCI-PC Watchdog card. 94 * PCI-PC Watchdog card.
97*/ 95*/
98/* Port 1 : Control Status #1 for the PC Watchdog card, revision A. */ 96/* Port 1 : Control Status #1 for the PC Watchdog card, revision A. */
99#define WD_WDRST 0x01 /* Previously reset state */ 97#define WD_WDRST 0x01 /* Previously reset state */
100#define WD_T110 0x02 /* Temperature overheat sense */ 98#define WD_T110 0x02 /* Temperature overheat sense */
101#define WD_HRTBT 0x04 /* Heartbeat sense */ 99#define WD_HRTBT 0x04 /* Heartbeat sense */
102#define WD_RLY2 0x08 /* External relay triggered */ 100#define WD_RLY2 0x08 /* External relay triggered */
103#define WD_SRLY2 0x80 /* Software external relay triggered */ 101#define WD_SRLY2 0x80 /* Software external relay triggered */
104/* Port 1 : Control Status #1 for the PC Watchdog card, revision C. */ 102/* Port 1 : Control Status #1 for the PC Watchdog card, revision C. */
105#define WD_REVC_WTRP 0x01 /* Watchdog Trip status */ 103#define WD_REVC_WTRP 0x01 /* Watchdog Trip status */
106#define WD_REVC_HRBT 0x02 /* Watchdog Heartbeat */ 104#define WD_REVC_HRBT 0x02 /* Watchdog Heartbeat */
107#define WD_REVC_TTRP 0x04 /* Temperature Trip status */ 105#define WD_REVC_TTRP 0x04 /* Temperature Trip status */
106#define WD_REVC_RL2A 0x08 /* Relay 2 activated by on-board processor */
107#define WD_REVC_RL1A 0x10 /* Relay 1 active */
108#define WD_REVC_R2DS 0x40 /* Relay 2 disable */
109#define WD_REVC_RLY2 0x80 /* Relay 2 activated? */
108/* Port 2 : Control Status #2 */ 110/* Port 2 : Control Status #2 */
109#define WD_WDIS 0x10 /* Watchdog Disabled */ 111#define WD_WDIS 0x10 /* Watchdog Disabled */
110#define WD_ENTP 0x20 /* Watchdog Enable Temperature Trip */ 112#define WD_ENTP 0x20 /* Watchdog Enable Temperature Trip */
@@ -122,9 +124,14 @@
122#define CMD_ISA_VERSION_HUNDRETH 0x03 124#define CMD_ISA_VERSION_HUNDRETH 0x03
123#define CMD_ISA_VERSION_MINOR 0x04 125#define CMD_ISA_VERSION_MINOR 0x04
124#define CMD_ISA_SWITCH_SETTINGS 0x05 126#define CMD_ISA_SWITCH_SETTINGS 0x05
127#define CMD_ISA_RESET_PC 0x06
128#define CMD_ISA_ARM_0 0x07
129#define CMD_ISA_ARM_30 0x08
130#define CMD_ISA_ARM_60 0x09
125#define CMD_ISA_DELAY_TIME_2SECS 0x0A 131#define CMD_ISA_DELAY_TIME_2SECS 0x0A
126#define CMD_ISA_DELAY_TIME_4SECS 0x0B 132#define CMD_ISA_DELAY_TIME_4SECS 0x0B
127#define CMD_ISA_DELAY_TIME_8SECS 0x0C 133#define CMD_ISA_DELAY_TIME_8SECS 0x0C
134#define CMD_ISA_RESET_RELAYS 0x0D
128 135
129/* 136/*
130 * We are using an kernel timer to do the pinging of the watchdog 137 * We are using an kernel timer to do the pinging of the watchdog
@@ -142,6 +149,7 @@ static atomic_t open_allowed = ATOMIC_INIT(1);
142static char expect_close; 149static char expect_close;
143static int temp_panic; 150static int temp_panic;
144static struct { /* this is private data for each ISA-PC watchdog card */ 151static struct { /* this is private data for each ISA-PC watchdog card */
152 char fw_ver_str[6]; /* The cards firmware version */
145 int revision; /* The card's revision */ 153 int revision; /* The card's revision */
146 int supports_temp; /* Wether or not the card has a temperature device */ 154 int supports_temp; /* Wether or not the card has a temperature device */
147 int command_mode; /* Wether or not the card is in command mode */ 155 int command_mode; /* Wether or not the card is in command mode */
@@ -153,6 +161,13 @@ static struct { /* this is private data for each ISA-PC watchdog card */
153} pcwd_private; 161} pcwd_private;
154 162
155/* module parameters */ 163/* module parameters */
164#define QUIET 0 /* Default */
165#define VERBOSE 1 /* Verbose */
166#define DEBUG 2 /* print fancy stuff too */
167static int debug = QUIET;
168module_param(debug, int, 0);
169MODULE_PARM_DESC(debug, "Debug level: 0=Quiet, 1=Verbose, 2=Debug (default=0)");
170
156#define WATCHDOG_HEARTBEAT 60 /* 60 sec default heartbeat */ 171#define WATCHDOG_HEARTBEAT 60 /* 60 sec default heartbeat */
157static int heartbeat = WATCHDOG_HEARTBEAT; 172static int heartbeat = WATCHDOG_HEARTBEAT;
158module_param(heartbeat, int, 0); 173module_param(heartbeat, int, 0);
@@ -172,6 +187,10 @@ static int send_isa_command(int cmd)
172 int control_status; 187 int control_status;
173 int port0, last_port0; /* Double read for stabilising */ 188 int port0, last_port0; /* Double read for stabilising */
174 189
190 if (debug >= DEBUG)
191 printk(KERN_DEBUG PFX "sending following data cmd=0x%02x\n",
192 cmd);
193
175 /* The WCMD bit must be 1 and the command is only 4 bits in size */ 194 /* The WCMD bit must be 1 and the command is only 4 bits in size */
176 control_status = (cmd & 0x0F) | WD_WCMD; 195 control_status = (cmd & 0x0F) | WD_WCMD;
177 outb_p(control_status, pcwd_private.io_addr + 2); 196 outb_p(control_status, pcwd_private.io_addr + 2);
@@ -188,6 +207,10 @@ static int send_isa_command(int cmd)
188 udelay (250); 207 udelay (250);
189 } 208 }
190 209
210 if (debug >= DEBUG)
211 printk(KERN_DEBUG PFX "received following data for cmd=0x%02x: port0=0x%02x last_port0=0x%02x\n",
212 cmd, port0, last_port0);
213
191 return port0; 214 return port0;
192} 215}
193 216
@@ -214,6 +237,10 @@ static int set_command_mode(void)
214 spin_unlock(&pcwd_private.io_lock); 237 spin_unlock(&pcwd_private.io_lock);
215 pcwd_private.command_mode = found; 238 pcwd_private.command_mode = found;
216 239
240 if (debug >= DEBUG)
241 printk(KERN_DEBUG PFX "command_mode=%d\n",
242 pcwd_private.command_mode);
243
217 return(found); 244 return(found);
218} 245}
219 246
@@ -226,6 +253,10 @@ static void unset_command_mode(void)
226 spin_unlock(&pcwd_private.io_lock); 253 spin_unlock(&pcwd_private.io_lock);
227 254
228 pcwd_private.command_mode = 0; 255 pcwd_private.command_mode = 0;
256
257 if (debug >= DEBUG)
258 printk(KERN_DEBUG PFX "command_mode=%d\n",
259 pcwd_private.command_mode);
229} 260}
230 261
231static inline void pcwd_check_temperature_support(void) 262static inline void pcwd_check_temperature_support(void)
@@ -234,27 +265,22 @@ static inline void pcwd_check_temperature_support(void)
234 pcwd_private.supports_temp = 1; 265 pcwd_private.supports_temp = 1;
235} 266}
236 267
237static inline char *get_firmware(void) 268static inline void pcwd_get_firmware(void)
238{ 269{
239 int one, ten, hund, minor; 270 int one, ten, hund, minor;
240 char *ret;
241 271
242 ret = kmalloc(6, GFP_KERNEL); 272 strcpy(pcwd_private.fw_ver_str, "ERROR");
243 if(ret == NULL)
244 return NULL;
245 273
246 if (set_command_mode()) { 274 if (set_command_mode()) {
247 one = send_isa_command(CMD_ISA_VERSION_INTEGER); 275 one = send_isa_command(CMD_ISA_VERSION_INTEGER);
248 ten = send_isa_command(CMD_ISA_VERSION_TENTH); 276 ten = send_isa_command(CMD_ISA_VERSION_TENTH);
249 hund = send_isa_command(CMD_ISA_VERSION_HUNDRETH); 277 hund = send_isa_command(CMD_ISA_VERSION_HUNDRETH);
250 minor = send_isa_command(CMD_ISA_VERSION_MINOR); 278 minor = send_isa_command(CMD_ISA_VERSION_MINOR);
251 sprintf(ret, "%c.%c%c%c", one, ten, hund, minor); 279 sprintf(pcwd_private.fw_ver_str, "%c.%c%c%c", one, ten, hund, minor);
252 } 280 }
253 else
254 sprintf(ret, "ERROR");
255
256 unset_command_mode(); 281 unset_command_mode();
257 return(ret); 282
283 return;
258} 284}
259 285
260static inline int pcwd_get_option_switches(void) 286static inline int pcwd_get_option_switches(void)
@@ -272,17 +298,15 @@ static inline int pcwd_get_option_switches(void)
272 298
273static void pcwd_show_card_info(void) 299static void pcwd_show_card_info(void)
274{ 300{
275 char *firmware;
276 int option_switches; 301 int option_switches;
277 302
278 /* Get some extra info from the hardware (in command/debug/diag mode) */ 303 /* Get some extra info from the hardware (in command/debug/diag mode) */
279 if (pcwd_private.revision == PCWD_REVISION_A) 304 if (pcwd_private.revision == PCWD_REVISION_A)
280 printk(KERN_INFO PFX "ISA-PC Watchdog (REV.A) detected at port 0x%04x\n", pcwd_private.io_addr); 305 printk(KERN_INFO PFX "ISA-PC Watchdog (REV.A) detected at port 0x%04x\n", pcwd_private.io_addr);
281 else if (pcwd_private.revision == PCWD_REVISION_C) { 306 else if (pcwd_private.revision == PCWD_REVISION_C) {
282 firmware = get_firmware(); 307 pcwd_get_firmware();
283 printk(KERN_INFO PFX "ISA-PC Watchdog (REV.C) detected at port 0x%04x (Firmware version: %s)\n", 308 printk(KERN_INFO PFX "ISA-PC Watchdog (REV.C) detected at port 0x%04x (Firmware version: %s)\n",
284 pcwd_private.io_addr, firmware); 309 pcwd_private.io_addr, pcwd_private.fw_ver_str);
285 kfree(firmware);
286 option_switches = pcwd_get_option_switches(); 310 option_switches = pcwd_get_option_switches();
287 printk(KERN_INFO PFX "Option switches (0x%02x): Temperature Reset Enable=%s, Power On Delay=%s\n", 311 printk(KERN_INFO PFX "Option switches (0x%02x): Temperature Reset Enable=%s, Power On Delay=%s\n",
288 option_switches, 312 option_switches,
@@ -362,6 +386,10 @@ static int pcwd_start(void)
362 return -EIO; 386 return -EIO;
363 } 387 }
364 } 388 }
389
390 if (debug >= VERBOSE)
391 printk(KERN_DEBUG PFX "Watchdog started\n");
392
365 return 0; 393 return 0;
366} 394}
367 395
@@ -386,6 +414,10 @@ static int pcwd_stop(void)
386 return -EIO; 414 return -EIO;
387 } 415 }
388 } 416 }
417
418 if (debug >= VERBOSE)
419 printk(KERN_DEBUG PFX "Watchdog stopped\n");
420
389 return 0; 421 return 0;
390} 422}
391 423
@@ -393,6 +425,10 @@ static int pcwd_keepalive(void)
393{ 425{
394 /* user land ping */ 426 /* user land ping */
395 pcwd_private.next_heartbeat = jiffies + (heartbeat * HZ); 427 pcwd_private.next_heartbeat = jiffies + (heartbeat * HZ);
428
429 if (debug >= DEBUG)
430 printk(KERN_DEBUG PFX "Watchdog keepalive signal send\n");
431
396 return 0; 432 return 0;
397} 433}
398 434
@@ -402,12 +438,17 @@ static int pcwd_set_heartbeat(int t)
402 return -EINVAL; 438 return -EINVAL;
403 439
404 heartbeat = t; 440 heartbeat = t;
441
442 if (debug >= VERBOSE)
443 printk(KERN_DEBUG PFX "New heartbeat: %d\n",
444 heartbeat);
445
405 return 0; 446 return 0;
406} 447}
407 448
408static int pcwd_get_status(int *status) 449static int pcwd_get_status(int *status)
409{ 450{
410 int card_status; 451 int control_status;
411 452
412 *status=0; 453 *status=0;
413 spin_lock(&pcwd_private.io_lock); 454 spin_lock(&pcwd_private.io_lock);
@@ -415,37 +456,39 @@ static int pcwd_get_status(int *status)
415 /* Rev A cards return status information from 456 /* Rev A cards return status information from
416 * the base register, which is used for the 457 * the base register, which is used for the
417 * temperature in other cards. */ 458 * temperature in other cards. */
418 card_status = inb(pcwd_private.io_addr); 459 control_status = inb(pcwd_private.io_addr);
419 else { 460 else {
420 /* Rev C cards return card status in the base 461 /* Rev C cards return card status in the base
421 * address + 1 register. And use different bits 462 * address + 1 register. And use different bits
422 * to indicate a card initiated reset, and an 463 * to indicate a card initiated reset, and an
423 * over-temperature condition. And the reboot 464 * over-temperature condition. And the reboot
424 * status can be reset. */ 465 * status can be reset. */
425 card_status = inb(pcwd_private.io_addr + 1); 466 control_status = inb(pcwd_private.io_addr + 1);
426 } 467 }
427 spin_unlock(&pcwd_private.io_lock); 468 spin_unlock(&pcwd_private.io_lock);
428 469
429 if (pcwd_private.revision == PCWD_REVISION_A) { 470 if (pcwd_private.revision == PCWD_REVISION_A) {
430 if (card_status & WD_WDRST) 471 if (control_status & WD_WDRST)
431 *status |= WDIOF_CARDRESET; 472 *status |= WDIOF_CARDRESET;
432 473
433 if (card_status & WD_T110) { 474 if (control_status & WD_T110) {
434 *status |= WDIOF_OVERHEAT; 475 *status |= WDIOF_OVERHEAT;
435 if (temp_panic) { 476 if (temp_panic) {
436 printk (KERN_INFO PFX "Temperature overheat trip!\n"); 477 printk (KERN_INFO PFX "Temperature overheat trip!\n");
437 kernel_power_off(); 478 kernel_power_off();
479 /* or should we just do a: panic(PFX "Temperature overheat trip!\n"); */
438 } 480 }
439 } 481 }
440 } else { 482 } else {
441 if (card_status & WD_REVC_WTRP) 483 if (control_status & WD_REVC_WTRP)
442 *status |= WDIOF_CARDRESET; 484 *status |= WDIOF_CARDRESET;
443 485
444 if (card_status & WD_REVC_TTRP) { 486 if (control_status & WD_REVC_TTRP) {
445 *status |= WDIOF_OVERHEAT; 487 *status |= WDIOF_OVERHEAT;
446 if (temp_panic) { 488 if (temp_panic) {
447 printk (KERN_INFO PFX "Temperature overheat trip!\n"); 489 printk (KERN_INFO PFX "Temperature overheat trip!\n");
448 kernel_power_off(); 490 kernel_power_off();
491 /* or should we just do a: panic(PFX "Temperature overheat trip!\n"); */
449 } 492 }
450 } 493 }
451 } 494 }
@@ -455,9 +498,25 @@ static int pcwd_get_status(int *status)
455 498
456static int pcwd_clear_status(void) 499static int pcwd_clear_status(void)
457{ 500{
501 int control_status;
502
458 if (pcwd_private.revision == PCWD_REVISION_C) { 503 if (pcwd_private.revision == PCWD_REVISION_C) {
459 spin_lock(&pcwd_private.io_lock); 504 spin_lock(&pcwd_private.io_lock);
460 outb_p(0x00, pcwd_private.io_addr + 1); /* clear reset status */ 505
506 if (debug >= VERBOSE)
507 printk(KERN_INFO PFX "clearing watchdog trip status\n");
508
509 control_status = inb_p(pcwd_private.io_addr + 1);
510
511 if (debug >= DEBUG) {
512 printk(KERN_DEBUG PFX "status was: 0x%02x\n", control_status);
513 printk(KERN_DEBUG PFX "sending: 0x%02x\n",
514 (control_status & WD_REVC_R2DS));
515 }
516
517 /* clear reset status & Keep Relay 2 disable state as it is */
518 outb_p((control_status & WD_REVC_R2DS), pcwd_private.io_addr + 1);
519
461 spin_unlock(&pcwd_private.io_lock); 520 spin_unlock(&pcwd_private.io_lock);
462 } 521 }
463 return 0; 522 return 0;
@@ -481,6 +540,11 @@ static int pcwd_get_temperature(int *temperature)
481 *temperature = ((inb(pcwd_private.io_addr)) * 9 / 5) + 32; 540 *temperature = ((inb(pcwd_private.io_addr)) * 9 / 5) + 32;
482 spin_unlock(&pcwd_private.io_lock); 541 spin_unlock(&pcwd_private.io_lock);
483 542
543 if (debug >= DEBUG) {
544 printk(KERN_DEBUG PFX "temperature is: %d F\n",
545 *temperature);
546 }
547
484 return 0; 548 return 0;
485} 549}
486 550
@@ -599,6 +663,8 @@ static ssize_t pcwd_write(struct file *file, const char __user *buf, size_t len,
599static int pcwd_open(struct inode *inode, struct file *file) 663static int pcwd_open(struct inode *inode, struct file *file)
600{ 664{
601 if (!atomic_dec_and_test(&open_allowed) ) { 665 if (!atomic_dec_and_test(&open_allowed) ) {
666 if (debug >= VERBOSE)
667 printk(KERN_ERR PFX "Attempt to open already opened device.\n");
602 atomic_inc( &open_allowed ); 668 atomic_inc( &open_allowed );
603 return -EBUSY; 669 return -EBUSY;
604 } 670 }
@@ -922,7 +988,8 @@ static void __exit pcwd_cleanup_module(void)
922{ 988{
923 if (pcwd_private.io_addr) 989 if (pcwd_private.io_addr)
924 pcwatchdog_exit(); 990 pcwatchdog_exit();
925 return; 991
992 printk(KERN_INFO PFX "Watchdog Module Unloaded.\n");
926} 993}
927 994
928module_init(pcwd_init_module); 995module_init(pcwd_init_module);
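pcwd_get_firmware() now fills the fixed fw_ver_str buffer inside the card's private data instead of returning a kmalloc()ed string that the caller had to kfree(), which also removes an allocation from the probe path. The shape of the change, with the card-command helpers reduced to hypothetical stand-ins:

    static struct {
            char fw_ver_str[6];                     /* "x.yzm" plus NUL */
            /* ... */
    } card_private;

    static void card_get_firmware(void)
    {
            strcpy(card_private.fw_ver_str, "ERROR");       /* fallback value */

            if (enter_command_mode())                       /* hypothetical helper */
                    snprintf(card_private.fw_ver_str,
                             sizeof(card_private.fw_ver_str), "%c.%c%c%c",
                             query_card(CMD_VER_INTEGER),   /* hypothetical helpers */
                             query_card(CMD_VER_TENTH),     /* and command constants */
                             query_card(CMD_VER_HUNDREDTH),
                             query_card(CMD_VER_MINOR));
            exit_command_mode();                            /* hypothetical helper */
    }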
diff --git a/drivers/char/watchdog/pcwd_usb.c b/drivers/char/watchdog/pcwd_usb.c
index 2700c5c45b8a..3fdfda9324fa 100644
--- a/drivers/char/watchdog/pcwd_usb.c
+++ b/drivers/char/watchdog/pcwd_usb.c
@@ -705,7 +705,8 @@ err_out_misc_deregister:
705err_out_unregister_reboot: 705err_out_unregister_reboot:
706 unregister_reboot_notifier(&usb_pcwd_notifier); 706 unregister_reboot_notifier(&usb_pcwd_notifier);
707error: 707error:
708 usb_pcwd_delete (usb_pcwd); 708 if (usb_pcwd)
709 usb_pcwd_delete(usb_pcwd);
709 usb_pcwd_device = NULL; 710 usb_pcwd_device = NULL;
710 return retval; 711 return retval;
711} 712}
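The pcwd_usb fix guards the shared error label: it can now be reached before the private structure exists, so the cleanup helper must only run on a non-NULL pointer. A minimal illustration of the pattern (kernel-style fragment, all names illustrative):

    static int demo_probe(void)
    {
            struct demo *d = NULL;
            int retval;

            retval = demo_precheck();       /* may fail before d is allocated */
            if (retval)
                    goto error;

            d = kzalloc(sizeof(*d), GFP_KERNEL);
            if (!d) {
                    retval = -ENOMEM;
                    goto error;
            }
            /* ... further setup, each failure also jumping to error ... */
            return 0;

    error:
            if (d)                          /* the guard added by the hunk above */
                    demo_delete(d);
            return retval;
    }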
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index b582d0cdc24f..4f0898400c6d 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -71,7 +71,7 @@ config EDAC_E7XXX
71 71
72config EDAC_E752X 72config EDAC_E752X
73 tristate "Intel e752x (e7520, e7525, e7320)" 73 tristate "Intel e752x (e7520, e7525, e7320)"
74 depends on EDAC_MM_EDAC && PCI && X86 74 depends on EDAC_MM_EDAC && PCI && X86 && HOTPLUG
75 help 75 help
76 Support for error detection and correction on the Intel 76 Support for error detection and correction on the Intel
77 E7520, E7525, E7320 server chipsets. 77 E7520, E7525, E7320 server chipsets.
diff --git a/drivers/hwmon/hdaps.c b/drivers/hwmon/hdaps.c
index 7636c1a58f9c..23a9e1ea8e32 100644
--- a/drivers/hwmon/hdaps.c
+++ b/drivers/hwmon/hdaps.c
@@ -33,7 +33,6 @@
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/timer.h> 34#include <linux/timer.h>
35#include <linux/dmi.h> 35#include <linux/dmi.h>
36#include <linux/mutex.h>
37#include <asm/io.h> 36#include <asm/io.h>
38 37
39#define HDAPS_LOW_PORT 0x1600 /* first port used by hdaps */ 38#define HDAPS_LOW_PORT 0x1600 /* first port used by hdaps */
@@ -71,10 +70,10 @@ static u8 km_activity;
71static int rest_x; 70static int rest_x;
72static int rest_y; 71static int rest_y;
73 72
74static DEFINE_MUTEX(hdaps_mutex); 73static DECLARE_MUTEX(hdaps_sem);
75 74
76/* 75/*
77 * __get_latch - Get the value from a given port. Callers must hold hdaps_mutex. 76 * __get_latch - Get the value from a given port. Callers must hold hdaps_sem.
78 */ 77 */
79static inline u8 __get_latch(u16 port) 78static inline u8 __get_latch(u16 port)
80{ 79{
@@ -83,7 +82,7 @@ static inline u8 __get_latch(u16 port)
83 82
84/* 83/*
85 * __check_latch - Check a port latch for a given value. Returns zero if the 84 * __check_latch - Check a port latch for a given value. Returns zero if the
86 * port contains the given value. Callers must hold hdaps_mutex. 85 * port contains the given value. Callers must hold hdaps_sem.
87 */ 86 */
88static inline int __check_latch(u16 port, u8 val) 87static inline int __check_latch(u16 port, u8 val)
89{ 88{
@@ -94,7 +93,7 @@ static inline int __check_latch(u16 port, u8 val)
94 93
95/* 94/*
96 * __wait_latch - Wait up to 100us for a port latch to get a certain value, 95 * __wait_latch - Wait up to 100us for a port latch to get a certain value,
97 * returning zero if the value is obtained. Callers must hold hdaps_mutex. 96 * returning zero if the value is obtained. Callers must hold hdaps_sem.
98 */ 97 */
99static int __wait_latch(u16 port, u8 val) 98static int __wait_latch(u16 port, u8 val)
100{ 99{
@@ -111,7 +110,7 @@ static int __wait_latch(u16 port, u8 val)
111 110
112/* 111/*
113 * __device_refresh - request a refresh from the accelerometer. Does not wait 112 * __device_refresh - request a refresh from the accelerometer. Does not wait
114 * for refresh to complete. Callers must hold hdaps_mutex. 113 * for refresh to complete. Callers must hold hdaps_sem.
115 */ 114 */
116static void __device_refresh(void) 115static void __device_refresh(void)
117{ 116{
@@ -125,7 +124,7 @@ static void __device_refresh(void)
125/* 124/*
126 * __device_refresh_sync - request a synchronous refresh from the 125 * __device_refresh_sync - request a synchronous refresh from the
127 * accelerometer. We wait for the refresh to complete. Returns zero if 126 * accelerometer. We wait for the refresh to complete. Returns zero if
128 * successful and nonzero on error. Callers must hold hdaps_mutex. 127 * successful and nonzero on error. Callers must hold hdaps_sem.
129 */ 128 */
130static int __device_refresh_sync(void) 129static int __device_refresh_sync(void)
131{ 130{
@@ -135,7 +134,7 @@ static int __device_refresh_sync(void)
135 134
136/* 135/*
137 * __device_complete - indicate to the accelerometer that we are done reading 136 * __device_complete - indicate to the accelerometer that we are done reading
138 * data, and then initiate an async refresh. Callers must hold hdaps_mutex. 137 * data, and then initiate an async refresh. Callers must hold hdaps_sem.
139 */ 138 */
140static inline void __device_complete(void) 139static inline void __device_complete(void)
141{ 140{
@@ -153,7 +152,7 @@ static int hdaps_readb_one(unsigned int port, u8 *val)
153{ 152{
154 int ret; 153 int ret;
155 154
156 mutex_lock(&hdaps_mutex); 155 down(&hdaps_sem);
157 156
158 /* do a sync refresh -- we need to be sure that we read fresh data */ 157 /* do a sync refresh -- we need to be sure that we read fresh data */
159 ret = __device_refresh_sync(); 158 ret = __device_refresh_sync();
@@ -164,7 +163,7 @@ static int hdaps_readb_one(unsigned int port, u8 *val)
164 __device_complete(); 163 __device_complete();
165 164
166out: 165out:
167 mutex_unlock(&hdaps_mutex); 166 up(&hdaps_sem);
168 return ret; 167 return ret;
169} 168}
170 169
@@ -199,9 +198,9 @@ static int hdaps_read_pair(unsigned int port1, unsigned int port2,
199{ 198{
200 int ret; 199 int ret;
201 200
202 mutex_lock(&hdaps_mutex); 201 down(&hdaps_sem);
203 ret = __hdaps_read_pair(port1, port2, val1, val2); 202 ret = __hdaps_read_pair(port1, port2, val1, val2);
204 mutex_unlock(&hdaps_mutex); 203 up(&hdaps_sem);
205 204
206 return ret; 205 return ret;
207} 206}
@@ -214,7 +213,7 @@ static int hdaps_device_init(void)
214{ 213{
215 int total, ret = -ENXIO; 214 int total, ret = -ENXIO;
216 215
217 mutex_lock(&hdaps_mutex); 216 down(&hdaps_sem);
218 217
219 outb(0x13, 0x1610); 218 outb(0x13, 0x1610);
220 outb(0x01, 0x161f); 219 outb(0x01, 0x161f);
@@ -280,7 +279,7 @@ static int hdaps_device_init(void)
280 } 279 }
281 280
282out: 281out:
283 mutex_unlock(&hdaps_mutex); 282 up(&hdaps_sem);
284 return ret; 283 return ret;
285} 284}
286 285
@@ -314,7 +313,7 @@ static struct platform_driver hdaps_driver = {
314}; 313};
315 314
316/* 315/*
317 * hdaps_calibrate - Set our "resting" values. Callers must hold hdaps_mutex. 316 * hdaps_calibrate - Set our "resting" values. Callers must hold hdaps_sem.
318 */ 317 */
319static void hdaps_calibrate(void) 318static void hdaps_calibrate(void)
320{ 319{
@@ -326,7 +325,7 @@ static void hdaps_mousedev_poll(unsigned long unused)
326 int x, y; 325 int x, y;
327 326
328 /* Cannot sleep. Try nonblockingly. If we fail, try again later. */ 327 /* Cannot sleep. Try nonblockingly. If we fail, try again later. */
329 if (!mutex_trylock(&hdaps_mutex)) { 328 if (down_trylock(&hdaps_sem)) {
330 mod_timer(&hdaps_timer,jiffies + HDAPS_POLL_PERIOD); 329 mod_timer(&hdaps_timer,jiffies + HDAPS_POLL_PERIOD);
331 return; 330 return;
332 } 331 }
@@ -341,7 +340,7 @@ static void hdaps_mousedev_poll(unsigned long unused)
341 mod_timer(&hdaps_timer, jiffies + HDAPS_POLL_PERIOD); 340 mod_timer(&hdaps_timer, jiffies + HDAPS_POLL_PERIOD);
342 341
343out: 342out:
344 mutex_unlock(&hdaps_mutex); 343 up(&hdaps_sem);
345} 344}
346 345
347 346
@@ -421,9 +420,9 @@ static ssize_t hdaps_calibrate_store(struct device *dev,
421 struct device_attribute *attr, 420 struct device_attribute *attr,
422 const char *buf, size_t count) 421 const char *buf, size_t count)
423{ 422{
424 mutex_lock(&hdaps_mutex); 423 down(&hdaps_sem);
425 hdaps_calibrate(); 424 hdaps_calibrate();
426 mutex_unlock(&hdaps_mutex); 425 up(&hdaps_sem);
427 426
428 return count; 427 return count;
429} 428}
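The hdaps.c hunks move the serialisation back from the mutex API to a counting semaphore used as a mutex. Functionally the two are interchangeable here; the one subtlety is the inverted return convention of the try-lock variants, visible in hdaps_mousedev_poll(). Side by side, with 2.6.16-era primitives:

    #include <linux/mutex.h>
    #include <asm/semaphore.h>

    static DEFINE_MUTEX(demo_mutex);        /* mutex API */
    static DECLARE_MUTEX(demo_sem);         /* semaphore initialised to 1 */

    static void demo(void)
    {
            mutex_lock(&demo_mutex);
            /* ... critical section ... */
            mutex_unlock(&demo_mutex);

            down(&demo_sem);
            /* ... critical section ... */
            up(&demo_sem);

            /* non-blocking attempts:
             *   mutex_trylock() returns 1 when the lock was taken,
             *   down_trylock() returns 0 when the semaphore was taken.
             */
    }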
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index ccf528d733bf..a5017de72da5 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -61,6 +61,7 @@
61#include <linux/slab.h> 61#include <linux/slab.h>
62#include <linux/delay.h> 62#include <linux/delay.h>
63#include <linux/mutex.h> 63#include <linux/mutex.h>
64#include <linux/leds.h>
64 65
65#define _IDE_DISK 66#define _IDE_DISK
66 67
@@ -317,6 +318,8 @@ static ide_startstop_t ide_do_rw_disk (ide_drive_t *drive, struct request *rq, s
317 return ide_stopped; 318 return ide_stopped;
318 } 319 }
319 320
321 ledtrig_ide_activity();
322
320 pr_debug("%s: %sing: block=%llu, sectors=%lu, buffer=0x%08lx\n", 323 pr_debug("%s: %sing: block=%llu, sectors=%lu, buffer=0x%08lx\n",
321 drive->name, rq_data_dir(rq) == READ ? "read" : "writ", 324 drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
322 (unsigned long long)block, rq->nr_sectors, 325 (unsigned long long)block, rq->nr_sectors,
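
ide_do_rw_disk() now pulses the IDE activity LED trigger from the new LED
class/trigger framework added elsewhere in this merge (see
Documentation/leds-class.txt). A rough sketch of how such a simple trigger is
provided, with illustrative names; this is not the actual ledtrig-ide-disk
implementation:

#include <linux/leds.h>

DEFINE_LED_TRIGGER(example_ide_trigger);

/* called from the block path on every IDE read/write */
static void example_ledtrig_ide_activity(void)
{
	/* light any LED bound to this trigger; a timer turns it off again */
	led_trigger_event(example_ide_trigger, LED_FULL);
}

static int __init example_ledtrig_ide_init(void)
{
	led_trigger_register_simple("example-ide-disk", &example_ide_trigger);
	return 0;
}
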
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 0606bd2f6020..9233b8109a0f 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -375,7 +375,13 @@ static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
375 } 375 }
376 } 376 }
377 377
378 ide_end_request(drive, 1, rq->hard_nr_sectors); 378 if (rq->rq_disk) {
379 ide_driver_t *drv;
380
 381			drv = *(ide_driver_t **)rq->rq_disk->private_data;
382 drv->end_request(drive, 1, rq->hard_nr_sectors);
383 } else
384 ide_end_request(drive, 1, rq->hard_nr_sectors);
379} 385}
380 386
381/* 387/*
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index 6213bd3caee5..4961f1e764a7 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -81,14 +81,14 @@ static const char ide_major[] = {
81}; 81};
82 82
83typedef struct ide_info_t { 83typedef struct ide_info_t {
84 dev_link_t link; 84 struct pcmcia_device *p_dev;
85 int ndev; 85 int ndev;
86 dev_node_t node; 86 dev_node_t node;
87 int hd; 87 int hd;
88} ide_info_t; 88} ide_info_t;
89 89
90static void ide_release(dev_link_t *); 90static void ide_release(struct pcmcia_device *);
91static void ide_config(dev_link_t *); 91static int ide_config(struct pcmcia_device *);
92 92
93static void ide_detach(struct pcmcia_device *p_dev); 93static void ide_detach(struct pcmcia_device *p_dev);
94 94
@@ -103,10 +103,9 @@ static void ide_detach(struct pcmcia_device *p_dev);
103 103
104======================================================================*/ 104======================================================================*/
105 105
106static int ide_attach(struct pcmcia_device *p_dev) 106static int ide_probe(struct pcmcia_device *link)
107{ 107{
108 ide_info_t *info; 108 ide_info_t *info;
109 dev_link_t *link;
110 109
111 DEBUG(0, "ide_attach()\n"); 110 DEBUG(0, "ide_attach()\n");
112 111
@@ -114,7 +113,9 @@ static int ide_attach(struct pcmcia_device *p_dev)
114 info = kzalloc(sizeof(*info), GFP_KERNEL); 113 info = kzalloc(sizeof(*info), GFP_KERNEL);
115 if (!info) 114 if (!info)
116 return -ENOMEM; 115 return -ENOMEM;
117 link = &info->link; link->priv = info; 116
117 info->p_dev = link;
118 link->priv = info;
118 119
119 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 120 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
120 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 121 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
@@ -122,16 +123,9 @@ static int ide_attach(struct pcmcia_device *p_dev)
122 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 123 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
123 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 124 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
124 link->conf.Attributes = CONF_ENABLE_IRQ; 125 link->conf.Attributes = CONF_ENABLE_IRQ;
125 link->conf.Vcc = 50;
126 link->conf.IntType = INT_MEMORY_AND_IO; 126 link->conf.IntType = INT_MEMORY_AND_IO;
127 127
128 link->handle = p_dev; 128 return ide_config(link);
129 p_dev->instance = link;
130
131 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
132 ide_config(link);
133
134 return 0;
135} /* ide_attach */ 129} /* ide_attach */
136 130
137/*====================================================================== 131/*======================================================================
@@ -143,14 +137,11 @@ static int ide_attach(struct pcmcia_device *p_dev)
143 137
144======================================================================*/ 138======================================================================*/
145 139
146static void ide_detach(struct pcmcia_device *p_dev) 140static void ide_detach(struct pcmcia_device *link)
147{ 141{
148 dev_link_t *link = dev_to_instance(p_dev);
149
150 DEBUG(0, "ide_detach(0x%p)\n", link); 142 DEBUG(0, "ide_detach(0x%p)\n", link);
151 143
152 if (link->state & DEV_CONFIG) 144 ide_release(link);
153 ide_release(link);
154 145
155 kfree(link->priv); 146 kfree(link->priv);
156} /* ide_detach */ 147} /* ide_detach */
@@ -177,9 +168,8 @@ static int idecs_register(unsigned long io, unsigned long ctl, unsigned long irq
177#define CS_CHECK(fn, ret) \ 168#define CS_CHECK(fn, ret) \
178do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 169do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
179 170
180static void ide_config(dev_link_t *link) 171static int ide_config(struct pcmcia_device *link)
181{ 172{
182 client_handle_t handle = link->handle;
183 ide_info_t *info = link->priv; 173 ide_info_t *info = link->priv;
184 tuple_t tuple; 174 tuple_t tuple;
185 struct { 175 struct {
@@ -203,34 +193,30 @@ static void ide_config(dev_link_t *link)
203 tuple.TupleDataMax = 255; 193 tuple.TupleDataMax = 255;
204 tuple.Attributes = 0; 194 tuple.Attributes = 0;
205 tuple.DesiredTuple = CISTPL_CONFIG; 195 tuple.DesiredTuple = CISTPL_CONFIG;
206 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 196 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
207 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 197 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
208 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &stk->parse)); 198 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &stk->parse));
209 link->conf.ConfigBase = stk->parse.config.base; 199 link->conf.ConfigBase = stk->parse.config.base;
210 link->conf.Present = stk->parse.config.rmask[0]; 200 link->conf.Present = stk->parse.config.rmask[0];
211 201
212 tuple.DesiredTuple = CISTPL_MANFID; 202 tuple.DesiredTuple = CISTPL_MANFID;
213 if (!pcmcia_get_first_tuple(handle, &tuple) && 203 if (!pcmcia_get_first_tuple(link, &tuple) &&
214 !pcmcia_get_tuple_data(handle, &tuple) && 204 !pcmcia_get_tuple_data(link, &tuple) &&
215 !pcmcia_parse_tuple(handle, &tuple, &stk->parse)) 205 !pcmcia_parse_tuple(link, &tuple, &stk->parse))
216 is_kme = ((stk->parse.manfid.manf == MANFID_KME) && 206 is_kme = ((stk->parse.manfid.manf == MANFID_KME) &&
217 ((stk->parse.manfid.card == PRODID_KME_KXLC005_A) || 207 ((stk->parse.manfid.card == PRODID_KME_KXLC005_A) ||
218 (stk->parse.manfid.card == PRODID_KME_KXLC005_B))); 208 (stk->parse.manfid.card == PRODID_KME_KXLC005_B)));
219 209
220 /* Configure card */
221 link->state |= DEV_CONFIG;
222
223 /* Not sure if this is right... look up the current Vcc */ 210 /* Not sure if this is right... look up the current Vcc */
224 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(handle, &stk->conf)); 211 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link, &stk->conf));
225 link->conf.Vcc = stk->conf.Vcc;
226 212
227 pass = io_base = ctl_base = 0; 213 pass = io_base = ctl_base = 0;
228 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 214 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
229 tuple.Attributes = 0; 215 tuple.Attributes = 0;
230 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 216 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
231 while (1) { 217 while (1) {
232 if (pcmcia_get_tuple_data(handle, &tuple) != 0) goto next_entry; 218 if (pcmcia_get_tuple_data(link, &tuple) != 0) goto next_entry;
233 if (pcmcia_parse_tuple(handle, &tuple, &stk->parse) != 0) goto next_entry; 219 if (pcmcia_parse_tuple(link, &tuple, &stk->parse) != 0) goto next_entry;
234 220
235 /* Check for matching Vcc, unless we're desperate */ 221 /* Check for matching Vcc, unless we're desperate */
236 if (!pass) { 222 if (!pass) {
@@ -244,10 +230,10 @@ static void ide_config(dev_link_t *link)
244 } 230 }
245 231
246 if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM)) 232 if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM))
247 link->conf.Vpp1 = link->conf.Vpp2 = 233 link->conf.Vpp =
248 cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000; 234 cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000;
249 else if (stk->dflt.vpp1.present & (1 << CISTPL_POWER_VNOM)) 235 else if (stk->dflt.vpp1.present & (1 << CISTPL_POWER_VNOM))
250 link->conf.Vpp1 = link->conf.Vpp2 = 236 link->conf.Vpp =
251 stk->dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000; 237 stk->dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000;
252 238
253 if ((cfg->io.nwin > 0) || (stk->dflt.io.nwin > 0)) { 239 if ((cfg->io.nwin > 0) || (stk->dflt.io.nwin > 0)) {
@@ -261,14 +247,14 @@ static void ide_config(dev_link_t *link)
261 link->io.NumPorts1 = 8; 247 link->io.NumPorts1 = 8;
262 link->io.BasePort2 = io->win[1].base; 248 link->io.BasePort2 = io->win[1].base;
263 link->io.NumPorts2 = (is_kme) ? 2 : 1; 249 link->io.NumPorts2 = (is_kme) ? 2 : 1;
264 if (pcmcia_request_io(link->handle, &link->io) != 0) 250 if (pcmcia_request_io(link, &link->io) != 0)
265 goto next_entry; 251 goto next_entry;
266 io_base = link->io.BasePort1; 252 io_base = link->io.BasePort1;
267 ctl_base = link->io.BasePort2; 253 ctl_base = link->io.BasePort2;
268 } else if ((io->nwin == 1) && (io->win[0].len >= 16)) { 254 } else if ((io->nwin == 1) && (io->win[0].len >= 16)) {
269 link->io.NumPorts1 = io->win[0].len; 255 link->io.NumPorts1 = io->win[0].len;
270 link->io.NumPorts2 = 0; 256 link->io.NumPorts2 = 0;
271 if (pcmcia_request_io(link->handle, &link->io) != 0) 257 if (pcmcia_request_io(link, &link->io) != 0)
272 goto next_entry; 258 goto next_entry;
273 io_base = link->io.BasePort1; 259 io_base = link->io.BasePort1;
274 ctl_base = link->io.BasePort1 + 0x0e; 260 ctl_base = link->io.BasePort1 + 0x0e;
@@ -281,16 +267,16 @@ static void ide_config(dev_link_t *link)
281 if (cfg->flags & CISTPL_CFTABLE_DEFAULT) 267 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
282 memcpy(&stk->dflt, cfg, sizeof(stk->dflt)); 268 memcpy(&stk->dflt, cfg, sizeof(stk->dflt));
283 if (pass) { 269 if (pass) {
284 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple)); 270 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
285 } else if (pcmcia_get_next_tuple(handle, &tuple) != 0) { 271 } else if (pcmcia_get_next_tuple(link, &tuple) != 0) {
286 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 272 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
287 memset(&stk->dflt, 0, sizeof(stk->dflt)); 273 memset(&stk->dflt, 0, sizeof(stk->dflt));
288 pass++; 274 pass++;
289 } 275 }
290 } 276 }
291 277
292 CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq)); 278 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
293 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf)); 279 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
294 280
295 /* disable drive interrupts during IDE probe */ 281 /* disable drive interrupts during IDE probe */
296 outb(0x02, ctl_base); 282 outb(0x02, ctl_base);
@@ -301,12 +287,12 @@ static void ide_config(dev_link_t *link)
301 287
302 /* retry registration in case device is still spinning up */ 288 /* retry registration in case device is still spinning up */
303 for (hd = -1, i = 0; i < 10; i++) { 289 for (hd = -1, i = 0; i < 10; i++) {
304 hd = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, handle); 290 hd = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link);
305 if (hd >= 0) break; 291 if (hd >= 0) break;
306 if (link->io.NumPorts1 == 0x20) { 292 if (link->io.NumPorts1 == 0x20) {
307 outb(0x02, ctl_base + 0x10); 293 outb(0x02, ctl_base + 0x10);
308 hd = idecs_register(io_base + 0x10, ctl_base + 0x10, 294 hd = idecs_register(io_base + 0x10, ctl_base + 0x10,
309 link->irq.AssignedIRQ, handle); 295 link->irq.AssignedIRQ, link);
310 if (hd >= 0) { 296 if (hd >= 0) {
311 io_base += 0x10; 297 io_base += 0x10;
312 ctl_base += 0x10; 298 ctl_base += 0x10;
@@ -328,25 +314,23 @@ static void ide_config(dev_link_t *link)
328 info->node.major = ide_major[hd]; 314 info->node.major = ide_major[hd];
329 info->node.minor = 0; 315 info->node.minor = 0;
330 info->hd = hd; 316 info->hd = hd;
331 link->dev = &info->node; 317 link->dev_node = &info->node;
332 printk(KERN_INFO "ide-cs: %s: Vcc = %d.%d, Vpp = %d.%d\n", 318 printk(KERN_INFO "ide-cs: %s: Vpp = %d.%d\n",
333 info->node.dev_name, link->conf.Vcc / 10, link->conf.Vcc % 10, 319 info->node.dev_name, link->conf.Vpp / 10, link->conf.Vpp % 10);
334 link->conf.Vpp1 / 10, link->conf.Vpp1 % 10);
335 320
336 link->state &= ~DEV_CONFIG_PENDING;
337 kfree(stk); 321 kfree(stk);
338 return; 322 return 0;
339 323
340err_mem: 324err_mem:
341 printk(KERN_NOTICE "ide-cs: ide_config failed memory allocation\n"); 325 printk(KERN_NOTICE "ide-cs: ide_config failed memory allocation\n");
342 goto failed; 326 goto failed;
343 327
344cs_failed: 328cs_failed:
345 cs_error(link->handle, last_fn, last_ret); 329 cs_error(link, last_fn, last_ret);
346failed: 330failed:
347 kfree(stk); 331 kfree(stk);
348 ide_release(link); 332 ide_release(link);
349 link->state &= ~DEV_CONFIG_PENDING; 333 return -ENODEV;
350} /* ide_config */ 334} /* ide_config */
351 335
352/*====================================================================== 336/*======================================================================
@@ -357,7 +341,7 @@ failed:
357 341
358======================================================================*/ 342======================================================================*/
359 343
360void ide_release(dev_link_t *link) 344void ide_release(struct pcmcia_device *link)
361{ 345{
362 ide_info_t *info = link->priv; 346 ide_info_t *info = link->priv;
363 347
@@ -369,37 +353,10 @@ void ide_release(dev_link_t *link)
369 ide_unregister(info->hd); 353 ide_unregister(info->hd);
370 } 354 }
371 info->ndev = 0; 355 info->ndev = 0;
372 link->dev = NULL;
373
374 pcmcia_release_configuration(link->handle);
375 pcmcia_release_io(link->handle, &link->io);
376 pcmcia_release_irq(link->handle, &link->irq);
377
378 link->state &= ~DEV_CONFIG;
379 356
357 pcmcia_disable_device(link);
380} /* ide_release */ 358} /* ide_release */
381 359
382static int ide_suspend(struct pcmcia_device *dev)
383{
384 dev_link_t *link = dev_to_instance(dev);
385
386 link->state |= DEV_SUSPEND;
387 if (link->state & DEV_CONFIG)
388 pcmcia_release_configuration(link->handle);
389
390 return 0;
391}
392
393static int ide_resume(struct pcmcia_device *dev)
394{
395 dev_link_t *link = dev_to_instance(dev);
396
397 link->state &= ~DEV_SUSPEND;
398 if (DEV_OK(link))
399 pcmcia_request_configuration(link->handle, &link->conf);
400
401 return 0;
402}
403 360
404/*====================================================================== 361/*======================================================================
405 362
@@ -459,11 +416,9 @@ static struct pcmcia_driver ide_cs_driver = {
459 .drv = { 416 .drv = {
460 .name = "ide-cs", 417 .name = "ide-cs",
461 }, 418 },
462 .probe = ide_attach, 419 .probe = ide_probe,
463 .remove = ide_detach, 420 .remove = ide_detach,
464 .id_table = ide_ids, 421 .id_table = ide_ids,
465 .suspend = ide_suspend,
466 .resume = ide_resume,
467}; 422};
468 423
469static int __init init_ide_cs(void) 424static int __init init_ide_cs(void)
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 2c765ca5aa50..f4206604db03 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -496,22 +496,17 @@ static struct sbp2_command_info *sbp2util_find_command_for_orb(
496/* 496/*
497 * This function finds the sbp2_command for a given outstanding SCpnt. 497 * This function finds the sbp2_command for a given outstanding SCpnt.
498 * Only looks at the inuse list. 498 * Only looks at the inuse list.
499 * Must be called with scsi_id->sbp2_command_orb_lock held.
499 */ 500 */
500static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(struct scsi_id_instance_data *scsi_id, void *SCpnt) 501static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(
502 struct scsi_id_instance_data *scsi_id, void *SCpnt)
501{ 503{
502 struct sbp2_command_info *command; 504 struct sbp2_command_info *command;
503 unsigned long flags;
504 505
505 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags); 506 if (!list_empty(&scsi_id->sbp2_command_orb_inuse))
506 if (!list_empty(&scsi_id->sbp2_command_orb_inuse)) { 507 list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list)
507 list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list) { 508 if (command->Current_SCpnt == SCpnt)
508 if (command->Current_SCpnt == SCpnt) {
509 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
510 return command; 509 return command;
511 }
512 }
513 }
514 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
515 return NULL; 510 return NULL;
516} 511}
517 512
@@ -580,17 +575,15 @@ static void sbp2util_free_command_dma(struct sbp2_command_info *command)
580 575
581/* 576/*
582 * This function moves a command to the completed orb list. 577 * This function moves a command to the completed orb list.
578 * Must be called with scsi_id->sbp2_command_orb_lock held.
583 */ 579 */
584static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_id, 580static void sbp2util_mark_command_completed(
585 struct sbp2_command_info *command) 581 struct scsi_id_instance_data *scsi_id,
582 struct sbp2_command_info *command)
586{ 583{
587 unsigned long flags;
588
589 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
590 list_del(&command->list); 584 list_del(&command->list);
591 sbp2util_free_command_dma(command); 585 sbp2util_free_command_dma(command);
592 list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed); 586 list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed);
593 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
594} 587}
595 588
596/* 589/*
@@ -2148,7 +2141,9 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
2148 * Matched status with command, now grab scsi command pointers and check status 2141 * Matched status with command, now grab scsi command pointers and check status
2149 */ 2142 */
2150 SCpnt = command->Current_SCpnt; 2143 SCpnt = command->Current_SCpnt;
2144 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
2151 sbp2util_mark_command_completed(scsi_id, command); 2145 sbp2util_mark_command_completed(scsi_id, command);
2146 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
2152 2147
2153 if (SCpnt) { 2148 if (SCpnt) {
2154 2149
@@ -2484,6 +2479,7 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
2484 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0]; 2479 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
2485 struct sbp2scsi_host_info *hi = scsi_id->hi; 2480 struct sbp2scsi_host_info *hi = scsi_id->hi;
2486 struct sbp2_command_info *command; 2481 struct sbp2_command_info *command;
2482 unsigned long flags;
2487 2483
2488 SBP2_ERR("aborting sbp2 command"); 2484 SBP2_ERR("aborting sbp2 command");
2489 scsi_print_command(SCpnt); 2485 scsi_print_command(SCpnt);
@@ -2494,6 +2490,7 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
2494 * Right now, just return any matching command structures 2490 * Right now, just return any matching command structures
2495 * to the free pool. 2491 * to the free pool.
2496 */ 2492 */
2493 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
2497 command = sbp2util_find_command_for_SCpnt(scsi_id, SCpnt); 2494 command = sbp2util_find_command_for_SCpnt(scsi_id, SCpnt);
2498 if (command) { 2495 if (command) {
2499 SBP2_DEBUG("Found command to abort"); 2496 SBP2_DEBUG("Found command to abort");
@@ -2511,6 +2508,7 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
2511 command->Current_done(command->Current_SCpnt); 2508 command->Current_done(command->Current_SCpnt);
2512 } 2509 }
2513 } 2510 }
2511 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
2514 2512
2515 /* 2513 /*
2516 * Initiate a fetch agent reset. 2514 * Initiate a fetch agent reset.
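
The sbp2 changes move the ORB-list locking out of the two helpers and into
their callers, so that a lookup and the completion that follows it happen under
a single critical section. Condensed from the hunks above (illustrative
wrapper, error handling omitted), the caller-side pattern is:

static void example_return_command(struct scsi_id_instance_data *scsi_id,
				   void *SCpnt)
{
	struct sbp2_command_info *command;
	unsigned long flags;

	/* both helpers now require sbp2_command_orb_lock to be held */
	spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
	command = sbp2util_find_command_for_SCpnt(scsi_id, SCpnt);
	if (command)
		sbp2util_mark_command_completed(scsi_id, command);
	spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
}
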
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index bdf0891a92dd..afc612b8577d 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -30,6 +30,7 @@ config INFINIBAND_USER_ACCESS
30 <http://www.openib.org>. 30 <http://www.openib.org>.
31 31
32source "drivers/infiniband/hw/mthca/Kconfig" 32source "drivers/infiniband/hw/mthca/Kconfig"
33source "drivers/infiniband/hw/ipath/Kconfig"
33 34
34source "drivers/infiniband/ulp/ipoib/Kconfig" 35source "drivers/infiniband/ulp/ipoib/Kconfig"
35 36
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index a43fb34cca94..eea27322a22d 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -1,4 +1,5 @@
1obj-$(CONFIG_INFINIBAND) += core/ 1obj-$(CONFIG_INFINIBAND) += core/
2obj-$(CONFIG_INFINIBAND_MTHCA) += hw/mthca/ 2obj-$(CONFIG_INFINIBAND_MTHCA) += hw/mthca/
3obj-$(CONFIG_IPATH_CORE) += hw/ipath/
3obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/ 4obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
4obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/ 5obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
diff --git a/drivers/infiniband/hw/ipath/Kconfig b/drivers/infiniband/hw/ipath/Kconfig
new file mode 100644
index 000000000000..9ea67c409b6d
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/Kconfig
@@ -0,0 +1,16 @@
1config IPATH_CORE
2 tristate "PathScale InfiniPath Driver"
3 depends on 64BIT && PCI_MSI && NET
4 ---help---
5 This is a low-level driver for PathScale InfiniPath host channel
6 adapters (HCAs) based on the HT-400 and PE-800 chips.
7
8config INFINIBAND_IPATH
9 tristate "PathScale InfiniPath Verbs Driver"
10 depends on IPATH_CORE && INFINIBAND
11 ---help---
12 This is a driver that provides InfiniBand verbs support for
13 PathScale InfiniPath host channel adapters (HCAs). This
14 allows these devices to be used with both kernel upper level
15 protocols such as IP-over-InfiniBand as well as with userspace
16 applications (in conjunction with InfiniBand userspace access).
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
new file mode 100644
index 000000000000..b4d084abfd22
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -0,0 +1,36 @@
1EXTRA_CFLAGS += -DIPATH_IDSTR='"PathScale kernel.org driver"' \
2 -DIPATH_KERN_TYPE=0
3
4obj-$(CONFIG_IPATH_CORE) += ipath_core.o
5obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o
6
7ipath_core-y := \
8 ipath_diag.o \
9 ipath_driver.o \
10 ipath_eeprom.o \
11 ipath_file_ops.o \
12 ipath_fs.o \
13 ipath_ht400.o \
14 ipath_init_chip.o \
15 ipath_intr.o \
16 ipath_layer.o \
17 ipath_pe800.o \
18 ipath_stats.o \
19 ipath_sysfs.o \
20 ipath_user_pages.o
21
22ipath_core-$(CONFIG_X86_64) += ipath_wc_x86_64.o
23
24ib_ipath-y := \
25 ipath_cq.o \
26 ipath_keys.o \
27 ipath_mad.o \
28 ipath_mr.o \
29 ipath_qp.o \
30 ipath_rc.o \
31 ipath_ruc.o \
32 ipath_srq.o \
33 ipath_uc.o \
34 ipath_ud.o \
35 ipath_verbs.o \
36 ipath_verbs_mcast.o
diff --git a/drivers/infiniband/hw/ipath/ipath_common.h b/drivers/infiniband/hw/ipath/ipath_common.h
new file mode 100644
index 000000000000..48a55247b832
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_common.h
@@ -0,0 +1,616 @@
1/*
2 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef _IPATH_COMMON_H
34#define _IPATH_COMMON_H
35
36/*
37 * This file contains defines, structures, etc. that are used
38 * to communicate between kernel and user code.
39 */
40
41/* This is the IEEE-assigned OUI for PathScale, Inc. */
42#define IPATH_SRC_OUI_1 0x00
43#define IPATH_SRC_OUI_2 0x11
44#define IPATH_SRC_OUI_3 0x75
45
46/* version of protocol header (known to chip also). In the long run,
47 * we should be able to generate and accept a range of version numbers;
48 * for now we only accept one, and it's compiled in.
49 */
50#define IPS_PROTO_VERSION 2
51
52/*
53 * These are compile time constants that you may want to enable or disable
54 * if you are trying to debug problems with code or performance.
55 * IPATH_VERBOSE_TRACING define as 1 if you want additional tracing in
56 * fastpath code
57 * IPATH_TRACE_REGWRITES define as 1 if you want register writes to be
 58 * traced in fastpath code
59 * _IPATH_TRACING define as 0 if you want to remove all tracing in a
60 * compilation unit
61 * _IPATH_DEBUGGING define as 0 if you want to remove debug prints
62 */
63
64/*
65 * The value in the BTH QP field that InfiniPath uses to differentiate
66 * an infinipath protocol IB packet vs standard IB transport
67 */
68#define IPATH_KD_QP 0x656b79
69
70/*
71 * valid states passed to ipath_set_linkstate() user call
72 */
73#define IPATH_IB_LINKDOWN 0
74#define IPATH_IB_LINKARM 1
75#define IPATH_IB_LINKACTIVE 2
76#define IPATH_IB_LINKINIT 3
77#define IPATH_IB_LINKDOWN_SLEEP 4
78#define IPATH_IB_LINKDOWN_DISABLE 5
79
80/*
81 * stats maintained by the driver. For now, at least, this is global
82 * to all minor devices.
83 */
84struct infinipath_stats {
85 /* number of interrupts taken */
86 __u64 sps_ints;
87 /* number of interrupts for errors */
88 __u64 sps_errints;
89 /* number of errors from chip (not incl. packet errors or CRC) */
90 __u64 sps_errs;
91 /* number of packet errors from chip other than CRC */
92 __u64 sps_pkterrs;
93 /* number of packets with CRC errors (ICRC and VCRC) */
94 __u64 sps_crcerrs;
95 /* number of hardware errors reported (parity, etc.) */
96 __u64 sps_hwerrs;
97 /* number of times IB link changed state unexpectedly */
98 __u64 sps_iblink;
99 /* no longer used; left for compatibility */
100 __u64 sps_unused3;
101 /* number of kernel (port0) packets received */
102 __u64 sps_port0pkts;
103 /* number of "ethernet" packets sent by driver */
104 __u64 sps_ether_spkts;
105 /* number of "ethernet" packets received by driver */
106 __u64 sps_ether_rpkts;
107 /* number of SMA packets sent by driver */
108 __u64 sps_sma_spkts;
109 /* number of SMA packets received by driver */
110 __u64 sps_sma_rpkts;
111 /* number of times all ports rcvhdrq was full and packet dropped */
112 __u64 sps_hdrqfull;
113 /* number of times all ports egrtid was full and packet dropped */
114 __u64 sps_etidfull;
115 /*
116 * number of times we tried to send from driver, but no pio buffers
117 * avail
118 */
119 __u64 sps_nopiobufs;
120 /* number of ports currently open */
121 __u64 sps_ports;
122 /* list of pkeys (other than default) accepted (0 means not set) */
123 __u16 sps_pkeys[4];
124 /* lids for up to 4 infinipaths, indexed by infinipath # */
125 __u16 sps_lid[4];
126 /* number of user ports per chip (not IB ports) */
127 __u32 sps_nports;
128 /* not our interrupt, or already handled */
129 __u32 sps_nullintr;
130 /* max number of packets handled per receive call */
131 __u32 sps_maxpkts_call;
132 /* avg number of packets handled per receive call */
133 __u32 sps_avgpkts_call;
134 /* total number of pages locked */
135 __u64 sps_pagelocks;
136 /* total number of pages unlocked */
137 __u64 sps_pageunlocks;
138 /*
139 * Number of packets dropped in kernel other than errors (ether
140 * packets if ipath not configured, sma/mad, etc.)
141 */
142 __u64 sps_krdrops;
143 /* mlids for up to 4 infinipaths, indexed by infinipath # */
144 __u16 sps_mlid[4];
145 /* pad for future growth */
146 __u64 __sps_pad[45];
147};
148
149/*
150 * These are the status bits readable (in ascii form, 64bit value)
151 * from the "status" sysfs file.
152 */
153#define IPATH_STATUS_INITTED 0x1 /* basic initialization done */
154#define IPATH_STATUS_DISABLED 0x2 /* hardware disabled */
155/* Device has been disabled via admin request */
156#define IPATH_STATUS_ADMIN_DISABLED 0x4
157#define IPATH_STATUS_OIB_SMA 0x8 /* ipath_mad kernel SMA running */
158#define IPATH_STATUS_SMA 0x10 /* user SMA running */
159/* Chip has been found and initted */
160#define IPATH_STATUS_CHIP_PRESENT 0x20
161/* IB link is at ACTIVE, usable for data traffic */
162#define IPATH_STATUS_IB_READY 0x40
163/* link is configured, LID, MTU, etc. have been set */
164#define IPATH_STATUS_IB_CONF 0x80
165/* no link established, probably no cable */
166#define IPATH_STATUS_IB_NOCABLE 0x100
167/* A Fatal hardware error has occurred. */
168#define IPATH_STATUS_HWERROR 0x200
169
170/*
171 * The list of usermode accessible registers. Also see Reg_* later in file.
172 */
173typedef enum _ipath_ureg {
174 /* (RO) DMA RcvHdr to be used next. */
175 ur_rcvhdrtail = 0,
176 /* (RW) RcvHdr entry to be processed next by host. */
177 ur_rcvhdrhead = 1,
178 /* (RO) Index of next Eager index to use. */
179 ur_rcvegrindextail = 2,
180 /* (RW) Eager TID to be processed next */
181 ur_rcvegrindexhead = 3,
182 /* For internal use only; max register number. */
183 _IPATH_UregMax
184} ipath_ureg;
185
186/* bit values for spi_runtime_flags */
187#define IPATH_RUNTIME_HT 0x1
188#define IPATH_RUNTIME_PCIE 0x2
189#define IPATH_RUNTIME_FORCE_WC_ORDER 0x4
190#define IPATH_RUNTIME_RCVHDR_COPY 0x8
191
192/*
193 * This structure is returned by ipath_userinit() immediately after
194 * open to get implementation-specific info, and info specific to this
195 * instance.
196 *
 197 * This struct must have explicit pad fields where type sizes
198 * may result in different alignments between 32 and 64 bit
 199 * programs, since the 64 bit kernel requires the user code
200 * to have matching offsets
201 */
202struct ipath_base_info {
203 /* version of hardware, for feature checking. */
204 __u32 spi_hw_version;
205 /* version of software, for feature checking. */
206 __u32 spi_sw_version;
207 /* InfiniPath port assigned, goes into sent packets */
208 __u32 spi_port;
209 /*
 210 * IB MTU; a packet's IB data must be less than this.
211 * The MTU is in bytes, and will be a multiple of 4 bytes.
212 */
213 __u32 spi_mtu;
214 /*
215 * Size of a PIO buffer. Any given packet's total size must be less
216 * than this (in words). Included is the starting control word, so
217 * if 513 is returned, then total pkt size is 512 words or less.
218 */
219 __u32 spi_piosize;
220 /* size of the TID cache in infinipath, in entries */
221 __u32 spi_tidcnt;
222 /* size of the TID Eager list in infinipath, in entries */
223 __u32 spi_tidegrcnt;
224 /* size of a single receive header queue entry. */
225 __u32 spi_rcvhdrent_size;
226 /*
227 * Count of receive header queue entries allocated.
 228 * This may be less than the spu_rcvhdrcnt passed in!
229 */
230 __u32 spi_rcvhdr_cnt;
231
232 /* per-chip and other runtime features bitmap (IPATH_RUNTIME_*) */
233 __u32 spi_runtime_flags;
234
235 /* address where receive buffer queue is mapped into */
236 __u64 spi_rcvhdr_base;
237
238 /* user program. */
239
240 /* base address of eager TID receive buffers. */
241 __u64 spi_rcv_egrbufs;
242
243 /* Allocated by initialization code, not by protocol. */
244
245 /*
246 * Size of each TID buffer in host memory, starting at
247 * spi_rcv_egrbufs. The buffers are virtually contiguous.
248 */
249 __u32 spi_rcv_egrbufsize;
250 /*
 251 * The special QP (queue pair) value that distinguishes an infinipath
252 * protocol packet from standard IB packets. More, probably much
253 * more, to be added.
254 */
255 __u32 spi_qpair;
256
257 /*
258 * User register base for init code, not to be used directly by
259 * protocol or applications.
260 */
261 __u64 __spi_uregbase;
262 /*
263 * Maximum buffer size in bytes that can be used in a single TID
264 * entry (assuming the buffer is aligned to this boundary). This is
 265 * the minimum of what the hardware and software support. Guaranteed
266 * to be a power of 2.
267 */
268 __u32 spi_tid_maxsize;
269 /*
270 * alignment of each pio send buffer (byte count
271 * to add to spi_piobufbase to get to second buffer)
272 */
273 __u32 spi_pioalign;
274 /*
275 * The index of the first pio buffer available to this process;
276 * needed to do lookup in spi_pioavailaddr; not added to
277 * spi_piobufbase.
278 */
279 __u32 spi_pioindex;
280 /* number of buffers mapped for this process */
281 __u32 spi_piocnt;
282
283 /*
284 * Base address of writeonly pio buffers for this process.
285 * Each buffer has spi_piosize words, and is aligned on spi_pioalign
286 * boundaries. spi_piocnt buffers are mapped from this address
287 */
288 __u64 spi_piobufbase;
289
290 /*
291 * Base address of readonly memory copy of the pioavail registers.
292 * There are 2 bits for each buffer.
293 */
294 __u64 spi_pioavailaddr;
295
296 /*
297 * Address where driver updates a copy of the interface and driver
298 * status (IPATH_STATUS_*) as a 64 bit value. It's followed by a
299 * string indicating hardware error, if there was one.
300 */
301 __u64 spi_status;
302
303 /* number of chip ports available to user processes */
304 __u32 spi_nports;
305 /* unit number of chip we are using */
306 __u32 spi_unit;
307 /* num bufs in each contiguous set */
308 __u32 spi_rcv_egrperchunk;
309 /* size in bytes of each contiguous set */
310 __u32 spi_rcv_egrchunksize;
311 /* total size of mmap to cover full rcvegrbuffers */
312 __u32 spi_rcv_egrbuftotlen;
313} __attribute__ ((aligned(8)));
314
315
316/*
317 * This version number is given to the driver by the user code during
318 * initialization in the spu_userversion field of ipath_user_info, so
319 * the driver can check for compatibility with user code.
320 *
321 * The major version changes when data structures
322 * change in an incompatible way. The driver must be the same or higher
323 * for initialization to succeed. In some cases, a higher version
324 * driver will not interoperate with older software, and initialization
325 * will return an error.
326 */
327#define IPATH_USER_SWMAJOR 1
328
329/*
330 * Minor version differences are always compatible
331 * a within a major version, however if if user software is larger
332 * than driver software, some new features and/or structure fields
333 * may not be implemented; the user code must deal with this if it
334 * cares, or it must abort after initialization reports the difference
335 */
336#define IPATH_USER_SWMINOR 2
337
338#define IPATH_USER_SWVERSION ((IPATH_USER_SWMAJOR<<16) | IPATH_USER_SWMINOR)
339
340#define IPATH_KERN_TYPE 0
341
342/*
343 * Similarly, this is the kernel version going back to the user. It's
344 * slightly different, in that we want to tell if the driver was built as
 345 * part of a PathScale release, or from OpenIB, kernel.org,
346 * or a standard distribution, for support reasons. The high bit is 0 for
347 * non-PathScale, and 1 for PathScale-built/supplied.
348 *
349 * It's returned by the driver to the user code during initialization in the
350 * spi_sw_version field of ipath_base_info, so the user code can in turn
351 * check for compatibility with the kernel.
352*/
353#define IPATH_KERN_SWVERSION ((IPATH_KERN_TYPE<<31) | IPATH_USER_SWVERSION)
354
355/*
356 * This structure is passed to ipath_userinit() to tell the driver where
357 * user code buffers are, sizes, etc. The offsets and sizes of the
358 * fields must remain unchanged, for binary compatibility. It can
359 * be extended, if userversion is changed so user code can tell, if needed
360 */
361struct ipath_user_info {
362 /*
363 * version of user software, to detect compatibility issues.
364 * Should be set to IPATH_USER_SWVERSION.
365 */
366 __u32 spu_userversion;
367
368 /* desired number of receive header queue entries */
369 __u32 spu_rcvhdrcnt;
370
371 /* size of struct base_info to write to */
372 __u32 spu_base_info_size;
373
374 /*
375 * number of words in KD protocol header
376 * This tells InfiniPath how many words to copy to rcvhdrq. If 0,
377 * kernel uses a default. Once set, attempts to set any other value
378 * are an error (EAGAIN) until driver is reloaded.
379 */
380 __u32 spu_rcvhdrsize;
381
382 /*
383 * cache line aligned (64 byte) user address to
384 * which the rcvhdrtail register will be written by infinipath
385 * whenever it changes, so that no chip registers are read in
386 * the performance path.
387 */
388 __u64 spu_rcvhdraddr;
389
390 /*
391 * address of struct base_info to write to
392 */
393 __u64 spu_base_info;
394
395} __attribute__ ((aligned(8)));
396
397/* User commands. */
398
399#define IPATH_CMD_MIN 16
400
401#define IPATH_CMD_USER_INIT 16 /* set up userspace */
402#define IPATH_CMD_PORT_INFO 17 /* find out what resources we got */
403#define IPATH_CMD_RECV_CTRL 18 /* control receipt of packets */
404#define IPATH_CMD_TID_UPDATE 19 /* update expected TID entries */
405#define IPATH_CMD_TID_FREE 20 /* free expected TID entries */
406#define IPATH_CMD_SET_PART_KEY 21 /* add partition key */
407
408#define IPATH_CMD_MAX 21
409
410struct ipath_port_info {
411 __u32 num_active; /* number of active units */
412 __u32 unit; /* unit (chip) assigned to caller */
413 __u32 port; /* port on unit assigned to caller */
414};
415
416struct ipath_tid_info {
417 __u32 tidcnt;
418 /* make structure same size in 32 and 64 bit */
419 __u32 tid__unused;
420 /* virtual address of first page in transfer */
421 __u64 tidvaddr;
422 /* pointer (same size 32/64 bit) to __u16 tid array */
423 __u64 tidlist;
424
425 /*
426 * pointer (same size 32/64 bit) to bitmap of TIDs used
427 * for this call; checked for being large enough at open
428 */
429 __u64 tidmap;
430};
431
432struct ipath_cmd {
433 __u32 type; /* command type */
434 union {
435 struct ipath_tid_info tid_info;
436 struct ipath_user_info user_info;
437 /* address in userspace of struct ipath_port_info to
438 write result to */
439 __u64 port_info;
440 /* enable/disable receipt of packets */
441 __u32 recv_ctrl;
442 /* partition key to set */
443 __u16 part_key;
444 } cmd;
445};
446
447struct ipath_iovec {
448 /* Pointer to data, but same size 32 and 64 bit */
449 __u64 iov_base;
450
451 /*
452 * Length of data; don't need 64 bits, but want
453 * ipath_sendpkt to remain same size as before 32 bit changes, so...
454 */
455 __u64 iov_len;
456};
457
458/*
459 * Describes a single packet for send. Each packet can have one or more
460 * buffers, but the total length (exclusive of IB headers) must be less
461 * than the MTU, and if using the PIO method, entire packet length,
462 * including IB headers, must be less than the ipath_piosize value (words).
463 * Use of this necessitates including sys/uio.h
464 */
465struct __ipath_sendpkt {
466 __u32 sps_flags; /* flags for packet (TBD) */
467 __u32 sps_cnt; /* number of entries to use in sps_iov */
468 /* array of iov's describing packet. TEMPORARY */
469 struct ipath_iovec sps_iov[4];
470};
471
472/* Passed into SMA special file's ->read and ->write methods. */
473struct ipath_sma_pkt
474{
475 __u32 unit; /* unit on which to send packet */
476 __u64 data; /* address of payload in userspace */
477 __u32 len; /* length of payload */
478};
479
480/*
481 * Data layout in I2C flash (for GUID, etc.)
482 * All fields are little-endian binary unless otherwise stated
483 */
484#define IPATH_FLASH_VERSION 1
485struct ipath_flash {
486 /* flash layout version (IPATH_FLASH_VERSION) */
487 __u8 if_fversion;
488 /* checksum protecting if_length bytes */
489 __u8 if_csum;
490 /*
491 * valid length (in use, protected by if_csum), including
 492 * if_fversion and if_csum themselves
493 */
494 __u8 if_length;
495 /* the GUID, in network order */
496 __u8 if_guid[8];
497 /* number of GUIDs to use, starting from if_guid */
498 __u8 if_numguid;
499 /* the board serial number, in ASCII */
500 char if_serial[12];
501 /* board mfg date (YYYYMMDD ASCII) */
502 char if_mfgdate[8];
503 /* last board rework/test date (YYYYMMDD ASCII) */
504 char if_testdate[8];
505 /* logging of error counts, TBD */
506 __u8 if_errcntp[4];
507 /* powered on hours, updated at driver unload */
508 __u8 if_powerhour[2];
509 /* ASCII free-form comment field */
510 char if_comment[32];
511 /* 78 bytes used, min flash size is 128 bytes */
512 __u8 if_future[50];
513};
514
515/*
516 * These are the counters implemented in the chip, and are listed in order.
517 * The InterCaps naming is taken straight from the chip spec.
518 */
519struct infinipath_counters {
520 __u64 LBIntCnt;
521 __u64 LBFlowStallCnt;
522 __u64 Reserved1;
523 __u64 TxUnsupVLErrCnt;
524 __u64 TxDataPktCnt;
525 __u64 TxFlowPktCnt;
526 __u64 TxDwordCnt;
527 __u64 TxLenErrCnt;
528 __u64 TxMaxMinLenErrCnt;
529 __u64 TxUnderrunCnt;
530 __u64 TxFlowStallCnt;
531 __u64 TxDroppedPktCnt;
532 __u64 RxDroppedPktCnt;
533 __u64 RxDataPktCnt;
534 __u64 RxFlowPktCnt;
535 __u64 RxDwordCnt;
536 __u64 RxLenErrCnt;
537 __u64 RxMaxMinLenErrCnt;
538 __u64 RxICRCErrCnt;
539 __u64 RxVCRCErrCnt;
540 __u64 RxFlowCtrlErrCnt;
541 __u64 RxBadFormatCnt;
542 __u64 RxLinkProblemCnt;
543 __u64 RxEBPCnt;
544 __u64 RxLPCRCErrCnt;
545 __u64 RxBufOvflCnt;
546 __u64 RxTIDFullErrCnt;
547 __u64 RxTIDValidErrCnt;
548 __u64 RxPKeyMismatchCnt;
549 __u64 RxP0HdrEgrOvflCnt;
550 __u64 RxP1HdrEgrOvflCnt;
551 __u64 RxP2HdrEgrOvflCnt;
552 __u64 RxP3HdrEgrOvflCnt;
553 __u64 RxP4HdrEgrOvflCnt;
554 __u64 RxP5HdrEgrOvflCnt;
555 __u64 RxP6HdrEgrOvflCnt;
556 __u64 RxP7HdrEgrOvflCnt;
557 __u64 RxP8HdrEgrOvflCnt;
558 __u64 Reserved6;
559 __u64 Reserved7;
560 __u64 IBStatusChangeCnt;
561 __u64 IBLinkErrRecoveryCnt;
562 __u64 IBLinkDownedCnt;
563 __u64 IBSymbolErrCnt;
564};
565
566/*
567 * The next set of defines are for packet headers, and chip register
 568 * and memory bits that are visible to and/or used by user-mode software.
569 * The other bits that are used only by the driver or diags are in
570 * ipath_registers.h
571 */
572
573/* RcvHdrFlags bits */
574#define INFINIPATH_RHF_LENGTH_MASK 0x7FF
575#define INFINIPATH_RHF_LENGTH_SHIFT 0
576#define INFINIPATH_RHF_RCVTYPE_MASK 0x7
577#define INFINIPATH_RHF_RCVTYPE_SHIFT 11
578#define INFINIPATH_RHF_EGRINDEX_MASK 0x7FF
579#define INFINIPATH_RHF_EGRINDEX_SHIFT 16
580#define INFINIPATH_RHF_H_ICRCERR 0x80000000
581#define INFINIPATH_RHF_H_VCRCERR 0x40000000
582#define INFINIPATH_RHF_H_PARITYERR 0x20000000
583#define INFINIPATH_RHF_H_LENERR 0x10000000
584#define INFINIPATH_RHF_H_MTUERR 0x08000000
585#define INFINIPATH_RHF_H_IHDRERR 0x04000000
586#define INFINIPATH_RHF_H_TIDERR 0x02000000
587#define INFINIPATH_RHF_H_MKERR 0x01000000
588#define INFINIPATH_RHF_H_IBERR 0x00800000
589#define INFINIPATH_RHF_L_SWA 0x00008000
590#define INFINIPATH_RHF_L_SWB 0x00004000
591
592/* infinipath header fields */
593#define INFINIPATH_I_VERS_MASK 0xF
594#define INFINIPATH_I_VERS_SHIFT 28
595#define INFINIPATH_I_PORT_MASK 0xF
596#define INFINIPATH_I_PORT_SHIFT 24
597#define INFINIPATH_I_TID_MASK 0x7FF
598#define INFINIPATH_I_TID_SHIFT 13
599#define INFINIPATH_I_OFFSET_MASK 0x1FFF
600#define INFINIPATH_I_OFFSET_SHIFT 0
601
602/* K_PktFlags bits */
603#define INFINIPATH_KPF_INTR 0x1
604
605/* SendPIO per-buffer control */
606#define INFINIPATH_SP_LENGTHP1_MASK 0x3FF
607#define INFINIPATH_SP_LENGTHP1_SHIFT 0
608#define INFINIPATH_SP_INTR 0x80000000
609#define INFINIPATH_SP_TEST 0x40000000
610#define INFINIPATH_SP_TESTEBP 0x20000000
611
612/* SendPIOAvail bits */
613#define INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT 1
614#define INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT 0
615
616#endif /* _IPATH_COMMON_H */
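
A small sketch of the version handshake described above, as user code might
perform it: the user library places IPATH_USER_SWVERSION in spu_userversion and
later inspects the spi_sw_version word returned by the driver. This helper is
illustrative only; the real checks live in the driver and in the user library.

static int example_check_sw_version(__u32 spi_sw_version)
{
	/* bit 31 is the PathScale-built flag, bits 30:16 the driver major */
	__u32 kmajor = (spi_sw_version >> 16) & 0x7fff;
	__u32 kminor = spi_sw_version & 0xffff;

	(void)kminor;	/* minor differences are compatible within a major */

	/* the driver major must be the same or higher than ours */
	return kmajor >= IPATH_USER_SWMAJOR ? 0 : -1;
}
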
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
new file mode 100644
index 000000000000..7ece1135ddfe
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -0,0 +1,295 @@
1/*
2 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/err.h>
34#include <linux/vmalloc.h>
35
36#include "ipath_verbs.h"
37
38/**
39 * ipath_cq_enter - add a new entry to the completion queue
40 * @cq: completion queue
41 * @entry: work completion entry to add
 42 * @solicited: true if @entry is a solicited entry
43 *
 44 * This may be called with either the qp->s_lock or the qp->r_rq.lock held.
45 */
46void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
47{
48 unsigned long flags;
49 u32 next;
50
51 spin_lock_irqsave(&cq->lock, flags);
52
53 if (cq->head == cq->ibcq.cqe)
54 next = 0;
55 else
56 next = cq->head + 1;
57 if (unlikely(next == cq->tail)) {
58 spin_unlock_irqrestore(&cq->lock, flags);
59 if (cq->ibcq.event_handler) {
60 struct ib_event ev;
61
62 ev.device = cq->ibcq.device;
63 ev.element.cq = &cq->ibcq;
64 ev.event = IB_EVENT_CQ_ERR;
65 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
66 }
67 return;
68 }
69 cq->queue[cq->head] = *entry;
70 cq->head = next;
71
72 if (cq->notify == IB_CQ_NEXT_COMP ||
73 (cq->notify == IB_CQ_SOLICITED && solicited)) {
74 cq->notify = IB_CQ_NONE;
75 cq->triggered++;
76 /*
77 * This will cause send_complete() to be called in
78 * another thread.
79 */
80 tasklet_hi_schedule(&cq->comptask);
81 }
82
83 spin_unlock_irqrestore(&cq->lock, flags);
84
85 if (entry->status != IB_WC_SUCCESS)
86 to_idev(cq->ibcq.device)->n_wqe_errs++;
87}
88
89/**
90 * ipath_poll_cq - poll for work completion entries
91 * @ibcq: the completion queue to poll
92 * @num_entries: the maximum number of entries to return
93 * @entry: pointer to array where work completions are placed
94 *
95 * Returns the number of completion entries polled.
96 *
97 * This may be called from interrupt context. Also called by ib_poll_cq()
98 * in the generic verbs code.
99 */
100int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
101{
102 struct ipath_cq *cq = to_icq(ibcq);
103 unsigned long flags;
104 int npolled;
105
106 spin_lock_irqsave(&cq->lock, flags);
107
108 for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
109 if (cq->tail == cq->head)
110 break;
111 *entry = cq->queue[cq->tail];
112 if (cq->tail == cq->ibcq.cqe)
113 cq->tail = 0;
114 else
115 cq->tail++;
116 }
117
118 spin_unlock_irqrestore(&cq->lock, flags);
119
120 return npolled;
121}
122
123static void send_complete(unsigned long data)
124{
125 struct ipath_cq *cq = (struct ipath_cq *)data;
126
127 /*
128 * The completion handler will most likely rearm the notification
129 * and poll for all pending entries. If a new completion entry
130 * is added while we are in this routine, tasklet_hi_schedule()
131 * won't call us again until we return so we check triggered to
132 * see if we need to call the handler again.
133 */
134 for (;;) {
135 u8 triggered = cq->triggered;
136
137 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
138
139 if (cq->triggered == triggered)
140 return;
141 }
142}
143
144/**
145 * ipath_create_cq - create a completion queue
146 * @ibdev: the device this completion queue is attached to
147 * @entries: the minimum size of the completion queue
148 * @context: unused by the InfiniPath driver
149 * @udata: unused by the InfiniPath driver
150 *
151 * Returns a pointer to the completion queue or negative errno values
152 * for failure.
153 *
154 * Called by ib_create_cq() in the generic verbs code.
155 */
156struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
157 struct ib_ucontext *context,
158 struct ib_udata *udata)
159{
160 struct ipath_cq *cq;
161 struct ib_wc *wc;
162 struct ib_cq *ret;
163
164 /*
165 * Need to use vmalloc() if we want to support large #s of
166 * entries.
167 */
168 cq = kmalloc(sizeof(*cq), GFP_KERNEL);
169 if (!cq) {
170 ret = ERR_PTR(-ENOMEM);
171 goto bail;
172 }
173
174 /*
175 * Need to use vmalloc() if we want to support large #s of entries.
176 */
177 wc = vmalloc(sizeof(*wc) * (entries + 1));
178 if (!wc) {
179 kfree(cq);
180 ret = ERR_PTR(-ENOMEM);
181 goto bail;
182 }
183 /*
184 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
185 * The number of entries should be >= the number requested or return
186 * an error.
187 */
188 cq->ibcq.cqe = entries;
189 cq->notify = IB_CQ_NONE;
190 cq->triggered = 0;
191 spin_lock_init(&cq->lock);
192 tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
193 cq->head = 0;
194 cq->tail = 0;
195 cq->queue = wc;
196
197 ret = &cq->ibcq;
198
199bail:
200 return ret;
201}
202
203/**
204 * ipath_destroy_cq - destroy a completion queue
205 * @ibcq: the completion queue to destroy.
206 *
207 * Returns 0 for success.
208 *
209 * Called by ib_destroy_cq() in the generic verbs code.
210 */
211int ipath_destroy_cq(struct ib_cq *ibcq)
212{
213 struct ipath_cq *cq = to_icq(ibcq);
214
215 tasklet_kill(&cq->comptask);
216 vfree(cq->queue);
217 kfree(cq);
218
219 return 0;
220}
221
222/**
223 * ipath_req_notify_cq - change the notification type for a completion queue
224 * @ibcq: the completion queue
225 * @notify: the type of notification to request
226 *
227 * Returns 0 for success.
228 *
229 * This may be called from interrupt context. Also called by
230 * ib_req_notify_cq() in the generic verbs code.
231 */
232int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
233{
234 struct ipath_cq *cq = to_icq(ibcq);
235 unsigned long flags;
236
237 spin_lock_irqsave(&cq->lock, flags);
238 /*
239 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
240 * any other transitions.
241 */
242 if (cq->notify != IB_CQ_NEXT_COMP)
243 cq->notify = notify;
244 spin_unlock_irqrestore(&cq->lock, flags);
245 return 0;
246}
247
248int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
249{
250 struct ipath_cq *cq = to_icq(ibcq);
251 struct ib_wc *wc, *old_wc;
252 u32 n;
253 int ret;
254
255 /*
256 * Need to use vmalloc() if we want to support large #s of entries.
257 */
258 wc = vmalloc(sizeof(*wc) * (cqe + 1));
259 if (!wc) {
260 ret = -ENOMEM;
261 goto bail;
262 }
263
264 spin_lock_irq(&cq->lock);
265 if (cq->head < cq->tail)
266 n = cq->ibcq.cqe + 1 + cq->head - cq->tail;
267 else
268 n = cq->head - cq->tail;
269 if (unlikely((u32)cqe < n)) {
270 spin_unlock_irq(&cq->lock);
271 vfree(wc);
272 ret = -EOVERFLOW;
273 goto bail;
274 }
275 for (n = 0; cq->tail != cq->head; n++) {
276 wc[n] = cq->queue[cq->tail];
277 if (cq->tail == cq->ibcq.cqe)
278 cq->tail = 0;
279 else
280 cq->tail++;
281 }
282 cq->ibcq.cqe = cqe;
283 cq->head = n;
284 cq->tail = 0;
285 old_wc = cq->queue;
286 cq->queue = wc;
287 spin_unlock_irq(&cq->lock);
288
289 vfree(old_wc);
290
291 ret = 0;
292
293bail:
294 return ret;
295}
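
The completion queue above is a plain ring of ibcq.cqe + 1 work-completion
slots: head == tail means empty, and advancing head onto tail would mean full,
which is reported as IB_EVENT_CQ_ERR instead. The occupancy computation used by
ipath_resize_cq() can be read in isolation as the following illustrative helper
(not part of the driver):

static u32 example_cq_count(u32 head, u32 tail, u32 cqe)
{
	if (head < tail)			/* head has wrapped past the end */
		return cqe + 1 + head - tail;
	return head - tail;
}
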
diff --git a/drivers/infiniband/hw/ipath/ipath_debug.h b/drivers/infiniband/hw/ipath/ipath_debug.h
new file mode 100644
index 000000000000..593e28969c69
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_debug.h
@@ -0,0 +1,96 @@
1/*
2 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef _IPATH_DEBUG_H
34#define _IPATH_DEBUG_H
35
36#ifndef _IPATH_DEBUGGING /* debugging enabled or not */
37#define _IPATH_DEBUGGING 1
38#endif
39
40#if _IPATH_DEBUGGING
41
42/*
43 * Mask values for debugging. The scheme allows us to compile out any
44 * of the debug tracing stuff, and if compiled in, to enable or disable
45 * dynamically. This can be set at modprobe time also:
46 * modprobe infinipath.ko infinipath_debug=7
47 */
48
49#define __IPATH_INFO 0x1 /* generic low verbosity stuff */
50#define __IPATH_DBG 0x2 /* generic debug */
51#define __IPATH_TRSAMPLE 0x8 /* generate trace buffer sample entries */
52/* leave some low verbosity spots open */
53#define __IPATH_VERBDBG 0x40 /* very verbose debug */
54#define __IPATH_PKTDBG 0x80 /* print packet data */
55/* print process startup (init)/exit messages */
56#define __IPATH_PROCDBG 0x100
57/* print mmap/nopage stuff, not using VDBG any more */
58#define __IPATH_MMDBG 0x200
59#define __IPATH_USER_SEND 0x1000 /* use user mode send */
60#define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */
61#define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */
62#define __IPATH_SMADBG 0x8000 /* sma packet debug */
63#define __IPATH_IPATHDBG 0x10000 /* Ethernet (IPATH) general debug on */
64#define __IPATH_IPATHWARN 0x20000 /* Ethernet (IPATH) warnings on */
65#define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors on */
66#define __IPATH_IPATHPD 0x80000 /* Ethernet (IPATH) packet dump on */
67#define __IPATH_IPATHTABLE 0x100000 /* Ethernet (IPATH) table dump on */
68
69#else /* _IPATH_DEBUGGING */
70
71/*
72 * define all of these even with debugging off, for the few places that do
73 * if(infinipath_debug & _IPATH_xyzzy), but in a way that will make the
74 * compiler eliminate the code
75 */
76
77#define __IPATH_INFO 0x0 /* generic low verbosity stuff */
78#define __IPATH_DBG 0x0 /* generic debug */
79#define __IPATH_TRSAMPLE 0x0 /* generate trace buffer sample entries */
80#define __IPATH_VERBDBG 0x0 /* very verbose debug */
81#define __IPATH_PKTDBG 0x0 /* print packet data */
82#define __IPATH_PROCDBG 0x0 /* print process startup (init)/exit messages */
83/* print mmap/nopage stuff, not using VDBG any more */
84#define __IPATH_MMDBG 0x0
85#define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */
86#define __IPATH_SMADBG 0x0 /* sma packet debug */
#define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) general debug on */
87#define __IPATH_IPATHWARN 0x0 /* Ethernet (IPATH) warnings on */
88#define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */
89#define __IPATH_IPATHPD 0x0 /* Ethernet (IPATH) packet dump on */
90#define __IPATH_IPATHTABLE 0x0 /* Ethernet (IPATH) table dump on */
91
92#endif /* _IPATH_DEBUGGING */
93
94#define __IPATH_VERBOSEDBG __IPATH_VERBDBG
95
96#endif /* _IPATH_DEBUG_H */
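Because every __IPATH_* mask above collapses to the constant 0 when _IPATH_DEBUGGING is off, a debug macro that tests the mask lets the compiler discard the whole print at build time, while a debug-enabled build can still flip bits at run time (ipath_driver.c below registers the mask as a module parameter named "debug" via module_param_named()). A minimal sketch of such a macro follows; the driver's real ipath_cdbg()/ipath_dbg() helpers live in ipath_kernel.h, which is not shown in this diff, so the exact form here is an assumption:

	/* sketch only; the real definition is in ipath_kernel.h */
	#define ipath_cdbg(which, fmt, ...) \
		do { \
			if (unlikely(ipath_debug & __IPATH_##which##DBG)) \
				printk(KERN_DEBUG "ipath: " fmt, ##__VA_ARGS__); \
		} while (0)

	/* with debugging compiled out, __IPATH_PROCDBG is 0, so this is dead code */
	ipath_cdbg(PROC, "device opened\n");

Under this token-pasting pattern a call written as ipath_cdbg(VERBOSE, ...) expands to a test of __IPATH_VERBOSEDBG, which is what the alias on line 94 provides.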
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
new file mode 100644
index 000000000000..cd533cf951c2
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -0,0 +1,379 @@
1/*
2 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/*
34 * This file contains support for diagnostic functions. It is accessed by
35 * opening the ipath_diag device, normally minor number 129. Diagnostic use
36 * of the InfiniPath chip may render the chip or board unusable until the
37 * driver is unloaded, or in some cases, until the system is rebooted.
38 *
39 * Accesses to the chip through this interface are handled quite
40 * differently from accesses through the /sys/bus/pci resource mmap interface.
41 */
42
43#include <linux/pci.h>
44#include <asm/uaccess.h>
45
46#include "ipath_common.h"
47#include "ipath_kernel.h"
48#include "ips_common.h"
49#include "ipath_layer.h"
50
51int ipath_diag_inuse;
52static int diag_set_link;
53
54static int ipath_diag_open(struct inode *in, struct file *fp);
55static int ipath_diag_release(struct inode *in, struct file *fp);
56static ssize_t ipath_diag_read(struct file *fp, char __user *data,
57 size_t count, loff_t *off);
58static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
59 size_t count, loff_t *off);
60
61static struct file_operations diag_file_ops = {
62 .owner = THIS_MODULE,
63 .write = ipath_diag_write,
64 .read = ipath_diag_read,
65 .open = ipath_diag_open,
66 .release = ipath_diag_release
67};
68
69static struct cdev *diag_cdev;
70static struct class_device *diag_class_dev;
71
72int ipath_diag_init(void)
73{
74 return ipath_cdev_init(IPATH_DIAG_MINOR, "ipath_diag",
75 &diag_file_ops, &diag_cdev, &diag_class_dev);
76}
77
78void ipath_diag_cleanup(void)
79{
80 ipath_cdev_cleanup(&diag_cdev, &diag_class_dev);
81}
82
83/**
84 * ipath_read_umem64 - read a 64-bit quantity from the chip into user space
85 * @dd: the infinipath device
86 * @uaddr: the location to store the data in user memory
87 * @caddr: the source chip address (full pointer, not offset)
88 * @count: number of bytes to copy (multiple of 32 bits)
89 *
90 * This function also localizes all chip memory accesses.
91 * The copy should be written such that we read full cacheline packets
92 * from the chip. This is usually used for a single qword
93 *
94 * NOTE: This assumes the chip address is 64-bit aligned.
95 */
96static int ipath_read_umem64(struct ipath_devdata *dd, void __user *uaddr,
97 const void __iomem *caddr, size_t count)
98{
99 const u64 __iomem *reg_addr = caddr;
100 const u64 __iomem *reg_end = reg_addr + (count / sizeof(u64));
101 int ret;
102
103 /* not very efficient, but it works for now */
104 if (reg_addr < dd->ipath_kregbase ||
105 reg_end > dd->ipath_kregend) {
106 ret = -EINVAL;
107 goto bail;
108 }
109 while (reg_addr < reg_end) {
110 u64 data = readq(reg_addr);
111 if (copy_to_user(uaddr, &data, sizeof(u64))) {
112 ret = -EFAULT;
113 goto bail;
114 }
115 reg_addr++;
116 uaddr++;
117 }
118 ret = 0;
119bail:
120 return ret;
121}
122
123/**
124 * ipath_write_umem64 - write a 64-bit quantity to the chip from user space
125 * @dd: the infinipath device
126 * @caddr: the destination chip address (full pointer, not offset)
127 * @uaddr: the source of the data in user memory
128 * @count: the number of bytes to copy (multiple of 32 bits)
129 *
130 * This is usually used for a single qword
131 * NOTE: This assumes the chip address is 64-bit aligned.
132 */
133
134static int ipath_write_umem64(struct ipath_devdata *dd, void __iomem *caddr,
135 const void __user *uaddr, size_t count)
136{
137 u64 __iomem *reg_addr = caddr;
138 const u64 __iomem *reg_end = reg_addr + (count / sizeof(u64));
139 int ret;
140
141 /* not very efficient, but it works for now */
142 if (reg_addr < dd->ipath_kregbase ||
143 reg_end > dd->ipath_kregend) {
144 ret = -EINVAL;
145 goto bail;
146 }
147 while (reg_addr < reg_end) {
148 u64 data;
149 if (copy_from_user(&data, uaddr, sizeof(data))) {
150 ret = -EFAULT;
151 goto bail;
152 }
153 writeq(data, reg_addr);
154
155 reg_addr++;
156 uaddr++;
157 }
158 ret = 0;
159bail:
160 return ret;
161}
162
163/**
164 * ipath_read_umem32 - read a 32-bit quantity from the chip into user space
165 * @dd: the infinipath device
166 * @uaddr: the location to store the data in user memory
167 * @caddr: the source chip address (full pointer, not offset)
168 * @count: number of bytes to copy
169 *
170 * read 32 bit values, not 64 bit; for memories that only
171 * support 32 bit reads; usually a single dword.
172 */
173static int ipath_read_umem32(struct ipath_devdata *dd, void __user *uaddr,
174 const void __iomem *caddr, size_t count)
175{
176 const u32 __iomem *reg_addr = caddr;
177 const u32 __iomem *reg_end = reg_addr + (count / sizeof(u32));
178 int ret;
179
180 if (reg_addr < (u32 __iomem *) dd->ipath_kregbase ||
181 reg_end > (u32 __iomem *) dd->ipath_kregend) {
182 ret = -EINVAL;
183 goto bail;
184 }
185 /* not very efficient, but it works for now */
186 while (reg_addr < reg_end) {
187 u32 data = readl(reg_addr);
188 if (copy_to_user(uaddr, &data, sizeof(data))) {
189 ret = -EFAULT;
190 goto bail;
191 }
192
193 reg_addr++;
194 uaddr++;
195 }
196 ret = 0;
197bail:
198 return ret;
199}
200
201/**
202 * ipath_write_umem32 - write a 32-bit quantity to the chip from user space
203 * @dd: the infinipath device
204 * @caddr: the destination chip address (full pointer, not offset)
205 * @uaddr: the source of the data in user memory
206 * @count: number of bytes to copy
207 *
208 * write 32 bit values, not 64 bit; for memories that only
209 * support 32 bit write; usually a single dword.
210 */
211
212static int ipath_write_umem32(struct ipath_devdata *dd, void __iomem *caddr,
213 const void __user *uaddr, size_t count)
214{
215 u32 __iomem *reg_addr = caddr;
216 const u32 __iomem *reg_end = reg_addr + (count / sizeof(u32));
217 int ret;
218
219 if (reg_addr < (u32 __iomem *) dd->ipath_kregbase ||
220 reg_end > (u32 __iomem *) dd->ipath_kregend) {
221 ret = -EINVAL;
222 goto bail;
223 }
224 while (reg_addr < reg_end) {
225 u32 data;
226 if (copy_from_user(&data, uaddr, sizeof(data))) {
227 ret = -EFAULT;
228 goto bail;
229 }
230 writel(data, reg_addr);
231
232 reg_addr++;
233 uaddr++;
234 }
235 ret = 0;
236bail:
237 return ret;
238}
239
240static int ipath_diag_open(struct inode *in, struct file *fp)
241{
242 struct ipath_devdata *dd;
243 int unit = 0; /* XXX this is bogus */
244 unsigned long flags;
245 int ret;
246
247 dd = ipath_lookup(unit);
248
249 mutex_lock(&ipath_mutex);
250 spin_lock_irqsave(&ipath_devs_lock, flags);
251
252 if (ipath_diag_inuse) {
253 ret = -EBUSY;
254 goto bail;
255 }
256
257 list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
258 /*
259 * we need at least one infinipath device to be present
260 * (don't use INITTED, because we want to be able to open
261 * even if device is in freeze mode, which cleared INITTED).
262 * There is a small amount of risk to this, which is why we
263 * also verify kregbase is set.
264 */
265
266 if (!(dd->ipath_flags & IPATH_PRESENT) ||
267 !dd->ipath_kregbase)
268 continue;
269
270 ipath_diag_inuse = 1;
271 diag_set_link = 0;
272 ret = 0;
273 goto bail;
274 }
275
276 ret = -ENODEV;
277
278bail:
279 spin_unlock_irqrestore(&ipath_devs_lock, flags);
280 mutex_unlock(&ipath_mutex);
281
282 /* Only expose a way to reset the device if we
283 make it into diag mode. */
284 if (ret == 0)
285 ipath_expose_reset(&dd->pcidev->dev);
286
287 return ret;
288}
289
290static int ipath_diag_release(struct inode *i, struct file *f)
291{
292 mutex_lock(&ipath_mutex);
293 ipath_diag_inuse = 0;
294 mutex_unlock(&ipath_mutex);
295 return 0;
296}
297
298static ssize_t ipath_diag_read(struct file *fp, char __user *data,
299 size_t count, loff_t *off)
300{
301 int unit = 0; /* XXX provide for reads on other units some day */
302 struct ipath_devdata *dd;
303 void __iomem *kreg_base;
304 ssize_t ret;
305
306 dd = ipath_lookup(unit);
307 if (!dd) {
308 ret = -ENODEV;
309 goto bail;
310 }
311
312 kreg_base = dd->ipath_kregbase;
313
314 if (count == 0)
315 ret = 0;
316 else if ((count % 4) || (*off % 4))
317 /* address or length is not 32-bit aligned, hence invalid */
318 ret = -EINVAL;
319 else if ((count % 8) || (*off % 8))
320 /* address or length not 64-bit aligned; do 32-bit reads */
321 ret = ipath_read_umem32(dd, data, kreg_base + *off, count);
322 else
323 ret = ipath_read_umem64(dd, data, kreg_base + *off, count);
324
325 if (ret >= 0) {
326 *off += count;
327 ret = count;
328 }
329
330bail:
331 return ret;
332}
333
334static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
335 size_t count, loff_t *off)
336{
337 int unit = 0; /* XXX this is bogus */
338 struct ipath_devdata *dd;
339 void __iomem *kreg_base;
340 ssize_t ret;
341
342 dd = ipath_lookup(unit);
343 if (!dd) {
344 ret = -ENODEV;
345 goto bail;
346 }
347 kreg_base = dd->ipath_kregbase;
348
349 if (count == 0)
350 ret = 0;
351 else if ((count % 4) || (*off % 4))
352 /* address or length is not 32-bit aligned, hence invalid */
353 ret = -EINVAL;
354 else if ((count % 8) || (*off % 8))
355 /* address or length not 64-bit aligned; do 32-bit writes */
356 ret = ipath_write_umem32(dd, kreg_base + *off, data, count);
357 else
358 ret = ipath_write_umem64(dd, kreg_base + *off, data, count);
359
360 if (ret >= 0) {
361 *off += count;
362 ret = count;
363 }
364
365bail:
366 return ret;
367}
368
369void ipath_diag_bringup_link(struct ipath_devdata *dd)
370{
371 if (diag_set_link || (dd->ipath_flags & IPATH_LINKACTIVE))
372 return;
373
374 diag_set_link = 1;
375	ipath_cdbg(VERBOSE, "Trying to set link active for "
376 "diag pkt\n");
377 ipath_layer_set_linkstate(dd, IPATH_IB_LINKARM);
378 ipath_layer_set_linkstate(dd, IPATH_IB_LINKACTIVE);
379}
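The diag read and write handlers above share one dispatch rule: the file offset and length must both be multiples of 4, anything that is not also a multiple of 8 is serviced with 32-bit chip accesses, and fully 64-bit-aligned requests use the 64-bit path. A hedged user-space sketch of reading one 64-bit register through this interface follows; the device node path and the register offset are assumptions, since the text above only states the usual minor number (129):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* the node path is an assumption; only minor 129 is documented above */
		int fd = open("/dev/ipath_diag", O_RDONLY);
		uint64_t val;

		if (fd < 0)
			return 1;
		/* offset 0x10, length 8: both 64-bit aligned, so ipath_read_umem64() runs */
		if (pread(fd, &val, sizeof(val), 0x10) == sizeof(val))
			printf("kreg@0x10 = 0x%llx\n", (unsigned long long) val);
		close(fd);
		return 0;
	}

An offset of 0x14 would instead go through ipath_read_umem32(), and a 6-byte read would be rejected with -EINVAL.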
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
new file mode 100644
index 000000000000..58a94efb0070
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -0,0 +1,1983 @@
1/*
2 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/spinlock.h>
34#include <linux/idr.h>
35#include <linux/pci.h>
36#include <linux/delay.h>
37#include <linux/netdevice.h>
38#include <linux/vmalloc.h>
39
40#include "ipath_kernel.h"
41#include "ips_common.h"
42#include "ipath_layer.h"
43
44static void ipath_update_pio_bufs(struct ipath_devdata *);
45
46const char *ipath_get_unit_name(int unit)
47{
48 static char iname[16];
49 snprintf(iname, sizeof iname, "infinipath%u", unit);
50 return iname;
51}
52
53EXPORT_SYMBOL_GPL(ipath_get_unit_name);
54
55#define DRIVER_LOAD_MSG "PathScale " IPATH_DRV_NAME " loaded: "
56#define PFX IPATH_DRV_NAME ": "
57
58/*
59 * The size has to be longer than this string, so we can append
60 * board/chip information to it in the init code.
61 */
62const char ipath_core_version[] = IPATH_IDSTR "\n";
63
64static struct idr unit_table;
65DEFINE_SPINLOCK(ipath_devs_lock);
66LIST_HEAD(ipath_dev_list);
67
68wait_queue_head_t ipath_sma_state_wait;
69
70unsigned ipath_debug = __IPATH_INFO;
71
72module_param_named(debug, ipath_debug, uint, S_IWUSR | S_IRUGO);
73MODULE_PARM_DESC(debug, "mask for debug prints");
74EXPORT_SYMBOL_GPL(ipath_debug);
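	/*
	 * Runtime usage sketch (the module directory name below is an assumption):
	 * S_IWUSR above makes the parameter writable through sysfs, so the mask
	 * can be changed on a live system, matching the "enable or disable
	 * dynamically" note in ipath_debug.h, e.g.:
	 *   echo 0x103 > /sys/module/<driver module>/parameters/debug
	 */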
75
76MODULE_LICENSE("GPL");
77MODULE_AUTHOR("PathScale <support@pathscale.com>");
78MODULE_DESCRIPTION("Pathscale InfiniPath driver");
79
80const char *ipath_ibcstatus_str[] = {
81 "Disabled",
82 "LinkUp",
83 "PollActive",
84 "PollQuiet",
85 "SleepDelay",
86 "SleepQuiet",
87 "LState6", /* unused */
88 "LState7", /* unused */
89 "CfgDebounce",
90 "CfgRcvfCfg",
91 "CfgWaitRmt",
92 "CfgIdle",
93 "RecovRetrain",
94 "LState0xD", /* unused */
95 "RecovWaitRmt",
96 "RecovIdle",
97};
98
99/*
100 * These variables are initialized in the chip-specific files
101 * but are defined here.
102 */
103u16 ipath_gpio_sda_num, ipath_gpio_scl_num;
104u64 ipath_gpio_sda, ipath_gpio_scl;
105u64 infinipath_i_bitsextant;
106ipath_err_t infinipath_e_bitsextant, infinipath_hwe_bitsextant;
107u32 infinipath_i_rcvavail_mask, infinipath_i_rcvurg_mask;
108
109static void __devexit ipath_remove_one(struct pci_dev *);
110static int __devinit ipath_init_one(struct pci_dev *,
111 const struct pci_device_id *);
112
113/* Only needed for registration, nothing else needs this info */
114#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
115#define PCI_DEVICE_ID_INFINIPATH_HT 0xd
116#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10
117
118static const struct pci_device_id ipath_pci_tbl[] = {
119 {PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE,
120 PCI_DEVICE_ID_INFINIPATH_HT)},
121 {PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE,
122 PCI_DEVICE_ID_INFINIPATH_PE800)},
123};
124
125MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);
126
127static struct pci_driver ipath_driver = {
128 .name = IPATH_DRV_NAME,
129 .probe = ipath_init_one,
130 .remove = __devexit_p(ipath_remove_one),
131 .id_table = ipath_pci_tbl,
132};
133
134/*
135 * This is where port 0's rcvhdrtail register is written back; we also
136 * want nothing else sharing the cache line, so make it a cache line
137 * in size. Used for all units.
138 */
139volatile __le64 *ipath_port0_rcvhdrtail;
140dma_addr_t ipath_port0_rcvhdrtail_dma;
141static int port0_rcvhdrtail_refs;
142
143static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
144 u32 *bar0, u32 *bar1)
145{
146 int ret;
147
148 ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, bar0);
149 if (ret)
150 ipath_dev_err(dd, "failed to read bar0 before enable: "
151 "error %d\n", -ret);
152
153 ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, bar1);
154 if (ret)
155 ipath_dev_err(dd, "failed to read bar1 before enable: "
156 "error %d\n", -ret);
157
158 ipath_dbg("Read bar0 %x bar1 %x\n", *bar0, *bar1);
159}
160
161static void ipath_free_devdata(struct pci_dev *pdev,
162 struct ipath_devdata *dd)
163{
164 unsigned long flags;
165
166 pci_set_drvdata(pdev, NULL);
167
168 if (dd->ipath_unit != -1) {
169 spin_lock_irqsave(&ipath_devs_lock, flags);
170 idr_remove(&unit_table, dd->ipath_unit);
171 list_del(&dd->ipath_list);
172 spin_unlock_irqrestore(&ipath_devs_lock, flags);
173 }
174 dma_free_coherent(&pdev->dev, sizeof(*dd), dd, dd->ipath_dma_addr);
175}
176
177static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
178{
179 unsigned long flags;
180 struct ipath_devdata *dd;
181 dma_addr_t dma_addr;
182 int ret;
183
184 if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
185 dd = ERR_PTR(-ENOMEM);
186 goto bail;
187 }
188
189 dd = dma_alloc_coherent(&pdev->dev, sizeof(*dd), &dma_addr,
190 GFP_KERNEL);
191
192 if (!dd) {
193 dd = ERR_PTR(-ENOMEM);
194 goto bail;
195 }
196
197 dd->ipath_dma_addr = dma_addr;
198 dd->ipath_unit = -1;
199
200 spin_lock_irqsave(&ipath_devs_lock, flags);
201
202 ret = idr_get_new(&unit_table, dd, &dd->ipath_unit);
203 if (ret < 0) {
204 printk(KERN_ERR IPATH_DRV_NAME
205 ": Could not allocate unit ID: error %d\n", -ret);
206 ipath_free_devdata(pdev, dd);
207 dd = ERR_PTR(ret);
208 goto bail_unlock;
209 }
210
211 dd->pcidev = pdev;
212 pci_set_drvdata(pdev, dd);
213
214 list_add(&dd->ipath_list, &ipath_dev_list);
215
216bail_unlock:
217 spin_unlock_irqrestore(&ipath_devs_lock, flags);
218
219bail:
220 return dd;
221}
222
223static inline struct ipath_devdata *__ipath_lookup(int unit)
224{
225 return idr_find(&unit_table, unit);
226}
227
228struct ipath_devdata *ipath_lookup(int unit)
229{
230 struct ipath_devdata *dd;
231 unsigned long flags;
232
233 spin_lock_irqsave(&ipath_devs_lock, flags);
234 dd = __ipath_lookup(unit);
235 spin_unlock_irqrestore(&ipath_devs_lock, flags);
236
237 return dd;
238}
239
240int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp)
241{
242 int nunits, npresent, nup;
243 struct ipath_devdata *dd;
244 unsigned long flags;
245 u32 maxports;
246
247 nunits = npresent = nup = maxports = 0;
248
249 spin_lock_irqsave(&ipath_devs_lock, flags);
250
251 list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
252 nunits++;
253 if ((dd->ipath_flags & IPATH_PRESENT) && dd->ipath_kregbase)
254 npresent++;
255 if (dd->ipath_lid &&
256 !(dd->ipath_flags & (IPATH_DISABLED | IPATH_LINKDOWN
257 | IPATH_LINKUNK)))
258 nup++;
259 if (dd->ipath_cfgports > maxports)
260 maxports = dd->ipath_cfgports;
261 }
262
263 spin_unlock_irqrestore(&ipath_devs_lock, flags);
264
265 if (npresentp)
266 *npresentp = npresent;
267 if (nupp)
268 *nupp = nup;
269 if (maxportsp)
270 *maxportsp = maxports;
271
272 return nunits;
273}
274
275static int init_port0_rcvhdrtail(struct pci_dev *pdev)
276{
277 int ret;
278
279 mutex_lock(&ipath_mutex);
280
281 if (!ipath_port0_rcvhdrtail) {
282 ipath_port0_rcvhdrtail =
283 dma_alloc_coherent(&pdev->dev,
284 IPATH_PORT0_RCVHDRTAIL_SIZE,
285 &ipath_port0_rcvhdrtail_dma,
286 GFP_KERNEL);
287
288 if (!ipath_port0_rcvhdrtail) {
289 ret = -ENOMEM;
290 goto bail;
291 }
292 }
293 port0_rcvhdrtail_refs++;
294 ret = 0;
295
296bail:
297 mutex_unlock(&ipath_mutex);
298
299 return ret;
300}
301
302static void cleanup_port0_rcvhdrtail(struct pci_dev *pdev)
303{
304 mutex_lock(&ipath_mutex);
305
306 if (!--port0_rcvhdrtail_refs) {
307 dma_free_coherent(&pdev->dev, IPATH_PORT0_RCVHDRTAIL_SIZE,
308 (void *) ipath_port0_rcvhdrtail,
309 ipath_port0_rcvhdrtail_dma);
310 ipath_port0_rcvhdrtail = NULL;
311 }
312
313 mutex_unlock(&ipath_mutex);
314}
315
316/*
317 * These next two routines are placeholders in case we don't have per-arch
318 * code for controlling write combining. If explicit control of write
319 * combining is not available, performance will probably be awful.
320 */
321
322int __attribute__((weak)) ipath_enable_wc(struct ipath_devdata *dd)
323{
324 return -EOPNOTSUPP;
325}
326
327void __attribute__((weak)) ipath_disable_wc(struct ipath_devdata *dd)
328{
329}
330
331static int __devinit ipath_init_one(struct pci_dev *pdev,
332 const struct pci_device_id *ent)
333{
334 int ret, len, j;
335 struct ipath_devdata *dd;
336 unsigned long long addr;
337 u32 bar0 = 0, bar1 = 0;
338 u8 rev;
339
340 ret = init_port0_rcvhdrtail(pdev);
341 if (ret < 0) {
342 printk(KERN_ERR IPATH_DRV_NAME
343 ": Could not allocate port0_rcvhdrtail: error %d\n",
344 -ret);
345 goto bail;
346 }
347
348 dd = ipath_alloc_devdata(pdev);
349 if (IS_ERR(dd)) {
350 ret = PTR_ERR(dd);
351 printk(KERN_ERR IPATH_DRV_NAME
352 ": Could not allocate devdata: error %d\n", -ret);
353 goto bail_rcvhdrtail;
354 }
355
356 ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit);
357
358 read_bars(dd, pdev, &bar0, &bar1);
359
360 ret = pci_enable_device(pdev);
361 if (ret) {
362 /* This can happen iff:
363 *
364 * We did a chip reset, and then failed to reprogram the
365 * BAR, or the chip reset due to an internal error. We then
366 * unloaded the driver and reloaded it.
367 *
368 * Both reset cases set the BAR back to initial state. For
369 * the latter case, the AER sticky error bit at offset 0x718
370 * should be set, but the Linux kernel doesn't yet know
371 * about that, it appears. If the original BAR was retained
372 * in the kernel data structures, this may be OK.
373 */
374 ipath_dev_err(dd, "enable unit %d failed: error %d\n",
375 dd->ipath_unit, -ret);
376 goto bail_devdata;
377 }
378 addr = pci_resource_start(pdev, 0);
379 len = pci_resource_len(pdev, 0);
380 ipath_cdbg(VERBOSE, "regbase (0) %llx len %d irq %x, vend %x/%x "
381 "driver_data %lx\n", addr, len, pdev->irq, ent->vendor,
382 ent->device, ent->driver_data);
383
384 read_bars(dd, pdev, &bar0, &bar1);
385
386 if (!bar1 && !(bar0 & ~0xf)) {
387 if (addr) {
388 dev_info(&pdev->dev, "BAR is 0 (probable RESET), "
389 "rewriting as %llx\n", addr);
390 ret = pci_write_config_dword(
391 pdev, PCI_BASE_ADDRESS_0, addr);
392 if (ret) {
393 ipath_dev_err(dd, "rewrite of BAR0 "
394 "failed: err %d\n", -ret);
395 goto bail_disable;
396 }
397 ret = pci_write_config_dword(
398 pdev, PCI_BASE_ADDRESS_1, addr >> 32);
399 if (ret) {
400 ipath_dev_err(dd, "rewrite of BAR1 "
401 "failed: err %d\n", -ret);
402 goto bail_disable;
403 }
404 } else {
405 ipath_dev_err(dd, "BAR is 0 (probable RESET), "
406 "not usable until reboot\n");
407 ret = -ENODEV;
408 goto bail_disable;
409 }
410 }
411
412 ret = pci_request_regions(pdev, IPATH_DRV_NAME);
413 if (ret) {
414 dev_info(&pdev->dev, "pci_request_regions unit %u fails: "
415 "err %d\n", dd->ipath_unit, -ret);
416 goto bail_disable;
417 }
418
419 ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
420 if (ret) {
421 dev_info(&pdev->dev, "pci_set_dma_mask unit %u "
422 "fails: %d\n", dd->ipath_unit, ret);
423 goto bail_regions;
424 }
425
426 pci_set_master(pdev);
427
428 /*
429 * Save BARs to rewrite after device reset. Save all 64 bits of
430 * BAR, just in case.
431 */
432 dd->ipath_pcibar0 = addr;
433 dd->ipath_pcibar1 = addr >> 32;
434 dd->ipath_deviceid = ent->device; /* save for later use */
435 dd->ipath_vendorid = ent->vendor;
436
437 /* setup the chip-specific functions, as early as possible. */
438 switch (ent->device) {
439 case PCI_DEVICE_ID_INFINIPATH_HT:
440 ipath_init_ht400_funcs(dd);
441 break;
442 case PCI_DEVICE_ID_INFINIPATH_PE800:
443 ipath_init_pe800_funcs(dd);
444 break;
445 default:
446 ipath_dev_err(dd, "Found unknown PathScale deviceid 0x%x, "
447 "failing\n", ent->device);
448 return -ENODEV;
449 }
450
451 for (j = 0; j < 6; j++) {
452 if (!pdev->resource[j].start)
453 continue;
454 ipath_cdbg(VERBOSE, "BAR %d start %lx, end %lx, len %lx\n",
455 j, pdev->resource[j].start,
456 pdev->resource[j].end,
457 pci_resource_len(pdev, j));
458 }
459
460 if (!addr) {
461 ipath_dev_err(dd, "No valid address in BAR 0!\n");
462 ret = -ENODEV;
463 goto bail_regions;
464 }
465
466 dd->ipath_deviceid = ent->device; /* save for later use */
467 dd->ipath_vendorid = ent->vendor;
468
469 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
470 if (ret) {
471 ipath_dev_err(dd, "Failed to read PCI revision ID unit "
472 "%u: err %d\n", dd->ipath_unit, -ret);
473 goto bail_regions; /* shouldn't ever happen */
474 }
475 dd->ipath_pcirev = rev;
476
477 dd->ipath_kregbase = ioremap_nocache(addr, len);
478
479 if (!dd->ipath_kregbase) {
480 ipath_dbg("Unable to map io addr %llx to kvirt, failing\n",
481 addr);
482 ret = -ENOMEM;
483 goto bail_iounmap;
484 }
485 dd->ipath_kregend = (u64 __iomem *)
486 ((void __iomem *)dd->ipath_kregbase + len);
487 dd->ipath_physaddr = addr; /* used for io_remap, etc. */
488 /* for user mmap */
489 dd->ipath_kregvirt = (u64 __iomem *) phys_to_virt(addr);
490 ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p "
491 "kregvirt %p\n", addr, dd->ipath_kregbase,
492 dd->ipath_kregvirt);
493
494 /*
495 * clear ipath_flags here instead of in ipath_init_chip as it is set
496 * by ipath_setup_htconfig.
497 */
498 dd->ipath_flags = 0;
499
500 if (dd->ipath_f_bus(dd, pdev))
501 ipath_dev_err(dd, "Failed to setup config space; "
502 "continuing anyway\n");
503
504 /*
505 * set up our interrupt handler; SA_SHIRQ probably not needed,
506 * since MSI interrupts shouldn't be shared but won't hurt for now.
507 * Check for a zero irq only after we return from the chip-specific
508 * bus setup, since that setup can change the irq.
509 */
510 if (!pdev->irq)
511 ipath_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
512 "work\n");
513 else {
514 ret = request_irq(pdev->irq, ipath_intr, SA_SHIRQ,
515 IPATH_DRV_NAME, dd);
516 if (ret) {
517 ipath_dev_err(dd, "Couldn't setup irq handler, "
518 "irq=%u: %d\n", pdev->irq, ret);
519 goto bail_iounmap;
520 }
521 }
522
523 ret = ipath_init_chip(dd, 0); /* do the chip-specific init */
524 if (ret)
525 goto bail_iounmap;
526
527 ret = ipath_enable_wc(dd);
528
529 if (ret) {
530 ipath_dev_err(dd, "Write combining not enabled "
531 "(err %d): performance may be poor\n",
532 -ret);
533 ret = 0;
534 }
535
536 ipath_device_create_group(&pdev->dev, dd);
537 ipathfs_add_device(dd);
538 ipath_user_add(dd);
539 ipath_layer_add(dd);
540
541 goto bail;
542
543bail_iounmap:
544 iounmap((volatile void __iomem *) dd->ipath_kregbase);
545
546bail_regions:
547 pci_release_regions(pdev);
548
549bail_disable:
550 pci_disable_device(pdev);
551
552bail_devdata:
553 ipath_free_devdata(pdev, dd);
554
555bail_rcvhdrtail:
556 cleanup_port0_rcvhdrtail(pdev);
557
558bail:
559 return ret;
560}
561
562static void __devexit ipath_remove_one(struct pci_dev *pdev)
563{
564 struct ipath_devdata *dd;
565
566 ipath_cdbg(VERBOSE, "removing, pdev=%p\n", pdev);
567 if (!pdev)
568 return;
569
570 dd = pci_get_drvdata(pdev);
571 ipath_layer_del(dd);
572 ipath_user_del(dd);
573 ipathfs_remove_device(dd);
574 ipath_device_remove_group(&pdev->dev, dd);
575 ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
576 "unit %u\n", dd, (u32) dd->ipath_unit);
577 if (dd->ipath_kregbase) {
578 ipath_cdbg(VERBOSE, "Unmapping kregbase %p\n",
579 dd->ipath_kregbase);
580 iounmap((volatile void __iomem *) dd->ipath_kregbase);
581 dd->ipath_kregbase = NULL;
582 }
583 pci_release_regions(pdev);
584 ipath_cdbg(VERBOSE, "calling pci_disable_device\n");
585 pci_disable_device(pdev);
586
587 ipath_free_devdata(pdev, dd);
588 cleanup_port0_rcvhdrtail(pdev);
589}
590
591/* general driver use */
592DEFINE_MUTEX(ipath_mutex);
593
594static DEFINE_SPINLOCK(ipath_pioavail_lock);
595
596/**
597 * ipath_disarm_piobufs - cancel a range of PIO buffers
598 * @dd: the infinipath device
599 * @first: the first PIO buffer to cancel
600 * @cnt: the number of PIO buffers to cancel
601 *
602 * cancel a range of PIO buffers, used when they might be armed, but
603 * not triggered. Used at init to ensure buffer state, and also user
604 * process close, in case it died while writing to a PIO buffer
605 * Also after errors.
606 */
607void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
608 unsigned cnt)
609{
610 unsigned i, last = first + cnt;
611 u64 sendctrl, sendorig;
612
613 ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
614 sendorig = dd->ipath_sendctrl | INFINIPATH_S_DISARM;
615 for (i = first; i < last; i++) {
616 sendctrl = sendorig |
617 (i << INFINIPATH_S_DISARMPIOBUF_SHIFT);
618 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
619 sendctrl);
620 }
621
622 /*
623 * Write it again with current value, in case ipath_sendctrl changed
624 * while we were looping; no critical bits that would require
625 * locking.
626 *
627 * Write a 0, and then the original value, reading scratch in
628 * between. This seems to avoid a chip timing race that causes
629 * pioavail updates to memory to stop.
630 */
631 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
632 0);
633 sendorig = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
634 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
635 dd->ipath_sendctrl);
636}
637
638/**
639 * ipath_wait_linkstate - wait for an IB link state change to occur
640 * @dd: the infinipath device
641 * @state: the state to wait for
642 * @msecs: the number of milliseconds to wait
643 *
644 * wait up to msecs milliseconds for IB link state change to occur for
645 * now, take the easy polling route. Currently used only by
646 * ipath_layer_set_linkstate. Returns 0 if state reached, otherwise
647 * -ETIMEDOUT state can have multiple states set, for any of several
648 * transitions.
649 */
650int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs)
651{
652 dd->ipath_sma_state_wanted = state;
653 wait_event_interruptible_timeout(ipath_sma_state_wait,
654 (dd->ipath_flags & state),
655 msecs_to_jiffies(msecs));
656 dd->ipath_sma_state_wanted = 0;
657
658 if (!(dd->ipath_flags & state)) {
659 u64 val;
660 ipath_cdbg(SMA, "Didn't reach linkstate %s within %u ms\n",
661 /* test INIT ahead of DOWN, both can be set */
662 (state & IPATH_LINKINIT) ? "INIT" :
663 ((state & IPATH_LINKDOWN) ? "DOWN" :
664 ((state & IPATH_LINKARMED) ? "ARM" : "ACTIVE")),
665 msecs);
666 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
667 ipath_cdbg(VERBOSE, "ibcc=%llx ibcstatus=%llx (%s)\n",
668 (unsigned long long) ipath_read_kreg64(
669 dd, dd->ipath_kregs->kr_ibcctrl),
670 (unsigned long long) val,
671 ipath_ibcstatus_str[val & 0xf]);
672 }
673 return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
674}
675
676void ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
677{
678 *buf = '\0';
679 if (err & INFINIPATH_E_RHDRLEN)
680 strlcat(buf, "rhdrlen ", blen);
681 if (err & INFINIPATH_E_RBADTID)
682 strlcat(buf, "rbadtid ", blen);
683 if (err & INFINIPATH_E_RBADVERSION)
684 strlcat(buf, "rbadversion ", blen);
685 if (err & INFINIPATH_E_RHDR)
686 strlcat(buf, "rhdr ", blen);
687 if (err & INFINIPATH_E_RLONGPKTLEN)
688 strlcat(buf, "rlongpktlen ", blen);
689 if (err & INFINIPATH_E_RSHORTPKTLEN)
690 strlcat(buf, "rshortpktlen ", blen);
691 if (err & INFINIPATH_E_RMAXPKTLEN)
692 strlcat(buf, "rmaxpktlen ", blen);
693 if (err & INFINIPATH_E_RMINPKTLEN)
694 strlcat(buf, "rminpktlen ", blen);
695 if (err & INFINIPATH_E_RFORMATERR)
696 strlcat(buf, "rformaterr ", blen);
697 if (err & INFINIPATH_E_RUNSUPVL)
698 strlcat(buf, "runsupvl ", blen);
699 if (err & INFINIPATH_E_RUNEXPCHAR)
700 strlcat(buf, "runexpchar ", blen);
701 if (err & INFINIPATH_E_RIBFLOW)
702 strlcat(buf, "ribflow ", blen);
703 if (err & INFINIPATH_E_REBP)
704 strlcat(buf, "EBP ", blen);
705 if (err & INFINIPATH_E_SUNDERRUN)
706 strlcat(buf, "sunderrun ", blen);
707 if (err & INFINIPATH_E_SPIOARMLAUNCH)
708 strlcat(buf, "spioarmlaunch ", blen);
709 if (err & INFINIPATH_E_SUNEXPERRPKTNUM)
710 strlcat(buf, "sunexperrpktnum ", blen);
711 if (err & INFINIPATH_E_SDROPPEDDATAPKT)
712 strlcat(buf, "sdroppeddatapkt ", blen);
713 if (err & INFINIPATH_E_SDROPPEDSMPPKT)
714 strlcat(buf, "sdroppedsmppkt ", blen);
715 if (err & INFINIPATH_E_SMAXPKTLEN)
716 strlcat(buf, "smaxpktlen ", blen);
717 if (err & INFINIPATH_E_SMINPKTLEN)
718 strlcat(buf, "sminpktlen ", blen);
719 if (err & INFINIPATH_E_SUNSUPVL)
720 strlcat(buf, "sunsupVL ", blen);
721 if (err & INFINIPATH_E_SPKTLEN)
722 strlcat(buf, "spktlen ", blen);
723 if (err & INFINIPATH_E_INVALIDADDR)
724 strlcat(buf, "invalidaddr ", blen);
725 if (err & INFINIPATH_E_RICRC)
726 strlcat(buf, "CRC ", blen);
727 if (err & INFINIPATH_E_RVCRC)
728 strlcat(buf, "VCRC ", blen);
729 if (err & INFINIPATH_E_RRCVEGRFULL)
730 strlcat(buf, "rcvegrfull ", blen);
731 if (err & INFINIPATH_E_RRCVHDRFULL)
732 strlcat(buf, "rcvhdrfull ", blen);
733 if (err & INFINIPATH_E_IBSTATUSCHANGED)
734 strlcat(buf, "ibcstatuschg ", blen);
735 if (err & INFINIPATH_E_RIBLOSTLINK)
736 strlcat(buf, "riblostlink ", blen);
737 if (err & INFINIPATH_E_HARDWARE)
738 strlcat(buf, "hardware ", blen);
739 if (err & INFINIPATH_E_RESET)
740 strlcat(buf, "reset ", blen);
741}
742
743/**
744 * get_rhf_errstring - decode RHF errors
745 * @err: the err number
746 * @msg: the output buffer
747 * @len: the length of the output buffer
748 *
749 * only used one place now, may want more later
750 */
751static void get_rhf_errstring(u32 err, char *msg, size_t len)
752{
753	/* start with an empty string, so we need not track which error comes first */
754 *msg = '\0';
755
756 if (err & INFINIPATH_RHF_H_ICRCERR)
757 strlcat(msg, "icrcerr ", len);
758 if (err & INFINIPATH_RHF_H_VCRCERR)
759 strlcat(msg, "vcrcerr ", len);
760 if (err & INFINIPATH_RHF_H_PARITYERR)
761 strlcat(msg, "parityerr ", len);
762 if (err & INFINIPATH_RHF_H_LENERR)
763 strlcat(msg, "lenerr ", len);
764 if (err & INFINIPATH_RHF_H_MTUERR)
765 strlcat(msg, "mtuerr ", len);
766 if (err & INFINIPATH_RHF_H_IHDRERR)
767 /* infinipath hdr checksum error */
768 strlcat(msg, "ipathhdrerr ", len);
769 if (err & INFINIPATH_RHF_H_TIDERR)
770 strlcat(msg, "tiderr ", len);
771 if (err & INFINIPATH_RHF_H_MKERR)
772 /* bad port, offset, etc. */
773 strlcat(msg, "invalid ipathhdr ", len);
774 if (err & INFINIPATH_RHF_H_IBERR)
775 strlcat(msg, "iberr ", len);
776 if (err & INFINIPATH_RHF_L_SWA)
777 strlcat(msg, "swA ", len);
778 if (err & INFINIPATH_RHF_L_SWB)
779 strlcat(msg, "swB ", len);
780}
781
782/**
783 * ipath_get_egrbuf - get an eager buffer
784 * @dd: the infinipath device
785 * @bufnum: the eager buffer to get
786 * @err: unused
787 *
788 * must only be called if ipath_pd[port] is known to be allocated
789 */
790static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum,
791 int err)
792{
793 return dd->ipath_port0_skbs ?
794 (void *)dd->ipath_port0_skbs[bufnum]->data : NULL;
795}
796
797/**
798 * ipath_alloc_skb - allocate an skb and buffer with possible constraints
799 * @dd: the infinipath device
800 * @gfp_mask: the sk_buff SFP mask
801 */
802struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd,
803 gfp_t gfp_mask)
804{
805 struct sk_buff *skb;
806 u32 len;
807
808 /*
809 * The only fully supported way to handle this is to allocate lots of
810 * extra space, align as needed, and then do skb_reserve(). That wastes
811 * a lot of memory... I'll have to hack this into infinipath_copy
812 * also.
813 */
814
815 /*
816 * We need 4 extra bytes for unaligned transfer copying
817 */
818 if (dd->ipath_flags & IPATH_4BYTE_TID) {
819	/* we need a 2KB (1 << 11) multiple alignment, and there is no way
820 * to do it except to allocate extra and then skb_reserve
821 * enough to bring it up to the right alignment.
822 */
823 len = dd->ipath_ibmaxlen + 4 + (1 << 11) - 1;
824 }
825 else
826 len = dd->ipath_ibmaxlen + 4;
827 skb = __dev_alloc_skb(len, gfp_mask);
828 if (!skb) {
829 ipath_dev_err(dd, "Failed to allocate skbuff, length %u\n",
830 len);
831 goto bail;
832 }
833 if (dd->ipath_flags & IPATH_4BYTE_TID) {
834 u32 una = ((1 << 11) - 1) & (unsigned long)(skb->data + 4);
835 if (una)
836 skb_reserve(skb, 4 + (1 << 11) - una);
837 else
838 skb_reserve(skb, 4);
839 } else
840 skb_reserve(skb, 4);
841
842bail:
843 return skb;
844}
845
846/**
847 * ipath_rcv_layer - receive a packet for the layered (ethernet) driver
848 * @dd: the infinipath device
849 * @etail: the sk_buff number
850 * @tlen: the total packet length
851 * @hdr: the ethernet header
852 *
853 * Separate routine for better overall optimization
854 */
855static void ipath_rcv_layer(struct ipath_devdata *dd, u32 etail,
856 u32 tlen, struct ether_header *hdr)
857{
858 u32 elen;
859 u8 pad, *bthbytes;
860 struct sk_buff *skb, *nskb;
861
862 if (dd->ipath_port0_skbs && hdr->sub_opcode == OPCODE_ENCAP) {
863 /*
864 * Allocate a new sk_buff to replace the one we give
865 * to the network stack.
866 */
867 nskb = ipath_alloc_skb(dd, GFP_ATOMIC);
868 if (!nskb) {
869 /* count OK packets that we drop */
870 ipath_stats.sps_krdrops++;
871 return;
872 }
873
874 bthbytes = (u8 *) hdr->bth;
875 pad = (bthbytes[1] >> 4) & 3;
876 /* +CRC32 */
877 elen = tlen - (sizeof(*hdr) + pad + sizeof(u32));
878
879 skb = dd->ipath_port0_skbs[etail];
880 dd->ipath_port0_skbs[etail] = nskb;
881 skb_put(skb, elen);
882
883 dd->ipath_f_put_tid(dd, etail + (u64 __iomem *)
884 ((char __iomem *) dd->ipath_kregbase
885 + dd->ipath_rcvegrbase), 0,
886 virt_to_phys(nskb->data));
887
888 __ipath_layer_rcv(dd, hdr, skb);
889
890 /* another ether packet received */
891 ipath_stats.sps_ether_rpkts++;
892 }
893 else if (hdr->sub_opcode == OPCODE_LID_ARP)
894 __ipath_layer_rcv_lid(dd, hdr);
895}
896
897/*
898 * ipath_kreceive - receive a packet
899 * @dd: the infinipath device
900 *
901 * called from interrupt handler for errors or receive interrupt
902 */
903void ipath_kreceive(struct ipath_devdata *dd)
904{
905 u64 *rc;
906 void *ebuf;
907 const u32 rsize = dd->ipath_rcvhdrentsize; /* words */
908 const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize; /* words */
909 u32 etail = -1, l, hdrqtail;
910 struct ips_message_header *hdr;
911 u32 eflags, i, etype, tlen, pkttot = 0;
912 static u64 totcalls; /* stats, may eventually remove */
913 char emsg[128];
914
915 if (!dd->ipath_hdrqtailptr) {
916 ipath_dev_err(dd,
917 "hdrqtailptr not set, can't do receives\n");
918 goto bail;
919 }
920
921 /* There is already a thread processing this queue. */
922 if (test_and_set_bit(0, &dd->ipath_rcv_pending))
923 goto bail;
924
925 if (dd->ipath_port0head ==
926 (u32)le64_to_cpu(*dd->ipath_hdrqtailptr))
927 goto done;
928
929gotmore:
930 /*
931 * read only once at start. In a flood situation, this helps
932 * performance slightly. If more arrive while we are processing,
933 * we'll come back here and do them
934 */
935 hdrqtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);
936
937 for (i = 0, l = dd->ipath_port0head; l != hdrqtail; i++) {
938 u32 qp;
939 u8 *bthbytes;
940
941 rc = (u64 *) (dd->ipath_pd[0]->port_rcvhdrq + (l << 2));
942 hdr = (struct ips_message_header *)&rc[1];
943 /*
944 * could make a network order version of IPATH_KD_QP, and
945 * do the obvious shift before masking to speed this up.
946 */
947 qp = ntohl(hdr->bth[1]) & 0xffffff;
948 bthbytes = (u8 *) hdr->bth;
949
950 eflags = ips_get_hdr_err_flags((__le32 *) rc);
951 etype = ips_get_rcv_type((__le32 *) rc);
952 /* total length */
953 tlen = ips_get_length_in_bytes((__le32 *) rc);
954 ebuf = NULL;
955 if (etype != RCVHQ_RCV_TYPE_EXPECTED) {
956 /*
957 * it turns out that the chips uses an eager buffer
958 * for all non-expected packets, whether it "needs"
959 * one or not. So always get the index, but don't
960 * set ebuf (so we try to copy data) unless the
961 * length requires it.
962 */
963 etail = ips_get_index((__le32 *) rc);
964 if (tlen > sizeof(*hdr) ||
965 etype == RCVHQ_RCV_TYPE_NON_KD)
966 ebuf = ipath_get_egrbuf(dd, etail, 0);
967 }
968
969 /*
970 * both tiderr and ipathhdrerr are set for all plain IB
971 * packets; only ipathhdrerr should be set.
972 */
973
974 if (etype != RCVHQ_RCV_TYPE_NON_KD && etype !=
975 RCVHQ_RCV_TYPE_ERROR && ips_get_ipath_ver(
976 hdr->iph.ver_port_tid_offset) !=
977 IPS_PROTO_VERSION) {
978 ipath_cdbg(PKT, "Bad InfiniPath protocol version "
979 "%x\n", etype);
980 }
981
982 if (eflags & ~(INFINIPATH_RHF_H_TIDERR |
983 INFINIPATH_RHF_H_IHDRERR)) {
984 get_rhf_errstring(eflags, emsg, sizeof emsg);
985 ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
986 "tlen=%x opcode=%x egridx=%x: %s\n",
987 eflags, l, etype, tlen, bthbytes[0],
988 ips_get_index((__le32 *) rc), emsg);
989 } else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
990 int ret = __ipath_verbs_rcv(dd, rc + 1,
991 ebuf, tlen);
992 if (ret == -ENODEV)
993 ipath_cdbg(VERBOSE,
994 "received IB packet, "
995 "not SMA (QP=%x)\n", qp);
996 } else if (etype == RCVHQ_RCV_TYPE_EAGER) {
997 if (qp == IPATH_KD_QP &&
998 bthbytes[0] == ipath_layer_rcv_opcode &&
999 ebuf)
1000 ipath_rcv_layer(dd, etail, tlen,
1001 (struct ether_header *)hdr);
1002 else
1003 ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
1004 "qp=%x), len %x; ignored\n",
1005 etype, bthbytes[0], qp, tlen);
1006 }
1007 else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
1008 ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
1009 be32_to_cpu(hdr->bth[0]) & 0xff);
1010 else if (eflags & (INFINIPATH_RHF_H_TIDERR |
1011 INFINIPATH_RHF_H_IHDRERR)) {
1012 /*
1013 * This is a type 3 packet, only the LRH is in the
1014 * rcvhdrq, the rest of the header is in the eager
1015 * buffer.
1016 */
1017 u8 opcode;
1018 if (ebuf) {
1019 bthbytes = (u8 *) ebuf;
1020 opcode = *bthbytes;
1021 }
1022 else
1023 opcode = 0;
1024 get_rhf_errstring(eflags, emsg, sizeof emsg);
1025 ipath_dbg("Err %x (%s), opcode %x, egrbuf %x, "
1026 "len %x\n", eflags, emsg, opcode, etail,
1027 tlen);
1028 } else {
1029 /*
1030 * error packet, type of error unknown.
1031 * Probably type 3, but we don't know, so don't
1032 * even try to print the opcode, etc.
1033 */
1034 ipath_dbg("Error Pkt, but no eflags! egrbuf %x, "
1035 "len %x\nhdrq@%lx;hdrq+%x rhf: %llx; "
1036 "hdr %llx %llx %llx %llx %llx\n",
1037 etail, tlen, (unsigned long) rc, l,
1038 (unsigned long long) rc[0],
1039 (unsigned long long) rc[1],
1040 (unsigned long long) rc[2],
1041 (unsigned long long) rc[3],
1042 (unsigned long long) rc[4],
1043 (unsigned long long) rc[5]);
1044 }
1045 l += rsize;
1046 if (l >= maxcnt)
1047 l = 0;
1048 /*
1049 * update for each packet, to help prevent overflows if we
1050 * have lots of packets.
1051 */
1052 (void)ipath_write_ureg(dd, ur_rcvhdrhead,
1053 dd->ipath_rhdrhead_intr_off | l, 0);
1054 if (etype != RCVHQ_RCV_TYPE_EXPECTED)
1055 (void)ipath_write_ureg(dd, ur_rcvegrindexhead,
1056 etail, 0);
1057 }
1058
1059 pkttot += i;
1060
1061 dd->ipath_port0head = l;
1062
1063 if (hdrqtail != (u32)le64_to_cpu(*dd->ipath_hdrqtailptr))
1064 /* more arrived while we handled first batch */
1065 goto gotmore;
1066
1067 if (pkttot > ipath_stats.sps_maxpkts_call)
1068 ipath_stats.sps_maxpkts_call = pkttot;
1069 ipath_stats.sps_port0pkts += pkttot;
1070 ipath_stats.sps_avgpkts_call =
1071 ipath_stats.sps_port0pkts / ++totcalls;
1072
1073done:
1074 clear_bit(0, &dd->ipath_rcv_pending);
1075 smp_mb__after_clear_bit();
1076
1077bail:;
1078}
1079
1080/**
1081 * ipath_update_pio_bufs - update shadow copy of the PIO availability map
1082 * @dd: the infinipath device
1083 *
1084 * called whenever our local copy indicates we have run out of send buffers
1085 * NOTE: This can be called from interrupt context by some code
1086 * and from non-interrupt context by ipath_getpiobuf().
1087 */
1088
1089static void ipath_update_pio_bufs(struct ipath_devdata *dd)
1090{
1091 unsigned long flags;
1092 int i;
1093 const unsigned piobregs = (unsigned)dd->ipath_pioavregs;
1094
1095 /* If the generation (check) bits have changed, then we update the
1096 * busy bit for the corresponding PIO buffer. This algorithm will
1097 * modify positions to the value they already have in some cases
1098 * (i.e., no change), but it's faster than changing only the bits
1099 * that have changed.
1100 *
1101 * We would like to do this atomically, to avoid spinlocks in the
1102 * critical send path, but that's not really possible, given the
1103 * type of changes, and that this routine could be called on
1104 * multiple cpu's simultaneously, so we lock in this routine only,
1105 * to avoid conflicting updates; all we change is the shadow, and
1106 * it's a single 64 bit memory location, so by definition the update
1107 * is atomic in terms of what other cpu's can see in testing the
1108 * bits. The spin_lock overhead isn't too bad, since it only
1109 * happens when all buffers are in use, so only cpu overhead, not
1110 * latency or bandwidth is affected.
1111 */
1112#define _IPATH_ALL_CHECKBITS 0x5555555555555555ULL
1113 if (!dd->ipath_pioavailregs_dma) {
1114 ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n");
1115 return;
1116 }
1117 if (ipath_debug & __IPATH_VERBDBG) {
1118 /* only if packet debug and verbose */
1119 volatile __le64 *dma = dd->ipath_pioavailregs_dma;
1120 unsigned long *shadow = dd->ipath_pioavailshadow;
1121
1122 ipath_cdbg(PKT, "Refill avail, dma0=%llx shad0=%lx, "
1123 "d1=%llx s1=%lx, d2=%llx s2=%lx, d3=%llx "
1124 "s3=%lx\n",
1125 (unsigned long long) le64_to_cpu(dma[0]),
1126 shadow[0],
1127 (unsigned long long) le64_to_cpu(dma[1]),
1128 shadow[1],
1129 (unsigned long long) le64_to_cpu(dma[2]),
1130 shadow[2],
1131 (unsigned long long) le64_to_cpu(dma[3]),
1132 shadow[3]);
1133 if (piobregs > 4)
1134 ipath_cdbg(
1135 PKT, "2nd group, dma4=%llx shad4=%lx, "
1136 "d5=%llx s5=%lx, d6=%llx s6=%lx, "
1137 "d7=%llx s7=%lx\n",
1138 (unsigned long long) le64_to_cpu(dma[4]),
1139 shadow[4],
1140 (unsigned long long) le64_to_cpu(dma[5]),
1141 shadow[5],
1142 (unsigned long long) le64_to_cpu(dma[6]),
1143 shadow[6],
1144 (unsigned long long) le64_to_cpu(dma[7]),
1145 shadow[7]);
1146 }
1147 spin_lock_irqsave(&ipath_pioavail_lock, flags);
1148 for (i = 0; i < piobregs; i++) {
1149 u64 pchbusy, pchg, piov, pnew;
1150 /*
1151 * Chip Errata: bug 6641; even and odd qwords>3 are swapped
1152 */
1153 if (i > 3) {
1154 if (i & 1)
1155 piov = le64_to_cpu(
1156 dd->ipath_pioavailregs_dma[i - 1]);
1157 else
1158 piov = le64_to_cpu(
1159 dd->ipath_pioavailregs_dma[i + 1]);
1160 } else
1161 piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
1162 pchg = _IPATH_ALL_CHECKBITS &
1163 ~(dd->ipath_pioavailshadow[i] ^ piov);
1164 pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT;
1165 if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
1166 pnew = dd->ipath_pioavailshadow[i] & ~pchbusy;
1167 pnew |= piov & pchbusy;
1168 dd->ipath_pioavailshadow[i] = pnew;
1169 }
1170 }
1171 spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
1172}
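	/*
	 * Worked example of the shadow update above (illustrative only; it assumes
	 * INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT == 1, i.e. each buffer owns a
	 * generation/"check" bit in an even position and a busy bit in the odd
	 * position above it, which matches the 2 * i / 2 * i + 1 usage in
	 * ipath_getpiobuf() below). Looking only at buffer 0's two bits:
	 *
	 *   shadow[i] = ..11b (busy=1, check=1), piov = ..01b (busy=0, check=1)
	 *   pchg    = 0x5555.. & ~(shadow[i] ^ piov)  -> bit 0 set (check bits agree)
	 *   pchbusy = pchg << 1                       -> bit 1 set
	 *   pchbusy & shadow[i] is non-zero, so the busy bit is taken from piov:
	 *   pnew    = (shadow[i] & ~pchbusy) | (piov & pchbusy) -> ..01b, buffer free
	 */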
1173
1174/**
1175 * ipath_setrcvhdrsize - set the receive header size
1176 * @dd: the infinipath device
1177 * @rhdrsize: the receive header size
1178 *
1179 * called from user init code, and also layered driver init
1180 */
1181int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
1182{
1183 int ret = 0;
1184
1185 if (dd->ipath_flags & IPATH_RCVHDRSZ_SET) {
1186 if (dd->ipath_rcvhdrsize != rhdrsize) {
1187 dev_info(&dd->pcidev->dev,
1188 "Error: can't set protocol header "
1189 "size %u, already %u\n",
1190 rhdrsize, dd->ipath_rcvhdrsize);
1191 ret = -EAGAIN;
1192 } else
1193 ipath_cdbg(VERBOSE, "Reuse same protocol header "
1194 "size %u\n", dd->ipath_rcvhdrsize);
1195 } else if (rhdrsize > (dd->ipath_rcvhdrentsize -
1196 (sizeof(u64) / sizeof(u32)))) {
1197 ipath_dbg("Error: can't set protocol header size %u "
1198 "(> max %u)\n", rhdrsize,
1199 dd->ipath_rcvhdrentsize -
1200 (u32) (sizeof(u64) / sizeof(u32)));
1201 ret = -EOVERFLOW;
1202 } else {
1203 dd->ipath_flags |= IPATH_RCVHDRSZ_SET;
1204 dd->ipath_rcvhdrsize = rhdrsize;
1205 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
1206 dd->ipath_rcvhdrsize);
1207 ipath_cdbg(VERBOSE, "Set protocol header size to %u\n",
1208 dd->ipath_rcvhdrsize);
1209 }
1210 return ret;
1211}
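	/*
	 * Concrete reading of the size check above (the entry size is illustrative):
	 * the first u64 of each rcvhdrq entry is not available to the protocol
	 * header (ipath_kreceive() reads the header starting at rc[1]), which is
	 * the sizeof(u64) / sizeof(u32) == 2 words subtracted here. So with an
	 * entry size of 16 32-bit words, the largest accepted rhdrsize is
	 * 16 - 2 = 14 words; anything larger fails with -EOVERFLOW.
	 */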
1212
1213/**
1214 * ipath_getpiobuf - find an available pio buffer
1215 * @dd: the infinipath device
1216 * @pbufnum: the buffer number is placed here
1217 *
1218 * do appropriate marking as busy, etc.
1219 * returns buffer number if one found (>=0), negative number is error.
1220 * Used by ipath_sma_send_pkt and ipath_layer_send
1221 */
1222u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 * pbufnum)
1223{
1224 int i, j, starti, updated = 0;
1225 unsigned piobcnt, iter;
1226 unsigned long flags;
1227 unsigned long *shadow = dd->ipath_pioavailshadow;
1228 u32 __iomem *buf;
1229
1230 piobcnt = (unsigned)(dd->ipath_piobcnt2k
1231 + dd->ipath_piobcnt4k);
1232 starti = dd->ipath_lastport_piobuf;
1233 iter = piobcnt - starti;
1234 if (dd->ipath_upd_pio_shadow) {
1235 /*
1236 * Minor optimization. If we had no buffers on last call,
1237 * start out by doing the update; continue and do scan even
1238 * if no buffers were updated, to be paranoid
1239 */
1240 ipath_update_pio_bufs(dd);
1241 /* we scanned here, don't do it at end of scan */
1242 updated = 1;
1243 i = starti;
1244 } else
1245 i = dd->ipath_lastpioindex;
1246
1247rescan:
1248 /*
1249 * while test_and_set_bit() is atomic, we do that and then the
1250 * change_bit(), and the pair is not. See if this is the cause
1251 * of the remaining armlaunch errors.
1252 */
1253 spin_lock_irqsave(&ipath_pioavail_lock, flags);
1254 for (j = 0; j < iter; j++, i++) {
1255 if (i >= piobcnt)
1256 i = starti;
1257 /*
1258 * To avoid bus lock overhead, we first find a candidate
1259 * buffer, then do the test and set, and continue if that
1260 * fails.
1261 */
1262 if (test_bit((2 * i) + 1, shadow) ||
1263 test_and_set_bit((2 * i) + 1, shadow))
1264 continue;
1265 /* flip generation bit */
1266 change_bit(2 * i, shadow);
1267 break;
1268 }
1269 spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
1270
1271 if (j == iter) {
1272 volatile __le64 *dma = dd->ipath_pioavailregs_dma;
1273
1274 /*
1275 * first time through; shadow exhausted, but may be real
1276 * buffers available, so go see; if any updated, rescan
1277 * (once)
1278 */
1279 if (!updated) {
1280 ipath_update_pio_bufs(dd);
1281 updated = 1;
1282 i = starti;
1283 goto rescan;
1284 }
1285 dd->ipath_upd_pio_shadow = 1;
1286 /*
1287 * not atomic, but if we lose one once in a while, that's OK
1288 */
1289 ipath_stats.sps_nopiobufs++;
1290 if (!(++dd->ipath_consec_nopiobuf % 100000)) {
1291 ipath_dbg(
1292 "%u pio sends with no bufavail; dmacopy: "
1293 "%llx %llx %llx %llx; shadow: "
1294 "%lx %lx %lx %lx\n",
1295 dd->ipath_consec_nopiobuf,
1296 (unsigned long long) le64_to_cpu(dma[0]),
1297 (unsigned long long) le64_to_cpu(dma[1]),
1298 (unsigned long long) le64_to_cpu(dma[2]),
1299 (unsigned long long) le64_to_cpu(dma[3]),
1300 shadow[0], shadow[1], shadow[2],
1301 shadow[3]);
1302 /*
1303 * 4 buffers per byte, 4 registers above, cover rest
1304 * below
1305 */
1306 if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
1307 (sizeof(shadow[0]) * 4 * 4))
1308 ipath_dbg("2nd group: dmacopy: %llx %llx "
1309 "%llx %llx; shadow: %lx %lx "
1310 "%lx %lx\n",
1311 (unsigned long long)
1312 le64_to_cpu(dma[4]),
1313 (unsigned long long)
1314 le64_to_cpu(dma[5]),
1315 (unsigned long long)
1316 le64_to_cpu(dma[6]),
1317 (unsigned long long)
1318 le64_to_cpu(dma[7]),
1319 shadow[4], shadow[5],
1320 shadow[6], shadow[7]);
1321 }
1322 buf = NULL;
1323 goto bail;
1324 }
1325
1326 if (updated)
1327 /*
1328 * ran out of bufs earlier; now some (at least this one we just
1329 * got) are available again, so tell the layered driver.
1330 */
1331 __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE);
1332
1333 /*
1334 * set next starting place. Since it's just an optimization,
1335 * it doesn't matter who wins on this, so no locking
1336 */
1337 dd->ipath_lastpioindex = i + 1;
1338 if (dd->ipath_upd_pio_shadow)
1339 dd->ipath_upd_pio_shadow = 0;
1340 if (dd->ipath_consec_nopiobuf)
1341 dd->ipath_consec_nopiobuf = 0;
1342 if (i < dd->ipath_piobcnt2k)
1343 buf = (u32 __iomem *) (dd->ipath_pio2kbase +
1344 i * dd->ipath_palign);
1345 else
1346 buf = (u32 __iomem *)
1347 (dd->ipath_pio4kbase +
1348 (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
1349 ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
1350 i, (i < dd->ipath_piobcnt2k) ? 2 : 4, buf);
1351 if (pbufnum)
1352 *pbufnum = i;
1353
1354bail:
1355 return buf;
1356}
1357
1358/**
1359 * ipath_create_rcvhdrq - create a receive header queue
1360 * @dd: the infinipath device
1361 * @pd: the port data
1362 *
1363 * this *must* be physically contiguous memory, and for now,
1364 * that limits it to what kmalloc can do.
1365 */
1366int ipath_create_rcvhdrq(struct ipath_devdata *dd,
1367 struct ipath_portdata *pd)
1368{
1369 int ret = 0, amt;
1370
1371 amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
1372 sizeof(u32), PAGE_SIZE);
1373 if (!pd->port_rcvhdrq) {
1374 /*
1375 * not using REPEAT isn't viable; at 128KB, we can easily
1376 * fail this. The problem with REPEAT is we can block here
1377 * "forever". There isn't an in-between, unfortunately. We
1378 * could reduce the risk by never freeing the rcvhdrq except
1379 * at unload, but even then, the first time a port is used,
1380 * we could delay for some time...
1381 */
1382 gfp_t gfp_flags = GFP_USER | __GFP_COMP;
1383
1384 pd->port_rcvhdrq = dma_alloc_coherent(
1385 &dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys,
1386 gfp_flags);
1387
1388 if (!pd->port_rcvhdrq) {
1389 ipath_dev_err(dd, "attempt to allocate %d bytes "
1390 "for port %u rcvhdrq failed\n",
1391 amt, pd->port_port);
1392 ret = -ENOMEM;
1393 goto bail;
1394 }
1395
1396 pd->port_rcvhdrq_size = amt;
1397
1398 ipath_cdbg(VERBOSE, "%d pages at %p (phys %lx) size=%lu "
1399 "for port %u rcvhdr Q\n",
1400 amt >> PAGE_SHIFT, pd->port_rcvhdrq,
1401 (unsigned long) pd->port_rcvhdrq_phys,
1402 (unsigned long) pd->port_rcvhdrq_size,
1403 pd->port_port);
1404 } else {
1405 /*
1406 * clear for security, sanity, and/or debugging, each
1407 * time we reuse
1408 */
1409 memset(pd->port_rcvhdrq, 0, amt);
1410 }
1411
1412 /*
1413 * tell chip each time we init it, even if we are re-using previous
1414 * memory (we zero it at process close)
1415 */
1416 ipath_cdbg(VERBOSE, "writing port %d rcvhdraddr as %lx\n",
1417 pd->port_port, (unsigned long) pd->port_rcvhdrq_phys);
1418 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
1419 pd->port_port, pd->port_rcvhdrq_phys);
1420
1421 ret = 0;
1422bail:
1423 return ret;
1424}
1425
1426int ipath_waitfor_complete(struct ipath_devdata *dd, ipath_kreg reg_id,
1427 u64 bits_to_wait_for, u64 * valp)
1428{
1429 unsigned long timeout;
1430 u64 lastval, val;
1431 int ret;
1432
1433 lastval = ipath_read_kreg64(dd, reg_id);
1434 /* wait a ridiculously long time */
1435 timeout = jiffies + msecs_to_jiffies(5);
1436 do {
1437 val = ipath_read_kreg64(dd, reg_id);
1438 /* set so they have something, even on failures. */
1439 *valp = val;
1440 if ((val & bits_to_wait_for) == bits_to_wait_for) {
1441 ret = 0;
1442 break;
1443 }
1444 if (val != lastval)
1445 ipath_cdbg(VERBOSE, "Changed from %llx to %llx, "
1446 "waiting for %llx bits\n",
1447 (unsigned long long) lastval,
1448 (unsigned long long) val,
1449 (unsigned long long) bits_to_wait_for);
1450 cond_resched();
1451 if (time_after(jiffies, timeout)) {
1452 ipath_dbg("Didn't get bits %llx in register 0x%x, "
1453 "got %llx\n",
1454 (unsigned long long) bits_to_wait_for,
1455 reg_id, (unsigned long long) *valp);
1456 ret = -ENODEV;
1457 break;
1458 }
1459 } while (1);
1460
1461 return ret;
1462}
1463
1464/**
1465 * ipath_waitfor_mdio_cmdready - wait for last command to complete
1466 * @dd: the infinipath device
1467 *
1468 * Like ipath_waitfor_complete(), but we wait for the CMDVALID bit to go
1469 * away indicating the last command has completed. It doesn't return data
1470 */
1471int ipath_waitfor_mdio_cmdready(struct ipath_devdata *dd)
1472{
1473 unsigned long timeout;
1474 u64 val;
1475 int ret;
1476
1477 /* wait a ridiculously long time */
1478 timeout = jiffies + msecs_to_jiffies(5);
1479 do {
1480 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_mdio);
1481 if (!(val & IPATH_MDIO_CMDVALID)) {
1482 ret = 0;
1483 break;
1484 }
1485 cond_resched();
1486 if (time_after(jiffies, timeout)) {
1487 ipath_dbg("CMDVALID stuck in mdio reg? (%llx)\n",
1488 (unsigned long long) val);
1489 ret = -ENODEV;
1490 break;
1491 }
1492 } while (1);
1493
1494 return ret;
1495}
1496
1497void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
1498{
1499 static const char *what[4] = {
1500 [0] = "DOWN",
1501 [INFINIPATH_IBCC_LINKCMD_INIT] = "INIT",
1502 [INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
1503 [INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
1504 };
1505 ipath_cdbg(SMA, "Trying to move unit %u to %s, current ltstate "
1506 "is %s\n", dd->ipath_unit,
1507 what[(which >> INFINIPATH_IBCC_LINKCMD_SHIFT) &
1508 INFINIPATH_IBCC_LINKCMD_MASK],
1509 ipath_ibcstatus_str[
1510 (ipath_read_kreg64
1511 (dd, dd->ipath_kregs->kr_ibcstatus) >>
1512 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
1513 INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]);
1514
1515 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1516 dd->ipath_ibcctrl | which);
1517}
1518
1519/**
1520 * ipath_read_kreg64_port - read a device's per-port 64-bit kernel register
1521 * @dd: the infinipath device
1522 * @regno: the register number to read
1523 * @port: the port containing the register
1524 *
1525	 * Registers that vary per port (according to the chip's
1526	 * implementation constants) use this routine.
1527 */
1528u64 ipath_read_kreg64_port(const struct ipath_devdata *dd, ipath_kreg regno,
1529 unsigned port)
1530{
1531 u16 where;
1532
1533 if (port < dd->ipath_portcnt &&
1534 (regno == dd->ipath_kregs->kr_rcvhdraddr ||
1535 regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
1536 where = regno + port;
1537 else
1538 where = -1;
1539
1540 return ipath_read_kreg64(dd, where);
1541}
1542
1543/**
1544 * ipath_write_kreg_port - write a device's per-port 64-bit kernel register
1545 * @dd: the infinipath device
1546 * @regno: the register number to write
1547 * @port: the port containing the register
1548 * @value: the value to write
1549 *
1550	 * Registers that vary per port (according to the chip's
1551	 * implementation constants) use this routine.
1552 */
1553void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
1554 unsigned port, u64 value)
1555{
1556 u16 where;
1557
1558 if (port < dd->ipath_portcnt &&
1559 (regno == dd->ipath_kregs->kr_rcvhdraddr ||
1560 regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
1561 where = regno + port;
1562 else
1563 where = -1;
1564
1565 ipath_write_kreg(dd, where, value);
1566}
1567
1568/**
1569 * ipath_shutdown_device - shut down a device
1570 * @dd: the infinipath device
1571 *
1572 * This is called to make the device quiet when we are about to
1573 * unload the driver, and also when the device is administratively
1574 * disabled. It does not free any data structures.
1575 * Everything it does has to be setup again by ipath_init_chip(dd,1)
1576 */
1577void ipath_shutdown_device(struct ipath_devdata *dd)
1578{
1579 u64 val;
1580
1581 ipath_dbg("Shutting down the device\n");
1582
1583 dd->ipath_flags |= IPATH_LINKUNK;
1584 dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN |
1585 IPATH_LINKINIT | IPATH_LINKARMED |
1586 IPATH_LINKACTIVE);
1587 *dd->ipath_statusp &= ~(IPATH_STATUS_IB_CONF |
1588 IPATH_STATUS_IB_READY);
1589
1590 /* mask interrupts, but not errors */
1591 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
1592
1593 dd->ipath_rcvctrl = 0;
1594 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
1595 dd->ipath_rcvctrl);
1596
1597 /*
1598 * gracefully stop all sends allowing any in progress to trickle out
1599 * first.
1600 */
1601 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL);
1602 /* flush it */
1603 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1604 /*
1605 * enough for anything that's going to trickle out to have actually
1606 * done so.
1607 */
1608 udelay(5);
1609
1610 /*
1611 * abort any armed or launched PIO buffers that didn't go. (self
1612 * clearing). Will cause any packet currently being transmitted to
1613 * go out with an EBP, and may also cause a short packet error on
1614 * the receiver.
1615 */
1616 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1617 INFINIPATH_S_ABORT);
1618
1619 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
1620 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
1621
1622 /*
1623 * we are shutting down, so tell the layered driver. We don't do
1624 * this on just a link state change, much like ethernet, a cable
1625 * unplug, etc. doesn't change driver state
1626 */
1627 ipath_layer_intr(dd, IPATH_LAYER_INT_IF_DOWN);
1628
1629 /* disable IBC */
1630 dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
1631 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
1632 dd->ipath_control);
1633
1634 /*
1635	 * clear SerdesEnable and turn the LEDs off; do this here because
1636	 * we are unloading, so don't count on interrupts to move along.
1637	 * Turn the LEDs off explicitly for the same reason.
1638 */
1639 dd->ipath_f_quiet_serdes(dd);
1640 dd->ipath_f_setextled(dd, 0, 0);
1641
1642 if (dd->ipath_stats_timer_active) {
1643 del_timer_sync(&dd->ipath_stats_timer);
1644 dd->ipath_stats_timer_active = 0;
1645 }
1646
1647 /*
1648 * clear all interrupts and errors, so that the next time the driver
1649 * is loaded or device is enabled, we know that whatever is set
1650 * happened while we were unloaded
1651 */
1652 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
1653 ~0ULL & ~INFINIPATH_HWE_MEMBISTFAILED);
1654 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
1655 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
1656}
1657
1658/**
1659 * ipath_free_pddata - free a port's allocated data
1660 * @dd: the infinipath device
1661 * @port: the port
1662 * @freehdrq: free the port data structure if true
1663 *
1664 * when closing, free up any allocated data for a port, if the
1665 * reference count goes to zero
1666 * Note: this also optionally frees the portdata itself!
1667 * Any changes here have to be matched up with the reinit case
1668 * of ipath_init_chip(), which calls this routine on reinit after reset.
1669 */
1670void ipath_free_pddata(struct ipath_devdata *dd, u32 port, int freehdrq)
1671{
1672 struct ipath_portdata *pd = dd->ipath_pd[port];
1673
1674 if (!pd)
1675 return;
1676 if (freehdrq)
1677 /*
1678 * only clear and free portdata if we are going to also
1679 * release the hdrq, otherwise we leak the hdrq on each
1680 * open/close cycle
1681 */
1682 dd->ipath_pd[port] = NULL;
1683 if (freehdrq && pd->port_rcvhdrq) {
1684 ipath_cdbg(VERBOSE, "free closed port %d rcvhdrq @ %p "
1685 "(size=%lu)\n", pd->port_port, pd->port_rcvhdrq,
1686 (unsigned long) pd->port_rcvhdrq_size);
1687 dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size,
1688 pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
1689 pd->port_rcvhdrq = NULL;
1690 }
1691 if (port && pd->port_rcvegrbuf) {
1692 /* always free this */
1693 if (pd->port_rcvegrbuf) {
1694 unsigned e;
1695
1696 for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
1697 void *base = pd->port_rcvegrbuf[e];
1698 size_t size = pd->port_rcvegrbuf_size;
1699
1700 ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
1701 "chunk %u/%u\n", base,
1702 (unsigned long) size,
1703 e, pd->port_rcvegrbuf_chunks);
1704 dma_free_coherent(
1705 &dd->pcidev->dev, size, base,
1706 pd->port_rcvegrbuf_phys[e]);
1707 }
1708 vfree(pd->port_rcvegrbuf);
1709 pd->port_rcvegrbuf = NULL;
1710 vfree(pd->port_rcvegrbuf_phys);
1711 pd->port_rcvegrbuf_phys = NULL;
1712 }
1713 pd->port_rcvegrbuf_chunks = 0;
1714 } else if (port == 0 && dd->ipath_port0_skbs) {
1715 unsigned e;
1716 struct sk_buff **skbs = dd->ipath_port0_skbs;
1717
1718 dd->ipath_port0_skbs = NULL;
1719 ipath_cdbg(VERBOSE, "free closed port %d ipath_port0_skbs "
1720 "@ %p\n", pd->port_port, skbs);
1721 for (e = 0; e < dd->ipath_rcvegrcnt; e++)
1722 if (skbs[e])
1723 dev_kfree_skb(skbs[e]);
1724 vfree(skbs);
1725 }
1726 if (freehdrq) {
1727 kfree(pd->port_tid_pg_list);
1728 kfree(pd);
1729 }
1730}
1731
1732int __init infinipath_init(void)
1733{
1734 int ret;
1735
1736 ipath_dbg(KERN_INFO DRIVER_LOAD_MSG "%s", ipath_core_version);
1737
1738 /*
1739 * These must be called before the driver is registered with
1740 * the PCI subsystem.
1741 */
1742 idr_init(&unit_table);
1743 if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
1744 ret = -ENOMEM;
1745 goto bail;
1746 }
1747
1748 ret = pci_register_driver(&ipath_driver);
1749 if (ret < 0) {
1750 printk(KERN_ERR IPATH_DRV_NAME
1751 ": Unable to register driver: error %d\n", -ret);
1752 goto bail_unit;
1753 }
1754
1755 ret = ipath_driver_create_group(&ipath_driver.driver);
1756 if (ret < 0) {
1757 printk(KERN_ERR IPATH_DRV_NAME ": Unable to create driver "
1758 "sysfs entries: error %d\n", -ret);
1759 goto bail_pci;
1760 }
1761
1762 ret = ipath_init_ipathfs();
1763 if (ret < 0) {
1764 printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
1765 "ipathfs: error %d\n", -ret);
1766 goto bail_group;
1767 }
1768
1769 goto bail;
1770
1771bail_group:
1772 ipath_driver_remove_group(&ipath_driver.driver);
1773
1774bail_pci:
1775 pci_unregister_driver(&ipath_driver);
1776
1777bail_unit:
1778 idr_destroy(&unit_table);
1779
1780bail:
1781 return ret;
1782}
1783
1784static void cleanup_device(struct ipath_devdata *dd)
1785{
1786 int port;
1787
1788 ipath_shutdown_device(dd);
1789
1790 if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
1791 /* can't do anything more with chip; needs re-init */
1792 *dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT;
1793 if (dd->ipath_kregbase) {
1794 /*
1795			 * if we haven't already cleaned up, clear these now so
1796			 * that any register reads/writes "fail" until
1797			 * re-init
1798 */
1799 dd->ipath_kregbase = NULL;
1800 dd->ipath_kregvirt = NULL;
1801 dd->ipath_uregbase = 0;
1802 dd->ipath_sregbase = 0;
1803 dd->ipath_cregbase = 0;
1804 dd->ipath_kregsize = 0;
1805 }
1806 ipath_disable_wc(dd);
1807 }
1808
1809 if (dd->ipath_pioavailregs_dma) {
1810 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
1811 (void *) dd->ipath_pioavailregs_dma,
1812 dd->ipath_pioavailregs_phys);
1813 dd->ipath_pioavailregs_dma = NULL;
1814 }
1815
1816 if (dd->ipath_pageshadow) {
1817 struct page **tmpp = dd->ipath_pageshadow;
1818 int i, cnt = 0;
1819
1820 ipath_cdbg(VERBOSE, "Unlocking any expTID pages still "
1821 "locked\n");
1822 for (port = 0; port < dd->ipath_cfgports; port++) {
1823 int port_tidbase = port * dd->ipath_rcvtidcnt;
1824 int maxtid = port_tidbase + dd->ipath_rcvtidcnt;
1825 for (i = port_tidbase; i < maxtid; i++) {
1826 if (!tmpp[i])
1827 continue;
1828 ipath_release_user_pages(&tmpp[i], 1);
1829 tmpp[i] = NULL;
1830 cnt++;
1831 }
1832 }
1833 if (cnt) {
1834 ipath_stats.sps_pageunlocks += cnt;
1835 ipath_cdbg(VERBOSE, "There were still %u expTID "
1836 "entries locked\n", cnt);
1837 }
1838 if (ipath_stats.sps_pagelocks ||
1839 ipath_stats.sps_pageunlocks)
1840 ipath_cdbg(VERBOSE, "%llu pages locked, %llu "
1841 "unlocked via ipath_m{un}lock\n",
1842 (unsigned long long)
1843 ipath_stats.sps_pagelocks,
1844 (unsigned long long)
1845 ipath_stats.sps_pageunlocks);
1846
1847 ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n",
1848 dd->ipath_pageshadow);
1849 vfree(dd->ipath_pageshadow);
1850 dd->ipath_pageshadow = NULL;
1851 }
1852
1853 /*
1854 * free any resources still in use (usually just kernel ports)
1855 * at unload
1856 */
1857 for (port = 0; port < dd->ipath_cfgports; port++)
1858 ipath_free_pddata(dd, port, 1);
1859 kfree(dd->ipath_pd);
1860 /*
1861 * debuggability, in case some cleanup path tries to use it
1862 * after this
1863 */
1864 dd->ipath_pd = NULL;
1865}
1866
1867static void __exit infinipath_cleanup(void)
1868{
1869 struct ipath_devdata *dd, *tmp;
1870 unsigned long flags;
1871
1872 ipath_exit_ipathfs();
1873
1874 ipath_driver_remove_group(&ipath_driver.driver);
1875
1876 spin_lock_irqsave(&ipath_devs_lock, flags);
1877
1878 /*
1879	 * turn off rcv, send, and interrupts for all ports. (Should all
1880	 * drivers also hard reset the chip here?)
1881 * free up port 0 (kernel) rcvhdr, egr bufs, and eventually tid bufs
1882 * for all versions of the driver, if they were allocated
1883 */
1884 list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
1885 spin_unlock_irqrestore(&ipath_devs_lock, flags);
1886
1887 if (dd->ipath_kregbase)
1888 cleanup_device(dd);
1889
1890 if (dd->pcidev) {
1891 if (dd->pcidev->irq) {
1892 ipath_cdbg(VERBOSE,
1893 "unit %u free_irq of irq %x\n",
1894 dd->ipath_unit, dd->pcidev->irq);
1895 free_irq(dd->pcidev->irq, dd);
1896 } else
1897 ipath_dbg("irq is 0, not doing free_irq "
1898 "for unit %u\n", dd->ipath_unit);
1899 dd->pcidev = NULL;
1900 }
1901
1902 /*
1903 * we check for NULL here, because it's outside the kregbase
1904 * check, and we need to call it after the free_irq. Thus
1905 * it's possible that the function pointers were never
1906 * initialized.
1907 */
1908 if (dd->ipath_f_cleanup)
1909 /* clean up chip-specific stuff */
1910 dd->ipath_f_cleanup(dd);
1911
1912 spin_lock_irqsave(&ipath_devs_lock, flags);
1913 }
1914
1915 spin_unlock_irqrestore(&ipath_devs_lock, flags);
1916
1917 ipath_cdbg(VERBOSE, "Unregistering pci driver\n");
1918 pci_unregister_driver(&ipath_driver);
1919
1920 idr_destroy(&unit_table);
1921}
1922
1923/**
1924 * ipath_reset_device - reset the chip if possible
1925 * @unit: the device to reset
1926 *
1927 * Whether or not reset is successful, we attempt to re-initialize the chip
1928 * (that is, much like a driver unload/reload). We clear the INITTED flag
1929 * so that the various entry points will fail until we reinitialize. For
1930 * now, we only allow this if no user ports are open that use chip resources
1931 */
1932int ipath_reset_device(int unit)
1933{
1934 int ret, i;
1935 struct ipath_devdata *dd = ipath_lookup(unit);
1936
1937 if (!dd) {
1938 ret = -ENODEV;
1939 goto bail;
1940 }
1941
1942 dev_info(&dd->pcidev->dev, "Reset on unit %u requested\n", unit);
1943
1944 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) {
1945 dev_info(&dd->pcidev->dev, "Invalid unit number %u or "
1946 "not initialized or not present\n", unit);
1947 ret = -ENXIO;
1948 goto bail;
1949 }
1950
1951 if (dd->ipath_pd)
1952 for (i = 1; i < dd->ipath_portcnt; i++) {
1953 if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) {
1954 ipath_dbg("unit %u port %d is in use "
1955 "(PID %u cmd %s), can't reset\n",
1956 unit, i,
1957 dd->ipath_pd[i]->port_pid,
1958 dd->ipath_pd[i]->port_comm);
1959 ret = -EBUSY;
1960 goto bail;
1961 }
1962 }
1963
1964 dd->ipath_flags &= ~IPATH_INITTED;
1965 ret = dd->ipath_f_reset(dd);
1966 if (ret != 1)
1967 ipath_dbg("reset was not successful\n");
1968 ipath_dbg("Trying to reinitialize unit %u after reset attempt\n",
1969 unit);
1970 ret = ipath_init_chip(dd, 1);
1971 if (ret)
1972 ipath_dev_err(dd, "Reinitialize unit %u after "
1973 "reset failed with %d\n", unit, ret);
1974 else
1975 dev_info(&dd->pcidev->dev, "Reinitialized unit %u after "
1976 "resetting\n", unit);
1977
1978bail:
1979 return ret;
1980}
1981
1982module_init(infinipath_init);
1983module_exit(infinipath_cleanup);
diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c
new file mode 100644
index 000000000000..f11a900e8cd7
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c
@@ -0,0 +1,613 @@
1/*
2 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/delay.h>
34#include <linux/pci.h>
35#include <linux/vmalloc.h>
36
37#include "ipath_kernel.h"
38
39/*
40 * InfiniPath I2C driver for a serial eeprom. This is not a generic
41 * I2C interface. For a start, the device we're using (Atmel AT24C11)
42 * doesn't work like a regular I2C device. It looks like one
43 * electrically, but not logically. Normal I2C devices have a single
44 * 7-bit or 10-bit I2C address that they respond to. Valid 7-bit
45 * addresses range from 0x03 to 0x77. Addresses 0x00 to 0x02 and 0x78
46 * to 0x7F are special reserved addresses (e.g. 0x00 is the "general
47 * call" address.) The Atmel device, on the other hand, responds to ALL
48 * 7-bit addresses. It's designed to be the only device on a given I2C
49 * bus. A 7-bit address corresponds to the memory address within the
50 * Atmel device itself.
51 *
52 * Also, the timing requirements mean more than simple software
53 * bitbanging, with readbacks from chip to ensure timing (simple udelay
54 * is not enough).
55 *
56 * This all means that accessing the device is specialized enough
57 * that using the standard kernel I2C bitbanging interface would be
58 * impossible. For example, the core I2C eeprom driver expects to find
59 * a device at one or more of a limited set of addresses only. It doesn't
60 * allow writing to an eeprom. It also doesn't provide any means of
61 * accessing eeprom contents from within the kernel, only via sysfs.
62 */
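
/*
 * To make the addressing scheme above concrete: the first byte sent
 * after the start condition is not a device address but the memory
 * offset within the part, shifted left one bit, with the read/write
 * flag in the low bit (READ_CMD/WRITE_CMD below).  A minimal sketch
 * of the encoding, mirroring ipath_eeprom_read()/ipath_eeprom_write()
 * (the first_byte_* names are purely illustrative):
 *
 *	first_byte_read  = (eeprom_offset << 1) | READ_CMD;
 *	first_byte_write = (eeprom_offset << 1) | WRITE_CMD;
 *
 * so offset 0x10, for example, goes on the wire as 0x21 for a read
 * and 0x20 for a write.
 */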
63
64enum i2c_type {
65 i2c_line_scl = 0,
66 i2c_line_sda
67};
68
69enum i2c_state {
70 i2c_line_low = 0,
71 i2c_line_high
72};
73
74#define READ_CMD 1
75#define WRITE_CMD 0
76
77static int eeprom_init;
78
79/*
80 * The gpioval manipulation really should be protected by spinlocks
81 * or be converted to use atomic operations.
82 */
83
84/**
85 * i2c_gpio_set - set a GPIO line
86 * @dd: the infinipath device
87 * @line: the line to set
88 * @new_line_state: the state to set
89 *
90 * Returns 0 if the line was set to the new state successfully, non-zero
91 * on error.
92 */
93static int i2c_gpio_set(struct ipath_devdata *dd,
94 enum i2c_type line,
95 enum i2c_state new_line_state)
96{
97 u64 read_val, write_val, mask, *gpioval;
98
99 gpioval = &dd->ipath_gpio_out;
100 read_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extctrl);
101 if (line == i2c_line_scl)
102 mask = ipath_gpio_scl;
103 else
104 mask = ipath_gpio_sda;
105
106 if (new_line_state == i2c_line_high)
107 /* tri-state the output rather than force high */
108 write_val = read_val & ~mask;
109 else
110 /* config line to be an output */
111 write_val = read_val | mask;
112 ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, write_val);
113
114 /* set high and verify */
115 if (new_line_state == i2c_line_high)
116 write_val = 0x1UL;
117 else
118 write_val = 0x0UL;
119
120 if (line == i2c_line_scl) {
121 write_val <<= ipath_gpio_scl_num;
122 *gpioval = *gpioval & ~(1UL << ipath_gpio_scl_num);
123 *gpioval |= write_val;
124 } else {
125 write_val <<= ipath_gpio_sda_num;
126 *gpioval = *gpioval & ~(1UL << ipath_gpio_sda_num);
127 *gpioval |= write_val;
128 }
129 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_out, *gpioval);
130
131 return 0;
132}
133
134/**
135 * i2c_gpio_get - get a GPIO line state
136 * @dd: the infinipath device
137 * @line: the line to get
138 * @curr_statep: where to put the line state
139 *
 140 * Returns 0 if the line state was read successfully, non-zero
 141 * on error. *curr_statep is not set on error.
142 */
143static int i2c_gpio_get(struct ipath_devdata *dd,
144 enum i2c_type line,
145 enum i2c_state *curr_statep)
146{
147 u64 read_val, write_val, mask;
148 int ret;
149
150 /* check args */
151 if (curr_statep == NULL) {
152 ret = 1;
153 goto bail;
154 }
155
156 read_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extctrl);
157 /* config line to be an input */
158 if (line == i2c_line_scl)
159 mask = ipath_gpio_scl;
160 else
161 mask = ipath_gpio_sda;
162 write_val = read_val & ~mask;
163 ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, write_val);
164 read_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
165
166 if (read_val & mask)
167 *curr_statep = i2c_line_high;
168 else
169 *curr_statep = i2c_line_low;
170
171 ret = 0;
172
173bail:
174 return ret;
175}
176
177/**
178 * i2c_wait_for_writes - wait for a write
179 * @dd: the infinipath device
180 *
181 * We use this instead of udelay directly, so we can make sure
182 * that previous register writes have been flushed all the way
183 * to the chip. Since we are delaying anyway, the cost doesn't
 184 * hurt, and it makes the bit twiddling more regular.
185 */
186static void i2c_wait_for_writes(struct ipath_devdata *dd)
187{
188 (void)ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
189}
190
191static void scl_out(struct ipath_devdata *dd, u8 bit)
192{
193 i2c_gpio_set(dd, i2c_line_scl, bit ? i2c_line_high : i2c_line_low);
194
195 i2c_wait_for_writes(dd);
196}
197
198static void sda_out(struct ipath_devdata *dd, u8 bit)
199{
200 i2c_gpio_set(dd, i2c_line_sda, bit ? i2c_line_high : i2c_line_low);
201
202 i2c_wait_for_writes(dd);
203}
204
205static u8 sda_in(struct ipath_devdata *dd, int wait)
206{
207 enum i2c_state bit;
208
209 if (i2c_gpio_get(dd, i2c_line_sda, &bit))
210 ipath_dbg("get bit failed!\n");
211
212 if (wait)
213 i2c_wait_for_writes(dd);
214
215 return bit == i2c_line_high ? 1U : 0;
216}
217
218/**
219 * i2c_ackrcv - see if ack following write is true
220 * @dd: the infinipath device
221 */
222static int i2c_ackrcv(struct ipath_devdata *dd)
223{
224 u8 ack_received;
225
226 /* AT ENTRY SCL = LOW */
227 /* change direction, ignore data */
228 ack_received = sda_in(dd, 1);
229 scl_out(dd, i2c_line_high);
230 ack_received = sda_in(dd, 1) == 0;
231 scl_out(dd, i2c_line_low);
232 return ack_received;
233}
234
235/**
236 * wr_byte - write a byte, one bit at a time
237 * @dd: the infinipath device
238 * @data: the byte to write
239 *
240 * Returns 0 if we got the following ack, otherwise 1
241 */
242static int wr_byte(struct ipath_devdata *dd, u8 data)
243{
244 int bit_cntr;
245 u8 bit;
246
247 for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) {
248 bit = (data >> bit_cntr) & 1;
249 sda_out(dd, bit);
250 scl_out(dd, i2c_line_high);
251 scl_out(dd, i2c_line_low);
252 }
253 return (!i2c_ackrcv(dd)) ? 1 : 0;
254}
255
256static void send_ack(struct ipath_devdata *dd)
257{
258 sda_out(dd, i2c_line_low);
259 scl_out(dd, i2c_line_high);
260 scl_out(dd, i2c_line_low);
261 sda_out(dd, i2c_line_high);
262}
263
264/**
265 * i2c_startcmd - transmit the start condition, followed by address/cmd
266 * @dd: the infinipath device
267 * @offset_dir: direction byte
268 *
269 * (both clock/data high, clock high, data low while clock is high)
270 */
271static int i2c_startcmd(struct ipath_devdata *dd, u8 offset_dir)
272{
273 int res;
274
275 /* issue start sequence */
276 sda_out(dd, i2c_line_high);
277 scl_out(dd, i2c_line_high);
278 sda_out(dd, i2c_line_low);
279 scl_out(dd, i2c_line_low);
280
281 /* issue length and direction byte */
282 res = wr_byte(dd, offset_dir);
283
284 if (res)
285 ipath_cdbg(VERBOSE, "No ack to complete start\n");
286
287 return res;
288}
289
290/**
291 * stop_cmd - transmit the stop condition
292 * @dd: the infinipath device
293 *
294 * (both clock/data low, clock high, data high while clock is high)
295 */
296static void stop_cmd(struct ipath_devdata *dd)
297{
298 scl_out(dd, i2c_line_low);
299 sda_out(dd, i2c_line_low);
300 scl_out(dd, i2c_line_high);
301 sda_out(dd, i2c_line_high);
302 udelay(2);
303}
304
305/**
306 * eeprom_reset - reset I2C communication
307 * @dd: the infinipath device
308 */
309
310static int eeprom_reset(struct ipath_devdata *dd)
311{
312 int clock_cycles_left = 9;
313 u64 *gpioval = &dd->ipath_gpio_out;
314 int ret;
315
316 eeprom_init = 1;
317 *gpioval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_out);
318 ipath_cdbg(VERBOSE, "Resetting i2c eeprom; initial gpioout reg "
319 "is %llx\n", (unsigned long long) *gpioval);
320
321 /*
322 * This is to get the i2c into a known state, by first going low,
323 * then tristate sda (and then tristate scl as first thing
324 * in loop)
325 */
326 scl_out(dd, i2c_line_low);
327 sda_out(dd, i2c_line_high);
328
329 while (clock_cycles_left--) {
330 scl_out(dd, i2c_line_high);
331
332 if (sda_in(dd, 0)) {
333 sda_out(dd, i2c_line_low);
334 scl_out(dd, i2c_line_low);
335 ret = 0;
336 goto bail;
337 }
338
339 scl_out(dd, i2c_line_low);
340 }
341
342 ret = 1;
343
344bail:
345 return ret;
346}
347
348/**
349 * ipath_eeprom_read - receives bytes from the eeprom via I2C
350 * @dd: the infinipath device
351 * @eeprom_offset: address to read from
352 * @buffer: where to store result
353 * @len: number of bytes to receive
354 */
355
356int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset,
357 void *buffer, int len)
358{
359 /* compiler complains unless initialized */
360 u8 single_byte = 0;
361 int bit_cntr;
362 int ret;
363
364 if (!eeprom_init)
365 eeprom_reset(dd);
366
367 eeprom_offset = (eeprom_offset << 1) | READ_CMD;
368
369 if (i2c_startcmd(dd, eeprom_offset)) {
370 ipath_dbg("Failed startcmd\n");
371 stop_cmd(dd);
372 ret = 1;
373 goto bail;
374 }
375
376 /*
377 * eeprom keeps clocking data out as long as we ack, automatically
378 * incrementing the address.
379 */
380 while (len-- > 0) {
381 /* get data */
382 single_byte = 0;
383 for (bit_cntr = 8; bit_cntr; bit_cntr--) {
384 u8 bit;
385 scl_out(dd, i2c_line_high);
386 bit = sda_in(dd, 0);
387 single_byte |= bit << (bit_cntr - 1);
388 scl_out(dd, i2c_line_low);
389 }
390
391 /* send ack if not the last byte */
392 if (len)
393 send_ack(dd);
394
395 *((u8 *) buffer) = single_byte;
396 buffer++;
397 }
398
399 stop_cmd(dd);
400
401 ret = 0;
402
403bail:
404 return ret;
405}
406
407/**
408 * ipath_eeprom_write - writes data to the eeprom via I2C
409 * @dd: the infinipath device
410 * @eeprom_offset: where to place data
411 * @buffer: data to write
412 * @len: number of bytes to write
413 */
414int ipath_eeprom_write(struct ipath_devdata *dd, u8 eeprom_offset,
415 const void *buffer, int len)
416{
417 u8 single_byte;
418 int sub_len;
419 const u8 *bp = buffer;
420 int max_wait_time, i;
421 int ret;
422
423 if (!eeprom_init)
424 eeprom_reset(dd);
425
426 while (len > 0) {
427 if (i2c_startcmd(dd, (eeprom_offset << 1) | WRITE_CMD)) {
428 ipath_dbg("Failed to start cmd offset %u\n",
429 eeprom_offset);
430 goto failed_write;
431 }
432
433 sub_len = min(len, 4);
434 eeprom_offset += sub_len;
435 len -= sub_len;
436
437 for (i = 0; i < sub_len; i++) {
438 if (wr_byte(dd, *bp++)) {
439 ipath_dbg("no ack after byte %u/%u (%u "
440 "total remain)\n", i, sub_len,
441 len + sub_len - i);
442 goto failed_write;
443 }
444 }
445
446 stop_cmd(dd);
447
448 /*
449 * wait for write complete by waiting for a successful
450 * read (the chip replies with a zero after the write
 451		 * cmd completes, and before it writes to the eeprom).
452 * The startcmd for the read will fail the ack until
453 * the writes have completed. We do this inline to avoid
454 * the debug prints that are in the real read routine
455 * if the startcmd fails.
456 */
457 max_wait_time = 100;
458 while (i2c_startcmd(dd, READ_CMD)) {
459 stop_cmd(dd);
460 if (!--max_wait_time) {
461 ipath_dbg("Did not get successful read to "
462 "complete write\n");
463 goto failed_write;
464 }
465 }
466 /* now read the zero byte */
467 for (i = single_byte = 0; i < 8; i++) {
468 u8 bit;
469 scl_out(dd, i2c_line_high);
470 bit = sda_in(dd, 0);
471 scl_out(dd, i2c_line_low);
472 single_byte <<= 1;
473 single_byte |= bit;
474 }
475 stop_cmd(dd);
476 }
477
478 ret = 0;
479 goto bail;
480
481failed_write:
482 stop_cmd(dd);
483 ret = 1;
484
485bail:
486 return ret;
487}
488
489static u8 flash_csum(struct ipath_flash *ifp, int adjust)
490{
491 u8 *ip = (u8 *) ifp;
492 u8 csum = 0, len;
493
494 for (len = 0; len < ifp->if_length; len++)
495 csum += *ip++;
496 csum -= ifp->if_csum;
497 csum = ~csum;
498 if (adjust)
499 ifp->if_csum = csum;
500
501 return csum;
502}
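
/*
 * A minimal usage sketch for flash_csum(), mirroring what
 * ipath_get_guid() does below: after reading the flash contents,
 * recompute the checksum with adjust == 0 and compare it against the
 * stored value before trusting any fields:
 *
 *	if (flash_csum(ifp, 0) != ifp->if_csum)
 *		do not trust the contents;
 *
 * calling flash_csum(ifp, 1) instead recomputes the checksum and
 * stores it back into if_csum, for use after fields have been
 * modified.
 */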
503
504/**
505 * ipath_get_guid - get the GUID from the i2c device
506 * @dd: the infinipath device
507 *
508 * When we add the multi-chip support, we will probably have to add
509 * the ability to use the number of guids field, and get the guid from
510 * the first chip's flash, to use for all of them.
511 */
512void ipath_get_guid(struct ipath_devdata *dd)
513{
514 void *buf;
515 struct ipath_flash *ifp;
516 __be64 guid;
517 int len;
518 u8 csum, *bguid;
519 int t = dd->ipath_unit;
520 struct ipath_devdata *dd0 = ipath_lookup(0);
521
522 if (t && dd0->ipath_nguid > 1 && t <= dd0->ipath_nguid) {
523 u8 *bguid, oguid;
524 dd->ipath_guid = dd0->ipath_guid;
525 bguid = (u8 *) & dd->ipath_guid;
526
527 oguid = bguid[7];
528 bguid[7] += t;
529 if (oguid > bguid[7]) {
530 if (bguid[6] == 0xff) {
531 if (bguid[5] == 0xff) {
532 ipath_dev_err(
533 dd,
534 "Can't set %s GUID from "
535 "base, wraps to OUI!\n",
536 ipath_get_unit_name(t));
537 dd->ipath_guid = 0;
538 goto bail;
539 }
540 bguid[5]++;
541 }
542 bguid[6]++;
543 }
544 dd->ipath_nguid = 1;
545
546 ipath_dbg("nguid %u, so adding %u to device 0 guid, "
547 "for %llx\n",
548 dd0->ipath_nguid, t,
549 (unsigned long long) be64_to_cpu(dd->ipath_guid));
550 goto bail;
551 }
552
553 len = offsetof(struct ipath_flash, if_future);
554 buf = vmalloc(len);
555 if (!buf) {
556 ipath_dev_err(dd, "Couldn't allocate memory to read %u "
557 "bytes from eeprom for GUID\n", len);
558 goto bail;
559 }
560
561 if (ipath_eeprom_read(dd, 0, buf, len)) {
562 ipath_dev_err(dd, "Failed reading GUID from eeprom\n");
563 goto done;
564 }
565 ifp = (struct ipath_flash *)buf;
566
567 csum = flash_csum(ifp, 0);
568 if (csum != ifp->if_csum) {
569 dev_info(&dd->pcidev->dev, "Bad I2C flash checksum: "
570 "0x%x, not 0x%x\n", csum, ifp->if_csum);
571 goto done;
572 }
573 if (*(__be64 *) ifp->if_guid == 0ULL ||
574 *(__be64 *) ifp->if_guid == __constant_cpu_to_be64(-1LL)) {
575 ipath_dev_err(dd, "Invalid GUID %llx from flash; "
576 "ignoring\n",
577 *(unsigned long long *) ifp->if_guid);
578 /* don't allow GUID if all 0 or all 1's */
579 goto done;
580 }
581
582 /* complain, but allow it */
583 if (*(u64 *) ifp->if_guid == 0x100007511000000ULL)
584 dev_info(&dd->pcidev->dev, "Warning, GUID %llx is "
585 "default, probably not correct!\n",
586 *(unsigned long long *) ifp->if_guid);
587
588 bguid = ifp->if_guid;
589 if (!bguid[0] && !bguid[1] && !bguid[2]) {
590 /* original incorrect GUID format in flash; fix in
591 * core copy, by shifting up 2 octets; don't need to
592 * change top octet, since both it and shifted are
593 * 0.. */
594 bguid[1] = bguid[3];
595 bguid[2] = bguid[4];
596 bguid[3] = bguid[4] = 0;
597 guid = *(__be64 *) ifp->if_guid;
598 ipath_cdbg(VERBOSE, "Old GUID format in flash, top 3 zero, "
599 "shifting 2 octets\n");
600 } else
601 guid = *(__be64 *) ifp->if_guid;
602 dd->ipath_guid = guid;
603 dd->ipath_nguid = ifp->if_numguid;
604 memcpy(dd->ipath_serial, ifp->if_serial,
605 sizeof(ifp->if_serial));
606 ipath_cdbg(VERBOSE, "Initted GUID to %llx from eeprom\n",
607 (unsigned long long) be64_to_cpu(dd->ipath_guid));
608
609done:
610 vfree(buf);
611
612bail:;
613}
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
new file mode 100644
index 000000000000..c347191f02bf
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -0,0 +1,1910 @@
1/*
2 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/pci.h>
34#include <linux/poll.h>
35#include <linux/cdev.h>
36#include <linux/swap.h>
37#include <linux/vmalloc.h>
38#include <asm/pgtable.h>
39
40#include "ipath_kernel.h"
41#include "ips_common.h"
42#include "ipath_layer.h"
43
44static int ipath_open(struct inode *, struct file *);
45static int ipath_close(struct inode *, struct file *);
46static ssize_t ipath_write(struct file *, const char __user *, size_t,
47 loff_t *);
48static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
49static int ipath_mmap(struct file *, struct vm_area_struct *);
50
51static struct file_operations ipath_file_ops = {
52 .owner = THIS_MODULE,
53 .write = ipath_write,
54 .open = ipath_open,
55 .release = ipath_close,
56 .poll = ipath_poll,
57 .mmap = ipath_mmap
58};
59
60static int ipath_get_base_info(struct ipath_portdata *pd,
61 void __user *ubase, size_t ubase_size)
62{
63 int ret = 0;
64 struct ipath_base_info *kinfo = NULL;
65 struct ipath_devdata *dd = pd->port_dd;
66
67 if (ubase_size < sizeof(*kinfo)) {
68 ipath_cdbg(PROC,
69 "Base size %lu, need %lu (version mismatch?)\n",
70 (unsigned long) ubase_size,
71 (unsigned long) sizeof(*kinfo));
72 ret = -EINVAL;
73 goto bail;
74 }
75
76 kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
77 if (kinfo == NULL) {
78 ret = -ENOMEM;
79 goto bail;
80 }
81
82 ret = dd->ipath_f_get_base_info(pd, kinfo);
83 if (ret < 0)
84 goto bail;
85
86 kinfo->spi_rcvhdr_cnt = dd->ipath_rcvhdrcnt;
87 kinfo->spi_rcvhdrent_size = dd->ipath_rcvhdrentsize;
88 kinfo->spi_tidegrcnt = dd->ipath_rcvegrcnt;
89 kinfo->spi_rcv_egrbufsize = dd->ipath_rcvegrbufsize;
90 /*
91 * have to mmap whole thing
92 */
93 kinfo->spi_rcv_egrbuftotlen =
94 pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
95 kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk;
96 kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
97 pd->port_rcvegrbuf_chunks;
98 kinfo->spi_tidcnt = dd->ipath_rcvtidcnt;
99 /*
100 * for this use, may be ipath_cfgports summed over all chips that
 101	 * are configured and present
102 */
103 kinfo->spi_nports = dd->ipath_cfgports;
104 /* unit (chip/board) our port is on */
105 kinfo->spi_unit = dd->ipath_unit;
106 /* for now, only a single page */
107 kinfo->spi_tid_maxsize = PAGE_SIZE;
108
109 /*
110 * Doing this per port, and based on the skip value, etc. This has
111 * to be the actual buffer size, since the protocol code treats it
112 * as an array.
113 *
114 * These have to be set to user addresses in the user code via mmap.
115 * These values are used on return to user code for the mmap target
116 * addresses only. For 32 bit, same 44 bit address problem, so use
117 * the physical address, not virtual. Before 2.6.11, using the
118 * page_address() macro worked, but in 2.6.11, even that returns the
119 * full 64 bit address (upper bits all 1's). So far, using the
120 * physical addresses (or chip offsets, for chip mapping) works, but
 121	 * no doubt some future kernel release will change that, and we'll be
 122	 * on to yet another method of dealing with this.
123 */
124 kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys;
125 kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys;
126 kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys;
127 kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
128 (void *) dd->ipath_statusp -
129 (void *) dd->ipath_pioavailregs_dma;
130 kinfo->spi_piobufbase = (u64) pd->port_piobufs;
131 kinfo->__spi_uregbase =
132 dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
133
134 kinfo->spi_pioindex = dd->ipath_pbufsport * (pd->port_port - 1);
135 kinfo->spi_piocnt = dd->ipath_pbufsport;
136 kinfo->spi_pioalign = dd->ipath_palign;
137
138 kinfo->spi_qpair = IPATH_KD_QP;
139 kinfo->spi_piosize = dd->ipath_ibmaxlen;
140 kinfo->spi_mtu = dd->ipath_ibmaxlen; /* maxlen, not ibmtu */
141 kinfo->spi_port = pd->port_port;
142 kinfo->spi_sw_version = IPATH_USER_SWVERSION;
143 kinfo->spi_hw_version = dd->ipath_revision;
144
145 if (copy_to_user(ubase, kinfo, sizeof(*kinfo)))
146 ret = -EFAULT;
147
148bail:
149 kfree(kinfo);
150 return ret;
151}
152
153/**
154 * ipath_tid_update - update a port TID
155 * @pd: the port
156 * @ti: the TID information
157 *
158 * The new implementation as of Oct 2004 is that the driver assigns
159 * the tid and returns it to the caller. To make it easier to
160 * catch bugs, and to reduce search time, we keep a cursor for
161 * each port, walking the shadow tid array to find one that's not
162 * in use.
163 *
164 * For now, if we can't allocate the full list, we fail, although
165 * in the long run, we'll allocate as many as we can, and the
166 * caller will deal with that by trying the remaining pages later.
167 * That means that when we fail, we have to mark the tids as not in
168 * use again, in our shadow copy.
169 *
170 * It's up to the caller to free the tids when they are done.
171 * We'll unlock the pages as they free them.
172 *
173 * Also, right now we are locking one page at a time, but since
174 * the intended use of this routine is for a single group of
175 * virtually contiguous pages, that should change to improve
176 * performance.
177 */
178static int ipath_tid_update(struct ipath_portdata *pd,
179 const struct ipath_tid_info *ti)
180{
181 int ret = 0, ntids;
182 u32 tid, porttid, cnt, i, tidcnt;
183 u16 *tidlist;
184 struct ipath_devdata *dd = pd->port_dd;
185 u64 physaddr;
186 unsigned long vaddr;
187 u64 __iomem *tidbase;
188 unsigned long tidmap[8];
189 struct page **pagep = NULL;
190
191 if (!dd->ipath_pageshadow) {
192 ret = -ENOMEM;
193 goto done;
194 }
195
196 cnt = ti->tidcnt;
197 if (!cnt) {
198 ipath_dbg("After copyin, tidcnt 0, tidlist %llx\n",
199 (unsigned long long) ti->tidlist);
200 /*
 201		 * Should we treat this as success? Likely a bug.
202 */
203 ret = -EFAULT;
204 goto done;
205 }
206 tidcnt = dd->ipath_rcvtidcnt;
207 if (cnt >= tidcnt) {
208 /* make sure it all fits in port_tid_pg_list */
209 dev_info(&dd->pcidev->dev, "Process tried to allocate %u "
210 "TIDs, only trying max (%u)\n", cnt, tidcnt);
211 cnt = tidcnt;
212 }
213 pagep = (struct page **)pd->port_tid_pg_list;
214 tidlist = (u16 *) (&pagep[cnt]);
215
216 memset(tidmap, 0, sizeof(tidmap));
217 tid = pd->port_tidcursor;
218 /* before decrement; chip actual # */
219 porttid = pd->port_port * tidcnt;
220 ntids = tidcnt;
221 tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
222 dd->ipath_rcvtidbase +
223 porttid * sizeof(*tidbase));
224
225 ipath_cdbg(VERBOSE, "Port%u %u tids, cursor %u, tidbase %p\n",
226 pd->port_port, cnt, tid, tidbase);
227
228 /* virtual address of first page in transfer */
229 vaddr = ti->tidvaddr;
230 if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
231 cnt * PAGE_SIZE)) {
232 ipath_dbg("Fail vaddr %p, %u pages, !access_ok\n",
233 (void *)vaddr, cnt);
234 ret = -EFAULT;
235 goto done;
236 }
237 ret = ipath_get_user_pages(vaddr, cnt, pagep);
238 if (ret) {
239 if (ret == -EBUSY) {
240 ipath_dbg("Failed to lock addr %p, %u pages "
241 "(already locked)\n",
242 (void *) vaddr, cnt);
243 /*
244 * for now, continue, and see what happens but with
245 * the new implementation, this should never happen,
246 * unless perhaps the user has mpin'ed the pages
247 * themselves (something we need to test)
248 */
249 ret = 0;
250 } else {
251 dev_info(&dd->pcidev->dev,
252 "Failed to lock addr %p, %u pages: "
253 "errno %d\n", (void *) vaddr, cnt, -ret);
254 goto done;
255 }
256 }
257 for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
258 for (; ntids--; tid++) {
259 if (tid == tidcnt)
260 tid = 0;
261 if (!dd->ipath_pageshadow[porttid + tid])
262 break;
263 }
264 if (ntids < 0) {
265 /*
266 * oops, wrapped all the way through their TIDs,
267 * and didn't have enough free; see comments at
268 * start of routine
269 */
270 ipath_dbg("Not enough free TIDs for %u pages "
271 "(index %d), failing\n", cnt, i);
272 i--; /* last tidlist[i] not filled in */
273 ret = -ENOMEM;
274 break;
275 }
276 tidlist[i] = tid;
277 ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, "
278 "vaddr %lx\n", i, tid, vaddr);
279 /* we "know" system pages and TID pages are same size */
280 dd->ipath_pageshadow[porttid + tid] = pagep[i];
281 /*
282 * don't need atomic or it's overhead
283 */
284 __set_bit(tid, tidmap);
285 physaddr = page_to_phys(pagep[i]);
286 ipath_stats.sps_pagelocks++;
287 ipath_cdbg(VERBOSE,
288 "TID %u, vaddr %lx, physaddr %llx pgp %p\n",
289 tid, vaddr, (unsigned long long) physaddr,
290 pagep[i]);
291 dd->ipath_f_put_tid(dd, &tidbase[tid], 1, physaddr);
292 /*
293 * don't check this tid in ipath_portshadow, since we
294 * just filled it in; start with the next one.
295 */
296 tid++;
297 }
298
299 if (ret) {
300 u32 limit;
301 cleanup:
302 /* jump here if copy out of updated info failed... */
303 ipath_dbg("After failure (ret=%d), undo %d of %d entries\n",
304 -ret, i, cnt);
305 /* same code that's in ipath_free_tid() */
306 limit = sizeof(tidmap) * BITS_PER_BYTE;
307 if (limit > tidcnt)
308 /* just in case size changes in future */
309 limit = tidcnt;
310 tid = find_first_bit((const unsigned long *)tidmap, limit);
311 for (; tid < limit; tid++) {
312 if (!test_bit(tid, tidmap))
313 continue;
314 if (dd->ipath_pageshadow[porttid + tid]) {
315 ipath_cdbg(VERBOSE, "Freeing TID %u\n",
316 tid);
317 dd->ipath_f_put_tid(dd, &tidbase[tid], 1,
318 dd->ipath_tidinvalid);
319 dd->ipath_pageshadow[porttid + tid] = NULL;
320 ipath_stats.sps_pageunlocks++;
321 }
322 }
323 ipath_release_user_pages(pagep, cnt);
324 } else {
325 /*
326 * Copy the updated array, with ipath_tid's filled in, back
327 * to user. Since we did the copy in already, this "should
 328		 * never fail". If it does, we have to clean up...
329 */
330 if (copy_to_user((void __user *)
331 (unsigned long) ti->tidlist,
332 tidlist, cnt * sizeof(*tidlist))) {
333 ret = -EFAULT;
334 goto cleanup;
335 }
336 if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
337 tidmap, sizeof tidmap)) {
338 ret = -EFAULT;
339 goto cleanup;
340 }
341 if (tid == tidcnt)
342 tid = 0;
343 pd->port_tidcursor = tid;
344 }
345
346done:
347 if (ret)
348 ipath_dbg("Failed to map %u TID pages, failing with %d\n",
349 ti->tidcnt, -ret);
350 return ret;
351}
352
353/**
354 * ipath_tid_free - free a port TID
355 * @pd: the port
356 * @ti: the TID info
357 *
358 * right now we are unlocking one page at a time, but since
359 * the intended use of this routine is for a single group of
360 * virtually contiguous pages, that should change to improve
361 * performance. We check that the TID is in range for this port
362 * but otherwise don't check validity; if user has an error and
363 * frees the wrong tid, it's only their own data that can thereby
 364 * be corrupted. We do check that the TID was in use, for sanity.
365 * We always use our idea of the saved address, not the address that
366 * they pass in to us.
367 */
368
369static int ipath_tid_free(struct ipath_portdata *pd,
370 const struct ipath_tid_info *ti)
371{
372 int ret = 0;
373 u32 tid, porttid, cnt, limit, tidcnt;
374 struct ipath_devdata *dd = pd->port_dd;
375 u64 __iomem *tidbase;
376 unsigned long tidmap[8];
377
378 if (!dd->ipath_pageshadow) {
379 ret = -ENOMEM;
380 goto done;
381 }
382
383 if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
384 sizeof tidmap)) {
385 ret = -EFAULT;
386 goto done;
387 }
388
389 porttid = pd->port_port * dd->ipath_rcvtidcnt;
390 tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
391 dd->ipath_rcvtidbase +
392 porttid * sizeof(*tidbase));
393
394 tidcnt = dd->ipath_rcvtidcnt;
395 limit = sizeof(tidmap) * BITS_PER_BYTE;
396 if (limit > tidcnt)
397 /* just in case size changes in future */
398 limit = tidcnt;
399 tid = find_first_bit(tidmap, limit);
400 ipath_cdbg(VERBOSE, "Port%u free %u tids; first bit (max=%d) "
401 "set is %d, porttid %u\n", pd->port_port, ti->tidcnt,
402 limit, tid, porttid);
403 for (cnt = 0; tid < limit; tid++) {
404 /*
405 * small optimization; if we detect a run of 3 or so without
406 * any set, use find_first_bit again. That's mainly to
407 * accelerate the case where we wrapped, so we have some at
408 * the beginning, and some at the end, and a big gap
409 * in the middle.
410 */
411 if (!test_bit(tid, tidmap))
412 continue;
413 cnt++;
414 if (dd->ipath_pageshadow[porttid + tid]) {
415 ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
416 pd->port_pid, tid);
417 dd->ipath_f_put_tid(dd, &tidbase[tid], 1,
418 dd->ipath_tidinvalid);
419 ipath_release_user_pages(
420 &dd->ipath_pageshadow[porttid + tid], 1);
421 dd->ipath_pageshadow[porttid + tid] = NULL;
422 ipath_stats.sps_pageunlocks++;
423 } else
424 ipath_dbg("Unused tid %u, ignoring\n", tid);
425 }
426 if (cnt != ti->tidcnt)
427 ipath_dbg("passed in tidcnt %d, only %d bits set in map\n",
428 ti->tidcnt, cnt);
429done:
430 if (ret)
431 ipath_dbg("Failed to unmap %u TID pages, failing with %d\n",
432 ti->tidcnt, -ret);
433 return ret;
434}
435
436/**
437 * ipath_set_part_key - set a partition key
438 * @pd: the port
439 * @key: the key
440 *
441 * We can have up to 4 active at a time (other than the default, which is
442 * always allowed). This is somewhat tricky, since multiple ports may set
443 * the same key, so we reference count them, and clean up at exit. All 4
444 * partition keys are packed into a single infinipath register. It's an
445 * error for a process to set the same pkey multiple times. We provide no
446 * mechanism to de-allocate a pkey at this time, we may eventually need to
447 * do that. I've used the atomic operations, and no locking, and only make
448 * a single pass through what's available. This should be more than
449 * adequate for some time. I'll think about spinlocks or the like if and as
450 * it's necessary.
451 */
452static int ipath_set_part_key(struct ipath_portdata *pd, u16 key)
453{
454 struct ipath_devdata *dd = pd->port_dd;
455 int i, any = 0, pidx = -1;
456 u16 lkey = key & 0x7FFF;
457 int ret;
458
459 if (lkey == (IPS_DEFAULT_P_KEY & 0x7FFF)) {
460 /* nothing to do; this key always valid */
461 ret = 0;
462 goto bail;
463 }
464
465 ipath_cdbg(VERBOSE, "p%u try to set pkey %hx, current keys "
466 "%hx:%x %hx:%x %hx:%x %hx:%x\n",
467 pd->port_port, key, dd->ipath_pkeys[0],
468 atomic_read(&dd->ipath_pkeyrefs[0]), dd->ipath_pkeys[1],
469 atomic_read(&dd->ipath_pkeyrefs[1]), dd->ipath_pkeys[2],
470 atomic_read(&dd->ipath_pkeyrefs[2]), dd->ipath_pkeys[3],
471 atomic_read(&dd->ipath_pkeyrefs[3]));
472
473 if (!lkey) {
474 ipath_cdbg(PROC, "p%u tries to set key 0, not allowed\n",
475 pd->port_port);
476 ret = -EINVAL;
477 goto bail;
478 }
479
480 /*
481 * Set the full membership bit, because it has to be
482 * set in the register or the packet, and it seems
483 * cleaner to set in the register than to force all
484 * callers to set it. (see bug 4331)
485 */
486 key |= 0x8000;
487
488 for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
489 if (!pd->port_pkeys[i] && pidx == -1)
490 pidx = i;
491 if (pd->port_pkeys[i] == key) {
492 ipath_cdbg(VERBOSE, "p%u tries to set same pkey "
493 "(%x) more than once\n",
494 pd->port_port, key);
495 ret = -EEXIST;
496 goto bail;
497 }
498 }
499 if (pidx == -1) {
500 ipath_dbg("All pkeys for port %u already in use, "
501 "can't set %x\n", pd->port_port, key);
502 ret = -EBUSY;
503 goto bail;
504 }
505 for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
506 if (!dd->ipath_pkeys[i]) {
507 any++;
508 continue;
509 }
510 if (dd->ipath_pkeys[i] == key) {
511 atomic_t *pkrefs = &dd->ipath_pkeyrefs[i];
512
513 if (atomic_inc_return(pkrefs) > 1) {
514 pd->port_pkeys[pidx] = key;
515 ipath_cdbg(VERBOSE, "p%u set key %x "
516 "matches #%d, count now %d\n",
517 pd->port_port, key, i,
518 atomic_read(pkrefs));
519 ret = 0;
520 goto bail;
521 } else {
522 /*
523 * lost race, decrement count, catch below
524 */
525 atomic_dec(pkrefs);
526 ipath_cdbg(VERBOSE, "Lost race, count was "
527 "0, after dec, it's %d\n",
528 atomic_read(pkrefs));
529 any++;
530 }
531 }
532 if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
533 /*
534 * It makes no sense to have both the limited and
535 * full membership PKEY set at the same time since
536 * the unlimited one will disable the limited one.
537 */
538 ret = -EEXIST;
539 goto bail;
540 }
541 }
542 if (!any) {
543 ipath_dbg("port %u, all pkeys already in use, "
544 "can't set %x\n", pd->port_port, key);
545 ret = -EBUSY;
546 goto bail;
547 }
548 for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
549 if (!dd->ipath_pkeys[i] &&
550 atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
551 u64 pkey;
552
553 /* for ipathstats, etc. */
554 ipath_stats.sps_pkeys[i] = lkey;
555 pd->port_pkeys[pidx] = dd->ipath_pkeys[i] = key;
556 pkey =
557 (u64) dd->ipath_pkeys[0] |
558 ((u64) dd->ipath_pkeys[1] << 16) |
559 ((u64) dd->ipath_pkeys[2] << 32) |
560 ((u64) dd->ipath_pkeys[3] << 48);
561 ipath_cdbg(PROC, "p%u set key %x in #%d, "
562 "portidx %d, new pkey reg %llx\n",
563 pd->port_port, key, i, pidx,
564 (unsigned long long) pkey);
565 ipath_write_kreg(
566 dd, dd->ipath_kregs->kr_partitionkey, pkey);
567
568 ret = 0;
569 goto bail;
570 }
571 }
572 ipath_dbg("port %u, all pkeys already in use 2nd pass, "
573 "can't set %x\n", pd->port_port, key);
574 ret = -EBUSY;
575
576bail:
577 return ret;
578}
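
/*
 * A small sketch of the packed pkey register layout used above: the
 * four 16-bit partition keys share a single 64-bit value, with
 * ipath_pkeys[0] in bits 15..0 up through ipath_pkeys[3] in bits
 * 63..48, and the whole thing written to kr_partitionkey.  Extracting
 * key i from such a packed value (names here are illustrative only)
 * is then just
 *
 *	pkey_i = (u16) (packed >> (16 * i));
 */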
579
580/**
581 * ipath_manage_rcvq - manage a port's receive queue
582 * @pd: the port
583 * @start_stop: action to carry out
584 *
585 * start_stop == 0 disables receive on the port, for use in queue
586 * overflow conditions. start_stop==1 re-enables, to be used to
587 * re-init the software copy of the head register
588 */
589static int ipath_manage_rcvq(struct ipath_portdata *pd, int start_stop)
590{
591 struct ipath_devdata *dd = pd->port_dd;
592 u64 tval;
593
594 ipath_cdbg(PROC, "%sabling rcv for unit %u port %u\n",
595 start_stop ? "en" : "dis", dd->ipath_unit,
596 pd->port_port);
597 /* atomically clear receive enable port. */
598 if (start_stop) {
599 /*
600 * On enable, force in-memory copy of the tail register to
601 * 0, so that protocol code doesn't have to worry about
602 * whether or not the chip has yet updated the in-memory
603 * copy or not on return from the system call. The chip
 604		 * always resets its tail register back to 0 on a
605 * transition from disabled to enabled. This could cause a
606 * problem if software was broken, and did the enable w/o
607 * the disable, but eventually the in-memory copy will be
608 * updated and correct itself, even in the face of software
609 * bugs.
610 */
611 *pd->port_rcvhdrtail_kvaddr = 0;
612 set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
613 &dd->ipath_rcvctrl);
614 } else
615 clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
616 &dd->ipath_rcvctrl);
617 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
618 dd->ipath_rcvctrl);
619 /* now be sure chip saw it before we return */
620 tval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
621 if (start_stop) {
622 /*
623 * And try to be sure that tail reg update has happened too.
624 * This should in theory interlock with the RXE changes to
625 * the tail register. Don't assign it to the tail register
626 * in memory copy, since we could overwrite an update by the
627 * chip if we did.
628 */
629 tval = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
630 }
631 /* always; new head should be equal to new tail; see above */
632 return 0;
633}
634
635static void ipath_clean_part_key(struct ipath_portdata *pd,
636 struct ipath_devdata *dd)
637{
638 int i, j, pchanged = 0;
639 u64 oldpkey;
640
641 /* for debugging only */
642 oldpkey = (u64) dd->ipath_pkeys[0] |
643 ((u64) dd->ipath_pkeys[1] << 16) |
644 ((u64) dd->ipath_pkeys[2] << 32) |
645 ((u64) dd->ipath_pkeys[3] << 48);
646
647 for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
648 if (!pd->port_pkeys[i])
649 continue;
650 ipath_cdbg(VERBOSE, "look for key[%d] %hx in pkeys\n", i,
651 pd->port_pkeys[i]);
652 for (j = 0; j < ARRAY_SIZE(dd->ipath_pkeys); j++) {
653 /* check for match independent of the global bit */
654 if ((dd->ipath_pkeys[j] & 0x7fff) !=
655 (pd->port_pkeys[i] & 0x7fff))
656 continue;
657 if (atomic_dec_and_test(&dd->ipath_pkeyrefs[j])) {
658 ipath_cdbg(VERBOSE, "p%u clear key "
659 "%x matches #%d\n",
660 pd->port_port,
661 pd->port_pkeys[i], j);
662 ipath_stats.sps_pkeys[j] =
663 dd->ipath_pkeys[j] = 0;
664 pchanged++;
665 }
666 else ipath_cdbg(
667 VERBOSE, "p%u key %x matches #%d, "
668 "but ref still %d\n", pd->port_port,
669 pd->port_pkeys[i], j,
670 atomic_read(&dd->ipath_pkeyrefs[j]));
671 break;
672 }
673 pd->port_pkeys[i] = 0;
674 }
675 if (pchanged) {
676 u64 pkey = (u64) dd->ipath_pkeys[0] |
677 ((u64) dd->ipath_pkeys[1] << 16) |
678 ((u64) dd->ipath_pkeys[2] << 32) |
679 ((u64) dd->ipath_pkeys[3] << 48);
680 ipath_cdbg(VERBOSE, "p%u old pkey reg %llx, "
681 "new pkey reg %llx\n", pd->port_port,
682 (unsigned long long) oldpkey,
683 (unsigned long long) pkey);
684 ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
685 pkey);
686 }
687}
688
689/**
690 * ipath_create_user_egr - allocate eager TID buffers
691 * @pd: the port to allocate TID buffers for
692 *
693 * This routine is now quite different for user and kernel, because
 694 * the kernel uses skb's for accelerated network performance.
 695 * This is the user port version.
 696 *
 697 * Allocate the eager TID buffers and program them into infinipath.
 698 * They are no longer completely contiguous; we do multiple allocation
699 * calls.
700 */
701static int ipath_create_user_egr(struct ipath_portdata *pd)
702{
703 struct ipath_devdata *dd = pd->port_dd;
704 unsigned e, egrcnt, alloced, egrperchunk, chunk, egrsize, egroff;
705 size_t size;
706 int ret;
707
708 egrcnt = dd->ipath_rcvegrcnt;
709 /* TID number offset for this port */
710 egroff = pd->port_port * egrcnt;
711 egrsize = dd->ipath_rcvegrbufsize;
712 ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid "
713 "offset %x, egrsize %u\n", egrcnt, egroff, egrsize);
714
715 /*
716 * to avoid wasting a lot of memory, we allocate 32KB chunks of
717 * physically contiguous memory, advance through it until used up
718 * and then allocate more. Of course, we need memory to store those
719 * extra pointers, now. Started out with 256KB, but under heavy
720 * memory pressure (creating large files and then copying them over
721 * NFS while doing lots of MPI jobs), we hit some allocation
722 * failures, even though we can sleep... (2.6.10) Still get
 723	 * failures at 64K.  32K is the lowest we can go without wasting
 724	 * more memory again.  It seems likely that the coalescing in
725 * free_pages, etc. still has issues (as it has had previously
726 * during 2.6.x development).
727 */
728 size = 0x8000;
729 alloced = ALIGN(egrsize * egrcnt, size);
730 egrperchunk = size / egrsize;
731 chunk = (egrcnt + egrperchunk - 1) / egrperchunk;
732 pd->port_rcvegrbuf_chunks = chunk;
733 pd->port_rcvegrbufs_perchunk = egrperchunk;
734 pd->port_rcvegrbuf_size = size;
735 pd->port_rcvegrbuf = vmalloc(chunk * sizeof(pd->port_rcvegrbuf[0]));
736 if (!pd->port_rcvegrbuf) {
737 ret = -ENOMEM;
738 goto bail;
739 }
740 pd->port_rcvegrbuf_phys =
741 vmalloc(chunk * sizeof(pd->port_rcvegrbuf_phys[0]));
742 if (!pd->port_rcvegrbuf_phys) {
743 ret = -ENOMEM;
744 goto bail_rcvegrbuf;
745 }
746 for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
747 /*
748 * GFP_USER, but without GFP_FS, so buffer cache can be
749 * coalesced (we hope); otherwise, even at order 4,
750 * heavy filesystem activity makes these fail
751 */
752 gfp_t gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
753
754 pd->port_rcvegrbuf[e] = dma_alloc_coherent(
755 &dd->pcidev->dev, size, &pd->port_rcvegrbuf_phys[e],
756 gfp_flags);
757
758 if (!pd->port_rcvegrbuf[e]) {
759 ret = -ENOMEM;
760 goto bail_rcvegrbuf_phys;
761 }
762 }
763
764 pd->port_rcvegr_phys = pd->port_rcvegrbuf_phys[0];
765
766 for (e = chunk = 0; chunk < pd->port_rcvegrbuf_chunks; chunk++) {
767 dma_addr_t pa = pd->port_rcvegrbuf_phys[chunk];
768 unsigned i;
769
770 for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
771 dd->ipath_f_put_tid(dd, e + egroff +
772 (u64 __iomem *)
773 ((char __iomem *)
774 dd->ipath_kregbase +
775 dd->ipath_rcvegrbase), 0, pa);
776 pa += egrsize;
777 }
778 cond_resched(); /* don't hog the cpu */
779 }
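	/*
	 * Note on the put_tid call above: the cast binds tighter than '+',
	 * so the second argument evaluates to
	 *	((u64 __iomem *)(ipath_kregbase + ipath_rcvegrbase)) + (e + egroff)
	 * i.e. the address of eager TID slot (e + egroff) in the chip's
	 * TID array; each slot is programmed with the physical address of
	 * the next egrsize-sized piece of the current chunk (pa advances
	 * by egrsize per buffer).
	 */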
780
781 ret = 0;
782 goto bail;
783
784bail_rcvegrbuf_phys:
785 for (e = 0; e < pd->port_rcvegrbuf_chunks &&
786 pd->port_rcvegrbuf[e]; e++)
787 dma_free_coherent(&dd->pcidev->dev, size,
788 pd->port_rcvegrbuf[e],
789 pd->port_rcvegrbuf_phys[e]);
790
791 vfree(pd->port_rcvegrbuf_phys);
792 pd->port_rcvegrbuf_phys = NULL;
793bail_rcvegrbuf:
794 vfree(pd->port_rcvegrbuf);
795 pd->port_rcvegrbuf = NULL;
796bail:
797 return ret;
798}
799
800static int ipath_do_user_init(struct ipath_portdata *pd,
801 const struct ipath_user_info *uinfo)
802{
803 int ret = 0;
804 struct ipath_devdata *dd = pd->port_dd;
805 u64 physaddr, uaddr, off, atmp;
806 struct page *pagep;
807 u32 head32;
808 u64 head;
809
810 /* for now, if major version is different, bail */
811 if ((uinfo->spu_userversion >> 16) != IPATH_USER_SWMAJOR) {
812 dev_info(&dd->pcidev->dev,
813 "User major version %d not same as driver "
814 "major %d\n", uinfo->spu_userversion >> 16,
815 IPATH_USER_SWMAJOR);
816 ret = -ENODEV;
817 goto done;
818 }
819
820 if ((uinfo->spu_userversion & 0xffff) != IPATH_USER_SWMINOR)
821 ipath_dbg("User minor version %d not same as driver "
822 "minor %d\n", uinfo->spu_userversion & 0xffff,
823 IPATH_USER_SWMINOR);
824
825 if (uinfo->spu_rcvhdrsize) {
826 ret = ipath_setrcvhdrsize(dd, uinfo->spu_rcvhdrsize);
827 if (ret)
828 goto done;
829 }
830
831 /* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */
832
833 /* set up for the rcvhdr Q tail register writeback to user memory */
834 if (!uinfo->spu_rcvhdraddr ||
835 !access_ok(VERIFY_WRITE, (u64 __user *) (unsigned long)
836 uinfo->spu_rcvhdraddr, sizeof(u64))) {
837 ipath_dbg("Port %d rcvhdrtail addr %llx not valid\n",
838 pd->port_port,
839 (unsigned long long) uinfo->spu_rcvhdraddr);
840 ret = -EINVAL;
841 goto done;
842 }
843
844 off = offset_in_page(uinfo->spu_rcvhdraddr);
845 uaddr = PAGE_MASK & (unsigned long) uinfo->spu_rcvhdraddr;
846 ret = ipath_get_user_pages_nocopy(uaddr, &pagep);
847 if (ret) {
848 dev_info(&dd->pcidev->dev, "Failed to lookup and lock "
849 "address %llx for rcvhdrtail: errno %d\n",
850 (unsigned long long) uinfo->spu_rcvhdraddr, -ret);
851 goto done;
852 }
853 ipath_stats.sps_pagelocks++;
854 pd->port_rcvhdrtail_uaddr = uaddr;
855 pd->port_rcvhdrtail_pagep = pagep;
856 pd->port_rcvhdrtail_kvaddr =
857 page_address(pagep);
858 pd->port_rcvhdrtail_kvaddr += off;
859 physaddr = page_to_phys(pagep) + off;
860 ipath_cdbg(VERBOSE, "port %d user addr %llx hdrtailaddr, %llx "
861 "physical (off=%llx)\n",
862 pd->port_port,
863 (unsigned long long) uinfo->spu_rcvhdraddr,
864 (unsigned long long) physaddr, (unsigned long long) off);
865 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
866 pd->port_port, physaddr);
867 atmp = ipath_read_kreg64_port(dd,
868 dd->ipath_kregs->kr_rcvhdrtailaddr,
869 pd->port_port);
870 if (physaddr != atmp) {
871 ipath_dev_err(dd,
872 "Catastrophic software error, "
873 "RcvHdrTailAddr%u written as %llx, "
874 "read back as %llx\n", pd->port_port,
875 (unsigned long long) physaddr,
876 (unsigned long long) atmp);
877 ret = -EINVAL;
878 goto done;
879 }
880
881 /* for right now, kernel piobufs are at end, so port 1 is at 0 */
882 pd->port_piobufs = dd->ipath_piobufbase +
883 dd->ipath_pbufsport * (pd->port_port -
884 1) * dd->ipath_palign;
885 ipath_cdbg(VERBOSE, "Set base of piobufs for port %u to 0x%x\n",
886 pd->port_port, pd->port_piobufs);
887
 888	/*
 889	 * Now allocate the rcvhdr Q and eager TIDs; skip the TID
 890	 * array for the time being. If pd->port_port exceeds what the
 891	 * chip supports, we would need extra handling here, someday, to
 892	 * overflow through port 0.
 893	 */
894 ret = ipath_create_rcvhdrq(dd, pd);
895 if (!ret)
896 ret = ipath_create_user_egr(pd);
897 if (ret)
898 goto done;
899 /* enable receives now */
900 /* atomically set enable bit for this port */
901 set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
902 &dd->ipath_rcvctrl);
903
904 /*
905 * set the head registers for this port to the current values
906 * of the tail pointers, since we don't know if they were
907 * updated on last use of the port.
908 */
909 head32 = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
910 head = (u64) head32;
911 ipath_write_ureg(dd, ur_rcvhdrhead, head, pd->port_port);
912 head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
913 ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
914 dd->ipath_lastegrheads[pd->port_port] = -1;
915 dd->ipath_lastrcvhdrqtails[pd->port_port] = -1;
916 ipath_cdbg(VERBOSE, "Wrote port%d head %llx, egrhead %x from "
917 "tail regs\n", pd->port_port,
918 (unsigned long long) head, head32);
919 pd->port_tidcursor = 0; /* start at beginning after open */
920 /*
921 * now enable the port; the tail registers will be written to memory
922 * by the chip as soon as it sees the write to
923 * dd->ipath_kregs->kr_rcvctrl. The update only happens on
924 * transition from 0 to 1, so clear it first, then set it as part of
925 * enabling the port. This will (very briefly) affect any other
926 * open ports, but it shouldn't be long enough to be an issue.
927 */
928 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
929 dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD);
930 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
931 dd->ipath_rcvctrl);
932
933done:
934 return ret;
935}
936
937static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd,
938 u64 ureg)
939{
940 unsigned long phys;
941 int ret;
942
943 /* it's the real hardware, so io_remap works */
944
945 if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
946 dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen "
947 "%lx > PAGE\n", vma->vm_end - vma->vm_start);
948 ret = -EFAULT;
949 } else {
950 phys = dd->ipath_physaddr + ureg;
951 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
952
953 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
954 ret = io_remap_pfn_range(vma, vma->vm_start,
955 phys >> PAGE_SHIFT,
956 vma->vm_end - vma->vm_start,
957 vma->vm_page_prot);
958 }
959 return ret;
960}
961
962static int mmap_piobufs(struct vm_area_struct *vma,
963 struct ipath_devdata *dd,
964 struct ipath_portdata *pd)
965{
966 unsigned long phys;
967 int ret;
968
969 /*
970 * When we map the PIO buffers, we want to map them as writeonly, no
971 * read possible.
972 */
973
974 if ((vma->vm_end - vma->vm_start) >
975 (dd->ipath_pbufsport * dd->ipath_palign)) {
976 dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: "
977 "reqlen %lx > PAGE\n",
978 vma->vm_end - vma->vm_start);
979 ret = -EFAULT;
980 goto bail;
981 }
982
983 phys = dd->ipath_physaddr + pd->port_piobufs;
984 /*
985 * Do *NOT* mark this as non-cached (PWT bit), or we don't get the
986 * write combining behavior we want on the PIO buffers!
987 * vma->vm_page_prot =
988 * pgprot_noncached(vma->vm_page_prot);
989 */
990
991 if (vma->vm_flags & VM_READ) {
992 dev_info(&dd->pcidev->dev,
993 "Can't map piobufs as readable (flags=%lx)\n",
994 vma->vm_flags);
995 ret = -EPERM;
996 goto bail;
997 }
998
999 /* don't allow them to later change to readable with mprotect */
1000
1001 vma->vm_flags &= ~VM_MAYWRITE;
1002 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
1003
1004 ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
1005 vma->vm_end - vma->vm_start,
1006 vma->vm_page_prot);
1007bail:
1008 return ret;
1009}
1010
1011static int mmap_rcvegrbufs(struct vm_area_struct *vma,
1012 struct ipath_portdata *pd)
1013{
1014 struct ipath_devdata *dd = pd->port_dd;
1015 unsigned long start, size;
1016 size_t total_size, i;
1017 dma_addr_t *phys;
1018 int ret;
1019
1020 if (!pd->port_rcvegrbuf) {
1021 ret = -EFAULT;
1022 goto bail;
1023 }
1024
1025 size = pd->port_rcvegrbuf_size;
1026 total_size = pd->port_rcvegrbuf_chunks * size;
1027 if ((vma->vm_end - vma->vm_start) > total_size) {
1028 dev_info(&dd->pcidev->dev, "FAIL on egr bufs: "
1029 "reqlen %lx > actual %lx\n",
1030 vma->vm_end - vma->vm_start,
1031 (unsigned long) total_size);
1032 ret = -EFAULT;
1033 goto bail;
1034 }
1035
1036 if (vma->vm_flags & VM_WRITE) {
1037 dev_info(&dd->pcidev->dev, "Can't map eager buffers as "
1038 "writable (flags=%lx)\n", vma->vm_flags);
1039 ret = -EPERM;
1040 goto bail;
1041 }
1042
1043 start = vma->vm_start;
1044 phys = pd->port_rcvegrbuf_phys;
1045
1046 /* don't allow them to later change to writeable with mprotect */
1047 vma->vm_flags &= ~VM_MAYWRITE;
1048
1049 for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) {
1050 ret = remap_pfn_range(vma, start, phys[i] >> PAGE_SHIFT,
1051 size, vma->vm_page_prot);
1052 if (ret < 0)
1053 goto bail;
1054 }
1055 ret = 0;
1056
1057bail:
1058 return ret;
1059}
1060
1061static int mmap_rcvhdrq(struct vm_area_struct *vma,
1062 struct ipath_portdata *pd)
1063{
1064 struct ipath_devdata *dd = pd->port_dd;
1065 size_t total_size;
1066 int ret;
1067
1068 /*
1069 * kmalloc'ed memory, physically contiguous; this is from
1070 * spi_rcvhdr_base; we allow user to map read-write so they can
1071 * write hdrq entries to allow protocol code to directly poll
1072 * whether a hdrq entry has been written.
1073 */
1074 total_size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
1075 sizeof(u32), PAGE_SIZE);
1076 if ((vma->vm_end - vma->vm_start) > total_size) {
1077 dev_info(&dd->pcidev->dev,
1078 "FAIL on rcvhdrq: reqlen %lx > actual %lx\n",
1079 vma->vm_end - vma->vm_start,
1080 (unsigned long) total_size);
1081 ret = -EFAULT;
1082 goto bail;
1083 }
1084
1085 ret = remap_pfn_range(vma, vma->vm_start,
1086 pd->port_rcvhdrq_phys >> PAGE_SHIFT,
1087 vma->vm_end - vma->vm_start,
1088 vma->vm_page_prot);
1089bail:
1090 return ret;
1091}
1092
1093static int mmap_pioavailregs(struct vm_area_struct *vma,
1094 struct ipath_portdata *pd)
1095{
1096 struct ipath_devdata *dd = pd->port_dd;
1097 int ret;
1098
1099 /*
1100 * when we map the PIO bufferavail registers, we want to map them as
1101 * readonly, no write possible.
1102 *
1103 * kmalloc'ed memory, physically contiguous, one page only, readonly
1104 */
1105
1106 if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
1107 dev_info(&dd->pcidev->dev, "FAIL on pioavailregs_dma: "
1108 "reqlen %lx > actual %lx\n",
1109 vma->vm_end - vma->vm_start,
1110 (unsigned long) PAGE_SIZE);
1111 ret = -EFAULT;
1112 goto bail;
1113 }
1114
1115 if (vma->vm_flags & VM_WRITE) {
1116 dev_info(&dd->pcidev->dev,
1117 "Can't map pioavailregs as writable (flags=%lx)\n",
1118 vma->vm_flags);
1119 ret = -EPERM;
1120 goto bail;
1121 }
1122
1123 /* don't allow them to later change with mprotect */
1124 vma->vm_flags &= ~VM_MAYWRITE;
1125
1126 ret = remap_pfn_range(vma, vma->vm_start,
1127 dd->ipath_pioavailregs_phys >> PAGE_SHIFT,
1128 PAGE_SIZE, vma->vm_page_prot);
1129bail:
1130 return ret;
1131}
1132
1133/**
1134 * ipath_mmap - mmap various structures into user space
1135 * @fp: the file pointer
1136 * @vma: the VM area
1137 *
1138 * We use this to have a shared buffer between the kernel and the user code
1139 * for the rcvhdr queue, egr buffers, and the per-port user regs and pio
1140 * buffers in the chip. We have the open and close entries so we can bump
1141 * the ref count and keep the driver from being unloaded while still mapped.
1142 */
1143static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
1144{
1145 struct ipath_portdata *pd;
1146 struct ipath_devdata *dd;
1147 u64 pgaddr, ureg;
1148 int ret;
1149
1150 pd = port_fp(fp);
1151 dd = pd->port_dd;
1152 /*
1153 * This is the ipath_do_user_init() code, mapping the shared buffers
1154 * into the user process. The address referred to by vm_pgoff is the
1155 * virtual, not physical, address; we only do one mmap for each
1156 * space mapped.
1157 */
1158 pgaddr = vma->vm_pgoff << PAGE_SHIFT;
1159
1160 /*
1161	 * note that ureg does *NOT* have the kregvirt as part of it, to be
1162	 * sure that for 32 bit programs, we don't end up trying to map a
1163	 * > 44-bit address. Has to match the ipath_get_base_info() code
1164	 * that sets __spi_uregbase.
1165 */
1166
1167 ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
1168
1169 ipath_cdbg(MM, "ushare: pgaddr %llx vm_start=%lx, vmlen %lx\n",
1170 (unsigned long long) pgaddr, vma->vm_start,
1171 vma->vm_end - vma->vm_start);
1172
1173 if (pgaddr == ureg)
1174 ret = mmap_ureg(vma, dd, ureg);
1175 else if (pgaddr == pd->port_piobufs)
1176 ret = mmap_piobufs(vma, dd, pd);
1177 else if (pgaddr == (u64) pd->port_rcvegr_phys)
1178 ret = mmap_rcvegrbufs(vma, pd);
1179 else if (pgaddr == (u64) pd->port_rcvhdrq_phys)
1180 ret = mmap_rcvhdrq(vma, pd);
1181 else if (pgaddr == dd->ipath_pioavailregs_phys)
1182 ret = mmap_pioavailregs(vma, pd);
1183 else
1184 ret = -EINVAL;
1185
1186 vma->vm_private_data = NULL;
1187
1188 if (ret < 0)
1189 dev_info(&dd->pcidev->dev,
1190 "Failure %d on addr %lx, off %lx\n",
1191 -ret, vma->vm_start, vma->vm_pgoff);
1192
1193 return ret;
1194}
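/*
 * Rough usage sketch (hypothetical user-space code, for illustration
 * only): the offsets matched above are the address values the driver
 * reports through ipath_get_base_info(); user space passes one back
 * verbatim as the mmap file offset, e.g. for the PIO buffers (which must
 * be mapped write-only, per mmap_piobufs()):
 *
 *	pio = mmap(NULL, piolen, PROT_WRITE, MAP_SHARED, fd,
 *		   (off_t) piobufs_addr_from_base_info);
 */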
1195
1196static unsigned int ipath_poll(struct file *fp,
1197 struct poll_table_struct *pt)
1198{
1199 struct ipath_portdata *pd;
1200 u32 head, tail;
1201 int bit;
1202 struct ipath_devdata *dd;
1203
1204 pd = port_fp(fp);
1205 dd = pd->port_dd;
1206
1207 bit = pd->port_port + INFINIPATH_R_INTRAVAIL_SHIFT;
1208 set_bit(bit, &dd->ipath_rcvctrl);
1209
1210 /*
1211 * Before blocking, make sure that head is still == tail,
1212 * reading from the chip, so we can be sure the interrupt
1213 * enable has made it to the chip. If not equal, disable
1214 * interrupt again and return immediately. This avoids races,
1215 * and the overhead of the chip read doesn't matter much at
1216 * this point, since we are waiting for something anyway.
1217 */
1218
1219 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
1220 dd->ipath_rcvctrl);
1221
1222 head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
1223 tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
1224
1225 if (tail == head) {
1226 set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
1227 poll_wait(fp, &pd->port_wait, pt);
1228
1229 if (test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {
1230 /* timed out, no packets received */
1231 clear_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
1232 pd->port_rcvwait_to++;
1233 }
1234 }
1235 else {
1236 /* it's already happened; don't do wait_event overhead */
1237 pd->port_rcvnowait++;
1238 }
1239
1240 clear_bit(bit, &dd->ipath_rcvctrl);
1241 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
1242 dd->ipath_rcvctrl);
1243
1244 return 0;
1245}
1246
1247static int try_alloc_port(struct ipath_devdata *dd, int port,
1248 struct file *fp)
1249{
1250 int ret;
1251
1252 if (!dd->ipath_pd[port]) {
1253 void *p, *ptmp;
1254
1255 p = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL);
1256
1257 /*
1258 * Allocate memory for use in ipath_tid_update() just once
1259 * at open, not per call. Reduces cost of expected send
1260 * setup.
1261 */
1262 ptmp = kmalloc(dd->ipath_rcvtidcnt * sizeof(u16) +
1263 dd->ipath_rcvtidcnt * sizeof(struct page **),
1264 GFP_KERNEL);
1265 if (!p || !ptmp) {
1266 ipath_dev_err(dd, "Unable to allocate portdata "
1267 "memory, failing open\n");
1268 ret = -ENOMEM;
1269 kfree(p);
1270 kfree(ptmp);
1271 goto bail;
1272 }
1273 dd->ipath_pd[port] = p;
1274 dd->ipath_pd[port]->port_port = port;
1275 dd->ipath_pd[port]->port_dd = dd;
1276 dd->ipath_pd[port]->port_tid_pg_list = ptmp;
1277 init_waitqueue_head(&dd->ipath_pd[port]->port_wait);
1278 }
1279 if (!dd->ipath_pd[port]->port_cnt) {
1280 dd->ipath_pd[port]->port_cnt = 1;
1281 fp->private_data = (void *) dd->ipath_pd[port];
1282 ipath_cdbg(PROC, "%s[%u] opened unit:port %u:%u\n",
1283 current->comm, current->pid, dd->ipath_unit,
1284 port);
1285 dd->ipath_pd[port]->port_pid = current->pid;
1286 strncpy(dd->ipath_pd[port]->port_comm, current->comm,
1287 sizeof(dd->ipath_pd[port]->port_comm));
1288 ipath_stats.sps_ports++;
1289 ret = 0;
1290 goto bail;
1291 }
1292 ret = -EBUSY;
1293
1294bail:
1295 return ret;
1296}
1297
1298static inline int usable(struct ipath_devdata *dd)
1299{
1300 return dd &&
1301 (dd->ipath_flags & IPATH_PRESENT) &&
1302 dd->ipath_kregbase &&
1303 dd->ipath_lid &&
1304 !(dd->ipath_flags & (IPATH_LINKDOWN | IPATH_DISABLED
1305 | IPATH_LINKUNK));
1306}
1307
1308static int find_free_port(int unit, struct file *fp)
1309{
1310 struct ipath_devdata *dd = ipath_lookup(unit);
1311 int ret, i;
1312
1313 if (!dd) {
1314 ret = -ENODEV;
1315 goto bail;
1316 }
1317
1318 if (!usable(dd)) {
1319 ret = -ENETDOWN;
1320 goto bail;
1321 }
1322
1323 for (i = 0; i < dd->ipath_cfgports; i++) {
1324 ret = try_alloc_port(dd, i, fp);
1325 if (ret != -EBUSY)
1326 goto bail;
1327 }
1328 ret = -EBUSY;
1329
1330bail:
1331 return ret;
1332}
1333
1334static int find_best_unit(struct file *fp)
1335{
1336 int ret = 0, i, prefunit = -1, devmax;
1337 int maxofallports, npresent, nup;
1338 int ndev;
1339
1340 (void) ipath_count_units(&npresent, &nup, &maxofallports);
1341
1342 /*
1343 * This code is present to allow a knowledgeable person to
1344 * specify the layout of processes to processors before opening
1345 * this driver, and then we'll assign the process to the "closest"
1346 * HT-400 to that processor (we assume reasonable connectivity,
1347 * for now). This code assumes that if affinity has been set
1348 * before this point, that at most one cpu is set; for now this
1349 * is reasonable. I check for both cpus_empty() and cpus_full(),
1350 * in case some kernel variant sets none of the bits when no
1351 * affinity is set. 2.6.11 and 12 kernels have all present
1352 * cpus set. Some day we'll have to fix it up further to handle
1353 * a cpu subset. This algorithm fails for two HT-400's connected
1354 * in tunnel fashion. Eventually this needs real topology
1355 * information. There may be some issues with dual core numbering
1356 * as well. This needs more work prior to release.
1357 */
1358 if (!cpus_empty(current->cpus_allowed) &&
1359 !cpus_full(current->cpus_allowed)) {
1360 int ncpus = num_online_cpus(), curcpu = -1;
1361 for (i = 0; i < ncpus; i++)
1362 if (cpu_isset(i, current->cpus_allowed)) {
1363 ipath_cdbg(PROC, "%s[%u] affinity set for "
1364 "cpu %d\n", current->comm,
1365 current->pid, i);
1366 curcpu = i;
1367 }
1368 if (curcpu != -1) {
1369 if (npresent) {
1370 prefunit = curcpu / (ncpus / npresent);
1371 ipath_dbg("%s[%u] %d chips, %d cpus, "
1372 "%d cpus/chip, select unit %d\n",
1373 current->comm, current->pid,
1374 npresent, ncpus, ncpus / npresent,
1375 prefunit);
1376 }
1377 }
1378 }
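	/*
	 * Example of the mapping above (illustrative numbers only): with
	 * ncpus = 8 and npresent = 2 chips, a process bound to cpu 5 gets
	 * prefunit = 5 / (8 / 2) = 1, i.e. cpus 0-3 prefer unit 0 and
	 * cpus 4-7 prefer unit 1.
	 */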
1379
1380 /*
1381 * user ports start at 1, kernel port is 0
1382 * For now, we do round-robin access across all chips
1383 */
1384
1385 if (prefunit != -1)
1386 devmax = prefunit + 1;
1387 else
1388 devmax = ipath_count_units(NULL, NULL, NULL);
1389recheck:
1390 for (i = 1; i < maxofallports; i++) {
1391 for (ndev = prefunit != -1 ? prefunit : 0; ndev < devmax;
1392 ndev++) {
1393 struct ipath_devdata *dd = ipath_lookup(ndev);
1394
1395 if (!usable(dd))
1396 continue; /* can't use this unit */
1397 if (i >= dd->ipath_cfgports)
1398 /*
1399 * Maxed out on users of this unit. Try
1400 * next.
1401 */
1402 continue;
1403 ret = try_alloc_port(dd, i, fp);
1404 if (!ret)
1405 goto done;
1406 }
1407 }
1408
1409 if (npresent) {
1410 if (nup == 0) {
1411 ret = -ENETDOWN;
1412 ipath_dbg("No ports available (none initialized "
1413 "and ready)\n");
1414 } else {
1415 if (prefunit > 0) {
1416 /* if started above 0, retry from 0 */
1417 ipath_cdbg(PROC,
1418 "%s[%u] no ports on prefunit "
1419 "%d, clear and re-check\n",
1420 current->comm, current->pid,
1421 prefunit);
1422 devmax = ipath_count_units(NULL, NULL,
1423 NULL);
1424 prefunit = -1;
1425 goto recheck;
1426 }
1427 ret = -EBUSY;
1428 ipath_dbg("No ports available\n");
1429 }
1430 } else {
1431 ret = -ENXIO;
1432 ipath_dbg("No boards found\n");
1433 }
1434
1435done:
1436 return ret;
1437}
1438
1439static int ipath_open(struct inode *in, struct file *fp)
1440{
1441 int ret, minor;
1442
1443 mutex_lock(&ipath_mutex);
1444
1445 minor = iminor(in);
1446 ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
1447 (long)in->i_rdev, minor);
1448
1449 if (minor)
1450 ret = find_free_port(minor - 1, fp);
1451 else
1452 ret = find_best_unit(fp);
1453
1454 mutex_unlock(&ipath_mutex);
1455 return ret;
1456}
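/*
 * Device node convention implied above: minor 0 is the "wildcard" node
 * (pick the best unit and port automatically via find_best_unit()),
 * while minor N (N >= 1) opens a free port on unit N-1; see
 * ipath_user_add(), which registers the wildcard cdev at minor 0 and
 * each unit's cdev at minor ipath_unit + 1.
 */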
1457
1458/**
1459 * unlock_expected_tids - unlock any expected TID entries the port still had in use
1460 * @pd: port
1461 *
1462 * We don't actually update the chip here, because we do a bulk update
1463 * below, using ipath_f_clear_tids.
1464 */
1465static void unlock_expected_tids(struct ipath_portdata *pd)
1466{
1467 struct ipath_devdata *dd = pd->port_dd;
1468 int port_tidbase = pd->port_port * dd->ipath_rcvtidcnt;
1469 int i, cnt = 0, maxtid = port_tidbase + dd->ipath_rcvtidcnt;
1470
1471 ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n",
1472 pd->port_port);
1473 for (i = port_tidbase; i < maxtid; i++) {
1474 if (!dd->ipath_pageshadow[i])
1475 continue;
1476
1477 ipath_release_user_pages_on_close(&dd->ipath_pageshadow[i],
1478 1);
1479 dd->ipath_pageshadow[i] = NULL;
1480 cnt++;
1481 ipath_stats.sps_pageunlocks++;
1482 }
1483 if (cnt)
1484 ipath_cdbg(VERBOSE, "Port %u locked %u expTID entries\n",
1485 pd->port_port, cnt);
1486
1487 if (ipath_stats.sps_pagelocks || ipath_stats.sps_pageunlocks)
1488 ipath_cdbg(VERBOSE, "%llu pages locked, %llu unlocked\n",
1489 (unsigned long long) ipath_stats.sps_pagelocks,
1490 (unsigned long long)
1491 ipath_stats.sps_pageunlocks);
1492}
1493
1494static int ipath_close(struct inode *in, struct file *fp)
1495{
1496 int ret = 0;
1497 struct ipath_portdata *pd;
1498 struct ipath_devdata *dd;
1499 unsigned port;
1500
1501 ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n",
1502 (long)in->i_rdev, fp->private_data);
1503
1504 mutex_lock(&ipath_mutex);
1505
1506 pd = port_fp(fp);
1507 port = pd->port_port;
1508 fp->private_data = NULL;
1509 dd = pd->port_dd;
1510
1511 if (pd->port_hdrqfull) {
1512 ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors "
1513 "during run\n", pd->port_comm, pd->port_pid,
1514 pd->port_hdrqfull);
1515 pd->port_hdrqfull = 0;
1516 }
1517
1518 if (pd->port_rcvwait_to || pd->port_piowait_to
1519 || pd->port_rcvnowait || pd->port_pionowait) {
1520 ipath_cdbg(VERBOSE, "port%u, %u rcv, %u pio wait timeo; "
1521 "%u rcv %u, pio already\n",
1522 pd->port_port, pd->port_rcvwait_to,
1523 pd->port_piowait_to, pd->port_rcvnowait,
1524 pd->port_pionowait);
1525 pd->port_rcvwait_to = pd->port_piowait_to =
1526 pd->port_rcvnowait = pd->port_pionowait = 0;
1527 }
1528 if (pd->port_flag) {
1529 ipath_dbg("port %u port_flag still set to 0x%lx\n",
1530 pd->port_port, pd->port_flag);
1531 pd->port_flag = 0;
1532 }
1533
1534 if (dd->ipath_kregbase) {
1535 if (pd->port_rcvhdrtail_uaddr) {
1536 pd->port_rcvhdrtail_uaddr = 0;
1537 pd->port_rcvhdrtail_kvaddr = NULL;
1538 ipath_release_user_pages_on_close(
1539 &pd->port_rcvhdrtail_pagep, 1);
1540 pd->port_rcvhdrtail_pagep = NULL;
1541 ipath_stats.sps_pageunlocks++;
1542 }
1543 ipath_write_kreg_port(
1544 dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
1545 port, 0ULL);
1546 ipath_write_kreg_port(
1547 dd, dd->ipath_kregs->kr_rcvhdraddr,
1548 pd->port_port, 0);
1549
1550 /* clean up the pkeys for this port user */
1551 ipath_clean_part_key(pd, dd);
1552
1553 if (port < dd->ipath_cfgports) {
1554 int i = dd->ipath_pbufsport * (port - 1);
1555 ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport);
1556
1557 /* atomically clear receive enable port. */
1558 clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + port,
1559 &dd->ipath_rcvctrl);
1560 ipath_write_kreg(
1561 dd,
1562 dd->ipath_kregs->kr_rcvctrl,
1563 dd->ipath_rcvctrl);
1564
1565 if (dd->ipath_pageshadow)
1566 unlock_expected_tids(pd);
1567 ipath_stats.sps_ports--;
1568 ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
1569 pd->port_comm, pd->port_pid,
1570 dd->ipath_unit, port);
1571 }
1572 }
1573
1574 pd->port_cnt = 0;
1575 pd->port_pid = 0;
1576
1577 dd->ipath_f_clear_tids(dd, pd->port_port);
1578
1579 ipath_free_pddata(dd, pd->port_port, 0);
1580
1581 mutex_unlock(&ipath_mutex);
1582
1583 return ret;
1584}
1585
1586static int ipath_port_info(struct ipath_portdata *pd,
1587 struct ipath_port_info __user *uinfo)
1588{
1589 struct ipath_port_info info;
1590 int nup;
1591 int ret;
1592
1593 (void) ipath_count_units(NULL, &nup, NULL);
1594 info.num_active = nup;
1595 info.unit = pd->port_dd->ipath_unit;
1596 info.port = pd->port_port;
1597
1598 if (copy_to_user(uinfo, &info, sizeof(info))) {
1599 ret = -EFAULT;
1600 goto bail;
1601 }
1602 ret = 0;
1603
1604bail:
1605 return ret;
1606}
1607
1608static ssize_t ipath_write(struct file *fp, const char __user *data,
1609 size_t count, loff_t *off)
1610{
1611 const struct ipath_cmd __user *ucmd;
1612 struct ipath_portdata *pd;
1613 const void __user *src;
1614 size_t consumed, copy;
1615 struct ipath_cmd cmd;
1616 ssize_t ret = 0;
1617 void *dest;
1618
1619 if (count < sizeof(cmd.type)) {
1620 ret = -EINVAL;
1621 goto bail;
1622 }
1623
1624 ucmd = (const struct ipath_cmd __user *) data;
1625
1626 if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
1627 ret = -EFAULT;
1628 goto bail;
1629 }
1630
1631 consumed = sizeof(cmd.type);
1632
1633 switch (cmd.type) {
1634 case IPATH_CMD_USER_INIT:
1635 copy = sizeof(cmd.cmd.user_info);
1636 dest = &cmd.cmd.user_info;
1637 src = &ucmd->cmd.user_info;
1638 break;
1639 case IPATH_CMD_RECV_CTRL:
1640 copy = sizeof(cmd.cmd.recv_ctrl);
1641 dest = &cmd.cmd.recv_ctrl;
1642 src = &ucmd->cmd.recv_ctrl;
1643 break;
1644 case IPATH_CMD_PORT_INFO:
1645 copy = sizeof(cmd.cmd.port_info);
1646 dest = &cmd.cmd.port_info;
1647 src = &ucmd->cmd.port_info;
1648 break;
1649 case IPATH_CMD_TID_UPDATE:
1650 case IPATH_CMD_TID_FREE:
1651 copy = sizeof(cmd.cmd.tid_info);
1652 dest = &cmd.cmd.tid_info;
1653 src = &ucmd->cmd.tid_info;
1654 break;
1655 case IPATH_CMD_SET_PART_KEY:
1656 copy = sizeof(cmd.cmd.part_key);
1657 dest = &cmd.cmd.part_key;
1658 src = &ucmd->cmd.part_key;
1659 break;
1660 default:
1661 ret = -EINVAL;
1662 goto bail;
1663 }
1664
1665 if ((count - consumed) < copy) {
1666 ret = -EINVAL;
1667 goto bail;
1668 }
1669
1670 if (copy_from_user(dest, src, copy)) {
1671 ret = -EFAULT;
1672 goto bail;
1673 }
1674
1675 consumed += copy;
1676 pd = port_fp(fp);
1677
1678 switch (cmd.type) {
1679 case IPATH_CMD_USER_INIT:
1680 ret = ipath_do_user_init(pd, &cmd.cmd.user_info);
1681 if (ret < 0)
1682 goto bail;
1683 ret = ipath_get_base_info(
1684 pd, (void __user *) (unsigned long)
1685 cmd.cmd.user_info.spu_base_info,
1686 cmd.cmd.user_info.spu_base_info_size);
1687 break;
1688 case IPATH_CMD_RECV_CTRL:
1689 ret = ipath_manage_rcvq(pd, cmd.cmd.recv_ctrl);
1690 break;
1691 case IPATH_CMD_PORT_INFO:
1692 ret = ipath_port_info(pd,
1693 (struct ipath_port_info __user *)
1694 (unsigned long) cmd.cmd.port_info);
1695 break;
1696 case IPATH_CMD_TID_UPDATE:
1697 ret = ipath_tid_update(pd, &cmd.cmd.tid_info);
1698 break;
1699 case IPATH_CMD_TID_FREE:
1700 ret = ipath_tid_free(pd, &cmd.cmd.tid_info);
1701 break;
1702 case IPATH_CMD_SET_PART_KEY:
1703 ret = ipath_set_part_key(pd, cmd.cmd.part_key);
1704 break;
1705 }
1706
1707 if (ret >= 0)
1708 ret = consumed;
1709
1710bail:
1711 return ret;
1712}
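/*
 * Rough usage sketch (hypothetical user-space code, illustration only):
 * a command is issued by write()ing a struct ipath_cmd; the type field
 * selects which member of the cmd union is copied in, and the return
 * value is the number of command bytes actually consumed (type plus
 * that member), which may be less than the count passed in.  E.g.:
 *
 *	struct ipath_cmd c = { .type = IPATH_CMD_RECV_CTRL };
 *	c.cmd.recv_ctrl = 1;   (assumed here: nonzero enables the rcvq)
 *	ssize_t n = write(fd, &c, sizeof(c));
 */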
1713
1714static struct class *ipath_class;
1715
1716static int init_cdev(int minor, char *name, struct file_operations *fops,
1717 struct cdev **cdevp, struct class_device **class_devp)
1718{
1719 const dev_t dev = MKDEV(IPATH_MAJOR, minor);
1720 struct cdev *cdev = NULL;
1721 struct class_device *class_dev = NULL;
1722 int ret;
1723
1724 cdev = cdev_alloc();
1725 if (!cdev) {
1726 printk(KERN_ERR IPATH_DRV_NAME
1727 ": Could not allocate cdev for minor %d, %s\n",
1728 minor, name);
1729 ret = -ENOMEM;
1730 goto done;
1731 }
1732
1733 cdev->owner = THIS_MODULE;
1734 cdev->ops = fops;
1735 kobject_set_name(&cdev->kobj, name);
1736
1737 ret = cdev_add(cdev, dev, 1);
1738 if (ret < 0) {
1739 printk(KERN_ERR IPATH_DRV_NAME
1740 ": Could not add cdev for minor %d, %s (err %d)\n",
1741 minor, name, -ret);
1742 goto err_cdev;
1743 }
1744
1745 class_dev = class_device_create(ipath_class, NULL, dev, NULL, name);
1746
1747 if (IS_ERR(class_dev)) {
1748 ret = PTR_ERR(class_dev);
1749 printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
1750 "class_dev for minor %d, %s (err %d)\n",
1751 minor, name, -ret);
1752 goto err_cdev;
1753 }
1754
1755 goto done;
1756
1757err_cdev:
1758 cdev_del(cdev);
1759 cdev = NULL;
1760
1761done:
1762 if (ret >= 0) {
1763 *cdevp = cdev;
1764 *class_devp = class_dev;
1765 } else {
1766 *cdevp = NULL;
1767 *class_devp = NULL;
1768 }
1769
1770 return ret;
1771}
1772
1773int ipath_cdev_init(int minor, char *name, struct file_operations *fops,
1774 struct cdev **cdevp, struct class_device **class_devp)
1775{
1776 return init_cdev(minor, name, fops, cdevp, class_devp);
1777}
1778
1779static void cleanup_cdev(struct cdev **cdevp,
1780 struct class_device **class_devp)
1781{
1782 struct class_device *class_dev = *class_devp;
1783
1784 if (class_dev) {
1785 class_device_unregister(class_dev);
1786 *class_devp = NULL;
1787 }
1788
1789 if (*cdevp) {
1790 cdev_del(*cdevp);
1791 *cdevp = NULL;
1792 }
1793}
1794
1795void ipath_cdev_cleanup(struct cdev **cdevp,
1796 struct class_device **class_devp)
1797{
1798 cleanup_cdev(cdevp, class_devp);
1799}
1800
1801static struct cdev *wildcard_cdev;
1802static struct class_device *wildcard_class_dev;
1803
1804static const dev_t dev = MKDEV(IPATH_MAJOR, 0);
1805
1806static int user_init(void)
1807{
1808 int ret;
1809
1810 ret = register_chrdev_region(dev, IPATH_NMINORS, IPATH_DRV_NAME);
1811 if (ret < 0) {
1812 printk(KERN_ERR IPATH_DRV_NAME ": Could not register "
1813 "chrdev region (err %d)\n", -ret);
1814 goto done;
1815 }
1816
1817 ipath_class = class_create(THIS_MODULE, IPATH_DRV_NAME);
1818
1819 if (IS_ERR(ipath_class)) {
1820 ret = PTR_ERR(ipath_class);
1821 printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
1822 "device class (err %d)\n", -ret);
1823 goto bail;
1824 }
1825
1826 goto done;
1827bail:
1828 unregister_chrdev_region(dev, IPATH_NMINORS);
1829done:
1830 return ret;
1831}
1832
1833static void user_cleanup(void)
1834{
1835 if (ipath_class) {
1836 class_destroy(ipath_class);
1837 ipath_class = NULL;
1838 }
1839
1840 unregister_chrdev_region(dev, IPATH_NMINORS);
1841}
1842
1843static atomic_t user_count = ATOMIC_INIT(0);
1844static atomic_t user_setup = ATOMIC_INIT(0);
1845
1846int ipath_user_add(struct ipath_devdata *dd)
1847{
1848 char name[10];
1849 int ret;
1850
1851 if (atomic_inc_return(&user_count) == 1) {
1852 ret = user_init();
1853 if (ret < 0) {
1854 ipath_dev_err(dd, "Unable to set up user support: "
1855 "error %d\n", -ret);
1856 goto bail;
1857 }
1858 ret = ipath_diag_init();
1859 if (ret < 0) {
1860 ipath_dev_err(dd, "Unable to set up diag support: "
1861 "error %d\n", -ret);
1862 goto bail_sma;
1863 }
1864
1865 ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev,
1866 &wildcard_class_dev);
1867 if (ret < 0) {
1868 ipath_dev_err(dd, "Could not create wildcard "
1869 "minor: error %d\n", -ret);
1870 goto bail_diag;
1871 }
1872
1873 atomic_set(&user_setup, 1);
1874 }
1875
1876 snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit);
1877
1878 ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops,
1879 &dd->cdev, &dd->class_dev);
1880 if (ret < 0)
1881 ipath_dev_err(dd, "Could not create user minor %d, %s\n",
1882 dd->ipath_unit + 1, name);
1883
1884 goto bail;
1885
1886bail_diag:
1887 ipath_diag_cleanup();
1888bail_sma:
1889 user_cleanup();
1890bail:
1891 return ret;
1892}
1893
1894void ipath_user_del(struct ipath_devdata *dd)
1895{
1896 cleanup_cdev(&dd->cdev, &dd->class_dev);
1897
1898 if (atomic_dec_return(&user_count) == 0) {
1899 if (atomic_read(&user_setup) == 0)
1900 goto bail;
1901
1902 cleanup_cdev(&wildcard_cdev, &wildcard_class_dev);
1903 ipath_diag_cleanup();
1904 user_cleanup();
1905
1906 atomic_set(&user_setup, 0);
1907 }
1908bail:
1909 return;
1910}
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
new file mode 100644
index 000000000000..e274120567e1
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -0,0 +1,605 @@
1/*
2 * Copyright (c) 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/version.h>
34#include <linux/config.h>
35#include <linux/module.h>
36#include <linux/fs.h>
37#include <linux/mount.h>
38#include <linux/pagemap.h>
39#include <linux/init.h>
40#include <linux/namei.h>
41#include <linux/pci.h>
42
43#include "ipath_kernel.h"
44
45#define IPATHFS_MAGIC 0x726a77
46
47static struct super_block *ipath_super;
48
49static int ipathfs_mknod(struct inode *dir, struct dentry *dentry,
50 int mode, struct file_operations *fops,
51 void *data)
52{
53 int error;
54 struct inode *inode = new_inode(dir->i_sb);
55
56 if (!inode) {
57 error = -EPERM;
58 goto bail;
59 }
60
61 inode->i_mode = mode;
62 inode->i_uid = 0;
63 inode->i_gid = 0;
64 inode->i_blksize = PAGE_CACHE_SIZE;
65 inode->i_blocks = 0;
66 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
67 inode->u.generic_ip = data;
68 if ((mode & S_IFMT) == S_IFDIR) {
69 inode->i_op = &simple_dir_inode_operations;
70 inode->i_nlink++;
71 dir->i_nlink++;
72 }
73
74 inode->i_fop = fops;
75
76 d_instantiate(dentry, inode);
77 error = 0;
78
79bail:
80 return error;
81}
82
83static int create_file(const char *name, mode_t mode,
84 struct dentry *parent, struct dentry **dentry,
85 struct file_operations *fops, void *data)
86{
87 int error;
88
89 *dentry = NULL;
90 mutex_lock(&parent->d_inode->i_mutex);
91 *dentry = lookup_one_len(name, parent, strlen(name));
 92	if (!IS_ERR(*dentry))
 93		error = ipathfs_mknod(parent->d_inode, *dentry,
 94				      mode, fops, data);
 95	else
 96		error = PTR_ERR(*dentry);
97 mutex_unlock(&parent->d_inode->i_mutex);
98
99 return error;
100}
101
102static ssize_t atomic_stats_read(struct file *file, char __user *buf,
103 size_t count, loff_t *ppos)
104{
105 return simple_read_from_buffer(buf, count, ppos, &ipath_stats,
106 sizeof ipath_stats);
107}
108
109static struct file_operations atomic_stats_ops = {
110 .read = atomic_stats_read,
111};
112
113#define NUM_COUNTERS (sizeof(struct infinipath_counters) / sizeof(u64))
114
115static ssize_t atomic_counters_read(struct file *file, char __user *buf,
116 size_t count, loff_t *ppos)
117{
118 u64 counters[NUM_COUNTERS];
119 u16 i;
120 struct ipath_devdata *dd;
121
122 dd = file->f_dentry->d_inode->u.generic_ip;
123
124 for (i = 0; i < NUM_COUNTERS; i++)
125 counters[i] = ipath_snap_cntr(dd, i);
126
127 return simple_read_from_buffer(buf, count, ppos, counters,
128 sizeof counters);
129}
130
131static struct file_operations atomic_counters_ops = {
132 .read = atomic_counters_read,
133};
134
135static ssize_t atomic_node_info_read(struct file *file, char __user *buf,
136 size_t count, loff_t *ppos)
137{
138 u32 nodeinfo[10];
139 struct ipath_devdata *dd;
140 u64 guid;
141
142 dd = file->f_dentry->d_inode->u.generic_ip;
143
144 guid = be64_to_cpu(dd->ipath_guid);
145
146 nodeinfo[0] = /* BaseVersion is SMA */
147 /* ClassVersion is SMA */
148 (1 << 8) /* NodeType */
149 | (1 << 0); /* NumPorts */
150 nodeinfo[1] = (u32) (guid >> 32);
151 nodeinfo[2] = (u32) (guid & 0xffffffff);
152 /* PortGUID == SystemImageGUID for us */
153 nodeinfo[3] = nodeinfo[1];
154 /* PortGUID == SystemImageGUID for us */
155 nodeinfo[4] = nodeinfo[2];
156 /* PortGUID == NodeGUID for us */
157 nodeinfo[5] = nodeinfo[3];
158 /* PortGUID == NodeGUID for us */
159 nodeinfo[6] = nodeinfo[4];
160 nodeinfo[7] = (4 << 16) /* we support 4 pkeys */
161 | (dd->ipath_deviceid << 0);
162 /* our chip version as 16 bits major, 16 bits minor */
163 nodeinfo[8] = dd->ipath_minrev | (dd->ipath_majrev << 16);
164 nodeinfo[9] = (dd->ipath_unit << 24) | (dd->ipath_vendorid << 0);
165
166 return simple_read_from_buffer(buf, count, ppos, nodeinfo,
167 sizeof nodeinfo);
168}
169
170static struct file_operations atomic_node_info_ops = {
171 .read = atomic_node_info_read,
172};
173
174static ssize_t atomic_port_info_read(struct file *file, char __user *buf,
175 size_t count, loff_t *ppos)
176{
177 u32 portinfo[13];
178 u32 tmp, tmp2;
179 struct ipath_devdata *dd;
180
181 dd = file->f_dentry->d_inode->u.generic_ip;
182
183 /* so we only initialize non-zero fields. */
184 memset(portinfo, 0, sizeof portinfo);
185
186 /*
187 * Notimpl yet M_Key (64)
188 * Notimpl yet GID (64)
189 */
190
191 portinfo[4] = (dd->ipath_lid << 16);
192
193 /*
194	 * Notimpl yet: SMLID (should we store this in the driver, in case
195	 * the SMA dies?). CapabilityMask is 0; we don't support any of these.
196	 * DiagCode is 0; we don't store any diag info for now. Notimpl yet:
197	 * M_KeyLeasePeriod (we don't support M_Key).
198 */
199
200 /* LocalPortNum is whichever port number they ask for */
201 portinfo[7] = (dd->ipath_unit << 24)
202 /* LinkWidthEnabled */
203 | (2 << 16)
204 /* LinkWidthSupported (really 2, but not IB valid) */
205 | (3 << 8)
206 /* LinkWidthActive */
207 | (2 << 0);
208 tmp = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK;
209 tmp2 = 5;
210 if (tmp == IPATH_IBSTATE_INIT)
211 tmp = 2;
212 else if (tmp == IPATH_IBSTATE_ARM)
213 tmp = 3;
214 else if (tmp == IPATH_IBSTATE_ACTIVE)
215 tmp = 4;
216 else {
217 tmp = 0; /* down */
218 tmp2 = tmp & 0xf;
219 }
220
221 portinfo[8] = (1 << 28) /* LinkSpeedSupported */
222 | (tmp << 24) /* PortState */
223 | (tmp2 << 20) /* PortPhysicalState */
224 | (2 << 16)
225
226 /* LinkDownDefaultState */
227 /* M_KeyProtectBits == 0 */
228 /* NotImpl yet LMC == 0 (we can support all values) */
229 | (1 << 4) /* LinkSpeedActive */
230 | (1 << 0); /* LinkSpeedEnabled */
231 switch (dd->ipath_ibmtu) {
232 case 4096:
233 tmp = 5;
234 break;
235 case 2048:
236 tmp = 4;
237 break;
238 case 1024:
239 tmp = 3;
240 break;
241 case 512:
242 tmp = 2;
243 break;
244 case 256:
245 tmp = 1;
246 break;
247 default: /* oops, something is wrong */
248 ipath_dbg("Problem, ipath_ibmtu 0x%x not a valid IB MTU, "
249 "treat as 2048\n", dd->ipath_ibmtu);
250 tmp = 4;
251 break;
252 }
253 portinfo[9] = (tmp << 28)
254 /* NeighborMTU */
255 /* Notimpl MasterSMSL */
256 | (1 << 20)
257
258 /* VLCap */
259 /* Notimpl InitType (actually, an SMA decision) */
260 /* VLHighLimit is 0 (only one VL) */
261 ; /* VLArbitrationHighCap is 0 (only one VL) */
262 portinfo[10] = /* VLArbitrationLowCap is 0 (only one VL) */
263 /* InitTypeReply is SMA decision */
264 (5 << 16) /* MTUCap 4096 */
265 | (7 << 13) /* VLStallCount */
266 | (0x1f << 8) /* HOQLife */
267 | (1 << 4)
268
269 /* OperationalVLs 0 */
270 /* PartitionEnforcementInbound */
271 /* PartitionEnforcementOutbound not enforced */
272 /* FilterRawinbound not enforced */
273 ; /* FilterRawOutbound not enforced */
274 /* M_KeyViolations are not counted by hardware, SMA can count */
275 tmp = ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
276 /* P_KeyViolations are counted by hardware. */
277 portinfo[11] = ((tmp & 0xffff) << 0);
278 portinfo[12] =
279 /* Q_KeyViolations are not counted by hardware */
280 (1 << 8)
281
282 /* GUIDCap */
283 /* SubnetTimeOut handled by SMA */
284 /* RespTimeValue handled by SMA */
285 ;
286 /* LocalPhyErrors are programmed to max */
287 portinfo[12] |= (0xf << 20)
288 | (0xf << 16) /* OverRunErrors are programmed to max */
289 ;
290
291 return simple_read_from_buffer(buf, count, ppos, portinfo,
292 sizeof portinfo);
293}
294
295static struct file_operations atomic_port_info_ops = {
296 .read = atomic_port_info_read,
297};
298
299static ssize_t flash_read(struct file *file, char __user *buf,
300 size_t count, loff_t *ppos)
301{
302 struct ipath_devdata *dd;
303 ssize_t ret;
304 loff_t pos;
305 char *tmp;
306
307 pos = *ppos;
308
309	if (pos < 0) {
310 ret = -EINVAL;
311 goto bail;
312 }
313
314 if (pos >= sizeof(struct ipath_flash)) {
315 ret = 0;
316 goto bail;
317 }
318
319 if (count > sizeof(struct ipath_flash) - pos)
320 count = sizeof(struct ipath_flash) - pos;
321
322 tmp = kmalloc(count, GFP_KERNEL);
323 if (!tmp) {
324 ret = -ENOMEM;
325 goto bail;
326 }
327
328 dd = file->f_dentry->d_inode->u.generic_ip;
329 if (ipath_eeprom_read(dd, pos, tmp, count)) {
330 ipath_dev_err(dd, "failed to read from flash\n");
331 ret = -ENXIO;
332 goto bail_tmp;
333 }
334
335 if (copy_to_user(buf, tmp, count)) {
336 ret = -EFAULT;
337 goto bail_tmp;
338 }
339
340 *ppos = pos + count;
341 ret = count;
342
343bail_tmp:
344 kfree(tmp);
345
346bail:
347 return ret;
348}
349
350static ssize_t flash_write(struct file *file, const char __user *buf,
351 size_t count, loff_t *ppos)
352{
353 struct ipath_devdata *dd;
354 ssize_t ret;
355 loff_t pos;
356 char *tmp;
357
358 pos = *ppos;
359
360	if (pos < 0) {
361 ret = -EINVAL;
362 goto bail;
363 }
364
365 if (pos >= sizeof(struct ipath_flash)) {
366 ret = 0;
367 goto bail;
368 }
369
370 if (count > sizeof(struct ipath_flash) - pos)
371 count = sizeof(struct ipath_flash) - pos;
372
373 tmp = kmalloc(count, GFP_KERNEL);
374 if (!tmp) {
375 ret = -ENOMEM;
376 goto bail;
377 }
378
379 if (copy_from_user(tmp, buf, count)) {
380 ret = -EFAULT;
381 goto bail_tmp;
382 }
383
384 dd = file->f_dentry->d_inode->u.generic_ip;
385 if (ipath_eeprom_write(dd, pos, tmp, count)) {
386 ret = -ENXIO;
387 ipath_dev_err(dd, "failed to write to flash\n");
388 goto bail_tmp;
389 }
390
391 *ppos = pos + count;
392 ret = count;
393
394bail_tmp:
395 kfree(tmp);
396
397bail:
398 return ret;
399}
400
401static struct file_operations flash_ops = {
402 .read = flash_read,
403 .write = flash_write,
404};
405
406static int create_device_files(struct super_block *sb,
407 struct ipath_devdata *dd)
408{
409 struct dentry *dir, *tmp;
410 char unit[10];
411 int ret;
412
413 snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
414 ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
415 (struct file_operations *) &simple_dir_operations,
416 dd);
417 if (ret) {
418 printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret);
419 goto bail;
420 }
421
422 ret = create_file("atomic_counters", S_IFREG|S_IRUGO, dir, &tmp,
423 &atomic_counters_ops, dd);
424 if (ret) {
425 printk(KERN_ERR "create_file(%s/atomic_counters) "
426 "failed: %d\n", unit, ret);
427 goto bail;
428 }
429
430 ret = create_file("node_info", S_IFREG|S_IRUGO, dir, &tmp,
431 &atomic_node_info_ops, dd);
432 if (ret) {
433 printk(KERN_ERR "create_file(%s/node_info) "
434 "failed: %d\n", unit, ret);
435 goto bail;
436 }
437
438 ret = create_file("port_info", S_IFREG|S_IRUGO, dir, &tmp,
439 &atomic_port_info_ops, dd);
440 if (ret) {
441 printk(KERN_ERR "create_file(%s/port_info) "
442 "failed: %d\n", unit, ret);
443 goto bail;
444 }
445
446 ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp,
447 &flash_ops, dd);
448 if (ret) {
449 printk(KERN_ERR "create_file(%s/flash) "
450 "failed: %d\n", unit, ret);
451 goto bail;
452 }
453
454bail:
455 return ret;
456}
457
458static void remove_file(struct dentry *parent, char *name)
459{
460 struct dentry *tmp;
461
462 tmp = lookup_one_len(name, parent, strlen(name));
463
464 spin_lock(&dcache_lock);
465 spin_lock(&tmp->d_lock);
466 if (!(d_unhashed(tmp) && tmp->d_inode)) {
467 dget_locked(tmp);
468 __d_drop(tmp);
469 spin_unlock(&tmp->d_lock);
470 spin_unlock(&dcache_lock);
471 simple_unlink(parent->d_inode, tmp);
472 } else {
473 spin_unlock(&tmp->d_lock);
474 spin_unlock(&dcache_lock);
475 }
476}
477
478static int remove_device_files(struct super_block *sb,
479 struct ipath_devdata *dd)
480{
481 struct dentry *dir, *root;
482 char unit[10];
483 int ret;
484
485 root = dget(sb->s_root);
486 mutex_lock(&root->d_inode->i_mutex);
487 snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
488 dir = lookup_one_len(unit, root, strlen(unit));
489
490 if (IS_ERR(dir)) {
491 ret = PTR_ERR(dir);
492 printk(KERN_ERR "Lookup of %s failed\n", unit);
493 goto bail;
494 }
495
496 remove_file(dir, "flash");
497 remove_file(dir, "port_info");
498 remove_file(dir, "node_info");
499 remove_file(dir, "atomic_counters");
500 d_delete(dir);
501 ret = simple_rmdir(root->d_inode, dir);
502
503bail:
504 mutex_unlock(&root->d_inode->i_mutex);
505 dput(root);
506 return ret;
507}
508
509static int ipathfs_fill_super(struct super_block *sb, void *data,
510 int silent)
511{
512 struct ipath_devdata *dd, *tmp;
513 unsigned long flags;
514 int ret;
515
516 static struct tree_descr files[] = {
517 [1] = {"atomic_stats", &atomic_stats_ops, S_IRUGO},
518 {""},
519 };
520
521 ret = simple_fill_super(sb, IPATHFS_MAGIC, files);
522 if (ret) {
523 printk(KERN_ERR "simple_fill_super failed: %d\n", ret);
524 goto bail;
525 }
526
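	/*
	 * Note: the devs lock is dropped around create_device_files()
	 * below, since file creation allocates inodes and takes the
	 * parent directory's i_mutex, both of which may sleep and so
	 * cannot be done under a spinlock.
	 */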
527 spin_lock_irqsave(&ipath_devs_lock, flags);
528
529 list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
530 spin_unlock_irqrestore(&ipath_devs_lock, flags);
531 ret = create_device_files(sb, dd);
532 if (ret) {
533 deactivate_super(sb);
534 goto bail;
535 }
536 spin_lock_irqsave(&ipath_devs_lock, flags);
537 }
538
539 spin_unlock_irqrestore(&ipath_devs_lock, flags);
540
541bail:
542 return ret;
543}
544
545static struct super_block *ipathfs_get_sb(struct file_system_type *fs_type,
546 int flags, const char *dev_name,
547 void *data)
548{
549 ipath_super = get_sb_single(fs_type, flags, data,
550 ipathfs_fill_super);
551 return ipath_super;
552}
553
554static void ipathfs_kill_super(struct super_block *s)
555{
556 kill_litter_super(s);
557 ipath_super = NULL;
558}
559
560int ipathfs_add_device(struct ipath_devdata *dd)
561{
562 int ret;
563
564 if (ipath_super == NULL) {
565 ret = 0;
566 goto bail;
567 }
568
569 ret = create_device_files(ipath_super, dd);
570
571bail:
572 return ret;
573}
574
575int ipathfs_remove_device(struct ipath_devdata *dd)
576{
577 int ret;
578
579 if (ipath_super == NULL) {
580 ret = 0;
581 goto bail;
582 }
583
584 ret = remove_device_files(ipath_super, dd);
585
586bail:
587 return ret;
588}
589
590static struct file_system_type ipathfs_fs_type = {
591 .owner = THIS_MODULE,
592 .name = "ipathfs",
593 .get_sb = ipathfs_get_sb,
594 .kill_sb = ipathfs_kill_super,
595};
596
597int __init ipath_init_ipathfs(void)
598{
599 return register_filesystem(&ipathfs_fs_type);
600}
601
602void __exit ipath_exit_ipathfs(void)
603{
604 unregister_filesystem(&ipathfs_fs_type);
605}
diff --git a/drivers/infiniband/hw/ipath/ipath_ht400.c b/drivers/infiniband/hw/ipath/ipath_ht400.c
new file mode 100644
index 000000000000..4652435998f3
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_ht400.c
@@ -0,0 +1,1586 @@
1/*
2 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/*
34 * This file contains all of the code that is specific to the InfiniPath
35 * HT-400 chip.
36 */
37
38#include <linux/pci.h>
39#include <linux/delay.h>
40
41#include "ipath_kernel.h"
42#include "ipath_registers.h"
43
44/*
45 * This lists the InfiniPath HT400 registers, in the actual chip layout.
46 * This structure should never be directly accessed.
47 *
48 * The names are in InterCap form because they're taken straight from
49 * the chip specification. Since they're only used in this file, they
50 * don't pollute the rest of the source.
51*/
52
53struct _infinipath_do_not_use_kernel_regs {
54 unsigned long long Revision;
55 unsigned long long Control;
56 unsigned long long PageAlign;
57 unsigned long long PortCnt;
58 unsigned long long DebugPortSelect;
59 unsigned long long DebugPort;
60 unsigned long long SendRegBase;
61 unsigned long long UserRegBase;
62 unsigned long long CounterRegBase;
63 unsigned long long Scratch;
64 unsigned long long ReservedMisc1;
65 unsigned long long InterruptConfig;
66 unsigned long long IntBlocked;
67 unsigned long long IntMask;
68 unsigned long long IntStatus;
69 unsigned long long IntClear;
70 unsigned long long ErrorMask;
71 unsigned long long ErrorStatus;
72 unsigned long long ErrorClear;
73 unsigned long long HwErrMask;
74 unsigned long long HwErrStatus;
75 unsigned long long HwErrClear;
76 unsigned long long HwDiagCtrl;
77 unsigned long long MDIO;
78 unsigned long long IBCStatus;
79 unsigned long long IBCCtrl;
80 unsigned long long ExtStatus;
81 unsigned long long ExtCtrl;
82 unsigned long long GPIOOut;
83 unsigned long long GPIOMask;
84 unsigned long long GPIOStatus;
85 unsigned long long GPIOClear;
86 unsigned long long RcvCtrl;
87 unsigned long long RcvBTHQP;
88 unsigned long long RcvHdrSize;
89 unsigned long long RcvHdrCnt;
90 unsigned long long RcvHdrEntSize;
91 unsigned long long RcvTIDBase;
92 unsigned long long RcvTIDCnt;
93 unsigned long long RcvEgrBase;
94 unsigned long long RcvEgrCnt;
95 unsigned long long RcvBufBase;
96 unsigned long long RcvBufSize;
97 unsigned long long RxIntMemBase;
98 unsigned long long RxIntMemSize;
99 unsigned long long RcvPartitionKey;
100 unsigned long long ReservedRcv[10];
101 unsigned long long SendCtrl;
102 unsigned long long SendPIOBufBase;
103 unsigned long long SendPIOSize;
104 unsigned long long SendPIOBufCnt;
105 unsigned long long SendPIOAvailAddr;
106 unsigned long long TxIntMemBase;
107 unsigned long long TxIntMemSize;
108 unsigned long long ReservedSend[9];
109 unsigned long long SendBufferError;
110 unsigned long long SendBufferErrorCONT1;
111 unsigned long long SendBufferErrorCONT2;
112 unsigned long long SendBufferErrorCONT3;
113 unsigned long long ReservedSBE[4];
114 unsigned long long RcvHdrAddr0;
115 unsigned long long RcvHdrAddr1;
116 unsigned long long RcvHdrAddr2;
117 unsigned long long RcvHdrAddr3;
118 unsigned long long RcvHdrAddr4;
119 unsigned long long RcvHdrAddr5;
120 unsigned long long RcvHdrAddr6;
121 unsigned long long RcvHdrAddr7;
122 unsigned long long RcvHdrAddr8;
123 unsigned long long ReservedRHA[7];
124 unsigned long long RcvHdrTailAddr0;
125 unsigned long long RcvHdrTailAddr1;
126 unsigned long long RcvHdrTailAddr2;
127 unsigned long long RcvHdrTailAddr3;
128 unsigned long long RcvHdrTailAddr4;
129 unsigned long long RcvHdrTailAddr5;
130 unsigned long long RcvHdrTailAddr6;
131 unsigned long long RcvHdrTailAddr7;
132 unsigned long long RcvHdrTailAddr8;
133 unsigned long long ReservedRHTA[7];
134 unsigned long long Sync; /* Software only */
135 unsigned long long Dump; /* Software only */
136 unsigned long long SimVer; /* Software only */
137 unsigned long long ReservedSW[5];
138 unsigned long long SerdesConfig0;
139 unsigned long long SerdesConfig1;
140 unsigned long long SerdesStatus;
141 unsigned long long XGXSConfig;
142 unsigned long long ReservedSW2[4];
143};
144
145#define IPATH_KREG_OFFSET(field) (offsetof(struct \
146 _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
147#define IPATH_CREG_OFFSET(field) (offsetof( \
148 struct infinipath_counters, field) / sizeof(u64))
149
150static const struct ipath_kregs ipath_ht_kregs = {
151 .kr_control = IPATH_KREG_OFFSET(Control),
152 .kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
153 .kr_debugport = IPATH_KREG_OFFSET(DebugPort),
154 .kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
155 .kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
156 .kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
157 .kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
158 .kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
159 .kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
160 .kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
161 .kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
162 .kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
163 .kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
164 .kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
165 .kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
166 .kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
167 .kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
168 .kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
169 .kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
170 .kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
171 .kr_intclear = IPATH_KREG_OFFSET(IntClear),
172 .kr_interruptconfig = IPATH_KREG_OFFSET(InterruptConfig),
173 .kr_intmask = IPATH_KREG_OFFSET(IntMask),
174 .kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
175 .kr_mdio = IPATH_KREG_OFFSET(MDIO),
176 .kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
177 .kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
178 .kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
179 .kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
180 .kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
181 .kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
182 .kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
183 .kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
184 .kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
185 .kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
186 .kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
187 .kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
188 .kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
189 .kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
190 .kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
191 .kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
192 .kr_revision = IPATH_KREG_OFFSET(Revision),
193 .kr_scratch = IPATH_KREG_OFFSET(Scratch),
194 .kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
195 .kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
196 .kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendPIOAvailAddr),
197 .kr_sendpiobufbase = IPATH_KREG_OFFSET(SendPIOBufBase),
198 .kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendPIOBufCnt),
199 .kr_sendpiosize = IPATH_KREG_OFFSET(SendPIOSize),
200 .kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
201 .kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
202 .kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
203 .kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
204 .kr_serdesconfig0 = IPATH_KREG_OFFSET(SerdesConfig0),
205 .kr_serdesconfig1 = IPATH_KREG_OFFSET(SerdesConfig1),
206 .kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus),
207 .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
208 /*
209 * These should not be used directly via ipath_read_kreg64(),
210 * use them with ipath_read_kreg64_port(),
211 */
212 .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
213 .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0)
214};
215
216static const struct ipath_cregs ipath_ht_cregs = {
217 .cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
218 .cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
219 .cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
220 .cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
221 .cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
222 .cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
223 .cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
224 .cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
225 .cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
226 .cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
227 .cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
228 .cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
229 /* calc from Reg_CounterRegBase + offset */
230 .cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
231 .cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
232 .cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
233 .cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
234 .cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
235 .cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
236 .cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
237 .cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
238 .cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
239 .cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
240 .cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
241 .cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
242 .cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
243 .cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
244 .cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
245 .cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
246 .cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
247 .cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
248 .cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
249 .cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
250 .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt)
251};
252
253/* kr_intstatus, kr_intclear, kr_intmask bits */
254#define INFINIPATH_I_RCVURG_MASK 0x1FF
255#define INFINIPATH_I_RCVAVAIL_MASK 0x1FF
256
257/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus bits */
258#define INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT 0
259#define INFINIPATH_HWE_HTCMEMPARITYERR_MASK 0x3FFFFFULL
260#define INFINIPATH_HWE_HTCLNKABYTE0CRCERR 0x0000000000800000ULL
261#define INFINIPATH_HWE_HTCLNKABYTE1CRCERR 0x0000000001000000ULL
262#define INFINIPATH_HWE_HTCLNKBBYTE0CRCERR 0x0000000002000000ULL
263#define INFINIPATH_HWE_HTCLNKBBYTE1CRCERR 0x0000000004000000ULL
264#define INFINIPATH_HWE_HTCMISCERR4 0x0000000008000000ULL
265#define INFINIPATH_HWE_HTCMISCERR5 0x0000000010000000ULL
266#define INFINIPATH_HWE_HTCMISCERR6 0x0000000020000000ULL
267#define INFINIPATH_HWE_HTCMISCERR7 0x0000000040000000ULL
268#define INFINIPATH_HWE_HTCBUSTREQPARITYERR 0x0000000080000000ULL
269#define INFINIPATH_HWE_HTCBUSTRESPPARITYERR 0x0000000100000000ULL
270#define INFINIPATH_HWE_HTCBUSIREQPARITYERR 0x0000000200000000ULL
271#define INFINIPATH_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
272#define INFINIPATH_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
273#define INFINIPATH_HWE_HTBPLL_FBSLIP 0x0200000000000000ULL
274#define INFINIPATH_HWE_HTBPLL_RFSLIP 0x0400000000000000ULL
275#define INFINIPATH_HWE_HTAPLL_FBSLIP 0x0800000000000000ULL
276#define INFINIPATH_HWE_HTAPLL_RFSLIP 0x1000000000000000ULL
277#define INFINIPATH_HWE_SERDESPLLFAILED 0x2000000000000000ULL
278
279/* kr_extstatus bits */
280#define INFINIPATH_EXTS_FREQSEL 0x2
281#define INFINIPATH_EXTS_SERDESSEL 0x4
282#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000
283#define INFINIPATH_EXTS_MEMBIST_CORRECT 0x0000000000008000
284
285/*
286 * masks and bits that are different in different chips, or present only
287 * in one
288 */
289static const ipath_err_t infinipath_hwe_htcmemparityerr_mask =
290 INFINIPATH_HWE_HTCMEMPARITYERR_MASK;
291static const ipath_err_t infinipath_hwe_htcmemparityerr_shift =
292 INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT;
293
294static const ipath_err_t infinipath_hwe_htclnkabyte0crcerr =
295 INFINIPATH_HWE_HTCLNKABYTE0CRCERR;
296static const ipath_err_t infinipath_hwe_htclnkabyte1crcerr =
297 INFINIPATH_HWE_HTCLNKABYTE1CRCERR;
298static const ipath_err_t infinipath_hwe_htclnkbbyte0crcerr =
299 INFINIPATH_HWE_HTCLNKBBYTE0CRCERR;
300static const ipath_err_t infinipath_hwe_htclnkbbyte1crcerr =
301 INFINIPATH_HWE_HTCLNKBBYTE1CRCERR;
302
303#define _IPATH_GPIO_SDA_NUM 1
304#define _IPATH_GPIO_SCL_NUM 0
305
306#define IPATH_GPIO_SDA \
307 (1ULL << (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
308#define IPATH_GPIO_SCL \
309 (1ULL << (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
310
311/* keep the code below somewhat more readable; not used elsewhere */
312#define _IPATH_HTLINK0_CRCBITS (infinipath_hwe_htclnkabyte0crcerr | \
313 infinipath_hwe_htclnkabyte1crcerr)
314#define _IPATH_HTLINK1_CRCBITS (infinipath_hwe_htclnkbbyte0crcerr | \
315 infinipath_hwe_htclnkbbyte1crcerr)
316#define _IPATH_HTLANE0_CRCBITS (infinipath_hwe_htclnkabyte0crcerr | \
317 infinipath_hwe_htclnkbbyte0crcerr)
318#define _IPATH_HTLANE1_CRCBITS (infinipath_hwe_htclnkabyte1crcerr | \
319 infinipath_hwe_htclnkbbyte1crcerr)
320
321static void hwerr_crcbits(struct ipath_devdata *dd, ipath_err_t hwerrs,
322 char *msg, size_t msgl)
323{
324 char bitsmsg[64];
325 ipath_err_t crcbits = hwerrs &
326 (_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS);
327 /* don't check if 8bit HT */
328 if (dd->ipath_flags & IPATH_8BIT_IN_HT0)
329 crcbits &= ~infinipath_hwe_htclnkabyte1crcerr;
330 /* don't check if 8bit HT */
331 if (dd->ipath_flags & IPATH_8BIT_IN_HT1)
332 crcbits &= ~infinipath_hwe_htclnkbbyte1crcerr;
333 /*
334	 * we'll want to ignore link errors on a link that is
335 * not in use, if any. For now, complain about both
336 */
337 if (crcbits) {
338 u16 ctrl0, ctrl1;
339 snprintf(bitsmsg, sizeof bitsmsg,
340 "[HT%s lane %s CRC (%llx); ignore till reload]",
341 !(crcbits & _IPATH_HTLINK1_CRCBITS) ?
342 "0 (A)" : (!(crcbits & _IPATH_HTLINK0_CRCBITS)
343 ? "1 (B)" : "0+1 (A+B)"),
344 !(crcbits & _IPATH_HTLANE1_CRCBITS) ? "0"
345 : (!(crcbits & _IPATH_HTLANE0_CRCBITS) ? "1" :
346 "0+1"), (unsigned long long) crcbits);
347 strlcat(msg, bitsmsg, msgl);
348
349 /*
350 * print extra info for debugging. slave/primary
351 * config word 4, 8 (link control 0, 1)
352 */
353
354 if (pci_read_config_word(dd->pcidev,
355 dd->ipath_ht_slave_off + 0x4,
356 &ctrl0))
357 dev_info(&dd->pcidev->dev, "Couldn't read "
358 "linkctrl0 of slave/primary "
359 "config block\n");
360 else if (!(ctrl0 & 1 << 6))
361 /* not if EOC bit set */
362 ipath_dbg("HT linkctrl0 0x%x%s%s\n", ctrl0,
363 ((ctrl0 >> 8) & 7) ? " CRC" : "",
364 ((ctrl0 >> 4) & 1) ? "linkfail" :
365 "");
366 if (pci_read_config_word(dd->pcidev,
367 dd->ipath_ht_slave_off + 0x8,
368 &ctrl1))
369 dev_info(&dd->pcidev->dev, "Couldn't read "
370 "linkctrl1 of slave/primary "
371 "config block\n");
372 else if (!(ctrl1 & 1 << 6))
373 /* not if EOC bit set */
374 ipath_dbg("HT linkctrl1 0x%x%s%s\n", ctrl1,
375 ((ctrl1 >> 8) & 7) ? " CRC" : "",
376 ((ctrl1 >> 4) & 1) ? "linkfail" :
377 "");
378
379 /* disable until driver reloaded */
380 dd->ipath_hwerrmask &= ~crcbits;
381 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
382 dd->ipath_hwerrmask);
383 ipath_dbg("HT crc errs: %s\n", msg);
384 } else
385 ipath_dbg("ignoring HT crc errors 0x%llx, "
386 "not in use\n", (unsigned long long)
387 (hwerrs & (_IPATH_HTLINK0_CRCBITS |
388 _IPATH_HTLINK1_CRCBITS)));
389}
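/*
 * Illustrative sketch (not part of this patch): how the link/lane CRC
 * groupings above are reduced to the "HT<link> lane <lane>" text that
 * hwerr_crcbits() reports.  The bit values mirror the HTCLNK*CRCERR
 * defines; the sample crcbits value is made up.
 */
#include <stdio.h>

#define A0 0x0000000000800000ULL	/* link A (HT0), byte lane 0 */
#define A1 0x0000000001000000ULL	/* link A (HT0), byte lane 1 */
#define B0 0x0000000002000000ULL	/* link B (HT1), byte lane 0 */
#define B1 0x0000000004000000ULL	/* link B (HT1), byte lane 1 */

int main(void)
{
	unsigned long long crcbits = A1;	/* pretend hwerrstatus value */
	const char *link = !(crcbits & (B0 | B1)) ? "0 (A)" :
		(!(crcbits & (A0 | A1)) ? "1 (B)" : "0+1 (A+B)");
	const char *lane = !(crcbits & (A1 | B1)) ? "0" :
		(!(crcbits & (A0 | B0)) ? "1" : "0+1");

	/* prints "HT0 (A) lane 1 CRC (1000000)" */
	printf("HT%s lane %s CRC (%llx)\n", link, lane, crcbits);
	return 0;
}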
390
391/**
392 * ipath_ht_handle_hwerrors - display hardware errors
393 * @dd: the infinipath device
394 * @msg: the output buffer
395 * @msgl: the size of the output buffer
396 *
397 * Most hardware errors are catastrophic, but for right now,
398 * we'll print them and continue.
399 *
400 * We reuse the same message buffer as ipath_handle_errors() to
401 * avoid excessive stack usage.
402 */
403static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
404 size_t msgl)
405{
406 ipath_err_t hwerrs;
407 u32 bits, ctrl;
408 int isfatal = 0;
409 char bitsmsg[64];
410
411 hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
412
413 if (!hwerrs) {
414 ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
415 /*
416	 * better than printing confusing messages; this seems to be
417	 * related to clearing the crc error, or
418 * the pll error during init.
419 */
420 goto bail;
421 } else if (hwerrs == -1LL) {
422 ipath_dev_err(dd, "Read of hardware error status failed "
423 "(all bits set); ignoring\n");
424 goto bail;
425 }
426 ipath_stats.sps_hwerrs++;
427
428 /* Always clear the error status register, except MEMBISTFAIL,
429 * regardless of whether we continue or stop using the chip.
430 * We want that set so we know it failed, even across driver reload.
431 * We'll still ignore it in the hwerrmask. We do this partly for
432 * diagnostics, but also for support */
433 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
434 hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
435
436 hwerrs &= dd->ipath_hwerrmask;
437
438 /*
439 * make sure we get this much out, unless told to be quiet,
440 * or it's occurred within the last 5 seconds
441 */
442 if ((hwerrs & ~dd->ipath_lasthwerror) ||
443 (ipath_debug & __IPATH_VERBDBG))
444 dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
445 "(cleared)\n", (unsigned long long) hwerrs);
446 dd->ipath_lasthwerror |= hwerrs;
447
448 if (hwerrs & ~infinipath_hwe_bitsextant)
449 ipath_dev_err(dd, "hwerror interrupt with unknown errors "
450 "%llx set\n", (unsigned long long)
451 (hwerrs & ~infinipath_hwe_bitsextant));
452
453 ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
454 if (ctrl & INFINIPATH_C_FREEZEMODE) {
455 if (hwerrs) {
456 /*
457 * if any set that we aren't ignoring; only
458 * make the complaint once, in case it's stuck
459 * or recurring, and we get here multiple
460 * times.
461 */
462 if (dd->ipath_flags & IPATH_INITTED) {
463 ipath_dev_err(dd, "Fatal Error (freeze "
464 "mode), no longer usable\n");
465 isfatal = 1;
466 }
467 *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
468 /* mark as having had error */
469 *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
470 /*
471 * mark as not usable, at a minimum until driver
472 * is reloaded, probably until reboot, since no
473 * other reset is possible.
474 */
475 dd->ipath_flags &= ~IPATH_INITTED;
476 } else {
477 ipath_dbg("Clearing freezemode on ignored hardware "
478 "error\n");
479 ctrl &= ~INFINIPATH_C_FREEZEMODE;
480 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
481 ctrl);
482 }
483 }
484
485 *msg = '\0';
486
487 /*
488 * may someday want to decode into which bits are which
489 * functional area for parity errors, etc.
490 */
491 if (hwerrs & (infinipath_hwe_htcmemparityerr_mask
492 << INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT)) {
493 bits = (u32) ((hwerrs >>
494 INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT) &
495 INFINIPATH_HWE_HTCMEMPARITYERR_MASK);
496 snprintf(bitsmsg, sizeof bitsmsg, "[HTC Parity Errs %x] ",
497 bits);
498 strlcat(msg, bitsmsg, msgl);
499 }
500 if (hwerrs & (INFINIPATH_HWE_RXEMEMPARITYERR_MASK
501 << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)) {
502 bits = (u32) ((hwerrs >>
503 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) &
504 INFINIPATH_HWE_RXEMEMPARITYERR_MASK);
505 snprintf(bitsmsg, sizeof bitsmsg, "[RXE Parity Errs %x] ",
506 bits);
507 strlcat(msg, bitsmsg, msgl);
508 }
509 if (hwerrs & (INFINIPATH_HWE_TXEMEMPARITYERR_MASK
510 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) {
511 bits = (u32) ((hwerrs >>
512 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) &
513 INFINIPATH_HWE_TXEMEMPARITYERR_MASK);
514 snprintf(bitsmsg, sizeof bitsmsg, "[TXE Parity Errs %x] ",
515 bits);
516 strlcat(msg, bitsmsg, msgl);
517 }
518 if (hwerrs & INFINIPATH_HWE_IBCBUSTOSPCPARITYERR)
519 strlcat(msg, "[IB2IPATH Parity]", msgl);
520 if (hwerrs & INFINIPATH_HWE_IBCBUSFRSPCPARITYERR)
521 strlcat(msg, "[IPATH2IB Parity]", msgl);
522 if (hwerrs & INFINIPATH_HWE_HTCBUSIREQPARITYERR)
523 strlcat(msg, "[HTC Ireq Parity]", msgl);
524 if (hwerrs & INFINIPATH_HWE_HTCBUSTREQPARITYERR)
525 strlcat(msg, "[HTC Treq Parity]", msgl);
526 if (hwerrs & INFINIPATH_HWE_HTCBUSTRESPPARITYERR)
527 strlcat(msg, "[HTC Tresp Parity]", msgl);
528
529 if (hwerrs & (_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS))
530 hwerr_crcbits(dd, hwerrs, msg, msgl);
531
532 if (hwerrs & INFINIPATH_HWE_HTCMISCERR5)
533 strlcat(msg, "[HT core Misc5]", msgl);
534 if (hwerrs & INFINIPATH_HWE_HTCMISCERR6)
535 strlcat(msg, "[HT core Misc6]", msgl);
536 if (hwerrs & INFINIPATH_HWE_HTCMISCERR7)
537 strlcat(msg, "[HT core Misc7]", msgl);
538 if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
539 strlcat(msg, "[Memory BIST test failed, HT-400 unusable]",
540 msgl);
541 /* ignore from now on, so disable until driver reloaded */
542 dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
543 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
544 dd->ipath_hwerrmask);
545 }
546#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \
547 INFINIPATH_HWE_COREPLL_RFSLIP | \
548 INFINIPATH_HWE_HTBPLL_FBSLIP | \
549 INFINIPATH_HWE_HTBPLL_RFSLIP | \
550 INFINIPATH_HWE_HTAPLL_FBSLIP | \
551 INFINIPATH_HWE_HTAPLL_RFSLIP)
552
553 if (hwerrs & _IPATH_PLL_FAIL) {
554 snprintf(bitsmsg, sizeof bitsmsg,
555 "[PLL failed (%llx), HT-400 unusable]",
556 (unsigned long long) (hwerrs & _IPATH_PLL_FAIL));
557 strlcat(msg, bitsmsg, msgl);
558 /* ignore from now on, so disable until driver reloaded */
559 dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
560 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
561 dd->ipath_hwerrmask);
562 }
563
564 if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
565 /*
566		 * If it occurs, it is left masked since the external
567 * interface is unused
568 */
569 dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
570 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
571 dd->ipath_hwerrmask);
572 }
573
574 if (hwerrs & INFINIPATH_HWE_RXDSYNCMEMPARITYERR)
575 strlcat(msg, "[Rx Dsync]", msgl);
576 if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED)
577 strlcat(msg, "[SerDes PLL]", msgl);
578
579 ipath_dev_err(dd, "%s hardware error\n", msg);
580 if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg)
581 /*
582 * for status file; if no trailing brace is copied,
583 * we'll know it was truncated.
584 */
585 snprintf(dd->ipath_freezemsg,
586 dd->ipath_freezelen, "{%s}", msg);
587
588bail:;
589}
590
591/**
592 * ipath_ht_boardname - fill in the board name
593 * @dd: the infinipath device
594 * @name: the output buffer
595 * @namelen: the size of the output buffer
596 *
597 * fill in the board name, based on the board revision register
598 */
599static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
600 size_t namelen)
601{
602 char *n = NULL;
603 u8 boardrev = dd->ipath_boardrev;
604 int ret;
605
606 switch (boardrev) {
607 case 4: /* Ponderosa is one of the bringup boards */
608 n = "Ponderosa";
609 break;
610 case 5: /* HT-460 original production board */
611 n = "InfiniPath_HT-460";
612 break;
613 case 6:
614 n = "OEM_Board_3";
615 break;
616 case 7:
617 /* HT-460 small form factor production board */
618 n = "InfiniPath_HT-465";
619 break;
620 case 8:
621 n = "LS/X-1";
622 break;
623 case 9: /* Comstock bringup test board */
624 n = "Comstock";
625 break;
626 case 10:
627 n = "OEM_Board_2";
628 break;
629 case 11:
630 n = "InfiniPath_HT-470";
631 break;
632 case 12:
633 n = "OEM_Board_4";
634 break;
635 default: /* don't know, just print the number */
636 ipath_dev_err(dd, "Don't yet know about board "
637 "with ID %u\n", boardrev);
638 snprintf(name, namelen, "Unknown_InfiniPath_HT-4xx_%u",
639 boardrev);
640 break;
641 }
642 if (n)
643 snprintf(name, namelen, "%s", n);
644
645 if (dd->ipath_majrev != 3 || dd->ipath_minrev != 2) {
646 /*
647 * This version of the driver only supports the HT-400
648 * Rev 3.2
649 */
650 ipath_dev_err(dd,
651 "Unsupported HT-400 revision %u.%u!\n",
652 dd->ipath_majrev, dd->ipath_minrev);
653 ret = 1;
654 goto bail;
655 }
656 /*
657 * pkt/word counters are 32 bit, and therefore wrap fast enough
658 * that we snapshot them from a timer, and maintain 64 bit shadow
659 * copies
660 */
661 dd->ipath_flags |= IPATH_32BITCOUNTERS;
662 if (dd->ipath_htspeed != 800)
663 ipath_dev_err(dd,
664 "Incorrectly configured for HT @ %uMHz\n",
665 dd->ipath_htspeed);
666 if (dd->ipath_boardrev == 7 || dd->ipath_boardrev == 11 ||
667 dd->ipath_boardrev == 6)
668 dd->ipath_flags |= IPATH_GPIO_INTR;
669 else
670 dd->ipath_flags |= IPATH_POLL_RX_INTR;
671 if (dd->ipath_boardrev == 8) { /* LS/X-1 */
672 u64 val;
673 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
674 if (val & INFINIPATH_EXTS_SERDESSEL) {
675 /*
676 * hardware disabled
677 *
678 * This means that the chip is hardware disabled,
679 * and will not be able to bring up the link,
680 * in any case. We special case this and abort
681 * early, to avoid later messages. We also set
682 * the DISABLED status bit
683 */
684 ipath_dbg("Unit %u is hardware-disabled\n",
685 dd->ipath_unit);
686 *dd->ipath_statusp |= IPATH_STATUS_DISABLED;
687 /* this value is handled differently */
688 ret = 2;
689 goto bail;
690 }
691 }
692 ret = 0;
693
694bail:
695 return ret;
696}
697
698static void ipath_check_htlink(struct ipath_devdata *dd)
699{
700 u8 linkerr, link_off, i;
701
702 for (i = 0; i < 2; i++) {
703 link_off = dd->ipath_ht_slave_off + i * 4 + 0xd;
704 if (pci_read_config_byte(dd->pcidev, link_off, &linkerr))
705 dev_info(&dd->pcidev->dev, "Couldn't read "
706 "linkerror%d of HT slave/primary block\n",
707 i);
708 else if (linkerr & 0xf0) {
709 ipath_cdbg(VERBOSE, "HT linkerr%d bits 0x%x set, "
710				   "clearing\n", i, linkerr >> 4);
711 /*
712 * writing the linkerr bits that are set should
713 * clear them
714 */
715 if (pci_write_config_byte(dd->pcidev, link_off,
716 linkerr))
717 ipath_dbg("Failed write to clear HT "
718 "linkerror%d\n", i);
719 if (pci_read_config_byte(dd->pcidev, link_off,
720 &linkerr))
721 dev_info(&dd->pcidev->dev,
722 "Couldn't reread linkerror%d of "
723 "HT slave/primary block\n", i);
724 else if (linkerr & 0xf0)
725 dev_info(&dd->pcidev->dev,
726 "HT linkerror%d bits 0x%x "
727 "couldn't be cleared\n",
728 i, linkerr >> 4);
729 }
730 }
731}
732
733static int ipath_setup_ht_reset(struct ipath_devdata *dd)
734{
735 ipath_dbg("No reset possible for HT-400\n");
736 return 0;
737}
738
739#define HT_CAPABILITY_ID 0x08 /* HT capabilities not defined in kernel */
740#define HT_INTR_DISC_CONFIG 0x80 /* HT interrupt and discovery cap */
741#define HT_INTR_REG_INDEX 2 /* intconfig requires indirect accesses */
742
743/*
744 * Bits 13-15 of command==0 is slave/primary block. Clear any HT CRC
745 * errors. We only bother to do this at load time, because it's OK if
746 * it happened before we were loaded (first time after boot/reset),
747 * but any time after that, it's fatal anyway. Also need to not check
748 * for upper byte errors if we are in 8 bit mode, so figure out
749 * our width. For now, at least, also complain if it's 8 bit.
750 */
751static void slave_or_pri_blk(struct ipath_devdata *dd, struct pci_dev *pdev,
752 int pos, u8 cap_type)
753{
754 u8 linkwidth = 0, linkerr, link_a_b_off, link_off;
755 u16 linkctrl = 0;
756 int i;
757
758 dd->ipath_ht_slave_off = pos;
759 /* command word, master_host bit */
760 /* master host || slave */
761 if ((cap_type >> 2) & 1)
762 link_a_b_off = 4;
763 else
764 link_a_b_off = 0;
765 ipath_cdbg(VERBOSE, "HT%u (Link %c) connected to processor\n",
766 link_a_b_off ? 1 : 0,
767 link_a_b_off ? 'B' : 'A');
768
769 link_a_b_off += pos;
770
771 /*
772 * check both link control registers; clear both HT CRC sets if
773 * necessary.
774 */
775 for (i = 0; i < 2; i++) {
776 link_off = pos + i * 4 + 0x4;
777 if (pci_read_config_word(pdev, link_off, &linkctrl))
778 ipath_dev_err(dd, "Couldn't read HT link control%d "
779 "register\n", i);
780 else if (linkctrl & (0xf << 8)) {
781 ipath_cdbg(VERBOSE, "Clear linkctrl%d CRC Error "
782 "bits %x\n", i, linkctrl & (0xf << 8));
783 /*
784 * now write them back to clear the error.
785 */
786 pci_write_config_byte(pdev, link_off,
787 linkctrl & (0xf << 8));
788 }
789 }
790
791 /*
792 * As with HT CRC bits, same for protocol errors that might occur
793 * during boot.
794 */
795 for (i = 0; i < 2; i++) {
796 link_off = pos + i * 4 + 0xd;
797 if (pci_read_config_byte(pdev, link_off, &linkerr))
798 dev_info(&pdev->dev, "Couldn't read linkerror%d "
799 "of HT slave/primary block\n", i);
800 else if (linkerr & 0xf0) {
801 ipath_cdbg(VERBOSE, "HT linkerr%d bits 0x%x set, "
802				   "clearing\n", i, linkerr >> 4);
803 /*
804 * writing the linkerr bits that are set will clear
805 * them
806 */
807 if (pci_write_config_byte
808 (pdev, link_off, linkerr))
809 ipath_dbg("Failed write to clear HT "
810 "linkerror%d\n", i);
811 if (pci_read_config_byte(pdev, link_off, &linkerr))
812 dev_info(&pdev->dev, "Couldn't reread "
813 "linkerror%d of HT slave/primary "
814 "block\n", i);
815 else if (linkerr & 0xf0)
816 dev_info(&pdev->dev, "HT linkerror%d bits "
817 "0x%x couldn't be cleared\n",
818 i, linkerr >> 4);
819 }
820 }
821
822 /*
823 * this is just for our link to the host, not devices connected
824 * through tunnel.
825 */
826
827 if (pci_read_config_byte(pdev, link_a_b_off + 7, &linkwidth))
828 ipath_dev_err(dd, "Couldn't read HT link width "
829 "config register\n");
830 else {
831 u32 width;
832 switch (linkwidth & 7) {
833 case 5:
834 width = 4;
835 break;
836 case 4:
837 width = 2;
838 break;
839 case 3:
840 width = 32;
841 break;
842 case 1:
843 width = 16;
844 break;
845 case 0:
846 default: /* if wrong, assume 8 bit */
847 width = 8;
848 break;
849 }
850
851 dd->ipath_htwidth = width;
852
853 if (linkwidth != 0x11) {
854 ipath_dev_err(dd, "Not configured for 16 bit HT "
855 "(%x)\n", linkwidth);
856 if (!(linkwidth & 0xf)) {
857 ipath_dbg("Will ignore HT lane1 errors\n");
858 dd->ipath_flags |= IPATH_8BIT_IN_HT0;
859 }
860 }
861 }
862
863 /*
864 * this is just for our link to the host, not devices connected
865 * through tunnel.
866 */
867 if (pci_read_config_byte(pdev, link_a_b_off + 0xd, &linkwidth))
868 ipath_dev_err(dd, "Couldn't read HT link frequency "
869 "config register\n");
870 else {
871 u32 speed;
872 switch (linkwidth & 0xf) {
873 case 6:
874 speed = 1000;
875 break;
876 case 5:
877 speed = 800;
878 break;
879 case 4:
880 speed = 600;
881 break;
882 case 3:
883 speed = 500;
884 break;
885 case 2:
886 speed = 400;
887 break;
888 case 1:
889 speed = 300;
890 break;
891 default:
892 /*
893 * assume reserved and vendor-specific are 200...
894 */
895 case 0:
896 speed = 200;
897 break;
898 }
899 dd->ipath_htspeed = speed;
900 }
901}
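/*
 * Illustrative sketch (not part of this patch): decoding the HT link
 * width and frequency fields the way slave_or_pri_blk() does above.
 * The encodings follow its switch statements; the sample config-space
 * values are made up.
 */
#include <stdio.h>

static unsigned ht_width_bits(unsigned char w)
{
	switch (w & 7) {
	case 5: return 4;
	case 4: return 2;
	case 3: return 32;
	case 1: return 16;
	case 0:
	default: return 8;	/* if wrong, assume 8 bit */
	}
}

static unsigned ht_speed_mhz(unsigned char f)
{
	switch (f & 0xf) {
	case 6: return 1000;
	case 5: return 800;
	case 4: return 600;
	case 3: return 500;
	case 2: return 400;
	case 1: return 300;
	default: return 200;	/* reserved/vendor-specific treated as 200 */
	}
}

int main(void)
{
	/* 0x11 = 16 bits in each direction; frequency code 5 = 800 MHz */
	printf("width %u bits, speed %u MHz\n",
	       ht_width_bits(0x11), ht_speed_mhz(5));
	return 0;
}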
902
903static int set_int_handler(struct ipath_devdata *dd, struct pci_dev *pdev,
904 int pos)
905{
906 u32 int_handler_addr_lower;
907 u32 int_handler_addr_upper;
908 u64 ihandler;
909 u32 intvec;
910
911 /* use indirection register to get the intr handler */
912 pci_write_config_byte(pdev, pos + HT_INTR_REG_INDEX, 0x10);
913 pci_read_config_dword(pdev, pos + 4, &int_handler_addr_lower);
914 pci_write_config_byte(pdev, pos + HT_INTR_REG_INDEX, 0x11);
915 pci_read_config_dword(pdev, pos + 4, &int_handler_addr_upper);
916
917 ihandler = (u64) int_handler_addr_lower |
918 ((u64) int_handler_addr_upper << 32);
919
920 /*
921 * kernels with CONFIG_PCI_MSI set the vector in the irq field of
922 * struct pci_device, so we use that to program the HT-400 internal
923 * interrupt register (not config space) with that value. The BIOS
924 * must still have done the basic MSI setup.
925 */
926 intvec = pdev->irq;
927 /*
928 * clear any vector bits there; normally not set but we'll overload
929 * this for some debug purposes (setting the HTC debug register
930 * value from software, rather than GPIOs), so it might be set on a
931 * driver reload.
932 */
933 ihandler &= ~0xff0000;
934 /* x86 vector goes in intrinfo[23:16] */
935 ihandler |= intvec << 16;
936 ipath_cdbg(VERBOSE, "ihandler lower %x, upper %x, intvec %x, "
937 "interruptconfig %llx\n", int_handler_addr_lower,
938 int_handler_addr_upper, intvec,
939 (unsigned long long) ihandler);
940
941 /* can't program yet, so save for interrupt setup */
942 dd->ipath_intconfig = ihandler;
943 /* keep going, so we find link control stuff also */
944
945 return ihandler != 0;
946}
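/*
 * Illustrative sketch (not part of this patch): how set_int_handler()
 * folds the MSI vector into bits 23:16 of the interrupt handler address
 * that is later written to InterruptConfig.  All sample values are
 * made up.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lower = 0xfee00000;	/* pretend lower handler address */
	uint32_t upper = 0;		/* pretend upper handler address */
	uint32_t intvec = 0x31;		/* pretend pdev->irq */
	uint64_t ihandler = (uint64_t)lower | ((uint64_t)upper << 32);

	ihandler &= ~0xff0000ULL;		/* clear any stale vector bits */
	ihandler |= (uint64_t)intvec << 16;	/* x86 vector in intrinfo[23:16] */

	/* prints "interruptconfig = 0xfe310000" */
	printf("interruptconfig = 0x%llx\n", (unsigned long long)ihandler);
	return 0;
}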
947
948/**
949 * ipath_setup_ht_config - setup the interruptconfig register
950 * @dd: the infinipath device
951 * @pdev: the PCI device
952 *
953 * setup the interruptconfig register from the HT config info.
954 * Also clear CRC errors in HT linkcontrol, if necessary.
955 * This is done only for the real hardware. It is done before
956 * chip address space is initted, so can't touch infinipath registers
957 */
958static int ipath_setup_ht_config(struct ipath_devdata *dd,
959 struct pci_dev *pdev)
960{
961 int pos, ret = 0;
962 int ihandler = 0;
963
964 /*
965 * Read the capability info to find the interrupt info, and also
966 * handle clearing CRC errors in linkctrl register if necessary. We
967 * do this early, before we ever enable errors or hardware errors,
968 * mostly to avoid causing the chip to enter freeze mode.
969 */
970 pos = pci_find_capability(pdev, HT_CAPABILITY_ID);
971 if (!pos) {
972 ipath_dev_err(dd, "Couldn't find HyperTransport "
973 "capability; no interrupts\n");
974 ret = -ENODEV;
975 goto bail;
976 }
977 do {
978 u8 cap_type;
979
980 /* the HT capability type byte is 3 bytes after the
981 * capability byte.
982 */
983 if (pci_read_config_byte(pdev, pos + 3, &cap_type)) {
984 dev_info(&pdev->dev, "Couldn't read config "
985 "command @ %d\n", pos);
986 continue;
987 }
988 if (!(cap_type & 0xE0))
989 slave_or_pri_blk(dd, pdev, pos, cap_type);
990 else if (cap_type == HT_INTR_DISC_CONFIG)
991 ihandler = set_int_handler(dd, pdev, pos);
992 } while ((pos = pci_find_next_capability(pdev, pos,
993 HT_CAPABILITY_ID)));
994
995 if (!ihandler) {
996 ipath_dev_err(dd, "Couldn't find interrupt handler in "
997 "config space\n");
998 ret = -ENODEV;
999 }
1000
1001bail:
1002 return ret;
1003}
1004
1005/**
1006 * ipath_setup_ht_cleanup - clean up any per-chip chip-specific stuff
1007 * @dd: the infinipath device
1008 *
1009 * Called during driver unload.
1010 * This is currently a nop for the HT-400, but not for all chips
1011 */
1012static void ipath_setup_ht_cleanup(struct ipath_devdata *dd)
1013{
1014}
1015
1016/**
1017 * ipath_setup_ht_setextled - set the state of the two external LEDs
1018 * @dd: the infinipath device
1019 * @lst: the L state
1020 * @ltst: the LT state
1021 *
1022 * Set the state of the two external LEDs, to indicate physical and
1023 * logical state of IB link. For this chip (at least with recommended
1024 * board pinouts), LED1 is Green (physical state), and LED2 is Yellow
1025 * (logical state)
1026 *
1027 * Note: We try to match the Mellanox HCA LED behavior as best
1028 * we can. Green indicates physical link state is OK (something is
1029 * plugged in, and we can train).
1030 * Amber indicates the link is logically up (ACTIVE).
1031 * Mellanox further blinks the amber LED to indicate data packet
1032 * activity, but we have no hardware support for that, so it would
1033 * require waking up every 10-20 msecs and checking the counters
1034 * on the chip, and then turning the LED off if appropriate. That's
1035 * visible overhead, so not something we will do.
1036 *
1037 */
1038static void ipath_setup_ht_setextled(struct ipath_devdata *dd,
1039 u64 lst, u64 ltst)
1040{
1041 u64 extctl;
1042
1043 /* the diags use the LED to indicate diag info, so we leave
1044 * the external LED alone when the diags are running */
1045 if (ipath_diag_inuse)
1046 return;
1047
1048 /*
1049 * start by setting both LED control bits to off, then turn
1050 * on the appropriate bit(s).
1051 */
1052 if (dd->ipath_boardrev == 8) { /* LS/X-1 uses different pins */
1053 /*
1054 * major difference is that INFINIPATH_EXTC_LEDGBLERR_OFF
1055 * is inverted, because it is normally used to indicate
1056 * a hardware fault at reset, if there were errors
1057 */
1058 extctl = (dd->ipath_extctrl & ~INFINIPATH_EXTC_LEDGBLOK_ON)
1059 | INFINIPATH_EXTC_LEDGBLERR_OFF;
1060 if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
1061 extctl &= ~INFINIPATH_EXTC_LEDGBLERR_OFF;
1062 if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
1063 extctl |= INFINIPATH_EXTC_LEDGBLOK_ON;
1064 }
1065 else {
1066 extctl = dd->ipath_extctrl &
1067 ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
1068 INFINIPATH_EXTC_LED2PRIPORT_ON);
1069 if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
1070 extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
1071 if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
1072 extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
1073 }
1074 dd->ipath_extctrl = extctl;
1075 ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
1076}
1077
1078static void ipath_init_ht_variables(void)
1079{
1080 ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
1081 ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
1082 ipath_gpio_sda = IPATH_GPIO_SDA;
1083 ipath_gpio_scl = IPATH_GPIO_SCL;
1084
1085 infinipath_i_bitsextant =
1086 (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
1087 (INFINIPATH_I_RCVAVAIL_MASK <<
1088 INFINIPATH_I_RCVAVAIL_SHIFT) |
1089 INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
1090 INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO;
1091
1092 infinipath_e_bitsextant =
1093 INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
1094 INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
1095 INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
1096 INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
1097 INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
1098 INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
1099 INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
1100 INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
1101 INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
1102 INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SMAXPKTLEN |
1103 INFINIPATH_E_SUNDERRUN | INFINIPATH_E_SPKTLEN |
1104 INFINIPATH_E_SDROPPEDSMPPKT | INFINIPATH_E_SDROPPEDDATAPKT |
1105 INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
1106 INFINIPATH_E_SUNSUPVL | INFINIPATH_E_IBSTATUSCHANGED |
1107 INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET |
1108 INFINIPATH_E_HARDWARE;
1109
1110 infinipath_hwe_bitsextant =
1111 (INFINIPATH_HWE_HTCMEMPARITYERR_MASK <<
1112 INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT) |
1113 (INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
1114 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) |
1115 (INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
1116 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
1117 INFINIPATH_HWE_HTCLNKABYTE0CRCERR |
1118 INFINIPATH_HWE_HTCLNKABYTE1CRCERR |
1119 INFINIPATH_HWE_HTCLNKBBYTE0CRCERR |
1120 INFINIPATH_HWE_HTCLNKBBYTE1CRCERR |
1121 INFINIPATH_HWE_HTCMISCERR4 |
1122 INFINIPATH_HWE_HTCMISCERR5 | INFINIPATH_HWE_HTCMISCERR6 |
1123 INFINIPATH_HWE_HTCMISCERR7 |
1124 INFINIPATH_HWE_HTCBUSTREQPARITYERR |
1125 INFINIPATH_HWE_HTCBUSTRESPPARITYERR |
1126 INFINIPATH_HWE_HTCBUSIREQPARITYERR |
1127 INFINIPATH_HWE_RXDSYNCMEMPARITYERR |
1128 INFINIPATH_HWE_MEMBISTFAILED |
1129 INFINIPATH_HWE_COREPLL_FBSLIP |
1130 INFINIPATH_HWE_COREPLL_RFSLIP |
1131 INFINIPATH_HWE_HTBPLL_FBSLIP |
1132 INFINIPATH_HWE_HTBPLL_RFSLIP |
1133 INFINIPATH_HWE_HTAPLL_FBSLIP |
1134 INFINIPATH_HWE_HTAPLL_RFSLIP |
1135 INFINIPATH_HWE_SERDESPLLFAILED |
1136 INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
1137 INFINIPATH_HWE_IBCBUSFRSPCPARITYERR;
1138
1139 infinipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
1140 infinipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
1141}
1142
1143/**
1144 * ipath_ht_init_hwerrors - enable hardware errors
1145 * @dd: the infinipath device
1146 *
1147 * now that we have finished initializing everything that might reasonably
1148 * cause a hardware error, and cleared those errors bits as they occur,
1149 * we can enable hardware errors in the mask (potentially enabling
1150 * freeze mode), and enable hardware errors as errors (along with
1151 * everything else) in errormask
1152 */
1153static void ipath_ht_init_hwerrors(struct ipath_devdata *dd)
1154{
1155 ipath_err_t val;
1156 u64 extsval;
1157
1158 extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
1159
1160 if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST))
1161 ipath_dev_err(dd, "MemBIST did not complete!\n");
1162
1163 ipath_check_htlink(dd);
1164
1165 /* barring bugs, all hwerrors become interrupts, which can */
1166 val = -1LL;
1167 /* don't look at crc lane1 if 8 bit */
1168 if (dd->ipath_flags & IPATH_8BIT_IN_HT0)
1169 val &= ~infinipath_hwe_htclnkabyte1crcerr;
1170 /* don't look at crc lane1 if 8 bit */
1171 if (dd->ipath_flags & IPATH_8BIT_IN_HT1)
1172 val &= ~infinipath_hwe_htclnkbbyte1crcerr;
1173
1174 /*
1175 * disable RXDSYNCMEMPARITY because external serdes is unused,
1176 * and therefore the logic will never be used or initialized,
1177 * and uninitialized state will normally result in this error
1178	 * being asserted. Similarly for the external serdes pll
1179 * lock signal.
1180 */
1181 val &= ~(INFINIPATH_HWE_SERDESPLLFAILED |
1182 INFINIPATH_HWE_RXDSYNCMEMPARITYERR);
1183
1184 /*
1185 * Disable MISCERR4 because of an inversion in the HT core
1186 * logic checking for errors that cause this bit to be set.
1187	 * This erratum can also cause the protocol error bit to be set
1188 * in the HT config space linkerror register(s).
1189 */
1190 val &= ~INFINIPATH_HWE_HTCMISCERR4;
1191
1192 /*
1193 * PLL ignored because MDIO interface has a logic problem
1194 * for reads, on Comstock and Ponderosa. BRINGUP
1195 */
1196 if (dd->ipath_boardrev == 4 || dd->ipath_boardrev == 9)
1197 val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
1198 dd->ipath_hwerrmask = val;
1199}
1200
1201/**
1202 * ipath_ht_bringup_serdes - bring up the serdes
1203 * @dd: the infinipath device
1204 */
1205static int ipath_ht_bringup_serdes(struct ipath_devdata *dd)
1206{
1207 u64 val, config1;
1208 int ret = 0, change = 0;
1209
1210 ipath_dbg("Trying to bringup serdes\n");
1211
1212 if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
1213 INFINIPATH_HWE_SERDESPLLFAILED)
1214 {
1215 ipath_dbg("At start, serdes PLL failed bit set in "
1216 "hwerrstatus, clearing and continuing\n");
1217 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
1218 INFINIPATH_HWE_SERDESPLLFAILED);
1219 }
1220
1221 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
1222 config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1);
1223
1224 ipath_cdbg(VERBOSE, "Initial serdes status is config0=%llx "
1225 "config1=%llx, sstatus=%llx xgxs %llx\n",
1226 (unsigned long long) val, (unsigned long long) config1,
1227 (unsigned long long)
1228 ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
1229 (unsigned long long)
1230 ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
1231
1232 /* force reset on */
1233 val |= INFINIPATH_SERDC0_RESET_PLL
1234 /* | INFINIPATH_SERDC0_RESET_MASK */
1235 ;
1236 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
1237 udelay(15); /* need pll reset set at least for a bit */
1238
1239 if (val & INFINIPATH_SERDC0_RESET_PLL) {
1240 u64 val2 = val &= ~INFINIPATH_SERDC0_RESET_PLL;
1241 /* set lane resets, and tx idle, during pll reset */
1242 val2 |= INFINIPATH_SERDC0_RESET_MASK |
1243 INFINIPATH_SERDC0_TXIDLE;
1244 ipath_cdbg(VERBOSE, "Clearing serdes PLL reset (writing "
1245 "%llx)\n", (unsigned long long) val2);
1246 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0,
1247 val2);
1248 /*
1249 * be sure chip saw it
1250 */
1251 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1252 /*
1253 * need pll reset clear at least 11 usec before lane
1254 * resets cleared; give it a few more
1255 */
1256 udelay(15);
1257 val = val2; /* for check below */
1258 }
1259
1260 if (val & (INFINIPATH_SERDC0_RESET_PLL |
1261 INFINIPATH_SERDC0_RESET_MASK |
1262 INFINIPATH_SERDC0_TXIDLE)) {
1263 val &= ~(INFINIPATH_SERDC0_RESET_PLL |
1264 INFINIPATH_SERDC0_RESET_MASK |
1265 INFINIPATH_SERDC0_TXIDLE);
1266 /* clear them */
1267 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0,
1268 val);
1269 }
1270
1271 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
1272 if (((val >> INFINIPATH_XGXS_MDIOADDR_SHIFT) &
1273 INFINIPATH_XGXS_MDIOADDR_MASK) != 3) {
1274 val &= ~(INFINIPATH_XGXS_MDIOADDR_MASK <<
1275 INFINIPATH_XGXS_MDIOADDR_SHIFT);
1276 /*
1277 * we use address 3
1278 */
1279 val |= 3ULL << INFINIPATH_XGXS_MDIOADDR_SHIFT;
1280 change = 1;
1281 }
1282 if (val & INFINIPATH_XGXS_RESET) {
1283 /* normally true after boot */
1284 val &= ~INFINIPATH_XGXS_RESET;
1285 change = 1;
1286 }
1287 if (change)
1288 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
1289
1290 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
1291
1292 /* clear current and de-emphasis bits */
1293 config1 &= ~0x0ffffffff00ULL;
1294 /* set current to 20ma */
1295 config1 |= 0x00000000000ULL;
1296 /* set de-emphasis to -5.68dB */
1297 config1 |= 0x0cccc000000ULL;
1298 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig1, config1);
1299
1300 ipath_cdbg(VERBOSE, "After setup: serdes status is config0=%llx "
1301 "config1=%llx, sstatus=%llx xgxs %llx\n",
1302 (unsigned long long) val, (unsigned long long) config1,
1303 (unsigned long long)
1304 ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
1305 (unsigned long long)
1306 ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
1307
1308 if (!ipath_waitfor_mdio_cmdready(dd)) {
1309 ipath_write_kreg(dd, dd->ipath_kregs->kr_mdio,
1310 ipath_mdio_req(IPATH_MDIO_CMD_READ, 31,
1311 IPATH_MDIO_CTRL_XGXS_REG_8,
1312 0));
1313 if (ipath_waitfor_complete(dd, dd->ipath_kregs->kr_mdio,
1314 IPATH_MDIO_DATAVALID, &val))
1315 ipath_dbg("Never got MDIO data for XGXS status "
1316 "read\n");
1317 else
1318 ipath_cdbg(VERBOSE, "MDIO Read reg8, "
1319 "'bank' 31 %x\n", (u32) val);
1320 } else
1321 ipath_dbg("Never got MDIO cmdready for XGXS status read\n");
1322
1323 return ret; /* for now, say we always succeeded */
1324}
1325
1326/**
1327 * ipath_ht_quiet_serdes - set serdes to txidle
1328 * @dd: the infinipath device
1329 * driver is being unloaded
1330 */
1331static void ipath_ht_quiet_serdes(struct ipath_devdata *dd)
1332{
1333 u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
1334
1335 val |= INFINIPATH_SERDC0_TXIDLE;
1336 ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n",
1337 (unsigned long long) val);
1338 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
1339}
1340
1341static int ipath_ht_intconfig(struct ipath_devdata *dd)
1342{
1343 int ret;
1344
1345 if (!dd->ipath_intconfig) {
1346 ipath_dev_err(dd, "No interrupts enabled, couldn't setup "
1347 "interrupt address\n");
1348 ret = 1;
1349 goto bail;
1350 }
1351
1352 ipath_write_kreg(dd, dd->ipath_kregs->kr_interruptconfig,
1353 dd->ipath_intconfig); /* interrupt address */
1354 ret = 0;
1355
1356bail:
1357 return ret;
1358}
1359
1360/**
1361 * ipath_ht_put_tid - write a TID in chip
1362 * @dd: the infinipath device
1363 * @tidptr: pointer to the expected TID (in chip) to update
1364 * @type: 0 for eager, 1 for expected
1365 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
1366 *
1367 * This exists as a separate routine to allow for special locking etc.
1368 * It's used for both the full cleanup on exit, as well as the normal
1369 * setup and teardown.
1370 */
1371static void ipath_ht_put_tid(struct ipath_devdata *dd,
1372 u64 __iomem *tidptr, u32 type,
1373 unsigned long pa)
1374{
1375 if (pa != dd->ipath_tidinvalid) {
1376 if (unlikely((pa & ~INFINIPATH_RT_ADDR_MASK))) {
1377 dev_info(&dd->pcidev->dev,
1378 "physaddr %lx has more than "
1379 "40 bits, using only 40!!!\n", pa);
1380 pa &= INFINIPATH_RT_ADDR_MASK;
1381 }
1382 if (type == 0)
1383 pa |= dd->ipath_tidtemplate;
1384 else {
1385 /* in words (fixed, full page). */
1386 u64 lenvalid = PAGE_SIZE >> 2;
1387 lenvalid <<= INFINIPATH_RT_BUFSIZE_SHIFT;
1388 pa |= lenvalid | INFINIPATH_RT_VALID;
1389 }
1390 }
1391 if (dd->ipath_kregbase)
1392 writeq(pa, tidptr);
1393}
1394
1395/**
1396 * ipath_ht_clear_tids - clear all TID entries for a port, expected and eager
1397 * @dd: the infinipath device
1398 * @port: the port
1399 *
1400 * Used from ipath_close(), and at chip initialization.
1401 */
1402static void ipath_ht_clear_tids(struct ipath_devdata *dd, unsigned port)
1403{
1404 u64 __iomem *tidbase;
1405 int i;
1406
1407 if (!dd->ipath_kregbase)
1408 return;
1409
1410 ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
1411
1412 /*
1413 * need to invalidate all of the expected TID entries for this
1414 * port, so we don't have valid entries that might somehow get
1415 * used (early in next use of this port, or through some bug)
1416 */
1417 tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
1418 dd->ipath_rcvtidbase +
1419 port * dd->ipath_rcvtidcnt *
1420 sizeof(*tidbase));
1421 for (i = 0; i < dd->ipath_rcvtidcnt; i++)
1422 ipath_ht_put_tid(dd, &tidbase[i], 1, dd->ipath_tidinvalid);
1423
1424 tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
1425 dd->ipath_rcvegrbase +
1426 port * dd->ipath_rcvegrcnt *
1427 sizeof(*tidbase));
1428
1429 for (i = 0; i < dd->ipath_rcvegrcnt; i++)
1430 ipath_ht_put_tid(dd, &tidbase[i], 0, dd->ipath_tidinvalid);
1431}
1432
1433/**
1434 * ipath_ht_tidtemplate - setup constants for TID updates
1435 * @dd: the infinipath device
1436 *
1437 * We setup stuff that we use a lot, to avoid calculating each time
1438 */
1439static void ipath_ht_tidtemplate(struct ipath_devdata *dd)
1440{
1441 dd->ipath_tidtemplate = dd->ipath_ibmaxlen >> 2;
1442 dd->ipath_tidtemplate <<= INFINIPATH_RT_BUFSIZE_SHIFT;
1443 dd->ipath_tidtemplate |= INFINIPATH_RT_VALID;
1444
1445 /*
1446 * work around chip errata bug 7358, by marking invalid tids
1447 * as having max length
1448 */
1449 dd->ipath_tidinvalid = (-1LL & INFINIPATH_RT_BUFSIZE_MASK) <<
1450 INFINIPATH_RT_BUFSIZE_SHIFT;
1451}
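/*
 * Illustrative sketch (not part of this patch): how a receive TID word
 * combines a 40-bit physical address, a buffer length in 32-bit words
 * and a valid bit, as ipath_ht_put_tid()/ipath_ht_tidtemplate() do.
 * The shift and valid-bit positions below are made up; the real ones
 * are the INFINIPATH_RT_* defines.
 */
#include <stdint.h>
#include <stdio.h>

#define RT_ADDR_MASK	0xFFFFFFFFFFULL	/* assume the low 40 bits hold the address */
#define RT_BUFSIZE_SHIFT 48		/* hypothetical length-field position */
#define RT_VALID	(1ULL << 63)	/* hypothetical valid bit */

int main(void)
{
	uint64_t pa = 0x123456000ULL;	/* pretend page-aligned physical address */
	uint64_t words = 4096 >> 2;	/* a full page, expressed in 32-bit words */
	uint64_t tid = (pa & RT_ADDR_MASK) |
		       (words << RT_BUFSIZE_SHIFT) | RT_VALID;

	/* prints "tid word = 0x8004000123456000" */
	printf("tid word = 0x%llx\n", (unsigned long long)tid);
	return 0;
}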
1452
1453static int ipath_ht_early_init(struct ipath_devdata *dd)
1454{
1455 u32 __iomem *piobuf;
1456 u32 pioincr, val32, egrsize;
1457 int i;
1458
1459 /*
1460 * one cache line; long IB headers will spill over into received
1461 * buffer
1462 */
1463 dd->ipath_rcvhdrentsize = 16;
1464 dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
1465
1466 /*
1467 * For HT-400, we allocate a somewhat overly large eager buffer,
1468 * such that we can guarantee that we can receive the largest
1469 * packet that we can send out. To truly support a 4KB MTU,
1470 * we need to bump this to a large value. To date, other than
1471 * testing, we have never encountered an HCA that can really
1472 * send 4KB MTU packets, so we do not handle that (we'll get
1473	 * error interrupts if we ever see one).
1474 */
1475 dd->ipath_rcvegrbufsize = dd->ipath_piosize2k;
1476 egrsize = dd->ipath_rcvegrbufsize;
1477
1478 /*
1479 * the min() check here is currently a nop, but it may not
1480 * always be, depending on just how we do ipath_rcvegrbufsize
1481 */
1482 dd->ipath_ibmaxlen = min(dd->ipath_piosize2k,
1483 dd->ipath_rcvegrbufsize);
1484 dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
1485 ipath_ht_tidtemplate(dd);
1486
1487 /*
1488 * zero all the TID entries at startup. We do this for sanity,
1489 * in case of a previous driver crash of some kind, and also
1490 * because the chip powers up with these memories in an unknown
1491 * state. Use portcnt, not cfgports, since this is for the
1492 * full chip, not for current (possibly different) configuration
1493 * value.
1494 * Chip Errata bug 6447
1495 */
1496 for (val32 = 0; val32 < dd->ipath_portcnt; val32++)
1497 ipath_ht_clear_tids(dd, val32);
1498
1499 /*
1500 * write the pbc of each buffer, to be sure it's initialized, then
1501 * cancel all the buffers, and also abort any packets that might
1502 * have been in flight for some reason (the latter is for driver
1503 * unload/reload, but isn't a bad idea at first init). PIO send
1504 * isn't enabled at this point, so there is no danger of sending
1505 * these out on the wire.
1506 * Chip Errata bug 6610
1507 */
1508 piobuf = (u32 __iomem *) (((char __iomem *)(dd->ipath_kregbase)) +
1509 dd->ipath_piobufbase);
1510 pioincr = dd->ipath_palign / sizeof(*piobuf);
1511 for (i = 0; i < dd->ipath_piobcnt2k; i++) {
1512 /*
1513 * reasonable word count, just to init pbc
1514 */
1515 writel(16, piobuf);
1516 piobuf += pioincr;
1517 }
1518 /*
1519 * self-clearing
1520 */
1521 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1522 INFINIPATH_S_ABORT);
1523 return 0;
1524}
1525
1526/**
1527 * ipath_ht_get_base_info - set chip-specific flags for user code
1528 * @dd: the infinipath device
1529 * @kbase: ipath_base_info pointer
1530 *
1531 * We set the HT runtime flag because the higher bandwidth of
1532 * HyperTransport vs PCIe can affect some user packet algorithms.
1533 */
1534static int ipath_ht_get_base_info(struct ipath_portdata *pd, void *kbase)
1535{
1536 struct ipath_base_info *kinfo = kbase;
1537
1538 kinfo->spi_runtime_flags |= IPATH_RUNTIME_HT |
1539 IPATH_RUNTIME_RCVHDR_COPY;
1540
1541 return 0;
1542}
1543
1544/**
1545 * ipath_init_ht400_funcs - set up the chip-specific function pointers
1546 * @dd: the infinipath device
1547 *
1548 * This is global, and is called directly at init to set up the
1549 * chip-specific function pointers for later use.
1550 */
1551void ipath_init_ht400_funcs(struct ipath_devdata *dd)
1552{
1553 dd->ipath_f_intrsetup = ipath_ht_intconfig;
1554 dd->ipath_f_bus = ipath_setup_ht_config;
1555 dd->ipath_f_reset = ipath_setup_ht_reset;
1556 dd->ipath_f_get_boardname = ipath_ht_boardname;
1557	dd->ipath_f_init_hwerrors = ipath_ht_init_hwerrors;
1559 dd->ipath_f_early_init = ipath_ht_early_init;
1560 dd->ipath_f_handle_hwerrors = ipath_ht_handle_hwerrors;
1561 dd->ipath_f_quiet_serdes = ipath_ht_quiet_serdes;
1562 dd->ipath_f_bringup_serdes = ipath_ht_bringup_serdes;
1563 dd->ipath_f_clear_tids = ipath_ht_clear_tids;
1564 dd->ipath_f_put_tid = ipath_ht_put_tid;
1565 dd->ipath_f_cleanup = ipath_setup_ht_cleanup;
1566 dd->ipath_f_setextled = ipath_setup_ht_setextled;
1567 dd->ipath_f_get_base_info = ipath_ht_get_base_info;
1568
1569 /*
1570 * initialize chip-specific variables
1571 */
1572 dd->ipath_f_tidtemplate = ipath_ht_tidtemplate;
1573
1574 /*
1575 * setup the register offsets, since they are different for each
1576 * chip
1577 */
1578 dd->ipath_kregs = &ipath_ht_kregs;
1579 dd->ipath_cregs = &ipath_ht_cregs;
1580
1581 /*
1582 * do very early init that is needed before ipath_f_bus is
1583 * called
1584 */
1585 ipath_init_ht_variables();
1586}
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
new file mode 100644
index 000000000000..2823ff9c0c62
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -0,0 +1,951 @@
1/*
2 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/pci.h>
34#include <linux/netdevice.h>
35#include <linux/vmalloc.h>
36
37#include "ipath_kernel.h"
38#include "ips_common.h"
39
40/*
41 * min buffers we want to have per port, after driver
42 */
43#define IPATH_MIN_USER_PORT_BUFCNT 8
44
45/*
46 * Number of ports we are configured to use (to allow for more pio
47 * buffers per port, etc.) Zero means use chip value.
48 */
49static ushort ipath_cfgports;
50
51module_param_named(cfgports, ipath_cfgports, ushort, S_IRUGO);
52MODULE_PARM_DESC(cfgports, "Set max number of ports to use");
53
54/*
55 * Number of buffers reserved for driver (layered drivers and SMA
56 * send). Reserved at end of buffer list.
57 */
58static ushort ipath_kpiobufs = 32;
59
60static int ipath_set_kpiobufs(const char *val, struct kernel_param *kp);
61
62module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_uint,
63 &ipath_kpiobufs, S_IWUSR | S_IRUGO);
64MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver");
65
66/**
67 * create_port0_egr - allocate the eager TID buffers
68 * @dd: the infinipath device
69 *
70 * This code is now quite different for user and kernel, because
71 * the kernel uses skbs for accelerated network performance.
72 * This is the kernel (port0) version.
73 *
74 * Allocate the eager TID buffers and program them into infinipath.
75 * We use the network layer alloc_skb() allocator to allocate the
76 * memory, and either use the buffers as is for things like SMA
77 * packets, or pass the buffers up to the ipath layered driver and
78 * thence the network layer, replacing them as we do so (see
79 * ipath_rcv_layer()).
80 */
81static int create_port0_egr(struct ipath_devdata *dd)
82{
83 unsigned e, egrcnt;
84 struct sk_buff **skbs;
85 int ret;
86
87 egrcnt = dd->ipath_rcvegrcnt;
88
89 skbs = vmalloc(sizeof(*dd->ipath_port0_skbs) * egrcnt);
90 if (skbs == NULL) {
91 ipath_dev_err(dd, "allocation error for eager TID "
92 "skb array\n");
93 ret = -ENOMEM;
94 goto bail;
95 }
96 for (e = 0; e < egrcnt; e++) {
97 /*
98 * This is a bit tricky in that we allocate extra
99 * space for 2 bytes of the 14 byte ethernet header.
100 * These two bytes are passed in the ipath header so
101 * the rest of the data is word aligned. We allocate
102 * 4 bytes so that the data buffer stays word aligned.
103 * See ipath_kreceive() for more details.
104 */
105 skbs[e] = ipath_alloc_skb(dd, GFP_KERNEL);
106 if (!skbs[e]) {
107 ipath_dev_err(dd, "SKB allocation error for "
108 "eager TID %u\n", e);
109 while (e != 0)
110 dev_kfree_skb(skbs[--e]);
111 ret = -ENOMEM;
112 goto bail;
113 }
114 }
115 /*
116 * After loop above, so we can test non-NULL to see if ready
117 * to use at receive, etc.
118 */
119 dd->ipath_port0_skbs = skbs;
120
121 for (e = 0; e < egrcnt; e++) {
122 unsigned long phys =
123 virt_to_phys(dd->ipath_port0_skbs[e]->data);
124 dd->ipath_f_put_tid(dd, e + (u64 __iomem *)
125 ((char __iomem *) dd->ipath_kregbase +
126 dd->ipath_rcvegrbase), 0, phys);
127 }
128
129 ret = 0;
130
131bail:
132 return ret;
133}
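/*
 * Illustrative sketch (not part of this patch): why the eager-TID skbs
 * reserve 2 extra bytes ahead of the data.  A 14-byte ethernet header
 * leaves the payload on a 2-byte boundary; carrying 2 of those bytes in
 * the ipath header puts the rest of the data back on a 4-byte boundary.
 */
#include <stdio.h>

int main(void)
{
	unsigned eth_hdr = 14;	/* ethernet header length, bytes */
	unsigned pad = 2;	/* bytes carried in the ipath header instead */

	/* prints "full header 14 % 4 = 2; remaining header 12 % 4 = 0" */
	printf("full header %u %% 4 = %u; remaining header %u %% 4 = %u\n",
	       eth_hdr, eth_hdr % 4, eth_hdr - pad, (eth_hdr - pad) % 4);
	return 0;
}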
134
135static int bringup_link(struct ipath_devdata *dd)
136{
137 u64 val, ibc;
138 int ret = 0;
139
140 /* hold IBC in reset */
141 dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
142 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
143 dd->ipath_control);
144
145 /*
146 * Note that prior to try 14 or 15 of IB, the credit scaling
147 * wasn't working, because it was swapped for writes with the
148 * 1 bit default linkstate field
149 */
150
151 /* ignore pbc and align word */
152 val = dd->ipath_piosize2k - 2 * sizeof(u32);
153 /*
154 * for ICRC, which we only send in diag test pkt mode, and we
155 * don't need to worry about that for mtu
156 */
157 val += 1;
158 /*
159 * Set the IBC maxpktlength to the size of our pio buffers the
160 * maxpktlength is in words. This is *not* the IB data MTU.
161 */
162 ibc = (val / sizeof(u32)) << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
163 /* in KB */
164 ibc |= 0x5ULL << INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT;
165 /*
166 * How often flowctrl sent. More or less in usecs; balance against
167 * watermark value, so that in theory senders always get a flow
168 * control update in time to not let the IB link go idle.
169 */
170 ibc |= 0x3ULL << INFINIPATH_IBCC_FLOWCTRLPERIOD_SHIFT;
171 /* max error tolerance */
172 ibc |= 0xfULL << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
173 /* use "real" buffer space for */
174 ibc |= 4ULL << INFINIPATH_IBCC_CREDITSCALE_SHIFT;
175 /* IB credit flow control. */
176 ibc |= 0xfULL << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
177 /* initially come up waiting for TS1, without sending anything. */
178 dd->ipath_ibcctrl = ibc;
179 /*
180 * Want to start out with both LINKCMD and LINKINITCMD in NOP
181 * (0 and 0). Don't put linkinitcmd in ipath_ibcctrl, want that
182 * to stay a NOP
183 */
184 ibc |= INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
185 INFINIPATH_IBCC_LINKINITCMD_SHIFT;
186 ipath_cdbg(VERBOSE, "Writing 0x%llx to ibcctrl\n",
187 (unsigned long long) ibc);
188 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, ibc);
189
190	/* be sure chip saw it */
191 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
192
193 ret = dd->ipath_f_bringup_serdes(dd);
194
195 if (ret)
196 dev_info(&dd->pcidev->dev, "Could not initialize SerDes, "
197 "not usable\n");
198 else {
199 /* enable IBC */
200 dd->ipath_control |= INFINIPATH_C_LINKENABLE;
201 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
202 dd->ipath_control);
203 }
204
205 return ret;
206}
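/*
 * Illustrative sketch (not part of this patch): how bringup_link()
 * derives the IBC max packet length, in 32-bit words, from the 2 KB
 * PIO buffer size.  The shift value here is made up; the real one is
 * INFINIPATH_IBCC_MAXPKTLEN_SHIFT.
 */
#include <stdint.h>
#include <stdio.h>

#define MAXPKTLEN_SHIFT	20	/* hypothetical field position in ibcctrl */

int main(void)
{
	uint32_t piosize2k = 2048;	/* pretend 2 KB PIO buffer size */
	uint32_t val = piosize2k - 2 * sizeof(uint32_t);	/* drop pbc + align word */
	uint64_t ibc;

	val += 1;	/* room for the ICRC sent in diag test pkt mode */
	ibc = (uint64_t)(val / sizeof(uint32_t)) << MAXPKTLEN_SHIFT;

	/* prints "maxpktlen 510 words, ibcctrl field 0x1fe00000" */
	printf("maxpktlen %u words, ibcctrl field 0x%llx\n",
	       (unsigned)(val / sizeof(uint32_t)), (unsigned long long)ibc);
	return 0;
}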
207
208static int init_chip_first(struct ipath_devdata *dd,
209 struct ipath_portdata **pdp)
210{
211 struct ipath_portdata *pd = NULL;
212 int ret = 0;
213 u64 val;
214
215 /*
216 * skip cfgports stuff because we are not allocating memory,
217 * and we don't want problems if the portcnt changed due to
218 * cfgports. We do still check and report a difference, if
219 * not same (should be impossible).
220 */
221 dd->ipath_portcnt =
222 ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
223 if (!ipath_cfgports)
224 dd->ipath_cfgports = dd->ipath_portcnt;
225 else if (ipath_cfgports <= dd->ipath_portcnt) {
226 dd->ipath_cfgports = ipath_cfgports;
227 ipath_dbg("Configured to use %u ports out of %u in chip\n",
228 dd->ipath_cfgports, dd->ipath_portcnt);
229 } else {
230 dd->ipath_cfgports = dd->ipath_portcnt;
231		ipath_dbg("Tried to configure to use %u ports; chip "
232 "only supports %u\n", ipath_cfgports,
233 dd->ipath_portcnt);
234 }
235 dd->ipath_pd = kzalloc(sizeof(*dd->ipath_pd) * dd->ipath_cfgports,
236 GFP_KERNEL);
237
238 if (!dd->ipath_pd) {
239 ipath_dev_err(dd, "Unable to allocate portdata array, "
240 "failing\n");
241 ret = -ENOMEM;
242 goto done;
243 }
244
245 dd->ipath_lastegrheads = kzalloc(sizeof(*dd->ipath_lastegrheads)
246 * dd->ipath_cfgports,
247 GFP_KERNEL);
248 dd->ipath_lastrcvhdrqtails =
249 kzalloc(sizeof(*dd->ipath_lastrcvhdrqtails)
250 * dd->ipath_cfgports, GFP_KERNEL);
251
252 if (!dd->ipath_lastegrheads || !dd->ipath_lastrcvhdrqtails) {
253 ipath_dev_err(dd, "Unable to allocate head arrays, "
254 "failing\n");
255 ret = -ENOMEM;
256 goto done;
257 }
258
259 dd->ipath_pd[0] = kzalloc(sizeof(*pd), GFP_KERNEL);
260
261 if (!dd->ipath_pd[0]) {
262 ipath_dev_err(dd, "Unable to allocate portdata for port "
263 "0, failing\n");
264 ret = -ENOMEM;
265 goto done;
266 }
267 pd = dd->ipath_pd[0];
268 pd->port_dd = dd;
269 pd->port_port = 0;
270 pd->port_cnt = 1;
271 /* The port 0 pkey table is used by the layer interface. */
272 pd->port_pkeys[0] = IPS_DEFAULT_P_KEY;
273 dd->ipath_rcvtidcnt =
274 ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
275 dd->ipath_rcvtidbase =
276 ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase);
277 dd->ipath_rcvegrcnt =
278 ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
279 dd->ipath_rcvegrbase =
280 ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase);
281 dd->ipath_palign =
282 ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);
283 dd->ipath_piobufbase =
284 ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufbase);
285 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiosize);
286 dd->ipath_piosize2k = val & ~0U;
287 dd->ipath_piosize4k = val >> 32;
288 dd->ipath_ibmtu = 4096; /* default to largest legal MTU */
289 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufcnt);
290 dd->ipath_piobcnt2k = val & ~0U;
291 dd->ipath_piobcnt4k = val >> 32;
292 dd->ipath_pio2kbase =
293 (u32 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
294 (dd->ipath_piobufbase & 0xffffffff));
295 if (dd->ipath_piobcnt4k) {
296 dd->ipath_pio4kbase = (u32 __iomem *)
297 (((char __iomem *) dd->ipath_kregbase) +
298 (dd->ipath_piobufbase >> 32));
299 /*
300 * 4K buffers take 2 pages; we use roundup just to be
301 * paranoid; we calculate it once here, rather than on
302 * every buf allocation
303 */
304 dd->ipath_4kalign = ALIGN(dd->ipath_piosize4k,
305 dd->ipath_palign);
306 ipath_dbg("%u 2k(%x) piobufs @ %p, %u 4k(%x) @ %p "
307 "(%x aligned)\n",
308 dd->ipath_piobcnt2k, dd->ipath_piosize2k,
309 dd->ipath_pio2kbase, dd->ipath_piobcnt4k,
310 dd->ipath_piosize4k, dd->ipath_pio4kbase,
311 dd->ipath_4kalign);
312 }
313 else ipath_dbg("%u 2k piobufs @ %p\n",
314 dd->ipath_piobcnt2k, dd->ipath_pio2kbase);
315
316 spin_lock_init(&dd->ipath_tid_lock);
317
318done:
319 *pdp = pd;
320 return ret;
321}
322
323/**
324 * init_chip_reset - re-initialize after a reset, or enable
325 * @dd: the infinipath device
326 * @pdp: output for port data
327 *
328 * sanity check at least some of the values after reset, and
329 * ensure no receive or transmit is happening (explicitly, in
330 * case the reset failed)
331 */
332static int init_chip_reset(struct ipath_devdata *dd,
333 struct ipath_portdata **pdp)
334{
335 struct ipath_portdata *pd;
336 u32 rtmp;
337
338 *pdp = pd = dd->ipath_pd[0];
339 /* ensure chip does no sends or receives while we re-initialize */
340 dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U;
341 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 0);
342 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0);
343 ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0);
344
345 rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
346 if (dd->ipath_portcnt != rtmp)
347 dev_info(&dd->pcidev->dev, "portcnt was %u before "
348 "reset, now %u, using original\n",
349 dd->ipath_portcnt, rtmp);
350 rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
351 if (rtmp != dd->ipath_rcvtidcnt)
352 dev_info(&dd->pcidev->dev, "tidcnt was %u before "
353 "reset, now %u, using original\n",
354 dd->ipath_rcvtidcnt, rtmp);
355 rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase);
356 if (rtmp != dd->ipath_rcvtidbase)
357 dev_info(&dd->pcidev->dev, "tidbase was %u before "
358 "reset, now %u, using original\n",
359 dd->ipath_rcvtidbase, rtmp);
360 rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
361 if (rtmp != dd->ipath_rcvegrcnt)
362 dev_info(&dd->pcidev->dev, "egrcnt was %u before "
363 "reset, now %u, using original\n",
364 dd->ipath_rcvegrcnt, rtmp);
365 rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase);
366 if (rtmp != dd->ipath_rcvegrbase)
367 dev_info(&dd->pcidev->dev, "egrbase was %u before "
368 "reset, now %u, using original\n",
369 dd->ipath_rcvegrbase, rtmp);
370
371 return 0;
372}
373
374static int init_pioavailregs(struct ipath_devdata *dd)
375{
376 int ret;
377
378 dd->ipath_pioavailregs_dma = dma_alloc_coherent(
379 &dd->pcidev->dev, PAGE_SIZE, &dd->ipath_pioavailregs_phys,
380 GFP_KERNEL);
381 if (!dd->ipath_pioavailregs_dma) {
382 ipath_dev_err(dd, "failed to allocate PIOavail reg area "
383 "in memory\n");
384 ret = -ENOMEM;
385 goto done;
386 }
387
388 /*
389 * we really want L2 cache aligned, but for current CPUs of
390 * interest, they are the same.
391 */
392 dd->ipath_statusp = (u64 *)
393 ((char *)dd->ipath_pioavailregs_dma +
394 ((2 * L1_CACHE_BYTES +
395 dd->ipath_pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
396 /* copy the current value now that it's really allocated */
397 *dd->ipath_statusp = dd->_ipath_status;
398 /*
399 * setup buffer to hold freeze msg, accessible to apps,
400 * following statusp
401 */
402 dd->ipath_freezemsg = (char *)&dd->ipath_statusp[1];
403 /* and its length */
404 dd->ipath_freezelen = L1_CACHE_BYTES - sizeof(dd->ipath_statusp[0]);
405
406 if (dd->ipath_unit * 64 > (IPATH_PORT0_RCVHDRTAIL_SIZE - 64)) {
407 ipath_dev_err(dd, "unit %u too large for port 0 "
408 "rcvhdrtail buffer size\n", dd->ipath_unit);
409 ret = -ENODEV;
410 }
411 else
412 ret = 0;
413
414 /* so we can get current tail in ipath_kreceive(), per chip */
415 dd->ipath_hdrqtailptr = &ipath_port0_rcvhdrtail[
416 dd->ipath_unit * (64 / sizeof(*ipath_port0_rcvhdrtail))];
417done:
418 return ret;
419}
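
The pointer arithmetic just above carves the shared port-0 rcvhdrtail page into fixed 64-byte slots, one per unit. Below is a minimal stand-alone sketch of that slot calculation; the helper name is hypothetical (the driver open-codes the arithmetic), and the element type is assumed to match ipath_port0_rcvhdrtail.

/*
 * Illustrative only: each unit owns a 64-byte (8-qword) slot in the
 * shared rcvhdrtail area, so unit N starts N * (64 / sizeof(u64))
 * elements into the array, as computed at the end of init_pioavailregs().
 */
static inline volatile __le64 *unit_hdrqtail_slot(volatile __le64 *base,
						  unsigned unit)
{
	return base + unit * (64 / sizeof(*base));
}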
420
421/**
422 * init_shadow_tids - allocate the shadow TID array
423 * @dd: the infinipath device
424 *
425 * allocate the shadow TID array, so we can ipath_munlock previous
426 * entries. It may make more sense to move the pageshadow to the
427 * port data structure, so we only allocate memory for ports actually
428 * in use, since we are at 8k per port now.
429 */
430static void init_shadow_tids(struct ipath_devdata *dd)
431{
432 dd->ipath_pageshadow = (struct page **)
433 vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
434 sizeof(struct page *));
435 if (!dd->ipath_pageshadow)
436 ipath_dev_err(dd, "failed to allocate shadow page * "
437 "array, no expected sends!\n");
438 else
439 memset(dd->ipath_pageshadow, 0,
440 dd->ipath_cfgports * dd->ipath_rcvtidcnt *
441 sizeof(struct page *));
442}
443
444static void enable_chip(struct ipath_devdata *dd,
445 struct ipath_portdata *pd, int reinit)
446{
447 u32 val;
448 int i;
449
450 if (!reinit) {
451 init_waitqueue_head(&ipath_sma_state_wait);
452 }
453 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
454 dd->ipath_rcvctrl);
455
456 /* Enable PIO send, and update of PIOavail regs to memory. */
457 dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE |
458 INFINIPATH_S_PIOBUFAVAILUPD;
459 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
460 dd->ipath_sendctrl);
461
462 /*
463 * enable port 0 receive, and receive interrupt. other ports
464 * done as user opens and inits them.
465 */
466 dd->ipath_rcvctrl = INFINIPATH_R_TAILUPD |
467 (1ULL << INFINIPATH_R_PORTENABLE_SHIFT) |
468 (1ULL << INFINIPATH_R_INTRAVAIL_SHIFT);
469 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
470 dd->ipath_rcvctrl);
471
472 /*
473 * now ready for use. this should be cleared whenever we
474 * detect a reset, or initiate one.
475 */
476 dd->ipath_flags |= IPATH_INITTED;
477
478 /*
479 * init our shadow copies of head from tail values, and write
480 * head values to match.
481 */
482 val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0);
483 (void)ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0);
484 dd->ipath_port0head = ipath_read_ureg32(dd, ur_rcvhdrtail, 0);
485
486 /* Initialize so we interrupt on next packet received */
487 (void)ipath_write_ureg(dd, ur_rcvhdrhead,
488 dd->ipath_rhdrhead_intr_off |
489 dd->ipath_port0head, 0);
490
491 /*
492 * by now pioavail updates to memory should have occurred, so
493 * copy them into our working/shadow registers; this is in
494 * case something went wrong with abort, but mostly to get the
495 * initial values of the generation bit correct.
496 */
497 for (i = 0; i < dd->ipath_pioavregs; i++) {
498 __le64 val;
499
500 /*
501 * Chip Errata bug 6641; even and odd qwords>3 are swapped.
502 */
503 if (i > 3) {
504 if (i & 1)
505 val = dd->ipath_pioavailregs_dma[i - 1];
506 else
507 val = dd->ipath_pioavailregs_dma[i + 1];
508 }
509 else
510 val = dd->ipath_pioavailregs_dma[i];
511 dd->ipath_pioavailshadow[i] = le64_to_cpu(val);
512 }
513 /* can get counters, stats, etc. */
514 dd->ipath_flags |= IPATH_PRESENT;
515}
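
The errata 6641 workaround in the loop above is equivalent to XOR-ing the qword index with 1 once the index exceeds 3. A minimal sketch of that mapping follows; the helper name is hypothetical and the driver itself open-codes the swap in enable_chip().

/*
 * Illustrative only: for qwords above index 3, even and odd entries of
 * the DMA'ed pioavail array are swapped, so shadow entry i is sourced
 * from DMA entry i ^ 1; entries 0-3 are read directly.
 */
static inline unsigned pioavail_dma_index(unsigned i)
{
	return (i > 3) ? (i ^ 1) : i;
}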
516
517static int init_housekeeping(struct ipath_devdata *dd,
518 struct ipath_portdata **pdp, int reinit)
519{
520 char boardn[32];
521 int ret = 0;
522
523 /*
524 * have to clear shadow copies of registers at init that are
525 * not otherwise set here, or all kinds of bizarre things
526 * happen with driver on chip reset
527 */
528 dd->ipath_rcvhdrsize = 0;
529
530 /*
531 * Don't clear ipath_flags as 8bit mode was set before
532 * entering this func. However, we do set the linkstate to
533 * unknown, so we can watch for a transition.
534 */
535 dd->ipath_flags |= IPATH_LINKUNK;
536 dd->ipath_flags &= ~(IPATH_LINKACTIVE | IPATH_LINKARMED |
537 IPATH_LINKDOWN | IPATH_LINKINIT);
538
539 ipath_cdbg(VERBOSE, "Try to read spc chip revision\n");
540 dd->ipath_revision =
541 ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
542
543 /*
544 * set up fundamental info we need to use the chip; we assume
545 * if the revision reg and these regs are OK, we don't need to
546 * special case the rest
547 */
548 dd->ipath_sregbase =
549 ipath_read_kreg32(dd, dd->ipath_kregs->kr_sendregbase);
550 dd->ipath_cregbase =
551 ipath_read_kreg32(dd, dd->ipath_kregs->kr_counterregbase);
552 dd->ipath_uregbase =
553 ipath_read_kreg32(dd, dd->ipath_kregs->kr_userregbase);
554 ipath_cdbg(VERBOSE, "ipath_kregbase %p, sendbase %x usrbase %x, "
555 "cntrbase %x\n", dd->ipath_kregbase, dd->ipath_sregbase,
556 dd->ipath_uregbase, dd->ipath_cregbase);
557 if ((dd->ipath_revision & 0xffffffff) == 0xffffffff
558 || (dd->ipath_sregbase & 0xffffffff) == 0xffffffff
559 || (dd->ipath_cregbase & 0xffffffff) == 0xffffffff
560 || (dd->ipath_uregbase & 0xffffffff) == 0xffffffff) {
561 ipath_dev_err(dd, "Register read failures from chip, "
562 "giving up initialization\n");
563 ret = -ENODEV;
564 goto done;
565 }
566
567 /* clear the initial reset flag, in case first driver load */
568 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
569 INFINIPATH_E_RESET);
570
571 if (reinit)
572 ret = init_chip_reset(dd, pdp);
573 else
574 ret = init_chip_first(dd, pdp);
575
576 if (ret)
577 goto done;
578
579 ipath_cdbg(VERBOSE, "Revision %llx (PCI %x), %u ports, %u tids, "
580 "%u egrtids\n", (unsigned long long) dd->ipath_revision,
581 dd->ipath_pcirev, dd->ipath_portcnt, dd->ipath_rcvtidcnt,
582 dd->ipath_rcvegrcnt);
583
584 if (((dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) &
585 INFINIPATH_R_SOFTWARE_MASK) != IPATH_CHIP_SWVERSION) {
586 ipath_dev_err(dd, "Driver only handles version %d, "
587 "chip swversion is %d (%llx), failng\n",
588 IPATH_CHIP_SWVERSION,
589 (int)(dd->ipath_revision >>
590 INFINIPATH_R_SOFTWARE_SHIFT) &
591 INFINIPATH_R_SOFTWARE_MASK,
592 (unsigned long long) dd->ipath_revision);
593 ret = -ENOSYS;
594 goto done;
595 }
596 dd->ipath_majrev = (u8) ((dd->ipath_revision >>
597 INFINIPATH_R_CHIPREVMAJOR_SHIFT) &
598 INFINIPATH_R_CHIPREVMAJOR_MASK);
599 dd->ipath_minrev = (u8) ((dd->ipath_revision >>
600 INFINIPATH_R_CHIPREVMINOR_SHIFT) &
601 INFINIPATH_R_CHIPREVMINOR_MASK);
602 dd->ipath_boardrev = (u8) ((dd->ipath_revision >>
603 INFINIPATH_R_BOARDID_SHIFT) &
604 INFINIPATH_R_BOARDID_MASK);
605
606 ret = dd->ipath_f_get_boardname(dd, boardn, sizeof boardn);
607
608 snprintf(dd->ipath_boardversion, sizeof(dd->ipath_boardversion),
609 "Driver %u.%u, %s, InfiniPath%u %u.%u, PCI %u, "
610 "SW Compat %u\n",
611 IPATH_CHIP_VERS_MAJ, IPATH_CHIP_VERS_MIN, boardn,
612 (unsigned)(dd->ipath_revision >> INFINIPATH_R_ARCH_SHIFT) &
613 INFINIPATH_R_ARCH_MASK,
614 dd->ipath_majrev, dd->ipath_minrev, dd->ipath_pcirev,
615 (unsigned)(dd->ipath_revision >>
616 INFINIPATH_R_SOFTWARE_SHIFT) &
617 INFINIPATH_R_SOFTWARE_MASK);
618
619 ipath_dbg("%s", dd->ipath_boardversion);
620
621done:
622 return ret;
623}
624
625
626/**
627 * ipath_init_chip - do the actual initialization sequence on the chip
628 * @dd: the infinipath device
629 * @reinit: reinitializing, so don't allocate new memory
630 *
631 * Do the actual initialization sequence on the chip. This is done
632 * both from the init routine called from the PCI infrastructure, and
633 * when we reset the chip, or detect that it was reset internally,
634 * or it's administratively re-enabled.
635 *
636 * Memory allocation here and in called routines is only done in
637 * the first case (reinit == 0). We have to be careful, because even
638 * without memory allocation, we need to re-write all the chip registers
639 * TIDs, etc. after the reset or enable has completed.
640 */
641int ipath_init_chip(struct ipath_devdata *dd, int reinit)
642{
643 int ret = 0, i;
644 u32 val32, kpiobufs;
645 u64 val, atmp;
646 struct ipath_portdata *pd = NULL; /* keep gcc4 happy */
647
648 ret = init_housekeeping(dd, &pd, reinit);
649 /*
650  * we ignore most issues after reporting them, but have to handle
651  * hardware-disabled chips specially, before the generic error
652  * bail-out, or that check would never be reached.
653  */
654 if (ret == 2) {
655 /* unique error, known to ipath_init_one */
656 ret = -EPERM;
657 goto done;
658 }
659 if (ret)
660 goto done;
661
662 /*
663 * We could bump this to allow for full rcvegrcnt + rcvtidcnt,
664 * but then it no longer nicely fits power of two, and since
665 * we now use routines that backend onto __get_free_pages, the
666 * rest would be wasted.
667 */
668 dd->ipath_rcvhdrcnt = dd->ipath_rcvegrcnt;
669 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrcnt,
670 dd->ipath_rcvhdrcnt);
671
672 /*
673 * Set up the shadow copies of the piobufavail registers,
674 * which we compare against the chip registers for now, and
675 * the in memory DMA'ed copies of the registers. This has to
676 * be done early, before we calculate lastport, etc.
677 */
678 val = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
679 /*
680 * calc number of pioavail registers, and save it; we have 2
681 * bits per buffer.
682 */
683 dd->ipath_pioavregs = ALIGN(val, sizeof(u64) * BITS_PER_BYTE / 2)
684 / (sizeof(u64) * BITS_PER_BYTE / 2);
685 if (!ipath_kpiobufs) /* have to have at least 1, for SMA */
686 kpiobufs = ipath_kpiobufs = 1;
687 else if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) <
688 (dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT)) {
689 dev_info(&dd->pcidev->dev, "Too few PIO buffers (%u) "
690 "for %u ports to have %u each!\n",
691 dd->ipath_piobcnt2k + dd->ipath_piobcnt4k,
692 dd->ipath_cfgports, IPATH_MIN_USER_PORT_BUFCNT);
693 kpiobufs = 1; /* reserve just the minimum for SMA/ether */
694 } else
695 kpiobufs = ipath_kpiobufs;
696
697 if (kpiobufs >
698 (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k -
699 (dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT))) {
700 i = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k -
701 (dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT);
702 if (i < 0)
703 i = 0;
704 dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs for "
705 "kernel leaves too few for %d user ports "
706 "(%d each); using %u\n", kpiobufs,
707 dd->ipath_cfgports - 1,
708 IPATH_MIN_USER_PORT_BUFCNT, i);
709 /*
710 * shouldn't change ipath_kpiobufs, because could be
711 * different for different devices...
712 */
713 kpiobufs = i;
714 }
715 dd->ipath_lastport_piobuf =
716 dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - kpiobufs;
717 dd->ipath_pbufsport = dd->ipath_cfgports > 1
718 ? dd->ipath_lastport_piobuf / (dd->ipath_cfgports - 1)
719 : 0;
720 val32 = dd->ipath_lastport_piobuf -
721 (dd->ipath_pbufsport * (dd->ipath_cfgports - 1));
722 if (val32 > 0) {
723 ipath_dbg("allocating %u pbufs/port leaves %u unused, "
724 "add to kernel\n", dd->ipath_pbufsport, val32);
725 dd->ipath_lastport_piobuf -= val32;
726 ipath_dbg("%u pbufs/port leaves %u unused, add to kernel\n",
727 dd->ipath_pbufsport, val32);
728 }
729 dd->ipath_lastpioindex = dd->ipath_lastport_piobuf;
730 ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u "
731 "each for %u user ports\n", kpiobufs,
732 dd->ipath_piobcnt2k + dd->ipath_piobcnt4k,
733 dd->ipath_pbufsport, dd->ipath_cfgports - 1);
734
735 dd->ipath_f_early_init(dd);
736
737 /* early_init sets rcvhdrentsize and rcvhdrsize, so this must be
738 * done after early_init */
739 dd->ipath_hdrqlast =
740 dd->ipath_rcvhdrentsize * (dd->ipath_rcvhdrcnt - 1);
741 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrentsize,
742 dd->ipath_rcvhdrentsize);
743 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
744 dd->ipath_rcvhdrsize);
745
746 if (!reinit) {
747 ret = init_pioavailregs(dd);
748 init_shadow_tids(dd);
749 if (ret)
750 goto done;
751 }
752
753 (void)ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr,
754 dd->ipath_pioavailregs_phys);
755 /*
756 * this is to detect s/w errors, which the h/w works around by
757 * ignoring the low 6 bits of address, if it wasn't aligned.
758 */
759 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpioavailaddr);
760 if (val != dd->ipath_pioavailregs_phys) {
761 ipath_dev_err(dd, "Catastrophic software error, "
762 "SendPIOAvailAddr written as %lx, "
763 "read back as %llx\n",
764 (unsigned long) dd->ipath_pioavailregs_phys,
765 (unsigned long long) val);
766 ret = -EINVAL;
767 goto done;
768 }
769
770 val = ipath_port0_rcvhdrtail_dma + dd->ipath_unit * 64;
771
772 /* verify that the alignment requirement was met */
773 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
774 0, val);
775 atmp = ipath_read_kreg64_port(
776 dd, dd->ipath_kregs->kr_rcvhdrtailaddr, 0);
777 if (val != atmp) {
778 ipath_dev_err(dd, "Catastrophic software error, "
779 "RcvHdrTailAddr0 written as %llx, "
780 "read back as %llx from %x\n",
781 (unsigned long long) val,
782 (unsigned long long) atmp,
783 dd->ipath_kregs->kr_rcvhdrtailaddr);
784 ret = -EINVAL;
785 goto done;
786 }
787
788 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvbthqp, IPATH_KD_QP);
789
790 /*
791 * make sure we are not in freeze, and PIO send enabled, so
792 * writes to pbc happen
793 */
794 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, 0ULL);
795 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
796 ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);
797 ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
798 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
799 INFINIPATH_S_PIOENABLE);
800
801 /*
802 * before error clears, since we expect serdes pll errors during
803 * this, the first time after reset
804 */
805 if (bringup_link(dd)) {
806 dev_info(&dd->pcidev->dev, "Failed to bringup IB link\n");
807 ret = -ENETDOWN;
808 goto done;
809 }
810
811 /*
812 * clear any "expected" hwerrs from reset and/or initialization
813 * clear any that aren't enabled (at least this once), and then
814 * set the enable mask
815 */
816 dd->ipath_f_init_hwerrors(dd);
817 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
818 ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);
819 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
820 dd->ipath_hwerrmask);
821
822 dd->ipath_maskederrs = dd->ipath_ignorederrs;
823 /* clear all */
824 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
825 /* enable errors that are masked, at least this first time. */
826 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
827 ~dd->ipath_maskederrs);
828 /* clear any interrupts up to this point (ints still not enabled) */
829 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
830
831 ipath_stats.sps_lid[dd->ipath_unit] = dd->ipath_lid;
832
833 /*
834 * Set up the port 0 (kernel) rcvhdr q and egr TIDs. If doing
835 * re-init, the simplest way to handle this is to free
836 * existing, and re-allocate.
837 */
838 if (reinit)
839 ipath_free_pddata(dd, 0, 0);
840 dd->ipath_f_tidtemplate(dd);
841 ret = ipath_create_rcvhdrq(dd, pd);
842 if (!ret)
843 ret = create_port0_egr(dd);
844 if (ret)
845 ipath_dev_err(dd, "failed to allocate port 0 (kernel) "
846 "rcvhdrq and/or egr bufs\n");
847 else
848 enable_chip(dd, pd, reinit);
849
850 /*
851 * cause retrigger of pending interrupts ignored during init,
852 * even if we had errors
853 */
854 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
855
856 if (!dd->ipath_stats_timer_active) {
857 /*
858 * first init, or after an admin disable/enable
859 * set up stats retrieval timer, even if we had errors
860 * in last portion of setup
861 */
862 init_timer(&dd->ipath_stats_timer);
863 dd->ipath_stats_timer.function = ipath_get_faststats;
864 dd->ipath_stats_timer.data = (unsigned long) dd;
865 /* every 5 seconds; */
866 dd->ipath_stats_timer.expires = jiffies + 5 * HZ;
867 /* takes ~16 seconds to overflow at full IB 4x bandwidth */
868 add_timer(&dd->ipath_stats_timer);
869 dd->ipath_stats_timer_active = 1;
870 }
871
872done:
873 if (!ret) {
874 ipath_get_guid(dd);
875 *dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT;
876 if (!dd->ipath_f_intrsetup(dd)) {
877 /* now we can enable all interrupts from the chip */
878 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask,
879 -1LL);
880 /* force re-interrupt of any pending interrupts. */
881 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear,
882 0ULL);
883 /* chip is usable; mark it as initialized */
884 *dd->ipath_statusp |= IPATH_STATUS_INITTED;
885 } else
886 ipath_dev_err(dd, "No interrupts enabled, couldn't "
887 "setup interrupt address\n");
888
889 if (dd->ipath_cfgports > ipath_stats.sps_nports)
890 /*
891 * sps_nports is a global, so, we set it to
892 * the highest number of ports of any of the
893 * chips we find; we never decrement it, at
894 * least for now. Since this might have changed
895 * over disable/enable or prior to reset, always
896 * do the check and potentially adjust.
897 */
898 ipath_stats.sps_nports = dd->ipath_cfgports;
899 } else
900 ipath_dbg("Failed (%d) to initialize chip\n", ret);
901
902 /* if ret is non-zero, we probably should do some cleanup
903 here... */
904 return ret;
905}
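
The PIO-availability sizing early in ipath_init_chip() packs two status bits per buffer into 64-bit qwords, i.e. 32 buffers per register, so the ALIGN() expression is simply a round-up division. A hedged stand-alone restatement of that arithmetic, with a hypothetical helper name that is not part of the driver:

/*
 * Illustrative only: number of 64-bit pioavail registers needed for
 * nbufs PIO buffers at 2 bits per buffer (32 buffers per qword),
 * matching ALIGN(nbufs, 32) / 32 as used in ipath_init_chip().
 */
static inline u32 pioavail_nregs(u32 nbufs)
{
	const u32 bufs_per_qword = (sizeof(u64) * 8) / 2;	/* 32 */

	return (nbufs + bufs_per_qword - 1) / bufs_per_qword;
}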
906
907static int ipath_set_kpiobufs(const char *str, struct kernel_param *kp)
908{
909 struct ipath_devdata *dd;
910 unsigned long flags;
911 unsigned short val;
912 int ret;
913
914 ret = ipath_parse_ushort(str, &val);
915
916 spin_lock_irqsave(&ipath_devs_lock, flags);
917
918 if (ret < 0)
919 goto bail;
920
921 if (val == 0) {
922 ret = -EINVAL;
923 goto bail;
924 }
925
926 list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
927 if (dd->ipath_kregbase)
928 continue;
929 if (val > (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k -
930 (dd->ipath_cfgports *
931 IPATH_MIN_USER_PORT_BUFCNT)))
932 {
933 ipath_dev_err(
934 dd,
935 "Allocating %d PIO bufs for kernel leaves "
936 "too few for %d user ports (%d each)\n",
937 val, dd->ipath_cfgports - 1,
938 IPATH_MIN_USER_PORT_BUFCNT);
939 ret = -EINVAL;
940 goto bail;
941 }
942 dd->ipath_lastport_piobuf =
943 dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - val;
944 }
945
946 ret = 0;
947bail:
948 spin_unlock_irqrestore(&ipath_devs_lock, flags);
949
950 return ret;
951}
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
new file mode 100644
index 000000000000..60f5f4108069
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -0,0 +1,841 @@
1/*
2 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/pci.h>
34
35#include "ipath_kernel.h"
36#include "ips_common.h"
37#include "ipath_layer.h"
38
39#define E_SUM_PKTERRS \
40 (INFINIPATH_E_RHDRLEN | INFINIPATH_E_RBADTID | \
41 INFINIPATH_E_RBADVERSION | INFINIPATH_E_RHDR | \
42 INFINIPATH_E_RLONGPKTLEN | INFINIPATH_E_RSHORTPKTLEN | \
43 INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RMINPKTLEN | \
44 INFINIPATH_E_RFORMATERR | INFINIPATH_E_RUNSUPVL | \
45 INFINIPATH_E_RUNEXPCHAR | INFINIPATH_E_REBP)
46
47#define E_SUM_ERRS \
48 (INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM | \
49 INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
50 INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNSUPVL | \
51 INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
52 INFINIPATH_E_INVALIDADDR)
53
54static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs)
55{
56 unsigned long sbuf[4];
57 u64 ignore_this_time = 0;
58 u32 piobcnt;
59
60 /* it's possible that sendbuffererror could be valid */
61 piobcnt = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
62 /* read these before writing errorclear */
63 sbuf[0] = ipath_read_kreg64(
64 dd, dd->ipath_kregs->kr_sendbuffererror);
65 sbuf[1] = ipath_read_kreg64(
66 dd, dd->ipath_kregs->kr_sendbuffererror + 1);
67 if (piobcnt > 128) {
68 sbuf[2] = ipath_read_kreg64(
69 dd, dd->ipath_kregs->kr_sendbuffererror + 2);
70 sbuf[3] = ipath_read_kreg64(
71 dd, dd->ipath_kregs->kr_sendbuffererror + 3);
72 }
73
74 if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) {
75 int i;
76
77 ipath_cdbg(PKT, "SendbufErrs %lx %lx ", sbuf[0], sbuf[1]);
78 if (ipath_debug & __IPATH_PKTDBG && piobcnt > 128)
79 printk("%lx %lx ", sbuf[2], sbuf[3]);
80 for (i = 0; i < piobcnt; i++) {
81 if (test_bit(i, sbuf)) {
82 u32 __iomem *piobuf;
83 if (i < dd->ipath_piobcnt2k)
84 piobuf = (u32 __iomem *)
85 (dd->ipath_pio2kbase +
86 i * dd->ipath_palign);
87 else
88 piobuf = (u32 __iomem *)
89 (dd->ipath_pio4kbase +
90 (i - dd->ipath_piobcnt2k) *
91 dd->ipath_4kalign);
92
93 ipath_cdbg(PKT,
94 "PIObuf[%u] @%p pbc is %x; ",
95 i, piobuf, readl(piobuf));
96
97 ipath_disarm_piobufs(dd, i, 1);
98 }
99 }
100 if (ipath_debug & __IPATH_PKTDBG)
101 printk("\n");
102 }
103 if ((errs & (INFINIPATH_E_SDROPPEDDATAPKT |
104 INFINIPATH_E_SDROPPEDSMPPKT |
105 INFINIPATH_E_SMINPKTLEN)) &&
106 !(dd->ipath_flags & IPATH_LINKACTIVE)) {
107 /*
108 * This can happen when SMA is trying to bring the link
109 * up, but the IB link changes state at the "wrong" time.
110 * The IB logic then complains that the packet isn't
111 * valid. We don't want to confuse people, so we just
112 * don't print them, except at debug
113 */
114 ipath_dbg("Ignoring pktsend errors %llx, because not "
115 "yet active\n", (unsigned long long) errs);
116 ignore_this_time = INFINIPATH_E_SDROPPEDDATAPKT |
117 INFINIPATH_E_SDROPPEDSMPPKT |
118 INFINIPATH_E_SMINPKTLEN;
119 }
120
121 return ignore_this_time;
122}
123
124/* return the strings for the most common link states */
125static char *ib_linkstate(u32 linkstate)
126{
127 char *ret;
128
129 switch (linkstate) {
130 case IPATH_IBSTATE_INIT:
131 ret = "Init";
132 break;
133 case IPATH_IBSTATE_ARM:
134 ret = "Arm";
135 break;
136 case IPATH_IBSTATE_ACTIVE:
137 ret = "Active";
138 break;
139 default:
140 ret = "Down";
141 }
142
143 return ret;
144}
145
146static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
147 ipath_err_t errs, int noprint)
148{
149 u64 val;
150 u32 ltstate, lstate;
151
152 /*
153 * even if diags are enabled, we want to notice LINKINIT, etc.
154 * We just don't want to change the LED state, or
155 * dd->ipath_kregs->kr_ibcctrl
156 */
157 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
158 lstate = val & IPATH_IBSTATE_MASK;
159 if (lstate == IPATH_IBSTATE_INIT || lstate == IPATH_IBSTATE_ARM ||
160 lstate == IPATH_IBSTATE_ACTIVE) {
161 /*
162 * only print at SMA if there is a change, debug if not
163 * (sometimes we want to know that, usually not).
164 */
165 if (lstate == ((unsigned) dd->ipath_lastibcstat
166 & IPATH_IBSTATE_MASK)) {
167 ipath_dbg("Status change intr but no change (%s)\n",
168 ib_linkstate(lstate));
169 }
170 else
171 ipath_cdbg(SMA, "Unit %u link state %s, last "
172 "was %s\n", dd->ipath_unit,
173 ib_linkstate(lstate),
174 ib_linkstate((unsigned)
175 dd->ipath_lastibcstat
176 & IPATH_IBSTATE_MASK));
177 }
178 else {
179 lstate = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK;
180 if (lstate == IPATH_IBSTATE_INIT ||
181 lstate == IPATH_IBSTATE_ARM ||
182 lstate == IPATH_IBSTATE_ACTIVE)
183 ipath_cdbg(SMA, "Unit %u link state down"
184 " (state 0x%x), from %s\n",
185 dd->ipath_unit,
186 (u32)val & IPATH_IBSTATE_MASK,
187 ib_linkstate(lstate));
188 else
189 ipath_cdbg(VERBOSE, "Unit %u link state changed "
190 "to 0x%x from down (%x)\n",
191 dd->ipath_unit, (u32) val, lstate);
192 }
193 ltstate = (val >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
194 INFINIPATH_IBCS_LINKTRAININGSTATE_MASK;
195 lstate = (val >> INFINIPATH_IBCS_LINKSTATE_SHIFT) &
196 INFINIPATH_IBCS_LINKSTATE_MASK;
197
198 if (ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
199 ltstate == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
200 u32 last_ltstate;
201
202 /*
203 * Ignore cycling back and forth from Polling.Active
204 * to Polling.Quiet while waiting for the other end of
205 * the link to come up. We will cycle back and forth
206 * between them if no cable is plugged in,
207 * the other device is powered off or disabled, etc.
208 */
209 last_ltstate = (dd->ipath_lastibcstat >>
210 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT)
211 & INFINIPATH_IBCS_LINKTRAININGSTATE_MASK;
212 if (last_ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE
213 || last_ltstate ==
214 INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
215 if (dd->ipath_ibpollcnt > 40) {
216 dd->ipath_flags |= IPATH_NOCABLE;
217 *dd->ipath_statusp |=
218 IPATH_STATUS_IB_NOCABLE;
219 } else
220 dd->ipath_ibpollcnt++;
221 goto skip_ibchange;
222 }
223 }
224 dd->ipath_ibpollcnt = 0; /* some state other than 2 or 3 */
225 ipath_stats.sps_iblink++;
226 if (ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
227 dd->ipath_flags |= IPATH_LINKDOWN;
228 dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
229 | IPATH_LINKACTIVE |
230 IPATH_LINKARMED);
231 *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
232 if (!noprint) {
233 if (((dd->ipath_lastibcstat >>
234 INFINIPATH_IBCS_LINKSTATE_SHIFT) &
235 INFINIPATH_IBCS_LINKSTATE_MASK)
236 == INFINIPATH_IBCS_L_STATE_ACTIVE)
237 /* if from up to down be more vocal */
238 ipath_cdbg(SMA,
239 "Unit %u link now down (%s)\n",
240 dd->ipath_unit,
241 ipath_ibcstatus_str[ltstate]);
242 else
243 ipath_cdbg(VERBOSE, "Unit %u link is "
244 "down (%s)\n", dd->ipath_unit,
245 ipath_ibcstatus_str[ltstate]);
246 }
247
248 dd->ipath_f_setextled(dd, lstate, ltstate);
249 } else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_ACTIVE) {
250 dd->ipath_flags |= IPATH_LINKACTIVE;
251 dd->ipath_flags &=
252 ~(IPATH_LINKUNK | IPATH_LINKINIT | IPATH_LINKDOWN |
253 IPATH_LINKARMED | IPATH_NOCABLE);
254 *dd->ipath_statusp &= ~IPATH_STATUS_IB_NOCABLE;
255 *dd->ipath_statusp |=
256 IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
257 dd->ipath_f_setextled(dd, lstate, ltstate);
258
259 __ipath_layer_intr(dd, IPATH_LAYER_INT_IF_UP);
260 } else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_INIT) {
261 /*
262 * set INIT and DOWN. Down is checked by most of the other
263 * code, but INIT is useful to know in a few places.
264 */
265 dd->ipath_flags |= IPATH_LINKINIT | IPATH_LINKDOWN;
266 dd->ipath_flags &=
267 ~(IPATH_LINKUNK | IPATH_LINKACTIVE | IPATH_LINKARMED
268 | IPATH_NOCABLE);
269 *dd->ipath_statusp &= ~(IPATH_STATUS_IB_NOCABLE
270 | IPATH_STATUS_IB_READY);
271 dd->ipath_f_setextled(dd, lstate, ltstate);
272 } else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_ARM) {
273 dd->ipath_flags |= IPATH_LINKARMED;
274 dd->ipath_flags &=
275 ~(IPATH_LINKUNK | IPATH_LINKDOWN | IPATH_LINKINIT |
276 IPATH_LINKACTIVE | IPATH_NOCABLE);
277 *dd->ipath_statusp &= ~(IPATH_STATUS_IB_NOCABLE
278 | IPATH_STATUS_IB_READY);
279 dd->ipath_f_setextled(dd, lstate, ltstate);
280 } else {
281 if (!noprint)
282 ipath_dbg("IBstatuschange unit %u: %s (%x)\n",
283 dd->ipath_unit,
284 ipath_ibcstatus_str[ltstate], ltstate);
285 }
286skip_ibchange:
287 dd->ipath_lastibcstat = val;
288}
289
290static void handle_supp_msgs(struct ipath_devdata *dd,
291 unsigned supp_msgs, char msg[512])
292{
293 /*
294 * Print the message unless it's ibc status change only, which
295 * happens so often we never want to count it.
296 */
297 if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {
298 ipath_decode_err(msg, sizeof msg, dd->ipath_lasterror &
299 ~INFINIPATH_E_IBSTATUSCHANGED);
300 if (dd->ipath_lasterror &
301 ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL))
302 ipath_dev_err(dd, "Suppressed %u messages for "
303 "fast-repeating errors (%s) (%llx)\n",
304 supp_msgs, msg,
305 (unsigned long long)
306 dd->ipath_lasterror);
307 else {
308 /*
309 * rcvegrfull and rcvhdrqfull are "normal", for some
310 * types of processes (mostly benchmarks) that send
311 * huge numbers of messages, while not processing
312 * them. So only complain about these at debug
313 * level.
314 */
315 ipath_dbg("Suppressed %u messages for %s\n",
316 supp_msgs, msg);
317 }
318 }
319}
320
321static unsigned handle_frequent_errors(struct ipath_devdata *dd,
322 ipath_err_t errs, char msg[512],
323 int *noprint)
324{
325 unsigned long nc;
326 static unsigned long nextmsg_time;
327 static unsigned nmsgs, supp_msgs;
328
329 /*
330 * Throttle back "fast" messages to no more than 10 per 5 seconds.
331 * This isn't perfect, but it's a reasonable heuristic. If we get
332 * more than 10, give a 6x longer delay.
333 */
334 nc = jiffies;
335 if (nmsgs > 10) {
336 if (time_before(nc, nextmsg_time)) {
337 *noprint = 1;
338 if (!supp_msgs++)
339 nextmsg_time = nc + HZ * 3;
340 }
341 else if (supp_msgs) {
342 handle_supp_msgs(dd, supp_msgs, msg);
343 supp_msgs = 0;
344 nmsgs = 0;
345 }
346 }
347 else if (!nmsgs++ || time_after(nc, nextmsg_time))
348 nextmsg_time = nc + HZ / 2;
349
350 return supp_msgs;
351}
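
handle_frequent_errors() above is a small time-window rate limiter: after an initial burst of messages it suppresses further printing until the window expires, counting what was dropped. The sketch below shows the same shape with made-up names and parameters, not the driver's exact thresholds, and assumes the usual kernel jiffies helpers.

/*
 * Illustrative only: after an initial burst of 'burst' messages, at
 * most one message per 'window' jiffies is printed; everything else is
 * counted in *suppressed.  Returns nonzero when the current message
 * should be suppressed.
 */
static int rate_limited(unsigned *nmsgs, unsigned *suppressed,
			unsigned long *deadline, unsigned burst,
			unsigned long window)
{
	unsigned long now = jiffies;

	if (*nmsgs < burst || time_after(now, *deadline)) {
		(*nmsgs)++;
		*deadline = now + window;
		return 0;	/* print this one */
	}
	(*suppressed)++;
	return 1;		/* suppress */
}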
352
353static void handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
354{
355 char msg[512];
356 u64 ignore_this_time = 0;
357 int i;
358 int chkerrpkts = 0, noprint = 0;
359 unsigned supp_msgs;
360
361 supp_msgs = handle_frequent_errors(dd, errs, msg, &noprint);
362
363 /*
364 * don't report errors that are masked (includes those always
365 * ignored)
366 */
367 errs &= ~dd->ipath_maskederrs;
368
369 /* do these first, they are most important */
370 if (errs & INFINIPATH_E_HARDWARE) {
371 /* reuse same msg buf */
372 dd->ipath_f_handle_hwerrors(dd, msg, sizeof msg);
373 }
374
375 if (!noprint && (errs & ~infinipath_e_bitsextant))
376 ipath_dev_err(dd, "error interrupt with unknown errors "
377 "%llx set\n", (unsigned long long)
378 (errs & ~infinipath_e_bitsextant));
379
380 if (errs & E_SUM_ERRS)
381 ignore_this_time = handle_e_sum_errs(dd, errs);
382
383 if (supp_msgs == 250000) {
384 /*
385 * It's not entirely reasonable to assume that the errors set
386 * in the last clear period are all responsible for the
387 * problem, but the alternative is to assume that only those
388 * on this particular interrupt are, which also isn't great
389 */
390 dd->ipath_maskederrs |= dd->ipath_lasterror | errs;
391 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
392 ~dd->ipath_maskederrs);
393 ipath_decode_err(msg, sizeof msg,
394 (dd->ipath_maskederrs & ~dd->
395 ipath_ignorederrs));
396
397 if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) &
398 ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL))
399 ipath_dev_err(dd, "Disabling error(s) %llx because "
400 "occuring too frequently (%s)\n",
401 (unsigned long long)
402 (dd->ipath_maskederrs &
403 ~dd->ipath_ignorederrs), msg);
404 else {
405 /*
406 * rcvegrfull and rcvhdrqfull are "normal",
407 * for some types of processes (mostly benchmarks)
408 * that send huge numbers of messages, while not
409 * processing them. So only complain about
410 * these at debug level.
411 */
412 ipath_dbg("Disabling frequent queue full errors "
413 "(%s)\n", msg);
414 }
415
416 /*
417 * Re-enable the masked errors after around 3 minutes, in
418 * ipath_get_faststats(). If we have a series of fast
419 * repeating but different errors, the interval will keep
420 * stretching out, but that's OK, as that's pretty
421 * catastrophic.
422 */
423 dd->ipath_unmasktime = jiffies + HZ * 180;
424 }
425
426 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, errs);
427 if (ignore_this_time)
428 errs &= ~ignore_this_time;
429 if (errs & ~dd->ipath_lasterror) {
430 errs &= ~dd->ipath_lasterror;
431 /* never suppress duplicate hwerrors or ibstatuschange */
432 dd->ipath_lasterror |= errs &
433 ~(INFINIPATH_E_HARDWARE |
434 INFINIPATH_E_IBSTATUSCHANGED);
435 }
436 if (!errs)
437 return;
438
439 if (!noprint)
440 /*
441 * the ones we mask off are handled specially below or above
442 */
443 ipath_decode_err(msg, sizeof msg,
444 errs & ~(INFINIPATH_E_IBSTATUSCHANGED |
445 INFINIPATH_E_RRCVEGRFULL |
446 INFINIPATH_E_RRCVHDRFULL |
447 INFINIPATH_E_HARDWARE));
448 else
449 /* so we don't need if (!noprint) at strlcat's below */
450 *msg = 0;
451
452 if (errs & E_SUM_PKTERRS) {
453 ipath_stats.sps_pkterrs++;
454 chkerrpkts = 1;
455 }
456 if (errs & E_SUM_ERRS)
457 ipath_stats.sps_errs++;
458
459 if (errs & (INFINIPATH_E_RICRC | INFINIPATH_E_RVCRC)) {
460 ipath_stats.sps_crcerrs++;
461 chkerrpkts = 1;
462 }
463
464 /*
465 * We don't want to print these two as they happen, or we can make
466 * the situation even worse, because it takes so long to print
467 * messages to serial consoles. Kernel ports get printed from
468 * fast_stats, no more than every 5 seconds, user ports get printed
469 * on close
470 */
471 if (errs & INFINIPATH_E_RRCVHDRFULL) {
472 int any;
473 u32 hd, tl;
474 ipath_stats.sps_hdrqfull++;
475 for (any = i = 0; i < dd->ipath_cfgports; i++) {
476 struct ipath_portdata *pd = dd->ipath_pd[i];
477 if (i == 0) {
478 hd = dd->ipath_port0head;
479 tl = (u32) le64_to_cpu(
480 *dd->ipath_hdrqtailptr);
481 } else if (pd && pd->port_cnt &&
482 pd->port_rcvhdrtail_kvaddr) {
483 /*
484 * don't report same point multiple times,
485 * except kernel
486 */
487 tl = (u32) * pd->port_rcvhdrtail_kvaddr;
488 if (tl == dd->ipath_lastrcvhdrqtails[i])
489 continue;
490 hd = ipath_read_ureg32(dd, ur_rcvhdrhead,
491 i);
492 } else
493 continue;
494 if (hd == (tl + 1) ||
495 (!hd && tl == dd->ipath_hdrqlast)) {
496 dd->ipath_lastrcvhdrqtails[i] = tl;
497 pd->port_hdrqfull++;
498 if (i == 0)
499 chkerrpkts = 1;
500 }
501 }
502 }
503 if (errs & INFINIPATH_E_RRCVEGRFULL) {
504 /*
505 * since this is of less importance and not likely to
506 * happen without also getting hdrfull, only count
507 * occurrences; don't check each port (or even the kernel
508 * vs user)
509 */
510 ipath_stats.sps_etidfull++;
511 if (dd->ipath_port0head !=
512 (u32) le64_to_cpu(*dd->ipath_hdrqtailptr))
513 chkerrpkts = 1;
514 }
515
516 /*
517 * do this before IBSTATUSCHANGED, in case both bits set in a single
518 * interrupt; we want the STATUSCHANGE to "win", so our
519 * internal copy of the state machine is updated correctly
520 */
521 if (errs & INFINIPATH_E_RIBLOSTLINK) {
522 /*
523 * force through block below
524 */
525 errs |= INFINIPATH_E_IBSTATUSCHANGED;
526 ipath_stats.sps_iblink++;
527 dd->ipath_flags |= IPATH_LINKDOWN;
528 dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
529 | IPATH_LINKARMED | IPATH_LINKACTIVE);
530 *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
531 if (!noprint) {
532 u64 st = ipath_read_kreg64(
533 dd, dd->ipath_kregs->kr_ibcstatus);
534
535 ipath_dbg("Lost link, link now down (%s)\n",
536 ipath_ibcstatus_str[st & 0xf]);
537 }
538 }
539 if (errs & INFINIPATH_E_IBSTATUSCHANGED)
540 handle_e_ibstatuschanged(dd, errs, noprint);
541
542 if (errs & INFINIPATH_E_RESET) {
543 if (!noprint)
544 ipath_dev_err(dd, "Got reset, requires re-init "
545 "(unload and reload driver)\n");
546 dd->ipath_flags &= ~IPATH_INITTED; /* needs re-init */
547 /* mark as having had error */
548 *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
549 *dd->ipath_statusp &= ~IPATH_STATUS_IB_CONF;
550 }
551
552 if (!noprint && *msg)
553 ipath_dev_err(dd, "%s error\n", msg);
554 if (dd->ipath_sma_state_wanted & dd->ipath_flags) {
555 ipath_cdbg(VERBOSE, "sma wanted state %x, iflags now %x, "
556 "waking\n", dd->ipath_sma_state_wanted,
557 dd->ipath_flags);
558 wake_up_interruptible(&ipath_sma_state_wait);
559 }
560
561 if (chkerrpkts)
562 /* process possible error packets in hdrq */
563 ipath_kreceive(dd);
564}
565
566/* this is separate to allow for better optimization of ipath_intr() */
567
568static void ipath_bad_intr(struct ipath_devdata *dd, u32 * unexpectp)
569{
570 /*
571 * unexpected interrupts sometimes happen during driver init and
572 * unload; we don't want to process any interrupts at that point
573 */
574
575 /* this is just a bandaid, not a fix, if something goes badly
576 * wrong */
577 if (++*unexpectp > 100) {
578 if (++*unexpectp > 105) {
579 /*
580 * ok, we must be taking somebody else's interrupts,
581 * due to a messed up mptable and/or PIRQ table, so
582 * unregister the interrupt. We've seen this during
583 * linuxbios development work, and it may happen in
584 * the future again.
585 */
586 if (dd->pcidev && dd->pcidev->irq) {
587 ipath_dev_err(dd, "Now %u unexpected "
588 "interrupts, unregistering "
589 "interrupt handler\n",
590 *unexpectp);
591 ipath_dbg("free_irq of irq %x\n",
592 dd->pcidev->irq);
593 free_irq(dd->pcidev->irq, dd);
594 }
595 }
596 if (ipath_read_kreg32(dd, dd->ipath_kregs->kr_intmask)) {
597 ipath_dev_err(dd, "%u unexpected interrupts, "
598 "disabling interrupts completely\n",
599 *unexpectp);
600 /*
601 * disable all interrupts, something is very wrong
602 */
603 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask,
604 0ULL);
605 }
606 } else if (*unexpectp > 1)
607 ipath_dbg("Interrupt when not ready, should not happen, "
608 "ignoring\n");
609}
610
611static void ipath_bad_regread(struct ipath_devdata *dd)
612{
613 static int allbits;
614
615 /* separate routine, for better optimization of ipath_intr() */
616
617 /*
618 * We print the message and disable interrupts, in hope of
619 * having a better chance of debugging the problem.
620 */
621 ipath_dev_err(dd,
622 "Read of interrupt status failed (all bits set)\n");
623 if (allbits++) {
624 /* disable all interrupts, something is very wrong */
625 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
626 if (allbits == 2) {
627 ipath_dev_err(dd, "Still bad interrupt status, "
628 "unregistering interrupt\n");
629 free_irq(dd->pcidev->irq, dd);
630 } else if (allbits > 2) {
631 if ((allbits % 10000) == 0)
632 printk(".");
633 } else
634 ipath_dev_err(dd, "Disabling interrupts, "
635 "multiple errors\n");
636 }
637}
638
639static void handle_port_pioavail(struct ipath_devdata *dd)
640{
641 u32 i;
642 /*
643 * start from port 1, since for now port 0 is never using
644 * wait_event for PIO
645 */
646 for (i = 1; dd->ipath_portpiowait && i < dd->ipath_cfgports; i++) {
647 struct ipath_portdata *pd = dd->ipath_pd[i];
648
649 if (pd && pd->port_cnt &&
650 dd->ipath_portpiowait & (1U << i)) {
651 clear_bit(i, &dd->ipath_portpiowait);
652 if (test_bit(IPATH_PORT_WAITING_PIO,
653 &pd->port_flag)) {
654 clear_bit(IPATH_PORT_WAITING_PIO,
655 &pd->port_flag);
656 wake_up_interruptible(&pd->port_wait);
657 }
658 }
659 }
660}
661
662static void handle_layer_pioavail(struct ipath_devdata *dd)
663{
664 int ret;
665
666 ret = __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE);
667 if (ret > 0)
668 goto clear;
669
670 ret = __ipath_verbs_piobufavail(dd);
671 if (ret > 0)
672 goto clear;
673
674 return;
675clear:
676 set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
677 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
678 dd->ipath_sendctrl);
679}
680
681static void handle_rcv(struct ipath_devdata *dd, u32 istat)
682{
683 u64 portr;
684 int i;
685 int rcvdint = 0;
686
687 portr = ((istat >> INFINIPATH_I_RCVAVAIL_SHIFT) &
688 infinipath_i_rcvavail_mask)
689 | ((istat >> INFINIPATH_I_RCVURG_SHIFT) &
690 infinipath_i_rcvurg_mask);
691 for (i = 0; i < dd->ipath_cfgports; i++) {
692 struct ipath_portdata *pd = dd->ipath_pd[i];
693 if (portr & (1 << i) && pd &&
694 pd->port_cnt) {
695 if (i == 0)
696 ipath_kreceive(dd);
697 else if (test_bit(IPATH_PORT_WAITING_RCV,
698 &pd->port_flag)) {
699 int rcbit;
700 clear_bit(IPATH_PORT_WAITING_RCV,
701 &pd->port_flag);
702 rcbit = i + INFINIPATH_R_INTRAVAIL_SHIFT;
703 clear_bit(1UL << rcbit, &dd->ipath_rcvctrl);
704 wake_up_interruptible(&pd->port_wait);
705 rcvdint = 1;
706 }
707 }
708 }
709 if (rcvdint) {
710 /* only want to take one interrupt, so turn off the rcv
711 * interrupt for all the ports that we did the wakeup on
712 * (but never for kernel port)
713 */
714 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
715 dd->ipath_rcvctrl);
716 }
717}
718
719irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
720{
721 struct ipath_devdata *dd = data;
722 u32 istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);
723 ipath_err_t estat = 0;
724 static unsigned unexpected = 0;
725 irqreturn_t ret;
726
727 if (unlikely(!istat)) {
728 ipath_stats.sps_nullintr++;
729 ret = IRQ_NONE; /* not our interrupt, or already handled */
730 goto bail;
731 }
732 if (unlikely(istat == -1)) {
733 ipath_bad_regread(dd);
734 /* don't know if it was our interrupt or not */
735 ret = IRQ_NONE;
736 goto bail;
737 }
738
739 ipath_stats.sps_ints++;
740
741 /*
742 * this needs to be flags&initted, not statusp, so we keep
743 * taking interrupts even after link goes down, etc.
744 * Also, we *must* clear the interrupt at some point, or we won't
745 * take it again, which can be real bad for errors, etc...
746 */
747
748 if (!(dd->ipath_flags & IPATH_INITTED)) {
749 ipath_bad_intr(dd, &unexpected);
750 ret = IRQ_NONE;
751 goto bail;
752 }
753 if (unexpected)
754 unexpected = 0;
755
756 ipath_cdbg(VERBOSE, "intr stat=0x%x\n", istat);
757
758 if (istat & ~infinipath_i_bitsextant)
759 ipath_dev_err(dd,
760 "interrupt with unknown interrupts %x set\n",
761 istat & (u32) ~ infinipath_i_bitsextant);
762
763 if (istat & INFINIPATH_I_ERROR) {
764 ipath_stats.sps_errints++;
765 estat = ipath_read_kreg64(dd,
766 dd->ipath_kregs->kr_errorstatus);
767 if (!estat)
768 dev_info(&dd->pcidev->dev, "error interrupt (%x), "
769 "but no error bits set!\n", istat);
770 else if (estat == -1LL)
771 /*
772 * should we try clearing all, or hope next read
773 * works?
774 */
775 ipath_dev_err(dd, "Read of error status failed "
776 "(all bits set); ignoring\n");
777 else
778 handle_errors(dd, estat);
779 }
780
781 if (istat & INFINIPATH_I_GPIO) {
782 if (unlikely(!(dd->ipath_flags & IPATH_GPIO_INTR))) {
783 u32 gpiostatus;
784 gpiostatus = ipath_read_kreg32(
785 dd, dd->ipath_kregs->kr_gpio_status);
786 ipath_dbg("Unexpected GPIO interrupt bits %x\n",
787 gpiostatus);
788 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
789 gpiostatus);
790 }
791 else {
792 /* Clear GPIO status bit 2 */
793 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
794 (u64) (1 << 2));
795
796 /*
797 * Packets are available in the port 0 rcv queue.
798 * Eventually this needs to be generalized to check
799 * IPATH_GPIO_INTR, and the specific GPIO bit, if
800 * GPIO interrupts are used for anything else.
801 */
802 ipath_kreceive(dd);
803 }
804 }
805
806 /*
807 * clear the ones we will deal with on this round
808 * We clear it early, mostly for receive interrupts, so we
809 * know the chip will have seen this by the time we process
810 * the queue, and will re-interrupt if necessary. The processor
811 * itself won't take the interrupt again until we return.
812 */
813 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
814
815 if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
816 clear_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
817 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
818 dd->ipath_sendctrl);
819
820 if (dd->ipath_portpiowait)
821 handle_port_pioavail(dd);
822
823 handle_layer_pioavail(dd);
824 }
825
826 /*
827 * we check for both transition from empty to non-empty, and urgent
828 * packets (those with the interrupt bit set in the header)
829 */
830
831 if (istat & ((infinipath_i_rcvavail_mask <<
832 INFINIPATH_I_RCVAVAIL_SHIFT)
833 | (infinipath_i_rcvurg_mask <<
834 INFINIPATH_I_RCVURG_SHIFT)))
835 handle_rcv(dd, istat);
836
837 ret = IRQ_HANDLED;
838
839bail:
840 return ret;
841}
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
new file mode 100644
index 000000000000..159d0aed31a5
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -0,0 +1,884 @@
1#ifndef _IPATH_KERNEL_H
2#define _IPATH_KERNEL_H
3/*
4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35/*
36 * This header file is the base header file for infinipath kernel code
37 * ipath_user.h serves a similar purpose for user code.
38 */
39
40#include <linux/interrupt.h>
41#include <asm/io.h>
42
43#include "ipath_common.h"
44#include "ipath_debug.h"
45#include "ipath_registers.h"
46
47/* only s/w major version of InfiniPath we can handle */
48#define IPATH_CHIP_VERS_MAJ 2U
49
50/* don't care about this except printing */
51#define IPATH_CHIP_VERS_MIN 0U
52
53/* temporary, maybe always */
54extern struct infinipath_stats ipath_stats;
55
56#define IPATH_CHIP_SWVERSION IPATH_CHIP_VERS_MAJ
57
58struct ipath_portdata {
59 void **port_rcvegrbuf;
60 dma_addr_t *port_rcvegrbuf_phys;
61 /* rcvhdrq base, needs mmap before useful */
62 void *port_rcvhdrq;
63 /* kernel virtual address where hdrqtail is updated */
64 u64 *port_rcvhdrtail_kvaddr;
65 /* page * used for uaddr */
66 struct page *port_rcvhdrtail_pagep;
67 /*
68 * temp buffer for expected send setup, allocated at open, instead
69 * of each setup call
70 */
71 void *port_tid_pg_list;
72 /* when waiting for rcv or pioavail */
73 wait_queue_head_t port_wait;
74 /*
75 * rcvegr bufs base, physical, must fit
76 * in 44 bits (so 32 bit programs mmap64 44 bit works)
77 */
78 dma_addr_t port_rcvegr_phys;
79 /* mmap of hdrq, must fit in 44 bits */
80 dma_addr_t port_rcvhdrq_phys;
81 /*
82 * the actual user address that we ipath_mlock'ed, so we can
83 * ipath_munlock it at close
84 */
85 unsigned long port_rcvhdrtail_uaddr;
86 /*
87 * number of opens on this instance (0 or 1; ignoring forks, dup,
88 * etc. for now)
89 */
90 int port_cnt;
91 /*
92 * how much space to leave at start of eager TID entries for
93 * protocol use, on each TID
94 */
95 /* this port's number, kept here instead of calculating it */
96 unsigned port_port;
97 /* chip offset of PIO buffers for this port */
98 u32 port_piobufs;
99 /* how many alloc_pages() chunks in port_rcvegrbuf_pages */
100 u32 port_rcvegrbuf_chunks;
101 /* how many egrbufs per chunk */
102 u32 port_rcvegrbufs_perchunk;
103 /* order for port_rcvegrbuf_pages */
104 size_t port_rcvegrbuf_size;
105 /* rcvhdrq size (for freeing) */
106 size_t port_rcvhdrq_size;
107 /* next expected TID to check when looking for free */
108 u32 port_tidcursor;
109 /* next expected TID to check */
110 unsigned long port_flag;
111 /* WAIT_RCV that timed out, no interrupt */
112 u32 port_rcvwait_to;
113 /* WAIT_PIO that timed out, no interrupt */
114 u32 port_piowait_to;
115 /* WAIT_RCV already happened, no wait */
116 u32 port_rcvnowait;
117 /* WAIT_PIO already happened, no wait */
118 u32 port_pionowait;
119 /* total number of rcvhdrqfull errors */
120 u32 port_hdrqfull;
121 /* pid of process using this port */
122 pid_t port_pid;
123 /* same size as task_struct .comm[] */
124 char port_comm[16];
125 /* pkeys set by this use of this port */
126 u16 port_pkeys[4];
127 /* so file ops can get at unit */
128 struct ipath_devdata *port_dd;
129};
130
131struct sk_buff;
132
133/*
134 * control information for layered drivers
135 */
136struct _ipath_layer {
137 void *l_arg;
138};
139
140/* Verbs layer interface */
141struct _verbs_layer {
142 void *l_arg;
143 struct timer_list l_timer;
144};
145
146struct ipath_devdata {
147 struct list_head ipath_list;
148
149 struct ipath_kregs const *ipath_kregs;
150 struct ipath_cregs const *ipath_cregs;
151
152 /* mem-mapped pointer to base of chip regs */
153 u64 __iomem *ipath_kregbase;
154 /* end of mem-mapped chip space; range checking */
155 u64 __iomem *ipath_kregend;
156 /* physical address of chip for io_remap, etc. */
157 unsigned long ipath_physaddr;
158 /* base of memory alloced for ipath_kregbase, for free */
159 u64 *ipath_kregalloc;
160 /*
161 * version of kregbase that doesn't have high bits set (for 32 bit
162 * programs, so mmap64 44 bit works)
163 */
164 u64 __iomem *ipath_kregvirt;
165 /*
166 * virtual address where port0 rcvhdrqtail updated for this unit.
167 * only written to by the chip, not the driver.
168 */
169 volatile __le64 *ipath_hdrqtailptr;
170 dma_addr_t ipath_dma_addr;
171 /* ipath_cfgports pointers */
172 struct ipath_portdata **ipath_pd;
173 /* sk_buffs used by port 0 eager receive queue */
174 struct sk_buff **ipath_port0_skbs;
175 /* kvirt address of 1st 2k pio buffer */
176 void __iomem *ipath_pio2kbase;
177 /* kvirt address of 1st 4k pio buffer */
178 void __iomem *ipath_pio4kbase;
179 /*
180 * points to area where PIOavail registers will be DMA'ed.
182 * Has to be on a page of its own, because the page will be
182 * mapped into user program space. This copy is *ONLY* ever
183 * written by DMA, not by the driver! Need a copy per device
184 * when we get to multiple devices
185 */
186 volatile __le64 *ipath_pioavailregs_dma;
187 /* physical address where updates occur */
188 dma_addr_t ipath_pioavailregs_phys;
189 struct _ipath_layer ipath_layer;
190 /* setup intr */
191 int (*ipath_f_intrsetup)(struct ipath_devdata *);
192 /* setup on-chip bus config */
193 int (*ipath_f_bus)(struct ipath_devdata *, struct pci_dev *);
194 /* hard reset chip */
195 int (*ipath_f_reset)(struct ipath_devdata *);
196 int (*ipath_f_get_boardname)(struct ipath_devdata *, char *,
197 size_t);
198 void (*ipath_f_init_hwerrors)(struct ipath_devdata *);
199 void (*ipath_f_handle_hwerrors)(struct ipath_devdata *, char *,
200 size_t);
201 void (*ipath_f_quiet_serdes)(struct ipath_devdata *);
202 int (*ipath_f_bringup_serdes)(struct ipath_devdata *);
203 int (*ipath_f_early_init)(struct ipath_devdata *);
204 void (*ipath_f_clear_tids)(struct ipath_devdata *, unsigned);
205 void (*ipath_f_put_tid)(struct ipath_devdata *, u64 __iomem*,
206 u32, unsigned long);
207 void (*ipath_f_tidtemplate)(struct ipath_devdata *);
208 void (*ipath_f_cleanup)(struct ipath_devdata *);
209 void (*ipath_f_setextled)(struct ipath_devdata *, u64, u64);
210 /* fill out chip-specific fields */
211 int (*ipath_f_get_base_info)(struct ipath_portdata *, void *);
212 struct _verbs_layer verbs_layer;
213 /* total dwords sent (summed from counter) */
214 u64 ipath_sword;
215 /* total dwords rcvd (summed from counter) */
216 u64 ipath_rword;
217 /* total packets sent (summed from counter) */
218 u64 ipath_spkts;
219 /* total packets rcvd (summed from counter) */
220 u64 ipath_rpkts;
221 /* ipath_statusp initially points to this. */
222 u64 _ipath_status;
223 /* GUID for this interface, in network order */
224 __be64 ipath_guid;
225 /*
226 * aggregate of error bits reported since last cleared, for
227 * limiting of error reporting
228 */
229 ipath_err_t ipath_lasterror;
230 /*
231 * aggregate of error bits reported since last cleared, for
232 * limiting of hwerror reporting
233 */
234 ipath_err_t ipath_lasthwerror;
235 /*
236 * errors masked because they occur too fast, also includes errors
237 * that are always ignored (ipath_ignorederrs)
238 */
239 ipath_err_t ipath_maskederrs;
240 /* time in jiffies at which to re-enable maskederrs */
241 unsigned long ipath_unmasktime;
242 /*
243 * errors always ignored (masked), at least for a given
244 * chip/device, because they are wrong or not useful
245 */
246 ipath_err_t ipath_ignorederrs;
247 /* count of egrfull errors, combined for all ports */
248 u64 ipath_last_tidfull;
249 /* for ipath_qcheck() */
250 u64 ipath_lastport0rcv_cnt;
251 /* template for writing TIDs */
252 u64 ipath_tidtemplate;
253 /* value to write to free TIDs */
254 u64 ipath_tidinvalid;
255 /* PE-800 rcv interrupt setup */
256 u64 ipath_rhdrhead_intr_off;
257
258 /* size of memory at ipath_kregbase */
259 u32 ipath_kregsize;
260 /* number of registers used for pioavail */
261 u32 ipath_pioavregs;
262 /* IPATH_POLL, etc. */
263 u32 ipath_flags;
264 /* ipath_flags sma is waiting for */
265 u32 ipath_sma_state_wanted;
266 /* last buffer for user use, first buf for kernel use is this
267 * index. */
268 u32 ipath_lastport_piobuf;
269 /* is a stats timer active */
270 u32 ipath_stats_timer_active;
271 /* dwords sent read from counter */
272 u32 ipath_lastsword;
273 /* dwords received read from counter */
274 u32 ipath_lastrword;
275 /* sent packets read from counter */
276 u32 ipath_lastspkts;
277 /* received packets read from counter */
278 u32 ipath_lastrpkts;
279 /* pio bufs allocated per port */
280 u32 ipath_pbufsport;
281 /*
282 * number of ports configured as max; zero is set to number chip
283 * supports, less gives more pio bufs/port, etc.
284 */
285 u32 ipath_cfgports;
286 /* port0 rcvhdrq head offset */
287 u32 ipath_port0head;
288 /* count of port 0 hdrqfull errors */
289 u32 ipath_p0_hdrqfull;
290
291 /*
292 * (*cfgports) used to suppress multiple instances of same
293 * port staying stuck at same point
294 */
295 u32 *ipath_lastrcvhdrqtails;
296 /*
297 * (*cfgports) used to suppress multiple instances of same
298 * port staying stuck at same point
299 */
300 u32 *ipath_lastegrheads;
301 /*
302	 * index of the last piobuffer we used. Speeds up searching by
303	 * starting at this point. Doesn't matter if multiple CPUs use and
304	 * update it; the last updater's write is the only one that matters. Whenever it
305 * wraps, we update shadow copies. Need a copy per device when we
306 * get to multiple devices
307 */
308 u32 ipath_lastpioindex;
309 /* max length of freezemsg */
310 u32 ipath_freezelen;
311 /*
312 * consecutive times we wanted a PIO buffer but were unable to
313 * get one
314 */
315 u32 ipath_consec_nopiobuf;
316 /*
317 * hint that we should update ipath_pioavailshadow before
318 * looking for a PIO buffer
319 */
320 u32 ipath_upd_pio_shadow;
321 /* so we can rewrite it after a chip reset */
322 u32 ipath_pcibar0;
323 /* so we can rewrite it after a chip reset */
324 u32 ipath_pcibar1;
325 /* sequential tries for SMA send and no bufs */
326 u32 ipath_nosma_bufs;
327 /* duration (seconds) ipath_nosma_bufs set */
328 u32 ipath_nosma_secs;
329
330 /* HT/PCI Vendor ID (here for NodeInfo) */
331 u16 ipath_vendorid;
332 /* HT/PCI Device ID (here for NodeInfo) */
333 u16 ipath_deviceid;
334 /* offset in HT config space of slave/primary interface block */
335 u8 ipath_ht_slave_off;
336 /* for write combining settings */
337 unsigned long ipath_wc_cookie;
338 /* ref count for each pkey */
339 atomic_t ipath_pkeyrefs[4];
340 /* shadow copy of all exptids physaddr; used only by funcsim */
341 u64 *ipath_tidsimshadow;
342 /* shadow copy of struct page *'s for exp tid pages */
343 struct page **ipath_pageshadow;
344 /* lock to workaround chip bug 9437 */
345 spinlock_t ipath_tid_lock;
346
347 /*
348 * IPATH_STATUS_*,
349 * this address is mapped readonly into user processes so they can
350 * get status cheaply, whenever they want.
351 */
352 u64 *ipath_statusp;
353 /* freeze msg if hw error put chip in freeze */
354 char *ipath_freezemsg;
355 /* pci access data structure */
356 struct pci_dev *pcidev;
357 struct cdev *cdev;
358 struct class_device *class_dev;
359 /* timer used to prevent stats overflow, error throttling, etc. */
360 struct timer_list ipath_stats_timer;
361 /* check for stale messages in rcv queue */
362 /* only allow one intr at a time. */
363 unsigned long ipath_rcv_pending;
364
365 /*
366 * Shadow copies of registers; size indicates read access size.
367 * Most of them are readonly, but some are write-only register,
368 * where we manipulate the bits in the shadow copy, and then write
369 * the shadow copy to infinipath.
370 *
371 * We deliberately make most of these 32 bits, since they have
372	 * restricted range. For any that we read, we want to generate 32
373 * bit accesses, since Opteron will generate 2 separate 32 bit HT
374 * transactions for a 64 bit read, and we want to avoid unnecessary
375 * HT transactions.
376 */
377
378 /* This is the 64 bit group */
379
380 /*
381 * shadow of pioavail, check to be sure it's large enough at
382 * init time.
383 */
384 unsigned long ipath_pioavailshadow[8];
385 /* shadow of kr_gpio_out, for rmw ops */
386 u64 ipath_gpio_out;
387 /* kr_revision shadow */
388 u64 ipath_revision;
389 /*
390 * shadow of ibcctrl, for interrupt handling of link changes,
391 * etc.
392 */
393 u64 ipath_ibcctrl;
394 /*
395 * last ibcstatus, to suppress "duplicate" status change messages,
396 * mostly from 2 to 3
397 */
398 u64 ipath_lastibcstat;
399 /* hwerrmask shadow */
400 ipath_err_t ipath_hwerrmask;
401 /* interrupt config reg shadow */
402 u64 ipath_intconfig;
403 /* kr_sendpiobufbase value */
404 u64 ipath_piobufbase;
405
406 /* these are the "32 bit" regs */
407
408 /*
409 * number of GUIDs in the flash for this interface; may need some
410 * rethinking for setting on other ifaces
411 */
412 u32 ipath_nguid;
413 /*
414 * the following two are 32-bit bitmasks, but {test,clear,set}_bit
415 * all expect bit fields to be "unsigned long"
416 */
417 /* shadow kr_rcvctrl */
418 unsigned long ipath_rcvctrl;
419 /* shadow kr_sendctrl */
420 unsigned long ipath_sendctrl;
421
422 /* value we put in kr_rcvhdrcnt */
423 u32 ipath_rcvhdrcnt;
424 /* value we put in kr_rcvhdrsize */
425 u32 ipath_rcvhdrsize;
426 /* value we put in kr_rcvhdrentsize */
427 u32 ipath_rcvhdrentsize;
428 /* offset of last entry in rcvhdrq */
429 u32 ipath_hdrqlast;
430 /* kr_portcnt value */
431 u32 ipath_portcnt;
432 /* kr_pagealign value */
433 u32 ipath_palign;
434 /* number of "2KB" PIO buffers */
435 u32 ipath_piobcnt2k;
436 /* size in bytes of "2KB" PIO buffers */
437 u32 ipath_piosize2k;
438 /* number of "4KB" PIO buffers */
439 u32 ipath_piobcnt4k;
440 /* size in bytes of "4KB" PIO buffers */
441 u32 ipath_piosize4k;
442 /* kr_rcvegrbase value */
443 u32 ipath_rcvegrbase;
444 /* kr_rcvegrcnt value */
445 u32 ipath_rcvegrcnt;
446 /* kr_rcvtidbase value */
447 u32 ipath_rcvtidbase;
448 /* kr_rcvtidcnt value */
449 u32 ipath_rcvtidcnt;
450 /* kr_sendregbase */
451 u32 ipath_sregbase;
452 /* kr_userregbase */
453 u32 ipath_uregbase;
454 /* kr_counterregbase */
455 u32 ipath_cregbase;
456 /* shadow the control register contents */
457 u32 ipath_control;
458 /* shadow the gpio output contents */
459 u32 ipath_extctrl;
460 /* PCI revision register (HTC rev on FPGA) */
461 u32 ipath_pcirev;
462
463 /* chip address space used by 4k pio buffers */
464 u32 ipath_4kalign;
465 /* The MTU programmed for this unit */
466 u32 ipath_ibmtu;
467 /*
468	 * The max size IB packet, including IB headers, that we can send.
469 * Starts same as ipath_piosize, but is affected when ibmtu is
470 * changed, or by size of eager buffers
471 */
472 u32 ipath_ibmaxlen;
473 /*
474 * ibmaxlen at init time, limited by chip and by receive buffer
475 * size. Not changed after init.
476 */
477 u32 ipath_init_ibmaxlen;
478 /* size of each rcvegrbuffer */
479 u32 ipath_rcvegrbufsize;
480 /* width (2,4,8,16,32) from HT config reg */
481 u32 ipath_htwidth;
482 /* HT speed (200,400,800,1000) from HT config */
483 u32 ipath_htspeed;
484 /* ports waiting for PIOavail intr */
485 unsigned long ipath_portpiowait;
486 /*
487	 * number of sequential ibcstatus changes for polling active/quiet
488 * (i.e., link not coming up).
489 */
490 u32 ipath_ibpollcnt;
491 /* low and high portions of MSI capability/vector */
492 u32 ipath_msi_lo;
493 /* saved after PCIe init for restore after reset */
494 u32 ipath_msi_hi;
495 /* MSI data (vector) saved for restore */
496 u16 ipath_msi_data;
497 /* MLID programmed for this instance */
498 u16 ipath_mlid;
499 /* LID programmed for this instance */
500 u16 ipath_lid;
501 /* list of pkeys programmed; 0 if not set */
502 u16 ipath_pkeys[4];
503 /* ASCII serial number, from flash */
504 u8 ipath_serial[12];
505 /* human readable board version */
506 u8 ipath_boardversion[80];
507 /* chip major rev, from ipath_revision */
508 u8 ipath_majrev;
509 /* chip minor rev, from ipath_revision */
510 u8 ipath_minrev;
511 /* board rev, from ipath_revision */
512 u8 ipath_boardrev;
513 /* unit # of this chip, if present */
514 int ipath_unit;
515 /* saved for restore after reset */
516 u8 ipath_pci_cacheline;
517 /* LID mask control */
518 u8 ipath_lmc;
519};
520
521extern volatile __le64 *ipath_port0_rcvhdrtail;
522extern dma_addr_t ipath_port0_rcvhdrtail_dma;
523
524#define IPATH_PORT0_RCVHDRTAIL_SIZE PAGE_SIZE
525
526extern struct list_head ipath_dev_list;
527extern spinlock_t ipath_devs_lock;
528extern struct ipath_devdata *ipath_lookup(int unit);
529
530extern u16 ipath_layer_rcv_opcode;
531extern int ipath_verbs_registered;
532extern int __ipath_layer_intr(struct ipath_devdata *, u32);
533extern int ipath_layer_intr(struct ipath_devdata *, u32);
534extern int __ipath_layer_rcv(struct ipath_devdata *, void *,
535 struct sk_buff *);
536extern int __ipath_layer_rcv_lid(struct ipath_devdata *, void *);
537extern int __ipath_verbs_piobufavail(struct ipath_devdata *);
538extern int __ipath_verbs_rcv(struct ipath_devdata *, void *, void *, u32);
539
540void ipath_layer_add(struct ipath_devdata *);
541void ipath_layer_del(struct ipath_devdata *);
542
543int ipath_init_chip(struct ipath_devdata *, int);
544int ipath_enable_wc(struct ipath_devdata *dd);
545void ipath_disable_wc(struct ipath_devdata *dd);
546int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp);
547void ipath_shutdown_device(struct ipath_devdata *);
548
549struct file_operations;
550int ipath_cdev_init(int minor, char *name, struct file_operations *fops,
551 struct cdev **cdevp, struct class_device **class_devp);
552void ipath_cdev_cleanup(struct cdev **cdevp,
553 struct class_device **class_devp);
554
555int ipath_diag_init(void);
556void ipath_diag_cleanup(void);
557void ipath_diag_bringup_link(struct ipath_devdata *);
558
559extern wait_queue_head_t ipath_sma_state_wait;
560
561int ipath_user_add(struct ipath_devdata *dd);
562void ipath_user_del(struct ipath_devdata *dd);
563
564struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd, gfp_t);
565
566extern int ipath_diag_inuse;
567
568irqreturn_t ipath_intr(int irq, void *devid, struct pt_regs *regs);
569void ipath_decode_err(char *buf, size_t blen, ipath_err_t err);
570#if __IPATH_INFO || __IPATH_DBG
571extern const char *ipath_ibcstatus_str[];
572#endif
573
574/* clean up any per-chip chip-specific stuff */
575void ipath_chip_cleanup(struct ipath_devdata *);
576/* clean up any chip type-specific stuff */
577void ipath_chip_done(void);
578
579/* check to see if we have to force ordering for write combining */
580int ipath_unordered_wc(void);
581
582void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
583 unsigned cnt);
584
585int ipath_create_rcvhdrq(struct ipath_devdata *, struct ipath_portdata *);
586void ipath_free_pddata(struct ipath_devdata *, u32, int);
587
588int ipath_parse_ushort(const char *str, unsigned short *valp);
589
590int ipath_wait_linkstate(struct ipath_devdata *, u32, int);
591void ipath_set_ib_lstate(struct ipath_devdata *, int);
592void ipath_kreceive(struct ipath_devdata *);
593int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned);
594int ipath_reset_device(int);
595void ipath_get_faststats(unsigned long);
596
597/* for use in system calls, where we want to know device type, etc. */
598#define port_fp(fp) ((struct ipath_portdata *) (fp)->private_data)
599
600/*
601 * values for ipath_flags
602 */
603/* The chip is up and initted */
604#define IPATH_INITTED 0x2
605 /* set if any user code has set kr_rcvhdrsize */
606#define IPATH_RCVHDRSZ_SET 0x4
607 /* The chip is present and valid for accesses */
608#define IPATH_PRESENT 0x8
609 /* HT link0 is only 8 bits wide, ignore upper byte crc
610 * errors, etc. */
611#define IPATH_8BIT_IN_HT0 0x10
612 /* HT link1 is only 8 bits wide, ignore upper byte crc
613 * errors, etc. */
614#define IPATH_8BIT_IN_HT1 0x20
615 /* The link is down */
616#define IPATH_LINKDOWN 0x40
617 /* The link level is up (0x11) */
618#define IPATH_LINKINIT 0x80
619 /* The link is in the armed (0x21) state */
620#define IPATH_LINKARMED 0x100
621 /* The link is in the active (0x31) state */
622#define IPATH_LINKACTIVE 0x200
623 /* link current state is unknown */
624#define IPATH_LINKUNK 0x400
625 /* no IB cable, or no device on IB cable */
626#define IPATH_NOCABLE 0x4000
627 /* Supports port zero per packet receive interrupts via
628 * GPIO */
629#define IPATH_GPIO_INTR 0x8000
630 /* uses the coded 4byte TID, not 8 byte */
631#define IPATH_4BYTE_TID 0x10000
632 /* packet/word counters are 32 bit, else those 4 counters
633 * are 64bit */
634#define IPATH_32BITCOUNTERS 0x20000
635 /* can miss port0 rx interrupts */
636#define IPATH_POLL_RX_INTR 0x40000
637#define IPATH_DISABLED 0x80000 /* administratively disabled */
638
639/* portdata flag bit offsets */
640 /* waiting for a packet to arrive */
641#define IPATH_PORT_WAITING_RCV 2
642 /* waiting for a PIO buffer to be available */
643#define IPATH_PORT_WAITING_PIO 3
644
645/* free up any allocated data at closes */
646void ipath_free_data(struct ipath_portdata *dd);
647int ipath_waitfor_mdio_cmdready(struct ipath_devdata *);
648int ipath_waitfor_complete(struct ipath_devdata *, ipath_kreg, u64, u64 *);
649u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *);
650/* init PE-800-specific func */
651void ipath_init_pe800_funcs(struct ipath_devdata *);
652/* init HT-400-specific func */
653void ipath_init_ht400_funcs(struct ipath_devdata *);
654void ipath_get_guid(struct ipath_devdata *);
655u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
656
657/*
658 * number of words used for protocol header if not set by ipath_userinit();
659 */
660#define IPATH_DFLT_RCVHDRSIZE 9
661
662#define IPATH_MDIO_CMD_WRITE 1
663#define IPATH_MDIO_CMD_READ 2
664#define IPATH_MDIO_CLD_DIV 25 /* to get 2.5 Mhz mdio clock */
665#define IPATH_MDIO_CMDVALID 0x40000000 /* bit 30 */
666#define IPATH_MDIO_DATAVALID 0x80000000 /* bit 31 */
667#define IPATH_MDIO_CTRL_STD 0x0
668
669static inline u64 ipath_mdio_req(int cmd, int dev, int reg, int data)
670{
671 return (((u64) IPATH_MDIO_CLD_DIV) << 32) |
672 (cmd << 26) |
673 (dev << 21) |
674 (reg << 16) |
675 (data & 0xFFFF);
676}
677
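As an aside, the bit packing in ipath_mdio_req() above is easier to see with concrete numbers. Below is a minimal, stand-alone userspace sketch of the same packing (the clock divider matches IPATH_MDIO_CLD_DIV; the device, register and data values are made up, and the field widths are inferred from the shift amounts):

#include <stdint.h>
#include <stdio.h>

/* same layout as ipath_mdio_req(): clock divider above bit 32, cmd at
 * bit 26, dev at bit 21, reg at bit 16, data in the low 16 bits */
static uint64_t mdio_req(int cmd, int dev, int reg, int data)
{
	return ((uint64_t)25 << 32) |	/* IPATH_MDIO_CLD_DIV */
		((uint64_t)cmd << 26) |
		((uint64_t)dev << 21) |
		((uint64_t)reg << 16) |
		(data & 0xFFFF);
}

int main(void)
{
	/* hypothetical: write 0x1234 to register 0x11 of device 1 */
	uint64_t req = mdio_req(1 /* IPATH_MDIO_CMD_WRITE */, 1, 0x11, 0x1234);

	printf("request word: 0x%016llx\n", (unsigned long long)req);
	printf("data field  : 0x%04llx\n", (unsigned long long)(req & 0xFFFF));
	printf("reg field   : 0x%02llx\n", (unsigned long long)((req >> 16) & 0x1F));
	printf("dev field   : 0x%02llx\n", (unsigned long long)((req >> 21) & 0x1F));
	return 0;
}
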
678 /* signal and fifo status, in bank 31 */
679#define IPATH_MDIO_CTRL_XGXS_REG_8 0x8
680 /* controls loopback, redundancy */
681#define IPATH_MDIO_CTRL_8355_REG_1 0x10
682 /* premph, encdec, etc. */
683#define IPATH_MDIO_CTRL_8355_REG_2 0x11
684 /* Kchars, etc. */
685#define IPATH_MDIO_CTRL_8355_REG_6 0x15
686#define IPATH_MDIO_CTRL_8355_REG_9 0x18
687#define IPATH_MDIO_CTRL_8355_REG_10 0x1D
688
689int ipath_get_user_pages(unsigned long, size_t, struct page **);
690int ipath_get_user_pages_nocopy(unsigned long, struct page **);
691void ipath_release_user_pages(struct page **, size_t);
692void ipath_release_user_pages_on_close(struct page **, size_t);
693int ipath_eeprom_read(struct ipath_devdata *, u8, void *, int);
694int ipath_eeprom_write(struct ipath_devdata *, u8, const void *, int);
695
696/* these are used for the registers that vary with port */
697void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg,
698 unsigned, u64);
699u64 ipath_read_kreg64_port(const struct ipath_devdata *, ipath_kreg,
700 unsigned);
701
702/*
703 * We could have a single register get/put routine, that takes a group type,
704 * but this is somewhat clearer and cleaner. It also gives us some error
705 * checking. 64 bit register reads should always work, but are inefficient
706 * on opteron (the northbridge always generates 2 separate HT 32 bit reads),
707 * so we use kreg32 wherever possible. User register and counter register
708 * reads are always 32 bit reads, so only one form of those routines.
709 */
710
711/*
712 * At the moment, none of the s-registers are writable, so no
713 * ipath_write_sreg(), and none of the c-registers are writable, so no
714 * ipath_write_creg().
715 */
716
717/**
718 * ipath_read_ureg32 - read 32-bit virtualized per-port register
719 * @dd: device
720 * @regno: register number
721 * @port: port number
722 *
723 * Return the contents of a register that is virtualized to be per port.
724	 * Returns 0 if the chip registers are not yet mapped (not distinguishable
725	 * from valid contents at runtime; we may add a separate error variable at
726	 * some point).
727 *
728 * This is normally not used by the kernel, but may be for debugging, and
729 * has a different implementation than user mode, which is why it's not in
730 * _common.h.
731 */
732static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd,
733 ipath_ureg regno, int port)
734{
735 if (!dd->ipath_kregbase)
736 return 0;
737
738 return readl(regno + (u64 __iomem *)
739 (dd->ipath_uregbase +
740 (char __iomem *)dd->ipath_kregbase +
741 dd->ipath_palign * port));
742}
743
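The pointer casts in ipath_read_ureg32() obscure fairly simple address arithmetic: the per-port user registers start ipath_uregbase bytes past the mapped register base, each port's block is ipath_palign bytes long, and regno indexes 64-bit registers within the block. A small stand-alone sketch of the same offset calculation, with made-up values for the base, alignment and register number:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical values; the real ones come from chip registers */
	uint64_t uregbase = 0x10000;	/* byte offset of user registers */
	uint64_t palign   = 0x2000;	/* bytes per port's register block */
	unsigned regno    = 3;		/* 64-bit register index */

	for (int port = 0; port < 4; port++) {
		uint64_t byte_off = uregbase + palign * port +
				    (uint64_t)regno * sizeof(uint64_t);
		printf("port %d, regno %u -> byte offset 0x%llx\n",
		       port, regno, (unsigned long long)byte_off);
	}
	return 0;
}
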
744/**
745 * ipath_write_ureg - write 32-bit virtualized per-port register
746 * @dd: device
747 * @regno: register number
748 * @value: value
749 * @port: port
750 *
751 * Write the contents of a register that is virtualized to be per port.
752 */
753static inline void ipath_write_ureg(const struct ipath_devdata *dd,
754 ipath_ureg regno, u64 value, int port)
755{
756 u64 __iomem *ubase = (u64 __iomem *)
757 (dd->ipath_uregbase + (char __iomem *) dd->ipath_kregbase +
758 dd->ipath_palign * port);
759 if (dd->ipath_kregbase)
760 writeq(value, &ubase[regno]);
761}
762
763static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd,
764 ipath_kreg regno)
765{
766 if (!dd->ipath_kregbase)
767 return -1;
768 return readl((u32 __iomem *) & dd->ipath_kregbase[regno]);
769}
770
771static inline u64 ipath_read_kreg64(const struct ipath_devdata *dd,
772 ipath_kreg regno)
773{
774 if (!dd->ipath_kregbase)
775 return -1;
776
777 return readq(&dd->ipath_kregbase[regno]);
778}
779
780static inline void ipath_write_kreg(const struct ipath_devdata *dd,
781 ipath_kreg regno, u64 value)
782{
783 if (dd->ipath_kregbase)
784 writeq(value, &dd->ipath_kregbase[regno]);
785}
786
787static inline u64 ipath_read_creg(const struct ipath_devdata *dd,
788 ipath_sreg regno)
789{
790 if (!dd->ipath_kregbase)
791 return 0;
792
793 return readq(regno + (u64 __iomem *)
794 (dd->ipath_cregbase +
795 (char __iomem *)dd->ipath_kregbase));
796}
797
798static inline u32 ipath_read_creg32(const struct ipath_devdata *dd,
799 ipath_sreg regno)
800{
801 if (!dd->ipath_kregbase)
802 return 0;
803 return readl(regno + (u64 __iomem *)
804 (dd->ipath_cregbase +
805 (char __iomem *)dd->ipath_kregbase));
806}
807
808/*
809 * sysfs interface.
810 */
811
812struct device_driver;
813
814extern const char ipath_core_version[];
815
816int ipath_driver_create_group(struct device_driver *);
817void ipath_driver_remove_group(struct device_driver *);
818
819int ipath_device_create_group(struct device *, struct ipath_devdata *);
820void ipath_device_remove_group(struct device *, struct ipath_devdata *);
821int ipath_expose_reset(struct device *);
822
823int ipath_init_ipathfs(void);
824void ipath_exit_ipathfs(void);
825int ipathfs_add_device(struct ipath_devdata *);
826int ipathfs_remove_device(struct ipath_devdata *);
827
828/*
829 * Flush write combining store buffers (if present) and perform a write
830 * barrier.
831 */
832#if defined(CONFIG_X86_64)
833#define ipath_flush_wc() asm volatile("sfence" ::: "memory")
834#else
835#define ipath_flush_wc() wmb()
836#endif
837
838extern unsigned ipath_debug; /* debugging bit mask */
839
840const char *ipath_get_unit_name(int unit);
841
842extern struct mutex ipath_mutex;
843
844#define IPATH_DRV_NAME "ipath_core"
845#define IPATH_MAJOR 233
846#define IPATH_SMA_MINOR 128
847#define IPATH_DIAG_MINOR 129
848#define IPATH_NMINORS 130
849
850#define ipath_dev_err(dd,fmt,...) \
851 do { \
852 const struct ipath_devdata *__dd = (dd); \
853 if (__dd->pcidev) \
854 dev_err(&__dd->pcidev->dev, "%s: " fmt, \
855 ipath_get_unit_name(__dd->ipath_unit), \
856 ##__VA_ARGS__); \
857 else \
858 printk(KERN_ERR IPATH_DRV_NAME ": %s: " fmt, \
859 ipath_get_unit_name(__dd->ipath_unit), \
860 ##__VA_ARGS__); \
861 } while (0)
862
863#if _IPATH_DEBUGGING
864
865# define __IPATH_DBG_WHICH(which,fmt,...) \
866 do { \
867 if(unlikely(ipath_debug&(which))) \
868 printk(KERN_DEBUG IPATH_DRV_NAME ": %s: " fmt, \
869 __func__,##__VA_ARGS__); \
870 } while(0)
871
872# define ipath_dbg(fmt,...) \
873 __IPATH_DBG_WHICH(__IPATH_DBG,fmt,##__VA_ARGS__)
874# define ipath_cdbg(which,fmt,...) \
875 __IPATH_DBG_WHICH(__IPATH_##which##DBG,fmt,##__VA_ARGS__)
876
877#else /* ! _IPATH_DEBUGGING */
878
879# define ipath_dbg(fmt,...)
880# define ipath_cdbg(which,fmt,...)
881
882#endif /* _IPATH_DEBUGGING */
883
884#endif /* _IPATH_KERNEL_H */
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
new file mode 100644
index 000000000000..aa33b0e9f2f6
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_keys.c
@@ -0,0 +1,236 @@
1/*
2 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <asm/io.h>
34
35#include "ipath_verbs.h"
36
37/**
38 * ipath_alloc_lkey - allocate an lkey
39 * @rkt: lkey table in which to allocate the lkey
40 * @mr: memory region that this lkey protects
41 *
42 * Returns 1 if successful, otherwise returns 0.
43 */
44
45int ipath_alloc_lkey(struct ipath_lkey_table *rkt, struct ipath_mregion *mr)
46{
47 unsigned long flags;
48 u32 r;
49 u32 n;
50 int ret;
51
52 spin_lock_irqsave(&rkt->lock, flags);
53
54 /* Find the next available LKEY */
55 r = n = rkt->next;
56 for (;;) {
57 if (rkt->table[r] == NULL)
58 break;
59 r = (r + 1) & (rkt->max - 1);
60 if (r == n) {
61 spin_unlock_irqrestore(&rkt->lock, flags);
62 _VERBS_INFO("LKEY table full\n");
63 ret = 0;
64 goto bail;
65 }
66 }
67 rkt->next = (r + 1) & (rkt->max - 1);
68	 * Make sure lkey is never zero, which is reserved to indicate an
69 * Make sure lkey is never zero which is reserved to indicate an
70 * unrestricted LKEY.
71 */
72 rkt->gen++;
73 mr->lkey = (r << (32 - ib_ipath_lkey_table_size)) |
74 ((((1 << (24 - ib_ipath_lkey_table_size)) - 1) & rkt->gen)
75 << 8);
76 if (mr->lkey == 0) {
77 mr->lkey |= 1 << 8;
78 rkt->gen++;
79 }
80 rkt->table[r] = mr;
81 spin_unlock_irqrestore(&rkt->lock, flags);
82
83 ret = 1;
84
85bail:
86 return ret;
87}
88
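The LKEY built in ipath_alloc_lkey() encodes the table index in the top ib_ipath_lkey_table_size bits and a generation count starting at bit 8, which is what lets ipath_free_lkey() and ipath_lkey_ok() recover the index with a single shift. A stand-alone sketch of the packing and unpacking, assuming a hypothetical table size of 12 bits:

#include <stdint.h>
#include <stdio.h>

#define LKEY_TABLE_SIZE 12	/* hypothetical ib_ipath_lkey_table_size */

int main(void)
{
	uint32_t r = 5;		/* table slot found by the search loop */
	uint32_t gen = 7;	/* per-table generation counter */

	/* same packing as ipath_alloc_lkey() */
	uint32_t lkey = (r << (32 - LKEY_TABLE_SIZE)) |
			((((1u << (24 - LKEY_TABLE_SIZE)) - 1) & gen) << 8);

	/* index recovery, as in ipath_free_lkey()/ipath_lkey_ok() */
	uint32_t idx = lkey >> (32 - LKEY_TABLE_SIZE);

	printf("lkey = 0x%08x, recovered index = %u\n", lkey, idx);
	return 0;
}
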
89/**
90 * ipath_free_lkey - free an lkey
91 * @rkt: table from which to free the lkey
92 * @lkey: lkey id to free
93 */
94void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey)
95{
96 unsigned long flags;
97 u32 r;
98
99 if (lkey == 0)
100 return;
101 r = lkey >> (32 - ib_ipath_lkey_table_size);
102 spin_lock_irqsave(&rkt->lock, flags);
103 rkt->table[r] = NULL;
104 spin_unlock_irqrestore(&rkt->lock, flags);
105}
106
107/**
108 * ipath_lkey_ok - check IB SGE for validity and initialize
109 * @rkt: table containing lkey to check SGE against
110 * @isge: outgoing internal SGE
111 * @sge: SGE to check
112 * @acc: access flags
113 *
114 * Return 1 if valid and successful, otherwise returns 0.
115 *
116 * Check the IB SGE for validity and initialize our internal version
117 * of it.
118 */
119int ipath_lkey_ok(struct ipath_lkey_table *rkt, struct ipath_sge *isge,
120 struct ib_sge *sge, int acc)
121{
122 struct ipath_mregion *mr;
123 size_t off;
124 int ret;
125
126 /*
127 * We use LKEY == zero to mean a physical kmalloc() address.
128 * This is a bit of a hack since we rely on dma_map_single()
129 * being reversible by calling bus_to_virt().
130 */
131 if (sge->lkey == 0) {
132 isge->mr = NULL;
133 isge->vaddr = bus_to_virt(sge->addr);
134 isge->length = sge->length;
135 isge->sge_length = sge->length;
136 ret = 1;
137 goto bail;
138 }
139 spin_lock(&rkt->lock);
140 mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))];
141 spin_unlock(&rkt->lock);
142 if (unlikely(mr == NULL || mr->lkey != sge->lkey)) {
143 ret = 0;
144 goto bail;
145 }
146
147 off = sge->addr - mr->user_base;
148 if (unlikely(sge->addr < mr->user_base ||
149 off + sge->length > mr->length ||
150 (mr->access_flags & acc) != acc)) {
151 ret = 0;
152 goto bail;
153 }
154
155 off += mr->offset;
156 isge->mr = mr;
157 isge->m = 0;
158 isge->n = 0;
159 while (off >= mr->map[isge->m]->segs[isge->n].length) {
160 off -= mr->map[isge->m]->segs[isge->n].length;
161 isge->n++;
162 if (isge->n >= IPATH_SEGSZ) {
163 isge->m++;
164 isge->n = 0;
165 }
166 }
167 isge->vaddr = mr->map[isge->m]->segs[isge->n].vaddr + off;
168 isge->length = mr->map[isge->m]->segs[isge->n].length - off;
169 isge->sge_length = sge->length;
170
171 ret = 1;
172
173bail:
174 return ret;
175}
176
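The while loop above walks the memory region's segment lists to turn a byte offset into an (m, n, remainder) position: m selects a map chunk of IPATH_SEGSZ segments, n selects a segment within it, and the remainder is added to that segment's vaddr. A small stand-alone sketch of the same walk over made-up segment lengths:

#include <stddef.h>
#include <stdio.h>

#define SEGSZ 4	/* hypothetical stand-in for IPATH_SEGSZ */

int main(void)
{
	/* two map chunks of SEGSZ segments each, with made-up lengths */
	size_t seglen[2][SEGSZ] = {
		{ 4096, 4096, 2048, 4096 },
		{ 4096, 4096, 4096, 4096 },
	};
	size_t off = 9000;	/* byte offset into the region */
	unsigned m = 0, n = 0;

	/* same walk as ipath_lkey_ok()/ipath_rkey_ok() */
	while (off >= seglen[m][n]) {
		off -= seglen[m][n];
		if (++n >= SEGSZ) {
			m++;
			n = 0;
		}
	}
	printf("offset lands in map %u, segment %u, at byte %zu\n", m, n, off);
	return 0;
}
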
177/**
178 * ipath_rkey_ok - check the IB virtual address, length, and RKEY
179 * @dev: infiniband device
180 * @ss: SGE state
181 * @len: length of data
182 * @vaddr: virtual address to place data
183 * @rkey: rkey to check
184 * @acc: access flags
185 *
186 * Return 1 if successful, otherwise 0.
187 *
188 * The QP r_rq.lock should be held.
189 */
190int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
191 u32 len, u64 vaddr, u32 rkey, int acc)
192{
193 struct ipath_lkey_table *rkt = &dev->lk_table;
194 struct ipath_sge *sge = &ss->sge;
195 struct ipath_mregion *mr;
196 size_t off;
197 int ret;
198
199 spin_lock(&rkt->lock);
200 mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))];
201 spin_unlock(&rkt->lock);
202 if (unlikely(mr == NULL || mr->lkey != rkey)) {
203 ret = 0;
204 goto bail;
205 }
206
207 off = vaddr - mr->iova;
208 if (unlikely(vaddr < mr->iova || off + len > mr->length ||
209 (mr->access_flags & acc) == 0)) {
210 ret = 0;
211 goto bail;
212 }
213
214 off += mr->offset;
215 sge->mr = mr;
216 sge->m = 0;
217 sge->n = 0;
218 while (off >= mr->map[sge->m]->segs[sge->n].length) {
219 off -= mr->map[sge->m]->segs[sge->n].length;
220 sge->n++;
221 if (sge->n >= IPATH_SEGSZ) {
222 sge->m++;
223 sge->n = 0;
224 }
225 }
226 sge->vaddr = mr->map[sge->m]->segs[sge->n].vaddr + off;
227 sge->length = mr->map[sge->m]->segs[sge->n].length - off;
228 sge->sge_length = len;
229 ss->sg_list = NULL;
230 ss->num_sge = 1;
231
232 ret = 1;
233
234bail:
235 return ret;
236}
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c
new file mode 100644
index 000000000000..2cabf6340572
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_layer.c
@@ -0,0 +1,1515 @@
1/*
2 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/*
34 * These are the routines used by layered drivers, currently just the
35 * layered ethernet driver and verbs layer.
36 */
37
38#include <linux/io.h>
39#include <linux/pci.h>
40#include <asm/byteorder.h>
41
42#include "ipath_kernel.h"
43#include "ips_common.h"
44#include "ipath_layer.h"
45
46/* Acquire before ipath_devs_lock. */
47static DEFINE_MUTEX(ipath_layer_mutex);
48
49u16 ipath_layer_rcv_opcode;
50static int (*layer_intr)(void *, u32);
51static int (*layer_rcv)(void *, void *, struct sk_buff *);
52static int (*layer_rcv_lid)(void *, void *);
53static int (*verbs_piobufavail)(void *);
54static void (*verbs_rcv)(void *, void *, void *, u32);
55int ipath_verbs_registered;
56
57static void *(*layer_add_one)(int, struct ipath_devdata *);
58static void (*layer_remove_one)(void *);
59static void *(*verbs_add_one)(int, struct ipath_devdata *);
60static void (*verbs_remove_one)(void *);
61static void (*verbs_timer_cb)(void *);
62
63int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
64{
65 int ret = -ENODEV;
66
67 if (dd->ipath_layer.l_arg && layer_intr)
68 ret = layer_intr(dd->ipath_layer.l_arg, arg);
69
70 return ret;
71}
72
73int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
74{
75 int ret;
76
77 mutex_lock(&ipath_layer_mutex);
78
79 ret = __ipath_layer_intr(dd, arg);
80
81 mutex_unlock(&ipath_layer_mutex);
82
83 return ret;
84}
85
86int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
87 struct sk_buff *skb)
88{
89 int ret = -ENODEV;
90
91 if (dd->ipath_layer.l_arg && layer_rcv)
92 ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);
93
94 return ret;
95}
96
97int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
98{
99 int ret = -ENODEV;
100
101 if (dd->ipath_layer.l_arg && layer_rcv_lid)
102 ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);
103
104 return ret;
105}
106
107int __ipath_verbs_piobufavail(struct ipath_devdata *dd)
108{
109 int ret = -ENODEV;
110
111 if (dd->verbs_layer.l_arg && verbs_piobufavail)
112 ret = verbs_piobufavail(dd->verbs_layer.l_arg);
113
114 return ret;
115}
116
117int __ipath_verbs_rcv(struct ipath_devdata *dd, void *rc, void *ebuf,
118 u32 tlen)
119{
120 int ret = -ENODEV;
121
122 if (dd->verbs_layer.l_arg && verbs_rcv) {
123 verbs_rcv(dd->verbs_layer.l_arg, rc, ebuf, tlen);
124 ret = 0;
125 }
126
127 return ret;
128}
129
130int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate)
131{
132 u32 lstate;
133 int ret;
134
135 switch (newstate) {
136 case IPATH_IB_LINKDOWN:
137 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
138 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
139 /* don't wait */
140 ret = 0;
141 goto bail;
142
143 case IPATH_IB_LINKDOWN_SLEEP:
144 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
145 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
146 /* don't wait */
147 ret = 0;
148 goto bail;
149
150 case IPATH_IB_LINKDOWN_DISABLE:
151 ipath_set_ib_lstate(dd,
152 INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
153 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
154 /* don't wait */
155 ret = 0;
156 goto bail;
157
158 case IPATH_IB_LINKINIT:
159 if (dd->ipath_flags & IPATH_LINKINIT) {
160 ret = 0;
161 goto bail;
162 }
163 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
164 INFINIPATH_IBCC_LINKCMD_SHIFT);
165 lstate = IPATH_LINKINIT;
166 break;
167
168 case IPATH_IB_LINKARM:
169 if (dd->ipath_flags & IPATH_LINKARMED) {
170 ret = 0;
171 goto bail;
172 }
173 if (!(dd->ipath_flags &
174 (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
175 ret = -EINVAL;
176 goto bail;
177 }
178 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
179 INFINIPATH_IBCC_LINKCMD_SHIFT);
180 /*
181 * Since the port can transition to ACTIVE by receiving
182 * a non VL 15 packet, wait for either state.
183 */
184 lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
185 break;
186
187 case IPATH_IB_LINKACTIVE:
188 if (dd->ipath_flags & IPATH_LINKACTIVE) {
189 ret = 0;
190 goto bail;
191 }
192 if (!(dd->ipath_flags & IPATH_LINKARMED)) {
193 ret = -EINVAL;
194 goto bail;
195 }
196 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
197 INFINIPATH_IBCC_LINKCMD_SHIFT);
198 lstate = IPATH_LINKACTIVE;
199 break;
200
201 default:
202 ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
203 ret = -EINVAL;
204 goto bail;
205 }
206 ret = ipath_wait_linkstate(dd, lstate, 2000);
207
208bail:
209 return ret;
210}
211
212EXPORT_SYMBOL_GPL(ipath_layer_set_linkstate);
213
214/**
215 * ipath_layer_set_mtu - set the MTU
216 * @dd: the infinipath device
217 * @arg: the new MTU
218 *
219	 * We can handle "any" incoming size; the issue here is whether we
220 * need to restrict our outgoing size. For now, we don't do any
221 * sanity checking on this, and we don't deal with what happens to
222 * programs that are already running when the size changes.
223 * NOTE: changing the MTU will usually cause the IBC to go back to
224 * link initialize (IPATH_IBSTATE_INIT) state...
225 */
226int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg)
227{
228 u32 piosize;
229 int changed = 0;
230 int ret;
231
232 /*
233 * mtu is IB data payload max. It's the largest power of 2 less
234 * than piosize (or even larger, since it only really controls the
235 * largest we can receive; we can send the max of the mtu and
236 * piosize). We check that it's one of the valid IB sizes.
237 */
238 if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
239 arg != 4096) {
240 ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
241 ret = -EINVAL;
242 goto bail;
243 }
244 if (dd->ipath_ibmtu == arg) {
245 ret = 0; /* same as current */
246 goto bail;
247 }
248
249 piosize = dd->ipath_ibmaxlen;
250 dd->ipath_ibmtu = arg;
251
252 if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
253 /* Only if it's not the initial value (or reset to it) */
254 if (piosize != dd->ipath_init_ibmaxlen) {
255 dd->ipath_ibmaxlen = piosize;
256 changed = 1;
257 }
258 } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
259 piosize = arg + IPATH_PIO_MAXIBHDR;
260 ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
261 "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
262 arg);
263 dd->ipath_ibmaxlen = piosize;
264 changed = 1;
265 }
266
267 if (changed) {
268 /*
269 * set the IBC maxpktlength to the size of our pio
270 * buffers in words
271 */
272 u64 ibc = dd->ipath_ibcctrl;
273 ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
274 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
275
276 piosize = piosize - 2 * sizeof(u32); /* ignore pbc */
277 dd->ipath_ibmaxlen = piosize;
278 piosize /= sizeof(u32); /* in words */
279 /*
280	 * add one word for the ICRC, which we only send in diag test
281	 * pkt mode, and which we don't need to worry about for the mtu
282 */
283 piosize += 1;
284
285 ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
286 dd->ipath_ibcctrl = ibc;
287 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
288 dd->ipath_ibcctrl);
289 dd->ipath_f_tidtemplate(dd);
290 }
291
292 ret = 0;
293
294bail:
295 return ret;
296}
297
298EXPORT_SYMBOL_GPL(ipath_layer_set_mtu);
299
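The arithmetic in ipath_layer_set_mtu() is easier to follow in isolation: the PIO budget for a packet is the MTU plus the worst-case IB header allowance (IPATH_PIO_MAXIBHDR), and the value programmed into the IBC maxpktlen field is that size minus the two PBC dwords, converted to 32-bit words, plus one word for the ICRC. A stand-alone sketch, assuming a header allowance of 128 bytes (a stand-in for the real constant):

#include <stdint.h>
#include <stdio.h>

#define PIO_MAXIBHDR 128	/* assumed stand-in for IPATH_PIO_MAXIBHDR */

int main(void)
{
	uint32_t mtu = 2048;				/* new IB MTU */
	uint32_t piosize = mtu + PIO_MAXIBHDR;		/* payload + headers */

	piosize -= 2 * sizeof(uint32_t);		/* drop the PBC dwords */
	uint32_t ibmaxlen = piosize;			/* bytes we may send */
	uint32_t words = piosize / sizeof(uint32_t) + 1; /* + ICRC word */

	printf("mtu %u -> ibmaxlen %u bytes, IBC maxpktlen %u words\n",
	       mtu, ibmaxlen, words);
	return 0;
}
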
300int ipath_set_sps_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
301{
302 ipath_stats.sps_lid[dd->ipath_unit] = arg;
303 dd->ipath_lid = arg;
304 dd->ipath_lmc = lmc;
305
306 mutex_lock(&ipath_layer_mutex);
307
308 if (dd->ipath_layer.l_arg && layer_intr)
309 layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);
310
311 mutex_unlock(&ipath_layer_mutex);
312
313 return 0;
314}
315
316EXPORT_SYMBOL_GPL(ipath_set_sps_lid);
317
318int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
319{
320 /* XXX - need to inform anyone who cares this just happened. */
321 dd->ipath_guid = guid;
322 return 0;
323}
324
325EXPORT_SYMBOL_GPL(ipath_layer_set_guid);
326
327__be64 ipath_layer_get_guid(struct ipath_devdata *dd)
328{
329 return dd->ipath_guid;
330}
331
332EXPORT_SYMBOL_GPL(ipath_layer_get_guid);
333
334u32 ipath_layer_get_nguid(struct ipath_devdata *dd)
335{
336 return dd->ipath_nguid;
337}
338
339EXPORT_SYMBOL_GPL(ipath_layer_get_nguid);
340
341int ipath_layer_query_device(struct ipath_devdata *dd, u32 * vendor,
342 u32 * boardrev, u32 * majrev, u32 * minrev)
343{
344 *vendor = dd->ipath_vendorid;
345 *boardrev = dd->ipath_boardrev;
346 *majrev = dd->ipath_majrev;
347 *minrev = dd->ipath_minrev;
348
349 return 0;
350}
351
352EXPORT_SYMBOL_GPL(ipath_layer_query_device);
353
354u32 ipath_layer_get_flags(struct ipath_devdata *dd)
355{
356 return dd->ipath_flags;
357}
358
359EXPORT_SYMBOL_GPL(ipath_layer_get_flags);
360
361struct device *ipath_layer_get_device(struct ipath_devdata *dd)
362{
363 return &dd->pcidev->dev;
364}
365
366EXPORT_SYMBOL_GPL(ipath_layer_get_device);
367
368u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
369{
370 return dd->ipath_deviceid;
371}
372
373EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid);
374
375u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
376{
377 return dd->ipath_lastibcstat;
378}
379
380EXPORT_SYMBOL_GPL(ipath_layer_get_lastibcstat);
381
382u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
383{
384 return dd->ipath_ibmtu;
385}
386
387EXPORT_SYMBOL_GPL(ipath_layer_get_ibmtu);
388
389void ipath_layer_add(struct ipath_devdata *dd)
390{
391 mutex_lock(&ipath_layer_mutex);
392
393 if (layer_add_one)
394 dd->ipath_layer.l_arg =
395 layer_add_one(dd->ipath_unit, dd);
396
397 if (verbs_add_one)
398 dd->verbs_layer.l_arg =
399 verbs_add_one(dd->ipath_unit, dd);
400
401 mutex_unlock(&ipath_layer_mutex);
402}
403
404void ipath_layer_del(struct ipath_devdata *dd)
405{
406 mutex_lock(&ipath_layer_mutex);
407
408 if (dd->ipath_layer.l_arg && layer_remove_one) {
409 layer_remove_one(dd->ipath_layer.l_arg);
410 dd->ipath_layer.l_arg = NULL;
411 }
412
413 if (dd->verbs_layer.l_arg && verbs_remove_one) {
414 verbs_remove_one(dd->verbs_layer.l_arg);
415 dd->verbs_layer.l_arg = NULL;
416 }
417
418 mutex_unlock(&ipath_layer_mutex);
419}
420
421int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
422 void (*l_remove)(void *),
423 int (*l_intr)(void *, u32),
424 int (*l_rcv)(void *, void *, struct sk_buff *),
425 u16 l_rcv_opcode,
426 int (*l_rcv_lid)(void *, void *))
427{
428 struct ipath_devdata *dd, *tmp;
429 unsigned long flags;
430
431 mutex_lock(&ipath_layer_mutex);
432
433 layer_add_one = l_add;
434 layer_remove_one = l_remove;
435 layer_intr = l_intr;
436 layer_rcv = l_rcv;
437 layer_rcv_lid = l_rcv_lid;
438 ipath_layer_rcv_opcode = l_rcv_opcode;
439
440 spin_lock_irqsave(&ipath_devs_lock, flags);
441
442 list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
443 if (!(dd->ipath_flags & IPATH_INITTED))
444 continue;
445
446 if (dd->ipath_layer.l_arg)
447 continue;
448
449 if (!(*dd->ipath_statusp & IPATH_STATUS_SMA))
450 *dd->ipath_statusp |= IPATH_STATUS_OIB_SMA;
451
452 spin_unlock_irqrestore(&ipath_devs_lock, flags);
453 dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
454 spin_lock_irqsave(&ipath_devs_lock, flags);
455 }
456
457 spin_unlock_irqrestore(&ipath_devs_lock, flags);
458 mutex_unlock(&ipath_layer_mutex);
459
460 return 0;
461}
462
463EXPORT_SYMBOL_GPL(ipath_layer_register);
464
465void ipath_layer_unregister(void)
466{
467 struct ipath_devdata *dd, *tmp;
468 unsigned long flags;
469
470 mutex_lock(&ipath_layer_mutex);
471 spin_lock_irqsave(&ipath_devs_lock, flags);
472
473 list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
474 if (dd->ipath_layer.l_arg && layer_remove_one) {
475 spin_unlock_irqrestore(&ipath_devs_lock, flags);
476 layer_remove_one(dd->ipath_layer.l_arg);
477 spin_lock_irqsave(&ipath_devs_lock, flags);
478 dd->ipath_layer.l_arg = NULL;
479 }
480 }
481
482 spin_unlock_irqrestore(&ipath_devs_lock, flags);
483
484 layer_add_one = NULL;
485 layer_remove_one = NULL;
486 layer_intr = NULL;
487 layer_rcv = NULL;
488 layer_rcv_lid = NULL;
489
490 mutex_unlock(&ipath_layer_mutex);
491}
492
493EXPORT_SYMBOL_GPL(ipath_layer_unregister);
494
495static void __ipath_verbs_timer(unsigned long arg)
496{
497 struct ipath_devdata *dd = (struct ipath_devdata *) arg;
498
499 /*
500 * If port 0 receive packet interrupts are not available, or
501 * can be missed, poll the receive queue
502 */
503 if (dd->ipath_flags & IPATH_POLL_RX_INTR)
504 ipath_kreceive(dd);
505
506 /* Handle verbs layer timeouts. */
507 if (dd->verbs_layer.l_arg && verbs_timer_cb)
508 verbs_timer_cb(dd->verbs_layer.l_arg);
509
510 mod_timer(&dd->verbs_layer.l_timer, jiffies + 1);
511}
512
513/**
514 * ipath_verbs_register - verbs layer registration
515 * @l_piobufavail: callback for when PIO buffers become available
516 * @l_rcv: callback for receiving a packet
517 * @l_timer_cb: timer callback
518 * @ipath_devdata: device data structure is put here
519 */
520int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *),
521 void (*l_remove)(void *arg),
522 int (*l_piobufavail) (void *arg),
523 void (*l_rcv) (void *arg, void *rhdr,
524 void *data, u32 tlen),
525 void (*l_timer_cb) (void *arg))
526{
527 struct ipath_devdata *dd, *tmp;
528 unsigned long flags;
529
530 mutex_lock(&ipath_layer_mutex);
531
532 verbs_add_one = l_add;
533 verbs_remove_one = l_remove;
534 verbs_piobufavail = l_piobufavail;
535 verbs_rcv = l_rcv;
536 verbs_timer_cb = l_timer_cb;
537
538 spin_lock_irqsave(&ipath_devs_lock, flags);
539
540 list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
541 if (!(dd->ipath_flags & IPATH_INITTED))
542 continue;
543
544 if (dd->verbs_layer.l_arg)
545 continue;
546
547 spin_unlock_irqrestore(&ipath_devs_lock, flags);
548 dd->verbs_layer.l_arg = l_add(dd->ipath_unit, dd);
549 spin_lock_irqsave(&ipath_devs_lock, flags);
550 }
551
552 spin_unlock_irqrestore(&ipath_devs_lock, flags);
553 mutex_unlock(&ipath_layer_mutex);
554
555 ipath_verbs_registered = 1;
556
557 return 0;
558}
559
560EXPORT_SYMBOL_GPL(ipath_verbs_register);
561
562void ipath_verbs_unregister(void)
563{
564 struct ipath_devdata *dd, *tmp;
565 unsigned long flags;
566
567 mutex_lock(&ipath_layer_mutex);
568 spin_lock_irqsave(&ipath_devs_lock, flags);
569
570 list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
571 *dd->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
572
573 if (dd->verbs_layer.l_arg && verbs_remove_one) {
574 spin_unlock_irqrestore(&ipath_devs_lock, flags);
575 verbs_remove_one(dd->verbs_layer.l_arg);
576 spin_lock_irqsave(&ipath_devs_lock, flags);
577 dd->verbs_layer.l_arg = NULL;
578 }
579 }
580
581 spin_unlock_irqrestore(&ipath_devs_lock, flags);
582
583 verbs_add_one = NULL;
584 verbs_remove_one = NULL;
585 verbs_piobufavail = NULL;
586 verbs_rcv = NULL;
587 verbs_timer_cb = NULL;
588
589 mutex_unlock(&ipath_layer_mutex);
590}
591
592EXPORT_SYMBOL_GPL(ipath_verbs_unregister);
593
594int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
595{
596 int ret;
597 u32 intval = 0;
598
599 mutex_lock(&ipath_layer_mutex);
600
601 if (!dd->ipath_layer.l_arg) {
602 ret = -EINVAL;
603 goto bail;
604 }
605
606 ret = ipath_setrcvhdrsize(dd, NUM_OF_EXTRA_WORDS_IN_HEADER_QUEUE);
607
608 if (ret < 0)
609 goto bail;
610
611 *pktmax = dd->ipath_ibmaxlen;
612
613 if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
614 intval |= IPATH_LAYER_INT_IF_UP;
615 if (ipath_stats.sps_lid[dd->ipath_unit])
616 intval |= IPATH_LAYER_INT_LID;
617 if (ipath_stats.sps_mlid[dd->ipath_unit])
618 intval |= IPATH_LAYER_INT_BCAST;
619 /*
620	 * do this on open, in case the low level is already up and
621	 * the layered driver was just reloaded, etc.
622 */
623 if (intval)
624 layer_intr(dd->ipath_layer.l_arg, intval);
625
626 ret = 0;
627bail:
628 mutex_unlock(&ipath_layer_mutex);
629
630 return ret;
631}
632
633EXPORT_SYMBOL_GPL(ipath_layer_open);
634
635u16 ipath_layer_get_lid(struct ipath_devdata *dd)
636{
637 return dd->ipath_lid;
638}
639
640EXPORT_SYMBOL_GPL(ipath_layer_get_lid);
641
642/**
643 * ipath_layer_get_mac - get the MAC address
644 * @dd: the infinipath device
645 * @mac: the MAC is put here
646 *
647	 * This is the EUI-64 OUI octets (top 3), then we
648	 * skip the next 2 (which should both be zero or 0xff).
649	 * The returned MAC is in network order.
650	 * mac points to at least 6 bytes of buffer.
651	 * We assume that by the time the LID is set, the GUID is as valid
652	 * as it's ever going to be, rather than adding yet another status bit.
653 */
654
655int ipath_layer_get_mac(struct ipath_devdata *dd, u8 * mac)
656{
657 u8 *guid;
658
659 guid = (u8 *) &dd->ipath_guid;
660
661 mac[0] = guid[0];
662 mac[1] = guid[1];
663 mac[2] = guid[2];
664 mac[3] = guid[5];
665 mac[4] = guid[6];
666 mac[5] = guid[7];
667 if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff))
668 ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xffff: "
669 "%x %x\n", guid[3], guid[4]);
670 return 0;
671}
672
673EXPORT_SYMBOL_GPL(ipath_layer_get_mac);
674
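The GUID-to-MAC mapping above simply drops bytes 3 and 4 of the EUI-64 (the zero or 0xff filler) and keeps the remaining six octets in order. A stand-alone sketch with a made-up GUID:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical EUI-64 GUID in network byte order */
	uint8_t guid[8] = { 0x00, 0x11, 0x75, 0xff, 0xff, 0x01, 0x02, 0x03 };
	uint8_t mac[6];

	mac[0] = guid[0];
	mac[1] = guid[1];
	mac[2] = guid[2];
	mac[3] = guid[5];	/* skip guid[3] and guid[4] */
	mac[4] = guid[6];
	mac[5] = guid[7];

	printf("mac %02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
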
675u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
676{
677 return dd->ipath_mlid;
678}
679
680EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);
681
682u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd)
683{
684 return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
685}
686
687EXPORT_SYMBOL_GPL(ipath_layer_get_cr_errpkey);
688
689static void update_sge(struct ipath_sge_state *ss, u32 length)
690{
691 struct ipath_sge *sge = &ss->sge;
692
693 sge->vaddr += length;
694 sge->length -= length;
695 sge->sge_length -= length;
696 if (sge->sge_length == 0) {
697 if (--ss->num_sge)
698 *sge = *ss->sg_list++;
699 } else if (sge->length == 0 && sge->mr != NULL) {
700 if (++sge->n >= IPATH_SEGSZ) {
701 if (++sge->m >= sge->mr->mapsz)
702 return;
703 sge->n = 0;
704 }
705 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
706 sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
707 }
708}
709
710#ifdef __LITTLE_ENDIAN
711static inline u32 get_upper_bits(u32 data, u32 shift)
712{
713 return data >> shift;
714}
715
716static inline u32 set_upper_bits(u32 data, u32 shift)
717{
718 return data << shift;
719}
720
721static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
722{
723 data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
724 data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
725 return data;
726}
727#else
728static inline u32 get_upper_bits(u32 data, u32 shift)
729{
730 return data << shift;
731}
732
733static inline u32 set_upper_bits(u32 data, u32 shift)
734{
735 return data >> shift;
736}
737
738static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
739{
740 data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
741 data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
742 return data;
743}
744#endif
745
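The get_upper_bits()/set_upper_bits()/clear_upper_bytes() helpers exist so that copy_io() below can merge byte-unaligned source data into aligned 32-bit PIO writes regardless of host endianness. A small stand-alone sketch of the little-endian variants, showing how two leftover bytes from one source word are combined with the start of the next (the word values are made up):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8

/* little-endian variants, as in the #ifdef __LITTLE_ENDIAN branch above */
static uint32_t get_upper_bits(uint32_t data, uint32_t shift)
{
	return data >> shift;
}

static uint32_t set_upper_bits(uint32_t data, uint32_t shift)
{
	return data << shift;
}

int main(void)
{
	uint32_t first = 0x44332211;	/* word holding the leftover bytes */
	uint32_t second = 0x88776655;	/* next source word */
	uint32_t extra = 2;		/* two bytes already consumed */

	/* keep the two unconsumed bytes of 'first' in the low half ... */
	uint32_t data = get_upper_bits(first, extra * BITS_PER_BYTE);
	/* ... and splice the start of 'second' above them */
	data |= set_upper_bits(second, extra * BITS_PER_BYTE);

	printf("merged word: 0x%08x\n", data);	/* prints 0x66554433 */
	return 0;
}
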
746static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
747 u32 length)
748{
749 u32 extra = 0;
750 u32 data = 0;
751 u32 last;
752
753 while (1) {
754 u32 len = ss->sge.length;
755 u32 off;
756
757 BUG_ON(len == 0);
758 if (len > length)
759 len = length;
760 if (len > ss->sge.sge_length)
761 len = ss->sge.sge_length;
762 /* If the source address is not aligned, try to align it. */
763 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
764 if (off) {
765 u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
766 ~(sizeof(u32) - 1));
767 u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
768 u32 y;
769
770 y = sizeof(u32) - off;
771 if (len > y)
772 len = y;
773 if (len + extra >= sizeof(u32)) {
774 data |= set_upper_bits(v, extra *
775 BITS_PER_BYTE);
776 len = sizeof(u32) - extra;
777 if (len == length) {
778 last = data;
779 break;
780 }
781 __raw_writel(data, piobuf);
782 piobuf++;
783 extra = 0;
784 data = 0;
785 } else {
786 /* Clear unused upper bytes */
787 data |= clear_upper_bytes(v, len, extra);
788 if (len == length) {
789 last = data;
790 break;
791 }
792 extra += len;
793 }
794 } else if (extra) {
795 /* Source address is aligned. */
796 u32 *addr = (u32 *) ss->sge.vaddr;
797 int shift = extra * BITS_PER_BYTE;
798 int ushift = 32 - shift;
799 u32 l = len;
800
801 while (l >= sizeof(u32)) {
802 u32 v = *addr;
803
804 data |= set_upper_bits(v, shift);
805 __raw_writel(data, piobuf);
806 data = get_upper_bits(v, ushift);
807 piobuf++;
808 addr++;
809 l -= sizeof(u32);
810 }
811 /*
812 * We still have 'extra' number of bytes leftover.
813 */
814 if (l) {
815 u32 v = *addr;
816
817 if (l + extra >= sizeof(u32)) {
818 data |= set_upper_bits(v, shift);
819 len -= l + extra - sizeof(u32);
820 if (len == length) {
821 last = data;
822 break;
823 }
824 __raw_writel(data, piobuf);
825 piobuf++;
826 extra = 0;
827 data = 0;
828 } else {
829 /* Clear unused upper bytes */
830 data |= clear_upper_bytes(v, l,
831 extra);
832 if (len == length) {
833 last = data;
834 break;
835 }
836 extra += l;
837 }
838 } else if (len == length) {
839 last = data;
840 break;
841 }
842 } else if (len == length) {
843 u32 w;
844
845 /*
846 * Need to round up for the last dword in the
847 * packet.
848 */
849 w = (len + 3) >> 2;
850 __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
851 piobuf += w - 1;
852 last = ((u32 *) ss->sge.vaddr)[w - 1];
853 break;
854 } else {
855 u32 w = len >> 2;
856
857 __iowrite32_copy(piobuf, ss->sge.vaddr, w);
858 piobuf += w;
859
860 extra = len & (sizeof(u32) - 1);
861 if (extra) {
862 u32 v = ((u32 *) ss->sge.vaddr)[w];
863
864 /* Clear unused upper bytes */
865 data = clear_upper_bytes(v, extra, 0);
866 }
867 }
868 update_sge(ss, len);
869 length -= len;
870 }
871 /* must flush early everything before trigger word */
872 ipath_flush_wc();
873 __raw_writel(last, piobuf);
874 /* be sure trigger word is written */
875 ipath_flush_wc();
876 update_sge(ss, length);
877}
878
879/**
880 * ipath_verbs_send - send a packet from the verbs layer
881 * @dd: the infinipath device
882	 * @hdrwords: the number of words in the header
883 * @hdr: the packet header
884 * @len: the length of the packet in bytes
885 * @ss: the SGE to send
886 *
887 * This is like ipath_sma_send_pkt() in that we need to be able to send
888 * packets after the chip is initialized (MADs) but also like
889	 * ipath_layer_send_hdr() since it's used by the verbs layer.
890 */
891int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
892 u32 *hdr, u32 len, struct ipath_sge_state *ss)
893{
894 u32 __iomem *piobuf;
895 u32 plen;
896 int ret;
897
898 /* +1 is for the qword padding of pbc */
899 plen = hdrwords + ((len + 3) >> 2) + 1;
900 if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
901 ipath_dbg("packet len 0x%x too long, failing\n", plen);
902 ret = -EINVAL;
903 goto bail;
904 }
905
906 /* Get a PIO buffer to use. */
907 piobuf = ipath_getpiobuf(dd, NULL);
908 if (unlikely(piobuf == NULL)) {
909 ret = -EBUSY;
910 goto bail;
911 }
912
913 /*
914 * Write len to control qword, no flags.
915 * We have to flush after the PBC for correctness on some cpus
916 * or WC buffer can be written out of order.
917 */
918 writeq(plen, piobuf);
919 ipath_flush_wc();
920 piobuf += 2;
921 if (len == 0) {
922 /*
923 * If there is just the header portion, must flush before
924 * writing last word of header for correctness, and after
925 * the last header word (trigger word).
926 */
927 __iowrite32_copy(piobuf, hdr, hdrwords - 1);
928 ipath_flush_wc();
929 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
930 ipath_flush_wc();
931 ret = 0;
932 goto bail;
933 }
934
935 __iowrite32_copy(piobuf, hdr, hdrwords);
936 piobuf += hdrwords;
937
938 /* The common case is aligned and contained in one segment. */
939 if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
940 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
941 u32 w;
942
943 /* Need to round up for the last dword in the packet. */
944 w = (len + 3) >> 2;
945 __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
946 /* must flush early everything before trigger word */
947 ipath_flush_wc();
948 __raw_writel(((u32 *) ss->sge.vaddr)[w - 1],
949 piobuf + w - 1);
950 /* be sure trigger word is written */
951 ipath_flush_wc();
952 update_sge(ss, len);
953 ret = 0;
954 goto bail;
955 }
956 copy_io(piobuf, ss, len);
957 ret = 0;
958
959bail:
960 return ret;
961}
962
963EXPORT_SYMBOL_GPL(ipath_verbs_send);
964
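The length check at the top of ipath_verbs_send() counts everything that goes into the PIO buffer in 32-bit words: the header words, the payload rounded up to a whole dword, and one extra word for the PBC qword padding; the packet is rejected if that total, in bytes, exceeds ipath_ibmaxlen. A stand-alone sketch of the same calculation with made-up sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hdrwords = 13;		/* header length in 32-bit words */
	uint32_t len = 1025;		/* payload length in bytes */
	uint32_t ibmaxlen = 2176;	/* assumed chip/MTU limit in bytes */

	/* same formula as ipath_verbs_send(): +1 for the PBC padding */
	uint32_t plen = hdrwords + ((len + 3) >> 2) + 1;

	printf("plen = %u words (%u bytes), %s\n", plen, plen << 2,
	       (plen << 2) > ibmaxlen ? "too long" : "fits");
	return 0;
}
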
965int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
966 u64 *rwords, u64 *spkts, u64 *rpkts,
967 u64 *xmit_wait)
968{
969 int ret;
970
971 if (!(dd->ipath_flags & IPATH_INITTED)) {
972 /* no hardware, freeze, etc. */
973 ipath_dbg("unit %u not usable\n", dd->ipath_unit);
974 ret = -EINVAL;
975 goto bail;
976 }
977 *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
978 *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
979 *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
980 *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
981 *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
982
983 ret = 0;
984
985bail:
986 return ret;
987}
988
989EXPORT_SYMBOL_GPL(ipath_layer_snapshot_counters);
990
991/**
992 * ipath_layer_get_counters - get various chip counters
993 * @dd: the infinipath device
994 * @cntrs: counters are placed here
995 *
996 * Return the counters needed by recv_pma_get_portcounters().
997 */
998int ipath_layer_get_counters(struct ipath_devdata *dd,
999 struct ipath_layer_counters *cntrs)
1000{
1001 int ret;
1002
1003 if (!(dd->ipath_flags & IPATH_INITTED)) {
1004 /* no hardware, freeze, etc. */
1005 ipath_dbg("unit %u not usable\n", dd->ipath_unit);
1006 ret = -EINVAL;
1007 goto bail;
1008 }
1009 cntrs->symbol_error_counter =
1010 ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
1011 cntrs->link_error_recovery_counter =
1012 ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
1013 cntrs->link_downed_counter =
1014 ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
1015 cntrs->port_rcv_errors =
1016 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
1017 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
1018 ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
1019 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errrcvflowctrlcnt) +
1020 ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
1021 ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
1022 ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
1023 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
1024 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
1025 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlinkcnt) +
1026 ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
1027 cntrs->port_rcv_remphys_errors =
1028 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
1029 cntrs->port_xmit_discards =
1030 ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
1031 cntrs->port_xmit_data =
1032 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
1033 cntrs->port_rcv_data =
1034 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
1035 cntrs->port_xmit_packets =
1036 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
1037 cntrs->port_rcv_packets =
1038 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
1039
1040 ret = 0;
1041
1042bail:
1043 return ret;
1044}
1045
1046EXPORT_SYMBOL_GPL(ipath_layer_get_counters);
1047
1048int ipath_layer_want_buffer(struct ipath_devdata *dd)
1049{
1050 set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
1051 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1052 dd->ipath_sendctrl);
1053
1054 return 0;
1055}
1056
1057EXPORT_SYMBOL_GPL(ipath_layer_want_buffer);
1058
1059int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
1060{
1061 int ret = 0;
1062 u32 __iomem *piobuf;
1063 u32 plen, *uhdr;
1064 size_t count;
1065 __be16 vlsllnh;
1066
1067 if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) {
1068 ipath_dbg("send while not open\n");
1069 ret = -EINVAL;
1070 } else
1071 if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) ||
1072 dd->ipath_lid == 0) {
1073 /*
1074		 * The LID check is for the case where the SMA hasn't yet configured it
1075 */
1076 ret = -ENETDOWN;
1077 ipath_cdbg(VERBOSE, "send while not ready, "
1078 "mylid=%u, flags=0x%x\n",
1079 dd->ipath_lid, dd->ipath_flags);
1080 }
1081
1082 vlsllnh = *((__be16 *) hdr);
1083 if (vlsllnh != htons(IPS_LRH_BTH)) {
1084 ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
1085 "not sending\n", be16_to_cpu(vlsllnh),
1086 IPS_LRH_BTH);
1087 ret = -EINVAL;
1088 }
1089 if (ret)
1090 goto done;
1091
1092 /* Get a PIO buffer to use. */
1093 piobuf = ipath_getpiobuf(dd, NULL);
1094 if (piobuf == NULL) {
1095 ret = -EBUSY;
1096 goto done;
1097 }
1098
1099 plen = (sizeof(*hdr) >> 2); /* actual length */
1100 ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf);
1101
1102 writeq(plen+1, piobuf); /* len (+1 for pad) to pbc, no flags */
1103 ipath_flush_wc();
1104 piobuf += 2;
1105 uhdr = (u32 *)hdr;
1106 count = plen-1; /* amount we can copy before trigger word */
1107 __iowrite32_copy(piobuf, uhdr, count);
1108 ipath_flush_wc();
1109 __raw_writel(uhdr[count], piobuf + count);
1110 ipath_flush_wc(); /* ensure it's sent, now */
1111
1112 ipath_stats.sps_ether_spkts++; /* ether packet sent */
1113
1114done:
1115 return ret;
1116}
1117
1118EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);
1119
1120int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
1121{
1122 set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
1123
1124 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1125 dd->ipath_sendctrl);
1126 return 0;
1127}
1128
1129EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);
1130
1131int ipath_layer_enable_timer(struct ipath_devdata *dd)
1132{
1133 /*
1134	 * HT-400 has a design flaw where the chip's and the kernel's ideas
1135 * of the tail register don't always agree, and therefore we won't
1136 * get an interrupt on the next packet received.
1137 * If the board supports per packet receive interrupts, use it.
1138 * Otherwise, the timer function periodically checks for packets
1139 * to cover this case.
1140 * Either way, the timer is needed for verbs layer related
1141 * processing.
1142 */
1143 if (dd->ipath_flags & IPATH_GPIO_INTR) {
1144 ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
1145 0x2074076542310ULL);
1146 /* Enable GPIO bit 2 interrupt */
1147 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
1148 (u64) (1 << 2));
1149 }
1150
1151 init_timer(&dd->verbs_layer.l_timer);
1152 dd->verbs_layer.l_timer.function = __ipath_verbs_timer;
1153 dd->verbs_layer.l_timer.data = (unsigned long)dd;
1154 dd->verbs_layer.l_timer.expires = jiffies + 1;
1155 add_timer(&dd->verbs_layer.l_timer);
1156
1157 return 0;
1158}
1159
1160EXPORT_SYMBOL_GPL(ipath_layer_enable_timer);
1161
1162int ipath_layer_disable_timer(struct ipath_devdata *dd)
1163{
1164 /* Disable GPIO bit 2 interrupt */
1165 if (dd->ipath_flags & IPATH_GPIO_INTR)
1166 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);
1167
1168 del_timer_sync(&dd->verbs_layer.l_timer);
1169
1170 return 0;
1171}
1172
1173EXPORT_SYMBOL_GPL(ipath_layer_disable_timer);
1174
1175/**
1176 * ipath_layer_set_verbs_flags - set the verbs layer flags
1177 * @dd: the infinipath device
1178 * @flags: the flags to set
1179 */
1180int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
1181{
1182 struct ipath_devdata *ss;
1183 unsigned long lflags;
1184
1185 spin_lock_irqsave(&ipath_devs_lock, lflags);
1186
1187 list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
1188 if (!(ss->ipath_flags & IPATH_INITTED))
1189 continue;
1190 if ((flags & IPATH_VERBS_KERNEL_SMA) &&
1191 !(*ss->ipath_statusp & IPATH_STATUS_SMA))
1192 *ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
1193 else
1194 *ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
1195 }
1196
1197 spin_unlock_irqrestore(&ipath_devs_lock, lflags);
1198
1199 return 0;
1200}
1201
1202EXPORT_SYMBOL_GPL(ipath_layer_set_verbs_flags);
1203
1204/**
1205 * ipath_layer_get_npkeys - return the size of the PKEY table for port 0
1206 * @dd: the infinipath device
1207 */
1208unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd)
1209{
1210 return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
1211}
1212
1213EXPORT_SYMBOL_GPL(ipath_layer_get_npkeys);
1214
1215/**
1216 * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table
1217 * @dd: the infinipath device
1218 * @index: the PKEY index
1219 */
1220unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index)
1221{
1222 unsigned ret;
1223
1224 if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
1225 ret = 0;
1226 else
1227 ret = dd->ipath_pd[0]->port_pkeys[index];
1228
1229 return ret;
1230}
1231
1232EXPORT_SYMBOL_GPL(ipath_layer_get_pkey);
1233
1234/**
1235 * ipath_layer_get_pkeys - return the PKEY table for port 0
1236 * @dd: the infinipath device
1237 * @pkeys: the pkey table is placed here
1238 */
1239int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
1240{
1241 struct ipath_portdata *pd = dd->ipath_pd[0];
1242
1243 memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
1244
1245 return 0;
1246}
1247
1248EXPORT_SYMBOL_GPL(ipath_layer_get_pkeys);
1249
1250/**
1251 * rm_pkey - decrement the reference count for the given PKEY
1252 * @dd: the infinipath device
1253 * @key: the PKEY index
1254 *
1255 * Return true if this was the last reference and the hardware table entry
1256 * needs to be changed.
1257 */
1258static int rm_pkey(struct ipath_devdata *dd, u16 key)
1259{
1260 int i;
1261 int ret;
1262
1263 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
1264 if (dd->ipath_pkeys[i] != key)
1265 continue;
1266 if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
1267 dd->ipath_pkeys[i] = 0;
1268 ret = 1;
1269 goto bail;
1270 }
1271 break;
1272 }
1273
1274 ret = 0;
1275
1276bail:
1277 return ret;
1278}
1279
1280/**
1281 * add_pkey - add the given PKEY to the hardware table
1282 * @dd: the infinipath device
1283 * @key: the PKEY
1284 *
1285 * Return an error code if unable to add the entry, zero if no change,
1286 * or 1 if the hardware PKEY register needs to be updated.
1287 */
1288static int add_pkey(struct ipath_devdata *dd, u16 key)
1289{
1290 int i;
1291 u16 lkey = key & 0x7FFF;
1292 int any = 0;
1293 int ret;
1294
1295 if (lkey == 0x7FFF) {
1296 ret = 0;
1297 goto bail;
1298 }
1299
1300 /* Look for an empty slot or a matching PKEY. */
1301 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
1302 if (!dd->ipath_pkeys[i]) {
1303 any++;
1304 continue;
1305 }
1306 /* If it matches exactly, try to increment the ref count */
1307 if (dd->ipath_pkeys[i] == key) {
1308 if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
1309 ret = 0;
1310 goto bail;
1311 }
1312 /* Lost the race. Look for an empty slot below. */
1313 atomic_dec(&dd->ipath_pkeyrefs[i]);
1314 any++;
1315 }
1316 /*
1317 * It makes no sense to have both the limited and unlimited
1318 * PKEY set at the same time since the unlimited one will
1319 * disable the limited one.
1320 */
1321 if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
1322 ret = -EEXIST;
1323 goto bail;
1324 }
1325 }
1326 if (!any) {
1327 ret = -EBUSY;
1328 goto bail;
1329 }
1330 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
1331 if (!dd->ipath_pkeys[i] &&
1332 atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
1333 /* for ipathstats, etc. */
1334 ipath_stats.sps_pkeys[i] = lkey;
1335 dd->ipath_pkeys[i] = key;
1336 ret = 1;
1337 goto bail;
1338 }
1339 }
1340 ret = -EBUSY;
1341
1342bail:
1343 return ret;
1344}
1345
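/*
 * Editorial note, not part of the original source: in InfiniBand terms the
 * top bit of a PKEY is the membership bit (1 == full member, 0 == limited
 * member) and the low 15 bits are the base key, which is why rm_pkey() and
 * add_pkey() mask with 0x7FFF.  A base of 0x7FFF (the default keys
 * 0xFFFF/0x7FFF) is treated as always present, so no table update is needed.
 * As a worked example, adding 0x8001 and then 0x0001 should fail with
 * -EEXIST: both share base 0x0001, and holding the limited and full forms
 * of the same partition key at once is pointless.
 */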
1346/**
1347 * ipath_layer_set_pkeys - set the PKEY table for port 0
1348 * @dd: the infinipath device
1349 * @pkeys: the PKEY table
1350 */
1351int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 * pkeys)
1352{
1353 struct ipath_portdata *pd;
1354 int i;
1355 int changed = 0;
1356
1357 pd = dd->ipath_pd[0];
1358
1359 for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
1360 u16 key = pkeys[i];
1361 u16 okey = pd->port_pkeys[i];
1362
1363 if (key == okey)
1364 continue;
1365 /*
1366 * The value of this PKEY table entry is changing.
1367 * Remove the old entry in the hardware's array of PKEYs.
1368 */
1369 if (okey & 0x7FFF)
1370 changed |= rm_pkey(dd, okey);
1371 if (key & 0x7FFF) {
1372 int ret = add_pkey(dd, key);
1373
1374 if (ret < 0)
1375 key = 0;
1376 else
1377 changed |= ret;
1378 }
1379 pd->port_pkeys[i] = key;
1380 }
1381 if (changed) {
1382 u64 pkey;
1383
1384 pkey = (u64) dd->ipath_pkeys[0] |
1385 ((u64) dd->ipath_pkeys[1] << 16) |
1386 ((u64) dd->ipath_pkeys[2] << 32) |
1387 ((u64) dd->ipath_pkeys[3] << 48);
1388 ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
1389 (unsigned long long) pkey);
1390 ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
1391 pkey);
1392 }
1393 return 0;
1394}
1395
1396EXPORT_SYMBOL_GPL(ipath_layer_set_pkeys);
1397
1398/**
1399 * ipath_layer_get_linkdowndefaultstate - get the default linkdown state
1400 * @dd: the infinipath device
1401 *
1402 * Returns zero if the default is POLL, 1 if the default is SLEEP.
1403 */
1404int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd)
1405{
1406 return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
1407}
1408
1409EXPORT_SYMBOL_GPL(ipath_layer_get_linkdowndefaultstate);
1410
1411/**
1412 * ipath_layer_set_linkdowndefaultstate - set the default linkdown state
1413 * @dd: the infinipath device
1414 * @sleep: the new state
1415 *
1416 * Note that this will only take effect when the link state changes.
1417 */
1418int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
1419 int sleep)
1420{
1421 if (sleep)
1422 dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
1423 else
1424 dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
1425 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1426 dd->ipath_ibcctrl);
1427 return 0;
1428}
1429
1430EXPORT_SYMBOL_GPL(ipath_layer_set_linkdowndefaultstate);
1431
1432int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd)
1433{
1434 return (dd->ipath_ibcctrl >>
1435 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
1436 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
1437}
1438
1439EXPORT_SYMBOL_GPL(ipath_layer_get_phyerrthreshold);
1440
1441/**
1442 * ipath_layer_set_phyerrthreshold - set the physical error threshold
1443 * @dd: the infinipath device
1444 * @n: the new threshold
1445 *
1446 * Note that this will only take effect when the link state changes.
1447 */
1448int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
1449{
1450 unsigned v;
1451
1452 v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
1453 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
1454 if (v != n) {
1455 dd->ipath_ibcctrl &=
1456 ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
1457 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
1458 dd->ipath_ibcctrl |=
1459 (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
1460 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1461 dd->ipath_ibcctrl);
1462 }
1463 return 0;
1464}
1465
1466EXPORT_SYMBOL_GPL(ipath_layer_set_phyerrthreshold);
1467
1468int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd)
1469{
1470 return (dd->ipath_ibcctrl >>
1471 INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
1472 INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
1473}
1474
1475EXPORT_SYMBOL_GPL(ipath_layer_get_overrunthreshold);
1476
1477/**
1478 * ipath_layer_set_overrunthreshold - set the overrun threshold
1479 * @dd: the infinipath device
1480 * @n: the new threshold
1481 *
1482 * Note that this will only take effect when the link state changes.
1483 */
1484int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
1485{
1486 unsigned v;
1487
1488 v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
1489 INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
1490 if (v != n) {
1491 dd->ipath_ibcctrl &=
1492 ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
1493 INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
1494 dd->ipath_ibcctrl |=
1495 (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
1496 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1497 dd->ipath_ibcctrl);
1498 }
1499 return 0;
1500}
1501
1502EXPORT_SYMBOL_GPL(ipath_layer_set_overrunthreshold);
1503
1504int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
1505 size_t namelen)
1506{
1507 return dd->ipath_f_get_boardname(dd, name, namelen);
1508}
1509EXPORT_SYMBOL_GPL(ipath_layer_get_boardname);
1510
1511u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd)
1512{
1513 return dd->ipath_rcvhdrentsize;
1514}
1515EXPORT_SYMBOL_GPL(ipath_layer_get_rcvhdrentsize);
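For reference, a minimal stand-alone sketch (editorial, not part of the patch) of the shift-and-or packing that ipath_layer_set_pkeys() uses above to build the 64-bit partition key register from the first four 16-bit PKEYs; standard C integer types stand in for the kernel's u16/u64.

#include <stdint.h>

/* Pack four 16-bit PKEYs into the 64-bit kr_partitionkey layout,
 * mirroring the expression in ipath_layer_set_pkeys(). */
static uint64_t pack_pkey_reg(const uint16_t pkeys[4])
{
	return (uint64_t) pkeys[0] |
	       ((uint64_t) pkeys[1] << 16) |
	       ((uint64_t) pkeys[2] << 32) |
	       ((uint64_t) pkeys[3] << 48);
}

For example, a table of {0xFFFF, 0, 0, 0} packs to 0x000000000000FFFF.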
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.h b/drivers/infiniband/hw/ipath/ipath_layer.h
new file mode 100644
index 000000000000..6fefd15bd2da
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_layer.h
@@ -0,0 +1,181 @@
1/*
2 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef _IPATH_LAYER_H
34#define _IPATH_LAYER_H
35
36/*
37 * This header file is for symbols shared between the infinipath driver
38 * and drivers layered upon it (such as ipath).
39 */
40
41struct sk_buff;
42struct ipath_sge_state;
43struct ipath_devdata;
44struct ether_header;
45
46struct ipath_layer_counters {
47 u64 symbol_error_counter;
48 u64 link_error_recovery_counter;
49 u64 link_downed_counter;
50 u64 port_rcv_errors;
51 u64 port_rcv_remphys_errors;
52 u64 port_xmit_discards;
53 u64 port_xmit_data;
54 u64 port_rcv_data;
55 u64 port_xmit_packets;
56 u64 port_rcv_packets;
57};
58
59/*
60 * A segment is a linear region of low physical memory.
61 * XXX Maybe we should use phys addr here and kmap()/kunmap().
62 * Used by the verbs layer.
63 */
64struct ipath_seg {
65 void *vaddr;
66 size_t length;
67};
68
69/* The number of ipath_segs that fit in a page. */
70#define IPATH_SEGSZ (PAGE_SIZE / sizeof (struct ipath_seg))
71
72struct ipath_segarray {
73 struct ipath_seg segs[IPATH_SEGSZ];
74};
75
76struct ipath_mregion {
77 u64 user_base; /* User's address for this region */
78 u64 iova; /* IB start address of this region */
79 size_t length;
80 u32 lkey;
81 u32 offset; /* offset (bytes) to start of region */
82 int access_flags;
83 u32 max_segs; /* number of ipath_segs in all the arrays */
84 u32 mapsz; /* size of the map array */
85 struct ipath_segarray *map[0]; /* the segments */
86};
87
88/*
89 * These keep track of the copy progress within a memory region.
90 * Used by the verbs layer.
91 */
92struct ipath_sge {
93 struct ipath_mregion *mr;
94 void *vaddr; /* current pointer into the segment */
95 u32 sge_length; /* length of the SGE */
96 u32 length; /* remaining length of the segment */
97 u16 m; /* current index: mr->map[m] */
98 u16 n; /* current index: mr->map[m]->segs[n] */
99};
100
101struct ipath_sge_state {
102 struct ipath_sge *sg_list; /* next SGE to be used if any */
103 struct ipath_sge sge; /* progress state for the current SGE */
104 u8 num_sge;
105};
106
107int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
108 void (*l_remove)(void *),
109 int (*l_intr)(void *, u32),
110 int (*l_rcv)(void *, void *,
111 struct sk_buff *),
112 u16 rcv_opcode,
113 int (*l_rcv_lid)(void *, void *));
114int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *),
115 void (*l_remove)(void *arg),
116 int (*l_piobufavail)(void *arg),
117 void (*l_rcv)(void *arg, void *rhdr,
118 void *data, u32 tlen),
119 void (*l_timer_cb)(void *arg));
120void ipath_layer_unregister(void);
121void ipath_verbs_unregister(void);
122int ipath_layer_open(struct ipath_devdata *, u32 * pktmax);
123u16 ipath_layer_get_lid(struct ipath_devdata *dd);
124int ipath_layer_get_mac(struct ipath_devdata *dd, u8 *);
125u16 ipath_layer_get_bcast(struct ipath_devdata *dd);
126u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd);
127int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 state);
128int ipath_layer_set_mtu(struct ipath_devdata *, u16);
129int ipath_set_sps_lid(struct ipath_devdata *, u32, u8);
130int ipath_layer_send_hdr(struct ipath_devdata *dd,
131 struct ether_header *hdr);
132int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
133 u32 * hdr, u32 len, struct ipath_sge_state *ss);
134int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd);
135int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
136 size_t namelen);
137int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
138 u64 *rwords, u64 *spkts, u64 *rpkts,
139 u64 *xmit_wait);
140int ipath_layer_get_counters(struct ipath_devdata *dd,
141 struct ipath_layer_counters *cntrs);
142int ipath_layer_want_buffer(struct ipath_devdata *dd);
143int ipath_layer_set_guid(struct ipath_devdata *, __be64 guid);
144__be64 ipath_layer_get_guid(struct ipath_devdata *);
145u32 ipath_layer_get_nguid(struct ipath_devdata *);
146int ipath_layer_query_device(struct ipath_devdata *, u32 * vendor,
147 u32 * boardrev, u32 * majrev, u32 * minrev);
148u32 ipath_layer_get_flags(struct ipath_devdata *dd);
149struct device *ipath_layer_get_device(struct ipath_devdata *dd);
150u16 ipath_layer_get_deviceid(struct ipath_devdata *dd);
151u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd);
152u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd);
153int ipath_layer_enable_timer(struct ipath_devdata *dd);
154int ipath_layer_disable_timer(struct ipath_devdata *dd);
155int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags);
156unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd);
157unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index);
158int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 *pkeys);
159int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 *pkeys);
160int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd);
161int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
162 int sleep);
163int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd);
164int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n);
165int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd);
166int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n);
167u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd);
168
169/* ipath_ether interrupt values */
170#define IPATH_LAYER_INT_IF_UP 0x2
171#define IPATH_LAYER_INT_IF_DOWN 0x4
172#define IPATH_LAYER_INT_LID 0x8
173#define IPATH_LAYER_INT_SEND_CONTINUE 0x10
174#define IPATH_LAYER_INT_BCAST 0x40
175
176/* _verbs_layer.l_flags */
177#define IPATH_VERBS_KERNEL_SMA 0x1
178
179extern unsigned ipath_debug; /* debugging bit mask */
180
181#endif /* _IPATH_LAYER_H */
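As a usage illustration only (an editorial sketch, not part of the patch): a layered driver holding a valid struct ipath_devdata from its l_add() callback could read the snapshot counters declared above roughly as follows. The function name is hypothetical and the usual kernel headers are assumed.

static void example_dump_counters(struct ipath_devdata *dd)
{
	u64 swords, rwords, spkts, rpkts, xwait;

	/* Returns 0 on success, -EINVAL if the unit is not usable. */
	if (ipath_layer_snapshot_counters(dd, &swords, &rwords,
					  &spkts, &rpkts, &xwait) == 0)
		printk(KERN_INFO "ipath: sent %llu words/%llu pkts, "
		       "rcvd %llu words/%llu pkts\n",
		       (unsigned long long) swords,
		       (unsigned long long) spkts,
		       (unsigned long long) rwords,
		       (unsigned long long) rpkts);
}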
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
new file mode 100644
index 000000000000..f7f8391fe43f
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -0,0 +1,1352 @@
1/*
2 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <rdma/ib_smi.h>
34
35#include "ipath_kernel.h"
36#include "ipath_verbs.h"
37#include "ips_common.h"
38
39#define IB_SMP_UNSUP_VERSION __constant_htons(0x0004)
40#define IB_SMP_UNSUP_METHOD __constant_htons(0x0008)
41#define IB_SMP_UNSUP_METH_ATTR __constant_htons(0x000C)
42#define IB_SMP_INVALID_FIELD __constant_htons(0x001C)
43
44static int reply(struct ib_smp *smp)
45{
46 /*
47 * The verbs framework will handle the directed/LID route
48 * packet changes.
49 */
50 smp->method = IB_MGMT_METHOD_GET_RESP;
51 if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
52 smp->status |= IB_SMP_DIRECTION;
53 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
54}
55
56static int recv_subn_get_nodedescription(struct ib_smp *smp,
57 struct ib_device *ibdev)
58{
59 if (smp->attr_mod)
60 smp->status |= IB_SMP_INVALID_FIELD;
61
62 strncpy(smp->data, ibdev->node_desc, sizeof(smp->data));
63
64 return reply(smp);
65}
66
67struct nodeinfo {
68 u8 base_version;
69 u8 class_version;
70 u8 node_type;
71 u8 num_ports;
72 __be64 sys_guid;
73 __be64 node_guid;
74 __be64 port_guid;
75 __be16 partition_cap;
76 __be16 device_id;
77 __be32 revision;
78 u8 local_port_num;
79 u8 vendor_id[3];
80} __attribute__ ((packed));
81
82static int recv_subn_get_nodeinfo(struct ib_smp *smp,
83 struct ib_device *ibdev, u8 port)
84{
85 struct nodeinfo *nip = (struct nodeinfo *)&smp->data;
86 struct ipath_devdata *dd = to_idev(ibdev)->dd;
87 u32 vendor, boardid, majrev, minrev;
88
89 if (smp->attr_mod)
90 smp->status |= IB_SMP_INVALID_FIELD;
91
92 nip->base_version = 1;
93 nip->class_version = 1;
94 nip->node_type = 1; /* channel adapter */
95 /*
96 * XXX The num_ports value will need a layer function to get
97 * the value if we ever have more than one IB port on a chip.
98 * We will also need to get the GUID for the port.
99 */
100 nip->num_ports = ibdev->phys_port_cnt;
101 /* This is already in network order */
102 nip->sys_guid = to_idev(ibdev)->sys_image_guid;
103 nip->node_guid = ipath_layer_get_guid(dd);
104 nip->port_guid = nip->sys_guid;
105 nip->partition_cap = cpu_to_be16(ipath_layer_get_npkeys(dd));
106 nip->device_id = cpu_to_be16(ipath_layer_get_deviceid(dd));
107 ipath_layer_query_device(dd, &vendor, &boardid, &majrev, &minrev);
108 nip->revision = cpu_to_be32((majrev << 16) | minrev);
109 nip->local_port_num = port;
110 nip->vendor_id[0] = 0;
111 nip->vendor_id[1] = vendor >> 8;
112 nip->vendor_id[2] = vendor;
113
114 return reply(smp);
115}
116
117static int recv_subn_get_guidinfo(struct ib_smp *smp,
118 struct ib_device *ibdev)
119{
120 u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
121 __be64 *p = (__be64 *) smp->data;
122
123 /* 32 blocks of 8 64-bit GUIDs per block */
124
125 memset(smp->data, 0, sizeof(smp->data));
126
127 /*
128 * We only support one GUID for now. If this changes, the
129 * portinfo.guid_cap field needs to be updated too.
130 */
131 if (startgx == 0)
132 /* The first is a copy of the read-only HW GUID. */
133 *p = ipath_layer_get_guid(to_idev(ibdev)->dd);
134 else
135 smp->status |= IB_SMP_INVALID_FIELD;
136
137 return reply(smp);
138}
139
140struct port_info {
141 __be64 mkey;
142 __be64 gid_prefix;
143 __be16 lid;
144 __be16 sm_lid;
145 __be32 cap_mask;
146 __be16 diag_code;
147 __be16 mkey_lease_period;
148 u8 local_port_num;
149 u8 link_width_enabled;
150 u8 link_width_supported;
151 u8 link_width_active;
152 u8 linkspeed_portstate; /* 4 bits, 4 bits */
153 u8 portphysstate_linkdown; /* 4 bits, 4 bits */
154 u8 mkeyprot_resv_lmc; /* 2 bits, 3, 3 */
155 u8 linkspeedactive_enabled; /* 4 bits, 4 bits */
156 u8 neighbormtu_mastersmsl; /* 4 bits, 4 bits */
157 u8 vlcap_inittype; /* 4 bits, 4 bits */
158 u8 vl_high_limit;
159 u8 vl_arb_high_cap;
160 u8 vl_arb_low_cap;
161 u8 inittypereply_mtucap; /* 4 bits, 4 bits */
162 u8 vlstallcnt_hoqlife; /* 3 bits, 5 bits */
163 u8 operationalvl_pei_peo_fpi_fpo; /* 4 bits, 1, 1, 1, 1 */
164 __be16 mkey_violations;
165 __be16 pkey_violations;
166 __be16 qkey_violations;
167 u8 guid_cap;
168 u8 clientrereg_resv_subnetto; /* 1 bit, 2 bits, 5 */
169 u8 resv_resptimevalue; /* 3 bits, 5 bits */
170 u8 localphyerrors_overrunerrors; /* 4 bits, 4 bits */
171 __be16 max_credit_hint;
172 u8 resv;
173 u8 link_roundtrip_latency[3];
174} __attribute__ ((packed));
175
176static int recv_subn_get_portinfo(struct ib_smp *smp,
177 struct ib_device *ibdev, u8 port)
178{
179 struct ipath_ibdev *dev;
180 struct port_info *pip = (struct port_info *)smp->data;
181 u16 lid;
182 u8 ibcstat;
183 u8 mtu;
184 int ret;
185
186 if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt) {
187 smp->status |= IB_SMP_INVALID_FIELD;
188 ret = reply(smp);
189 goto bail;
190 }
191
192 dev = to_idev(ibdev);
193
194 /* Clear all fields. Only set the non-zero fields. */
195 memset(smp->data, 0, sizeof(smp->data));
196
197 /* Only return the mkey if the protection field allows it. */
198 if (smp->method == IB_MGMT_METHOD_SET || dev->mkey == smp->mkey ||
199 (dev->mkeyprot_resv_lmc >> 6) == 0)
200 pip->mkey = dev->mkey;
201 pip->gid_prefix = dev->gid_prefix;
202 lid = ipath_layer_get_lid(dev->dd);
203 pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
204 pip->sm_lid = cpu_to_be16(dev->sm_lid);
205 pip->cap_mask = cpu_to_be32(dev->port_cap_flags);
206 /* pip->diag_code; */
207 pip->mkey_lease_period = cpu_to_be16(dev->mkey_lease_period);
208 pip->local_port_num = port;
209 pip->link_width_enabled = dev->link_width_enabled;
210 pip->link_width_supported = 3; /* 1x or 4x */
211 pip->link_width_active = 2; /* 4x */
212 pip->linkspeed_portstate = 0x10; /* 2.5Gbps */
213 ibcstat = ipath_layer_get_lastibcstat(dev->dd);
214 pip->linkspeed_portstate |= ((ibcstat >> 4) & 0x3) + 1;
215 pip->portphysstate_linkdown =
216 (ipath_cvt_physportstate[ibcstat & 0xf] << 4) |
217 (ipath_layer_get_linkdowndefaultstate(dev->dd) ? 1 : 2);
218 pip->mkeyprot_resv_lmc = dev->mkeyprot_resv_lmc;
219 pip->linkspeedactive_enabled = 0x11; /* 2.5Gbps, 2.5Gbps */
220 switch (ipath_layer_get_ibmtu(dev->dd)) {
221 case 4096:
222 mtu = IB_MTU_4096;
223 break;
224 case 2048:
225 mtu = IB_MTU_2048;
226 break;
227 case 1024:
228 mtu = IB_MTU_1024;
229 break;
230 case 512:
231 mtu = IB_MTU_512;
232 break;
233 case 256:
234 mtu = IB_MTU_256;
235 break;
236 default: /* oops, something is wrong */
237 mtu = IB_MTU_2048;
238 break;
239 }
240 pip->neighbormtu_mastersmsl = (mtu << 4) | dev->sm_sl;
241 pip->vlcap_inittype = 0x10; /* VLCap = VL0, InitType = 0 */
242 pip->vl_high_limit = dev->vl_high_limit;
243 /* pip->vl_arb_high_cap; // only one VL */
244 /* pip->vl_arb_low_cap; // only one VL */
245 /* InitTypeReply = 0 */
246 pip->inittypereply_mtucap = IB_MTU_4096;
247	/* HCAs ignore VLStallCount and HOQLife */
248 /* pip->vlstallcnt_hoqlife; */
249 pip->operationalvl_pei_peo_fpi_fpo = 0x10; /* OVLs = 1 */
250 pip->mkey_violations = cpu_to_be16(dev->mkey_violations);
251 /* P_KeyViolations are counted by hardware. */
252 pip->pkey_violations =
253 cpu_to_be16((ipath_layer_get_cr_errpkey(dev->dd) -
254 dev->n_pkey_violations) & 0xFFFF);
255 pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
256 /* Only the hardware GUID is supported for now */
257 pip->guid_cap = 1;
258 pip->clientrereg_resv_subnetto = dev->subnet_timeout;
259 /* 32.768 usec. response time (guessing) */
260 pip->resv_resptimevalue = 3;
261 pip->localphyerrors_overrunerrors =
262 (ipath_layer_get_phyerrthreshold(dev->dd) << 4) |
263 ipath_layer_get_overrunthreshold(dev->dd);
264 /* pip->max_credit_hint; */
265 /* pip->link_roundtrip_latency[3]; */
266
267 ret = reply(smp);
268
269bail:
270 return ret;
271}
272
273static int recv_subn_get_pkeytable(struct ib_smp *smp,
274 struct ib_device *ibdev)
275{
276 u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
277 u16 *p = (u16 *) smp->data;
278 __be16 *q = (__be16 *) smp->data;
279
280 /* 64 blocks of 32 16-bit P_Key entries */
281
282 memset(smp->data, 0, sizeof(smp->data));
283 if (startpx == 0) {
284 struct ipath_ibdev *dev = to_idev(ibdev);
285 unsigned i, n = ipath_layer_get_npkeys(dev->dd);
286
287 ipath_layer_get_pkeys(dev->dd, p);
288
289 for (i = 0; i < n; i++)
290 q[i] = cpu_to_be16(p[i]);
291 } else
292 smp->status |= IB_SMP_INVALID_FIELD;
293
294 return reply(smp);
295}
296
297static int recv_subn_set_guidinfo(struct ib_smp *smp,
298 struct ib_device *ibdev)
299{
300 /* The only GUID we support is the first read-only entry. */
301 return recv_subn_get_guidinfo(smp, ibdev);
302}
303
304/**
305 * recv_subn_set_portinfo - set port information
306 * @smp: the incoming SM packet
307 * @ibdev: the infiniband device
308 * @port: the port on the device
309 *
310 * Set Portinfo (see ch. 14.2.5.6).
311 */
312static int recv_subn_set_portinfo(struct ib_smp *smp,
313 struct ib_device *ibdev, u8 port)
314{
315 struct port_info *pip = (struct port_info *)smp->data;
316 struct ib_event event;
317 struct ipath_ibdev *dev;
318 u32 flags;
319 char clientrereg = 0;
320 u16 lid, smlid;
321 u8 lwe;
322 u8 lse;
323 u8 state;
324 u16 lstate;
325 u32 mtu;
326 int ret;
327
328 if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt)
329 goto err;
330
331 dev = to_idev(ibdev);
332 event.device = ibdev;
333 event.element.port_num = port;
334
335 dev->mkey = pip->mkey;
336 dev->gid_prefix = pip->gid_prefix;
337 dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
338
339 lid = be16_to_cpu(pip->lid);
340 if (lid != ipath_layer_get_lid(dev->dd)) {
341 /* Must be a valid unicast LID address. */
342 if (lid == 0 || lid >= IPS_MULTICAST_LID_BASE)
343 goto err;
344 ipath_set_sps_lid(dev->dd, lid, pip->mkeyprot_resv_lmc & 7);
345 event.event = IB_EVENT_LID_CHANGE;
346 ib_dispatch_event(&event);
347 }
348
349 smlid = be16_to_cpu(pip->sm_lid);
350 if (smlid != dev->sm_lid) {
351 /* Must be a valid unicast LID address. */
352 if (smlid == 0 || smlid >= IPS_MULTICAST_LID_BASE)
353 goto err;
354 dev->sm_lid = smlid;
355 event.event = IB_EVENT_SM_CHANGE;
356 ib_dispatch_event(&event);
357 }
358
359 /* Only 4x supported but allow 1x or 4x to be set (see 14.2.6.6). */
360 lwe = pip->link_width_enabled;
361 if ((lwe >= 4 && lwe <= 8) || (lwe >= 0xC && lwe <= 0xFE))
362 goto err;
363 if (lwe == 0xFF)
364 dev->link_width_enabled = 3; /* 1x or 4x */
365 else if (lwe)
366 dev->link_width_enabled = lwe;
367
368	/* Only 2.5 Gbps supported. */
369 lse = pip->linkspeedactive_enabled & 0xF;
370 if (lse >= 2 && lse <= 0xE)
371 goto err;
372
373 /* Set link down default state. */
374 switch (pip->portphysstate_linkdown & 0xF) {
375 case 0: /* NOP */
376 break;
377 case 1: /* SLEEP */
378 if (ipath_layer_set_linkdowndefaultstate(dev->dd, 1))
379 goto err;
380 break;
381 case 2: /* POLL */
382 if (ipath_layer_set_linkdowndefaultstate(dev->dd, 0))
383 goto err;
384 break;
385 default:
386 goto err;
387 }
388
389 dev->mkeyprot_resv_lmc = pip->mkeyprot_resv_lmc;
390 dev->vl_high_limit = pip->vl_high_limit;
391
392 switch ((pip->neighbormtu_mastersmsl >> 4) & 0xF) {
393 case IB_MTU_256:
394 mtu = 256;
395 break;
396 case IB_MTU_512:
397 mtu = 512;
398 break;
399 case IB_MTU_1024:
400 mtu = 1024;
401 break;
402 case IB_MTU_2048:
403 mtu = 2048;
404 break;
405 case IB_MTU_4096:
406 mtu = 4096;
407 break;
408 default:
409 /* XXX We have already partially updated our state! */
410 goto err;
411 }
412 ipath_layer_set_mtu(dev->dd, mtu);
413
414 dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF;
415
416 /* We only support VL0 */
417 if (((pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF) > 1)
418 goto err;
419
420 if (pip->mkey_violations == 0)
421 dev->mkey_violations = 0;
422
423 /*
424 * Hardware counter can't be reset so snapshot and subtract
425 * later.
426 */
427 if (pip->pkey_violations == 0)
428 dev->n_pkey_violations =
429 ipath_layer_get_cr_errpkey(dev->dd);
430
431 if (pip->qkey_violations == 0)
432 dev->qkey_violations = 0;
433
434 if (ipath_layer_set_phyerrthreshold(
435 dev->dd,
436 (pip->localphyerrors_overrunerrors >> 4) & 0xF))
437 goto err;
438
439 if (ipath_layer_set_overrunthreshold(
440 dev->dd,
441 (pip->localphyerrors_overrunerrors & 0xF)))
442 goto err;
443
444 dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
445
446 if (pip->clientrereg_resv_subnetto & 0x80) {
447 clientrereg = 1;
448 event.event = IB_EVENT_LID_CHANGE;
449 ib_dispatch_event(&event);
450 }
451
452 /*
453 * Do the port state change now that the other link parameters
454 * have been set.
455 * Changing the port physical state only makes sense if the link
456 * is down or is being set to down.
457 */
458 state = pip->linkspeed_portstate & 0xF;
459 flags = ipath_layer_get_flags(dev->dd);
460 lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
461 if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
462 goto err;
463
464 /*
465 * Only state changes of DOWN, ARM, and ACTIVE are valid
466 * and must be in the correct state to take effect (see 7.2.6).
467 */
468 switch (state) {
469 case IB_PORT_NOP:
470 if (lstate == 0)
471 break;
472 /* FALLTHROUGH */
473 case IB_PORT_DOWN:
474 if (lstate == 0)
475 if (ipath_layer_get_linkdowndefaultstate(dev->dd))
476 lstate = IPATH_IB_LINKDOWN_SLEEP;
477 else
478 lstate = IPATH_IB_LINKDOWN;
479 else if (lstate == 1)
480 lstate = IPATH_IB_LINKDOWN_SLEEP;
481 else if (lstate == 2)
482 lstate = IPATH_IB_LINKDOWN;
483 else if (lstate == 3)
484 lstate = IPATH_IB_LINKDOWN_DISABLE;
485 else
486 goto err;
487 ipath_layer_set_linkstate(dev->dd, lstate);
488 if (flags & IPATH_LINKACTIVE) {
489 event.event = IB_EVENT_PORT_ERR;
490 ib_dispatch_event(&event);
491 }
492 break;
493 case IB_PORT_ARMED:
494 if (!(flags & (IPATH_LINKINIT | IPATH_LINKACTIVE)))
495 break;
496 ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKARM);
497 if (flags & IPATH_LINKACTIVE) {
498 event.event = IB_EVENT_PORT_ERR;
499 ib_dispatch_event(&event);
500 }
501 break;
502 case IB_PORT_ACTIVE:
503 if (!(flags & IPATH_LINKARMED))
504 break;
505 ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKACTIVE);
506 event.event = IB_EVENT_PORT_ACTIVE;
507 ib_dispatch_event(&event);
508 break;
509 default:
510 /* XXX We have already partially updated our state! */
511 goto err;
512 }
513
514 ret = recv_subn_get_portinfo(smp, ibdev, port);
515
516 if (clientrereg)
517 pip->clientrereg_resv_subnetto |= 0x80;
518
519 goto done;
520
521err:
522 smp->status |= IB_SMP_INVALID_FIELD;
523 ret = recv_subn_get_portinfo(smp, ibdev, port);
524
525done:
526 return ret;
527}
528
529static int recv_subn_set_pkeytable(struct ib_smp *smp,
530 struct ib_device *ibdev)
531{
532 u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
533 __be16 *p = (__be16 *) smp->data;
534 u16 *q = (u16 *) smp->data;
535 struct ipath_ibdev *dev = to_idev(ibdev);
536 unsigned i, n = ipath_layer_get_npkeys(dev->dd);
537
538 for (i = 0; i < n; i++)
539 q[i] = be16_to_cpu(p[i]);
540
541 if (startpx != 0 ||
542 ipath_layer_set_pkeys(dev->dd, q) != 0)
543 smp->status |= IB_SMP_INVALID_FIELD;
544
545 return recv_subn_get_pkeytable(smp, ibdev);
546}
547
548#define IB_PMA_CLASS_PORT_INFO __constant_htons(0x0001)
549#define IB_PMA_PORT_SAMPLES_CONTROL __constant_htons(0x0010)
550#define IB_PMA_PORT_SAMPLES_RESULT __constant_htons(0x0011)
551#define IB_PMA_PORT_COUNTERS __constant_htons(0x0012)
552#define IB_PMA_PORT_COUNTERS_EXT __constant_htons(0x001D)
553#define IB_PMA_PORT_SAMPLES_RESULT_EXT __constant_htons(0x001E)
554
555struct ib_perf {
556 u8 base_version;
557 u8 mgmt_class;
558 u8 class_version;
559 u8 method;
560 __be16 status;
561 __be16 unused;
562 __be64 tid;
563 __be16 attr_id;
564 __be16 resv;
565 __be32 attr_mod;
566 u8 reserved[40];
567 u8 data[192];
568} __attribute__ ((packed));
569
570struct ib_pma_classportinfo {
571 u8 base_version;
572 u8 class_version;
573 __be16 cap_mask;
574 u8 reserved[3];
575 u8 resp_time_value; /* only lower 5 bits */
576 union ib_gid redirect_gid;
577 __be32 redirect_tc_sl_fl; /* 8, 4, 20 bits respectively */
578 __be16 redirect_lid;
579 __be16 redirect_pkey;
580 __be32 redirect_qp; /* only lower 24 bits */
581 __be32 redirect_qkey;
582 union ib_gid trap_gid;
583 __be32 trap_tc_sl_fl; /* 8, 4, 20 bits respectively */
584 __be16 trap_lid;
585 __be16 trap_pkey;
586 __be32 trap_hl_qp; /* 8, 24 bits respectively */
587 __be32 trap_qkey;
588} __attribute__ ((packed));
589
590struct ib_pma_portsamplescontrol {
591 u8 opcode;
592 u8 port_select;
593 u8 tick;
594 u8 counter_width; /* only lower 3 bits */
595 __be32 counter_mask0_9; /* 2, 10 * 3, bits */
596 __be16 counter_mask10_14; /* 1, 5 * 3, bits */
597 u8 sample_mechanisms;
598 u8 sample_status; /* only lower 2 bits */
599 __be64 option_mask;
600 __be64 vendor_mask;
601 __be32 sample_start;
602 __be32 sample_interval;
603 __be16 tag;
604 __be16 counter_select[15];
605} __attribute__ ((packed));
606
607struct ib_pma_portsamplesresult {
608 __be16 tag;
609 __be16 sample_status; /* only lower 2 bits */
610 __be32 counter[15];
611} __attribute__ ((packed));
612
613struct ib_pma_portsamplesresult_ext {
614 __be16 tag;
615 __be16 sample_status; /* only lower 2 bits */
616 __be32 extended_width; /* only upper 2 bits */
617 __be64 counter[15];
618} __attribute__ ((packed));
619
620struct ib_pma_portcounters {
621 u8 reserved;
622 u8 port_select;
623 __be16 counter_select;
624 __be16 symbol_error_counter;
625 u8 link_error_recovery_counter;
626 u8 link_downed_counter;
627 __be16 port_rcv_errors;
628 __be16 port_rcv_remphys_errors;
629 __be16 port_rcv_switch_relay_errors;
630 __be16 port_xmit_discards;
631 u8 port_xmit_constraint_errors;
632 u8 port_rcv_constraint_errors;
633 u8 reserved1;
634 u8 lli_ebor_errors; /* 4, 4, bits */
635 __be16 reserved2;
636 __be16 vl15_dropped;
637 __be32 port_xmit_data;
638 __be32 port_rcv_data;
639 __be32 port_xmit_packets;
640 __be32 port_rcv_packets;
641} __attribute__ ((packed));
642
643#define IB_PMA_SEL_SYMBOL_ERROR __constant_htons(0x0001)
644#define IB_PMA_SEL_LINK_ERROR_RECOVERY __constant_htons(0x0002)
645#define IB_PMA_SEL_LINK_DOWNED __constant_htons(0x0004)
646#define IB_PMA_SEL_PORT_RCV_ERRORS __constant_htons(0x0008)
647#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS __constant_htons(0x0010)
648#define IB_PMA_SEL_PORT_XMIT_DISCARDS __constant_htons(0x0040)
649#define IB_PMA_SEL_PORT_XMIT_DATA __constant_htons(0x1000)
650#define IB_PMA_SEL_PORT_RCV_DATA __constant_htons(0x2000)
651#define IB_PMA_SEL_PORT_XMIT_PACKETS __constant_htons(0x4000)
652#define IB_PMA_SEL_PORT_RCV_PACKETS __constant_htons(0x8000)
653
654struct ib_pma_portcounters_ext {
655 u8 reserved;
656 u8 port_select;
657 __be16 counter_select;
658 __be32 reserved1;
659 __be64 port_xmit_data;
660 __be64 port_rcv_data;
661 __be64 port_xmit_packets;
662 __be64 port_rcv_packets;
663 __be64 port_unicast_xmit_packets;
664 __be64 port_unicast_rcv_packets;
665 __be64 port_multicast_xmit_packets;
666 __be64 port_multicast_rcv_packets;
667} __attribute__ ((packed));
668
669#define IB_PMA_SELX_PORT_XMIT_DATA __constant_htons(0x0001)
670#define IB_PMA_SELX_PORT_RCV_DATA __constant_htons(0x0002)
671#define IB_PMA_SELX_PORT_XMIT_PACKETS __constant_htons(0x0004)
672#define IB_PMA_SELX_PORT_RCV_PACKETS __constant_htons(0x0008)
673#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS __constant_htons(0x0010)
674#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS __constant_htons(0x0020)
675#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS __constant_htons(0x0040)
676#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS __constant_htons(0x0080)
677
678static int recv_pma_get_classportinfo(struct ib_perf *pmp)
679{
680 struct ib_pma_classportinfo *p =
681 (struct ib_pma_classportinfo *)pmp->data;
682
683 memset(pmp->data, 0, sizeof(pmp->data));
684
685 if (pmp->attr_mod != 0)
686 pmp->status |= IB_SMP_INVALID_FIELD;
687
688 /* Indicate AllPortSelect is valid (only one port anyway) */
689 p->cap_mask = __constant_cpu_to_be16(1 << 8);
690 p->base_version = 1;
691 p->class_version = 1;
692 /*
693 * Expected response time is 4.096 usec. * 2^18 == 1.073741824
694 * sec.
695 */
696 p->resp_time_value = 18;
697
698 return reply((struct ib_smp *) pmp);
699}
700
701/*
702 * The PortSamplesControl.CounterMasks field is an array of 3 bit fields
703 * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
704 * We support 5 counters which only count the mandatory quantities.
705 */
706#define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
707#define COUNTER_MASK0_9 \
708 __constant_cpu_to_be32(COUNTER_MASK(1, 0) | \
709 COUNTER_MASK(1, 1) | \
710 COUNTER_MASK(1, 2) | \
711 COUNTER_MASK(1, 3) | \
712 COUNTER_MASK(1, 4))
713
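/*
 * Editorial worked example, not part of the original source:
 * COUNTER_MASK(q, n) places the 3-bit capability value q for counter n just
 * below the two reserved bits of CounterMasks0_9, i.e. at bit (9 - n) * 3.
 * With q == 1 for counters 0..4, COUNTER_MASK0_9 is
 *   (1 << 27) | (1 << 24) | (1 << 21) | (1 << 18) | (1 << 15) == 0x09248000
 * in host order, sent as cpu_to_be32(0x09248000) on the wire.
 */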
714static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
715 struct ib_device *ibdev, u8 port)
716{
717 struct ib_pma_portsamplescontrol *p =
718 (struct ib_pma_portsamplescontrol *)pmp->data;
719 struct ipath_ibdev *dev = to_idev(ibdev);
720 unsigned long flags;
721 u8 port_select = p->port_select;
722
723 memset(pmp->data, 0, sizeof(pmp->data));
724
725 p->port_select = port_select;
726 if (pmp->attr_mod != 0 ||
727 (port_select != port && port_select != 0xFF))
728 pmp->status |= IB_SMP_INVALID_FIELD;
729 /*
730	 * Ticks are 10x the link transfer period which for 2.5Gbps is 4
731 * nsec. 0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec. Sample
732 * intervals are counted in ticks. Since we use Linux timers, that
733 * count in jiffies, we can't sample for less than 1000 ticks if HZ
734 * == 1000 (4000 ticks if HZ is 250).
735 */
736 /* XXX This is WRONG. */
737 p->tick = 250; /* 1 usec. */
738 p->counter_width = 4; /* 32 bit counters */
739 p->counter_mask0_9 = COUNTER_MASK0_9;
740 spin_lock_irqsave(&dev->pending_lock, flags);
741 p->sample_status = dev->pma_sample_status;
742 p->sample_start = cpu_to_be32(dev->pma_sample_start);
743 p->sample_interval = cpu_to_be32(dev->pma_sample_interval);
744 p->tag = cpu_to_be16(dev->pma_tag);
745 p->counter_select[0] = dev->pma_counter_select[0];
746 p->counter_select[1] = dev->pma_counter_select[1];
747 p->counter_select[2] = dev->pma_counter_select[2];
748 p->counter_select[3] = dev->pma_counter_select[3];
749 p->counter_select[4] = dev->pma_counter_select[4];
750 spin_unlock_irqrestore(&dev->pending_lock, flags);
751
752 return reply((struct ib_smp *) pmp);
753}
754
755static int recv_pma_set_portsamplescontrol(struct ib_perf *pmp,
756 struct ib_device *ibdev, u8 port)
757{
758 struct ib_pma_portsamplescontrol *p =
759 (struct ib_pma_portsamplescontrol *)pmp->data;
760 struct ipath_ibdev *dev = to_idev(ibdev);
761 unsigned long flags;
762 u32 start;
763 int ret;
764
765 if (pmp->attr_mod != 0 ||
766 (p->port_select != port && p->port_select != 0xFF)) {
767 pmp->status |= IB_SMP_INVALID_FIELD;
768 ret = reply((struct ib_smp *) pmp);
769 goto bail;
770 }
771
772 start = be32_to_cpu(p->sample_start);
773 if (start != 0) {
774 spin_lock_irqsave(&dev->pending_lock, flags);
775 if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_DONE) {
776 dev->pma_sample_status =
777 IB_PMA_SAMPLE_STATUS_STARTED;
778 dev->pma_sample_start = start;
779 dev->pma_sample_interval =
780 be32_to_cpu(p->sample_interval);
781 dev->pma_tag = be16_to_cpu(p->tag);
782 if (p->counter_select[0])
783 dev->pma_counter_select[0] =
784 p->counter_select[0];
785 if (p->counter_select[1])
786 dev->pma_counter_select[1] =
787 p->counter_select[1];
788 if (p->counter_select[2])
789 dev->pma_counter_select[2] =
790 p->counter_select[2];
791 if (p->counter_select[3])
792 dev->pma_counter_select[3] =
793 p->counter_select[3];
794 if (p->counter_select[4])
795 dev->pma_counter_select[4] =
796 p->counter_select[4];
797 }
798 spin_unlock_irqrestore(&dev->pending_lock, flags);
799 }
800 ret = recv_pma_get_portsamplescontrol(pmp, ibdev, port);
801
802bail:
803 return ret;
804}
805
806static u64 get_counter(struct ipath_ibdev *dev, __be16 sel)
807{
808 u64 ret;
809
810 switch (sel) {
811 case IB_PMA_PORT_XMIT_DATA:
812 ret = dev->ipath_sword;
813 break;
814 case IB_PMA_PORT_RCV_DATA:
815 ret = dev->ipath_rword;
816 break;
817 case IB_PMA_PORT_XMIT_PKTS:
818 ret = dev->ipath_spkts;
819 break;
820 case IB_PMA_PORT_RCV_PKTS:
821 ret = dev->ipath_rpkts;
822 break;
823 case IB_PMA_PORT_XMIT_WAIT:
824 ret = dev->ipath_xmit_wait;
825 break;
826 default:
827 ret = 0;
828 }
829
830 return ret;
831}
832
833static int recv_pma_get_portsamplesresult(struct ib_perf *pmp,
834 struct ib_device *ibdev)
835{
836 struct ib_pma_portsamplesresult *p =
837 (struct ib_pma_portsamplesresult *)pmp->data;
838 struct ipath_ibdev *dev = to_idev(ibdev);
839 int i;
840
841 memset(pmp->data, 0, sizeof(pmp->data));
842 p->tag = cpu_to_be16(dev->pma_tag);
843 p->sample_status = cpu_to_be16(dev->pma_sample_status);
844 for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
845 p->counter[i] = cpu_to_be32(
846 get_counter(dev, dev->pma_counter_select[i]));
847
848 return reply((struct ib_smp *) pmp);
849}
850
851static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp,
852 struct ib_device *ibdev)
853{
854 struct ib_pma_portsamplesresult_ext *p =
855 (struct ib_pma_portsamplesresult_ext *)pmp->data;
856 struct ipath_ibdev *dev = to_idev(ibdev);
857 int i;
858
859 memset(pmp->data, 0, sizeof(pmp->data));
860 p->tag = cpu_to_be16(dev->pma_tag);
861 p->sample_status = cpu_to_be16(dev->pma_sample_status);
862 /* 64 bits */
863 p->extended_width = __constant_cpu_to_be32(0x80000000);
864 for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
865 p->counter[i] = cpu_to_be64(
866 get_counter(dev, dev->pma_counter_select[i]));
867
868 return reply((struct ib_smp *) pmp);
869}
870
871static int recv_pma_get_portcounters(struct ib_perf *pmp,
872 struct ib_device *ibdev, u8 port)
873{
874 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
875 pmp->data;
876 struct ipath_ibdev *dev = to_idev(ibdev);
877 struct ipath_layer_counters cntrs;
878 u8 port_select = p->port_select;
879
880 ipath_layer_get_counters(dev->dd, &cntrs);
881
882 /* Adjust counters for any resets done. */
883 cntrs.symbol_error_counter -= dev->n_symbol_error_counter;
884 cntrs.link_error_recovery_counter -=
885 dev->n_link_error_recovery_counter;
886 cntrs.link_downed_counter -= dev->n_link_downed_counter;
887 cntrs.port_rcv_errors += dev->rcv_errors;
888 cntrs.port_rcv_errors -= dev->n_port_rcv_errors;
889 cntrs.port_rcv_remphys_errors -= dev->n_port_rcv_remphys_errors;
890 cntrs.port_xmit_discards -= dev->n_port_xmit_discards;
891 cntrs.port_xmit_data -= dev->n_port_xmit_data;
892 cntrs.port_rcv_data -= dev->n_port_rcv_data;
893 cntrs.port_xmit_packets -= dev->n_port_xmit_packets;
894 cntrs.port_rcv_packets -= dev->n_port_rcv_packets;
895
896 memset(pmp->data, 0, sizeof(pmp->data));
897
898 p->port_select = port_select;
899 if (pmp->attr_mod != 0 ||
900 (port_select != port && port_select != 0xFF))
901 pmp->status |= IB_SMP_INVALID_FIELD;
902
903 if (cntrs.symbol_error_counter > 0xFFFFUL)
904 p->symbol_error_counter = __constant_cpu_to_be16(0xFFFF);
905 else
906 p->symbol_error_counter =
907 cpu_to_be16((u16)cntrs.symbol_error_counter);
908 if (cntrs.link_error_recovery_counter > 0xFFUL)
909 p->link_error_recovery_counter = 0xFF;
910 else
911 p->link_error_recovery_counter =
912 (u8)cntrs.link_error_recovery_counter;
913 if (cntrs.link_downed_counter > 0xFFUL)
914 p->link_downed_counter = 0xFF;
915 else
916 p->link_downed_counter = (u8)cntrs.link_downed_counter;
917 if (cntrs.port_rcv_errors > 0xFFFFUL)
918 p->port_rcv_errors = __constant_cpu_to_be16(0xFFFF);
919 else
920 p->port_rcv_errors =
921 cpu_to_be16((u16) cntrs.port_rcv_errors);
922 if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
923 p->port_rcv_remphys_errors = __constant_cpu_to_be16(0xFFFF);
924 else
925 p->port_rcv_remphys_errors =
926 cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
927 if (cntrs.port_xmit_discards > 0xFFFFUL)
928 p->port_xmit_discards = __constant_cpu_to_be16(0xFFFF);
929 else
930 p->port_xmit_discards =
931 cpu_to_be16((u16)cntrs.port_xmit_discards);
932 if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
933 p->port_xmit_data = __constant_cpu_to_be32(0xFFFFFFFF);
934 else
935 p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
936 if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
937 p->port_rcv_data = __constant_cpu_to_be32(0xFFFFFFFF);
938 else
939 p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
940 if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
941 p->port_xmit_packets = __constant_cpu_to_be32(0xFFFFFFFF);
942 else
943 p->port_xmit_packets =
944 cpu_to_be32((u32)cntrs.port_xmit_packets);
945 if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
946 p->port_rcv_packets = __constant_cpu_to_be32(0xFFFFFFFF);
947 else
948 p->port_rcv_packets =
949 cpu_to_be32((u32) cntrs.port_rcv_packets);
950
951 return reply((struct ib_smp *) pmp);
952}
953
954static int recv_pma_get_portcounters_ext(struct ib_perf *pmp,
955 struct ib_device *ibdev, u8 port)
956{
957 struct ib_pma_portcounters_ext *p =
958 (struct ib_pma_portcounters_ext *)pmp->data;
959 struct ipath_ibdev *dev = to_idev(ibdev);
960 u64 swords, rwords, spkts, rpkts, xwait;
961 u8 port_select = p->port_select;
962
963 ipath_layer_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
964 &rpkts, &xwait);
965
966 /* Adjust counters for any resets done. */
967 swords -= dev->n_port_xmit_data;
968 rwords -= dev->n_port_rcv_data;
969 spkts -= dev->n_port_xmit_packets;
970 rpkts -= dev->n_port_rcv_packets;
971
972 memset(pmp->data, 0, sizeof(pmp->data));
973
974 p->port_select = port_select;
975 if (pmp->attr_mod != 0 ||
976 (port_select != port && port_select != 0xFF))
977 pmp->status |= IB_SMP_INVALID_FIELD;
978
979 p->port_xmit_data = cpu_to_be64(swords);
980 p->port_rcv_data = cpu_to_be64(rwords);
981 p->port_xmit_packets = cpu_to_be64(spkts);
982 p->port_rcv_packets = cpu_to_be64(rpkts);
983 p->port_unicast_xmit_packets = cpu_to_be64(dev->n_unicast_xmit);
984 p->port_unicast_rcv_packets = cpu_to_be64(dev->n_unicast_rcv);
985 p->port_multicast_xmit_packets = cpu_to_be64(dev->n_multicast_xmit);
986 p->port_multicast_rcv_packets = cpu_to_be64(dev->n_multicast_rcv);
987
988 return reply((struct ib_smp *) pmp);
989}
990
991static int recv_pma_set_portcounters(struct ib_perf *pmp,
992 struct ib_device *ibdev, u8 port)
993{
994 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
995 pmp->data;
996 struct ipath_ibdev *dev = to_idev(ibdev);
997 struct ipath_layer_counters cntrs;
998
999 /*
1000 * Since the HW doesn't support clearing counters, we save the
1001 * current count and subtract it from future responses.
1002 */
1003 ipath_layer_get_counters(dev->dd, &cntrs);
1004
1005 if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
1006 dev->n_symbol_error_counter = cntrs.symbol_error_counter;
1007
1008 if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
1009 dev->n_link_error_recovery_counter =
1010 cntrs.link_error_recovery_counter;
1011
1012 if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
1013 dev->n_link_downed_counter = cntrs.link_downed_counter;
1014
1015 if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
1016 dev->n_port_rcv_errors =
1017 cntrs.port_rcv_errors + dev->rcv_errors;
1018
1019 if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
1020 dev->n_port_rcv_remphys_errors =
1021 cntrs.port_rcv_remphys_errors;
1022
1023 if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
1024 dev->n_port_xmit_discards = cntrs.port_xmit_discards;
1025
1026 if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
1027 dev->n_port_xmit_data = cntrs.port_xmit_data;
1028
1029 if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
1030 dev->n_port_rcv_data = cntrs.port_rcv_data;
1031
1032 if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
1033 dev->n_port_xmit_packets = cntrs.port_xmit_packets;
1034
1035 if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
1036 dev->n_port_rcv_packets = cntrs.port_rcv_packets;
1037
1038 return recv_pma_get_portcounters(pmp, ibdev, port);
1039}
1040
1041static int recv_pma_set_portcounters_ext(struct ib_perf *pmp,
1042 struct ib_device *ibdev, u8 port)
1043{
1044 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
1045 pmp->data;
1046 struct ipath_ibdev *dev = to_idev(ibdev);
1047 u64 swords, rwords, spkts, rpkts, xwait;
1048
1049 ipath_layer_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
1050 &rpkts, &xwait);
1051
1052 if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
1053 dev->n_port_xmit_data = swords;
1054
1055 if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
1056 dev->n_port_rcv_data = rwords;
1057
1058 if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
1059 dev->n_port_xmit_packets = spkts;
1060
1061 if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
1062 dev->n_port_rcv_packets = rpkts;
1063
1064 if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
1065 dev->n_unicast_xmit = 0;
1066
1067 if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
1068 dev->n_unicast_rcv = 0;
1069
1070 if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
1071 dev->n_multicast_xmit = 0;
1072
1073 if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
1074 dev->n_multicast_rcv = 0;
1075
1076 return recv_pma_get_portcounters_ext(pmp, ibdev, port);
1077}
1078
1079static int process_subn(struct ib_device *ibdev, int mad_flags,
1080 u8 port_num, struct ib_mad *in_mad,
1081 struct ib_mad *out_mad)
1082{
1083 struct ib_smp *smp = (struct ib_smp *)out_mad;
1084 struct ipath_ibdev *dev = to_idev(ibdev);
1085 int ret;
1086
1087 *out_mad = *in_mad;
1088 if (smp->class_version != 1) {
1089 smp->status |= IB_SMP_UNSUP_VERSION;
1090 ret = reply(smp);
1091 goto bail;
1092 }
1093
1094 /* Is the mkey in the process of expiring? */
1095 if (dev->mkey_lease_timeout && jiffies >= dev->mkey_lease_timeout) {
1096 /* Clear timeout and mkey protection field. */
1097 dev->mkey_lease_timeout = 0;
1098 dev->mkeyprot_resv_lmc &= 0x3F;
1099 }
1100
1101 /*
1102 * M_Key checking depends on
1103 * Portinfo:M_Key_protect_bits
1104 */
1105 if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && dev->mkey != 0 &&
1106 dev->mkey != smp->mkey &&
1107 (smp->method == IB_MGMT_METHOD_SET ||
1108 (smp->method == IB_MGMT_METHOD_GET &&
1109 (dev->mkeyprot_resv_lmc >> 7) != 0))) {
1110 if (dev->mkey_violations != 0xFFFF)
1111 ++dev->mkey_violations;
1112 if (dev->mkey_lease_timeout ||
1113 dev->mkey_lease_period == 0) {
1114 ret = IB_MAD_RESULT_SUCCESS |
1115 IB_MAD_RESULT_CONSUMED;
1116 goto bail;
1117 }
1118 dev->mkey_lease_timeout = jiffies +
1119 dev->mkey_lease_period * HZ;
1120 /* Future: Generate a trap notice. */
1121 ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
1122 goto bail;
1123 } else if (dev->mkey_lease_timeout)
1124 dev->mkey_lease_timeout = 0;
1125
1126 switch (smp->method) {
1127 case IB_MGMT_METHOD_GET:
1128 switch (smp->attr_id) {
1129 case IB_SMP_ATTR_NODE_DESC:
1130 ret = recv_subn_get_nodedescription(smp, ibdev);
1131 goto bail;
1132 case IB_SMP_ATTR_NODE_INFO:
1133 ret = recv_subn_get_nodeinfo(smp, ibdev, port_num);
1134 goto bail;
1135 case IB_SMP_ATTR_GUID_INFO:
1136 ret = recv_subn_get_guidinfo(smp, ibdev);
1137 goto bail;
1138 case IB_SMP_ATTR_PORT_INFO:
1139 ret = recv_subn_get_portinfo(smp, ibdev, port_num);
1140 goto bail;
1141 case IB_SMP_ATTR_PKEY_TABLE:
1142 ret = recv_subn_get_pkeytable(smp, ibdev);
1143 goto bail;
1144 case IB_SMP_ATTR_SM_INFO:
1145 if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
1146 ret = IB_MAD_RESULT_SUCCESS |
1147 IB_MAD_RESULT_CONSUMED;
1148 goto bail;
1149 }
1150 if (dev->port_cap_flags & IB_PORT_SM) {
1151 ret = IB_MAD_RESULT_SUCCESS;
1152 goto bail;
1153 }
1154 /* FALLTHROUGH */
1155 default:
1156 smp->status |= IB_SMP_UNSUP_METH_ATTR;
1157 ret = reply(smp);
1158 goto bail;
1159 }
1160
1161 case IB_MGMT_METHOD_SET:
1162 switch (smp->attr_id) {
1163 case IB_SMP_ATTR_GUID_INFO:
1164 ret = recv_subn_set_guidinfo(smp, ibdev);
1165 goto bail;
1166 case IB_SMP_ATTR_PORT_INFO:
1167 ret = recv_subn_set_portinfo(smp, ibdev, port_num);
1168 goto bail;
1169 case IB_SMP_ATTR_PKEY_TABLE:
1170 ret = recv_subn_set_pkeytable(smp, ibdev);
1171 goto bail;
1172 case IB_SMP_ATTR_SM_INFO:
1173 if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
1174 ret = IB_MAD_RESULT_SUCCESS |
1175 IB_MAD_RESULT_CONSUMED;
1176 goto bail;
1177 }
1178 if (dev->port_cap_flags & IB_PORT_SM) {
1179 ret = IB_MAD_RESULT_SUCCESS;
1180 goto bail;
1181 }
1182 /* FALLTHROUGH */
1183 default:
1184 smp->status |= IB_SMP_UNSUP_METH_ATTR;
1185 ret = reply(smp);
1186 goto bail;
1187 }
1188
1189 case IB_MGMT_METHOD_GET_RESP:
1190 /*
1191 * The ib_mad module will call us to process responses
1192 * before checking for other consumers.
1193 * Just tell the caller to process it normally.
1194 */
1195 ret = IB_MAD_RESULT_FAILURE;
1196 goto bail;
1197 default:
1198 smp->status |= IB_SMP_UNSUP_METHOD;
1199 ret = reply(smp);
1200 }
1201
1202bail:
1203 return ret;
1204}
1205
1206static int process_perf(struct ib_device *ibdev, u8 port_num,
1207 struct ib_mad *in_mad,
1208 struct ib_mad *out_mad)
1209{
1210 struct ib_perf *pmp = (struct ib_perf *)out_mad;
1211 int ret;
1212
1213 *out_mad = *in_mad;
1214 if (pmp->class_version != 1) {
1215 pmp->status |= IB_SMP_UNSUP_VERSION;
1216 ret = reply((struct ib_smp *) pmp);
1217 goto bail;
1218 }
1219
1220 switch (pmp->method) {
1221 case IB_MGMT_METHOD_GET:
1222 switch (pmp->attr_id) {
1223 case IB_PMA_CLASS_PORT_INFO:
1224 ret = recv_pma_get_classportinfo(pmp);
1225 goto bail;
1226 case IB_PMA_PORT_SAMPLES_CONTROL:
1227 ret = recv_pma_get_portsamplescontrol(pmp, ibdev,
1228 port_num);
1229 goto bail;
1230 case IB_PMA_PORT_SAMPLES_RESULT:
1231 ret = recv_pma_get_portsamplesresult(pmp, ibdev);
1232 goto bail;
1233 case IB_PMA_PORT_SAMPLES_RESULT_EXT:
1234 ret = recv_pma_get_portsamplesresult_ext(pmp,
1235 ibdev);
1236 goto bail;
1237 case IB_PMA_PORT_COUNTERS:
1238 ret = recv_pma_get_portcounters(pmp, ibdev,
1239 port_num);
1240 goto bail;
1241 case IB_PMA_PORT_COUNTERS_EXT:
1242 ret = recv_pma_get_portcounters_ext(pmp, ibdev,
1243 port_num);
1244 goto bail;
1245 default:
1246 pmp->status |= IB_SMP_UNSUP_METH_ATTR;
1247 ret = reply((struct ib_smp *) pmp);
1248 goto bail;
1249 }
1250
1251 case IB_MGMT_METHOD_SET:
1252 switch (pmp->attr_id) {
1253 case IB_PMA_PORT_SAMPLES_CONTROL:
1254 ret = recv_pma_set_portsamplescontrol(pmp, ibdev,
1255 port_num);
1256 goto bail;
1257 case IB_PMA_PORT_COUNTERS:
1258 ret = recv_pma_set_portcounters(pmp, ibdev,
1259 port_num);
1260 goto bail;
1261 case IB_PMA_PORT_COUNTERS_EXT:
1262 ret = recv_pma_set_portcounters_ext(pmp, ibdev,
1263 port_num);
1264 goto bail;
1265 default:
1266 pmp->status |= IB_SMP_UNSUP_METH_ATTR;
1267 ret = reply((struct ib_smp *) pmp);
1268 goto bail;
1269 }
1270
1271 case IB_MGMT_METHOD_GET_RESP:
1272 /*
1273 * The ib_mad module will call us to process responses
1274 * before checking for other consumers.
1275 * Just tell the caller to process it normally.
1276 */
1277 ret = IB_MAD_RESULT_FAILURE;
1278 goto bail;
1279 default:
1280 pmp->status |= IB_SMP_UNSUP_METHOD;
1281 ret = reply((struct ib_smp *) pmp);
1282 }
1283
1284bail:
1285 return ret;
1286}
1287
1288/**
1289 * ipath_process_mad - process an incoming MAD packet
1290 * @ibdev: the infiniband device this packet came in on
1291 * @mad_flags: MAD flags
1292 * @port_num: the port number this packet came in on
1293 * @in_wc: the work completion entry for this packet
1294 * @in_grh: the global route header for this packet
1295 * @in_mad: the incoming MAD
1296 * @out_mad: any outgoing MAD reply
1297 *
1298 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
1299 * interested in processing.
1300 *
1301 * Note that the verbs framework has already done the MAD sanity checks,
1302 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
1303 * MADs.
1304 *
1305 * This is called by the ib_mad module.
1306 */
1307int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
1308 struct ib_wc *in_wc, struct ib_grh *in_grh,
1309 struct ib_mad *in_mad, struct ib_mad *out_mad)
1310{
1311 struct ipath_ibdev *dev = to_idev(ibdev);
1312 int ret;
1313
1314 /*
1315 * Snapshot current HW counters to "clear" them.
1316 * This should be done when the driver is loaded except that for
1317	 * some reason we get a zillion errors when bringing up the link.
1318 */
1319 if (dev->rcv_errors == 0) {
1320 struct ipath_layer_counters cntrs;
1321
1322 ipath_layer_get_counters(to_idev(ibdev)->dd, &cntrs);
1323 dev->rcv_errors++;
1324 dev->n_symbol_error_counter = cntrs.symbol_error_counter;
1325 dev->n_link_error_recovery_counter =
1326 cntrs.link_error_recovery_counter;
1327 dev->n_link_downed_counter = cntrs.link_downed_counter;
1328 dev->n_port_rcv_errors = cntrs.port_rcv_errors + 1;
1329 dev->n_port_rcv_remphys_errors =
1330 cntrs.port_rcv_remphys_errors;
1331 dev->n_port_xmit_discards = cntrs.port_xmit_discards;
1332 dev->n_port_xmit_data = cntrs.port_xmit_data;
1333 dev->n_port_rcv_data = cntrs.port_rcv_data;
1334 dev->n_port_xmit_packets = cntrs.port_xmit_packets;
1335 dev->n_port_rcv_packets = cntrs.port_rcv_packets;
1336 }
1337 switch (in_mad->mad_hdr.mgmt_class) {
1338 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
1339 case IB_MGMT_CLASS_SUBN_LID_ROUTED:
1340 ret = process_subn(ibdev, mad_flags, port_num,
1341 in_mad, out_mad);
1342 goto bail;
1343 case IB_MGMT_CLASS_PERF_MGMT:
1344 ret = process_perf(ibdev, port_num, in_mad, out_mad);
1345 goto bail;
1346 default:
1347 ret = IB_MAD_RESULT_SUCCESS;
1348 }
1349
1350bail:
1351 return ret;
1352}
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
new file mode 100644
index 000000000000..69ffec66d45d
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -0,0 +1,383 @@
1/*
2 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <rdma/ib_pack.h>
34#include <rdma/ib_smi.h>
35
36#include "ipath_verbs.h"
37
38/**
39 * ipath_get_dma_mr - get a DMA memory region
40 * @pd: protection domain for this memory region
41 * @acc: access flags
42 *
43 * Returns the memory region on success, otherwise returns an errno.
44 */
45struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc)
46{
47 struct ipath_mr *mr;
48 struct ib_mr *ret;
49
50 mr = kzalloc(sizeof *mr, GFP_KERNEL);
51 if (!mr) {
52 ret = ERR_PTR(-ENOMEM);
53 goto bail;
54 }
55
56 mr->mr.access_flags = acc;
57 ret = &mr->ibmr;
58
59bail:
60 return ret;
61}
62
63static struct ipath_mr *alloc_mr(int count,
64 struct ipath_lkey_table *lk_table)
65{
66 struct ipath_mr *mr;
67 int m, i = 0;
68
69 /* Allocate struct plus pointers to first level page tables. */
70 m = (count + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
71 mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
72 if (!mr)
73 goto done;
74
75 /* Allocate first level page tables. */
76 for (; i < m; i++) {
77 mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
78 if (!mr->mr.map[i])
79 goto bail;
80 }
81 mr->mr.mapsz = m;
82
83 /*
84 * ib_reg_phys_mr() will initialize mr->ibmr except for
85 * lkey and rkey.
86 */
87 if (!ipath_alloc_lkey(lk_table, &mr->mr))
88 goto bail;
89 mr->ibmr.rkey = mr->ibmr.lkey = mr->mr.lkey;
90
91 goto done;
92
93bail:
94 while (i) {
95 i--;
96 kfree(mr->mr.map[i]);
97 }
98 kfree(mr);
99 mr = NULL;
100
101done:
102 return mr;
103}
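
alloc_mr() sizes the first-level map so each chunk holds IPATH_SEGSZ segments, and the registration loops below walk an (m, n) pair instead of dividing on every segment. The equivalent flat-index arithmetic, as a hedged sketch (the SEGSZ value and helper names are illustrative only; the real constant comes from the driver headers):

/* Stand-in for IPATH_SEGSZ; the real value is defined by the driver. */
#define SEGSZ 512

/* Number of first-level chunks needed for 'count' segments, rounding up. */
static int seg_chunks(int count)
{
	return (count + SEGSZ - 1) / SEGSZ;
}

/* Map a flat segment index onto (chunk, slot within chunk), the same
 * positions the registration loops reach incrementally via m and n. */
static void seg_index(int i, int *m, int *n)
{
	*m = i / SEGSZ;
	*n = i % SEGSZ;
}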
104
105/**
106 * ipath_reg_phys_mr - register a physical memory region
107 * @pd: protection domain for this memory region
108 * @buffer_list: pointer to the list of physical buffers to register
109 * @num_phys_buf: the number of physical buffers to register
110 * @iova_start: the starting address passed over IB which maps to this MR
111 *
112 * Returns the memory region on success, otherwise returns an errno.
113 */
114struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
115 struct ib_phys_buf *buffer_list,
116 int num_phys_buf, int acc, u64 *iova_start)
117{
118 struct ipath_mr *mr;
119 int n, m, i;
120 struct ib_mr *ret;
121
122 mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
123 if (mr == NULL) {
124 ret = ERR_PTR(-ENOMEM);
125 goto bail;
126 }
127
128 mr->mr.user_base = *iova_start;
129 mr->mr.iova = *iova_start;
130 mr->mr.length = 0;
131 mr->mr.offset = 0;
132 mr->mr.access_flags = acc;
133 mr->mr.max_segs = num_phys_buf;
134
135 m = 0;
136 n = 0;
137 for (i = 0; i < num_phys_buf; i++) {
138 mr->mr.map[m]->segs[n].vaddr =
139 phys_to_virt(buffer_list[i].addr);
140 mr->mr.map[m]->segs[n].length = buffer_list[i].size;
141 mr->mr.length += buffer_list[i].size;
142 n++;
143 if (n == IPATH_SEGSZ) {
144 m++;
145 n = 0;
146 }
147 }
148
149 ret = &mr->ibmr;
150
151bail:
152 return ret;
153}
154
155/**
156 * ipath_reg_user_mr - register a userspace memory region
157 * @pd: protection domain for this memory region
158 * @region: the user memory region
159 * @mr_access_flags: access flags for this memory region
160 * @udata: unused by the InfiniPath driver
161 *
162 * Returns the memory region on success, otherwise returns an errno.
163 */
164struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
165 int mr_access_flags, struct ib_udata *udata)
166{
167 struct ipath_mr *mr;
168 struct ib_umem_chunk *chunk;
169 int n, m, i;
170 struct ib_mr *ret;
171
172 n = 0;
173 list_for_each_entry(chunk, &region->chunk_list, list)
174 n += chunk->nents;
175
176 mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
177 if (!mr) {
178 ret = ERR_PTR(-ENOMEM);
179 goto bail;
180 }
181
182 mr->mr.user_base = region->user_base;
183 mr->mr.iova = region->virt_base;
184 mr->mr.length = region->length;
185 mr->mr.offset = region->offset;
186 mr->mr.access_flags = mr_access_flags;
187 mr->mr.max_segs = n;
188
189 m = 0;
190 n = 0;
191 list_for_each_entry(chunk, &region->chunk_list, list) {
192 for (i = 0; i < chunk->nmap; i++) {
193 mr->mr.map[m]->segs[n].vaddr =
194 page_address(chunk->page_list[i].page);
195 mr->mr.map[m]->segs[n].length = region->page_size;
196 n++;
197 if (n == IPATH_SEGSZ) {
198 m++;
199 n = 0;
200 }
201 }
202 }
203 ret = &mr->ibmr;
204
205bail:
206 return ret;
207}
208
209/**
210 * ipath_dereg_mr - unregister and free a memory region
211 * @ibmr: the memory region to free
212 *
213 * Returns 0 on success.
214 *
215 * Note that this is called to free MRs created by ipath_get_dma_mr()
216	 * ipath_reg_phys_mr(), or ipath_reg_user_mr().
217 */
218int ipath_dereg_mr(struct ib_mr *ibmr)
219{
220 struct ipath_mr *mr = to_imr(ibmr);
221 int i;
222
223 ipath_free_lkey(&to_idev(ibmr->device)->lk_table, ibmr->lkey);
224 i = mr->mr.mapsz;
225 while (i) {
226 i--;
227 kfree(mr->mr.map[i]);
228 }
229 kfree(mr);
230 return 0;
231}
232
233/**
234 * ipath_alloc_fmr - allocate a fast memory region
235 * @pd: the protection domain for this memory region
236 * @mr_access_flags: access flags for this memory region
237 * @fmr_attr: fast memory region attributes
238 *
239 * Returns the memory region on success, otherwise returns an errno.
240 */
241struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
242 struct ib_fmr_attr *fmr_attr)
243{
244 struct ipath_fmr *fmr;
245 int m, i = 0;
246 struct ib_fmr *ret;
247
248 /* Allocate struct plus pointers to first level page tables. */
249 m = (fmr_attr->max_pages + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
250 fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
251 if (!fmr)
252 goto bail;
253
254 /* Allocate first level page tables. */
255 for (; i < m; i++) {
256 fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
257 GFP_KERNEL);
258 if (!fmr->mr.map[i])
259 goto bail;
260 }
261 fmr->mr.mapsz = m;
262
263 /*
264 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
265 * rkey.
266 */
267 if (!ipath_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
268 goto bail;
269 fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mr.lkey;
270 /*
271 * Resources are allocated but no valid mapping (RKEY can't be
272 * used).
273 */
274 fmr->mr.user_base = 0;
275 fmr->mr.iova = 0;
276 fmr->mr.length = 0;
277 fmr->mr.offset = 0;
278 fmr->mr.access_flags = mr_access_flags;
279 fmr->mr.max_segs = fmr_attr->max_pages;
280 fmr->page_shift = fmr_attr->page_shift;
281
282 ret = &fmr->ibfmr;
283 goto done;
284
285bail:
286 while (i)
287 kfree(fmr->mr.map[--i]);
288 kfree(fmr);
289 ret = ERR_PTR(-ENOMEM);
290
291done:
292 return ret;
293}
294
295/**
296 * ipath_map_phys_fmr - set up a fast memory region
297 * @ibmfr: the fast memory region to set up
298 * @page_list: the list of pages to associate with the fast memory region
299 * @list_len: the number of pages to associate with the fast memory region
300 * @iova: the virtual address of the start of the fast memory region
301 *
302 * This may be called from interrupt context.
303 */
304
305int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,
306 int list_len, u64 iova)
307{
308 struct ipath_fmr *fmr = to_ifmr(ibfmr);
309 struct ipath_lkey_table *rkt;
310 unsigned long flags;
311 int m, n, i;
312 u32 ps;
313 int ret;
314
315 if (list_len > fmr->mr.max_segs) {
316 ret = -EINVAL;
317 goto bail;
318 }
319 rkt = &to_idev(ibfmr->device)->lk_table;
320 spin_lock_irqsave(&rkt->lock, flags);
321 fmr->mr.user_base = iova;
322 fmr->mr.iova = iova;
323 ps = 1 << fmr->page_shift;
324 fmr->mr.length = list_len * ps;
325 m = 0;
326 n = 0;
327 ps = 1 << fmr->page_shift;
328 for (i = 0; i < list_len; i++) {
329 fmr->mr.map[m]->segs[n].vaddr = phys_to_virt(page_list[i]);
330 fmr->mr.map[m]->segs[n].length = ps;
331 if (++n == IPATH_SEGSZ) {
332 m++;
333 n = 0;
334 }
335 }
336 spin_unlock_irqrestore(&rkt->lock, flags);
337 ret = 0;
338
339bail:
340 return ret;
341}
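
Everything in ipath_map_phys_fmr() derives from page_shift: the page size is 1 << page_shift and the region length is simply list_len pages. A tiny worked example with made-up values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	unsigned page_shift = 12;			/* 4KB pages, for example */
	uint32_t ps = 1U << page_shift;			/* 4096 */
	int list_len = 8;				/* eight pages mapped */
	uint64_t length = (uint64_t)list_len * ps;	/* 32768 bytes */

	assert(length == 32768);
	return 0;
}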
342
343/**
344 * ipath_unmap_fmr - unmap fast memory regions
345 * @fmr_list: the list of fast memory regions to unmap
346 *
347 * Returns 0 on success.
348 */
349int ipath_unmap_fmr(struct list_head *fmr_list)
350{
351 struct ipath_fmr *fmr;
352 struct ipath_lkey_table *rkt;
353 unsigned long flags;
354
355 list_for_each_entry(fmr, fmr_list, ibfmr.list) {
356 rkt = &to_idev(fmr->ibfmr.device)->lk_table;
357 spin_lock_irqsave(&rkt->lock, flags);
358 fmr->mr.user_base = 0;
359 fmr->mr.iova = 0;
360 fmr->mr.length = 0;
361 spin_unlock_irqrestore(&rkt->lock, flags);
362 }
363 return 0;
364}
365
366/**
367 * ipath_dealloc_fmr - deallocate a fast memory region
368 * @ibfmr: the fast memory region to deallocate
369 *
370 * Returns 0 on success.
371 */
372int ipath_dealloc_fmr(struct ib_fmr *ibfmr)
373{
374 struct ipath_fmr *fmr = to_ifmr(ibfmr);
375 int i;
376
377 ipath_free_lkey(&to_idev(ibfmr->device)->lk_table, ibfmr->lkey);
378 i = fmr->mr.mapsz;
379 while (i)
380 kfree(fmr->mr.map[--i]);
381 kfree(fmr);
382 return 0;
383}
diff --git a/drivers/infiniband/hw/ipath/ipath_pe800.c b/drivers/infiniband/hw/ipath/ipath_pe800.c
new file mode 100644
index 000000000000..e693a7a82667
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_pe800.c
@@ -0,0 +1,1247 @@
1/*
2 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32/*
33 * This file contains all of the code that is specific to the
34 * InfiniPath PE-800 chip.
35 */
36
37#include <linux/interrupt.h>
38#include <linux/pci.h>
39#include <linux/delay.h>
40
41
42#include "ipath_kernel.h"
43#include "ipath_registers.h"
44
45/*
46 * This file contains all the chip-specific register information and
47 * access functions for the PathScale PE800, the PCI-Express chip.
48 *
49 * This lists the InfiniPath PE800 registers, in the actual chip layout.
50 * This structure should never be directly accessed.
51 */
52struct _infinipath_do_not_use_kernel_regs {
53 unsigned long long Revision;
54 unsigned long long Control;
55 unsigned long long PageAlign;
56 unsigned long long PortCnt;
57 unsigned long long DebugPortSelect;
58 unsigned long long Reserved0;
59 unsigned long long SendRegBase;
60 unsigned long long UserRegBase;
61 unsigned long long CounterRegBase;
62 unsigned long long Scratch;
63 unsigned long long Reserved1;
64 unsigned long long Reserved2;
65 unsigned long long IntBlocked;
66 unsigned long long IntMask;
67 unsigned long long IntStatus;
68 unsigned long long IntClear;
69 unsigned long long ErrorMask;
70 unsigned long long ErrorStatus;
71 unsigned long long ErrorClear;
72 unsigned long long HwErrMask;
73 unsigned long long HwErrStatus;
74 unsigned long long HwErrClear;
75 unsigned long long HwDiagCtrl;
76 unsigned long long MDIO;
77 unsigned long long IBCStatus;
78 unsigned long long IBCCtrl;
79 unsigned long long ExtStatus;
80 unsigned long long ExtCtrl;
81 unsigned long long GPIOOut;
82 unsigned long long GPIOMask;
83 unsigned long long GPIOStatus;
84 unsigned long long GPIOClear;
85 unsigned long long RcvCtrl;
86 unsigned long long RcvBTHQP;
87 unsigned long long RcvHdrSize;
88 unsigned long long RcvHdrCnt;
89 unsigned long long RcvHdrEntSize;
90 unsigned long long RcvTIDBase;
91 unsigned long long RcvTIDCnt;
92 unsigned long long RcvEgrBase;
93 unsigned long long RcvEgrCnt;
94 unsigned long long RcvBufBase;
95 unsigned long long RcvBufSize;
96 unsigned long long RxIntMemBase;
97 unsigned long long RxIntMemSize;
98 unsigned long long RcvPartitionKey;
99 unsigned long long Reserved3;
100 unsigned long long RcvPktLEDCnt;
101 unsigned long long Reserved4[8];
102 unsigned long long SendCtrl;
103 unsigned long long SendPIOBufBase;
104 unsigned long long SendPIOSize;
105 unsigned long long SendPIOBufCnt;
106 unsigned long long SendPIOAvailAddr;
107 unsigned long long TxIntMemBase;
108 unsigned long long TxIntMemSize;
109 unsigned long long Reserved5;
110 unsigned long long PCIeRBufTestReg0;
111 unsigned long long PCIeRBufTestReg1;
112 unsigned long long Reserved51[6];
113 unsigned long long SendBufferError;
114 unsigned long long SendBufferErrorCONT1;
115 unsigned long long Reserved6SBE[6];
116 unsigned long long RcvHdrAddr0;
117 unsigned long long RcvHdrAddr1;
118 unsigned long long RcvHdrAddr2;
119 unsigned long long RcvHdrAddr3;
120 unsigned long long RcvHdrAddr4;
121 unsigned long long Reserved7RHA[11];
122 unsigned long long RcvHdrTailAddr0;
123 unsigned long long RcvHdrTailAddr1;
124 unsigned long long RcvHdrTailAddr2;
125 unsigned long long RcvHdrTailAddr3;
126 unsigned long long RcvHdrTailAddr4;
127 unsigned long long Reserved8RHTA[11];
128 unsigned long long Reserved9SW[8];
129 unsigned long long SerdesConfig0;
130 unsigned long long SerdesConfig1;
131 unsigned long long SerdesStatus;
132 unsigned long long XGXSConfig;
133 unsigned long long IBPLLCfg;
134 unsigned long long Reserved10SW2[3];
135 unsigned long long PCIEQ0SerdesConfig0;
136 unsigned long long PCIEQ0SerdesConfig1;
137 unsigned long long PCIEQ0SerdesStatus;
138 unsigned long long Reserved11;
139 unsigned long long PCIEQ1SerdesConfig0;
140 unsigned long long PCIEQ1SerdesConfig1;
141 unsigned long long PCIEQ1SerdesStatus;
142 unsigned long long Reserved12;
143};
144
145#define IPATH_KREG_OFFSET(field) (offsetof(struct \
146 _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
147#define IPATH_CREG_OFFSET(field) (offsetof( \
148 struct infinipath_counters, field) / sizeof(u64))
149
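
IPATH_KREG_OFFSET() converts a field of the layout struct above into an index counted in 64-bit registers, which is the unit the kreg read/write helpers expect. A standalone sketch of the same offsetof arithmetic (the struct and names here are illustrative, not the driver's):

#include <stddef.h>
#include <stdint.h>

struct demo_regs {
	uint64_t Revision;	/* index 0 */
	uint64_t Control;	/* index 1 */
	uint64_t PageAlign;	/* index 2 */
};

/* Byte offset divided by the register width gives the register index. */
#define DEMO_REG_INDEX(field) \
	(offsetof(struct demo_regs, field) / sizeof(uint64_t))

/* DEMO_REG_INDEX(PageAlign) evaluates to 2, i.e. the third 64-bit register. */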
150static const struct ipath_kregs ipath_pe_kregs = {
151 .kr_control = IPATH_KREG_OFFSET(Control),
152 .kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
153 .kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
154 .kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
155 .kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
156 .kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
157 .kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
158 .kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
159 .kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
160 .kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
161 .kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
162 .kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
163 .kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
164 .kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
165 .kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
166 .kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
167 .kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
168 .kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
169 .kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
170 .kr_intclear = IPATH_KREG_OFFSET(IntClear),
171 .kr_intmask = IPATH_KREG_OFFSET(IntMask),
172 .kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
173 .kr_mdio = IPATH_KREG_OFFSET(MDIO),
174 .kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
175 .kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
176 .kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
177 .kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
178 .kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
179 .kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
180 .kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
181 .kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
182 .kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
183 .kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
184 .kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
185 .kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
186 .kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
187 .kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
188 .kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
189 .kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
190 .kr_revision = IPATH_KREG_OFFSET(Revision),
191 .kr_scratch = IPATH_KREG_OFFSET(Scratch),
192 .kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
193 .kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
194 .kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendPIOAvailAddr),
195 .kr_sendpiobufbase = IPATH_KREG_OFFSET(SendPIOBufBase),
196 .kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendPIOBufCnt),
197 .kr_sendpiosize = IPATH_KREG_OFFSET(SendPIOSize),
198 .kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
199 .kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
200 .kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
201 .kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
202 .kr_serdesconfig0 = IPATH_KREG_OFFSET(SerdesConfig0),
203 .kr_serdesconfig1 = IPATH_KREG_OFFSET(SerdesConfig1),
204 .kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus),
205 .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
206 .kr_ibpllcfg = IPATH_KREG_OFFSET(IBPLLCfg),
207
208 /*
209 * These should not be used directly via ipath_read_kreg64(),
210 * use them with ipath_read_kreg64_port()
211 */
212 .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
213 .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0),
214
215 /* This group is pe-800-specific; and used only in this file */
216 /* The rcvpktled register controls one of the debug port signals, so
217 * a packet activity LED can be connected to it. */
218 .kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt),
219 .kr_pcierbuftestreg0 = IPATH_KREG_OFFSET(PCIeRBufTestReg0),
220 .kr_pcierbuftestreg1 = IPATH_KREG_OFFSET(PCIeRBufTestReg1),
221 .kr_pcieq0serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig0),
222 .kr_pcieq0serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig1),
223 .kr_pcieq0serdesstatus = IPATH_KREG_OFFSET(PCIEQ0SerdesStatus),
224 .kr_pcieq1serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig0),
225 .kr_pcieq1serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig1),
226 .kr_pcieq1serdesstatus = IPATH_KREG_OFFSET(PCIEQ1SerdesStatus)
227};
228
229static const struct ipath_cregs ipath_pe_cregs = {
230 .cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
231 .cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
232 .cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
233 .cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
234 .cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
235 .cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
236 .cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
237 .cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
238 .cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
239 .cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
240 .cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
241 .cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
242 .cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
243 .cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
244 .cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
245 .cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
246 .cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
247 .cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
248 .cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
249 .cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
250 .cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
251 .cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
252 .cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
253 .cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
254 .cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
255 .cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
256 .cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
257 .cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
258 .cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
259 .cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
260 .cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
261 .cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
262 .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt)
263};
264
265/* kr_intstatus, kr_intclear, kr_intmask bits */
266#define INFINIPATH_I_RCVURG_MASK 0x1F
267#define INFINIPATH_I_RCVAVAIL_MASK 0x1F
268
269/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
270#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL
271#define INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT 0
272#define INFINIPATH_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
273#define INFINIPATH_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
274#define INFINIPATH_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
275#define INFINIPATH_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
276#define INFINIPATH_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
277#define INFINIPATH_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
278#define INFINIPATH_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
279#define INFINIPATH_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
280#define INFINIPATH_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
281#define INFINIPATH_HWE_SERDESPLLFAILED 0x1000000000000000ULL
282
283/* kr_extstatus bits */
284#define INFINIPATH_EXTS_FREQSEL 0x2
285#define INFINIPATH_EXTS_SERDESSEL 0x4
286#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000
287#define INFINIPATH_EXTS_MEMBIST_FOUND 0x0000000000008000
288
289#define _IPATH_GPIO_SDA_NUM 1
290#define _IPATH_GPIO_SCL_NUM 0
291
292#define IPATH_GPIO_SDA (1ULL << \
293 (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
294#define IPATH_GPIO_SCL (1ULL << \
295 (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
296
297/**
298 * ipath_pe_handle_hwerrors - display hardware errors.
299 * @dd: the infinipath device
300 * @msg: the output buffer
301 * @msgl: the size of the output buffer
302 *
303 * Most hardware errors are catastrophic, but for right now,
304 * we'll print them and continue. We reuse the same message
305 * buffer as ipath_handle_errors() to avoid excessive stack
306 * usage.
307 */
308void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
309 size_t msgl)
310{
311 ipath_err_t hwerrs;
312 u32 bits, ctrl;
313 int isfatal = 0;
314 char bitsmsg[64];
315
316 hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
317 if (!hwerrs) {
318 /*
319 * better than printing cofusing messages
320 * This seems to be related to clearing the crc error, or
321 * the pll error during init.
322 */
323 ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
324 return;
325 } else if (hwerrs == ~0ULL) {
326 ipath_dev_err(dd, "Read of hardware error status failed "
327 "(all bits set); ignoring\n");
328 return;
329 }
330 ipath_stats.sps_hwerrs++;
331
332 /* Always clear the error status register, except MEMBISTFAIL,
333 * regardless of whether we continue or stop using the chip.
334 * We want that set so we know it failed, even across driver reload.
335 * We'll still ignore it in the hwerrmask. We do this partly for
336 * diagnostics, but also for support */
337 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
338 hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
339
340 hwerrs &= dd->ipath_hwerrmask;
341
342 /*
343 * make sure we get this much out, unless told to be quiet,
344 * or it's occurred within the last 5 seconds
345 */
346 if ((hwerrs & ~dd->ipath_lasthwerror) ||
347 (ipath_debug & __IPATH_VERBDBG))
348 dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
349 "(cleared)\n", (unsigned long long) hwerrs);
350 dd->ipath_lasthwerror |= hwerrs;
351
352 if (hwerrs & ~infinipath_hwe_bitsextant)
353 ipath_dev_err(dd, "hwerror interrupt with unknown errors "
354 "%llx set\n", (unsigned long long)
355 (hwerrs & ~infinipath_hwe_bitsextant));
356
357 ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
358 if (ctrl & INFINIPATH_C_FREEZEMODE) {
359 if (hwerrs) {
360 /*
361 * if any set that we aren't ignoring only make the
362 * complaint once, in case it's stuck or recurring,
363 * and we get here multiple times
364 */
365 if (dd->ipath_flags & IPATH_INITTED) {
366 ipath_dev_err(dd, "Fatal Error (freeze "
367 "mode), no longer usable\n");
368 isfatal = 1;
369 }
370 /*
371 * Mark as having had an error for driver, and also
372 * for /sys and status word mapped to user programs.
373 * This marks unit as not usable, until reset
374 */
375 *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
376 *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
377 dd->ipath_flags &= ~IPATH_INITTED;
378 } else {
379 ipath_dbg("Clearing freezemode on ignored hardware "
380 "error\n");
381 ctrl &= ~INFINIPATH_C_FREEZEMODE;
382 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
383 ctrl);
384 }
385 }
386
387 *msg = '\0';
388
389 if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
390 strlcat(msg, "[Memory BIST test failed, PE-800 unusable]",
391 msgl);
392 /* ignore from now on, so disable until driver reloaded */
393 *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
394 dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
395 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
396 dd->ipath_hwerrmask);
397 }
398 if (hwerrs & (INFINIPATH_HWE_RXEMEMPARITYERR_MASK
399 << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)) {
400 bits = (u32) ((hwerrs >>
401 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) &
402 INFINIPATH_HWE_RXEMEMPARITYERR_MASK);
403 snprintf(bitsmsg, sizeof bitsmsg, "[RXE Parity Errs %x] ",
404 bits);
405 strlcat(msg, bitsmsg, msgl);
406 }
407 if (hwerrs & (INFINIPATH_HWE_TXEMEMPARITYERR_MASK
408 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) {
409 bits = (u32) ((hwerrs >>
410 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) &
411 INFINIPATH_HWE_TXEMEMPARITYERR_MASK);
412 snprintf(bitsmsg, sizeof bitsmsg, "[TXE Parity Errs %x] ",
413 bits);
414 strlcat(msg, bitsmsg, msgl);
415 }
416 if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK
417 << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) {
418 bits = (u32) ((hwerrs >>
419 INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) &
420 INFINIPATH_HWE_PCIEMEMPARITYERR_MASK);
421 snprintf(bitsmsg, sizeof bitsmsg,
422 "[PCIe Mem Parity Errs %x] ", bits);
423 strlcat(msg, bitsmsg, msgl);
424 }
425 if (hwerrs & INFINIPATH_HWE_IBCBUSTOSPCPARITYERR)
426 strlcat(msg, "[IB2IPATH Parity]", msgl);
427 if (hwerrs & INFINIPATH_HWE_IBCBUSFRSPCPARITYERR)
428 strlcat(msg, "[IPATH2IB Parity]", msgl);
429
430#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \
431 INFINIPATH_HWE_COREPLL_RFSLIP )
432
433 if (hwerrs & _IPATH_PLL_FAIL) {
434 snprintf(bitsmsg, sizeof bitsmsg,
435 "[PLL failed (%llx), PE-800 unusable]",
436 (unsigned long long) hwerrs & _IPATH_PLL_FAIL);
437 strlcat(msg, bitsmsg, msgl);
438 /* ignore from now on, so disable until driver reloaded */
439 dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
440 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
441 dd->ipath_hwerrmask);
442 }
443
444 if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
445 /*
446 * If it occurs, it is left masked since the eternal
447 * interface is unused
448 */
449 dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
450 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
451 dd->ipath_hwerrmask);
452 }
453
454 if (hwerrs & INFINIPATH_HWE_PCIEPOISONEDTLP)
455 strlcat(msg, "[PCIe Poisoned TLP]", msgl);
456 if (hwerrs & INFINIPATH_HWE_PCIECPLTIMEOUT)
457 strlcat(msg, "[PCIe completion timeout]", msgl);
458
459 /*
460 * In practice, it's unlikely wthat we'll see PCIe PLL, or bus
461 * parity or memory parity error failures, because most likely we
462 * won't be able to talk to the core of the chip. Nonetheless, we
463 * might see them, if they are in parts of the PCIe core that aren't
464 * essential.
465 */
466 if (hwerrs & INFINIPATH_HWE_PCIE1PLLFAILED)
467 strlcat(msg, "[PCIePLL1]", msgl);
468 if (hwerrs & INFINIPATH_HWE_PCIE0PLLFAILED)
469 strlcat(msg, "[PCIePLL0]", msgl);
470 if (hwerrs & INFINIPATH_HWE_PCIEBUSPARITYXTLH)
471 strlcat(msg, "[PCIe XTLH core parity]", msgl);
472 if (hwerrs & INFINIPATH_HWE_PCIEBUSPARITYXADM)
473 strlcat(msg, "[PCIe ADM TX core parity]", msgl);
474 if (hwerrs & INFINIPATH_HWE_PCIEBUSPARITYRADM)
475 strlcat(msg, "[PCIe ADM RX core parity]", msgl);
476
477 if (hwerrs & INFINIPATH_HWE_RXDSYNCMEMPARITYERR)
478 strlcat(msg, "[Rx Dsync]", msgl);
479 if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED)
480 strlcat(msg, "[SerDes PLL]", msgl);
481
482 ipath_dev_err(dd, "%s hardware error\n", msg);
483 if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) {
484 /*
485 * for /sys status file ; if no trailing } is copied, we'll
486 * know it was truncated.
487 */
488 snprintf(dd->ipath_freezemsg, dd->ipath_freezelen,
489 "{%s}", msg);
490 }
491}
492
493/**
494 * ipath_pe_boardname - fill in the board name
495 * @dd: the infinipath device
496 * @name: the output buffer
497 * @namelen: the size of the output buffer
498 *
499 * info is based on the board revision register
500 */
501static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
502 size_t namelen)
503{
504 char *n = NULL;
505 u8 boardrev = dd->ipath_boardrev;
506 int ret;
507
508 switch (boardrev) {
509 case 0:
510 n = "InfiniPath_Emulation";
511 break;
512 case 1:
513 n = "InfiniPath_PE-800-Bringup";
514 break;
515 case 2:
516 n = "InfiniPath_PE-880";
517 break;
518 case 3:
519 n = "InfiniPath_PE-850";
520 break;
521 case 4:
522 n = "InfiniPath_PE-860";
523 break;
524 default:
525 ipath_dev_err(dd,
526 "Don't yet know about board with ID %u\n",
527 boardrev);
528 snprintf(name, namelen, "Unknown_InfiniPath_PE-8xx_%u",
529 boardrev);
530 break;
531 }
532 if (n)
533 snprintf(name, namelen, "%s", n);
534
535 if (dd->ipath_majrev != 4 || dd->ipath_minrev != 1) {
536 ipath_dev_err(dd, "Unsupported PE-800 revision %u.%u!\n",
537 dd->ipath_majrev, dd->ipath_minrev);
538 ret = 1;
539 } else
540 ret = 0;
541
542 return ret;
543}
544
545/**
546 * ipath_pe_init_hwerrors - enable hardware errors
547 * @dd: the infinipath device
548 *
549 * now that we have finished initializing everything that might reasonably
550 * cause a hardware error, and cleared those error bits as they occur,
551 * we can enable hardware errors in the mask (potentially enabling
552 * freeze mode), and enable hardware errors as errors (along with
553 * everything else) in errormask
554 */
555void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
556{
557 ipath_err_t val;
558 u64 extsval;
559
560 extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
561
562 if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST))
563 ipath_dev_err(dd, "MemBIST did not complete!\n");
564
565 val = ~0ULL; /* barring bugs, all hwerrors become interrupts, */
566
567 if (!dd->ipath_boardrev) // no PLL for Emulator
568 val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
569
570 /* workaround bug 9460 in internal interface bus parity checking */
571 val &= ~INFINIPATH_HWE_PCIEBUSPARITYRADM;
572
573 dd->ipath_hwerrmask = val;
574}
575
576/**
577 * ipath_pe_bringup_serdes - bring up the serdes
578 * @dd: the infinipath device
579 */
580int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
581{
582 u64 val, tmp, config1;
583 int ret = 0, change = 0;
584
585 ipath_dbg("Trying to bringup serdes\n");
586
587 if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
588 INFINIPATH_HWE_SERDESPLLFAILED) {
589 ipath_dbg("At start, serdes PLL failed bit set "
590 "in hwerrstatus, clearing and continuing\n");
591 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
592 INFINIPATH_HWE_SERDESPLLFAILED);
593 }
594
595 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
596 config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1);
597
598 ipath_cdbg(VERBOSE, "SerDes status config0=%llx config1=%llx, "
599 "xgxsconfig %llx\n", (unsigned long long) val,
600 (unsigned long long) config1, (unsigned long long)
601 ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
602
603 /*
604 * Force reset on, also set rxdetect enable. Must do before reading
605 * serdesstatus at least for simulation, or some of the bits in
606 * serdes status will come back as undefined and cause simulation
607 * failures
608 */
609 val |= INFINIPATH_SERDC0_RESET_PLL | INFINIPATH_SERDC0_RXDETECT_EN
610 | INFINIPATH_SERDC0_L1PWR_DN;
611 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
612 /* be sure chip saw it */
613 tmp = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
614 udelay(5); /* need pll reset set at least for a bit */
615 /*
616 * after PLL is reset, set the per-lane Resets and TxIdle and
617 * clear the PLL reset and rxdetect (to get falling edge).
618 * Leave L1PWR bits set (permanently)
619 */
620 val &= ~(INFINIPATH_SERDC0_RXDETECT_EN | INFINIPATH_SERDC0_RESET_PLL
621 | INFINIPATH_SERDC0_L1PWR_DN);
622 val |= INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE;
623 ipath_cdbg(VERBOSE, "Clearing pll reset and setting lane resets "
624 "and txidle (%llx)\n", (unsigned long long) val);
625 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
626 /* be sure chip saw it */
627 tmp = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
628 /* need PLL reset clear for at least 11 usec before lane
629 * resets cleared; give it a few more to be sure */
630 udelay(15);
631 val &= ~(INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE);
632
633 ipath_cdbg(VERBOSE, "Clearing lane resets and txidle "
634 "(writing %llx)\n", (unsigned long long) val);
635 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
636 /* be sure chip saw it */
637 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
638
639 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
640 if (((val >> INFINIPATH_XGXS_MDIOADDR_SHIFT) &
641 INFINIPATH_XGXS_MDIOADDR_MASK) != 3) {
642 val &=
643 ~(INFINIPATH_XGXS_MDIOADDR_MASK <<
644 INFINIPATH_XGXS_MDIOADDR_SHIFT);
645 /* MDIO address 3 */
646 val |= 3ULL << INFINIPATH_XGXS_MDIOADDR_SHIFT;
647 change = 1;
648 }
649 if (val & INFINIPATH_XGXS_RESET) {
650 val &= ~INFINIPATH_XGXS_RESET;
651 change = 1;
652 }
653 if (change)
654 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
655
656 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
657
658 /* clear current and de-emphasis bits */
659 config1 &= ~0x0ffffffff00ULL;
660 /* set current to 20ma */
661 config1 |= 0x00000000000ULL;
662 /* set de-emphasis to -5.68dB */
663 config1 |= 0x0cccc000000ULL;
664 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig1, config1);
665
666 ipath_cdbg(VERBOSE, "done: SerDes status config0=%llx "
667 "config1=%llx, sstatus=%llx xgxs=%llx\n",
668 (unsigned long long) val, (unsigned long long) config1,
669 (unsigned long long)
670 ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
671 (unsigned long long)
672 ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
673
674 if (!ipath_waitfor_mdio_cmdready(dd)) {
675 ipath_write_kreg(
676 dd, dd->ipath_kregs->kr_mdio,
677 ipath_mdio_req(IPATH_MDIO_CMD_READ, 31,
678 IPATH_MDIO_CTRL_XGXS_REG_8, 0));
679 if (ipath_waitfor_complete(dd, dd->ipath_kregs->kr_mdio,
680 IPATH_MDIO_DATAVALID, &val))
681 ipath_dbg("Never got MDIO data for XGXS "
682 "status read\n");
683 else
684 ipath_cdbg(VERBOSE, "MDIO Read reg8, "
685 "'bank' 31 %x\n", (u32) val);
686 } else
687 ipath_dbg("Never got MDIO cmdready for XGXS status read\n");
688
689 return ret;
690}
691
692/**
693 * ipath_pe_quiet_serdes - set serdes to txidle
694 * @dd: the infinipath device
695 * Called when driver is being unloaded
696 */
697void ipath_pe_quiet_serdes(struct ipath_devdata *dd)
698{
699 u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
700
701 val |= INFINIPATH_SERDC0_TXIDLE;
702 ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n",
703 (unsigned long long) val);
704 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
705}
706
707/* this is not yet needed on the PE-800, so just return 0. */
708static int ipath_pe_intconfig(struct ipath_devdata *dd)
709{
710 return 0;
711}
712
713/**
714 * ipath_setup_pe_setextled - set the state of the two external LEDs
715 * @dd: the infinipath device
716 * @lst: the L state
717 * @ltst: the LT state
718 *
719 * These LEDs indicate the physical and logical state of IB link.
720 * For this chip (at least with recommended board pinouts), LED1
721 * is Yellow (logical state) and LED2 is Green (physical state),
722 * is Yellow (logical state) and LED2 is Green (physical state).
723 * Note: We try to match the Mellanox HCA LED behavior as best
724 * we can. Green indicates physical link state is OK (something is
725 * plugged in, and we can train).
726 * Amber indicates the link is logically up (ACTIVE).
727 * Mellanox further blinks the amber LED to indicate data packet
728 * activity, but we have no hardware support for that, so it would
729 * require waking up every 10-20 msecs and checking the counters
730 * on the chip, and then turning the LED off if appropriate. That's
731 * visible overhead, so not something we will do.
732 *
733 */
734static void ipath_setup_pe_setextled(struct ipath_devdata *dd, u64 lst,
735 u64 ltst)
736{
737 u64 extctl;
738
739 /* the diags use the LED to indicate diag info, so we leave
740 * the external LED alone when the diags are running */
741 if (ipath_diag_inuse)
742 return;
743
744 extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
745 INFINIPATH_EXTC_LED2PRIPORT_ON);
746
747 if (ltst & INFINIPATH_IBCS_LT_STATE_LINKUP)
748 extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
749 if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
750 extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
751 dd->ipath_extctrl = extctl;
752 ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
753}
754
755/**
756 * ipath_setup_pe_cleanup - clean up any per-chip chip-specific stuff
757 * @dd: the infinipath device
758 *
759 * This is called during driver unload.
760 * We do the pci_disable_msi here, not in generic code, because it
761 * isn't used for the HT-400. If we do end up needing pci_enable_msi
762 * at some point in the future for HT-400, we'll move the call back
763 * into the main init_one code.
764 */
765static void ipath_setup_pe_cleanup(struct ipath_devdata *dd)
766{
767 dd->ipath_msi_lo = 0; /* just in case unload fails */
768 pci_disable_msi(dd->pcidev);
769}
770
771/**
772 * ipath_setup_pe_config - setup PCIe config related stuff
773 * @dd: the infinipath device
774 * @pdev: the PCI device
775 *
776 * The pci_enable_msi() call will fail on systems with MSI quirks
777 * such as those with AMD8131, even if the device of interest is not
778 * attached to that device (in the 2.6.13 - 2.6.15 kernels, at least;
779 * fixed late in 2.6.16).
780 * All that can be done is to edit the kernel source to remove the quirk
781 * check until that is fixed.
782 * We do not need to call enable_msi() for our HyperTransport chip (HT-400),
783 * even though it uses MSI, and we want to avoid the quirk warning,
784 * so we call enable_msi() only for the PE-800. If we do end up needing
785 * pci_enable_msi at some point in the future for HT-400, we'll move the
786 * call back into the main init_one code.
787 * We save the msi lo and hi values, so we can restore them after
788 * chip reset (the kernel PCI infrastructure doesn't yet handle that
789 * correctly).
790 */
791static int ipath_setup_pe_config(struct ipath_devdata *dd,
792 struct pci_dev *pdev)
793{
794 int pos, ret;
795
796 dd->ipath_msi_lo = 0; /* used as a flag during reset processing */
797 ret = pci_enable_msi(dd->pcidev);
798 if (ret)
799 ipath_dev_err(dd, "pci_enable_msi failed: %d, "
800 "interrupts may not work\n", ret);
801 /* continue even if it fails, we may still be OK... */
802
803 if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
804 u16 control;
805 pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
806 &dd->ipath_msi_lo);
807 pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
808 &dd->ipath_msi_hi);
809 pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
810 &control);
811 /* now save the data (vector) info */
812 pci_read_config_word(dd->pcidev,
813 pos + ((control & PCI_MSI_FLAGS_64BIT)
814 ? 12 : 8),
815 &dd->ipath_msi_data);
816 ipath_cdbg(VERBOSE, "Read msi data 0x%x from config offset "
817 "0x%x, control=0x%x\n", dd->ipath_msi_data,
818 pos + ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
819 control);
820 /* we save the cachelinesize also, although it doesn't
821 * really matter */
822 pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
823 &dd->ipath_pci_cacheline);
824 } else
825 ipath_dev_err(dd, "Can't find MSI capability, "
826 "can't save MSI settings for reset\n");
827 if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP))) {
828 u16 linkstat;
829 pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
830 &linkstat);
831 linkstat >>= 4;
832 linkstat &= 0x1f;
833 if (linkstat != 8)
834 ipath_dev_err(dd, "PCIe width %u, "
835 "performance reduced\n", linkstat);
836 }
837 else
838 ipath_dev_err(dd, "Can't find PCI Express "
839 "capability!\n");
840 return 0;
841}
842
843static void ipath_init_pe_variables(void)
844{
845 /*
846 * bits for selecting i2c direction and values,
847 * used for I2C serial flash
848 */
849 ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
850 ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
851 ipath_gpio_sda = IPATH_GPIO_SDA;
852 ipath_gpio_scl = IPATH_GPIO_SCL;
853
854 /* variables for sanity checking interrupt and errors */
855 infinipath_hwe_bitsextant =
856 (INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
857 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
858 (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK <<
859 INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) |
860 INFINIPATH_HWE_PCIE1PLLFAILED |
861 INFINIPATH_HWE_PCIE0PLLFAILED |
862 INFINIPATH_HWE_PCIEPOISONEDTLP |
863 INFINIPATH_HWE_PCIECPLTIMEOUT |
864 INFINIPATH_HWE_PCIEBUSPARITYXTLH |
865 INFINIPATH_HWE_PCIEBUSPARITYXADM |
866 INFINIPATH_HWE_PCIEBUSPARITYRADM |
867 INFINIPATH_HWE_MEMBISTFAILED |
868 INFINIPATH_HWE_COREPLL_FBSLIP |
869 INFINIPATH_HWE_COREPLL_RFSLIP |
870 INFINIPATH_HWE_SERDESPLLFAILED |
871 INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
872 INFINIPATH_HWE_IBCBUSFRSPCPARITYERR;
873 infinipath_i_bitsextant =
874 (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
875 (INFINIPATH_I_RCVAVAIL_MASK <<
876 INFINIPATH_I_RCVAVAIL_SHIFT) |
877 INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
878 INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO;
879 infinipath_e_bitsextant =
880 INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
881 INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
882 INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
883 INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
884 INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
885 INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
886 INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
887 INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
888 INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
889 INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SMAXPKTLEN |
890 INFINIPATH_E_SUNDERRUN | INFINIPATH_E_SPKTLEN |
891 INFINIPATH_E_SDROPPEDSMPPKT | INFINIPATH_E_SDROPPEDDATAPKT |
892 INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
893 INFINIPATH_E_SUNSUPVL | INFINIPATH_E_IBSTATUSCHANGED |
894 INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET |
895 INFINIPATH_E_HARDWARE;
896
897 infinipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
898 infinipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
899}
900
901/* setup the MSI stuff again after a reset. I'd like to just call
902 * pci_enable_msi() and request_irq() again, but when I do that,
903 * the MSI enable bit doesn't get set in the command word, and
904 * we switch to a different interrupt vector, which is confusing,
905 * so I instead just do it all inline. Perhaps we can somehow tie this
906 * into the PCIe hotplug support at some point.
907 * Note, because I'm doing it all here, I don't call pci_disable_msi()
908 * or free_irq() at the start of ipath_setup_pe_reset().
909 */
910static int ipath_reinit_msi(struct ipath_devdata *dd)
911{
912 int pos;
913 u16 control;
914 int ret;
915
916 if (!dd->ipath_msi_lo) {
917 dev_info(&dd->pcidev->dev, "Can't restore MSI config, "
918 "initial setup failed?\n");
919 ret = 0;
920 goto bail;
921 }
922
923 if (!(pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
924 ipath_dev_err(dd, "Can't find MSI capability, "
925 "can't restore MSI settings\n");
926 ret = 0;
927 goto bail;
928 }
929 ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
930 dd->ipath_msi_lo, pos + PCI_MSI_ADDRESS_LO);
931 pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
932 dd->ipath_msi_lo);
933 ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
934 dd->ipath_msi_hi, pos + PCI_MSI_ADDRESS_HI);
935 pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
936 dd->ipath_msi_hi);
937 pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
938 if (!(control & PCI_MSI_FLAGS_ENABLE)) {
939 ipath_cdbg(VERBOSE, "MSI control at off %x was %x, "
940 "setting MSI enable (%x)\n", pos + PCI_MSI_FLAGS,
941 control, control | PCI_MSI_FLAGS_ENABLE);
942 control |= PCI_MSI_FLAGS_ENABLE;
943 pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
944 control);
945 }
946 /* now rewrite the data (vector) info */
947 pci_write_config_word(dd->pcidev, pos +
948 ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
949 dd->ipath_msi_data);
950 /* we restore the cachelinesize also, although it doesn't really
951 * matter */
952 pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
953 dd->ipath_pci_cacheline);
954 /* and now set the pci master bit again */
955 pci_set_master(dd->pcidev);
956 ret = 1;
957
958bail:
959 return ret;
960}
961
962/* This routine sleeps, so it can only be called from user context, not
963 * from interrupt context. If we need interrupt context, we can split
964 * it into two routines.
965 */
966static int ipath_setup_pe_reset(struct ipath_devdata *dd)
967{
968 u64 val;
969 int i;
970 int ret;
971
972 /* Use ERROR so it shows up in logs, etc. */
973 ipath_dev_err(dd, "Resetting PE-800 unit %u\n",
974 dd->ipath_unit);
975 val = dd->ipath_control | INFINIPATH_C_RESET;
976 ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val);
977 mb();
978
979 for (i = 1; i <= 5; i++) {
980 int r;
981 /* allow MBIST, etc. to complete; longer on each retry.
982 * We sometimes get machine checks from bus timeout if no
983 * response, so for now, make it *really* long.
984 */
985 msleep(1000 + (1 + i) * 2000);
986 if ((r =
987 pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
988 dd->ipath_pcibar0)))
989 ipath_dev_err(dd, "rewrite of BAR0 failed: %d\n",
990 r);
991 if ((r =
992 pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
993 dd->ipath_pcibar1)))
994 ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n",
995 r);
996 /* now re-enable memory access */
997 if ((r = pci_enable_device(dd->pcidev)))
998 ipath_dev_err(dd, "pci_enable_device failed after "
999 "reset: %d\n", r);
1000 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
1001 if (val == dd->ipath_revision) {
1002 ipath_cdbg(VERBOSE, "Got matching revision "
1003 "register %llx on try %d\n",
1004 (unsigned long long) val, i);
1005 ret = ipath_reinit_msi(dd);
1006 goto bail;
1007 }
1008 /* Probably getting -1 back */
1009 ipath_dbg("Didn't get expected revision register, "
1010 "got %llx, try %d\n", (unsigned long long) val,
1011 i + 1);
1012 }
1013 ret = 0; /* failed */
1014
1015bail:
1016 return ret;
1017}
1018
1019/**
1020 * ipath_pe_put_tid - write a TID in chip
1021 * @dd: the infinipath device
1022 * @tidptr: pointer to the expected TID (in chip) to update
1023 * @tidtype: 0 for eager, 1 for expected
1024 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
1025 *
1026 * This exists as a separate routine to allow for special locking etc.
1027 * It's used for both the full cleanup on exit, as well as the normal
1028 * setup and teardown.
1029 */
1030static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
1031 u32 type, unsigned long pa)
1032{
1033 u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
1034 unsigned long flags = 0; /* keep gcc quiet */
1035
1036 if (pa != dd->ipath_tidinvalid) {
1037 if (pa & ((1U << 11) - 1)) {
1038 dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
1039 "not 4KB aligned!\n", pa);
1040 return;
1041 }
1042 pa >>= 11;
1043 /* paranoia check */
1044 if (pa & (7<<29))
1045 ipath_dev_err(dd,
1046 "BUG: Physical page address 0x%lx "
1047 "has bits set in 31-29\n", pa);
1048
1049 if (type == 0)
1050 pa |= dd->ipath_tidtemplate;
1051 else /* for now, always full 4KB page */
1052 pa |= 2 << 29;
1053 }
1054
1055 /* workaround chip bug 9437 by writing each TID twice
1056 * and holding a spinlock around the writes, so they don't
1057 * intermix with other TID (eager or expected) writes
1058 * Unfortunately, this call can be done from interrupt level
1059 * for the port 0 eager TIDs, so we have to use irqsave
1060 */
1061 spin_lock_irqsave(&dd->ipath_tid_lock, flags);
1062 ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeeddeaf);
1063 if (dd->ipath_kregbase)
1064 writel(pa, tidp32);
1065 ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xdeadbeef);
1066 mmiowb();
1067 spin_unlock_irqrestore(&dd->ipath_tid_lock, flags);
1068}
1069
1070/**
1071 * ipath_pe_clear_tids - clear all TID entries for a port, expected and eager
1072 * @dd: the infinipath device
1073 * @port: the port
1074 *
1075 * clear all TID entries for a port, expected and eager.
1076 * Used from ipath_close(). On PE800, TIDs are only 32 bits,
1077 * not 64, but they are still on 64 bit boundaries, so tidbase
1078 * is declared as u64 * for the pointer math, even though we write 32 bits
1079 */
1080static void ipath_pe_clear_tids(struct ipath_devdata *dd, unsigned port)
1081{
1082 u64 __iomem *tidbase;
1083 unsigned long tidinv;
1084 int i;
1085
1086 if (!dd->ipath_kregbase)
1087 return;
1088
1089 ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
1090
1091 tidinv = dd->ipath_tidinvalid;
1092 tidbase = (u64 __iomem *)
1093 ((char __iomem *)(dd->ipath_kregbase) +
1094 dd->ipath_rcvtidbase +
1095 port * dd->ipath_rcvtidcnt * sizeof(*tidbase));
1096
1097 for (i = 0; i < dd->ipath_rcvtidcnt; i++)
1098 ipath_pe_put_tid(dd, &tidbase[i], 0, tidinv);
1099
1100 tidbase = (u64 __iomem *)
1101 ((char __iomem *)(dd->ipath_kregbase) +
1102 dd->ipath_rcvegrbase +
1103 port * dd->ipath_rcvegrcnt * sizeof(*tidbase));
1104
1105 for (i = 0; i < dd->ipath_rcvegrcnt; i++)
1106 ipath_pe_put_tid(dd, &tidbase[i], 1, tidinv);
1107}
1108
1109/**
1110 * ipath_pe_tidtemplate - setup constants for TID updates
1111 * @dd: the infinipath device
1112 *
1113 * We set up values that we use a lot, to avoid recalculating them each time
1114 */
1115static void ipath_pe_tidtemplate(struct ipath_devdata *dd)
1116{
1117 u32 egrsize = dd->ipath_rcvegrbufsize;
1118
1119 /* For now, we always allocate 4KB buffers (at init) so we can
1120 * receive max size packets. We may want a module parameter to
1121 * specify 2KB or 4KB and/or make it per port instead of per device
1122 * for those who want to reduce memory footprint. Note that the
1123 * ipath_rcvhdrentsize size must be large enough to hold the largest
1124 * IB header (currently 96 bytes) that we expect to handle (plus of
1125 * course the 2 dwords of RHF).
1126 */
1127 if (egrsize == 2048)
1128 dd->ipath_tidtemplate = 1U << 29;
1129 else if (egrsize == 4096)
1130 dd->ipath_tidtemplate = 2U << 29;
1131 else {
1132 egrsize = 4096;
1133 dev_info(&dd->pcidev->dev, "BUG: unsupported egrbufsize "
1134 "%u, using %u\n", dd->ipath_rcvegrbufsize,
1135 egrsize);
1136 dd->ipath_tidtemplate = 2U << 29;
1137 }
1138 dd->ipath_tidinvalid = 0;
1139}
1140
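
As a concrete illustration of the template just set up: a PE-800 TID word carries the buffer's physical address in 2KB units in its low bits, with the buffer-size code from ipath_tidtemplate in bits 30:29 (1 = 2KB, 2 = 4KB). A minimal standalone sketch of that composition, mirroring what ipath_pe_put_tid() above does inline (the helper name is made up for illustration and is not part of the driver):

static u32 pe800_make_tid(unsigned long pa, u32 sizecode)
{
	/* physical address is stored in 2KB units (low 11 bits dropped) */
	u32 tid = (u32) (pa >> 11);

	/* bits 30:29 carry the buffer size code: 1 = 2KB, 2 = 4KB */
	tid |= sizecode << 29;
	return tid;
}

For example, a 4KB buffer at physical address 0x12345000 encodes as (0x12345000 >> 11) | (2 << 29) = 0x4002468a.
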
1141static int ipath_pe_early_init(struct ipath_devdata *dd)
1142{
1143 dd->ipath_flags |= IPATH_4BYTE_TID;
1144
1145 /*
1146 * For openib, we need to be able to handle an IB header of 96 bytes
1147 * or 24 dwords. HT-400 has arbitrary sized receive buffers, so we
1148 * made them the same size as the PIO buffers. The PE-800 does not
1149 * handle arbitrary size buffers, so we need the header large enough
1150 * to handle the largest IB header, but still have room for a 2KB MTU
1151 * standard IB packet.
1152 */
1153 dd->ipath_rcvhdrentsize = 24;
1154 dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
1155
1156 /* For HT-400, we allocate a somewhat overly large eager buffer,
1157 * such that we can guarantee that we can receive the largest packet
1158 * that we can send out. To truly support a 4KB MTU, we need to
1159 * bump this to a larger value. We'll do this when I get around to
1160 * testing 4KB sends on the PE-800, which I have not yet done.
1161 */
1162 dd->ipath_rcvegrbufsize = 2048;
1163 /*
1164 * the min() check here is currently a nop, but it may not always
1165 * be, depending on just how we do ipath_rcvegrbufsize
1166 */
1167 dd->ipath_ibmaxlen = min(dd->ipath_piosize2k,
1168 dd->ipath_rcvegrbufsize +
1169 (dd->ipath_rcvhdrentsize << 2));
1170 dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
1171
1172 /*
1173 * For PE-800, we can request a receive interrupt for 1 or
1174 * more packets from current offset. For now, we set this
1175 * up for a single packet, to match the HT-400 behavior.
1176 */
1177 dd->ipath_rhdrhead_intr_off = 1ULL<<32;
1178
1179 return 0;
1180}
1181
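
With the values chosen above, the receive-side maximum IB packet length works out to

	ipath_ibmaxlen = min(ipath_piosize2k, 2048 + (24 << 2))
	               = min(ipath_piosize2k, 2144)

and, per the comment on the min() above, the 2KB-PIO-buffer term is not currently the limiting factor.
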
1182int __attribute__((weak)) ipath_unordered_wc(void)
1183{
1184 return 0;
1185}
1186
1187/**
1188 * ipath_pe_get_base_info - set chip-specific flags for user code
1189 * @pd: the infinipath port data
1190 * @kbase: ipath_base_info pointer
1191 *
1192 * We set the PCIE flag because the lower bandwidth on PCIe vs
1193 * HyperTransport can affect some user packet algorithms.
1194 */
1195static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase)
1196{
1197 struct ipath_base_info *kinfo = kbase;
1198
1199 if (ipath_unordered_wc()) {
1200 kinfo->spi_runtime_flags |= IPATH_RUNTIME_FORCE_WC_ORDER;
1201 ipath_cdbg(PROC, "Intel processor, forcing WC order\n");
1202 }
1203 else
1204 ipath_cdbg(PROC, "Not Intel processor, WC ordered\n");
1205
1206 kinfo->spi_runtime_flags |= IPATH_RUNTIME_PCIE;
1207
1208 return 0;
1209}
1210
1211/**
1212 * ipath_init_pe800_funcs - set up the chip-specific function pointers
1213 * @dd: the infinipath device
1214 *
1215 * This is global, and is called directly at init to set up the
1216 * chip-specific function pointers for later use.
1217 */
1218void ipath_init_pe800_funcs(struct ipath_devdata *dd)
1219{
1220 dd->ipath_f_intrsetup = ipath_pe_intconfig;
1221 dd->ipath_f_bus = ipath_setup_pe_config;
1222 dd->ipath_f_reset = ipath_setup_pe_reset;
1223 dd->ipath_f_get_boardname = ipath_pe_boardname;
1224 dd->ipath_f_init_hwerrors = ipath_pe_init_hwerrors;
1225 dd->ipath_f_early_init = ipath_pe_early_init;
1226 dd->ipath_f_handle_hwerrors = ipath_pe_handle_hwerrors;
1227 dd->ipath_f_quiet_serdes = ipath_pe_quiet_serdes;
1228 dd->ipath_f_bringup_serdes = ipath_pe_bringup_serdes;
1229 dd->ipath_f_clear_tids = ipath_pe_clear_tids;
1230 dd->ipath_f_put_tid = ipath_pe_put_tid;
1231 dd->ipath_f_cleanup = ipath_setup_pe_cleanup;
1232 dd->ipath_f_setextled = ipath_setup_pe_setextled;
1233 dd->ipath_f_get_base_info = ipath_pe_get_base_info;
1234
1235 /* initialize chip-specific variables */
1236 dd->ipath_f_tidtemplate = ipath_pe_tidtemplate;
1237
1238 /*
1239 * setup the register offsets, since they are different for each
1240 * chip
1241 */
1242 dd->ipath_kregs = &ipath_pe_kregs;
1243 dd->ipath_cregs = &ipath_pe_cregs;
1244
1245 ipath_init_pe_variables();
1246}
1247
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
new file mode 100644
index 000000000000..6058d70d7577
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -0,0 +1,913 @@
1/*
2 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/err.h>
34#include <linux/vmalloc.h>
35
36#include "ipath_verbs.h"
37#include "ips_common.h"
38
39#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
40#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
41#define mk_qpn(qpt, map, off) (((map) - (qpt)->map) * BITS_PER_PAGE + \
42 (off))
43#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
44 BITS_PER_PAGE, off)
45
46#define TRANS_INVALID 0
47#define TRANS_ANY2RST 1
48#define TRANS_RST2INIT 2
49#define TRANS_INIT2INIT 3
50#define TRANS_INIT2RTR 4
51#define TRANS_RTR2RTS 5
52#define TRANS_RTS2RTS 6
53#define TRANS_SQERR2RTS 7
54#define TRANS_ANY2ERR 8
55#define TRANS_RTS2SQD 9 /* XXX Wait for expected ACKs & signal event */
56#define TRANS_SQD2SQD 10 /* error if not drained & parameter change */
57#define TRANS_SQD2RTS 11 /* error if not drained */
58
59/*
60 * Convert the AETH credit code into the number of credits.
61 */
62static u32 credit_table[31] = {
63 0, /* 0 */
64 1, /* 1 */
65 2, /* 2 */
66 3, /* 3 */
67 4, /* 4 */
68 6, /* 5 */
69 8, /* 6 */
70 12, /* 7 */
71 16, /* 8 */
72 24, /* 9 */
73 32, /* A */
74 48, /* B */
75 64, /* C */
76 96, /* D */
77 128, /* E */
78 192, /* F */
79 256, /* 10 */
80 384, /* 11 */
81 512, /* 12 */
82 768, /* 13 */
83 1024, /* 14 */
84 1536, /* 15 */
85 2048, /* 16 */
86 3072, /* 17 */
87 4096, /* 18 */
88 6144, /* 19 */
89 8192, /* 1A */
90 12288, /* 1B */
91 16384, /* 1C */
92 24576, /* 1D */
93 32768 /* 1E */
94};
95
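
The 5-bit credit code carried in an AETH indexes this table to recover a credit count; ipath_compute_aeth() below performs the reverse lookup with a binary search for the largest code whose entry does not exceed the number of free receive WQEs. A standalone sketch of that reverse mapping, assuming the table above is passed in (this helper is illustration only, not part of the driver):

static u32 ex_credits_to_code(u32 credits, const u32 *table)
{
	u32 min = 0, max = 31, x;

	for (;;) {
		x = (min + max) / 2;
		if (table[x] == credits)
			break;
		if (table[x] > credits)
			max = x;
		else if (min == x)
			break;
		else
			min = x;
	}
	return x;
}

For example, 100 free RWQEs map to code 0xD, which advertises the next lower table value, 96 credits.
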
96static u32 alloc_qpn(struct ipath_qp_table *qpt)
97{
98 u32 i, offset, max_scan, qpn;
99 struct qpn_map *map;
100 u32 ret;
101
102 qpn = qpt->last + 1;
103 if (qpn >= QPN_MAX)
104 qpn = 2;
105 offset = qpn & BITS_PER_PAGE_MASK;
106 map = &qpt->map[qpn / BITS_PER_PAGE];
107 max_scan = qpt->nmaps - !offset;
108 for (i = 0;;) {
109 if (unlikely(!map->page)) {
110 unsigned long page = get_zeroed_page(GFP_KERNEL);
111 unsigned long flags;
112
113 /*
114 * Free the page if someone raced with us
115 * installing it:
116 */
117 spin_lock_irqsave(&qpt->lock, flags);
118 if (map->page)
119 free_page(page);
120 else
121 map->page = (void *)page;
122 spin_unlock_irqrestore(&qpt->lock, flags);
123 if (unlikely(!map->page))
124 break;
125 }
126 if (likely(atomic_read(&map->n_free))) {
127 do {
128 if (!test_and_set_bit(offset, map->page)) {
129 atomic_dec(&map->n_free);
130 qpt->last = qpn;
131 ret = qpn;
132 goto bail;
133 }
134 offset = find_next_offset(map, offset);
135 qpn = mk_qpn(qpt, map, offset);
136 /*
137 * This test differs from alloc_pidmap().
138 * If find_next_offset() does find a zero
139 * bit, we don't need to check for QPN
140 * wrapping around past our starting QPN.
141 * We just need to be sure we don't loop
142 * forever.
143 */
144 } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
145 }
146 /*
147 * In order to keep the number of pages allocated to a
148 * minimum, we scan all the existing pages before increasing
149 * the size of the bitmap table.
150 */
151 if (++i > max_scan) {
152 if (qpt->nmaps == QPNMAP_ENTRIES)
153 break;
154 map = &qpt->map[qpt->nmaps++];
155 offset = 0;
156 } else if (map < &qpt->map[qpt->nmaps]) {
157 ++map;
158 offset = 0;
159 } else {
160 map = &qpt->map[0];
161 offset = 2;
162 }
163 qpn = mk_qpn(qpt, map, offset);
164 }
165
166 ret = 0;
167
168bail:
169 return ret;
170}
171
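
The mapping between a QPN and its (bitmap page, bit offset) pair used by alloc_qpn() and free_qpn() is just a divide/modulo by BITS_PER_PAGE. A standalone sketch, assuming 4KB pages so BITS_PER_PAGE is 32768 (illustration only, not driver code):

static unsigned int ex_qpn_to_page(u32 qpn)
{
	return qpn / 32768;		/* which qpt->map[] entry */
}

static unsigned int ex_qpn_to_bit(u32 qpn)
{
	return qpn & (32768 - 1);	/* bit within that page */
}

static u32 ex_mk_qpn(unsigned int page, unsigned int bit)
{
	return page * 32768 + bit;	/* inverse, as mk_qpn() does */
}

For example, QPN 100000 lives in map page 3 at bit offset 1696, and page 3 with bit 1696 reconstructs QPN 100000.
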
172static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
173{
174 struct qpn_map *map;
175
176 map = qpt->map + qpn / BITS_PER_PAGE;
177 if (map->page)
178 clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
179 atomic_inc(&map->n_free);
180}
181
182/**
183 * ipath_alloc_qpn - allocate a QP number
184 * @qpt: the QP table
185 * @qp: the QP
186 * @type: the QP type (IB_QPT_SMI and IB_QPT_GSI are special)
187 *
188 * Allocate the next available QPN and put the QP into the hash table.
189 * The hash table holds a reference to the QP.
190 */
191int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
192 enum ib_qp_type type)
193{
194 unsigned long flags;
195 u32 qpn;
196 int ret;
197
198 if (type == IB_QPT_SMI)
199 qpn = 0;
200 else if (type == IB_QPT_GSI)
201 qpn = 1;
202 else {
203 /* Allocate the next available QPN */
204 qpn = alloc_qpn(qpt);
205 if (qpn == 0) {
206 ret = -ENOMEM;
207 goto bail;
208 }
209 }
210 qp->ibqp.qp_num = qpn;
211
212 /* Add the QP to the hash table. */
213 spin_lock_irqsave(&qpt->lock, flags);
214
215 qpn %= qpt->max;
216 qp->next = qpt->table[qpn];
217 qpt->table[qpn] = qp;
218 atomic_inc(&qp->refcount);
219
220 spin_unlock_irqrestore(&qpt->lock, flags);
221 ret = 0;
222
223bail:
224 return ret;
225}
226
227/**
228 * ipath_free_qp - remove a QP from the QP table
229 * @qpt: the QP table
230 * @qp: the QP to remove
231 *
232 * Remove the QP from the table so it can't be found asynchronously by
233 * the receive interrupt routine.
234 */
235void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
236{
237 struct ipath_qp *q, **qpp;
238 unsigned long flags;
239 int fnd = 0;
240
241 spin_lock_irqsave(&qpt->lock, flags);
242
243 /* Remove QP from the hash table. */
244 qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
245 for (; (q = *qpp) != NULL; qpp = &q->next) {
246 if (q == qp) {
247 *qpp = qp->next;
248 qp->next = NULL;
249 atomic_dec(&qp->refcount);
250 fnd = 1;
251 break;
252 }
253 }
254
255 spin_unlock_irqrestore(&qpt->lock, flags);
256
257 if (!fnd)
258 return;
259
260 /* If QPN is not reserved, mark QPN free in the bitmap. */
261 if (qp->ibqp.qp_num > 1)
262 free_qpn(qpt, qp->ibqp.qp_num);
263
264 wait_event(qp->wait, !atomic_read(&qp->refcount));
265}
266
267/**
268 * ipath_free_all_qps - remove all QPs from the table
269 * @qpt: the QP table to empty
270 */
271void ipath_free_all_qps(struct ipath_qp_table *qpt)
272{
273 unsigned long flags;
274 struct ipath_qp *qp, *nqp;
275 u32 n;
276
277 for (n = 0; n < qpt->max; n++) {
278 spin_lock_irqsave(&qpt->lock, flags);
279 qp = qpt->table[n];
280 qpt->table[n] = NULL;
281 spin_unlock_irqrestore(&qpt->lock, flags);
282
283 while (qp) {
284 nqp = qp->next;
285 if (qp->ibqp.qp_num > 1)
286 free_qpn(qpt, qp->ibqp.qp_num);
287 if (!atomic_dec_and_test(&qp->refcount) ||
288 !ipath_destroy_qp(&qp->ibqp))
289 _VERBS_INFO("QP memory leak!\n");
290 qp = nqp;
291 }
292 }
293
294 for (n = 0; n < ARRAY_SIZE(qpt->map); n++) {
295 if (qpt->map[n].page)
296 free_page((unsigned long)qpt->map[n].page);
297 }
298}
299
300/**
301 * ipath_lookup_qpn - return the QP with the given QPN
302 * @qpt: the QP table
303 * @qpn: the QP number to look up
304 *
305 * The caller is responsible for decrementing the QP reference count
306 * when done.
307 */
308struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn)
309{
310 unsigned long flags;
311 struct ipath_qp *qp;
312
313 spin_lock_irqsave(&qpt->lock, flags);
314
315 for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
316 if (qp->ibqp.qp_num == qpn) {
317 atomic_inc(&qp->refcount);
318 break;
319 }
320 }
321
322 spin_unlock_irqrestore(&qpt->lock, flags);
323 return qp;
324}
325
326/**
327 * ipath_reset_qp - initialize the QP state to the reset state
328 * @qp: the QP to reset
329 */
330static void ipath_reset_qp(struct ipath_qp *qp)
331{
332 qp->remote_qpn = 0;
333 qp->qkey = 0;
334 qp->qp_access_flags = 0;
335 qp->s_hdrwords = 0;
336 qp->s_psn = 0;
337 qp->r_psn = 0;
338 atomic_set(&qp->msn, 0);
339 if (qp->ibqp.qp_type == IB_QPT_RC) {
340 qp->s_state = IB_OPCODE_RC_SEND_LAST;
341 qp->r_state = IB_OPCODE_RC_SEND_LAST;
342 } else {
343 qp->s_state = IB_OPCODE_UC_SEND_LAST;
344 qp->r_state = IB_OPCODE_UC_SEND_LAST;
345 }
346 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
347 qp->s_nak_state = 0;
348 qp->s_rnr_timeout = 0;
349 qp->s_head = 0;
350 qp->s_tail = 0;
351 qp->s_cur = 0;
352 qp->s_last = 0;
353 qp->s_ssn = 1;
354 qp->s_lsn = 0;
355 qp->r_rq.head = 0;
356 qp->r_rq.tail = 0;
357 qp->r_reuse_sge = 0;
358}
359
360/**
361 * ipath_modify_qp - modify the attributes of a queue pair
362 * @ibqp: the queue pair whose attributes we're modifying
363 * @attr: the new attributes
364 * @attr_mask: the mask of attributes to modify
365 *
366 * Returns 0 on success, otherwise returns an errno.
367 */
368int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
369 int attr_mask)
370{
371 struct ipath_qp *qp = to_iqp(ibqp);
372 enum ib_qp_state cur_state, new_state;
373 unsigned long flags;
374 int ret;
375
376 spin_lock_irqsave(&qp->r_rq.lock, flags);
377 spin_lock(&qp->s_lock);
378
379 cur_state = attr_mask & IB_QP_CUR_STATE ?
380 attr->cur_qp_state : qp->state;
381 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
382
383 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
384 attr_mask))
385 goto inval;
386
387 switch (new_state) {
388 case IB_QPS_RESET:
389 ipath_reset_qp(qp);
390 break;
391
392 case IB_QPS_ERR:
393 ipath_error_qp(qp);
394 break;
395
396 default:
397 break;
398
399 }
400
401 if (attr_mask & IB_QP_PKEY_INDEX) {
402 struct ipath_ibdev *dev = to_idev(ibqp->device);
403
404 if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd))
405 goto inval;
406 qp->s_pkey_index = attr->pkey_index;
407 }
408
409 if (attr_mask & IB_QP_DEST_QPN)
410 qp->remote_qpn = attr->dest_qp_num;
411
412 if (attr_mask & IB_QP_SQ_PSN) {
413 qp->s_next_psn = attr->sq_psn;
414 qp->s_last_psn = qp->s_next_psn - 1;
415 }
416
417 if (attr_mask & IB_QP_RQ_PSN)
418 qp->r_psn = attr->rq_psn;
419
420 if (attr_mask & IB_QP_ACCESS_FLAGS)
421 qp->qp_access_flags = attr->qp_access_flags;
422
423 if (attr_mask & IB_QP_AV) {
424 if (attr->ah_attr.dlid == 0 ||
425 attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE)
426 goto inval;
427 qp->remote_ah_attr = attr->ah_attr;
428 }
429
430 if (attr_mask & IB_QP_PATH_MTU)
431 qp->path_mtu = attr->path_mtu;
432
433 if (attr_mask & IB_QP_RETRY_CNT)
434 qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;
435
436 if (attr_mask & IB_QP_RNR_RETRY) {
437 qp->s_rnr_retry = attr->rnr_retry;
438 if (qp->s_rnr_retry > 7)
439 qp->s_rnr_retry = 7;
440 qp->s_rnr_retry_cnt = qp->s_rnr_retry;
441 }
442
443 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
444 if (attr->min_rnr_timer > 31)
445 goto inval;
446 qp->s_min_rnr_timer = attr->min_rnr_timer;
447 }
448
449 if (attr_mask & IB_QP_QKEY)
450 qp->qkey = attr->qkey;
451
452 if (attr_mask & IB_QP_PKEY_INDEX)
453 qp->s_pkey_index = attr->pkey_index;
454
455 qp->state = new_state;
456 spin_unlock(&qp->s_lock);
457 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
458
459 /*
460 * If QP1 changed to the RTS state, try to move the link to INIT
461 * even if it was ACTIVE so the SM will reinitialize the SMA's
462 * state.
463 */
464 if (qp->ibqp.qp_num == 1 && new_state == IB_QPS_RTS) {
465 struct ipath_ibdev *dev = to_idev(ibqp->device);
466
467 ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
468 }
469 ret = 0;
470 goto bail;
471
472inval:
473 spin_unlock(&qp->s_lock);
474 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
475 ret = -EINVAL;
476
477bail:
478 return ret;
479}
480
481int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
482 int attr_mask, struct ib_qp_init_attr *init_attr)
483{
484 struct ipath_qp *qp = to_iqp(ibqp);
485
486 attr->qp_state = qp->state;
487 attr->cur_qp_state = attr->qp_state;
488 attr->path_mtu = qp->path_mtu;
489 attr->path_mig_state = 0;
490 attr->qkey = qp->qkey;
491 attr->rq_psn = qp->r_psn;
492 attr->sq_psn = qp->s_next_psn;
493 attr->dest_qp_num = qp->remote_qpn;
494 attr->qp_access_flags = qp->qp_access_flags;
495 attr->cap.max_send_wr = qp->s_size - 1;
496 attr->cap.max_recv_wr = qp->r_rq.size - 1;
497 attr->cap.max_send_sge = qp->s_max_sge;
498 attr->cap.max_recv_sge = qp->r_rq.max_sge;
499 attr->cap.max_inline_data = 0;
500 attr->ah_attr = qp->remote_ah_attr;
501 memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr));
502 attr->pkey_index = qp->s_pkey_index;
503 attr->alt_pkey_index = 0;
504 attr->en_sqd_async_notify = 0;
505 attr->sq_draining = 0;
506 attr->max_rd_atomic = 1;
507 attr->max_dest_rd_atomic = 1;
508 attr->min_rnr_timer = qp->s_min_rnr_timer;
509 attr->port_num = 1;
510 attr->timeout = 0;
511 attr->retry_cnt = qp->s_retry_cnt;
512 attr->rnr_retry = qp->s_rnr_retry;
513 attr->alt_port_num = 0;
514 attr->alt_timeout = 0;
515
516 init_attr->event_handler = qp->ibqp.event_handler;
517 init_attr->qp_context = qp->ibqp.qp_context;
518 init_attr->send_cq = qp->ibqp.send_cq;
519 init_attr->recv_cq = qp->ibqp.recv_cq;
520 init_attr->srq = qp->ibqp.srq;
521 init_attr->cap = attr->cap;
522 init_attr->sq_sig_type =
523 (qp->s_flags & (1 << IPATH_S_SIGNAL_REQ_WR))
524 ? IB_SIGNAL_REQ_WR : 0;
525 init_attr->qp_type = qp->ibqp.qp_type;
526 init_attr->port_num = 1;
527 return 0;
528}
529
530/**
531 * ipath_compute_aeth - compute the AETH (syndrome + MSN)
532 * @qp: the queue pair to compute the AETH for
533 *
534 * Returns the AETH.
535 *
536 * The QP s_lock should be held.
537 */
538__be32 ipath_compute_aeth(struct ipath_qp *qp)
539{
540 u32 aeth = atomic_read(&qp->msn) & IPS_MSN_MASK;
541
542 if (qp->s_nak_state) {
543 aeth |= qp->s_nak_state << IPS_AETH_CREDIT_SHIFT;
544 } else if (qp->ibqp.srq) {
545 /*
546 * Shared receive queues don't generate credits.
547 * Set the credit field to the invalid value.
548 */
549 aeth |= IPS_AETH_CREDIT_INVAL << IPS_AETH_CREDIT_SHIFT;
550 } else {
551 u32 min, max, x;
552 u32 credits;
553
554 /*
555 * Compute the number of credits available (RWQEs).
556 * XXX Not holding the r_rq.lock here so there is a small
557 * chance that the pair of reads is not atomic.
558 */
559 credits = qp->r_rq.head - qp->r_rq.tail;
560 if ((int)credits < 0)
561 credits += qp->r_rq.size;
562 /*
563 * Binary search the credit table to find the code to
564 * use.
565 */
566 min = 0;
567 max = 31;
568 for (;;) {
569 x = (min + max) / 2;
570 if (credit_table[x] == credits)
571 break;
572 if (credit_table[x] > credits)
573 max = x;
574 else if (min == x)
575 break;
576 else
577 min = x;
578 }
579 aeth |= x << IPS_AETH_CREDIT_SHIFT;
580 }
581 return cpu_to_be32(aeth);
582}
583
584/**
585 * ipath_create_qp - create a queue pair for a device
586 * @ibpd: the protection domain whose device we create the queue pair for
587 * @init_attr: the attributes of the queue pair
588 * @udata: unused by InfiniPath
589 *
590 * Returns the queue pair on success, otherwise returns an errno.
591 *
592 * Called by the ib_create_qp() core verbs function.
593 */
594struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
595 struct ib_qp_init_attr *init_attr,
596 struct ib_udata *udata)
597{
598 struct ipath_qp *qp;
599 int err;
600 struct ipath_swqe *swq = NULL;
601 struct ipath_ibdev *dev;
602 size_t sz;
603 struct ib_qp *ret;
604
605 if (init_attr->cap.max_send_sge > 255 ||
606 init_attr->cap.max_recv_sge > 255) {
607 ret = ERR_PTR(-ENOMEM);
608 goto bail;
609 }
610
611 switch (init_attr->qp_type) {
612 case IB_QPT_UC:
613 case IB_QPT_RC:
614 sz = sizeof(struct ipath_sge) *
615 init_attr->cap.max_send_sge +
616 sizeof(struct ipath_swqe);
617 swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
618 if (swq == NULL) {
619 ret = ERR_PTR(-ENOMEM);
620 goto bail;
621 }
622 /* FALLTHROUGH */
623 case IB_QPT_UD:
624 case IB_QPT_SMI:
625 case IB_QPT_GSI:
626 qp = kmalloc(sizeof(*qp), GFP_KERNEL);
627 if (!qp) {
628 ret = ERR_PTR(-ENOMEM);
629 goto bail;
630 }
631 qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
632 sz = sizeof(struct ipath_sge) *
633 init_attr->cap.max_recv_sge +
634 sizeof(struct ipath_rwqe);
635 qp->r_rq.wq = vmalloc(qp->r_rq.size * sz);
636 if (!qp->r_rq.wq) {
637 kfree(qp);
638 ret = ERR_PTR(-ENOMEM);
639 goto bail;
640 }
641
642 /*
643 * ib_create_qp() will initialize qp->ibqp
644 * except for qp->ibqp.qp_num.
645 */
646 spin_lock_init(&qp->s_lock);
647 spin_lock_init(&qp->r_rq.lock);
648 atomic_set(&qp->refcount, 0);
649 init_waitqueue_head(&qp->wait);
650 tasklet_init(&qp->s_task,
651 init_attr->qp_type == IB_QPT_RC ?
652 ipath_do_rc_send : ipath_do_uc_send,
653 (unsigned long)qp);
654 qp->piowait.next = LIST_POISON1;
655 qp->piowait.prev = LIST_POISON2;
656 qp->timerwait.next = LIST_POISON1;
657 qp->timerwait.prev = LIST_POISON2;
658 qp->state = IB_QPS_RESET;
659 qp->s_wq = swq;
660 qp->s_size = init_attr->cap.max_send_wr + 1;
661 qp->s_max_sge = init_attr->cap.max_send_sge;
662 qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
663 qp->s_flags = init_attr->sq_sig_type == IB_SIGNAL_REQ_WR ?
664 1 << IPATH_S_SIGNAL_REQ_WR : 0;
665 dev = to_idev(ibpd->device);
666 err = ipath_alloc_qpn(&dev->qp_table, qp,
667 init_attr->qp_type);
668 if (err) {
669 vfree(swq);
670 vfree(qp->r_rq.wq);
671 kfree(qp);
672 ret = ERR_PTR(err);
673 goto bail;
674 }
675 ipath_reset_qp(qp);
676
677 /* Tell the core driver that the kernel SMA is present. */
678 if (qp->ibqp.qp_type == IB_QPT_SMI)
679 ipath_layer_set_verbs_flags(dev->dd,
680 IPATH_VERBS_KERNEL_SMA);
681 break;
682
683 default:
684 /* Don't support raw QPs */
685 ret = ERR_PTR(-ENOSYS);
686 goto bail;
687 }
688
689 init_attr->cap.max_inline_data = 0;
690
691 ret = &qp->ibqp;
692
693bail:
694 return ret;
695}
696
697/**
698 * ipath_destroy_qp - destroy a queue pair
699 * @ibqp: the queue pair to destroy
700 *
701 * Returns 0 on success.
702 *
703 * Note that this can be called while the QP is actively sending or
704 * receiving!
705 */
706int ipath_destroy_qp(struct ib_qp *ibqp)
707{
708 struct ipath_qp *qp = to_iqp(ibqp);
709 struct ipath_ibdev *dev = to_idev(ibqp->device);
710 unsigned long flags;
711
712 /* Tell the core driver that the kernel SMA is gone. */
713 if (qp->ibqp.qp_type == IB_QPT_SMI)
714 ipath_layer_set_verbs_flags(dev->dd, 0);
715
716 spin_lock_irqsave(&qp->r_rq.lock, flags);
717 spin_lock(&qp->s_lock);
718 qp->state = IB_QPS_ERR;
719 spin_unlock(&qp->s_lock);
720 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
721
722 /* Stop the sending tasklet. */
723 tasklet_kill(&qp->s_task);
724
725 /* Make sure the QP isn't on the timeout list. */
726 spin_lock_irqsave(&dev->pending_lock, flags);
727 if (qp->timerwait.next != LIST_POISON1)
728 list_del(&qp->timerwait);
729 if (qp->piowait.next != LIST_POISON1)
730 list_del(&qp->piowait);
731 spin_unlock_irqrestore(&dev->pending_lock, flags);
732
733 /*
734 * Make sure that the QP is not in the QPN table so receive
735 * interrupts will discard packets for this QP. XXX Also remove QP
736 * from multicast table.
737 */
738 if (atomic_read(&qp->refcount) != 0)
739 ipath_free_qp(&dev->qp_table, qp);
740
741 vfree(qp->s_wq);
742 vfree(qp->r_rq.wq);
743 kfree(qp);
744 return 0;
745}
746
747/**
748 * ipath_init_qp_table - initialize the QP table for a device
749 * @idev: the device whose QP table we're initializing
750 * @size: the size of the QP table
751 *
752 * Returns 0 on success, otherwise returns an errno.
753 */
754int ipath_init_qp_table(struct ipath_ibdev *idev, int size)
755{
756 int i;
757 int ret;
758
759 idev->qp_table.last = 1; /* QPN 0 and 1 are special. */
760 idev->qp_table.max = size;
761 idev->qp_table.nmaps = 1;
762 idev->qp_table.table = kzalloc(size * sizeof(*idev->qp_table.table),
763 GFP_KERNEL);
764 if (idev->qp_table.table == NULL) {
765 ret = -ENOMEM;
766 goto bail;
767 }
768
769 for (i = 0; i < ARRAY_SIZE(idev->qp_table.map); i++) {
770 atomic_set(&idev->qp_table.map[i].n_free, BITS_PER_PAGE);
771 idev->qp_table.map[i].page = NULL;
772 }
773
774 ret = 0;
775
776bail:
777 return ret;
778}
779
780/**
781 * ipath_sqerror_qp - put a QP's send queue into an error state
782 * @qp: QP whose send queue will be put into an error state
783 * @wc: the WC responsible for putting the QP in this state
784 *
785 * Flushes the send work queue.
786 * The QP s_lock should be held.
787 */
788
789void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
790{
791 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
792 struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
793
794 _VERBS_INFO("Send queue error on QP%d/%d: err: %d\n",
795 qp->ibqp.qp_num, qp->remote_qpn, wc->status);
796
797 spin_lock(&dev->pending_lock);
798 /* XXX What if it's already removed by the timeout code? */
799 if (qp->timerwait.next != LIST_POISON1)
800 list_del(&qp->timerwait);
801 if (qp->piowait.next != LIST_POISON1)
802 list_del(&qp->piowait);
803 spin_unlock(&dev->pending_lock);
804
805 ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
806 if (++qp->s_last >= qp->s_size)
807 qp->s_last = 0;
808
809 wc->status = IB_WC_WR_FLUSH_ERR;
810
811 while (qp->s_last != qp->s_head) {
812 wc->wr_id = wqe->wr.wr_id;
813 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
814 ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
815 if (++qp->s_last >= qp->s_size)
816 qp->s_last = 0;
817 wqe = get_swqe_ptr(qp, qp->s_last);
818 }
819 qp->s_cur = qp->s_tail = qp->s_head;
820 qp->state = IB_QPS_SQE;
821}
822
823/**
824 * ipath_error_qp - put a QP into an error state
825 * @qp: the QP to put into an error state
826 *
827 * Flushes both send and receive work queues.
828 * QP r_rq.lock and s_lock should be held.
829 */
830
831void ipath_error_qp(struct ipath_qp *qp)
832{
833 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
834 struct ib_wc wc;
835
836 _VERBS_INFO("QP%d/%d in error state\n",
837 qp->ibqp.qp_num, qp->remote_qpn);
838
839 spin_lock(&dev->pending_lock);
840 /* XXX What if it's already removed by the timeout code? */
841 if (qp->timerwait.next != LIST_POISON1)
842 list_del(&qp->timerwait);
843 if (qp->piowait.next != LIST_POISON1)
844 list_del(&qp->piowait);
845 spin_unlock(&dev->pending_lock);
846
847 wc.status = IB_WC_WR_FLUSH_ERR;
848 wc.vendor_err = 0;
849 wc.byte_len = 0;
850 wc.imm_data = 0;
851 wc.qp_num = qp->ibqp.qp_num;
852 wc.src_qp = 0;
853 wc.wc_flags = 0;
854 wc.pkey_index = 0;
855 wc.slid = 0;
856 wc.sl = 0;
857 wc.dlid_path_bits = 0;
858 wc.port_num = 0;
859
860 while (qp->s_last != qp->s_head) {
861 struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
862
863 wc.wr_id = wqe->wr.wr_id;
864 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
865 if (++qp->s_last >= qp->s_size)
866 qp->s_last = 0;
867 ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
868 }
869 qp->s_cur = qp->s_tail = qp->s_head;
870 qp->s_hdrwords = 0;
871 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
872
873 wc.opcode = IB_WC_RECV;
874 while (qp->r_rq.tail != qp->r_rq.head) {
875 wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id;
876 if (++qp->r_rq.tail >= qp->r_rq.size)
877 qp->r_rq.tail = 0;
878 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
879 }
880}
881
882/**
883 * ipath_get_credit - handle a credit update from an incoming AETH
884 * @qp: the QP whose send credit limit to update
885 * @aeth: the Acknowledge Extended Transport Header
886 *
887 * The QP s_lock should be held.
888 */
889void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
890{
891 u32 credit = (aeth >> IPS_AETH_CREDIT_SHIFT) & IPS_AETH_CREDIT_MASK;
892
893 /*
894 * If the credit is invalid, we can send
895 * as many packets as we like. Otherwise, we have to
896 * honor the credit field.
897 */
898 if (credit == IPS_AETH_CREDIT_INVAL) {
899 qp->s_lsn = (u32) -1;
900 } else if (qp->s_lsn != (u32) -1) {
901 /* Compute new LSN (i.e., MSN + credit) */
902 credit = (aeth + credit_table[credit]) & IPS_MSN_MASK;
903 if (ipath_cmp24(credit, qp->s_lsn) > 0)
904 qp->s_lsn = credit;
905 }
906
907 /* Restart sending if it was blocked due to lack of credits. */
908 if (qp->s_cur != qp->s_head &&
909 (qp->s_lsn == (u32) -1 ||
910 ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
911 qp->s_lsn + 1) <= 0))
912 tasklet_hi_schedule(&qp->s_task);
913}
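
The arithmetic in ipath_get_credit() is: decode the 5-bit credit code from the AETH, convert it to a credit count with credit_table[], and add that to the 24-bit MSN in the AETH to get the new limit sequence number (s_lsn); sends whose SSN is beyond that limit stay queued. A standalone sketch of the update, assuming the usual AETH layout the IPS_* constants describe (24-bit MSN in the low bits, 5-bit credit code starting at bit 24, 0x1F meaning "invalid/unlimited"); this helper is illustration only:

static u32 ex_new_lsn(u32 aeth, const u32 *table)
{
	u32 code = (aeth >> 24) & 0x1f;

	if (code == 0x1f)
		return (u32) -1;		/* unlimited credits */
	/* MSN from the AETH plus the decoded credit count */
	return (aeth + table[code]) & 0xffffff;
}

For example, an AETH carrying MSN 1000 and credit code 0x8 (16 credits from the table) yields a new s_lsn of 1016; the driver only ever moves s_lsn forward, using the 24-bit circular compare ipath_cmp24().
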
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
new file mode 100644
index 000000000000..a4055ca00614
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -0,0 +1,1857 @@
1/*
2 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include "ipath_verbs.h"
34#include "ips_common.h"
35
36/* cut down ridiculously long IB macro names */
37#define OP(x) IB_OPCODE_RC_##x
38
39/**
40 * ipath_init_restart - initialize the qp->s_sge after a restart
41 * @qp: the QP whose SGE we're restarting
42 * @wqe: the work queue to initialize the QP's SGE from
43 *
44 * The QP s_lock should be held.
45 */
46static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
47{
48 struct ipath_ibdev *dev;
49 u32 len;
50
51 len = ((qp->s_psn - wqe->psn) & IPS_PSN_MASK) *
52 ib_mtu_enum_to_int(qp->path_mtu);
53 qp->s_sge.sge = wqe->sg_list[0];
54 qp->s_sge.sg_list = wqe->sg_list + 1;
55 qp->s_sge.num_sge = wqe->wr.num_sge;
56 ipath_skip_sge(&qp->s_sge, len);
57 qp->s_len = wqe->length - len;
58 dev = to_idev(qp->ibqp.device);
59 spin_lock(&dev->pending_lock);
60 if (qp->timerwait.next == LIST_POISON1)
61 list_add_tail(&qp->timerwait,
62 &dev->pending[dev->pending_index]);
63 spin_unlock(&dev->pending_lock);
64}
65
66/**
67 * ipath_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
68 * @qp: a pointer to the QP
69 * @ohdr: a pointer to the IB header being constructed
70 * @pmtu: the path MTU
71 *
72 * Return bth0 if constructed; otherwise, return 0.
73 * Note the QP s_lock must be held.
74 */
75static inline u32 ipath_make_rc_ack(struct ipath_qp *qp,
76 struct ipath_other_headers *ohdr,
77 u32 pmtu)
78{
79 struct ipath_sge_state *ss;
80 u32 hwords;
81 u32 len;
82 u32 bth0;
83
84 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
85 hwords = 5;
86
87 /*
88 * Send a response. Note that we are in the responder's
89 * side of the QP context.
90 */
91 switch (qp->s_ack_state) {
92 case OP(RDMA_READ_REQUEST):
93 ss = &qp->s_rdma_sge;
94 len = qp->s_rdma_len;
95 if (len > pmtu) {
96 len = pmtu;
97 qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
98 }
99 else
100 qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
101 qp->s_rdma_len -= len;
102 bth0 = qp->s_ack_state << 24;
103 ohdr->u.aeth = ipath_compute_aeth(qp);
104 hwords++;
105 break;
106
107 case OP(RDMA_READ_RESPONSE_FIRST):
108 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
109 /* FALLTHROUGH */
110 case OP(RDMA_READ_RESPONSE_MIDDLE):
111 ss = &qp->s_rdma_sge;
112 len = qp->s_rdma_len;
113 if (len > pmtu)
114 len = pmtu;
115 else {
116 ohdr->u.aeth = ipath_compute_aeth(qp);
117 hwords++;
118 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
119 }
120 qp->s_rdma_len -= len;
121 bth0 = qp->s_ack_state << 24;
122 break;
123
124 case OP(RDMA_READ_RESPONSE_LAST):
125 case OP(RDMA_READ_RESPONSE_ONLY):
126 /*
127 * We have to prevent new requests from changing
128 * the r_sge state while a ipath_verbs_send()
129 * is in progress.
130 * Changing r_state allows the receiver
131 * to continue processing new packets.
132 * We do it here now instead of above so
133 * that we are sure the packet was sent before
134 * changing the state.
135 */
136 qp->r_state = OP(RDMA_READ_RESPONSE_LAST);
137 qp->s_ack_state = OP(ACKNOWLEDGE);
138 return 0;
139
140 case OP(COMPARE_SWAP):
141 case OP(FETCH_ADD):
142 ss = NULL;
143 len = 0;
144 qp->r_state = OP(SEND_LAST);
145 qp->s_ack_state = OP(ACKNOWLEDGE);
146 bth0 = IB_OPCODE_ATOMIC_ACKNOWLEDGE << 24;
147 ohdr->u.at.aeth = ipath_compute_aeth(qp);
148 ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->s_ack_atomic);
149 hwords += sizeof(ohdr->u.at) / 4;
150 break;
151
152 default:
153 /* Send a regular ACK. */
154 ss = NULL;
155 len = 0;
156 qp->s_ack_state = OP(ACKNOWLEDGE);
157 bth0 = qp->s_ack_state << 24;
158 ohdr->u.aeth = ipath_compute_aeth(qp);
159 hwords++;
160 }
161 qp->s_hdrwords = hwords;
162 qp->s_cur_sge = ss;
163 qp->s_cur_size = len;
164
165 return bth0;
166}
167
168/**
169 * ipath_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
170 * @qp: a pointer to the QP
171 * @ohdr: a pointer to the IB header being constructed
172 * @pmtu: the path MTU
173 * @bth0p: pointer to the BTH opcode word
174 * @bth2p: pointer to the BTH PSN word
175 *
176 * Return 1 if constructed; otherwise, return 0.
177 * Note the QP s_lock must be held.
178 */
179static inline int ipath_make_rc_req(struct ipath_qp *qp,
180 struct ipath_other_headers *ohdr,
181 u32 pmtu, u32 *bth0p, u32 *bth2p)
182{
183 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
184 struct ipath_sge_state *ss;
185 struct ipath_swqe *wqe;
186 u32 hwords;
187 u32 len;
188 u32 bth0;
189 u32 bth2;
190 char newreq;
191
192 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ||
193 qp->s_rnr_timeout)
194 goto done;
195
196 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
197 hwords = 5;
198 bth0 = 0;
199
200 /* Send a request. */
201 wqe = get_swqe_ptr(qp, qp->s_cur);
202 switch (qp->s_state) {
203 default:
204 /*
205 * Resend an old request or start a new one.
206 *
207 * We keep track of the current SWQE so that
208 * we don't reset the "furthest progress" state
209 * if we need to back up.
210 */
211 newreq = 0;
212 if (qp->s_cur == qp->s_tail) {
213 /* Check if send work queue is empty. */
214 if (qp->s_tail == qp->s_head)
215 goto done;
216 qp->s_psn = wqe->psn = qp->s_next_psn;
217 newreq = 1;
218 }
219 /*
220 * Note that we have to be careful not to modify the
221 * original work request since we may need to resend
222 * it.
223 */
224 qp->s_sge.sge = wqe->sg_list[0];
225 qp->s_sge.sg_list = wqe->sg_list + 1;
226 qp->s_sge.num_sge = wqe->wr.num_sge;
227 qp->s_len = len = wqe->length;
228 ss = &qp->s_sge;
229 bth2 = 0;
230 switch (wqe->wr.opcode) {
231 case IB_WR_SEND:
232 case IB_WR_SEND_WITH_IMM:
233 /* If no credit, return. */
234 if (qp->s_lsn != (u32) -1 &&
235 ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
236 goto done;
237 wqe->lpsn = wqe->psn;
238 if (len > pmtu) {
239 wqe->lpsn += (len - 1) / pmtu;
240 qp->s_state = OP(SEND_FIRST);
241 len = pmtu;
242 break;
243 }
244 if (wqe->wr.opcode == IB_WR_SEND)
245 qp->s_state = OP(SEND_ONLY);
246 else {
247 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
248 /* Immediate data comes after the BTH */
249 ohdr->u.imm_data = wqe->wr.imm_data;
250 hwords += 1;
251 }
252 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
253 bth0 |= 1 << 23;
254 bth2 = 1 << 31; /* Request ACK. */
255 if (++qp->s_cur == qp->s_size)
256 qp->s_cur = 0;
257 break;
258
259 case IB_WR_RDMA_WRITE:
260 if (newreq)
261 qp->s_lsn++;
262 /* FALLTHROUGH */
263 case IB_WR_RDMA_WRITE_WITH_IMM:
264 /* If no credit, return. */
265 if (qp->s_lsn != (u32) -1 &&
266 ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
267 goto done;
268 ohdr->u.rc.reth.vaddr =
269 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
270 ohdr->u.rc.reth.rkey =
271 cpu_to_be32(wqe->wr.wr.rdma.rkey);
272 ohdr->u.rc.reth.length = cpu_to_be32(len);
273 hwords += sizeof(struct ib_reth) / 4;
274 wqe->lpsn = wqe->psn;
275 if (len > pmtu) {
276 wqe->lpsn += (len - 1) / pmtu;
277 qp->s_state = OP(RDMA_WRITE_FIRST);
278 len = pmtu;
279 break;
280 }
281 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
282 qp->s_state = OP(RDMA_WRITE_ONLY);
283 else {
284 qp->s_state =
285 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
286 /* Immediate data comes
287 * after RETH */
288 ohdr->u.rc.imm_data = wqe->wr.imm_data;
289 hwords += 1;
290 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
291 bth0 |= 1 << 23;
292 }
293 bth2 = 1 << 31; /* Request ACK. */
294 if (++qp->s_cur == qp->s_size)
295 qp->s_cur = 0;
296 break;
297
298 case IB_WR_RDMA_READ:
299 ohdr->u.rc.reth.vaddr =
300 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
301 ohdr->u.rc.reth.rkey =
302 cpu_to_be32(wqe->wr.wr.rdma.rkey);
303 ohdr->u.rc.reth.length = cpu_to_be32(len);
304 qp->s_state = OP(RDMA_READ_REQUEST);
305 hwords += sizeof(ohdr->u.rc.reth) / 4;
306 if (newreq) {
307 qp->s_lsn++;
308 /*
309 * Adjust s_next_psn to count the
310 * expected number of responses.
311 */
312 if (len > pmtu)
313 qp->s_next_psn += (len - 1) / pmtu;
314 wqe->lpsn = qp->s_next_psn++;
315 }
316 ss = NULL;
317 len = 0;
318 if (++qp->s_cur == qp->s_size)
319 qp->s_cur = 0;
320 break;
321
322 case IB_WR_ATOMIC_CMP_AND_SWP:
323 case IB_WR_ATOMIC_FETCH_AND_ADD:
324 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP)
325 qp->s_state = OP(COMPARE_SWAP);
326 else
327 qp->s_state = OP(FETCH_ADD);
328 ohdr->u.atomic_eth.vaddr = cpu_to_be64(
329 wqe->wr.wr.atomic.remote_addr);
330 ohdr->u.atomic_eth.rkey = cpu_to_be32(
331 wqe->wr.wr.atomic.rkey);
332 ohdr->u.atomic_eth.swap_data = cpu_to_be64(
333 wqe->wr.wr.atomic.swap);
334 ohdr->u.atomic_eth.compare_data = cpu_to_be64(
335 wqe->wr.wr.atomic.compare_add);
336 hwords += sizeof(struct ib_atomic_eth) / 4;
337 if (newreq) {
338 qp->s_lsn++;
339 wqe->lpsn = wqe->psn;
340 }
341 if (++qp->s_cur == qp->s_size)
342 qp->s_cur = 0;
343 ss = NULL;
344 len = 0;
345 break;
346
347 default:
348 goto done;
349 }
350 if (newreq) {
351 qp->s_tail++;
352 if (qp->s_tail >= qp->s_size)
353 qp->s_tail = 0;
354 }
355 bth2 |= qp->s_psn++ & IPS_PSN_MASK;
356 if ((int)(qp->s_psn - qp->s_next_psn) > 0)
357 qp->s_next_psn = qp->s_psn;
358 spin_lock(&dev->pending_lock);
359 if (qp->timerwait.next == LIST_POISON1)
360 list_add_tail(&qp->timerwait,
361 &dev->pending[dev->pending_index]);
362 spin_unlock(&dev->pending_lock);
363 break;
364
365 case OP(RDMA_READ_RESPONSE_FIRST):
366 /*
367 * This case can only happen if a send is restarted. See
368 * ipath_restart_rc().
369 */
370 ipath_init_restart(qp, wqe);
371 /* FALLTHROUGH */
372 case OP(SEND_FIRST):
373 qp->s_state = OP(SEND_MIDDLE);
374 /* FALLTHROUGH */
375 case OP(SEND_MIDDLE):
376 bth2 = qp->s_psn++ & IPS_PSN_MASK;
377 if ((int)(qp->s_psn - qp->s_next_psn) > 0)
378 qp->s_next_psn = qp->s_psn;
379 ss = &qp->s_sge;
380 len = qp->s_len;
381 if (len > pmtu) {
382 /*
383 * Request an ACK every 1/2 MB to avoid retransmit
384 * timeouts.
385 */
386 if (((wqe->length - len) % (512 * 1024)) == 0)
387 bth2 |= 1 << 31;
388 len = pmtu;
389 break;
390 }
391 if (wqe->wr.opcode == IB_WR_SEND)
392 qp->s_state = OP(SEND_LAST);
393 else {
394 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
395 /* Immediate data comes after the BTH */
396 ohdr->u.imm_data = wqe->wr.imm_data;
397 hwords += 1;
398 }
399 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
400 bth0 |= 1 << 23;
401 bth2 |= 1 << 31; /* Request ACK. */
402 qp->s_cur++;
403 if (qp->s_cur >= qp->s_size)
404 qp->s_cur = 0;
405 break;
406
407 case OP(RDMA_READ_RESPONSE_LAST):
408 /*
409 * This case can only happen if a RDMA write is restarted.
410 * See ipath_restart_rc().
411 */
412 ipath_init_restart(qp, wqe);
413 /* FALLTHROUGH */
414 case OP(RDMA_WRITE_FIRST):
415 qp->s_state = OP(RDMA_WRITE_MIDDLE);
416 /* FALLTHROUGH */
417 case OP(RDMA_WRITE_MIDDLE):
418 bth2 = qp->s_psn++ & IPS_PSN_MASK;
419 if ((int)(qp->s_psn - qp->s_next_psn) > 0)
420 qp->s_next_psn = qp->s_psn;
421 ss = &qp->s_sge;
422 len = qp->s_len;
423 if (len > pmtu) {
424 /*
425 * Request an ACK every 1/2 MB to avoid retransmit
426 * timeouts.
427 */
428 if (((wqe->length - len) % (512 * 1024)) == 0)
429 bth2 |= 1 << 31;
430 len = pmtu;
431 break;
432 }
433 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
434 qp->s_state = OP(RDMA_WRITE_LAST);
435 else {
436 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
437 /* Immediate data comes after the BTH */
438 ohdr->u.imm_data = wqe->wr.imm_data;
439 hwords += 1;
440 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
441 bth0 |= 1 << 23;
442 }
443 bth2 |= 1 << 31; /* Request ACK. */
444 qp->s_cur++;
445 if (qp->s_cur >= qp->s_size)
446 qp->s_cur = 0;
447 break;
448
449 case OP(RDMA_READ_RESPONSE_MIDDLE):
450 /*
451 * This case can only happen if a RDMA read is restarted.
452 * See ipath_restart_rc().
453 */
454 ipath_init_restart(qp, wqe);
455 len = ((qp->s_psn - wqe->psn) & IPS_PSN_MASK) * pmtu;
456 ohdr->u.rc.reth.vaddr =
457 cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
458 ohdr->u.rc.reth.rkey =
459 cpu_to_be32(wqe->wr.wr.rdma.rkey);
460 ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
461 qp->s_state = OP(RDMA_READ_REQUEST);
462 hwords += sizeof(ohdr->u.rc.reth) / 4;
463 bth2 = qp->s_psn++ & IPS_PSN_MASK;
464 if ((int)(qp->s_psn - qp->s_next_psn) > 0)
465 qp->s_next_psn = qp->s_psn;
466 ss = NULL;
467 len = 0;
468 qp->s_cur++;
469 if (qp->s_cur == qp->s_size)
470 qp->s_cur = 0;
471 break;
472
473 case OP(RDMA_READ_REQUEST):
474 case OP(COMPARE_SWAP):
475 case OP(FETCH_ADD):
476 /*
477 * We shouldn't start anything new until this request is
478 * finished. The ACK will handle rescheduling us. XXX The
479 * number of outstanding ones is negotiated at connection
480 * setup time (see pg. 258,289)? XXX Also, if we support
481 * multiple outstanding requests, we need to check the WQE
482 * IB_SEND_FENCE flag and not send a new request if a RDMA
483 * read or atomic is pending.
484 */
485 goto done;
486 }
487 qp->s_len -= len;
488 qp->s_hdrwords = hwords;
489 qp->s_cur_sge = ss;
490 qp->s_cur_size = len;
491 *bth0p = bth0 | (qp->s_state << 24);
492 *bth2p = bth2;
493 return 1;
494
495done:
496 return 0;
497}
498
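
One detail worth calling out from ipath_make_rc_req(): for a request longer than the path MTU, the last PSN of the request is precomputed as wqe->psn + (len - 1) / pmtu, so the ACK logic knows which PSN completes the WQE. A standalone sketch of that arithmetic (illustration only, not driver code):

static u32 ex_last_psn(u32 first_psn, u32 len, u32 pmtu)
{
	u32 npkts = len > pmtu ? (len - 1) / pmtu + 1 : 1;

	return first_psn + npkts - 1;
}

For example, an 8000-byte SEND over a 2048-byte path MTU is segmented into four packets and its last PSN is first_psn + 3, which is what the lpsn assignments above produce.
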
499static inline void ipath_make_rc_grh(struct ipath_qp *qp,
500 struct ib_global_route *grh,
501 u32 nwords)
502{
503 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
504
505 /* GRH header size in 32-bit words. */
506 qp->s_hdrwords += 10;
507 qp->s_hdr.u.l.grh.version_tclass_flow =
508 cpu_to_be32((6 << 28) |
509 (grh->traffic_class << 20) |
510 grh->flow_label);
511 qp->s_hdr.u.l.grh.paylen =
512 cpu_to_be16(((qp->s_hdrwords - 12) + nwords +
513 SIZE_OF_CRC) << 2);
514 /* next_hdr is defined by C8-7 in ch. 8.4.1 */
515 qp->s_hdr.u.l.grh.next_hdr = 0x1B;
516 qp->s_hdr.u.l.grh.hop_limit = grh->hop_limit;
517 /* The SGID is 32-bit aligned. */
518 qp->s_hdr.u.l.grh.sgid.global.subnet_prefix = dev->gid_prefix;
519 qp->s_hdr.u.l.grh.sgid.global.interface_id =
520 ipath_layer_get_guid(dev->dd);
521 qp->s_hdr.u.l.grh.dgid = grh->dgid;
522}
523
524/**
525 * ipath_do_rc_send - perform a send on an RC QP
526 * @data: contains a pointer to the QP
527 *
528 * Process entries in the send work queue until credit or queue is
529 * exhausted. Only allow one CPU to send a packet per QP (tasklet).
530 * Otherwise, after we drop the QP s_lock, two threads could send
531 * packets out of order.
532 */
533void ipath_do_rc_send(unsigned long data)
534{
535 struct ipath_qp *qp = (struct ipath_qp *)data;
536 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
537 unsigned long flags;
538 u16 lrh0;
539 u32 nwords;
540 u32 extra_bytes;
541 u32 bth0;
542 u32 bth2;
543 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
544 struct ipath_other_headers *ohdr;
545
546 if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
547 goto bail;
548
549 if (unlikely(qp->remote_ah_attr.dlid ==
550 ipath_layer_get_lid(dev->dd))) {
551 struct ib_wc wc;
552
553 /*
554 * Pass in an uninitialized ib_wc to be consistent with
555 * other places where ipath_ruc_loopback() is called.
556 */
557 ipath_ruc_loopback(qp, &wc);
558 goto clear;
559 }
560
561 ohdr = &qp->s_hdr.u.oth;
562 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
563 ohdr = &qp->s_hdr.u.l.oth;
564
565again:
566 /* Check for a constructed packet to be sent. */
567 if (qp->s_hdrwords != 0) {
568 /*
569 * If no PIO bufs are available, return. An interrupt will
570 * call ipath_ib_piobufavail() when one is available.
571 */
572 _VERBS_INFO("h %u %p\n", qp->s_hdrwords, &qp->s_hdr);
573 _VERBS_INFO("d %u %p %u %p %u %u %u %u\n", qp->s_cur_size,
574 qp->s_cur_sge->sg_list,
575 qp->s_cur_sge->num_sge,
576 qp->s_cur_sge->sge.vaddr,
577 qp->s_cur_sge->sge.sge_length,
578 qp->s_cur_sge->sge.length,
579 qp->s_cur_sge->sge.m,
580 qp->s_cur_sge->sge.n);
581 if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
582 (u32 *) &qp->s_hdr, qp->s_cur_size,
583 qp->s_cur_sge)) {
584 ipath_no_bufs_available(qp, dev);
585 goto bail;
586 }
587 dev->n_unicast_xmit++;
588 /* Record that we sent the packet and s_hdr is empty. */
589 qp->s_hdrwords = 0;
590 }
591
592 /*
593 * The lock is needed to synchronize between setting
594 * qp->s_ack_state, resend timer, and post_send().
595 */
596 spin_lock_irqsave(&qp->s_lock, flags);
597
598 /* Sending responses has higher priority over sending requests. */
599 if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
600 (bth0 = ipath_make_rc_ack(qp, ohdr, pmtu)) != 0)
601 bth2 = qp->s_ack_psn++ & IPS_PSN_MASK;
602 else if (!ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2))
603 goto done;
604
605 spin_unlock_irqrestore(&qp->s_lock, flags);
606
607 /* Construct the header. */
608 extra_bytes = (4 - qp->s_cur_size) & 3;
609 nwords = (qp->s_cur_size + extra_bytes) >> 2;
610 lrh0 = IPS_LRH_BTH;
611 if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
612 ipath_make_rc_grh(qp, &qp->remote_ah_attr.grh, nwords);
613 lrh0 = IPS_LRH_GRH;
614 }
615 lrh0 |= qp->remote_ah_attr.sl << 4;
616 qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
617 qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
618 qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
619 SIZE_OF_CRC);
620 qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
621 bth0 |= ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
622 bth0 |= extra_bytes << 20;
623 ohdr->bth[0] = cpu_to_be32(bth0);
624 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
625 ohdr->bth[2] = cpu_to_be32(bth2);
626
627 /* Check for more work to do. */
628 goto again;
629
630done:
631 spin_unlock_irqrestore(&qp->s_lock, flags);
632clear:
633 clear_bit(IPATH_S_BUSY, &qp->s_flags);
634bail:
635 return;
636}
637
638static void send_rc_ack(struct ipath_qp *qp)
639{
640 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
641 u16 lrh0;
642 u32 bth0;
643 struct ipath_other_headers *ohdr;
644
645 /* Construct the header. */
646 ohdr = &qp->s_hdr.u.oth;
647 lrh0 = IPS_LRH_BTH;
648 /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
649 qp->s_hdrwords = 6;
650 if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
651 ipath_make_rc_grh(qp, &qp->remote_ah_attr.grh, 0);
652 ohdr = &qp->s_hdr.u.l.oth;
653 lrh0 = IPS_LRH_GRH;
654 }
655 bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
656 ohdr->u.aeth = ipath_compute_aeth(qp);
657 if (qp->s_ack_state >= OP(COMPARE_SWAP)) {
658 bth0 |= IB_OPCODE_ATOMIC_ACKNOWLEDGE << 24;
659 ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->s_ack_atomic);
660 qp->s_hdrwords += sizeof(ohdr->u.at.atomic_ack_eth) / 4;
661 }
662 else
663 bth0 |= OP(ACKNOWLEDGE) << 24;
664 lrh0 |= qp->remote_ah_attr.sl << 4;
665 qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
666 qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
667 qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + SIZE_OF_CRC);
668 qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
669 ohdr->bth[0] = cpu_to_be32(bth0);
670 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
671 ohdr->bth[2] = cpu_to_be32(qp->s_ack_psn & IPS_PSN_MASK);
672
673 /*
674 * If we can send the ACK, clear the ACK state.
675 */
676 if (ipath_verbs_send(dev->dd, qp->s_hdrwords, (u32 *) &qp->s_hdr,
677 0, NULL) == 0) {
678 qp->s_ack_state = OP(ACKNOWLEDGE);
679 dev->n_rc_qacks++;
680 dev->n_unicast_xmit++;
681 }
682}
683
684/**
685 * ipath_restart_rc - back up requester to resend the last un-ACKed request
686 * @qp: the QP to restart
687 * @psn: packet sequence number for the request
688 * @wc: the work completion request
689 *
690 * The QP s_lock should be held.
691 */
692void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
693{
694 struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
695 struct ipath_ibdev *dev;
696 u32 n;
697
698 /*
699 * If there are no requests pending, we are done.
700 */
701 if (ipath_cmp24(psn, qp->s_next_psn) >= 0 ||
702 qp->s_last == qp->s_tail)
703 goto done;
704
705 if (qp->s_retry == 0) {
706 wc->wr_id = wqe->wr.wr_id;
707 wc->status = IB_WC_RETRY_EXC_ERR;
708 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
709 wc->vendor_err = 0;
710 wc->byte_len = 0;
711 wc->qp_num = qp->ibqp.qp_num;
712 wc->src_qp = qp->remote_qpn;
713 wc->pkey_index = 0;
714 wc->slid = qp->remote_ah_attr.dlid;
715 wc->sl = qp->remote_ah_attr.sl;
716 wc->dlid_path_bits = 0;
717 wc->port_num = 0;
718 ipath_sqerror_qp(qp, wc);
719 goto bail;
720 }
721 qp->s_retry--;
722
723 /*
724 * Remove the QP from the timeout queue.
725 * Note: it may already have been removed by ipath_ib_timer().
726 */
727 dev = to_idev(qp->ibqp.device);
728 spin_lock(&dev->pending_lock);
729 if (qp->timerwait.next != LIST_POISON1)
730 list_del(&qp->timerwait);
731 spin_unlock(&dev->pending_lock);
732
733 if (wqe->wr.opcode == IB_WR_RDMA_READ)
734 dev->n_rc_resends++;
735 else
736 dev->n_rc_resends += (int)qp->s_psn - (int)psn;
737
738 /*
739 * If we are starting the request from the beginning, let the normal
740 * send code handle initialization.
741 */
742 qp->s_cur = qp->s_last;
743 if (ipath_cmp24(psn, wqe->psn) <= 0) {
744 qp->s_state = OP(SEND_LAST);
745 qp->s_psn = wqe->psn;
746 } else {
747 n = qp->s_cur;
748 for (;;) {
749 if (++n == qp->s_size)
750 n = 0;
751 if (n == qp->s_tail) {
752 if (ipath_cmp24(psn, qp->s_next_psn) >= 0) {
753 qp->s_cur = n;
754 wqe = get_swqe_ptr(qp, n);
755 }
756 break;
757 }
758 wqe = get_swqe_ptr(qp, n);
759 if (ipath_cmp24(psn, wqe->psn) < 0)
760 break;
761 qp->s_cur = n;
762 }
763 qp->s_psn = psn;
764
765 /*
766 * Reset the state to restart in the middle of a request.
767 * Don't change the s_sge, s_cur_sge, or s_cur_size.
768 * See ipath_do_rc_send().
769 */
770 switch (wqe->wr.opcode) {
771 case IB_WR_SEND:
772 case IB_WR_SEND_WITH_IMM:
773 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
774 break;
775
776 case IB_WR_RDMA_WRITE:
777 case IB_WR_RDMA_WRITE_WITH_IMM:
778 qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
779 break;
780
781 case IB_WR_RDMA_READ:
782 qp->s_state =
783 OP(RDMA_READ_RESPONSE_MIDDLE);
784 break;
785
786 default:
787 /*
788 * This case shouldn't happen since there is only
789 * one PSN per request.
790 */
791 qp->s_state = OP(SEND_LAST);
792 }
793 }
794
795done:
796 tasklet_hi_schedule(&qp->s_task);
797
798bail:
799 return;
800}
801
802/**
803 * reset_psn - reset the QP state to send starting from PSN
804 * @qp: the QP
805 * @psn: the packet sequence number to restart at
806 *
807 * This is called from ipath_rc_rcv() to process an incoming RC ACK
808 * for the given QP.
809 * Called at interrupt level with the QP s_lock held.
810 */
811static void reset_psn(struct ipath_qp *qp, u32 psn)
812{
813 struct ipath_swqe *wqe;
814 u32 n;
815
816 n = qp->s_cur;
817 wqe = get_swqe_ptr(qp, n);
818 for (;;) {
819 if (++n == qp->s_size)
820 n = 0;
821 if (n == qp->s_tail) {
822 if (ipath_cmp24(psn, qp->s_next_psn) >= 0) {
823 qp->s_cur = n;
824 wqe = get_swqe_ptr(qp, n);
825 }
826 break;
827 }
828 wqe = get_swqe_ptr(qp, n);
829 if (ipath_cmp24(psn, wqe->psn) < 0)
830 break;
831 qp->s_cur = n;
832 }
833 qp->s_psn = psn;
834
835 /*
836 * Set the state to restart in the middle of a
837 * request. Don't change the s_sge, s_cur_sge, or
838 * s_cur_size. See ipath_do_rc_send().
839 */
840 switch (wqe->wr.opcode) {
841 case IB_WR_SEND:
842 case IB_WR_SEND_WITH_IMM:
843 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
844 break;
845
846 case IB_WR_RDMA_WRITE:
847 case IB_WR_RDMA_WRITE_WITH_IMM:
848 qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
849 break;
850
851 case IB_WR_RDMA_READ:
852 qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
853 break;
854
855 default:
856 /*
858 * This case shouldn't happen since there is only
859 * one PSN per request.
859 */
860 qp->s_state = OP(SEND_LAST);
861 }
862}
863
864/**
865 * do_rc_ack - process an incoming RC ACK
866 * @qp: the QP the ACK came in on
867 * @psn: the packet sequence number of the ACK
868 * @opcode: the opcode of the request that resulted in the ACK
869 *
870 * This is called from ipath_rc_rcv() to process an incoming RC ACK
871 * for the given QP.
872 * Called at interrupt level with the QP s_lock held.
873 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
874 */
875static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
876{
877 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
878 struct ib_wc wc;
879 struct ipath_swqe *wqe;
880 int ret = 0;
881
882 /*
883 * Remove the QP from the timeout queue (or RNR timeout queue).
884 * If ipath_ib_timer() has already removed it,
885 * it's OK since we hold the QP s_lock and ipath_restart_rc()
886 * just won't find anything to restart if we ACK everything.
887 */
888 spin_lock(&dev->pending_lock);
889 if (qp->timerwait.next != LIST_POISON1)
890 list_del(&qp->timerwait);
891 spin_unlock(&dev->pending_lock);
892
893 /*
894 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
895 * requests and implicitly NAK RDMA read and atomic requests issued
896 * before the NAK'ed request. The MSN won't include the NAK'ed
 897	 * request but will include any ACK'ed requests.
898 */
899 wqe = get_swqe_ptr(qp, qp->s_last);
900
901 /* Nothing is pending to ACK/NAK. */
902 if (qp->s_last == qp->s_tail)
903 goto bail;
904
905 /*
906 * The MSN might be for a later WQE than the PSN indicates so
907 * only complete WQEs that the PSN finishes.
908 */
909 while (ipath_cmp24(psn, wqe->lpsn) >= 0) {
910 /* If we are ACKing a WQE, the MSN should be >= the SSN. */
911 if (ipath_cmp24(aeth, wqe->ssn) < 0)
912 break;
913 /*
914 * If this request is a RDMA read or atomic, and the ACK is
915 * for a later operation, this ACK NAKs the RDMA read or
916 * atomic. In other words, only a RDMA_READ_LAST or ONLY
917 * can ACK a RDMA read and likewise for atomic ops. Note
918 * that the NAK case can only happen if relaxed ordering is
919 * used and requests are sent after an RDMA read or atomic
920 * is sent but before the response is received.
921 */
922 if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
923 opcode != OP(RDMA_READ_RESPONSE_LAST)) ||
924 ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
925 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
926 (opcode != OP(ATOMIC_ACKNOWLEDGE) ||
927 ipath_cmp24(wqe->psn, psn) != 0))) {
928 /*
929 * The last valid PSN seen is the previous
930 * request's.
931 */
932 qp->s_last_psn = wqe->psn - 1;
933 /* Retry this request. */
934 ipath_restart_rc(qp, wqe->psn, &wc);
935 /*
936 * No need to process the ACK/NAK since we are
937 * restarting an earlier request.
938 */
939 goto bail;
940 }
941 /* Post a send completion queue entry if requested. */
942 if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
943 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
944 wc.wr_id = wqe->wr.wr_id;
945 wc.status = IB_WC_SUCCESS;
946 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
947 wc.vendor_err = 0;
948 wc.byte_len = wqe->length;
949 wc.qp_num = qp->ibqp.qp_num;
950 wc.src_qp = qp->remote_qpn;
951 wc.pkey_index = 0;
952 wc.slid = qp->remote_ah_attr.dlid;
953 wc.sl = qp->remote_ah_attr.sl;
954 wc.dlid_path_bits = 0;
955 wc.port_num = 0;
956 ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
957 }
958 qp->s_retry = qp->s_retry_cnt;
959 /*
960 * If we are completing a request which is in the process of
961 * being resent, we can stop resending it since we know the
962 * responder has already seen it.
963 */
964 if (qp->s_last == qp->s_cur) {
965 if (++qp->s_cur >= qp->s_size)
966 qp->s_cur = 0;
967 wqe = get_swqe_ptr(qp, qp->s_cur);
968 qp->s_state = OP(SEND_LAST);
969 qp->s_psn = wqe->psn;
970 }
971 if (++qp->s_last >= qp->s_size)
972 qp->s_last = 0;
973 wqe = get_swqe_ptr(qp, qp->s_last);
974 if (qp->s_last == qp->s_tail)
975 break;
976 }
977
978 switch (aeth >> 29) {
979 case 0: /* ACK */
980 dev->n_rc_acks++;
981 /* If this is a partial ACK, reset the retransmit timer. */
982 if (qp->s_last != qp->s_tail) {
983 spin_lock(&dev->pending_lock);
984 list_add_tail(&qp->timerwait,
985 &dev->pending[dev->pending_index]);
986 spin_unlock(&dev->pending_lock);
987 }
988 ipath_get_credit(qp, aeth);
989 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
990 qp->s_retry = qp->s_retry_cnt;
991 qp->s_last_psn = psn;
992 ret = 1;
993 goto bail;
994
995 case 1: /* RNR NAK */
996 dev->n_rnr_naks++;
997 if (qp->s_rnr_retry == 0) {
998 if (qp->s_last == qp->s_tail)
999 goto bail;
1000
1001 wc.status = IB_WC_RNR_RETRY_EXC_ERR;
1002 goto class_b;
1003 }
1004 if (qp->s_rnr_retry_cnt < 7)
1005 qp->s_rnr_retry--;
1006 if (qp->s_last == qp->s_tail)
1007 goto bail;
1008
1009 /* The last valid PSN seen is the previous request's. */
1010 qp->s_last_psn = wqe->psn - 1;
1011
1012 dev->n_rc_resends += (int)qp->s_psn - (int)psn;
1013
1014 /*
1015 * If we are starting the request from the beginning, let
1016 * the normal send code handle initialization.
1017 */
1018 qp->s_cur = qp->s_last;
1019 wqe = get_swqe_ptr(qp, qp->s_cur);
1020 if (ipath_cmp24(psn, wqe->psn) <= 0) {
1021 qp->s_state = OP(SEND_LAST);
1022 qp->s_psn = wqe->psn;
1023 } else
1024 reset_psn(qp, psn);
1025
1026 qp->s_rnr_timeout =
1027 ib_ipath_rnr_table[(aeth >> IPS_AETH_CREDIT_SHIFT) &
1028 IPS_AETH_CREDIT_MASK];
1029 ipath_insert_rnr_queue(qp);
1030 goto bail;
1031
1032 case 3: /* NAK */
1033 /* The last valid PSN seen is the previous request's. */
1034 if (qp->s_last != qp->s_tail)
1035 qp->s_last_psn = wqe->psn - 1;
1036 switch ((aeth >> IPS_AETH_CREDIT_SHIFT) &
1037 IPS_AETH_CREDIT_MASK) {
1038 case 0: /* PSN sequence error */
1039 dev->n_seq_naks++;
1040 /*
1041 * Back up to the responder's expected PSN. XXX
1042 * Note that we might get a NAK in the middle of an
1043 * RDMA READ response which terminates the RDMA
1044 * READ.
1045 */
1046 if (qp->s_last == qp->s_tail)
1047 break;
1048
1049 if (ipath_cmp24(psn, wqe->psn) < 0)
1050 break;
1051
1052 /* Retry the request. */
1053 ipath_restart_rc(qp, psn, &wc);
1054 break;
1055
1056 case 1: /* Invalid Request */
1057 wc.status = IB_WC_REM_INV_REQ_ERR;
1058 dev->n_other_naks++;
1059 goto class_b;
1060
1061 case 2: /* Remote Access Error */
1062 wc.status = IB_WC_REM_ACCESS_ERR;
1063 dev->n_other_naks++;
1064 goto class_b;
1065
1066 case 3: /* Remote Operation Error */
1067 wc.status = IB_WC_REM_OP_ERR;
1068 dev->n_other_naks++;
1069 class_b:
1070 wc.wr_id = wqe->wr.wr_id;
1071 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
1072 wc.vendor_err = 0;
1073 wc.byte_len = 0;
1074 wc.qp_num = qp->ibqp.qp_num;
1075 wc.src_qp = qp->remote_qpn;
1076 wc.pkey_index = 0;
1077 wc.slid = qp->remote_ah_attr.dlid;
1078 wc.sl = qp->remote_ah_attr.sl;
1079 wc.dlid_path_bits = 0;
1080 wc.port_num = 0;
1081 ipath_sqerror_qp(qp, &wc);
1082 break;
1083
1084 default:
1085 /* Ignore other reserved NAK error codes */
1086 goto reserved;
1087 }
1088 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1089 goto bail;
1090
1091 default: /* 2: reserved */
1092 reserved:
1093 /* Ignore reserved NAK codes. */
1094 goto bail;
1095 }
1096
1097bail:
1098 return ret;
1099}
1100
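/*
 * For illustration only: a breakdown of the 32-bit AETH fields that
 * do_rc_ack() above decodes inline.  The field layout follows the IBTA
 * AETH format as understood by this sketch; the helper names are
 * hypothetical and not part of the driver.
 */
static inline u32 aeth_type_sketch(u32 aeth)
{
	/* 0 = ACK, 1 = RNR NAK, 3 = NAK, 2 = reserved (see the switch above) */
	return aeth >> 29;
}

static inline u32 aeth_credit_or_code_sketch(u32 aeth)
{
	/* credit count for ACKs; RNR timer code or NAK code otherwise */
	return (aeth >> IPS_AETH_CREDIT_SHIFT) & IPS_AETH_CREDIT_MASK;
}

static inline u32 aeth_msn_sketch(u32 aeth)
{
	/* the low 24 bits carry the responder's message sequence number */
	return aeth & 0xffffff;
}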
1101/**
1102 * ipath_rc_rcv_resp - process an incoming RC response packet
1103 * @dev: the device this packet came in on
1104 * @ohdr: the other headers for this packet
1105 * @data: the packet data
1106 * @tlen: the packet length
1107 * @qp: the QP for this packet
1108 * @opcode: the opcode for this packet
1109 * @psn: the packet sequence number for this packet
1110 * @hdrsize: the header length
1111 * @pmtu: the path MTU
1112 * @header_in_data: true if part of the header data is in the data buffer
1113 *
1114 * This is called from ipath_rc_rcv() to process an incoming RC response
1115 * packet for the given QP.
1116 * Called at interrupt level.
1117 */
1118static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
1119 struct ipath_other_headers *ohdr,
1120 void *data, u32 tlen,
1121 struct ipath_qp *qp,
1122 u32 opcode,
1123 u32 psn, u32 hdrsize, u32 pmtu,
1124 int header_in_data)
1125{
1126 unsigned long flags;
1127 struct ib_wc wc;
1128 int diff;
1129 u32 pad;
1130 u32 aeth;
1131
1132 spin_lock_irqsave(&qp->s_lock, flags);
1133
1134 /* Ignore invalid responses. */
1135 if (ipath_cmp24(psn, qp->s_next_psn) >= 0)
1136 goto ack_done;
1137
1138 /* Ignore duplicate responses. */
1139 diff = ipath_cmp24(psn, qp->s_last_psn);
1140 if (unlikely(diff <= 0)) {
1141 /* Update credits for "ghost" ACKs */
1142 if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
1143 if (!header_in_data)
1144 aeth = be32_to_cpu(ohdr->u.aeth);
1145 else {
1146 aeth = be32_to_cpu(((__be32 *) data)[0]);
1147 data += sizeof(__be32);
1148 }
1149 if ((aeth >> 29) == 0)
1150 ipath_get_credit(qp, aeth);
1151 }
1152 goto ack_done;
1153 }
1154
1155 switch (opcode) {
1156 case OP(ACKNOWLEDGE):
1157 case OP(ATOMIC_ACKNOWLEDGE):
1158 case OP(RDMA_READ_RESPONSE_FIRST):
1159 if (!header_in_data)
1160 aeth = be32_to_cpu(ohdr->u.aeth);
1161 else {
1162 aeth = be32_to_cpu(((__be32 *) data)[0]);
1163 data += sizeof(__be32);
1164 }
1165 if (opcode == OP(ATOMIC_ACKNOWLEDGE))
1166 *(u64 *) qp->s_sge.sge.vaddr = *(u64 *) data;
1167 if (!do_rc_ack(qp, aeth, psn, opcode) ||
1168 opcode != OP(RDMA_READ_RESPONSE_FIRST))
1169 goto ack_done;
1170 hdrsize += 4;
1171 /*
1172 * do_rc_ack() has already checked the PSN so skip
1173 * the sequence check.
1174 */
1175 goto rdma_read;
1176
1177 case OP(RDMA_READ_RESPONSE_MIDDLE):
1178 /* no AETH, no ACK */
1179 if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
1180 dev->n_rdma_seq++;
1181 ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
1182 goto ack_done;
1183 }
1184 rdma_read:
1185 if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
1186 goto ack_done;
1187 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1188 goto ack_done;
1189 if (unlikely(pmtu >= qp->s_len))
1190 goto ack_done;
1191 /* We got a response so update the timeout. */
1192 if (unlikely(qp->s_last == qp->s_tail ||
1193 get_swqe_ptr(qp, qp->s_last)->wr.opcode !=
1194 IB_WR_RDMA_READ))
1195 goto ack_done;
1196 spin_lock(&dev->pending_lock);
1197 if (qp->s_rnr_timeout == 0 &&
1198 qp->timerwait.next != LIST_POISON1)
1199 list_move_tail(&qp->timerwait,
1200 &dev->pending[dev->pending_index]);
1201 spin_unlock(&dev->pending_lock);
1202 /*
1203 * Update the RDMA receive state but do the copy w/o holding the
1204 * locks and blocking interrupts. XXX Yet another place that
1205 * affects relaxed RDMA order since we don't want s_sge modified.
1206 */
1207 qp->s_len -= pmtu;
1208 qp->s_last_psn = psn;
1209 spin_unlock_irqrestore(&qp->s_lock, flags);
1210 ipath_copy_sge(&qp->s_sge, data, pmtu);
1211 goto bail;
1212
1213 case OP(RDMA_READ_RESPONSE_LAST):
1214 /* ACKs READ req. */
1215 if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
1216 dev->n_rdma_seq++;
1217 ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
1218 goto ack_done;
1219 }
1220 /* FALLTHROUGH */
1221 case OP(RDMA_READ_RESPONSE_ONLY):
1222 if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
1223 goto ack_done;
1224 /*
1225 * Get the number of bytes the message was padded by.
1226 */
1227 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1228 /*
1229 * Check that the data size is >= 1 && <= pmtu.
1230 * Remember to account for the AETH header (4) and
1231 * ICRC (4).
1232 */
1233 if (unlikely(tlen <= (hdrsize + pad + 8))) {
1234 /*
1235 * XXX Need to generate an error CQ
1236 * entry.
1237 */
1238 goto ack_done;
1239 }
1240 tlen -= hdrsize + pad + 8;
1241 if (unlikely(tlen != qp->s_len)) {
1242 /*
1243 * XXX Need to generate an error CQ
1244 * entry.
1245 */
1246 goto ack_done;
1247 }
1248 if (!header_in_data)
1249 aeth = be32_to_cpu(ohdr->u.aeth);
1250 else {
1251 aeth = be32_to_cpu(((__be32 *) data)[0]);
1252 data += sizeof(__be32);
1253 }
1254 ipath_copy_sge(&qp->s_sge, data, tlen);
1255 if (do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST))) {
1256 /*
 1257		 * Change the state so we continue
1258 * processing new requests.
1259 */
1260 qp->s_state = OP(SEND_LAST);
1261 }
1262 goto ack_done;
1263 }
1264
1265ack_done:
1266 spin_unlock_irqrestore(&qp->s_lock, flags);
1267bail:
1268 return;
1269}
1270
1271/**
1272 * ipath_rc_rcv_error - process an incoming duplicate or error RC packet
1273 * @dev: the device this packet came in on
1274 * @ohdr: the other headers for this packet
1275 * @data: the packet data
1276 * @qp: the QP for this packet
1277 * @opcode: the opcode for this packet
1278 * @psn: the packet sequence number for this packet
1279 * @diff: the difference between the PSN and the expected PSN
1280 * @header_in_data: true if part of the header data is in the data buffer
1281 *
1282 * This is called from ipath_rc_rcv() to process an unexpected
1283 * incoming RC packet for the given QP.
1284 * Called at interrupt level.
1285 * Return 1 if no more processing is needed; otherwise return 0 to
1286 * schedule a response to be sent and the s_lock unlocked.
1287 */
1288static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
1289 struct ipath_other_headers *ohdr,
1290 void *data,
1291 struct ipath_qp *qp,
1292 u32 opcode,
1293 u32 psn,
1294 int diff,
1295 int header_in_data)
1296{
1297 struct ib_reth *reth;
1298
1299 if (diff > 0) {
1300 /*
1301 * Packet sequence error.
1302 * A NAK will ACK earlier sends and RDMA writes.
1303 * Don't queue the NAK if a RDMA read, atomic, or
1304 * NAK is pending though.
1305 */
1306 spin_lock(&qp->s_lock);
1307 if ((qp->s_ack_state >= OP(RDMA_READ_REQUEST) &&
1308 qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) ||
1309 qp->s_nak_state != 0) {
1310 spin_unlock(&qp->s_lock);
1311 goto done;
1312 }
1313 qp->s_ack_state = OP(SEND_ONLY);
1314 qp->s_nak_state = IB_NAK_PSN_ERROR;
1315 /* Use the expected PSN. */
1316 qp->s_ack_psn = qp->r_psn;
1317 goto resched;
1318 }
1319
1320 /*
1321 * Handle a duplicate request. Don't re-execute SEND, RDMA
1322 * write or atomic op. Don't NAK errors, just silently drop
1323 * the duplicate request. Note that r_sge, r_len, and
1324 * r_rcv_len may be in use so don't modify them.
1325 *
1326 * We are supposed to ACK the earliest duplicate PSN but we
1327 * can coalesce an outstanding duplicate ACK. We have to
1328 * send the earliest so that RDMA reads can be restarted at
1329 * the requester's expected PSN.
1330 */
1331 spin_lock(&qp->s_lock);
1332 if (qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE &&
1333 ipath_cmp24(psn, qp->s_ack_psn) >= 0) {
1334 if (qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST)
1335 qp->s_ack_psn = psn;
1336 spin_unlock(&qp->s_lock);
1337 goto done;
1338 }
1339 switch (opcode) {
1340 case OP(RDMA_READ_REQUEST):
1341 /*
1342 * We have to be careful to not change s_rdma_sge
1343 * while ipath_do_rc_send() is using it and not
1344 * holding the s_lock.
1345 */
1346 if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
1347 qp->s_ack_state >= IB_OPCODE_RDMA_READ_REQUEST) {
1348 spin_unlock(&qp->s_lock);
1349 dev->n_rdma_dup_busy++;
1350 goto done;
1351 }
1352 /* RETH comes after BTH */
1353 if (!header_in_data)
1354 reth = &ohdr->u.rc.reth;
1355 else {
1356 reth = (struct ib_reth *)data;
1357 data += sizeof(*reth);
1358 }
1359 qp->s_rdma_len = be32_to_cpu(reth->length);
1360 if (qp->s_rdma_len != 0) {
1361 u32 rkey = be32_to_cpu(reth->rkey);
1362 u64 vaddr = be64_to_cpu(reth->vaddr);
1363 int ok;
1364
1365 /*
1366 * Address range must be a subset of the original
1367 * request and start on pmtu boundaries.
1368 */
1369 ok = ipath_rkey_ok(dev, &qp->s_rdma_sge,
1370 qp->s_rdma_len, vaddr, rkey,
1371 IB_ACCESS_REMOTE_READ);
1372 if (unlikely(!ok))
1373 goto done;
1374 } else {
1375 qp->s_rdma_sge.sg_list = NULL;
1376 qp->s_rdma_sge.num_sge = 0;
1377 qp->s_rdma_sge.sge.mr = NULL;
1378 qp->s_rdma_sge.sge.vaddr = NULL;
1379 qp->s_rdma_sge.sge.length = 0;
1380 qp->s_rdma_sge.sge.sge_length = 0;
1381 }
1382 break;
1383
1384 case OP(COMPARE_SWAP):
1385 case OP(FETCH_ADD):
1386 /*
 1387		 * Check for the PSN of the last atomic operation
1388 * performed and resend the result if found.
1389 */
1390 if ((psn & IPS_PSN_MASK) != qp->r_atomic_psn) {
1391 spin_unlock(&qp->s_lock);
1392 goto done;
1393 }
1394 qp->s_ack_atomic = qp->r_atomic_data;
1395 break;
1396 }
1397 qp->s_ack_state = opcode;
1398 qp->s_nak_state = 0;
1399 qp->s_ack_psn = psn;
1400resched:
1401 return 0;
1402
1403done:
1404 return 1;
1405}
1406
1407/**
1408 * ipath_rc_rcv - process an incoming RC packet
1409 * @dev: the device this packet came in on
1410 * @hdr: the header of this packet
1411 * @has_grh: true if the header has a GRH
1412 * @data: the packet data
1413 * @tlen: the packet length
1414 * @qp: the QP for this packet
1415 *
1416 * This is called from ipath_qp_rcv() to process an incoming RC packet
1417 * for the given QP.
1418 * Called at interrupt level.
1419 */
1420void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
1421 int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
1422{
1423 struct ipath_other_headers *ohdr;
1424 u32 opcode;
1425 u32 hdrsize;
1426 u32 psn;
1427 u32 pad;
1428 unsigned long flags;
1429 struct ib_wc wc;
1430 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
1431 int diff;
1432 struct ib_reth *reth;
1433 int header_in_data;
1434
1435 /* Check for GRH */
1436 if (!has_grh) {
1437 ohdr = &hdr->u.oth;
1438 hdrsize = 8 + 12; /* LRH + BTH */
1439 psn = be32_to_cpu(ohdr->bth[2]);
1440 header_in_data = 0;
1441 } else {
1442 ohdr = &hdr->u.l.oth;
1443 hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
1444 /*
1445 * The header with GRH is 60 bytes and the core driver sets
1446 * the eager header buffer size to 56 bytes so the last 4
1447 * bytes of the BTH header (PSN) is in the data buffer.
1448 */
1449 header_in_data =
1450 ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
1451 if (header_in_data) {
1452 psn = be32_to_cpu(((__be32 *) data)[0]);
1453 data += sizeof(__be32);
1454 } else
1455 psn = be32_to_cpu(ohdr->bth[2]);
1456 }
1457 /*
 1458	 * The opcode is in the low byte when it's in network order
1459 * (top byte when in host order).
1460 */
1461 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
1462
1463 /*
1464 * Process responses (ACKs) before anything else. Note that the
1465 * packet sequence number will be for something in the send work
1466 * queue rather than the expected receive packet sequence number.
1467 * In other words, this QP is the requester.
1468 */
1469 if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1470 opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
1471 ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn,
1472 hdrsize, pmtu, header_in_data);
1473 goto bail;
1474 }
1475
1476 spin_lock_irqsave(&qp->r_rq.lock, flags);
1477
1478 /* Compute 24 bits worth of difference. */
1479 diff = ipath_cmp24(psn, qp->r_psn);
1480 if (unlikely(diff)) {
1481 if (ipath_rc_rcv_error(dev, ohdr, data, qp, opcode,
1482 psn, diff, header_in_data))
1483 goto done;
1484 goto resched;
1485 }
1486
1487 /* Check for opcode sequence errors. */
1488 switch (qp->r_state) {
1489 case OP(SEND_FIRST):
1490 case OP(SEND_MIDDLE):
1491 if (opcode == OP(SEND_MIDDLE) ||
1492 opcode == OP(SEND_LAST) ||
1493 opcode == OP(SEND_LAST_WITH_IMMEDIATE))
1494 break;
1495 nack_inv:
1496 /*
1497 * A NAK will ACK earlier sends and RDMA writes. Don't queue the
1498 * NAK if a RDMA read, atomic, or NAK is pending though.
1499 */
1500 spin_lock(&qp->s_lock);
1501 if (qp->s_ack_state >= OP(RDMA_READ_REQUEST) &&
1502 qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) {
1503 spin_unlock(&qp->s_lock);
1504 goto done;
1505 }
1506 /* XXX Flush WQEs */
1507 qp->state = IB_QPS_ERR;
1508 qp->s_ack_state = OP(SEND_ONLY);
1509 qp->s_nak_state = IB_NAK_INVALID_REQUEST;
1510 qp->s_ack_psn = qp->r_psn;
1511 goto resched;
1512
1513 case OP(RDMA_WRITE_FIRST):
1514 case OP(RDMA_WRITE_MIDDLE):
1515 if (opcode == OP(RDMA_WRITE_MIDDLE) ||
1516 opcode == OP(RDMA_WRITE_LAST) ||
1517 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1518 break;
1519 goto nack_inv;
1520
1521 case OP(RDMA_READ_REQUEST):
1522 case OP(COMPARE_SWAP):
1523 case OP(FETCH_ADD):
1524 /*
1525 * Drop all new requests until a response has been sent. A
1526 * new request then ACKs the RDMA response we sent. Relaxed
1527 * ordering would allow new requests to be processed but we
1528 * would need to keep a queue of rwqe's for all that are in
1529 * progress. Note that we can't RNR NAK this request since
1530 * the RDMA READ or atomic response is already queued to be
1531 * sent (unless we implement a response send queue).
1532 */
1533 goto done;
1534
1535 default:
1536 if (opcode == OP(SEND_MIDDLE) ||
1537 opcode == OP(SEND_LAST) ||
1538 opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
1539 opcode == OP(RDMA_WRITE_MIDDLE) ||
1540 opcode == OP(RDMA_WRITE_LAST) ||
1541 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1542 goto nack_inv;
1543 break;
1544 }
1545
1546 wc.imm_data = 0;
1547 wc.wc_flags = 0;
1548
1549 /* OK, process the packet. */
1550 switch (opcode) {
1551 case OP(SEND_FIRST):
1552 if (!ipath_get_rwqe(qp, 0)) {
1553 rnr_nak:
1554 /*
1555 * A RNR NAK will ACK earlier sends and RDMA writes.
1556 * Don't queue the NAK if a RDMA read or atomic
1557 * is pending though.
1558 */
1559 spin_lock(&qp->s_lock);
1560 if (qp->s_ack_state >=
1561 OP(RDMA_READ_REQUEST) &&
1562 qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) {
1563 spin_unlock(&qp->s_lock);
1564 goto done;
1565 }
1566 qp->s_ack_state = OP(SEND_ONLY);
1567 qp->s_nak_state = IB_RNR_NAK | qp->s_min_rnr_timer;
1568 qp->s_ack_psn = qp->r_psn;
1569 goto resched;
1570 }
1571 qp->r_rcv_len = 0;
1572 /* FALLTHROUGH */
1573 case OP(SEND_MIDDLE):
1574 case OP(RDMA_WRITE_MIDDLE):
1575 send_middle:
1576 /* Check for invalid length PMTU or posted rwqe len. */
1577 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1578 goto nack_inv;
1579 qp->r_rcv_len += pmtu;
1580 if (unlikely(qp->r_rcv_len > qp->r_len))
1581 goto nack_inv;
1582 ipath_copy_sge(&qp->r_sge, data, pmtu);
1583 break;
1584
1585 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
1586 /* consume RWQE */
1587 if (!ipath_get_rwqe(qp, 1))
1588 goto rnr_nak;
1589 goto send_last_imm;
1590
1591 case OP(SEND_ONLY):
1592 case OP(SEND_ONLY_WITH_IMMEDIATE):
1593 if (!ipath_get_rwqe(qp, 0))
1594 goto rnr_nak;
1595 qp->r_rcv_len = 0;
1596 if (opcode == OP(SEND_ONLY))
1597 goto send_last;
1598 /* FALLTHROUGH */
1599 case OP(SEND_LAST_WITH_IMMEDIATE):
1600 send_last_imm:
1601 if (header_in_data) {
1602 wc.imm_data = *(__be32 *) data;
1603 data += sizeof(__be32);
1604 } else {
1605 /* Immediate data comes after BTH */
1606 wc.imm_data = ohdr->u.imm_data;
1607 }
1608 hdrsize += 4;
1609 wc.wc_flags = IB_WC_WITH_IMM;
1610 /* FALLTHROUGH */
1611 case OP(SEND_LAST):
1612 case OP(RDMA_WRITE_LAST):
1613 send_last:
1614 /* Get the number of bytes the message was padded by. */
1615 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1616 /* Check for invalid length. */
1617 /* XXX LAST len should be >= 1 */
1618 if (unlikely(tlen < (hdrsize + pad + 4)))
1619 goto nack_inv;
1620 /* Don't count the CRC. */
1621 tlen -= (hdrsize + pad + 4);
1622 wc.byte_len = tlen + qp->r_rcv_len;
1623 if (unlikely(wc.byte_len > qp->r_len))
1624 goto nack_inv;
1625 ipath_copy_sge(&qp->r_sge, data, tlen);
1626 atomic_inc(&qp->msn);
1627 if (opcode == OP(RDMA_WRITE_LAST) ||
1628 opcode == OP(RDMA_WRITE_ONLY))
1629 break;
1630 wc.wr_id = qp->r_wr_id;
1631 wc.status = IB_WC_SUCCESS;
1632 wc.opcode = IB_WC_RECV;
1633 wc.vendor_err = 0;
1634 wc.qp_num = qp->ibqp.qp_num;
1635 wc.src_qp = qp->remote_qpn;
1636 wc.pkey_index = 0;
1637 wc.slid = qp->remote_ah_attr.dlid;
1638 wc.sl = qp->remote_ah_attr.sl;
1639 wc.dlid_path_bits = 0;
1640 wc.port_num = 0;
1641 /* Signal completion event if the solicited bit is set. */
1642 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
1643 (ohdr->bth[0] &
1644 __constant_cpu_to_be32(1 << 23)) != 0);
1645 break;
1646
1647 case OP(RDMA_WRITE_FIRST):
1648 case OP(RDMA_WRITE_ONLY):
1649 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
1650 /* consume RWQE */
1651 /* RETH comes after BTH */
1652 if (!header_in_data)
1653 reth = &ohdr->u.rc.reth;
1654 else {
1655 reth = (struct ib_reth *)data;
1656 data += sizeof(*reth);
1657 }
1658 hdrsize += sizeof(*reth);
1659 qp->r_len = be32_to_cpu(reth->length);
1660 qp->r_rcv_len = 0;
1661 if (qp->r_len != 0) {
1662 u32 rkey = be32_to_cpu(reth->rkey);
1663 u64 vaddr = be64_to_cpu(reth->vaddr);
1664 int ok;
1665
1666 /* Check rkey & NAK */
1667 ok = ipath_rkey_ok(dev, &qp->r_sge,
1668 qp->r_len, vaddr, rkey,
1669 IB_ACCESS_REMOTE_WRITE);
1670 if (unlikely(!ok)) {
1671 nack_acc:
1672 /*
1673 * A NAK will ACK earlier sends and RDMA
1674 * writes. Don't queue the NAK if a RDMA
1675 * read, atomic, or NAK is pending though.
1676 */
1677 spin_lock(&qp->s_lock);
1678 if (qp->s_ack_state >=
1679 OP(RDMA_READ_REQUEST) &&
1680 qp->s_ack_state !=
1681 IB_OPCODE_ACKNOWLEDGE) {
1682 spin_unlock(&qp->s_lock);
1683 goto done;
1684 }
1685 /* XXX Flush WQEs */
1686 qp->state = IB_QPS_ERR;
1687 qp->s_ack_state = OP(RDMA_WRITE_ONLY);
1688 qp->s_nak_state =
1689 IB_NAK_REMOTE_ACCESS_ERROR;
1690 qp->s_ack_psn = qp->r_psn;
1691 goto resched;
1692 }
1693 } else {
1694 qp->r_sge.sg_list = NULL;
1695 qp->r_sge.sge.mr = NULL;
1696 qp->r_sge.sge.vaddr = NULL;
1697 qp->r_sge.sge.length = 0;
1698 qp->r_sge.sge.sge_length = 0;
1699 }
1700 if (unlikely(!(qp->qp_access_flags &
1701 IB_ACCESS_REMOTE_WRITE)))
1702 goto nack_acc;
1703 if (opcode == OP(RDMA_WRITE_FIRST))
1704 goto send_middle;
1705 else if (opcode == OP(RDMA_WRITE_ONLY))
1706 goto send_last;
1707 if (!ipath_get_rwqe(qp, 1))
1708 goto rnr_nak;
1709 goto send_last_imm;
1710
1711 case OP(RDMA_READ_REQUEST):
1712 /* RETH comes after BTH */
1713 if (!header_in_data)
1714 reth = &ohdr->u.rc.reth;
1715 else {
1716 reth = (struct ib_reth *)data;
1717 data += sizeof(*reth);
1718 }
1719 spin_lock(&qp->s_lock);
1720 if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
1721 qp->s_ack_state >= IB_OPCODE_RDMA_READ_REQUEST) {
1722 spin_unlock(&qp->s_lock);
1723 goto done;
1724 }
1725 qp->s_rdma_len = be32_to_cpu(reth->length);
1726 if (qp->s_rdma_len != 0) {
1727 u32 rkey = be32_to_cpu(reth->rkey);
1728 u64 vaddr = be64_to_cpu(reth->vaddr);
1729 int ok;
1730
1731 /* Check rkey & NAK */
1732 ok = ipath_rkey_ok(dev, &qp->s_rdma_sge,
1733 qp->s_rdma_len, vaddr, rkey,
1734 IB_ACCESS_REMOTE_READ);
1735 if (unlikely(!ok)) {
1736 spin_unlock(&qp->s_lock);
1737 goto nack_acc;
1738 }
1739 /*
1740 * Update the next expected PSN. We add 1 later
1741 * below, so only add the remainder here.
1742 */
1743 if (qp->s_rdma_len > pmtu)
1744 qp->r_psn += (qp->s_rdma_len - 1) / pmtu;
1745 } else {
1746 qp->s_rdma_sge.sg_list = NULL;
1747 qp->s_rdma_sge.num_sge = 0;
1748 qp->s_rdma_sge.sge.mr = NULL;
1749 qp->s_rdma_sge.sge.vaddr = NULL;
1750 qp->s_rdma_sge.sge.length = 0;
1751 qp->s_rdma_sge.sge.sge_length = 0;
1752 }
1753 if (unlikely(!(qp->qp_access_flags &
1754 IB_ACCESS_REMOTE_READ)))
1755 goto nack_acc;
1756 /*
1757 * We need to increment the MSN here instead of when we
1758 * finish sending the result since a duplicate request would
1759 * increment it more than once.
1760 */
1761 atomic_inc(&qp->msn);
1762 qp->s_ack_state = opcode;
1763 qp->s_nak_state = 0;
1764 qp->s_ack_psn = psn;
1765 qp->r_psn++;
1766 qp->r_state = opcode;
1767 goto rdmadone;
1768
1769 case OP(COMPARE_SWAP):
1770 case OP(FETCH_ADD): {
1771 struct ib_atomic_eth *ateth;
1772 u64 vaddr;
1773 u64 sdata;
1774 u32 rkey;
1775
1776 if (!header_in_data)
1777 ateth = &ohdr->u.atomic_eth;
1778 else {
1779 ateth = (struct ib_atomic_eth *)data;
1780 data += sizeof(*ateth);
1781 }
1782 vaddr = be64_to_cpu(ateth->vaddr);
1783 if (unlikely(vaddr & (sizeof(u64) - 1)))
1784 goto nack_inv;
1785 rkey = be32_to_cpu(ateth->rkey);
1786 /* Check rkey & NAK */
1787 if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge,
1788 sizeof(u64), vaddr, rkey,
1789 IB_ACCESS_REMOTE_ATOMIC)))
1790 goto nack_acc;
1791 if (unlikely(!(qp->qp_access_flags &
1792 IB_ACCESS_REMOTE_ATOMIC)))
1793 goto nack_acc;
1794 /* Perform atomic OP and save result. */
1795 sdata = be64_to_cpu(ateth->swap_data);
1796 spin_lock(&dev->pending_lock);
1797 qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr;
1798 if (opcode == OP(FETCH_ADD))
1799 *(u64 *) qp->r_sge.sge.vaddr =
1800 qp->r_atomic_data + sdata;
1801 else if (qp->r_atomic_data ==
1802 be64_to_cpu(ateth->compare_data))
1803 *(u64 *) qp->r_sge.sge.vaddr = sdata;
1804 spin_unlock(&dev->pending_lock);
1805 atomic_inc(&qp->msn);
1806 qp->r_atomic_psn = psn & IPS_PSN_MASK;
1807 psn |= 1 << 31;
1808 break;
1809 }
1810
1811 default:
1812 /* Drop packet for unknown opcodes. */
1813 goto done;
1814 }
1815 qp->r_psn++;
1816 qp->r_state = opcode;
1817 /* Send an ACK if requested or required. */
1818 if (psn & (1 << 31)) {
1819 /*
1820 * Coalesce ACKs unless there is a RDMA READ or
1821 * ATOMIC pending.
1822 */
1823 spin_lock(&qp->s_lock);
1824 if (qp->s_ack_state == OP(ACKNOWLEDGE) ||
1825 qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST) {
1826 qp->s_ack_state = opcode;
1827 qp->s_nak_state = 0;
1828 qp->s_ack_psn = psn;
1829 qp->s_ack_atomic = qp->r_atomic_data;
1830 goto resched;
1831 }
1832 spin_unlock(&qp->s_lock);
1833 }
1834done:
1835 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
1836 goto bail;
1837
1838resched:
1839 /*
1840 * Try to send ACK right away but not if ipath_do_rc_send() is
1841 * active.
1842 */
1843 if (qp->s_hdrwords == 0 &&
1844 (qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST ||
1845 qp->s_ack_state >= IB_OPCODE_COMPARE_SWAP))
1846 send_rc_ack(qp);
1847
1848rdmadone:
1849 spin_unlock(&qp->s_lock);
1850 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
1851
1852 /* Call ipath_do_rc_send() in another thread. */
1853 tasklet_hi_schedule(&qp->s_task);
1854
1855bail:
1856 return;
1857}
diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h
new file mode 100644
index 000000000000..1e59750c5f63
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_registers.h
@@ -0,0 +1,446 @@
1/*
2 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef _IPATH_REGISTERS_H
34#define _IPATH_REGISTERS_H
35
36/*
37 * This file should only be included by kernel source, and by the diags.
38 * It defines the registers, and their contents, for the InfiniPath HT-400 chip
39 */
40
41/*
42 * These are the InfiniPath register and buffer bit definitions,
43 * that are visible to software, and needed only by the kernel
 44 * and diag code. A few that are visible to protocol and user
45 * code are in ipath_common.h. Some bits are specific
46 * to a given chip implementation, and have been moved to the
47 * chip-specific source file
48 */
49
50/* kr_revision bits */
51#define INFINIPATH_R_CHIPREVMINOR_MASK 0xFF
52#define INFINIPATH_R_CHIPREVMINOR_SHIFT 0
53#define INFINIPATH_R_CHIPREVMAJOR_MASK 0xFF
54#define INFINIPATH_R_CHIPREVMAJOR_SHIFT 8
55#define INFINIPATH_R_ARCH_MASK 0xFF
56#define INFINIPATH_R_ARCH_SHIFT 16
57#define INFINIPATH_R_SOFTWARE_MASK 0xFF
58#define INFINIPATH_R_SOFTWARE_SHIFT 24
59#define INFINIPATH_R_BOARDID_MASK 0xFF
60#define INFINIPATH_R_BOARDID_SHIFT 32
61
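/*
 * For illustration only: the MASK/SHIFT pairs above (and throughout
 * this file) are unshifted masks, so a field is extracted by shifting
 * first and masking second.  The helper name is hypothetical.
 */
static inline u32 kreg_field_sketch(u64 regval, u64 mask, unsigned int shift)
{
	/*
	 * e.g. (revision >> INFINIPATH_R_CHIPREVMAJOR_SHIFT) &
	 *	INFINIPATH_R_CHIPREVMAJOR_MASK
	 */
	return (u32) ((regval >> shift) & mask);
}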
62/* kr_control bits */
63#define INFINIPATH_C_FREEZEMODE 0x00000002
64#define INFINIPATH_C_LINKENABLE 0x00000004
65#define INFINIPATH_C_RESET 0x00000001
66
67/* kr_sendctrl bits */
68#define INFINIPATH_S_DISARMPIOBUF_SHIFT 16
69
70#define IPATH_S_ABORT 0
71#define IPATH_S_PIOINTBUFAVAIL 1
72#define IPATH_S_PIOBUFAVAILUPD 2
73#define IPATH_S_PIOENABLE 3
74#define IPATH_S_DISARM 31
75
76#define INFINIPATH_S_ABORT (1U << IPATH_S_ABORT)
77#define INFINIPATH_S_PIOINTBUFAVAIL (1U << IPATH_S_PIOINTBUFAVAIL)
78#define INFINIPATH_S_PIOBUFAVAILUPD (1U << IPATH_S_PIOBUFAVAILUPD)
79#define INFINIPATH_S_PIOENABLE (1U << IPATH_S_PIOENABLE)
80#define INFINIPATH_S_DISARM (1U << IPATH_S_DISARM)
81
82/* kr_rcvctrl bits */
83#define INFINIPATH_R_PORTENABLE_SHIFT 0
84#define INFINIPATH_R_INTRAVAIL_SHIFT 16
85#define INFINIPATH_R_TAILUPD 0x80000000
86
87/* kr_intstatus, kr_intclear, kr_intmask bits */
88#define INFINIPATH_I_RCVURG_SHIFT 0
89#define INFINIPATH_I_RCVAVAIL_SHIFT 12
90#define INFINIPATH_I_ERROR 0x80000000
91#define INFINIPATH_I_SPIOSENT 0x40000000
92#define INFINIPATH_I_SPIOBUFAVAIL 0x20000000
93#define INFINIPATH_I_GPIO 0x10000000
94
95/* kr_errorstatus, kr_errorclear, kr_errormask bits */
96#define INFINIPATH_E_RFORMATERR 0x0000000000000001ULL
97#define INFINIPATH_E_RVCRC 0x0000000000000002ULL
98#define INFINIPATH_E_RICRC 0x0000000000000004ULL
99#define INFINIPATH_E_RMINPKTLEN 0x0000000000000008ULL
100#define INFINIPATH_E_RMAXPKTLEN 0x0000000000000010ULL
101#define INFINIPATH_E_RLONGPKTLEN 0x0000000000000020ULL
102#define INFINIPATH_E_RSHORTPKTLEN 0x0000000000000040ULL
103#define INFINIPATH_E_RUNEXPCHAR 0x0000000000000080ULL
104#define INFINIPATH_E_RUNSUPVL 0x0000000000000100ULL
105#define INFINIPATH_E_REBP 0x0000000000000200ULL
106#define INFINIPATH_E_RIBFLOW 0x0000000000000400ULL
107#define INFINIPATH_E_RBADVERSION 0x0000000000000800ULL
108#define INFINIPATH_E_RRCVEGRFULL 0x0000000000001000ULL
109#define INFINIPATH_E_RRCVHDRFULL 0x0000000000002000ULL
110#define INFINIPATH_E_RBADTID 0x0000000000004000ULL
111#define INFINIPATH_E_RHDRLEN 0x0000000000008000ULL
112#define INFINIPATH_E_RHDR 0x0000000000010000ULL
113#define INFINIPATH_E_RIBLOSTLINK 0x0000000000020000ULL
114#define INFINIPATH_E_SMINPKTLEN 0x0000000020000000ULL
115#define INFINIPATH_E_SMAXPKTLEN 0x0000000040000000ULL
116#define INFINIPATH_E_SUNDERRUN 0x0000000080000000ULL
117#define INFINIPATH_E_SPKTLEN 0x0000000100000000ULL
118#define INFINIPATH_E_SDROPPEDSMPPKT 0x0000000200000000ULL
119#define INFINIPATH_E_SDROPPEDDATAPKT 0x0000000400000000ULL
120#define INFINIPATH_E_SPIOARMLAUNCH 0x0000000800000000ULL
121#define INFINIPATH_E_SUNEXPERRPKTNUM 0x0000001000000000ULL
122#define INFINIPATH_E_SUNSUPVL 0x0000002000000000ULL
123#define INFINIPATH_E_IBSTATUSCHANGED 0x0001000000000000ULL
124#define INFINIPATH_E_INVALIDADDR 0x0002000000000000ULL
125#define INFINIPATH_E_RESET 0x0004000000000000ULL
126#define INFINIPATH_E_HARDWARE 0x0008000000000000ULL
127
128/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
129/* TXEMEMPARITYERR bit 0: PIObuf, 1: PIOpbc, 2: launchfifo
130 * RXEMEMPARITYERR bit 0: rcvbuf, 1: lookupq, 2: eagerTID, 3: expTID
131 * bit 4: flag buffer, 5: datainfo, 6: header info */
132#define INFINIPATH_HWE_TXEMEMPARITYERR_MASK 0xFULL
133#define INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT 40
134#define INFINIPATH_HWE_RXEMEMPARITYERR_MASK 0x7FULL
135#define INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT 44
136#define INFINIPATH_HWE_RXDSYNCMEMPARITYERR 0x0000000400000000ULL
137#define INFINIPATH_HWE_MEMBISTFAILED 0x0040000000000000ULL
138#define INFINIPATH_HWE_IBCBUSTOSPCPARITYERR 0x4000000000000000ULL
139#define INFINIPATH_HWE_IBCBUSFRSPCPARITYERR 0x8000000000000000ULL
140
141/* kr_hwdiagctrl bits */
142#define INFINIPATH_DC_FORCETXEMEMPARITYERR_MASK 0xFULL
143#define INFINIPATH_DC_FORCETXEMEMPARITYERR_SHIFT 40
144#define INFINIPATH_DC_FORCERXEMEMPARITYERR_MASK 0x7FULL
145#define INFINIPATH_DC_FORCERXEMEMPARITYERR_SHIFT 44
146#define INFINIPATH_DC_FORCERXDSYNCMEMPARITYERR 0x0000000400000000ULL
147#define INFINIPATH_DC_COUNTERDISABLE 0x1000000000000000ULL
148#define INFINIPATH_DC_COUNTERWREN 0x2000000000000000ULL
149#define INFINIPATH_DC_FORCEIBCBUSTOSPCPARITYERR 0x4000000000000000ULL
150#define INFINIPATH_DC_FORCEIBCBUSFRSPCPARITYERR 0x8000000000000000ULL
151
152/* kr_ibcctrl bits */
153#define INFINIPATH_IBCC_FLOWCTRLPERIOD_MASK 0xFFULL
154#define INFINIPATH_IBCC_FLOWCTRLPERIOD_SHIFT 0
155#define INFINIPATH_IBCC_FLOWCTRLWATERMARK_MASK 0xFFULL
156#define INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT 8
157#define INFINIPATH_IBCC_LINKINITCMD_MASK 0x3ULL
158#define INFINIPATH_IBCC_LINKINITCMD_DISABLE 1
159#define INFINIPATH_IBCC_LINKINITCMD_POLL 2 /* cycle through TS1/TS2 till OK */
160#define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3 /* wait for TS1, then go on */
161#define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16
162#define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL
163#define INFINIPATH_IBCC_LINKCMD_INIT 1 /* move to 0x11 */
164#define INFINIPATH_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
165#define INFINIPATH_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
166#define INFINIPATH_IBCC_LINKCMD_SHIFT 18
167#define INFINIPATH_IBCC_MAXPKTLEN_MASK 0x7FFULL
168#define INFINIPATH_IBCC_MAXPKTLEN_SHIFT 20
169#define INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK 0xFULL
170#define INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT 32
171#define INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK 0xFULL
172#define INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT 36
173#define INFINIPATH_IBCC_CREDITSCALE_MASK 0x7ULL
174#define INFINIPATH_IBCC_CREDITSCALE_SHIFT 40
175#define INFINIPATH_IBCC_LOOPBACK 0x8000000000000000ULL
176#define INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE 0x4000000000000000ULL
177
178/* kr_ibcstatus bits */
179#define INFINIPATH_IBCS_LINKTRAININGSTATE_MASK 0xF
180#define INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT 0
181#define INFINIPATH_IBCS_LINKSTATE_MASK 0x7
182#define INFINIPATH_IBCS_LINKSTATE_SHIFT 4
183#define INFINIPATH_IBCS_TXREADY 0x40000000
184#define INFINIPATH_IBCS_TXCREDITOK 0x80000000
185/* link training states (shift by INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) */
186#define INFINIPATH_IBCS_LT_STATE_DISABLED 0x00
187#define INFINIPATH_IBCS_LT_STATE_LINKUP 0x01
188#define INFINIPATH_IBCS_LT_STATE_POLLACTIVE 0x02
189#define INFINIPATH_IBCS_LT_STATE_POLLQUIET 0x03
190#define INFINIPATH_IBCS_LT_STATE_SLEEPDELAY 0x04
191#define INFINIPATH_IBCS_LT_STATE_SLEEPQUIET 0x05
192#define INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE 0x08
193#define INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG 0x09
194#define INFINIPATH_IBCS_LT_STATE_CFGWAITRMT 0x0a
195#define INFINIPATH_IBCS_LT_STATE_CFGIDLE 0x0b
196#define INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN 0x0c
197#define INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT 0x0e
198#define INFINIPATH_IBCS_LT_STATE_RECOVERIDLE 0x0f
199/* link state machine states (shift by INFINIPATH_IBCS_LINKSTATE_SHIFT) */
200#define INFINIPATH_IBCS_L_STATE_DOWN 0x0
201#define INFINIPATH_IBCS_L_STATE_INIT 0x1
202#define INFINIPATH_IBCS_L_STATE_ARM 0x2
203#define INFINIPATH_IBCS_L_STATE_ACTIVE 0x3
204#define INFINIPATH_IBCS_L_STATE_ACT_DEFER 0x4
205
206/* combination link status states that we use with some frequency */
207#define IPATH_IBSTATE_MASK ((INFINIPATH_IBCS_LINKTRAININGSTATE_MASK \
208 << INFINIPATH_IBCS_LINKSTATE_SHIFT) | \
209 (INFINIPATH_IBCS_LINKSTATE_MASK \
210 <<INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT))
211#define IPATH_IBSTATE_INIT ((INFINIPATH_IBCS_L_STATE_INIT \
212 << INFINIPATH_IBCS_LINKSTATE_SHIFT) | \
213 (INFINIPATH_IBCS_LT_STATE_LINKUP \
214 <<INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT))
215#define IPATH_IBSTATE_ARM ((INFINIPATH_IBCS_L_STATE_ARM \
216 << INFINIPATH_IBCS_LINKSTATE_SHIFT) | \
217 (INFINIPATH_IBCS_LT_STATE_LINKUP \
218 <<INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT))
219#define IPATH_IBSTATE_ACTIVE ((INFINIPATH_IBCS_L_STATE_ACTIVE \
220 << INFINIPATH_IBCS_LINKSTATE_SHIFT) | \
221 (INFINIPATH_IBCS_LT_STATE_LINKUP \
222 <<INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT))
223
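/*
 * For illustration only: one plausible use of the combination states
 * above is to mask the IBC status register down to the link state and
 * training state fields and compare the result directly; the helper
 * below is a hypothetical sketch, not code from the driver.
 */
static inline int ib_link_active_sketch(u64 ibcstatus)
{
	return (ibcstatus & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_ACTIVE;
}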
224/* kr_extstatus bits */
225#define INFINIPATH_EXTS_SERDESPLLLOCK 0x1
226#define INFINIPATH_EXTS_GPIOIN_MASK 0xFFFFULL
227#define INFINIPATH_EXTS_GPIOIN_SHIFT 48
228
229/* kr_extctrl bits */
230#define INFINIPATH_EXTC_GPIOINVERT_MASK 0xFFFFULL
231#define INFINIPATH_EXTC_GPIOINVERT_SHIFT 32
232#define INFINIPATH_EXTC_GPIOOE_MASK 0xFFFFULL
233#define INFINIPATH_EXTC_GPIOOE_SHIFT 48
234#define INFINIPATH_EXTC_SERDESENABLE 0x80000000ULL
235#define INFINIPATH_EXTC_SERDESCONNECT 0x40000000ULL
236#define INFINIPATH_EXTC_SERDESENTRUNKING 0x20000000ULL
237#define INFINIPATH_EXTC_SERDESDISRXFIFO 0x10000000ULL
238#define INFINIPATH_EXTC_SERDESENPLPBK1 0x08000000ULL
239#define INFINIPATH_EXTC_SERDESENPLPBK2 0x04000000ULL
240#define INFINIPATH_EXTC_SERDESENENCDEC 0x02000000ULL
241#define INFINIPATH_EXTC_LED1SECPORT_ON 0x00000020ULL
242#define INFINIPATH_EXTC_LED2SECPORT_ON 0x00000010ULL
243#define INFINIPATH_EXTC_LED1PRIPORT_ON 0x00000008ULL
244#define INFINIPATH_EXTC_LED2PRIPORT_ON 0x00000004ULL
245#define INFINIPATH_EXTC_LEDGBLOK_ON 0x00000002ULL
246#define INFINIPATH_EXTC_LEDGBLERR_OFF 0x00000001ULL
247
248/* kr_mdio bits */
249#define INFINIPATH_MDIO_CLKDIV_MASK 0x7FULL
250#define INFINIPATH_MDIO_CLKDIV_SHIFT 32
251#define INFINIPATH_MDIO_COMMAND_MASK 0x7ULL
252#define INFINIPATH_MDIO_COMMAND_SHIFT 26
253#define INFINIPATH_MDIO_DEVADDR_MASK 0x1FULL
254#define INFINIPATH_MDIO_DEVADDR_SHIFT 21
255#define INFINIPATH_MDIO_REGADDR_MASK 0x1FULL
256#define INFINIPATH_MDIO_REGADDR_SHIFT 16
257#define INFINIPATH_MDIO_DATA_MASK 0xFFFFULL
258#define INFINIPATH_MDIO_DATA_SHIFT 0
259#define INFINIPATH_MDIO_CMDVALID 0x0000000040000000ULL
260#define INFINIPATH_MDIO_RDDATAVALID 0x0000000080000000ULL
261
262/* kr_partitionkey bits */
263#define INFINIPATH_PKEY_SIZE 16
264#define INFINIPATH_PKEY_MASK 0xFFFF
265#define INFINIPATH_PKEY_DEFAULT_PKEY 0xFFFF
266
267/* kr_serdesconfig0 bits */
 268#define INFINIPATH_SERDC0_RESET_MASK  0xfULL /* overall reset bits */
269#define INFINIPATH_SERDC0_RESET_PLL 0x10000000ULL /* pll reset */
270#define INFINIPATH_SERDC0_TXIDLE 0xF000ULL /* tx idle enables (per lane) */
271#define INFINIPATH_SERDC0_RXDETECT_EN 0xF0000ULL /* rx detect enables (per lane) */
272#define INFINIPATH_SERDC0_L1PWR_DN 0xF0ULL /* L1 Power down; use with RXDETECT,
 273						 otherwise not used on IB side */
274
275/* kr_xgxsconfig bits */
276#define INFINIPATH_XGXS_RESET 0x7ULL
277#define INFINIPATH_XGXS_MDIOADDR_MASK 0xfULL
278#define INFINIPATH_XGXS_MDIOADDR_SHIFT 4
279
280#define INFINIPATH_RT_ADDR_MASK 0xFFFFFFFFFFULL /* 40 bits valid */
281
282/* TID entries (memory), HT400-only */
283#define INFINIPATH_RT_VALID 0x8000000000000000ULL
284#define INFINIPATH_RT_ADDR_SHIFT 0
285#define INFINIPATH_RT_BUFSIZE_MASK 0x3FFF
286#define INFINIPATH_RT_BUFSIZE_SHIFT 48
287
288/*
289 * IPATH_PIO_MAXIBHDR is the max IB header size allowed for in our
290 * PIO send buffers. This is well beyond anything currently
291 * defined in the InfiniBand spec.
292 */
293#define IPATH_PIO_MAXIBHDR 128
294
295typedef u64 ipath_err_t;
296
297/* mask of defined bits for various registers */
298extern u64 infinipath_i_bitsextant;
299extern ipath_err_t infinipath_e_bitsextant, infinipath_hwe_bitsextant;
300
301/* masks that are different in various chips, or only exist in some chips */
302extern u32 infinipath_i_rcvavail_mask, infinipath_i_rcvurg_mask;
303
304/*
305 * register bits for selecting i2c direction and values, used for I2C serial
306 * flash
307 */
308extern u16 ipath_gpio_sda_num, ipath_gpio_scl_num;
309extern u64 ipath_gpio_sda, ipath_gpio_scl;
310
311/*
312 * These are the infinipath general register numbers (not offsets).
 313 * The kernel registers are used directly; those beyond the kernel
314 * registers are calculated from one of the base registers. The use of
315 * an integer type doesn't allow type-checking as thorough as, say,
316 * an enum but allows for better hiding of chip differences.
317 */
318typedef const u16 ipath_kreg, /* infinipath general registers */
319 ipath_creg, /* infinipath counter registers */
320 ipath_sreg; /* kernel-only, infinipath send registers */
321
322/*
323 * These are the chip registers common to all infinipath chips, and
324 * used both by the kernel and the diagnostics or other user code.
325 * They are all implemented such that 64 bit accesses work.
326 * Some implement no more than 32 bits. Because 64 bit reads
 327 * require 2 HT cmds on Opteron, we access those with 32 bit
328 * reads for efficiency (they are written as 64 bits, since
329 * the extra 32 bits are nearly free on writes, and it slightly reduces
330 * complexity). The rest are all accessed as 64 bits.
331 */
332struct ipath_kregs {
333 /* These are the 32 bit group */
334 ipath_kreg kr_control;
335 ipath_kreg kr_counterregbase;
336 ipath_kreg kr_intmask;
337 ipath_kreg kr_intstatus;
338 ipath_kreg kr_pagealign;
339 ipath_kreg kr_portcnt;
340 ipath_kreg kr_rcvtidbase;
341 ipath_kreg kr_rcvtidcnt;
342 ipath_kreg kr_rcvegrbase;
343 ipath_kreg kr_rcvegrcnt;
344 ipath_kreg kr_scratch;
345 ipath_kreg kr_sendctrl;
346 ipath_kreg kr_sendpiobufbase;
347 ipath_kreg kr_sendpiobufcnt;
348 ipath_kreg kr_sendpiosize;
349 ipath_kreg kr_sendregbase;
350 ipath_kreg kr_userregbase;
351 /* These are the 64 bit group */
352 ipath_kreg kr_debugport;
353 ipath_kreg kr_debugportselect;
354 ipath_kreg kr_errorclear;
355 ipath_kreg kr_errormask;
356 ipath_kreg kr_errorstatus;
357 ipath_kreg kr_extctrl;
358 ipath_kreg kr_extstatus;
359 ipath_kreg kr_gpio_clear;
360 ipath_kreg kr_gpio_mask;
361 ipath_kreg kr_gpio_out;
362 ipath_kreg kr_gpio_status;
363 ipath_kreg kr_hwdiagctrl;
364 ipath_kreg kr_hwerrclear;
365 ipath_kreg kr_hwerrmask;
366 ipath_kreg kr_hwerrstatus;
367 ipath_kreg kr_ibcctrl;
368 ipath_kreg kr_ibcstatus;
369 ipath_kreg kr_intblocked;
370 ipath_kreg kr_intclear;
371 ipath_kreg kr_interruptconfig;
372 ipath_kreg kr_mdio;
373 ipath_kreg kr_partitionkey;
374 ipath_kreg kr_rcvbthqp;
375 ipath_kreg kr_rcvbufbase;
376 ipath_kreg kr_rcvbufsize;
377 ipath_kreg kr_rcvctrl;
378 ipath_kreg kr_rcvhdrcnt;
379 ipath_kreg kr_rcvhdrentsize;
380 ipath_kreg kr_rcvhdrsize;
381 ipath_kreg kr_rcvintmembase;
382 ipath_kreg kr_rcvintmemsize;
383 ipath_kreg kr_revision;
384 ipath_kreg kr_sendbuffererror;
385 ipath_kreg kr_sendpioavailaddr;
386 ipath_kreg kr_serdesconfig0;
387 ipath_kreg kr_serdesconfig1;
388 ipath_kreg kr_serdesstatus;
389 ipath_kreg kr_txintmembase;
390 ipath_kreg kr_txintmemsize;
391 ipath_kreg kr_xgxsconfig;
392 ipath_kreg kr_ibpllcfg;
393 /* use these two (and the following N ports) only with ipath_k*_kreg64_port();
394 * not *kreg64() */
395 ipath_kreg kr_rcvhdraddr;
396 ipath_kreg kr_rcvhdrtailaddr;
397
398 /* remaining registers are not present on all types of infinipath chips */
399 ipath_kreg kr_rcvpktledcnt;
400 ipath_kreg kr_pcierbuftestreg0;
401 ipath_kreg kr_pcierbuftestreg1;
402 ipath_kreg kr_pcieq0serdesconfig0;
403 ipath_kreg kr_pcieq0serdesconfig1;
404 ipath_kreg kr_pcieq0serdesstatus;
405 ipath_kreg kr_pcieq1serdesconfig0;
406 ipath_kreg kr_pcieq1serdesconfig1;
407 ipath_kreg kr_pcieq1serdesstatus;
408};
409
410struct ipath_cregs {
411 ipath_creg cr_badformatcnt;
412 ipath_creg cr_erricrccnt;
413 ipath_creg cr_errlinkcnt;
414 ipath_creg cr_errlpcrccnt;
415 ipath_creg cr_errpkey;
416 ipath_creg cr_errrcvflowctrlcnt;
417 ipath_creg cr_err_rlencnt;
418 ipath_creg cr_errslencnt;
419 ipath_creg cr_errtidfull;
420 ipath_creg cr_errtidvalid;
421 ipath_creg cr_errvcrccnt;
422 ipath_creg cr_ibstatuschange;
423 ipath_creg cr_intcnt;
424 ipath_creg cr_invalidrlencnt;
425 ipath_creg cr_invalidslencnt;
426 ipath_creg cr_lbflowstallcnt;
427 ipath_creg cr_iblinkdowncnt;
428 ipath_creg cr_iblinkerrrecovcnt;
429 ipath_creg cr_ibsymbolerrcnt;
430 ipath_creg cr_pktrcvcnt;
431 ipath_creg cr_pktrcvflowctrlcnt;
432 ipath_creg cr_pktsendcnt;
433 ipath_creg cr_pktsendflowcnt;
434 ipath_creg cr_portovflcnt;
435 ipath_creg cr_rcvebpcnt;
436 ipath_creg cr_rcvovflcnt;
437 ipath_creg cr_rxdroppktcnt;
438 ipath_creg cr_senddropped;
439 ipath_creg cr_sendstallcnt;
440 ipath_creg cr_sendunderruncnt;
441 ipath_creg cr_unsupvlcnt;
442 ipath_creg cr_wordrcvcnt;
443 ipath_creg cr_wordsendcnt;
444};
445
446#endif /* _IPATH_REGISTERS_H */
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
new file mode 100644
index 000000000000..f232e77b78ee
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -0,0 +1,552 @@
1/*
2 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include "ipath_verbs.h"
34
35/*
36 * Convert the AETH RNR timeout code into the number of milliseconds.
37 */
38const u32 ib_ipath_rnr_table[32] = {
39 656, /* 0 */
40 1, /* 1 */
41 1, /* 2 */
42 1, /* 3 */
43 1, /* 4 */
44 1, /* 5 */
45 1, /* 6 */
46 1, /* 7 */
47 1, /* 8 */
48 1, /* 9 */
49 1, /* A */
50 1, /* B */
51 1, /* C */
52 1, /* D */
53 2, /* E */
54 2, /* F */
55 3, /* 10 */
56 4, /* 11 */
57 6, /* 12 */
58 8, /* 13 */
59 11, /* 14 */
60 16, /* 15 */
61 21, /* 16 */
62 31, /* 17 */
63 41, /* 18 */
64 62, /* 19 */
65 82, /* 1A */
66 123, /* 1B */
67 164, /* 1C */
68 246, /* 1D */
69 328, /* 1E */
70 492 /* 1F */
71};
72
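/*
 * For reference (an observation, not original commentary): the entries
 * above appear to be the IBTA RNR NAK timer values rounded up to whole
 * milliseconds, e.g. code 0 is the spec's 655.36 ms and code 0x1F is
 * 491.52 ms.  Typical lookup, as done in ipath_rc.c:
 *
 *	qp->s_rnr_timeout =
 *		ib_ipath_rnr_table[(aeth >> IPS_AETH_CREDIT_SHIFT) &
 *				   IPS_AETH_CREDIT_MASK];
 */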
73/**
74 * ipath_insert_rnr_queue - put QP on the RNR timeout list for the device
75 * @qp: the QP
76 *
77 * XXX Use a simple list for now. We might need a priority
78 * queue if we have lots of QPs waiting for RNR timeouts
79 * but that should be rare.
80 */
81void ipath_insert_rnr_queue(struct ipath_qp *qp)
82{
83 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
84 unsigned long flags;
85
86 spin_lock_irqsave(&dev->pending_lock, flags);
87 if (list_empty(&dev->rnrwait))
88 list_add(&qp->timerwait, &dev->rnrwait);
89 else {
90 struct list_head *l = &dev->rnrwait;
91 struct ipath_qp *nqp = list_entry(l->next, struct ipath_qp,
92 timerwait);
93
94 while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
95 qp->s_rnr_timeout -= nqp->s_rnr_timeout;
96 l = l->next;
97 if (l->next == &dev->rnrwait)
98 break;
99 nqp = list_entry(l->next, struct ipath_qp,
100 timerwait);
101 }
102 list_add(&qp->timerwait, l);
103 }
104 spin_unlock_irqrestore(&dev->pending_lock, flags);
105}
106
107/**
108 * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
109 * @qp: the QP
110 * @wr_id_only: update wr_id only, not SGEs
111 *
112 * Return 0 if no RWQE is available, otherwise return 1.
113 *
114 * Called at interrupt level with the QP r_rq.lock held.
115 */
116int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
117{
118 struct ipath_rq *rq;
119 struct ipath_srq *srq;
120 struct ipath_rwqe *wqe;
121 int ret;
122
123 if (!qp->ibqp.srq) {
124 rq = &qp->r_rq;
125 if (unlikely(rq->tail == rq->head)) {
126 ret = 0;
127 goto bail;
128 }
129 wqe = get_rwqe_ptr(rq, rq->tail);
130 qp->r_wr_id = wqe->wr_id;
131 if (!wr_id_only) {
132 qp->r_sge.sge = wqe->sg_list[0];
133 qp->r_sge.sg_list = wqe->sg_list + 1;
134 qp->r_sge.num_sge = wqe->num_sge;
135 qp->r_len = wqe->length;
136 }
137 if (++rq->tail >= rq->size)
138 rq->tail = 0;
139 ret = 1;
140 goto bail;
141 }
142
143 srq = to_isrq(qp->ibqp.srq);
144 rq = &srq->rq;
145 spin_lock(&rq->lock);
146 if (unlikely(rq->tail == rq->head)) {
147 spin_unlock(&rq->lock);
148 ret = 0;
149 goto bail;
150 }
151 wqe = get_rwqe_ptr(rq, rq->tail);
152 qp->r_wr_id = wqe->wr_id;
153 if (!wr_id_only) {
154 qp->r_sge.sge = wqe->sg_list[0];
155 qp->r_sge.sg_list = wqe->sg_list + 1;
156 qp->r_sge.num_sge = wqe->num_sge;
157 qp->r_len = wqe->length;
158 }
159 if (++rq->tail >= rq->size)
160 rq->tail = 0;
161 if (srq->ibsrq.event_handler) {
162 struct ib_event ev;
163 u32 n;
164
165 if (rq->head < rq->tail)
166 n = rq->size + rq->head - rq->tail;
167 else
168 n = rq->head - rq->tail;
169 if (n < srq->limit) {
170 srq->limit = 0;
171 spin_unlock(&rq->lock);
172 ev.device = qp->ibqp.device;
173 ev.element.srq = qp->ibqp.srq;
174 ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
175 srq->ibsrq.event_handler(&ev,
176 srq->ibsrq.srq_context);
177 } else
178 spin_unlock(&rq->lock);
179 } else
180 spin_unlock(&rq->lock);
181 ret = 1;
182
183bail:
184 return ret;
185}
186
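/*
 * Worked example of the SRQ limit check above, with assumed numbers:
 * if rq->size = 8, rq->head = 2 and rq->tail = 5 (after the tail was
 * advanced), then head < tail, so n = size + head - tail = 8 + 2 - 5 =
 * 5 receive WQEs remain posted.  With srq->limit = 6, n < limit, so
 * IB_EVENT_SRQ_LIMIT_REACHED is reported and the limit is cleared.
 */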
187/**
 188 * ipath_ruc_loopback - handle UC and RC loopback requests
189 * @sqp: the loopback QP
190 * @wc: the work completion entry
191 *
192 * This is called from ipath_do_uc_send() or ipath_do_rc_send() to
193 * forward a WQE addressed to the same HCA.
194 * Note that although we are single threaded due to the tasklet, we still
195 * have to protect against post_send(). We don't have to worry about
196 * receive interrupts since this is a connected protocol and all packets
197 * will pass through here.
198 */
199void ipath_ruc_loopback(struct ipath_qp *sqp, struct ib_wc *wc)
200{
201 struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
202 struct ipath_qp *qp;
203 struct ipath_swqe *wqe;
204 struct ipath_sge *sge;
205 unsigned long flags;
206 u64 sdata;
207
208 qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
209 if (!qp) {
210 dev->n_pkt_drops++;
211 return;
212 }
213
214again:
215 spin_lock_irqsave(&sqp->s_lock, flags);
216
217 if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK)) {
218 spin_unlock_irqrestore(&sqp->s_lock, flags);
219 goto done;
220 }
221
222 /* Get the next send request. */
223 if (sqp->s_last == sqp->s_head) {
224 /* Send work queue is empty. */
225 spin_unlock_irqrestore(&sqp->s_lock, flags);
226 goto done;
227 }
228
229 /*
230 * We can rely on the entry not changing without the s_lock
231 * being held until we update s_last.
232 */
233 wqe = get_swqe_ptr(sqp, sqp->s_last);
234 spin_unlock_irqrestore(&sqp->s_lock, flags);
235
236 wc->wc_flags = 0;
237 wc->imm_data = 0;
238
239 sqp->s_sge.sge = wqe->sg_list[0];
240 sqp->s_sge.sg_list = wqe->sg_list + 1;
241 sqp->s_sge.num_sge = wqe->wr.num_sge;
242 sqp->s_len = wqe->length;
243 switch (wqe->wr.opcode) {
244 case IB_WR_SEND_WITH_IMM:
245 wc->wc_flags = IB_WC_WITH_IMM;
246 wc->imm_data = wqe->wr.imm_data;
247 /* FALLTHROUGH */
248 case IB_WR_SEND:
249 spin_lock_irqsave(&qp->r_rq.lock, flags);
250 if (!ipath_get_rwqe(qp, 0)) {
251 rnr_nak:
252 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
253 /* Handle RNR NAK */
254 if (qp->ibqp.qp_type == IB_QPT_UC)
255 goto send_comp;
256 if (sqp->s_rnr_retry == 0) {
257 wc->status = IB_WC_RNR_RETRY_EXC_ERR;
258 goto err;
259 }
260 if (sqp->s_rnr_retry_cnt < 7)
261 sqp->s_rnr_retry--;
262 dev->n_rnr_naks++;
263 sqp->s_rnr_timeout =
264 ib_ipath_rnr_table[sqp->s_min_rnr_timer];
265 ipath_insert_rnr_queue(sqp);
266 goto done;
267 }
268 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
269 break;
270
271 case IB_WR_RDMA_WRITE_WITH_IMM:
272 wc->wc_flags = IB_WC_WITH_IMM;
273 wc->imm_data = wqe->wr.imm_data;
274 spin_lock_irqsave(&qp->r_rq.lock, flags);
275 if (!ipath_get_rwqe(qp, 1))
276 goto rnr_nak;
277 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
278 /* FALLTHROUGH */
279 case IB_WR_RDMA_WRITE:
280 if (wqe->length == 0)
281 break;
282 if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge, wqe->length,
283 wqe->wr.wr.rdma.remote_addr,
284 wqe->wr.wr.rdma.rkey,
285 IB_ACCESS_REMOTE_WRITE))) {
286 acc_err:
287 wc->status = IB_WC_REM_ACCESS_ERR;
288 err:
289 wc->wr_id = wqe->wr.wr_id;
290 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
291 wc->vendor_err = 0;
292 wc->byte_len = 0;
293 wc->qp_num = sqp->ibqp.qp_num;
294 wc->src_qp = sqp->remote_qpn;
295 wc->pkey_index = 0;
296 wc->slid = sqp->remote_ah_attr.dlid;
297 wc->sl = sqp->remote_ah_attr.sl;
298 wc->dlid_path_bits = 0;
299 wc->port_num = 0;
300 ipath_sqerror_qp(sqp, wc);
301 goto done;
302 }
303 break;
304
305 case IB_WR_RDMA_READ:
306 if (unlikely(!ipath_rkey_ok(dev, &sqp->s_sge, wqe->length,
307 wqe->wr.wr.rdma.remote_addr,
308 wqe->wr.wr.rdma.rkey,
309 IB_ACCESS_REMOTE_READ)))
310 goto acc_err;
311 if (unlikely(!(qp->qp_access_flags &
312 IB_ACCESS_REMOTE_READ)))
313 goto acc_err;
314 qp->r_sge.sge = wqe->sg_list[0];
315 qp->r_sge.sg_list = wqe->sg_list + 1;
316 qp->r_sge.num_sge = wqe->wr.num_sge;
317 break;
318
319 case IB_WR_ATOMIC_CMP_AND_SWP:
320 case IB_WR_ATOMIC_FETCH_AND_ADD:
321 if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge, sizeof(u64),
322 wqe->wr.wr.rdma.remote_addr,
323 wqe->wr.wr.rdma.rkey,
324 IB_ACCESS_REMOTE_ATOMIC)))
325 goto acc_err;
326 /* Perform atomic OP and save result. */
327 sdata = wqe->wr.wr.atomic.swap;
328 spin_lock_irqsave(&dev->pending_lock, flags);
329 qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr;
330		if (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
331			*(u64 *) qp->r_sge.sge.vaddr = qp->r_atomic_data +
332				wqe->wr.wr.atomic.compare_add;
333 else if (qp->r_atomic_data == wqe->wr.wr.atomic.compare_add)
334 *(u64 *) qp->r_sge.sge.vaddr = sdata;
335 spin_unlock_irqrestore(&dev->pending_lock, flags);
336 *(u64 *) sqp->s_sge.sge.vaddr = qp->r_atomic_data;
337 goto send_comp;
338
339 default:
340 goto done;
341 }
342
343 sge = &sqp->s_sge.sge;
344 while (sqp->s_len) {
345 u32 len = sqp->s_len;
346
347 if (len > sge->length)
348 len = sge->length;
349 BUG_ON(len == 0);
350 ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
351 sge->vaddr += len;
352 sge->length -= len;
353 sge->sge_length -= len;
354 if (sge->sge_length == 0) {
355 if (--sqp->s_sge.num_sge)
356 *sge = *sqp->s_sge.sg_list++;
357 } else if (sge->length == 0 && sge->mr != NULL) {
358 if (++sge->n >= IPATH_SEGSZ) {
359 if (++sge->m >= sge->mr->mapsz)
360 break;
361 sge->n = 0;
362 }
363 sge->vaddr =
364 sge->mr->map[sge->m]->segs[sge->n].vaddr;
365 sge->length =
366 sge->mr->map[sge->m]->segs[sge->n].length;
367 }
368 sqp->s_len -= len;
369 }
370
371 if (wqe->wr.opcode == IB_WR_RDMA_WRITE ||
372 wqe->wr.opcode == IB_WR_RDMA_READ)
373 goto send_comp;
374
375 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
376 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
377 else
378 wc->opcode = IB_WC_RECV;
379 wc->wr_id = qp->r_wr_id;
380 wc->status = IB_WC_SUCCESS;
381 wc->vendor_err = 0;
382 wc->byte_len = wqe->length;
383 wc->qp_num = qp->ibqp.qp_num;
384 wc->src_qp = qp->remote_qpn;
385 /* XXX do we know which pkey matched? Only needed for GSI. */
386 wc->pkey_index = 0;
387 wc->slid = qp->remote_ah_attr.dlid;
388 wc->sl = qp->remote_ah_attr.sl;
389 wc->dlid_path_bits = 0;
390 /* Signal completion event if the solicited bit is set. */
391 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), wc,
392 wqe->wr.send_flags & IB_SEND_SOLICITED);
393
394send_comp:
395 sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
396
397 if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &sqp->s_flags) ||
398 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
399 wc->wr_id = wqe->wr.wr_id;
400 wc->status = IB_WC_SUCCESS;
401 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
402 wc->vendor_err = 0;
403 wc->byte_len = wqe->length;
404 wc->qp_num = sqp->ibqp.qp_num;
405 wc->src_qp = 0;
406 wc->pkey_index = 0;
407 wc->slid = 0;
408 wc->sl = 0;
409 wc->dlid_path_bits = 0;
410 wc->port_num = 0;
411 ipath_cq_enter(to_icq(sqp->ibqp.send_cq), wc, 0);
412 }
413
414 /* Update s_last now that we are finished with the SWQE */
415 spin_lock_irqsave(&sqp->s_lock, flags);
416 if (++sqp->s_last >= sqp->s_size)
417 sqp->s_last = 0;
418 spin_unlock_irqrestore(&sqp->s_lock, flags);
419 goto again;
420
421done:
422 if (atomic_dec_and_test(&qp->refcount))
423 wake_up(&qp->wait);
424}
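The atomic branch above emulates the two IB atomic opcodes under a lock: both return the original 64-bit value at the target, fetch-and-add adds the compare_add operand, and compare-and-swap stores the swap value only when the target matches compare_add. Below is a minimal userspace sketch of that read-modify-write, with a pthread mutex standing in for the driver's spinlock; the names are illustrative and are not part of the driver.

#include <stdio.h>
#include <stdint.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Locked emulation of the two IB atomics: both return the original
 * 64-bit value at *target.  Fetch-and-add adds "compare_add";
 * compare-and-swap writes "swap" only if *target equals "compare_add".
 */
static uint64_t ib_atomic(uint64_t *target, uint64_t compare_add,
			  uint64_t swap, int is_fetch_add)
{
	uint64_t orig;

	pthread_mutex_lock(&lock);
	orig = *target;
	if (is_fetch_add)
		*target = orig + compare_add;
	else if (orig == compare_add)
		*target = swap;
	pthread_mutex_unlock(&lock);
	return orig;
}

int main(void)
{
	uint64_t mem = 5;

	printf("fadd ret %llu, mem %llu\n",
	       (unsigned long long)ib_atomic(&mem, 3, 0, 1),
	       (unsigned long long)mem);	/* ret 5, mem 8 */
	printf("cswap ret %llu, mem %llu\n",
	       (unsigned long long)ib_atomic(&mem, 8, 42, 0),
	       (unsigned long long)mem);	/* ret 8, mem 42 */
	return 0;
}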
425
426/**
427 * ipath_no_bufs_available - tell the layer driver we need buffers
428 * @qp: the QP that caused the problem
429 * @dev: the device we ran out of buffers on
430 *
431 * Called when we run out of PIO buffers.
432 */
433void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
434{
435 unsigned long flags;
436
437 spin_lock_irqsave(&dev->pending_lock, flags);
438 if (qp->piowait.next == LIST_POISON1)
439 list_add_tail(&qp->piowait, &dev->piowait);
440 spin_unlock_irqrestore(&dev->pending_lock, flags);
441 /*
442 * Note that as soon as ipath_layer_want_buffer() is called and
443 * possibly before it returns, ipath_ib_piobufavail()
444 * could be called. If we are still in the tasklet function,
445	 * tasklet_hi_schedule() will not re-run us until after we have
446	 * returned from the tasklet function.
447 * We clear the tasklet flag now since we are committing to return
448 * from the tasklet function.
449 */
450 clear_bit(IPATH_S_BUSY, &qp->s_flags);
451 tasklet_unlock(&qp->s_task);
452 ipath_layer_want_buffer(dev->dd);
453 dev->n_piowait++;
454}
455
456/**
457 * ipath_post_rc_send - post RC and UC sends
458 * @qp: the QP to post on
459 * @wr: the work request to send
460 */
461int ipath_post_rc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
462{
463 struct ipath_swqe *wqe;
464 unsigned long flags;
465 u32 next;
466 int i, j;
467 int acc;
468 int ret;
469
470 /*
471	 * Don't allow RDMA reads or atomic operations on UC, and reject
472	 * undefined operations.
473	 * Make sure the buffer is large enough to hold the result for atomics.
474 */
475 if (qp->ibqp.qp_type == IB_QPT_UC) {
476 if ((unsigned) wr->opcode >= IB_WR_RDMA_READ) {
477 ret = -EINVAL;
478 goto bail;
479 }
480 } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
481 ret = -EINVAL;
482 goto bail;
483 } else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
484 (wr->num_sge == 0 ||
485 wr->sg_list[0].length < sizeof(u64) ||
486 wr->sg_list[0].addr & (sizeof(u64) - 1))) {
487 ret = -EINVAL;
488 goto bail;
489 }
490 /* IB spec says that num_sge == 0 is OK. */
491 if (wr->num_sge > qp->s_max_sge) {
492 ret = -ENOMEM;
493 goto bail;
494 }
495 spin_lock_irqsave(&qp->s_lock, flags);
496 next = qp->s_head + 1;
497 if (next >= qp->s_size)
498 next = 0;
499 if (next == qp->s_last) {
500 spin_unlock_irqrestore(&qp->s_lock, flags);
501 ret = -EINVAL;
502 goto bail;
503 }
504
505 wqe = get_swqe_ptr(qp, qp->s_head);
506 wqe->wr = *wr;
507 wqe->ssn = qp->s_ssn++;
508 wqe->sg_list[0].mr = NULL;
509 wqe->sg_list[0].vaddr = NULL;
510 wqe->sg_list[0].length = 0;
511 wqe->sg_list[0].sge_length = 0;
512 wqe->length = 0;
513 acc = wr->opcode >= IB_WR_RDMA_READ ? IB_ACCESS_LOCAL_WRITE : 0;
514 for (i = 0, j = 0; i < wr->num_sge; i++) {
515 if (to_ipd(qp->ibqp.pd)->user && wr->sg_list[i].lkey == 0) {
516 spin_unlock_irqrestore(&qp->s_lock, flags);
517 ret = -EINVAL;
518 goto bail;
519 }
520 if (wr->sg_list[i].length == 0)
521 continue;
522 if (!ipath_lkey_ok(&to_idev(qp->ibqp.device)->lk_table,
523 &wqe->sg_list[j], &wr->sg_list[i],
524 acc)) {
525 spin_unlock_irqrestore(&qp->s_lock, flags);
526 ret = -EINVAL;
527 goto bail;
528 }
529 wqe->length += wr->sg_list[i].length;
530 j++;
531 }
532 wqe->wr.num_sge = j;
533 qp->s_head = next;
534 /*
535 * Wake up the send tasklet if the QP is not waiting
536 * for an RNR timeout.
537 */
538 next = qp->s_rnr_timeout;
539 spin_unlock_irqrestore(&qp->s_lock, flags);
540
541 if (next == 0) {
542 if (qp->ibqp.qp_type == IB_QPT_UC)
543 ipath_do_uc_send((unsigned long) qp);
544 else
545 ipath_do_rc_send((unsigned long) qp);
546 }
547
548 ret = 0;
549
550bail:
551 return ret;
552}
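ipath_post_rc_send() treats the send work queue as a circular buffer that always leaves one slot empty: the head is advanced modulo s_size, and the post fails when the next slot would collide with s_last. A small self-contained sketch of that full/empty convention follows; the structure and names are illustrative, not driver types.

#include <stdio.h>

/* Illustrative ring with the same "one slot left empty" convention. */
struct ring {
	unsigned head;	/* next slot to fill (producer) */
	unsigned tail;	/* next slot to drain (consumer) */
	unsigned size;	/* total number of slots */
};

/* Returns 0 on success, -1 if the ring is full. */
static int ring_post(struct ring *r)
{
	unsigned next = r->head + 1;

	if (next >= r->size)
		next = 0;
	if (next == r->tail)	/* would catch up to the consumer: full */
		return -1;
	r->head = next;
	return 0;
}

int main(void)
{
	struct ring r = { .head = 0, .tail = 0, .size = 4 };
	int i;

	/* A ring of size 4 accepts only 3 entries before reporting full. */
	for (i = 0; i < 5; i++)
		printf("post %d -> %s\n", i, ring_post(&r) ? "full" : "ok");
	return 0;
}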
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c
new file mode 100644
index 000000000000..01c4c6c56118
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_srq.c
@@ -0,0 +1,273 @@
1/*
2 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/err.h>
34#include <linux/vmalloc.h>
35
36#include "ipath_verbs.h"
37
38/**
39 * ipath_post_srq_receive - post a receive on a shared receive queue
40 * @ibsrq: the SRQ to post the receive on
41 * @wr: the list of work requests to post
42 * @bad_wr: the first WR to cause a problem is put here
43 *
44 * This may be called from interrupt context.
45 */
46int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
47 struct ib_recv_wr **bad_wr)
48{
49 struct ipath_srq *srq = to_isrq(ibsrq);
50 struct ipath_ibdev *dev = to_idev(ibsrq->device);
51 unsigned long flags;
52 int ret;
53
54 for (; wr; wr = wr->next) {
55 struct ipath_rwqe *wqe;
56 u32 next;
57 int i, j;
58
59 if (wr->num_sge > srq->rq.max_sge) {
60 *bad_wr = wr;
61 ret = -ENOMEM;
62 goto bail;
63 }
64
65 spin_lock_irqsave(&srq->rq.lock, flags);
66 next = srq->rq.head + 1;
67 if (next >= srq->rq.size)
68 next = 0;
69 if (next == srq->rq.tail) {
70 spin_unlock_irqrestore(&srq->rq.lock, flags);
71 *bad_wr = wr;
72 ret = -ENOMEM;
73 goto bail;
74 }
75
76 wqe = get_rwqe_ptr(&srq->rq, srq->rq.head);
77 wqe->wr_id = wr->wr_id;
78 wqe->sg_list[0].mr = NULL;
79 wqe->sg_list[0].vaddr = NULL;
80 wqe->sg_list[0].length = 0;
81 wqe->sg_list[0].sge_length = 0;
82 wqe->length = 0;
83 for (i = 0, j = 0; i < wr->num_sge; i++) {
84 /* Check LKEY */
85 if (to_ipd(srq->ibsrq.pd)->user &&
86 wr->sg_list[i].lkey == 0) {
87 spin_unlock_irqrestore(&srq->rq.lock,
88 flags);
89 *bad_wr = wr;
90 ret = -EINVAL;
91 goto bail;
92 }
93 if (wr->sg_list[i].length == 0)
94 continue;
95 if (!ipath_lkey_ok(&dev->lk_table,
96 &wqe->sg_list[j],
97 &wr->sg_list[i],
98 IB_ACCESS_LOCAL_WRITE)) {
99 spin_unlock_irqrestore(&srq->rq.lock,
100 flags);
101 *bad_wr = wr;
102 ret = -EINVAL;
103 goto bail;
104 }
105 wqe->length += wr->sg_list[i].length;
106 j++;
107 }
108 wqe->num_sge = j;
109 srq->rq.head = next;
110 spin_unlock_irqrestore(&srq->rq.lock, flags);
111 }
112 ret = 0;
113
114bail:
115 return ret;
116}
117
118/**
119 * ipath_create_srq - create a shared receive queue
120 * @ibpd: the protection domain of the SRQ to create
121 * @srq_init_attr: the attributes of the SRQ
122 * @udata: not used by the InfiniPath verbs driver
123 */
124struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
125 struct ib_srq_init_attr *srq_init_attr,
126 struct ib_udata *udata)
127{
128 struct ipath_srq *srq;
129 u32 sz;
130 struct ib_srq *ret;
131
132 if (srq_init_attr->attr.max_sge < 1) {
133 ret = ERR_PTR(-EINVAL);
134 goto bail;
135 }
136
137 srq = kmalloc(sizeof(*srq), GFP_KERNEL);
138 if (!srq) {
139 ret = ERR_PTR(-ENOMEM);
140 goto bail;
141 }
142
143 /*
144 * Need to use vmalloc() if we want to support large #s of entries.
145 */
146 srq->rq.size = srq_init_attr->attr.max_wr + 1;
147 sz = sizeof(struct ipath_sge) * srq_init_attr->attr.max_sge +
148 sizeof(struct ipath_rwqe);
149 srq->rq.wq = vmalloc(srq->rq.size * sz);
150 if (!srq->rq.wq) {
151 kfree(srq);
152 ret = ERR_PTR(-ENOMEM);
153 goto bail;
154 }
155
156 /*
157 * ib_create_srq() will initialize srq->ibsrq.
158 */
159 spin_lock_init(&srq->rq.lock);
160 srq->rq.head = 0;
161 srq->rq.tail = 0;
162 srq->rq.max_sge = srq_init_attr->attr.max_sge;
163 srq->limit = srq_init_attr->attr.srq_limit;
164
165 ret = &srq->ibsrq;
166
167bail:
168 return ret;
169}
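The SRQ work queue is a single vmalloc'ed array whose entry stride is sizeof(struct ipath_rwqe) plus max_sge scatter/gather elements, so entries are addressed with byte arithmetic rather than ordinary array indexing (get_rwqe_ptr(), defined in ipath_verbs.h and not shown here, presumably does the same, and ipath_modify_srq() below steps through its new buffer the same way). A hedged userspace sketch of that variable-stride indexing, with simplified stand-in structures:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the driver structures (illustrative only). */
struct sge  { void *vaddr; unsigned length; };
struct rwqe { unsigned long long wr_id; int num_sge; struct sge sg_list[]; };

/* Byte-stride indexing into an array of variable-size entries. */
static struct rwqe *rwqe_ptr(void *wq, unsigned sz, unsigned n)
{
	return (struct rwqe *)((char *)wq + (size_t)n * sz);
}

int main(void)
{
	unsigned max_sge = 3, size = 8;
	/* Entry stride: fixed header plus max_sge scatter/gather entries. */
	unsigned sz = sizeof(struct rwqe) + max_sge * sizeof(struct sge);
	void *wq = calloc(size, sz);
	unsigned i;

	if (!wq)
		return 1;
	for (i = 0; i < size; i++)
		rwqe_ptr(wq, sz, i)->wr_id = i;
	printf("entry stride %u bytes, entry 5 wr_id %llu\n",
	       sz, rwqe_ptr(wq, sz, 5)->wr_id);
	free(wq);
	return 0;
}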
170
171/**
172 * ipath_modify_srq - modify a shared receive queue
173 * @ibsrq: the SRQ to modify
174 * @attr: the new attributes of the SRQ
175 * @attr_mask: indicates which attributes to modify
176 */
177int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
178 enum ib_srq_attr_mask attr_mask)
179{
180 struct ipath_srq *srq = to_isrq(ibsrq);
181 unsigned long flags;
182 int ret;
183
184 if (attr_mask & IB_SRQ_LIMIT) {
185 spin_lock_irqsave(&srq->rq.lock, flags);
186 srq->limit = attr->srq_limit;
187 spin_unlock_irqrestore(&srq->rq.lock, flags);
188 }
189 if (attr_mask & IB_SRQ_MAX_WR) {
190 u32 size = attr->max_wr + 1;
191 struct ipath_rwqe *wq, *p;
192 u32 n;
193 u32 sz;
194
195 if (attr->max_sge < srq->rq.max_sge) {
196 ret = -EINVAL;
197 goto bail;
198 }
199
200 sz = sizeof(struct ipath_rwqe) +
201 attr->max_sge * sizeof(struct ipath_sge);
202 wq = vmalloc(size * sz);
203 if (!wq) {
204 ret = -ENOMEM;
205 goto bail;
206 }
207
208 spin_lock_irqsave(&srq->rq.lock, flags);
209 if (srq->rq.head < srq->rq.tail)
210 n = srq->rq.size + srq->rq.head - srq->rq.tail;
211 else
212 n = srq->rq.head - srq->rq.tail;
213 if (size <= n || size <= srq->limit) {
214 spin_unlock_irqrestore(&srq->rq.lock, flags);
215 vfree(wq);
216 ret = -EINVAL;
217 goto bail;
218 }
219 n = 0;
220 p = wq;
221 while (srq->rq.tail != srq->rq.head) {
222 struct ipath_rwqe *wqe;
223 int i;
224
225 wqe = get_rwqe_ptr(&srq->rq, srq->rq.tail);
226 p->wr_id = wqe->wr_id;
227 p->length = wqe->length;
228 p->num_sge = wqe->num_sge;
229 for (i = 0; i < wqe->num_sge; i++)
230 p->sg_list[i] = wqe->sg_list[i];
231 n++;
232 p = (struct ipath_rwqe *)((char *) p + sz);
233 if (++srq->rq.tail >= srq->rq.size)
234 srq->rq.tail = 0;
235 }
236 vfree(srq->rq.wq);
237 srq->rq.wq = wq;
238 srq->rq.size = size;
239 srq->rq.head = n;
240 srq->rq.tail = 0;
241 srq->rq.max_sge = attr->max_sge;
242 spin_unlock_irqrestore(&srq->rq.lock, flags);
243 }
244
245 ret = 0;
246
247bail:
248 return ret;
249}
250
251int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
252{
253 struct ipath_srq *srq = to_isrq(ibsrq);
254
255 attr->max_wr = srq->rq.size - 1;
256 attr->max_sge = srq->rq.max_sge;
257 attr->srq_limit = srq->limit;
258 return 0;
259}
260
261/**
262 * ipath_destroy_srq - destroy a shared receive queue
263 * @ibsrq: the SRQ to destroy
264 */
265int ipath_destroy_srq(struct ib_srq *ibsrq)
266{
267 struct ipath_srq *srq = to_isrq(ibsrq);
268
269 vfree(srq->rq.wq);
270 kfree(srq);
271
272 return 0;
273}
diff --git a/drivers/infiniband/hw/ipath/ipath_stats.c b/drivers/infiniband/hw/ipath/ipath_stats.c
new file mode 100644
index 000000000000..fe209137ee74
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_stats.c
@@ -0,0 +1,303 @@
1/*
2 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/pci.h>
34
35#include "ipath_kernel.h"
36
37struct infinipath_stats ipath_stats;
38
39/**
40 * ipath_snap_cntr - snapshot a chip counter
41 * @dd: the infinipath device
42 * @creg: the counter to snapshot
43 *
44 * called from add_timer and user counter read calls, to deal with
45 * counters that wrap in "human time". The words sent and received, and
46 * the packets sent and received are all that we worry about. For now,
47 * at least, we don't worry about error counters, because if they wrap
48 * that quickly, we probably don't care. We may eventually just make this
 49 * handle all the counters. Word counters can wrap in about 20 seconds
50 * of full bandwidth traffic, packet counters in a few hours.
51 */
52
53u64 ipath_snap_cntr(struct ipath_devdata *dd, ipath_creg creg)
54{
55 u32 val, reg64 = 0;
56 u64 val64;
57 unsigned long t0, t1;
58 u64 ret;
59
60 t0 = jiffies;
61 /* If fast increment counters are only 32 bits, snapshot them,
62 * and maintain them as 64bit values in the driver */
63 if (!(dd->ipath_flags & IPATH_32BITCOUNTERS) &&
64 (creg == dd->ipath_cregs->cr_wordsendcnt ||
65 creg == dd->ipath_cregs->cr_wordrcvcnt ||
66 creg == dd->ipath_cregs->cr_pktsendcnt ||
67 creg == dd->ipath_cregs->cr_pktrcvcnt)) {
68 val64 = ipath_read_creg(dd, creg);
69 val = val64 == ~0ULL ? ~0U : 0;
70 reg64 = 1;
71 } else /* val64 just to keep gcc quiet... */
72 val64 = val = ipath_read_creg32(dd, creg);
73 /*
74 * See if a second has passed. This is just a way to detect things
75 * that are quite broken. Normally this should take just a few
76 * cycles (the check is for long enough that we don't care if we get
 77 * pre-empted). An Opteron HT I/O read timeout is 4 seconds with
 78 * normal NB values.
79 */
80 t1 = jiffies;
81 if (time_before(t0 + HZ, t1) && val == -1) {
82 ipath_dev_err(dd, "Error! Read counter 0x%x timed out\n",
83 creg);
84 ret = 0ULL;
85 goto bail;
86 }
87 if (reg64) {
88 ret = val64;
89 goto bail;
90 }
91
92 if (creg == dd->ipath_cregs->cr_wordsendcnt) {
93 if (val != dd->ipath_lastsword) {
94 dd->ipath_sword += val - dd->ipath_lastsword;
95 dd->ipath_lastsword = val;
96 }
97 val64 = dd->ipath_sword;
98 } else if (creg == dd->ipath_cregs->cr_wordrcvcnt) {
99 if (val != dd->ipath_lastrword) {
100 dd->ipath_rword += val - dd->ipath_lastrword;
101 dd->ipath_lastrword = val;
102 }
103 val64 = dd->ipath_rword;
104 } else if (creg == dd->ipath_cregs->cr_pktsendcnt) {
105 if (val != dd->ipath_lastspkts) {
106 dd->ipath_spkts += val - dd->ipath_lastspkts;
107 dd->ipath_lastspkts = val;
108 }
109 val64 = dd->ipath_spkts;
110 } else if (creg == dd->ipath_cregs->cr_pktrcvcnt) {
111 if (val != dd->ipath_lastrpkts) {
112 dd->ipath_rpkts += val - dd->ipath_lastrpkts;
113 dd->ipath_lastrpkts = val;
114 }
115 val64 = dd->ipath_rpkts;
116 } else
117 val64 = (u64) val;
118
119 ret = val64;
120
121bail:
122 return ret;
123}
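ipath_snap_cntr() folds a rolling 32-bit hardware count into a 64-bit software total by accumulating the unsigned difference from the last snapshot; the subtraction stays correct across a counter wrap as long as the counter is sampled at least once per wrap period. A minimal sketch of that accumulation, with illustrative names:

#include <stdio.h>
#include <stdint.h>

/* Accumulate a 32-bit rolling hardware counter into a 64-bit total.
 * Unsigned subtraction yields the correct delta even across a wrap,
 * provided the counter is sampled at least once per wrap period.
 */
static void accumulate(uint64_t *total, uint32_t *last, uint32_t now)
{
	if (now != *last) {
		*total += (uint32_t)(now - *last);
		*last = now;
	}
}

int main(void)
{
	uint64_t total = 0;
	uint32_t last = 0;

	accumulate(&total, &last, 0xfffffff0u);	/* close to wrapping */
	accumulate(&total, &last, 0x00000010u);	/* wrapped around */
	printf("total = %llu\n", (unsigned long long)total);	/* 4294967312 */
	return 0;
}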
124
125/**
126 * ipath_qcheck - print delta of egrfull/hdrqfull errors for kernel ports
127 * @dd: the infinipath device
128 *
129 * Print the delta of egrfull/hdrqfull errors for kernel ports no more than
130 * every 5 seconds. User processes are printed at close, but the kernel doesn't
131 * close, so... Separate routine so it may be called from other places someday,
132 * and so the function name is meaningful when printed by _IPATH_INFO.
133 */
134static void ipath_qcheck(struct ipath_devdata *dd)
135{
136 static u64 last_tot_hdrqfull;
137 size_t blen = 0;
138 char buf[128];
139
140 *buf = 0;
141 if (dd->ipath_pd[0]->port_hdrqfull != dd->ipath_p0_hdrqfull) {
142 blen = snprintf(buf, sizeof buf, "port 0 hdrqfull %u",
143 dd->ipath_pd[0]->port_hdrqfull -
144 dd->ipath_p0_hdrqfull);
145 dd->ipath_p0_hdrqfull = dd->ipath_pd[0]->port_hdrqfull;
146 }
147 if (ipath_stats.sps_etidfull != dd->ipath_last_tidfull) {
148 blen += snprintf(buf + blen, sizeof buf - blen,
149 "%srcvegrfull %llu",
150 blen ? ", " : "",
151 (unsigned long long)
152 (ipath_stats.sps_etidfull -
153 dd->ipath_last_tidfull));
154 dd->ipath_last_tidfull = ipath_stats.sps_etidfull;
155 }
156
157 /*
158 * this is actually the number of hdrq full interrupts, not actual
159 * events, but at the moment that's mostly what I'm interested in.
160 * Actual count, etc. is in the counters, if needed. For production
161 * users this won't ordinarily be printed.
162 */
163
164 if ((ipath_debug & (__IPATH_PKTDBG | __IPATH_DBG)) &&
165 ipath_stats.sps_hdrqfull != last_tot_hdrqfull) {
166 blen += snprintf(buf + blen, sizeof buf - blen,
167 "%shdrqfull %llu (all ports)",
168 blen ? ", " : "",
169 (unsigned long long)
170 (ipath_stats.sps_hdrqfull -
171 last_tot_hdrqfull));
172 last_tot_hdrqfull = ipath_stats.sps_hdrqfull;
173 }
174 if (blen)
175 ipath_dbg("%s\n", buf);
176
177 if (dd->ipath_port0head != (u32)
178 le64_to_cpu(*dd->ipath_hdrqtailptr)) {
179 if (dd->ipath_lastport0rcv_cnt ==
180 ipath_stats.sps_port0pkts) {
181 ipath_cdbg(PKT, "missing rcv interrupts? "
182 "port0 hd=%llx tl=%x; port0pkts %llx\n",
183 (unsigned long long)
184 le64_to_cpu(*dd->ipath_hdrqtailptr),
185 dd->ipath_port0head,
186 (unsigned long long)
187 ipath_stats.sps_port0pkts);
188 ipath_kreceive(dd);
189 }
190 dd->ipath_lastport0rcv_cnt = ipath_stats.sps_port0pkts;
191 }
192}
193
194/**
195 * ipath_get_faststats - get word counters from chip before they overflow
196 * @opaque: a pointer to the infinipath device's ipath_devdata
197 *
198 * called from add_timer
199 */
200void ipath_get_faststats(unsigned long opaque)
201{
202 struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
203 u32 val;
204 static unsigned cnt;
205
206 /*
207 * don't access the chip while running diags, or memory diags can
208 * fail
209 */
210 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT) ||
211 ipath_diag_inuse)
212		/* but re-arm the timer, for the diags case; it won't hurt otherwise */
213 goto done;
214
215 if (dd->ipath_flags & IPATH_32BITCOUNTERS) {
216 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
217 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
218 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
219 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
220 }
221
222 ipath_qcheck(dd);
223
224 /*
225 * deal with repeat error suppression. Doesn't really matter if
226 * last error was almost a full interval ago, or just a few usecs
227 * ago; still won't get more than 2 per interval. We may want
228 * longer intervals for this eventually, could do with mod, counter
229 * or separate timer. Also see code in ipath_handle_errors() and
230 * ipath_handle_hwerrors().
231 */
232
233 if (dd->ipath_lasterror)
234 dd->ipath_lasterror = 0;
235 if (dd->ipath_lasthwerror)
236 dd->ipath_lasthwerror = 0;
237 if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs)
238 && time_after(jiffies, dd->ipath_unmasktime)) {
239 char ebuf[256];
240 ipath_decode_err(ebuf, sizeof ebuf,
241 (dd->ipath_maskederrs & ~dd->
242 ipath_ignorederrs));
243 if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) &
244 ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL))
245 ipath_dev_err(dd, "Re-enabling masked errors "
246 "(%s)\n", ebuf);
247 else {
248 /*
249 * rcvegrfull and rcvhdrqfull are "normal", for some
250 * types of processes (mostly benchmarks) that send
251 * huge numbers of messages, while not processing
252 * them. So only complain about these at debug
253 * level.
254 */
255 ipath_dbg("Disabling frequent queue full errors "
256 "(%s)\n", ebuf);
257 }
258 dd->ipath_maskederrs = dd->ipath_ignorederrs;
259 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
260 ~dd->ipath_maskederrs);
261 }
262
263 /* limit qfull messages to ~one per minute per port */
264 if ((++cnt & 0x10)) {
265 for (val = dd->ipath_cfgports - 1; ((int)val) >= 0;
266 val--) {
267 if (dd->ipath_lastegrheads[val] != -1)
268 dd->ipath_lastegrheads[val] = -1;
269 if (dd->ipath_lastrcvhdrqtails[val] != -1)
270 dd->ipath_lastrcvhdrqtails[val] = -1;
271 }
272 }
273
274 if (dd->ipath_nosma_bufs) {
275 dd->ipath_nosma_secs += 5;
276 if (dd->ipath_nosma_secs >= 30) {
277 ipath_cdbg(SMA, "No SMA bufs avail %u seconds; "
278 "cancelling pending sends\n",
279 dd->ipath_nosma_secs);
280 /*
281 * issue an abort as well, in case we have a packet
282 * stuck in launch fifo. This could corrupt an
283 * outgoing user packet in the worst case,
284			 * but this situation is pretty catastrophic anyway.
285 */
286 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
287 INFINIPATH_S_ABORT);
288 ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf,
289 dd->ipath_piobcnt2k +
290 dd->ipath_piobcnt4k -
291 dd->ipath_lastport_piobuf);
292 /* start again, if necessary */
293 dd->ipath_nosma_secs = 0;
294 } else
295 ipath_cdbg(SMA, "No SMA bufs avail %u tries, "
296 "after %u seconds\n",
297 dd->ipath_nosma_bufs,
298 dd->ipath_nosma_secs);
299 }
300
301done:
302 mod_timer(&dd->ipath_stats_timer, jiffies + HZ * 5);
303}
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
new file mode 100644
index 000000000000..32acd8048b49
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -0,0 +1,778 @@
1/*
2 * Copyright (c) 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/ctype.h>
34#include <linux/pci.h>
35
36#include "ipath_kernel.h"
37#include "ips_common.h"
38#include "ipath_layer.h"
39
40/**
41 * ipath_parse_ushort - parse an unsigned short value in an arbitrary base
42 * @str: the string containing the number
43 * @valp: where to put the result
44 *
 45 * Returns the number of bytes consumed, or a negative value on error.
46 */
47int ipath_parse_ushort(const char *str, unsigned short *valp)
48{
49 unsigned long val;
50 char *end;
51 int ret;
52
53 if (!isdigit(str[0])) {
54 ret = -EINVAL;
55 goto bail;
56 }
57
58 val = simple_strtoul(str, &end, 0);
59
60 if (val > 0xffff) {
61 ret = -EINVAL;
62 goto bail;
63 }
64
65 *valp = val;
66
67 ret = end + 1 - str;
68 if (ret == 0)
69 ret = -EINVAL;
70
71bail:
72 return ret;
73}
74
75static ssize_t show_version(struct device_driver *dev, char *buf)
76{
77 /* The string printed here is already newline-terminated. */
78 return scnprintf(buf, PAGE_SIZE, "%s", ipath_core_version);
79}
80
81static ssize_t show_num_units(struct device_driver *dev, char *buf)
82{
83 return scnprintf(buf, PAGE_SIZE, "%d\n",
84 ipath_count_units(NULL, NULL, NULL));
85}
86
87#define DRIVER_STAT(name, attr) \
88 static ssize_t show_stat_##name(struct device_driver *dev, \
89 char *buf) \
90 { \
91 return scnprintf( \
92 buf, PAGE_SIZE, "%llu\n", \
93 (unsigned long long) ipath_stats.sps_ ##attr); \
94 } \
95 static DRIVER_ATTR(name, S_IRUGO, show_stat_##name, NULL)
96
97DRIVER_STAT(intrs, ints);
98DRIVER_STAT(err_intrs, errints);
99DRIVER_STAT(errs, errs);
100DRIVER_STAT(pkt_errs, pkterrs);
101DRIVER_STAT(crc_errs, crcerrs);
102DRIVER_STAT(hw_errs, hwerrs);
103DRIVER_STAT(ib_link, iblink);
104DRIVER_STAT(port0_pkts, port0pkts);
105DRIVER_STAT(ether_spkts, ether_spkts);
106DRIVER_STAT(ether_rpkts, ether_rpkts);
107DRIVER_STAT(sma_spkts, sma_spkts);
108DRIVER_STAT(sma_rpkts, sma_rpkts);
109DRIVER_STAT(hdrq_full, hdrqfull);
110DRIVER_STAT(etid_full, etidfull);
111DRIVER_STAT(no_piobufs, nopiobufs);
112DRIVER_STAT(ports, ports);
113DRIVER_STAT(pkey0, pkeys[0]);
114DRIVER_STAT(pkey1, pkeys[1]);
115DRIVER_STAT(pkey2, pkeys[2]);
116DRIVER_STAT(pkey3, pkeys[3]);
117/* XXX fix the following when dynamic table of devices used */
118DRIVER_STAT(lid0, lid[0]);
119DRIVER_STAT(lid1, lid[1]);
120DRIVER_STAT(lid2, lid[2]);
121DRIVER_STAT(lid3, lid[3]);
122
123DRIVER_STAT(nports, nports);
124DRIVER_STAT(null_intr, nullintr);
125DRIVER_STAT(max_pkts_call, maxpkts_call);
126DRIVER_STAT(avg_pkts_call, avgpkts_call);
127DRIVER_STAT(page_locks, pagelocks);
128DRIVER_STAT(page_unlocks, pageunlocks);
129DRIVER_STAT(krdrops, krdrops);
130/* XXX fix the following when dynamic table of devices used */
131DRIVER_STAT(mlid0, mlid[0]);
132DRIVER_STAT(mlid1, mlid[1]);
133DRIVER_STAT(mlid2, mlid[2]);
134DRIVER_STAT(mlid3, mlid[3]);
135
136static struct attribute *driver_stat_attributes[] = {
137 &driver_attr_intrs.attr,
138 &driver_attr_err_intrs.attr,
139 &driver_attr_errs.attr,
140 &driver_attr_pkt_errs.attr,
141 &driver_attr_crc_errs.attr,
142 &driver_attr_hw_errs.attr,
143 &driver_attr_ib_link.attr,
144 &driver_attr_port0_pkts.attr,
145 &driver_attr_ether_spkts.attr,
146 &driver_attr_ether_rpkts.attr,
147 &driver_attr_sma_spkts.attr,
148 &driver_attr_sma_rpkts.attr,
149 &driver_attr_hdrq_full.attr,
150 &driver_attr_etid_full.attr,
151 &driver_attr_no_piobufs.attr,
152 &driver_attr_ports.attr,
153 &driver_attr_pkey0.attr,
154 &driver_attr_pkey1.attr,
155 &driver_attr_pkey2.attr,
156 &driver_attr_pkey3.attr,
157 &driver_attr_lid0.attr,
158 &driver_attr_lid1.attr,
159 &driver_attr_lid2.attr,
160 &driver_attr_lid3.attr,
161 &driver_attr_nports.attr,
162 &driver_attr_null_intr.attr,
163 &driver_attr_max_pkts_call.attr,
164 &driver_attr_avg_pkts_call.attr,
165 &driver_attr_page_locks.attr,
166 &driver_attr_page_unlocks.attr,
167 &driver_attr_krdrops.attr,
168 &driver_attr_mlid0.attr,
169 &driver_attr_mlid1.attr,
170 &driver_attr_mlid2.attr,
171 &driver_attr_mlid3.attr,
172 NULL
173};
174
175static struct attribute_group driver_stat_attr_group = {
176 .name = "stats",
177 .attrs = driver_stat_attributes
178};
179
180static ssize_t show_status(struct device *dev,
181 struct device_attribute *attr,
182 char *buf)
183{
184 struct ipath_devdata *dd = dev_get_drvdata(dev);
185 ssize_t ret;
186
187 if (!dd->ipath_statusp) {
188 ret = -EINVAL;
189 goto bail;
190 }
191
192 ret = scnprintf(buf, PAGE_SIZE, "0x%llx\n",
193 (unsigned long long) *(dd->ipath_statusp));
194
195bail:
196 return ret;
197}
198
199static const char *ipath_status_str[] = {
200 "Initted",
201 "Disabled",
202 "Admin_Disabled",
203 "OIB_SMA",
204 "SMA",
205 "Present",
206 "IB_link_up",
207 "IB_configured",
208 "NoIBcable",
209 "Fatal_Hardware_Error",
210 NULL,
211};
212
213static ssize_t show_status_str(struct device *dev,
214 struct device_attribute *attr,
215 char *buf)
216{
217 struct ipath_devdata *dd = dev_get_drvdata(dev);
218 int i, any;
219 u64 s;
220 ssize_t ret;
221
222 if (!dd->ipath_statusp) {
223 ret = -EINVAL;
224 goto bail;
225 }
226
227 s = *(dd->ipath_statusp);
228 *buf = '\0';
229 for (any = i = 0; s && ipath_status_str[i]; i++) {
230 if (s & 1) {
231 if (any && strlcat(buf, " ", PAGE_SIZE) >=
232 PAGE_SIZE)
233 /* overflow */
234 break;
235 if (strlcat(buf, ipath_status_str[i],
236 PAGE_SIZE) >= PAGE_SIZE)
237 break;
238 any = 1;
239 }
240 s >>= 1;
241 }
242 if (any)
243 strlcat(buf, "\n", PAGE_SIZE);
244
245 ret = strlen(buf);
246
247bail:
248 return ret;
249}
250
251static ssize_t show_boardversion(struct device *dev,
252 struct device_attribute *attr,
253 char *buf)
254{
255 struct ipath_devdata *dd = dev_get_drvdata(dev);
256 /* The string printed here is already newline-terminated. */
257 return scnprintf(buf, PAGE_SIZE, "%s", dd->ipath_boardversion);
258}
259
260static ssize_t show_lid(struct device *dev,
261 struct device_attribute *attr,
262 char *buf)
263{
264 struct ipath_devdata *dd = dev_get_drvdata(dev);
265
266 return scnprintf(buf, PAGE_SIZE, "0x%x\n", dd->ipath_lid);
267}
268
269static ssize_t store_lid(struct device *dev,
270 struct device_attribute *attr,
271 const char *buf,
272 size_t count)
273{
274 struct ipath_devdata *dd = dev_get_drvdata(dev);
275 u16 lid;
276 int ret;
277
278 ret = ipath_parse_ushort(buf, &lid);
279 if (ret < 0)
280 goto invalid;
281
282 if (lid == 0 || lid >= 0xc000) {
283 ret = -EINVAL;
284 goto invalid;
285 }
286
287 ipath_set_sps_lid(dd, lid, 0);
288
289 goto bail;
290invalid:
291 ipath_dev_err(dd, "attempt to set invalid LID\n");
292bail:
293 return ret;
294}
295
296static ssize_t show_mlid(struct device *dev,
297 struct device_attribute *attr,
298 char *buf)
299{
300 struct ipath_devdata *dd = dev_get_drvdata(dev);
301
302 return scnprintf(buf, PAGE_SIZE, "0x%x\n", dd->ipath_mlid);
303}
304
305static ssize_t store_mlid(struct device *dev,
306 struct device_attribute *attr,
307 const char *buf,
308 size_t count)
309{
310 struct ipath_devdata *dd = dev_get_drvdata(dev);
311 int unit;
312 u16 mlid;
313 int ret;
314
315 ret = ipath_parse_ushort(buf, &mlid);
316 if (ret < 0)
317 goto invalid;
318
319 unit = dd->ipath_unit;
320
321 dd->ipath_mlid = mlid;
322 ipath_stats.sps_mlid[unit] = mlid;
323 ipath_layer_intr(dd, IPATH_LAYER_INT_BCAST);
324
325 goto bail;
326invalid:
327 ipath_dev_err(dd, "attempt to set invalid MLID\n");
328bail:
329 return ret;
330}
331
332static ssize_t show_guid(struct device *dev,
333 struct device_attribute *attr,
334 char *buf)
335{
336 struct ipath_devdata *dd = dev_get_drvdata(dev);
337 u8 *guid;
338
339 guid = (u8 *) & (dd->ipath_guid);
340
341 return scnprintf(buf, PAGE_SIZE,
342 "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
343 guid[0], guid[1], guid[2], guid[3],
344 guid[4], guid[5], guid[6], guid[7]);
345}
346
347static ssize_t store_guid(struct device *dev,
348 struct device_attribute *attr,
349 const char *buf,
350 size_t count)
351{
352 struct ipath_devdata *dd = dev_get_drvdata(dev);
353 ssize_t ret;
354 unsigned short guid[8];
355 __be64 nguid;
356 u8 *ng;
357 int i;
358
359 if (sscanf(buf, "%hx:%hx:%hx:%hx:%hx:%hx:%hx:%hx",
360 &guid[0], &guid[1], &guid[2], &guid[3],
361 &guid[4], &guid[5], &guid[6], &guid[7]) != 8)
362 goto invalid;
363
364 ng = (u8 *) &nguid;
365
366 for (i = 0; i < 8; i++) {
367 if (guid[i] > 0xff)
368 goto invalid;
369 ng[i] = guid[i];
370 }
371
372 dd->ipath_guid = nguid;
373 dd->ipath_nguid = 1;
374
375 ret = strlen(buf);
376 goto bail;
377
378invalid:
379 ipath_dev_err(dd, "attempt to set invalid GUID\n");
380 ret = -EINVAL;
381
382bail:
383 return ret;
384}
385
386static ssize_t show_nguid(struct device *dev,
387 struct device_attribute *attr,
388 char *buf)
389{
390 struct ipath_devdata *dd = dev_get_drvdata(dev);
391
392 return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_nguid);
393}
394
395static ssize_t show_serial(struct device *dev,
396 struct device_attribute *attr,
397 char *buf)
398{
399 struct ipath_devdata *dd = dev_get_drvdata(dev);
400
401 buf[sizeof dd->ipath_serial] = '\0';
402 memcpy(buf, dd->ipath_serial, sizeof dd->ipath_serial);
403 strcat(buf, "\n");
404 return strlen(buf);
405}
406
407static ssize_t show_unit(struct device *dev,
408 struct device_attribute *attr,
409 char *buf)
410{
411 struct ipath_devdata *dd = dev_get_drvdata(dev);
412
413 return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_unit);
414}
415
416#define DEVICE_COUNTER(name, attr) \
417 static ssize_t show_counter_##name(struct device *dev, \
418 struct device_attribute *attr, \
419 char *buf) \
420 { \
421 struct ipath_devdata *dd = dev_get_drvdata(dev); \
422 return scnprintf(\
423 buf, PAGE_SIZE, "%llu\n", (unsigned long long) \
424 ipath_snap_cntr( \
425 dd, offsetof(struct infinipath_counters, \
426 attr) / sizeof(u64))); \
427 } \
428 static DEVICE_ATTR(name, S_IRUGO, show_counter_##name, NULL);
429
430DEVICE_COUNTER(ib_link_downeds, IBLinkDownedCnt);
431DEVICE_COUNTER(ib_link_err_recoveries, IBLinkErrRecoveryCnt);
432DEVICE_COUNTER(ib_status_changes, IBStatusChangeCnt);
433DEVICE_COUNTER(ib_symbol_errs, IBSymbolErrCnt);
434DEVICE_COUNTER(lb_flow_stalls, LBFlowStallCnt);
435DEVICE_COUNTER(lb_ints, LBIntCnt);
436DEVICE_COUNTER(rx_bad_formats, RxBadFormatCnt);
437DEVICE_COUNTER(rx_buf_ovfls, RxBufOvflCnt);
438DEVICE_COUNTER(rx_data_pkts, RxDataPktCnt);
439DEVICE_COUNTER(rx_dropped_pkts, RxDroppedPktCnt);
440DEVICE_COUNTER(rx_dwords, RxDwordCnt);
441DEVICE_COUNTER(rx_ebps, RxEBPCnt);
442DEVICE_COUNTER(rx_flow_ctrl_errs, RxFlowCtrlErrCnt);
443DEVICE_COUNTER(rx_flow_pkts, RxFlowPktCnt);
444DEVICE_COUNTER(rx_icrc_errs, RxICRCErrCnt);
445DEVICE_COUNTER(rx_len_errs, RxLenErrCnt);
446DEVICE_COUNTER(rx_link_problems, RxLinkProblemCnt);
447DEVICE_COUNTER(rx_lpcrc_errs, RxLPCRCErrCnt);
448DEVICE_COUNTER(rx_max_min_len_errs, RxMaxMinLenErrCnt);
449DEVICE_COUNTER(rx_p0_hdr_egr_ovfls, RxP0HdrEgrOvflCnt);
450DEVICE_COUNTER(rx_p1_hdr_egr_ovfls, RxP1HdrEgrOvflCnt);
451DEVICE_COUNTER(rx_p2_hdr_egr_ovfls, RxP2HdrEgrOvflCnt);
452DEVICE_COUNTER(rx_p3_hdr_egr_ovfls, RxP3HdrEgrOvflCnt);
453DEVICE_COUNTER(rx_p4_hdr_egr_ovfls, RxP4HdrEgrOvflCnt);
454DEVICE_COUNTER(rx_p5_hdr_egr_ovfls, RxP5HdrEgrOvflCnt);
455DEVICE_COUNTER(rx_p6_hdr_egr_ovfls, RxP6HdrEgrOvflCnt);
456DEVICE_COUNTER(rx_p7_hdr_egr_ovfls, RxP7HdrEgrOvflCnt);
457DEVICE_COUNTER(rx_p8_hdr_egr_ovfls, RxP8HdrEgrOvflCnt);
458DEVICE_COUNTER(rx_pkey_mismatches, RxPKeyMismatchCnt);
459DEVICE_COUNTER(rx_tid_full_errs, RxTIDFullErrCnt);
460DEVICE_COUNTER(rx_tid_valid_errs, RxTIDValidErrCnt);
461DEVICE_COUNTER(rx_vcrc_errs, RxVCRCErrCnt);
462DEVICE_COUNTER(tx_data_pkts, TxDataPktCnt);
463DEVICE_COUNTER(tx_dropped_pkts, TxDroppedPktCnt);
464DEVICE_COUNTER(tx_dwords, TxDwordCnt);
465DEVICE_COUNTER(tx_flow_pkts, TxFlowPktCnt);
466DEVICE_COUNTER(tx_flow_stalls, TxFlowStallCnt);
467DEVICE_COUNTER(tx_len_errs, TxLenErrCnt);
468DEVICE_COUNTER(tx_max_min_len_errs, TxMaxMinLenErrCnt);
469DEVICE_COUNTER(tx_underruns, TxUnderrunCnt);
470DEVICE_COUNTER(tx_unsup_vl_errs, TxUnsupVLErrCnt);
471
472static struct attribute *dev_counter_attributes[] = {
473 &dev_attr_ib_link_downeds.attr,
474 &dev_attr_ib_link_err_recoveries.attr,
475 &dev_attr_ib_status_changes.attr,
476 &dev_attr_ib_symbol_errs.attr,
477 &dev_attr_lb_flow_stalls.attr,
478 &dev_attr_lb_ints.attr,
479 &dev_attr_rx_bad_formats.attr,
480 &dev_attr_rx_buf_ovfls.attr,
481 &dev_attr_rx_data_pkts.attr,
482 &dev_attr_rx_dropped_pkts.attr,
483 &dev_attr_rx_dwords.attr,
484 &dev_attr_rx_ebps.attr,
485 &dev_attr_rx_flow_ctrl_errs.attr,
486 &dev_attr_rx_flow_pkts.attr,
487 &dev_attr_rx_icrc_errs.attr,
488 &dev_attr_rx_len_errs.attr,
489 &dev_attr_rx_link_problems.attr,
490 &dev_attr_rx_lpcrc_errs.attr,
491 &dev_attr_rx_max_min_len_errs.attr,
492 &dev_attr_rx_p0_hdr_egr_ovfls.attr,
493 &dev_attr_rx_p1_hdr_egr_ovfls.attr,
494 &dev_attr_rx_p2_hdr_egr_ovfls.attr,
495 &dev_attr_rx_p3_hdr_egr_ovfls.attr,
496 &dev_attr_rx_p4_hdr_egr_ovfls.attr,
497 &dev_attr_rx_p5_hdr_egr_ovfls.attr,
498 &dev_attr_rx_p6_hdr_egr_ovfls.attr,
499 &dev_attr_rx_p7_hdr_egr_ovfls.attr,
500 &dev_attr_rx_p8_hdr_egr_ovfls.attr,
501 &dev_attr_rx_pkey_mismatches.attr,
502 &dev_attr_rx_tid_full_errs.attr,
503 &dev_attr_rx_tid_valid_errs.attr,
504 &dev_attr_rx_vcrc_errs.attr,
505 &dev_attr_tx_data_pkts.attr,
506 &dev_attr_tx_dropped_pkts.attr,
507 &dev_attr_tx_dwords.attr,
508 &dev_attr_tx_flow_pkts.attr,
509 &dev_attr_tx_flow_stalls.attr,
510 &dev_attr_tx_len_errs.attr,
511 &dev_attr_tx_max_min_len_errs.attr,
512 &dev_attr_tx_underruns.attr,
513 &dev_attr_tx_unsup_vl_errs.attr,
514 NULL
515};
516
517static struct attribute_group dev_counter_attr_group = {
518 .name = "counters",
519 .attrs = dev_counter_attributes
520};
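Each DEVICE_COUNTER() attribute above converts a named field of struct infinipath_counters into a chip counter index via offsetof()/sizeof(u64); that is, the chip counters are assumed to be laid out as a packed array of 64-bit registers mirroring the struct. A small sketch of that mapping follows; the three-field struct below is a hypothetical stand-in, not the real register layout.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical counter block: a packed array of 64-bit registers whose
 * layout mirrors a C struct, so a field's byte offset divided by
 * sizeof(uint64_t) is its register index.
 */
struct counters {
	uint64_t LBIntCnt;
	uint64_t TxDataPktCnt;
	uint64_t RxDataPktCnt;
};

#define COUNTER_INDEX(field) \
	(offsetof(struct counters, field) / sizeof(uint64_t))

int main(void)
{
	printf("LBIntCnt     -> register %zu\n", COUNTER_INDEX(LBIntCnt));
	printf("TxDataPktCnt -> register %zu\n", COUNTER_INDEX(TxDataPktCnt));
	printf("RxDataPktCnt -> register %zu\n", COUNTER_INDEX(RxDataPktCnt));
	return 0;
}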
521
522static ssize_t store_reset(struct device *dev,
523 struct device_attribute *attr,
524 const char *buf,
525 size_t count)
526{
527 struct ipath_devdata *dd = dev_get_drvdata(dev);
528 int ret;
529
530 if (count < 5 || memcmp(buf, "reset", 5)) {
531 ret = -EINVAL;
532 goto bail;
533 }
534
535 if (dd->ipath_flags & IPATH_DISABLED) {
536 /*
537 * post-reset init would re-enable interrupts, etc.
538		 * so don't allow reset on disabled devices. Not a
539		 * perfect error code, but about the best choice.
540 */
541		dev_info(dev, "Unit %d is disabled, can't reset\n",
542			 dd->ipath_unit);
543		ret = -EINVAL;
544	} else
545		ret = ipath_reset_device(dd->ipath_unit);
546bail:
547	return ret < 0 ? ret : count;
548}
549
550static ssize_t store_link_state(struct device *dev,
551 struct device_attribute *attr,
552 const char *buf,
553 size_t count)
554{
555 struct ipath_devdata *dd = dev_get_drvdata(dev);
556 int ret, r;
557 u16 state;
558
559 ret = ipath_parse_ushort(buf, &state);
560 if (ret < 0)
561 goto invalid;
562
563 r = ipath_layer_set_linkstate(dd, state);
564 if (r < 0) {
565 ret = r;
566 goto bail;
567 }
568
569 goto bail;
570invalid:
571 ipath_dev_err(dd, "attempt to set invalid link state\n");
572bail:
573 return ret;
574}
575
576static ssize_t show_mtu(struct device *dev,
577 struct device_attribute *attr,
578 char *buf)
579{
580 struct ipath_devdata *dd = dev_get_drvdata(dev);
581 return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_ibmtu);
582}
583
584static ssize_t store_mtu(struct device *dev,
585 struct device_attribute *attr,
586 const char *buf,
587 size_t count)
588{
589 struct ipath_devdata *dd = dev_get_drvdata(dev);
590 ssize_t ret;
591 u16 mtu = 0;
592 int r;
593
594 ret = ipath_parse_ushort(buf, &mtu);
595 if (ret < 0)
596 goto invalid;
597
598 r = ipath_layer_set_mtu(dd, mtu);
599 if (r < 0)
600 ret = r;
601
602 goto bail;
603invalid:
604 ipath_dev_err(dd, "attempt to set invalid MTU\n");
605bail:
606 return ret;
607}
608
609static ssize_t show_enabled(struct device *dev,
610 struct device_attribute *attr,
611 char *buf)
612{
613 struct ipath_devdata *dd = dev_get_drvdata(dev);
614 return scnprintf(buf, PAGE_SIZE, "%u\n",
615 (dd->ipath_flags & IPATH_DISABLED) ? 0 : 1);
616}
617
618static ssize_t store_enabled(struct device *dev,
619 struct device_attribute *attr,
620 const char *buf,
621 size_t count)
622{
623 struct ipath_devdata *dd = dev_get_drvdata(dev);
624 ssize_t ret;
625 u16 enable = 0;
626
627 ret = ipath_parse_ushort(buf, &enable);
628 if (ret < 0) {
629 ipath_dev_err(dd, "attempt to use non-numeric on enable\n");
630 goto bail;
631 }
632
633 if (enable) {
634 if (!(dd->ipath_flags & IPATH_DISABLED))
635 goto bail;
636
637 dev_info(dev, "Enabling unit %d\n", dd->ipath_unit);
638 /* same as post-reset */
639 ret = ipath_init_chip(dd, 1);
640 if (ret)
641 ipath_dev_err(dd, "Failed to enable unit %d\n",
642 dd->ipath_unit);
643 else {
644 dd->ipath_flags &= ~IPATH_DISABLED;
645 *dd->ipath_statusp &= ~IPATH_STATUS_ADMIN_DISABLED;
646 }
647 }
648 else if (!(dd->ipath_flags & IPATH_DISABLED)) {
649 dev_info(dev, "Disabling unit %d\n", dd->ipath_unit);
650 ipath_shutdown_device(dd);
651 dd->ipath_flags |= IPATH_DISABLED;
652 *dd->ipath_statusp |= IPATH_STATUS_ADMIN_DISABLED;
653 }
654
655bail:
656 return ret;
657}
658
659static DRIVER_ATTR(num_units, S_IRUGO, show_num_units, NULL);
660static DRIVER_ATTR(version, S_IRUGO, show_version, NULL);
661
662static struct attribute *driver_attributes[] = {
663 &driver_attr_num_units.attr,
664 &driver_attr_version.attr,
665 NULL
666};
667
668static struct attribute_group driver_attr_group = {
669 .attrs = driver_attributes
670};
671
672static DEVICE_ATTR(guid, S_IWUSR | S_IRUGO, show_guid, store_guid);
673static DEVICE_ATTR(lid, S_IWUSR | S_IRUGO, show_lid, store_lid);
674static DEVICE_ATTR(link_state, S_IWUSR, NULL, store_link_state);
675static DEVICE_ATTR(mlid, S_IWUSR | S_IRUGO, show_mlid, store_mlid);
676static DEVICE_ATTR(mtu, S_IWUSR | S_IRUGO, show_mtu, store_mtu);
677static DEVICE_ATTR(enabled, S_IWUSR | S_IRUGO, show_enabled, store_enabled);
678static DEVICE_ATTR(nguid, S_IRUGO, show_nguid, NULL);
679static DEVICE_ATTR(reset, S_IWUSR, NULL, store_reset);
680static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
681static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
682static DEVICE_ATTR(status_str, S_IRUGO, show_status_str, NULL);
683static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
684static DEVICE_ATTR(unit, S_IRUGO, show_unit, NULL);
685
686static struct attribute *dev_attributes[] = {
687 &dev_attr_guid.attr,
688 &dev_attr_lid.attr,
689 &dev_attr_link_state.attr,
690 &dev_attr_mlid.attr,
691 &dev_attr_mtu.attr,
692 &dev_attr_nguid.attr,
693 &dev_attr_serial.attr,
694 &dev_attr_status.attr,
695 &dev_attr_status_str.attr,
696 &dev_attr_boardversion.attr,
697 &dev_attr_unit.attr,
698 &dev_attr_enabled.attr,
699 NULL
700};
701
702static struct attribute_group dev_attr_group = {
703 .attrs = dev_attributes
704};
705
706/**
707 * ipath_expose_reset - create a device reset file
708 * @dev: the device structure
709 *
710 * Only expose a file that lets us reset the device after someone
711 * enters diag mode. A device reset is quite likely to crash the
712 * machine entirely, so we don't want to normally make it
713 * available.
714 */
715int ipath_expose_reset(struct device *dev)
716{
717 return device_create_file(dev, &dev_attr_reset);
718}
719
720int ipath_driver_create_group(struct device_driver *drv)
721{
722 int ret;
723
724 ret = sysfs_create_group(&drv->kobj, &driver_attr_group);
725 if (ret)
726 goto bail;
727
728 ret = sysfs_create_group(&drv->kobj, &driver_stat_attr_group);
729 if (ret)
730 sysfs_remove_group(&drv->kobj, &driver_attr_group);
731
732bail:
733 return ret;
734}
735
736void ipath_driver_remove_group(struct device_driver *drv)
737{
738 sysfs_remove_group(&drv->kobj, &driver_stat_attr_group);
739 sysfs_remove_group(&drv->kobj, &driver_attr_group);
740}
741
742int ipath_device_create_group(struct device *dev, struct ipath_devdata *dd)
743{
744 int ret;
745 char unit[5];
746
747 ret = sysfs_create_group(&dev->kobj, &dev_attr_group);
748 if (ret)
749 goto bail;
750
751 ret = sysfs_create_group(&dev->kobj, &dev_counter_attr_group);
752 if (ret)
753 goto bail_attrs;
754
755 snprintf(unit, sizeof(unit), "%02d", dd->ipath_unit);
756 ret = sysfs_create_link(&dev->driver->kobj, &dev->kobj, unit);
757 if (ret == 0)
758 goto bail;
759
760 sysfs_remove_group(&dev->kobj, &dev_counter_attr_group);
761bail_attrs:
762 sysfs_remove_group(&dev->kobj, &dev_attr_group);
763bail:
764 return ret;
765}
766
767void ipath_device_remove_group(struct device *dev, struct ipath_devdata *dd)
768{
769 char unit[5];
770
771 snprintf(unit, sizeof(unit), "%02d", dd->ipath_unit);
772 sysfs_remove_link(&dev->driver->kobj, unit);
773
774 sysfs_remove_group(&dev->kobj, &dev_counter_attr_group);
775 sysfs_remove_group(&dev->kobj, &dev_attr_group);
776
777 device_remove_file(dev, &dev_attr_reset);
778}
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
new file mode 100644
index 000000000000..0d6dbc0a541e
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -0,0 +1,645 @@
1/*
2 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include "ipath_verbs.h"
34#include "ips_common.h"
35
36/* cut down ridiculously long IB macro names */
37#define OP(x) IB_OPCODE_UC_##x
38
39static void complete_last_send(struct ipath_qp *qp, struct ipath_swqe *wqe,
40 struct ib_wc *wc)
41{
42 if (++qp->s_last == qp->s_size)
43 qp->s_last = 0;
44 if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
45 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
46 wc->wr_id = wqe->wr.wr_id;
47 wc->status = IB_WC_SUCCESS;
48 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
49 wc->vendor_err = 0;
50 wc->byte_len = wqe->length;
51 wc->qp_num = qp->ibqp.qp_num;
52 wc->src_qp = qp->remote_qpn;
53 wc->pkey_index = 0;
54 wc->slid = qp->remote_ah_attr.dlid;
55 wc->sl = qp->remote_ah_attr.sl;
56 wc->dlid_path_bits = 0;
57 wc->port_num = 0;
58 ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 0);
59 }
60 wqe = get_swqe_ptr(qp, qp->s_last);
61}
62
63/**
64 * ipath_do_uc_send - do a send on a UC queue
65 * @data: contains a pointer to the QP to send on
66 *
67 * Process entries in the send work queue until the queue is exhausted.
68 * Only allow one CPU to send a packet per QP (tasklet).
69 * Otherwise, after we drop the QP lock, two threads could send
70 * packets out of order.
 71 * This is similar to ipath_do_rc_send() except we don't have
72 * timeouts or resends.
73 */
74void ipath_do_uc_send(unsigned long data)
75{
76 struct ipath_qp *qp = (struct ipath_qp *)data;
77 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
78 struct ipath_swqe *wqe;
79 unsigned long flags;
80 u16 lrh0;
81 u32 hwords;
82 u32 nwords;
83 u32 extra_bytes;
84 u32 bth0;
85 u32 bth2;
86 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
87 u32 len;
88 struct ipath_other_headers *ohdr;
89 struct ib_wc wc;
90
91 if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
92 goto bail;
93
94 if (unlikely(qp->remote_ah_attr.dlid ==
95 ipath_layer_get_lid(dev->dd))) {
96 /* Pass in an uninitialized ib_wc to save stack space. */
97 ipath_ruc_loopback(qp, &wc);
98 clear_bit(IPATH_S_BUSY, &qp->s_flags);
99 goto bail;
100 }
101
102 ohdr = &qp->s_hdr.u.oth;
103 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
104 ohdr = &qp->s_hdr.u.l.oth;
105
106again:
107 /* Check for a constructed packet to be sent. */
108 if (qp->s_hdrwords != 0) {
109 /*
110 * If no PIO bufs are available, return.
111 * An interrupt will call ipath_ib_piobufavail()
112 * when one is available.
113 */
114 if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
115 (u32 *) &qp->s_hdr,
116 qp->s_cur_size,
117 qp->s_cur_sge)) {
118 ipath_no_bufs_available(qp, dev);
119 goto bail;
120 }
121 dev->n_unicast_xmit++;
122 /* Record that we sent the packet and s_hdr is empty. */
123 qp->s_hdrwords = 0;
124 }
125
126 lrh0 = IPS_LRH_BTH;
127 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
128 hwords = 5;
129
130 /*
131 * The lock is needed to synchronize between
132 * setting qp->s_ack_state and post_send().
133 */
134 spin_lock_irqsave(&qp->s_lock, flags);
135
136 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK))
137 goto done;
138
139 bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
140
141 /* Send a request. */
142 wqe = get_swqe_ptr(qp, qp->s_last);
143 switch (qp->s_state) {
144 default:
145 /*
146 * Signal the completion of the last send (if there is
147 * one).
148 */
149 if (qp->s_last != qp->s_tail)
150 complete_last_send(qp, wqe, &wc);
151
152 /* Check if send work queue is empty. */
153 if (qp->s_tail == qp->s_head)
154 goto done;
155 /*
156 * Start a new request.
157 */
158 qp->s_psn = wqe->psn = qp->s_next_psn;
159 qp->s_sge.sge = wqe->sg_list[0];
160 qp->s_sge.sg_list = wqe->sg_list + 1;
161 qp->s_sge.num_sge = wqe->wr.num_sge;
162 qp->s_len = len = wqe->length;
163 switch (wqe->wr.opcode) {
164 case IB_WR_SEND:
165 case IB_WR_SEND_WITH_IMM:
166 if (len > pmtu) {
167 qp->s_state = OP(SEND_FIRST);
168 len = pmtu;
169 break;
170 }
171 if (wqe->wr.opcode == IB_WR_SEND)
172 qp->s_state = OP(SEND_ONLY);
173 else {
174 qp->s_state =
175 OP(SEND_ONLY_WITH_IMMEDIATE);
176 /* Immediate data comes after the BTH */
177 ohdr->u.imm_data = wqe->wr.imm_data;
178 hwords += 1;
179 }
180 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
181 bth0 |= 1 << 23;
182 break;
183
184 case IB_WR_RDMA_WRITE:
185 case IB_WR_RDMA_WRITE_WITH_IMM:
186 ohdr->u.rc.reth.vaddr =
187 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
188 ohdr->u.rc.reth.rkey =
189 cpu_to_be32(wqe->wr.wr.rdma.rkey);
190 ohdr->u.rc.reth.length = cpu_to_be32(len);
191 hwords += sizeof(struct ib_reth) / 4;
192 if (len > pmtu) {
193 qp->s_state = OP(RDMA_WRITE_FIRST);
194 len = pmtu;
195 break;
196 }
197 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
198 qp->s_state = OP(RDMA_WRITE_ONLY);
199 else {
200 qp->s_state =
201 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
202 /* Immediate data comes after the RETH */
203 ohdr->u.rc.imm_data = wqe->wr.imm_data;
204 hwords += 1;
205 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
206 bth0 |= 1 << 23;
207 }
208 break;
209
210 default:
211 goto done;
212 }
213 if (++qp->s_tail >= qp->s_size)
214 qp->s_tail = 0;
215 break;
216
217 case OP(SEND_FIRST):
218 qp->s_state = OP(SEND_MIDDLE);
219 /* FALLTHROUGH */
220 case OP(SEND_MIDDLE):
221 len = qp->s_len;
222 if (len > pmtu) {
223 len = pmtu;
224 break;
225 }
226 if (wqe->wr.opcode == IB_WR_SEND)
227 qp->s_state = OP(SEND_LAST);
228 else {
229 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
230 /* Immediate data comes after the BTH */
231 ohdr->u.imm_data = wqe->wr.imm_data;
232 hwords += 1;
233 }
234 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
235 bth0 |= 1 << 23;
236 break;
237
238 case OP(RDMA_WRITE_FIRST):
239 qp->s_state = OP(RDMA_WRITE_MIDDLE);
240 /* FALLTHROUGH */
241 case OP(RDMA_WRITE_MIDDLE):
242 len = qp->s_len;
243 if (len > pmtu) {
244 len = pmtu;
245 break;
246 }
247 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
248 qp->s_state = OP(RDMA_WRITE_LAST);
249 else {
250 qp->s_state =
251 OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
252 /* Immediate data comes after the BTH */
253 ohdr->u.imm_data = wqe->wr.imm_data;
254 hwords += 1;
255 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
256 bth0 |= 1 << 23;
257 }
258 break;
259 }
260 bth2 = qp->s_next_psn++ & IPS_PSN_MASK;
261 qp->s_len -= len;
262 bth0 |= qp->s_state << 24;
263
264 spin_unlock_irqrestore(&qp->s_lock, flags);
265
266 /* Construct the header. */
267 extra_bytes = (4 - len) & 3;
268 nwords = (len + extra_bytes) >> 2;
269 if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
270 /* Header size in 32-bit words. */
271 hwords += 10;
272 lrh0 = IPS_LRH_GRH;
273 qp->s_hdr.u.l.grh.version_tclass_flow =
274 cpu_to_be32((6 << 28) |
275 (qp->remote_ah_attr.grh.traffic_class
276 << 20) |
277 qp->remote_ah_attr.grh.flow_label);
278 qp->s_hdr.u.l.grh.paylen =
279 cpu_to_be16(((hwords - 12) + nwords +
280 SIZE_OF_CRC) << 2);
281 /* next_hdr is defined by C8-7 in ch. 8.4.1 */
282 qp->s_hdr.u.l.grh.next_hdr = 0x1B;
283 qp->s_hdr.u.l.grh.hop_limit =
284 qp->remote_ah_attr.grh.hop_limit;
285 /* The SGID is 32-bit aligned. */
286 qp->s_hdr.u.l.grh.sgid.global.subnet_prefix =
287 dev->gid_prefix;
288 qp->s_hdr.u.l.grh.sgid.global.interface_id =
289 ipath_layer_get_guid(dev->dd);
290 qp->s_hdr.u.l.grh.dgid = qp->remote_ah_attr.grh.dgid;
291 }
292 qp->s_hdrwords = hwords;
293 qp->s_cur_sge = &qp->s_sge;
294 qp->s_cur_size = len;
295 lrh0 |= qp->remote_ah_attr.sl << 4;
296 qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
297 /* DEST LID */
298 qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
299 qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC);
300 qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
301 bth0 |= extra_bytes << 20;
302 ohdr->bth[0] = cpu_to_be32(bth0);
303 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
304 ohdr->bth[2] = cpu_to_be32(bth2);
305
306 /* Check for more work to do. */
307 goto again;
308
309done:
310 spin_unlock_irqrestore(&qp->s_lock, flags);
311 clear_bit(IPATH_S_BUSY, &qp->s_flags);
312
313bail:
314 return;
315}
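The send path packs the opcode into the top byte of BTH word 0 (bth0 |= qp->s_state << 24), with the solicited-event bit at bit 23 and the pad count at bits 21:20, and ipath_uc_rcv() below recovers the opcode as be32_to_cpu(ohdr->bth[0]) >> 24. A standalone sketch of that packing and unpacking, using htonl()/ntohl() in place of the kernel byte-order helpers; the opcode and P_Key values are arbitrary examples.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl/ntohl stand in for cpu_to_be32/be32_to_cpu */

int main(void)
{
	uint32_t opcode = 0x04;		/* arbitrary example opcode */
	uint32_t extra_bytes = 3;	/* pad up to a 4-byte boundary */
	uint32_t pkey = 0xffff;
	uint32_t bth0, wire, rx_opcode, rx_pad;

	/* Build BTH word 0 the way the send path does. */
	bth0 = pkey;
	bth0 |= 1u << 23;		/* solicited event bit */
	bth0 |= extra_bytes << 20;	/* pad count */
	bth0 |= opcode << 24;		/* opcode in the top byte */
	wire = htonl(bth0);		/* network byte order on the wire */

	/* Receive side: opcode is the top byte after byte swapping. */
	rx_opcode = ntohl(wire) >> 24;
	rx_pad = (ntohl(wire) >> 20) & 3;
	printf("opcode 0x%02x, pad %u\n", (unsigned)rx_opcode,
	       (unsigned)rx_pad);
	return 0;
}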
316
317/**
318 * ipath_uc_rcv - handle an incoming UC packet
319 * @dev: the device the packet came in on
320 * @hdr: the header of the packet
321 * @has_grh: true if the packet has a GRH
322 * @data: the packet data
323 * @tlen: the length of the packet
324 * @qp: the QP for this packet.
325 *
326 * This is called from ipath_qp_rcv() to process an incoming UC packet
327 * for the given QP.
328 * Called at interrupt level.
329 */
330void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
331 int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
332{
333 struct ipath_other_headers *ohdr;
334 int opcode;
335 u32 hdrsize;
336 u32 psn;
337 u32 pad;
338 unsigned long flags;
339 struct ib_wc wc;
340 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
341 struct ib_reth *reth;
342 int header_in_data;
343
344 /* Check for GRH */
345 if (!has_grh) {
346 ohdr = &hdr->u.oth;
347 hdrsize = 8 + 12; /* LRH + BTH */
348 psn = be32_to_cpu(ohdr->bth[2]);
349 header_in_data = 0;
350 } else {
351 ohdr = &hdr->u.l.oth;
352 hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
353 /*
354 * The header with GRH is 60 bytes and the
355 * core driver sets the eager header buffer
356 * size to 56 bytes so the last 4 bytes of
357		 * the BTH header (PSN) are in the data buffer.
358 */
359 header_in_data =
360 ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
361 if (header_in_data) {
362 psn = be32_to_cpu(((__be32 *) data)[0]);
363 data += sizeof(__be32);
364 } else
365 psn = be32_to_cpu(ohdr->bth[2]);
366 }
367 /*
368	 * The opcode is in the low byte when it's in network order
369 * (top byte when in host order).
370 */
371 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
372
373 wc.imm_data = 0;
374 wc.wc_flags = 0;
375
376 spin_lock_irqsave(&qp->r_rq.lock, flags);
377
378	/* Compare the PSN versus the expected PSN. */
379 if (unlikely(ipath_cmp24(psn, qp->r_psn) != 0)) {
380 /*
381 * Handle a sequence error.
382 * Silently drop any current message.
383 */
384 qp->r_psn = psn;
385 inv:
386 qp->r_state = OP(SEND_LAST);
387 switch (opcode) {
388 case OP(SEND_FIRST):
389 case OP(SEND_ONLY):
390 case OP(SEND_ONLY_WITH_IMMEDIATE):
391 goto send_first;
392
393 case OP(RDMA_WRITE_FIRST):
394 case OP(RDMA_WRITE_ONLY):
395 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
396 goto rdma_first;
397
398 default:
399 dev->n_pkt_drops++;
400 goto done;
401 }
402 }
403
404 /* Check for opcode sequence errors. */
405 switch (qp->r_state) {
406 case OP(SEND_FIRST):
407 case OP(SEND_MIDDLE):
408 if (opcode == OP(SEND_MIDDLE) ||
409 opcode == OP(SEND_LAST) ||
410 opcode == OP(SEND_LAST_WITH_IMMEDIATE))
411 break;
412 goto inv;
413
414 case OP(RDMA_WRITE_FIRST):
415 case OP(RDMA_WRITE_MIDDLE):
416 if (opcode == OP(RDMA_WRITE_MIDDLE) ||
417 opcode == OP(RDMA_WRITE_LAST) ||
418 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
419 break;
420 goto inv;
421
422 default:
423 if (opcode == OP(SEND_FIRST) ||
424 opcode == OP(SEND_ONLY) ||
425 opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
426 opcode == OP(RDMA_WRITE_FIRST) ||
427 opcode == OP(RDMA_WRITE_ONLY) ||
428 opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
429 break;
430 goto inv;
431 }
432
433 /* OK, process the packet. */
434 switch (opcode) {
435 case OP(SEND_FIRST):
436 case OP(SEND_ONLY):
437 case OP(SEND_ONLY_WITH_IMMEDIATE):
438 send_first:
439 if (qp->r_reuse_sge) {
440 qp->r_reuse_sge = 0;
441 qp->r_sge = qp->s_rdma_sge;
442 } else if (!ipath_get_rwqe(qp, 0)) {
443 dev->n_pkt_drops++;
444 goto done;
445 }
446 /* Save the WQE so we can reuse it in case of an error. */
447 qp->s_rdma_sge = qp->r_sge;
448 qp->r_rcv_len = 0;
449 if (opcode == OP(SEND_ONLY))
450 goto send_last;
451 else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
452 goto send_last_imm;
453 /* FALLTHROUGH */
454 case OP(SEND_MIDDLE):
455 /* Check for invalid length PMTU or posted rwqe len. */
456 if (unlikely(tlen != (hdrsize + pmtu + 4))) {
457 qp->r_reuse_sge = 1;
458 dev->n_pkt_drops++;
459 goto done;
460 }
461 qp->r_rcv_len += pmtu;
462 if (unlikely(qp->r_rcv_len > qp->r_len)) {
463 qp->r_reuse_sge = 1;
464 dev->n_pkt_drops++;
465 goto done;
466 }
467 ipath_copy_sge(&qp->r_sge, data, pmtu);
468 break;
469
470 case OP(SEND_LAST_WITH_IMMEDIATE):
471 send_last_imm:
472 if (header_in_data) {
473 wc.imm_data = *(__be32 *) data;
474 data += sizeof(__be32);
475 } else {
476 /* Immediate data comes after BTH */
477 wc.imm_data = ohdr->u.imm_data;
478 }
479 hdrsize += 4;
480 wc.wc_flags = IB_WC_WITH_IMM;
481 /* FALLTHROUGH */
482 case OP(SEND_LAST):
483 send_last:
484 /* Get the number of bytes the message was padded by. */
485 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
486 /* Check for invalid length. */
487 /* XXX LAST len should be >= 1 */
488 if (unlikely(tlen < (hdrsize + pad + 4))) {
489 qp->r_reuse_sge = 1;
490 dev->n_pkt_drops++;
491 goto done;
492 }
493 /* Don't count the CRC. */
494 tlen -= (hdrsize + pad + 4);
495 wc.byte_len = tlen + qp->r_rcv_len;
496 if (unlikely(wc.byte_len > qp->r_len)) {
497 qp->r_reuse_sge = 1;
498 dev->n_pkt_drops++;
499 goto done;
500 }
501 /* XXX Need to free SGEs */
502 last_imm:
503 ipath_copy_sge(&qp->r_sge, data, tlen);
504 wc.wr_id = qp->r_wr_id;
505 wc.status = IB_WC_SUCCESS;
506 wc.opcode = IB_WC_RECV;
507 wc.vendor_err = 0;
508 wc.qp_num = qp->ibqp.qp_num;
509 wc.src_qp = qp->remote_qpn;
510 wc.pkey_index = 0;
511 wc.slid = qp->remote_ah_attr.dlid;
512 wc.sl = qp->remote_ah_attr.sl;
513 wc.dlid_path_bits = 0;
514 wc.port_num = 0;
515 /* Signal completion event if the solicited bit is set. */
516 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
517 (ohdr->bth[0] &
518 __constant_cpu_to_be32(1 << 23)) != 0);
519 break;
520
521 case OP(RDMA_WRITE_FIRST):
522 case OP(RDMA_WRITE_ONLY):
523 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
524 rdma_first:
525 /* RETH comes after BTH */
526 if (!header_in_data)
527 reth = &ohdr->u.rc.reth;
528 else {
529 reth = (struct ib_reth *)data;
530 data += sizeof(*reth);
531 }
532 hdrsize += sizeof(*reth);
533 qp->r_len = be32_to_cpu(reth->length);
534 qp->r_rcv_len = 0;
535 if (qp->r_len != 0) {
536 u32 rkey = be32_to_cpu(reth->rkey);
537 u64 vaddr = be64_to_cpu(reth->vaddr);
538
539 /* Check rkey */
540 if (unlikely(!ipath_rkey_ok(
541 dev, &qp->r_sge, qp->r_len,
542 vaddr, rkey,
543 IB_ACCESS_REMOTE_WRITE))) {
544 dev->n_pkt_drops++;
545 goto done;
546 }
547 } else {
548 qp->r_sge.sg_list = NULL;
549 qp->r_sge.sge.mr = NULL;
550 qp->r_sge.sge.vaddr = NULL;
551 qp->r_sge.sge.length = 0;
552 qp->r_sge.sge.sge_length = 0;
553 }
554 if (unlikely(!(qp->qp_access_flags &
555 IB_ACCESS_REMOTE_WRITE))) {
556 dev->n_pkt_drops++;
557 goto done;
558 }
559 if (opcode == OP(RDMA_WRITE_ONLY))
560 goto rdma_last;
561 else if (opcode ==
562 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
563 goto rdma_last_imm;
564 /* FALLTHROUGH */
565 case OP(RDMA_WRITE_MIDDLE):
566 /* Check for invalid length PMTU or posted rwqe len. */
567 if (unlikely(tlen != (hdrsize + pmtu + 4))) {
568 dev->n_pkt_drops++;
569 goto done;
570 }
571 qp->r_rcv_len += pmtu;
572 if (unlikely(qp->r_rcv_len > qp->r_len)) {
573 dev->n_pkt_drops++;
574 goto done;
575 }
576 ipath_copy_sge(&qp->r_sge, data, pmtu);
577 break;
578
579 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
580 rdma_last_imm:
581 /* Get the number of bytes the message was padded by. */
582 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
583 /* Check for invalid length. */
584 /* XXX LAST len should be >= 1 */
585 if (unlikely(tlen < (hdrsize + pad + 4))) {
586 dev->n_pkt_drops++;
587 goto done;
588 }
589 /* Don't count the CRC. */
590 tlen -= (hdrsize + pad + 4);
591 if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
592 dev->n_pkt_drops++;
593 goto done;
594 }
595 if (qp->r_reuse_sge) {
596 qp->r_reuse_sge = 0;
597 } else if (!ipath_get_rwqe(qp, 1)) {
598 dev->n_pkt_drops++;
599 goto done;
600 }
601 if (header_in_data) {
602 wc.imm_data = *(__be32 *) data;
603 data += sizeof(__be32);
604 } else {
605 /* Immediate data comes after BTH */
606 wc.imm_data = ohdr->u.imm_data;
607 }
608 hdrsize += 4;
609 wc.wc_flags = IB_WC_WITH_IMM;
610 wc.byte_len = 0;
611 goto last_imm;
612
613 case OP(RDMA_WRITE_LAST):
614 rdma_last:
615 /* Get the number of bytes the message was padded by. */
616 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
617 /* Check for invalid length. */
618 /* XXX LAST len should be >= 1 */
619 if (unlikely(tlen < (hdrsize + pad + 4))) {
620 dev->n_pkt_drops++;
621 goto done;
622 }
623 /* Don't count the CRC. */
624 tlen -= (hdrsize + pad + 4);
625 if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
626 dev->n_pkt_drops++;
627 goto done;
628 }
629 ipath_copy_sge(&qp->r_sge, data, tlen);
630 break;
631
632 default:
633 /* Drop packet for unknown opcodes. */
634 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
635 dev->n_pkt_drops++;
636 goto bail;
637 }
638 qp->r_psn++;
639 qp->r_state = opcode;
640done:
641 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
642
643bail:
644 return;
645}
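/*
 * Illustrative summary of the opcode sequencing enforced above in
 * ipath_uc_rcv(): for each previous r_state, only the listed opcodes
 * may continue the current message.
 *
 *   previous r_state              acceptable next opcode
 *   --------------------------    -------------------------------------
 *   SEND_FIRST, SEND_MIDDLE       SEND_MIDDLE, SEND_LAST,
 *                                 SEND_LAST_WITH_IMMEDIATE
 *   RDMA_WRITE_FIRST, _MIDDLE     RDMA_WRITE_MIDDLE, RDMA_WRITE_LAST,
 *                                 RDMA_WRITE_LAST_WITH_IMMEDIATE
 *   anything else                 any FIRST or ONLY variant
 *
 * A PSN that does not match qp->r_psn is handled the same way as an
 * out-of-sequence opcode: r_psn is resynchronized, the current message
 * is silently dropped at the "inv" label, and only a FIRST or ONLY
 * opcode can start a new one.
 */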
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
new file mode 100644
index 000000000000..5ff3de6128b2
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -0,0 +1,621 @@
1/*
2 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <rdma/ib_smi.h>
34
35#include "ipath_verbs.h"
36#include "ips_common.h"
37
38/**
39 * ipath_ud_loopback - handle send on loopback QPs
40 * @sqp: the QP
41 * @ss: the SGE state
42 * @length: the length of the data to send
43 * @wr: the work request
44 * @wc: the work completion entry
45 *
46 * This is called from ipath_post_ud_send() to forward a WQE addressed
47 * to the same HCA.
48 */
49void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_sge_state *ss,
50 u32 length, struct ib_send_wr *wr, struct ib_wc *wc)
51{
52 struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
53 struct ipath_qp *qp;
54 struct ib_ah_attr *ah_attr;
55 unsigned long flags;
56 struct ipath_rq *rq;
57 struct ipath_srq *srq;
58 struct ipath_sge_state rsge;
59 struct ipath_sge *sge;
60 struct ipath_rwqe *wqe;
61
62 qp = ipath_lookup_qpn(&dev->qp_table, wr->wr.ud.remote_qpn);
63 if (!qp)
64 return;
65
66 /*
67 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
68 * Qkeys with the high order bit set mean use the
69 * qkey from the QP context instead of the WR (see 10.2.5).
70 */
71 if (unlikely(qp->ibqp.qp_num &&
72 ((int) wr->wr.ud.remote_qkey < 0
73 ? qp->qkey : wr->wr.ud.remote_qkey) != qp->qkey)) {
74 /* XXX OK to lose a count once in a while. */
75 dev->qkey_violations++;
76 dev->n_pkt_drops++;
77 goto done;
78 }
79
80 /*
81	 * A GRH is expected to precede the data even if not
82 * present on the wire.
83 */
84 wc->byte_len = length + sizeof(struct ib_grh);
85
86 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
87 wc->wc_flags = IB_WC_WITH_IMM;
88 wc->imm_data = wr->imm_data;
89 } else {
90 wc->wc_flags = 0;
91 wc->imm_data = 0;
92 }
93
94 /*
95 * Get the next work request entry to find where to put the data.
96 * Note that it is safe to drop the lock after changing rq->tail
97 * since ipath_post_receive() won't fill the empty slot.
98 */
99 if (qp->ibqp.srq) {
100 srq = to_isrq(qp->ibqp.srq);
101 rq = &srq->rq;
102 } else {
103 srq = NULL;
104 rq = &qp->r_rq;
105 }
106 spin_lock_irqsave(&rq->lock, flags);
107 if (rq->tail == rq->head) {
108 spin_unlock_irqrestore(&rq->lock, flags);
109 dev->n_pkt_drops++;
110 goto done;
111 }
112 /* Silently drop packets which are too big. */
113 wqe = get_rwqe_ptr(rq, rq->tail);
114 if (wc->byte_len > wqe->length) {
115 spin_unlock_irqrestore(&rq->lock, flags);
116 dev->n_pkt_drops++;
117 goto done;
118 }
119 wc->wr_id = wqe->wr_id;
120 rsge.sge = wqe->sg_list[0];
121 rsge.sg_list = wqe->sg_list + 1;
122 rsge.num_sge = wqe->num_sge;
123 if (++rq->tail >= rq->size)
124 rq->tail = 0;
125 if (srq && srq->ibsrq.event_handler) {
126 u32 n;
127
128 if (rq->head < rq->tail)
129 n = rq->size + rq->head - rq->tail;
130 else
131 n = rq->head - rq->tail;
132 if (n < srq->limit) {
133 struct ib_event ev;
134
135 srq->limit = 0;
136 spin_unlock_irqrestore(&rq->lock, flags);
137 ev.device = qp->ibqp.device;
138 ev.element.srq = qp->ibqp.srq;
139 ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
140 srq->ibsrq.event_handler(&ev,
141 srq->ibsrq.srq_context);
142 } else
143 spin_unlock_irqrestore(&rq->lock, flags);
144 } else
145 spin_unlock_irqrestore(&rq->lock, flags);
146 ah_attr = &to_iah(wr->wr.ud.ah)->attr;
147 if (ah_attr->ah_flags & IB_AH_GRH) {
148 ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh));
149 wc->wc_flags |= IB_WC_GRH;
150 } else
151 ipath_skip_sge(&rsge, sizeof(struct ib_grh));
152 sge = &ss->sge;
153 while (length) {
154 u32 len = sge->length;
155
156 if (len > length)
157 len = length;
158 BUG_ON(len == 0);
159 ipath_copy_sge(&rsge, sge->vaddr, len);
160 sge->vaddr += len;
161 sge->length -= len;
162 sge->sge_length -= len;
163 if (sge->sge_length == 0) {
164 if (--ss->num_sge)
165 *sge = *ss->sg_list++;
166 } else if (sge->length == 0 && sge->mr != NULL) {
167 if (++sge->n >= IPATH_SEGSZ) {
168 if (++sge->m >= sge->mr->mapsz)
169 break;
170 sge->n = 0;
171 }
172 sge->vaddr =
173 sge->mr->map[sge->m]->segs[sge->n].vaddr;
174 sge->length =
175 sge->mr->map[sge->m]->segs[sge->n].length;
176 }
177 length -= len;
178 }
179 wc->status = IB_WC_SUCCESS;
180 wc->opcode = IB_WC_RECV;
181 wc->vendor_err = 0;
182 wc->qp_num = qp->ibqp.qp_num;
183 wc->src_qp = sqp->ibqp.qp_num;
184 /* XXX do we know which pkey matched? Only needed for GSI. */
185 wc->pkey_index = 0;
186 wc->slid = ipath_layer_get_lid(dev->dd) |
187 (ah_attr->src_path_bits &
188 ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1));
189 wc->sl = ah_attr->sl;
190 wc->dlid_path_bits =
191 ah_attr->dlid & ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
192 /* Signal completion event if the solicited bit is set. */
193 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), wc,
194 wr->send_flags & IB_SEND_SOLICITED);
195
196done:
197 if (atomic_dec_and_test(&qp->refcount))
198 wake_up(&qp->wait);
199}
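/*
 * Illustrative example of the GRH placeholder accounting used above:
 * UD receive completions always report room for a GRH in front of the
 * payload, so for a 100 byte loopback payload
 *
 *   wc->byte_len = 100 + sizeof(struct ib_grh) = 140
 *
 * and the receive buffer is filled either by copying the 40 GRH bytes
 * (ipath_copy_sge) or by skipping them (ipath_skip_sge), so the
 * consumer always finds the payload 40 bytes into the posted buffer.
 */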
200
201/**
202 * ipath_post_ud_send - post a UD send on QP
203 * @qp: the QP
204 * @wr: the work request
205 *
206 * Note that we actually send the data as it is posted instead of putting
207 * the request into a ring buffer. If we wanted to use a ring buffer,
208 * we would need to save a reference to the destination address in the SWQE.
209 */
210int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
211{
212 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
213 struct ipath_other_headers *ohdr;
214 struct ib_ah_attr *ah_attr;
215 struct ipath_sge_state ss;
216 struct ipath_sge *sg_list;
217 struct ib_wc wc;
218 u32 hwords;
219 u32 nwords;
220 u32 len;
221 u32 extra_bytes;
222 u32 bth0;
223 u16 lrh0;
224 u16 lid;
225 int i;
226 int ret;
227
228 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
229 ret = 0;
230 goto bail;
231 }
232
233 /* IB spec says that num_sge == 0 is OK. */
234 if (wr->num_sge > qp->s_max_sge) {
235 ret = -EINVAL;
236 goto bail;
237 }
238
239 if (wr->num_sge > 1) {
240 sg_list = kmalloc((qp->s_max_sge - 1) * sizeof(*sg_list),
241 GFP_ATOMIC);
242 if (!sg_list) {
243 ret = -ENOMEM;
244 goto bail;
245 }
246 } else
247 sg_list = NULL;
248
249 /* Check the buffer to send. */
250 ss.sg_list = sg_list;
251 ss.sge.mr = NULL;
252 ss.sge.vaddr = NULL;
253 ss.sge.length = 0;
254 ss.sge.sge_length = 0;
255 ss.num_sge = 0;
256 len = 0;
257 for (i = 0; i < wr->num_sge; i++) {
258 /* Check LKEY */
259 if (to_ipd(qp->ibqp.pd)->user && wr->sg_list[i].lkey == 0) {
260 ret = -EINVAL;
261 goto bail;
262 }
263
264 if (wr->sg_list[i].length == 0)
265 continue;
266 if (!ipath_lkey_ok(&dev->lk_table, ss.num_sge ?
267 sg_list + ss.num_sge - 1 : &ss.sge,
268 &wr->sg_list[i], 0)) {
269 ret = -EINVAL;
270 goto bail;
271 }
272 len += wr->sg_list[i].length;
273 ss.num_sge++;
274 }
275 extra_bytes = (4 - len) & 3;
276 nwords = (len + extra_bytes) >> 2;
277
278 /* Construct the header. */
279 ah_attr = &to_iah(wr->wr.ud.ah)->attr;
280 if (ah_attr->dlid == 0) {
281 ret = -EINVAL;
282 goto bail;
283 }
284 if (ah_attr->dlid >= IPS_MULTICAST_LID_BASE) {
285 if (ah_attr->dlid != IPS_PERMISSIVE_LID)
286 dev->n_multicast_xmit++;
287 else
288 dev->n_unicast_xmit++;
289 } else {
290 dev->n_unicast_xmit++;
291 lid = ah_attr->dlid &
292 ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
293 if (unlikely(lid == ipath_layer_get_lid(dev->dd))) {
294 /*
295 * Pass in an uninitialized ib_wc to save stack
296 * space.
297 */
298 ipath_ud_loopback(qp, &ss, len, wr, &wc);
299 goto done;
300 }
301 }
302 if (ah_attr->ah_flags & IB_AH_GRH) {
303 /* Header size in 32-bit words. */
304 hwords = 17;
305 lrh0 = IPS_LRH_GRH;
306 ohdr = &qp->s_hdr.u.l.oth;
307 qp->s_hdr.u.l.grh.version_tclass_flow =
308 cpu_to_be32((6 << 28) |
309 (ah_attr->grh.traffic_class << 20) |
310 ah_attr->grh.flow_label);
311 qp->s_hdr.u.l.grh.paylen =
312 cpu_to_be16(((wr->opcode ==
313 IB_WR_SEND_WITH_IMM ? 6 : 5) +
314 nwords + SIZE_OF_CRC) << 2);
315 /* next_hdr is defined by C8-7 in ch. 8.4.1 */
316 qp->s_hdr.u.l.grh.next_hdr = 0x1B;
317 qp->s_hdr.u.l.grh.hop_limit = ah_attr->grh.hop_limit;
318 /* The SGID is 32-bit aligned. */
319 qp->s_hdr.u.l.grh.sgid.global.subnet_prefix =
320 dev->gid_prefix;
321 qp->s_hdr.u.l.grh.sgid.global.interface_id =
322 ipath_layer_get_guid(dev->dd);
323 qp->s_hdr.u.l.grh.dgid = ah_attr->grh.dgid;
324 /*
325 * Don't worry about sending to locally attached multicast
326	 * QPs; the IBA spec leaves what happens unspecified.
327 */
328 } else {
329 /* Header size in 32-bit words. */
330 hwords = 7;
331 lrh0 = IPS_LRH_BTH;
332 ohdr = &qp->s_hdr.u.oth;
333 }
334 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
335 ohdr->u.ud.imm_data = wr->imm_data;
336 wc.imm_data = wr->imm_data;
337 hwords += 1;
338 bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
339 } else if (wr->opcode == IB_WR_SEND) {
340 wc.imm_data = 0;
341 bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
342 } else {
343 ret = -EINVAL;
344 goto bail;
345 }
346 lrh0 |= ah_attr->sl << 4;
347 if (qp->ibqp.qp_type == IB_QPT_SMI)
348 lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
349 qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
350 qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
351 qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC);
352 lid = ipath_layer_get_lid(dev->dd);
353 if (lid) {
354 lid |= ah_attr->src_path_bits &
355 ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
356 qp->s_hdr.lrh[3] = cpu_to_be16(lid);
357 } else
358 qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
359 if (wr->send_flags & IB_SEND_SOLICITED)
360 bth0 |= 1 << 23;
361 bth0 |= extra_bytes << 20;
362 bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPS_DEFAULT_P_KEY :
363 ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
364 ohdr->bth[0] = cpu_to_be32(bth0);
365 /*
366 * Use the multicast QP if the destination LID is a multicast LID.
367 */
368 ohdr->bth[1] = ah_attr->dlid >= IPS_MULTICAST_LID_BASE &&
369 ah_attr->dlid != IPS_PERMISSIVE_LID ?
370 __constant_cpu_to_be32(IPS_MULTICAST_QPN) :
371 cpu_to_be32(wr->wr.ud.remote_qpn);
372 /* XXX Could lose a PSN count but not worth locking */
373 ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPS_PSN_MASK);
374 /*
375 * Qkeys with the high order bit set mean use the
376 * qkey from the QP context instead of the WR (see 10.2.5).
377 */
378 ohdr->u.ud.deth[0] = cpu_to_be32((int)wr->wr.ud.remote_qkey < 0 ?
379 qp->qkey : wr->wr.ud.remote_qkey);
380 ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
381 if (ipath_verbs_send(dev->dd, hwords, (u32 *) &qp->s_hdr,
382 len, &ss))
383 dev->n_no_piobuf++;
384
385done:
386 /* Queue the completion status entry. */
387 if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
388 (wr->send_flags & IB_SEND_SIGNALED)) {
389 wc.wr_id = wr->wr_id;
390 wc.status = IB_WC_SUCCESS;
391 wc.vendor_err = 0;
392 wc.opcode = IB_WC_SEND;
393 wc.byte_len = len;
394 wc.qp_num = qp->ibqp.qp_num;
395 wc.src_qp = 0;
396 wc.wc_flags = 0;
397 /* XXX initialize other fields? */
398 ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
399 }
400 kfree(sg_list);
401
402 ret = 0;
403
404bail:
405 return ret;
406}
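/*
 * Worked example of the padding arithmetic above, for a 13 byte
 * payload:
 *
 *   extra_bytes = (4 - 13) & 3 = 3      pad up to a 4 byte boundary
 *   nwords      = (13 + 3) >> 2 = 4     payload length in 32-bit words
 *
 * extra_bytes << 20 records the pad count in bth[0] so the receiver
 * can subtract it again, and the LRH packet length is hwords (7
 * without a GRH, 17 with one) + nwords + SIZE_OF_CRC 32-bit words.
 */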
407
408/**
409 * ipath_ud_rcv - receive an incoming UD packet
410 * @dev: the device the packet came in on
411 * @hdr: the packet header
412 * @has_grh: true if the packet has a GRH
413 * @data: the packet data
414 * @tlen: the packet length
415 * @qp: the QP the packet came on
416 *
417 * This is called from ipath_qp_rcv() to process an incoming UD packet
418 * for the given QP.
419 * Called at interrupt level.
420 */
421void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
422 int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
423{
424 struct ipath_other_headers *ohdr;
425 int opcode;
426 u32 hdrsize;
427 u32 pad;
428 unsigned long flags;
429 struct ib_wc wc;
430 u32 qkey;
431 u32 src_qp;
432 struct ipath_rq *rq;
433 struct ipath_srq *srq;
434 struct ipath_rwqe *wqe;
435 u16 dlid;
436 int header_in_data;
437
438 /* Check for GRH */
439 if (!has_grh) {
440 ohdr = &hdr->u.oth;
441 hdrsize = 8 + 12 + 8; /* LRH + BTH + DETH */
442 qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
443 src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
444 header_in_data = 0;
445 } else {
446 ohdr = &hdr->u.l.oth;
447 hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
448 /*
449 * The header with GRH is 68 bytes and the core driver sets
450 * the eager header buffer size to 56 bytes so the last 12
451 * bytes of the IB header is in the data buffer.
452 */
453 header_in_data =
454 ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
455 if (header_in_data) {
456 qkey = be32_to_cpu(((__be32 *) data)[1]);
457 src_qp = be32_to_cpu(((__be32 *) data)[2]);
458 data += 12;
459 } else {
460 qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
461 src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
462 }
463 }
464 src_qp &= IPS_QPN_MASK;
465
466 /*
467 * Check that the permissive LID is only used on QP0
468 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
469 */
470 if (qp->ibqp.qp_num) {
471 if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
472 hdr->lrh[3] == IB_LID_PERMISSIVE)) {
473 dev->n_pkt_drops++;
474 goto bail;
475 }
476 if (unlikely(qkey != qp->qkey)) {
477 /* XXX OK to lose a count once in a while. */
478 dev->qkey_violations++;
479 dev->n_pkt_drops++;
480 goto bail;
481 }
482 } else if (hdr->lrh[1] == IB_LID_PERMISSIVE ||
483 hdr->lrh[3] == IB_LID_PERMISSIVE) {
484 struct ib_smp *smp = (struct ib_smp *) data;
485
486 if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
487 dev->n_pkt_drops++;
488 goto bail;
489 }
490 }
491
492 /* Get the number of bytes the message was padded by. */
493 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
494 if (unlikely(tlen < (hdrsize + pad + 4))) {
495 /* Drop incomplete packets. */
496 dev->n_pkt_drops++;
497 goto bail;
498 }
499 tlen -= hdrsize + pad + 4;
500
501 /* Drop invalid MAD packets (see 13.5.3.1). */
502 if (unlikely((qp->ibqp.qp_num == 0 &&
503 (tlen != 256 ||
504 (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)) ||
505 (qp->ibqp.qp_num == 1 &&
506 (tlen != 256 ||
507 (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))) {
508 dev->n_pkt_drops++;
509 goto bail;
510 }
511
512 /*
513	 * A GRH is expected to precede the data even if not
514 * present on the wire.
515 */
516 wc.byte_len = tlen + sizeof(struct ib_grh);
517
518 /*
519	 * The opcode is in the low byte when it's in network order
520 * (top byte when in host order).
521 */
522 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
523 if (qp->ibqp.qp_num > 1 &&
524 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
525 if (header_in_data) {
526 wc.imm_data = *(__be32 *) data;
527 data += sizeof(__be32);
528 } else
529 wc.imm_data = ohdr->u.ud.imm_data;
530 wc.wc_flags = IB_WC_WITH_IMM;
531 hdrsize += sizeof(u32);
532 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
533 wc.imm_data = 0;
534 wc.wc_flags = 0;
535 } else {
536 dev->n_pkt_drops++;
537 goto bail;
538 }
539
540 /*
541 * Get the next work request entry to find where to put the data.
542 * Note that it is safe to drop the lock after changing rq->tail
543 * since ipath_post_receive() won't fill the empty slot.
544 */
545 if (qp->ibqp.srq) {
546 srq = to_isrq(qp->ibqp.srq);
547 rq = &srq->rq;
548 } else {
549 srq = NULL;
550 rq = &qp->r_rq;
551 }
552 spin_lock_irqsave(&rq->lock, flags);
553 if (rq->tail == rq->head) {
554 spin_unlock_irqrestore(&rq->lock, flags);
555 dev->n_pkt_drops++;
556 goto bail;
557 }
558 /* Silently drop packets which are too big. */
559 wqe = get_rwqe_ptr(rq, rq->tail);
560 if (wc.byte_len > wqe->length) {
561 spin_unlock_irqrestore(&rq->lock, flags);
562 dev->n_pkt_drops++;
563 goto bail;
564 }
565 wc.wr_id = wqe->wr_id;
566 qp->r_sge.sge = wqe->sg_list[0];
567 qp->r_sge.sg_list = wqe->sg_list + 1;
568 qp->r_sge.num_sge = wqe->num_sge;
569 if (++rq->tail >= rq->size)
570 rq->tail = 0;
571 if (srq && srq->ibsrq.event_handler) {
572 u32 n;
573
574 if (rq->head < rq->tail)
575 n = rq->size + rq->head - rq->tail;
576 else
577 n = rq->head - rq->tail;
578 if (n < srq->limit) {
579 struct ib_event ev;
580
581 srq->limit = 0;
582 spin_unlock_irqrestore(&rq->lock, flags);
583 ev.device = qp->ibqp.device;
584 ev.element.srq = qp->ibqp.srq;
585 ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
586 srq->ibsrq.event_handler(&ev,
587 srq->ibsrq.srq_context);
588 } else
589 spin_unlock_irqrestore(&rq->lock, flags);
590 } else
591 spin_unlock_irqrestore(&rq->lock, flags);
592 if (has_grh) {
593 ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh,
594 sizeof(struct ib_grh));
595 wc.wc_flags |= IB_WC_GRH;
596 } else
597 ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh));
598 ipath_copy_sge(&qp->r_sge, data,
599 wc.byte_len - sizeof(struct ib_grh));
600 wc.status = IB_WC_SUCCESS;
601 wc.opcode = IB_WC_RECV;
602 wc.vendor_err = 0;
603 wc.qp_num = qp->ibqp.qp_num;
604 wc.src_qp = src_qp;
605 /* XXX do we know which pkey matched? Only needed for GSI. */
606 wc.pkey_index = 0;
607 wc.slid = be16_to_cpu(hdr->lrh[3]);
608 wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
609 dlid = be16_to_cpu(hdr->lrh[1]);
610 /*
611 * Save the LMC lower bits if the destination LID is a unicast LID.
612 */
613 wc.dlid_path_bits = dlid >= IPS_MULTICAST_LID_BASE ? 0 :
614 dlid & ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
615 /* Signal completion event if the solicited bit is set. */
616 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
617 (ohdr->bth[0] &
618 __constant_cpu_to_be32(1 << 23)) != 0);
619
620bail:;
621}
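/*
 * Worked example of the receive-side length accounting above.  With no
 * GRH the header is LRH + BTH + DETH = 8 + 12 + 8 = 28 bytes, so the
 * 13 byte payload from the send-side example (3 pad bytes) arrives as
 *
 *   tlen = 28 + 13 + 3 + 4 (ICRC) = 48 bytes
 *
 * and "tlen -= hdrsize + pad + 4" recovers the 13 payload bytes;
 * wc.byte_len then adds sizeof(struct ib_grh) back in, matching the
 * loopback path.
 */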
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
new file mode 100644
index 000000000000..2bb08afc86d0
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -0,0 +1,207 @@
1/*
2 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/mm.h>
34#include <linux/device.h>
35
36#include "ipath_kernel.h"
37
38static void __ipath_release_user_pages(struct page **p, size_t num_pages,
39 int dirty)
40{
41 size_t i;
42
43 for (i = 0; i < num_pages; i++) {
44 ipath_cdbg(MM, "%lu/%lu put_page %p\n", (unsigned long) i,
45 (unsigned long) num_pages, p[i]);
46 if (dirty)
47 set_page_dirty_lock(p[i]);
48 put_page(p[i]);
49 }
50}
51
52/* call with current->mm->mmap_sem held */
53static int __get_user_pages(unsigned long start_page, size_t num_pages,
54 struct page **p, struct vm_area_struct **vma)
55{
56 unsigned long lock_limit;
57 size_t got;
58 int ret;
59
60#if 0
61 /*
62 * XXX - causes MPI programs to fail, haven't had time to check
63 * yet
64 */
65 if (!capable(CAP_IPC_LOCK)) {
66 ret = -EPERM;
67 goto bail;
68 }
69#endif
70
71 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >>
72 PAGE_SHIFT;
73
74 if (num_pages > lock_limit) {
75 ret = -ENOMEM;
76 goto bail;
77 }
78
79 ipath_cdbg(VERBOSE, "pin %lx pages from vaddr %lx\n",
80 (unsigned long) num_pages, start_page);
81
82 for (got = 0; got < num_pages; got += ret) {
83 ret = get_user_pages(current, current->mm,
84 start_page + got * PAGE_SIZE,
85 num_pages - got, 1, 1,
86 p + got, vma);
87 if (ret < 0)
88 goto bail_release;
89 }
90
91 current->mm->locked_vm += num_pages;
92
93 ret = 0;
94 goto bail;
95
96bail_release:
97 __ipath_release_user_pages(p, got, 0);
98bail:
99 return ret;
100}
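/*
 * Worked example of the RLIMIT_MEMLOCK check above, which compares
 * page counts rather than byte counts.  With 4 KB pages and, say, a
 * 64 KB memlock limit:
 *
 *   lock_limit = 65536 >> PAGE_SHIFT = 16 pages
 *
 * so pinning more than 16 pages at once fails with -ENOMEM unless the
 * limit is raised (e.g. "ulimit -l").  Note that num_pages alone is
 * compared against the limit; pages already counted in
 * current->mm->locked_vm are not added in first.
 */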
101
102/**
103 * ipath_get_user_pages - lock user pages into memory
104 * @start_page: the start page
105 * @num_pages: the number of pages
106 * @p: the output page structures
107 *
108 * This function takes a given start page (page aligned user virtual
109 * address) and pins it and the following specified number of pages. For
110 * now, num_pages is always 1, but that will probably change at some point
111	 * (because the caller is doing expected sends on a single virtually contiguous
112 * buffer, so we can do all pages at once).
113 */
114int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
115 struct page **p)
116{
117 int ret;
118
119 down_write(&current->mm->mmap_sem);
120
121 ret = __get_user_pages(start_page, num_pages, p, NULL);
122
123 up_write(&current->mm->mmap_sem);
124
125 return ret;
126}
127
128/**
129 * ipath_get_user_pages_nocopy - lock a single page for I/O and mark shared
130 * @start_page: the page to lock
131 * @p: the output page structure
132 *
133 * This is similar to ipath_get_user_pages, but it's always one page, and we
134 * mark the page as locked for I/O, and shared. This is used for the user
135 * process page that contains the destination address for the rcvhdrq tail
136 * update, so we need to have the vma. If we don't do this, the page can be
137 * taken away from us on fork, even if the child never touches it, and then
138 * the user process never sees the tail register updates.
139 */
140int ipath_get_user_pages_nocopy(unsigned long page, struct page **p)
141{
142 struct vm_area_struct *vma;
143 int ret;
144
145 down_write(&current->mm->mmap_sem);
146
147 ret = __get_user_pages(page, 1, p, &vma);
148
149 up_write(&current->mm->mmap_sem);
150
151 return ret;
152}
153
154void ipath_release_user_pages(struct page **p, size_t num_pages)
155{
156 down_write(&current->mm->mmap_sem);
157
158 __ipath_release_user_pages(p, num_pages, 1);
159
160 current->mm->locked_vm -= num_pages;
161
162 up_write(&current->mm->mmap_sem);
163}
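/*
 * Hypothetical usage sketch (buf and NPAGES are placeholders): the pin
 * and release helpers are meant to be used in pairs, with the release
 * side also undoing the locked_vm accounting added by
 * __get_user_pages():
 *
 *   struct page *pages[NPAGES];
 *   int ret;
 *
 *   ret = ipath_get_user_pages((unsigned long) buf & PAGE_MASK,
 *                              NPAGES, pages);
 *   if (ret)
 *           return ret;
 *   ... DMA to or from the pinned pages ...
 *   ipath_release_user_pages(pages, NPAGES);
 */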
164
165struct ipath_user_pages_work {
166 struct work_struct work;
167 struct mm_struct *mm;
168 unsigned long num_pages;
169};
170
171static void user_pages_account(void *ptr)
172{
173 struct ipath_user_pages_work *work = ptr;
174
175 down_write(&work->mm->mmap_sem);
176 work->mm->locked_vm -= work->num_pages;
177 up_write(&work->mm->mmap_sem);
178 mmput(work->mm);
179 kfree(work);
180}
181
182void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
183{
184 struct ipath_user_pages_work *work;
185 struct mm_struct *mm;
186
187 __ipath_release_user_pages(p, num_pages, 1);
188
189 mm = get_task_mm(current);
190 if (!mm)
191 goto bail;
192
193 work = kmalloc(sizeof(*work), GFP_KERNEL);
194 if (!work)
195 goto bail_mm;
196
197	INIT_WORK(&work->work, user_pages_account, work);
198	work->mm = mm;
199	work->num_pages = num_pages;
200	schedule_work(&work->work);
201	goto bail;
202
203bail_mm:
204 mmput(mm);
205bail:
206 return;
207}
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
new file mode 100644
index 000000000000..9f27fd35cdbb
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -0,0 +1,1222 @@
1/*
2 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <rdma/ib_mad.h>
34#include <rdma/ib_user_verbs.h>
35#include <linux/utsname.h>
36
37#include "ipath_kernel.h"
38#include "ipath_verbs.h"
39#include "ips_common.h"
40
41/* Not static, because we don't want the compiler removing it */
42const char ipath_verbs_version[] = "ipath_verbs " IPATH_IDSTR;
43
44unsigned int ib_ipath_qp_table_size = 251;
45module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
46MODULE_PARM_DESC(qp_table_size, "QP table size");
47
48unsigned int ib_ipath_lkey_table_size = 12;
49module_param_named(lkey_table_size, ib_ipath_lkey_table_size, uint,
50 S_IRUGO);
51MODULE_PARM_DESC(lkey_table_size,
52 "LKEY table size in bits (2^n, 1 <= n <= 23)");
53
54unsigned int ib_ipath_debug; /* debug mask */
55module_param_named(debug, ib_ipath_debug, uint, S_IWUSR | S_IRUGO);
56MODULE_PARM_DESC(debug, "Verbs debug mask");
57
58MODULE_LICENSE("GPL");
59MODULE_AUTHOR("PathScale <support@pathscale.com>");
60MODULE_DESCRIPTION("PathScale InfiniPath driver");
61
62const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
63 [IB_QPS_RESET] = 0,
64 [IB_QPS_INIT] = IPATH_POST_RECV_OK,
65 [IB_QPS_RTR] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
66 [IB_QPS_RTS] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
67 IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK,
68 [IB_QPS_SQD] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
69 IPATH_POST_SEND_OK,
70 [IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
71 [IB_QPS_ERR] = 0,
72};
73
74/*
75 * Translate ib_wr_opcode into ib_wc_opcode.
76 */
77const enum ib_wc_opcode ib_ipath_wc_opcode[] = {
78 [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
79 [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
80 [IB_WR_SEND] = IB_WC_SEND,
81 [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
82 [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
83 [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
84 [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
85};
86
87/*
88 * System image GUID.
89 */
90__be64 sys_image_guid;
91
92/**
93 * ipath_copy_sge - copy data to SGE memory
94 * @ss: the SGE state
95 * @data: the data to copy
96 * @length: the length of the data
97 */
98void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length)
99{
100 struct ipath_sge *sge = &ss->sge;
101
102 while (length) {
103 u32 len = sge->length;
104
105 BUG_ON(len == 0);
106 if (len > length)
107 len = length;
108 memcpy(sge->vaddr, data, len);
109 sge->vaddr += len;
110 sge->length -= len;
111 sge->sge_length -= len;
112 if (sge->sge_length == 0) {
113 if (--ss->num_sge)
114 *sge = *ss->sg_list++;
115 } else if (sge->length == 0 && sge->mr != NULL) {
116 if (++sge->n >= IPATH_SEGSZ) {
117 if (++sge->m >= sge->mr->mapsz)
118 break;
119 sge->n = 0;
120 }
121 sge->vaddr =
122 sge->mr->map[sge->m]->segs[sge->n].vaddr;
123 sge->length =
124 sge->mr->map[sge->m]->segs[sge->n].length;
125 }
126 data += len;
127 length -= len;
128 }
129}
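/*
 * Note on the SGE walk above: two levels of state are advanced.
 * sge_length tracks what remains of the current scatter/gather entry
 * as posted; length/vaddr describe the current physically contiguous
 * segment of that entry's memory region (mr->map[m]->segs[n]).  When a
 * segment runs out before the entry does, the walk steps to the next
 * segment (wrapping n at IPATH_SEGSZ and advancing m); when the whole
 * entry runs out, the next entry is loaded from ss->sg_list.  The same
 * walk appears in ipath_skip_sge() below and in the UD loopback path.
 */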
130
131/**
132 * ipath_skip_sge - skip over SGE memory - XXX almost dup of prev func
133 * @ss: the SGE state
134 * @length: the number of bytes to skip
135 */
136void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
137{
138 struct ipath_sge *sge = &ss->sge;
139
140 while (length > sge->sge_length) {
141 length -= sge->sge_length;
142 ss->sge = *ss->sg_list++;
143 }
144 while (length) {
145 u32 len = sge->length;
146
147 BUG_ON(len == 0);
148 if (len > length)
149 len = length;
150 sge->vaddr += len;
151 sge->length -= len;
152 sge->sge_length -= len;
153 if (sge->sge_length == 0) {
154 if (--ss->num_sge)
155 *sge = *ss->sg_list++;
156 } else if (sge->length == 0 && sge->mr != NULL) {
157 if (++sge->n >= IPATH_SEGSZ) {
158 if (++sge->m >= sge->mr->mapsz)
159 break;
160 sge->n = 0;
161 }
162 sge->vaddr =
163 sge->mr->map[sge->m]->segs[sge->n].vaddr;
164 sge->length =
165 sge->mr->map[sge->m]->segs[sge->n].length;
166 }
167 length -= len;
168 }
169}
170
171/**
172 * ipath_post_send - post a send on a QP
173 * @ibqp: the QP to post the send on
174 * @wr: the list of work requests to post
175 * @bad_wr: the first bad WR is put here
176 *
177 * This may be called from interrupt context.
178 */
179static int ipath_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
180 struct ib_send_wr **bad_wr)
181{
182 struct ipath_qp *qp = to_iqp(ibqp);
183 int err = 0;
184
185 /* Check that state is OK to post send. */
186 if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK)) {
187 *bad_wr = wr;
188 err = -EINVAL;
189 goto bail;
190 }
191
192 for (; wr; wr = wr->next) {
193 switch (qp->ibqp.qp_type) {
194 case IB_QPT_UC:
195 case IB_QPT_RC:
196 err = ipath_post_rc_send(qp, wr);
197 break;
198
199 case IB_QPT_SMI:
200 case IB_QPT_GSI:
201 case IB_QPT_UD:
202 err = ipath_post_ud_send(qp, wr);
203 break;
204
205 default:
206 err = -EINVAL;
207 }
208 if (err) {
209 *bad_wr = wr;
210 break;
211 }
212 }
213
214bail:
215 return err;
216}
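/*
 * Hypothetical caller sketch (ah, qpn, qkey, dma_addr, len, lkey and
 * my_cookie are placeholders): a ULP reaches ipath_post_send() through
 * the core ib_post_send() verb once the device has been registered by
 * ipath_register_ib_device().  A single-SGE UD send would look roughly
 * like:
 *
 *   struct ib_sge sge = {
 *           .addr = dma_addr, .length = len, .lkey = lkey,
 *   };
 *   struct ib_send_wr wr = {
 *           .wr_id = my_cookie,
 *           .sg_list = &sge,
 *           .num_sge = 1,
 *           .opcode = IB_WR_SEND,
 *           .send_flags = IB_SEND_SIGNALED,
 *   };
 *   struct ib_send_wr *bad_wr;
 *
 *   wr.wr.ud.ah = ah;
 *   wr.wr.ud.remote_qpn = qpn;
 *   wr.wr.ud.remote_qkey = qkey;
 *   ret = ib_post_send(qp, &wr, &bad_wr);
 */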
217
218/**
219 * ipath_post_receive - post a receive on a QP
220 * @ibqp: the QP to post the receive on
221 * @wr: the WR to post
222 * @bad_wr: the first bad WR is put here
223 *
224 * This may be called from interrupt context.
225 */
226static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
227 struct ib_recv_wr **bad_wr)
228{
229 struct ipath_qp *qp = to_iqp(ibqp);
230 unsigned long flags;
231 int ret;
232
233 /* Check that state is OK to post receive. */
234 if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK)) {
235 *bad_wr = wr;
236 ret = -EINVAL;
237 goto bail;
238 }
239
240 for (; wr; wr = wr->next) {
241 struct ipath_rwqe *wqe;
242 u32 next;
243 int i, j;
244
245 if (wr->num_sge > qp->r_rq.max_sge) {
246 *bad_wr = wr;
247 ret = -ENOMEM;
248 goto bail;
249 }
250
251 spin_lock_irqsave(&qp->r_rq.lock, flags);
252 next = qp->r_rq.head + 1;
253 if (next >= qp->r_rq.size)
254 next = 0;
255 if (next == qp->r_rq.tail) {
256 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
257 *bad_wr = wr;
258 ret = -ENOMEM;
259 goto bail;
260 }
261
262 wqe = get_rwqe_ptr(&qp->r_rq, qp->r_rq.head);
263 wqe->wr_id = wr->wr_id;
264 wqe->sg_list[0].mr = NULL;
265 wqe->sg_list[0].vaddr = NULL;
266 wqe->sg_list[0].length = 0;
267 wqe->sg_list[0].sge_length = 0;
268 wqe->length = 0;
269 for (i = 0, j = 0; i < wr->num_sge; i++) {
270 /* Check LKEY */
271 if (to_ipd(qp->ibqp.pd)->user &&
272 wr->sg_list[i].lkey == 0) {
273 spin_unlock_irqrestore(&qp->r_rq.lock,
274 flags);
275 *bad_wr = wr;
276 ret = -EINVAL;
277 goto bail;
278 }
279 if (wr->sg_list[i].length == 0)
280 continue;
281 if (!ipath_lkey_ok(
282 &to_idev(qp->ibqp.device)->lk_table,
283 &wqe->sg_list[j], &wr->sg_list[i],
284 IB_ACCESS_LOCAL_WRITE)) {
285 spin_unlock_irqrestore(&qp->r_rq.lock,
286 flags);
287 *bad_wr = wr;
288 ret = -EINVAL;
289 goto bail;
290 }
291 wqe->length += wr->sg_list[i].length;
292 j++;
293 }
294 wqe->num_sge = j;
295 qp->r_rq.head = next;
296 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
297 }
298 ret = 0;
299
300bail:
301 return ret;
302}
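/*
 * Note on the ring-full test above: the receive queue is a ring indexed
 * by head (producer, advanced here) and tail (consumer, advanced on
 * receive).  One slot is deliberately left unused so that full and
 * empty can be distinguished without a separate count:
 *
 *   empty:  head == tail
 *   full:   (head + 1) % size == tail     (the -ENOMEM case above)
 *
 * e.g. with size = 4, head = 3, tail = 0: next wraps to 0 == tail, so
 * the post is rejected even though one slot still holds no WQE.
 */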
303
304/**
305 * ipath_qp_rcv - processing an incoming packet on a QP
306 * @dev: the device the packet came on
307 * @hdr: the packet header
308 * @has_grh: true if the packet has a GRH
309 * @data: the packet data
310 * @tlen: the packet length
311 * @qp: the QP the packet came on
312 *
313 * This is called from ipath_ib_rcv() to process an incoming packet
314 * for the given QP.
315 * Called at interrupt level.
316 */
317static void ipath_qp_rcv(struct ipath_ibdev *dev,
318 struct ipath_ib_header *hdr, int has_grh,
319 void *data, u32 tlen, struct ipath_qp *qp)
320{
321 /* Check for valid receive state. */
322 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
323 dev->n_pkt_drops++;
324 return;
325 }
326
327 switch (qp->ibqp.qp_type) {
328 case IB_QPT_SMI:
329 case IB_QPT_GSI:
330 case IB_QPT_UD:
331 ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp);
332 break;
333
334 case IB_QPT_RC:
335 ipath_rc_rcv(dev, hdr, has_grh, data, tlen, qp);
336 break;
337
338 case IB_QPT_UC:
339 ipath_uc_rcv(dev, hdr, has_grh, data, tlen, qp);
340 break;
341
342 default:
343 break;
344 }
345}
346
347/**
348 * ipath_ib_rcv - process an incoming packet
349 * @arg: the device pointer
350 * @rhdr: the header of the packet
351 * @data: the packet data
352 * @tlen: the packet length
353 *
354 * This is called from ipath_kreceive() to process an incoming packet at
355 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
356 */
357static void ipath_ib_rcv(void *arg, void *rhdr, void *data, u32 tlen)
358{
359 struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
360 struct ipath_ib_header *hdr = rhdr;
361 struct ipath_other_headers *ohdr;
362 struct ipath_qp *qp;
363 u32 qp_num;
364 int lnh;
365 u8 opcode;
366 u16 lid;
367
368 if (unlikely(dev == NULL))
369 goto bail;
370
371 if (unlikely(tlen < 24)) { /* LRH+BTH+CRC */
372 dev->rcv_errors++;
373 goto bail;
374 }
375
376 /* Check for a valid destination LID (see ch. 7.11.1). */
377 lid = be16_to_cpu(hdr->lrh[1]);
378 if (lid < IPS_MULTICAST_LID_BASE) {
379 lid &= ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
380 if (unlikely(lid != ipath_layer_get_lid(dev->dd))) {
381 dev->rcv_errors++;
382 goto bail;
383 }
384 }
385
386 /* Check for GRH */
387 lnh = be16_to_cpu(hdr->lrh[0]) & 3;
388 if (lnh == IPS_LRH_BTH)
389 ohdr = &hdr->u.oth;
390 else if (lnh == IPS_LRH_GRH)
391 ohdr = &hdr->u.l.oth;
392 else {
393 dev->rcv_errors++;
394 goto bail;
395 }
396
397 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
398 dev->opstats[opcode].n_bytes += tlen;
399 dev->opstats[opcode].n_packets++;
400
401 /* Get the destination QP number. */
402 qp_num = be32_to_cpu(ohdr->bth[1]) & IPS_QPN_MASK;
403 if (qp_num == IPS_MULTICAST_QPN) {
404 struct ipath_mcast *mcast;
405 struct ipath_mcast_qp *p;
406
407 mcast = ipath_mcast_find(&hdr->u.l.grh.dgid);
408 if (mcast == NULL) {
409 dev->n_pkt_drops++;
410 goto bail;
411 }
412 dev->n_multicast_rcv++;
413 list_for_each_entry_rcu(p, &mcast->qp_list, list)
414 ipath_qp_rcv(dev, hdr, lnh == IPS_LRH_GRH, data,
415 tlen, p->qp);
416 /*
417 * Notify ipath_multicast_detach() if it is waiting for us
418 * to finish.
419 */
420 if (atomic_dec_return(&mcast->refcount) <= 1)
421 wake_up(&mcast->wait);
422 } else {
423 qp = ipath_lookup_qpn(&dev->qp_table, qp_num);
424 if (qp) {
425 dev->n_unicast_rcv++;
426 ipath_qp_rcv(dev, hdr, lnh == IPS_LRH_GRH, data,
427 tlen, qp);
428 /*
429 * Notify ipath_destroy_qp() if it is waiting
430 * for us to finish.
431 */
432 if (atomic_dec_and_test(&qp->refcount))
433 wake_up(&qp->wait);
434 } else
435 dev->n_pkt_drops++;
436 }
437
438bail:;
439}
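/*
 * Worked example of the destination LID check above: the LMC low bits
 * are masked off so any LID in the port's LMC range is accepted.  With
 * an assigned LID of 0x10 and LMC = 2 the port answers to 0x10..0x13,
 * e.g.
 *
 *   lid = 0x12;  lid &= ~((1 << 2) - 1);   ->  0x10
 *
 * which matches ipath_layer_get_lid().  The same mask (the low 3 bits
 * of mkeyprot_resv_lmc) folds the source path bits into the SLID on
 * the send side.
 */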
440
441/**
442 * ipath_ib_timer - verbs timer
443 * @arg: the device pointer
444 *
445 * This is called from ipath_do_rcv_timer() at interrupt level to check for
446 * QPs which need retransmits and to collect performance numbers.
447 */
448static void ipath_ib_timer(void *arg)
449{
450 struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
451 struct ipath_qp *resend = NULL;
452 struct ipath_qp *rnr = NULL;
453 struct list_head *last;
454 struct ipath_qp *qp;
455 unsigned long flags;
456
457 if (dev == NULL)
458 return;
459
460 spin_lock_irqsave(&dev->pending_lock, flags);
461 /* Start filling the next pending queue. */
462 if (++dev->pending_index >= ARRAY_SIZE(dev->pending))
463 dev->pending_index = 0;
464 /* Save any requests still in the new queue, they have timed out. */
465 last = &dev->pending[dev->pending_index];
466 while (!list_empty(last)) {
467 qp = list_entry(last->next, struct ipath_qp, timerwait);
468 if (last->next == LIST_POISON1 ||
469 last->next != &qp->timerwait ||
470 qp->timerwait.prev != last) {
471 INIT_LIST_HEAD(last);
472 } else {
473 list_del(&qp->timerwait);
474 qp->timerwait.prev = (struct list_head *) resend;
475 resend = qp;
476 atomic_inc(&qp->refcount);
477 }
478 }
479 last = &dev->rnrwait;
480 if (!list_empty(last)) {
481 qp = list_entry(last->next, struct ipath_qp, timerwait);
482 if (--qp->s_rnr_timeout == 0) {
483 do {
484 if (last->next == LIST_POISON1 ||
485 last->next != &qp->timerwait ||
486 qp->timerwait.prev != last) {
487 INIT_LIST_HEAD(last);
488 break;
489 }
490 list_del(&qp->timerwait);
491 qp->timerwait.prev =
492 (struct list_head *) rnr;
493 rnr = qp;
494 if (list_empty(last))
495 break;
496 qp = list_entry(last->next, struct ipath_qp,
497 timerwait);
498 } while (qp->s_rnr_timeout == 0);
499 }
500 }
501 /*
502 * We should only be in the started state if pma_sample_start != 0
503 */
504 if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED &&
505 --dev->pma_sample_start == 0) {
506 dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
507 ipath_layer_snapshot_counters(dev->dd, &dev->ipath_sword,
508 &dev->ipath_rword,
509 &dev->ipath_spkts,
510 &dev->ipath_rpkts,
511 &dev->ipath_xmit_wait);
512 }
513 if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
514 if (dev->pma_sample_interval == 0) {
515 u64 ta, tb, tc, td, te;
516
517 dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
518 ipath_layer_snapshot_counters(dev->dd, &ta, &tb,
519 &tc, &td, &te);
520
521 dev->ipath_sword = ta - dev->ipath_sword;
522 dev->ipath_rword = tb - dev->ipath_rword;
523 dev->ipath_spkts = tc - dev->ipath_spkts;
524 dev->ipath_rpkts = td - dev->ipath_rpkts;
525 dev->ipath_xmit_wait = te - dev->ipath_xmit_wait;
526 }
527 else
528 dev->pma_sample_interval--;
529 }
530 spin_unlock_irqrestore(&dev->pending_lock, flags);
531
532 /* XXX What if timer fires again while this is running? */
533 for (qp = resend; qp != NULL;
534 qp = (struct ipath_qp *) qp->timerwait.prev) {
535 struct ib_wc wc;
536
537 spin_lock_irqsave(&qp->s_lock, flags);
538 if (qp->s_last != qp->s_tail && qp->state == IB_QPS_RTS) {
539 dev->n_timeouts++;
540 ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
541 }
542 spin_unlock_irqrestore(&qp->s_lock, flags);
543
544 /* Notify ipath_destroy_qp() if it is waiting. */
545 if (atomic_dec_and_test(&qp->refcount))
546 wake_up(&qp->wait);
547 }
548 for (qp = rnr; qp != NULL;
549 qp = (struct ipath_qp *) qp->timerwait.prev)
550 tasklet_hi_schedule(&qp->s_task);
551}
552
553/**
554 * ipath_ib_piobufavail - callback when a PIO buffer is available
555 * @arg: the device pointer
556 *
557 * This is called from ipath_intr() at interrupt level when a PIO buffer is
558 * available after ipath_verbs_send() returned an error that no buffers were
559 * available. Return 0 if we consumed all the PIO buffers and we still have
560 * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and
561 * return one).
562 */
563static int ipath_ib_piobufavail(void *arg)
564{
565 struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
566 struct ipath_qp *qp;
567 unsigned long flags;
568
569 if (dev == NULL)
570 goto bail;
571
572 spin_lock_irqsave(&dev->pending_lock, flags);
573 while (!list_empty(&dev->piowait)) {
574 qp = list_entry(dev->piowait.next, struct ipath_qp,
575 piowait);
576 list_del(&qp->piowait);
577 tasklet_hi_schedule(&qp->s_task);
578 }
579 spin_unlock_irqrestore(&dev->pending_lock, flags);
580
581bail:
582 return 1;
583}
584
585static int ipath_query_device(struct ib_device *ibdev,
586 struct ib_device_attr *props)
587{
588 struct ipath_ibdev *dev = to_idev(ibdev);
589 u32 vendor, boardrev, majrev, minrev;
590
591 memset(props, 0, sizeof(*props));
592
593 props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
594 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
595 IB_DEVICE_SYS_IMAGE_GUID;
596 ipath_layer_query_device(dev->dd, &vendor, &boardrev,
597 &majrev, &minrev);
598 props->vendor_id = vendor;
599 props->vendor_part_id = boardrev;
600 props->hw_ver = boardrev << 16 | majrev << 8 | minrev;
601
602 props->sys_image_guid = dev->sys_image_guid;
603
604 props->max_mr_size = ~0ull;
605 props->max_qp = 0xffff;
606 props->max_qp_wr = 0xffff;
607 props->max_sge = 255;
608 props->max_cq = 0xffff;
609 props->max_cqe = 0xffff;
610 props->max_mr = 0xffff;
611 props->max_pd = 0xffff;
612 props->max_qp_rd_atom = 1;
613 props->max_qp_init_rd_atom = 1;
614 /* props->max_res_rd_atom */
615 props->max_srq = 0xffff;
616 props->max_srq_wr = 0xffff;
617 props->max_srq_sge = 255;
618 /* props->local_ca_ack_delay */
619 props->atomic_cap = IB_ATOMIC_HCA;
620 props->max_pkeys = ipath_layer_get_npkeys(dev->dd);
621 props->max_mcast_grp = 0xffff;
622 props->max_mcast_qp_attach = 0xffff;
623 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
624 props->max_mcast_grp;
625
626 return 0;
627}
628
629const u8 ipath_cvt_physportstate[16] = {
630 [INFINIPATH_IBCS_LT_STATE_DISABLED] = 3,
631 [INFINIPATH_IBCS_LT_STATE_LINKUP] = 5,
632 [INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = 2,
633 [INFINIPATH_IBCS_LT_STATE_POLLQUIET] = 2,
634 [INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = 1,
635 [INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = 1,
636 [INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] = 4,
637 [INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] = 4,
638 [INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] = 4,
639 [INFINIPATH_IBCS_LT_STATE_CFGIDLE] = 4,
640 [INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] = 6,
641 [INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] = 6,
642 [INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] = 6,
643};
644
645static int ipath_query_port(struct ib_device *ibdev,
646 u8 port, struct ib_port_attr *props)
647{
648 struct ipath_ibdev *dev = to_idev(ibdev);
649 enum ib_mtu mtu;
650 u16 lid = ipath_layer_get_lid(dev->dd);
651 u64 ibcstat;
652
653 memset(props, 0, sizeof(*props));
654 props->lid = lid ? lid : __constant_be16_to_cpu(IB_LID_PERMISSIVE);
655 props->lmc = dev->mkeyprot_resv_lmc & 7;
656 props->sm_lid = dev->sm_lid;
657 props->sm_sl = dev->sm_sl;
658 ibcstat = ipath_layer_get_lastibcstat(dev->dd);
659 props->state = ((ibcstat >> 4) & 0x3) + 1;
660 /* See phys_state_show() */
661 props->phys_state = ipath_cvt_physportstate[
662 ipath_layer_get_lastibcstat(dev->dd) & 0xf];
663 props->port_cap_flags = dev->port_cap_flags;
664 props->gid_tbl_len = 1;
665 props->max_msg_sz = 4096;
666 props->pkey_tbl_len = ipath_layer_get_npkeys(dev->dd);
667 props->bad_pkey_cntr = ipath_layer_get_cr_errpkey(dev->dd) -
668 dev->n_pkey_violations;
669 props->qkey_viol_cntr = dev->qkey_violations;
670 props->active_width = IB_WIDTH_4X;
671 /* See rate_show() */
672	props->active_speed = 1;	/* 2.5 Gb/sec per lane (SDR). */
673 props->max_vl_num = 1; /* VLCap = VL0 */
674 props->init_type_reply = 0;
675
676 props->max_mtu = IB_MTU_4096;
677 switch (ipath_layer_get_ibmtu(dev->dd)) {
678 case 4096:
679 mtu = IB_MTU_4096;
680 break;
681 case 2048:
682 mtu = IB_MTU_2048;
683 break;
684 case 1024:
685 mtu = IB_MTU_1024;
686 break;
687 case 512:
688 mtu = IB_MTU_512;
689 break;
690 case 256:
691 mtu = IB_MTU_256;
692 break;
693 default:
694 mtu = IB_MTU_2048;
695 }
696 props->active_mtu = mtu;
697 props->subnet_timeout = dev->subnet_timeout;
698
699 return 0;
700}
701
702static int ipath_modify_device(struct ib_device *device,
703 int device_modify_mask,
704 struct ib_device_modify *device_modify)
705{
706 int ret;
707
708 if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
709 IB_DEVICE_MODIFY_NODE_DESC)) {
710 ret = -EOPNOTSUPP;
711 goto bail;
712 }
713
714 if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC)
715 memcpy(device->node_desc, device_modify->node_desc, 64);
716
717 if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
718 to_idev(device)->sys_image_guid =
719 cpu_to_be64(device_modify->sys_image_guid);
720
721 ret = 0;
722
723bail:
724 return ret;
725}
726
727static int ipath_modify_port(struct ib_device *ibdev,
728 u8 port, int port_modify_mask,
729 struct ib_port_modify *props)
730{
731 struct ipath_ibdev *dev = to_idev(ibdev);
732
733 dev->port_cap_flags |= props->set_port_cap_mask;
734 dev->port_cap_flags &= ~props->clr_port_cap_mask;
735 if (port_modify_mask & IB_PORT_SHUTDOWN)
736 ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
737 if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
738 dev->qkey_violations = 0;
739 return 0;
740}
741
742static int ipath_query_gid(struct ib_device *ibdev, u8 port,
743 int index, union ib_gid *gid)
744{
745 struct ipath_ibdev *dev = to_idev(ibdev);
746 int ret;
747
748 if (index >= 1) {
749 ret = -EINVAL;
750 goto bail;
751 }
752 gid->global.subnet_prefix = dev->gid_prefix;
753 gid->global.interface_id = ipath_layer_get_guid(dev->dd);
754
755 ret = 0;
756
757bail:
758 return ret;
759}
760
761static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev,
762 struct ib_ucontext *context,
763 struct ib_udata *udata)
764{
765 struct ipath_pd *pd;
766 struct ib_pd *ret;
767
768 pd = kmalloc(sizeof *pd, GFP_KERNEL);
769 if (!pd) {
770 ret = ERR_PTR(-ENOMEM);
771 goto bail;
772 }
773
774 /* ib_alloc_pd() will initialize pd->ibpd. */
775 pd->user = udata != NULL;
776
777 ret = &pd->ibpd;
778
779bail:
780 return ret;
781}
782
783static int ipath_dealloc_pd(struct ib_pd *ibpd)
784{
785 struct ipath_pd *pd = to_ipd(ibpd);
786
787 kfree(pd);
788
789 return 0;
790}
791
792/**
793 * ipath_create_ah - create an address handle
794 * @pd: the protection domain
795 * @ah_attr: the attributes of the AH
796 *
797 * This may be called from interrupt context.
798 */
799static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
800 struct ib_ah_attr *ah_attr)
801{
802 struct ipath_ah *ah;
803 struct ib_ah *ret;
804
805 /* A multicast address requires a GRH (see ch. 8.4.1). */
806 if (ah_attr->dlid >= IPS_MULTICAST_LID_BASE &&
807 ah_attr->dlid != IPS_PERMISSIVE_LID &&
808 !(ah_attr->ah_flags & IB_AH_GRH)) {
809 ret = ERR_PTR(-EINVAL);
810 goto bail;
811 }
812
813 ah = kmalloc(sizeof *ah, GFP_ATOMIC);
814 if (!ah) {
815 ret = ERR_PTR(-ENOMEM);
816 goto bail;
817 }
818
819 /* ib_create_ah() will initialize ah->ibah. */
820 ah->attr = *ah_attr;
821
822 ret = &ah->ibah;
823
824bail:
825 return ret;
826}
827
828/**
829 * ipath_destroy_ah - destroy an address handle
830 * @ibah: the AH to destroy
831 *
832 * This may be called from interrupt context.
833 */
834static int ipath_destroy_ah(struct ib_ah *ibah)
835{
836 struct ipath_ah *ah = to_iah(ibah);
837
838 kfree(ah);
839
840 return 0;
841}
842
843static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
844{
845 struct ipath_ah *ah = to_iah(ibah);
846
847 *ah_attr = ah->attr;
848
849 return 0;
850}
851
852static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
853 u16 *pkey)
854{
855 struct ipath_ibdev *dev = to_idev(ibdev);
856 int ret;
857
858 if (index >= ipath_layer_get_npkeys(dev->dd)) {
859 ret = -EINVAL;
860 goto bail;
861 }
862
863 *pkey = ipath_layer_get_pkey(dev->dd, index);
864 ret = 0;
865
866bail:
867 return ret;
868}
869
870
871/**
872 * ipath_alloc_ucontext - allocate a ucontext
873 * @ibdev: the infiniband device
874 * @udata: not used by the InfiniPath driver
875 */
876
877static struct ib_ucontext *ipath_alloc_ucontext(struct ib_device *ibdev,
878 struct ib_udata *udata)
879{
880 struct ipath_ucontext *context;
881 struct ib_ucontext *ret;
882
883 context = kmalloc(sizeof *context, GFP_KERNEL);
884 if (!context) {
885 ret = ERR_PTR(-ENOMEM);
886 goto bail;
887 }
888
889 ret = &context->ibucontext;
890
891bail:
892 return ret;
893}
894
895static int ipath_dealloc_ucontext(struct ib_ucontext *context)
896{
897 kfree(to_iucontext(context));
898 return 0;
899}
900
901static int ipath_verbs_register_sysfs(struct ib_device *dev);
902
903/**
904 * ipath_register_ib_device - register our device with the infiniband core
905 * @unit: the device number to register
906 * @dd: the device data structure
907 * Return the allocated ipath_ibdev pointer or NULL on error.
908 */
909static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
910{
911 struct ipath_ibdev *idev;
912 struct ib_device *dev;
913 int ret;
914
915 idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
916 if (idev == NULL)
917 goto bail;
918
919 dev = &idev->ibdev;
920
921 /* Only need to initialize non-zero fields. */
922 spin_lock_init(&idev->qp_table.lock);
923 spin_lock_init(&idev->lk_table.lock);
924 idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE);
925 /* Set the prefix to the default value (see ch. 4.1.1) */
926 idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL);
927
928 ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
929 if (ret)
930 goto err_qp;
931
932 /*
933 * The top ib_ipath_lkey_table_size bits are used to index the
934 * table. The lower 8 bits can be owned by the user (copied from
935 * the LKEY). The remaining bits act as a generation number or tag.
936 */
937 idev->lk_table.max = 1 << ib_ipath_lkey_table_size;
938 idev->lk_table.table = kzalloc(idev->lk_table.max *
939 sizeof(*idev->lk_table.table),
940 GFP_KERNEL);
941 if (idev->lk_table.table == NULL) {
942 ret = -ENOMEM;
943 goto err_lk;
944 }
945 spin_lock_init(&idev->pending_lock);
946 INIT_LIST_HEAD(&idev->pending[0]);
947 INIT_LIST_HEAD(&idev->pending[1]);
948 INIT_LIST_HEAD(&idev->pending[2]);
949 INIT_LIST_HEAD(&idev->piowait);
950 INIT_LIST_HEAD(&idev->rnrwait);
951 idev->pending_index = 0;
952 idev->port_cap_flags =
953 IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP;
954 idev->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
955 idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
956 idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
957 idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
958	idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
959 idev->link_width_enabled = 3; /* 1x or 4x */
960
961 /*
962 * The system image GUID is supposed to be the same for all
963 * IB HCAs in a single system but since there can be other
964 * device types in the system, we can't be sure this is unique.
965 */
966 if (!sys_image_guid)
967 sys_image_guid = ipath_layer_get_guid(dd);
968 idev->sys_image_guid = sys_image_guid;
969 idev->ib_unit = unit;
970 idev->dd = dd;
971
972 strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
973 dev->node_guid = ipath_layer_get_guid(dd);
974 dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
975 dev->uverbs_cmd_mask =
976 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
977 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
978 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
979 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
980 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
981 (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
982 (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
983 (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
984 (1ull << IB_USER_VERBS_CMD_REG_MR) |
985 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
986 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
987 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
988 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
989 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
990 (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
991 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
992 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
993 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
994 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
995 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
996 (1ull << IB_USER_VERBS_CMD_POST_SEND) |
997 (1ull << IB_USER_VERBS_CMD_POST_RECV) |
998 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
999 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
1000 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
1001 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
1002 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
1003 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
1004 (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
1005 dev->node_type = IB_NODE_CA;
1006 dev->phys_port_cnt = 1;
1007 dev->dma_device = ipath_layer_get_device(dd);
1008 dev->class_dev.dev = dev->dma_device;
1009 dev->query_device = ipath_query_device;
1010 dev->modify_device = ipath_modify_device;
1011 dev->query_port = ipath_query_port;
1012 dev->modify_port = ipath_modify_port;
1013 dev->query_pkey = ipath_query_pkey;
1014 dev->query_gid = ipath_query_gid;
1015 dev->alloc_ucontext = ipath_alloc_ucontext;
1016 dev->dealloc_ucontext = ipath_dealloc_ucontext;
1017 dev->alloc_pd = ipath_alloc_pd;
1018 dev->dealloc_pd = ipath_dealloc_pd;
1019 dev->create_ah = ipath_create_ah;
1020 dev->destroy_ah = ipath_destroy_ah;
1021 dev->query_ah = ipath_query_ah;
1022 dev->create_srq = ipath_create_srq;
1023 dev->modify_srq = ipath_modify_srq;
1024 dev->query_srq = ipath_query_srq;
1025 dev->destroy_srq = ipath_destroy_srq;
1026 dev->create_qp = ipath_create_qp;
1027 dev->modify_qp = ipath_modify_qp;
1028 dev->query_qp = ipath_query_qp;
1029 dev->destroy_qp = ipath_destroy_qp;
1030 dev->post_send = ipath_post_send;
1031 dev->post_recv = ipath_post_receive;
1032 dev->post_srq_recv = ipath_post_srq_receive;
1033 dev->create_cq = ipath_create_cq;
1034 dev->destroy_cq = ipath_destroy_cq;
1035 dev->resize_cq = ipath_resize_cq;
1036 dev->poll_cq = ipath_poll_cq;
1037 dev->req_notify_cq = ipath_req_notify_cq;
1038 dev->get_dma_mr = ipath_get_dma_mr;
1039 dev->reg_phys_mr = ipath_reg_phys_mr;
1040 dev->reg_user_mr = ipath_reg_user_mr;
1041 dev->dereg_mr = ipath_dereg_mr;
1042 dev->alloc_fmr = ipath_alloc_fmr;
1043 dev->map_phys_fmr = ipath_map_phys_fmr;
1044 dev->unmap_fmr = ipath_unmap_fmr;
1045 dev->dealloc_fmr = ipath_dealloc_fmr;
1046 dev->attach_mcast = ipath_multicast_attach;
1047 dev->detach_mcast = ipath_multicast_detach;
1048 dev->process_mad = ipath_process_mad;
1049
1050 snprintf(dev->node_desc, sizeof(dev->node_desc),
1051 IPATH_IDSTR " %s kernel_SMA", system_utsname.nodename);
1052
1053 ret = ib_register_device(dev);
1054 if (ret)
1055 goto err_reg;
1056
1057 if (ipath_verbs_register_sysfs(dev))
1058 goto err_class;
1059
1060 ipath_layer_enable_timer(dd);
1061
1062 goto bail;
1063
1064err_class:
1065 ib_unregister_device(dev);
1066err_reg:
1067 kfree(idev->lk_table.table);
1068err_lk:
1069 kfree(idev->qp_table.table);
1070err_qp:
1071 ib_dealloc_device(dev);
1072 _VERBS_ERROR("ib_ipath%d cannot register verbs (%d)!\n",
1073 unit, -ret);
1074 idev = NULL;
1075
1076bail:
1077 return idev;
1078}
1079
1080static void ipath_unregister_ib_device(void *arg)
1081{
1082 struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
1083 struct ib_device *ibdev = &dev->ibdev;
1084
1085 ipath_layer_disable_timer(dev->dd);
1086
1087 ib_unregister_device(ibdev);
1088
1089 if (!list_empty(&dev->pending[0]) ||
1090 !list_empty(&dev->pending[1]) ||
1091 !list_empty(&dev->pending[2]))
1092 _VERBS_ERROR("ipath%d pending list not empty!\n",
1093 dev->ib_unit);
1094 if (!list_empty(&dev->piowait))
1095 _VERBS_ERROR("ipath%d piowait list not empty!\n",
1096 dev->ib_unit);
1097 if (!list_empty(&dev->rnrwait))
1098 _VERBS_ERROR("ipath%d rnrwait list not empty!\n",
1099 dev->ib_unit);
1100 if (!ipath_mcast_tree_empty())
1101 _VERBS_ERROR("ipath%d multicast table memory leak!\n",
1102 dev->ib_unit);
1103 /*
1104 * Note that ipath_unregister_ib_device() can be called before all
1105 * the QPs are destroyed!
1106 */
1107 ipath_free_all_qps(&dev->qp_table);
1108 kfree(dev->qp_table.table);
1109 kfree(dev->lk_table.table);
1110 ib_dealloc_device(ibdev);
1111}
1112
1113int __init ipath_verbs_init(void)
1114{
1115 return ipath_verbs_register(ipath_register_ib_device,
1116 ipath_unregister_ib_device,
1117 ipath_ib_piobufavail, ipath_ib_rcv,
1118 ipath_ib_timer);
1119}
1120
1121void __exit ipath_verbs_cleanup(void)
1122{
1123 ipath_verbs_unregister();
1124}
1125
1126static ssize_t show_rev(struct class_device *cdev, char *buf)
1127{
1128 struct ipath_ibdev *dev =
1129 container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
1130 int vendor, boardrev, majrev, minrev;
1131
1132 ipath_layer_query_device(dev->dd, &vendor, &boardrev,
1133 &majrev, &minrev);
1134 return sprintf(buf, "%d.%d\n", majrev, minrev);
1135}
1136
1137static ssize_t show_hca(struct class_device *cdev, char *buf)
1138{
1139 struct ipath_ibdev *dev =
1140 container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
1141 int ret;
1142
1143 ret = ipath_layer_get_boardname(dev->dd, buf, 128);
1144 if (ret < 0)
1145 goto bail;
1146 strcat(buf, "\n");
1147 ret = strlen(buf);
1148
1149bail:
1150 return ret;
1151}
1152
1153static ssize_t show_stats(struct class_device *cdev, char *buf)
1154{
1155 struct ipath_ibdev *dev =
1156 container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
1157 int i;
1158 int len;
1159
1160 len = sprintf(buf,
1161 "RC resends %d\n"
1162 "RC QACKs %d\n"
1163 "RC ACKs %d\n"
1164 "RC SEQ NAKs %d\n"
1165 "RC RDMA seq %d\n"
1166 "RC RNR NAKs %d\n"
1167 "RC OTH NAKs %d\n"
1168 "RC timeouts %d\n"
1169 "RC RDMA dup %d\n"
1170 "piobuf wait %d\n"
1171 "no piobuf %d\n"
1172 "PKT drops %d\n"
1173 "WQE errs %d\n",
1174 dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
1175 dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
1176 dev->n_other_naks, dev->n_timeouts,
1177 dev->n_rdma_dup_busy, dev->n_piowait,
1178 dev->n_no_piobuf, dev->n_pkt_drops, dev->n_wqe_errs);
1179 for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
1180 const struct ipath_opcode_stats *si = &dev->opstats[i];
1181
1182 if (!si->n_packets && !si->n_bytes)
1183 continue;
1184 len += sprintf(buf + len, "%02x %llu/%llu\n", i,
1185 (unsigned long long) si->n_packets,
1186 (unsigned long long) si->n_bytes);
1187 }
1188 return len;
1189}
1190
1191static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
1192static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
1193static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
1194static CLASS_DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL);
1195
1196static struct class_device_attribute *ipath_class_attributes[] = {
1197 &class_device_attr_hw_rev,
1198 &class_device_attr_hca_type,
1199 &class_device_attr_board_id,
1200 &class_device_attr_stats
1201};
1202
1203static int ipath_verbs_register_sysfs(struct ib_device *dev)
1204{
1205 int i;
1206 int ret;
1207
1208 for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
1209 if (class_device_create_file(&dev->class_dev,
1210 ipath_class_attributes[i])) {
1211 ret = 1;
1212 goto bail;
1213 }
1214
1215 ret = 0;
1216
1217bail:
1218 return ret;
1219}
1220
1221module_init(ipath_verbs_init);
1222module_exit(ipath_verbs_cleanup);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
new file mode 100644
index 000000000000..b824632b2a8c
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -0,0 +1,697 @@
1/*
2 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef IPATH_VERBS_H
34#define IPATH_VERBS_H
35
36#include <linux/types.h>
37#include <linux/spinlock.h>
38#include <linux/kernel.h>
39#include <linux/interrupt.h>
40#include <rdma/ib_pack.h>
41
42#include "ipath_layer.h"
43#include "verbs_debug.h"
44
45#define QPN_MAX (1 << 24)
46#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
47
48/*
49 * Increment this value if any changes that break userspace ABI
50 * compatibility are made.
51 */
52#define IPATH_UVERBS_ABI_VERSION 1
53
54/*
55 * Define an ib_cq_notify value that is not valid so we know when CQ
56 * notifications are armed.
57 */
58#define IB_CQ_NONE (IB_CQ_NEXT_COMP + 1)
59
60#define IB_RNR_NAK 0x20
61#define IB_NAK_PSN_ERROR 0x60
62#define IB_NAK_INVALID_REQUEST 0x61
63#define IB_NAK_REMOTE_ACCESS_ERROR 0x62
64#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
65#define IB_NAK_INVALID_RD_REQUEST 0x64
66
67#define IPATH_POST_SEND_OK 0x01
68#define IPATH_POST_RECV_OK 0x02
69#define IPATH_PROCESS_RECV_OK 0x04
70#define IPATH_PROCESS_SEND_OK 0x08
71
72/* IB Performance Manager status values */
73#define IB_PMA_SAMPLE_STATUS_DONE 0x00
74#define IB_PMA_SAMPLE_STATUS_STARTED 0x01
75#define IB_PMA_SAMPLE_STATUS_RUNNING 0x02
76
77/* Mandatory IB performance counter select values. */
78#define IB_PMA_PORT_XMIT_DATA __constant_htons(0x0001)
79#define IB_PMA_PORT_RCV_DATA __constant_htons(0x0002)
80#define IB_PMA_PORT_XMIT_PKTS __constant_htons(0x0003)
81#define IB_PMA_PORT_RCV_PKTS __constant_htons(0x0004)
82#define IB_PMA_PORT_XMIT_WAIT __constant_htons(0x0005)
83
84struct ib_reth {
85 __be64 vaddr;
86 __be32 rkey;
87 __be32 length;
88} __attribute__ ((packed));
89
90struct ib_atomic_eth {
91 __be64 vaddr;
92 __be32 rkey;
93 __be64 swap_data;
94 __be64 compare_data;
95} __attribute__ ((packed));
96
97struct ipath_other_headers {
98 __be32 bth[3];
99 union {
100 struct {
101 __be32 deth[2];
102 __be32 imm_data;
103 } ud;
104 struct {
105 struct ib_reth reth;
106 __be32 imm_data;
107 } rc;
108 struct {
109 __be32 aeth;
110 __be64 atomic_ack_eth;
111 } at;
112 __be32 imm_data;
113 __be32 aeth;
114 struct ib_atomic_eth atomic_eth;
115 } u;
116} __attribute__ ((packed));
117
118/*
119 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
120 * long (72 w/ imm_data). Only the first 56 bytes of the IB header
121 * will be in the eager header buffer. The remaining 12 or 16 bytes
122 * are in the data buffer.
123 */
124struct ipath_ib_header {
125 __be16 lrh[4];
126 union {
127 struct {
128 struct ib_grh grh;
129 struct ipath_other_headers oth;
130 } l;
131 struct ipath_other_headers oth;
132 } u;
133} __attribute__ ((packed));
134
135/*
136 * There is one struct ipath_mcast for each multicast GID.
137 * All attached QPs are then stored as a list of
138 * struct ipath_mcast_qp.
139 */
140struct ipath_mcast_qp {
141 struct list_head list;
142 struct ipath_qp *qp;
143};
144
145struct ipath_mcast {
146 struct rb_node rb_node;
147 union ib_gid mgid;
148 struct list_head qp_list;
149 wait_queue_head_t wait;
150 atomic_t refcount;
151};
152
153/* Memory region */
154struct ipath_mr {
155 struct ib_mr ibmr;
156 struct ipath_mregion mr; /* must be last */
157};
158
159/* Fast memory region */
160struct ipath_fmr {
161 struct ib_fmr ibfmr;
162 u8 page_shift;
163 struct ipath_mregion mr; /* must be last */
164};
165
166/* Protection domain */
167struct ipath_pd {
168 struct ib_pd ibpd;
169 int user; /* non-zero if created from user space */
170};
171
172/* Address Handle */
173struct ipath_ah {
174 struct ib_ah ibah;
175 struct ib_ah_attr attr;
176};
177
178/*
179 * Quick description of our CQ/QP locking scheme:
180 *
181 * We have one global lock that protects dev->cq/qp_table. Each
182 * struct ipath_cq/qp also has its own lock. An individual qp lock
183 * may be taken inside of an individual cq lock. Both cqs attached to
184 * a qp may be locked, with the send cq locked first. No other
185 * nesting should be done.
186 *
187 * Each struct ipath_cq/qp also has an atomic_t ref count. The
188 * pointer from the cq/qp_table to the struct counts as one reference.
189 * This reference also is good for access through the consumer API, so
190 * modifying the CQ/QP etc doesn't need to take another reference.
191 * Access because of a completion being polled does need a reference.
192 *
193 * Finally, each struct ipath_cq/qp has a wait_queue_head_t for the
194 * destroy function to sleep on.
195 *
196 * This means that access from the consumer API requires nothing but
197 * taking the struct's lock.
198 *
199 * Access because of a completion event should go as follows:
200 * - lock cq/qp_table and look up struct
201 * - increment ref count in struct
202 * - drop cq/qp_table lock
203 * - lock struct, do your thing, and unlock struct
204 * - decrement ref count; if zero, wake up waiters
205 *
206 * To destroy a CQ/QP, we can do the following:
207 * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
208 * - decrement ref count
209 * - wait_event until ref count is zero
210 *
211 * It is the consumer's responsibility to make sure that no QP
212 * operations (WQE posting or state modification) are pending when the
213 * QP is destroyed. Also, the consumer must make sure that calls to
214 * qp_modify are serialized.
215 *
216 * Possible optimizations (wait for profile data to see if/where we
217 * have locks bouncing between CPUs):
218 * - split cq/qp table lock into n separate (cache-aligned) locks,
219 * indexed (say) by the page in the table
220 */
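To make the rules above concrete, the completion-path access pattern can be sketched as follows. This is an illustrative fragment only, not part of the patch: the lookup loop and the helper name are placeholders, and the real driver does the table lookup in its own helper (ipath_lookup_qpn(), declared later in this file).

/* Illustrative sketch of the completion-path access rules above. */
static void example_completion_access(struct ipath_qp_table *qpt, u32 qpn)
{
	struct ipath_qp *qp;
	unsigned long flags;

	/* 1. lock the table and look up the struct */
	spin_lock_irqsave(&qpt->lock, flags);
	for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next)
		if (qp->ibqp.qp_num == qpn)
			break;
	/* 2. take a reference while still holding the table lock */
	if (qp)
		atomic_inc(&qp->refcount);
	/* 3. drop the table lock */
	spin_unlock_irqrestore(&qpt->lock, flags);

	if (!qp)
		return;

	/* 4. lock the struct, do the work, unlock the struct */
	spin_lock_irqsave(&qp->s_lock, flags);
	/* ... process the completion ... */
	spin_unlock_irqrestore(&qp->s_lock, flags);

	/* 5. drop the reference; wake a sleeping destroy if it was the last */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}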
221
222struct ipath_cq {
223 struct ib_cq ibcq;
224 struct tasklet_struct comptask;
225 spinlock_t lock;
226 u8 notify;
227 u8 triggered;
228 u32 head; /* new records added to the head */
229 u32 tail; /* poll_cq() reads from here. */
230 struct ib_wc *queue; /* this is actually ibcq.cqe + 1 */
231};
232
233/*
234 * Send work request queue entry.
235 * The size of the sg_list is determined when the QP is created and stored
236 * in qp->s_max_sge.
237 */
238struct ipath_swqe {
239 struct ib_send_wr wr; /* don't use wr.sg_list */
240 u32 psn; /* first packet sequence number */
241 u32 lpsn; /* last packet sequence number */
242 u32 ssn; /* send sequence number */
243 u32 length; /* total length of data in sg_list */
244 struct ipath_sge sg_list[0];
245};
246
247/*
248 * Receive work request queue entry.
249 * The size of the sg_list is determined when the QP is created and stored
250 * in qp->r_max_sge.
251 */
252struct ipath_rwqe {
253 u64 wr_id;
254 u32 length; /* total length of data in sg_list */
255 u8 num_sge;
256 struct ipath_sge sg_list[0];
257};
258
259struct ipath_rq {
260 spinlock_t lock;
261 u32 head; /* new work requests posted to the head */
262 u32 tail; /* receives pull requests from here. */
263 u32 size; /* size of RWQE array */
264 u8 max_sge;
265 struct ipath_rwqe *wq; /* RWQE array */
266};
267
268struct ipath_srq {
269 struct ib_srq ibsrq;
270 struct ipath_rq rq;
271 /* send signal when number of RWQEs < limit */
272 u32 limit;
273};
274
275/*
276 * Variables prefixed with s_ are for the requester (sender).
277 * Variables prefixed with r_ are for the responder (receiver).
278 * Variables prefixed with ack_ are for responder replies.
279 *
280 * Common variables are protected by both r_rq.lock and s_lock in that order
281 * which only happens in modify_qp() or changing the QP 'state'.
282 */
283struct ipath_qp {
284 struct ib_qp ibqp;
285 struct ipath_qp *next; /* link list for QPN hash table */
286 struct list_head piowait; /* link for wait PIO buf */
287 struct list_head timerwait; /* link for waiting for timeouts */
288 struct ib_ah_attr remote_ah_attr;
289 struct ipath_ib_header s_hdr; /* next packet header to send */
290 atomic_t refcount;
291 wait_queue_head_t wait;
292 struct tasklet_struct s_task;
293 struct ipath_sge_state *s_cur_sge;
294 struct ipath_sge_state s_sge; /* current send request data */
295 /* current RDMA read send data */
296 struct ipath_sge_state s_rdma_sge;
297 struct ipath_sge_state r_sge; /* current receive data */
298 spinlock_t s_lock;
299 unsigned long s_flags;
300 u32 s_hdrwords; /* size of s_hdr in 32 bit words */
301 u32 s_cur_size; /* size of send packet in bytes */
302 u32 s_len; /* total length of s_sge */
303 u32 s_rdma_len; /* total length of s_rdma_sge */
304 u32 s_next_psn; /* PSN for next request */
305 u32 s_last_psn; /* last response PSN processed */
306 u32 s_psn; /* current packet sequence number */
307 u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */
308 u32 s_ack_psn; /* PSN for next ACK or RDMA_READ */
309 u64 s_ack_atomic; /* data for atomic ACK */
310 u64 r_wr_id; /* ID for current receive WQE */
311 u64 r_atomic_data; /* data for last atomic op */
312 u32 r_atomic_psn; /* PSN of last atomic op */
313 u32 r_len; /* total length of r_sge */
314 u32 r_rcv_len; /* receive data len processed */
315 u32 r_psn; /* expected rcv packet sequence number */
316 u8 state; /* QP state */
317 u8 s_state; /* opcode of last packet sent */
318 u8 s_ack_state; /* opcode of packet to ACK */
319 u8 s_nak_state; /* non-zero if NAK is pending */
320 u8 r_state; /* opcode of last packet received */
321 u8 r_reuse_sge; /* for UC receive errors */
322 u8 r_sge_inx; /* current index into sg_list */
323 u8 s_max_sge; /* size of s_wq->sg_list */
324 u8 qp_access_flags;
325 u8 s_retry_cnt; /* number of times to retry */
326 u8 s_rnr_retry_cnt;
327 u8 s_min_rnr_timer;
328 u8 s_retry; /* requester retry counter */
329 u8 s_rnr_retry; /* requester RNR retry counter */
330 u8 s_pkey_index; /* PKEY index to use */
331 enum ib_mtu path_mtu;
332 atomic_t msn; /* message sequence number */
333 u32 remote_qpn;
334 u32 qkey; /* QKEY for this QP (for UD or RD) */
335 u32 s_size; /* send work queue size */
336 u32 s_head; /* new entries added here */
337 u32 s_tail; /* next entry to process */
338 u32 s_cur; /* current work queue entry */
339 u32 s_last; /* last un-ACK'ed entry */
340 u32 s_ssn; /* SSN of tail entry */
341 u32 s_lsn; /* limit sequence number (credit) */
342 struct ipath_swqe *s_wq; /* send work queue */
343 struct ipath_rq r_rq; /* receive work queue */
344};
345
346/*
347 * Bit definitions for s_flags.
348 */
349#define IPATH_S_BUSY 0
350#define IPATH_S_SIGNAL_REQ_WR 1
351
352/*
353 * Since struct ipath_swqe is not a fixed size, we can't simply index into
354 * struct ipath_qp.s_wq. This function does the array index computation.
355 */
356static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp,
357 unsigned n)
358{
359 return (struct ipath_swqe *)((char *)qp->s_wq +
360 (sizeof(struct ipath_swqe) +
361 qp->s_max_sge *
362 sizeof(struct ipath_sge)) * n);
363}
364
365/*
366 * Since struct ipath_rwqe is not a fixed size, we can't simply index into
367 * struct ipath_rq.wq. This function does the array index computation.
368 */
369static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq,
370 unsigned n)
371{
372 return (struct ipath_rwqe *)
373 ((char *) rq->wq +
374 (sizeof(struct ipath_rwqe) +
375 rq->max_sge * sizeof(struct ipath_sge)) * n);
376}
377
378/*
379 * QPN-map pages start out as NULL, they get allocated upon
380 * first use and are never deallocated. This way,
381 * large bitmaps are not allocated unless large numbers of QPs are used.
382 */
383struct qpn_map {
384 atomic_t n_free;
385 void *page;
386};
387
388struct ipath_qp_table {
389 spinlock_t lock;
390 u32 last; /* last QP number allocated */
391 u32 max; /* size of the hash table */
392 u32 nmaps; /* size of the map table */
393 struct ipath_qp **table;
394 /* bit map of free numbers */
395 struct qpn_map map[QPNMAP_ENTRIES];
396};
397
398struct ipath_lkey_table {
399 spinlock_t lock;
400 u32 next; /* next unused index (speeds search) */
401 u32 gen; /* generation count */
402 u32 max; /* size of the table */
403 struct ipath_mregion **table;
404};
405
406struct ipath_opcode_stats {
407 u64 n_packets; /* number of packets */
408 u64 n_bytes; /* total number of bytes */
409};
410
411struct ipath_ibdev {
412 struct ib_device ibdev;
413 struct list_head dev_list;
414 struct ipath_devdata *dd;
415 int ib_unit; /* This is the device number */
416 u16 sm_lid; /* in host order */
417 u8 sm_sl;
418 u8 mkeyprot_resv_lmc;
419 /* non-zero when timer is set */
420 unsigned long mkey_lease_timeout;
421
422 /* The following fields are really per port. */
423 struct ipath_qp_table qp_table;
424 struct ipath_lkey_table lk_table;
425 struct list_head pending[3]; /* FIFO of QPs waiting for ACKs */
426 struct list_head piowait; /* list for wait PIO buf */
427 /* list of QPs waiting for RNR timer */
428 struct list_head rnrwait;
429 spinlock_t pending_lock;
430 __be64 sys_image_guid; /* in network order */
431 __be64 gid_prefix; /* in network order */
432 __be64 mkey;
433 u64 ipath_sword; /* total dwords sent (sample result) */
434 u64 ipath_rword; /* total dwords received (sample result) */
435 u64 ipath_spkts; /* total packets sent (sample result) */
436 u64 ipath_rpkts; /* total packets received (sample result) */
437 /* # of ticks no data sent (sample result) */
438 u64 ipath_xmit_wait;
439 u64 rcv_errors; /* # of packets with SW detected rcv errs */
440 u64 n_unicast_xmit; /* total unicast packets sent */
441 u64 n_unicast_rcv; /* total unicast packets received */
442 u64 n_multicast_xmit; /* total multicast packets sent */
443 u64 n_multicast_rcv; /* total multicast packets received */
444 u64 n_symbol_error_counter; /* starting count for PMA */
445 u64 n_link_error_recovery_counter; /* starting count for PMA */
446 u64 n_link_downed_counter; /* starting count for PMA */
447 u64 n_port_rcv_errors; /* starting count for PMA */
448 u64 n_port_rcv_remphys_errors; /* starting count for PMA */
449 u64 n_port_xmit_discards; /* starting count for PMA */
450 u64 n_port_xmit_data; /* starting count for PMA */
451 u64 n_port_rcv_data; /* starting count for PMA */
452 u64 n_port_xmit_packets; /* starting count for PMA */
453 u64 n_port_rcv_packets; /* starting count for PMA */
454 u32 n_pkey_violations; /* starting count for PMA */
455 u32 n_rc_resends;
456 u32 n_rc_acks;
457 u32 n_rc_qacks;
458 u32 n_seq_naks;
459 u32 n_rdma_seq;
460 u32 n_rnr_naks;
461 u32 n_other_naks;
462 u32 n_timeouts;
463 u32 n_pkt_drops;
464 u32 n_wqe_errs;
465 u32 n_rdma_dup_busy;
466 u32 n_piowait;
467 u32 n_no_piobuf;
468 u32 port_cap_flags;
469 u32 pma_sample_start;
470 u32 pma_sample_interval;
471 __be16 pma_counter_select[5];
472 u16 pma_tag;
473 u16 qkey_violations;
474 u16 mkey_violations;
475 u16 mkey_lease_period;
476 u16 pending_index; /* which pending queue is active */
477 u8 pma_sample_status;
478 u8 subnet_timeout;
479 u8 link_width_enabled;
480 u8 vl_high_limit;
481 struct ipath_opcode_stats opstats[128];
482};
483
484struct ipath_ucontext {
485 struct ib_ucontext ibucontext;
486};
487
488static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
489{
490 return container_of(ibmr, struct ipath_mr, ibmr);
491}
492
493static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
494{
495 return container_of(ibfmr, struct ipath_fmr, ibfmr);
496}
497
498static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd)
499{
500 return container_of(ibpd, struct ipath_pd, ibpd);
501}
502
503static inline struct ipath_ah *to_iah(struct ib_ah *ibah)
504{
505 return container_of(ibah, struct ipath_ah, ibah);
506}
507
508static inline struct ipath_cq *to_icq(struct ib_cq *ibcq)
509{
510 return container_of(ibcq, struct ipath_cq, ibcq);
511}
512
513static inline struct ipath_srq *to_isrq(struct ib_srq *ibsrq)
514{
515 return container_of(ibsrq, struct ipath_srq, ibsrq);
516}
517
518static inline struct ipath_qp *to_iqp(struct ib_qp *ibqp)
519{
520 return container_of(ibqp, struct ipath_qp, ibqp);
521}
522
523static inline struct ipath_ibdev *to_idev(struct ib_device *ibdev)
524{
525 return container_of(ibdev, struct ipath_ibdev, ibdev);
526}
527
528int ipath_process_mad(struct ib_device *ibdev,
529 int mad_flags,
530 u8 port_num,
531 struct ib_wc *in_wc,
532 struct ib_grh *in_grh,
533 struct ib_mad *in_mad, struct ib_mad *out_mad);
534
535static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
536 *ibucontext)
537{
538 return container_of(ibucontext, struct ipath_ucontext, ibucontext);
539}
540
541/*
542 * Compare the lower 24 bits of the two values.
543 * Returns an integer less than, equal to, or greater than zero.
544 */
545static inline int ipath_cmp24(u32 a, u32 b)
546{
547 return (((int) a) - ((int) b)) << 8;
548}
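The left shift discards the upper 8 bits of the difference, so the sign of the result follows 24-bit circular (PSN) ordering even across wraparound. A small illustrative check, not part of the patch:

/* Illustrative only: 24-bit circular comparison across PSN wraparound. */
static inline void example_cmp24_wraparound(void)
{
	/* PSN 0x000001 comes one after PSN 0xFFFFFF modulo 2^24 ... */
	WARN_ON(ipath_cmp24(0x000001, 0xFFFFFF) <= 0);
	/* ... and PSN 0xFFFFFF comes before PSN 0x000001 */
	WARN_ON(ipath_cmp24(0xFFFFFF, 0x000001) >= 0);
}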
549
550struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid);
551
552int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
553
554int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
555
556int ipath_mcast_tree_empty(void);
557
558__be32 ipath_compute_aeth(struct ipath_qp *qp);
559
560struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn);
561
562struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
563 struct ib_qp_init_attr *init_attr,
564 struct ib_udata *udata);
565
566int ipath_destroy_qp(struct ib_qp *ibqp);
567
568int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
569 int attr_mask);
570
571int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
572 int attr_mask, struct ib_qp_init_attr *init_attr);
573
574void ipath_free_all_qps(struct ipath_qp_table *qpt);
575
576int ipath_init_qp_table(struct ipath_ibdev *idev, int size);
577
578void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc);
579
580void ipath_error_qp(struct ipath_qp *qp);
581
582void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
583
584void ipath_do_rc_send(unsigned long data);
585
586void ipath_do_uc_send(unsigned long data);
587
588void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
589
590int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
591 u32 len, u64 vaddr, u32 rkey, int acc);
592
593int ipath_lkey_ok(struct ipath_lkey_table *rkt, struct ipath_sge *isge,
594 struct ib_sge *sge, int acc);
595
596void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);
597
598void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);
599
600int ipath_post_rc_send(struct ipath_qp *qp, struct ib_send_wr *wr);
601
602void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
603 int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
604
605void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
606 int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
607
608void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc);
609
610void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_sge_state *ss,
611 u32 length, struct ib_send_wr *wr, struct ib_wc *wc);
612
613int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr);
614
615void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
616 int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
617
618int ipath_alloc_lkey(struct ipath_lkey_table *rkt,
619 struct ipath_mregion *mr);
620
621void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey);
622
623int ipath_lkey_ok(struct ipath_lkey_table *rkt, struct ipath_sge *isge,
624 struct ib_sge *sge, int acc);
625
626int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
627 u32 len, u64 vaddr, u32 rkey, int acc);
628
629int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
630 struct ib_recv_wr **bad_wr);
631
632struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
633 struct ib_srq_init_attr *srq_init_attr,
634 struct ib_udata *udata);
635
636int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
637 enum ib_srq_attr_mask attr_mask);
638
639int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
640
641int ipath_destroy_srq(struct ib_srq *ibsrq);
642
643void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
644
645int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
646
647struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
648 struct ib_ucontext *context,
649 struct ib_udata *udata);
650
651int ipath_destroy_cq(struct ib_cq *ibcq);
652
653int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify);
654
655int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
656
657struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc);
658
659struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
660 struct ib_phys_buf *buffer_list,
661 int num_phys_buf, int acc, u64 *iova_start);
662
663struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
664 int mr_access_flags,
665 struct ib_udata *udata);
666
667int ipath_dereg_mr(struct ib_mr *ibmr);
668
669struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
670 struct ib_fmr_attr *fmr_attr);
671
672int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,
673 int list_len, u64 iova);
674
675int ipath_unmap_fmr(struct list_head *fmr_list);
676
677int ipath_dealloc_fmr(struct ib_fmr *ibfmr);
678
679void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev);
680
681void ipath_insert_rnr_queue(struct ipath_qp *qp);
682
683int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);
684
685void ipath_ruc_loopback(struct ipath_qp *sqp, struct ib_wc *wc);
686
687extern const enum ib_wc_opcode ib_ipath_wc_opcode[];
688
689extern const u8 ipath_cvt_physportstate[];
690
691extern const int ib_ipath_state_ops[];
692
693extern unsigned int ib_ipath_lkey_table_size;
694
695extern const u32 ib_ipath_rnr_table[];
696
697#endif /* IPATH_VERBS_H */
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
new file mode 100644
index 000000000000..10b31d2c4f20
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
@@ -0,0 +1,333 @@
1/*
2 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/list.h>
34#include <linux/rcupdate.h>
35
36#include "ipath_verbs.h"
37
38/*
39 * Global table of GID to attached QPs.
40 * The table is global to all ipath devices since a send from one QP/device
41 * needs to be locally routed to any locally attached QPs on the same
42 * or different device.
43 */
44static struct rb_root mcast_tree;
45static DEFINE_SPINLOCK(mcast_lock);
46
47/**
48 * ipath_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
49 * @qp: the QP to link
50 */
51static struct ipath_mcast_qp *ipath_mcast_qp_alloc(struct ipath_qp *qp)
52{
53 struct ipath_mcast_qp *mqp;
54
55 mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
56 if (!mqp)
57 goto bail;
58
59 mqp->qp = qp;
60 atomic_inc(&qp->refcount);
61
62bail:
63 return mqp;
64}
65
66static void ipath_mcast_qp_free(struct ipath_mcast_qp *mqp)
67{
68 struct ipath_qp *qp = mqp->qp;
69
70 /* Notify ipath_destroy_qp() if it is waiting. */
71 if (atomic_dec_and_test(&qp->refcount))
72 wake_up(&qp->wait);
73
74 kfree(mqp);
75}
76
77/**
78 * ipath_mcast_alloc - allocate the multicast GID structure
79 * @mgid: the multicast GID
80 *
81 * A list of QPs will be attached to this structure.
82 */
83static struct ipath_mcast *ipath_mcast_alloc(union ib_gid *mgid)
84{
85 struct ipath_mcast *mcast;
86
87 mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
88 if (!mcast)
89 goto bail;
90
91 mcast->mgid = *mgid;
92 INIT_LIST_HEAD(&mcast->qp_list);
93 init_waitqueue_head(&mcast->wait);
94 atomic_set(&mcast->refcount, 0);
95
96bail:
97 return mcast;
98}
99
100static void ipath_mcast_free(struct ipath_mcast *mcast)
101{
102 struct ipath_mcast_qp *p, *tmp;
103
104 list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
105 ipath_mcast_qp_free(p);
106
107 kfree(mcast);
108}
109
110/**
111 * ipath_mcast_find - search the global table for the given multicast GID
112 * @mgid: the multicast GID to search for
113 *
114 * Returns NULL if not found.
115 *
116 * The caller is responsible for decrementing the reference count if found.
117 */
118struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid)
119{
120 struct rb_node *n;
121 unsigned long flags;
122 struct ipath_mcast *mcast;
123
124 spin_lock_irqsave(&mcast_lock, flags);
125 n = mcast_tree.rb_node;
126 while (n) {
127 int ret;
128
129 mcast = rb_entry(n, struct ipath_mcast, rb_node);
130
131 ret = memcmp(mgid->raw, mcast->mgid.raw,
132 sizeof(union ib_gid));
133 if (ret < 0)
134 n = n->rb_left;
135 else if (ret > 0)
136 n = n->rb_right;
137 else {
138 atomic_inc(&mcast->refcount);
139 spin_unlock_irqrestore(&mcast_lock, flags);
140 goto bail;
141 }
142 }
143 spin_unlock_irqrestore(&mcast_lock, flags);
144
145 mcast = NULL;
146
147bail:
148 return mcast;
149}
150
151/**
152 * ipath_mcast_add - insert mcast GID into table and attach QP struct
153 * @mcast: the mcast GID table
154 * @mqp: the QP to attach
155 *
156 * Return zero if both were added. Return EEXIST if the GID was already in
157 * the table but the QP was added. Return ESRCH if the QP was already
158 * attached and neither structure was added.
159 */
160static int ipath_mcast_add(struct ipath_mcast *mcast,
161 struct ipath_mcast_qp *mqp)
162{
163 struct rb_node **n = &mcast_tree.rb_node;
164 struct rb_node *pn = NULL;
165 unsigned long flags;
166 int ret;
167
168 spin_lock_irqsave(&mcast_lock, flags);
169
170 while (*n) {
171 struct ipath_mcast *tmcast;
172 struct ipath_mcast_qp *p;
173
174 pn = *n;
175 tmcast = rb_entry(pn, struct ipath_mcast, rb_node);
176
177 ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
178 sizeof(union ib_gid));
179 if (ret < 0) {
180 n = &pn->rb_left;
181 continue;
182 }
183 if (ret > 0) {
184 n = &pn->rb_right;
185 continue;
186 }
187
188 /* Search the QP list to see if this is already there. */
189 list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
190 if (p->qp == mqp->qp) {
191 spin_unlock_irqrestore(&mcast_lock, flags);
192 ret = ESRCH;
193 goto bail;
194 }
195 }
196 list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
197 spin_unlock_irqrestore(&mcast_lock, flags);
198 ret = EEXIST;
199 goto bail;
200 }
201
202 list_add_tail_rcu(&mqp->list, &mcast->qp_list);
203
204 atomic_inc(&mcast->refcount);
205 rb_link_node(&mcast->rb_node, pn, n);
206 rb_insert_color(&mcast->rb_node, &mcast_tree);
207
208 spin_unlock_irqrestore(&mcast_lock, flags);
209
210 ret = 0;
211
212bail:
213 return ret;
214}
215
216int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
217{
218 struct ipath_qp *qp = to_iqp(ibqp);
219 struct ipath_mcast *mcast;
220 struct ipath_mcast_qp *mqp;
221 int ret;
222
223 /*
224	 * Allocate data structures since it's better to do this outside of
225 * spin locks and it will most likely be needed.
226 */
227 mcast = ipath_mcast_alloc(gid);
228 if (mcast == NULL) {
229 ret = -ENOMEM;
230 goto bail;
231 }
232 mqp = ipath_mcast_qp_alloc(qp);
233 if (mqp == NULL) {
234 ipath_mcast_free(mcast);
235 ret = -ENOMEM;
236 goto bail;
237 }
238 switch (ipath_mcast_add(mcast, mqp)) {
239 case ESRCH:
240 /* Neither was used: can't attach the same QP twice. */
241 ipath_mcast_qp_free(mqp);
242 ipath_mcast_free(mcast);
243 ret = -EINVAL;
244 goto bail;
245 case EEXIST: /* The mcast wasn't used */
246 ipath_mcast_free(mcast);
247 break;
248 default:
249 break;
250 }
251
252 ret = 0;
253
254bail:
255 return ret;
256}
257
258int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
259{
260 struct ipath_qp *qp = to_iqp(ibqp);
261 struct ipath_mcast *mcast = NULL;
262 struct ipath_mcast_qp *p, *tmp;
263 struct rb_node *n;
264 unsigned long flags;
265 int last = 0;
266 int ret;
267
268 spin_lock_irqsave(&mcast_lock, flags);
269
270 /* Find the GID in the mcast table. */
271 n = mcast_tree.rb_node;
272 while (1) {
273 if (n == NULL) {
274 spin_unlock_irqrestore(&mcast_lock, flags);
275 ret = 0;
276 goto bail;
277 }
278
279 mcast = rb_entry(n, struct ipath_mcast, rb_node);
280 ret = memcmp(gid->raw, mcast->mgid.raw,
281 sizeof(union ib_gid));
282 if (ret < 0)
283 n = n->rb_left;
284 else if (ret > 0)
285 n = n->rb_right;
286 else
287 break;
288 }
289
290 /* Search the QP list. */
291 list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
292 if (p->qp != qp)
293 continue;
294 /*
295 * We found it, so remove it, but don't poison the forward
296 * link until we are sure there are no list walkers.
297 */
298 list_del_rcu(&p->list);
299
300 /* If this was the last attached QP, remove the GID too. */
301 if (list_empty(&mcast->qp_list)) {
302 rb_erase(&mcast->rb_node, &mcast_tree);
303 last = 1;
304 }
305 break;
306 }
307
308 spin_unlock_irqrestore(&mcast_lock, flags);
309
310 if (p) {
311 /*
312 * Wait for any list walkers to finish before freeing the
313 * list element.
314 */
315 wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
316 ipath_mcast_qp_free(p);
317 }
318 if (last) {
319 atomic_dec(&mcast->refcount);
320 wait_event(mcast->wait, !atomic_read(&mcast->refcount));
321 ipath_mcast_free(mcast);
322 }
323
324 ret = 0;
325
326bail:
327 return ret;
328}
329
330int ipath_mcast_tree_empty(void)
331{
332 return mcast_tree.rb_node == NULL;
333}
diff --git a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
new file mode 100644
index 000000000000..adc5322f15c1
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
@@ -0,0 +1,157 @@
1/*
2 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/*
34 * This file is conditionally built on x86_64 only. Otherwise weak symbol
35 * versions of the functions exported from here are used.
36 */
37
38#include <linux/pci.h>
39#include <asm/mtrr.h>
40#include <asm/processor.h>
41
42#include "ipath_kernel.h"
43
44/**
45 * ipath_enable_wc - enable write combining for MMIO writes to the device
46 * @dd: infinipath device
47 *
48 * This routine is x86_64-specific; it twiddles the CPU's MTRRs to enable
49 * write combining.
50 */
51int ipath_enable_wc(struct ipath_devdata *dd)
52{
53 int ret = 0;
54 u64 pioaddr, piolen;
55 unsigned bits;
56 const unsigned long addr = pci_resource_start(dd->pcidev, 0);
57 const size_t len = pci_resource_len(dd->pcidev, 0);
58
59 /*
60 * Set the PIO buffers to be WCCOMB, so we get HT bursts to the
61	 * chip. Linux (and possibly the hardware) requires the region to start
62	 * on an address aligned to its length, which must be a power of 2.
63 * For rev1, that means the base address, for rev2, it will be just
64 * the PIO buffers themselves.
65 */
66 pioaddr = addr + dd->ipath_piobufbase;
67 piolen = (dd->ipath_piobcnt2k +
68 dd->ipath_piobcnt4k) *
69 ALIGN(dd->ipath_piobcnt2k +
70 dd->ipath_piobcnt4k, dd->ipath_palign);
71
72 for (bits = 0; !(piolen & (1ULL << bits)); bits++)
73 /* do nothing */ ;
74
75 if (piolen != (1ULL << bits)) {
76 piolen >>= bits;
77 while (piolen >>= 1)
78 bits++;
79 piolen = 1ULL << (bits + 1);
80 }
81 if (pioaddr & (piolen - 1)) {
82 u64 atmp;
83 ipath_dbg("pioaddr %llx not on right boundary for size "
84 "%llx, fixing\n",
85 (unsigned long long) pioaddr,
86 (unsigned long long) piolen);
87 atmp = pioaddr & ~(piolen - 1);
88 if (atmp < addr || (atmp + piolen) > (addr + len)) {
89 ipath_dev_err(dd, "No way to align address/size "
90 "(%llx/%llx), no WC mtrr\n",
91 (unsigned long long) atmp,
92 (unsigned long long) piolen << 1);
93 ret = -ENODEV;
94 } else {
95 ipath_dbg("changing WC base from %llx to %llx, "
96 "len from %llx to %llx\n",
97 (unsigned long long) pioaddr,
98 (unsigned long long) atmp,
99 (unsigned long long) piolen,
100 (unsigned long long) piolen << 1);
101 pioaddr = atmp;
102 piolen <<= 1;
103 }
104 }
105
106 if (!ret) {
107 int cookie;
108 ipath_cdbg(VERBOSE, "Setting mtrr for chip to WC "
109 "(addr %llx, len=0x%llx)\n",
110 (unsigned long long) pioaddr,
111 (unsigned long long) piolen);
112 cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0);
113 if (cookie < 0) {
114 {
115 dev_info(&dd->pcidev->dev,
116 "mtrr_add() WC for PIO bufs "
117 "failed (%d)\n",
118 cookie);
119 ret = -EINVAL;
120 }
121 } else {
122 ipath_cdbg(VERBOSE, "Set mtrr for chip to WC, "
123 "cookie is %d\n", cookie);
124 dd->ipath_wc_cookie = cookie;
125 }
126 }
127
128 return ret;
129}
130
131/**
132 * ipath_disable_wc - disable write combining for MMIO writes to the device
133 * @dd: infinipath device
134 */
135void ipath_disable_wc(struct ipath_devdata *dd)
136{
137 if (dd->ipath_wc_cookie) {
138 ipath_cdbg(VERBOSE, "undoing WCCOMB on pio buffers\n");
139 mtrr_del(dd->ipath_wc_cookie, 0, 0);
140 dd->ipath_wc_cookie = 0;
141 }
142}
143
144/**
145 * ipath_unordered_wc - indicate whether write combining is ordered
146 *
147 * Because our performance depends on our ability to do write combining mmio
148 * writes in the most efficient way, we need to know if we are on an Intel
149 * or AMD x86_64 processor. AMD x86_64 processors flush WC buffers out in
150 * the order completed, and so no special flushing is required to get
151 * correct ordering. Intel processors, however, will flush write buffers
152 * out in "random" orders, and so explicit ordering is needed at times.
153 */
154int ipath_unordered_wc(void)
155{
156 return boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
157}
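A caller that relies on write combining can use this predicate to decide whether an explicit flush is needed before the word that hands a PIO buffer to the chip (see the PIO write rules documented in ips_common.h below). The sketch is illustrative only, not the driver's actual copy routine, and uses wmb() as a stand-in for whatever ordering primitive the driver settles on.

/* Illustrative only: ordered PIO send on CPUs with unordered WC. */
static inline void example_pio_send(u32 __iomem *piobuf,
				    const u32 *hdr, unsigned nwords)
{
	unsigned i;

	/* copy everything except the trigger word */
	for (i = 0; i < nwords - 1; i++)
		__raw_writel(hdr[i], piobuf + i);

	/*
	 * On CPUs that may reorder write-combined stores, make sure the
	 * words above have been pushed out before the last word, since
	 * writing the last word hands the buffer to the hardware.
	 */
	if (ipath_unordered_wc())
		wmb();

	__raw_writel(hdr[nwords - 1], piobuf + nwords - 1);
}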
diff --git a/drivers/infiniband/hw/ipath/ips_common.h b/drivers/infiniband/hw/ipath/ips_common.h
new file mode 100644
index 000000000000..410a764dfcef
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ips_common.h
@@ -0,0 +1,263 @@
1#ifndef IPS_COMMON_H
2#define IPS_COMMON_H
3/*
4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include "ipath_common.h"
36
37struct ipath_header {
38 /*
39 * Version - 4 bits, Port - 4 bits, TID - 10 bits and Offset -
40 * 14 bits before ECO change ~28 Dec 03. After that, Vers 4,
41 * Port 3, TID 11, offset 14.
42 */
43 __le32 ver_port_tid_offset;
44 __le16 chksum;
45 __le16 pkt_flags;
46};
47
48struct ips_message_header {
49 __be16 lrh[4];
50 __be32 bth[3];
51 /* fields below this point are in host byte order */
52 struct ipath_header iph;
53 __u8 sub_opcode;
54 __u8 flags;
55 __u16 src_rank;
56	/* 24 bits. The upper 8 bits are available for other use */
57 union {
58 struct {
59 unsigned ack_seq_num:24;
60 unsigned port:4;
61 unsigned unused:4;
62 };
63 __u32 ack_seq_num_org;
64 };
65 __u8 expected_tid_session_id;
66 __u8 tinylen; /* to aid MPI */
67 union {
68 __u16 tag; /* to aid MPI */
69 __u16 mqhdr; /* for PSM MQ */
70 };
71 union {
72 __u32 mpi[4]; /* to aid MPI */
73 __u32 data[4];
74 __u64 mq[2]; /* for PSM MQ */
75 struct {
76 __u16 mtu;
77 __u8 major_ver;
78 __u8 minor_ver;
79			__u32 not_used; /* free */
80 __u32 run_id;
81 __u32 client_ver;
82 };
83 };
84};
85
86struct ether_header {
87 __be16 lrh[4];
88 __be32 bth[3];
89 struct ipath_header iph;
90 __u8 sub_opcode;
91 __u8 cmd;
92 __be16 lid;
93 __u16 mac[3];
94 __u8 frag_num;
95 __u8 seq_num;
96 __le32 len;
97 /* MUST be of word size due to PIO write requirements */
98 __u32 csum;
99 __le16 csum_offset;
100 __le16 flags;
101 __u16 first_2_bytes;
102 __u8 unused[2]; /* currently unused */
103};
104
105/*
106 * The PIO buffer used for sending infinipath messages must only be written
107 * in 32-bit words, all the data must be written, and no writes can occur
108 * after the last word is written (which transfers "ownership" of the buffer
109 * to the chip and triggers the message to be sent).
110 * Since the Linux sk_buff structure can be recursive and non-aligned, with
111 * any number of bytes in each segment, we use the following structure
112 * to keep information about the overall state of the copy operation.
113 * This is used to save the information needed to store the checksum
114 * in the right place before sending the last word to the hardware and
115 * to buffer the last 0-3 bytes of non-word sized segments.
116 */
117struct copy_data_s {
118 struct ether_header *hdr;
119 /* addr of PIO buf to write csum to */
120 __u32 __iomem *csum_pio;
121 __u32 __iomem *to; /* addr of PIO buf to write data to */
122 __u32 device; /* which device to allocate PIO bufs from */
123 __s32 error; /* set if there is an error. */
124 __s32 extra; /* amount of data saved in u.buf below */
125 __u32 len; /* total length to send in bytes */
126	__u32 flen; /* fragment length in words */
127 __u32 csum; /* partial IP checksum */
128 __u32 pos; /* position for partial checksum */
129 __u32 offset; /* offset to where data currently starts */
130 __s32 checksum_calc; /* set to 1 when csum has been calculated */
131 struct sk_buff *skb;
132 union {
133 __u32 w;
134 __u8 buf[4];
135 } u;
136};
137
138/* IB - LRH header consts */
139#define IPS_LRH_GRH 0x0003 /* 1. word of IB LRH - next header: GRH */
140#define IPS_LRH_BTH 0x0002 /* 1. word of IB LRH - next header: BTH */
141
142#define IPS_OFFSET 0
143
144/*
145 * defines the cut-off point between the header queue and eager/expected
146 * TID queue
147 */
148#define NUM_OF_EXTRA_WORDS_IN_HEADER_QUEUE \
149 ((sizeof(struct ips_message_header) - \
150 offsetof(struct ips_message_header, iph)) >> 2)
151
152/* OpCodes */
153#define OPCODE_IPS 0xC0
154#define OPCODE_ITH4X 0xC1
155
156/* OpCode 30 is use by stand-alone test programs */
157#define OPCODE_RAW_DATA 0xDE
158/* last OpCode (31) is reserved for test */
159#define OPCODE_TEST 0xDF
160
161/* sub OpCodes - ips */
162#define OPCODE_SEQ_DATA 0x01
163#define OPCODE_SEQ_CTRL 0x02
164
165#define OPCODE_SEQ_MQ_DATA 0x03
166#define OPCODE_SEQ_MQ_CTRL 0x04
167
168#define OPCODE_ACK 0x10
169#define OPCODE_NAK 0x11
170
171#define OPCODE_ERR_CHK 0x20
172#define OPCODE_ERR_CHK_PLS 0x21
173
174#define OPCODE_STARTUP 0x30
175#define OPCODE_STARTUP_ACK 0x31
176#define OPCODE_STARTUP_NAK 0x32
177
178#define OPCODE_STARTUP_EXT 0x34
179#define OPCODE_STARTUP_ACK_EXT 0x35
180#define OPCODE_STARTUP_NAK_EXT 0x36
181
182#define OPCODE_TIDS_RELEASE 0x40
183#define OPCODE_TIDS_RELEASE_CONFIRM 0x41
184
185#define OPCODE_CLOSE 0x50
186#define OPCODE_CLOSE_ACK 0x51
187/*
188 * like OPCODE_CLOSE, but no complaint if other side has already closed.
189 * Used when doing abort(), MPI_Abort(), etc.
190 */
191#define OPCODE_ABORT 0x52
192
193/* sub OpCodes - ith4x */
194#define OPCODE_ENCAP 0x81
195#define OPCODE_LID_ARP 0x82
196
197/* Receive Header Queue: receive type (from infinipath) */
198#define RCVHQ_RCV_TYPE_EXPECTED 0
199#define RCVHQ_RCV_TYPE_EAGER 1
200#define RCVHQ_RCV_TYPE_NON_KD 2
201#define RCVHQ_RCV_TYPE_ERROR 3
202
203/* misc. */
204#define SIZE_OF_CRC 1
205
206#define EAGER_TID_ID INFINIPATH_I_TID_MASK
207
208#define IPS_DEFAULT_P_KEY 0xFFFF
209
210#define IPS_PERMISSIVE_LID 0xFFFF
211#define IPS_MULTICAST_LID_BASE 0xC000
212
213#define IPS_AETH_CREDIT_SHIFT 24
214#define IPS_AETH_CREDIT_MASK 0x1F
215#define IPS_AETH_CREDIT_INVAL 0x1F
216
217#define IPS_PSN_MASK 0xFFFFFF
218#define IPS_MSN_MASK 0xFFFFFF
219#define IPS_QPN_MASK 0xFFFFFF
220#define IPS_MULTICAST_QPN 0xFFFFFF
221
222/* functions for extracting fields from rcvhdrq entries */
223static inline __u32 ips_get_hdr_err_flags(const __le32 * rbuf)
224{
225 return __le32_to_cpu(rbuf[1]);
226}
227
228static inline __u32 ips_get_index(const __le32 * rbuf)
229{
230 return (__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_EGRINDEX_SHIFT)
231 & INFINIPATH_RHF_EGRINDEX_MASK;
232}
233
234static inline __u32 ips_get_rcv_type(const __le32 * rbuf)
235{
236 return (__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_RCVTYPE_SHIFT)
237 & INFINIPATH_RHF_RCVTYPE_MASK;
238}
239
240static inline __u32 ips_get_length_in_bytes(const __le32 * rbuf)
241{
242 return ((__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_LENGTH_SHIFT)
243 & INFINIPATH_RHF_LENGTH_MASK) << 2;
244}
245
246static inline void *ips_get_first_protocol_header(const __u32 * rbuf)
247{
248 return (void *)&rbuf[2];
249}
250
251static inline struct ips_message_header *ips_get_ips_header(const __u32 *
252 rbuf)
253{
254 return (struct ips_message_header *)&rbuf[2];
255}
256
257static inline __u32 ips_get_ipath_ver(__le32 hdrword)
258{
259 return (__le32_to_cpu(hdrword) >> INFINIPATH_I_VERS_SHIFT)
260 & INFINIPATH_I_VERS_MASK;
261}
262
263#endif /* IPS_COMMON_H */
diff --git a/drivers/infiniband/hw/ipath/verbs_debug.h b/drivers/infiniband/hw/ipath/verbs_debug.h
new file mode 100644
index 000000000000..40d693cf3f94
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/verbs_debug.h
@@ -0,0 +1,107 @@
1/*
2 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef _VERBS_DEBUG_H
34#define _VERBS_DEBUG_H
35
36/*
37 * This file contains tracing code for the ib_ipath kernel module.
38 */
39#ifndef _VERBS_DEBUGGING /* tracing enabled or not */
40#define _VERBS_DEBUGGING 1
41#endif
42
43extern unsigned ib_ipath_debug;
44
45#define _VERBS_ERROR(fmt,...) \
46 do { \
47 printk(KERN_ERR "%s: " fmt, "ib_ipath", ##__VA_ARGS__); \
48 } while(0)
49
50#define _VERBS_UNIT_ERROR(unit,fmt,...) \
51 do { \
52 printk(KERN_ERR "%s: " fmt, "ib_ipath", ##__VA_ARGS__); \
53 } while(0)
54
55#if _VERBS_DEBUGGING
56
57/*
58 * Mask values for debugging. The scheme allows us to compile out any
59 * of the debug tracing stuff, and if compiled in, to enable or
60 * disable dynamically.
61 * This can be set at modprobe time also:
 62 * modprobe ib_ipath ib_ipath_debug=3
63 */
64
65#define __VERBS_INFO 0x1 /* generic low verbosity stuff */
66#define __VERBS_DBG 0x2 /* generic debug */
67#define __VERBS_VDBG 0x4 /* verbose debug */
68#define __VERBS_SMADBG 0x8000 /* sma packet debug */
69
70#define _VERBS_INFO(fmt,...) \
71 do { \
72 if (unlikely(ib_ipath_debug&__VERBS_INFO)) \
73 printk(KERN_INFO "%s: " fmt,"ib_ipath", \
74 ##__VA_ARGS__); \
75 } while(0)
76
77#define _VERBS_DBG(fmt,...) \
78 do { \
79 if (unlikely(ib_ipath_debug&__VERBS_DBG)) \
80 printk(KERN_DEBUG "%s: " fmt, __func__, \
81 ##__VA_ARGS__); \
82 } while(0)
83
84#define _VERBS_VDBG(fmt,...) \
85 do { \
86 if (unlikely(ib_ipath_debug&__VERBS_VDBG)) \
87 printk(KERN_DEBUG "%s: " fmt, __func__, \
88 ##__VA_ARGS__); \
89 } while(0)
90
91#define _VERBS_SMADBG(fmt,...) \
92 do { \
93 if (unlikely(ib_ipath_debug&__VERBS_SMADBG)) \
94 printk(KERN_DEBUG "%s: " fmt, __func__, \
95 ##__VA_ARGS__); \
96 } while(0)
97
98#else /* ! _VERBS_DEBUGGING */
99
100#define _VERBS_INFO(fmt,...)
101#define _VERBS_DBG(fmt,...)
102#define _VERBS_VDBG(fmt,...)
103#define _VERBS_SMADBG(fmt,...)
104
105#endif /* _VERBS_DEBUGGING */
106
107#endif /* _VERBS_DEBUG_H */
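
The mask scheme in verbs_debug.h above gates each class of message on a bit in the ib_ipath_debug module parameter, so the tracing can stay compiled in yet remain silent until a bit is set (for example "modprobe ib_ipath ib_ipath_debug=3" enables info and generic debug output). A minimal usage sketch follows; the caller and its arguments are invented for illustration and are not part of the patch.

	/* hypothetical caller, not from the patch */
	static void example_post_send(int qp_num, unsigned len)
	{
		_VERBS_INFO("posting %u byte send on QP %d\n", len, qp_num);	/* bit 0x1 */

		/* only emitted when ib_ipath_debug has __VERBS_VDBG (0x4) set */
		_VERBS_VDBG("dumping send queue state for QP %d\n", qp_num);

		if (!len)
			_VERBS_ERROR("zero-length send on QP %d\n", qp_num);	/* unconditional */
	}
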
diff --git a/drivers/input/evbug.c b/drivers/input/evbug.c
index d7828936fd8f..07358fb51b82 100644
--- a/drivers/input/evbug.c
+++ b/drivers/input/evbug.c
@@ -49,9 +49,8 @@ static struct input_handle *evbug_connect(struct input_handler *handler, struct
49{ 49{
50 struct input_handle *handle; 50 struct input_handle *handle;
51 51
52 if (!(handle = kmalloc(sizeof(struct input_handle), GFP_KERNEL))) 52 if (!(handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL)))
53 return NULL; 53 return NULL;
54 memset(handle, 0, sizeof(struct input_handle));
55 54
56 handle->dev = dev; 55 handle->dev = dev;
57 handle->handler = handler; 56 handle->handler = handler;
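
This hunk, like the evdev.c and joydev.c hunks further down, collapses a kmalloc() followed by memset(..., 0, ...) into a single kzalloc(), which hands back already-zeroed memory. A sketch of the before/after pattern, using a placeholder struct name rather than anything from the patch:

	/* before: allocate, then clear by hand */
	p = kmalloc(sizeof(struct foo), GFP_KERNEL);
	if (!p)
		return NULL;
	memset(p, 0, sizeof(struct foo));

	/* after: kzalloc() allocates and zeroes in one call */
	p = kzalloc(sizeof(struct foo), GFP_KERNEL);
	if (!p)
		return NULL;
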
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 745979f33dc2..a34e3d91d9ed 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -130,9 +130,8 @@ static int evdev_open(struct inode * inode, struct file * file)
130 if ((accept_err = input_accept_process(&(evdev_table[i]->handle), file))) 130 if ((accept_err = input_accept_process(&(evdev_table[i]->handle), file)))
131 return accept_err; 131 return accept_err;
132 132
133 if (!(list = kmalloc(sizeof(struct evdev_list), GFP_KERNEL))) 133 if (!(list = kzalloc(sizeof(struct evdev_list), GFP_KERNEL)))
134 return -ENOMEM; 134 return -ENOMEM;
135 memset(list, 0, sizeof(struct evdev_list));
136 135
137 list->evdev = evdev_table[i]; 136 list->evdev = evdev_table[i];
138 list_add_tail(&list->node, &evdev_table[i]->list); 137 list_add_tail(&list->node, &evdev_table[i]->list);
@@ -609,9 +608,8 @@ static struct input_handle *evdev_connect(struct input_handler *handler, struct
609 return NULL; 608 return NULL;
610 } 609 }
611 610
612 if (!(evdev = kmalloc(sizeof(struct evdev), GFP_KERNEL))) 611 if (!(evdev = kzalloc(sizeof(struct evdev), GFP_KERNEL)))
613 return NULL; 612 return NULL;
614 memset(evdev, 0, sizeof(struct evdev));
615 613
616 INIT_LIST_HEAD(&evdev->list); 614 INIT_LIST_HEAD(&evdev->list);
617 init_waitqueue_head(&evdev->wait); 615 init_waitqueue_head(&evdev->wait);
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index b765a155c008..36644bff379d 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -22,6 +22,7 @@
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/kthread.h> 23#include <linux/kthread.h>
24#include <linux/sched.h> /* HZ */ 24#include <linux/sched.h> /* HZ */
25#include <linux/mutex.h>
25 26
26/*#include <asm/io.h>*/ 27/*#include <asm/io.h>*/
27 28
@@ -43,10 +44,10 @@ EXPORT_SYMBOL(gameport_start_polling);
43EXPORT_SYMBOL(gameport_stop_polling); 44EXPORT_SYMBOL(gameport_stop_polling);
44 45
45/* 46/*
46 * gameport_sem protects entire gameport subsystem and is taken 47 * gameport_mutex protects entire gameport subsystem and is taken
 47 * every time a gameport port or driver is registered or unregistered. 48 * every time a gameport port or driver is registered or unregistered.
48 */ 49 */
49static DECLARE_MUTEX(gameport_sem); 50static DEFINE_MUTEX(gameport_mutex);
50 51
51static LIST_HEAD(gameport_list); 52static LIST_HEAD(gameport_list);
52 53
@@ -265,6 +266,7 @@ static void gameport_queue_event(void *object, struct module *owner,
265 if ((event = kmalloc(sizeof(struct gameport_event), GFP_ATOMIC))) { 266 if ((event = kmalloc(sizeof(struct gameport_event), GFP_ATOMIC))) {
266 if (!try_module_get(owner)) { 267 if (!try_module_get(owner)) {
267 printk(KERN_WARNING "gameport: Can't get module reference, dropping event %d\n", event_type); 268 printk(KERN_WARNING "gameport: Can't get module reference, dropping event %d\n", event_type);
269 kfree(event);
268 goto out; 270 goto out;
269 } 271 }
270 272
@@ -342,7 +344,7 @@ static void gameport_handle_event(void)
342 struct gameport_event *event; 344 struct gameport_event *event;
343 struct gameport_driver *gameport_drv; 345 struct gameport_driver *gameport_drv;
344 346
345 down(&gameport_sem); 347 mutex_lock(&gameport_mutex);
346 348
347 /* 349 /*
348 * Note that we handle only one event here to give swsusp 350 * Note that we handle only one event here to give swsusp
@@ -379,7 +381,7 @@ static void gameport_handle_event(void)
379 gameport_free_event(event); 381 gameport_free_event(event);
380 } 382 }
381 383
382 up(&gameport_sem); 384 mutex_unlock(&gameport_mutex);
383} 385}
384 386
385/* 387/*
@@ -464,7 +466,7 @@ static ssize_t gameport_rebind_driver(struct device *dev, struct device_attribut
464 struct device_driver *drv; 466 struct device_driver *drv;
465 int retval; 467 int retval;
466 468
467 retval = down_interruptible(&gameport_sem); 469 retval = mutex_lock_interruptible(&gameport_mutex);
468 if (retval) 470 if (retval)
469 return retval; 471 return retval;
470 472
@@ -484,7 +486,7 @@ static ssize_t gameport_rebind_driver(struct device *dev, struct device_attribut
484 retval = -EINVAL; 486 retval = -EINVAL;
485 } 487 }
486 488
487 up(&gameport_sem); 489 mutex_unlock(&gameport_mutex);
488 490
489 return retval; 491 return retval;
490} 492}
@@ -521,7 +523,7 @@ static void gameport_init_port(struct gameport *gameport)
521 523
522 __module_get(THIS_MODULE); 524 __module_get(THIS_MODULE);
523 525
524 init_MUTEX(&gameport->drv_sem); 526 mutex_init(&gameport->drv_mutex);
525 device_initialize(&gameport->dev); 527 device_initialize(&gameport->dev);
526 snprintf(gameport->dev.bus_id, sizeof(gameport->dev.bus_id), 528 snprintf(gameport->dev.bus_id, sizeof(gameport->dev.bus_id),
527 "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1); 529 "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
@@ -661,10 +663,10 @@ void __gameport_register_port(struct gameport *gameport, struct module *owner)
661 */ 663 */
662void gameport_unregister_port(struct gameport *gameport) 664void gameport_unregister_port(struct gameport *gameport)
663{ 665{
664 down(&gameport_sem); 666 mutex_lock(&gameport_mutex);
665 gameport_disconnect_port(gameport); 667 gameport_disconnect_port(gameport);
666 gameport_destroy_port(gameport); 668 gameport_destroy_port(gameport);
667 up(&gameport_sem); 669 mutex_unlock(&gameport_mutex);
668} 670}
669 671
670 672
@@ -717,7 +719,7 @@ void gameport_unregister_driver(struct gameport_driver *drv)
717{ 719{
718 struct gameport *gameport; 720 struct gameport *gameport;
719 721
720 down(&gameport_sem); 722 mutex_lock(&gameport_mutex);
721 drv->ignore = 1; /* so gameport_find_driver ignores it */ 723 drv->ignore = 1; /* so gameport_find_driver ignores it */
722 724
723start_over: 725start_over:
@@ -731,7 +733,7 @@ start_over:
731 } 733 }
732 734
733 driver_unregister(&drv->driver); 735 driver_unregister(&drv->driver);
734 up(&gameport_sem); 736 mutex_unlock(&gameport_mutex);
735} 737}
736 738
737static int gameport_bus_match(struct device *dev, struct device_driver *drv) 739static int gameport_bus_match(struct device *dev, struct device_driver *drv)
@@ -743,9 +745,9 @@ static int gameport_bus_match(struct device *dev, struct device_driver *drv)
743 745
744static void gameport_set_drv(struct gameport *gameport, struct gameport_driver *drv) 746static void gameport_set_drv(struct gameport *gameport, struct gameport_driver *drv)
745{ 747{
746 down(&gameport->drv_sem); 748 mutex_lock(&gameport->drv_mutex);
747 gameport->drv = drv; 749 gameport->drv = drv;
748 up(&gameport->drv_sem); 750 mutex_unlock(&gameport->drv_mutex);
749} 751}
750 752
751int gameport_open(struct gameport *gameport, struct gameport_driver *drv, int mode) 753int gameport_open(struct gameport *gameport, struct gameport_driver *drv, int mode)
@@ -796,5 +798,5 @@ static void __exit gameport_exit(void)
796 kthread_stop(gameport_task); 798 kthread_stop(gameport_task);
797} 799}
798 800
799module_init(gameport_init); 801subsys_initcall(gameport_init);
800module_exit(gameport_exit); 802module_exit(gameport_exit);
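
The gameport changes above are one instance of the semaphore-to-mutex conversion repeated throughout this series: a DECLARE_MUTEX() semaphore that only ever provided mutual exclusion becomes a DEFINE_MUTEX(), and down()/down_interruptible()/up() become mutex_lock()/mutex_lock_interruptible()/mutex_unlock(). A condensed sketch of the pattern; the subsystem name is generic, not something the patch adds:

	#include <linux/mutex.h>

	static DEFINE_MUTEX(subsys_mutex);	/* was: static DECLARE_MUTEX(subsys_sem); */

	static int subsys_do_protected(void)
	{
		int err;

		err = mutex_lock_interruptible(&subsys_mutex);	/* was: down_interruptible() */
		if (err)
			return err;		/* -EINTR if a signal arrived while waiting */

		/* ... touch shared subsystem state ... */

		mutex_unlock(&subsys_mutex);	/* was: up() */
		return 0;
	}
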
diff --git a/drivers/input/gameport/ns558.c b/drivers/input/gameport/ns558.c
index d2e55dc956ba..3e2d28f263e9 100644
--- a/drivers/input/gameport/ns558.c
+++ b/drivers/input/gameport/ns558.c
@@ -252,14 +252,14 @@ static struct pnp_driver ns558_pnp_driver;
252 252
253#endif 253#endif
254 254
255static int pnp_registered = 0;
256
257static int __init ns558_init(void) 255static int __init ns558_init(void)
258{ 256{
259 int i = 0; 257 int i = 0;
258 int error;
260 259
261 if (pnp_register_driver(&ns558_pnp_driver) >= 0) 260 error = pnp_register_driver(&ns558_pnp_driver);
262 pnp_registered = 1; 261 if (error && error != -ENODEV) /* should be ENOSYS really */
262 return error;
263 263
264/* 264/*
265 * Probe ISA ports after PnP, so that PnP ports that are already 265 * Probe ISA ports after PnP, so that PnP ports that are already
@@ -270,7 +270,7 @@ static int __init ns558_init(void)
270 while (ns558_isa_portlist[i]) 270 while (ns558_isa_portlist[i])
271 ns558_isa_probe(ns558_isa_portlist[i++]); 271 ns558_isa_probe(ns558_isa_portlist[i++]);
272 272
273 return (list_empty(&ns558_list) && !pnp_registered) ? -ENODEV : 0; 273 return list_empty(&ns558_list) && error ? -ENODEV : 0;
274} 274}
275 275
276static void __exit ns558_exit(void) 276static void __exit ns558_exit(void)
@@ -283,8 +283,7 @@ static void __exit ns558_exit(void)
283 kfree(ns558); 283 kfree(ns558);
284 } 284 }
285 285
286 if (pnp_registered) 286 pnp_unregister_driver(&ns558_pnp_driver);
287 pnp_unregister_driver(&ns558_pnp_driver);
288} 287}
289 288
290module_init(ns558_init); 289module_init(ns558_init);
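
The ns558 init rework drops the pnp_registered flag: the return value of pnp_register_driver() is kept, any failure other than -ENODEV aborts loading, and the ISA probe still runs so the module succeeds if either path found a port. A simplified view of the resulting control flow, condensed from the hunks above:

	static int __init ns558_init(void)
	{
		int i = 0;
		int error = pnp_register_driver(&ns558_pnp_driver);

		if (error && error != -ENODEV)	/* hard failure: give up */
			return error;

		/* ISA probing runs even when PnP registration was unavailable */
		while (ns558_isa_portlist[i])
			ns558_isa_probe(ns558_isa_portlist[i++]);

		/* succeed if PnP registered or at least one ISA port was found */
		return list_empty(&ns558_list) && error ? -ENODEV : 0;
	}
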
diff --git a/drivers/input/input.c b/drivers/input/input.c
index f8af0945964e..a935abeffffc 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -18,9 +18,11 @@
18#include <linux/random.h> 18#include <linux/random.h>
19#include <linux/major.h> 19#include <linux/major.h>
20#include <linux/proc_fs.h> 20#include <linux/proc_fs.h>
21#include <linux/seq_file.h>
21#include <linux/interrupt.h> 22#include <linux/interrupt.h>
22#include <linux/poll.h> 23#include <linux/poll.h>
23#include <linux/device.h> 24#include <linux/device.h>
25#include <linux/mutex.h>
24 26
25MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>"); 27MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
26MODULE_DESCRIPTION("Input core"); 28MODULE_DESCRIPTION("Input core");
@@ -224,7 +226,7 @@ int input_open_device(struct input_handle *handle)
224 struct input_dev *dev = handle->dev; 226 struct input_dev *dev = handle->dev;
225 int err; 227 int err;
226 228
227 err = down_interruptible(&dev->sem); 229 err = mutex_lock_interruptible(&dev->mutex);
228 if (err) 230 if (err)
229 return err; 231 return err;
230 232
@@ -236,7 +238,7 @@ int input_open_device(struct input_handle *handle)
236 if (err) 238 if (err)
237 handle->open--; 239 handle->open--;
238 240
239 up(&dev->sem); 241 mutex_unlock(&dev->mutex);
240 242
241 return err; 243 return err;
242} 244}
@@ -255,13 +257,13 @@ void input_close_device(struct input_handle *handle)
255 257
256 input_release_device(handle); 258 input_release_device(handle);
257 259
258 down(&dev->sem); 260 mutex_lock(&dev->mutex);
259 261
260 if (!--dev->users && dev->close) 262 if (!--dev->users && dev->close)
261 dev->close(dev); 263 dev->close(dev);
262 handle->open--; 264 handle->open--;
263 265
264 up(&dev->sem); 266 mutex_unlock(&dev->mutex);
265} 267}
266 268
267static void input_link_handle(struct input_handle *handle) 269static void input_link_handle(struct input_handle *handle)
@@ -315,21 +317,6 @@ static struct input_device_id *input_match_device(struct input_device_id *id, st
315 return NULL; 317 return NULL;
316} 318}
317 319
318static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap, int max)
319{
320 int i;
321 int len = 0;
322
323 for (i = NBITS(max) - 1; i > 0; i--)
324 if (bitmap[i])
325 break;
326
327 for (; i >= 0; i--)
328 len += snprintf(buf + len, max(buf_size - len, 0),
329 "%lx%s", bitmap[i], i > 0 ? " " : "");
330 return len;
331}
332
333#ifdef CONFIG_PROC_FS 320#ifdef CONFIG_PROC_FS
334 321
335static struct proc_dir_entry *proc_bus_input_dir; 322static struct proc_dir_entry *proc_bus_input_dir;
@@ -342,7 +329,7 @@ static inline void input_wakeup_procfs_readers(void)
342 wake_up(&input_devices_poll_wait); 329 wake_up(&input_devices_poll_wait);
343} 330}
344 331
345static unsigned int input_devices_poll(struct file *file, poll_table *wait) 332static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait)
346{ 333{
347 int state = input_devices_state; 334 int state = input_devices_state;
348 poll_wait(file, &input_devices_poll_wait, wait); 335 poll_wait(file, &input_devices_poll_wait, wait);
@@ -351,115 +338,171 @@ static unsigned int input_devices_poll(struct file *file, poll_table *wait)
351 return 0; 338 return 0;
352} 339}
353 340
354#define SPRINTF_BIT(ev, bm) \ 341static struct list_head *list_get_nth_element(struct list_head *list, loff_t *pos)
355 do { \ 342{
356 len += sprintf(buf + len, "B: %s=", #ev); \ 343 struct list_head *node;
357 len += input_print_bitmap(buf + len, INT_MAX, \ 344 loff_t i = 0;
358 dev->bm##bit, ev##_MAX); \
359 len += sprintf(buf + len, "\n"); \
360 } while (0)
361 345
362#define TEST_AND_SPRINTF_BIT(ev, bm) \ 346 list_for_each(node, list)
363 do { \ 347 if (i++ == *pos)
364 if (test_bit(EV_##ev, dev->evbit)) \ 348 return node;
365 SPRINTF_BIT(ev, bm); \ 349
366 } while (0) 350 return NULL;
351}
367 352
368static int input_devices_read(char *buf, char **start, off_t pos, int count, int *eof, void *data) 353static struct list_head *list_get_next_element(struct list_head *list, struct list_head *element, loff_t *pos)
369{ 354{
370 struct input_dev *dev; 355 if (element->next == list)
371 struct input_handle *handle; 356 return NULL;
372 const char *path; 357
358 ++(*pos);
359 return element->next;
360}
361
362static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos)
363{
 364 /* acquire lock here ... Yes, we do need locking, I know, I know... */
365
366 return list_get_nth_element(&input_dev_list, pos);
367}
373 368
374 off_t at = 0; 369static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos)
375 int len, cnt = 0; 370{
371 return list_get_next_element(&input_dev_list, v, pos);
372}
376 373
377 list_for_each_entry(dev, &input_dev_list, node) { 374static void input_devices_seq_stop(struct seq_file *seq, void *v)
375{
376 /* release lock here */
377}
378 378
379 path = kobject_get_path(&dev->cdev.kobj, GFP_KERNEL); 379static void input_seq_print_bitmap(struct seq_file *seq, const char *name,
380 unsigned long *bitmap, int max)
381{
382 int i;
380 383
381 len = sprintf(buf, "I: Bus=%04x Vendor=%04x Product=%04x Version=%04x\n", 384 for (i = NBITS(max) - 1; i > 0; i--)
382 dev->id.bustype, dev->id.vendor, dev->id.product, dev->id.version); 385 if (bitmap[i])
386 break;
383 387
384 len += sprintf(buf + len, "N: Name=\"%s\"\n", dev->name ? dev->name : ""); 388 seq_printf(seq, "B: %s=", name);
385 len += sprintf(buf + len, "P: Phys=%s\n", dev->phys ? dev->phys : ""); 389 for (; i >= 0; i--)
386 len += sprintf(buf + len, "S: Sysfs=%s\n", path ? path : ""); 390 seq_printf(seq, "%lx%s", bitmap[i], i > 0 ? " " : "");
387 len += sprintf(buf + len, "H: Handlers="); 391 seq_putc(seq, '\n');
392}
388 393
389 list_for_each_entry(handle, &dev->h_list, d_node) 394static int input_devices_seq_show(struct seq_file *seq, void *v)
390 len += sprintf(buf + len, "%s ", handle->name); 395{
391 396 struct input_dev *dev = container_of(v, struct input_dev, node);
392 len += sprintf(buf + len, "\n"); 397 const char *path = kobject_get_path(&dev->cdev.kobj, GFP_KERNEL);
393 398 struct input_handle *handle;
394 SPRINTF_BIT(EV, ev);
395 TEST_AND_SPRINTF_BIT(KEY, key);
396 TEST_AND_SPRINTF_BIT(REL, rel);
397 TEST_AND_SPRINTF_BIT(ABS, abs);
398 TEST_AND_SPRINTF_BIT(MSC, msc);
399 TEST_AND_SPRINTF_BIT(LED, led);
400 TEST_AND_SPRINTF_BIT(SND, snd);
401 TEST_AND_SPRINTF_BIT(FF, ff);
402 TEST_AND_SPRINTF_BIT(SW, sw);
403
404 len += sprintf(buf + len, "\n");
405
406 at += len;
407
408 if (at >= pos) {
409 if (!*start) {
410 *start = buf + (pos - (at - len));
411 cnt = at - pos;
412 } else cnt += len;
413 buf += len;
414 if (cnt >= count)
415 break;
416 }
417 399
418 kfree(path); 400 seq_printf(seq, "I: Bus=%04x Vendor=%04x Product=%04x Version=%04x\n",
419 } 401 dev->id.bustype, dev->id.vendor, dev->id.product, dev->id.version);
420 402
421 if (&dev->node == &input_dev_list) 403 seq_printf(seq, "N: Name=\"%s\"\n", dev->name ? dev->name : "");
422 *eof = 1; 404 seq_printf(seq, "P: Phys=%s\n", dev->phys ? dev->phys : "");
405 seq_printf(seq, "S: Sysfs=%s\n", path ? path : "");
406 seq_printf(seq, "H: Handlers=");
423 407
424 return (count > cnt) ? cnt : count; 408 list_for_each_entry(handle, &dev->h_list, d_node)
409 seq_printf(seq, "%s ", handle->name);
410 seq_putc(seq, '\n');
411
412 input_seq_print_bitmap(seq, "EV", dev->evbit, EV_MAX);
413 if (test_bit(EV_KEY, dev->evbit))
414 input_seq_print_bitmap(seq, "KEY", dev->keybit, KEY_MAX);
415 if (test_bit(EV_REL, dev->evbit))
416 input_seq_print_bitmap(seq, "REL", dev->relbit, REL_MAX);
417 if (test_bit(EV_ABS, dev->evbit))
418 input_seq_print_bitmap(seq, "ABS", dev->absbit, ABS_MAX);
419 if (test_bit(EV_MSC, dev->evbit))
420 input_seq_print_bitmap(seq, "MSC", dev->mscbit, MSC_MAX);
421 if (test_bit(EV_LED, dev->evbit))
422 input_seq_print_bitmap(seq, "LED", dev->ledbit, LED_MAX);
423 if (test_bit(EV_SND, dev->evbit))
424 input_seq_print_bitmap(seq, "SND", dev->sndbit, SND_MAX);
425 if (test_bit(EV_FF, dev->evbit))
426 input_seq_print_bitmap(seq, "FF", dev->ffbit, FF_MAX);
427 if (test_bit(EV_SW, dev->evbit))
428 input_seq_print_bitmap(seq, "SW", dev->swbit, SW_MAX);
429
430 seq_putc(seq, '\n');
431
432 kfree(path);
433 return 0;
425} 434}
426 435
427static int input_handlers_read(char *buf, char **start, off_t pos, int count, int *eof, void *data) 436static struct seq_operations input_devices_seq_ops = {
437 .start = input_devices_seq_start,
438 .next = input_devices_seq_next,
439 .stop = input_devices_seq_stop,
440 .show = input_devices_seq_show,
441};
442
443static int input_proc_devices_open(struct inode *inode, struct file *file)
428{ 444{
429 struct input_handler *handler; 445 return seq_open(file, &input_devices_seq_ops);
446}
430 447
431 off_t at = 0; 448static struct file_operations input_devices_fileops = {
432 int len = 0, cnt = 0; 449 .owner = THIS_MODULE,
433 int i = 0; 450 .open = input_proc_devices_open,
451 .poll = input_proc_devices_poll,
452 .read = seq_read,
453 .llseek = seq_lseek,
454 .release = seq_release,
455};
434 456
435 list_for_each_entry(handler, &input_handler_list, node) { 457static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
458{
 459 /* acquire lock here ... Yes, we do need locking, I know, I know... */
460 seq->private = (void *)(unsigned long)*pos;
461 return list_get_nth_element(&input_handler_list, pos);
462}
463
464static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos)
465{
466 seq->private = (void *)(unsigned long)(*pos + 1);
467 return list_get_next_element(&input_handler_list, v, pos);
468}
436 469
437 if (handler->fops) 470static void input_handlers_seq_stop(struct seq_file *seq, void *v)
438 len = sprintf(buf, "N: Number=%d Name=%s Minor=%d\n", 471{
439 i++, handler->name, handler->minor); 472 /* release lock here */
440 else 473}
441 len = sprintf(buf, "N: Number=%d Name=%s\n",
442 i++, handler->name);
443 474
444 at += len; 475static int input_handlers_seq_show(struct seq_file *seq, void *v)
476{
477 struct input_handler *handler = container_of(v, struct input_handler, node);
445 478
446 if (at >= pos) { 479 seq_printf(seq, "N: Number=%ld Name=%s",
447 if (!*start) { 480 (unsigned long)seq->private, handler->name);
448 *start = buf + (pos - (at - len)); 481 if (handler->fops)
449 cnt = at - pos; 482 seq_printf(seq, " Minor=%d", handler->minor);
450 } else cnt += len; 483 seq_putc(seq, '\n');
451 buf += len;
452 if (cnt >= count)
453 break;
454 }
455 }
456 if (&handler->node == &input_handler_list)
457 *eof = 1;
458 484
459 return (count > cnt) ? cnt : count; 485 return 0;
460} 486}
487static struct seq_operations input_handlers_seq_ops = {
488 .start = input_handlers_seq_start,
489 .next = input_handlers_seq_next,
490 .stop = input_handlers_seq_stop,
491 .show = input_handlers_seq_show,
492};
461 493
462static struct file_operations input_fileops; 494static int input_proc_handlers_open(struct inode *inode, struct file *file)
495{
496 return seq_open(file, &input_handlers_seq_ops);
497}
498
499static struct file_operations input_handlers_fileops = {
500 .owner = THIS_MODULE,
501 .open = input_proc_handlers_open,
502 .read = seq_read,
503 .llseek = seq_lseek,
504 .release = seq_release,
505};
463 506
464static int __init input_proc_init(void) 507static int __init input_proc_init(void)
465{ 508{
@@ -471,20 +514,19 @@ static int __init input_proc_init(void)
471 514
472 proc_bus_input_dir->owner = THIS_MODULE; 515 proc_bus_input_dir->owner = THIS_MODULE;
473 516
474 entry = create_proc_read_entry("devices", 0, proc_bus_input_dir, input_devices_read, NULL); 517 entry = create_proc_entry("devices", 0, proc_bus_input_dir);
475 if (!entry) 518 if (!entry)
476 goto fail1; 519 goto fail1;
477 520
478 entry->owner = THIS_MODULE; 521 entry->owner = THIS_MODULE;
479 input_fileops = *entry->proc_fops; 522 entry->proc_fops = &input_devices_fileops;
480 input_fileops.poll = input_devices_poll;
481 entry->proc_fops = &input_fileops;
482 523
483 entry = create_proc_read_entry("handlers", 0, proc_bus_input_dir, input_handlers_read, NULL); 524 entry = create_proc_entry("handlers", 0, proc_bus_input_dir);
484 if (!entry) 525 if (!entry)
485 goto fail2; 526 goto fail2;
486 527
487 entry->owner = THIS_MODULE; 528 entry->owner = THIS_MODULE;
529 entry->proc_fops = &input_handlers_fileops;
488 530
489 return 0; 531 return 0;
490 532
@@ -512,13 +554,14 @@ static ssize_t input_dev_show_##name(struct class_device *dev, char *buf) \
512 struct input_dev *input_dev = to_input_dev(dev); \ 554 struct input_dev *input_dev = to_input_dev(dev); \
513 int retval; \ 555 int retval; \
514 \ 556 \
515 retval = down_interruptible(&input_dev->sem); \ 557 retval = mutex_lock_interruptible(&input_dev->mutex); \
516 if (retval) \ 558 if (retval) \
517 return retval; \ 559 return retval; \
518 \ 560 \
519 retval = sprintf(buf, "%s\n", input_dev->name ? input_dev->name : ""); \ 561 retval = scnprintf(buf, PAGE_SIZE, \
562 "%s\n", input_dev->name ? input_dev->name : ""); \
520 \ 563 \
521 up(&input_dev->sem); \ 564 mutex_unlock(&input_dev->mutex); \
522 \ 565 \
523 return retval; \ 566 return retval; \
524} \ 567} \
@@ -528,46 +571,51 @@ INPUT_DEV_STRING_ATTR_SHOW(name);
528INPUT_DEV_STRING_ATTR_SHOW(phys); 571INPUT_DEV_STRING_ATTR_SHOW(phys);
529INPUT_DEV_STRING_ATTR_SHOW(uniq); 572INPUT_DEV_STRING_ATTR_SHOW(uniq);
530 573
531static int print_modalias_bits(char *buf, int size, char prefix, unsigned long *arr, 574static int input_print_modalias_bits(char *buf, int size,
532 unsigned int min, unsigned int max) 575 char name, unsigned long *bm,
576 unsigned int min_bit, unsigned int max_bit)
533{ 577{
534 int len, i; 578 int len = 0, i;
535 579
536 len = snprintf(buf, size, "%c", prefix); 580 len += snprintf(buf, max(size, 0), "%c", name);
537 for (i = min; i < max; i++) 581 for (i = min_bit; i < max_bit; i++)
538 if (arr[LONG(i)] & BIT(i)) 582 if (bm[LONG(i)] & BIT(i))
539 len += snprintf(buf + len, size - len, "%X,", i); 583 len += snprintf(buf + len, max(size - len, 0), "%X,", i);
540 return len; 584 return len;
541} 585}
542 586
543static int print_modalias(char *buf, int size, struct input_dev *id) 587static int input_print_modalias(char *buf, int size, struct input_dev *id,
588 int add_cr)
544{ 589{
545 int len; 590 int len;
546 591
547 len = snprintf(buf, size, "input:b%04Xv%04Xp%04Xe%04X-", 592 len = snprintf(buf, max(size, 0),
548 id->id.bustype, 593 "input:b%04Xv%04Xp%04Xe%04X-",
549 id->id.vendor, 594 id->id.bustype, id->id.vendor,
550 id->id.product, 595 id->id.product, id->id.version);
551 id->id.version); 596
552 597 len += input_print_modalias_bits(buf + len, size - len,
553 len += print_modalias_bits(buf + len, size - len, 'e', id->evbit, 598 'e', id->evbit, 0, EV_MAX);
554 0, EV_MAX); 599 len += input_print_modalias_bits(buf + len, size - len,
555 len += print_modalias_bits(buf + len, size - len, 'k', id->keybit, 600 'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX);
556 KEY_MIN_INTERESTING, KEY_MAX); 601 len += input_print_modalias_bits(buf + len, size - len,
557 len += print_modalias_bits(buf + len, size - len, 'r', id->relbit, 602 'r', id->relbit, 0, REL_MAX);
558 0, REL_MAX); 603 len += input_print_modalias_bits(buf + len, size - len,
559 len += print_modalias_bits(buf + len, size - len, 'a', id->absbit, 604 'a', id->absbit, 0, ABS_MAX);
560 0, ABS_MAX); 605 len += input_print_modalias_bits(buf + len, size - len,
561 len += print_modalias_bits(buf + len, size - len, 'm', id->mscbit, 606 'm', id->mscbit, 0, MSC_MAX);
562 0, MSC_MAX); 607 len += input_print_modalias_bits(buf + len, size - len,
563 len += print_modalias_bits(buf + len, size - len, 'l', id->ledbit, 608 'l', id->ledbit, 0, LED_MAX);
564 0, LED_MAX); 609 len += input_print_modalias_bits(buf + len, size - len,
565 len += print_modalias_bits(buf + len, size - len, 's', id->sndbit, 610 's', id->sndbit, 0, SND_MAX);
566 0, SND_MAX); 611 len += input_print_modalias_bits(buf + len, size - len,
567 len += print_modalias_bits(buf + len, size - len, 'f', id->ffbit, 612 'f', id->ffbit, 0, FF_MAX);
568 0, FF_MAX); 613 len += input_print_modalias_bits(buf + len, size - len,
569 len += print_modalias_bits(buf + len, size - len, 'w', id->swbit, 614 'w', id->swbit, 0, SW_MAX);
570 0, SW_MAX); 615
616 if (add_cr)
617 len += snprintf(buf + len, max(size - len, 0), "\n");
618
571 return len; 619 return len;
572} 620}
573 621
@@ -576,9 +624,9 @@ static ssize_t input_dev_show_modalias(struct class_device *dev, char *buf)
576 struct input_dev *id = to_input_dev(dev); 624 struct input_dev *id = to_input_dev(dev);
577 ssize_t len; 625 ssize_t len;
578 626
579 len = print_modalias(buf, PAGE_SIZE, id); 627 len = input_print_modalias(buf, PAGE_SIZE, id, 1);
580 len += snprintf(buf + len, PAGE_SIZE-len, "\n"); 628
 581 return len; 629 return min_t(int, len, PAGE_SIZE);
582} 630}
583static CLASS_DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL); 631static CLASS_DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL);
584 632
@@ -598,7 +646,7 @@ static struct attribute_group input_dev_attr_group = {
598static ssize_t input_dev_show_id_##name(struct class_device *dev, char *buf) \ 646static ssize_t input_dev_show_id_##name(struct class_device *dev, char *buf) \
599{ \ 647{ \
600 struct input_dev *input_dev = to_input_dev(dev); \ 648 struct input_dev *input_dev = to_input_dev(dev); \
601 return sprintf(buf, "%04x\n", input_dev->id.name); \ 649 return scnprintf(buf, PAGE_SIZE, "%04x\n", input_dev->id.name); \
602} \ 650} \
603static CLASS_DEVICE_ATTR(name, S_IRUGO, input_dev_show_id_##name, NULL); 651static CLASS_DEVICE_ATTR(name, S_IRUGO, input_dev_show_id_##name, NULL);
604 652
@@ -620,11 +668,33 @@ static struct attribute_group input_dev_id_attr_group = {
620 .attrs = input_dev_id_attrs, 668 .attrs = input_dev_id_attrs,
621}; 669};
622 670
671static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap,
672 int max, int add_cr)
673{
674 int i;
675 int len = 0;
676
677 for (i = NBITS(max) - 1; i > 0; i--)
678 if (bitmap[i])
679 break;
680
681 for (; i >= 0; i--)
682 len += snprintf(buf + len, max(buf_size - len, 0),
683 "%lx%s", bitmap[i], i > 0 ? " " : "");
684
685 if (add_cr)
686 len += snprintf(buf + len, max(buf_size - len, 0), "\n");
687
688 return len;
689}
690
623#define INPUT_DEV_CAP_ATTR(ev, bm) \ 691#define INPUT_DEV_CAP_ATTR(ev, bm) \
624static ssize_t input_dev_show_cap_##bm(struct class_device *dev, char *buf) \ 692static ssize_t input_dev_show_cap_##bm(struct class_device *dev, char *buf) \
625{ \ 693{ \
626 struct input_dev *input_dev = to_input_dev(dev); \ 694 struct input_dev *input_dev = to_input_dev(dev); \
627 return input_print_bitmap(buf, PAGE_SIZE, input_dev->bm##bit, ev##_MAX);\ 695 int len = input_print_bitmap(buf, PAGE_SIZE, \
696 input_dev->bm##bit, ev##_MAX, 1); \
697 return min_t(int, len, PAGE_SIZE); \
628} \ 698} \
629static CLASS_DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL); 699static CLASS_DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL);
630 700
@@ -669,8 +739,8 @@ static void input_dev_release(struct class_device *class_dev)
669 * device bitfields. 739 * device bitfields.
670 */ 740 */
671static int input_add_uevent_bm_var(char **envp, int num_envp, int *cur_index, 741static int input_add_uevent_bm_var(char **envp, int num_envp, int *cur_index,
672 char *buffer, int buffer_size, int *cur_len, 742 char *buffer, int buffer_size, int *cur_len,
673 const char *name, unsigned long *bitmap, int max) 743 const char *name, unsigned long *bitmap, int max)
674{ 744{
675 if (*cur_index >= num_envp - 1) 745 if (*cur_index >= num_envp - 1)
676 return -ENOMEM; 746 return -ENOMEM;
@@ -678,12 +748,36 @@ static int input_add_uevent_bm_var(char **envp, int num_envp, int *cur_index,
678 envp[*cur_index] = buffer + *cur_len; 748 envp[*cur_index] = buffer + *cur_len;
679 749
680 *cur_len += snprintf(buffer + *cur_len, max(buffer_size - *cur_len, 0), name); 750 *cur_len += snprintf(buffer + *cur_len, max(buffer_size - *cur_len, 0), name);
681 if (*cur_len > buffer_size) 751 if (*cur_len >= buffer_size)
682 return -ENOMEM; 752 return -ENOMEM;
683 753
684 *cur_len += input_print_bitmap(buffer + *cur_len, 754 *cur_len += input_print_bitmap(buffer + *cur_len,
685 max(buffer_size - *cur_len, 0), 755 max(buffer_size - *cur_len, 0),
686 bitmap, max) + 1; 756 bitmap, max, 0) + 1;
757 if (*cur_len > buffer_size)
758 return -ENOMEM;
759
760 (*cur_index)++;
761 return 0;
762}
763
764static int input_add_uevent_modalias_var(char **envp, int num_envp, int *cur_index,
765 char *buffer, int buffer_size, int *cur_len,
766 struct input_dev *dev)
767{
768 if (*cur_index >= num_envp - 1)
769 return -ENOMEM;
770
771 envp[*cur_index] = buffer + *cur_len;
772
773 *cur_len += snprintf(buffer + *cur_len, max(buffer_size - *cur_len, 0),
774 "MODALIAS=");
775 if (*cur_len >= buffer_size)
776 return -ENOMEM;
777
778 *cur_len += input_print_modalias(buffer + *cur_len,
779 max(buffer_size - *cur_len, 0),
780 dev, 0) + 1;
687 if (*cur_len > buffer_size) 781 if (*cur_len > buffer_size)
688 return -ENOMEM; 782 return -ENOMEM;
689 783
@@ -693,7 +787,7 @@ static int input_add_uevent_bm_var(char **envp, int num_envp, int *cur_index,
693 787
694#define INPUT_ADD_HOTPLUG_VAR(fmt, val...) \ 788#define INPUT_ADD_HOTPLUG_VAR(fmt, val...) \
695 do { \ 789 do { \
696 int err = add_uevent_var(envp, num_envp, &i, \ 790 int err = add_uevent_var(envp, num_envp, &i, \
697 buffer, buffer_size, &len, \ 791 buffer, buffer_size, &len, \
698 fmt, val); \ 792 fmt, val); \
699 if (err) \ 793 if (err) \
@@ -709,6 +803,16 @@ static int input_add_uevent_bm_var(char **envp, int num_envp, int *cur_index,
709 return err; \ 803 return err; \
710 } while (0) 804 } while (0)
711 805
806#define INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev) \
807 do { \
808 int err = input_add_uevent_modalias_var(envp, \
809 num_envp, &i, \
810 buffer, buffer_size, &len, \
811 dev); \
812 if (err) \
813 return err; \
814 } while (0)
815
712static int input_dev_uevent(struct class_device *cdev, char **envp, 816static int input_dev_uevent(struct class_device *cdev, char **envp,
713 int num_envp, char *buffer, int buffer_size) 817 int num_envp, char *buffer, int buffer_size)
714{ 818{
@@ -744,9 +848,7 @@ static int input_dev_uevent(struct class_device *cdev, char **envp,
744 if (test_bit(EV_SW, dev->evbit)) 848 if (test_bit(EV_SW, dev->evbit))
745 INPUT_ADD_HOTPLUG_BM_VAR("SW=", dev->swbit, SW_MAX); 849 INPUT_ADD_HOTPLUG_BM_VAR("SW=", dev->swbit, SW_MAX);
746 850
747 envp[i++] = buffer + len; 851 INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev);
748 len += snprintf(buffer + len, buffer_size - len, "MODALIAS=");
749 len += print_modalias(buffer + len, buffer_size - len, dev) + 1;
750 852
751 envp[i] = NULL; 853 envp[i] = NULL;
752 return 0; 854 return 0;
@@ -790,7 +892,7 @@ int input_register_device(struct input_dev *dev)
790 return -EINVAL; 892 return -EINVAL;
791 } 893 }
792 894
793 init_MUTEX(&dev->sem); 895 mutex_init(&dev->mutex);
794 set_bit(EV_SYN, dev->evbit); 896 set_bit(EV_SYN, dev->evbit);
795 897
796 /* 898 /*
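
The procfs rework above replaces the hand-rolled read_proc callbacks with the seq_file iterator interface: a seq_operations table supplies start/next/stop/show, seq_open() attaches it in the open handler, and the generic seq_read/seq_lseek/seq_release take care of buffering and partial reads. Stripped to a skeleton, the pattern looks like this (the example_* names are stand-ins; the list_get_nth_element()/list_get_next_element() helpers are the ones the patch adds):

	static void *example_seq_start(struct seq_file *seq, loff_t *pos)
	{
		return list_get_nth_element(&example_list, pos);	/* element at *pos, or NULL */
	}

	static void *example_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	{
		return list_get_next_element(&example_list, v, pos);	/* advances *pos */
	}

	static void example_seq_stop(struct seq_file *seq, void *v)
	{
		/* release any lock taken in start() */
	}

	static int example_seq_show(struct seq_file *seq, void *v)
	{
		struct example *e = container_of(v, struct example, node);

		seq_printf(seq, "N: Name=%s\n", e->name);
		return 0;
	}

	static struct seq_operations example_seq_ops = {
		.start	= example_seq_start,
		.next	= example_seq_next,
		.stop	= example_seq_stop,
		.show	= example_seq_show,
	};

	static int example_proc_open(struct inode *inode, struct file *file)
	{
		return seq_open(file, &example_seq_ops);
	}
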
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 20e2972b9204..949bdcef8c2b 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -171,9 +171,8 @@ static int joydev_open(struct inode *inode, struct file *file)
171 if (i >= JOYDEV_MINORS || !joydev_table[i]) 171 if (i >= JOYDEV_MINORS || !joydev_table[i])
172 return -ENODEV; 172 return -ENODEV;
173 173
174 if (!(list = kmalloc(sizeof(struct joydev_list), GFP_KERNEL))) 174 if (!(list = kzalloc(sizeof(struct joydev_list), GFP_KERNEL)))
175 return -ENOMEM; 175 return -ENOMEM;
176 memset(list, 0, sizeof(struct joydev_list));
177 176
178 list->joydev = joydev_table[i]; 177 list->joydev = joydev_table[i];
179 list_add_tail(&list->node, &joydev_table[i]->list); 178 list_add_tail(&list->node, &joydev_table[i]->list);
@@ -457,9 +456,8 @@ static struct input_handle *joydev_connect(struct input_handler *handler, struct
457 return NULL; 456 return NULL;
458 } 457 }
459 458
460 if (!(joydev = kmalloc(sizeof(struct joydev), GFP_KERNEL))) 459 if (!(joydev = kzalloc(sizeof(struct joydev), GFP_KERNEL)))
461 return NULL; 460 return NULL;
462 memset(joydev, 0, sizeof(struct joydev));
463 461
464 INIT_LIST_HEAD(&joydev->list); 462 INIT_LIST_HEAD(&joydev->list);
465 init_waitqueue_head(&joydev->wait); 463 init_waitqueue_head(&joydev->wait);
diff --git a/drivers/input/joystick/amijoy.c b/drivers/input/joystick/amijoy.c
index ec55a29fc861..7249d324297b 100644
--- a/drivers/input/joystick/amijoy.c
+++ b/drivers/input/joystick/amijoy.c
@@ -36,6 +36,7 @@
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/input.h> 37#include <linux/input.h>
38#include <linux/interrupt.h> 38#include <linux/interrupt.h>
39#include <linux/mutex.h>
39 40
40#include <asm/system.h> 41#include <asm/system.h>
41#include <asm/amigahw.h> 42#include <asm/amigahw.h>
@@ -52,7 +53,7 @@ MODULE_PARM_DESC(map, "Map of attached joysticks in form of <a>,<b> (default is
52__obsolete_setup("amijoy="); 53__obsolete_setup("amijoy=");
53 54
54static int amijoy_used; 55static int amijoy_used;
55static DECLARE_MUTEX(amijoy_sem); 56static DEFINE_MUTEX(amijoy_mutex);
56static struct input_dev *amijoy_dev[2]; 57static struct input_dev *amijoy_dev[2];
57static char *amijoy_phys[2] = { "amijoy/input0", "amijoy/input1" }; 58static char *amijoy_phys[2] = { "amijoy/input0", "amijoy/input1" };
58 59
@@ -85,7 +86,7 @@ static int amijoy_open(struct input_dev *dev)
85{ 86{
86 int err; 87 int err;
87 88
88 err = down_interruptible(&amijoy_sem); 89 err = mutex_lock_interruptible(&amijoy_mutex);
89 if (err) 90 if (err)
90 return err; 91 return err;
91 92
@@ -97,16 +98,16 @@ static int amijoy_open(struct input_dev *dev)
97 98
98 amijoy_used++; 99 amijoy_used++;
99out: 100out:
100 up(&amijoy_sem); 101 mutex_unlock(&amijoy_mutex);
101 return err; 102 return err;
102} 103}
103 104
104static void amijoy_close(struct input_dev *dev) 105static void amijoy_close(struct input_dev *dev)
105{ 106{
106 down(&amijoy_sem); 107 mutex_lock(&amijoy_mutex);
107 if (!--amijoy_used) 108 if (!--amijoy_used)
108 free_irq(IRQ_AMIGA_VERTB, amijoy_interrupt); 109 free_irq(IRQ_AMIGA_VERTB, amijoy_interrupt);
109 up(&amijoy_sem); 110 mutex_unlock(&amijoy_mutex);
110} 111}
111 112
112static int __init amijoy_init(void) 113static int __init amijoy_init(void)
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index dcffc34f30c3..e61894685cb1 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -38,6 +38,7 @@
38#include <linux/init.h> 38#include <linux/init.h>
39#include <linux/parport.h> 39#include <linux/parport.h>
40#include <linux/input.h> 40#include <linux/input.h>
41#include <linux/mutex.h>
41 42
42MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); 43MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
43MODULE_DESCRIPTION("Atari, Amstrad, Commodore, Amiga, Sega, etc. joystick driver"); 44MODULE_DESCRIPTION("Atari, Amstrad, Commodore, Amiga, Sega, etc. joystick driver");
@@ -111,7 +112,7 @@ struct db9 {
111 struct pardevice *pd; 112 struct pardevice *pd;
112 int mode; 113 int mode;
113 int used; 114 int used;
114 struct semaphore sem; 115 struct mutex mutex;
115 char phys[DB9_MAX_DEVICES][32]; 116 char phys[DB9_MAX_DEVICES][32];
116}; 117};
117 118
@@ -525,7 +526,7 @@ static int db9_open(struct input_dev *dev)
525 struct parport *port = db9->pd->port; 526 struct parport *port = db9->pd->port;
526 int err; 527 int err;
527 528
528 err = down_interruptible(&db9->sem); 529 err = mutex_lock_interruptible(&db9->mutex);
529 if (err) 530 if (err)
530 return err; 531 return err;
531 532
@@ -539,7 +540,7 @@ static int db9_open(struct input_dev *dev)
539 mod_timer(&db9->timer, jiffies + DB9_REFRESH_TIME); 540 mod_timer(&db9->timer, jiffies + DB9_REFRESH_TIME);
540 } 541 }
541 542
542 up(&db9->sem); 543 mutex_unlock(&db9->mutex);
543 return 0; 544 return 0;
544} 545}
545 546
@@ -548,14 +549,14 @@ static void db9_close(struct input_dev *dev)
548 struct db9 *db9 = dev->private; 549 struct db9 *db9 = dev->private;
549 struct parport *port = db9->pd->port; 550 struct parport *port = db9->pd->port;
550 551
551 down(&db9->sem); 552 mutex_lock(&db9->mutex);
552 if (!--db9->used) { 553 if (!--db9->used) {
553 del_timer_sync(&db9->timer); 554 del_timer_sync(&db9->timer);
554 parport_write_control(port, 0x00); 555 parport_write_control(port, 0x00);
555 parport_data_forward(port); 556 parport_data_forward(port);
556 parport_release(db9->pd); 557 parport_release(db9->pd);
557 } 558 }
558 up(&db9->sem); 559 mutex_unlock(&db9->mutex);
559} 560}
560 561
561static struct db9 __init *db9_probe(int parport, int mode) 562static struct db9 __init *db9_probe(int parport, int mode)
@@ -603,7 +604,7 @@ static struct db9 __init *db9_probe(int parport, int mode)
603 goto err_unreg_pardev; 604 goto err_unreg_pardev;
604 } 605 }
605 606
606 init_MUTEX(&db9->sem); 607 mutex_init(&db9->mutex);
607 db9->pd = pd; 608 db9->pd = pd;
608 db9->mode = mode; 609 db9->mode = mode;
609 init_timer(&db9->timer); 610 init_timer(&db9->timer);
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index 900587acdb47..ecbdb6b9bbd6 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -7,6 +7,7 @@
7 * Based on the work of: 7 * Based on the work of:
8 * Andree Borrmann John Dahlstrom 8 * Andree Borrmann John Dahlstrom
9 * David Kuder Nathan Hand 9 * David Kuder Nathan Hand
10 * Raphael Assenat
10 */ 11 */
11 12
12/* 13/*
@@ -36,6 +37,7 @@
36#include <linux/init.h> 37#include <linux/init.h>
37#include <linux/parport.h> 38#include <linux/parport.h>
38#include <linux/input.h> 39#include <linux/input.h>
40#include <linux/mutex.h>
39 41
40MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); 42MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
41MODULE_DESCRIPTION("NES, SNES, N64, MultiSystem, PSX gamepad driver"); 43MODULE_DESCRIPTION("NES, SNES, N64, MultiSystem, PSX gamepad driver");
@@ -72,8 +74,9 @@ __obsolete_setup("gc_3=");
72#define GC_N64 6 74#define GC_N64 6
73#define GC_PSX 7 75#define GC_PSX 7
74#define GC_DDR 8 76#define GC_DDR 8
77#define GC_SNESMOUSE 9
75 78
76#define GC_MAX 8 79#define GC_MAX 9
77 80
78#define GC_REFRESH_TIME HZ/100 81#define GC_REFRESH_TIME HZ/100
79 82
@@ -83,7 +86,7 @@ struct gc {
83 struct timer_list timer; 86 struct timer_list timer;
84 unsigned char pads[GC_MAX + 1]; 87 unsigned char pads[GC_MAX + 1];
85 int used; 88 int used;
86 struct semaphore sem; 89 struct mutex mutex;
87 char phys[GC_MAX_DEVICES][32]; 90 char phys[GC_MAX_DEVICES][32];
88}; 91};
89 92
@@ -93,7 +96,7 @@ static int gc_status_bit[] = { 0x40, 0x80, 0x20, 0x10, 0x08 };
93 96
94static char *gc_names[] = { NULL, "SNES pad", "NES pad", "NES FourPort", "Multisystem joystick", 97static char *gc_names[] = { NULL, "SNES pad", "NES pad", "NES FourPort", "Multisystem joystick",
95 "Multisystem 2-button joystick", "N64 controller", "PSX controller", 98 "Multisystem 2-button joystick", "N64 controller", "PSX controller",
96 "PSX DDR controller" }; 99 "PSX DDR controller", "SNES mouse" };
97/* 100/*
98 * N64 support. 101 * N64 support.
99 */ 102 */
@@ -205,9 +208,12 @@ static void gc_n64_process_packet(struct gc *gc)
205 * NES/SNES support. 208 * NES/SNES support.
206 */ 209 */
207 210
208#define GC_NES_DELAY 6 /* Delay between bits - 6us */ 211#define GC_NES_DELAY 6 /* Delay between bits - 6us */
209#define GC_NES_LENGTH 8 /* The NES pads use 8 bits of data */ 212#define GC_NES_LENGTH 8 /* The NES pads use 8 bits of data */
210#define GC_SNES_LENGTH 12 /* The SNES true length is 16, but the last 4 bits are unused */ 213#define GC_SNES_LENGTH 12 /* The SNES true length is 16, but the
214 last 4 bits are unused */
215#define GC_SNESMOUSE_LENGTH 32 /* The SNES mouse uses 32 bits, the first
216 16 bits are equivalent to a gamepad */
211 217
212#define GC_NES_POWER 0xfc 218#define GC_NES_POWER 0xfc
213#define GC_NES_CLOCK 0x01 219#define GC_NES_CLOCK 0x01
@@ -242,11 +248,15 @@ static void gc_nes_read_packet(struct gc *gc, int length, unsigned char *data)
242 248
243static void gc_nes_process_packet(struct gc *gc) 249static void gc_nes_process_packet(struct gc *gc)
244{ 250{
245 unsigned char data[GC_SNES_LENGTH]; 251 unsigned char data[GC_SNESMOUSE_LENGTH];
246 struct input_dev *dev; 252 struct input_dev *dev;
247 int i, j, s; 253 int i, j, s, len;
254 char x_rel, y_rel;
255
256 len = gc->pads[GC_SNESMOUSE] ? GC_SNESMOUSE_LENGTH :
257 (gc->pads[GC_SNES] ? GC_SNES_LENGTH : GC_NES_LENGTH);
248 258
249 gc_nes_read_packet(gc, gc->pads[GC_SNES] ? GC_SNES_LENGTH : GC_NES_LENGTH, data); 259 gc_nes_read_packet(gc, len, data);
250 260
251 for (i = 0; i < GC_MAX_DEVICES; i++) { 261 for (i = 0; i < GC_MAX_DEVICES; i++) {
252 262
@@ -269,6 +279,44 @@ static void gc_nes_process_packet(struct gc *gc)
269 for (j = 0; j < 8; j++) 279 for (j = 0; j < 8; j++)
270 input_report_key(dev, gc_snes_btn[j], s & data[gc_snes_bytes[j]]); 280 input_report_key(dev, gc_snes_btn[j], s & data[gc_snes_bytes[j]]);
271 281
282 if (s & gc->pads[GC_SNESMOUSE]) {
283 /*
284 * The 4 unused bits from SNES controllers appear to be ID bits
 285 * so use them to make sure we are dealing with a mouse and
 286 * not a gamepad that is connected. This is important since
287 * my SNES gamepad sends 1's for bits 16-31, which
288 * cause the mouse pointer to quickly move to the
289 * upper left corner of the screen.
290 */
291 if (!(s & data[12]) && !(s & data[13]) &&
292 !(s & data[14]) && (s & data[15])) {
293 input_report_key(dev, BTN_LEFT, s & data[9]);
294 input_report_key(dev, BTN_RIGHT, s & data[8]);
295
296 x_rel = y_rel = 0;
297 for (j = 0; j < 7; j++) {
298 x_rel <<= 1;
299 if (data[25 + j] & s)
300 x_rel |= 1;
301
302 y_rel <<= 1;
303 if (data[17 + j] & s)
304 y_rel |= 1;
305 }
306
307 if (x_rel) {
308 if (data[24] & s)
309 x_rel = -x_rel;
310 input_report_rel(dev, REL_X, x_rel);
311 }
312
313 if (y_rel) {
314 if (data[16] & s)
315 y_rel = -y_rel;
316 input_report_rel(dev, REL_Y, y_rel);
317 }
318 }
319 }
272 input_sync(dev); 320 input_sync(dev);
273 } 321 }
274} 322}
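
The mouse decode in the hunk above reads a sign-and-magnitude value per axis: bits 17-23 (Y) and 25-31 (X) are shifted in most-significant-bit first to build a 7-bit magnitude, and bits 16/24 carry the sign. A standalone sketch of the same extraction, with a hypothetical "base" index (16 for Y, 24 for X) standing in for the hard-coded offsets:

	char rel = 0;
	int j;

	/* assemble the 7 magnitude bits, most significant first */
	for (j = 0; j < 7; j++) {
		rel <<= 1;
		if (data[base + 1 + j] & s)
			rel |= 1;
	}

	if (rel) {
		if (data[base] & s)	/* the sign bit precedes the magnitude field */
			rel = -rel;
		input_report_rel(dev, base == 24 ? REL_X : REL_Y, rel);
	}
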
@@ -524,10 +572,10 @@ static void gc_timer(unsigned long private)
524 gc_n64_process_packet(gc); 572 gc_n64_process_packet(gc);
525 573
526/* 574/*
527 * NES and SNES pads 575 * NES and SNES pads or mouse
528 */ 576 */
529 577
530 if (gc->pads[GC_NES] || gc->pads[GC_SNES]) 578 if (gc->pads[GC_NES] || gc->pads[GC_SNES] || gc->pads[GC_SNESMOUSE])
531 gc_nes_process_packet(gc); 579 gc_nes_process_packet(gc);
532 580
533/* 581/*
@@ -552,7 +600,7 @@ static int gc_open(struct input_dev *dev)
552 struct gc *gc = dev->private; 600 struct gc *gc = dev->private;
553 int err; 601 int err;
554 602
555 err = down_interruptible(&gc->sem); 603 err = mutex_lock_interruptible(&gc->mutex);
556 if (err) 604 if (err)
557 return err; 605 return err;
558 606
@@ -562,7 +610,7 @@ static int gc_open(struct input_dev *dev)
562 mod_timer(&gc->timer, jiffies + GC_REFRESH_TIME); 610 mod_timer(&gc->timer, jiffies + GC_REFRESH_TIME);
563 } 611 }
564 612
565 up(&gc->sem); 613 mutex_unlock(&gc->mutex);
566 return 0; 614 return 0;
567} 615}
568 616
@@ -570,13 +618,13 @@ static void gc_close(struct input_dev *dev)
570{ 618{
571 struct gc *gc = dev->private; 619 struct gc *gc = dev->private;
572 620
573 down(&gc->sem); 621 mutex_lock(&gc->mutex);
574 if (!--gc->used) { 622 if (!--gc->used) {
575 del_timer_sync(&gc->timer); 623 del_timer_sync(&gc->timer);
576 parport_write_control(gc->pd->port, 0x00); 624 parport_write_control(gc->pd->port, 0x00);
577 parport_release(gc->pd); 625 parport_release(gc->pd);
578 } 626 }
579 up(&gc->sem); 627 mutex_unlock(&gc->mutex);
580} 628}
581 629
582static int __init gc_setup_pad(struct gc *gc, int idx, int pad_type) 630static int __init gc_setup_pad(struct gc *gc, int idx, int pad_type)
@@ -609,10 +657,13 @@ static int __init gc_setup_pad(struct gc *gc, int idx, int pad_type)
609 input_dev->open = gc_open; 657 input_dev->open = gc_open;
610 input_dev->close = gc_close; 658 input_dev->close = gc_close;
611 659
612 input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS); 660 if (pad_type != GC_SNESMOUSE) {
661 input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
613 662
614 for (i = 0; i < 2; i++) 663 for (i = 0; i < 2; i++)
615 input_set_abs_params(input_dev, ABS_X + i, -1, 1, 0, 0); 664 input_set_abs_params(input_dev, ABS_X + i, -1, 1, 0, 0);
665 } else
666 input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REL);
616 667
617 gc->pads[0] |= gc_status_bit[idx]; 668 gc->pads[0] |= gc_status_bit[idx];
618 gc->pads[pad_type] |= gc_status_bit[idx]; 669 gc->pads[pad_type] |= gc_status_bit[idx];
@@ -630,6 +681,13 @@ static int __init gc_setup_pad(struct gc *gc, int idx, int pad_type)
630 681
631 break; 682 break;
632 683
684 case GC_SNESMOUSE:
685 set_bit(BTN_LEFT, input_dev->keybit);
686 set_bit(BTN_RIGHT, input_dev->keybit);
687 set_bit(REL_X, input_dev->relbit);
688 set_bit(REL_Y, input_dev->relbit);
689 break;
690
633 case GC_SNES: 691 case GC_SNES:
634 for (i = 4; i < 8; i++) 692 for (i = 4; i < 8; i++)
635 set_bit(gc_snes_btn[i], input_dev->keybit); 693 set_bit(gc_snes_btn[i], input_dev->keybit);
@@ -693,7 +751,7 @@ static struct gc __init *gc_probe(int parport, int *pads, int n_pads)
693 goto err_unreg_pardev; 751 goto err_unreg_pardev;
694 } 752 }
695 753
696 init_MUTEX(&gc->sem); 754 mutex_init(&gc->mutex);
697 gc->pd = pd; 755 gc->pd = pd;
698 init_timer(&gc->timer); 756 init_timer(&gc->timer);
699 gc->timer.data = (long) gc; 757 gc->timer.data = (long) gc;
diff --git a/drivers/input/joystick/iforce/iforce-ff.c b/drivers/input/joystick/iforce/iforce-ff.c
index 4678b6dab43b..2b8e8456c9fa 100644
--- a/drivers/input/joystick/iforce/iforce-ff.c
+++ b/drivers/input/joystick/iforce/iforce-ff.c
@@ -42,14 +42,14 @@ static int make_magnitude_modifier(struct iforce* iforce,
42 unsigned char data[3]; 42 unsigned char data[3];
43 43
44 if (!no_alloc) { 44 if (!no_alloc) {
45 down(&iforce->mem_mutex); 45 mutex_lock(&iforce->mem_mutex);
46 if (allocate_resource(&(iforce->device_memory), mod_chunk, 2, 46 if (allocate_resource(&(iforce->device_memory), mod_chunk, 2,
47 iforce->device_memory.start, iforce->device_memory.end, 2L, 47 iforce->device_memory.start, iforce->device_memory.end, 2L,
48 NULL, NULL)) { 48 NULL, NULL)) {
49 up(&iforce->mem_mutex); 49 mutex_unlock(&iforce->mem_mutex);
50 return -ENOMEM; 50 return -ENOMEM;
51 } 51 }
52 up(&iforce->mem_mutex); 52 mutex_unlock(&iforce->mem_mutex);
53 } 53 }
54 54
55 data[0] = LO(mod_chunk->start); 55 data[0] = LO(mod_chunk->start);
@@ -75,14 +75,14 @@ static int make_period_modifier(struct iforce* iforce,
75 period = TIME_SCALE(period); 75 period = TIME_SCALE(period);
76 76
77 if (!no_alloc) { 77 if (!no_alloc) {
78 down(&iforce->mem_mutex); 78 mutex_lock(&iforce->mem_mutex);
79 if (allocate_resource(&(iforce->device_memory), mod_chunk, 0x0c, 79 if (allocate_resource(&(iforce->device_memory), mod_chunk, 0x0c,
80 iforce->device_memory.start, iforce->device_memory.end, 2L, 80 iforce->device_memory.start, iforce->device_memory.end, 2L,
81 NULL, NULL)) { 81 NULL, NULL)) {
82 up(&iforce->mem_mutex); 82 mutex_unlock(&iforce->mem_mutex);
83 return -ENOMEM; 83 return -ENOMEM;
84 } 84 }
85 up(&iforce->mem_mutex); 85 mutex_unlock(&iforce->mem_mutex);
86 } 86 }
87 87
88 data[0] = LO(mod_chunk->start); 88 data[0] = LO(mod_chunk->start);
@@ -115,14 +115,14 @@ static int make_envelope_modifier(struct iforce* iforce,
115 fade_duration = TIME_SCALE(fade_duration); 115 fade_duration = TIME_SCALE(fade_duration);
116 116
117 if (!no_alloc) { 117 if (!no_alloc) {
118 down(&iforce->mem_mutex); 118 mutex_lock(&iforce->mem_mutex);
119 if (allocate_resource(&(iforce->device_memory), mod_chunk, 0x0e, 119 if (allocate_resource(&(iforce->device_memory), mod_chunk, 0x0e,
120 iforce->device_memory.start, iforce->device_memory.end, 2L, 120 iforce->device_memory.start, iforce->device_memory.end, 2L,
121 NULL, NULL)) { 121 NULL, NULL)) {
122 up(&iforce->mem_mutex); 122 mutex_unlock(&iforce->mem_mutex);
123 return -ENOMEM; 123 return -ENOMEM;
124 } 124 }
125 up(&iforce->mem_mutex); 125 mutex_unlock(&iforce->mem_mutex);
126 } 126 }
127 127
128 data[0] = LO(mod_chunk->start); 128 data[0] = LO(mod_chunk->start);
@@ -152,14 +152,14 @@ static int make_condition_modifier(struct iforce* iforce,
152 unsigned char data[10]; 152 unsigned char data[10];
153 153
154 if (!no_alloc) { 154 if (!no_alloc) {
155 down(&iforce->mem_mutex); 155 mutex_lock(&iforce->mem_mutex);
156 if (allocate_resource(&(iforce->device_memory), mod_chunk, 8, 156 if (allocate_resource(&(iforce->device_memory), mod_chunk, 8,
157 iforce->device_memory.start, iforce->device_memory.end, 2L, 157 iforce->device_memory.start, iforce->device_memory.end, 2L,
158 NULL, NULL)) { 158 NULL, NULL)) {
159 up(&iforce->mem_mutex); 159 mutex_unlock(&iforce->mem_mutex);
160 return -ENOMEM; 160 return -ENOMEM;
161 } 161 }
162 up(&iforce->mem_mutex); 162 mutex_unlock(&iforce->mem_mutex);
163 } 163 }
164 164
165 data[0] = LO(mod_chunk->start); 165 data[0] = LO(mod_chunk->start);
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index b6bc04998047..ab0a26b924ca 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -350,7 +350,7 @@ int iforce_init_device(struct iforce *iforce)
350 350
351 init_waitqueue_head(&iforce->wait); 351 init_waitqueue_head(&iforce->wait);
352 spin_lock_init(&iforce->xmit_lock); 352 spin_lock_init(&iforce->xmit_lock);
353 init_MUTEX(&iforce->mem_mutex); 353 mutex_init(&iforce->mem_mutex);
354 iforce->xmit.buf = iforce->xmit_data; 354 iforce->xmit.buf = iforce->xmit_data;
355 iforce->dev = input_dev; 355 iforce->dev = input_dev;
356 356
diff --git a/drivers/input/joystick/iforce/iforce.h b/drivers/input/joystick/iforce/iforce.h
index 146f406b8f8a..668f24535ba0 100644
--- a/drivers/input/joystick/iforce/iforce.h
+++ b/drivers/input/joystick/iforce/iforce.h
@@ -37,7 +37,7 @@
37#include <linux/serio.h> 37#include <linux/serio.h>
38#include <linux/config.h> 38#include <linux/config.h>
39#include <linux/circ_buf.h> 39#include <linux/circ_buf.h>
40#include <asm/semaphore.h> 40#include <linux/mutex.h>
41 41
42/* This module provides arbitrary resource management routines. 42/* This module provides arbitrary resource management routines.
43 * I use it to manage the device's memory. 43 * I use it to manage the device's memory.
@@ -45,6 +45,7 @@
45 */ 45 */
46#include <linux/ioport.h> 46#include <linux/ioport.h>
47 47
48
48#define IFORCE_MAX_LENGTH 16 49#define IFORCE_MAX_LENGTH 16
49 50
50/* iforce::bus */ 51/* iforce::bus */
@@ -146,7 +147,7 @@ struct iforce {
146 wait_queue_head_t wait; 147 wait_queue_head_t wait;
147 struct resource device_memory; 148 struct resource device_memory;
148 struct iforce_core_effect core_effects[FF_EFFECTS_MAX]; 149 struct iforce_core_effect core_effects[FF_EFFECTS_MAX];
149 struct semaphore mem_mutex; 150 struct mutex mem_mutex;
150}; 151};
151 152
152/* Get hi and low bytes of a 16-bits int */ 153/* Get hi and low bytes of a 16-bits int */
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index b154938e88a4..5570fd5487c7 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -37,6 +37,7 @@
37#include <linux/module.h> 37#include <linux/module.h>
38#include <linux/moduleparam.h> 38#include <linux/moduleparam.h>
39#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/mutex.h>
40 41
41MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); 42MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
42MODULE_DESCRIPTION("TurboGraFX parallel port interface driver"); 43MODULE_DESCRIPTION("TurboGraFX parallel port interface driver");
@@ -86,7 +87,7 @@ static struct tgfx {
86 char phys[TGFX_MAX_DEVICES][32]; 87 char phys[TGFX_MAX_DEVICES][32];
87 int sticks; 88 int sticks;
88 int used; 89 int used;
89 struct semaphore sem; 90 struct mutex sem;
90} *tgfx_base[TGFX_MAX_PORTS]; 91} *tgfx_base[TGFX_MAX_PORTS];
91 92
92/* 93/*
@@ -128,7 +129,7 @@ static int tgfx_open(struct input_dev *dev)
128 struct tgfx *tgfx = dev->private; 129 struct tgfx *tgfx = dev->private;
129 int err; 130 int err;
130 131
131 err = down_interruptible(&tgfx->sem); 132 err = mutex_lock_interruptible(&tgfx->sem);
132 if (err) 133 if (err)
133 return err; 134 return err;
134 135
@@ -138,7 +139,7 @@ static int tgfx_open(struct input_dev *dev)
138 mod_timer(&tgfx->timer, jiffies + TGFX_REFRESH_TIME); 139 mod_timer(&tgfx->timer, jiffies + TGFX_REFRESH_TIME);
139 } 140 }
140 141
141 up(&tgfx->sem); 142 mutex_unlock(&tgfx->sem);
142 return 0; 143 return 0;
143} 144}
144 145
@@ -146,13 +147,13 @@ static void tgfx_close(struct input_dev *dev)
146{ 147{
147 struct tgfx *tgfx = dev->private; 148 struct tgfx *tgfx = dev->private;
148 149
149 down(&tgfx->sem); 150 mutex_lock(&tgfx->sem);
150 if (!--tgfx->used) { 151 if (!--tgfx->used) {
151 del_timer_sync(&tgfx->timer); 152 del_timer_sync(&tgfx->timer);
152 parport_write_control(tgfx->pd->port, 0x00); 153 parport_write_control(tgfx->pd->port, 0x00);
153 parport_release(tgfx->pd); 154 parport_release(tgfx->pd);
154 } 155 }
155 up(&tgfx->sem); 156 mutex_unlock(&tgfx->sem);
156} 157}
157 158
158 159
@@ -191,7 +192,7 @@ static struct tgfx __init *tgfx_probe(int parport, int *n_buttons, int n_devs)
191 goto err_unreg_pardev; 192 goto err_unreg_pardev;
192 } 193 }
193 194
194 init_MUTEX(&tgfx->sem); 195 mutex_init(&tgfx->sem);
195 tgfx->pd = pd; 196 tgfx->pd = pd;
196 init_timer(&tgfx->timer); 197 init_timer(&tgfx->timer);
197 tgfx->timer.data = (long) tgfx; 198 tgfx->timer.data = (long) tgfx;
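
turbografx.c uses the interruptible variant in its open() path. mutex_lock_interruptible() returns 0 on success and -EINTR if a signal arrives while sleeping, the same contract down_interruptible() had, so the error can still be handed straight back to the caller. A trimmed sketch using the struct and field names from the driver above:

static int tgfx_open_sketch(struct input_dev *dev)
{
        struct tgfx *tgfx = dev->private;
        int err;

        err = mutex_lock_interruptible(&tgfx->sem);
        if (err)
                return err;        /* -EINTR propagated to the opener */

        if (!tgfx->used++)
                mod_timer(&tgfx->timer, jiffies + TGFX_REFRESH_TIME);

        mutex_unlock(&tgfx->sem);
        return 0;
}
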
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 3b0ac3b43c54..a9dda56f62c4 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -13,7 +13,7 @@ menuconfig INPUT_KEYBOARD
13if INPUT_KEYBOARD 13if INPUT_KEYBOARD
14 14
15config KEYBOARD_ATKBD 15config KEYBOARD_ATKBD
16 tristate "AT keyboard" if !X86_PC 16 tristate "AT keyboard" if EMBEDDED || !X86_PC
17 default y 17 default y
18 select SERIO 18 select SERIO
19 select SERIO_LIBPS2 19 select SERIO_LIBPS2
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index ffacf6eca5f5..fad04b66d268 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -27,6 +27,7 @@
27#include <linux/serio.h> 27#include <linux/serio.h>
28#include <linux/workqueue.h> 28#include <linux/workqueue.h>
29#include <linux/libps2.h> 29#include <linux/libps2.h>
30#include <linux/mutex.h>
30 31
31#define DRIVER_DESC "AT and PS/2 keyboard driver" 32#define DRIVER_DESC "AT and PS/2 keyboard driver"
32 33
@@ -216,7 +217,7 @@ struct atkbd {
216 unsigned long time; 217 unsigned long time;
217 218
218 struct work_struct event_work; 219 struct work_struct event_work;
219 struct semaphore event_sem; 220 struct mutex event_mutex;
220 unsigned long event_mask; 221 unsigned long event_mask;
221}; 222};
222 223
@@ -302,19 +303,19 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
302 if (atkbd->translated) { 303 if (atkbd->translated) {
303 304
304 if (atkbd->emul || 305 if (atkbd->emul ||
305 !(code == ATKBD_RET_EMUL0 || code == ATKBD_RET_EMUL1 || 306 (code != ATKBD_RET_EMUL0 && code != ATKBD_RET_EMUL1 &&
306 code == ATKBD_RET_HANGUEL || code == ATKBD_RET_HANJA || 307 code != ATKBD_RET_HANGUEL && code != ATKBD_RET_HANJA &&
307 (code == ATKBD_RET_ERR && !atkbd->err_xl) || 308 (code != ATKBD_RET_ERR || atkbd->err_xl) &&
308 (code == ATKBD_RET_BAT && !atkbd->bat_xl))) { 309 (code != ATKBD_RET_BAT || atkbd->bat_xl))) {
309 atkbd->release = code >> 7; 310 atkbd->release = code >> 7;
310 code &= 0x7f; 311 code &= 0x7f;
311 } 312 }
312 313
313 if (!atkbd->emul) { 314 if (!atkbd->emul) {
314 if ((code & 0x7f) == (ATKBD_RET_BAT & 0x7f)) 315 if ((code & 0x7f) == (ATKBD_RET_BAT & 0x7f))
315 atkbd->bat_xl = !atkbd->release; 316 atkbd->bat_xl = !(data >> 7);
316 if ((code & 0x7f) == (ATKBD_RET_ERR & 0x7f)) 317 if ((code & 0x7f) == (ATKBD_RET_ERR & 0x7f))
317 atkbd->err_xl = !atkbd->release; 318 atkbd->err_xl = !(data >> 7);
318 } 319 }
319 } 320 }
320 321
@@ -449,7 +450,7 @@ static void atkbd_event_work(void *data)
449 unsigned char param[2]; 450 unsigned char param[2];
450 int i, j; 451 int i, j;
451 452
452 down(&atkbd->event_sem); 453 mutex_lock(&atkbd->event_mutex);
453 454
454 if (test_and_clear_bit(ATKBD_LED_EVENT_BIT, &atkbd->event_mask)) { 455 if (test_and_clear_bit(ATKBD_LED_EVENT_BIT, &atkbd->event_mask)) {
455 param[0] = (test_bit(LED_SCROLLL, dev->led) ? 1 : 0) 456 param[0] = (test_bit(LED_SCROLLL, dev->led) ? 1 : 0)
@@ -480,7 +481,7 @@ static void atkbd_event_work(void *data)
480 ps2_command(&atkbd->ps2dev, param, ATKBD_CMD_SETREP); 481 ps2_command(&atkbd->ps2dev, param, ATKBD_CMD_SETREP);
481 } 482 }
482 483
483 up(&atkbd->event_sem); 484 mutex_unlock(&atkbd->event_mutex);
484} 485}
485 486
486/* 487/*
@@ -846,7 +847,7 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
846 atkbd->dev = dev; 847 atkbd->dev = dev;
847 ps2_init(&atkbd->ps2dev, serio); 848 ps2_init(&atkbd->ps2dev, serio);
848 INIT_WORK(&atkbd->event_work, atkbd_event_work, atkbd); 849 INIT_WORK(&atkbd->event_work, atkbd_event_work, atkbd);
849 init_MUTEX(&atkbd->event_sem); 850 mutex_init(&atkbd->event_mutex);
850 851
851 switch (serio->id.type) { 852 switch (serio->id.type) {
852 853
@@ -862,9 +863,6 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
862 atkbd->softrepeat = atkbd_softrepeat; 863 atkbd->softrepeat = atkbd_softrepeat;
863 atkbd->scroll = atkbd_scroll; 864 atkbd->scroll = atkbd_scroll;
864 865
865 if (!atkbd->write)
866 atkbd->softrepeat = 1;
867
868 if (atkbd->softrepeat) 866 if (atkbd->softrepeat)
869 atkbd->softraw = 1; 867 atkbd->softraw = 1;
870 868
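
The rewritten condition in atkbd_interrupt() is a pure De Morgan transformation of the old one, so the set of accepted scancodes is unchanged; only the negation is pushed inward, which reads more naturally:

        /* !(a || b || c || d || (e && !x) || (f && !y))
         *     ==  !a && !b && !c && !d && (!e || x) && (!f || y)
         * with a..d = the EMUL0/EMUL1/HANGUEL/HANJA comparisons,
         *      e,f  = the ERR/BAT comparisons and x,y = err_xl/bat_xl. */

The second hunk, by contrast, appears to change behaviour slightly: bat_xl and err_xl are now derived from the top bit of the raw data byte rather than from atkbd->release, which is only refreshed when the branch above actually runs.
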
diff --git a/drivers/input/keyboard/corgikbd.c b/drivers/input/keyboard/corgikbd.c
index e301ee4ca264..96c6bf77248a 100644
--- a/drivers/input/keyboard/corgikbd.c
+++ b/drivers/input/keyboard/corgikbd.c
@@ -29,11 +29,11 @@
29#define KB_COLS 12 29#define KB_COLS 12
30#define KB_ROWMASK(r) (1 << (r)) 30#define KB_ROWMASK(r) (1 << (r))
31#define SCANCODE(r,c) ( ((r)<<4) + (c) + 1 ) 31#define SCANCODE(r,c) ( ((r)<<4) + (c) + 1 )
32/* zero code, 124 scancodes + 3 hinge combinations */ 32/* zero code, 124 scancodes */
33#define NR_SCANCODES ( SCANCODE(KB_ROWS-1,KB_COLS-1) +1 +1 +3 ) 33#define NR_SCANCODES ( SCANCODE(KB_ROWS-1,KB_COLS-1) +1 +1 )
34#define SCAN_INTERVAL (HZ/10)
35 34
36#define HINGE_SCAN_INTERVAL (HZ/4) 35#define SCAN_INTERVAL (50) /* ms */
36#define HINGE_SCAN_INTERVAL (250) /* ms */
37 37
38#define CORGI_KEY_CALENDER KEY_F1 38#define CORGI_KEY_CALENDER KEY_F1
39#define CORGI_KEY_ADDRESS KEY_F2 39#define CORGI_KEY_ADDRESS KEY_F2
@@ -49,9 +49,6 @@
49#define CORGI_KEY_MAIL KEY_F10 49#define CORGI_KEY_MAIL KEY_F10
50#define CORGI_KEY_OK KEY_F11 50#define CORGI_KEY_OK KEY_F11
51#define CORGI_KEY_MENU KEY_F12 51#define CORGI_KEY_MENU KEY_F12
52#define CORGI_HINGE_0 KEY_KP0
53#define CORGI_HINGE_1 KEY_KP1
54#define CORGI_HINGE_2 KEY_KP2
55 52
56static unsigned char corgikbd_keycode[NR_SCANCODES] = { 53static unsigned char corgikbd_keycode[NR_SCANCODES] = {
57 0, /* 0 */ 54 0, /* 0 */
@@ -63,7 +60,6 @@ static unsigned char corgikbd_keycode[NR_SCANCODES] = {
63 CORGI_KEY_MAIL, KEY_Z, KEY_X, KEY_MINUS, KEY_SPACE, KEY_COMMA, 0, KEY_UP, 0, 0, 0, CORGI_KEY_FN, 0, 0, 0, 0, /* 81-96 */ 60 CORGI_KEY_MAIL, KEY_Z, KEY_X, KEY_MINUS, KEY_SPACE, KEY_COMMA, 0, KEY_UP, 0, 0, 0, CORGI_KEY_FN, 0, 0, 0, 0, /* 81-96 */
64 KEY_SYSRQ, CORGI_KEY_JAP1, CORGI_KEY_JAP2, CORGI_KEY_CANCEL, CORGI_KEY_OK, CORGI_KEY_MENU, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0, 0, 0, 0, /* 97-112 */ 61 KEY_SYSRQ, CORGI_KEY_JAP1, CORGI_KEY_JAP2, CORGI_KEY_CANCEL, CORGI_KEY_OK, CORGI_KEY_MENU, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0, 0, 0, 0, /* 97-112 */
65 CORGI_KEY_OFF, CORGI_KEY_EXOK, CORGI_KEY_EXCANCEL, CORGI_KEY_EXJOGDOWN, CORGI_KEY_EXJOGUP, 0, 0, 0, 0, 0, 0, 0, /* 113-124 */ 62 CORGI_KEY_OFF, CORGI_KEY_EXOK, CORGI_KEY_EXCANCEL, CORGI_KEY_EXJOGDOWN, CORGI_KEY_EXJOGUP, 0, 0, 0, 0, 0, 0, 0, /* 113-124 */
66 CORGI_HINGE_0, CORGI_HINGE_1, CORGI_HINGE_2 /* 125-127 */
67}; 63};
68 64
69 65
@@ -187,7 +183,7 @@ static void corgikbd_scankeyboard(struct corgikbd *corgikbd_data, struct pt_regs
187 183
188 /* if any keys are pressed, enable the timer */ 184 /* if any keys are pressed, enable the timer */
189 if (num_pressed) 185 if (num_pressed)
190 mod_timer(&corgikbd_data->timer, jiffies + SCAN_INTERVAL); 186 mod_timer(&corgikbd_data->timer, jiffies + msecs_to_jiffies(SCAN_INTERVAL));
191 187
192 spin_unlock_irqrestore(&corgikbd_data->lock, flags); 188 spin_unlock_irqrestore(&corgikbd_data->lock, flags);
193} 189}
@@ -228,6 +224,7 @@ static void corgikbd_timer_callback(unsigned long data)
228 * 0x0c - Keyboard and Screen Closed 224 * 0x0c - Keyboard and Screen Closed
229 */ 225 */
230 226
227#define READ_GPIO_BIT(x) (GPLR(x) & GPIO_bit(x))
231#define HINGE_STABLE_COUNT 2 228#define HINGE_STABLE_COUNT 2
232static int sharpsl_hinge_state; 229static int sharpsl_hinge_state;
233static int hinge_count; 230static int hinge_count;
@@ -239,6 +236,7 @@ static void corgikbd_hinge_timer(unsigned long data)
239 unsigned long flags; 236 unsigned long flags;
240 237
241 gprr = read_scoop_reg(&corgiscoop_device.dev, SCOOP_GPRR) & (CORGI_SCP_SWA | CORGI_SCP_SWB); 238 gprr = read_scoop_reg(&corgiscoop_device.dev, SCOOP_GPRR) & (CORGI_SCP_SWA | CORGI_SCP_SWB);
239 gprr |= (READ_GPIO_BIT(CORGI_GPIO_AK_INT) != 0);
242 if (gprr != sharpsl_hinge_state) { 240 if (gprr != sharpsl_hinge_state) {
243 hinge_count = 0; 241 hinge_count = 0;
244 sharpsl_hinge_state = gprr; 242 sharpsl_hinge_state = gprr;
@@ -249,27 +247,38 @@ static void corgikbd_hinge_timer(unsigned long data)
249 247
250 input_report_switch(corgikbd_data->input, SW_0, ((sharpsl_hinge_state & CORGI_SCP_SWA) != 0)); 248 input_report_switch(corgikbd_data->input, SW_0, ((sharpsl_hinge_state & CORGI_SCP_SWA) != 0));
251 input_report_switch(corgikbd_data->input, SW_1, ((sharpsl_hinge_state & CORGI_SCP_SWB) != 0)); 249 input_report_switch(corgikbd_data->input, SW_1, ((sharpsl_hinge_state & CORGI_SCP_SWB) != 0));
250 input_report_switch(corgikbd_data->input, SW_2, (READ_GPIO_BIT(CORGI_GPIO_AK_INT) != 0));
252 input_sync(corgikbd_data->input); 251 input_sync(corgikbd_data->input);
253 252
254 spin_unlock_irqrestore(&corgikbd_data->lock, flags); 253 spin_unlock_irqrestore(&corgikbd_data->lock, flags);
255 } 254 }
256 } 255 }
257 mod_timer(&corgikbd_data->htimer, jiffies + HINGE_SCAN_INTERVAL); 256 mod_timer(&corgikbd_data->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
258} 257}
259 258
260#ifdef CONFIG_PM 259#ifdef CONFIG_PM
261static int corgikbd_suspend(struct platform_device *dev, pm_message_t state) 260static int corgikbd_suspend(struct platform_device *dev, pm_message_t state)
262{ 261{
262 int i;
263 struct corgikbd *corgikbd = platform_get_drvdata(dev); 263 struct corgikbd *corgikbd = platform_get_drvdata(dev);
264
264 corgikbd->suspended = 1; 265 corgikbd->suspended = 1;
266 /* strobe 0 is the power key so this can't be made an input for
 267 power saving, therefore i = 1 */
268 for (i = 1; i < CORGI_KEY_STROBE_NUM; i++)
269 pxa_gpio_mode(CORGI_GPIO_KEY_STROBE(i) | GPIO_IN);
265 270
266 return 0; 271 return 0;
267} 272}
268 273
269static int corgikbd_resume(struct platform_device *dev) 274static int corgikbd_resume(struct platform_device *dev)
270{ 275{
276 int i;
271 struct corgikbd *corgikbd = platform_get_drvdata(dev); 277 struct corgikbd *corgikbd = platform_get_drvdata(dev);
272 278
279 for (i = 1; i < CORGI_KEY_STROBE_NUM; i++)
280 pxa_gpio_mode(CORGI_GPIO_KEY_STROBE(i) | GPIO_OUT | GPIO_DFLT_HIGH);
281
273 /* Upon resume, ignore the suspend key for a short while */ 282 /* Upon resume, ignore the suspend key for a short while */
274 corgikbd->suspend_jiffies=jiffies; 283 corgikbd->suspend_jiffies=jiffies;
275 corgikbd->suspended = 0; 284 corgikbd->suspended = 0;
@@ -333,10 +342,11 @@ static int __init corgikbd_probe(struct platform_device *pdev)
333 clear_bit(0, input_dev->keybit); 342 clear_bit(0, input_dev->keybit);
334 set_bit(SW_0, input_dev->swbit); 343 set_bit(SW_0, input_dev->swbit);
335 set_bit(SW_1, input_dev->swbit); 344 set_bit(SW_1, input_dev->swbit);
345 set_bit(SW_2, input_dev->swbit);
336 346
337 input_register_device(corgikbd->input); 347 input_register_device(corgikbd->input);
338 348
339 mod_timer(&corgikbd->htimer, jiffies + HINGE_SCAN_INTERVAL); 349 mod_timer(&corgikbd->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
340 350
341 /* Setup sense interrupts - RisingEdge Detect, sense lines as inputs */ 351 /* Setup sense interrupts - RisingEdge Detect, sense lines as inputs */
342 for (i = 0; i < CORGI_KEY_SENSE_NUM; i++) { 352 for (i = 0; i < CORGI_KEY_SENSE_NUM; i++) {
@@ -351,6 +361,9 @@ static int __init corgikbd_probe(struct platform_device *pdev)
351 for (i = 0; i < CORGI_KEY_STROBE_NUM; i++) 361 for (i = 0; i < CORGI_KEY_STROBE_NUM; i++)
352 pxa_gpio_mode(CORGI_GPIO_KEY_STROBE(i) | GPIO_OUT | GPIO_DFLT_HIGH); 362 pxa_gpio_mode(CORGI_GPIO_KEY_STROBE(i) | GPIO_OUT | GPIO_DFLT_HIGH);
353 363
364 /* Setup the headphone jack as an input */
365 pxa_gpio_mode(CORGI_GPIO_AK_INT | GPIO_IN);
366
354 return 0; 367 return 0;
355} 368}
356 369
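
corgikbd now defines its poll intervals in milliseconds and converts at the call site, matching what spitzkbd does below. The hinge interval stays at 250 ms (previously HZ/4), while the key-scan interval is tightened from 100 ms (HZ/10) to 50 ms. Converting with msecs_to_jiffies() keeps the wall-clock interval constant across HZ settings:

        /* msecs_to_jiffies() rounds up to whole ticks, e.g.:
         *   HZ=100  -> msecs_to_jiffies(50) == 5
         *   HZ=250  -> msecs_to_jiffies(50) == 13
         *   HZ=1000 -> msecs_to_jiffies(50) == 50
         */
        mod_timer(&corgikbd_data->timer,
                  jiffies + msecs_to_jiffies(SCAN_INTERVAL));
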
diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c
index 63f387e4b783..1dca3cf42a54 100644
--- a/drivers/input/keyboard/hil_kbd.c
+++ b/drivers/input/keyboard/hil_kbd.c
@@ -250,16 +250,19 @@ static int hil_kbd_connect(struct serio *serio, struct serio_driver *drv)
250 struct hil_kbd *kbd; 250 struct hil_kbd *kbd;
251 uint8_t did, *idd; 251 uint8_t did, *idd;
252 int i; 252 int i;
253 253
254 kbd = kzalloc(sizeof(*kbd), GFP_KERNEL); 254 kbd = kzalloc(sizeof(*kbd), GFP_KERNEL);
255 if (!kbd) 255 if (!kbd)
256 return -ENOMEM; 256 return -ENOMEM;
257 257
258 kbd->dev = input_allocate_device(); 258 kbd->dev = input_allocate_device();
259 if (!kbd->dev) goto bail1; 259 if (!kbd->dev)
260 goto bail0;
261
260 kbd->dev->private = kbd; 262 kbd->dev->private = kbd;
261 263
262 if (serio_open(serio, drv)) goto bail0; 264 if (serio_open(serio, drv))
265 goto bail1;
263 266
264 serio_set_drvdata(serio, kbd); 267 serio_set_drvdata(serio, kbd);
265 kbd->serio = serio; 268 kbd->serio = serio;
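
The relabelled gotos in hil_kbd_connect() make the unwind order match the allocation order: a failed input_allocate_device() only has the kbd structure to free, while a failed serio_open() must also release the allocated input device. The bail labels themselves sit outside this hunk; a sketch of the layout the jump sites imply (label bodies and return value are assumptions):

        kbd->dev = input_allocate_device();
        if (!kbd->dev)
                goto bail0;        /* only kbd allocated so far */

        if (serio_open(serio, drv))
                goto bail1;        /* the input device must be freed too */

        /* ... normal registration path, returns 0 ... */

 bail1:
        input_free_device(kbd->dev);
 bail0:
        kfree(kbd);
        return -ENOMEM;            /* illustrative; the real value is outside the hunk */
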
diff --git a/drivers/input/keyboard/spitzkbd.c b/drivers/input/keyboard/spitzkbd.c
index 83999d583122..bc61cf8cfc65 100644
--- a/drivers/input/keyboard/spitzkbd.c
+++ b/drivers/input/keyboard/spitzkbd.c
@@ -30,6 +30,7 @@
30#define SCANCODE(r,c) (((r)<<4) + (c) + 1) 30#define SCANCODE(r,c) (((r)<<4) + (c) + 1)
31#define NR_SCANCODES ((KB_ROWS<<4) + 1) 31#define NR_SCANCODES ((KB_ROWS<<4) + 1)
32 32
33#define SCAN_INTERVAL (50) /* ms */
33#define HINGE_SCAN_INTERVAL (150) /* ms */ 34#define HINGE_SCAN_INTERVAL (150) /* ms */
34 35
35#define SPITZ_KEY_CALENDER KEY_F1 36#define SPITZ_KEY_CALENDER KEY_F1
@@ -230,7 +231,7 @@ static void spitzkbd_scankeyboard(struct spitzkbd *spitzkbd_data, struct pt_regs
230 231
231 /* if any keys are pressed, enable the timer */ 232 /* if any keys are pressed, enable the timer */
232 if (num_pressed) 233 if (num_pressed)
233 mod_timer(&spitzkbd_data->timer, jiffies + msecs_to_jiffies(100)); 234 mod_timer(&spitzkbd_data->timer, jiffies + msecs_to_jiffies(SCAN_INTERVAL));
234 235
235 spin_unlock_irqrestore(&spitzkbd_data->lock, flags); 236 spin_unlock_irqrestore(&spitzkbd_data->lock, flags);
236} 237}
@@ -287,6 +288,7 @@ static void spitzkbd_hinge_timer(unsigned long data)
287 unsigned long flags; 288 unsigned long flags;
288 289
289 state = GPLR(SPITZ_GPIO_SWA) & (GPIO_bit(SPITZ_GPIO_SWA)|GPIO_bit(SPITZ_GPIO_SWB)); 290 state = GPLR(SPITZ_GPIO_SWA) & (GPIO_bit(SPITZ_GPIO_SWA)|GPIO_bit(SPITZ_GPIO_SWB));
291 state |= (GPLR(SPITZ_GPIO_AK_INT) & GPIO_bit(SPITZ_GPIO_AK_INT));
290 if (state != sharpsl_hinge_state) { 292 if (state != sharpsl_hinge_state) {
291 hinge_count = 0; 293 hinge_count = 0;
292 sharpsl_hinge_state = state; 294 sharpsl_hinge_state = state;
@@ -299,6 +301,7 @@ static void spitzkbd_hinge_timer(unsigned long data)
299 301
300 input_report_switch(spitzkbd_data->input, SW_0, ((GPLR(SPITZ_GPIO_SWA) & GPIO_bit(SPITZ_GPIO_SWA)) != 0)); 302 input_report_switch(spitzkbd_data->input, SW_0, ((GPLR(SPITZ_GPIO_SWA) & GPIO_bit(SPITZ_GPIO_SWA)) != 0));
301 input_report_switch(spitzkbd_data->input, SW_1, ((GPLR(SPITZ_GPIO_SWB) & GPIO_bit(SPITZ_GPIO_SWB)) != 0)); 303 input_report_switch(spitzkbd_data->input, SW_1, ((GPLR(SPITZ_GPIO_SWB) & GPIO_bit(SPITZ_GPIO_SWB)) != 0));
304 input_report_switch(spitzkbd_data->input, SW_2, ((GPLR(SPITZ_GPIO_AK_INT) & GPIO_bit(SPITZ_GPIO_AK_INT)) != 0));
302 input_sync(spitzkbd_data->input); 305 input_sync(spitzkbd_data->input);
303 306
304 spin_unlock_irqrestore(&spitzkbd_data->lock, flags); 307 spin_unlock_irqrestore(&spitzkbd_data->lock, flags);
@@ -397,6 +400,7 @@ static int __init spitzkbd_probe(struct platform_device *dev)
397 clear_bit(0, input_dev->keybit); 400 clear_bit(0, input_dev->keybit);
398 set_bit(SW_0, input_dev->swbit); 401 set_bit(SW_0, input_dev->swbit);
399 set_bit(SW_1, input_dev->swbit); 402 set_bit(SW_1, input_dev->swbit);
403 set_bit(SW_2, input_dev->swbit);
400 404
401 input_register_device(input_dev); 405 input_register_device(input_dev);
402 406
@@ -432,6 +436,9 @@ static int __init spitzkbd_probe(struct platform_device *dev)
432 request_irq(SPITZ_IRQ_GPIO_SWB, spitzkbd_hinge_isr, 436 request_irq(SPITZ_IRQ_GPIO_SWB, spitzkbd_hinge_isr,
433 SA_INTERRUPT | SA_TRIGGER_RISING | SA_TRIGGER_FALLING, 437 SA_INTERRUPT | SA_TRIGGER_RISING | SA_TRIGGER_FALLING,
434 "Spitzkbd SWB", spitzkbd); 438 "Spitzkbd SWB", spitzkbd);
439 request_irq(SPITZ_IRQ_GPIO_AK_INT, spitzkbd_hinge_isr,
440 SA_INTERRUPT | SA_TRIGGER_RISING | SA_TRIGGER_FALLING,
441 "Spitzkbd HP", spitzkbd);
435 442
436 printk(KERN_INFO "input: Spitz Keyboard Registered\n"); 443 printk(KERN_INFO "input: Spitz Keyboard Registered\n");
437 444
@@ -450,6 +457,7 @@ static int spitzkbd_remove(struct platform_device *dev)
450 free_irq(SPITZ_IRQ_GPIO_ON_KEY, spitzkbd); 457 free_irq(SPITZ_IRQ_GPIO_ON_KEY, spitzkbd);
451 free_irq(SPITZ_IRQ_GPIO_SWA, spitzkbd); 458 free_irq(SPITZ_IRQ_GPIO_SWA, spitzkbd);
452 free_irq(SPITZ_IRQ_GPIO_SWB, spitzkbd); 459 free_irq(SPITZ_IRQ_GPIO_SWB, spitzkbd);
460 free_irq(SPITZ_IRQ_GPIO_AK_INT, spitzkbd);
453 461
454 del_timer_sync(&spitzkbd->htimer); 462 del_timer_sync(&spitzkbd->htimer);
455 del_timer_sync(&spitzkbd->timer); 463 del_timer_sync(&spitzkbd->timer);
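
Both corgikbd and spitzkbd now report a third switch, SW_2, driven by the AK_INT GPIO — the headphone-jack detect line, going by the "Setup the headphone jack as an input" comment and the "Spitzkbd HP" IRQ name. Switch state reaches userspace through the ordinary evdev interface; a small, purely illustrative userspace reader (the device path and the availability of EVIOCGSW in headers of this vintage are assumptions):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/input.h>

#define LONG_BITS (8 * sizeof(long))

int main(void)
{
        unsigned long sw[(SW_MAX + LONG_BITS) / LONG_BITS] = { 0 };
        int fd = open("/dev/input/event0", O_RDONLY);    /* assumed path */

        if (fd < 0 || ioctl(fd, EVIOCGSW(sizeof(sw)), sw) < 0)
                return 1;

        /* SW_2 is the constant used by the drivers above */
        printf("SW_2 (headphone): %lu\n",
               (sw[SW_2 / LONG_BITS] >> (SW_2 % LONG_BITS)) & 1);
        return 0;
}
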
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index 1ef477f4469c..afd322185bbf 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -24,7 +24,6 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
24MODULE_DESCRIPTION("PC Speaker beeper driver"); 24MODULE_DESCRIPTION("PC Speaker beeper driver");
25MODULE_LICENSE("GPL"); 25MODULE_LICENSE("GPL");
26 26
27static struct platform_device *pcspkr_platform_device;
28static DEFINE_SPINLOCK(i8253_beep_lock); 27static DEFINE_SPINLOCK(i8253_beep_lock);
29 28
30static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) 29static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
@@ -135,35 +134,11 @@ static struct platform_driver pcspkr_platform_driver = {
135 134
136static int __init pcspkr_init(void) 135static int __init pcspkr_init(void)
137{ 136{
138 int err; 137 return platform_driver_register(&pcspkr_platform_driver);
139
140 err = platform_driver_register(&pcspkr_platform_driver);
141 if (err)
142 return err;
143
144 pcspkr_platform_device = platform_device_alloc("pcspkr", -1);
145 if (!pcspkr_platform_device) {
146 err = -ENOMEM;
147 goto err_unregister_driver;
148 }
149
150 err = platform_device_add(pcspkr_platform_device);
151 if (err)
152 goto err_free_device;
153
154 return 0;
155
156 err_free_device:
157 platform_device_put(pcspkr_platform_device);
158 err_unregister_driver:
159 platform_driver_unregister(&pcspkr_platform_driver);
160
161 return err;
162} 138}
163 139
164static void __exit pcspkr_exit(void) 140static void __exit pcspkr_exit(void)
165{ 141{
166 platform_device_unregister(pcspkr_platform_device);
167 platform_driver_unregister(&pcspkr_platform_driver); 142 platform_driver_unregister(&pcspkr_platform_driver);
168} 143}
169 144
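
The pcspkr driver no longer allocates and registers its own platform device; it only registers the platform_driver and relies on a matching "pcspkr" device being created elsewhere, presumably by arch setup code — that side is not part of this diff. A sketch of what such a registration would look like, offered only as an assumption:

#include <linux/init.h>
#include <linux/platform_device.h>

static struct platform_device pcspkr_device = {
        .name = "pcspkr",
        .id   = -1,
};

static int __init add_pcspkr_device(void)
{
        return platform_device_register(&pcspkr_device);
}
device_initcall(add_pcspkr_device);
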
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 546ed9b4901d..d723e9ad7c41 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -194,7 +194,7 @@ static int uinput_open(struct inode *inode, struct file *file)
194 if (!newdev) 194 if (!newdev)
195 return -ENOMEM; 195 return -ENOMEM;
196 196
197 init_MUTEX(&newdev->sem); 197 mutex_init(&newdev->mutex);
198 spin_lock_init(&newdev->requests_lock); 198 spin_lock_init(&newdev->requests_lock);
199 init_waitqueue_head(&newdev->requests_waitq); 199 init_waitqueue_head(&newdev->requests_waitq);
200 init_waitqueue_head(&newdev->waitq); 200 init_waitqueue_head(&newdev->waitq);
@@ -340,7 +340,7 @@ static ssize_t uinput_write(struct file *file, const char __user *buffer, size_t
340 struct uinput_device *udev = file->private_data; 340 struct uinput_device *udev = file->private_data;
341 int retval; 341 int retval;
342 342
343 retval = down_interruptible(&udev->sem); 343 retval = mutex_lock_interruptible(&udev->mutex);
344 if (retval) 344 if (retval)
345 return retval; 345 return retval;
346 346
@@ -348,7 +348,7 @@ static ssize_t uinput_write(struct file *file, const char __user *buffer, size_t
348 uinput_inject_event(udev, buffer, count) : 348 uinput_inject_event(udev, buffer, count) :
349 uinput_setup_device(udev, buffer, count); 349 uinput_setup_device(udev, buffer, count);
350 350
351 up(&udev->sem); 351 mutex_unlock(&udev->mutex);
352 352
353 return retval; 353 return retval;
354} 354}
@@ -369,7 +369,7 @@ static ssize_t uinput_read(struct file *file, char __user *buffer, size_t count,
369 if (retval) 369 if (retval)
370 return retval; 370 return retval;
371 371
372 retval = down_interruptible(&udev->sem); 372 retval = mutex_lock_interruptible(&udev->mutex);
373 if (retval) 373 if (retval)
374 return retval; 374 return retval;
375 375
@@ -388,7 +388,7 @@ static ssize_t uinput_read(struct file *file, char __user *buffer, size_t count,
388 } 388 }
389 389
390 out: 390 out:
391 up(&udev->sem); 391 mutex_unlock(&udev->mutex);
392 392
393 return retval; 393 return retval;
394} 394}
@@ -439,7 +439,7 @@ static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
439 439
440 udev = file->private_data; 440 udev = file->private_data;
441 441
442 retval = down_interruptible(&udev->sem); 442 retval = mutex_lock_interruptible(&udev->mutex);
443 if (retval) 443 if (retval)
444 return retval; 444 return retval;
445 445
@@ -589,7 +589,7 @@ static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
589 } 589 }
590 590
591 out: 591 out:
592 up(&udev->sem); 592 mutex_unlock(&udev->mutex);
593 return retval; 593 return retval;
594} 594}
595 595
diff --git a/drivers/input/mouse/hil_ptr.c b/drivers/input/mouse/hil_ptr.c
index bfb564fd8fe2..69f02178c528 100644
--- a/drivers/input/mouse/hil_ptr.c
+++ b/drivers/input/mouse/hil_ptr.c
@@ -249,10 +249,13 @@ static int hil_ptr_connect(struct serio *serio, struct serio_driver *driver)
249 return -ENOMEM; 249 return -ENOMEM;
250 250
251 ptr->dev = input_allocate_device(); 251 ptr->dev = input_allocate_device();
252 if (!ptr->dev) goto bail0; 252 if (!ptr->dev)
253 goto bail0;
254
253 ptr->dev->private = ptr; 255 ptr->dev->private = ptr;
254 256
255 if (serio_open(serio, driver)) goto bail1; 257 if (serio_open(serio, driver))
258 goto bail1;
256 259
257 serio_set_drvdata(serio, ptr); 260 serio_set_drvdata(serio, ptr);
258 ptr->serio = serio; 261 ptr->serio = serio;
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index ad6217467676..32d70ed8f41d 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -20,6 +20,8 @@
20#include <linux/serio.h> 20#include <linux/serio.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/libps2.h> 22#include <linux/libps2.h>
23#include <linux/mutex.h>
24
23#include "psmouse.h" 25#include "psmouse.h"
24#include "synaptics.h" 26#include "synaptics.h"
25#include "logips2pp.h" 27#include "logips2pp.h"
@@ -98,13 +100,13 @@ __obsolete_setup("psmouse_resetafter=");
98__obsolete_setup("psmouse_rate="); 100__obsolete_setup("psmouse_rate=");
99 101
100/* 102/*
101 * psmouse_sem protects all operations changing state of mouse 103 * psmouse_mutex protects all operations changing state of mouse
102 * (connecting, disconnecting, changing rate or resolution via 104 * (connecting, disconnecting, changing rate or resolution via
 103 * sysfs). We could use a per-device semaphore but since there is 105 * sysfs). We could use a per-device semaphore but since there is
104 * rarely more than one PS/2 mouse connected and since semaphore 106 * rarely more than one PS/2 mouse connected and since semaphore
105 * is taken in "slow" paths it is not worth it. 107 * is taken in "slow" paths it is not worth it.
106 */ 108 */
107static DECLARE_MUTEX(psmouse_sem); 109static DEFINE_MUTEX(psmouse_mutex);
108 110
109static struct workqueue_struct *kpsmoused_wq; 111static struct workqueue_struct *kpsmoused_wq;
110 112
@@ -868,7 +870,7 @@ static void psmouse_resync(void *p)
868 int failed = 0, enabled = 0; 870 int failed = 0, enabled = 0;
869 int i; 871 int i;
870 872
871 down(&psmouse_sem); 873 mutex_lock(&psmouse_mutex);
872 874
873 if (psmouse->state != PSMOUSE_RESYNCING) 875 if (psmouse->state != PSMOUSE_RESYNCING)
874 goto out; 876 goto out;
@@ -948,7 +950,7 @@ static void psmouse_resync(void *p)
948 if (parent) 950 if (parent)
949 psmouse_activate(parent); 951 psmouse_activate(parent);
950 out: 952 out:
951 up(&psmouse_sem); 953 mutex_unlock(&psmouse_mutex);
952} 954}
953 955
954/* 956/*
@@ -974,14 +976,14 @@ static void psmouse_disconnect(struct serio *serio)
974 976
975 sysfs_remove_group(&serio->dev.kobj, &psmouse_attribute_group); 977 sysfs_remove_group(&serio->dev.kobj, &psmouse_attribute_group);
976 978
977 down(&psmouse_sem); 979 mutex_lock(&psmouse_mutex);
978 980
979 psmouse_set_state(psmouse, PSMOUSE_CMD_MODE); 981 psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
980 982
981 /* make sure we don't have a resync in progress */ 983 /* make sure we don't have a resync in progress */
982 up(&psmouse_sem); 984 mutex_unlock(&psmouse_mutex);
983 flush_workqueue(kpsmoused_wq); 985 flush_workqueue(kpsmoused_wq);
984 down(&psmouse_sem); 986 mutex_lock(&psmouse_mutex);
985 987
986 if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) { 988 if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
987 parent = serio_get_drvdata(serio->parent); 989 parent = serio_get_drvdata(serio->parent);
@@ -1004,7 +1006,7 @@ static void psmouse_disconnect(struct serio *serio)
1004 if (parent) 1006 if (parent)
1005 psmouse_activate(parent); 1007 psmouse_activate(parent);
1006 1008
1007 up(&psmouse_sem); 1009 mutex_unlock(&psmouse_mutex);
1008} 1010}
1009 1011
1010static int psmouse_switch_protocol(struct psmouse *psmouse, struct psmouse_protocol *proto) 1012static int psmouse_switch_protocol(struct psmouse *psmouse, struct psmouse_protocol *proto)
@@ -1076,7 +1078,7 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
1076 struct input_dev *input_dev; 1078 struct input_dev *input_dev;
1077 int retval = -ENOMEM; 1079 int retval = -ENOMEM;
1078 1080
1079 down(&psmouse_sem); 1081 mutex_lock(&psmouse_mutex);
1080 1082
1081 /* 1083 /*
1082 * If this is a pass-through port deactivate parent so the device 1084 * If this is a pass-through port deactivate parent so the device
@@ -1144,7 +1146,7 @@ out:
1144 if (parent) 1146 if (parent)
1145 psmouse_activate(parent); 1147 psmouse_activate(parent);
1146 1148
1147 up(&psmouse_sem); 1149 mutex_unlock(&psmouse_mutex);
1148 return retval; 1150 return retval;
1149} 1151}
1150 1152
@@ -1161,7 +1163,7 @@ static int psmouse_reconnect(struct serio *serio)
1161 return -1; 1163 return -1;
1162 } 1164 }
1163 1165
1164 down(&psmouse_sem); 1166 mutex_lock(&psmouse_mutex);
1165 1167
1166 if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) { 1168 if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
1167 parent = serio_get_drvdata(serio->parent); 1169 parent = serio_get_drvdata(serio->parent);
@@ -1195,7 +1197,7 @@ out:
1195 if (parent) 1197 if (parent)
1196 psmouse_activate(parent); 1198 psmouse_activate(parent);
1197 1199
1198 up(&psmouse_sem); 1200 mutex_unlock(&psmouse_mutex);
1199 return rc; 1201 return rc;
1200} 1202}
1201 1203
@@ -1273,7 +1275,7 @@ ssize_t psmouse_attr_set_helper(struct device *dev, struct device_attribute *dev
1273 goto out_unpin; 1275 goto out_unpin;
1274 } 1276 }
1275 1277
1276 retval = down_interruptible(&psmouse_sem); 1278 retval = mutex_lock_interruptible(&psmouse_mutex);
1277 if (retval) 1279 if (retval)
1278 goto out_unpin; 1280 goto out_unpin;
1279 1281
@@ -1281,7 +1283,7 @@ ssize_t psmouse_attr_set_helper(struct device *dev, struct device_attribute *dev
1281 1283
1282 if (psmouse->state == PSMOUSE_IGNORE) { 1284 if (psmouse->state == PSMOUSE_IGNORE) {
1283 retval = -ENODEV; 1285 retval = -ENODEV;
1284 goto out_up; 1286 goto out_unlock;
1285 } 1287 }
1286 1288
1287 if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) { 1289 if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
@@ -1299,8 +1301,8 @@ ssize_t psmouse_attr_set_helper(struct device *dev, struct device_attribute *dev
1299 if (parent) 1301 if (parent)
1300 psmouse_activate(parent); 1302 psmouse_activate(parent);
1301 1303
1302 out_up: 1304 out_unlock:
1303 up(&psmouse_sem); 1305 mutex_unlock(&psmouse_mutex);
1304 out_unpin: 1306 out_unpin:
1305 serio_unpin_driver(serio); 1307 serio_unpin_driver(serio);
1306 return retval; 1308 return retval;
@@ -1357,11 +1359,11 @@ static ssize_t psmouse_attr_set_protocol(struct psmouse *psmouse, void *data, co
1357 return -EIO; 1359 return -EIO;
1358 } 1360 }
1359 1361
1360 up(&psmouse_sem); 1362 mutex_unlock(&psmouse_mutex);
1361 serio_unpin_driver(serio); 1363 serio_unpin_driver(serio);
1362 serio_unregister_child_port(serio); 1364 serio_unregister_child_port(serio);
1363 serio_pin_driver_uninterruptible(serio); 1365 serio_pin_driver_uninterruptible(serio);
1364 down(&psmouse_sem); 1366 mutex_lock(&psmouse_mutex);
1365 1367
1366 if (serio->drv != &psmouse_drv) { 1368 if (serio->drv != &psmouse_drv) {
1367 input_free_device(new_dev); 1369 input_free_device(new_dev);
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 2051bec2c394..ad5d0a85e960 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -247,14 +247,12 @@ static void synaptics_pt_create(struct psmouse *psmouse)
247{ 247{
248 struct serio *serio; 248 struct serio *serio;
249 249
250 serio = kmalloc(sizeof(struct serio), GFP_KERNEL); 250 serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
251 if (!serio) { 251 if (!serio) {
252 printk(KERN_ERR "synaptics: not enough memory to allocate pass-through port\n"); 252 printk(KERN_ERR "synaptics: not enough memory to allocate pass-through port\n");
253 return; 253 return;
254 } 254 }
255 255
256 memset(serio, 0, sizeof(struct serio));
257
258 serio->id.type = SERIO_PS_PSTHRU; 256 serio->id.type = SERIO_PS_PSTHRU;
259 strlcpy(serio->name, "Synaptics pass-through", sizeof(serio->name)); 257 strlcpy(serio->name, "Synaptics pass-through", sizeof(serio->name));
260 strlcpy(serio->phys, "synaptics-pt/serio0", sizeof(serio->name)); 258 strlcpy(serio->phys, "synaptics-pt/serio0", sizeof(serio->name));
@@ -605,14 +603,21 @@ static struct dmi_system_id toshiba_dmi_table[] = {
605 .ident = "Toshiba Satellite", 603 .ident = "Toshiba Satellite",
606 .matches = { 604 .matches = {
607 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 605 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
608 DMI_MATCH(DMI_PRODUCT_NAME , "Satellite"), 606 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite"),
609 }, 607 },
610 }, 608 },
611 { 609 {
612 .ident = "Toshiba Dynabook", 610 .ident = "Toshiba Dynabook",
613 .matches = { 611 .matches = {
614 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 612 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
615 DMI_MATCH(DMI_PRODUCT_NAME , "dynabook"), 613 DMI_MATCH(DMI_PRODUCT_NAME, "dynabook"),
614 },
615 },
616 {
617 .ident = "Toshiba Portege M300",
618 .matches = {
619 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
620 DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M300"),
616 }, 621 },
617 }, 622 },
618 { } 623 { }
@@ -623,10 +628,9 @@ int synaptics_init(struct psmouse *psmouse)
623{ 628{
624 struct synaptics_data *priv; 629 struct synaptics_data *priv;
625 630
626 psmouse->private = priv = kmalloc(sizeof(struct synaptics_data), GFP_KERNEL); 631 psmouse->private = priv = kzalloc(sizeof(struct synaptics_data), GFP_KERNEL);
627 if (!priv) 632 if (!priv)
628 return -1; 633 return -1;
629 memset(priv, 0, sizeof(struct synaptics_data));
630 634
631 if (synaptics_query_hardware(psmouse)) { 635 if (synaptics_query_hardware(psmouse)) {
632 printk(KERN_ERR "Unable to query Synaptics hardware.\n"); 636 printk(KERN_ERR "Unable to query Synaptics hardware.\n");
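
synaptics.c, mousedev.c, the serio drivers and several other files in this series replace the kmalloc()+memset() pair with kzalloc(), which allocates and zeroes in one call. The transformation is mechanical, as in synaptics_init() above:

        /* before */
        priv = kmalloc(sizeof(struct synaptics_data), GFP_KERNEL);
        if (!priv)
                return -1;
        memset(priv, 0, sizeof(struct synaptics_data));

        /* after: same result, one call, no way to forget the memset */
        priv = kzalloc(sizeof(struct synaptics_data), GFP_KERNEL);
        if (!priv)
                return -1;
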
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 9abed18d2ecf..b685a507955d 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -412,9 +412,8 @@ static int mousedev_open(struct inode * inode, struct file * file)
412 if (i >= MOUSEDEV_MINORS || !mousedev_table[i]) 412 if (i >= MOUSEDEV_MINORS || !mousedev_table[i])
413 return -ENODEV; 413 return -ENODEV;
414 414
415 if (!(list = kmalloc(sizeof(struct mousedev_list), GFP_KERNEL))) 415 if (!(list = kzalloc(sizeof(struct mousedev_list), GFP_KERNEL)))
416 return -ENOMEM; 416 return -ENOMEM;
417 memset(list, 0, sizeof(struct mousedev_list));
418 417
419 spin_lock_init(&list->packet_lock); 418 spin_lock_init(&list->packet_lock);
420 list->pos_x = xres / 2; 419 list->pos_x = xres / 2;
@@ -626,9 +625,8 @@ static struct input_handle *mousedev_connect(struct input_handler *handler, stru
626 return NULL; 625 return NULL;
627 } 626 }
628 627
629 if (!(mousedev = kmalloc(sizeof(struct mousedev), GFP_KERNEL))) 628 if (!(mousedev = kzalloc(sizeof(struct mousedev), GFP_KERNEL)))
630 return NULL; 629 return NULL;
631 memset(mousedev, 0, sizeof(struct mousedev));
632 630
633 INIT_LIST_HEAD(&mousedev->list); 631 INIT_LIST_HEAD(&mousedev->list);
634 init_waitqueue_head(&mousedev->wait); 632 init_waitqueue_head(&mousedev->wait);
diff --git a/drivers/input/power.c b/drivers/input/power.c
index bfc5c63ebffe..526e6070600c 100644
--- a/drivers/input/power.c
+++ b/drivers/input/power.c
@@ -103,9 +103,8 @@ static struct input_handle *power_connect(struct input_handler *handler,
103{ 103{
104 struct input_handle *handle; 104 struct input_handle *handle;
105 105
106 if (!(handle = kmalloc(sizeof(struct input_handle), GFP_KERNEL))) 106 if (!(handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL)))
107 return NULL; 107 return NULL;
108 memset(handle, 0, sizeof(struct input_handle));
109 108
110 handle->dev = dev; 109 handle->dev = dev;
111 handle->handler = handler; 110 handle->handler = handler;
diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c
index ea499783fb12..bbbe15e21904 100644
--- a/drivers/input/serio/hil_mlc.c
+++ b/drivers/input/serio/hil_mlc.c
@@ -872,9 +872,8 @@ int hil_mlc_register(hil_mlc *mlc) {
872 for (i = 0; i < HIL_MLC_DEVMEM; i++) { 872 for (i = 0; i < HIL_MLC_DEVMEM; i++) {
873 struct serio *mlc_serio; 873 struct serio *mlc_serio;
874 hil_mlc_copy_di_scratch(mlc, i); 874 hil_mlc_copy_di_scratch(mlc, i);
875 mlc_serio = kmalloc(sizeof(*mlc_serio), GFP_KERNEL); 875 mlc_serio = kzalloc(sizeof(*mlc_serio), GFP_KERNEL);
876 mlc->serio[i] = mlc_serio; 876 mlc->serio[i] = mlc_serio;
877 memset(mlc_serio, 0, sizeof(*mlc_serio));
878 mlc_serio->id = hil_mlc_serio_id; 877 mlc_serio->id = hil_mlc_serio_id;
879 mlc_serio->write = hil_mlc_serio_write; 878 mlc_serio->write = hil_mlc_serio_write;
880 mlc_serio->open = hil_mlc_serio_open; 879 mlc_serio->open = hil_mlc_serio_open;
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index a4c6f3522723..f606e96bc2f4 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -192,7 +192,9 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = {
192#include <linux/pnp.h> 192#include <linux/pnp.h>
193 193
194static int i8042_pnp_kbd_registered; 194static int i8042_pnp_kbd_registered;
195static unsigned int i8042_pnp_kbd_devices;
195static int i8042_pnp_aux_registered; 196static int i8042_pnp_aux_registered;
197static unsigned int i8042_pnp_aux_devices;
196 198
197static int i8042_pnp_command_reg; 199static int i8042_pnp_command_reg;
198static int i8042_pnp_data_reg; 200static int i8042_pnp_data_reg;
@@ -219,6 +221,7 @@ static int i8042_pnp_kbd_probe(struct pnp_dev *dev, const struct pnp_device_id *
219 strncat(i8042_pnp_kbd_name, pnp_dev_name(dev), sizeof(i8042_pnp_kbd_name)); 221 strncat(i8042_pnp_kbd_name, pnp_dev_name(dev), sizeof(i8042_pnp_kbd_name));
220 } 222 }
221 223
224 i8042_pnp_kbd_devices++;
222 return 0; 225 return 0;
223} 226}
224 227
@@ -239,6 +242,7 @@ static int i8042_pnp_aux_probe(struct pnp_dev *dev, const struct pnp_device_id *
239 strncat(i8042_pnp_aux_name, pnp_dev_name(dev), sizeof(i8042_pnp_aux_name)); 242 strncat(i8042_pnp_aux_name, pnp_dev_name(dev), sizeof(i8042_pnp_aux_name));
240 } 243 }
241 244
245 i8042_pnp_aux_devices++;
242 return 0; 246 return 0;
243} 247}
244 248
@@ -287,21 +291,23 @@ static void i8042_pnp_exit(void)
287 291
288static int __init i8042_pnp_init(void) 292static int __init i8042_pnp_init(void)
289{ 293{
290 int result_kbd = 0, result_aux = 0;
291 char kbd_irq_str[4] = { 0 }, aux_irq_str[4] = { 0 }; 294 char kbd_irq_str[4] = { 0 }, aux_irq_str[4] = { 0 };
295 int err;
292 296
293 if (i8042_nopnp) { 297 if (i8042_nopnp) {
294 printk(KERN_INFO "i8042: PNP detection disabled\n"); 298 printk(KERN_INFO "i8042: PNP detection disabled\n");
295 return 0; 299 return 0;
296 } 300 }
297 301
298 if ((result_kbd = pnp_register_driver(&i8042_pnp_kbd_driver)) >= 0) 302 err = pnp_register_driver(&i8042_pnp_kbd_driver);
303 if (!err)
299 i8042_pnp_kbd_registered = 1; 304 i8042_pnp_kbd_registered = 1;
300 305
301 if ((result_aux = pnp_register_driver(&i8042_pnp_aux_driver)) >= 0) 306 err = pnp_register_driver(&i8042_pnp_aux_driver);
307 if (!err)
302 i8042_pnp_aux_registered = 1; 308 i8042_pnp_aux_registered = 1;
303 309
304 if (result_kbd <= 0 && result_aux <= 0) { 310 if (!i8042_pnp_kbd_devices && !i8042_pnp_aux_devices) {
305 i8042_pnp_exit(); 311 i8042_pnp_exit();
306#if defined(__ia64__) 312#if defined(__ia64__)
307 return -ENODEV; 313 return -ENODEV;
@@ -311,24 +317,24 @@ static int __init i8042_pnp_init(void)
311#endif 317#endif
312 } 318 }
313 319
314 if (result_kbd > 0) 320 if (i8042_pnp_kbd_devices)
315 snprintf(kbd_irq_str, sizeof(kbd_irq_str), 321 snprintf(kbd_irq_str, sizeof(kbd_irq_str),
316 "%d", i8042_pnp_kbd_irq); 322 "%d", i8042_pnp_kbd_irq);
317 if (result_aux > 0) 323 if (i8042_pnp_aux_devices)
318 snprintf(aux_irq_str, sizeof(aux_irq_str), 324 snprintf(aux_irq_str, sizeof(aux_irq_str),
319 "%d", i8042_pnp_aux_irq); 325 "%d", i8042_pnp_aux_irq);
320 326
321 printk(KERN_INFO "PNP: PS/2 Controller [%s%s%s] at %#x,%#x irq %s%s%s\n", 327 printk(KERN_INFO "PNP: PS/2 Controller [%s%s%s] at %#x,%#x irq %s%s%s\n",
322 i8042_pnp_kbd_name, (result_kbd > 0 && result_aux > 0) ? "," : "", 328 i8042_pnp_kbd_name, (i8042_pnp_kbd_devices && i8042_pnp_aux_devices) ? "," : "",
323 i8042_pnp_aux_name, 329 i8042_pnp_aux_name,
324 i8042_pnp_data_reg, i8042_pnp_command_reg, 330 i8042_pnp_data_reg, i8042_pnp_command_reg,
325 kbd_irq_str, (result_kbd > 0 && result_aux > 0) ? "," : "", 331 kbd_irq_str, (i8042_pnp_kbd_devices && i8042_pnp_aux_devices) ? "," : "",
326 aux_irq_str); 332 aux_irq_str);
327 333
328#if defined(__ia64__) 334#if defined(__ia64__)
329 if (result_kbd <= 0) 335 if (!i8042_pnp_kbd_devices)
330 i8042_nokbd = 1; 336 i8042_nokbd = 1;
331 if (result_aux <= 0) 337 if (!i8042_pnp_aux_devices)
332 i8042_noaux = 1; 338 i8042_noaux = 1;
333#endif 339#endif
334 340
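
In i8042_pnp_init() the old code inferred device presence from the return value of pnp_register_driver(); the new code treats that return purely as a 0/-errno registration status and counts matched devices in the probe callbacks instead (i8042_pnp_kbd_devices / i8042_pnp_aux_devices). The general shape of that pattern, with hypothetical names and a hypothetical PNP id:

#include <linux/init.h>
#include <linux/pnp.h>

static unsigned int example_devices;        /* bumped once per matched device */

static int example_probe(struct pnp_dev *dev, const struct pnp_device_id *id)
{
        /* ... pull I/O ports and IRQs out of 'dev' ... */
        example_devices++;
        return 0;
}

static const struct pnp_device_id example_ids[] = {
        { .id = "PNP0303", .driver_data = 0 },    /* hypothetical id */
        { .id = "" }
};

static struct pnp_driver example_driver = {
        .name     = "example",
        .id_table = example_ids,
        .probe    = example_probe,
};

static int __init example_init(void)
{
        int err = pnp_register_driver(&example_driver);

        if (err)
                return err;            /* registration itself failed */
        if (!example_devices)
                return -ENODEV;        /* registered fine, but nothing matched */
        return 0;
}
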
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index d4c990f7c85e..79c97f94bcbd 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -84,7 +84,7 @@ void ps2_drain(struct ps2dev *ps2dev, int maxbytes, int timeout)
84 maxbytes = sizeof(ps2dev->cmdbuf); 84 maxbytes = sizeof(ps2dev->cmdbuf);
85 } 85 }
86 86
87 down(&ps2dev->cmd_sem); 87 mutex_lock(&ps2dev->cmd_mutex);
88 88
89 serio_pause_rx(ps2dev->serio); 89 serio_pause_rx(ps2dev->serio);
90 ps2dev->flags = PS2_FLAG_CMD; 90 ps2dev->flags = PS2_FLAG_CMD;
@@ -94,7 +94,7 @@ void ps2_drain(struct ps2dev *ps2dev, int maxbytes, int timeout)
94 wait_event_timeout(ps2dev->wait, 94 wait_event_timeout(ps2dev->wait,
95 !(ps2dev->flags & PS2_FLAG_CMD), 95 !(ps2dev->flags & PS2_FLAG_CMD),
96 msecs_to_jiffies(timeout)); 96 msecs_to_jiffies(timeout));
97 up(&ps2dev->cmd_sem); 97 mutex_unlock(&ps2dev->cmd_mutex);
98} 98}
99 99
100/* 100/*
@@ -177,7 +177,7 @@ int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
177 return -1; 177 return -1;
178 } 178 }
179 179
180 down(&ps2dev->cmd_sem); 180 mutex_lock(&ps2dev->cmd_mutex);
181 181
182 serio_pause_rx(ps2dev->serio); 182 serio_pause_rx(ps2dev->serio);
183 ps2dev->flags = command == PS2_CMD_GETID ? PS2_FLAG_WAITID : 0; 183 ps2dev->flags = command == PS2_CMD_GETID ? PS2_FLAG_WAITID : 0;
@@ -229,7 +229,7 @@ int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
229 ps2dev->flags = 0; 229 ps2dev->flags = 0;
230 serio_continue_rx(ps2dev->serio); 230 serio_continue_rx(ps2dev->serio);
231 231
232 up(&ps2dev->cmd_sem); 232 mutex_unlock(&ps2dev->cmd_mutex);
233 return rc; 233 return rc;
234} 234}
235 235
@@ -281,7 +281,7 @@ int ps2_schedule_command(struct ps2dev *ps2dev, unsigned char *param, int comman
281 281
282void ps2_init(struct ps2dev *ps2dev, struct serio *serio) 282void ps2_init(struct ps2dev *ps2dev, struct serio *serio)
283{ 283{
284 init_MUTEX(&ps2dev->cmd_sem); 284 mutex_init(&ps2dev->cmd_mutex);
285 init_waitqueue_head(&ps2dev->wait); 285 init_waitqueue_head(&ps2dev->wait);
286 ps2dev->serio = serio; 286 ps2dev->serio = serio;
287} 287}
diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c
index 1d15c2819818..a5c1fb3a4a51 100644
--- a/drivers/input/serio/parkbd.c
+++ b/drivers/input/serio/parkbd.c
@@ -171,9 +171,8 @@ static struct serio * __init parkbd_allocate_serio(void)
171{ 171{
172 struct serio *serio; 172 struct serio *serio;
173 173
174 serio = kmalloc(sizeof(struct serio), GFP_KERNEL); 174 serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
175 if (serio) { 175 if (serio) {
176 memset(serio, 0, sizeof(struct serio));
177 serio->id.type = parkbd_mode; 176 serio->id.type = parkbd_mode;
178 serio->write = parkbd_write, 177 serio->write = parkbd_write,
179 strlcpy(serio->name, "PARKBD AT/XT keyboard adapter", sizeof(serio->name)); 178 strlcpy(serio->name, "PARKBD AT/XT keyboard adapter", sizeof(serio->name));
diff --git a/drivers/input/serio/rpckbd.c b/drivers/input/serio/rpckbd.c
index a3bd11589bc3..513d37fc1acf 100644
--- a/drivers/input/serio/rpckbd.c
+++ b/drivers/input/serio/rpckbd.c
@@ -111,11 +111,10 @@ static int __devinit rpckbd_probe(struct platform_device *dev)
111{ 111{
112 struct serio *serio; 112 struct serio *serio;
113 113
114 serio = kmalloc(sizeof(struct serio), GFP_KERNEL); 114 serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
115 if (!serio) 115 if (!serio)
116 return -ENOMEM; 116 return -ENOMEM;
117 117
118 memset(serio, 0, sizeof(struct serio));
119 serio->id.type = SERIO_8042; 118 serio->id.type = SERIO_8042;
120 serio->write = rpckbd_write; 119 serio->write = rpckbd_write;
121 serio->open = rpckbd_open; 120 serio->open = rpckbd_open;
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 2f76813c3a64..6521034bc933 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -34,6 +34,7 @@
34#include <linux/sched.h> 34#include <linux/sched.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/kthread.h> 36#include <linux/kthread.h>
37#include <linux/mutex.h>
37 38
38MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); 39MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
39MODULE_DESCRIPTION("Serio abstraction core"); 40MODULE_DESCRIPTION("Serio abstraction core");
@@ -52,10 +53,10 @@ EXPORT_SYMBOL(serio_rescan);
52EXPORT_SYMBOL(serio_reconnect); 53EXPORT_SYMBOL(serio_reconnect);
53 54
54/* 55/*
55 * serio_sem protects entire serio subsystem and is taken every time 56 * serio_mutex protects entire serio subsystem and is taken every time
 56 * serio port or driver registered or unregistered. 57 * serio port or driver registered or unregistered.
57 */ 58 */
58static DECLARE_MUTEX(serio_sem); 59static DEFINE_MUTEX(serio_mutex);
59 60
60static LIST_HEAD(serio_list); 61static LIST_HEAD(serio_list);
61 62
@@ -70,9 +71,9 @@ static int serio_connect_driver(struct serio *serio, struct serio_driver *drv)
70{ 71{
71 int retval; 72 int retval;
72 73
73 down(&serio->drv_sem); 74 mutex_lock(&serio->drv_mutex);
74 retval = drv->connect(serio, drv); 75 retval = drv->connect(serio, drv);
75 up(&serio->drv_sem); 76 mutex_unlock(&serio->drv_mutex);
76 77
77 return retval; 78 return retval;
78} 79}
@@ -81,20 +82,20 @@ static int serio_reconnect_driver(struct serio *serio)
81{ 82{
82 int retval = -1; 83 int retval = -1;
83 84
84 down(&serio->drv_sem); 85 mutex_lock(&serio->drv_mutex);
85 if (serio->drv && serio->drv->reconnect) 86 if (serio->drv && serio->drv->reconnect)
86 retval = serio->drv->reconnect(serio); 87 retval = serio->drv->reconnect(serio);
87 up(&serio->drv_sem); 88 mutex_unlock(&serio->drv_mutex);
88 89
89 return retval; 90 return retval;
90} 91}
91 92
92static void serio_disconnect_driver(struct serio *serio) 93static void serio_disconnect_driver(struct serio *serio)
93{ 94{
94 down(&serio->drv_sem); 95 mutex_lock(&serio->drv_mutex);
95 if (serio->drv) 96 if (serio->drv)
96 serio->drv->disconnect(serio); 97 serio->drv->disconnect(serio);
97 up(&serio->drv_sem); 98 mutex_unlock(&serio->drv_mutex);
98} 99}
99 100
100static int serio_match_port(const struct serio_device_id *ids, struct serio *serio) 101static int serio_match_port(const struct serio_device_id *ids, struct serio *serio)
@@ -195,6 +196,7 @@ static void serio_queue_event(void *object, struct module *owner,
195 if ((event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC))) { 196 if ((event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC))) {
196 if (!try_module_get(owner)) { 197 if (!try_module_get(owner)) {
197 printk(KERN_WARNING "serio: Can't get module reference, dropping event %d\n", event_type); 198 printk(KERN_WARNING "serio: Can't get module reference, dropping event %d\n", event_type);
199 kfree(event);
198 goto out; 200 goto out;
199 } 201 }
200 202
@@ -272,7 +274,7 @@ static void serio_handle_event(void)
272 struct serio_event *event; 274 struct serio_event *event;
273 struct serio_driver *serio_drv; 275 struct serio_driver *serio_drv;
274 276
275 down(&serio_sem); 277 mutex_lock(&serio_mutex);
276 278
277 /* 279 /*
278 * Note that we handle only one event here to give swsusp 280 * Note that we handle only one event here to give swsusp
@@ -314,7 +316,7 @@ static void serio_handle_event(void)
314 serio_free_event(event); 316 serio_free_event(event);
315 } 317 }
316 318
317 up(&serio_sem); 319 mutex_unlock(&serio_mutex);
318} 320}
319 321
320/* 322/*
@@ -449,7 +451,7 @@ static ssize_t serio_rebind_driver(struct device *dev, struct device_attribute *
449 struct device_driver *drv; 451 struct device_driver *drv;
450 int retval; 452 int retval;
451 453
452 retval = down_interruptible(&serio_sem); 454 retval = mutex_lock_interruptible(&serio_mutex);
453 if (retval) 455 if (retval)
454 return retval; 456 return retval;
455 457
@@ -469,7 +471,7 @@ static ssize_t serio_rebind_driver(struct device *dev, struct device_attribute *
469 retval = -EINVAL; 471 retval = -EINVAL;
470 } 472 }
471 473
472 up(&serio_sem); 474 mutex_unlock(&serio_mutex);
473 475
474 return retval; 476 return retval;
475} 477}
@@ -524,7 +526,7 @@ static void serio_init_port(struct serio *serio)
524 __module_get(THIS_MODULE); 526 __module_get(THIS_MODULE);
525 527
526 spin_lock_init(&serio->lock); 528 spin_lock_init(&serio->lock);
527 init_MUTEX(&serio->drv_sem); 529 mutex_init(&serio->drv_mutex);
528 device_initialize(&serio->dev); 530 device_initialize(&serio->dev);
529 snprintf(serio->dev.bus_id, sizeof(serio->dev.bus_id), 531 snprintf(serio->dev.bus_id, sizeof(serio->dev.bus_id),
530 "serio%ld", (long)atomic_inc_return(&serio_no) - 1); 532 "serio%ld", (long)atomic_inc_return(&serio_no) - 1);
@@ -661,10 +663,10 @@ void __serio_register_port(struct serio *serio, struct module *owner)
661 */ 663 */
662void serio_unregister_port(struct serio *serio) 664void serio_unregister_port(struct serio *serio)
663{ 665{
664 down(&serio_sem); 666 mutex_lock(&serio_mutex);
665 serio_disconnect_port(serio); 667 serio_disconnect_port(serio);
666 serio_destroy_port(serio); 668 serio_destroy_port(serio);
667 up(&serio_sem); 669 mutex_unlock(&serio_mutex);
668} 670}
669 671
670/* 672/*
@@ -672,17 +674,17 @@ void serio_unregister_port(struct serio *serio)
672 */ 674 */
673void serio_unregister_child_port(struct serio *serio) 675void serio_unregister_child_port(struct serio *serio)
674{ 676{
675 down(&serio_sem); 677 mutex_lock(&serio_mutex);
676 if (serio->child) { 678 if (serio->child) {
677 serio_disconnect_port(serio->child); 679 serio_disconnect_port(serio->child);
678 serio_destroy_port(serio->child); 680 serio_destroy_port(serio->child);
679 } 681 }
680 up(&serio_sem); 682 mutex_unlock(&serio_mutex);
681} 683}
682 684
683/* 685/*
684 * Submits register request to kseriod for subsequent execution. 686 * Submits register request to kseriod for subsequent execution.
685 * Can be used when it is not obvious whether the serio_sem is 687 * Can be used when it is not obvious whether the serio_mutex is
686 * taken or not and when delayed execution is feasible. 688 * taken or not and when delayed execution is feasible.
687 */ 689 */
688void __serio_unregister_port_delayed(struct serio *serio, struct module *owner) 690void __serio_unregister_port_delayed(struct serio *serio, struct module *owner)
@@ -765,7 +767,7 @@ void serio_unregister_driver(struct serio_driver *drv)
765{ 767{
766 struct serio *serio; 768 struct serio *serio;
767 769
768 down(&serio_sem); 770 mutex_lock(&serio_mutex);
769 drv->manual_bind = 1; /* so serio_find_driver ignores it */ 771 drv->manual_bind = 1; /* so serio_find_driver ignores it */
770 772
771start_over: 773start_over:
@@ -779,7 +781,7 @@ start_over:
779 } 781 }
780 782
781 driver_unregister(&drv->driver); 783 driver_unregister(&drv->driver);
782 up(&serio_sem); 784 mutex_unlock(&serio_mutex);
783} 785}
784 786
785static void serio_set_drv(struct serio *serio, struct serio_driver *drv) 787static void serio_set_drv(struct serio *serio, struct serio_driver *drv)
@@ -858,7 +860,7 @@ static int serio_resume(struct device *dev)
858 return 0; 860 return 0;
859} 861}
860 862
861/* called from serio_driver->connect/disconnect methods under serio_sem */ 863/* called from serio_driver->connect/disconnect methods under serio_mutex */
862int serio_open(struct serio *serio, struct serio_driver *drv) 864int serio_open(struct serio *serio, struct serio_driver *drv)
863{ 865{
864 serio_set_drv(serio, drv); 866 serio_set_drv(serio, drv);
@@ -870,7 +872,7 @@ int serio_open(struct serio *serio, struct serio_driver *drv)
870 return 0; 872 return 0;
871} 873}
872 874
873/* called from serio_driver->connect/disconnect methods under serio_sem */ 875/* called from serio_driver->connect/disconnect methods under serio_mutex */
874void serio_close(struct serio *serio) 876void serio_close(struct serio *serio)
875{ 877{
876 if (serio->close) 878 if (serio->close)
@@ -923,5 +925,5 @@ static void __exit serio_exit(void)
923 kthread_stop(serio_task); 925 kthread_stop(serio_task);
924} 926}
925 927
926module_init(serio_init); 928subsys_initcall(serio_init);
927module_exit(serio_exit); 929module_exit(serio_exit);
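
Switching serio_init() from module_init() to subsys_initcall() only matters for the built-in case: subsystem-level initcalls run before device-level ones, so the serio bus is registered before atkbd, psmouse and the other serio drivers try to attach to it. When serio is built as a module, subsys_initcall() falls back to module_init(), so module loading is unchanged. Roughly (the level list is abridged):

        /*
         * Built-in initcall levels run in order: core -> postcore -> arch ->
         * subsys -> fs -> device -> late.  serio_init() now sits at the
         * subsys level, ahead of the device-level (module_init) initcalls of
         * atkbd, psmouse and the rest, so the bus exists when they register.
         */
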
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
index 47e08de18d07..5a2703b536dc 100644
--- a/drivers/input/serio/serio_raw.c
+++ b/drivers/input/serio/serio_raw.c
@@ -19,6 +19,7 @@
19#include <linux/devfs_fs_kernel.h> 19#include <linux/devfs_fs_kernel.h>
20#include <linux/miscdevice.h> 20#include <linux/miscdevice.h>
21#include <linux/wait.h> 21#include <linux/wait.h>
22#include <linux/mutex.h>
22 23
23#define DRIVER_DESC "Raw serio driver" 24#define DRIVER_DESC "Raw serio driver"
24 25
@@ -46,7 +47,7 @@ struct serio_raw_list {
46 struct list_head node; 47 struct list_head node;
47}; 48};
48 49
49static DECLARE_MUTEX(serio_raw_sem); 50static DEFINE_MUTEX(serio_raw_mutex);
50static LIST_HEAD(serio_raw_list); 51static LIST_HEAD(serio_raw_list);
51static unsigned int serio_raw_no; 52static unsigned int serio_raw_no;
52 53
@@ -81,7 +82,7 @@ static int serio_raw_open(struct inode *inode, struct file *file)
81 struct serio_raw_list *list; 82 struct serio_raw_list *list;
82 int retval = 0; 83 int retval = 0;
83 84
84 retval = down_interruptible(&serio_raw_sem); 85 retval = mutex_lock_interruptible(&serio_raw_mutex);
85 if (retval) 86 if (retval)
86 return retval; 87 return retval;
87 88
@@ -95,12 +96,11 @@ static int serio_raw_open(struct inode *inode, struct file *file)
95 goto out; 96 goto out;
96 } 97 }
97 98
98 if (!(list = kmalloc(sizeof(struct serio_raw_list), GFP_KERNEL))) { 99 if (!(list = kzalloc(sizeof(struct serio_raw_list), GFP_KERNEL))) {
99 retval = -ENOMEM; 100 retval = -ENOMEM;
100 goto out; 101 goto out;
101 } 102 }
102 103
103 memset(list, 0, sizeof(struct serio_raw_list));
104 list->serio_raw = serio_raw; 104 list->serio_raw = serio_raw;
105 file->private_data = list; 105 file->private_data = list;
106 106
@@ -108,7 +108,7 @@ static int serio_raw_open(struct inode *inode, struct file *file)
108 list_add_tail(&list->node, &serio_raw->list); 108 list_add_tail(&list->node, &serio_raw->list);
109 109
110out: 110out:
111 up(&serio_raw_sem); 111 mutex_unlock(&serio_raw_mutex);
112 return retval; 112 return retval;
113} 113}
114 114
@@ -130,12 +130,12 @@ static int serio_raw_release(struct inode *inode, struct file *file)
130 struct serio_raw_list *list = file->private_data; 130 struct serio_raw_list *list = file->private_data;
131 struct serio_raw *serio_raw = list->serio_raw; 131 struct serio_raw *serio_raw = list->serio_raw;
132 132
133 down(&serio_raw_sem); 133 mutex_lock(&serio_raw_mutex);
134 134
135 serio_raw_fasync(-1, file, 0); 135 serio_raw_fasync(-1, file, 0);
136 serio_raw_cleanup(serio_raw); 136 serio_raw_cleanup(serio_raw);
137 137
138 up(&serio_raw_sem); 138 mutex_unlock(&serio_raw_mutex);
139 return 0; 139 return 0;
140} 140}
141 141
@@ -194,7 +194,7 @@ static ssize_t serio_raw_write(struct file *file, const char __user *buffer, siz
194 int retval; 194 int retval;
195 unsigned char c; 195 unsigned char c;
196 196
197 retval = down_interruptible(&serio_raw_sem); 197 retval = mutex_lock_interruptible(&serio_raw_mutex);
198 if (retval) 198 if (retval)
199 return retval; 199 return retval;
200 200
@@ -219,7 +219,7 @@ static ssize_t serio_raw_write(struct file *file, const char __user *buffer, siz
219 }; 219 };
220 220
221out: 221out:
222 up(&serio_raw_sem); 222 mutex_unlock(&serio_raw_mutex);
223 return written; 223 return written;
224} 224}
225 225
@@ -275,14 +275,13 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
275 struct serio_raw *serio_raw; 275 struct serio_raw *serio_raw;
276 int err; 276 int err;
277 277
278 if (!(serio_raw = kmalloc(sizeof(struct serio_raw), GFP_KERNEL))) { 278 if (!(serio_raw = kzalloc(sizeof(struct serio_raw), GFP_KERNEL))) {
279 printk(KERN_ERR "serio_raw.c: can't allocate memory for a device\n"); 279 printk(KERN_ERR "serio_raw.c: can't allocate memory for a device\n");
280 return -ENOMEM; 280 return -ENOMEM;
281 } 281 }
282 282
283 down(&serio_raw_sem); 283 mutex_lock(&serio_raw_mutex);
284 284
285 memset(serio_raw, 0, sizeof(struct serio_raw));
286 snprintf(serio_raw->name, sizeof(serio_raw->name), "serio_raw%d", serio_raw_no++); 285 snprintf(serio_raw->name, sizeof(serio_raw->name), "serio_raw%d", serio_raw_no++);
287 serio_raw->refcnt = 1; 286 serio_raw->refcnt = 1;
288 serio_raw->serio = serio; 287 serio_raw->serio = serio;
@@ -325,7 +324,7 @@ out_free:
325 serio_set_drvdata(serio, NULL); 324 serio_set_drvdata(serio, NULL);
326 kfree(serio_raw); 325 kfree(serio_raw);
327out: 326out:
328 up(&serio_raw_sem); 327 mutex_unlock(&serio_raw_mutex);
329 return err; 328 return err;
330} 329}
331 330
@@ -350,7 +349,7 @@ static void serio_raw_disconnect(struct serio *serio)
350{ 349{
351 struct serio_raw *serio_raw; 350 struct serio_raw *serio_raw;
352 351
353 down(&serio_raw_sem); 352 mutex_lock(&serio_raw_mutex);
354 353
355 serio_raw = serio_get_drvdata(serio); 354 serio_raw = serio_get_drvdata(serio);
356 355
@@ -361,7 +360,7 @@ static void serio_raw_disconnect(struct serio *serio)
361 if (!serio_raw_cleanup(serio_raw)) 360 if (!serio_raw_cleanup(serio_raw))
362 wake_up_interruptible(&serio_raw->wait); 361 wake_up_interruptible(&serio_raw->wait);
363 362
364 up(&serio_raw_sem); 363 mutex_unlock(&serio_raw_mutex);
365} 364}
366 365
367static struct serio_device_id serio_raw_serio_ids[] = { 366static struct serio_device_id serio_raw_serio_ids[] = {
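The serio_raw hunks above replace a semaphore that was only ever used for mutual exclusion (DECLARE_MUTEX/down/up) with the mutex API adopted across this merge. A minimal sketch of the same pattern, using hypothetical example_* names rather than code from the patch; the uninterruptible down()/up() call sites map to mutex_lock()/mutex_unlock() in the same way:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);	/* replaces DECLARE_MUTEX(example_sem) */

static int example_open(void)
{
	int retval;

	/* mutex_lock_interruptible() stands in for down_interruptible() and
	 * returns a negative errno if the sleep is broken by a signal */
	retval = mutex_lock_interruptible(&example_mutex);
	if (retval)
		return retval;

	/* ... critical section ... */

	mutex_unlock(&example_mutex);	/* replaces up() */
	return 0;
}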
diff --git a/drivers/input/tsdev.c b/drivers/input/tsdev.c
index ca1547929d62..d678d144bbf8 100644
--- a/drivers/input/tsdev.c
+++ b/drivers/input/tsdev.c
@@ -157,9 +157,8 @@ static int tsdev_open(struct inode *inode, struct file *file)
157 if (i >= TSDEV_MINORS || !tsdev_table[i & TSDEV_MINOR_MASK]) 157 if (i >= TSDEV_MINORS || !tsdev_table[i & TSDEV_MINOR_MASK])
158 return -ENODEV; 158 return -ENODEV;
159 159
160 if (!(list = kmalloc(sizeof(struct tsdev_list), GFP_KERNEL))) 160 if (!(list = kzalloc(sizeof(struct tsdev_list), GFP_KERNEL)))
161 return -ENOMEM; 161 return -ENOMEM;
162 memset(list, 0, sizeof(struct tsdev_list));
163 162
164 list->raw = (i >= TSDEV_MINORS/2) ? 1 : 0; 163 list->raw = (i >= TSDEV_MINORS/2) ? 1 : 0;
165 164
@@ -379,9 +378,8 @@ static struct input_handle *tsdev_connect(struct input_handler *handler,
379 return NULL; 378 return NULL;
380 } 379 }
381 380
382 if (!(tsdev = kmalloc(sizeof(struct tsdev), GFP_KERNEL))) 381 if (!(tsdev = kzalloc(sizeof(struct tsdev), GFP_KERNEL)))
383 return NULL; 382 return NULL;
384 memset(tsdev, 0, sizeof(struct tsdev));
385 383
386 INIT_LIST_HEAD(&tsdev->list); 384 INIT_LIST_HEAD(&tsdev->list);
387 init_waitqueue_head(&tsdev->wait); 385 init_waitqueue_head(&tsdev->wait);
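Both serio_raw and tsdev also fold the kmalloc()-plus-memset() idiom into kzalloc(), which hands back already-zeroed memory. A minimal sketch of the pattern with a hypothetical struct example, not code from the patch:

#include <linux/slab.h>

struct example {
	int raw;
	void *priv;
};

static struct example *example_alloc(void)
{
	struct example *ex;

	/* kzalloc() zeroes the allocation, so the separate memset() goes away */
	ex = kzalloc(sizeof(struct example), GFP_KERNEL);
	if (!ex)
		return NULL;		/* caller turns this into -ENOMEM */

	return ex;
}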
diff --git a/drivers/isdn/hardware/avm/avm_cs.c b/drivers/isdn/hardware/avm/avm_cs.c
index 2a2b03ff096b..7bbfd85ab793 100644
--- a/drivers/isdn/hardware/avm/avm_cs.c
+++ b/drivers/isdn/hardware/avm/avm_cs.c
@@ -51,8 +51,8 @@ MODULE_LICENSE("GPL");
51 handler. 51 handler.
52*/ 52*/
53 53
54static void avmcs_config(dev_link_t *link); 54static int avmcs_config(struct pcmcia_device *link);
55static void avmcs_release(dev_link_t *link); 55static void avmcs_release(struct pcmcia_device *link);
56 56
57/* 57/*
58 The attach() and detach() entry points are used to create and destroy 58 The attach() and detach() entry points are used to create and destroy
@@ -65,10 +65,10 @@ static void avmcs_detach(struct pcmcia_device *p_dev);
65/* 65/*
66 A linked list of "instances" of the skeleton device. Each actual 66 A linked list of "instances" of the skeleton device. Each actual
67 PCMCIA card corresponds to one device instance, and is described 67 PCMCIA card corresponds to one device instance, and is described
68 by one dev_link_t structure (defined in ds.h). 68 by one struct pcmcia_device structure (defined in ds.h).
69 69
70 You may not want to use a linked list for this -- for example, the 70 You may not want to use a linked list for this -- for example, the
71 memory card driver uses an array of dev_link_t pointers, where minor 71 memory card driver uses an array of struct pcmcia_device pointers, where minor
72 device numbers are used to derive the corresponding array index. 72 device numbers are used to derive the corresponding array index.
73*/ 73*/
74 74
@@ -78,7 +78,7 @@ static void avmcs_detach(struct pcmcia_device *p_dev);
78 example, ethernet cards, modems). In other cases, there may be 78 example, ethernet cards, modems). In other cases, there may be
79 many actual or logical devices (SCSI adapters, memory cards with 79 many actual or logical devices (SCSI adapters, memory cards with
80 multiple partitions). The dev_node_t structures need to be kept 80 multiple partitions). The dev_node_t structures need to be kept
81 in a linked list starting at the 'dev' field of a dev_link_t 81 in a linked list starting at the 'dev' field of a struct pcmcia_device
82 structure. We allocate them in the card's private data structure, 82 structure. We allocate them in the card's private data structure,
83 because they generally can't be allocated dynamically. 83 because they generally can't be allocated dynamically.
84*/ 84*/
@@ -99,54 +99,38 @@ typedef struct local_info_t {
99 99
100======================================================================*/ 100======================================================================*/
101 101
102static int avmcs_attach(struct pcmcia_device *p_dev) 102static int avmcs_probe(struct pcmcia_device *p_dev)
103{ 103{
104 dev_link_t *link;
105 local_info_t *local; 104 local_info_t *local;
106 105
107 /* Initialize the dev_link_t structure */
108 link = kmalloc(sizeof(struct dev_link_t), GFP_KERNEL);
109 if (!link)
110 goto err;
111 memset(link, 0, sizeof(struct dev_link_t));
112
113 /* The io structure describes IO port mapping */ 106 /* The io structure describes IO port mapping */
114 link->io.NumPorts1 = 16; 107 p_dev->io.NumPorts1 = 16;
115 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 108 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
116 link->io.NumPorts2 = 0; 109 p_dev->io.NumPorts2 = 0;
117 110
118 /* Interrupt setup */ 111 /* Interrupt setup */
119 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 112 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
120 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; 113 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
114
115 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
121 116
122 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
123
124 /* General socket configuration */ 117 /* General socket configuration */
125 link->conf.Attributes = CONF_ENABLE_IRQ; 118 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
126 link->conf.Vcc = 50; 119 p_dev->conf.IntType = INT_MEMORY_AND_IO;
127 link->conf.IntType = INT_MEMORY_AND_IO; 120 p_dev->conf.ConfigIndex = 1;
128 link->conf.ConfigIndex = 1; 121 p_dev->conf.Present = PRESENT_OPTION;
129 link->conf.Present = PRESENT_OPTION;
130 122
131 /* Allocate space for private device-specific data */ 123 /* Allocate space for private device-specific data */
132 local = kmalloc(sizeof(local_info_t), GFP_KERNEL); 124 local = kmalloc(sizeof(local_info_t), GFP_KERNEL);
133 if (!local) 125 if (!local)
134 goto err_kfree; 126 goto err;
135 memset(local, 0, sizeof(local_info_t)); 127 memset(local, 0, sizeof(local_info_t));
136 link->priv = local; 128 p_dev->priv = local;
137 129
138 link->handle = p_dev; 130 return avmcs_config(p_dev);
139 p_dev->instance = link;
140 131
141 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
142 avmcs_config(link);
143
144 return 0;
145
146 err_kfree:
147 kfree(link);
148 err: 132 err:
149 return -EINVAL; 133 return -ENOMEM;
150} /* avmcs_attach */ 134} /* avmcs_attach */
151 135
152/*====================================================================== 136/*======================================================================
@@ -158,15 +142,10 @@ static int avmcs_attach(struct pcmcia_device *p_dev)
158 142
159======================================================================*/ 143======================================================================*/
160 144
161static void avmcs_detach(struct pcmcia_device *p_dev) 145static void avmcs_detach(struct pcmcia_device *link)
162{ 146{
163 dev_link_t *link = dev_to_instance(p_dev);
164
165 if (link->state & DEV_CONFIG)
166 avmcs_release(link); 147 avmcs_release(link);
167 148 kfree(link->priv);
168 kfree(link->priv);
169 kfree(link);
170} /* avmcs_detach */ 149} /* avmcs_detach */
171 150
172/*====================================================================== 151/*======================================================================
@@ -177,7 +156,7 @@ static void avmcs_detach(struct pcmcia_device *p_dev)
177 156
178======================================================================*/ 157======================================================================*/
179 158
180static int get_tuple(client_handle_t handle, tuple_t *tuple, 159static int get_tuple(struct pcmcia_device *handle, tuple_t *tuple,
181 cisparse_t *parse) 160 cisparse_t *parse)
182{ 161{
183 int i = pcmcia_get_tuple_data(handle, tuple); 162 int i = pcmcia_get_tuple_data(handle, tuple);
@@ -185,7 +164,7 @@ static int get_tuple(client_handle_t handle, tuple_t *tuple,
185 return pcmcia_parse_tuple(handle, tuple, parse); 164 return pcmcia_parse_tuple(handle, tuple, parse);
186} 165}
187 166
188static int first_tuple(client_handle_t handle, tuple_t *tuple, 167static int first_tuple(struct pcmcia_device *handle, tuple_t *tuple,
189 cisparse_t *parse) 168 cisparse_t *parse)
190{ 169{
191 int i = pcmcia_get_first_tuple(handle, tuple); 170 int i = pcmcia_get_first_tuple(handle, tuple);
@@ -193,7 +172,7 @@ static int first_tuple(client_handle_t handle, tuple_t *tuple,
193 return get_tuple(handle, tuple, parse); 172 return get_tuple(handle, tuple, parse);
194} 173}
195 174
196static int next_tuple(client_handle_t handle, tuple_t *tuple, 175static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple,
197 cisparse_t *parse) 176 cisparse_t *parse)
198{ 177{
199 int i = pcmcia_get_next_tuple(handle, tuple); 178 int i = pcmcia_get_next_tuple(handle, tuple);
@@ -201,9 +180,8 @@ static int next_tuple(client_handle_t handle, tuple_t *tuple,
201 return get_tuple(handle, tuple, parse); 180 return get_tuple(handle, tuple, parse);
202} 181}
203 182
204static void avmcs_config(dev_link_t *link) 183static int avmcs_config(struct pcmcia_device *link)
205{ 184{
206 client_handle_t handle;
207 tuple_t tuple; 185 tuple_t tuple;
208 cisparse_t parse; 186 cisparse_t parse;
209 cistpl_cftable_entry_t *cf = &parse.cftable_entry; 187 cistpl_cftable_entry_t *cf = &parse.cftable_entry;
@@ -213,8 +191,7 @@ static void avmcs_config(dev_link_t *link)
213 char devname[128]; 191 char devname[128];
214 int cardtype; 192 int cardtype;
215 int (*addcard)(unsigned int port, unsigned irq); 193 int (*addcard)(unsigned int port, unsigned irq);
216 194
217 handle = link->handle;
218 dev = link->priv; 195 dev = link->priv;
219 196
220 /* 197 /*
@@ -223,25 +200,21 @@ static void avmcs_config(dev_link_t *link)
223 */ 200 */
224 do { 201 do {
225 tuple.DesiredTuple = CISTPL_CONFIG; 202 tuple.DesiredTuple = CISTPL_CONFIG;
226 i = pcmcia_get_first_tuple(handle, &tuple); 203 i = pcmcia_get_first_tuple(link, &tuple);
227 if (i != CS_SUCCESS) break; 204 if (i != CS_SUCCESS) break;
228 tuple.TupleData = buf; 205 tuple.TupleData = buf;
229 tuple.TupleDataMax = 64; 206 tuple.TupleDataMax = 64;
230 tuple.TupleOffset = 0; 207 tuple.TupleOffset = 0;
231 i = pcmcia_get_tuple_data(handle, &tuple); 208 i = pcmcia_get_tuple_data(link, &tuple);
232 if (i != CS_SUCCESS) break; 209 if (i != CS_SUCCESS) break;
233 i = pcmcia_parse_tuple(handle, &tuple, &parse); 210 i = pcmcia_parse_tuple(link, &tuple, &parse);
234 if (i != CS_SUCCESS) break; 211 if (i != CS_SUCCESS) break;
235 link->conf.ConfigBase = parse.config.base; 212 link->conf.ConfigBase = parse.config.base;
236 } while (0); 213 } while (0);
237 if (i != CS_SUCCESS) { 214 if (i != CS_SUCCESS) {
238 cs_error(link->handle, ParseTuple, i); 215 cs_error(link, ParseTuple, i);
239 link->state &= ~DEV_CONFIG_PENDING; 216 return -ENODEV;
240 return;
241 } 217 }
242
243 /* Configure card */
244 link->state |= DEV_CONFIG;
245 218
246 do { 219 do {
247 220
@@ -252,7 +225,7 @@ static void avmcs_config(dev_link_t *link)
252 tuple.DesiredTuple = CISTPL_VERS_1; 225 tuple.DesiredTuple = CISTPL_VERS_1;
253 226
254 devname[0] = 0; 227 devname[0] = 0;
255 if( !first_tuple(handle, &tuple, &parse) && parse.version_1.ns > 1 ) { 228 if( !first_tuple(link, &tuple, &parse) && parse.version_1.ns > 1 ) {
256 strlcpy(devname,parse.version_1.str + parse.version_1.ofs[1], 229 strlcpy(devname,parse.version_1.str + parse.version_1.ofs[1],
257 sizeof(devname)); 230 sizeof(devname));
258 } 231 }
@@ -263,7 +236,7 @@ static void avmcs_config(dev_link_t *link)
263 tuple.TupleOffset = 0; tuple.TupleDataMax = 255; 236 tuple.TupleOffset = 0; tuple.TupleDataMax = 255;
264 tuple.Attributes = 0; 237 tuple.Attributes = 0;
265 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 238 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
266 i = first_tuple(handle, &tuple, &parse); 239 i = first_tuple(link, &tuple, &parse);
267 while (i == CS_SUCCESS) { 240 while (i == CS_SUCCESS) {
268 if (cf->io.nwin > 0) { 241 if (cf->io.nwin > 0) {
269 link->conf.ConfigIndex = cf->index; 242 link->conf.ConfigIndex = cf->index;
@@ -273,36 +246,36 @@ static void avmcs_config(dev_link_t *link)
273 printk(KERN_INFO "avm_cs: testing i/o %#x-%#x\n", 246 printk(KERN_INFO "avm_cs: testing i/o %#x-%#x\n",
274 link->io.BasePort1, 247 link->io.BasePort1,
275 link->io.BasePort1+link->io.NumPorts1-1); 248 link->io.BasePort1+link->io.NumPorts1-1);
276 i = pcmcia_request_io(link->handle, &link->io); 249 i = pcmcia_request_io(link, &link->io);
277 if (i == CS_SUCCESS) goto found_port; 250 if (i == CS_SUCCESS) goto found_port;
278 } 251 }
279 i = next_tuple(handle, &tuple, &parse); 252 i = next_tuple(link, &tuple, &parse);
280 } 253 }
281 254
282found_port: 255found_port:
283 if (i != CS_SUCCESS) { 256 if (i != CS_SUCCESS) {
284 cs_error(link->handle, RequestIO, i); 257 cs_error(link, RequestIO, i);
285 break; 258 break;
286 } 259 }
287 260
288 /* 261 /*
289 * allocate an interrupt line 262 * allocate an interrupt line
290 */ 263 */
291 i = pcmcia_request_irq(link->handle, &link->irq); 264 i = pcmcia_request_irq(link, &link->irq);
292 if (i != CS_SUCCESS) { 265 if (i != CS_SUCCESS) {
293 cs_error(link->handle, RequestIRQ, i); 266 cs_error(link, RequestIRQ, i);
294 pcmcia_release_io(link->handle, &link->io); 267 /* undo */
268 pcmcia_disable_device(link);
295 break; 269 break;
296 } 270 }
297 271
298 /* 272 /*
299 * configure the PCMCIA socket 273 * configure the PCMCIA socket
300 */ 274 */
301 i = pcmcia_request_configuration(link->handle, &link->conf); 275 i = pcmcia_request_configuration(link, &link->conf);
302 if (i != CS_SUCCESS) { 276 if (i != CS_SUCCESS) {
303 cs_error(link->handle, RequestConfiguration, i); 277 cs_error(link, RequestConfiguration, i);
304 pcmcia_release_io(link->handle, &link->io); 278 pcmcia_disable_device(link);
305 pcmcia_release_irq(link->handle, &link->irq);
306 break; 279 break;
307 } 280 }
308 281
@@ -331,13 +304,12 @@ found_port:
331 304
332 dev->node.major = 64; 305 dev->node.major = 64;
333 dev->node.minor = 0; 306 dev->node.minor = 0;
334 link->dev = &dev->node; 307 link->dev_node = &dev->node;
335 308
336 link->state &= ~DEV_CONFIG_PENDING;
337 /* If any step failed, release any partially configured state */ 309 /* If any step failed, release any partially configured state */
338 if (i != 0) { 310 if (i != 0) {
339 avmcs_release(link); 311 avmcs_release(link);
340 return; 312 return -ENODEV;
341 } 313 }
342 314
343 315
@@ -351,9 +323,10 @@ found_port:
351 printk(KERN_ERR "avm_cs: failed to add AVM-%s-Controller at i/o %#x, irq %d\n", 323 printk(KERN_ERR "avm_cs: failed to add AVM-%s-Controller at i/o %#x, irq %d\n",
352 dev->node.dev_name, link->io.BasePort1, link->irq.AssignedIRQ); 324 dev->node.dev_name, link->io.BasePort1, link->irq.AssignedIRQ);
353 avmcs_release(link); 325 avmcs_release(link);
354 return; 326 return -ENODEV;
355 } 327 }
356 dev->node.minor = i; 328 dev->node.minor = i;
329 return 0;
357 330
358} /* avmcs_config */ 331} /* avmcs_config */
359 332
@@ -365,56 +338,12 @@ found_port:
365 338
366======================================================================*/ 339======================================================================*/
367 340
368static void avmcs_release(dev_link_t *link) 341static void avmcs_release(struct pcmcia_device *link)
369{ 342{
370 b1pcmcia_delcard(link->io.BasePort1, link->irq.AssignedIRQ); 343 b1pcmcia_delcard(link->io.BasePort1, link->irq.AssignedIRQ);
371 344 pcmcia_disable_device(link);
372 /* Unlink the device chain */
373 link->dev = NULL;
374
375 /* Don't bother checking to see if these succeed or not */
376 pcmcia_release_configuration(link->handle);
377 pcmcia_release_io(link->handle, &link->io);
378 pcmcia_release_irq(link->handle, &link->irq);
379 link->state &= ~DEV_CONFIG;
380} /* avmcs_release */ 345} /* avmcs_release */
381 346
382static int avmcs_suspend(struct pcmcia_device *dev)
383{
384 dev_link_t *link = dev_to_instance(dev);
385
386 link->state |= DEV_SUSPEND;
387 if (link->state & DEV_CONFIG)
388 pcmcia_release_configuration(link->handle);
389
390 return 0;
391}
392
393static int avmcs_resume(struct pcmcia_device *dev)
394{
395 dev_link_t *link = dev_to_instance(dev);
396
397 link->state &= ~DEV_SUSPEND;
398 if (link->state & DEV_CONFIG)
399 pcmcia_request_configuration(link->handle, &link->conf);
400
401 return 0;
402}
403
404/*======================================================================
405
406 The card status event handler. Mostly, this schedules other
407 stuff to run after an event is received. A CARD_REMOVAL event
408 also sets some flags to discourage the net drivers from trying
409 to talk to the card any more.
410
411 When a CARD_REMOVAL event is received, we immediately set a flag
412 to block future accesses to this device. All the functions that
413 actually access the device should check this flag to make sure
414 the card is still present.
415
416======================================================================*/
417
418 347
419static struct pcmcia_device_id avmcs_ids[] = { 348static struct pcmcia_device_id avmcs_ids[] = {
420 PCMCIA_DEVICE_PROD_ID12("AVM", "ISDN-Controller B1", 0x95d42008, 0x845dc335), 349 PCMCIA_DEVICE_PROD_ID12("AVM", "ISDN-Controller B1", 0x95d42008, 0x845dc335),
@@ -429,11 +358,9 @@ static struct pcmcia_driver avmcs_driver = {
429 .drv = { 358 .drv = {
430 .name = "avm_cs", 359 .name = "avm_cs",
431 }, 360 },
432 .probe = avmcs_attach, 361 .probe = avmcs_probe,
433 .remove = avmcs_detach, 362 .remove = avmcs_detach,
434 .id_table = avmcs_ids, 363 .id_table = avmcs_ids,
435 .suspend= avmcs_suspend,
436 .resume = avmcs_resume,
437}; 364};
438 365
439static int __init avmcs_init(void) 366static int __init avmcs_init(void)
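The avm_cs conversion drops the intermediate dev_link_t entirely: the probe callback fills in the io/irq/conf members of struct pcmcia_device itself, the config routine returns an errno instead of toggling link->state flags, and the trivial suspend/resume handlers disappear because the PCMCIA core now saves and restores the socket configuration. A minimal sketch of that probe shape, with hypothetical example_* names and a placeholder private struct; the field names are the ones used in the hunks above:

#include <linux/slab.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

struct example_info {
	dev_node_t node;
	int busy;
};

static int example_config(struct pcmcia_device *link)
{
	/* tuple walking and the pcmcia_request_*() calls would go here; the
	 * helpers now take the pcmcia_device instead of a client_handle_t */
	return 0;
}

static int example_probe(struct pcmcia_device *p_dev)
{
	struct example_info *local;

	/* resource requests now live directly on the pcmcia_device */
	p_dev->io.NumPorts1 = 16;
	p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;

	p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_FIRST_SHARED;
	p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;

	p_dev->conf.Attributes = CONF_ENABLE_IRQ;
	p_dev->conf.IntType = INT_MEMORY_AND_IO;

	local = kzalloc(sizeof(*local), GFP_KERNEL);
	if (!local)
		return -ENOMEM;
	p_dev->priv = local;

	/* configuration happens during probe; failures propagate as errnos */
	return example_config(p_dev);
}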
diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c
index 969da40c4248..ac28e3278ad9 100644
--- a/drivers/isdn/hisax/avma1_cs.c
+++ b/drivers/isdn/hisax/avma1_cs.c
@@ -67,8 +67,8 @@ module_param(isdnprot, int, 0);
67 handler. 67 handler.
68*/ 68*/
69 69
70static void avma1cs_config(dev_link_t *link); 70static int avma1cs_config(struct pcmcia_device *link);
71static void avma1cs_release(dev_link_t *link); 71static void avma1cs_release(struct pcmcia_device *link);
72 72
73/* 73/*
74 The attach() and detach() entry points are used to create and destroy 74 The attach() and detach() entry points are used to create and destroy
@@ -82,10 +82,10 @@ static void avma1cs_detach(struct pcmcia_device *p_dev);
82/* 82/*
83 A linked list of "instances" of the skeleton device. Each actual 83 A linked list of "instances" of the skeleton device. Each actual
84 PCMCIA card corresponds to one device instance, and is described 84 PCMCIA card corresponds to one device instance, and is described
85 by one dev_link_t structure (defined in ds.h). 85 by one struct pcmcia_device structure (defined in ds.h).
86 86
87 You may not want to use a linked list for this -- for example, the 87 You may not want to use a linked list for this -- for example, the
88 memory card driver uses an array of dev_link_t pointers, where minor 88 memory card driver uses an array of struct pcmcia_device pointers, where minor
89 device numbers are used to derive the corresponding array index. 89 device numbers are used to derive the corresponding array index.
90*/ 90*/
91 91
@@ -95,7 +95,7 @@ static void avma1cs_detach(struct pcmcia_device *p_dev);
95 example, ethernet cards, modems). In other cases, there may be 95 example, ethernet cards, modems). In other cases, there may be
96 many actual or logical devices (SCSI adapters, memory cards with 96 many actual or logical devices (SCSI adapters, memory cards with
97 multiple partitions). The dev_node_t structures need to be kept 97 multiple partitions). The dev_node_t structures need to be kept
98 in a linked list starting at the 'dev' field of a dev_link_t 98 in a linked list starting at the 'dev' field of a struct pcmcia_device
99 structure. We allocate them in the card's private data structure, 99 structure. We allocate them in the card's private data structure,
100 because they generally can't be allocated dynamically. 100 because they generally can't be allocated dynamically.
101*/ 101*/
@@ -116,55 +116,40 @@ typedef struct local_info_t {
116 116
117======================================================================*/ 117======================================================================*/
118 118
119static int avma1cs_attach(struct pcmcia_device *p_dev) 119static int avma1cs_probe(struct pcmcia_device *p_dev)
120{ 120{
121 dev_link_t *link;
122 local_info_t *local; 121 local_info_t *local;
123 122
124 DEBUG(0, "avma1cs_attach()\n"); 123 DEBUG(0, "avma1cs_attach()\n");
125 124
126 /* Initialize the dev_link_t structure */
127 link = kmalloc(sizeof(struct dev_link_t), GFP_KERNEL);
128 if (!link)
129 return -ENOMEM;
130 memset(link, 0, sizeof(struct dev_link_t));
131
132 /* Allocate space for private device-specific data */ 125 /* Allocate space for private device-specific data */
133 local = kmalloc(sizeof(local_info_t), GFP_KERNEL); 126 local = kmalloc(sizeof(local_info_t), GFP_KERNEL);
134 if (!local) { 127 if (!local)
135 kfree(link);
136 return -ENOMEM; 128 return -ENOMEM;
137 } 129
138 memset(local, 0, sizeof(local_info_t)); 130 memset(local, 0, sizeof(local_info_t));
139 link->priv = local; 131 p_dev->priv = local;
140 132
141 /* The io structure describes IO port mapping */ 133 /* The io structure describes IO port mapping */
142 link->io.NumPorts1 = 16; 134 p_dev->io.NumPorts1 = 16;
143 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 135 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
144 link->io.NumPorts2 = 16; 136 p_dev->io.NumPorts2 = 16;
145 link->io.Attributes2 = IO_DATA_PATH_WIDTH_16; 137 p_dev->io.Attributes2 = IO_DATA_PATH_WIDTH_16;
146 link->io.IOAddrLines = 5; 138 p_dev->io.IOAddrLines = 5;
147 139
148 /* Interrupt setup */ 140 /* Interrupt setup */
149 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 141 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
150 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; 142 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
151 143
152 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 144 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
153 145
154 /* General socket configuration */ 146 /* General socket configuration */
155 link->conf.Attributes = CONF_ENABLE_IRQ; 147 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
156 link->conf.Vcc = 50; 148 p_dev->conf.IntType = INT_MEMORY_AND_IO;
157 link->conf.IntType = INT_MEMORY_AND_IO; 149 p_dev->conf.ConfigIndex = 1;
158 link->conf.ConfigIndex = 1; 150 p_dev->conf.Present = PRESENT_OPTION;
159 link->conf.Present = PRESENT_OPTION;
160 151
161 link->handle = p_dev; 152 return avma1cs_config(p_dev);
162 p_dev->instance = link;
163
164 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
165 avma1cs_config(link);
166
167 return 0;
168} /* avma1cs_attach */ 153} /* avma1cs_attach */
169 154
170/*====================================================================== 155/*======================================================================
@@ -176,17 +161,11 @@ static int avma1cs_attach(struct pcmcia_device *p_dev)
176 161
177======================================================================*/ 162======================================================================*/
178 163
179static void avma1cs_detach(struct pcmcia_device *p_dev) 164static void avma1cs_detach(struct pcmcia_device *link)
180{ 165{
181 dev_link_t *link = dev_to_instance(p_dev); 166 DEBUG(0, "avma1cs_detach(0x%p)\n", link);
182 167 avma1cs_release(link);
183 DEBUG(0, "avma1cs_detach(0x%p)\n", link); 168 kfree(link->priv);
184
185 if (link->state & DEV_CONFIG)
186 avma1cs_release(link);
187
188 kfree(link->priv);
189 kfree(link);
190} /* avma1cs_detach */ 169} /* avma1cs_detach */
191 170
192/*====================================================================== 171/*======================================================================
@@ -197,7 +176,7 @@ static void avma1cs_detach(struct pcmcia_device *p_dev)
197 176
198======================================================================*/ 177======================================================================*/
199 178
200static int get_tuple(client_handle_t handle, tuple_t *tuple, 179static int get_tuple(struct pcmcia_device *handle, tuple_t *tuple,
201 cisparse_t *parse) 180 cisparse_t *parse)
202{ 181{
203 int i = pcmcia_get_tuple_data(handle, tuple); 182 int i = pcmcia_get_tuple_data(handle, tuple);
@@ -205,7 +184,7 @@ static int get_tuple(client_handle_t handle, tuple_t *tuple,
205 return pcmcia_parse_tuple(handle, tuple, parse); 184 return pcmcia_parse_tuple(handle, tuple, parse);
206} 185}
207 186
208static int first_tuple(client_handle_t handle, tuple_t *tuple, 187static int first_tuple(struct pcmcia_device *handle, tuple_t *tuple,
209 cisparse_t *parse) 188 cisparse_t *parse)
210{ 189{
211 int i = pcmcia_get_first_tuple(handle, tuple); 190 int i = pcmcia_get_first_tuple(handle, tuple);
@@ -213,7 +192,7 @@ static int first_tuple(client_handle_t handle, tuple_t *tuple,
213 return get_tuple(handle, tuple, parse); 192 return get_tuple(handle, tuple, parse);
214} 193}
215 194
216static int next_tuple(client_handle_t handle, tuple_t *tuple, 195static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple,
217 cisparse_t *parse) 196 cisparse_t *parse)
218{ 197{
219 int i = pcmcia_get_next_tuple(handle, tuple); 198 int i = pcmcia_get_next_tuple(handle, tuple);
@@ -221,9 +200,8 @@ static int next_tuple(client_handle_t handle, tuple_t *tuple,
221 return get_tuple(handle, tuple, parse); 200 return get_tuple(handle, tuple, parse);
222} 201}
223 202
224static void avma1cs_config(dev_link_t *link) 203static int avma1cs_config(struct pcmcia_device *link)
225{ 204{
226 client_handle_t handle;
227 tuple_t tuple; 205 tuple_t tuple;
228 cisparse_t parse; 206 cisparse_t parse;
229 cistpl_cftable_entry_t *cf = &parse.cftable_entry; 207 cistpl_cftable_entry_t *cf = &parse.cftable_entry;
@@ -233,8 +211,7 @@ static void avma1cs_config(dev_link_t *link)
233 char devname[128]; 211 char devname[128];
234 IsdnCard_t icard; 212 IsdnCard_t icard;
235 int busy = 0; 213 int busy = 0;
236 214
237 handle = link->handle;
238 dev = link->priv; 215 dev = link->priv;
239 216
240 DEBUG(0, "avma1cs_config(0x%p)\n", link); 217 DEBUG(0, "avma1cs_config(0x%p)\n", link);
@@ -245,25 +222,21 @@ static void avma1cs_config(dev_link_t *link)
245 */ 222 */
246 do { 223 do {
247 tuple.DesiredTuple = CISTPL_CONFIG; 224 tuple.DesiredTuple = CISTPL_CONFIG;
248 i = pcmcia_get_first_tuple(handle, &tuple); 225 i = pcmcia_get_first_tuple(link, &tuple);
249 if (i != CS_SUCCESS) break; 226 if (i != CS_SUCCESS) break;
250 tuple.TupleData = buf; 227 tuple.TupleData = buf;
251 tuple.TupleDataMax = 64; 228 tuple.TupleDataMax = 64;
252 tuple.TupleOffset = 0; 229 tuple.TupleOffset = 0;
253 i = pcmcia_get_tuple_data(handle, &tuple); 230 i = pcmcia_get_tuple_data(link, &tuple);
254 if (i != CS_SUCCESS) break; 231 if (i != CS_SUCCESS) break;
255 i = pcmcia_parse_tuple(handle, &tuple, &parse); 232 i = pcmcia_parse_tuple(link, &tuple, &parse);
256 if (i != CS_SUCCESS) break; 233 if (i != CS_SUCCESS) break;
257 link->conf.ConfigBase = parse.config.base; 234 link->conf.ConfigBase = parse.config.base;
258 } while (0); 235 } while (0);
259 if (i != CS_SUCCESS) { 236 if (i != CS_SUCCESS) {
260 cs_error(link->handle, ParseTuple, i); 237 cs_error(link, ParseTuple, i);
261 link->state &= ~DEV_CONFIG_PENDING; 238 return -ENODEV;
262 return;
263 } 239 }
264
265 /* Configure card */
266 link->state |= DEV_CONFIG;
267 240
268 do { 241 do {
269 242
@@ -274,7 +247,7 @@ static void avma1cs_config(dev_link_t *link)
274 tuple.DesiredTuple = CISTPL_VERS_1; 247 tuple.DesiredTuple = CISTPL_VERS_1;
275 248
276 devname[0] = 0; 249 devname[0] = 0;
277 if( !first_tuple(handle, &tuple, &parse) && parse.version_1.ns > 1 ) { 250 if( !first_tuple(link, &tuple, &parse) && parse.version_1.ns > 1 ) {
278 strlcpy(devname,parse.version_1.str + parse.version_1.ofs[1], 251 strlcpy(devname,parse.version_1.str + parse.version_1.ofs[1],
279 sizeof(devname)); 252 sizeof(devname));
280 } 253 }
@@ -285,7 +258,7 @@ static void avma1cs_config(dev_link_t *link)
285 tuple.TupleOffset = 0; tuple.TupleDataMax = 255; 258 tuple.TupleOffset = 0; tuple.TupleDataMax = 255;
286 tuple.Attributes = 0; 259 tuple.Attributes = 0;
287 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 260 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
288 i = first_tuple(handle, &tuple, &parse); 261 i = first_tuple(link, &tuple, &parse);
289 while (i == CS_SUCCESS) { 262 while (i == CS_SUCCESS) {
290 if (cf->io.nwin > 0) { 263 if (cf->io.nwin > 0) {
291 link->conf.ConfigIndex = cf->index; 264 link->conf.ConfigIndex = cf->index;
@@ -295,36 +268,36 @@ static void avma1cs_config(dev_link_t *link)
295 printk(KERN_INFO "avma1_cs: testing i/o %#x-%#x\n", 268 printk(KERN_INFO "avma1_cs: testing i/o %#x-%#x\n",
296 link->io.BasePort1, 269 link->io.BasePort1,
297 link->io.BasePort1+link->io.NumPorts1 - 1); 270 link->io.BasePort1+link->io.NumPorts1 - 1);
298 i = pcmcia_request_io(link->handle, &link->io); 271 i = pcmcia_request_io(link, &link->io);
299 if (i == CS_SUCCESS) goto found_port; 272 if (i == CS_SUCCESS) goto found_port;
300 } 273 }
301 i = next_tuple(handle, &tuple, &parse); 274 i = next_tuple(link, &tuple, &parse);
302 } 275 }
303 276
304found_port: 277found_port:
305 if (i != CS_SUCCESS) { 278 if (i != CS_SUCCESS) {
306 cs_error(link->handle, RequestIO, i); 279 cs_error(link, RequestIO, i);
307 break; 280 break;
308 } 281 }
309 282
310 /* 283 /*
311 * allocate an interrupt line 284 * allocate an interrupt line
312 */ 285 */
313 i = pcmcia_request_irq(link->handle, &link->irq); 286 i = pcmcia_request_irq(link, &link->irq);
314 if (i != CS_SUCCESS) { 287 if (i != CS_SUCCESS) {
315 cs_error(link->handle, RequestIRQ, i); 288 cs_error(link, RequestIRQ, i);
316 pcmcia_release_io(link->handle, &link->io); 289 /* undo */
290 pcmcia_disable_device(link);
317 break; 291 break;
318 } 292 }
319 293
320 /* 294 /*
321 * configure the PCMCIA socket 295 * configure the PCMCIA socket
322 */ 296 */
323 i = pcmcia_request_configuration(link->handle, &link->conf); 297 i = pcmcia_request_configuration(link, &link->conf);
324 if (i != CS_SUCCESS) { 298 if (i != CS_SUCCESS) {
325 cs_error(link->handle, RequestConfiguration, i); 299 cs_error(link, RequestConfiguration, i);
326 pcmcia_release_io(link->handle, &link->io); 300 pcmcia_disable_device(link);
327 pcmcia_release_irq(link->handle, &link->irq);
328 break; 301 break;
329 } 302 }
330 303
@@ -336,13 +309,12 @@ found_port:
336 strcpy(dev->node.dev_name, "A1"); 309 strcpy(dev->node.dev_name, "A1");
337 dev->node.major = 45; 310 dev->node.major = 45;
338 dev->node.minor = 0; 311 dev->node.minor = 0;
339 link->dev = &dev->node; 312 link->dev_node = &dev->node;
340 313
341 link->state &= ~DEV_CONFIG_PENDING;
342 /* If any step failed, release any partially configured state */ 314 /* If any step failed, release any partially configured state */
343 if (i != 0) { 315 if (i != 0) {
344 avma1cs_release(link); 316 avma1cs_release(link);
345 return; 317 return -ENODEV;
346 } 318 }
347 319
348 printk(KERN_NOTICE "avma1_cs: checking at i/o %#x, irq %d\n", 320 printk(KERN_NOTICE "avma1_cs: checking at i/o %#x, irq %d\n",
@@ -357,10 +329,11 @@ found_port:
357 if (i < 0) { 329 if (i < 0) {
358 printk(KERN_ERR "avma1_cs: failed to initialize AVM A1 PCMCIA %d at i/o %#x\n", i, link->io.BasePort1); 330 printk(KERN_ERR "avma1_cs: failed to initialize AVM A1 PCMCIA %d at i/o %#x\n", i, link->io.BasePort1);
359 avma1cs_release(link); 331 avma1cs_release(link);
360 return; 332 return -ENODEV;
361 } 333 }
362 dev->node.minor = i; 334 dev->node.minor = i;
363 335
336 return 0;
364} /* avma1cs_config */ 337} /* avma1cs_config */
365 338
366/*====================================================================== 339/*======================================================================
@@ -371,47 +344,18 @@ found_port:
371 344
372======================================================================*/ 345======================================================================*/
373 346
374static void avma1cs_release(dev_link_t *link) 347static void avma1cs_release(struct pcmcia_device *link)
375{ 348{
376 local_info_t *local = link->priv; 349 local_info_t *local = link->priv;
377 350
378 DEBUG(0, "avma1cs_release(0x%p)\n", link); 351 DEBUG(0, "avma1cs_release(0x%p)\n", link);
379 352
380 /* no unregister function with hisax */ 353 /* now unregister function with hisax */
381 HiSax_closecard(local->node.minor); 354 HiSax_closecard(local->node.minor);
382 355
383 /* Unlink the device chain */ 356 pcmcia_disable_device(link);
384 link->dev = NULL;
385
386 /* Don't bother checking to see if these succeed or not */
387 pcmcia_release_configuration(link->handle);
388 pcmcia_release_io(link->handle, &link->io);
389 pcmcia_release_irq(link->handle, &link->irq);
390 link->state &= ~DEV_CONFIG;
391} /* avma1cs_release */ 357} /* avma1cs_release */
392 358
393static int avma1cs_suspend(struct pcmcia_device *dev)
394{
395 dev_link_t *link = dev_to_instance(dev);
396
397 link->state |= DEV_SUSPEND;
398 if (link->state & DEV_CONFIG)
399 pcmcia_release_configuration(link->handle);
400
401 return 0;
402}
403
404static int avma1cs_resume(struct pcmcia_device *dev)
405{
406 dev_link_t *link = dev_to_instance(dev);
407
408 link->state &= ~DEV_SUSPEND;
409 if (link->state & DEV_CONFIG)
410 pcmcia_request_configuration(link->handle, &link->conf);
411
412 return 0;
413}
414
415 359
416static struct pcmcia_device_id avma1cs_ids[] = { 360static struct pcmcia_device_id avma1cs_ids[] = {
417 PCMCIA_DEVICE_PROD_ID12("AVM", "ISDN A", 0x95d42008, 0xadc9d4bb), 361 PCMCIA_DEVICE_PROD_ID12("AVM", "ISDN A", 0x95d42008, 0xadc9d4bb),
@@ -425,13 +369,11 @@ static struct pcmcia_driver avma1cs_driver = {
425 .drv = { 369 .drv = {
426 .name = "avma1_cs", 370 .name = "avma1_cs",
427 }, 371 },
428 .probe = avma1cs_attach, 372 .probe = avma1cs_probe,
429 .remove = avma1cs_detach, 373 .remove = avma1cs_detach,
430 .id_table = avma1cs_ids, 374 .id_table = avma1cs_ids,
431 .suspend = avma1cs_suspend,
432 .resume = avma1cs_resume,
433}; 375};
434 376
435/*====================================================================*/ 377/*====================================================================*/
436 378
437static int __init init_avma1_cs(void) 379static int __init init_avma1_cs(void)
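avma1_cs follows the same shape on the teardown side: the per-resource pcmcia_release_configuration()/_io()/_irq() calls and the manual link->dev/link->state bookkeeping collapse into a single pcmcia_disable_device(), and detach no longer frees a separate dev_link_t, only the driver's private data. A minimal sketch of the new release/detach pair (headers and example_info as in the previous sketch, names again hypothetical):

static void example_release(struct pcmcia_device *link)
{
	/* one call undoes the whole socket configuration */
	pcmcia_disable_device(link);
}

static void example_detach(struct pcmcia_device *link)
{
	example_release(link);
	kfree(link->priv);	/* allocated in probe; no dev_link_t to free */
}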
diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c
index 062fb8f0739f..e18e75be8ed3 100644
--- a/drivers/isdn/hisax/elsa_cs.c
+++ b/drivers/isdn/hisax/elsa_cs.c
@@ -94,8 +94,8 @@ module_param(protocol, int, 0);
94 handler. 94 handler.
95*/ 95*/
96 96
97static void elsa_cs_config(dev_link_t *link); 97static int elsa_cs_config(struct pcmcia_device *link);
98static void elsa_cs_release(dev_link_t *link); 98static void elsa_cs_release(struct pcmcia_device *link);
99 99
100/* 100/*
101 The attach() and detach() entry points are used to create and destroy 101 The attach() and detach() entry points are used to create and destroy
@@ -111,7 +111,7 @@ static void elsa_cs_detach(struct pcmcia_device *p_dev);
111 example, ethernet cards, modems). In other cases, there may be 111 example, ethernet cards, modems). In other cases, there may be
112 many actual or logical devices (SCSI adapters, memory cards with 112 many actual or logical devices (SCSI adapters, memory cards with
113 multiple partitions). The dev_node_t structures need to be kept 113 multiple partitions). The dev_node_t structures need to be kept
114 in a linked list starting at the 'dev' field of a dev_link_t 114 in a linked list starting at the 'dev' field of a struct pcmcia_device
115 structure. We allocate them in the card's private data structure, 115 structure. We allocate them in the card's private data structure,
116 because they generally shouldn't be allocated dynamically. 116 because they generally shouldn't be allocated dynamically.
117 In this case, we also provide a flag to indicate if a device is 117 In this case, we also provide a flag to indicate if a device is
@@ -121,7 +121,7 @@ static void elsa_cs_detach(struct pcmcia_device *p_dev);
121*/ 121*/
122 122
123typedef struct local_info_t { 123typedef struct local_info_t {
124 dev_link_t link; 124 struct pcmcia_device *p_dev;
125 dev_node_t node; 125 dev_node_t node;
126 int busy; 126 int busy;
127 int cardnr; 127 int cardnr;
@@ -139,9 +139,8 @@ typedef struct local_info_t {
139 139
140======================================================================*/ 140======================================================================*/
141 141
142static int elsa_cs_attach(struct pcmcia_device *p_dev) 142static int elsa_cs_probe(struct pcmcia_device *link)
143{ 143{
144 dev_link_t *link;
145 local_info_t *local; 144 local_info_t *local;
146 145
147 DEBUG(0, "elsa_cs_attach()\n"); 146 DEBUG(0, "elsa_cs_attach()\n");
@@ -150,8 +149,11 @@ static int elsa_cs_attach(struct pcmcia_device *p_dev)
150 local = kmalloc(sizeof(local_info_t), GFP_KERNEL); 149 local = kmalloc(sizeof(local_info_t), GFP_KERNEL);
151 if (!local) return -ENOMEM; 150 if (!local) return -ENOMEM;
152 memset(local, 0, sizeof(local_info_t)); 151 memset(local, 0, sizeof(local_info_t));
152
153 local->p_dev = link;
154 link->priv = local;
155
153 local->cardnr = -1; 156 local->cardnr = -1;
154 link = &local->link; link->priv = local;
155 157
156 /* Interrupt setup */ 158 /* Interrupt setup */
157 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; 159 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
@@ -170,16 +172,9 @@ static int elsa_cs_attach(struct pcmcia_device *p_dev)
170 link->io.IOAddrLines = 3; 172 link->io.IOAddrLines = 3;
171 173
172 link->conf.Attributes = CONF_ENABLE_IRQ; 174 link->conf.Attributes = CONF_ENABLE_IRQ;
173 link->conf.Vcc = 50;
174 link->conf.IntType = INT_MEMORY_AND_IO; 175 link->conf.IntType = INT_MEMORY_AND_IO;
175 176
176 link->handle = p_dev; 177 return elsa_cs_config(link);
177 p_dev->instance = link;
178
179 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
180 elsa_cs_config(link);
181
182 return 0;
183} /* elsa_cs_attach */ 178} /* elsa_cs_attach */
184 179
185/*====================================================================== 180/*======================================================================
@@ -191,20 +186,16 @@ static int elsa_cs_attach(struct pcmcia_device *p_dev)
191 186
192======================================================================*/ 187======================================================================*/
193 188
194static void elsa_cs_detach(struct pcmcia_device *p_dev) 189static void elsa_cs_detach(struct pcmcia_device *link)
195{ 190{
196 dev_link_t *link = dev_to_instance(p_dev); 191 local_info_t *info = link->priv;
197 local_info_t *info = link->priv;
198 192
199 DEBUG(0, "elsa_cs_detach(0x%p)\n", link); 193 DEBUG(0, "elsa_cs_detach(0x%p)\n", link);
200 194
201 if (link->state & DEV_CONFIG) { 195 info->busy = 1;
202 info->busy = 1; 196 elsa_cs_release(link);
203 elsa_cs_release(link);
204 }
205
206 kfree(info);
207 197
198 kfree(info);
208} /* elsa_cs_detach */ 199} /* elsa_cs_detach */
209 200
210/*====================================================================== 201/*======================================================================
@@ -214,7 +205,7 @@ static void elsa_cs_detach(struct pcmcia_device *p_dev)
214 device available to the system. 205 device available to the system.
215 206
216======================================================================*/ 207======================================================================*/
217static int get_tuple(client_handle_t handle, tuple_t *tuple, 208static int get_tuple(struct pcmcia_device *handle, tuple_t *tuple,
218 cisparse_t *parse) 209 cisparse_t *parse)
219{ 210{
220 int i = pcmcia_get_tuple_data(handle, tuple); 211 int i = pcmcia_get_tuple_data(handle, tuple);
@@ -222,7 +213,7 @@ static int get_tuple(client_handle_t handle, tuple_t *tuple,
222 return pcmcia_parse_tuple(handle, tuple, parse); 213 return pcmcia_parse_tuple(handle, tuple, parse);
223} 214}
224 215
225static int first_tuple(client_handle_t handle, tuple_t *tuple, 216static int first_tuple(struct pcmcia_device *handle, tuple_t *tuple,
226 cisparse_t *parse) 217 cisparse_t *parse)
227{ 218{
228 int i = pcmcia_get_first_tuple(handle, tuple); 219 int i = pcmcia_get_first_tuple(handle, tuple);
@@ -230,7 +221,7 @@ static int first_tuple(client_handle_t handle, tuple_t *tuple,
230 return get_tuple(handle, tuple, parse); 221 return get_tuple(handle, tuple, parse);
231} 222}
232 223
233static int next_tuple(client_handle_t handle, tuple_t *tuple, 224static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple,
234 cisparse_t *parse) 225 cisparse_t *parse)
235{ 226{
236 int i = pcmcia_get_next_tuple(handle, tuple); 227 int i = pcmcia_get_next_tuple(handle, tuple);
@@ -238,9 +229,8 @@ static int next_tuple(client_handle_t handle, tuple_t *tuple,
238 return get_tuple(handle, tuple, parse); 229 return get_tuple(handle, tuple, parse);
239} 230}
240 231
241static void elsa_cs_config(dev_link_t *link) 232static int elsa_cs_config(struct pcmcia_device *link)
242{ 233{
243 client_handle_t handle;
244 tuple_t tuple; 234 tuple_t tuple;
245 cisparse_t parse; 235 cisparse_t parse;
246 local_info_t *dev; 236 local_info_t *dev;
@@ -250,7 +240,6 @@ static void elsa_cs_config(dev_link_t *link)
250 IsdnCard_t icard; 240 IsdnCard_t icard;
251 241
252 DEBUG(0, "elsa_config(0x%p)\n", link); 242 DEBUG(0, "elsa_config(0x%p)\n", link);
253 handle = link->handle;
254 dev = link->priv; 243 dev = link->priv;
255 244
256 /* 245 /*
@@ -262,7 +251,7 @@ static void elsa_cs_config(dev_link_t *link)
262 tuple.TupleDataMax = 255; 251 tuple.TupleDataMax = 255;
263 tuple.TupleOffset = 0; 252 tuple.TupleOffset = 0;
264 tuple.Attributes = 0; 253 tuple.Attributes = 0;
265 i = first_tuple(handle, &tuple, &parse); 254 i = first_tuple(link, &tuple, &parse);
266 if (i != CS_SUCCESS) { 255 if (i != CS_SUCCESS) {
267 last_fn = ParseTuple; 256 last_fn = ParseTuple;
268 goto cs_failed; 257 goto cs_failed;
@@ -270,32 +259,29 @@ static void elsa_cs_config(dev_link_t *link)
270 link->conf.ConfigBase = parse.config.base; 259 link->conf.ConfigBase = parse.config.base;
271 link->conf.Present = parse.config.rmask[0]; 260 link->conf.Present = parse.config.rmask[0];
272 261
273 /* Configure card */
274 link->state |= DEV_CONFIG;
275
276 tuple.TupleData = (cisdata_t *)buf; 262 tuple.TupleData = (cisdata_t *)buf;
277 tuple.TupleOffset = 0; tuple.TupleDataMax = 255; 263 tuple.TupleOffset = 0; tuple.TupleDataMax = 255;
278 tuple.Attributes = 0; 264 tuple.Attributes = 0;
279 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 265 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
280 i = first_tuple(handle, &tuple, &parse); 266 i = first_tuple(link, &tuple, &parse);
281 while (i == CS_SUCCESS) { 267 while (i == CS_SUCCESS) {
282 if ( (cf->io.nwin > 0) && cf->io.win[0].base) { 268 if ( (cf->io.nwin > 0) && cf->io.win[0].base) {
283 printk(KERN_INFO "(elsa_cs: looks like the 96 model)\n"); 269 printk(KERN_INFO "(elsa_cs: looks like the 96 model)\n");
284 link->conf.ConfigIndex = cf->index; 270 link->conf.ConfigIndex = cf->index;
285 link->io.BasePort1 = cf->io.win[0].base; 271 link->io.BasePort1 = cf->io.win[0].base;
286 i = pcmcia_request_io(link->handle, &link->io); 272 i = pcmcia_request_io(link, &link->io);
287 if (i == CS_SUCCESS) break; 273 if (i == CS_SUCCESS) break;
288 } else { 274 } else {
289 printk(KERN_INFO "(elsa_cs: looks like the 97 model)\n"); 275 printk(KERN_INFO "(elsa_cs: looks like the 97 model)\n");
290 link->conf.ConfigIndex = cf->index; 276 link->conf.ConfigIndex = cf->index;
291 for (i = 0, j = 0x2f0; j > 0x100; j -= 0x10) { 277 for (i = 0, j = 0x2f0; j > 0x100; j -= 0x10) {
292 link->io.BasePort1 = j; 278 link->io.BasePort1 = j;
293 i = pcmcia_request_io(link->handle, &link->io); 279 i = pcmcia_request_io(link, &link->io);
294 if (i == CS_SUCCESS) break; 280 if (i == CS_SUCCESS) break;
295 } 281 }
296 break; 282 break;
297 } 283 }
298 i = next_tuple(handle, &tuple, &parse); 284 i = next_tuple(link, &tuple, &parse);
299 } 285 }
300 286
301 if (i != CS_SUCCESS) { 287 if (i != CS_SUCCESS) {
@@ -303,14 +289,14 @@ static void elsa_cs_config(dev_link_t *link)
303 goto cs_failed; 289 goto cs_failed;
304 } 290 }
305 291
306 i = pcmcia_request_irq(link->handle, &link->irq); 292 i = pcmcia_request_irq(link, &link->irq);
307 if (i != CS_SUCCESS) { 293 if (i != CS_SUCCESS) {
308 link->irq.AssignedIRQ = 0; 294 link->irq.AssignedIRQ = 0;
309 last_fn = RequestIRQ; 295 last_fn = RequestIRQ;
310 goto cs_failed; 296 goto cs_failed;
311 } 297 }
312 298
313 i = pcmcia_request_configuration(link->handle, &link->conf); 299 i = pcmcia_request_configuration(link, &link->conf);
314 if (i != CS_SUCCESS) { 300 if (i != CS_SUCCESS) {
315 last_fn = RequestConfiguration; 301 last_fn = RequestConfiguration;
316 goto cs_failed; 302 goto cs_failed;
@@ -321,14 +307,11 @@ static void elsa_cs_config(dev_link_t *link)
321 sprintf(dev->node.dev_name, "elsa"); 307 sprintf(dev->node.dev_name, "elsa");
322 dev->node.major = dev->node.minor = 0x0; 308 dev->node.major = dev->node.minor = 0x0;
323 309
324 link->dev = &dev->node; 310 link->dev_node = &dev->node;
325 311
326 /* Finally, report what we've done */ 312 /* Finally, report what we've done */
327 printk(KERN_INFO "%s: index 0x%02x: Vcc %d.%d", 313 printk(KERN_INFO "%s: index 0x%02x: ",
328 dev->node.dev_name, link->conf.ConfigIndex, 314 dev->node.dev_name, link->conf.ConfigIndex);
329 link->conf.Vcc/10, link->conf.Vcc%10);
330 if (link->conf.Vpp1)
331 printk(", Vpp %d.%d", link->conf.Vpp1/10, link->conf.Vpp1%10);
332 if (link->conf.Attributes & CONF_ENABLE_IRQ) 315 if (link->conf.Attributes & CONF_ENABLE_IRQ)
333 printk(", irq %d", link->irq.AssignedIRQ); 316 printk(", irq %d", link->irq.AssignedIRQ);
334 if (link->io.NumPorts1) 317 if (link->io.NumPorts1)
@@ -339,8 +322,6 @@ static void elsa_cs_config(dev_link_t *link)
339 link->io.BasePort2+link->io.NumPorts2-1); 322 link->io.BasePort2+link->io.NumPorts2-1);
340 printk("\n"); 323 printk("\n");
341 324
342 link->state &= ~DEV_CONFIG_PENDING;
343
344 icard.para[0] = link->irq.AssignedIRQ; 325 icard.para[0] = link->irq.AssignedIRQ;
345 icard.para[1] = link->io.BasePort1; 326 icard.para[1] = link->io.BasePort1;
346 icard.protocol = protocol; 327 icard.protocol = protocol;
@@ -354,10 +335,11 @@ static void elsa_cs_config(dev_link_t *link)
354 } else 335 } else
355 ((local_info_t*)link->priv)->cardnr = i; 336 ((local_info_t*)link->priv)->cardnr = i;
356 337
357 return; 338 return 0;
358cs_failed: 339cs_failed:
359 cs_error(link->handle, last_fn, i); 340 cs_error(link, last_fn, i);
360 elsa_cs_release(link); 341 elsa_cs_release(link);
342 return -ENODEV;
361} /* elsa_cs_config */ 343} /* elsa_cs_config */
362 344
363/*====================================================================== 345/*======================================================================
@@ -368,7 +350,7 @@ cs_failed:
368 350
369======================================================================*/ 351======================================================================*/
370 352
371static void elsa_cs_release(dev_link_t *link) 353static void elsa_cs_release(struct pcmcia_device *link)
372{ 354{
373 local_info_t *local = link->priv; 355 local_info_t *local = link->priv;
374 356
@@ -380,39 +362,23 @@ static void elsa_cs_release(dev_link_t *link)
380 HiSax_closecard(local->cardnr); 362 HiSax_closecard(local->cardnr);
381 } 363 }
382 } 364 }
383 /* Unlink the device chain */ 365
384 link->dev = NULL; 366 pcmcia_disable_device(link);
385
386 /* Don't bother checking to see if these succeed or not */
387 if (link->win)
388 pcmcia_release_window(link->win);
389 pcmcia_release_configuration(link->handle);
390 pcmcia_release_io(link->handle, &link->io);
391 pcmcia_release_irq(link->handle, &link->irq);
392 link->state &= ~DEV_CONFIG;
393} /* elsa_cs_release */ 367} /* elsa_cs_release */
394 368
395static int elsa_suspend(struct pcmcia_device *p_dev) 369static int elsa_suspend(struct pcmcia_device *link)
396{ 370{
397 dev_link_t *link = dev_to_instance(p_dev);
398 local_info_t *dev = link->priv; 371 local_info_t *dev = link->priv;
399 372
400 link->state |= DEV_SUSPEND;
401 dev->busy = 1; 373 dev->busy = 1;
402 if (link->state & DEV_CONFIG)
403 pcmcia_release_configuration(link->handle);
404 374
405 return 0; 375 return 0;
406} 376}
407 377
408static int elsa_resume(struct pcmcia_device *p_dev) 378static int elsa_resume(struct pcmcia_device *link)
409{ 379{
410 dev_link_t *link = dev_to_instance(p_dev);
411 local_info_t *dev = link->priv; 380 local_info_t *dev = link->priv;
412 381
413 link->state &= ~DEV_SUSPEND;
414 if (link->state & DEV_CONFIG)
415 pcmcia_request_configuration(link->handle, &link->conf);
416 dev->busy = 0; 382 dev->busy = 0;
417 383
418 return 0; 384 return 0;
@@ -430,7 +396,7 @@ static struct pcmcia_driver elsa_cs_driver = {
430 .drv = { 396 .drv = {
431 .name = "elsa_cs", 397 .name = "elsa_cs",
432 }, 398 },
433 .probe = elsa_cs_attach, 399 .probe = elsa_cs_probe,
434 .remove = elsa_cs_detach, 400 .remove = elsa_cs_detach,
435 .id_table = elsa_ids, 401 .id_table = elsa_ids,
436 .suspend = elsa_suspend, 402 .suspend = elsa_suspend,
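elsa_cs keeps its suspend/resume callbacks because they track driver-private state (the busy flag), but they stop manipulating link->state and stop releasing or re-requesting the socket configuration, since the PCMCIA core now performs that on its own. A sketch of the reduced handlers, reusing the hypothetical example_info from the earlier sketch:

static int example_suspend(struct pcmcia_device *link)
{
	struct example_info *info = link->priv;

	info->busy = 1;		/* driver-private state only; the core
				 * quiesces the socket itself */
	return 0;
}

static int example_resume(struct pcmcia_device *link)
{
	struct example_info *info = link->priv;

	info->busy = 0;
	return 0;
}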
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c
index 6f5213a18a8d..9bb18f3f7829 100644
--- a/drivers/isdn/hisax/sedlbauer_cs.c
+++ b/drivers/isdn/hisax/sedlbauer_cs.c
@@ -95,8 +95,8 @@ module_param(protocol, int, 0);
95 event handler. 95 event handler.
96*/ 96*/
97 97
98static void sedlbauer_config(dev_link_t *link); 98static int sedlbauer_config(struct pcmcia_device *link);
99static void sedlbauer_release(dev_link_t *link); 99static void sedlbauer_release(struct pcmcia_device *link);
100 100
101/* 101/*
102 The attach() and detach() entry points are used to create and destroy 102 The attach() and detach() entry points are used to create and destroy
@@ -119,7 +119,7 @@ static void sedlbauer_detach(struct pcmcia_device *p_dev);
119 example, ethernet cards, modems). In other cases, there may be 119 example, ethernet cards, modems). In other cases, there may be
120 many actual or logical devices (SCSI adapters, memory cards with 120 many actual or logical devices (SCSI adapters, memory cards with
121 multiple partitions). The dev_node_t structures need to be kept 121 multiple partitions). The dev_node_t structures need to be kept
122 in a linked list starting at the 'dev' field of a dev_link_t 122 in a linked list starting at the 'dev' field of a struct pcmcia_device
123 structure. We allocate them in the card's private data structure, 123 structure. We allocate them in the card's private data structure,
124 because they generally shouldn't be allocated dynamically. 124 because they generally shouldn't be allocated dynamically.
125 125
@@ -130,7 +130,7 @@ static void sedlbauer_detach(struct pcmcia_device *p_dev);
130*/ 130*/
131 131
132typedef struct local_info_t { 132typedef struct local_info_t {
133 dev_link_t link; 133 struct pcmcia_device *p_dev;
134 dev_node_t node; 134 dev_node_t node;
135 int stop; 135 int stop;
136 int cardnr; 136 int cardnr;
@@ -148,11 +148,10 @@ typedef struct local_info_t {
148 148
149======================================================================*/ 149======================================================================*/
150 150
151static int sedlbauer_attach(struct pcmcia_device *p_dev) 151static int sedlbauer_probe(struct pcmcia_device *link)
152{ 152{
153 local_info_t *local; 153 local_info_t *local;
154 dev_link_t *link; 154
155
156 DEBUG(0, "sedlbauer_attach()\n"); 155 DEBUG(0, "sedlbauer_attach()\n");
157 156
158 /* Allocate space for private device-specific data */ 157 /* Allocate space for private device-specific data */
@@ -160,8 +159,10 @@ static int sedlbauer_attach(struct pcmcia_device *p_dev)
160 if (!local) return -ENOMEM; 159 if (!local) return -ENOMEM;
161 memset(local, 0, sizeof(local_info_t)); 160 memset(local, 0, sizeof(local_info_t));
162 local->cardnr = -1; 161 local->cardnr = -1;
163 link = &local->link; link->priv = local; 162
164 163 local->p_dev = link;
164 link->priv = local;
165
165 /* Interrupt setup */ 166 /* Interrupt setup */
166 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 167 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
167 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 168 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
@@ -182,18 +183,10 @@ static int sedlbauer_attach(struct pcmcia_device *p_dev)
182 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 183 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
183 link->io.IOAddrLines = 3; 184 link->io.IOAddrLines = 3;
184 185
185
186 link->conf.Attributes = 0; 186 link->conf.Attributes = 0;
187 link->conf.Vcc = 50;
188 link->conf.IntType = INT_MEMORY_AND_IO; 187 link->conf.IntType = INT_MEMORY_AND_IO;
189 188
190 link->handle = p_dev; 189 return sedlbauer_config(link);
191 p_dev->instance = link;
192
193 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
194 sedlbauer_config(link);
195
196 return 0;
197} /* sedlbauer_attach */ 190} /* sedlbauer_attach */
198 191
199/*====================================================================== 192/*======================================================================
@@ -205,19 +198,15 @@ static int sedlbauer_attach(struct pcmcia_device *p_dev)
205 198
206======================================================================*/ 199======================================================================*/
207 200
208static void sedlbauer_detach(struct pcmcia_device *p_dev) 201static void sedlbauer_detach(struct pcmcia_device *link)
209{ 202{
210 dev_link_t *link = dev_to_instance(p_dev); 203 DEBUG(0, "sedlbauer_detach(0x%p)\n", link);
211
212 DEBUG(0, "sedlbauer_detach(0x%p)\n", link);
213 204
214 if (link->state & DEV_CONFIG) { 205 ((local_info_t *)link->priv)->stop = 1;
215 ((local_info_t *)link->priv)->stop = 1; 206 sedlbauer_release(link);
216 sedlbauer_release(link);
217 }
218 207
219 /* This points to the parent local_info_t struct */ 208 /* This points to the parent local_info_t struct */
220 kfree(link->priv); 209 kfree(link->priv);
221} /* sedlbauer_detach */ 210} /* sedlbauer_detach */
222 211
223/*====================================================================== 212/*======================================================================
@@ -230,9 +219,8 @@ static void sedlbauer_detach(struct pcmcia_device *p_dev)
230#define CS_CHECK(fn, ret) \ 219#define CS_CHECK(fn, ret) \
231do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 220do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
232 221
233static void sedlbauer_config(dev_link_t *link) 222static int sedlbauer_config(struct pcmcia_device *link)
234{ 223{
235 client_handle_t handle = link->handle;
236 local_info_t *dev = link->priv; 224 local_info_t *dev = link->priv;
237 tuple_t tuple; 225 tuple_t tuple;
238 cisparse_t parse; 226 cisparse_t parse;
@@ -254,18 +242,13 @@ static void sedlbauer_config(dev_link_t *link)
254 tuple.TupleData = buf; 242 tuple.TupleData = buf;
255 tuple.TupleDataMax = sizeof(buf); 243 tuple.TupleDataMax = sizeof(buf);
256 tuple.TupleOffset = 0; 244 tuple.TupleOffset = 0;
257 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 245 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
258 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 246 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
259 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 247 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
260 link->conf.ConfigBase = parse.config.base; 248 link->conf.ConfigBase = parse.config.base;
261 link->conf.Present = parse.config.rmask[0]; 249 link->conf.Present = parse.config.rmask[0];
262
263 /* Configure card */
264 link->state |= DEV_CONFIG;
265 250
266 /* Look up the current Vcc */ 251 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link, &conf));
267 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(handle, &conf));
268 link->conf.Vcc = conf.Vcc;
269 252
270 /* 253 /*
271 In this loop, we scan the CIS for configuration table entries, 254 In this loop, we scan the CIS for configuration table entries,
@@ -280,12 +263,12 @@ static void sedlbauer_config(dev_link_t *link)
280 will only use the CIS to fill in implementation-defined details. 263 will only use the CIS to fill in implementation-defined details.
281 */ 264 */
282 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 265 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
283 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 266 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
284 while (1) { 267 while (1) {
285 cistpl_cftable_entry_t dflt = { 0 }; 268 cistpl_cftable_entry_t dflt = { 0 };
286 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry); 269 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
287 if (pcmcia_get_tuple_data(handle, &tuple) != 0 || 270 if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
288 pcmcia_parse_tuple(handle, &tuple, &parse) != 0) 271 pcmcia_parse_tuple(link, &tuple, &parse) != 0)
289 goto next_entry; 272 goto next_entry;
290 273
291 if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg; 274 if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg;
@@ -309,10 +292,10 @@ static void sedlbauer_config(dev_link_t *link)
309 } 292 }
310 293
311 if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM)) 294 if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM))
312 link->conf.Vpp1 = link->conf.Vpp2 = 295 link->conf.Vpp =
313 cfg->vpp1.param[CISTPL_POWER_VNOM]/10000; 296 cfg->vpp1.param[CISTPL_POWER_VNOM]/10000;
314 else if (dflt.vpp1.present & (1<<CISTPL_POWER_VNOM)) 297 else if (dflt.vpp1.present & (1<<CISTPL_POWER_VNOM))
315 link->conf.Vpp1 = link->conf.Vpp2 = 298 link->conf.Vpp =
316 dflt.vpp1.param[CISTPL_POWER_VNOM]/10000; 299 dflt.vpp1.param[CISTPL_POWER_VNOM]/10000;
317 300
318 /* Do we need to allocate an interrupt? */ 301 /* Do we need to allocate an interrupt? */
@@ -339,13 +322,13 @@ static void sedlbauer_config(dev_link_t *link)
339 link->io.NumPorts2 = io->win[1].len; 322 link->io.NumPorts2 = io->win[1].len;
340 } 323 }
341 /* This reserves IO space but doesn't actually enable it */ 324 /* This reserves IO space but doesn't actually enable it */
342 if (pcmcia_request_io(link->handle, &link->io) != 0) 325 if (pcmcia_request_io(link, &link->io) != 0)
343 goto next_entry; 326 goto next_entry;
344 } 327 }
345 328
346 /* 329 /*
347 Now set up a common memory window, if needed. There is room 330 Now set up a common memory window, if needed. There is room
348 in the dev_link_t structure for one memory window handle, 331 in the struct pcmcia_device structure for one memory window handle,
349 but if the base addresses need to be saved, or if multiple 332 but if the base addresses need to be saved, or if multiple
350 windows are needed, the info should go in the private data 333 windows are needed, the info should go in the private data
351 structure for this device. 334 structure for this device.
@@ -366,7 +349,7 @@ static void sedlbauer_config(dev_link_t *link)
366 req.Size = 0x1000; 349 req.Size = 0x1000;
367*/ 350*/
368 req.AccessSpeed = 0; 351 req.AccessSpeed = 0;
369 if (pcmcia_request_window(&link->handle, &req, &link->win) != 0) 352 if (pcmcia_request_window(&link, &req, &link->win) != 0)
370 goto next_entry; 353 goto next_entry;
371 map.Page = 0; map.CardOffset = mem->win[0].card_addr; 354 map.Page = 0; map.CardOffset = mem->win[0].card_addr;
372 if (pcmcia_map_mem_page(link->win, &map) != 0) 355 if (pcmcia_map_mem_page(link->win, &map) != 0)
@@ -374,29 +357,25 @@ static void sedlbauer_config(dev_link_t *link)
374 } 357 }
375 /* If we got this far, we're cool! */ 358 /* If we got this far, we're cool! */
376 break; 359 break;
377 360
378 next_entry: 361 next_entry:
379/* new in dummy.cs 2001/01/28 MN 362 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
380 if (link->io.NumPorts1)
381 pcmcia_release_io(link->handle, &link->io);
382*/
383 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple));
384 } 363 }
385 364
386 /* 365 /*
387 Allocate an interrupt line. Note that this does not assign a 366 Allocate an interrupt line. Note that this does not assign a
388 handler to the interrupt, unless the 'Handler' member of the 367 handler to the interrupt, unless the 'Handler' member of the
389 irq structure is initialized. 368 irq structure is initialized.
390 */ 369 */
391 if (link->conf.Attributes & CONF_ENABLE_IRQ) 370 if (link->conf.Attributes & CONF_ENABLE_IRQ)
392 CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq)); 371 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
393 372
394 /* 373 /*
395 This actually configures the PCMCIA socket -- setting up 374 This actually configures the PCMCIA socket -- setting up
396 the I/O windows and the interrupt mapping, and putting the 375 the I/O windows and the interrupt mapping, and putting the
397 card and host interface into "Memory and IO" mode. 376 card and host interface into "Memory and IO" mode.
398 */ 377 */
399 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf)); 378 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
400 379
401 /* 380 /*
402 At this point, the dev_node_t structure(s) need to be 381 At this point, the dev_node_t structure(s) need to be
@@ -404,14 +383,13 @@ static void sedlbauer_config(dev_link_t *link)
404 */ 383 */
405 sprintf(dev->node.dev_name, "sedlbauer"); 384 sprintf(dev->node.dev_name, "sedlbauer");
406 dev->node.major = dev->node.minor = 0; 385 dev->node.major = dev->node.minor = 0;
407 link->dev = &dev->node; 386 link->dev_node = &dev->node;
408 387
409 /* Finally, report what we've done */ 388 /* Finally, report what we've done */
410 printk(KERN_INFO "%s: index 0x%02x: Vcc %d.%d", 389 printk(KERN_INFO "%s: index 0x%02x:",
411 dev->node.dev_name, link->conf.ConfigIndex, 390 dev->node.dev_name, link->conf.ConfigIndex);
412 link->conf.Vcc/10, link->conf.Vcc%10); 391 if (link->conf.Vpp)
413 if (link->conf.Vpp1) 392 printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
414 printk(", Vpp %d.%d", link->conf.Vpp1/10, link->conf.Vpp1%10);
415 if (link->conf.Attributes & CONF_ENABLE_IRQ) 393 if (link->conf.Attributes & CONF_ENABLE_IRQ)
416 printk(", irq %d", link->irq.AssignedIRQ); 394 printk(", irq %d", link->irq.AssignedIRQ);
417 if (link->io.NumPorts1) 395 if (link->io.NumPorts1)
@@ -424,8 +402,6 @@ static void sedlbauer_config(dev_link_t *link)
424 printk(", mem 0x%06lx-0x%06lx", req.Base, 402 printk(", mem 0x%06lx-0x%06lx", req.Base,
425 req.Base+req.Size-1); 403 req.Base+req.Size-1);
426 printk("\n"); 404 printk("\n");
427
428 link->state &= ~DEV_CONFIG_PENDING;
429 405
430 icard.para[0] = link->irq.AssignedIRQ; 406 icard.para[0] = link->irq.AssignedIRQ;
431 icard.para[1] = link->io.BasePort1; 407 icard.para[1] = link->io.BasePort1;
@@ -437,14 +413,16 @@ static void sedlbauer_config(dev_link_t *link)
437 printk(KERN_ERR "sedlbauer_cs: failed to initialize SEDLBAUER PCMCIA %d at i/o %#x\n", 413 printk(KERN_ERR "sedlbauer_cs: failed to initialize SEDLBAUER PCMCIA %d at i/o %#x\n",
438 last_ret, link->io.BasePort1); 414 last_ret, link->io.BasePort1);
439 sedlbauer_release(link); 415 sedlbauer_release(link);
416 return -ENODEV;
440 } else 417 } else
441 ((local_info_t*)link->priv)->cardnr = last_ret; 418 ((local_info_t*)link->priv)->cardnr = last_ret;
442 419
443 return; 420 return 0;
444 421
445cs_failed: 422cs_failed:
446 cs_error(link->handle, last_fn, last_ret); 423 cs_error(link, last_fn, last_ret);
447 sedlbauer_release(link); 424 sedlbauer_release(link);
425 return -ENODEV;
448 426
449} /* sedlbauer_config */ 427} /* sedlbauer_config */
450 428
@@ -456,7 +434,7 @@ cs_failed:
456 434
457======================================================================*/ 435======================================================================*/
458 436
459static void sedlbauer_release(dev_link_t *link) 437static void sedlbauer_release(struct pcmcia_device *link)
460{ 438{
461 local_info_t *local = link->priv; 439 local_info_t *local = link->priv;
462 DEBUG(0, "sedlbauer_release(0x%p)\n", link); 440 DEBUG(0, "sedlbauer_release(0x%p)\n", link);
@@ -467,46 +445,23 @@ static void sedlbauer_release(dev_link_t *link)
467 HiSax_closecard(local->cardnr); 445 HiSax_closecard(local->cardnr);
468 } 446 }
469 } 447 }
470 /* Unlink the device chain */
471 link->dev = NULL;
472 448
473 /* 449 pcmcia_disable_device(link);
474 In a normal driver, additional code may be needed to release
475 other kernel data structures associated with this device.
476 */
477
478 /* Don't bother checking to see if these succeed or not */
479 if (link->win)
480 pcmcia_release_window(link->win);
481 pcmcia_release_configuration(link->handle);
482 if (link->io.NumPorts1)
483 pcmcia_release_io(link->handle, &link->io);
484 if (link->irq.AssignedIRQ)
485 pcmcia_release_irq(link->handle, &link->irq);
486 link->state &= ~DEV_CONFIG;
487} /* sedlbauer_release */ 450} /* sedlbauer_release */
488 451
489static int sedlbauer_suspend(struct pcmcia_device *p_dev) 452static int sedlbauer_suspend(struct pcmcia_device *link)
490{ 453{
491 dev_link_t *link = dev_to_instance(p_dev);
492 local_info_t *dev = link->priv; 454 local_info_t *dev = link->priv;
493 455
494 link->state |= DEV_SUSPEND;
495 dev->stop = 1; 456 dev->stop = 1;
496 if (link->state & DEV_CONFIG)
497 pcmcia_release_configuration(link->handle);
498 457
499 return 0; 458 return 0;
500} 459}
501 460
502static int sedlbauer_resume(struct pcmcia_device *p_dev) 461static int sedlbauer_resume(struct pcmcia_device *link)
503{ 462{
504 dev_link_t *link = dev_to_instance(p_dev);
505 local_info_t *dev = link->priv; 463 local_info_t *dev = link->priv;
506 464
507 link->state &= ~DEV_SUSPEND;
508 if (link->state & DEV_CONFIG)
509 pcmcia_request_configuration(link->handle, &link->conf);
510 dev->stop = 0; 465 dev->stop = 0;
511 466
512 return 0; 467 return 0;
@@ -530,7 +485,7 @@ static struct pcmcia_driver sedlbauer_driver = {
530 .drv = { 485 .drv = {
531 .name = "sedlbauer_cs", 486 .name = "sedlbauer_cs",
532 }, 487 },
533 .probe = sedlbauer_attach, 488 .probe = sedlbauer_probe,
534 .remove = sedlbauer_detach, 489 .remove = sedlbauer_detach,
535 .id_table = sedlbauer_ids, 490 .id_table = sedlbauer_ids,
536 .suspend = sedlbauer_suspend, 491 .suspend = sedlbauer_suspend,
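
The sedlbauer_cs hunks above are part of the tree-wide PCMCIA API change: dev_link_t is gone, probe() is handed the struct pcmcia_device directly, the DEV_PRESENT/DEV_CONFIG_PENDING state bits disappear, and the result of the config routine is propagated back to the core instead of being ignored. A minimal sketch of the shape a converted probe takes, using hypothetical names (example_probe, example_config, struct example_info) rather than the driver's own:

#include <linux/slab.h>
#include <pcmcia/ds.h>

static int example_config(struct pcmcia_device *link); /* requests IO/IRQ, applies link->conf */

struct example_info {
        struct pcmcia_device *p_dev;
        dev_node_t node;
};

static int example_probe(struct pcmcia_device *link)
{
        struct example_info *info;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        info->p_dev = link;     /* replaces the old embedded dev_link_t */
        link->priv = info;

        link->conf.Attributes = 0;
        link->conf.IntType = INT_MEMORY_AND_IO;

        /* no link->handle, p_dev->instance or DEV_* flags; the
         * configuration result goes straight back to the PCMCIA core */
        return example_config(link);
}
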
diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c
index 4e5c14c7240e..afcc2aeadb34 100644
--- a/drivers/isdn/hisax/teles_cs.c
+++ b/drivers/isdn/hisax/teles_cs.c
@@ -75,8 +75,8 @@ module_param(protocol, int, 0);
75 handler. 75 handler.
76*/ 76*/
77 77
78static void teles_cs_config(dev_link_t *link); 78static int teles_cs_config(struct pcmcia_device *link);
79static void teles_cs_release(dev_link_t *link); 79static void teles_cs_release(struct pcmcia_device *link);
80 80
81/* 81/*
82 The attach() and detach() entry points are used to create and destroy 82 The attach() and detach() entry points are used to create and destroy
@@ -89,10 +89,10 @@ static void teles_detach(struct pcmcia_device *p_dev);
89/* 89/*
90 A linked list of "instances" of the teles_cs device. Each actual 90 A linked list of "instances" of the teles_cs device. Each actual
91 PCMCIA card corresponds to one device instance, and is described 91 PCMCIA card corresponds to one device instance, and is described
92 by one dev_link_t structure (defined in ds.h). 92 by one struct pcmcia_device structure (defined in ds.h).
93 93
94 You may not want to use a linked list for this -- for example, the 94 You may not want to use a linked list for this -- for example, the
95 memory card driver uses an array of dev_link_t pointers, where minor 95 memory card driver uses an array of struct pcmcia_device pointers, where minor
96 device numbers are used to derive the corresponding array index. 96 device numbers are used to derive the corresponding array index.
97*/ 97*/
98 98
@@ -102,7 +102,7 @@ static void teles_detach(struct pcmcia_device *p_dev);
102 example, ethernet cards, modems). In other cases, there may be 102 example, ethernet cards, modems). In other cases, there may be
103 many actual or logical devices (SCSI adapters, memory cards with 103 many actual or logical devices (SCSI adapters, memory cards with
104 multiple partitions). The dev_node_t structures need to be kept 104 multiple partitions). The dev_node_t structures need to be kept
105 in a linked list starting at the 'dev' field of a dev_link_t 105 in a linked list starting at the 'dev' field of a struct pcmcia_device
106 structure. We allocate them in the card's private data structure, 106 structure. We allocate them in the card's private data structure,
107 because they generally shouldn't be allocated dynamically. 107 because they generally shouldn't be allocated dynamically.
108 In this case, we also provide a flag to indicate if a device is 108 In this case, we also provide a flag to indicate if a device is
@@ -112,7 +112,7 @@ static void teles_detach(struct pcmcia_device *p_dev);
112*/ 112*/
113 113
114typedef struct local_info_t { 114typedef struct local_info_t {
115 dev_link_t link; 115 struct pcmcia_device *p_dev;
116 dev_node_t node; 116 dev_node_t node;
117 int busy; 117 int busy;
118 int cardnr; 118 int cardnr;
@@ -130,9 +130,8 @@ typedef struct local_info_t {
130 130
131======================================================================*/ 131======================================================================*/
132 132
133static int teles_attach(struct pcmcia_device *p_dev) 133static int teles_probe(struct pcmcia_device *link)
134{ 134{
135 dev_link_t *link;
136 local_info_t *local; 135 local_info_t *local;
137 136
138 DEBUG(0, "teles_attach()\n"); 137 DEBUG(0, "teles_attach()\n");
@@ -142,7 +141,9 @@ static int teles_attach(struct pcmcia_device *p_dev)
142 if (!local) return -ENOMEM; 141 if (!local) return -ENOMEM;
143 memset(local, 0, sizeof(local_info_t)); 142 memset(local, 0, sizeof(local_info_t));
144 local->cardnr = -1; 143 local->cardnr = -1;
145 link = &local->link; link->priv = local; 144
145 local->p_dev = link;
146 link->priv = local;
146 147
147 /* Interrupt setup */ 148 /* Interrupt setup */
148 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; 149 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
@@ -161,16 +162,9 @@ static int teles_attach(struct pcmcia_device *p_dev)
161 link->io.IOAddrLines = 5; 162 link->io.IOAddrLines = 5;
162 163
163 link->conf.Attributes = CONF_ENABLE_IRQ; 164 link->conf.Attributes = CONF_ENABLE_IRQ;
164 link->conf.Vcc = 50;
165 link->conf.IntType = INT_MEMORY_AND_IO; 165 link->conf.IntType = INT_MEMORY_AND_IO;
166 166
167 link->handle = p_dev; 167 return teles_cs_config(link);
168 p_dev->instance = link;
169
170 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
171 teles_cs_config(link);
172
173 return 0;
174} /* teles_attach */ 168} /* teles_attach */
175 169
176/*====================================================================== 170/*======================================================================
@@ -182,20 +176,16 @@ static int teles_attach(struct pcmcia_device *p_dev)
182 176
183======================================================================*/ 177======================================================================*/
184 178
185static void teles_detach(struct pcmcia_device *p_dev) 179static void teles_detach(struct pcmcia_device *link)
186{ 180{
187 dev_link_t *link = dev_to_instance(p_dev); 181 local_info_t *info = link->priv;
188 local_info_t *info = link->priv;
189
190 DEBUG(0, "teles_detach(0x%p)\n", link);
191 182
192 if (link->state & DEV_CONFIG) { 183 DEBUG(0, "teles_detach(0x%p)\n", link);
193 info->busy = 1;
194 teles_cs_release(link);
195 }
196 184
197 kfree(info); 185 info->busy = 1;
186 teles_cs_release(link);
198 187
188 kfree(info);
199} /* teles_detach */ 189} /* teles_detach */
200 190
201/*====================================================================== 191/*======================================================================
@@ -205,7 +195,7 @@ static void teles_detach(struct pcmcia_device *p_dev)
205 device available to the system. 195 device available to the system.
206 196
207======================================================================*/ 197======================================================================*/
208static int get_tuple(client_handle_t handle, tuple_t *tuple, 198static int get_tuple(struct pcmcia_device *handle, tuple_t *tuple,
209 cisparse_t *parse) 199 cisparse_t *parse)
210{ 200{
211 int i = pcmcia_get_tuple_data(handle, tuple); 201 int i = pcmcia_get_tuple_data(handle, tuple);
@@ -213,7 +203,7 @@ static int get_tuple(client_handle_t handle, tuple_t *tuple,
213 return pcmcia_parse_tuple(handle, tuple, parse); 203 return pcmcia_parse_tuple(handle, tuple, parse);
214} 204}
215 205
216static int first_tuple(client_handle_t handle, tuple_t *tuple, 206static int first_tuple(struct pcmcia_device *handle, tuple_t *tuple,
217 cisparse_t *parse) 207 cisparse_t *parse)
218{ 208{
219 int i = pcmcia_get_first_tuple(handle, tuple); 209 int i = pcmcia_get_first_tuple(handle, tuple);
@@ -221,7 +211,7 @@ static int first_tuple(client_handle_t handle, tuple_t *tuple,
221 return get_tuple(handle, tuple, parse); 211 return get_tuple(handle, tuple, parse);
222} 212}
223 213
224static int next_tuple(client_handle_t handle, tuple_t *tuple, 214static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple,
225 cisparse_t *parse) 215 cisparse_t *parse)
226{ 216{
227 int i = pcmcia_get_next_tuple(handle, tuple); 217 int i = pcmcia_get_next_tuple(handle, tuple);
@@ -229,9 +219,8 @@ static int next_tuple(client_handle_t handle, tuple_t *tuple,
229 return get_tuple(handle, tuple, parse); 219 return get_tuple(handle, tuple, parse);
230} 220}
231 221
232static void teles_cs_config(dev_link_t *link) 222static int teles_cs_config(struct pcmcia_device *link)
233{ 223{
234 client_handle_t handle;
235 tuple_t tuple; 224 tuple_t tuple;
236 cisparse_t parse; 225 cisparse_t parse;
237 local_info_t *dev; 226 local_info_t *dev;
@@ -241,7 +230,6 @@ static void teles_cs_config(dev_link_t *link)
241 IsdnCard_t icard; 230 IsdnCard_t icard;
242 231
243 DEBUG(0, "teles_config(0x%p)\n", link); 232 DEBUG(0, "teles_config(0x%p)\n", link);
244 handle = link->handle;
245 dev = link->priv; 233 dev = link->priv;
246 234
247 /* 235 /*
@@ -253,7 +241,7 @@ static void teles_cs_config(dev_link_t *link)
253 tuple.TupleDataMax = 255; 241 tuple.TupleDataMax = 255;
254 tuple.TupleOffset = 0; 242 tuple.TupleOffset = 0;
255 tuple.Attributes = 0; 243 tuple.Attributes = 0;
256 i = first_tuple(handle, &tuple, &parse); 244 i = first_tuple(link, &tuple, &parse);
257 if (i != CS_SUCCESS) { 245 if (i != CS_SUCCESS) {
258 last_fn = ParseTuple; 246 last_fn = ParseTuple;
259 goto cs_failed; 247 goto cs_failed;
@@ -261,32 +249,29 @@ static void teles_cs_config(dev_link_t *link)
261 link->conf.ConfigBase = parse.config.base; 249 link->conf.ConfigBase = parse.config.base;
262 link->conf.Present = parse.config.rmask[0]; 250 link->conf.Present = parse.config.rmask[0];
263 251
264 /* Configure card */
265 link->state |= DEV_CONFIG;
266
267 tuple.TupleData = (cisdata_t *)buf; 252 tuple.TupleData = (cisdata_t *)buf;
268 tuple.TupleOffset = 0; tuple.TupleDataMax = 255; 253 tuple.TupleOffset = 0; tuple.TupleDataMax = 255;
269 tuple.Attributes = 0; 254 tuple.Attributes = 0;
270 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 255 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
271 i = first_tuple(handle, &tuple, &parse); 256 i = first_tuple(link, &tuple, &parse);
272 while (i == CS_SUCCESS) { 257 while (i == CS_SUCCESS) {
273 if ( (cf->io.nwin > 0) && cf->io.win[0].base) { 258 if ( (cf->io.nwin > 0) && cf->io.win[0].base) {
274 printk(KERN_INFO "(teles_cs: looks like the 96 model)\n"); 259 printk(KERN_INFO "(teles_cs: looks like the 96 model)\n");
275 link->conf.ConfigIndex = cf->index; 260 link->conf.ConfigIndex = cf->index;
276 link->io.BasePort1 = cf->io.win[0].base; 261 link->io.BasePort1 = cf->io.win[0].base;
277 i = pcmcia_request_io(link->handle, &link->io); 262 i = pcmcia_request_io(link, &link->io);
278 if (i == CS_SUCCESS) break; 263 if (i == CS_SUCCESS) break;
279 } else { 264 } else {
280 printk(KERN_INFO "(teles_cs: looks like the 97 model)\n"); 265 printk(KERN_INFO "(teles_cs: looks like the 97 model)\n");
281 link->conf.ConfigIndex = cf->index; 266 link->conf.ConfigIndex = cf->index;
282 for (i = 0, j = 0x2f0; j > 0x100; j -= 0x10) { 267 for (i = 0, j = 0x2f0; j > 0x100; j -= 0x10) {
283 link->io.BasePort1 = j; 268 link->io.BasePort1 = j;
284 i = pcmcia_request_io(link->handle, &link->io); 269 i = pcmcia_request_io(link, &link->io);
285 if (i == CS_SUCCESS) break; 270 if (i == CS_SUCCESS) break;
286 } 271 }
287 break; 272 break;
288 } 273 }
289 i = next_tuple(handle, &tuple, &parse); 274 i = next_tuple(link, &tuple, &parse);
290 } 275 }
291 276
292 if (i != CS_SUCCESS) { 277 if (i != CS_SUCCESS) {
@@ -294,14 +279,14 @@ static void teles_cs_config(dev_link_t *link)
294 goto cs_failed; 279 goto cs_failed;
295 } 280 }
296 281
297 i = pcmcia_request_irq(link->handle, &link->irq); 282 i = pcmcia_request_irq(link, &link->irq);
298 if (i != CS_SUCCESS) { 283 if (i != CS_SUCCESS) {
299 link->irq.AssignedIRQ = 0; 284 link->irq.AssignedIRQ = 0;
300 last_fn = RequestIRQ; 285 last_fn = RequestIRQ;
301 goto cs_failed; 286 goto cs_failed;
302 } 287 }
303 288
304 i = pcmcia_request_configuration(link->handle, &link->conf); 289 i = pcmcia_request_configuration(link, &link->conf);
305 if (i != CS_SUCCESS) { 290 if (i != CS_SUCCESS) {
306 last_fn = RequestConfiguration; 291 last_fn = RequestConfiguration;
307 goto cs_failed; 292 goto cs_failed;
@@ -312,14 +297,11 @@ static void teles_cs_config(dev_link_t *link)
312 sprintf(dev->node.dev_name, "teles"); 297 sprintf(dev->node.dev_name, "teles");
313 dev->node.major = dev->node.minor = 0x0; 298 dev->node.major = dev->node.minor = 0x0;
314 299
315 link->dev = &dev->node; 300 link->dev_node = &dev->node;
316 301
317 /* Finally, report what we've done */ 302 /* Finally, report what we've done */
318 printk(KERN_INFO "%s: index 0x%02x: Vcc %d.%d", 303 printk(KERN_INFO "%s: index 0x%02x:",
319 dev->node.dev_name, link->conf.ConfigIndex, 304 dev->node.dev_name, link->conf.ConfigIndex);
320 link->conf.Vcc/10, link->conf.Vcc%10);
321 if (link->conf.Vpp1)
322 printk(", Vpp %d.%d", link->conf.Vpp1/10, link->conf.Vpp1%10);
323 if (link->conf.Attributes & CONF_ENABLE_IRQ) 305 if (link->conf.Attributes & CONF_ENABLE_IRQ)
324 printk(", irq %d", link->irq.AssignedIRQ); 306 printk(", irq %d", link->irq.AssignedIRQ);
325 if (link->io.NumPorts1) 307 if (link->io.NumPorts1)
@@ -330,8 +312,6 @@ static void teles_cs_config(dev_link_t *link)
330 link->io.BasePort2+link->io.NumPorts2-1); 312 link->io.BasePort2+link->io.NumPorts2-1);
331 printk("\n"); 313 printk("\n");
332 314
333 link->state &= ~DEV_CONFIG_PENDING;
334
335 icard.para[0] = link->irq.AssignedIRQ; 315 icard.para[0] = link->irq.AssignedIRQ;
336 icard.para[1] = link->io.BasePort1; 316 icard.para[1] = link->io.BasePort1;
337 icard.protocol = protocol; 317 icard.protocol = protocol;
@@ -342,13 +322,16 @@ static void teles_cs_config(dev_link_t *link)
342 printk(KERN_ERR "teles_cs: failed to initialize Teles PCMCIA %d at i/o %#x\n", 322 printk(KERN_ERR "teles_cs: failed to initialize Teles PCMCIA %d at i/o %#x\n",
343 i, link->io.BasePort1); 323 i, link->io.BasePort1);
344 teles_cs_release(link); 324 teles_cs_release(link);
345 } else 325 return -ENODEV;
346 ((local_info_t*)link->priv)->cardnr = i; 326 }
327
328 ((local_info_t*)link->priv)->cardnr = i;
329 return 0;
347 330
348 return;
349cs_failed: 331cs_failed:
350 cs_error(link->handle, last_fn, i); 332 cs_error(link, last_fn, i);
351 teles_cs_release(link); 333 teles_cs_release(link);
334 return -ENODEV;
352} /* teles_cs_config */ 335} /* teles_cs_config */
353 336
354/*====================================================================== 337/*======================================================================
@@ -359,7 +342,7 @@ cs_failed:
359 342
360======================================================================*/ 343======================================================================*/
361 344
362static void teles_cs_release(dev_link_t *link) 345static void teles_cs_release(struct pcmcia_device *link)
363{ 346{
364 local_info_t *local = link->priv; 347 local_info_t *local = link->priv;
365 348
@@ -371,39 +354,23 @@ static void teles_cs_release(dev_link_t *link)
371 HiSax_closecard(local->cardnr); 354 HiSax_closecard(local->cardnr);
372 } 355 }
373 } 356 }
374 /* Unlink the device chain */ 357
375 link->dev = NULL; 358 pcmcia_disable_device(link);
376
377 /* Don't bother checking to see if these succeed or not */
378 if (link->win)
379 pcmcia_release_window(link->win);
380 pcmcia_release_configuration(link->handle);
381 pcmcia_release_io(link->handle, &link->io);
382 pcmcia_release_irq(link->handle, &link->irq);
383 link->state &= ~DEV_CONFIG;
384} /* teles_cs_release */ 359} /* teles_cs_release */
385 360
386static int teles_suspend(struct pcmcia_device *p_dev) 361static int teles_suspend(struct pcmcia_device *link)
387{ 362{
388 dev_link_t *link = dev_to_instance(p_dev);
389 local_info_t *dev = link->priv; 363 local_info_t *dev = link->priv;
390 364
391 link->state |= DEV_SUSPEND;
392 dev->busy = 1; 365 dev->busy = 1;
393 if (link->state & DEV_CONFIG)
394 pcmcia_release_configuration(link->handle);
395 366
396 return 0; 367 return 0;
397} 368}
398 369
399static int teles_resume(struct pcmcia_device *p_dev) 370static int teles_resume(struct pcmcia_device *link)
400{ 371{
401 dev_link_t *link = dev_to_instance(p_dev);
402 local_info_t *dev = link->priv; 372 local_info_t *dev = link->priv;
403 373
404 link->state &= ~DEV_SUSPEND;
405 if (link->state & DEV_CONFIG)
406 pcmcia_request_configuration(link->handle, &link->conf);
407 dev->busy = 0; 374 dev->busy = 0;
408 375
409 return 0; 376 return 0;
@@ -421,7 +388,7 @@ static struct pcmcia_driver teles_cs_driver = {
421 .drv = { 388 .drv = {
422 .name = "teles_cs", 389 .name = "teles_cs",
423 }, 390 },
424 .probe = teles_attach, 391 .probe = teles_probe,
425 .remove = teles_detach, 392 .remove = teles_detach,
426 .id_table = teles_ids, 393 .id_table = teles_ids,
427 .suspend = teles_suspend, 394 .suspend = teles_suspend,
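
The teles_cs suspend/resume hunks show the same simplification as in sedlbauer_cs: the DEV_SUSPEND/DEV_CONFIG bookkeeping and the manual pcmcia_release_configuration()/pcmcia_request_configuration() calls go away, since the PCMCIA core now saves and restores the socket configuration around suspend itself. A hedged sketch of what the resulting callbacks reduce to, with hypothetical names:

#include <pcmcia/ds.h>

struct example_info {
        int busy;       /* driver-private "hands off the hardware" flag */
};

static int example_suspend(struct pcmcia_device *link)
{
        struct example_info *info = link->priv;

        info->busy = 1; /* no pcmcia_release_configuration() needed any more */
        return 0;
}

static int example_resume(struct pcmcia_device *link)
{
        struct example_info *info = link->priv;

        info->busy = 0; /* the core has already re-applied link->conf */
        return 0;
}
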
diff --git a/drivers/isdn/sc/ioctl.c b/drivers/isdn/sc/ioctl.c
index 94c9afb7017c..f4f71226a078 100644
--- a/drivers/isdn/sc/ioctl.c
+++ b/drivers/isdn/sc/ioctl.c
@@ -46,7 +46,8 @@ int sc_ioctl(int card, scs_ioctl *data)
46 pr_debug("%s: SCIOCRESET: ioctl received\n", 46 pr_debug("%s: SCIOCRESET: ioctl received\n",
47 sc_adapter[card]->devicename); 47 sc_adapter[card]->devicename);
48 sc_adapter[card]->StartOnReset = 0; 48 sc_adapter[card]->StartOnReset = 0;
49 return (reset(card)); 49 kfree(rcvmsg);
50 return reset(card);
50 } 51 }
51 52
52 case SCIOCLOAD: 53 case SCIOCLOAD:
@@ -183,7 +184,7 @@ int sc_ioctl(int card, scs_ioctl *data)
183 sc_adapter[card]->devicename); 184 sc_adapter[card]->devicename);
184 185
185 spid = kmalloc(SCIOC_SPIDSIZE, GFP_KERNEL); 186 spid = kmalloc(SCIOC_SPIDSIZE, GFP_KERNEL);
186 if(!spid) { 187 if (!spid) {
187 kfree(rcvmsg); 188 kfree(rcvmsg);
188 return -ENOMEM; 189 return -ENOMEM;
189 } 190 }
@@ -195,10 +196,10 @@ int sc_ioctl(int card, scs_ioctl *data)
195 if (!status) { 196 if (!status) {
196 pr_debug("%s: SCIOCGETSPID: command successful\n", 197 pr_debug("%s: SCIOCGETSPID: command successful\n",
197 sc_adapter[card]->devicename); 198 sc_adapter[card]->devicename);
198 } 199 } else {
199 else {
200 pr_debug("%s: SCIOCGETSPID: command failed (status = %d)\n", 200 pr_debug("%s: SCIOCGETSPID: command failed (status = %d)\n",
201 sc_adapter[card]->devicename, status); 201 sc_adapter[card]->devicename, status);
202 kfree(spid);
202 kfree(rcvmsg); 203 kfree(rcvmsg);
203 return status; 204 return status;
204 } 205 }
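
The sc_ioctl() hunks are memory-leak fixes: the early return in the SCIOCRESET case now frees rcvmsg first, and the SCIOCGETSPID error path frees spid in addition to rcvmsg. The rule being applied is that every exit path must release whatever has been allocated up to that point; a simplified, hypothetical illustration of the pattern (example_ioctl_case and do_command are not the driver's code):

#include <linux/slab.h>

static int do_command(int card, char *rcv, char *spid); /* hypothetical helper */

static int example_ioctl_case(int card, unsigned long arg)
{
        char *rcvmsg, *spid;
        int status;

        rcvmsg = kzalloc(64, GFP_KERNEL);
        if (!rcvmsg)
                return -ENOMEM;

        if (arg == 0) {
                kfree(rcvmsg);  /* early return: free what is already held */
                return 0;
        }

        spid = kzalloc(32, GFP_KERNEL);
        if (!spid) {
                kfree(rcvmsg);  /* first allocation must not leak */
                return -ENOMEM;
        }

        status = do_command(card, rcvmsg, spid);
        if (status) {
                kfree(spid);    /* failure path: release both buffers */
                kfree(rcvmsg);
                return status;
        }

        kfree(spid);            /* success path releases them too */
        kfree(rcvmsg);
        return 0;
}
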
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
new file mode 100644
index 000000000000..2c4f20b7f021
--- /dev/null
+++ b/drivers/leds/Kconfig
@@ -0,0 +1,77 @@
1
2menu "LED devices"
3
4config NEW_LEDS
5 bool "LED Support"
6 help
7 Say Y to enable Linux LED support. This is not related to standard
8 keyboard LEDs which are controlled via the input system.
9
10config LEDS_CLASS
11 tristate "LED Class Support"
12 depends NEW_LEDS
13 help
14 This option enables the led sysfs class in /sys/class/leds. You'll
15 need this to do anything useful with LEDs. If unsure, say N.
16
17config LEDS_TRIGGERS
18 bool "LED Trigger support"
19 depends NEW_LEDS
20 help
21 This option enables trigger support for the leds class.
22 These triggers allow kernel events to drive the LEDs and can
23 be configured via sysfs. If unsure, say Y.
24
25config LEDS_CORGI
26 tristate "LED Support for the Sharp SL-C7x0 series"
27 depends LEDS_CLASS && PXA_SHARP_C7xx
28 help
29 This option enables support for the LEDs on Sharp Zaurus
30 SL-C7x0 series (C700, C750, C760, C860).
31
32config LEDS_LOCOMO
33 tristate "LED Support for Locomo device"
34 depends LEDS_CLASS && SHARP_LOCOMO
35 help
36 This option enables support for the LEDs on Sharp Locomo.
37 Zaurus models SL-5500 and SL-5600.
38
39config LEDS_SPITZ
40 tristate "LED Support for the Sharp SL-Cxx00 series"
41 depends LEDS_CLASS && PXA_SHARP_Cxx00
42 help
43 This option enables support for the LEDs on Sharp Zaurus
44 SL-Cxx00 series (C1000, C3000, C3100).
45
46config LEDS_IXP4XX
47 tristate "LED Support for GPIO connected LEDs on IXP4XX processors"
48 depends LEDS_CLASS && ARCH_IXP4XX
49 help
50 This option enables support for the LEDs connected to GPIO
51 outputs of the Intel IXP4XX processors. To be useful the
52 particular board must have LEDs and they must be connected
53 to the GPIO lines. If unsure, say Y.
54
55config LEDS_TOSA
56 tristate "LED Support for the Sharp SL-6000 series"
57 depends LEDS_CLASS && PXA_SHARPSL
58 help
59 This option enables support for the LEDs on Sharp Zaurus
60 SL-6000 series.
61
62config LEDS_TRIGGER_TIMER
63 tristate "LED Timer Trigger"
64 depends LEDS_TRIGGERS
65 help
66 This allows LEDs to be controlled by a programmable timer
67 via sysfs. If unsure, say Y.
68
69config LEDS_TRIGGER_IDE_DISK
70 bool "LED Timer Trigger"
71 depends LEDS_TRIGGERS && BLK_DEV_IDEDISK
72 help
73 This allows LEDs to be controlled by IDE disk activity.
74 If unsure, say Y.
75
76endmenu
77
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
new file mode 100644
index 000000000000..40699d3cabbf
--- /dev/null
+++ b/drivers/leds/Makefile
@@ -0,0 +1,16 @@
1
2# LED Core
3obj-$(CONFIG_NEW_LEDS) += led-core.o
4obj-$(CONFIG_LEDS_CLASS) += led-class.o
5obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o
6
7# LED Platform Drivers
8obj-$(CONFIG_LEDS_CORGI) += leds-corgi.o
9obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
10obj-$(CONFIG_LEDS_SPITZ) += leds-spitz.o
11obj-$(CONFIG_LEDS_IXP4XX) += leds-ixp4xx-gpio.o
12obj-$(CONFIG_LEDS_TOSA) += leds-tosa.o
13
14# LED Triggers
15obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o
16obj-$(CONFIG_LEDS_TRIGGER_IDE_DISK) += ledtrig-ide-disk.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
new file mode 100644
index 000000000000..b0b5d05fadd6
--- /dev/null
+++ b/drivers/leds/led-class.c
@@ -0,0 +1,167 @@
1/*
2 * LED Class Core
3 *
4 * Copyright (C) 2005 John Lenz <lenz@cs.wisc.edu>
5 * Copyright (C) 2005-2006 Richard Purdie <rpurdie@openedhand.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/list.h>
17#include <linux/spinlock.h>
18#include <linux/device.h>
19#include <linux/sysdev.h>
20#include <linux/timer.h>
21#include <linux/err.h>
22#include <linux/leds.h>
23#include "leds.h"
24
25static struct class *leds_class;
26
27static ssize_t led_brightness_show(struct class_device *dev, char *buf)
28{
29 struct led_classdev *led_cdev = class_get_devdata(dev);
30 ssize_t ret = 0;
31
32 /* no lock needed for this */
33 sprintf(buf, "%u\n", led_cdev->brightness);
34 ret = strlen(buf) + 1;
35
36 return ret;
37}
38
39static ssize_t led_brightness_store(struct class_device *dev,
40 const char *buf, size_t size)
41{
42 struct led_classdev *led_cdev = class_get_devdata(dev);
43 ssize_t ret = -EINVAL;
44 char *after;
45 unsigned long state = simple_strtoul(buf, &after, 10);
46
47 if (after - buf > 0) {
48 ret = after - buf;
49 led_set_brightness(led_cdev, state);
50 }
51
52 return ret;
53}
54
55static CLASS_DEVICE_ATTR(brightness, 0644, led_brightness_show,
56 led_brightness_store);
57#ifdef CONFIG_LEDS_TRIGGERS
58static CLASS_DEVICE_ATTR(trigger, 0644, led_trigger_show, led_trigger_store);
59#endif
60
61/**
62 * led_classdev_suspend - suspend an led_classdev.
63 * @led_cdev: the led_classdev to suspend.
64 */
65void led_classdev_suspend(struct led_classdev *led_cdev)
66{
67 led_cdev->flags |= LED_SUSPENDED;
68 led_cdev->brightness_set(led_cdev, 0);
69}
70EXPORT_SYMBOL_GPL(led_classdev_suspend);
71
72/**
73 * led_classdev_resume - resume an led_classdev.
74 * @led_cdev: the led_classdev to resume.
75 */
76void led_classdev_resume(struct led_classdev *led_cdev)
77{
78 led_cdev->brightness_set(led_cdev, led_cdev->brightness);
79 led_cdev->flags &= ~LED_SUSPENDED;
80}
81EXPORT_SYMBOL_GPL(led_classdev_resume);
82
83/**
84 * led_classdev_register - register a new object of led_classdev class.
85 * @dev: The device to register.
86 * @led_cdev: the led_classdev structure for this device.
87 */
88int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
89{
90 led_cdev->class_dev = class_device_create(leds_class, NULL, 0,
91 parent, "%s", led_cdev->name);
92 if (unlikely(IS_ERR(led_cdev->class_dev)))
93 return PTR_ERR(led_cdev->class_dev);
94
95 class_set_devdata(led_cdev->class_dev, led_cdev);
96
97 /* register the attributes */
98 class_device_create_file(led_cdev->class_dev,
99 &class_device_attr_brightness);
100
101 /* add to the list of leds */
102 write_lock(&leds_list_lock);
103 list_add_tail(&led_cdev->node, &leds_list);
104 write_unlock(&leds_list_lock);
105
106#ifdef CONFIG_LEDS_TRIGGERS
107 rwlock_init(&led_cdev->trigger_lock);
108
109 led_trigger_set_default(led_cdev);
110
111 class_device_create_file(led_cdev->class_dev,
112 &class_device_attr_trigger);
113#endif
114
115 printk(KERN_INFO "Registered led device: %s\n",
116 led_cdev->class_dev->class_id);
117
118 return 0;
119}
120EXPORT_SYMBOL_GPL(led_classdev_register);
121
122/**
123 * led_classdev_unregister - unregisters a object of led_properties class.
124 * @led_cdev: the led device to unregister
125 *
126 * Unregisters an object previously registered via led_classdev_register.
127 */
128void led_classdev_unregister(struct led_classdev *led_cdev)
129{
130 class_device_remove_file(led_cdev->class_dev,
131 &class_device_attr_brightness);
132#ifdef CONFIG_LEDS_TRIGGERS
133 class_device_remove_file(led_cdev->class_dev,
134 &class_device_attr_trigger);
135 write_lock(&led_cdev->trigger_lock);
136 if (led_cdev->trigger)
137 led_trigger_set(led_cdev, NULL);
138 write_unlock(&led_cdev->trigger_lock);
139#endif
140
141 class_device_unregister(led_cdev->class_dev);
142
143 write_lock(&leds_list_lock);
144 list_del(&led_cdev->node);
145 write_unlock(&leds_list_lock);
146}
147EXPORT_SYMBOL_GPL(led_classdev_unregister);
148
149static int __init leds_init(void)
150{
151 leds_class = class_create(THIS_MODULE, "leds");
152 if (IS_ERR(leds_class))
153 return PTR_ERR(leds_class);
154 return 0;
155}
156
157static void __exit leds_exit(void)
158{
159 class_destroy(leds_class);
160}
161
162subsys_initcall(leds_init);
163module_exit(leds_exit);
164
165MODULE_AUTHOR("John Lenz, Richard Purdie");
166MODULE_LICENSE("GPL");
167MODULE_DESCRIPTION("LED Class Interface");
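
led-class.c is the user-visible half of the new LED subsystem: every registered led_classdev shows up as /sys/class/leds/<name> with a writable brightness attribute, plus a trigger attribute when CONFIG_LEDS_TRIGGERS is enabled. A minimal sketch of how a driver plugs into it, mirroring the platform drivers added later in this patch; the demo_* identifiers are hypothetical:

#include <linux/leds.h>
#include <linux/platform_device.h>

/* Hypothetical hardware hook: any non-zero brightness turns the LED on. */
static void demo_set(struct led_classdev *led_cdev, enum led_brightness value)
{
        /* drive the GPIO or register that controls the LED here */
}

static struct led_classdev demo_led = {
        .name            = "demo:green",        /* appears as /sys/class/leds/demo:green */
        .brightness_set  = demo_set,
        .default_trigger = "timer",             /* optional; bound if such a trigger exists */
};

static int demo_probe(struct platform_device *pdev)
{
        return led_classdev_register(&pdev->dev, &demo_led);
}

static int demo_remove(struct platform_device *pdev)
{
        led_classdev_unregister(&demo_led);
        return 0;
}
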
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
new file mode 100644
index 000000000000..fe6541326c71
--- /dev/null
+++ b/drivers/leds/led-core.c
@@ -0,0 +1,25 @@
1/*
2 * LED Class Core
3 *
4 * Copyright 2005-2006 Openedhand Ltd.
5 *
6 * Author: Richard Purdie <rpurdie@openedhand.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/kernel.h>
15#include <linux/list.h>
16#include <linux/module.h>
17#include <linux/spinlock.h>
18#include <linux/leds.h>
19#include "leds.h"
20
21rwlock_t leds_list_lock = RW_LOCK_UNLOCKED;
22LIST_HEAD(leds_list);
23
24EXPORT_SYMBOL_GPL(leds_list);
25EXPORT_SYMBOL_GPL(leds_list_lock);
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
new file mode 100644
index 000000000000..5e2cd8be1191
--- /dev/null
+++ b/drivers/leds/led-triggers.c
@@ -0,0 +1,239 @@
1/*
2 * LED Triggers Core
3 *
4 * Copyright 2005-2006 Openedhand Ltd.
5 *
6 * Author: Richard Purdie <rpurdie@openedhand.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/config.h>
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/list.h>
19#include <linux/spinlock.h>
20#include <linux/device.h>
21#include <linux/sysdev.h>
22#include <linux/timer.h>
23#include <linux/leds.h>
24#include "leds.h"
25
26/*
27 * Nests outside led_cdev->trigger_lock
28 */
29static rwlock_t triggers_list_lock = RW_LOCK_UNLOCKED;
30static LIST_HEAD(trigger_list);
31
32ssize_t led_trigger_store(struct class_device *dev, const char *buf,
33 size_t count)
34{
35 struct led_classdev *led_cdev = class_get_devdata(dev);
36 char trigger_name[TRIG_NAME_MAX];
37 struct led_trigger *trig;
38 size_t len;
39
40 trigger_name[sizeof(trigger_name) - 1] = '\0';
41 strncpy(trigger_name, buf, sizeof(trigger_name) - 1);
42 len = strlen(trigger_name);
43
44 if (len && trigger_name[len - 1] == '\n')
45 trigger_name[len - 1] = '\0';
46
47 if (!strcmp(trigger_name, "none")) {
48 write_lock(&led_cdev->trigger_lock);
49 led_trigger_set(led_cdev, NULL);
50 write_unlock(&led_cdev->trigger_lock);
51 return count;
52 }
53
54 read_lock(&triggers_list_lock);
55 list_for_each_entry(trig, &trigger_list, next_trig) {
56 if (!strcmp(trigger_name, trig->name)) {
57 write_lock(&led_cdev->trigger_lock);
58 led_trigger_set(led_cdev, trig);
59 write_unlock(&led_cdev->trigger_lock);
60
61 read_unlock(&triggers_list_lock);
62 return count;
63 }
64 }
65 read_unlock(&triggers_list_lock);
66
67 return -EINVAL;
68}
69
70
71ssize_t led_trigger_show(struct class_device *dev, char *buf)
72{
73 struct led_classdev *led_cdev = class_get_devdata(dev);
74 struct led_trigger *trig;
75 int len = 0;
76
77 read_lock(&triggers_list_lock);
78 read_lock(&led_cdev->trigger_lock);
79
80 if (!led_cdev->trigger)
81 len += sprintf(buf+len, "[none] ");
82 else
83 len += sprintf(buf+len, "none ");
84
85 list_for_each_entry(trig, &trigger_list, next_trig) {
86 if (led_cdev->trigger && !strcmp(led_cdev->trigger->name,
87 trig->name))
88 len += sprintf(buf+len, "[%s] ", trig->name);
89 else
90 len += sprintf(buf+len, "%s ", trig->name);
91 }
92 read_unlock(&led_cdev->trigger_lock);
93 read_unlock(&triggers_list_lock);
94
95 len += sprintf(len+buf, "\n");
96 return len;
97}
98
99void led_trigger_event(struct led_trigger *trigger,
100 enum led_brightness brightness)
101{
102 struct list_head *entry;
103
104 if (!trigger)
105 return;
106
107 read_lock(&trigger->leddev_list_lock);
108 list_for_each(entry, &trigger->led_cdevs) {
109 struct led_classdev *led_cdev;
110
111 led_cdev = list_entry(entry, struct led_classdev, trig_list);
112 led_set_brightness(led_cdev, brightness);
113 }
114 read_unlock(&trigger->leddev_list_lock);
115}
116
117/* Caller must ensure led_cdev->trigger_lock held */
118void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger)
119{
120 unsigned long flags;
121
122 /* Remove any existing trigger */
123 if (led_cdev->trigger) {
124 write_lock_irqsave(&led_cdev->trigger->leddev_list_lock, flags);
125 list_del(&led_cdev->trig_list);
126 write_unlock_irqrestore(&led_cdev->trigger->leddev_list_lock, flags);
127 if (led_cdev->trigger->deactivate)
128 led_cdev->trigger->deactivate(led_cdev);
129 }
130 if (trigger) {
131 write_lock_irqsave(&trigger->leddev_list_lock, flags);
132 list_add_tail(&led_cdev->trig_list, &trigger->led_cdevs);
133 write_unlock_irqrestore(&trigger->leddev_list_lock, flags);
134 if (trigger->activate)
135 trigger->activate(led_cdev);
136 }
137 led_cdev->trigger = trigger;
138}
139
140void led_trigger_set_default(struct led_classdev *led_cdev)
141{
142 struct led_trigger *trig;
143
144 if (!led_cdev->default_trigger)
145 return;
146
147 read_lock(&triggers_list_lock);
148 write_lock(&led_cdev->trigger_lock);
149 list_for_each_entry(trig, &trigger_list, next_trig) {
150 if (!strcmp(led_cdev->default_trigger, trig->name))
151 led_trigger_set(led_cdev, trig);
152 }
153 write_unlock(&led_cdev->trigger_lock);
154 read_unlock(&triggers_list_lock);
155}
156
157int led_trigger_register(struct led_trigger *trigger)
158{
159 struct led_classdev *led_cdev;
160
161 rwlock_init(&trigger->leddev_list_lock);
162 INIT_LIST_HEAD(&trigger->led_cdevs);
163
164 /* Add to the list of led triggers */
165 write_lock(&triggers_list_lock);
166 list_add_tail(&trigger->next_trig, &trigger_list);
167 write_unlock(&triggers_list_lock);
168
169 /* Register with any LEDs that have this as a default trigger */
170 read_lock(&leds_list_lock);
171 list_for_each_entry(led_cdev, &leds_list, node) {
172 write_lock(&led_cdev->trigger_lock);
173 if (!led_cdev->trigger && led_cdev->default_trigger &&
174 !strcmp(led_cdev->default_trigger, trigger->name))
175 led_trigger_set(led_cdev, trigger);
176 write_unlock(&led_cdev->trigger_lock);
177 }
178 read_unlock(&leds_list_lock);
179
180 return 0;
181}
182
183void led_trigger_register_simple(const char *name, struct led_trigger **tp)
184{
185 struct led_trigger *trigger;
186
187 trigger = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
188
189 if (trigger) {
190 trigger->name = name;
191 led_trigger_register(trigger);
192 }
193 *tp = trigger;
194}
195
196void led_trigger_unregister(struct led_trigger *trigger)
197{
198 struct led_classdev *led_cdev;
199
200 /* Remove from the list of led triggers */
201 write_lock(&triggers_list_lock);
202 list_del(&trigger->next_trig);
203 write_unlock(&triggers_list_lock);
204
205 /* Remove anyone actively using this trigger */
206 read_lock(&leds_list_lock);
207 list_for_each_entry(led_cdev, &leds_list, node) {
208 write_lock(&led_cdev->trigger_lock);
209 if (led_cdev->trigger == trigger)
210 led_trigger_set(led_cdev, NULL);
211 write_unlock(&led_cdev->trigger_lock);
212 }
213 read_unlock(&leds_list_lock);
214}
215
216void led_trigger_unregister_simple(struct led_trigger *trigger)
217{
218 led_trigger_unregister(trigger);
219 kfree(trigger);
220}
221
222/* Used by LED Class */
223EXPORT_SYMBOL_GPL(led_trigger_set);
224EXPORT_SYMBOL_GPL(led_trigger_set_default);
225EXPORT_SYMBOL_GPL(led_trigger_show);
226EXPORT_SYMBOL_GPL(led_trigger_store);
227
228/* LED Trigger Interface */
229EXPORT_SYMBOL_GPL(led_trigger_register);
230EXPORT_SYMBOL_GPL(led_trigger_unregister);
231
232/* Simple LED Trigger Interface */
233EXPORT_SYMBOL_GPL(led_trigger_register_simple);
234EXPORT_SYMBOL_GPL(led_trigger_unregister_simple);
235EXPORT_SYMBOL_GPL(led_trigger_event);
236
237MODULE_AUTHOR("Richard Purdie");
238MODULE_LICENSE("GPL");
239MODULE_DESCRIPTION("LED Triggers Core");
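
led-triggers.c lets kernel events drive LEDs without the event source knowing which LEDs exist: a trigger is registered under a name, LEDs bind to it through their default_trigger string or the sysfs trigger attribute, and led_trigger_event() fans a brightness value out to every bound LED. A hedged sketch of the producer side (the demo_* names are hypothetical; LED_OFF/LED_FULL are assumed to be the brightness constants from linux/leds.h):

#include <linux/leds.h>

static struct led_trigger *demo_trig;

static void demo_trigger_init(void)
{
        /* shows up as "demo-activity" in each LED's trigger attribute */
        led_trigger_register_simple("demo-activity", &demo_trig);
}

static void demo_activity(int active)
{
        /* every LED currently bound to the trigger follows this call */
        led_trigger_event(demo_trig, active ? LED_FULL : LED_OFF);
}

static void demo_trigger_exit(void)
{
        led_trigger_unregister_simple(demo_trig);
}
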
diff --git a/drivers/leds/leds-corgi.c b/drivers/leds/leds-corgi.c
new file mode 100644
index 000000000000..bb7d84df0121
--- /dev/null
+++ b/drivers/leds/leds-corgi.c
@@ -0,0 +1,121 @@
1/*
2 * LED Triggers Core
3 *
4 * Copyright 2005-2006 Openedhand Ltd.
5 *
6 * Author: Richard Purdie <rpurdie@openedhand.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/config.h>
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/platform_device.h>
18#include <linux/leds.h>
19#include <asm/mach-types.h>
20#include <asm/arch/corgi.h>
21#include <asm/arch/hardware.h>
22#include <asm/arch/pxa-regs.h>
23#include <asm/hardware/scoop.h>
24
25static void corgiled_amber_set(struct led_classdev *led_cdev, enum led_brightness value)
26{
27 if (value)
28 GPSR0 = GPIO_bit(CORGI_GPIO_LED_ORANGE);
29 else
30 GPCR0 = GPIO_bit(CORGI_GPIO_LED_ORANGE);
31}
32
33static void corgiled_green_set(struct led_classdev *led_cdev, enum led_brightness value)
34{
35 if (value)
36 set_scoop_gpio(&corgiscoop_device.dev, CORGI_SCP_LED_GREEN);
37 else
38 reset_scoop_gpio(&corgiscoop_device.dev, CORGI_SCP_LED_GREEN);
39}
40
41static struct led_classdev corgi_amber_led = {
42 .name = "corgi:amber",
43 .default_trigger = "sharpsl-charge",
44 .brightness_set = corgiled_amber_set,
45};
46
47static struct led_classdev corgi_green_led = {
48 .name = "corgi:green",
49 .default_trigger = "nand-disk",
50 .brightness_set = corgiled_green_set,
51};
52
53#ifdef CONFIG_PM
54static int corgiled_suspend(struct platform_device *dev, pm_message_t state)
55{
56#ifdef CONFIG_LEDS_TRIGGERS
57 if (corgi_amber_led.trigger && strcmp(corgi_amber_led.trigger->name, "sharpsl-charge"))
58#endif
59 led_classdev_suspend(&corgi_amber_led);
60 led_classdev_suspend(&corgi_green_led);
61 return 0;
62}
63
64static int corgiled_resume(struct platform_device *dev)
65{
66 led_classdev_resume(&corgi_amber_led);
67 led_classdev_resume(&corgi_green_led);
68 return 0;
69}
70#endif
71
72static int corgiled_probe(struct platform_device *pdev)
73{
74 int ret;
75
76 ret = led_classdev_register(&pdev->dev, &corgi_amber_led);
77 if (ret < 0)
78 return ret;
79
80 ret = led_classdev_register(&pdev->dev, &corgi_green_led);
81 if (ret < 0)
82 led_classdev_unregister(&corgi_amber_led);
83
84 return ret;
85}
86
87static int corgiled_remove(struct platform_device *pdev)
88{
89 led_classdev_unregister(&corgi_amber_led);
90 led_classdev_unregister(&corgi_green_led);
91 return 0;
92}
93
94static struct platform_driver corgiled_driver = {
95 .probe = corgiled_probe,
96 .remove = corgiled_remove,
97#ifdef CONFIG_PM
98 .suspend = corgiled_suspend,
99 .resume = corgiled_resume,
100#endif
101 .driver = {
102 .name = "corgi-led",
103 },
104};
105
106static int __init corgiled_init(void)
107{
108 return platform_driver_register(&corgiled_driver);
109}
110
111static void __exit corgiled_exit(void)
112{
113 platform_driver_unregister(&corgiled_driver);
114}
115
116module_init(corgiled_init);
117module_exit(corgiled_exit);
118
119MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
120MODULE_DESCRIPTION("Corgi LED driver");
121MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-ixp4xx-gpio.c b/drivers/leds/leds-ixp4xx-gpio.c
new file mode 100644
index 000000000000..30ced150e4cf
--- /dev/null
+++ b/drivers/leds/leds-ixp4xx-gpio.c
@@ -0,0 +1,215 @@
1/*
2 * IXP4XX GPIO driver LED driver
3 *
4 * Author: John Bowler <jbowler@acm.org>
5 *
6 * Copyright (c) 2006 John Bowler
7 *
8 * Permission is hereby granted, free of charge, to any
9 * person obtaining a copy of this software and associated
10 * documentation files (the "Software"), to deal in the
11 * Software without restriction, including without
12 * limitation the rights to use, copy, modify, merge,
13 * publish, distribute, sublicense, and/or sell copies of
14 * the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the
16 * following conditions:
17 *
18 * The above copyright notice and this permission notice
19 * shall be included in all copies or substantial portions
20 * of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
23 * ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
24 * TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
25 * PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
26 * SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
27 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
29 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
30 * OTHER DEALINGS IN THE SOFTWARE.
31 *
32 */
33
34#include <linux/config.h>
35#include <linux/kernel.h>
36#include <linux/init.h>
37#include <linux/platform_device.h>
38#include <linux/spinlock.h>
39#include <linux/leds.h>
40#include <asm/arch/hardware.h>
41
42extern spinlock_t gpio_lock;
43
44/* Up to 16 gpio lines are possible. */
45#define GPIO_MAX 16
46static struct ixp4xxgpioled_device {
47 struct led_classdev ancestor;
48 int flags;
49} ixp4xxgpioled_devices[GPIO_MAX];
50
51void ixp4xxgpioled_brightness_set(struct led_classdev *pled,
52 enum led_brightness value)
53{
54 const struct ixp4xxgpioled_device *const ixp4xx_dev =
55 container_of(pled, struct ixp4xxgpioled_device, ancestor);
56 const u32 gpio_pin = ixp4xx_dev - ixp4xxgpioled_devices;
57
58 if (gpio_pin < GPIO_MAX && ixp4xx_dev->ancestor.name != 0) {
59 /* Set or clear the 'gpio_pin' bit according to the style
60 * and the required setting (value > 0 == on)
61 */
62 const int gpio_value =
63 (value > 0) == (ixp4xx_dev->flags != IXP4XX_GPIO_LOW) ?
64 IXP4XX_GPIO_HIGH : IXP4XX_GPIO_LOW;
65
66 {
67 unsigned long flags;
68 spin_lock_irqsave(&gpio_lock, flags);
69 gpio_line_set(gpio_pin, gpio_value);
70 spin_unlock_irqrestore(&gpio_lock, flags);
71 }
72 }
73}
74
75/* LEDs are described in resources, the following iterates over the valid
76 * LED resources.
77 */
78#define for_all_leds(i, pdev) \
79 for (i=0; i<pdev->num_resources; ++i) \
80 if (pdev->resource[i].start < GPIO_MAX && \
81 pdev->resource[i].name != 0)
82
83/* The following applies 'operation' to each LED from the given platform,
84 * the function always returns 0 to allow tail call elimination.
85 */
86static int apply_to_all_leds(struct platform_device *pdev,
87 void (*operation)(struct led_classdev *pled))
88{
89 int i;
90
91 for_all_leds(i, pdev)
92 operation(&ixp4xxgpioled_devices[pdev->resource[i].start].ancestor);
93 return 0;
94}
95
96#ifdef CONFIG_PM
97static int ixp4xxgpioled_suspend(struct platform_device *pdev,
98 pm_message_t state)
99{
100 return apply_to_all_leds(pdev, led_classdev_suspend);
101}
102
103static int ixp4xxgpioled_resume(struct platform_device *pdev)
104{
105 return apply_to_all_leds(pdev, led_classdev_resume);
106}
107#endif
108
109static void ixp4xxgpioled_remove_one_led(struct led_classdev *pled)
110{
111 led_classdev_unregister(pled);
112 pled->name = 0;
113}
114
115static int ixp4xxgpioled_remove(struct platform_device *pdev)
116{
117 return apply_to_all_leds(pdev, ixp4xxgpioled_remove_one_led);
118}
119
120static int ixp4xxgpioled_probe(struct platform_device *pdev)
121{
122 /* The board level has to tell the driver where the
123 * LEDs are connected - there is no way to find out
124 * electrically. It must also say whether the GPIO
125 * lines are active high or active low.
126 *
127 * To do this read the num_resources (the number of
128 * LEDs) and the struct resource (the data for each
129 * LED). The name comes from the resource, and it
130 * isn't copied.
131 */
132 int i;
133
134 for_all_leds(i, pdev) {
135 const u8 gpio_pin = pdev->resource[i].start;
136 int rc;
137
138 if (ixp4xxgpioled_devices[gpio_pin].ancestor.name == 0) {
139 unsigned long flags;
140
141 spin_lock_irqsave(&gpio_lock, flags);
142 gpio_line_config(gpio_pin, IXP4XX_GPIO_OUT);
143 /* The config can, apparently, reset the state,
144 * I suspect the gpio line may be an input and
145 * the config may cause the line to be latched,
146 * so the setting depends on how the LED is
147 * connected to the line (which affects how it
148 * floats if not driven).
149 */
150 gpio_line_set(gpio_pin, IXP4XX_GPIO_HIGH);
151 spin_unlock_irqrestore(&gpio_lock, flags);
152
153 ixp4xxgpioled_devices[gpio_pin].flags =
154 pdev->resource[i].flags & IORESOURCE_BITS;
155
156 ixp4xxgpioled_devices[gpio_pin].ancestor.name =
157 pdev->resource[i].name;
158
159 /* This is how a board manufacturer makes the LED
160 * come on on reset - the GPIO line will be high, so
161 * make the LED light when the line is low...
162 */
163 if (ixp4xxgpioled_devices[gpio_pin].flags != IXP4XX_GPIO_LOW)
164 ixp4xxgpioled_devices[gpio_pin].ancestor.brightness = 100;
165 else
166 ixp4xxgpioled_devices[gpio_pin].ancestor.brightness = 0;
167
168 ixp4xxgpioled_devices[gpio_pin].ancestor.flags = 0;
169
170 ixp4xxgpioled_devices[gpio_pin].ancestor.brightness_set =
171 ixp4xxgpioled_brightness_set;
172
173 ixp4xxgpioled_devices[gpio_pin].ancestor.default_trigger = 0;
174 }
175
176 rc = led_classdev_register(&pdev->dev,
177 &ixp4xxgpioled_devices[gpio_pin].ancestor);
178 if (rc < 0) {
179 ixp4xxgpioled_devices[gpio_pin].ancestor.name = 0;
180 ixp4xxgpioled_remove(pdev);
181 return rc;
182 }
183 }
184
185 return 0;
186}
187
188static struct platform_driver ixp4xxgpioled_driver = {
189 .probe = ixp4xxgpioled_probe,
190 .remove = ixp4xxgpioled_remove,
191#ifdef CONFIG_PM
192 .suspend = ixp4xxgpioled_suspend,
193 .resume = ixp4xxgpioled_resume,
194#endif
195 .driver = {
196 .name = "IXP4XX-GPIO-LED",
197 },
198};
199
200static int __init ixp4xxgpioled_init(void)
201{
202 return platform_driver_register(&ixp4xxgpioled_driver);
203}
204
205static void __exit ixp4xxgpioled_exit(void)
206{
207 platform_driver_unregister(&ixp4xxgpioled_driver);
208}
209
210module_init(ixp4xxgpioled_init);
211module_exit(ixp4xxgpioled_exit);
212
213MODULE_AUTHOR("John Bowler <jbowler@acm.org>");
214MODULE_DESCRIPTION("IXP4XX GPIO LED driver");
215MODULE_LICENSE("Dual MIT/GPL");
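
leds-ixp4xx-gpio.c expects the board code to describe its LEDs as platform resources: .start selects the GPIO line, .name becomes the LED name, and the low resource flag bits (masked with IORESOURCE_BITS) carry the active level. A hedged sketch of what such a board-side description might look like; the board and LED names are hypothetical and the polarity encoding is inferred from the probe logic above:

#include <linux/platform_device.h>
#include <asm/arch/hardware.h>  /* for IXP4XX_GPIO_LOW/HIGH, as in the driver */

static struct resource demo_board_led_resources[] = {
        {
                .name   = "demoboard:status",
                .start  = 3,                    /* GPIO line 3 */
                .end    = 3,
                .flags  = IXP4XX_GPIO_LOW,      /* assumed: LED wired active-low */
        },
};

static struct platform_device demo_board_leds = {
        .name           = "IXP4XX-GPIO-LED",    /* matches the driver above */
        .id             = -1,
        .num_resources  = ARRAY_SIZE(demo_board_led_resources),
        .resource       = demo_board_led_resources,
};

/* registered from board init code with platform_device_register(&demo_board_leds) */
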
diff --git a/drivers/leds/leds-locomo.c b/drivers/leds/leds-locomo.c
new file mode 100644
index 000000000000..749a86c2adb6
--- /dev/null
+++ b/drivers/leds/leds-locomo.c
@@ -0,0 +1,95 @@
1/*
2 * linux/drivers/leds/locomo.c
3 *
4 * Copyright (C) 2005 John Lenz <lenz@cs.wisc.edu>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/config.h>
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/device.h>
15#include <linux/leds.h>
16
17#include <asm/hardware.h>
18#include <asm/hardware/locomo.h>
19
20static void locomoled_brightness_set(struct led_classdev *led_cdev,
21 enum led_brightness value, int offset)
22{
23 struct locomo_dev *locomo_dev = LOCOMO_DEV(led_cdev->class_dev->dev);
24 unsigned long flags;
25
26 local_irq_save(flags);
27 if (value)
28 locomo_writel(LOCOMO_LPT_TOFH, locomo_dev->mapbase + offset);
29 else
30 locomo_writel(LOCOMO_LPT_TOFL, locomo_dev->mapbase + offset);
31 local_irq_restore(flags);
32}
33
34static void locomoled_brightness_set0(struct led_classdev *led_cdev,
35 enum led_brightness value)
36{
37 locomoled_brightness_set(led_cdev, value, LOCOMO_LPT0);
38}
39
40static void locomoled_brightness_set1(struct led_classdev *led_cdev,
41 enum led_brightness value)
42{
43 locomoled_brightness_set(led_cdev, value, LOCOMO_LPT1);
44}
45
46static struct led_classdev locomo_led0 = {
47 .name = "locomo:amber",
48 .brightness_set = locomoled_brightness_set0,
49};
50
51static struct led_classdev locomo_led1 = {
52 .name = "locomo:green",
53 .brightness_set = locomoled_brightness_set1,
54};
55
56static int locomoled_probe(struct locomo_dev *ldev)
57{
58 int ret;
59
60 ret = led_classdev_register(&ldev->dev, &locomo_led0);
61 if (ret < 0)
62 return ret;
63
64 ret = led_classdev_register(&ldev->dev, &locomo_led1);
65 if (ret < 0)
66 led_classdev_unregister(&locomo_led0);
67
68 return ret;
69}
70
71static int locomoled_remove(struct locomo_dev *dev)
72{
73 led_classdev_unregister(&locomo_led0);
74 led_classdev_unregister(&locomo_led1);
75 return 0;
76}
77
78static struct locomo_driver locomoled_driver = {
79 .drv = {
80 .name = "locomoled"
81 },
82 .devid = LOCOMO_DEVID_LED,
83 .probe = locomoled_probe,
84 .remove = locomoled_remove,
85};
86
87static int __init locomoled_init(void)
88{
89 return locomo_driver_register(&locomoled_driver);
90}
91module_init(locomoled_init);
92
93MODULE_AUTHOR("John Lenz <lenz@cs.wisc.edu>");
94MODULE_DESCRIPTION("Locomo LED driver");
95MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-spitz.c b/drivers/leds/leds-spitz.c
new file mode 100644
index 000000000000..65bbef4a5e09
--- /dev/null
+++ b/drivers/leds/leds-spitz.c
@@ -0,0 +1,125 @@
1/*
 2 * Spitz LED Driver
3 *
4 * Copyright 2005-2006 Openedhand Ltd.
5 *
6 * Author: Richard Purdie <rpurdie@openedhand.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/config.h>
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/platform_device.h>
18#include <linux/leds.h>
19#include <asm/hardware/scoop.h>
20#include <asm/mach-types.h>
21#include <asm/arch/hardware.h>
22#include <asm/arch/pxa-regs.h>
23#include <asm/arch/spitz.h>
24
25static void spitzled_amber_set(struct led_classdev *led_cdev, enum led_brightness value)
26{
27 if (value)
28 set_scoop_gpio(&spitzscoop_device.dev, SPITZ_SCP_LED_ORANGE);
29 else
30 reset_scoop_gpio(&spitzscoop_device.dev, SPITZ_SCP_LED_ORANGE);
31}
32
33static void spitzled_green_set(struct led_classdev *led_cdev, enum led_brightness value)
34{
35 if (value)
36 set_scoop_gpio(&spitzscoop_device.dev, SPITZ_SCP_LED_GREEN);
37 else
38 reset_scoop_gpio(&spitzscoop_device.dev, SPITZ_SCP_LED_GREEN);
39}
40
41static struct led_classdev spitz_amber_led = {
42 .name = "spitz:amber",
43 .default_trigger = "sharpsl-charge",
44 .brightness_set = spitzled_amber_set,
45};
46
47static struct led_classdev spitz_green_led = {
48 .name = "spitz:green",
49 .default_trigger = "ide-disk",
50 .brightness_set = spitzled_green_set,
51};
52
53#ifdef CONFIG_PM
54static int spitzled_suspend(struct platform_device *dev, pm_message_t state)
55{
56#ifdef CONFIG_LEDS_TRIGGERS
57 if (spitz_amber_led.trigger && strcmp(spitz_amber_led.trigger->name, "sharpsl-charge"))
58#endif
59 led_classdev_suspend(&spitz_amber_led);
60 led_classdev_suspend(&spitz_green_led);
61 return 0;
62}
63
64static int spitzled_resume(struct platform_device *dev)
65{
66 led_classdev_resume(&spitz_amber_led);
67 led_classdev_resume(&spitz_green_led);
68 return 0;
69}
70#endif
71
72static int spitzled_probe(struct platform_device *pdev)
73{
74 int ret;
75
76 if (machine_is_akita())
77 spitz_green_led.default_trigger = "nand-disk";
78
79 ret = led_classdev_register(&pdev->dev, &spitz_amber_led);
80 if (ret < 0)
81 return ret;
82
83 ret = led_classdev_register(&pdev->dev, &spitz_green_led);
84 if (ret < 0)
85 led_classdev_unregister(&spitz_amber_led);
86
87 return ret;
88}
89
90static int spitzled_remove(struct platform_device *pdev)
91{
92 led_classdev_unregister(&spitz_amber_led);
93 led_classdev_unregister(&spitz_green_led);
94
95 return 0;
96}
97
98static struct platform_driver spitzled_driver = {
99 .probe = spitzled_probe,
100 .remove = spitzled_remove,
101#ifdef CONFIG_PM
102 .suspend = spitzled_suspend,
103 .resume = spitzled_resume,
104#endif
105 .driver = {
106 .name = "spitz-led",
107 },
108};
109
110static int __init spitzled_init(void)
111{
112 return platform_driver_register(&spitzled_driver);
113}
114
115static void __exit spitzled_exit(void)
116{
117 platform_driver_unregister(&spitzled_driver);
118}
119
120module_init(spitzled_init);
121module_exit(spitzled_exit);
122
123MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
124MODULE_DESCRIPTION("Spitz LED driver");
125MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-tosa.c b/drivers/leds/leds-tosa.c
new file mode 100644
index 000000000000..c9e8cc1ec481
--- /dev/null
+++ b/drivers/leds/leds-tosa.c
@@ -0,0 +1,131 @@
1/*
 2 * Tosa LED Driver
3 *
4 * Copyright 2005 Dirk Opfer
5 *
6 * Author: Dirk Opfer <Dirk@Opfer-Online.de>
7 * based on spitz.c
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 */
14
15#include <linux/config.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/platform_device.h>
19#include <linux/leds.h>
20#include <asm/hardware/scoop.h>
21#include <asm/mach-types.h>
22#include <asm/arch/hardware.h>
23#include <asm/arch/pxa-regs.h>
24#include <asm/arch/tosa.h>
25
26static void tosaled_amber_set(struct led_classdev *led_cdev,
27 enum led_brightness value)
28{
29 if (value)
30 set_scoop_gpio(&tosascoop_jc_device.dev,
31 TOSA_SCOOP_JC_CHRG_ERR_LED);
32 else
33 reset_scoop_gpio(&tosascoop_jc_device.dev,
34 TOSA_SCOOP_JC_CHRG_ERR_LED);
35}
36
37static void tosaled_green_set(struct led_classdev *led_cdev,
38 enum led_brightness value)
39{
40 if (value)
41 set_scoop_gpio(&tosascoop_jc_device.dev,
42 TOSA_SCOOP_JC_NOTE_LED);
43 else
44 reset_scoop_gpio(&tosascoop_jc_device.dev,
45 TOSA_SCOOP_JC_NOTE_LED);
46}
47
48static struct led_classdev tosa_amber_led = {
49 .name = "tosa:amber",
50 .default_trigger = "sharpsl-charge",
51 .brightness_set = tosaled_amber_set,
52};
53
54static struct led_classdev tosa_green_led = {
55 .name = "tosa:green",
56 .default_trigger = "nand-disk",
57 .brightness_set = tosaled_green_set,
58};
59
60#ifdef CONFIG_PM
61static int tosaled_suspend(struct platform_device *dev, pm_message_t state)
62{
63#ifdef CONFIG_LEDS_TRIGGERS
64 if (tosa_amber_led.trigger && strcmp(tosa_amber_led.trigger->name,
65 "sharpsl-charge"))
66#endif
67 led_classdev_suspend(&tosa_amber_led);
68 led_classdev_suspend(&tosa_green_led);
69 return 0;
70}
71
72static int tosaled_resume(struct platform_device *dev)
73{
74 led_classdev_resume(&tosa_amber_led);
75 led_classdev_resume(&tosa_green_led);
76 return 0;
77}
78#else
79#define tosaled_suspend NULL
80#define tosaled_resume NULL
81#endif
82
83static int tosaled_probe(struct platform_device *pdev)
84{
85 int ret;
86
87 ret = led_classdev_register(&pdev->dev, &tosa_amber_led);
88 if (ret < 0)
89 return ret;
90
91 ret = led_classdev_register(&pdev->dev, &tosa_green_led);
92 if (ret < 0)
93 led_classdev_unregister(&tosa_amber_led);
94
95 return ret;
96}
97
98static int tosaled_remove(struct platform_device *pdev)
99{
100 led_classdev_unregister(&tosa_amber_led);
101 led_classdev_unregister(&tosa_green_led);
102
103 return 0;
104}
105
106static struct platform_driver tosaled_driver = {
107 .probe = tosaled_probe,
108 .remove = tosaled_remove,
109 .suspend = tosaled_suspend,
110 .resume = tosaled_resume,
111 .driver = {
112 .name = "tosa-led",
113 },
114};
115
116static int __init tosaled_init(void)
117{
118 return platform_driver_register(&tosaled_driver);
119}
120
121static void __exit tosaled_exit(void)
122{
123 platform_driver_unregister(&tosaled_driver);
124}
125
126module_init(tosaled_init);
127module_exit(tosaled_exit);
128
129MODULE_AUTHOR("Dirk Opfer <Dirk@Opfer-Online.de>");
130MODULE_DESCRIPTION("Tosa LED driver");
131MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
new file mode 100644
index 000000000000..a715c4ed93ff
--- /dev/null
+++ b/drivers/leds/leds.h
@@ -0,0 +1,44 @@
1/*
2 * LED Core
3 *
4 * Copyright 2005 Openedhand Ltd.
5 *
6 * Author: Richard Purdie <rpurdie@openedhand.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13#ifndef __LEDS_H_INCLUDED
14#define __LEDS_H_INCLUDED
15
16#include <linux/leds.h>
17
18static inline void led_set_brightness(struct led_classdev *led_cdev,
19 enum led_brightness value)
20{
21 if (value > LED_FULL)
22 value = LED_FULL;
23 led_cdev->brightness = value;
24 if (!(led_cdev->flags & LED_SUSPENDED))
25 led_cdev->brightness_set(led_cdev, value);
26}
27
28extern rwlock_t leds_list_lock;
29extern struct list_head leds_list;
30
31#ifdef CONFIG_LEDS_TRIGGERS
32void led_trigger_set_default(struct led_classdev *led_cdev);
33void led_trigger_set(struct led_classdev *led_cdev,
34 struct led_trigger *trigger);
35#else
36#define led_trigger_set_default(x) do {} while(0)
37#define led_trigger_set(x, y) do {} while(0)
38#endif
39
40ssize_t led_trigger_store(struct class_device *dev, const char *buf,
41 size_t count);
42ssize_t led_trigger_show(struct class_device *dev, char *buf);
43
44#endif /* __LEDS_H_INCLUDED */
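
The led_set_brightness() helper and the trigger hooks above are the whole contract a board driver has to meet: supply a name and a brightness_set callback, then register the led_classdev against its parent device, exactly as the ixp4xx/locomo/spitz/tosa drivers earlier in this series do. A minimal sketch of that pattern; the board_led_set() pin helper and the "board:blue"/"board-led" names are placeholder assumptions, not part of this interface:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/leds.h>

/* Hypothetical helper that actually drives the pin. */
extern void board_led_set(int on);

static void board_led_brightness_set(struct led_classdev *led_cdev,
				     enum led_brightness value)
{
	/* led_set_brightness() has already clamped value to LED_FULL */
	board_led_set(value != LED_OFF);
}

static struct led_classdev board_led = {
	.name		= "board:blue",
	.brightness_set	= board_led_brightness_set,
};

static int board_led_probe(struct platform_device *pdev)
{
	return led_classdev_register(&pdev->dev, &board_led);
}

static int board_led_remove(struct platform_device *pdev)
{
	led_classdev_unregister(&board_led);
	return 0;
}

static struct platform_driver board_led_driver = {
	.probe	= board_led_probe,
	.remove	= board_led_remove,
	.driver	= { .name = "board-led" },
};

static int __init board_led_init(void)
{
	return platform_driver_register(&board_led_driver);
}
module_init(board_led_init);

MODULE_LICENSE("GPL");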
diff --git a/drivers/leds/ledtrig-ide-disk.c b/drivers/leds/ledtrig-ide-disk.c
new file mode 100644
index 000000000000..fa651886ab4f
--- /dev/null
+++ b/drivers/leds/ledtrig-ide-disk.c
@@ -0,0 +1,62 @@
1/*
2 * LED IDE-Disk Activity Trigger
3 *
4 * Copyright 2006 Openedhand Ltd.
5 *
6 * Author: Richard Purdie <rpurdie@openedhand.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/timer.h>
18#include <linux/leds.h>
19
20static void ledtrig_ide_timerfunc(unsigned long data);
21
22DEFINE_LED_TRIGGER(ledtrig_ide);
23static DEFINE_TIMER(ledtrig_ide_timer, ledtrig_ide_timerfunc, 0, 0);
24static int ide_activity;
25static int ide_lastactivity;
26
27void ledtrig_ide_activity(void)
28{
29 ide_activity++;
30 if (!timer_pending(&ledtrig_ide_timer))
31 mod_timer(&ledtrig_ide_timer, jiffies + msecs_to_jiffies(10));
32}
33EXPORT_SYMBOL(ledtrig_ide_activity);
34
35static void ledtrig_ide_timerfunc(unsigned long data)
36{
37 if (ide_lastactivity != ide_activity) {
38 ide_lastactivity = ide_activity;
39 led_trigger_event(ledtrig_ide, LED_FULL);
40 mod_timer(&ledtrig_ide_timer, jiffies + msecs_to_jiffies(10));
41 } else {
42 led_trigger_event(ledtrig_ide, LED_OFF);
43 }
44}
45
46static int __init ledtrig_ide_init(void)
47{
48 led_trigger_register_simple("ide-disk", &ledtrig_ide);
49 return 0;
50}
51
52static void __exit ledtrig_ide_exit(void)
53{
54 led_trigger_unregister_simple(ledtrig_ide);
55}
56
57module_init(ledtrig_ide_init);
58module_exit(ledtrig_ide_exit);
59
60MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
61MODULE_DESCRIPTION("LED IDE Disk Activity Trigger");
62MODULE_LICENSE("GPL");
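
ledtrig_ide_activity() above shows the provider half of the simple-trigger API: declare a trigger handle, register it under a name, and fire led_trigger_event() whenever activity occurs. A sketch of the same pattern for an illustrative activity source; the "my-activity" name and my_subsys_activity() hook are placeholders, not existing kernel symbols:

#include <linux/module.h>
#include <linux/leds.h>

DEFINE_LED_TRIGGER(ledtrig_myact);

/* Called from the (hypothetical) subsystem whenever work happens. */
void my_subsys_activity(void)
{
	led_trigger_event(ledtrig_myact, LED_FULL);
}

static int __init ledtrig_myact_init(void)
{
	led_trigger_register_simple("my-activity", &ledtrig_myact);
	return 0;
}

static void __exit ledtrig_myact_exit(void)
{
	led_trigger_unregister_simple(ledtrig_myact);
}

module_init(ledtrig_myact_init);
module_exit(ledtrig_myact_exit);

MODULE_LICENSE("GPL");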
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
new file mode 100644
index 000000000000..f484b5d6dbf8
--- /dev/null
+++ b/drivers/leds/ledtrig-timer.c
@@ -0,0 +1,170 @@
1/*
2 * LED Kernel Timer Trigger
3 *
4 * Copyright 2005-2006 Openedhand Ltd.
5 *
6 * Author: Richard Purdie <rpurdie@openedhand.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/config.h>
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/list.h>
19#include <linux/spinlock.h>
20#include <linux/device.h>
21#include <linux/sysdev.h>
22#include <linux/timer.h>
23#include <linux/leds.h>
24#include "leds.h"
25
26struct timer_trig_data {
27 unsigned long delay_on; /* milliseconds on */
28 unsigned long delay_off; /* milliseconds off */
29 struct timer_list timer;
30};
31
32static void led_timer_function(unsigned long data)
33{
34 struct led_classdev *led_cdev = (struct led_classdev *) data;
35 struct timer_trig_data *timer_data = led_cdev->trigger_data;
36 unsigned long brightness = LED_OFF;
37 unsigned long delay = timer_data->delay_off;
38
39 if (!timer_data->delay_on || !timer_data->delay_off) {
40 led_set_brightness(led_cdev, LED_OFF);
41 return;
42 }
43
44 if (!led_cdev->brightness) {
45 brightness = LED_FULL;
46 delay = timer_data->delay_on;
47 }
48
49 led_set_brightness(led_cdev, brightness);
50
51 mod_timer(&timer_data->timer, jiffies + msecs_to_jiffies(delay));
52}
53
54static ssize_t led_delay_on_show(struct class_device *dev, char *buf)
55{
56 struct led_classdev *led_cdev = class_get_devdata(dev);
57 struct timer_trig_data *timer_data = led_cdev->trigger_data;
58
59 sprintf(buf, "%lu\n", timer_data->delay_on);
60
61 return strlen(buf) + 1;
62}
63
64static ssize_t led_delay_on_store(struct class_device *dev, const char *buf,
65 size_t size)
66{
67 struct led_classdev *led_cdev = class_get_devdata(dev);
68 struct timer_trig_data *timer_data = led_cdev->trigger_data;
69 int ret = -EINVAL;
70 char *after;
71 unsigned long state = simple_strtoul(buf, &after, 10);
72
73 if (after - buf > 0) {
74 timer_data->delay_on = state;
75 mod_timer(&timer_data->timer, jiffies + 1);
76 ret = after - buf;
77 }
78
79 return ret;
80}
81
82static ssize_t led_delay_off_show(struct class_device *dev, char *buf)
83{
84 struct led_classdev *led_cdev = class_get_devdata(dev);
85 struct timer_trig_data *timer_data = led_cdev->trigger_data;
86
87 sprintf(buf, "%lu\n", timer_data->delay_off);
88
89 return strlen(buf) + 1;
90}
91
92static ssize_t led_delay_off_store(struct class_device *dev, const char *buf,
93 size_t size)
94{
95 struct led_classdev *led_cdev = class_get_devdata(dev);
96 struct timer_trig_data *timer_data = led_cdev->trigger_data;
97 int ret = -EINVAL;
98 char *after;
99 unsigned long state = simple_strtoul(buf, &after, 10);
100
101 if (after - buf > 0) {
102 timer_data->delay_off = state;
103 mod_timer(&timer_data->timer, jiffies + 1);
104 ret = after - buf;
105 }
106
107 return ret;
108}
109
110static CLASS_DEVICE_ATTR(delay_on, 0644, led_delay_on_show,
111 led_delay_on_store);
112static CLASS_DEVICE_ATTR(delay_off, 0644, led_delay_off_show,
113 led_delay_off_store);
114
115static void timer_trig_activate(struct led_classdev *led_cdev)
116{
117 struct timer_trig_data *timer_data;
118
119 timer_data = kzalloc(sizeof(struct timer_trig_data), GFP_KERNEL);
120 if (!timer_data)
121 return;
122
123 led_cdev->trigger_data = timer_data;
124
125 init_timer(&timer_data->timer);
126 timer_data->timer.function = led_timer_function;
127 timer_data->timer.data = (unsigned long) led_cdev;
128
129 class_device_create_file(led_cdev->class_dev,
130 &class_device_attr_delay_on);
131 class_device_create_file(led_cdev->class_dev,
132 &class_device_attr_delay_off);
133}
134
135static void timer_trig_deactivate(struct led_classdev *led_cdev)
136{
137 struct timer_trig_data *timer_data = led_cdev->trigger_data;
138
139 if (timer_data) {
140 class_device_remove_file(led_cdev->class_dev,
141 &class_device_attr_delay_on);
142 class_device_remove_file(led_cdev->class_dev,
143 &class_device_attr_delay_off);
144 del_timer_sync(&timer_data->timer);
145 kfree(timer_data);
146 }
147}
148
149static struct led_trigger timer_led_trigger = {
150 .name = "timer",
151 .activate = timer_trig_activate,
152 .deactivate = timer_trig_deactivate,
153};
154
155static int __init timer_trig_init(void)
156{
157 return led_trigger_register(&timer_led_trigger);
158}
159
160static void __exit timer_trig_exit(void)
161{
162 led_trigger_unregister(&timer_led_trigger);
163}
164
165module_init(timer_trig_init);
166module_exit(timer_trig_exit);
167
168MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
169MODULE_DESCRIPTION("Timer LED trigger");
170MODULE_LICENSE("GPL");
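
Once this trigger is bound to an LED, the blink period is controlled entirely from userspace through the delay_on and delay_off attributes created in timer_trig_activate(). A small userspace sketch, assuming an LED registered as "board:blue" and the usual /sys/class/leds layout; the name and path are illustrative:

#include <stdio.h>

/* Blink "board:blue" 100 ms on / 900 ms off via the timer trigger. */
static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	write_str("/sys/class/leds/board:blue/trigger", "timer");
	write_str("/sys/class/leds/board:blue/delay_on", "100");
	write_str("/sys/class/leds/board:blue/delay_off", "900");
	return 0;
}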
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index aecd9e0c2616..64fd8e79ea4c 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -78,8 +78,7 @@ void dm_put_target_type(struct target_type *t)
78 if (--ti->use == 0) 78 if (--ti->use == 0)
79 module_put(ti->tt.module); 79 module_put(ti->tt.module);
80 80
81 if (ti->use < 0) 81 BUG_ON(ti->use < 0);
82 BUG();
83 up_read(&_lock); 82 up_read(&_lock);
84 83
85 return; 84 return;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 039e071c1007..1ed5152db450 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -215,13 +215,11 @@ static void mddev_put(mddev_t *mddev)
215 return; 215 return;
216 if (!mddev->raid_disks && list_empty(&mddev->disks)) { 216 if (!mddev->raid_disks && list_empty(&mddev->disks)) {
217 list_del(&mddev->all_mddevs); 217 list_del(&mddev->all_mddevs);
218 /* that blocks */ 218 spin_unlock(&all_mddevs_lock);
219 blk_cleanup_queue(mddev->queue); 219 blk_cleanup_queue(mddev->queue);
220 /* that also blocks */
221 kobject_unregister(&mddev->kobj); 220 kobject_unregister(&mddev->kobj);
222 /* result blows... */ 221 } else
223 } 222 spin_unlock(&all_mddevs_lock);
224 spin_unlock(&all_mddevs_lock);
225} 223}
226 224
227static mddev_t * mddev_find(dev_t unit) 225static mddev_t * mddev_find(dev_t unit)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 3cb0872a845d..6081941de1b3 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1135,8 +1135,19 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
1135 mirror = i; 1135 mirror = i;
1136 break; 1136 break;
1137 } 1137 }
1138 if (!uptodate) 1138 if (!uptodate) {
1139 int sync_blocks = 0;
1140 sector_t s = r1_bio->sector;
1141 long sectors_to_go = r1_bio->sectors;
 1142 /* make sure these bits don't get cleared. */
1143 do {
1144 bitmap_end_sync(mddev->bitmap, r1_bio->sector,
1145 &sync_blocks, 1);
1146 s += sync_blocks;
1147 sectors_to_go -= sync_blocks;
1148 } while (sectors_to_go > 0);
1139 md_error(mddev, conf->mirrors[mirror].rdev); 1149 md_error(mddev, conf->mirrors[mirror].rdev);
1150 }
1140 1151
1141 update_head_pos(mirror, r1_bio); 1152 update_head_pos(mirror, r1_bio);
1142 1153
@@ -1547,8 +1558,7 @@ static int init_resync(conf_t *conf)
1547 int buffs; 1558 int buffs;
1548 1559
1549 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE; 1560 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
1550 if (conf->r1buf_pool) 1561 BUG_ON(conf->r1buf_pool);
1551 BUG();
1552 conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free, 1562 conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
1553 conf->poolinfo); 1563 conf->poolinfo);
1554 if (!conf->r1buf_pool) 1564 if (!conf->r1buf_pool)
@@ -1721,8 +1731,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1721 !conf->fullsync && 1731 !conf->fullsync &&
1722 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 1732 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
1723 break; 1733 break;
1724 if (sync_blocks < (PAGE_SIZE>>9)) 1734 BUG_ON(sync_blocks < (PAGE_SIZE>>9));
1725 BUG();
1726 if (len > (sync_blocks<<9)) 1735 if (len > (sync_blocks<<9))
1727 len = sync_blocks<<9; 1736 len = sync_blocks<<9;
1728 } 1737 }
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ab90a6d12020..617012bc107a 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1117,8 +1117,7 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
1117 for (i=0; i<conf->copies; i++) 1117 for (i=0; i<conf->copies; i++)
1118 if (r10_bio->devs[i].bio == bio) 1118 if (r10_bio->devs[i].bio == bio)
1119 break; 1119 break;
1120 if (i == conf->copies) 1120 BUG_ON(i == conf->copies);
1121 BUG();
1122 update_head_pos(i, r10_bio); 1121 update_head_pos(i, r10_bio);
1123 d = r10_bio->devs[i].devnum; 1122 d = r10_bio->devs[i].devnum;
1124 1123
@@ -1518,8 +1517,7 @@ static int init_resync(conf_t *conf)
1518 int buffs; 1517 int buffs;
1519 1518
1520 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE; 1519 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
1521 if (conf->r10buf_pool) 1520 BUG_ON(conf->r10buf_pool);
1522 BUG();
1523 conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf); 1521 conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
1524 if (!conf->r10buf_pool) 1522 if (!conf->r10buf_pool)
1525 return -ENOMEM; 1523 return -ENOMEM;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index dae740adaf65..31843604049c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -73,10 +73,8 @@ static void print_raid5_conf (raid5_conf_t *conf);
73static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh) 73static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
74{ 74{
75 if (atomic_dec_and_test(&sh->count)) { 75 if (atomic_dec_and_test(&sh->count)) {
76 if (!list_empty(&sh->lru)) 76 BUG_ON(!list_empty(&sh->lru));
77 BUG(); 77 BUG_ON(atomic_read(&conf->active_stripes)==0);
78 if (atomic_read(&conf->active_stripes)==0)
79 BUG();
80 if (test_bit(STRIPE_HANDLE, &sh->state)) { 78 if (test_bit(STRIPE_HANDLE, &sh->state)) {
81 if (test_bit(STRIPE_DELAYED, &sh->state)) 79 if (test_bit(STRIPE_DELAYED, &sh->state))
82 list_add_tail(&sh->lru, &conf->delayed_list); 80 list_add_tail(&sh->lru, &conf->delayed_list);
@@ -184,10 +182,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int
184 raid5_conf_t *conf = sh->raid_conf; 182 raid5_conf_t *conf = sh->raid_conf;
185 int i; 183 int i;
186 184
187 if (atomic_read(&sh->count) != 0) 185 BUG_ON(atomic_read(&sh->count) != 0);
188 BUG(); 186 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
189 if (test_bit(STRIPE_HANDLE, &sh->state))
190 BUG();
191 187
192 CHECK_DEVLOCK(); 188 CHECK_DEVLOCK();
193 PRINTK("init_stripe called, stripe %llu\n", 189 PRINTK("init_stripe called, stripe %llu\n",
@@ -269,8 +265,7 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
269 init_stripe(sh, sector, pd_idx, disks); 265 init_stripe(sh, sector, pd_idx, disks);
270 } else { 266 } else {
271 if (atomic_read(&sh->count)) { 267 if (atomic_read(&sh->count)) {
272 if (!list_empty(&sh->lru)) 268 BUG_ON(!list_empty(&sh->lru));
273 BUG();
274 } else { 269 } else {
275 if (!test_bit(STRIPE_HANDLE, &sh->state)) 270 if (!test_bit(STRIPE_HANDLE, &sh->state))
276 atomic_inc(&conf->active_stripes); 271 atomic_inc(&conf->active_stripes);
@@ -465,8 +460,7 @@ static int drop_one_stripe(raid5_conf_t *conf)
465 spin_unlock_irq(&conf->device_lock); 460 spin_unlock_irq(&conf->device_lock);
466 if (!sh) 461 if (!sh)
467 return 0; 462 return 0;
468 if (atomic_read(&sh->count)) 463 BUG_ON(atomic_read(&sh->count));
469 BUG();
470 shrink_buffers(sh, conf->pool_size); 464 shrink_buffers(sh, conf->pool_size);
471 kmem_cache_free(conf->slab_cache, sh); 465 kmem_cache_free(conf->slab_cache, sh);
472 atomic_dec(&conf->active_stripes); 466 atomic_dec(&conf->active_stripes);
@@ -882,8 +876,7 @@ static void compute_parity(struct stripe_head *sh, int method)
882 ptr[0] = page_address(sh->dev[pd_idx].page); 876 ptr[0] = page_address(sh->dev[pd_idx].page);
883 switch(method) { 877 switch(method) {
884 case READ_MODIFY_WRITE: 878 case READ_MODIFY_WRITE:
885 if (!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags)) 879 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags));
886 BUG();
887 for (i=disks ; i-- ;) { 880 for (i=disks ; i-- ;) {
888 if (i==pd_idx) 881 if (i==pd_idx)
889 continue; 882 continue;
@@ -896,7 +889,7 @@ static void compute_parity(struct stripe_head *sh, int method)
896 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 889 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
897 wake_up(&conf->wait_for_overlap); 890 wake_up(&conf->wait_for_overlap);
898 891
899 if (sh->dev[i].written) BUG(); 892 BUG_ON(sh->dev[i].written);
900 sh->dev[i].written = chosen; 893 sh->dev[i].written = chosen;
901 check_xor(); 894 check_xor();
902 } 895 }
@@ -912,7 +905,7 @@ static void compute_parity(struct stripe_head *sh, int method)
912 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 905 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
913 wake_up(&conf->wait_for_overlap); 906 wake_up(&conf->wait_for_overlap);
914 907
915 if (sh->dev[i].written) BUG(); 908 BUG_ON(sh->dev[i].written);
916 sh->dev[i].written = chosen; 909 sh->dev[i].written = chosen;
917 } 910 }
918 break; 911 break;
@@ -995,8 +988,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
995 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9)) 988 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
996 goto overlap; 989 goto overlap;
997 990
998 if (*bip && bi->bi_next && (*bip) != bi->bi_next) 991 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
999 BUG();
1000 if (*bip) 992 if (*bip)
1001 bi->bi_next = *bip; 993 bi->bi_next = *bip;
1002 *bip = bi; 994 *bip = bi;
@@ -1430,8 +1422,7 @@ static void handle_stripe(struct stripe_head *sh)
1430 set_bit(STRIPE_HANDLE, &sh->state); 1422 set_bit(STRIPE_HANDLE, &sh->state);
1431 if (failed == 0) { 1423 if (failed == 0) {
1432 char *pagea; 1424 char *pagea;
1433 if (uptodate != disks) 1425 BUG_ON(uptodate != disks);
1434 BUG();
1435 compute_parity(sh, CHECK_PARITY); 1426 compute_parity(sh, CHECK_PARITY);
1436 uptodate--; 1427 uptodate--;
1437 pagea = page_address(sh->dev[sh->pd_idx].page); 1428 pagea = page_address(sh->dev[sh->pd_idx].page);
@@ -2096,8 +2087,7 @@ static void raid5d (mddev_t *mddev)
2096 2087
2097 list_del_init(first); 2088 list_del_init(first);
2098 atomic_inc(&sh->count); 2089 atomic_inc(&sh->count);
2099 if (atomic_read(&sh->count)!= 1) 2090 BUG_ON(atomic_read(&sh->count)!= 1);
2100 BUG();
2101 spin_unlock_irq(&conf->device_lock); 2091 spin_unlock_irq(&conf->device_lock);
2102 2092
2103 handled++; 2093 handled++;
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
index 6df4930fddec..bc69355e0100 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -91,10 +91,8 @@ static void print_raid6_conf (raid6_conf_t *conf);
91static void __release_stripe(raid6_conf_t *conf, struct stripe_head *sh) 91static void __release_stripe(raid6_conf_t *conf, struct stripe_head *sh)
92{ 92{
93 if (atomic_dec_and_test(&sh->count)) { 93 if (atomic_dec_and_test(&sh->count)) {
94 if (!list_empty(&sh->lru)) 94 BUG_ON(!list_empty(&sh->lru));
95 BUG(); 95 BUG_ON(atomic_read(&conf->active_stripes)==0);
96 if (atomic_read(&conf->active_stripes)==0)
97 BUG();
98 if (test_bit(STRIPE_HANDLE, &sh->state)) { 96 if (test_bit(STRIPE_HANDLE, &sh->state)) {
99 if (test_bit(STRIPE_DELAYED, &sh->state)) 97 if (test_bit(STRIPE_DELAYED, &sh->state))
100 list_add_tail(&sh->lru, &conf->delayed_list); 98 list_add_tail(&sh->lru, &conf->delayed_list);
@@ -202,10 +200,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
202 raid6_conf_t *conf = sh->raid_conf; 200 raid6_conf_t *conf = sh->raid_conf;
203 int disks = conf->raid_disks, i; 201 int disks = conf->raid_disks, i;
204 202
205 if (atomic_read(&sh->count) != 0) 203 BUG_ON(atomic_read(&sh->count) != 0);
206 BUG(); 204 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
207 if (test_bit(STRIPE_HANDLE, &sh->state))
208 BUG();
209 205
210 CHECK_DEVLOCK(); 206 CHECK_DEVLOCK();
211 PRINTK("init_stripe called, stripe %llu\n", 207 PRINTK("init_stripe called, stripe %llu\n",
@@ -284,13 +280,11 @@ static struct stripe_head *get_active_stripe(raid6_conf_t *conf, sector_t sector
284 init_stripe(sh, sector, pd_idx); 280 init_stripe(sh, sector, pd_idx);
285 } else { 281 } else {
286 if (atomic_read(&sh->count)) { 282 if (atomic_read(&sh->count)) {
287 if (!list_empty(&sh->lru)) 283 BUG_ON(!list_empty(&sh->lru));
288 BUG();
289 } else { 284 } else {
290 if (!test_bit(STRIPE_HANDLE, &sh->state)) 285 if (!test_bit(STRIPE_HANDLE, &sh->state))
291 atomic_inc(&conf->active_stripes); 286 atomic_inc(&conf->active_stripes);
292 if (list_empty(&sh->lru)) 287 BUG_ON(list_empty(&sh->lru));
293 BUG();
294 list_del_init(&sh->lru); 288 list_del_init(&sh->lru);
295 } 289 }
296 } 290 }
@@ -353,8 +347,7 @@ static int drop_one_stripe(raid6_conf_t *conf)
353 spin_unlock_irq(&conf->device_lock); 347 spin_unlock_irq(&conf->device_lock);
354 if (!sh) 348 if (!sh)
355 return 0; 349 return 0;
356 if (atomic_read(&sh->count)) 350 BUG_ON(atomic_read(&sh->count));
357 BUG();
358 shrink_buffers(sh, conf->raid_disks); 351 shrink_buffers(sh, conf->raid_disks);
359 kmem_cache_free(conf->slab_cache, sh); 352 kmem_cache_free(conf->slab_cache, sh);
360 atomic_dec(&conf->active_stripes); 353 atomic_dec(&conf->active_stripes);
@@ -780,7 +773,7 @@ static void compute_parity(struct stripe_head *sh, int method)
780 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 773 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
781 wake_up(&conf->wait_for_overlap); 774 wake_up(&conf->wait_for_overlap);
782 775
783 if (sh->dev[i].written) BUG(); 776 BUG_ON(sh->dev[i].written);
784 sh->dev[i].written = chosen; 777 sh->dev[i].written = chosen;
785 } 778 }
786 break; 779 break;
@@ -970,8 +963,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
970 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9)) 963 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
971 goto overlap; 964 goto overlap;
972 965
973 if (*bip && bi->bi_next && (*bip) != bi->bi_next) 966 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
974 BUG();
975 if (*bip) 967 if (*bip)
976 bi->bi_next = *bip; 968 bi->bi_next = *bip;
977 *bip = bi; 969 *bip = bi;
@@ -1906,8 +1898,7 @@ static void raid6d (mddev_t *mddev)
1906 1898
1907 list_del_init(first); 1899 list_del_init(first);
1908 atomic_inc(&sh->count); 1900 atomic_inc(&sh->count);
1909 if (atomic_read(&sh->count)!= 1) 1901 BUG_ON(atomic_read(&sh->count)!= 1);
1910 BUG();
1911 spin_unlock_irq(&conf->device_lock); 1902 spin_unlock_irq(&conf->device_lock);
1912 1903
1913 handled++; 1904 handled++;
@@ -2151,6 +2142,8 @@ static int run(mddev_t *mddev)
2151 } 2142 }
2152 2143
2153 /* Ok, everything is just fine now */ 2144 /* Ok, everything is just fine now */
2145 sysfs_create_group(&mddev->kobj, &raid6_attrs_group);
2146
2154 mddev->array_size = mddev->size * (mddev->raid_disks - 2); 2147 mddev->array_size = mddev->size * (mddev->raid_disks - 2);
2155 2148
2156 mddev->queue->unplug_fn = raid6_unplug_device; 2149 mddev->queue->unplug_fn = raid6_unplug_device;
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index baa9f58beffc..fffc711c260c 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -51,18 +51,18 @@ config VIDEO_TVEEPROM
51 tristate 51 tristate
52 52
53config USB_DABUSB 53config USB_DABUSB
54 tristate "DABUSB driver" 54 tristate "DABUSB driver"
55 depends on USB 55 depends on USB
56 ---help--- 56 ---help---
57 A Digital Audio Broadcasting (DAB) Receiver for USB and Linux 57 A Digital Audio Broadcasting (DAB) Receiver for USB and Linux
58 brought to you by the DAB-Team 58 brought to you by the DAB-Team
59 <http://wwwbode.cs.tum.edu/Par/arch/dab/>. This driver can be taken 59 <http://wwwbode.cs.tum.edu/Par/arch/dab/>. This driver can be taken
60 as an example for URB-based bulk, control, and isochronous 60 as an example for URB-based bulk, control, and isochronous
61 transactions. URB's are explained in 61 transactions. URB's are explained in
62 <Documentation/usb/URB.txt>. 62 <Documentation/usb/URB.txt>.
63 63
64 To compile this driver as a module, choose M here: the 64 To compile this driver as a module, choose M here: the
65 module will be called dabusb. 65 module will be called dabusb.
66 66
67endmenu 67endmenu
68 68
diff --git a/drivers/media/dvb/bt8xx/Kconfig b/drivers/media/dvb/bt8xx/Kconfig
index 2337b41714e0..376ca48f1d1d 100644
--- a/drivers/media/dvb/bt8xx/Kconfig
+++ b/drivers/media/dvb/bt8xx/Kconfig
@@ -7,6 +7,7 @@ config DVB_BT8XX
7 select DVB_CX24110 7 select DVB_CX24110
8 select DVB_OR51211 8 select DVB_OR51211
9 select DVB_LGDT330X 9 select DVB_LGDT330X
10 select FW_LOADER
10 help 11 help
11 Support for PCI cards based on the Bt8xx PCI bridge. Examples are 12 Support for PCI cards based on the Bt8xx PCI bridge. Examples are
12 the Nebula cards, the Pinnacle PCTV cards, the Twinhan DST cards, 13 the Nebula cards, the Pinnacle PCTV cards, the Twinhan DST cards,
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 09e96e9ddbdf..04578df3f249 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -141,12 +141,18 @@ static int dvb_dvr_open(struct inode *inode, struct file *file)
141 } 141 }
142 142
143 if ((file->f_flags & O_ACCMODE) == O_RDONLY) { 143 if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
144 void *mem = vmalloc(DVR_BUFFER_SIZE); 144 void *mem;
145 if (!dvbdev->readers) {
146 mutex_unlock(&dmxdev->mutex);
147 return -EBUSY;
148 }
149 mem = vmalloc(DVR_BUFFER_SIZE);
145 if (!mem) { 150 if (!mem) {
146 mutex_unlock(&dmxdev->mutex); 151 mutex_unlock(&dmxdev->mutex);
147 return -ENOMEM; 152 return -ENOMEM;
148 } 153 }
149 dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE); 154 dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE);
155 dvbdev->readers--;
150 } 156 }
151 157
152 if ((file->f_flags & O_ACCMODE) == O_WRONLY) { 158 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
@@ -184,6 +190,7 @@ static int dvb_dvr_release(struct inode *inode, struct file *file)
184 dmxdev->dvr_orig_fe); 190 dmxdev->dvr_orig_fe);
185 } 191 }
186 if ((file->f_flags & O_ACCMODE) == O_RDONLY) { 192 if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
193 dvbdev->readers++;
187 if (dmxdev->dvr_buffer.data) { 194 if (dmxdev->dvr_buffer.data) {
188 void *mem = dmxdev->dvr_buffer.data; 195 void *mem = dmxdev->dvr_buffer.data;
189 mb(); 196 mb();
@@ -1029,8 +1036,7 @@ static struct file_operations dvb_dvr_fops = {
1029 1036
1030static struct dvb_device dvbdev_dvr = { 1037static struct dvb_device dvbdev_dvr = {
1031 .priv = NULL, 1038 .priv = NULL,
1032 .users = 1, 1039 .readers = 1,
1033 .writers = 1,
1034 .fops = &dvb_dvr_fops 1040 .fops = &dvb_dvr_fops
1035}; 1041};
1036 1042
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 2c3ea8f95dcd..4f8f257e6795 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -105,6 +105,7 @@ struct dvb_frontend_private {
105 fe_status_t status; 105 fe_status_t status;
106 unsigned long tune_mode_flags; 106 unsigned long tune_mode_flags;
107 unsigned int delay; 107 unsigned int delay;
108 unsigned int reinitialise;
108 109
109 /* swzigzag values */ 110 /* swzigzag values */
110 unsigned int state; 111 unsigned int state;
@@ -121,6 +122,7 @@ struct dvb_frontend_private {
121 unsigned int check_wrapped; 122 unsigned int check_wrapped;
122}; 123};
123 124
125static void dvb_frontend_wakeup(struct dvb_frontend *fe);
124 126
125static void dvb_frontend_add_event(struct dvb_frontend *fe, fe_status_t status) 127static void dvb_frontend_add_event(struct dvb_frontend *fe, fe_status_t status)
126{ 128{
@@ -213,6 +215,15 @@ static void dvb_frontend_init(struct dvb_frontend *fe)
213 fe->ops->init(fe); 215 fe->ops->init(fe);
214} 216}
215 217
218void dvb_frontend_reinitialise(struct dvb_frontend *fe)
219{
220 struct dvb_frontend_private *fepriv = fe->frontend_priv;
221
222 fepriv->reinitialise = 1;
223 dvb_frontend_wakeup(fe);
224}
225EXPORT_SYMBOL(dvb_frontend_reinitialise);
226
216static void dvb_frontend_swzigzag_update_delay(struct dvb_frontend_private *fepriv, int locked) 227static void dvb_frontend_swzigzag_update_delay(struct dvb_frontend_private *fepriv, int locked)
217{ 228{
218 int q2; 229 int q2;
@@ -505,8 +516,8 @@ static int dvb_frontend_thread(void *data)
505 fepriv->quality = 0; 516 fepriv->quality = 0;
506 fepriv->delay = 3*HZ; 517 fepriv->delay = 3*HZ;
507 fepriv->status = 0; 518 fepriv->status = 0;
508 dvb_frontend_init(fe);
509 fepriv->wakeup = 0; 519 fepriv->wakeup = 0;
520 fepriv->reinitialise = 1;
510 521
511 while (1) { 522 while (1) {
512 up(&fepriv->sem); /* is locked when we enter the thread... */ 523 up(&fepriv->sem); /* is locked when we enter the thread... */
@@ -524,6 +535,11 @@ static int dvb_frontend_thread(void *data)
524 if (down_interruptible(&fepriv->sem)) 535 if (down_interruptible(&fepriv->sem))
525 break; 536 break;
526 537
538 if (fepriv->reinitialise) {
539 dvb_frontend_init(fe);
540 fepriv->reinitialise = 0;
541 }
542
527 /* do an iteration of the tuning loop */ 543 /* do an iteration of the tuning loop */
528 if (fe->ops->tune) { 544 if (fe->ops->tune) {
529 /* have we been asked to retune? */ 545 /* have we been asked to retune? */
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.h b/drivers/media/dvb/dvb-core/dvb_frontend.h
index d5aee5ad67a0..5926a3b745c9 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.h
@@ -112,6 +112,8 @@ extern int dvb_register_frontend(struct dvb_adapter* dvb,
112 112
113extern int dvb_unregister_frontend(struct dvb_frontend* fe); 113extern int dvb_unregister_frontend(struct dvb_frontend* fe);
114 114
115extern void dvb_frontend_reinitialise(struct dvb_frontend *fe);
116
115extern void dvb_frontend_sleep_until(struct timeval *waketime, u32 add_usec); 117extern void dvb_frontend_sleep_until(struct timeval *waketime, u32 add_usec);
116extern s32 timeval_usec_diff(struct timeval lasttime, struct timeval curtime); 118extern s32 timeval_usec_diff(struct timeval lasttime, struct timeval curtime);
117 119
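
dvb_frontend_reinitialise() never touches the hardware directly: it raises fepriv->reinitialise and wakes the frontend thread, which then runs dvb_frontend_init() on its next loop iteration. A stripped-down sketch of that defer-to-worker idea; the names and kthread plumbing here are illustrative, not the dvb-core internals:

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/err.h>

static DECLARE_WAIT_QUEUE_HEAD(fe_wq);
static int reinit_requested;
static struct task_struct *fe_task;

/* Cheap request path: flag the work and poke the worker thread. */
void request_reinit(void)
{
	reinit_requested = 1;
	wake_up_interruptible(&fe_wq);
}

static int fe_thread(void *data)
{
	while (!kthread_should_stop()) {
		wait_event_interruptible(fe_wq,
				reinit_requested || kthread_should_stop());
		if (reinit_requested) {
			reinit_requested = 0;
			/* The expensive hardware init runs here, in thread
			 * context; the real code serializes this step with
			 * the frontend semaphore. */
		}
	}
	return 0;
}

static int __init fe_init(void)
{
	fe_task = kthread_run(fe_thread, NULL, "fe-worker");
	return IS_ERR(fe_task) ? PTR_ERR(fe_task) : 0;
}
module_init(fe_init);

MODULE_LICENSE("GPL");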
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
index a14e737ec848..7edd6362b9cc 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.c
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
@@ -233,6 +233,45 @@ static struct dvb_usb_rc_key dvico_mce_rc_keys[] = {
233 { 0xfe, 0x4e, KEY_POWER }, 233 { 0xfe, 0x4e, KEY_POWER },
234}; 234};
235 235
236static struct dvb_usb_rc_key dvico_portable_rc_keys[] = {
237 { 0xfc, 0x02, KEY_SETUP }, /* Profile */
238 { 0xfc, 0x43, KEY_POWER2 },
239 { 0xfc, 0x06, KEY_EPG },
240 { 0xfc, 0x5a, KEY_BACK },
241 { 0xfc, 0x05, KEY_MENU },
242 { 0xfc, 0x47, KEY_INFO },
243 { 0xfc, 0x01, KEY_TAB },
244 { 0xfc, 0x42, KEY_PREVIOUSSONG },/* Replay */
245 { 0xfc, 0x49, KEY_VOLUMEUP },
246 { 0xfc, 0x09, KEY_VOLUMEDOWN },
247 { 0xfc, 0x54, KEY_CHANNELUP },
248 { 0xfc, 0x0b, KEY_CHANNELDOWN },
249 { 0xfc, 0x16, KEY_CAMERA },
250 { 0xfc, 0x40, KEY_TUNER }, /* ATV/DTV */
251 { 0xfc, 0x45, KEY_OPEN },
252 { 0xfc, 0x19, KEY_1 },
253 { 0xfc, 0x18, KEY_2 },
254 { 0xfc, 0x1b, KEY_3 },
255 { 0xfc, 0x1a, KEY_4 },
256 { 0xfc, 0x58, KEY_5 },
257 { 0xfc, 0x59, KEY_6 },
258 { 0xfc, 0x15, KEY_7 },
259 { 0xfc, 0x14, KEY_8 },
260 { 0xfc, 0x17, KEY_9 },
261 { 0xfc, 0x44, KEY_ANGLE }, /* Aspect */
262 { 0xfc, 0x55, KEY_0 },
263 { 0xfc, 0x07, KEY_ZOOM },
264 { 0xfc, 0x0a, KEY_REWIND },
265 { 0xfc, 0x08, KEY_PLAYPAUSE },
266 { 0xfc, 0x4b, KEY_FASTFORWARD },
267 { 0xfc, 0x5b, KEY_MUTE },
268 { 0xfc, 0x04, KEY_STOP },
269 { 0xfc, 0x56, KEY_RECORD },
270 { 0xfc, 0x57, KEY_POWER },
271 { 0xfc, 0x41, KEY_UNKNOWN }, /* INPUT */
272 { 0xfc, 0x00, KEY_UNKNOWN }, /* HD */
273};
274
236static int cxusb_dee1601_demod_init(struct dvb_frontend* fe) 275static int cxusb_dee1601_demod_init(struct dvb_frontend* fe)
237{ 276{
238 static u8 clock_config [] = { CLOCK_CTL, 0x38, 0x28 }; 277 static u8 clock_config [] = { CLOCK_CTL, 0x38, 0x28 };
@@ -511,6 +550,11 @@ static struct dvb_usb_properties cxusb_bluebird_lgh064f_properties = {
511 550
512 .i2c_algo = &cxusb_i2c_algo, 551 .i2c_algo = &cxusb_i2c_algo,
513 552
553 .rc_interval = 100,
554 .rc_key_map = dvico_portable_rc_keys,
555 .rc_key_map_size = ARRAY_SIZE(dvico_portable_rc_keys),
556 .rc_query = cxusb_rc_query,
557
514 .generic_bulk_ctrl_endpoint = 0x01, 558 .generic_bulk_ctrl_endpoint = 0x01,
515 /* parameter for the MPEG2-data transfer */ 559 /* parameter for the MPEG2-data transfer */
516 .urb = { 560 .urb = {
@@ -600,6 +644,11 @@ static struct dvb_usb_properties cxusb_bluebird_lgz201_properties = {
600 644
601 .i2c_algo = &cxusb_i2c_algo, 645 .i2c_algo = &cxusb_i2c_algo,
602 646
647 .rc_interval = 100,
648 .rc_key_map = dvico_portable_rc_keys,
649 .rc_key_map_size = ARRAY_SIZE(dvico_portable_rc_keys),
650 .rc_query = cxusb_rc_query,
651
603 .generic_bulk_ctrl_endpoint = 0x01, 652 .generic_bulk_ctrl_endpoint = 0x01,
604 /* parameter for the MPEG2-data transfer */ 653 /* parameter for the MPEG2-data transfer */
605 .urb = { 654 .urb = {
@@ -640,6 +689,11 @@ static struct dvb_usb_properties cxusb_bluebird_dtt7579_properties = {
640 689
641 .i2c_algo = &cxusb_i2c_algo, 690 .i2c_algo = &cxusb_i2c_algo,
642 691
692 .rc_interval = 100,
693 .rc_key_map = dvico_portable_rc_keys,
694 .rc_key_map_size = ARRAY_SIZE(dvico_portable_rc_keys),
695 .rc_query = cxusb_rc_query,
696
643 .generic_bulk_ctrl_endpoint = 0x01, 697 .generic_bulk_ctrl_endpoint = 0x01,
644 /* parameter for the MPEG2-data transfer */ 698 /* parameter for the MPEG2-data transfer */
645 .urb = { 699 .urb = {
diff --git a/drivers/media/dvb/dvb-usb/dtt200u.c b/drivers/media/dvb/dvb-usb/dtt200u.c
index 12ebaf8bddca..70afcfd141ca 100644
--- a/drivers/media/dvb/dvb-usb/dtt200u.c
+++ b/drivers/media/dvb/dvb-usb/dtt200u.c
@@ -94,12 +94,14 @@ static int dtt200u_frontend_attach(struct dvb_usb_device *d)
94 94
95static struct dvb_usb_properties dtt200u_properties; 95static struct dvb_usb_properties dtt200u_properties;
96static struct dvb_usb_properties wt220u_properties; 96static struct dvb_usb_properties wt220u_properties;
97static struct dvb_usb_properties wt220u_zl0353_properties;
97 98
98static int dtt200u_usb_probe(struct usb_interface *intf, 99static int dtt200u_usb_probe(struct usb_interface *intf,
99 const struct usb_device_id *id) 100 const struct usb_device_id *id)
100{ 101{
101 if (dvb_usb_device_init(intf,&dtt200u_properties,THIS_MODULE,NULL) == 0 || 102 if (dvb_usb_device_init(intf,&dtt200u_properties,THIS_MODULE,NULL) == 0 ||
102 dvb_usb_device_init(intf,&wt220u_properties,THIS_MODULE,NULL) == 0) 103 dvb_usb_device_init(intf,&wt220u_properties,THIS_MODULE,NULL) == 0 ||
104 dvb_usb_device_init(intf,&wt220u_zl0353_properties,THIS_MODULE,NULL) == 0)
103 return 0; 105 return 0;
104 106
105 return -ENODEV; 107 return -ENODEV;
@@ -110,6 +112,8 @@ static struct usb_device_id dtt200u_usb_table [] = {
110 { USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_DTT200U_WARM) }, 112 { USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_DTT200U_WARM) },
111 { USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_WT220U_COLD) }, 113 { USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_WT220U_COLD) },
112 { USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_WT220U_WARM) }, 114 { USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_WT220U_WARM) },
115 { USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_WT220U_ZL0353_COLD) },
116 { USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_WT220U_ZL0353_WARM) },
113 { 0 }, 117 { 0 },
114}; 118};
115MODULE_DEVICE_TABLE(usb, dtt200u_usb_table); 119MODULE_DEVICE_TABLE(usb, dtt200u_usb_table);
@@ -196,6 +200,47 @@ static struct dvb_usb_properties wt220u_properties = {
196 } 200 }
197}; 201};
198 202
203static struct dvb_usb_properties wt220u_zl0353_properties = {
204 .caps = DVB_USB_HAS_PID_FILTER | DVB_USB_NEED_PID_FILTERING,
205 .pid_filter_count = 15,
206
207 .usb_ctrl = CYPRESS_FX2,
208 .firmware = "dvb-usb-wt220u-zl0353-01.fw",
209
210 .power_ctrl = dtt200u_power_ctrl,
211 .streaming_ctrl = dtt200u_streaming_ctrl,
212 .pid_filter = dtt200u_pid_filter,
213 .frontend_attach = dtt200u_frontend_attach,
214
215 .rc_interval = 300,
216 .rc_key_map = dtt200u_rc_keys,
217 .rc_key_map_size = ARRAY_SIZE(dtt200u_rc_keys),
218 .rc_query = dtt200u_rc_query,
219
220 .generic_bulk_ctrl_endpoint = 0x01,
221
222 /* parameter for the MPEG2-data transfer */
223 .urb = {
224 .type = DVB_USB_BULK,
225 .count = 7,
226 .endpoint = 0x02,
227 .u = {
228 .bulk = {
229 .buffersize = 4096,
230 }
231 }
232 },
233
234 .num_device_descs = 1,
235 .devices = {
236 { .name = "WideView WT-220U PenType Receiver (based on ZL353)",
237 .cold_ids = { &dtt200u_usb_table[4], NULL },
238 .warm_ids = { &dtt200u_usb_table[5], NULL },
239 },
240 { NULL },
241 }
242};
243
199/* usb specific object needed to register this driver with the usb subsystem */ 244/* usb specific object needed to register this driver with the usb subsystem */
200static struct usb_driver dtt200u_usb_driver = { 245static struct usb_driver dtt200u_usb_driver = {
201 .name = "dvb_usb_dtt200u", 246 .name = "dvb_usb_dtt200u",
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index 4a1b9e77e339..cb239049b098 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -83,6 +83,8 @@
83#define USB_PID_DTT200U_WARM 0x0301 83#define USB_PID_DTT200U_WARM 0x0301
84#define USB_PID_WT220U_COLD 0x0222 84#define USB_PID_WT220U_COLD 0x0222
85#define USB_PID_WT220U_WARM 0x0221 85#define USB_PID_WT220U_WARM 0x0221
86#define USB_PID_WT220U_ZL0353_COLD 0x022a
87#define USB_PID_WT220U_ZL0353_WARM 0x022b
86#define USB_PID_WINTV_NOVA_T_USB2_COLD 0x9300 88#define USB_PID_WINTV_NOVA_T_USB2_COLD 0x9300
87#define USB_PID_WINTV_NOVA_T_USB2_WARM 0x9301 89#define USB_PID_WINTV_NOVA_T_USB2_WARM 0x9301
88#define USB_PID_NEBULA_DIGITV 0x0201 90#define USB_PID_NEBULA_DIGITV 0x0201
diff --git a/drivers/media/dvb/dvb-usb/vp702x-fe.c b/drivers/media/dvb/dvb-usb/vp702x-fe.c
index b6d95e1c9c52..2a89f8c5da99 100644
--- a/drivers/media/dvb/dvb-usb/vp702x-fe.c
+++ b/drivers/media/dvb/dvb-usb/vp702x-fe.c
@@ -147,8 +147,9 @@ static int vp702x_fe_set_frontend(struct dvb_frontend* fe,
147 cmd[4] = (sr >> 4) & 0xff; 147 cmd[4] = (sr >> 4) & 0xff;
148 cmd[5] = (sr << 4) & 0xf0; 148 cmd[5] = (sr << 4) & 0xf0;
149 149
150 deb_fe("setting frontend to: %u -> %u (%x) LNB-based GHz, symbolrate: %d -> %Lu (%Lx)\n", 150 deb_fe("setting frontend to: %u -> %u (%x) LNB-based GHz, symbolrate: %d -> %lu (%lx)\n",
151 fep->frequency,freq,freq, fep->u.qpsk.symbol_rate, sr, sr); 151 fep->frequency,freq,freq, fep->u.qpsk.symbol_rate,
152 (unsigned long) sr, (unsigned long) sr);
152 153
153/* if (fep->inversion == INVERSION_ON) 154/* if (fep->inversion == INVERSION_ON)
154 cmd[6] |= 0x80; */ 155 cmd[6] |= 0x80; */
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index 94233168d241..37d5e0af1683 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -181,6 +181,11 @@ config DVB_OR51211
181 help 181 help
182 An ATSC 8VSB tuner module. Say Y when you want to support this frontend. 182 An ATSC 8VSB tuner module. Say Y when you want to support this frontend.
183 183
184 This driver needs external firmware. Please use the command
185 "<kerneldir>/Documentation/dvb/get_dvb_firmware or51211" to
186 download it, and then copy it to /usr/lib/hotplug/firmware
187 or /lib/firmware (depending on configuration of firmware hotplug).
188
184config DVB_OR51132 189config DVB_OR51132
185 tristate "Oren OR51132 based" 190 tristate "Oren OR51132 based"
186 depends on DVB_CORE 191 depends on DVB_CORE
@@ -189,6 +194,13 @@ config DVB_OR51132
189 An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want 194 An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
190 to support this frontend. 195 to support this frontend.
191 196
197 This driver needs external firmware. Please use the commands
198 "<kerneldir>/Documentation/dvb/get_dvb_firmware or51132_vsb" and/or
199 "<kerneldir>/Documentation/dvb/get_dvb_firmware or51132_qam" to
200 download firmwares for 8VSB and QAM64/256, respectively. Copy them to
201 /usr/lib/hotplug/firmware or /lib/firmware (depending on
202 configuration of firmware hotplug).
203
192config DVB_BCM3510 204config DVB_BCM3510
193 tristate "Broadcom BCM3510" 205 tristate "Broadcom BCM3510"
194 depends on DVB_CORE 206 depends on DVB_CORE
diff --git a/drivers/media/dvb/frontends/tda1004x.c b/drivers/media/dvb/frontends/tda1004x.c
index 8e8df7b4ca0e..b83dafa4e12c 100644
--- a/drivers/media/dvb/frontends/tda1004x.c
+++ b/drivers/media/dvb/frontends/tda1004x.c
@@ -52,7 +52,6 @@ struct tda1004x_state {
52 struct dvb_frontend frontend; 52 struct dvb_frontend frontend;
53 53
54 /* private demod data */ 54 /* private demod data */
55 u8 initialised;
56 enum tda1004x_demod demod_type; 55 enum tda1004x_demod demod_type;
57}; 56};
58 57
@@ -594,9 +593,6 @@ static int tda10045_init(struct dvb_frontend* fe)
594 593
595 dprintk("%s\n", __FUNCTION__); 594 dprintk("%s\n", __FUNCTION__);
596 595
597 if (state->initialised)
598 return 0;
599
600 if (tda10045_fwupload(fe)) { 596 if (tda10045_fwupload(fe)) {
601 printk("tda1004x: firmware upload failed\n"); 597 printk("tda1004x: firmware upload failed\n");
602 return -EIO; 598 return -EIO;
@@ -626,7 +622,6 @@ static int tda10045_init(struct dvb_frontend* fe)
626 622
627 tda1004x_write_mask(state, 0x1f, 0x01, state->config->invert_oclk); 623 tda1004x_write_mask(state, 0x1f, 0x01, state->config->invert_oclk);
628 624
629 state->initialised = 1;
630 return 0; 625 return 0;
631} 626}
632 627
@@ -635,9 +630,6 @@ static int tda10046_init(struct dvb_frontend* fe)
635 struct tda1004x_state* state = fe->demodulator_priv; 630 struct tda1004x_state* state = fe->demodulator_priv;
636 dprintk("%s\n", __FUNCTION__); 631 dprintk("%s\n", __FUNCTION__);
637 632
638 if (state->initialised)
639 return 0;
640
641 if (tda10046_fwupload(fe)) { 633 if (tda10046_fwupload(fe)) {
642 printk("tda1004x: firmware upload failed\n"); 634 printk("tda1004x: firmware upload failed\n");
643 return -EIO; 635 return -EIO;
@@ -697,7 +689,6 @@ static int tda10046_init(struct dvb_frontend* fe)
697 // tda1004x_write_mask(state, 0x50, 0x80, 0x80); // handle out of guard echoes 689 // tda1004x_write_mask(state, 0x50, 0x80, 0x80); // handle out of guard echoes
698 tda1004x_write_mask(state, 0x3a, 0x80, state->config->invert_oclk << 7); 690 tda1004x_write_mask(state, 0x3a, 0x80, state->config->invert_oclk << 7);
699 691
700 state->initialised = 1;
701 return 0; 692 return 0;
702} 693}
703 694
@@ -1207,7 +1198,6 @@ static int tda1004x_sleep(struct dvb_frontend* fe)
1207 tda1004x_write_mask(state, TDA1004X_CONFC4, 1, 1); 1198 tda1004x_write_mask(state, TDA1004X_CONFC4, 1, 1);
1208 break; 1199 break;
1209 } 1200 }
1210 state->initialised = 0;
1211 1201
1212 return 0; 1202 return 0;
1213} 1203}
@@ -1271,7 +1261,6 @@ struct dvb_frontend* tda10045_attach(const struct tda1004x_config* config,
1271 state->config = config; 1261 state->config = config;
1272 state->i2c = i2c; 1262 state->i2c = i2c;
1273 memcpy(&state->ops, &tda10045_ops, sizeof(struct dvb_frontend_ops)); 1263 memcpy(&state->ops, &tda10045_ops, sizeof(struct dvb_frontend_ops));
1274 state->initialised = 0;
1275 state->demod_type = TDA1004X_DEMOD_TDA10045; 1264 state->demod_type = TDA1004X_DEMOD_TDA10045;
1276 1265
1277 /* check if the demod is there */ 1266 /* check if the demod is there */
@@ -1330,7 +1319,6 @@ struct dvb_frontend* tda10046_attach(const struct tda1004x_config* config,
1330 state->config = config; 1319 state->config = config;
1331 state->i2c = i2c; 1320 state->i2c = i2c;
1332 memcpy(&state->ops, &tda10046_ops, sizeof(struct dvb_frontend_ops)); 1321 memcpy(&state->ops, &tda10046_ops, sizeof(struct dvb_frontend_ops));
1333 state->initialised = 0;
1334 state->demod_type = TDA1004X_DEMOD_TDA10046; 1322 state->demod_type = TDA1004X_DEMOD_TDA10046;
1335 1323
1336 /* check if the demod is there */ 1324 /* check if the demod is there */
diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c
index 840efec32cb6..d028245c8eed 100644
--- a/drivers/media/dvb/ttpci/av7110.c
+++ b/drivers/media/dvb/ttpci/av7110.c
@@ -87,6 +87,7 @@ static int volume = 255;
87static int budgetpatch; 87static int budgetpatch;
88static int wss_cfg_4_3 = 0x4008; 88static int wss_cfg_4_3 = 0x4008;
89static int wss_cfg_16_9 = 0x0007; 89static int wss_cfg_16_9 = 0x0007;
90static int tv_standard;
90 91
91module_param_named(debug, av7110_debug, int, 0644); 92module_param_named(debug, av7110_debug, int, 0644);
92MODULE_PARM_DESC(debug, "debug level (bitmask, default 0)"); 93MODULE_PARM_DESC(debug, "debug level (bitmask, default 0)");
@@ -109,6 +110,8 @@ module_param(wss_cfg_4_3, int, 0444);
109MODULE_PARM_DESC(wss_cfg_4_3, "WSS 4:3 - default 0x4008 - bit 15: disable, 14: burst mode, 13..0: wss data"); 110MODULE_PARM_DESC(wss_cfg_4_3, "WSS 4:3 - default 0x4008 - bit 15: disable, 14: burst mode, 13..0: wss data");
110module_param(wss_cfg_16_9, int, 0444); 111module_param(wss_cfg_16_9, int, 0444);
111MODULE_PARM_DESC(wss_cfg_16_9, "WSS 16:9 - default 0x0007 - bit 15: disable, 14: burst mode, 13..0: wss data"); 112MODULE_PARM_DESC(wss_cfg_16_9, "WSS 16:9 - default 0x0007 - bit 15: disable, 14: burst mode, 13..0: wss data");
113module_param(tv_standard, int, 0444);
114MODULE_PARM_DESC(tv_standard, "TV standard: 0 PAL (default), 1 NTSC");
112 115
113static void restart_feeds(struct av7110 *av7110); 116static void restart_feeds(struct av7110 *av7110);
114 117
@@ -2123,7 +2126,7 @@ static int frontend_init(struct av7110 *av7110)
2123 read_pwm(av7110)); 2126 read_pwm(av7110));
2124 break; 2127 break;
2125 case 0x0003: 2128 case 0x0003:
2126 /* Haupauge DVB-C 2.1 VES1820/ALPS TDBE2 */ 2129 /* Hauppauge DVB-C 2.1 VES1820/ALPS TDBE2 */
2127 av7110->fe = ves1820_attach(&alps_tdbe2_config, &av7110->i2c_adap, 2130 av7110->fe = ves1820_attach(&alps_tdbe2_config, &av7110->i2c_adap,
2128 read_pwm(av7110)); 2131 read_pwm(av7110));
2129 break; 2132 break;
@@ -2543,6 +2546,9 @@ static int __devinit av7110_attach(struct saa7146_dev* dev,
2543 av7110->osdwin = 1; 2546 av7110->osdwin = 1;
2544 mutex_init(&av7110->osd_mutex); 2547 mutex_init(&av7110->osd_mutex);
2545 2548
2549 /* TV standard */
2550 av7110->vidmode = tv_standard == 1 ? VIDEO_MODE_NTSC : VIDEO_MODE_PAL;
2551
2546 /* ARM "watchdog" */ 2552 /* ARM "watchdog" */
2547 init_waitqueue_head(&av7110->arm_wait); 2553 init_waitqueue_head(&av7110->arm_wait);
2548 av7110->arm_thread = NULL; 2554 av7110->arm_thread = NULL;
diff --git a/drivers/media/dvb/ttpci/av7110_av.c b/drivers/media/dvb/ttpci/av7110_av.c
index 400facec7407..2eff09f638d3 100644
--- a/drivers/media/dvb/ttpci/av7110_av.c
+++ b/drivers/media/dvb/ttpci/av7110_av.c
@@ -1479,8 +1479,6 @@ int av7110_av_init(struct av7110 *av7110)
1479 void (*play[])(u8 *, int, void *) = { play_audio_cb, play_video_cb }; 1479 void (*play[])(u8 *, int, void *) = { play_audio_cb, play_video_cb };
1480 int i, ret; 1480 int i, ret;
1481 1481
1482 av7110->vidmode = VIDEO_MODE_PAL;
1483
1484 for (i = 0; i < 2; i++) { 1482 for (i = 0; i < 2; i++) {
1485 struct ipack *ipack = av7110->ipack + i; 1483 struct ipack *ipack = av7110->ipack + i;
1486 1484
diff --git a/drivers/media/dvb/ttpci/budget-av.c b/drivers/media/dvb/ttpci/budget-av.c
index 9dd4745f5312..8efe3ce5f66c 100644
--- a/drivers/media/dvb/ttpci/budget-av.c
+++ b/drivers/media/dvb/ttpci/budget-av.c
@@ -60,11 +60,11 @@ struct budget_av {
60 struct dvb_ca_en50221 ca; 60 struct dvb_ca_en50221 ca;
61}; 61};
62 62
63/* GPIO CI Connections: 63/* GPIO Connections:
64 * 0 - Vcc/Reset (Reset is controlled by capacitor) 64 * 0 - Vcc/Reset (Reset is controlled by capacitor). Resets the frontend *AS WELL*!
65 * 1 - Attribute Memory 65 * 1 - CI memory select 0=>IO memory, 1=>Attribute Memory
66 * 2 - Card Enable (Active Low) 66 * 2 - CI Card Enable (Active Low)
67 * 3 - Card Detect 67 * 3 - CI Card Detect
68 */ 68 */
69 69
70/**************************************************************************** 70/****************************************************************************
@@ -214,6 +214,9 @@ static int ciintf_slot_reset(struct dvb_ca_en50221 *ca, int slot)
214 while (--timeout > 0 && ciintf_read_attribute_mem(ca, slot, 0) != 0x1d) 214 while (--timeout > 0 && ciintf_read_attribute_mem(ca, slot, 0) != 0x1d)
215 msleep(100); 215 msleep(100);
216 216
217 /* reinitialise the frontend */
218 dvb_frontend_reinitialise(budget_av->budget.dvb_frontend);
219
217 if (timeout <= 0) 220 if (timeout <= 0)
218 { 221 {
219 printk(KERN_ERR "budget-av: cam reset failed (timeout).\n"); 222 printk(KERN_ERR "budget-av: cam reset failed (timeout).\n");
diff --git a/drivers/media/dvb/ttpci/budget-core.c b/drivers/media/dvb/ttpci/budget-core.c
index 633e68c341c8..ea2066d461fc 100644
--- a/drivers/media/dvb/ttpci/budget-core.c
+++ b/drivers/media/dvb/ttpci/budget-core.c
@@ -39,9 +39,21 @@
39#include "budget.h" 39#include "budget.h"
40#include "ttpci-eeprom.h" 40#include "ttpci-eeprom.h"
41 41
42#define TS_WIDTH (2 * TS_SIZE)
43#define TS_WIDTH_ACTIVY TS_SIZE
44#define TS_HEIGHT_MASK 0xf00
45#define TS_HEIGHT_MASK_ACTIVY 0xc00
46#define TS_MIN_BUFSIZE_K 188
47#define TS_MAX_BUFSIZE_K 1410
48#define TS_MAX_BUFSIZE_K_ACTIVY 564
49#define BUFFER_WARNING_WAIT (30*HZ)
50
42int budget_debug; 51int budget_debug;
52static int dma_buffer_size = TS_MIN_BUFSIZE_K;
43module_param_named(debug, budget_debug, int, 0644); 53module_param_named(debug, budget_debug, int, 0644);
54module_param_named(bufsize, dma_buffer_size, int, 0444);
44MODULE_PARM_DESC(debug, "Turn on/off budget debugging (default:off)."); 55MODULE_PARM_DESC(debug, "Turn on/off budget debugging (default:off).");
56MODULE_PARM_DESC(bufsize, "DMA buffer size in KB, default: 188, min: 188, max: 1410 (Activy: 564)");
45 57
46/**************************************************************************** 58/****************************************************************************
47 * TT budget / WinTV Nova 59 * TT budget / WinTV Nova
@@ -70,11 +82,10 @@ static int start_ts_capture(struct budget *budget)
70 82
71 saa7146_write(dev, MC1, MASK_20); // DMA3 off 83 saa7146_write(dev, MC1, MASK_20); // DMA3 off
72 84
73 memset(budget->grabbing, 0x00, TS_HEIGHT * TS_WIDTH); 85 memset(budget->grabbing, 0x00, budget->buffer_size);
74 86
75 saa7146_write(dev, PCI_BT_V1, 0x001c0000 | (saa7146_read(dev, PCI_BT_V1) & ~0x001f0000)); 87 saa7146_write(dev, PCI_BT_V1, 0x001c0000 | (saa7146_read(dev, PCI_BT_V1) & ~0x001f0000));
76 88
77 budget->tsf = 0xff;
78 budget->ttbp = 0; 89 budget->ttbp = 0;
79 90
80 /* 91 /*
@@ -115,16 +126,12 @@ static int start_ts_capture(struct budget *budget)
115 126
116 saa7146_write(dev, BASE_ODD3, 0); 127 saa7146_write(dev, BASE_ODD3, 0);
117 saa7146_write(dev, BASE_EVEN3, 0); 128 saa7146_write(dev, BASE_EVEN3, 0);
118 saa7146_write(dev, PROT_ADDR3, TS_WIDTH * TS_HEIGHT); 129 saa7146_write(dev, PROT_ADDR3, budget->buffer_size);
119 saa7146_write(dev, BASE_PAGE3, budget->pt.dma | ME1 | 0x90); 130 saa7146_write(dev, BASE_PAGE3, budget->pt.dma | ME1 | 0x90);
120 131
121 if (budget->card->type == BUDGET_FS_ACTIVY) { 132 saa7146_write(dev, PITCH3, budget->buffer_width);
122 saa7146_write(dev, PITCH3, TS_WIDTH / 2); 133 saa7146_write(dev, NUM_LINE_BYTE3,
123 saa7146_write(dev, NUM_LINE_BYTE3, ((TS_HEIGHT * 2) << 16) | (TS_WIDTH / 2)); 134 (budget->buffer_height << 16) | budget->buffer_width);
124 } else {
125 saa7146_write(dev, PITCH3, TS_WIDTH);
126 saa7146_write(dev, NUM_LINE_BYTE3, (TS_HEIGHT << 16) | TS_WIDTH);
127 }
128 135
129 saa7146_write(dev, MC2, (MASK_04 | MASK_20)); 136 saa7146_write(dev, MC2, (MASK_04 | MASK_20));
130 137
@@ -141,11 +148,12 @@ static void vpeirq(unsigned long data)
141 u8 *mem = (u8 *) (budget->grabbing); 148 u8 *mem = (u8 *) (budget->grabbing);
142 u32 olddma = budget->ttbp; 149 u32 olddma = budget->ttbp;
143 u32 newdma = saa7146_read(budget->dev, PCI_VDP3); 150 u32 newdma = saa7146_read(budget->dev, PCI_VDP3);
151 u32 count;
144 152
145 /* nearest lower position divisible by 188 */ 153 /* nearest lower position divisible by 188 */
146 newdma -= newdma % 188; 154 newdma -= newdma % 188;
147 155
148 if (newdma >= TS_BUFLEN) 156 if (newdma >= budget->buffer_size)
149 return; 157 return;
150 158
151 budget->ttbp = newdma; 159 budget->ttbp = newdma;
@@ -154,11 +162,24 @@ static void vpeirq(unsigned long data)
154 return; 162 return;
155 163
156 if (newdma > olddma) { /* no wraparound, dump olddma..newdma */ 164 if (newdma > olddma) { /* no wraparound, dump olddma..newdma */
157 dvb_dmx_swfilter_packets(&budget->demux, mem + olddma, (newdma - olddma) / 188); 165 count = newdma - olddma;
166 dvb_dmx_swfilter_packets(&budget->demux, mem + olddma, count / 188);
158 } else { /* wraparound, dump olddma..buflen and 0..newdma */ 167 } else { /* wraparound, dump olddma..buflen and 0..newdma */
159 dvb_dmx_swfilter_packets(&budget->demux, mem + olddma, (TS_BUFLEN - olddma) / 188); 168 count = budget->buffer_size - olddma;
169 dvb_dmx_swfilter_packets(&budget->demux, mem + olddma, count / 188);
170 count += newdma;
160 dvb_dmx_swfilter_packets(&budget->demux, mem, newdma / 188); 171 dvb_dmx_swfilter_packets(&budget->demux, mem, newdma / 188);
161 } 172 }
173
174 if (count > budget->buffer_warning_threshold)
175 budget->buffer_warnings++;
176
177 if (budget->buffer_warnings && time_after(jiffies, budget->buffer_warning_time)) {
178 printk("%s %s: used %d times >80%% of buffer (%u bytes now)\n",
179 budget->dev->name, __FUNCTION__, budget->buffer_warnings, count);
180 budget->buffer_warning_time = jiffies + BUFFER_WARNING_WAIT;
181 budget->buffer_warnings = 0;
182 }
162} 183}
163 184
164 185
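
The vpeirq() changes above boil down to tracking how many bytes each interrupt actually consumed so the new >80% warning can fire. A minimal helper-style sketch of that wraparound arithmetic (the helper itself is illustrative; the driver keeps the logic inline in the tasklet):

/* Illustrative helper, not part of the patch: filter the freshly DMA'd
 * region and return how many bytes it covered, handling wraparound. */
static u32 filter_new_packets(struct budget *budget, u8 *mem,
			      u32 olddma, u32 newdma)
{
	u32 count;

	if (newdma > olddma) {
		/* no wraparound: one contiguous run olddma..newdma */
		count = newdma - olddma;
		dvb_dmx_swfilter_packets(&budget->demux, mem + olddma, count / 188);
	} else {
		/* wraparound: tail of the buffer first, then 0..newdma */
		count = budget->buffer_size - olddma;
		dvb_dmx_swfilter_packets(&budget->demux, mem + olddma, count / 188);
		count += newdma;
		dvb_dmx_swfilter_packets(&budget->demux, mem, newdma / 188);
	}
	return count;	/* caller compares against buffer_warning_threshold */
}
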
@@ -341,9 +362,10 @@ int ttpci_budget_init(struct budget *budget, struct saa7146_dev *dev,
341 struct saa7146_pci_extension_data *info, 362 struct saa7146_pci_extension_data *info,
342 struct module *owner) 363 struct module *owner)
343{ 364{
344 int length = TS_WIDTH * TS_HEIGHT;
345 int ret = 0; 365 int ret = 0;
346 struct budget_info *bi = info->ext_priv; 366 struct budget_info *bi = info->ext_priv;
367 int max_bufsize;
368 int height_mask;
347 369
348 memset(budget, 0, sizeof(struct budget)); 370 memset(budget, 0, sizeof(struct budget));
349 371
@@ -352,6 +374,32 @@ int ttpci_budget_init(struct budget *budget, struct saa7146_dev *dev,
352 budget->card = bi; 374 budget->card = bi;
353 budget->dev = (struct saa7146_dev *) dev; 375 budget->dev = (struct saa7146_dev *) dev;
354 376
377 if (budget->card->type == BUDGET_FS_ACTIVY) {
378 budget->buffer_width = TS_WIDTH_ACTIVY;
379 max_bufsize = TS_MAX_BUFSIZE_K_ACTIVY;
380 height_mask = TS_HEIGHT_MASK_ACTIVY;
381 } else {
382 budget->buffer_width = TS_WIDTH;
383 max_bufsize = TS_MAX_BUFSIZE_K;
384 height_mask = TS_HEIGHT_MASK;
385 }
386
387 if (dma_buffer_size < TS_MIN_BUFSIZE_K)
388 dma_buffer_size = TS_MIN_BUFSIZE_K;
389 else if (dma_buffer_size > max_bufsize)
390 dma_buffer_size = max_bufsize;
391
392 budget->buffer_height = dma_buffer_size * 1024 / budget->buffer_width;
393 budget->buffer_height &= height_mask;
394 budget->buffer_size = budget->buffer_height * budget->buffer_width;
395 budget->buffer_warning_threshold = budget->buffer_size * 80/100;
396 budget->buffer_warnings = 0;
397 budget->buffer_warning_time = jiffies;
398
399 dprintk(2, "%s: width = %d, height = %d\n",
400 budget->dev->name, budget->buffer_width, budget->buffer_height);
401 printk("%s: dma buffer size %u\n", budget->dev->name, budget->buffer_size);
402
355 dvb_register_adapter(&budget->dvb_adapter, budget->card->name, owner); 403 dvb_register_adapter(&budget->dvb_adapter, budget->card->name, owner);
356 404
357 /* set dd1 stream a & b */ 405 /* set dd1 stream a & b */
@@ -392,7 +440,7 @@ int ttpci_budget_init(struct budget *budget, struct saa7146_dev *dev,
392 ttpci_eeprom_parse_mac(&budget->i2c_adap, budget->dvb_adapter.proposed_mac); 440 ttpci_eeprom_parse_mac(&budget->i2c_adap, budget->dvb_adapter.proposed_mac);
393 441
394 if (NULL == 442 if (NULL ==
395 (budget->grabbing = saa7146_vmalloc_build_pgtable(dev->pci, length, &budget->pt))) { 443 (budget->grabbing = saa7146_vmalloc_build_pgtable(dev->pci, budget->buffer_size, &budget->pt))) {
396 ret = -ENOMEM; 444 ret = -ENOMEM;
397 goto err; 445 goto err;
398 } 446 }
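
The ttpci_budget_init() hunk above is where the new `bufsize` module parameter becomes a DMA rectangle: the KB value is clamped to the card's limits, converted into a line count, and masked so the saa7146 height register stays valid. A condensed sketch of that calculation, assuming the constants defined at the top of budget-core.c and leaving out the dprintk/printk reporting:

/* Sketch of the buffer geometry setup, not the literal driver code. */
static void setup_dma_geometry(struct budget *budget, int requested_kb)
{
	int max_kb = TS_MAX_BUFSIZE_K;
	int height_mask = TS_HEIGHT_MASK;

	budget->buffer_width = TS_WIDTH;
	if (budget->card->type == BUDGET_FS_ACTIVY) {
		budget->buffer_width = TS_WIDTH_ACTIVY;
		max_kb = TS_MAX_BUFSIZE_K_ACTIVY;
		height_mask = TS_HEIGHT_MASK_ACTIVY;
	}

	/* clamp the request to what this card can address */
	if (requested_kb < TS_MIN_BUFSIZE_K)
		requested_kb = TS_MIN_BUFSIZE_K;
	else if (requested_kb > max_kb)
		requested_kb = max_kb;

	/* derive the line count, rounded down to what the mask allows */
	budget->buffer_height = requested_kb * 1024 / budget->buffer_width;
	budget->buffer_height &= height_mask;
	budget->buffer_size = budget->buffer_height * budget->buffer_width;

	/* warn later if one interrupt ever covers more than 80% of it */
	budget->buffer_warning_threshold = budget->buffer_size * 80 / 100;
	budget->buffer_warnings = 0;
	budget->buffer_warning_time = jiffies;
}
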
diff --git a/drivers/media/dvb/ttpci/budget-patch.c b/drivers/media/dvb/ttpci/budget-patch.c
index 9fc9185a8426..1b3aaac5e763 100644
--- a/drivers/media/dvb/ttpci/budget-patch.c
+++ b/drivers/media/dvb/ttpci/budget-patch.c
@@ -577,6 +577,17 @@ static int budget_patch_attach (struct saa7146_dev* dev, struct saa7146_pci_exte
577 saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO); 577 saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO);
578 // Set RPS1 Address register to point to RPS code (r108 p42) 578 // Set RPS1 Address register to point to RPS code (r108 p42)
579 saa7146_write(dev, RPS_ADDR1, dev->d_rps1.dma_handle); 579 saa7146_write(dev, RPS_ADDR1, dev->d_rps1.dma_handle);
580
581 if (!(budget = kmalloc (sizeof(struct budget_patch), GFP_KERNEL)))
582 return -ENOMEM;
583
584 dprintk(2, "budget: %p\n", budget);
585
586 if ((err = ttpci_budget_init (budget, dev, info, THIS_MODULE))) {
587 kfree (budget);
588 return err;
589 }
590
580 // Set Source Line Counter Threshold, using BRS (rCC p43) 591 // Set Source Line Counter Threshold, using BRS (rCC p43)
581 // It generates HS event every TS_HEIGHT lines 592 // It generates HS event every TS_HEIGHT lines
582 // this is related to TS_WIDTH set in register 593 // this is related to TS_WIDTH set in register
@@ -585,24 +596,13 @@ static int budget_patch_attach (struct saa7146_dev* dev, struct saa7146_pci_exte
585 //,then RPS_THRESH1 596 //,then RPS_THRESH1
586 // should be set to trigger every TS_HEIGHT (512) lines. 597 // should be set to trigger every TS_HEIGHT (512) lines.
587 // 598 //
588 saa7146_write(dev, RPS_THRESH1, (TS_HEIGHT*1) | MASK_12 ); 599 saa7146_write(dev, RPS_THRESH1, budget->buffer_height | MASK_12 );
589 600
590 // saa7146_write(dev, RPS_THRESH0, ((TS_HEIGHT/2)<<16) |MASK_28| (TS_HEIGHT/2) |MASK_12 ); 601 // saa7146_write(dev, RPS_THRESH0, ((TS_HEIGHT/2)<<16) |MASK_28| (TS_HEIGHT/2) |MASK_12 );
591 // Enable RPS1 (rFC p33) 602 // Enable RPS1 (rFC p33)
592 saa7146_write(dev, MC1, (MASK_13 | MASK_29)); 603 saa7146_write(dev, MC1, (MASK_13 | MASK_29));
593 604
594 605
595 if (!(budget = kmalloc (sizeof(struct budget_patch), GFP_KERNEL)))
596 return -ENOMEM;
597
598 dprintk(2, "budget: %p\n", budget);
599
600 if ((err = ttpci_budget_init (budget, dev, info, THIS_MODULE))) {
601 kfree (budget);
602 return err;
603 }
604
605
606 dev->ext_priv = budget; 606 dev->ext_priv = budget;
607 607
608 budget->dvb_adapter.priv = budget; 608 budget->dvb_adapter.priv = budget;
diff --git a/drivers/media/dvb/ttpci/budget.h b/drivers/media/dvb/ttpci/budget.h
index 4ac0f4d08025..ecea3a13030e 100644
--- a/drivers/media/dvb/ttpci/budget.h
+++ b/drivers/media/dvb/ttpci/budget.h
@@ -58,7 +58,13 @@ struct budget {
58 int ci_present; 58 int ci_present;
59 int video_port; 59 int video_port;
60 60
61 u8 tsf; 61 u32 buffer_width;
62 u32 buffer_height;
63 u32 buffer_size;
64 u32 buffer_warning_threshold;
65 u32 buffer_warnings;
66 unsigned long buffer_warning_time;
67
62 u32 ttbp; 68 u32 ttbp;
63 int feeding; 69 int feeding;
64 70
@@ -79,11 +85,6 @@ static struct saa7146_pci_extension_data x_var = { \
79 .ext_priv = &x_var ## _info, \ 85 .ext_priv = &x_var ## _info, \
80 .ext = &budget_extension }; 86 .ext = &budget_extension };
81 87
82#define TS_WIDTH (376)
83#define TS_HEIGHT (512)
84#define TS_BUFLEN (TS_WIDTH*TS_HEIGHT)
85#define TS_MAX_PACKETS (TS_BUFLEN/TS_SIZE)
86
87#define BUDGET_TT 0 88#define BUDGET_TT 0
88#define BUDGET_TT_HW_DISEQC 1 89#define BUDGET_TT_HW_DISEQC 1
89#define BUDGET_PATCH 3 90#define BUDGET_PATCH 3
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index f31a19890b15..85888a8a93c9 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -300,7 +300,7 @@ config VIDEO_OVCAMCHIP
300 camera drivers. 300 camera drivers.
301 301
302 To compile this driver as a module, choose M here: the 302 To compile this driver as a module, choose M here: the
303 module will be called ovcamchip 303 module will be called ovcamchip.
304 304
305config VIDEO_M32R_AR 305config VIDEO_M32R_AR
306 tristate "AR devices" 306 tristate "AR devices"
@@ -316,6 +316,13 @@ config VIDEO_M32R_AR_M64278
316 Say Y here to use the Renesas M64278E-800 camera module, 316 Say Y here to use the Renesas M64278E-800 camera module,
317 which supports VGA(640x480 pixcels) size of images. 317 which supports VGA(640x480 pixcels) size of images.
318 318
319#
320# Encoder / Decoder module configuration
321#
322
323menu "Encoders and Decoders"
324 depends on VIDEO_DEV
325
319config VIDEO_MSP3400 326config VIDEO_MSP3400
320 tristate "Micronas MSP34xx audio decoders" 327 tristate "Micronas MSP34xx audio decoders"
321 depends on VIDEO_DEV && I2C 328 depends on VIDEO_DEV && I2C
@@ -323,7 +330,7 @@ config VIDEO_MSP3400
323 Support for the Micronas MSP34xx series of audio decoders. 330 Support for the Micronas MSP34xx series of audio decoders.
324 331
325 To compile this driver as a module, choose M here: the 332 To compile this driver as a module, choose M here: the
326 module will be called msp3400 333 module will be called msp3400.
327 334
328config VIDEO_CS53L32A 335config VIDEO_CS53L32A
329 tristate "Cirrus Logic CS53L32A audio ADC" 336 tristate "Cirrus Logic CS53L32A audio ADC"
@@ -333,17 +340,27 @@ config VIDEO_CS53L32A
333 stereo A/D converter. 340 stereo A/D converter.
334 341
335 To compile this driver as a module, choose M here: the 342 To compile this driver as a module, choose M here: the
336 module will be called cs53l32a 343 module will be called cs53l32a.
337 344
338config VIDEO_WM8775 345config VIDEO_WM8775
339 tristate "Wolfson Microelectronics WM8775 audio ADC" 346 tristate "Wolfson Microelectronics WM8775 audio ADC with input mixer"
340 depends on VIDEO_DEV && I2C && EXPERIMENTAL 347 depends on VIDEO_DEV && I2C && EXPERIMENTAL
341 ---help--- 348 ---help---
342 Support for the Wolfson Microelectronics WM8775 349 Support for the Wolfson Microelectronics WM8775 high
343 high performance stereo A/D Converter. 350 performance stereo A/D Converter with a 4 channel input mixer.
344 351
345 To compile this driver as a module, choose M here: the 352 To compile this driver as a module, choose M here: the
346 module will be called wm8775 353 module will be called wm8775.
354
355config VIDEO_WM8739
356 tristate "Wolfson Microelectronics WM8739 stereo audio ADC"
357 depends on VIDEO_DEV && I2C && EXPERIMENTAL
358 ---help---
359 Support for the Wolfson Microelectronics WM8739
360 stereo A/D Converter.
361
362 To compile this driver as a module, choose M here: the
363 module will be called wm8739.
347 364
348source "drivers/media/video/cx25840/Kconfig" 365source "drivers/media/video/cx25840/Kconfig"
349 366
@@ -354,7 +371,7 @@ config VIDEO_SAA711X
354 Support for the Philips SAA7113/4/5 video decoders. 371 Support for the Philips SAA7113/4/5 video decoders.
355 372
356 To compile this driver as a module, choose M here: the 373 To compile this driver as a module, choose M here: the
357 module will be called saa7115 374 module will be called saa7115.
358 375
359config VIDEO_SAA7127 376config VIDEO_SAA7127
360 tristate "Philips SAA7127/9 digital video encoders" 377 tristate "Philips SAA7127/9 digital video encoders"
@@ -363,7 +380,32 @@ config VIDEO_SAA7127
363 Support for the Philips SAA7127/9 digital video encoders. 380 Support for the Philips SAA7127/9 digital video encoders.
364 381
365 To compile this driver as a module, choose M here: the 382 To compile this driver as a module, choose M here: the
366 module will be called saa7127 383 module will be called saa7127.
384
385config VIDEO_UPD64031A
386 tristate "NEC Electronics uPD64031A Ghost Reduction"
387 depends on VIDEO_DEV && I2C && EXPERIMENTAL
388 ---help---
389 Support for the NEC Electronics uPD64031A Ghost Reduction
390 video chip. It is most often found in NTSC TV cards made for
391 Japan and is used to reduce the 'ghosting' effect that can
392 be present in analog TV broadcasts.
393
394 To compile this driver as a module, choose M here: the
395 module will be called upd64031a.
396
397config VIDEO_UPD64083
398 tristate "NEC Electronics uPD64083 3-Dimensional Y/C separation"
399 depends on VIDEO_DEV && I2C && EXPERIMENTAL
400 ---help---
401 Support for the NEC Electronics uPD64083 3-Dimensional Y/C
402 separation video chip. It is used to improve the quality of
403 the colors of a composite signal.
404
405 To compile this driver as a module, choose M here: the
406 module will be called upd64083.
407
408endmenu # encoder / decoder chips
367 409
368# 410#
369# USB Multimedia device configuration 411# USB Multimedia device configuration
@@ -374,20 +416,6 @@ menu "V4L USB devices"
374 416
375source "drivers/media/video/em28xx/Kconfig" 417source "drivers/media/video/em28xx/Kconfig"
376 418
377config USB_VICAM
378 tristate "USB 3com HomeConnect (aka vicam) support (EXPERIMENTAL)"
379 depends on USB && VIDEO_DEV && EXPERIMENTAL
380 ---help---
381 Say Y here if you have 3com homeconnect camera (vicam).
382
383 This driver uses the Video For Linux API. You must say Y or M to
384 "Video For Linux" (under Multimedia Devices) to use this driver.
385 Information on this API and pointers to "v4l" programs may be found
386 at <file:Documentation/video4linux/API.html>.
387
388 To compile this driver as a module, choose M here: the
389 module will be called vicam.
390
391config USB_DSBR 419config USB_DSBR
392 tristate "D-Link USB FM radio support (EXPERIMENTAL)" 420 tristate "D-Link USB FM radio support (EXPERIMENTAL)"
393 depends on USB && VIDEO_DEV && EXPERIMENTAL 421 depends on USB && VIDEO_DEV && EXPERIMENTAL
@@ -397,79 +425,20 @@ config USB_DSBR
397 you must connect the line out connector to a sound card or a 425 you must connect the line out connector to a sound card or a
398 set of speakers. 426 set of speakers.
399 427
400 This driver uses the Video For Linux API. You must enable
401 (Y or M in config) Video For Linux (under Character Devices)
402 to use this driver. Information on this API and pointers to
403 "v4l" programs may be found at
404 <file:Documentation/video4linux/API.html>.
405
406 To compile this driver as a module, choose M here: the 428 To compile this driver as a module, choose M here: the
407 module will be called dsbr100. 429 module will be called dsbr100.
408 430
409config USB_ET61X251 431source "drivers/media/video/usbvideo/Kconfig"
410 tristate "USB ET61X[12]51 PC Camera Controller support"
411 depends on USB && VIDEO_DEV
412 ---help---
413 Say Y here if you want support for cameras based on Etoms ET61X151
414 or ET61X251 PC Camera Controllers.
415
416 See <file:Documentation/usb/et61x251.txt> for more informations.
417
418 This driver uses the Video For Linux API. You must say Y or M to
419 "Video For Linux" to use this driver.
420
421 To compile this driver as a module, choose M here: the
422 module will be called et61x251.
423 432
424config USB_IBMCAM 433source "drivers/media/video/et61x251/Kconfig"
425 tristate "USB IBM (Xirlink) C-it Camera support"
426 depends on USB && VIDEO_DEV
427 ---help---
428 Say Y here if you want to connect a IBM "C-It" camera, also known as
429 "Xirlink PC Camera" to your computer's USB port. For more
430 information, read <file:Documentation/usb/ibmcam.txt>.
431
432 This driver uses the Video For Linux API. You must enable
433 (Y or M in config) Video For Linux (under Character Devices)
434 to use this driver. Information on this API and pointers to
435 "v4l" programs may be found at
436 <file:Documentation/video4linux/API.html>.
437
438 To compile this driver as a module, choose M here: the
439 module will be called ibmcam.
440
441 This camera has several configuration options which
442 can be specified when you load the module. Read
443 <file:Documentation/usb/ibmcam.txt> to learn more.
444
445config USB_KONICAWC
446 tristate "USB Konica Webcam support"
447 depends on USB && VIDEO_DEV
448 ---help---
449 Say Y here if you want support for webcams based on a Konica
450 chipset. This is known to work with the Intel YC76 webcam.
451
452 This driver uses the Video For Linux API. You must enable
453 (Y or M in config) Video For Linux (under Character Devices)
454 to use this driver. Information on this API and pointers to
455 "v4l" programs may be found at
456 <file:Documentation/video4linux/API.html>.
457
458 To compile this driver as a module, choose M here: the
459 module will be called konicawc.
460 434
461config USB_OV511 435config USB_OV511
462 tristate "USB OV511 Camera support" 436 tristate "USB OV511 Camera support"
463 depends on USB && VIDEO_DEV 437 depends on USB && VIDEO_DEV
464 ---help--- 438 ---help---
465 Say Y here if you want to connect this type of camera to your 439 Say Y here if you want to connect this type of camera to your
466 computer's USB port. See <file:Documentation/usb/ov511.txt> for more 440 computer's USB port. See <file:Documentation/video4linux/ov511.txt>
467 information and for a list of supported cameras. 441 for more information and for a list of supported cameras.
468
469 This driver uses the Video For Linux API. You must say Y or M to
470 "Video For Linux" (under Character Devices) to use this driver.
471 Information on this API and pointers to "v4l" programs may be found
472 at <file:Documentation/video4linux/API.html>.
473 442
474 To compile this driver as a module, choose M here: the 443 To compile this driver as a module, choose M here: the
475 module will be called ov511. 444 module will be called ov511.
@@ -479,31 +448,13 @@ config USB_SE401
479 depends on USB && VIDEO_DEV 448 depends on USB && VIDEO_DEV
480 ---help--- 449 ---help---
481 Say Y here if you want to connect this type of camera to your 450 Say Y here if you want to connect this type of camera to your
482 computer's USB port. See <file:Documentation/usb/se401.txt> for more 451 computer's USB port. See <file:Documentation/video4linux/se401.txt>
483 information and for a list of supported cameras. 452 for more information and for a list of supported cameras.
484
485 This driver uses the Video For Linux API. You must say Y or M to
486 "Video For Linux" (under Multimedia Devices) to use this driver.
487 Information on this API and pointers to "v4l" programs may be found
488 at <file:Documentation/video4linux/API.html>.
489 453
490 To compile this driver as a module, choose M here: the 454 To compile this driver as a module, choose M here: the
491 module will be called se401. 455 module will be called se401.
492 456
493config USB_SN9C102 457source "drivers/media/video/sn9c102/Kconfig"
494 tristate "USB SN9C10x PC Camera Controller support"
495 depends on USB && VIDEO_DEV
496 ---help---
497 Say Y here if you want support for cameras based on SONiX SN9C101,
498 SN9C102 or SN9C103 PC Camera Controllers.
499
500 See <file:Documentation/usb/sn9c102.txt> for more informations.
501
502 This driver uses the Video For Linux API. You must say Y or M to
503 "Video For Linux" to use this driver.
504
505 To compile this driver as a module, choose M here: the
506 module will be called sn9c102.
507 458
508config USB_STV680 459config USB_STV680
509 tristate "USB STV680 (Pencam) Camera support" 460 tristate "USB STV680 (Pencam) Camera support"
@@ -511,20 +462,16 @@ config USB_STV680
511 ---help--- 462 ---help---
512 Say Y here if you want to connect this type of camera to your 463 Say Y here if you want to connect this type of camera to your
513 computer's USB port. This includes the Pencam line of cameras. 464 computer's USB port. This includes the Pencam line of cameras.
514 See <file:Documentation/usb/stv680.txt> for more information and for 465 See <file:Documentation/video4linux/stv680.txt> for more information
515 a list of supported cameras. 466 and for a list of supported cameras.
516
517 This driver uses the Video For Linux API. You must say Y or M to
518 "Video For Linux" (under Multimedia Devices) to use this driver.
519 Information on this API and pointers to "v4l" programs may be found
520 at <file:Documentation/video4linux/API.html>.
521 467
522 To compile this driver as a module, choose M here: the 468 To compile this driver as a module, choose M here: the
523 module will be called stv680. 469 module will be called stv680.
524 470
525config USB_W9968CF 471config USB_W9968CF
526 tristate "USB W996[87]CF JPEG Dual Mode Camera support" 472 tristate "USB W996[87]CF JPEG Dual Mode Camera support"
527 depends on USB && VIDEO_DEV && I2C && VIDEO_OVCAMCHIP 473 depends on USB && VIDEO_DEV && I2C
474 select VIDEO_OVCAMCHIP
528 ---help--- 475 ---help---
529 Say Y here if you want support for cameras based on OV681 or 476 Say Y here if you want support for cameras based on OV681 or
530 Winbond W9967CF/W9968CF JPEG USB Dual Mode Camera Chips. 477 Winbond W9967CF/W9968CF JPEG USB Dual Mode Camera Chips.
@@ -534,64 +481,14 @@ config USB_W9968CF
534 resolutions and framerates, but cannot be included in the official 481 resolutions and framerates, but cannot be included in the official
535 Linux kernel for performance purposes. 482 Linux kernel for performance purposes.
536 483
537 See <file:Documentation/usb/w9968cf.txt> for more informations. 484 See <file:Documentation/video4linux/w9968cf.txt> for more info.
538
539 This driver uses the Video For Linux and the I2C APIs. It needs the
540 OmniVision Camera Chip support as well. You must say Y or M to
541 "Video For Linux", "I2C Support" and "OmniVision Camera Chip
542 support" to use this driver.
543 485
544 To compile this driver as a module, choose M here: the 486 To compile this driver as a module, choose M here: the
545 module will be called w9968cf. 487 module will be called w9968cf.
546 488
547config USB_ZC0301 489source "drivers/media/video/zc0301/Kconfig"
548 tristate "USB ZC0301 Image Processor and Control Chip support"
549 depends on USB && VIDEO_DEV
550 ---help---
551 Say Y here if you want support for cameras based on the ZC0301
552 Image Processor and Control Chip.
553
554 See <file:Documentation/usb/zc0301.txt> for more informations.
555
556 This driver uses the Video For Linux API. You must say Y or M to
557 "Video For Linux" to use this driver.
558
559 To compile this driver as a module, choose M here: the
560 module will be called zc0301.
561
562config USB_PWC
563 tristate "USB Philips Cameras"
564 depends on USB && VIDEO_DEV
565 ---help---
566 Say Y or M here if you want to use one of these Philips & OEM
567 webcams:
568 * Philips PCA645, PCA646
569 * Philips PCVC675, PCVC680, PCVC690
570 * Philips PCVC720/40, PCVC730, PCVC740, PCVC750
571 * Askey VC010
572 * Logitech QuickCam Pro 3000, 4000, 'Zoom', 'Notebook Pro'
573 and 'Orbit'/'Sphere'
574 * Samsung MPC-C10, MPC-C30
575 * Creative Webcam 5, Pro Ex
576 * SOTEC Afina Eye
577 * Visionite VCS-UC300, VCS-UM100
578
579 The PCA635, PCVC665 and PCVC720/20 are not supported by this driver
580 and never will be, but the 665 and 720/20 are supported by other
581 drivers.
582
583 See <file:Documentation/usb/philips.txt> for more information and
584 installation instructions.
585
586 The built-in microphone is enabled by selecting USB Audio support.
587
588 This driver uses the Video For Linux API. You must say Y or M to
589 "Video For Linux" (under Character Devices) to use this driver.
590 Information on this API and pointers to "v4l" programs may be found
591 at <file:Documentation/video4linux/API.html>.
592 490
593 To compile this driver as a module, choose M here: the 491source "drivers/media/video/pwc/Kconfig"
594 module will be called pwc.
595 492
596endmenu # V4L USB devices 493endmenu # V4L USB devices
597 494
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 1c0e72e5a593..4092a5e37ffc 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_VIDEO_EM28XX) += tvp5150.o
45obj-$(CONFIG_VIDEO_MSP3400) += msp3400.o 45obj-$(CONFIG_VIDEO_MSP3400) += msp3400.o
46obj-$(CONFIG_VIDEO_CS53L32A) += cs53l32a.o 46obj-$(CONFIG_VIDEO_CS53L32A) += cs53l32a.o
47obj-$(CONFIG_VIDEO_WM8775) += wm8775.o 47obj-$(CONFIG_VIDEO_WM8775) += wm8775.o
48obj-$(CONFIG_VIDEO_WM8739) += wm8739.o
48obj-$(CONFIG_VIDEO_OVCAMCHIP) += ovcamchip/ 49obj-$(CONFIG_VIDEO_OVCAMCHIP) += ovcamchip/
49obj-$(CONFIG_VIDEO_CPIA2) += cpia2/ 50obj-$(CONFIG_VIDEO_CPIA2) += cpia2/
50obj-$(CONFIG_VIDEO_MXB) += saa7111.o tda9840.o tea6415c.o tea6420.o mxb.o 51obj-$(CONFIG_VIDEO_MXB) += saa7111.o tda9840.o tea6415c.o tea6420.o mxb.o
@@ -64,9 +65,8 @@ obj-$(CONFIG_VIDEO_M32R_AR_M64278) += arv.o
64obj-$(CONFIG_VIDEO_CX25840) += cx25840/ 65obj-$(CONFIG_VIDEO_CX25840) += cx25840/
65obj-$(CONFIG_VIDEO_SAA711X) += saa7115.o 66obj-$(CONFIG_VIDEO_SAA711X) += saa7115.o
66obj-$(CONFIG_VIDEO_SAA7127) += saa7127.o 67obj-$(CONFIG_VIDEO_SAA7127) += saa7127.o
67 68obj-$(CONFIG_VIDEO_UPD64031A) += upd64031a.o
68et61x251-objs := et61x251_core.o et61x251_tas5130d1b.o 69obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
69zc0301-objs := zc0301_core.o zc0301_pas202bcb.o
70 70
71obj-$(CONFIG_USB_DABUSB) += dabusb.o 71obj-$(CONFIG_USB_DABUSB) += dabusb.o
72obj-$(CONFIG_USB_DSBR) += dsbr100.o 72obj-$(CONFIG_USB_DSBR) += dsbr100.o
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 74def9c23952..423e954948be 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -973,12 +973,12 @@ audio_mux(struct bttv *btv, int input, int mute)
973 For now this is sufficient. */ 973 For now this is sufficient. */
974 switch (input) { 974 switch (input) {
975 case TVAUDIO_INPUT_RADIO: 975 case TVAUDIO_INPUT_RADIO:
976 route.input = MSP_INPUT(MSP_IN_SCART_2, MSP_IN_TUNER_1, 976 route.input = MSP_INPUT(MSP_IN_SCART2, MSP_IN_TUNER1,
977 MSP_DSP_OUT_SCART, MSP_DSP_OUT_SCART); 977 MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);
978 break; 978 break;
979 case TVAUDIO_INPUT_EXTERN: 979 case TVAUDIO_INPUT_EXTERN:
980 route.input = MSP_INPUT(MSP_IN_SCART_1, MSP_IN_TUNER_1, 980 route.input = MSP_INPUT(MSP_IN_SCART1, MSP_IN_TUNER1,
981 MSP_DSP_OUT_SCART, MSP_DSP_OUT_SCART); 981 MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);
982 break; 982 break;
983 case TVAUDIO_INPUT_INTERN: 983 case TVAUDIO_INPUT_INTERN:
984 /* Yes, this is the same input as for RADIO. I doubt 984 /* Yes, this is the same input as for RADIO. I doubt
@@ -986,8 +986,8 @@ audio_mux(struct bttv *btv, int input, int mute)
986 input is the BTTV_BOARD_AVERMEDIA98. I wonder how 986 input is the BTTV_BOARD_AVERMEDIA98. I wonder how
987 that was tested. My guess is that the whole INTERN 987 that was tested. My guess is that the whole INTERN
988 input does not work. */ 988 input does not work. */
989 route.input = MSP_INPUT(MSP_IN_SCART_2, MSP_IN_TUNER_1, 989 route.input = MSP_INPUT(MSP_IN_SCART2, MSP_IN_TUNER1,
990 MSP_DSP_OUT_SCART, MSP_DSP_OUT_SCART); 990 MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);
991 break; 991 break;
992 case TVAUDIO_INPUT_TUNER: 992 case TVAUDIO_INPUT_TUNER:
993 default: 993 default:
@@ -1023,14 +1023,11 @@ audio_input(struct bttv *btv, int input)
1023static void 1023static void
1024i2c_vidiocschan(struct bttv *btv) 1024i2c_vidiocschan(struct bttv *btv)
1025{ 1025{
1026 struct video_channel c; 1026 v4l2_std_id std = bttv_tvnorms[btv->tvnorm].v4l2_id;
1027 1027
1028 memset(&c,0,sizeof(c)); 1028 bttv_call_i2c_clients(btv, VIDIOC_S_STD, &std);
1029 c.norm = btv->tvnorm;
1030 c.channel = btv->input;
1031 bttv_call_i2c_clients(btv,VIDIOCSCHAN,&c);
1032 if (btv->c.type == BTTV_BOARD_VOODOOTV_FM) 1029 if (btv->c.type == BTTV_BOARD_VOODOOTV_FM)
1033 bttv_tda9880_setnorm(btv,c.norm); 1030 bttv_tda9880_setnorm(btv,btv->tvnorm);
1034} 1031}
1035 1032
1036static int 1033static int
@@ -1184,11 +1181,27 @@ static int get_control(struct bttv *btv, struct v4l2_control *c)
1184 break; 1181 break;
1185 if (i == BTTV_CTLS) 1182 if (i == BTTV_CTLS)
1186 return -EINVAL; 1183 return -EINVAL;
1187 if (i >= 4 && i <= 8) { 1184 if (btv->audio_hook && i >= 4 && i <= 8) {
1188 memset(&va,0,sizeof(va)); 1185 memset(&va,0,sizeof(va));
1189 bttv_call_i2c_clients(btv, VIDIOCGAUDIO, &va); 1186 btv->audio_hook(btv,&va,0);
1190 if (btv->audio_hook) 1187 switch (c->id) {
1191 btv->audio_hook(btv,&va,0); 1188 case V4L2_CID_AUDIO_MUTE:
1189 c->value = (VIDEO_AUDIO_MUTE & va.flags) ? 1 : 0;
1190 break;
1191 case V4L2_CID_AUDIO_VOLUME:
1192 c->value = va.volume;
1193 break;
1194 case V4L2_CID_AUDIO_BALANCE:
1195 c->value = va.balance;
1196 break;
1197 case V4L2_CID_AUDIO_BASS:
1198 c->value = va.bass;
1199 break;
1200 case V4L2_CID_AUDIO_TREBLE:
1201 c->value = va.treble;
1202 break;
1203 }
1204 return 0;
1192 } 1205 }
1193 switch (c->id) { 1206 switch (c->id) {
1194 case V4L2_CID_BRIGHTNESS: 1207 case V4L2_CID_BRIGHTNESS:
@@ -1205,19 +1218,11 @@ static int get_control(struct bttv *btv, struct v4l2_control *c)
1205 break; 1218 break;
1206 1219
1207 case V4L2_CID_AUDIO_MUTE: 1220 case V4L2_CID_AUDIO_MUTE:
1208 c->value = (VIDEO_AUDIO_MUTE & va.flags) ? 1 : 0;
1209 break;
1210 case V4L2_CID_AUDIO_VOLUME: 1221 case V4L2_CID_AUDIO_VOLUME:
1211 c->value = va.volume;
1212 break;
1213 case V4L2_CID_AUDIO_BALANCE: 1222 case V4L2_CID_AUDIO_BALANCE:
1214 c->value = va.balance;
1215 break;
1216 case V4L2_CID_AUDIO_BASS: 1223 case V4L2_CID_AUDIO_BASS:
1217 c->value = va.bass;
1218 break;
1219 case V4L2_CID_AUDIO_TREBLE: 1224 case V4L2_CID_AUDIO_TREBLE:
1220 c->value = va.treble; 1225 bttv_call_i2c_clients(btv,VIDIOC_G_CTRL,c);
1221 break; 1226 break;
1222 1227
1223 case V4L2_CID_PRIVATE_CHROMA_AGC: 1228 case V4L2_CID_PRIVATE_CHROMA_AGC:
@@ -1269,11 +1274,35 @@ static int set_control(struct bttv *btv, struct v4l2_control *c)
1269 break; 1274 break;
1270 if (i == BTTV_CTLS) 1275 if (i == BTTV_CTLS)
1271 return -EINVAL; 1276 return -EINVAL;
1272 if (i >= 4 && i <= 8) { 1277 if (btv->audio_hook && i >= 4 && i <= 8) {
1273 memset(&va,0,sizeof(va)); 1278 memset(&va,0,sizeof(va));
1274 bttv_call_i2c_clients(btv, VIDIOCGAUDIO, &va); 1279 btv->audio_hook(btv,&va,0);
1275 if (btv->audio_hook) 1280 switch (c->id) {
1276 btv->audio_hook(btv,&va,0); 1281 case V4L2_CID_AUDIO_MUTE:
1282 if (c->value) {
1283 va.flags |= VIDEO_AUDIO_MUTE;
1284 audio_mute(btv, 1);
1285 } else {
1286 va.flags &= ~VIDEO_AUDIO_MUTE;
1287 audio_mute(btv, 0);
1288 }
1289 break;
1290
1291 case V4L2_CID_AUDIO_VOLUME:
1292 va.volume = c->value;
1293 break;
1294 case V4L2_CID_AUDIO_BALANCE:
1295 va.balance = c->value;
1296 break;
1297 case V4L2_CID_AUDIO_BASS:
1298 va.bass = c->value;
1299 break;
1300 case V4L2_CID_AUDIO_TREBLE:
1301 va.treble = c->value;
1302 break;
1303 }
1304 btv->audio_hook(btv,&va,1);
1305 return 0;
1277 } 1306 }
1278 switch (c->id) { 1307 switch (c->id) {
1279 case V4L2_CID_BRIGHTNESS: 1308 case V4L2_CID_BRIGHTNESS:
@@ -1289,26 +1318,13 @@ static int set_control(struct bttv *btv, struct v4l2_control *c)
1289 bt848_sat(btv,c->value); 1318 bt848_sat(btv,c->value);
1290 break; 1319 break;
1291 case V4L2_CID_AUDIO_MUTE: 1320 case V4L2_CID_AUDIO_MUTE:
1292 if (c->value) { 1321 audio_mute(btv, c->value);
1293 va.flags |= VIDEO_AUDIO_MUTE; 1322 /* fall through */
1294 audio_mute(btv, 1);
1295 } else {
1296 va.flags &= ~VIDEO_AUDIO_MUTE;
1297 audio_mute(btv, 0);
1298 }
1299 break;
1300
1301 case V4L2_CID_AUDIO_VOLUME: 1323 case V4L2_CID_AUDIO_VOLUME:
1302 va.volume = c->value;
1303 break;
1304 case V4L2_CID_AUDIO_BALANCE: 1324 case V4L2_CID_AUDIO_BALANCE:
1305 va.balance = c->value;
1306 break;
1307 case V4L2_CID_AUDIO_BASS: 1325 case V4L2_CID_AUDIO_BASS:
1308 va.bass = c->value;
1309 break;
1310 case V4L2_CID_AUDIO_TREBLE: 1326 case V4L2_CID_AUDIO_TREBLE:
1311 va.treble = c->value; 1327 bttv_call_i2c_clients(btv,VIDIOC_S_CTRL,c);
1312 break; 1328 break;
1313 1329
1314 case V4L2_CID_PRIVATE_CHROMA_AGC: 1330 case V4L2_CID_PRIVATE_CHROMA_AGC:
@@ -1364,11 +1380,6 @@ static int set_control(struct bttv *btv, struct v4l2_control *c)
1364 default: 1380 default:
1365 return -EINVAL; 1381 return -EINVAL;
1366 } 1382 }
1367 if (i >= 4 && i <= 8) {
1368 bttv_call_i2c_clients(btv, VIDIOCSAUDIO, &va);
1369 if (btv->audio_hook)
1370 btv->audio_hook(btv,&va,1);
1371 }
1372 return 0; 1383 return 0;
1373} 1384}
1374 1385
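
The control hunks above all follow one pattern: if the board registered an audio_hook, the V4L2 audio controls are translated into the legacy struct video_audio and handed to that hook; otherwise the untouched v4l2_control is forwarded to the i2c clients (msp3400 and friends). A condensed, volume-only sketch of that dispatch (the real code also covers mute, balance, bass and treble):

/* Sketch of the set-control dispatch; illustrative only. */
static int set_audio_volume(struct bttv *btv, struct v4l2_control *c)
{
	if (btv->audio_hook) {
		struct video_audio va;

		memset(&va, 0, sizeof(va));
		btv->audio_hook(btv, &va, 0);	/* read current state */
		va.volume = c->value;		/* apply the V4L2 value */
		btv->audio_hook(btv, &va, 1);	/* write it back */
		return 0;
	}

	/* no board-specific hook: let the audio decoder handle it */
	bttv_call_i2c_clients(btv, VIDIOC_S_CTRL, c);
	return 0;
}
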
@@ -1591,12 +1602,16 @@ static int bttv_common_ioctls(struct bttv *btv, unsigned int cmd, void *arg)
1591 } 1602 }
1592 case VIDIOCSFREQ: 1603 case VIDIOCSFREQ:
1593 { 1604 {
1594 unsigned long *freq = arg; 1605 struct v4l2_frequency freq;
1606
1607 memset(&freq, 0, sizeof(freq));
1608 freq.frequency = *(unsigned long *)arg;
1595 mutex_lock(&btv->lock); 1609 mutex_lock(&btv->lock);
1596 btv->freq=*freq; 1610 freq.type = btv->radio_user ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
1597 bttv_call_i2c_clients(btv,VIDIOCSFREQ,freq); 1611 btv->freq = *(unsigned long *)arg;
1612 bttv_call_i2c_clients(btv,VIDIOC_S_FREQUENCY,&freq);
1598 if (btv->has_matchbox && btv->radio_user) 1613 if (btv->has_matchbox && btv->radio_user)
1599 tea5757_set_freq(btv,*freq); 1614 tea5757_set_freq(btv,*(unsigned long *)arg);
1600 mutex_unlock(&btv->lock); 1615 mutex_unlock(&btv->lock);
1601 return 0; 1616 return 0;
1602 } 1617 }
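
In the VIDIOCSFREQ hunk above, the V4L1 ioctl no longer hands the raw unsigned long to the i2c clients; it is wrapped in a struct v4l2_frequency with the proper tuner type so the clients only ever see the native V4L2 call. Roughly (a sketch; locking and the matchbox radio special case are omitted):

/* Sketch of the V4L1 -> V4L2 frequency translation. */
static void set_frequency_sketch(struct bttv *btv, unsigned long v4l1_freq)
{
	struct v4l2_frequency freq;

	memset(&freq, 0, sizeof(freq));
	freq.frequency = v4l1_freq;
	freq.type = btv->radio_user ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;

	btv->freq = v4l1_freq;
	/* tuner, msp3400, ... all receive the V4L2 ioctl */
	bttv_call_i2c_clients(btv, VIDIOC_S_FREQUENCY, &freq);
}
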
@@ -1827,33 +1842,26 @@ static int bttv_common_ioctls(struct bttv *btv, unsigned int cmd, void *arg)
1827 return -EINVAL; 1842 return -EINVAL;
1828 mutex_lock(&btv->lock); 1843 mutex_lock(&btv->lock);
1829 memset(t,0,sizeof(*t)); 1844 memset(t,0,sizeof(*t));
1845 t->rxsubchans = V4L2_TUNER_SUB_MONO;
1846 bttv_call_i2c_clients(btv, VIDIOC_G_TUNER, t);
1830 strcpy(t->name, "Television"); 1847 strcpy(t->name, "Television");
1831 t->type = V4L2_TUNER_ANALOG_TV;
1832 t->capability = V4L2_TUNER_CAP_NORM; 1848 t->capability = V4L2_TUNER_CAP_NORM;
1833 t->rxsubchans = V4L2_TUNER_SUB_MONO; 1849 t->type = V4L2_TUNER_ANALOG_TV;
1834 if (btread(BT848_DSTATUS)&BT848_DSTATUS_HLOC) 1850 if (btread(BT848_DSTATUS)&BT848_DSTATUS_HLOC)
1835 t->signal = 0xffff; 1851 t->signal = 0xffff;
1836 { 1852
1837 struct video_tuner tuner; 1853 if (btv->audio_hook) {
1838
1839 memset(&tuner, 0, sizeof (tuner));
1840 tuner.rangehigh = 0xffffffffUL;
1841 bttv_call_i2c_clients(btv, VIDIOCGTUNER, &tuner);
1842 t->rangelow = tuner.rangelow;
1843 t->rangehigh = tuner.rangehigh;
1844 }
1845 {
1846 /* Hmmm ... */ 1854 /* Hmmm ... */
1847 struct video_audio va; 1855 struct video_audio va;
1848 memset(&va, 0, sizeof(struct video_audio)); 1856 memset(&va, 0, sizeof(struct video_audio));
1849 bttv_call_i2c_clients(btv, VIDIOCGAUDIO, &va); 1857 btv->audio_hook(btv,&va,0);
1850 if (btv->audio_hook) 1858 t->audmode = V4L2_TUNER_MODE_MONO;
1851 btv->audio_hook(btv,&va,0); 1859 t->rxsubchans = V4L2_TUNER_SUB_MONO;
1852 if(va.mode & VIDEO_SOUND_STEREO) { 1860 if(va.mode & VIDEO_SOUND_STEREO) {
1853 t->audmode = V4L2_TUNER_MODE_STEREO; 1861 t->audmode = V4L2_TUNER_MODE_STEREO;
1854 t->rxsubchans |= V4L2_TUNER_SUB_STEREO; 1862 t->rxsubchans = V4L2_TUNER_SUB_STEREO;
1855 } 1863 }
1856 if(va.mode & VIDEO_SOUND_LANG1) { 1864 if(va.mode & VIDEO_SOUND_LANG2) {
1857 t->audmode = V4L2_TUNER_MODE_LANG1; 1865 t->audmode = V4L2_TUNER_MODE_LANG1;
1858 t->rxsubchans = V4L2_TUNER_SUB_LANG1 1866 t->rxsubchans = V4L2_TUNER_SUB_LANG1
1859 | V4L2_TUNER_SUB_LANG2; 1867 | V4L2_TUNER_SUB_LANG2;
@@ -1872,10 +1880,10 @@ static int bttv_common_ioctls(struct bttv *btv, unsigned int cmd, void *arg)
1872 if (0 != t->index) 1880 if (0 != t->index)
1873 return -EINVAL; 1881 return -EINVAL;
1874 mutex_lock(&btv->lock); 1882 mutex_lock(&btv->lock);
1875 { 1883 bttv_call_i2c_clients(btv, VIDIOC_S_TUNER, t);
1884 if (btv->audio_hook) {
1876 struct video_audio va; 1885 struct video_audio va;
1877 memset(&va, 0, sizeof(struct video_audio)); 1886 memset(&va, 0, sizeof(struct video_audio));
1878 bttv_call_i2c_clients(btv, VIDIOCGAUDIO, &va);
1879 if (t->audmode == V4L2_TUNER_MODE_MONO) 1887 if (t->audmode == V4L2_TUNER_MODE_MONO)
1880 va.mode = VIDEO_SOUND_MONO; 1888 va.mode = VIDEO_SOUND_MONO;
1881 else if (t->audmode == V4L2_TUNER_MODE_STEREO || 1889 else if (t->audmode == V4L2_TUNER_MODE_STEREO ||
@@ -1885,9 +1893,7 @@ static int bttv_common_ioctls(struct bttv *btv, unsigned int cmd, void *arg)
1885 va.mode = VIDEO_SOUND_LANG1; 1893 va.mode = VIDEO_SOUND_LANG1;
1886 else if (t->audmode == V4L2_TUNER_MODE_LANG2) 1894 else if (t->audmode == V4L2_TUNER_MODE_LANG2)
1887 va.mode = VIDEO_SOUND_LANG2; 1895 va.mode = VIDEO_SOUND_LANG2;
1888 bttv_call_i2c_clients(btv, VIDIOCSAUDIO, &va); 1896 btv->audio_hook(btv,&va,1);
1889 if (btv->audio_hook)
1890 btv->audio_hook(btv,&va,1);
1891 } 1897 }
1892 mutex_unlock(&btv->lock); 1898 mutex_unlock(&btv->lock);
1893 return 0; 1899 return 0;
@@ -1912,7 +1918,7 @@ static int bttv_common_ioctls(struct bttv *btv, unsigned int cmd, void *arg)
1912 return -EINVAL; 1918 return -EINVAL;
1913 mutex_lock(&btv->lock); 1919 mutex_lock(&btv->lock);
1914 btv->freq = f->frequency; 1920 btv->freq = f->frequency;
1915 bttv_call_i2c_clients(btv,VIDIOCSFREQ,&btv->freq); 1921 bttv_call_i2c_clients(btv,VIDIOC_S_FREQUENCY,f);
1916 if (btv->has_matchbox && btv->radio_user) 1922 if (btv->has_matchbox && btv->radio_user)
1917 tea5757_set_freq(btv,btv->freq); 1923 tea5757_set_freq(btv,btv->freq);
1918 mutex_unlock(&btv->lock); 1924 mutex_unlock(&btv->lock);
@@ -1920,7 +1926,9 @@ static int bttv_common_ioctls(struct bttv *btv, unsigned int cmd, void *arg)
1920 } 1926 }
1921 case VIDIOC_LOG_STATUS: 1927 case VIDIOC_LOG_STATUS:
1922 { 1928 {
1929 printk(KERN_INFO "bttv%d: ================= START STATUS CARD #%d =================\n", btv->c.nr, btv->c.nr);
1923 bttv_call_i2c_clients(btv, VIDIOC_LOG_STATUS, NULL); 1930 bttv_call_i2c_clients(btv, VIDIOC_LOG_STATUS, NULL);
1931 printk(KERN_INFO "bttv%d: ================== END STATUS CARD #%d ==================\n", btv->c.nr, btv->c.nr);
1924 return 0; 1932 return 0;
1925 } 1933 }
1926 1934
@@ -2870,12 +2878,10 @@ static int bttv_do_ioctl(struct inode *inode, struct file *file,
2870 return 0; 2878 return 0;
2871 } 2879 }
2872 *c = bttv_ctls[i]; 2880 *c = bttv_ctls[i];
2873 if (i >= 4 && i <= 8) { 2881 if (btv->audio_hook && i >= 4 && i <= 8) {
2874 struct video_audio va; 2882 struct video_audio va;
2875 memset(&va,0,sizeof(va)); 2883 memset(&va,0,sizeof(va));
2876 bttv_call_i2c_clients(btv, VIDIOCGAUDIO, &va); 2884 btv->audio_hook(btv,&va,0);
2877 if (btv->audio_hook)
2878 btv->audio_hook(btv,&va,0);
2879 switch (bttv_ctls[i].id) { 2885 switch (bttv_ctls[i].id) {
2880 case V4L2_CID_AUDIO_VOLUME: 2886 case V4L2_CID_AUDIO_VOLUME:
2881 if (!(va.flags & VIDEO_AUDIO_VOLUME)) 2887 if (!(va.flags & VIDEO_AUDIO_VOLUME))
diff --git a/drivers/media/video/bt8xx/bttv-vbi.c b/drivers/media/video/bt8xx/bttv-vbi.c
index e20ff238e409..8c9f0f7cf467 100644
--- a/drivers/media/video/bt8xx/bttv-vbi.c
+++ b/drivers/media/video/bt8xx/bttv-vbi.c
@@ -184,7 +184,7 @@ void bttv_vbi_try_fmt(struct bttv_fh *fh, struct v4l2_format *f)
184 - tvnorm->vbistart[0]; 184 - tvnorm->vbistart[0];
185 count1 = (s64) f->fmt.vbi.start[1] + f->fmt.vbi.count[1] 185 count1 = (s64) f->fmt.vbi.start[1] + f->fmt.vbi.count[1]
186 - tvnorm->vbistart[1]; 186 - tvnorm->vbistart[1];
187 count = clamp (max (count0, count1), 1LL, (s64) VBI_MAXLINES); 187 count = clamp (max (count0, count1), (s64) 1, (s64) VBI_MAXLINES);
188 188
189 f->fmt.vbi.start[0] = tvnorm->vbistart[0]; 189 f->fmt.vbi.start[0] = tvnorm->vbistart[0];
190 f->fmt.vbi.start[1] = tvnorm->vbistart[1]; 190 f->fmt.vbi.start[1] = tvnorm->vbistart[1];
diff --git a/drivers/media/video/cpia.c b/drivers/media/video/cpia.c
index 2227c5640c12..85d84e89d8f4 100644
--- a/drivers/media/video/cpia.c
+++ b/drivers/media/video/cpia.c
@@ -64,14 +64,13 @@ MODULE_LICENSE("GPL");
64MODULE_SUPPORTED_DEVICE("video"); 64MODULE_SUPPORTED_DEVICE("video");
65#endif 65#endif
66 66
67static unsigned short colorspace_conv = 0; 67static unsigned short colorspace_conv;
68module_param(colorspace_conv, ushort, 0444); 68module_param(colorspace_conv, ushort, 0444);
69MODULE_PARM_DESC(colorspace_conv, 69MODULE_PARM_DESC(colorspace_conv,
70 "\n<n> Colorspace conversion:" 70 " Colorspace conversion:"
71 "\n0 = disable" 71 "\n 0 = disable, 1 = enable"
72 "\n1 = enable" 72 "\n Default value is 0"
73 "\nDefault value is 0" 73 );
74 "\n");
75 74
76#define ABOUT "V4L-Driver for Vision CPiA based cameras" 75#define ABOUT "V4L-Driver for Vision CPiA based cameras"
77 76
@@ -4042,7 +4041,7 @@ static int __init cpia_init(void)
4042 "allowed, it is disabled by default now. Users should fix the " 4041 "allowed, it is disabled by default now. Users should fix the "
4043 "applications in case they don't work without conversion " 4042 "applications in case they don't work without conversion "
4044 "reenabled by setting the 'colorspace_conv' module " 4043 "reenabled by setting the 'colorspace_conv' module "
4045 "parameter to 1"); 4044 "parameter to 1\n");
4046 4045
4047#ifdef CONFIG_PROC_FS 4046#ifdef CONFIG_PROC_FS
4048 proc_cpia_create(); 4047 proc_cpia_create();
diff --git a/drivers/media/video/cpia2/cpia2.h b/drivers/media/video/cpia2/cpia2.h
index 8394283993f6..1764991b0ac9 100644
--- a/drivers/media/video/cpia2/cpia2.h
+++ b/drivers/media/video/cpia2/cpia2.h
@@ -456,7 +456,7 @@ int cpia2_init_camera(struct camera_data *cam);
456int cpia2_allocate_buffers(struct camera_data *cam); 456int cpia2_allocate_buffers(struct camera_data *cam);
457void cpia2_free_buffers(struct camera_data *cam); 457void cpia2_free_buffers(struct camera_data *cam);
458long cpia2_read(struct camera_data *cam, 458long cpia2_read(struct camera_data *cam,
459 char *buf, unsigned long count, int noblock); 459 char __user *buf, unsigned long count, int noblock);
460unsigned int cpia2_poll(struct camera_data *cam, 460unsigned int cpia2_poll(struct camera_data *cam,
461 struct file *filp, poll_table *wait); 461 struct file *filp, poll_table *wait);
462int cpia2_remap_buffer(struct camera_data *cam, struct vm_area_struct *vma); 462int cpia2_remap_buffer(struct camera_data *cam, struct vm_area_struct *vma);
diff --git a/drivers/media/video/cpia_pp.c b/drivers/media/video/cpia_pp.c
index 3021f21aae36..0b00e6027dfb 100644
--- a/drivers/media/video/cpia_pp.c
+++ b/drivers/media/video/cpia_pp.c
@@ -873,7 +873,7 @@ static int __init cpia_pp_setup(char *str)
873 parport_nr[parport_ptr++] = PPCPIA_PARPORT_NONE; 873 parport_nr[parport_ptr++] = PPCPIA_PARPORT_NONE;
874 } 874 }
875 875
876 return 0; 876 return 1;
877} 877}
878 878
879__setup("cpia_pp=", cpia_pp_setup); 879__setup("cpia_pp=", cpia_pp_setup);
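
The one-character change above (return 0 -> return 1) is the standard __setup() contract: a boot-parameter handler returns 1 to mark the argument as consumed, while 0 lets the unrecognised option fall through to init's environment. A minimal illustration of the convention:

/* Minimal __setup() sketch showing the return convention. */
static int __init example_setup(char *str)
{
	/* parse str here ... */
	return 1;	/* handled -- do not pass "example=" on to init */
}
__setup("example=", example_setup);
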
diff --git a/drivers/media/video/cx25840/cx25840-audio.c b/drivers/media/video/cx25840/cx25840-audio.c
index a4540e858f21..9a4b813152e5 100644
--- a/drivers/media/video/cx25840/cx25840-audio.c
+++ b/drivers/media/video/cx25840/cx25840-audio.c
@@ -19,8 +19,9 @@
19#include <linux/videodev2.h> 19#include <linux/videodev2.h>
20#include <linux/i2c.h> 20#include <linux/i2c.h>
21#include <media/v4l2-common.h> 21#include <media/v4l2-common.h>
22#include <media/cx25840.h>
22 23
23#include "cx25840.h" 24#include "cx25840-core.h"
24 25
25static int set_audclk_freq(struct i2c_client *client, u32 freq) 26static int set_audclk_freq(struct i2c_client *client, u32 freq)
26{ 27{
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index a65b3cc4bf03..a961bb2ab0fd 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -32,8 +32,9 @@
32#include <linux/videodev2.h> 32#include <linux/videodev2.h>
33#include <linux/i2c.h> 33#include <linux/i2c.h>
34#include <media/v4l2-common.h> 34#include <media/v4l2-common.h>
35#include <media/cx25840.h>
35 36
36#include "cx25840.h" 37#include "cx25840-core.h"
37 38
38MODULE_DESCRIPTION("Conexant CX25840 audio/video decoder driver"); 39MODULE_DESCRIPTION("Conexant CX25840 audio/video decoder driver");
39MODULE_AUTHOR("Ulf Eklund, Chris Kennedy, Hans Verkuil, Tyler Trafford"); 40MODULE_AUTHOR("Ulf Eklund, Chris Kennedy, Hans Verkuil, Tyler Trafford");
@@ -668,6 +669,7 @@ static int cx25840_command(struct i2c_client *client, unsigned int cmd,
668{ 669{
669 struct cx25840_state *state = i2c_get_clientdata(client); 670 struct cx25840_state *state = i2c_get_clientdata(client);
670 struct v4l2_tuner *vt = arg; 671 struct v4l2_tuner *vt = arg;
672 struct v4l2_routing *route = arg;
671 673
672 switch (cmd) { 674 switch (cmd) {
673#ifdef CONFIG_VIDEO_ADV_DEBUG 675#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -749,19 +751,21 @@ static int cx25840_command(struct i2c_client *client, unsigned int cmd,
749 state->radio = 1; 751 state->radio = 1;
750 break; 752 break;
751 753
752 case VIDIOC_G_INPUT: 754 case VIDIOC_INT_G_VIDEO_ROUTING:
753 *(int *)arg = state->vid_input; 755 route->input = state->vid_input;
756 route->output = 0;
754 break; 757 break;
755 758
756 case VIDIOC_S_INPUT: 759 case VIDIOC_INT_S_VIDEO_ROUTING:
757 return set_input(client, *(enum cx25840_video_input *)arg, state->aud_input); 760 return set_input(client, route->input, state->aud_input);
758 761
759 case VIDIOC_S_AUDIO: 762 case VIDIOC_INT_G_AUDIO_ROUTING:
760 { 763 route->input = state->aud_input;
761 struct v4l2_audio *input = arg; 764 route->output = 0;
765 break;
762 766
763 return set_input(client, state->vid_input, input->index); 767 case VIDIOC_INT_S_AUDIO_ROUTING:
764 } 768 return set_input(client, state->vid_input, route->input);
765 769
766 case VIDIOC_S_FREQUENCY: 770 case VIDIOC_S_FREQUENCY:
767 input_change(client); 771 input_change(client);
diff --git a/drivers/media/video/cx25840/cx25840.h b/drivers/media/video/cx25840/cx25840-core.h
index dd70664d1dd9..1736929fc204 100644
--- a/drivers/media/video/cx25840/cx25840.h
+++ b/drivers/media/video/cx25840/cx25840-core.h
@@ -1,4 +1,4 @@
1/* cx25840 API header 1/* cx25840 internal API header
2 * 2 *
3 * Copyright (C) 2003-2004 Chris Kennedy 3 * Copyright (C) 2003-2004 Chris Kennedy
4 * 4 *
@@ -17,8 +17,8 @@
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 */ 18 */
19 19
20#ifndef _CX25840_H_ 20#ifndef _CX25840_CORE_H_
21#define _CX25840_H_ 21#define _CX25840_CORE_H_
22 22
23 23
24#include <linux/videodev2.h> 24#include <linux/videodev2.h>
@@ -32,46 +32,6 @@
32 providing this information. */ 32 providing this information. */
33#define CX25840_CID_ENABLE_PVR150_WORKAROUND (V4L2_CID_PRIVATE_BASE+0) 33#define CX25840_CID_ENABLE_PVR150_WORKAROUND (V4L2_CID_PRIVATE_BASE+0)
34 34
35enum cx25840_video_input {
36 /* Composite video inputs In1-In8 */
37 CX25840_COMPOSITE1 = 1,
38 CX25840_COMPOSITE2,
39 CX25840_COMPOSITE3,
40 CX25840_COMPOSITE4,
41 CX25840_COMPOSITE5,
42 CX25840_COMPOSITE6,
43 CX25840_COMPOSITE7,
44 CX25840_COMPOSITE8,
45
46 /* S-Video inputs consist of one luma input (In1-In4) ORed with one
47 chroma input (In5-In8) */
48 CX25840_SVIDEO_LUMA1 = 0x10,
49 CX25840_SVIDEO_LUMA2 = 0x20,
50 CX25840_SVIDEO_LUMA3 = 0x30,
51 CX25840_SVIDEO_LUMA4 = 0x40,
52 CX25840_SVIDEO_CHROMA4 = 0x400,
53 CX25840_SVIDEO_CHROMA5 = 0x500,
54 CX25840_SVIDEO_CHROMA6 = 0x600,
55 CX25840_SVIDEO_CHROMA7 = 0x700,
56 CX25840_SVIDEO_CHROMA8 = 0x800,
57
58 /* S-Video aliases for common luma/chroma combinations */
59 CX25840_SVIDEO1 = 0x510,
60 CX25840_SVIDEO2 = 0x620,
61 CX25840_SVIDEO3 = 0x730,
62 CX25840_SVIDEO4 = 0x840,
63};
64
65enum cx25840_audio_input {
66 /* Audio inputs: serial or In4-In8 */
67 CX25840_AUDIO_SERIAL,
68 CX25840_AUDIO4 = 4,
69 CX25840_AUDIO5,
70 CX25840_AUDIO6,
71 CX25840_AUDIO7,
72 CX25840_AUDIO8,
73};
74
75struct cx25840_state { 35struct cx25840_state {
76 int pvr150_workaround; 36 int pvr150_workaround;
77 int radio; 37 int radio;
diff --git a/drivers/media/video/cx25840/cx25840-firmware.c b/drivers/media/video/cx25840/cx25840-firmware.c
index e1a7823d82cd..f59ced181c55 100644
--- a/drivers/media/video/cx25840/cx25840-firmware.c
+++ b/drivers/media/video/cx25840/cx25840-firmware.c
@@ -20,11 +20,22 @@
20#include <linux/i2c-algo-bit.h> 20#include <linux/i2c-algo-bit.h>
21#include <linux/firmware.h> 21#include <linux/firmware.h>
22#include <media/v4l2-common.h> 22#include <media/v4l2-common.h>
23#include <media/cx25840.h>
23 24
24#include "cx25840.h" 25#include "cx25840-core.h"
25 26
26#define FWFILE "v4l-cx25840.fw" 27#define FWFILE "v4l-cx25840.fw"
27#define FWSEND 1024 28
29/*
30 * Mike Isely <isely@pobox.com> - The FWSEND parameter controls the
31 * size of the firmware chunks sent down the I2C bus to the chip.
32 * Previously this had been set to 1024 but unfortunately some I2C
33 * implementations can't transfer data in such big gulps.
34 * Specifically, the pvrusb2 driver has a hard limit of around 60
35 * bytes, due to the encapsulation there of I2C traffic into USB
36 * messages. So we have to significantly reduce this parameter.
37 */
38#define FWSEND 48
28 39
29#define FWDEV(x) &((x)->adapter->dev) 40#define FWDEV(x) &((x)->adapter->dev)
30 41
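
The new FWSEND comment above spells out the constraint: some I2C adapters (notably pvrusb2's USB-encapsulated bus, limited to roughly 60 bytes per message) cannot take 1024-byte writes, so the firmware image is streamed in 48-byte chunks. A hedged sketch of such a chunked download loop (the helper name and the direct use of i2c_master_send are assumptions, not the driver's exact code):

/* Illustrative chunked firmware download, FWSEND bytes at a time. */
static int fw_send_all(struct i2c_client *client, const u8 *data, int size)
{
	int sent = 0;

	while (sent < size) {
		int len = min(FWSEND, size - sent);

		/* keep each transfer within the smallest adapter's limit */
		if (i2c_master_send(client, data + sent, len) != len)
			return -EIO;
		sent += len;
	}
	return 0;
}
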
diff --git a/drivers/media/video/cx25840/cx25840-vbi.c b/drivers/media/video/cx25840/cx25840-vbi.c
index e96fd1f1d6dc..57feca288d2b 100644
--- a/drivers/media/video/cx25840/cx25840-vbi.c
+++ b/drivers/media/video/cx25840/cx25840-vbi.c
@@ -19,8 +19,9 @@
19#include <linux/videodev2.h> 19#include <linux/videodev2.h>
20#include <linux/i2c.h> 20#include <linux/i2c.h>
21#include <media/v4l2-common.h> 21#include <media/v4l2-common.h>
22#include <media/cx25840.h>
22 23
23#include "cx25840.h" 24#include "cx25840-core.h"
24 25
25static int odd_parity(u8 c) 26static int odd_parity(u8 c)
26{ 27{
@@ -151,7 +152,7 @@ int cx25840_vbi(struct i2c_client *client, unsigned int cmd, void *arg)
151 case VIDIOC_G_FMT: 152 case VIDIOC_G_FMT:
152 { 153 {
153 static u16 lcr2vbi[] = { 154 static u16 lcr2vbi[] = {
154 0, V4L2_SLICED_TELETEXT_PAL_B, 0, /* 1 */ 155 0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */
155 0, V4L2_SLICED_WSS_625, 0, /* 4 */ 156 0, V4L2_SLICED_WSS_625, 0, /* 4 */
156 V4L2_SLICED_CAPTION_525, /* 6 */ 157 V4L2_SLICED_CAPTION_525, /* 6 */
157 0, 0, V4L2_SLICED_VPS, 0, 0, /* 9 */ 158 0, 0, V4L2_SLICED_VPS, 0, 0, /* 9 */
@@ -231,7 +232,7 @@ int cx25840_vbi(struct i2c_client *client, unsigned int cmd, void *arg)
231 for (i = 7; i <= 23; i++) { 232 for (i = 7; i <= 23; i++) {
232 for (x = 0; x <= 1; x++) { 233 for (x = 0; x <= 1; x++) {
233 switch (svbi->service_lines[1-x][i]) { 234 switch (svbi->service_lines[1-x][i]) {
234 case V4L2_SLICED_TELETEXT_PAL_B: 235 case V4L2_SLICED_TELETEXT_B:
235 lcr[i] |= 1 << (4 * x); 236 lcr[i] |= 1 << (4 * x);
236 break; 237 break;
237 case V4L2_SLICED_WSS_625: 238 case V4L2_SLICED_WSS_625:
@@ -282,7 +283,7 @@ int cx25840_vbi(struct i2c_client *client, unsigned int cmd, void *arg)
282 283
283 switch (id2) { 284 switch (id2) {
284 case 1: 285 case 1:
285 id2 = V4L2_SLICED_TELETEXT_PAL_B; 286 id2 = V4L2_SLICED_TELETEXT_B;
286 break; 287 break;
287 case 4: 288 case 4:
288 id2 = V4L2_SLICED_WSS_625; 289 id2 = V4L2_SLICED_WSS_625;
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index ff0f72340d69..630273992a41 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -1,3 +1,7 @@
1config VIDEO_CX88_VP3054
2 tristate
3 depends on VIDEO_CX88_DVB && DVB_MT352
4
1config VIDEO_CX88 5config VIDEO_CX88
2 tristate "Conexant 2388x (bt878 successor) support" 6 tristate "Conexant 2388x (bt878 successor) support"
3 depends on VIDEO_DEV && PCI && I2C 7 depends on VIDEO_DEV && PCI && I2C
@@ -25,7 +29,7 @@ config VIDEO_CX88_ALSA
25 29
26 It only works with boards with function 01 enabled. 30 It only works with boards with function 01 enabled.
27 To check if your board supports, use lspci -n. 31 To check if your board supports, use lspci -n.
28 If supported, you should see 1471:8801 or 1471:8811 32 If supported, you should see 14f1:8801 or 14f1:8811
29 PCI device. 33 PCI device.
30 34
31 To compile this driver as a module, choose M here: the 35 To compile this driver as a module, choose M here: the
@@ -73,10 +77,11 @@ config VIDEO_CX88_DVB_MT352
73 This adds DVB-T support for cards based on the 77 This adds DVB-T support for cards based on the
74 Connexant 2388x chip and the MT352 demodulator. 78 Connexant 2388x chip and the MT352 demodulator.
75 79
76config VIDEO_CX88_VP3054 80config VIDEO_CX88_DVB_VP3054
77 tristate "VP-3054 Secondary I2C Bus Support" 81 bool "VP-3054 Secondary I2C Bus Support"
78 default m 82 default y
79 depends on DVB_MT352 83 depends on VIDEO_CX88_DVB_MT352
84 select VIDEO_CX88_VP3054
80 ---help--- 85 ---help---
81 This adds DVB-T support for cards based on the 86 This adds DVB-T support for cards based on the
82 Connexant 2388x chip and the MT352 demodulator, 87 Connexant 2388x chip and the MT352 demodulator,
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index f62fd706b45a..3ba3439db580 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -151,8 +151,8 @@ struct em28xx_board em28xx_boards[] = {
151 },{ 151 },{
152 .type = EM28XX_VMUX_SVIDEO, 152 .type = EM28XX_VMUX_SVIDEO,
153 .vmux = 2, 153 .vmux = 2,
154 .amux = MSP_INPUT(MSP_IN_SCART_1, MSP_IN_TUNER_1, 154 .amux = MSP_INPUT(MSP_IN_SCART1, MSP_IN_TUNER1,
155 MSP_DSP_OUT_SCART, MSP_DSP_OUT_SCART), 155 MSP_DSP_IN_SCART, MSP_DSP_IN_SCART),
156 }}, 156 }},
157 }, 157 },
158 [EM2820_BOARD_MSI_VOX_USB_2] = { 158 [EM2820_BOARD_MSI_VOX_USB_2] = {
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index dfba33d0fa61..ddc92cbb5276 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -222,7 +222,7 @@ static void video_mux(struct em28xx *dev, int index)
222 if (dev->i2s_speed) 222 if (dev->i2s_speed)
223 em28xx_i2c_call_clients(dev, VIDIOC_INT_I2S_CLOCK_FREQ, &dev->i2s_speed); 223 em28xx_i2c_call_clients(dev, VIDIOC_INT_I2S_CLOCK_FREQ, &dev->i2s_speed);
224 route.input = dev->ctl_ainput; 224 route.input = dev->ctl_ainput;
225 route.output = MSP_OUTPUT(MSP_OUT_SCART1_DA); 225 route.output = MSP_OUTPUT(MSP_SC_IN_DSP_SCART1);
226 /* Note: this is msp3400 specific */ 226 /* Note: this is msp3400 specific */
227 em28xx_i2c_call_clients(dev, VIDIOC_INT_S_AUDIO_ROUTING, &route); 227 em28xx_i2c_call_clients(dev, VIDIOC_INT_S_AUDIO_ROUTING, &route);
228 ainput = EM28XX_AUDIO_SRC_TUNER; 228 ainput = EM28XX_AUDIO_SRC_TUNER;
@@ -1141,26 +1141,16 @@ static int em28xx_do_ioctl(struct inode *inode, struct file *filp,
1141 case VIDIOC_G_TUNER: 1141 case VIDIOC_G_TUNER:
1142 { 1142 {
1143 struct v4l2_tuner *t = arg; 1143 struct v4l2_tuner *t = arg;
1144 int status = 0;
1145 1144
1146 if (0 != t->index) 1145 if (0 != t->index)
1147 return -EINVAL; 1146 return -EINVAL;
1148 1147
1149 memset(t, 0, sizeof(*t)); 1148 memset(t, 0, sizeof(*t));
1150 strcpy(t->name, "Tuner"); 1149 strcpy(t->name, "Tuner");
1151 t->type = V4L2_TUNER_ANALOG_TV;
1152 t->capability = V4L2_TUNER_CAP_NORM;
1153 t->rangehigh = 0xffffffffUL; /* FIXME: set correct range */
1154/* t->signal = 0xffff;*/
1155/* em28xx_i2c_call_clients(dev,VIDIOC_G_TUNER,t);*/
1156 /* No way to get signal strength? */
1157 mutex_lock(&dev->lock); 1150 mutex_lock(&dev->lock);
1158 em28xx_i2c_call_clients(dev, DECODER_GET_STATUS, 1151 /* let clients fill in the remainder of this struct */
1159 &status); 1152 em28xx_i2c_call_clients(dev, cmd, t);
1160 mutex_unlock(&dev->lock); 1153 mutex_unlock(&dev->lock);
1161 t->signal =
1162 (status & DECODER_STATUS_GOOD) != 0 ? 0xffff : 0;
1163
1164 em28xx_videodbg("VIDIO_G_TUNER: signal=%x, afc=%x\n", t->signal, 1154 em28xx_videodbg("VIDIO_G_TUNER: signal=%x, afc=%x\n", t->signal,
1165 t->afc); 1155 t->afc);
1166 return 0; 1156 return 0;
@@ -1168,26 +1158,13 @@ static int em28xx_do_ioctl(struct inode *inode, struct file *filp,
1168 case VIDIOC_S_TUNER: 1158 case VIDIOC_S_TUNER:
1169 { 1159 {
1170 struct v4l2_tuner *t = arg; 1160 struct v4l2_tuner *t = arg;
1171 int status = 0;
1172 1161
1173 if (0 != t->index) 1162 if (0 != t->index)
1174 return -EINVAL; 1163 return -EINVAL;
1175 memset(t, 0, sizeof(*t));
1176 strcpy(t->name, "Tuner");
1177 t->type = V4L2_TUNER_ANALOG_TV;
1178 t->capability = V4L2_TUNER_CAP_NORM;
1179 t->rangehigh = 0xffffffffUL; /* FIXME: set correct range */
1180/* t->signal = 0xffff; */
1181 /* No way to get signal strength? */
1182 mutex_lock(&dev->lock); 1164 mutex_lock(&dev->lock);
1183 em28xx_i2c_call_clients(dev, DECODER_GET_STATUS, 1165 /* let clients handle this */
1184 &status); 1166 em28xx_i2c_call_clients(dev, cmd, t);
1185 mutex_unlock(&dev->lock); 1167 mutex_unlock(&dev->lock);
1186 t->signal =
1187 (status & DECODER_STATUS_GOOD) != 0 ? 0xffff : 0;
1188
1189 em28xx_videodbg("VIDIO_S_TUNER: signal=%x, afc=%x\n",
1190 t->signal, t->afc);
1191 return 0; 1168 return 0;
1192 } 1169 }
1193 case VIDIOC_G_FREQUENCY: 1170 case VIDIOC_G_FREQUENCY:
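
[Editor's note] The em28xx hunk above stops computing t->signal locally from DECODER_GET_STATUS and instead forwards the whole VIDIOC_G_TUNER/VIDIOC_S_TUNER argument to the attached i2c sub-drivers so they can fill in the tuner fields themselves. A minimal user-space sketch of that delegation pattern; every name below is a local stand-in, not a kernel API:

#include <stdio.h>
#include <string.h>

struct tuner_info {
	char name[32];
	unsigned signal;	/* 0..0xffff */
	int afc;
};

/* hypothetical sub-driver callbacks standing in for the i2c clients */
static void msp3400_fill(struct tuner_info *t) { t->signal = 0xc000; t->afc = 0; }
static void tvaudio_fill(struct tuner_info *t) { (void)t; /* nothing to add */ }

static void (*clients[])(struct tuner_info *) = { msp3400_fill, tvaudio_fill };

int main(void)
{
	struct tuner_info t;
	size_t i;

	/* the bridge driver only prepares the struct ... */
	memset(&t, 0, sizeof(t));
	strcpy(t.name, "Tuner");

	/* ... and lets every attached client fill in what it knows */
	for (i = 0; i < sizeof(clients) / sizeof(clients[0]); i++)
		clients[i](&t);

	printf("signal=%x, afc=%d\n", t.signal, t.afc);
	return 0;
}
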
diff --git a/drivers/media/video/et61x251/Kconfig b/drivers/media/video/et61x251/Kconfig
new file mode 100644
index 000000000000..6c43a90c6569
--- /dev/null
+++ b/drivers/media/video/et61x251/Kconfig
@@ -0,0 +1,14 @@
1config USB_ET61X251
2 tristate "USB ET61X[12]51 PC Camera Controller support"
3 depends on USB && VIDEO_DEV
4 ---help---
5 Say Y here if you want support for cameras based on Etoms ET61X151
6 or ET61X251 PC Camera Controllers.
7
8 See <file:Documentation/video4linux/et61x251.txt> for more info.
9
10 This driver uses the Video For Linux API. You must say Y or M to
11 "Video For Linux" to use this driver.
12
13 To compile this driver as a module, choose M here: the
14 module will be called et61x251.
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index 95bacf435414..7e66d83fe0ce 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -411,6 +411,9 @@ static int ir_probe(struct i2c_adapter *adap)
411 case I2C_HW_B_BT848: 411 case I2C_HW_B_BT848:
412 probe = probe_bttv; 412 probe = probe_bttv;
413 break; 413 break;
414 case I2C_HW_B_CX2341X:
415 probe = probe_bttv;
416 break;
414 case I2C_HW_SAA7134: 417 case I2C_HW_SAA7134:
415 probe = probe_saa7134; 418 probe = probe_saa7134;
416 break; 419 break;
diff --git a/drivers/media/video/msp3400-driver.c b/drivers/media/video/msp3400-driver.c
index c40e8ba9a2ea..b806999d6e0f 100644
--- a/drivers/media/video/msp3400-driver.c
+++ b/drivers/media/video/msp3400-driver.c
@@ -279,20 +279,8 @@ void msp_set_scart(struct i2c_client *client, int in, int out)
279 msp_write_dsp(client, 0x13, state->acb); 279 msp_write_dsp(client, 0x13, state->acb);
280 280
281 /* Sets I2S speed 0 = 1.024 Mbps, 1 = 2.048 Mbps */ 281 /* Sets I2S speed 0 = 1.024 Mbps, 1 = 2.048 Mbps */
282 msp_write_dem(client, 0x40, state->i2s_mode); 282 if (state->has_i2s_conf)
283} 283 msp_write_dem(client, 0x40, state->i2s_mode);
284
285void msp_set_mute(struct i2c_client *client)
286{
287 struct msp_state *state = i2c_get_clientdata(client);
288
289 v4l_dbg(1, msp_debug, client, "mute audio\n");
290 msp_write_dsp(client, 0x0000, 0);
291 msp_write_dsp(client, 0x0007, 1);
292 if (state->has_scart2_out_volume)
293 msp_write_dsp(client, 0x0040, 1);
294 if (state->has_headphones)
295 msp_write_dsp(client, 0x0006, 0);
296} 284}
297 285
298void msp_set_audio(struct i2c_client *client) 286void msp_set_audio(struct i2c_client *client)
@@ -300,17 +288,19 @@ void msp_set_audio(struct i2c_client *client)
300 struct msp_state *state = i2c_get_clientdata(client); 288 struct msp_state *state = i2c_get_clientdata(client);
301 int bal = 0, bass, treble, loudness; 289 int bal = 0, bass, treble, loudness;
302 int val = 0; 290 int val = 0;
291 int reallymuted = state->muted | state->scan_in_progress;
303 292
304 if (!state->muted) 293 if (!reallymuted)
305 val = (state->volume * 0x7f / 65535) << 8; 294 val = (state->volume * 0x7f / 65535) << 8;
306 295
307 v4l_dbg(1, msp_debug, client, "mute=%s volume=%d\n", 296 v4l_dbg(1, msp_debug, client, "mute=%s scanning=%s volume=%d\n",
308 state->muted ? "on" : "off", state->volume); 297 state->muted ? "on" : "off", state->scan_in_progress ? "yes" : "no",
298 state->volume);
309 299
310 msp_write_dsp(client, 0x0000, val); 300 msp_write_dsp(client, 0x0000, val);
311 msp_write_dsp(client, 0x0007, state->muted ? 0x1 : (val | 0x1)); 301 msp_write_dsp(client, 0x0007, reallymuted ? 0x1 : (val | 0x1));
312 if (state->has_scart2_out_volume) 302 if (state->has_scart2_out_volume)
313 msp_write_dsp(client, 0x0040, state->muted ? 0x1 : (val | 0x1)); 303 msp_write_dsp(client, 0x0040, reallymuted ? 0x1 : (val | 0x1));
314 if (state->has_headphones) 304 if (state->has_headphones)
315 msp_write_dsp(client, 0x0006, val); 305 msp_write_dsp(client, 0x0006, val);
316 if (!state->has_sound_processing) 306 if (!state->has_sound_processing)
@@ -346,7 +336,6 @@ static void msp_wake_thread(struct i2c_client *client)
346 336
347 if (NULL == state->kthread) 337 if (NULL == state->kthread)
348 return; 338 return;
349 msp_set_mute(client);
350 state->watch_stereo = 0; 339 state->watch_stereo = 0;
351 state->restart = 1; 340 state->restart = 1;
352 wake_up_interruptible(&state->wq); 341 wake_up_interruptible(&state->wq);
@@ -374,19 +363,15 @@ int msp_sleep(struct msp_state *state, int timeout)
374 363
375/* ------------------------------------------------------------------------ */ 364/* ------------------------------------------------------------------------ */
376 365
377static int msp_mode_v4l2_to_v4l1(int rxsubchans) 366static int msp_mode_v4l2_to_v4l1(int rxsubchans, int audmode)
378{ 367{
379 int mode = 0; 368 if (rxsubchans == V4L2_TUNER_SUB_MONO)
380 369 return VIDEO_SOUND_MONO;
381 if (rxsubchans & V4L2_TUNER_SUB_STEREO) 370 if (rxsubchans == V4L2_TUNER_SUB_STEREO)
382 mode |= VIDEO_SOUND_STEREO; 371 return VIDEO_SOUND_STEREO;
383 if (rxsubchans & V4L2_TUNER_SUB_LANG2) 372 if (audmode == V4L2_TUNER_MODE_LANG2)
384 mode |= VIDEO_SOUND_LANG2 | VIDEO_SOUND_STEREO; 373 return VIDEO_SOUND_LANG2;
385 if (rxsubchans & V4L2_TUNER_SUB_LANG1) 374 return VIDEO_SOUND_LANG1;
386 mode |= VIDEO_SOUND_LANG1 | VIDEO_SOUND_STEREO;
387 if (mode == 0)
388 mode |= VIDEO_SOUND_MONO;
389 return mode;
390} 375}
391 376
392static int msp_mode_v4l1_to_v4l2(int mode) 377static int msp_mode_v4l1_to_v4l2(int mode)
@@ -605,7 +590,7 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
605 break; 590 break;
606 if (state->opmode == OPMODE_AUTOSELECT) 591 if (state->opmode == OPMODE_AUTOSELECT)
607 msp_detect_stereo(client); 592 msp_detect_stereo(client);
608 va->mode = msp_mode_v4l2_to_v4l1(state->rxsubchans); 593 va->mode = msp_mode_v4l2_to_v4l1(state->rxsubchans, state->audmode);
609 break; 594 break;
610 } 595 }
611 596
@@ -620,7 +605,8 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
620 state->treble = va->treble; 605 state->treble = va->treble;
621 msp_set_audio(client); 606 msp_set_audio(client);
622 607
623 if (va->mode != 0 && state->radio == 0) { 608 if (va->mode != 0 && state->radio == 0 &&
609 state->audmode != msp_mode_v4l1_to_v4l2(va->mode)) {
624 state->audmode = msp_mode_v4l1_to_v4l2(va->mode); 610 state->audmode = msp_mode_v4l1_to_v4l2(va->mode);
625 msp_set_audmode(client); 611 msp_set_audmode(client);
626 } 612 }
@@ -687,21 +673,23 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
687 int sc_in = rt->input & 0x7; 673 int sc_in = rt->input & 0x7;
688 int sc1_out = rt->output & 0xf; 674 int sc1_out = rt->output & 0xf;
689 int sc2_out = (rt->output >> 4) & 0xf; 675 int sc2_out = (rt->output >> 4) & 0xf;
690 u16 val; 676 u16 val, reg;
691 677
678 if (state->routing.input == rt->input &&
679 state->routing.output == rt->output)
680 break;
692 state->routing = *rt; 681 state->routing = *rt;
693 if (state->opmode == OPMODE_AUTOSELECT) {
694 val = msp_read_dem(client, 0x30) & ~0x100;
695 msp_write_dem(client, 0x30, val | (tuner ? 0x100 : 0));
696 } else {
697 val = msp_read_dem(client, 0xbb) & ~0x100;
698 msp_write_dem(client, 0xbb, val | (tuner ? 0x100 : 0));
699 }
700 msp_set_scart(client, sc_in, 0); 682 msp_set_scart(client, sc_in, 0);
701 msp_set_scart(client, sc1_out, 1); 683 msp_set_scart(client, sc1_out, 1);
702 msp_set_scart(client, sc2_out, 2); 684 msp_set_scart(client, sc2_out, 2);
703 msp_set_audmode(client); 685 msp_set_audmode(client);
704 msp_wake_thread(client); 686 reg = (state->opmode == OPMODE_AUTOSELECT) ? 0x30 : 0xbb;
687 val = msp_read_dem(client, reg);
688 if (tuner != ((val >> 8) & 1)) {
689 msp_write_dem(client, reg, (val & ~0x100) | (tuner << 8));
690 /* wake thread when a new tuner input is chosen */
691 msp_wake_thread(client);
692 }
705 break; 693 break;
706 } 694 }
707 695
@@ -715,7 +703,7 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
715 msp_detect_stereo(client); 703 msp_detect_stereo(client);
716 vt->audmode = state->audmode; 704 vt->audmode = state->audmode;
717 vt->rxsubchans = state->rxsubchans; 705 vt->rxsubchans = state->rxsubchans;
718 vt->capability = V4L2_TUNER_CAP_STEREO | 706 vt->capability |= V4L2_TUNER_CAP_STEREO |
719 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2; 707 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
720 break; 708 break;
721 } 709 }
@@ -726,6 +714,8 @@ static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
726 714
727 if (state->radio) /* TODO: add mono/stereo support for radio */ 715 if (state->radio) /* TODO: add mono/stereo support for radio */
728 break; 716 break;
717 if (state->audmode == vt->audmode)
718 break;
729 state->audmode = vt->audmode; 719 state->audmode = vt->audmode;
730 /* only set audmode */ 720 /* only set audmode */
731 msp_set_audmode(client); 721 msp_set_audmode(client);
@@ -887,7 +877,7 @@ static int msp_attach(struct i2c_adapter *adapter, int address, int kind)
887 877
888 memset(state, 0, sizeof(*state)); 878 memset(state, 0, sizeof(*state));
889 state->v4l2_std = V4L2_STD_NTSC; 879 state->v4l2_std = V4L2_STD_NTSC;
890 state->audmode = V4L2_TUNER_MODE_LANG1; 880 state->audmode = V4L2_TUNER_MODE_STEREO;
891 state->volume = 58880; /* 0db gain */ 881 state->volume = 58880; /* 0db gain */
892 state->balance = 32768; /* 0db gain */ 882 state->balance = 32768; /* 0db gain */
893 state->bass = 32768; 883 state->bass = 32768;
@@ -931,13 +921,16 @@ static int msp_attach(struct i2c_adapter *adapter, int address, int kind)
931 state->has_radio = msp_revision >= 'G'; 921 state->has_radio = msp_revision >= 'G';
932 /* Has headphones output: not for stripped down products */ 922 /* Has headphones output: not for stripped down products */
933 state->has_headphones = msp_prod_lo < 5; 923 state->has_headphones = msp_prod_lo < 5;
924 /* Has scart2 input: not in stripped down products of the '3' family */
925 state->has_scart2 = msp_family >= 4 || msp_prod_lo < 7;
926 /* Has scart3 input: not in stripped down products of the '3' family */
927 state->has_scart3 = msp_family >= 4 || msp_prod_lo < 5;
934 /* Has scart4 input: not in pre D revisions, not in stripped D revs */ 928 /* Has scart4 input: not in pre D revisions, not in stripped D revs */
935 state->has_scart4 = msp_family >= 4 || (msp_revision >= 'D' && msp_prod_lo < 5); 929 state->has_scart4 = msp_family >= 4 || (msp_revision >= 'D' && msp_prod_lo < 5);
936 /* Has scart2 and scart3 inputs and scart2 output: not in stripped 930 /* Has scart2 output: not in stripped down products of the '3' family */
937 down products of the '3' family */ 931 state->has_scart2_out = msp_family >= 4 || msp_prod_lo < 5;
938 state->has_scart23_in_scart2_out = msp_family >= 4 || msp_prod_lo < 5;
939 /* Has scart2 a volume control? Not in pre-D revisions. */ 932 /* Has scart2 a volume control? Not in pre-D revisions. */
940 state->has_scart2_out_volume = msp_revision > 'C' && state->has_scart23_in_scart2_out; 933 state->has_scart2_out_volume = msp_revision > 'C' && state->has_scart2_out;
941 /* Has a configurable i2s out? */ 934 /* Has a configurable i2s out? */
942 state->has_i2s_conf = msp_revision >= 'G' && msp_prod_lo < 7; 935 state->has_i2s_conf = msp_revision >= 'G' && msp_prod_lo < 7;
943 /* Has subwoofer output: not in pre-D revs and not in stripped down products */ 936 /* Has subwoofer output: not in pre-D revs and not in stripped down products */
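
[Editor's note] One behavioural change in the msp3400-driver.c hunks above is the rewritten msp_mode_v4l2_to_v4l1() helper: instead of OR-ing together every V4L1 flag that could apply, it now returns a single mode and uses the requested audmode to pick between the two languages of a bilingual broadcast. A standalone sketch of that mapping; the constant values are illustrative, not the real header values:

#include <stdio.h>

/* illustrative constants; the real ones live in the videodev headers */
enum { SUB_MONO = 1, SUB_STEREO = 2, SUB_LANG1 = 8, SUB_LANG2 = 16 };
enum { MODE_MONO, MODE_STEREO, MODE_LANG1, MODE_LANG2 };
enum { SOUND_MONO = 1, SOUND_STEREO = 2, SOUND_LANG1 = 4, SOUND_LANG2 = 8 };

static int mode_v4l2_to_v4l1(int rxsubchans, int audmode)
{
	if (rxsubchans == SUB_MONO)
		return SOUND_MONO;	/* mono-only broadcast, no choice */
	if (rxsubchans == SUB_STEREO)
		return SOUND_STEREO;	/* plain stereo */
	/* bilingual: report the language the user asked for */
	if (audmode == MODE_LANG2)
		return SOUND_LANG2;
	return SOUND_LANG1;
}

int main(void)
{
	printf("%d\n", mode_v4l2_to_v4l1(SUB_LANG1 | SUB_LANG2, MODE_LANG2));
	return 0;
}
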
diff --git a/drivers/media/video/msp3400-driver.h b/drivers/media/video/msp3400-driver.h
index 1940748bb633..4e451049013d 100644
--- a/drivers/media/video/msp3400-driver.h
+++ b/drivers/media/video/msp3400-driver.h
@@ -54,8 +54,10 @@ struct msp_state {
54 u8 has_radio; 54 u8 has_radio;
55 u8 has_headphones; 55 u8 has_headphones;
56 u8 has_ntsc_jp_d_k3; 56 u8 has_ntsc_jp_d_k3;
57 u8 has_scart2;
58 u8 has_scart3;
57 u8 has_scart4; 59 u8 has_scart4;
58 u8 has_scart23_in_scart2_out; 60 u8 has_scart2_out;
59 u8 has_scart2_out_volume; 61 u8 has_scart2_out_volume;
60 u8 has_i2s_conf; 62 u8 has_i2s_conf;
61 u8 has_subwoofer; 63 u8 has_subwoofer;
@@ -83,6 +85,7 @@ struct msp_state {
83 int volume, muted; 85 int volume, muted;
84 int balance, loudness; 86 int balance, loudness;
85 int bass, treble; 87 int bass, treble;
88 int scan_in_progress;
86 89
87 /* thread */ 90 /* thread */
88 struct task_struct *kthread; 91 struct task_struct *kthread;
@@ -98,7 +101,6 @@ int msp_read_dem(struct i2c_client *client, int addr);
98int msp_read_dsp(struct i2c_client *client, int addr); 101int msp_read_dsp(struct i2c_client *client, int addr);
99int msp_reset(struct i2c_client *client); 102int msp_reset(struct i2c_client *client);
100void msp_set_scart(struct i2c_client *client, int in, int out); 103void msp_set_scart(struct i2c_client *client, int in, int out);
101void msp_set_mute(struct i2c_client *client);
102void msp_set_audio(struct i2c_client *client); 104void msp_set_audio(struct i2c_client *client);
103int msp_sleep(struct msp_state *state, int timeout); 105int msp_sleep(struct msp_state *state, int timeout);
104 106
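
[Editor's note] The scan_in_progress field added to struct msp_state above lets msp_set_audio() treat an in-progress carrier scan exactly like a user mute, which is what makes the removed msp_set_mute() helper unnecessary. A small sketch of the volume/mute computation as the driver hunk performs it; the register writes are just printed here:

#include <stdio.h>

/* stand-in for msp_write_dsp(); only shows what would be written */
static void write_dsp(int reg, int val)
{
	printf("dsp[0x%04x] = 0x%04x\n", reg, val);
}

static void set_audio(int volume /* 0..65535 */, int muted, int scan_in_progress)
{
	int reallymuted = muted | scan_in_progress;
	int val = 0;

	if (!reallymuted)
		val = (volume * 0x7f / 65535) << 8;	/* 7-bit volume in the high byte */

	/* same two registers msp_set_audio() touches first in the hunk above */
	write_dsp(0x0000, val);
	write_dsp(0x0007, reallymuted ? 0x1 : (val | 0x1));
}

int main(void)
{
	set_audio(58880, 0, 0);		/* 58880 is the 0 dB default from the driver */
	set_audio(58880, 0, 1);		/* carrier scan running, so output stays muted */
	return 0;
}
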
diff --git a/drivers/media/video/msp3400-kthreads.c b/drivers/media/video/msp3400-kthreads.c
index c3984ea9ca07..633a10213789 100644
--- a/drivers/media/video/msp3400-kthreads.c
+++ b/drivers/media/video/msp3400-kthreads.c
@@ -170,7 +170,7 @@ static void msp_set_source(struct i2c_client *client, u16 src)
170 msp_write_dsp(client, 0x000a, src); 170 msp_write_dsp(client, 0x000a, src);
171 msp_write_dsp(client, 0x000b, src); 171 msp_write_dsp(client, 0x000b, src);
172 msp_write_dsp(client, 0x000c, src); 172 msp_write_dsp(client, 0x000c, src);
173 if (state->has_scart23_in_scart2_out) 173 if (state->has_scart2_out)
174 msp_write_dsp(client, 0x0041, src); 174 msp_write_dsp(client, 0x0041, src);
175} 175}
176 176
@@ -228,6 +228,7 @@ static void msp3400c_set_audmode(struct i2c_client *client)
228 char *modestr = (state->audmode >= 0 && state->audmode < 5) ? 228 char *modestr = (state->audmode >= 0 && state->audmode < 5) ?
229 strmode[state->audmode] : "unknown"; 229 strmode[state->audmode] : "unknown";
230 int src = 0; /* channel source: FM/AM, nicam or SCART */ 230 int src = 0; /* channel source: FM/AM, nicam or SCART */
231 int audmode = state->audmode;
231 232
232 if (state->opmode == OPMODE_AUTOSELECT) { 233 if (state->opmode == OPMODE_AUTOSELECT) {
233 /* this method would break everything, let's make sure 234 /* this method would break everything, let's make sure
@@ -239,11 +240,29 @@ static void msp3400c_set_audmode(struct i2c_client *client)
239 return; 240 return;
240 } 241 }
241 242
243 /* Note: for the C and D revs no NTSC stereo + SAP is possible as
244 the hardware does not support SAP. So the rxsubchans combination
245 of STEREO | LANG2 does not occur. */
246
247 /* switch to mono if only mono is available */
248 if (state->rxsubchans == V4L2_TUNER_SUB_MONO)
249 audmode = V4L2_TUNER_MODE_MONO;
250 /* if bilingual */
251 else if (state->rxsubchans & V4L2_TUNER_SUB_LANG2) {
252 /* and mono or stereo, then fallback to lang1 */
253 if (audmode == V4L2_TUNER_MODE_MONO ||
254 audmode == V4L2_TUNER_MODE_STEREO)
255 audmode = V4L2_TUNER_MODE_LANG1;
256 }
257 /* if stereo, and audmode is not mono, then switch to stereo */
258 else if (audmode != V4L2_TUNER_MODE_MONO)
259 audmode = V4L2_TUNER_MODE_STEREO;
260
242 /* switch demodulator */ 261 /* switch demodulator */
243 switch (state->mode) { 262 switch (state->mode) {
244 case MSP_MODE_FM_TERRA: 263 case MSP_MODE_FM_TERRA:
245 v4l_dbg(1, msp_debug, client, "FM set_audmode: %s\n", modestr); 264 v4l_dbg(1, msp_debug, client, "FM set_audmode: %s\n", modestr);
246 switch (state->audmode) { 265 switch (audmode) {
247 case V4L2_TUNER_MODE_STEREO: 266 case V4L2_TUNER_MODE_STEREO:
248 msp_write_dsp(client, 0x000e, 0x3001); 267 msp_write_dsp(client, 0x000e, 0x3001);
249 break; 268 break;
@@ -257,7 +276,7 @@ static void msp3400c_set_audmode(struct i2c_client *client)
257 break; 276 break;
258 case MSP_MODE_FM_SAT: 277 case MSP_MODE_FM_SAT:
259 v4l_dbg(1, msp_debug, client, "SAT set_audmode: %s\n", modestr); 278 v4l_dbg(1, msp_debug, client, "SAT set_audmode: %s\n", modestr);
260 switch (state->audmode) { 279 switch (audmode) {
261 case V4L2_TUNER_MODE_MONO: 280 case V4L2_TUNER_MODE_MONO:
262 msp3400c_set_carrier(client, MSP_CARRIER(6.5), MSP_CARRIER(6.5)); 281 msp3400c_set_carrier(client, MSP_CARRIER(6.5), MSP_CARRIER(6.5));
263 break; 282 break;
@@ -296,7 +315,8 @@ static void msp3400c_set_audmode(struct i2c_client *client)
296 } 315 }
297 316
298 /* switch audio */ 317 /* switch audio */
299 switch (state->audmode) { 318 v4l_dbg(1, msp_debug, client, "set audmode %d\n", audmode);
319 switch (audmode) {
300 case V4L2_TUNER_MODE_STEREO: 320 case V4L2_TUNER_MODE_STEREO:
301 case V4L2_TUNER_MODE_LANG1_LANG2: 321 case V4L2_TUNER_MODE_LANG1_LANG2:
302 src |= 0x0020; 322 src |= 0x0020;
@@ -314,10 +334,6 @@ static void msp3400c_set_audmode(struct i2c_client *client)
314 src = 0x0030; 334 src = 0x0030;
315 break; 335 break;
316 case V4L2_TUNER_MODE_LANG1: 336 case V4L2_TUNER_MODE_LANG1:
317 /* switch to stereo for stereo transmission, otherwise
318 keep first language */
319 if (state->rxsubchans & V4L2_TUNER_SUB_STEREO)
320 src |= 0x0020;
321 break; 337 break;
322 case V4L2_TUNER_MODE_LANG2: 338 case V4L2_TUNER_MODE_LANG2:
323 src |= 0x0010; 339 src |= 0x0010;
@@ -367,7 +383,7 @@ static int msp3400c_detect_stereo(struct i2c_client *client)
367 if (val > 32767) 383 if (val > 32767)
368 val -= 65536; 384 val -= 65536;
369 v4l_dbg(2, msp_debug, client, "stereo detect register: %d\n", val); 385 v4l_dbg(2, msp_debug, client, "stereo detect register: %d\n", val);
370 if (val > 4096) { 386 if (val > 8192) {
371 rxsubchans = V4L2_TUNER_SUB_STEREO; 387 rxsubchans = V4L2_TUNER_SUB_STEREO;
372 } else if (val < -4096) { 388 } else if (val < -4096) {
373 rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; 389 rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
@@ -464,19 +480,22 @@ int msp3400c_thread(void *data)
464 if (state->radio || MSP_MODE_EXTERN == state->mode) { 480 if (state->radio || MSP_MODE_EXTERN == state->mode) {
465 /* no carrier scan, just unmute */ 481 /* no carrier scan, just unmute */
466 v4l_dbg(1, msp_debug, client, "thread: no carrier scan\n"); 482 v4l_dbg(1, msp_debug, client, "thread: no carrier scan\n");
483 state->scan_in_progress = 0;
467 msp_set_audio(client); 484 msp_set_audio(client);
468 continue; 485 continue;
469 } 486 }
470 487
471 /* mute */ 488 /* mute audio */
472 msp_set_mute(client); 489 state->scan_in_progress = 1;
490 msp_set_audio(client);
491
473 msp3400c_set_mode(client, MSP_MODE_AM_DETECT); 492 msp3400c_set_mode(client, MSP_MODE_AM_DETECT);
474 val1 = val2 = 0; 493 val1 = val2 = 0;
475 max1 = max2 = -1; 494 max1 = max2 = -1;
476 state->watch_stereo = 0; 495 state->watch_stereo = 0;
477 state->nicam_on = 0; 496 state->nicam_on = 0;
478 497
479 /* some time for the tuner to sync */ 498 /* wait for tuner to settle down after a channel change */
480 if (msp_sleep(state, 200)) 499 if (msp_sleep(state, 200))
481 goto restart; 500 goto restart;
482 501
@@ -552,7 +571,6 @@ int msp3400c_thread(void *data)
552 /* B/G NICAM */ 571 /* B/G NICAM */
553 state->second = msp3400c_carrier_detect_55[max2].cdo; 572 state->second = msp3400c_carrier_detect_55[max2].cdo;
554 msp3400c_set_mode(client, MSP_MODE_FM_NICAM1); 573 msp3400c_set_mode(client, MSP_MODE_FM_NICAM1);
555 msp3400c_set_carrier(client, state->second, state->main);
556 state->nicam_on = 1; 574 state->nicam_on = 1;
557 state->watch_stereo = 1; 575 state->watch_stereo = 1;
558 } else { 576 } else {
@@ -563,7 +581,6 @@ int msp3400c_thread(void *data)
563 /* PAL I NICAM */ 581 /* PAL I NICAM */
564 state->second = MSP_CARRIER(6.552); 582 state->second = MSP_CARRIER(6.552);
565 msp3400c_set_mode(client, MSP_MODE_FM_NICAM2); 583 msp3400c_set_mode(client, MSP_MODE_FM_NICAM2);
566 msp3400c_set_carrier(client, state->second, state->main);
567 state->nicam_on = 1; 584 state->nicam_on = 1;
568 state->watch_stereo = 1; 585 state->watch_stereo = 1;
569 break; 586 break;
@@ -577,13 +594,11 @@ int msp3400c_thread(void *data)
577 /* L NICAM or AM-mono */ 594 /* L NICAM or AM-mono */
578 state->second = msp3400c_carrier_detect_65[max2].cdo; 595 state->second = msp3400c_carrier_detect_65[max2].cdo;
579 msp3400c_set_mode(client, MSP_MODE_AM_NICAM); 596 msp3400c_set_mode(client, MSP_MODE_AM_NICAM);
580 msp3400c_set_carrier(client, state->second, state->main);
581 state->watch_stereo = 1; 597 state->watch_stereo = 1;
582 } else if (max2 == 0 && state->has_nicam) { 598 } else if (max2 == 0 && state->has_nicam) {
583 /* D/K NICAM */ 599 /* D/K NICAM */
584 state->second = msp3400c_carrier_detect_65[max2].cdo; 600 state->second = msp3400c_carrier_detect_65[max2].cdo;
585 msp3400c_set_mode(client, MSP_MODE_FM_NICAM1); 601 msp3400c_set_mode(client, MSP_MODE_FM_NICAM1);
586 msp3400c_set_carrier(client, state->second, state->main);
587 state->nicam_on = 1; 602 state->nicam_on = 1;
588 state->watch_stereo = 1; 603 state->watch_stereo = 1;
589 } else { 604 } else {
@@ -595,25 +610,25 @@ int msp3400c_thread(void *data)
595 no_second: 610 no_second:
596 state->second = msp3400c_carrier_detect_main[max1].cdo; 611 state->second = msp3400c_carrier_detect_main[max1].cdo;
597 msp3400c_set_mode(client, MSP_MODE_FM_TERRA); 612 msp3400c_set_mode(client, MSP_MODE_FM_TERRA);
598 msp3400c_set_carrier(client, state->second, state->main);
599 state->rxsubchans = V4L2_TUNER_SUB_MONO;
600 break; 613 break;
601 } 614 }
615 msp3400c_set_carrier(client, state->second, state->main);
602 616
603 /* unmute */ 617 /* unmute */
604 msp_set_audio(client); 618 state->scan_in_progress = 0;
605 msp3400c_set_audmode(client); 619 msp3400c_set_audmode(client);
620 msp_set_audio(client);
606 621
607 if (msp_debug) 622 if (msp_debug)
608 msp3400c_print_mode(client); 623 msp3400c_print_mode(client);
609 624
610 /* monitor tv audio mode, the first time don't wait 625 /* monitor tv audio mode, the first time don't wait
611 so long to get a quick stereo/bilingual result */ 626 so long to get a quick stereo/bilingual result */
612 if (msp_sleep(state, 1000)) 627 count = 3;
613 goto restart;
614 while (state->watch_stereo) { 628 while (state->watch_stereo) {
615 if (msp_sleep(state, 5000)) 629 if (msp_sleep(state, count ? 1000 : 5000))
616 goto restart; 630 goto restart;
631 if (count) count--;
617 watch_stereo(client); 632 watch_stereo(client);
618 } 633 }
619 } 634 }
@@ -626,7 +641,7 @@ int msp3410d_thread(void *data)
626{ 641{
627 struct i2c_client *client = data; 642 struct i2c_client *client = data;
628 struct msp_state *state = i2c_get_clientdata(client); 643 struct msp_state *state = i2c_get_clientdata(client);
629 int val, i, std; 644 int val, i, std, count;
630 645
631 v4l_dbg(1, msp_debug, client, "msp3410 daemon started\n"); 646 v4l_dbg(1, msp_debug, client, "msp3410 daemon started\n");
632 647
@@ -644,16 +659,14 @@ int msp3410d_thread(void *data)
644 if (state->mode == MSP_MODE_EXTERN) { 659 if (state->mode == MSP_MODE_EXTERN) {
645 /* no carrier scan needed, just unmute */ 660 /* no carrier scan needed, just unmute */
646 v4l_dbg(1, msp_debug, client, "thread: no carrier scan\n"); 661 v4l_dbg(1, msp_debug, client, "thread: no carrier scan\n");
662 state->scan_in_progress = 0;
647 msp_set_audio(client); 663 msp_set_audio(client);
648 continue; 664 continue;
649 } 665 }
650 666
651 /* put into sane state (and mute) */ 667 /* mute audio */
652 msp_reset(client); 668 state->scan_in_progress = 1;
653 669 msp_set_audio(client);
654 /* some time for the tuner to sync */
655 if (msp_sleep(state,200))
656 goto restart;
657 670
658 /* start autodetect. Note: autodetect is not supported for 671 /* start autodetect. Note: autodetect is not supported for
659 NTSC-M and radio, hence we force the standard in those cases. */ 672 NTSC-M and radio, hence we force the standard in those cases. */
@@ -664,6 +677,10 @@ int msp3410d_thread(void *data)
664 state->watch_stereo = 0; 677 state->watch_stereo = 0;
665 state->nicam_on = 0; 678 state->nicam_on = 0;
666 679
680 /* wait for tuner to settle down after a channel change */
681 if (msp_sleep(state, 200))
682 goto restart;
683
667 if (msp_debug) 684 if (msp_debug)
668 v4l_dbg(2, msp_debug, client, "setting standard: %s (0x%04x)\n", 685 v4l_dbg(2, msp_debug, client, "setting standard: %s (0x%04x)\n",
669 msp_standard_std_name(std), std); 686 msp_standard_std_name(std), std);
@@ -693,6 +710,7 @@ int msp3410d_thread(void *data)
693 state->main = msp_stdlist[i].main; 710 state->main = msp_stdlist[i].main;
694 state->second = msp_stdlist[i].second; 711 state->second = msp_stdlist[i].second;
695 state->std = val; 712 state->std = val;
713 state->rxsubchans = V4L2_TUNER_SUB_MONO;
696 714
697 if (msp_amsound && !state->radio && (state->v4l2_std & V4L2_STD_SECAM) && 715 if (msp_amsound && !state->radio && (state->v4l2_std & V4L2_STD_SECAM) &&
698 (val != 0x0009)) { 716 (val != 0x0009)) {
@@ -714,20 +732,17 @@ int msp3410d_thread(void *data)
714 else 732 else
715 state->mode = MSP_MODE_FM_NICAM1; 733 state->mode = MSP_MODE_FM_NICAM1;
716 /* just turn on stereo */ 734 /* just turn on stereo */
717 state->rxsubchans = V4L2_TUNER_SUB_STEREO;
718 state->nicam_on = 1; 735 state->nicam_on = 1;
719 state->watch_stereo = 1; 736 state->watch_stereo = 1;
720 break; 737 break;
721 case 0x0009: 738 case 0x0009:
722 state->mode = MSP_MODE_AM_NICAM; 739 state->mode = MSP_MODE_AM_NICAM;
723 state->rxsubchans = V4L2_TUNER_SUB_MONO;
724 state->nicam_on = 1; 740 state->nicam_on = 1;
725 state->watch_stereo = 1; 741 state->watch_stereo = 1;
726 break; 742 break;
727 case 0x0020: /* BTSC */ 743 case 0x0020: /* BTSC */
728 /* The pre-'G' models only have BTSC-mono */ 744 /* The pre-'G' models only have BTSC-mono */
729 state->mode = MSP_MODE_BTSC; 745 state->mode = MSP_MODE_BTSC;
730 state->rxsubchans = V4L2_TUNER_SUB_MONO;
731 break; 746 break;
732 case 0x0040: /* FM radio */ 747 case 0x0040: /* FM radio */
733 state->mode = MSP_MODE_FM_RADIO; 748 state->mode = MSP_MODE_FM_RADIO;
@@ -737,15 +752,12 @@ int msp3410d_thread(void *data)
737 msp3400c_set_mode(client, MSP_MODE_FM_RADIO); 752 msp3400c_set_mode(client, MSP_MODE_FM_RADIO);
738 msp3400c_set_carrier(client, MSP_CARRIER(10.7), 753 msp3400c_set_carrier(client, MSP_CARRIER(10.7),
739 MSP_CARRIER(10.7)); 754 MSP_CARRIER(10.7));
740 /* scart routing (this doesn't belong here I think) */
741 msp_set_scart(client,SCART_IN2,0);
742 break; 755 break;
743 case 0x0002: 756 case 0x0002:
744 case 0x0003: 757 case 0x0003:
745 case 0x0004: 758 case 0x0004:
746 case 0x0005: 759 case 0x0005:
747 state->mode = MSP_MODE_FM_TERRA; 760 state->mode = MSP_MODE_FM_TERRA;
748 state->rxsubchans = V4L2_TUNER_SUB_MONO;
749 state->watch_stereo = 1; 761 state->watch_stereo = 1;
750 break; 762 break;
751 } 763 }
@@ -759,20 +771,19 @@ int msp3410d_thread(void *data)
759 if (state->has_i2s_conf) 771 if (state->has_i2s_conf)
760 msp_write_dem(client, 0x40, state->i2s_mode); 772 msp_write_dem(client, 0x40, state->i2s_mode);
761 773
762 /* unmute, restore misc registers */ 774 /* unmute */
763 msp_set_audio(client);
764
765 msp_write_dsp(client, 0x13, state->acb);
766 msp3400c_set_audmode(client); 775 msp3400c_set_audmode(client);
776 state->scan_in_progress = 0;
777 msp_set_audio(client);
767 778
768 /* monitor tv audio mode, the first time don't wait 779 /* monitor tv audio mode, the first time don't wait
769 so long to get a quick stereo/bilingual result */ 780 so long to get a quick stereo/bilingual result */
770 if (msp_sleep(state, 1000)) 781 count = 3;
771 goto restart;
772 while (state->watch_stereo) { 782 while (state->watch_stereo) {
773 watch_stereo(client); 783 if (msp_sleep(state, count ? 1000 : 5000))
774 if (msp_sleep(state, 5000))
775 goto restart; 784 goto restart;
785 if (count) count--;
786 watch_stereo(client);
776 } 787 }
777 } 788 }
778 v4l_dbg(1, msp_debug, client, "thread: exit\n"); 789 v4l_dbg(1, msp_debug, client, "thread: exit\n");
@@ -829,27 +840,27 @@ static void msp34xxg_set_source(struct i2c_client *client, u16 reg, int in)
829 source = 0; /* mono only */ 840 source = 0; /* mono only */
830 matrix = 0x30; 841 matrix = 0x30;
831 break; 842 break;
832 case V4L2_TUNER_MODE_LANG1:
833 source = 3; /* stereo or A */
834 matrix = 0x00;
835 break;
836 case V4L2_TUNER_MODE_LANG2: 843 case V4L2_TUNER_MODE_LANG2:
837 source = 4; /* stereo or B */ 844 source = 4; /* stereo or B */
838 matrix = 0x10; 845 matrix = 0x10;
839 break; 846 break;
840 case V4L2_TUNER_MODE_STEREO:
841 case V4L2_TUNER_MODE_LANG1_LANG2: 847 case V4L2_TUNER_MODE_LANG1_LANG2:
842 default:
843 source = 1; /* stereo or A|B */ 848 source = 1; /* stereo or A|B */
844 matrix = 0x20; 849 matrix = 0x20;
845 break; 850 break;
851 case V4L2_TUNER_MODE_STEREO:
852 case V4L2_TUNER_MODE_LANG1:
853 default:
854 source = 3; /* stereo or A */
855 matrix = 0x00;
856 break;
846 } 857 }
847 858
848 if (in == MSP_DSP_OUT_TUNER) 859 if (in == MSP_DSP_IN_TUNER)
849 source = (source << 8) | 0x20; 860 source = (source << 8) | 0x20;
850 /* the msp34x2g puts the MAIN_AVC, MAIN and AUX sources in 12, 13, 14 861 /* the msp34x2g puts the MAIN_AVC, MAIN and AUX sources in 12, 13, 14
851 instead of 11, 12, 13. So we add one for that msp version. */ 862 instead of 11, 12, 13. So we add one for that msp version. */
852 else if (in >= MSP_DSP_OUT_MAIN_AVC && state->has_dolby_pro_logic) 863 else if (in >= MSP_DSP_IN_MAIN_AVC && state->has_dolby_pro_logic)
853 source = ((in + 1) << 8) | matrix; 864 source = ((in + 1) << 8) | matrix;
854 else 865 else
855 source = (in << 8) | matrix; 866 source = (in << 8) | matrix;
@@ -869,7 +880,7 @@ static void msp34xxg_set_sources(struct i2c_client *client)
869 msp34xxg_set_source(client, 0x000c, (in >> 4) & 0xf); 880 msp34xxg_set_source(client, 0x000c, (in >> 4) & 0xf);
870 msp34xxg_set_source(client, 0x0009, (in >> 8) & 0xf); 881 msp34xxg_set_source(client, 0x0009, (in >> 8) & 0xf);
871 msp34xxg_set_source(client, 0x000a, (in >> 12) & 0xf); 882 msp34xxg_set_source(client, 0x000a, (in >> 12) & 0xf);
872 if (state->has_scart23_in_scart2_out) 883 if (state->has_scart2_out)
873 msp34xxg_set_source(client, 0x0041, (in >> 16) & 0xf); 884 msp34xxg_set_source(client, 0x0041, (in >> 16) & 0xf);
874 msp34xxg_set_source(client, 0x000b, (in >> 20) & 0xf); 885 msp34xxg_set_source(client, 0x000b, (in >> 20) & 0xf);
875} 886}
@@ -887,10 +898,6 @@ static void msp34xxg_reset(struct i2c_client *client)
887 898
888 msp_reset(client); 899 msp_reset(client);
889 900
890 /* make sure that input/output is muted (paranoid mode) */
891 /* ACB, mute DSP input, mute SCART 1 */
892 msp_write_dsp(client, 0x13, 0x0f20);
893
894 if (state->has_i2s_conf) 901 if (state->has_i2s_conf)
895 msp_write_dem(client, 0x40, state->i2s_mode); 902 msp_write_dem(client, 0x40, state->i2s_mode);
896 903
@@ -1028,7 +1035,7 @@ static void msp34xxg_set_audmode(struct i2c_client *client)
1028 1035
1029 if (state->std == 0x20) { 1036 if (state->std == 0x20) {
1030 if ((state->rxsubchans & V4L2_TUNER_SUB_SAP) && 1037 if ((state->rxsubchans & V4L2_TUNER_SUB_SAP) &&
1031 (state->audmode == V4L2_TUNER_MODE_STEREO || 1038 (state->audmode == V4L2_TUNER_MODE_LANG1_LANG2 ||
1032 state->audmode == V4L2_TUNER_MODE_LANG2)) { 1039 state->audmode == V4L2_TUNER_MODE_LANG2)) {
1033 msp_write_dem(client, 0x20, 0x21); 1040 msp_write_dem(client, 0x20, 0x21);
1034 } else { 1041 } else {
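
[Editor's note] The msp3400-kthreads.c change above makes msp3400c_set_audmode() work on a local copy of the requested mode and degrade it to whatever the broadcast actually carries (mono-only, bilingual, or stereo) before programming the demodulator. The decision on its own, as a standalone sketch with illustrative constants:

#include <stdio.h>

enum { SUB_MONO = 1, SUB_STEREO = 2, SUB_LANG1 = 8, SUB_LANG2 = 16 };	/* illustrative */
enum { MODE_MONO, MODE_STEREO, MODE_LANG1, MODE_LANG2 };

static int effective_audmode(int requested, int rxsubchans)
{
	int audmode = requested;

	if (rxsubchans == SUB_MONO)
		audmode = MODE_MONO;		/* only mono is transmitted */
	else if (rxsubchans & SUB_LANG2) {
		/* bilingual: a mono or stereo request falls back to language 1 */
		if (audmode == MODE_MONO || audmode == MODE_STEREO)
			audmode = MODE_LANG1;
	} else if (audmode != MODE_MONO)
		audmode = MODE_STEREO;		/* stereo broadcast, honour anything but mono */

	return audmode;
}

int main(void)
{
	printf("%d\n", effective_audmode(MODE_STEREO, SUB_LANG1 | SUB_LANG2)); /* -> MODE_LANG1 */
	return 0;
}
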
diff --git a/drivers/media/video/pwc/Kconfig b/drivers/media/video/pwc/Kconfig
new file mode 100644
index 000000000000..86376556f108
--- /dev/null
+++ b/drivers/media/video/pwc/Kconfig
@@ -0,0 +1,28 @@
1config USB_PWC
2 tristate "USB Philips Cameras"
3 depends on USB && VIDEO_DEV
4 ---help---
5 Say Y or M here if you want to use one of these Philips & OEM
6 webcams:
7 * Philips PCA645, PCA646
8 * Philips PCVC675, PCVC680, PCVC690
9 * Philips PCVC720/40, PCVC730, PCVC740, PCVC750
10 * Askey VC010
11 * Logitech QuickCam Pro 3000, 4000, 'Zoom', 'Notebook Pro'
12 and 'Orbit'/'Sphere'
13 * Samsung MPC-C10, MPC-C30
14 * Creative Webcam 5, Pro Ex
15 * SOTEC Afina Eye
16 * Visionite VCS-UC300, VCS-UM100
17
18 The PCA635, PCVC665 and PCVC720/20 are not supported by this driver
19 and never will be, but the 665 and 720/20 are supported by other
20 drivers.
21
22 See <file:Documentation/usb/philips.txt> for more information and
23 installation instructions.
24
25 The built-in microphone is enabled by selecting USB Audio support.
26
27 To compile this driver as a module, choose M here: the
28 module will be called pwc.
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index b05015282601..dceebc0b1250 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -40,6 +40,7 @@
40#include <linux/i2c.h> 40#include <linux/i2c.h>
41#include <linux/videodev2.h> 41#include <linux/videodev2.h>
42#include <media/v4l2-common.h> 42#include <media/v4l2-common.h>
43#include <media/saa7115.h>
43#include <asm/div64.h> 44#include <asm/div64.h>
44 45
45MODULE_DESCRIPTION("Philips SAA7113/SAA7114/SAA7115 video decoder driver"); 46MODULE_DESCRIPTION("Philips SAA7113/SAA7114/SAA7115 video decoder driver");
@@ -53,7 +54,7 @@ module_param(debug, bool, 0644);
53MODULE_PARM_DESC(debug, "Debug level (0-1)"); 54MODULE_PARM_DESC(debug, "Debug level (0-1)");
54 55
55static unsigned short normal_i2c[] = { 56static unsigned short normal_i2c[] = {
56 0x4a >>1, 0x48 >>1, /* SAA7113 */ 57 0x4a >> 1, 0x48 >> 1, /* SAA7113 */
57 0x42 >> 1, 0x40 >> 1, /* SAA7114 and SAA7115 */ 58 0x42 >> 1, 0x40 >> 1, /* SAA7114 and SAA7115 */
58 I2C_CLIENT_END }; 59 I2C_CLIENT_END };
59 60
@@ -722,16 +723,16 @@ static void saa7115_set_v4lstd(struct i2c_client *client, v4l2_std_id std)
722 100 reserved NTSC-Japan (3.58MHz) 723 100 reserved NTSC-Japan (3.58MHz)
723 */ 724 */
724 if (state->ident == V4L2_IDENT_SAA7113) { 725 if (state->ident == V4L2_IDENT_SAA7113) {
725 u8 reg = saa7115_read(client, 0x0e) & 0x8f; 726 u8 reg = saa7115_read(client, 0x0e) & 0x8f;
726 727
727 if (std == V4L2_STD_PAL_M) { 728 if (std == V4L2_STD_PAL_M) {
728 reg|=0x30; 729 reg |= 0x30;
729 } else if (std == V4L2_STD_PAL_N) { 730 } else if (std == V4L2_STD_PAL_N) {
730 reg|=0x20; 731 reg |= 0x20;
731 } else if (std == V4L2_STD_PAL_60) { 732 } else if (std == V4L2_STD_PAL_60) {
732 reg|=0x10; 733 reg |= 0x10;
733 } else if (std == V4L2_STD_NTSC_M_JP) { 734 } else if (std == V4L2_STD_NTSC_M_JP) {
734 reg|=0x40; 735 reg |= 0x40;
735 } 736 }
736 saa7115_write(client, 0x0e, reg); 737 saa7115_write(client, 0x0e, reg);
737 } 738 }
@@ -811,7 +812,7 @@ static void saa7115_set_lcr(struct i2c_client *client, struct v4l2_sliced_vbi_fo
811 u8 lcr[24]; 812 u8 lcr[24];
812 int i, x; 813 int i, x;
813 814
814 /* saa7113/71144 doesn't yet support VBI */ 815 /* saa7113/7114 doesn't yet support VBI */
815 if (state->ident != V4L2_IDENT_SAA7115) 816 if (state->ident != V4L2_IDENT_SAA7115)
816 return; 817 return;
817 818
@@ -851,7 +852,7 @@ static void saa7115_set_lcr(struct i2c_client *client, struct v4l2_sliced_vbi_fo
851 case 0: 852 case 0:
852 lcr[i] |= 0xf << (4 * x); 853 lcr[i] |= 0xf << (4 * x);
853 break; 854 break;
854 case V4L2_SLICED_TELETEXT_PAL_B: 855 case V4L2_SLICED_TELETEXT_B:
855 lcr[i] |= 1 << (4 * x); 856 lcr[i] |= 1 << (4 * x);
856 break; 857 break;
857 case V4L2_SLICED_CAPTION_525: 858 case V4L2_SLICED_CAPTION_525:
@@ -880,7 +881,7 @@ static void saa7115_set_lcr(struct i2c_client *client, struct v4l2_sliced_vbi_fo
880static int saa7115_get_v4lfmt(struct i2c_client *client, struct v4l2_format *fmt) 881static int saa7115_get_v4lfmt(struct i2c_client *client, struct v4l2_format *fmt)
881{ 882{
882 static u16 lcr2vbi[] = { 883 static u16 lcr2vbi[] = {
883 0, V4L2_SLICED_TELETEXT_PAL_B, 0, /* 1 */ 884 0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */
884 0, V4L2_SLICED_CAPTION_525, /* 4 */ 885 0, V4L2_SLICED_CAPTION_525, /* 4 */
885 V4L2_SLICED_WSS_625, 0, /* 5 */ 886 V4L2_SLICED_WSS_625, 0, /* 5 */
886 V4L2_SLICED_VPS, 0, 0, 0, 0, /* 7 */ 887 V4L2_SLICED_VPS, 0, 0, 0, 0, /* 7 */
@@ -1045,7 +1046,7 @@ static void saa7115_decode_vbi_line(struct i2c_client *client,
1045 /* decode payloads */ 1046 /* decode payloads */
1046 switch (id2) { 1047 switch (id2) {
1047 case 1: 1048 case 1:
1048 vbi->type = V4L2_SLICED_TELETEXT_PAL_B; 1049 vbi->type = V4L2_SLICED_TELETEXT_B;
1049 break; 1050 break;
1050 case 4: 1051 case 4:
1051 if (!saa7115_odd_parity(p[0]) || !saa7115_odd_parity(p[1])) 1052 if (!saa7115_odd_parity(p[0]) || !saa7115_odd_parity(p[1]))
@@ -1180,6 +1181,46 @@ static int saa7115_command(struct i2c_client *client, unsigned int cmd, void *ar
1180 state->radio = 1; 1181 state->radio = 1;
1181 break; 1182 break;
1182 1183
1184 case VIDIOC_INT_G_VIDEO_ROUTING:
1185 {
1186 struct v4l2_routing *route = arg;
1187
1188 route->input = state->input;
1189 route->output = 0;
1190 break;
1191 }
1192
1193 case VIDIOC_INT_S_VIDEO_ROUTING:
1194 {
1195 struct v4l2_routing *route = arg;
1196
1197 v4l_dbg(1, debug, client, "decoder set input %d\n", route->input);
1198 /* saa7113 does not have these inputs */
1199 if (state->ident == V4L2_IDENT_SAA7113 &&
1200 (route->input == SAA7115_COMPOSITE4 ||
1201 route->input == SAA7115_COMPOSITE5)) {
1202 return -EINVAL;
1203 }
1204 if (route->input > SAA7115_SVIDEO3)
1205 return -EINVAL;
1206 if (state->input == route->input)
1207 break;
1208 v4l_dbg(1, debug, client, "now setting %s input\n",
1209 (route->input >= SAA7115_SVIDEO0) ? "S-Video" : "Composite");
1210 state->input = route->input;
1211
1212 /* select mode */
1213 saa7115_write(client, 0x02,
1214 (saa7115_read(client, 0x02) & 0xf0) |
1215 state->input);
1216
1217 /* bypass chrominance trap for S-Video modes */
1218 saa7115_write(client, 0x09,
1219 (saa7115_read(client, 0x09) & 0x7f) |
1220 (state->input >= SAA7115_SVIDEO0 ? 0x80 : 0x0));
1221 break;
1222 }
1223
1183 case VIDIOC_G_INPUT: 1224 case VIDIOC_G_INPUT:
1184 *(int *)arg = state->input; 1225 *(int *)arg = state->input;
1185 break; 1226 break;
@@ -1321,7 +1362,7 @@ static int saa7115_attach(struct i2c_adapter *adapter, int address, int kind)
1321 1362
1322 saa7115_write(client, 0, 5); 1363 saa7115_write(client, 0, 5);
1323 chip_id = saa7115_read(client, 0) & 0x0f; 1364 chip_id = saa7115_read(client, 0) & 0x0f;
1324 if (chip_id <3 && chip_id > 5) { 1365 if (chip_id < 3 && chip_id > 5) {
1325 v4l_dbg(1, debug, client, "saa7115 not found\n"); 1366 v4l_dbg(1, debug, client, "saa7115 not found\n");
1326 kfree(client); 1367 kfree(client);
1327 return 0; 1368 return 0;
@@ -1360,7 +1401,7 @@ static int saa7115_attach(struct i2c_adapter *adapter, int address, int kind)
1360 v4l_dbg(1, debug, client, "writing init values\n"); 1401 v4l_dbg(1, debug, client, "writing init values\n");
1361 1402
1362 /* init to 60hz/48khz */ 1403 /* init to 60hz/48khz */
1363 if (state->ident==V4L2_IDENT_SAA7113) 1404 if (state->ident == V4L2_IDENT_SAA7113)
1364 saa7115_writeregs(client, saa7113_init_auto_input); 1405 saa7115_writeregs(client, saa7113_init_auto_input);
1365 else 1406 else
1366 saa7115_writeregs(client, saa7115_init_auto_input); 1407 saa7115_writeregs(client, saa7115_init_auto_input);
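
[Editor's note] The VIDIOC_INT_S_VIDEO_ROUTING handler added to saa7115_command() above boils down to two read-modify-write cycles: the input number goes into the low nibble of register 0x02, and bit 7 of register 0x09 bypasses the chrominance trap whenever an S-Video input is selected. A user-space sketch of just that bit twiddling; regs[] stands in for the i2c register accesses and the SVIDEO0 value is illustrative:

#include <stdio.h>

#define SVIDEO0 6	/* illustrative: inputs >= this value are S-Video */

static unsigned char regs[256];	/* stand-in for the chip's register file */

static void set_input(int input)
{
	/* input select: low nibble of register 0x02 */
	regs[0x02] = (regs[0x02] & 0xf0) | (input & 0x0f);

	/* bypass the chrominance trap (bit 7 of 0x09) for S-Video modes */
	regs[0x09] = (regs[0x09] & 0x7f) | (input >= SVIDEO0 ? 0x80 : 0x00);
}

int main(void)
{
	set_input(2);		/* a composite input */
	printf("composite: 0x02=%02x 0x09=%02x\n", regs[0x02], regs[0x09]);
	set_input(SVIDEO0);	/* an S-Video input */
	printf("s-video:   0x02=%02x 0x09=%02x\n", regs[0x02], regs[0x09]);
	return 0;
}
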
diff --git a/drivers/media/video/saa7127.c b/drivers/media/video/saa7127.c
index 992c71774f30..133f9e5252fe 100644
--- a/drivers/media/video/saa7127.c
+++ b/drivers/media/video/saa7127.c
@@ -54,6 +54,7 @@
54#include <linux/i2c.h> 54#include <linux/i2c.h>
55#include <linux/videodev2.h> 55#include <linux/videodev2.h>
56#include <media/v4l2-common.h> 56#include <media/v4l2-common.h>
57#include <media/saa7127.h>
57 58
58static int debug = 0; 59static int debug = 0;
59static int test_image = 0; 60static int test_image = 0;
@@ -222,22 +223,6 @@ static struct i2c_reg_value saa7127_init_config_50hz[] = {
222 { 0, 0 } 223 { 0, 0 }
223}; 224};
224 225
225/* Enumeration for the Supported input types */
226enum saa7127_input_type {
227 SAA7127_INPUT_TYPE_NORMAL,
228 SAA7127_INPUT_TYPE_TEST_IMAGE
229};
230
231/* Enumeration for the Supported Output signal types */
232enum saa7127_output_type {
233 SAA7127_OUTPUT_TYPE_BOTH,
234 SAA7127_OUTPUT_TYPE_COMPOSITE,
235 SAA7127_OUTPUT_TYPE_SVIDEO,
236 SAA7127_OUTPUT_TYPE_RGB,
237 SAA7127_OUTPUT_TYPE_YUV_C,
238 SAA7127_OUTPUT_TYPE_YUV_V
239};
240
241/* 226/*
242 ********************************************************************** 227 **********************************************************************
243 * 228 *
@@ -561,7 +546,7 @@ static int saa7127_command(struct i2c_client *client,
561{ 546{
562 struct saa7127_state *state = i2c_get_clientdata(client); 547 struct saa7127_state *state = i2c_get_clientdata(client);
563 struct v4l2_format *fmt = arg; 548 struct v4l2_format *fmt = arg;
564 int *iarg = arg; 549 struct v4l2_routing *route = arg;
565 550
566 switch (cmd) { 551 switch (cmd) {
567 case VIDIOC_S_STD: 552 case VIDIOC_S_STD:
@@ -573,15 +558,23 @@ static int saa7127_command(struct i2c_client *client,
573 *(v4l2_std_id *)arg = state->std; 558 *(v4l2_std_id *)arg = state->std;
574 break; 559 break;
575 560
576 case VIDIOC_S_INPUT: 561 case VIDIOC_INT_G_VIDEO_ROUTING:
577 if (state->input_type == *iarg) 562 route->input = state->input_type;
578 break; 563 route->output = state->output_type;
579 return saa7127_set_input_type(client, *iarg); 564 break;
580 565
581 case VIDIOC_S_OUTPUT: 566 case VIDIOC_INT_S_VIDEO_ROUTING:
582 if (state->output_type == *iarg) 567 {
583 break; 568 int rc = 0;
584 return saa7127_set_output_type(client, *iarg); 569
570 if (state->input_type != route->input) {
571 rc = saa7127_set_input_type(client, route->input);
572 }
573 if (rc == 0 && state->output_type != route->output) {
574 rc = saa7127_set_output_type(client, route->output);
575 }
576 return rc;
577 }
585 578
586 case VIDIOC_STREAMON: 579 case VIDIOC_STREAMON:
587 case VIDIOC_STREAMOFF: 580 case VIDIOC_STREAMOFF:
diff --git a/drivers/media/video/saa7134/Kconfig b/drivers/media/video/saa7134/Kconfig
index 86671a43e769..e1c1805df1fb 100644
--- a/drivers/media/video/saa7134/Kconfig
+++ b/drivers/media/video/saa7134/Kconfig
@@ -39,6 +39,7 @@ config VIDEO_SAA7134_DVB
39 tristate "DVB/ATSC Support for saa7134 based TV cards" 39 tristate "DVB/ATSC Support for saa7134 based TV cards"
40 depends on VIDEO_SAA7134 && DVB_CORE 40 depends on VIDEO_SAA7134 && DVB_CORE
41 select VIDEO_BUF_DVB 41 select VIDEO_BUF_DVB
42 select FW_LOADER
42 ---help--- 43 ---help---
43 This adds support for DVB cards based on the 44 This adds support for DVB cards based on the
44 Philips saa7134 chip. 45 Philips saa7134 chip.
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index fdd7f48f3b76..e666a4465ca4 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -208,7 +208,7 @@ struct saa7134_board saa7134_boards[] = {
208 [SAA7134_BOARD_FLYTVPLATINUM_FM] = { 208 [SAA7134_BOARD_FLYTVPLATINUM_FM] = {
209 /* LifeView FlyTV Platinum FM (LR214WF) */ 209 /* LifeView FlyTV Platinum FM (LR214WF) */
210 /* "Peter Missel <peter.missel@onlinehome.de> */ 210 /* "Peter Missel <peter.missel@onlinehome.de> */
211 .name = "LifeView FlyTV Platinum FM", 211 .name = "LifeView FlyTV Platinum FM / Gold",
212 .audio_clock = 0x00200000, 212 .audio_clock = 0x00200000,
213 .tuner_type = TUNER_PHILIPS_TDA8290, 213 .tuner_type = TUNER_PHILIPS_TDA8290,
214 .radio_type = UNSET, 214 .radio_type = UNSET,
@@ -2660,7 +2660,7 @@ struct saa7134_board saa7134_boards[] = {
2660 .mpeg = SAA7134_MPEG_DVB, 2660 .mpeg = SAA7134_MPEG_DVB,
2661 .inputs = {{ 2661 .inputs = {{
2662 .name = name_comp1, 2662 .name = name_comp1,
2663 .vmux = 0, 2663 .vmux = 1,
2664 .amux = LINE1, 2664 .amux = LINE1,
2665 },{ 2665 },{
2666 .name = name_svideo, 2666 .name = name_svideo,
@@ -2671,7 +2671,7 @@ struct saa7134_board saa7134_boards[] = {
2671 [SAA7134_BOARD_FLYDVBT_LR301] = { 2671 [SAA7134_BOARD_FLYDVBT_LR301] = {
2672 /* LifeView FlyDVB-T */ 2672 /* LifeView FlyDVB-T */
2673 /* Giampiero Giancipoli <gianci@libero.it> */ 2673 /* Giampiero Giancipoli <gianci@libero.it> */
2674 .name = "LifeView FlyDVB-T", 2674 .name = "LifeView FlyDVB-T / Genius VideoWonder DVB-T",
2675 .audio_clock = 0x00200000, 2675 .audio_clock = 0x00200000,
2676 .tuner_type = TUNER_ABSENT, 2676 .tuner_type = TUNER_ABSENT,
2677 .radio_type = UNSET, 2677 .radio_type = UNSET,
@@ -2808,6 +2808,40 @@ struct saa7134_board saa7134_boards[] = {
2808 .tuner_addr = ADDR_UNSET, 2808 .tuner_addr = ADDR_UNSET,
2809 .radio_addr = ADDR_UNSET, 2809 .radio_addr = ADDR_UNSET,
2810 }, 2810 },
2811 [SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS] = {
2812 .name = "LifeView FlyDVB-T Hybrid Cardbus",
2813 .audio_clock = 0x00200000,
2814 .tuner_type = TUNER_PHILIPS_TDA8290,
2815 .radio_type = UNSET,
2816 .tuner_addr = ADDR_UNSET,
2817 .radio_addr = ADDR_UNSET,
2818 .mpeg = SAA7134_MPEG_DVB,
2819 .gpiomask = 0x00600000, /* Bit 21 0=Radio, Bit 22 0=TV */
2820 .inputs = {{
2821 .name = name_tv,
2822 .vmux = 1,
2823 .amux = TV,
2824 .gpio = 0x200000, /* GPIO21=High for TV input */
2825 .tv = 1,
2826 },{
2827 .name = name_svideo, /* S-Video signal on S-Video input */
2828 .vmux = 8,
2829 .amux = LINE2,
2830 },{
2831 .name = name_comp1, /* Composite signal on S-Video input */
2832 .vmux = 0,
2833 .amux = LINE2,
2834 },{
2835 .name = name_comp2, /* Composite input */
2836 .vmux = 3,
2837 .amux = LINE2,
2838 }},
2839 .radio = {
2840 .name = name_radio,
2841 .amux = TV,
2842 .gpio = 0x000000, /* GPIO21=Low for FM radio antenna */
2843 },
2844 },
2811}; 2845};
2812 2846
2813const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards); 2847const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards);
@@ -3333,6 +3367,30 @@ struct pci_device_id saa7134_pci_tbl[] = {
3333 .subdevice = 0x0005, 3367 .subdevice = 0x0005,
3334 .driver_data = SAA7134_BOARD_MD7134_BRIDGE_2, 3368 .driver_data = SAA7134_BOARD_MD7134_BRIDGE_2,
3335 },{ 3369 },{
3370 .vendor = PCI_VENDOR_ID_PHILIPS,
3371 .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
3372 .subvendor = 0x1489,
3373 .subdevice = 0x0301,
3374 .driver_data = SAA7134_BOARD_FLYDVBT_LR301,
3375 },{
3376 .vendor = PCI_VENDOR_ID_PHILIPS,
3377 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
3378 .subvendor = 0x5168, /* Animation Technologies (LifeView) */
3379 .subdevice = 0x0304,
3380 .driver_data = SAA7134_BOARD_FLYTVPLATINUM_FM,
3381 },{
3382 .vendor = PCI_VENDOR_ID_PHILIPS,
3383 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
3384 .subvendor = 0x5168,
3385 .subdevice = 0x3306,
3386 .driver_data = SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS,
3387 },{
3388 .vendor = PCI_VENDOR_ID_PHILIPS,
3389 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
3390 .subvendor = 0x5168,
3391 .subdevice = 0x3502, /* whats the difference to 0x3306 ?*/
3392 .driver_data = SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS,
3393 },{
3336 /* --- boards without eeprom + subsystem ID --- */ 3394 /* --- boards without eeprom + subsystem ID --- */
3337 .vendor = PCI_VENDOR_ID_PHILIPS, 3395 .vendor = PCI_VENDOR_ID_PHILIPS,
3338 .device = PCI_DEVICE_ID_PHILIPS_SAA7134, 3396 .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
@@ -3462,6 +3520,7 @@ int saa7134_board_init1(struct saa7134_dev *dev)
3462 saa_writeb(SAA7134_GPIO_GPSTATUS3, 0x06); 3520 saa_writeb(SAA7134_GPIO_GPSTATUS3, 0x06);
3463 break; 3521 break;
3464 case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331: 3522 case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331:
3523 case SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS:
3465 saa_writeb(SAA7134_GPIO_GPMODE3, 0x08); 3524 saa_writeb(SAA7134_GPIO_GPMODE3, 0x08);
3466 saa_writeb(SAA7134_GPIO_GPSTATUS3, 0x00); 3525 saa_writeb(SAA7134_GPIO_GPSTATUS3, 0x00);
3467 break; 3526 break;
@@ -3633,6 +3692,7 @@ int saa7134_board_init2(struct saa7134_dev *dev)
3633 } 3692 }
3634 break; 3693 break;
3635 case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331: 3694 case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331:
3695 case SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS:
3636 /* make the tda10046 find its eeprom */ 3696 /* make the tda10046 find its eeprom */
3637 { 3697 {
3638 u8 data[] = { 0x3c, 0x33, 0x62}; 3698 u8 data[] = { 0x3c, 0x33, 0x62};
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index 86cfdb8514cb..222a36c38917 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -1064,6 +1064,10 @@ static int dvb_init(struct saa7134_dev *dev)
1064 dev->dvb.frontend = tda10046_attach(&tevion_dvbt220rf_config, 1064 dev->dvb.frontend = tda10046_attach(&tevion_dvbt220rf_config,
1065 &dev->i2c_adap); 1065 &dev->i2c_adap);
1066 break; 1066 break;
1067 case SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS:
1068 dev->dvb.frontend = tda10046_attach(&ads_tech_duo_config,
1069 &dev->i2c_adap);
1070 break;
1067#endif 1071#endif
1068#ifdef HAVE_NXT200X 1072#ifdef HAVE_NXT200X
1069 case SAA7134_BOARD_AVERMEDIA_AVERTVHD_A180: 1073 case SAA7134_BOARD_AVERMEDIA_AVERTVHD_A180:
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index 31ba293854c1..353af3a8b766 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -220,6 +220,7 @@ struct saa7134_format {
220#define SAA7134_BOARD_AVERMEDIA_A169_B 91 220#define SAA7134_BOARD_AVERMEDIA_A169_B 91
221#define SAA7134_BOARD_AVERMEDIA_A169_B1 92 221#define SAA7134_BOARD_AVERMEDIA_A169_B1 92
222#define SAA7134_BOARD_MD7134_BRIDGE_2 93 222#define SAA7134_BOARD_MD7134_BRIDGE_2 93
223#define SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS 94
223 224
224#define SAA7134_MAXBOARDS 8 225#define SAA7134_MAXBOARDS 8
225#define SAA7134_INPUT_MAX 8 226#define SAA7134_INPUT_MAX 8
diff --git a/drivers/media/video/sn9c102/Kconfig b/drivers/media/video/sn9c102/Kconfig
new file mode 100644
index 000000000000..55f2bc11964b
--- /dev/null
+++ b/drivers/media/video/sn9c102/Kconfig
@@ -0,0 +1,11 @@
1config USB_SN9C102
2 tristate "USB SN9C10x PC Camera Controller support"
3 depends on USB && VIDEO_DEV
4 ---help---
5 Say Y here if you want support for cameras based on SONiX SN9C101,
6 SN9C102 or SN9C103 PC Camera Controllers.
7
8 See <file:Documentation/video4linux/sn9c102.txt> for more info.
9
10 To compile this driver as a module, choose M here: the
11 module will be called sn9c102.
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index df195c905366..1013b4de89a2 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -401,7 +401,7 @@ static void tuner_status(struct i2c_client *client)
401 } 401 }
402 tuner_info("Tuner mode: %s\n", p); 402 tuner_info("Tuner mode: %s\n", p);
403 tuner_info("Frequency: %lu.%02lu MHz\n", freq, freq_fraction); 403 tuner_info("Frequency: %lu.%02lu MHz\n", freq, freq_fraction);
404 tuner_info("Standard: 0x%08llx\n", t->std); 404 tuner_info("Standard: 0x%08lx\n", (unsigned long)t->std);
405 if (t->mode != V4L2_TUNER_RADIO) 405 if (t->mode != V4L2_TUNER_RADIO)
406 return; 406 return;
407 if (t->has_signal) { 407 if (t->has_signal) {
@@ -558,10 +558,10 @@ static inline int set_mode(struct i2c_client *client, struct tuner *t, int mode,
558 558
559static inline int check_v4l2(struct tuner *t) 559static inline int check_v4l2(struct tuner *t)
560{ 560{
561 if (t->using_v4l2) { 561 /* bttv still uses both v4l1 and v4l2 calls to the tuner (v4l2 for
562 tuner_dbg ("ignore v4l1 call\n"); 562 TV, v4l1 for radio), until that is fixed this code is disabled.
563 return EINVAL; 563 Otherwise the radio (v4l1) wouldn't tune after using the TV (v4l2)
564 } 564 first. */
565 return 0; 565 return 0;
566} 566}
567 567
@@ -744,6 +744,8 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
744 switch_v4l2(); 744 switch_v4l2();
745 745
746 tuner->type = t->mode; 746 tuner->type = t->mode;
747 if (t->mode == V4L2_TUNER_ANALOG_TV)
748 tuner->capability |= V4L2_TUNER_CAP_NORM;
747 if (t->mode != V4L2_TUNER_RADIO) { 749 if (t->mode != V4L2_TUNER_RADIO) {
748 tuner->rangelow = tv_range[0] * 16; 750 tuner->rangelow = tv_range[0] * 16;
749 tuner->rangehigh = tv_range[1] * 16; 751 tuner->rangehigh = tv_range[1] * 16;
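
[Editor's note] The tuner_status() fix above concerns a format-string mismatch: v4l2_std_id is a 64-bit type, and the hunk prints it through an explicit cast so the conversion specifier and the argument width agree on every architecture. The same pattern in miniature:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t std = 0xb000;	/* a v4l2_std_id-style bitmask */

	/* cast to a known width so %lx always matches its argument
	 * (the kernel hunk casts to unsigned long the same way) */
	printf("Standard: 0x%08lx\n", (unsigned long)std);
	return 0;
}
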
diff --git a/drivers/media/video/tvaudio.c b/drivers/media/video/tvaudio.c
index 356bff455ad1..c2b756107548 100644
--- a/drivers/media/video/tvaudio.c
+++ b/drivers/media/video/tvaudio.c
@@ -1706,21 +1706,6 @@ static int chip_command(struct i2c_client *client,
1706 break; 1706 break;
1707 } 1707 }
1708 1708
1709 case VIDIOC_S_AUDIO:
1710 {
1711 struct v4l2_audio *sarg = arg;
1712
1713 if (!(desc->flags & CHIP_HAS_INPUTSEL) || sarg->index >= 4)
1714 return -EINVAL;
1715 /* There are four inputs: tuner, radio, extern and intern. */
1716 chip->input = sarg->index;
1717 if (chip->muted)
1718 break;
1719 chip_write_masked(chip, desc->inputreg,
1720 desc->inputmap[chip->input], desc->inputmask);
1721 break;
1722 }
1723
1724 case VIDIOC_S_TUNER: 1709 case VIDIOC_S_TUNER:
1725 { 1710 {
1726 struct v4l2_tuner *vt = arg; 1711 struct v4l2_tuner *vt = arg;
diff --git a/drivers/media/video/tveeprom.c b/drivers/media/video/tveeprom.c
index e0d2ff83fc91..431c3e2f6c42 100644
--- a/drivers/media/video/tveeprom.c
+++ b/drivers/media/video/tveeprom.c
@@ -757,9 +757,9 @@ tveeprom_detect_client(struct i2c_adapter *adapter,
757static int 757static int
758tveeprom_attach_adapter (struct i2c_adapter *adapter) 758tveeprom_attach_adapter (struct i2c_adapter *adapter)
759{ 759{
760 if (adapter->id != I2C_HW_B_BT848) 760 if (adapter->class & I2C_CLASS_TV_ANALOG)
761 return 0; 761 return i2c_probe(adapter, &addr_data, tveeprom_detect_client);
762 return i2c_probe(adapter, &addr_data, tveeprom_detect_client); 762 return 0;
763} 763}
764 764
765static int 765static int
diff --git a/drivers/media/video/tvp5150.c b/drivers/media/video/tvp5150.c
index 69d0fe159f4d..dab4973bcf82 100644
--- a/drivers/media/video/tvp5150.c
+++ b/drivers/media/video/tvp5150.c
@@ -53,7 +53,7 @@ static struct v4l2_queryctrl tvp5150_qctrl[] = {
53 .minimum = 0, 53 .minimum = 0,
54 .maximum = 255, 54 .maximum = 255,
55 .step = 1, 55 .step = 1,
56 .default_value = 0, 56 .default_value = 128,
57 .flags = 0, 57 .flags = 0,
58 }, { 58 }, {
59 .id = V4L2_CID_CONTRAST, 59 .id = V4L2_CID_CONTRAST,
@@ -62,7 +62,7 @@ static struct v4l2_queryctrl tvp5150_qctrl[] = {
62 .minimum = 0, 62 .minimum = 0,
63 .maximum = 255, 63 .maximum = 255,
64 .step = 0x1, 64 .step = 0x1,
65 .default_value = 0x10, 65 .default_value = 128,
66 .flags = 0, 66 .flags = 0,
67 }, { 67 }, {
68 .id = V4L2_CID_SATURATION, 68 .id = V4L2_CID_SATURATION,
@@ -71,7 +71,7 @@ static struct v4l2_queryctrl tvp5150_qctrl[] = {
71 .minimum = 0, 71 .minimum = 0,
72 .maximum = 255, 72 .maximum = 255,
73 .step = 0x1, 73 .step = 0x1,
74 .default_value = 0x10, 74 .default_value = 128,
75 .flags = 0, 75 .flags = 0,
76 }, { 76 }, {
77 .id = V4L2_CID_HUE, 77 .id = V4L2_CID_HUE,
@@ -80,7 +80,7 @@ static struct v4l2_queryctrl tvp5150_qctrl[] = {
80 .minimum = -128, 80 .minimum = -128,
81 .maximum = 127, 81 .maximum = 127,
82 .step = 0x1, 82 .step = 0x1,
83 .default_value = 0x10, 83 .default_value = 0,
84 .flags = 0, 84 .flags = 0,
85 } 85 }
86}; 86};
@@ -500,16 +500,21 @@ struct i2c_vbi_ram_value {
500 500
501static struct i2c_vbi_ram_value vbi_ram_default[] = 501static struct i2c_vbi_ram_value vbi_ram_default[] =
502{ 502{
 503	/* FIXME: The current API doesn't handle all VBI types; those not
 504	   yet supported are placed under #if 0 */
505#if 0
503 {0x010, /* Teletext, SECAM, WST System A */ 506 {0x010, /* Teletext, SECAM, WST System A */
504 {V4L2_SLICED_TELETEXT_SECAM,6,23,1}, 507 {V4L2_SLICED_TELETEXT_SECAM,6,23,1},
505 { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x26, 508 { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x26,
506 0xe6, 0xb4, 0x0e, 0x00, 0x00, 0x00, 0x10, 0x00 } 509 0xe6, 0xb4, 0x0e, 0x00, 0x00, 0x00, 0x10, 0x00 }
507 }, 510 },
511#endif
508 {0x030, /* Teletext, PAL, WST System B */ 512 {0x030, /* Teletext, PAL, WST System B */
509 {V4L2_SLICED_TELETEXT_PAL_B,6,22,1}, 513 {V4L2_SLICED_TELETEXT_B,6,22,1},
510 { 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x2b, 514 { 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x2b,
511 0xa6, 0x72, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00 } 515 0xa6, 0x72, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00 }
512 }, 516 },
517#if 0
513 {0x050, /* Teletext, PAL, WST System C */ 518 {0x050, /* Teletext, PAL, WST System C */
514 {V4L2_SLICED_TELETEXT_PAL_C,6,22,1}, 519 {V4L2_SLICED_TELETEXT_PAL_C,6,22,1},
515 { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22, 520 { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22,
@@ -535,6 +540,7 @@ static struct i2c_vbi_ram_value vbi_ram_default[] =
535 { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02, 540 { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02,
536 0xa6, 0x7b, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 } 541 0xa6, 0x7b, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 }
537 }, 542 },
543#endif
538 {0x0f0, /* Closed Caption, NTSC */ 544 {0x0f0, /* Closed Caption, NTSC */
539 {V4L2_SLICED_CAPTION_525,21,21,1}, 545 {V4L2_SLICED_CAPTION_525,21,21,1},
540 { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02, 546 { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02,
@@ -545,6 +551,7 @@ static struct i2c_vbi_ram_value vbi_ram_default[] =
545 { 0x5b, 0x55, 0xc5, 0xff, 0x00, 0x71, 0x6e, 0x42, 551 { 0x5b, 0x55, 0xc5, 0xff, 0x00, 0x71, 0x6e, 0x42,
546 0xa6, 0xcd, 0x0f, 0x00, 0x00, 0x00, 0x3a, 0x00 } 552 0xa6, 0xcd, 0x0f, 0x00, 0x00, 0x00, 0x3a, 0x00 }
547 }, 553 },
554#if 0
548 {0x130, /* Wide Screen Signal, NTSC C */ 555 {0x130, /* Wide Screen Signal, NTSC C */
549 {V4L2_SLICED_WSS_525,20,20,1}, 556 {V4L2_SLICED_WSS_525,20,20,1},
550 { 0x38, 0x00, 0x3f, 0x00, 0x00, 0x71, 0x6e, 0x43, 557 { 0x38, 0x00, 0x3f, 0x00, 0x00, 0x71, 0x6e, 0x43,
@@ -560,6 +567,7 @@ static struct i2c_vbi_ram_value vbi_ram_default[] =
560 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49, 567 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49,
561 0x69, 0x94, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 } 568 0x69, 0x94, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 }
562 }, 569 },
570#endif
563 {0x190, /* Video Program System (VPS), PAL */ 571 {0x190, /* Video Program System (VPS), PAL */
564 {V4L2_SLICED_VPS,16,16,0}, 572 {V4L2_SLICED_VPS,16,16,0},
565 { 0xaa, 0xaa, 0xff, 0xff, 0xba, 0xce, 0x2b, 0x0d, 573 { 0xaa, 0xaa, 0xff, 0xff, 0xba, 0xce, 0x2b, 0x0d,
@@ -850,7 +858,6 @@ static int tvp5150_command(struct i2c_client *c,
850 858
851 case 0: 859 case 0:
852 case VIDIOC_INT_RESET: 860 case VIDIOC_INT_RESET:
853 case DECODER_INIT:
854 tvp5150_reset(c); 861 tvp5150_reset(c);
855 break; 862 break;
856 case VIDIOC_S_STD: 863 case VIDIOC_S_STD:
@@ -949,99 +956,15 @@ static int tvp5150_command(struct i2c_client *c,
949#endif 956#endif
950 957
951 case VIDIOC_LOG_STATUS: 958 case VIDIOC_LOG_STATUS:
952 case DECODER_DUMP:
953 dump_reg(c); 959 dump_reg(c);
954 break; 960 break;
955 961
956 case DECODER_GET_CAPABILITIES: 962 case VIDIOC_G_TUNER:
957 { 963 {
958 struct video_decoder_capability *cap = arg; 964 struct v4l2_tuner *vt = arg;
959 965 int status = tvp5150_read(c, 0x88);
960 cap->flags = VIDEO_DECODER_PAL |
961 VIDEO_DECODER_NTSC |
962 VIDEO_DECODER_SECAM |
963 VIDEO_DECODER_AUTO | VIDEO_DECODER_CCIR;
964 cap->inputs = 3;
965 cap->outputs = 1;
966 break;
967 }
968 case DECODER_GET_STATUS:
969 {
970 int *iarg = arg;
971 int status;
972 int res=0;
973 status = tvp5150_read(c, 0x88);
974 if(status&0x08){
975 res |= DECODER_STATUS_COLOR;
976 }
977 if(status&0x04 && status&0x02){
978 res |= DECODER_STATUS_GOOD;
979 }
980 *iarg=res;
981 break;
982 }
983
984 case DECODER_SET_GPIO:
985 break;
986
987 case DECODER_SET_VBI_BYPASS:
988 break;
989
990 case DECODER_SET_NORM:
991 {
992 int *iarg = arg;
993
994 switch (*iarg) {
995
996 case VIDEO_MODE_NTSC:
997 break;
998
999 case VIDEO_MODE_PAL:
1000 break;
1001
1002 case VIDEO_MODE_SECAM:
1003 break;
1004
1005 case VIDEO_MODE_AUTO:
1006 break;
1007
1008 default:
1009 return -EINVAL;
1010
1011 }
1012 decoder->norm = *iarg;
1013 break;
1014 }
1015 case DECODER_SET_INPUT:
1016 {
1017 int *iarg = arg;
1018 if (*iarg < 0 || *iarg > 3) {
1019 return -EINVAL;
1020 }
1021
1022 decoder->input = *iarg;
1023 tvp5150_selmux(c, decoder->input);
1024
1025 break;
1026 }
1027 case DECODER_SET_OUTPUT:
1028 {
1029 int *iarg = arg;
1030
1031 /* not much choice of outputs */
1032 if (*iarg != 0) {
1033 return -EINVAL;
1034 }
1035 break;
1036 }
1037 case DECODER_ENABLE_OUTPUT:
1038 {
1039 int *iarg = arg;
1040
1041 decoder->enable = (*iarg != 0);
1042
1043 tvp5150_selmux(c, decoder->input);
1044 966
967 vt->signal = ((status & 0x04) && (status & 0x02)) ? 0xffff : 0x0;
1045 break; 968 break;
1046 } 969 }
1047 case VIDIOC_QUERYCTRL: 970 case VIDIOC_QUERYCTRL:
@@ -1087,35 +1010,6 @@ static int tvp5150_command(struct i2c_client *c,
1087 return -EINVAL; 1010 return -EINVAL;
1088 } 1011 }
1089 1012
1090 case DECODER_SET_PICTURE:
1091 {
1092 struct video_picture *pic = arg;
1093 if (decoder->bright != pic->brightness) {
1094 /* We want 0 to 255 we get 0-65535 */
1095 decoder->bright = pic->brightness;
1096 tvp5150_write(c, TVP5150_BRIGHT_CTL,
1097 decoder->bright >> 8);
1098 }
1099 if (decoder->contrast != pic->contrast) {
1100 /* We want 0 to 255 we get 0-65535 */
1101 decoder->contrast = pic->contrast;
1102 tvp5150_write(c, TVP5150_CONTRAST_CTL,
1103 decoder->contrast >> 8);
1104 }
1105 if (decoder->sat != pic->colour) {
1106 /* We want 0 to 255 we get 0-65535 */
1107 decoder->sat = pic->colour;
1108 tvp5150_write(c, TVP5150_SATURATION_CTL,
1109 decoder->contrast >> 8);
1110 }
1111 if (decoder->hue != pic->hue) {
1112 /* We want -128 to 127 we get 0-65535 */
1113 decoder->hue = pic->hue;
1114 tvp5150_write(c, TVP5150_HUE_CTL,
1115 (decoder->hue - 32768) >> 8);
1116 }
1117 break;
1118 }
1119 default: 1013 default:
1120 return -EINVAL; 1014 return -EINVAL;
1121 } 1015 }
diff --git a/drivers/media/video/upd64031a.c b/drivers/media/video/upd64031a.c
new file mode 100644
index 000000000000..fc52201d607e
--- /dev/null
+++ b/drivers/media/video/upd64031a.c
@@ -0,0 +1,286 @@
1/*
2 * upd64031A - NEC Electronics Ghost Reduction for NTSC in Japan
3 *
4 * 2003 by T.Adachi <tadachi@tadachi-net.com>
5 * 2003 by Takeru KOMORIYA <komoriya@paken.org>
6 * 2006 by Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23
24#include <linux/version.h>
25#include <linux/module.h>
26#include <linux/kernel.h>
27#include <linux/i2c.h>
28#include <linux/videodev2.h>
29#include <media/v4l2-common.h>
30#include <media/upd64031a.h>
31
32// --------------------- register bit mask definitions -----------------------
33
34/* bit masks */
35#define GR_MODE_MASK 0xc0
36#define DIRECT_3DYCS_CONNECT_MASK 0xc0
37#define SYNC_CIRCUIT_MASK 0xa0
38
39// -----------------------------------------------------------------------------
40
41MODULE_DESCRIPTION("uPD64031A driver");
42MODULE_AUTHOR("T. Adachi, Takeru KOMORIYA, Hans Verkuil");
43MODULE_LICENSE("GPL");
44
45static int debug = 0;
46module_param(debug, int, 0644);
47
48MODULE_PARM_DESC(debug, "Debug level (0-1)");
49
50static unsigned short normal_i2c[] = { 0x24 >> 1, 0x26 >> 1, I2C_CLIENT_END };
51
52
53I2C_CLIENT_INSMOD;
54
55enum {
56 R00 = 0, R01, R02, R03, R04,
57 R05, R06, R07, R08, R09,
58 R0A, R0B, R0C, R0D, R0E, R0F,
59 /* unused registers
60 R10, R11, R12, R13, R14,
61 R15, R16, R17,
62 */
63 TOT_REGS
64};
65
66struct upd64031a_state {
67 u8 regs[TOT_REGS];
68 u8 gr_mode;
69 u8 direct_3dycs_connect;
70 u8 ext_comp_sync;
71 u8 ext_vert_sync;
72};
73
74static u8 upd64031a_init[] = {
75 0x00, 0xb8, 0x48, 0xd2, 0xe6,
76 0x03, 0x10, 0x0b, 0xaf, 0x7f,
77 0x00, 0x00, 0x1d, 0x5e, 0x00,
78 0xd0
79};
80
81/* ------------------------------------------------------------------------ */
82
83static u8 upd64031a_read(struct i2c_client *client, u8 reg)
84{
85 u8 buf[2];
86
87 if (reg >= sizeof(buf))
88 return 0xff;
89 i2c_master_recv(client, buf, 2);
90 return buf[reg];
91}
92
93/* ------------------------------------------------------------------------ */
94
95static void upd64031a_write(struct i2c_client *client, u8 reg, u8 val)
96{
97 u8 buf[2];
98
99 buf[0] = reg;
100 buf[1] = val;
101 v4l_dbg(1, debug, client, "writing reg addr: %02X val: %02X\n", reg, val);
102 if (i2c_master_send(client, buf, 2) != 2)
103 v4l_err(client, "I/O error write 0x%02x/0x%02x\n", reg, val);
104}
105
106/* ------------------------------------------------------------------------ */
107
108/* The input has changed due to a new input or a channel change */
109static void upd64031a_change(struct i2c_client *client)
110{
111 struct upd64031a_state *state = i2c_get_clientdata(client);
112 u8 reg = state->regs[R00];
113
114 v4l_dbg(1, debug, client, "changed input or channel\n");
115 upd64031a_write(client, R00, reg | 0x10);
116 upd64031a_write(client, R00, reg & ~0x10);
117}
118
119/* ------------------------------------------------------------------------ */
120
121static int upd64031a_command(struct i2c_client *client, unsigned int cmd, void *arg)
122{
123 struct upd64031a_state *state = i2c_get_clientdata(client);
124 struct v4l2_routing *route = arg;
125
126 switch (cmd) {
127 case VIDIOC_S_FREQUENCY:
128 upd64031a_change(client);
129 break;
130
131 case VIDIOC_INT_G_VIDEO_ROUTING:
132 route->input = (state->gr_mode >> 6) |
133 (state->direct_3dycs_connect >> 4) |
134 (state->ext_comp_sync >> 1) |
135 (state->ext_vert_sync >> 2);
136 route->output = 0;
137 break;
138
139 case VIDIOC_INT_S_VIDEO_ROUTING:
140 {
141 u8 r00, r05, r08;
142
143 state->gr_mode = (route->input & 3) << 6;
144 state->direct_3dycs_connect = (route->input & 0xc) << 4;
145 state->ext_comp_sync = (route->input & UPD64031A_COMPOSITE_EXTERNAL) << 1;
146 state->ext_vert_sync = (route->input & UPD64031A_VERTICAL_EXTERNAL) << 2;
147 r00 = (state->regs[R00] & ~GR_MODE_MASK) | state->gr_mode;
148 r05 = (state->regs[R00] & ~SYNC_CIRCUIT_MASK) |
149 state->ext_comp_sync | state->ext_vert_sync;
150 r08 = (state->regs[R08] & ~DIRECT_3DYCS_CONNECT_MASK) |
151 state->direct_3dycs_connect;
152 upd64031a_write(client, R00, r00);
153 upd64031a_write(client, R05, r05);
154 upd64031a_write(client, R08, r08);
155 upd64031a_change(client);
156 break;
157 }
158
159 case VIDIOC_LOG_STATUS:
160 v4l_info(client, "Status: SA00=0x%02x SA01=0x%02x\n",
161 upd64031a_read(client, 0), upd64031a_read(client, 1));
162 break;
163
164#ifdef CONFIG_VIDEO_ADV_DEBUG
165 case VIDIOC_INT_G_REGISTER:
166 {
167 struct v4l2_register *reg = arg;
168
169 if (reg->i2c_id != I2C_DRIVERID_UPD64031A)
170 return -EINVAL;
171 reg->val = upd64031a_read(client, reg->reg & 0xff);
172 break;
173 }
174
175 case VIDIOC_INT_S_REGISTER:
176 {
177 struct v4l2_register *reg = arg;
178 u8 addr = reg->reg & 0xff;
179 u8 val = reg->val & 0xff;
180
181 if (reg->i2c_id != I2C_DRIVERID_UPD64031A)
182 return -EINVAL;
183 if (!capable(CAP_SYS_ADMIN))
184 return -EPERM;
185 upd64031a_write(client, addr, val);
186 break;
187 }
188#endif
189
190 default:
191 break;
192 }
193 return 0;
194}
195
196/* ------------------------------------------------------------------------ */
197
198/* i2c implementation */
199
200static struct i2c_driver i2c_driver;
201
202static int upd64031a_attach(struct i2c_adapter *adapter, int address, int kind)
203{
204 struct i2c_client *client;
205 struct upd64031a_state *state;
206 int i;
207
208 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
209 return 0;
210
211 client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
212 if (client == NULL) {
213 return -ENOMEM;
214 }
215
216 client->addr = address;
217 client->adapter = adapter;
218 client->driver = &i2c_driver;
219 snprintf(client->name, sizeof(client->name) - 1, "uPD64031A");
220
221 v4l_info(client, "chip found @ 0x%x (%s)\n", address << 1, adapter->name);
222
223 state = kmalloc(sizeof(struct upd64031a_state), GFP_KERNEL);
224 if (state == NULL) {
225 kfree(client);
226 return -ENOMEM;
227 }
228 i2c_set_clientdata(client, state);
229 memcpy(state->regs, upd64031a_init, sizeof(state->regs));
230 state->gr_mode = UPD64031A_GR_ON << 6;
231 state->direct_3dycs_connect = UPD64031A_3DYCS_COMPOSITE << 4;
232 state->ext_comp_sync = state->ext_vert_sync = 0;
233 for (i = 0; i < TOT_REGS; i++) {
234 upd64031a_write(client, i, state->regs[i]);
235 }
236
237 i2c_attach_client(client);
238
239 return 0;
240}
241
242static int upd64031a_probe(struct i2c_adapter *adapter)
243{
244 if (adapter->class & I2C_CLASS_TV_ANALOG)
245 return i2c_probe(adapter, &addr_data, upd64031a_attach);
246 return 0;
247}
248
249static int upd64031a_detach(struct i2c_client *client)
250{
251 int err;
252
253 err = i2c_detach_client(client);
254 if (err)
255 return err;
256
257 kfree(client);
258 return 0;
259}
260
261/* ----------------------------------------------------------------------- */
262
263/* i2c implementation */
264static struct i2c_driver i2c_driver = {
265 .driver = {
266 .name = "upd64031a",
267 },
268 .id = I2C_DRIVERID_UPD64031A,
269 .attach_adapter = upd64031a_probe,
270 .detach_client = upd64031a_detach,
271 .command = upd64031a_command,
272};
273
274
275static int __init upd64031a_init_module(void)
276{
277 return i2c_add_driver(&i2c_driver);
278}
279
280static void __exit upd64031a_exit_module(void)
281{
282 i2c_del_driver(&i2c_driver);
283}
284
285module_init(upd64031a_init_module);
286module_exit(upd64031a_exit_module);
diff --git a/drivers/media/video/upd64083.c b/drivers/media/video/upd64083.c
new file mode 100644
index 000000000000..c3a7ffe5c267
--- /dev/null
+++ b/drivers/media/video/upd64083.c
@@ -0,0 +1,262 @@
1/*
2 * upd6408x - NEC Electronics 3-Dimensional Y/C separation driver
3 *
4 * 2003 by T.Adachi (tadachi@tadachi-net.com)
5 * 2003 by Takeru KOMORIYA <komoriya@paken.org>
6 * 2006 by Hans Verkuil <hverkuil@xs4all.nl>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
21 */
22
23#include <linux/version.h>
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/i2c.h>
27#include <linux/videodev2.h>
28#include <media/v4l2-common.h>
29#include <media/upd64083.h>
30
31MODULE_DESCRIPTION("uPD64083 driver");
32MODULE_AUTHOR("T. Adachi, Takeru KOMORIYA, Hans Verkuil");
33MODULE_LICENSE("GPL");
34
35static int debug = 0;
36module_param(debug, bool, 0644);
37
38MODULE_PARM_DESC(debug, "Debug level (0-1)");
39
40static unsigned short normal_i2c[] = { 0xb8 >> 1, 0xba >> 1, I2C_CLIENT_END };
41
42
43I2C_CLIENT_INSMOD;
44
45enum {
46 R00 = 0, R01, R02, R03, R04,
47 R05, R06, R07, R08, R09,
48 R0A, R0B, R0C, R0D, R0E, R0F,
49 R10, R11, R12, R13, R14,
50 R15, R16,
51 TOT_REGS
52};
53
54struct upd64083_state {
55 u8 mode;
56 u8 ext_y_adc;
57 u8 regs[TOT_REGS];
58};
59
60/* Initial values when used in combination with the
61 NEC upd64031a ghost reduction chip. */
62static u8 upd64083_init[] = {
63 0x1f, 0x01, 0xa0, 0x2d, 0x29, /* we use EXCSS=0 */
64 0x36, 0xdd, 0x05, 0x56, 0x48,
65 0x00, 0x3a, 0xa0, 0x05, 0x08,
66 0x44, 0x60, 0x08, 0x52, 0xf8,
67 0x53, 0x60, 0x10
68};
69
70/* ------------------------------------------------------------------------ */
71
72static void upd64083_log_status(struct i2c_client *client)
73{
74 u8 buf[7];
75
76 i2c_master_recv(client, buf, 7);
77 v4l_info(client, "Status: SA00=%02x SA01=%02x SA02=%02x SA03=%02x "
78 "SA04=%02x SA05=%02x SA06=%02x\n",
79 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
80}
81
82/* ------------------------------------------------------------------------ */
83
84static void upd64083_write(struct i2c_client *client, u8 reg, u8 val)
85{
86 u8 buf[2];
87
88 buf[0] = reg;
89 buf[1] = val;
90 v4l_dbg(1, debug, client, "writing reg addr: %02x val: %02x\n", reg, val);
91 if (i2c_master_send(client, buf, 2) != 2)
92 v4l_err(client, "I/O error write 0x%02x/0x%02x\n", reg, val);
93}
94
95/* ------------------------------------------------------------------------ */
96
97#ifdef CONFIG_VIDEO_ADV_DEBUG
98static u8 upd64083_read(struct i2c_client *client, u8 reg)
99{
100 u8 buf[7];
101
102 if (reg >= sizeof(buf))
103 return 0xff;
104 i2c_master_recv(client, buf, sizeof(buf));
105 return buf[reg];
106}
107#endif
108
109/* ------------------------------------------------------------------------ */
110
111static int upd64083_command(struct i2c_client *client, unsigned int cmd, void *arg)
112{
113 struct upd64083_state *state = i2c_get_clientdata(client);
114 struct v4l2_routing *route = arg;
115
116 switch (cmd) {
117 case VIDIOC_INT_G_VIDEO_ROUTING:
118 route->input = (state->mode >> 6) | (state->ext_y_adc >> 3);
119 route->output = 0;
120 break;
121
122 case VIDIOC_INT_S_VIDEO_ROUTING:
123 {
124 u8 r00, r02;
125
126 if (route->input > 7 || (route->input & 6) == 6)
127 return -EINVAL;
128 state->mode = (route->input & 3) << 6;
129 state->ext_y_adc = (route->input & UPD64083_EXT_Y_ADC) << 3;
130 r00 = (state->regs[R00] & ~(3 << 6)) | state->mode;
131 r02 = (state->regs[R02] & ~(1 << 5)) | state->ext_y_adc;
132 upd64083_write(client, R00, r00);
133 upd64083_write(client, R02, r02);
134 break;
135 }
136
137 case VIDIOC_LOG_STATUS:
138 upd64083_log_status(client);
139 break;
140
141#ifdef CONFIG_VIDEO_ADV_DEBUG
142 case VIDIOC_INT_G_REGISTER:
143 {
144 struct v4l2_register *reg = arg;
145
146 if (reg->i2c_id != I2C_DRIVERID_UPD64083)
147 return -EINVAL;
148 reg->val = upd64083_read(client, reg->reg & 0xff);
149 break;
150 }
151
152 case VIDIOC_INT_S_REGISTER:
153 {
154 struct v4l2_register *reg = arg;
155 u8 addr = reg->reg & 0xff;
156 u8 val = reg->val & 0xff;
157
158 if (reg->i2c_id != I2C_DRIVERID_UPD64083)
159 return -EINVAL;
160 if (!capable(CAP_SYS_ADMIN))
161 return -EPERM;
162 upd64083_write(client, addr, val);
163 break;
164 }
165#endif
166 default:
167 break;
168 }
169
170 return 0;
171}
172
173/* ------------------------------------------------------------------------ */
174
175/* i2c implementation */
176
177static struct i2c_driver i2c_driver;
178
179static int upd64083_attach(struct i2c_adapter *adapter, int address, int kind)
180{
181 struct i2c_client *client;
182 struct upd64083_state *state;
183 int i;
184
185 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
186 return 0;
187
188 client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
189 if (client == NULL) {
190 return -ENOMEM;
191 }
192
193 client->addr = address;
194 client->adapter = adapter;
195 client->driver = &i2c_driver;
196 snprintf(client->name, sizeof(client->name) - 1, "uPD64083");
197
198 v4l_info(client, "chip found @ 0x%x (%s)\n", address << 1, adapter->name);
199
200 state = kmalloc(sizeof(struct upd64083_state), GFP_KERNEL);
201 if (state == NULL) {
202 kfree(client);
203 return -ENOMEM;
204 }
205 i2c_set_clientdata(client, state);
206 /* Initially assume that a ghost reduction chip is present */
207 state->mode = 0; /* YCS mode */
208 state->ext_y_adc = (1 << 5);
209 memcpy(state->regs, upd64083_init, TOT_REGS);
210 for (i = 0; i < TOT_REGS; i++) {
211 upd64083_write(client, i, state->regs[i]);
212 }
213 i2c_attach_client(client);
214
215 return 0;
216}
217
218static int upd64083_probe(struct i2c_adapter *adapter)
219{
220 if (adapter->class & I2C_CLASS_TV_ANALOG)
221 return i2c_probe(adapter, &addr_data, upd64083_attach);
222 return 0;
223}
224
225static int upd64083_detach(struct i2c_client *client)
226{
227 int err;
228
229 err = i2c_detach_client(client);
230 if (err)
231 return err;
232
233 kfree(client);
234 return 0;
235}
236
237/* ----------------------------------------------------------------------- */
238
239/* i2c implementation */
240static struct i2c_driver i2c_driver = {
241 .driver = {
242 .name = "upd64083",
243 },
244 .id = I2C_DRIVERID_UPD64083,
245 .attach_adapter = upd64083_probe,
246 .detach_client = upd64083_detach,
247 .command = upd64083_command,
248};
249
250
251static int __init upd64083_init_module(void)
252{
253 return i2c_add_driver(&i2c_driver);
254}
255
256static void __exit upd64083_exit_module(void)
257{
258 i2c_del_driver(&i2c_driver);
259}
260
261module_init(upd64083_init_module);
262module_exit(upd64083_exit_module);
diff --git a/drivers/media/video/usbvideo/Kconfig b/drivers/media/video/usbvideo/Kconfig
new file mode 100644
index 000000000000..08a5d20bb2c0
--- /dev/null
+++ b/drivers/media/video/usbvideo/Kconfig
@@ -0,0 +1,38 @@
1config VIDEO_USBVIDEO
2 tristate
3
4config USB_VICAM
5 tristate "USB 3com HomeConnect (aka vicam) support (EXPERIMENTAL)"
6 depends on USB && VIDEO_DEV && EXPERIMENTAL
7 select VIDEO_USBVIDEO
8 ---help---
9	  Say Y here if you have a 3com HomeConnect camera (vicam).
10
11 To compile this driver as a module, choose M here: the
12 module will be called vicam.
13
14config USB_IBMCAM
15 tristate "USB IBM (Xirlink) C-it Camera support"
16 depends on USB && VIDEO_DEV
17 select VIDEO_USBVIDEO
18 ---help---
19	  Say Y here if you want to connect an IBM "C-It" camera, also known as
20 "Xirlink PC Camera" to your computer's USB port.
21
22 To compile this driver as a module, choose M here: the
23 module will be called ibmcam.
24
25 This camera has several configuration options which
26 can be specified when you load the module. Read
27 <file:Documentation/video4linux/ibmcam.txt> to learn more.
28
29config USB_KONICAWC
30 tristate "USB Konica Webcam support"
31 depends on USB && VIDEO_DEV
32 select VIDEO_USBVIDEO
33 ---help---
34 Say Y here if you want support for webcams based on a Konica
35 chipset. This is known to work with the Intel YC76 webcam.
36
37 To compile this driver as a module, choose M here: the
38 module will be called konicawc.
diff --git a/drivers/media/video/usbvideo/Makefile b/drivers/media/video/usbvideo/Makefile
index ed410a5ee8c9..bb52eb8dc2f9 100644
--- a/drivers/media/video/usbvideo/Makefile
+++ b/drivers/media/video/usbvideo/Makefile
@@ -1,4 +1,4 @@
1obj-$(CONFIG_USB_IBMCAM) += ibmcam.o usbvideo.o ultracam.o 1obj-$(CONFIG_VIDEO_USBVIDEO) += usbvideo.o
2obj-$(CONFIG_USB_KONICAWC) += konicawc.o usbvideo.o 2obj-$(CONFIG_USB_IBMCAM) += ibmcam.o ultracam.o
3obj-$(CONFIG_USB_VICAM) += vicam.o usbvideo.o 3obj-$(CONFIG_USB_KONICAWC) += konicawc.o
4 4obj-$(CONFIG_USB_VICAM) += vicam.o
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 11a97f30b876..d330fa985bcc 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -317,6 +317,7 @@ static const char *v4l2_int_ioctls[] = {
317 [_IOC_NR(TUNER_SET_STANDBY)] = "TUNER_SET_STANDBY", 317 [_IOC_NR(TUNER_SET_STANDBY)] = "TUNER_SET_STANDBY",
318 [_IOC_NR(TDA9887_SET_CONFIG)] = "TDA9887_SET_CONFIG", 318 [_IOC_NR(TDA9887_SET_CONFIG)] = "TDA9887_SET_CONFIG",
319 319
320 [_IOC_NR(VIDIOC_INT_S_TUNER_MODE)] = "VIDIOC_INT_S_TUNER_MODE",
320 [_IOC_NR(VIDIOC_INT_S_REGISTER)] = "VIDIOC_INT_S_REGISTER", 321 [_IOC_NR(VIDIOC_INT_S_REGISTER)] = "VIDIOC_INT_S_REGISTER",
321 [_IOC_NR(VIDIOC_INT_G_REGISTER)] = "VIDIOC_INT_G_REGISTER", 322 [_IOC_NR(VIDIOC_INT_G_REGISTER)] = "VIDIOC_INT_G_REGISTER",
322 [_IOC_NR(VIDIOC_INT_RESET)] = "VIDIOC_INT_RESET", 323 [_IOC_NR(VIDIOC_INT_RESET)] = "VIDIOC_INT_RESET",
@@ -325,7 +326,12 @@ static const char *v4l2_int_ioctls[] = {
325 [_IOC_NR(VIDIOC_INT_S_VBI_DATA)] = "VIDIOC_INT_S_VBI_DATA", 326 [_IOC_NR(VIDIOC_INT_S_VBI_DATA)] = "VIDIOC_INT_S_VBI_DATA",
326 [_IOC_NR(VIDIOC_INT_G_VBI_DATA)] = "VIDIOC_INT_G_VBI_DATA", 327 [_IOC_NR(VIDIOC_INT_G_VBI_DATA)] = "VIDIOC_INT_G_VBI_DATA",
327 [_IOC_NR(VIDIOC_INT_G_CHIP_IDENT)] = "VIDIOC_INT_G_CHIP_IDENT", 328 [_IOC_NR(VIDIOC_INT_G_CHIP_IDENT)] = "VIDIOC_INT_G_CHIP_IDENT",
328 [_IOC_NR(VIDIOC_INT_I2S_CLOCK_FREQ)] = "VIDIOC_INT_I2S_CLOCK_FREQ" 329 [_IOC_NR(VIDIOC_INT_I2S_CLOCK_FREQ)] = "VIDIOC_INT_I2S_CLOCK_FREQ",
330 [_IOC_NR(VIDIOC_INT_S_STANDBY)] = "VIDIOC_INT_S_STANDBY",
331 [_IOC_NR(VIDIOC_INT_S_AUDIO_ROUTING)] = "VIDIOC_INT_S_AUDIO_ROUTING",
332 [_IOC_NR(VIDIOC_INT_G_AUDIO_ROUTING)] = "VIDIOC_INT_G_AUDIO_ROUTING",
333 [_IOC_NR(VIDIOC_INT_S_VIDEO_ROUTING)] = "VIDIOC_INT_S_VIDEO_ROUTING",
334 [_IOC_NR(VIDIOC_INT_G_VIDEO_ROUTING)] = "VIDIOC_INT_G_VIDEO_ROUTING"
329}; 335};
330#define V4L2_INT_IOCTLS ARRAY_SIZE(v4l2_int_ioctls) 336#define V4L2_INT_IOCTLS ARRAY_SIZE(v4l2_int_ioctls)
331 337
diff --git a/drivers/media/video/video-buf.c b/drivers/media/video/video-buf.c
index d2ca0f08d0df..acc5ea936687 100644
--- a/drivers/media/video/video-buf.c
+++ b/drivers/media/video/video-buf.c
@@ -399,19 +399,25 @@ void videobuf_queue_pci(struct videobuf_queue* q)
399int videobuf_pci_dma_map(struct pci_dev *pci,struct videobuf_dmabuf *dma) 399int videobuf_pci_dma_map(struct pci_dev *pci,struct videobuf_dmabuf *dma)
400{ 400{
401 struct videobuf_queue q; 401 struct videobuf_queue q;
402 struct videobuf_queue_ops qops;
402 403
403 q.dev=pci; 404 q.dev=pci;
404 q.ops->vb_map_sg=(vb_map_sg_t *)pci_unmap_sg; 405 qops.vb_map_sg=(vb_map_sg_t *)pci_map_sg;
406 qops.vb_unmap_sg=(vb_map_sg_t *)pci_unmap_sg;
407 q.ops = &qops;
405 408
406 return (videobuf_dma_unmap(&q,dma)); 409 return (videobuf_dma_map(&q,dma));
407} 410}
408 411
409int videobuf_pci_dma_unmap(struct pci_dev *pci,struct videobuf_dmabuf *dma) 412int videobuf_pci_dma_unmap(struct pci_dev *pci,struct videobuf_dmabuf *dma)
410{ 413{
411 struct videobuf_queue q; 414 struct videobuf_queue q;
415 struct videobuf_queue_ops qops;
412 416
413 q.dev=pci; 417 q.dev=pci;
414 q.ops->vb_map_sg=(vb_map_sg_t *)pci_unmap_sg; 418 qops.vb_map_sg=(vb_map_sg_t *)pci_map_sg;
419 qops.vb_unmap_sg=(vb_map_sg_t *)pci_unmap_sg;
420 q.ops = &qops;
415 421
416 return (videobuf_dma_unmap(&q,dma)); 422 return (videobuf_dma_unmap(&q,dma));
417} 423}
@@ -923,7 +929,7 @@ ssize_t videobuf_read_one(struct videobuf_queue *q,
923 /* need to capture a new frame */ 929 /* need to capture a new frame */
924 retval = -ENOMEM; 930 retval = -ENOMEM;
925 q->read_buf = videobuf_alloc(q->msize); 931 q->read_buf = videobuf_alloc(q->msize);
926 dprintk(1,"video alloc=0x%08x\n",(unsigned int) q->read_buf); 932 dprintk(1,"video alloc=0x%p\n", q->read_buf);
927 if (NULL == q->read_buf) 933 if (NULL == q->read_buf)
928 goto done; 934 goto done;
929 q->read_buf->memory = V4L2_MEMORY_USERPTR; 935 q->read_buf->memory = V4L2_MEMORY_USERPTR;
diff --git a/drivers/media/video/wm8739.c b/drivers/media/video/wm8739.c
new file mode 100644
index 000000000000..a9b59c35cd67
--- /dev/null
+++ b/drivers/media/video/wm8739.c
@@ -0,0 +1,355 @@
1/*
2 * wm8739
3 *
4 * Copyright (C) 2005 T. Adachi <tadachi@tadachi-net.com>
5 *
6 * Copyright (C) 2005 Hans Verkuil <hverkuil@xs4all.nl>
7 * - Cleanup
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24#include <linux/module.h>
25#include <linux/types.h>
26#include <linux/ioctl.h>
27#include <asm/uaccess.h>
28#include <linux/i2c.h>
29#include <linux/i2c-id.h>
30#include <linux/videodev.h>
31#include <media/v4l2-common.h>
32
33MODULE_DESCRIPTION("wm8739 driver");
34MODULE_AUTHOR("T. Adachi, Hans Verkuil");
35MODULE_LICENSE("GPL");
36
37static int debug = 0;
38static unsigned short normal_i2c[] = { 0x34 >> 1, 0x36 >> 1, I2C_CLIENT_END };
39
40module_param(debug, int, 0644);
41
42MODULE_PARM_DESC(debug, "Debug level (0-1)");
43
44
45I2C_CLIENT_INSMOD;
46
47/* ------------------------------------------------------------------------ */
48
49enum {
50 R0 = 0, R1,
51 R5 = 5, R6, R7, R8, R9, R15 = 15,
52 TOT_REGS
53};
54
55struct wm8739_state {
56 u32 clock_freq;
57 u8 muted;
58 u16 volume;
59 u16 balance;
60 u8 vol_l; /* +12dB to -34.5dB 1.5dB step (5bit) def:0dB */
61 u8 vol_r; /* +12dB to -34.5dB 1.5dB step (5bit) def:0dB */
62};
63
64/* ------------------------------------------------------------------------ */
65
66static int wm8739_write(struct i2c_client *client, int reg, u16 val)
67{
68 int i;
69
70 if (reg < 0 || reg >= TOT_REGS) {
71 v4l_err(client, "Invalid register R%d\n", reg);
72 return -1;
73 }
74
75 v4l_dbg(1, debug, client, "write: %02x %02x\n", reg, val);
76
77 for (i = 0; i < 3; i++) {
78 if (i2c_smbus_write_byte_data(client, (reg << 1) |
79 (val >> 8), val & 0xff) == 0) {
80 return 0;
81 }
82 }
83 v4l_err(client, "I2C: cannot write %03x to register R%d\n", val, reg);
84 return -1;
85}
86
87/* write regs to set audio volume etc */
88static void wm8739_set_audio(struct i2c_client *client)
89{
90 struct wm8739_state *state = i2c_get_clientdata(client);
91 u16 mute = state->muted ? 0x80 : 0;
92
93 /* Volume setting: bits 0-4, 0x1f = 12 dB, 0x00 = -34.5 dB
94 * Default setting: 0x17 = 0 dB
95 */
96 wm8739_write(client, R0, (state->vol_l & 0x1f) | mute);
97 wm8739_write(client, R1, (state->vol_r & 0x1f) | mute);
98}
99
100static int wm8739_get_ctrl(struct i2c_client *client, struct v4l2_control *ctrl)
101{
102 struct wm8739_state *state = i2c_get_clientdata(client);
103
104 switch (ctrl->id) {
105 case V4L2_CID_AUDIO_MUTE:
106 ctrl->value = state->muted;
107 break;
108
109 case V4L2_CID_AUDIO_VOLUME:
110 ctrl->value = state->volume;
111 break;
112
113 case V4L2_CID_AUDIO_BALANCE:
114 ctrl->value = state->balance;
115 break;
116
117 default:
118 return -EINVAL;
119 }
120 return 0;
121}
122
123static int wm8739_set_ctrl(struct i2c_client *client, struct v4l2_control *ctrl)
124{
125 struct wm8739_state *state = i2c_get_clientdata(client);
126 unsigned int work_l, work_r;
127
128 switch (ctrl->id) {
129 case V4L2_CID_AUDIO_MUTE:
130 state->muted = ctrl->value;
131 break;
132
133 case V4L2_CID_AUDIO_VOLUME:
134 state->volume = ctrl->value;
135 break;
136
137 case V4L2_CID_AUDIO_BALANCE:
138 state->balance = ctrl->value;
139 break;
140
141 default:
142 return -EINVAL;
143 }
144
145 /* normalize ( 65535 to 0 -> 31 to 0 (12dB to -34.5dB) ) */
146 work_l = (min(65536 - state->balance, 32768) * state->volume) / 32768;
147 work_r = (min(state->balance, (u16)32768) * state->volume) / 32768;
148
149 state->vol_l = (long)work_l * 31 / 65535;
150 state->vol_r = (long)work_r * 31 / 65535;
151
152 /* set audio volume etc. */
153 wm8739_set_audio(client);
154 return 0;
155}
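Since wm8739_set_ctrl above folds the 16-bit user-space volume/balance pair into the two 5-bit gain fields (0x00 = -34.5 dB ... 0x1f = +12 dB, 0x17 = 0 dB), a short worked example may help. This is a hedged, standalone plain-C sketch of the same arithmetic; the names wm8739_map and clamp32768 are illustrative only and do not appear in the driver:

#include <stdio.h>

static unsigned int clamp32768(unsigned int v)
{
	return v > 32768 ? 32768 : v;
}

/* Mirror of the normalisation in wm8739_set_ctrl: volume and balance are
 * 0..65535, the result is the 0..31 register value for each channel. */
static void wm8739_map(unsigned int volume, unsigned int balance,
		       unsigned int *vol_l, unsigned int *vol_r)
{
	unsigned int work_l = clamp32768(65536 - balance) * volume / 32768;
	unsigned int work_r = clamp32768(balance) * volume / 32768;

	*vol_l = work_l * 31 / 65535;
	*vol_r = work_r * 31 / 65535;
}

int main(void)
{
	unsigned int l, r;

	wm8739_map(58880, 32768, &l, &r);	/* default volume, centred balance */
	printf("centred:   vol_l=0x%02x vol_r=0x%02x\n", l, r);	/* 0x1b 0x1b */

	wm8739_map(58880, 0, &l, &r);		/* balance hard left */
	printf("hard left: vol_l=0x%02x vol_r=0x%02x\n", l, r);	/* 0x1b 0x00 */
	return 0;
}

On this mapping the control default of 58880 lands on 0x1b, four 1.5 dB steps above the chip's 0 dB setting of 0x17.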
156
157/* ------------------------------------------------------------------------ */
158
159static struct v4l2_queryctrl wm8739_qctrl[] = {
160 {
161 .id = V4L2_CID_AUDIO_VOLUME,
162 .name = "Volume",
163 .minimum = 0,
164 .maximum = 65535,
165 .step = 65535/100,
166 .default_value = 58880,
167 .flags = 0,
168 .type = V4L2_CTRL_TYPE_INTEGER,
169 },{
170 .id = V4L2_CID_AUDIO_MUTE,
171 .name = "Mute",
172 .minimum = 0,
173 .maximum = 1,
174 .step = 1,
175 .default_value = 1,
176 .flags = 0,
177 .type = V4L2_CTRL_TYPE_BOOLEAN,
178 },{
179 .id = V4L2_CID_AUDIO_BALANCE,
180 .name = "Balance",
181 .minimum = 0,
182 .maximum = 65535,
183 .step = 65535/100,
184 .default_value = 32768,
185 .flags = 0,
186 .type = V4L2_CTRL_TYPE_INTEGER,
187 }
188};
189
190/* ------------------------------------------------------------------------ */
191
192static int wm8739_command(struct i2c_client *client, unsigned int cmd, void *arg)
193{
194 struct wm8739_state *state = i2c_get_clientdata(client);
195
196 switch (cmd) {
197 case VIDIOC_INT_AUDIO_CLOCK_FREQ:
198 {
199 u32 audiofreq = *(u32 *)arg;
200
201 state->clock_freq = audiofreq;
202 wm8739_write(client, R9, 0x000); /* de-activate */
203 switch (audiofreq) {
204 case 44100:
205 wm8739_write(client, R8, 0x020); /* 256fps, fs=44.1k */
206 break;
207 case 48000:
208 wm8739_write(client, R8, 0x000); /* 256fps, fs=48k */
209 break;
210 case 32000:
211 wm8739_write(client, R8, 0x018); /* 256fps, fs=32k */
212 break;
213 default:
214 break;
215 }
216 wm8739_write(client, R9, 0x001); /* activate */
217 break;
218 }
219
220 case VIDIOC_G_CTRL:
221 return wm8739_get_ctrl(client, arg);
222
223 case VIDIOC_S_CTRL:
224 return wm8739_set_ctrl(client, arg);
225
226 case VIDIOC_QUERYCTRL:
227 {
228 struct v4l2_queryctrl *qc = arg;
229 int i;
230
231 for (i = 0; i < ARRAY_SIZE(wm8739_qctrl); i++)
232 if (qc->id && qc->id == wm8739_qctrl[i].id) {
233 memcpy(qc, &wm8739_qctrl[i], sizeof(*qc));
234 return 0;
235 }
236 return -EINVAL;
237 }
238
239 case VIDIOC_LOG_STATUS:
240 v4l_info(client, "Frequency: %u Hz\n", state->clock_freq);
241 v4l_info(client, "Volume L: %02x%s\n", state->vol_l & 0x1f,
242 state->muted ? " (muted)" : "");
243 v4l_info(client, "Volume R: %02x%s\n", state->vol_r & 0x1f,
244 state->muted ? " (muted)" : "");
245 break;
246
247 default:
248 return -EINVAL;
249 }
250
251 return 0;
252}
253
254/* ------------------------------------------------------------------------ */
255
256/* i2c implementation */
257
258static struct i2c_driver i2c_driver;
259
260static int wm8739_attach(struct i2c_adapter *adapter, int address, int kind)
261{
262 struct i2c_client *client;
263 struct wm8739_state *state;
264
265 /* Check if the adapter supports the needed features */
266 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
267 return 0;
268
269 client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
270 if (client == NULL)
271 return -ENOMEM;
272
273 client->addr = address;
274 client->adapter = adapter;
275 client->driver = &i2c_driver;
276 snprintf(client->name, sizeof(client->name) - 1, "wm8739");
277
278 v4l_info(client, "chip found @ 0x%x (%s)\n", address << 1, adapter->name);
279
280 state = kmalloc(sizeof(struct wm8739_state), GFP_KERNEL);
281 if (state == NULL) {
282 kfree(client);
283 return -ENOMEM;
284 }
285 state->vol_l = 0x17; /* 0dB */
286 state->vol_r = 0x17; /* 0dB */
287 state->muted = 0;
288 state->balance = 32768;
289 /* normalize (12dB(31) to -34.5dB(0) [0dB(23)] -> 65535 to 0) */
290 state->volume = ((long)state->vol_l + 1) * 65535 / 31;
291 state->clock_freq = 48000;
292 i2c_set_clientdata(client, state);
293
294 /* initialize wm8739 */
295 wm8739_write(client, R15, 0x00); /* reset */
 296	wm8739_write(client, R5, 0x000);	/* filter setting: high-pass filter, offset clear */
297 wm8739_write(client, R6, 0x000); /* ADC, OSC, Power Off mode Disable */
298 wm8739_write(client, R7, 0x049); /* Digital Audio interface format */
299 /* Enable Master mode */
300 /* 24 bit, MSB first/left justified */
301 wm8739_write(client, R8, 0x000); /* sampling control */
302 /* normal, 256fs, 48KHz sampling rate */
303 wm8739_write(client, R9, 0x001); /* activate */
304 wm8739_set_audio(client); /* set volume/mute */
305
306 i2c_attach_client(client);
307
308 return 0;
309}
310
311static int wm8739_probe(struct i2c_adapter *adapter)
312{
313 if (adapter->class & I2C_CLASS_TV_ANALOG)
314 return i2c_probe(adapter, &addr_data, wm8739_attach);
315 return 0;
316}
317
318static int wm8739_detach(struct i2c_client *client)
319{
320 int err;
321
322 err = i2c_detach_client(client);
323 if (err)
324 return err;
325
326 kfree(client);
327 return 0;
328}
329
330/* ----------------------------------------------------------------------- */
331
332/* i2c implementation */
333static struct i2c_driver i2c_driver = {
334 .driver = {
335 .name = "wm8739",
336 },
337 .id = I2C_DRIVERID_WM8739,
338 .attach_adapter = wm8739_probe,
339 .detach_client = wm8739_detach,
340 .command = wm8739_command,
341};
342
343
344static int __init wm8739_init_module(void)
345{
346 return i2c_add_driver(&i2c_driver);
347}
348
349static void __exit wm8739_cleanup_module(void)
350{
351 i2c_del_driver(&i2c_driver);
352}
353
354module_init(wm8739_init_module);
355module_exit(wm8739_cleanup_module);
diff --git a/drivers/media/video/zc0301/Kconfig b/drivers/media/video/zc0301/Kconfig
new file mode 100644
index 000000000000..c3bf886b80cd
--- /dev/null
+++ b/drivers/media/video/zc0301/Kconfig
@@ -0,0 +1,11 @@
1config USB_ZC0301
2 tristate "USB ZC0301 Image Processor and Control Chip support"
3 depends on USB && VIDEO_DEV
4 ---help---
5 Say Y here if you want support for cameras based on the ZC0301
6 Image Processor and Control Chip.
7
8 See <file:Documentation/video4linux/zc0301.txt> for more info.
9
10 To compile this driver as a module, choose M here: the
11 module will be called zc0301.
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index 3f5d77f633fa..003b077c2324 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -60,6 +60,17 @@ config MMC_SDHCI
60 60
61 If unsure, say N. 61 If unsure, say N.
62 62
63config MMC_OMAP
64 tristate "TI OMAP Multimedia Card Interface support"
65 depends on ARCH_OMAP && MMC
66 select TPS65010 if MACH_OMAP_H2
67 help
68	  This selects the TI OMAP Multimedia Card Interface.
69 If you have an OMAP board with a Multimedia Card slot,
70 say Y or M here.
71
72 If unsure, say N.
73
63config MMC_WBSD 74config MMC_WBSD
64 tristate "Winbond W83L51xD SD/MMC Card Interface support" 75 tristate "Winbond W83L51xD SD/MMC Card Interface support"
65 depends on MMC && ISA_DMA_API 76 depends on MMC && ISA_DMA_API
@@ -80,4 +91,22 @@ config MMC_AU1X
80 91
81 If unsure, say N. 92 If unsure, say N.
82 93
94config MMC_AT91RM9200
95 tristate "AT91RM9200 SD/MMC Card Interface support"
96 depends on ARCH_AT91RM9200 && MMC
97 help
98 This selects the AT91RM9200 MCI controller.
99
100 If unsure, say N.
101
102config MMC_IMX
103 tristate "Motorola i.MX Multimedia Card Interface support"
104 depends on ARCH_IMX && MMC
105 help
106	  This selects the Motorola i.MX Multimedia Card Interface.
107	  If you have an i.MX platform with a Multimedia Card slot,
108 say Y or M here.
109
110 If unsure, say N.
111
83endmenu 112endmenu
diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile
index 769d545284a4..d2957e35cc6f 100644
--- a/drivers/mmc/Makefile
+++ b/drivers/mmc/Makefile
@@ -17,8 +17,15 @@ obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
17# 17#
18obj-$(CONFIG_MMC_ARMMMCI) += mmci.o 18obj-$(CONFIG_MMC_ARMMMCI) += mmci.o
19obj-$(CONFIG_MMC_PXA) += pxamci.o 19obj-$(CONFIG_MMC_PXA) += pxamci.o
20obj-$(CONFIG_MMC_IMX) += imxmmc.o
20obj-$(CONFIG_MMC_SDHCI) += sdhci.o 21obj-$(CONFIG_MMC_SDHCI) += sdhci.o
21obj-$(CONFIG_MMC_WBSD) += wbsd.o 22obj-$(CONFIG_MMC_WBSD) += wbsd.o
22obj-$(CONFIG_MMC_AU1X) += au1xmmc.o 23obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
24obj-$(CONFIG_MMC_OMAP) += omap.o
25obj-$(CONFIG_MMC_AT91RM9200) += at91_mci.o
23 26
24mmc_core-y := mmc.o mmc_queue.o mmc_sysfs.o 27mmc_core-y := mmc.o mmc_queue.o mmc_sysfs.o
28
29ifeq ($(CONFIG_MMC_DEBUG),y)
30EXTRA_CFLAGS += -DDEBUG
31endif
diff --git a/drivers/mmc/at91_mci.c b/drivers/mmc/at91_mci.c
new file mode 100644
index 000000000000..6061c2d101a0
--- /dev/null
+++ b/drivers/mmc/at91_mci.c
@@ -0,0 +1,988 @@
1/*
2 * linux/drivers/mmc/at91_mci.c - ATMEL AT91RM9200 MCI Driver
3 *
4 * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
5 *
6 * Copyright (C) 2006 Malcolm Noyes
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13/*
14 This is the AT91RM9200 MCI driver that has been tested with both MMC cards
15 and SD-cards. Boards that support write protect are now supported.
16 The CCAT91SBC001 board does not support SD cards.
17
18 The three entry points are at91_mci_request, at91_mci_set_ios
19 and at91_mci_get_ro.
20
21 SET IOS
22   This configures the device, putting it into the correct mode and setting the
23   required clock speed.
24
25 MCI REQUEST
26 MCI request processes the commands sent in the mmc_request structure. This
27 can consist of a processing command and a stop command in the case of
28 multiple block transfers.
29
30   There are three main types of request: commands, reads and writes.
31
32   Commands are straightforward. The command is submitted to the controller and
33   the request function returns. When the controller generates an interrupt to indicate
34   that the command has finished, the response to the command is read and the
35   mmc_request_done function is called to end the request.
36
37 Reads and writes work in a similar manner to normal commands but involve the PDC (DMA)
38 controller to manage the transfers.
39
40 A read is done from the controller directly to the scatterlist passed in from the request.
41 Due to a bug in the controller, when a read is completed, all the words are byte
42 swapped in the scatterlist buffers.
43
44 The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY
45
46   A write is slightly different in that the bytes to write are read from the scatterlist
47   into a DMA memory buffer (in case the source buffer is read-only). The entire
48   write is then performed from this single DMA memory buffer.
49
50 The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY
51
52 GET RO
53 Gets the status of the write protect pin, if available.
54*/
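The byte-swap quirk noted in the comment above is, in effect, a swab32() pass over every completed read buffer; at91mci_post_dma_read below performs exactly that per scatterlist entry. As a minimal, self-contained sketch of the fix-up only (the helper name at91mci_fix_read_words is illustrative and not part of this patch):

#include <linux/types.h>
#include <asm/byteorder.h>	/* swab32() */

/*
 * Illustrative helper: undo the controller's per-word byte swap on a
 * buffer of 'nbytes' bytes that the PDC has just filled from the card.
 */
static void at91mci_fix_read_words(u32 *buf, unsigned int nbytes)
{
	unsigned int i;

	for (i = 0; i < nbytes / 4; i++)
		buf[i] = swab32(buf[i]);
}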
55
56#include <linux/config.h>
57#include <linux/module.h>
58#include <linux/moduleparam.h>
59#include <linux/init.h>
60#include <linux/ioport.h>
61#include <linux/platform_device.h>
62#include <linux/interrupt.h>
63#include <linux/blkdev.h>
64#include <linux/delay.h>
65#include <linux/err.h>
66#include <linux/dma-mapping.h>
67#include <linux/clk.h>
68
69#include <linux/mmc/host.h>
70#include <linux/mmc/protocol.h>
71
72#include <asm/io.h>
73#include <asm/irq.h>
74#include <asm/mach/mmc.h>
75#include <asm/arch/board.h>
76#include <asm/arch/gpio.h>
77#include <asm/arch/at91rm9200_mci.h>
78#include <asm/arch/at91rm9200_pdc.h>
79
80#define DRIVER_NAME "at91_mci"
81
82#undef SUPPORT_4WIRE
83
84#ifdef CONFIG_MMC_DEBUG
85#define DBG(fmt...) \
86 printk(fmt)
87#else
88#define DBG(fmt...) do { } while (0)
89#endif
90
91static struct clk *mci_clk;
92
93#define FL_SENT_COMMAND (1 << 0)
94#define FL_SENT_STOP (1 << 1)
95
96
97
98/*
99 * Read from a MCI register.
100 */
101static inline unsigned long at91_mci_read(unsigned int reg)
102{
103 void __iomem *mci_base = (void __iomem *)AT91_VA_BASE_MCI;
104
105 return __raw_readl(mci_base + reg);
106}
107
108/*
109 * Write to a MCI register.
110 */
111static inline void at91_mci_write(unsigned int reg, unsigned long value)
112{
113 void __iomem *mci_base = (void __iomem *)AT91_VA_BASE_MCI;
114
115 __raw_writel(value, mci_base + reg);
116}
117
118/*
119 * Low level type for this driver
120 */
121struct at91mci_host
122{
123 struct mmc_host *mmc;
124 struct mmc_command *cmd;
125 struct mmc_request *request;
126
127 struct at91_mmc_data *board;
128 int present;
129
130 /*
131 * Flag indicating when the command has been sent. This is used to
132 * work out whether or not to send the stop
133 */
134 unsigned int flags;
135 /* flag for current bus settings */
136 u32 bus_mode;
137
138 /* DMA buffer used for transmitting */
139 unsigned int* buffer;
140 dma_addr_t physical_address;
141 unsigned int total_length;
142
143 /* Latest in the scatterlist that has been enabled for transfer, but not freed */
144 int in_use_index;
145
146 /* Latest in the scatterlist that has been enabled for transfer */
147 int transfer_index;
148};
149
150/*
151 * Copy from sg to a dma block - used for transfers
152 */
153static inline void at91mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
154{
155 unsigned int len, i, size;
156 unsigned *dmabuf = host->buffer;
157
158 size = host->total_length;
159 len = data->sg_len;
160
161 /*
162 * Just loop through all entries. Size might not
163 * be the entire list though so make sure that
164 * we do not transfer too much.
165 */
166 for (i = 0; i < len; i++) {
167 struct scatterlist *sg;
168 int amount;
169 int index;
170 unsigned int *sgbuffer;
171
172 sg = &data->sg[i];
173
174 sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
175 amount = min(size, sg->length);
176 size -= amount;
177 amount /= 4;
178
179 for (index = 0; index < amount; index++)
180 *dmabuf++ = swab32(sgbuffer[index]);
181
182 kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
183
184 if (size == 0)
185 break;
186 }
187
188 /*
189 * Check that we didn't get a request to transfer
190 * more data than can fit into the SG list.
191 */
192 BUG_ON(size != 0);
193}
194
195/*
196 * Prepare a dma read
197 */
198static void at91mci_pre_dma_read(struct at91mci_host *host)
199{
200 int i;
201 struct scatterlist *sg;
202 struct mmc_command *cmd;
203 struct mmc_data *data;
204
205 DBG("pre dma read\n");
206
207 cmd = host->cmd;
208 if (!cmd) {
209 DBG("no command\n");
210 return;
211 }
212
213 data = cmd->data;
214 if (!data) {
215 DBG("no data\n");
216 return;
217 }
218
219 for (i = 0; i < 2; i++) {
220 /* nothing left to transfer */
221 if (host->transfer_index >= data->sg_len) {
222 DBG("Nothing left to transfer (index = %d)\n", host->transfer_index);
223 break;
224 }
225
226 /* Check to see if this needs filling */
227 if (i == 0) {
228 if (at91_mci_read(AT91_PDC_RCR) != 0) {
229 DBG("Transfer active in current\n");
230 continue;
231 }
232 }
233 else {
234 if (at91_mci_read(AT91_PDC_RNCR) != 0) {
235 DBG("Transfer active in next\n");
236 continue;
237 }
238 }
239
240 /* Setup the next transfer */
241 DBG("Using transfer index %d\n", host->transfer_index);
242
243 sg = &data->sg[host->transfer_index++];
244 DBG("sg = %p\n", sg);
245
246 sg->dma_address = dma_map_page(NULL, sg->page, sg->offset, sg->length, DMA_FROM_DEVICE);
247
248 DBG("dma address = %08X, length = %d\n", sg->dma_address, sg->length);
249
250 if (i == 0) {
251 at91_mci_write(AT91_PDC_RPR, sg->dma_address);
252 at91_mci_write(AT91_PDC_RCR, sg->length / 4);
253 }
254 else {
255 at91_mci_write(AT91_PDC_RNPR, sg->dma_address);
256 at91_mci_write(AT91_PDC_RNCR, sg->length / 4);
257 }
258 }
259
260 DBG("pre dma read done\n");
261}
262
263/*
264 * Handle after a dma read
265 */
266static void at91mci_post_dma_read(struct at91mci_host *host)
267{
268 struct mmc_command *cmd;
269 struct mmc_data *data;
270
271 DBG("post dma read\n");
272
273 cmd = host->cmd;
274 if (!cmd) {
275 DBG("no command\n");
276 return;
277 }
278
279 data = cmd->data;
280 if (!data) {
281 DBG("no data\n");
282 return;
283 }
284
285 while (host->in_use_index < host->transfer_index) {
286 unsigned int *buffer;
287 int index;
288 int len;
289
290 struct scatterlist *sg;
291
292 DBG("finishing index %d\n", host->in_use_index);
293
294 sg = &data->sg[host->in_use_index++];
295
296 DBG("Unmapping page %08X\n", sg->dma_address);
297
298 dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);
299
300 /* Swap the contents of the buffer */
301 buffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
302 DBG("buffer = %p, length = %d\n", buffer, sg->length);
303
304 data->bytes_xfered += sg->length;
305
306 len = sg->length / 4;
307
308 for (index = 0; index < len; index++) {
309 buffer[index] = swab32(buffer[index]);
310 }
311 kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
312 flush_dcache_page(sg->page);
313 }
314
315 /* Is there another transfer to trigger? */
316 if (host->transfer_index < data->sg_len)
317 at91mci_pre_dma_read(host);
318 else {
319 at91_mci_write(AT91_MCI_IER, AT91_MCI_RXBUFF);
320 at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);
321 }
322
323 DBG("post dma read done\n");
324}
325
326/*
327 * Handle transmitted data
328 */
329static void at91_mci_handle_transmitted(struct at91mci_host *host)
330{
331 struct mmc_command *cmd;
332 struct mmc_data *data;
333
334 DBG("Handling the transmit\n");
335
336 /* Disable the transfer */
337 at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);
338
339 /* Now wait for cmd ready */
340 at91_mci_write(AT91_MCI_IDR, AT91_MCI_TXBUFE);
341 at91_mci_write(AT91_MCI_IER, AT91_MCI_NOTBUSY);
342
343 cmd = host->cmd;
344 if (!cmd) return;
345
346 data = cmd->data;
347 if (!data) return;
348
349 data->bytes_xfered = host->total_length;
350}
351
352/*
353 * Enable the controller
354 */
355static void at91_mci_enable(void)
356{
357 at91_mci_write(AT91_MCI_CR, AT91_MCI_MCIEN);
358 at91_mci_write(AT91_MCI_IDR, 0xFFFFFFFF);
359 at91_mci_write(AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
360 at91_mci_write(AT91_MCI_MR, 0x834A);
361 at91_mci_write(AT91_MCI_SDCR, 0x0);
362}
363
364/*
365 * Disable the controller
366 */
367static void at91_mci_disable(void)
368{
369 at91_mci_write(AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
370}
371
372/*
373 * Send a command
374 * return the interrupts to enable
375 */
376static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
377{
378 unsigned int cmdr, mr;
379 unsigned int block_length;
380 struct mmc_data *data = cmd->data;
381
382 unsigned int blocks;
383 unsigned int ier = 0;
384
385 host->cmd = cmd;
386
387 /* Not sure if this is needed */
388#if 0
389 if ((at91_mci_read(AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
390 DBG("Clearing timeout\n");
391 at91_mci_write(AT91_MCI_ARGR, 0);
392 at91_mci_write(AT91_MCI_CMDR, AT91_MCI_OPDCMD);
393 while (!(at91_mci_read(AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
394 /* spin */
395 DBG("Clearing: SR = %08X\n", at91_mci_read(AT91_MCI_SR));
396 }
397 }
398#endif
399 cmdr = cmd->opcode;
400
401 if (mmc_resp_type(cmd) == MMC_RSP_NONE)
402 cmdr |= AT91_MCI_RSPTYP_NONE;
403 else {
 404		/* if a response is expected then allow maximum response latency */
405 cmdr |= AT91_MCI_MAXLAT;
406 /* set 136 bit response for R2, 48 bit response otherwise */
407 if (mmc_resp_type(cmd) == MMC_RSP_R2)
408 cmdr |= AT91_MCI_RSPTYP_136;
409 else
410 cmdr |= AT91_MCI_RSPTYP_48;
411 }
412
413 if (data) {
414 block_length = 1 << data->blksz_bits;
415 blocks = data->blocks;
416
417 /* always set data start - also set direction flag for read */
418 if (data->flags & MMC_DATA_READ)
419 cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
420 else if (data->flags & MMC_DATA_WRITE)
421 cmdr |= AT91_MCI_TRCMD_START;
422
423 if (data->flags & MMC_DATA_STREAM)
424 cmdr |= AT91_MCI_TRTYP_STREAM;
425 if (data->flags & MMC_DATA_MULTI)
426 cmdr |= AT91_MCI_TRTYP_MULTIPLE;
427 }
428 else {
429 block_length = 0;
430 blocks = 0;
431 }
432
433 if (cmd->opcode == MMC_STOP_TRANSMISSION)
434 cmdr |= AT91_MCI_TRCMD_STOP;
435
436 if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
437 cmdr |= AT91_MCI_OPDCMD;
438
439 /*
440 * Set the arguments and send the command
441 */
442 DBG("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08lX)\n",
443 cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(AT91_MCI_MR));
444
445 if (!data) {
446 at91_mci_write(AT91_PDC_PTCR, AT91_PDC_TXTDIS | AT91_PDC_RXTDIS);
447 at91_mci_write(AT91_PDC_RPR, 0);
448 at91_mci_write(AT91_PDC_RCR, 0);
449 at91_mci_write(AT91_PDC_RNPR, 0);
450 at91_mci_write(AT91_PDC_RNCR, 0);
451 at91_mci_write(AT91_PDC_TPR, 0);
452 at91_mci_write(AT91_PDC_TCR, 0);
453 at91_mci_write(AT91_PDC_TNPR, 0);
454 at91_mci_write(AT91_PDC_TNCR, 0);
455
456 at91_mci_write(AT91_MCI_ARGR, cmd->arg);
457 at91_mci_write(AT91_MCI_CMDR, cmdr);
458 return AT91_MCI_CMDRDY;
459 }
460
461 mr = at91_mci_read(AT91_MCI_MR) & 0x7fff; /* zero block length and PDC mode */
462 at91_mci_write(AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE);
463
464 /*
465 * Disable the PDC controller
466 */
467 at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);
468
469 if (cmdr & AT91_MCI_TRCMD_START) {
470 data->bytes_xfered = 0;
471 host->transfer_index = 0;
472 host->in_use_index = 0;
473 if (cmdr & AT91_MCI_TRDIR) {
474 /*
475 * Handle a read
476 */
477 host->buffer = NULL;
478 host->total_length = 0;
479
480 at91mci_pre_dma_read(host);
481 ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
482 }
483 else {
484 /*
485 * Handle a write
486 */
487 host->total_length = block_length * blocks;
488 host->buffer = dma_alloc_coherent(NULL,
489 host->total_length,
490 &host->physical_address, GFP_KERNEL);
491
492 at91mci_sg_to_dma(host, data);
493
494 DBG("Transmitting %d bytes\n", host->total_length);
495
496 at91_mci_write(AT91_PDC_TPR, host->physical_address);
497 at91_mci_write(AT91_PDC_TCR, host->total_length / 4);
498 ier = AT91_MCI_TXBUFE;
499 }
500 }
501
502 /*
503 * Send the command and then enable the PDC - not the other way round as
504 * the data sheet says
505 */
506
507 at91_mci_write(AT91_MCI_ARGR, cmd->arg);
508 at91_mci_write(AT91_MCI_CMDR, cmdr);
509
510 if (cmdr & AT91_MCI_TRCMD_START) {
511 if (cmdr & AT91_MCI_TRDIR)
512 at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTEN);
513 else
514 at91_mci_write(AT91_PDC_PTCR, AT91_PDC_TXTEN);
515 }
516 return ier;
517}
518
519/*
520 * Wait for a command to complete
521 */
522static void at91mci_process_command(struct at91mci_host *host, struct mmc_command *cmd)
523{
524 unsigned int ier;
525
526 ier = at91_mci_send_command(host, cmd);
527
528 DBG("setting ier to %08X\n", ier);
529
530 /* Stop on errors or the required value */
531 at91_mci_write(AT91_MCI_IER, 0xffff0000 | ier);
532}
533
534/*
535 * Process the next step in the request
536 */
537static void at91mci_process_next(struct at91mci_host *host)
538{
539 if (!(host->flags & FL_SENT_COMMAND)) {
540 host->flags |= FL_SENT_COMMAND;
541 at91mci_process_command(host, host->request->cmd);
542 }
543 else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
544 host->flags |= FL_SENT_STOP;
545 at91mci_process_command(host, host->request->stop);
546 }
547 else
548 mmc_request_done(host->mmc, host->request);
549}
550
551/*
552 * Handle a command that has been completed
553 */
554static void at91mci_completed_command(struct at91mci_host *host)
555{
556 struct mmc_command *cmd = host->cmd;
557 unsigned int status;
558
559 at91_mci_write(AT91_MCI_IDR, 0xffffffff);
560
561 cmd->resp[0] = at91_mci_read(AT91_MCI_RSPR(0));
562 cmd->resp[1] = at91_mci_read(AT91_MCI_RSPR(1));
563 cmd->resp[2] = at91_mci_read(AT91_MCI_RSPR(2));
564 cmd->resp[3] = at91_mci_read(AT91_MCI_RSPR(3));
565
566 if (host->buffer) {
567 dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address);
568 host->buffer = NULL;
569 }
570
571 status = at91_mci_read(AT91_MCI_SR);
572
573 DBG("Status = %08X [%08X %08X %08X %08X]\n",
574 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
575
576 if (status & (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE |
577 AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE |
578 AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)) {
579 if ((status & AT91_MCI_RCRCE) &&
580 ((cmd->opcode == MMC_SEND_OP_COND) || (cmd->opcode == SD_APP_OP_COND))) {
581 cmd->error = MMC_ERR_NONE;
582 }
583 else {
584 if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE))
585 cmd->error = MMC_ERR_TIMEOUT;
586 else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE))
587 cmd->error = MMC_ERR_BADCRC;
588 else if (status & (AT91_MCI_OVRE | AT91_MCI_UNRE))
589 cmd->error = MMC_ERR_FIFO;
590 else
591 cmd->error = MMC_ERR_FAILED;
592
593 DBG("Error detected and set to %d (cmd = %d, retries = %d)\n",
594 cmd->error, cmd->opcode, cmd->retries);
595 }
596 }
597 else
598 cmd->error = MMC_ERR_NONE;
599
600 at91mci_process_next(host);
601}
602
603/*
604 * Handle an MMC request
605 */
606static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
607{
608 struct at91mci_host *host = mmc_priv(mmc);
609 host->request = mrq;
610 host->flags = 0;
611
612 at91mci_process_next(host);
613}
614
615/*
616 * Set the IOS
617 */
618static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
619{
620 int clkdiv;
621 struct at91mci_host *host = mmc_priv(mmc);
622 unsigned long at91_master_clock = clk_get_rate(mci_clk);
623
624 DBG("Clock %uHz, busmode %u, powermode %u, Vdd %u\n",
625 ios->clock, ios->bus_mode, ios->power_mode, ios->vdd);
626
627 if (host)
628 host->bus_mode = ios->bus_mode;
629 else
630 printk("MMC: No host for bus_mode\n");
631
632 if (ios->clock == 0) {
633 /* Disable the MCI controller */
634 at91_mci_write(AT91_MCI_CR, AT91_MCI_MCIDIS);
635 clkdiv = 0;
636 }
637 else {
638 /* Enable the MCI controller */
639 at91_mci_write(AT91_MCI_CR, AT91_MCI_MCIEN);
640
641 if ((at91_master_clock % (ios->clock * 2)) == 0)
642 clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
643 else
644 clkdiv = (at91_master_clock / ios->clock) / 2;
645
646 DBG("clkdiv = %d. mcck = %ld\n", clkdiv,
647 at91_master_clock / (2 * (clkdiv + 1)));
648 }
649 if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
650 DBG("MMC: Setting controller bus width to 4\n");
651 at91_mci_write(AT91_MCI_SDCR, at91_mci_read(AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
652 }
653 else {
654 DBG("MMC: Setting controller bus width to 1\n");
655 at91_mci_write(AT91_MCI_SDCR, at91_mci_read(AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
656 }
657
658 /* Set the clock divider */
659 at91_mci_write(AT91_MCI_MR, (at91_mci_read(AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);
660
661 /* maybe switch power to the card */
662 if (host && host->board->vcc_pin) {
663 switch (ios->power_mode) {
664 case MMC_POWER_OFF:
665 at91_set_gpio_output(host->board->vcc_pin, 0);
666 break;
667 case MMC_POWER_UP:
668 case MMC_POWER_ON:
669 at91_set_gpio_output(host->board->vcc_pin, 1);
670 break;
671 }
672 }
673}
674
675/*
676 * Handle an interrupt
677 */
678static irqreturn_t at91_mci_irq(int irq, void *devid, struct pt_regs *regs)
679{
680 struct at91mci_host *host = devid;
681 int completed = 0;
682
683 unsigned int int_status;
684
685 if (host == NULL)
686 return IRQ_HANDLED;
687
688 int_status = at91_mci_read(AT91_MCI_SR);
689 DBG("MCI irq: status = %08X, %08lX, %08lX\n", int_status, at91_mci_read(AT91_MCI_IMR),
690 int_status & at91_mci_read(AT91_MCI_IMR));
691
692 if ((int_status & at91_mci_read(AT91_MCI_IMR)) & 0xffff0000)
693 completed = 1;
694
695 int_status &= at91_mci_read(AT91_MCI_IMR);
696
697 if (int_status & AT91_MCI_UNRE)
698 DBG("MMC: Underrun error\n");
699 if (int_status & AT91_MCI_OVRE)
700 DBG("MMC: Overrun error\n");
701 if (int_status & AT91_MCI_DTOE)
702 DBG("MMC: Data timeout\n");
703 if (int_status & AT91_MCI_DCRCE)
704 DBG("MMC: CRC error in data\n");
705 if (int_status & AT91_MCI_RTOE)
706 DBG("MMC: Response timeout\n");
707 if (int_status & AT91_MCI_RENDE)
708 DBG("MMC: Response end bit error\n");
709 if (int_status & AT91_MCI_RCRCE)
710 DBG("MMC: Response CRC error\n");
711 if (int_status & AT91_MCI_RDIRE)
712 DBG("MMC: Response direction error\n");
713 if (int_status & AT91_MCI_RINDE)
714 DBG("MMC: Response index error\n");
715
716 /* Only continue processing if no errors */
717 if (!completed) {
718 if (int_status & AT91_MCI_TXBUFE) {
719 DBG("TX buffer empty\n");
720 at91_mci_handle_transmitted(host);
721 }
722
723 if (int_status & AT91_MCI_RXBUFF) {
724 DBG("RX buffer full\n");
725 at91_mci_write(AT91_MCI_IER, AT91_MCI_CMDRDY);
726 }
727
728 if (int_status & AT91_MCI_ENDTX) {
729 DBG("Transmit has ended\n");
730 }
731
732 if (int_status & AT91_MCI_ENDRX) {
733 DBG("Receive has ended\n");
734 at91mci_post_dma_read(host);
735 }
736
737 if (int_status & AT91_MCI_NOTBUSY) {
738 DBG("Card is ready\n");
739 at91_mci_write(AT91_MCI_IER, AT91_MCI_CMDRDY);
740 }
741
742 if (int_status & AT91_MCI_DTIP) {
743 DBG("Data transfer in progress\n");
744 }
745
746 if (int_status & AT91_MCI_BLKE) {
747 DBG("Block transfer has ended\n");
748 }
749
750 if (int_status & AT91_MCI_TXRDY) {
751 DBG("Ready to transmit\n");
752 }
753
754 if (int_status & AT91_MCI_RXRDY) {
755 DBG("Ready to receive\n");
756 }
757
758 if (int_status & AT91_MCI_CMDRDY) {
759 DBG("Command ready\n");
760 completed = 1;
761 }
762 }
763 at91_mci_write(AT91_MCI_IDR, int_status);
764
765 if (completed) {
766 DBG("Completed command\n");
767 at91_mci_write(AT91_MCI_IDR, 0xffffffff);
768 at91mci_completed_command(host);
769 }
770
771 return IRQ_HANDLED;
772}
773
774static irqreturn_t at91_mmc_det_irq(int irq, void *_host, struct pt_regs *regs)
775{
776 struct at91mci_host *host = _host;
777 int present = !at91_get_gpio_value(irq);
778
779 /*
780 * we expect this irq on both insert and remove,
781 * and use a short delay to debounce.
782 */
783 if (present != host->present) {
784 host->present = present;
785 DBG("%s: card %s\n", mmc_hostname(host->mmc),
786 present ? "insert" : "remove");
787 if (!present) {
788 DBG("****** Resetting SD-card bus width ******\n");
789 at91_mci_write(AT91_MCI_SDCR, 0);
790 }
791 mmc_detect_change(host->mmc, msecs_to_jiffies(100));
792 }
793 return IRQ_HANDLED;
794}
795
796int at91_mci_get_ro(struct mmc_host *mmc)
797{
798 int read_only = 0;
799 struct at91mci_host *host = mmc_priv(mmc);
800
801 if (host->board->wp_pin) {
802 read_only = at91_get_gpio_value(host->board->wp_pin);
803 printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc),
804 (read_only ? "read-only" : "read-write") );
805 }
806 else {
807 printk(KERN_WARNING "%s: host does not support reading read-only "
808 "switch. Assuming write-enable.\n", mmc_hostname(mmc));
809 }
810 return read_only;
811}
812
813static struct mmc_host_ops at91_mci_ops = {
814 .request = at91_mci_request,
815 .set_ios = at91_mci_set_ios,
816 .get_ro = at91_mci_get_ro,
817};
818
819/*
820 * Probe for the device
821 */
822static int at91_mci_probe(struct platform_device *pdev)
823{
824 struct mmc_host *mmc;
825 struct at91mci_host *host;
826 int ret;
827
828 DBG("Probe MCI devices\n");
829 at91_mci_disable();
830 at91_mci_enable();
831
832 mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
833 if (!mmc) {
834 DBG("Failed to allocate mmc host\n");
835 return -ENOMEM;
836 }
837
838 mmc->ops = &at91_mci_ops;
839 mmc->f_min = 375000;
840 mmc->f_max = 25000000;
841 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
842
843 host = mmc_priv(mmc);
844 host->mmc = mmc;
845 host->buffer = NULL;
846 host->bus_mode = 0;
847 host->board = pdev->dev.platform_data;
848 if (host->board->wire4) {
849#ifdef SUPPORT_4WIRE
850 mmc->caps |= MMC_CAP_4_BIT_DATA;
851#else
852 printk("MMC: 4 wire bus mode not supported by this driver - using 1 wire\n");
853#endif
854 }
855
856 /*
857 * Get Clock
858 */
859 mci_clk = clk_get(&pdev->dev, "mci_clk");
860 if (!mci_clk) {
861 printk(KERN_ERR "AT91 MMC: no clock defined.\n");
862 return -ENODEV;
863 }
864 clk_enable(mci_clk); /* Enable the peripheral clock */
865
866 /*
867 * Allocate the MCI interrupt
868 */
869 ret = request_irq(AT91_ID_MCI, at91_mci_irq, SA_SHIRQ, DRIVER_NAME, host);
870 if (ret) {
871 DBG("Failed to request MCI interrupt\n");
872 return ret;
873 }
874
875 platform_set_drvdata(pdev, mmc);
876
877 /*
878 * Add host to MMC layer
879 */
880 if (host->board->det_pin)
881 host->present = !at91_get_gpio_value(host->board->det_pin);
882 else
883 host->present = -1;
884
885 mmc_add_host(mmc);
886
887 /*
888 * monitor card insertion/removal if we can
889 */
890 if (host->board->det_pin) {
891 ret = request_irq(host->board->det_pin, at91_mmc_det_irq,
892 SA_SAMPLE_RANDOM, DRIVER_NAME, host);
893 if (ret)
894 DBG("couldn't allocate MMC detect irq\n");
895 }
896
897 DBG(KERN_INFO "Added MCI driver\n");
898
899 return 0;
900}
901
902/*
903 * Remove a device
904 */
905static int at91_mci_remove(struct platform_device *pdev)
906{
907 struct mmc_host *mmc = platform_get_drvdata(pdev);
908 struct at91mci_host *host;
909
910 if (!mmc)
911 return -1;
912
913 host = mmc_priv(mmc);
914
915 if (host->present != -1) {
916 free_irq(host->board->det_pin, host);
917 cancel_delayed_work(&host->mmc->detect);
918 }
919
920 mmc_remove_host(mmc);
921 at91_mci_disable();
922 free_irq(AT91_ID_MCI, host);
923 mmc_free_host(mmc);
924
925 clk_disable(mci_clk); /* Disable the peripheral clock */
926 clk_put(mci_clk);
927
928 platform_set_drvdata(pdev, NULL);
929
930 DBG("Removed\n");
931
932 return 0;
933}
934
935#ifdef CONFIG_PM
936static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
937{
938 struct mmc_host *mmc = platform_get_drvdata(pdev);
939 int ret = 0;
940
941 if (mmc)
942 ret = mmc_suspend_host(mmc, state);
943
944 return ret;
945}
946
947static int at91_mci_resume(struct platform_device *pdev)
948{
949 struct mmc_host *mmc = platform_get_drvdata(pdev);
950 int ret = 0;
951
952 if (mmc)
953 ret = mmc_resume_host(mmc);
954
955 return ret;
956}
957#else
958#define at91_mci_suspend NULL
959#define at91_mci_resume NULL
960#endif
961
962static struct platform_driver at91_mci_driver = {
963 .probe = at91_mci_probe,
964 .remove = at91_mci_remove,
965 .suspend = at91_mci_suspend,
966 .resume = at91_mci_resume,
967 .driver = {
968 .name = DRIVER_NAME,
969 .owner = THIS_MODULE,
970 },
971};
972
973static int __init at91_mci_init(void)
974{
975 return platform_driver_register(&at91_mci_driver);
976}
977
978static void __exit at91_mci_exit(void)
979{
980 platform_driver_unregister(&at91_mci_driver);
981}
982
983module_init(at91_mci_init);
984module_exit(at91_mci_exit);
985
986MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
987MODULE_AUTHOR("Nick Randell");
988MODULE_LICENSE("GPL");
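
For illustration only, not taken from the patch: the clkdiv selection in at91_mci_set_ios() above chooses the divider so that the card clock MCCK = master / (2 * (clkdiv + 1)) never exceeds the requested ios->clock. A minimal standalone sketch of that arithmetic follows; the 60 MHz master clock and the helper name at91_pick_clkdiv() are assumptions invented for the example (the driver gets the real rate from clk_get_rate(mci_clk)).

#include <stdio.h>

/* Same divider selection as at91_mci_set_ios(); inputs are illustrative. */
static unsigned int at91_pick_clkdiv(unsigned long master, unsigned long wanted)
{
	if ((master % (wanted * 2)) == 0)
		return ((master / wanted) / 2) - 1;	/* wanted divides master exactly */
	else
		return (master / wanted) / 2;		/* round down so MCCK stays <= wanted */
}

int main(void)
{
	unsigned long master = 60000000;	/* assumed 60 MHz master clock */
	unsigned long wanted = 25000000;	/* ios->clock requested by the MMC core */
	unsigned int clkdiv = at91_pick_clkdiv(master, wanted);

	/* With these numbers: clkdiv = 1, so MCCK = 60 MHz / 4 = 15 MHz. */
	printf("clkdiv = %u -> MCCK = %lu Hz\n", clkdiv, master / (2 * (clkdiv + 1)));
	return 0;
}
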
diff --git a/drivers/mmc/au1xmmc.c b/drivers/mmc/au1xmmc.c
index 85e89c77bdea..c0326bbc5f28 100644
--- a/drivers/mmc/au1xmmc.c
+++ b/drivers/mmc/au1xmmc.c
@@ -56,12 +56,11 @@
 #define DRIVER_NAME "au1xxx-mmc"
 
 /* Set this to enable special debugging macros */
-/* #define MMC_DEBUG */
 
-#ifdef MMC_DEBUG
-#define DEBUG(fmt, idx, args...) printk("au1xx(%d): DEBUG: " fmt, idx, ##args)
+#ifdef DEBUG
+#define DBG(fmt, idx, args...) printk("au1xx(%d): DEBUG: " fmt, idx, ##args)
 #else
-#define DEBUG(fmt, idx, args...)
+#define DBG(fmt, idx, args...)
 #endif
 
 const struct {
@@ -424,18 +423,18 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host)
 			break;
 
 		if (status & SD_STATUS_RC) {
-			DEBUG("RX CRC Error [%d + %d].\n", host->id,
+			DBG("RX CRC Error [%d + %d].\n", host->id,
 					host->pio.len, count);
 			break;
 		}
 
 		if (status & SD_STATUS_RO) {
-			DEBUG("RX Overrun [%d + %d]\n", host->id,
+			DBG("RX Overrun [%d + %d]\n", host->id,
 					host->pio.len, count);
 			break;
 		}
 		else if (status & SD_STATUS_RU) {
-			DEBUG("RX Underrun [%d + %d]\n", host->id,
+			DBG("RX Underrun [%d + %d]\n", host->id,
 					host->pio.len, count);
 			break;
 		}
@@ -721,7 +720,7 @@ static void au1xmmc_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
 {
 	struct au1xmmc_host *host = mmc_priv(mmc);
 
-	DEBUG("set_ios (power=%u, clock=%uHz, vdd=%u, mode=%u)\n",
+	DBG("set_ios (power=%u, clock=%uHz, vdd=%u, mode=%u)\n",
 		  host->id, ios->power_mode, ios->clock, ios->vdd,
 		  ios->bus_mode);
 
@@ -810,7 +809,7 @@ static irqreturn_t au1xmmc_irq(int irq, void *dev_id, struct pt_regs *regs)
 		au1xmmc_receive_pio(host);
 	}
 	else if (status & 0x203FBC70) {
-		DEBUG("Unhandled status %8.8x\n", host->id, status);
+		DBG("Unhandled status %8.8x\n", host->id, status);
 		handled = 0;
 	}
 
@@ -839,7 +838,7 @@ static void au1xmmc_poll_event(unsigned long arg)
 
 	if (host->mrq != NULL) {
 		u32 status = au_readl(HOST_STATUS(host));
-		DEBUG("PENDING - %8.8x\n", host->id, status);
+		DBG("PENDING - %8.8x\n", host->id, status);
 	}
 
 	mod_timer(&host->timer, jiffies + AU1XMMC_DETECT_TIMEOUT);
diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c
new file mode 100644
index 000000000000..ffb7f55d3467
--- /dev/null
+++ b/drivers/mmc/imxmmc.c
@@ -0,0 +1,1096 @@
1/*
2 * linux/drivers/mmc/imxmmc.c - Motorola i.MX MMCI driver
3 *
4 * Copyright (C) 2004 Sascha Hauer, Pengutronix <sascha@saschahauer.de>
5 * Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
6 *
7 * derived from pxamci.c by Russell King
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * 2005-04-17 Pavel Pisa <pisa@cmp.felk.cvut.cz>
14 * Changed to conform to the redesigned i.MX scatter-gather DMA interface
15 *
16 * 2005-11-04 Pavel Pisa <pisa@cmp.felk.cvut.cz>
17 * Updated for 2.6.14 kernel
18 *
19 * 2005-12-13 Jay Monkman <jtm@smoothsmoothie.com>
20 * Found and corrected problems in the write path
21 *
22 * 2005-12-30 Pavel Pisa <pisa@cmp.felk.cvut.cz>
23 * The event handling rewritten the right way as a softirq.
24 * Added many ugly hacks and delays to overcome SDHC
25 * deficiencies
26 *
27 */
28#include <linux/config.h>
29
30#ifdef CONFIG_MMC_DEBUG
31#define DEBUG
32#else
33#undef DEBUG
34#endif
35
36#include <linux/module.h>
37#include <linux/init.h>
38#include <linux/ioport.h>
39#include <linux/platform_device.h>
40#include <linux/interrupt.h>
41#include <linux/blkdev.h>
42#include <linux/dma-mapping.h>
43#include <linux/mmc/host.h>
44#include <linux/mmc/card.h>
45#include <linux/mmc/protocol.h>
46#include <linux/delay.h>
47
48#include <asm/dma.h>
49#include <asm/io.h>
50#include <asm/irq.h>
51#include <asm/sizes.h>
52#include <asm/arch/mmc.h>
53#include <asm/arch/imx-dma.h>
54
55#include "imxmmc.h"
56
57#define DRIVER_NAME "imx-mmc"
58
59#define IMXMCI_INT_MASK_DEFAULT (INT_MASK_BUF_READY | INT_MASK_DATA_TRAN | \
60 INT_MASK_WRITE_OP_DONE | INT_MASK_END_CMD_RES | \
61 INT_MASK_AUTO_CARD_DETECT | INT_MASK_DAT0_EN | INT_MASK_SDIO)
62
63struct imxmci_host {
64 struct mmc_host *mmc;
65 spinlock_t lock;
66 struct resource *res;
67 int irq;
68 imx_dmach_t dma;
69 unsigned int clkrt;
70 unsigned int cmdat;
71 volatile unsigned int imask;
72 unsigned int power_mode;
73 unsigned int present;
74 struct imxmmc_platform_data *pdata;
75
76 struct mmc_request *req;
77 struct mmc_command *cmd;
78 struct mmc_data *data;
79
80 struct timer_list timer;
81 struct tasklet_struct tasklet;
82 unsigned int status_reg;
83 unsigned long pending_events;
84 /* Next two fields are there for CPU-driven transfers to overcome SDHC deficiencies */
85 u16 *data_ptr;
86 unsigned int data_cnt;
87 atomic_t stuck_timeout;
88
89 unsigned int dma_nents;
90 unsigned int dma_size;
91 unsigned int dma_dir;
92 int dma_allocated;
93
94 unsigned char actual_bus_width;
95};
96
97#define IMXMCI_PEND_IRQ_b 0
98#define IMXMCI_PEND_DMA_END_b 1
99#define IMXMCI_PEND_DMA_ERR_b 2
100#define IMXMCI_PEND_WAIT_RESP_b 3
101#define IMXMCI_PEND_DMA_DATA_b 4
102#define IMXMCI_PEND_CPU_DATA_b 5
103#define IMXMCI_PEND_CARD_XCHG_b 6
104#define IMXMCI_PEND_SET_INIT_b 7
105
106#define IMXMCI_PEND_IRQ_m (1 << IMXMCI_PEND_IRQ_b)
107#define IMXMCI_PEND_DMA_END_m (1 << IMXMCI_PEND_DMA_END_b)
108#define IMXMCI_PEND_DMA_ERR_m (1 << IMXMCI_PEND_DMA_ERR_b)
109#define IMXMCI_PEND_WAIT_RESP_m (1 << IMXMCI_PEND_WAIT_RESP_b)
110#define IMXMCI_PEND_DMA_DATA_m (1 << IMXMCI_PEND_DMA_DATA_b)
111#define IMXMCI_PEND_CPU_DATA_m (1 << IMXMCI_PEND_CPU_DATA_b)
112#define IMXMCI_PEND_CARD_XCHG_m (1 << IMXMCI_PEND_CARD_XCHG_b)
113#define IMXMCI_PEND_SET_INIT_m (1 << IMXMCI_PEND_SET_INIT_b)
114
115static void imxmci_stop_clock(struct imxmci_host *host)
116{
117 int i = 0;
118 MMC_STR_STP_CLK &= ~STR_STP_CLK_START_CLK;
119 while(i < 0x1000) {
120 if(!(i & 0x7f))
121 MMC_STR_STP_CLK |= STR_STP_CLK_STOP_CLK;
122
123 if(!(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)) {
124 /* Check twice before cut */
125 if(!(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN))
126 return;
127 }
128
129 i++;
130 }
131 dev_dbg(mmc_dev(host->mmc), "imxmci_stop_clock blocked, no luck\n");
132}
133
134static void imxmci_start_clock(struct imxmci_host *host)
135{
136 int i = 0;
137 MMC_STR_STP_CLK &= ~STR_STP_CLK_STOP_CLK;
138 while(i < 0x1000) {
139 if(!(i & 0x7f))
140 MMC_STR_STP_CLK |= STR_STP_CLK_START_CLK;
141
142 if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN) {
143 /* Check twice before cut */
144 if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)
145 return;
146 }
147
148 i++;
149 }
150 dev_dbg(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n");
151}
152
153static void imxmci_softreset(void)
154{
155 /* reset sequence */
156 MMC_STR_STP_CLK = 0x8;
157 MMC_STR_STP_CLK = 0xD;
158 MMC_STR_STP_CLK = 0x5;
159 MMC_STR_STP_CLK = 0x5;
160 MMC_STR_STP_CLK = 0x5;
161 MMC_STR_STP_CLK = 0x5;
162 MMC_STR_STP_CLK = 0x5;
163 MMC_STR_STP_CLK = 0x5;
164 MMC_STR_STP_CLK = 0x5;
165 MMC_STR_STP_CLK = 0x5;
166
167 MMC_RES_TO = 0xff;
168 MMC_BLK_LEN = 512;
169 MMC_NOB = 1;
170}
171
172static int imxmci_busy_wait_for_status(struct imxmci_host *host,
173 unsigned int *pstat, unsigned int stat_mask,
174 int timeout, const char *where)
175{
176 int loops=0;
177 while(!(*pstat & stat_mask)) {
178 loops+=2;
179 if(loops >= timeout) {
180 dev_dbg(mmc_dev(host->mmc), "busy wait timeout in %s, STATUS = 0x%x (0x%x)\n",
181 where, *pstat, stat_mask);
182 return -1;
183 }
184 udelay(2);
185 *pstat |= MMC_STATUS;
186 }
187 if(!loops)
188 return 0;
189
190 dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
191 loops, where, *pstat, stat_mask);
192 return loops;
193}
194
195static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
196{
197 unsigned int nob = data->blocks;
198 unsigned int blksz = 1 << data->blksz_bits;
199 unsigned int datasz = nob * blksz;
200 int i;
201
202 if (data->flags & MMC_DATA_STREAM)
203 nob = 0xffff;
204
205 host->data = data;
206 data->bytes_xfered = 0;
207
208 MMC_NOB = nob;
209 MMC_BLK_LEN = blksz;
210
211 /*
212 * DMA cannot be used for small block sizes; we have to use CPU-driven transfers otherwise.
213 * We are in big trouble for non-512 byte transfers according to the note in paragraph
214 * 20.6.7 of the User Manual anyway, but we need to be able to transfer the SCR at least.
215 * The situation is even more complex in reality. The SDHC is not able to handle
216 * partial FIFO fills and reads well. The length has to be rounded up to a burst size multiple.
217 * This is required for the SCR read at least.
218 */
219 if (datasz < 64) {
220 host->dma_size = datasz;
221 if (data->flags & MMC_DATA_READ) {
222 host->dma_dir = DMA_FROM_DEVICE;
223
224 /* Hack to enable read SCR */
225 if(datasz < 16) {
226 MMC_NOB = 1;
227 MMC_BLK_LEN = 16;
228 }
229 } else {
230 host->dma_dir = DMA_TO_DEVICE;
231 }
232
233 /* Convert back to virtual address */
234 host->data_ptr = (u16*)(page_address(data->sg->page) + data->sg->offset);
235 host->data_cnt = 0;
236
237 clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
238 set_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);
239
240 return;
241 }
242
243 if (data->flags & MMC_DATA_READ) {
244 host->dma_dir = DMA_FROM_DEVICE;
245 host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
246 data->sg_len, host->dma_dir);
247
248 imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
249 host->res->start + MMC_BUFFER_ACCESS_OFS, DMA_MODE_READ);
250
251 /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/
252 CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN;
253 } else {
254 host->dma_dir = DMA_TO_DEVICE;
255
256 host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
257 data->sg_len, host->dma_dir);
258
259 imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
260 host->res->start + MMC_BUFFER_ACCESS_OFS, DMA_MODE_WRITE);
261
262 /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/
263 CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN;
264 }
265
266#if 1 /* This code is there only for consistency checking and can be disabled in future */
267 host->dma_size = 0;
268 for(i=0; i<host->dma_nents; i++)
269 host->dma_size+=data->sg[i].length;
270
271 if (datasz > host->dma_size) {
272 dev_err(mmc_dev(host->mmc), "imxmci_setup_data datasz 0x%x > 0x%x dm_size\n",
273 datasz, host->dma_size);
274 }
275#endif
276
277 host->dma_size = datasz;
278
279 wmb();
280
281 if(host->actual_bus_width == MMC_BUS_WIDTH_4)
282 BLR(host->dma) = 0; /* burst 64 byte read / 64 bytes write */
283 else
284 BLR(host->dma) = 16; /* burst 16 byte read / 16 bytes write */
285
286 RSSR(host->dma) = DMA_REQ_SDHC;
287
288 set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
289 clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);
290
291 /* start DMA engine for read, write is delayed after initial response */
292 if (host->dma_dir == DMA_FROM_DEVICE) {
293 imx_dma_enable(host->dma);
294 }
295}
296
297static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, unsigned int cmdat)
298{
299 unsigned long flags;
300 u32 imask;
301
302 WARN_ON(host->cmd != NULL);
303 host->cmd = cmd;
304
305 if (cmd->flags & MMC_RSP_BUSY)
306 cmdat |= CMD_DAT_CONT_BUSY;
307
308 switch (mmc_resp_type(cmd)) {
309 case MMC_RSP_R1: /* short CRC, OPCODE */
310 case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
311 cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R1;
312 break;
313 case MMC_RSP_R2: /* long 136 bit + CRC */
314 cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R2;
315 break;
316 case MMC_RSP_R3: /* short */
317 cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R3;
318 break;
319 case MMC_RSP_R6: /* short CRC */
320 cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R6;
321 break;
322 default:
323 break;
324 }
325
326 if ( test_and_clear_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events) )
327 cmdat |= CMD_DAT_CONT_INIT; /* This command needs init */
328
329 if ( host->actual_bus_width == MMC_BUS_WIDTH_4 )
330 cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
331
332 MMC_CMD = cmd->opcode;
333 MMC_ARGH = cmd->arg >> 16;
334 MMC_ARGL = cmd->arg & 0xffff;
335 MMC_CMD_DAT_CONT = cmdat;
336
337 atomic_set(&host->stuck_timeout, 0);
338 set_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events);
339
340
341 imask = IMXMCI_INT_MASK_DEFAULT;
342 imask &= ~INT_MASK_END_CMD_RES;
343 if ( cmdat & CMD_DAT_CONT_DATA_ENABLE ) {
344 /*imask &= ~INT_MASK_BUF_READY;*/
345 imask &= ~INT_MASK_DATA_TRAN;
346 if ( cmdat & CMD_DAT_CONT_WRITE )
347 imask &= ~INT_MASK_WRITE_OP_DONE;
348 if(test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
349 imask &= ~INT_MASK_BUF_READY;
350 }
351
352 spin_lock_irqsave(&host->lock, flags);
353 host->imask = imask;
354 MMC_INT_MASK = host->imask;
355 spin_unlock_irqrestore(&host->lock, flags);
356
357 dev_dbg(mmc_dev(host->mmc), "CMD%02d (0x%02x) mask set to 0x%04x\n",
358 cmd->opcode, cmd->opcode, imask);
359
360 imxmci_start_clock(host);
361}
362
363static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *req)
364{
365 unsigned long flags;
366
367 spin_lock_irqsave(&host->lock, flags);
368
369 host->pending_events &= ~(IMXMCI_PEND_WAIT_RESP_m | IMXMCI_PEND_DMA_END_m |
370 IMXMCI_PEND_DMA_DATA_m | IMXMCI_PEND_CPU_DATA_m);
371
372 host->imask = IMXMCI_INT_MASK_DEFAULT;
373 MMC_INT_MASK = host->imask;
374
375 spin_unlock_irqrestore(&host->lock, flags);
376
377 host->req = NULL;
378 host->cmd = NULL;
379 host->data = NULL;
380 mmc_request_done(host->mmc, req);
381}
382
383static int imxmci_finish_data(struct imxmci_host *host, unsigned int stat)
384{
385 struct mmc_data *data = host->data;
386 int data_error;
387
388 if(test_and_clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)){
389 imx_dma_disable(host->dma);
390 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents,
391 host->dma_dir);
392 }
393
394 if ( stat & STATUS_ERR_MASK ) {
395 dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",stat);
396 if(stat & (STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR))
397 data->error = MMC_ERR_BADCRC;
398 else if(stat & STATUS_TIME_OUT_READ)
399 data->error = MMC_ERR_TIMEOUT;
400 else
401 data->error = MMC_ERR_FAILED;
402 } else {
403 data->bytes_xfered = host->dma_size;
404 }
405
406 data_error = data->error;
407
408 host->data = NULL;
409
410 return data_error;
411}
412
413static int imxmci_cmd_done(struct imxmci_host *host, unsigned int stat)
414{
415 struct mmc_command *cmd = host->cmd;
416 int i;
417 u32 a,b,c;
418 struct mmc_data *data = host->data;
419
420 if (!cmd)
421 return 0;
422
423 host->cmd = NULL;
424
425 if (stat & STATUS_TIME_OUT_RESP) {
426 dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
427 cmd->error = MMC_ERR_TIMEOUT;
428 } else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
429 dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
430 cmd->error = MMC_ERR_BADCRC;
431 }
432
433 if(cmd->flags & MMC_RSP_PRESENT) {
434 if(cmd->flags & MMC_RSP_136) {
435 for (i = 0; i < 4; i++) {
436 u32 a = MMC_RES_FIFO & 0xffff;
437 u32 b = MMC_RES_FIFO & 0xffff;
438 cmd->resp[i] = a<<16 | b;
439 }
440 } else {
441 a = MMC_RES_FIFO & 0xffff;
442 b = MMC_RES_FIFO & 0xffff;
443 c = MMC_RES_FIFO & 0xffff;
444 cmd->resp[0] = a<<24 | b<<8 | c>>8;
445 }
446 }
447
448 dev_dbg(mmc_dev(host->mmc), "RESP 0x%08x, 0x%08x, 0x%08x, 0x%08x, error %d\n",
449 cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3], cmd->error);
450
451 if (data && (cmd->error == MMC_ERR_NONE) && !(stat & STATUS_ERR_MASK)) {
452 if (host->req->data->flags & MMC_DATA_WRITE) {
453
454 /* Wait for FIFO to be empty before starting DMA write */
455
456 stat = MMC_STATUS;
457 if(imxmci_busy_wait_for_status(host, &stat,
458 STATUS_APPL_BUFF_FE,
459 40, "imxmci_cmd_done DMA WR") < 0) {
460 cmd->error = MMC_ERR_FIFO;
461 imxmci_finish_data(host, stat);
462 if(host->req)
463 imxmci_finish_request(host, host->req);
464 dev_warn(mmc_dev(host->mmc), "STATUS = 0x%04x\n",
465 stat);
466 return 0;
467 }
468
469 if(test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
470 imx_dma_enable(host->dma);
471 }
472 }
473 } else {
474 struct mmc_request *req;
475 imxmci_stop_clock(host);
476 req = host->req;
477
478 if(data)
479 imxmci_finish_data(host, stat);
480
481 if( req ) {
482 imxmci_finish_request(host, req);
483 } else {
484 dev_warn(mmc_dev(host->mmc), "imxmci_cmd_done: no request to finish\n");
485 }
486 }
487
488 return 1;
489}
490
491static int imxmci_data_done(struct imxmci_host *host, unsigned int stat)
492{
493 struct mmc_data *data = host->data;
494 int data_error;
495
496 if (!data)
497 return 0;
498
499 data_error = imxmci_finish_data(host, stat);
500
501 if (host->req->stop && (data_error == MMC_ERR_NONE)) {
502 imxmci_stop_clock(host);
503 imxmci_start_cmd(host, host->req->stop, 0);
504 } else {
505 struct mmc_request *req;
506 req = host->req;
507 if( req ) {
508 imxmci_finish_request(host, req);
509 } else {
510 dev_warn(mmc_dev(host->mmc), "imxmci_data_done: no request to finish\n");
511 }
512 }
513
514 return 1;
515}
516
517static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
518{
519 int i;
520 int burst_len;
521 int flush_len;
522 int trans_done = 0;
523 unsigned int stat = *pstat;
524
525 if(host->actual_bus_width == MMC_BUS_WIDTH_4)
526 burst_len = 16;
527 else
528 burst_len = 64;
529
530 /* This is unfortunately required */
531 dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n",
532 stat);
533
534 if(host->dma_dir == DMA_FROM_DEVICE) {
535 imxmci_busy_wait_for_status(host, &stat,
536 STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE,
537 20, "imxmci_cpu_driven_data read");
538
539 while((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) &&
540 (host->data_cnt < host->dma_size)) {
541 if(burst_len >= host->dma_size - host->data_cnt) {
542 flush_len = burst_len;
543 burst_len = host->dma_size - host->data_cnt;
544 flush_len -= burst_len;
545 host->data_cnt = host->dma_size;
546 trans_done = 1;
547 } else {
548 flush_len = 0;
549 host->data_cnt += burst_len;
550 }
551
552 for(i = burst_len; i>=2 ; i-=2) {
553 *(host->data_ptr++) = MMC_BUFFER_ACCESS;
554 udelay(20); /* required for clocks < 8MHz */
555 }
556
557 if(i == 1)
558 *(u8*)(host->data_ptr) = MMC_BUFFER_ACCESS;
559
560 stat = MMC_STATUS;
561
562 /* Flush extra bytes from FIFO */
563 while(flush_len >= 2){
564 flush_len -= 2;
565 i = MMC_BUFFER_ACCESS;
566 stat = MMC_STATUS;
567 stat &= ~STATUS_CRC_READ_ERR; /* Stupid but required there */
568 }
569
570 dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read burst %d STATUS = 0x%x\n",
571 burst_len, stat);
572 }
573 } else {
574 imxmci_busy_wait_for_status(host, &stat,
575 STATUS_APPL_BUFF_FE,
576 20, "imxmci_cpu_driven_data write");
577
578 while((stat & STATUS_APPL_BUFF_FE) &&
579 (host->data_cnt < host->dma_size)) {
580 if(burst_len >= host->dma_size - host->data_cnt) {
581 burst_len = host->dma_size - host->data_cnt;
582 host->data_cnt = host->dma_size;
583 trans_done = 1;
584 } else {
585 host->data_cnt += burst_len;
586 }
587
588 for(i = burst_len; i>0 ; i-=2)
589 MMC_BUFFER_ACCESS = *(host->data_ptr++);
590
591 stat = MMC_STATUS;
592
593 dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data write burst %d STATUS = 0x%x\n",
594 burst_len, stat);
595 }
596 }
597
598 *pstat = stat;
599
600 return trans_done;
601}
602
603static void imxmci_dma_irq(int dma, void *devid, struct pt_regs *regs)
604{
605 struct imxmci_host *host = devid;
606 uint32_t stat = MMC_STATUS;
607
608 atomic_set(&host->stuck_timeout, 0);
609 host->status_reg = stat;
610 set_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
611 tasklet_schedule(&host->tasklet);
612}
613
614static irqreturn_t imxmci_irq(int irq, void *devid, struct pt_regs *regs)
615{
616 struct imxmci_host *host = devid;
617 uint32_t stat = MMC_STATUS;
618 int handled = 1;
619
620 MMC_INT_MASK = host->imask | INT_MASK_SDIO | INT_MASK_AUTO_CARD_DETECT;
621
622 atomic_set(&host->stuck_timeout, 0);
623 host->status_reg = stat;
624 set_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
625 tasklet_schedule(&host->tasklet);
626
627 return IRQ_RETVAL(handled);
628}
629
630static void imxmci_tasklet_fnc(unsigned long data)
631{
632 struct imxmci_host *host = (struct imxmci_host *)data;
633 u32 stat;
634 unsigned int data_dir_mask = 0; /* STATUS_WR_CRC_ERROR_CODE_MASK */
635 int timeout = 0;
636
637 if(atomic_read(&host->stuck_timeout) > 4) {
638 char *what;
639 timeout = 1;
640 stat = MMC_STATUS;
641 host->status_reg = stat;
642 if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
643 if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
644 what = "RESP+DMA";
645 else
646 what = "RESP";
647 else
648 if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
649 if(test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events))
650 what = "DATA";
651 else
652 what = "DMA";
653 else
654 what = "???";
655
656 dev_err(mmc_dev(host->mmc), "%s TIMEOUT, hardware stuck STATUS = 0x%04x IMASK = 0x%04x\n",
657 what, stat, MMC_INT_MASK);
658 dev_err(mmc_dev(host->mmc), "CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n",
659 MMC_CMD_DAT_CONT, MMC_BLK_LEN, MMC_NOB, CCR(host->dma));
660 dev_err(mmc_dev(host->mmc), "CMD%d, bus %d-bit, dma_size = 0x%x\n",
661 host->cmd?host->cmd->opcode:0, 1<<host->actual_bus_width, host->dma_size);
662 }
663
664 if(!host->present || timeout)
665 host->status_reg = STATUS_TIME_OUT_RESP | STATUS_TIME_OUT_READ |
666 STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR;
667
668 if(test_bit(IMXMCI_PEND_IRQ_b, &host->pending_events) || timeout) {
669 clear_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
670
671 stat = MMC_STATUS;
672 /*
673 * This is not required in theory, but there is a chance of missing some flag
674 * which is cleared automatically by the mask write; the original Freescale code keeps
675 * stat from IRQ time, so do we
676 */
677 stat |= host->status_reg;
678
679 if(test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
680 imxmci_busy_wait_for_status(host, &stat,
681 STATUS_END_CMD_RESP | STATUS_ERR_MASK,
682 20, "imxmci_tasklet_fnc resp (ERRATUM #4)");
683 }
684
685 if(stat & (STATUS_END_CMD_RESP | STATUS_ERR_MASK)) {
686 if(test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
687 imxmci_cmd_done(host, stat);
688 if(host->data && (stat & STATUS_ERR_MASK))
689 imxmci_data_done(host, stat);
690 }
691
692 if(test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) {
693 stat |= MMC_STATUS;
694 if(imxmci_cpu_driven_data(host, &stat)){
695 if(test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
696 imxmci_cmd_done(host, stat);
697 atomic_clear_mask(IMXMCI_PEND_IRQ_m|IMXMCI_PEND_CPU_DATA_m,
698 &host->pending_events);
699 imxmci_data_done(host, stat);
700 }
701 }
702 }
703
704 if(test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events) &&
705 !test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
706
707 stat = MMC_STATUS;
708 /* Same as above */
709 stat |= host->status_reg;
710
711 if(host->dma_dir == DMA_TO_DEVICE) {
712 data_dir_mask = STATUS_WRITE_OP_DONE;
713 } else {
714 data_dir_mask = STATUS_DATA_TRANS_DONE;
715 }
716
717 imxmci_busy_wait_for_status(host, &stat,
718 data_dir_mask,
719 50, "imxmci_tasklet_fnc data");
720
721 if(stat & data_dir_mask) {
722 clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
723 imxmci_data_done(host, stat);
724 }
725 }
726
727 if(test_and_clear_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events)) {
728
729 if(host->cmd)
730 imxmci_cmd_done(host, STATUS_TIME_OUT_RESP);
731
732 if(host->data)
733 imxmci_data_done(host, STATUS_TIME_OUT_READ |
734 STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR);
735
736 if(host->req)
737 imxmci_finish_request(host, host->req);
738
739 mmc_detect_change(host->mmc, msecs_to_jiffies(100));
740
741 }
742}
743
744static void imxmci_request(struct mmc_host *mmc, struct mmc_request *req)
745{
746 struct imxmci_host *host = mmc_priv(mmc);
747 unsigned int cmdat;
748
749 WARN_ON(host->req != NULL);
750
751 host->req = req;
752
753 cmdat = 0;
754
755 if (req->data) {
756 imxmci_setup_data(host, req->data);
757
758 cmdat |= CMD_DAT_CONT_DATA_ENABLE;
759
760 if (req->data->flags & MMC_DATA_WRITE)
761 cmdat |= CMD_DAT_CONT_WRITE;
762
763 if (req->data->flags & MMC_DATA_STREAM) {
764 cmdat |= CMD_DAT_CONT_STREAM_BLOCK;
765 }
766 }
767
768 imxmci_start_cmd(host, req->cmd, cmdat);
769}
770
771#define CLK_RATE 19200000
772
773static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
774{
775 struct imxmci_host *host = mmc_priv(mmc);
776 int prescaler;
777
778 dev_dbg(mmc_dev(host->mmc), "clock %u power %u vdd %u width %u\n",
779 ios->clock, ios->power_mode, ios->vdd,
780 (ios->bus_width==MMC_BUS_WIDTH_4)?4:1);
781
782 if( ios->bus_width==MMC_BUS_WIDTH_4 ) {
783 host->actual_bus_width = MMC_BUS_WIDTH_4;
784 imx_gpio_mode(PB11_PF_SD_DAT3);
785 }else{
786 host->actual_bus_width = MMC_BUS_WIDTH_1;
787 imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
788 }
789
790 if ( host->power_mode != ios->power_mode ) {
791 switch (ios->power_mode) {
792 case MMC_POWER_OFF:
793 break;
794 case MMC_POWER_UP:
795 set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
796 break;
797 case MMC_POWER_ON:
798 break;
799 }
800 host->power_mode = ios->power_mode;
801 }
802
803 if ( ios->clock ) {
804 unsigned int clk;
805
806 /* The prescaler is 5 for PERCLK2 equal to 96MHz,
807 * which gives 96MHz / 5 = 19.2 MHz
808 */
809 clk=imx_get_perclk2();
810 prescaler=(clk+(CLK_RATE*7)/8)/CLK_RATE;
811 switch(prescaler) {
812 case 0:
813 case 1: prescaler = 0;
814 break;
815 case 2: prescaler = 1;
816 break;
817 case 3: prescaler = 2;
818 break;
819 case 4: prescaler = 4;
820 break;
821 default:
822 case 5: prescaler = 5;
823 break;
824 }
825
826 dev_dbg(mmc_dev(host->mmc), "PERCLK2 %d MHz -> prescaler %d\n",
827 clk, prescaler);
828
829 for(clk=0; clk<8; clk++) {
830 int x;
831 x = CLK_RATE / (1<<clk);
832 if( x <= ios->clock)
833 break;
834 }
835
836 MMC_STR_STP_CLK |= STR_STP_CLK_ENABLE; /* enable controller */
837
838 imxmci_stop_clock(host);
839 MMC_CLK_RATE = (prescaler<<3) | clk;
840 imxmci_start_clock(host);
841
842 dev_dbg(mmc_dev(host->mmc), "MMC_CLK_RATE: 0x%08x\n", MMC_CLK_RATE);
843 } else {
844 imxmci_stop_clock(host);
845 }
846}
847
848static struct mmc_host_ops imxmci_ops = {
849 .request = imxmci_request,
850 .set_ios = imxmci_set_ios,
851};
852
853static struct resource *platform_device_resource(struct platform_device *dev, unsigned int mask, int nr)
854{
855 int i;
856
857 for (i = 0; i < dev->num_resources; i++)
858 if (dev->resource[i].flags == mask && nr-- == 0)
859 return &dev->resource[i];
860 return NULL;
861}
862
863static int platform_device_irq(struct platform_device *dev, int nr)
864{
865 int i;
866
867 for (i = 0; i < dev->num_resources; i++)
868 if (dev->resource[i].flags == IORESOURCE_IRQ && nr-- == 0)
869 return dev->resource[i].start;
870 return NO_IRQ;
871}
872
873static void imxmci_check_status(unsigned long data)
874{
875 struct imxmci_host *host = (struct imxmci_host *)data;
876
877 if( host->pdata->card_present() != host->present ) {
878 host->present ^= 1;
879 dev_info(mmc_dev(host->mmc), "card %s\n",
880 host->present ? "inserted" : "removed");
881
882 set_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events);
883 tasklet_schedule(&host->tasklet);
884 }
885
886 if(test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events) ||
887 test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
888 atomic_inc(&host->stuck_timeout);
889 if(atomic_read(&host->stuck_timeout) > 4)
890 tasklet_schedule(&host->tasklet);
891 } else {
892 atomic_set(&host->stuck_timeout, 0);
893
894 }
895
896 mod_timer(&host->timer, jiffies + (HZ>>1));
897}
898
899static int imxmci_probe(struct platform_device *pdev)
900{
901 struct mmc_host *mmc;
902 struct imxmci_host *host = NULL;
903 struct resource *r;
904 int ret = 0, irq;
905
906 printk(KERN_INFO "i.MX mmc driver\n");
907
908 r = platform_device_resource(pdev, IORESOURCE_MEM, 0);
909 irq = platform_device_irq(pdev, 0);
910 if (!r || irq == NO_IRQ)
911 return -ENXIO;
912
913 r = request_mem_region(r->start, 0x100, "IMXMCI");
914 if (!r)
915 return -EBUSY;
916
917 mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev);
918 if (!mmc) {
919 ret = -ENOMEM;
920 goto out;
921 }
922
923 mmc->ops = &imxmci_ops;
924 mmc->f_min = 150000;
925 mmc->f_max = CLK_RATE/2;
926 mmc->ocr_avail = MMC_VDD_32_33;
927 mmc->caps |= MMC_CAP_4_BIT_DATA;
928
929 /* MMC core transfer sizes tunable parameters */
930 mmc->max_hw_segs = 64;
931 mmc->max_phys_segs = 64;
932 mmc->max_sectors = 64; /* default 1 << (PAGE_CACHE_SHIFT - 9) */
933 mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */
934
935 host = mmc_priv(mmc);
936 host->mmc = mmc;
937 host->dma_allocated = 0;
938 host->pdata = pdev->dev.platform_data;
939
940 spin_lock_init(&host->lock);
941 host->res = r;
942 host->irq = irq;
943
944 imx_gpio_mode(PB8_PF_SD_DAT0);
945 imx_gpio_mode(PB9_PF_SD_DAT1);
946 imx_gpio_mode(PB10_PF_SD_DAT2);
947 /* Configured as GPIO with pull-up to ensure the right MMC card mode */
948 /* Switched to PB11_PF_SD_DAT3 if 4 bit bus is configured */
949 imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
950 /* imx_gpio_mode(PB11_PF_SD_DAT3); */
951 imx_gpio_mode(PB12_PF_SD_CLK);
952 imx_gpio_mode(PB13_PF_SD_CMD);
953
954 imxmci_softreset();
955
956 if ( MMC_REV_NO != 0x390 ) {
957 dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
958 MMC_REV_NO);
959 goto out;
960 }
961
962 MMC_READ_TO = 0x2db4; /* recommended in data sheet */
963
964 host->imask = IMXMCI_INT_MASK_DEFAULT;
965 MMC_INT_MASK = host->imask;
966
967
968 if(imx_dma_request_by_prio(&host->dma, DRIVER_NAME, DMA_PRIO_LOW)<0){
969 dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n");
970 ret = -EBUSY;
971 goto out;
972 }
973 host->dma_allocated=1;
974 imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host);
975
976 tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host);
977 host->status_reg=0;
978 host->pending_events=0;
979
980 ret = request_irq(host->irq, imxmci_irq, 0, DRIVER_NAME, host);
981 if (ret)
982 goto out;
983
984 host->present = host->pdata->card_present();
985 init_timer(&host->timer);
986 host->timer.data = (unsigned long)host;
987 host->timer.function = imxmci_check_status;
988 add_timer(&host->timer);
989 mod_timer(&host->timer, jiffies + (HZ>>1));
990
991 platform_set_drvdata(pdev, mmc);
992
993 mmc_add_host(mmc);
994
995 return 0;
996
997out:
998 if (host) {
999 if(host->dma_allocated){
1000 imx_dma_free(host->dma);
1001 host->dma_allocated=0;
1002 }
1003 }
1004 if (mmc)
1005 mmc_free_host(mmc);
1006 release_resource(r);
1007 return ret;
1008}
1009
1010static int imxmci_remove(struct platform_device *pdev)
1011{
1012 struct mmc_host *mmc = platform_get_drvdata(pdev);
1013
1014 platform_set_drvdata(pdev, NULL);
1015
1016 if (mmc) {
1017 struct imxmci_host *host = mmc_priv(mmc);
1018
1019 tasklet_disable(&host->tasklet);
1020
1021 del_timer_sync(&host->timer);
1022 mmc_remove_host(mmc);
1023
1024 free_irq(host->irq, host);
1025 if(host->dma_allocated){
1026 imx_dma_free(host->dma);
1027 host->dma_allocated=0;
1028 }
1029
1030 tasklet_kill(&host->tasklet);
1031
1032 release_resource(host->res);
1033
1034 mmc_free_host(mmc);
1035 }
1036 return 0;
1037}
1038
1039#ifdef CONFIG_PM
1040static int imxmci_suspend(struct platform_device *dev, pm_message_t state)
1041{
1042 struct mmc_host *mmc = platform_get_drvdata(dev);
1043 int ret = 0;
1044
1045 if (mmc)
1046 ret = mmc_suspend_host(mmc, state);
1047
1048 return ret;
1049}
1050
1051static int imxmci_resume(struct platform_device *dev)
1052{
1053 struct mmc_host *mmc = platform_get_drvdata(dev);
1054 struct imxmci_host *host;
1055 int ret = 0;
1056
1057 if (mmc) {
1058 host = mmc_priv(mmc);
1059 if(host)
1060 set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
1061 ret = mmc_resume_host(mmc);
1062 }
1063
1064 return ret;
1065}
1066#else
1067#define imxmci_suspend NULL
1068#define imxmci_resume NULL
1069#endif /* CONFIG_PM */
1070
1071static struct platform_driver imxmci_driver = {
1072 .probe = imxmci_probe,
1073 .remove = imxmci_remove,
1074 .suspend = imxmci_suspend,
1075 .resume = imxmci_resume,
1076 .driver = {
1077 .name = DRIVER_NAME,
1078 }
1079};
1080
1081static int __init imxmci_init(void)
1082{
1083 return platform_driver_register(&imxmci_driver);
1084}
1085
1086static void __exit imxmci_exit(void)
1087{
1088 platform_driver_unregister(&imxmci_driver);
1089}
1090
1091module_init(imxmci_init);
1092module_exit(imxmci_exit);
1093
1094MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
1095MODULE_AUTHOR("Sascha Hauer, Pengutronix");
1096MODULE_LICENSE("GPL");
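
For illustration only, not taken from the patch: the clock setup in imxmci_set_ios() above works in two stages: PERCLK2 is prescaled towards the fixed 19.2 MHz CLK_RATE, then a power-of-two divider is picked so the resulting bus clock does not exceed ios->clock. A standalone sketch of that calculation; the 96 MHz PERCLK2 and 400 kHz request are assumed example values.

#include <stdio.h>

#define CLK_RATE 19200000

int main(void)
{
	int perclk2 = 96000000;		/* assumed PERCLK2 of 96 MHz */
	int wanted  = 400000;		/* e.g. a 400 kHz identification clock */
	int prescaler, clk;

	/* Rounded division of PERCLK2 by CLK_RATE; 96 MHz yields 5 here.
	 * (The driver then maps this raw value through its switch statement.) */
	prescaler = (perclk2 + (CLK_RATE * 7) / 8) / CLK_RATE;

	/* Smallest power-of-two divider bringing 19.2 MHz down to <= wanted. */
	for (clk = 0; clk < 8; clk++)
		if (CLK_RATE / (1 << clk) <= wanted)
			break;

	/* With these numbers: prescaler = 5, clk = 6 -> ~300 kHz bus clock. */
	printf("prescaler = %d, clk = %d -> %d Hz\n",
	       prescaler, clk, CLK_RATE / (1 << clk));
	return 0;
}
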
diff --git a/drivers/mmc/imxmmc.h b/drivers/mmc/imxmmc.h
new file mode 100644
index 000000000000..e5339e334dbb
--- /dev/null
+++ b/drivers/mmc/imxmmc.h
@@ -0,0 +1,67 @@
1
2# define __REG16(x) (*((volatile u16 *)IO_ADDRESS(x)))
3
4#define MMC_STR_STP_CLK __REG16(IMX_MMC_BASE + 0x00)
5#define MMC_STATUS __REG16(IMX_MMC_BASE + 0x04)
6#define MMC_CLK_RATE __REG16(IMX_MMC_BASE + 0x08)
7#define MMC_CMD_DAT_CONT __REG16(IMX_MMC_BASE + 0x0C)
8#define MMC_RES_TO __REG16(IMX_MMC_BASE + 0x10)
9#define MMC_READ_TO __REG16(IMX_MMC_BASE + 0x14)
10#define MMC_BLK_LEN __REG16(IMX_MMC_BASE + 0x18)
11#define MMC_NOB __REG16(IMX_MMC_BASE + 0x1C)
12#define MMC_REV_NO __REG16(IMX_MMC_BASE + 0x20)
13#define MMC_INT_MASK __REG16(IMX_MMC_BASE + 0x24)
14#define MMC_CMD __REG16(IMX_MMC_BASE + 0x28)
15#define MMC_ARGH __REG16(IMX_MMC_BASE + 0x2C)
16#define MMC_ARGL __REG16(IMX_MMC_BASE + 0x30)
17#define MMC_RES_FIFO __REG16(IMX_MMC_BASE + 0x34)
18#define MMC_BUFFER_ACCESS __REG16(IMX_MMC_BASE + 0x38)
19#define MMC_BUFFER_ACCESS_OFS 0x38
20
21
22#define STR_STP_CLK_ENDIAN (1<<5)
23#define STR_STP_CLK_RESET (1<<3)
24#define STR_STP_CLK_ENABLE (1<<2)
25#define STR_STP_CLK_START_CLK (1<<1)
26#define STR_STP_CLK_STOP_CLK (1<<0)
27#define STATUS_CARD_PRESENCE (1<<15)
28#define STATUS_SDIO_INT_ACTIVE (1<<14)
29#define STATUS_END_CMD_RESP (1<<13)
30#define STATUS_WRITE_OP_DONE (1<<12)
31#define STATUS_DATA_TRANS_DONE (1<<11)
32#define STATUS_WR_CRC_ERROR_CODE_MASK (3<<10)
33#define STATUS_CARD_BUS_CLK_RUN (1<<8)
34#define STATUS_APPL_BUFF_FF (1<<7)
35#define STATUS_APPL_BUFF_FE (1<<6)
36#define STATUS_RESP_CRC_ERR (1<<5)
37#define STATUS_CRC_READ_ERR (1<<3)
38#define STATUS_CRC_WRITE_ERR (1<<2)
39#define STATUS_TIME_OUT_RESP (1<<1)
40#define STATUS_TIME_OUT_READ (1<<0)
41#define STATUS_ERR_MASK 0x2f
42#define CLK_RATE_PRESCALER(x) ((x) & 0x7)
43#define CLK_RATE_CLK_RATE(x) (((x) & 0x7) << 3)
44#define CMD_DAT_CONT_CMD_RESP_LONG_OFF (1<<12)
45#define CMD_DAT_CONT_STOP_READWAIT (1<<11)
46#define CMD_DAT_CONT_START_READWAIT (1<<10)
47#define CMD_DAT_CONT_BUS_WIDTH_1 (0<<8)
48#define CMD_DAT_CONT_BUS_WIDTH_4 (2<<8)
49#define CMD_DAT_CONT_INIT (1<<7)
50#define CMD_DAT_CONT_BUSY (1<<6)
51#define CMD_DAT_CONT_STREAM_BLOCK (1<<5)
52#define CMD_DAT_CONT_WRITE (1<<4)
53#define CMD_DAT_CONT_DATA_ENABLE (1<<3)
54#define CMD_DAT_CONT_RESPONSE_FORMAT_R1 (1)
55#define CMD_DAT_CONT_RESPONSE_FORMAT_R2 (2)
56#define CMD_DAT_CONT_RESPONSE_FORMAT_R3 (3)
57#define CMD_DAT_CONT_RESPONSE_FORMAT_R4 (4)
58#define CMD_DAT_CONT_RESPONSE_FORMAT_R5 (5)
59#define CMD_DAT_CONT_RESPONSE_FORMAT_R6 (6)
60#define INT_MASK_AUTO_CARD_DETECT (1<<6)
61#define INT_MASK_DAT0_EN (1<<5)
62#define INT_MASK_SDIO (1<<4)
63#define INT_MASK_BUF_READY (1<<3)
64#define INT_MASK_END_CMD_RES (1<<2)
65#define INT_MASK_WRITE_OP_DONE (1<<1)
66#define INT_MASK_DATA_TRAN (1<<0)
67#define INT_ALL (0x7f)
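
A quick cross-check of the definitions above (illustrative, not part of the header): STATUS_ERR_MASK (0x2f) is exactly the OR of the five error status bits, which is why imxmmc.c can test "stat & STATUS_ERR_MASK" to catch any CRC or timeout condition in one go.

/* Bits repeated from the header above purely for this self-contained check. */
#define STATUS_RESP_CRC_ERR	(1<<5)	/* 0x20 */
#define STATUS_CRC_READ_ERR	(1<<3)	/* 0x08 */
#define STATUS_CRC_WRITE_ERR	(1<<2)	/* 0x04 */
#define STATUS_TIME_OUT_RESP	(1<<1)	/* 0x02 */
#define STATUS_TIME_OUT_READ	(1<<0)	/* 0x01 */
#define STATUS_ERR_MASK		0x2f

_Static_assert(STATUS_ERR_MASK ==
	       (STATUS_RESP_CRC_ERR | STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR |
		STATUS_TIME_OUT_RESP | STATUS_TIME_OUT_READ),
	       "STATUS_ERR_MASK covers exactly the error status bits");
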
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 1888060c5e0c..da6ddd910fc5 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -27,12 +27,6 @@
 
 #include "mmc.h"
 
-#ifdef CONFIG_MMC_DEBUG
-#define DBG(x...) printk(KERN_DEBUG x)
-#else
-#define DBG(x...) do { } while (0)
-#endif
-
 #define CMD_RETRIES 3
 
 /*
@@ -77,8 +71,9 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
 {
 	struct mmc_command *cmd = mrq->cmd;
 	int err = mrq->cmd->error;
-	DBG("MMC: req done (%02x): %d: %08x %08x %08x %08x\n", cmd->opcode,
-		err, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
+	pr_debug("MMC: req done (%02x): %d: %08x %08x %08x %08x\n",
+		 cmd->opcode, err, cmd->resp[0], cmd->resp[1],
+		 cmd->resp[2], cmd->resp[3]);
 
 	if (err && cmd->retries) {
 		cmd->retries--;
@@ -102,8 +97,8 @@ EXPORT_SYMBOL(mmc_request_done);
 void
 mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 {
-	DBG("MMC: starting cmd %02x arg %08x flags %08x\n",
-	    mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
+	pr_debug("MMC: starting cmd %02x arg %08x flags %08x\n",
+		 mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
 
 	WARN_ON(host->card_busy == NULL);
 
@@ -976,8 +971,8 @@ static unsigned int mmc_calculate_clock(struct mmc_host *host)
 		if (!mmc_card_dead(card) && max_dtr > card->csd.max_dtr)
 			max_dtr = card->csd.max_dtr;
 
-	DBG("MMC: selected %d.%03dMHz transfer rate\n",
-	    max_dtr / 1000000, (max_dtr / 1000) % 1000);
+	pr_debug("MMC: selected %d.%03dMHz transfer rate\n",
+		 max_dtr / 1000000, (max_dtr / 1000) % 1000);
 
 	return max_dtr;
 }
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c
index 9fef29d978b5..df7e861e2fc7 100644
--- a/drivers/mmc/mmci.c
+++ b/drivers/mmc/mmci.c
@@ -33,12 +33,8 @@
 
 #define DRIVER_NAME "mmci-pl18x"
 
-#ifdef CONFIG_MMC_DEBUG
 #define DBG(host,fmt,args...)	\
 	pr_debug("%s: %s: " fmt, mmc_hostname(host->mmc), __func__ , args)
-#else
-#define DBG(host,fmt,args...)	do { } while (0)
-#endif
 
 static unsigned int fmax = 515633;
 
diff --git a/drivers/mmc/omap.c b/drivers/mmc/omap.c
new file mode 100644
index 000000000000..becb3c68c34d
--- /dev/null
+++ b/drivers/mmc/omap.c
@@ -0,0 +1,1226 @@
1/*
2 * linux/drivers/mmc/omap.c
3 *
4 * Copyright (C) 2004 Nokia Corporation
5 * Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
6 * Misc hacks here and there by Tony Lindgren <tony@atomide.com>
7 * Other hacks (DMA, SD, etc) by David Brownell
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/config.h>
15#include <linux/module.h>
16#include <linux/moduleparam.h>
17#include <linux/init.h>
18#include <linux/ioport.h>
19#include <linux/platform_device.h>
20#include <linux/interrupt.h>
21#include <linux/dma-mapping.h>
22#include <linux/delay.h>
23#include <linux/spinlock.h>
24#include <linux/timer.h>
25#include <linux/mmc/host.h>
26#include <linux/mmc/protocol.h>
27#include <linux/mmc/card.h>
28#include <linux/clk.h>
29
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <asm/scatterlist.h>
33#include <asm/mach-types.h>
34
35#include <asm/arch/board.h>
36#include <asm/arch/gpio.h>
37#include <asm/arch/dma.h>
38#include <asm/arch/mux.h>
39#include <asm/arch/fpga.h>
40#include <asm/arch/tps65010.h>
41
42#include "omap.h"
43
44#define DRIVER_NAME "mmci-omap"
45#define RSP_TYPE(x) ((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
46
47/* Specifies how often in millisecs to poll for card status changes
48 * when the cover switch is open */
49#define OMAP_MMC_SWITCH_POLL_DELAY 500
50
51static int mmc_omap_enable_poll = 1;
52
53struct mmc_omap_host {
54 int initialized;
55 int suspended;
56 struct mmc_request * mrq;
57 struct mmc_command * cmd;
58 struct mmc_data * data;
59 struct mmc_host * mmc;
60 struct device * dev;
61 unsigned char id; /* 16xx chips have 2 MMC blocks */
62 struct clk * iclk;
63 struct clk * fclk;
64 void __iomem *base;
65 int irq;
66 unsigned char bus_mode;
67 unsigned char hw_bus_mode;
68
69 unsigned int sg_len;
70 int sg_idx;
71 u16 * buffer;
72 u32 buffer_bytes_left;
73 u32 total_bytes_left;
74
75 unsigned use_dma:1;
76 unsigned brs_received:1, dma_done:1;
77 unsigned dma_is_read:1;
78 unsigned dma_in_use:1;
79 int dma_ch;
80 spinlock_t dma_lock;
81 struct timer_list dma_timer;
82 unsigned dma_len;
83
84 short power_pin;
85 short wp_pin;
86
87 int switch_pin;
88 struct work_struct switch_work;
89 struct timer_list switch_timer;
90 int switch_last_state;
91};
92
93static inline int
94mmc_omap_cover_is_open(struct mmc_omap_host *host)
95{
96 if (host->switch_pin < 0)
97 return 0;
98 return omap_get_gpio_datain(host->switch_pin);
99}
100
101static ssize_t
102mmc_omap_show_cover_switch(struct device *dev,
103 struct device_attribute *attr, char *buf)
104{
105 struct mmc_omap_host *host = dev_get_drvdata(dev);
106
107 return sprintf(buf, "%s\n", mmc_omap_cover_is_open(host) ? "open" :
108 "closed");
109}
110
111static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL);
112
113static ssize_t
114mmc_omap_show_enable_poll(struct device *dev,
115 struct device_attribute *attr, char *buf)
116{
117 return snprintf(buf, PAGE_SIZE, "%d\n", mmc_omap_enable_poll);
118}
119
120static ssize_t
121mmc_omap_store_enable_poll(struct device *dev,
122 struct device_attribute *attr, const char *buf,
123 size_t size)
124{
125 int enable_poll;
126
127 if (sscanf(buf, "%10d", &enable_poll) != 1)
128 return -EINVAL;
129
130 if (enable_poll != mmc_omap_enable_poll) {
131 struct mmc_omap_host *host = dev_get_drvdata(dev);
132
133 mmc_omap_enable_poll = enable_poll;
134 if (enable_poll && host->switch_pin >= 0)
135 schedule_work(&host->switch_work);
136 }
137 return size;
138}
139
140static DEVICE_ATTR(enable_poll, 0664,
141 mmc_omap_show_enable_poll, mmc_omap_store_enable_poll);
142
143static void
144mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)
145{
146 u32 cmdreg;
147 u32 resptype;
148 u32 cmdtype;
149
150 host->cmd = cmd;
151
152 resptype = 0;
153 cmdtype = 0;
154
155 /* Our hardware needs to know exact type */
156 switch (RSP_TYPE(mmc_resp_type(cmd))) {
157 case RSP_TYPE(MMC_RSP_R1):
158 /* resp 1, resp 1b */
159 resptype = 1;
160 break;
161 case RSP_TYPE(MMC_RSP_R2):
162 resptype = 2;
163 break;
164 case RSP_TYPE(MMC_RSP_R3):
165 resptype = 3;
166 break;
167 default:
168 break;
169 }
170
171 if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) {
172 cmdtype = OMAP_MMC_CMDTYPE_ADTC;
173 } else if (mmc_cmd_type(cmd) == MMC_CMD_BC) {
174 cmdtype = OMAP_MMC_CMDTYPE_BC;
175 } else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) {
176 cmdtype = OMAP_MMC_CMDTYPE_BCR;
177 } else {
178 cmdtype = OMAP_MMC_CMDTYPE_AC;
179 }
180
181 cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);
182
183 if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
184 cmdreg |= 1 << 6;
185
186 if (cmd->flags & MMC_RSP_BUSY)
187 cmdreg |= 1 << 11;
188
189 if (host->data && !(host->data->flags & MMC_DATA_WRITE))
190 cmdreg |= 1 << 15;
191
192 clk_enable(host->fclk);
193
194 OMAP_MMC_WRITE(host->base, CTO, 200);
195 OMAP_MMC_WRITE(host->base, ARGL, cmd->arg & 0xffff);
196 OMAP_MMC_WRITE(host->base, ARGH, cmd->arg >> 16);
197 OMAP_MMC_WRITE(host->base, IE,
198 OMAP_MMC_STAT_A_EMPTY | OMAP_MMC_STAT_A_FULL |
199 OMAP_MMC_STAT_CMD_CRC | OMAP_MMC_STAT_CMD_TOUT |
200 OMAP_MMC_STAT_DATA_CRC | OMAP_MMC_STAT_DATA_TOUT |
201 OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR |
202 OMAP_MMC_STAT_END_OF_DATA);
203 OMAP_MMC_WRITE(host->base, CMD, cmdreg);
204}
205
206static void
207mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
208{
209 if (host->dma_in_use) {
210 enum dma_data_direction dma_data_dir;
211
212 BUG_ON(host->dma_ch < 0);
213 if (data->error != MMC_ERR_NONE)
214 omap_stop_dma(host->dma_ch);
215 /* Release DMA channel lazily */
216 mod_timer(&host->dma_timer, jiffies + HZ);
217 if (data->flags & MMC_DATA_WRITE)
218 dma_data_dir = DMA_TO_DEVICE;
219 else
220 dma_data_dir = DMA_FROM_DEVICE;
221 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
222 dma_data_dir);
223 }
224 host->data = NULL;
225 host->sg_len = 0;
226 clk_disable(host->fclk);
227
228 /* NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing
229 * dozens of requests until the card finishes writing data.
230 * It'd be cheaper to just wait till an EOFB interrupt arrives...
231 */
232
233 if (!data->stop) {
234 host->mrq = NULL;
235 mmc_request_done(host->mmc, data->mrq);
236 return;
237 }
238
239 mmc_omap_start_command(host, data->stop);
240}
241
242static void
243mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
244{
245 unsigned long flags;
246 int done;
247
248 if (!host->dma_in_use) {
249 mmc_omap_xfer_done(host, data);
250 return;
251 }
252 done = 0;
253 spin_lock_irqsave(&host->dma_lock, flags);
254 if (host->dma_done)
255 done = 1;
256 else
257 host->brs_received = 1;
258 spin_unlock_irqrestore(&host->dma_lock, flags);
259 if (done)
260 mmc_omap_xfer_done(host, data);
261}
262
263static void
264mmc_omap_dma_timer(unsigned long data)
265{
266 struct mmc_omap_host *host = (struct mmc_omap_host *) data;
267
268 BUG_ON(host->dma_ch < 0);
269 omap_free_dma(host->dma_ch);
270 host->dma_ch = -1;
271}
272
273static void
274mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
275{
276 unsigned long flags;
277 int done;
278
279 done = 0;
280 spin_lock_irqsave(&host->dma_lock, flags);
281 if (host->brs_received)
282 done = 1;
283 else
284 host->dma_done = 1;
285 spin_unlock_irqrestore(&host->dma_lock, flags);
286 if (done)
287 mmc_omap_xfer_done(host, data);
288}
289
290static void
291mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
292{
293 host->cmd = NULL;
294
295 if (cmd->flags & MMC_RSP_PRESENT) {
296 if (cmd->flags & MMC_RSP_136) {
297 /* response type 2 */
298 cmd->resp[3] =
299 OMAP_MMC_READ(host->base, RSP0) |
300 (OMAP_MMC_READ(host->base, RSP1) << 16);
301 cmd->resp[2] =
302 OMAP_MMC_READ(host->base, RSP2) |
303 (OMAP_MMC_READ(host->base, RSP3) << 16);
304 cmd->resp[1] =
305 OMAP_MMC_READ(host->base, RSP4) |
306 (OMAP_MMC_READ(host->base, RSP5) << 16);
307 cmd->resp[0] =
308 OMAP_MMC_READ(host->base, RSP6) |
309 (OMAP_MMC_READ(host->base, RSP7) << 16);
310 } else {
311 /* response types 1, 1b, 3, 4, 5, 6 */
312 cmd->resp[0] =
313 OMAP_MMC_READ(host->base, RSP6) |
314 (OMAP_MMC_READ(host->base, RSP7) << 16);
315 }
316 }
317
318 if (host->data == NULL || cmd->error != MMC_ERR_NONE) {
319 host->mrq = NULL;
320 clk_disable(host->fclk);
321 mmc_request_done(host->mmc, cmd->mrq);
322 }
323}
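/*
 * The response is read above from the eight 16-bit RSPn registers: a
 * 136-bit (R2) response fills RSP0..RSP7 and is assembled into
 * cmd->resp[3]..resp[0], while short responses occupy only RSP6/RSP7.
 * The request is completed here when there is no data phase or when the
 * command itself failed.
 */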
324
325/* PIO only */
326static void
327mmc_omap_sg_to_buf(struct mmc_omap_host *host)
328{
329 struct scatterlist *sg;
330
331 sg = host->data->sg + host->sg_idx;
332 host->buffer_bytes_left = sg->length;
333 host->buffer = page_address(sg->page) + sg->offset;
334 if (host->buffer_bytes_left > host->total_bytes_left)
335 host->buffer_bytes_left = host->total_bytes_left;
336}
337
338/* PIO only */
339static void
340mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
341{
342 int n;
343 void __iomem *reg;
344 u16 *p;
345
346 if (host->buffer_bytes_left == 0) {
347 host->sg_idx++;
348 BUG_ON(host->sg_idx == host->sg_len);
349 mmc_omap_sg_to_buf(host);
350 }
351 n = 64;
352 if (n > host->buffer_bytes_left)
353 n = host->buffer_bytes_left;
354 host->buffer_bytes_left -= n;
355 host->total_bytes_left -= n;
356 host->data->bytes_xfered += n;
357
358 if (write) {
359 __raw_writesw(host->base + OMAP_MMC_REG_DATA, host->buffer, n);
360 } else {
361 __raw_readsw(host->base + OMAP_MMC_REG_DATA, host->buffer, n);
362 }
363}
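/*
 * PIO path: the data FIFO is serviced in chunks of at most 64 bytes per
 * almost-full/almost-empty interrupt, using 16-bit accesses via
 * __raw_readsw()/__raw_writesw().  When the current scatterlist segment
 * is exhausted, mmc_omap_sg_to_buf() advances to the next one.
 */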
364
365static inline void mmc_omap_report_irq(u16 status)
366{
367 static const char *mmc_omap_status_bits[] = {
368 "EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO",
369 "CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR"
370 };
371 int i, c = 0;
372
373 for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++)
374 if (status & (1 << i)) {
375 if (c)
376 printk(" ");
377 printk("%s", mmc_omap_status_bits[i]);
378 c++;
379 }
380}
381
382static irqreturn_t mmc_omap_irq(int irq, void *dev_id, struct pt_regs *regs)
383{
384 struct mmc_omap_host * host = (struct mmc_omap_host *)dev_id;
385 u16 status;
386 int end_command;
387 int end_transfer;
388 int transfer_error;
389
390 if (host->cmd == NULL && host->data == NULL) {
391 status = OMAP_MMC_READ(host->base, STAT);
392 dev_info(mmc_dev(host->mmc),"spurious irq 0x%04x\n", status);
393 if (status != 0) {
394 OMAP_MMC_WRITE(host->base, STAT, status);
395 OMAP_MMC_WRITE(host->base, IE, 0);
396 }
397 return IRQ_HANDLED;
398 }
399
400 end_command = 0;
401 end_transfer = 0;
402 transfer_error = 0;
403
404 while ((status = OMAP_MMC_READ(host->base, STAT)) != 0) {
405 OMAP_MMC_WRITE(host->base, STAT, status);
406#ifdef CONFIG_MMC_DEBUG
407 dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ",
408 status, host->cmd != NULL ? host->cmd->opcode : -1);
409 mmc_omap_report_irq(status);
410 printk("\n");
411#endif
412 if (host->total_bytes_left) {
413 if ((status & OMAP_MMC_STAT_A_FULL) ||
414 (status & OMAP_MMC_STAT_END_OF_DATA))
415 mmc_omap_xfer_data(host, 0);
416 if (status & OMAP_MMC_STAT_A_EMPTY)
417 mmc_omap_xfer_data(host, 1);
418 }
419
420 if (status & OMAP_MMC_STAT_END_OF_DATA) {
421 end_transfer = 1;
422 }
423
424 if (status & OMAP_MMC_STAT_DATA_TOUT) {
425 dev_dbg(mmc_dev(host->mmc), "data timeout\n");
426 if (host->data) {
427 host->data->error |= MMC_ERR_TIMEOUT;
428 transfer_error = 1;
429 }
430 }
431
432 if (status & OMAP_MMC_STAT_DATA_CRC) {
433 if (host->data) {
434 host->data->error |= MMC_ERR_BADCRC;
435 dev_dbg(mmc_dev(host->mmc),
436 "data CRC error, bytes left %d\n",
437 host->total_bytes_left);
438 transfer_error = 1;
439 } else {
440 dev_dbg(mmc_dev(host->mmc), "data CRC error\n");
441 }
442 }
443
444 if (status & OMAP_MMC_STAT_CMD_TOUT) {
445 /* Timeouts are routine with some commands */
446 if (host->cmd) {
447 if (host->cmd->opcode != MMC_ALL_SEND_CID &&
448 host->cmd->opcode !=
449 MMC_SEND_OP_COND &&
450 host->cmd->opcode !=
451 MMC_APP_CMD &&
452 !mmc_omap_cover_is_open(host))
453 dev_err(mmc_dev(host->mmc),
454 "command timeout, CMD %d\n",
455 host->cmd->opcode);
456 host->cmd->error = MMC_ERR_TIMEOUT;
457 end_command = 1;
458 }
459 }
460
461 if (status & OMAP_MMC_STAT_CMD_CRC) {
462 if (host->cmd) {
463 dev_err(mmc_dev(host->mmc),
464 "command CRC error (CMD%d, arg 0x%08x)\n",
465 host->cmd->opcode, host->cmd->arg);
466 host->cmd->error = MMC_ERR_BADCRC;
467 end_command = 1;
468 } else
469 dev_err(mmc_dev(host->mmc),
470 "command CRC error without cmd?\n");
471 }
472
473 if (status & OMAP_MMC_STAT_CARD_ERR) {
474 if (host->cmd && host->cmd->opcode == MMC_STOP_TRANSMISSION) {
475 u32 response = OMAP_MMC_READ(host->base, RSP6)
476 | (OMAP_MMC_READ(host->base, RSP7) << 16);
477 /* STOP sometimes sets must-ignore bits */
478 if (!(response & (R1_CC_ERROR
479 | R1_ILLEGAL_COMMAND
480 | R1_COM_CRC_ERROR))) {
481 end_command = 1;
482 continue;
483 }
484 }
485
486 dev_dbg(mmc_dev(host->mmc), "card status error (CMD%d)\n",
487 host->cmd->opcode);
488 if (host->cmd) {
489 host->cmd->error = MMC_ERR_FAILED;
490 end_command = 1;
491 }
492 if (host->data) {
493 host->data->error = MMC_ERR_FAILED;
494 transfer_error = 1;
495 }
496 }
497
498 /*
499 * NOTE: On 1610 the END_OF_CMD may come too early when
500 * starting a write
501 */
502 if ((status & OMAP_MMC_STAT_END_OF_CMD) &&
503 (!(status & OMAP_MMC_STAT_A_EMPTY))) {
504 end_command = 1;
505 }
506 }
507
508 if (end_command) {
509 mmc_omap_cmd_done(host, host->cmd);
510 }
511 if (transfer_error)
512 mmc_omap_xfer_done(host, host->data);
513 else if (end_transfer)
514 mmc_omap_end_of_data(host, host->data);
515
516 return IRQ_HANDLED;
517}
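/*
 * The interrupt handler above follows a simple pattern: read and
 * acknowledge STAT in a loop, service the FIFO for PIO transfers, and
 * collect end-of-command, end-of-transfer and error conditions into
 * flags.  Only after the status register has been drained are the command
 * and data phases completed, so one interrupt may finish both.
 */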
518
519static irqreturn_t mmc_omap_switch_irq(int irq, void *dev_id, struct pt_regs *regs)
520{
521 struct mmc_omap_host *host = (struct mmc_omap_host *) dev_id;
522
523 schedule_work(&host->switch_work);
524
525 return IRQ_HANDLED;
526}
527
528static void mmc_omap_switch_timer(unsigned long arg)
529{
530 struct mmc_omap_host *host = (struct mmc_omap_host *) arg;
531
532 schedule_work(&host->switch_work);
533}
534
535/* FIXME: Handle card insertion and removal properly. Maybe use a mask
536 * for MMC state? */
537static void mmc_omap_switch_callback(unsigned long data, u8 mmc_mask)
538{
539}
540
541static void mmc_omap_switch_handler(void *data)
542{
543 struct mmc_omap_host *host = (struct mmc_omap_host *) data;
544 struct mmc_card *card;
545 static int complained = 0;
546 int cards = 0, cover_open;
547
548 if (host->switch_pin == -1)
549 return;
550 cover_open = mmc_omap_cover_is_open(host);
551 if (cover_open != host->switch_last_state) {
552 kobject_uevent(&host->dev->kobj, KOBJ_CHANGE);
553 host->switch_last_state = cover_open;
554 }
555 mmc_detect_change(host->mmc, 0);
556 list_for_each_entry(card, &host->mmc->cards, node) {
557 if (mmc_card_present(card))
558 cards++;
559 }
560 if (mmc_omap_cover_is_open(host)) {
561 if (!complained) {
562 dev_info(mmc_dev(host->mmc), "cover is open");
563 complained = 1;
564 }
565 if (mmc_omap_enable_poll)
566 mod_timer(&host->switch_timer, jiffies +
567 msecs_to_jiffies(OMAP_MMC_SWITCH_POLL_DELAY));
568 } else {
569 complained = 0;
570 }
571}
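/*
 * Cover-switch handling: the GPIO interrupt and the poll timer both just
 * schedule switch_work.  The handler above reports cover state changes
 * via a uevent, asks the MMC core to rescan the slot, and keeps re-arming
 * the poll timer while the cover stays open (when polling is enabled).
 */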
572
573/* Prepare to transfer the next segment of a scatterlist */
574static void
575mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
576{
577 int dma_ch = host->dma_ch;
578 unsigned long data_addr;
579 u16 buf, frame;
580 u32 count;
581 struct scatterlist *sg = &data->sg[host->sg_idx];
582 int src_port = 0;
583 int dst_port = 0;
584 int sync_dev = 0;
585
586 data_addr = io_v2p((u32) host->base) + OMAP_MMC_REG_DATA;
587 frame = 1 << data->blksz_bits;
588 count = sg_dma_len(sg);
589
590 if ((data->blocks == 1) && (count > (1 << data->blksz_bits)))
591 count = frame;
592
593 host->dma_len = count;
594
595 /* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
596 * Use 16 or 32 word frames when the blocksize is at least that large.
597 * Blocksize is usually 512 bytes; but not for some SD reads.
598 */
599 if (cpu_is_omap15xx() && frame > 32)
600 frame = 32;
601 else if (frame > 64)
602 frame = 64;
603 count /= frame;
604 frame >>= 1;
605
606 if (!(data->flags & MMC_DATA_WRITE)) {
607 buf = 0x800f | ((frame - 1) << 8);
608
609 if (cpu_class_is_omap1()) {
610 src_port = OMAP_DMA_PORT_TIPB;
611 dst_port = OMAP_DMA_PORT_EMIFF;
612 }
613 if (cpu_is_omap24xx())
614 sync_dev = OMAP24XX_DMA_MMC1_RX;
615
616 omap_set_dma_src_params(dma_ch, src_port,
617 OMAP_DMA_AMODE_CONSTANT,
618 data_addr, 0, 0);
619 omap_set_dma_dest_params(dma_ch, dst_port,
620 OMAP_DMA_AMODE_POST_INC,
621 sg_dma_address(sg), 0, 0);
622 omap_set_dma_dest_data_pack(dma_ch, 1);
623 omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
624 } else {
625 buf = 0x0f80 | ((frame - 1) << 0);
626
627 if (cpu_class_is_omap1()) {
628 src_port = OMAP_DMA_PORT_EMIFF;
629 dst_port = OMAP_DMA_PORT_TIPB;
630 }
631 if (cpu_is_omap24xx())
632 sync_dev = OMAP24XX_DMA_MMC1_TX;
633
634 omap_set_dma_dest_params(dma_ch, dst_port,
635 OMAP_DMA_AMODE_CONSTANT,
636 data_addr, 0, 0);
637 omap_set_dma_src_params(dma_ch, src_port,
638 OMAP_DMA_AMODE_POST_INC,
639 sg_dma_address(sg), 0, 0);
640 omap_set_dma_src_data_pack(dma_ch, 1);
641 omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
642 }
643
644 /* Max limit for DMA frame count is 0xffff */
645 if (unlikely(count > 0xffff))
646 BUG();
647
648 OMAP_MMC_WRITE(host->base, BUF, buf);
649 omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
650 frame, count, OMAP_DMA_SYNC_FRAME,
651 sync_dev, 0);
652}
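/*
 * DMA is programmed one scatterlist segment at a time.  The element size
 * is 16 bits, the frame size mirrors the FIFO threshold (16 words on
 * 15xx, 32 words on later parts, smaller for short blocks), and the BUF
 * value written above sets the matching almost-full/almost-empty levels.
 * The frame count is limited to 0xffff by the DMA engine, hence the
 * BUG() check.
 */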
653
654/* A scatterlist segment completed */
655static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
656{
657 struct mmc_omap_host *host = (struct mmc_omap_host *) data;
658 struct mmc_data *mmcdat = host->data;
659
660 if (unlikely(host->dma_ch < 0)) {
 661		dev_err(mmc_dev(host->mmc), "DMA callback while DMA not "
 662			"enabled\n");
663 return;
664 }
665 /* FIXME: We really should do something to _handle_ the errors */
666 if (ch_status & OMAP_DMA_TOUT_IRQ) {
667 dev_err(mmc_dev(host->mmc),"DMA timeout\n");
668 return;
669 }
670 if (ch_status & OMAP_DMA_DROP_IRQ) {
671 dev_err(mmc_dev(host->mmc), "DMA sync error\n");
672 return;
673 }
674 if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
675 return;
676 }
677 mmcdat->bytes_xfered += host->dma_len;
678 host->sg_idx++;
679 if (host->sg_idx < host->sg_len) {
680 mmc_omap_prepare_dma(host, host->data);
681 omap_start_dma(host->dma_ch);
682 } else
683 mmc_omap_dma_done(host, host->data);
684}
685
686static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data)
687{
688 const char *dev_name;
689 int sync_dev, dma_ch, is_read, r;
690
691 is_read = !(data->flags & MMC_DATA_WRITE);
692 del_timer_sync(&host->dma_timer);
693 if (host->dma_ch >= 0) {
694 if (is_read == host->dma_is_read)
695 return 0;
696 omap_free_dma(host->dma_ch);
697 host->dma_ch = -1;
698 }
699
700 if (is_read) {
701 if (host->id == 1) {
702 sync_dev = OMAP_DMA_MMC_RX;
703 dev_name = "MMC1 read";
704 } else {
705 sync_dev = OMAP_DMA_MMC2_RX;
706 dev_name = "MMC2 read";
707 }
708 } else {
709 if (host->id == 1) {
710 sync_dev = OMAP_DMA_MMC_TX;
711 dev_name = "MMC1 write";
712 } else {
713 sync_dev = OMAP_DMA_MMC2_TX;
714 dev_name = "MMC2 write";
715 }
716 }
717 r = omap_request_dma(sync_dev, dev_name, mmc_omap_dma_cb,
718 host, &dma_ch);
719 if (r != 0) {
720 dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r);
721 return r;
722 }
723 host->dma_ch = dma_ch;
724 host->dma_is_read = is_read;
725
726 return 0;
727}
728
729static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
730{
731 u16 reg;
732
733 reg = OMAP_MMC_READ(host->base, SDIO);
734 reg &= ~(1 << 5);
735 OMAP_MMC_WRITE(host->base, SDIO, reg);
736 /* Set maximum timeout */
737 OMAP_MMC_WRITE(host->base, CTO, 0xff);
738}
739
740static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
741{
742 int timeout;
743 u16 reg;
744
745 /* Convert ns to clock cycles by assuming 20MHz frequency
746 * 1 cycle at 20MHz = 500 ns
747 */
748 timeout = req->data->timeout_clks + req->data->timeout_ns / 500;
749
750 /* Check if we need to use timeout multiplier register */
751 reg = OMAP_MMC_READ(host->base, SDIO);
752 if (timeout > 0xffff) {
753 reg |= (1 << 5);
754 timeout /= 1024;
755 } else
756 reg &= ~(1 << 5);
757 OMAP_MMC_WRITE(host->base, SDIO, reg);
758 OMAP_MMC_WRITE(host->base, DTO, timeout);
759}
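/*
 * Worked example for the conversion above, using the nominal 20 MHz
 * reference behind the 500 ns approximation: a timeout_ns of 100 ms gives
 * 200000 cycles, which overflows the 16-bit DTO register, so the
 * multiplier bit (SDIO bit 5) is set and the value written becomes
 * 200000 / 1024 = 195.
 */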
760
761static void
762mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
763{
764 struct mmc_data *data = req->data;
765 int i, use_dma, block_size;
766 unsigned sg_len;
767
768 host->data = data;
769 if (data == NULL) {
770 OMAP_MMC_WRITE(host->base, BLEN, 0);
771 OMAP_MMC_WRITE(host->base, NBLK, 0);
772 OMAP_MMC_WRITE(host->base, BUF, 0);
773 host->dma_in_use = 0;
774 set_cmd_timeout(host, req);
775 return;
776 }
777
778
779 block_size = 1 << data->blksz_bits;
780
781 OMAP_MMC_WRITE(host->base, NBLK, data->blocks - 1);
782 OMAP_MMC_WRITE(host->base, BLEN, block_size - 1);
783 set_data_timeout(host, req);
784
785 /* cope with calling layer confusion; it issues "single
786 * block" writes using multi-block scatterlists.
787 */
788 sg_len = (data->blocks == 1) ? 1 : data->sg_len;
789
790 /* Only do DMA for entire blocks */
791 use_dma = host->use_dma;
792 if (use_dma) {
793 for (i = 0; i < sg_len; i++) {
794 if ((data->sg[i].length % block_size) != 0) {
795 use_dma = 0;
796 break;
797 }
798 }
799 }
800
801 host->sg_idx = 0;
802 if (use_dma) {
803 if (mmc_omap_get_dma_channel(host, data) == 0) {
804 enum dma_data_direction dma_data_dir;
805
806 if (data->flags & MMC_DATA_WRITE)
807 dma_data_dir = DMA_TO_DEVICE;
808 else
809 dma_data_dir = DMA_FROM_DEVICE;
810
811 host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
812 sg_len, dma_data_dir);
813 host->total_bytes_left = 0;
814 mmc_omap_prepare_dma(host, req->data);
815 host->brs_received = 0;
816 host->dma_done = 0;
817 host->dma_in_use = 1;
818 } else
819 use_dma = 0;
820 }
821
822 /* Revert to PIO? */
823 if (!use_dma) {
824 OMAP_MMC_WRITE(host->base, BUF, 0x1f1f);
825 host->total_bytes_left = data->blocks * block_size;
826 host->sg_len = sg_len;
827 mmc_omap_sg_to_buf(host);
828 host->dma_in_use = 0;
829 }
830}
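/*
 * Summary of the DMA/PIO decision above: DMA is attempted only when every
 * scatterlist segment is a whole number of blocks and a channel for the
 * right direction can be obtained; anything else (including channel
 * allocation failure) falls back to PIO, with the FIFO thresholds set to
 * 0x1f1f and total_bytes_left driving the interrupt-time copy loop.
 */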
831
832static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
833{
834 struct mmc_omap_host *host = mmc_priv(mmc);
835
836 WARN_ON(host->mrq != NULL);
837
838 host->mrq = req;
839
840 /* only touch fifo AFTER the controller readies it */
841 mmc_omap_prepare_data(host, req);
842 mmc_omap_start_command(host, req->cmd);
843 if (host->dma_in_use)
844 omap_start_dma(host->dma_ch);
845}
846
847static void innovator_fpga_socket_power(int on)
848{
849#if defined(CONFIG_MACH_OMAP_INNOVATOR) && defined(CONFIG_ARCH_OMAP15XX)
850
851 if (on) {
852 fpga_write(fpga_read(OMAP1510_FPGA_POWER) | (1 << 3),
853 OMAP1510_FPGA_POWER);
854 } else {
855 fpga_write(fpga_read(OMAP1510_FPGA_POWER) & ~(1 << 3),
856 OMAP1510_FPGA_POWER);
857 }
858#endif
859}
860
861/*
862 * Turn the socket power on/off. Innovator uses FPGA, most boards
863 * probably use GPIO.
864 */
865static void mmc_omap_power(struct mmc_omap_host *host, int on)
866{
867 if (on) {
868 if (machine_is_omap_innovator())
869 innovator_fpga_socket_power(1);
870 else if (machine_is_omap_h2())
871 tps65010_set_gpio_out_value(GPIO3, HIGH);
872 else if (machine_is_omap_h3())
873 /* GPIO 4 of TPS65010 sends SD_EN signal */
874 tps65010_set_gpio_out_value(GPIO4, HIGH);
875 else if (cpu_is_omap24xx()) {
876 u16 reg = OMAP_MMC_READ(host->base, CON);
877 OMAP_MMC_WRITE(host->base, CON, reg | (1 << 11));
878 } else
879 if (host->power_pin >= 0)
880 omap_set_gpio_dataout(host->power_pin, 1);
881 } else {
882 if (machine_is_omap_innovator())
883 innovator_fpga_socket_power(0);
884 else if (machine_is_omap_h2())
885 tps65010_set_gpio_out_value(GPIO3, LOW);
886 else if (machine_is_omap_h3())
887 tps65010_set_gpio_out_value(GPIO4, LOW);
888 else if (cpu_is_omap24xx()) {
889 u16 reg = OMAP_MMC_READ(host->base, CON);
890 OMAP_MMC_WRITE(host->base, CON, reg & ~(1 << 11));
891 } else
892 if (host->power_pin >= 0)
893 omap_set_gpio_dataout(host->power_pin, 0);
894 }
895}
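/*
 * Slot power is board specific: Innovator goes through the FPGA register
 * above, H2/H3 toggle a TPS65010 GPIO, 24xx parts set or clear bit 11 of
 * the CON register, and everything else falls back to the plain GPIO
 * named by power_pin in the platform data.
 */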
896
897static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
898{
899 struct mmc_omap_host *host = mmc_priv(mmc);
900 int dsor;
901 int realclock, i;
902
903 realclock = ios->clock;
904
905 if (ios->clock == 0)
906 dsor = 0;
907 else {
908 int func_clk_rate = clk_get_rate(host->fclk);
909
910 dsor = func_clk_rate / realclock;
911 if (dsor < 1)
912 dsor = 1;
913
914 if (func_clk_rate / dsor > realclock)
915 dsor++;
916
917 if (dsor > 250)
918 dsor = 250;
919 dsor++;
920
921 if (ios->bus_width == MMC_BUS_WIDTH_4)
922 dsor |= 1 << 15;
923 }
924
925 switch (ios->power_mode) {
926 case MMC_POWER_OFF:
927 mmc_omap_power(host, 0);
928 break;
929 case MMC_POWER_UP:
930 case MMC_POWER_ON:
931 mmc_omap_power(host, 1);
932 dsor |= 1<<11;
933 break;
934 }
935
936 host->bus_mode = ios->bus_mode;
937 host->hw_bus_mode = host->bus_mode;
938
939 clk_enable(host->fclk);
940
941 /* On insanely high arm_per frequencies something sometimes
942 * goes somehow out of sync, and the POW bit is not being set,
943 * which results in the while loop below getting stuck.
944 * Writing to the CON register twice seems to do the trick. */
945 for (i = 0; i < 2; i++)
946 OMAP_MMC_WRITE(host->base, CON, dsor);
947 if (ios->power_mode == MMC_POWER_UP) {
948 /* Send clock cycles, poll completion */
949 OMAP_MMC_WRITE(host->base, IE, 0);
950 OMAP_MMC_WRITE(host->base, STAT, 0xffff);
951 OMAP_MMC_WRITE(host->base, CMD, 1<<7);
952 while (0 == (OMAP_MMC_READ(host->base, STAT) & 1));
953 OMAP_MMC_WRITE(host->base, STAT, 1);
954 }
955 clk_disable(host->fclk);
956}
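/*
 * Clock handling above: the divisor is chosen so fclk/dsor does not
 * exceed the requested card clock, capped at 250, then incremented once
 * more (apparently a quirk of the CON divider encoding), with bit 15
 * selecting 4-bit bus width.  On MMC_POWER_UP the driver clocks the bus
 * via CMD bit 7 and polls STAT for end-of-command before the first real
 * command is sent.
 */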
957
958static int mmc_omap_get_ro(struct mmc_host *mmc)
959{
960 struct mmc_omap_host *host = mmc_priv(mmc);
961
962 return host->wp_pin && omap_get_gpio_datain(host->wp_pin);
963}
964
965static struct mmc_host_ops mmc_omap_ops = {
966 .request = mmc_omap_request,
967 .set_ios = mmc_omap_set_ios,
968 .get_ro = mmc_omap_get_ro,
969};
970
971static int __init mmc_omap_probe(struct platform_device *pdev)
972{
973 struct omap_mmc_conf *minfo = pdev->dev.platform_data;
974 struct mmc_host *mmc;
975 struct mmc_omap_host *host = NULL;
976 int ret = 0;
977
978 if (platform_get_resource(pdev, IORESOURCE_MEM, 0) ||
979 platform_get_irq(pdev, IORESOURCE_IRQ, 0)) {
980 dev_err(&pdev->dev, "mmc_omap_probe: invalid resource type\n");
981 return -ENODEV;
982 }
983
984 if (!request_mem_region(pdev->resource[0].start,
985 pdev->resource[0].end - pdev->resource[0].start + 1,
986 pdev->name)) {
987 dev_dbg(&pdev->dev, "request_mem_region failed\n");
988 return -EBUSY;
989 }
990
991 mmc = mmc_alloc_host(sizeof(struct mmc_omap_host), &pdev->dev);
992 if (!mmc) {
993 ret = -ENOMEM;
994 goto out;
995 }
996
997 host = mmc_priv(mmc);
998 host->mmc = mmc;
999
1000 spin_lock_init(&host->dma_lock);
1001 init_timer(&host->dma_timer);
1002 host->dma_timer.function = mmc_omap_dma_timer;
1003 host->dma_timer.data = (unsigned long) host;
1004
1005 host->id = pdev->id;
1006
1007 if (cpu_is_omap24xx()) {
1008 host->iclk = clk_get(&pdev->dev, "mmc_ick");
1009 if (IS_ERR(host->iclk))
1010 goto out;
1011 clk_enable(host->iclk);
1012 }
1013
1014 if (!cpu_is_omap24xx())
1015 host->fclk = clk_get(&pdev->dev, "mmc_ck");
1016 else
1017 host->fclk = clk_get(&pdev->dev, "mmc_fck");
1018
1019 if (IS_ERR(host->fclk)) {
1020 ret = PTR_ERR(host->fclk);
1021 goto out;
1022 }
1023
1024 /* REVISIT:
1025 * Also, use minfo->cover to decide how to manage
1026 * the card detect sensing.
1027 */
1028 host->power_pin = minfo->power_pin;
1029 host->switch_pin = minfo->switch_pin;
1030 host->wp_pin = minfo->wp_pin;
1031 host->use_dma = 1;
1032 host->dma_ch = -1;
1033
1034 host->irq = pdev->resource[1].start;
1035	host->base = ioremap(pdev->resource[0].start, SZ_4K);
1036 if (!host->base) {
1037 ret = -ENOMEM;
1038 goto out;
1039 }
1040
1041 if (minfo->wire4)
1042 mmc->caps |= MMC_CAP_4_BIT_DATA;
1043
1044 mmc->ops = &mmc_omap_ops;
1045 mmc->f_min = 400000;
1046 mmc->f_max = 24000000;
1047 mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
1048
1049 /* Use scatterlist DMA to reduce per-transfer costs.
1050 * NOTE max_seg_size assumption that small blocks aren't
1051 * normally used (except e.g. for reading SD registers).
1052 */
1053 mmc->max_phys_segs = 32;
1054 mmc->max_hw_segs = 32;
1055 mmc->max_sectors = 256; /* NBLK max 11-bits, OMAP also limited by DMA */
1056 mmc->max_seg_size = mmc->max_sectors * 512;
1057
1058 if (host->power_pin >= 0) {
1059 if ((ret = omap_request_gpio(host->power_pin)) != 0) {
1060			dev_err(mmc_dev(host->mmc), "Unable to get GPIO "
1061				"pin for MMC power\n");
1062 goto out;
1063 }
1064 omap_set_gpio_direction(host->power_pin, 0);
1065 }
1066
1067 ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
1068 if (ret)
1069 goto out;
1070
1071 host->dev = &pdev->dev;
1072 platform_set_drvdata(pdev, host);
1073
1074 mmc_add_host(mmc);
1075
1076 if (host->switch_pin >= 0) {
1077 INIT_WORK(&host->switch_work, mmc_omap_switch_handler, host);
1078 init_timer(&host->switch_timer);
1079 host->switch_timer.function = mmc_omap_switch_timer;
1080 host->switch_timer.data = (unsigned long) host;
1081 if (omap_request_gpio(host->switch_pin) != 0) {
1082 dev_warn(mmc_dev(host->mmc), "Unable to get GPIO pin for MMC cover switch\n");
1083 host->switch_pin = -1;
1084 goto no_switch;
1085 }
1086
1087 omap_set_gpio_direction(host->switch_pin, 1);
1088 ret = request_irq(OMAP_GPIO_IRQ(host->switch_pin),
1089 mmc_omap_switch_irq, SA_TRIGGER_RISING, DRIVER_NAME, host);
1090 if (ret) {
1091 dev_warn(mmc_dev(host->mmc), "Unable to get IRQ for MMC cover switch\n");
1092 omap_free_gpio(host->switch_pin);
1093 host->switch_pin = -1;
1094 goto no_switch;
1095 }
1096 ret = device_create_file(&pdev->dev, &dev_attr_cover_switch);
1097 if (ret == 0) {
1098 ret = device_create_file(&pdev->dev, &dev_attr_enable_poll);
1099 if (ret != 0)
1100 device_remove_file(&pdev->dev, &dev_attr_cover_switch);
1101 }
1102 if (ret) {
1103			dev_warn(mmc_dev(host->mmc), "Unable to create sysfs attributes\n");
1104 free_irq(OMAP_GPIO_IRQ(host->switch_pin), host);
1105 omap_free_gpio(host->switch_pin);
1106 host->switch_pin = -1;
1107 goto no_switch;
1108 }
1109 if (mmc_omap_enable_poll && mmc_omap_cover_is_open(host))
1110 schedule_work(&host->switch_work);
1111 }
1112
1113no_switch:
1114 return 0;
1115
1116out:
1117 /* FIXME: Free other resources too. */
1118 if (host) {
1119 if (host->iclk && !IS_ERR(host->iclk))
1120 clk_put(host->iclk);
1121 if (host->fclk && !IS_ERR(host->fclk))
1122 clk_put(host->fclk);
1123 mmc_free_host(host->mmc);
1124 }
1125 return ret;
1126}
1127
1128static int mmc_omap_remove(struct platform_device *pdev)
1129{
1130 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1131
1132 platform_set_drvdata(pdev, NULL);
1133
1134 if (host) {
1135 mmc_remove_host(host->mmc);
1136 free_irq(host->irq, host);
1137
1138 if (host->power_pin >= 0)
1139 omap_free_gpio(host->power_pin);
1140 if (host->switch_pin >= 0) {
1141 device_remove_file(&pdev->dev, &dev_attr_enable_poll);
1142 device_remove_file(&pdev->dev, &dev_attr_cover_switch);
1143 free_irq(OMAP_GPIO_IRQ(host->switch_pin), host);
1144 omap_free_gpio(host->switch_pin);
1145 host->switch_pin = -1;
1146 del_timer_sync(&host->switch_timer);
1147 flush_scheduled_work();
1148 }
1149 if (host->iclk && !IS_ERR(host->iclk))
1150 clk_put(host->iclk);
1151 if (host->fclk && !IS_ERR(host->fclk))
1152 clk_put(host->fclk);
1153 mmc_free_host(host->mmc);
1154 }
1155
1156 release_mem_region(pdev->resource[0].start,
1157 pdev->resource[0].end - pdev->resource[0].start + 1);
1158
1159 return 0;
1160}
1161
1162#ifdef CONFIG_PM
1163static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
1164{
1165 int ret = 0;
1166 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1167
1168 if (host && host->suspended)
1169 return 0;
1170
1171 if (host) {
1172 ret = mmc_suspend_host(host->mmc, mesg);
1173 if (ret == 0)
1174 host->suspended = 1;
1175 }
1176 return ret;
1177}
1178
1179static int mmc_omap_resume(struct platform_device *pdev)
1180{
1181 int ret = 0;
1182 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1183
1184 if (host && !host->suspended)
1185 return 0;
1186
1187 if (host) {
1188 ret = mmc_resume_host(host->mmc);
1189 if (ret == 0)
1190 host->suspended = 0;
1191 }
1192
1193 return ret;
1194}
1195#else
1196#define mmc_omap_suspend NULL
1197#define mmc_omap_resume NULL
1198#endif
1199
1200static struct platform_driver mmc_omap_driver = {
1201 .probe = mmc_omap_probe,
1202 .remove = mmc_omap_remove,
1203 .suspend = mmc_omap_suspend,
1204 .resume = mmc_omap_resume,
1205 .driver = {
1206 .name = DRIVER_NAME,
1207 },
1208};
1209
1210static int __init mmc_omap_init(void)
1211{
1212 return platform_driver_register(&mmc_omap_driver);
1213}
1214
1215static void __exit mmc_omap_exit(void)
1216{
1217 platform_driver_unregister(&mmc_omap_driver);
1218}
1219
1220module_init(mmc_omap_init);
1221module_exit(mmc_omap_exit);
1222
1223MODULE_DESCRIPTION("OMAP Multimedia Card driver");
1224MODULE_LICENSE("GPL");
1225MODULE_ALIAS(DRIVER_NAME);
1226MODULE_AUTHOR("Juha Yrjölä");
diff --git a/drivers/mmc/omap.h b/drivers/mmc/omap.h
new file mode 100644
index 000000000000..c954d355a5e3
--- /dev/null
+++ b/drivers/mmc/omap.h
@@ -0,0 +1,55 @@
1#ifndef DRIVERS_MEDIA_MMC_OMAP_H
2#define DRIVERS_MEDIA_MMC_OMAP_H
3
4#define OMAP_MMC_REG_CMD 0x00
5#define OMAP_MMC_REG_ARGL 0x04
6#define OMAP_MMC_REG_ARGH 0x08
7#define OMAP_MMC_REG_CON 0x0c
8#define OMAP_MMC_REG_STAT 0x10
9#define OMAP_MMC_REG_IE 0x14
10#define OMAP_MMC_REG_CTO 0x18
11#define OMAP_MMC_REG_DTO 0x1c
12#define OMAP_MMC_REG_DATA 0x20
13#define OMAP_MMC_REG_BLEN 0x24
14#define OMAP_MMC_REG_NBLK 0x28
15#define OMAP_MMC_REG_BUF 0x2c
16#define OMAP_MMC_REG_SDIO 0x34
17#define OMAP_MMC_REG_REV 0x3c
18#define OMAP_MMC_REG_RSP0 0x40
19#define OMAP_MMC_REG_RSP1 0x44
20#define OMAP_MMC_REG_RSP2 0x48
21#define OMAP_MMC_REG_RSP3 0x4c
22#define OMAP_MMC_REG_RSP4 0x50
23#define OMAP_MMC_REG_RSP5 0x54
24#define OMAP_MMC_REG_RSP6 0x58
25#define OMAP_MMC_REG_RSP7 0x5c
26#define OMAP_MMC_REG_IOSR 0x60
27#define OMAP_MMC_REG_SYSC 0x64
28#define OMAP_MMC_REG_SYSS 0x68
29
30#define OMAP_MMC_STAT_CARD_ERR (1 << 14)
31#define OMAP_MMC_STAT_CARD_IRQ (1 << 13)
32#define OMAP_MMC_STAT_OCR_BUSY (1 << 12)
33#define OMAP_MMC_STAT_A_EMPTY (1 << 11)
34#define OMAP_MMC_STAT_A_FULL (1 << 10)
35#define OMAP_MMC_STAT_CMD_CRC (1 << 8)
36#define OMAP_MMC_STAT_CMD_TOUT (1 << 7)
37#define OMAP_MMC_STAT_DATA_CRC (1 << 6)
38#define OMAP_MMC_STAT_DATA_TOUT (1 << 5)
39#define OMAP_MMC_STAT_END_BUSY (1 << 4)
40#define OMAP_MMC_STAT_END_OF_DATA (1 << 3)
41#define OMAP_MMC_STAT_CARD_BUSY (1 << 2)
42#define OMAP_MMC_STAT_END_OF_CMD (1 << 0)
43
44#define OMAP_MMC_READ(base, reg) __raw_readw((base) + OMAP_MMC_REG_##reg)
45#define OMAP_MMC_WRITE(base, reg, val) __raw_writew((val), (base) + OMAP_MMC_REG_##reg)
46
47/*
48 * Command types
49 */
50#define OMAP_MMC_CMDTYPE_BC 0
51#define OMAP_MMC_CMDTYPE_BCR 1
52#define OMAP_MMC_CMDTYPE_AC 2
53#define OMAP_MMC_CMDTYPE_ADTC 3
54
55#endif
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c
index c32fad1ce51c..eb9a8826e9b5 100644
--- a/drivers/mmc/pxamci.c
+++ b/drivers/mmc/pxamci.c
@@ -37,12 +37,6 @@
37 37
38#include "pxamci.h" 38#include "pxamci.h"
39 39
40#ifdef CONFIG_MMC_DEBUG
41#define DBG(x...) printk(KERN_DEBUG x)
42#else
43#define DBG(x...) do { } while (0)
44#endif
45
46#define DRIVER_NAME "pxa2xx-mci" 40#define DRIVER_NAME "pxa2xx-mci"
47 41
48#define NR_SG 1 42#define NR_SG 1
@@ -206,7 +200,7 @@ static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd,
206 200
207static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq) 201static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
208{ 202{
209 DBG("PXAMCI: request done\n"); 203 pr_debug("PXAMCI: request done\n");
210 host->mrq = NULL; 204 host->mrq = NULL;
211 host->cmd = NULL; 205 host->cmd = NULL;
212 host->data = NULL; 206 host->data = NULL;
@@ -252,7 +246,7 @@ static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
252 if ((cmd->resp[0] & 0x80000000) == 0) 246 if ((cmd->resp[0] & 0x80000000) == 0)
253 cmd->error = MMC_ERR_BADCRC; 247 cmd->error = MMC_ERR_BADCRC;
254 } else { 248 } else {
255 DBG("ignoring CRC from command %d - *risky*\n",cmd->opcode); 249 pr_debug("ignoring CRC from command %d - *risky*\n",cmd->opcode);
256 } 250 }
257#else 251#else
258 cmd->error = MMC_ERR_BADCRC; 252 cmd->error = MMC_ERR_BADCRC;
@@ -317,12 +311,12 @@ static irqreturn_t pxamci_irq(int irq, void *devid, struct pt_regs *regs)
317 311
318 ireg = readl(host->base + MMC_I_REG); 312 ireg = readl(host->base + MMC_I_REG);
319 313
320 DBG("PXAMCI: irq %08x\n", ireg); 314 pr_debug("PXAMCI: irq %08x\n", ireg);
321 315
322 if (ireg) { 316 if (ireg) {
323 unsigned stat = readl(host->base + MMC_STAT); 317 unsigned stat = readl(host->base + MMC_STAT);
324 318
325 DBG("PXAMCI: stat %08x\n", stat); 319 pr_debug("PXAMCI: stat %08x\n", stat);
326 320
327 if (ireg & END_CMD_RES) 321 if (ireg & END_CMD_RES)
328 handled |= pxamci_cmd_done(host, stat); 322 handled |= pxamci_cmd_done(host, stat);
@@ -376,9 +370,9 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
376{ 370{
377 struct pxamci_host *host = mmc_priv(mmc); 371 struct pxamci_host *host = mmc_priv(mmc);
378 372
379 DBG("pxamci_set_ios: clock %u power %u vdd %u.%02u\n", 373 pr_debug("pxamci_set_ios: clock %u power %u vdd %u.%02u\n",
380 ios->clock, ios->power_mode, ios->vdd / 100, 374 ios->clock, ios->power_mode, ios->vdd / 100,
381 ios->vdd % 100); 375 ios->vdd % 100);
382 376
383 if (ios->clock) { 377 if (ios->clock) {
384 unsigned int clk = CLOCKRATE / ios->clock; 378 unsigned int clk = CLOCKRATE / ios->clock;
@@ -405,8 +399,8 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
405 host->cmdat |= CMDAT_INIT; 399 host->cmdat |= CMDAT_INIT;
406 } 400 }
407 401
408 DBG("pxamci_set_ios: clkrt = %x cmdat = %x\n", 402 pr_debug("pxamci_set_ios: clkrt = %x cmdat = %x\n",
409 host->clkrt, host->cmdat); 403 host->clkrt, host->cmdat);
410} 404}
411 405
412static struct mmc_host_ops pxamci_ops = { 406static struct mmc_host_ops pxamci_ops = {
diff --git a/drivers/mmc/sdhci.c b/drivers/mmc/sdhci.c
index 8b811d94371c..bdbfca050029 100644
--- a/drivers/mmc/sdhci.c
+++ b/drivers/mmc/sdhci.c
@@ -31,12 +31,8 @@
31 31
32#define BUGMAIL "<sdhci-devel@list.drzeus.cx>" 32#define BUGMAIL "<sdhci-devel@list.drzeus.cx>"
33 33
34#ifdef CONFIG_MMC_DEBUG
35#define DBG(f, x...) \ 34#define DBG(f, x...) \
36 printk(KERN_DEBUG DRIVER_NAME " [%s()]: " f, __func__,## x) 35 pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
37#else
38#define DBG(f, x...) do { } while (0)
39#endif
40 36
41static const struct pci_device_id pci_ids[] __devinitdata = { 37static const struct pci_device_id pci_ids[] __devinitdata = {
42 /* handle any SD host controller */ 38 /* handle any SD host controller */
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
index 3be397d436fa..511f7b0b31d2 100644
--- a/drivers/mmc/wbsd.c
+++ b/drivers/mmc/wbsd.c
@@ -44,15 +44,10 @@
44#define DRIVER_NAME "wbsd" 44#define DRIVER_NAME "wbsd"
45#define DRIVER_VERSION "1.5" 45#define DRIVER_VERSION "1.5"
46 46
47#ifdef CONFIG_MMC_DEBUG
48#define DBG(x...) \ 47#define DBG(x...) \
49 printk(KERN_DEBUG DRIVER_NAME ": " x) 48 pr_debug(DRIVER_NAME ": " x)
50#define DBGF(f, x...) \ 49#define DBGF(f, x...) \
51 printk(KERN_DEBUG DRIVER_NAME " [%s()]: " f, __func__ , ##x) 50 pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
52#else
53#define DBG(x...) do { } while (0)
54#define DBGF(x...) do { } while (0)
55#endif
56 51
57/* 52/*
58 * Device resources 53 * Device resources
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index 0f6bb2e625d8..a7ec5954caf5 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -200,27 +200,6 @@ config MTD_CFI_AMDSTD
200 provides support for one of those command sets, used on chips 200 provides support for one of those command sets, used on chips
201 including the AMD Am29LV320. 201 including the AMD Am29LV320.
202 202
203config MTD_CFI_AMDSTD_RETRY
204 int "Retry failed commands (erase/program)"
205 depends on MTD_CFI_AMDSTD
206 default "0"
207 help
208 Some chips, when attached to a shared bus, don't properly filter
209 bus traffic that is destined to other devices. This broken
210 behavior causes erase and program sequences to be aborted when
211 the sequences are mixed with traffic for other devices.
212
213 SST49LF040 (and related) chips are know to be broken.
214
215config MTD_CFI_AMDSTD_RETRY_MAX
216 int "Max retries of failed commands (erase/program)"
217 depends on MTD_CFI_AMDSTD_RETRY
218 default "0"
219 help
220 If you have an SST49LF040 (or related chip) then this value should
221 be set to at least 1. This can also be adjusted at driver load
222 time with the retry_cmd_max module parameter.
223
224config MTD_CFI_STAA 203config MTD_CFI_STAA
225 tristate "Support for ST (Advanced Architecture) flash chips" 204 tristate "Support for ST (Advanced Architecture) flash chips"
226 depends on MTD_GEN_PROBE 205 depends on MTD_GEN_PROBE
diff --git a/drivers/mtd/chips/amd_flash.c b/drivers/mtd/chips/amd_flash.c
index fdb91b6f1d97..57115618c496 100644
--- a/drivers/mtd/chips/amd_flash.c
+++ b/drivers/mtd/chips/amd_flash.c
@@ -664,7 +664,7 @@ static struct mtd_info *amd_flash_probe(struct map_info *map)
664 printk("%s: Probing for AMD compatible flash...\n", map->name); 664 printk("%s: Probing for AMD compatible flash...\n", map->name);
665 665
666 if ((table_pos[0] = probe_new_chip(mtd, 0, NULL, &temp, table, 666 if ((table_pos[0] = probe_new_chip(mtd, 0, NULL, &temp, table,
667 sizeof(table)/sizeof(table[0]))) 667 ARRAY_SIZE(table)))
668 == -1) { 668 == -1) {
669 printk(KERN_WARNING 669 printk(KERN_WARNING
670 "%s: Found no AMD compatible device at location zero\n", 670 "%s: Found no AMD compatible device at location zero\n",
@@ -696,7 +696,7 @@ static struct mtd_info *amd_flash_probe(struct map_info *map)
696 base += (1 << temp.chipshift)) { 696 base += (1 << temp.chipshift)) {
697 int numchips = temp.numchips; 697 int numchips = temp.numchips;
698 table_pos[numchips] = probe_new_chip(mtd, base, chips, 698 table_pos[numchips] = probe_new_chip(mtd, base, chips,
699 &temp, table, sizeof(table)/sizeof(table[0])); 699 &temp, table, ARRAY_SIZE(table));
700 } 700 }
701 701
702 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) * 702 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) *
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index edb306c03c0a..517ea33e7260 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -34,6 +34,7 @@
34#define MANUFACTURER_MACRONIX 0x00C2 34#define MANUFACTURER_MACRONIX 0x00C2
35#define MANUFACTURER_NEC 0x0010 35#define MANUFACTURER_NEC 0x0010
36#define MANUFACTURER_PMC 0x009D 36#define MANUFACTURER_PMC 0x009D
37#define MANUFACTURER_SHARP 0x00b0
37#define MANUFACTURER_SST 0x00BF 38#define MANUFACTURER_SST 0x00BF
38#define MANUFACTURER_ST 0x0020 39#define MANUFACTURER_ST 0x0020
39#define MANUFACTURER_TOSHIBA 0x0098 40#define MANUFACTURER_TOSHIBA 0x0098
@@ -124,6 +125,9 @@
124#define PM49FL004 0x006E 125#define PM49FL004 0x006E
125#define PM49FL008 0x006A 126#define PM49FL008 0x006A
126 127
128/* Sharp */
129#define LH28F640BF 0x00b0
130
127/* ST - www.st.com */ 131/* ST - www.st.com */
128#define M29W800DT 0x00D7 132#define M29W800DT 0x00D7
129#define M29W800DB 0x005B 133#define M29W800DB 0x005B
@@ -1267,6 +1271,19 @@ static const struct amd_flash_info jedec_table[] = {
1267 .regions = { 1271 .regions = {
1268 ERASEINFO( 0x01000, 256 ) 1272 ERASEINFO( 0x01000, 256 )
1269 } 1273 }
1274 }, {
1275 .mfr_id = MANUFACTURER_SHARP,
1276 .dev_id = LH28F640BF,
1277 .name = "LH28F640BF",
1278 .uaddr = {
1279 [0] = MTD_UADDR_UNNECESSARY, /* x8 */
1280 },
1281 .DevSize = SIZE_4MiB,
1282 .CmdSet = P_ID_INTEL_STD,
1283 .NumEraseRegions= 1,
1284 .regions = {
1285 ERASEINFO(0x40000,16),
1286 }
1270 }, { 1287 }, {
1271 .mfr_id = MANUFACTURER_SST, 1288 .mfr_id = MANUFACTURER_SST,
1272 .dev_id = SST39LF512, 1289 .dev_id = SST39LF512,
@@ -2035,7 +2052,7 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
2035 DEBUG(MTD_DEBUG_LEVEL3, 2052 DEBUG(MTD_DEBUG_LEVEL3,
2036 "Search for id:(%02x %02x) interleave(%d) type(%d)\n", 2053 "Search for id:(%02x %02x) interleave(%d) type(%d)\n",
2037 cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type); 2054 cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type);
2038 for (i=0; i<sizeof(jedec_table)/sizeof(jedec_table[0]); i++) { 2055 for (i = 0; i < ARRAY_SIZE(jedec_table); i++) {
2039 if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) { 2056 if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) {
2040 DEBUG( MTD_DEBUG_LEVEL3, 2057 DEBUG( MTD_DEBUG_LEVEL3,
2041 "MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n", 2058 "MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n",
diff --git a/drivers/mtd/chips/sharp.c b/drivers/mtd/chips/sharp.c
index 36f61a6a766e..3cc0b23c5865 100644
--- a/drivers/mtd/chips/sharp.c
+++ b/drivers/mtd/chips/sharp.c
@@ -64,7 +64,7 @@
64 64
65#undef AUTOUNLOCK /* automatically unlocks blocks before erasing */ 65#undef AUTOUNLOCK /* automatically unlocks blocks before erasing */
66 66
67struct mtd_info *sharp_probe(struct map_info *); 67static struct mtd_info *sharp_probe(struct map_info *);
68 68
69static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd); 69static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd);
70 70
@@ -96,7 +96,6 @@ struct sharp_info{
96 struct flchip chips[1]; 96 struct flchip chips[1];
97}; 97};
98 98
99struct mtd_info *sharp_probe(struct map_info *map);
100static void sharp_destroy(struct mtd_info *mtd); 99static void sharp_destroy(struct mtd_info *mtd);
101 100
102static struct mtd_chip_driver sharp_chipdrv = { 101static struct mtd_chip_driver sharp_chipdrv = {
@@ -107,7 +106,7 @@ static struct mtd_chip_driver sharp_chipdrv = {
107}; 106};
108 107
109 108
110struct mtd_info *sharp_probe(struct map_info *map) 109static struct mtd_info *sharp_probe(struct map_info *map)
111{ 110{
112 struct mtd_info *mtd = NULL; 111 struct mtd_info *mtd = NULL;
113 struct sharp_info *sharp = NULL; 112 struct sharp_info *sharp = NULL;
@@ -581,7 +580,7 @@ static void sharp_destroy(struct mtd_info *mtd)
581 580
582} 581}
583 582
584int __init sharp_probe_init(void) 583static int __init sharp_probe_init(void)
585{ 584{
586 printk("MTD Sharp chip driver <ds@lineo.com>\n"); 585 printk("MTD Sharp chip driver <ds@lineo.com>\n");
587 586
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 6b8bb2e4dcfd..a7a7bfe33879 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -42,7 +42,8 @@
42 42
43 43
44/* special size referring to all the remaining space in a partition */ 44/* special size referring to all the remaining space in a partition */
45#define SIZE_REMAINING 0xffffffff 45#define SIZE_REMAINING UINT_MAX
46#define OFFSET_CONTINUOUS UINT_MAX
46 47
47struct cmdline_mtd_partition { 48struct cmdline_mtd_partition {
48 struct cmdline_mtd_partition *next; 49 struct cmdline_mtd_partition *next;
@@ -75,7 +76,7 @@ static struct mtd_partition * newpart(char *s,
75{ 76{
76 struct mtd_partition *parts; 77 struct mtd_partition *parts;
77 unsigned long size; 78 unsigned long size;
78 unsigned long offset = 0; 79 unsigned long offset = OFFSET_CONTINUOUS;
79 char *name; 80 char *name;
80 int name_len; 81 int name_len;
81 unsigned char *extra_mem; 82 unsigned char *extra_mem;
@@ -314,7 +315,7 @@ static int parse_cmdline_partitions(struct mtd_info *master,
314 { 315 {
315 for(i = 0, offset = 0; i < part->num_parts; i++) 316 for(i = 0, offset = 0; i < part->num_parts; i++)
316 { 317 {
317 if (!part->parts[i].offset) 318 if (part->parts[i].offset == OFFSET_CONTINUOUS)
318 part->parts[i].offset = offset; 319 part->parts[i].offset = offset;
319 else 320 else
320 offset = part->parts[i].offset; 321 offset = part->parts[i].offset;
diff --git a/drivers/mtd/devices/blkmtd.c b/drivers/mtd/devices/blkmtd.c
index 04f864d238db..79f2e1f23ebd 100644
--- a/drivers/mtd/devices/blkmtd.c
+++ b/drivers/mtd/devices/blkmtd.c
@@ -28,8 +28,9 @@
28#include <linux/pagemap.h> 28#include <linux/pagemap.h>
29#include <linux/list.h> 29#include <linux/list.h>
30#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/mount.h>
31#include <linux/mtd/mtd.h> 32#include <linux/mtd/mtd.h>
32 33#include <linux/mutex.h>
33 34
34#define err(format, arg...) printk(KERN_ERR "blkmtd: " format "\n" , ## arg) 35#define err(format, arg...) printk(KERN_ERR "blkmtd: " format "\n" , ## arg)
35#define info(format, arg...) printk(KERN_INFO "blkmtd: " format "\n" , ## arg) 36#define info(format, arg...) printk(KERN_INFO "blkmtd: " format "\n" , ## arg)
@@ -46,7 +47,7 @@ struct blkmtd_dev {
46 struct list_head list; 47 struct list_head list;
47 struct block_device *blkdev; 48 struct block_device *blkdev;
48 struct mtd_info mtd_info; 49 struct mtd_info mtd_info;
49 struct semaphore wrbuf_mutex; 50 struct mutex wrbuf_mutex;
50}; 51};
51 52
52 53
@@ -268,7 +269,7 @@ static int write_pages(struct blkmtd_dev *dev, const u_char *buf, loff_t to,
268 if(end_len) 269 if(end_len)
269 pagecnt++; 270 pagecnt++;
270 271
271 down(&dev->wrbuf_mutex); 272 mutex_lock(&dev->wrbuf_mutex);
272 273
273 DEBUG(3, "blkmtd: write: start_len = %zd len = %zd end_len = %zd pagecnt = %d\n", 274 DEBUG(3, "blkmtd: write: start_len = %zd len = %zd end_len = %zd pagecnt = %d\n",
274 start_len, len, end_len, pagecnt); 275 start_len, len, end_len, pagecnt);
@@ -376,7 +377,7 @@ static int write_pages(struct blkmtd_dev *dev, const u_char *buf, loff_t to,
376 blkmtd_write_out(bio); 377 blkmtd_write_out(bio);
377 378
378 DEBUG(2, "blkmtd: write: end, retlen = %zd, err = %d\n", *retlen, err); 379 DEBUG(2, "blkmtd: write: end, retlen = %zd, err = %d\n", *retlen, err);
379 up(&dev->wrbuf_mutex); 380 mutex_unlock(&dev->wrbuf_mutex);
380 381
381 if(retlen) 382 if(retlen)
382 *retlen = thislen; 383 *retlen = thislen;
@@ -614,8 +615,6 @@ static struct mtd_erase_region_info *calc_erase_regions(
614} 615}
615 616
616 617
617extern dev_t __init name_to_dev_t(const char *line);
618
619static struct blkmtd_dev *add_device(char *devname, int readonly, int erase_size) 618static struct blkmtd_dev *add_device(char *devname, int readonly, int erase_size)
620{ 619{
621 struct block_device *bdev; 620 struct block_device *bdev;
@@ -659,7 +658,7 @@ static struct blkmtd_dev *add_device(char *devname, int readonly, int erase_size
659 memset(dev, 0, sizeof(struct blkmtd_dev)); 658 memset(dev, 0, sizeof(struct blkmtd_dev));
660 dev->blkdev = bdev; 659 dev->blkdev = bdev;
661 if(!readonly) { 660 if(!readonly) {
662 init_MUTEX(&dev->wrbuf_mutex); 661 mutex_init(&dev->wrbuf_mutex);
663 } 662 }
664 663
665 dev->mtd_info.size = dev->blkdev->bd_inode->i_size & PAGE_MASK; 664 dev->mtd_info.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 7ff403b2a0a0..4160b8334c53 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -18,6 +18,7 @@
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/mtd/mtd.h> 19#include <linux/mtd/mtd.h>
20#include <linux/buffer_head.h> 20#include <linux/buffer_head.h>
21#include <linux/mutex.h>
21 22
22#define VERSION "$Revision: 1.30 $" 23#define VERSION "$Revision: 1.30 $"
23 24
@@ -31,7 +32,7 @@ struct block2mtd_dev {
31 struct list_head list; 32 struct list_head list;
32 struct block_device *blkdev; 33 struct block_device *blkdev;
33 struct mtd_info mtd; 34 struct mtd_info mtd;
34 struct semaphore write_mutex; 35 struct mutex write_mutex;
35}; 36};
36 37
37 38
@@ -134,9 +135,9 @@ static int block2mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
134 int err; 135 int err;
135 136
136 instr->state = MTD_ERASING; 137 instr->state = MTD_ERASING;
137 down(&dev->write_mutex); 138 mutex_lock(&dev->write_mutex);
138 err = _block2mtd_erase(dev, from, len); 139 err = _block2mtd_erase(dev, from, len);
139 up(&dev->write_mutex); 140 mutex_unlock(&dev->write_mutex);
140 if (err) { 141 if (err) {
141 ERROR("erase failed err = %d", err); 142 ERROR("erase failed err = %d", err);
142 instr->state = MTD_ERASE_FAILED; 143 instr->state = MTD_ERASE_FAILED;
@@ -249,9 +250,9 @@ static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
249 if (to + len > mtd->size) 250 if (to + len > mtd->size)
250 len = mtd->size - to; 251 len = mtd->size - to;
251 252
252 down(&dev->write_mutex); 253 mutex_lock(&dev->write_mutex);
253 err = _block2mtd_write(dev, buf, to, len, retlen); 254 err = _block2mtd_write(dev, buf, to, len, retlen);
254 up(&dev->write_mutex); 255 mutex_unlock(&dev->write_mutex);
255 if (err > 0) 256 if (err > 0)
256 err = 0; 257 err = 0;
257 return err; 258 return err;
@@ -310,7 +311,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
310 goto devinit_err; 311 goto devinit_err;
311 } 312 }
312 313
313 init_MUTEX(&dev->write_mutex); 314 mutex_init(&dev->write_mutex);
314 315
315 /* Setup the MTD structure */ 316 /* Setup the MTD structure */
316 /* make the name contain the block device in */ 317 /* make the name contain the block device in */
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index e4345cf744a2..23e7a5c7d2c1 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -20,6 +20,7 @@
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/types.h> 21#include <linux/types.h>
22#include <linux/bitops.h> 22#include <linux/bitops.h>
23#include <linux/mutex.h>
23 24
24#include <linux/mtd/mtd.h> 25#include <linux/mtd/mtd.h>
25#include <linux/mtd/nand.h> 26#include <linux/mtd/nand.h>
@@ -605,7 +606,7 @@ static void DoC2k_init(struct mtd_info *mtd)
605 606
606 this->curfloor = -1; 607 this->curfloor = -1;
607 this->curchip = -1; 608 this->curchip = -1;
608 init_MUTEX(&this->lock); 609 mutex_init(&this->lock);
609 610
610 /* Ident all the chips present. */ 611 /* Ident all the chips present. */
611 DoC_ScanChips(this, maxchips); 612 DoC_ScanChips(this, maxchips);
@@ -645,7 +646,7 @@ static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
645 if (from >= this->totlen) 646 if (from >= this->totlen)
646 return -EINVAL; 647 return -EINVAL;
647 648
648 down(&this->lock); 649 mutex_lock(&this->lock);
649 650
650 *retlen = 0; 651 *retlen = 0;
651 while (left) { 652 while (left) {
@@ -774,7 +775,7 @@ static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
774 buf += len; 775 buf += len;
775 } 776 }
776 777
777 up(&this->lock); 778 mutex_unlock(&this->lock);
778 779
779 return ret; 780 return ret;
780} 781}
@@ -803,7 +804,7 @@ static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
803 if (to >= this->totlen) 804 if (to >= this->totlen)
804 return -EINVAL; 805 return -EINVAL;
805 806
806 down(&this->lock); 807 mutex_lock(&this->lock);
807 808
808 *retlen = 0; 809 *retlen = 0;
809 while (left) { 810 while (left) {
@@ -873,7 +874,7 @@ static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
873 printk(KERN_ERR "Error programming flash\n"); 874 printk(KERN_ERR "Error programming flash\n");
874 /* Error in programming */ 875 /* Error in programming */
875 *retlen = 0; 876 *retlen = 0;
876 up(&this->lock); 877 mutex_unlock(&this->lock);
877 return -EIO; 878 return -EIO;
878 } 879 }
879 880
@@ -935,7 +936,7 @@ static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
935 printk(KERN_ERR "Error programming flash\n"); 936 printk(KERN_ERR "Error programming flash\n");
936 /* Error in programming */ 937 /* Error in programming */
937 *retlen = 0; 938 *retlen = 0;
938 up(&this->lock); 939 mutex_unlock(&this->lock);
939 return -EIO; 940 return -EIO;
940 } 941 }
941 942
@@ -956,7 +957,7 @@ static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
956 957
957 ret = doc_write_oob_nolock(mtd, to, 8, &dummy, x); 958 ret = doc_write_oob_nolock(mtd, to, 8, &dummy, x);
958 if (ret) { 959 if (ret) {
959 up(&this->lock); 960 mutex_unlock(&this->lock);
960 return ret; 961 return ret;
961 } 962 }
962 } 963 }
@@ -966,7 +967,7 @@ static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
966 buf += len; 967 buf += len;
967 } 968 }
968 969
969 up(&this->lock); 970 mutex_unlock(&this->lock);
970 return 0; 971 return 0;
971} 972}
972 973
@@ -975,13 +976,13 @@ static int doc_writev_ecc(struct mtd_info *mtd, const struct kvec *vecs,
975 u_char *eccbuf, struct nand_oobinfo *oobsel) 976 u_char *eccbuf, struct nand_oobinfo *oobsel)
976{ 977{
977 static char static_buf[512]; 978 static char static_buf[512];
978 static DECLARE_MUTEX(writev_buf_sem); 979 static DEFINE_MUTEX(writev_buf_mutex);
979 980
980 size_t totretlen = 0; 981 size_t totretlen = 0;
981 size_t thisvecofs = 0; 982 size_t thisvecofs = 0;
982 int ret= 0; 983 int ret= 0;
983 984
984 down(&writev_buf_sem); 985 mutex_lock(&writev_buf_mutex);
985 986
986 while(count) { 987 while(count) {
987 size_t thislen, thisretlen; 988 size_t thislen, thisretlen;
@@ -1024,7 +1025,7 @@ static int doc_writev_ecc(struct mtd_info *mtd, const struct kvec *vecs,
1024 to += thislen; 1025 to += thislen;
1025 } 1026 }
1026 1027
1027 up(&writev_buf_sem); 1028 mutex_unlock(&writev_buf_mutex);
1028 *retlen = totretlen; 1029 *retlen = totretlen;
1029 return ret; 1030 return ret;
1030} 1031}
@@ -1037,7 +1038,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
1037 int len256 = 0, ret; 1038 int len256 = 0, ret;
1038 struct Nand *mychip; 1039 struct Nand *mychip;
1039 1040
1040 down(&this->lock); 1041 mutex_lock(&this->lock);
1041 1042
1042 mychip = &this->chips[ofs >> this->chipshift]; 1043 mychip = &this->chips[ofs >> this->chipshift];
1043 1044
@@ -1083,7 +1084,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
1083 1084
1084 ret = DoC_WaitReady(this); 1085 ret = DoC_WaitReady(this);
1085 1086
1086 up(&this->lock); 1087 mutex_unlock(&this->lock);
1087 return ret; 1088 return ret;
1088 1089
1089} 1090}
@@ -1197,10 +1198,10 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
1197 struct DiskOnChip *this = mtd->priv; 1198 struct DiskOnChip *this = mtd->priv;
1198 int ret; 1199 int ret;
1199 1200
1200 down(&this->lock); 1201 mutex_lock(&this->lock);
1201 ret = doc_write_oob_nolock(mtd, ofs, len, retlen, buf); 1202 ret = doc_write_oob_nolock(mtd, ofs, len, retlen, buf);
1202 1203
1203 up(&this->lock); 1204 mutex_unlock(&this->lock);
1204 return ret; 1205 return ret;
1205} 1206}
1206 1207
@@ -1214,10 +1215,10 @@ static int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
1214 struct Nand *mychip; 1215 struct Nand *mychip;
1215 int status; 1216 int status;
1216 1217
1217 down(&this->lock); 1218 mutex_lock(&this->lock);
1218 1219
1219 if (ofs & (mtd->erasesize-1) || len & (mtd->erasesize-1)) { 1220 if (ofs & (mtd->erasesize-1) || len & (mtd->erasesize-1)) {
1220 up(&this->lock); 1221 mutex_unlock(&this->lock);
1221 return -EINVAL; 1222 return -EINVAL;
1222 } 1223 }
1223 1224
@@ -1265,7 +1266,7 @@ static int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
1265 callback: 1266 callback:
1266 mtd_erase_callback(instr); 1267 mtd_erase_callback(instr);
1267 1268
1268 up(&this->lock); 1269 mutex_unlock(&this->lock);
1269 return 0; 1270 return 0;
1270} 1271}
1271 1272
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 1e876fcb0408..29b0ddaa324e 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -581,8 +581,6 @@ static int flash_write (struct mtd_info *mtd,loff_t to,size_t len,size_t *retlen
581 581
582/***************************************************************************************************/ 582/***************************************************************************************************/
583 583
584#define NB_OF(x) (sizeof (x) / sizeof (x[0]))
585
586static struct mtd_info mtd; 584static struct mtd_info mtd;
587 585
588static struct mtd_erase_region_info erase_regions[] = { 586static struct mtd_erase_region_info erase_regions[] = {
@@ -640,7 +638,7 @@ int __init lart_flash_init (void)
640 mtd.flags = MTD_CAP_NORFLASH; 638 mtd.flags = MTD_CAP_NORFLASH;
641 mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN; 639 mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN;
642 mtd.erasesize = FLASH_BLOCKSIZE_MAIN; 640 mtd.erasesize = FLASH_BLOCKSIZE_MAIN;
643 mtd.numeraseregions = NB_OF (erase_regions); 641 mtd.numeraseregions = ARRAY_SIZE(erase_regions);
644 mtd.eraseregions = erase_regions; 642 mtd.eraseregions = erase_regions;
645 mtd.erase = flash_erase; 643 mtd.erase = flash_erase;
646 mtd.read = flash_read; 644 mtd.read = flash_read;
@@ -670,9 +668,9 @@ int __init lart_flash_init (void)
670 result,mtd.eraseregions[result].numblocks); 668 result,mtd.eraseregions[result].numblocks);
671 669
672#ifdef HAVE_PARTITIONS 670#ifdef HAVE_PARTITIONS
673 printk ("\npartitions = %d\n",NB_OF (lart_partitions)); 671 printk ("\npartitions = %d\n", ARRAY_SIZE(lart_partitions));
674 672
675 for (result = 0; result < NB_OF (lart_partitions); result++) 673 for (result = 0; result < ARRAY_SIZE(lart_partitions); result++)
676 printk (KERN_DEBUG 674 printk (KERN_DEBUG
677 "\n\n" 675 "\n\n"
678 "lart_partitions[%d].name = %s\n" 676 "lart_partitions[%d].name = %s\n"
@@ -687,7 +685,7 @@ int __init lart_flash_init (void)
687#ifndef HAVE_PARTITIONS 685#ifndef HAVE_PARTITIONS
688 result = add_mtd_device (&mtd); 686 result = add_mtd_device (&mtd);
689#else 687#else
690 result = add_mtd_partitions (&mtd,lart_partitions,NB_OF (lart_partitions)); 688 result = add_mtd_partitions (&mtd,lart_partitions, ARRAY_SIZE(lart_partitions));
691#endif 689#endif
692 690
693 return (result); 691 return (result);
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index d5f24089be71..04e65d5dae00 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -186,7 +186,7 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
186 struct m25p *flash = mtd_to_m25p(mtd); 186 struct m25p *flash = mtd_to_m25p(mtd);
187 u32 addr,len; 187 u32 addr,len;
188 188
189 DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n", 189 DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %d\n",
190 flash->spi->dev.bus_id, __FUNCTION__, "at", 190 flash->spi->dev.bus_id, __FUNCTION__, "at",
191 (u32)instr->addr, instr->len); 191 (u32)instr->addr, instr->len);
192 192
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
index 0ff2e4378244..485f663493d2 100644
--- a/drivers/mtd/devices/ms02-nv.c
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -308,7 +308,7 @@ static int __init ms02nv_init(void)
308 break; 308 break;
309 } 309 }
310 310
311 for (i = 0; i < (sizeof(ms02nv_addrs) / sizeof(*ms02nv_addrs)); i++) 311 for (i = 0; i < ARRAY_SIZE(ms02nv_addrs); i++)
312 if (!ms02nv_init_one(ms02nv_addrs[i] << stride)) 312 if (!ms02nv_init_one(ms02nv_addrs[i] << stride))
313 count++; 313 count++;
314 314
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 8a544890173d..a3b92479719d 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -47,9 +47,6 @@
47 */ 47 */
48#define MAX_LOOPS 10000 48#define MAX_LOOPS 10000
49 49
50extern void INFTL_dumptables(struct INFTLrecord *inftl);
51extern void INFTL_dumpVUchains(struct INFTLrecord *inftl);
52
53static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) 50static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
54{ 51{
55 struct INFTLrecord *inftl; 52 struct INFTLrecord *inftl;
@@ -132,7 +129,7 @@ static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
132 return; 129 return;
133 } 130 }
134#ifdef PSYCHO_DEBUG 131#ifdef PSYCHO_DEBUG
135 printk(KERN_INFO "INFTL: Found new nftl%c\n", nftl->mbd.devnum + 'a'); 132 printk(KERN_INFO "INFTL: Found new inftl%c\n", inftl->mbd.devnum + 'a');
136#endif 133#endif
137 return; 134 return;
138} 135}
@@ -885,8 +882,6 @@ static struct mtd_blktrans_ops inftl_tr = {
885 .owner = THIS_MODULE, 882 .owner = THIS_MODULE,
886}; 883};
887 884
888extern char inftlmountrev[];
889
890static int __init init_inftl(void) 885static int __init init_inftl(void)
891{ 886{
892 printk(KERN_INFO "INFTL: inftlcore.c $Revision: 1.19 $, " 887 printk(KERN_INFO "INFTL: inftlcore.c $Revision: 1.19 $, "
diff --git a/drivers/mtd/maps/alchemy-flash.c b/drivers/mtd/maps/alchemy-flash.c
index a57791a6ce40..b933a2a27b18 100644
--- a/drivers/mtd/maps/alchemy-flash.c
+++ b/drivers/mtd/maps/alchemy-flash.c
@@ -126,8 +126,6 @@ static struct mtd_partition alchemy_partitions[] = {
126 } 126 }
127}; 127};
128 128
129#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
130
131static struct mtd_info *mymtd; 129static struct mtd_info *mymtd;
132 130
133int __init alchemy_mtd_init(void) 131int __init alchemy_mtd_init(void)
@@ -154,7 +152,7 @@ int __init alchemy_mtd_init(void)
154 * Static partition definition selection 152 * Static partition definition selection
155 */ 153 */
156 parts = alchemy_partitions; 154 parts = alchemy_partitions;
157 nb_parts = NB_OF(alchemy_partitions); 155 nb_parts = ARRAY_SIZE(alchemy_partitions);
158 alchemy_map.size = window_size; 156 alchemy_map.size = window_size;
159 157
160 /* 158 /*
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c
index 6a8c0415bde8..fd0f0d3187de 100644
--- a/drivers/mtd/maps/cfi_flagadm.c
+++ b/drivers/mtd/maps/cfi_flagadm.c
@@ -86,7 +86,7 @@ struct mtd_partition flagadm_parts[] = {
86 } 86 }
87}; 87};
88 88
89#define PARTITION_COUNT (sizeof(flagadm_parts)/sizeof(struct mtd_partition)) 89#define PARTITION_COUNT ARRAY_SIZE(flagadm_parts)
90 90
91static struct mtd_info *mymtd; 91static struct mtd_info *mymtd;
92 92
diff --git a/drivers/mtd/maps/dbox2-flash.c b/drivers/mtd/maps/dbox2-flash.c
index 49d90542fc75..652813cd6c2d 100644
--- a/drivers/mtd/maps/dbox2-flash.c
+++ b/drivers/mtd/maps/dbox2-flash.c
@@ -57,7 +57,7 @@ static struct mtd_partition partition_info[]= {
57 } 57 }
58}; 58};
59 59
60#define NUM_PARTITIONS (sizeof(partition_info) / sizeof(partition_info[0])) 60#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
61 61
62#define WINDOW_ADDR 0x10000000 62#define WINDOW_ADDR 0x10000000
63#define WINDOW_SIZE 0x800000 63#define WINDOW_SIZE 0x800000
diff --git a/drivers/mtd/maps/dilnetpc.c b/drivers/mtd/maps/dilnetpc.c
index efb221692641..c299d10b33e6 100644
--- a/drivers/mtd/maps/dilnetpc.c
+++ b/drivers/mtd/maps/dilnetpc.c
@@ -300,7 +300,7 @@ static struct mtd_partition partition_info[]=
300 }, 300 },
301}; 301};
302 302
303#define NUM_PARTITIONS (sizeof(partition_info)/sizeof(partition_info[0])) 303#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
304 304
305static struct mtd_info *mymtd; 305static struct mtd_info *mymtd;
306static struct mtd_info *lowlvl_parts[NUM_PARTITIONS]; 306static struct mtd_info *lowlvl_parts[NUM_PARTITIONS];
@@ -345,7 +345,7 @@ static struct mtd_partition higlvl_partition_info[]=
345 }, 345 },
346}; 346};
347 347
348#define NUM_HIGHLVL_PARTITIONS (sizeof(higlvl_partition_info)/sizeof(partition_info[0])) 348#define NUM_HIGHLVL_PARTITIONS ARRAY_SIZE(higlvl_partition_info)
349 349
350 350
351static int dnp_adnp_probe(void) 351static int dnp_adnp_probe(void)
diff --git a/drivers/mtd/maps/dmv182.c b/drivers/mtd/maps/dmv182.c
index b993ac01a9a5..2bb3c0f0f970 100644
--- a/drivers/mtd/maps/dmv182.c
+++ b/drivers/mtd/maps/dmv182.c
@@ -99,7 +99,7 @@ static struct mtd_info *this_mtd;
99static int __init init_svme182(void) 99static int __init init_svme182(void)
100{ 100{
101 struct mtd_partition *partitions; 101 struct mtd_partition *partitions;
102 int num_parts = sizeof(svme182_partitions) / sizeof(struct mtd_partition); 102 int num_parts = ARRAY_SIZE(svme182_partitions);
103 103
104 partitions = svme182_partitions; 104 partitions = svme182_partitions;
105 105
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
index 319094821101..0667101ccbe1 100644
--- a/drivers/mtd/maps/h720x-flash.c
+++ b/drivers/mtd/maps/h720x-flash.c
@@ -59,7 +59,7 @@ static struct mtd_partition h720x_partitions[] = {
59 } 59 }
60}; 60};
61 61
62#define NUM_PARTITIONS (sizeof(h720x_partitions)/sizeof(h720x_partitions[0])) 62#define NUM_PARTITIONS ARRAY_SIZE(h720x_partitions)
63 63
64static int nr_mtd_parts; 64static int nr_mtd_parts;
65static struct mtd_partition *mtd_parts; 65static struct mtd_partition *mtd_parts;
diff --git a/drivers/mtd/maps/netsc520.c b/drivers/mtd/maps/netsc520.c
index 33060a315722..ed215470158b 100644
--- a/drivers/mtd/maps/netsc520.c
+++ b/drivers/mtd/maps/netsc520.c
@@ -76,7 +76,7 @@ static struct mtd_partition partition_info[]={
76 .size = 0x80000 76 .size = 0x80000
77 }, 77 },
78}; 78};
79#define NUM_PARTITIONS (sizeof(partition_info)/sizeof(partition_info[0])) 79#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
80 80
81#define WINDOW_SIZE 0x00100000 81#define WINDOW_SIZE 0x00100000
82#define WINDOW_ADDR 0x00200000 82#define WINDOW_ADDR 0x00200000
@@ -88,7 +88,7 @@ static struct map_info netsc520_map = {
88 .phys = WINDOW_ADDR, 88 .phys = WINDOW_ADDR,
89}; 89};
90 90
91#define NUM_FLASH_BANKS (sizeof(netsc520_map)/sizeof(struct map_info)) 91#define NUM_FLASH_BANKS ARRAY_SIZE(netsc520_map)
92 92
93static struct mtd_info *mymtd; 93static struct mtd_info *mymtd;
94 94
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index 632eb2aa968f..54a3102ab19a 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -128,8 +128,7 @@ static struct mtd_partition nettel_amd_partitions[] = {
128 } 128 }
129}; 129};
130 130
131#define NUM_AMD_PARTITIONS \ 131#define NUM_AMD_PARTITIONS ARRAY_SIZE(nettel_amd_partitions)
132 (sizeof(nettel_amd_partitions)/sizeof(nettel_amd_partitions[0]))
133 132
134/****************************************************************************/ 133/****************************************************************************/
135 134
diff --git a/drivers/mtd/maps/ocotea.c b/drivers/mtd/maps/ocotea.c
index c223514ca2eb..a21fcd195ab4 100644
--- a/drivers/mtd/maps/ocotea.c
+++ b/drivers/mtd/maps/ocotea.c
@@ -58,8 +58,6 @@ static struct mtd_partition ocotea_large_partitions[] = {
58 } 58 }
59}; 59};
60 60
61#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
62
63int __init init_ocotea(void) 61int __init init_ocotea(void)
64{ 62{
65 u8 fpga0_reg; 63 u8 fpga0_reg;
@@ -97,7 +95,7 @@ int __init init_ocotea(void)
97 if (flash) { 95 if (flash) {
98 flash->owner = THIS_MODULE; 96 flash->owner = THIS_MODULE;
99 add_mtd_partitions(flash, ocotea_small_partitions, 97 add_mtd_partitions(flash, ocotea_small_partitions,
100 NB_OF(ocotea_small_partitions)); 98 ARRAY_SIZE(ocotea_small_partitions));
101 } else { 99 } else {
102 printk("map probe failed for flash\n"); 100 printk("map probe failed for flash\n");
103 return -ENXIO; 101 return -ENXIO;
@@ -118,7 +116,7 @@ int __init init_ocotea(void)
118 if (flash) { 116 if (flash) {
119 flash->owner = THIS_MODULE; 117 flash->owner = THIS_MODULE;
120 add_mtd_partitions(flash, ocotea_large_partitions, 118 add_mtd_partitions(flash, ocotea_large_partitions,
121 NB_OF(ocotea_large_partitions)); 119 ARRAY_SIZE(ocotea_large_partitions));
122 } else { 120 } else {
123 printk("map probe failed for flash\n"); 121 printk("map probe failed for flash\n");
124 return -ENXIO; 122 return -ENXIO;
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 21822c2edbe4..d2ab1bae9c34 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -334,9 +334,6 @@ mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
334 return 0; 334 return 0;
335 335
336release: 336release:
337 if (mtd)
338 map_destroy(mtd);
339
340 if (map) { 337 if (map) {
341 map->exit(dev, map); 338 map->exit(dev, map);
342 kfree(map); 339 kfree(map);
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index f988c817e196..d27f4129afd3 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -54,7 +54,7 @@ static const int debug = 0;
54#define MAX_PCMCIA_ADDR 0x4000000 54#define MAX_PCMCIA_ADDR 0x4000000
55 55
56struct pcmciamtd_dev { 56struct pcmciamtd_dev {
57 dev_link_t link; /* PCMCIA link */ 57 struct pcmcia_device *p_dev;
58 dev_node_t node; /* device node */ 58 dev_node_t node; /* device node */
59 caddr_t win_base; /* ioremapped address of PCMCIA window */ 59 caddr_t win_base; /* ioremapped address of PCMCIA window */
60 unsigned int win_size; /* size of window */ 60 unsigned int win_size; /* size of window */
@@ -111,8 +111,8 @@ static caddr_t remap_window(struct map_info *map, unsigned long to)
111 memreq_t mrq; 111 memreq_t mrq;
112 int ret; 112 int ret;
113 113
114 if(!(dev->link.state & DEV_PRESENT)) { 114 if (!pcmcia_dev_present(dev->p_dev)) {
115 DEBUG(1, "device removed state = 0x%4.4X", dev->link.state); 115 DEBUG(1, "device removed");
116 return 0; 116 return 0;
117 } 117 }
118 118
@@ -122,7 +122,7 @@ static caddr_t remap_window(struct map_info *map, unsigned long to)
122 dev->offset, mrq.CardOffset); 122 dev->offset, mrq.CardOffset);
123 mrq.Page = 0; 123 mrq.Page = 0;
124 if( (ret = pcmcia_map_mem_page(win, &mrq)) != CS_SUCCESS) { 124 if( (ret = pcmcia_map_mem_page(win, &mrq)) != CS_SUCCESS) {
125 cs_error(dev->link.handle, MapMemPage, ret); 125 cs_error(dev->p_dev, MapMemPage, ret);
126 return NULL; 126 return NULL;
127 } 127 }
128 dev->offset = mrq.CardOffset; 128 dev->offset = mrq.CardOffset;
@@ -238,7 +238,7 @@ static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const v
238 238
239/* read/write{8,16} copy_{from,to} routines with direct access */ 239/* read/write{8,16} copy_{from,to} routines with direct access */
240 240
241#define DEV_REMOVED(x) (!(*(u_int *)x->map_priv_1 & DEV_PRESENT)) 241#define DEV_REMOVED(x) (!(pcmcia_dev_present(((struct pcmciamtd_dev *)map->map_priv_1)->p_dev)))
242 242
243static map_word pcmcia_read8(struct map_info *map, unsigned long ofs) 243static map_word pcmcia_read8(struct map_info *map, unsigned long ofs)
244{ 244{
@@ -319,7 +319,7 @@ static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *f
319static void pcmciamtd_set_vpp(struct map_info *map, int on) 319static void pcmciamtd_set_vpp(struct map_info *map, int on)
320{ 320{
321 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; 321 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
322 dev_link_t *link = &dev->link; 322 struct pcmcia_device *link = dev->p_dev;
323 modconf_t mod; 323 modconf_t mod;
324 int ret; 324 int ret;
325 325
@@ -328,9 +328,9 @@ static void pcmciamtd_set_vpp(struct map_info *map, int on)
328 mod.Vpp1 = mod.Vpp2 = on ? dev->vpp : 0; 328 mod.Vpp1 = mod.Vpp2 = on ? dev->vpp : 0;
329 329
330 DEBUG(2, "dev = %p on = %d vpp = %d\n", dev, on, dev->vpp); 330 DEBUG(2, "dev = %p on = %d vpp = %d\n", dev, on, dev->vpp);
331 ret = pcmcia_modify_configuration(link->handle, &mod); 331 ret = pcmcia_modify_configuration(link, &mod);
332 if(ret != CS_SUCCESS) { 332 if(ret != CS_SUCCESS) {
333 cs_error(link->handle, ModifyConfiguration, ret); 333 cs_error(link, ModifyConfiguration, ret);
334 } 334 }
335} 335}
336 336
@@ -340,7 +340,7 @@ static void pcmciamtd_set_vpp(struct map_info *map, int on)
340 * still open, this will be postponed until it is closed. 340 * still open, this will be postponed until it is closed.
341 */ 341 */
342 342
343static void pcmciamtd_release(dev_link_t *link) 343static void pcmciamtd_release(struct pcmcia_device *link)
344{ 344{
345 struct pcmciamtd_dev *dev = link->priv; 345 struct pcmciamtd_dev *dev = link->priv;
346 346
@@ -353,12 +353,11 @@ static void pcmciamtd_release(dev_link_t *link)
353 } 353 }
354 pcmcia_release_window(link->win); 354 pcmcia_release_window(link->win);
355 } 355 }
356 pcmcia_release_configuration(link->handle); 356 pcmcia_disable_device(link);
357 link->state &= ~DEV_CONFIG;
358} 357}
359 358
360 359
361static void card_settings(struct pcmciamtd_dev *dev, dev_link_t *link, int *new_name) 360static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *link, int *new_name)
362{ 361{
363 int rc; 362 int rc;
364 tuple_t tuple; 363 tuple_t tuple;
@@ -371,16 +370,16 @@ static void card_settings(struct pcmciamtd_dev *dev, dev_link_t *link, int *new_
371 tuple.TupleOffset = 0; 370 tuple.TupleOffset = 0;
372 tuple.DesiredTuple = RETURN_FIRST_TUPLE; 371 tuple.DesiredTuple = RETURN_FIRST_TUPLE;
373 372
374 rc = pcmcia_get_first_tuple(link->handle, &tuple); 373 rc = pcmcia_get_first_tuple(link, &tuple);
375 while(rc == CS_SUCCESS) { 374 while(rc == CS_SUCCESS) {
376 rc = pcmcia_get_tuple_data(link->handle, &tuple); 375 rc = pcmcia_get_tuple_data(link, &tuple);
377 if(rc != CS_SUCCESS) { 376 if(rc != CS_SUCCESS) {
378 cs_error(link->handle, GetTupleData, rc); 377 cs_error(link, GetTupleData, rc);
379 break; 378 break;
380 } 379 }
381 rc = pcmcia_parse_tuple(link->handle, &tuple, &parse); 380 rc = pcmcia_parse_tuple(link, &tuple, &parse);
382 if(rc != CS_SUCCESS) { 381 if(rc != CS_SUCCESS) {
383 cs_error(link->handle, ParseTuple, rc); 382 cs_error(link, ParseTuple, rc);
384 break; 383 break;
385 } 384 }
386 385
@@ -451,7 +450,7 @@ static void card_settings(struct pcmciamtd_dev *dev, dev_link_t *link, int *new_
451 DEBUG(2, "Unknown tuple code %d", tuple.TupleCode); 450 DEBUG(2, "Unknown tuple code %d", tuple.TupleCode);
452 } 451 }
453 452
454 rc = pcmcia_get_next_tuple(link->handle, &tuple); 453 rc = pcmcia_get_next_tuple(link, &tuple);
455 } 454 }
456 if(!dev->pcmcia_map.size) 455 if(!dev->pcmcia_map.size)
457 dev->pcmcia_map.size = MAX_PCMCIA_ADDR; 456 dev->pcmcia_map.size = MAX_PCMCIA_ADDR;
@@ -488,7 +487,7 @@ static void card_settings(struct pcmciamtd_dev *dev, dev_link_t *link, int *new_
488#define CS_CHECK(fn, ret) \ 487#define CS_CHECK(fn, ret) \
489do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 488do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
490 489
491static void pcmciamtd_config(dev_link_t *link) 490static int pcmciamtd_config(struct pcmcia_device *link)
492{ 491{
493 struct pcmciamtd_dev *dev = link->priv; 492 struct pcmciamtd_dev *dev = link->priv;
494 struct mtd_info *mtd = NULL; 493 struct mtd_info *mtd = NULL;
@@ -504,13 +503,10 @@ static void pcmciamtd_config(dev_link_t *link)
504 503
505 DEBUG(3, "link=0x%p", link); 504 DEBUG(3, "link=0x%p", link);
506 505
507 /* Configure card */
508 link->state |= DEV_CONFIG;
509
510 DEBUG(2, "Validating CIS"); 506 DEBUG(2, "Validating CIS");
511 ret = pcmcia_validate_cis(link->handle, &cisinfo); 507 ret = pcmcia_validate_cis(link, &cisinfo);
512 if(ret != CS_SUCCESS) { 508 if(ret != CS_SUCCESS) {
513 cs_error(link->handle, GetTupleData, ret); 509 cs_error(link, GetTupleData, ret);
514 } else { 510 } else {
515 DEBUG(2, "ValidateCIS found %d chains", cisinfo.Chains); 511 DEBUG(2, "ValidateCIS found %d chains", cisinfo.Chains);
516 } 512 }
@@ -538,7 +534,7 @@ static void pcmciamtd_config(dev_link_t *link)
538 req.Attributes |= (dev->pcmcia_map.bankwidth == 1) ? WIN_DATA_WIDTH_8 : WIN_DATA_WIDTH_16; 534 req.Attributes |= (dev->pcmcia_map.bankwidth == 1) ? WIN_DATA_WIDTH_8 : WIN_DATA_WIDTH_16;
539 req.Base = 0; 535 req.Base = 0;
540 req.AccessSpeed = mem_speed; 536 req.AccessSpeed = mem_speed;
541 link->win = (window_handle_t)link->handle; 537 link->win = (window_handle_t)link;
542 req.Size = (force_size) ? force_size << 20 : MAX_PCMCIA_ADDR; 538 req.Size = (force_size) ? force_size << 20 : MAX_PCMCIA_ADDR;
543 dev->win_size = 0; 539 dev->win_size = 0;
544 540
@@ -546,7 +542,7 @@ static void pcmciamtd_config(dev_link_t *link)
546 int ret; 542 int ret;
547 DEBUG(2, "requesting window with size = %dKiB memspeed = %d", 543 DEBUG(2, "requesting window with size = %dKiB memspeed = %d",
548 req.Size >> 10, req.AccessSpeed); 544 req.Size >> 10, req.AccessSpeed);
549 ret = pcmcia_request_window(&link->handle, &req, &link->win); 545 ret = pcmcia_request_window(&link, &req, &link->win);
550 DEBUG(2, "ret = %d dev->win_size = %d", ret, dev->win_size); 546 DEBUG(2, "ret = %d dev->win_size = %d", ret, dev->win_size);
551 if(ret) { 547 if(ret) {
552 req.Size >>= 1; 548 req.Size >>= 1;
@@ -562,19 +558,19 @@ static void pcmciamtd_config(dev_link_t *link)
562 if(!dev->win_size) { 558 if(!dev->win_size) {
563 err("Cant allocate memory window"); 559 err("Cant allocate memory window");
564 pcmciamtd_release(link); 560 pcmciamtd_release(link);
565 return; 561 return -ENODEV;
566 } 562 }
567 DEBUG(1, "Allocated a window of %dKiB", dev->win_size >> 10); 563 DEBUG(1, "Allocated a window of %dKiB", dev->win_size >> 10);
568 564
569 /* Get write protect status */ 565 /* Get write protect status */
570 CS_CHECK(GetStatus, pcmcia_get_status(link->handle, &status)); 566 CS_CHECK(GetStatus, pcmcia_get_status(link, &status));
571 DEBUG(2, "status value: 0x%x window handle = 0x%8.8lx", 567 DEBUG(2, "status value: 0x%x window handle = 0x%8.8lx",
572 status.CardState, (unsigned long)link->win); 568 status.CardState, (unsigned long)link->win);
573 dev->win_base = ioremap(req.Base, req.Size); 569 dev->win_base = ioremap(req.Base, req.Size);
574 if(!dev->win_base) { 570 if(!dev->win_base) {
575 err("ioremap(%lu, %u) failed", req.Base, req.Size); 571 err("ioremap(%lu, %u) failed", req.Base, req.Size);
576 pcmciamtd_release(link); 572 pcmciamtd_release(link);
577 return; 573 return -ENODEV;
578 } 574 }
579 DEBUG(1, "mapped window dev = %p req.base = 0x%lx base = %p size = 0x%x", 575 DEBUG(1, "mapped window dev = %p req.base = 0x%lx base = %p size = 0x%x",
580 dev, req.Base, dev->win_base, req.Size); 576 dev, req.Base, dev->win_base, req.Size);
@@ -584,17 +580,14 @@ static void pcmciamtd_config(dev_link_t *link)
584 dev->pcmcia_map.map_priv_2 = (unsigned long)link->win; 580 dev->pcmcia_map.map_priv_2 = (unsigned long)link->win;
585 581
586 DEBUG(2, "Getting configuration"); 582 DEBUG(2, "Getting configuration");
587 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link->handle, &t)); 583 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link, &t));
588 DEBUG(2, "Vcc = %d Vpp1 = %d Vpp2 = %d", t.Vcc, t.Vpp1, t.Vpp2); 584 DEBUG(2, "Vcc = %d Vpp1 = %d Vpp2 = %d", t.Vcc, t.Vpp1, t.Vpp2);
589 dev->vpp = (vpp) ? vpp : t.Vpp1; 585 dev->vpp = (vpp) ? vpp : t.Vpp1;
590 link->conf.Attributes = 0; 586 link->conf.Attributes = 0;
591 link->conf.Vcc = t.Vcc;
592 if(setvpp == 2) { 587 if(setvpp == 2) {
593 link->conf.Vpp1 = dev->vpp; 588 link->conf.Vpp = dev->vpp;
594 link->conf.Vpp2 = dev->vpp;
595 } else { 589 } else {
596 link->conf.Vpp1 = 0; 590 link->conf.Vpp = 0;
597 link->conf.Vpp2 = 0;
598 } 591 }
599 592
600 link->conf.IntType = INT_MEMORY; 593 link->conf.IntType = INT_MEMORY;
@@ -606,9 +599,10 @@ static void pcmciamtd_config(dev_link_t *link)
606 link->conf.ConfigIndex = 0; 599 link->conf.ConfigIndex = 0;
607 link->conf.Present = t.Present; 600 link->conf.Present = t.Present;
608 DEBUG(2, "Setting Configuration"); 601 DEBUG(2, "Setting Configuration");
609 ret = pcmcia_request_configuration(link->handle, &link->conf); 602 ret = pcmcia_request_configuration(link, &link->conf);
610 if(ret != CS_SUCCESS) { 603 if(ret != CS_SUCCESS) {
611 cs_error(link->handle, RequestConfiguration, ret); 604 cs_error(link, RequestConfiguration, ret);
605 return -ENODEV;
612 } 606 }
613 607
614 if(mem_type == 1) { 608 if(mem_type == 1) {
@@ -616,7 +610,7 @@ static void pcmciamtd_config(dev_link_t *link)
616 } else if(mem_type == 2) { 610 } else if(mem_type == 2) {
617 mtd = do_map_probe("map_rom", &dev->pcmcia_map); 611 mtd = do_map_probe("map_rom", &dev->pcmcia_map);
618 } else { 612 } else {
619 for(i = 0; i < sizeof(probes) / sizeof(char *); i++) { 613 for(i = 0; i < ARRAY_SIZE(probes); i++) {
620 DEBUG(1, "Trying %s", probes[i]); 614 DEBUG(1, "Trying %s", probes[i]);
621 mtd = do_map_probe(probes[i], &dev->pcmcia_map); 615 mtd = do_map_probe(probes[i], &dev->pcmcia_map);
622 if(mtd) 616 if(mtd)
@@ -629,7 +623,7 @@ static void pcmciamtd_config(dev_link_t *link)
629 if(!mtd) { 623 if(!mtd) {
630 DEBUG(1, "Cant find an MTD"); 624 DEBUG(1, "Cant find an MTD");
631 pcmciamtd_release(link); 625 pcmciamtd_release(link);
632 return; 626 return -ENODEV;
633 } 627 }
634 628
635 dev->mtd_info = mtd; 629 dev->mtd_info = mtd;
@@ -654,7 +648,6 @@ static void pcmciamtd_config(dev_link_t *link)
654 use the faster non-remapping read/write functions */ 648 use the faster non-remapping read/write functions */
655 if(mtd->size <= dev->win_size) { 649 if(mtd->size <= dev->win_size) {
656 DEBUG(1, "Using non remapping memory functions"); 650 DEBUG(1, "Using non remapping memory functions");
657 dev->pcmcia_map.map_priv_1 = (unsigned long)&(dev->link.state);
658 dev->pcmcia_map.map_priv_2 = (unsigned long)dev->win_base; 651 dev->pcmcia_map.map_priv_2 = (unsigned long)dev->win_base;
659 if (dev->pcmcia_map.bankwidth == 1) { 652 if (dev->pcmcia_map.bankwidth == 1) {
660 dev->pcmcia_map.read = pcmcia_read8; 653 dev->pcmcia_map.read = pcmcia_read8;
@@ -672,19 +665,18 @@ static void pcmciamtd_config(dev_link_t *link)
672 dev->mtd_info = NULL; 665 dev->mtd_info = NULL;
673 err("Couldnt register MTD device"); 666 err("Couldnt register MTD device");
674 pcmciamtd_release(link); 667 pcmciamtd_release(link);
675 return; 668 return -ENODEV;
676 } 669 }
677 snprintf(dev->node.dev_name, sizeof(dev->node.dev_name), "mtd%d", mtd->index); 670 snprintf(dev->node.dev_name, sizeof(dev->node.dev_name), "mtd%d", mtd->index);
678 info("mtd%d: %s", mtd->index, mtd->name); 671 info("mtd%d: %s", mtd->index, mtd->name);
679 link->state &= ~DEV_CONFIG_PENDING; 672 link->dev_node = &dev->node;
680 link->dev = &dev->node; 673 return 0;
681 return;
682 674
683 cs_failed: 675 cs_failed:
684 cs_error(link->handle, last_fn, last_ret); 676 cs_error(link, last_fn, last_ret);
685 err("CS Error, exiting"); 677 err("CS Error, exiting");
686 pcmciamtd_release(link); 678 pcmciamtd_release(link);
687 return; 679 return -ENODEV;
688} 680}
689 681
690 682
@@ -713,21 +705,18 @@ static int pcmciamtd_resume(struct pcmcia_device *dev)
713 * when the device is released. 705 * when the device is released.
714 */ 706 */
715 707
716static void pcmciamtd_detach(struct pcmcia_device *p_dev) 708static void pcmciamtd_detach(struct pcmcia_device *link)
717{ 709{
718 dev_link_t *link = dev_to_instance(p_dev); 710 struct pcmciamtd_dev *dev = link->priv;
719 711
720 DEBUG(3, "link=0x%p", link); 712 DEBUG(3, "link=0x%p", link);
721 713
722 if(link->state & DEV_CONFIG) { 714 if(dev->mtd_info) {
723 struct pcmciamtd_dev *dev = link->priv; 715 del_mtd_device(dev->mtd_info);
724 if(dev->mtd_info) { 716 info("mtd%d: Removed", dev->mtd_info->index);
725 del_mtd_device(dev->mtd_info);
726 info("mtd%d: Removed", dev->mtd_info->index);
727 }
728
729 pcmciamtd_release(link);
730 } 717 }
718
719 pcmciamtd_release(link);
731} 720}
732 721
733 722
@@ -736,10 +725,9 @@ static void pcmciamtd_detach(struct pcmcia_device *p_dev)
736 * with Card Services. 725 * with Card Services.
737 */ 726 */
738 727
739static int pcmciamtd_attach(struct pcmcia_device *p_dev) 728static int pcmciamtd_probe(struct pcmcia_device *link)
740{ 729{
741 struct pcmciamtd_dev *dev; 730 struct pcmciamtd_dev *dev;
742 dev_link_t *link;
743 731
744 /* Create new memory card device */ 732 /* Create new memory card device */
745 dev = kmalloc(sizeof(*dev), GFP_KERNEL); 733 dev = kmalloc(sizeof(*dev), GFP_KERNEL);
@@ -747,20 +735,13 @@ static int pcmciamtd_attach(struct pcmcia_device *p_dev)
747 DEBUG(1, "dev=0x%p", dev); 735 DEBUG(1, "dev=0x%p", dev);
748 736
749 memset(dev, 0, sizeof(*dev)); 737 memset(dev, 0, sizeof(*dev));
750 link = &dev->link; 738 dev->p_dev = link;
751 link->priv = dev; 739 link->priv = dev;
752 740
753 link->conf.Attributes = 0; 741 link->conf.Attributes = 0;
754 link->conf.IntType = INT_MEMORY; 742 link->conf.IntType = INT_MEMORY;
755 743
756 link->next = NULL; 744 return pcmciamtd_config(link);
757 link->handle = p_dev;
758 p_dev->instance = link;
759
760 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
761 pcmciamtd_config(link);
762
763 return 0;
764} 745}
765 746
766static struct pcmcia_device_id pcmciamtd_ids[] = { 747static struct pcmcia_device_id pcmciamtd_ids[] = {
@@ -794,7 +775,7 @@ static struct pcmcia_driver pcmciamtd_driver = {
794 .drv = { 775 .drv = {
795 .name = "pcmciamtd" 776 .name = "pcmciamtd"
796 }, 777 },
797 .probe = pcmciamtd_attach, 778 .probe = pcmciamtd_probe,
798 .remove = pcmciamtd_detach, 779 .remove = pcmciamtd_detach,
799 .owner = THIS_MODULE, 780 .owner = THIS_MODULE,
800 .id_table = pcmciamtd_ids, 781 .id_table = pcmciamtd_ids,
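The pcmciamtd rework above tracks the 2006 PCMCIA core changes: the driver-owned dev_link_t is gone, probe()/config() operate directly on struct pcmcia_device and return an int, card presence is tested with pcmcia_dev_present(), and teardown goes through pcmcia_disable_device(). A rough sketch of the resulting probe/detach shape, with hypothetical names and the window/configuration setup elided:

    #include <linux/slab.h>
    #include <pcmcia/ds.h>

    struct example_dev {
            struct pcmcia_device *p_dev;
    };

    static int example_probe(struct pcmcia_device *link)
    {
            struct example_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

            if (!dev)
                    return -ENOMEM;
            dev->p_dev = link;
            link->priv = dev;
            /* ...request a memory window, request configuration, register MTD...
             * returning nonzero here tells the core the probe failed. */
            return 0;
    }

    static void example_detach(struct pcmcia_device *link)
    {
            struct example_dev *dev = link->priv;

            /* No DEV_CONFIG state flag any more; tear down unconditionally. */
            pcmcia_disable_device(link);
            kfree(dev);
    }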
diff --git a/drivers/mtd/maps/redwood.c b/drivers/mtd/maps/redwood.c
index 5b76ed886185..50b14033613f 100644
--- a/drivers/mtd/maps/redwood.c
+++ b/drivers/mtd/maps/redwood.c
@@ -121,8 +121,7 @@ struct map_info redwood_flash_map = {
121}; 121};
122 122
123 123
124#define NUM_REDWOOD_FLASH_PARTITIONS \ 124#define NUM_REDWOOD_FLASH_PARTITIONS ARRAY_SIZE(redwood_flash_partitions)
125 (sizeof(redwood_flash_partitions)/sizeof(redwood_flash_partitions[0]))
126 125
127static struct mtd_info *redwood_mtd; 126static struct mtd_info *redwood_mtd;
128 127
diff --git a/drivers/mtd/maps/sbc8240.c b/drivers/mtd/maps/sbc8240.c
index 225cdd9ba5b2..350286dc1d2e 100644
--- a/drivers/mtd/maps/sbc8240.c
+++ b/drivers/mtd/maps/sbc8240.c
@@ -66,7 +66,7 @@ static struct map_info sbc8240_map[2] = {
66 } 66 }
67}; 67};
68 68
69#define NUM_FLASH_BANKS (sizeof(sbc8240_map) / sizeof(struct map_info)) 69#define NUM_FLASH_BANKS ARRAY_SIZE(sbc8240_map)
70 70
71/* 71/*
72 * The following defines the partition layout of SBC8240 boards. 72 * The following defines the partition layout of SBC8240 boards.
@@ -125,8 +125,6 @@ static struct mtd_partition sbc8240_fs_partitions [] = {
125 } 125 }
126}; 126};
127 127
128#define NB_OF(x) (sizeof (x) / sizeof (x[0]))
129
130/* trivial struct to describe partition information */ 128/* trivial struct to describe partition information */
131struct mtd_part_def 129struct mtd_part_def
132{ 130{
@@ -190,10 +188,10 @@ int __init init_sbc8240_mtd (void)
190#ifdef CONFIG_MTD_PARTITIONS 188#ifdef CONFIG_MTD_PARTITIONS
191 sbc8240_part_banks[0].mtd_part = sbc8240_uboot_partitions; 189 sbc8240_part_banks[0].mtd_part = sbc8240_uboot_partitions;
192 sbc8240_part_banks[0].type = "static image"; 190 sbc8240_part_banks[0].type = "static image";
193 sbc8240_part_banks[0].nums = NB_OF(sbc8240_uboot_partitions); 191 sbc8240_part_banks[0].nums = ARRAY_SIZE(sbc8240_uboot_partitions);
194 sbc8240_part_banks[1].mtd_part = sbc8240_fs_partitions; 192 sbc8240_part_banks[1].mtd_part = sbc8240_fs_partitions;
195 sbc8240_part_banks[1].type = "static file system"; 193 sbc8240_part_banks[1].type = "static file system";
196 sbc8240_part_banks[1].nums = NB_OF(sbc8240_fs_partitions); 194 sbc8240_part_banks[1].nums = ARRAY_SIZE(sbc8240_fs_partitions);
197 195
198 for (i = 0; i < NUM_FLASH_BANKS; i++) { 196 for (i = 0; i < NUM_FLASH_BANKS; i++) {
199 197
diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c
index ed92afadd8a9..e8c130e1efd3 100644
--- a/drivers/mtd/maps/sc520cdp.c
+++ b/drivers/mtd/maps/sc520cdp.c
@@ -107,7 +107,7 @@ static struct map_info sc520cdp_map[] = {
107 }, 107 },
108}; 108};
109 109
110#define NUM_FLASH_BANKS (sizeof(sc520cdp_map)/sizeof(struct map_info)) 110#define NUM_FLASH_BANKS ARRAY_SIZE(sc520cdp_map)
111 111
112static struct mtd_info *mymtd[NUM_FLASH_BANKS]; 112static struct mtd_info *mymtd[NUM_FLASH_BANKS];
113static struct mtd_info *merged_mtd; 113static struct mtd_info *merged_mtd;
diff --git a/drivers/mtd/maps/scx200_docflash.c b/drivers/mtd/maps/scx200_docflash.c
index 2c91dff8bb60..28b8a571a91a 100644
--- a/drivers/mtd/maps/scx200_docflash.c
+++ b/drivers/mtd/maps/scx200_docflash.c
@@ -70,7 +70,7 @@ static struct mtd_partition partition_info[] = {
70 .size = 0x80000 70 .size = 0x80000
71 }, 71 },
72}; 72};
73#define NUM_PARTITIONS (sizeof(partition_info)/sizeof(partition_info[0])) 73#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
74#endif 74#endif
75 75
76 76
diff --git a/drivers/mtd/maps/sharpsl-flash.c b/drivers/mtd/maps/sharpsl-flash.c
index 999f4bb3d845..12fe53c0d2fc 100644
--- a/drivers/mtd/maps/sharpsl-flash.c
+++ b/drivers/mtd/maps/sharpsl-flash.c
@@ -49,8 +49,6 @@ static struct mtd_partition sharpsl_partitions[1] = {
49 } 49 }
50}; 50};
51 51
52#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
53
54int __init init_sharpsl(void) 52int __init init_sharpsl(void)
55{ 53{
56 struct mtd_partition *parts; 54 struct mtd_partition *parts;
@@ -92,7 +90,7 @@ int __init init_sharpsl(void)
92 } 90 }
93 91
94 parts = sharpsl_partitions; 92 parts = sharpsl_partitions;
95 nb_parts = NB_OF(sharpsl_partitions); 93 nb_parts = ARRAY_SIZE(sharpsl_partitions);
96 94
97 printk(KERN_NOTICE "Using %s partision definition\n", part_type); 95 printk(KERN_NOTICE "Using %s partision definition\n", part_type);
98 add_mtd_partitions(mymtd, parts, nb_parts); 96 add_mtd_partitions(mymtd, parts, nb_parts);
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c
index 4b372bcb17f1..a7422c200567 100644
--- a/drivers/mtd/maps/ts5500_flash.c
+++ b/drivers/mtd/maps/ts5500_flash.c
@@ -64,7 +64,7 @@ static struct mtd_partition ts5500_partitions[] = {
64 } 64 }
65}; 65};
66 66
67#define NUM_PARTITIONS (sizeof(ts5500_partitions)/sizeof(struct mtd_partition)) 67#define NUM_PARTITIONS ARRAY_SIZE(ts5500_partitions)
68 68
69static struct mtd_info *mymtd; 69static struct mtd_info *mymtd;
70 70
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 79d92808b766..f7264dc2ac9b 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -37,7 +37,7 @@ struct mtd_partition uclinux_romfs[] = {
37 { .name = "ROMfs" } 37 { .name = "ROMfs" }
38}; 38};
39 39
40#define NUM_PARTITIONS (sizeof(uclinux_romfs) / sizeof(uclinux_romfs[0])) 40#define NUM_PARTITIONS ARRAY_SIZE(uclinux_romfs)
41 41
42/****************************************************************************/ 42/****************************************************************************/
43 43
diff --git a/drivers/mtd/maps/vmax301.c b/drivers/mtd/maps/vmax301.c
index e0063941c0df..b3e487395435 100644
--- a/drivers/mtd/maps/vmax301.c
+++ b/drivers/mtd/maps/vmax301.c
@@ -182,7 +182,7 @@ int __init init_vmax301(void)
182 } 182 }
183 } 183 }
184 184
185 if (!vmax_mtd[1] && !vmax_mtd[2]) { 185 if (!vmax_mtd[0] && !vmax_mtd[1]) {
186 iounmap((void *)iomapadr); 186 iounmap((void *)iomapadr);
187 return -ENXIO; 187 return -ENXIO;
188 } 188 }
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 840dd66ce2dc..458d3c8ae1ee 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -19,12 +19,12 @@
19#include <linux/spinlock.h> 19#include <linux/spinlock.h>
20#include <linux/hdreg.h> 20#include <linux/hdreg.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <asm/semaphore.h> 22#include <linux/mutex.h>
23#include <asm/uaccess.h> 23#include <asm/uaccess.h>
24 24
25static LIST_HEAD(blktrans_majors); 25static LIST_HEAD(blktrans_majors);
26 26
27extern struct semaphore mtd_table_mutex; 27extern struct mutex mtd_table_mutex;
28extern struct mtd_info *mtd_table[]; 28extern struct mtd_info *mtd_table[];
29 29
30struct mtd_blkcore_priv { 30struct mtd_blkcore_priv {
@@ -122,9 +122,9 @@ static int mtd_blktrans_thread(void *arg)
122 122
123 spin_unlock_irq(rq->queue_lock); 123 spin_unlock_irq(rq->queue_lock);
124 124
125 down(&dev->sem); 125 mutex_lock(&dev->lock);
126 res = do_blktrans_request(tr, dev, req); 126 res = do_blktrans_request(tr, dev, req);
127 up(&dev->sem); 127 mutex_unlock(&dev->lock);
128 128
129 spin_lock_irq(rq->queue_lock); 129 spin_lock_irq(rq->queue_lock);
130 130
@@ -235,8 +235,8 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
235 int last_devnum = -1; 235 int last_devnum = -1;
236 struct gendisk *gd; 236 struct gendisk *gd;
237 237
238 if (!down_trylock(&mtd_table_mutex)) { 238 if (!!mutex_trylock(&mtd_table_mutex)) {
239 up(&mtd_table_mutex); 239 mutex_unlock(&mtd_table_mutex);
240 BUG(); 240 BUG();
241 } 241 }
242 242
@@ -267,7 +267,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
267 return -EBUSY; 267 return -EBUSY;
268 } 268 }
269 269
270 init_MUTEX(&new->sem); 270 mutex_init(&new->lock);
271 list_add_tail(&new->list, &tr->devs); 271 list_add_tail(&new->list, &tr->devs);
272 added: 272 added:
273 if (!tr->writesect) 273 if (!tr->writesect)
@@ -313,8 +313,8 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
313 313
314int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old) 314int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
315{ 315{
316 if (!down_trylock(&mtd_table_mutex)) { 316 if (!!mutex_trylock(&mtd_table_mutex)) {
317 up(&mtd_table_mutex); 317 mutex_unlock(&mtd_table_mutex);
318 BUG(); 318 BUG();
319 } 319 }
320 320
@@ -378,14 +378,14 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
378 378
379 memset(tr->blkcore_priv, 0, sizeof(*tr->blkcore_priv)); 379 memset(tr->blkcore_priv, 0, sizeof(*tr->blkcore_priv));
380 380
381 down(&mtd_table_mutex); 381 mutex_lock(&mtd_table_mutex);
382 382
383 ret = register_blkdev(tr->major, tr->name); 383 ret = register_blkdev(tr->major, tr->name);
384 if (ret) { 384 if (ret) {
385 printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n", 385 printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
386 tr->name, tr->major, ret); 386 tr->name, tr->major, ret);
387 kfree(tr->blkcore_priv); 387 kfree(tr->blkcore_priv);
388 up(&mtd_table_mutex); 388 mutex_unlock(&mtd_table_mutex);
389 return ret; 389 return ret;
390 } 390 }
391 spin_lock_init(&tr->blkcore_priv->queue_lock); 391 spin_lock_init(&tr->blkcore_priv->queue_lock);
@@ -396,7 +396,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
396 if (!tr->blkcore_priv->rq) { 396 if (!tr->blkcore_priv->rq) {
397 unregister_blkdev(tr->major, tr->name); 397 unregister_blkdev(tr->major, tr->name);
398 kfree(tr->blkcore_priv); 398 kfree(tr->blkcore_priv);
399 up(&mtd_table_mutex); 399 mutex_unlock(&mtd_table_mutex);
400 return -ENOMEM; 400 return -ENOMEM;
401 } 401 }
402 402
@@ -407,7 +407,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
407 blk_cleanup_queue(tr->blkcore_priv->rq); 407 blk_cleanup_queue(tr->blkcore_priv->rq);
408 unregister_blkdev(tr->major, tr->name); 408 unregister_blkdev(tr->major, tr->name);
409 kfree(tr->blkcore_priv); 409 kfree(tr->blkcore_priv);
410 up(&mtd_table_mutex); 410 mutex_unlock(&mtd_table_mutex);
411 return ret; 411 return ret;
412 } 412 }
413 413
@@ -419,7 +419,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
419 tr->add_mtd(tr, mtd_table[i]); 419 tr->add_mtd(tr, mtd_table[i]);
420 } 420 }
421 421
422 up(&mtd_table_mutex); 422 mutex_unlock(&mtd_table_mutex);
423 423
424 return 0; 424 return 0;
425} 425}
@@ -428,7 +428,7 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
428{ 428{
429 struct list_head *this, *next; 429 struct list_head *this, *next;
430 430
431 down(&mtd_table_mutex); 431 mutex_lock(&mtd_table_mutex);
432 432
433 /* Clean up the kernel thread */ 433 /* Clean up the kernel thread */
434 tr->blkcore_priv->exiting = 1; 434 tr->blkcore_priv->exiting = 1;
@@ -446,7 +446,7 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
446 blk_cleanup_queue(tr->blkcore_priv->rq); 446 blk_cleanup_queue(tr->blkcore_priv->rq);
447 unregister_blkdev(tr->major, tr->name); 447 unregister_blkdev(tr->major, tr->name);
448 448
449 up(&mtd_table_mutex); 449 mutex_unlock(&mtd_table_mutex);
450 450
451 kfree(tr->blkcore_priv); 451 kfree(tr->blkcore_priv);
452 452
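The mtd_blkdevs/mtdblock/mtdcore hunks convert semaphores that were only ever used as mutexes to the struct mutex API (DECLARE_MUTEX becomes DEFINE_MUTEX for statics, init_MUTEX becomes mutex_init, down/up become mutex_lock/mutex_unlock). Note the sense difference in the lock-held assertion above: down_trylock() returns 0 on success, while mutex_trylock() returns nonzero on success, hence the `!down_trylock(...)` test becoming `!!mutex_trylock(...)`. A minimal sketch of the new idiom with a hypothetical structure:

    #include <linux/mutex.h>

    struct example_blkdev {
            struct mutex lock;              /* was: struct semaphore sem */
    };

    static void example_blkdev_init(struct example_blkdev *dev)
    {
            mutex_init(&dev->lock);         /* was: init_MUTEX(&dev->sem) */
    }

    static void example_blkdev_request(struct example_blkdev *dev)
    {
            mutex_lock(&dev->lock);         /* was: down(&dev->sem) */
            /* ...serve the request with the device locked... */
            mutex_unlock(&dev->lock);       /* was: up(&dev->sem) */
    }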
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index e84756644fd1..2cef280e388c 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -19,11 +19,13 @@
19 19
20#include <linux/mtd/mtd.h> 20#include <linux/mtd/mtd.h>
21#include <linux/mtd/blktrans.h> 21#include <linux/mtd/blktrans.h>
22#include <linux/mutex.h>
23
22 24
23static struct mtdblk_dev { 25static struct mtdblk_dev {
24 struct mtd_info *mtd; 26 struct mtd_info *mtd;
25 int count; 27 int count;
26 struct semaphore cache_sem; 28 struct mutex cache_mutex;
27 unsigned char *cache_data; 29 unsigned char *cache_data;
28 unsigned long cache_offset; 30 unsigned long cache_offset;
29 unsigned int cache_size; 31 unsigned int cache_size;
@@ -284,7 +286,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
284 mtdblk->count = 1; 286 mtdblk->count = 1;
285 mtdblk->mtd = mtd; 287 mtdblk->mtd = mtd;
286 288
287 init_MUTEX (&mtdblk->cache_sem); 289 mutex_init(&mtdblk->cache_mutex);
288 mtdblk->cache_state = STATE_EMPTY; 290 mtdblk->cache_state = STATE_EMPTY;
289 if ((mtdblk->mtd->flags & MTD_CAP_RAM) != MTD_CAP_RAM && 291 if ((mtdblk->mtd->flags & MTD_CAP_RAM) != MTD_CAP_RAM &&
290 mtdblk->mtd->erasesize) { 292 mtdblk->mtd->erasesize) {
@@ -306,9 +308,9 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
306 308
307 DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n"); 309 DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");
308 310
309 down(&mtdblk->cache_sem); 311 mutex_lock(&mtdblk->cache_mutex);
310 write_cached_data(mtdblk); 312 write_cached_data(mtdblk);
311 up(&mtdblk->cache_sem); 313 mutex_unlock(&mtdblk->cache_mutex);
312 314
313 if (!--mtdblk->count) { 315 if (!--mtdblk->count) {
314 /* It was the last usage. Free the device */ 316 /* It was the last usage. Free the device */
@@ -327,9 +329,9 @@ static int mtdblock_flush(struct mtd_blktrans_dev *dev)
327{ 329{
328 struct mtdblk_dev *mtdblk = mtdblks[dev->devnum]; 330 struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
329 331
330 down(&mtdblk->cache_sem); 332 mutex_lock(&mtdblk->cache_mutex);
331 write_cached_data(mtdblk); 333 write_cached_data(mtdblk);
332 up(&mtdblk->cache_sem); 334 mutex_unlock(&mtdblk->cache_mutex);
333 335
334 if (mtdblk->mtd->sync) 336 if (mtdblk->mtd->sync)
335 mtdblk->mtd->sync(mtdblk->mtd); 337 mtdblk->mtd->sync(mtdblk->mtd);
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index dade02ab0687..9905870f56e5 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -19,15 +19,13 @@
19#include <linux/ioctl.h> 19#include <linux/ioctl.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/mtd/compatmac.h> 21#include <linux/mtd/compatmac.h>
22#ifdef CONFIG_PROC_FS
23#include <linux/proc_fs.h> 22#include <linux/proc_fs.h>
24#endif
25 23
26#include <linux/mtd/mtd.h> 24#include <linux/mtd/mtd.h>
27 25
28/* These are exported solely for the purpose of mtd_blkdevs.c. You 26/* These are exported solely for the purpose of mtd_blkdevs.c. You
29 should not use them for _anything_ else */ 27 should not use them for _anything_ else */
30DECLARE_MUTEX(mtd_table_mutex); 28DEFINE_MUTEX(mtd_table_mutex);
31struct mtd_info *mtd_table[MAX_MTD_DEVICES]; 29struct mtd_info *mtd_table[MAX_MTD_DEVICES];
32 30
33EXPORT_SYMBOL_GPL(mtd_table_mutex); 31EXPORT_SYMBOL_GPL(mtd_table_mutex);
@@ -49,7 +47,7 @@ int add_mtd_device(struct mtd_info *mtd)
49{ 47{
50 int i; 48 int i;
51 49
52 down(&mtd_table_mutex); 50 mutex_lock(&mtd_table_mutex);
53 51
54 for (i=0; i < MAX_MTD_DEVICES; i++) 52 for (i=0; i < MAX_MTD_DEVICES; i++)
55 if (!mtd_table[i]) { 53 if (!mtd_table[i]) {
@@ -67,7 +65,7 @@ int add_mtd_device(struct mtd_info *mtd)
67 not->add(mtd); 65 not->add(mtd);
68 } 66 }
69 67
70 up(&mtd_table_mutex); 68 mutex_unlock(&mtd_table_mutex);
71 /* We _know_ we aren't being removed, because 69 /* We _know_ we aren't being removed, because
72 our caller is still holding us here. So none 70 our caller is still holding us here. So none
73 of this try_ nonsense, and no bitching about it 71 of this try_ nonsense, and no bitching about it
@@ -76,7 +74,7 @@ int add_mtd_device(struct mtd_info *mtd)
76 return 0; 74 return 0;
77 } 75 }
78 76
79 up(&mtd_table_mutex); 77 mutex_unlock(&mtd_table_mutex);
80 return 1; 78 return 1;
81} 79}
82 80
@@ -94,7 +92,7 @@ int del_mtd_device (struct mtd_info *mtd)
94{ 92{
95 int ret; 93 int ret;
96 94
97 down(&mtd_table_mutex); 95 mutex_lock(&mtd_table_mutex);
98 96
99 if (mtd_table[mtd->index] != mtd) { 97 if (mtd_table[mtd->index] != mtd) {
100 ret = -ENODEV; 98 ret = -ENODEV;
@@ -118,7 +116,7 @@ int del_mtd_device (struct mtd_info *mtd)
118 ret = 0; 116 ret = 0;
119 } 117 }
120 118
121 up(&mtd_table_mutex); 119 mutex_unlock(&mtd_table_mutex);
122 return ret; 120 return ret;
123} 121}
124 122
@@ -135,7 +133,7 @@ void register_mtd_user (struct mtd_notifier *new)
135{ 133{
136 int i; 134 int i;
137 135
138 down(&mtd_table_mutex); 136 mutex_lock(&mtd_table_mutex);
139 137
140 list_add(&new->list, &mtd_notifiers); 138 list_add(&new->list, &mtd_notifiers);
141 139
@@ -145,7 +143,7 @@ void register_mtd_user (struct mtd_notifier *new)
145 if (mtd_table[i]) 143 if (mtd_table[i])
146 new->add(mtd_table[i]); 144 new->add(mtd_table[i]);
147 145
148 up(&mtd_table_mutex); 146 mutex_unlock(&mtd_table_mutex);
149} 147}
150 148
151/** 149/**
@@ -162,7 +160,7 @@ int unregister_mtd_user (struct mtd_notifier *old)
162{ 160{
163 int i; 161 int i;
164 162
165 down(&mtd_table_mutex); 163 mutex_lock(&mtd_table_mutex);
166 164
167 module_put(THIS_MODULE); 165 module_put(THIS_MODULE);
168 166
@@ -171,7 +169,7 @@ int unregister_mtd_user (struct mtd_notifier *old)
171 old->remove(mtd_table[i]); 169 old->remove(mtd_table[i]);
172 170
173 list_del(&old->list); 171 list_del(&old->list);
174 up(&mtd_table_mutex); 172 mutex_unlock(&mtd_table_mutex);
175 return 0; 173 return 0;
176} 174}
177 175
@@ -193,7 +191,7 @@ struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
193 struct mtd_info *ret = NULL; 191 struct mtd_info *ret = NULL;
194 int i; 192 int i;
195 193
196 down(&mtd_table_mutex); 194 mutex_lock(&mtd_table_mutex);
197 195
198 if (num == -1) { 196 if (num == -1) {
199 for (i=0; i< MAX_MTD_DEVICES; i++) 197 for (i=0; i< MAX_MTD_DEVICES; i++)
@@ -211,7 +209,7 @@ struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
211 if (ret) 209 if (ret)
212 ret->usecount++; 210 ret->usecount++;
213 211
214 up(&mtd_table_mutex); 212 mutex_unlock(&mtd_table_mutex);
215 return ret; 213 return ret;
216} 214}
217 215
@@ -219,9 +217,9 @@ void put_mtd_device(struct mtd_info *mtd)
219{ 217{
220 int c; 218 int c;
221 219
222 down(&mtd_table_mutex); 220 mutex_lock(&mtd_table_mutex);
223 c = --mtd->usecount; 221 c = --mtd->usecount;
224 up(&mtd_table_mutex); 222 mutex_unlock(&mtd_table_mutex);
225 BUG_ON(c < 0); 223 BUG_ON(c < 0);
226 224
227 module_put(mtd->owner); 225 module_put(mtd->owner);
@@ -296,10 +294,11 @@ EXPORT_SYMBOL(unregister_mtd_user);
296EXPORT_SYMBOL(default_mtd_writev); 294EXPORT_SYMBOL(default_mtd_writev);
297EXPORT_SYMBOL(default_mtd_readv); 295EXPORT_SYMBOL(default_mtd_readv);
298 296
297#ifdef CONFIG_PROC_FS
298
299/*====================================================================*/ 299/*====================================================================*/
300/* Support for /proc/mtd */ 300/* Support for /proc/mtd */
301 301
302#ifdef CONFIG_PROC_FS
303static struct proc_dir_entry *proc_mtd; 302static struct proc_dir_entry *proc_mtd;
304 303
305static inline int mtd_proc_info (char *buf, int i) 304static inline int mtd_proc_info (char *buf, int i)
@@ -319,7 +318,7 @@ static int mtd_read_proc (char *page, char **start, off_t off, int count,
319 int len, l, i; 318 int len, l, i;
320 off_t begin = 0; 319 off_t begin = 0;
321 320
322 down(&mtd_table_mutex); 321 mutex_lock(&mtd_table_mutex);
323 322
324 len = sprintf(page, "dev: size erasesize name\n"); 323 len = sprintf(page, "dev: size erasesize name\n");
325 for (i=0; i< MAX_MTD_DEVICES; i++) { 324 for (i=0; i< MAX_MTD_DEVICES; i++) {
@@ -337,38 +336,34 @@ static int mtd_read_proc (char *page, char **start, off_t off, int count,
337 *eof = 1; 336 *eof = 1;
338 337
339done: 338done:
340 up(&mtd_table_mutex); 339 mutex_unlock(&mtd_table_mutex);
341 if (off >= len+begin) 340 if (off >= len+begin)
342 return 0; 341 return 0;
343 *start = page + (off-begin); 342 *start = page + (off-begin);
344 return ((count < begin+len-off) ? count : begin+len-off); 343 return ((count < begin+len-off) ? count : begin+len-off);
345} 344}
346 345
347#endif /* CONFIG_PROC_FS */
348
349/*====================================================================*/ 346/*====================================================================*/
350/* Init code */ 347/* Init code */
351 348
352static int __init init_mtd(void) 349static int __init init_mtd(void)
353{ 350{
354#ifdef CONFIG_PROC_FS
355 if ((proc_mtd = create_proc_entry( "mtd", 0, NULL ))) 351 if ((proc_mtd = create_proc_entry( "mtd", 0, NULL )))
356 proc_mtd->read_proc = mtd_read_proc; 352 proc_mtd->read_proc = mtd_read_proc;
357#endif
358 return 0; 353 return 0;
359} 354}
360 355
361static void __exit cleanup_mtd(void) 356static void __exit cleanup_mtd(void)
362{ 357{
363#ifdef CONFIG_PROC_FS
364 if (proc_mtd) 358 if (proc_mtd)
365 remove_proc_entry( "mtd", NULL); 359 remove_proc_entry( "mtd", NULL);
366#endif
367} 360}
368 361
369module_init(init_mtd); 362module_init(init_mtd);
370module_exit(cleanup_mtd); 363module_exit(cleanup_mtd);
371 364
365#endif /* CONFIG_PROC_FS */
366
372 367
373MODULE_LICENSE("GPL"); 368MODULE_LICENSE("GPL");
374MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); 369MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 1fc4c134d939..cfe288a6e853 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -178,17 +178,16 @@ config MTD_NAND_DISKONCHIP_BBTWRITE
178 Even if you leave this disabled, you can enable BBT writes at module 178 Even if you leave this disabled, you can enable BBT writes at module
179 load time (assuming you build diskonchip as a module) with the module 179 load time (assuming you build diskonchip as a module) with the module
180 parameter "inftl_bbt_write=1". 180 parameter "inftl_bbt_write=1".
181
182 config MTD_NAND_SHARPSL
183 bool "Support for NAND Flash on Sharp SL Series (C7xx + others)"
184 depends on MTD_NAND && ARCH_PXA
185
186 config MTD_NAND_NANDSIM
187 bool "Support for NAND Flash Simulator"
188 depends on MTD_NAND && MTD_PARTITIONS
189 181
182config MTD_NAND_SHARPSL
183 tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
184 depends on MTD_NAND && ARCH_PXA
185
186config MTD_NAND_NANDSIM
187 tristate "Support for NAND Flash Simulator"
188 depends on MTD_NAND && MTD_PARTITIONS
190 help 189 help
191 The simulator may simulate verious NAND flash chips for the 190 The simulator may simulate verious NAND flash chips for the
192 MTD nand layer. 191 MTD nand layer.
193 192
194endmenu 193endmenu
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 201e1362da14..bde3550910a2 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -55,8 +55,6 @@ static const struct mtd_partition partition_info[] = {
55 .size = MTDPART_SIZ_FULL 55 .size = MTDPART_SIZ_FULL
56 } 56 }
57}; 57};
58#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
59
60 58
61/** 59/**
62 * au_read_byte - read one byte from the chip 60 * au_read_byte - read one byte from the chip
@@ -462,7 +460,7 @@ int __init au1xxx_nand_init (void)
462 } 460 }
463 461
464 /* Register the partitions */ 462 /* Register the partitions */
465 add_mtd_partitions(au1550_mtd, partition_info, NB_OF(partition_info)); 463 add_mtd_partitions(au1550_mtd, partition_info, ARRAY_SIZE(partition_info));
466 464
467 return 0; 465 return 0;
468 466
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 5d222460b42a..95e96fa1fceb 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -80,6 +80,7 @@
80#include <linux/mtd/compatmac.h> 80#include <linux/mtd/compatmac.h>
81#include <linux/interrupt.h> 81#include <linux/interrupt.h>
82#include <linux/bitops.h> 82#include <linux/bitops.h>
83#include <linux/leds.h>
83#include <asm/io.h> 84#include <asm/io.h>
84 85
85#ifdef CONFIG_MTD_PARTITIONS 86#ifdef CONFIG_MTD_PARTITIONS
@@ -515,6 +516,8 @@ static int nand_block_checkbad (struct mtd_info *mtd, loff_t ofs, int getchip, i
515 return nand_isbad_bbt (mtd, ofs, allowbbt); 516 return nand_isbad_bbt (mtd, ofs, allowbbt);
516} 517}
517 518
519DEFINE_LED_TRIGGER(nand_led_trigger);
520
518/* 521/*
519 * Wait for the ready pin, after a command 522 * Wait for the ready pin, after a command
520 * The timeout is catched later. 523 * The timeout is catched later.
@@ -524,12 +527,14 @@ static void nand_wait_ready(struct mtd_info *mtd)
524 struct nand_chip *this = mtd->priv; 527 struct nand_chip *this = mtd->priv;
525 unsigned long timeo = jiffies + 2; 528 unsigned long timeo = jiffies + 2;
526 529
530 led_trigger_event(nand_led_trigger, LED_FULL);
527 /* wait until command is processed or timeout occures */ 531 /* wait until command is processed or timeout occures */
528 do { 532 do {
529 if (this->dev_ready(mtd)) 533 if (this->dev_ready(mtd))
530 return; 534 break;
531 touch_softlockup_watchdog(); 535 touch_softlockup_watchdog();
532 } while (time_before(jiffies, timeo)); 536 } while (time_before(jiffies, timeo));
537 led_trigger_event(nand_led_trigger, LED_OFF);
533} 538}
534 539
535/** 540/**
@@ -817,6 +822,8 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *this, int state)
817 else 822 else
818 timeo += (HZ * 20) / 1000; 823 timeo += (HZ * 20) / 1000;
819 824
825 led_trigger_event(nand_led_trigger, LED_FULL);
826
820 /* Apply this short delay always to ensure that we do wait tWB in 827 /* Apply this short delay always to ensure that we do wait tWB in
821 * any case on any machine. */ 828 * any case on any machine. */
822 ndelay (100); 829 ndelay (100);
@@ -840,6 +847,8 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *this, int state)
840 } 847 }
841 cond_resched(); 848 cond_resched();
842 } 849 }
850 led_trigger_event(nand_led_trigger, LED_OFF);
851
843 status = (int) this->read_byte(mtd); 852 status = (int) this->read_byte(mtd);
844 return status; 853 return status;
845} 854}
@@ -2724,6 +2733,21 @@ void nand_release (struct mtd_info *mtd)
2724EXPORT_SYMBOL_GPL (nand_scan); 2733EXPORT_SYMBOL_GPL (nand_scan);
2725EXPORT_SYMBOL_GPL (nand_release); 2734EXPORT_SYMBOL_GPL (nand_release);
2726 2735
2736
2737static int __init nand_base_init(void)
2738{
2739 led_trigger_register_simple("nand-disk", &nand_led_trigger);
2740 return 0;
2741}
2742
2743static void __exit nand_base_exit(void)
2744{
2745 led_trigger_unregister_simple(nand_led_trigger);
2746}
2747
2748module_init(nand_base_init);
2749module_exit(nand_base_exit);
2750
2727MODULE_LICENSE ("GPL"); 2751MODULE_LICENSE ("GPL");
2728MODULE_AUTHOR ("Steven J. Hill <sjhill@realitydiluted.com>, Thomas Gleixner <tglx@linutronix.de>"); 2752MODULE_AUTHOR ("Steven J. Hill <sjhill@realitydiluted.com>, Thomas Gleixner <tglx@linutronix.de>");
2729MODULE_DESCRIPTION ("Generic NAND flash driver code"); 2753MODULE_DESCRIPTION ("Generic NAND flash driver code");
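nand_base.c above gains a "nand-disk" LED trigger: a named trigger is registered once at module init, and LED_FULL/LED_OFF events bracket the chip-busy waits so any LED bound to that trigger blinks on NAND activity. A condensed sketch of the pattern with hypothetical names:

    #include <linux/module.h>
    #include <linux/leds.h>

    DEFINE_LED_TRIGGER(example_trigger);    /* declares a struct led_trigger pointer */

    static void example_wait_ready(void)
    {
            led_trigger_event(example_trigger, LED_FULL);   /* activity LED on  */
            /* ...poll or sleep until the device reports ready... */
            led_trigger_event(example_trigger, LED_OFF);    /* activity LED off */
    }

    static int __init example_init(void)
    {
            led_trigger_register_simple("example-disk", &example_trigger);
            return 0;
    }

    static void __exit example_exit(void)
    {
            led_trigger_unregister_simple(example_trigger);
    }

    module_init(example_init);
    module_exit(example_exit);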
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index 8815c8dbef2d..c077d2ec9cdd 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -85,10 +85,6 @@ static int parse_redboot_partitions(struct mtd_info *master,
85 85
86 numslots = (master->erasesize / sizeof(struct fis_image_desc)); 86 numslots = (master->erasesize / sizeof(struct fis_image_desc));
87 for (i = 0; i < numslots; i++) { 87 for (i = 0; i < numslots; i++) {
88 if (buf[i].name[0] == 0xff) {
89 i = numslots;
90 break;
91 }
92 if (!memcmp(buf[i].name, "FIS directory", 14)) { 88 if (!memcmp(buf[i].name, "FIS directory", 14)) {
93 /* This is apparently the FIS directory entry for the 89 /* This is apparently the FIS directory entry for the
94 * FIS directory itself. The FIS directory size is 90 * FIS directory itself. The FIS directory size is
@@ -128,7 +124,7 @@ static int parse_redboot_partitions(struct mtd_info *master,
128 struct fis_list *new_fl, **prev; 124 struct fis_list *new_fl, **prev;
129 125
130 if (buf[i].name[0] == 0xff) 126 if (buf[i].name[0] == 0xff)
131 break; 127 continue;
132 if (!redboot_checksum(&buf[i])) 128 if (!redboot_checksum(&buf[i]))
133 break; 129 break;
134 130
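The redboot.c change above fixes FIS directory scanning: an erased slot (name[0] == 0xff) used to terminate the scan early, hiding any valid entries stored after it; now such slots are simply skipped. Sketch of the corrected loop shape (field names as in the patch):

    for (i = 0; i < numslots; i++) {
            if (buf[i].name[0] == 0xff)
                    continue;               /* erased/free slot: skip, keep scanning */
            if (!redboot_checksum(&buf[i]))
                    break;                  /* corrupt entry: stop, as before */
            /* ...add buf[i] to the partition list... */
    }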
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 70f63891b19c..274b0138d442 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -788,7 +788,7 @@ struct vortex_private {
788 int options; /* User-settable misc. driver options. */ 788 int options; /* User-settable misc. driver options. */
789 unsigned int media_override:4, /* Passed-in media type. */ 789 unsigned int media_override:4, /* Passed-in media type. */
790 default_media:4, /* Read from the EEPROM/Wn3_Config. */ 790 default_media:4, /* Read from the EEPROM/Wn3_Config. */
791 full_duplex:1, force_fd:1, autoselect:1, 791 full_duplex:1, autoselect:1,
792 bus_master:1, /* Vortex can only do a fragment bus-m. */ 792 bus_master:1, /* Vortex can only do a fragment bus-m. */
793 full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang */ 793 full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang */
794 flow_ctrl:1, /* Use 802.3x flow control (PAUSE only) */ 794 flow_ctrl:1, /* Use 802.3x flow control (PAUSE only) */
@@ -1633,12 +1633,6 @@ vortex_set_duplex(struct net_device *dev)
1633 ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 1633 ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ?
1634 0x100 : 0), 1634 0x100 : 0),
1635 ioaddr + Wn3_MAC_Ctrl); 1635 ioaddr + Wn3_MAC_Ctrl);
1636
1637 issue_and_wait(dev, TxReset);
1638 /*
1639 * Don't reset the PHY - that upsets autonegotiation during DHCP operations.
1640 */
1641 issue_and_wait(dev, RxReset|0x04);
1642} 1636}
1643 1637
1644static void vortex_check_media(struct net_device *dev, unsigned int init) 1638static void vortex_check_media(struct net_device *dev, unsigned int init)
@@ -1663,7 +1657,7 @@ vortex_up(struct net_device *dev)
1663 struct vortex_private *vp = netdev_priv(dev); 1657 struct vortex_private *vp = netdev_priv(dev);
1664 void __iomem *ioaddr = vp->ioaddr; 1658 void __iomem *ioaddr = vp->ioaddr;
1665 unsigned int config; 1659 unsigned int config;
1666 int i; 1660 int i, mii_reg1, mii_reg5;
1667 1661
1668 if (VORTEX_PCI(vp)) { 1662 if (VORTEX_PCI(vp)) {
1669 pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */ 1663 pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */
@@ -1723,14 +1717,23 @@ vortex_up(struct net_device *dev)
1723 printk(KERN_DEBUG "vortex_up(): writing 0x%x to InternalConfig\n", config); 1717 printk(KERN_DEBUG "vortex_up(): writing 0x%x to InternalConfig\n", config);
1724 iowrite32(config, ioaddr + Wn3_Config); 1718 iowrite32(config, ioaddr + Wn3_Config);
1725 1719
1726 netif_carrier_off(dev);
1727 if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) { 1720 if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
1728 EL3WINDOW(4); 1721 EL3WINDOW(4);
1722 mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR);
1723 mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
1724 vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
1725
1729 vortex_check_media(dev, 1); 1726 vortex_check_media(dev, 1);
1730 } 1727 }
1731 else 1728 else
1732 vortex_set_duplex(dev); 1729 vortex_set_duplex(dev);
1733 1730
1731 issue_and_wait(dev, TxReset);
1732 /*
1733 * Don't reset the PHY - that upsets autonegotiation during DHCP operations.
1734 */
1735 issue_and_wait(dev, RxReset|0x04);
1736
1734 1737
1735 iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD); 1738 iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
1736 1739
@@ -2083,16 +2086,14 @@ vortex_error(struct net_device *dev, int status)
2083 } 2086 }
2084 if (tx_status & 0x14) vp->stats.tx_fifo_errors++; 2087 if (tx_status & 0x14) vp->stats.tx_fifo_errors++;
2085 if (tx_status & 0x38) vp->stats.tx_aborted_errors++; 2088 if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
2089 if (tx_status & 0x08) vp->xstats.tx_max_collisions++;
2086 iowrite8(0, ioaddr + TxStatus); 2090 iowrite8(0, ioaddr + TxStatus);
2087 if (tx_status & 0x30) { /* txJabber or txUnderrun */ 2091 if (tx_status & 0x30) { /* txJabber or txUnderrun */
2088 do_tx_reset = 1; 2092 do_tx_reset = 1;
2089 } else if (tx_status & 0x08) { /* maxCollisions */ 2093 } else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) { /* maxCollisions */
2090 vp->xstats.tx_max_collisions++; 2094 do_tx_reset = 1;
2091 if (vp->drv_flags & MAX_COLLISION_RESET) { 2095 reset_mask = 0x0108; /* Reset interface logic, but not download logic */
2092 do_tx_reset = 1; 2096 } else { /* Merely re-enable the transmitter. */
2093 reset_mask = 0x0108; /* Reset interface logic, but not download logic */
2094 }
2095 } else { /* Merely re-enable the transmitter. */
2096 iowrite16(TxEnable, ioaddr + EL3_CMD); 2097 iowrite16(TxEnable, ioaddr + EL3_CMD);
2097 } 2098 }
2098 } 2099 }
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index ce99845d8266..066e22b01a94 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -539,8 +539,7 @@ rx_status_loop:
539 unsigned buflen; 539 unsigned buflen;
540 540
541 skb = cp->rx_skb[rx_tail].skb; 541 skb = cp->rx_skb[rx_tail].skb;
542 if (!skb) 542 BUG_ON(!skb);
543 BUG();
544 543
545 desc = &cp->rx_ring[rx_tail]; 544 desc = &cp->rx_ring[rx_tail];
546 status = le32_to_cpu(desc->opts1); 545 status = le32_to_cpu(desc->opts1);
@@ -723,8 +722,7 @@ static void cp_tx (struct cp_private *cp)
723 break; 722 break;
724 723
725 skb = cp->tx_skb[tx_tail].skb; 724 skb = cp->tx_skb[tx_tail].skb;
726 if (!skb) 725 BUG_ON(!skb);
727 BUG();
728 726
729 pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping, 727 pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
730 cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE); 728 cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE);
@@ -1550,8 +1548,7 @@ static void cp_get_ethtool_stats (struct net_device *dev,
1550 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort); 1548 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1551 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun); 1549 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1552 tmp_stats[i++] = cp->cp_stats.rx_frags; 1550 tmp_stats[i++] = cp->cp_stats.rx_frags;
1553 if (i != CP_NUM_STATS) 1551 BUG_ON(i != CP_NUM_STATS);
1554 BUG();
1555 1552
1556 pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma); 1553 pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma);
1557} 1554}
@@ -1856,8 +1853,7 @@ static void cp_remove_one (struct pci_dev *pdev)
1856 struct net_device *dev = pci_get_drvdata(pdev); 1853 struct net_device *dev = pci_get_drvdata(pdev);
1857 struct cp_private *cp = netdev_priv(dev); 1854 struct cp_private *cp = netdev_priv(dev);
1858 1855
1859 if (!dev) 1856 BUG_ON(!dev);
1860 BUG();
1861 unregister_netdev(dev); 1857 unregister_netdev(dev);
1862 iounmap(cp->regs); 1858 iounmap(cp->regs);
1863 if (cp->wol_enabled) pci_set_power_state (pdev, PCI_D0); 1859 if (cp->wol_enabled) pci_set_power_state (pdev, PCI_D0);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e20b849a22e8..bdaaad8f2123 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2313,13 +2313,11 @@ config S2IO_NAPI
2313 2313
2314endmenu 2314endmenu
2315 2315
2316if !UML
2317source "drivers/net/tokenring/Kconfig" 2316source "drivers/net/tokenring/Kconfig"
2318 2317
2319source "drivers/net/wireless/Kconfig" 2318source "drivers/net/wireless/Kconfig"
2320 2319
2321source "drivers/net/pcmcia/Kconfig" 2320source "drivers/net/pcmcia/Kconfig"
2322endif
2323 2321
2324source "drivers/net/wan/Kconfig" 2322source "drivers/net/wan/Kconfig"
2325 2323
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 64e2caf3083d..fabc0607b0f1 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -765,8 +765,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
765 BUGMSG(D_DURING, "in arcnet_interrupt\n"); 765 BUGMSG(D_DURING, "in arcnet_interrupt\n");
766 766
767 lp = dev->priv; 767 lp = dev->priv;
768 if (!lp) 768 BUG_ON(!lp);
769 BUG();
770 769
771 spin_lock(&lp->lock); 770 spin_lock(&lp->lock);
772 771
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index 43150b2bd13f..0d45553ff75c 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -125,11 +125,11 @@ static void __init com90xx_probe(void)
125 if (!io && !irq && !shmem && !*device && com90xx_skip_probe) 125 if (!io && !irq && !shmem && !*device && com90xx_skip_probe)
126 return; 126 return;
127 127
128 shmems = kzalloc(((0x10000-0xa0000) / 0x800) * sizeof(unsigned long), 128 shmems = kzalloc(((0x100000-0xa0000) / 0x800) * sizeof(unsigned long),
129 GFP_KERNEL); 129 GFP_KERNEL);
130 if (!shmems) 130 if (!shmems)
131 return; 131 return;
132 iomem = kzalloc(((0x10000-0xa0000) / 0x800) * sizeof(void __iomem *), 132 iomem = kzalloc(((0x100000-0xa0000) / 0x800) * sizeof(void __iomem *),
133 GFP_KERNEL); 133 GFP_KERNEL);
134 if (!iomem) { 134 if (!iomem) {
135 kfree(shmems); 135 kfree(shmems);
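
The com90xx hunk above corrects the probe-table size: 0x10000 - 0xa0000 is negative, so the old expression wrapped to a huge value once multiplied by sizeof(), while the new upper bound 0x100000 makes the table cover the 0xA0000-0xFFFFF legacy shared-memory window scanned in 0x800-byte steps. A minimal sketch of that arithmetic, using only the constants from the hunk:

    #include <stdio.h>

    int main(void)
    {
            unsigned long start = 0xa0000, end = 0x100000, step = 0x800;

            /* Number of candidate shared-memory slots the probe can record. */
            printf("slots = %lu\n", (end - start) / step);   /* prints 192 */
            return 0;
    }
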
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 15032f2c7817..c4e12b5cbb92 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -608,8 +608,7 @@ static void b44_tx(struct b44 *bp)
608 struct ring_info *rp = &bp->tx_buffers[cons]; 608 struct ring_info *rp = &bp->tx_buffers[cons];
609 struct sk_buff *skb = rp->skb; 609 struct sk_buff *skb = rp->skb;
610 610
611 if (unlikely(skb == NULL)) 611 BUG_ON(skb == NULL);
612 BUG();
613 612
614 pci_unmap_single(bp->pdev, 613 pci_unmap_single(bp->pdev,
615 pci_unmap_addr(rp, mapping), 614 pci_unmap_addr(rp, mapping),
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 30ff8ea1a402..4391bf4bf573 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1093,8 +1093,7 @@ static int process_responses(struct adapter *adapter, int budget)
1093 if (likely(e->DataValid)) { 1093 if (likely(e->DataValid)) {
1094 struct freelQ *fl = &sge->freelQ[e->FreelistQid]; 1094 struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1095 1095
1096 if (unlikely(!e->Sop || !e->Eop)) 1096 BUG_ON(!e->Sop || !e->Eop);
1097 BUG();
1098 if (unlikely(e->Offload)) 1097 if (unlikely(e->Offload))
1099 unexpected_offload(adapter, fl); 1098 unexpected_offload(adapter, fl);
1100 else 1099 else
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 49cd096a3c3d..add8dc4aa7b0 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3308,8 +3308,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
3308 3308
3309 while (poll_dev != &adapter->polling_netdev[i]) { 3309 while (poll_dev != &adapter->polling_netdev[i]) {
3310 i++; 3310 i++;
3311 if (unlikely(i == adapter->num_rx_queues)) 3311 BUG_ON(i == adapter->num_rx_queues);
3312 BUG();
3313 } 3312 }
3314 3313
3315 if (likely(adapter->num_tx_queues == 1)) { 3314 if (likely(adapter->num_tx_queues == 1)) {
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index aa1569182fd6..815436c6170f 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -203,8 +203,7 @@ static int eql_open(struct net_device *dev)
203 printk(KERN_INFO "%s: remember to turn off Van-Jacobson compression on " 203 printk(KERN_INFO "%s: remember to turn off Van-Jacobson compression on "
204 "your slave devices.\n", dev->name); 204 "your slave devices.\n", dev->name);
205 205
206 if (!list_empty(&eql->queue.all_slaves)) 206 BUG_ON(!list_empty(&eql->queue.all_slaves));
207 BUG();
208 207
209 eql->min_slaves = 1; 208 eql->min_slaves = 1;
210 eql->max_slaves = EQL_DEFAULT_MAX_SLAVES; /* 4 usually... */ 209 eql->max_slaves = EQL_DEFAULT_MAX_SLAVES; /* 4 usually... */
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index ceb98fd398af..52d01027d9e7 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -235,7 +235,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
235 235
236 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); 236 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
237 237
238 if(lpar_rc != H_Success) { 238 if(lpar_rc != H_SUCCESS) {
239 pool->free_map[free_index] = index; 239 pool->free_map[free_index] = index;
240 pool->skbuff[index] = NULL; 240 pool->skbuff[index] = NULL;
241 pool->consumer_index--; 241 pool->consumer_index--;
@@ -373,7 +373,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
373 373
374 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); 374 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
375 375
376 if(lpar_rc != H_Success) { 376 if(lpar_rc != H_SUCCESS) {
377 ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc); 377 ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
378 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); 378 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
379 } 379 }
@@ -511,7 +511,7 @@ static int ibmveth_open(struct net_device *netdev)
511 adapter->filter_list_dma, 511 adapter->filter_list_dma,
512 mac_address); 512 mac_address);
513 513
514 if(lpar_rc != H_Success) { 514 if(lpar_rc != H_SUCCESS) {
515 ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc); 515 ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
516 ibmveth_error_printk("buffer TCE:0x%lx filter TCE:0x%lx rxq desc:0x%lx MAC:0x%lx\n", 516 ibmveth_error_printk("buffer TCE:0x%lx filter TCE:0x%lx rxq desc:0x%lx MAC:0x%lx\n",
517 adapter->buffer_list_dma, 517 adapter->buffer_list_dma,
@@ -527,7 +527,7 @@ static int ibmveth_open(struct net_device *netdev)
527 ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc); 527 ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
528 do { 528 do {
529 rc = h_free_logical_lan(adapter->vdev->unit_address); 529 rc = h_free_logical_lan(adapter->vdev->unit_address);
530 } while (H_isLongBusy(rc) || (rc == H_Busy)); 530 } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
531 531
532 ibmveth_cleanup(adapter); 532 ibmveth_cleanup(adapter);
533 return rc; 533 return rc;
@@ -556,9 +556,9 @@ static int ibmveth_close(struct net_device *netdev)
556 556
557 do { 557 do {
558 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); 558 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
559 } while (H_isLongBusy(lpar_rc) || (lpar_rc == H_Busy)); 559 } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
560 560
561 if(lpar_rc != H_Success) 561 if(lpar_rc != H_SUCCESS)
562 { 562 {
563 ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n", 563 ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n",
564 lpar_rc); 564 lpar_rc);
@@ -693,9 +693,9 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
693 desc[4].desc, 693 desc[4].desc,
694 desc[5].desc, 694 desc[5].desc,
695 correlator); 695 correlator);
696 } while ((lpar_rc == H_Busy) && (retry_count--)); 696 } while ((lpar_rc == H_BUSY) && (retry_count--));
697 697
698 if(lpar_rc != H_Success && lpar_rc != H_Dropped) { 698 if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
699 int i; 699 int i;
700 ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc); 700 ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
701 for(i = 0; i < 6; i++) { 701 for(i = 0; i < 6; i++) {
@@ -786,14 +786,14 @@ static int ibmveth_poll(struct net_device *netdev, int *budget)
786 /* we think we are done - reenable interrupts, then check once more to make sure we are done */ 786 /* we think we are done - reenable interrupts, then check once more to make sure we are done */
787 lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE); 787 lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);
788 788
789 ibmveth_assert(lpar_rc == H_Success); 789 ibmveth_assert(lpar_rc == H_SUCCESS);
790 790
791 netif_rx_complete(netdev); 791 netif_rx_complete(netdev);
792 792
793 if(ibmveth_rxq_pending_buffer(adapter) && netif_rx_reschedule(netdev, frames_processed)) 793 if(ibmveth_rxq_pending_buffer(adapter) && netif_rx_reschedule(netdev, frames_processed))
794 { 794 {
795 lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); 795 lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
796 ibmveth_assert(lpar_rc == H_Success); 796 ibmveth_assert(lpar_rc == H_SUCCESS);
797 more_work = 1; 797 more_work = 1;
798 goto restart_poll; 798 goto restart_poll;
799 } 799 }
@@ -813,7 +813,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs
813 813
814 if(netif_rx_schedule_prep(netdev)) { 814 if(netif_rx_schedule_prep(netdev)) {
815 lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); 815 lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
816 ibmveth_assert(lpar_rc == H_Success); 816 ibmveth_assert(lpar_rc == H_SUCCESS);
817 __netif_rx_schedule(netdev); 817 __netif_rx_schedule(netdev);
818 } 818 }
819 return IRQ_HANDLED; 819 return IRQ_HANDLED;
@@ -835,7 +835,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
835 IbmVethMcastEnableRecv | 835 IbmVethMcastEnableRecv |
836 IbmVethMcastDisableFiltering, 836 IbmVethMcastDisableFiltering,
837 0); 837 0);
838 if(lpar_rc != H_Success) { 838 if(lpar_rc != H_SUCCESS) {
839 ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc); 839 ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
840 } 840 }
841 } else { 841 } else {
@@ -847,7 +847,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
847 IbmVethMcastDisableFiltering | 847 IbmVethMcastDisableFiltering |
848 IbmVethMcastClearFilterTable, 848 IbmVethMcastClearFilterTable,
849 0); 849 0);
850 if(lpar_rc != H_Success) { 850 if(lpar_rc != H_SUCCESS) {
851 ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc); 851 ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
852 } 852 }
853 /* add the addresses to the filter table */ 853 /* add the addresses to the filter table */
@@ -858,7 +858,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
858 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, 858 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
859 IbmVethMcastAddFilter, 859 IbmVethMcastAddFilter,
860 mcast_addr); 860 mcast_addr);
861 if(lpar_rc != H_Success) { 861 if(lpar_rc != H_SUCCESS) {
862 ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc); 862 ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
863 } 863 }
864 } 864 }
@@ -867,7 +867,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
867 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, 867 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
868 IbmVethMcastEnableFiltering, 868 IbmVethMcastEnableFiltering,
869 0); 869 0);
870 if(lpar_rc != H_Success) { 870 if(lpar_rc != H_SUCCESS) {
871 ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc); 871 ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc);
872 } 872 }
873 } 873 }
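
The ibmveth hunks above only rename the pSeries hypervisor-call return-code symbols (H_Success to H_SUCCESS, H_Busy to H_BUSY, H_Dropped to H_DROPPED, H_isLongBusy() to H_IS_LONG_BUSY()); the busy-retry logic itself is unchanged. A minimal user-space sketch of that retry pattern, in which the constants' values and fake_hcall() are invented here for illustration:

    #include <stdio.h>

    #define H_SUCCESS 0
    #define H_BUSY    1

    /* Hypothetical stand-in for an hcall that succeeds on the third attempt. */
    static long fake_hcall(void)
    {
            static int calls;
            return (++calls < 3) ? H_BUSY : H_SUCCESS;
    }

    int main(void)
    {
            long rc;
            int retry_count = 5;

            do {
                    rc = fake_hcall();
            } while (rc == H_BUSY && retry_count--);

            printf("rc=%ld after retries\n", rc);
            return rc == H_SUCCESS ? 0 : 1;
    }
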
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 63d38fbbd04e..f530686bd09f 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -695,8 +695,7 @@ static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
695 /* 695 /*
696 * We must not be transmitting... 696 * We must not be transmitting...
697 */ 697 */
698 if (si->txskb) 698 BUG_ON(si->txskb);
699 BUG();
700 699
701 netif_stop_queue(dev); 700 netif_stop_queue(dev);
702 701
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index d11821dd86ed..ced9fdb8335c 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -645,9 +645,7 @@ static void __devexit ne2k_pci_remove_one (struct pci_dev *pdev)
645{ 645{
646 struct net_device *dev = pci_get_drvdata(pdev); 646 struct net_device *dev = pci_get_drvdata(pdev);
647 647
648 if (!dev) 648 BUG_ON(!dev);
649 BUG();
650
651 unregister_netdev(dev); 649 unregister_netdev(dev);
652 release_region(dev->base_addr, NE_IO_EXTENT); 650 release_region(dev->base_addr, NE_IO_EXTENT);
653 free_netdev(dev); 651 free_netdev(dev);
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index edd1b5306b16..75b35ad760de 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -94,7 +94,7 @@ static struct console netconsole = {
94static int option_setup(char *opt) 94static int option_setup(char *opt)
95{ 95{
96 configured = !netpoll_parse_options(&np, opt); 96 configured = !netpoll_parse_options(&np, opt);
97 return 0; 97 return 1;
98} 98}
99 99
100__setup("netconsole=", option_setup); 100__setup("netconsole=", option_setup);
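
The netconsole change above makes option_setup() return 1 rather than 0: a __setup() handler returns non-zero to signal that the "netconsole=" argument was consumed, so the kernel does not pass it on to init as an unknown parameter. A user-space sketch of the handler's shape follows; parse_opts() is a made-up helper standing in for netpoll_parse_options(), which returns 0 on success.

    #include <stdio.h>
    #include <string.h>

    static int configured;

    /* Hypothetical stand-in for netpoll_parse_options(): 0 on success. */
    static int parse_opts(const char *opt)
    {
            return strlen(opt) ? 0 : -1;
    }

    /* Mirrors the fixed handler: return 1 so the option counts as handled. */
    static int option_setup(const char *opt)
    {
            configured = !parse_opts(opt);
            return 1;
    }

    int main(void)
    {
            printf("handled=%d configured=%d\n",
                   option_setup("6665@10.0.0.1/eth0"), configured);
            return 0;
    }
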
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 8e9b1a537dee..706aed7d717f 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -568,8 +568,7 @@ static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
568#endif 568#endif
569 569
570 sg = dev->rx_info.descs + (next_empty * DESC_SIZE); 570 sg = dev->rx_info.descs + (next_empty * DESC_SIZE);
571 if (unlikely(NULL != dev->rx_info.skbs[next_empty])) 571 BUG_ON(NULL != dev->rx_info.skbs[next_empty]);
572 BUG();
573 dev->rx_info.skbs[next_empty] = skb; 572 dev->rx_info.skbs[next_empty] = skb;
574 573
575 dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC; 574 dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC;
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index ce90becb8bdf..fab93360f017 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -204,7 +204,7 @@ enum Window4 { /* Window 4: Xcvr/media bits. */
204#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */ 204#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */
205 205
206struct el3_private { 206struct el3_private {
207 dev_link_t link; 207 struct pcmcia_device *p_dev;
208 dev_node_t node; 208 dev_node_t node;
209 struct net_device_stats stats; 209 struct net_device_stats stats;
210 u16 advertising, partner; /* NWay media advertisement */ 210 u16 advertising, partner; /* NWay media advertisement */
@@ -225,8 +225,8 @@ static char mii_preamble_required = 0;
225 225
226/* Index of functions. */ 226/* Index of functions. */
227 227
228static void tc574_config(dev_link_t *link); 228static int tc574_config(struct pcmcia_device *link);
229static void tc574_release(dev_link_t *link); 229static void tc574_release(struct pcmcia_device *link);
230 230
231static void mdio_sync(kio_addr_t ioaddr, int bits); 231static void mdio_sync(kio_addr_t ioaddr, int bits);
232static int mdio_read(kio_addr_t ioaddr, int phy_id, int location); 232static int mdio_read(kio_addr_t ioaddr, int phy_id, int location);
@@ -256,10 +256,9 @@ static void tc574_detach(struct pcmcia_device *p_dev);
256 with Card Services. 256 with Card Services.
257*/ 257*/
258 258
259static int tc574_attach(struct pcmcia_device *p_dev) 259static int tc574_probe(struct pcmcia_device *link)
260{ 260{
261 struct el3_private *lp; 261 struct el3_private *lp;
262 dev_link_t *link;
263 struct net_device *dev; 262 struct net_device *dev;
264 263
265 DEBUG(0, "3c574_attach()\n"); 264 DEBUG(0, "3c574_attach()\n");
@@ -269,8 +268,8 @@ static int tc574_attach(struct pcmcia_device *p_dev)
269 if (!dev) 268 if (!dev)
270 return -ENOMEM; 269 return -ENOMEM;
271 lp = netdev_priv(dev); 270 lp = netdev_priv(dev);
272 link = &lp->link;
273 link->priv = dev; 271 link->priv = dev;
272 lp->p_dev = link;
274 273
275 spin_lock_init(&lp->window_lock); 274 spin_lock_init(&lp->window_lock);
276 link->io.NumPorts1 = 32; 275 link->io.NumPorts1 = 32;
@@ -280,7 +279,6 @@ static int tc574_attach(struct pcmcia_device *p_dev)
280 link->irq.Handler = &el3_interrupt; 279 link->irq.Handler = &el3_interrupt;
281 link->irq.Instance = dev; 280 link->irq.Instance = dev;
282 link->conf.Attributes = CONF_ENABLE_IRQ; 281 link->conf.Attributes = CONF_ENABLE_IRQ;
283 link->conf.Vcc = 50;
284 link->conf.IntType = INT_MEMORY_AND_IO; 282 link->conf.IntType = INT_MEMORY_AND_IO;
285 link->conf.ConfigIndex = 1; 283 link->conf.ConfigIndex = 1;
286 link->conf.Present = PRESENT_OPTION; 284 link->conf.Present = PRESENT_OPTION;
@@ -298,13 +296,7 @@ static int tc574_attach(struct pcmcia_device *p_dev)
298 dev->watchdog_timeo = TX_TIMEOUT; 296 dev->watchdog_timeo = TX_TIMEOUT;
299#endif 297#endif
300 298
301 link->handle = p_dev; 299 return tc574_config(link);
302 p_dev->instance = link;
303
304 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
305 tc574_config(link);
306
307 return 0;
308} /* tc574_attach */ 300} /* tc574_attach */
309 301
310/* 302/*
@@ -316,18 +308,16 @@ static int tc574_attach(struct pcmcia_device *p_dev)
316 308
317*/ 309*/
318 310
319static void tc574_detach(struct pcmcia_device *p_dev) 311static void tc574_detach(struct pcmcia_device *link)
320{ 312{
321 dev_link_t *link = dev_to_instance(p_dev);
322 struct net_device *dev = link->priv; 313 struct net_device *dev = link->priv;
323 314
324 DEBUG(0, "3c574_detach(0x%p)\n", link); 315 DEBUG(0, "3c574_detach(0x%p)\n", link);
325 316
326 if (link->dev) 317 if (link->dev_node)
327 unregister_netdev(dev); 318 unregister_netdev(dev);
328 319
329 if (link->state & DEV_CONFIG) 320 tc574_release(link);
330 tc574_release(link);
331 321
332 free_netdev(dev); 322 free_netdev(dev);
333} /* tc574_detach */ 323} /* tc574_detach */
@@ -343,9 +333,8 @@ static void tc574_detach(struct pcmcia_device *p_dev)
343 333
344static const char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; 334static const char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
345 335
346static void tc574_config(dev_link_t *link) 336static int tc574_config(struct pcmcia_device *link)
347{ 337{
348 client_handle_t handle = link->handle;
349 struct net_device *dev = link->priv; 338 struct net_device *dev = link->priv;
350 struct el3_private *lp = netdev_priv(dev); 339 struct el3_private *lp = netdev_priv(dev);
351 tuple_t tuple; 340 tuple_t tuple;
@@ -363,30 +352,27 @@ static void tc574_config(dev_link_t *link)
363 352
364 tuple.Attributes = 0; 353 tuple.Attributes = 0;
365 tuple.DesiredTuple = CISTPL_CONFIG; 354 tuple.DesiredTuple = CISTPL_CONFIG;
366 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 355 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
367 tuple.TupleData = (cisdata_t *)buf; 356 tuple.TupleData = (cisdata_t *)buf;
368 tuple.TupleDataMax = 64; 357 tuple.TupleDataMax = 64;
369 tuple.TupleOffset = 0; 358 tuple.TupleOffset = 0;
370 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 359 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
371 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 360 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
372 link->conf.ConfigBase = parse.config.base; 361 link->conf.ConfigBase = parse.config.base;
373 link->conf.Present = parse.config.rmask[0]; 362 link->conf.Present = parse.config.rmask[0];
374 363
375 /* Configure card */
376 link->state |= DEV_CONFIG;
377
378 link->io.IOAddrLines = 16; 364 link->io.IOAddrLines = 16;
379 for (i = j = 0; j < 0x400; j += 0x20) { 365 for (i = j = 0; j < 0x400; j += 0x20) {
380 link->io.BasePort1 = j ^ 0x300; 366 link->io.BasePort1 = j ^ 0x300;
381 i = pcmcia_request_io(link->handle, &link->io); 367 i = pcmcia_request_io(link, &link->io);
382 if (i == CS_SUCCESS) break; 368 if (i == CS_SUCCESS) break;
383 } 369 }
384 if (i != CS_SUCCESS) { 370 if (i != CS_SUCCESS) {
385 cs_error(link->handle, RequestIO, i); 371 cs_error(link, RequestIO, i);
386 goto failed; 372 goto failed;
387 } 373 }
388 CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq)); 374 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
389 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf)); 375 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
390 376
391 dev->irq = link->irq.AssignedIRQ; 377 dev->irq = link->irq.AssignedIRQ;
392 dev->base_addr = link->io.BasePort1; 378 dev->base_addr = link->io.BasePort1;
@@ -397,8 +383,8 @@ static void tc574_config(dev_link_t *link)
397 the hardware address. The future products may include a modem chip 383 the hardware address. The future products may include a modem chip
398 and put the address in the CIS. */ 384 and put the address in the CIS. */
399 tuple.DesiredTuple = 0x88; 385 tuple.DesiredTuple = 0x88;
400 if (pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS) { 386 if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) {
401 pcmcia_get_tuple_data(handle, &tuple); 387 pcmcia_get_tuple_data(link, &tuple);
402 for (i = 0; i < 3; i++) 388 for (i = 0; i < 3; i++)
403 phys_addr[i] = htons(buf[i]); 389 phys_addr[i] = htons(buf[i]);
404 } else { 390 } else {
@@ -412,9 +398,9 @@ static void tc574_config(dev_link_t *link)
412 } 398 }
413 } 399 }
414 tuple.DesiredTuple = CISTPL_VERS_1; 400 tuple.DesiredTuple = CISTPL_VERS_1;
415 if (pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS && 401 if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS &&
416 pcmcia_get_tuple_data(handle, &tuple) == CS_SUCCESS && 402 pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS &&
417 pcmcia_parse_tuple(handle, &tuple, &parse) == CS_SUCCESS) { 403 pcmcia_parse_tuple(link, &tuple, &parse) == CS_SUCCESS) {
418 cardname = parse.version_1.str + parse.version_1.ofs[1]; 404 cardname = parse.version_1.str + parse.version_1.ofs[1];
419 } else 405 } else
420 cardname = "3Com 3c574"; 406 cardname = "3Com 3c574";
@@ -473,13 +459,12 @@ static void tc574_config(dev_link_t *link)
473 } 459 }
474 } 460 }
475 461
476 link->state &= ~DEV_CONFIG_PENDING; 462 link->dev_node = &lp->node;
477 link->dev = &lp->node; 463 SET_NETDEV_DEV(dev, &handle_to_dev(link));
478 SET_NETDEV_DEV(dev, &handle_to_dev(handle));
479 464
480 if (register_netdev(dev) != 0) { 465 if (register_netdev(dev) != 0) {
481 printk(KERN_NOTICE "3c574_cs: register_netdev() failed\n"); 466 printk(KERN_NOTICE "3c574_cs: register_netdev() failed\n");
482 link->dev = NULL; 467 link->dev_node = NULL;
483 goto failed; 468 goto failed;
484 } 469 }
485 470
@@ -493,13 +478,13 @@ static void tc574_config(dev_link_t *link)
493 8 << config.u.ram_size, ram_split[config.u.ram_split], 478 8 << config.u.ram_size, ram_split[config.u.ram_split],
494 config.u.autoselect ? "autoselect " : ""); 479 config.u.autoselect ? "autoselect " : "");
495 480
496 return; 481 return 0;
497 482
498cs_failed: 483cs_failed:
499 cs_error(link->handle, last_fn, last_ret); 484 cs_error(link, last_fn, last_ret);
500failed: 485failed:
501 tc574_release(link); 486 tc574_release(link);
502 return; 487 return -ENODEV;
503 488
504} /* tc574_config */ 489} /* tc574_config */
505 490
@@ -509,44 +494,28 @@ failed:
509 still open, this will be postponed until it is closed. 494 still open, this will be postponed until it is closed.
510*/ 495*/
511 496
512static void tc574_release(dev_link_t *link) 497static void tc574_release(struct pcmcia_device *link)
513{ 498{
514 DEBUG(0, "3c574_release(0x%p)\n", link); 499 pcmcia_disable_device(link);
515
516 pcmcia_release_configuration(link->handle);
517 pcmcia_release_io(link->handle, &link->io);
518 pcmcia_release_irq(link->handle, &link->irq);
519
520 link->state &= ~DEV_CONFIG;
521} 500}
522 501
523static int tc574_suspend(struct pcmcia_device *p_dev) 502static int tc574_suspend(struct pcmcia_device *link)
524{ 503{
525 dev_link_t *link = dev_to_instance(p_dev);
526 struct net_device *dev = link->priv; 504 struct net_device *dev = link->priv;
527 505
528 link->state |= DEV_SUSPEND; 506 if (link->open)
529 if (link->state & DEV_CONFIG) { 507 netif_device_detach(dev);
530 if (link->open)
531 netif_device_detach(dev);
532 pcmcia_release_configuration(link->handle);
533 }
534 508
535 return 0; 509 return 0;
536} 510}
537 511
538static int tc574_resume(struct pcmcia_device *p_dev) 512static int tc574_resume(struct pcmcia_device *link)
539{ 513{
540 dev_link_t *link = dev_to_instance(p_dev);
541 struct net_device *dev = link->priv; 514 struct net_device *dev = link->priv;
542 515
543 link->state &= ~DEV_SUSPEND; 516 if (link->open) {
544 if (link->state & DEV_CONFIG) { 517 tc574_reset(dev);
545 pcmcia_request_configuration(link->handle, &link->conf); 518 netif_device_attach(dev);
546 if (link->open) {
547 tc574_reset(dev);
548 netif_device_attach(dev);
549 }
550 } 519 }
551 520
552 return 0; 521 return 0;
@@ -757,9 +726,9 @@ static void tc574_reset(struct net_device *dev)
757static int el3_open(struct net_device *dev) 726static int el3_open(struct net_device *dev)
758{ 727{
759 struct el3_private *lp = netdev_priv(dev); 728 struct el3_private *lp = netdev_priv(dev);
760 dev_link_t *link = &lp->link; 729 struct pcmcia_device *link = lp->p_dev;
761 730
762 if (!DEV_OK(link)) 731 if (!pcmcia_dev_present(link))
763 return -ENODEV; 732 return -ENODEV;
764 733
765 link->open++; 734 link->open++;
@@ -1203,11 +1172,11 @@ static int el3_close(struct net_device *dev)
1203{ 1172{
1204 kio_addr_t ioaddr = dev->base_addr; 1173 kio_addr_t ioaddr = dev->base_addr;
1205 struct el3_private *lp = netdev_priv(dev); 1174 struct el3_private *lp = netdev_priv(dev);
1206 dev_link_t *link = &lp->link; 1175 struct pcmcia_device *link = lp->p_dev;
1207 1176
1208 DEBUG(2, "%s: shutting down ethercard.\n", dev->name); 1177 DEBUG(2, "%s: shutting down ethercard.\n", dev->name);
1209 1178
1210 if (DEV_OK(link)) { 1179 if (pcmcia_dev_present(link)) {
1211 unsigned long flags; 1180 unsigned long flags;
1212 1181
1213 /* Turn off statistics ASAP. We update lp->stats below. */ 1182 /* Turn off statistics ASAP. We update lp->stats below. */
@@ -1246,7 +1215,7 @@ static struct pcmcia_driver tc574_driver = {
1246 .drv = { 1215 .drv = {
1247 .name = "3c574_cs", 1216 .name = "3c574_cs",
1248 }, 1217 },
1249 .probe = tc574_attach, 1218 .probe = tc574_probe,
1250 .remove = tc574_detach, 1219 .remove = tc574_detach,
1251 .id_table = tc574_ids, 1220 .id_table = tc574_ids,
1252 .suspend = tc574_suspend, 1221 .suspend = tc574_suspend,
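
The 3c574_cs conversion above, like the 3c589_cs, axnet_cs and com20020_cs hunks that follow, drops the embedded dev_link_t and works directly on the struct pcmcia_device handed to probe(): the driver's private data keeps only a back pointer (lp->p_dev = link), the DEV_CONFIG/DEV_SUSPEND state flags disappear, and release collapses to pcmcia_disable_device(). A minimal user-space sketch of the back-pointer arrangement, with every type and function name below invented for illustration:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy stand-ins for struct pcmcia_device and the driver's private data. */
    struct toy_pcmcia_device {
            void *priv;     /* points at the driver's per-device data */
            int present;
    };

    struct toy_el3_private {
            struct toy_pcmcia_device *p_dev;  /* back pointer, as in lp->p_dev */
            int open;
    };

    static int toy_probe(struct toy_pcmcia_device *link)
    {
            struct toy_el3_private *lp = calloc(1, sizeof(*lp));

            if (!lp)
                    return -1;
            link->priv = lp;   /* device -> private data */
            lp->p_dev = link;  /* private data -> device (replaces &lp->link) */
            return 0;
    }

    int main(void)
    {
            struct toy_pcmcia_device dev = { .present = 1 };

            if (toy_probe(&dev) == 0)
                    printf("probe ok, back pointer set: %d\n",
                           ((struct toy_el3_private *)dev.priv)->p_dev == &dev);
            free(dev.priv);
            return 0;
    }
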
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 3dba50849da7..875a0fe251e7 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -105,7 +105,7 @@ enum RxFilter {
105#define TX_TIMEOUT ((400*HZ)/1000) 105#define TX_TIMEOUT ((400*HZ)/1000)
106 106
107struct el3_private { 107struct el3_private {
108 dev_link_t link; 108 struct pcmcia_device *p_dev;
109 dev_node_t node; 109 dev_node_t node;
110 struct net_device_stats stats; 110 struct net_device_stats stats;
111 /* For transceiver monitoring */ 111 /* For transceiver monitoring */
@@ -142,8 +142,8 @@ DRV_NAME ".c " DRV_VERSION " 2001/10/13 00:08:50 (David Hinds)";
142 142
143/*====================================================================*/ 143/*====================================================================*/
144 144
145static void tc589_config(dev_link_t *link); 145static int tc589_config(struct pcmcia_device *link);
146static void tc589_release(dev_link_t *link); 146static void tc589_release(struct pcmcia_device *link);
147 147
148static u16 read_eeprom(kio_addr_t ioaddr, int index); 148static u16 read_eeprom(kio_addr_t ioaddr, int index);
149static void tc589_reset(struct net_device *dev); 149static void tc589_reset(struct net_device *dev);
@@ -170,10 +170,9 @@ static void tc589_detach(struct pcmcia_device *p_dev);
170 170
171======================================================================*/ 171======================================================================*/
172 172
173static int tc589_attach(struct pcmcia_device *p_dev) 173static int tc589_probe(struct pcmcia_device *link)
174{ 174{
175 struct el3_private *lp; 175 struct el3_private *lp;
176 dev_link_t *link;
177 struct net_device *dev; 176 struct net_device *dev;
178 177
179 DEBUG(0, "3c589_attach()\n"); 178 DEBUG(0, "3c589_attach()\n");
@@ -183,8 +182,8 @@ static int tc589_attach(struct pcmcia_device *p_dev)
183 if (!dev) 182 if (!dev)
184 return -ENOMEM; 183 return -ENOMEM;
185 lp = netdev_priv(dev); 184 lp = netdev_priv(dev);
186 link = &lp->link;
187 link->priv = dev; 185 link->priv = dev;
186 lp->p_dev = link;
188 187
189 spin_lock_init(&lp->lock); 188 spin_lock_init(&lp->lock);
190 link->io.NumPorts1 = 16; 189 link->io.NumPorts1 = 16;
@@ -194,7 +193,6 @@ static int tc589_attach(struct pcmcia_device *p_dev)
194 link->irq.Handler = &el3_interrupt; 193 link->irq.Handler = &el3_interrupt;
195 link->irq.Instance = dev; 194 link->irq.Instance = dev;
196 link->conf.Attributes = CONF_ENABLE_IRQ; 195 link->conf.Attributes = CONF_ENABLE_IRQ;
197 link->conf.Vcc = 50;
198 link->conf.IntType = INT_MEMORY_AND_IO; 196 link->conf.IntType = INT_MEMORY_AND_IO;
199 link->conf.ConfigIndex = 1; 197 link->conf.ConfigIndex = 1;
200 link->conf.Present = PRESENT_OPTION; 198 link->conf.Present = PRESENT_OPTION;
@@ -213,13 +211,7 @@ static int tc589_attach(struct pcmcia_device *p_dev)
213#endif 211#endif
214 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 212 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
215 213
216 link->handle = p_dev; 214 return tc589_config(link);
217 p_dev->instance = link;
218
219 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
220 tc589_config(link);
221
222 return 0;
223} /* tc589_attach */ 215} /* tc589_attach */
224 216
225/*====================================================================== 217/*======================================================================
@@ -231,18 +223,16 @@ static int tc589_attach(struct pcmcia_device *p_dev)
231 223
232======================================================================*/ 224======================================================================*/
233 225
234static void tc589_detach(struct pcmcia_device *p_dev) 226static void tc589_detach(struct pcmcia_device *link)
235{ 227{
236 dev_link_t *link = dev_to_instance(p_dev);
237 struct net_device *dev = link->priv; 228 struct net_device *dev = link->priv;
238 229
239 DEBUG(0, "3c589_detach(0x%p)\n", link); 230 DEBUG(0, "3c589_detach(0x%p)\n", link);
240 231
241 if (link->dev) 232 if (link->dev_node)
242 unregister_netdev(dev); 233 unregister_netdev(dev);
243 234
244 if (link->state & DEV_CONFIG) 235 tc589_release(link);
245 tc589_release(link);
246 236
247 free_netdev(dev); 237 free_netdev(dev);
248} /* tc589_detach */ 238} /* tc589_detach */
@@ -258,9 +248,8 @@ static void tc589_detach(struct pcmcia_device *p_dev)
258#define CS_CHECK(fn, ret) \ 248#define CS_CHECK(fn, ret) \
259do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 249do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
260 250
261static void tc589_config(dev_link_t *link) 251static int tc589_config(struct pcmcia_device *link)
262{ 252{
263 client_handle_t handle = link->handle;
264 struct net_device *dev = link->priv; 253 struct net_device *dev = link->priv;
265 struct el3_private *lp = netdev_priv(dev); 254 struct el3_private *lp = netdev_priv(dev);
266 tuple_t tuple; 255 tuple_t tuple;
@@ -275,43 +264,40 @@ static void tc589_config(dev_link_t *link)
275 phys_addr = (u16 *)dev->dev_addr; 264 phys_addr = (u16 *)dev->dev_addr;
276 tuple.Attributes = 0; 265 tuple.Attributes = 0;
277 tuple.DesiredTuple = CISTPL_CONFIG; 266 tuple.DesiredTuple = CISTPL_CONFIG;
278 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 267 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
279 tuple.TupleData = (cisdata_t *)buf; 268 tuple.TupleData = (cisdata_t *)buf;
280 tuple.TupleDataMax = sizeof(buf); 269 tuple.TupleDataMax = sizeof(buf);
281 tuple.TupleOffset = 0; 270 tuple.TupleOffset = 0;
282 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 271 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
283 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 272 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
284 link->conf.ConfigBase = parse.config.base; 273 link->conf.ConfigBase = parse.config.base;
285 link->conf.Present = parse.config.rmask[0]; 274 link->conf.Present = parse.config.rmask[0];
286 275
287 /* Is this a 3c562? */ 276 /* Is this a 3c562? */
288 tuple.DesiredTuple = CISTPL_MANFID; 277 tuple.DesiredTuple = CISTPL_MANFID;
289 tuple.Attributes = TUPLE_RETURN_COMMON; 278 tuple.Attributes = TUPLE_RETURN_COMMON;
290 if ((pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS) && 279 if ((pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) &&
291 (pcmcia_get_tuple_data(handle, &tuple) == CS_SUCCESS)) { 280 (pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS)) {
292 if (le16_to_cpu(buf[0]) != MANFID_3COM) 281 if (le16_to_cpu(buf[0]) != MANFID_3COM)
293 printk(KERN_INFO "3c589_cs: hmmm, is this really a " 282 printk(KERN_INFO "3c589_cs: hmmm, is this really a "
294 "3Com card??\n"); 283 "3Com card??\n");
295 multi = (le16_to_cpu(buf[1]) == PRODID_3COM_3C562); 284 multi = (le16_to_cpu(buf[1]) == PRODID_3COM_3C562);
296 } 285 }
297
298 /* Configure card */
299 link->state |= DEV_CONFIG;
300 286
301 /* For the 3c562, the base address must be xx00-xx7f */ 287 /* For the 3c562, the base address must be xx00-xx7f */
302 link->io.IOAddrLines = 16; 288 link->io.IOAddrLines = 16;
303 for (i = j = 0; j < 0x400; j += 0x10) { 289 for (i = j = 0; j < 0x400; j += 0x10) {
304 if (multi && (j & 0x80)) continue; 290 if (multi && (j & 0x80)) continue;
305 link->io.BasePort1 = j ^ 0x300; 291 link->io.BasePort1 = j ^ 0x300;
306 i = pcmcia_request_io(link->handle, &link->io); 292 i = pcmcia_request_io(link, &link->io);
307 if (i == CS_SUCCESS) break; 293 if (i == CS_SUCCESS) break;
308 } 294 }
309 if (i != CS_SUCCESS) { 295 if (i != CS_SUCCESS) {
310 cs_error(link->handle, RequestIO, i); 296 cs_error(link, RequestIO, i);
311 goto failed; 297 goto failed;
312 } 298 }
313 CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq)); 299 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
314 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf)); 300 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
315 301
316 dev->irq = link->irq.AssignedIRQ; 302 dev->irq = link->irq.AssignedIRQ;
317 dev->base_addr = link->io.BasePort1; 303 dev->base_addr = link->io.BasePort1;
@@ -321,8 +307,8 @@ static void tc589_config(dev_link_t *link)
321 /* The 3c589 has an extra EEPROM for configuration info, including 307 /* The 3c589 has an extra EEPROM for configuration info, including
322 the hardware address. The 3c562 puts the address in the CIS. */ 308 the hardware address. The 3c562 puts the address in the CIS. */
323 tuple.DesiredTuple = 0x88; 309 tuple.DesiredTuple = 0x88;
324 if (pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS) { 310 if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) {
325 pcmcia_get_tuple_data(handle, &tuple); 311 pcmcia_get_tuple_data(link, &tuple);
326 for (i = 0; i < 3; i++) 312 for (i = 0; i < 3; i++)
327 phys_addr[i] = htons(buf[i]); 313 phys_addr[i] = htons(buf[i]);
328 } else { 314 } else {
@@ -346,13 +332,12 @@ static void tc589_config(dev_link_t *link)
346 else 332 else
347 printk(KERN_ERR "3c589_cs: invalid if_port requested\n"); 333 printk(KERN_ERR "3c589_cs: invalid if_port requested\n");
348 334
349 link->dev = &lp->node; 335 link->dev_node = &lp->node;
350 link->state &= ~DEV_CONFIG_PENDING; 336 SET_NETDEV_DEV(dev, &handle_to_dev(link));
351 SET_NETDEV_DEV(dev, &handle_to_dev(handle));
352 337
353 if (register_netdev(dev) != 0) { 338 if (register_netdev(dev) != 0) {
354 printk(KERN_ERR "3c589_cs: register_netdev() failed\n"); 339 printk(KERN_ERR "3c589_cs: register_netdev() failed\n");
355 link->dev = NULL; 340 link->dev_node = NULL;
356 goto failed; 341 goto failed;
357 } 342 }
358 343
@@ -366,14 +351,13 @@ static void tc589_config(dev_link_t *link)
366 printk(KERN_INFO " %dK FIFO split %s Rx:Tx, %s xcvr\n", 351 printk(KERN_INFO " %dK FIFO split %s Rx:Tx, %s xcvr\n",
367 (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3], 352 (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
368 if_names[dev->if_port]); 353 if_names[dev->if_port]);
369 return; 354 return 0;
370 355
371cs_failed: 356cs_failed:
372 cs_error(link->handle, last_fn, last_ret); 357 cs_error(link, last_fn, last_ret);
373failed: 358failed:
374 tc589_release(link); 359 tc589_release(link);
375 return; 360 return -ENODEV;
376
377} /* tc589_config */ 361} /* tc589_config */
378 362
379/*====================================================================== 363/*======================================================================
@@ -384,44 +368,28 @@ failed:
384 368
385======================================================================*/ 369======================================================================*/
386 370
387static void tc589_release(dev_link_t *link) 371static void tc589_release(struct pcmcia_device *link)
388{ 372{
389 DEBUG(0, "3c589_release(0x%p)\n", link); 373 pcmcia_disable_device(link);
390
391 pcmcia_release_configuration(link->handle);
392 pcmcia_release_io(link->handle, &link->io);
393 pcmcia_release_irq(link->handle, &link->irq);
394
395 link->state &= ~DEV_CONFIG;
396} 374}
397 375
398static int tc589_suspend(struct pcmcia_device *p_dev) 376static int tc589_suspend(struct pcmcia_device *link)
399{ 377{
400 dev_link_t *link = dev_to_instance(p_dev);
401 struct net_device *dev = link->priv; 378 struct net_device *dev = link->priv;
402 379
403 link->state |= DEV_SUSPEND; 380 if (link->open)
404 if (link->state & DEV_CONFIG) { 381 netif_device_detach(dev);
405 if (link->open)
406 netif_device_detach(dev);
407 pcmcia_release_configuration(link->handle);
408 }
409 382
410 return 0; 383 return 0;
411} 384}
412 385
413static int tc589_resume(struct pcmcia_device *p_dev) 386static int tc589_resume(struct pcmcia_device *link)
414{ 387{
415 dev_link_t *link = dev_to_instance(p_dev);
416 struct net_device *dev = link->priv; 388 struct net_device *dev = link->priv;
417 389
418 link->state &= ~DEV_SUSPEND; 390 if (link->open) {
419 if (link->state & DEV_CONFIG) { 391 tc589_reset(dev);
420 pcmcia_request_configuration(link->handle, &link->conf); 392 netif_device_attach(dev);
421 if (link->open) {
422 tc589_reset(dev);
423 netif_device_attach(dev);
424 }
425 } 393 }
426 394
427 return 0; 395 return 0;
@@ -587,9 +555,9 @@ static int el3_config(struct net_device *dev, struct ifmap *map)
587static int el3_open(struct net_device *dev) 555static int el3_open(struct net_device *dev)
588{ 556{
589 struct el3_private *lp = netdev_priv(dev); 557 struct el3_private *lp = netdev_priv(dev);
590 dev_link_t *link = &lp->link; 558 struct pcmcia_device *link = lp->p_dev;
591 559
592 if (!DEV_OK(link)) 560 if (!pcmcia_dev_present(link))
593 return -ENODEV; 561 return -ENODEV;
594 562
595 link->open++; 563 link->open++;
@@ -848,9 +816,9 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
848{ 816{
849 struct el3_private *lp = netdev_priv(dev); 817 struct el3_private *lp = netdev_priv(dev);
850 unsigned long flags; 818 unsigned long flags;
851 dev_link_t *link = &lp->link; 819 struct pcmcia_device *link = lp->p_dev;
852 820
853 if (DEV_OK(link)) { 821 if (pcmcia_dev_present(link)) {
854 spin_lock_irqsave(&lp->lock, flags); 822 spin_lock_irqsave(&lp->lock, flags);
855 update_stats(dev); 823 update_stats(dev);
856 spin_unlock_irqrestore(&lp->lock, flags); 824 spin_unlock_irqrestore(&lp->lock, flags);
@@ -950,11 +918,11 @@ static int el3_rx(struct net_device *dev)
950static void set_multicast_list(struct net_device *dev) 918static void set_multicast_list(struct net_device *dev)
951{ 919{
952 struct el3_private *lp = netdev_priv(dev); 920 struct el3_private *lp = netdev_priv(dev);
953 dev_link_t *link = &lp->link; 921 struct pcmcia_device *link = lp->p_dev;
954 kio_addr_t ioaddr = dev->base_addr; 922 kio_addr_t ioaddr = dev->base_addr;
955 u16 opts = SetRxFilter | RxStation | RxBroadcast; 923 u16 opts = SetRxFilter | RxStation | RxBroadcast;
956 924
957 if (!(DEV_OK(link))) return; 925 if (!pcmcia_dev_present(link)) return;
958 if (dev->flags & IFF_PROMISC) 926 if (dev->flags & IFF_PROMISC)
959 opts |= RxMulticast | RxProm; 927 opts |= RxMulticast | RxProm;
960 else if (dev->mc_count || (dev->flags & IFF_ALLMULTI)) 928 else if (dev->mc_count || (dev->flags & IFF_ALLMULTI))
@@ -965,12 +933,12 @@ static void set_multicast_list(struct net_device *dev)
965static int el3_close(struct net_device *dev) 933static int el3_close(struct net_device *dev)
966{ 934{
967 struct el3_private *lp = netdev_priv(dev); 935 struct el3_private *lp = netdev_priv(dev);
968 dev_link_t *link = &lp->link; 936 struct pcmcia_device *link = lp->p_dev;
969 kio_addr_t ioaddr = dev->base_addr; 937 kio_addr_t ioaddr = dev->base_addr;
970 938
971 DEBUG(1, "%s: shutting down ethercard.\n", dev->name); 939 DEBUG(1, "%s: shutting down ethercard.\n", dev->name);
972 940
973 if (DEV_OK(link)) { 941 if (pcmcia_dev_present(link)) {
974 /* Turn off statistics ASAP. We update lp->stats below. */ 942 /* Turn off statistics ASAP. We update lp->stats below. */
975 outw(StatsDisable, ioaddr + EL3_CMD); 943 outw(StatsDisable, ioaddr + EL3_CMD);
976 944
@@ -1020,7 +988,7 @@ static struct pcmcia_driver tc589_driver = {
1020 .drv = { 988 .drv = {
1021 .name = "3c589_cs", 989 .name = "3c589_cs",
1022 }, 990 },
1023 .probe = tc589_attach, 991 .probe = tc589_probe,
1024 .remove = tc589_detach, 992 .remove = tc589_detach,
1025 .id_table = tc589_ids, 993 .id_table = tc589_ids,
1026 .suspend = tc589_suspend, 994 .suspend = tc589_suspend,
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 1cc94b2d76c1..56233afcb2b3 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -86,8 +86,8 @@ static char *version =
86 86
87/*====================================================================*/ 87/*====================================================================*/
88 88
89static void axnet_config(dev_link_t *link); 89static int axnet_config(struct pcmcia_device *link);
90static void axnet_release(dev_link_t *link); 90static void axnet_release(struct pcmcia_device *link);
91static int axnet_open(struct net_device *dev); 91static int axnet_open(struct net_device *dev);
92static int axnet_close(struct net_device *dev); 92static int axnet_close(struct net_device *dev);
93static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 93static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
@@ -117,7 +117,7 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id, struct pt_regs *regs);
117/*====================================================================*/ 117/*====================================================================*/
118 118
119typedef struct axnet_dev_t { 119typedef struct axnet_dev_t {
120 dev_link_t link; 120 struct pcmcia_device *p_dev;
121 dev_node_t node; 121 dev_node_t node;
122 caddr_t base; 122 caddr_t base;
123 struct timer_list watchdog; 123 struct timer_list watchdog;
@@ -142,10 +142,9 @@ static inline axnet_dev_t *PRIV(struct net_device *dev)
142 142
143======================================================================*/ 143======================================================================*/
144 144
145static int axnet_attach(struct pcmcia_device *p_dev) 145static int axnet_probe(struct pcmcia_device *link)
146{ 146{
147 axnet_dev_t *info; 147 axnet_dev_t *info;
148 dev_link_t *link;
149 struct net_device *dev; 148 struct net_device *dev;
150 149
151 DEBUG(0, "axnet_attach()\n"); 150 DEBUG(0, "axnet_attach()\n");
@@ -157,7 +156,7 @@ static int axnet_attach(struct pcmcia_device *p_dev)
157 return -ENOMEM; 156 return -ENOMEM;
158 157
159 info = PRIV(dev); 158 info = PRIV(dev);
160 link = &info->link; 159 info->p_dev = link;
161 link->priv = dev; 160 link->priv = dev;
162 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 161 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
163 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 162 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
@@ -169,13 +168,7 @@ static int axnet_attach(struct pcmcia_device *p_dev)
169 dev->do_ioctl = &axnet_ioctl; 168 dev->do_ioctl = &axnet_ioctl;
170 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 169 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
171 170
172 link->handle = p_dev; 171 return axnet_config(link);
173 p_dev->instance = link;
174
175 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
176 axnet_config(link);
177
178 return 0;
179} /* axnet_attach */ 172} /* axnet_attach */
180 173
181/*====================================================================== 174/*======================================================================
@@ -187,18 +180,16 @@ static int axnet_attach(struct pcmcia_device *p_dev)
187 180
188======================================================================*/ 181======================================================================*/
189 182
190static void axnet_detach(struct pcmcia_device *p_dev) 183static void axnet_detach(struct pcmcia_device *link)
191{ 184{
192 dev_link_t *link = dev_to_instance(p_dev);
193 struct net_device *dev = link->priv; 185 struct net_device *dev = link->priv;
194 186
195 DEBUG(0, "axnet_detach(0x%p)\n", link); 187 DEBUG(0, "axnet_detach(0x%p)\n", link);
196 188
197 if (link->dev) 189 if (link->dev_node)
198 unregister_netdev(dev); 190 unregister_netdev(dev);
199 191
200 if (link->state & DEV_CONFIG) 192 axnet_release(link);
201 axnet_release(link);
202 193
203 free_netdev(dev); 194 free_netdev(dev);
204} /* axnet_detach */ 195} /* axnet_detach */
@@ -209,7 +200,7 @@ static void axnet_detach(struct pcmcia_device *p_dev)
209 200
210======================================================================*/ 201======================================================================*/
211 202
212static int get_prom(dev_link_t *link) 203static int get_prom(struct pcmcia_device *link)
213{ 204{
214 struct net_device *dev = link->priv; 205 struct net_device *dev = link->priv;
215 kio_addr_t ioaddr = dev->base_addr; 206 kio_addr_t ioaddr = dev->base_addr;
@@ -263,7 +254,7 @@ static int get_prom(dev_link_t *link)
263#define CS_CHECK(fn, ret) \ 254#define CS_CHECK(fn, ret) \
264do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 255do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
265 256
266static int try_io_port(dev_link_t *link) 257static int try_io_port(struct pcmcia_device *link)
267{ 258{
268 int j, ret; 259 int j, ret;
269 if (link->io.NumPorts1 == 32) { 260 if (link->io.NumPorts1 == 32) {
@@ -284,25 +275,23 @@ static int try_io_port(dev_link_t *link)
284 for (j = 0; j < 0x400; j += 0x20) { 275 for (j = 0; j < 0x400; j += 0x20) {
285 link->io.BasePort1 = j ^ 0x300; 276 link->io.BasePort1 = j ^ 0x300;
286 link->io.BasePort2 = (j ^ 0x300) + 0x10; 277 link->io.BasePort2 = (j ^ 0x300) + 0x10;
287 ret = pcmcia_request_io(link->handle, &link->io); 278 ret = pcmcia_request_io(link, &link->io);
288 if (ret == CS_SUCCESS) return ret; 279 if (ret == CS_SUCCESS) return ret;
289 } 280 }
290 return ret; 281 return ret;
291 } else { 282 } else {
292 return pcmcia_request_io(link->handle, &link->io); 283 return pcmcia_request_io(link, &link->io);
293 } 284 }
294} 285}
295 286
296static void axnet_config(dev_link_t *link) 287static int axnet_config(struct pcmcia_device *link)
297{ 288{
298 client_handle_t handle = link->handle;
299 struct net_device *dev = link->priv; 289 struct net_device *dev = link->priv;
300 axnet_dev_t *info = PRIV(dev); 290 axnet_dev_t *info = PRIV(dev);
301 tuple_t tuple; 291 tuple_t tuple;
302 cisparse_t parse; 292 cisparse_t parse;
303 int i, j, last_ret, last_fn; 293 int i, j, last_ret, last_fn;
304 u_short buf[64]; 294 u_short buf[64];
305 config_info_t conf;
306 295
307 DEBUG(0, "axnet_config(0x%p)\n", link); 296 DEBUG(0, "axnet_config(0x%p)\n", link);
308 297
@@ -311,29 +300,22 @@ static void axnet_config(dev_link_t *link)
311 tuple.TupleDataMax = sizeof(buf); 300 tuple.TupleDataMax = sizeof(buf);
312 tuple.TupleOffset = 0; 301 tuple.TupleOffset = 0;
313 tuple.DesiredTuple = CISTPL_CONFIG; 302 tuple.DesiredTuple = CISTPL_CONFIG;
314 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 303 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
315 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 304 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
316 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 305 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
317 link->conf.ConfigBase = parse.config.base; 306 link->conf.ConfigBase = parse.config.base;
318 /* don't trust the CIS on this; Linksys got it wrong */ 307 /* don't trust the CIS on this; Linksys got it wrong */
319 link->conf.Present = 0x63; 308 link->conf.Present = 0x63;
320 309
321 /* Configure card */
322 link->state |= DEV_CONFIG;
323
324 /* Look up current Vcc */
325 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(handle, &conf));
326 link->conf.Vcc = conf.Vcc;
327
328 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 310 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
329 tuple.Attributes = 0; 311 tuple.Attributes = 0;
330 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 312 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
331 while (last_ret == CS_SUCCESS) { 313 while (last_ret == CS_SUCCESS) {
332 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry); 314 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
333 cistpl_io_t *io = &(parse.cftable_entry.io); 315 cistpl_io_t *io = &(parse.cftable_entry.io);
334 316
335 if (pcmcia_get_tuple_data(handle, &tuple) != 0 || 317 if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
336 pcmcia_parse_tuple(handle, &tuple, &parse) != 0 || 318 pcmcia_parse_tuple(link, &tuple, &parse) != 0 ||
337 cfg->index == 0 || cfg->io.nwin == 0) 319 cfg->index == 0 || cfg->io.nwin == 0)
338 goto next_entry; 320 goto next_entry;
339 321
@@ -355,21 +337,21 @@ static void axnet_config(dev_link_t *link)
355 if (last_ret == CS_SUCCESS) break; 337 if (last_ret == CS_SUCCESS) break;
356 } 338 }
357 next_entry: 339 next_entry:
358 last_ret = pcmcia_get_next_tuple(handle, &tuple); 340 last_ret = pcmcia_get_next_tuple(link, &tuple);
359 } 341 }
360 if (last_ret != CS_SUCCESS) { 342 if (last_ret != CS_SUCCESS) {
361 cs_error(handle, RequestIO, last_ret); 343 cs_error(link, RequestIO, last_ret);
362 goto failed; 344 goto failed;
363 } 345 }
364 346
365 CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq)); 347 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
366 348
367 if (link->io.NumPorts2 == 8) { 349 if (link->io.NumPorts2 == 8) {
368 link->conf.Attributes |= CONF_ENABLE_SPKR; 350 link->conf.Attributes |= CONF_ENABLE_SPKR;
369 link->conf.Status = CCSR_AUDIO_ENA; 351 link->conf.Status = CCSR_AUDIO_ENA;
370 } 352 }
371 353
372 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf)); 354 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
373 dev->irq = link->irq.AssignedIRQ; 355 dev->irq = link->irq.AssignedIRQ;
374 dev->base_addr = link->io.BasePort1; 356 dev->base_addr = link->io.BasePort1;
375 357
@@ -406,7 +388,7 @@ static void axnet_config(dev_link_t *link)
406 Bit 2 of CCSR is active low. */ 388 Bit 2 of CCSR is active low. */
407 if (i == 32) { 389 if (i == 32) {
408 conf_reg_t reg = { 0, CS_WRITE, CISREG_CCSR, 0x04 }; 390 conf_reg_t reg = { 0, CS_WRITE, CISREG_CCSR, 0x04 };
409 pcmcia_access_configuration_register(link->handle, &reg); 391 pcmcia_access_configuration_register(link, &reg);
410 for (i = 0; i < 32; i++) { 392 for (i = 0; i < 32; i++) {
411 j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1); 393 j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
412 if ((j != 0) && (j != 0xffff)) break; 394 if ((j != 0) && (j != 0xffff)) break;
@@ -414,13 +396,12 @@ static void axnet_config(dev_link_t *link)
414 } 396 }
415 397
416 info->phy_id = (i < 32) ? i : -1; 398 info->phy_id = (i < 32) ? i : -1;
417 link->dev = &info->node; 399 link->dev_node = &info->node;
418 link->state &= ~DEV_CONFIG_PENDING; 400 SET_NETDEV_DEV(dev, &handle_to_dev(link));
419 SET_NETDEV_DEV(dev, &handle_to_dev(handle));
420 401
421 if (register_netdev(dev) != 0) { 402 if (register_netdev(dev) != 0) {
422 printk(KERN_NOTICE "axnet_cs: register_netdev() failed\n"); 403 printk(KERN_NOTICE "axnet_cs: register_netdev() failed\n");
423 link->dev = NULL; 404 link->dev_node = NULL;
424 goto failed; 405 goto failed;
425 } 406 }
426 407
@@ -436,14 +417,13 @@ static void axnet_config(dev_link_t *link)
436 } else { 417 } else {
437 printk(KERN_NOTICE " No MII transceivers found!\n"); 418 printk(KERN_NOTICE " No MII transceivers found!\n");
438 } 419 }
439 return; 420 return 0;
440 421
441cs_failed: 422cs_failed:
442 cs_error(link->handle, last_fn, last_ret); 423 cs_error(link, last_fn, last_ret);
443failed: 424failed:
444 axnet_release(link); 425 axnet_release(link);
445 link->state &= ~DEV_CONFIG_PENDING; 426 return -ENODEV;
446 return;
447} /* axnet_config */ 427} /* axnet_config */
448 428
449/*====================================================================== 429/*======================================================================
@@ -454,45 +434,29 @@ failed:
454 434
455======================================================================*/ 435======================================================================*/
456 436
457static void axnet_release(dev_link_t *link) 437static void axnet_release(struct pcmcia_device *link)
458{ 438{
459 DEBUG(0, "axnet_release(0x%p)\n", link); 439 pcmcia_disable_device(link);
460
461 pcmcia_release_configuration(link->handle);
462 pcmcia_release_io(link->handle, &link->io);
463 pcmcia_release_irq(link->handle, &link->irq);
464
465 link->state &= ~DEV_CONFIG;
466} 440}
467 441
468static int axnet_suspend(struct pcmcia_device *p_dev) 442static int axnet_suspend(struct pcmcia_device *link)
469{ 443{
470 dev_link_t *link = dev_to_instance(p_dev);
471 struct net_device *dev = link->priv; 444 struct net_device *dev = link->priv;
472 445
473 link->state |= DEV_SUSPEND; 446 if (link->open)
474 if (link->state & DEV_CONFIG) { 447 netif_device_detach(dev);
475 if (link->open)
476 netif_device_detach(dev);
477 pcmcia_release_configuration(link->handle);
478 }
479 448
480 return 0; 449 return 0;
481} 450}
482 451
483static int axnet_resume(struct pcmcia_device *p_dev) 452static int axnet_resume(struct pcmcia_device *link)
484{ 453{
485 dev_link_t *link = dev_to_instance(p_dev);
486 struct net_device *dev = link->priv; 454 struct net_device *dev = link->priv;
487 455
488 link->state &= ~DEV_SUSPEND; 456 if (link->open) {
489 if (link->state & DEV_CONFIG) { 457 axnet_reset_8390(dev);
490 pcmcia_request_configuration(link->handle, &link->conf); 458 AX88190_init(dev, 1);
491 if (link->open) { 459 netif_device_attach(dev);
492 axnet_reset_8390(dev);
493 AX88190_init(dev, 1);
494 netif_device_attach(dev);
495 }
496 } 460 }
497 461
498 return 0; 462 return 0;
@@ -562,11 +526,11 @@ static void mdio_write(kio_addr_t addr, int phy_id, int loc, int value)
562static int axnet_open(struct net_device *dev) 526static int axnet_open(struct net_device *dev)
563{ 527{
564 axnet_dev_t *info = PRIV(dev); 528 axnet_dev_t *info = PRIV(dev);
565 dev_link_t *link = &info->link; 529 struct pcmcia_device *link = info->p_dev;
566 530
567 DEBUG(2, "axnet_open('%s')\n", dev->name); 531 DEBUG(2, "axnet_open('%s')\n", dev->name);
568 532
569 if (!DEV_OK(link)) 533 if (!pcmcia_dev_present(link))
570 return -ENODEV; 534 return -ENODEV;
571 535
572 link->open++; 536 link->open++;
@@ -588,7 +552,7 @@ static int axnet_open(struct net_device *dev)
588static int axnet_close(struct net_device *dev) 552static int axnet_close(struct net_device *dev)
589{ 553{
590 axnet_dev_t *info = PRIV(dev); 554 axnet_dev_t *info = PRIV(dev);
591 dev_link_t *link = &info->link; 555 struct pcmcia_device *link = info->p_dev;
592 556
593 DEBUG(2, "axnet_close('%s')\n", dev->name); 557 DEBUG(2, "axnet_close('%s')\n", dev->name);
594 558
@@ -833,7 +797,7 @@ static struct pcmcia_driver axnet_cs_driver = {
833 .drv = { 797 .drv = {
834 .name = "axnet_cs", 798 .name = "axnet_cs",
835 }, 799 },
836 .probe = axnet_attach, 800 .probe = axnet_probe,
837 .remove = axnet_detach, 801 .remove = axnet_detach,
838 .id_table = axnet_ids, 802 .id_table = axnet_ids,
839 .suspend = axnet_suspend, 803 .suspend = axnet_suspend,
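The axnet_cs hunks above drop the dev_link_t wrapper and its DEV_* state flags. As a consolidated sketch assembled from the new (right-hand) side of those hunks — illustrative only, not an additional change in this commit — the converted power-management handlers now read:

    /* Sketch: post-conversion suspend/resume for axnet_cs. The
     * struct pcmcia_device is passed in directly; the old
     * DEV_SUSPEND/DEV_CONFIG bookkeeping is gone. */
    static int axnet_suspend(struct pcmcia_device *link)
    {
        struct net_device *dev = link->priv;

        if (link->open)
            netif_device_detach(dev);

        return 0;
    }

    static int axnet_resume(struct pcmcia_device *link)
    {
        struct net_device *dev = link->priv;

        if (link->open) {
            axnet_reset_8390(dev);
            AX88190_init(dev, 1);
            netif_device_attach(dev);
        }

        return 0;
    }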
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c
index 2827a48ea37c..441de824ab6b 100644
--- a/drivers/net/pcmcia/com20020_cs.c
+++ b/drivers/net/pcmcia/com20020_cs.c
@@ -118,8 +118,8 @@ MODULE_LICENSE("GPL");
118 118
119/*====================================================================*/ 119/*====================================================================*/
120 120
121static void com20020_config(dev_link_t *link); 121static int com20020_config(struct pcmcia_device *link);
122static void com20020_release(dev_link_t *link); 122static void com20020_release(struct pcmcia_device *link);
123 123
124static void com20020_detach(struct pcmcia_device *p_dev); 124static void com20020_detach(struct pcmcia_device *p_dev);
125 125
@@ -138,9 +138,8 @@ typedef struct com20020_dev_t {
138 138
139======================================================================*/ 139======================================================================*/
140 140
141static int com20020_attach(struct pcmcia_device *p_dev) 141static int com20020_probe(struct pcmcia_device *p_dev)
142{ 142{
143 dev_link_t *link;
144 com20020_dev_t *info; 143 com20020_dev_t *info;
145 struct net_device *dev; 144 struct net_device *dev;
146 struct arcnet_local *lp; 145 struct arcnet_local *lp;
@@ -148,10 +147,6 @@ static int com20020_attach(struct pcmcia_device *p_dev)
148 DEBUG(0, "com20020_attach()\n"); 147 DEBUG(0, "com20020_attach()\n");
149 148
150 /* Create new network device */ 149 /* Create new network device */
151 link = kmalloc(sizeof(struct dev_link_t), GFP_KERNEL);
152 if (!link)
153 return -ENOMEM;
154
155 info = kmalloc(sizeof(struct com20020_dev_t), GFP_KERNEL); 150 info = kmalloc(sizeof(struct com20020_dev_t), GFP_KERNEL);
156 if (!info) 151 if (!info)
157 goto fail_alloc_info; 152 goto fail_alloc_info;
@@ -161,7 +156,6 @@ static int com20020_attach(struct pcmcia_device *p_dev)
161 goto fail_alloc_dev; 156 goto fail_alloc_dev;
162 157
163 memset(info, 0, sizeof(struct com20020_dev_t)); 158 memset(info, 0, sizeof(struct com20020_dev_t));
164 memset(link, 0, sizeof(struct dev_link_t));
165 lp = dev->priv; 159 lp = dev->priv;
166 lp->timeout = timeout; 160 lp->timeout = timeout;
167 lp->backplane = backplane; 161 lp->backplane = backplane;
@@ -172,28 +166,23 @@ static int com20020_attach(struct pcmcia_device *p_dev)
172 /* fill in our module parameters as defaults */ 166 /* fill in our module parameters as defaults */
173 dev->dev_addr[0] = node; 167 dev->dev_addr[0] = node;
174 168
175 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 169 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
176 link->io.NumPorts1 = 16; 170 p_dev->io.NumPorts1 = 16;
177 link->io.IOAddrLines = 16; 171 p_dev->io.IOAddrLines = 16;
178 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 172 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
179 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 173 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
180 link->conf.Attributes = CONF_ENABLE_IRQ; 174 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
181 link->conf.Vcc = 50; 175 p_dev->conf.IntType = INT_MEMORY_AND_IO;
182 link->conf.IntType = INT_MEMORY_AND_IO; 176 p_dev->conf.Present = PRESENT_OPTION;
183 link->conf.Present = PRESENT_OPTION;
184
185 link->irq.Instance = info->dev = dev;
186 link->priv = info;
187 177
188 link->state |= DEV_PRESENT; 178 p_dev->irq.Instance = info->dev = dev;
189 com20020_config(link); 179 p_dev->priv = info;
190 180
191 return 0; 181 return com20020_config(p_dev);
192 182
193fail_alloc_dev: 183fail_alloc_dev:
194 kfree(info); 184 kfree(info);
195fail_alloc_info: 185fail_alloc_info:
196 kfree(link);
197 return -ENOMEM; 186 return -ENOMEM;
198} /* com20020_attach */ 187} /* com20020_attach */
199 188
@@ -206,9 +195,8 @@ fail_alloc_info:
206 195
207======================================================================*/ 196======================================================================*/
208 197
209static void com20020_detach(struct pcmcia_device *p_dev) 198static void com20020_detach(struct pcmcia_device *link)
210{ 199{
211 dev_link_t *link = dev_to_instance(p_dev);
212 struct com20020_dev_t *info = link->priv; 200 struct com20020_dev_t *info = link->priv;
213 struct net_device *dev = info->dev; 201 struct net_device *dev = info->dev;
214 202
@@ -216,7 +204,7 @@ static void com20020_detach(struct pcmcia_device *p_dev)
216 204
217 DEBUG(0, "com20020_detach(0x%p)\n", link); 205 DEBUG(0, "com20020_detach(0x%p)\n", link);
218 206
219 if (link->dev) { 207 if (link->dev_node) {
220 DEBUG(1,"unregister...\n"); 208 DEBUG(1,"unregister...\n");
221 209
222 unregister_netdev(dev); 210 unregister_netdev(dev);
@@ -229,8 +217,7 @@ static void com20020_detach(struct pcmcia_device *p_dev)
229 free_irq(dev->irq, dev); 217 free_irq(dev->irq, dev);
230 } 218 }
231 219
232 if (link->state & DEV_CONFIG) 220 com20020_release(link);
233 com20020_release(link);
234 221
235 /* Unlink device structure, free bits */ 222 /* Unlink device structure, free bits */
236 DEBUG(1,"unlinking...\n"); 223 DEBUG(1,"unlinking...\n");
@@ -245,8 +232,6 @@ static void com20020_detach(struct pcmcia_device *p_dev)
245 DEBUG(1,"kfree2...\n"); 232 DEBUG(1,"kfree2...\n");
246 kfree(info); 233 kfree(info);
247 } 234 }
248 DEBUG(1,"kfree3...\n");
249 kfree(link);
250 235
251} /* com20020_detach */ 236} /* com20020_detach */
252 237
@@ -261,10 +246,9 @@ static void com20020_detach(struct pcmcia_device *p_dev)
261#define CS_CHECK(fn, ret) \ 246#define CS_CHECK(fn, ret) \
262do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 247do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
263 248
264static void com20020_config(dev_link_t *link) 249static int com20020_config(struct pcmcia_device *link)
265{ 250{
266 struct arcnet_local *lp; 251 struct arcnet_local *lp;
267 client_handle_t handle;
268 tuple_t tuple; 252 tuple_t tuple;
269 cisparse_t parse; 253 cisparse_t parse;
270 com20020_dev_t *info; 254 com20020_dev_t *info;
@@ -273,7 +257,6 @@ static void com20020_config(dev_link_t *link)
273 u_char buf[64]; 257 u_char buf[64];
274 int ioaddr; 258 int ioaddr;
275 259
276 handle = link->handle;
277 info = link->priv; 260 info = link->priv;
278 dev = info->dev; 261 dev = info->dev;
279 262
@@ -286,14 +269,11 @@ static void com20020_config(dev_link_t *link)
286 tuple.TupleDataMax = 64; 269 tuple.TupleDataMax = 64;
287 tuple.TupleOffset = 0; 270 tuple.TupleOffset = 0;
288 tuple.DesiredTuple = CISTPL_CONFIG; 271 tuple.DesiredTuple = CISTPL_CONFIG;
289 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 272 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
290 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 273 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
291 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 274 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
292 link->conf.ConfigBase = parse.config.base; 275 link->conf.ConfigBase = parse.config.base;
293 276
294 /* Configure card */
295 link->state |= DEV_CONFIG;
296
297 DEBUG(1,"arcnet: baseport1 is %Xh\n", link->io.BasePort1); 277 DEBUG(1,"arcnet: baseport1 is %Xh\n", link->io.BasePort1);
298 i = !CS_SUCCESS; 278 i = !CS_SUCCESS;
299 if (!link->io.BasePort1) 279 if (!link->io.BasePort1)
@@ -301,13 +281,13 @@ static void com20020_config(dev_link_t *link)
301 for (ioaddr = 0x100; ioaddr < 0x400; ioaddr += 0x10) 281 for (ioaddr = 0x100; ioaddr < 0x400; ioaddr += 0x10)
302 { 282 {
303 link->io.BasePort1 = ioaddr; 283 link->io.BasePort1 = ioaddr;
304 i = pcmcia_request_io(link->handle, &link->io); 284 i = pcmcia_request_io(link, &link->io);
305 if (i == CS_SUCCESS) 285 if (i == CS_SUCCESS)
306 break; 286 break;
307 } 287 }
308 } 288 }
309 else 289 else
310 i = pcmcia_request_io(link->handle, &link->io); 290 i = pcmcia_request_io(link, &link->io);
311 291
312 if (i != CS_SUCCESS) 292 if (i != CS_SUCCESS)
313 { 293 {
@@ -321,7 +301,7 @@ static void com20020_config(dev_link_t *link)
321 DEBUG(1,"arcnet: request IRQ %d (%Xh/%Xh)\n", 301 DEBUG(1,"arcnet: request IRQ %d (%Xh/%Xh)\n",
322 link->irq.AssignedIRQ, 302 link->irq.AssignedIRQ,
323 link->irq.IRQInfo1, link->irq.IRQInfo2); 303 link->irq.IRQInfo1, link->irq.IRQInfo2);
324 i = pcmcia_request_irq(link->handle, &link->irq); 304 i = pcmcia_request_irq(link, &link->irq);
325 if (i != CS_SUCCESS) 305 if (i != CS_SUCCESS)
326 { 306 {
327 DEBUG(1,"arcnet: requestIRQ failed totally!\n"); 307 DEBUG(1,"arcnet: requestIRQ failed totally!\n");
@@ -330,7 +310,7 @@ static void com20020_config(dev_link_t *link)
330 310
331 dev->irq = link->irq.AssignedIRQ; 311 dev->irq = link->irq.AssignedIRQ;
332 312
333 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf)); 313 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
334 314
335 if (com20020_check(dev)) 315 if (com20020_check(dev))
336 { 316 {
@@ -342,15 +322,14 @@ static void com20020_config(dev_link_t *link)
342 lp->card_name = "PCMCIA COM20020"; 322 lp->card_name = "PCMCIA COM20020";
343 lp->card_flags = ARC_CAN_10MBIT; /* pretend all of them can 10Mbit */ 323 lp->card_flags = ARC_CAN_10MBIT; /* pretend all of them can 10Mbit */
344 324
345 link->dev = &info->node; 325 link->dev_node = &info->node;
346 link->state &= ~DEV_CONFIG_PENDING; 326 SET_NETDEV_DEV(dev, &handle_to_dev(link));
347 SET_NETDEV_DEV(dev, &handle_to_dev(handle));
348 327
349 i = com20020_found(dev, 0); /* calls register_netdev */ 328 i = com20020_found(dev, 0); /* calls register_netdev */
350 329
351 if (i != 0) { 330 if (i != 0) {
352 DEBUG(1,KERN_NOTICE "com20020_cs: com20020_found() failed\n"); 331 DEBUG(1,KERN_NOTICE "com20020_cs: com20020_found() failed\n");
353 link->dev = NULL; 332 link->dev_node = NULL;
354 goto failed; 333 goto failed;
355 } 334 }
356 335
@@ -358,13 +337,14 @@ static void com20020_config(dev_link_t *link)
358 337
359 DEBUG(1,KERN_INFO "%s: port %#3lx, irq %d\n", 338 DEBUG(1,KERN_INFO "%s: port %#3lx, irq %d\n",
360 dev->name, dev->base_addr, dev->irq); 339 dev->name, dev->base_addr, dev->irq);
361 return; 340 return 0;
362 341
363cs_failed: 342cs_failed:
364 cs_error(link->handle, last_fn, last_ret); 343 cs_error(link, last_fn, last_ret);
365failed: 344failed:
366 DEBUG(1,"com20020_config failed...\n"); 345 DEBUG(1,"com20020_config failed...\n");
367 com20020_release(link); 346 com20020_release(link);
347 return -ENODEV;
368} /* com20020_config */ 348} /* com20020_config */
369 349
370/*====================================================================== 350/*======================================================================
@@ -375,52 +355,33 @@ failed:
375 355
376======================================================================*/ 356======================================================================*/
377 357
378static void com20020_release(dev_link_t *link) 358static void com20020_release(struct pcmcia_device *link)
379{ 359{
380 360 DEBUG(0, "com20020_release(0x%p)\n", link);
381 DEBUG(1,"release...\n"); 361 pcmcia_disable_device(link);
382
383 DEBUG(0, "com20020_release(0x%p)\n", link);
384
385 pcmcia_release_configuration(link->handle);
386 pcmcia_release_io(link->handle, &link->io);
387 pcmcia_release_irq(link->handle, &link->irq);
388
389 link->state &= ~(DEV_CONFIG | DEV_RELEASE_PENDING);
390} 362}
391 363
392static int com20020_suspend(struct pcmcia_device *p_dev) 364static int com20020_suspend(struct pcmcia_device *link)
393{ 365{
394 dev_link_t *link = dev_to_instance(p_dev);
395 com20020_dev_t *info = link->priv; 366 com20020_dev_t *info = link->priv;
396 struct net_device *dev = info->dev; 367 struct net_device *dev = info->dev;
397 368
398 link->state |= DEV_SUSPEND; 369 if (link->open)
399 if (link->state & DEV_CONFIG) { 370 netif_device_detach(dev);
400 if (link->open) {
401 netif_device_detach(dev);
402 }
403 pcmcia_release_configuration(link->handle);
404 }
405 371
406 return 0; 372 return 0;
407} 373}
408 374
409static int com20020_resume(struct pcmcia_device *p_dev) 375static int com20020_resume(struct pcmcia_device *link)
410{ 376{
411 dev_link_t *link = dev_to_instance(p_dev);
412 com20020_dev_t *info = link->priv; 377 com20020_dev_t *info = link->priv;
413 struct net_device *dev = info->dev; 378 struct net_device *dev = info->dev;
414 379
415 link->state &= ~DEV_SUSPEND; 380 if (link->open) {
416 if (link->state & DEV_CONFIG) { 381 int ioaddr = dev->base_addr;
417 pcmcia_request_configuration(link->handle, &link->conf); 382 struct arcnet_local *lp = dev->priv;
418 if (link->open) { 383 ARCRESET;
419 int ioaddr = dev->base_addr; 384 }
420 struct arcnet_local *lp = dev->priv;
421 ARCRESET;
422 }
423 }
424 385
425 return 0; 386 return 0;
426} 387}
@@ -436,7 +397,7 @@ static struct pcmcia_driver com20020_cs_driver = {
436 .drv = { 397 .drv = {
437 .name = "com20020_cs", 398 .name = "com20020_cs",
438 }, 399 },
439 .probe = com20020_attach, 400 .probe = com20020_probe,
440 .remove = com20020_detach, 401 .remove = com20020_detach,
441 .id_table = com20020_ids, 402 .id_table = com20020_ids,
442 .suspend = com20020_suspend, 403 .suspend = com20020_suspend,
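The com20020_cs conversion follows the same shape. A consolidated sketch of the converted release and resume paths, taken from the new side of the hunks above (not an additional change): the separate pcmcia_release_configuration/io/irq calls collapse into a single pcmcia_disable_device(), and the DEV_CONFIG/DEV_SUSPEND flag handling disappears.

    /* Sketch: post-conversion teardown and resume for com20020_cs. */
    static void com20020_release(struct pcmcia_device *link)
    {
        DEBUG(0, "com20020_release(0x%p)\n", link);
        pcmcia_disable_device(link);
    }

    static int com20020_resume(struct pcmcia_device *link)
    {
        com20020_dev_t *info = link->priv;
        struct net_device *dev = info->dev;

        if (link->open) {
            int ioaddr = dev->base_addr;
            struct arcnet_local *lp = dev->priv;
            ARCRESET;    /* ioaddr and lp are used by the ARCRESET macro */
        }

        return 0;
    }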
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index b7ac14ba8877..09b11761cdfa 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -84,10 +84,10 @@ static char *version = DRV_NAME ".c " DRV_VERSION " 2002/03/23";
84/* 84/*
85 PCMCIA event handlers 85 PCMCIA event handlers
86 */ 86 */
87static void fmvj18x_config(dev_link_t *link); 87static int fmvj18x_config(struct pcmcia_device *link);
88static int fmvj18x_get_hwinfo(dev_link_t *link, u_char *node_id); 88static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id);
89static int fmvj18x_setup_mfc(dev_link_t *link); 89static int fmvj18x_setup_mfc(struct pcmcia_device *link);
90static void fmvj18x_release(dev_link_t *link); 90static void fmvj18x_release(struct pcmcia_device *link);
91static void fmvj18x_detach(struct pcmcia_device *p_dev); 91static void fmvj18x_detach(struct pcmcia_device *p_dev);
92 92
93/* 93/*
@@ -116,7 +116,7 @@ typedef enum { MBH10302, MBH10304, TDK, CONTEC, LA501, UNGERMANN,
116 driver specific data structure 116 driver specific data structure
117*/ 117*/
118typedef struct local_info_t { 118typedef struct local_info_t {
119 dev_link_t link; 119 struct pcmcia_device *p_dev;
120 dev_node_t node; 120 dev_node_t node;
121 struct net_device_stats stats; 121 struct net_device_stats stats;
122 long open_time; 122 long open_time;
@@ -228,10 +228,9 @@ typedef struct local_info_t {
228#define BANK_1U 0x24 /* bank 1 (CONFIG_1) */ 228#define BANK_1U 0x24 /* bank 1 (CONFIG_1) */
229#define BANK_2U 0x28 /* bank 2 (CONFIG_1) */ 229#define BANK_2U 0x28 /* bank 2 (CONFIG_1) */
230 230
231static int fmvj18x_attach(struct pcmcia_device *p_dev) 231static int fmvj18x_probe(struct pcmcia_device *link)
232{ 232{
233 local_info_t *lp; 233 local_info_t *lp;
234 dev_link_t *link;
235 struct net_device *dev; 234 struct net_device *dev;
236 235
237 DEBUG(0, "fmvj18x_attach()\n"); 236 DEBUG(0, "fmvj18x_attach()\n");
@@ -241,8 +240,8 @@ static int fmvj18x_attach(struct pcmcia_device *p_dev)
241 if (!dev) 240 if (!dev)
242 return -ENOMEM; 241 return -ENOMEM;
243 lp = netdev_priv(dev); 242 lp = netdev_priv(dev);
244 link = &lp->link;
245 link->priv = dev; 243 link->priv = dev;
244 lp->p_dev = link;
246 245
247 /* The io structure describes IO port mapping */ 246 /* The io structure describes IO port mapping */
248 link->io.NumPorts1 = 32; 247 link->io.NumPorts1 = 32;
@@ -257,7 +256,6 @@ static int fmvj18x_attach(struct pcmcia_device *p_dev)
257 256
258 /* General socket configuration */ 257 /* General socket configuration */
259 link->conf.Attributes = CONF_ENABLE_IRQ; 258 link->conf.Attributes = CONF_ENABLE_IRQ;
260 link->conf.Vcc = 50;
261 link->conf.IntType = INT_MEMORY_AND_IO; 259 link->conf.IntType = INT_MEMORY_AND_IO;
262 260
263 /* The FMVJ18x specific entries in the device structure. */ 261 /* The FMVJ18x specific entries in the device structure. */
@@ -274,29 +272,21 @@ static int fmvj18x_attach(struct pcmcia_device *p_dev)
274#endif 272#endif
275 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 273 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
276 274
277 link->handle = p_dev; 275 return fmvj18x_config(link);
278 p_dev->instance = link;
279
280 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
281 fmvj18x_config(link);
282
283 return 0;
284} /* fmvj18x_attach */ 276} /* fmvj18x_attach */
285 277
286/*====================================================================*/ 278/*====================================================================*/
287 279
288static void fmvj18x_detach(struct pcmcia_device *p_dev) 280static void fmvj18x_detach(struct pcmcia_device *link)
289{ 281{
290 dev_link_t *link = dev_to_instance(p_dev);
291 struct net_device *dev = link->priv; 282 struct net_device *dev = link->priv;
292 283
293 DEBUG(0, "fmvj18x_detach(0x%p)\n", link); 284 DEBUG(0, "fmvj18x_detach(0x%p)\n", link);
294 285
295 if (link->dev) 286 if (link->dev_node)
296 unregister_netdev(dev); 287 unregister_netdev(dev);
297 288
298 if (link->state & DEV_CONFIG) 289 fmvj18x_release(link);
299 fmvj18x_release(link);
300 290
301 free_netdev(dev); 291 free_netdev(dev);
302} /* fmvj18x_detach */ 292} /* fmvj18x_detach */
@@ -306,7 +296,7 @@ static void fmvj18x_detach(struct pcmcia_device *p_dev)
306#define CS_CHECK(fn, ret) \ 296#define CS_CHECK(fn, ret) \
307do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 297do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
308 298
309static int mfc_try_io_port(dev_link_t *link) 299static int mfc_try_io_port(struct pcmcia_device *link)
310{ 300{
311 int i, ret; 301 int i, ret;
312 static const kio_addr_t serial_base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; 302 static const kio_addr_t serial_base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
@@ -318,13 +308,13 @@ static int mfc_try_io_port(dev_link_t *link)
318 link->io.NumPorts2 = 0; 308 link->io.NumPorts2 = 0;
319 printk(KERN_NOTICE "fmvj18x_cs: out of resource for serial\n"); 309 printk(KERN_NOTICE "fmvj18x_cs: out of resource for serial\n");
320 } 310 }
321 ret = pcmcia_request_io(link->handle, &link->io); 311 ret = pcmcia_request_io(link, &link->io);
322 if (ret == CS_SUCCESS) return ret; 312 if (ret == CS_SUCCESS) return ret;
323 } 313 }
324 return ret; 314 return ret;
325} 315}
326 316
327static int ungermann_try_io_port(dev_link_t *link) 317static int ungermann_try_io_port(struct pcmcia_device *link)
328{ 318{
329 int ret; 319 int ret;
330 kio_addr_t ioaddr; 320 kio_addr_t ioaddr;
@@ -334,7 +324,7 @@ static int ungermann_try_io_port(dev_link_t *link)
334 */ 324 */
335 for (ioaddr = 0x300; ioaddr < 0x3e0; ioaddr += 0x20) { 325 for (ioaddr = 0x300; ioaddr < 0x3e0; ioaddr += 0x20) {
336 link->io.BasePort1 = ioaddr; 326 link->io.BasePort1 = ioaddr;
337 ret = pcmcia_request_io(link->handle, &link->io); 327 ret = pcmcia_request_io(link, &link->io);
338 if (ret == CS_SUCCESS) { 328 if (ret == CS_SUCCESS) {
339 /* calculate ConfigIndex value */ 329 /* calculate ConfigIndex value */
340 link->conf.ConfigIndex = 330 link->conf.ConfigIndex =
@@ -345,9 +335,8 @@ static int ungermann_try_io_port(dev_link_t *link)
345 return ret; /* RequestIO failed */ 335 return ret; /* RequestIO failed */
346} 336}
347 337
348static void fmvj18x_config(dev_link_t *link) 338static int fmvj18x_config(struct pcmcia_device *link)
349{ 339{
350 client_handle_t handle = link->handle;
351 struct net_device *dev = link->priv; 340 struct net_device *dev = link->priv;
352 local_info_t *lp = netdev_priv(dev); 341 local_info_t *lp = netdev_priv(dev);
353 tuple_t tuple; 342 tuple_t tuple;
@@ -366,42 +355,34 @@ static void fmvj18x_config(dev_link_t *link)
366 registers. 355 registers.
367 */ 356 */
368 tuple.DesiredTuple = CISTPL_CONFIG; 357 tuple.DesiredTuple = CISTPL_CONFIG;
369 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 358 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
370 tuple.TupleData = (u_char *)buf; 359 tuple.TupleData = (u_char *)buf;
371 tuple.TupleDataMax = 64; 360 tuple.TupleDataMax = 64;
372 tuple.TupleOffset = 0; 361 tuple.TupleOffset = 0;
373 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 362 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
374 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 363 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
375
376 /* Configure card */
377 link->state |= DEV_CONFIG;
378 364
379 link->conf.ConfigBase = parse.config.base; 365 link->conf.ConfigBase = parse.config.base;
380 link->conf.Present = parse.config.rmask[0]; 366 link->conf.Present = parse.config.rmask[0];
381 367
382 tuple.DesiredTuple = CISTPL_FUNCE; 368 tuple.DesiredTuple = CISTPL_FUNCE;
383 tuple.TupleOffset = 0; 369 tuple.TupleOffset = 0;
384 if (pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS) { 370 if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) {
385 /* Yes, I have CISTPL_FUNCE. Let's check CISTPL_MANFID */ 371 /* Yes, I have CISTPL_FUNCE. Let's check CISTPL_MANFID */
386 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 372 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
387 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 373 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
388 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 374 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
389 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 375 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
390 link->conf.ConfigIndex = parse.cftable_entry.index; 376 link->conf.ConfigIndex = parse.cftable_entry.index;
391 tuple.DesiredTuple = CISTPL_MANFID; 377 tuple.DesiredTuple = CISTPL_MANFID;
392 if (pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS) 378 if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS)
393 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 379 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
394 else 380 else
395 buf[0] = 0xffff; 381 buf[0] = 0xffff;
396 switch (le16_to_cpu(buf[0])) { 382 switch (le16_to_cpu(buf[0])) {
397 case MANFID_TDK: 383 case MANFID_TDK:
398 cardtype = TDK; 384 cardtype = TDK;
399 if (le16_to_cpu(buf[1]) == PRODID_TDK_CF010) { 385 if (le16_to_cpu(buf[1]) == PRODID_TDK_GN3410
400 cs_status_t status;
401 pcmcia_get_status(handle, &status);
402 if (status.CardState & CS_EVENT_3VCARD)
403 link->conf.Vcc = 33; /* inserted in 3.3V slot */
404 } else if (le16_to_cpu(buf[1]) == PRODID_TDK_GN3410
405 || le16_to_cpu(buf[1]) == PRODID_TDK_NP9610 386 || le16_to_cpu(buf[1]) == PRODID_TDK_NP9610
406 || le16_to_cpu(buf[1]) == PRODID_TDK_MN3200) { 387 || le16_to_cpu(buf[1]) == PRODID_TDK_MN3200) {
407 /* MultiFunction Card */ 388 /* MultiFunction Card */
@@ -429,8 +410,8 @@ static void fmvj18x_config(dev_link_t *link)
429 } else { 410 } else {
430 /* old type card */ 411 /* old type card */
431 tuple.DesiredTuple = CISTPL_MANFID; 412 tuple.DesiredTuple = CISTPL_MANFID;
432 if (pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS) 413 if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS)
433 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 414 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
434 else 415 else
435 buf[0] = 0xffff; 416 buf[0] = 0xffff;
436 switch (le16_to_cpu(buf[0])) { 417 switch (le16_to_cpu(buf[0])) {
@@ -461,10 +442,10 @@ static void fmvj18x_config(dev_link_t *link)
461 ret = ungermann_try_io_port(link); 442 ret = ungermann_try_io_port(link);
462 if (ret != CS_SUCCESS) goto cs_failed; 443 if (ret != CS_SUCCESS) goto cs_failed;
463 } else { 444 } else {
464 CS_CHECK(RequestIO, pcmcia_request_io(link->handle, &link->io)); 445 CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
465 } 446 }
466 CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq)); 447 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
467 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf)); 448 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
468 dev->irq = link->irq.AssignedIRQ; 449 dev->irq = link->irq.AssignedIRQ;
469 dev->base_addr = link->io.BasePort1; 450 dev->base_addr = link->io.BasePort1;
470 451
@@ -493,17 +474,17 @@ static void fmvj18x_config(dev_link_t *link)
493 case CONTEC: 474 case CONTEC:
494 tuple.DesiredTuple = CISTPL_FUNCE; 475 tuple.DesiredTuple = CISTPL_FUNCE;
495 tuple.TupleOffset = 0; 476 tuple.TupleOffset = 0;
496 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 477 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
497 tuple.TupleOffset = 0; 478 tuple.TupleOffset = 0;
498 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 479 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
499 if (cardtype == MBH10304) { 480 if (cardtype == MBH10304) {
500 /* MBH10304's CIS_FUNCE is corrupted */ 481 /* MBH10304's CIS_FUNCE is corrupted */
501 node_id = &(tuple.TupleData[5]); 482 node_id = &(tuple.TupleData[5]);
502 card_name = "FMV-J182"; 483 card_name = "FMV-J182";
503 } else { 484 } else {
504 while (tuple.TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID ) { 485 while (tuple.TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID ) {
505 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple)); 486 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
506 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 487 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
507 } 488 }
508 node_id = &(tuple.TupleData[2]); 489 node_id = &(tuple.TupleData[2]);
509 if( cardtype == TDK ) { 490 if( cardtype == TDK ) {
@@ -545,13 +526,12 @@ static void fmvj18x_config(dev_link_t *link)
545 } 526 }
546 527
547 lp->cardtype = cardtype; 528 lp->cardtype = cardtype;
548 link->dev = &lp->node; 529 link->dev_node = &lp->node;
549 link->state &= ~DEV_CONFIG_PENDING; 530 SET_NETDEV_DEV(dev, &handle_to_dev(link));
550 SET_NETDEV_DEV(dev, &handle_to_dev(handle));
551 531
552 if (register_netdev(dev) != 0) { 532 if (register_netdev(dev) != 0) {
553 printk(KERN_NOTICE "fmvj18x_cs: register_netdev() failed\n"); 533 printk(KERN_NOTICE "fmvj18x_cs: register_netdev() failed\n");
554 link->dev = NULL; 534 link->dev_node = NULL;
555 goto failed; 535 goto failed;
556 } 536 }
557 537
@@ -564,19 +544,18 @@ static void fmvj18x_config(dev_link_t *link)
564 for (i = 0; i < 6; i++) 544 for (i = 0; i < 6; i++)
565 printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n")); 545 printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
566 546
567 return; 547 return 0;
568 548
569cs_failed: 549cs_failed:
570 /* All Card Services errors end up here */ 550 /* All Card Services errors end up here */
571 cs_error(link->handle, last_fn, last_ret); 551 cs_error(link, last_fn, last_ret);
572failed: 552failed:
573 fmvj18x_release(link); 553 fmvj18x_release(link);
574 link->state &= ~DEV_CONFIG_PENDING; 554 return -ENODEV;
575
576} /* fmvj18x_config */ 555} /* fmvj18x_config */
577/*====================================================================*/ 556/*====================================================================*/
578 557
579static int fmvj18x_get_hwinfo(dev_link_t *link, u_char *node_id) 558static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id)
580{ 559{
581 win_req_t req; 560 win_req_t req;
582 memreq_t mem; 561 memreq_t mem;
@@ -587,9 +566,9 @@ static int fmvj18x_get_hwinfo(dev_link_t *link, u_char *node_id)
587 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; 566 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
588 req.Base = 0; req.Size = 0; 567 req.Base = 0; req.Size = 0;
589 req.AccessSpeed = 0; 568 req.AccessSpeed = 0;
590 i = pcmcia_request_window(&link->handle, &req, &link->win); 569 i = pcmcia_request_window(&link, &req, &link->win);
591 if (i != CS_SUCCESS) { 570 if (i != CS_SUCCESS) {
592 cs_error(link->handle, RequestWindow, i); 571 cs_error(link, RequestWindow, i);
593 return -1; 572 return -1;
594 } 573 }
595 574
@@ -623,13 +602,13 @@ static int fmvj18x_get_hwinfo(dev_link_t *link, u_char *node_id)
623 iounmap(base); 602 iounmap(base);
624 j = pcmcia_release_window(link->win); 603 j = pcmcia_release_window(link->win);
625 if (j != CS_SUCCESS) 604 if (j != CS_SUCCESS)
626 cs_error(link->handle, ReleaseWindow, j); 605 cs_error(link, ReleaseWindow, j);
627 return (i != 0x200) ? 0 : -1; 606 return (i != 0x200) ? 0 : -1;
628 607
629} /* fmvj18x_get_hwinfo */ 608} /* fmvj18x_get_hwinfo */
630/*====================================================================*/ 609/*====================================================================*/
631 610
632static int fmvj18x_setup_mfc(dev_link_t *link) 611static int fmvj18x_setup_mfc(struct pcmcia_device *link)
633{ 612{
634 win_req_t req; 613 win_req_t req;
635 memreq_t mem; 614 memreq_t mem;
@@ -642,9 +621,9 @@ static int fmvj18x_setup_mfc(dev_link_t *link)
642 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; 621 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
643 req.Base = 0; req.Size = 0; 622 req.Base = 0; req.Size = 0;
644 req.AccessSpeed = 0; 623 req.AccessSpeed = 0;
645 i = pcmcia_request_window(&link->handle, &req, &link->win); 624 i = pcmcia_request_window(&link, &req, &link->win);
646 if (i != CS_SUCCESS) { 625 if (i != CS_SUCCESS) {
647 cs_error(link->handle, RequestWindow, i); 626 cs_error(link, RequestWindow, i);
648 return -1; 627 return -1;
649 } 628 }
650 629
@@ -666,54 +645,35 @@ static int fmvj18x_setup_mfc(dev_link_t *link)
666 iounmap(base); 645 iounmap(base);
667 j = pcmcia_release_window(link->win); 646 j = pcmcia_release_window(link->win);
668 if (j != CS_SUCCESS) 647 if (j != CS_SUCCESS)
669 cs_error(link->handle, ReleaseWindow, j); 648 cs_error(link, ReleaseWindow, j);
670 return 0; 649 return 0;
671 650
672} 651}
673/*====================================================================*/ 652/*====================================================================*/
674 653
675static void fmvj18x_release(dev_link_t *link) 654static void fmvj18x_release(struct pcmcia_device *link)
676{ 655{
677 656 DEBUG(0, "fmvj18x_release(0x%p)\n", link);
678 DEBUG(0, "fmvj18x_release(0x%p)\n", link); 657 pcmcia_disable_device(link);
679
680 /* Don't bother checking to see if these succeed or not */
681 pcmcia_release_window(link->win);
682 pcmcia_release_configuration(link->handle);
683 pcmcia_release_io(link->handle, &link->io);
684 pcmcia_release_irq(link->handle, &link->irq);
685
686 link->state &= ~DEV_CONFIG;
687} 658}
688 659
689static int fmvj18x_suspend(struct pcmcia_device *p_dev) 660static int fmvj18x_suspend(struct pcmcia_device *link)
690{ 661{
691 dev_link_t *link = dev_to_instance(p_dev);
692 struct net_device *dev = link->priv; 662 struct net_device *dev = link->priv;
693 663
694 link->state |= DEV_SUSPEND; 664 if (link->open)
695 if (link->state & DEV_CONFIG) { 665 netif_device_detach(dev);
696 if (link->open)
697 netif_device_detach(dev);
698 pcmcia_release_configuration(link->handle);
699 }
700
701 666
702 return 0; 667 return 0;
703} 668}
704 669
705static int fmvj18x_resume(struct pcmcia_device *p_dev) 670static int fmvj18x_resume(struct pcmcia_device *link)
706{ 671{
707 dev_link_t *link = dev_to_instance(p_dev);
708 struct net_device *dev = link->priv; 672 struct net_device *dev = link->priv;
709 673
710 link->state &= ~DEV_SUSPEND; 674 if (link->open) {
711 if (link->state & DEV_CONFIG) { 675 fjn_reset(dev);
712 pcmcia_request_configuration(link->handle, &link->conf); 676 netif_device_attach(dev);
713 if (link->open) {
714 fjn_reset(dev);
715 netif_device_attach(dev);
716 }
717 } 677 }
718 678
719 return 0; 679 return 0;
@@ -751,7 +711,7 @@ static struct pcmcia_driver fmvj18x_cs_driver = {
751 .drv = { 711 .drv = {
752 .name = "fmvj18x_cs", 712 .name = "fmvj18x_cs",
753 }, 713 },
754 .probe = fmvj18x_attach, 714 .probe = fmvj18x_probe,
755 .remove = fmvj18x_detach, 715 .remove = fmvj18x_detach,
756 .id_table = fmvj18x_ids, 716 .id_table = fmvj18x_ids,
757 .suspend = fmvj18x_suspend, 717 .suspend = fmvj18x_suspend,
@@ -1148,11 +1108,11 @@ static int fjn_config(struct net_device *dev, struct ifmap *map){
1148static int fjn_open(struct net_device *dev) 1108static int fjn_open(struct net_device *dev)
1149{ 1109{
1150 struct local_info_t *lp = netdev_priv(dev); 1110 struct local_info_t *lp = netdev_priv(dev);
1151 dev_link_t *link = &lp->link; 1111 struct pcmcia_device *link = lp->p_dev;
1152 1112
1153 DEBUG(4, "fjn_open('%s').\n", dev->name); 1113 DEBUG(4, "fjn_open('%s').\n", dev->name);
1154 1114
1155 if (!DEV_OK(link)) 1115 if (!pcmcia_dev_present(link))
1156 return -ENODEV; 1116 return -ENODEV;
1157 1117
1158 link->open++; 1118 link->open++;
@@ -1173,7 +1133,7 @@ static int fjn_open(struct net_device *dev)
1173static int fjn_close(struct net_device *dev) 1133static int fjn_close(struct net_device *dev)
1174{ 1134{
1175 struct local_info_t *lp = netdev_priv(dev); 1135 struct local_info_t *lp = netdev_priv(dev);
1176 dev_link_t *link = &lp->link; 1136 struct pcmcia_device *link = lp->p_dev;
1177 kio_addr_t ioaddr = dev->base_addr; 1137 kio_addr_t ioaddr = dev->base_addr;
1178 1138
1179 DEBUG(4, "fjn_close('%s').\n", dev->name); 1139 DEBUG(4, "fjn_close('%s').\n", dev->name);
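For fmvj18x_cs the hunks above make fmvj18x_config() return an int (0 on success, -ENODEV on failure) and fold the explicit window/configuration/io/irq teardown into pcmcia_disable_device(). A consolidated sketch of the converted release, from the new side of the diff only:

    /* Sketch: post-conversion release for fmvj18x_cs; the former
     * pcmcia_release_window/configuration/io/irq calls and the
     * DEV_CONFIG flag clear are replaced by this single call. */
    static void fmvj18x_release(struct pcmcia_device *link)
    {
        DEBUG(0, "fmvj18x_release(0x%p)\n", link);
        pcmcia_disable_device(link);
    }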
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index b9c7e39576f5..b8fe70b85641 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -105,15 +105,15 @@ MODULE_LICENSE("GPL");
105 105
106/*====================================================================*/ 106/*====================================================================*/
107 107
108static void ibmtr_config(dev_link_t *link); 108static int ibmtr_config(struct pcmcia_device *link);
109static void ibmtr_hw_setup(struct net_device *dev, u_int mmiobase); 109static void ibmtr_hw_setup(struct net_device *dev, u_int mmiobase);
110static void ibmtr_release(dev_link_t *link); 110static void ibmtr_release(struct pcmcia_device *link);
111static void ibmtr_detach(struct pcmcia_device *p_dev); 111static void ibmtr_detach(struct pcmcia_device *p_dev);
112 112
113/*====================================================================*/ 113/*====================================================================*/
114 114
115typedef struct ibmtr_dev_t { 115typedef struct ibmtr_dev_t {
116 dev_link_t link; 116 struct pcmcia_device *p_dev;
117 struct net_device *dev; 117 struct net_device *dev;
118 dev_node_t node; 118 dev_node_t node;
119 window_handle_t sram_win_handle; 119 window_handle_t sram_win_handle;
@@ -138,12 +138,11 @@ static struct ethtool_ops netdev_ethtool_ops = {
138 138
139======================================================================*/ 139======================================================================*/
140 140
141static int ibmtr_attach(struct pcmcia_device *p_dev) 141static int ibmtr_attach(struct pcmcia_device *link)
142{ 142{
143 ibmtr_dev_t *info; 143 ibmtr_dev_t *info;
144 dev_link_t *link;
145 struct net_device *dev; 144 struct net_device *dev;
146 145
147 DEBUG(0, "ibmtr_attach()\n"); 146 DEBUG(0, "ibmtr_attach()\n");
148 147
149 /* Create new token-ring device */ 148 /* Create new token-ring device */
@@ -156,7 +155,7 @@ static int ibmtr_attach(struct pcmcia_device *p_dev)
156 return -ENOMEM; 155 return -ENOMEM;
157 } 156 }
158 157
159 link = &info->link; 158 info->p_dev = link;
160 link->priv = info; 159 link->priv = info;
161 info->ti = netdev_priv(dev); 160 info->ti = netdev_priv(dev);
162 161
@@ -167,21 +166,14 @@ static int ibmtr_attach(struct pcmcia_device *p_dev)
167 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 166 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
168 link->irq.Handler = &tok_interrupt; 167 link->irq.Handler = &tok_interrupt;
169 link->conf.Attributes = CONF_ENABLE_IRQ; 168 link->conf.Attributes = CONF_ENABLE_IRQ;
170 link->conf.Vcc = 50;
171 link->conf.IntType = INT_MEMORY_AND_IO; 169 link->conf.IntType = INT_MEMORY_AND_IO;
172 link->conf.Present = PRESENT_OPTION; 170 link->conf.Present = PRESENT_OPTION;
173 171
174 link->irq.Instance = info->dev = dev; 172 link->irq.Instance = info->dev = dev;
175
176 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
177
178 link->handle = p_dev;
179 p_dev->instance = link;
180 173
181 link->state |= DEV_PRESENT; 174 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
182 ibmtr_config(link);
183 175
184 return 0; 176 return ibmtr_config(link);
185} /* ibmtr_attach */ 177} /* ibmtr_attach */
186 178
187/*====================================================================== 179/*======================================================================
@@ -193,23 +185,22 @@ static int ibmtr_attach(struct pcmcia_device *p_dev)
193 185
194======================================================================*/ 186======================================================================*/
195 187
196static void ibmtr_detach(struct pcmcia_device *p_dev) 188static void ibmtr_detach(struct pcmcia_device *link)
197{ 189{
198 dev_link_t *link = dev_to_instance(p_dev);
199 struct ibmtr_dev_t *info = link->priv; 190 struct ibmtr_dev_t *info = link->priv;
200 struct net_device *dev = info->dev; 191 struct net_device *dev = info->dev;
201 192
202 DEBUG(0, "ibmtr_detach(0x%p)\n", link); 193 DEBUG(0, "ibmtr_detach(0x%p)\n", link);
203 194
204 if (link->dev) 195 if (link->dev_node)
205 unregister_netdev(dev); 196 unregister_netdev(dev);
206 197
207 { 198 {
208 struct tok_info *ti = netdev_priv(dev); 199 struct tok_info *ti = netdev_priv(dev);
209 del_timer_sync(&(ti->tr_timer)); 200 del_timer_sync(&(ti->tr_timer));
210 } 201 }
211 if (link->state & DEV_CONFIG) 202
212 ibmtr_release(link); 203 ibmtr_release(link);
213 204
214 free_netdev(dev); 205 free_netdev(dev);
215 kfree(info); 206 kfree(info);
@@ -226,9 +217,8 @@ static void ibmtr_detach(struct pcmcia_device *p_dev)
226#define CS_CHECK(fn, ret) \ 217#define CS_CHECK(fn, ret) \
227do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 218do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
228 219
229static void ibmtr_config(dev_link_t *link) 220static int ibmtr_config(struct pcmcia_device *link)
230{ 221{
231 client_handle_t handle = link->handle;
232 ibmtr_dev_t *info = link->priv; 222 ibmtr_dev_t *info = link->priv;
233 struct net_device *dev = info->dev; 223 struct net_device *dev = info->dev;
234 struct tok_info *ti = netdev_priv(dev); 224 struct tok_info *ti = netdev_priv(dev);
@@ -246,29 +236,25 @@ static void ibmtr_config(dev_link_t *link)
246 tuple.TupleDataMax = 64; 236 tuple.TupleDataMax = 64;
247 tuple.TupleOffset = 0; 237 tuple.TupleOffset = 0;
248 tuple.DesiredTuple = CISTPL_CONFIG; 238 tuple.DesiredTuple = CISTPL_CONFIG;
249 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 239 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
250 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 240 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
251 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 241 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
252 link->conf.ConfigBase = parse.config.base; 242 link->conf.ConfigBase = parse.config.base;
253
254 /* Configure card */
255 link->state |= DEV_CONFIG;
256
257 link->conf.ConfigIndex = 0x61; 243 link->conf.ConfigIndex = 0x61;
258 244
259 /* Determine if this is PRIMARY or ALTERNATE. */ 245 /* Determine if this is PRIMARY or ALTERNATE. */
260 246
261 /* Try PRIMARY card at 0xA20-0xA23 */ 247 /* Try PRIMARY card at 0xA20-0xA23 */
262 link->io.BasePort1 = 0xA20; 248 link->io.BasePort1 = 0xA20;
263 i = pcmcia_request_io(link->handle, &link->io); 249 i = pcmcia_request_io(link, &link->io);
264 if (i != CS_SUCCESS) { 250 if (i != CS_SUCCESS) {
265 /* Couldn't get 0xA20-0xA23. Try ALTERNATE at 0xA24-0xA27. */ 251 /* Couldn't get 0xA20-0xA23. Try ALTERNATE at 0xA24-0xA27. */
266 link->io.BasePort1 = 0xA24; 252 link->io.BasePort1 = 0xA24;
267 CS_CHECK(RequestIO, pcmcia_request_io(link->handle, &link->io)); 253 CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
268 } 254 }
269 dev->base_addr = link->io.BasePort1; 255 dev->base_addr = link->io.BasePort1;
270 256
271 CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq)); 257 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
272 dev->irq = link->irq.AssignedIRQ; 258 dev->irq = link->irq.AssignedIRQ;
273 ti->irq = link->irq.AssignedIRQ; 259 ti->irq = link->irq.AssignedIRQ;
274 ti->global_int_enable=GLOBAL_INT_ENABLE+((dev->irq==9) ? 2 : dev->irq); 260 ti->global_int_enable=GLOBAL_INT_ENABLE+((dev->irq==9) ? 2 : dev->irq);
@@ -279,7 +265,7 @@ static void ibmtr_config(dev_link_t *link)
279 req.Base = 0; 265 req.Base = 0;
280 req.Size = 0x2000; 266 req.Size = 0x2000;
281 req.AccessSpeed = 250; 267 req.AccessSpeed = 250;
282 CS_CHECK(RequestWindow, pcmcia_request_window(&link->handle, &req, &link->win)); 268 CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &link->win));
283 269
284 mem.CardOffset = mmiobase; 270 mem.CardOffset = mmiobase;
285 mem.Page = 0; 271 mem.Page = 0;
@@ -292,7 +278,7 @@ static void ibmtr_config(dev_link_t *link)
292 req.Base = 0; 278 req.Base = 0;
293 req.Size = sramsize * 1024; 279 req.Size = sramsize * 1024;
294 req.AccessSpeed = 250; 280 req.AccessSpeed = 250;
295 CS_CHECK(RequestWindow, pcmcia_request_window(&link->handle, &req, &info->sram_win_handle)); 281 CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &info->sram_win_handle));
296 282
297 mem.CardOffset = srambase; 283 mem.CardOffset = srambase;
298 mem.Page = 0; 284 mem.Page = 0;
@@ -302,21 +288,20 @@ static void ibmtr_config(dev_link_t *link)
302 ti->sram_virt = ioremap(req.Base, req.Size); 288 ti->sram_virt = ioremap(req.Base, req.Size);
303 ti->sram_phys = req.Base; 289 ti->sram_phys = req.Base;
304 290
305 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf)); 291 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
306 292
307 /* Set up the Token-Ring Controller Configuration Register and 293 /* Set up the Token-Ring Controller Configuration Register and
308 turn on the card. Check the "Local Area Network Credit Card 294 turn on the card. Check the "Local Area Network Credit Card
309 Adapters Technical Reference" SC30-3585 for this info. */ 295 Adapters Technical Reference" SC30-3585 for this info. */
310 ibmtr_hw_setup(dev, mmiobase); 296 ibmtr_hw_setup(dev, mmiobase);
311 297
312 link->dev = &info->node; 298 link->dev_node = &info->node;
313 link->state &= ~DEV_CONFIG_PENDING; 299 SET_NETDEV_DEV(dev, &handle_to_dev(link));
314 SET_NETDEV_DEV(dev, &handle_to_dev(handle));
315 300
316 i = ibmtr_probe_card(dev); 301 i = ibmtr_probe_card(dev);
317 if (i != 0) { 302 if (i != 0) {
318 printk(KERN_NOTICE "ibmtr_cs: register_netdev() failed\n"); 303 printk(KERN_NOTICE "ibmtr_cs: register_netdev() failed\n");
319 link->dev = NULL; 304 link->dev_node = NULL;
320 goto failed; 305 goto failed;
321 } 306 }
322 307
@@ -330,12 +315,13 @@ static void ibmtr_config(dev_link_t *link)
330 for (i = 0; i < TR_ALEN; i++) 315 for (i = 0; i < TR_ALEN; i++)
331 printk("%02X", dev->dev_addr[i]); 316 printk("%02X", dev->dev_addr[i]);
332 printk("\n"); 317 printk("\n");
333 return; 318 return 0;
334 319
335cs_failed: 320cs_failed:
336 cs_error(link->handle, last_fn, last_ret); 321 cs_error(link, last_fn, last_ret);
337failed: 322failed:
338 ibmtr_release(link); 323 ibmtr_release(link);
324 return -ENODEV;
339} /* ibmtr_config */ 325} /* ibmtr_config */
340 326
341/*====================================================================== 327/*======================================================================
@@ -346,56 +332,41 @@ failed:
346 332
347======================================================================*/ 333======================================================================*/
348 334
349static void ibmtr_release(dev_link_t *link) 335static void ibmtr_release(struct pcmcia_device *link)
350{ 336{
351 ibmtr_dev_t *info = link->priv; 337 ibmtr_dev_t *info = link->priv;
352 struct net_device *dev = info->dev; 338 struct net_device *dev = info->dev;
353
354 DEBUG(0, "ibmtr_release(0x%p)\n", link);
355 339
356 pcmcia_release_configuration(link->handle); 340 DEBUG(0, "ibmtr_release(0x%p)\n", link);
357 pcmcia_release_io(link->handle, &link->io);
358 pcmcia_release_irq(link->handle, &link->irq);
359 if (link->win) {
360 struct tok_info *ti = netdev_priv(dev);
361 iounmap(ti->mmio);
362 pcmcia_release_window(link->win);
363 pcmcia_release_window(info->sram_win_handle);
364 }
365 341
366 link->state &= ~DEV_CONFIG; 342 if (link->win) {
343 struct tok_info *ti = netdev_priv(dev);
344 iounmap(ti->mmio);
345 pcmcia_release_window(info->sram_win_handle);
346 }
347 pcmcia_disable_device(link);
367} 348}
368 349
369static int ibmtr_suspend(struct pcmcia_device *p_dev) 350static int ibmtr_suspend(struct pcmcia_device *link)
370{ 351{
371 dev_link_t *link = dev_to_instance(p_dev);
372 ibmtr_dev_t *info = link->priv; 352 ibmtr_dev_t *info = link->priv;
373 struct net_device *dev = info->dev; 353 struct net_device *dev = info->dev;
374 354
375 link->state |= DEV_SUSPEND; 355 if (link->open)
376 if (link->state & DEV_CONFIG) { 356 netif_device_detach(dev);
377 if (link->open)
378 netif_device_detach(dev);
379 pcmcia_release_configuration(link->handle);
380 }
381 357
382 return 0; 358 return 0;
383} 359}
384 360
385static int ibmtr_resume(struct pcmcia_device *p_dev) 361static int ibmtr_resume(struct pcmcia_device *link)
386{ 362{
387 dev_link_t *link = dev_to_instance(p_dev);
388 ibmtr_dev_t *info = link->priv; 363 ibmtr_dev_t *info = link->priv;
389 struct net_device *dev = info->dev; 364 struct net_device *dev = info->dev;
390 365
391 link->state &= ~DEV_SUSPEND; 366 if (link->open) {
392 if (link->state & DEV_CONFIG) { 367 ibmtr_probe(dev); /* really? */
393 pcmcia_request_configuration(link->handle, &link->conf); 368 netif_device_attach(dev);
394 if (link->open) { 369 }
395 ibmtr_probe(dev); /* really? */
396 netif_device_attach(dev);
397 }
398 }
399 370
400 return 0; 371 return 0;
401} 372}
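ibmtr_cs also holds memory windows, so its converted release keeps the iounmap and SRAM-window release before handing the rest to pcmcia_disable_device(). A consolidated sketch of the converted release and resume handlers, assembled from the new side of the hunks above (illustrative only):

    /* Sketch: post-conversion release/resume for ibmtr_cs. */
    static void ibmtr_release(struct pcmcia_device *link)
    {
        ibmtr_dev_t *info = link->priv;
        struct net_device *dev = info->dev;

        DEBUG(0, "ibmtr_release(0x%p)\n", link);

        if (link->win) {
            struct tok_info *ti = netdev_priv(dev);
            iounmap(ti->mmio);
            pcmcia_release_window(info->sram_win_handle);
        }
        pcmcia_disable_device(link);
    }

    static int ibmtr_resume(struct pcmcia_device *link)
    {
        ibmtr_dev_t *info = link->priv;
        struct net_device *dev = info->dev;

        if (link->open) {
            ibmtr_probe(dev);    /* really? */
            netif_device_attach(dev);
        }

        return 0;
    }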
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 787176c57fd9..4260c2128f47 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -362,7 +362,7 @@ typedef struct _mace_statistics {
362} mace_statistics; 362} mace_statistics;
363 363
364typedef struct _mace_private { 364typedef struct _mace_private {
365 dev_link_t link; 365 struct pcmcia_device *p_dev;
366 dev_node_t node; 366 dev_node_t node;
367 struct net_device_stats linux_stats; /* Linux statistics counters */ 367 struct net_device_stats linux_stats; /* Linux statistics counters */
368 mace_statistics mace_stats; /* MACE chip statistics counters */ 368 mace_statistics mace_stats; /* MACE chip statistics counters */
@@ -417,8 +417,8 @@ INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
417Function Prototypes 417Function Prototypes
418---------------------------------------------------------------------------- */ 418---------------------------------------------------------------------------- */
419 419
420static void nmclan_config(dev_link_t *link); 420static int nmclan_config(struct pcmcia_device *link);
421static void nmclan_release(dev_link_t *link); 421static void nmclan_release(struct pcmcia_device *link);
422 422
423static void nmclan_reset(struct net_device *dev); 423static void nmclan_reset(struct net_device *dev);
424static int mace_config(struct net_device *dev, struct ifmap *map); 424static int mace_config(struct net_device *dev, struct ifmap *map);
@@ -443,10 +443,9 @@ nmclan_attach
443 Services. 443 Services.
444---------------------------------------------------------------------------- */ 444---------------------------------------------------------------------------- */
445 445
446static int nmclan_attach(struct pcmcia_device *p_dev) 446static int nmclan_probe(struct pcmcia_device *link)
447{ 447{
448 mace_private *lp; 448 mace_private *lp;
449 dev_link_t *link;
450 struct net_device *dev; 449 struct net_device *dev;
451 450
452 DEBUG(0, "nmclan_attach()\n"); 451 DEBUG(0, "nmclan_attach()\n");
@@ -457,7 +456,7 @@ static int nmclan_attach(struct pcmcia_device *p_dev)
457 if (!dev) 456 if (!dev)
458 return -ENOMEM; 457 return -ENOMEM;
459 lp = netdev_priv(dev); 458 lp = netdev_priv(dev);
460 link = &lp->link; 459 lp->p_dev = link;
461 link->priv = dev; 460 link->priv = dev;
462 461
463 spin_lock_init(&lp->bank_lock); 462 spin_lock_init(&lp->bank_lock);
@@ -469,7 +468,6 @@ static int nmclan_attach(struct pcmcia_device *p_dev)
469 link->irq.Handler = &mace_interrupt; 468 link->irq.Handler = &mace_interrupt;
470 link->irq.Instance = dev; 469 link->irq.Instance = dev;
471 link->conf.Attributes = CONF_ENABLE_IRQ; 470 link->conf.Attributes = CONF_ENABLE_IRQ;
472 link->conf.Vcc = 50;
473 link->conf.IntType = INT_MEMORY_AND_IO; 471 link->conf.IntType = INT_MEMORY_AND_IO;
474 link->conf.ConfigIndex = 1; 472 link->conf.ConfigIndex = 1;
475 link->conf.Present = PRESENT_OPTION; 473 link->conf.Present = PRESENT_OPTION;
@@ -489,13 +487,7 @@ static int nmclan_attach(struct pcmcia_device *p_dev)
489 dev->watchdog_timeo = TX_TIMEOUT; 487 dev->watchdog_timeo = TX_TIMEOUT;
490#endif 488#endif
491 489
492 link->handle = p_dev; 490 return nmclan_config(link);
493 p_dev->instance = link;
494
495 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
496 nmclan_config(link);
497
498 return 0;
499} /* nmclan_attach */ 491} /* nmclan_attach */
500 492
501/* ---------------------------------------------------------------------------- 493/* ----------------------------------------------------------------------------
@@ -506,18 +498,16 @@ nmclan_detach
506 when the device is released. 498 when the device is released.
507---------------------------------------------------------------------------- */ 499---------------------------------------------------------------------------- */
508 500
509static void nmclan_detach(struct pcmcia_device *p_dev) 501static void nmclan_detach(struct pcmcia_device *link)
510{ 502{
511 dev_link_t *link = dev_to_instance(p_dev);
512 struct net_device *dev = link->priv; 503 struct net_device *dev = link->priv;
513 504
514 DEBUG(0, "nmclan_detach(0x%p)\n", link); 505 DEBUG(0, "nmclan_detach(0x%p)\n", link);
515 506
516 if (link->dev) 507 if (link->dev_node)
517 unregister_netdev(dev); 508 unregister_netdev(dev);
518 509
519 if (link->state & DEV_CONFIG) 510 nmclan_release(link);
520 nmclan_release(link);
521 511
522 free_netdev(dev); 512 free_netdev(dev);
523} /* nmclan_detach */ 513} /* nmclan_detach */
@@ -661,9 +651,8 @@ nmclan_config
661#define CS_CHECK(fn, ret) \ 651#define CS_CHECK(fn, ret) \
662 do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 652 do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
663 653
664static void nmclan_config(dev_link_t *link) 654static int nmclan_config(struct pcmcia_device *link)
665{ 655{
666 client_handle_t handle = link->handle;
667 struct net_device *dev = link->priv; 656 struct net_device *dev = link->priv;
668 mace_private *lp = netdev_priv(dev); 657 mace_private *lp = netdev_priv(dev);
669 tuple_t tuple; 658 tuple_t tuple;
@@ -679,17 +668,14 @@ static void nmclan_config(dev_link_t *link)
679 tuple.TupleDataMax = 64; 668 tuple.TupleDataMax = 64;
680 tuple.TupleOffset = 0; 669 tuple.TupleOffset = 0;
681 tuple.DesiredTuple = CISTPL_CONFIG; 670 tuple.DesiredTuple = CISTPL_CONFIG;
682 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 671 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
683 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 672 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
684 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 673 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
685 link->conf.ConfigBase = parse.config.base; 674 link->conf.ConfigBase = parse.config.base;
686 675
687 /* Configure card */ 676 CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
688 link->state |= DEV_CONFIG; 677 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
689 678 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
690 CS_CHECK(RequestIO, pcmcia_request_io(handle, &link->io));
691 CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq));
692 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf));
693 dev->irq = link->irq.AssignedIRQ; 679 dev->irq = link->irq.AssignedIRQ;
694 dev->base_addr = link->io.BasePort1; 680 dev->base_addr = link->io.BasePort1;
695 681
@@ -700,8 +686,8 @@ static void nmclan_config(dev_link_t *link)
700 tuple.TupleData = buf; 686 tuple.TupleData = buf;
701 tuple.TupleDataMax = 64; 687 tuple.TupleDataMax = 64;
702 tuple.TupleOffset = 0; 688 tuple.TupleOffset = 0;
703 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 689 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
704 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 690 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
705 memcpy(dev->dev_addr, tuple.TupleData, ETHER_ADDR_LEN); 691 memcpy(dev->dev_addr, tuple.TupleData, ETHER_ADDR_LEN);
706 692
707 /* Verify configuration by reading the MACE ID. */ 693 /* Verify configuration by reading the MACE ID. */
@@ -716,8 +702,7 @@ static void nmclan_config(dev_link_t *link)
716 } else { 702 } else {
717 printk(KERN_NOTICE "nmclan_cs: mace id not found: %x %x should" 703 printk(KERN_NOTICE "nmclan_cs: mace id not found: %x %x should"
718 " be 0x40 0x?9\n", sig[0], sig[1]); 704 " be 0x40 0x?9\n", sig[0], sig[1]);
719 link->state &= ~DEV_CONFIG_PENDING; 705 return -ENODEV;
720 return;
721 } 706 }
722 } 707 }
723 708
@@ -730,14 +715,13 @@ static void nmclan_config(dev_link_t *link)
730 else 715 else
731 printk(KERN_NOTICE "nmclan_cs: invalid if_port requested\n"); 716 printk(KERN_NOTICE "nmclan_cs: invalid if_port requested\n");
732 717
733 link->dev = &lp->node; 718 link->dev_node = &lp->node;
734 link->state &= ~DEV_CONFIG_PENDING; 719 SET_NETDEV_DEV(dev, &handle_to_dev(link));
735 SET_NETDEV_DEV(dev, &handle_to_dev(handle));
736 720
737 i = register_netdev(dev); 721 i = register_netdev(dev);
738 if (i != 0) { 722 if (i != 0) {
739 printk(KERN_NOTICE "nmclan_cs: register_netdev() failed\n"); 723 printk(KERN_NOTICE "nmclan_cs: register_netdev() failed\n");
740 link->dev = NULL; 724 link->dev_node = NULL;
741 goto failed; 725 goto failed;
742 } 726 }
743 727
@@ -747,14 +731,13 @@ static void nmclan_config(dev_link_t *link)
747 dev->name, dev->base_addr, dev->irq, if_names[dev->if_port]); 731 dev->name, dev->base_addr, dev->irq, if_names[dev->if_port]);
748 for (i = 0; i < 6; i++) 732 for (i = 0; i < 6; i++)
749 printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n")); 733 printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
750 return; 734 return 0;
751 735
752cs_failed: 736cs_failed:
753 cs_error(link->handle, last_fn, last_ret); 737 cs_error(link, last_fn, last_ret);
754failed: 738failed:
755 nmclan_release(link); 739 nmclan_release(link);
756 return; 740 return -ENODEV;
757
758} /* nmclan_config */ 741} /* nmclan_config */
759 742
760/* ---------------------------------------------------------------------------- 743/* ----------------------------------------------------------------------------
@@ -763,46 +746,29 @@ nmclan_release
763 net device, and release the PCMCIA configuration. If the device 746 net device, and release the PCMCIA configuration. If the device
764 is still open, this will be postponed until it is closed. 747 is still open, this will be postponed until it is closed.
765---------------------------------------------------------------------------- */ 748---------------------------------------------------------------------------- */
766static void nmclan_release(dev_link_t *link) 749static void nmclan_release(struct pcmcia_device *link)
767{ 750{
768 751 DEBUG(0, "nmclan_release(0x%p)\n", link);
769 DEBUG(0, "nmclan_release(0x%p)\n", link); 752 pcmcia_disable_device(link);
770
771 pcmcia_release_configuration(link->handle);
772 pcmcia_release_io(link->handle, &link->io);
773 pcmcia_release_irq(link->handle, &link->irq);
774
775 link->state &= ~DEV_CONFIG;
776} 753}
777 754
778static int nmclan_suspend(struct pcmcia_device *p_dev) 755static int nmclan_suspend(struct pcmcia_device *link)
779{ 756{
780 dev_link_t *link = dev_to_instance(p_dev);
781 struct net_device *dev = link->priv; 757 struct net_device *dev = link->priv;
782 758
783 link->state |= DEV_SUSPEND; 759 if (link->open)
784 if (link->state & DEV_CONFIG) { 760 netif_device_detach(dev);
785 if (link->open)
786 netif_device_detach(dev);
787 pcmcia_release_configuration(link->handle);
788 }
789
790 761
791 return 0; 762 return 0;
792} 763}
793 764
794static int nmclan_resume(struct pcmcia_device *p_dev) 765static int nmclan_resume(struct pcmcia_device *link)
795{ 766{
796 dev_link_t *link = dev_to_instance(p_dev);
797 struct net_device *dev = link->priv; 767 struct net_device *dev = link->priv;
798 768
799 link->state &= ~DEV_SUSPEND; 769 if (link->open) {
800 if (link->state & DEV_CONFIG) { 770 nmclan_reset(dev);
801 pcmcia_request_configuration(link->handle, &link->conf); 771 netif_device_attach(dev);
802 if (link->open) {
803 nmclan_reset(dev);
804 netif_device_attach(dev);
805 }
806 } 772 }
807 773
808 return 0; 774 return 0;
@@ -818,7 +784,7 @@ static void nmclan_reset(struct net_device *dev)
818 mace_private *lp = netdev_priv(dev); 784 mace_private *lp = netdev_priv(dev);
819 785
820#if RESET_XILINX 786#if RESET_XILINX
821 dev_link_t *link = &lp->link; 787 struct pcmcia_device *link = &lp->link;
822 conf_reg_t reg; 788 conf_reg_t reg;
823 u_long OrigCorValue; 789 u_long OrigCorValue;
824 790
@@ -827,7 +793,7 @@ static void nmclan_reset(struct net_device *dev)
827 reg.Action = CS_READ; 793 reg.Action = CS_READ;
828 reg.Offset = CISREG_COR; 794 reg.Offset = CISREG_COR;
829 reg.Value = 0; 795 reg.Value = 0;
830 pcmcia_access_configuration_register(link->handle, &reg); 796 pcmcia_access_configuration_register(link, &reg);
831 OrigCorValue = reg.Value; 797 OrigCorValue = reg.Value;
832 798
833 /* Reset Xilinx */ 799 /* Reset Xilinx */
@@ -836,12 +802,12 @@ static void nmclan_reset(struct net_device *dev)
836 DEBUG(1, "nmclan_reset: OrigCorValue=0x%lX, resetting...\n", 802 DEBUG(1, "nmclan_reset: OrigCorValue=0x%lX, resetting...\n",
837 OrigCorValue); 803 OrigCorValue);
838 reg.Value = COR_SOFT_RESET; 804 reg.Value = COR_SOFT_RESET;
839 pcmcia_access_configuration_register(link->handle, &reg); 805 pcmcia_access_configuration_register(link, &reg);
840 /* Need to wait for 20 ms for PCMCIA to finish reset. */ 806 /* Need to wait for 20 ms for PCMCIA to finish reset. */
841 807
842 /* Restore original COR configuration index */ 808 /* Restore original COR configuration index */
843 reg.Value = COR_LEVEL_REQ | (OrigCorValue & COR_CONFIG_MASK); 809 reg.Value = COR_LEVEL_REQ | (OrigCorValue & COR_CONFIG_MASK);
844 pcmcia_access_configuration_register(link->handle, &reg); 810 pcmcia_access_configuration_register(link, &reg);
845 /* Xilinx is now completely reset along with the MACE chip. */ 811 /* Xilinx is now completely reset along with the MACE chip. */
846 lp->tx_free_frames=AM2150_MAX_TX_FRAMES; 812 lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
847 813
@@ -885,9 +851,9 @@ static int mace_open(struct net_device *dev)
885{ 851{
886 kio_addr_t ioaddr = dev->base_addr; 852 kio_addr_t ioaddr = dev->base_addr;
887 mace_private *lp = netdev_priv(dev); 853 mace_private *lp = netdev_priv(dev);
888 dev_link_t *link = &lp->link; 854 struct pcmcia_device *link = lp->p_dev;
889 855
890 if (!DEV_OK(link)) 856 if (!pcmcia_dev_present(link))
891 return -ENODEV; 857 return -ENODEV;
892 858
893 link->open++; 859 link->open++;
@@ -908,7 +874,7 @@ static int mace_close(struct net_device *dev)
908{ 874{
909 kio_addr_t ioaddr = dev->base_addr; 875 kio_addr_t ioaddr = dev->base_addr;
910 mace_private *lp = netdev_priv(dev); 876 mace_private *lp = netdev_priv(dev);
911 dev_link_t *link = &lp->link; 877 struct pcmcia_device *link = lp->p_dev;
912 878
913 DEBUG(2, "%s: shutting down ethercard.\n", dev->name); 879 DEBUG(2, "%s: shutting down ethercard.\n", dev->name);
914 880
@@ -963,12 +929,12 @@ mace_start_xmit
963static void mace_tx_timeout(struct net_device *dev) 929static void mace_tx_timeout(struct net_device *dev)
964{ 930{
965 mace_private *lp = netdev_priv(dev); 931 mace_private *lp = netdev_priv(dev);
966 dev_link_t *link = &lp->link; 932 struct pcmcia_device *link = lp->p_dev;
967 933
968 printk(KERN_NOTICE "%s: transmit timed out -- ", dev->name); 934 printk(KERN_NOTICE "%s: transmit timed out -- ", dev->name);
969#if RESET_ON_TIMEOUT 935#if RESET_ON_TIMEOUT
970 printk("resetting card\n"); 936 printk("resetting card\n");
971 pcmcia_reset_card(link->handle, NULL); 937 pcmcia_reset_card(link, NULL);
972#else /* #if RESET_ON_TIMEOUT */ 938#else /* #if RESET_ON_TIMEOUT */
973 printk("NOT resetting card\n"); 939 printk("NOT resetting card\n");
974#endif /* #if RESET_ON_TIMEOUT */ 940#endif /* #if RESET_ON_TIMEOUT */
@@ -1635,7 +1601,7 @@ static struct pcmcia_driver nmclan_cs_driver = {
1635 .drv = { 1601 .drv = {
1636 .name = "nmclan_cs", 1602 .name = "nmclan_cs",
1637 }, 1603 },
1638 .probe = nmclan_attach, 1604 .probe = nmclan_probe,
1639 .remove = nmclan_detach, 1605 .remove = nmclan_detach,
1640 .id_table = nmclan_ids, 1606 .id_table = nmclan_ids,
1641 .suspend = nmclan_suspend, 1607 .suspend = nmclan_suspend,
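
The nmclan_cs hunks above follow the pattern used throughout this commit: the driver stops embedding a dev_link_t, keeps a back-pointer to the struct pcmcia_device handed to probe(), drops the DEV_PRESENT/DEV_CONFIG_PENDING state juggling, and lets probe() return the result of the config step directly. A minimal sketch of that skeleton follows, with hypothetical my_* names standing in for the per-driver ones and assuming the usual PCMCIA/netdev headers these drivers already include; it is an illustration of the converted shape, not code from the patch.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/ds.h>

static int my_cs_config(struct pcmcia_device *link);
static void my_cs_release(struct pcmcia_device *link);

typedef struct my_private {
	struct pcmcia_device *p_dev;	/* back-pointer; replaces the embedded dev_link_t */
	dev_node_t node;
} my_private_t;

static int my_cs_probe(struct pcmcia_device *link)
{
	struct net_device *dev;
	my_private_t *lp;

	dev = alloc_etherdev(sizeof(my_private_t));
	if (!dev)
		return -ENOMEM;
	lp = netdev_priv(dev);
	lp->p_dev = link;
	link->priv = dev;

	/* No link->handle / p_dev->instance wiring and no
	 * DEV_PRESENT | DEV_CONFIG_PENDING flags any more; the config
	 * step runs immediately and its result is the probe result. */
	return my_cs_config(link);
}

static void my_cs_detach(struct pcmcia_device *link)
{
	struct net_device *dev = link->priv;

	if (link->dev_node)		/* field renamed from link->dev */
		unregister_netdev(dev);
	my_cs_release(link);		/* unconditional; no DEV_CONFIG test */
	free_netdev(dev);
}
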
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index b46e5f703efa..506e777c5f06 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -103,8 +103,8 @@ module_param_array(hw_addr, int, NULL, 0);
103/*====================================================================*/ 103/*====================================================================*/
104 104
105static void mii_phy_probe(struct net_device *dev); 105static void mii_phy_probe(struct net_device *dev);
106static void pcnet_config(dev_link_t *link); 106static int pcnet_config(struct pcmcia_device *link);
107static void pcnet_release(dev_link_t *link); 107static void pcnet_release(struct pcmcia_device *link);
108static int pcnet_open(struct net_device *dev); 108static int pcnet_open(struct net_device *dev);
109static int pcnet_close(struct net_device *dev); 109static int pcnet_close(struct net_device *dev);
110static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 110static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
@@ -113,9 +113,9 @@ static irqreturn_t ei_irq_wrapper(int irq, void *dev_id, struct pt_regs *regs);
113static void ei_watchdog(u_long arg); 113static void ei_watchdog(u_long arg);
114static void pcnet_reset_8390(struct net_device *dev); 114static void pcnet_reset_8390(struct net_device *dev);
115static int set_config(struct net_device *dev, struct ifmap *map); 115static int set_config(struct net_device *dev, struct ifmap *map);
116static int setup_shmem_window(dev_link_t *link, int start_pg, 116static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
117 int stop_pg, int cm_offset); 117 int stop_pg, int cm_offset);
118static int setup_dma_config(dev_link_t *link, int start_pg, 118static int setup_dma_config(struct pcmcia_device *link, int start_pg,
119 int stop_pg); 119 int stop_pg);
120 120
121static void pcnet_detach(struct pcmcia_device *p_dev); 121static void pcnet_detach(struct pcmcia_device *p_dev);
@@ -214,7 +214,7 @@ static hw_info_t dl10019_info = { 0, 0, 0, 0, IS_DL10019|HAS_MII };
214static hw_info_t dl10022_info = { 0, 0, 0, 0, IS_DL10022|HAS_MII }; 214static hw_info_t dl10022_info = { 0, 0, 0, 0, IS_DL10022|HAS_MII };
215 215
216typedef struct pcnet_dev_t { 216typedef struct pcnet_dev_t {
217 dev_link_t link; 217 struct pcmcia_device *p_dev;
218 dev_node_t node; 218 dev_node_t node;
219 u_int flags; 219 u_int flags;
220 void __iomem *base; 220 void __iomem *base;
@@ -240,10 +240,9 @@ static inline pcnet_dev_t *PRIV(struct net_device *dev)
240 240
241======================================================================*/ 241======================================================================*/
242 242
243static int pcnet_probe(struct pcmcia_device *p_dev) 243static int pcnet_probe(struct pcmcia_device *link)
244{ 244{
245 pcnet_dev_t *info; 245 pcnet_dev_t *info;
246 dev_link_t *link;
247 struct net_device *dev; 246 struct net_device *dev;
248 247
249 DEBUG(0, "pcnet_attach()\n"); 248 DEBUG(0, "pcnet_attach()\n");
@@ -252,7 +251,7 @@ static int pcnet_probe(struct pcmcia_device *p_dev)
252 dev = __alloc_ei_netdev(sizeof(pcnet_dev_t)); 251 dev = __alloc_ei_netdev(sizeof(pcnet_dev_t));
253 if (!dev) return -ENOMEM; 252 if (!dev) return -ENOMEM;
254 info = PRIV(dev); 253 info = PRIV(dev);
255 link = &info->link; 254 info->p_dev = link;
256 link->priv = dev; 255 link->priv = dev;
257 256
258 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 257 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
@@ -265,13 +264,7 @@ static int pcnet_probe(struct pcmcia_device *p_dev)
265 dev->stop = &pcnet_close; 264 dev->stop = &pcnet_close;
266 dev->set_config = &set_config; 265 dev->set_config = &set_config;
267 266
268 link->handle = p_dev; 267 return pcnet_config(link);
269 p_dev->instance = link;
270
271 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
272 pcnet_config(link);
273
274 return 0;
275} /* pcnet_attach */ 268} /* pcnet_attach */
276 269
277/*====================================================================== 270/*======================================================================
@@ -283,18 +276,16 @@ static int pcnet_probe(struct pcmcia_device *p_dev)
283 276
284======================================================================*/ 277======================================================================*/
285 278
286static void pcnet_detach(struct pcmcia_device *p_dev) 279static void pcnet_detach(struct pcmcia_device *link)
287{ 280{
288 dev_link_t *link = dev_to_instance(p_dev);
289 struct net_device *dev = link->priv; 281 struct net_device *dev = link->priv;
290 282
291 DEBUG(0, "pcnet_detach(0x%p)\n", link); 283 DEBUG(0, "pcnet_detach(0x%p)\n", link);
292 284
293 if (link->dev) 285 if (link->dev_node)
294 unregister_netdev(dev); 286 unregister_netdev(dev);
295 287
296 if (link->state & DEV_CONFIG) 288 pcnet_release(link);
297 pcnet_release(link);
298 289
299 free_netdev(dev); 290 free_netdev(dev);
300} /* pcnet_detach */ 291} /* pcnet_detach */
@@ -306,7 +297,7 @@ static void pcnet_detach(struct pcmcia_device *p_dev)
306 297
307======================================================================*/ 298======================================================================*/
308 299
309static hw_info_t *get_hwinfo(dev_link_t *link) 300static hw_info_t *get_hwinfo(struct pcmcia_device *link)
310{ 301{
311 struct net_device *dev = link->priv; 302 struct net_device *dev = link->priv;
312 win_req_t req; 303 win_req_t req;
@@ -318,9 +309,9 @@ static hw_info_t *get_hwinfo(dev_link_t *link)
318 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; 309 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
319 req.Base = 0; req.Size = 0; 310 req.Base = 0; req.Size = 0;
320 req.AccessSpeed = 0; 311 req.AccessSpeed = 0;
321 i = pcmcia_request_window(&link->handle, &req, &link->win); 312 i = pcmcia_request_window(&link, &req, &link->win);
322 if (i != CS_SUCCESS) { 313 if (i != CS_SUCCESS) {
323 cs_error(link->handle, RequestWindow, i); 314 cs_error(link, RequestWindow, i);
324 return NULL; 315 return NULL;
325 } 316 }
326 317
@@ -343,7 +334,7 @@ static hw_info_t *get_hwinfo(dev_link_t *link)
343 iounmap(virt); 334 iounmap(virt);
344 j = pcmcia_release_window(link->win); 335 j = pcmcia_release_window(link->win);
345 if (j != CS_SUCCESS) 336 if (j != CS_SUCCESS)
346 cs_error(link->handle, ReleaseWindow, j); 337 cs_error(link, ReleaseWindow, j);
347 return (i < NR_INFO) ? hw_info+i : NULL; 338 return (i < NR_INFO) ? hw_info+i : NULL;
348} /* get_hwinfo */ 339} /* get_hwinfo */
349 340
@@ -355,7 +346,7 @@ static hw_info_t *get_hwinfo(dev_link_t *link)
355 346
356======================================================================*/ 347======================================================================*/
357 348
358static hw_info_t *get_prom(dev_link_t *link) 349static hw_info_t *get_prom(struct pcmcia_device *link)
359{ 350{
360 struct net_device *dev = link->priv; 351 struct net_device *dev = link->priv;
361 kio_addr_t ioaddr = dev->base_addr; 352 kio_addr_t ioaddr = dev->base_addr;
@@ -409,7 +400,7 @@ static hw_info_t *get_prom(dev_link_t *link)
409 400
410======================================================================*/ 401======================================================================*/
411 402
412static hw_info_t *get_dl10019(dev_link_t *link) 403static hw_info_t *get_dl10019(struct pcmcia_device *link)
413{ 404{
414 struct net_device *dev = link->priv; 405 struct net_device *dev = link->priv;
415 int i; 406 int i;
@@ -431,7 +422,7 @@ static hw_info_t *get_dl10019(dev_link_t *link)
431 422
432======================================================================*/ 423======================================================================*/
433 424
434static hw_info_t *get_ax88190(dev_link_t *link) 425static hw_info_t *get_ax88190(struct pcmcia_device *link)
435{ 426{
436 struct net_device *dev = link->priv; 427 struct net_device *dev = link->priv;
437 kio_addr_t ioaddr = dev->base_addr; 428 kio_addr_t ioaddr = dev->base_addr;
@@ -464,7 +455,7 @@ static hw_info_t *get_ax88190(dev_link_t *link)
464 455
465======================================================================*/ 456======================================================================*/
466 457
467static hw_info_t *get_hwired(dev_link_t *link) 458static hw_info_t *get_hwired(struct pcmcia_device *link)
468{ 459{
469 struct net_device *dev = link->priv; 460 struct net_device *dev = link->priv;
470 int i; 461 int i;
@@ -491,7 +482,7 @@ static hw_info_t *get_hwired(dev_link_t *link)
491#define CS_CHECK(fn, ret) \ 482#define CS_CHECK(fn, ret) \
492do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 483do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
493 484
494static int try_io_port(dev_link_t *link) 485static int try_io_port(struct pcmcia_device *link)
495{ 486{
496 int j, ret; 487 int j, ret;
497 if (link->io.NumPorts1 == 32) { 488 if (link->io.NumPorts1 == 32) {
@@ -512,18 +503,17 @@ static int try_io_port(dev_link_t *link)
512 for (j = 0; j < 0x400; j += 0x20) { 503 for (j = 0; j < 0x400; j += 0x20) {
513 link->io.BasePort1 = j ^ 0x300; 504 link->io.BasePort1 = j ^ 0x300;
514 link->io.BasePort2 = (j ^ 0x300) + 0x10; 505 link->io.BasePort2 = (j ^ 0x300) + 0x10;
515 ret = pcmcia_request_io(link->handle, &link->io); 506 ret = pcmcia_request_io(link, &link->io);
516 if (ret == CS_SUCCESS) return ret; 507 if (ret == CS_SUCCESS) return ret;
517 } 508 }
518 return ret; 509 return ret;
519 } else { 510 } else {
520 return pcmcia_request_io(link->handle, &link->io); 511 return pcmcia_request_io(link, &link->io);
521 } 512 }
522} 513}
523 514
524static void pcnet_config(dev_link_t *link) 515static int pcnet_config(struct pcmcia_device *link)
525{ 516{
526 client_handle_t handle = link->handle;
527 struct net_device *dev = link->priv; 517 struct net_device *dev = link->priv;
528 pcnet_dev_t *info = PRIV(dev); 518 pcnet_dev_t *info = PRIV(dev);
529 tuple_t tuple; 519 tuple_t tuple;
@@ -531,7 +521,6 @@ static void pcnet_config(dev_link_t *link)
531 int i, last_ret, last_fn, start_pg, stop_pg, cm_offset; 521 int i, last_ret, last_fn, start_pg, stop_pg, cm_offset;
532 int manfid = 0, prodid = 0, has_shmem = 0; 522 int manfid = 0, prodid = 0, has_shmem = 0;
533 u_short buf[64]; 523 u_short buf[64];
534 config_info_t conf;
535 hw_info_t *hw_info; 524 hw_info_t *hw_info;
536 525
537 DEBUG(0, "pcnet_config(0x%p)\n", link); 526 DEBUG(0, "pcnet_config(0x%p)\n", link);
@@ -541,36 +530,29 @@ static void pcnet_config(dev_link_t *link)
541 tuple.TupleDataMax = sizeof(buf); 530 tuple.TupleDataMax = sizeof(buf);
542 tuple.TupleOffset = 0; 531 tuple.TupleOffset = 0;
543 tuple.DesiredTuple = CISTPL_CONFIG; 532 tuple.DesiredTuple = CISTPL_CONFIG;
544 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 533 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
545 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 534 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
546 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 535 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
547 link->conf.ConfigBase = parse.config.base; 536 link->conf.ConfigBase = parse.config.base;
548 link->conf.Present = parse.config.rmask[0]; 537 link->conf.Present = parse.config.rmask[0];
549 538
550 /* Configure card */
551 link->state |= DEV_CONFIG;
552
553 /* Look up current Vcc */
554 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(handle, &conf));
555 link->conf.Vcc = conf.Vcc;
556
557 tuple.DesiredTuple = CISTPL_MANFID; 539 tuple.DesiredTuple = CISTPL_MANFID;
558 tuple.Attributes = TUPLE_RETURN_COMMON; 540 tuple.Attributes = TUPLE_RETURN_COMMON;
559 if ((pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS) && 541 if ((pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) &&
560 (pcmcia_get_tuple_data(handle, &tuple) == CS_SUCCESS)) { 542 (pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS)) {
561 manfid = le16_to_cpu(buf[0]); 543 manfid = le16_to_cpu(buf[0]);
562 prodid = le16_to_cpu(buf[1]); 544 prodid = le16_to_cpu(buf[1]);
563 } 545 }
564 546
565 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 547 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
566 tuple.Attributes = 0; 548 tuple.Attributes = 0;
567 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 549 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
568 while (last_ret == CS_SUCCESS) { 550 while (last_ret == CS_SUCCESS) {
569 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry); 551 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
570 cistpl_io_t *io = &(parse.cftable_entry.io); 552 cistpl_io_t *io = &(parse.cftable_entry.io);
571 553
572 if (pcmcia_get_tuple_data(handle, &tuple) != 0 || 554 if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
573 pcmcia_parse_tuple(handle, &tuple, &parse) != 0 || 555 pcmcia_parse_tuple(link, &tuple, &parse) != 0 ||
574 cfg->index == 0 || cfg->io.nwin == 0) 556 cfg->index == 0 || cfg->io.nwin == 0)
575 goto next_entry; 557 goto next_entry;
576 558
@@ -594,14 +576,14 @@ static void pcnet_config(dev_link_t *link)
594 if (last_ret == CS_SUCCESS) break; 576 if (last_ret == CS_SUCCESS) break;
595 } 577 }
596 next_entry: 578 next_entry:
597 last_ret = pcmcia_get_next_tuple(handle, &tuple); 579 last_ret = pcmcia_get_next_tuple(link, &tuple);
598 } 580 }
599 if (last_ret != CS_SUCCESS) { 581 if (last_ret != CS_SUCCESS) {
600 cs_error(handle, RequestIO, last_ret); 582 cs_error(link, RequestIO, last_ret);
601 goto failed; 583 goto failed;
602 } 584 }
603 585
604 CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq)); 586 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
605 587
606 if (link->io.NumPorts2 == 8) { 588 if (link->io.NumPorts2 == 8) {
607 link->conf.Attributes |= CONF_ENABLE_SPKR; 589 link->conf.Attributes |= CONF_ENABLE_SPKR;
@@ -611,7 +593,7 @@ static void pcnet_config(dev_link_t *link)
611 (prodid == PRODID_IBM_HOME_AND_AWAY)) 593 (prodid == PRODID_IBM_HOME_AND_AWAY))
612 link->conf.ConfigIndex |= 0x10; 594 link->conf.ConfigIndex |= 0x10;
613 595
614 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf)); 596 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
615 dev->irq = link->irq.AssignedIRQ; 597 dev->irq = link->irq.AssignedIRQ;
616 dev->base_addr = link->io.BasePort1; 598 dev->base_addr = link->io.BasePort1;
617 if (info->flags & HAS_MISC_REG) { 599 if (info->flags & HAS_MISC_REG) {
@@ -679,9 +661,8 @@ static void pcnet_config(dev_link_t *link)
679 info->eth_phy = 0; 661 info->eth_phy = 0;
680 } 662 }
681 663
682 link->dev = &info->node; 664 link->dev_node = &info->node;
683 link->state &= ~DEV_CONFIG_PENDING; 665 SET_NETDEV_DEV(dev, &handle_to_dev(link));
684 SET_NETDEV_DEV(dev, &handle_to_dev(handle));
685 666
686#ifdef CONFIG_NET_POLL_CONTROLLER 667#ifdef CONFIG_NET_POLL_CONTROLLER
687 dev->poll_controller = ei_poll; 668 dev->poll_controller = ei_poll;
@@ -689,7 +670,7 @@ static void pcnet_config(dev_link_t *link)
689 670
690 if (register_netdev(dev) != 0) { 671 if (register_netdev(dev) != 0) {
691 printk(KERN_NOTICE "pcnet_cs: register_netdev() failed\n"); 672 printk(KERN_NOTICE "pcnet_cs: register_netdev() failed\n");
692 link->dev = NULL; 673 link->dev_node = NULL;
693 goto failed; 674 goto failed;
694 } 675 }
695 676
@@ -712,14 +693,13 @@ static void pcnet_config(dev_link_t *link)
712 printk(" hw_addr "); 693 printk(" hw_addr ");
713 for (i = 0; i < 6; i++) 694 for (i = 0; i < 6; i++)
714 printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n")); 695 printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
715 return; 696 return 0;
716 697
717cs_failed: 698cs_failed:
718 cs_error(link->handle, last_fn, last_ret); 699 cs_error(link, last_fn, last_ret);
719failed: 700failed:
720 pcnet_release(link); 701 pcnet_release(link);
721 link->state &= ~DEV_CONFIG_PENDING; 702 return -ENODEV;
722 return;
723} /* pcnet_config */ 703} /* pcnet_config */
724 704
725/*====================================================================== 705/*======================================================================
@@ -730,21 +710,16 @@ failed:
730 710
731======================================================================*/ 711======================================================================*/
732 712
733static void pcnet_release(dev_link_t *link) 713static void pcnet_release(struct pcmcia_device *link)
734{ 714{
735 pcnet_dev_t *info = PRIV(link->priv); 715 pcnet_dev_t *info = PRIV(link->priv);
736 716
737 DEBUG(0, "pcnet_release(0x%p)\n", link); 717 DEBUG(0, "pcnet_release(0x%p)\n", link);
738 718
739 if (info->flags & USE_SHMEM) { 719 if (info->flags & USE_SHMEM)
740 iounmap(info->base); 720 iounmap(info->base);
741 pcmcia_release_window(link->win);
742 }
743 pcmcia_release_configuration(link->handle);
744 pcmcia_release_io(link->handle, &link->io);
745 pcmcia_release_irq(link->handle, &link->irq);
746 721
747 link->state &= ~DEV_CONFIG; 722 pcmcia_disable_device(link);
748} 723}
749 724
750/*====================================================================== 725/*======================================================================
@@ -756,34 +731,24 @@ static void pcnet_release(dev_link_t *link)
756 731
757======================================================================*/ 732======================================================================*/
758 733
759static int pcnet_suspend(struct pcmcia_device *p_dev) 734static int pcnet_suspend(struct pcmcia_device *link)
760{ 735{
761 dev_link_t *link = dev_to_instance(p_dev);
762 struct net_device *dev = link->priv; 736 struct net_device *dev = link->priv;
763 737
764 link->state |= DEV_SUSPEND; 738 if (link->open)
765 if (link->state & DEV_CONFIG) { 739 netif_device_detach(dev);
766 if (link->open)
767 netif_device_detach(dev);
768 pcmcia_release_configuration(link->handle);
769 }
770 740
771 return 0; 741 return 0;
772} 742}
773 743
774static int pcnet_resume(struct pcmcia_device *p_dev) 744static int pcnet_resume(struct pcmcia_device *link)
775{ 745{
776 dev_link_t *link = dev_to_instance(p_dev);
777 struct net_device *dev = link->priv; 746 struct net_device *dev = link->priv;
778 747
779 link->state &= ~DEV_SUSPEND; 748 if (link->open) {
780 if (link->state & DEV_CONFIG) { 749 pcnet_reset_8390(dev);
781 pcmcia_request_configuration(link->handle, &link->conf); 750 NS8390_init(dev, 1);
782 if (link->open) { 751 netif_device_attach(dev);
783 pcnet_reset_8390(dev);
784 NS8390_init(dev, 1);
785 netif_device_attach(dev);
786 }
787 } 752 }
788 753
789 return 0; 754 return 0;
@@ -1023,11 +988,11 @@ static void mii_phy_probe(struct net_device *dev)
1023static int pcnet_open(struct net_device *dev) 988static int pcnet_open(struct net_device *dev)
1024{ 989{
1025 pcnet_dev_t *info = PRIV(dev); 990 pcnet_dev_t *info = PRIV(dev);
1026 dev_link_t *link = &info->link; 991 struct pcmcia_device *link = info->p_dev;
1027 992
1028 DEBUG(2, "pcnet_open('%s')\n", dev->name); 993 DEBUG(2, "pcnet_open('%s')\n", dev->name);
1029 994
1030 if (!DEV_OK(link)) 995 if (!pcmcia_dev_present(link))
1031 return -ENODEV; 996 return -ENODEV;
1032 997
1033 link->open++; 998 link->open++;
@@ -1051,7 +1016,7 @@ static int pcnet_open(struct net_device *dev)
1051static int pcnet_close(struct net_device *dev) 1016static int pcnet_close(struct net_device *dev)
1052{ 1017{
1053 pcnet_dev_t *info = PRIV(dev); 1018 pcnet_dev_t *info = PRIV(dev);
1054 dev_link_t *link = &info->link; 1019 struct pcmcia_device *link = info->p_dev;
1055 1020
1056 DEBUG(2, "pcnet_close('%s')\n", dev->name); 1021 DEBUG(2, "pcnet_close('%s')\n", dev->name);
1057 1022
@@ -1429,7 +1394,7 @@ static void dma_block_output(struct net_device *dev, int count,
1429 1394
1430/*====================================================================*/ 1395/*====================================================================*/
1431 1396
1432static int setup_dma_config(dev_link_t *link, int start_pg, 1397static int setup_dma_config(struct pcmcia_device *link, int start_pg,
1433 int stop_pg) 1398 int stop_pg)
1434{ 1399{
1435 struct net_device *dev = link->priv; 1400 struct net_device *dev = link->priv;
@@ -1532,7 +1497,7 @@ static void shmem_block_output(struct net_device *dev, int count,
1532 1497
1533/*====================================================================*/ 1498/*====================================================================*/
1534 1499
1535static int setup_shmem_window(dev_link_t *link, int start_pg, 1500static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
1536 int stop_pg, int cm_offset) 1501 int stop_pg, int cm_offset)
1537{ 1502{
1538 struct net_device *dev = link->priv; 1503 struct net_device *dev = link->priv;
@@ -1554,7 +1519,7 @@ static int setup_shmem_window(dev_link_t *link, int start_pg,
1554 req.Attributes |= WIN_USE_WAIT; 1519 req.Attributes |= WIN_USE_WAIT;
1555 req.Base = 0; req.Size = window_size; 1520 req.Base = 0; req.Size = window_size;
1556 req.AccessSpeed = mem_speed; 1521 req.AccessSpeed = mem_speed;
1557 CS_CHECK(RequestWindow, pcmcia_request_window(&link->handle, &req, &link->win)); 1522 CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &link->win));
1558 1523
1559 mem.CardOffset = (start_pg << 8) + cm_offset; 1524 mem.CardOffset = (start_pg << 8) + cm_offset;
1560 offset = mem.CardOffset % window_size; 1525 offset = mem.CardOffset % window_size;
@@ -1595,7 +1560,7 @@ static int setup_shmem_window(dev_link_t *link, int start_pg,
1595 return 0; 1560 return 0;
1596 1561
1597cs_failed: 1562cs_failed:
1598 cs_error(link->handle, last_fn, last_ret); 1563 cs_error(link, last_fn, last_ret);
1599failed: 1564failed:
1600 return 1; 1565 return 1;
1601} 1566}
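
pcnet_cs gets the same treatment in its teardown and power-management paths: the three-call release sequence plus the DEV_CONFIG/DEV_SUSPEND flag handling collapses into pcmcia_disable_device(), and suspend/resume only touch the net device, since the explicit release/request of the socket configuration no longer lives in the driver (presumably handled by the PCMCIA core from here on). Continuing the hypothetical my_* sketch from after the nmclan_cs.c hunks:

static void my_hw_reset(struct net_device *dev)
{
	/* chip-specific re-init, e.g. nmclan_reset() or pcnet_reset_8390() */
}

static void my_cs_release(struct pcmcia_device *link)
{
	/* One call replaces pcmcia_release_configuration()/_io()/_irq() and
	 * the link->state &= ~DEV_CONFIG bookkeeping; drivers that mapped a
	 * memory window (pcnet_cs, smc91c92_cs) iounmap() it first. */
	pcmcia_disable_device(link);
}

static int my_cs_suspend(struct pcmcia_device *link)
{
	struct net_device *dev = link->priv;

	if (link->open)			/* only quiesce the interface */
		netif_device_detach(dev);
	return 0;
}

static int my_cs_resume(struct pcmcia_device *link)
{
	struct net_device *dev = link->priv;

	if (link->open) {
		my_hw_reset(dev);
		netif_device_attach(dev);
	}
	return 0;
}
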
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 8839c4faafd6..e74bf5014ef6 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -49,6 +49,7 @@
49#include <pcmcia/cisreg.h> 49#include <pcmcia/cisreg.h>
50#include <pcmcia/ciscode.h> 50#include <pcmcia/ciscode.h>
51#include <pcmcia/ds.h> 51#include <pcmcia/ds.h>
52#include <pcmcia/ss.h>
52 53
53#include <asm/io.h> 54#include <asm/io.h>
54#include <asm/system.h> 55#include <asm/system.h>
@@ -103,7 +104,7 @@ static const char *version =
103#define MEMORY_WAIT_TIME 8 104#define MEMORY_WAIT_TIME 8
104 105
105struct smc_private { 106struct smc_private {
106 dev_link_t link; 107 struct pcmcia_device *p_dev;
107 spinlock_t lock; 108 spinlock_t lock;
108 u_short manfid; 109 u_short manfid;
109 u_short cardid; 110 u_short cardid;
@@ -278,8 +279,8 @@ enum RxCfg { RxAllMulti = 0x0004, RxPromisc = 0x0002,
278/*====================================================================*/ 279/*====================================================================*/
279 280
280static void smc91c92_detach(struct pcmcia_device *p_dev); 281static void smc91c92_detach(struct pcmcia_device *p_dev);
281static void smc91c92_config(dev_link_t *link); 282static int smc91c92_config(struct pcmcia_device *link);
282static void smc91c92_release(dev_link_t *link); 283static void smc91c92_release(struct pcmcia_device *link);
283 284
284static int smc_open(struct net_device *dev); 285static int smc_open(struct net_device *dev);
285static int smc_close(struct net_device *dev); 286static int smc_close(struct net_device *dev);
@@ -308,10 +309,9 @@ static struct ethtool_ops ethtool_ops;
308 309
309======================================================================*/ 310======================================================================*/
310 311
311static int smc91c92_attach(struct pcmcia_device *p_dev) 312static int smc91c92_probe(struct pcmcia_device *link)
312{ 313{
313 struct smc_private *smc; 314 struct smc_private *smc;
314 dev_link_t *link;
315 struct net_device *dev; 315 struct net_device *dev;
316 316
317 DEBUG(0, "smc91c92_attach()\n"); 317 DEBUG(0, "smc91c92_attach()\n");
@@ -321,7 +321,7 @@ static int smc91c92_attach(struct pcmcia_device *p_dev)
321 if (!dev) 321 if (!dev)
322 return -ENOMEM; 322 return -ENOMEM;
323 smc = netdev_priv(dev); 323 smc = netdev_priv(dev);
324 link = &smc->link; 324 smc->p_dev = link;
325 link->priv = dev; 325 link->priv = dev;
326 326
327 spin_lock_init(&smc->lock); 327 spin_lock_init(&smc->lock);
@@ -333,7 +333,6 @@ static int smc91c92_attach(struct pcmcia_device *p_dev)
333 link->irq.Handler = &smc_interrupt; 333 link->irq.Handler = &smc_interrupt;
334 link->irq.Instance = dev; 334 link->irq.Instance = dev;
335 link->conf.Attributes = CONF_ENABLE_IRQ; 335 link->conf.Attributes = CONF_ENABLE_IRQ;
336 link->conf.Vcc = 50;
337 link->conf.IntType = INT_MEMORY_AND_IO; 336 link->conf.IntType = INT_MEMORY_AND_IO;
338 337
339 /* The SMC91c92-specific entries in the device structure. */ 338 /* The SMC91c92-specific entries in the device structure. */
@@ -357,13 +356,7 @@ static int smc91c92_attach(struct pcmcia_device *p_dev)
357 smc->mii_if.phy_id_mask = 0x1f; 356 smc->mii_if.phy_id_mask = 0x1f;
358 smc->mii_if.reg_num_mask = 0x1f; 357 smc->mii_if.reg_num_mask = 0x1f;
359 358
360 link->handle = p_dev; 359 return smc91c92_config(link);
361 p_dev->instance = link;
362
363 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
364 smc91c92_config(link);
365
366 return 0;
367} /* smc91c92_attach */ 360} /* smc91c92_attach */
368 361
369/*====================================================================== 362/*======================================================================
@@ -375,18 +368,16 @@ static int smc91c92_attach(struct pcmcia_device *p_dev)
375 368
376======================================================================*/ 369======================================================================*/
377 370
378static void smc91c92_detach(struct pcmcia_device *p_dev) 371static void smc91c92_detach(struct pcmcia_device *link)
379{ 372{
380 dev_link_t *link = dev_to_instance(p_dev);
381 struct net_device *dev = link->priv; 373 struct net_device *dev = link->priv;
382 374
383 DEBUG(0, "smc91c92_detach(0x%p)\n", link); 375 DEBUG(0, "smc91c92_detach(0x%p)\n", link);
384 376
385 if (link->dev) 377 if (link->dev_node)
386 unregister_netdev(dev); 378 unregister_netdev(dev);
387 379
388 if (link->state & DEV_CONFIG) 380 smc91c92_release(link);
389 smc91c92_release(link);
390 381
391 free_netdev(dev); 382 free_netdev(dev);
392} /* smc91c92_detach */ 383} /* smc91c92_detach */
@@ -414,7 +405,7 @@ static int cvt_ascii_address(struct net_device *dev, char *s)
414 405
415/*====================================================================*/ 406/*====================================================================*/
416 407
417static int first_tuple(client_handle_t handle, tuple_t *tuple, 408static int first_tuple(struct pcmcia_device *handle, tuple_t *tuple,
418 cisparse_t *parse) 409 cisparse_t *parse)
419{ 410{
420 int i; 411 int i;
@@ -425,7 +416,7 @@ static int first_tuple(client_handle_t handle, tuple_t *tuple,
425 return pcmcia_parse_tuple(handle, tuple, parse); 416 return pcmcia_parse_tuple(handle, tuple, parse);
426} 417}
427 418
428static int next_tuple(client_handle_t handle, tuple_t *tuple, 419static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple,
429 cisparse_t *parse) 420 cisparse_t *parse)
430{ 421{
431 int i; 422 int i;
@@ -447,7 +438,7 @@ static int next_tuple(client_handle_t handle, tuple_t *tuple,
447 438
448======================================================================*/ 439======================================================================*/
449 440
450static int mhz_3288_power(dev_link_t *link) 441static int mhz_3288_power(struct pcmcia_device *link)
451{ 442{
452 struct net_device *dev = link->priv; 443 struct net_device *dev = link->priv;
453 struct smc_private *smc = netdev_priv(dev); 444 struct smc_private *smc = netdev_priv(dev);
@@ -469,7 +460,7 @@ static int mhz_3288_power(dev_link_t *link)
469 return 0; 460 return 0;
470} 461}
471 462
472static int mhz_mfc_config(dev_link_t *link) 463static int mhz_mfc_config(struct pcmcia_device *link)
473{ 464{
474 struct net_device *dev = link->priv; 465 struct net_device *dev = link->priv;
475 struct smc_private *smc = netdev_priv(dev); 466 struct smc_private *smc = netdev_priv(dev);
@@ -504,7 +495,7 @@ static int mhz_mfc_config(dev_link_t *link)
504 tuple->TupleDataMax = 255; 495 tuple->TupleDataMax = 255;
505 tuple->DesiredTuple = CISTPL_CFTABLE_ENTRY; 496 tuple->DesiredTuple = CISTPL_CFTABLE_ENTRY;
506 497
507 i = first_tuple(link->handle, tuple, parse); 498 i = first_tuple(link, tuple, parse);
508 /* The Megahertz combo cards have modem-like CIS entries, so 499 /* The Megahertz combo cards have modem-like CIS entries, so
509 we have to explicitly try a bunch of port combinations. */ 500 we have to explicitly try a bunch of port combinations. */
510 while (i == CS_SUCCESS) { 501 while (i == CS_SUCCESS) {
@@ -513,11 +504,11 @@ static int mhz_mfc_config(dev_link_t *link)
513 for (k = 0; k < 0x400; k += 0x10) { 504 for (k = 0; k < 0x400; k += 0x10) {
514 if (k & 0x80) continue; 505 if (k & 0x80) continue;
515 link->io.BasePort1 = k ^ 0x300; 506 link->io.BasePort1 = k ^ 0x300;
516 i = pcmcia_request_io(link->handle, &link->io); 507 i = pcmcia_request_io(link, &link->io);
517 if (i == CS_SUCCESS) break; 508 if (i == CS_SUCCESS) break;
518 } 509 }
519 if (i == CS_SUCCESS) break; 510 if (i == CS_SUCCESS) break;
520 i = next_tuple(link->handle, tuple, parse); 511 i = next_tuple(link, tuple, parse);
521 } 512 }
522 if (i != CS_SUCCESS) 513 if (i != CS_SUCCESS)
523 goto free_cfg_mem; 514 goto free_cfg_mem;
@@ -527,7 +518,7 @@ static int mhz_mfc_config(dev_link_t *link)
527 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; 518 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
528 req.Base = req.Size = 0; 519 req.Base = req.Size = 0;
529 req.AccessSpeed = 0; 520 req.AccessSpeed = 0;
530 i = pcmcia_request_window(&link->handle, &req, &link->win); 521 i = pcmcia_request_window(&link, &req, &link->win);
531 if (i != CS_SUCCESS) 522 if (i != CS_SUCCESS)
532 goto free_cfg_mem; 523 goto free_cfg_mem;
533 smc->base = ioremap(req.Base, req.Size); 524 smc->base = ioremap(req.Base, req.Size);
@@ -546,9 +537,8 @@ free_cfg_mem:
546 return i; 537 return i;
547} 538}
548 539
549static int mhz_setup(dev_link_t *link) 540static int mhz_setup(struct pcmcia_device *link)
550{ 541{
551 client_handle_t handle = link->handle;
552 struct net_device *dev = link->priv; 542 struct net_device *dev = link->priv;
553 struct smc_cfg_mem *cfg_mem; 543 struct smc_cfg_mem *cfg_mem;
554 tuple_t *tuple; 544 tuple_t *tuple;
@@ -571,13 +561,13 @@ static int mhz_setup(dev_link_t *link)
571 /* Read the station address from the CIS. It is stored as the last 561 /* Read the station address from the CIS. It is stored as the last
572 (fourth) string in the Version 1 Version/ID tuple. */ 562 (fourth) string in the Version 1 Version/ID tuple. */
573 tuple->DesiredTuple = CISTPL_VERS_1; 563 tuple->DesiredTuple = CISTPL_VERS_1;
574 if (first_tuple(handle, tuple, parse) != CS_SUCCESS) { 564 if (first_tuple(link, tuple, parse) != CS_SUCCESS) {
575 rc = -1; 565 rc = -1;
576 goto free_cfg_mem; 566 goto free_cfg_mem;
577 } 567 }
578 /* Ugh -- the EM1144 card has two VERS_1 tuples!?! */ 568 /* Ugh -- the EM1144 card has two VERS_1 tuples!?! */
579 if (next_tuple(handle, tuple, parse) != CS_SUCCESS) 569 if (next_tuple(link, tuple, parse) != CS_SUCCESS)
580 first_tuple(handle, tuple, parse); 570 first_tuple(link, tuple, parse);
581 if (parse->version_1.ns > 3) { 571 if (parse->version_1.ns > 3) {
582 station_addr = parse->version_1.str + parse->version_1.ofs[3]; 572 station_addr = parse->version_1.str + parse->version_1.ofs[3];
583 if (cvt_ascii_address(dev, station_addr) == 0) { 573 if (cvt_ascii_address(dev, station_addr) == 0) {
@@ -588,11 +578,11 @@ static int mhz_setup(dev_link_t *link)
588 578
589 /* Another possibility: for the EM3288, in a special tuple */ 579 /* Another possibility: for the EM3288, in a special tuple */
590 tuple->DesiredTuple = 0x81; 580 tuple->DesiredTuple = 0x81;
591 if (pcmcia_get_first_tuple(handle, tuple) != CS_SUCCESS) { 581 if (pcmcia_get_first_tuple(link, tuple) != CS_SUCCESS) {
592 rc = -1; 582 rc = -1;
593 goto free_cfg_mem; 583 goto free_cfg_mem;
594 } 584 }
595 if (pcmcia_get_tuple_data(handle, tuple) != CS_SUCCESS) { 585 if (pcmcia_get_tuple_data(link, tuple) != CS_SUCCESS) {
596 rc = -1; 586 rc = -1;
597 goto free_cfg_mem; 587 goto free_cfg_mem;
598 } 588 }
@@ -616,7 +606,7 @@ free_cfg_mem:
616 606
617======================================================================*/ 607======================================================================*/
618 608
619static void mot_config(dev_link_t *link) 609static void mot_config(struct pcmcia_device *link)
620{ 610{
621 struct net_device *dev = link->priv; 611 struct net_device *dev = link->priv;
622 struct smc_private *smc = netdev_priv(dev); 612 struct smc_private *smc = netdev_priv(dev);
@@ -637,7 +627,7 @@ static void mot_config(dev_link_t *link)
637 mdelay(100); 627 mdelay(100);
638} 628}
639 629
640static int mot_setup(dev_link_t *link) 630static int mot_setup(struct pcmcia_device *link)
641{ 631{
642 struct net_device *dev = link->priv; 632 struct net_device *dev = link->priv;
643 kio_addr_t ioaddr = dev->base_addr; 633 kio_addr_t ioaddr = dev->base_addr;
@@ -671,7 +661,7 @@ static int mot_setup(dev_link_t *link)
671 661
672/*====================================================================*/ 662/*====================================================================*/
673 663
674static int smc_config(dev_link_t *link) 664static int smc_config(struct pcmcia_device *link)
675{ 665{
676 struct net_device *dev = link->priv; 666 struct net_device *dev = link->priv;
677 struct smc_cfg_mem *cfg_mem; 667 struct smc_cfg_mem *cfg_mem;
@@ -696,16 +686,16 @@ static int smc_config(dev_link_t *link)
696 tuple->DesiredTuple = CISTPL_CFTABLE_ENTRY; 686 tuple->DesiredTuple = CISTPL_CFTABLE_ENTRY;
697 687
698 link->io.NumPorts1 = 16; 688 link->io.NumPorts1 = 16;
699 i = first_tuple(link->handle, tuple, parse); 689 i = first_tuple(link, tuple, parse);
700 while (i != CS_NO_MORE_ITEMS) { 690 while (i != CS_NO_MORE_ITEMS) {
701 if (i == CS_SUCCESS) { 691 if (i == CS_SUCCESS) {
702 link->conf.ConfigIndex = cf->index; 692 link->conf.ConfigIndex = cf->index;
703 link->io.BasePort1 = cf->io.win[0].base; 693 link->io.BasePort1 = cf->io.win[0].base;
704 link->io.IOAddrLines = cf->io.flags & CISTPL_IO_LINES_MASK; 694 link->io.IOAddrLines = cf->io.flags & CISTPL_IO_LINES_MASK;
705 i = pcmcia_request_io(link->handle, &link->io); 695 i = pcmcia_request_io(link, &link->io);
706 if (i == CS_SUCCESS) break; 696 if (i == CS_SUCCESS) break;
707 } 697 }
708 i = next_tuple(link->handle, tuple, parse); 698 i = next_tuple(link, tuple, parse);
709 } 699 }
710 if (i == CS_SUCCESS) 700 if (i == CS_SUCCESS)
711 dev->base_addr = link->io.BasePort1; 701 dev->base_addr = link->io.BasePort1;
@@ -714,9 +704,8 @@ static int smc_config(dev_link_t *link)
714 return i; 704 return i;
715} 705}
716 706
717static int smc_setup(dev_link_t *link) 707static int smc_setup(struct pcmcia_device *link)
718{ 708{
719 client_handle_t handle = link->handle;
720 struct net_device *dev = link->priv; 709 struct net_device *dev = link->priv;
721 struct smc_cfg_mem *cfg_mem; 710 struct smc_cfg_mem *cfg_mem;
722 tuple_t *tuple; 711 tuple_t *tuple;
@@ -739,11 +728,11 @@ static int smc_setup(dev_link_t *link)
739 728
740 /* Check for a LAN function extension tuple */ 729 /* Check for a LAN function extension tuple */
741 tuple->DesiredTuple = CISTPL_FUNCE; 730 tuple->DesiredTuple = CISTPL_FUNCE;
742 i = first_tuple(handle, tuple, parse); 731 i = first_tuple(link, tuple, parse);
743 while (i == CS_SUCCESS) { 732 while (i == CS_SUCCESS) {
744 if (parse->funce.type == CISTPL_FUNCE_LAN_NODE_ID) 733 if (parse->funce.type == CISTPL_FUNCE_LAN_NODE_ID)
745 break; 734 break;
746 i = next_tuple(handle, tuple, parse); 735 i = next_tuple(link, tuple, parse);
747 } 736 }
748 if (i == CS_SUCCESS) { 737 if (i == CS_SUCCESS) {
749 node_id = (cistpl_lan_node_id_t *)parse->funce.data; 738 node_id = (cistpl_lan_node_id_t *)parse->funce.data;
@@ -756,7 +745,7 @@ static int smc_setup(dev_link_t *link)
756 } 745 }
757 /* Try the third string in the Version 1 Version/ID tuple. */ 746 /* Try the third string in the Version 1 Version/ID tuple. */
758 tuple->DesiredTuple = CISTPL_VERS_1; 747 tuple->DesiredTuple = CISTPL_VERS_1;
759 if (first_tuple(handle, tuple, parse) != CS_SUCCESS) { 748 if (first_tuple(link, tuple, parse) != CS_SUCCESS) {
760 rc = -1; 749 rc = -1;
761 goto free_cfg_mem; 750 goto free_cfg_mem;
762 } 751 }
@@ -774,7 +763,7 @@ free_cfg_mem:
774 763
775/*====================================================================*/ 764/*====================================================================*/
776 765
777static int osi_config(dev_link_t *link) 766static int osi_config(struct pcmcia_device *link)
778{ 767{
779 struct net_device *dev = link->priv; 768 struct net_device *dev = link->priv;
780 static const kio_addr_t com[4] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 }; 769 static const kio_addr_t com[4] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 };
@@ -794,22 +783,21 @@ static int osi_config(dev_link_t *link)
794 783
795 for (i = j = 0; j < 4; j++) { 784 for (i = j = 0; j < 4; j++) {
796 link->io.BasePort2 = com[j]; 785 link->io.BasePort2 = com[j];
797 i = pcmcia_request_io(link->handle, &link->io); 786 i = pcmcia_request_io(link, &link->io);
798 if (i == CS_SUCCESS) break; 787 if (i == CS_SUCCESS) break;
799 } 788 }
800 if (i != CS_SUCCESS) { 789 if (i != CS_SUCCESS) {
801 /* Fallback: turn off hard decode */ 790 /* Fallback: turn off hard decode */
802 link->conf.ConfigIndex = 0x03; 791 link->conf.ConfigIndex = 0x03;
803 link->io.NumPorts2 = 0; 792 link->io.NumPorts2 = 0;
804 i = pcmcia_request_io(link->handle, &link->io); 793 i = pcmcia_request_io(link, &link->io);
805 } 794 }
806 dev->base_addr = link->io.BasePort1 + 0x10; 795 dev->base_addr = link->io.BasePort1 + 0x10;
807 return i; 796 return i;
808} 797}
809 798
810static int osi_setup(dev_link_t *link, u_short manfid, u_short cardid) 799static int osi_setup(struct pcmcia_device *link, u_short manfid, u_short cardid)
811{ 800{
812 client_handle_t handle = link->handle;
813 struct net_device *dev = link->priv; 801 struct net_device *dev = link->priv;
814 struct smc_cfg_mem *cfg_mem; 802 struct smc_cfg_mem *cfg_mem;
815 tuple_t *tuple; 803 tuple_t *tuple;
@@ -830,12 +818,12 @@ static int osi_setup(dev_link_t *link, u_short manfid, u_short cardid)
830 818
831 /* Read the station address from tuple 0x90, subtuple 0x04 */ 819 /* Read the station address from tuple 0x90, subtuple 0x04 */
832 tuple->DesiredTuple = 0x90; 820 tuple->DesiredTuple = 0x90;
833 i = pcmcia_get_first_tuple(handle, tuple); 821 i = pcmcia_get_first_tuple(link, tuple);
834 while (i == CS_SUCCESS) { 822 while (i == CS_SUCCESS) {
835 i = pcmcia_get_tuple_data(handle, tuple); 823 i = pcmcia_get_tuple_data(link, tuple);
836 if ((i != CS_SUCCESS) || (buf[0] == 0x04)) 824 if ((i != CS_SUCCESS) || (buf[0] == 0x04))
837 break; 825 break;
838 i = pcmcia_get_next_tuple(handle, tuple); 826 i = pcmcia_get_next_tuple(link, tuple);
839 } 827 }
840 if (i != CS_SUCCESS) { 828 if (i != CS_SUCCESS) {
841 rc = -1; 829 rc = -1;
@@ -868,57 +856,47 @@ free_cfg_mem:
868 return rc; 856 return rc;
869} 857}
870 858
871static int smc91c92_suspend(struct pcmcia_device *p_dev) 859static int smc91c92_suspend(struct pcmcia_device *link)
872{ 860{
873 dev_link_t *link = dev_to_instance(p_dev);
874 struct net_device *dev = link->priv; 861 struct net_device *dev = link->priv;
875 862
876 link->state |= DEV_SUSPEND; 863 if (link->open)
877 if (link->state & DEV_CONFIG) { 864 netif_device_detach(dev);
878 if (link->open)
879 netif_device_detach(dev);
880 pcmcia_release_configuration(link->handle);
881 }
882 865
883 return 0; 866 return 0;
884} 867}
885 868
886static int smc91c92_resume(struct pcmcia_device *p_dev) 869static int smc91c92_resume(struct pcmcia_device *link)
887{ 870{
888 dev_link_t *link = dev_to_instance(p_dev);
889 struct net_device *dev = link->priv; 871 struct net_device *dev = link->priv;
890 struct smc_private *smc = netdev_priv(dev); 872 struct smc_private *smc = netdev_priv(dev);
891 int i; 873 int i;
892 874
893 link->state &= ~DEV_SUSPEND; 875 if ((smc->manfid == MANFID_MEGAHERTZ) &&
894 if (link->state & DEV_CONFIG) { 876 (smc->cardid == PRODID_MEGAHERTZ_EM3288))
895 if ((smc->manfid == MANFID_MEGAHERTZ) && 877 mhz_3288_power(link);
896 (smc->cardid == PRODID_MEGAHERTZ_EM3288)) 878 if (smc->manfid == MANFID_MOTOROLA)
897 mhz_3288_power(link); 879 mot_config(link);
898 pcmcia_request_configuration(link->handle, &link->conf); 880 if ((smc->manfid == MANFID_OSITECH) &&
899 if (smc->manfid == MANFID_MOTOROLA) 881 (smc->cardid != PRODID_OSITECH_SEVEN)) {
900 mot_config(link); 882 /* Power up the card and enable interrupts */
901 if ((smc->manfid == MANFID_OSITECH) && 883 set_bits(0x0300, dev->base_addr-0x10+OSITECH_AUI_PWR);
902 (smc->cardid != PRODID_OSITECH_SEVEN)) { 884 set_bits(0x0300, dev->base_addr-0x10+OSITECH_RESET_ISR);
903 /* Power up the card and enable interrupts */ 885 }
904 set_bits(0x0300, dev->base_addr-0x10+OSITECH_AUI_PWR); 886 if (((smc->manfid == MANFID_OSITECH) &&
905 set_bits(0x0300, dev->base_addr-0x10+OSITECH_RESET_ISR); 887 (smc->cardid == PRODID_OSITECH_SEVEN)) ||
906 } 888 ((smc->manfid == MANFID_PSION) &&
907 if (((smc->manfid == MANFID_OSITECH) && 889 (smc->cardid == PRODID_PSION_NET100))) {
908 (smc->cardid == PRODID_OSITECH_SEVEN)) || 890 /* Download the Seven of Diamonds firmware */
909 ((smc->manfid == MANFID_PSION) && 891 for (i = 0; i < sizeof(__Xilinx7OD); i++) {
910 (smc->cardid == PRODID_PSION_NET100))) { 892 outb(__Xilinx7OD[i], link->io.BasePort1+2);
911 /* Download the Seven of Diamonds firmware */ 893 udelay(50);
912 for (i = 0; i < sizeof(__Xilinx7OD); i++) {
913 outb(__Xilinx7OD[i], link->io.BasePort1+2);
914 udelay(50);
915 }
916 }
917 if (link->open) {
918 smc_reset(dev);
919 netif_device_attach(dev);
920 } 894 }
921 } 895 }
896 if (link->open) {
897 smc_reset(dev);
898 netif_device_attach(dev);
899 }
922 900
923 return 0; 901 return 0;
924} 902}
@@ -931,7 +909,7 @@ static int smc91c92_resume(struct pcmcia_device *p_dev)
931 909
932======================================================================*/ 910======================================================================*/
933 911
934static int check_sig(dev_link_t *link) 912static int check_sig(struct pcmcia_device *link)
935{ 913{
936 struct net_device *dev = link->priv; 914 struct net_device *dev = link->priv;
937 kio_addr_t ioaddr = dev->base_addr; 915 kio_addr_t ioaddr = dev->base_addr;
@@ -964,13 +942,15 @@ static int check_sig(dev_link_t *link)
964 } 942 }
965 943
966 if (width) { 944 if (width) {
967 printk(KERN_INFO "smc91c92_cs: using 8-bit IO window.\n"); 945 modconf_t mod = {
968 smc91c92_suspend(link->handle); 946 .Attributes = CONF_IO_CHANGE_WIDTH,
969 pcmcia_release_io(link->handle, &link->io); 947 };
970 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 948 printk(KERN_INFO "smc91c92_cs: using 8-bit IO window.\n");
971 pcmcia_request_io(link->handle, &link->io); 949
972 smc91c92_resume(link->handle); 950 smc91c92_suspend(link);
973 return check_sig(link); 951 pcmcia_modify_configuration(link, &mod);
952 smc91c92_resume(link);
953 return check_sig(link);
974 } 954 }
975 return -ENODEV; 955 return -ENODEV;
976} 956}
@@ -984,11 +964,10 @@ static int check_sig(dev_link_t *link)
984======================================================================*/ 964======================================================================*/
985 965
986#define CS_EXIT_TEST(ret, svc, label) \ 966#define CS_EXIT_TEST(ret, svc, label) \
987if (ret != CS_SUCCESS) { cs_error(link->handle, svc, ret); goto label; } 967if (ret != CS_SUCCESS) { cs_error(link, svc, ret); goto label; }
988 968
989static void smc91c92_config(dev_link_t *link) 969static int smc91c92_config(struct pcmcia_device *link)
990{ 970{
991 client_handle_t handle = link->handle;
992 struct net_device *dev = link->priv; 971 struct net_device *dev = link->priv;
993 struct smc_private *smc = netdev_priv(dev); 972 struct smc_private *smc = netdev_priv(dev);
994 struct smc_cfg_mem *cfg_mem; 973 struct smc_cfg_mem *cfg_mem;
@@ -1015,21 +994,18 @@ static void smc91c92_config(dev_link_t *link)
1015 tuple->TupleDataMax = 64; 994 tuple->TupleDataMax = 64;
1016 995
1017 tuple->DesiredTuple = CISTPL_CONFIG; 996 tuple->DesiredTuple = CISTPL_CONFIG;
1018 i = first_tuple(handle, tuple, parse); 997 i = first_tuple(link, tuple, parse);
1019 CS_EXIT_TEST(i, ParseTuple, config_failed); 998 CS_EXIT_TEST(i, ParseTuple, config_failed);
1020 link->conf.ConfigBase = parse->config.base; 999 link->conf.ConfigBase = parse->config.base;
1021 link->conf.Present = parse->config.rmask[0]; 1000 link->conf.Present = parse->config.rmask[0];
1022 1001
1023 tuple->DesiredTuple = CISTPL_MANFID; 1002 tuple->DesiredTuple = CISTPL_MANFID;
1024 tuple->Attributes = TUPLE_RETURN_COMMON; 1003 tuple->Attributes = TUPLE_RETURN_COMMON;
1025 if (first_tuple(handle, tuple, parse) == CS_SUCCESS) { 1004 if (first_tuple(link, tuple, parse) == CS_SUCCESS) {
1026 smc->manfid = parse->manfid.manf; 1005 smc->manfid = parse->manfid.manf;
1027 smc->cardid = parse->manfid.card; 1006 smc->cardid = parse->manfid.card;
1028 } 1007 }
1029 1008
1030 /* Configure card */
1031 link->state |= DEV_CONFIG;
1032
1033 if ((smc->manfid == MANFID_OSITECH) && 1009 if ((smc->manfid == MANFID_OSITECH) &&
1034 (smc->cardid != PRODID_OSITECH_SEVEN)) { 1010 (smc->cardid != PRODID_OSITECH_SEVEN)) {
1035 i = osi_config(link); 1011 i = osi_config(link);
@@ -1043,9 +1019,9 @@ static void smc91c92_config(dev_link_t *link)
1043 } 1019 }
1044 CS_EXIT_TEST(i, RequestIO, config_failed); 1020 CS_EXIT_TEST(i, RequestIO, config_failed);
1045 1021
1046 i = pcmcia_request_irq(link->handle, &link->irq); 1022 i = pcmcia_request_irq(link, &link->irq);
1047 CS_EXIT_TEST(i, RequestIRQ, config_failed); 1023 CS_EXIT_TEST(i, RequestIRQ, config_failed);
1048 i = pcmcia_request_configuration(link->handle, &link->conf); 1024 i = pcmcia_request_configuration(link, &link->conf);
1049 CS_EXIT_TEST(i, RequestConfiguration, config_failed); 1025 CS_EXIT_TEST(i, RequestConfiguration, config_failed);
1050 1026
1051 if (smc->manfid == MANFID_MOTOROLA) 1027 if (smc->manfid == MANFID_MOTOROLA)
@@ -1124,13 +1100,12 @@ static void smc91c92_config(dev_link_t *link)
1124 SMC_SELECT_BANK(0); 1100 SMC_SELECT_BANK(0);
1125 } 1101 }
1126 1102
1127 link->dev = &smc->node; 1103 link->dev_node = &smc->node;
1128 link->state &= ~DEV_CONFIG_PENDING; 1104 SET_NETDEV_DEV(dev, &handle_to_dev(link));
1129 SET_NETDEV_DEV(dev, &handle_to_dev(handle));
1130 1105
1131 if (register_netdev(dev) != 0) { 1106 if (register_netdev(dev) != 0) {
1132 printk(KERN_ERR "smc91c92_cs: register_netdev() failed\n"); 1107 printk(KERN_ERR "smc91c92_cs: register_netdev() failed\n");
1133 link->dev = NULL; 1108 link->dev_node = NULL;
1134 goto config_undo; 1109 goto config_undo;
1135 } 1110 }
1136 1111
@@ -1160,15 +1135,14 @@ static void smc91c92_config(dev_link_t *link)
1160 } 1135 }
1161 } 1136 }
1162 kfree(cfg_mem); 1137 kfree(cfg_mem);
1163 return; 1138 return 0;
1164 1139
1165config_undo: 1140config_undo:
1166 unregister_netdev(dev); 1141 unregister_netdev(dev);
1167config_failed: /* CS_EXIT_TEST() calls jump to here... */ 1142config_failed: /* CS_EXIT_TEST() calls jump to here... */
1168 smc91c92_release(link); 1143 smc91c92_release(link);
1169 link->state &= ~DEV_CONFIG_PENDING;
1170 kfree(cfg_mem); 1144 kfree(cfg_mem);
1171 1145 return -ENODEV;
1172} /* smc91c92_config */ 1146} /* smc91c92_config */
1173 1147
1174/*====================================================================== 1148/*======================================================================
@@ -1179,22 +1153,15 @@ config_failed: /* CS_EXIT_TEST() calls jump to here... */
1179 1153
1180======================================================================*/ 1154======================================================================*/
1181 1155
1182static void smc91c92_release(dev_link_t *link) 1156static void smc91c92_release(struct pcmcia_device *link)
1183{ 1157{
1184 1158 DEBUG(0, "smc91c92_release(0x%p)\n", link);
1185 DEBUG(0, "smc91c92_release(0x%p)\n", link); 1159 if (link->win) {
1186 1160 struct net_device *dev = link->priv;
1187 pcmcia_release_configuration(link->handle); 1161 struct smc_private *smc = netdev_priv(dev);
1188 pcmcia_release_io(link->handle, &link->io); 1162 iounmap(smc->base);
1189 pcmcia_release_irq(link->handle, &link->irq); 1163 }
1190 if (link->win) { 1164 pcmcia_disable_device(link);
1191 struct net_device *dev = link->priv;
1192 struct smc_private *smc = netdev_priv(dev);
1193 iounmap(smc->base);
1194 pcmcia_release_window(link->win);
1195 }
1196
1197 link->state &= ~DEV_CONFIG;
1198} 1165}
1199 1166
1200/*====================================================================== 1167/*======================================================================
@@ -1283,7 +1250,7 @@ static void smc_dump(struct net_device *dev)
1283static int smc_open(struct net_device *dev) 1250static int smc_open(struct net_device *dev)
1284{ 1251{
1285 struct smc_private *smc = netdev_priv(dev); 1252 struct smc_private *smc = netdev_priv(dev);
1286 dev_link_t *link = &smc->link; 1253 struct pcmcia_device *link = smc->p_dev;
1287 1254
1288#ifdef PCMCIA_DEBUG 1255#ifdef PCMCIA_DEBUG
1289 DEBUG(0, "%s: smc_open(%p), ID/Window %4.4x.\n", 1256 DEBUG(0, "%s: smc_open(%p), ID/Window %4.4x.\n",
@@ -1292,7 +1259,7 @@ static int smc_open(struct net_device *dev)
1292#endif 1259#endif
1293 1260
1294 /* Check that the PCMCIA card is still here. */ 1261 /* Check that the PCMCIA card is still here. */
1295 if (!DEV_OK(link)) 1262 if (!pcmcia_dev_present(link))
1296 return -ENODEV; 1263 return -ENODEV;
1297 /* Physical device present signature. */ 1264 /* Physical device present signature. */
1298 if (check_sig(link) < 0) { 1265 if (check_sig(link) < 0) {
@@ -1320,7 +1287,7 @@ static int smc_open(struct net_device *dev)
1320static int smc_close(struct net_device *dev) 1287static int smc_close(struct net_device *dev)
1321{ 1288{
1322 struct smc_private *smc = netdev_priv(dev); 1289 struct smc_private *smc = netdev_priv(dev);
1323 dev_link_t *link = &smc->link; 1290 struct pcmcia_device *link = smc->p_dev;
1324 kio_addr_t ioaddr = dev->base_addr; 1291 kio_addr_t ioaddr = dev->base_addr;
1325 1292
1326 DEBUG(0, "%s: smc_close(), status %4.4x.\n", 1293 DEBUG(0, "%s: smc_close(), status %4.4x.\n",
@@ -2311,7 +2278,7 @@ static struct pcmcia_driver smc91c92_cs_driver = {
2311 .drv = { 2278 .drv = {
2312 .name = "smc91c92_cs", 2279 .name = "smc91c92_cs",
2313 }, 2280 },
2314 .probe = smc91c92_attach, 2281 .probe = smc91c92_probe,
2315 .remove = smc91c92_detach, 2282 .remove = smc91c92_detach,
2316 .id_table = smc91c92_ids, 2283 .id_table = smc91c92_ids,
2317 .suspend = smc91c92_suspend, 2284 .suspend = smc91c92_suspend,
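
[Editorial note on the hunks above] The smc91c92 changes show the conversion pattern that recurs through the rest of this merge: the driver works on a struct pcmcia_device directly instead of an embedded dev_link_t, probe() simply returns the result of config(), config() reports 0 or -ENODEV, and the old release_configuration/release_io/release_irq trio plus the DEV_CONFIG state bits collapse into a single pcmcia_disable_device() call. A minimal sketch of that shape, assuming the 2.6.17-era PCMCIA headers; the foo_* names and foo_private structure are hypothetical stand-ins for the per-driver code, not anything in this commit:

	#include <linux/kernel.h>
	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>
	#include <pcmcia/cs_types.h>
	#include <pcmcia/cs.h>
	#include <pcmcia/ds.h>

	struct foo_private {
		struct pcmcia_device *p_dev;	/* replaces the embedded dev_link_t */
		dev_node_t node;
	};

	static void foo_release(struct pcmcia_device *link)
	{
		/* one call now undoes RequestIO/RequestIRQ/RequestConfiguration */
		pcmcia_disable_device(link);
	}

	static int foo_config(struct pcmcia_device *link)
	{
		struct net_device *dev = link->priv;
		struct foo_private *priv = netdev_priv(dev);

		if (pcmcia_request_irq(link, &link->irq) != 0)
			goto failed;
		if (pcmcia_request_configuration(link, &link->conf) != 0)
			goto failed;

		link->dev_node = &priv->node;	/* was link->dev */
		SET_NETDEV_DEV(dev, &handle_to_dev(link));
		if (register_netdev(dev) != 0) {
			link->dev_node = NULL;
			goto failed;
		}
		return 0;

	failed:
		foo_release(link);
		return -ENODEV;
	}

	static int foo_probe(struct pcmcia_device *link)
	{
		struct net_device *dev;
		struct foo_private *priv;

		dev = alloc_etherdev(sizeof(struct foo_private));
		if (!dev)
			return -ENOMEM;
		priv = netdev_priv(dev);
		priv->p_dev = link;
		link->priv = dev;

		link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
		link->irq.IRQInfo1 = IRQ_LEVEL_ID;
		link->conf.Attributes = CONF_ENABLE_IRQ;	/* no conf.Vcc any more */
		link->conf.IntType = INT_MEMORY_AND_IO;

		/* no DEV_PRESENT/DEV_CONFIG_PENDING bookkeeping; configure right away */
		return foo_config(link);
	}
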
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index eed496803fe4..71f45056a70c 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -289,9 +289,9 @@ static void mii_wr(kio_addr_t ioaddr, u_char phyaddr, u_char phyreg,
289 * and ejection events. They are invoked from the event handler. 289 * and ejection events. They are invoked from the event handler.
290 */ 290 */
291 291
292static int has_ce2_string(dev_link_t * link); 292static int has_ce2_string(struct pcmcia_device * link);
293static void xirc2ps_config(dev_link_t * link); 293static int xirc2ps_config(struct pcmcia_device * link);
294static void xirc2ps_release(dev_link_t * link); 294static void xirc2ps_release(struct pcmcia_device * link);
295 295
296/**************** 296/****************
297 * The attach() and detach() entry points are used to create and destroy 297 * The attach() and detach() entry points are used to create and destroy
@@ -313,10 +313,10 @@ static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id, struct pt_regs *regs
313/**************** 313/****************
314 * A linked list of "instances" of the device. Each actual 314 * A linked list of "instances" of the device. Each actual
315 * PCMCIA card corresponds to one device instance, and is described 315 * PCMCIA card corresponds to one device instance, and is described
316 * by one dev_link_t structure (defined in ds.h). 316 * by one struct pcmcia_device structure (defined in ds.h).
317 * 317 *
318 * You may not want to use a linked list for this -- for example, the 318 * You may not want to use a linked list for this -- for example, the
319 * memory card driver uses an array of dev_link_t pointers, where minor 319 * memory card driver uses an array of struct pcmcia_device pointers, where minor
320 * device numbers are used to derive the corresponding array index. 320 * device numbers are used to derive the corresponding array index.
321 */ 321 */
322 322
@@ -326,13 +326,13 @@ static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id, struct pt_regs *regs
326 * example, ethernet cards, modems). In other cases, there may be 326 * example, ethernet cards, modems). In other cases, there may be
327 * many actual or logical devices (SCSI adapters, memory cards with 327 * many actual or logical devices (SCSI adapters, memory cards with
328 * multiple partitions). The dev_node_t structures need to be kept 328 * multiple partitions). The dev_node_t structures need to be kept
329 * in a linked list starting at the 'dev' field of a dev_link_t 329 * in a linked list starting at the 'dev' field of a struct pcmcia_device
330 * structure. We allocate them in the card's private data structure, 330 * structure. We allocate them in the card's private data structure,
331 * because they generally can't be allocated dynamically. 331 * because they generally can't be allocated dynamically.
332 */ 332 */
333 333
334typedef struct local_info_t { 334typedef struct local_info_t {
335 dev_link_t link; 335 struct pcmcia_device *p_dev;
336 dev_node_t node; 336 dev_node_t node;
337 struct net_device_stats stats; 337 struct net_device_stats stats;
338 int card_type; 338 int card_type;
@@ -355,7 +355,7 @@ static void do_tx_timeout(struct net_device *dev);
355static struct net_device_stats *do_get_stats(struct net_device *dev); 355static struct net_device_stats *do_get_stats(struct net_device *dev);
356static void set_addresses(struct net_device *dev); 356static void set_addresses(struct net_device *dev);
357static void set_multicast_list(struct net_device *dev); 357static void set_multicast_list(struct net_device *dev);
358static int set_card_type(dev_link_t *link, const void *s); 358static int set_card_type(struct pcmcia_device *link, const void *s);
359static int do_config(struct net_device *dev, struct ifmap *map); 359static int do_config(struct net_device *dev, struct ifmap *map);
360static int do_open(struct net_device *dev); 360static int do_open(struct net_device *dev);
361static int do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 361static int do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
@@ -368,7 +368,7 @@ static int do_stop(struct net_device *dev);
368 368
369/*=============== Helper functions =========================*/ 369/*=============== Helper functions =========================*/
370static int 370static int
371first_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse) 371first_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
372{ 372{
373 int err; 373 int err;
374 374
@@ -379,7 +379,7 @@ first_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
379} 379}
380 380
381static int 381static int
382next_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse) 382next_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
383{ 383{
384 int err; 384 int err;
385 385
@@ -553,9 +553,8 @@ mii_wr(kio_addr_t ioaddr, u_char phyaddr, u_char phyreg, unsigned data, int len)
553 */ 553 */
554 554
555static int 555static int
556xirc2ps_attach(struct pcmcia_device *p_dev) 556xirc2ps_probe(struct pcmcia_device *link)
557{ 557{
558 dev_link_t *link;
559 struct net_device *dev; 558 struct net_device *dev;
560 local_info_t *local; 559 local_info_t *local;
561 560
@@ -566,12 +565,11 @@ xirc2ps_attach(struct pcmcia_device *p_dev)
566 if (!dev) 565 if (!dev)
567 return -ENOMEM; 566 return -ENOMEM;
568 local = netdev_priv(dev); 567 local = netdev_priv(dev);
569 link = &local->link; 568 local->p_dev = link;
570 link->priv = dev; 569 link->priv = dev;
571 570
572 /* General socket configuration */ 571 /* General socket configuration */
573 link->conf.Attributes = CONF_ENABLE_IRQ; 572 link->conf.Attributes = CONF_ENABLE_IRQ;
574 link->conf.Vcc = 50;
575 link->conf.IntType = INT_MEMORY_AND_IO; 573 link->conf.IntType = INT_MEMORY_AND_IO;
576 link->conf.ConfigIndex = 1; 574 link->conf.ConfigIndex = 1;
577 link->conf.Present = PRESENT_OPTION; 575 link->conf.Present = PRESENT_OPTION;
@@ -593,13 +591,7 @@ xirc2ps_attach(struct pcmcia_device *p_dev)
593 dev->watchdog_timeo = TX_TIMEOUT; 591 dev->watchdog_timeo = TX_TIMEOUT;
594#endif 592#endif
595 593
596 link->handle = p_dev; 594 return xirc2ps_config(link);
597 p_dev->instance = link;
598
599 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
600 xirc2ps_config(link);
601
602 return 0;
603} /* xirc2ps_attach */ 595} /* xirc2ps_attach */
604 596
605/**************** 597/****************
@@ -610,18 +602,16 @@ xirc2ps_attach(struct pcmcia_device *p_dev)
610 */ 602 */
611 603
612static void 604static void
613xirc2ps_detach(struct pcmcia_device *p_dev) 605xirc2ps_detach(struct pcmcia_device *link)
614{ 606{
615 dev_link_t *link = dev_to_instance(p_dev);
616 struct net_device *dev = link->priv; 607 struct net_device *dev = link->priv;
617 608
618 DEBUG(0, "detach(0x%p)\n", link); 609 DEBUG(0, "detach(0x%p)\n", link);
619 610
620 if (link->dev) 611 if (link->dev_node)
621 unregister_netdev(dev); 612 unregister_netdev(dev);
622 613
623 if (link->state & DEV_CONFIG) 614 xirc2ps_release(link);
624 xirc2ps_release(link);
625 615
626 free_netdev(dev); 616 free_netdev(dev);
627} /* xirc2ps_detach */ 617} /* xirc2ps_detach */
@@ -645,7 +635,7 @@ xirc2ps_detach(struct pcmcia_device *p_dev)
645 * 635 *
646 */ 636 */
647static int 637static int
648set_card_type(dev_link_t *link, const void *s) 638set_card_type(struct pcmcia_device *link, const void *s)
649{ 639{
650 struct net_device *dev = link->priv; 640 struct net_device *dev = link->priv;
651 local_info_t *local = netdev_priv(dev); 641 local_info_t *local = netdev_priv(dev);
@@ -714,9 +704,8 @@ set_card_type(dev_link_t *link, const void *s)
714 * Returns: true if this is a CE2 704 * Returns: true if this is a CE2
715 */ 705 */
716static int 706static int
717has_ce2_string(dev_link_t * link) 707has_ce2_string(struct pcmcia_device * link)
718{ 708{
719 client_handle_t handle = link->handle;
720 tuple_t tuple; 709 tuple_t tuple;
721 cisparse_t parse; 710 cisparse_t parse;
722 u_char buf[256]; 711 u_char buf[256];
@@ -726,7 +715,7 @@ has_ce2_string(dev_link_t * link)
726 tuple.TupleDataMax = 254; 715 tuple.TupleDataMax = 254;
727 tuple.TupleOffset = 0; 716 tuple.TupleOffset = 0;
728 tuple.DesiredTuple = CISTPL_VERS_1; 717 tuple.DesiredTuple = CISTPL_VERS_1;
729 if (!first_tuple(handle, &tuple, &parse) && parse.version_1.ns > 2) { 718 if (!first_tuple(link, &tuple, &parse) && parse.version_1.ns > 2) {
730 if (strstr(parse.version_1.str + parse.version_1.ofs[2], "CE2")) 719 if (strstr(parse.version_1.str + parse.version_1.ofs[2], "CE2"))
731 return 1; 720 return 1;
732 } 721 }
@@ -738,10 +727,9 @@ has_ce2_string(dev_link_t * link)
738 * is received, to configure the PCMCIA socket, and to make the 727 * is received, to configure the PCMCIA socket, and to make the
739 * ethernet device available to the system. 728 * ethernet device available to the system.
740 */ 729 */
741static void 730static int
742xirc2ps_config(dev_link_t * link) 731xirc2ps_config(struct pcmcia_device * link)
743{ 732{
744 client_handle_t handle = link->handle;
745 struct net_device *dev = link->priv; 733 struct net_device *dev = link->priv;
746 local_info_t *local = netdev_priv(dev); 734 local_info_t *local = netdev_priv(dev);
747 tuple_t tuple; 735 tuple_t tuple;
@@ -767,7 +755,7 @@ xirc2ps_config(dev_link_t * link)
767 755
768 /* Is this a valid card */ 756 /* Is this a valid card */
769 tuple.DesiredTuple = CISTPL_MANFID; 757 tuple.DesiredTuple = CISTPL_MANFID;
770 if ((err=first_tuple(handle, &tuple, &parse))) { 758 if ((err=first_tuple(link, &tuple, &parse))) {
771 printk(KNOT_XIRC "manfid not found in CIS\n"); 759 printk(KNOT_XIRC "manfid not found in CIS\n");
772 goto failure; 760 goto failure;
773 } 761 }
@@ -803,15 +791,15 @@ xirc2ps_config(dev_link_t * link)
803 791
804 /* get configuration stuff */ 792 /* get configuration stuff */
805 tuple.DesiredTuple = CISTPL_CONFIG; 793 tuple.DesiredTuple = CISTPL_CONFIG;
806 if ((err=first_tuple(handle, &tuple, &parse))) 794 if ((err=first_tuple(link, &tuple, &parse)))
807 goto cis_error; 795 goto cis_error;
808 link->conf.ConfigBase = parse.config.base; 796 link->conf.ConfigBase = parse.config.base;
809 link->conf.Present = parse.config.rmask[0]; 797 link->conf.Present = parse.config.rmask[0];
810 798
811 /* get the ethernet address from the CIS */ 799 /* get the ethernet address from the CIS */
812 tuple.DesiredTuple = CISTPL_FUNCE; 800 tuple.DesiredTuple = CISTPL_FUNCE;
813 for (err = first_tuple(handle, &tuple, &parse); !err; 801 for (err = first_tuple(link, &tuple, &parse); !err;
814 err = next_tuple(handle, &tuple, &parse)) { 802 err = next_tuple(link, &tuple, &parse)) {
815 /* Once I saw two CISTPL_FUNCE_LAN_NODE_ID entries: 803 /* Once I saw two CISTPL_FUNCE_LAN_NODE_ID entries:
816 * the first one with a length of zero the second correct - 804 * the first one with a length of zero the second correct -
817 * so I skip all entries with length 0 */ 805 * so I skip all entries with length 0 */
@@ -821,8 +809,8 @@ xirc2ps_config(dev_link_t * link)
821 } 809 }
822 if (err) { /* not found: try to get the node-id from tuple 0x89 */ 810 if (err) { /* not found: try to get the node-id from tuple 0x89 */
823 tuple.DesiredTuple = 0x89; /* data layout looks like tuple 0x22 */ 811 tuple.DesiredTuple = 0x89; /* data layout looks like tuple 0x22 */
824 if ((err = pcmcia_get_first_tuple(handle, &tuple)) == 0 && 812 if ((err = pcmcia_get_first_tuple(link, &tuple)) == 0 &&
825 (err = pcmcia_get_tuple_data(handle, &tuple)) == 0) { 813 (err = pcmcia_get_tuple_data(link, &tuple)) == 0) {
826 if (tuple.TupleDataLen == 8 && *buf == CISTPL_FUNCE_LAN_NODE_ID) 814 if (tuple.TupleDataLen == 8 && *buf == CISTPL_FUNCE_LAN_NODE_ID)
827 memcpy(&parse, buf, 8); 815 memcpy(&parse, buf, 8);
828 else 816 else
@@ -831,8 +819,8 @@ xirc2ps_config(dev_link_t * link)
831 } 819 }
832 if (err) { /* another try (James Lehmer's CE2 version 4.1)*/ 820 if (err) { /* another try (James Lehmer's CE2 version 4.1)*/
833 tuple.DesiredTuple = CISTPL_FUNCE; 821 tuple.DesiredTuple = CISTPL_FUNCE;
834 for (err = first_tuple(handle, &tuple, &parse); !err; 822 for (err = first_tuple(link, &tuple, &parse); !err;
835 err = next_tuple(handle, &tuple, &parse)) { 823 err = next_tuple(link, &tuple, &parse)) {
836 if (parse.funce.type == 0x02 && parse.funce.data[0] == 1 824 if (parse.funce.type == 0x02 && parse.funce.data[0] == 1
837 && parse.funce.data[1] == 6 && tuple.TupleDataLen == 13) { 825 && parse.funce.data[1] == 6 && tuple.TupleDataLen == 13) {
838 buf[1] = 4; 826 buf[1] = 4;
@@ -853,9 +841,6 @@ xirc2ps_config(dev_link_t * link)
853 for (i=0; i < 6; i++) 841 for (i=0; i < 6; i++)
854 dev->dev_addr[i] = node_id->id[i]; 842 dev->dev_addr[i] = node_id->id[i];
855 843
856 /* Configure card */
857 link->state |= DEV_CONFIG;
858
859 link->io.IOAddrLines =10; 844 link->io.IOAddrLines =10;
860 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; 845 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
861 link->irq.Attributes = IRQ_HANDLE_PRESENT; 846 link->irq.Attributes = IRQ_HANDLE_PRESENT;
@@ -875,14 +860,14 @@ xirc2ps_config(dev_link_t * link)
875 * Ethernet port */ 860 * Ethernet port */
876 link->io.NumPorts1 = 16; /* no Mako stuff anymore */ 861 link->io.NumPorts1 = 16; /* no Mako stuff anymore */
877 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 862 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
878 for (err = first_tuple(handle, &tuple, &parse); !err; 863 for (err = first_tuple(link, &tuple, &parse); !err;
879 err = next_tuple(handle, &tuple, &parse)) { 864 err = next_tuple(link, &tuple, &parse)) {
880 if (cf->io.nwin > 0 && (cf->io.win[0].base & 0xf) == 8) { 865 if (cf->io.nwin > 0 && (cf->io.win[0].base & 0xf) == 8) {
881 for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) { 866 for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) {
882 link->conf.ConfigIndex = cf->index ; 867 link->conf.ConfigIndex = cf->index ;
883 link->io.BasePort2 = cf->io.win[0].base; 868 link->io.BasePort2 = cf->io.win[0].base;
884 link->io.BasePort1 = ioaddr; 869 link->io.BasePort1 = ioaddr;
885 if (!(err=pcmcia_request_io(link->handle, &link->io))) 870 if (!(err=pcmcia_request_io(link, &link->io)))
886 goto port_found; 871 goto port_found;
887 } 872 }
888 } 873 }
@@ -896,15 +881,15 @@ xirc2ps_config(dev_link_t * link)
896 */ 881 */
897 for (pass=0; pass < 2; pass++) { 882 for (pass=0; pass < 2; pass++) {
898 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 883 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
899 for (err = first_tuple(handle, &tuple, &parse); !err; 884 for (err = first_tuple(link, &tuple, &parse); !err;
900 err = next_tuple(handle, &tuple, &parse)){ 885 err = next_tuple(link, &tuple, &parse)){
901 if (cf->io.nwin > 0 && (cf->io.win[0].base & 0xf) == 8){ 886 if (cf->io.nwin > 0 && (cf->io.win[0].base & 0xf) == 8){
902 link->conf.ConfigIndex = cf->index ; 887 link->conf.ConfigIndex = cf->index ;
903 link->io.BasePort2 = cf->io.win[0].base; 888 link->io.BasePort2 = cf->io.win[0].base;
904 link->io.BasePort1 = link->io.BasePort2 889 link->io.BasePort1 = link->io.BasePort2
905 + (pass ? (cf->index & 0x20 ? -24:8) 890 + (pass ? (cf->index & 0x20 ? -24:8)
906 : (cf->index & 0x20 ? 8:-24)); 891 : (cf->index & 0x20 ? 8:-24));
907 if (!(err=pcmcia_request_io(link->handle, &link->io))) 892 if (!(err=pcmcia_request_io(link, &link->io)))
908 goto port_found; 893 goto port_found;
909 } 894 }
910 } 895 }
@@ -919,12 +904,12 @@ xirc2ps_config(dev_link_t * link)
919 link->io.NumPorts1 = 16; 904 link->io.NumPorts1 = 16;
920 for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) { 905 for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) {
921 link->io.BasePort1 = ioaddr; 906 link->io.BasePort1 = ioaddr;
922 if (!(err=pcmcia_request_io(link->handle, &link->io))) 907 if (!(err=pcmcia_request_io(link, &link->io)))
923 goto port_found; 908 goto port_found;
924 } 909 }
925 link->io.BasePort1 = 0; /* let CS decide */ 910 link->io.BasePort1 = 0; /* let CS decide */
926 if ((err=pcmcia_request_io(link->handle, &link->io))) { 911 if ((err=pcmcia_request_io(link, &link->io))) {
927 cs_error(link->handle, RequestIO, err); 912 cs_error(link, RequestIO, err);
928 goto config_error; 913 goto config_error;
929 } 914 }
930 } 915 }
@@ -936,8 +921,8 @@ xirc2ps_config(dev_link_t * link)
936 * Now allocate an interrupt line. Note that this does not 921 * Now allocate an interrupt line. Note that this does not
937 * actually assign a handler to the interrupt. 922 * actually assign a handler to the interrupt.
938 */ 923 */
939 if ((err=pcmcia_request_irq(link->handle, &link->irq))) { 924 if ((err=pcmcia_request_irq(link, &link->irq))) {
940 cs_error(link->handle, RequestIRQ, err); 925 cs_error(link, RequestIRQ, err);
941 goto config_error; 926 goto config_error;
942 } 927 }
943 928
@@ -945,8 +930,8 @@ xirc2ps_config(dev_link_t * link)
945 * This actually configures the PCMCIA socket -- setting up 930 * This actually configures the PCMCIA socket -- setting up
946 * the I/O windows and the interrupt mapping. 931 * the I/O windows and the interrupt mapping.
947 */ 932 */
948 if ((err=pcmcia_request_configuration(link->handle, &link->conf))) { 933 if ((err=pcmcia_request_configuration(link, &link->conf))) {
949 cs_error(link->handle, RequestConfiguration, err); 934 cs_error(link, RequestConfiguration, err);
950 goto config_error; 935 goto config_error;
951 } 936 }
952 937
@@ -963,15 +948,15 @@ xirc2ps_config(dev_link_t * link)
963 reg.Action = CS_WRITE; 948 reg.Action = CS_WRITE;
964 reg.Offset = CISREG_IOBASE_0; 949 reg.Offset = CISREG_IOBASE_0;
965 reg.Value = link->io.BasePort2 & 0xff; 950 reg.Value = link->io.BasePort2 & 0xff;
966 if ((err = pcmcia_access_configuration_register(link->handle, &reg))) { 951 if ((err = pcmcia_access_configuration_register(link, &reg))) {
967 cs_error(link->handle, AccessConfigurationRegister, err); 952 cs_error(link, AccessConfigurationRegister, err);
968 goto config_error; 953 goto config_error;
969 } 954 }
970 reg.Action = CS_WRITE; 955 reg.Action = CS_WRITE;
971 reg.Offset = CISREG_IOBASE_1; 956 reg.Offset = CISREG_IOBASE_1;
972 reg.Value = (link->io.BasePort2 >> 8) & 0xff; 957 reg.Value = (link->io.BasePort2 >> 8) & 0xff;
973 if ((err = pcmcia_access_configuration_register(link->handle, &reg))) { 958 if ((err = pcmcia_access_configuration_register(link, &reg))) {
974 cs_error(link->handle, AccessConfigurationRegister, err); 959 cs_error(link, AccessConfigurationRegister, err);
975 goto config_error; 960 goto config_error;
976 } 961 }
977 962
@@ -982,15 +967,15 @@ xirc2ps_config(dev_link_t * link)
982 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; 967 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
983 req.Base = req.Size = 0; 968 req.Base = req.Size = 0;
984 req.AccessSpeed = 0; 969 req.AccessSpeed = 0;
985 if ((err = pcmcia_request_window(&link->handle, &req, &link->win))) { 970 if ((err = pcmcia_request_window(&link, &req, &link->win))) {
986 cs_error(link->handle, RequestWindow, err); 971 cs_error(link, RequestWindow, err);
987 goto config_error; 972 goto config_error;
988 } 973 }
989 local->dingo_ccr = ioremap(req.Base,0x1000) + 0x0800; 974 local->dingo_ccr = ioremap(req.Base,0x1000) + 0x0800;
990 mem.CardOffset = 0x0; 975 mem.CardOffset = 0x0;
991 mem.Page = 0; 976 mem.Page = 0;
992 if ((err = pcmcia_map_mem_page(link->win, &mem))) { 977 if ((err = pcmcia_map_mem_page(link->win, &mem))) {
993 cs_error(link->handle, MapMemPage, err); 978 cs_error(link, MapMemPage, err);
994 goto config_error; 979 goto config_error;
995 } 980 }
996 981
@@ -1050,13 +1035,12 @@ xirc2ps_config(dev_link_t * link)
1050 if (local->dingo) 1035 if (local->dingo)
1051 do_reset(dev, 1); /* a kludge to make the cem56 work */ 1036 do_reset(dev, 1); /* a kludge to make the cem56 work */
1052 1037
1053 link->dev = &local->node; 1038 link->dev_node = &local->node;
1054 link->state &= ~DEV_CONFIG_PENDING; 1039 SET_NETDEV_DEV(dev, &handle_to_dev(link));
1055 SET_NETDEV_DEV(dev, &handle_to_dev(handle));
1056 1040
1057 if ((err=register_netdev(dev))) { 1041 if ((err=register_netdev(dev))) {
1058 printk(KNOT_XIRC "register_netdev() failed\n"); 1042 printk(KNOT_XIRC "register_netdev() failed\n");
1059 link->dev = NULL; 1043 link->dev_node = NULL;
1060 goto config_error; 1044 goto config_error;
1061 } 1045 }
1062 1046
@@ -1069,17 +1053,16 @@ xirc2ps_config(dev_link_t * link)
1069 printk("%c%02X", i?':':' ', dev->dev_addr[i]); 1053 printk("%c%02X", i?':':' ', dev->dev_addr[i]);
1070 printk("\n"); 1054 printk("\n");
1071 1055
1072 return; 1056 return 0;
1073 1057
1074 config_error: 1058 config_error:
1075 link->state &= ~DEV_CONFIG_PENDING;
1076 xirc2ps_release(link); 1059 xirc2ps_release(link);
1077 return; 1060 return -ENODEV;
1078 1061
1079 cis_error: 1062 cis_error:
1080 printk(KNOT_XIRC "unable to parse CIS\n"); 1063 printk(KNOT_XIRC "unable to parse CIS\n");
1081 failure: 1064 failure:
1082 link->state &= ~DEV_CONFIG_PENDING; 1065 return -ENODEV;
1083} /* xirc2ps_config */ 1066} /* xirc2ps_config */
1084 1067
1085/**************** 1068/****************
@@ -1088,57 +1071,41 @@ xirc2ps_config(dev_link_t * link)
1088 * still open, this will be postponed until it is closed. 1071 * still open, this will be postponed until it is closed.
1089 */ 1072 */
1090static void 1073static void
1091xirc2ps_release(dev_link_t *link) 1074xirc2ps_release(struct pcmcia_device *link)
1092{ 1075{
1076 DEBUG(0, "release(0x%p)\n", link);
1093 1077
1094 DEBUG(0, "release(0x%p)\n", link); 1078 if (link->win) {
1095 1079 struct net_device *dev = link->priv;
1096 if (link->win) { 1080 local_info_t *local = netdev_priv(dev);
1097 struct net_device *dev = link->priv; 1081 if (local->dingo)
1098 local_info_t *local = netdev_priv(dev); 1082 iounmap(local->dingo_ccr - 0x0800);
1099 if (local->dingo) 1083 }
1100 iounmap(local->dingo_ccr - 0x0800); 1084 pcmcia_disable_device(link);
1101 pcmcia_release_window(link->win);
1102 }
1103 pcmcia_release_configuration(link->handle);
1104 pcmcia_release_io(link->handle, &link->io);
1105 pcmcia_release_irq(link->handle, &link->irq);
1106 link->state &= ~DEV_CONFIG;
1107
1108} /* xirc2ps_release */ 1085} /* xirc2ps_release */
1109 1086
1110/*====================================================================*/ 1087/*====================================================================*/
1111 1088
1112 1089
1113static int xirc2ps_suspend(struct pcmcia_device *p_dev) 1090static int xirc2ps_suspend(struct pcmcia_device *link)
1114{ 1091{
1115 dev_link_t *link = dev_to_instance(p_dev);
1116 struct net_device *dev = link->priv; 1092 struct net_device *dev = link->priv;
1117 1093
1118 link->state |= DEV_SUSPEND; 1094 if (link->open) {
1119 if (link->state & DEV_CONFIG) { 1095 netif_device_detach(dev);
1120 if (link->open) { 1096 do_powerdown(dev);
1121 netif_device_detach(dev);
1122 do_powerdown(dev);
1123 }
1124 pcmcia_release_configuration(link->handle);
1125 } 1097 }
1126 1098
1127 return 0; 1099 return 0;
1128} 1100}
1129 1101
1130static int xirc2ps_resume(struct pcmcia_device *p_dev) 1102static int xirc2ps_resume(struct pcmcia_device *link)
1131{ 1103{
1132 dev_link_t *link = dev_to_instance(p_dev);
1133 struct net_device *dev = link->priv; 1104 struct net_device *dev = link->priv;
1134 1105
1135 link->state &= ~DEV_SUSPEND; 1106 if (link->open) {
1136 if (link->state & DEV_CONFIG) { 1107 do_reset(dev,1);
1137 pcmcia_request_configuration(link->handle, &link->conf); 1108 netif_device_attach(dev);
1138 if (link->open) {
1139 do_reset(dev,1);
1140 netif_device_attach(dev);
1141 }
1142 } 1109 }
1143 1110
1144 return 0; 1111 return 0;
@@ -1552,13 +1519,13 @@ static int
1552do_open(struct net_device *dev) 1519do_open(struct net_device *dev)
1553{ 1520{
1554 local_info_t *lp = netdev_priv(dev); 1521 local_info_t *lp = netdev_priv(dev);
1555 dev_link_t *link = &lp->link; 1522 struct pcmcia_device *link = lp->p_dev;
1556 1523
1557 DEBUG(0, "do_open(%p)\n", dev); 1524 DEBUG(0, "do_open(%p)\n", dev);
1558 1525
1559 /* Check that the PCMCIA card is still here. */ 1526 /* Check that the PCMCIA card is still here. */
1560 /* Physical device present signature. */ 1527 /* Physical device present signature. */
1561 if (!DEV_OK(link)) 1528 if (!pcmcia_dev_present(link))
1562 return -ENODEV; 1529 return -ENODEV;
1563 1530
1564 /* okay */ 1531 /* okay */
@@ -1882,7 +1849,7 @@ do_stop(struct net_device *dev)
1882{ 1849{
1883 kio_addr_t ioaddr = dev->base_addr; 1850 kio_addr_t ioaddr = dev->base_addr;
1884 local_info_t *lp = netdev_priv(dev); 1851 local_info_t *lp = netdev_priv(dev);
1885 dev_link_t *link = &lp->link; 1852 struct pcmcia_device *link = lp->p_dev;
1886 1853
1887 DEBUG(0, "do_stop(%p)\n", dev); 1854 DEBUG(0, "do_stop(%p)\n", dev);
1888 1855
@@ -1935,7 +1902,7 @@ static struct pcmcia_driver xirc2ps_cs_driver = {
1935 .drv = { 1902 .drv = {
1936 .name = "xirc2ps_cs", 1903 .name = "xirc2ps_cs",
1937 }, 1904 },
1938 .probe = xirc2ps_attach, 1905 .probe = xirc2ps_probe,
1939 .remove = xirc2ps_detach, 1906 .remove = xirc2ps_detach,
1940 .id_table = xirc2ps_ids, 1907 .id_table = xirc2ps_ids,
1941 .suspend = xirc2ps_suspend, 1908 .suspend = xirc2ps_suspend,
@@ -1973,7 +1940,7 @@ static int __init setup_xirc2ps_cs(char *str)
1973 MAYBE_SET(lockup_hack, 6); 1940 MAYBE_SET(lockup_hack, 6);
1974#undef MAYBE_SET 1941#undef MAYBE_SET
1975 1942
1976 return 0; 1943 return 1;
1977} 1944}
1978 1945
1979__setup("xirc2ps_cs=", setup_xirc2ps_cs); 1946__setup("xirc2ps_cs=", setup_xirc2ps_cs);
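
[Editorial note on the hunk above] The final xirc2ps hunk changes the __setup() handler to return 1 instead of 0. Boot-option handlers registered with __setup() signal "option consumed" with a non-zero return; returning 0 lets the string fall through to init's argument or environment list as if it were unrecognized. A small sketch of the convention, with a made-up option name:

	#include <linux/init.h>
	#include <linux/kernel.h>

	static int example_opt;

	/* parses "example_opt=<n>" from the kernel command line */
	static int __init example_setup(char *str)
	{
		get_option(&str, &example_opt);
		return 1;	/* non-zero: handled here, do not pass on to init */
	}
	__setup("example_opt=", example_setup);
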
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 35b18057fbdd..45ad036733e2 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -2122,8 +2122,7 @@ static void __devexit starfire_remove_one (struct pci_dev *pdev)
2122 struct net_device *dev = pci_get_drvdata(pdev); 2122 struct net_device *dev = pci_get_drvdata(pdev);
2123 struct netdev_private *np = netdev_priv(dev); 2123 struct netdev_private *np = netdev_priv(dev);
2124 2124
2125 if (!dev) 2125 BUG_ON(!dev);
2126 BUG();
2127 2126
2128 unregister_netdev(dev); 2127 unregister_netdev(dev);
2129 2128
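
[Editorial note on the hunk above] starfire is the first of several drivers in this merge (tg3, abyss and madgemc follow) where an open-coded "if (cond) BUG();" becomes BUG_ON(cond); the macro performs the same unlikely()-annotated test but keeps the assertion to a single statement. Roughly, with a hypothetical remove callback:

	#include <linux/kernel.h>
	#include <linux/init.h>
	#include <linux/pci.h>
	#include <linux/netdevice.h>

	static void __devexit example_remove_one(struct pci_dev *pdev)
	{
		struct net_device *dev = pci_get_drvdata(pdev);

		/* BUG_ON(cond) is essentially: if (unlikely(cond)) BUG(); */
		BUG_ON(!dev);

		unregister_netdev(dev);
		free_netdev(dev);
		pci_set_drvdata(pdev, NULL);
	}
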
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 964c09644832..0b5358072172 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -69,8 +69,8 @@
69 69
70#define DRV_MODULE_NAME "tg3" 70#define DRV_MODULE_NAME "tg3"
71#define PFX DRV_MODULE_NAME ": " 71#define PFX DRV_MODULE_NAME ": "
72#define DRV_MODULE_VERSION "3.55" 72#define DRV_MODULE_VERSION "3.56"
73#define DRV_MODULE_RELDATE "Mar 27, 2006" 73#define DRV_MODULE_RELDATE "Apr 1, 2006"
74 74
75#define TG3_DEF_MAC_MODE 0 75#define TG3_DEF_MAC_MODE 0
76#define TG3_DEF_RX_MODE 0 76#define TG3_DEF_RX_MODE 0
@@ -497,40 +497,33 @@ static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
497 unsigned long flags; 497 unsigned long flags;
498 498
499 spin_lock_irqsave(&tp->indirect_lock, flags); 499 spin_lock_irqsave(&tp->indirect_lock, flags);
500 if (tp->write32 != tg3_write_indirect_reg32) { 500 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
501 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off); 501 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
502 tw32_f(TG3PCI_MEM_WIN_DATA, val);
503 502
504 /* Always leave this as zero. */ 503 /* Always leave this as zero. */
505 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0); 504 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
506 } else {
507 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
508 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
509
510 /* Always leave this as zero. */
511 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
512 }
513 spin_unlock_irqrestore(&tp->indirect_lock, flags); 505 spin_unlock_irqrestore(&tp->indirect_lock, flags);
514} 506}
515 507
508static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
509{
510 /* If no workaround is needed, write to mem space directly */
511 if (tp->write32 != tg3_write_indirect_reg32)
512 tw32(NIC_SRAM_WIN_BASE + off, val);
513 else
514 tg3_write_mem(tp, off, val);
515}
516
516static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) 517static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
517{ 518{
518 unsigned long flags; 519 unsigned long flags;
519 520
520 spin_lock_irqsave(&tp->indirect_lock, flags); 521 spin_lock_irqsave(&tp->indirect_lock, flags);
521 if (tp->write32 != tg3_write_indirect_reg32) { 522 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
522 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off); 523 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
523 *val = tr32(TG3PCI_MEM_WIN_DATA);
524 524
525 /* Always leave this as zero. */ 525 /* Always leave this as zero. */
526 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0); 526 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
527 } else {
528 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
529 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
530
531 /* Always leave this as zero. */
532 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
533 }
534 spin_unlock_irqrestore(&tp->indirect_lock, flags); 527 spin_unlock_irqrestore(&tp->indirect_lock, flags);
535} 528}
536 529
@@ -1374,12 +1367,12 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1374 } 1367 }
1375 } 1368 }
1376 1369
1377 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1378
1379 /* Finally, set the new power state. */ 1370 /* Finally, set the new power state. */
1380 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control); 1371 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1381 udelay(100); /* Delay after power state change */ 1372 udelay(100); /* Delay after power state change */
1382 1373
1374 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1375
1383 return 0; 1376 return 0;
1384} 1377}
1385 1378
@@ -2966,9 +2959,7 @@ static void tg3_tx(struct tg3 *tp)
2966 struct sk_buff *skb = ri->skb; 2959 struct sk_buff *skb = ri->skb;
2967 int i; 2960 int i;
2968 2961
2969 if (unlikely(skb == NULL)) 2962 BUG_ON(skb == NULL);
2970 BUG();
2971
2972 pci_unmap_single(tp->pdev, 2963 pci_unmap_single(tp->pdev,
2973 pci_unmap_addr(ri, mapping), 2964 pci_unmap_addr(ri, mapping),
2974 skb_headlen(skb), 2965 skb_headlen(skb),
@@ -2979,12 +2970,10 @@ static void tg3_tx(struct tg3 *tp)
2979 sw_idx = NEXT_TX(sw_idx); 2970 sw_idx = NEXT_TX(sw_idx);
2980 2971
2981 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2972 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2982 if (unlikely(sw_idx == hw_idx)) 2973 BUG_ON(sw_idx == hw_idx);
2983 BUG();
2984 2974
2985 ri = &tp->tx_buffers[sw_idx]; 2975 ri = &tp->tx_buffers[sw_idx];
2986 if (unlikely(ri->skb != NULL)) 2976 BUG_ON(ri->skb != NULL);
2987 BUG();
2988 2977
2989 pci_unmap_page(tp->pdev, 2978 pci_unmap_page(tp->pdev,
2990 pci_unmap_addr(ri, mapping), 2979 pci_unmap_addr(ri, mapping),
@@ -4935,9 +4924,8 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4935{ 4924{
4936 int i; 4925 int i;
4937 4926
4938 if (offset == TX_CPU_BASE && 4927 BUG_ON(offset == TX_CPU_BASE &&
4939 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 4928 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
4940 BUG();
4941 4929
4942 if (offset == RX_CPU_BASE) { 4930 if (offset == RX_CPU_BASE) {
4943 for (i = 0; i < 10000; i++) { 4931 for (i = 0; i < 10000; i++) {
@@ -6547,11 +6535,11 @@ static void tg3_timer(unsigned long __opaque)
6547 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { 6535 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6548 u32 val; 6536 u32 val;
6549 6537
6550 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, 6538 tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
6551 FWCMD_NICDRV_ALIVE2); 6539 FWCMD_NICDRV_ALIVE2);
6552 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); 6540 tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6553 /* 5 seconds timeout */ 6541 /* 5 seconds timeout */
6554 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5); 6542 tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6555 val = tr32(GRC_RX_CPU_EVENT); 6543 val = tr32(GRC_RX_CPU_EVENT);
6556 val |= (1 << 14); 6544 val |= (1 << 14);
6557 tw32(GRC_RX_CPU_EVENT, val); 6545 tw32(GRC_RX_CPU_EVENT, val);
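
[Editorial note on the hunks above] The tg3 changes split the SRAM write path: tg3_write_mem() and tg3_read_mem() now always go through the PCI config-space window under indirect_lock, while the new tg3_write_mem_fast() uses the memory-mapped window whenever the chip does not need the indirect-register workaround, and the ASF heartbeat in tg3_timer() switches to the fast variant so the periodic mailbox writes skip the lock and config-space round trips. The helper, restated from the hunk above with comments:

	static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
	{
		/* If no workaround is needed, write to mem space directly */
		if (tp->write32 != tg3_write_indirect_reg32)
			tw32(NIC_SRAM_WIN_BASE + off, val);
		else
			tg3_write_mem(tp, off, val);	/* indirect path; takes tp->indirect_lock */
	}
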
diff --git a/drivers/net/tokenring/Kconfig b/drivers/net/tokenring/Kconfig
index e4cfc80b283b..99c4c1922f19 100644
--- a/drivers/net/tokenring/Kconfig
+++ b/drivers/net/tokenring/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5menu "Token Ring devices" 5menu "Token Ring devices"
6 depends on NETDEVICES 6 depends on NETDEVICES && !UML
7 7
8# So far, we only have PCI, ISA, and MCA token ring devices 8# So far, we only have PCI, ISA, and MCA token ring devices
9config TR 9config TR
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
index 9345e68c451e..649d8ea354f5 100644
--- a/drivers/net/tokenring/abyss.c
+++ b/drivers/net/tokenring/abyss.c
@@ -438,8 +438,7 @@ static void __devexit abyss_detach (struct pci_dev *pdev)
438{ 438{
439 struct net_device *dev = pci_get_drvdata(pdev); 439 struct net_device *dev = pci_get_drvdata(pdev);
440 440
441 if (!dev) 441 BUG_ON(!dev);
442 BUG();
443 unregister_netdev(dev); 442 unregister_netdev(dev);
444 release_region(dev->base_addr-0x10, ABYSS_IO_EXTENT); 443 release_region(dev->base_addr-0x10, ABYSS_IO_EXTENT);
445 free_irq(dev->irq, dev); 444 free_irq(dev->irq, dev);
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index 3a25d191ea4a..19e6f4dfd69c 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -735,8 +735,7 @@ static int __devexit madgemc_remove(struct device *device)
735 struct net_local *tp; 735 struct net_local *tp;
736 struct card_info *card; 736 struct card_info *card;
737 737
738 if (!dev) 738 BUG_ON(!dev);
739 BUG();
740 739
741 tp = dev->priv; 740 tp = dev->priv;
742 card = tp->tmspriv; 741 card = tp->tmspriv;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index f85e30190008..bad09ebdb50b 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -356,7 +356,7 @@ config PCI_HERMES
356 356
357config ATMEL 357config ATMEL
358 tristate "Atmel at76c50x chipset 802.11b support" 358 tristate "Atmel at76c50x chipset 802.11b support"
359 depends on NET_RADIO 359 depends on NET_RADIO && (PCI || PCMCIA)
360 select FW_LOADER 360 select FW_LOADER
361 select CRC32 361 select CRC32
362 ---help--- 362 ---help---
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
index a496460ce224..af0cbb6c5c0c 100644
--- a/drivers/net/wireless/airo_cs.c
+++ b/drivers/net/wireless/airo_cs.c
@@ -80,8 +80,8 @@ MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340 PCMCIA cards");
80 event handler. 80 event handler.
81*/ 81*/
82 82
83static void airo_config(dev_link_t *link); 83static int airo_config(struct pcmcia_device *link);
84static void airo_release(dev_link_t *link); 84static void airo_release(struct pcmcia_device *link);
85 85
86/* 86/*
87 The attach() and detach() entry points are used to create and destroy 87 The attach() and detach() entry points are used to create and destroy
@@ -101,10 +101,10 @@ static void airo_detach(struct pcmcia_device *p_dev);
101/* 101/*
102 A linked list of "instances" of the aironet device. Each actual 102 A linked list of "instances" of the aironet device. Each actual
103 PCMCIA card corresponds to one device instance, and is described 103 PCMCIA card corresponds to one device instance, and is described
104 by one dev_link_t structure (defined in ds.h). 104 by one struct pcmcia_device structure (defined in ds.h).
105 105
106 You may not want to use a linked list for this -- for example, the 106 You may not want to use a linked list for this -- for example, the
107 memory card driver uses an array of dev_link_t pointers, where minor 107 memory card driver uses an array of struct pcmcia_device pointers, where minor
108 device numbers are used to derive the corresponding array index. 108 device numbers are used to derive the corresponding array index.
109*/ 109*/
110 110
@@ -114,7 +114,7 @@ static void airo_detach(struct pcmcia_device *p_dev);
114 example, ethernet cards, modems). In other cases, there may be 114 example, ethernet cards, modems). In other cases, there may be
115 many actual or logical devices (SCSI adapters, memory cards with 115 many actual or logical devices (SCSI adapters, memory cards with
116 multiple partitions). The dev_node_t structures need to be kept 116 multiple partitions). The dev_node_t structures need to be kept
117 in a linked list starting at the 'dev' field of a dev_link_t 117 in a linked list starting at the 'dev' field of a struct pcmcia_device
118 structure. We allocate them in the card's private data structure, 118 structure. We allocate them in the card's private data structure,
119 because they generally shouldn't be allocated dynamically. 119 because they generally shouldn't be allocated dynamically.
120 120
@@ -141,24 +141,16 @@ typedef struct local_info_t {
141 141
142 ======================================================================*/ 142 ======================================================================*/
143 143
144static int airo_attach(struct pcmcia_device *p_dev) 144static int airo_probe(struct pcmcia_device *p_dev)
145{ 145{
146 dev_link_t *link;
147 local_info_t *local; 146 local_info_t *local;
148 147
149 DEBUG(0, "airo_attach()\n"); 148 DEBUG(0, "airo_attach()\n");
150 149
151 /* Initialize the dev_link_t structure */
152 link = kzalloc(sizeof(struct dev_link_t), GFP_KERNEL);
153 if (!link) {
154 printk(KERN_ERR "airo_cs: no memory for new device\n");
155 return -ENOMEM;
156 }
157
158 /* Interrupt setup */ 150 /* Interrupt setup */
159 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 151 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
160 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 152 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
161 link->irq.Handler = NULL; 153 p_dev->irq.Handler = NULL;
162 154
163 /* 155 /*
164 General socket configuration defaults can go here. In this 156 General socket configuration defaults can go here. In this
@@ -167,26 +159,18 @@ static int airo_attach(struct pcmcia_device *p_dev)
167 and attributes of IO windows) are fixed by the nature of the 159 and attributes of IO windows) are fixed by the nature of the
168 device, and can be hard-wired here. 160 device, and can be hard-wired here.
169 */ 161 */
170 link->conf.Attributes = 0; 162 p_dev->conf.Attributes = 0;
171 link->conf.Vcc = 50; 163 p_dev->conf.IntType = INT_MEMORY_AND_IO;
172 link->conf.IntType = INT_MEMORY_AND_IO;
173 164
174 /* Allocate space for private device-specific data */ 165 /* Allocate space for private device-specific data */
175 local = kzalloc(sizeof(local_info_t), GFP_KERNEL); 166 local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
176 if (!local) { 167 if (!local) {
177 printk(KERN_ERR "airo_cs: no memory for new device\n"); 168 printk(KERN_ERR "airo_cs: no memory for new device\n");
178 kfree (link);
179 return -ENOMEM; 169 return -ENOMEM;
180 } 170 }
181 link->priv = local; 171 p_dev->priv = local;
182 172
183 link->handle = p_dev; 173 return airo_config(p_dev);
184 p_dev->instance = link;
185
186 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
187 airo_config(link);
188
189 return 0;
190} /* airo_attach */ 174} /* airo_attach */
191 175
192/*====================================================================== 176/*======================================================================
@@ -198,14 +182,11 @@ static int airo_attach(struct pcmcia_device *p_dev)
198 182
199 ======================================================================*/ 183 ======================================================================*/
200 184
201static void airo_detach(struct pcmcia_device *p_dev) 185static void airo_detach(struct pcmcia_device *link)
202{ 186{
203 dev_link_t *link = dev_to_instance(p_dev);
204
205 DEBUG(0, "airo_detach(0x%p)\n", link); 187 DEBUG(0, "airo_detach(0x%p)\n", link);
206 188
207 if (link->state & DEV_CONFIG) 189 airo_release(link);
208 airo_release(link);
209 190
210 if ( ((local_info_t*)link->priv)->eth_dev ) { 191 if ( ((local_info_t*)link->priv)->eth_dev ) {
211 stop_airo_card( ((local_info_t*)link->priv)->eth_dev, 0 ); 192 stop_airo_card( ((local_info_t*)link->priv)->eth_dev, 0 );
@@ -213,7 +194,6 @@ static void airo_detach(struct pcmcia_device *p_dev)
213 ((local_info_t*)link->priv)->eth_dev = NULL; 194 ((local_info_t*)link->priv)->eth_dev = NULL;
214 195
215 kfree(link->priv); 196 kfree(link->priv);
216 kfree(link);
217} /* airo_detach */ 197} /* airo_detach */
218 198
219/*====================================================================== 199/*======================================================================
@@ -227,9 +207,8 @@ static void airo_detach(struct pcmcia_device *p_dev)
227#define CS_CHECK(fn, ret) \ 207#define CS_CHECK(fn, ret) \
228do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 208do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
229 209
230static void airo_config(dev_link_t *link) 210static int airo_config(struct pcmcia_device *link)
231{ 211{
232 client_handle_t handle;
233 tuple_t tuple; 212 tuple_t tuple;
234 cisparse_t parse; 213 cisparse_t parse;
235 local_info_t *dev; 214 local_info_t *dev;
@@ -237,8 +216,7 @@ static void airo_config(dev_link_t *link)
237 u_char buf[64]; 216 u_char buf[64];
238 win_req_t req; 217 win_req_t req;
239 memreq_t map; 218 memreq_t map;
240 219
241 handle = link->handle;
242 dev = link->priv; 220 dev = link->priv;
243 221
244 DEBUG(0, "airo_config(0x%p)\n", link); 222 DEBUG(0, "airo_config(0x%p)\n", link);
@@ -252,15 +230,12 @@ static void airo_config(dev_link_t *link)
252 tuple.TupleData = buf; 230 tuple.TupleData = buf;
253 tuple.TupleDataMax = sizeof(buf); 231 tuple.TupleDataMax = sizeof(buf);
254 tuple.TupleOffset = 0; 232 tuple.TupleOffset = 0;
255 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 233 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
256 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 234 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
257 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 235 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
258 link->conf.ConfigBase = parse.config.base; 236 link->conf.ConfigBase = parse.config.base;
259 link->conf.Present = parse.config.rmask[0]; 237 link->conf.Present = parse.config.rmask[0];
260 238
261 /* Configure card */
262 link->state |= DEV_CONFIG;
263
264 /* 239 /*
265 In this loop, we scan the CIS for configuration table entries, 240 In this loop, we scan the CIS for configuration table entries,
266 each of which describes a valid card configuration, including 241 each of which describes a valid card configuration, including
@@ -274,12 +249,12 @@ static void airo_config(dev_link_t *link)
274 will only use the CIS to fill in implementation-defined details. 249 will only use the CIS to fill in implementation-defined details.
275 */ 250 */
276 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 251 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
277 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 252 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
278 while (1) { 253 while (1) {
279 cistpl_cftable_entry_t dflt = { 0 }; 254 cistpl_cftable_entry_t dflt = { 0 };
280 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry); 255 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
281 if (pcmcia_get_tuple_data(handle, &tuple) != 0 || 256 if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
282 pcmcia_parse_tuple(handle, &tuple, &parse) != 0) 257 pcmcia_parse_tuple(link, &tuple, &parse) != 0)
283 goto next_entry; 258 goto next_entry;
284 259
285 if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg; 260 if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg;
@@ -294,16 +269,11 @@ static void airo_config(dev_link_t *link)
294 269
295 /* Use power settings for Vcc and Vpp if present */ 270 /* Use power settings for Vcc and Vpp if present */
296 /* Note that the CIS values need to be rescaled */ 271 /* Note that the CIS values need to be rescaled */
297 if (cfg->vcc.present & (1<<CISTPL_POWER_VNOM))
298 link->conf.Vcc = cfg->vcc.param[CISTPL_POWER_VNOM]/10000;
299 else if (dflt.vcc.present & (1<<CISTPL_POWER_VNOM))
300 link->conf.Vcc = dflt.vcc.param[CISTPL_POWER_VNOM]/10000;
301
302 if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM)) 272 if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM))
303 link->conf.Vpp1 = link->conf.Vpp2 = 273 link->conf.Vpp =
304 cfg->vpp1.param[CISTPL_POWER_VNOM]/10000; 274 cfg->vpp1.param[CISTPL_POWER_VNOM]/10000;
305 else if (dflt.vpp1.present & (1<<CISTPL_POWER_VNOM)) 275 else if (dflt.vpp1.present & (1<<CISTPL_POWER_VNOM))
306 link->conf.Vpp1 = link->conf.Vpp2 = 276 link->conf.Vpp =
307 dflt.vpp1.param[CISTPL_POWER_VNOM]/10000; 277 dflt.vpp1.param[CISTPL_POWER_VNOM]/10000;
308 278
309 /* Do we need to allocate an interrupt? */ 279 /* Do we need to allocate an interrupt? */
@@ -329,12 +299,12 @@ static void airo_config(dev_link_t *link)
329 } 299 }
330 300
331 /* This reserves IO space but doesn't actually enable it */ 301 /* This reserves IO space but doesn't actually enable it */
332 if (pcmcia_request_io(link->handle, &link->io) != 0) 302 if (pcmcia_request_io(link, &link->io) != 0)
333 goto next_entry; 303 goto next_entry;
334 304
335 /* 305 /*
336 Now set up a common memory window, if needed. There is room 306 Now set up a common memory window, if needed. There is room
337 in the dev_link_t structure for one memory window handle, 307 in the struct pcmcia_device structure for one memory window handle,
338 but if the base addresses need to be saved, or if multiple 308 but if the base addresses need to be saved, or if multiple
339 windows are needed, the info should go in the private data 309 windows are needed, the info should go in the private data
340 structure for this device. 310 structure for this device.
@@ -350,7 +320,7 @@ static void airo_config(dev_link_t *link)
350 req.Base = mem->win[0].host_addr; 320 req.Base = mem->win[0].host_addr;
351 req.Size = mem->win[0].len; 321 req.Size = mem->win[0].len;
352 req.AccessSpeed = 0; 322 req.AccessSpeed = 0;
353 if (pcmcia_request_window(&link->handle, &req, &link->win) != 0) 323 if (pcmcia_request_window(&link, &req, &link->win) != 0)
354 goto next_entry; 324 goto next_entry;
355 map.Page = 0; map.CardOffset = mem->win[0].card_addr; 325 map.Page = 0; map.CardOffset = mem->win[0].card_addr;
356 if (pcmcia_map_mem_page(link->win, &map) != 0) 326 if (pcmcia_map_mem_page(link->win, &map) != 0)
@@ -360,7 +330,7 @@ static void airo_config(dev_link_t *link)
360 break; 330 break;
361 331
362 next_entry: 332 next_entry:
363 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple)); 333 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
364 } 334 }
365 335
366 /* 336 /*
@@ -369,33 +339,32 @@ static void airo_config(dev_link_t *link)
369 irq structure is initialized. 339 irq structure is initialized.
370 */ 340 */
371 if (link->conf.Attributes & CONF_ENABLE_IRQ) 341 if (link->conf.Attributes & CONF_ENABLE_IRQ)
372 CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq)); 342 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
373 343
374 /* 344 /*
375 This actually configures the PCMCIA socket -- setting up 345 This actually configures the PCMCIA socket -- setting up
376 the I/O windows and the interrupt mapping, and putting the 346 the I/O windows and the interrupt mapping, and putting the
377 card and host interface into "Memory and IO" mode. 347 card and host interface into "Memory and IO" mode.
378 */ 348 */
379 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf)); 349 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
380 ((local_info_t*)link->priv)->eth_dev = 350 ((local_info_t*)link->priv)->eth_dev =
381 init_airo_card( link->irq.AssignedIRQ, 351 init_airo_card( link->irq.AssignedIRQ,
382 link->io.BasePort1, 1, &handle_to_dev(handle) ); 352 link->io.BasePort1, 1, &handle_to_dev(link) );
383 if (!((local_info_t*)link->priv)->eth_dev) goto cs_failed; 353 if (!((local_info_t*)link->priv)->eth_dev) goto cs_failed;
384 354
385 /* 355 /*
386 At this point, the dev_node_t structure(s) need to be 356 At this point, the dev_node_t structure(s) need to be
387 initialized and arranged in a linked list at link->dev. 357 initialized and arranged in a linked list at link->dev_node.
388 */ 358 */
389 strcpy(dev->node.dev_name, ((local_info_t*)link->priv)->eth_dev->name ); 359 strcpy(dev->node.dev_name, ((local_info_t*)link->priv)->eth_dev->name );
390 dev->node.major = dev->node.minor = 0; 360 dev->node.major = dev->node.minor = 0;
391 link->dev = &dev->node; 361 link->dev_node = &dev->node;
392 362
393 /* Finally, report what we've done */ 363 /* Finally, report what we've done */
394 printk(KERN_INFO "%s: index 0x%02x: Vcc %d.%d", 364 printk(KERN_INFO "%s: index 0x%02x: ",
395 dev->node.dev_name, link->conf.ConfigIndex, 365 dev->node.dev_name, link->conf.ConfigIndex);
396 link->conf.Vcc/10, link->conf.Vcc%10); 366 if (link->conf.Vpp)
397 if (link->conf.Vpp1) 367 printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
398 printk(", Vpp %d.%d", link->conf.Vpp1/10, link->conf.Vpp1%10);
399 if (link->conf.Attributes & CONF_ENABLE_IRQ) 368 if (link->conf.Attributes & CONF_ENABLE_IRQ)
400 printk(", irq %d", link->irq.AssignedIRQ); 369 printk(", irq %d", link->irq.AssignedIRQ);
401 if (link->io.NumPorts1) 370 if (link->io.NumPorts1)
@@ -408,14 +377,12 @@ static void airo_config(dev_link_t *link)
408 printk(", mem 0x%06lx-0x%06lx", req.Base, 377 printk(", mem 0x%06lx-0x%06lx", req.Base,
409 req.Base+req.Size-1); 378 req.Base+req.Size-1);
410 printk("\n"); 379 printk("\n");
411 380 return 0;
412 link->state &= ~DEV_CONFIG_PENDING; 381
413 return;
414
415 cs_failed: 382 cs_failed:
416 cs_error(link->handle, last_fn, last_ret); 383 cs_error(link, last_fn, last_ret);
417 airo_release(link); 384 airo_release(link);
418 385 return -ENODEV;
419} /* airo_config */ 386} /* airo_config */
420 387
421/*====================================================================== 388/*======================================================================
@@ -426,51 +393,26 @@ static void airo_config(dev_link_t *link)
426 393
427 ======================================================================*/ 394 ======================================================================*/
428 395
429static void airo_release(dev_link_t *link) 396static void airo_release(struct pcmcia_device *link)
430{ 397{
431 DEBUG(0, "airo_release(0x%p)\n", link); 398 DEBUG(0, "airo_release(0x%p)\n", link);
432 399 pcmcia_disable_device(link);
433 /* Unlink the device chain */
434 link->dev = NULL;
435
436 /*
437 In a normal driver, additional code may be needed to release
438 other kernel data structures associated with this device.
439 */
440
441 /* Don't bother checking to see if these succeed or not */
442 if (link->win)
443 pcmcia_release_window(link->win);
444 pcmcia_release_configuration(link->handle);
445 if (link->io.NumPorts1)
446 pcmcia_release_io(link->handle, &link->io);
447 if (link->irq.AssignedIRQ)
448 pcmcia_release_irq(link->handle, &link->irq);
449 link->state &= ~DEV_CONFIG;
450} 400}
451 401
452static int airo_suspend(struct pcmcia_device *p_dev) 402static int airo_suspend(struct pcmcia_device *link)
453{ 403{
454 dev_link_t *link = dev_to_instance(p_dev);
455 local_info_t *local = link->priv; 404 local_info_t *local = link->priv;
456 405
457 link->state |= DEV_SUSPEND; 406 netif_device_detach(local->eth_dev);
458 if (link->state & DEV_CONFIG) {
459 netif_device_detach(local->eth_dev);
460 pcmcia_release_configuration(link->handle);
461 }
462 407
463 return 0; 408 return 0;
464} 409}
465 410
466static int airo_resume(struct pcmcia_device *p_dev) 411static int airo_resume(struct pcmcia_device *link)
467{ 412{
468 dev_link_t *link = dev_to_instance(p_dev);
469 local_info_t *local = link->priv; 413 local_info_t *local = link->priv;
470 414
471 link->state &= ~DEV_SUSPEND; 415 if (link->open) {
472 if (link->state & DEV_CONFIG) {
473 pcmcia_request_configuration(link->handle, &link->conf);
474 reset_airo_card(local->eth_dev); 416 reset_airo_card(local->eth_dev);
475 netif_device_attach(local->eth_dev); 417 netif_device_attach(local->eth_dev);
476 } 418 }
@@ -492,7 +434,7 @@ static struct pcmcia_driver airo_driver = {
492 .drv = { 434 .drv = {
493 .name = "airo_cs", 435 .name = "airo_cs",
494 }, 436 },
495 .probe = airo_attach, 437 .probe = airo_probe,
496 .remove = airo_detach, 438 .remove = airo_detach,
497 .id_table = airo_ids, 439 .id_table = airo_ids,
498 .suspend = airo_suspend, 440 .suspend = airo_suspend,
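
[Editorial note on the hunks above] The airo changes follow the same pattern as xirc2ps: conf.Vcc is no longer set by the driver, Vpp1/Vpp2 merge into a single conf.Vpp field, and the suspend/resume callbacks drop the DEV_SUSPEND/DEV_CONFIG flag handling along with the release_configuration/request_configuration pair, which the removed lines suggest is now handled by the PCMCIA core around the callbacks. What remains is device-specific work only. A minimal sketch in the xirc2ps style, where link->priv is the net_device; the bar_* names are hypothetical:

	#include <linux/netdevice.h>
	#include <pcmcia/ds.h>

	static int bar_suspend(struct pcmcia_device *link)
	{
		struct net_device *dev = link->priv;

		if (link->open) {
			netif_device_detach(dev);
			/* driver-specific power-down of the hardware would go here */
		}
		return 0;
	}

	static int bar_resume(struct pcmcia_device *link)
	{
		struct net_device *dev = link->priv;

		if (link->open) {
			/* socket configuration has already been restored by the core;
			   driver-specific reset of the hardware would go here */
			netif_device_attach(dev);
		}
		return 0;
	}
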
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index d6f4a5a3e55a..26bf1127524d 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -91,8 +91,8 @@ MODULE_SUPPORTED_DEVICE("Atmel at76c50x PCMCIA cards");
91 event handler. 91 event handler.
92*/ 92*/
93 93
94static void atmel_config(dev_link_t *link); 94static int atmel_config(struct pcmcia_device *link);
95static void atmel_release(dev_link_t *link); 95static void atmel_release(struct pcmcia_device *link);
96 96
97/* 97/*
98 The attach() and detach() entry points are used to create and destroy 98 The attach() and detach() entry points are used to create and destroy
@@ -112,10 +112,10 @@ static void atmel_detach(struct pcmcia_device *p_dev);
112/* 112/*
113 A linked list of "instances" of the atmelnet device. Each actual 113 A linked list of "instances" of the atmelnet device. Each actual
114 PCMCIA card corresponds to one device instance, and is described 114 PCMCIA card corresponds to one device instance, and is described
115 by one dev_link_t structure (defined in ds.h). 115 by one struct pcmcia_device structure (defined in ds.h).
116 116
117 You may not want to use a linked list for this -- for example, the 117 You may not want to use a linked list for this -- for example, the
118 memory card driver uses an array of dev_link_t pointers, where minor 118 memory card driver uses an array of struct pcmcia_device pointers, where minor
119 device numbers are used to derive the corresponding array index. 119 device numbers are used to derive the corresponding array index.
120*/ 120*/
121 121
@@ -125,7 +125,7 @@ static void atmel_detach(struct pcmcia_device *p_dev);
125 example, ethernet cards, modems). In other cases, there may be 125 example, ethernet cards, modems). In other cases, there may be
126 many actual or logical devices (SCSI adapters, memory cards with 126 many actual or logical devices (SCSI adapters, memory cards with
127 multiple partitions). The dev_node_t structures need to be kept 127 multiple partitions). The dev_node_t structures need to be kept
128 in a linked list starting at the 'dev' field of a dev_link_t 128 in a linked list starting at the 'dev' field of a struct pcmcia_device
129 structure. We allocate them in the card's private data structure, 129 structure. We allocate them in the card's private data structure,
130 because they generally shouldn't be allocated dynamically. 130 because they generally shouldn't be allocated dynamically.
131 131
@@ -152,24 +152,16 @@ typedef struct local_info_t {
152 152
153 ======================================================================*/ 153 ======================================================================*/
154 154
155static int atmel_attach(struct pcmcia_device *p_dev) 155static int atmel_probe(struct pcmcia_device *p_dev)
156{ 156{
157 dev_link_t *link;
158 local_info_t *local; 157 local_info_t *local;
159 158
160 DEBUG(0, "atmel_attach()\n"); 159 DEBUG(0, "atmel_attach()\n");
161 160
162 /* Initialize the dev_link_t structure */
163 link = kzalloc(sizeof(struct dev_link_t), GFP_KERNEL);
164 if (!link) {
165 printk(KERN_ERR "atmel_cs: no memory for new device\n");
166 return -ENOMEM;
167 }
168
169 /* Interrupt setup */ 161 /* Interrupt setup */
170 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 162 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
171 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 163 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
172 link->irq.Handler = NULL; 164 p_dev->irq.Handler = NULL;
173 165
174 /* 166 /*
175 General socket configuration defaults can go here. In this 167 General socket configuration defaults can go here. In this
@@ -178,26 +170,18 @@ static int atmel_attach(struct pcmcia_device *p_dev)
178 and attributes of IO windows) are fixed by the nature of the 170 and attributes of IO windows) are fixed by the nature of the
179 device, and can be hard-wired here. 171 device, and can be hard-wired here.
180 */ 172 */
181 link->conf.Attributes = 0; 173 p_dev->conf.Attributes = 0;
182 link->conf.Vcc = 50; 174 p_dev->conf.IntType = INT_MEMORY_AND_IO;
183 link->conf.IntType = INT_MEMORY_AND_IO;
184 175
185 /* Allocate space for private device-specific data */ 176 /* Allocate space for private device-specific data */
186 local = kzalloc(sizeof(local_info_t), GFP_KERNEL); 177 local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
187 if (!local) { 178 if (!local) {
188 printk(KERN_ERR "atmel_cs: no memory for new device\n"); 179 printk(KERN_ERR "atmel_cs: no memory for new device\n");
189 kfree (link);
190 return -ENOMEM; 180 return -ENOMEM;
191 } 181 }
192 link->priv = local; 182 p_dev->priv = local;
193 183
194 link->handle = p_dev; 184 return atmel_config(p_dev);
195 p_dev->instance = link;
196
197 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
198 atmel_config(link);
199
200 return 0;
201} /* atmel_attach */ 185} /* atmel_attach */
202 186
203/*====================================================================== 187/*======================================================================
@@ -209,17 +193,13 @@ static int atmel_attach(struct pcmcia_device *p_dev)
209 193
210 ======================================================================*/ 194 ======================================================================*/
211 195
212static void atmel_detach(struct pcmcia_device *p_dev) 196static void atmel_detach(struct pcmcia_device *link)
213{ 197{
214 dev_link_t *link = dev_to_instance(p_dev);
215
216 DEBUG(0, "atmel_detach(0x%p)\n", link); 198 DEBUG(0, "atmel_detach(0x%p)\n", link);
217 199
218 if (link->state & DEV_CONFIG) 200 atmel_release(link);
219 atmel_release(link);
220 201
221 kfree(link->priv); 202 kfree(link->priv);
222 kfree(link);
223} 203}
224 204
225/*====================================================================== 205/*======================================================================
@@ -236,19 +216,17 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
236/* Call-back function to interrogate PCMCIA-specific information 216/* Call-back function to interrogate PCMCIA-specific information
237 about the current existance of the card */ 217 about the current existance of the card */
238static int card_present(void *arg) 218static int card_present(void *arg)
239{ 219{
240 dev_link_t *link = (dev_link_t *)arg; 220 struct pcmcia_device *link = (struct pcmcia_device *)arg;
241 if (link->state & DEV_SUSPEND) 221
242 return 0; 222 if (pcmcia_dev_present(link))
243 else if (link->state & DEV_PRESENT)
244 return 1; 223 return 1;
245 224
246 return 0; 225 return 0;
247} 226}
248 227
249static void atmel_config(dev_link_t *link) 228static int atmel_config(struct pcmcia_device *link)
250{ 229{
251 client_handle_t handle;
252 tuple_t tuple; 230 tuple_t tuple;
253 cisparse_t parse; 231 cisparse_t parse;
254 local_info_t *dev; 232 local_info_t *dev;
@@ -256,9 +234,8 @@ static void atmel_config(dev_link_t *link)
256 u_char buf[64]; 234 u_char buf[64];
257 struct pcmcia_device_id *did; 235 struct pcmcia_device_id *did;
258 236
259 handle = link->handle;
260 dev = link->priv; 237 dev = link->priv;
261 did = handle_to_dev(handle).driver_data; 238 did = handle_to_dev(link).driver_data;
262 239
263 DEBUG(0, "atmel_config(0x%p)\n", link); 240 DEBUG(0, "atmel_config(0x%p)\n", link);
264 241
@@ -272,15 +249,12 @@ static void atmel_config(dev_link_t *link)
272 registers. 249 registers.
273 */ 250 */
274 tuple.DesiredTuple = CISTPL_CONFIG; 251 tuple.DesiredTuple = CISTPL_CONFIG;
275 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 252 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
276 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 253 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
277 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 254 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
278 link->conf.ConfigBase = parse.config.base; 255 link->conf.ConfigBase = parse.config.base;
279 link->conf.Present = parse.config.rmask[0]; 256 link->conf.Present = parse.config.rmask[0];
280 257
281 /* Configure card */
282 link->state |= DEV_CONFIG;
283
284 /* 258 /*
285 In this loop, we scan the CIS for configuration table entries, 259 In this loop, we scan the CIS for configuration table entries,
286 each of which describes a valid card configuration, including 260 each of which describes a valid card configuration, including
@@ -294,12 +268,12 @@ static void atmel_config(dev_link_t *link)
294 will only use the CIS to fill in implementation-defined details. 268 will only use the CIS to fill in implementation-defined details.
295 */ 269 */
296 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 270 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
297 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 271 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
298 while (1) { 272 while (1) {
299 cistpl_cftable_entry_t dflt = { 0 }; 273 cistpl_cftable_entry_t dflt = { 0 };
300 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry); 274 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
301 if (pcmcia_get_tuple_data(handle, &tuple) != 0 || 275 if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
302 pcmcia_parse_tuple(handle, &tuple, &parse) != 0) 276 pcmcia_parse_tuple(link, &tuple, &parse) != 0)
303 goto next_entry; 277 goto next_entry;
304 278
305 if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg; 279 if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg;
@@ -314,16 +288,11 @@ static void atmel_config(dev_link_t *link)
314 288
315 /* Use power settings for Vcc and Vpp if present */ 289 /* Use power settings for Vcc and Vpp if present */
316 /* Note that the CIS values need to be rescaled */ 290 /* Note that the CIS values need to be rescaled */
317 if (cfg->vcc.present & (1<<CISTPL_POWER_VNOM))
318 link->conf.Vcc = cfg->vcc.param[CISTPL_POWER_VNOM]/10000;
319 else if (dflt.vcc.present & (1<<CISTPL_POWER_VNOM))
320 link->conf.Vcc = dflt.vcc.param[CISTPL_POWER_VNOM]/10000;
321
322 if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM)) 291 if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM))
323 link->conf.Vpp1 = link->conf.Vpp2 = 292 link->conf.Vpp =
324 cfg->vpp1.param[CISTPL_POWER_VNOM]/10000; 293 cfg->vpp1.param[CISTPL_POWER_VNOM]/10000;
325 else if (dflt.vpp1.present & (1<<CISTPL_POWER_VNOM)) 294 else if (dflt.vpp1.present & (1<<CISTPL_POWER_VNOM))
326 link->conf.Vpp1 = link->conf.Vpp2 = 295 link->conf.Vpp =
327 dflt.vpp1.param[CISTPL_POWER_VNOM]/10000; 296 dflt.vpp1.param[CISTPL_POWER_VNOM]/10000;
328 297
329 /* Do we need to allocate an interrupt? */ 298 /* Do we need to allocate an interrupt? */
@@ -349,14 +318,14 @@ static void atmel_config(dev_link_t *link)
349 } 318 }
350 319
351 /* This reserves IO space but doesn't actually enable it */ 320 /* This reserves IO space but doesn't actually enable it */
352 if (pcmcia_request_io(link->handle, &link->io) != 0) 321 if (pcmcia_request_io(link, &link->io) != 0)
353 goto next_entry; 322 goto next_entry;
354 323
355 /* If we got this far, we're cool! */ 324 /* If we got this far, we're cool! */
356 break; 325 break;
357 326
358 next_entry: 327 next_entry:
359 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple)); 328 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
360 } 329 }
361 330
362 /* 331 /*
@@ -365,14 +334,14 @@ static void atmel_config(dev_link_t *link)
365 irq structure is initialized. 334 irq structure is initialized.
366 */ 335 */
367 if (link->conf.Attributes & CONF_ENABLE_IRQ) 336 if (link->conf.Attributes & CONF_ENABLE_IRQ)
368 CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq)); 337 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
369 338
370 /* 339 /*
371 This actually configures the PCMCIA socket -- setting up 340 This actually configures the PCMCIA socket -- setting up
372 the I/O windows and the interrupt mapping, and putting the 341 the I/O windows and the interrupt mapping, and putting the
373 card and host interface into "Memory and IO" mode. 342 card and host interface into "Memory and IO" mode.
374 */ 343 */
375 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf)); 344 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
376 345
377 if (link->irq.AssignedIRQ == 0) { 346 if (link->irq.AssignedIRQ == 0) {
378 printk(KERN_ALERT 347 printk(KERN_ALERT
@@ -384,7 +353,7 @@ static void atmel_config(dev_link_t *link)
384 init_atmel_card(link->irq.AssignedIRQ, 353 init_atmel_card(link->irq.AssignedIRQ,
385 link->io.BasePort1, 354 link->io.BasePort1,
386 did ? did->driver_info : ATMEL_FW_TYPE_NONE, 355 did ? did->driver_info : ATMEL_FW_TYPE_NONE,
387 &handle_to_dev(handle), 356 &handle_to_dev(link),
388 card_present, 357 card_present,
389 link); 358 link);
390 if (!((local_info_t*)link->priv)->eth_dev) 359 if (!((local_info_t*)link->priv)->eth_dev)
@@ -393,18 +362,18 @@ static void atmel_config(dev_link_t *link)
393 362
394 /* 363 /*
395 At this point, the dev_node_t structure(s) need to be 364 At this point, the dev_node_t structure(s) need to be
396 initialized and arranged in a linked list at link->dev. 365 initialized and arranged in a linked list at link->dev_node.
397 */ 366 */
398 strcpy(dev->node.dev_name, ((local_info_t*)link->priv)->eth_dev->name ); 367 strcpy(dev->node.dev_name, ((local_info_t*)link->priv)->eth_dev->name );
399 dev->node.major = dev->node.minor = 0; 368 dev->node.major = dev->node.minor = 0;
400 link->dev = &dev->node; 369 link->dev_node = &dev->node;
401 370
402 link->state &= ~DEV_CONFIG_PENDING; 371 return 0;
403 return; 372
404
405 cs_failed: 373 cs_failed:
406 cs_error(link->handle, last_fn, last_ret); 374 cs_error(link, last_fn, last_ret);
407 atmel_release(link); 375 atmel_release(link);
376 return -ENODEV;
408} 377}
409 378
410/*====================================================================== 379/*======================================================================
@@ -415,53 +384,34 @@ static void atmel_config(dev_link_t *link)
415 384
416 ======================================================================*/ 385 ======================================================================*/
417 386
418static void atmel_release(dev_link_t *link) 387static void atmel_release(struct pcmcia_device *link)
419{ 388{
420 struct net_device *dev = ((local_info_t*)link->priv)->eth_dev; 389 struct net_device *dev = ((local_info_t*)link->priv)->eth_dev;
421 390
422 DEBUG(0, "atmel_release(0x%p)\n", link); 391 DEBUG(0, "atmel_release(0x%p)\n", link);
423 392
424 /* Unlink the device chain */ 393 if (dev)
425 link->dev = NULL;
426
427 if (dev)
428 stop_atmel_card(dev); 394 stop_atmel_card(dev);
429 ((local_info_t*)link->priv)->eth_dev = NULL; 395 ((local_info_t*)link->priv)->eth_dev = NULL;
430 396
431 /* Don't bother checking to see if these succeed or not */ 397 pcmcia_disable_device(link);
432 pcmcia_release_configuration(link->handle);
433 if (link->io.NumPorts1)
434 pcmcia_release_io(link->handle, &link->io);
435 if (link->irq.AssignedIRQ)
436 pcmcia_release_irq(link->handle, &link->irq);
437 link->state &= ~DEV_CONFIG;
438} 398}
439 399
440static int atmel_suspend(struct pcmcia_device *dev) 400static int atmel_suspend(struct pcmcia_device *link)
441{ 401{
442 dev_link_t *link = dev_to_instance(dev);
443 local_info_t *local = link->priv; 402 local_info_t *local = link->priv;
444 403
445 link->state |= DEV_SUSPEND; 404 netif_device_detach(local->eth_dev);
446 if (link->state & DEV_CONFIG) {
447 netif_device_detach(local->eth_dev);
448 pcmcia_release_configuration(link->handle);
449 }
450 405
451 return 0; 406 return 0;
452} 407}
453 408
454static int atmel_resume(struct pcmcia_device *dev) 409static int atmel_resume(struct pcmcia_device *link)
455{ 410{
456 dev_link_t *link = dev_to_instance(dev);
457 local_info_t *local = link->priv; 411 local_info_t *local = link->priv;
458 412
459 link->state &= ~DEV_SUSPEND; 413 atmel_open(local->eth_dev);
460 if (link->state & DEV_CONFIG) { 414 netif_device_attach(local->eth_dev);
461 pcmcia_request_configuration(link->handle, &link->conf);
462 atmel_open(local->eth_dev);
463 netif_device_attach(local->eth_dev);
464 }
465 415
466 return 0; 416 return 0;
467} 417}
@@ -515,7 +465,7 @@ static struct pcmcia_driver atmel_driver = {
515 .drv = { 465 .drv = {
516 .name = "atmel_cs", 466 .name = "atmel_cs",
517 }, 467 },
518 .probe = atmel_attach, 468 .probe = atmel_probe,
519 .remove = atmel_detach, 469 .remove = atmel_detach,
520 .id_table = atmel_ids, 470 .id_table = atmel_ids,
521 .suspend = atmel_suspend, 471 .suspend = atmel_suspend,
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index d335b250923a..55bed923fbe9 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -42,7 +42,7 @@ MODULE_PARM_DESC(ignore_cis_vcc, "Ignore broken CIS VCC entry");
42/* struct local_info::hw_priv */ 42/* struct local_info::hw_priv */
43struct hostap_cs_priv { 43struct hostap_cs_priv {
44 dev_node_t node; 44 dev_node_t node;
45 dev_link_t *link; 45 struct pcmcia_device *link;
46 int sandisk_connectplus; 46 int sandisk_connectplus;
47}; 47};
48 48
@@ -204,15 +204,13 @@ static int hfa384x_to_bap(struct net_device *dev, u16 bap, void *buf, int len)
204 204
205static void prism2_detach(struct pcmcia_device *p_dev); 205static void prism2_detach(struct pcmcia_device *p_dev);
206static void prism2_release(u_long arg); 206static void prism2_release(u_long arg);
207static int prism2_config(dev_link_t *link); 207static int prism2_config(struct pcmcia_device *link);
208 208
209 209
210static int prism2_pccard_card_present(local_info_t *local) 210static int prism2_pccard_card_present(local_info_t *local)
211{ 211{
212 struct hostap_cs_priv *hw_priv = local->hw_priv; 212 struct hostap_cs_priv *hw_priv = local->hw_priv;
213 if (hw_priv != NULL && hw_priv->link != NULL && 213 if (hw_priv != NULL && hw_priv->link != NULL && pcmcia_dev_present(hw_priv->link))
214 ((hw_priv->link->state & (DEV_PRESENT | DEV_CONFIG)) ==
215 (DEV_PRESENT | DEV_CONFIG)))
216 return 1; 214 return 1;
217 return 0; 215 return 0;
218} 216}
@@ -237,7 +235,7 @@ static void sandisk_set_iobase(local_info_t *local)
237 reg.Action = CS_WRITE; 235 reg.Action = CS_WRITE;
238 reg.Offset = 0x10; /* 0x3f0 IO base 1 */ 236 reg.Offset = 0x10; /* 0x3f0 IO base 1 */
239 reg.Value = hw_priv->link->io.BasePort1 & 0x00ff; 237 reg.Value = hw_priv->link->io.BasePort1 & 0x00ff;
240 res = pcmcia_access_configuration_register(hw_priv->link->handle, 238 res = pcmcia_access_configuration_register(hw_priv->link,
241 &reg); 239 &reg);
242 if (res != CS_SUCCESS) { 240 if (res != CS_SUCCESS) {
243 printk(KERN_DEBUG "Prism3 SanDisk - failed to set I/O base 0 -" 241 printk(KERN_DEBUG "Prism3 SanDisk - failed to set I/O base 0 -"
@@ -249,7 +247,7 @@ static void sandisk_set_iobase(local_info_t *local)
249 reg.Action = CS_WRITE; 247 reg.Action = CS_WRITE;
250 reg.Offset = 0x12; /* 0x3f2 IO base 2 */ 248 reg.Offset = 0x12; /* 0x3f2 IO base 2 */
251 reg.Value = (hw_priv->link->io.BasePort1 & 0xff00) >> 8; 249 reg.Value = (hw_priv->link->io.BasePort1 & 0xff00) >> 8;
252 res = pcmcia_access_configuration_register(hw_priv->link->handle, 250 res = pcmcia_access_configuration_register(hw_priv->link,
253 &reg); 251 &reg);
254 if (res != CS_SUCCESS) { 252 if (res != CS_SUCCESS) {
255 printk(KERN_DEBUG "Prism3 SanDisk - failed to set I/O base 1 -" 253 printk(KERN_DEBUG "Prism3 SanDisk - failed to set I/O base 1 -"
@@ -301,9 +299,9 @@ static int sandisk_enable_wireless(struct net_device *dev)
301 tuple.TupleData = buf; 299 tuple.TupleData = buf;
302 tuple.TupleDataMax = sizeof(buf); 300 tuple.TupleDataMax = sizeof(buf);
303 tuple.TupleOffset = 0; 301 tuple.TupleOffset = 0;
304 if (pcmcia_get_first_tuple(hw_priv->link->handle, &tuple) || 302 if (pcmcia_get_first_tuple(hw_priv->link, &tuple) ||
305 pcmcia_get_tuple_data(hw_priv->link->handle, &tuple) || 303 pcmcia_get_tuple_data(hw_priv->link, &tuple) ||
306 pcmcia_parse_tuple(hw_priv->link->handle, &tuple, parse) || 304 pcmcia_parse_tuple(hw_priv->link, &tuple, parse) ||
307 parse->manfid.manf != 0xd601 || parse->manfid.card != 0x0101) { 305 parse->manfid.manf != 0xd601 || parse->manfid.card != 0x0101) {
308 /* No SanDisk manfid found */ 306 /* No SanDisk manfid found */
309 ret = -ENODEV; 307 ret = -ENODEV;
@@ -311,9 +309,9 @@ static int sandisk_enable_wireless(struct net_device *dev)
311 } 309 }
312 310
313 tuple.DesiredTuple = CISTPL_LONGLINK_MFC; 311 tuple.DesiredTuple = CISTPL_LONGLINK_MFC;
314 if (pcmcia_get_first_tuple(hw_priv->link->handle, &tuple) || 312 if (pcmcia_get_first_tuple(hw_priv->link, &tuple) ||
315 pcmcia_get_tuple_data(hw_priv->link->handle, &tuple) || 313 pcmcia_get_tuple_data(hw_priv->link, &tuple) ||
316 pcmcia_parse_tuple(hw_priv->link->handle, &tuple, parse) || 314 pcmcia_parse_tuple(hw_priv->link, &tuple, parse) ||
317 parse->longlink_mfc.nfn < 2) { 315 parse->longlink_mfc.nfn < 2) {
318 /* No multi-function links found */ 316 /* No multi-function links found */
319 ret = -ENODEV; 317 ret = -ENODEV;
@@ -328,7 +326,7 @@ static int sandisk_enable_wireless(struct net_device *dev)
328 reg.Action = CS_WRITE; 326 reg.Action = CS_WRITE;
329 reg.Offset = CISREG_COR; 327 reg.Offset = CISREG_COR;
330 reg.Value = COR_SOFT_RESET; 328 reg.Value = COR_SOFT_RESET;
331 res = pcmcia_access_configuration_register(hw_priv->link->handle, 329 res = pcmcia_access_configuration_register(hw_priv->link,
332 &reg); 330 &reg);
333 if (res != CS_SUCCESS) { 331 if (res != CS_SUCCESS) {
334 printk(KERN_DEBUG "%s: SanDisk - COR sreset failed (%d)\n", 332 printk(KERN_DEBUG "%s: SanDisk - COR sreset failed (%d)\n",
@@ -345,7 +343,7 @@ static int sandisk_enable_wireless(struct net_device *dev)
345 * will be enabled during the first cor_sreset call. 343 * will be enabled during the first cor_sreset call.
346 */ 344 */
347 reg.Value = COR_LEVEL_REQ | 0x8 | COR_ADDR_DECODE | COR_FUNC_ENA; 345 reg.Value = COR_LEVEL_REQ | 0x8 | COR_ADDR_DECODE | COR_FUNC_ENA;
348 res = pcmcia_access_configuration_register(hw_priv->link->handle, 346 res = pcmcia_access_configuration_register(hw_priv->link,
349 &reg); 347 &reg);
350 if (res != CS_SUCCESS) { 348 if (res != CS_SUCCESS) {
351 printk(KERN_DEBUG "%s: SanDisk - COR sreset failed (%d)\n", 349 printk(KERN_DEBUG "%s: SanDisk - COR sreset failed (%d)\n",
@@ -380,7 +378,7 @@ static void prism2_pccard_cor_sreset(local_info_t *local)
380 reg.Action = CS_READ; 378 reg.Action = CS_READ;
381 reg.Offset = CISREG_COR; 379 reg.Offset = CISREG_COR;
382 reg.Value = 0; 380 reg.Value = 0;
383 res = pcmcia_access_configuration_register(hw_priv->link->handle, 381 res = pcmcia_access_configuration_register(hw_priv->link,
384 &reg); 382 &reg);
385 if (res != CS_SUCCESS) { 383 if (res != CS_SUCCESS) {
386 printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 1 (%d)\n", 384 printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 1 (%d)\n",
@@ -392,7 +390,7 @@ static void prism2_pccard_cor_sreset(local_info_t *local)
392 390
393 reg.Action = CS_WRITE; 391 reg.Action = CS_WRITE;
394 reg.Value |= COR_SOFT_RESET; 392 reg.Value |= COR_SOFT_RESET;
395 res = pcmcia_access_configuration_register(hw_priv->link->handle, 393 res = pcmcia_access_configuration_register(hw_priv->link,
396 &reg); 394 &reg);
397 if (res != CS_SUCCESS) { 395 if (res != CS_SUCCESS) {
398 printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 2 (%d)\n", 396 printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 2 (%d)\n",
@@ -405,7 +403,7 @@ static void prism2_pccard_cor_sreset(local_info_t *local)
405 reg.Value &= ~COR_SOFT_RESET; 403 reg.Value &= ~COR_SOFT_RESET;
406 if (hw_priv->sandisk_connectplus) 404 if (hw_priv->sandisk_connectplus)
407 reg.Value |= COR_IREQ_ENA; 405 reg.Value |= COR_IREQ_ENA;
408 res = pcmcia_access_configuration_register(hw_priv->link->handle, 406 res = pcmcia_access_configuration_register(hw_priv->link,
409 &reg); 407 &reg);
410 if (res != CS_SUCCESS) { 408 if (res != CS_SUCCESS) {
411 printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 3 (%d)\n", 409 printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 3 (%d)\n",
@@ -439,7 +437,7 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
439 reg.Action = CS_READ; 437 reg.Action = CS_READ;
440 reg.Offset = CISREG_COR; 438 reg.Offset = CISREG_COR;
441 reg.Value = 0; 439 reg.Value = 0;
442 res = pcmcia_access_configuration_register(hw_priv->link->handle, 440 res = pcmcia_access_configuration_register(hw_priv->link,
443 &reg); 441 &reg);
444 if (res != CS_SUCCESS) { 442 if (res != CS_SUCCESS) {
445 printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 1 " 443 printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 1 "
@@ -452,7 +450,7 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
452 450
453 reg.Action = CS_WRITE; 451 reg.Action = CS_WRITE;
454 reg.Value |= COR_SOFT_RESET; 452 reg.Value |= COR_SOFT_RESET;
455 res = pcmcia_access_configuration_register(hw_priv->link->handle, 453 res = pcmcia_access_configuration_register(hw_priv->link,
456 &reg); 454 &reg);
457 if (res != CS_SUCCESS) { 455 if (res != CS_SUCCESS) {
458 printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 2 " 456 printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 2 "
@@ -466,7 +464,7 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
466 reg.Action = CS_WRITE; 464 reg.Action = CS_WRITE;
467 reg.Value = hcr; 465 reg.Value = hcr;
468 reg.Offset = CISREG_CCSR; 466 reg.Offset = CISREG_CCSR;
469 res = pcmcia_access_configuration_register(hw_priv->link->handle, 467 res = pcmcia_access_configuration_register(hw_priv->link,
470 &reg); 468 &reg);
471 if (res != CS_SUCCESS) { 469 if (res != CS_SUCCESS) {
472 printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 3 " 470 printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 3 "
@@ -478,7 +476,7 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
478 reg.Action = CS_WRITE; 476 reg.Action = CS_WRITE;
479 reg.Offset = CISREG_COR; 477 reg.Offset = CISREG_COR;
480 reg.Value = old_cor & ~COR_SOFT_RESET; 478 reg.Value = old_cor & ~COR_SOFT_RESET;
481 res = pcmcia_access_configuration_register(hw_priv->link->handle, 479 res = pcmcia_access_configuration_register(hw_priv->link,
482 &reg); 480 &reg);
483 if (res != CS_SUCCESS) { 481 if (res != CS_SUCCESS) {
484 printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 4 " 482 printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 4 "
@@ -501,40 +499,27 @@ static struct prism2_helper_functions prism2_pccard_funcs =
501 499
502/* allocate local data and register with CardServices 500/* allocate local data and register with CardServices
503 * initialize dev_link structure, but do not configure the card yet */ 501 * initialize dev_link structure, but do not configure the card yet */
504static int prism2_attach(struct pcmcia_device *p_dev) 502static int hostap_cs_probe(struct pcmcia_device *p_dev)
505{ 503{
506 dev_link_t *link; 504 int ret;
507
508 link = kmalloc(sizeof(dev_link_t), GFP_KERNEL);
509 if (link == NULL)
510 return -ENOMEM;
511
512 memset(link, 0, sizeof(dev_link_t));
513 505
514 PDEBUG(DEBUG_HW, "%s: setting Vcc=33 (constant)\n", dev_info); 506 PDEBUG(DEBUG_HW, "%s: setting Vcc=33 (constant)\n", dev_info);
515 link->conf.Vcc = 33; 507 p_dev->conf.IntType = INT_MEMORY_AND_IO;
516 link->conf.IntType = INT_MEMORY_AND_IO;
517
518 link->handle = p_dev;
519 p_dev->instance = link;
520 508
521 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING; 509 ret = prism2_config(p_dev);
522 if (prism2_config(link)) 510 if (ret) {
523 PDEBUG(DEBUG_EXTRA, "prism2_config() failed\n"); 511 PDEBUG(DEBUG_EXTRA, "prism2_config() failed\n");
512 }
524 513
525 return 0; 514 return ret;
526} 515}
527 516
528 517
529static void prism2_detach(struct pcmcia_device *p_dev) 518static void prism2_detach(struct pcmcia_device *link)
530{ 519{
531 dev_link_t *link = dev_to_instance(p_dev);
532
533 PDEBUG(DEBUG_FLOW, "prism2_detach\n"); 520 PDEBUG(DEBUG_FLOW, "prism2_detach\n");
534 521
535 if (link->state & DEV_CONFIG) { 522 prism2_release((u_long)link);
536 prism2_release((u_long)link);
537 }
538 523
539 /* release net devices */ 524 /* release net devices */
540 if (link->priv) { 525 if (link->priv) {
@@ -547,7 +532,6 @@ static void prism2_detach(struct pcmcia_device *p_dev)
547 prism2_free_local_data(dev); 532 prism2_free_local_data(dev);
548 kfree(hw_priv); 533 kfree(hw_priv);
549 } 534 }
550 kfree(link);
551} 535}
552 536
553 537
@@ -558,7 +542,7 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
558do { int ret = (retf); \ 542do { int ret = (retf); \
559if (ret != 0) { \ 543if (ret != 0) { \
560 PDEBUG(DEBUG_EXTRA, "CardServices(" #fn ") returned %d\n", ret); \ 544 PDEBUG(DEBUG_EXTRA, "CardServices(" #fn ") returned %d\n", ret); \
561 cs_error(link->handle, fn, ret); \ 545 cs_error(link, fn, ret); \
562 goto next_entry; \ 546 goto next_entry; \
563} \ 547} \
564} while (0) 548} while (0)
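The CS_CHECK/CFG_CHECK2 helpers themselves change only in that cs_error() now takes the struct pcmcia_device rather than link->handle. A compressed sketch of how the macro reads at a call site after the conversion (error-code names such as RequestIRQ are the Card Services constants already used above):

#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)

static int example_config(struct pcmcia_device *link)
{
        int last_fn, last_ret;

        CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
        CS_CHECK(RequestConfiguration,
                 pcmcia_request_configuration(link, &link->conf));
        return 0;

cs_failed:
        cs_error(link, last_fn, last_ret);      /* was: cs_error(link->handle, ...) */
        return -ENODEV;
}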
@@ -566,7 +550,7 @@ if (ret != 0) { \
566 550
567/* run after a CARD_INSERTION event is received to configure the PCMCIA 551/* run after a CARD_INSERTION event is received to configure the PCMCIA
568 * socket and make the device available to the system */ 552 * socket and make the device available to the system */
569static int prism2_config(dev_link_t *link) 553static int prism2_config(struct pcmcia_device *link)
570{ 554{
571 struct net_device *dev; 555 struct net_device *dev;
572 struct hostap_interface *iface; 556 struct hostap_interface *iface;
@@ -595,27 +579,24 @@ static int prism2_config(dev_link_t *link)
595 tuple.TupleData = buf; 579 tuple.TupleData = buf;
596 tuple.TupleDataMax = sizeof(buf); 580 tuple.TupleDataMax = sizeof(buf);
597 tuple.TupleOffset = 0; 581 tuple.TupleOffset = 0;
598 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link->handle, &tuple)); 582 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
599 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link->handle, &tuple)); 583 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
600 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link->handle, &tuple, parse)); 584 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, parse));
601 link->conf.ConfigBase = parse->config.base; 585 link->conf.ConfigBase = parse->config.base;
602 link->conf.Present = parse->config.rmask[0]; 586 link->conf.Present = parse->config.rmask[0];
603 587
604 CS_CHECK(GetConfigurationInfo, 588 CS_CHECK(GetConfigurationInfo,
605 pcmcia_get_configuration_info(link->handle, &conf)); 589 pcmcia_get_configuration_info(link, &conf));
606 PDEBUG(DEBUG_HW, "%s: %s Vcc=%d (from config)\n", dev_info,
607 ignore_cis_vcc ? "ignoring" : "setting", conf.Vcc);
608 link->conf.Vcc = conf.Vcc;
609 590
610 /* Look for an appropriate configuration table entry in the CIS */ 591 /* Look for an appropriate configuration table entry in the CIS */
611 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 592 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
612 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link->handle, &tuple)); 593 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
613 for (;;) { 594 for (;;) {
614 cistpl_cftable_entry_t *cfg = &(parse->cftable_entry); 595 cistpl_cftable_entry_t *cfg = &(parse->cftable_entry);
615 CFG_CHECK2(GetTupleData, 596 CFG_CHECK2(GetTupleData,
616 pcmcia_get_tuple_data(link->handle, &tuple)); 597 pcmcia_get_tuple_data(link, &tuple));
617 CFG_CHECK2(ParseTuple, 598 CFG_CHECK2(ParseTuple,
618 pcmcia_parse_tuple(link->handle, &tuple, parse)); 599 pcmcia_parse_tuple(link, &tuple, parse));
619 600
620 if (cfg->flags & CISTPL_CFTABLE_DEFAULT) 601 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
621 dflt = *cfg; 602 dflt = *cfg;
@@ -650,10 +631,10 @@ static int prism2_config(dev_link_t *link)
650 } 631 }
651 632
652 if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM)) 633 if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM))
653 link->conf.Vpp1 = link->conf.Vpp2 = 634 link->conf.Vpp =
654 cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000; 635 cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000;
655 else if (dflt.vpp1.present & (1 << CISTPL_POWER_VNOM)) 636 else if (dflt.vpp1.present & (1 << CISTPL_POWER_VNOM))
656 link->conf.Vpp1 = link->conf.Vpp2 = 637 link->conf.Vpp =
657 dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000; 638 dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000;
658 639
659 /* Do we need to allocate an interrupt? */ 640 /* Do we need to allocate an interrupt? */
@@ -695,19 +676,19 @@ static int prism2_config(dev_link_t *link)
695 676
696 /* This reserves IO space but doesn't actually enable it */ 677 /* This reserves IO space but doesn't actually enable it */
697 CFG_CHECK2(RequestIO, 678 CFG_CHECK2(RequestIO,
698 pcmcia_request_io(link->handle, &link->io)); 679 pcmcia_request_io(link, &link->io));
699 680
700 /* This configuration table entry is OK */ 681 /* This configuration table entry is OK */
701 break; 682 break;
702 683
703 next_entry: 684 next_entry:
704 CS_CHECK(GetNextTuple, 685 CS_CHECK(GetNextTuple,
705 pcmcia_get_next_tuple(link->handle, &tuple)); 686 pcmcia_get_next_tuple(link, &tuple));
706 } 687 }
707 688
708 /* Need to allocate net_device before requesting IRQ handler */ 689 /* Need to allocate net_device before requesting IRQ handler */
709 dev = prism2_init_local_data(&prism2_pccard_funcs, 0, 690 dev = prism2_init_local_data(&prism2_pccard_funcs, 0,
710 &handle_to_dev(link->handle)); 691 &handle_to_dev(link));
711 if (dev == NULL) 692 if (dev == NULL)
712 goto failed; 693 goto failed;
713 link->priv = dev; 694 link->priv = dev;
@@ -717,7 +698,7 @@ static int prism2_config(dev_link_t *link)
717 local->hw_priv = hw_priv; 698 local->hw_priv = hw_priv;
718 hw_priv->link = link; 699 hw_priv->link = link;
719 strcpy(hw_priv->node.dev_name, dev->name); 700 strcpy(hw_priv->node.dev_name, dev->name);
720 link->dev = &hw_priv->node; 701 link->dev_node = &hw_priv->node;
721 702
722 /* 703 /*
723 * Allocate an interrupt line. Note that this does not assign a 704 * Allocate an interrupt line. Note that this does not assign a
@@ -730,7 +711,7 @@ static int prism2_config(dev_link_t *link)
730 link->irq.Handler = prism2_interrupt; 711 link->irq.Handler = prism2_interrupt;
731 link->irq.Instance = dev; 712 link->irq.Instance = dev;
732 CS_CHECK(RequestIRQ, 713 CS_CHECK(RequestIRQ,
733 pcmcia_request_irq(link->handle, &link->irq)); 714 pcmcia_request_irq(link, &link->irq));
734 } 715 }
735 716
736 /* 717 /*
@@ -739,18 +720,17 @@ static int prism2_config(dev_link_t *link)
739 * card and host interface into "Memory and IO" mode. 720 * card and host interface into "Memory and IO" mode.
740 */ 721 */
741 CS_CHECK(RequestConfiguration, 722 CS_CHECK(RequestConfiguration,
742 pcmcia_request_configuration(link->handle, &link->conf)); 723 pcmcia_request_configuration(link, &link->conf));
743 724
744 dev->irq = link->irq.AssignedIRQ; 725 dev->irq = link->irq.AssignedIRQ;
745 dev->base_addr = link->io.BasePort1; 726 dev->base_addr = link->io.BasePort1;
746 727
747 /* Finally, report what we've done */ 728 /* Finally, report what we've done */
748 printk(KERN_INFO "%s: index 0x%02x: Vcc %d.%d", 729 printk(KERN_INFO "%s: index 0x%02x: ",
749 dev_info, link->conf.ConfigIndex, 730 dev_info, link->conf.ConfigIndex);
750 link->conf.Vcc / 10, link->conf.Vcc % 10); 731 if (link->conf.Vpp)
751 if (link->conf.Vpp1) 732 printk(", Vpp %d.%d", link->conf.Vpp / 10,
752 printk(", Vpp %d.%d", link->conf.Vpp1 / 10, 733 link->conf.Vpp % 10);
753 link->conf.Vpp1 % 10);
754 if (link->conf.Attributes & CONF_ENABLE_IRQ) 734 if (link->conf.Attributes & CONF_ENABLE_IRQ)
755 printk(", irq %d", link->irq.AssignedIRQ); 735 printk(", irq %d", link->irq.AssignedIRQ);
756 if (link->io.NumPorts1) 736 if (link->io.NumPorts1)
@@ -761,9 +741,6 @@ static int prism2_config(dev_link_t *link)
761 link->io.BasePort2+link->io.NumPorts2-1); 741 link->io.BasePort2+link->io.NumPorts2-1);
762 printk("\n"); 742 printk("\n");
763 743
764 link->state |= DEV_CONFIG;
765 link->state &= ~DEV_CONFIG_PENDING;
766
767 local->shutdown = 0; 744 local->shutdown = 0;
768 745
769 sandisk_enable_wireless(dev); 746 sandisk_enable_wireless(dev);
@@ -778,7 +755,7 @@ static int prism2_config(dev_link_t *link)
778 return ret; 755 return ret;
779 756
780 cs_failed: 757 cs_failed:
781 cs_error(link->handle, last_fn, last_ret); 758 cs_error(link, last_fn, last_ret);
782 759
783 failed: 760 failed:
784 kfree(parse); 761 kfree(parse);
@@ -790,7 +767,7 @@ static int prism2_config(dev_link_t *link)
790 767
791static void prism2_release(u_long arg) 768static void prism2_release(u_long arg)
792{ 769{
793 dev_link_t *link = (dev_link_t *)arg; 770 struct pcmcia_device *link = (struct pcmcia_device *)arg;
794 771
795 PDEBUG(DEBUG_FLOW, "prism2_release\n"); 772 PDEBUG(DEBUG_FLOW, "prism2_release\n");
796 773
@@ -799,71 +776,54 @@ static void prism2_release(u_long arg)
799 struct hostap_interface *iface; 776 struct hostap_interface *iface;
800 777
801 iface = netdev_priv(dev); 778 iface = netdev_priv(dev);
802 if (link->state & DEV_CONFIG) 779 prism2_hw_shutdown(dev, 0);
803 prism2_hw_shutdown(dev, 0);
804 iface->local->shutdown = 1; 780 iface->local->shutdown = 1;
805 } 781 }
806 782
807 if (link->win) 783 pcmcia_disable_device(link);
808 pcmcia_release_window(link->win);
809 pcmcia_release_configuration(link->handle);
810 if (link->io.NumPorts1)
811 pcmcia_release_io(link->handle, &link->io);
812 if (link->irq.AssignedIRQ)
813 pcmcia_release_irq(link->handle, &link->irq);
814
815 link->state &= ~DEV_CONFIG;
816
817 PDEBUG(DEBUG_FLOW, "release - done\n"); 784 PDEBUG(DEBUG_FLOW, "release - done\n");
818} 785}
819 786
820static int hostap_cs_suspend(struct pcmcia_device *p_dev) 787static int hostap_cs_suspend(struct pcmcia_device *link)
821{ 788{
822 dev_link_t *link = dev_to_instance(p_dev);
823 struct net_device *dev = (struct net_device *) link->priv; 789 struct net_device *dev = (struct net_device *) link->priv;
824 int dev_open = 0; 790 int dev_open = 0;
791 struct hostap_interface *iface = NULL;
825 792
826 PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_PM_SUSPEND\n", dev_info); 793 if (dev)
827 794 iface = netdev_priv(dev);
828 link->state |= DEV_SUSPEND;
829 795
830 if (link->state & DEV_CONFIG) { 796 PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_PM_SUSPEND\n", dev_info);
831 struct hostap_interface *iface = netdev_priv(dev); 797 if (iface && iface->local)
832 if (iface && iface->local) 798 dev_open = iface->local->num_dev_open > 0;
833 dev_open = iface->local->num_dev_open > 0; 799 if (dev_open) {
834 if (dev_open) { 800 netif_stop_queue(dev);
835 netif_stop_queue(dev); 801 netif_device_detach(dev);
836 netif_device_detach(dev);
837 }
838 prism2_suspend(dev);
839 pcmcia_release_configuration(link->handle);
840 } 802 }
803 prism2_suspend(dev);
841 804
842 return 0; 805 return 0;
843} 806}
844 807
845static int hostap_cs_resume(struct pcmcia_device *p_dev) 808static int hostap_cs_resume(struct pcmcia_device *link)
846{ 809{
847 dev_link_t *link = dev_to_instance(p_dev);
848 struct net_device *dev = (struct net_device *) link->priv; 810 struct net_device *dev = (struct net_device *) link->priv;
849 int dev_open = 0; 811 int dev_open = 0;
812 struct hostap_interface *iface = NULL;
850 813
851 PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_PM_RESUME\n", dev_info); 814 if (dev)
815 iface = netdev_priv(dev);
852 816
853 link->state &= ~DEV_SUSPEND; 817 PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_PM_RESUME\n", dev_info);
854 if (link->state & DEV_CONFIG) {
855 struct hostap_interface *iface = netdev_priv(dev);
856 if (iface && iface->local)
857 dev_open = iface->local->num_dev_open > 0;
858 818
859 pcmcia_request_configuration(link->handle, &link->conf); 819 if (iface && iface->local)
820 dev_open = iface->local->num_dev_open > 0;
860 821
861 prism2_hw_shutdown(dev, 1); 822 prism2_hw_shutdown(dev, 1);
862 prism2_hw_config(dev, dev_open ? 0 : 1); 823 prism2_hw_config(dev, dev_open ? 0 : 1);
863 if (dev_open) { 824 if (dev_open) {
864 netif_device_attach(dev); 825 netif_device_attach(dev);
865 netif_start_queue(dev); 826 netif_start_queue(dev);
866 }
867 } 827 }
868 828
869 return 0; 829 return 0;
@@ -930,7 +890,7 @@ static struct pcmcia_driver hostap_driver = {
930 .drv = { 890 .drv = {
931 .name = "hostap_cs", 891 .name = "hostap_cs",
932 }, 892 },
933 .probe = prism2_attach, 893 .probe = hostap_cs_probe,
934 .remove = prism2_detach, 894 .remove = prism2_detach,
935 .owner = THIS_MODULE, 895 .owner = THIS_MODULE,
936 .id_table = hostap_cs_ids, 896 .id_table = hostap_cs_ids,
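Two helpers do most of the work in the hostap_cs conversion above and in netwave_cs below: pcmcia_dev_present(link) replaces the old (DEV_PRESENT | DEV_CONFIG) state test, and pcmcia_disable_device(link) replaces the release_configuration/release_io/release_irq sequence. A sketch of both in a hypothetical driver:

static int example_card_present(struct pcmcia_device *link)
{
        /* was: (link->state & (DEV_PRESENT | DEV_CONFIG)) == (DEV_PRESENT | DEV_CONFIG) */
        return pcmcia_dev_present(link) ? 1 : 0;
}

static void example_release(struct pcmcia_device *link)
{
        /* one call releases the configuration, IO windows and IRQ */
        pcmcia_disable_device(link);
}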
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 9dce522526c5..bca89cff85a6 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -5573,8 +5573,7 @@ static void ipw_adhoc_create(struct ipw_priv *priv,
5573 case IEEE80211_52GHZ_BAND: 5573 case IEEE80211_52GHZ_BAND:
5574 network->mode = IEEE_A; 5574 network->mode = IEEE_A;
5575 i = ieee80211_channel_to_index(priv->ieee, priv->channel); 5575 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5576 if (i == -1) 5576 BUG_ON(i == -1);
5577 BUG();
5578 if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) { 5577 if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5579 IPW_WARNING("Overriding invalid channel\n"); 5578 IPW_WARNING("Overriding invalid channel\n");
5580 priv->channel = geo->a[0].channel; 5579 priv->channel = geo->a[0].channel;
@@ -5587,8 +5586,7 @@ static void ipw_adhoc_create(struct ipw_priv *priv,
5587 else 5586 else
5588 network->mode = IEEE_B; 5587 network->mode = IEEE_B;
5589 i = ieee80211_channel_to_index(priv->ieee, priv->channel); 5588 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5590 if (i == -1) 5589 BUG_ON(i == -1);
5591 BUG();
5592 if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) { 5590 if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5593 IPW_WARNING("Overriding invalid channel\n"); 5591 IPW_WARNING("Overriding invalid channel\n");
5594 priv->channel = geo->bg[0].channel; 5592 priv->channel = geo->bg[0].channel;
@@ -6715,8 +6713,7 @@ static int ipw_qos_association(struct ipw_priv *priv,
6715 6713
6716 switch (priv->ieee->iw_mode) { 6714 switch (priv->ieee->iw_mode) {
6717 case IW_MODE_ADHOC: 6715 case IW_MODE_ADHOC:
6718 if (!(network->capability & WLAN_CAPABILITY_IBSS)) 6716 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
6719 BUG();
6720 6717
6721 qos_data = &ibss_data; 6718 qos_data = &ibss_data;
6722 break; 6719 break;
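The ipw2200 hunks are an unrelated cleanup carried by the same merge: open-coded "if (cond) BUG();" pairs become BUG_ON(cond). Before/after of the idiom, using the channel-index check from the first hunk; nothing else changes:

        /* before */
        if (i == -1)
                BUG();

        /* after */
        BUG_ON(i == -1);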
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c
index 75ce6ddb0cf5..9343d970537b 100644
--- a/drivers/net/wireless/netwave_cs.c
+++ b/drivers/net/wireless/netwave_cs.c
@@ -190,8 +190,8 @@ module_param(mem_speed, int, 0);
190/*====================================================================*/ 190/*====================================================================*/
191 191
192/* PCMCIA (Card Services) related functions */ 192/* PCMCIA (Card Services) related functions */
193static void netwave_release(dev_link_t *link); /* Card removal */ 193static void netwave_release(struct pcmcia_device *link); /* Card removal */
194static void netwave_pcmcia_config(dev_link_t *arg); /* Runs after card 194static int netwave_pcmcia_config(struct pcmcia_device *arg); /* Runs after card
195 insertion */ 195 insertion */
196static void netwave_detach(struct pcmcia_device *p_dev); /* Destroy instance */ 196static void netwave_detach(struct pcmcia_device *p_dev); /* Destroy instance */
197 197
@@ -221,10 +221,10 @@ static struct iw_statistics* netwave_get_wireless_stats(struct net_device *dev);
221static void set_multicast_list(struct net_device *dev); 221static void set_multicast_list(struct net_device *dev);
222 222
223/* 223/*
224 A dev_link_t structure has fields for most things that are needed 224 A struct pcmcia_device structure has fields for most things that are needed
225 to keep track of a socket, but there will usually be some device 225 to keep track of a socket, but there will usually be some device
226 specific information that also needs to be kept track of. The 226 specific information that also needs to be kept track of. The
227 'priv' pointer in a dev_link_t structure can be used to point to 227 'priv' pointer in a struct pcmcia_device structure can be used to point to
228 a device-specific private data structure, like this. 228 a device-specific private data structure, like this.
229 229
230 A driver needs to provide a dev_node_t structure for each device 230 A driver needs to provide a dev_node_t structure for each device
@@ -232,7 +232,7 @@ static void set_multicast_list(struct net_device *dev);
232 example, ethernet cards, modems). In other cases, there may be 232 example, ethernet cards, modems). In other cases, there may be
233 many actual or logical devices (SCSI adapters, memory cards with 233 many actual or logical devices (SCSI adapters, memory cards with
234 multiple partitions). The dev_node_t structures need to be kept 234 multiple partitions). The dev_node_t structures need to be kept
235 in a linked list starting at the 'dev' field of a dev_link_t 235 in a linked list starting at the 'dev' field of a struct pcmcia_device
236 structure. We allocate them in the card's private data structure, 236 structure. We allocate them in the card's private data structure,
237 because they generally can't be allocated dynamically. 237 because they generally can't be allocated dynamically.
238*/ 238*/
@@ -268,7 +268,7 @@ struct site_survey {
268}; 268};
269 269
270typedef struct netwave_private { 270typedef struct netwave_private {
271 dev_link_t link; 271 struct pcmcia_device *p_dev;
272 spinlock_t spinlock; /* Serialize access to the hardware (SMP) */ 272 spinlock_t spinlock; /* Serialize access to the hardware (SMP) */
273 dev_node_t node; 273 dev_node_t node;
274 u_char __iomem *ramBase; 274 u_char __iomem *ramBase;
@@ -376,20 +376,19 @@ static struct iw_statistics *netwave_get_wireless_stats(struct net_device *dev)
376 * configure the card at this point -- we wait until we receive a 376 * configure the card at this point -- we wait until we receive a
377 * card insertion event. 377 * card insertion event.
378 */ 378 */
379static int netwave_attach(struct pcmcia_device *p_dev) 379static int netwave_probe(struct pcmcia_device *link)
380{ 380{
381 dev_link_t *link;
382 struct net_device *dev; 381 struct net_device *dev;
383 netwave_private *priv; 382 netwave_private *priv;
384 383
385 DEBUG(0, "netwave_attach()\n"); 384 DEBUG(0, "netwave_attach()\n");
386 385
387 /* Initialize the dev_link_t structure */ 386 /* Initialize the struct pcmcia_device structure */
388 dev = alloc_etherdev(sizeof(netwave_private)); 387 dev = alloc_etherdev(sizeof(netwave_private));
389 if (!dev) 388 if (!dev)
390 return -ENOMEM; 389 return -ENOMEM;
391 priv = netdev_priv(dev); 390 priv = netdev_priv(dev);
392 link = &priv->link; 391 priv->p_dev = link;
393 link->priv = dev; 392 link->priv = dev;
394 393
395 /* The io structure describes IO port mapping */ 394 /* The io structure describes IO port mapping */
@@ -406,7 +405,6 @@ static int netwave_attach(struct pcmcia_device *p_dev)
406 405
407 /* General socket configuration */ 406 /* General socket configuration */
408 link->conf.Attributes = CONF_ENABLE_IRQ; 407 link->conf.Attributes = CONF_ENABLE_IRQ;
409 link->conf.Vcc = 50;
410 link->conf.IntType = INT_MEMORY_AND_IO; 408 link->conf.IntType = INT_MEMORY_AND_IO;
411 link->conf.ConfigIndex = 1; 409 link->conf.ConfigIndex = 1;
412 link->conf.Present = PRESENT_OPTION; 410 link->conf.Present = PRESENT_OPTION;
@@ -430,13 +428,7 @@ static int netwave_attach(struct pcmcia_device *p_dev)
430 dev->stop = &netwave_close; 428 dev->stop = &netwave_close;
431 link->irq.Instance = dev; 429 link->irq.Instance = dev;
432 430
433 link->handle = p_dev; 431 return netwave_pcmcia_config( link);
434 p_dev->instance = link;
435
436 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
437 netwave_pcmcia_config( link);
438
439 return 0;
440} /* netwave_attach */ 432} /* netwave_attach */
441 433
442/* 434/*
@@ -447,17 +439,15 @@ static int netwave_attach(struct pcmcia_device *p_dev)
447 * structures are freed. Otherwise, the structures will be freed 439 * structures are freed. Otherwise, the structures will be freed
448 * when the device is released. 440 * when the device is released.
449 */ 441 */
450static void netwave_detach(struct pcmcia_device *p_dev) 442static void netwave_detach(struct pcmcia_device *link)
451{ 443{
452 dev_link_t *link = dev_to_instance(p_dev);
453 struct net_device *dev = link->priv; 444 struct net_device *dev = link->priv;
454 445
455 DEBUG(0, "netwave_detach(0x%p)\n", link); 446 DEBUG(0, "netwave_detach(0x%p)\n", link);
456 447
457 if (link->state & DEV_CONFIG) 448 netwave_release(link);
458 netwave_release(link);
459 449
460 if (link->dev) 450 if (link->dev_node)
461 unregister_netdev(dev); 451 unregister_netdev(dev);
462 452
463 free_netdev(dev); 453 free_netdev(dev);
@@ -743,8 +733,7 @@ static const struct iw_handler_def netwave_handler_def =
743#define CS_CHECK(fn, ret) \ 733#define CS_CHECK(fn, ret) \
744do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 734do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
745 735
746static void netwave_pcmcia_config(dev_link_t *link) { 736static int netwave_pcmcia_config(struct pcmcia_device *link) {
747 client_handle_t handle = link->handle;
748 struct net_device *dev = link->priv; 737 struct net_device *dev = link->priv;
749 netwave_private *priv = netdev_priv(dev); 738 netwave_private *priv = netdev_priv(dev);
750 tuple_t tuple; 739 tuple_t tuple;
@@ -766,15 +755,12 @@ static void netwave_pcmcia_config(dev_link_t *link) {
766 tuple.TupleDataMax = 64; 755 tuple.TupleDataMax = 64;
767 tuple.TupleOffset = 0; 756 tuple.TupleOffset = 0;
768 tuple.DesiredTuple = CISTPL_CONFIG; 757 tuple.DesiredTuple = CISTPL_CONFIG;
769 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 758 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
770 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 759 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
771 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 760 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
772 link->conf.ConfigBase = parse.config.base; 761 link->conf.ConfigBase = parse.config.base;
773 link->conf.Present = parse.config.rmask[0]; 762 link->conf.Present = parse.config.rmask[0];
774 763
775 /* Configure card */
776 link->state |= DEV_CONFIG;
777
778 /* 764 /*
779 * Try allocating IO ports. This tries a few fixed addresses. 765 * Try allocating IO ports. This tries a few fixed addresses.
780 * If you want, you can also read the card's config table to 766 * If you want, you can also read the card's config table to
@@ -782,11 +768,11 @@ static void netwave_pcmcia_config(dev_link_t *link) {
782 */ 768 */
783 for (i = j = 0x0; j < 0x400; j += 0x20) { 769 for (i = j = 0x0; j < 0x400; j += 0x20) {
784 link->io.BasePort1 = j ^ 0x300; 770 link->io.BasePort1 = j ^ 0x300;
785 i = pcmcia_request_io(link->handle, &link->io); 771 i = pcmcia_request_io(link, &link->io);
786 if (i == CS_SUCCESS) break; 772 if (i == CS_SUCCESS) break;
787 } 773 }
788 if (i != CS_SUCCESS) { 774 if (i != CS_SUCCESS) {
789 cs_error(link->handle, RequestIO, i); 775 cs_error(link, RequestIO, i);
790 goto failed; 776 goto failed;
791 } 777 }
792 778
@@ -794,16 +780,16 @@ static void netwave_pcmcia_config(dev_link_t *link) {
794 * Now allocate an interrupt line. Note that this does not 780 * Now allocate an interrupt line. Note that this does not
795 * actually assign a handler to the interrupt. 781 * actually assign a handler to the interrupt.
796 */ 782 */
797 CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq)); 783 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
798 784
799 /* 785 /*
800 * This actually configures the PCMCIA socket -- setting up 786 * This actually configures the PCMCIA socket -- setting up
801 * the I/O windows and the interrupt mapping. 787 * the I/O windows and the interrupt mapping.
802 */ 788 */
803 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf)); 789 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
804 790
805 /* 791 /*
806 * Allocate a 32K memory window. Note that the dev_link_t 792 * Allocate a 32K memory window. Note that the struct pcmcia_device
807 * structure provides space for one window handle -- if your 793 * structure provides space for one window handle -- if your
808 * device needs several windows, you'll need to keep track of 794 * device needs several windows, you'll need to keep track of
809 * the handles in your private data structure, dev->priv. 795 * the handles in your private data structure, dev->priv.
@@ -813,7 +799,7 @@ static void netwave_pcmcia_config(dev_link_t *link) {
813 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_CM|WIN_ENABLE; 799 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_CM|WIN_ENABLE;
814 req.Base = 0; req.Size = 0x8000; 800 req.Base = 0; req.Size = 0x8000;
815 req.AccessSpeed = mem_speed; 801 req.AccessSpeed = mem_speed;
816 CS_CHECK(RequestWindow, pcmcia_request_window(&link->handle, &req, &link->win)); 802 CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &link->win));
817 mem.CardOffset = 0x20000; mem.Page = 0; 803 mem.CardOffset = 0x20000; mem.Page = 0;
818 CS_CHECK(MapMemPage, pcmcia_map_mem_page(link->win, &mem)); 804 CS_CHECK(MapMemPage, pcmcia_map_mem_page(link->win, &mem));
819 805
@@ -823,7 +809,7 @@ static void netwave_pcmcia_config(dev_link_t *link) {
823 809
824 dev->irq = link->irq.AssignedIRQ; 810 dev->irq = link->irq.AssignedIRQ;
825 dev->base_addr = link->io.BasePort1; 811 dev->base_addr = link->io.BasePort1;
826 SET_NETDEV_DEV(dev, &handle_to_dev(handle)); 812 SET_NETDEV_DEV(dev, &handle_to_dev(link));
827 813
828 if (register_netdev(dev) != 0) { 814 if (register_netdev(dev) != 0) {
829 printk(KERN_DEBUG "netwave_cs: register_netdev() failed\n"); 815 printk(KERN_DEBUG "netwave_cs: register_netdev() failed\n");
@@ -831,8 +817,7 @@ static void netwave_pcmcia_config(dev_link_t *link) {
831 } 817 }
832 818
833 strcpy(priv->node.dev_name, dev->name); 819 strcpy(priv->node.dev_name, dev->name);
834 link->dev = &priv->node; 820 link->dev_node = &priv->node;
835 link->state &= ~DEV_CONFIG_PENDING;
836 821
837 /* Reset card before reading physical address */ 822 /* Reset card before reading physical address */
838 netwave_doreset(dev->base_addr, ramBase); 823 netwave_doreset(dev->base_addr, ramBase);
@@ -852,12 +837,13 @@ static void netwave_pcmcia_config(dev_link_t *link) {
852 printk(KERN_DEBUG "Netwave_reset: revision %04x %04x\n", 837 printk(KERN_DEBUG "Netwave_reset: revision %04x %04x\n",
853 get_uint16(ramBase + NETWAVE_EREG_ARW), 838 get_uint16(ramBase + NETWAVE_EREG_ARW),
854 get_uint16(ramBase + NETWAVE_EREG_ARW+2)); 839 get_uint16(ramBase + NETWAVE_EREG_ARW+2));
855 return; 840 return 0;
856 841
857cs_failed: 842cs_failed:
858 cs_error(link->handle, last_fn, last_ret); 843 cs_error(link, last_fn, last_ret);
859failed: 844failed:
860 netwave_release(link); 845 netwave_release(link);
846 return -ENODEV;
861} /* netwave_pcmcia_config */ 847} /* netwave_pcmcia_config */
862 848
863/* 849/*
@@ -867,52 +853,35 @@ failed:
867 * device, and release the PCMCIA configuration. If the device is 853 * device, and release the PCMCIA configuration. If the device is
868 * still open, this will be postponed until it is closed. 854 * still open, this will be postponed until it is closed.
869 */ 855 */
870static void netwave_release(dev_link_t *link) 856static void netwave_release(struct pcmcia_device *link)
871{ 857{
872 struct net_device *dev = link->priv; 858 struct net_device *dev = link->priv;
873 netwave_private *priv = netdev_priv(dev); 859 netwave_private *priv = netdev_priv(dev);
874
875 DEBUG(0, "netwave_release(0x%p)\n", link);
876 860
877 /* Don't bother checking to see if these succeed or not */ 861 DEBUG(0, "netwave_release(0x%p)\n", link);
878 if (link->win) {
879 iounmap(priv->ramBase);
880 pcmcia_release_window(link->win);
881 }
882 pcmcia_release_configuration(link->handle);
883 pcmcia_release_io(link->handle, &link->io);
884 pcmcia_release_irq(link->handle, &link->irq);
885 862
886 link->state &= ~DEV_CONFIG; 863 pcmcia_disable_device(link);
864 if (link->win)
865 iounmap(priv->ramBase);
887} 866}
888 867
889static int netwave_suspend(struct pcmcia_device *p_dev) 868static int netwave_suspend(struct pcmcia_device *link)
890{ 869{
891 dev_link_t *link = dev_to_instance(p_dev);
892 struct net_device *dev = link->priv; 870 struct net_device *dev = link->priv;
893 871
894 link->state |= DEV_SUSPEND; 872 if (link->open)
895 if (link->state & DEV_CONFIG) { 873 netif_device_detach(dev);
896 if (link->open)
897 netif_device_detach(dev);
898 pcmcia_release_configuration(link->handle);
899 }
900 874
901 return 0; 875 return 0;
902} 876}
903 877
904static int netwave_resume(struct pcmcia_device *p_dev) 878static int netwave_resume(struct pcmcia_device *link)
905{ 879{
906 dev_link_t *link = dev_to_instance(p_dev);
907 struct net_device *dev = link->priv; 880 struct net_device *dev = link->priv;
908 881
909 link->state &= ~DEV_SUSPEND; 882 if (link->open) {
910 if (link->state & DEV_CONFIG) { 883 netwave_reset(dev);
911 pcmcia_request_configuration(link->handle, &link->conf); 884 netif_device_attach(dev);
912 if (link->open) {
913 netwave_reset(dev);
914 netif_device_attach(dev);
915 }
916 } 885 }
917 886
918 return 0; 887 return 0;
@@ -1119,7 +1088,7 @@ static irqreturn_t netwave_interrupt(int irq, void* dev_id, struct pt_regs *regs
1119 u_char __iomem *ramBase; 1088 u_char __iomem *ramBase;
1120 struct net_device *dev = (struct net_device *)dev_id; 1089 struct net_device *dev = (struct net_device *)dev_id;
1121 struct netwave_private *priv = netdev_priv(dev); 1090 struct netwave_private *priv = netdev_priv(dev);
1122 dev_link_t *link = &priv->link; 1091 struct pcmcia_device *link = priv->p_dev;
1123 int i; 1092 int i;
1124 1093
1125 if (!netif_device_present(dev)) 1094 if (!netif_device_present(dev))
@@ -1138,7 +1107,7 @@ static irqreturn_t netwave_interrupt(int irq, void* dev_id, struct pt_regs *regs
1138 1107
1139 status = inb(iobase + NETWAVE_REG_ASR); 1108 status = inb(iobase + NETWAVE_REG_ASR);
1140 1109
1141 if (!DEV_OK(link)) { 1110 if (!pcmcia_dev_present(link)) {
1142 DEBUG(1, "netwave_interrupt: Interrupt with status 0x%x " 1111 DEBUG(1, "netwave_interrupt: Interrupt with status 0x%x "
1143 "from removed or suspended card!\n", status); 1112 "from removed or suspended card!\n", status);
1144 break; 1113 break;
@@ -1373,11 +1342,11 @@ static int netwave_rx(struct net_device *dev)
1373 1342
1374static int netwave_open(struct net_device *dev) { 1343static int netwave_open(struct net_device *dev) {
1375 netwave_private *priv = netdev_priv(dev); 1344 netwave_private *priv = netdev_priv(dev);
1376 dev_link_t *link = &priv->link; 1345 struct pcmcia_device *link = priv->p_dev;
1377 1346
1378 DEBUG(1, "netwave_open: starting.\n"); 1347 DEBUG(1, "netwave_open: starting.\n");
1379 1348
1380 if (!DEV_OK(link)) 1349 if (!pcmcia_dev_present(link))
1381 return -ENODEV; 1350 return -ENODEV;
1382 1351
1383 link->open++; 1352 link->open++;
@@ -1390,7 +1359,7 @@ static int netwave_open(struct net_device *dev) {
1390 1359
1391static int netwave_close(struct net_device *dev) { 1360static int netwave_close(struct net_device *dev) {
1392 netwave_private *priv = netdev_priv(dev); 1361 netwave_private *priv = netdev_priv(dev);
1393 dev_link_t *link = &priv->link; 1362 struct pcmcia_device *link = priv->p_dev;
1394 1363
1395 DEBUG(1, "netwave_close: finishing.\n"); 1364 DEBUG(1, "netwave_close: finishing.\n");
1396 1365
@@ -1411,7 +1380,7 @@ static struct pcmcia_driver netwave_driver = {
1411 .drv = { 1380 .drv = {
1412 .name = "netwave_cs", 1381 .name = "netwave_cs",
1413 }, 1382 },
1414 .probe = netwave_attach, 1383 .probe = netwave_probe,
1415 .remove = netwave_detach, 1384 .remove = netwave_detach,
1416 .id_table = netwave_ids, 1385 .id_table = netwave_ids,
1417 .suspend = netwave_suspend, 1386 .suspend = netwave_suspend,
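
The netwave_cs hunks above reduce suspend/resume to the pattern every driver in this patch converges on: the PCMCIA core now saves and restores the socket configuration itself, so the driver no longer touches DEV_SUSPEND/DEV_CONFIG or calls pcmcia_release_configuration()/pcmcia_request_configuration(), and only detaches and reattaches its net_device. A minimal sketch of that shape, with placeholder names (example_suspend, example_resume and example_reset are illustrative, not part of the patch):

    #include <pcmcia/cs_types.h>
    #include <pcmcia/cs.h>
    #include <pcmcia/ds.h>
    #include <linux/netdevice.h>

    static void example_reset(struct net_device *dev);	/* driver-specific, placeholder */

    static int example_suspend(struct pcmcia_device *link)
    {
    	struct net_device *dev = link->priv;

    	/* The core powers the socket down after this returns; just detach. */
    	if (link->open)
    		netif_device_detach(dev);

    	return 0;
    }

    static int example_resume(struct pcmcia_device *link)
    {
    	struct net_device *dev = link->priv;

    	/* The core has already restored the configuration; reinit and reattach. */
    	if (link->open) {
    		example_reset(dev);
    		netif_device_attach(dev);
    	}

    	return 0;
    }
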
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c
index ec6f2a48895b..434f7d7ad841 100644
--- a/drivers/net/wireless/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco_cs.c
@@ -49,7 +49,7 @@ MODULE_PARM_DESC(ignore_cis_vcc, "Allow voltage mismatch between card and socket
49/* PCMCIA specific device information (goes in the card field of 49/* PCMCIA specific device information (goes in the card field of
50 * struct orinoco_private */ 50 * struct orinoco_private */
51struct orinoco_pccard { 51struct orinoco_pccard {
52 dev_link_t link; 52 struct pcmcia_device *p_dev;
53 dev_node_t node; 53 dev_node_t node;
54 54
55 /* Used to handle hard reset */ 55 /* Used to handle hard reset */
@@ -63,8 +63,8 @@ struct orinoco_pccard {
63/* Function prototypes */ 63/* Function prototypes */
64/********************************************************************/ 64/********************************************************************/
65 65
66static void orinoco_cs_config(dev_link_t *link); 66static int orinoco_cs_config(struct pcmcia_device *link);
67static void orinoco_cs_release(dev_link_t *link); 67static void orinoco_cs_release(struct pcmcia_device *link);
68static void orinoco_cs_detach(struct pcmcia_device *p_dev); 68static void orinoco_cs_detach(struct pcmcia_device *p_dev);
69 69
70/********************************************************************/ 70/********************************************************************/
@@ -75,13 +75,13 @@ static int
75orinoco_cs_hard_reset(struct orinoco_private *priv) 75orinoco_cs_hard_reset(struct orinoco_private *priv)
76{ 76{
77 struct orinoco_pccard *card = priv->card; 77 struct orinoco_pccard *card = priv->card;
78 dev_link_t *link = &card->link; 78 struct pcmcia_device *link = card->p_dev;
79 int err; 79 int err;
80 80
81 /* We need atomic ops here, because we're not holding the lock */ 81 /* We need atomic ops here, because we're not holding the lock */
82 set_bit(0, &card->hard_reset_in_progress); 82 set_bit(0, &card->hard_reset_in_progress);
83 83
84 err = pcmcia_reset_card(link->handle, NULL); 84 err = pcmcia_reset_card(link, NULL);
85 if (err) 85 if (err)
86 return err; 86 return err;
87 87
@@ -104,12 +104,11 @@ orinoco_cs_hard_reset(struct orinoco_private *priv)
104 * configure the card at this point -- we wait until we receive a card 104 * configure the card at this point -- we wait until we receive a card
105 * insertion event. */ 105 * insertion event. */
106static int 106static int
107orinoco_cs_attach(struct pcmcia_device *p_dev) 107orinoco_cs_probe(struct pcmcia_device *link)
108{ 108{
109 struct net_device *dev; 109 struct net_device *dev;
110 struct orinoco_private *priv; 110 struct orinoco_private *priv;
111 struct orinoco_pccard *card; 111 struct orinoco_pccard *card;
112 dev_link_t *link;
113 112
114 dev = alloc_orinocodev(sizeof(*card), orinoco_cs_hard_reset); 113 dev = alloc_orinocodev(sizeof(*card), orinoco_cs_hard_reset);
115 if (! dev) 114 if (! dev)
@@ -118,7 +117,7 @@ orinoco_cs_attach(struct pcmcia_device *p_dev)
118 card = priv->card; 117 card = priv->card;
119 118
120 /* Link both structures together */ 119 /* Link both structures together */
121 link = &card->link; 120 card->p_dev = link;
122 link->priv = dev; 121 link->priv = dev;
123 122
124 /* Interrupt setup */ 123 /* Interrupt setup */
@@ -135,16 +134,7 @@ orinoco_cs_attach(struct pcmcia_device *p_dev)
135 link->conf.Attributes = 0; 134 link->conf.Attributes = 0;
136 link->conf.IntType = INT_MEMORY_AND_IO; 135 link->conf.IntType = INT_MEMORY_AND_IO;
137 136
138 /* Register with Card Services */ 137 return orinoco_cs_config(link);
139 link->next = NULL;
140
141 link->handle = p_dev;
142 p_dev->instance = link;
143
144 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
145 orinoco_cs_config(link);
146
147 return 0;
148} /* orinoco_cs_attach */ 138} /* orinoco_cs_attach */
149 139
150/* 140/*
@@ -153,16 +143,14 @@ orinoco_cs_attach(struct pcmcia_device *p_dev)
153 * are freed. Otherwise, the structures will be freed when the device 143 * are freed. Otherwise, the structures will be freed when the device
154 * is released. 144 * is released.
155 */ 145 */
156static void orinoco_cs_detach(struct pcmcia_device *p_dev) 146static void orinoco_cs_detach(struct pcmcia_device *link)
157{ 147{
158 dev_link_t *link = dev_to_instance(p_dev);
159 struct net_device *dev = link->priv; 148 struct net_device *dev = link->priv;
160 149
161 if (link->state & DEV_CONFIG) 150 orinoco_cs_release(link);
162 orinoco_cs_release(link);
163 151
164 DEBUG(0, PFX "detach: link=%p link->dev=%p\n", link, link->dev); 152 DEBUG(0, PFX "detach: link=%p link->dev_node=%p\n", link, link->dev_node);
165 if (link->dev) { 153 if (link->dev_node) {
166 DEBUG(0, PFX "About to unregister net device %p\n", 154 DEBUG(0, PFX "About to unregister net device %p\n",
167 dev); 155 dev);
168 unregister_netdev(dev); 156 unregister_netdev(dev);
@@ -180,11 +168,10 @@ static void orinoco_cs_detach(struct pcmcia_device *p_dev)
180 last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; \ 168 last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; \
181 } while (0) 169 } while (0)
182 170
183static void 171static int
184orinoco_cs_config(dev_link_t *link) 172orinoco_cs_config(struct pcmcia_device *link)
185{ 173{
186 struct net_device *dev = link->priv; 174 struct net_device *dev = link->priv;
187 client_handle_t handle = link->handle;
188 struct orinoco_private *priv = netdev_priv(dev); 175 struct orinoco_private *priv = netdev_priv(dev);
189 struct orinoco_pccard *card = priv->card; 176 struct orinoco_pccard *card = priv->card;
190 hermes_t *hw = &priv->hw; 177 hermes_t *hw = &priv->hw;
@@ -196,7 +183,7 @@ orinoco_cs_config(dev_link_t *link)
196 cisparse_t parse; 183 cisparse_t parse;
197 void __iomem *mem; 184 void __iomem *mem;
198 185
199 CS_CHECK(ValidateCIS, pcmcia_validate_cis(handle, &info)); 186 CS_CHECK(ValidateCIS, pcmcia_validate_cis(link, &info));
200 187
201 /* 188 /*
202 * This reads the card's CONFIG tuple to find its 189 * This reads the card's CONFIG tuple to find its
@@ -207,19 +194,15 @@ orinoco_cs_config(dev_link_t *link)
207 tuple.TupleData = buf; 194 tuple.TupleData = buf;
208 tuple.TupleDataMax = sizeof(buf); 195 tuple.TupleDataMax = sizeof(buf);
209 tuple.TupleOffset = 0; 196 tuple.TupleOffset = 0;
210 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 197 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
211 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 198 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
212 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 199 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
213 link->conf.ConfigBase = parse.config.base; 200 link->conf.ConfigBase = parse.config.base;
214 link->conf.Present = parse.config.rmask[0]; 201 link->conf.Present = parse.config.rmask[0];
215 202
216 /* Configure card */
217 link->state |= DEV_CONFIG;
218
219 /* Look up the current Vcc */ 203 /* Look up the current Vcc */
220 CS_CHECK(GetConfigurationInfo, 204 CS_CHECK(GetConfigurationInfo,
221 pcmcia_get_configuration_info(handle, &conf)); 205 pcmcia_get_configuration_info(link, &conf));
222 link->conf.Vcc = conf.Vcc;
223 206
224 /* 207 /*
225 * In this loop, we scan the CIS for configuration table 208 * In this loop, we scan the CIS for configuration table
@@ -236,13 +219,13 @@ orinoco_cs_config(dev_link_t *link)
236 * implementation-defined details. 219 * implementation-defined details.
237 */ 220 */
238 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 221 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
239 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 222 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
240 while (1) { 223 while (1) {
241 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry); 224 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
242 cistpl_cftable_entry_t dflt = { .index = 0 }; 225 cistpl_cftable_entry_t dflt = { .index = 0 };
243 226
244 if ( (pcmcia_get_tuple_data(handle, &tuple) != 0) 227 if ( (pcmcia_get_tuple_data(link, &tuple) != 0)
245 || (pcmcia_parse_tuple(handle, &tuple, &parse) != 0)) 228 || (pcmcia_parse_tuple(link, &tuple, &parse) != 0))
246 goto next_entry; 229 goto next_entry;
247 230
248 if (cfg->flags & CISTPL_CFTABLE_DEFAULT) 231 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
@@ -274,10 +257,10 @@ orinoco_cs_config(dev_link_t *link)
274 } 257 }
275 258
276 if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM)) 259 if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM))
277 link->conf.Vpp1 = link->conf.Vpp2 = 260 link->conf.Vpp =
278 cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000; 261 cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000;
279 else if (dflt.vpp1.present & (1 << CISTPL_POWER_VNOM)) 262 else if (dflt.vpp1.present & (1 << CISTPL_POWER_VNOM))
280 link->conf.Vpp1 = link->conf.Vpp2 = 263 link->conf.Vpp =
281 dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000; 264 dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000;
282 265
283 /* Do we need to allocate an interrupt? */ 266 /* Do we need to allocate an interrupt? */
@@ -307,7 +290,7 @@ orinoco_cs_config(dev_link_t *link)
307 } 290 }
308 291
309 /* This reserves IO space but doesn't actually enable it */ 292 /* This reserves IO space but doesn't actually enable it */
310 if (pcmcia_request_io(link->handle, &link->io) != 0) 293 if (pcmcia_request_io(link, &link->io) != 0)
311 goto next_entry; 294 goto next_entry;
312 } 295 }
313 296
@@ -317,9 +300,8 @@ orinoco_cs_config(dev_link_t *link)
317 break; 300 break;
318 301
319 next_entry: 302 next_entry:
320 if (link->io.NumPorts1) 303 pcmcia_disable_device(link);
321 pcmcia_release_io(link->handle, &link->io); 304 last_ret = pcmcia_get_next_tuple(link, &tuple);
322 last_ret = pcmcia_get_next_tuple(handle, &tuple);
323 if (last_ret == CS_NO_MORE_ITEMS) { 305 if (last_ret == CS_NO_MORE_ITEMS) {
324 printk(KERN_ERR PFX "GetNextTuple(): No matching " 306 printk(KERN_ERR PFX "GetNextTuple(): No matching "
325 "CIS configuration. Maybe you need the " 307 "CIS configuration. Maybe you need the "
@@ -333,7 +315,7 @@ orinoco_cs_config(dev_link_t *link)
333 * a handler to the interrupt, unless the 'Handler' member of 315 * a handler to the interrupt, unless the 'Handler' member of
334 * the irq structure is initialized. 316 * the irq structure is initialized.
335 */ 317 */
336 CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq)); 318 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
337 319
338 /* We initialize the hermes structure before completing PCMCIA 320 /* We initialize the hermes structure before completing PCMCIA
339 * configuration just in case the interrupt handler gets 321 * configuration just in case the interrupt handler gets
@@ -350,7 +332,7 @@ orinoco_cs_config(dev_link_t *link)
350 * card and host interface into "Memory and IO" mode. 332 * card and host interface into "Memory and IO" mode.
351 */ 333 */
352 CS_CHECK(RequestConfiguration, 334 CS_CHECK(RequestConfiguration,
353 pcmcia_request_configuration(link->handle, &link->conf)); 335 pcmcia_request_configuration(link, &link->conf));
354 336
355 /* Ok, we have the configuration, prepare to register the netdev */ 337 /* Ok, we have the configuration, prepare to register the netdev */
356 dev->base_addr = link->io.BasePort1; 338 dev->base_addr = link->io.BasePort1;
@@ -358,7 +340,7 @@ orinoco_cs_config(dev_link_t *link)
358 SET_MODULE_OWNER(dev); 340 SET_MODULE_OWNER(dev);
359 card->node.major = card->node.minor = 0; 341 card->node.major = card->node.minor = 0;
360 342
361 SET_NETDEV_DEV(dev, &handle_to_dev(handle)); 343 SET_NETDEV_DEV(dev, &handle_to_dev(link));
362 /* Tell the stack we exist */ 344 /* Tell the stack we exist */
363 if (register_netdev(dev) != 0) { 345 if (register_netdev(dev) != 0) {
364 printk(KERN_ERR PFX "register_netdev() failed\n"); 346 printk(KERN_ERR PFX "register_netdev() failed\n");
@@ -366,20 +348,18 @@ orinoco_cs_config(dev_link_t *link)
366 } 348 }
367 349
368 /* At this point, the dev_node_t structure(s) needs to be 350 /* At this point, the dev_node_t structure(s) needs to be
369 * initialized and arranged in a linked list at link->dev. */ 351 * initialized and arranged in a linked list at link->dev_node. */
370 strcpy(card->node.dev_name, dev->name); 352 strcpy(card->node.dev_name, dev->name);
371 link->dev = &card->node; /* link->dev being non-NULL is also 353 link->dev_node = &card->node; /* link->dev_node being non-NULL is also
372 used to indicate that the 354 used to indicate that the
373 net_device has been registered */ 355 net_device has been registered */
374 link->state &= ~DEV_CONFIG_PENDING;
375 356
376 /* Finally, report what we've done */ 357 /* Finally, report what we've done */
377 printk(KERN_DEBUG "%s: index 0x%02x: Vcc %d.%d", 358 printk(KERN_DEBUG "%s: index 0x%02x: ",
378 dev->name, link->conf.ConfigIndex, 359 dev->name, link->conf.ConfigIndex);
379 link->conf.Vcc / 10, link->conf.Vcc % 10); 360 if (link->conf.Vpp)
380 if (link->conf.Vpp1) 361 printk(", Vpp %d.%d", link->conf.Vpp / 10,
381 printk(", Vpp %d.%d", link->conf.Vpp1 / 10, 362 link->conf.Vpp % 10);
382 link->conf.Vpp1 % 10);
383 printk(", irq %d", link->irq.AssignedIRQ); 363 printk(", irq %d", link->irq.AssignedIRQ);
384 if (link->io.NumPorts1) 364 if (link->io.NumPorts1)
385 printk(", io 0x%04x-0x%04x", link->io.BasePort1, 365 printk(", io 0x%04x-0x%04x", link->io.BasePort1,
@@ -389,13 +369,14 @@ orinoco_cs_config(dev_link_t *link)
389 link->io.BasePort2 + link->io.NumPorts2 - 1); 369 link->io.BasePort2 + link->io.NumPorts2 - 1);
390 printk("\n"); 370 printk("\n");
391 371
392 return; 372 return 0;
393 373
394 cs_failed: 374 cs_failed:
395 cs_error(link->handle, last_fn, last_ret); 375 cs_error(link, last_fn, last_ret);
396 376
397 failed: 377 failed:
398 orinoco_cs_release(link); 378 orinoco_cs_release(link);
379 return -ENODEV;
399} /* orinoco_cs_config */ 380} /* orinoco_cs_config */
400 381
401/* 382/*
@@ -404,7 +385,7 @@ orinoco_cs_config(dev_link_t *link)
404 * still open, this will be postponed until it is closed. 385 * still open, this will be postponed until it is closed.
405 */ 386 */
406static void 387static void
407orinoco_cs_release(dev_link_t *link) 388orinoco_cs_release(struct pcmcia_device *link)
408{ 389{
409 struct net_device *dev = link->priv; 390 struct net_device *dev = link->priv;
410 struct orinoco_private *priv = netdev_priv(dev); 391 struct orinoco_private *priv = netdev_priv(dev);
@@ -416,88 +397,68 @@ orinoco_cs_release(dev_link_t *link)
416 priv->hw_unavailable++; 397 priv->hw_unavailable++;
417 spin_unlock_irqrestore(&priv->lock, flags); 398 spin_unlock_irqrestore(&priv->lock, flags);
418 399
419 /* Don't bother checking to see if these succeed or not */ 400 pcmcia_disable_device(link);
420 pcmcia_release_configuration(link->handle);
421 if (link->io.NumPorts1)
422 pcmcia_release_io(link->handle, &link->io);
423 if (link->irq.AssignedIRQ)
424 pcmcia_release_irq(link->handle, &link->irq);
425 link->state &= ~DEV_CONFIG;
426 if (priv->hw.iobase) 401 if (priv->hw.iobase)
427 ioport_unmap(priv->hw.iobase); 402 ioport_unmap(priv->hw.iobase);
428} /* orinoco_cs_release */ 403} /* orinoco_cs_release */
429 404
430static int orinoco_cs_suspend(struct pcmcia_device *p_dev) 405static int orinoco_cs_suspend(struct pcmcia_device *link)
431{ 406{
432 dev_link_t *link = dev_to_instance(p_dev);
433 struct net_device *dev = link->priv; 407 struct net_device *dev = link->priv;
434 struct orinoco_private *priv = netdev_priv(dev); 408 struct orinoco_private *priv = netdev_priv(dev);
435 struct orinoco_pccard *card = priv->card; 409 struct orinoco_pccard *card = priv->card;
436 int err = 0; 410 int err = 0;
437 unsigned long flags; 411 unsigned long flags;
438 412
439 link->state |= DEV_SUSPEND; 413 /* This is probably racy, but I can't think of
440 if (link->state & DEV_CONFIG) { 414 a better way, short of rewriting the PCMCIA
441 /* This is probably racy, but I can't think of 415 layer to not suck :-( */
442 a better way, short of rewriting the PCMCIA 416 if (! test_bit(0, &card->hard_reset_in_progress)) {
443 layer to not suck :-( */ 417 spin_lock_irqsave(&priv->lock, flags);
444 if (! test_bit(0, &card->hard_reset_in_progress)) {
445 spin_lock_irqsave(&priv->lock, flags);
446 418
447 err = __orinoco_down(dev); 419 err = __orinoco_down(dev);
448 if (err) 420 if (err)
449 printk(KERN_WARNING "%s: Error %d downing interface\n", 421 printk(KERN_WARNING "%s: Error %d downing interface\n",
450 dev->name, err); 422 dev->name, err);
451 423
452 netif_device_detach(dev); 424 netif_device_detach(dev);
453 priv->hw_unavailable++; 425 priv->hw_unavailable++;
454 426
455 spin_unlock_irqrestore(&priv->lock, flags); 427 spin_unlock_irqrestore(&priv->lock, flags);
456 }
457
458 pcmcia_release_configuration(link->handle);
459 } 428 }
460 429
461 return 0; 430 return 0;
462} 431}
463 432
464static int orinoco_cs_resume(struct pcmcia_device *p_dev) 433static int orinoco_cs_resume(struct pcmcia_device *link)
465{ 434{
466 dev_link_t *link = dev_to_instance(p_dev);
467 struct net_device *dev = link->priv; 435 struct net_device *dev = link->priv;
468 struct orinoco_private *priv = netdev_priv(dev); 436 struct orinoco_private *priv = netdev_priv(dev);
469 struct orinoco_pccard *card = priv->card; 437 struct orinoco_pccard *card = priv->card;
470 int err = 0; 438 int err = 0;
471 unsigned long flags; 439 unsigned long flags;
472 440
473 link->state &= ~DEV_SUSPEND; 441 if (! test_bit(0, &card->hard_reset_in_progress)) {
474 if (link->state & DEV_CONFIG) { 442 err = orinoco_reinit_firmware(dev);
475 /* FIXME: should we double check that this is 443 if (err) {
476 * the same card as we had before */ 444 printk(KERN_ERR "%s: Error %d re-initializing firmware\n",
477 pcmcia_request_configuration(link->handle, &link->conf); 445 dev->name, err);
478 446 return -EIO;
479 if (! test_bit(0, &card->hard_reset_in_progress)) { 447 }
480 err = orinoco_reinit_firmware(dev);
481 if (err) {
482 printk(KERN_ERR "%s: Error %d re-initializing firmware\n",
483 dev->name, err);
484 return -EIO;
485 }
486
487 spin_lock_irqsave(&priv->lock, flags);
488 448
489 netif_device_attach(dev); 449 spin_lock_irqsave(&priv->lock, flags);
490 priv->hw_unavailable--;
491 450
492 if (priv->open && ! priv->hw_unavailable) { 451 netif_device_attach(dev);
493 err = __orinoco_up(dev); 452 priv->hw_unavailable--;
494 if (err)
495 printk(KERN_ERR "%s: Error %d restarting card\n",
496 dev->name, err);
497 }
498 453
499 spin_unlock_irqrestore(&priv->lock, flags); 454 if (priv->open && ! priv->hw_unavailable) {
455 err = __orinoco_up(dev);
456 if (err)
457 printk(KERN_ERR "%s: Error %d restarting card\n",
458 dev->name, err);
500 } 459 }
460
461 spin_unlock_irqrestore(&priv->lock, flags);
501 } 462 }
502 463
503 return 0; 464 return 0;
@@ -604,7 +565,7 @@ static struct pcmcia_driver orinoco_driver = {
604 .drv = { 565 .drv = {
605 .name = DRIVER_NAME, 566 .name = DRIVER_NAME,
606 }, 567 },
607 .probe = orinoco_cs_attach, 568 .probe = orinoco_cs_probe,
608 .remove = orinoco_cs_detach, 569 .remove = orinoco_cs_detach,
609 .id_table = orinoco_cs_ids, 570 .id_table = orinoco_cs_ids,
610 .suspend = orinoco_cs_suspend, 571 .suspend = orinoco_cs_suspend,
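
The orinoco_cs conversion above also shows the new probe/config/release contract: the probe callback returns an int and calls the config routine directly (no DEV_CONFIG_PENDING handshake), the config routine reports failure through its return value, and release collapses pcmcia_release_configuration()/pcmcia_release_io()/pcmcia_release_irq() into a single pcmcia_disable_device(). A sketch of that skeleton under assumed placeholder names (example_probe, example_config, example_release, struct example_priv are not from this patch):

    #include <pcmcia/cs_types.h>
    #include <pcmcia/cs.h>
    #include <pcmcia/ds.h>
    #include <linux/etherdevice.h>

    struct example_priv {
    	dev_node_t node;	/* hooked up as link->dev_node once registered */
    };

    static int example_config(struct pcmcia_device *link);	/* defined elsewhere in the sketch */

    static int example_probe(struct pcmcia_device *link)
    {
    	struct net_device *dev;

    	dev = alloc_etherdev(sizeof(struct example_priv));
    	if (!dev)
    		return -ENOMEM;

    	/* Link both structures together through the priv pointer. */
    	link->priv = dev;
    	link->conf.Attributes = 0;
    	link->conf.IntType = INT_MEMORY_AND_IO;

    	/* Configure immediately; the result is the probe result. */
    	return example_config(link);
    }

    static void example_release(struct pcmcia_device *link)
    {
    	/* Replaces the old release_configuration/release_io/release_irq trio. */
    	pcmcia_disable_device(link);
    }
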
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 7880d8c31aad..879eb427607c 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -90,8 +90,8 @@ module_param(pc_debug, int, 0);
90#define DEBUG(n, args...) 90#define DEBUG(n, args...)
91#endif 91#endif
92/** Prototypes based on PCMCIA skeleton driver *******************************/ 92/** Prototypes based on PCMCIA skeleton driver *******************************/
93static void ray_config(dev_link_t *link); 93static int ray_config(struct pcmcia_device *link);
94static void ray_release(dev_link_t *link); 94static void ray_release(struct pcmcia_device *link);
95static void ray_detach(struct pcmcia_device *p_dev); 95static void ray_detach(struct pcmcia_device *p_dev);
96 96
97/***** Prototypes indicated by device structure ******************************/ 97/***** Prototypes indicated by device structure ******************************/
@@ -190,20 +190,17 @@ static int bc;
190static char *phy_addr = NULL; 190static char *phy_addr = NULL;
191 191
192 192
193/* A linked list of "instances" of the ray device. Each actual 193/* A struct pcmcia_device structure has fields for most things that are needed
194 PCMCIA card corresponds to one device instance, and is described
195 by one dev_link_t structure (defined in ds.h).
196*/
197static dev_link_t *dev_list = NULL;
198
199/* A dev_link_t structure has fields for most things that are needed
200 to keep track of a socket, but there will usually be some device 194 to keep track of a socket, but there will usually be some device
201 specific information that also needs to be kept track of. The 195 specific information that also needs to be kept track of. The
202 'priv' pointer in a dev_link_t structure can be used to point to 196 'priv' pointer in a struct pcmcia_device structure can be used to point to
203 a device-specific private data structure, like this. 197 a device-specific private data structure, like this.
204*/ 198*/
205static unsigned int ray_mem_speed = 500; 199static unsigned int ray_mem_speed = 500;
206 200
201/* WARNING: THIS DRIVER IS NOT CAPABLE OF HANDLING MULTIPLE DEVICES! */
202static struct pcmcia_device *this_device = NULL;
203
207MODULE_AUTHOR("Corey Thomas <corey@world.std.com>"); 204MODULE_AUTHOR("Corey Thomas <corey@world.std.com>");
208MODULE_DESCRIPTION("Raylink/WebGear wireless LAN driver"); 205MODULE_DESCRIPTION("Raylink/WebGear wireless LAN driver");
209MODULE_LICENSE("GPL"); 206MODULE_LICENSE("GPL");
@@ -306,56 +303,46 @@ static char rcsid[] = "Raylink/WebGear wireless LAN - Corey <Thomas corey@world.
306 configure the card at this point -- we wait until we receive a 303 configure the card at this point -- we wait until we receive a
307 card insertion event. 304 card insertion event.
308=============================================================================*/ 305=============================================================================*/
309static int ray_attach(struct pcmcia_device *p_dev) 306static int ray_probe(struct pcmcia_device *p_dev)
310{ 307{
311 dev_link_t *link;
312 ray_dev_t *local; 308 ray_dev_t *local;
313 struct net_device *dev; 309 struct net_device *dev;
314
315 DEBUG(1, "ray_attach()\n");
316 310
317 /* Initialize the dev_link_t structure */ 311 DEBUG(1, "ray_attach()\n");
318 link = kmalloc(sizeof(struct dev_link_t), GFP_KERNEL);
319
320 if (!link)
321 return -ENOMEM;
322 312
323 /* Allocate space for private device-specific data */ 313 /* Allocate space for private device-specific data */
324 dev = alloc_etherdev(sizeof(ray_dev_t)); 314 dev = alloc_etherdev(sizeof(ray_dev_t));
325
326 if (!dev) 315 if (!dev)
327 goto fail_alloc_dev; 316 goto fail_alloc_dev;
328 317
329 local = dev->priv; 318 local = dev->priv;
330 319 local->finder = p_dev;
331 memset(link, 0, sizeof(struct dev_link_t));
332 320
333 /* The io structure describes IO port mapping. None used here */ 321 /* The io structure describes IO port mapping. None used here */
334 link->io.NumPorts1 = 0; 322 p_dev->io.NumPorts1 = 0;
335 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 323 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
336 link->io.IOAddrLines = 5; 324 p_dev->io.IOAddrLines = 5;
337 325
338 /* Interrupt setup. For PCMCIA, driver takes what's given */ 326 /* Interrupt setup. For PCMCIA, driver takes what's given */
339 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 327 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
340 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 328 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
341 link->irq.Handler = &ray_interrupt; 329 p_dev->irq.Handler = &ray_interrupt;
342 330
343 /* General socket configuration */ 331 /* General socket configuration */
344 link->conf.Attributes = CONF_ENABLE_IRQ; 332 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
345 link->conf.Vcc = 50; 333 p_dev->conf.IntType = INT_MEMORY_AND_IO;
346 link->conf.IntType = INT_MEMORY_AND_IO; 334 p_dev->conf.ConfigIndex = 1;
347 link->conf.ConfigIndex = 1; 335 p_dev->conf.Present = PRESENT_OPTION;
348 link->conf.Present = PRESENT_OPTION; 336
349 337 p_dev->priv = dev;
350 link->priv = dev; 338 p_dev->irq.Instance = dev;
351 link->irq.Instance = dev;
352 339
353 local->finder = link; 340 local->finder = p_dev;
354 local->card_status = CARD_INSERTED; 341 local->card_status = CARD_INSERTED;
355 local->authentication_state = UNAUTHENTICATED; 342 local->authentication_state = UNAUTHENTICATED;
356 local->num_multi = 0; 343 local->num_multi = 0;
357 DEBUG(2,"ray_attach link = %p, dev = %p, local = %p, intr = %p\n", 344 DEBUG(2,"ray_attach p_dev = %p, dev = %p, local = %p, intr = %p\n",
358 link,dev,local,&ray_interrupt); 345 p_dev,dev,local,&ray_interrupt);
359 346
360 /* Raylink entries in the device structure */ 347 /* Raylink entries in the device structure */
361 dev->hard_start_xmit = &ray_dev_start_xmit; 348 dev->hard_start_xmit = &ray_dev_start_xmit;
@@ -379,16 +366,10 @@ static int ray_attach(struct pcmcia_device *p_dev)
379 366
380 init_timer(&local->timer); 367 init_timer(&local->timer);
381 368
382 link->handle = p_dev; 369 this_device = p_dev;
383 p_dev->instance = link; 370 return ray_config(p_dev);
384
385 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
386 ray_config(link);
387
388 return 0;
389 371
390fail_alloc_dev: 372fail_alloc_dev:
391 kfree(link);
392 return -ENOMEM; 373 return -ENOMEM;
393} /* ray_attach */ 374} /* ray_attach */
394/*============================================================================= 375/*=============================================================================
@@ -397,37 +378,25 @@ fail_alloc_dev:
397 structures are freed. Otherwise, the structures will be freed 378 structures are freed. Otherwise, the structures will be freed
398 when the device is released. 379 when the device is released.
399=============================================================================*/ 380=============================================================================*/
400static void ray_detach(struct pcmcia_device *p_dev) 381static void ray_detach(struct pcmcia_device *link)
401{ 382{
402 dev_link_t *link = dev_to_instance(p_dev);
403 dev_link_t **linkp;
404 struct net_device *dev; 383 struct net_device *dev;
405 ray_dev_t *local; 384 ray_dev_t *local;
406 385
407 DEBUG(1, "ray_detach(0x%p)\n", link); 386 DEBUG(1, "ray_detach(0x%p)\n", link);
408
409 /* Locate device structure */
410 for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
411 if (*linkp == link) break;
412 if (*linkp == NULL)
413 return;
414 387
388 this_device = NULL;
415 dev = link->priv; 389 dev = link->priv;
416 390
417 if (link->state & DEV_CONFIG) { 391 ray_release(link);
418 ray_release(link);
419 392
420 local = (ray_dev_t *)dev->priv; 393 local = (ray_dev_t *)dev->priv;
421 del_timer(&local->timer); 394 del_timer(&local->timer);
422 }
423 395
424 /* Unlink device structure, free pieces */
425 *linkp = link->next;
426 if (link->priv) { 396 if (link->priv) {
427 if (link->dev) unregister_netdev(dev); 397 if (link->dev_node) unregister_netdev(dev);
428 free_netdev(dev); 398 free_netdev(dev);
429 } 399 }
430 kfree(link);
431 DEBUG(2,"ray_cs ray_detach ending\n"); 400 DEBUG(2,"ray_cs ray_detach ending\n");
432} /* ray_detach */ 401} /* ray_detach */
433/*============================================================================= 402/*=============================================================================
@@ -438,9 +407,8 @@ static void ray_detach(struct pcmcia_device *p_dev)
438#define CS_CHECK(fn, ret) \ 407#define CS_CHECK(fn, ret) \
439do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 408do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
440#define MAX_TUPLE_SIZE 128 409#define MAX_TUPLE_SIZE 128
441static void ray_config(dev_link_t *link) 410static int ray_config(struct pcmcia_device *link)
442{ 411{
443 client_handle_t handle = link->handle;
444 tuple_t tuple; 412 tuple_t tuple;
445 cisparse_t parse; 413 cisparse_t parse;
446 int last_fn = 0, last_ret = 0; 414 int last_fn = 0, last_ret = 0;
@@ -455,48 +423,45 @@ static void ray_config(dev_link_t *link)
455 423
456 /* This reads the card's CONFIG tuple to find its configuration regs */ 424 /* This reads the card's CONFIG tuple to find its configuration regs */
457 tuple.DesiredTuple = CISTPL_CONFIG; 425 tuple.DesiredTuple = CISTPL_CONFIG;
458 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 426 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
459 tuple.TupleData = buf; 427 tuple.TupleData = buf;
460 tuple.TupleDataMax = MAX_TUPLE_SIZE; 428 tuple.TupleDataMax = MAX_TUPLE_SIZE;
461 tuple.TupleOffset = 0; 429 tuple.TupleOffset = 0;
462 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 430 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
463 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 431 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
464 link->conf.ConfigBase = parse.config.base; 432 link->conf.ConfigBase = parse.config.base;
465 link->conf.Present = parse.config.rmask[0]; 433 link->conf.Present = parse.config.rmask[0];
466 434
467 /* Determine card type and firmware version */ 435 /* Determine card type and firmware version */
468 buf[0] = buf[MAX_TUPLE_SIZE - 1] = 0; 436 buf[0] = buf[MAX_TUPLE_SIZE - 1] = 0;
469 tuple.DesiredTuple = CISTPL_VERS_1; 437 tuple.DesiredTuple = CISTPL_VERS_1;
470 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 438 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
471 tuple.TupleData = buf; 439 tuple.TupleData = buf;
472 tuple.TupleDataMax = MAX_TUPLE_SIZE; 440 tuple.TupleDataMax = MAX_TUPLE_SIZE;
473 tuple.TupleOffset = 2; 441 tuple.TupleOffset = 2;
474 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 442 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
475 443
476 for (i=0; i<tuple.TupleDataLen - 4; i++) 444 for (i=0; i<tuple.TupleDataLen - 4; i++)
477 if (buf[i] == 0) buf[i] = ' '; 445 if (buf[i] == 0) buf[i] = ' ';
478 printk(KERN_INFO "ray_cs Detected: %s\n",buf); 446 printk(KERN_INFO "ray_cs Detected: %s\n",buf);
479 447
480 /* Configure card */
481 link->state |= DEV_CONFIG;
482
483 /* Now allocate an interrupt line. Note that this does not 448 /* Now allocate an interrupt line. Note that this does not
484 actually assign a handler to the interrupt. 449 actually assign a handler to the interrupt.
485 */ 450 */
486 CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq)); 451 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
487 dev->irq = link->irq.AssignedIRQ; 452 dev->irq = link->irq.AssignedIRQ;
488 453
489 /* This actually configures the PCMCIA socket -- setting up 454 /* This actually configures the PCMCIA socket -- setting up
490 the I/O windows and the interrupt mapping. 455 the I/O windows and the interrupt mapping.
491 */ 456 */
492 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf)); 457 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
493 458
494/*** Set up 32k window for shared memory (transmit and control) ************/ 459/*** Set up 32k window for shared memory (transmit and control) ************/
495 req.Attributes = WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_CM | WIN_ENABLE | WIN_USE_WAIT; 460 req.Attributes = WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_CM | WIN_ENABLE | WIN_USE_WAIT;
496 req.Base = 0; 461 req.Base = 0;
497 req.Size = 0x8000; 462 req.Size = 0x8000;
498 req.AccessSpeed = ray_mem_speed; 463 req.AccessSpeed = ray_mem_speed;
499 CS_CHECK(RequestWindow, pcmcia_request_window(&link->handle, &req, &link->win)); 464 CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &link->win));
500 mem.CardOffset = 0x0000; mem.Page = 0; 465 mem.CardOffset = 0x0000; mem.Page = 0;
501 CS_CHECK(MapMemPage, pcmcia_map_mem_page(link->win, &mem)); 466 CS_CHECK(MapMemPage, pcmcia_map_mem_page(link->win, &mem));
502 local->sram = ioremap(req.Base,req.Size); 467 local->sram = ioremap(req.Base,req.Size);
@@ -506,7 +471,7 @@ static void ray_config(dev_link_t *link)
506 req.Base = 0; 471 req.Base = 0;
507 req.Size = 0x4000; 472 req.Size = 0x4000;
508 req.AccessSpeed = ray_mem_speed; 473 req.AccessSpeed = ray_mem_speed;
509 CS_CHECK(RequestWindow, pcmcia_request_window(&link->handle, &req, &local->rmem_handle)); 474 CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &local->rmem_handle));
510 mem.CardOffset = 0x8000; mem.Page = 0; 475 mem.CardOffset = 0x8000; mem.Page = 0;
511 CS_CHECK(MapMemPage, pcmcia_map_mem_page(local->rmem_handle, &mem)); 476 CS_CHECK(MapMemPage, pcmcia_map_mem_page(local->rmem_handle, &mem));
512 local->rmem = ioremap(req.Base,req.Size); 477 local->rmem = ioremap(req.Base,req.Size);
@@ -516,7 +481,7 @@ static void ray_config(dev_link_t *link)
516 req.Base = 0; 481 req.Base = 0;
517 req.Size = 0x1000; 482 req.Size = 0x1000;
518 req.AccessSpeed = ray_mem_speed; 483 req.AccessSpeed = ray_mem_speed;
519 CS_CHECK(RequestWindow, pcmcia_request_window(&link->handle, &req, &local->amem_handle)); 484 CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &local->amem_handle));
520 mem.CardOffset = 0x0000; mem.Page = 0; 485 mem.CardOffset = 0x0000; mem.Page = 0;
521 CS_CHECK(MapMemPage, pcmcia_map_mem_page(local->amem_handle, &mem)); 486 CS_CHECK(MapMemPage, pcmcia_map_mem_page(local->amem_handle, &mem));
522 local->amem = ioremap(req.Base,req.Size); 487 local->amem = ioremap(req.Base,req.Size);
@@ -526,32 +491,32 @@ static void ray_config(dev_link_t *link)
526 DEBUG(3,"ray_config amem=%p\n",local->amem); 491 DEBUG(3,"ray_config amem=%p\n",local->amem);
527 if (ray_init(dev) < 0) { 492 if (ray_init(dev) < 0) {
528 ray_release(link); 493 ray_release(link);
529 return; 494 return -ENODEV;
530 } 495 }
531 496
532 SET_NETDEV_DEV(dev, &handle_to_dev(handle)); 497 SET_NETDEV_DEV(dev, &handle_to_dev(link));
533 i = register_netdev(dev); 498 i = register_netdev(dev);
534 if (i != 0) { 499 if (i != 0) {
535 printk("ray_config register_netdev() failed\n"); 500 printk("ray_config register_netdev() failed\n");
536 ray_release(link); 501 ray_release(link);
537 return; 502 return i;
538 } 503 }
539 504
540 strcpy(local->node.dev_name, dev->name); 505 strcpy(local->node.dev_name, dev->name);
541 link->dev = &local->node; 506 link->dev_node = &local->node;
542 507
543 link->state &= ~DEV_CONFIG_PENDING;
544 printk(KERN_INFO "%s: RayLink, irq %d, hw_addr ", 508 printk(KERN_INFO "%s: RayLink, irq %d, hw_addr ",
545 dev->name, dev->irq); 509 dev->name, dev->irq);
546 for (i = 0; i < 6; i++) 510 for (i = 0; i < 6; i++)
547 printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n")); 511 printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
548 512
549 return; 513 return 0;
550 514
551cs_failed: 515cs_failed:
552 cs_error(link->handle, last_fn, last_ret); 516 cs_error(link, last_fn, last_ret);
553 517
554 ray_release(link); 518 ray_release(link);
519 return -ENODEV;
555} /* ray_config */ 520} /* ray_config */
556 521
557static inline struct ccs __iomem *ccs_base(ray_dev_t *dev) 522static inline struct ccs __iomem *ccs_base(ray_dev_t *dev)
@@ -578,9 +543,9 @@ static int ray_init(struct net_device *dev)
578 UCHAR *p; 543 UCHAR *p;
579 struct ccs __iomem *pccs; 544 struct ccs __iomem *pccs;
580 ray_dev_t *local = (ray_dev_t *)dev->priv; 545 ray_dev_t *local = (ray_dev_t *)dev->priv;
581 dev_link_t *link = local->finder; 546 struct pcmcia_device *link = local->finder;
582 DEBUG(1, "ray_init(0x%p)\n", dev); 547 DEBUG(1, "ray_init(0x%p)\n", dev);
583 if (!(link->state & DEV_PRESENT)) { 548 if (!(pcmcia_dev_present(link))) {
584 DEBUG(0,"ray_init - device not present\n"); 549 DEBUG(0,"ray_init - device not present\n");
585 return -1; 550 return -1;
586 } 551 }
@@ -640,10 +605,10 @@ static int dl_startup_params(struct net_device *dev)
640 int ccsindex; 605 int ccsindex;
641 ray_dev_t *local = (ray_dev_t *)dev->priv; 606 ray_dev_t *local = (ray_dev_t *)dev->priv;
642 struct ccs __iomem *pccs; 607 struct ccs __iomem *pccs;
643 dev_link_t *link = local->finder; 608 struct pcmcia_device *link = local->finder;
644 609
645 DEBUG(1,"dl_startup_params entered\n"); 610 DEBUG(1,"dl_startup_params entered\n");
646 if (!(link->state & DEV_PRESENT)) { 611 if (!(pcmcia_dev_present(link))) {
647 DEBUG(2,"ray_cs dl_startup_params - device not present\n"); 612 DEBUG(2,"ray_cs dl_startup_params - device not present\n");
648 return -1; 613 return -1;
649 } 614 }
@@ -747,9 +712,9 @@ static void verify_dl_startup(u_long data)
747 ray_dev_t *local = (ray_dev_t *)data; 712 ray_dev_t *local = (ray_dev_t *)data;
748 struct ccs __iomem *pccs = ccs_base(local) + local->dl_param_ccs; 713 struct ccs __iomem *pccs = ccs_base(local) + local->dl_param_ccs;
749 UCHAR status; 714 UCHAR status;
750 dev_link_t *link = local->finder; 715 struct pcmcia_device *link = local->finder;
751 716
752 if (!(link->state & DEV_PRESENT)) { 717 if (!(pcmcia_dev_present(link))) {
753 DEBUG(2,"ray_cs verify_dl_startup - device not present\n"); 718 DEBUG(2,"ray_cs verify_dl_startup - device not present\n");
754 return; 719 return;
755 } 720 }
@@ -787,8 +752,8 @@ static void start_net(u_long data)
787 ray_dev_t *local = (ray_dev_t *)data; 752 ray_dev_t *local = (ray_dev_t *)data;
788 struct ccs __iomem *pccs; 753 struct ccs __iomem *pccs;
789 int ccsindex; 754 int ccsindex;
790 dev_link_t *link = local->finder; 755 struct pcmcia_device *link = local->finder;
791 if (!(link->state & DEV_PRESENT)) { 756 if (!(pcmcia_dev_present(link))) {
792 DEBUG(2,"ray_cs start_net - device not present\n"); 757 DEBUG(2,"ray_cs start_net - device not present\n");
793 return; 758 return;
794 } 759 }
@@ -814,9 +779,9 @@ static void join_net(u_long data)
814 779
815 struct ccs __iomem *pccs; 780 struct ccs __iomem *pccs;
816 int ccsindex; 781 int ccsindex;
817 dev_link_t *link = local->finder; 782 struct pcmcia_device *link = local->finder;
818 783
819 if (!(link->state & DEV_PRESENT)) { 784 if (!(pcmcia_dev_present(link))) {
820 DEBUG(2,"ray_cs join_net - device not present\n"); 785 DEBUG(2,"ray_cs join_net - device not present\n");
821 return; 786 return;
822 } 787 }
@@ -840,7 +805,7 @@ static void join_net(u_long data)
840 device, and release the PCMCIA configuration. If the device is 805 device, and release the PCMCIA configuration. If the device is
841 still open, this will be postponed until it is closed. 806 still open, this will be postponed until it is closed.
842=============================================================================*/ 807=============================================================================*/
843static void ray_release(dev_link_t *link) 808static void ray_release(struct pcmcia_device *link)
844{ 809{
845 struct net_device *dev = link->priv; 810 struct net_device *dev = link->priv;
846 ray_dev_t *local = dev->priv; 811 ray_dev_t *local = dev->priv;
@@ -849,56 +814,38 @@ static void ray_release(dev_link_t *link)
849 DEBUG(1, "ray_release(0x%p)\n", link); 814 DEBUG(1, "ray_release(0x%p)\n", link);
850 815
851 del_timer(&local->timer); 816 del_timer(&local->timer);
852 link->state &= ~DEV_CONFIG;
853 817
854 iounmap(local->sram); 818 iounmap(local->sram);
855 iounmap(local->rmem); 819 iounmap(local->rmem);
856 iounmap(local->amem); 820 iounmap(local->amem);
857 /* Do bother checking to see if these succeed or not */ 821 /* Do bother checking to see if these succeed or not */
858 i = pcmcia_release_window(link->win);
859 if ( i != CS_SUCCESS ) DEBUG(0,"ReleaseWindow(link->win) ret = %x\n",i);
860 i = pcmcia_release_window(local->amem_handle); 822 i = pcmcia_release_window(local->amem_handle);
861 if ( i != CS_SUCCESS ) DEBUG(0,"ReleaseWindow(local->amem) ret = %x\n",i); 823 if ( i != CS_SUCCESS ) DEBUG(0,"ReleaseWindow(local->amem) ret = %x\n",i);
862 i = pcmcia_release_window(local->rmem_handle); 824 i = pcmcia_release_window(local->rmem_handle);
863 if ( i != CS_SUCCESS ) DEBUG(0,"ReleaseWindow(local->rmem) ret = %x\n",i); 825 if ( i != CS_SUCCESS ) DEBUG(0,"ReleaseWindow(local->rmem) ret = %x\n",i);
864 i = pcmcia_release_configuration(link->handle); 826 pcmcia_disable_device(link);
865 if ( i != CS_SUCCESS ) DEBUG(0,"ReleaseConfiguration ret = %x\n",i);
866 i = pcmcia_release_irq(link->handle, &link->irq);
867 if ( i != CS_SUCCESS ) DEBUG(0,"ReleaseIRQ ret = %x\n",i);
868 827
869 DEBUG(2,"ray_release ending\n"); 828 DEBUG(2,"ray_release ending\n");
870} 829}
871 830
872static int ray_suspend(struct pcmcia_device *p_dev) 831static int ray_suspend(struct pcmcia_device *link)
873{ 832{
874 dev_link_t *link = dev_to_instance(p_dev);
875 struct net_device *dev = link->priv; 833 struct net_device *dev = link->priv;
876 834
877 link->state |= DEV_SUSPEND; 835 if (link->open)
878 if (link->state & DEV_CONFIG) { 836 netif_device_detach(dev);
879 if (link->open)
880 netif_device_detach(dev);
881
882 pcmcia_release_configuration(link->handle);
883 }
884
885 837
886 return 0; 838 return 0;
887} 839}
888 840
889static int ray_resume(struct pcmcia_device *p_dev) 841static int ray_resume(struct pcmcia_device *link)
890{ 842{
891 dev_link_t *link = dev_to_instance(p_dev);
892 struct net_device *dev = link->priv; 843 struct net_device *dev = link->priv;
893 844
894 link->state &= ~DEV_SUSPEND; 845 if (link->open) {
895 if (link->state & DEV_CONFIG) { 846 ray_reset(dev);
896 pcmcia_request_configuration(link->handle, &link->conf); 847 netif_device_attach(dev);
897 if (link->open) { 848 }
898 ray_reset(dev);
899 netif_device_attach(dev);
900 }
901 }
902 849
903 return 0; 850 return 0;
904} 851}
@@ -910,10 +857,10 @@ int ray_dev_init(struct net_device *dev)
910 int i; 857 int i;
911#endif /* RAY_IMMEDIATE_INIT */ 858#endif /* RAY_IMMEDIATE_INIT */
912 ray_dev_t *local = dev->priv; 859 ray_dev_t *local = dev->priv;
913 dev_link_t *link = local->finder; 860 struct pcmcia_device *link = local->finder;
914 861
915 DEBUG(1,"ray_dev_init(dev=%p)\n",dev); 862 DEBUG(1,"ray_dev_init(dev=%p)\n",dev);
916 if (!(link->state & DEV_PRESENT)) { 863 if (!(pcmcia_dev_present(link))) {
917 DEBUG(2,"ray_dev_init - device not present\n"); 864 DEBUG(2,"ray_dev_init - device not present\n");
918 return -1; 865 return -1;
919 } 866 }
@@ -944,10 +891,10 @@ int ray_dev_init(struct net_device *dev)
944static int ray_dev_config(struct net_device *dev, struct ifmap *map) 891static int ray_dev_config(struct net_device *dev, struct ifmap *map)
945{ 892{
946 ray_dev_t *local = dev->priv; 893 ray_dev_t *local = dev->priv;
947 dev_link_t *link = local->finder; 894 struct pcmcia_device *link = local->finder;
948 /* Dummy routine to satisfy device structure */ 895 /* Dummy routine to satisfy device structure */
949 DEBUG(1,"ray_dev_config(dev=%p,ifmap=%p)\n",dev,map); 896 DEBUG(1,"ray_dev_config(dev=%p,ifmap=%p)\n",dev,map);
950 if (!(link->state & DEV_PRESENT)) { 897 if (!(pcmcia_dev_present(link))) {
951 DEBUG(2,"ray_dev_config - device not present\n"); 898 DEBUG(2,"ray_dev_config - device not present\n");
952 return -1; 899 return -1;
953 } 900 }
@@ -958,10 +905,10 @@ static int ray_dev_config(struct net_device *dev, struct ifmap *map)
958static int ray_dev_start_xmit(struct sk_buff *skb, struct net_device *dev) 905static int ray_dev_start_xmit(struct sk_buff *skb, struct net_device *dev)
959{ 906{
960 ray_dev_t *local = dev->priv; 907 ray_dev_t *local = dev->priv;
961 dev_link_t *link = local->finder; 908 struct pcmcia_device *link = local->finder;
962 short length = skb->len; 909 short length = skb->len;
963 910
964 if (!(link->state & DEV_PRESENT)) { 911 if (!(pcmcia_dev_present(link))) {
965 DEBUG(2,"ray_dev_start_xmit - device not present\n"); 912 DEBUG(2,"ray_dev_start_xmit - device not present\n");
966 return -1; 913 return -1;
967 } 914 }
@@ -1570,7 +1517,7 @@ static int ray_commit(struct net_device *dev,
1570static iw_stats * ray_get_wireless_stats(struct net_device * dev) 1517static iw_stats * ray_get_wireless_stats(struct net_device * dev)
1571{ 1518{
1572 ray_dev_t * local = (ray_dev_t *) dev->priv; 1519 ray_dev_t * local = (ray_dev_t *) dev->priv;
1573 dev_link_t *link = local->finder; 1520 struct pcmcia_device *link = local->finder;
1574 struct status __iomem *p = local->sram + STATUS_BASE; 1521 struct status __iomem *p = local->sram + STATUS_BASE;
1575 1522
1576 if(local == (ray_dev_t *) NULL) 1523 if(local == (ray_dev_t *) NULL)
@@ -1588,7 +1535,7 @@ static iw_stats * ray_get_wireless_stats(struct net_device * dev)
1588 } 1535 }
1589#endif /* WIRELESS_SPY */ 1536#endif /* WIRELESS_SPY */
1590 1537
1591 if((link->state & DEV_PRESENT)) { 1538 if(pcmcia_dev_present(link)) {
1592 local->wstats.qual.noise = readb(&p->rxnoise); 1539 local->wstats.qual.noise = readb(&p->rxnoise);
1593 local->wstats.qual.updated |= 4; 1540 local->wstats.qual.updated |= 4;
1594 } 1541 }
@@ -1657,18 +1604,14 @@ static const struct iw_handler_def ray_handler_def =
1657/*===========================================================================*/ 1604/*===========================================================================*/
1658static int ray_open(struct net_device *dev) 1605static int ray_open(struct net_device *dev)
1659{ 1606{
1660 dev_link_t *link;
1661 ray_dev_t *local = (ray_dev_t *)dev->priv; 1607 ray_dev_t *local = (ray_dev_t *)dev->priv;
1608 struct pcmcia_device *link;
1609 link = local->finder;
1662 1610
1663 DEBUG(1, "ray_open('%s')\n", dev->name); 1611 DEBUG(1, "ray_open('%s')\n", dev->name);
1664 1612
1665 for (link = dev_list; link; link = link->next) 1613 if (link->open == 0)
1666 if (link->priv == dev) break; 1614 local->num_multi = 0;
1667 if (!DEV_OK(link)) {
1668 return -ENODEV;
1669 }
1670
1671 if (link->open == 0) local->num_multi = 0;
1672 link->open++; 1615 link->open++;
1673 1616
1674 /* If the card is not started, time to start it ! - Jean II */ 1617 /* If the card is not started, time to start it ! - Jean II */
@@ -1695,15 +1638,12 @@ static int ray_open(struct net_device *dev)
1695/*===========================================================================*/ 1638/*===========================================================================*/
1696static int ray_dev_close(struct net_device *dev) 1639static int ray_dev_close(struct net_device *dev)
1697{ 1640{
1698 dev_link_t *link; 1641 ray_dev_t *local = (ray_dev_t *)dev->priv;
1642 struct pcmcia_device *link;
1643 link = local->finder;
1699 1644
1700 DEBUG(1, "ray_dev_close('%s')\n", dev->name); 1645 DEBUG(1, "ray_dev_close('%s')\n", dev->name);
1701 1646
1702 for (link = dev_list; link; link = link->next)
1703 if (link->priv == dev) break;
1704 if (link == NULL)
1705 return -ENODEV;
1706
1707 link->open--; 1647 link->open--;
1708 netif_stop_queue(dev); 1648 netif_stop_queue(dev);
1709 1649
@@ -1725,9 +1665,9 @@ static void ray_reset(struct net_device *dev) {
1725static int interrupt_ecf(ray_dev_t *local, int ccs) 1665static int interrupt_ecf(ray_dev_t *local, int ccs)
1726{ 1666{
1727 int i = 50; 1667 int i = 50;
1728 dev_link_t *link = local->finder; 1668 struct pcmcia_device *link = local->finder;
1729 1669
1730 if (!(link->state & DEV_PRESENT)) { 1670 if (!(pcmcia_dev_present(link))) {
1731 DEBUG(2,"ray_cs interrupt_ecf - device not present\n"); 1671 DEBUG(2,"ray_cs interrupt_ecf - device not present\n");
1732 return -1; 1672 return -1;
1733 } 1673 }
@@ -1752,9 +1692,9 @@ static int get_free_tx_ccs(ray_dev_t *local)
1752{ 1692{
1753 int i; 1693 int i;
1754 struct ccs __iomem *pccs = ccs_base(local); 1694 struct ccs __iomem *pccs = ccs_base(local);
1755 dev_link_t *link = local->finder; 1695 struct pcmcia_device *link = local->finder;
1756 1696
1757 if (!(link->state & DEV_PRESENT)) { 1697 if (!(pcmcia_dev_present(link))) {
1758 DEBUG(2,"ray_cs get_free_tx_ccs - device not present\n"); 1698 DEBUG(2,"ray_cs get_free_tx_ccs - device not present\n");
1759 return ECARDGONE; 1699 return ECARDGONE;
1760 } 1700 }
@@ -1783,9 +1723,9 @@ static int get_free_ccs(ray_dev_t *local)
1783{ 1723{
1784 int i; 1724 int i;
1785 struct ccs __iomem *pccs = ccs_base(local); 1725 struct ccs __iomem *pccs = ccs_base(local);
1786 dev_link_t *link = local->finder; 1726 struct pcmcia_device *link = local->finder;
1787 1727
1788 if (!(link->state & DEV_PRESENT)) { 1728 if (!(pcmcia_dev_present(link))) {
1789 DEBUG(2,"ray_cs get_free_ccs - device not present\n"); 1729 DEBUG(2,"ray_cs get_free_ccs - device not present\n");
1790 return ECARDGONE; 1730 return ECARDGONE;
1791 } 1731 }
@@ -1858,9 +1798,9 @@ static int parse_addr(char *in_str, UCHAR *out)
1858static struct net_device_stats *ray_get_stats(struct net_device *dev) 1798static struct net_device_stats *ray_get_stats(struct net_device *dev)
1859{ 1799{
1860 ray_dev_t *local = (ray_dev_t *)dev->priv; 1800 ray_dev_t *local = (ray_dev_t *)dev->priv;
1861 dev_link_t *link = local->finder; 1801 struct pcmcia_device *link = local->finder;
1862 struct status __iomem *p = local->sram + STATUS_BASE; 1802 struct status __iomem *p = local->sram + STATUS_BASE;
1863 if (!(link->state & DEV_PRESENT)) { 1803 if (!(pcmcia_dev_present(link))) {
1864 DEBUG(2,"ray_cs net_device_stats - device not present\n"); 1804 DEBUG(2,"ray_cs net_device_stats - device not present\n");
1865 return &local->stats; 1805 return &local->stats;
1866 } 1806 }
@@ -1888,12 +1828,12 @@ static struct net_device_stats *ray_get_stats(struct net_device *dev)
1888static void ray_update_parm(struct net_device *dev, UCHAR objid, UCHAR *value, int len) 1828static void ray_update_parm(struct net_device *dev, UCHAR objid, UCHAR *value, int len)
1889{ 1829{
1890 ray_dev_t *local = (ray_dev_t *)dev->priv; 1830 ray_dev_t *local = (ray_dev_t *)dev->priv;
1891 dev_link_t *link = local->finder; 1831 struct pcmcia_device *link = local->finder;
1892 int ccsindex; 1832 int ccsindex;
1893 int i; 1833 int i;
1894 struct ccs __iomem *pccs; 1834 struct ccs __iomem *pccs;
1895 1835
1896 if (!(link->state & DEV_PRESENT)) { 1836 if (!(pcmcia_dev_present(link))) {
1897 DEBUG(2,"ray_update_parm - device not present\n"); 1837 DEBUG(2,"ray_update_parm - device not present\n");
1898 return; 1838 return;
1899 } 1839 }
@@ -1925,10 +1865,10 @@ static void ray_update_multi_list(struct net_device *dev, int all)
1925 struct ccs __iomem *pccs; 1865 struct ccs __iomem *pccs;
1926 int i = 0; 1866 int i = 0;
1927 ray_dev_t *local = (ray_dev_t *)dev->priv; 1867 ray_dev_t *local = (ray_dev_t *)dev->priv;
1928 dev_link_t *link = local->finder; 1868 struct pcmcia_device *link = local->finder;
1929 void __iomem *p = local->sram + HOST_TO_ECF_BASE; 1869 void __iomem *p = local->sram + HOST_TO_ECF_BASE;
1930 1870
1931 if (!(link->state & DEV_PRESENT)) { 1871 if (!(pcmcia_dev_present(link))) {
1932 DEBUG(2,"ray_update_multi_list - device not present\n"); 1872 DEBUG(2,"ray_update_multi_list - device not present\n");
1933 return; 1873 return;
1934 } 1874 }
@@ -2005,7 +1945,7 @@ static void set_multicast_list(struct net_device *dev)
2005static irqreturn_t ray_interrupt(int irq, void *dev_id, struct pt_regs * regs) 1945static irqreturn_t ray_interrupt(int irq, void *dev_id, struct pt_regs * regs)
2006{ 1946{
2007 struct net_device *dev = (struct net_device *)dev_id; 1947 struct net_device *dev = (struct net_device *)dev_id;
2008 dev_link_t *link; 1948 struct pcmcia_device *link;
2009 ray_dev_t *local; 1949 ray_dev_t *local;
2010 struct ccs __iomem *pccs; 1950 struct ccs __iomem *pccs;
2011 struct rcs __iomem *prcs; 1951 struct rcs __iomem *prcs;
@@ -2020,8 +1960,8 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id, struct pt_regs * regs)
2020 DEBUG(4,"ray_cs: interrupt for *dev=%p\n",dev); 1960 DEBUG(4,"ray_cs: interrupt for *dev=%p\n",dev);
2021 1961
2022 local = (ray_dev_t *)dev->priv; 1962 local = (ray_dev_t *)dev->priv;
2023 link = (dev_link_t *)local->finder; 1963 link = (struct pcmcia_device *)local->finder;
2024 if ( ! (link->state & DEV_PRESENT) || link->state & DEV_SUSPEND ) { 1964 if (!pcmcia_dev_present(link)) {
2025 DEBUG(2,"ray_cs interrupt from device not present or suspended.\n"); 1965 DEBUG(2,"ray_cs interrupt from device not present or suspended.\n");
2026 return IRQ_NONE; 1966 return IRQ_NONE;
2027 } 1967 }
@@ -2540,9 +2480,9 @@ static void release_frag_chain(ray_dev_t *local, struct rcs __iomem * prcs)
2540/*===========================================================================*/ 2480/*===========================================================================*/
2541static void authenticate(ray_dev_t *local) 2481static void authenticate(ray_dev_t *local)
2542{ 2482{
2543 dev_link_t *link = local->finder; 2483 struct pcmcia_device *link = local->finder;
2544 DEBUG(0,"ray_cs Starting authentication.\n"); 2484 DEBUG(0,"ray_cs Starting authentication.\n");
2545 if (!(link->state & DEV_PRESENT)) { 2485 if (!(pcmcia_dev_present(link))) {
2546 DEBUG(2,"ray_cs authenticate - device not present\n"); 2486 DEBUG(2,"ray_cs authenticate - device not present\n");
2547 return; 2487 return;
2548 } 2488 }
@@ -2606,10 +2546,10 @@ static void rx_authenticate(ray_dev_t *local, struct rcs __iomem *prcs,
2606static void associate(ray_dev_t *local) 2546static void associate(ray_dev_t *local)
2607{ 2547{
2608 struct ccs __iomem *pccs; 2548 struct ccs __iomem *pccs;
2609 dev_link_t *link = local->finder; 2549 struct pcmcia_device *link = local->finder;
2610 struct net_device *dev = link->priv; 2550 struct net_device *dev = link->priv;
2611 int ccsindex; 2551 int ccsindex;
2612 if (!(link->state & DEV_PRESENT)) { 2552 if (!(pcmcia_dev_present(link))) {
2613 DEBUG(2,"ray_cs associate - device not present\n"); 2553 DEBUG(2,"ray_cs associate - device not present\n");
2614 return; 2554 return;
2615 } 2555 }
@@ -2689,14 +2629,14 @@ static int ray_cs_proc_read(char *buf, char **start, off_t offset, int len)
2689 * eg ifconfig 2629 * eg ifconfig
2690 */ 2630 */
2691 int i; 2631 int i;
2692 dev_link_t *link; 2632 struct pcmcia_device *link;
2693 struct net_device *dev; 2633 struct net_device *dev;
2694 ray_dev_t *local; 2634 ray_dev_t *local;
2695 UCHAR *p; 2635 UCHAR *p;
2696 struct freq_hop_element *pfh; 2636 struct freq_hop_element *pfh;
2697 UCHAR c[33]; 2637 UCHAR c[33];
2698 2638
2699 link = dev_list; 2639 link = this_device;
2700 if (!link) 2640 if (!link)
2701 return 0; 2641 return 0;
2702 dev = (struct net_device *)link->priv; 2642 dev = (struct net_device *)link->priv;
@@ -2898,7 +2838,7 @@ static struct pcmcia_driver ray_driver = {
2898 .drv = { 2838 .drv = {
2899 .name = "ray_cs", 2839 .name = "ray_cs",
2900 }, 2840 },
2901 .probe = ray_attach, 2841 .probe = ray_probe,
2902 .remove = ray_detach, 2842 .remove = ray_detach,
2903 .id_table = ray_ids, 2843 .id_table = ray_ids,
2904 .suspend = ray_suspend, 2844 .suspend = ray_suspend,
@@ -2940,7 +2880,6 @@ static void __exit exit_ray_cs(void)
2940#endif 2880#endif
2941 2881
2942 pcmcia_unregister_driver(&ray_driver); 2882 pcmcia_unregister_driver(&ray_driver);
2943 BUG_ON(dev_list != NULL);
2944} /* exit_ray_cs */ 2883} /* exit_ray_cs */
2945 2884
2946module_init(init_ray_cs); 2885module_init(init_ray_cs);
diff --git a/drivers/net/wireless/ray_cs.h b/drivers/net/wireless/ray_cs.h
index 42660fe64bfd..bd73ebf03340 100644
--- a/drivers/net/wireless/ray_cs.h
+++ b/drivers/net/wireless/ray_cs.h
@@ -31,7 +31,7 @@ typedef struct ray_dev_t {
31 void __iomem *sram; /* pointer to beginning of shared RAM */ 31 void __iomem *sram; /* pointer to beginning of shared RAM */
32 void __iomem *amem; /* pointer to attribute mem window */ 32 void __iomem *amem; /* pointer to attribute mem window */
33 void __iomem *rmem; /* pointer to receive buffer window */ 33 void __iomem *rmem; /* pointer to receive buffer window */
34 dev_link_t *finder; /* pointer back to dev_link_t for card */ 34 struct pcmcia_device *finder; /* pointer back to struct pcmcia_device for card */
35 struct timer_list timer; 35 struct timer_list timer;
36 long tx_ccs_lock; 36 long tx_ccs_lock;
37 long ccs_lock; 37 long ccs_lock;
diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c
index 5fa6fbe35bb9..f7b77ce54d7b 100644
--- a/drivers/net/wireless/spectrum_cs.c
+++ b/drivers/net/wireless/spectrum_cs.c
@@ -63,7 +63,7 @@ MODULE_PARM_DESC(ignore_cis_vcc, "Allow voltage mismatch between card and socket
63/* PCMCIA specific device information (goes in the card field of 63/* PCMCIA specific device information (goes in the card field of
64 * struct orinoco_private */ 64 * struct orinoco_private */
65struct orinoco_pccard { 65struct orinoco_pccard {
66 dev_link_t link; 66 struct pcmcia_device *p_dev;
67 dev_node_t node; 67 dev_node_t node;
68}; 68};
69 69
@@ -71,8 +71,8 @@ struct orinoco_pccard {
71/* Function prototypes */ 71/* Function prototypes */
72/********************************************************************/ 72/********************************************************************/
73 73
74static void spectrum_cs_config(dev_link_t *link); 74static int spectrum_cs_config(struct pcmcia_device *link);
75static void spectrum_cs_release(dev_link_t *link); 75static void spectrum_cs_release(struct pcmcia_device *link);
76 76
77/********************************************************************/ 77/********************************************************************/
78/* Firmware downloader */ 78/* Firmware downloader */
@@ -238,14 +238,14 @@ spectrum_aux_open(hermes_t *hw)
238 * If IDLE is 1, stop the firmware, so that it can be safely rewritten. 238 * If IDLE is 1, stop the firmware, so that it can be safely rewritten.
239 */ 239 */
240static int 240static int
241spectrum_reset(dev_link_t *link, int idle) 241spectrum_reset(struct pcmcia_device *link, int idle)
242{ 242{
243 int last_ret, last_fn; 243 int last_ret, last_fn;
244 conf_reg_t reg; 244 conf_reg_t reg;
245 u_int save_cor; 245 u_int save_cor;
246 246
247 /* Doing it if hardware is gone is guaranteed crash */ 247 /* Doing it if hardware is gone is guaranteed crash */
248	/* Doing it if hardware is gone is guaranteed crash */	248	/* Doing it if hardware is gone is guaranteed crash */
249	if (!(link->state & DEV_CONFIG))	249	if (!pcmcia_dev_present(link))
250 250
251 /* Save original COR value */ 251 /* Save original COR value */
@@ -253,7 +253,7 @@ spectrum_reset(dev_link_t *link, int idle)
253 reg.Action = CS_READ; 253 reg.Action = CS_READ;
254 reg.Offset = CISREG_COR; 254 reg.Offset = CISREG_COR;
255 CS_CHECK(AccessConfigurationRegister, 255 CS_CHECK(AccessConfigurationRegister,
256 pcmcia_access_configuration_register(link->handle, &reg)); 256 pcmcia_access_configuration_register(link, &reg));
257 save_cor = reg.Value; 257 save_cor = reg.Value;
258 258
259 /* Soft-Reset card */ 259 /* Soft-Reset card */
@@ -261,14 +261,14 @@ spectrum_reset(dev_link_t *link, int idle)
261 reg.Offset = CISREG_COR; 261 reg.Offset = CISREG_COR;
262 reg.Value = (save_cor | COR_SOFT_RESET); 262 reg.Value = (save_cor | COR_SOFT_RESET);
263 CS_CHECK(AccessConfigurationRegister, 263 CS_CHECK(AccessConfigurationRegister,
264 pcmcia_access_configuration_register(link->handle, &reg)); 264 pcmcia_access_configuration_register(link, &reg));
265 udelay(1000); 265 udelay(1000);
266 266
267 /* Read CCSR */ 267 /* Read CCSR */
268 reg.Action = CS_READ; 268 reg.Action = CS_READ;
269 reg.Offset = CISREG_CCSR; 269 reg.Offset = CISREG_CCSR;
270 CS_CHECK(AccessConfigurationRegister, 270 CS_CHECK(AccessConfigurationRegister,
271 pcmcia_access_configuration_register(link->handle, &reg)); 271 pcmcia_access_configuration_register(link, &reg));
272 272
273 /* 273 /*
274 * Start or stop the firmware. Memory width bit should be 274 * Start or stop the firmware. Memory width bit should be
@@ -278,7 +278,7 @@ spectrum_reset(dev_link_t *link, int idle)
278 reg.Offset = CISREG_CCSR; 278 reg.Offset = CISREG_CCSR;
279 reg.Value = (idle ? HCR_IDLE : HCR_RUN) | (reg.Value & HCR_MEM16); 279 reg.Value = (idle ? HCR_IDLE : HCR_RUN) | (reg.Value & HCR_MEM16);
280 CS_CHECK(AccessConfigurationRegister, 280 CS_CHECK(AccessConfigurationRegister,
281 pcmcia_access_configuration_register(link->handle, &reg)); 281 pcmcia_access_configuration_register(link, &reg));
282 udelay(1000); 282 udelay(1000);
283 283
284 /* Restore original COR configuration index */ 284 /* Restore original COR configuration index */
@@ -286,12 +286,12 @@ spectrum_reset(dev_link_t *link, int idle)
286 reg.Offset = CISREG_COR; 286 reg.Offset = CISREG_COR;
287 reg.Value = (save_cor & ~COR_SOFT_RESET); 287 reg.Value = (save_cor & ~COR_SOFT_RESET);
288 CS_CHECK(AccessConfigurationRegister, 288 CS_CHECK(AccessConfigurationRegister,
289 pcmcia_access_configuration_register(link->handle, &reg)); 289 pcmcia_access_configuration_register(link, &reg));
290 udelay(1000); 290 udelay(1000);
291 return 0; 291 return 0;
292 292
293 cs_failed: 293 cs_failed:
294 cs_error(link->handle, last_fn, last_ret); 294 cs_error(link, last_fn, last_ret);
295 return -ENODEV; 295 return -ENODEV;
296} 296}
297 297
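
The CS_CHECK()/cs_failed idiom survives the conversion; only the first argument of the Card Services calls changes from link->handle to the struct pcmcia_device itself. A self-contained sketch, assuming the macro definition these drivers share (it appears verbatim in the wl3501 hunk further down) and the 2.6.16-era conf_reg_t API; my_cor_read() is an illustrative name:

	#include <pcmcia/cs_types.h>
	#include <pcmcia/cs.h>
	#include <pcmcia/cisreg.h>
	#include <pcmcia/ds.h>

	#define CS_CHECK(fn, ret) \
		do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)

	static int my_cor_read(struct pcmcia_device *link, u_int *cor)
	{
		int last_fn, last_ret;
		conf_reg_t reg;

		/* refuse to touch hardware that is already gone */
		if (!pcmcia_dev_present(link))
			return -ENODEV;

		reg.Function = 0;
		reg.Action = CS_READ;
		reg.Offset = CISREG_COR;
		CS_CHECK(AccessConfigurationRegister,
			 pcmcia_access_configuration_register(link, &reg));
		*cor = reg.Value;
		return 0;

	 cs_failed:
		cs_error(link, last_fn, last_ret);
		return -ENODEV;
	}
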
@@ -441,7 +441,7 @@ spectrum_load_blocks(hermes_t *hw, const struct dblock *first_block)
441 * care of the PDA - read it and then write it on top of the firmware. 441 * care of the PDA - read it and then write it on top of the firmware.
442 */ 442 */
443static int 443static int
444spectrum_dl_image(hermes_t *hw, dev_link_t *link, 444spectrum_dl_image(hermes_t *hw, struct pcmcia_device *link,
445 const unsigned char *image) 445 const unsigned char *image)
446{ 446{
447 int ret; 447 int ret;
@@ -505,14 +505,13 @@ spectrum_dl_image(hermes_t *hw, dev_link_t *link,
505 * reset on the card, to make sure it's in a sane state. 505 * reset on the card, to make sure it's in a sane state.
506 */ 506 */
507static int 507static int
508spectrum_dl_firmware(hermes_t *hw, dev_link_t *link) 508spectrum_dl_firmware(hermes_t *hw, struct pcmcia_device *link)
509{ 509{
510 int ret; 510 int ret;
511 client_handle_t handle = link->handle;
512 const struct firmware *fw_entry; 511 const struct firmware *fw_entry;
513 512
514 if (request_firmware(&fw_entry, primary_fw_name, 513 if (request_firmware(&fw_entry, primary_fw_name,
515 &handle_to_dev(handle)) == 0) { 514 &handle_to_dev(link)) == 0) {
516 primsym = fw_entry->data; 515 primsym = fw_entry->data;
517 } else { 516 } else {
518 printk(KERN_ERR PFX "Cannot find firmware: %s\n", 517 printk(KERN_ERR PFX "Cannot find firmware: %s\n",
@@ -521,7 +520,7 @@ spectrum_dl_firmware(hermes_t *hw, dev_link_t *link)
521 } 520 }
522 521
523 if (request_firmware(&fw_entry, secondary_fw_name, 522 if (request_firmware(&fw_entry, secondary_fw_name,
524 &handle_to_dev(handle)) == 0) { 523 &handle_to_dev(link)) == 0) {
525 secsym = fw_entry->data; 524 secsym = fw_entry->data;
526 } else { 525 } else {
527 printk(KERN_ERR PFX "Cannot find firmware: %s\n", 526 printk(KERN_ERR PFX "Cannot find firmware: %s\n",
@@ -554,12 +553,12 @@ static int
554spectrum_cs_hard_reset(struct orinoco_private *priv) 553spectrum_cs_hard_reset(struct orinoco_private *priv)
555{ 554{
556 struct orinoco_pccard *card = priv->card; 555 struct orinoco_pccard *card = priv->card;
557 dev_link_t *link = &card->link; 556 struct pcmcia_device *link = card->p_dev;
558 int err; 557 int err;
559 558
560 if (!hermes_present(&priv->hw)) { 559 if (!hermes_present(&priv->hw)) {
561 /* The firmware needs to be reloaded */ 560 /* The firmware needs to be reloaded */
562 if (spectrum_dl_firmware(&priv->hw, &card->link) != 0) { 561 if (spectrum_dl_firmware(&priv->hw, link) != 0) {
563 printk(KERN_ERR PFX "Firmware download failed\n"); 562 printk(KERN_ERR PFX "Firmware download failed\n");
564 err = -ENODEV; 563 err = -ENODEV;
565 } 564 }
@@ -584,12 +583,11 @@ spectrum_cs_hard_reset(struct orinoco_private *priv)
584 * configure the card at this point -- we wait until we receive a card 583 * configure the card at this point -- we wait until we receive a card
585 * insertion event. */ 584 * insertion event. */
586static int 585static int
587spectrum_cs_attach(struct pcmcia_device *p_dev) 586spectrum_cs_probe(struct pcmcia_device *link)
588{ 587{
589 struct net_device *dev; 588 struct net_device *dev;
590 struct orinoco_private *priv; 589 struct orinoco_private *priv;
591 struct orinoco_pccard *card; 590 struct orinoco_pccard *card;
592 dev_link_t *link;
593 591
594 dev = alloc_orinocodev(sizeof(*card), spectrum_cs_hard_reset); 592 dev = alloc_orinocodev(sizeof(*card), spectrum_cs_hard_reset);
595 if (! dev) 593 if (! dev)
@@ -598,7 +596,7 @@ spectrum_cs_attach(struct pcmcia_device *p_dev)
598 card = priv->card; 596 card = priv->card;
599 597
600 /* Link both structures together */ 598 /* Link both structures together */
601 link = &card->link; 599 card->p_dev = link;
602 link->priv = dev; 600 link->priv = dev;
603 601
604 /* Interrupt setup */ 602 /* Interrupt setup */
@@ -615,13 +613,7 @@ spectrum_cs_attach(struct pcmcia_device *p_dev)
615 link->conf.Attributes = 0; 613 link->conf.Attributes = 0;
616 link->conf.IntType = INT_MEMORY_AND_IO; 614 link->conf.IntType = INT_MEMORY_AND_IO;
617 615
618 link->handle = p_dev; 616 return spectrum_cs_config(link);
619 p_dev->instance = link;
620
621 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
622 spectrum_cs_config(link);
623
624 return 0;
625} /* spectrum_cs_attach */ 617} /* spectrum_cs_attach */
626 618
627/* 619/*
@@ -630,16 +622,14 @@ spectrum_cs_attach(struct pcmcia_device *p_dev)
630 * are freed. Otherwise, the structures will be freed when the device 622 * are freed. Otherwise, the structures will be freed when the device
631 * is released. 623 * is released.
632 */ 624 */
633static void spectrum_cs_detach(struct pcmcia_device *p_dev) 625static void spectrum_cs_detach(struct pcmcia_device *link)
634{ 626{
635 dev_link_t *link = dev_to_instance(p_dev);
636 struct net_device *dev = link->priv; 627 struct net_device *dev = link->priv;
637 628
638 if (link->state & DEV_CONFIG) 629 spectrum_cs_release(link);
639 spectrum_cs_release(link);
640 630
641 DEBUG(0, PFX "detach: link=%p link->dev=%p\n", link, link->dev); 631 DEBUG(0, PFX "detach: link=%p link->dev_node=%p\n", link, link->dev_node);
642 if (link->dev) { 632 if (link->dev_node) {
643 DEBUG(0, PFX "About to unregister net device %p\n", 633 DEBUG(0, PFX "About to unregister net device %p\n",
644 dev); 634 dev);
645 unregister_netdev(dev); 635 unregister_netdev(dev);
@@ -653,11 +643,10 @@ static void spectrum_cs_detach(struct pcmcia_device *p_dev)
653 * device available to the system. 643 * device available to the system.
654 */ 644 */
655 645
656static void 646static int
657spectrum_cs_config(dev_link_t *link) 647spectrum_cs_config(struct pcmcia_device *link)
658{ 648{
659 struct net_device *dev = link->priv; 649 struct net_device *dev = link->priv;
660 client_handle_t handle = link->handle;
661 struct orinoco_private *priv = netdev_priv(dev); 650 struct orinoco_private *priv = netdev_priv(dev);
662 struct orinoco_pccard *card = priv->card; 651 struct orinoco_pccard *card = priv->card;
663 hermes_t *hw = &priv->hw; 652 hermes_t *hw = &priv->hw;
@@ -669,7 +658,7 @@ spectrum_cs_config(dev_link_t *link)
669 cisparse_t parse; 658 cisparse_t parse;
670 void __iomem *mem; 659 void __iomem *mem;
671 660
672 CS_CHECK(ValidateCIS, pcmcia_validate_cis(handle, &info)); 661 CS_CHECK(ValidateCIS, pcmcia_validate_cis(link, &info));
673 662
674 /* 663 /*
675 * This reads the card's CONFIG tuple to find its 664 * This reads the card's CONFIG tuple to find its
@@ -680,19 +669,15 @@ spectrum_cs_config(dev_link_t *link)
680 tuple.TupleData = buf; 669 tuple.TupleData = buf;
681 tuple.TupleDataMax = sizeof(buf); 670 tuple.TupleDataMax = sizeof(buf);
682 tuple.TupleOffset = 0; 671 tuple.TupleOffset = 0;
683 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 672 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
684 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 673 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
685 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 674 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
686 link->conf.ConfigBase = parse.config.base; 675 link->conf.ConfigBase = parse.config.base;
687 link->conf.Present = parse.config.rmask[0]; 676 link->conf.Present = parse.config.rmask[0];
688 677
689 /* Configure card */
690 link->state |= DEV_CONFIG;
691
692 /* Look up the current Vcc */ 678 /* Look up the current Vcc */
693 CS_CHECK(GetConfigurationInfo, 679 CS_CHECK(GetConfigurationInfo,
694 pcmcia_get_configuration_info(handle, &conf)); 680 pcmcia_get_configuration_info(link, &conf));
695 link->conf.Vcc = conf.Vcc;
696 681
697 /* 682 /*
698 * In this loop, we scan the CIS for configuration table 683 * In this loop, we scan the CIS for configuration table
@@ -709,13 +694,13 @@ spectrum_cs_config(dev_link_t *link)
709 * implementation-defined details. 694 * implementation-defined details.
710 */ 695 */
711 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 696 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
712 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 697 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
713 while (1) { 698 while (1) {
714 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry); 699 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
715 cistpl_cftable_entry_t dflt = { .index = 0 }; 700 cistpl_cftable_entry_t dflt = { .index = 0 };
716 701
717 if ( (pcmcia_get_tuple_data(handle, &tuple) != 0) 702 if ( (pcmcia_get_tuple_data(link, &tuple) != 0)
718 || (pcmcia_parse_tuple(handle, &tuple, &parse) != 0)) 703 || (pcmcia_parse_tuple(link, &tuple, &parse) != 0))
719 goto next_entry; 704 goto next_entry;
720 705
721 if (cfg->flags & CISTPL_CFTABLE_DEFAULT) 706 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
@@ -747,10 +732,10 @@ spectrum_cs_config(dev_link_t *link)
747 } 732 }
748 733
749 if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM)) 734 if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM))
750 link->conf.Vpp1 = link->conf.Vpp2 = 735 link->conf.Vpp =
751 cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000; 736 cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000;
752 else if (dflt.vpp1.present & (1 << CISTPL_POWER_VNOM)) 737 else if (dflt.vpp1.present & (1 << CISTPL_POWER_VNOM))
753 link->conf.Vpp1 = link->conf.Vpp2 = 738 link->conf.Vpp =
754 dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000; 739 dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000;
755 740
756 /* Do we need to allocate an interrupt? */ 741 /* Do we need to allocate an interrupt? */
@@ -780,7 +765,7 @@ spectrum_cs_config(dev_link_t *link)
780 } 765 }
781 766
782 /* This reserves IO space but doesn't actually enable it */ 767 /* This reserves IO space but doesn't actually enable it */
783 if (pcmcia_request_io(link->handle, &link->io) != 0) 768 if (pcmcia_request_io(link, &link->io) != 0)
784 goto next_entry; 769 goto next_entry;
785 } 770 }
786 771
@@ -790,9 +775,8 @@ spectrum_cs_config(dev_link_t *link)
790 break; 775 break;
791 776
792 next_entry: 777 next_entry:
793 if (link->io.NumPorts1) 778 pcmcia_disable_device(link);
794 pcmcia_release_io(link->handle, &link->io); 779 last_ret = pcmcia_get_next_tuple(link, &tuple);
795 last_ret = pcmcia_get_next_tuple(handle, &tuple);
796 if (last_ret == CS_NO_MORE_ITEMS) { 780 if (last_ret == CS_NO_MORE_ITEMS) {
797 printk(KERN_ERR PFX "GetNextTuple(): No matching " 781 printk(KERN_ERR PFX "GetNextTuple(): No matching "
798 "CIS configuration. Maybe you need the " 782 "CIS configuration. Maybe you need the "
@@ -806,7 +790,7 @@ spectrum_cs_config(dev_link_t *link)
806 * a handler to the interrupt, unless the 'Handler' member of 790 * a handler to the interrupt, unless the 'Handler' member of
807 * the irq structure is initialized. 791 * the irq structure is initialized.
808 */ 792 */
809 CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq)); 793 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
810 794
811 /* We initialize the hermes structure before completing PCMCIA 795 /* We initialize the hermes structure before completing PCMCIA
812 * configuration just in case the interrupt handler gets 796 * configuration just in case the interrupt handler gets
@@ -823,7 +807,7 @@ spectrum_cs_config(dev_link_t *link)
823 * card and host interface into "Memory and IO" mode. 807 * card and host interface into "Memory and IO" mode.
824 */ 808 */
825 CS_CHECK(RequestConfiguration, 809 CS_CHECK(RequestConfiguration,
826 pcmcia_request_configuration(link->handle, &link->conf)); 810 pcmcia_request_configuration(link, &link->conf));
827 811
828 /* Ok, we have the configuration, prepare to register the netdev */ 812 /* Ok, we have the configuration, prepare to register the netdev */
829 dev->base_addr = link->io.BasePort1; 813 dev->base_addr = link->io.BasePort1;
@@ -836,7 +820,7 @@ spectrum_cs_config(dev_link_t *link)
836 goto failed; 820 goto failed;
837 } 821 }
838 822
839 SET_NETDEV_DEV(dev, &handle_to_dev(handle)); 823 SET_NETDEV_DEV(dev, &handle_to_dev(link));
840 /* Tell the stack we exist */ 824 /* Tell the stack we exist */
841 if (register_netdev(dev) != 0) { 825 if (register_netdev(dev) != 0) {
842 printk(KERN_ERR PFX "register_netdev() failed\n"); 826 printk(KERN_ERR PFX "register_netdev() failed\n");
@@ -844,20 +828,18 @@ spectrum_cs_config(dev_link_t *link)
844 } 828 }
845 829
846 /* At this point, the dev_node_t structure(s) needs to be 830 /* At this point, the dev_node_t structure(s) needs to be
847 * initialized and arranged in a linked list at link->dev. */ 831 * initialized and arranged in a linked list at link->dev_node. */
848 strcpy(card->node.dev_name, dev->name); 832 strcpy(card->node.dev_name, dev->name);
849 link->dev = &card->node; /* link->dev being non-NULL is also 833 link->dev_node = &card->node; /* link->dev_node being non-NULL is also
850 used to indicate that the 834 used to indicate that the
851 net_device has been registered */ 835 net_device has been registered */
852 link->state &= ~DEV_CONFIG_PENDING;
853 836
854 /* Finally, report what we've done */ 837 /* Finally, report what we've done */
855 printk(KERN_DEBUG "%s: index 0x%02x: Vcc %d.%d", 838 printk(KERN_DEBUG "%s: index 0x%02x: ",
856 dev->name, link->conf.ConfigIndex, 839 dev->name, link->conf.ConfigIndex);
857 link->conf.Vcc / 10, link->conf.Vcc % 10); 840 if (link->conf.Vpp)
858 if (link->conf.Vpp1) 841 printk(", Vpp %d.%d", link->conf.Vpp / 10,
859 printk(", Vpp %d.%d", link->conf.Vpp1 / 10, 842 link->conf.Vpp % 10);
860 link->conf.Vpp1 % 10);
861 printk(", irq %d", link->irq.AssignedIRQ); 843 printk(", irq %d", link->irq.AssignedIRQ);
862 if (link->io.NumPorts1) 844 if (link->io.NumPorts1)
863 printk(", io 0x%04x-0x%04x", link->io.BasePort1, 845 printk(", io 0x%04x-0x%04x", link->io.BasePort1,
@@ -867,13 +849,14 @@ spectrum_cs_config(dev_link_t *link)
867 link->io.BasePort2 + link->io.NumPorts2 - 1); 849 link->io.BasePort2 + link->io.NumPorts2 - 1);
868 printk("\n"); 850 printk("\n");
869 851
870 return; 852 return 0;
871 853
872 cs_failed: 854 cs_failed:
873 cs_error(link->handle, last_fn, last_ret); 855 cs_error(link, last_fn, last_ret);
874 856
875 failed: 857 failed:
876 spectrum_cs_release(link); 858 spectrum_cs_release(link);
859 return -ENODEV;
877} /* spectrum_cs_config */ 860} /* spectrum_cs_config */
878 861
879/* 862/*
@@ -882,7 +865,7 @@ spectrum_cs_config(dev_link_t *link)
882 * still open, this will be postponed until it is closed. 865 * still open, this will be postponed until it is closed.
883 */ 866 */
884static void 867static void
885spectrum_cs_release(dev_link_t *link) 868spectrum_cs_release(struct pcmcia_device *link)
886{ 869{
887 struct net_device *dev = link->priv; 870 struct net_device *dev = link->priv;
888 struct orinoco_private *priv = netdev_priv(dev); 871 struct orinoco_private *priv = netdev_priv(dev);
@@ -894,64 +877,46 @@ spectrum_cs_release(dev_link_t *link)
894 priv->hw_unavailable++; 877 priv->hw_unavailable++;
895 spin_unlock_irqrestore(&priv->lock, flags); 878 spin_unlock_irqrestore(&priv->lock, flags);
896 879
897 /* Don't bother checking to see if these succeed or not */ 880 pcmcia_disable_device(link);
898 pcmcia_release_configuration(link->handle);
899 if (link->io.NumPorts1)
900 pcmcia_release_io(link->handle, &link->io);
901 if (link->irq.AssignedIRQ)
902 pcmcia_release_irq(link->handle, &link->irq);
903 link->state &= ~DEV_CONFIG;
904 if (priv->hw.iobase) 881 if (priv->hw.iobase)
905 ioport_unmap(priv->hw.iobase); 882 ioport_unmap(priv->hw.iobase);
906} /* spectrum_cs_release */ 883} /* spectrum_cs_release */
907 884
908 885
909static int 886static int
910spectrum_cs_suspend(struct pcmcia_device *p_dev) 887spectrum_cs_suspend(struct pcmcia_device *link)
911{ 888{
912 dev_link_t *link = dev_to_instance(p_dev);
913 struct net_device *dev = link->priv; 889 struct net_device *dev = link->priv;
914 struct orinoco_private *priv = netdev_priv(dev); 890 struct orinoco_private *priv = netdev_priv(dev);
915 unsigned long flags; 891 unsigned long flags;
916 int err = 0; 892 int err = 0;
917 893
918 link->state |= DEV_SUSPEND;
919 /* Mark the device as stopped, to block IO until later */ 894 /* Mark the device as stopped, to block IO until later */
920 if (link->state & DEV_CONFIG) { 895 spin_lock_irqsave(&priv->lock, flags);
921 spin_lock_irqsave(&priv->lock, flags);
922
923 err = __orinoco_down(dev);
924 if (err)
925 printk(KERN_WARNING "%s: Error %d downing interface\n",
926 dev->name, err);
927 896
928 netif_device_detach(dev); 897 err = __orinoco_down(dev);
929 priv->hw_unavailable++; 898 if (err)
899 printk(KERN_WARNING "%s: Error %d downing interface\n",
900 dev->name, err);
930 901
931 spin_unlock_irqrestore(&priv->lock, flags); 902 netif_device_detach(dev);
903 priv->hw_unavailable++;
932 904
933 pcmcia_release_configuration(link->handle); 905 spin_unlock_irqrestore(&priv->lock, flags);
934 }
935 906
936 return 0; 907 return 0;
937} 908}
938 909
939static int 910static int
940spectrum_cs_resume(struct pcmcia_device *p_dev) 911spectrum_cs_resume(struct pcmcia_device *link)
941{ 912{
942 dev_link_t *link = dev_to_instance(p_dev);
943 struct net_device *dev = link->priv; 913 struct net_device *dev = link->priv;
944 struct orinoco_private *priv = netdev_priv(dev); 914 struct orinoco_private *priv = netdev_priv(dev);
945 915
946 link->state &= ~DEV_SUSPEND; 916 netif_device_attach(dev);
947 if (link->state & DEV_CONFIG) { 917 priv->hw_unavailable--;
948 /* FIXME: should we double check that this is 918 schedule_work(&priv->reset_work);
949 * the same card as we had before */ 919
950 pcmcia_request_configuration(link->handle, &link->conf);
951 netif_device_attach(dev);
952 priv->hw_unavailable--;
953 schedule_work(&priv->reset_work);
954 }
955 return 0; 920 return 0;
956} 921}
957 922
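
As the suspend/resume hunks show, the driver no longer juggles DEV_SUSPEND/DEV_CONFIG or wraps power transitions in pcmcia_release_configuration()/pcmcia_request_configuration(); the new PCMCIA core takes care of the socket state around these callbacks. A stripped-down sketch of the resulting shape, with my_suspend/my_resume as placeholder names and the driver-specific down/up work elided:

	static int my_suspend(struct pcmcia_device *link)
	{
		struct net_device *dev = link->priv;

		/* driver-specific quiescing would go here */
		netif_device_detach(dev);
		return 0;
	}

	static int my_resume(struct pcmcia_device *link)
	{
		struct net_device *dev = link->priv;

		netif_device_attach(dev);
		/* driver-specific reinitialisation would go here */
		return 0;
	}
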
@@ -979,7 +944,7 @@ static struct pcmcia_driver orinoco_driver = {
979 .drv = { 944 .drv = {
980 .name = DRIVER_NAME, 945 .name = DRIVER_NAME,
981 }, 946 },
982 .probe = spectrum_cs_attach, 947 .probe = spectrum_cs_probe,
983 .remove = spectrum_cs_detach, 948 .remove = spectrum_cs_detach,
984 .suspend = spectrum_cs_suspend, 949 .suspend = spectrum_cs_suspend,
985 .resume = spectrum_cs_resume, 950 .resume = spectrum_cs_resume,
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index 98122f3a4bc2..f7724eb2fa7e 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -1005,7 +1005,7 @@ static inline void
1005wv_82593_reconfig(struct net_device * dev) 1005wv_82593_reconfig(struct net_device * dev)
1006{ 1006{
1007 net_local * lp = netdev_priv(dev); 1007 net_local * lp = netdev_priv(dev);
1008 dev_link_t * link = lp->link; 1008 struct pcmcia_device * link = lp->link;
1009 unsigned long flags; 1009 unsigned long flags;
1010 1010
1011	/* Arm the flag, will be cleared in wv_82593_config() */	1011	/* Arm the flag, will be cleared in wv_82593_config() */
@@ -3744,16 +3744,16 @@ wv_pcmcia_reset(struct net_device * dev)
3744{ 3744{
3745 int i; 3745 int i;
3746 conf_reg_t reg = { 0, CS_READ, CISREG_COR, 0 }; 3746 conf_reg_t reg = { 0, CS_READ, CISREG_COR, 0 };
3747 dev_link_t * link = ((net_local *)netdev_priv(dev))->link; 3747 struct pcmcia_device * link = ((net_local *)netdev_priv(dev))->link;
3748 3748
3749#ifdef DEBUG_CONFIG_TRACE 3749#ifdef DEBUG_CONFIG_TRACE
3750 printk(KERN_DEBUG "%s: ->wv_pcmcia_reset()\n", dev->name); 3750 printk(KERN_DEBUG "%s: ->wv_pcmcia_reset()\n", dev->name);
3751#endif 3751#endif
3752 3752
3753 i = pcmcia_access_configuration_register(link->handle, &reg); 3753 i = pcmcia_access_configuration_register(link, &reg);
3754 if(i != CS_SUCCESS) 3754 if(i != CS_SUCCESS)
3755 { 3755 {
3756 cs_error(link->handle, AccessConfigurationRegister, i); 3756 cs_error(link, AccessConfigurationRegister, i);
3757 return FALSE; 3757 return FALSE;
3758 } 3758 }
3759 3759
@@ -3764,19 +3764,19 @@ wv_pcmcia_reset(struct net_device * dev)
3764 3764
3765 reg.Action = CS_WRITE; 3765 reg.Action = CS_WRITE;
3766 reg.Value = reg.Value | COR_SW_RESET; 3766 reg.Value = reg.Value | COR_SW_RESET;
3767 i = pcmcia_access_configuration_register(link->handle, &reg); 3767 i = pcmcia_access_configuration_register(link, &reg);
3768 if(i != CS_SUCCESS) 3768 if(i != CS_SUCCESS)
3769 { 3769 {
3770 cs_error(link->handle, AccessConfigurationRegister, i); 3770 cs_error(link, AccessConfigurationRegister, i);
3771 return FALSE; 3771 return FALSE;
3772 } 3772 }
3773 3773
3774 reg.Action = CS_WRITE; 3774 reg.Action = CS_WRITE;
3775 reg.Value = COR_LEVEL_IRQ | COR_CONFIG; 3775 reg.Value = COR_LEVEL_IRQ | COR_CONFIG;
3776 i = pcmcia_access_configuration_register(link->handle, &reg); 3776 i = pcmcia_access_configuration_register(link, &reg);
3777 if(i != CS_SUCCESS) 3777 if(i != CS_SUCCESS)
3778 { 3778 {
3779 cs_error(link->handle, AccessConfigurationRegister, i); 3779 cs_error(link, AccessConfigurationRegister, i);
3780 return FALSE; 3780 return FALSE;
3781 } 3781 }
3782 3782
@@ -3940,9 +3940,8 @@ wv_hw_reset(struct net_device * dev)
3940 * (called by wavelan_event()) 3940 * (called by wavelan_event())
3941 */ 3941 */
3942static inline int 3942static inline int
3943wv_pcmcia_config(dev_link_t * link) 3943wv_pcmcia_config(struct pcmcia_device * link)
3944{ 3944{
3945 client_handle_t handle = link->handle;
3946 tuple_t tuple; 3945 tuple_t tuple;
3947 cisparse_t parse; 3946 cisparse_t parse;
3948 struct net_device * dev = (struct net_device *) link->priv; 3947 struct net_device * dev = (struct net_device *) link->priv;
@@ -3965,16 +3964,16 @@ wv_pcmcia_config(dev_link_t * link)
3965 { 3964 {
3966 tuple.Attributes = 0; 3965 tuple.Attributes = 0;
3967 tuple.DesiredTuple = CISTPL_CONFIG; 3966 tuple.DesiredTuple = CISTPL_CONFIG;
3968 i = pcmcia_get_first_tuple(handle, &tuple); 3967 i = pcmcia_get_first_tuple(link, &tuple);
3969 if(i != CS_SUCCESS) 3968 if(i != CS_SUCCESS)
3970 break; 3969 break;
3971 tuple.TupleData = (cisdata_t *)buf; 3970 tuple.TupleData = (cisdata_t *)buf;
3972 tuple.TupleDataMax = 64; 3971 tuple.TupleDataMax = 64;
3973 tuple.TupleOffset = 0; 3972 tuple.TupleOffset = 0;
3974 i = pcmcia_get_tuple_data(handle, &tuple); 3973 i = pcmcia_get_tuple_data(link, &tuple);
3975 if(i != CS_SUCCESS) 3974 if(i != CS_SUCCESS)
3976 break; 3975 break;
3977 i = pcmcia_parse_tuple(handle, &tuple, &parse); 3976 i = pcmcia_parse_tuple(link, &tuple, &parse);
3978 if(i != CS_SUCCESS) 3977 if(i != CS_SUCCESS)
3979 break; 3978 break;
3980 link->conf.ConfigBase = parse.config.base; 3979 link->conf.ConfigBase = parse.config.base;
@@ -3983,19 +3982,16 @@ wv_pcmcia_config(dev_link_t * link)
3983 while(0); 3982 while(0);
3984 if(i != CS_SUCCESS) 3983 if(i != CS_SUCCESS)
3985 { 3984 {
3986 cs_error(link->handle, ParseTuple, i); 3985 cs_error(link, ParseTuple, i);
3987 link->state &= ~DEV_CONFIG_PENDING;
3988 return FALSE; 3986 return FALSE;
3989 } 3987 }
3990 3988
3991 /* Configure card */
3992 link->state |= DEV_CONFIG;
3993 do 3989 do
3994 { 3990 {
3995 i = pcmcia_request_io(link->handle, &link->io); 3991 i = pcmcia_request_io(link, &link->io);
3996 if(i != CS_SUCCESS) 3992 if(i != CS_SUCCESS)
3997 { 3993 {
3998 cs_error(link->handle, RequestIO, i); 3994 cs_error(link, RequestIO, i);
3999 break; 3995 break;
4000 } 3996 }
4001 3997
@@ -4003,10 +3999,10 @@ wv_pcmcia_config(dev_link_t * link)
4003 * Now allocate an interrupt line. Note that this does not 3999 * Now allocate an interrupt line. Note that this does not
4004 * actually assign a handler to the interrupt. 4000 * actually assign a handler to the interrupt.
4005 */ 4001 */
4006 i = pcmcia_request_irq(link->handle, &link->irq); 4002 i = pcmcia_request_irq(link, &link->irq);
4007 if(i != CS_SUCCESS) 4003 if(i != CS_SUCCESS)
4008 { 4004 {
4009 cs_error(link->handle, RequestIRQ, i); 4005 cs_error(link, RequestIRQ, i);
4010 break; 4006 break;
4011 } 4007 }
4012 4008
@@ -4015,15 +4011,15 @@ wv_pcmcia_config(dev_link_t * link)
4015 * the I/O windows and the interrupt mapping. 4011 * the I/O windows and the interrupt mapping.
4016 */ 4012 */
4017 link->conf.ConfigIndex = 1; 4013 link->conf.ConfigIndex = 1;
4018 i = pcmcia_request_configuration(link->handle, &link->conf); 4014 i = pcmcia_request_configuration(link, &link->conf);
4019 if(i != CS_SUCCESS) 4015 if(i != CS_SUCCESS)
4020 { 4016 {
4021 cs_error(link->handle, RequestConfiguration, i); 4017 cs_error(link, RequestConfiguration, i);
4022 break; 4018 break;
4023 } 4019 }
4024 4020
4025 /* 4021 /*
4026 * Allocate a small memory window. Note that the dev_link_t 4022 * Allocate a small memory window. Note that the struct pcmcia_device
4027 * structure provides space for one window handle -- if your 4023 * structure provides space for one window handle -- if your
4028 * device needs several windows, you'll need to keep track of 4024 * device needs several windows, you'll need to keep track of
4029 * the handles in your private data structure, link->priv. 4025 * the handles in your private data structure, link->priv.
@@ -4031,10 +4027,10 @@ wv_pcmcia_config(dev_link_t * link)
4031 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; 4027 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
4032 req.Base = req.Size = 0; 4028 req.Base = req.Size = 0;
4033 req.AccessSpeed = mem_speed; 4029 req.AccessSpeed = mem_speed;
4034 i = pcmcia_request_window(&link->handle, &req, &link->win); 4030 i = pcmcia_request_window(&link, &req, &link->win);
4035 if(i != CS_SUCCESS) 4031 if(i != CS_SUCCESS)
4036 { 4032 {
4037 cs_error(link->handle, RequestWindow, i); 4033 cs_error(link, RequestWindow, i);
4038 break; 4034 break;
4039 } 4035 }
4040 4036
@@ -4046,7 +4042,7 @@ wv_pcmcia_config(dev_link_t * link)
4046 i = pcmcia_map_mem_page(link->win, &mem); 4042 i = pcmcia_map_mem_page(link->win, &mem);
4047 if(i != CS_SUCCESS) 4043 if(i != CS_SUCCESS)
4048 { 4044 {
4049 cs_error(link->handle, MapMemPage, i); 4045 cs_error(link, MapMemPage, i);
4050 break; 4046 break;
4051 } 4047 }
4052 4048
@@ -4060,7 +4056,7 @@ wv_pcmcia_config(dev_link_t * link)
4060 lp->mem, dev->irq, (u_int) dev->base_addr); 4056 lp->mem, dev->irq, (u_int) dev->base_addr);
4061#endif 4057#endif
4062 4058
4063 SET_NETDEV_DEV(dev, &handle_to_dev(handle)); 4059 SET_NETDEV_DEV(dev, &handle_to_dev(link));
4064 i = register_netdev(dev); 4060 i = register_netdev(dev);
4065 if(i != 0) 4061 if(i != 0)
4066 { 4062 {
@@ -4072,7 +4068,6 @@ wv_pcmcia_config(dev_link_t * link)
4072 } 4068 }
4073 while(0); /* Humm... Disguised goto !!! */ 4069 while(0); /* Humm... Disguised goto !!! */
4074 4070
4075 link->state &= ~DEV_CONFIG_PENDING;
4076 /* If any step failed, release any partially configured state */ 4071 /* If any step failed, release any partially configured state */
4077 if(i != 0) 4072 if(i != 0)
4078 { 4073 {
@@ -4081,7 +4076,7 @@ wv_pcmcia_config(dev_link_t * link)
4081 } 4076 }
4082 4077
4083 strcpy(((net_local *) netdev_priv(dev))->node.dev_name, dev->name); 4078 strcpy(((net_local *) netdev_priv(dev))->node.dev_name, dev->name);
4084 link->dev = &((net_local *) netdev_priv(dev))->node; 4079 link->dev_node = &((net_local *) netdev_priv(dev))->node;
4085 4080
4086#ifdef DEBUG_CONFIG_TRACE 4081#ifdef DEBUG_CONFIG_TRACE
4087 printk(KERN_DEBUG "<-wv_pcmcia_config()\n"); 4082 printk(KERN_DEBUG "<-wv_pcmcia_config()\n");
@@ -4096,26 +4091,20 @@ wv_pcmcia_config(dev_link_t * link)
4096 * still open, this will be postponed until it is closed. 4091 * still open, this will be postponed until it is closed.
4097 */ 4092 */
4098static void 4093static void
4099wv_pcmcia_release(dev_link_t *link) 4094wv_pcmcia_release(struct pcmcia_device *link)
4100{ 4095{
4101 struct net_device * dev = (struct net_device *) link->priv; 4096 struct net_device * dev = (struct net_device *) link->priv;
4102 net_local * lp = netdev_priv(dev); 4097 net_local * lp = netdev_priv(dev);
4103 4098
4104#ifdef DEBUG_CONFIG_TRACE 4099#ifdef DEBUG_CONFIG_TRACE
4105 printk(KERN_DEBUG "%s: -> wv_pcmcia_release(0x%p)\n", dev->name, link); 4100 printk(KERN_DEBUG "%s: -> wv_pcmcia_release(0x%p)\n", dev->name, link);
4106#endif 4101#endif
4107 4102
4108 /* Don't bother checking to see if these succeed or not */ 4103 iounmap(lp->mem);
4109 iounmap(lp->mem); 4104 pcmcia_disable_device(link);
4110 pcmcia_release_window(link->win);
4111 pcmcia_release_configuration(link->handle);
4112 pcmcia_release_io(link->handle, &link->io);
4113 pcmcia_release_irq(link->handle, &link->irq);
4114
4115 link->state &= ~DEV_CONFIG;
4116 4105
4117#ifdef DEBUG_CONFIG_TRACE 4106#ifdef DEBUG_CONFIG_TRACE
4118 printk(KERN_DEBUG "%s: <- wv_pcmcia_release()\n", dev->name); 4107 printk(KERN_DEBUG "%s: <- wv_pcmcia_release()\n", dev->name);
4119#endif 4108#endif
4120} 4109}
4121 4110
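
wv_pcmcia_release() above is typical of the teardown simplification: the window/configuration/io/irq release calls and the DEV_CONFIG flag handling collapse into a single pcmcia_disable_device(). A one-function sketch of the pattern (my_release is an illustrative name):

	static void my_release(struct pcmcia_device *link)
	{
		/* was:
		 *	pcmcia_release_window(link->win);
		 *	pcmcia_release_configuration(link->handle);
		 *	pcmcia_release_io(link->handle, &link->io);
		 *	pcmcia_release_irq(link->handle, &link->irq);
		 *	link->state &= ~DEV_CONFIG;
		 */
		pcmcia_disable_device(link);	/* undoes whatever was requested */
	}
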
@@ -4479,7 +4468,7 @@ static int
4479wavelan_open(struct net_device * dev) 4468wavelan_open(struct net_device * dev)
4480{ 4469{
4481 net_local * lp = netdev_priv(dev); 4470 net_local * lp = netdev_priv(dev);
4482 dev_link_t * link = lp->link; 4471 struct pcmcia_device * link = lp->link;
4483 kio_addr_t base = dev->base_addr; 4472 kio_addr_t base = dev->base_addr;
4484 4473
4485#ifdef DEBUG_CALLBACK_TRACE 4474#ifdef DEBUG_CALLBACK_TRACE
@@ -4533,7 +4522,7 @@ wavelan_open(struct net_device * dev)
4533static int 4522static int
4534wavelan_close(struct net_device * dev) 4523wavelan_close(struct net_device * dev)
4535{ 4524{
4536 dev_link_t * link = ((net_local *)netdev_priv(dev))->link; 4525 struct pcmcia_device * link = ((net_local *)netdev_priv(dev))->link;
4537 kio_addr_t base = dev->base_addr; 4526 kio_addr_t base = dev->base_addr;
4538 4527
4539#ifdef DEBUG_CALLBACK_TRACE 4528#ifdef DEBUG_CALLBACK_TRACE
@@ -4587,45 +4576,36 @@ wavelan_close(struct net_device * dev)
4587 * card insertion event. 4576 * card insertion event.
4588 */ 4577 */
4589static int 4578static int
4590wavelan_attach(struct pcmcia_device *p_dev) 4579wavelan_probe(struct pcmcia_device *p_dev)
4591{ 4580{
4592 dev_link_t * link; /* Info for cardmgr */
4593 struct net_device * dev; /* Interface generic data */ 4581 struct net_device * dev; /* Interface generic data */
4594 net_local * lp; /* Interface specific data */ 4582 net_local * lp; /* Interface specific data */
4583 int ret;
4595 4584
4596#ifdef DEBUG_CALLBACK_TRACE 4585#ifdef DEBUG_CALLBACK_TRACE
4597 printk(KERN_DEBUG "-> wavelan_attach()\n"); 4586 printk(KERN_DEBUG "-> wavelan_attach()\n");
4598#endif 4587#endif
4599 4588
4600 /* Initialize the dev_link_t structure */
4601 link = kzalloc(sizeof(struct dev_link_t), GFP_KERNEL);
4602 if (!link) return -ENOMEM;
4603
4604 /* The io structure describes IO port mapping */ 4589 /* The io structure describes IO port mapping */
4605 link->io.NumPorts1 = 8; 4590 p_dev->io.NumPorts1 = 8;
4606 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 4591 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
4607 link->io.IOAddrLines = 3; 4592 p_dev->io.IOAddrLines = 3;
4608 4593
4609 /* Interrupt setup */ 4594 /* Interrupt setup */
4610 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 4595 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
4611 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 4596 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
4612 link->irq.Handler = wavelan_interrupt; 4597 p_dev->irq.Handler = wavelan_interrupt;
4613 4598
4614 /* General socket configuration */ 4599 /* General socket configuration */
4615 link->conf.Attributes = CONF_ENABLE_IRQ; 4600 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
4616 link->conf.Vcc = 50; 4601 p_dev->conf.IntType = INT_MEMORY_AND_IO;
4617 link->conf.IntType = INT_MEMORY_AND_IO;
4618
4619 /* Chain drivers */
4620 link->next = NULL;
4621 4602
4622 /* Allocate the generic data structure */ 4603 /* Allocate the generic data structure */
4623 dev = alloc_etherdev(sizeof(net_local)); 4604 dev = alloc_etherdev(sizeof(net_local));
4624 if (!dev) { 4605 if (!dev)
4625 kfree(link);
4626 return -ENOMEM; 4606 return -ENOMEM;
4627 } 4607
4628 link->priv = link->irq.Instance = dev; 4608 p_dev->priv = p_dev->irq.Instance = dev;
4629 4609
4630 lp = netdev_priv(dev); 4610 lp = netdev_priv(dev);
4631 4611
@@ -4642,7 +4622,6 @@ wavelan_attach(struct pcmcia_device *p_dev)
4642 spin_lock_init(&lp->spinlock); 4622 spin_lock_init(&lp->spinlock);
4643 4623
4644 /* back links */ 4624 /* back links */
4645 lp->link = link;
4646 lp->dev = dev; 4625 lp->dev = dev;
4647 4626
4648 /* wavelan NET3 callbacks */ 4627 /* wavelan NET3 callbacks */
@@ -4668,15 +4647,18 @@ wavelan_attach(struct pcmcia_device *p_dev)
4668 /* Other specific data */ 4647 /* Other specific data */
4669 dev->mtu = WAVELAN_MTU; 4648 dev->mtu = WAVELAN_MTU;
4670 4649
4671 link->handle = p_dev; 4650 ret = wv_pcmcia_config(p_dev);
4672 p_dev->instance = link; 4651 if (ret)
4652 return ret;
4673 4653
4674 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING; 4654 ret = wv_hw_config(dev);
4675 if(wv_pcmcia_config(link) && 4655 if (ret) {
4676 wv_hw_config(dev))
4677 wv_init_info(dev);
4678 else
4679 dev->irq = 0; 4656 dev->irq = 0;
4657 pcmcia_disable_device(p_dev);
4658 return ret;
4659 }
4660
4661 wv_init_info(dev);
4680 4662
4681#ifdef DEBUG_CALLBACK_TRACE 4663#ifdef DEBUG_CALLBACK_TRACE
4682 printk(KERN_DEBUG "<- wavelan_attach()\n"); 4664 printk(KERN_DEBUG "<- wavelan_attach()\n");
@@ -4693,25 +4675,14 @@ wavelan_attach(struct pcmcia_device *p_dev)
4693 * is released. 4675 * is released.
4694 */ 4676 */
4695static void 4677static void
4696wavelan_detach(struct pcmcia_device *p_dev) 4678wavelan_detach(struct pcmcia_device *link)
4697{ 4679{
4698 dev_link_t *link = dev_to_instance(p_dev);
4699
4700#ifdef DEBUG_CALLBACK_TRACE 4680#ifdef DEBUG_CALLBACK_TRACE
4701 printk(KERN_DEBUG "-> wavelan_detach(0x%p)\n", link); 4681 printk(KERN_DEBUG "-> wavelan_detach(0x%p)\n", link);
4702#endif 4682#endif
4703 4683
4704 /* 4684 /* Some others haven't done their job : give them another chance */
4705 * If the device is currently configured and active, we won't 4685 wv_pcmcia_release(link);
4706 * actually delete it yet. Instead, it is marked so that when the
4707 * release() function is called, that will trigger a proper
4708 * detach().
4709 */
4710 if(link->state & DEV_CONFIG)
4711 {
4712 /* Some others haven't done their job : give them another chance */
4713 wv_pcmcia_release(link);
4714 }
4715 4686
4716 /* Free pieces */ 4687 /* Free pieces */
4717 if(link->priv) 4688 if(link->priv)
@@ -4720,23 +4691,21 @@ wavelan_detach(struct pcmcia_device *p_dev)
4720 4691
4721 /* Remove ourselves from the kernel list of ethernet devices */ 4692 /* Remove ourselves from the kernel list of ethernet devices */
4722 /* Warning : can't be called from interrupt, timer or wavelan_close() */ 4693 /* Warning : can't be called from interrupt, timer or wavelan_close() */
4723 if (link->dev) 4694 if (link->dev_node)
4724 unregister_netdev(dev); 4695 unregister_netdev(dev);
4725 link->dev = NULL; 4696 link->dev_node = NULL;
4726 ((net_local *)netdev_priv(dev))->link = NULL; 4697 ((net_local *)netdev_priv(dev))->link = NULL;
4727 ((net_local *)netdev_priv(dev))->dev = NULL; 4698 ((net_local *)netdev_priv(dev))->dev = NULL;
4728 free_netdev(dev); 4699 free_netdev(dev);
4729 } 4700 }
4730 kfree(link);
4731 4701
4732#ifdef DEBUG_CALLBACK_TRACE 4702#ifdef DEBUG_CALLBACK_TRACE
4733 printk(KERN_DEBUG "<- wavelan_detach()\n"); 4703 printk(KERN_DEBUG "<- wavelan_detach()\n");
4734#endif 4704#endif
4735} 4705}
4736 4706
4737static int wavelan_suspend(struct pcmcia_device *p_dev) 4707static int wavelan_suspend(struct pcmcia_device *link)
4738{ 4708{
4739 dev_link_t *link = dev_to_instance(p_dev);
4740 struct net_device * dev = (struct net_device *) link->priv; 4709 struct net_device * dev = (struct net_device *) link->priv;
4741 4710
4742 /* NB: wavelan_close will be called, but too late, so we are 4711 /* NB: wavelan_close will be called, but too late, so we are
@@ -4748,36 +4717,22 @@ static int wavelan_suspend(struct pcmcia_device *p_dev)
4748 /* Stop receiving new messages and wait end of transmission */ 4717 /* Stop receiving new messages and wait end of transmission */
4749 wv_ru_stop(dev); 4718 wv_ru_stop(dev);
4750 4719
4720 if (link->open)
4721 netif_device_detach(dev);
4722
4751 /* Power down the module */ 4723 /* Power down the module */
4752 hacr_write(dev->base_addr, HACR_DEFAULT & (~HACR_PWR_STAT)); 4724 hacr_write(dev->base_addr, HACR_DEFAULT & (~HACR_PWR_STAT));
4753 4725
4754 /* The card is now suspended */
4755 link->state |= DEV_SUSPEND;
4756
4757 if(link->state & DEV_CONFIG)
4758 {
4759 if(link->open)
4760 netif_device_detach(dev);
4761 pcmcia_release_configuration(link->handle);
4762 }
4763
4764 return 0; 4726 return 0;
4765} 4727}
4766 4728
4767static int wavelan_resume(struct pcmcia_device *p_dev) 4729static int wavelan_resume(struct pcmcia_device *link)
4768{ 4730{
4769 dev_link_t *link = dev_to_instance(p_dev);
4770 struct net_device * dev = (struct net_device *) link->priv; 4731 struct net_device * dev = (struct net_device *) link->priv;
4771 4732
4772 link->state &= ~DEV_SUSPEND; 4733 if (link->open) {
4773 if(link->state & DEV_CONFIG) 4734 wv_hw_reset(dev);
4774 { 4735 netif_device_attach(dev);
4775 pcmcia_request_configuration(link->handle, &link->conf);
4776 if(link->open) /* If RESET -> True, If RESUME -> False ? */
4777 {
4778 wv_hw_reset(dev);
4779 netif_device_attach(dev);
4780 }
4781 } 4736 }
4782 4737
4783 return 0; 4738 return 0;
@@ -4798,7 +4753,7 @@ static struct pcmcia_driver wavelan_driver = {
4798 .drv = { 4753 .drv = {
4799 .name = "wavelan_cs", 4754 .name = "wavelan_cs",
4800 }, 4755 },
4801 .probe = wavelan_attach, 4756 .probe = wavelan_probe,
4802 .remove = wavelan_detach, 4757 .remove = wavelan_detach,
4803 .id_table = wavelan_ids, 4758 .id_table = wavelan_ids,
4804 .suspend = wavelan_suspend, 4759 .suspend = wavelan_suspend,
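
wavelan_probe() no longer kzallocs a dev_link_t; the resource hints go straight into the struct pcmcia_device and the probe result is a plain errno. A sketch of that shape under the 2.6.16-era API; my_probe/my_config and struct my_local are illustrative stand-ins, and error handling is reduced to the minimum:

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>
	#include <pcmcia/cs_types.h>
	#include <pcmcia/cs.h>
	#include <pcmcia/ds.h>

	struct my_local { int dummy; };		/* stand-in for net_local */

	static int my_config(struct pcmcia_device *p_dev)
	{
		/* a real driver requests IO, IRQ and the configuration here */
		return 0;
	}

	static int my_probe(struct pcmcia_device *p_dev)
	{
		struct net_device *dev;
		int ret;

		/* resource hints live directly in the pcmcia_device now */
		p_dev->io.NumPorts1 = 8;
		p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
		p_dev->io.IOAddrLines = 3;
		p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
		p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
		p_dev->conf.Attributes = CONF_ENABLE_IRQ;
		p_dev->conf.IntType = INT_MEMORY_AND_IO;

		dev = alloc_etherdev(sizeof(struct my_local));
		if (!dev)
			return -ENOMEM;
		p_dev->priv = p_dev->irq.Instance = dev;

		ret = my_config(p_dev);		/* 0 on success, -errno on failure */
		if (ret)
			free_netdev(dev);
		return ret;
	}
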
diff --git a/drivers/net/wireless/wavelan_cs.p.h b/drivers/net/wireless/wavelan_cs.p.h
index 451f6271dcbc..c65fe7a391ec 100644
--- a/drivers/net/wireless/wavelan_cs.p.h
+++ b/drivers/net/wireless/wavelan_cs.p.h
@@ -602,7 +602,7 @@ struct net_local
602 dev_node_t node; /* ???? What is this stuff ???? */ 602 dev_node_t node; /* ???? What is this stuff ???? */
603 struct net_device * dev; /* Reverse link... */ 603 struct net_device * dev; /* Reverse link... */
604 spinlock_t spinlock; /* Serialize access to the hardware (SMP) */ 604 spinlock_t spinlock; /* Serialize access to the hardware (SMP) */
605 dev_link_t * link; /* pcmcia structure */ 605 struct pcmcia_device * link; /* pcmcia structure */
606 en_stats stats; /* Ethernet interface statistics */ 606 en_stats stats; /* Ethernet interface statistics */
607 int nresets; /* Number of hw resets */ 607 int nresets; /* Number of hw resets */
608 u_char configured; /* If it is configured */ 608 u_char configured; /* If it is configured */
@@ -733,9 +733,9 @@ static int
733static inline void 733static inline void
734 wv_hw_reset(struct net_device *); /* Same, + start receiver unit */ 734 wv_hw_reset(struct net_device *); /* Same, + start receiver unit */
735static inline int 735static inline int
736 wv_pcmcia_config(dev_link_t *); /* Configure the pcmcia interface */ 736 wv_pcmcia_config(struct pcmcia_device *); /* Configure the pcmcia interface */
737static void 737static void
738 wv_pcmcia_release(dev_link_t *);/* Remove a device */ 738 wv_pcmcia_release(struct pcmcia_device *);/* Remove a device */
739/* ---------------------- INTERRUPT HANDLING ---------------------- */ 739/* ---------------------- INTERRUPT HANDLING ---------------------- */
740static irqreturn_t 740static irqreturn_t
741 wavelan_interrupt(int, /* Interrupt handler */ 741 wavelan_interrupt(int, /* Interrupt handler */
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
index 4303c50c2ab6..65ceb088f700 100644
--- a/drivers/net/wireless/wl3501.h
+++ b/drivers/net/wireless/wl3501.h
@@ -611,5 +611,6 @@ struct wl3501_card {
611 struct iw_spy_data spy_data; 611 struct iw_spy_data spy_data;
612 struct iw_public_data wireless_data; 612 struct iw_public_data wireless_data;
613 struct dev_node_t node; 613 struct dev_node_t node;
614 struct pcmcia_device *p_dev;
614}; 615};
615#endif 616#endif
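
The new p_dev member in struct wl3501_card is the back-pointer that lets open()/close() reach the PCMCIA device directly instead of walking the removed wl3501_dev_list. A sketch of how such a back-pointer is typically used, with my_card/my_open as illustrative names:

	struct my_card {
		struct pcmcia_device *p_dev;	/* set once in the probe path */
	};

	static int my_open(struct net_device *dev)
	{
		struct my_card *card = netdev_priv(dev);
		struct pcmcia_device *link = card->p_dev;

		if (!pcmcia_dev_present(link))
			return -ENODEV;

		link->open++;			/* usage count on the core structure */
		netif_device_attach(dev);
		return 0;
	}
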
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 48e10b0c7e74..e52a650f6737 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -103,8 +103,8 @@ module_param(pc_debug, int, 0);
103 * release a socket, in response to card insertion and ejection events. They 103 * release a socket, in response to card insertion and ejection events. They
104 * are invoked from the wl24 event handler. 104 * are invoked from the wl24 event handler.
105 */ 105 */
106static void wl3501_config(dev_link_t *link); 106static int wl3501_config(struct pcmcia_device *link);
107static void wl3501_release(dev_link_t *link); 107static void wl3501_release(struct pcmcia_device *link);
108 108
109/* 109/*
110 * The dev_info variable is the "key" that is used to match up this 110 * The dev_info variable is the "key" that is used to match up this
@@ -226,17 +226,6 @@ static void iw_copy_mgmt_info_element(struct iw_mgmt_info_element *to,
226 iw_set_mgmt_info_element(from->id, to, from->data, from->len); 226 iw_set_mgmt_info_element(from->id, to, from->data, from->len);
227} 227}
228 228
229/*
230 * A linked list of "instances" of the wl24 device. Each actual PCMCIA card
231 * corresponds to one device instance, and is described by one dev_link_t
232 * structure (defined in ds.h).
233 *
234 * You may not want to use a linked list for this -- for example, the memory
235 * card driver uses an array of dev_link_t pointers, where minor device numbers
236 * are used to derive the corresponding array index.
237 */
238static dev_link_t *wl3501_dev_list;
239
240static inline void wl3501_switch_page(struct wl3501_card *this, u8 page) 229static inline void wl3501_switch_page(struct wl3501_card *this, u8 page)
241{ 230{
242 wl3501_outb(page, this->base_addr + WL3501_NIC_BSS); 231 wl3501_outb(page, this->base_addr + WL3501_NIC_BSS);
@@ -1281,15 +1270,10 @@ static int wl3501_close(struct net_device *dev)
1281 struct wl3501_card *this = dev->priv; 1270 struct wl3501_card *this = dev->priv;
1282 int rc = -ENODEV; 1271 int rc = -ENODEV;
1283 unsigned long flags; 1272 unsigned long flags;
1284 dev_link_t *link; 1273 struct pcmcia_device *link;
1274 link = this->p_dev;
1285 1275
1286 spin_lock_irqsave(&this->lock, flags); 1276 spin_lock_irqsave(&this->lock, flags);
1287 /* Check if the device is in wl3501_dev_list */
1288 for (link = wl3501_dev_list; link; link = link->next)
1289 if (link->priv == dev)
1290 break;
1291 if (!link)
1292 goto out;
1293 link->open--; 1277 link->open--;
1294 1278
1295 /* Stop wl3501_hard_start_xmit() from now on */ 1279 /* Stop wl3501_hard_start_xmit() from now on */
@@ -1301,7 +1285,6 @@ static int wl3501_close(struct net_device *dev)
1301 1285
1302 rc = 0; 1286 rc = 0;
1303 printk(KERN_INFO "%s: WL3501 closed\n", dev->name); 1287 printk(KERN_INFO "%s: WL3501 closed\n", dev->name);
1304out:
1305 spin_unlock_irqrestore(&this->lock, flags); 1288 spin_unlock_irqrestore(&this->lock, flags);
1306 return rc; 1289 return rc;
1307} 1290}
@@ -1400,14 +1383,11 @@ static int wl3501_open(struct net_device *dev)
1400 int rc = -ENODEV; 1383 int rc = -ENODEV;
1401 struct wl3501_card *this = dev->priv; 1384 struct wl3501_card *this = dev->priv;
1402 unsigned long flags; 1385 unsigned long flags;
1403 dev_link_t *link; 1386 struct pcmcia_device *link;
1387 link = this->p_dev;
1404 1388
1405 spin_lock_irqsave(&this->lock, flags); 1389 spin_lock_irqsave(&this->lock, flags);
1406 /* Check if the device is in wl3501_dev_list */ 1390 if (!pcmcia_dev_present(link))
1407 for (link = wl3501_dev_list; link; link = link->next)
1408 if (link->priv == dev)
1409 break;
1410 if (!DEV_OK(link))
1411 goto out; 1391 goto out;
1412 netif_device_attach(dev); 1392 netif_device_attach(dev);
1413 link->open++; 1393 link->open++;
@@ -1497,38 +1477,23 @@ static struct ethtool_ops ops = {
1497 * Services. If it has been released, all local data structures are freed. 1477 * Services. If it has been released, all local data structures are freed.
1498 * Otherwise, the structures will be freed when the device is released. 1478 * Otherwise, the structures will be freed when the device is released.
1499 */ 1479 */
1500static void wl3501_detach(struct pcmcia_device *p_dev) 1480static void wl3501_detach(struct pcmcia_device *link)
1501{ 1481{
1502 dev_link_t *link = dev_to_instance(p_dev);
1503 dev_link_t **linkp;
1504 struct net_device *dev = link->priv; 1482 struct net_device *dev = link->priv;
1505 1483
1506 /* Locate device structure */
1507 for (linkp = &wl3501_dev_list; *linkp; linkp = &(*linkp)->next)
1508 if (*linkp == link)
1509 break;
1510 if (!*linkp)
1511 goto out;
1512
1513 /* If the device is currently configured and active, we won't actually 1484 /* If the device is currently configured and active, we won't actually
1514 * delete it yet. Instead, it is marked so that when the release() 1485 * delete it yet. Instead, it is marked so that when the release()
1515 * function is called, that will trigger a proper detach(). */ 1486 * function is called, that will trigger a proper detach(). */
1516 1487
1517 if (link->state & DEV_CONFIG) { 1488 while (link->open > 0)
1518 while (link->open > 0) 1489 wl3501_close(dev);
1519 wl3501_close(dev);
1520
1521 netif_device_detach(dev);
1522 wl3501_release(link);
1523 }
1524 1490
1525 /* Unlink device structure, free pieces */ 1491 netif_device_detach(dev);
1526 *linkp = link->next; 1492 wl3501_release(link);
1527 1493
1528 if (link->priv) 1494 if (link->priv)
1529 free_netdev(link->priv); 1495 free_netdev(link->priv);
1530 kfree(link); 1496
1531out:
1532 return; 1497 return;
1533} 1498}
1534 1499
@@ -1953,33 +1918,26 @@ static const struct iw_handler_def wl3501_handler_def = {
1953 * The dev_link structure is initialized, but we don't actually configure the 1918 * The dev_link structure is initialized, but we don't actually configure the
1954 * card at this point -- we wait until we receive a card insertion event. 1919 * card at this point -- we wait until we receive a card insertion event.
1955 */ 1920 */
1956static int wl3501_attach(struct pcmcia_device *p_dev) 1921static int wl3501_probe(struct pcmcia_device *p_dev)
1957{ 1922{
1958 dev_link_t *link;
1959 struct net_device *dev; 1923 struct net_device *dev;
1960 struct wl3501_card *this; 1924 struct wl3501_card *this;
1961 1925
1962 /* Initialize the dev_link_t structure */
1963 link = kzalloc(sizeof(*link), GFP_KERNEL);
1964 if (!link)
1965 return -ENOMEM;
1966
1967 /* The io structure describes IO port mapping */ 1926 /* The io structure describes IO port mapping */
1968 link->io.NumPorts1 = 16; 1927 p_dev->io.NumPorts1 = 16;
1969 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 1928 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
1970 link->io.IOAddrLines = 5; 1929 p_dev->io.IOAddrLines = 5;
1971 1930
1972 /* Interrupt setup */ 1931 /* Interrupt setup */
1973 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 1932 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
1974 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 1933 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
1975 link->irq.Handler = wl3501_interrupt; 1934 p_dev->irq.Handler = wl3501_interrupt;
1976 1935
1977 /* General socket configuration */ 1936 /* General socket configuration */
1978 link->conf.Attributes = CONF_ENABLE_IRQ; 1937 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
1979 link->conf.Vcc = 50; 1938 p_dev->conf.IntType = INT_MEMORY_AND_IO;
1980 link->conf.IntType = INT_MEMORY_AND_IO; 1939 p_dev->conf.ConfigIndex = 1;
1981 link->conf.ConfigIndex = 1; 1940 p_dev->conf.Present = PRESENT_OPTION;
1982 link->conf.Present = PRESENT_OPTION;
1983 1941
1984 dev = alloc_etherdev(sizeof(struct wl3501_card)); 1942 dev = alloc_etherdev(sizeof(struct wl3501_card));
1985 if (!dev) 1943 if (!dev)
@@ -1992,22 +1950,15 @@ static int wl3501_attach(struct pcmcia_device *p_dev)
1992 dev->get_stats = wl3501_get_stats; 1950 dev->get_stats = wl3501_get_stats;
1993 this = dev->priv; 1951 this = dev->priv;
1994 this->wireless_data.spy_data = &this->spy_data; 1952 this->wireless_data.spy_data = &this->spy_data;
1953 this->p_dev = p_dev;
1995 dev->wireless_data = &this->wireless_data; 1954 dev->wireless_data = &this->wireless_data;
1996 dev->wireless_handlers = (struct iw_handler_def *)&wl3501_handler_def; 1955 dev->wireless_handlers = (struct iw_handler_def *)&wl3501_handler_def;
1997 SET_ETHTOOL_OPS(dev, &ops); 1956 SET_ETHTOOL_OPS(dev, &ops);
1998 netif_stop_queue(dev); 1957 netif_stop_queue(dev);
1999 link->priv = link->irq.Instance = dev; 1958 p_dev->priv = p_dev->irq.Instance = dev;
2000
2001 link->handle = p_dev;
2002 p_dev->instance = link;
2003
2004 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
2005 wl3501_config(link);
2006 1959
2007 return 0; 1960 return wl3501_config(p_dev);
2008out_link: 1961out_link:
2009 kfree(link);
2010 link = NULL;
2011 return -ENOMEM; 1962 return -ENOMEM;
2012} 1963}
2013 1964
@@ -2022,11 +1973,10 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
2022 * received, to configure the PCMCIA socket, and to make the ethernet device 1973 * received, to configure the PCMCIA socket, and to make the ethernet device
2023 * available to the system. 1974 * available to the system.
2024 */ 1975 */
2025static void wl3501_config(dev_link_t *link) 1976static int wl3501_config(struct pcmcia_device *link)
2026{ 1977{
2027 tuple_t tuple; 1978 tuple_t tuple;
2028 cisparse_t parse; 1979 cisparse_t parse;
2029 client_handle_t handle = link->handle;
2030 struct net_device *dev = link->priv; 1980 struct net_device *dev = link->priv;
2031 int i = 0, j, last_fn, last_ret; 1981 int i = 0, j, last_fn, last_ret;
2032 unsigned char bf[64]; 1982 unsigned char bf[64];
@@ -2035,18 +1985,15 @@ static void wl3501_config(dev_link_t *link)
2035 /* This reads the card's CONFIG tuple to find its config registers. */ 1985 /* This reads the card's CONFIG tuple to find its config registers. */
2036 tuple.Attributes = 0; 1986 tuple.Attributes = 0;
2037 tuple.DesiredTuple = CISTPL_CONFIG; 1987 tuple.DesiredTuple = CISTPL_CONFIG;
2038 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 1988 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
2039 tuple.TupleData = bf; 1989 tuple.TupleData = bf;
2040 tuple.TupleDataMax = sizeof(bf); 1990 tuple.TupleDataMax = sizeof(bf);
2041 tuple.TupleOffset = 0; 1991 tuple.TupleOffset = 0;
2042 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 1992 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
2043 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 1993 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
2044 link->conf.ConfigBase = parse.config.base; 1994 link->conf.ConfigBase = parse.config.base;
2045 link->conf.Present = parse.config.rmask[0]; 1995 link->conf.Present = parse.config.rmask[0];
2046 1996
2047 /* Configure card */
2048 link->state |= DEV_CONFIG;
2049
2050 /* Try allocating IO ports. This tries a few fixed addresses. If you 1997 /* Try allocating IO ports. This tries a few fixed addresses. If you
2051 * want, you can also read the card's config table to pick addresses -- 1998 * want, you can also read the card's config table to pick addresses --
2052 * see the serial driver for an example. */ 1999 * see the serial driver for an example. */
@@ -2056,28 +2003,28 @@ static void wl3501_config(dev_link_t *link)
2056 * 0x200-0x2ff, and so on, because this seems safer */ 2003 * 0x200-0x2ff, and so on, because this seems safer */
2057 link->io.BasePort1 = j; 2004 link->io.BasePort1 = j;
2058 link->io.BasePort2 = link->io.BasePort1 + 0x10; 2005 link->io.BasePort2 = link->io.BasePort1 + 0x10;
2059 i = pcmcia_request_io(link->handle, &link->io); 2006 i = pcmcia_request_io(link, &link->io);
2060 if (i == CS_SUCCESS) 2007 if (i == CS_SUCCESS)
2061 break; 2008 break;
2062 } 2009 }
2063 if (i != CS_SUCCESS) { 2010 if (i != CS_SUCCESS) {
2064 cs_error(link->handle, RequestIO, i); 2011 cs_error(link, RequestIO, i);
2065 goto failed; 2012 goto failed;
2066 } 2013 }
2067 2014
2068 /* Now allocate an interrupt line. Note that this does not actually 2015 /* Now allocate an interrupt line. Note that this does not actually
2069 * assign a handler to the interrupt. */ 2016 * assign a handler to the interrupt. */
2070 2017
2071 CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq)); 2018 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
2072 2019
2073 /* This actually configures the PCMCIA socket -- setting up the I/O 2020 /* This actually configures the PCMCIA socket -- setting up the I/O
2074 * windows and the interrupt mapping. */ 2021 * windows and the interrupt mapping. */
2075 2022
2076 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf)); 2023 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
2077 2024
2078 dev->irq = link->irq.AssignedIRQ; 2025 dev->irq = link->irq.AssignedIRQ;
2079 dev->base_addr = link->io.BasePort1; 2026 dev->base_addr = link->io.BasePort1;
2080 SET_NETDEV_DEV(dev, &handle_to_dev(handle)); 2027 SET_NETDEV_DEV(dev, &handle_to_dev(link));
2081 if (register_netdev(dev)) { 2028 if (register_netdev(dev)) {
2082 printk(KERN_NOTICE "wl3501_cs: register_netdev() failed\n"); 2029 printk(KERN_NOTICE "wl3501_cs: register_netdev() failed\n");
2083 goto failed; 2030 goto failed;
@@ -2088,10 +2035,9 @@ static void wl3501_config(dev_link_t *link)
2088 this = dev->priv; 2035 this = dev->priv;
2089 /* 2036 /*
2090 * At this point, the dev_node_t structure(s) should be initialized and 2037 * At this point, the dev_node_t structure(s) should be initialized and
2091 * arranged in a linked list at link->dev. 2038 * arranged in a linked list at link->dev_node.
2092 */ 2039 */
2093 link->dev = &this->node; 2040 link->dev_node = &this->node;
2094 link->state &= ~DEV_CONFIG_PENDING;
2095 2041
2096 this->base_addr = dev->base_addr; 2042 this->base_addr = dev->base_addr;
2097 2043
@@ -2127,13 +2073,13 @@ static void wl3501_config(dev_link_t *link)
2127 spin_lock_init(&this->lock); 2073 spin_lock_init(&this->lock);
2128 init_waitqueue_head(&this->wait); 2074 init_waitqueue_head(&this->wait);
2129 netif_start_queue(dev); 2075 netif_start_queue(dev);
2130 goto out; 2076 return 0;
2077
2131cs_failed: 2078cs_failed:
2132 cs_error(link->handle, last_fn, last_ret); 2079 cs_error(link, last_fn, last_ret);
2133failed: 2080failed:
2134 wl3501_release(link); 2081 wl3501_release(link);
2135out: 2082 return -ENODEV;
2136 return;
2137} 2083}
2138 2084
2139/** 2085/**
@@ -2144,52 +2090,36 @@ out:
2144 * and release the PCMCIA configuration. If the device is still open, this 2090 * and release the PCMCIA configuration. If the device is still open, this
2145 * will be postponed until it is closed. 2091 * will be postponed until it is closed.
2146 */ 2092 */
2147static void wl3501_release(dev_link_t *link) 2093static void wl3501_release(struct pcmcia_device *link)
2148{ 2094{
2149 struct net_device *dev = link->priv; 2095 struct net_device *dev = link->priv;
2150 2096
2151 /* Unlink the device chain */ 2097 /* Unlink the device chain */
2152 if (link->dev) { 2098 if (link->dev_node)
2153 unregister_netdev(dev); 2099 unregister_netdev(dev);
2154 link->dev = NULL;
2155 }
2156 2100
2157 /* Don't bother checking to see if these succeed or not */ 2101 pcmcia_disable_device(link);
2158 pcmcia_release_configuration(link->handle);
2159 pcmcia_release_io(link->handle, &link->io);
2160 pcmcia_release_irq(link->handle, &link->irq);
2161 link->state &= ~DEV_CONFIG;
2162} 2102}
2163 2103
2164static int wl3501_suspend(struct pcmcia_device *p_dev) 2104static int wl3501_suspend(struct pcmcia_device *link)
2165{ 2105{
2166 dev_link_t *link = dev_to_instance(p_dev);
2167 struct net_device *dev = link->priv; 2106 struct net_device *dev = link->priv;
2168 2107
2169 link->state |= DEV_SUSPEND;
2170
2171 wl3501_pwr_mgmt(dev->priv, WL3501_SUSPEND); 2108 wl3501_pwr_mgmt(dev->priv, WL3501_SUSPEND);
2172 if (link->state & DEV_CONFIG) { 2109 if (link->open)
2173 if (link->open) 2110 netif_device_detach(dev);
2174 netif_device_detach(dev);
2175 pcmcia_release_configuration(link->handle);
2176 }
2177 2111
2178 return 0; 2112 return 0;
2179} 2113}
2180 2114
2181static int wl3501_resume(struct pcmcia_device *p_dev) 2115static int wl3501_resume(struct pcmcia_device *link)
2182{ 2116{
2183 dev_link_t *link = dev_to_instance(p_dev);
2184 struct net_device *dev = link->priv; 2117 struct net_device *dev = link->priv;
2185 2118
2186 wl3501_pwr_mgmt(dev->priv, WL3501_RESUME); 2119 wl3501_pwr_mgmt(dev->priv, WL3501_RESUME);
2187 if (link->state & DEV_CONFIG) { 2120 if (link->open) {
2188 pcmcia_request_configuration(link->handle, &link->conf); 2121 wl3501_reset(dev);
2189 if (link->open) { 2122 netif_device_attach(dev);
2190 wl3501_reset(dev);
2191 netif_device_attach(dev);
2192 }
2193 } 2123 }
2194 2124
2195 return 0; 2125 return 0;
@@ -2207,7 +2137,7 @@ static struct pcmcia_driver wl3501_driver = {
2207 .drv = { 2137 .drv = {
2208 .name = "wl3501_cs", 2138 .name = "wl3501_cs",
2209 }, 2139 },
2210 .probe = wl3501_attach, 2140 .probe = wl3501_probe,
2211 .remove = wl3501_detach, 2141 .remove = wl3501_detach,
2212 .id_table = wl3501_ids, 2142 .id_table = wl3501_ids,
2213 .suspend = wl3501_suspend, 2143 .suspend = wl3501_suspend,
@@ -2221,9 +2151,7 @@ static int __init wl3501_init_module(void)
2221 2151
2222static void __exit wl3501_exit_module(void) 2152static void __exit wl3501_exit_module(void)
2223{ 2153{
2224 dprintk(0, ": unloading");
2225 pcmcia_unregister_driver(&wl3501_driver); 2154 pcmcia_unregister_driver(&wl3501_driver);
2226 BUG_ON(wl3501_dev_list != NULL);
2227} 2155}
2228 2156
2229module_init(wl3501_init_module); 2157module_init(wl3501_init_module);
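The wl3501 conversion above is the pattern repeated throughout this diff: dev_link_t disappears into struct pcmcia_device, the config routine returns an int so failures propagate out of probe, and the old release_configuration/release_io/release_irq sequence collapses into pcmcia_disable_device(). A minimal sketch of the resulting driver shape, with hypothetical foo_* names (not the actual wl3501 code):

#include <linux/slab.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

struct foo_info {
	struct pcmcia_device	*p_dev;
	dev_node_t		node;
	/* ... driver-private state ... */
};

static int foo_config(struct pcmcia_device *link);	/* CIS walk + request_io/irq/configuration */

static int foo_probe(struct pcmcia_device *link)
{
	struct foo_info *info = kzalloc(sizeof(*info), GFP_KERNEL);

	if (!info)
		return -ENOMEM;
	info->p_dev = link;
	link->priv = info;

	link->conf.Attributes = CONF_ENABLE_IRQ;
	link->conf.IntType = INT_MEMORY_AND_IO;

	return foo_config(link);	/* an error here fails the probe */
}

static void foo_release(struct pcmcia_device *link)
{
	pcmcia_disable_device(link);	/* replaces the release_*() trio */
}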
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 75d56bfef0ee..fd0f43b7db5b 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -1441,8 +1441,7 @@ static void __devexit yellowfin_remove_one (struct pci_dev *pdev)
1441 struct net_device *dev = pci_get_drvdata(pdev); 1441 struct net_device *dev = pci_get_drvdata(pdev);
1442 struct yellowfin_private *np; 1442 struct yellowfin_private *np;
1443 1443
1444 if (!dev) 1444 BUG_ON(!dev);
1445 BUG();
1446 np = netdev_priv(dev); 1445 np = netdev_priv(dev);
1447 1446
1448 pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status, 1447 pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c
index 158d92563259..b953d5907c05 100644
--- a/drivers/parport/parport_cs.c
+++ b/drivers/parport/parport_cs.c
@@ -81,15 +81,15 @@ static char *version =
81#define FORCE_EPP_MODE 0x08 81#define FORCE_EPP_MODE 0x08
82 82
83typedef struct parport_info_t { 83typedef struct parport_info_t {
84 dev_link_t link; 84 struct pcmcia_device *p_dev;
85 int ndev; 85 int ndev;
86 dev_node_t node; 86 dev_node_t node;
87 struct parport *port; 87 struct parport *port;
88} parport_info_t; 88} parport_info_t;
89 89
90static void parport_detach(struct pcmcia_device *p_dev); 90static void parport_detach(struct pcmcia_device *p_dev);
91static void parport_config(dev_link_t *link); 91static int parport_config(struct pcmcia_device *link);
92static void parport_cs_release(dev_link_t *); 92static void parport_cs_release(struct pcmcia_device *);
93 93
94/*====================================================================== 94/*======================================================================
95 95
@@ -99,10 +99,9 @@ static void parport_cs_release(dev_link_t *);
99 99
100======================================================================*/ 100======================================================================*/
101 101
102static int parport_attach(struct pcmcia_device *p_dev) 102static int parport_probe(struct pcmcia_device *link)
103{ 103{
104 parport_info_t *info; 104 parport_info_t *info;
105 dev_link_t *link;
106 105
107 DEBUG(0, "parport_attach()\n"); 106 DEBUG(0, "parport_attach()\n");
108 107
@@ -110,23 +109,17 @@ static int parport_attach(struct pcmcia_device *p_dev)
110 info = kmalloc(sizeof(*info), GFP_KERNEL); 109 info = kmalloc(sizeof(*info), GFP_KERNEL);
111 if (!info) return -ENOMEM; 110 if (!info) return -ENOMEM;
112 memset(info, 0, sizeof(*info)); 111 memset(info, 0, sizeof(*info));
113 link = &info->link; link->priv = info; 112 link->priv = info;
113 info->p_dev = link;
114 114
115 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 115 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
116 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 116 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
117 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 117 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
118 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 118 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
119 link->conf.Attributes = CONF_ENABLE_IRQ; 119 link->conf.Attributes = CONF_ENABLE_IRQ;
120 link->conf.Vcc = 50;
121 link->conf.IntType = INT_MEMORY_AND_IO; 120 link->conf.IntType = INT_MEMORY_AND_IO;
122 121
123 link->handle = p_dev; 122 return parport_config(link);
124 p_dev->instance = link;
125
126 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
127 parport_config(link);
128
129 return 0;
130} /* parport_attach */ 123} /* parport_attach */
131 124
132/*====================================================================== 125/*======================================================================
@@ -138,14 +131,11 @@ static int parport_attach(struct pcmcia_device *p_dev)
138 131
139======================================================================*/ 132======================================================================*/
140 133
141static void parport_detach(struct pcmcia_device *p_dev) 134static void parport_detach(struct pcmcia_device *link)
142{ 135{
143 dev_link_t *link = dev_to_instance(p_dev);
144
145 DEBUG(0, "parport_detach(0x%p)\n", link); 136 DEBUG(0, "parport_detach(0x%p)\n", link);
146 137
147 if (link->state & DEV_CONFIG) 138 parport_cs_release(link);
148 parport_cs_release(link);
149 139
150 kfree(link->priv); 140 kfree(link->priv);
151} /* parport_detach */ 141} /* parport_detach */
@@ -161,14 +151,12 @@ static void parport_detach(struct pcmcia_device *p_dev)
161#define CS_CHECK(fn, ret) \ 151#define CS_CHECK(fn, ret) \
162do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 152do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
163 153
164void parport_config(dev_link_t *link) 154static int parport_config(struct pcmcia_device *link)
165{ 155{
166 client_handle_t handle = link->handle;
167 parport_info_t *info = link->priv; 156 parport_info_t *info = link->priv;
168 tuple_t tuple; 157 tuple_t tuple;
169 u_short buf[128]; 158 u_short buf[128];
170 cisparse_t parse; 159 cisparse_t parse;
171 config_info_t conf;
172 cistpl_cftable_entry_t *cfg = &parse.cftable_entry; 160 cistpl_cftable_entry_t *cfg = &parse.cftable_entry;
173 cistpl_cftable_entry_t dflt = { 0 }; 161 cistpl_cftable_entry_t dflt = { 0 };
174 struct parport *p; 162 struct parport *p;
@@ -180,24 +168,18 @@ void parport_config(dev_link_t *link)
180 tuple.TupleOffset = 0; tuple.TupleDataMax = 255; 168 tuple.TupleOffset = 0; tuple.TupleDataMax = 255;
181 tuple.Attributes = 0; 169 tuple.Attributes = 0;
182 tuple.DesiredTuple = CISTPL_CONFIG; 170 tuple.DesiredTuple = CISTPL_CONFIG;
183 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 171 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
184 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 172 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
185 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 173 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
186 link->conf.ConfigBase = parse.config.base; 174 link->conf.ConfigBase = parse.config.base;
187 link->conf.Present = parse.config.rmask[0]; 175 link->conf.Present = parse.config.rmask[0];
188
189 /* Configure card */
190 link->state |= DEV_CONFIG;
191 176
192 /* Not sure if this is right... look up the current Vcc */
193 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(handle, &conf));
194
195 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 177 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
196 tuple.Attributes = 0; 178 tuple.Attributes = 0;
197 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 179 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
198 while (1) { 180 while (1) {
199 if (pcmcia_get_tuple_data(handle, &tuple) != 0 || 181 if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
200 pcmcia_parse_tuple(handle, &tuple, &parse) != 0) 182 pcmcia_parse_tuple(link, &tuple, &parse) != 0)
201 goto next_entry; 183 goto next_entry;
202 184
203 if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) { 185 if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
@@ -212,7 +194,7 @@ void parport_config(dev_link_t *link)
212 link->io.BasePort2 = io->win[1].base; 194 link->io.BasePort2 = io->win[1].base;
213 link->io.NumPorts2 = io->win[1].len; 195 link->io.NumPorts2 = io->win[1].len;
214 } 196 }
215 if (pcmcia_request_io(link->handle, &link->io) != 0) 197 if (pcmcia_request_io(link, &link->io) != 0)
216 goto next_entry; 198 goto next_entry;
217 /* If we've got this far, we're done */ 199 /* If we've got this far, we're done */
218 break; 200 break;
@@ -220,15 +202,12 @@ void parport_config(dev_link_t *link)
220 202
221 next_entry: 203 next_entry:
222 if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg; 204 if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg;
223 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple)); 205 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
224 } 206 }
225 207
226 CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq)); 208 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
227 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf)); 209 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
228 210
229 release_region(link->io.BasePort1, link->io.NumPorts1);
230 if (link->io.NumPorts2)
231 release_region(link->io.BasePort2, link->io.NumPorts2);
232 p = parport_pc_probe_port(link->io.BasePort1, link->io.BasePort2, 211 p = parport_pc_probe_port(link->io.BasePort1, link->io.BasePort2,
233 link->irq.AssignedIRQ, PARPORT_DMA_NONE, 212 link->irq.AssignedIRQ, PARPORT_DMA_NONE,
234 NULL); 213 NULL);
@@ -247,17 +226,15 @@ void parport_config(dev_link_t *link)
247 info->node.minor = p->number; 226 info->node.minor = p->number;
248 info->port = p; 227 info->port = p;
249 strcpy(info->node.dev_name, p->name); 228 strcpy(info->node.dev_name, p->name);
250 link->dev = &info->node; 229 link->dev_node = &info->node;
230
231 return 0;
251 232
252 link->state &= ~DEV_CONFIG_PENDING;
253 return;
254
255cs_failed: 233cs_failed:
256 cs_error(link->handle, last_fn, last_ret); 234 cs_error(link, last_fn, last_ret);
257failed: 235failed:
258 parport_cs_release(link); 236 parport_cs_release(link);
259 link->state &= ~DEV_CONFIG_PENDING; 237 return -ENODEV;
260
261} /* parport_config */ 238} /* parport_config */
262 239
263/*====================================================================== 240/*======================================================================
@@ -268,53 +245,21 @@ failed:
268 245
269======================================================================*/ 246======================================================================*/
270 247
271void parport_cs_release(dev_link_t *link) 248void parport_cs_release(struct pcmcia_device *link)
272{
273 parport_info_t *info = link->priv;
274
275 DEBUG(0, "parport_release(0x%p)\n", link);
276
277 if (info->ndev) {
278 struct parport *p = info->port;
279 parport_pc_unregister_port(p);
280 request_region(link->io.BasePort1, link->io.NumPorts1,
281 info->node.dev_name);
282 if (link->io.NumPorts2)
283 request_region(link->io.BasePort2, link->io.NumPorts2,
284 info->node.dev_name);
285 }
286 info->ndev = 0;
287 link->dev = NULL;
288
289 pcmcia_release_configuration(link->handle);
290 pcmcia_release_io(link->handle, &link->io);
291 pcmcia_release_irq(link->handle, &link->irq);
292
293 link->state &= ~DEV_CONFIG;
294
295} /* parport_cs_release */
296
297static int parport_suspend(struct pcmcia_device *dev)
298{ 249{
299 dev_link_t *link = dev_to_instance(dev); 250 parport_info_t *info = link->priv;
300 251
301 link->state |= DEV_SUSPEND; 252 DEBUG(0, "parport_release(0x%p)\n", link);
302 if (link->state & DEV_CONFIG)
303 pcmcia_release_configuration(link->handle);
304 253
305 return 0; 254 if (info->ndev) {
306} 255 struct parport *p = info->port;
307 256 parport_pc_unregister_port(p);
308static int parport_resume(struct pcmcia_device *dev) 257 }
309{ 258 info->ndev = 0;
310 dev_link_t *link = dev_to_instance(dev);
311 259
312 link->state &= ~DEV_SUSPEND; 260 pcmcia_disable_device(link);
313 if (DEV_OK(link)) 261} /* parport_cs_release */
314 pcmcia_request_configuration(link->handle, &link->conf);
315 262
316 return 0;
317}
318 263
319static struct pcmcia_device_id parport_ids[] = { 264static struct pcmcia_device_id parport_ids[] = {
320 PCMCIA_DEVICE_FUNC_ID(3), 265 PCMCIA_DEVICE_FUNC_ID(3),
@@ -328,11 +273,9 @@ static struct pcmcia_driver parport_cs_driver = {
328 .drv = { 273 .drv = {
329 .name = "parport_cs", 274 .name = "parport_cs",
330 }, 275 },
331 .probe = parport_attach, 276 .probe = parport_probe,
332 .remove = parport_detach, 277 .remove = parport_detach,
333 .id_table = parport_ids, 278 .id_table = parport_ids,
334 .suspend = parport_suspend,
335 .resume = parport_resume,
336}; 279};
337 280
338static int __init init_parport_cs(void) 281static int __init init_parport_cs(void)
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 1f4ad0e7836e..cba6c9eef28e 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -263,6 +263,13 @@ config OMAP_CF
263 Say Y here to support the CompactFlash controller on OMAP. 263 Say Y here to support the CompactFlash controller on OMAP.
264 Note that this doesn't support "True IDE" mode. 264 Note that this doesn't support "True IDE" mode.
265 265
266config AT91_CF
267 tristate "AT91 CompactFlash Controller"
268 depends on PCMCIA && ARCH_AT91RM9200
269 help
270 Say Y here to support the CompactFlash controller on AT91 chips.
271 Or choose M to compile the driver as a module named "at91_cf".
272
266config PCCARD_NONSTATIC 273config PCCARD_NONSTATIC
267 tristate 274 tristate
268 275
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index bcecf5133b7e..4276965517f2 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -10,7 +10,7 @@ pcmcia_core-y += cs.o cistpl.o rsrc_mgr.o socket_sysfs.o
10pcmcia_core-$(CONFIG_CARDBUS) += cardbus.o 10pcmcia_core-$(CONFIG_CARDBUS) += cardbus.o
11obj-$(CONFIG_PCCARD) += pcmcia_core.o 11obj-$(CONFIG_PCCARD) += pcmcia_core.o
12 12
13pcmcia-y += ds.o pcmcia_compat.o pcmcia_resource.o 13pcmcia-y += ds.o pcmcia_resource.o
14pcmcia-$(CONFIG_PCMCIA_IOCTL) += pcmcia_ioctl.o 14pcmcia-$(CONFIG_PCMCIA_IOCTL) += pcmcia_ioctl.o
15obj-$(CONFIG_PCMCIA) += pcmcia.o 15obj-$(CONFIG_PCMCIA) += pcmcia.o
16 16
@@ -36,6 +36,7 @@ obj-$(CONFIG_PCMCIA_AU1X00) += au1x00_ss.o
36obj-$(CONFIG_PCMCIA_VRC4171) += vrc4171_card.o 36obj-$(CONFIG_PCMCIA_VRC4171) += vrc4171_card.o
37obj-$(CONFIG_PCMCIA_VRC4173) += vrc4173_cardu.o 37obj-$(CONFIG_PCMCIA_VRC4173) += vrc4173_cardu.o
38obj-$(CONFIG_OMAP_CF) += omap_cf.o 38obj-$(CONFIG_OMAP_CF) += omap_cf.o
39obj-$(CONFIG_AT91_CF) += at91_cf.o
39 40
40sa11xx_core-y += soc_common.o sa11xx_base.o 41sa11xx_core-y += soc_common.o sa11xx_base.o
41pxa2xx_core-y += soc_common.o pxa2xx_base.o 42pxa2xx_core-y += soc_common.o pxa2xx_base.o
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
new file mode 100644
index 000000000000..67cc5f7d0c90
--- /dev/null
+++ b/drivers/pcmcia/at91_cf.c
@@ -0,0 +1,365 @@
1/*
2 * at91_cf.c -- AT91 CompactFlash controller driver
3 *
4 * Copyright (C) 2005 David Brownell
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/sched.h>
15#include <linux/platform_device.h>
16#include <linux/errno.h>
17#include <linux/init.h>
18#include <linux/interrupt.h>
19
20#include <pcmcia/ss.h>
21
22#include <asm/hardware.h>
23#include <asm/io.h>
24#include <asm/sizes.h>
25
26#include <asm/arch/at91rm9200.h>
27#include <asm/arch/board.h>
28#include <asm/arch/gpio.h>
29
30
31#define CF_SIZE 0x30000000 /* CS5+CS6: unavailable */
32
33/*
34 * A0..A10 work in each range; A23 indicates I/O space; A25 is CFRNW;
35 * some other bit in {A24,A22..A11} is nREG to flag memory access
36 * (vs attributes). So more than 2KB/region would just be waste.
37 */
38#define CF_ATTR_PHYS (AT91_CF_BASE)
39#define CF_IO_PHYS (AT91_CF_BASE + (1 << 23))
40#define CF_MEM_PHYS (AT91_CF_BASE + 0x017ff800)
41
42/*--------------------------------------------------------------------------*/
43
44static const char driver_name[] = "at91_cf";
45
46struct at91_cf_socket {
47 struct pcmcia_socket socket;
48
49 unsigned present:1;
50
51 struct platform_device *pdev;
52 struct at91_cf_data *board;
53};
54
55#define SZ_2K (2 * SZ_1K)
56
57static inline int at91_cf_present(struct at91_cf_socket *cf)
58{
59 return !at91_get_gpio_value(cf->board->det_pin);
60}
61
62/*--------------------------------------------------------------------------*/
63
64static int at91_cf_ss_init(struct pcmcia_socket *s)
65{
66 return 0;
67}
68
69static irqreturn_t at91_cf_irq(int irq, void *_cf, struct pt_regs *r)
70{
71 struct at91_cf_socket *cf = (struct at91_cf_socket *) _cf;
72
73 if (irq == cf->board->det_pin) {
74 unsigned present = at91_cf_present(cf);
75
76 /* kick pccard as needed */
77 if (present != cf->present) {
78 cf->present = present;
79 pr_debug("%s: card %s\n", driver_name, present ? "present" : "gone");
80 pcmcia_parse_events(&cf->socket, SS_DETECT);
81 }
82 }
83
84 return IRQ_HANDLED;
85}
86
87static int at91_cf_get_status(struct pcmcia_socket *s, u_int *sp)
88{
89 struct at91_cf_socket *cf;
90
91 if (!sp)
92 return -EINVAL;
93
94 cf = container_of(s, struct at91_cf_socket, socket);
95
96 /* NOTE: we assume 3VCARD, not XVCARD... */
97 if (at91_cf_present(cf)) {
98 int rdy = cf->board->irq_pin; /* RDY/nIRQ */
99 int vcc = cf->board->vcc_pin;
100
101 *sp = SS_DETECT | SS_3VCARD;
102 if (!rdy || at91_get_gpio_value(rdy))
103 *sp |= SS_READY;
104 if (!vcc || at91_get_gpio_value(vcc))
105 *sp |= SS_POWERON;
106 } else
107 *sp = 0;
108
109 return 0;
110}
111
112static int at91_cf_set_socket(struct pcmcia_socket *sock, struct socket_state_t *s)
113{
114 struct at91_cf_socket *cf;
115
116 cf = container_of(sock, struct at91_cf_socket, socket);
117
118 /* switch Vcc if needed and possible */
119 if (cf->board->vcc_pin) {
120 switch (s->Vcc) {
121 case 0:
122 at91_set_gpio_value(cf->board->vcc_pin, 0);
123 break;
124 case 33:
125 at91_set_gpio_value(cf->board->vcc_pin, 1);
126 break;
127 default:
128 return -EINVAL;
129 }
130 }
131
132 /* toggle reset if needed */
133 at91_set_gpio_value(cf->board->rst_pin, s->flags & SS_RESET);
134
135 pr_debug("%s: Vcc %d, io_irq %d, flags %04x csc %04x\n",
136 driver_name, s->Vcc, s->io_irq, s->flags, s->csc_mask);
137
138 return 0;
139}
140
141static int at91_cf_ss_suspend(struct pcmcia_socket *s)
142{
143 return at91_cf_set_socket(s, &dead_socket);
144}
145
146/* we already mapped the I/O region */
147static int at91_cf_set_io_map(struct pcmcia_socket *s, struct pccard_io_map *io)
148{
149 struct at91_cf_socket *cf;
150 u32 csr;
151
152 cf = container_of(s, struct at91_cf_socket, socket);
153 io->flags &= (MAP_ACTIVE | MAP_16BIT | MAP_AUTOSZ);
154
155 /*
156 * Use 16-bit accesses unless/until we need 8-bit i/o space.
157 * Always set CSR4 ... PCMCIA won't always unmap things.
158 */
159 csr = at91_sys_read(AT91_SMC_CSR(4)) & ~AT91_SMC_DBW;
160
161 /*
162 * NOTE: this CF controller ignores IOIS16, so we can't really do
163 * MAP_AUTOSZ. The 16bit mode allows single byte access on either
164 * D0-D7 (even addr) or D8-D15 (odd), so it's close enough for many
165 * purposes (and handles ide-cs).
166 *
167 * The 8bit mode is needed for odd byte access on D0-D7. It seems
168 * some cards only like that way to get at the odd byte, despite
169 * CF 3.0 spec table 35 also giving the D8-D15 option.
170 */
171 if (!(io->flags & (MAP_16BIT|MAP_AUTOSZ))) {
172 csr |= AT91_SMC_DBW_8;
173 pr_debug("%s: 8bit i/o bus\n", driver_name);
174 } else {
175 csr |= AT91_SMC_DBW_16;
176 pr_debug("%s: 16bit i/o bus\n", driver_name);
177 }
178 at91_sys_write(AT91_SMC_CSR(4), csr);
179
180 io->start = cf->socket.io_offset;
181 io->stop = io->start + SZ_2K - 1;
182
183 return 0;
184}
185
186/* pcmcia layer maps/unmaps mem regions */
187static int at91_cf_set_mem_map(struct pcmcia_socket *s, struct pccard_mem_map *map)
188{
189 struct at91_cf_socket *cf;
190
191 if (map->card_start)
192 return -EINVAL;
193
194 cf = container_of(s, struct at91_cf_socket, socket);
195
196 map->flags &= MAP_ACTIVE|MAP_ATTRIB|MAP_16BIT;
197 if (map->flags & MAP_ATTRIB)
198 map->static_start = CF_ATTR_PHYS;
199 else
200 map->static_start = CF_MEM_PHYS;
201
202 return 0;
203}
204
205static struct pccard_operations at91_cf_ops = {
206 .init = at91_cf_ss_init,
207 .suspend = at91_cf_ss_suspend,
208 .get_status = at91_cf_get_status,
209 .set_socket = at91_cf_set_socket,
210 .set_io_map = at91_cf_set_io_map,
211 .set_mem_map = at91_cf_set_mem_map,
212};
213
214/*--------------------------------------------------------------------------*/
215
216static int __init at91_cf_probe(struct device *dev)
217{
218 struct at91_cf_socket *cf;
219 struct at91_cf_data *board = dev->platform_data;
220 struct platform_device *pdev = to_platform_device(dev);
221 unsigned int csa;
222 int status;
223
224 if (!board || !board->det_pin || !board->rst_pin)
225 return -ENODEV;
226
227 cf = kcalloc(1, sizeof *cf, GFP_KERNEL);
228 if (!cf)
229 return -ENOMEM;
230
231 cf->board = board;
232 cf->pdev = pdev;
233 dev_set_drvdata(dev, cf);
234
235 /* CF takes over CS4, CS5, CS6 */
236 csa = at91_sys_read(AT91_EBI_CSA);
237 at91_sys_write(AT91_EBI_CSA, csa | AT91_EBI_CS4A_SMC_COMPACTFLASH);
238
239 /* force poweron defaults for these pins ... */
240 (void) at91_set_A_periph(AT91_PIN_PC9, 0); /* A25/CFRNW */
241 (void) at91_set_A_periph(AT91_PIN_PC10, 0); /* NCS4/CFCS */
242 (void) at91_set_A_periph(AT91_PIN_PC11, 0); /* NCS5/CFCE1 */
243 (void) at91_set_A_periph(AT91_PIN_PC12, 0); /* NCS6/CFCE2 */
244
245 /* nWAIT is _not_ a default setting */
246 (void) at91_set_A_periph(AT91_PIN_PC6, 1); /* nWAIT */
247
248 /*
249 * Static memory controller timing adjustments.
250 * REVISIT: these timings are in terms of MCK cycles, so
251 * when MCK changes (cpufreq etc) so must these values...
252 */
253 at91_sys_write(AT91_SMC_CSR(4), AT91_SMC_ACSS_STD | AT91_SMC_DBW_16 | AT91_SMC_BAT | AT91_SMC_WSEN
254 | AT91_SMC_NWS_(32) /* wait states */
255 | AT91_SMC_RWSETUP_(6) /* setup time */
256 | AT91_SMC_RWHOLD_(4) /* hold time */
257 );
258
259 /* must be a GPIO; ergo must trigger on both edges */
260 status = request_irq(board->det_pin, at91_cf_irq,
261 SA_SAMPLE_RANDOM, driver_name, cf);
262 if (status < 0)
263 goto fail0;
264
265 /*
266 * The card driver will request this irq later as needed,
267 * but it causes lots of "irqNN: nobody cared" messages
268 * unless we report that we handle everything (sigh).
269 * (Note: DK board doesn't wire the IRQ pin...)
270 */
271 if (board->irq_pin) {
272 status = request_irq(board->irq_pin, at91_cf_irq,
273 SA_SHIRQ, driver_name, cf);
274 if (status < 0)
275 goto fail0a;
276 cf->socket.pci_irq = board->irq_pin;
277 }
278 else
279 cf->socket.pci_irq = NR_IRQS + 1;
280
281 /* pcmcia layer only remaps "real" memory not iospace */
282 cf->socket.io_offset = (unsigned long) ioremap(CF_IO_PHYS, SZ_2K);
283 if (!cf->socket.io_offset)
284 goto fail1;
285
286 /* reserve CS4, CS5, and CS6 regions; but use just CS4 */
287 if (!request_mem_region(AT91_CF_BASE, CF_SIZE, driver_name))
288 goto fail1;
289
290 pr_info("%s: irqs det #%d, io #%d\n", driver_name,
291 board->det_pin, board->irq_pin);
292
293 cf->socket.owner = THIS_MODULE;
294 cf->socket.dev.dev = dev;
295 cf->socket.ops = &at91_cf_ops;
296 cf->socket.resource_ops = &pccard_static_ops;
297 cf->socket.features = SS_CAP_PCCARD | SS_CAP_STATIC_MAP
298 | SS_CAP_MEM_ALIGN;
299 cf->socket.map_size = SZ_2K;
300 cf->socket.io[0].NumPorts = SZ_2K;
301
302 status = pcmcia_register_socket(&cf->socket);
303 if (status < 0)
304 goto fail2;
305
306 return 0;
307
308fail2:
309 iounmap((void __iomem *) cf->socket.io_offset);
310 release_mem_region(AT91_CF_BASE, CF_SIZE);
311fail1:
312 if (board->irq_pin)
313 free_irq(board->irq_pin, cf);
314fail0a:
315 free_irq(board->det_pin, cf);
316fail0:
317 at91_sys_write(AT91_EBI_CSA, csa);
318 kfree(cf);
319 return status;
320}
321
322static int __exit at91_cf_remove(struct device *dev)
323{
324 struct at91_cf_socket *cf = dev_get_drvdata(dev);
325 unsigned int csa;
326
327 pcmcia_unregister_socket(&cf->socket);
328 free_irq(cf->board->irq_pin, cf);
329 free_irq(cf->board->det_pin, cf);
330 iounmap((void __iomem *) cf->socket.io_offset);
331 release_mem_region(AT91_CF_BASE, CF_SIZE);
332
333 csa = at91_sys_read(AT91_EBI_CSA);
334 at91_sys_write(AT91_EBI_CSA, csa & ~AT91_EBI_CS4A);
335
336 kfree(cf);
337 return 0;
338}
339
340static struct device_driver at91_cf_driver = {
341 .name = (char *) driver_name,
342 .bus = &platform_bus_type,
343 .probe = at91_cf_probe,
344 .remove = __exit_p(at91_cf_remove),
345 .suspend = pcmcia_socket_dev_suspend,
346 .resume = pcmcia_socket_dev_resume,
347};
348
349/*--------------------------------------------------------------------------*/
350
351static int __init at91_cf_init(void)
352{
353 return driver_register(&at91_cf_driver);
354}
355module_init(at91_cf_init);
356
357static void __exit at91_cf_exit(void)
358{
359 driver_unregister(&at91_cf_driver);
360}
361module_exit(at91_cf_exit);
362
363MODULE_DESCRIPTION("AT91 Compact Flash Driver");
364MODULE_AUTHOR("David Brownell");
365MODULE_LICENSE("GPL");
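The probe above refuses to run without board->det_pin and board->rst_pin, and everything else arrives as platform data on an "at91_cf" platform device. A hypothetical board-file fragment follows (pin numbers invented for illustration; only fields visible in the driver are used):

#include <linux/platform_device.h>
#include <asm/arch/board.h>
#include <asm/arch/gpio.h>

static struct at91_cf_data board_cf_data = {
	.det_pin	= AT91_PIN_PC8,	/* card detect: GPIO, triggers on both edges */
	.rst_pin	= AT91_PIN_PC5,	/* reset */
	.irq_pin	= AT91_PIN_PC7,	/* RDY/nIRQ, optional */
	/* .vcc_pin left 0: Vcc not software-switchable on this board */
};

static struct platform_device board_cf_device = {
	.name	= "at91_cf",		/* matches driver_name in at91_cf.c */
	.id	= -1,
	.dev	= {
		.platform_data	= &board_cf_data,
	},
};

/* from the board init code: platform_device_register(&board_cf_device); */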
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 120fa8da6392..912c03e5eb0a 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -12,7 +12,6 @@
12 * (C) 1999 David A. Hinds 12 * (C) 1999 David A. Hinds
13 */ 13 */
14 14
15#include <linux/config.h>
16#include <linux/module.h> 15#include <linux/module.h>
17#include <linux/moduleparam.h> 16#include <linux/moduleparam.h>
18#include <linux/kernel.h> 17#include <linux/kernel.h>
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 613f2f1fbfdd..3162998579c1 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -16,7 +16,6 @@
16#include <linux/moduleparam.h> 16#include <linux/moduleparam.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/config.h>
20#include <linux/string.h> 19#include <linux/string.h>
21#include <linux/major.h> 20#include <linux/major.h>
22#include <linux/errno.h> 21#include <linux/errno.h>
@@ -111,9 +110,9 @@ int pcmcia_socket_dev_suspend(struct device *dev, pm_message_t state)
111 list_for_each_entry(socket, &pcmcia_socket_list, socket_list) { 110 list_for_each_entry(socket, &pcmcia_socket_list, socket_list) {
112 if (socket->dev.dev != dev) 111 if (socket->dev.dev != dev)
113 continue; 112 continue;
114 down(&socket->skt_sem); 113 mutex_lock(&socket->skt_mutex);
115 socket_suspend(socket); 114 socket_suspend(socket);
116 up(&socket->skt_sem); 115 mutex_unlock(&socket->skt_mutex);
117 } 116 }
118 up_read(&pcmcia_socket_list_rwsem); 117 up_read(&pcmcia_socket_list_rwsem);
119 118
@@ -129,9 +128,9 @@ int pcmcia_socket_dev_resume(struct device *dev)
129 list_for_each_entry(socket, &pcmcia_socket_list, socket_list) { 128 list_for_each_entry(socket, &pcmcia_socket_list, socket_list) {
130 if (socket->dev.dev != dev) 129 if (socket->dev.dev != dev)
131 continue; 130 continue;
132 down(&socket->skt_sem); 131 mutex_lock(&socket->skt_mutex);
133 socket_resume(socket); 132 socket_resume(socket);
134 up(&socket->skt_sem); 133 mutex_unlock(&socket->skt_mutex);
135 } 134 }
136 up_read(&pcmcia_socket_list_rwsem); 135 up_read(&pcmcia_socket_list_rwsem);
137 136
@@ -237,7 +236,7 @@ int pcmcia_register_socket(struct pcmcia_socket *socket)
237 init_completion(&socket->socket_released); 236 init_completion(&socket->socket_released);
238 init_completion(&socket->thread_done); 237 init_completion(&socket->thread_done);
239 init_waitqueue_head(&socket->thread_wait); 238 init_waitqueue_head(&socket->thread_wait);
240 init_MUTEX(&socket->skt_sem); 239 mutex_init(&socket->skt_mutex);
241 spin_lock_init(&socket->thread_lock); 240 spin_lock_init(&socket->thread_lock);
242 241
243 ret = kernel_thread(pccardd, socket, CLONE_KERNEL); 242 ret = kernel_thread(pccardd, socket, CLONE_KERNEL);
@@ -406,8 +405,6 @@ static void socket_shutdown(struct pcmcia_socket *s)
406 cb_free(s); 405 cb_free(s);
407#endif 406#endif
408 s->functions = 0; 407 s->functions = 0;
409 kfree(s->config);
410 s->config = NULL;
411 408
412 s->ops->get_status(s, &status); 409 s->ops->get_status(s, &status);
413 if (status & SS_POWERON) { 410 if (status & SS_POWERON) {
@@ -664,7 +661,7 @@ static int pccardd(void *__skt)
664 spin_unlock_irqrestore(&skt->thread_lock, flags); 661 spin_unlock_irqrestore(&skt->thread_lock, flags);
665 662
666 if (events) { 663 if (events) {
667 down(&skt->skt_sem); 664 mutex_lock(&skt->skt_mutex);
668 if (events & SS_DETECT) 665 if (events & SS_DETECT)
669 socket_detect_change(skt); 666 socket_detect_change(skt);
670 if (events & SS_BATDEAD) 667 if (events & SS_BATDEAD)
@@ -673,7 +670,7 @@ static int pccardd(void *__skt)
673 send_event(skt, CS_EVENT_BATTERY_LOW, CS_EVENT_PRI_LOW); 670 send_event(skt, CS_EVENT_BATTERY_LOW, CS_EVENT_PRI_LOW);
674 if (events & SS_READY) 671 if (events & SS_READY)
675 send_event(skt, CS_EVENT_READY_CHANGE, CS_EVENT_PRI_LOW); 672 send_event(skt, CS_EVENT_READY_CHANGE, CS_EVENT_PRI_LOW);
676 up(&skt->skt_sem); 673 mutex_unlock(&skt->skt_mutex);
677 continue; 674 continue;
678 } 675 }
679 676
@@ -717,8 +714,8 @@ int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c)
717{ 714{
718 int ret = 0; 715 int ret = 0;
719 716
720 /* s->skt_sem also protects s->callback */ 717 /* s->skt_mutex also protects s->callback */
721 down(&s->skt_sem); 718 mutex_lock(&s->skt_mutex);
722 719
723 if (c) { 720 if (c) {
724 /* registration */ 721 /* registration */
@@ -734,7 +731,7 @@ int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c)
734 } else 731 } else
735 s->callback = NULL; 732 s->callback = NULL;
736 err: 733 err:
737 up(&s->skt_sem); 734 mutex_unlock(&s->skt_mutex);
738 735
739 return ret; 736 return ret;
740} 737}
@@ -752,7 +749,7 @@ int pccard_reset_card(struct pcmcia_socket *skt)
752 749
753 cs_dbg(skt, 1, "resetting socket\n"); 750 cs_dbg(skt, 1, "resetting socket\n");
754 751
755 down(&skt->skt_sem); 752 mutex_lock(&skt->skt_mutex);
756 do { 753 do {
757 if (!(skt->state & SOCKET_PRESENT)) { 754 if (!(skt->state & SOCKET_PRESENT)) {
758 ret = CS_NO_CARD; 755 ret = CS_NO_CARD;
@@ -781,7 +778,7 @@ int pccard_reset_card(struct pcmcia_socket *skt)
781 778
782 ret = CS_SUCCESS; 779 ret = CS_SUCCESS;
783 } while (0); 780 } while (0);
784 up(&skt->skt_sem); 781 mutex_unlock(&skt->skt_mutex);
785 782
786 return ret; 783 return ret;
787} /* reset_card */ 784} /* reset_card */
@@ -797,7 +794,7 @@ int pcmcia_suspend_card(struct pcmcia_socket *skt)
797 794
798 cs_dbg(skt, 1, "suspending socket\n"); 795 cs_dbg(skt, 1, "suspending socket\n");
799 796
800 down(&skt->skt_sem); 797 mutex_lock(&skt->skt_mutex);
801 do { 798 do {
802 if (!(skt->state & SOCKET_PRESENT)) { 799 if (!(skt->state & SOCKET_PRESENT)) {
803 ret = CS_NO_CARD; 800 ret = CS_NO_CARD;
@@ -814,7 +811,7 @@ int pcmcia_suspend_card(struct pcmcia_socket *skt)
814 } 811 }
815 ret = socket_suspend(skt); 812 ret = socket_suspend(skt);
816 } while (0); 813 } while (0);
817 up(&skt->skt_sem); 814 mutex_unlock(&skt->skt_mutex);
818 815
819 return ret; 816 return ret;
820} /* suspend_card */ 817} /* suspend_card */
@@ -827,7 +824,7 @@ int pcmcia_resume_card(struct pcmcia_socket *skt)
827 824
828 cs_dbg(skt, 1, "waking up socket\n"); 825 cs_dbg(skt, 1, "waking up socket\n");
829 826
830 down(&skt->skt_sem); 827 mutex_lock(&skt->skt_mutex);
831 do { 828 do {
832 if (!(skt->state & SOCKET_PRESENT)) { 829 if (!(skt->state & SOCKET_PRESENT)) {
833 ret = CS_NO_CARD; 830 ret = CS_NO_CARD;
@@ -841,7 +838,7 @@ int pcmcia_resume_card(struct pcmcia_socket *skt)
841 if (!ret && skt->callback) 838 if (!ret && skt->callback)
842 skt->callback->resume(skt); 839 skt->callback->resume(skt);
843 } while (0); 840 } while (0);
844 up(&skt->skt_sem); 841 mutex_unlock(&skt->skt_mutex);
845 842
846 return ret; 843 return ret;
847} /* resume_card */ 844} /* resume_card */
@@ -855,7 +852,7 @@ int pcmcia_eject_card(struct pcmcia_socket *skt)
855 852
856 cs_dbg(skt, 1, "user eject request\n"); 853 cs_dbg(skt, 1, "user eject request\n");
857 854
858 down(&skt->skt_sem); 855 mutex_lock(&skt->skt_mutex);
859 do { 856 do {
860 if (!(skt->state & SOCKET_PRESENT)) { 857 if (!(skt->state & SOCKET_PRESENT)) {
861 ret = -ENODEV; 858 ret = -ENODEV;
@@ -871,7 +868,7 @@ int pcmcia_eject_card(struct pcmcia_socket *skt)
871 socket_remove(skt); 868 socket_remove(skt);
872 ret = 0; 869 ret = 0;
873 } while (0); 870 } while (0);
874 up(&skt->skt_sem); 871 mutex_unlock(&skt->skt_mutex);
875 872
876 return ret; 873 return ret;
877} /* eject_card */ 874} /* eject_card */
@@ -884,7 +881,7 @@ int pcmcia_insert_card(struct pcmcia_socket *skt)
884 881
885 cs_dbg(skt, 1, "user insert request\n"); 882 cs_dbg(skt, 1, "user insert request\n");
886 883
887 down(&skt->skt_sem); 884 mutex_lock(&skt->skt_mutex);
888 do { 885 do {
889 if (skt->state & SOCKET_PRESENT) { 886 if (skt->state & SOCKET_PRESENT) {
890 ret = -EBUSY; 887 ret = -EBUSY;
@@ -896,7 +893,7 @@ int pcmcia_insert_card(struct pcmcia_socket *skt)
896 } 893 }
897 ret = 0; 894 ret = 0;
898 } while (0); 895 } while (0);
899 up(&skt->skt_sem); 896 mutex_unlock(&skt->skt_mutex);
900 897
901 return ret; 898 return ret;
902} /* insert_card */ 899} /* insert_card */
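The cs.c changes above are a mechanical semaphore-to-mutex conversion: skt_sem becomes skt_mutex, init_MUTEX() becomes mutex_init(), and every down()/up() pair becomes mutex_lock()/mutex_unlock(). The same pattern recurs in ds.c below (DECLARE_MUTEX to DEFINE_MUTEX for device_add_lock). A compact sketch of the mapping, using a hypothetical some_lock:

#include <linux/mutex.h>

static DEFINE_MUTEX(some_lock);		/* was: static DECLARE_MUTEX(some_lock); */

static void some_operation(void)
{
	mutex_lock(&some_lock);		/* was: down(&some_lock); */
	/* ... socket or device state is manipulated here ... */
	mutex_unlock(&some_lock);	/* was: up(&some_lock); */
}

/* Embedded locks such as socket->skt_mutex are set up with
 * mutex_init(&socket->skt_mutex) instead of init_MUTEX(&socket->skt_sem). */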
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index 7b37eba35bf1..d6164cd583fd 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -15,7 +15,7 @@
15#ifndef _LINUX_CS_INTERNAL_H 15#ifndef _LINUX_CS_INTERNAL_H
16#define _LINUX_CS_INTERNAL_H 16#define _LINUX_CS_INTERNAL_H
17 17
18#include <linux/config.h> 18#include <linux/kref.h>
19 19
20/* Flags in client state */ 20/* Flags in client state */
21#define CLIENT_CONFIG_LOCKED 0x0001 21#define CLIENT_CONFIG_LOCKED 0x0001
@@ -23,7 +23,7 @@
23#define CLIENT_IO_REQ 0x0004 23#define CLIENT_IO_REQ 0x0004
24#define CLIENT_UNBOUND 0x0008 24#define CLIENT_UNBOUND 0x0008
25#define CLIENT_STALE 0x0010 25#define CLIENT_STALE 0x0010
26#define CLIENT_WIN_REQ(i) (0x20<<(i)) 26#define CLIENT_WIN_REQ(i) (0x1<<(i))
27#define CLIENT_CARDBUS 0x8000 27#define CLIENT_CARDBUS 0x8000
28 28
29#define REGION_MAGIC 0xE3C9 29#define REGION_MAGIC 0xE3C9
@@ -31,7 +31,7 @@ typedef struct region_t {
31 u_short region_magic; 31 u_short region_magic;
32 u_short state; 32 u_short state;
33 dev_info_t dev_info; 33 dev_info_t dev_info;
34 client_handle_t mtd; 34 struct pcmcia_device *mtd;
35 u_int MediaID; 35 u_int MediaID;
36 region_info_t info; 36 region_info_t info;
37} region_t; 37} region_t;
@@ -40,12 +40,12 @@ typedef struct region_t {
40 40
41/* Each card function gets one of these guys */ 41/* Each card function gets one of these guys */
42typedef struct config_t { 42typedef struct config_t {
43 struct kref ref;
43 u_int state; 44 u_int state;
44 u_int Attributes; 45 u_int Attributes;
45 u_int IntType; 46 u_int IntType;
46 u_int ConfigBase; 47 u_int ConfigBase;
47 u_char Status, Pin, Copy, Option, ExtStatus; 48 u_char Status, Pin, Copy, Option, ExtStatus;
48 u_int Present;
49 u_int CardValues; 49 u_int CardValues;
50 io_req_t io; 50 io_req_t io;
51 struct { 51 struct {
@@ -95,12 +95,6 @@ static inline void cs_socket_put(struct pcmcia_socket *skt)
95 } 95 }
96} 96}
97 97
98#define CHECK_SOCKET(s) \
99 (((s) >= sockets) || (socket_table[s]->ops == NULL))
100
101#define SOCKET(h) (h->socket)
102#define CONFIG(h) (&SOCKET(h)->config[(h)->func])
103
104/* In cardbus.c */ 98/* In cardbus.c */
105int cb_alloc(struct pcmcia_socket *s); 99int cb_alloc(struct pcmcia_socket *s);
106void cb_free(struct pcmcia_socket *s); 100void cb_free(struct pcmcia_socket *s);
@@ -133,10 +127,9 @@ extern struct class_interface pccard_sysfs_interface;
133extern struct rw_semaphore pcmcia_socket_list_rwsem; 127extern struct rw_semaphore pcmcia_socket_list_rwsem;
134extern struct list_head pcmcia_socket_list; 128extern struct list_head pcmcia_socket_list;
135int pcmcia_get_window(struct pcmcia_socket *s, window_handle_t *handle, int idx, win_req_t *req); 129int pcmcia_get_window(struct pcmcia_socket *s, window_handle_t *handle, int idx, win_req_t *req);
136int pccard_get_configuration_info(struct pcmcia_socket *s, unsigned int function, config_info_t *config); 130int pccard_get_configuration_info(struct pcmcia_socket *s, struct pcmcia_device *p_dev, config_info_t *config);
137int pccard_reset_card(struct pcmcia_socket *skt); 131int pccard_reset_card(struct pcmcia_socket *skt);
138int pccard_get_status(struct pcmcia_socket *s, unsigned int function, cs_status_t *status); 132int pccard_get_status(struct pcmcia_socket *s, struct pcmcia_device *p_dev, cs_status_t *status);
139int pccard_access_configuration_register(struct pcmcia_socket *s, unsigned int function, conf_reg_t *reg);
140 133
141 134
142struct pcmcia_callback{ 135struct pcmcia_callback{
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index bb96ce1db08c..ae10d1eed65e 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -10,10 +10,9 @@
10 * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. 10 * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
11 * 11 *
12 * (C) 1999 David A. Hinds 12 * (C) 1999 David A. Hinds
13 * (C) 2003 - 2005 Dominik Brodowski 13 * (C) 2003 - 2006 Dominik Brodowski
14 */ 14 */
15 15
16#include <linux/config.h>
17#include <linux/kernel.h> 16#include <linux/kernel.h>
18#include <linux/module.h> 17#include <linux/module.h>
19#include <linux/init.h> 18#include <linux/init.h>
@@ -23,6 +22,7 @@
23#include <linux/workqueue.h> 22#include <linux/workqueue.h>
24#include <linux/crc32.h> 23#include <linux/crc32.h>
25#include <linux/firmware.h> 24#include <linux/firmware.h>
25#include <linux/kref.h>
26 26
27#define IN_CARD_SERVICES 27#define IN_CARD_SERVICES
28#include <pcmcia/cs_types.h> 28#include <pcmcia/cs_types.h>
@@ -343,12 +343,19 @@ void pcmcia_put_dev(struct pcmcia_device *p_dev)
343 put_device(&p_dev->dev); 343 put_device(&p_dev->dev);
344} 344}
345 345
346static void pcmcia_release_function(struct kref *ref)
347{
348 struct config_t *c = container_of(ref, struct config_t, ref);
349 kfree(c);
350}
351
346static void pcmcia_release_dev(struct device *dev) 352static void pcmcia_release_dev(struct device *dev)
347{ 353{
348 struct pcmcia_device *p_dev = to_pcmcia_dev(dev); 354 struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
349 ds_dbg(1, "releasing dev %p\n", p_dev); 355 ds_dbg(1, "releasing dev %p\n", p_dev);
350 pcmcia_put_socket(p_dev->socket); 356 pcmcia_put_socket(p_dev->socket);
351 kfree(p_dev->devname); 357 kfree(p_dev->devname);
358 kref_put(&p_dev->function_config->ref, pcmcia_release_function);
352 kfree(p_dev); 359 kfree(p_dev);
353} 360}
354 361
@@ -377,29 +384,12 @@ static int pcmcia_device_probe(struct device * dev)
377 p_drv = to_pcmcia_drv(dev->driver); 384 p_drv = to_pcmcia_drv(dev->driver);
378 s = p_dev->socket; 385 s = p_dev->socket;
379 386
380 if ((!p_drv->probe) || (!try_module_get(p_drv->owner))) { 387 if ((!p_drv->probe) || (!p_dev->function_config) ||
388 (!try_module_get(p_drv->owner))) {
381 ret = -EINVAL; 389 ret = -EINVAL;
382 goto put_dev; 390 goto put_dev;
383 } 391 }
384 392
385 p_dev->state &= ~CLIENT_UNBOUND;
386
387 /* set up the device configuration, if it hasn't been done before */
388 if (!s->functions) {
389 cistpl_longlink_mfc_t mfc;
390 if (pccard_read_tuple(s, p_dev->func, CISTPL_LONGLINK_MFC,
391 &mfc) == CS_SUCCESS)
392 s->functions = mfc.nfn;
393 else
394 s->functions = 1;
395 s->config = kzalloc(sizeof(config_t) * s->functions,
396 GFP_KERNEL);
397 if (!s->config) {
398 ret = -ENOMEM;
399 goto put_module;
400 }
401 }
402
403 ret = p_drv->probe(p_dev); 393 ret = p_drv->probe(p_dev);
404 if (ret) 394 if (ret)
405 goto put_module; 395 goto put_module;
@@ -425,15 +415,61 @@ static int pcmcia_device_probe(struct device * dev)
425} 415}
426 416
427 417
418/*
419 * Removes a PCMCIA card from the device tree and socket list.
420 */
421static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *leftover)
422{
423 struct pcmcia_device *p_dev;
424 struct pcmcia_device *tmp;
425 unsigned long flags;
426
427 ds_dbg(2, "unbind_request(%d)\n", s->sock);
428
429
430 if (!leftover)
431 s->device_count = 0;
432 else
433 s->device_count = 1;
434
435 /* unregister all pcmcia_devices registered with this socket, except leftover */
436 list_for_each_entry_safe(p_dev, tmp, &s->devices_list, socket_device_list) {
437 if (p_dev == leftover)
438 continue;
439
440 spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
441 list_del(&p_dev->socket_device_list);
442 p_dev->_removed=1;
443 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
444
445 device_unregister(&p_dev->dev);
446 }
447
448 return;
449}
450
451
428static int pcmcia_device_remove(struct device * dev) 452static int pcmcia_device_remove(struct device * dev)
429{ 453{
430 struct pcmcia_device *p_dev; 454 struct pcmcia_device *p_dev;
431 struct pcmcia_driver *p_drv; 455 struct pcmcia_driver *p_drv;
456 struct pcmcia_device_id *did;
432 int i; 457 int i;
433 458
434 /* detach the "instance" */
435 p_dev = to_pcmcia_dev(dev); 459 p_dev = to_pcmcia_dev(dev);
436 p_drv = to_pcmcia_drv(dev->driver); 460 p_drv = to_pcmcia_drv(dev->driver);
461
462 /* If we're removing the primary module driving a
463 * pseudo multi-function card, we need to unbind
464 * all devices
465 */
466 did = (struct pcmcia_device_id *) p_dev->dev.driver_data;
467 if (did && (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) &&
468 (p_dev->socket->device_count != 0) &&
469 (p_dev->device_no == 0))
470 pcmcia_card_remove(p_dev->socket, p_dev);
471
472 /* detach the "instance" */
437 if (!p_drv) 473 if (!p_drv)
438 return 0; 474 return 0;
439 475
@@ -441,17 +477,16 @@ static int pcmcia_device_remove(struct device * dev)
441 p_drv->remove(p_dev); 477 p_drv->remove(p_dev);
442 478
443 /* check for proper unloading */ 479 /* check for proper unloading */
444 if (p_dev->state & (CLIENT_IRQ_REQ|CLIENT_IO_REQ|CLIENT_CONFIG_LOCKED)) 480 if (p_dev->_irq || p_dev->_io || p_dev->_locked)
445 printk(KERN_INFO "pcmcia: driver %s did not release config properly\n", 481 printk(KERN_INFO "pcmcia: driver %s did not release config properly\n",
446 p_drv->drv.name); 482 p_drv->drv.name);
447 483
448 for (i = 0; i < MAX_WIN; i++) 484 for (i = 0; i < MAX_WIN; i++)
449 if (p_dev->state & CLIENT_WIN_REQ(i)) 485 if (p_dev->_win & CLIENT_WIN_REQ(i))
450 printk(KERN_INFO "pcmcia: driver %s did not release windows properly\n", 486 printk(KERN_INFO "pcmcia: driver %s did not release windows properly\n",
451 p_drv->drv.name); 487 p_drv->drv.name);
452 488
453 /* references from pcmcia_probe_device */ 489 /* references from pcmcia_probe_device */
454 p_dev->state = CLIENT_UNBOUND;
455 pcmcia_put_dev(p_dev); 490 pcmcia_put_dev(p_dev);
456 module_put(p_drv->owner); 491 module_put(p_drv->owner);
457 492
@@ -460,37 +495,6 @@ static int pcmcia_device_remove(struct device * dev)
460 495
461 496
462/* 497/*
463 * Removes a PCMCIA card from the device tree and socket list.
464 */
465static void pcmcia_card_remove(struct pcmcia_socket *s)
466{
467 struct pcmcia_device *p_dev;
468 unsigned long flags;
469
470 ds_dbg(2, "unbind_request(%d)\n", s->sock);
471
472 s->device_count = 0;
473
474 for (;;) {
475 /* unregister all pcmcia_devices registered with this socket*/
476 spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
477 if (list_empty(&s->devices_list)) {
478 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
479 return;
480 }
481 p_dev = list_entry((&s->devices_list)->next, struct pcmcia_device, socket_device_list);
482 list_del(&p_dev->socket_device_list);
483 p_dev->state |= CLIENT_STALE;
484 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
485
486 device_unregister(&p_dev->dev);
487 }
488
489 return;
490} /* unbind_request */
491
492
493/*
494 * pcmcia_device_query -- determine information about a pcmcia device 498 * pcmcia_device_query -- determine information about a pcmcia device
495 */ 499 */
496static int pcmcia_device_query(struct pcmcia_device *p_dev) 500static int pcmcia_device_query(struct pcmcia_device *p_dev)
@@ -546,7 +550,7 @@ static int pcmcia_device_query(struct pcmcia_device *p_dev)
546 tmp = vers1->str + vers1->ofs[i]; 550 tmp = vers1->str + vers1->ofs[i];
547 551
548 length = strlen(tmp) + 1; 552 length = strlen(tmp) + 1;
549 if ((length < 3) || (length > 255)) 553 if ((length < 2) || (length > 255))
550 continue; 554 continue;
551 555
552 p_dev->prod_id[i] = kmalloc(sizeof(char) * length, 556 p_dev->prod_id[i] = kmalloc(sizeof(char) * length,
@@ -571,11 +575,11 @@ static int pcmcia_device_query(struct pcmcia_device *p_dev)
571 * won't work, this doesn't matter much at the moment: the driver core doesn't 575 * won't work, this doesn't matter much at the moment: the driver core doesn't
572 * support it either. 576 * support it either.
573 */ 577 */
574static DECLARE_MUTEX(device_add_lock); 578static DEFINE_MUTEX(device_add_lock);
575 579
576struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int function) 580struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int function)
577{ 581{
578 struct pcmcia_device *p_dev; 582 struct pcmcia_device *p_dev, *tmp_dev;
579 unsigned long flags; 583 unsigned long flags;
580 int bus_id_len; 584 int bus_id_len;
581 585
@@ -583,7 +587,7 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
583 if (!s) 587 if (!s)
584 return NULL; 588 return NULL;
585 589
586 down(&device_add_lock); 590 mutex_lock(&device_add_lock);
587 591
588 /* max of 2 devices per card */ 592 /* max of 2 devices per card */
589 if (s->device_count == 2) 593 if (s->device_count == 2)
@@ -596,6 +600,8 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
596 p_dev->socket = s; 600 p_dev->socket = s;
597 p_dev->device_no = (s->device_count++); 601 p_dev->device_no = (s->device_count++);
598 p_dev->func = function; 602 p_dev->func = function;
603 if (s->functions <= function)
604 s->functions = function + 1;
599 605
600 p_dev->dev.bus = &pcmcia_bus_type; 606 p_dev->dev.bus = &pcmcia_bus_type;
601 p_dev->dev.parent = s->dev.dev; 607 p_dev->dev.parent = s->dev.dev;
@@ -608,36 +614,55 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
608 sprintf (p_dev->devname, "pcmcia%s", p_dev->dev.bus_id); 614 sprintf (p_dev->devname, "pcmcia%s", p_dev->dev.bus_id);
609 615
610 /* compat */ 616 /* compat */
611 p_dev->state = CLIENT_UNBOUND; 617 spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
618
619 /*
620 * p_dev->function_config must be the same for all card functions.
621 * Note that this is serialized by the device_add_lock, so that
622 * only one such struct will be created.
623 */
624 list_for_each_entry(tmp_dev, &s->devices_list, socket_device_list)
625 if (p_dev->func == tmp_dev->func) {
626 p_dev->function_config = tmp_dev->function_config;
627 kref_get(&p_dev->function_config->ref);
628 }
612 629
613 /* Add to the list in pcmcia_bus_socket */ 630 /* Add to the list in pcmcia_bus_socket */
614 spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
615 list_add_tail(&p_dev->socket_device_list, &s->devices_list); 631 list_add_tail(&p_dev->socket_device_list, &s->devices_list);
632
616 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); 633 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
617 634
635 if (!p_dev->function_config) {
636 p_dev->function_config = kzalloc(sizeof(struct config_t),
637 GFP_KERNEL);
638 if (!p_dev->function_config)
639 goto err_unreg;
640 kref_init(&p_dev->function_config->ref);
641 }
642
618 printk(KERN_NOTICE "pcmcia: registering new device %s\n", 643 printk(KERN_NOTICE "pcmcia: registering new device %s\n",
619 p_dev->devname); 644 p_dev->devname);
620 645
621 pcmcia_device_query(p_dev); 646 pcmcia_device_query(p_dev);
622 647
623 if (device_register(&p_dev->dev)) { 648 if (device_register(&p_dev->dev))
624 spin_lock_irqsave(&pcmcia_dev_list_lock, flags); 649 goto err_unreg;
625 list_del(&p_dev->socket_device_list);
626 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
627
628 goto err_free;
629 }
630 650
631 up(&device_add_lock); 651 mutex_unlock(&device_add_lock);
632 652
633 return p_dev; 653 return p_dev;
634 654
655 err_unreg:
656 spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
657 list_del(&p_dev->socket_device_list);
658 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
659
635 err_free: 660 err_free:
636 kfree(p_dev->devname); 661 kfree(p_dev->devname);
637 kfree(p_dev); 662 kfree(p_dev);
638 s->device_count--; 663 s->device_count--;
639 err_put: 664 err_put:
640 up(&device_add_lock); 665 mutex_unlock(&device_add_lock);
641 pcmcia_put_socket(s); 666 pcmcia_put_socket(s);
642 667
643 return NULL; 668 return NULL;
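The function_config sharing above follows a plain kref lifecycle: the first pcmcia_device on a function allocates the config_t and kref_init()s it, a later sibling on the same function takes it with kref_get(), and each holder drops it with kref_put(), so pcmcia_release_function() frees the structure only when the last device goes away. A generic sketch of that pattern, with a hypothetical shared_cfg type:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct shared_cfg {
	struct kref ref;
	/* ... per-function configuration state ... */
};

static void shared_cfg_release(struct kref *ref)
{
	kfree(container_of(ref, struct shared_cfg, ref));
}

static struct shared_cfg *shared_cfg_alloc(void)
{
	struct shared_cfg *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (c)
		kref_init(&c->ref);	/* reference count starts at 1 */
	return c;
}

/* a second user of the same function takes a reference: kref_get(&c->ref); */
/* every holder eventually calls: kref_put(&c->ref, shared_cfg_release); */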
@@ -696,7 +721,7 @@ static void pcmcia_bus_rescan(struct pcmcia_socket *skt)
696 int no_devices=0; 721 int no_devices=0;
697 unsigned long flags; 722 unsigned long flags;
698 723
699 /* must be called with skt_sem held */ 724 /* must be called with skt_mutex held */
700 spin_lock_irqsave(&pcmcia_dev_list_lock, flags); 725 spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
701 if (list_empty(&skt->devices_list)) 726 if (list_empty(&skt->devices_list))
702 no_devices=1; 727 no_devices=1;
@@ -819,9 +844,11 @@ static int pcmcia_bus_match(struct device * dev, struct device_driver * drv) {
819 struct pcmcia_driver * p_drv = to_pcmcia_drv(drv); 844 struct pcmcia_driver * p_drv = to_pcmcia_drv(drv);
820 struct pcmcia_device_id *did = p_drv->id_table; 845 struct pcmcia_device_id *did = p_drv->id_table;
821 846
847#ifdef CONFIG_PCMCIA_IOCTL
822 /* matching by cardmgr */ 848 /* matching by cardmgr */
823 if (p_dev->cardmgr == p_drv) 849 if (p_dev->cardmgr == p_drv)
824 return 1; 850 return 1;
851#endif
825 852
826 while (did && did->match_flags) { 853 while (did && did->match_flags) {
827 if (pcmcia_devmatch(p_dev, did)) 854 if (pcmcia_devmatch(p_dev, did))
@@ -927,7 +954,7 @@ static ssize_t pcmcia_show_pm_state(struct device *dev, struct device_attribute
927{ 954{
928 struct pcmcia_device *p_dev = to_pcmcia_dev(dev); 955 struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
929 956
930 if (p_dev->dev.power.power_state.event != PM_EVENT_ON) 957 if (p_dev->suspended)
931 return sprintf(buf, "off\n"); 958 return sprintf(buf, "off\n");
932 else 959 else
933 return sprintf(buf, "on\n"); 960 return sprintf(buf, "on\n");
@@ -942,11 +969,9 @@ static ssize_t pcmcia_store_pm_state(struct device *dev, struct device_attribute
942 if (!count) 969 if (!count)
943 return -EINVAL; 970 return -EINVAL;
944 971
945 if ((p_dev->dev.power.power_state.event == PM_EVENT_ON) && 972 if ((!p_dev->suspended) && !strncmp(buf, "off", 3))
946 (!strncmp(buf, "off", 3)))
947 ret = dpm_runtime_suspend(dev, PMSG_SUSPEND); 973 ret = dpm_runtime_suspend(dev, PMSG_SUSPEND);
948 else if ((p_dev->dev.power.power_state.event != PM_EVENT_ON) && 974 else if (p_dev->suspended && !strncmp(buf, "on", 2))
949 (!strncmp(buf, "on", 2)))
950 dpm_runtime_resume(dev); 975 dpm_runtime_resume(dev);
951 976
952 return ret ? ret : count; 977 return ret ? ret : count;
@@ -982,9 +1007,9 @@ static ssize_t pcmcia_store_allow_func_id_match(struct device *dev,
982 if (!count) 1007 if (!count)
983 return -EINVAL; 1008 return -EINVAL;
984 1009
985 down(&p_dev->socket->skt_sem); 1010 mutex_lock(&p_dev->socket->skt_mutex);
986 p_dev->allow_func_id_match = 1; 1011 p_dev->allow_func_id_match = 1;
987 up(&p_dev->socket->skt_sem); 1012 mutex_unlock(&p_dev->socket->skt_mutex);
988 1013
989 bus_rescan_devices(&pcmcia_bus_type); 1014 bus_rescan_devices(&pcmcia_bus_type);
990 1015
@@ -1012,14 +1037,27 @@ static int pcmcia_dev_suspend(struct device * dev, pm_message_t state)
1012{ 1037{
1013 struct pcmcia_device *p_dev = to_pcmcia_dev(dev); 1038 struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
1014 struct pcmcia_driver *p_drv = NULL; 1039 struct pcmcia_driver *p_drv = NULL;
1040 int ret = 0;
1015 1041
1016 if (dev->driver) 1042 if (dev->driver)
1017 p_drv = to_pcmcia_drv(dev->driver); 1043 p_drv = to_pcmcia_drv(dev->driver);
1018 1044
1019 if (p_drv && p_drv->suspend) 1045 if (!p_drv)
1020 return p_drv->suspend(p_dev); 1046 goto out;
1021 1047
1022 return 0; 1048 if (p_drv->suspend) {
1049 ret = p_drv->suspend(p_dev);
1050 if (ret)
1051 goto out;
1052 }
1053
1054 if (p_dev->device_no == p_dev->func)
1055 pcmcia_release_configuration(p_dev);
1056
1057 out:
1058 if (!ret)
1059 p_dev->suspended = 1;
1060 return ret;
1023} 1061}
1024 1062
1025 1063
@@ -1027,14 +1065,27 @@ static int pcmcia_dev_resume(struct device * dev)
1027{ 1065{
1028 struct pcmcia_device *p_dev = to_pcmcia_dev(dev); 1066 struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
1029 struct pcmcia_driver *p_drv = NULL; 1067 struct pcmcia_driver *p_drv = NULL;
1068 int ret = 0;
1030 1069
1031 if (dev->driver) 1070 if (dev->driver)
1032 p_drv = to_pcmcia_drv(dev->driver); 1071 p_drv = to_pcmcia_drv(dev->driver);
1033 1072
1034 if (p_drv && p_drv->resume) 1073 if (!p_drv)
1035 return p_drv->resume(p_dev); 1074 goto out;
1036 1075
1037 return 0; 1076 if (p_dev->device_no == p_dev->func) {
1077 ret = pcmcia_request_configuration(p_dev, &p_dev->conf);
1078 if (ret)
1079 goto out;
1080 }
1081
1082 if (p_drv->resume)
1083 ret = p_drv->resume(p_dev);
1084
1085 out:
1086 if (!ret)
1087 p_dev->suspended = 0;
1088 return ret;
1038} 1089}
1039 1090
1040 1091
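The two suspend/resume hunks above move configuration handling into the PCMCIA core: after a successful driver suspend callback the core releases the socket configuration for the card's first function (device_no == func), re-issues pcmcia_request_configuration() before the resume callback, and tracks the result in p_dev->suspended. A driver's own callbacks then only have to quiesce and restart the hardware. A minimal sketch of such callbacks under this scheme (the my_hw_* helpers are invented, not from this patch):

/* Sketch only: assumes <pcmcia/cs_types.h>, <pcmcia/cs.h>, <pcmcia/ds.h>;
 * my_hw_stop()/my_hw_restart() are hypothetical driver helpers. */
static void my_hw_stop(struct pcmcia_device *p_dev);
static void my_hw_restart(struct pcmcia_device *p_dev);

static int my_pcmcia_suspend(struct pcmcia_device *p_dev)
{
        /* Quiesce the hardware.  Returning non-zero aborts the suspend and
         * leaves p_dev->suspended clear; on success the core then drops the
         * socket configuration for the single-function case on our behalf. */
        my_hw_stop(p_dev);
        return 0;
}

static int my_pcmcia_resume(struct pcmcia_device *p_dev)
{
        /* By the time this runs the core has already re-issued
         * pcmcia_request_configuration(), so I/O windows and the IRQ are
         * usable again. */
        my_hw_restart(p_dev);
        return 0;
}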
@@ -1100,7 +1151,7 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority)
1100 switch (event) { 1151 switch (event) {
1101 case CS_EVENT_CARD_REMOVAL: 1152 case CS_EVENT_CARD_REMOVAL:
1102 s->pcmcia_state.present = 0; 1153 s->pcmcia_state.present = 0;
1103 pcmcia_card_remove(skt); 1154 pcmcia_card_remove(skt, NULL);
1104 handle_event(skt, event); 1155 handle_event(skt, event);
1105 break; 1156 break;
1106 1157
@@ -1128,6 +1179,32 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority)
1128} /* ds_event */ 1179} /* ds_event */
1129 1180
1130 1181
1182struct pcmcia_device * pcmcia_dev_present(struct pcmcia_device *_p_dev)
1183{
1184 struct pcmcia_device *p_dev;
1185 struct pcmcia_device *ret = NULL;
1186
1187 p_dev = pcmcia_get_dev(_p_dev);
1188 if (!p_dev)
1189 return NULL;
1190
1191 if (!p_dev->socket->pcmcia_state.present)
1192 goto out;
1193
1194 if (p_dev->_removed)
1195 goto out;
1196
1197 if (p_dev->suspended)
1198 goto out;
1199
1200 ret = p_dev;
1201 out:
1202 pcmcia_put_dev(p_dev);
1203 return ret;
1204}
1205EXPORT_SYMBOL(pcmcia_dev_present);
1206
1207
1131static struct pcmcia_callback pcmcia_bus_callback = { 1208static struct pcmcia_callback pcmcia_bus_callback = {
1132 .owner = THIS_MODULE, 1209 .owner = THIS_MODULE,
1133 .event = ds_event, 1210 .event = ds_event,
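pcmcia_dev_present(), exported above, gives 16-bit drivers a single check that their card is still physically present, not in the middle of removal and not suspended, before they touch hardware. A minimal sketch of the intended use in an interrupt handler (handler and state names are invented; the 2.6.16-era handler signature with pt_regs is assumed):

/* Assumes <linux/interrupt.h>, <pcmcia/cs_types.h>, <pcmcia/cs.h>,
 * <pcmcia/ds.h>; struct my_priv is a hypothetical driver structure. */
struct my_priv {
        struct pcmcia_device *p_dev;
};

static irqreturn_t my_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct my_priv *priv = dev_id;

        /* pcmcia_dev_present() returns the device while it is present,
         * not being removed and not suspended, NULL otherwise. */
        if (!pcmcia_dev_present(priv->p_dev))
                return IRQ_NONE;

        /* ... acknowledge and service the card ... */
        return IRQ_HANDLED;
}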
diff --git a/drivers/pcmcia/ds_internal.h b/drivers/pcmcia/ds_internal.h
index d359bd25a51c..3a2b25e6ed73 100644
--- a/drivers/pcmcia/ds_internal.h
+++ b/drivers/pcmcia/ds_internal.h
@@ -8,6 +8,8 @@ extern void pcmcia_put_dev(struct pcmcia_device *p_dev);
8 8
9struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int function); 9struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int function);
10 10
11extern int pcmcia_release_configuration(struct pcmcia_device *p_dev);
12
11#ifdef CONFIG_PCMCIA_IOCTL 13#ifdef CONFIG_PCMCIA_IOCTL
12extern void __init pcmcia_setup_ioctl(void); 14extern void __init pcmcia_setup_ioctl(void);
13extern void __exit pcmcia_cleanup_ioctl(void); 15extern void __exit pcmcia_cleanup_ioctl(void);
@@ -15,7 +17,7 @@ extern void handle_event(struct pcmcia_socket *s, event_t event);
15extern int handle_request(struct pcmcia_socket *s, event_t event); 17extern int handle_request(struct pcmcia_socket *s, event_t event);
16#else 18#else
17static inline void __init pcmcia_setup_ioctl(void) { return; } 19static inline void __init pcmcia_setup_ioctl(void) { return; }
18static inline void __init pcmcia_cleanup_ioctl(void) { return; } 20static inline void __exit pcmcia_cleanup_ioctl(void) { return; }
19static inline void handle_event(struct pcmcia_socket *s, event_t event) { return; } 21static inline void handle_event(struct pcmcia_socket *s, event_t event) { return; }
20static inline int handle_request(struct pcmcia_socket *s, event_t event) { return CS_SUCCESS; } 22static inline int handle_request(struct pcmcia_socket *s, event_t event) { return CS_SUCCESS; }
21#endif 23#endif
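Beyond declaring pcmcia_release_configuration() to the core, the ds_internal.h change fixes the CONFIG_PCMCIA_IOCTL=n stub for pcmcia_cleanup_ioctl(), which was annotated __init although the real function is __exit. Keeping the stub annotations in line with the real declarations is the usual pattern, roughly:

#ifdef CONFIG_PCMCIA_IOCTL
extern void __init pcmcia_setup_ioctl(void);
extern void __exit pcmcia_cleanup_ioctl(void);
#else
/* Compile-away stubs; the section annotations mirror the real functions so
 * references from init and exit code stay consistent. */
static inline void __init pcmcia_setup_ioctl(void) { }
static inline void __exit pcmcia_cleanup_ioctl(void) { }
#endif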
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index 7979c85df3dc..d5f03a338c6c 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -10,7 +10,6 @@
10 */ 10 */
11 11
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/config.h>
14#include <linux/module.h> 13#include <linux/module.h>
15#include <linux/pci.h> 14#include <linux/pci.h>
16#include <linux/init.h> 15#include <linux/init.h>
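The #include <linux/config.h> removals here and in the files below (i82365.c, pcmcia_ioctl.c, pcmcia_resource.c, pd6729.c, rsrc_mgr.c, rsrc_nonstatic.c, sa1100_cerf.c, socket_sysfs.c, ti113x.h) are possible because kbuild of this era already forces the configuration header onto every compile, roughly a -include include/linux/autoconf.h on the compiler command line, so CONFIG_* symbols are visible without any explicit include. The effect, illustrated with a made-up variable:

/* No #include <linux/config.h> needed; CONFIG_* comes in via kbuild. */
#ifdef CONFIG_PCMCIA_PROBE
static int my_probe_irqs = 1;   /* hypothetical example variable */
#else
static int my_probe_irqs;
#endif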
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c
index 35a92d1e4945..bd0308e89815 100644
--- a/drivers/pcmcia/i82365.c
+++ b/drivers/pcmcia/i82365.c
@@ -34,7 +34,6 @@
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/moduleparam.h> 35#include <linux/moduleparam.h>
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/config.h>
38#include <linux/types.h> 37#include <linux/types.h>
39#include <linux/fcntl.h> 38#include <linux/fcntl.h>
40#include <linux/string.h> 39#include <linux/string.h>
diff --git a/drivers/pcmcia/pcmcia_compat.c b/drivers/pcmcia/pcmcia_compat.c
deleted file mode 100644
index ebb161c4f819..000000000000
--- a/drivers/pcmcia/pcmcia_compat.c
+++ /dev/null
@@ -1,65 +0,0 @@
1/*
2 * PCMCIA 16-bit compatibility functions
3 *
4 * The initial developer of the original code is David A. Hinds
5 * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
6 * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
7 *
8 * Copyright (C) 2004 Dominik Brodowski
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15
16#include <linux/config.h>
17#include <linux/module.h>
18#include <linux/init.h>
19
20#define IN_CARD_SERVICES
21#include <pcmcia/cs_types.h>
22#include <pcmcia/cs.h>
23#include <pcmcia/bulkmem.h>
24#include <pcmcia/cistpl.h>
25#include <pcmcia/ds.h>
26#include <pcmcia/ss.h>
27
28#include "cs_internal.h"
29
30int pcmcia_get_first_tuple(struct pcmcia_device *p_dev, tuple_t *tuple)
31{
32 return pccard_get_first_tuple(p_dev->socket, p_dev->func, tuple);
33}
34EXPORT_SYMBOL(pcmcia_get_first_tuple);
35
36int pcmcia_get_next_tuple(struct pcmcia_device *p_dev, tuple_t *tuple)
37{
38 return pccard_get_next_tuple(p_dev->socket, p_dev->func, tuple);
39}
40EXPORT_SYMBOL(pcmcia_get_next_tuple);
41
42int pcmcia_get_tuple_data(struct pcmcia_device *p_dev, tuple_t *tuple)
43{
44 return pccard_get_tuple_data(p_dev->socket, tuple);
45}
46EXPORT_SYMBOL(pcmcia_get_tuple_data);
47
48int pcmcia_parse_tuple(struct pcmcia_device *p_dev, tuple_t *tuple, cisparse_t *parse)
49{
50 return pccard_parse_tuple(tuple, parse);
51}
52EXPORT_SYMBOL(pcmcia_parse_tuple);
53
54int pcmcia_validate_cis(struct pcmcia_device *p_dev, cisinfo_t *info)
55{
56 return pccard_validate_cis(p_dev->socket, p_dev->func, info);
57}
58EXPORT_SYMBOL(pcmcia_validate_cis);
59
60
61int pcmcia_reset_card(struct pcmcia_device *p_dev, client_req_t *req)
62{
63 return pccard_reset_card(p_dev->socket);
64}
65EXPORT_SYMBOL(pcmcia_reset_card);
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index 80969f7e7a0b..c53db7ceda5e 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -18,7 +18,6 @@
18 */ 18 */
19 19
20 20
21#include <linux/config.h>
22#include <linux/kernel.h> 21#include <linux/kernel.h>
23#include <linux/module.h> 22#include <linux/module.h>
24#include <linux/init.h> 23#include <linux/init.h>
@@ -70,10 +69,26 @@ extern int ds_pc_debug;
70#define ds_dbg(lvl, fmt, arg...) do { } while (0) 69#define ds_dbg(lvl, fmt, arg...) do { } while (0)
71#endif 70#endif
72 71
72static struct pcmcia_device *get_pcmcia_device(struct pcmcia_socket *s,
73 unsigned int function)
74{
75 struct pcmcia_device *p_dev = NULL;
76 unsigned long flags;
77
78 spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
79 list_for_each_entry(p_dev, &s->devices_list, socket_device_list) {
80 if (p_dev->func == function) {
81 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
82 return pcmcia_get_dev(p_dev);
83 }
84 }
85 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
86 return NULL;
87}
73 88
74/* backwards-compatible accessing of driver --- by name! */ 89/* backwards-compatible accessing of driver --- by name! */
75 90
76static struct pcmcia_driver * get_pcmcia_driver (dev_info_t *dev_info) 91static struct pcmcia_driver *get_pcmcia_driver(dev_info_t *dev_info)
77{ 92{
78 struct device_driver *drv; 93 struct device_driver *drv;
79 struct pcmcia_driver *p_drv; 94 struct pcmcia_driver *p_drv;
@@ -214,7 +229,7 @@ static int bind_request(struct pcmcia_socket *s, bind_info_t *bind_info)
214 * by userspace before, we need to 229 * by userspace before, we need to
215 * return the "instance". */ 230 * return the "instance". */
216 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); 231 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
217 bind_info->instance = p_dev->instance; 232 bind_info->instance = p_dev;
218 ret = -EBUSY; 233 ret = -EBUSY;
219 goto err_put_module; 234 goto err_put_module;
220 } else { 235 } else {
@@ -253,9 +268,9 @@ rescan:
253 /* 268 /*
254 * Prevent this racing with a card insertion. 269 * Prevent this racing with a card insertion.
255 */ 270 */
256 down(&s->skt_sem); 271 mutex_lock(&s->skt_mutex);
257 bus_rescan_devices(&pcmcia_bus_type); 272 bus_rescan_devices(&pcmcia_bus_type);
258 up(&s->skt_sem); 273 mutex_unlock(&s->skt_mutex);
259 274
260 /* check whether the driver indeed matched. I don't care if this 275 /* check whether the driver indeed matched. I don't care if this
261 * is racy or not, because it can only happen on cardmgr access 276 * is racy or not, because it can only happen on cardmgr access
@@ -289,6 +304,7 @@ static int get_device_info(struct pcmcia_socket *s, bind_info_t *bind_info, int
289{ 304{
290 dev_node_t *node; 305 dev_node_t *node;
291 struct pcmcia_device *p_dev; 306 struct pcmcia_device *p_dev;
307 struct pcmcia_driver *p_drv;
292 unsigned long flags; 308 unsigned long flags;
293 int ret = 0; 309 int ret = 0;
294 310
@@ -343,16 +359,16 @@ static int get_device_info(struct pcmcia_socket *s, bind_info_t *bind_info, int
343 found: 359 found:
344 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); 360 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
345 361
346 if ((!p_dev->instance) || 362 p_drv = to_pcmcia_drv(p_dev->dev.driver);
347 (p_dev->instance->state & DEV_CONFIG_PENDING)) { 363 if (p_drv && !p_dev->_locked) {
348 ret = -EAGAIN; 364 ret = -EAGAIN;
349 goto err_put; 365 goto err_put;
350 } 366 }
351 367
352 if (first) 368 if (first)
353 node = p_dev->instance->dev; 369 node = p_dev->dev_node;
354 else 370 else
355 for (node = p_dev->instance->dev; node; node = node->next) 371 for (node = p_dev->dev_node; node; node = node->next)
356 if (node == bind_info->next) 372 if (node == bind_info->next)
357 break; 373 break;
358 if (!node) { 374 if (!node) {
@@ -583,14 +599,16 @@ static int ds_ioctl(struct inode * inode, struct file * file,
583 if (buf->config.Function && 599 if (buf->config.Function &&
584 (buf->config.Function >= s->functions)) 600 (buf->config.Function >= s->functions))
585 ret = CS_BAD_ARGS; 601 ret = CS_BAD_ARGS;
586 else 602 else {
587 ret = pccard_get_configuration_info(s, 603 struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->config.Function);
588 buf->config.Function, &buf->config); 604 ret = pccard_get_configuration_info(s, p_dev, &buf->config);
605 pcmcia_put_dev(p_dev);
606 }
589 break; 607 break;
590 case DS_GET_FIRST_TUPLE: 608 case DS_GET_FIRST_TUPLE:
591 down(&s->skt_sem); 609 mutex_lock(&s->skt_mutex);
592 pcmcia_validate_mem(s); 610 pcmcia_validate_mem(s);
593 up(&s->skt_sem); 611 mutex_unlock(&s->skt_mutex);
594 ret = pccard_get_first_tuple(s, BIND_FN_ALL, &buf->tuple); 612 ret = pccard_get_first_tuple(s, BIND_FN_ALL, &buf->tuple);
595 break; 613 break;
596 case DS_GET_NEXT_TUPLE: 614 case DS_GET_NEXT_TUPLE:
@@ -609,16 +627,19 @@ static int ds_ioctl(struct inode * inode, struct file * file,
609 ret = pccard_reset_card(s); 627 ret = pccard_reset_card(s);
610 break; 628 break;
611 case DS_GET_STATUS: 629 case DS_GET_STATUS:
612 if (buf->status.Function && 630 if (buf->status.Function &&
613 (buf->status.Function >= s->functions)) 631 (buf->status.Function >= s->functions))
614 ret = CS_BAD_ARGS; 632 ret = CS_BAD_ARGS;
615 else 633 else {
616 ret = pccard_get_status(s, buf->status.Function, &buf->status); 634 struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->status.Function);
617 break; 635 ret = pccard_get_status(s, p_dev, &buf->status);
636 pcmcia_put_dev(p_dev);
637 }
638 break;
618 case DS_VALIDATE_CIS: 639 case DS_VALIDATE_CIS:
619 down(&s->skt_sem); 640 mutex_lock(&s->skt_mutex);
620 pcmcia_validate_mem(s); 641 pcmcia_validate_mem(s);
621 up(&s->skt_sem); 642 mutex_unlock(&s->skt_mutex);
622 ret = pccard_validate_cis(s, BIND_FN_ALL, &buf->cisinfo); 643 ret = pccard_validate_cis(s, BIND_FN_ALL, &buf->cisinfo);
623 break; 644 break;
624 case DS_SUSPEND_CARD: 645 case DS_SUSPEND_CARD:
@@ -638,12 +659,16 @@ static int ds_ioctl(struct inode * inode, struct file * file,
638 err = -EPERM; 659 err = -EPERM;
639 goto free_out; 660 goto free_out;
640 } 661 }
641 if (buf->conf_reg.Function && 662
642 (buf->conf_reg.Function >= s->functions)) 663 ret = CS_BAD_ARGS;
643 ret = CS_BAD_ARGS; 664
644 else 665 if (!(buf->conf_reg.Function &&
645 ret = pccard_access_configuration_register(s, 666 (buf->conf_reg.Function >= s->functions))) {
646 buf->conf_reg.Function, &buf->conf_reg); 667 struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->conf_reg.Function);
668 if (p_dev)
669 ret = pcmcia_access_configuration_register(p_dev, &buf->conf_reg);
670 pcmcia_put_dev(p_dev);
671 }
647 break; 672 break;
648 case DS_GET_FIRST_REGION: 673 case DS_GET_FIRST_REGION:
649 case DS_GET_NEXT_REGION: 674 case DS_GET_NEXT_REGION:
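The new get_pcmcia_device() helper near the top of this file is the standard "find under a spinlock, take a reference, then drop the lock" idiom; it lets the ioctl paths hand pccard_get_configuration_info(), pccard_get_status() and pcmcia_access_configuration_register() a refcounted struct pcmcia_device instead of a bare function number. The idiom in isolation, with invented names, looks like:

/* Assumes <linux/list.h>, <linux/spinlock.h>, <linux/kref.h>;
 * my_obj and my_find_and_get are illustrative, not kernel symbols. */
struct my_obj {
        struct list_head node;
        struct kref ref;
        unsigned int key;
};

static struct my_obj *my_find_and_get(struct list_head *head,
                                      spinlock_t *lock, unsigned int key)
{
        struct my_obj *obj;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        list_for_each_entry(obj, head, node) {
                if (obj->key == key) {
                        kref_get(&obj->ref);    /* pin it while still locked */
                        spin_unlock_irqrestore(lock, flags);
                        return obj;
                }
        }
        spin_unlock_irqrestore(lock, flags);
        return NULL;                            /* not found */
}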
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 89022ad5b520..45063b4e5b78 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -14,7 +14,6 @@
14 * 14 *
15 */ 15 */
16 16
17#include <linux/config.h>
18#include <linux/module.h> 17#include <linux/module.h>
19#include <linux/kernel.h> 18#include <linux/kernel.h>
20#include <linux/interrupt.h> 19#include <linux/interrupt.h>
@@ -89,7 +88,7 @@ static int alloc_io_space(struct pcmcia_socket *s, u_int attr, ioaddr_t *base,
89 } 88 }
90 if ((s->features & SS_CAP_STATIC_MAP) && s->io_offset) { 89 if ((s->features & SS_CAP_STATIC_MAP) && s->io_offset) {
91 *base = s->io_offset | (*base & 0x0fff); 90 *base = s->io_offset | (*base & 0x0fff);
92 s->io[0].Attributes = attr; 91 s->io[0].res->flags = (s->io[0].res->flags & ~IORESOURCE_BITS) | (attr & IORESOURCE_BITS);
93 return 0; 92 return 0;
94 } 93 }
95 /* Check for an already-allocated window that must conflict with 94 /* Check for an already-allocated window that must conflict with
@@ -97,38 +96,36 @@ static int alloc_io_space(struct pcmcia_socket *s, u_int attr, ioaddr_t *base,
97 * potential conflicts, just the most obvious ones. 96 * potential conflicts, just the most obvious ones.
98 */ 97 */
99 for (i = 0; i < MAX_IO_WIN; i++) 98 for (i = 0; i < MAX_IO_WIN; i++)
100 if ((s->io[i].NumPorts != 0) && 99 if ((s->io[i].res) &&
101 ((s->io[i].BasePort & (align-1)) == *base)) 100 ((s->io[i].res->start & (align-1)) == *base))
102 return 1; 101 return 1;
103 for (i = 0; i < MAX_IO_WIN; i++) { 102 for (i = 0; i < MAX_IO_WIN; i++) {
104 if (s->io[i].NumPorts == 0) { 103 if (!s->io[i].res) {
105 s->io[i].res = pcmcia_find_io_region(*base, num, align, s); 104 s->io[i].res = pcmcia_find_io_region(*base, num, align, s);
106 if (s->io[i].res) { 105 if (s->io[i].res) {
107 s->io[i].Attributes = attr; 106 *base = s->io[i].res->start;
108 s->io[i].BasePort = *base = s->io[i].res->start; 107 s->io[i].res->flags = (s->io[i].res->flags & ~IORESOURCE_BITS) | (attr & IORESOURCE_BITS);
109 s->io[i].NumPorts = s->io[i].InUse = num; 108 s->io[i].InUse = num;
110 break; 109 break;
111 } else 110 } else
112 return 1; 111 return 1;
113 } else if (s->io[i].Attributes != attr) 112 } else if ((s->io[i].res->flags & IORESOURCE_BITS) != (attr & IORESOURCE_BITS))
114 continue; 113 continue;
115 /* Try to extend top of window */ 114 /* Try to extend top of window */
116 try = s->io[i].BasePort + s->io[i].NumPorts; 115 try = s->io[i].res->end + 1;
117 if ((*base == 0) || (*base == try)) 116 if ((*base == 0) || (*base == try))
118 if (pcmcia_adjust_io_region(s->io[i].res, s->io[i].res->start, 117 if (pcmcia_adjust_io_region(s->io[i].res, s->io[i].res->start,
119 s->io[i].res->end + num, s) == 0) { 118 s->io[i].res->end + num, s) == 0) {
120 *base = try; 119 *base = try;
121 s->io[i].NumPorts += num;
122 s->io[i].InUse += num; 120 s->io[i].InUse += num;
123 break; 121 break;
124 } 122 }
125 /* Try to extend bottom of window */ 123 /* Try to extend bottom of window */
126 try = s->io[i].BasePort - num; 124 try = s->io[i].res->start - num;
127 if ((*base == 0) || (*base == try)) 125 if ((*base == 0) || (*base == try))
128 if (pcmcia_adjust_io_region(s->io[i].res, s->io[i].res->start - num, 126 if (pcmcia_adjust_io_region(s->io[i].res, s->io[i].res->start - num,
129 s->io[i].res->end, s) == 0) { 127 s->io[i].res->end, s) == 0) {
130 s->io[i].BasePort = *base = try; 128 *base = try;
131 s->io[i].NumPorts += num;
132 s->io[i].InUse += num; 129 s->io[i].InUse += num;
133 break; 130 break;
134 } 131 }
@@ -143,12 +140,13 @@ static void release_io_space(struct pcmcia_socket *s, ioaddr_t base,
143 int i; 140 int i;
144 141
145 for (i = 0; i < MAX_IO_WIN; i++) { 142 for (i = 0; i < MAX_IO_WIN; i++) {
146 if ((s->io[i].BasePort <= base) && 143 if (!s->io[i].res)
147 (s->io[i].BasePort+s->io[i].NumPorts >= base+num)) { 144 continue;
145 if ((s->io[i].res->start <= base) &&
146 (s->io[i].res->end >= base+num-1)) {
148 s->io[i].InUse -= num; 147 s->io[i].InUse -= num;
149 /* Free the window if no one else is using it */ 148 /* Free the window if no one else is using it */
150 if (s->io[i].InUse == 0) { 149 if (s->io[i].InUse == 0) {
151 s->io[i].NumPorts = 0;
152 release_resource(s->io[i].res); 150 release_resource(s->io[i].res);
153 kfree(s->io[i].res); 151 kfree(s->io[i].res);
154 s->io[i].res = NULL; 152 s->io[i].res = NULL;
@@ -165,21 +163,19 @@ static void release_io_space(struct pcmcia_socket *s, ioaddr_t base,
165 * this and the tuple reading services. 163 * this and the tuple reading services.
166 */ 164 */
167 165
168int pccard_access_configuration_register(struct pcmcia_socket *s, 166int pcmcia_access_configuration_register(struct pcmcia_device *p_dev,
169 unsigned int function,
170 conf_reg_t *reg) 167 conf_reg_t *reg)
171{ 168{
169 struct pcmcia_socket *s;
172 config_t *c; 170 config_t *c;
173 int addr; 171 int addr;
174 u_char val; 172 u_char val;
175 173
176 if (!s || !s->config) 174 if (!p_dev || !p_dev->function_config)
177 return CS_NO_CARD; 175 return CS_NO_CARD;
178 176
179 c = &s->config[function]; 177 s = p_dev->socket;
180 178 c = p_dev->function_config;
181 if (c == NULL)
182 return CS_NO_CARD;
183 179
184 if (!(c->state & CONFIG_LOCKED)) 180 if (!(c->state & CONFIG_LOCKED))
185 return CS_CONFIGURATION_LOCKED; 181 return CS_CONFIGURATION_LOCKED;
@@ -200,20 +196,12 @@ int pccard_access_configuration_register(struct pcmcia_socket *s,
200 break; 196 break;
201 } 197 }
202 return CS_SUCCESS; 198 return CS_SUCCESS;
203} /* pccard_access_configuration_register */ 199} /* pcmcia_access_configuration_register */
204
205int pcmcia_access_configuration_register(struct pcmcia_device *p_dev,
206 conf_reg_t *reg)
207{
208 return pccard_access_configuration_register(p_dev->socket,
209 p_dev->func, reg);
210}
211EXPORT_SYMBOL(pcmcia_access_configuration_register); 200EXPORT_SYMBOL(pcmcia_access_configuration_register);
212 201
213 202
214
215int pccard_get_configuration_info(struct pcmcia_socket *s, 203int pccard_get_configuration_info(struct pcmcia_socket *s,
216 unsigned int function, 204 struct pcmcia_device *p_dev,
217 config_info_t *config) 205 config_info_t *config)
218{ 206{
219 config_t *c; 207 config_t *c;
@@ -221,7 +209,7 @@ int pccard_get_configuration_info(struct pcmcia_socket *s,
221 if (!(s->state & SOCKET_PRESENT)) 209 if (!(s->state & SOCKET_PRESENT))
222 return CS_NO_CARD; 210 return CS_NO_CARD;
223 211
224 config->Function = function; 212 config->Function = p_dev->func;
225 213
226#ifdef CONFIG_CARDBUS 214#ifdef CONFIG_CARDBUS
227 if (s->state & SOCKET_CARDBUS) { 215 if (s->state & SOCKET_CARDBUS) {
@@ -235,14 +223,14 @@ int pccard_get_configuration_info(struct pcmcia_socket *s,
235 config->AssignedIRQ = s->irq.AssignedIRQ; 223 config->AssignedIRQ = s->irq.AssignedIRQ;
236 if (config->AssignedIRQ) 224 if (config->AssignedIRQ)
237 config->Attributes |= CONF_ENABLE_IRQ; 225 config->Attributes |= CONF_ENABLE_IRQ;
238 config->BasePort1 = s->io[0].BasePort; 226 config->BasePort1 = s->io[0].res->start;
239 config->NumPorts1 = s->io[0].NumPorts; 227 config->NumPorts1 = s->io[0].res->end - config->BasePort1 + 1;
240 } 228 }
241 return CS_SUCCESS; 229 return CS_SUCCESS;
242 } 230 }
243#endif 231#endif
244 232
245 c = (s->config != NULL) ? &s->config[function] : NULL; 233 c = (p_dev) ? p_dev->function_config : NULL;
246 234
247 if ((c == NULL) || !(c->state & CONFIG_LOCKED)) { 235 if ((c == NULL) || !(c->state & CONFIG_LOCKED)) {
248 config->Attributes = 0; 236 config->Attributes = 0;
@@ -271,7 +259,7 @@ int pccard_get_configuration_info(struct pcmcia_socket *s,
271int pcmcia_get_configuration_info(struct pcmcia_device *p_dev, 259int pcmcia_get_configuration_info(struct pcmcia_device *p_dev,
272 config_info_t *config) 260 config_info_t *config)
273{ 261{
274 return pccard_get_configuration_info(p_dev->socket, p_dev->func, 262 return pccard_get_configuration_info(p_dev->socket, p_dev,
275 config); 263 config);
276} 264}
277EXPORT_SYMBOL(pcmcia_get_configuration_info); 265EXPORT_SYMBOL(pcmcia_get_configuration_info);
@@ -317,7 +305,7 @@ EXPORT_SYMBOL(pcmcia_get_window);
317 * SocketState yet: I haven't seen any point for it. 305 * SocketState yet: I haven't seen any point for it.
318 */ 306 */
319 307
320int pccard_get_status(struct pcmcia_socket *s, unsigned int function, 308int pccard_get_status(struct pcmcia_socket *s, struct pcmcia_device *p_dev,
321 cs_status_t *status) 309 cs_status_t *status)
322{ 310{
323 config_t *c; 311 config_t *c;
@@ -334,11 +322,12 @@ int pccard_get_status(struct pcmcia_socket *s, unsigned int function,
334 if (!(s->state & SOCKET_PRESENT)) 322 if (!(s->state & SOCKET_PRESENT))
335 return CS_NO_CARD; 323 return CS_NO_CARD;
336 324
337 c = (s->config != NULL) ? &s->config[function] : NULL; 325 c = (p_dev) ? p_dev->function_config : NULL;
326
338 if ((c != NULL) && (c->state & CONFIG_LOCKED) && 327 if ((c != NULL) && (c->state & CONFIG_LOCKED) &&
339 (c->IntType & (INT_MEMORY_AND_IO | INT_ZOOMED_VIDEO))) { 328 (c->IntType & (INT_MEMORY_AND_IO | INT_ZOOMED_VIDEO))) {
340 u_char reg; 329 u_char reg;
341 if (c->Present & PRESENT_PIN_REPLACE) { 330 if (c->CardValues & PRESENT_PIN_REPLACE) {
342 pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_PRR)>>1, 1, &reg); 331 pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_PRR)>>1, 1, &reg);
343 status->CardState |= 332 status->CardState |=
344 (reg & PRR_WP_STATUS) ? CS_EVENT_WRITE_PROTECT : 0; 333 (reg & PRR_WP_STATUS) ? CS_EVENT_WRITE_PROTECT : 0;
@@ -352,7 +341,7 @@ int pccard_get_status(struct pcmcia_socket *s, unsigned int function,
352 /* No PRR? Then assume we're always ready */ 341 /* No PRR? Then assume we're always ready */
353 status->CardState |= CS_EVENT_READY_CHANGE; 342 status->CardState |= CS_EVENT_READY_CHANGE;
354 } 343 }
355 if (c->Present & PRESENT_EXT_STATUS) { 344 if (c->CardValues & PRESENT_EXT_STATUS) {
356 pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_ESR)>>1, 1, &reg); 345 pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_ESR)>>1, 1, &reg);
357 status->CardState |= 346 status->CardState |=
358 (reg & ESR_REQ_ATTN) ? CS_EVENT_REQUEST_ATTENTION : 0; 347 (reg & ESR_REQ_ATTN) ? CS_EVENT_REQUEST_ATTENTION : 0;
@@ -370,11 +359,9 @@ int pccard_get_status(struct pcmcia_socket *s, unsigned int function,
370 return CS_SUCCESS; 359 return CS_SUCCESS;
371} /* pccard_get_status */ 360} /* pccard_get_status */
372 361
373int pcmcia_get_status(client_handle_t handle, cs_status_t *status) 362int pcmcia_get_status(struct pcmcia_device *p_dev, cs_status_t *status)
374{ 363{
375 struct pcmcia_socket *s; 364 return pccard_get_status(p_dev->socket, p_dev, status);
376 s = SOCKET(handle);
377 return pccard_get_status(s, handle->func, status);
378} 365}
379EXPORT_SYMBOL(pcmcia_get_status); 366EXPORT_SYMBOL(pcmcia_get_status);
380 367
@@ -422,7 +409,8 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
422 config_t *c; 409 config_t *c;
423 410
424 s = p_dev->socket; 411 s = p_dev->socket;
425 c = CONFIG(p_dev); 412 c = p_dev->function_config;
413
426 if (!(s->state & SOCKET_PRESENT)) 414 if (!(s->state & SOCKET_PRESENT))
427 return CS_NO_CARD; 415 return CS_NO_CARD;
428 if (!(c->state & CONFIG_LOCKED)) 416 if (!(c->state & CONFIG_LOCKED))
@@ -454,6 +442,28 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
454 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) 442 (mod->Attributes & CONF_VPP2_CHANGE_VALID))
455 return CS_BAD_VPP; 443 return CS_BAD_VPP;
456 444
445 if (mod->Attributes & CONF_IO_CHANGE_WIDTH) {
446 pccard_io_map io_off = { 0, 0, 0, 0, 1 };
447 pccard_io_map io_on;
448 int i;
449
450 io_on.speed = io_speed;
451 for (i = 0; i < MAX_IO_WIN; i++) {
452 if (!s->io[i].res)
453 continue;
454 io_off.map = i;
455 io_on.map = i;
456
457 io_on.flags = MAP_ACTIVE | IO_DATA_PATH_WIDTH_8;
458 io_on.start = s->io[i].res->start;
459 io_on.stop = s->io[i].res->end;
460
461 s->ops->set_io_map(s, &io_off);
462 mdelay(40);
463 s->ops->set_io_map(s, &io_on);
464 }
465 }
466
457 return CS_SUCCESS; 467 return CS_SUCCESS;
458} /* modify_configuration */ 468} /* modify_configuration */
459EXPORT_SYMBOL(pcmcia_modify_configuration); 469EXPORT_SYMBOL(pcmcia_modify_configuration);
@@ -463,23 +473,23 @@ int pcmcia_release_configuration(struct pcmcia_device *p_dev)
463{ 473{
464 pccard_io_map io = { 0, 0, 0, 0, 1 }; 474 pccard_io_map io = { 0, 0, 0, 0, 1 };
465 struct pcmcia_socket *s = p_dev->socket; 475 struct pcmcia_socket *s = p_dev->socket;
476 config_t *c = p_dev->function_config;
466 int i; 477 int i;
467 478
468 if (!(p_dev->state & CLIENT_CONFIG_LOCKED)) 479 if (p_dev->_locked) {
469 return CS_BAD_HANDLE; 480 p_dev->_locked = 0;
470 p_dev->state &= ~CLIENT_CONFIG_LOCKED;
471
472 if (!(p_dev->state & CLIENT_STALE)) {
473 config_t *c = CONFIG(p_dev);
474 if (--(s->lock_count) == 0) { 481 if (--(s->lock_count) == 0) {
475 s->socket.flags = SS_OUTPUT_ENA; /* Is this correct? */ 482 s->socket.flags = SS_OUTPUT_ENA; /* Is this correct? */
476 s->socket.Vpp = 0; 483 s->socket.Vpp = 0;
477 s->socket.io_irq = 0; 484 s->socket.io_irq = 0;
478 s->ops->set_socket(s, &s->socket); 485 s->ops->set_socket(s, &s->socket);
479 } 486 }
487 }
488 if (c->state & CONFIG_LOCKED) {
489 c->state &= ~CONFIG_LOCKED;
480 if (c->state & CONFIG_IO_REQ) 490 if (c->state & CONFIG_IO_REQ)
481 for (i = 0; i < MAX_IO_WIN; i++) { 491 for (i = 0; i < MAX_IO_WIN; i++) {
482 if (s->io[i].NumPorts == 0) 492 if (!s->io[i].res)
483 continue; 493 continue;
484 s->io[i].Config--; 494 s->io[i].Config--;
485 if (s->io[i].Config != 0) 495 if (s->io[i].Config != 0)
@@ -487,12 +497,10 @@ int pcmcia_release_configuration(struct pcmcia_device *p_dev)
487 io.map = i; 497 io.map = i;
488 s->ops->set_io_map(s, &io); 498 s->ops->set_io_map(s, &io);
489 } 499 }
490 c->state &= ~CONFIG_LOCKED;
491 } 500 }
492 501
493 return CS_SUCCESS; 502 return CS_SUCCESS;
494} /* pcmcia_release_configuration */ 503} /* pcmcia_release_configuration */
495EXPORT_SYMBOL(pcmcia_release_configuration);
496 504
497 505
498/** pcmcia_release_io 506/** pcmcia_release_io
@@ -503,25 +511,23 @@ EXPORT_SYMBOL(pcmcia_release_configuration);
503 * don't bother checking the port ranges against the current socket 511 * don't bother checking the port ranges against the current socket
504 * values. 512 * values.
505 */ 513 */
506int pcmcia_release_io(struct pcmcia_device *p_dev, io_req_t *req) 514static int pcmcia_release_io(struct pcmcia_device *p_dev, io_req_t *req)
507{ 515{
508 struct pcmcia_socket *s = p_dev->socket; 516 struct pcmcia_socket *s = p_dev->socket;
517 config_t *c = p_dev->function_config;
509 518
510 if (!(p_dev->state & CLIENT_IO_REQ)) 519 if (!p_dev->_io )
511 return CS_BAD_HANDLE; 520 return CS_BAD_HANDLE;
512 p_dev->state &= ~CLIENT_IO_REQ; 521
513 522 p_dev->_io = 0;
514 if (!(p_dev->state & CLIENT_STALE)) { 523
515 config_t *c = CONFIG(p_dev); 524 if ((c->io.BasePort1 != req->BasePort1) ||
516 if (c->state & CONFIG_LOCKED) 525 (c->io.NumPorts1 != req->NumPorts1) ||
517 return CS_CONFIGURATION_LOCKED; 526 (c->io.BasePort2 != req->BasePort2) ||
518 if ((c->io.BasePort1 != req->BasePort1) || 527 (c->io.NumPorts2 != req->NumPorts2))
519 (c->io.NumPorts1 != req->NumPorts1) || 528 return CS_BAD_ARGS;
520 (c->io.BasePort2 != req->BasePort2) || 529
521 (c->io.NumPorts2 != req->NumPorts2)) 530 c->state &= ~CONFIG_IO_REQ;
522 return CS_BAD_ARGS;
523 c->state &= ~CONFIG_IO_REQ;
524 }
525 531
526 release_io_space(s, req->BasePort1, req->NumPorts1); 532 release_io_space(s, req->BasePort1, req->NumPorts1);
527 if (req->NumPorts2) 533 if (req->NumPorts2)
@@ -529,28 +535,26 @@ int pcmcia_release_io(struct pcmcia_device *p_dev, io_req_t *req)
529 535
530 return CS_SUCCESS; 536 return CS_SUCCESS;
531} /* pcmcia_release_io */ 537} /* pcmcia_release_io */
532EXPORT_SYMBOL(pcmcia_release_io);
533 538
534 539
535int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req) 540static int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req)
536{ 541{
537 struct pcmcia_socket *s = p_dev->socket; 542 struct pcmcia_socket *s = p_dev->socket;
538 if (!(p_dev->state & CLIENT_IRQ_REQ)) 543 config_t *c= p_dev->function_config;
544
545 if (!p_dev->_irq)
539 return CS_BAD_HANDLE; 546 return CS_BAD_HANDLE;
540 p_dev->state &= ~CLIENT_IRQ_REQ; 547 p_dev->_irq = 0;
541 548
542 if (!(p_dev->state & CLIENT_STALE)) { 549 if (c->state & CONFIG_LOCKED)
543 config_t *c = CONFIG(p_dev); 550 return CS_CONFIGURATION_LOCKED;
544 if (c->state & CONFIG_LOCKED) 551 if (c->irq.Attributes != req->Attributes)
545 return CS_CONFIGURATION_LOCKED; 552 return CS_BAD_ATTRIBUTE;
546 if (c->irq.Attributes != req->Attributes) 553 if (s->irq.AssignedIRQ != req->AssignedIRQ)
547 return CS_BAD_ATTRIBUTE; 554 return CS_BAD_IRQ;
548 if (s->irq.AssignedIRQ != req->AssignedIRQ) 555 if (--s->irq.Config == 0) {
549 return CS_BAD_IRQ; 556 c->state &= ~CONFIG_IRQ_REQ;
550 if (--s->irq.Config == 0) { 557 s->irq.AssignedIRQ = 0;
551 c->state &= ~CONFIG_IRQ_REQ;
552 s->irq.AssignedIRQ = 0;
553 }
554 } 558 }
555 559
556 if (req->Attributes & IRQ_HANDLE_PRESENT) { 560 if (req->Attributes & IRQ_HANDLE_PRESENT) {
@@ -563,7 +567,6 @@ int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req)
563 567
564 return CS_SUCCESS; 568 return CS_SUCCESS;
565} /* pcmcia_release_irq */ 569} /* pcmcia_release_irq */
566EXPORT_SYMBOL(pcmcia_release_irq);
567 570
568 571
569int pcmcia_release_window(window_handle_t win) 572int pcmcia_release_window(window_handle_t win)
@@ -573,7 +576,7 @@ int pcmcia_release_window(window_handle_t win)
573 if ((win == NULL) || (win->magic != WINDOW_MAGIC)) 576 if ((win == NULL) || (win->magic != WINDOW_MAGIC))
574 return CS_BAD_HANDLE; 577 return CS_BAD_HANDLE;
575 s = win->sock; 578 s = win->sock;
576 if (!(win->handle->state & CLIENT_WIN_REQ(win->index))) 579 if (!(win->handle->_win & CLIENT_WIN_REQ(win->index)))
577 return CS_BAD_HANDLE; 580 return CS_BAD_HANDLE;
578 581
579 /* Shut down memory window */ 582 /* Shut down memory window */
@@ -587,7 +590,7 @@ int pcmcia_release_window(window_handle_t win)
587 kfree(win->ctl.res); 590 kfree(win->ctl.res);
588 win->ctl.res = NULL; 591 win->ctl.res = NULL;
589 } 592 }
590 win->handle->state &= ~CLIENT_WIN_REQ(win->index); 593 win->handle->_win &= ~CLIENT_WIN_REQ(win->index);
591 594
592 win->magic = 0; 595 win->magic = 0;
593 596
@@ -610,16 +613,12 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
610 613
611 if (req->IntType & INT_CARDBUS) 614 if (req->IntType & INT_CARDBUS)
612 return CS_UNSUPPORTED_MODE; 615 return CS_UNSUPPORTED_MODE;
613 c = CONFIG(p_dev); 616 c = p_dev->function_config;
614 if (c->state & CONFIG_LOCKED) 617 if (c->state & CONFIG_LOCKED)
615 return CS_CONFIGURATION_LOCKED; 618 return CS_CONFIGURATION_LOCKED;
616 619
617 /* Do power control. We don't allow changes in Vcc. */ 620 /* Do power control. We don't allow changes in Vcc. */
618 if (s->socket.Vcc != req->Vcc) 621 s->socket.Vpp = req->Vpp;
619 return CS_BAD_VCC;
620 if (req->Vpp1 != req->Vpp2)
621 return CS_BAD_VPP;
622 s->socket.Vpp = req->Vpp1;
623 if (s->ops->set_socket(s, &s->socket)) 622 if (s->ops->set_socket(s, &s->socket))
624 return CS_BAD_VPP; 623 return CS_BAD_VPP;
625 624
@@ -643,7 +642,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
643 642
644 /* Set up CIS configuration registers */ 643 /* Set up CIS configuration registers */
645 base = c->ConfigBase = req->ConfigBase; 644 base = c->ConfigBase = req->ConfigBase;
646 c->Present = c->CardValues = req->Present; 645 c->CardValues = req->Present;
647 if (req->Present & PRESENT_COPY) { 646 if (req->Present & PRESENT_COPY) {
648 c->Copy = req->Copy; 647 c->Copy = req->Copy;
649 pcmcia_write_cis_mem(s, 1, (base + CISREG_SCR)>>1, 1, &c->Copy); 648 pcmcia_write_cis_mem(s, 1, (base + CISREG_SCR)>>1, 1, &c->Copy);
@@ -690,10 +689,10 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
690 if (c->state & CONFIG_IO_REQ) { 689 if (c->state & CONFIG_IO_REQ) {
691 iomap.speed = io_speed; 690 iomap.speed = io_speed;
692 for (i = 0; i < MAX_IO_WIN; i++) 691 for (i = 0; i < MAX_IO_WIN; i++)
693 if (s->io[i].NumPorts != 0) { 692 if (s->io[i].res) {
694 iomap.map = i; 693 iomap.map = i;
695 iomap.flags = MAP_ACTIVE; 694 iomap.flags = MAP_ACTIVE;
696 switch (s->io[i].Attributes & IO_DATA_PATH_WIDTH) { 695 switch (s->io[i].res->flags & IO_DATA_PATH_WIDTH) {
697 case IO_DATA_PATH_WIDTH_16: 696 case IO_DATA_PATH_WIDTH_16:
698 iomap.flags |= MAP_16BIT; break; 697 iomap.flags |= MAP_16BIT; break;
699 case IO_DATA_PATH_WIDTH_AUTO: 698 case IO_DATA_PATH_WIDTH_AUTO:
@@ -701,15 +700,15 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
701 default: 700 default:
702 break; 701 break;
703 } 702 }
704 iomap.start = s->io[i].BasePort; 703 iomap.start = s->io[i].res->start;
705 iomap.stop = iomap.start + s->io[i].NumPorts - 1; 704 iomap.stop = s->io[i].res->end;
706 s->ops->set_io_map(s, &iomap); 705 s->ops->set_io_map(s, &iomap);
707 s->io[i].Config++; 706 s->io[i].Config++;
708 } 707 }
709 } 708 }
710 709
711 c->state |= CONFIG_LOCKED; 710 c->state |= CONFIG_LOCKED;
712 p_dev->state |= CLIENT_CONFIG_LOCKED; 711 p_dev->_locked = 1;
713 return CS_SUCCESS; 712 return CS_SUCCESS;
714} /* pcmcia_request_configuration */ 713} /* pcmcia_request_configuration */
715EXPORT_SYMBOL(pcmcia_request_configuration); 714EXPORT_SYMBOL(pcmcia_request_configuration);
@@ -730,7 +729,7 @@ int pcmcia_request_io(struct pcmcia_device *p_dev, io_req_t *req)
730 729
731 if (!req) 730 if (!req)
732 return CS_UNSUPPORTED_MODE; 731 return CS_UNSUPPORTED_MODE;
733 c = CONFIG(p_dev); 732 c = p_dev->function_config;
734 if (c->state & CONFIG_LOCKED) 733 if (c->state & CONFIG_LOCKED)
735 return CS_CONFIGURATION_LOCKED; 734 return CS_CONFIGURATION_LOCKED;
736 if (c->state & CONFIG_IO_REQ) 735 if (c->state & CONFIG_IO_REQ)
@@ -755,7 +754,7 @@ int pcmcia_request_io(struct pcmcia_device *p_dev, io_req_t *req)
755 754
756 c->io = *req; 755 c->io = *req;
757 c->state |= CONFIG_IO_REQ; 756 c->state |= CONFIG_IO_REQ;
758 p_dev->state |= CLIENT_IO_REQ; 757 p_dev->_io = 1;
759 return CS_SUCCESS; 758 return CS_SUCCESS;
760} /* pcmcia_request_io */ 759} /* pcmcia_request_io */
761EXPORT_SYMBOL(pcmcia_request_io); 760EXPORT_SYMBOL(pcmcia_request_io);
@@ -786,7 +785,7 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
786 785
787 if (!(s->state & SOCKET_PRESENT)) 786 if (!(s->state & SOCKET_PRESENT))
788 return CS_NO_CARD; 787 return CS_NO_CARD;
789 c = CONFIG(p_dev); 788 c = p_dev->function_config;
790 if (c->state & CONFIG_LOCKED) 789 if (c->state & CONFIG_LOCKED)
791 return CS_CONFIGURATION_LOCKED; 790 return CS_CONFIGURATION_LOCKED;
792 if (c->state & CONFIG_IRQ_REQ) 791 if (c->state & CONFIG_IRQ_REQ)
@@ -851,7 +850,7 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
851 s->irq.Config++; 850 s->irq.Config++;
852 851
853 c->state |= CONFIG_IRQ_REQ; 852 c->state |= CONFIG_IRQ_REQ;
854 p_dev->state |= CLIENT_IRQ_REQ; 853 p_dev->_irq = 1;
855 854
856#ifdef CONFIG_PCMCIA_PROBE 855#ifdef CONFIG_PCMCIA_PROBE
857 pcmcia_used_irq[irq]++; 856 pcmcia_used_irq[irq]++;
@@ -911,7 +910,7 @@ int pcmcia_request_window(struct pcmcia_device **p_dev, win_req_t *req, window_h
911 if (!win->ctl.res) 910 if (!win->ctl.res)
912 return CS_IN_USE; 911 return CS_IN_USE;
913 } 912 }
914 (*p_dev)->state |= CLIENT_WIN_REQ(w); 913 (*p_dev)->_win |= CLIENT_WIN_REQ(w);
915 914
916 /* Configure the socket controller */ 915 /* Configure the socket controller */
917 win->ctl.map = w+1; 916 win->ctl.map = w+1;
@@ -941,3 +940,14 @@ int pcmcia_request_window(struct pcmcia_device **p_dev, win_req_t *req, window_h
941 return CS_SUCCESS; 940 return CS_SUCCESS;
942} /* pcmcia_request_window */ 941} /* pcmcia_request_window */
943EXPORT_SYMBOL(pcmcia_request_window); 942EXPORT_SYMBOL(pcmcia_request_window);
943
944void pcmcia_disable_device(struct pcmcia_device *p_dev) {
945 pcmcia_release_configuration(p_dev);
946 pcmcia_release_io(p_dev, &p_dev->io);
947 pcmcia_release_irq(p_dev, &p_dev->irq);
948 if (&p_dev->win)
949 pcmcia_release_window(p_dev->win);
950
951 p_dev->dev_node = NULL;
952}
953EXPORT_SYMBOL(pcmcia_disable_device);
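pcmcia_disable_device(), added at the bottom of pcmcia_resource.c, rolls the configuration, I/O, IRQ and window release into one call, which is why pcmcia_release_configuration(), pcmcia_release_io() and pcmcia_release_irq() lose their EXPORT_SYMBOLs above (pcmcia_release_configuration is instead exposed to the core through ds_internal.h). A driver detach path can then shrink to something like this sketch (the upper-layer teardown helper is invented):

static void my_pcmcia_detach(struct pcmcia_device *p_dev)
{
        /* Unregister whatever the driver exposed to the rest of the kernel
         * first, then let the core undo configuration, I/O, IRQ and memory
         * windows in one go. */
        my_unregister_upper_layers(p_dev);      /* hypothetical helper */
        pcmcia_disable_device(p_dev);
}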
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index f2789afb22b2..16d1ea7b0a18 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -8,7 +8,6 @@
8 */ 8 */
9 9
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/config.h>
12#include <linux/module.h> 11#include <linux/module.h>
13#include <linux/pci.h> 12#include <linux/pci.h>
14#include <linux/init.h> 13#include <linux/init.h>
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c
index 514609369836..81dfc2cac2b4 100644
--- a/drivers/pcmcia/rsrc_mgr.c
+++ b/drivers/pcmcia/rsrc_mgr.c
@@ -12,7 +12,6 @@
12 * (C) 1999 David A. Hinds 12 * (C) 1999 David A. Hinds
13 */ 13 */
14 14
15#include <linux/config.h>
16#include <linux/module.h> 15#include <linux/module.h>
17#include <linux/kernel.h> 16#include <linux/kernel.h>
18 17
@@ -22,6 +21,8 @@
22#include "cs_internal.h" 21#include "cs_internal.h"
23 22
24 23
24#ifdef CONFIG_PCMCIA_IOCTL
25
25#ifdef CONFIG_PCMCIA_PROBE 26#ifdef CONFIG_PCMCIA_PROBE
26 27
27static int adjust_irq(struct pcmcia_socket *s, adjust_t *adj) 28static int adjust_irq(struct pcmcia_socket *s, adjust_t *adj)
@@ -98,6 +99,8 @@ int pcmcia_adjust_resource_info(adjust_t *adj)
98} 99}
99EXPORT_SYMBOL(pcmcia_adjust_resource_info); 100EXPORT_SYMBOL(pcmcia_adjust_resource_info);
100 101
102#endif
103
101int pcmcia_validate_mem(struct pcmcia_socket *s) 104int pcmcia_validate_mem(struct pcmcia_socket *s)
102{ 105{
103 if (s->resource_ops->validate_mem) 106 if (s->resource_ops->validate_mem)
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 5301ac60358f..0f8b157c9717 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -12,7 +12,6 @@
12 * (C) 1999 David A. Hinds 12 * (C) 1999 David A. Hinds
13 */ 13 */
14 14
15#include <linux/config.h>
16#include <linux/module.h> 15#include <linux/module.h>
17#include <linux/moduleparam.h> 16#include <linux/moduleparam.h>
18#include <linux/init.h> 17#include <linux/init.h>
@@ -61,7 +60,7 @@ struct socket_data {
61 unsigned int rsrc_mem_probe; 60 unsigned int rsrc_mem_probe;
62}; 61};
63 62
64static DECLARE_MUTEX(rsrc_sem); 63static DEFINE_MUTEX(rsrc_mutex);
65#define MEM_PROBE_LOW (1 << 0) 64#define MEM_PROBE_LOW (1 << 0)
66#define MEM_PROBE_HIGH (1 << 1) 65#define MEM_PROBE_HIGH (1 << 1)
67 66
@@ -484,7 +483,7 @@ static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask)
484 483
485 484
486/* 485/*
487 * Locking note: Must be called with skt_sem held! 486 * Locking note: Must be called with skt_mutex held!
488 */ 487 */
489static int pcmcia_nonstatic_validate_mem(struct pcmcia_socket *s) 488static int pcmcia_nonstatic_validate_mem(struct pcmcia_socket *s)
490{ 489{
@@ -495,7 +494,7 @@ static int pcmcia_nonstatic_validate_mem(struct pcmcia_socket *s)
495 if (!probe_mem) 494 if (!probe_mem)
496 return 0; 495 return 0;
497 496
498 down(&rsrc_sem); 497 mutex_lock(&rsrc_mutex);
499 498
500 if (s->features & SS_CAP_PAGE_REGS) 499 if (s->features & SS_CAP_PAGE_REGS)
501 probe_mask = MEM_PROBE_HIGH; 500 probe_mask = MEM_PROBE_HIGH;
@@ -507,7 +506,7 @@ static int pcmcia_nonstatic_validate_mem(struct pcmcia_socket *s)
507 s_data->rsrc_mem_probe |= probe_mask; 506 s_data->rsrc_mem_probe |= probe_mask;
508 } 507 }
509 508
510 up(&rsrc_sem); 509 mutex_unlock(&rsrc_mutex);
511 510
512 return ret; 511 return ret;
513} 512}
@@ -585,7 +584,7 @@ static int nonstatic_adjust_io_region(struct resource *res, unsigned long r_star
585 struct socket_data *s_data = s->resource_data; 584 struct socket_data *s_data = s->resource_data;
586 int ret = -ENOMEM; 585 int ret = -ENOMEM;
587 586
588 down(&rsrc_sem); 587 mutex_lock(&rsrc_mutex);
589 for (m = s_data->io_db.next; m != &s_data->io_db; m = m->next) { 588 for (m = s_data->io_db.next; m != &s_data->io_db; m = m->next) {
590 unsigned long start = m->base; 589 unsigned long start = m->base;
591 unsigned long end = m->base + m->num - 1; 590 unsigned long end = m->base + m->num - 1;
@@ -596,7 +595,7 @@ static int nonstatic_adjust_io_region(struct resource *res, unsigned long r_star
596 ret = adjust_resource(res, r_start, r_end - r_start + 1); 595 ret = adjust_resource(res, r_start, r_end - r_start + 1);
597 break; 596 break;
598 } 597 }
599 up(&rsrc_sem); 598 mutex_unlock(&rsrc_mutex);
600 599
601 return ret; 600 return ret;
602} 601}
@@ -630,7 +629,7 @@ static struct resource *nonstatic_find_io_region(unsigned long base, int num,
630 data.offset = base & data.mask; 629 data.offset = base & data.mask;
631 data.map = &s_data->io_db; 630 data.map = &s_data->io_db;
632 631
633 down(&rsrc_sem); 632 mutex_lock(&rsrc_mutex);
634#ifdef CONFIG_PCI 633#ifdef CONFIG_PCI
635 if (s->cb_dev) { 634 if (s->cb_dev) {
636 ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1, 635 ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1,
@@ -639,7 +638,7 @@ static struct resource *nonstatic_find_io_region(unsigned long base, int num,
639#endif 638#endif
640 ret = allocate_resource(&ioport_resource, res, num, min, ~0UL, 639 ret = allocate_resource(&ioport_resource, res, num, min, ~0UL,
641 1, pcmcia_align, &data); 640 1, pcmcia_align, &data);
642 up(&rsrc_sem); 641 mutex_unlock(&rsrc_mutex);
643 642
644 if (ret != 0) { 643 if (ret != 0) {
645 kfree(res); 644 kfree(res);
@@ -672,7 +671,7 @@ static struct resource * nonstatic_find_mem_region(u_long base, u_long num,
672 min = 0x100000UL + base; 671 min = 0x100000UL + base;
673 } 672 }
674 673
675 down(&rsrc_sem); 674 mutex_lock(&rsrc_mutex);
676#ifdef CONFIG_PCI 675#ifdef CONFIG_PCI
677 if (s->cb_dev) { 676 if (s->cb_dev) {
678 ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 677 ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num,
@@ -682,7 +681,7 @@ static struct resource * nonstatic_find_mem_region(u_long base, u_long num,
682#endif 681#endif
683 ret = allocate_resource(&iomem_resource, res, num, min, 682 ret = allocate_resource(&iomem_resource, res, num, min,
684 max, 1, pcmcia_align, &data); 683 max, 1, pcmcia_align, &data);
685 up(&rsrc_sem); 684 mutex_unlock(&rsrc_mutex);
686 if (ret == 0 || low) 685 if (ret == 0 || low)
687 break; 686 break;
688 low = 1; 687 low = 1;
@@ -705,7 +704,7 @@ static int adjust_memory(struct pcmcia_socket *s, unsigned int action, unsigned
705 if (end < start) 704 if (end < start)
706 return -EINVAL; 705 return -EINVAL;
707 706
708 down(&rsrc_sem); 707 mutex_lock(&rsrc_mutex);
709 switch (action) { 708 switch (action) {
710 case ADD_MANAGED_RESOURCE: 709 case ADD_MANAGED_RESOURCE:
711 ret = add_interval(&data->mem_db, start, size); 710 ret = add_interval(&data->mem_db, start, size);
@@ -723,7 +722,7 @@ static int adjust_memory(struct pcmcia_socket *s, unsigned int action, unsigned
723 default: 722 default:
724 ret = -EINVAL; 723 ret = -EINVAL;
725 } 724 }
726 up(&rsrc_sem); 725 mutex_unlock(&rsrc_mutex);
727 726
728 return ret; 727 return ret;
729} 728}
@@ -741,7 +740,7 @@ static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long
741 if (end > IO_SPACE_LIMIT) 740 if (end > IO_SPACE_LIMIT)
742 return -EINVAL; 741 return -EINVAL;
743 742
744 down(&rsrc_sem); 743 mutex_lock(&rsrc_mutex);
745 switch (action) { 744 switch (action) {
746 case ADD_MANAGED_RESOURCE: 745 case ADD_MANAGED_RESOURCE:
747 if (add_interval(&data->io_db, start, size) != 0) { 746 if (add_interval(&data->io_db, start, size) != 0) {
@@ -760,7 +759,7 @@ static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long
760 ret = -EINVAL; 759 ret = -EINVAL;
761 break; 760 break;
762 } 761 }
763 up(&rsrc_sem); 762 mutex_unlock(&rsrc_mutex);
764 763
765 return ret; 764 return ret;
766} 765}
@@ -867,7 +866,7 @@ static void nonstatic_release_resource_db(struct pcmcia_socket *s)
867 struct socket_data *data = s->resource_data; 866 struct socket_data *data = s->resource_data;
868 struct resource_map *p, *q; 867 struct resource_map *p, *q;
869 868
870 down(&rsrc_sem); 869 mutex_lock(&rsrc_mutex);
871 for (p = data->mem_db.next; p != &data->mem_db; p = q) { 870 for (p = data->mem_db.next; p != &data->mem_db; p = q) {
872 q = p->next; 871 q = p->next;
873 kfree(p); 872 kfree(p);
@@ -876,7 +875,7 @@ static void nonstatic_release_resource_db(struct pcmcia_socket *s)
876 q = p->next; 875 q = p->next;
877 kfree(p); 876 kfree(p);
878 } 877 }
879 up(&rsrc_sem); 878 mutex_unlock(&rsrc_mutex);
880} 879}
881 880
882 881
@@ -901,7 +900,7 @@ static ssize_t show_io_db(struct class_device *class_dev, char *buf)
901 struct resource_map *p; 900 struct resource_map *p;
902 ssize_t ret = 0; 901 ssize_t ret = 0;
903 902
904 down(&rsrc_sem); 903 mutex_lock(&rsrc_mutex);
905 data = s->resource_data; 904 data = s->resource_data;
906 905
907 for (p = data->io_db.next; p != &data->io_db; p = p->next) { 906 for (p = data->io_db.next; p != &data->io_db; p = p->next) {
@@ -913,7 +912,7 @@ static ssize_t show_io_db(struct class_device *class_dev, char *buf)
913 ((unsigned long) p->base + p->num - 1)); 912 ((unsigned long) p->base + p->num - 1));
914 } 913 }
915 914
916 up(&rsrc_sem); 915 mutex_unlock(&rsrc_mutex);
917 return (ret); 916 return (ret);
918} 917}
919 918
@@ -953,7 +952,7 @@ static ssize_t show_mem_db(struct class_device *class_dev, char *buf)
953 struct resource_map *p; 952 struct resource_map *p;
954 ssize_t ret = 0; 953 ssize_t ret = 0;
955 954
956 down(&rsrc_sem); 955 mutex_lock(&rsrc_mutex);
957 data = s->resource_data; 956 data = s->resource_data;
958 957
959 for (p = data->mem_db.next; p != &data->mem_db; p = p->next) { 958 for (p = data->mem_db.next; p != &data->mem_db; p = p->next) {
@@ -965,7 +964,7 @@ static ssize_t show_mem_db(struct class_device *class_dev, char *buf)
965 ((unsigned long) p->base + p->num - 1)); 964 ((unsigned long) p->base + p->num - 1));
966 } 965 }
967 966
968 up(&rsrc_sem); 967 mutex_unlock(&rsrc_mutex);
969 return (ret); 968 return (ret);
970} 969}
971 970
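rsrc_nonstatic.c shows the lock conversion that runs through this whole merge: the semaphore rsrc_sem becomes the mutex rsrc_mutex (DECLARE_MUTEX to DEFINE_MUTEX) and each down()/up() pair becomes mutex_lock()/mutex_unlock(); the skt_sem to skt_mutex switch in the other files follows the same recipe. The mechanical shape of such a conversion, sketched on an invented lock:

#include <linux/mutex.h>

static DEFINE_MUTEX(my_mutex);          /* was: static DECLARE_MUTEX(my_sem); */

static void my_touch_resource_db(void)
{
        mutex_lock(&my_mutex);          /* was: down(&my_sem); */
        /* ... modify the shared resource database ... */
        mutex_unlock(&my_mutex);        /* was: up(&my_sem);   */
}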
diff --git a/drivers/pcmcia/sa1100_cerf.c b/drivers/pcmcia/sa1100_cerf.c
index 2b3c2895b43d..eb89928f2338 100644
--- a/drivers/pcmcia/sa1100_cerf.c
+++ b/drivers/pcmcia/sa1100_cerf.c
@@ -5,7 +5,6 @@
5 * Based off the Assabet. 5 * Based off the Assabet.
6 * 6 *
7 */ 7 */
8#include <linux/config.h>
9#include <linux/module.h> 8#include <linux/module.h>
10#include <linux/kernel.h> 9#include <linux/kernel.h>
11#include <linux/sched.h> 10#include <linux/sched.h>
diff --git a/drivers/pcmcia/socket_sysfs.c b/drivers/pcmcia/socket_sysfs.c
index 5ab1cdef7c48..c5d7476da471 100644
--- a/drivers/pcmcia/socket_sysfs.c
+++ b/drivers/pcmcia/socket_sysfs.c
@@ -12,7 +12,6 @@
12#include <linux/moduleparam.h> 12#include <linux/moduleparam.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/config.h>
16#include <linux/string.h> 15#include <linux/string.h>
17#include <linux/major.h> 16#include <linux/major.h>
18#include <linux/errno.h> 17#include <linux/errno.h>
@@ -25,6 +24,7 @@
25#include <linux/pm.h> 24#include <linux/pm.h>
26#include <linux/pci.h> 25#include <linux/pci.h>
27#include <linux/device.h> 26#include <linux/device.h>
27#include <linux/mutex.h>
28#include <asm/system.h> 28#include <asm/system.h>
29#include <asm/irq.h> 29#include <asm/irq.h>
30 30
@@ -183,7 +183,7 @@ static ssize_t pccard_store_resource(struct class_device *dev, const char *buf,
183 s->resource_setup_done = 1; 183 s->resource_setup_done = 1;
184 spin_unlock_irqrestore(&s->lock, flags); 184 spin_unlock_irqrestore(&s->lock, flags);
185 185
186 down(&s->skt_sem); 186 mutex_lock(&s->skt_mutex);
187 if ((s->callback) && 187 if ((s->callback) &&
188 (s->state & SOCKET_PRESENT) && 188 (s->state & SOCKET_PRESENT) &&
189 !(s->state & SOCKET_CARDBUS)) { 189 !(s->state & SOCKET_CARDBUS)) {
@@ -192,7 +192,7 @@ static ssize_t pccard_store_resource(struct class_device *dev, const char *buf,
192 module_put(s->callback->owner); 192 module_put(s->callback->owner);
193 } 193 }
194 } 194 }
195 up(&s->skt_sem); 195 mutex_unlock(&s->skt_mutex);
196 196
197 return count; 197 return count;
198} 198}
@@ -322,7 +322,7 @@ static ssize_t pccard_store_cis(struct kobject *kobj, char *buf, loff_t off, siz
322 kfree(cis); 322 kfree(cis);
323 323
324 if (!ret) { 324 if (!ret) {
325 down(&s->skt_sem); 325 mutex_lock(&s->skt_mutex);
326 if ((s->callback) && (s->state & SOCKET_PRESENT) && 326 if ((s->callback) && (s->state & SOCKET_PRESENT) &&
327 !(s->state & SOCKET_CARDBUS)) { 327 !(s->state & SOCKET_CARDBUS)) {
328 if (try_module_get(s->callback->owner)) { 328 if (try_module_get(s->callback->owner)) {
@@ -330,7 +330,7 @@ static ssize_t pccard_store_cis(struct kobject *kobj, char *buf, loff_t off, siz
330 module_put(s->callback->owner); 330 module_put(s->callback->owner);
331 } 331 }
332 } 332 }
333 up(&s->skt_sem); 333 mutex_unlock(&s->skt_mutex);
334 } 334 }
335 335
336 336
diff --git a/drivers/pcmcia/ti113x.h b/drivers/pcmcia/ti113x.h
index d5b4ff74462e..7a3d1b8e16b9 100644
--- a/drivers/pcmcia/ti113x.h
+++ b/drivers/pcmcia/ti113x.h
@@ -30,7 +30,6 @@
30#ifndef _LINUX_TI113X_H 30#ifndef _LINUX_TI113X_H
31#define _LINUX_TI113X_H 31#define _LINUX_TI113X_H
32 32
33#include <linux/config.h>
34 33
35/* Register definitions for TI 113X PCI-to-CardBus bridges */ 34/* Register definitions for TI 113X PCI-to-CardBus bridges */
36 35
diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
index 0574efd7828a..459e6e1946fd 100644
--- a/drivers/pcmcia/vrc4171_card.c
+++ b/drivers/pcmcia/vrc4171_card.c
@@ -634,7 +634,7 @@ static void vrc4171_remove_sockets(void)
634static int __devinit vrc4171_card_setup(char *options) 634static int __devinit vrc4171_card_setup(char *options)
635{ 635{
636 if (options == NULL || *options == '\0') 636 if (options == NULL || *options == '\0')
637 return 0; 637 return 1;
638 638
639 if (strncmp(options, "irq:", 4) == 0) { 639 if (strncmp(options, "irq:", 4) == 0) {
640 int irq; 640 int irq;
@@ -644,7 +644,7 @@ static int __devinit vrc4171_card_setup(char *options)
644 vrc4171_irq = irq; 644 vrc4171_irq = irq;
645 645
646 if (*options != ',') 646 if (*options != ',')
647 return 0; 647 return 1;
648 options++; 648 options++;
649 } 649 }
650 650
@@ -663,10 +663,10 @@ static int __devinit vrc4171_card_setup(char *options)
663 } 663 }
664 664
665 if (*options != ',') 665 if (*options != ',')
666 return 0; 666 return 1;
667 options++; 667 options++;
668 } else 668 } else
669 return 0; 669 return 1;
670 670
671 } 671 }
672 672
@@ -688,7 +688,7 @@ static int __devinit vrc4171_card_setup(char *options)
688 } 688 }
689 689
690 if (*options != ',') 690 if (*options != ',')
691 return 0; 691 return 1;
692 options++; 692 options++;
693 693
694 if (strncmp(options, "memnoprobe", 10) == 0) 694 if (strncmp(options, "memnoprobe", 10) == 0)
@@ -700,7 +700,7 @@ static int __devinit vrc4171_card_setup(char *options)
700 } 700 }
701 } 701 }
702 702
703 return 0; 703 return 1;
704} 704}
705 705
706__setup("vrc4171_card=", vrc4171_card_setup); 706__setup("vrc4171_card=", vrc4171_card_setup);
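The return value changes in vrc4171_card_setup() (and in vrc4173_cardu_setup() below) follow the __setup() convention: returning 1 tells the boot-option parser the argument was consumed, whereas returning 0 makes "vrc4171_card=..." be treated as unrecognized and passed on towards init. A minimal handler of this shape, with an invented option name (assumes <linux/init.h> and <linux/kernel.h>):

static int my_option __initdata;

static int __init my_driver_setup(char *str)
{
        /* Consume "my_driver=<n>"; always return 1 so the option is not
         * forwarded as an unknown boot parameter. */
        if (str && *str)
                my_option = simple_strtoul(str, NULL, 0);
        return 1;
}
__setup("my_driver=", my_driver_setup);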
diff --git a/drivers/pcmcia/vrc4173_cardu.c b/drivers/pcmcia/vrc4173_cardu.c
index 57f38dba0a48..6004196f7cc1 100644
--- a/drivers/pcmcia/vrc4173_cardu.c
+++ b/drivers/pcmcia/vrc4173_cardu.c
@@ -516,7 +516,7 @@ static int __devinit vrc4173_cardu_probe(struct pci_dev *dev,
516static int __devinit vrc4173_cardu_setup(char *options) 516static int __devinit vrc4173_cardu_setup(char *options)
517{ 517{
518 if (options == NULL || *options == '\0') 518 if (options == NULL || *options == '\0')
519 return 0; 519 return 1;
520 520
521 if (strncmp(options, "cardu1:", 7) == 0) { 521 if (strncmp(options, "cardu1:", 7) == 0) {
522 options += 7; 522 options += 7;
@@ -527,9 +527,9 @@ static int __devinit vrc4173_cardu_setup(char *options)
527 } 527 }
528 528
529 if (*options != ',') 529 if (*options != ',')
530 return 0; 530 return 1;
531 } else 531 } else
532 return 0; 532 return 1;
533 } 533 }
534 534
535 if (strncmp(options, "cardu2:", 7) == 0) { 535 if (strncmp(options, "cardu2:", 7) == 0) {
@@ -538,7 +538,7 @@ static int __devinit vrc4173_cardu_setup(char *options)
538 cardu_sockets[CARDU2].noprobe = 1; 538 cardu_sockets[CARDU2].noprobe = 1;
539 } 539 }
540 540
541 return 0; 541 return 1;
542} 542}
543 543
544__setup("vrc4173_cardu=", vrc4173_cardu_setup); 544__setup("vrc4173_cardu=", vrc4173_cardu_setup);
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index 8fd71ab02ef0..b842377cb0c6 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -32,9 +32,8 @@ dasd_alloc_erp_request(char *magic, int cplength, int datasize,
32 int size; 32 int size;
33 33
34 /* Sanity checks */ 34 /* Sanity checks */
35 if ( magic == NULL || datasize > PAGE_SIZE || 35 BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
36 (cplength*sizeof(struct ccw1)) > PAGE_SIZE) 36 (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
37 BUG();
38 37
39 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L; 38 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
40 if (cplength > 0) 39 if (cplength > 0)
@@ -125,8 +124,7 @@ dasd_default_erp_postaction(struct dasd_ccw_req * cqr)
125 struct dasd_device *device; 124 struct dasd_device *device;
126 int success; 125 int success;
127 126
128 if (cqr->refers == NULL || cqr->function == NULL) 127 BUG_ON(cqr->refers == NULL || cqr->function == NULL);
129 BUG();
130 128
131 device = cqr->device; 129 device = cqr->device;
132 success = cqr->status == DASD_CQR_DONE; 130 success = cqr->status == DASD_CQR_DONE;
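The dasd_erp.c hunks, like the tape_block.c and lcs.c hunks further down, replace open-coded "if (condition) BUG();" with BUG_ON(condition). The semantics are identical; BUG_ON() is simply shorter and keeps the assertion readable at the call site, as in:

/* Assumes <linux/kernel.h> and <asm/page.h> for BUG_ON() and PAGE_SIZE. */
static void my_sanity_check(const void *magic, size_t datasize)
{
        /* Before:
         *      if (magic == NULL || datasize > PAGE_SIZE)
         *              BUG();
         * After, same behaviour: */
        BUG_ON(magic == NULL || datasize > PAGE_SIZE);
}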
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index ac10dfb20a62..91e93c78f57a 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -24,7 +24,7 @@
24 24
25/* 25/*
26 * The room for the SCCB (only for writing) is not equal to a pages size 26 * The room for the SCCB (only for writing) is not equal to a pages size
27 * (as it is specified as the maximum size in the the SCLP ducumentation) 27 * (as it is specified as the maximum size in the the SCLP documentation)
28 * because of the additional data structure described above. 28 * because of the additional data structure described above.
29 */ 29 */
30#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer)) 30#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 5ced2725d6c7..5c65cf3e5cc0 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -198,9 +198,7 @@ tapeblock_request_fn(request_queue_t *queue)
198 198
199 device = (struct tape_device *) queue->queuedata; 199 device = (struct tape_device *) queue->queuedata;
200 DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device); 200 DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device);
201 if (device == NULL) 201 BUG_ON(device == NULL);
202 BUG();
203
204 tapeblock_trigger_requeue(device); 202 tapeblock_trigger_requeue(device);
205} 203}
206 204
@@ -307,8 +305,7 @@ tapeblock_revalidate_disk(struct gendisk *disk)
307 int rc; 305 int rc;
308 306
309 device = (struct tape_device *) disk->private_data; 307 device = (struct tape_device *) disk->private_data;
310 if (!device) 308 BUG_ON(!device);
311 BUG();
312 309
313 if (!device->blk_data.medium_changed) 310 if (!device->blk_data.medium_changed)
314 return 0; 311 return 0;
@@ -440,11 +437,9 @@ tapeblock_ioctl(
440 437
441 rc = 0; 438 rc = 0;
442 disk = inode->i_bdev->bd_disk; 439 disk = inode->i_bdev->bd_disk;
443 if (!disk) 440 BUG_ON(!disk);
444 BUG();
445 device = disk->private_data; 441 device = disk->private_data;
446 if (!device) 442 BUG_ON(!device);
447 BUG();
448 minor = iminor(inode); 443 minor = iminor(inode);
449 444
450 DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command); 445 DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command);
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index edcf05d5d568..5d6b7a57b02f 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -675,9 +675,8 @@ lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
675 int index, rc; 675 int index, rc;
676 676
677 LCS_DBF_TEXT(5, trace, "rdybuff"); 677 LCS_DBF_TEXT(5, trace, "rdybuff");
678 if (buffer->state != BUF_STATE_LOCKED && 678 BUG_ON(buffer->state != BUF_STATE_LOCKED &&
679 buffer->state != BUF_STATE_PROCESSED) 679 buffer->state != BUF_STATE_PROCESSED);
680 BUG();
681 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 680 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
682 buffer->state = BUF_STATE_READY; 681 buffer->state = BUF_STATE_READY;
683 index = buffer - channel->iob; 682 index = buffer - channel->iob;
@@ -701,8 +700,7 @@ __lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
701 int index, prev, next; 700 int index, prev, next;
702 701
703 LCS_DBF_TEXT(5, trace, "prcsbuff"); 702 LCS_DBF_TEXT(5, trace, "prcsbuff");
704 if (buffer->state != BUF_STATE_READY) 703 BUG_ON(buffer->state != BUF_STATE_READY);
705 BUG();
706 buffer->state = BUF_STATE_PROCESSED; 704 buffer->state = BUF_STATE_PROCESSED;
707 index = buffer - channel->iob; 705 index = buffer - channel->iob;
708 prev = (index - 1) & (LCS_NUM_BUFFS - 1); 706 prev = (index - 1) & (LCS_NUM_BUFFS - 1);
@@ -734,9 +732,8 @@ lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
734 unsigned long flags; 732 unsigned long flags;
735 733
736 LCS_DBF_TEXT(5, trace, "relbuff"); 734 LCS_DBF_TEXT(5, trace, "relbuff");
737 if (buffer->state != BUF_STATE_LOCKED && 735 BUG_ON(buffer->state != BUF_STATE_LOCKED &&
738 buffer->state != BUF_STATE_PROCESSED) 736 buffer->state != BUF_STATE_PROCESSED);
739 BUG();
740 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 737 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
741 buffer->state = BUF_STATE_EMPTY; 738 buffer->state = BUF_STATE_EMPTY;
742 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); 739 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index ffba65656a83..1bd82c4e52a0 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -293,6 +293,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
293 board_ahci }, /* JMicron JMB360 */ 293 board_ahci }, /* JMicron JMB360 */
294 { 0x197b, 0x2363, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 294 { 0x197b, 0x2363, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
295 board_ahci }, /* JMicron JMB363 */ 295 board_ahci }, /* JMicron JMB363 */
296 { PCI_VENDOR_ID_ATI, 0x4380, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
297 board_ahci }, /* ATI SB600 non-raid */
298 { PCI_VENDOR_ID_ATI, 0x4381, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
299 board_ahci }, /* ATI SB600 raid */
296 { } /* terminate list */ 300 { } /* terminate list */
297}; 301};
298 302
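Annotation: the ahci hunk adds the ATI SB600 controller (PCI device IDs 0x4380 and 0x4381) to the driver's pci_device_id table so the generic AHCI driver binds to it; each entry matches on vendor/device ID and carries board_ahci as driver_data. A rough sketch of how such a table is declared and consumed at probe time — the probe body and the "example_" names are illustrative, not taken from ahci.c:

static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_VENDOR_ID_ATI, 0x4380, PCI_ANY_ID, PCI_ANY_ID, 0, 0, board_ahci },
	{ }	/* all-zero entry terminates the table */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned long board_idx = ent->driver_data;	/* board_ahci for the IDs above */

	/* board-specific controller setup would be keyed on board_idx */
	return 0;
}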
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic7xxx b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
index 6c2c395554ff..5517da5855f0 100644
--- a/drivers/scsi/aic7xxx/Kconfig.aic7xxx
+++ b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
@@ -86,7 +86,7 @@ config AIC7XXX_DEBUG_MASK
86 default "0" 86 default "0"
87 help 87 help
88 Bit mask of debug options that is only valid if the 88 Bit mask of debug options that is only valid if the
89 CONFIG_AIC7XXX_DEBUG_ENBLE option is enabled. The bits in this mask 89 CONFIG_AIC7XXX_DEBUG_ENABLE option is enabled. The bits in this mask
90 are defined in the drivers/scsi/aic7xxx/aic7xxx.h - search for the 90 are defined in the drivers/scsi/aic7xxx/aic7xxx.h - search for the
91 variable ahc_debug in that file to find them. 91 variable ahc_debug in that file to find them.
92 92
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 2d5be84d8bd4..24e71b555172 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -301,7 +301,7 @@ static struct piix_map_db ich6_map_db = {
301 .mask = 0x3, 301 .mask = 0x3,
302 .map = { 302 .map = {
303 /* PM PS SM SS MAP */ 303 /* PM PS SM SS MAP */
304 { P0, P1, P2, P3 }, /* 00b */ 304 { P0, P2, P1, P3 }, /* 00b */
305 { IDE, IDE, P1, P3 }, /* 01b */ 305 { IDE, IDE, P1, P3 }, /* 01b */
306 { P0, P2, IDE, IDE }, /* 10b */ 306 { P0, P2, IDE, IDE }, /* 10b */
307 { RV, RV, RV, RV }, 307 { RV, RV, RV, RV },
@@ -312,7 +312,7 @@ static struct piix_map_db ich6m_map_db = {
312 .mask = 0x3, 312 .mask = 0x3,
313 .map = { 313 .map = {
314 /* PM PS SM SS MAP */ 314 /* PM PS SM SS MAP */
315 { P0, P1, P2, P3 }, /* 00b */ 315 { P0, P2, RV, RV }, /* 00b */
316 { RV, RV, RV, RV }, 316 { RV, RV, RV, RV },
317 { P0, P2, IDE, IDE }, /* 10b */ 317 { P0, P2, IDE, IDE }, /* 10b */
318 { RV, RV, RV, RV }, 318 { RV, RV, RV, RV },
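Annotation: the ata_piix hunks fix the ICH6 and ICH6M port-map tables. Each row is selected by the MAP bits read from the controller's port-mapping register and lists, in PM/PS/SM/SS order, which libata port (or IDE/reserved slot) sits at each position; the 00b rows were wrong, so the ICH6 secondary master now reads P1 and the mobile ICH6M row advertises only its two real SATA ports. A sketch of the lookup such a table implies — the register read and variable names here are assumptions, not copied from ata_piix.c:

	/* map_value would come from the chipset's port-mapping register */
	const int *map = ich6_map_db.map[map_value & ich6_map_db.mask];
	int primary_master   = map[0];	/* PM column */
	int primary_slave    = map[1];	/* PS column */
	int secondary_master = map[2];	/* SM column */
	int secondary_slave  = map[3];	/* SS column */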
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 3a8462e8d063..24eb59e143a9 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -2488,7 +2488,7 @@ static int option_setup(char *str)
2488 } 2488 }
2489 ints[0] = i - 1; 2489 ints[0] = i - 1;
2490 internal_ibmmca_scsi_setup(cur, ints); 2490 internal_ibmmca_scsi_setup(cur, ints);
2491 return 0; 2491 return 1;
2492} 2492}
2493 2493
2494__setup("ibmmcascsi=", option_setup); 2494__setup("ibmmcascsi=", option_setup);
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index f47dd87c05e7..892e8ed63091 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -80,7 +80,7 @@ void ibmvscsi_release_crq_queue(struct crq_queue *queue,
80 tasklet_kill(&hostdata->srp_task); 80 tasklet_kill(&hostdata->srp_task);
81 do { 81 do {
82 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 82 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
83 } while ((rc == H_Busy) || (H_isLongBusy(rc))); 83 } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
84 dma_unmap_single(hostdata->dev, 84 dma_unmap_single(hostdata->dev,
85 queue->msg_token, 85 queue->msg_token,
86 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL); 86 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
@@ -230,7 +230,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
230 rc = plpar_hcall_norets(H_REG_CRQ, 230 rc = plpar_hcall_norets(H_REG_CRQ,
231 vdev->unit_address, 231 vdev->unit_address,
232 queue->msg_token, PAGE_SIZE); 232 queue->msg_token, PAGE_SIZE);
233 if (rc == H_Resource) 233 if (rc == H_RESOURCE)
234 /* maybe kexecing and resource is busy. try a reset */ 234 /* maybe kexecing and resource is busy. try a reset */
235 rc = ibmvscsi_reset_crq_queue(queue, 235 rc = ibmvscsi_reset_crq_queue(queue,
236 hostdata); 236 hostdata);
@@ -269,7 +269,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
269 req_irq_failed: 269 req_irq_failed:
270 do { 270 do {
271 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 271 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
272 } while ((rc == H_Busy) || (H_isLongBusy(rc))); 272 } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
273 reg_crq_failed: 273 reg_crq_failed:
274 dma_unmap_single(hostdata->dev, 274 dma_unmap_single(hostdata->dev,
275 queue->msg_token, 275 queue->msg_token,
@@ -295,7 +295,7 @@ int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
295 /* Re-enable the CRQ */ 295 /* Re-enable the CRQ */
296 do { 296 do {
297 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address); 297 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
298 } while ((rc == H_InProgress) || (rc == H_Busy) || (H_isLongBusy(rc))); 298 } while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
299 299
300 if (rc) 300 if (rc)
301 printk(KERN_ERR "ibmvscsi: Error %d enabling adapter\n", rc); 301 printk(KERN_ERR "ibmvscsi: Error %d enabling adapter\n", rc);
@@ -317,7 +317,7 @@ int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
317 /* Close the CRQ */ 317 /* Close the CRQ */
318 do { 318 do {
319 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 319 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
320 } while ((rc == H_Busy) || (H_isLongBusy(rc))); 320 } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
321 321
322 /* Clean out the queue */ 322 /* Clean out the queue */
323 memset(queue->msgs, 0x00, PAGE_SIZE); 323 memset(queue->msgs, 0x00, PAGE_SIZE);
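Annotation: the rpa_vscsi hunks only track a rename of the pSeries hypervisor return-code macros (H_Busy becomes H_BUSY, H_isLongBusy() becomes H_IS_LONG_BUSY(), and so on); the logic is untouched. The surrounding code is the usual hcall retry loop: keep re-issuing the call while the hypervisor reports a busy condition. The pattern as used in these hunks (vdev comes from the driver context):

	long rc;

	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));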
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 21b0ed583b8a..e63c1ff1e102 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -278,7 +278,7 @@ static void ata_unpack_xfermask(unsigned int xfer_mask,
278} 278}
279 279
280static const struct ata_xfer_ent { 280static const struct ata_xfer_ent {
281 unsigned int shift, bits; 281 int shift, bits;
282 u8 base; 282 u8 base;
283} ata_xfer_tbl[] = { 283} ata_xfer_tbl[] = {
284 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 }, 284 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
@@ -989,9 +989,7 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
989 qc->private_data = &wait; 989 qc->private_data = &wait;
990 qc->complete_fn = ata_qc_complete_internal; 990 qc->complete_fn = ata_qc_complete_internal;
991 991
992 qc->err_mask = ata_qc_issue(qc); 992 ata_qc_issue(qc);
993 if (qc->err_mask)
994 ata_qc_complete(qc);
995 993
996 spin_unlock_irqrestore(&ap->host_set->lock, flags); 994 spin_unlock_irqrestore(&ap->host_set->lock, flags);
997 995
@@ -3997,15 +3995,14 @@ static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3997 * 3995 *
3998 * LOCKING: 3996 * LOCKING:
3999 * spin_lock_irqsave(host_set lock) 3997 * spin_lock_irqsave(host_set lock)
4000 *
4001 * RETURNS:
4002 * Zero on success, AC_ERR_* mask on failure
4003 */ 3998 */
4004 3999void ata_qc_issue(struct ata_queued_cmd *qc)
4005unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
4006{ 4000{
4007 struct ata_port *ap = qc->ap; 4001 struct ata_port *ap = qc->ap;
4008 4002
4003 qc->ap->active_tag = qc->tag;
4004 qc->flags |= ATA_QCFLAG_ACTIVE;
4005
4009 if (ata_should_dma_map(qc)) { 4006 if (ata_should_dma_map(qc)) {
4010 if (qc->flags & ATA_QCFLAG_SG) { 4007 if (qc->flags & ATA_QCFLAG_SG) {
4011 if (ata_sg_setup(qc)) 4008 if (ata_sg_setup(qc))
@@ -4020,17 +4017,18 @@ unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
4020 4017
4021 ap->ops->qc_prep(qc); 4018 ap->ops->qc_prep(qc);
4022 4019
4023 qc->ap->active_tag = qc->tag; 4020 qc->err_mask |= ap->ops->qc_issue(qc);
4024 qc->flags |= ATA_QCFLAG_ACTIVE; 4021 if (unlikely(qc->err_mask))
4025 4022 goto err;
4026 return ap->ops->qc_issue(qc); 4023 return;
4027 4024
4028sg_err: 4025sg_err:
4029 qc->flags &= ~ATA_QCFLAG_DMAMAP; 4026 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4030 return AC_ERR_SYSTEM; 4027 qc->err_mask |= AC_ERR_SYSTEM;
4028err:
4029 ata_qc_complete(qc);
4031} 4030}
4032 4031
4033
4034/** 4032/**
4035 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner 4033 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4036 * @qc: command to issue to device 4034 * @qc: command to issue to device
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 628191bfd990..53f5b0d9161c 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -1431,9 +1431,7 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
1431 goto early_finish; 1431 goto early_finish;
1432 1432
1433 /* select device, send command to hardware */ 1433 /* select device, send command to hardware */
1434 qc->err_mask = ata_qc_issue(qc); 1434 ata_qc_issue(qc);
1435 if (qc->err_mask)
1436 ata_qc_complete(qc);
1437 1435
1438 VPRINTK("EXIT\n"); 1436 VPRINTK("EXIT\n");
1439 return; 1437 return;
@@ -2199,9 +2197,7 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
2199 2197
2200 qc->complete_fn = atapi_sense_complete; 2198 qc->complete_fn = atapi_sense_complete;
2201 2199
2202 qc->err_mask = ata_qc_issue(qc); 2200 ata_qc_issue(qc);
2203 if (qc->err_mask)
2204 ata_qc_complete(qc);
2205 2201
2206 DPRINTK("EXIT\n"); 2202 DPRINTK("EXIT\n");
2207} 2203}
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index 65f52beea884..1c755b14521a 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -47,7 +47,7 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
47extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc); 47extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
48extern void ata_port_flush_task(struct ata_port *ap); 48extern void ata_port_flush_task(struct ata_port *ap);
49extern void ata_qc_free(struct ata_queued_cmd *qc); 49extern void ata_qc_free(struct ata_queued_cmd *qc);
50extern unsigned int ata_qc_issue(struct ata_queued_cmd *qc); 50extern void ata_qc_issue(struct ata_queued_cmd *qc);
51extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); 51extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
52extern void ata_dev_select(struct ata_port *ap, unsigned int device, 52extern void ata_dev_select(struct ata_port *ap, unsigned int device,
53 unsigned int wait, unsigned int can_sleep); 53 unsigned int wait, unsigned int can_sleep);
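Annotation: taken together, the libata-core.c, libata-scsi.c and libata.h hunks change the calling convention of ata_qc_issue(): it now returns void, marks the qc active itself, and on any failure fills qc->err_mask and calls ata_qc_complete() internally, so every caller drops the old issue-then-complete-on-error sequence. Caller shape before and after, as a sketch:

	/* old convention */
	qc->err_mask = ata_qc_issue(qc);
	if (qc->err_mask)
		ata_qc_complete(qc);

	/* new convention: error completion happens inside ata_qc_issue() */
	ata_qc_issue(qc);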
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c
index 5609847e254a..ee449b29fc82 100644
--- a/drivers/scsi/pcmcia/aha152x_stub.c
+++ b/drivers/scsi/pcmcia/aha152x_stub.c
@@ -89,29 +89,29 @@ MODULE_LICENSE("Dual MPL/GPL");
89/*====================================================================*/ 89/*====================================================================*/
90 90
91typedef struct scsi_info_t { 91typedef struct scsi_info_t {
92 dev_link_t link; 92 struct pcmcia_device *p_dev;
93 dev_node_t node; 93 dev_node_t node;
94 struct Scsi_Host *host; 94 struct Scsi_Host *host;
95} scsi_info_t; 95} scsi_info_t;
96 96
97static void aha152x_release_cs(dev_link_t *link); 97static void aha152x_release_cs(struct pcmcia_device *link);
98static void aha152x_detach(struct pcmcia_device *p_dev); 98static void aha152x_detach(struct pcmcia_device *p_dev);
99static void aha152x_config_cs(dev_link_t *link); 99static int aha152x_config_cs(struct pcmcia_device *link);
100 100
101static dev_link_t *dev_list; 101static struct pcmcia_device *dev_list;
102 102
103static int aha152x_attach(struct pcmcia_device *p_dev) 103static int aha152x_probe(struct pcmcia_device *link)
104{ 104{
105 scsi_info_t *info; 105 scsi_info_t *info;
106 dev_link_t *link; 106
107
108 DEBUG(0, "aha152x_attach()\n"); 107 DEBUG(0, "aha152x_attach()\n");
109 108
110 /* Create new SCSI device */ 109 /* Create new SCSI device */
111 info = kmalloc(sizeof(*info), GFP_KERNEL); 110 info = kmalloc(sizeof(*info), GFP_KERNEL);
112 if (!info) return -ENOMEM; 111 if (!info) return -ENOMEM;
113 memset(info, 0, sizeof(*info)); 112 memset(info, 0, sizeof(*info));
114 link = &info->link; link->priv = info; 113 info->p_dev = link;
114 link->priv = info;
115 115
116 link->io.NumPorts1 = 0x20; 116 link->io.NumPorts1 = 0x20;
117 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 117 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
@@ -119,41 +119,22 @@ static int aha152x_attach(struct pcmcia_device *p_dev)
119 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 119 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
120 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 120 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
121 link->conf.Attributes = CONF_ENABLE_IRQ; 121 link->conf.Attributes = CONF_ENABLE_IRQ;
122 link->conf.Vcc = 50;
123 link->conf.IntType = INT_MEMORY_AND_IO; 122 link->conf.IntType = INT_MEMORY_AND_IO;
124 link->conf.Present = PRESENT_OPTION; 123 link->conf.Present = PRESENT_OPTION;
125 124
126 link->handle = p_dev; 125 return aha152x_config_cs(link);
127 p_dev->instance = link;
128
129 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
130 aha152x_config_cs(link);
131
132 return 0;
133} /* aha152x_attach */ 126} /* aha152x_attach */
134 127
135/*====================================================================*/ 128/*====================================================================*/
136 129
137static void aha152x_detach(struct pcmcia_device *p_dev) 130static void aha152x_detach(struct pcmcia_device *link)
138{ 131{
139 dev_link_t *link = dev_to_instance(p_dev);
140 dev_link_t **linkp;
141
142 DEBUG(0, "aha152x_detach(0x%p)\n", link); 132 DEBUG(0, "aha152x_detach(0x%p)\n", link);
143
144 /* Locate device structure */
145 for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
146 if (*linkp == link) break;
147 if (*linkp == NULL)
148 return;
149 133
150 if (link->state & DEV_CONFIG) 134 aha152x_release_cs(link);
151 aha152x_release_cs(link);
152 135
153 /* Unlink device structure, free bits */ 136 /* Unlink device structure, free bits */
154 *linkp = link->next;
155 kfree(link->priv); 137 kfree(link->priv);
156
157} /* aha152x_detach */ 138} /* aha152x_detach */
158 139
159/*====================================================================*/ 140/*====================================================================*/
@@ -161,9 +142,8 @@ static void aha152x_detach(struct pcmcia_device *p_dev)
161#define CS_CHECK(fn, ret) \ 142#define CS_CHECK(fn, ret) \
162do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 143do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
163 144
164static void aha152x_config_cs(dev_link_t *link) 145static int aha152x_config_cs(struct pcmcia_device *link)
165{ 146{
166 client_handle_t handle = link->handle;
167 scsi_info_t *info = link->priv; 147 scsi_info_t *info = link->priv;
168 struct aha152x_setup s; 148 struct aha152x_setup s;
169 tuple_t tuple; 149 tuple_t tuple;
@@ -178,19 +158,16 @@ static void aha152x_config_cs(dev_link_t *link)
178 tuple.TupleData = tuple_data; 158 tuple.TupleData = tuple_data;
179 tuple.TupleDataMax = 64; 159 tuple.TupleDataMax = 64;
180 tuple.TupleOffset = 0; 160 tuple.TupleOffset = 0;
181 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 161 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
182 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 162 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
183 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 163 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
184 link->conf.ConfigBase = parse.config.base; 164 link->conf.ConfigBase = parse.config.base;
185 165
186 /* Configure card */
187 link->state |= DEV_CONFIG;
188
189 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 166 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
190 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 167 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
191 while (1) { 168 while (1) {
192 if (pcmcia_get_tuple_data(handle, &tuple) != 0 || 169 if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
193 pcmcia_parse_tuple(handle, &tuple, &parse) != 0) 170 pcmcia_parse_tuple(link, &tuple, &parse) != 0)
194 goto next_entry; 171 goto next_entry;
195 /* For New Media T&J, look for a SCSI window */ 172 /* For New Media T&J, look for a SCSI window */
196 if (parse.cftable_entry.io.win[0].len >= 0x20) 173 if (parse.cftable_entry.io.win[0].len >= 0x20)
@@ -201,15 +178,15 @@ static void aha152x_config_cs(dev_link_t *link)
201 if ((parse.cftable_entry.io.nwin > 0) && 178 if ((parse.cftable_entry.io.nwin > 0) &&
202 (link->io.BasePort1 < 0xffff)) { 179 (link->io.BasePort1 < 0xffff)) {
203 link->conf.ConfigIndex = parse.cftable_entry.index; 180 link->conf.ConfigIndex = parse.cftable_entry.index;
204 i = pcmcia_request_io(handle, &link->io); 181 i = pcmcia_request_io(link, &link->io);
205 if (i == CS_SUCCESS) break; 182 if (i == CS_SUCCESS) break;
206 } 183 }
207 next_entry: 184 next_entry:
208 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple)); 185 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
209 } 186 }
210 187
211 CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq)); 188 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
212 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf)); 189 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
213 190
214 /* Set configuration options for the aha152x driver */ 191 /* Set configuration options for the aha152x driver */
215 memset(&s, 0, sizeof(s)); 192 memset(&s, 0, sizeof(s));
@@ -231,53 +208,30 @@ static void aha152x_config_cs(dev_link_t *link)
231 } 208 }
232 209
233 sprintf(info->node.dev_name, "scsi%d", host->host_no); 210 sprintf(info->node.dev_name, "scsi%d", host->host_no);
234 link->dev = &info->node; 211 link->dev_node = &info->node;
235 info->host = host; 212 info->host = host;
236 213
237 link->state &= ~DEV_CONFIG_PENDING; 214 return 0;
238 return; 215
239
240cs_failed: 216cs_failed:
241 cs_error(link->handle, last_fn, last_ret); 217 cs_error(link, last_fn, last_ret);
242 aha152x_release_cs(link); 218 aha152x_release_cs(link);
243 return; 219 return -ENODEV;
244} 220}
245 221
246static void aha152x_release_cs(dev_link_t *link) 222static void aha152x_release_cs(struct pcmcia_device *link)
247{ 223{
248 scsi_info_t *info = link->priv; 224 scsi_info_t *info = link->priv;
249 225
250 aha152x_release(info->host); 226 aha152x_release(info->host);
251 link->dev = NULL; 227 pcmcia_disable_device(link);
252
253 pcmcia_release_configuration(link->handle);
254 pcmcia_release_io(link->handle, &link->io);
255 pcmcia_release_irq(link->handle, &link->irq);
256
257 link->state &= ~DEV_CONFIG;
258} 228}
259 229
260static int aha152x_suspend(struct pcmcia_device *dev) 230static int aha152x_resume(struct pcmcia_device *link)
261{ 231{
262 dev_link_t *link = dev_to_instance(dev);
263
264 link->state |= DEV_SUSPEND;
265 if (link->state & DEV_CONFIG)
266 pcmcia_release_configuration(link->handle);
267
268 return 0;
269}
270
271static int aha152x_resume(struct pcmcia_device *dev)
272{
273 dev_link_t *link = dev_to_instance(dev);
274 scsi_info_t *info = link->priv; 232 scsi_info_t *info = link->priv;
275 233
276 link->state &= ~DEV_SUSPEND; 234 aha152x_host_reset_host(info->host);
277 if (link->state & DEV_CONFIG) {
278 pcmcia_request_configuration(link->handle, &link->conf);
279 aha152x_host_reset_host(info->host);
280 }
281 235
282 return 0; 236 return 0;
283} 237}
@@ -297,10 +251,9 @@ static struct pcmcia_driver aha152x_cs_driver = {
297 .drv = { 251 .drv = {
298 .name = "aha152x_cs", 252 .name = "aha152x_cs",
299 }, 253 },
300 .probe = aha152x_attach, 254 .probe = aha152x_probe,
301 .remove = aha152x_detach, 255 .remove = aha152x_detach,
302 .id_table = aha152x_ids, 256 .id_table = aha152x_ids,
303 .suspend = aha152x_suspend,
304 .resume = aha152x_resume, 257 .resume = aha152x_resume,
305}; 258};
306 259
@@ -317,4 +270,3 @@ static void __exit exit_aha152x_cs(void)
317 270
318module_init(init_aha152x_cs); 271module_init(init_aha152x_cs);
319module_exit(exit_aha152x_cs); 272module_exit(exit_aha152x_cs);
320
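Annotation: aha152x_stub is the first of several PCMCIA SCSI stubs in this merge converted to the newer struct pcmcia_device interface: the embedded dev_link_t and the driver-maintained dev_list go away, probe/config/release take the pcmcia_device pointer directly, config returns 0 or an errno instead of toggling DEV_CONFIG* state bits, and the release path collapses into pcmcia_disable_device(). A skeleton of the converted shape — the "example_" names and the info structure are illustrative, not from the patch:

struct example_info {
	struct pcmcia_device	*p_dev;
	dev_node_t		node;
};

static int example_config(struct pcmcia_device *link);	/* driver's tuple-walking setup */

static int example_probe(struct pcmcia_device *link)
{
	struct example_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	info->p_dev = link;
	link->priv  = info;

	return example_config(link);	/* 0 on success, -ENODEV on failure */
}

static void example_release(struct pcmcia_device *link)
{
	/* replaces pcmcia_release_configuration/_io/_irq and the DEV_CONFIG bit */
	pcmcia_disable_device(link);
}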
diff --git a/drivers/scsi/pcmcia/fdomain_stub.c b/drivers/scsi/pcmcia/fdomain_stub.c
index 788c58d805f3..85f7ffac19a0 100644
--- a/drivers/scsi/pcmcia/fdomain_stub.c
+++ b/drivers/scsi/pcmcia/fdomain_stub.c
@@ -73,57 +73,48 @@ static char *version =
73/*====================================================================*/ 73/*====================================================================*/
74 74
75typedef struct scsi_info_t { 75typedef struct scsi_info_t {
76 dev_link_t link; 76 struct pcmcia_device *p_dev;
77 dev_node_t node; 77 dev_node_t node;
78 struct Scsi_Host *host; 78 struct Scsi_Host *host;
79} scsi_info_t; 79} scsi_info_t;
80 80
81 81
82static void fdomain_release(dev_link_t *link); 82static void fdomain_release(struct pcmcia_device *link);
83static void fdomain_detach(struct pcmcia_device *p_dev); 83static void fdomain_detach(struct pcmcia_device *p_dev);
84static void fdomain_config(dev_link_t *link); 84static int fdomain_config(struct pcmcia_device *link);
85 85
86static int fdomain_attach(struct pcmcia_device *p_dev) 86static int fdomain_probe(struct pcmcia_device *link)
87{ 87{
88 scsi_info_t *info; 88 scsi_info_t *info;
89 dev_link_t *link; 89
90 90 DEBUG(0, "fdomain_attach()\n");
91 DEBUG(0, "fdomain_attach()\n"); 91
92 92 /* Create new SCSI device */
93 /* Create new SCSI device */ 93 info = kzalloc(sizeof(*info), GFP_KERNEL);
94 info = kmalloc(sizeof(*info), GFP_KERNEL); 94 if (!info)
95 if (!info) return -ENOMEM; 95 return -ENOMEM;
96 memset(info, 0, sizeof(*info)); 96
97 link = &info->link; link->priv = info; 97 info->p_dev = link;
98 link->io.NumPorts1 = 0x10; 98 link->priv = info;
99 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 99 link->io.NumPorts1 = 0x10;
100 link->io.IOAddrLines = 10; 100 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
101 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 101 link->io.IOAddrLines = 10;
102 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 102 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
103 link->conf.Attributes = CONF_ENABLE_IRQ; 103 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
104 link->conf.Vcc = 50; 104 link->conf.Attributes = CONF_ENABLE_IRQ;
105 link->conf.IntType = INT_MEMORY_AND_IO; 105 link->conf.IntType = INT_MEMORY_AND_IO;
106 link->conf.Present = PRESENT_OPTION; 106 link->conf.Present = PRESENT_OPTION;
107 107
108 link->handle = p_dev; 108 return fdomain_config(link);
109 p_dev->instance = link;
110
111 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
112 fdomain_config(link);
113
114 return 0;
115} /* fdomain_attach */ 109} /* fdomain_attach */
116 110
117/*====================================================================*/ 111/*====================================================================*/
118 112
119static void fdomain_detach(struct pcmcia_device *p_dev) 113static void fdomain_detach(struct pcmcia_device *link)
120{ 114{
121 dev_link_t *link = dev_to_instance(p_dev);
122
123 DEBUG(0, "fdomain_detach(0x%p)\n", link); 115 DEBUG(0, "fdomain_detach(0x%p)\n", link);
124 116
125 if (link->state & DEV_CONFIG) 117 fdomain_release(link);
126 fdomain_release(link);
127 118
128 kfree(link->priv); 119 kfree(link->priv);
129} /* fdomain_detach */ 120} /* fdomain_detach */
@@ -133,9 +124,8 @@ static void fdomain_detach(struct pcmcia_device *p_dev)
133#define CS_CHECK(fn, ret) \ 124#define CS_CHECK(fn, ret) \
134do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 125do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
135 126
136static void fdomain_config(dev_link_t *link) 127static int fdomain_config(struct pcmcia_device *link)
137{ 128{
138 client_handle_t handle = link->handle;
139 scsi_info_t *info = link->priv; 129 scsi_info_t *info = link->priv;
140 tuple_t tuple; 130 tuple_t tuple;
141 cisparse_t parse; 131 cisparse_t parse;
@@ -150,103 +140,75 @@ static void fdomain_config(dev_link_t *link)
150 tuple.TupleData = tuple_data; 140 tuple.TupleData = tuple_data;
151 tuple.TupleDataMax = 64; 141 tuple.TupleDataMax = 64;
152 tuple.TupleOffset = 0; 142 tuple.TupleOffset = 0;
153 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 143 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
154 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 144 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
155 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 145 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
156 link->conf.ConfigBase = parse.config.base; 146 link->conf.ConfigBase = parse.config.base;
157 147
158 /* Configure card */
159 link->state |= DEV_CONFIG;
160
161 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 148 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
162 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 149 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
163 while (1) { 150 while (1) {
164 if (pcmcia_get_tuple_data(handle, &tuple) != 0 || 151 if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
165 pcmcia_parse_tuple(handle, &tuple, &parse) != 0) 152 pcmcia_parse_tuple(link, &tuple, &parse) != 0)
166 goto next_entry; 153 goto next_entry;
167 link->conf.ConfigIndex = parse.cftable_entry.index; 154 link->conf.ConfigIndex = parse.cftable_entry.index;
168 link->io.BasePort1 = parse.cftable_entry.io.win[0].base; 155 link->io.BasePort1 = parse.cftable_entry.io.win[0].base;
169 i = pcmcia_request_io(handle, &link->io); 156 i = pcmcia_request_io(link, &link->io);
170 if (i == CS_SUCCESS) break; 157 if (i == CS_SUCCESS) break;
171 next_entry: 158 next_entry:
172 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple)); 159 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
173 } 160 }
174 161
175 CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq)); 162 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
176 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf)); 163 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
177 164
178 /* A bad hack... */ 165 /* A bad hack... */
179 release_region(link->io.BasePort1, link->io.NumPorts1); 166 release_region(link->io.BasePort1, link->io.NumPorts1);
180 167
181 /* Set configuration options for the fdomain driver */ 168 /* Set configuration options for the fdomain driver */
182 sprintf(str, "%d,%d", link->io.BasePort1, link->irq.AssignedIRQ); 169 sprintf(str, "%d,%d", link->io.BasePort1, link->irq.AssignedIRQ);
183 fdomain_setup(str); 170 fdomain_setup(str);
184 171
185 host = __fdomain_16x0_detect(&fdomain_driver_template); 172 host = __fdomain_16x0_detect(&fdomain_driver_template);
186 if (!host) { 173 if (!host) {
187 printk(KERN_INFO "fdomain_cs: no SCSI devices found\n"); 174 printk(KERN_INFO "fdomain_cs: no SCSI devices found\n");
188 goto cs_failed; 175 goto cs_failed;
189 } 176 }
190 177
191 scsi_add_host(host, NULL); /* XXX handle failure */ 178 if (scsi_add_host(host, NULL))
179 goto cs_failed;
192 scsi_scan_host(host); 180 scsi_scan_host(host);
193 181
194 sprintf(info->node.dev_name, "scsi%d", host->host_no); 182 sprintf(info->node.dev_name, "scsi%d", host->host_no);
195 link->dev = &info->node; 183 link->dev_node = &info->node;
196 info->host = host; 184 info->host = host;
197 185
198 link->state &= ~DEV_CONFIG_PENDING; 186 return 0;
199 return; 187
200
201cs_failed: 188cs_failed:
202 cs_error(link->handle, last_fn, last_ret); 189 cs_error(link, last_fn, last_ret);
203 fdomain_release(link); 190 fdomain_release(link);
204 return; 191 return -ENODEV;
205
206} /* fdomain_config */ 192} /* fdomain_config */
207 193
208/*====================================================================*/ 194/*====================================================================*/
209 195
210static void fdomain_release(dev_link_t *link) 196static void fdomain_release(struct pcmcia_device *link)
211{ 197{
212 scsi_info_t *info = link->priv; 198 scsi_info_t *info = link->priv;
213 199
214 DEBUG(0, "fdomain_release(0x%p)\n", link); 200 DEBUG(0, "fdomain_release(0x%p)\n", link);
215 201
216 scsi_remove_host(info->host); 202 scsi_remove_host(info->host);
217 link->dev = NULL; 203 pcmcia_disable_device(link);
218 204 scsi_unregister(info->host);
219 pcmcia_release_configuration(link->handle);
220 pcmcia_release_io(link->handle, &link->io);
221 pcmcia_release_irq(link->handle, &link->irq);
222
223 scsi_unregister(info->host);
224
225 link->state &= ~DEV_CONFIG;
226} 205}
227 206
228/*====================================================================*/ 207/*====================================================================*/
229 208
230static int fdomain_suspend(struct pcmcia_device *dev) 209static int fdomain_resume(struct pcmcia_device *link)
231{ 210{
232 dev_link_t *link = dev_to_instance(dev); 211 fdomain_16x0_bus_reset(NULL);
233
234 link->state |= DEV_SUSPEND;
235 if (link->state & DEV_CONFIG)
236 pcmcia_release_configuration(link->handle);
237
238 return 0;
239}
240
241static int fdomain_resume(struct pcmcia_device *dev)
242{
243 dev_link_t *link = dev_to_instance(dev);
244
245 link->state &= ~DEV_SUSPEND;
246 if (link->state & DEV_CONFIG) {
247 pcmcia_request_configuration(link->handle, &link->conf);
248 fdomain_16x0_bus_reset(NULL);
249 }
250 212
251 return 0; 213 return 0;
252} 214}
@@ -264,10 +226,9 @@ static struct pcmcia_driver fdomain_cs_driver = {
264 .drv = { 226 .drv = {
265 .name = "fdomain_cs", 227 .name = "fdomain_cs",
266 }, 228 },
267 .probe = fdomain_attach, 229 .probe = fdomain_probe,
268 .remove = fdomain_detach, 230 .remove = fdomain_detach,
269 .id_table = fdomain_ids, 231 .id_table = fdomain_ids,
270 .suspend = fdomain_suspend,
271 .resume = fdomain_resume, 232 .resume = fdomain_resume,
272}; 233};
273 234
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 9e3ab3fd5355..231f9c311c69 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1593,11 +1593,11 @@ static int nsp_eh_host_reset(Scsi_Cmnd *SCpnt)
1593 configure the card at this point -- we wait until we receive a 1593 configure the card at this point -- we wait until we receive a
1594 card insertion event. 1594 card insertion event.
1595======================================================================*/ 1595======================================================================*/
1596static int nsp_cs_attach(struct pcmcia_device *p_dev) 1596static int nsp_cs_probe(struct pcmcia_device *link)
1597{ 1597{
1598 scsi_info_t *info; 1598 scsi_info_t *info;
1599 dev_link_t *link;
1600 nsp_hw_data *data = &nsp_data_base; 1599 nsp_hw_data *data = &nsp_data_base;
1600 int ret;
1601 1601
1602 nsp_dbg(NSP_DEBUG_INIT, "in"); 1602 nsp_dbg(NSP_DEBUG_INIT, "in");
1603 1603
@@ -1605,7 +1605,7 @@ static int nsp_cs_attach(struct pcmcia_device *p_dev)
1605 info = kmalloc(sizeof(*info), GFP_KERNEL); 1605 info = kmalloc(sizeof(*info), GFP_KERNEL);
1606 if (info == NULL) { return -ENOMEM; } 1606 if (info == NULL) { return -ENOMEM; }
1607 memset(info, 0, sizeof(*info)); 1607 memset(info, 0, sizeof(*info));
1608 link = &info->link; 1608 info->p_dev = link;
1609 link->priv = info; 1609 link->priv = info;
1610 data->ScsiInfo = info; 1610 data->ScsiInfo = info;
1611 1611
@@ -1627,18 +1627,13 @@ static int nsp_cs_attach(struct pcmcia_device *p_dev)
1627 1627
1628 /* General socket configuration */ 1628 /* General socket configuration */
1629 link->conf.Attributes = CONF_ENABLE_IRQ; 1629 link->conf.Attributes = CONF_ENABLE_IRQ;
1630 link->conf.Vcc = 50;
1631 link->conf.IntType = INT_MEMORY_AND_IO; 1630 link->conf.IntType = INT_MEMORY_AND_IO;
1632 link->conf.Present = PRESENT_OPTION; 1631 link->conf.Present = PRESENT_OPTION;
1633 1632
1634 link->handle = p_dev; 1633 ret = nsp_cs_config(link);
1635 p_dev->instance = link;
1636
1637 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
1638 nsp_cs_config(link);
1639 1634
1640 nsp_dbg(NSP_DEBUG_INIT, "link=0x%p", link); 1635 nsp_dbg(NSP_DEBUG_INIT, "link=0x%p", link);
1641 return 0; 1636 return ret;
1642} /* nsp_cs_attach */ 1637} /* nsp_cs_attach */
1643 1638
1644 1639
@@ -1648,16 +1643,12 @@ static int nsp_cs_attach(struct pcmcia_device *p_dev)
1648 structures are freed. Otherwise, the structures will be freed 1643 structures are freed. Otherwise, the structures will be freed
1649 when the device is released. 1644 when the device is released.
1650======================================================================*/ 1645======================================================================*/
1651static void nsp_cs_detach(struct pcmcia_device *p_dev) 1646static void nsp_cs_detach(struct pcmcia_device *link)
1652{ 1647{
1653 dev_link_t *link = dev_to_instance(p_dev);
1654
1655 nsp_dbg(NSP_DEBUG_INIT, "in, link=0x%p", link); 1648 nsp_dbg(NSP_DEBUG_INIT, "in, link=0x%p", link);
1656 1649
1657 if (link->state & DEV_CONFIG) { 1650 ((scsi_info_t *)link->priv)->stop = 1;
1658 ((scsi_info_t *)link->priv)->stop = 1; 1651 nsp_cs_release(link);
1659 nsp_cs_release(link);
1660 }
1661 1652
1662 kfree(link->priv); 1653 kfree(link->priv);
1663 link->priv = NULL; 1654 link->priv = NULL;
@@ -1672,9 +1663,9 @@ static void nsp_cs_detach(struct pcmcia_device *p_dev)
1672#define CS_CHECK(fn, ret) \ 1663#define CS_CHECK(fn, ret) \
1673do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 1664do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
1674/*====================================================================*/ 1665/*====================================================================*/
1675static void nsp_cs_config(dev_link_t *link) 1666static int nsp_cs_config(struct pcmcia_device *link)
1676{ 1667{
1677 client_handle_t handle = link->handle; 1668 int ret;
1678 scsi_info_t *info = link->priv; 1669 scsi_info_t *info = link->priv;
1679 tuple_t tuple; 1670 tuple_t tuple;
1680 cisparse_t parse; 1671 cisparse_t parse;
@@ -1698,26 +1689,22 @@ static void nsp_cs_config(dev_link_t *link)
1698 tuple.TupleData = tuple_data; 1689 tuple.TupleData = tuple_data;
1699 tuple.TupleDataMax = sizeof(tuple_data); 1690 tuple.TupleDataMax = sizeof(tuple_data);
1700 tuple.TupleOffset = 0; 1691 tuple.TupleOffset = 0;
1701 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 1692 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
1702 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 1693 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
1703 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 1694 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
1704 link->conf.ConfigBase = parse.config.base; 1695 link->conf.ConfigBase = parse.config.base;
1705 link->conf.Present = parse.config.rmask[0]; 1696 link->conf.Present = parse.config.rmask[0];
1706 1697
1707 /* Configure card */
1708 link->state |= DEV_CONFIG;
1709
1710 /* Look up the current Vcc */ 1698 /* Look up the current Vcc */
1711 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(handle, &conf)); 1699 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link, &conf));
1712 link->conf.Vcc = conf.Vcc;
1713 1700
1714 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 1701 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
1715 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 1702 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
1716 while (1) { 1703 while (1) {
1717 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry); 1704 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
1718 1705
1719 if (pcmcia_get_tuple_data(handle, &tuple) != 0 || 1706 if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
1720 pcmcia_parse_tuple(handle, &tuple, &parse) != 0) 1707 pcmcia_parse_tuple(link, &tuple, &parse) != 0)
1721 goto next_entry; 1708 goto next_entry;
1722 1709
1723 if (cfg->flags & CISTPL_CFTABLE_DEFAULT) { dflt = *cfg; } 1710 if (cfg->flags & CISTPL_CFTABLE_DEFAULT) { dflt = *cfg; }
@@ -1743,10 +1730,10 @@ static void nsp_cs_config(dev_link_t *link)
1743 } 1730 }
1744 1731
1745 if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM)) { 1732 if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM)) {
1746 link->conf.Vpp1 = link->conf.Vpp2 = 1733 link->conf.Vpp =
1747 cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000; 1734 cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000;
1748 } else if (dflt.vpp1.present & (1 << CISTPL_POWER_VNOM)) { 1735 } else if (dflt.vpp1.present & (1 << CISTPL_POWER_VNOM)) {
1749 link->conf.Vpp1 = link->conf.Vpp2 = 1736 link->conf.Vpp =
1750 dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000; 1737 dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000;
1751 } 1738 }
1752 1739
@@ -1773,7 +1760,7 @@ static void nsp_cs_config(dev_link_t *link)
1773 link->io.NumPorts2 = io->win[1].len; 1760 link->io.NumPorts2 = io->win[1].len;
1774 } 1761 }
1775 /* This reserves IO space but doesn't actually enable it */ 1762 /* This reserves IO space but doesn't actually enable it */
1776 if (pcmcia_request_io(link->handle, &link->io) != 0) 1763 if (pcmcia_request_io(link, &link->io) != 0)
1777 goto next_entry; 1764 goto next_entry;
1778 } 1765 }
1779 1766
@@ -1788,7 +1775,7 @@ static void nsp_cs_config(dev_link_t *link)
1788 req.Size = 0x1000; 1775 req.Size = 0x1000;
1789 } 1776 }
1790 req.AccessSpeed = 0; 1777 req.AccessSpeed = 0;
1791 if (pcmcia_request_window(&link->handle, &req, &link->win) != 0) 1778 if (pcmcia_request_window(&link, &req, &link->win) != 0)
1792 goto next_entry; 1779 goto next_entry;
1793 map.Page = 0; map.CardOffset = mem->win[0].card_addr; 1780 map.Page = 0; map.CardOffset = mem->win[0].card_addr;
1794 if (pcmcia_map_mem_page(link->win, &map) != 0) 1781 if (pcmcia_map_mem_page(link->win, &map) != 0)
@@ -1802,17 +1789,14 @@ static void nsp_cs_config(dev_link_t *link)
1802 1789
1803 next_entry: 1790 next_entry:
1804 nsp_dbg(NSP_DEBUG_INIT, "next"); 1791 nsp_dbg(NSP_DEBUG_INIT, "next");
1805 1792 pcmcia_disable_device(link);
1806 if (link->io.NumPorts1) { 1793 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
1807 pcmcia_release_io(link->handle, &link->io);
1808 }
1809 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple));
1810 } 1794 }
1811 1795
1812 if (link->conf.Attributes & CONF_ENABLE_IRQ) { 1796 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
1813 CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq)); 1797 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
1814 } 1798 }
1815 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf)); 1799 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
1816 1800
1817 if (free_ports) { 1801 if (free_ports) {
1818 if (link->io.BasePort1) { 1802 if (link->io.BasePort1) {
@@ -1854,16 +1838,19 @@ static void nsp_cs_config(dev_link_t *link)
1854 1838
1855 1839
1856#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,74)) 1840#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,74))
1857 scsi_add_host (host, NULL); 1841 ret = scsi_add_host (host, NULL);
1842 if (ret)
1843 goto cs_failed;
1844
1858 scsi_scan_host(host); 1845 scsi_scan_host(host);
1859 1846
1860 snprintf(info->node.dev_name, sizeof(info->node.dev_name), "scsi%d", host->host_no); 1847 snprintf(info->node.dev_name, sizeof(info->node.dev_name), "scsi%d", host->host_no);
1861 link->dev = &info->node; 1848 link->dev_node = &info->node;
1862 info->host = host; 1849 info->host = host;
1863 1850
1864#else 1851#else
1865 nsp_dbg(NSP_DEBUG_INIT, "GET_SCSI_INFO"); 1852 nsp_dbg(NSP_DEBUG_INIT, "GET_SCSI_INFO");
1866 tail = &link->dev; 1853 tail = &link->dev_node;
1867 info->ndev = 0; 1854 info->ndev = 0;
1868 1855
1869 nsp_dbg(NSP_DEBUG_INIT, "host=0x%p", host); 1856 nsp_dbg(NSP_DEBUG_INIT, "host=0x%p", host);
@@ -1908,11 +1895,10 @@ static void nsp_cs_config(dev_link_t *link)
1908#endif 1895#endif
1909 1896
1910 /* Finally, report what we've done */ 1897 /* Finally, report what we've done */
1911 printk(KERN_INFO "nsp_cs: index 0x%02x: Vcc %d.%d", 1898 printk(KERN_INFO "nsp_cs: index 0x%02x: ",
1912 link->conf.ConfigIndex, 1899 link->conf.ConfigIndex);
1913 link->conf.Vcc/10, link->conf.Vcc%10); 1900 if (link->conf.Vpp) {
1914 if (link->conf.Vpp1) { 1901 printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
1915 printk(", Vpp %d.%d", link->conf.Vpp1/10, link->conf.Vpp1%10);
1916 } 1902 }
1917 if (link->conf.Attributes & CONF_ENABLE_IRQ) { 1903 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
1918 printk(", irq %d", link->irq.AssignedIRQ); 1904 printk(", irq %d", link->irq.AssignedIRQ);
@@ -1929,15 +1915,14 @@ static void nsp_cs_config(dev_link_t *link)
1929 req.Base+req.Size-1); 1915 req.Base+req.Size-1);
1930 printk("\n"); 1916 printk("\n");
1931 1917
1932 link->state &= ~DEV_CONFIG_PENDING; 1918 return 0;
1933 return;
1934 1919
1935 cs_failed: 1920 cs_failed:
1936 nsp_dbg(NSP_DEBUG_INIT, "config fail"); 1921 nsp_dbg(NSP_DEBUG_INIT, "config fail");
1937 cs_error(link->handle, last_fn, last_ret); 1922 cs_error(link, last_fn, last_ret);
1938 nsp_cs_release(link); 1923 nsp_cs_release(link);
1939 1924
1940 return; 1925 return -ENODEV;
1941} /* nsp_cs_config */ 1926} /* nsp_cs_config */
1942#undef CS_CHECK 1927#undef CS_CHECK
1943 1928
@@ -1947,7 +1932,7 @@ static void nsp_cs_config(dev_link_t *link)
1947 device, and release the PCMCIA configuration. If the device is 1932 device, and release the PCMCIA configuration. If the device is
1948 still open, this will be postponed until it is closed. 1933 still open, this will be postponed until it is closed.
1949======================================================================*/ 1934======================================================================*/
1950static void nsp_cs_release(dev_link_t *link) 1935static void nsp_cs_release(struct pcmcia_device *link)
1951{ 1936{
1952 scsi_info_t *info = link->priv; 1937 scsi_info_t *info = link->priv;
1953 nsp_hw_data *data = NULL; 1938 nsp_hw_data *data = NULL;
@@ -1968,22 +1953,15 @@ static void nsp_cs_release(dev_link_t *link)
1968#else 1953#else
1969 scsi_unregister_host(&nsp_driver_template); 1954 scsi_unregister_host(&nsp_driver_template);
1970#endif 1955#endif
1971 link->dev = NULL; 1956 link->dev_node = NULL;
1972 1957
1973 if (link->win) { 1958 if (link->win) {
1974 if (data != NULL) { 1959 if (data != NULL) {
1975 iounmap((void *)(data->MmioAddress)); 1960 iounmap((void *)(data->MmioAddress));
1976 } 1961 }
1977 pcmcia_release_window(link->win);
1978 }
1979 pcmcia_release_configuration(link->handle);
1980 if (link->io.NumPorts1) {
1981 pcmcia_release_io(link->handle, &link->io);
1982 } 1962 }
1983 if (link->irq.AssignedIRQ) { 1963 pcmcia_disable_device(link);
1984 pcmcia_release_irq(link->handle, &link->irq); 1964
1985 }
1986 link->state &= ~DEV_CONFIG;
1987#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,2)) 1965#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,2))
1988 if (info->host != NULL) { 1966 if (info->host != NULL) {
1989 scsi_host_put(info->host); 1967 scsi_host_put(info->host);
@@ -1991,14 +1969,11 @@ static void nsp_cs_release(dev_link_t *link)
1991#endif 1969#endif
1992} /* nsp_cs_release */ 1970} /* nsp_cs_release */
1993 1971
1994static int nsp_cs_suspend(struct pcmcia_device *dev) 1972static int nsp_cs_suspend(struct pcmcia_device *link)
1995{ 1973{
1996 dev_link_t *link = dev_to_instance(dev);
1997 scsi_info_t *info = link->priv; 1974 scsi_info_t *info = link->priv;
1998 nsp_hw_data *data; 1975 nsp_hw_data *data;
1999 1976
2000 link->state |= DEV_SUSPEND;
2001
2002 nsp_dbg(NSP_DEBUG_INIT, "event: suspend"); 1977 nsp_dbg(NSP_DEBUG_INIT, "event: suspend");
2003 1978
2004 if (info->host != NULL) { 1979 if (info->host != NULL) {
@@ -2011,25 +1986,16 @@ static int nsp_cs_suspend(struct pcmcia_device *dev)
2011 1986
2012 info->stop = 1; 1987 info->stop = 1;
2013 1988
2014 if (link->state & DEV_CONFIG)
2015 pcmcia_release_configuration(link->handle);
2016
2017 return 0; 1989 return 0;
2018} 1990}
2019 1991
2020static int nsp_cs_resume(struct pcmcia_device *dev) 1992static int nsp_cs_resume(struct pcmcia_device *link)
2021{ 1993{
2022 dev_link_t *link = dev_to_instance(dev);
2023 scsi_info_t *info = link->priv; 1994 scsi_info_t *info = link->priv;
2024 nsp_hw_data *data; 1995 nsp_hw_data *data;
2025 1996
2026 nsp_dbg(NSP_DEBUG_INIT, "event: resume"); 1997 nsp_dbg(NSP_DEBUG_INIT, "event: resume");
2027 1998
2028 link->state &= ~DEV_SUSPEND;
2029
2030 if (link->state & DEV_CONFIG)
2031 pcmcia_request_configuration(link->handle, &link->conf);
2032
2033 info->stop = 0; 1999 info->stop = 0;
2034 2000
2035 if (info->host != NULL) { 2001 if (info->host != NULL) {
@@ -2065,7 +2031,7 @@ static struct pcmcia_driver nsp_driver = {
2065 .drv = { 2031 .drv = {
2066 .name = "nsp_cs", 2032 .name = "nsp_cs",
2067 }, 2033 },
2068 .probe = nsp_cs_attach, 2034 .probe = nsp_cs_probe,
2069 .remove = nsp_cs_detach, 2035 .remove = nsp_cs_detach,
2070 .id_table = nsp_cs_ids, 2036 .id_table = nsp_cs_ids,
2071 .suspend = nsp_cs_suspend, 2037 .suspend = nsp_cs_suspend,
@@ -2098,19 +2064,7 @@ static int __init nsp_cs_init(void)
2098static void __exit nsp_cs_exit(void) 2064static void __exit nsp_cs_exit(void)
2099{ 2065{
2100 nsp_msg(KERN_INFO, "unloading..."); 2066 nsp_msg(KERN_INFO, "unloading...");
2101
2102#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,68))
2103 pcmcia_unregister_driver(&nsp_driver); 2067 pcmcia_unregister_driver(&nsp_driver);
2104#else
2105 unregister_pcmcia_driver(&dev_info);
2106 /* XXX: this really needs to move into generic code.. */
2107 while (dev_list != NULL) {
2108 if (dev_list->state & DEV_CONFIG) {
2109 nsp_cs_release(dev_list);
2110 }
2111 nsp_cs_detach(dev_list);
2112 }
2113#endif
2114} 2068}
2115 2069
2116 2070
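Annotation: the nsp_cs conversion also drops the manual suspend/resume bookkeeping: the DEV_SUSPEND flag and the explicit pcmcia_release_configuration()/pcmcia_request_configuration() calls disappear because the PCMCIA core now saves and restores the socket configuration around the driver callbacks, leaving the driver to do only its own quiescing. The resulting callback shape, sketched with illustrative names and an info structure carrying a "stop" flag like nsp's scsi_info_t:

static int example_suspend(struct pcmcia_device *link)
{
	struct example_info *info = link->priv;

	info->stop = 1;		/* device-specific quiesce only */
	return 0;
}

static int example_resume(struct pcmcia_device *link)
{
	struct example_info *info = link->priv;

	info->stop = 0;		/* core has already re-applied the configuration */
	return 0;
}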
diff --git a/drivers/scsi/pcmcia/nsp_cs.h b/drivers/scsi/pcmcia/nsp_cs.h
index b66b140a745e..8908b8e5b78a 100644
--- a/drivers/scsi/pcmcia/nsp_cs.h
+++ b/drivers/scsi/pcmcia/nsp_cs.h
@@ -225,7 +225,7 @@
225/*====================================================================*/ 225/*====================================================================*/
226 226
227typedef struct scsi_info_t { 227typedef struct scsi_info_t {
228 dev_link_t link; 228 struct pcmcia_device *p_dev;
229 struct Scsi_Host *host; 229 struct Scsi_Host *host;
230#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,74)) 230#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,74))
231 dev_node_t node; 231 dev_node_t node;
@@ -297,8 +297,8 @@ typedef struct _nsp_hw_data {
297 297
298/* Card service functions */ 298/* Card service functions */
299static void nsp_cs_detach (struct pcmcia_device *p_dev); 299static void nsp_cs_detach (struct pcmcia_device *p_dev);
300static void nsp_cs_release(dev_link_t *link); 300static void nsp_cs_release(struct pcmcia_device *link);
301static void nsp_cs_config (dev_link_t *link); 301static int nsp_cs_config (struct pcmcia_device *link);
302 302
303/* Linux SCSI subsystem specific functions */ 303/* Linux SCSI subsystem specific functions */
304static struct Scsi_Host *nsp_detect (struct scsi_host_template *sht); 304static struct Scsi_Host *nsp_detect (struct scsi_host_template *sht);
@@ -450,7 +450,7 @@ static inline struct Scsi_Host *scsi_host_hn_get(unsigned short hostno)
450 return host; 450 return host;
451} 451}
452 452
453static void cs_error(client_handle_t handle, int func, int ret) 453static void cs_error(struct pcmcia_device *handle, int func, int ret)
454{ 454{
455 error_info_t err = { func, ret }; 455 error_info_t err = { func, ret };
456 pcmcia_report_error(handle, &err); 456 pcmcia_report_error(handle, &err);
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index dce7e687fd4a..86c2ac6ae623 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -91,18 +91,18 @@ static struct scsi_host_template qlogicfas_driver_template = {
91/*====================================================================*/ 91/*====================================================================*/
92 92
93typedef struct scsi_info_t { 93typedef struct scsi_info_t {
94 dev_link_t link; 94 struct pcmcia_device *p_dev;
95 dev_node_t node; 95 dev_node_t node;
96 struct Scsi_Host *host; 96 struct Scsi_Host *host;
97 unsigned short manf_id; 97 unsigned short manf_id;
98} scsi_info_t; 98} scsi_info_t;
99 99
100static void qlogic_release(dev_link_t *link); 100static void qlogic_release(struct pcmcia_device *link);
101static void qlogic_detach(struct pcmcia_device *p_dev); 101static void qlogic_detach(struct pcmcia_device *p_dev);
102static void qlogic_config(dev_link_t * link); 102static int qlogic_config(struct pcmcia_device * link);
103 103
104static struct Scsi_Host *qlogic_detect(struct scsi_host_template *host, 104static struct Scsi_Host *qlogic_detect(struct scsi_host_template *host,
105 dev_link_t *link, int qbase, int qlirq) 105 struct pcmcia_device *link, int qbase, int qlirq)
106{ 106{
107 int qltyp; /* type of chip */ 107 int qltyp; /* type of chip */
108 int qinitid; 108 int qinitid;
@@ -156,10 +156,9 @@ free_scsi_host:
156err: 156err:
157 return NULL; 157 return NULL;
158} 158}
159static int qlogic_attach(struct pcmcia_device *p_dev) 159static int qlogic_probe(struct pcmcia_device *link)
160{ 160{
161 scsi_info_t *info; 161 scsi_info_t *info;
162 dev_link_t *link;
163 162
164 DEBUG(0, "qlogic_attach()\n"); 163 DEBUG(0, "qlogic_attach()\n");
165 164
@@ -168,7 +167,7 @@ static int qlogic_attach(struct pcmcia_device *p_dev)
168 if (!info) 167 if (!info)
169 return -ENOMEM; 168 return -ENOMEM;
170 memset(info, 0, sizeof(*info)); 169 memset(info, 0, sizeof(*info));
171 link = &info->link; 170 info->p_dev = link;
172 link->priv = info; 171 link->priv = info;
173 link->io.NumPorts1 = 16; 172 link->io.NumPorts1 = 16;
174 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 173 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
@@ -176,30 +175,19 @@ static int qlogic_attach(struct pcmcia_device *p_dev)
176 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 175 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
177 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 176 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
178 link->conf.Attributes = CONF_ENABLE_IRQ; 177 link->conf.Attributes = CONF_ENABLE_IRQ;
179 link->conf.Vcc = 50;
180 link->conf.IntType = INT_MEMORY_AND_IO; 178 link->conf.IntType = INT_MEMORY_AND_IO;
181 link->conf.Present = PRESENT_OPTION; 179 link->conf.Present = PRESENT_OPTION;
182 180
183 link->handle = p_dev; 181 return qlogic_config(link);
184 p_dev->instance = link;
185
186 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
187 qlogic_config(link);
188
189 return 0;
190} /* qlogic_attach */ 182} /* qlogic_attach */
191 183
192/*====================================================================*/ 184/*====================================================================*/
193 185
194static void qlogic_detach(struct pcmcia_device *p_dev) 186static void qlogic_detach(struct pcmcia_device *link)
195{ 187{
196 dev_link_t *link = dev_to_instance(p_dev);
197
198 DEBUG(0, "qlogic_detach(0x%p)\n", link); 188 DEBUG(0, "qlogic_detach(0x%p)\n", link);
199 189
200 if (link->state & DEV_CONFIG) 190 qlogic_release(link);
201 qlogic_release(link);
202
203 kfree(link->priv); 191 kfree(link->priv);
204 192
205} /* qlogic_detach */ 193} /* qlogic_detach */
@@ -209,9 +197,8 @@ static void qlogic_detach(struct pcmcia_device *p_dev)
209#define CS_CHECK(fn, ret) \ 197#define CS_CHECK(fn, ret) \
210do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 198do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
211 199
212static void qlogic_config(dev_link_t * link) 200static int qlogic_config(struct pcmcia_device * link)
213{ 201{
214 client_handle_t handle = link->handle;
215 scsi_info_t *info = link->priv; 202 scsi_info_t *info = link->priv;
216 tuple_t tuple; 203 tuple_t tuple;
217 cisparse_t parse; 204 cisparse_t parse;
@@ -225,38 +212,35 @@ static void qlogic_config(dev_link_t * link)
225 tuple.TupleDataMax = 64; 212 tuple.TupleDataMax = 64;
226 tuple.TupleOffset = 0; 213 tuple.TupleOffset = 0;
227 tuple.DesiredTuple = CISTPL_CONFIG; 214 tuple.DesiredTuple = CISTPL_CONFIG;
228 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 215 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
229 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 216 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
230 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 217 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
231 link->conf.ConfigBase = parse.config.base; 218 link->conf.ConfigBase = parse.config.base;
232 219
233 tuple.DesiredTuple = CISTPL_MANFID; 220 tuple.DesiredTuple = CISTPL_MANFID;
234 if ((pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS) && (pcmcia_get_tuple_data(handle, &tuple) == CS_SUCCESS)) 221 if ((pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) && (pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS))
235 info->manf_id = le16_to_cpu(tuple.TupleData[0]); 222 info->manf_id = le16_to_cpu(tuple.TupleData[0]);
236 223
237 /* Configure card */
238 link->state |= DEV_CONFIG;
239
240 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 224 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
241 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 225 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
242 while (1) { 226 while (1) {
243 if (pcmcia_get_tuple_data(handle, &tuple) != 0 || 227 if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
244 pcmcia_parse_tuple(handle, &tuple, &parse) != 0) 228 pcmcia_parse_tuple(link, &tuple, &parse) != 0)
245 goto next_entry; 229 goto next_entry;
246 link->conf.ConfigIndex = parse.cftable_entry.index; 230 link->conf.ConfigIndex = parse.cftable_entry.index;
247 link->io.BasePort1 = parse.cftable_entry.io.win[0].base; 231 link->io.BasePort1 = parse.cftable_entry.io.win[0].base;
248 link->io.NumPorts1 = parse.cftable_entry.io.win[0].len; 232 link->io.NumPorts1 = parse.cftable_entry.io.win[0].len;
249 if (link->io.BasePort1 != 0) { 233 if (link->io.BasePort1 != 0) {
250 i = pcmcia_request_io(handle, &link->io); 234 i = pcmcia_request_io(link, &link->io);
251 if (i == CS_SUCCESS) 235 if (i == CS_SUCCESS)
252 break; 236 break;
253 } 237 }
254 next_entry: 238 next_entry:
255 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple)); 239 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
256 } 240 }
257 241
258 CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq)); 242 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
259 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf)); 243 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
260 244
261 if ((info->manf_id == MANFID_MACNICA) || (info->manf_id == MANFID_PIONEER) || (info->manf_id == 0x0098)) { 245 if ((info->manf_id == MANFID_MACNICA) || (info->manf_id == MANFID_PIONEER) || (info->manf_id == 0x0098)) {
262 /* set ATAcmd */ 246 /* set ATAcmd */
@@ -275,82 +259,54 @@ static void qlogic_config(dev_link_t * link)
275 259
276 if (!host) { 260 if (!host) {
277 printk(KERN_INFO "%s: no SCSI devices found\n", qlogic_name); 261 printk(KERN_INFO "%s: no SCSI devices found\n", qlogic_name);
278 goto out; 262 goto cs_failed;
279 } 263 }
280 264
281 sprintf(info->node.dev_name, "scsi%d", host->host_no); 265 sprintf(info->node.dev_name, "scsi%d", host->host_no);
282 link->dev = &info->node; 266 link->dev_node = &info->node;
283 info->host = host; 267 info->host = host;
284 268
285out: 269 return 0;
286 link->state &= ~DEV_CONFIG_PENDING;
287 return;
288 270
289cs_failed: 271cs_failed:
290 cs_error(link->handle, last_fn, last_ret); 272 cs_error(link, last_fn, last_ret);
291 link->dev = NULL; 273 pcmcia_disable_device(link);
292 pcmcia_release_configuration(link->handle); 274 return -ENODEV;
293 pcmcia_release_io(link->handle, &link->io);
294 pcmcia_release_irq(link->handle, &link->irq);
295 link->state &= ~DEV_CONFIG;
296 return;
297 275
298} /* qlogic_config */ 276} /* qlogic_config */
299 277
300/*====================================================================*/ 278/*====================================================================*/
301 279
302static void qlogic_release(dev_link_t *link) 280static void qlogic_release(struct pcmcia_device *link)
303{ 281{
304 scsi_info_t *info = link->priv; 282 scsi_info_t *info = link->priv;
305 283
306 DEBUG(0, "qlogic_release(0x%p)\n", link); 284 DEBUG(0, "qlogic_release(0x%p)\n", link);
307 285
308 scsi_remove_host(info->host); 286 scsi_remove_host(info->host);
309 link->dev = NULL;
310 287
311 free_irq(link->irq.AssignedIRQ, info->host); 288 free_irq(link->irq.AssignedIRQ, info->host);
312 289 pcmcia_disable_device(link);
313 pcmcia_release_configuration(link->handle);
314 pcmcia_release_io(link->handle, &link->io);
315 pcmcia_release_irq(link->handle, &link->irq);
316 290
317 scsi_host_put(info->host); 291 scsi_host_put(info->host);
318
319 link->state &= ~DEV_CONFIG;
320} 292}
321 293
322/*====================================================================*/ 294/*====================================================================*/
323 295
324static int qlogic_suspend(struct pcmcia_device *dev) 296static int qlogic_resume(struct pcmcia_device *link)
325{ 297{
326 dev_link_t *link = dev_to_instance(dev); 298 scsi_info_t *info = link->priv;
327
328 link->state |= DEV_SUSPEND;
329 if (link->state & DEV_CONFIG)
330 pcmcia_release_configuration(link->handle);
331
332 return 0;
333}
334 299
335static int qlogic_resume(struct pcmcia_device *dev) 300 pcmcia_request_configuration(link, &link->conf);
336{ 301 if ((info->manf_id == MANFID_MACNICA) ||
337 dev_link_t *link = dev_to_instance(dev); 302 (info->manf_id == MANFID_PIONEER) ||
338 303 (info->manf_id == 0x0098)) {
339 link->state &= ~DEV_SUSPEND; 304 outb(0x80, link->io.BasePort1 + 0xd);
340 if (link->state & DEV_CONFIG) { 305 outb(0x24, link->io.BasePort1 + 0x9);
341 scsi_info_t *info = link->priv; 306 outb(0x04, link->io.BasePort1 + 0xd);
342
343 pcmcia_request_configuration(link->handle, &link->conf);
344 if ((info->manf_id == MANFID_MACNICA) ||
345 (info->manf_id == MANFID_PIONEER) ||
346 (info->manf_id == 0x0098)) {
347 outb(0x80, link->io.BasePort1 + 0xd);
348 outb(0x24, link->io.BasePort1 + 0x9);
349 outb(0x04, link->io.BasePort1 + 0xd);
350 }
351 /* Ugggglllyyyy!!! */
352 qlogicfas408_bus_reset(NULL);
353 } 307 }
308 /* Ugggglllyyyy!!! */
309 qlogicfas408_bus_reset(NULL);
354 310
355 return 0; 311 return 0;
356} 312}
@@ -382,10 +338,9 @@ static struct pcmcia_driver qlogic_cs_driver = {
382 .drv = { 338 .drv = {
383 .name = "qlogic_cs", 339 .name = "qlogic_cs",
384 }, 340 },
385 .probe = qlogic_attach, 341 .probe = qlogic_probe,
386 .remove = qlogic_detach, 342 .remove = qlogic_detach,
387 .id_table = qlogic_ids, 343 .id_table = qlogic_ids,
388 .suspend = qlogic_suspend,
389 .resume = qlogic_resume, 344 .resume = qlogic_resume,
390}; 345};
391 346
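The qlogic_stub.c hunks above all follow one conversion: the driver-private dev_link_t disappears, struct pcmcia_device is passed around directly, probe simply propagates the return value of config, and the release_configuration/release_io/release_irq triple collapses into pcmcia_disable_device(). A minimal sketch of that converted shape, assuming the 2.6.16-era PCMCIA headers; the foo_* names and the trivial config body are placeholders for illustration, not part of this patch:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

struct foo_info {
	struct pcmcia_device	*p_dev;
	dev_node_t		node;
};

static int foo_config(struct pcmcia_device *link)
{
	struct foo_info *info = link->priv;

	/* the real drivers walk CISTPL_CFTABLE_ENTRY tuples here to pick
	 * an I/O window; this sketch only shows the request/error shape */
	if (pcmcia_request_io(link, &link->io) != CS_SUCCESS)
		goto failed;
	if (pcmcia_request_irq(link, &link->irq) != CS_SUCCESS)
		goto failed;
	if (pcmcia_request_configuration(link, &link->conf) != CS_SUCCESS)
		goto failed;

	sprintf(info->node.dev_name, "foo0");
	link->dev_node = &info->node;	/* was link->dev on dev_link_t */
	return 0;

failed:
	pcmcia_disable_device(link);	/* one call undoes config, I/O and IRQ */
	return -ENODEV;
}

static int foo_probe(struct pcmcia_device *link)
{
	struct foo_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	memset(info, 0, sizeof(*info));
	info->p_dev = link;		/* private data points back at the device */
	link->priv = info;

	link->io.NumPorts1 = 16;
	link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
	link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
	link->conf.Attributes = CONF_ENABLE_IRQ;
	link->conf.IntType = INT_MEMORY_AND_IO;

	/* no DEV_PRESENT/DEV_CONFIG_PENDING flags: just return config's result */
	return foo_config(link);
}

static void foo_detach(struct pcmcia_device *link)
{
	pcmcia_disable_device(link);
	kfree(link->priv);
}
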
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index 3a4dd6f5b81f..9f59827707f0 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -202,7 +202,7 @@ static char *version =
202/* ================================================================== */ 202/* ================================================================== */
203 203
204struct scsi_info_t { 204struct scsi_info_t {
205 dev_link_t link; 205 struct pcmcia_device *p_dev;
206 dev_node_t node; 206 dev_node_t node;
207 struct Scsi_Host *host; 207 struct Scsi_Host *host;
208 unsigned short manf_id; 208 unsigned short manf_id;
@@ -527,7 +527,7 @@ idle_out:
527} 527}
528 528
529static void 529static void
530SYM53C500_release(dev_link_t *link) 530SYM53C500_release(struct pcmcia_device *link)
531{ 531{
532 struct scsi_info_t *info = link->priv; 532 struct scsi_info_t *info = link->priv;
533 struct Scsi_Host *shost = info->host; 533 struct Scsi_Host *shost = info->host;
@@ -550,13 +550,7 @@ SYM53C500_release(dev_link_t *link)
550 if (shost->io_port && shost->n_io_port) 550 if (shost->io_port && shost->n_io_port)
551 release_region(shost->io_port, shost->n_io_port); 551 release_region(shost->io_port, shost->n_io_port);
552 552
553 link->dev = NULL; 553 pcmcia_disable_device(link);
554
555 pcmcia_release_configuration(link->handle);
556 pcmcia_release_io(link->handle, &link->io);
557 pcmcia_release_irq(link->handle, &link->irq);
558
559 link->state &= ~DEV_CONFIG;
560 554
561 scsi_host_put(shost); 555 scsi_host_put(shost);
562} /* SYM53C500_release */ 556} /* SYM53C500_release */
@@ -713,10 +707,9 @@ static struct scsi_host_template sym53c500_driver_template = {
713#define CS_CHECK(fn, ret) \ 707#define CS_CHECK(fn, ret) \
714do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 708do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
715 709
716static void 710static int
717SYM53C500_config(dev_link_t *link) 711SYM53C500_config(struct pcmcia_device *link)
718{ 712{
719 client_handle_t handle = link->handle;
720 struct scsi_info_t *info = link->priv; 713 struct scsi_info_t *info = link->priv;
721 tuple_t tuple; 714 tuple_t tuple;
722 cisparse_t parse; 715 cisparse_t parse;
@@ -733,40 +726,37 @@ SYM53C500_config(dev_link_t *link)
733 tuple.TupleDataMax = 64; 726 tuple.TupleDataMax = 64;
734 tuple.TupleOffset = 0; 727 tuple.TupleOffset = 0;
735 tuple.DesiredTuple = CISTPL_CONFIG; 728 tuple.DesiredTuple = CISTPL_CONFIG;
736 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 729 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
737 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 730 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
738 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 731 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
739 link->conf.ConfigBase = parse.config.base; 732 link->conf.ConfigBase = parse.config.base;
740 733
741 tuple.DesiredTuple = CISTPL_MANFID; 734 tuple.DesiredTuple = CISTPL_MANFID;
742 if ((pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS) && 735 if ((pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) &&
743 (pcmcia_get_tuple_data(handle, &tuple) == CS_SUCCESS)) 736 (pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS))
744 info->manf_id = le16_to_cpu(tuple.TupleData[0]); 737 info->manf_id = le16_to_cpu(tuple.TupleData[0]);
745 738
746 /* Configure card */
747 link->state |= DEV_CONFIG;
748
749 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 739 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
750 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 740 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
751 while (1) { 741 while (1) {
752 if (pcmcia_get_tuple_data(handle, &tuple) != 0 || 742 if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
753 pcmcia_parse_tuple(handle, &tuple, &parse) != 0) 743 pcmcia_parse_tuple(link, &tuple, &parse) != 0)
754 goto next_entry; 744 goto next_entry;
755 link->conf.ConfigIndex = parse.cftable_entry.index; 745 link->conf.ConfigIndex = parse.cftable_entry.index;
756 link->io.BasePort1 = parse.cftable_entry.io.win[0].base; 746 link->io.BasePort1 = parse.cftable_entry.io.win[0].base;
757 link->io.NumPorts1 = parse.cftable_entry.io.win[0].len; 747 link->io.NumPorts1 = parse.cftable_entry.io.win[0].len;
758 748
759 if (link->io.BasePort1 != 0) { 749 if (link->io.BasePort1 != 0) {
760 i = pcmcia_request_io(handle, &link->io); 750 i = pcmcia_request_io(link, &link->io);
761 if (i == CS_SUCCESS) 751 if (i == CS_SUCCESS)
762 break; 752 break;
763 } 753 }
764next_entry: 754next_entry:
765 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple)); 755 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
766 } 756 }
767 757
768 CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq)); 758 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
769 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf)); 759 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
770 760
771 /* 761 /*
772 * That's the trouble with copying liberally from another driver. 762 * That's the trouble with copying liberally from another driver.
@@ -835,7 +825,7 @@ next_entry:
835 data->fast_pio = USE_FAST_PIO; 825 data->fast_pio = USE_FAST_PIO;
836 826
837 sprintf(info->node.dev_name, "scsi%d", host->host_no); 827 sprintf(info->node.dev_name, "scsi%d", host->host_no);
838 link->dev = &info->node; 828 link->dev_node = &info->node;
839 info->host = host; 829 info->host = host;
840 830
841 if (scsi_add_host(host, NULL)) 831 if (scsi_add_host(host, NULL))
@@ -843,7 +833,7 @@ next_entry:
843 833
844 scsi_scan_host(host); 834 scsi_scan_host(host);
845 835
846 goto out; /* SUCCESS */ 836 return 0;
847 837
848err_free_irq: 838err_free_irq:
849 free_irq(irq_level, host); 839 free_irq(irq_level, host);
@@ -852,74 +842,50 @@ err_free_scsi:
852err_release: 842err_release:
853 release_region(port_base, 0x10); 843 release_region(port_base, 0x10);
854 printk(KERN_INFO "sym53c500_cs: no SCSI devices found\n"); 844 printk(KERN_INFO "sym53c500_cs: no SCSI devices found\n");
855 845 return -ENODEV;
856out:
857 link->state &= ~DEV_CONFIG_PENDING;
858 return;
859 846
860cs_failed: 847cs_failed:
861 cs_error(link->handle, last_fn, last_ret); 848 cs_error(link, last_fn, last_ret);
862 SYM53C500_release(link); 849 SYM53C500_release(link);
863 return; 850 return -ENODEV;
864} /* SYM53C500_config */ 851} /* SYM53C500_config */
865 852
866static int sym53c500_suspend(struct pcmcia_device *dev) 853static int sym53c500_resume(struct pcmcia_device *link)
867{
868 dev_link_t *link = dev_to_instance(dev);
869
870 link->state |= DEV_SUSPEND;
871 if (link->state & DEV_CONFIG)
872 pcmcia_release_configuration(link->handle);
873
874 return 0;
875}
876
877static int sym53c500_resume(struct pcmcia_device *dev)
878{ 854{
879 dev_link_t *link = dev_to_instance(dev);
880 struct scsi_info_t *info = link->priv; 855 struct scsi_info_t *info = link->priv;
881 856
882 link->state &= ~DEV_SUSPEND; 857 /* See earlier comment about manufacturer IDs. */
883 if (link->state & DEV_CONFIG) { 858 if ((info->manf_id == MANFID_MACNICA) ||
884 pcmcia_request_configuration(link->handle, &link->conf); 859 (info->manf_id == MANFID_PIONEER) ||
885 860 (info->manf_id == 0x0098)) {
886 /* See earlier comment about manufacturer IDs. */ 861 outb(0x80, link->io.BasePort1 + 0xd);
887 if ((info->manf_id == MANFID_MACNICA) || 862 outb(0x24, link->io.BasePort1 + 0x9);
888 (info->manf_id == MANFID_PIONEER) || 863 outb(0x04, link->io.BasePort1 + 0xd);
889 (info->manf_id == 0x0098)) {
890 outb(0x80, link->io.BasePort1 + 0xd);
891 outb(0x24, link->io.BasePort1 + 0x9);
892 outb(0x04, link->io.BasePort1 + 0xd);
893 }
894 /*
895 * If things don't work after a "resume",
896 * this is a good place to start looking.
897 */
898 SYM53C500_int_host_reset(link->io.BasePort1);
899 } 864 }
865 /*
866 * If things don't work after a "resume",
867 * this is a good place to start looking.
868 */
869 SYM53C500_int_host_reset(link->io.BasePort1);
900 870
901 return 0; 871 return 0;
902} 872}
903 873
904static void 874static void
905SYM53C500_detach(struct pcmcia_device *p_dev) 875SYM53C500_detach(struct pcmcia_device *link)
906{ 876{
907 dev_link_t *link = dev_to_instance(p_dev);
908
909 DEBUG(0, "SYM53C500_detach(0x%p)\n", link); 877 DEBUG(0, "SYM53C500_detach(0x%p)\n", link);
910 878
911 if (link->state & DEV_CONFIG) 879 SYM53C500_release(link);
912 SYM53C500_release(link);
913 880
914 kfree(link->priv); 881 kfree(link->priv);
915 link->priv = NULL; 882 link->priv = NULL;
916} /* SYM53C500_detach */ 883} /* SYM53C500_detach */
917 884
918static int 885static int
919SYM53C500_attach(struct pcmcia_device *p_dev) 886SYM53C500_probe(struct pcmcia_device *link)
920{ 887{
921 struct scsi_info_t *info; 888 struct scsi_info_t *info;
922 dev_link_t *link;
923 889
924 DEBUG(0, "SYM53C500_attach()\n"); 890 DEBUG(0, "SYM53C500_attach()\n");
925 891
@@ -928,7 +894,7 @@ SYM53C500_attach(struct pcmcia_device *p_dev)
928 if (!info) 894 if (!info)
929 return -ENOMEM; 895 return -ENOMEM;
930 memset(info, 0, sizeof(*info)); 896 memset(info, 0, sizeof(*info));
931 link = &info->link; 897 info->p_dev = link;
932 link->priv = info; 898 link->priv = info;
933 link->io.NumPorts1 = 16; 899 link->io.NumPorts1 = 16;
934 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 900 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
@@ -936,17 +902,10 @@ SYM53C500_attach(struct pcmcia_device *p_dev)
936 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 902 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
937 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 903 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
938 link->conf.Attributes = CONF_ENABLE_IRQ; 904 link->conf.Attributes = CONF_ENABLE_IRQ;
939 link->conf.Vcc = 50;
940 link->conf.IntType = INT_MEMORY_AND_IO; 905 link->conf.IntType = INT_MEMORY_AND_IO;
941 link->conf.Present = PRESENT_OPTION; 906 link->conf.Present = PRESENT_OPTION;
942 907
943 link->handle = p_dev; 908 return SYM53C500_config(link);
944 p_dev->instance = link;
945
946 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
947 SYM53C500_config(link);
948
949 return 0;
950} /* SYM53C500_attach */ 909} /* SYM53C500_attach */
951 910
952MODULE_AUTHOR("Bob Tracy <rct@frus.com>"); 911MODULE_AUTHOR("Bob Tracy <rct@frus.com>");
@@ -966,10 +925,9 @@ static struct pcmcia_driver sym53c500_cs_driver = {
966 .drv = { 925 .drv = {
967 .name = "sym53c500_cs", 926 .name = "sym53c500_cs",
968 }, 927 },
969 .probe = SYM53C500_attach, 928 .probe = SYM53C500_probe,
970 .remove = SYM53C500_detach, 929 .remove = SYM53C500_detach,
971 .id_table = sym53c500_ids, 930 .id_table = sym53c500_ids,
972 .suspend = sym53c500_suspend,
973 .resume = sym53c500_resume, 931 .resume = sym53c500_resume,
974}; 932};
975 933
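Both SCSI drivers also drop their suspend hooks in these hunks: the dev_to_instance() lookup and the DEV_SUSPEND/DEV_CONFIG flag checks disappear, and only the device-specific re-initialisation is left in ->resume (qlogic still re-requests its configuration there explicitly). Continuing the foo_* skeleton sketched above, this is roughly how the registration ends up looking; foo_ids, the placeholder ID and the empty resume body are illustrative assumptions:

static struct pcmcia_device_id foo_ids[] = {
	PCMCIA_DEVICE_MANF_CARD(0x0000, 0x0000),	/* placeholder ID */
	PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, foo_ids);

static int foo_resume(struct pcmcia_device *link)
{
	/* chip-specific wake-up (the manf_id-gated outb() sequence in the
	 * real drivers) is all that belongs here now; no state flags */
	return 0;
}

static struct pcmcia_driver foo_cs_driver = {
	.owner		= THIS_MODULE,
	.drv		= {
		.name	= "foo_cs",
	},
	.probe		= foo_probe,
	.remove		= foo_detach,
	.id_table	= foo_ids,
	/* no .suspend: there is nothing device-specific to do before sleep */
	.resume		= foo_resume,
};
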
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index fe0d8b8e91c8..7d22dc0478d3 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -63,6 +63,33 @@ config SERIAL_8250_CONSOLE
63 63
64 If unsure, say N. 64 If unsure, say N.
65 65
66config SERIAL_8250_GSC
67 tristate
68 depends on SERIAL_8250 && GSC
69 default SERIAL_8250
70
71config SERIAL_8250_PCI
72 tristate "8250/16550 PCI device support" if EMBEDDED
73 depends on SERIAL_8250 && PCI
74 default SERIAL_8250
75 help
76 This builds standard PCI serial support. You may be able to
77 disable this feature if you only need legacy serial support.
78 Saves about 9K.
79
80config SERIAL_8250_PNP
81 tristate "8250/16550 PNP device support" if EMBEDDED
82 depends on SERIAL_8250 && PNP
83 default SERIAL_8250
84 help
85 This builds standard PNP serial support. You may be able to
86 disable this feature if you only need legacy serial support.
87
88config SERIAL_8250_HP300
89 tristate
90 depends on SERIAL_8250 && HP300
91 default SERIAL_8250
92
66config SERIAL_8250_CS 93config SERIAL_8250_CS
67 tristate "8250/16550 PCMCIA device support" 94 tristate "8250/16550 PCMCIA device support"
68 depends on PCMCIA && SERIAL_8250 95 depends on PCMCIA && SERIAL_8250
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index d2b4c214876b..0a71bf68a03f 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -4,15 +4,13 @@
4# $Id: Makefile,v 1.8 2002/07/21 21:32:30 rmk Exp $ 4# $Id: Makefile,v 1.8 2002/07/21 21:32:30 rmk Exp $
5# 5#
6 6
7serial-8250-y :=
8serial-8250-$(CONFIG_PNP) += 8250_pnp.o
9serial-8250-$(CONFIG_GSC) += 8250_gsc.o
10serial-8250-$(CONFIG_PCI) += 8250_pci.o
11serial-8250-$(CONFIG_HP300) += 8250_hp300.o
12
13obj-$(CONFIG_SERIAL_CORE) += serial_core.o 7obj-$(CONFIG_SERIAL_CORE) += serial_core.o
14obj-$(CONFIG_SERIAL_21285) += 21285.o 8obj-$(CONFIG_SERIAL_21285) += 21285.o
15obj-$(CONFIG_SERIAL_8250) += 8250.o $(serial-8250-y) 9obj-$(CONFIG_SERIAL_8250) += 8250.o
10obj-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o
11obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o
12obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o
13obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o
16obj-$(CONFIG_SERIAL_8250_CS) += serial_cs.o 14obj-$(CONFIG_SERIAL_8250_CS) += serial_cs.o
17obj-$(CONFIG_SERIAL_8250_ACORN) += 8250_acorn.o 15obj-$(CONFIG_SERIAL_8250_ACORN) += 8250_acorn.o
18obj-$(CONFIG_SERIAL_8250_CONSOLE) += 8250_early.o 16obj-$(CONFIG_SERIAL_8250_CONSOLE) += 8250_early.o
diff --git a/drivers/serial/jsm/jsm.h b/drivers/serial/jsm/jsm.h
index dfc1e86d3aa1..043f50b1d10c 100644
--- a/drivers/serial/jsm/jsm.h
+++ b/drivers/serial/jsm/jsm.h
@@ -20,7 +20,7 @@
20 * 20 *
21 * Contact Information: 21 * Contact Information:
22 * Scott H Kilau <Scott_Kilau@digi.com> 22 * Scott H Kilau <Scott_Kilau@digi.com>
23 * Wendy Xiong <wendyx@us.ltcfwd.linux.ibm.com> 23 * Wendy Xiong <wendyx@us.ibm.com>
24 * 24 *
25 ***********************************************************************/ 25 ***********************************************************************/
26 26
diff --git a/drivers/serial/jsm/jsm_driver.c b/drivers/serial/jsm/jsm_driver.c
index b1b66e71d281..b3e1f71be4da 100644
--- a/drivers/serial/jsm/jsm_driver.c
+++ b/drivers/serial/jsm/jsm_driver.c
@@ -20,7 +20,7 @@
20 * 20 *
21 * Contact Information: 21 * Contact Information:
22 * Scott H Kilau <Scott_Kilau@digi.com> 22 * Scott H Kilau <Scott_Kilau@digi.com>
23 * Wendy Xiong <wendyx@us.ltcfwd.linux.ibm.com> 23 * Wendy Xiong <wendyx@us.ibm.com>
24 * 24 *
25 * 25 *
26 ***********************************************************************/ 26 ***********************************************************************/
diff --git a/drivers/serial/jsm/jsm_neo.c b/drivers/serial/jsm/jsm_neo.c
index 87e4e2cf8ce7..a5fc589d6ef5 100644
--- a/drivers/serial/jsm/jsm_neo.c
+++ b/drivers/serial/jsm/jsm_neo.c
@@ -20,7 +20,7 @@
20 * 20 *
21 * Contact Information: 21 * Contact Information:
22 * Scott H Kilau <Scott_Kilau@digi.com> 22 * Scott H Kilau <Scott_Kilau@digi.com>
23 * Wendy Xiong <wendyx@us.ltcfwd.linux.ibm.com> 23 * Wendy Xiong <wendyx@us.ibm.com>
24 * 24 *
25 ***********************************************************************/ 25 ***********************************************************************/
26#include <linux/delay.h> /* For udelay */ 26#include <linux/delay.h> /* For udelay */
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c
index 4d48b625cd3d..7d823705193c 100644
--- a/drivers/serial/jsm/jsm_tty.c
+++ b/drivers/serial/jsm/jsm_tty.c
@@ -142,12 +142,14 @@ static void jsm_tty_send_xchar(struct uart_port *port, char ch)
142{ 142{
143 unsigned long lock_flags; 143 unsigned long lock_flags;
144 struct jsm_channel *channel = (struct jsm_channel *)port; 144 struct jsm_channel *channel = (struct jsm_channel *)port;
145 struct termios *termios;
145 146
146 spin_lock_irqsave(&port->lock, lock_flags); 147 spin_lock_irqsave(&port->lock, lock_flags);
147 if (ch == port->info->tty->termios->c_cc[VSTART]) 148 termios = port->info->tty->termios;
149 if (ch == termios->c_cc[VSTART])
148 channel->ch_bd->bd_ops->send_start_character(channel); 150 channel->ch_bd->bd_ops->send_start_character(channel);
149 151
150 if (ch == port->info->tty->termios->c_cc[VSTOP]) 152 if (ch == termios->c_cc[VSTOP])
151 channel->ch_bd->bd_ops->send_stop_character(channel); 153 channel->ch_bd->bd_ops->send_stop_character(channel);
152 spin_unlock_irqrestore(&port->lock, lock_flags); 154 spin_unlock_irqrestore(&port->lock, lock_flags);
153} 155}
@@ -178,6 +180,7 @@ static int jsm_tty_open(struct uart_port *port)
178 struct jsm_board *brd; 180 struct jsm_board *brd;
179 int rc = 0; 181 int rc = 0;
180 struct jsm_channel *channel = (struct jsm_channel *)port; 182 struct jsm_channel *channel = (struct jsm_channel *)port;
183 struct termios *termios;
181 184
182 /* Get board pointer from our array of majors we have allocated */ 185 /* Get board pointer from our array of majors we have allocated */
183 brd = channel->ch_bd; 186 brd = channel->ch_bd;
@@ -239,12 +242,13 @@ static int jsm_tty_open(struct uart_port *port)
239 channel->ch_cached_lsr = 0; 242 channel->ch_cached_lsr = 0;
240 channel->ch_stops_sent = 0; 243 channel->ch_stops_sent = 0;
241 244
242 channel->ch_c_cflag = port->info->tty->termios->c_cflag; 245 termios = port->info->tty->termios;
243 channel->ch_c_iflag = port->info->tty->termios->c_iflag; 246 channel->ch_c_cflag = termios->c_cflag;
244 channel->ch_c_oflag = port->info->tty->termios->c_oflag; 247 channel->ch_c_iflag = termios->c_iflag;
245 channel->ch_c_lflag = port->info->tty->termios->c_lflag; 248 channel->ch_c_oflag = termios->c_oflag;
246 channel->ch_startc = port->info->tty->termios->c_cc[VSTART]; 249 channel->ch_c_lflag = termios->c_lflag;
247 channel->ch_stopc = port->info->tty->termios->c_cc[VSTOP]; 250 channel->ch_startc = termios->c_cc[VSTART];
251 channel->ch_stopc = termios->c_cc[VSTOP];
248 252
249 /* Tell UART to init itself */ 253 /* Tell UART to init itself */
250 brd->bd_ops->uart_init(channel); 254 brd->bd_ops->uart_init(channel);
@@ -784,6 +788,7 @@ static void jsm_carrier(struct jsm_channel *ch)
784 788
785void jsm_check_queue_flow_control(struct jsm_channel *ch) 789void jsm_check_queue_flow_control(struct jsm_channel *ch)
786{ 790{
791 struct board_ops *bd_ops = ch->ch_bd->bd_ops;
787 int qleft = 0; 792 int qleft = 0;
788 793
789 /* Store how much space we have left in the queue */ 794 /* Store how much space we have left in the queue */
@@ -809,7 +814,7 @@ void jsm_check_queue_flow_control(struct jsm_channel *ch)
809 /* HWFLOW */ 814 /* HWFLOW */
810 if (ch->ch_c_cflag & CRTSCTS) { 815 if (ch->ch_c_cflag & CRTSCTS) {
811 if(!(ch->ch_flags & CH_RECEIVER_OFF)) { 816 if(!(ch->ch_flags & CH_RECEIVER_OFF)) {
812 ch->ch_bd->bd_ops->disable_receiver(ch); 817 bd_ops->disable_receiver(ch);
813 ch->ch_flags |= (CH_RECEIVER_OFF); 818 ch->ch_flags |= (CH_RECEIVER_OFF);
814 jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, 819 jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
815 "Internal queue hit hilevel mark (%d)! Turning off interrupts.\n", 820 "Internal queue hit hilevel mark (%d)! Turning off interrupts.\n",
@@ -819,7 +824,7 @@ void jsm_check_queue_flow_control(struct jsm_channel *ch)
819 /* SWFLOW */ 824 /* SWFLOW */
820 else if (ch->ch_c_iflag & IXOFF) { 825 else if (ch->ch_c_iflag & IXOFF) {
821 if (ch->ch_stops_sent <= MAX_STOPS_SENT) { 826 if (ch->ch_stops_sent <= MAX_STOPS_SENT) {
822 ch->ch_bd->bd_ops->send_stop_character(ch); 827 bd_ops->send_stop_character(ch);
823 ch->ch_stops_sent++; 828 ch->ch_stops_sent++;
824 jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, 829 jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
825 "Sending stop char! Times sent: %x\n", ch->ch_stops_sent); 830 "Sending stop char! Times sent: %x\n", ch->ch_stops_sent);
@@ -846,7 +851,7 @@ void jsm_check_queue_flow_control(struct jsm_channel *ch)
846 /* HWFLOW */ 851 /* HWFLOW */
847 if (ch->ch_c_cflag & CRTSCTS) { 852 if (ch->ch_c_cflag & CRTSCTS) {
848 if (ch->ch_flags & CH_RECEIVER_OFF) { 853 if (ch->ch_flags & CH_RECEIVER_OFF) {
849 ch->ch_bd->bd_ops->enable_receiver(ch); 854 bd_ops->enable_receiver(ch);
850 ch->ch_flags &= ~(CH_RECEIVER_OFF); 855 ch->ch_flags &= ~(CH_RECEIVER_OFF);
851 jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, 856 jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
852 "Internal queue hit lowlevel mark (%d)! Turning on interrupts.\n", 857 "Internal queue hit lowlevel mark (%d)! Turning on interrupts.\n",
@@ -856,7 +861,7 @@ void jsm_check_queue_flow_control(struct jsm_channel *ch)
856 /* SWFLOW */ 861 /* SWFLOW */
857 else if (ch->ch_c_iflag & IXOFF && ch->ch_stops_sent) { 862 else if (ch->ch_c_iflag & IXOFF && ch->ch_stops_sent) {
858 ch->ch_stops_sent = 0; 863 ch->ch_stops_sent = 0;
859 ch->ch_bd->bd_ops->send_start_character(ch); 864 bd_ops->send_start_character(ch);
860 jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "Sending start char!\n"); 865 jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "Sending start char!\n");
861 } 866 }
862 } 867 }
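The jsm_tty.c changes are a pointer-hoisting cleanup rather than a behaviour change: the repeated port->info->tty->termios and ch->ch_bd->bd_ops chains are dereferenced once into a local and then reused. A small self-contained illustration of the same idiom, with toy foo_* types standing in for jsm_channel and board_ops (none of these names appear in the patch):

struct foo_chan;

struct foo_ops {
	void (*enable_receiver)(struct foo_chan *ch);
	void (*disable_receiver)(struct foo_chan *ch);
};

struct foo_board {
	struct foo_ops *bd_ops;
};

#define FOO_RECEIVER_OFF 0x01

struct foo_chan {
	struct foo_board *bd;
	unsigned int flags;
};

static void foo_flow_control(struct foo_chan *ch, int queue_full)
{
	/* hoist the ch->bd->bd_ops chain into one local, the same shape
	 * as the jsm_check_queue_flow_control() hunks above */
	struct foo_ops *ops = ch->bd->bd_ops;

	if (queue_full && !(ch->flags & FOO_RECEIVER_OFF)) {
		ops->disable_receiver(ch);
		ch->flags |= FOO_RECEIVER_OFF;
	} else if (!queue_full && (ch->flags & FOO_RECEIVER_OFF)) {
		ops->enable_receiver(ch);
		ch->flags &= ~FOO_RECEIVER_OFF;
	}
}

The compiler would usually fold these dereferences anyway; the gain is readability and lines that fit the 80-column limit.
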
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index c30333694fde..2c70773543e0 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -41,6 +41,7 @@
41#include <linux/string.h> 41#include <linux/string.h>
42#include <linux/timer.h> 42#include <linux/timer.h>
43#include <linux/serial_core.h> 43#include <linux/serial_core.h>
44#include <linux/delay.h>
44#include <linux/major.h> 45#include <linux/major.h>
45#include <asm/io.h> 46#include <asm/io.h>
46#include <asm/system.h> 47#include <asm/system.h>
@@ -97,11 +98,13 @@ static const struct multi_id multi_id[] = {
97#define MULTI_COUNT (sizeof(multi_id)/sizeof(struct multi_id)) 98#define MULTI_COUNT (sizeof(multi_id)/sizeof(struct multi_id))
98 99
99struct serial_info { 100struct serial_info {
100 dev_link_t link; 101 struct pcmcia_device *p_dev;
101 int ndev; 102 int ndev;
102 int multi; 103 int multi;
103 int slave; 104 int slave;
104 int manfid; 105 int manfid;
106 int prodid;
107 int c950ctrl;
105 dev_node_t node[4]; 108 dev_node_t node[4];
106 int line[4]; 109 int line[4];
107}; 110};
@@ -113,9 +116,36 @@ struct serial_cfg_mem {
113}; 116};
114 117
115 118
116static void serial_config(dev_link_t * link); 119static int serial_config(struct pcmcia_device * link);
117 120
118 121
122static void wakeup_card(struct serial_info *info)
123{
124 int ctrl = info->c950ctrl;
125
126 if (info->manfid == MANFID_OXSEMI) {
127 outb(12, ctrl + 1);
128 } else if (info->manfid == MANFID_POSSIO && info->prodid == PRODID_POSSIO_GCC) {
129 /* request_region? oxsemi branch does no request_region too... */
130 /* This sequence is needed to properly initialize MC45 attached to OXCF950.
131 * I tried decreasing these msleep()s, but it worked properly (survived
132 * 1000 stop/start operations) with these timeouts (or bigger). */
133 outb(0xA, ctrl + 1);
134 msleep(100);
135 outb(0xE, ctrl + 1);
136 msleep(300);
137 outb(0xC, ctrl + 1);
138 msleep(100);
139 outb(0xE, ctrl + 1);
140 msleep(200);
141 outb(0xF, ctrl + 1);
142 msleep(100);
143 outb(0xE, ctrl + 1);
144 msleep(100);
145 outb(0xC, ctrl + 1);
146 }
147}
148
119/*====================================================================== 149/*======================================================================
120 150
121 After a card is removed, serial_remove() will unregister 151 After a card is removed, serial_remove() will unregister
@@ -123,67 +153,45 @@ static void serial_config(dev_link_t * link);
123 153
124======================================================================*/ 154======================================================================*/
125 155
126static void serial_remove(dev_link_t *link) 156static void serial_remove(struct pcmcia_device *link)
127{ 157{
128 struct serial_info *info = link->priv; 158 struct serial_info *info = link->priv;
129 int i; 159 int i;
130 160
131 link->state &= ~DEV_PRESENT;
132
133 DEBUG(0, "serial_release(0x%p)\n", link); 161 DEBUG(0, "serial_release(0x%p)\n", link);
134 162
135 /* 163 /*
136 * Recheck to see if the device is still configured. 164 * Recheck to see if the device is still configured.
137 */ 165 */
138 if (info->link.state & DEV_CONFIG) { 166 for (i = 0; i < info->ndev; i++)
139 for (i = 0; i < info->ndev; i++) 167 serial8250_unregister_port(info->line[i]);
140 serial8250_unregister_port(info->line[i]);
141 168
142 info->link.dev = NULL; 169 info->p_dev->dev_node = NULL;
143 170
144 if (!info->slave) { 171 if (!info->slave)
145 pcmcia_release_configuration(info->link.handle); 172 pcmcia_disable_device(link);
146 pcmcia_release_io(info->link.handle, &info->link.io);
147 pcmcia_release_irq(info->link.handle, &info->link.irq);
148 }
149
150 info->link.state &= ~DEV_CONFIG;
151 }
152} 173}
153 174
154static int serial_suspend(struct pcmcia_device *dev) 175static int serial_suspend(struct pcmcia_device *link)
155{ 176{
156 dev_link_t *link = dev_to_instance(dev); 177 struct serial_info *info = link->priv;
157 link->state |= DEV_SUSPEND; 178 int i;
158
159 if (link->state & DEV_CONFIG) {
160 struct serial_info *info = link->priv;
161 int i;
162
163 for (i = 0; i < info->ndev; i++)
164 serial8250_suspend_port(info->line[i]);
165 179
166 if (!info->slave) 180 for (i = 0; i < info->ndev; i++)
167 pcmcia_release_configuration(link->handle); 181 serial8250_suspend_port(info->line[i]);
168 }
169 182
170 return 0; 183 return 0;
171} 184}
172 185
173static int serial_resume(struct pcmcia_device *dev) 186static int serial_resume(struct pcmcia_device *link)
174{ 187{
175 dev_link_t *link = dev_to_instance(dev); 188 if (pcmcia_dev_present(link)) {
176 link->state &= ~DEV_SUSPEND;
177
178 if (DEV_OK(link)) {
179 struct serial_info *info = link->priv; 189 struct serial_info *info = link->priv;
180 int i; 190 int i;
181 191
182 if (!info->slave)
183 pcmcia_request_configuration(link->handle, &link->conf);
184
185 for (i = 0; i < info->ndev; i++) 192 for (i = 0; i < info->ndev; i++)
186 serial8250_resume_port(info->line[i]); 193 serial8250_resume_port(info->line[i]);
194 wakeup_card(info);
187 } 195 }
188 196
189 return 0; 197 return 0;
@@ -197,10 +205,9 @@ static int serial_resume(struct pcmcia_device *dev)
197 205
198======================================================================*/ 206======================================================================*/
199 207
200static int serial_probe(struct pcmcia_device *p_dev) 208static int serial_probe(struct pcmcia_device *link)
201{ 209{
202 struct serial_info *info; 210 struct serial_info *info;
203 dev_link_t *link;
204 211
205 DEBUG(0, "serial_attach()\n"); 212 DEBUG(0, "serial_attach()\n");
206 213
@@ -209,7 +216,7 @@ static int serial_probe(struct pcmcia_device *p_dev)
209 if (!info) 216 if (!info)
210 return -ENOMEM; 217 return -ENOMEM;
211 memset(info, 0, sizeof (*info)); 218 memset(info, 0, sizeof (*info));
212 link = &info->link; 219 info->p_dev = link;
213 link->priv = info; 220 link->priv = info;
214 221
215 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 222 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
@@ -223,12 +230,7 @@ static int serial_probe(struct pcmcia_device *p_dev)
223 } 230 }
224 link->conf.IntType = INT_MEMORY_AND_IO; 231 link->conf.IntType = INT_MEMORY_AND_IO;
225 232
226 link->handle = p_dev; 233 return serial_config(link);
227 p_dev->instance = link;
228 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
229 serial_config(link);
230
231 return 0;
232} 234}
233 235
234/*====================================================================== 236/*======================================================================
@@ -240,9 +242,8 @@ static int serial_probe(struct pcmcia_device *p_dev)
240 242
241======================================================================*/ 243======================================================================*/
242 244
243static void serial_detach(struct pcmcia_device *p_dev) 245static void serial_detach(struct pcmcia_device *link)
244{ 246{
245 dev_link_t *link = dev_to_instance(p_dev);
246 struct serial_info *info = link->priv; 247 struct serial_info *info = link->priv;
247 248
248 DEBUG(0, "serial_detach(0x%p)\n", link); 249 DEBUG(0, "serial_detach(0x%p)\n", link);
@@ -263,7 +264,7 @@ static void serial_detach(struct pcmcia_device *p_dev)
263 264
264/*====================================================================*/ 265/*====================================================================*/
265 266
266static int setup_serial(client_handle_t handle, struct serial_info * info, 267static int setup_serial(struct pcmcia_device *handle, struct serial_info * info,
267 kio_addr_t iobase, int irq) 268 kio_addr_t iobase, int irq)
268{ 269{
269 struct uart_port port; 270 struct uart_port port;
@@ -298,7 +299,7 @@ static int setup_serial(client_handle_t handle, struct serial_info * info,
298/*====================================================================*/ 299/*====================================================================*/
299 300
300static int 301static int
301first_tuple(client_handle_t handle, tuple_t * tuple, cisparse_t * parse) 302first_tuple(struct pcmcia_device *handle, tuple_t * tuple, cisparse_t * parse)
302{ 303{
303 int i; 304 int i;
304 i = pcmcia_get_first_tuple(handle, tuple); 305 i = pcmcia_get_first_tuple(handle, tuple);
@@ -311,7 +312,7 @@ first_tuple(client_handle_t handle, tuple_t * tuple, cisparse_t * parse)
311} 312}
312 313
313static int 314static int
314next_tuple(client_handle_t handle, tuple_t * tuple, cisparse_t * parse) 315next_tuple(struct pcmcia_device *handle, tuple_t * tuple, cisparse_t * parse)
315{ 316{
316 int i; 317 int i;
317 i = pcmcia_get_next_tuple(handle, tuple); 318 i = pcmcia_get_next_tuple(handle, tuple);
@@ -325,11 +326,10 @@ next_tuple(client_handle_t handle, tuple_t * tuple, cisparse_t * parse)
325 326
326/*====================================================================*/ 327/*====================================================================*/
327 328
328static int simple_config(dev_link_t *link) 329static int simple_config(struct pcmcia_device *link)
329{ 330{
330 static const kio_addr_t base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; 331 static const kio_addr_t base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
331 static const int size_table[2] = { 8, 16 }; 332 static const int size_table[2] = { 8, 16 };
332 client_handle_t handle = link->handle;
333 struct serial_info *info = link->priv; 333 struct serial_info *info = link->priv;
334 struct serial_cfg_mem *cfg_mem; 334 struct serial_cfg_mem *cfg_mem;
335 tuple_t *tuple; 335 tuple_t *tuple;
@@ -350,7 +350,7 @@ static int simple_config(dev_link_t *link)
350 buf = cfg_mem->buf; 350 buf = cfg_mem->buf;
351 351
352 /* If the card is already configured, look up the port and irq */ 352 /* If the card is already configured, look up the port and irq */
353 i = pcmcia_get_configuration_info(handle, &config); 353 i = pcmcia_get_configuration_info(link, &config);
354 if ((i == CS_SUCCESS) && (config.Attributes & CONF_VALID_CLIENT)) { 354 if ((i == CS_SUCCESS) && (config.Attributes & CONF_VALID_CLIENT)) {
355 kio_addr_t port = 0; 355 kio_addr_t port = 0;
356 if ((config.BasePort2 != 0) && (config.NumPorts2 == 8)) { 356 if ((config.BasePort2 != 0) && (config.NumPorts2 == 8)) {
@@ -363,10 +363,9 @@ static int simple_config(dev_link_t *link)
363 } 363 }
364 if (info->slave) { 364 if (info->slave) {
365 kfree(cfg_mem); 365 kfree(cfg_mem);
366 return setup_serial(handle, info, port, config.AssignedIRQ); 366 return setup_serial(link, info, port, config.AssignedIRQ);
367 } 367 }
368 } 368 }
369 link->conf.Vcc = config.Vcc;
370 369
371 /* First pass: look for a config entry that looks normal. */ 370 /* First pass: look for a config entry that looks normal. */
372 tuple->TupleData = (cisdata_t *) buf; 371 tuple->TupleData = (cisdata_t *) buf;
@@ -377,12 +376,12 @@ static int simple_config(dev_link_t *link)
377 /* Two tries: without IO aliases, then with aliases */ 376 /* Two tries: without IO aliases, then with aliases */
378 for (s = 0; s < 2; s++) { 377 for (s = 0; s < 2; s++) {
379 for (try = 0; try < 2; try++) { 378 for (try = 0; try < 2; try++) {
380 i = first_tuple(handle, tuple, parse); 379 i = first_tuple(link, tuple, parse);
381 while (i != CS_NO_MORE_ITEMS) { 380 while (i != CS_NO_MORE_ITEMS) {
382 if (i != CS_SUCCESS) 381 if (i != CS_SUCCESS)
383 goto next_entry; 382 goto next_entry;
384 if (cf->vpp1.present & (1 << CISTPL_POWER_VNOM)) 383 if (cf->vpp1.present & (1 << CISTPL_POWER_VNOM))
385 link->conf.Vpp1 = link->conf.Vpp2 = 384 link->conf.Vpp =
386 cf->vpp1.param[CISTPL_POWER_VNOM] / 10000; 385 cf->vpp1.param[CISTPL_POWER_VNOM] / 10000;
387 if ((cf->io.nwin > 0) && (cf->io.win[0].len == size_table[s]) && 386 if ((cf->io.nwin > 0) && (cf->io.win[0].len == size_table[s]) &&
388 (cf->io.win[0].base != 0)) { 387 (cf->io.win[0].base != 0)) {
@@ -390,19 +389,19 @@ static int simple_config(dev_link_t *link)
390 link->io.BasePort1 = cf->io.win[0].base; 389 link->io.BasePort1 = cf->io.win[0].base;
391 link->io.IOAddrLines = (try == 0) ? 390 link->io.IOAddrLines = (try == 0) ?
392 16 : cf->io.flags & CISTPL_IO_LINES_MASK; 391 16 : cf->io.flags & CISTPL_IO_LINES_MASK;
393 i = pcmcia_request_io(link->handle, &link->io); 392 i = pcmcia_request_io(link, &link->io);
394 if (i == CS_SUCCESS) 393 if (i == CS_SUCCESS)
395 goto found_port; 394 goto found_port;
396 } 395 }
397next_entry: 396next_entry:
398 i = next_tuple(handle, tuple, parse); 397 i = next_tuple(link, tuple, parse);
399 } 398 }
400 } 399 }
401 } 400 }
402 /* Second pass: try to find an entry that isn't picky about 401 /* Second pass: try to find an entry that isn't picky about
403 its base address, then try to grab any standard serial port 402 its base address, then try to grab any standard serial port
404 address, and finally try to get any free port. */ 403 address, and finally try to get any free port. */
405 i = first_tuple(handle, tuple, parse); 404 i = first_tuple(link, tuple, parse);
406 while (i != CS_NO_MORE_ITEMS) { 405 while (i != CS_NO_MORE_ITEMS) {
407 if ((i == CS_SUCCESS) && (cf->io.nwin > 0) && 406 if ((i == CS_SUCCESS) && (cf->io.nwin > 0) &&
408 ((cf->io.flags & CISTPL_IO_LINES_MASK) <= 3)) { 407 ((cf->io.flags & CISTPL_IO_LINES_MASK) <= 3)) {
@@ -410,50 +409,48 @@ next_entry:
410 for (j = 0; j < 5; j++) { 409 for (j = 0; j < 5; j++) {
411 link->io.BasePort1 = base[j]; 410 link->io.BasePort1 = base[j];
412 link->io.IOAddrLines = base[j] ? 16 : 3; 411 link->io.IOAddrLines = base[j] ? 16 : 3;
413 i = pcmcia_request_io(link->handle, &link->io); 412 i = pcmcia_request_io(link, &link->io);
414 if (i == CS_SUCCESS) 413 if (i == CS_SUCCESS)
415 goto found_port; 414 goto found_port;
416 } 415 }
417 } 416 }
418 i = next_tuple(handle, tuple, parse); 417 i = next_tuple(link, tuple, parse);
419 } 418 }
420 419
421 found_port: 420 found_port:
422 if (i != CS_SUCCESS) { 421 if (i != CS_SUCCESS) {
423 printk(KERN_NOTICE 422 printk(KERN_NOTICE
424 "serial_cs: no usable port range found, giving up\n"); 423 "serial_cs: no usable port range found, giving up\n");
425 cs_error(link->handle, RequestIO, i); 424 cs_error(link, RequestIO, i);
426 kfree(cfg_mem); 425 kfree(cfg_mem);
427 return -1; 426 return -1;
428 } 427 }
429 428
430 i = pcmcia_request_irq(link->handle, &link->irq); 429 i = pcmcia_request_irq(link, &link->irq);
431 if (i != CS_SUCCESS) { 430 if (i != CS_SUCCESS) {
432 cs_error(link->handle, RequestIRQ, i); 431 cs_error(link, RequestIRQ, i);
433 link->irq.AssignedIRQ = 0; 432 link->irq.AssignedIRQ = 0;
434 } 433 }
435 if (info->multi && (info->manfid == MANFID_3COM)) 434 if (info->multi && (info->manfid == MANFID_3COM))
436 link->conf.ConfigIndex &= ~(0x08); 435 link->conf.ConfigIndex &= ~(0x08);
437 i = pcmcia_request_configuration(link->handle, &link->conf); 436 i = pcmcia_request_configuration(link, &link->conf);
438 if (i != CS_SUCCESS) { 437 if (i != CS_SUCCESS) {
439 cs_error(link->handle, RequestConfiguration, i); 438 cs_error(link, RequestConfiguration, i);
440 kfree(cfg_mem); 439 kfree(cfg_mem);
441 return -1; 440 return -1;
442 } 441 }
443 kfree(cfg_mem); 442 kfree(cfg_mem);
444 return setup_serial(handle, info, link->io.BasePort1, link->irq.AssignedIRQ); 443 return setup_serial(link, info, link->io.BasePort1, link->irq.AssignedIRQ);
445} 444}
446 445
447static int multi_config(dev_link_t * link) 446static int multi_config(struct pcmcia_device * link)
448{ 447{
449 client_handle_t handle = link->handle;
450 struct serial_info *info = link->priv; 448 struct serial_info *info = link->priv;
451 struct serial_cfg_mem *cfg_mem; 449 struct serial_cfg_mem *cfg_mem;
452 tuple_t *tuple; 450 tuple_t *tuple;
453 u_char *buf; 451 u_char *buf;
454 cisparse_t *parse; 452 cisparse_t *parse;
455 cistpl_cftable_entry_t *cf; 453 cistpl_cftable_entry_t *cf;
456 config_info_t config;
457 int i, rc, base2 = 0; 454 int i, rc, base2 = 0;
458 455
459 cfg_mem = kmalloc(sizeof(struct serial_cfg_mem), GFP_KERNEL); 456 cfg_mem = kmalloc(sizeof(struct serial_cfg_mem), GFP_KERNEL);
@@ -464,14 +461,6 @@ static int multi_config(dev_link_t * link)
464 cf = &parse->cftable_entry; 461 cf = &parse->cftable_entry;
465 buf = cfg_mem->buf; 462 buf = cfg_mem->buf;
466 463
467 i = pcmcia_get_configuration_info(handle, &config);
468 if (i != CS_SUCCESS) {
469 cs_error(handle, GetConfigurationInfo, i);
470 rc = -1;
471 goto free_cfg_mem;
472 }
473 link->conf.Vcc = config.Vcc;
474
475 tuple->TupleData = (cisdata_t *) buf; 464 tuple->TupleData = (cisdata_t *) buf;
476 tuple->TupleOffset = 0; 465 tuple->TupleOffset = 0;
477 tuple->TupleDataMax = 255; 466 tuple->TupleDataMax = 255;
@@ -480,7 +469,7 @@ static int multi_config(dev_link_t * link)
480 469
481 /* First, look for a generic full-sized window */ 470 /* First, look for a generic full-sized window */
482 link->io.NumPorts1 = info->multi * 8; 471 link->io.NumPorts1 = info->multi * 8;
483 i = first_tuple(handle, tuple, parse); 472 i = first_tuple(link, tuple, parse);
484 while (i != CS_NO_MORE_ITEMS) { 473 while (i != CS_NO_MORE_ITEMS) {
485 /* The quad port cards have bad CIS's, so just look for a 474 /* The quad port cards have bad CIS's, so just look for a
486 window larger than 8 ports and assume it will be right */ 475 window larger than 8 ports and assume it will be right */
@@ -490,19 +479,19 @@ static int multi_config(dev_link_t * link)
490 link->io.BasePort1 = cf->io.win[0].base; 479 link->io.BasePort1 = cf->io.win[0].base;
491 link->io.IOAddrLines = 480 link->io.IOAddrLines =
492 cf->io.flags & CISTPL_IO_LINES_MASK; 481 cf->io.flags & CISTPL_IO_LINES_MASK;
493 i = pcmcia_request_io(link->handle, &link->io); 482 i = pcmcia_request_io(link, &link->io);
494 base2 = link->io.BasePort1 + 8; 483 base2 = link->io.BasePort1 + 8;
495 if (i == CS_SUCCESS) 484 if (i == CS_SUCCESS)
496 break; 485 break;
497 } 486 }
498 i = next_tuple(handle, tuple, parse); 487 i = next_tuple(link, tuple, parse);
499 } 488 }
500 489
501 /* If that didn't work, look for two windows */ 490 /* If that didn't work, look for two windows */
502 if (i != CS_SUCCESS) { 491 if (i != CS_SUCCESS) {
503 link->io.NumPorts1 = link->io.NumPorts2 = 8; 492 link->io.NumPorts1 = link->io.NumPorts2 = 8;
504 info->multi = 2; 493 info->multi = 2;
505 i = first_tuple(handle, tuple, parse); 494 i = first_tuple(link, tuple, parse);
506 while (i != CS_NO_MORE_ITEMS) { 495 while (i != CS_NO_MORE_ITEMS) {
507 if ((i == CS_SUCCESS) && (cf->io.nwin == 2)) { 496 if ((i == CS_SUCCESS) && (cf->io.nwin == 2)) {
508 link->conf.ConfigIndex = cf->index; 497 link->conf.ConfigIndex = cf->index;
@@ -510,26 +499,26 @@ static int multi_config(dev_link_t * link)
510 link->io.BasePort2 = cf->io.win[1].base; 499 link->io.BasePort2 = cf->io.win[1].base;
511 link->io.IOAddrLines = 500 link->io.IOAddrLines =
512 cf->io.flags & CISTPL_IO_LINES_MASK; 501 cf->io.flags & CISTPL_IO_LINES_MASK;
513 i = pcmcia_request_io(link->handle, &link->io); 502 i = pcmcia_request_io(link, &link->io);
514 base2 = link->io.BasePort2; 503 base2 = link->io.BasePort2;
515 if (i == CS_SUCCESS) 504 if (i == CS_SUCCESS)
516 break; 505 break;
517 } 506 }
518 i = next_tuple(handle, tuple, parse); 507 i = next_tuple(link, tuple, parse);
519 } 508 }
520 } 509 }
521 510
522 if (i != CS_SUCCESS) { 511 if (i != CS_SUCCESS) {
523 cs_error(link->handle, RequestIO, i); 512 cs_error(link, RequestIO, i);
524 rc = -1; 513 rc = -1;
525 goto free_cfg_mem; 514 goto free_cfg_mem;
526 } 515 }
527 516
528 i = pcmcia_request_irq(link->handle, &link->irq); 517 i = pcmcia_request_irq(link, &link->irq);
529 if (i != CS_SUCCESS) { 518 if (i != CS_SUCCESS) {
530 printk(KERN_NOTICE 519 printk(KERN_NOTICE
531 "serial_cs: no usable port range found, giving up\n"); 520 "serial_cs: no usable port range found, giving up\n");
532 cs_error(link->handle, RequestIRQ, i); 521 cs_error(link, RequestIRQ, i);
533 link->irq.AssignedIRQ = 0; 522 link->irq.AssignedIRQ = 0;
534 } 523 }
535 /* Socket Dual IO: this enables irq's for second port */ 524 /* Socket Dual IO: this enables irq's for second port */
@@ -537,35 +526,43 @@ static int multi_config(dev_link_t * link)
537 link->conf.Present |= PRESENT_EXT_STATUS; 526 link->conf.Present |= PRESENT_EXT_STATUS;
538 link->conf.ExtStatus = ESR_REQ_ATTN_ENA; 527 link->conf.ExtStatus = ESR_REQ_ATTN_ENA;
539 } 528 }
540 i = pcmcia_request_configuration(link->handle, &link->conf); 529 i = pcmcia_request_configuration(link, &link->conf);
541 if (i != CS_SUCCESS) { 530 if (i != CS_SUCCESS) {
542 cs_error(link->handle, RequestConfiguration, i); 531 cs_error(link, RequestConfiguration, i);
543 rc = -1; 532 rc = -1;
544 goto free_cfg_mem; 533 goto free_cfg_mem;
545 } 534 }
546 535
547 /* The Oxford Semiconductor OXCF950 cards are in fact single-port: 536 /* The Oxford Semiconductor OXCF950 cards are in fact single-port:
548 8 registers are for the UART, the others are extra registers */ 537 * 8 registers are for the UART, the others are extra registers.
549 if (info->manfid == MANFID_OXSEMI) { 538 * Siemen's MC45 PCMCIA (Possio's GCC) is OXCF950 based too.
539 */
540 if (info->manfid == MANFID_OXSEMI || (info->manfid == MANFID_POSSIO &&
541 info->prodid == PRODID_POSSIO_GCC)) {
542 int err;
543
550 if (cf->index == 1 || cf->index == 3) { 544 if (cf->index == 1 || cf->index == 3) {
551 setup_serial(handle, info, base2, link->irq.AssignedIRQ); 545 err = setup_serial(link, info, base2,
552 outb(12, link->io.BasePort1 + 1); 546 link->irq.AssignedIRQ);
547 base2 = link->io.BasePort1;
553 } else { 548 } else {
554 setup_serial(handle, info, link->io.BasePort1, link->irq.AssignedIRQ); 549 err = setup_serial(link, info, link->io.BasePort1,
555 outb(12, base2 + 1); 550 link->irq.AssignedIRQ);
556 } 551 }
552 info->c950ctrl = base2;
553 wakeup_card(info);
557 rc = 0; 554 rc = 0;
558 goto free_cfg_mem; 555 goto free_cfg_mem;
559 } 556 }
560 557
561 setup_serial(handle, info, link->io.BasePort1, link->irq.AssignedIRQ); 558 setup_serial(link, info, link->io.BasePort1, link->irq.AssignedIRQ);
562 /* The Nokia cards are not really multiport cards */ 559 /* The Nokia cards are not really multiport cards */
563 if (info->manfid == MANFID_NOKIA) { 560 if (info->manfid == MANFID_NOKIA) {
564 rc = 0; 561 rc = 0;
565 goto free_cfg_mem; 562 goto free_cfg_mem;
566 } 563 }
567 for (i = 0; i < info->multi - 1; i++) 564 for (i = 0; i < info->multi - 1; i++)
568 setup_serial(handle, info, base2 + (8 * i), 565 setup_serial(link, info, base2 + (8 * i),
569 link->irq.AssignedIRQ); 566 link->irq.AssignedIRQ);
570 rc = 0; 567 rc = 0;
571free_cfg_mem: 568free_cfg_mem:
@@ -581,9 +578,8 @@ free_cfg_mem:
581 578
582======================================================================*/ 579======================================================================*/
583 580
584void serial_config(dev_link_t * link) 581static int serial_config(struct pcmcia_device * link)
585{ 582{
586 client_handle_t handle = link->handle;
587 struct serial_info *info = link->priv; 583 struct serial_info *info = link->priv;
588 struct serial_cfg_mem *cfg_mem; 584 struct serial_cfg_mem *cfg_mem;
589 tuple_t *tuple; 585 tuple_t *tuple;
@@ -609,7 +605,7 @@ void serial_config(dev_link_t * link)
609 tuple->Attributes = 0; 605 tuple->Attributes = 0;
610 /* Get configuration register information */ 606 /* Get configuration register information */
611 tuple->DesiredTuple = CISTPL_CONFIG; 607 tuple->DesiredTuple = CISTPL_CONFIG;
612 last_ret = first_tuple(handle, tuple, parse); 608 last_ret = first_tuple(link, tuple, parse);
613 if (last_ret != CS_SUCCESS) { 609 if (last_ret != CS_SUCCESS) {
614 last_fn = ParseTuple; 610 last_fn = ParseTuple;
615 goto cs_failed; 611 goto cs_failed;
@@ -617,18 +613,16 @@ void serial_config(dev_link_t * link)
617 link->conf.ConfigBase = parse->config.base; 613 link->conf.ConfigBase = parse->config.base;
618 link->conf.Present = parse->config.rmask[0]; 614 link->conf.Present = parse->config.rmask[0];
619 615
620 /* Configure card */
621 link->state |= DEV_CONFIG;
622
623 /* Is this a compliant multifunction card? */ 616 /* Is this a compliant multifunction card? */
624 tuple->DesiredTuple = CISTPL_LONGLINK_MFC; 617 tuple->DesiredTuple = CISTPL_LONGLINK_MFC;
625 tuple->Attributes = TUPLE_RETURN_COMMON | TUPLE_RETURN_LINK; 618 tuple->Attributes = TUPLE_RETURN_COMMON | TUPLE_RETURN_LINK;
626 info->multi = (first_tuple(handle, tuple, parse) == CS_SUCCESS); 619 info->multi = (first_tuple(link, tuple, parse) == CS_SUCCESS);
627 620
628 /* Is this a multiport card? */ 621 /* Is this a multiport card? */
629 tuple->DesiredTuple = CISTPL_MANFID; 622 tuple->DesiredTuple = CISTPL_MANFID;
630 if (first_tuple(handle, tuple, parse) == CS_SUCCESS) { 623 if (first_tuple(link, tuple, parse) == CS_SUCCESS) {
631 info->manfid = parse->manfid.manf; 624 info->manfid = parse->manfid.manf;
625 info->prodid = le16_to_cpu(buf[1]);
632 for (i = 0; i < MULTI_COUNT; i++) 626 for (i = 0; i < MULTI_COUNT; i++)
633 if ((info->manfid == multi_id[i].manfid) && 627 if ((info->manfid == multi_id[i].manfid) &&
634 (parse->manfid.card == multi_id[i].prodid)) 628 (parse->manfid.card == multi_id[i].prodid))
@@ -641,11 +635,11 @@ void serial_config(dev_link_t * link)
641 multifunction cards that ask for appropriate IO port ranges */ 635 multifunction cards that ask for appropriate IO port ranges */
642 tuple->DesiredTuple = CISTPL_FUNCID; 636 tuple->DesiredTuple = CISTPL_FUNCID;
643 if ((info->multi == 0) && 637 if ((info->multi == 0) &&
644 ((first_tuple(handle, tuple, parse) != CS_SUCCESS) || 638 ((first_tuple(link, tuple, parse) != CS_SUCCESS) ||
645 (parse->funcid.func == CISTPL_FUNCID_MULTI) || 639 (parse->funcid.func == CISTPL_FUNCID_MULTI) ||
646 (parse->funcid.func == CISTPL_FUNCID_SERIAL))) { 640 (parse->funcid.func == CISTPL_FUNCID_SERIAL))) {
647 tuple->DesiredTuple = CISTPL_CFTABLE_ENTRY; 641 tuple->DesiredTuple = CISTPL_CFTABLE_ENTRY;
648 if (first_tuple(handle, tuple, parse) == CS_SUCCESS) { 642 if (first_tuple(link, tuple, parse) == CS_SUCCESS) {
649 if ((cf->io.nwin == 1) && (cf->io.win[0].len % 8 == 0)) 643 if ((cf->io.nwin == 1) && (cf->io.win[0].len % 8 == 0))
650 info->multi = cf->io.win[0].len >> 3; 644 info->multi = cf->io.win[0].len >> 3;
651 if ((cf->io.nwin == 2) && (cf->io.win[0].len == 8) && 645 if ((cf->io.nwin == 2) && (cf->io.win[0].len == 8) &&
@@ -664,31 +658,30 @@ void serial_config(dev_link_t * link)
664 658
665 if (info->manfid == MANFID_IBM) { 659 if (info->manfid == MANFID_IBM) {
666 conf_reg_t reg = { 0, CS_READ, 0x800, 0 }; 660 conf_reg_t reg = { 0, CS_READ, 0x800, 0 };
667 last_ret = pcmcia_access_configuration_register(link->handle, &reg); 661 last_ret = pcmcia_access_configuration_register(link, &reg);
668 if (last_ret) { 662 if (last_ret) {
669 last_fn = AccessConfigurationRegister; 663 last_fn = AccessConfigurationRegister;
670 goto cs_failed; 664 goto cs_failed;
671 } 665 }
672 reg.Action = CS_WRITE; 666 reg.Action = CS_WRITE;
673 reg.Value = reg.Value | 1; 667 reg.Value = reg.Value | 1;
674 last_ret = pcmcia_access_configuration_register(link->handle, &reg); 668 last_ret = pcmcia_access_configuration_register(link, &reg);
675 if (last_ret) { 669 if (last_ret) {
676 last_fn = AccessConfigurationRegister; 670 last_fn = AccessConfigurationRegister;
677 goto cs_failed; 671 goto cs_failed;
678 } 672 }
679 } 673 }
680 674
681 link->dev = &info->node[0]; 675 link->dev_node = &info->node[0];
682 link->state &= ~DEV_CONFIG_PENDING;
683 kfree(cfg_mem); 676 kfree(cfg_mem);
684 return; 677 return 0;
685 678
686 cs_failed: 679 cs_failed:
687 cs_error(link->handle, last_fn, last_ret); 680 cs_error(link, last_fn, last_ret);
688 failed: 681 failed:
689 serial_remove(link); 682 serial_remove(link);
690 link->state &= ~DEV_CONFIG_PENDING;
691 kfree(cfg_mem); 683 kfree(cfg_mem);
684 return -ENODEV;
692} 685}
693 686
694static struct pcmcia_device_id serial_ids[] = { 687static struct pcmcia_device_id serial_ids[] = {
@@ -739,6 +732,7 @@ static struct pcmcia_device_id serial_ids[] = {
739 PCMCIA_MFC_DEVICE_PROD_ID1(1, "Motorola MARQUIS", 0xf03e4e77), 732 PCMCIA_MFC_DEVICE_PROD_ID1(1, "Motorola MARQUIS", 0xf03e4e77),
740 PCMCIA_MFC_DEVICE_PROD_ID2(1, "FAX/Modem/Ethernet Combo Card ", 0x1ed59302), 733 PCMCIA_MFC_DEVICE_PROD_ID2(1, "FAX/Modem/Ethernet Combo Card ", 0x1ed59302),
741 PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0301), 734 PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0301),
735 PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x0276),
742 PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0039), 736 PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0039),
743 PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0006), 737 PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0006),
744 PCMCIA_DEVICE_MANF_CARD(0x0105, 0x410a), 738 PCMCIA_DEVICE_MANF_CARD(0x0105, 0x410a),
@@ -757,6 +751,7 @@ static struct pcmcia_device_id serial_ids[] = {
757 PCMCIA_DEVICE_PROD_ID14("MEGAHERTZ", "PCMCIA MODEM", 0xf510db04, 0xbd6c43ef), 751 PCMCIA_DEVICE_PROD_ID14("MEGAHERTZ", "PCMCIA MODEM", 0xf510db04, 0xbd6c43ef),
758 PCMCIA_DEVICE_PROD_ID124("TOSHIBA", "T144PF", "PCMCIA MODEM", 0xb4585a1a, 0x7271409c, 0xbd6c43ef), 752 PCMCIA_DEVICE_PROD_ID124("TOSHIBA", "T144PF", "PCMCIA MODEM", 0xb4585a1a, 0x7271409c, 0xbd6c43ef),
759 PCMCIA_DEVICE_PROD_ID123("FUJITSU", "FC14F ", "MBH10213", 0x6ee5a3d8, 0x30ead12b, 0xb00f05a0), 753 PCMCIA_DEVICE_PROD_ID123("FUJITSU", "FC14F ", "MBH10213", 0x6ee5a3d8, 0x30ead12b, 0xb00f05a0),
754 PCMCIA_DEVICE_PROD_ID123("Novatel Wireless", "Merlin UMTS Modem", "U630", 0x32607776, 0xd9e73b13, 0xe87332e),
760 PCMCIA_DEVICE_PROD_ID13("MEGAHERTZ", "V.34 PCMCIA MODEM", 0xf510db04, 0xbb2cce4a), 755 PCMCIA_DEVICE_PROD_ID13("MEGAHERTZ", "V.34 PCMCIA MODEM", 0xf510db04, 0xbb2cce4a),
761 PCMCIA_DEVICE_PROD_ID12("Brain Boxes", "Bluetooth PC Card", 0xee138382, 0xd4ce9b02), 756 PCMCIA_DEVICE_PROD_ID12("Brain Boxes", "Bluetooth PC Card", 0xee138382, 0xd4ce9b02),
762 PCMCIA_DEVICE_PROD_ID12("CIRRUS LOGIC", "FAX MODEM", 0xe625f451, 0xcecd6dfa), 757 PCMCIA_DEVICE_PROD_ID12("CIRRUS LOGIC", "FAX MODEM", 0xe625f451, 0xcecd6dfa),
diff --git a/drivers/telephony/ixj_pcmcia.c b/drivers/telephony/ixj_pcmcia.c
index d3a7b0c3d38b..dda0ca45d904 100644
--- a/drivers/telephony/ixj_pcmcia.c
+++ b/drivers/telephony/ixj_pcmcia.c
@@ -35,73 +35,52 @@ typedef struct ixj_info_t {
35} ixj_info_t; 35} ixj_info_t;
36 36
37static void ixj_detach(struct pcmcia_device *p_dev); 37static void ixj_detach(struct pcmcia_device *p_dev);
38static void ixj_config(dev_link_t * link); 38static int ixj_config(struct pcmcia_device * link);
39static void ixj_cs_release(dev_link_t * link); 39static void ixj_cs_release(struct pcmcia_device * link);
40 40
41static int ixj_attach(struct pcmcia_device *p_dev) 41static int ixj_probe(struct pcmcia_device *p_dev)
42{ 42{
43 dev_link_t *link;
44
45 DEBUG(0, "ixj_attach()\n"); 43 DEBUG(0, "ixj_attach()\n");
46 /* Create new ixj device */ 44 /* Create new ixj device */
47 link = kmalloc(sizeof(struct dev_link_t), GFP_KERNEL); 45 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
48 if (!link) 46 p_dev->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
49 return -ENOMEM; 47 p_dev->io.IOAddrLines = 3;
50 memset(link, 0, sizeof(struct dev_link_t)); 48 p_dev->conf.IntType = INT_MEMORY_AND_IO;
51 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 49 p_dev->priv = kmalloc(sizeof(struct ixj_info_t), GFP_KERNEL);
52 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 50 if (!p_dev->priv) {
53 link->io.IOAddrLines = 3;
54 link->conf.Vcc = 50;
55 link->conf.IntType = INT_MEMORY_AND_IO;
56 link->priv = kmalloc(sizeof(struct ixj_info_t), GFP_KERNEL);
57 if (!link->priv) {
58 kfree(link);
59 return -ENOMEM; 51 return -ENOMEM;
60 } 52 }
61 memset(link->priv, 0, sizeof(struct ixj_info_t)); 53 memset(p_dev->priv, 0, sizeof(struct ixj_info_t));
62
63 link->handle = p_dev;
64 p_dev->instance = link;
65 54
66 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING; 55 return ixj_config(p_dev);
67 ixj_config(link);
68
69 return 0;
70} 56}
71 57
72static void ixj_detach(struct pcmcia_device *p_dev) 58static void ixj_detach(struct pcmcia_device *link)
73{ 59{
74 dev_link_t *link = dev_to_instance(p_dev);
75
76 DEBUG(0, "ixj_detach(0x%p)\n", link); 60 DEBUG(0, "ixj_detach(0x%p)\n", link);
77 61
78 link->state &= ~DEV_RELEASE_PENDING; 62 ixj_cs_release(link);
79 if (link->state & DEV_CONFIG)
80 ixj_cs_release(link);
81 63
82 kfree(link->priv); 64 kfree(link->priv);
83 kfree(link);
84} 65}
85 66
86#define CS_CHECK(fn, ret) \ 67#define CS_CHECK(fn, ret) \
87do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 68do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
88 69
89static void ixj_get_serial(dev_link_t * link, IXJ * j) 70static void ixj_get_serial(struct pcmcia_device * link, IXJ * j)
90{ 71{
91 client_handle_t handle;
92 tuple_t tuple; 72 tuple_t tuple;
93 u_short buf[128]; 73 u_short buf[128];
94 char *str; 74 char *str;
95 int last_ret, last_fn, i, place; 75 int last_ret, last_fn, i, place;
96 handle = link->handle;
97 DEBUG(0, "ixj_get_serial(0x%p)\n", link); 76 DEBUG(0, "ixj_get_serial(0x%p)\n", link);
98 tuple.TupleData = (cisdata_t *) buf; 77 tuple.TupleData = (cisdata_t *) buf;
99 tuple.TupleOffset = 0; 78 tuple.TupleOffset = 0;
100 tuple.TupleDataMax = 80; 79 tuple.TupleDataMax = 80;
101 tuple.Attributes = 0; 80 tuple.Attributes = 0;
102 tuple.DesiredTuple = CISTPL_VERS_1; 81 tuple.DesiredTuple = CISTPL_VERS_1;
103 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 82 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
104 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 83 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
105 str = (char *) buf; 84 str = (char *) buf;
106 printk("PCMCIA Version %d.%d\n", str[0], str[1]); 85 printk("PCMCIA Version %d.%d\n", str[0], str[1]);
107 str += 2; 86 str += 2;
@@ -149,22 +128,19 @@ static void ixj_get_serial(dev_link_t * link, IXJ * j)
149 return; 128 return;
150} 129}
151 130
152static void ixj_config(dev_link_t * link) 131static int ixj_config(struct pcmcia_device * link)
153{ 132{
154 IXJ *j; 133 IXJ *j;
155 client_handle_t handle;
156 ixj_info_t *info; 134 ixj_info_t *info;
157 tuple_t tuple; 135 tuple_t tuple;
158 u_short buf[128]; 136 u_short buf[128];
159 cisparse_t parse; 137 cisparse_t parse;
160 config_info_t conf;
161 cistpl_cftable_entry_t *cfg = &parse.cftable_entry; 138 cistpl_cftable_entry_t *cfg = &parse.cftable_entry;
162 cistpl_cftable_entry_t dflt = 139 cistpl_cftable_entry_t dflt =
163 { 140 {
164 0 141 0
165 }; 142 };
166 int last_ret, last_fn; 143 int last_ret, last_fn;
167 handle = link->handle;
168 info = link->priv; 144 info = link->priv;
169 DEBUG(0, "ixj_config(0x%p)\n", link); 145 DEBUG(0, "ixj_config(0x%p)\n", link);
170 tuple.TupleData = (cisdata_t *) buf; 146 tuple.TupleData = (cisdata_t *) buf;
@@ -172,19 +148,17 @@ static void ixj_config(dev_link_t * link)
172 tuple.TupleDataMax = 255; 148 tuple.TupleDataMax = 255;
173 tuple.Attributes = 0; 149 tuple.Attributes = 0;
174 tuple.DesiredTuple = CISTPL_CONFIG; 150 tuple.DesiredTuple = CISTPL_CONFIG;
175 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 151 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
176 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 152 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
177 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 153 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
178 link->conf.ConfigBase = parse.config.base; 154 link->conf.ConfigBase = parse.config.base;
179 link->conf.Present = parse.config.rmask[0]; 155 link->conf.Present = parse.config.rmask[0];
180 link->state |= DEV_CONFIG;
181 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(handle, &conf));
182 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 156 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
183 tuple.Attributes = 0; 157 tuple.Attributes = 0;
184 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 158 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
185 while (1) { 159 while (1) {
186 if (pcmcia_get_tuple_data(handle, &tuple) != 0 || 160 if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
187 pcmcia_parse_tuple(handle, &tuple, &parse) != 0) 161 pcmcia_parse_tuple(link, &tuple, &parse) != 0)
188 goto next_entry; 162 goto next_entry;
189 if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) { 163 if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
190 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io; 164 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io;
@@ -195,7 +169,7 @@ static void ixj_config(dev_link_t * link)
195 link->io.BasePort2 = io->win[1].base; 169 link->io.BasePort2 = io->win[1].base;
196 link->io.NumPorts2 = io->win[1].len; 170 link->io.NumPorts2 = io->win[1].len;
197 } 171 }
198 if (pcmcia_request_io(link->handle, &link->io) != 0) 172 if (pcmcia_request_io(link, &link->io) != 0)
199 goto next_entry; 173 goto next_entry;
200 /* If we've got this far, we're done */ 174 /* If we've got this far, we're done */
201 break; 175 break;
@@ -203,10 +177,10 @@ static void ixj_config(dev_link_t * link)
203 next_entry: 177 next_entry:
204 if (cfg->flags & CISTPL_CFTABLE_DEFAULT) 178 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
205 dflt = *cfg; 179 dflt = *cfg;
206 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple)); 180 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
207 } 181 }
208 182
209 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf)); 183 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
210 184
211 /* 185 /*
212 * Register the card with the core. 186 * Register the card with the core.
@@ -215,46 +189,21 @@ static void ixj_config(dev_link_t * link)
215 189
216 info->ndev = 1; 190 info->ndev = 1;
217 info->node.major = PHONE_MAJOR; 191 info->node.major = PHONE_MAJOR;
218 link->dev = &info->node; 192 link->dev_node = &info->node;
219 ixj_get_serial(link, j); 193 ixj_get_serial(link, j);
220 link->state &= ~DEV_CONFIG_PENDING; 194 return 0;
221 return;
222 cs_failed: 195 cs_failed:
223 cs_error(link->handle, last_fn, last_ret); 196 cs_error(link, last_fn, last_ret);
224 ixj_cs_release(link); 197 ixj_cs_release(link);
198 return -ENODEV;
225} 199}
226 200
227static void ixj_cs_release(dev_link_t *link) 201static void ixj_cs_release(struct pcmcia_device *link)
228{ 202{
229 ixj_info_t *info = link->priv; 203 ixj_info_t *info = link->priv;
230 DEBUG(0, "ixj_cs_release(0x%p)\n", link); 204 DEBUG(0, "ixj_cs_release(0x%p)\n", link);
231 info->ndev = 0; 205 info->ndev = 0;
232 link->dev = NULL; 206 pcmcia_disable_device(link);
233 pcmcia_release_configuration(link->handle);
234 pcmcia_release_io(link->handle, &link->io);
235 link->state &= ~DEV_CONFIG;
236}
237
238static int ixj_suspend(struct pcmcia_device *dev)
239{
240 dev_link_t *link = dev_to_instance(dev);
241
242 link->state |= DEV_SUSPEND;
243 if (link->state & DEV_CONFIG)
244 pcmcia_release_configuration(link->handle);
245
246 return 0;
247}
248
249static int ixj_resume(struct pcmcia_device *dev)
250{
251 dev_link_t *link = dev_to_instance(dev);
252
253 link->state &= ~DEV_SUSPEND;
254 if (DEV_OK(link))
255 pcmcia_request_configuration(link->handle, &link->conf);
256
257 return 0;
258} 207}
259 208
260static struct pcmcia_device_id ixj_ids[] = { 209static struct pcmcia_device_id ixj_ids[] = {
@@ -268,11 +217,9 @@ static struct pcmcia_driver ixj_driver = {
268 .drv = { 217 .drv = {
269 .name = "ixj_cs", 218 .name = "ixj_cs",
270 }, 219 },
271 .probe = ixj_attach, 220 .probe = ixj_probe,
272 .remove = ixj_detach, 221 .remove = ixj_detach,
273 .id_table = ixj_ids, 222 .id_table = ixj_ids,
274 .suspend = ixj_suspend,
275 .resume = ixj_resume,
276}; 223};
277 224
278static int __init ixj_pcmcia_init(void) 225static int __init ixj_pcmcia_init(void)
diff --git a/drivers/usb/host/sl811_cs.c b/drivers/usb/host/sl811_cs.c
index 134d2000128a..302aa1ec312f 100644
--- a/drivers/usb/host/sl811_cs.c
+++ b/drivers/usb/host/sl811_cs.c
@@ -67,11 +67,11 @@ module_param(pc_debug, int, 0644);
67static const char driver_name[DEV_NAME_LEN] = "sl811_cs"; 67static const char driver_name[DEV_NAME_LEN] = "sl811_cs";
68 68
69typedef struct local_info_t { 69typedef struct local_info_t {
70 dev_link_t link; 70 struct pcmcia_device *p_dev;
71 dev_node_t node; 71 dev_node_t node;
72} local_info_t; 72} local_info_t;
73 73
74static void sl811_cs_release(dev_link_t * link); 74static void sl811_cs_release(struct pcmcia_device * link);
75 75
76/*====================================================================*/ 76/*====================================================================*/
77 77
@@ -138,41 +138,27 @@ static int sl811_hc_init(struct device *parent, ioaddr_t base_addr, int irq)
138 138
139/*====================================================================*/ 139/*====================================================================*/
140 140
141static void sl811_cs_detach(struct pcmcia_device *p_dev) 141static void sl811_cs_detach(struct pcmcia_device *link)
142{ 142{
143 dev_link_t *link = dev_to_instance(p_dev);
144
145 DBG(0, "sl811_cs_detach(0x%p)\n", link); 143 DBG(0, "sl811_cs_detach(0x%p)\n", link);
146 144
147 link->state &= ~DEV_PRESENT; 145 sl811_cs_release(link);
148 if (link->state & DEV_CONFIG)
149 sl811_cs_release(link);
150 146
151 /* This points to the parent local_info_t struct */ 147 /* This points to the parent local_info_t struct */
152 kfree(link->priv); 148 kfree(link->priv);
153} 149}
154 150
155static void sl811_cs_release(dev_link_t * link) 151static void sl811_cs_release(struct pcmcia_device * link)
156{ 152{
157
158 DBG(0, "sl811_cs_release(0x%p)\n", link); 153 DBG(0, "sl811_cs_release(0x%p)\n", link);
159 154
160 /* Unlink the device chain */ 155 pcmcia_disable_device(link);
161 link->dev = NULL;
162
163 platform_device_unregister(&platform_dev); 156 platform_device_unregister(&platform_dev);
164 pcmcia_release_configuration(link->handle);
165 if (link->io.NumPorts1)
166 pcmcia_release_io(link->handle, &link->io);
167 if (link->irq.AssignedIRQ)
168 pcmcia_release_irq(link->handle, &link->irq);
169 link->state &= ~DEV_CONFIG;
170} 157}
171 158
172static void sl811_cs_config(dev_link_t *link) 159static int sl811_cs_config(struct pcmcia_device *link)
173{ 160{
174 client_handle_t handle = link->handle; 161 struct device *parent = &handle_to_dev(link);
175 struct device *parent = &handle_to_dev(handle);
176 local_info_t *dev = link->priv; 162 local_info_t *dev = link->priv;
177 tuple_t tuple; 163 tuple_t tuple;
178 cisparse_t parse; 164 cisparse_t parse;
@@ -188,27 +174,23 @@ static void sl811_cs_config(dev_link_t *link)
188 tuple.TupleData = buf; 174 tuple.TupleData = buf;
189 tuple.TupleDataMax = sizeof(buf); 175 tuple.TupleDataMax = sizeof(buf);
190 tuple.TupleOffset = 0; 176 tuple.TupleOffset = 0;
191 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 177 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
192 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); 178 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
193 CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); 179 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
194 link->conf.ConfigBase = parse.config.base; 180 link->conf.ConfigBase = parse.config.base;
195 link->conf.Present = parse.config.rmask[0]; 181 link->conf.Present = parse.config.rmask[0];
196 182
197 /* Configure card */
198 link->state |= DEV_CONFIG;
199
200 /* Look up the current Vcc */ 183 /* Look up the current Vcc */
201 CS_CHECK(GetConfigurationInfo, 184 CS_CHECK(GetConfigurationInfo,
202 pcmcia_get_configuration_info(handle, &conf)); 185 pcmcia_get_configuration_info(link, &conf));
203 link->conf.Vcc = conf.Vcc;
204 186
205 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 187 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
206 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); 188 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
207 while (1) { 189 while (1) {
208 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry); 190 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
209 191
210 if (pcmcia_get_tuple_data(handle, &tuple) != 0 192 if (pcmcia_get_tuple_data(link, &tuple) != 0
211 || pcmcia_parse_tuple(handle, &tuple, &parse) 193 || pcmcia_parse_tuple(link, &tuple, &parse)
212 != 0) 194 != 0)
213 goto next_entry; 195 goto next_entry;
214 196
@@ -234,10 +216,10 @@ static void sl811_cs_config(dev_link_t *link)
234 } 216 }
235 217
236 if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM)) 218 if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM))
237 link->conf.Vpp1 = link->conf.Vpp2 = 219 link->conf.Vpp =
238 cfg->vpp1.param[CISTPL_POWER_VNOM]/10000; 220 cfg->vpp1.param[CISTPL_POWER_VNOM]/10000;
239 else if (dflt.vpp1.present & (1<<CISTPL_POWER_VNOM)) 221 else if (dflt.vpp1.present & (1<<CISTPL_POWER_VNOM))
240 link->conf.Vpp1 = link->conf.Vpp2 = 222 link->conf.Vpp =
241 dflt.vpp1.param[CISTPL_POWER_VNOM]/10000; 223 dflt.vpp1.param[CISTPL_POWER_VNOM]/10000;
242 224
243 /* we need an interrupt */ 225 /* we need an interrupt */
@@ -254,15 +236,14 @@ static void sl811_cs_config(dev_link_t *link)
254 link->io.BasePort1 = io->win[0].base; 236 link->io.BasePort1 = io->win[0].base;
255 link->io.NumPorts1 = io->win[0].len; 237 link->io.NumPorts1 = io->win[0].len;
256 238
257 if (pcmcia_request_io(link->handle, &link->io) != 0) 239 if (pcmcia_request_io(link, &link->io) != 0)
258 goto next_entry; 240 goto next_entry;
259 } 241 }
260 break; 242 break;
261 243
262next_entry: 244next_entry:
263 if (link->io.NumPorts1) 245 pcmcia_disable_device(link);
264 pcmcia_release_io(link->handle, &link->io); 246 last_ret = pcmcia_get_next_tuple(link, &tuple);
265 last_ret = pcmcia_get_next_tuple(handle, &tuple);
266 } 247 }
267 248
268 /* require an IRQ and two registers */ 249 /* require an IRQ and two registers */
@@ -270,71 +251,46 @@ next_entry:
270 goto cs_failed; 251 goto cs_failed;
271 if (link->conf.Attributes & CONF_ENABLE_IRQ) 252 if (link->conf.Attributes & CONF_ENABLE_IRQ)
272 CS_CHECK(RequestIRQ, 253 CS_CHECK(RequestIRQ,
273 pcmcia_request_irq(link->handle, &link->irq)); 254 pcmcia_request_irq(link, &link->irq));
274 else 255 else
275 goto cs_failed; 256 goto cs_failed;
276 257
277 CS_CHECK(RequestConfiguration, 258 CS_CHECK(RequestConfiguration,
278 pcmcia_request_configuration(link->handle, &link->conf)); 259 pcmcia_request_configuration(link, &link->conf));
279 260
280 sprintf(dev->node.dev_name, driver_name); 261 sprintf(dev->node.dev_name, driver_name);
281 dev->node.major = dev->node.minor = 0; 262 dev->node.major = dev->node.minor = 0;
282 link->dev = &dev->node; 263 link->dev_node = &dev->node;
283 264
284 printk(KERN_INFO "%s: index 0x%02x: Vcc %d.%d", 265 printk(KERN_INFO "%s: index 0x%02x: ",
285 dev->node.dev_name, link->conf.ConfigIndex, 266 dev->node.dev_name, link->conf.ConfigIndex);
286 link->conf.Vcc/10, link->conf.Vcc%10); 267 if (link->conf.Vpp)
287 if (link->conf.Vpp1) 268 printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
288 printk(", Vpp %d.%d", link->conf.Vpp1/10, link->conf.Vpp1%10);
289 printk(", irq %d", link->irq.AssignedIRQ); 269 printk(", irq %d", link->irq.AssignedIRQ);
290 printk(", io 0x%04x-0x%04x", link->io.BasePort1, 270 printk(", io 0x%04x-0x%04x", link->io.BasePort1,
291 link->io.BasePort1+link->io.NumPorts1-1); 271 link->io.BasePort1+link->io.NumPorts1-1);
292 printk("\n"); 272 printk("\n");
293 273
294 link->state &= ~DEV_CONFIG_PENDING;
295
296 if (sl811_hc_init(parent, link->io.BasePort1, link->irq.AssignedIRQ) 274 if (sl811_hc_init(parent, link->io.BasePort1, link->irq.AssignedIRQ)
297 < 0) { 275 < 0) {
298cs_failed: 276cs_failed:
299 printk("sl811_cs_config failed\n"); 277 printk("sl811_cs_config failed\n");
300 cs_error(link->handle, last_fn, last_ret); 278 cs_error(link, last_fn, last_ret);
301 sl811_cs_release(link); 279 sl811_cs_release(link);
302 link->state &= ~DEV_CONFIG_PENDING; 280 return -ENODEV;
303 } 281 }
304}
305
306static int sl811_suspend(struct pcmcia_device *dev)
307{
308 dev_link_t *link = dev_to_instance(dev);
309
310 link->state |= DEV_SUSPEND;
311 if (link->state & DEV_CONFIG)
312 pcmcia_release_configuration(link->handle);
313
314 return 0;
315}
316
317static int sl811_resume(struct pcmcia_device *dev)
318{
319 dev_link_t *link = dev_to_instance(dev);
320
321 link->state &= ~DEV_SUSPEND;
322 if (link->state & DEV_CONFIG)
323 pcmcia_request_configuration(link->handle, &link->conf);
324
325 return 0; 282 return 0;
326} 283}
327 284
328static int sl811_cs_attach(struct pcmcia_device *p_dev) 285static int sl811_cs_probe(struct pcmcia_device *link)
329{ 286{
330 local_info_t *local; 287 local_info_t *local;
331 dev_link_t *link;
332 288
333 local = kmalloc(sizeof(local_info_t), GFP_KERNEL); 289 local = kmalloc(sizeof(local_info_t), GFP_KERNEL);
334 if (!local) 290 if (!local)
335 return -ENOMEM; 291 return -ENOMEM;
336 memset(local, 0, sizeof(local_info_t)); 292 memset(local, 0, sizeof(local_info_t));
337 link = &local->link; 293 local->p_dev = link;
338 link->priv = local; 294 link->priv = local;
339 295
340 /* Initialize */ 296 /* Initialize */
@@ -343,16 +299,9 @@ static int sl811_cs_attach(struct pcmcia_device *p_dev)
343 link->irq.Handler = NULL; 299 link->irq.Handler = NULL;
344 300
345 link->conf.Attributes = 0; 301 link->conf.Attributes = 0;
346 link->conf.Vcc = 33;
347 link->conf.IntType = INT_MEMORY_AND_IO; 302 link->conf.IntType = INT_MEMORY_AND_IO;
348 303
349 link->handle = p_dev; 304 return sl811_cs_config(link);
350 p_dev->instance = link;
351
352 link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
353 sl811_cs_config(link);
354
355 return 0;
356} 305}
357 306
358static struct pcmcia_device_id sl811_ids[] = { 307static struct pcmcia_device_id sl811_ids[] = {
@@ -366,11 +315,9 @@ static struct pcmcia_driver sl811_cs_driver = {
366 .drv = { 315 .drv = {
367 .name = (char *)driver_name, 316 .name = (char *)driver_name,
368 }, 317 },
369 .probe = sl811_cs_attach, 318 .probe = sl811_cs_probe,
370 .remove = sl811_cs_detach, 319 .remove = sl811_cs_detach,
371 .id_table = sl811_ids, 320 .id_table = sl811_ids,
372 .suspend = sl811_suspend,
373 .resume = sl811_resume,
374}; 321};
375 322
376/*====================================================================*/ 323/*====================================================================*/
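The ixj and sl811_cs conversions above follow the same pattern applied throughout this merge: the intermediate dev_link_t is dropped, the probe callback works directly on struct pcmcia_device, the config routine returns an int so probe can propagate failure, and the release path collapses into a single pcmcia_disable_device() call. A minimal sketch of the converted shape is given below; local_info_t mirrors the struct in sl811_cs.c, while example_cs_probe/example_cs_config/example_cs_release are illustrative names, not functions from the patch.

static int example_cs_config(struct pcmcia_device *link);	/* returns 0 on success, -ENODEV on failure */

static int example_cs_probe(struct pcmcia_device *link)
{
	local_info_t *local;

	local = kmalloc(sizeof(local_info_t), GFP_KERNEL);
	if (!local)
		return -ENOMEM;
	memset(local, 0, sizeof(local_info_t));
	local->p_dev = link;			/* driver data now carries the back-pointer */
	link->priv = local;

	link->conf.Attributes = 0;		/* conf.Vcc is gone from the new API */
	link->conf.IntType = INT_MEMORY_AND_IO;

	return example_cs_config(link);		/* a config failure now fails the probe */
}

static void example_cs_release(struct pcmcia_device *link)
{
	pcmcia_disable_device(link);		/* replaces release_configuration/io/irq */
}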
diff --git a/drivers/usb/input/hid-input.c b/drivers/usb/input/hid-input.c
index cb0d80f49252..25bc85f8ce39 100644
--- a/drivers/usb/input/hid-input.c
+++ b/drivers/usb/input/hid-input.c
@@ -510,7 +510,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
510 case 0x025: map_key_clear(KEY_TV); break; 510 case 0x025: map_key_clear(KEY_TV); break;
511 case 0x026: map_key_clear(KEY_MENU); break; 511 case 0x026: map_key_clear(KEY_MENU); break;
512 case 0x031: map_key_clear(KEY_AUDIO); break; 512 case 0x031: map_key_clear(KEY_AUDIO); break;
513 case 0x032: map_key_clear(KEY_SUBTITLE); break; 513 case 0x032: map_key_clear(KEY_TEXT); break;
514 case 0x033: map_key_clear(KEY_LAST); break; 514 case 0x033: map_key_clear(KEY_LAST); break;
515 case 0x047: map_key_clear(KEY_MP3); break; 515 case 0x047: map_key_clear(KEY_MP3); break;
516 case 0x048: map_key_clear(KEY_DVD); break; 516 case 0x048: map_key_clear(KEY_DVD); break;
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 22e9d696fdd2..f87c0171f4ec 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -904,18 +904,6 @@ config FB_MATROX_MULTIHEAD
904 There is no need for enabling 'Matrox multihead support' if you have 904 There is no need for enabling 'Matrox multihead support' if you have
905 only one Matrox card in the box. 905 only one Matrox card in the box.
906 906
907config FB_RADEON_OLD
908 tristate "ATI Radeon display support (Old driver)"
909 depends on FB && PCI
910 select FB_CFB_FILLRECT
911 select FB_CFB_COPYAREA
912 select FB_CFB_IMAGEBLIT
913 select FB_MACMODES if PPC
914 help
915 Choose this option if you want to use an ATI Radeon graphics card as
916 a framebuffer device. There are both PCI and AGP versions. You
917 don't need to choose this to run the Radeon in plain VGA mode.
918
919config FB_RADEON 907config FB_RADEON
920 tristate "ATI Radeon display support" 908 tristate "ATI Radeon display support"
921 depends on FB && PCI 909 depends on FB && PCI
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index cb90218515ac..23de3b2c7856 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -39,7 +39,6 @@ obj-$(CONFIG_FB_KYRO) += kyro/
39obj-$(CONFIG_FB_SAVAGE) += savage/ 39obj-$(CONFIG_FB_SAVAGE) += savage/
40obj-$(CONFIG_FB_GEODE) += geode/ 40obj-$(CONFIG_FB_GEODE) += geode/
41obj-$(CONFIG_FB_I810) += vgastate.o 41obj-$(CONFIG_FB_I810) += vgastate.o
42obj-$(CONFIG_FB_RADEON_OLD) += radeonfb.o
43obj-$(CONFIG_FB_NEOMAGIC) += neofb.o vgastate.o 42obj-$(CONFIG_FB_NEOMAGIC) += neofb.o vgastate.o
44obj-$(CONFIG_FB_VIRGE) += virgefb.o 43obj-$(CONFIG_FB_VIRGE) += virgefb.o
45obj-$(CONFIG_FB_3DFX) += tdfxfb.o 44obj-$(CONFIG_FB_3DFX) += tdfxfb.o
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 9d996f2c10d5..b895eaaa73fd 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -43,11 +43,11 @@ config LCD_DEVICE
43 default y 43 default y
44 44
45config BACKLIGHT_CORGI 45config BACKLIGHT_CORGI
46 tristate "Sharp Corgi Backlight Driver (SL-C7xx Series)" 46 tristate "Sharp Corgi Backlight Driver (SL Series)"
47 depends on BACKLIGHT_DEVICE && PXA_SHARPSL 47 depends on BACKLIGHT_DEVICE && PXA_SHARPSL
48 default y 48 default y
49 help 49 help
50 If you have a Sharp Zaurus SL-C7xx, say y to enable the 50 If you have a Sharp Zaurus SL-C7xx, SL-Cxx00 or SL-6000x say y to enable the
51 backlight driver. 51 backlight driver.
52 52
53config BACKLIGHT_HP680 53config BACKLIGHT_HP680
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 151fda8dded0..334b1db1bd7c 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -16,14 +16,12 @@
16 16
17static ssize_t backlight_show_power(struct class_device *cdev, char *buf) 17static ssize_t backlight_show_power(struct class_device *cdev, char *buf)
18{ 18{
19 int rc; 19 int rc = -ENXIO;
20 struct backlight_device *bd = to_backlight_device(cdev); 20 struct backlight_device *bd = to_backlight_device(cdev);
21 21
22 down(&bd->sem); 22 down(&bd->sem);
23 if (likely(bd->props && bd->props->get_power)) 23 if (likely(bd->props))
24 rc = sprintf(buf, "%d\n", bd->props->get_power(bd)); 24 rc = sprintf(buf, "%d\n", bd->props->power);
25 else
26 rc = -ENXIO;
27 up(&bd->sem); 25 up(&bd->sem);
28 26
29 return rc; 27 return rc;
@@ -31,7 +29,7 @@ static ssize_t backlight_show_power(struct class_device *cdev, char *buf)
31 29
32static ssize_t backlight_store_power(struct class_device *cdev, const char *buf, size_t count) 30static ssize_t backlight_store_power(struct class_device *cdev, const char *buf, size_t count)
33{ 31{
34 int rc, power; 32 int rc = -ENXIO, power;
35 char *endp; 33 char *endp;
36 struct backlight_device *bd = to_backlight_device(cdev); 34 struct backlight_device *bd = to_backlight_device(cdev);
37 35
@@ -40,12 +38,13 @@ static ssize_t backlight_store_power(struct class_device *cdev, const char *buf,
40 return -EINVAL; 38 return -EINVAL;
41 39
42 down(&bd->sem); 40 down(&bd->sem);
43 if (likely(bd->props && bd->props->set_power)) { 41 if (likely(bd->props)) {
44 pr_debug("backlight: set power to %d\n", power); 42 pr_debug("backlight: set power to %d\n", power);
45 bd->props->set_power(bd, power); 43 bd->props->power = power;
44 if (likely(bd->props->update_status))
45 bd->props->update_status(bd);
46 rc = count; 46 rc = count;
47 } else 47 }
48 rc = -ENXIO;
49 up(&bd->sem); 48 up(&bd->sem);
50 49
51 return rc; 50 return rc;
@@ -53,14 +52,12 @@ static ssize_t backlight_store_power(struct class_device *cdev, const char *buf,
53 52
54static ssize_t backlight_show_brightness(struct class_device *cdev, char *buf) 53static ssize_t backlight_show_brightness(struct class_device *cdev, char *buf)
55{ 54{
56 int rc; 55 int rc = -ENXIO;
57 struct backlight_device *bd = to_backlight_device(cdev); 56 struct backlight_device *bd = to_backlight_device(cdev);
58 57
59 down(&bd->sem); 58 down(&bd->sem);
60 if (likely(bd->props && bd->props->get_brightness)) 59 if (likely(bd->props))
61 rc = sprintf(buf, "%d\n", bd->props->get_brightness(bd)); 60 rc = sprintf(buf, "%d\n", bd->props->brightness);
62 else
63 rc = -ENXIO;
64 up(&bd->sem); 61 up(&bd->sem);
65 62
66 return rc; 63 return rc;
@@ -68,7 +65,7 @@ static ssize_t backlight_show_brightness(struct class_device *cdev, char *buf)
68 65
69static ssize_t backlight_store_brightness(struct class_device *cdev, const char *buf, size_t count) 66static ssize_t backlight_store_brightness(struct class_device *cdev, const char *buf, size_t count)
70{ 67{
71 int rc, brightness; 68 int rc = -ENXIO, brightness;
72 char *endp; 69 char *endp;
73 struct backlight_device *bd = to_backlight_device(cdev); 70 struct backlight_device *bd = to_backlight_device(cdev);
74 71
@@ -77,12 +74,18 @@ static ssize_t backlight_store_brightness(struct class_device *cdev, const char
77 return -EINVAL; 74 return -EINVAL;
78 75
79 down(&bd->sem); 76 down(&bd->sem);
80 if (likely(bd->props && bd->props->set_brightness)) { 77 if (likely(bd->props)) {
81 pr_debug("backlight: set brightness to %d\n", brightness); 78 if (brightness > bd->props->max_brightness)
82 bd->props->set_brightness(bd, brightness); 79 rc = -EINVAL;
83 rc = count; 80 else {
84 } else 81 pr_debug("backlight: set brightness to %d\n",
85 rc = -ENXIO; 82 brightness);
83 bd->props->brightness = brightness;
84 if (likely(bd->props->update_status))
85 bd->props->update_status(bd);
86 rc = count;
87 }
88 }
86 up(&bd->sem); 89 up(&bd->sem);
87 90
88 return rc; 91 return rc;
@@ -90,14 +93,26 @@ static ssize_t backlight_store_brightness(struct class_device *cdev, const char
90 93
91static ssize_t backlight_show_max_brightness(struct class_device *cdev, char *buf) 94static ssize_t backlight_show_max_brightness(struct class_device *cdev, char *buf)
92{ 95{
93 int rc; 96 int rc = -ENXIO;
94 struct backlight_device *bd = to_backlight_device(cdev); 97 struct backlight_device *bd = to_backlight_device(cdev);
95 98
96 down(&bd->sem); 99 down(&bd->sem);
97 if (likely(bd->props)) 100 if (likely(bd->props))
98 rc = sprintf(buf, "%d\n", bd->props->max_brightness); 101 rc = sprintf(buf, "%d\n", bd->props->max_brightness);
99 else 102 up(&bd->sem);
100 rc = -ENXIO; 103
104 return rc;
105}
106
107static ssize_t backlight_show_actual_brightness(struct class_device *cdev,
108 char *buf)
109{
110 int rc = -ENXIO;
111 struct backlight_device *bd = to_backlight_device(cdev);
112
113 down(&bd->sem);
114 if (likely(bd->props && bd->props->get_brightness))
115 rc = sprintf(buf, "%d\n", bd->props->get_brightness(bd));
101 up(&bd->sem); 116 up(&bd->sem);
102 117
103 return rc; 118 return rc;
@@ -123,7 +138,10 @@ static struct class backlight_class = {
123 138
124static struct class_device_attribute bl_class_device_attributes[] = { 139static struct class_device_attribute bl_class_device_attributes[] = {
125 DECLARE_ATTR(power, 0644, backlight_show_power, backlight_store_power), 140 DECLARE_ATTR(power, 0644, backlight_show_power, backlight_store_power),
126 DECLARE_ATTR(brightness, 0644, backlight_show_brightness, backlight_store_brightness), 141 DECLARE_ATTR(brightness, 0644, backlight_show_brightness,
142 backlight_store_brightness),
143 DECLARE_ATTR(actual_brightness, 0444, backlight_show_actual_brightness,
144 NULL),
127 DECLARE_ATTR(max_brightness, 0444, backlight_show_max_brightness, NULL), 145 DECLARE_ATTR(max_brightness, 0444, backlight_show_max_brightness, NULL),
128}; 146};
129 147
@@ -144,8 +162,12 @@ static int fb_notifier_callback(struct notifier_block *self,
144 bd = container_of(self, struct backlight_device, fb_notif); 162 bd = container_of(self, struct backlight_device, fb_notif);
145 down(&bd->sem); 163 down(&bd->sem);
146 if (bd->props) 164 if (bd->props)
147 if (!bd->props->check_fb || bd->props->check_fb(evdata->info)) 165 if (!bd->props->check_fb ||
148 bd->props->set_power(bd, *(int *)evdata->data); 166 bd->props->check_fb(evdata->info)) {
167 bd->props->fb_blank = *(int *)evdata->data;
168 if (likely(bd->props && bd->props->update_status))
169 bd->props->update_status(bd);
170 }
149 up(&bd->sem); 171 up(&bd->sem);
150 return 0; 172 return 0;
151} 173}
@@ -231,6 +253,12 @@ void backlight_device_unregister(struct backlight_device *bd)
231 &bl_class_device_attributes[i]); 253 &bl_class_device_attributes[i]);
232 254
233 down(&bd->sem); 255 down(&bd->sem);
256 if (likely(bd->props && bd->props->update_status)) {
257 bd->props->brightness = 0;
258 bd->props->power = 0;
259 bd->props->update_status(bd);
260 }
261
234 bd->props = NULL; 262 bd->props = NULL;
235 up(&bd->sem); 263 up(&bd->sem);
236 264
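The backlight core changes above replace the per-driver get_power/set_power and set_brightness callbacks with plain power, brightness and fb_blank fields in struct backlight_properties plus a single update_status() hook; get_brightness() survives only to report the level actually driven to the hardware through the new read-only actual_brightness attribute. The sketch below shows the shape a driver takes under this model; the example_* names and the 255 maximum are placeholders, not taken from any driver in this merge.

static int example_bl_update_status(struct backlight_device *bd)
{
	int intensity = bd->props->brightness;

	/* the core stores the requested state; the driver decides what to apply */
	if (bd->props->power != FB_BLANK_UNBLANK)
		intensity = 0;
	if (bd->props->fb_blank != FB_BLANK_UNBLANK)
		intensity = 0;

	example_set_hw_intensity(intensity);	/* hypothetical hardware hook */
	return 0;
}

static int example_bl_get_intensity(struct backlight_device *bd)
{
	return example_read_hw_intensity();	/* hypothetical; shown via actual_brightness */
}

static struct backlight_properties example_bl_data = {
	.owner		= THIS_MODULE,
	.max_brightness	= 255,			/* placeholder value */
	.get_brightness	= example_bl_get_intensity,
	.update_status	= example_bl_update_status,
};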
diff --git a/drivers/video/backlight/corgi_bl.c b/drivers/video/backlight/corgi_bl.c
index d0aaf450e8c7..2ebbfd95145f 100644
--- a/drivers/video/backlight/corgi_bl.c
+++ b/drivers/video/backlight/corgi_bl.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Backlight Driver for Sharp Corgi 2 * Backlight Driver for Sharp Zaurus Handhelds (various models)
3 * 3 *
4 * Copyright (c) 2004-2005 Richard Purdie 4 * Copyright (c) 2004-2006 Richard Purdie
5 * 5 *
6 * Based on Sharp's 2.4 Backlight Driver 6 * Based on Sharp's 2.4 Backlight Driver
7 * 7 *
@@ -15,80 +15,63 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/spinlock.h> 18#include <linux/mutex.h>
19#include <linux/fb.h> 19#include <linux/fb.h>
20#include <linux/backlight.h> 20#include <linux/backlight.h>
21
22#include <asm/arch/sharpsl.h> 21#include <asm/arch/sharpsl.h>
23#include <asm/hardware/sharpsl_pm.h> 22#include <asm/hardware/sharpsl_pm.h>
24 23
25#define CORGI_DEFAULT_INTENSITY 0x1f 24static int corgibl_intensity;
26#define CORGI_LIMIT_MASK 0x0b 25static DEFINE_MUTEX(bl_mutex);
27
28static int corgibl_powermode = FB_BLANK_UNBLANK;
29static int current_intensity = 0;
30static int corgibl_limit = 0;
31static void (*corgibl_mach_set_intensity)(int intensity);
32static spinlock_t bl_lock = SPIN_LOCK_UNLOCKED;
33static struct backlight_properties corgibl_data; 26static struct backlight_properties corgibl_data;
27static struct backlight_device *corgi_backlight_device;
28static struct corgibl_machinfo *bl_machinfo;
34 29
35static void corgibl_send_intensity(int intensity) 30static unsigned long corgibl_flags;
31#define CORGIBL_SUSPENDED 0x01
32#define CORGIBL_BATTLOW 0x02
33
34static int corgibl_send_intensity(struct backlight_device *bd)
36{ 35{
37 unsigned long flags;
38 void (*corgi_kick_batt)(void); 36 void (*corgi_kick_batt)(void);
37 int intensity = bd->props->brightness;
39 38
40 if (corgibl_powermode != FB_BLANK_UNBLANK) { 39 if (bd->props->power != FB_BLANK_UNBLANK)
41 intensity = 0; 40 intensity = 0;
42 } else { 41 if (bd->props->fb_blank != FB_BLANK_UNBLANK)
43 if (corgibl_limit) 42 intensity = 0;
44 intensity &= CORGI_LIMIT_MASK; 43 if (corgibl_flags & CORGIBL_SUSPENDED)
45 } 44 intensity = 0;
46 45 if (corgibl_flags & CORGIBL_BATTLOW)
47 spin_lock_irqsave(&bl_lock, flags); 46 intensity &= bl_machinfo->limit_mask;
48 47
49 corgibl_mach_set_intensity(intensity); 48 mutex_lock(&bl_mutex);
49 bl_machinfo->set_bl_intensity(intensity);
50 mutex_unlock(&bl_mutex);
50 51
51 spin_unlock_irqrestore(&bl_lock, flags); 52 corgibl_intensity = intensity;
52 53
53 corgi_kick_batt = symbol_get(sharpsl_battery_kick); 54 corgi_kick_batt = symbol_get(sharpsl_battery_kick);
54 if (corgi_kick_batt) { 55 if (corgi_kick_batt) {
55 corgi_kick_batt(); 56 corgi_kick_batt();
56 symbol_put(sharpsl_battery_kick); 57 symbol_put(sharpsl_battery_kick);
57 } 58 }
58}
59 59
60static void corgibl_blank(int blank) 60 return 0;
61{
62 switch(blank) {
63
64 case FB_BLANK_NORMAL:
65 case FB_BLANK_VSYNC_SUSPEND:
66 case FB_BLANK_HSYNC_SUSPEND:
67 case FB_BLANK_POWERDOWN:
68 if (corgibl_powermode == FB_BLANK_UNBLANK) {
69 corgibl_send_intensity(0);
70 corgibl_powermode = blank;
71 }
72 break;
73 case FB_BLANK_UNBLANK:
74 if (corgibl_powermode != FB_BLANK_UNBLANK) {
75 corgibl_powermode = blank;
76 corgibl_send_intensity(current_intensity);
77 }
78 break;
79 }
80} 61}
81 62
82#ifdef CONFIG_PM 63#ifdef CONFIG_PM
83static int corgibl_suspend(struct platform_device *dev, pm_message_t state) 64static int corgibl_suspend(struct platform_device *dev, pm_message_t state)
84{ 65{
85 corgibl_blank(FB_BLANK_POWERDOWN); 66 corgibl_flags |= CORGIBL_SUSPENDED;
67 corgibl_send_intensity(corgi_backlight_device);
86 return 0; 68 return 0;
87} 69}
88 70
89static int corgibl_resume(struct platform_device *dev) 71static int corgibl_resume(struct platform_device *dev)
90{ 72{
91 corgibl_blank(FB_BLANK_UNBLANK); 73 corgibl_flags &= ~CORGIBL_SUSPENDED;
74 corgibl_send_intensity(corgi_backlight_device);
92 return 0; 75 return 0;
93} 76}
94#else 77#else
@@ -96,68 +79,55 @@ static int corgibl_resume(struct platform_device *dev)
96#define corgibl_resume NULL 79#define corgibl_resume NULL
97#endif 80#endif
98 81
99 82static int corgibl_get_intensity(struct backlight_device *bd)
100static int corgibl_set_power(struct backlight_device *bd, int state)
101{
102 corgibl_blank(state);
103 return 0;
104}
105
106static int corgibl_get_power(struct backlight_device *bd)
107{ 83{
108 return corgibl_powermode; 84 return corgibl_intensity;
109} 85}
110 86
111static int corgibl_set_intensity(struct backlight_device *bd, int intensity) 87static int corgibl_set_intensity(struct backlight_device *bd)
112{ 88{
113 if (intensity > corgibl_data.max_brightness) 89 corgibl_send_intensity(corgi_backlight_device);
114 intensity = corgibl_data.max_brightness;
115 corgibl_send_intensity(intensity);
116 current_intensity=intensity;
117 return 0; 90 return 0;
118} 91}
119 92
120static int corgibl_get_intensity(struct backlight_device *bd)
121{
122 return current_intensity;
123}
124
125/* 93/*
126 * Called when the battery is low to limit the backlight intensity. 94 * Called when the battery is low to limit the backlight intensity.
127 * If limit==0 clear any limit, otherwise limit the intensity 95 * If limit==0 clear any limit, otherwise limit the intensity
128 */ 96 */
129void corgibl_limit_intensity(int limit) 97void corgibl_limit_intensity(int limit)
130{ 98{
131 corgibl_limit = (limit ? 1 : 0); 99 if (limit)
132 corgibl_send_intensity(current_intensity); 100 corgibl_flags |= CORGIBL_BATTLOW;
101 else
102 corgibl_flags &= ~CORGIBL_BATTLOW;
103 corgibl_send_intensity(corgi_backlight_device);
133} 104}
134EXPORT_SYMBOL(corgibl_limit_intensity); 105EXPORT_SYMBOL(corgibl_limit_intensity);
135 106
136 107
137static struct backlight_properties corgibl_data = { 108static struct backlight_properties corgibl_data = {
138 .owner = THIS_MODULE, 109 .owner = THIS_MODULE,
139 .get_power = corgibl_get_power,
140 .set_power = corgibl_set_power,
141 .get_brightness = corgibl_get_intensity, 110 .get_brightness = corgibl_get_intensity,
142 .set_brightness = corgibl_set_intensity, 111 .update_status = corgibl_set_intensity,
143}; 112};
144 113
145static struct backlight_device *corgi_backlight_device;
146
147static int __init corgibl_probe(struct platform_device *pdev) 114static int __init corgibl_probe(struct platform_device *pdev)
148{ 115{
149 struct corgibl_machinfo *machinfo = pdev->dev.platform_data; 116 struct corgibl_machinfo *machinfo = pdev->dev.platform_data;
150 117
118 bl_machinfo = machinfo;
151 corgibl_data.max_brightness = machinfo->max_intensity; 119 corgibl_data.max_brightness = machinfo->max_intensity;
152 corgibl_mach_set_intensity = machinfo->set_bl_intensity; 120 if (!machinfo->limit_mask)
121 machinfo->limit_mask = -1;
153 122
154 corgi_backlight_device = backlight_device_register ("corgi-bl", 123 corgi_backlight_device = backlight_device_register ("corgi-bl",
155 NULL, &corgibl_data); 124 NULL, &corgibl_data);
156 if (IS_ERR (corgi_backlight_device)) 125 if (IS_ERR (corgi_backlight_device))
157 return PTR_ERR (corgi_backlight_device); 126 return PTR_ERR (corgi_backlight_device);
158 127
159 corgibl_set_intensity(NULL, CORGI_DEFAULT_INTENSITY); 128 corgibl_data.power = FB_BLANK_UNBLANK;
160 corgibl_limit_intensity(0); 129 corgibl_data.brightness = machinfo->default_intensity;
130 corgibl_send_intensity(corgi_backlight_device);
161 131
162 printk("Corgi Backlight Driver Initialized.\n"); 132 printk("Corgi Backlight Driver Initialized.\n");
163 return 0; 133 return 0;
@@ -167,8 +137,6 @@ static int corgibl_remove(struct platform_device *dev)
167{ 137{
168 backlight_device_unregister(corgi_backlight_device); 138 backlight_device_unregister(corgi_backlight_device);
169 139
170 corgibl_set_intensity(NULL, 0);
171
172 printk("Corgi Backlight Driver Unloaded\n"); 140 printk("Corgi Backlight Driver Unloaded\n");
173 return 0; 141 return 0;
174} 142}
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index 95da4c9ed1f1..a71e984c93d4 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -13,7 +13,7 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/device.h> 16#include <linux/platform_device.h>
17#include <linux/spinlock.h> 17#include <linux/spinlock.h>
18#include <linux/fb.h> 18#include <linux/fb.h>
19#include <linux/backlight.h> 19#include <linux/backlight.h>
@@ -25,66 +25,58 @@
25#define HP680_MAX_INTENSITY 255 25#define HP680_MAX_INTENSITY 255
26#define HP680_DEFAULT_INTENSITY 10 26#define HP680_DEFAULT_INTENSITY 10
27 27
28static int hp680bl_powermode = FB_BLANK_UNBLANK; 28static int hp680bl_suspended;
29static int current_intensity = 0; 29static int current_intensity = 0;
30static spinlock_t bl_lock = SPIN_LOCK_UNLOCKED; 30static spinlock_t bl_lock = SPIN_LOCK_UNLOCKED;
31static struct backlight_device *hp680_backlight_device;
31 32
32static void hp680bl_send_intensity(int intensity) 33static void hp680bl_send_intensity(struct backlight_device *bd)
33{ 34{
34 unsigned long flags; 35 unsigned long flags;
36 u16 v;
37 int intensity = bd->props->brightness;
35 38
36 if (hp680bl_powermode != FB_BLANK_UNBLANK) 39 if (bd->props->power != FB_BLANK_UNBLANK)
40 intensity = 0;
41 if (bd->props->fb_blank != FB_BLANK_UNBLANK)
42 intensity = 0;
43 if (hp680bl_suspended)
37 intensity = 0; 44 intensity = 0;
38 45
39 spin_lock_irqsave(&bl_lock, flags); 46 spin_lock_irqsave(&bl_lock, flags);
40 sh_dac_output(255-(u8)intensity, DAC_LCD_BRIGHTNESS); 47 if (intensity && current_intensity == 0) {
48 sh_dac_enable(DAC_LCD_BRIGHTNESS);
49 v = inw(HD64461_GPBDR);
50 v &= ~HD64461_GPBDR_LCDOFF;
51 outw(v, HD64461_GPBDR);
52 sh_dac_output(255-(u8)intensity, DAC_LCD_BRIGHTNESS);
53 } else if (intensity == 0 && current_intensity != 0) {
54 sh_dac_output(255-(u8)intensity, DAC_LCD_BRIGHTNESS);
55 sh_dac_disable(DAC_LCD_BRIGHTNESS);
56 v = inw(HD64461_GPBDR);
57 v |= HD64461_GPBDR_LCDOFF;
58 outw(v, HD64461_GPBDR);
59 } else if (intensity) {
60 sh_dac_output(255-(u8)intensity, DAC_LCD_BRIGHTNESS);
61 }
41 spin_unlock_irqrestore(&bl_lock, flags); 62 spin_unlock_irqrestore(&bl_lock, flags);
42}
43 63
44static void hp680bl_blank(int blank) 64 current_intensity = intensity;
45{
46 u16 v;
47
48 switch(blank) {
49
50 case FB_BLANK_NORMAL:
51 case FB_BLANK_VSYNC_SUSPEND:
52 case FB_BLANK_HSYNC_SUSPEND:
53 case FB_BLANK_POWERDOWN:
54 if (hp680bl_powermode == FB_BLANK_UNBLANK) {
55 hp680bl_send_intensity(0);
56 hp680bl_powermode = blank;
57 sh_dac_disable(DAC_LCD_BRIGHTNESS);
58 v = inw(HD64461_GPBDR);
59 v |= HD64461_GPBDR_LCDOFF;
60 outw(v, HD64461_GPBDR);
61 }
62 break;
63 case FB_BLANK_UNBLANK:
64 if (hp680bl_powermode != FB_BLANK_UNBLANK) {
65 sh_dac_enable(DAC_LCD_BRIGHTNESS);
66 v = inw(HD64461_GPBDR);
67 v &= ~HD64461_GPBDR_LCDOFF;
68 outw(v, HD64461_GPBDR);
69 hp680bl_powermode = blank;
70 hp680bl_send_intensity(current_intensity);
71 }
72 break;
73 }
74} 65}
75 66
67
76#ifdef CONFIG_PM 68#ifdef CONFIG_PM
77static int hp680bl_suspend(struct device *dev, pm_message_t state, u32 level) 69static int hp680bl_suspend(struct platform_device *dev, pm_message_t state)
78{ 70{
79 if (level == SUSPEND_POWER_DOWN) 71 hp680bl_suspended = 1;
80 hp680bl_blank(FB_BLANK_POWERDOWN); 72 hp680bl_send_intensity(hp680_backlight_device);
81 return 0; 73 return 0;
82} 74}
83 75
84static int hp680bl_resume(struct device *dev, u32 level) 76static int hp680bl_resume(struct platform_device *dev)
85{ 77{
86 if (level == RESUME_POWER_ON) 78 hp680bl_suspended = 0;
87 hp680bl_blank(FB_BLANK_UNBLANK); 79 hp680bl_send_intensity(hp680_backlight_device);
88 return 0; 80 return 0;
89} 81}
90#else 82#else
@@ -92,24 +84,9 @@ static int hp680bl_resume(struct device *dev, u32 level)
92#define hp680bl_resume NULL 84#define hp680bl_resume NULL
93#endif 85#endif
94 86
95 87static int hp680bl_set_intensity(struct backlight_device *bd)
96static int hp680bl_set_power(struct backlight_device *bd, int state)
97{ 88{
98 hp680bl_blank(state); 89 hp680bl_send_intensity(bd);
99 return 0;
100}
101
102static int hp680bl_get_power(struct backlight_device *bd)
103{
104 return hp680bl_powermode;
105}
106
107static int hp680bl_set_intensity(struct backlight_device *bd, int intensity)
108{
109 if (intensity > HP680_MAX_INTENSITY)
110 intensity = HP680_MAX_INTENSITY;
111 hp680bl_send_intensity(intensity);
112 current_intensity = intensity;
113 return 0; 90 return 0;
114} 91}
115 92
@@ -120,65 +97,67 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
120 97
121static struct backlight_properties hp680bl_data = { 98static struct backlight_properties hp680bl_data = {
122 .owner = THIS_MODULE, 99 .owner = THIS_MODULE,
123 .get_power = hp680bl_get_power,
124 .set_power = hp680bl_set_power,
125 .max_brightness = HP680_MAX_INTENSITY, 100 .max_brightness = HP680_MAX_INTENSITY,
126 .get_brightness = hp680bl_get_intensity, 101 .get_brightness = hp680bl_get_intensity,
127 .set_brightness = hp680bl_set_intensity, 102 .update_status = hp680bl_set_intensity,
128}; 103};
129 104
130static struct backlight_device *hp680_backlight_device; 105static int __init hp680bl_probe(struct platform_device *dev)
131
132static int __init hp680bl_probe(struct device *dev)
133{ 106{
134 hp680_backlight_device = backlight_device_register ("hp680-bl", 107 hp680_backlight_device = backlight_device_register ("hp680-bl",
135 NULL, &hp680bl_data); 108 NULL, &hp680bl_data);
136 if (IS_ERR (hp680_backlight_device)) 109 if (IS_ERR (hp680_backlight_device))
137 return PTR_ERR (hp680_backlight_device); 110 return PTR_ERR (hp680_backlight_device);
138 111
139 hp680bl_set_intensity(NULL, HP680_DEFAULT_INTENSITY); 112 hp680_backlight_device->props->brightness = HP680_DEFAULT_INTENSITY;
113 hp680bl_send_intensity(hp680_backlight_device);
140 114
141 return 0; 115 return 0;
142} 116}
143 117
144static int hp680bl_remove(struct device *dev) 118static int hp680bl_remove(struct platform_device *dev)
145{ 119{
146 backlight_device_unregister(hp680_backlight_device); 120 backlight_device_unregister(hp680_backlight_device);
147 121
148 return 0; 122 return 0;
149} 123}
150 124
151static struct device_driver hp680bl_driver = { 125static struct platform_driver hp680bl_driver = {
152 .name = "hp680-bl",
153 .bus = &platform_bus_type,
154 .probe = hp680bl_probe, 126 .probe = hp680bl_probe,
155 .remove = hp680bl_remove, 127 .remove = hp680bl_remove,
156 .suspend = hp680bl_suspend, 128 .suspend = hp680bl_suspend,
157 .resume = hp680bl_resume, 129 .resume = hp680bl_resume,
130 .driver = {
131 .name = "hp680-bl",
132 },
158}; 133};
159 134
160static struct platform_device hp680bl_device = { 135static struct platform_device *hp680bl_device;
161 .name = "hp680-bl",
162 .id = -1,
163};
164 136
165static int __init hp680bl_init(void) 137static int __init hp680bl_init(void)
166{ 138{
167 int ret; 139 int ret;
168 140
169 ret=driver_register(&hp680bl_driver); 141 ret = platform_driver_register(&hp680bl_driver);
170 if (!ret) { 142 if (!ret) {
171 ret = platform_device_register(&hp680bl_device); 143 hp680bl_device = platform_device_alloc("hp680-bl", -1);
172 if (ret) 144 if (!hp680bl_device)
173 driver_unregister(&hp680bl_driver); 145 return -ENOMEM;
146
147 ret = platform_device_add(hp680bl_device);
148
149 if (ret) {
150 platform_device_put(hp680bl_device);
151 platform_driver_unregister(&hp680bl_driver);
152 }
174 } 153 }
175 return ret; 154 return ret;
176} 155}
177 156
178static void __exit hp680bl_exit(void) 157static void __exit hp680bl_exit(void)
179{ 158{
180 platform_device_unregister(&hp680bl_device); 159 platform_device_unregister(hp680bl_device);
181 driver_unregister(&hp680bl_driver); 160 platform_driver_unregister(&hp680bl_driver);
182} 161}
183 162
184module_init(hp680bl_init); 163module_init(hp680bl_init);
diff --git a/drivers/video/cfbimgblt.c b/drivers/video/cfbimgblt.c
index 910e2338a27e..8ba6152db2fd 100644
--- a/drivers/video/cfbimgblt.c
+++ b/drivers/video/cfbimgblt.c
@@ -169,7 +169,7 @@ static inline void slow_imageblit(const struct fb_image *image, struct fb_info *
169 169
170 while (j--) { 170 while (j--) {
171 l--; 171 l--;
172 color = (*s & 1 << (FB_BIT_NR(l))) ? fgcolor : bgcolor; 172 color = (*s & (1 << l)) ? fgcolor : bgcolor;
173 val |= FB_SHIFT_HIGH(color, shift); 173 val |= FB_SHIFT_HIGH(color, shift);
174 174
175 /* Did the bitshift spill bits to the next long? */ 175 /* Did the bitshift spill bits to the next long? */
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 041d06987861..ca020719d20b 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -466,7 +466,7 @@ static int __init fb_console_setup(char *this_opt)
466 int i, j; 466 int i, j;
467 467
468 if (!this_opt || !*this_opt) 468 if (!this_opt || !*this_opt)
469 return 0; 469 return 1;
470 470
471 while ((options = strsep(&this_opt, ",")) != NULL) { 471 while ((options = strsep(&this_opt, ",")) != NULL) {
472 if (!strncmp(options, "font:", 5)) 472 if (!strncmp(options, "font:", 5))
@@ -481,10 +481,10 @@ static int __init fb_console_setup(char *this_opt)
481 options++; 481 options++;
482 } 482 }
483 if (*options != ',') 483 if (*options != ',')
484 return 0; 484 return 1;
485 options++; 485 options++;
486 } else 486 } else
487 return 0; 487 return 1;
488 } 488 }
489 489
490 if (!strncmp(options, "map:", 4)) { 490 if (!strncmp(options, "map:", 4)) {
@@ -496,7 +496,7 @@ static int __init fb_console_setup(char *this_opt)
496 con2fb_map_boot[i] = 496 con2fb_map_boot[i] =
497 (options[j++]-'0') % FB_MAX; 497 (options[j++]-'0') % FB_MAX;
498 } 498 }
499 return 0; 499 return 1;
500 } 500 }
501 501
502 if (!strncmp(options, "vc:", 3)) { 502 if (!strncmp(options, "vc:", 3)) {
@@ -518,7 +518,7 @@ static int __init fb_console_setup(char *this_opt)
518 rotate = 0; 518 rotate = 0;
519 } 519 }
520 } 520 }
521 return 0; 521 return 1;
522} 522}
523 523
524__setup("fbcon=", fb_console_setup); 524__setup("fbcon=", fb_console_setup);
@@ -1142,6 +1142,7 @@ static void fbcon_init(struct vc_data *vc, int init)
1142 set_blitting_type(vc, info); 1142 set_blitting_type(vc, info);
1143 } 1143 }
1144 1144
1145 ops->p = &fb_display[fg_console];
1145} 1146}
1146 1147
1147static void fbcon_deinit(struct vc_data *vc) 1148static void fbcon_deinit(struct vc_data *vc)
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index d6041e781aca..74ac2acaf72c 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -275,7 +275,7 @@ static int __init sti_setup(char *str)
275 if (str) 275 if (str)
276 strlcpy (default_sti_path, str, sizeof (default_sti_path)); 276 strlcpy (default_sti_path, str, sizeof (default_sti_path));
277 277
278 return 0; 278 return 1;
279} 279}
280 280
281/* Assuming the machine has multiple STI consoles (=graphic cards) which 281/* Assuming the machine has multiple STI consoles (=graphic cards) which
@@ -321,7 +321,7 @@ static int __init sti_font_setup(char *str)
321 i++; 321 i++;
322 } 322 }
323 323
324 return 0; 324 return 1;
325} 325}
326 326
327/* The optional linux kernel parameter "sti_font" defines which font 327/* The optional linux kernel parameter "sti_font" defines which font
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index b1a8dca76430..944855b3e4af 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1588,7 +1588,7 @@ static int __init video_setup(char *options)
1588 } 1588 }
1589 } 1589 }
1590 1590
1591 return 0; 1591 return 1;
1592} 1592}
1593__setup("video=", video_setup); 1593__setup("video=", video_setup);
1594#endif 1594#endif
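The fbcon, sticore and fbmem hunks above all switch their __setup() handlers from returning 0 to returning 1: for a __setup parser a non-zero return tells the kernel's boot option parser that the argument was consumed, whereas returning 0 lets an option such as video=... fall through to init's argument or environment list. A minimal sketch of the convention, with example_mode and example_setup as illustrative names only:

static char example_mode[32];

static int __init example_setup(char *options)
{
	if (options && *options)
		strlcpy(example_mode, options, sizeof(example_mode));
	return 1;	/* consumed: do not pass "example=..." on to init */
}
__setup("example=", example_setup);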
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 53ad61f1038c..809fc5eefc15 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -232,9 +232,9 @@ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
232 if (var->yres < MIN_YRES) 232 if (var->yres < MIN_YRES)
233 var->yres = MIN_YRES; 233 var->yres = MIN_YRES;
234 if (var->xres > fbi->max_xres) 234 if (var->xres > fbi->max_xres)
235 var->xres = fbi->max_xres; 235 return -EINVAL;
236 if (var->yres > fbi->max_yres) 236 if (var->yres > fbi->max_yres)
237 var->yres = fbi->max_yres; 237 return -EINVAL;
238 var->xres_virtual = 238 var->xres_virtual =
239 max(var->xres_virtual, var->xres); 239 max(var->xres_virtual, var->xres);
240 var->yres_virtual = 240 var->yres_virtual =
@@ -781,7 +781,7 @@ static void pxafb_disable_controller(struct pxafb_info *fbi)
781 LCCR0 &= ~LCCR0_LDM; /* Enable LCD Disable Done Interrupt */ 781 LCCR0 &= ~LCCR0_LDM; /* Enable LCD Disable Done Interrupt */
782 LCCR0 |= LCCR0_DIS; /* Disable LCD Controller */ 782 LCCR0 |= LCCR0_DIS; /* Disable LCD Controller */
783 783
784 schedule_timeout(20 * HZ / 1000); 784 schedule_timeout(200 * HZ / 1000);
785 remove_wait_queue(&fbi->ctrlr_wait, &wait); 785 remove_wait_queue(&fbi->ctrlr_wait, &wait);
786 786
787 /* disable LCD controller clock */ 787 /* disable LCD controller clock */
@@ -1274,7 +1274,7 @@ int __init pxafb_probe(struct platform_device *dev)
1274 struct pxafb_mach_info *inf; 1274 struct pxafb_mach_info *inf;
1275 int ret; 1275 int ret;
1276 1276
1277 dev_dbg(dev, "pxafb_probe\n"); 1277 dev_dbg(&dev->dev, "pxafb_probe\n");
1278 1278
1279 inf = dev->dev.platform_data; 1279 inf = dev->dev.platform_data;
1280 ret = -ENOMEM; 1280 ret = -ENOMEM;
diff --git a/drivers/video/radeonfb.c b/drivers/video/radeonfb.c
deleted file mode 100644
index afb6c2ead599..000000000000
--- a/drivers/video/radeonfb.c
+++ /dev/null
@@ -1,3167 +0,0 @@
1/*
2 * drivers/video/radeonfb.c
3 * framebuffer driver for ATI Radeon chipset video boards
4 *
5 * Copyright 2000 Ani Joshi <ajoshi@kernel.crashing.org>
6 *
7 *
8 * ChangeLog:
9 * 2000-08-03 initial version 0.0.1
10 * 2000-09-10 more bug fixes, public release 0.0.5
11 * 2001-02-19 mode bug fixes, 0.0.7
12 * 2001-07-05 fixed scrolling issues, engine initialization,
13 * and minor mode tweaking, 0.0.9
14 * 2001-09-07 Radeon VE support, Nick Kurshev
15 * blanking, pan_display, and cmap fixes, 0.1.0
16 * 2001-10-10 Radeon 7500 and 8500 support, and experimental
17 * flat panel support, 0.1.1
18 * 2001-11-17 Radeon M6 (ppc) support, Daniel Berlin, 0.1.2
19 * 2001-11-18 DFP fixes, Kevin Hendricks, 0.1.3
20 * 2001-11-29 more cmap, backlight fixes, Benjamin Herrenschmidt
21 * 2002-01-18 DFP panel detection via BIOS, Michael Clark, 0.1.4
22 * 2002-06-02 console switching, mode set fixes, accel fixes
23 * 2002-06-03 MTRR support, Peter Horton, 0.1.5
24 * 2002-09-21 rv250, r300, m9 initial support,
25 * added mirror option, 0.1.6
26 *
27 * Special thanks to ATI DevRel team for their hardware donations.
28 *
29 */
30
31
32#define RADEON_VERSION "0.1.6"
33
34
35#include <linux/config.h>
36#include <linux/module.h>
37#include <linux/kernel.h>
38#include <linux/errno.h>
39#include <linux/string.h>
40#include <linux/mm.h>
41#include <linux/tty.h>
42#include <linux/slab.h>
43#include <linux/delay.h>
44#include <linux/fb.h>
45#include <linux/ioport.h>
46#include <linux/init.h>
47#include <linux/pci.h>
48#include <linux/vmalloc.h>
49
50#include <asm/io.h>
51#include <asm/uaccess.h>
52#if defined(__powerpc__)
53#include <asm/prom.h>
54#include <asm/pci-bridge.h>
55#include "macmodes.h"
56
57#ifdef CONFIG_NVRAM
58#include <linux/nvram.h>
59#endif
60
61#ifdef CONFIG_PMAC_BACKLIGHT
62#include <asm/backlight.h>
63#endif
64
65#ifdef CONFIG_BOOTX_TEXT
66#include <asm/btext.h>
67#endif
68
69#ifdef CONFIG_ADB_PMU
70#include <linux/adb.h>
71#include <linux/pmu.h>
72#endif
73
74#endif /* __powerpc__ */
75
76#ifdef CONFIG_MTRR
77#include <asm/mtrr.h>
78#endif
79
80#include <video/radeon.h>
81#include <linux/radeonfb.h>
82
83#define DEBUG 0
84
85#if DEBUG
86#define RTRACE printk
87#else
88#define RTRACE if(0) printk
89#endif
90
91// XXX
92#undef CONFIG_PMAC_PBOOK
93
94
95enum radeon_chips {
96 RADEON_QD,
97 RADEON_QE,
98 RADEON_QF,
99 RADEON_QG,
100 RADEON_QY,
101 RADEON_QZ,
102 RADEON_LW,
103 RADEON_LX,
104 RADEON_LY,
105 RADEON_LZ,
106 RADEON_QL,
107 RADEON_QN,
108 RADEON_QO,
109 RADEON_Ql,
110 RADEON_BB,
111 RADEON_QW,
112 RADEON_QX,
113 RADEON_Id,
114 RADEON_Ie,
115 RADEON_If,
116 RADEON_Ig,
117 RADEON_Ya,
118 RADEON_Yd,
119 RADEON_Ld,
120 RADEON_Le,
121 RADEON_Lf,
122 RADEON_Lg,
123 RADEON_ND,
124 RADEON_NE,
125 RADEON_NF,
126 RADEON_NG,
127 RADEON_QM
128};
129
130enum radeon_arch {
131 RADEON_R100,
132 RADEON_RV100,
133 RADEON_R200,
134 RADEON_RV200,
135 RADEON_RV250,
136 RADEON_R300,
137 RADEON_M6,
138 RADEON_M7,
139 RADEON_M9
140};
141
142static struct radeon_chip_info {
143 const char *name;
144 unsigned char arch;
145} radeon_chip_info[] __devinitdata = {
146 { "QD", RADEON_R100 },
147 { "QE", RADEON_R100 },
148 { "QF", RADEON_R100 },
149 { "QG", RADEON_R100 },
150 { "VE QY", RADEON_RV100 },
151 { "VE QZ", RADEON_RV100 },
152 { "M7 LW", RADEON_M7 },
153 { "M7 LX", RADEON_M7 },
154 { "M6 LY", RADEON_M6 },
155 { "M6 LZ", RADEON_M6 },
156 { "8500 QL", RADEON_R200 },
157 { "8500 QN", RADEON_R200 },
158 { "8500 QO", RADEON_R200 },
159 { "8500 Ql", RADEON_R200 },
160 { "8500 BB", RADEON_R200 },
161 { "7500 QW", RADEON_RV200 },
162 { "7500 QX", RADEON_RV200 },
163 { "9000 Id", RADEON_RV250 },
164 { "9000 Ie", RADEON_RV250 },
165 { "9000 If", RADEON_RV250 },
166 { "9000 Ig", RADEON_RV250 },
167 { "M9 Ld", RADEON_M9 },
168 { "M9 Le", RADEON_M9 },
169 { "M9 Lf", RADEON_M9 },
170 { "M9 Lg", RADEON_M9 },
171 { "9700 ND", RADEON_R300 },
172 { "9700 NE", RADEON_R300 },
173 { "9700 NF", RADEON_R300 },
174 { "9700 NG", RADEON_R300 },
175 { "9100 QM", RADEON_R200 }
176};
177
178
179enum radeon_montype
180{
181 MT_NONE,
182 MT_CRT, /* CRT */
183 MT_LCD, /* LCD */
184 MT_DFP, /* DVI */
185 MT_CTV, /* composite TV */
186 MT_STV /* S-Video out */
187};
188
189
190static struct pci_device_id radeonfb_pci_table[] = {
191 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QD},
192 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QE},
193 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QF},
194 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QG, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QG},
195 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QY},
196 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QZ, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QZ},
197 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_LW, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_LW},
198 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_LX, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_LX},
199 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_LY, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_LY},
200 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_LZ, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_LZ},
201 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QL},
202 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QN, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QN},
203 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QO},
204 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Ql, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Ql},
205 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_BB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_BB},
206 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QW, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QW},
207 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QX, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QX},
208 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Id, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Id},
209 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Ie, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Ie},
210 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_If, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_If},
211 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Ig, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Ig},
212 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Ya, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Ya},
213 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Yd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Yd},
214 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Ld, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Ld},
215 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Le, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Le},
216 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Lf, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Lf},
217 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_Lg, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_Lg},
218 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_ND, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_ND},
219 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_NE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_NE},
220 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_NF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_NF},
221 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_NG, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_NG},
222 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RADEON_QM},
223 { 0, }
224};
225MODULE_DEVICE_TABLE(pci, radeonfb_pci_table);
226
227
228typedef struct {
229 u16 reg;
230 u32 val;
231} reg_val;
232
233
234/* these common regs are cleared before mode setting so they do not
235 * interfere with anything
236 */
237static reg_val common_regs[] = {
238 { OVR_CLR, 0 },
239 { OVR_WID_LEFT_RIGHT, 0 },
240 { OVR_WID_TOP_BOTTOM, 0 },
241 { OV0_SCALE_CNTL, 0 },
242 { SUBPIC_CNTL, 0 },
243 { VIPH_CONTROL, 0 },
244 { I2C_CNTL_1, 0 },
245 { GEN_INT_CNTL, 0 },
246 { CAP0_TRIG_CNTL, 0 },
247};
248
249static reg_val common_regs_m6[] = {
250 { OVR_CLR, 0 },
251 { OVR_WID_LEFT_RIGHT, 0 },
252 { OVR_WID_TOP_BOTTOM, 0 },
253 { OV0_SCALE_CNTL, 0 },
254 { SUBPIC_CNTL, 0 },
255 { GEN_INT_CNTL, 0 },
256 { CAP0_TRIG_CNTL, 0 }
257};
258
259typedef struct {
260 u8 clock_chip_type;
261 u8 struct_size;
262 u8 accelerator_entry;
263 u8 VGA_entry;
264 u16 VGA_table_offset;
265 u16 POST_table_offset;
266 u16 XCLK;
267 u16 MCLK;
268 u8 num_PLL_blocks;
269 u8 size_PLL_blocks;
270 u16 PCLK_ref_freq;
271 u16 PCLK_ref_divider;
272 u32 PCLK_min_freq;
273 u32 PCLK_max_freq;
274 u16 MCLK_ref_freq;
275 u16 MCLK_ref_divider;
276 u32 MCLK_min_freq;
277 u32 MCLK_max_freq;
278 u16 XCLK_ref_freq;
279 u16 XCLK_ref_divider;
280 u32 XCLK_min_freq;
281 u32 XCLK_max_freq;
282} __attribute__ ((packed)) PLL_BLOCK;
283
284
285struct pll_info {
286 int ppll_max;
287 int ppll_min;
288 int xclk;
289 int ref_div;
290 int ref_clk;
291};
292
293
294struct ram_info {
295 int ml;
296 int mb;
297 int trcd;
298 int trp;
299 int twr;
300 int cl;
301 int tr2w;
302 int loop_latency;
303 int rloop;
304};
305
306
307struct radeon_regs {
308 /* CRTC regs */
309 u32 crtc_h_total_disp;
310 u32 crtc_h_sync_strt_wid;
311 u32 crtc_v_total_disp;
312 u32 crtc_v_sync_strt_wid;
313 u32 crtc_pitch;
314 u32 crtc_gen_cntl;
315 u32 crtc_ext_cntl;
316 u32 dac_cntl;
317
318 u32 flags;
319 u32 pix_clock;
320 int xres, yres;
321
322 /* DDA regs */
323 u32 dda_config;
324 u32 dda_on_off;
325
326 /* PLL regs */
327 u32 ppll_div_3;
328 u32 ppll_ref_div;
329 u32 vclk_ecp_cntl;
330
331 /* Flat panel regs */
332 u32 fp_crtc_h_total_disp;
333 u32 fp_crtc_v_total_disp;
334 u32 fp_gen_cntl;
335 u32 fp_h_sync_strt_wid;
336 u32 fp_horz_stretch;
337 u32 fp_panel_cntl;
338 u32 fp_v_sync_strt_wid;
339 u32 fp_vert_stretch;
340 u32 lvds_gen_cntl;
341 u32 lvds_pll_cntl;
342 u32 tmds_crc;
343 u32 tmds_transmitter_cntl;
344
345#if defined(__BIG_ENDIAN)
346 u32 surface_cntl;
347#endif
348};
349
350
351struct radeonfb_info {
352 struct fb_info info;
353
354 struct radeon_regs state;
355 struct radeon_regs init_state;
356
357 char name[32];
358 char ram_type[12];
359
360 unsigned long mmio_base_phys;
361 unsigned long fb_base_phys;
362
363 void __iomem *mmio_base;
364 void __iomem *fb_base;
365
366 struct pci_dev *pdev;
367
368 unsigned char *EDID;
369 unsigned char __iomem *bios_seg;
370
371 u32 pseudo_palette[17];
372 struct { u8 red, green, blue, pad; } palette[256];
373
374 int chipset;
375 unsigned char arch;
376 int video_ram;
377 u8 rev;
378 int pitch, bpp, depth;
379 int xres, yres, pixclock;
380 int xres_virtual, yres_virtual;
381 u32 accel_flags;
382
383 int use_default_var;
384 int got_dfpinfo;
385
386 int hasCRTC2;
387 int crtDisp_type;
388 int dviDisp_type;
389
390 int panel_xres, panel_yres;
391 int clock;
392 int hOver_plus, hSync_width, hblank;
393 int vOver_plus, vSync_width, vblank;
394 int hAct_high, vAct_high, interlaced;
395 int synct, misc;
396
397 u32 dp_gui_master_cntl;
398
399 struct pll_info pll;
400 int pll_output_freq, post_div, fb_div;
401
402 struct ram_info ram;
403
404 int mtrr_hdl;
405
406#ifdef CONFIG_PMAC_PBOOK
407 int pm_reg;
408 u32 save_regs[64];
409 u32 mdll, mdll2;
410#endif /* CONFIG_PMAC_PBOOK */
411 int asleep;
412
413 struct radeonfb_info *next;
414};
415
416
417static struct fb_var_screeninfo radeonfb_default_var = {
418 640, 480, 640, 480, 0, 0, 8, 0,
419 {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0},
420 0, 0, -1, -1, 0, 39721, 40, 24, 32, 11, 96, 2,
421 0, FB_VMODE_NONINTERLACED
422};
423
424/*
425 * IO macros
426 */
427
428#define INREG8(addr) readb((rinfo->mmio_base)+addr)
429#define OUTREG8(addr,val) writeb(val, (rinfo->mmio_base)+addr)
430#define INREG(addr) readl((rinfo->mmio_base)+addr)
431#define OUTREG(addr,val) writel(val, (rinfo->mmio_base)+addr)
432
433#define OUTPLL(addr,val) \
434 do { \
435 OUTREG8(CLOCK_CNTL_INDEX, (addr & 0x0000003f) | 0x00000080); \
436 OUTREG(CLOCK_CNTL_DATA, val); \
437 } while(0)
438
439#define OUTPLLP(addr,val,mask) \
440 do { \
441 unsigned int _tmp = INPLL(addr); \
442 _tmp &= (mask); \
443 _tmp |= (val); \
444 OUTPLL(addr, _tmp); \
445 } while (0)
446
447#define OUTREGP(addr,val,mask) \
448 do { \
449 unsigned int _tmp = INREG(addr); \
450 _tmp &= (mask); \
451 _tmp |= (val); \
452 OUTREG(addr, _tmp); \
453 } while (0)
454
455
456static __inline__ u32 _INPLL(struct radeonfb_info *rinfo, u32 addr)
457{
458 OUTREG8(CLOCK_CNTL_INDEX, addr & 0x0000003f);
459 return (INREG(CLOCK_CNTL_DATA));
460}
461
462#define INPLL(addr) _INPLL(rinfo, addr)
463
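The OUTREGP and OUTPLLP macros above are read-modify-write helpers in which the mask selects the bits to keep, so callers pass the complement of the field being replaced (for example ~RB2D_DC_FLUSH_ALL in radeon_engine_flush() below). A minimal standalone sketch of that masking arithmetic, with made-up register values:

#include <stdio.h>

/* Same pattern as OUTREGP/OUTPLLP: keep the bits selected by 'mask',
 * then OR in the new field contents.
 */
static unsigned int rmw(unsigned int reg, unsigned int val, unsigned int mask)
{
	reg &= mask;	/* preserve only the masked bits */
	reg |= val;	/* merge in the new value */
	return reg;
}

int main(void)
{
	/* hypothetical: replace the low nibble of 0xdeadbee0 with 7 */
	printf("0x%08x\n", rmw(0xdeadbee0, 0x7, ~0xfu));	/* 0xdeadbee7 */
	return 0;
}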
464#define PRIMARY_MONITOR(rinfo) ((rinfo->dviDisp_type != MT_NONE) && \
465 (rinfo->dviDisp_type != MT_STV) && \
466 (rinfo->dviDisp_type != MT_CTV) ? \
467 rinfo->dviDisp_type : rinfo->crtDisp_type)
468
469static char *GET_MON_NAME(int type)
470{
471 char *pret = NULL;
472
473 switch (type) {
474 case MT_NONE:
475 pret = "no";
476 break;
477 case MT_CRT:
478 pret = "CRT";
479 break;
480 case MT_DFP:
481 pret = "DFP";
482 break;
483 case MT_LCD:
484 pret = "LCD";
485 break;
486 case MT_CTV:
487 pret = "CTV";
488 break;
489 case MT_STV:
490 pret = "STV";
491 break;
492 }
493
494 return pret;
495}
496
497
498/*
499 * 2D engine routines
500 */
501
502static __inline__ void radeon_engine_flush (struct radeonfb_info *rinfo)
503{
504 int i;
505
506 /* initiate flush */
507 OUTREGP(RB2D_DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL,
508 ~RB2D_DC_FLUSH_ALL);
509
510 for (i=0; i < 2000000; i++) {
511 if (!(INREG(RB2D_DSTCACHE_CTLSTAT) & RB2D_DC_BUSY))
512 break;
513 }
514}
515
516
517static __inline__ void _radeon_fifo_wait (struct radeonfb_info *rinfo, int entries)
518{
519 int i;
520
521 for (i=0; i<2000000; i++)
522 if ((INREG(RBBM_STATUS) & 0x7f) >= entries)
523 return;
524}
525
526
527static __inline__ void _radeon_engine_idle (struct radeonfb_info *rinfo)
528{
529 int i;
530
531 /* ensure FIFO is empty before waiting for idle */
532 _radeon_fifo_wait (rinfo, 64);
533
534 for (i=0; i<2000000; i++) {
535 if (((INREG(RBBM_STATUS) & GUI_ACTIVE)) == 0) {
536 radeon_engine_flush (rinfo);
537 return;
538 }
539 }
540}
541
542
543#define radeon_engine_idle() _radeon_engine_idle(rinfo)
544#define radeon_fifo_wait(entries) _radeon_fifo_wait(rinfo,entries)
545
546
547
548/*
549 * helper routines
550 */
551
552static __inline__ u32 radeon_get_dstbpp(u16 depth)
553{
554 switch (depth) {
555 case 8:
556 return DST_8BPP;
557 case 15:
558 return DST_15BPP;
559 case 16:
560 return DST_16BPP;
561 case 32:
562 return DST_32BPP;
563 default:
564 return 0;
565 }
566}
567
568
569static inline int var_to_depth(const struct fb_var_screeninfo *var)
570{
571 if (var->bits_per_pixel != 16)
572 return var->bits_per_pixel;
573 return (var->green.length == 6) ? 16 : 15;
574}
575
576
577static void _radeon_engine_reset(struct radeonfb_info *rinfo)
578{
579 u32 clock_cntl_index, mclk_cntl, rbbm_soft_reset;
580
581 radeon_engine_flush (rinfo);
582
583 clock_cntl_index = INREG(CLOCK_CNTL_INDEX);
584 mclk_cntl = INPLL(MCLK_CNTL);
585
586 OUTPLL(MCLK_CNTL, (mclk_cntl |
587 FORCEON_MCLKA |
588 FORCEON_MCLKB |
589 FORCEON_YCLKA |
590 FORCEON_YCLKB |
591 FORCEON_MC |
592 FORCEON_AIC));
593 rbbm_soft_reset = INREG(RBBM_SOFT_RESET);
594
595 OUTREG(RBBM_SOFT_RESET, rbbm_soft_reset |
596 SOFT_RESET_CP |
597 SOFT_RESET_HI |
598 SOFT_RESET_SE |
599 SOFT_RESET_RE |
600 SOFT_RESET_PP |
601 SOFT_RESET_E2 |
602 SOFT_RESET_RB);
603 INREG(RBBM_SOFT_RESET);
604 OUTREG(RBBM_SOFT_RESET, rbbm_soft_reset & (u32)
605 ~(SOFT_RESET_CP |
606 SOFT_RESET_HI |
607 SOFT_RESET_SE |
608 SOFT_RESET_RE |
609 SOFT_RESET_PP |
610 SOFT_RESET_E2 |
611 SOFT_RESET_RB));
612 INREG(RBBM_SOFT_RESET);
613
614 OUTPLL(MCLK_CNTL, mclk_cntl);
615 OUTREG(CLOCK_CNTL_INDEX, clock_cntl_index);
616 OUTREG(RBBM_SOFT_RESET, rbbm_soft_reset);
617
618 return;
619}
620
621#define radeon_engine_reset() _radeon_engine_reset(rinfo)
622
623
624static __inline__ int round_div(int num, int den)
625{
626 return (num + (den / 2)) / den;
627}
628
629
630
631static __inline__ int min_bits_req(int val)
632{
633 int bits_req = 0;
634
635 if (val == 0)
636 bits_req = 1;
637
638 while (val) {
639 val >>= 1;
640 bits_req++;
641 }
642
643 return (bits_req);
644}
645
646
647static __inline__ int _max(int val1, int val2)
648{
649 if (val1 >= val2)
650 return val1;
651 else
652 return val2;
653}
654
655
656
657/*
658 * globals
659 */
660
661#ifndef MODULE
662static char *mode_option;
663#endif
664
665static char noaccel = 0;
666static char mirror = 0;
667static int panel_yres = 0;
668static char force_dfp = 0;
669static struct radeonfb_info *board_list = NULL;
670static char nomtrr = 0;
671
672/*
673 * prototypes
674 */
675
676static void radeon_save_state (struct radeonfb_info *rinfo,
677 struct radeon_regs *save);
678static void radeon_engine_init (struct radeonfb_info *rinfo);
679static void radeon_write_mode (struct radeonfb_info *rinfo,
680 struct radeon_regs *mode);
681static int __devinit radeon_set_fbinfo (struct radeonfb_info *rinfo);
682static int __devinit radeon_init_disp (struct radeonfb_info *rinfo);
683static int radeon_init_disp_var (struct radeonfb_info *rinfo, struct fb_var_screeninfo *var);
684static void __iomem *radeon_find_rom(struct radeonfb_info *rinfo);
685static void radeon_get_pllinfo(struct radeonfb_info *rinfo, void __iomem *bios_seg);
686static void radeon_get_moninfo (struct radeonfb_info *rinfo);
687static int radeon_get_dfpinfo (struct radeonfb_info *rinfo);
688static int radeon_get_dfpinfo_BIOS(struct radeonfb_info *rinfo);
689static void radeon_get_EDID(struct radeonfb_info *rinfo);
690static int radeon_dfp_parse_EDID(struct radeonfb_info *rinfo);
691static void radeon_update_default_var(struct radeonfb_info *rinfo);
692
693#ifdef CONFIG_PPC_OF
694
695static int radeon_read_OF (struct radeonfb_info *rinfo);
696static int radeon_get_EDID_OF(struct radeonfb_info *rinfo);
697extern struct device_node *pci_device_to_OF_node(struct pci_dev *dev);
698
699#ifdef CONFIG_PMAC_PBOOK
700int radeon_sleep_notify(struct pmu_sleep_notifier *self, int when);
701static struct pmu_sleep_notifier radeon_sleep_notifier = {
702 radeon_sleep_notify, SLEEP_LEVEL_VIDEO,
703};
704#endif /* CONFIG_PMAC_PBOOK */
705#ifdef CONFIG_PMAC_BACKLIGHT
706static int radeon_set_backlight_enable(int on, int level, void *data);
707static int radeon_set_backlight_level(int level, void *data);
708static struct backlight_controller radeon_backlight_controller = {
709 radeon_set_backlight_enable,
710 radeon_set_backlight_level
711};
712#endif /* CONFIG_PMAC_BACKLIGHT */
713
714#endif /* CONFIG_PPC_OF */
715
716
717static void __iomem *radeon_find_rom(struct radeonfb_info *rinfo)
718{
719#if defined(__i386__)
720 u32 segstart;
721 char __iomem *rom_base;
722 char __iomem *rom;
723 int stage;
724 int i,j;
725 char aty_rom_sig[] = "761295520";
726 char *radeon_sig[] = {
727 "RG6",
728 "RADEON"
729 };
730
731 for(segstart=0x000c0000; segstart<0x000f0000; segstart+=0x00001000) {
732
733 stage = 1;
734
735 rom_base = ioremap(segstart, 0x1000);
736
737 if ((*rom_base == 0x55) && (((*(rom_base + 1)) & 0xff) == 0xaa))
738 stage = 2;
739
740
741 if (stage != 2) {
742 iounmap(rom_base);
743 continue;
744 }
745
746 rom = rom_base;
747
748 for (i = 0; (i < 128 - strlen(aty_rom_sig)) && (stage != 3); i++) {
749 if (aty_rom_sig[0] == *rom)
750 if (strncmp(aty_rom_sig, rom,
751 strlen(aty_rom_sig)) == 0)
752 stage = 3;
753 rom++;
754 }
755 if (stage != 3) {
756 iounmap(rom_base);
757 continue;
758 }
759 rom = rom_base;
760
761 for (i = 0; (i < 512) && (stage != 4); i++) {
762 for (j = 0; j < ARRAY_SIZE(radeon_sig); j++) {
763 if (radeon_sig[j][0] == *rom)
764 if (strncmp(radeon_sig[j], rom,
765 strlen(radeon_sig[j])) == 0) {
766 stage = 4;
767 break;
768 }
769 }
770 rom++;
771 }
772 if (stage != 4) {
773 iounmap(rom_base);
774 continue;
775 }
776
777 return rom_base;
778 }
779#endif
780 return NULL;
781}
782
783
784
785
786static void radeon_get_pllinfo(struct radeonfb_info *rinfo, void __iomem *bios_seg)
787{
788 void __iomem *bios_header;
789 void __iomem *header_ptr;
790 u16 bios_header_offset, pll_info_offset;
791 PLL_BLOCK pll;
792
793 if (bios_seg) {
794 bios_header = bios_seg + 0x48L;
795 header_ptr = bios_header;
796
797 bios_header_offset = readw(header_ptr);
798 bios_header = bios_seg + bios_header_offset;
799 bios_header += 0x30;
800
801 header_ptr = bios_header;
802 pll_info_offset = readw(header_ptr);
803 header_ptr = bios_seg + pll_info_offset;
804
805 memcpy_fromio(&pll, header_ptr, 50);
806
807 rinfo->pll.xclk = (u32)pll.XCLK;
808 rinfo->pll.ref_clk = (u32)pll.PCLK_ref_freq;
809 rinfo->pll.ref_div = (u32)pll.PCLK_ref_divider;
810 rinfo->pll.ppll_min = pll.PCLK_min_freq;
811 rinfo->pll.ppll_max = pll.PCLK_max_freq;
812
813 printk("radeonfb: ref_clk=%d, ref_div=%d, xclk=%d from BIOS\n",
814 rinfo->pll.ref_clk, rinfo->pll.ref_div, rinfo->pll.xclk);
815 } else {
816#ifdef CONFIG_PPC_OF
817 if (radeon_read_OF(rinfo)) {
818 unsigned int tmp, Nx, M, ref_div, xclk;
819
820 tmp = INPLL(M_SPLL_REF_FB_DIV);
821 ref_div = INPLL(PPLL_REF_DIV) & 0x3ff;
822
823 Nx = (tmp & 0xff00) >> 8;
824 M = (tmp & 0xff);
825 xclk = ((((2 * Nx * rinfo->pll.ref_clk) + (M)) /
826 (2 * M)));
827
828 rinfo->pll.xclk = xclk;
829 rinfo->pll.ref_div = ref_div;
830 rinfo->pll.ppll_min = 12000;
831 rinfo->pll.ppll_max = 35000;
832
833 printk("radeonfb: ref_clk=%d, ref_div=%d, xclk=%d from OF\n",
834 rinfo->pll.ref_clk, rinfo->pll.ref_div, rinfo->pll.xclk);
835
836 return;
837 }
838#endif
839 /* no BIOS or BIOS not found, use defaults */
840 switch (rinfo->chipset) {
841 case PCI_DEVICE_ID_ATI_RADEON_QW:
842 case PCI_DEVICE_ID_ATI_RADEON_QX:
843 rinfo->pll.ppll_max = 35000;
844 rinfo->pll.ppll_min = 12000;
845 rinfo->pll.xclk = 23000;
846 rinfo->pll.ref_div = 12;
847 rinfo->pll.ref_clk = 2700;
848 break;
849 case PCI_DEVICE_ID_ATI_RADEON_QL:
850 case PCI_DEVICE_ID_ATI_RADEON_QN:
851 case PCI_DEVICE_ID_ATI_RADEON_QO:
852 case PCI_DEVICE_ID_ATI_RADEON_Ql:
853 case PCI_DEVICE_ID_ATI_RADEON_BB:
854 rinfo->pll.ppll_max = 35000;
855 rinfo->pll.ppll_min = 12000;
856 rinfo->pll.xclk = 27500;
857 rinfo->pll.ref_div = 12;
858 rinfo->pll.ref_clk = 2700;
859 break;
860 case PCI_DEVICE_ID_ATI_RADEON_Id:
861 case PCI_DEVICE_ID_ATI_RADEON_Ie:
862 case PCI_DEVICE_ID_ATI_RADEON_If:
863 case PCI_DEVICE_ID_ATI_RADEON_Ig:
864 rinfo->pll.ppll_max = 35000;
865 rinfo->pll.ppll_min = 12000;
866 rinfo->pll.xclk = 25000;
867 rinfo->pll.ref_div = 12;
868 rinfo->pll.ref_clk = 2700;
869 break;
870 case PCI_DEVICE_ID_ATI_RADEON_ND:
871 case PCI_DEVICE_ID_ATI_RADEON_NE:
872 case PCI_DEVICE_ID_ATI_RADEON_NF:
873 case PCI_DEVICE_ID_ATI_RADEON_NG:
874 rinfo->pll.ppll_max = 40000;
875 rinfo->pll.ppll_min = 20000;
876 rinfo->pll.xclk = 27000;
877 rinfo->pll.ref_div = 12;
878 rinfo->pll.ref_clk = 2700;
879 break;
880 case PCI_DEVICE_ID_ATI_RADEON_QD:
881 case PCI_DEVICE_ID_ATI_RADEON_QE:
882 case PCI_DEVICE_ID_ATI_RADEON_QF:
883 case PCI_DEVICE_ID_ATI_RADEON_QG:
884 default:
885 rinfo->pll.ppll_max = 35000;
886 rinfo->pll.ppll_min = 12000;
887 rinfo->pll.xclk = 16600;
888 rinfo->pll.ref_div = 67;
889 rinfo->pll.ref_clk = 2700;
890 break;
891 }
892
893 printk("radeonfb: ref_clk=%d, ref_div=%d, xclk=%d defaults\n",
894 rinfo->pll.ref_clk, rinfo->pll.ref_div, rinfo->pll.xclk);
895 }
896}
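When no BIOS image is found, the Open Firmware path above rebuilds XCLK from the SPLL feedback divider (Nx) and reference divider (M): the expression (2*Nx*ref_clk + M) / (2*M) is simply ref_clk*Nx/M rounded to the nearest integer. A small sketch with invented divider values; as everywhere in this driver, clocks are in units of 10 kHz:

#include <stdio.h>

int main(void)
{
	unsigned int ref_clk = 2700;	/* 27.00 MHz reference */
	unsigned int Nx = 46, M = 6;	/* hypothetical SPLL dividers */
	unsigned int xclk = (2 * Nx * ref_clk + M) / (2 * M);

	/* prints: xclk = 20700 (207.00 MHz) */
	printf("xclk = %u (%u.%02u MHz)\n", xclk, xclk / 100, xclk % 100);
	return 0;
}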
897
898
899static void radeon_get_moninfo (struct radeonfb_info *rinfo)
900{
901 unsigned int tmp;
902
903 if (force_dfp) {
904 rinfo->dviDisp_type = MT_DFP;
905 return;
906 }
907
908 tmp = INREG(BIOS_4_SCRATCH);
909 printk(KERN_DEBUG "radeon_get_moninfo: bios 4 scratch = %x\n", tmp);
910
911 if (rinfo->hasCRTC2) {
912 /* primary DVI port */
913 if (tmp & 0x08)
914 rinfo->dviDisp_type = MT_DFP;
915 else if (tmp & 0x4)
916 rinfo->dviDisp_type = MT_LCD;
917 else if (tmp & 0x200)
918 rinfo->dviDisp_type = MT_CRT;
919 else if (tmp & 0x10)
920 rinfo->dviDisp_type = MT_CTV;
921 else if (tmp & 0x20)
922 rinfo->dviDisp_type = MT_STV;
923
924 /* secondary CRT port */
925 if (tmp & 0x2)
926 rinfo->crtDisp_type = MT_CRT;
927 else if (tmp & 0x800)
928 rinfo->crtDisp_type = MT_DFP;
929 else if (tmp & 0x400)
930 rinfo->crtDisp_type = MT_LCD;
931 else if (tmp & 0x1000)
932 rinfo->crtDisp_type = MT_CTV;
933 else if (tmp & 0x2000)
934 rinfo->crtDisp_type = MT_STV;
935 } else {
936 rinfo->dviDisp_type = MT_NONE;
937
938 tmp = INREG(FP_GEN_CNTL);
939
940 if (tmp & FP_EN_TMDS)
941 rinfo->crtDisp_type = MT_DFP;
942 else
943 rinfo->crtDisp_type = MT_CRT;
944 }
945}
946
947
948
949static void radeon_get_EDID(struct radeonfb_info *rinfo)
950{
951#ifdef CONFIG_PPC_OF
952 if (!radeon_get_EDID_OF(rinfo))
953 RTRACE("radeonfb: could not retrieve EDID from OF\n");
954#else
955 /* XXX use other methods later */
956#endif
957}
958
959
960#ifdef CONFIG_PPC_OF
961static int radeon_get_EDID_OF(struct radeonfb_info *rinfo)
962{
963 struct device_node *dp;
964 unsigned char *pedid = NULL;
965 static char *propnames[] = { "DFP,EDID", "LCD,EDID", "EDID", "EDID1", NULL };
966 int i;
967
968 dp = pci_device_to_OF_node(rinfo->pdev);
969 while (dp != NULL) {
970 for (i = 0; propnames[i] != NULL; ++i) {
971 pedid = (unsigned char *)
972 get_property(dp, propnames[i], NULL);
973 if (pedid != NULL) {
974 rinfo->EDID = pedid;
975 return 1;
976 }
977 }
978 dp = dp->child;
979 }
980 return 0;
981}
982#endif /* CONFIG_PPC_OF */
983
984
985static int radeon_dfp_parse_EDID(struct radeonfb_info *rinfo)
986{
987 unsigned char *block = rinfo->EDID;
988
989 if (!block)
990 return 0;
991
992 /* jump to the detailed timing block section */
993 block += 54;
994
995 rinfo->clock = (block[0] + (block[1] << 8));
996 rinfo->panel_xres = (block[2] + ((block[4] & 0xf0) << 4));
997 rinfo->hblank = (block[3] + ((block[4] & 0x0f) << 8));
998 rinfo->panel_yres = (block[5] + ((block[7] & 0xf0) << 4));
999 rinfo->vblank = (block[6] + ((block[7] & 0x0f) << 8));
1000 rinfo->hOver_plus = (block[8] + ((block[11] & 0xc0) << 2));
1001 rinfo->hSync_width = (block[9] + ((block[11] & 0x30) << 4));
1002 rinfo->vOver_plus = ((block[10] >> 4) + ((block[11] & 0x0c) << 2));
1003 rinfo->vSync_width = ((block[10] & 0x0f) + ((block[11] & 0x03) << 4));
1004 rinfo->interlaced = ((block[17] & 0x80) >> 7);
1005 rinfo->synct = ((block[17] & 0x18) >> 3);
1006 rinfo->misc = ((block[17] & 0x06) >> 1);
1007 rinfo->hAct_high = rinfo->vAct_high = 0;
1008 if (rinfo->synct == 3) {
1009 if (rinfo->misc & 2)
1010 rinfo->hAct_high = 1;
1011 if (rinfo->misc & 1)
1012 rinfo->vAct_high = 1;
1013 }
1014
1015 printk("radeonfb: detected DFP panel size from EDID: %dx%d\n",
1016 rinfo->panel_xres, rinfo->panel_yres);
1017
1018 rinfo->got_dfpinfo = 1;
1019
1020 return 1;
1021}
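radeon_dfp_parse_EDID() above reads the first detailed timing descriptor, which starts at byte 54 of the EDID block, and reassembles each timing field from a low byte plus a shared high-nibble byte. A standalone sketch of the same decoding, fed a hand-made 1024x768 descriptor (the bytes are illustrative, not dumped from real hardware):

#include <stdio.h>

int main(void)
{
	/* first 8 bytes of a fabricated detailed timing descriptor */
	unsigned char block[18] = {
		0x64, 0x19,		/* pixel clock in 10 kHz units (6500) */
		0x00, 0x40, 0x41,	/* hactive low, hblank low, high nibbles */
		0x00, 0x26, 0x30,	/* vactive low, vblank low, high nibbles */
	};
	int clock  = block[0] + (block[1] << 8);
	int xres   = block[2] + ((block[4] & 0xf0) << 4);
	int hblank = block[3] + ((block[4] & 0x0f) << 8);
	int yres   = block[5] + ((block[7] & 0xf0) << 4);
	int vblank = block[6] + ((block[7] & 0x0f) << 8);

	/* prints: 1024x768, hblank 320, vblank 38, 65.00 MHz */
	printf("%dx%d, hblank %d, vblank %d, %d.%02d MHz\n",
	       xres, yres, hblank, vblank, clock / 100, clock % 100);
	return 0;
}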
1022
1023
1024static void radeon_update_default_var(struct radeonfb_info *rinfo)
1025{
1026 struct fb_var_screeninfo *var = &radeonfb_default_var;
1027
1028 var->xres = rinfo->panel_xres;
1029 var->yres = rinfo->panel_yres;
1030 var->xres_virtual = rinfo->panel_xres;
1031 var->yres_virtual = rinfo->panel_yres;
1032 var->xoffset = var->yoffset = 0;
1033 var->bits_per_pixel = 8;
1034 var->pixclock = 100000000 / rinfo->clock;
1035 var->left_margin = (rinfo->hblank - rinfo->hOver_plus - rinfo->hSync_width);
1036 var->right_margin = rinfo->hOver_plus;
1037 var->upper_margin = (rinfo->vblank - rinfo->vOver_plus - rinfo->vSync_width);
1038 var->lower_margin = rinfo->vOver_plus;
1039 var->hsync_len = rinfo->hSync_width;
1040 var->vsync_len = rinfo->vSync_width;
1041 var->sync = 0;
1042 if (rinfo->synct == 3) {
1043 if (rinfo->hAct_high)
1044 var->sync |= FB_SYNC_HOR_HIGH_ACT;
1045 if (rinfo->vAct_high)
1046 var->sync |= FB_SYNC_VERT_HIGH_ACT;
1047 }
1048
1049 var->vmode = 0;
1050 if (rinfo->interlaced)
1051 var->vmode |= FB_VMODE_INTERLACED;
1052
1053 rinfo->use_default_var = 1;
1054}
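The pixclock conversion above relies on the dot clock being kept in 10 kHz units while fb_var_screeninfo.pixclock is a pixel period in picoseconds; 10^8 divided by one gives the other. A quick sketch assuming a 65 MHz panel clock:

#include <stdio.h>

int main(void)
{
	int clock = 6500;			/* 65.00 MHz in 10 kHz units */
	int pixclock = 100000000 / clock;	/* period in picoseconds */

	printf("%d ps\n", pixclock);		/* 15384 ps, i.e. ~65 MHz */
	return 0;
}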
1055
1056
1057static int radeon_get_dfpinfo_BIOS(struct radeonfb_info *rinfo)
1058{
1059 char __iomem *fpbiosstart, *tmp, *tmp0;
1060 char stmp[30];
1061 int i;
1062
1063 if (!rinfo->bios_seg)
1064 return 0;
1065
1066 if (!(fpbiosstart = rinfo->bios_seg + readw(rinfo->bios_seg + 0x48))) {
1067 printk("radeonfb: Failed to detect DFP panel info using BIOS\n");
1068 return 0;
1069 }
1070
1071 if (!(tmp = rinfo->bios_seg + readw(fpbiosstart + 0x40))) {
1072 printk("radeonfb: Failed to detect DFP panel info using BIOS\n");
1073 return 0;
1074 }
1075
1076 for(i=0; i<24; i++)
1077 stmp[i] = readb(tmp+i+1);
1078 stmp[24] = 0;
1079 printk("radeonfb: panel ID string: %s\n", stmp);
1080 rinfo->panel_xres = readw(tmp + 25);
1081 rinfo->panel_yres = readw(tmp + 27);
1082 printk("radeonfb: detected DFP panel size from BIOS: %dx%d\n",
1083 rinfo->panel_xres, rinfo->panel_yres);
1084
1085 for(i=0; i<32; i++) {
1086 tmp0 = rinfo->bios_seg + readw(tmp+64+i*2);
1087 if (tmp0 == 0)
1088 break;
1089 if ((readw(tmp0) == rinfo->panel_xres) &&
1090 (readw(tmp0+2) == rinfo->panel_yres)) {
1091 rinfo->hblank = (readw(tmp0+17) - readw(tmp0+19)) * 8;
1092 rinfo->hOver_plus = ((readw(tmp0+21) - readw(tmp0+19) -1) * 8) & 0x7fff;
1093 rinfo->hSync_width = readb(tmp0+23) * 8;
1094 rinfo->vblank = readw(tmp0+24) - readw(tmp0+26);
1095 rinfo->vOver_plus = (readw(tmp0+28) & 0x7ff) - readw(tmp0+26);
1096 rinfo->vSync_width = (readw(tmp0+28) & 0xf800) >> 11;
1097 rinfo->clock = readw(tmp0+9);
1098
1099 rinfo->got_dfpinfo = 1;
1100 return 1;
1101 }
1102 }
1103
1104 return 0;
1105}
1106
1107
1108
1109static int radeon_get_dfpinfo (struct radeonfb_info *rinfo)
1110{
1111 unsigned int tmp;
1112 unsigned short a, b;
1113
1114 if (radeon_get_dfpinfo_BIOS(rinfo))
1115 radeon_update_default_var(rinfo);
1116
1117 if (radeon_dfp_parse_EDID(rinfo))
1118 radeon_update_default_var(rinfo);
1119
1120 if (!rinfo->got_dfpinfo) {
1121 /*
1122 * it seems all else has failed now and we
1123 * resort to probing registers for our DFP info
1124 */
1125 if (panel_yres) {
1126 rinfo->panel_yres = panel_yres;
1127 } else {
1128 tmp = INREG(FP_VERT_STRETCH);
1129 tmp &= 0x00fff000;
1130 rinfo->panel_yres = (unsigned short)(tmp >> 0x0c) + 1;
1131 }
1132
1133 switch (rinfo->panel_yres) {
1134 case 480:
1135 rinfo->panel_xres = 640;
1136 break;
1137 case 600:
1138 rinfo->panel_xres = 800;
1139 break;
1140 case 768:
1141#if defined(__powerpc__)
1142 if (rinfo->dviDisp_type == MT_LCD)
1143 rinfo->panel_xres = 1152;
1144 else
1145#endif
1146 rinfo->panel_xres = 1024;
1147 break;
1148 case 1024:
1149 rinfo->panel_xres = 1280;
1150 break;
1151 case 1050:
1152 rinfo->panel_xres = 1400;
1153 break;
1154 case 1200:
1155 rinfo->panel_xres = 1600;
1156 break;
1157 default:
1158 printk("radeonfb: Failed to detect DFP panel size\n");
1159 return 0;
1160 }
1161
1162 printk("radeonfb: detected DFP panel size from registers: %dx%d\n",
1163 rinfo->panel_xres, rinfo->panel_yres);
1164
1165 tmp = INREG(FP_CRTC_H_TOTAL_DISP);
1166 a = (tmp & FP_CRTC_H_TOTAL_MASK) + 4;
1167 b = (tmp & 0x01ff0000) >> FP_CRTC_H_DISP_SHIFT;
1168 rinfo->hblank = (a - b + 1) * 8;
1169
1170 tmp = INREG(FP_H_SYNC_STRT_WID);
1171 rinfo->hOver_plus = (unsigned short) ((tmp & FP_H_SYNC_STRT_CHAR_MASK) >>
1172 FP_H_SYNC_STRT_CHAR_SHIFT) - b - 1;
1173 rinfo->hOver_plus *= 8;
1174 rinfo->hSync_width = (unsigned short) ((tmp & FP_H_SYNC_WID_MASK) >>
1175 FP_H_SYNC_WID_SHIFT);
1176 rinfo->hSync_width *= 8;
1177 tmp = INREG(FP_CRTC_V_TOTAL_DISP);
1178 a = (tmp & FP_CRTC_V_TOTAL_MASK) + 1;
1179 b = (tmp & FP_CRTC_V_DISP_MASK) >> FP_CRTC_V_DISP_SHIFT;
1180 rinfo->vblank = a - b /* + 24 */ ;
1181
1182 tmp = INREG(FP_V_SYNC_STRT_WID);
1183 rinfo->vOver_plus = (unsigned short) (tmp & FP_V_SYNC_STRT_MASK)
1184 - b + 1;
1185 rinfo->vSync_width = (unsigned short) ((tmp & FP_V_SYNC_WID_MASK) >>
1186 FP_V_SYNC_WID_SHIFT);
1187
1188 return 1;
1189 }
1190
1191 return 1;
1192}
1193
1194
1195#ifdef CONFIG_PPC_OF
1196static int radeon_read_OF (struct radeonfb_info *rinfo)
1197{
1198 struct device_node *dp;
1199 unsigned int *xtal;
1200
1201 dp = pci_device_to_OF_node(rinfo->pdev);
1202
1203 xtal = (unsigned int *) get_property(dp, "ATY,RefCLK", NULL);
1204
1205 rinfo->pll.ref_clk = *xtal / 10;
1206
1207 if (*xtal)
1208 return 1;
1209 else
1210 return 0;
1211}
1212#endif
1213
1214
1215static void radeon_engine_init (struct radeonfb_info *rinfo)
1216{
1217 u32 temp;
1218
1219 /* disable 3D engine */
1220 OUTREG(RB3D_CNTL, 0);
1221
1222 radeon_engine_reset ();
1223
1224 radeon_fifo_wait (1);
1225 OUTREG(RB2D_DSTCACHE_MODE, 0);
1226
1227 radeon_fifo_wait (1);
1228 temp = INREG(DEFAULT_PITCH_OFFSET);
1229 OUTREG(DEFAULT_PITCH_OFFSET, ((temp & 0xc0000000) |
1230 (rinfo->pitch << 0x16)));
1231
1232 radeon_fifo_wait (1);
1233 OUTREGP(DP_DATATYPE, 0, ~HOST_BIG_ENDIAN_EN);
1234
1235 radeon_fifo_wait (1);
1236 OUTREG(DEFAULT_SC_BOTTOM_RIGHT, (DEFAULT_SC_RIGHT_MAX |
1237 DEFAULT_SC_BOTTOM_MAX));
1238
1239 temp = radeon_get_dstbpp(rinfo->depth);
1240 rinfo->dp_gui_master_cntl = ((temp << 8) | GMC_CLR_CMP_CNTL_DIS);
1241 radeon_fifo_wait (1);
1242 OUTREG(DP_GUI_MASTER_CNTL, (rinfo->dp_gui_master_cntl |
1243 GMC_BRUSH_SOLID_COLOR |
1244 GMC_SRC_DATATYPE_COLOR));
1245
1246 radeon_fifo_wait (7);
1247
1248 /* clear line drawing regs */
1249 OUTREG(DST_LINE_START, 0);
1250 OUTREG(DST_LINE_END, 0);
1251
1252 /* set brush color regs */
1253 OUTREG(DP_BRUSH_FRGD_CLR, 0xffffffff);
1254 OUTREG(DP_BRUSH_BKGD_CLR, 0x00000000);
1255
1256 /* set source color regs */
1257 OUTREG(DP_SRC_FRGD_CLR, 0xffffffff);
1258 OUTREG(DP_SRC_BKGD_CLR, 0x00000000);
1259
1260 /* default write mask */
1261 OUTREG(DP_WRITE_MSK, 0xffffffff);
1262
1263 radeon_engine_idle ();
1264}
1265
1266
1267static int __devinit radeon_init_disp (struct radeonfb_info *rinfo)
1268{
1269 struct fb_info *info = &rinfo->info;
1270 struct fb_var_screeninfo var;
1271
1272 var = radeonfb_default_var;
1273 if ((radeon_init_disp_var(rinfo, &var)) < 0)
1274 return -1;
1275
1276 rinfo->depth = var_to_depth(&var);
1277 rinfo->bpp = var.bits_per_pixel;
1278
1279 info->var = var;
1280 fb_alloc_cmap(&info->cmap, 256, 0);
1281
1282 var.activate = FB_ACTIVATE_NOW;
1283 return 0;
1284}
1285
1286
1287static int radeon_init_disp_var (struct radeonfb_info *rinfo,
1288 struct fb_var_screeninfo *var)
1289{
1290#ifndef MODULE
1291 if (mode_option)
1292 fb_find_mode (var, &rinfo->info, mode_option,
1293 NULL, 0, NULL, 8);
1294 else
1295#endif
1296 if (rinfo->use_default_var)
 1297			/* We will use the modified default var */
1298 *var = radeonfb_default_var;
1299 else
1300
1301 fb_find_mode (var, &rinfo->info, "640x480-8@60",
1302 NULL, 0, NULL, 0);
1303
1304 if (noaccel)
1305 var->accel_flags &= ~FB_ACCELF_TEXT;
1306 else
1307 var->accel_flags |= FB_ACCELF_TEXT;
1308
1309 return 0;
1310}
1311
1312
1313static int radeon_do_maximize(struct radeonfb_info *rinfo,
1314 struct fb_var_screeninfo *var,
1315 struct fb_var_screeninfo *v,
1316 int nom, int den)
1317{
1318 static struct {
1319 int xres, yres;
1320 } modes[] = {
1321 {1600, 1280},
1322 {1280, 1024},
1323 {1024, 768},
1324 {800, 600},
1325 {640, 480},
1326 {-1, -1}
1327 };
1328 int i;
1329
1330 /* use highest possible virtual resolution */
1331 if (v->xres_virtual == -1 && v->yres_virtual == -1) {
1332 printk("radeonfb: using max available virtual resolution\n");
1333 for (i=0; modes[i].xres != -1; i++) {
1334 if (modes[i].xres * nom / den * modes[i].yres <
1335 rinfo->video_ram / 2)
1336 break;
1337 }
1338 if (modes[i].xres == -1) {
1339 printk("radeonfb: could not find virtual resolution that fits into video memory!\n");
1340 return -EINVAL;
1341 }
1342 v->xres_virtual = modes[i].xres;
1343 v->yres_virtual = modes[i].yres;
1344
1345 printk("radeonfb: virtual resolution set to max of %dx%d\n",
1346 v->xres_virtual, v->yres_virtual);
1347 } else if (v->xres_virtual == -1) {
1348 v->xres_virtual = (rinfo->video_ram * den /
1349 (nom * v->yres_virtual * 2)) & ~15;
1350 } else if (v->yres_virtual == -1) {
1351 v->xres_virtual = (v->xres_virtual + 15) & ~15;
1352 v->yres_virtual = rinfo->video_ram * den /
1353 (nom * v->xres_virtual *2);
1354 } else {
1355 if (v->xres_virtual * nom / den * v->yres_virtual >
1356 rinfo->video_ram) {
1357 return -EINVAL;
1358 }
1359 }
1360
1361 if (v->xres_virtual * nom / den >= 8192) {
1362 v->xres_virtual = 8192 * den / nom - 16;
1363 }
1364
1365 if (v->xres_virtual < v->xres)
1366 return -EINVAL;
1367
1368 if (v->yres_virtual < v->yres)
1369 return -EINVAL;
1370
1371 return 0;
1372}
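In radeon_do_maximize() the nom/den pair supplied by radeonfb_check_var() below encodes bytes per pixel and video_ram is in bytes, so the first loop picks the largest canned virtual resolution whose framebuffer fits in half of video memory. A standalone sketch of that fitting loop for a hypothetical 8 MB board at 32 bpp:

#include <stdio.h>

int main(void)
{
	static const struct { int xres, yres; } modes[] = {
		{1600, 1280}, {1280, 1024}, {1024, 768},
		{800, 600}, {640, 480}, {-1, -1},
	};
	int video_ram = 8 * 1024 * 1024;	/* assumed 8 MB, in bytes */
	int nom = 4, den = 1;			/* 32 bpp => 4 bytes/pixel */
	int i;

	for (i = 0; modes[i].xres != -1; i++)
		if (modes[i].xres * nom / den * modes[i].yres < video_ram / 2)
			break;

	/* 1280x1024x4 = 5 MB does not fit in 4 MB, so this prints 1024x768 */
	printf("%dx%d\n", modes[i].xres, modes[i].yres);
	return 0;
}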
1373
1374
1375static int radeonfb_check_var (struct fb_var_screeninfo *var, struct fb_info *info)
1376{
1377 struct radeonfb_info *rinfo = (struct radeonfb_info *) info->par;
1378 struct fb_var_screeninfo v;
1379 int nom, den;
1380
1381 memcpy (&v, var, sizeof (v));
1382
1383 switch (v.bits_per_pixel) {
1384 case 0 ... 8:
1385 v.bits_per_pixel = 8;
1386 break;
1387 case 9 ... 16:
1388 v.bits_per_pixel = 16;
1389 break;
1390 case 17 ... 24:
1391#if 0 /* Doesn't seem to work */
1392 v.bits_per_pixel = 24;
1393 break;
1394#endif
1395 return -EINVAL;
1396 case 25 ... 32:
1397 v.bits_per_pixel = 32;
1398 break;
1399 default:
1400 return -EINVAL;
1401 }
1402
1403 switch (var_to_depth(&v)) {
1404 case 8:
1405 nom = den = 1;
1406 v.red.offset = v.green.offset = v.blue.offset = 0;
1407 v.red.length = v.green.length = v.blue.length = 8;
1408 v.transp.offset = v.transp.length = 0;
1409 break;
1410 case 15:
1411 nom = 2;
1412 den = 1;
1413 v.red.offset = 10;
1414 v.green.offset = 5;
1415 v.blue.offset = 0;
1416 v.red.length = v.green.length = v.blue.length = 5;
1417 v.transp.offset = v.transp.length = 0;
1418 break;
1419 case 16:
1420 nom = 2;
1421 den = 1;
1422 v.red.offset = 11;
1423 v.green.offset = 5;
1424 v.blue.offset = 0;
1425 v.red.length = 5;
1426 v.green.length = 6;
1427 v.blue.length = 5;
1428 v.transp.offset = v.transp.length = 0;
1429 break;
1430 case 24:
1431 nom = 4;
1432 den = 1;
1433 v.red.offset = 16;
1434 v.green.offset = 8;
1435 v.blue.offset = 0;
1436 v.red.length = v.blue.length = v.green.length = 8;
1437 v.transp.offset = v.transp.length = 0;
1438 break;
1439 case 32:
1440 nom = 4;
1441 den = 1;
1442 v.red.offset = 16;
1443 v.green.offset = 8;
1444 v.blue.offset = 0;
1445 v.red.length = v.blue.length = v.green.length = 8;
1446 v.transp.offset = 24;
1447 v.transp.length = 8;
1448 break;
1449 default:
1450 printk ("radeonfb: mode %dx%dx%d rejected, color depth invalid\n",
1451 var->xres, var->yres, var->bits_per_pixel);
1452 return -EINVAL;
1453 }
1454
1455 if (radeon_do_maximize(rinfo, var, &v, nom, den) < 0)
1456 return -EINVAL;
1457
1458 if (v.xoffset < 0)
1459 v.xoffset = 0;
1460 if (v.yoffset < 0)
1461 v.yoffset = 0;
1462
1463 if (v.xoffset > v.xres_virtual - v.xres)
1464 v.xoffset = v.xres_virtual - v.xres - 1;
1465
1466 if (v.yoffset > v.yres_virtual - v.yres)
1467 v.yoffset = v.yres_virtual - v.yres - 1;
1468
1469 v.red.msb_right = v.green.msb_right = v.blue.msb_right =
1470 v.transp.offset = v.transp.length =
1471 v.transp.msb_right = 0;
1472
1473 if (noaccel)
1474 v.accel_flags = 0;
1475
1476 memcpy(var, &v, sizeof(v));
1477
1478 return 0;
1479}
1480
1481
1482static int radeonfb_pan_display (struct fb_var_screeninfo *var,
1483 struct fb_info *info)
1484{
1485 struct radeonfb_info *rinfo = (struct radeonfb_info *) info;
1486
1487 if ((var->xoffset + var->xres > var->xres_virtual)
1488 || (var->yoffset + var->yres > var->yres_virtual))
1489 return -EINVAL;
1490
1491 if (rinfo->asleep)
1492 return 0;
1493
1494 OUTREG(CRTC_OFFSET, ((var->yoffset * var->xres_virtual + var->xoffset)
1495 * var->bits_per_pixel / 8) & ~7);
1496 return 0;
1497}
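The CRTC_OFFSET value programmed above is just the byte address of the first visible pixel inside the virtual framebuffer, truncated to 8-byte alignment. A small sketch with made-up pan values:

#include <stdio.h>

int main(void)
{
	unsigned int xres_virtual = 1024, bits_per_pixel = 16;
	unsigned int xoffset = 3, yoffset = 10;	/* hypothetical pan */
	unsigned int offset = ((yoffset * xres_virtual + xoffset)
			       * bits_per_pixel / 8) & ~7;

	printf("%u\n", offset);		/* 20486 rounded down to 20480 */
	return 0;
}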
1498
1499
1500static int radeonfb_ioctl (struct fb_info *info, unsigned int cmd,
1501 unsigned long arg)
1502{
1503 struct radeonfb_info *rinfo = (struct radeonfb_info *) info;
1504 unsigned int tmp;
1505 u32 value = 0;
1506 int rc;
1507
1508 switch (cmd) {
1509 /*
1510 * TODO: set mirror accordingly for non-Mobility chipsets with 2 CRTC's
1511 */
1512 case FBIO_RADEON_SET_MIRROR:
1513 switch (rinfo->arch) {
1514 case RADEON_R100:
1515 case RADEON_RV100:
1516 case RADEON_R200:
1517 case RADEON_RV200:
1518 case RADEON_RV250:
1519 case RADEON_R300:
1520 return -EINVAL;
1521 default:
1522 /* RADEON M6, RADEON_M7, RADEON_M9 */
1523 break;
1524 }
1525
1526 rc = get_user(value, (__u32 __user *)arg);
1527
1528 if (rc)
1529 return rc;
1530
1531 if (value & 0x01) {
1532 tmp = INREG(LVDS_GEN_CNTL);
1533
1534 tmp |= (LVDS_ON | LVDS_BLON);
1535 } else {
1536 tmp = INREG(LVDS_GEN_CNTL);
1537
1538 tmp &= ~(LVDS_ON | LVDS_BLON);
1539 }
1540
1541 OUTREG(LVDS_GEN_CNTL, tmp);
1542
1543 if (value & 0x02) {
1544 tmp = INREG(CRTC_EXT_CNTL);
1545 tmp |= CRTC_CRT_ON;
1546
1547 mirror = 1;
1548 } else {
1549 tmp = INREG(CRTC_EXT_CNTL);
1550 tmp &= ~CRTC_CRT_ON;
1551
1552 mirror = 0;
1553 }
1554
1555 OUTREG(CRTC_EXT_CNTL, tmp);
1556
1557 break;
1558 case FBIO_RADEON_GET_MIRROR:
1559 switch (rinfo->arch) {
1560 case RADEON_R100:
1561 case RADEON_RV100:
1562 case RADEON_R200:
1563 case RADEON_RV200:
1564 case RADEON_RV250:
1565 case RADEON_R300:
1566 return -EINVAL;
1567 default:
1568 /* RADEON M6, RADEON_M7, RADEON_M9 */
1569 break;
1570 }
1571
1572 tmp = INREG(LVDS_GEN_CNTL);
1573 if ((LVDS_ON | LVDS_BLON) & tmp)
1574 value |= 0x01;
1575
1576 tmp = INREG(CRTC_EXT_CNTL);
1577 if (CRTC_CRT_ON & tmp)
1578 value |= 0x02;
1579
1580 return put_user(value, (__u32 __user *)arg);
1581 default:
1582 return -EINVAL;
1583 }
1584
1585 return -EINVAL;
1586}
1587
1588
1589static int radeonfb_blank (int blank, struct fb_info *info)
1590{
1591 struct radeonfb_info *rinfo = (struct radeonfb_info *) info;
1592 u32 val = INREG(CRTC_EXT_CNTL);
1593 u32 val2 = INREG(LVDS_GEN_CNTL);
1594
1595 if (rinfo->asleep)
1596 return 0;
1597
1598#ifdef CONFIG_PMAC_BACKLIGHT
1599 if (rinfo->dviDisp_type == MT_LCD && machine_is(powermac)) {
1600 set_backlight_enable(!blank);
1601 return 0;
1602 }
1603#endif
1604
1605 /* reset it */
1606 val &= ~(CRTC_DISPLAY_DIS | CRTC_HSYNC_DIS |
1607 CRTC_VSYNC_DIS);
1608 val2 &= ~(LVDS_DISPLAY_DIS);
1609
1610 switch (blank) {
1611 case FB_BLANK_UNBLANK:
1612 case FB_BLANK_NORMAL:
1613 break;
1614 case FB_BLANK_VSYNC_SUSPEND:
1615 val |= (CRTC_DISPLAY_DIS | CRTC_VSYNC_DIS);
1616 break;
1617 case FB_BLANK_HSYNC_SUSPEND:
1618 val |= (CRTC_DISPLAY_DIS | CRTC_HSYNC_DIS);
1619 break;
1620 case FB_BLANK_POWERDOWN:
1621 val |= (CRTC_DISPLAY_DIS | CRTC_VSYNC_DIS |
1622 CRTC_HSYNC_DIS);
1623 val2 |= (LVDS_DISPLAY_DIS);
1624 break;
1625 }
1626
1627 switch (rinfo->dviDisp_type) {
1628 case MT_LCD:
1629 OUTREG(LVDS_GEN_CNTL, val2);
1630 break;
1631 case MT_CRT:
1632 default:
1633 OUTREG(CRTC_EXT_CNTL, val);
1634 break;
1635 }
1636
1637 /* let fbcon do a soft blank for us */
1638 return (blank == FB_BLANK_NORMAL) ? 1 : 0;
1639}
1640
1641
1642static int radeonfb_setcolreg (unsigned regno, unsigned red, unsigned green,
1643 unsigned blue, unsigned transp, struct fb_info *info)
1644{
1645 struct radeonfb_info *rinfo = (struct radeonfb_info *) info;
1646 u32 pindex, vclk_cntl;
1647 unsigned int i;
1648
1649 if (regno > 255)
1650 return 1;
1651
1652 red >>= 8;
1653 green >>= 8;
1654 blue >>= 8;
1655 rinfo->palette[regno].red = red;
1656 rinfo->palette[regno].green = green;
1657 rinfo->palette[regno].blue = blue;
1658
1659 /* default */
1660 pindex = regno;
1661
1662 if (!rinfo->asleep) {
1663 vclk_cntl = INPLL(VCLK_ECP_CNTL);
1664 OUTPLL(VCLK_ECP_CNTL, vclk_cntl & ~PIXCLK_DAC_ALWAYS_ONb);
1665
1666 if (rinfo->bpp == 16) {
1667 pindex = regno * 8;
1668
1669 if (rinfo->depth == 16 && regno > 63)
1670 return 1;
1671 if (rinfo->depth == 15 && regno > 31)
1672 return 1;
1673
1674 /* For 565, the green component is mixed one order below */
1675 if (rinfo->depth == 16) {
1676 OUTREG(PALETTE_INDEX, pindex>>1);
1677 OUTREG(PALETTE_DATA, (rinfo->palette[regno>>1].red << 16) |
1678 (green << 8) | (rinfo->palette[regno>>1].blue));
1679 green = rinfo->palette[regno<<1].green;
1680 }
1681 }
1682
1683 if (rinfo->depth != 16 || regno < 32) {
1684 OUTREG(PALETTE_INDEX, pindex);
1685 OUTREG(PALETTE_DATA, (red << 16) | (green << 8) | blue);
1686 }
1687
1688 OUTPLL(VCLK_ECP_CNTL, vclk_cntl);
1689 }
1690 if (regno < 16) {
1691 switch (rinfo->depth) {
1692 case 15:
1693 ((u16 *) (info->pseudo_palette))[regno] =
1694 (regno << 10) | (regno << 5) | regno;
1695 break;
1696 case 16:
1697 ((u16 *) (info->pseudo_palette))[regno] =
1698 (regno << 11) | (regno << 6) | regno;
1699 break;
1700 case 24:
1701 ((u32 *) (info->pseudo_palette))[regno] =
1702 (regno << 16) | (regno << 8) | regno;
1703 break;
1704 case 32:
1705 i = (regno << 8) | regno;
1706 ((u32 *) (info->pseudo_palette))[regno] =
1707 (i << 16) | i;
1708 break;
1709 }
1710 }
1711 return 0;
1712}
1713
1714
1715
1716static void radeon_save_state (struct radeonfb_info *rinfo,
1717 struct radeon_regs *save)
1718{
1719 /* CRTC regs */
1720 save->crtc_gen_cntl = INREG(CRTC_GEN_CNTL);
1721 save->crtc_ext_cntl = INREG(CRTC_EXT_CNTL);
1722 save->dac_cntl = INREG(DAC_CNTL);
1723 save->crtc_h_total_disp = INREG(CRTC_H_TOTAL_DISP);
1724 save->crtc_h_sync_strt_wid = INREG(CRTC_H_SYNC_STRT_WID);
1725 save->crtc_v_total_disp = INREG(CRTC_V_TOTAL_DISP);
1726 save->crtc_v_sync_strt_wid = INREG(CRTC_V_SYNC_STRT_WID);
1727 save->crtc_pitch = INREG(CRTC_PITCH);
1728#if defined(__BIG_ENDIAN)
1729 save->surface_cntl = INREG(SURFACE_CNTL);
1730#endif
1731
1732 /* FP regs */
1733 save->fp_crtc_h_total_disp = INREG(FP_CRTC_H_TOTAL_DISP);
1734 save->fp_crtc_v_total_disp = INREG(FP_CRTC_V_TOTAL_DISP);
1735 save->fp_gen_cntl = INREG(FP_GEN_CNTL);
1736 save->fp_h_sync_strt_wid = INREG(FP_H_SYNC_STRT_WID);
1737 save->fp_horz_stretch = INREG(FP_HORZ_STRETCH);
1738 save->fp_v_sync_strt_wid = INREG(FP_V_SYNC_STRT_WID);
1739 save->fp_vert_stretch = INREG(FP_VERT_STRETCH);
1740 save->lvds_gen_cntl = INREG(LVDS_GEN_CNTL);
1741 save->lvds_pll_cntl = INREG(LVDS_PLL_CNTL);
1742 save->tmds_crc = INREG(TMDS_CRC);
1743 save->tmds_transmitter_cntl = INREG(TMDS_TRANSMITTER_CNTL);
1744 save->vclk_ecp_cntl = INPLL(VCLK_ECP_CNTL);
1745}
1746
1747
1748
1749static int radeonfb_set_par (struct fb_info *info)
1750{
1751 struct radeonfb_info *rinfo = (struct radeonfb_info *)info->par;
1752 struct fb_var_screeninfo *mode = &info->var;
1753 struct radeon_regs newmode;
1754 int hTotal, vTotal, hSyncStart, hSyncEnd,
1755 hSyncPol, vSyncStart, vSyncEnd, vSyncPol, cSync;
1756 u8 hsync_adj_tab[] = {0, 0x12, 9, 9, 6, 5};
1757 u8 hsync_fudge_fp[] = {2, 2, 0, 0, 5, 5};
1758 u32 dotClock = 1000000000 / mode->pixclock,
1759 sync, h_sync_pol, v_sync_pol;
1760 int freq = dotClock / 10; /* x 100 */
1761 int xclk_freq, vclk_freq, xclk_per_trans, xclk_per_trans_precise;
1762 int useable_precision, roff, ron;
1763 int min_bits, format = 0;
1764 int hsync_start, hsync_fudge, bytpp, hsync_wid, vsync_wid;
1765 int primary_mon = PRIMARY_MONITOR(rinfo);
1766 int depth = var_to_depth(mode);
1767 int accel = (mode->accel_flags & FB_ACCELF_TEXT) != 0;
1768
1769 rinfo->xres = mode->xres;
1770 rinfo->yres = mode->yres;
1771 rinfo->xres_virtual = mode->xres_virtual;
1772 rinfo->yres_virtual = mode->yres_virtual;
1773 rinfo->pixclock = mode->pixclock;
1774
1775 hSyncStart = mode->xres + mode->right_margin;
1776 hSyncEnd = hSyncStart + mode->hsync_len;
1777 hTotal = hSyncEnd + mode->left_margin;
1778
1779 vSyncStart = mode->yres + mode->lower_margin;
1780 vSyncEnd = vSyncStart + mode->vsync_len;
1781 vTotal = vSyncEnd + mode->upper_margin;
1782
1783 if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD)) {
1784 if (rinfo->panel_xres < mode->xres)
1785 rinfo->xres = mode->xres = rinfo->panel_xres;
1786 if (rinfo->panel_yres < mode->yres)
1787 rinfo->yres = mode->yres = rinfo->panel_yres;
1788
1789 hTotal = mode->xres + rinfo->hblank;
1790 hSyncStart = mode->xres + rinfo->hOver_plus;
1791 hSyncEnd = hSyncStart + rinfo->hSync_width;
1792
1793 vTotal = mode->yres + rinfo->vblank;
1794 vSyncStart = mode->yres + rinfo->vOver_plus;
1795 vSyncEnd = vSyncStart + rinfo->vSync_width;
1796 }
1797
1798 sync = mode->sync;
1799 h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1;
1800 v_sync_pol = sync & FB_SYNC_VERT_HIGH_ACT ? 0 : 1;
1801
1802 RTRACE("hStart = %d, hEnd = %d, hTotal = %d\n",
1803 hSyncStart, hSyncEnd, hTotal);
1804 RTRACE("vStart = %d, vEnd = %d, vTotal = %d\n",
1805 vSyncStart, vSyncEnd, vTotal);
1806
1807 hsync_wid = (hSyncEnd - hSyncStart) / 8;
1808 vsync_wid = vSyncEnd - vSyncStart;
1809 if (hsync_wid == 0)
1810 hsync_wid = 1;
1811 else if (hsync_wid > 0x3f) /* max */
1812 hsync_wid = 0x3f;
1813
1814 if (vsync_wid == 0)
1815 vsync_wid = 1;
1816 else if (vsync_wid > 0x1f) /* max */
1817 vsync_wid = 0x1f;
1818
1819 hSyncPol = mode->sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1;
1820 vSyncPol = mode->sync & FB_SYNC_VERT_HIGH_ACT ? 0 : 1;
1821
1822 cSync = mode->sync & FB_SYNC_COMP_HIGH_ACT ? (1 << 4) : 0;
1823
1824 format = radeon_get_dstbpp(depth);
1825 bytpp = mode->bits_per_pixel >> 3;
1826
1827 if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD))
1828 hsync_fudge = hsync_fudge_fp[format-1];
1829 else
1830 hsync_fudge = hsync_adj_tab[format-1];
1831
1832 hsync_start = hSyncStart - 8 + hsync_fudge;
1833
1834 newmode.crtc_gen_cntl = CRTC_EXT_DISP_EN | CRTC_EN |
1835 (format << 8);
1836
1837 if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD)) {
1838 newmode.crtc_ext_cntl = VGA_ATI_LINEAR | XCRT_CNT_EN;
1839 if (mirror)
1840 newmode.crtc_ext_cntl |= CRTC_CRT_ON;
1841
1842 newmode.crtc_gen_cntl &= ~(CRTC_DBL_SCAN_EN |
1843 CRTC_INTERLACE_EN);
1844 } else {
1845 newmode.crtc_ext_cntl = VGA_ATI_LINEAR | XCRT_CNT_EN |
1846 CRTC_CRT_ON;
1847 }
1848
1849 newmode.dac_cntl = /* INREG(DAC_CNTL) | */ DAC_MASK_ALL | DAC_VGA_ADR_EN |
1850 DAC_8BIT_EN;
1851
1852 newmode.crtc_h_total_disp = ((((hTotal / 8) - 1) & 0x3ff) |
1853 (((mode->xres / 8) - 1) << 16));
1854
1855 newmode.crtc_h_sync_strt_wid = ((hsync_start & 0x1fff) |
1856 (hsync_wid << 16) | (h_sync_pol << 23));
1857
1858 newmode.crtc_v_total_disp = ((vTotal - 1) & 0xffff) |
1859 ((mode->yres - 1) << 16);
1860
1861 newmode.crtc_v_sync_strt_wid = (((vSyncStart - 1) & 0xfff) |
1862 (vsync_wid << 16) | (v_sync_pol << 23));
1863
1864 if (accel) {
1865 /* We first calculate the engine pitch */
1866 rinfo->pitch = ((mode->xres_virtual * ((mode->bits_per_pixel + 1) / 8) + 0x3f)
1867 & ~(0x3f)) >> 6;
1868
1869 /* Then, re-multiply it to get the CRTC pitch */
1870 newmode.crtc_pitch = (rinfo->pitch << 3) / ((mode->bits_per_pixel + 1) / 8);
1871 } else
1872 newmode.crtc_pitch = (mode->xres_virtual >> 3);
1873 newmode.crtc_pitch |= (newmode.crtc_pitch << 16);
1874
1875#if defined(__BIG_ENDIAN)
1876 /*
1877 * It looks like recent chips have a problem with SURFACE_CNTL,
1878 * setting SURF_TRANSLATION_DIS completely disables the
1879 * swapper as well, so we leave it unset now.
1880 */
1881 newmode.surface_cntl = 0;
1882
 1883	/* Set up swapping on both apertures. We currently only use
 1884	 * aperture 0, but enabling the swapper on aperture 1 as well
 1885	 * won't harm.
1886 */
1887 switch (mode->bits_per_pixel) {
1888 case 16:
1889 newmode.surface_cntl |= NONSURF_AP0_SWP_16BPP;
1890 newmode.surface_cntl |= NONSURF_AP1_SWP_16BPP;
1891 break;
1892 case 24:
1893 case 32:
1894 newmode.surface_cntl |= NONSURF_AP0_SWP_32BPP;
1895 newmode.surface_cntl |= NONSURF_AP1_SWP_32BPP;
1896 break;
1897 }
1898#endif
1899
1900 rinfo->pitch = ((mode->xres_virtual * ((mode->bits_per_pixel + 1) / 8) + 0x3f)
1901 & ~(0x3f)) / 64;
1902
1903 RTRACE("h_total_disp = 0x%x\t hsync_strt_wid = 0x%x\n",
1904 newmode.crtc_h_total_disp, newmode.crtc_h_sync_strt_wid);
1905 RTRACE("v_total_disp = 0x%x\t vsync_strt_wid = 0x%x\n",
1906 newmode.crtc_v_total_disp, newmode.crtc_v_sync_strt_wid);
1907
1908 newmode.xres = mode->xres;
1909 newmode.yres = mode->yres;
1910
1911 rinfo->bpp = mode->bits_per_pixel;
1912 rinfo->depth = depth;
1913
1914 if (freq > rinfo->pll.ppll_max)
1915 freq = rinfo->pll.ppll_max;
1916 if (freq*12 < rinfo->pll.ppll_min)
1917 freq = rinfo->pll.ppll_min / 12;
1918
1919 {
1920 struct {
1921 int divider;
1922 int bitvalue;
1923 } *post_div,
1924 post_divs[] = {
1925 { 1, 0 },
1926 { 2, 1 },
1927 { 4, 2 },
1928 { 8, 3 },
1929 { 3, 4 },
1930 { 16, 5 },
1931 { 6, 6 },
1932 { 12, 7 },
1933 { 0, 0 },
1934 };
1935
1936 for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
1937 rinfo->pll_output_freq = post_div->divider * freq;
1938 if (rinfo->pll_output_freq >= rinfo->pll.ppll_min &&
1939 rinfo->pll_output_freq <= rinfo->pll.ppll_max)
1940 break;
1941 }
1942
1943 rinfo->post_div = post_div->divider;
1944 rinfo->fb_div = round_div(rinfo->pll.ref_div*rinfo->pll_output_freq,
1945 rinfo->pll.ref_clk);
1946 newmode.ppll_ref_div = rinfo->pll.ref_div;
1947 newmode.ppll_div_3 = rinfo->fb_div | (post_div->bitvalue << 16);
1948 }
1949 newmode.vclk_ecp_cntl = rinfo->init_state.vclk_ecp_cntl;
1950
1951#ifdef CONFIG_PPC_OF
1952 /* Gross hack for iBook with M7 until I find out a proper fix */
1953 if (machine_is_compatible("PowerBook4,3") && rinfo->arch == RADEON_M7)
1954 newmode.ppll_div_3 = 0x000600ad;
1955#endif /* CONFIG_PPC_OF */
1956
1957 RTRACE("post div = 0x%x\n", rinfo->post_div);
1958 RTRACE("fb_div = 0x%x\n", rinfo->fb_div);
1959 RTRACE("ppll_div_3 = 0x%x\n", newmode.ppll_div_3);
1960
1961 /* DDA */
1962 vclk_freq = round_div(rinfo->pll.ref_clk * rinfo->fb_div,
1963 rinfo->pll.ref_div * rinfo->post_div);
1964 xclk_freq = rinfo->pll.xclk;
1965
1966 xclk_per_trans = round_div(xclk_freq * 128, vclk_freq * mode->bits_per_pixel);
1967
1968 min_bits = min_bits_req(xclk_per_trans);
1969 useable_precision = min_bits + 1;
1970
1971 xclk_per_trans_precise = round_div((xclk_freq * 128) << (11 - useable_precision),
1972 vclk_freq * mode->bits_per_pixel);
1973
1974 ron = (4 * rinfo->ram.mb + 3 * _max(rinfo->ram.trcd - 2, 0) +
1975 2 * rinfo->ram.trp + rinfo->ram.twr + rinfo->ram.cl + rinfo->ram.tr2w +
1976 xclk_per_trans) << (11 - useable_precision);
1977 roff = xclk_per_trans_precise * (32 - 4);
1978
1979 RTRACE("ron = %d, roff = %d\n", ron, roff);
1980 RTRACE("vclk_freq = %d, per = %d\n", vclk_freq, xclk_per_trans_precise);
1981
1982 if ((ron + rinfo->ram.rloop) >= roff) {
1983 printk("radeonfb: error ron out of range\n");
1984 return -EINVAL;
1985 }
1986
1987 newmode.dda_config = (xclk_per_trans_precise |
1988 (useable_precision << 16) |
1989 (rinfo->ram.rloop << 20));
1990 newmode.dda_on_off = (ron << 16) | roff;
1991
1992 if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD)) {
1993 unsigned int hRatio, vRatio;
1994
1995 /* We force the pixel clock to be always enabled. Allowing it
1996 * to be power managed during blanking would save power, but has
1997 * nasty interactions with the 2D engine & sleep code that haven't
1998 * been solved yet. --BenH
1999 */
2000 newmode.vclk_ecp_cntl &= ~PIXCLK_DAC_ALWAYS_ONb;
2001
2002 if (mode->xres > rinfo->panel_xres)
2003 mode->xres = rinfo->panel_xres;
2004 if (mode->yres > rinfo->panel_yres)
2005 mode->yres = rinfo->panel_yres;
2006
2007 newmode.fp_horz_stretch = (((rinfo->panel_xres / 8) - 1)
2008 << HORZ_PANEL_SHIFT);
2009 newmode.fp_vert_stretch = ((rinfo->panel_yres - 1)
2010 << VERT_PANEL_SHIFT);
2011
2012 if (mode->xres != rinfo->panel_xres) {
2013 hRatio = round_div(mode->xres * HORZ_STRETCH_RATIO_MAX,
2014 rinfo->panel_xres);
2015 newmode.fp_horz_stretch = (((((unsigned long)hRatio) & HORZ_STRETCH_RATIO_MASK)) |
2016 (newmode.fp_horz_stretch &
2017 (HORZ_PANEL_SIZE | HORZ_FP_LOOP_STRETCH |
2018 HORZ_AUTO_RATIO_INC)));
2019 newmode.fp_horz_stretch |= (HORZ_STRETCH_BLEND |
2020 HORZ_STRETCH_ENABLE);
2021 }
2022 newmode.fp_horz_stretch &= ~HORZ_AUTO_RATIO;
2023
2024 if (mode->yres != rinfo->panel_yres) {
2025 vRatio = round_div(mode->yres * VERT_STRETCH_RATIO_MAX,
2026 rinfo->panel_yres);
2027 newmode.fp_vert_stretch = (((((unsigned long)vRatio) & VERT_STRETCH_RATIO_MASK)) |
2028 (newmode.fp_vert_stretch &
2029 (VERT_PANEL_SIZE | VERT_STRETCH_RESERVED)));
2030 newmode.fp_vert_stretch |= (VERT_STRETCH_BLEND |
2031 VERT_STRETCH_ENABLE);
2032 }
2033 newmode.fp_vert_stretch &= ~VERT_AUTO_RATIO_EN;
2034
2035 newmode.fp_gen_cntl = (rinfo->init_state.fp_gen_cntl & (u32)
2036 ~(FP_SEL_CRTC2 |
2037 FP_RMX_HVSYNC_CONTROL_EN |
2038 FP_DFP_SYNC_SEL |
2039 FP_CRT_SYNC_SEL |
2040 FP_CRTC_LOCK_8DOT |
2041 FP_USE_SHADOW_EN |
2042 FP_CRTC_USE_SHADOW_VEND |
2043 FP_CRT_SYNC_ALT));
2044
2045 newmode.fp_gen_cntl |= (FP_CRTC_DONT_SHADOW_VPAR |
2046 FP_CRTC_DONT_SHADOW_HEND);
2047
2048 newmode.lvds_gen_cntl = rinfo->init_state.lvds_gen_cntl;
2049 newmode.lvds_pll_cntl = rinfo->init_state.lvds_pll_cntl;
2050 newmode.tmds_crc = rinfo->init_state.tmds_crc;
2051 newmode.tmds_transmitter_cntl = rinfo->init_state.tmds_transmitter_cntl;
2052
2053 if (primary_mon == MT_LCD) {
2054 newmode.lvds_gen_cntl |= (LVDS_ON | LVDS_BLON);
2055 newmode.fp_gen_cntl &= ~(FP_FPON | FP_TMDS_EN);
2056 } else {
2057 /* DFP */
2058 newmode.fp_gen_cntl |= (FP_FPON | FP_TMDS_EN);
2059 newmode.tmds_transmitter_cntl = (TMDS_RAN_PAT_RST |
2060 TMDS_ICHCSEL | TMDS_PLL_EN) &
2061 ~(TMDS_PLLRST);
2062 newmode.crtc_ext_cntl &= ~CRTC_CRT_ON;
2063 }
2064
2065 newmode.fp_crtc_h_total_disp = (((rinfo->hblank / 8) & 0x3ff) |
2066 (((mode->xres / 8) - 1) << 16));
2067 newmode.fp_crtc_v_total_disp = (rinfo->vblank & 0xffff) |
2068 ((mode->yres - 1) << 16);
2069 newmode.fp_h_sync_strt_wid = ((rinfo->hOver_plus & 0x1fff) |
2070 (hsync_wid << 16) | (h_sync_pol << 23));
2071 newmode.fp_v_sync_strt_wid = ((rinfo->vOver_plus & 0xfff) |
2072 (vsync_wid << 16) | (v_sync_pol << 23));
2073 }
2074
2075 /* do it! */
2076 if (!rinfo->asleep) {
2077 radeon_write_mode (rinfo, &newmode);
2078 /* (re)initialize the engine */
2079 if (noaccel)
2080 radeon_engine_init (rinfo);
2081
2082 }
2083 /* Update fix */
2084 if (accel)
2085 info->fix.line_length = rinfo->pitch*64;
2086 else
2087 info->fix.line_length = mode->xres_virtual * ((mode->bits_per_pixel + 1) / 8);
2088 info->fix.visual = rinfo->depth == 8 ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
2089
2090#ifdef CONFIG_BOOTX_TEXT
2091 /* Update debug text engine */
2092 btext_update_display(rinfo->fb_base_phys, mode->xres, mode->yres,
2093 rinfo->depth, info->fix.line_length);
2094#endif
2095
2096 return 0;
2097}
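Two of the calculations inside radeonfb_set_par() above are easy to check in isolation: the post-divider walk that keeps the PLL output between ppll_min and ppll_max (from which fb_div is derived), and the DDA precision obtained through min_bits_req(). A standalone sketch using the R100 PLL defaults from radeon_get_pllinfo() and a hypothetical 108 MHz dot clock at 32 bpp:

#include <stdio.h>

static int round_div(int num, int den)
{
	return (num + (den / 2)) / den;
}

static int min_bits_req(int val)
{
	int bits = 0;

	if (val == 0)
		bits = 1;
	while (val) {
		val >>= 1;
		bits++;
	}
	return bits;
}

int main(void)
{
	/* R100 defaults (10 kHz units) and an assumed 108 MHz dot clock */
	int ref_clk = 2700, ref_div = 67, xclk = 16600;
	int ppll_min = 12000, ppll_max = 35000;
	int freq = 10800, bpp = 32;
	static const int post_divs[] = { 1, 2, 4, 8, 3, 16, 6, 12, 0 };
	int i, post_div = 0, pll_out = 0;
	int fb_div, vclk, per, precision, per_precise;

	for (i = 0; post_divs[i]; i++) {
		pll_out = post_divs[i] * freq;
		if (pll_out >= ppll_min && pll_out <= ppll_max) {
			post_div = post_divs[i];
			break;
		}
	}

	fb_div = round_div(ref_div * pll_out, ref_clk);
	vclk = round_div(ref_clk * fb_div, ref_div * post_div);
	per = round_div(xclk * 128, vclk * bpp);
	precision = min_bits_req(per) + 1;
	per_precise = round_div((xclk * 128) << (11 - precision), vclk * bpp);

	/* prints: post_div 2, fb_div 536, vclk 10800, precision 4, per 787 */
	printf("post_div %d, fb_div %d, vclk %d, precision %d, per %d\n",
	       post_div, fb_div, vclk, precision, per_precise);
	return 0;
}

With these inputs the numbers land exactly on 108 MHz, so fb_div and post_div come out as clean integers; real modes usually leave a small residual error that the round_div() calls keep bounded.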
2098
2099
2100static void radeon_write_mode (struct radeonfb_info *rinfo,
2101 struct radeon_regs *mode)
2102{
2103 int i;
2104 int primary_mon = PRIMARY_MONITOR(rinfo);
2105
2106 radeonfb_blank(VESA_POWERDOWN, (struct fb_info *)rinfo);
2107
2108
2109 if (rinfo->arch == RADEON_M6) {
2110 for (i=0; i<7; i++)
2111 OUTREG(common_regs_m6[i].reg, common_regs_m6[i].val);
2112 } else {
2113 for (i=0; i<9; i++)
2114 OUTREG(common_regs[i].reg, common_regs[i].val);
2115 }
2116
2117 OUTREG(CRTC_GEN_CNTL, mode->crtc_gen_cntl);
2118 OUTREGP(CRTC_EXT_CNTL, mode->crtc_ext_cntl,
2119 CRTC_HSYNC_DIS | CRTC_VSYNC_DIS | CRTC_DISPLAY_DIS);
2120 OUTREGP(DAC_CNTL, mode->dac_cntl, DAC_RANGE_CNTL | DAC_BLANKING);
2121 OUTREG(CRTC_H_TOTAL_DISP, mode->crtc_h_total_disp);
2122 OUTREG(CRTC_H_SYNC_STRT_WID, mode->crtc_h_sync_strt_wid);
2123 OUTREG(CRTC_V_TOTAL_DISP, mode->crtc_v_total_disp);
2124 OUTREG(CRTC_V_SYNC_STRT_WID, mode->crtc_v_sync_strt_wid);
2125 OUTREG(CRTC_OFFSET, 0);
2126 OUTREG(CRTC_OFFSET_CNTL, 0);
2127 OUTREG(CRTC_PITCH, mode->crtc_pitch);
2128
2129#if defined(__BIG_ENDIAN)
2130 OUTREG(SURFACE_CNTL, mode->surface_cntl);
2131#endif
2132
2133 while ((INREG(CLOCK_CNTL_INDEX) & PPLL_DIV_SEL_MASK) !=
2134 PPLL_DIV_SEL_MASK) {
2135 OUTREGP(CLOCK_CNTL_INDEX, PPLL_DIV_SEL_MASK, 0xffff);
2136 }
2137
2138 OUTPLLP(PPLL_CNTL, PPLL_RESET, 0xffff);
2139
2140 while ((INPLL(PPLL_REF_DIV) & PPLL_REF_DIV_MASK) !=
2141 (mode->ppll_ref_div & PPLL_REF_DIV_MASK)) {
2142 OUTPLLP(PPLL_REF_DIV, mode->ppll_ref_div, ~PPLL_REF_DIV_MASK);
2143 }
2144
2145 while ((INPLL(PPLL_DIV_3) & PPLL_FB3_DIV_MASK) !=
2146 (mode->ppll_div_3 & PPLL_FB3_DIV_MASK)) {
2147 OUTPLLP(PPLL_DIV_3, mode->ppll_div_3, ~PPLL_FB3_DIV_MASK);
2148 }
2149
2150 while ((INPLL(PPLL_DIV_3) & PPLL_POST3_DIV_MASK) !=
2151 (mode->ppll_div_3 & PPLL_POST3_DIV_MASK)) {
2152 OUTPLLP(PPLL_DIV_3, mode->ppll_div_3, ~PPLL_POST3_DIV_MASK);
2153 }
2154
2155 OUTPLL(HTOTAL_CNTL, 0);
2156
2157 OUTPLLP(PPLL_CNTL, 0, ~PPLL_RESET);
2158
2159// OUTREG(DDA_CONFIG, mode->dda_config);
2160// OUTREG(DDA_ON_OFF, mode->dda_on_off);
2161
2162 if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD)) {
2163 OUTREG(FP_CRTC_H_TOTAL_DISP, mode->fp_crtc_h_total_disp);
2164 OUTREG(FP_CRTC_V_TOTAL_DISP, mode->fp_crtc_v_total_disp);
2165 OUTREG(FP_H_SYNC_STRT_WID, mode->fp_h_sync_strt_wid);
2166 OUTREG(FP_V_SYNC_STRT_WID, mode->fp_v_sync_strt_wid);
2167 OUTREG(FP_HORZ_STRETCH, mode->fp_horz_stretch);
2168 OUTREG(FP_VERT_STRETCH, mode->fp_vert_stretch);
2169 OUTREG(FP_GEN_CNTL, mode->fp_gen_cntl);
2170 OUTREG(TMDS_CRC, mode->tmds_crc);
2171 OUTREG(TMDS_TRANSMITTER_CNTL, mode->tmds_transmitter_cntl);
2172
2173 if (primary_mon == MT_LCD) {
2174 unsigned int tmp = INREG(LVDS_GEN_CNTL);
2175
2176 mode->lvds_gen_cntl &= ~LVDS_STATE_MASK;
2177 mode->lvds_gen_cntl |= (rinfo->init_state.lvds_gen_cntl & LVDS_STATE_MASK);
2178
2179 if ((tmp & (LVDS_ON | LVDS_BLON)) ==
2180 (mode->lvds_gen_cntl & (LVDS_ON | LVDS_BLON))) {
2181 OUTREG(LVDS_GEN_CNTL, mode->lvds_gen_cntl);
2182 } else {
2183 if (mode->lvds_gen_cntl & (LVDS_ON | LVDS_BLON)) {
2184 udelay(1000);
2185 OUTREG(LVDS_GEN_CNTL, mode->lvds_gen_cntl);
2186 } else {
2187 OUTREG(LVDS_GEN_CNTL, mode->lvds_gen_cntl |
2188 LVDS_BLON);
2189 udelay(1000);
2190 OUTREG(LVDS_GEN_CNTL, mode->lvds_gen_cntl);
2191 }
2192 }
2193 }
2194 }
2195
2196 radeonfb_blank(VESA_NO_BLANKING, (struct fb_info *)rinfo);
2197
2198 OUTPLL(VCLK_ECP_CNTL, mode->vclk_ecp_cntl);
2199
2200 return;
2201}
2202
2203static struct fb_ops radeonfb_ops = {
2204 .owner = THIS_MODULE,
2205 .fb_check_var = radeonfb_check_var,
2206 .fb_set_par = radeonfb_set_par,
2207 .fb_setcolreg = radeonfb_setcolreg,
2208 .fb_pan_display = radeonfb_pan_display,
2209 .fb_blank = radeonfb_blank,
2210 .fb_ioctl = radeonfb_ioctl,
2211#if 0
2212 .fb_fillrect = radeonfb_fillrect,
2213 .fb_copyarea = radeonfb_copyarea,
2214 .fb_imageblit = radeonfb_imageblit,
2215 .fb_rasterimg = radeonfb_rasterimg,
2216#else
2217 .fb_fillrect = cfb_fillrect,
2218 .fb_copyarea = cfb_copyarea,
2219 .fb_imageblit = cfb_imageblit,
2220#endif
2221};
2222
2223
2224static int __devinit radeon_set_fbinfo (struct radeonfb_info *rinfo)
2225{
2226 struct fb_info *info;
2227
2228 info = &rinfo->info;
2229
2230 info->par = rinfo;
2231 info->pseudo_palette = rinfo->pseudo_palette;
2232 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
2233 info->fbops = &radeonfb_ops;
2234 info->screen_base = rinfo->fb_base;
2235
2236 /* Fill fix common fields */
2237 strlcpy(info->fix.id, rinfo->name, sizeof(info->fix.id));
2238 info->fix.smem_start = rinfo->fb_base_phys;
2239 info->fix.smem_len = rinfo->video_ram;
2240 info->fix.type = FB_TYPE_PACKED_PIXELS;
2241 info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
2242 info->fix.xpanstep = 8;
2243 info->fix.ypanstep = 1;
2244 info->fix.ywrapstep = 0;
2245 info->fix.type_aux = 0;
2246 info->fix.mmio_start = rinfo->mmio_base_phys;
2247 info->fix.mmio_len = RADEON_REGSIZE;
2248 if (noaccel)
2249 info->fix.accel = FB_ACCEL_NONE;
2250 else
2251 info->fix.accel = FB_ACCEL_ATI_RADEON;
2252
2253 if (radeon_init_disp (rinfo) < 0)
2254 return -1;
2255
2256 return 0;
2257}
2258
2259
2260#ifdef CONFIG_PMAC_BACKLIGHT
2261
2262/* TODO: Double-check these tables; we don't go up to full ON backlight
2263 * in them, possibly because we noticed MacOS doesn't, but I'd prefer
2264 * having some more official numbers from ATI.
2265 */
2266static int backlight_conv_m6[] = {
2267 0xff, 0xc0, 0xb5, 0xaa, 0x9f, 0x94, 0x89, 0x7e,
2268 0x73, 0x68, 0x5d, 0x52, 0x47, 0x3c, 0x31, 0x24
2269};
2270static int backlight_conv_m7[] = {
2271 0x00, 0x3f, 0x4a, 0x55, 0x60, 0x6b, 0x76, 0x81,
2272 0x8c, 0x97, 0xa2, 0xad, 0xb8, 0xc3, 0xce, 0xd9
2273};
2274
2275#define BACKLIGHT_LVDS_OFF
2276#undef BACKLIGHT_DAC_OFF
2277
2278/* We turn off the LCD completely instead of just dimming the backlight.
2279 * This provides greater power savings, and the display is useless
2280 * without the backlight anyway.
2281 */
2282
2283static int radeon_set_backlight_enable(int on, int level, void *data)
2284{
2285 struct radeonfb_info *rinfo = (struct radeonfb_info *)data;
2286 unsigned int lvds_gen_cntl = INREG(LVDS_GEN_CNTL);
2287 int* conv_table;
2288
2289	/* Pardon me for that hack... maybe someday we can figure
2290	 * out in which direction the backlight should work on a
2291	 * given panel?
2292 */
2293 if ((rinfo->arch == RADEON_M7 || rinfo->arch == RADEON_M9)
2294 && !machine_is_compatible("PowerBook4,3"))
2295 conv_table = backlight_conv_m7;
2296 else
2297 conv_table = backlight_conv_m6;
2298
2299 lvds_gen_cntl |= (LVDS_BL_MOD_EN | LVDS_BLON);
2300 if (on && (level > BACKLIGHT_OFF)) {
2301 lvds_gen_cntl |= LVDS_DIGON;
2302 if (!(lvds_gen_cntl & LVDS_ON)) {
2303 lvds_gen_cntl &= ~LVDS_BLON;
2304 OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
2305 (void)INREG(LVDS_GEN_CNTL);
2306 mdelay(10);
2307 lvds_gen_cntl |= LVDS_BLON;
2308 OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
2309 }
2310 lvds_gen_cntl &= ~LVDS_BL_MOD_LEVEL_MASK;
2311 lvds_gen_cntl |= (conv_table[level] <<
2312 LVDS_BL_MOD_LEVEL_SHIFT);
2313 lvds_gen_cntl |= (LVDS_ON | LVDS_EN);
2314 lvds_gen_cntl &= ~LVDS_DISPLAY_DIS;
2315 } else {
2316 lvds_gen_cntl &= ~LVDS_BL_MOD_LEVEL_MASK;
2317 lvds_gen_cntl |= (conv_table[0] <<
2318 LVDS_BL_MOD_LEVEL_SHIFT);
2319 lvds_gen_cntl |= LVDS_DISPLAY_DIS;
2320 OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
2321 udelay(10);
2322 lvds_gen_cntl &= ~(LVDS_ON | LVDS_EN | LVDS_BLON | LVDS_DIGON);
2323 }
2324
2325 OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
2326 rinfo->init_state.lvds_gen_cntl &= ~LVDS_STATE_MASK;
2327 rinfo->init_state.lvds_gen_cntl |= (lvds_gen_cntl & LVDS_STATE_MASK);
2328
2329 return 0;
2330}
2331
2332static int radeon_set_backlight_level(int level, void *data)
2333{
2334 return radeon_set_backlight_enable(1, level, data);
2335}
2336#endif /* CONFIG_PMAC_BACKLIGHT */
2337
2338
2339#ifdef CONFIG_PMAC_PBOOK
2340
2341static u32 dbg_clk;
2342
2343/*
2344 * Radeon M6 Power Management code. This code currently only supports
2345 * the mobile chips; it's based on some information provided by ATI
2346 * along with hours of tracing of MacOS drivers.
2347 */
2348
2349static void radeon_pm_save_regs(struct radeonfb_info *rinfo)
2350{
2351 rinfo->save_regs[0] = INPLL(PLL_PWRMGT_CNTL);
2352 rinfo->save_regs[1] = INPLL(CLK_PWRMGT_CNTL);
2353 rinfo->save_regs[2] = INPLL(MCLK_CNTL);
2354 rinfo->save_regs[3] = INPLL(SCLK_CNTL);
2355 rinfo->save_regs[4] = INPLL(CLK_PIN_CNTL);
2356 rinfo->save_regs[5] = INPLL(VCLK_ECP_CNTL);
2357 rinfo->save_regs[6] = INPLL(PIXCLKS_CNTL);
2358 rinfo->save_regs[7] = INPLL(MCLK_MISC);
2359 rinfo->save_regs[8] = INPLL(P2PLL_CNTL);
2360
2361 rinfo->save_regs[9] = INREG(DISP_MISC_CNTL);
2362 rinfo->save_regs[10] = INREG(DISP_PWR_MAN);
2363 rinfo->save_regs[11] = INREG(LVDS_GEN_CNTL);
2364 rinfo->save_regs[12] = INREG(LVDS_PLL_CNTL);
2365 rinfo->save_regs[13] = INREG(TV_DAC_CNTL);
2366 rinfo->save_regs[14] = INREG(BUS_CNTL1);
2367 rinfo->save_regs[15] = INREG(CRTC_OFFSET_CNTL);
2368 rinfo->save_regs[16] = INREG(AGP_CNTL);
2369 rinfo->save_regs[17] = (INREG(CRTC_GEN_CNTL) & 0xfdffffff) | 0x04000000;
2370 rinfo->save_regs[18] = (INREG(CRTC2_GEN_CNTL) & 0xfdffffff) | 0x04000000;
2371 rinfo->save_regs[19] = INREG(GPIOPAD_A);
2372 rinfo->save_regs[20] = INREG(GPIOPAD_EN);
2373 rinfo->save_regs[21] = INREG(GPIOPAD_MASK);
2374 rinfo->save_regs[22] = INREG(ZV_LCDPAD_A);
2375 rinfo->save_regs[23] = INREG(ZV_LCDPAD_EN);
2376 rinfo->save_regs[24] = INREG(ZV_LCDPAD_MASK);
2377 rinfo->save_regs[25] = INREG(GPIO_VGA_DDC);
2378 rinfo->save_regs[26] = INREG(GPIO_DVI_DDC);
2379 rinfo->save_regs[27] = INREG(GPIO_MONID);
2380 rinfo->save_regs[28] = INREG(GPIO_CRT2_DDC);
2381
2382 rinfo->save_regs[29] = INREG(SURFACE_CNTL);
2383 rinfo->save_regs[30] = INREG(MC_FB_LOCATION);
2384 rinfo->save_regs[31] = INREG(DISPLAY_BASE_ADDR);
2385 rinfo->save_regs[32] = INREG(MC_AGP_LOCATION);
2386 rinfo->save_regs[33] = INREG(CRTC2_DISPLAY_BASE_ADDR);
2387}
2388
2389static void radeon_pm_restore_regs(struct radeonfb_info *rinfo)
2390{
2391 OUTPLL(P2PLL_CNTL, rinfo->save_regs[8] & 0xFFFFFFFE); /* First */
2392
2393 OUTPLL(PLL_PWRMGT_CNTL, rinfo->save_regs[0]);
2394 OUTPLL(CLK_PWRMGT_CNTL, rinfo->save_regs[1]);
2395 OUTPLL(MCLK_CNTL, rinfo->save_regs[2]);
2396 OUTPLL(SCLK_CNTL, rinfo->save_regs[3]);
2397 OUTPLL(CLK_PIN_CNTL, rinfo->save_regs[4]);
2398 OUTPLL(VCLK_ECP_CNTL, rinfo->save_regs[5]);
2399 OUTPLL(PIXCLKS_CNTL, rinfo->save_regs[6]);
2400 OUTPLL(MCLK_MISC, rinfo->save_regs[7]);
2401
2402 OUTREG(DISP_MISC_CNTL, rinfo->save_regs[9]);
2403 OUTREG(DISP_PWR_MAN, rinfo->save_regs[10]);
2404 OUTREG(LVDS_GEN_CNTL, rinfo->save_regs[11]);
2405 OUTREG(LVDS_PLL_CNTL,rinfo->save_regs[12]);
2406 OUTREG(TV_DAC_CNTL, rinfo->save_regs[13]);
2407 OUTREG(BUS_CNTL1, rinfo->save_regs[14]);
2408 OUTREG(CRTC_OFFSET_CNTL, rinfo->save_regs[15]);
2409 OUTREG(AGP_CNTL, rinfo->save_regs[16]);
2410 OUTREG(CRTC_GEN_CNTL, rinfo->save_regs[17]);
2411 OUTREG(CRTC2_GEN_CNTL, rinfo->save_regs[18]);
2412
2413	// wait for VBL before this one?
2414 OUTPLL(P2PLL_CNTL, rinfo->save_regs[8]);
2415
2416 OUTREG(GPIOPAD_A, rinfo->save_regs[19]);
2417 OUTREG(GPIOPAD_EN, rinfo->save_regs[20]);
2418 OUTREG(GPIOPAD_MASK, rinfo->save_regs[21]);
2419 OUTREG(ZV_LCDPAD_A, rinfo->save_regs[22]);
2420 OUTREG(ZV_LCDPAD_EN, rinfo->save_regs[23]);
2421 OUTREG(ZV_LCDPAD_MASK, rinfo->save_regs[24]);
2422 OUTREG(GPIO_VGA_DDC, rinfo->save_regs[25]);
2423 OUTREG(GPIO_DVI_DDC, rinfo->save_regs[26]);
2424 OUTREG(GPIO_MONID, rinfo->save_regs[27]);
2425 OUTREG(GPIO_CRT2_DDC, rinfo->save_regs[28]);
2426}
2427
2428static void radeon_pm_disable_iopad(struct radeonfb_info *rinfo)
2429{
2430 OUTREG(GPIOPAD_MASK, 0x0001ffff);
2431 OUTREG(GPIOPAD_EN, 0x00000400);
2432 OUTREG(GPIOPAD_A, 0x00000000);
2433 OUTREG(ZV_LCDPAD_MASK, 0x00000000);
2434 OUTREG(ZV_LCDPAD_EN, 0x00000000);
2435 OUTREG(ZV_LCDPAD_A, 0x00000000);
2436 OUTREG(GPIO_VGA_DDC, 0x00030000);
2437 OUTREG(GPIO_DVI_DDC, 0x00000000);
2438 OUTREG(GPIO_MONID, 0x00030000);
2439 OUTREG(GPIO_CRT2_DDC, 0x00000000);
2440}
2441
2442static void radeon_pm_program_v2clk(struct radeonfb_info *rinfo)
2443{
2444//
2445// u32 reg;
2446//
2447// OUTPLL(P2PLL_REF_DIV, 0x0c);
2448//
2449// .../... figure out what macos does here
2450}
2451
2452static void radeon_pm_low_current(struct radeonfb_info *rinfo)
2453{
2454 u32 reg;
2455
2456 reg = INREG(BUS_CNTL1);
2457 reg &= ~BUS_CNTL1_MOBILE_PLATFORM_SEL_MASK;
2458 reg |= BUS_CNTL1_AGPCLK_VALID | (1<<BUS_CNTL1_MOBILE_PLATFORM_SEL_SHIFT);
2459 OUTREG(BUS_CNTL1, reg);
2460
2461 reg = INPLL(PLL_PWRMGT_CNTL);
2462 reg |= PLL_PWRMGT_CNTL_SPLL_TURNOFF | PLL_PWRMGT_CNTL_PPLL_TURNOFF |
2463 PLL_PWRMGT_CNTL_P2PLL_TURNOFF | PLL_PWRMGT_CNTL_TVPLL_TURNOFF;
2464 reg &= ~PLL_PWRMGT_CNTL_SU_MCLK_USE_BCLK;
2465 reg &= ~PLL_PWRMGT_CNTL_MOBILE_SU;
2466 OUTPLL(PLL_PWRMGT_CNTL, reg);
2467
2468// reg = INPLL(TV_PLL_CNTL1);
2469// reg |= TV_PLL_CNTL1__TVPLL_RESET | TV_PLL_CNTL1__TVPLL_SLEEP;
2470// OUTPLL(TV_PLL_CNTL1, reg);
2471
2472 reg = INREG(TV_DAC_CNTL);
2473 reg &= ~(TV_DAC_CNTL_BGADJ_MASK |TV_DAC_CNTL_DACADJ_MASK);
2474 reg |=TV_DAC_CNTL_BGSLEEP | TV_DAC_CNTL_RDACPD | TV_DAC_CNTL_GDACPD |
2475 TV_DAC_CNTL_BDACPD |
2476 (8<<TV_DAC_CNTL_BGADJ__SHIFT) | (8<<TV_DAC_CNTL_DACADJ__SHIFT);
2477 OUTREG(TV_DAC_CNTL, reg);
2478
2479 reg = INREG(TMDS_TRANSMITTER_CNTL);
2480 reg &= ~(TMDS_PLL_EN |TMDS_PLLRST);
2481 OUTREG(TMDS_TRANSMITTER_CNTL, reg);
2482
2483// lvds_pll_cntl = regr32(g, LVDS_PLL_CNTL);
2484// lvds_pll_cntl &= ~LVDS_PLL_CNTL__LVDS_PLL_EN;
2485// lvds_pll_cntl |= LVDS_PLL_CNTL__LVDS_PLL_RESET;
2486// regw32(g, LVDS_PLL_CNTL, lvds_pll_cntl);
2487
2488 reg = INREG(DAC_CNTL);
2489 reg &= ~DAC_CMP_EN;
2490 OUTREG(DAC_CNTL, reg);
2491
2492 reg = INREG(DAC_CNTL2);
2493 reg &= ~DAC2_CMP_EN;
2494 OUTREG(DAC_CNTL2, reg);
2495
2496 reg = INREG(TV_DAC_CNTL);
2497 reg &= ~TV_DAC_CNTL_DETECT;
2498 OUTREG(TV_DAC_CNTL, reg);
2499}
2500
2501static void radeon_pm_setup_for_suspend(struct radeonfb_info *rinfo)
2502{
2503 /* This code is disabled. It does what is in the pm_init
2504 * function of the MacOS driver code ATI sent me. However,
2505	 * it doesn't fix my sleep problem, and it causes other issues
2506	 * on wakeup (basically the machine dying when switching consoles).
2507	 * I haven't had time to investigate this yet.
2508 */
2509#if 0
2510 u32 disp_misc_cntl;
2511 u32 disp_pwr_man;
2512 u32 temp;
2513
2514 // set SPLL, MPLL, PPLL, P2PLL, TVPLL, SCLK, MCLK, PCLK, P2CLK,
2515 // TCLK and TEST_MODE to 0
2516 temp = INPLL(CLK_PWRMGT_CNTL);
2517 OUTPLL(CLK_PWRMGT_CNTL , temp & ~0xc00002ff);
2518
2519 // Turn on Power Management
2520 temp = INPLL(CLK_PWRMGT_CNTL);
2521 OUTPLL(CLK_PWRMGT_CNTL , temp | 0x00000400);
2522
2523 // Turn off display clock if using mobile chips
2524 temp = INPLL(CLK_PWRMGT_CNTL);
2525 OUTREG(CLK_PWRMGT_CNTL , temp | 0x00100000);
2526
2527 // Force PIXCLK_ALWAYS_ON and PIXCLK_DAC_ALWAYS_ON
2528 temp = INPLL(VCLK_ECP_CNTL);
2529 OUTPLL(VCLK_ECP_CNTL, temp & ~0x000000c0);
2530
2531 // Force ECP_FORCE_ON to 1
2532 temp = INPLL(VCLK_ECP_CNTL);
2533 OUTPLL(VCLK_ECP_CNTL, temp | 0x00040000);
2534
2535 // Force PIXCLK_BLEND_ALWAYS_ON and PIXCLK_GV_ALWAYS_ON
2536 temp = INPLL(PIXCLKS_CNTL);
2537 OUTPLL(PIXCLKS_CNTL, temp & ~0x00001800);
2538
2539 // Forcing SCLK_CNTL to ON
2540 OUTPLL(SCLK_CNTL, (INPLL(SCLK_CNTL)& 0x00000007) | 0xffff8000 );
2541
2542 // Set PM control over XTALIN pad
2543 temp = INPLL(CLK_PIN_CNTL);
2544 OUTPLL(CLK_PIN_CNTL, temp | 0x00080000);
2545
2546 // Force MCLK and YCLK and MC as dynamic
2547 temp = INPLL(MCLK_CNTL);
2548 OUTPLL(MCLK_CNTL, temp & 0xffeaffff);
2549
2550 // PLL_TURNOFF
2551 temp = INPLL(PLL_PWRMGT_CNTL);
2552 OUTPLL(PLL_PWRMGT_CNTL, temp | 0x0000001f);
2553
2554 // set MOBILE_SU to 1 if M6 or DDR64 is detected
2555 temp = INPLL(PLL_PWRMGT_CNTL);
2556 OUTPLL(PLL_PWRMGT_CNTL, temp | 0x00010000);
2557
2558 // select PM access mode (PM_MODE_SEL) (use ACPI mode)
2559// temp = INPLL(PLL_PWRMGT_CNTL);
2560// OUTPLL(PLL_PWRMGT_CNTL, temp | 0x00002000);
2561 temp = INPLL(PLL_PWRMGT_CNTL);
2562 OUTPLL(PLL_PWRMGT_CNTL, temp & ~0x00002000);
2563
2564 // set DISP_MISC_CNTL register
2565 disp_misc_cntl = INREG(DISP_MISC_CNTL);
2566 disp_misc_cntl &= ~( DISP_MISC_CNTL_SOFT_RESET_GRPH_PP |
2567 DISP_MISC_CNTL_SOFT_RESET_SUBPIC_PP |
2568 DISP_MISC_CNTL_SOFT_RESET_OV0_PP |
2569 DISP_MISC_CNTL_SOFT_RESET_GRPH_SCLK |
2570 DISP_MISC_CNTL_SOFT_RESET_SUBPIC_SCLK |
2571 DISP_MISC_CNTL_SOFT_RESET_OV0_SCLK |
2572 DISP_MISC_CNTL_SOFT_RESET_GRPH2_PP |
2573 DISP_MISC_CNTL_SOFT_RESET_GRPH2_SCLK |
2574 DISP_MISC_CNTL_SOFT_RESET_LVDS |
2575 DISP_MISC_CNTL_SOFT_RESET_TMDS |
2576 DISP_MISC_CNTL_SOFT_RESET_DIG_TMDS |
2577 DISP_MISC_CNTL_SOFT_RESET_TV);
2578 OUTREG(DISP_MISC_CNTL, disp_misc_cntl);
2579
2580 // set DISP_PWR_MAN register
2581 disp_pwr_man = INREG(DISP_PWR_MAN);
2582	// clau - 9.29.2000 - changes made to bits 23:18 to set them to 1 as requested by George
2583 disp_pwr_man |= (DISP_PWR_MAN_DIG_TMDS_ENABLE_RST |
2584 DISP_PWR_MAN_TV_ENABLE_RST |
2585 // DISP_PWR_MAN_AUTO_PWRUP_EN |
2586 DISP_PWR_MAN_DISP_D3_GRPH_RST |
2587 DISP_PWR_MAN_DISP_D3_SUBPIC_RST |
2588 DISP_PWR_MAN_DISP_D3_OV0_RST |
2589 DISP_PWR_MAN_DISP_D1D2_GRPH_RST |
2590 DISP_PWR_MAN_DISP_D1D2_SUBPIC_RST |
2591 DISP_PWR_MAN_DISP_D1D2_OV0_RST);
2592 disp_pwr_man &= ~(DISP_PWR_MAN_DISP_PWR_MAN_D3_CRTC_EN |
2593 DISP_PWR_MAN_DISP2_PWR_MAN_D3_CRTC2_EN|
2594 DISP_PWR_MAN_DISP_D3_RST |
2595 DISP_PWR_MAN_DISP_D3_REG_RST);
2596 OUTREG(DISP_PWR_MAN, disp_pwr_man);
2597
2598 // clau - 10.24.2000
2599 // - add in setting for BUS_CNTL1 b27:26 = 0x01 and b31 = 0x1
2600 // - add in setting for AGP_CNTL b7:0 = 0x20
2601 // - add in setting for DVI_DDC_DATA_OUT_EN b17:16 = 0x0
2602
2603	// the following settings (two lines) are applied later in this function, only on the mobile platform
2604	// requires -mobile flag
2605 OUTREG(BUS_CNTL1, (INREG(BUS_CNTL1) & 0xf3ffffff) | 0x04000000);
2606 OUTREG(BUS_CNTL1, INREG(BUS_CNTL1) | 0x80000000);
2607 OUTREG(AGP_CNTL, (INREG(AGP_CNTL) & 0xffffff00) | 0x20);
2608 OUTREG(GPIO_DVI_DDC, INREG(GPIO_DVI_DDC) & 0xfffcffff);
2609
2610 // yulee - 12.12.2000
2611 // A12 only
2612 // EN_MCLK_TRISTATE_IN_SUSPEND@MCLK_MISC = 1
2613 // ACCESS_REGS_IN_SUSPEND@CLK_PIN_CNTL = 0
2614 // only on mobile platform
2615 OUTPLL(MCLK_MISC, INPLL(MCLK_MISC) | 0x00040000 );
2616
2617 // yulee -12.12.2000
2618 // AGPCLK_VALID@BUS_CNTL1 = 1
2619 // MOBILE_PLATFORM_SEL@BUS_CNTL1 = 01
2620 // CRTC_STEREO_SYNC_OUT_EN@CRTC_OFFSET_CNTL = 0
2621 // CG_CLK_TO_OUTPIN@CLK_PIN_CNTL = 0
2622 // only on mobile platform
2623 OUTPLL(CLK_PIN_CNTL, INPLL(CLK_PIN_CNTL ) & 0xFFFFF7FF );
2624 OUTREG(BUS_CNTL1, (INREG(BUS_CNTL1 ) & 0xF3FFFFFF) | 0x84000000 );
2625 OUTREG(CRTC_OFFSET_CNTL, INREG(CRTC_OFFSET_CNTL ) & 0xFFEFFFFF );
2626
2627 mdelay(100);
2628#endif
2629
2630 /* Disable CRTCs */
2631 OUTREG(CRTC_GEN_CNTL, (INREG(CRTC_GEN_CNTL) & ~CRTC_EN) | CRTC_DISP_REQ_EN_B);
2632 OUTREG(CRTC2_GEN_CNTL, (INREG(CRTC2_GEN_CNTL) & ~CRTC2_EN) | CRTC2_DISP_REQ_EN_B);
2633 (void)INREG(CRTC2_GEN_CNTL);
2634 mdelay(17);
2635}
2636
2637static void radeon_set_suspend(struct radeonfb_info *rinfo, int suspend)
2638{
2639 u16 pwr_cmd;
2640
2641 if (!rinfo->pm_reg)
2642 return;
2643
2644	/* Set the chip into the appropriate suspend mode (we use D2;
2645	 * D3 would require a complete re-initialization of the chip,
2646 * including PCI config registers, clocks, AGP conf, ...)
2647 */
2648 if (suspend) {
2649		/* According to ATI, we should program V2CLK here; I still
2650		 * have to verify exactly what's going on.
2651 */
2652 /* Save some registers */
2653 radeon_pm_save_regs(rinfo);
2654
2655		/* Check that on M7 too; it might work, it might not. M7 may
2656		 * also need explicit enabling of PM.
2657 */
2658 if (rinfo->arch == RADEON_M6) {
2659 /* Program V2CLK */
2660 radeon_pm_program_v2clk(rinfo);
2661
2662 /* Disable IO PADs */
2663 radeon_pm_disable_iopad(rinfo);
2664
2665 /* Set low current */
2666 radeon_pm_low_current(rinfo);
2667
2668 /* Prepare chip for power management */
2669 radeon_pm_setup_for_suspend(rinfo);
2670
2671 /* Reset the MDLL */
2672 OUTPLL(MDLL_CKO, INPLL(MDLL_CKO) | MCKOA_RESET);
2673 (void)INPLL(MDLL_RDCKA);
2674 OUTPLL(MDLL_CKO, INPLL(MDLL_CKO) & ~MCKOA_RESET);
2675 (void)INPLL(MDLL_RDCKA);
2676 }
2677
2678		/* Switch PCI power management to D2. */
2679 for (;;) {
2680 pci_read_config_word(
2681 rinfo->pdev, rinfo->pm_reg+PCI_PM_CTRL,
2682 &pwr_cmd);
2683 if (pwr_cmd & 2)
2684 break;
2685 pci_write_config_word(
2686 rinfo->pdev, rinfo->pm_reg+PCI_PM_CTRL,
2687 (pwr_cmd & ~PCI_PM_CTRL_STATE_MASK) | 2);
2688 mdelay(500);
2689 }
2690 } else {
2691		/* Switch PCI power management back to D0 */
2692 mdelay(200);
2693 pci_write_config_word(rinfo->pdev, rinfo->pm_reg+PCI_PM_CTRL, 0);
2694 mdelay(500);
2695
2696 dbg_clk = INPLL(1);
2697
2698		/* Do we need that on M7? */
2699 if (rinfo->arch == RADEON_M6) {
2700 /* Restore the MDLL */
2701 OUTPLL(MDLL_CKO, INPLL(MDLL_CKO) & ~MCKOA_RESET);
2702 (void)INPLL(MDLL_CKO);
2703 }
2704
2705 /* Restore some registers */
2706 radeon_pm_restore_regs(rinfo);
2707 }
2708}
2709
2710/*
2711 * Save the contents of the framebuffer when we go to sleep,
2712 * and restore it when we wake up again.
2713 */
2714
2715int radeon_sleep_notify(struct pmu_sleep_notifier *self, int when)
2716{
2717 struct radeonfb_info *rinfo;
2718
2719 for (rinfo = board_list; rinfo != NULL; rinfo = rinfo->next) {
2720 struct fb_fix_screeninfo fix;
2721 int nb;
2722 struct display *disp;
2723
2724 disp = (rinfo->currcon < 0) ? rinfo->info.disp : &fb_display[rinfo->currcon];
2725
2726 switch (rinfo->arch) {
2727 case RADEON_M6:
2728 case RADEON_M7:
2729 case RADEON_M9:
2730 break;
2731 default:
2732 return PBOOK_SLEEP_REFUSE;
2733 }
2734
2735 radeonfb_get_fix(&fix, fg_console, (struct fb_info *)rinfo);
2736 nb = fb_display[fg_console].var.yres * fix.line_length;
2737
2738 switch (when) {
2739 case PBOOK_SLEEP_NOW:
2740 acquire_console_sem();
2741 disp->dispsw = &fbcon_dummy;
2742
2743 if (!noaccel) {
2744 /* Make sure engine is reset */
2745 radeon_engine_reset();
2746 radeon_engine_idle();
2747 }
2748
2749 /* Blank display and LCD */
2750 radeonfb_blank(VESA_POWERDOWN+1,
2751 (struct fb_info *)rinfo);
2752
2753 /* Sleep */
2754 rinfo->asleep = 1;
2755 radeon_set_suspend(rinfo, 1);
2756 release_console_sem();
2757
2758 break;
2759 case PBOOK_WAKE:
2760 acquire_console_sem();
2761 /* Wakeup */
2762 radeon_set_suspend(rinfo, 0);
2763
2764 if (!noaccel)
2765 radeon_engine_init(rinfo);
2766 rinfo->asleep = 0;
2767 radeon_set_dispsw(rinfo, disp);
2768 radeon_load_video_mode(rinfo, &disp->var);
2769 do_install_cmap(rinfo->currcon < 0 ? 0 : rinfo->currcon,
2770 (struct fb_info *)rinfo);
2771
2772 radeonfb_blank(0, (struct fb_info *)rinfo);
2773 release_console_sem();
2774 printk("CLK_PIN_CNTL on wakeup was: %08x\n", dbg_clk);
2775 break;
2776 }
2777 }
2778
2779 return PBOOK_SLEEP_OK;
2780}
2781
2782#endif /* CONFIG_PMAC_PBOOK */
2783
2784static int radeonfb_pci_register (struct pci_dev *pdev,
2785 const struct pci_device_id *ent)
2786{
2787 struct radeonfb_info *rinfo;
2788 struct radeon_chip_info *rci = &radeon_chip_info[ent->driver_data];
2789 u32 tmp;
2790
2791 RTRACE("radeonfb_pci_register BEGIN\n");
2792
2793 /* Enable device in PCI config */
2794 if (pci_enable_device(pdev) != 0) {
2795 printk(KERN_ERR "radeonfb: Cannot enable PCI device\n");
2796 return -ENODEV;
2797 }
2798
2799 rinfo = kmalloc (sizeof (struct radeonfb_info), GFP_KERNEL);
2800 if (!rinfo) {
2801 printk ("radeonfb: could not allocate memory\n");
2802 return -ENODEV;
2803 }
2804
2805 memset (rinfo, 0, sizeof (struct radeonfb_info));
2806 //info = &rinfo->info;
2807 rinfo->pdev = pdev;
2808 strcpy(rinfo->name, rci->name);
2809 rinfo->arch = rci->arch;
2810
2811 /* Set base addrs */
2812 rinfo->fb_base_phys = pci_resource_start (pdev, 0);
2813 rinfo->mmio_base_phys = pci_resource_start (pdev, 2);
2814
2815 /* request the mem regions */
2816 if (!request_mem_region (rinfo->fb_base_phys,
2817 pci_resource_len(pdev, 0), "radeonfb")) {
2818 printk ("radeonfb: cannot reserve FB region\n");
2819 kfree (rinfo);
2820 return -ENODEV;
2821 }
2822
2823 if (!request_mem_region (rinfo->mmio_base_phys,
2824 pci_resource_len(pdev, 2), "radeonfb")) {
2825 printk ("radeonfb: cannot reserve MMIO region\n");
2826 release_mem_region (rinfo->fb_base_phys,
2827 pci_resource_len(pdev, 0));
2828 kfree (rinfo);
2829 return -ENODEV;
2830 }
2831
2832 /* map the regions */
2833 rinfo->mmio_base = ioremap (rinfo->mmio_base_phys, RADEON_REGSIZE);
2834 if (!rinfo->mmio_base) {
2835 printk ("radeonfb: cannot map MMIO\n");
2836 release_mem_region (rinfo->mmio_base_phys,
2837 pci_resource_len(pdev, 2));
2838 release_mem_region (rinfo->fb_base_phys,
2839 pci_resource_len(pdev, 0));
2840 kfree (rinfo);
2841 return -ENODEV;
2842 }
2843
2844 rinfo->chipset = pdev->device;
2845
2846 switch (rinfo->arch) {
2847 case RADEON_R100:
2848 rinfo->hasCRTC2 = 0;
2849 break;
2850 default:
2851 /* all the rest have it */
2852 rinfo->hasCRTC2 = 1;
2853 break;
2854 }
2855#if 0
2856 if (rinfo->arch == RADEON_M7) {
2857 /*
2858 * Noticed some errors in accel with M7, will have to work these out...
2859 */
2860 noaccel = 1;
2861 }
2862#endif
2863 if (mirror)
2864 printk("radeonfb: mirroring display to CRT\n");
2865
2866 /* framebuffer size */
2867 tmp = INREG(CONFIG_MEMSIZE);
2868
2869 /* mem size is bits [28:0], mask off the rest */
2870 rinfo->video_ram = tmp & CONFIG_MEMSIZE_MASK;
2871
2872 /* ram type */
2873 tmp = INREG(MEM_SDRAM_MODE_REG);
2874 switch ((MEM_CFG_TYPE & tmp) >> 30) {
2875 case 0:
2876 /* SDR SGRAM (2:1) */
2877 strcpy(rinfo->ram_type, "SDR SGRAM");
2878 rinfo->ram.ml = 4;
2879 rinfo->ram.mb = 4;
2880 rinfo->ram.trcd = 1;
2881 rinfo->ram.trp = 2;
2882 rinfo->ram.twr = 1;
2883 rinfo->ram.cl = 2;
2884 rinfo->ram.loop_latency = 16;
2885 rinfo->ram.rloop = 16;
2886
2887 break;
2888 case 1:
2889 /* DDR SGRAM */
2890 strcpy(rinfo->ram_type, "DDR SGRAM");
2891 rinfo->ram.ml = 4;
2892 rinfo->ram.mb = 4;
2893 rinfo->ram.trcd = 3;
2894 rinfo->ram.trp = 3;
2895 rinfo->ram.twr = 2;
2896 rinfo->ram.cl = 3;
2897 rinfo->ram.tr2w = 1;
2898 rinfo->ram.loop_latency = 16;
2899 rinfo->ram.rloop = 16;
2900
2901 break;
2902 default:
2903 /* 64-bit SDR SGRAM */
2904 strcpy(rinfo->ram_type, "SDR SGRAM 64");
2905 rinfo->ram.ml = 4;
2906 rinfo->ram.mb = 8;
2907 rinfo->ram.trcd = 3;
2908 rinfo->ram.trp = 3;
2909 rinfo->ram.twr = 1;
2910 rinfo->ram.cl = 3;
2911 rinfo->ram.tr2w = 1;
2912 rinfo->ram.loop_latency = 17;
2913 rinfo->ram.rloop = 17;
2914
2915 break;
2916 }
2917
2918 rinfo->bios_seg = radeon_find_rom(rinfo);
2919 radeon_get_pllinfo(rinfo, rinfo->bios_seg);
2920
2921 /*
2922	 * Hack to get around some busted production M6s
2923	 * reporting no RAM
2924 */
2925 if (rinfo->video_ram == 0) {
2926 switch (pdev->device) {
2927 case PCI_DEVICE_ID_ATI_RADEON_LY:
2928 case PCI_DEVICE_ID_ATI_RADEON_LZ:
2929 rinfo->video_ram = 8192 * 1024;
2930 break;
2931 default:
2932 break;
2933 }
2934 }
2935
2936
2937 RTRACE("radeonfb: probed %s %dk videoram\n", (rinfo->ram_type), (rinfo->video_ram/1024));
2938
2939#if !defined(__powerpc__)
2940 radeon_get_moninfo(rinfo);
2941#else
2942 switch (pdev->device) {
2943 case PCI_DEVICE_ID_ATI_RADEON_LW:
2944 case PCI_DEVICE_ID_ATI_RADEON_LX:
2945 case PCI_DEVICE_ID_ATI_RADEON_LY:
2946 case PCI_DEVICE_ID_ATI_RADEON_LZ:
2947 rinfo->dviDisp_type = MT_LCD;
2948 break;
2949 default:
2950 radeon_get_moninfo(rinfo);
2951 break;
2952 }
2953#endif
2954
2955 radeon_get_EDID(rinfo);
2956
2957 if ((rinfo->dviDisp_type == MT_DFP) || (rinfo->dviDisp_type == MT_LCD) ||
2958 (rinfo->crtDisp_type == MT_DFP)) {
2959 if (!radeon_get_dfpinfo(rinfo)) {
2960 iounmap(rinfo->mmio_base);
2961 release_mem_region (rinfo->mmio_base_phys,
2962 pci_resource_len(pdev, 2));
2963 release_mem_region (rinfo->fb_base_phys,
2964 pci_resource_len(pdev, 0));
2965 kfree (rinfo);
2966 return -ENODEV;
2967 }
2968 }
2969
2970 rinfo->fb_base = ioremap (rinfo->fb_base_phys, rinfo->video_ram);
2971 if (!rinfo->fb_base) {
2972 printk ("radeonfb: cannot map FB\n");
2973 iounmap(rinfo->mmio_base);
2974 release_mem_region (rinfo->mmio_base_phys,
2975 pci_resource_len(pdev, 2));
2976 release_mem_region (rinfo->fb_base_phys,
2977 pci_resource_len(pdev, 0));
2978 kfree (rinfo);
2979 return -ENODEV;
2980 }
2981
2982	/* I SHOULD FIX THAT CRAP! I should probably mimic the XFree DRI
2983 * driver setup here.
2984 *
2985 * On PPC, OF based cards setup the internal memory
2986 * mapping in strange ways. We change it so that the
2987 * framebuffer is mapped at 0 and given half of the card's
2988 * address space (2Gb). AGP is mapped high (0xe0000000) and
2989 * can use up to 512Mb. Once DRI is fully implemented, we
2990	 * will have to set up the PCI remapper to remap the agp_special_page
2991	 * memory page somewhere between those regions so that the card
2992	 * can use a normal PCI bus master cycle to access the ring read ptr.
2993 * --BenH.
2994 */
2995#ifdef CONFIG_ALL_PPC
2996 if (rinfo->hasCRTC2)
2997 OUTREG(CRTC2_GEN_CNTL,
2998 (INREG(CRTC2_GEN_CNTL) & ~CRTC2_EN) | CRTC2_DISP_REQ_EN_B);
2999 OUTREG(CRTC_EXT_CNTL, INREG(CRTC_EXT_CNTL) | CRTC_DISPLAY_DIS);
3000 OUTREG(MC_FB_LOCATION, 0x7fff0000);
3001 OUTREG(MC_AGP_LOCATION, 0xffffe000);
3002 OUTREG(DISPLAY_BASE_ADDR, 0x00000000);
3003 if (rinfo->hasCRTC2)
3004 OUTREG(CRTC2_DISPLAY_BASE_ADDR, 0x00000000);
3005 OUTREG(SRC_OFFSET, 0x00000000);
3006 OUTREG(DST_OFFSET, 0x00000000);
3007 mdelay(10);
3008 OUTREG(CRTC_EXT_CNTL, INREG(CRTC_EXT_CNTL) & ~CRTC_DISPLAY_DIS);
3009#endif /* CONFIG_ALL_PPC */
3010
3011	/* save the current mode regs before we switch to the new one
3012	 * so we can restore them upon __exit
3013 */
3014 radeon_save_state (rinfo, &rinfo->init_state);
3015
3016 /* set all the vital stuff */
3017 radeon_set_fbinfo (rinfo);
3018
3019 pci_set_drvdata(pdev, rinfo);
3020 rinfo->next = board_list;
3021 board_list = rinfo;
3022 ((struct fb_info *) rinfo)->device = &pdev->dev;
3023 if (register_framebuffer ((struct fb_info *) rinfo) < 0) {
3024 printk ("radeonfb: could not register framebuffer\n");
3025 iounmap(rinfo->fb_base);
3026 iounmap(rinfo->mmio_base);
3027 release_mem_region (rinfo->mmio_base_phys,
3028 pci_resource_len(pdev, 2));
3029 release_mem_region (rinfo->fb_base_phys,
3030 pci_resource_len(pdev, 0));
3031 kfree (rinfo);
3032 return -ENODEV;
3033 }
3034
3035#ifdef CONFIG_MTRR
3036 rinfo->mtrr_hdl = nomtrr ? -1 : mtrr_add(rinfo->fb_base_phys,
3037 rinfo->video_ram,
3038 MTRR_TYPE_WRCOMB, 1);
3039#endif
3040
3041#ifdef CONFIG_PMAC_BACKLIGHT
3042 if (rinfo->dviDisp_type == MT_LCD)
3043 register_backlight_controller(&radeon_backlight_controller,
3044 rinfo, "ati");
3045#endif
3046
3047#ifdef CONFIG_PMAC_PBOOK
3048 if (rinfo->dviDisp_type == MT_LCD) {
3049 rinfo->pm_reg = pci_find_capability(pdev, PCI_CAP_ID_PM);
3050 pmu_register_sleep_notifier(&radeon_sleep_notifier);
3051 }
3052#endif
3053
3054 printk ("radeonfb: ATI Radeon %s %s %d MB\n", rinfo->name, rinfo->ram_type,
3055 (rinfo->video_ram/(1024*1024)));
3056
3057 if (rinfo->hasCRTC2) {
3058 printk("radeonfb: DVI port %s monitor connected\n",
3059 GET_MON_NAME(rinfo->dviDisp_type));
3060 printk("radeonfb: CRT port %s monitor connected\n",
3061 GET_MON_NAME(rinfo->crtDisp_type));
3062 } else {
3063 printk("radeonfb: CRT port %s monitor connected\n",
3064 GET_MON_NAME(rinfo->crtDisp_type));
3065 }
3066
3067 RTRACE("radeonfb_pci_register END\n");
3068
3069 return 0;
3070}
3071
3072
3073
3074static void __devexit radeonfb_pci_unregister (struct pci_dev *pdev)
3075{
3076 struct radeonfb_info *rinfo = pci_get_drvdata(pdev);
3077
3078 if (!rinfo)
3079 return;
3080
3081 /* restore original state
3082 *
3083 * Doesn't quite work yet, possibly because of the PPC hacking
3084 * I do on startup, disable for now. --BenH
3085 */
3086 radeon_write_mode (rinfo, &rinfo->init_state);
3087
3088#ifdef CONFIG_MTRR
3089 if (rinfo->mtrr_hdl >= 0)
3090 mtrr_del(rinfo->mtrr_hdl, 0, 0);
3091#endif
3092
3093 unregister_framebuffer ((struct fb_info *) rinfo);
3094
3095 iounmap(rinfo->mmio_base);
3096 iounmap(rinfo->fb_base);
3097
3098 release_mem_region (rinfo->mmio_base_phys,
3099 pci_resource_len(pdev, 2));
3100 release_mem_region (rinfo->fb_base_phys,
3101 pci_resource_len(pdev, 0));
3102
3103 kfree (rinfo);
3104}
3105
3106
3107static struct pci_driver radeonfb_driver = {
3108 .name = "radeonfb",
3109 .id_table = radeonfb_pci_table,
3110 .probe = radeonfb_pci_register,
3111 .remove = __devexit_p(radeonfb_pci_unregister),
3112};
3113
3114#ifndef MODULE
3115static int __init radeonfb_old_setup (char *options)
3116{
3117 char *this_opt;
3118
3119 if (!options || !*options)
3120 return 0;
3121
3122 while ((this_opt = strsep (&options, ",")) != NULL) {
3123 if (!*this_opt)
3124 continue;
3125 if (!strncmp(this_opt, "noaccel", 7)) {
3126 noaccel = 1;
3127 } else if (!strncmp(this_opt, "mirror", 6)) {
3128 mirror = 1;
3129 } else if (!strncmp(this_opt, "dfp", 3)) {
3130 force_dfp = 1;
3131 } else if (!strncmp(this_opt, "panel_yres:", 11)) {
3132 panel_yres = simple_strtoul((this_opt+11), NULL, 0);
3133 } else if (!strncmp(this_opt, "nomtrr", 6)) {
3134 nomtrr = 1;
3135 } else
3136 mode_option = this_opt;
3137 }
3138
3139 return 0;
3140}
3141#endif /* MODULE */
3142
3143static int __init radeonfb_old_init (void)
3144{
3145#ifndef MODULE
3146 char *option = NULL;
3147
3148 if (fb_get_options("radeonfb_old", &option))
3149 return -ENODEV;
3150 radeonfb_old_setup(option);
3151#endif
3152 return pci_register_driver (&radeonfb_driver);
3153}
3154
3155
3156static void __exit radeonfb_old_exit (void)
3157{
3158 pci_unregister_driver (&radeonfb_driver);
3159}
3160
3161module_init(radeonfb_old_init);
3162module_exit(radeonfb_old_exit);
3163
3164
3165MODULE_AUTHOR("Ani Joshi");
3166MODULE_DESCRIPTION("framebuffer driver for ATI Radeon chipset");
3167MODULE_LICENSE("GPL");
diff --git a/drivers/video/stifb.c b/drivers/video/stifb.c
index 8d5f35676f9a..4a292aae6eb2 100644
--- a/drivers/video/stifb.c
+++ b/drivers/video/stifb.c
@@ -1378,7 +1378,7 @@ stifb_setup(char *options)
1378 int i; 1378 int i;
1379 1379
1380 if (!options || !*options) 1380 if (!options || !*options)
1381 return 0; 1381 return 1;
1382 1382
1383 if (strncmp(options, "off", 3) == 0) { 1383 if (strncmp(options, "off", 3) == 0) {
1384 stifb_disabled = 1; 1384 stifb_disabled = 1;
@@ -1393,7 +1393,7 @@ stifb_setup(char *options)
1393 stifb_bpp_pref[i] = simple_strtoul(options, &options, 10); 1393 stifb_bpp_pref[i] = simple_strtoul(options, &options, 10);
1394 } 1394 }
1395 } 1395 }
1396 return 0; 1396 return 1;
1397} 1397}
1398 1398
1399__setup("stifb=", stifb_setup); 1399__setup("stifb=", stifb_setup);
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index f6e24ee85f07..5fc86ea20692 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -4,8 +4,9 @@
4 * Frame Buffer Device for ATI Imageon w100 (Wallaby) 4 * Frame Buffer Device for ATI Imageon w100 (Wallaby)
5 * 5 *
6 * Copyright (C) 2002, ATI Corp. 6 * Copyright (C) 2002, ATI Corp.
7 * Copyright (C) 2004-2005 Richard Purdie 7 * Copyright (C) 2004-2006 Richard Purdie
8 * Copyright (c) 2005 Ian Molton 8 * Copyright (c) 2005 Ian Molton
9 * Copyright (c) 2006 Alberto Mardegan
9 * 10 *
10 * Rewritten for 2.6 by Richard Purdie <rpurdie@rpsys.net> 11 * Rewritten for 2.6 by Richard Purdie <rpurdie@rpsys.net>
11 * 12 *
@@ -14,6 +15,9 @@
14 * 15 *
15 * w32xx support by Ian Molton 16 * w32xx support by Ian Molton
16 * 17 *
18 * Hardware acceleration support by Alberto Mardegan
19 * <mardy@users.sourceforge.net>
20 *
17 * This program is free software; you can redistribute it and/or modify 21 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License version 2 as 22 * it under the terms of the GNU General Public License version 2 as
19 * published by the Free Software Foundation. 23 * published by the Free Software Foundation.
@@ -47,6 +51,7 @@ static void w100_set_dispregs(struct w100fb_par*);
47static void w100_update_enable(void); 51static void w100_update_enable(void);
48static void w100_update_disable(void); 52static void w100_update_disable(void);
49static void calc_hsync(struct w100fb_par *par); 53static void calc_hsync(struct w100fb_par *par);
54static void w100_init_graphic_engine(struct w100fb_par *par);
50struct w100_pll_info *w100_get_xtal_table(unsigned int freq); 55struct w100_pll_info *w100_get_xtal_table(unsigned int freq);
51 56
52/* Pseudo palette size */ 57/* Pseudo palette size */
@@ -248,6 +253,152 @@ static int w100fb_blank(int blank_mode, struct fb_info *info)
248} 253}
249 254
250 255
256static void w100_fifo_wait(int entries)
257{
258 union rbbm_status_u status;
259 int i;
260
261 for (i = 0; i < 2000000; i++) {
262 status.val = readl(remapped_regs + mmRBBM_STATUS);
263 if (status.f.cmdfifo_avail >= entries)
264 return;
265 udelay(1);
266 }
267 printk(KERN_ERR "w100fb: FIFO Timeout!\n");
268}
269
270
271static int w100fb_sync(struct fb_info *info)
272{
273 union rbbm_status_u status;
274 int i;
275
276 for (i = 0; i < 2000000; i++) {
277 status.val = readl(remapped_regs + mmRBBM_STATUS);
278 if (!status.f.gui_active)
279 return 0;
280 udelay(1);
281 }
282 printk(KERN_ERR "w100fb: Graphic engine timeout!\n");
283 return -EBUSY;
284}
285
286
287static void w100_init_graphic_engine(struct w100fb_par *par)
288{
289 union dp_gui_master_cntl_u gmc;
290 union dp_mix_u dp_mix;
291 union dp_datatype_u dp_datatype;
292 union dp_cntl_u dp_cntl;
293
294 w100_fifo_wait(4);
295 writel(W100_FB_BASE, remapped_regs + mmDST_OFFSET);
296 writel(par->xres, remapped_regs + mmDST_PITCH);
297 writel(W100_FB_BASE, remapped_regs + mmSRC_OFFSET);
298 writel(par->xres, remapped_regs + mmSRC_PITCH);
299
300 w100_fifo_wait(3);
301 writel(0, remapped_regs + mmSC_TOP_LEFT);
302 writel((par->yres << 16) | par->xres, remapped_regs + mmSC_BOTTOM_RIGHT);
303 writel(0x1fff1fff, remapped_regs + mmSRC_SC_BOTTOM_RIGHT);
304
305 w100_fifo_wait(4);
306 dp_cntl.val = 0;
307 dp_cntl.f.dst_x_dir = 1;
308 dp_cntl.f.dst_y_dir = 1;
309 dp_cntl.f.src_x_dir = 1;
310 dp_cntl.f.src_y_dir = 1;
311 dp_cntl.f.dst_major_x = 1;
312 dp_cntl.f.src_major_x = 1;
313 writel(dp_cntl.val, remapped_regs + mmDP_CNTL);
314
315 gmc.val = 0;
316 gmc.f.gmc_src_pitch_offset_cntl = 1;
317 gmc.f.gmc_dst_pitch_offset_cntl = 1;
318 gmc.f.gmc_src_clipping = 1;
319 gmc.f.gmc_dst_clipping = 1;
320 gmc.f.gmc_brush_datatype = GMC_BRUSH_NONE;
321 gmc.f.gmc_dst_datatype = 3; /* from DstType_16Bpp_444 */
322 gmc.f.gmc_src_datatype = SRC_DATATYPE_EQU_DST;
323 gmc.f.gmc_byte_pix_order = 1;
324 gmc.f.gmc_default_sel = 0;
325 gmc.f.gmc_rop3 = ROP3_SRCCOPY;
326 gmc.f.gmc_dp_src_source = DP_SRC_MEM_RECTANGULAR;
327 gmc.f.gmc_clr_cmp_fcn_dis = 1;
328 gmc.f.gmc_wr_msk_dis = 1;
329 gmc.f.gmc_dp_op = DP_OP_ROP;
330 writel(gmc.val, remapped_regs + mmDP_GUI_MASTER_CNTL);
331
332 dp_datatype.val = dp_mix.val = 0;
333 dp_datatype.f.dp_dst_datatype = gmc.f.gmc_dst_datatype;
334 dp_datatype.f.dp_brush_datatype = gmc.f.gmc_brush_datatype;
335 dp_datatype.f.dp_src2_type = 0;
336 dp_datatype.f.dp_src2_datatype = gmc.f.gmc_src_datatype;
337 dp_datatype.f.dp_src_datatype = gmc.f.gmc_src_datatype;
338 dp_datatype.f.dp_byte_pix_order = gmc.f.gmc_byte_pix_order;
339 writel(dp_datatype.val, remapped_regs + mmDP_DATATYPE);
340
341 dp_mix.f.dp_src_source = gmc.f.gmc_dp_src_source;
342 dp_mix.f.dp_src2_source = 1;
343 dp_mix.f.dp_rop3 = gmc.f.gmc_rop3;
344 dp_mix.f.dp_op = gmc.f.gmc_dp_op;
345 writel(dp_mix.val, remapped_regs + mmDP_MIX);
346}
347
348
349static void w100fb_fillrect(struct fb_info *info,
350 const struct fb_fillrect *rect)
351{
352 union dp_gui_master_cntl_u gmc;
353
354 if (info->state != FBINFO_STATE_RUNNING)
355 return;
356 if (info->flags & FBINFO_HWACCEL_DISABLED) {
357 cfb_fillrect(info, rect);
358 return;
359 }
360
361 gmc.val = readl(remapped_regs + mmDP_GUI_MASTER_CNTL);
362 gmc.f.gmc_rop3 = ROP3_PATCOPY;
363 gmc.f.gmc_brush_datatype = GMC_BRUSH_SOLID_COLOR;
364 w100_fifo_wait(2);
365 writel(gmc.val, remapped_regs + mmDP_GUI_MASTER_CNTL);
366 writel(rect->color, remapped_regs + mmDP_BRUSH_FRGD_CLR);
367
368 w100_fifo_wait(2);
369 writel((rect->dy << 16) | (rect->dx & 0xffff), remapped_regs + mmDST_Y_X);
370 writel((rect->width << 16) | (rect->height & 0xffff),
371 remapped_regs + mmDST_WIDTH_HEIGHT);
372}
373
374
375static void w100fb_copyarea(struct fb_info *info,
376 const struct fb_copyarea *area)
377{
378 u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
379 u32 h = area->height, w = area->width;
380 union dp_gui_master_cntl_u gmc;
381
382 if (info->state != FBINFO_STATE_RUNNING)
383 return;
384 if (info->flags & FBINFO_HWACCEL_DISABLED) {
385 cfb_copyarea(info, area);
386 return;
387 }
388
389 gmc.val = readl(remapped_regs + mmDP_GUI_MASTER_CNTL);
390 gmc.f.gmc_rop3 = ROP3_SRCCOPY;
391 gmc.f.gmc_brush_datatype = GMC_BRUSH_NONE;
392 w100_fifo_wait(1);
393 writel(gmc.val, remapped_regs + mmDP_GUI_MASTER_CNTL);
394
395 w100_fifo_wait(3);
396 writel((sy << 16) | (sx & 0xffff), remapped_regs + mmSRC_Y_X);
397 writel((dy << 16) | (dx & 0xffff), remapped_regs + mmDST_Y_X);
398 writel((w << 16) | (h & 0xffff), remapped_regs + mmDST_WIDTH_HEIGHT);
399}
400
401
251/* 402/*
252 * Change the resolution by calling the appropriate hardware functions 403 * Change the resolution by calling the appropriate hardware functions
253 */ 404 */
@@ -265,6 +416,7 @@ static void w100fb_activate_var(struct w100fb_par *par)
265 w100_init_lcd(par); 416 w100_init_lcd(par);
266 w100_set_dispregs(par); 417 w100_set_dispregs(par);
267 w100_update_enable(); 418 w100_update_enable();
419 w100_init_graphic_engine(par);
268 420
269 calc_hsync(par); 421 calc_hsync(par);
270 422
@@ -394,9 +546,10 @@ static struct fb_ops w100fb_ops = {
394 .fb_set_par = w100fb_set_par, 546 .fb_set_par = w100fb_set_par,
395 .fb_setcolreg = w100fb_setcolreg, 547 .fb_setcolreg = w100fb_setcolreg,
396 .fb_blank = w100fb_blank, 548 .fb_blank = w100fb_blank,
397 .fb_fillrect = cfb_fillrect, 549 .fb_fillrect = w100fb_fillrect,
398 .fb_copyarea = cfb_copyarea, 550 .fb_copyarea = w100fb_copyarea,
399 .fb_imageblit = cfb_imageblit, 551 .fb_imageblit = cfb_imageblit,
552 .fb_sync = w100fb_sync,
400}; 553};
401 554
402#ifdef CONFIG_PM 555#ifdef CONFIG_PM
@@ -543,7 +696,8 @@ int __init w100fb_probe(struct platform_device *pdev)
543 } 696 }
544 697
545 info->fbops = &w100fb_ops; 698 info->fbops = &w100fb_ops;
546 info->flags = FBINFO_DEFAULT; 699 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
700 FBINFO_HWACCEL_FILLRECT;
547 info->node = -1; 701 info->node = -1;
548 info->screen_base = remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE); 702 info->screen_base = remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE);
549 info->screen_size = REMAPPED_FB_LEN; 703 info->screen_size = REMAPPED_FB_LEN;
diff --git a/drivers/video/w100fb.h b/drivers/video/w100fb.h
index 7a58a1e3e427..fffae7b4f6e9 100644
--- a/drivers/video/w100fb.h
+++ b/drivers/video/w100fb.h
@@ -122,15 +122,32 @@
122/* Block DISPLAY End: */ 122/* Block DISPLAY End: */
123 123
124/* Block GFX Start: */ 124/* Block GFX Start: */
125#define mmDST_OFFSET 0x1004
126#define mmDST_PITCH 0x1008
127#define mmDST_Y_X 0x1038
128#define mmDST_WIDTH_HEIGHT 0x1198
129#define mmDP_GUI_MASTER_CNTL 0x106C
125#define mmBRUSH_OFFSET 0x108C 130#define mmBRUSH_OFFSET 0x108C
126#define mmBRUSH_Y_X 0x1074 131#define mmBRUSH_Y_X 0x1074
132#define mmDP_BRUSH_FRGD_CLR 0x107C
133#define mmSRC_OFFSET 0x11AC
134#define mmSRC_PITCH 0x11B0
135#define mmSRC_Y_X 0x1034
127#define mmDEFAULT_PITCH_OFFSET 0x10A0 136#define mmDEFAULT_PITCH_OFFSET 0x10A0
128#define mmDEFAULT_SC_BOTTOM_RIGHT 0x10A8 137#define mmDEFAULT_SC_BOTTOM_RIGHT 0x10A8
129#define mmDEFAULT2_SC_BOTTOM_RIGHT 0x10AC 138#define mmDEFAULT2_SC_BOTTOM_RIGHT 0x10AC
139#define mmSC_TOP_LEFT 0x11BC
140#define mmSC_BOTTOM_RIGHT 0x11C0
141#define mmSRC_SC_BOTTOM_RIGHT 0x11C4
130#define mmGLOBAL_ALPHA 0x1210 142#define mmGLOBAL_ALPHA 0x1210
131#define mmFILTER_COEF 0x1214 143#define mmFILTER_COEF 0x1214
132#define mmMVC_CNTL_START 0x11E0 144#define mmMVC_CNTL_START 0x11E0
133#define mmE2_ARITHMETIC_CNTL 0x1220 145#define mmE2_ARITHMETIC_CNTL 0x1220
146#define mmDP_CNTL 0x11C8
147#define mmDP_CNTL_DST_DIR 0x11CC
148#define mmDP_DATATYPE 0x12C4
149#define mmDP_MIX 0x12C8
150#define mmDP_WRITE_MSK 0x12CC
134#define mmENG_CNTL 0x13E8 151#define mmENG_CNTL 0x13E8
135#define mmENG_PERF_CNT 0x13F0 152#define mmENG_PERF_CNT 0x13F0
136/* Block GFX End: */ 153/* Block GFX End: */
@@ -179,6 +196,7 @@
179/* Block RBBM Start: */ 196/* Block RBBM Start: */
180#define mmWAIT_UNTIL 0x1400 197#define mmWAIT_UNTIL 0x1400
181#define mmISYNC_CNTL 0x1404 198#define mmISYNC_CNTL 0x1404
199#define mmRBBM_STATUS 0x0140
182#define mmRBBM_CNTL 0x0144 200#define mmRBBM_CNTL 0x0144
183#define mmNQWAIT_UNTIL 0x0150 201#define mmNQWAIT_UNTIL 0x0150
184/* Block RBBM End: */ 202/* Block RBBM End: */
@@ -225,147 +243,147 @@
225/* Register structure definitions */ 243/* Register structure definitions */
226 244
227struct wrap_top_dir_t { 245struct wrap_top_dir_t {
228 unsigned long top_addr : 23; 246 u32 top_addr : 23;
229 unsigned long : 9; 247 u32 : 9;
230} __attribute__((packed)); 248} __attribute__((packed));
231 249
232union wrap_top_dir_u { 250union wrap_top_dir_u {
233 unsigned long val : 32; 251 u32 val : 32;
234 struct wrap_top_dir_t f; 252 struct wrap_top_dir_t f;
235} __attribute__((packed)); 253} __attribute__((packed));
236 254
237struct wrap_start_dir_t { 255struct wrap_start_dir_t {
238 unsigned long start_addr : 23; 256 u32 start_addr : 23;
239 unsigned long : 9; 257 u32 : 9;
240} __attribute__((packed)); 258} __attribute__((packed));
241 259
242union wrap_start_dir_u { 260union wrap_start_dir_u {
243 unsigned long val : 32; 261 u32 val : 32;
244 struct wrap_start_dir_t f; 262 struct wrap_start_dir_t f;
245} __attribute__((packed)); 263} __attribute__((packed));
246 264
247struct cif_cntl_t { 265struct cif_cntl_t {
248 unsigned long swap_reg : 2; 266 u32 swap_reg : 2;
249 unsigned long swap_fbuf_1 : 2; 267 u32 swap_fbuf_1 : 2;
250 unsigned long swap_fbuf_2 : 2; 268 u32 swap_fbuf_2 : 2;
251 unsigned long swap_fbuf_3 : 2; 269 u32 swap_fbuf_3 : 2;
252 unsigned long pmi_int_disable : 1; 270 u32 pmi_int_disable : 1;
253 unsigned long pmi_schmen_disable : 1; 271 u32 pmi_schmen_disable : 1;
254 unsigned long intb_oe : 1; 272 u32 intb_oe : 1;
255 unsigned long en_wait_to_compensate_dq_prop_dly : 1; 273 u32 en_wait_to_compensate_dq_prop_dly : 1;
256 unsigned long compensate_wait_rd_size : 2; 274 u32 compensate_wait_rd_size : 2;
257 unsigned long wait_asserted_timeout_val : 2; 275 u32 wait_asserted_timeout_val : 2;
258 unsigned long wait_masked_val : 2; 276 u32 wait_masked_val : 2;
259 unsigned long en_wait_timeout : 1; 277 u32 en_wait_timeout : 1;
260 unsigned long en_one_clk_setup_before_wait : 1; 278 u32 en_one_clk_setup_before_wait : 1;
261 unsigned long interrupt_active_high : 1; 279 u32 interrupt_active_high : 1;
262 unsigned long en_overwrite_straps : 1; 280 u32 en_overwrite_straps : 1;
263 unsigned long strap_wait_active_hi : 1; 281 u32 strap_wait_active_hi : 1;
264 unsigned long lat_busy_count : 2; 282 u32 lat_busy_count : 2;
265 unsigned long lat_rd_pm4_sclk_busy : 1; 283 u32 lat_rd_pm4_sclk_busy : 1;
266 unsigned long dis_system_bits : 1; 284 u32 dis_system_bits : 1;
267 unsigned long dis_mr : 1; 285 u32 dis_mr : 1;
268 unsigned long cif_spare_1 : 4; 286 u32 cif_spare_1 : 4;
269} __attribute__((packed)); 287} __attribute__((packed));
270 288
271union cif_cntl_u { 289union cif_cntl_u {
272 unsigned long val : 32; 290 u32 val : 32;
273 struct cif_cntl_t f; 291 struct cif_cntl_t f;
274} __attribute__((packed)); 292} __attribute__((packed));
275 293
276struct cfgreg_base_t { 294struct cfgreg_base_t {
277 unsigned long cfgreg_base : 24; 295 u32 cfgreg_base : 24;
278 unsigned long : 8; 296 u32 : 8;
279} __attribute__((packed)); 297} __attribute__((packed));
280 298
281union cfgreg_base_u { 299union cfgreg_base_u {
282 unsigned long val : 32; 300 u32 val : 32;
283 struct cfgreg_base_t f; 301 struct cfgreg_base_t f;
284} __attribute__((packed)); 302} __attribute__((packed));
285 303
286struct cif_io_t { 304struct cif_io_t {
287 unsigned long dq_srp : 1; 305 u32 dq_srp : 1;
288 unsigned long dq_srn : 1; 306 u32 dq_srn : 1;
289 unsigned long dq_sp : 4; 307 u32 dq_sp : 4;
290 unsigned long dq_sn : 4; 308 u32 dq_sn : 4;
291 unsigned long waitb_srp : 1; 309 u32 waitb_srp : 1;
292 unsigned long waitb_srn : 1; 310 u32 waitb_srn : 1;
293 unsigned long waitb_sp : 4; 311 u32 waitb_sp : 4;
294 unsigned long waitb_sn : 4; 312 u32 waitb_sn : 4;
295 unsigned long intb_srp : 1; 313 u32 intb_srp : 1;
296 unsigned long intb_srn : 1; 314 u32 intb_srn : 1;
297 unsigned long intb_sp : 4; 315 u32 intb_sp : 4;
298 unsigned long intb_sn : 4; 316 u32 intb_sn : 4;
299 unsigned long : 2; 317 u32 : 2;
300} __attribute__((packed)); 318} __attribute__((packed));
301 319
302union cif_io_u { 320union cif_io_u {
303 unsigned long val : 32; 321 u32 val : 32;
304 struct cif_io_t f; 322 struct cif_io_t f;
305} __attribute__((packed)); 323} __attribute__((packed));
306 324
307struct cif_read_dbg_t { 325struct cif_read_dbg_t {
308 unsigned long unpacker_pre_fetch_trig_gen : 2; 326 u32 unpacker_pre_fetch_trig_gen : 2;
309 unsigned long dly_second_rd_fetch_trig : 1; 327 u32 dly_second_rd_fetch_trig : 1;
310 unsigned long rst_rd_burst_id : 1; 328 u32 rst_rd_burst_id : 1;
311 unsigned long dis_rd_burst_id : 1; 329 u32 dis_rd_burst_id : 1;
312 unsigned long en_block_rd_when_packer_is_not_emp : 1; 330 u32 en_block_rd_when_packer_is_not_emp : 1;
313 unsigned long dis_pre_fetch_cntl_sm : 1; 331 u32 dis_pre_fetch_cntl_sm : 1;
314 unsigned long rbbm_chrncy_dis : 1; 332 u32 rbbm_chrncy_dis : 1;
315 unsigned long rbbm_rd_after_wr_lat : 2; 333 u32 rbbm_rd_after_wr_lat : 2;
316 unsigned long dis_be_during_rd : 1; 334 u32 dis_be_during_rd : 1;
317 unsigned long one_clk_invalidate_pulse : 1; 335 u32 one_clk_invalidate_pulse : 1;
318 unsigned long dis_chnl_priority : 1; 336 u32 dis_chnl_priority : 1;
319 unsigned long rst_read_path_a_pls : 1; 337 u32 rst_read_path_a_pls : 1;
320 unsigned long rst_read_path_b_pls : 1; 338 u32 rst_read_path_b_pls : 1;
321 unsigned long dis_reg_rd_fetch_trig : 1; 339 u32 dis_reg_rd_fetch_trig : 1;
322 unsigned long dis_rd_fetch_trig_from_ind_addr : 1; 340 u32 dis_rd_fetch_trig_from_ind_addr : 1;
323 unsigned long dis_rd_same_byte_to_trig_fetch : 1; 341 u32 dis_rd_same_byte_to_trig_fetch : 1;
324 unsigned long dis_dir_wrap : 1; 342 u32 dis_dir_wrap : 1;
325 unsigned long dis_ring_buf_to_force_dec : 1; 343 u32 dis_ring_buf_to_force_dec : 1;
326 unsigned long dis_addr_comp_in_16bit : 1; 344 u32 dis_addr_comp_in_16bit : 1;
327 unsigned long clr_w : 1; 345 u32 clr_w : 1;
328 unsigned long err_rd_tag_is_3 : 1; 346 u32 err_rd_tag_is_3 : 1;
329 unsigned long err_load_when_ful_a : 1; 347 u32 err_load_when_ful_a : 1;
330 unsigned long err_load_when_ful_b : 1; 348 u32 err_load_when_ful_b : 1;
331 unsigned long : 7; 349 u32 : 7;
332} __attribute__((packed)); 350} __attribute__((packed));
333 351
334union cif_read_dbg_u { 352union cif_read_dbg_u {
335 unsigned long val : 32; 353 u32 val : 32;
336 struct cif_read_dbg_t f; 354 struct cif_read_dbg_t f;
337} __attribute__((packed)); 355} __attribute__((packed));
338 356
339struct cif_write_dbg_t { 357struct cif_write_dbg_t {
340 unsigned long packer_timeout_count : 2; 358 u32 packer_timeout_count : 2;
341 unsigned long en_upper_load_cond : 1; 359 u32 en_upper_load_cond : 1;
342 unsigned long en_chnl_change_cond : 1; 360 u32 en_chnl_change_cond : 1;
343 unsigned long dis_addr_comp_cond : 1; 361 u32 dis_addr_comp_cond : 1;
344 unsigned long dis_load_same_byte_addr_cond : 1; 362 u32 dis_load_same_byte_addr_cond : 1;
345 unsigned long dis_timeout_cond : 1; 363 u32 dis_timeout_cond : 1;
346 unsigned long dis_timeout_during_rbbm : 1; 364 u32 dis_timeout_during_rbbm : 1;
347 unsigned long dis_packer_ful_during_rbbm_timeout : 1; 365 u32 dis_packer_ful_during_rbbm_timeout : 1;
348 unsigned long en_dword_split_to_rbbm : 1; 366 u32 en_dword_split_to_rbbm : 1;
349 unsigned long en_dummy_val : 1; 367 u32 en_dummy_val : 1;
350 unsigned long dummy_val_sel : 1; 368 u32 dummy_val_sel : 1;
351 unsigned long mask_pm4_wrptr_dec : 1; 369 u32 mask_pm4_wrptr_dec : 1;
352 unsigned long dis_mc_clean_cond : 1; 370 u32 dis_mc_clean_cond : 1;
353 unsigned long err_two_reqi_during_ful : 1; 371 u32 err_two_reqi_during_ful : 1;
354 unsigned long err_reqi_during_idle_clk : 1; 372 u32 err_reqi_during_idle_clk : 1;
355 unsigned long err_global : 1; 373 u32 err_global : 1;
356 unsigned long en_wr_buf_dbg_load : 1; 374 u32 en_wr_buf_dbg_load : 1;
357 unsigned long en_wr_buf_dbg_path : 1; 375 u32 en_wr_buf_dbg_path : 1;
358 unsigned long sel_wr_buf_byte : 3; 376 u32 sel_wr_buf_byte : 3;
359 unsigned long dis_rd_flush_wr : 1; 377 u32 dis_rd_flush_wr : 1;
360 unsigned long dis_packer_ful_cond : 1; 378 u32 dis_packer_ful_cond : 1;
361 unsigned long dis_invalidate_by_ops_chnl : 1; 379 u32 dis_invalidate_by_ops_chnl : 1;
362 unsigned long en_halt_when_reqi_err : 1; 380 u32 en_halt_when_reqi_err : 1;
363 unsigned long cif_spare_2 : 5; 381 u32 cif_spare_2 : 5;
364 unsigned long : 1; 382 u32 : 1;
365} __attribute__((packed)); 383} __attribute__((packed));
366 384
367union cif_write_dbg_u { 385union cif_write_dbg_u {
368 unsigned long val : 32; 386 u32 val : 32;
369 struct cif_write_dbg_t f; 387 struct cif_write_dbg_t f;
370} __attribute__((packed)); 388} __attribute__((packed));
371 389
@@ -403,327 +421,327 @@ union cpu_defaults_u {
403} __attribute__((packed)); 421} __attribute__((packed));
404 422
405struct crtc_total_t { 423struct crtc_total_t {
406 unsigned long crtc_h_total : 10; 424 u32 crtc_h_total : 10;
407 unsigned long : 6; 425 u32 : 6;
408 unsigned long crtc_v_total : 10; 426 u32 crtc_v_total : 10;
409 unsigned long : 6; 427 u32 : 6;
410} __attribute__((packed)); 428} __attribute__((packed));
411 429
412union crtc_total_u { 430union crtc_total_u {
413 unsigned long val : 32; 431 u32 val : 32;
414 struct crtc_total_t f; 432 struct crtc_total_t f;
415} __attribute__((packed)); 433} __attribute__((packed));
416 434
417struct crtc_ss_t { 435struct crtc_ss_t {
418 unsigned long ss_start : 10; 436 u32 ss_start : 10;
419 unsigned long : 6; 437 u32 : 6;
420 unsigned long ss_end : 10; 438 u32 ss_end : 10;
421 unsigned long : 2; 439 u32 : 2;
422 unsigned long ss_align : 1; 440 u32 ss_align : 1;
423 unsigned long ss_pol : 1; 441 u32 ss_pol : 1;
424 unsigned long ss_run_mode : 1; 442 u32 ss_run_mode : 1;
425 unsigned long ss_en : 1; 443 u32 ss_en : 1;
426} __attribute__((packed)); 444} __attribute__((packed));
427 445
428union crtc_ss_u { 446union crtc_ss_u {
429 unsigned long val : 32; 447 u32 val : 32;
430 struct crtc_ss_t f; 448 struct crtc_ss_t f;
431} __attribute__((packed)); 449} __attribute__((packed));
432 450
433struct active_h_disp_t { 451struct active_h_disp_t {
434 unsigned long active_h_start : 10; 452 u32 active_h_start : 10;
435 unsigned long : 6; 453 u32 : 6;
436 unsigned long active_h_end : 10; 454 u32 active_h_end : 10;
437 unsigned long : 6; 455 u32 : 6;
438} __attribute__((packed)); 456} __attribute__((packed));
439 457
440union active_h_disp_u { 458union active_h_disp_u {
441 unsigned long val : 32; 459 u32 val : 32;
442 struct active_h_disp_t f; 460 struct active_h_disp_t f;
443} __attribute__((packed)); 461} __attribute__((packed));
444 462
445struct active_v_disp_t { 463struct active_v_disp_t {
446 unsigned long active_v_start : 10; 464 u32 active_v_start : 10;
447 unsigned long : 6; 465 u32 : 6;
448 unsigned long active_v_end : 10; 466 u32 active_v_end : 10;
449 unsigned long : 6; 467 u32 : 6;
450} __attribute__((packed)); 468} __attribute__((packed));
451 469
452union active_v_disp_u { 470union active_v_disp_u {
453 unsigned long val : 32; 471 u32 val : 32;
454 struct active_v_disp_t f; 472 struct active_v_disp_t f;
455} __attribute__((packed)); 473} __attribute__((packed));
456 474
457struct graphic_h_disp_t { 475struct graphic_h_disp_t {
458 unsigned long graphic_h_start : 10; 476 u32 graphic_h_start : 10;
459 unsigned long : 6; 477 u32 : 6;
460 unsigned long graphic_h_end : 10; 478 u32 graphic_h_end : 10;
461 unsigned long : 6; 479 u32 : 6;
462} __attribute__((packed)); 480} __attribute__((packed));
463 481
464union graphic_h_disp_u { 482union graphic_h_disp_u {
465 unsigned long val : 32; 483 u32 val : 32;
466 struct graphic_h_disp_t f; 484 struct graphic_h_disp_t f;
467} __attribute__((packed)); 485} __attribute__((packed));
468 486
469struct graphic_v_disp_t { 487struct graphic_v_disp_t {
470 unsigned long graphic_v_start : 10; 488 u32 graphic_v_start : 10;
471 unsigned long : 6; 489 u32 : 6;
472 unsigned long graphic_v_end : 10; 490 u32 graphic_v_end : 10;
473 unsigned long : 6; 491 u32 : 6;
474} __attribute__((packed)); 492} __attribute__((packed));
475 493
476union graphic_v_disp_u{ 494union graphic_v_disp_u{
477 unsigned long val : 32; 495 u32 val : 32;
478 struct graphic_v_disp_t f; 496 struct graphic_v_disp_t f;
479} __attribute__((packed)); 497} __attribute__((packed));
480 498
481struct graphic_ctrl_t_w100 { 499struct graphic_ctrl_t_w100 {
482 unsigned long color_depth : 3; 500 u32 color_depth : 3;
483 unsigned long portrait_mode : 2; 501 u32 portrait_mode : 2;
484 unsigned long low_power_on : 1; 502 u32 low_power_on : 1;
485 unsigned long req_freq : 4; 503 u32 req_freq : 4;
486 unsigned long en_crtc : 1; 504 u32 en_crtc : 1;
487 unsigned long en_graphic_req : 1; 505 u32 en_graphic_req : 1;
488 unsigned long en_graphic_crtc : 1; 506 u32 en_graphic_crtc : 1;
489 unsigned long total_req_graphic : 9; 507 u32 total_req_graphic : 9;
490 unsigned long lcd_pclk_on : 1; 508 u32 lcd_pclk_on : 1;
491 unsigned long lcd_sclk_on : 1; 509 u32 lcd_sclk_on : 1;
492 unsigned long pclk_running : 1; 510 u32 pclk_running : 1;
493 unsigned long sclk_running : 1; 511 u32 sclk_running : 1;
494 unsigned long : 6; 512 u32 : 6;
495} __attribute__((packed)); 513} __attribute__((packed));
496 514
497struct graphic_ctrl_t_w32xx { 515struct graphic_ctrl_t_w32xx {
498 unsigned long color_depth : 3; 516 u32 color_depth : 3;
499 unsigned long portrait_mode : 2; 517 u32 portrait_mode : 2;
500 unsigned long low_power_on : 1; 518 u32 low_power_on : 1;
501 unsigned long req_freq : 4; 519 u32 req_freq : 4;
502 unsigned long en_crtc : 1; 520 u32 en_crtc : 1;
503 unsigned long en_graphic_req : 1; 521 u32 en_graphic_req : 1;
504 unsigned long en_graphic_crtc : 1; 522 u32 en_graphic_crtc : 1;
505 unsigned long total_req_graphic : 10; 523 u32 total_req_graphic : 10;
506 unsigned long lcd_pclk_on : 1; 524 u32 lcd_pclk_on : 1;
507 unsigned long lcd_sclk_on : 1; 525 u32 lcd_sclk_on : 1;
508 unsigned long pclk_running : 1; 526 u32 pclk_running : 1;
509 unsigned long sclk_running : 1; 527 u32 sclk_running : 1;
510 unsigned long : 5; 528 u32 : 5;
511} __attribute__((packed)); 529} __attribute__((packed));
512 530
513union graphic_ctrl_u { 531union graphic_ctrl_u {
514 unsigned long val : 32; 532 u32 val : 32;
515 struct graphic_ctrl_t_w100 f_w100; 533 struct graphic_ctrl_t_w100 f_w100;
516 struct graphic_ctrl_t_w32xx f_w32xx; 534 struct graphic_ctrl_t_w32xx f_w32xx;
517} __attribute__((packed)); 535} __attribute__((packed));
518 536
519struct video_ctrl_t { 537struct video_ctrl_t {
520 unsigned long video_mode : 1; 538 u32 video_mode : 1;
521 unsigned long keyer_en : 1; 539 u32 keyer_en : 1;
522 unsigned long en_video_req : 1; 540 u32 en_video_req : 1;
523 unsigned long en_graphic_req_video : 1; 541 u32 en_graphic_req_video : 1;
524 unsigned long en_video_crtc : 1; 542 u32 en_video_crtc : 1;
525 unsigned long video_hor_exp : 2; 543 u32 video_hor_exp : 2;
526 unsigned long video_ver_exp : 2; 544 u32 video_ver_exp : 2;
527 unsigned long uv_combine : 1; 545 u32 uv_combine : 1;
528 unsigned long total_req_video : 9; 546 u32 total_req_video : 9;
529 unsigned long video_ch_sel : 1; 547 u32 video_ch_sel : 1;
530 unsigned long video_portrait : 2; 548 u32 video_portrait : 2;
531 unsigned long yuv2rgb_en : 1; 549 u32 yuv2rgb_en : 1;
532 unsigned long yuv2rgb_option : 1; 550 u32 yuv2rgb_option : 1;
533 unsigned long video_inv_hor : 1; 551 u32 video_inv_hor : 1;
534 unsigned long video_inv_ver : 1; 552 u32 video_inv_ver : 1;
535 unsigned long gamma_sel : 2; 553 u32 gamma_sel : 2;
536 unsigned long dis_limit : 1; 554 u32 dis_limit : 1;
537 unsigned long en_uv_hblend : 1; 555 u32 en_uv_hblend : 1;
538 unsigned long rgb_gamma_sel : 2; 556 u32 rgb_gamma_sel : 2;
539} __attribute__((packed)); 557} __attribute__((packed));
540 558
541union video_ctrl_u { 559union video_ctrl_u {
542 unsigned long val : 32; 560 u32 val : 32;
543 struct video_ctrl_t f; 561 struct video_ctrl_t f;
544} __attribute__((packed)); 562} __attribute__((packed));
545 563
546struct disp_db_buf_cntl_rd_t { 564struct disp_db_buf_cntl_rd_t {
547 unsigned long en_db_buf : 1; 565 u32 en_db_buf : 1;
548 unsigned long update_db_buf_done : 1; 566 u32 update_db_buf_done : 1;
549 unsigned long db_buf_cntl : 6; 567 u32 db_buf_cntl : 6;
550 unsigned long : 24; 568 u32 : 24;
551} __attribute__((packed)); 569} __attribute__((packed));
552 570
553union disp_db_buf_cntl_rd_u { 571union disp_db_buf_cntl_rd_u {
554 unsigned long val : 32; 572 u32 val : 32;
555 struct disp_db_buf_cntl_rd_t f; 573 struct disp_db_buf_cntl_rd_t f;
556} __attribute__((packed)); 574} __attribute__((packed));
557 575
558struct disp_db_buf_cntl_wr_t { 576struct disp_db_buf_cntl_wr_t {
559 unsigned long en_db_buf : 1; 577 u32 en_db_buf : 1;
560 unsigned long update_db_buf : 1; 578 u32 update_db_buf : 1;
561 unsigned long db_buf_cntl : 6; 579 u32 db_buf_cntl : 6;
562 unsigned long : 24; 580 u32 : 24;
563} __attribute__((packed)); 581} __attribute__((packed));
564 582
565union disp_db_buf_cntl_wr_u { 583union disp_db_buf_cntl_wr_u {
566 unsigned long val : 32; 584 u32 val : 32;
567 struct disp_db_buf_cntl_wr_t f; 585 struct disp_db_buf_cntl_wr_t f;
568} __attribute__((packed)); 586} __attribute__((packed));
569 587
570struct gamma_value1_t { 588struct gamma_value1_t {
571 unsigned long gamma1 : 8; 589 u32 gamma1 : 8;
572 unsigned long gamma2 : 8; 590 u32 gamma2 : 8;
573 unsigned long gamma3 : 8; 591 u32 gamma3 : 8;
574 unsigned long gamma4 : 8; 592 u32 gamma4 : 8;
575} __attribute__((packed)); 593} __attribute__((packed));
576 594
577union gamma_value1_u { 595union gamma_value1_u {
578 unsigned long val : 32; 596 u32 val : 32;
579 struct gamma_value1_t f; 597 struct gamma_value1_t f;
580} __attribute__((packed)); 598} __attribute__((packed));
581 599
582struct gamma_value2_t { 600struct gamma_value2_t {
583 unsigned long gamma5 : 8; 601 u32 gamma5 : 8;
584 unsigned long gamma6 : 8; 602 u32 gamma6 : 8;
585 unsigned long gamma7 : 8; 603 u32 gamma7 : 8;
586 unsigned long gamma8 : 8; 604 u32 gamma8 : 8;
587} __attribute__((packed)); 605} __attribute__((packed));
588 606
589union gamma_value2_u { 607union gamma_value2_u {
590 unsigned long val : 32; 608 u32 val : 32;
591 struct gamma_value2_t f; 609 struct gamma_value2_t f;
592} __attribute__((packed)); 610} __attribute__((packed));
593 611
594struct gamma_slope_t { 612struct gamma_slope_t {
595 unsigned long slope1 : 3; 613 u32 slope1 : 3;
596 unsigned long slope2 : 3; 614 u32 slope2 : 3;
597 unsigned long slope3 : 3; 615 u32 slope3 : 3;
598 unsigned long slope4 : 3; 616 u32 slope4 : 3;
599 unsigned long slope5 : 3; 617 u32 slope5 : 3;
600 unsigned long slope6 : 3; 618 u32 slope6 : 3;
601 unsigned long slope7 : 3; 619 u32 slope7 : 3;
602 unsigned long slope8 : 3; 620 u32 slope8 : 3;
603 unsigned long : 8; 621 u32 : 8;
604} __attribute__((packed)); 622} __attribute__((packed));
605 623
606union gamma_slope_u { 624union gamma_slope_u {
607 unsigned long val : 32; 625 u32 val : 32;
608 struct gamma_slope_t f; 626 struct gamma_slope_t f;
609} __attribute__((packed)); 627} __attribute__((packed));
610 628
611struct mc_ext_mem_location_t { 629struct mc_ext_mem_location_t {
612 unsigned long mc_ext_mem_start : 16; 630 u32 mc_ext_mem_start : 16;
613 unsigned long mc_ext_mem_top : 16; 631 u32 mc_ext_mem_top : 16;
614} __attribute__((packed)); 632} __attribute__((packed));
615 633
616union mc_ext_mem_location_u { 634union mc_ext_mem_location_u {
617 unsigned long val : 32; 635 u32 val : 32;
618 struct mc_ext_mem_location_t f; 636 struct mc_ext_mem_location_t f;
619} __attribute__((packed)); 637} __attribute__((packed));
620 638
621struct mc_fb_location_t { 639struct mc_fb_location_t {
622 unsigned long mc_fb_start : 16; 640 u32 mc_fb_start : 16;
623 unsigned long mc_fb_top : 16; 641 u32 mc_fb_top : 16;
624} __attribute__((packed)); 642} __attribute__((packed));
625 643
626union mc_fb_location_u { 644union mc_fb_location_u {
627 unsigned long val : 32; 645 u32 val : 32;
628 struct mc_fb_location_t f; 646 struct mc_fb_location_t f;
629} __attribute__((packed)); 647} __attribute__((packed));
630 648
631struct clk_pin_cntl_t { 649struct clk_pin_cntl_t {
632 unsigned long osc_en : 1; 650 u32 osc_en : 1;
633 unsigned long osc_gain : 5; 651 u32 osc_gain : 5;
634 unsigned long dont_use_xtalin : 1; 652 u32 dont_use_xtalin : 1;
635 unsigned long xtalin_pm_en : 1; 653 u32 xtalin_pm_en : 1;
636 unsigned long xtalin_dbl_en : 1; 654 u32 xtalin_dbl_en : 1;
637 unsigned long : 7; 655 u32 : 7;
638 unsigned long cg_debug : 16; 656 u32 cg_debug : 16;
639} __attribute__((packed)); 657} __attribute__((packed));
640 658
641union clk_pin_cntl_u { 659union clk_pin_cntl_u {
642 unsigned long val : 32; 660 u32 val : 32;
643 struct clk_pin_cntl_t f; 661 struct clk_pin_cntl_t f;
644} __attribute__((packed)); 662} __attribute__((packed));
645 663
646struct pll_ref_fb_div_t { 664struct pll_ref_fb_div_t {
647 unsigned long pll_ref_div : 4; 665 u32 pll_ref_div : 4;
648 unsigned long : 4; 666 u32 : 4;
649 unsigned long pll_fb_div_int : 6; 667 u32 pll_fb_div_int : 6;
650 unsigned long : 2; 668 u32 : 2;
651 unsigned long pll_fb_div_frac : 3; 669 u32 pll_fb_div_frac : 3;
652 unsigned long : 1; 670 u32 : 1;
653 unsigned long pll_reset_time : 4; 671 u32 pll_reset_time : 4;
654 unsigned long pll_lock_time : 8; 672 u32 pll_lock_time : 8;
655} __attribute__((packed)); 673} __attribute__((packed));
656 674
657union pll_ref_fb_div_u { 675union pll_ref_fb_div_u {
658 unsigned long val : 32; 676 u32 val : 32;
659 struct pll_ref_fb_div_t f; 677 struct pll_ref_fb_div_t f;
660} __attribute__((packed)); 678} __attribute__((packed));
661 679
662struct pll_cntl_t { 680struct pll_cntl_t {
663 unsigned long pll_pwdn : 1; 681 u32 pll_pwdn : 1;
664 unsigned long pll_reset : 1; 682 u32 pll_reset : 1;
665 unsigned long pll_pm_en : 1; 683 u32 pll_pm_en : 1;
666 unsigned long pll_mode : 1; 684 u32 pll_mode : 1;
667 unsigned long pll_refclk_sel : 1; 685 u32 pll_refclk_sel : 1;
668 unsigned long pll_fbclk_sel : 1; 686 u32 pll_fbclk_sel : 1;
669 unsigned long pll_tcpoff : 1; 687 u32 pll_tcpoff : 1;
670 unsigned long pll_pcp : 3; 688 u32 pll_pcp : 3;
671 unsigned long pll_pvg : 3; 689 u32 pll_pvg : 3;
672 unsigned long pll_vcofr : 1; 690 u32 pll_vcofr : 1;
673 unsigned long pll_ioffset : 2; 691 u32 pll_ioffset : 2;
674 unsigned long pll_pecc_mode : 2; 692 u32 pll_pecc_mode : 2;
675 unsigned long pll_pecc_scon : 2; 693 u32 pll_pecc_scon : 2;
676 unsigned long pll_dactal : 4; 694 u32 pll_dactal : 4;
677 unsigned long pll_cp_clip : 2; 695 u32 pll_cp_clip : 2;
678 unsigned long pll_conf : 3; 696 u32 pll_conf : 3;
679 unsigned long pll_mbctrl : 2; 697 u32 pll_mbctrl : 2;
680 unsigned long pll_ring_off : 1; 698 u32 pll_ring_off : 1;
681} __attribute__((packed)); 699} __attribute__((packed));
682 700
683union pll_cntl_u { 701union pll_cntl_u {
684 unsigned long val : 32; 702 u32 val : 32;
685 struct pll_cntl_t f; 703 struct pll_cntl_t f;
686} __attribute__((packed)); 704} __attribute__((packed));
687 705
688struct sclk_cntl_t { 706struct sclk_cntl_t {
689 unsigned long sclk_src_sel : 2; 707 u32 sclk_src_sel : 2;
690 unsigned long : 2; 708 u32 : 2;
691 unsigned long sclk_post_div_fast : 4; 709 u32 sclk_post_div_fast : 4;
692 unsigned long sclk_clkon_hys : 3; 710 u32 sclk_clkon_hys : 3;
693 unsigned long sclk_post_div_slow : 4; 711 u32 sclk_post_div_slow : 4;
694 unsigned long disp_cg_ok2switch_en : 1; 712 u32 disp_cg_ok2switch_en : 1;
695 unsigned long sclk_force_reg : 1; 713 u32 sclk_force_reg : 1;
696 unsigned long sclk_force_disp : 1; 714 u32 sclk_force_disp : 1;
697 unsigned long sclk_force_mc : 1; 715 u32 sclk_force_mc : 1;
698 unsigned long sclk_force_extmc : 1; 716 u32 sclk_force_extmc : 1;
699 unsigned long sclk_force_cp : 1; 717 u32 sclk_force_cp : 1;
700 unsigned long sclk_force_e2 : 1; 718 u32 sclk_force_e2 : 1;
701 unsigned long sclk_force_e3 : 1; 719 u32 sclk_force_e3 : 1;
702 unsigned long sclk_force_idct : 1; 720 u32 sclk_force_idct : 1;
703 unsigned long sclk_force_bist : 1; 721 u32 sclk_force_bist : 1;
704 unsigned long busy_extend_cp : 1; 722 u32 busy_extend_cp : 1;
705 unsigned long busy_extend_e2 : 1; 723 u32 busy_extend_e2 : 1;
706 unsigned long busy_extend_e3 : 1; 724 u32 busy_extend_e3 : 1;
707 unsigned long busy_extend_idct : 1; 725 u32 busy_extend_idct : 1;
708 unsigned long : 3; 726 u32 : 3;
709} __attribute__((packed)); 727} __attribute__((packed));
710 728
711union sclk_cntl_u { 729union sclk_cntl_u {
712 unsigned long val : 32; 730 u32 val : 32;
713 struct sclk_cntl_t f; 731 struct sclk_cntl_t f;
714} __attribute__((packed)); 732} __attribute__((packed));
715 733
716struct pclk_cntl_t { 734struct pclk_cntl_t {
717 unsigned long pclk_src_sel : 2; 735 u32 pclk_src_sel : 2;
718 unsigned long : 2; 736 u32 : 2;
719 unsigned long pclk_post_div : 4; 737 u32 pclk_post_div : 4;
720 unsigned long : 8; 738 u32 : 8;
721 unsigned long pclk_force_disp : 1; 739 u32 pclk_force_disp : 1;
722 unsigned long : 15; 740 u32 : 15;
723} __attribute__((packed)); 741} __attribute__((packed));
724 742
725union pclk_cntl_u { 743union pclk_cntl_u {
726 unsigned long val : 32; 744 u32 val : 32;
727 struct pclk_cntl_t f; 745 struct pclk_cntl_t f;
728} __attribute__((packed)); 746} __attribute__((packed));
729 747
@@ -735,36 +753,176 @@ union pclk_cntl_u {
735#define TESTCLK_SRC_XTAL 0x06 753#define TESTCLK_SRC_XTAL 0x06
736 754
737struct clk_test_cntl_t { 755struct clk_test_cntl_t {
738 unsigned long testclk_sel : 4; 756 u32 testclk_sel : 4;
739 unsigned long : 3; 757 u32 : 3;
740 unsigned long start_check_freq : 1; 758 u32 start_check_freq : 1;
741 unsigned long tstcount_rst : 1; 759 u32 tstcount_rst : 1;
742 unsigned long : 15; 760 u32 : 15;
743 unsigned long test_count : 8; 761 u32 test_count : 8;
744} __attribute__((packed)); 762} __attribute__((packed));
745 763
746union clk_test_cntl_u { 764union clk_test_cntl_u {
747 unsigned long val : 32; 765 u32 val : 32;
748 struct clk_test_cntl_t f; 766 struct clk_test_cntl_t f;
749} __attribute__((packed)); 767} __attribute__((packed));
750 768
751struct pwrmgt_cntl_t { 769struct pwrmgt_cntl_t {
752 unsigned long pwm_enable : 1; 770 u32 pwm_enable : 1;
753 unsigned long : 1; 771 u32 : 1;
754 unsigned long pwm_mode_req : 2; 772 u32 pwm_mode_req : 2;
755 unsigned long pwm_wakeup_cond : 2; 773 u32 pwm_wakeup_cond : 2;
756 unsigned long pwm_fast_noml_hw_en : 1; 774 u32 pwm_fast_noml_hw_en : 1;
757 unsigned long pwm_noml_fast_hw_en : 1; 775 u32 pwm_noml_fast_hw_en : 1;
758 unsigned long pwm_fast_noml_cond : 4; 776 u32 pwm_fast_noml_cond : 4;
759 unsigned long pwm_noml_fast_cond : 4; 777 u32 pwm_noml_fast_cond : 4;
760 unsigned long pwm_idle_timer : 8; 778 u32 pwm_idle_timer : 8;
761 unsigned long pwm_busy_timer : 8; 779 u32 pwm_busy_timer : 8;
762} __attribute__((packed)); 780} __attribute__((packed));
763 781
764union pwrmgt_cntl_u { 782union pwrmgt_cntl_u {
765 unsigned long val : 32; 783 u32 val : 32;
766 struct pwrmgt_cntl_t f; 784 struct pwrmgt_cntl_t f;
767} __attribute__((packed)); 785} __attribute__((packed));
768 786
787#define SRC_DATATYPE_EQU_DST 3
788
789#define ROP3_SRCCOPY 0xcc
790#define ROP3_PATCOPY 0xf0
791
792#define GMC_BRUSH_SOLID_COLOR 13
793#define GMC_BRUSH_NONE 15
794
795#define DP_SRC_MEM_RECTANGULAR 2
796
797#define DP_OP_ROP 0
798
799struct dp_gui_master_cntl_t {
800 u32 gmc_src_pitch_offset_cntl : 1;
801 u32 gmc_dst_pitch_offset_cntl : 1;
802 u32 gmc_src_clipping : 1;
803 u32 gmc_dst_clipping : 1;
804 u32 gmc_brush_datatype : 4;
805 u32 gmc_dst_datatype : 4;
806 u32 gmc_src_datatype : 3;
807 u32 gmc_byte_pix_order : 1;
808 u32 gmc_default_sel : 1;
809 u32 gmc_rop3 : 8;
810 u32 gmc_dp_src_source : 3;
811 u32 gmc_clr_cmp_fcn_dis : 1;
812 u32 : 1;
813 u32 gmc_wr_msk_dis : 1;
814 u32 gmc_dp_op : 1;
815} __attribute__((packed));
816
817union dp_gui_master_cntl_u {
818 u32 val : 32;
819 struct dp_gui_master_cntl_t f;
820} __attribute__((packed));
821
822struct rbbm_status_t {
823 u32 cmdfifo_avail : 7;
824 u32 : 1;
825 u32 hirq_on_rbb : 1;
826 u32 cprq_on_rbb : 1;
827 u32 cfrq_on_rbb : 1;
828 u32 hirq_in_rtbuf : 1;
829 u32 cprq_in_rtbuf : 1;
830 u32 cfrq_in_rtbuf : 1;
831 u32 cf_pipe_busy : 1;
832 u32 eng_ev_busy : 1;
833 u32 cp_cmdstrm_busy : 1;
834 u32 e2_busy : 1;
835 u32 rb2d_busy : 1;
836 u32 rb3d_busy : 1;
837 u32 se_busy : 1;
838 u32 re_busy : 1;
839 u32 tam_busy : 1;
840 u32 tdm_busy : 1;
841 u32 pb_busy : 1;
842 u32 : 6;
843 u32 gui_active : 1;
844} __attribute__((packed));
845
846union rbbm_status_u {
847 u32 val : 32;
848 struct rbbm_status_t f;
849} __attribute__((packed));
850
851struct dp_datatype_t {
852 u32 dp_dst_datatype : 4;
853 u32 : 4;
854 u32 dp_brush_datatype : 4;
855 u32 dp_src2_type : 1;
856 u32 dp_src2_datatype : 3;
857 u32 dp_src_datatype : 3;
858 u32 : 11;
859 u32 dp_byte_pix_order : 1;
860 u32 : 1;
861} __attribute__((packed));
862
863union dp_datatype_u {
864 u32 val : 32;
865 struct dp_datatype_t f;
866} __attribute__((packed));
867
868struct dp_mix_t {
869 u32 : 8;
870 u32 dp_src_source : 3;
871 u32 dp_src2_source : 3;
872 u32 : 2;
873 u32 dp_rop3 : 8;
874 u32 dp_op : 1;
875 u32 : 7;
876} __attribute__((packed));
877
878union dp_mix_u {
879 u32 val : 32;
880 struct dp_mix_t f;
881} __attribute__((packed));
882
883struct eng_cntl_t {
884 u32 erc_reg_rd_ws : 1;
885 u32 erc_reg_wr_ws : 1;
886 u32 erc_idle_reg_wr : 1;
887 u32 dis_engine_triggers : 1;
888 u32 dis_rop_src_uses_dst_w_h : 1;
889 u32 dis_src_uses_dst_dirmaj : 1;
890 u32 : 6;
891 u32 force_3dclk_when_2dclk : 1;
892 u32 : 19;
893} __attribute__((packed));
894
895union eng_cntl_u {
896 u32 val : 32;
897 struct eng_cntl_t f;
898} __attribute__((packed));
899
900struct dp_cntl_t {
901 u32 dst_x_dir : 1;
902 u32 dst_y_dir : 1;
903 u32 src_x_dir : 1;
904 u32 src_y_dir : 1;
905 u32 dst_major_x : 1;
906 u32 src_major_x : 1;
907 u32 : 26;
908} __attribute__((packed));
909
910union dp_cntl_u {
911 u32 val : 32;
912 struct dp_cntl_t f;
913} __attribute__((packed));
914
915struct dp_cntl_dst_dir_t {
916 u32 : 15;
917 u32 dst_y_dir : 1;
918 u32 : 15;
919 u32 dst_x_dir : 1;
920} __attribute__((packed));
921
922union dp_cntl_dst_dir_u {
923 u32 val : 32;
924 struct dp_cntl_dst_dir_t f;
925} __attribute__((packed));
926
769#endif 927#endif
770 928
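Every register in this header follows the same pattern: a packed bitfield struct describing the fields, paired with a union that overlays a 32-bit val member, so the driver can fill in fields symbolically and hand the hardware a single 32-bit image. A minimal standalone sketch of that pattern follows, using the mc_fb_location register copied (abridged) from the hunk above; the start/top values and the final print are purely illustrative, and bitfield packing is compiler- and endianness-dependent, so the exact layout is an assumption about the build environment. Switching the field type from unsigned long to u32, as this hunk does, presumably keeps the overlay exactly one register (32 bits) wide regardless of the size of long on the target.

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

/* Abridged copy of the frame buffer location register defined above. */
struct mc_fb_location_t {
	u32 mc_fb_start : 16;
	u32 mc_fb_top   : 16;
} __attribute__((packed));

union mc_fb_location_u {
	u32 val : 32;
	struct mc_fb_location_t f;
} __attribute__((packed));

int main(void)
{
	union mc_fb_location_u loc;

	loc.val = 0;                 /* start from a known register image */
	loc.f.mc_fb_start = 0x1000;  /* illustrative values, not real chip settings */
	loc.f.mc_fb_top   = 0x17ff;

	/* The driver would hand loc.val to its register-write helper;
	   here we only print the packed 32-bit image. */
	printf("MC_FB_LOCATION = 0x%08x\n", (unsigned int)loc.val);
	return 0;
}
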
diff --git a/fs/Makefile b/fs/Makefile
index 414484ac48c1..2c22e282c777 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -10,7 +10,7 @@ obj-y := open.o read_write.o file_table.o buffer.o bio.o super.o \
10 ioctl.o readdir.o select.o fifo.o locks.o dcache.o inode.o \ 10 ioctl.o readdir.o select.o fifo.o locks.o dcache.o inode.o \
11 attr.o bad_inode.o file.o filesystems.o namespace.o aio.o \ 11 attr.o bad_inode.o file.o filesystems.o namespace.o aio.o \
12 seq_file.o xattr.o libfs.o fs-writeback.o mpage.o direct-io.o \ 12 seq_file.o xattr.o libfs.o fs-writeback.o mpage.o direct-io.o \
13 ioprio.o pnode.o drop_caches.o splice.o 13 ioprio.o pnode.o drop_caches.o splice.o sync.o
14 14
15obj-$(CONFIG_INOTIFY) += inotify.o 15obj-$(CONFIG_INOTIFY) += inotify.o
16obj-$(CONFIG_EPOLL) += eventpoll.o 16obj-$(CONFIG_EPOLL) += eventpoll.o
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 4e1b849f912f..f3418f7a6e9d 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -15,6 +15,7 @@
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/smp_lock.h> 16#include <linux/smp_lock.h>
17#include <linux/devfs_fs_kernel.h> 17#include <linux/devfs_fs_kernel.h>
18#include <linux/seq_file.h>
18 19
19#include <linux/kobject.h> 20#include <linux/kobject.h>
20#include <linux/kobj_map.h> 21#include <linux/kobj_map.h>
@@ -27,8 +28,6 @@
27 28
28static struct kobj_map *cdev_map; 29static struct kobj_map *cdev_map;
29 30
30#define MAX_PROBE_HASH 255 /* random */
31
32static DEFINE_MUTEX(chrdevs_lock); 31static DEFINE_MUTEX(chrdevs_lock);
33 32
34static struct char_device_struct { 33static struct char_device_struct {
@@ -39,93 +38,29 @@ static struct char_device_struct {
39 char name[64]; 38 char name[64];
40 struct file_operations *fops; 39 struct file_operations *fops;
41 struct cdev *cdev; /* will die */ 40 struct cdev *cdev; /* will die */
42} *chrdevs[MAX_PROBE_HASH]; 41} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];
43 42
44/* index in the above */ 43/* index in the above */
45static inline int major_to_index(int major) 44static inline int major_to_index(int major)
46{ 45{
47 return major % MAX_PROBE_HASH; 46 return major % CHRDEV_MAJOR_HASH_SIZE;
48}
49
50struct chrdev_info {
51 int index;
52 struct char_device_struct *cd;
53};
54
55void *get_next_chrdev(void *dev)
56{
57 struct chrdev_info *info;
58
59 if (dev == NULL) {
60 info = kmalloc(sizeof(*info), GFP_KERNEL);
61 if (!info)
62 goto out;
63 info->index=0;
64 info->cd = chrdevs[info->index];
65 if (info->cd)
66 goto out;
67 } else {
68 info = dev;
69 }
70
71 while (info->index < ARRAY_SIZE(chrdevs)) {
72 if (info->cd)
73 info->cd = info->cd->next;
74 if (info->cd)
75 goto out;
76 /*
77 * No devices on this chain, move to the next
78 */
79 info->index++;
80 info->cd = (info->index < ARRAY_SIZE(chrdevs)) ?
81 chrdevs[info->index] : NULL;
82 if (info->cd)
83 goto out;
84 }
85
86out:
87 return info;
88}
89
90void *acquire_chrdev_list(void)
91{
92 mutex_lock(&chrdevs_lock);
93 return get_next_chrdev(NULL);
94}
95
96void release_chrdev_list(void *dev)
97{
98 mutex_unlock(&chrdevs_lock);
99 kfree(dev);
100} 47}
101 48
49#ifdef CONFIG_PROC_FS
102 50
103int count_chrdev_list(void) 51void chrdev_show(struct seq_file *f, off_t offset)
104{ 52{
105 struct char_device_struct *cd; 53 struct char_device_struct *cd;
106 int i, count;
107
108 count = 0;
109 54
110 for (i = 0; i < ARRAY_SIZE(chrdevs) ; i++) { 55 if (offset < CHRDEV_MAJOR_HASH_SIZE) {
111 for (cd = chrdevs[i]; cd; cd = cd->next) 56 mutex_lock(&chrdevs_lock);
112 count++; 57 for (cd = chrdevs[offset]; cd; cd = cd->next)
58 seq_printf(f, "%3d %s\n", cd->major, cd->name);
59 mutex_unlock(&chrdevs_lock);
113 } 60 }
114
115 return count;
116} 61}
117 62
118int get_chrdev_info(void *dev, int *major, char **name) 63#endif /* CONFIG_PROC_FS */
119{
120 struct chrdev_info *info = dev;
121
122 if (info->cd == NULL)
123 return 1;
124
125 *major = info->cd->major;
126 *name = info->cd->name;
127 return 0;
128}
129 64
130/* 65/*
131 * Register a single major with a specified minor range. 66 * Register a single major with a specified minor range.
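For reference, the data structure being walked here is a small hash table of singly linked lists indexed by major % CHRDEV_MAJOR_HASH_SIZE; the new chrdev_show() simply prints one bucket per call, under chrdevs_lock, via seq_printf(). Below is a userspace sketch of that bucket walk, with CHRDEV_MAJOR_HASH_SIZE taken as 255 (the value of the old MAX_PROBE_HASH; the real constant lives in a header not shown in this diff) and the locking and seq_file plumbing omitted.

#include <stdio.h>
#include <stdlib.h>

#define CHRDEV_MAJOR_HASH_SIZE 255	/* assumed: value of the old MAX_PROBE_HASH */

struct char_device_struct {
	struct char_device_struct *next;
	int major;
	char name[64];
};

static struct char_device_struct *chrdevs[CHRDEV_MAJOR_HASH_SIZE];

static int major_to_index(int major)
{
	return major % CHRDEV_MAJOR_HASH_SIZE;
}

static void add_chrdev(int major, const char *name)
{
	struct char_device_struct *cd = calloc(1, sizeof(*cd));

	if (!cd)
		return;
	cd->major = major;
	snprintf(cd->name, sizeof(cd->name), "%s", name);
	cd->next = chrdevs[major_to_index(major)];
	chrdevs[major_to_index(major)] = cd;
}

/* Userspace stand-in for chrdev_show(): print every entry in one bucket. */
static void chrdev_show(int offset)
{
	struct char_device_struct *cd;

	if (offset < CHRDEV_MAJOR_HASH_SIZE)
		for (cd = chrdevs[offset]; cd; cd = cd->next)
			printf("%3d %s\n", cd->major, cd->name);
}

int main(void)
{
	add_chrdev(1, "mem");
	add_chrdev(4, "ttyS");
	add_chrdev(256, "example");	/* 256 % 255 == 1: hashes to mem's bucket */

	chrdev_show(major_to_index(1));
	chrdev_show(major_to_index(4));
	return 0;
}
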
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index cb68efba35db..8a2de038882e 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,21 @@
1Version 1.42
2------------
 3Fix slow oplock break when mounted to different servers at the same time,
 4the tids match, and we try to find a matching fid on the wrong server.
5
6Version 1.41
7------------
8Fix NTLMv2 security (can be enabled in /proc/fs/cifs) so customers can
9configure stronger authentication. Fix sfu symlinks so they can
10be followed (not just recognized). Fix wraparound of bcc on
11read responses when buffer size over 64K and also fix wrap of
12max smb buffer size when CIFSMaxBufSize over 64K. Fix oops in
13cifs_user_read and cifs_readpages (when EAGAIN on send of smb
14on socket is returned over and over). Add POSIX (advisory) byte range
15locking support (requires server with newest CIFS UNIX Extensions
16to the protocol implemented). Slow down negprot slightly in port 139
17RFC1001 case to give session_init time on buggy servers.
18
1Version 1.40 19Version 1.40
2------------ 20------------
3Use fsuid (fsgid) more consistently instead of uid (gid). Improve performance 21Use fsuid (fsgid) more consistently instead of uid (gid). Improve performance
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index 7384947a0f93..58c77254a23b 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -3,4 +3,4 @@
3# 3#
4obj-$(CONFIG_CIFS) += cifs.o 4obj-$(CONFIG_CIFS) += cifs.o
5 5
6cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o 6cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o ntlmssp.o
diff --git a/fs/cifs/README b/fs/cifs/README
index b0070d1b149d..b2b4d0803761 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -422,6 +422,13 @@ A partial list of the supported mount options follows:
422 nomapchars Do not translate any of these seven characters (default). 422 nomapchars Do not translate any of these seven characters (default).
423 nocase Request case insensitive path name matching (case 423 nocase Request case insensitive path name matching (case
 424 sensitive is the default if the server supports it). 424 sensitive is the default if the server supports it).
425 posixpaths If CIFS Unix extensions are supported, attempt to
426 negotiate posix path name support which allows certain
427 characters forbidden in typical CIFS filenames, without
428 requiring remapping. (default)
429 noposixpaths If CIFS Unix extensions are supported, do not request
430 posix path name support (this may cause servers to
 431 reject creating files with certain reserved characters).
425 nobrl Do not send byte range lock requests to the server. 432 nobrl Do not send byte range lock requests to the server.
426 This is necessary for certain applications that break 433 This is necessary for certain applications that break
427 with cifs style mandatory byte range locks (and most 434 with cifs style mandatory byte range locks (and most
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index a2c24858d40f..e7d63737e651 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * fs/cifs/cifsencrypt.c 2 * fs/cifs/cifsencrypt.c
3 * 3 *
4 * Copyright (C) International Business Machines Corp., 2005 4 * Copyright (C) International Business Machines Corp., 2005,2006
5 * Author(s): Steve French (sfrench@us.ibm.com) 5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * 6 *
7 * This library is free software; you can redistribute it and/or modify 7 * This library is free software; you can redistribute it and/or modify
@@ -36,7 +36,8 @@
36extern void mdfour(unsigned char *out, unsigned char *in, int n); 36extern void mdfour(unsigned char *out, unsigned char *in, int n);
37extern void E_md4hash(const unsigned char *passwd, unsigned char *p16); 37extern void E_md4hash(const unsigned char *passwd, unsigned char *p16);
38 38
39static int cifs_calculate_signature(const struct smb_hdr * cifs_pdu, const char * key, char * signature) 39static int cifs_calculate_signature(const struct smb_hdr * cifs_pdu,
40 const char * key, char * signature)
40{ 41{
41 struct MD5Context context; 42 struct MD5Context context;
42 43
@@ -56,9 +57,6 @@ int cifs_sign_smb(struct smb_hdr * cifs_pdu, struct TCP_Server_Info * server,
56 int rc = 0; 57 int rc = 0;
57 char smb_signature[20]; 58 char smb_signature[20];
58 59
59 /* BB remember to initialize sequence number elsewhere and initialize mac_signing key elsewhere BB */
60 /* BB remember to add code to save expected sequence number in midQ entry BB */
61
62 if((cifs_pdu == NULL) || (server == NULL)) 60 if((cifs_pdu == NULL) || (server == NULL))
63 return -EINVAL; 61 return -EINVAL;
64 62
@@ -85,20 +83,33 @@ int cifs_sign_smb(struct smb_hdr * cifs_pdu, struct TCP_Server_Info * server,
85static int cifs_calc_signature2(const struct kvec * iov, int n_vec, 83static int cifs_calc_signature2(const struct kvec * iov, int n_vec,
86 const char * key, char * signature) 84 const char * key, char * signature)
87{ 85{
88 struct MD5Context context; 86 struct MD5Context context;
89 87 int i;
90 if((iov == NULL) || (signature == NULL))
91 return -EINVAL;
92 88
93 MD5Init(&context); 89 if((iov == NULL) || (signature == NULL))
94 MD5Update(&context,key,CIFS_SESSION_KEY_SIZE+16); 90 return -EINVAL;
95 91
96/* MD5Update(&context,cifs_pdu->Protocol,cifs_pdu->smb_buf_length); */ /* BB FIXME BB */ 92 MD5Init(&context);
93 MD5Update(&context,key,CIFS_SESSION_KEY_SIZE+16);
94 for(i=0;i<n_vec;i++) {
95 if(iov[i].iov_base == NULL) {
96 cERROR(1,("null iovec entry"));
97 return -EIO;
98 } else if(iov[i].iov_len == 0)
99 break; /* bail out if we are sent nothing to sign */
100 /* The first entry includes a length field (which does not get
 101 signed) that occupies the first 4 bytes before the header */
102 if(i==0) {
103 if (iov[0].iov_len <= 8 ) /* cmd field at offset 9 */
104 break; /* nothing to sign or corrupt header */
105 MD5Update(&context,iov[0].iov_base+4, iov[0].iov_len-4);
106 } else
107 MD5Update(&context,iov[i].iov_base, iov[i].iov_len);
108 }
97 109
98 MD5Final(signature,&context); 110 MD5Final(signature,&context);
99 111
100 return -EOPNOTSUPP; 112 return 0;
101/* return 0; */
102} 113}
103 114
104 115
@@ -259,4 +270,5 @@ void CalcNTLMv2_response(const struct cifsSesInfo * ses,char * v2_session_respon
259/* hmac_md5_update(v2_session_response+16)client thing,8,&context); */ /* BB fix */ 270/* hmac_md5_update(v2_session_response+16)client thing,8,&context); */ /* BB fix */
260 271
261 hmac_md5_final(v2_session_response,&context); 272 hmac_md5_final(v2_session_response,&context);
273 cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); /* BB removeme BB */
262} 274}
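The reworked cifs_calc_signature2() now actually hashes the request: the session key goes in first, then each iovec in turn, except that the first vector's leading 4 bytes (the RFC1001 length field, which is not covered by SMB signing) are skipped. Below is a userspace sketch of the same loop, using OpenSSL's MD5_Init/MD5_Update/MD5_Final in place of the kernel's MD5 helpers; the key length and the sample PDU are invented purely for illustration.

#include <stdio.h>
#include <sys/uio.h>
#include <openssl/md5.h>

/* Same shape as the kernel loop: hash the key, then every iovec, skipping the
   4-byte RFC1001 length field at the start of the first vector. */
static int calc_signature2(const struct iovec *iov, int n_vec,
			   const unsigned char *key, size_t keylen,
			   unsigned char sig[MD5_DIGEST_LENGTH])
{
	MD5_CTX ctx;
	int i;

	if (iov == NULL || sig == NULL)
		return -1;

	MD5_Init(&ctx);
	MD5_Update(&ctx, key, keylen);
	for (i = 0; i < n_vec; i++) {
		if (iov[i].iov_base == NULL)
			return -1;
		if (iov[i].iov_len == 0)
			break;			/* nothing left to sign */
		if (i == 0) {
			if (iov[0].iov_len <= 8)
				break;		/* nothing to sign, or corrupt header */
			MD5_Update(&ctx, (const unsigned char *)iov[0].iov_base + 4,
				   iov[0].iov_len - 4);
		} else {
			MD5_Update(&ctx, iov[i].iov_base, iov[i].iov_len);
		}
	}
	MD5_Final(sig, &ctx);
	return 0;
}

int main(void)
{
	unsigned char key[] = "0123456789abcdef0123456789abcdef01234567";
	unsigned char pdu[64] = { 0, 0, 0, 60, 0xff, 'S', 'M', 'B' };
	struct iovec iov[1] = { { pdu, sizeof(pdu) } };
	unsigned char sig[MD5_DIGEST_LENGTH];

	if (calc_signature2(iov, 1, key, sizeof(key) - 1, sig) == 0)
		printf("first signature byte: 0x%02x\n", sig[0]);
	return 0;
}
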
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 4bbc544857bc..d4b713e5affb 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -93,13 +93,10 @@ cifs_read_super(struct super_block *sb, void *data,
93 int rc = 0; 93 int rc = 0;
94 94
95 sb->s_flags |= MS_NODIRATIME; /* and probably even noatime */ 95 sb->s_flags |= MS_NODIRATIME; /* and probably even noatime */
96 sb->s_fs_info = kmalloc(sizeof(struct cifs_sb_info),GFP_KERNEL); 96 sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info),GFP_KERNEL);
97 cifs_sb = CIFS_SB(sb); 97 cifs_sb = CIFS_SB(sb);
98 if(cifs_sb == NULL) 98 if(cifs_sb == NULL)
99 return -ENOMEM; 99 return -ENOMEM;
100 else
101 memset(cifs_sb,0,sizeof(struct cifs_sb_info));
102
103 100
104 rc = cifs_mount(sb, cifs_sb, data, devname); 101 rc = cifs_mount(sb, cifs_sb, data, devname);
105 102
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 74f405ae4da3..4e829dc672a6 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -99,5 +99,5 @@ extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t);
99extern ssize_t cifs_listxattr(struct dentry *, char *, size_t); 99extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
100extern int cifs_ioctl (struct inode * inode, struct file * filep, 100extern int cifs_ioctl (struct inode * inode, struct file * filep,
101 unsigned int command, unsigned long arg); 101 unsigned int command, unsigned long arg);
102#define CIFS_VERSION "1.40" 102#define CIFS_VERSION "1.42"
103#endif /* _CIFSFS_H */ 103#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 7bed27601ce5..006eb33bff5f 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * fs/cifs/cifsglob.h 2 * fs/cifs/cifsglob.h
3 * 3 *
4 * Copyright (C) International Business Machines Corp., 2002,2005 4 * Copyright (C) International Business Machines Corp., 2002,2006
5 * Author(s): Steve French (sfrench@us.ibm.com) 5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * 6 *
7 * This library is free software; you can redistribute it and/or modify 7 * This library is free software; you can redistribute it and/or modify
@@ -430,6 +430,15 @@ struct dir_notify_req {
430#define CIFS_LARGE_BUFFER 2 430#define CIFS_LARGE_BUFFER 2
431#define CIFS_IOVEC 4 /* array of response buffers */ 431#define CIFS_IOVEC 4 /* array of response buffers */
432 432
433/* Type of session setup needed */
434#define CIFS_PLAINTEXT 0
435#define CIFS_LANMAN 1
436#define CIFS_NTLM 2
437#define CIFS_NTLMSSP_NEG 3
438#define CIFS_NTLMSSP_AUTH 4
439#define CIFS_SPNEGO_INIT 5
440#define CIFS_SPNEGO_TARG 6
441
433/* 442/*
434 ***************************************************************** 443 *****************************************************************
435 * All constants go here 444 * All constants go here
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index cc2471094ca5..b2233ac05bd2 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -859,7 +859,10 @@ typedef struct smb_com_lock_req {
859 LOCKING_ANDX_RANGE Locks[1]; 859 LOCKING_ANDX_RANGE Locks[1];
860} __attribute__((packed)) LOCK_REQ; 860} __attribute__((packed)) LOCK_REQ;
861 861
862 862/* lock type */
863#define CIFS_RDLCK 0
864#define CIFS_WRLCK 1
865#define CIFS_UNLCK 2
863typedef struct cifs_posix_lock { 866typedef struct cifs_posix_lock {
864 __le16 lock_type; /* 0 = Read, 1 = Write, 2 = Unlock */ 867 __le16 lock_type; /* 0 = Read, 1 = Write, 2 = Unlock */
865 __le16 lock_flags; /* 1 = Wait (only valid for setlock) */ 868 __le16 lock_flags; /* 1 = Wait (only valid for setlock) */
@@ -1786,7 +1789,13 @@ typedef struct {
1786#define CIFS_UNIX_POSIX_ACL_CAP 0x00000002 /* support getfacl/setfacl */ 1789#define CIFS_UNIX_POSIX_ACL_CAP 0x00000002 /* support getfacl/setfacl */
1787#define CIFS_UNIX_XATTR_CAP 0x00000004 /* support new namespace */ 1790#define CIFS_UNIX_XATTR_CAP 0x00000004 /* support new namespace */
1788#define CIFS_UNIX_EXTATTR_CAP 0x00000008 /* support chattr/chflag */ 1791#define CIFS_UNIX_EXTATTR_CAP 0x00000008 /* support chattr/chflag */
1789#define CIFS_UNIX_POSIX_PATHNAMES_CAP 0x00000010 /* Use POSIX pathnames on the wire. */ 1792#define CIFS_UNIX_POSIX_PATHNAMES_CAP 0x00000010 /* Allow POSIX path chars */
1793#ifdef CONFIG_CIFS_POSIX
1794#define CIFS_UNIX_CAP_MASK 0x0000001b
1795#else
1796#define CIFS_UNIX_CAP_MASK 0x00000013
1797#endif /* CONFIG_CIFS_POSIX */
1798
1790 1799
1791#define CIFS_POSIX_EXTENSIONS 0x00000010 /* support for new QFSInfo */ 1800#define CIFS_POSIX_EXTENSIONS 0x00000010 /* support for new QFSInfo */
1792 1801
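The new CIFS_RDLCK/CIFS_WRLCK/CIFS_UNLCK constants are the wire encodings for cifs_posix_lock.lock_type (0 = Read, 1 = Write, 2 = Unlock, per the comment above), and CIFS_UNIX_CAP_MASK bounds which advertised Unix capabilities the client will echo back at mount time. A tiny sketch of mapping fcntl() lock types onto the wire values, mirroring what the cifs_lock() changes later in this patch do; the helper name is made up for illustration.

#include <fcntl.h>
#include <stdio.h>

#define CIFS_RDLCK 0
#define CIFS_WRLCK 1
#define CIFS_UNLCK 2

/* Hypothetical helper: map fcntl() lock types onto the wire encodings above. */
static int posix_lock_type(short fl_type)
{
	switch (fl_type) {
	case F_RDLCK: return CIFS_RDLCK;
	case F_WRLCK: return CIFS_WRLCK;
	case F_UNLCK: return CIFS_UNLCK;
	default:      return -1;
	}
}

int main(void)
{
	printf("F_RDLCK -> %d, F_WRLCK -> %d, F_UNLCK -> %d\n",
	       posix_lock_type(F_RDLCK), posix_lock_type(F_WRLCK),
	       posix_lock_type(F_UNLCK));
	return 0;
}
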
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 7b25463d3c14..2879ba343ca7 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * fs/cifs/cifsproto.h 2 * fs/cifs/cifsproto.h
3 * 3 *
4 * Copyright (c) International Business Machines Corp., 2002,2005 4 * Copyright (c) International Business Machines Corp., 2002,2006
5 * Author(s): Steve French (sfrench@us.ibm.com) 5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * 6 *
7 * This library is free software; you can redistribute it and/or modify 7 * This library is free software; you can redistribute it and/or modify
@@ -64,6 +64,14 @@ extern int map_smb_to_linux_error(struct smb_hdr *smb);
64extern void header_assemble(struct smb_hdr *, char /* command */ , 64extern void header_assemble(struct smb_hdr *, char /* command */ ,
65 const struct cifsTconInfo *, int /* length of 65 const struct cifsTconInfo *, int /* length of
66 fixed section (word count) in two byte units */); 66 fixed section (word count) in two byte units */);
67#ifdef CONFIG_CIFS_EXPERIMENTAL
68extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
69 struct cifsSesInfo *ses,
70 void ** request_buf);
71extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
72 const int stage, int * pNTLMv2_flg,
73 const struct nls_table *nls_cp);
74#endif
67extern __u16 GetNextMid(struct TCP_Server_Info *server); 75extern __u16 GetNextMid(struct TCP_Server_Info *server);
68extern struct oplock_q_entry * AllocOplockQEntry(struct inode *, u16, 76extern struct oplock_q_entry * AllocOplockQEntry(struct inode *, u16,
69 struct cifsTconInfo *); 77 struct cifsTconInfo *);
@@ -257,7 +265,10 @@ extern int CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
257 const __u64 offset, const __u32 numUnlock, 265 const __u64 offset, const __u32 numUnlock,
258 const __u32 numLock, const __u8 lockType, 266 const __u32 numLock, const __u8 lockType,
259 const int waitFlag); 267 const int waitFlag);
260 268extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
269 const __u16 smb_file_id, const int get_flag,
270 const __u64 len, const __u64 offset,
271 const __u16 lock_type, const int waitFlag);
261extern int CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon); 272extern int CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon);
262extern int CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses); 273extern int CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses);
263 274
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index a243fe2792d5..d705500aa283 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * fs/cifs/cifssmb.c 2 * fs/cifs/cifssmb.c
3 * 3 *
4 * Copyright (C) International Business Machines Corp., 2002,2005 4 * Copyright (C) International Business Machines Corp., 2002,2006
5 * Author(s): Steve French (sfrench@us.ibm.com) 5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * 6 *
7 * Contains the routines for constructing the SMB PDUs themselves 7 * Contains the routines for constructing the SMB PDUs themselves
@@ -186,7 +186,35 @@ small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
186 cifs_stats_inc(&tcon->num_smbs_sent); 186 cifs_stats_inc(&tcon->num_smbs_sent);
187 187
188 return rc; 188 return rc;
189} 189}
190
191#ifdef CONFIG_CIFS_EXPERIMENTAL
192int
193small_smb_init_no_tc(const int smb_command, const int wct,
194 struct cifsSesInfo *ses, void **request_buf)
195{
196 int rc;
197 struct smb_hdr * buffer;
198
199 rc = small_smb_init(smb_command, wct, NULL, request_buf);
200 if(rc)
201 return rc;
202
203 buffer = (struct smb_hdr *)*request_buf;
204 buffer->Mid = GetNextMid(ses->server);
205 if (ses->capabilities & CAP_UNICODE)
206 buffer->Flags2 |= SMBFLG2_UNICODE;
207 if (ses->capabilities & CAP_STATUS32)
208 buffer->Flags2 |= SMBFLG2_ERR_STATUS;
209
210 /* uid, tid can stay at zero as set in header assemble */
211
212 /* BB add support for turning on the signing when
213 this function is used after 1st of session setup requests */
214
215 return rc;
216}
217#endif /* CONFIG_CIFS_EXPERIMENTAL */
190 218
191/* If the return code is zero, this function must fill in request_buf pointer */ 219/* If the return code is zero, this function must fill in request_buf pointer */
192static int 220static int
@@ -1042,7 +1070,7 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
1042 } 1070 }
1043 } 1071 }
1044 1072
1045 cifs_small_buf_release(pSMB); 1073/* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
1046 if(*buf) { 1074 if(*buf) {
1047 if(resp_buf_type == CIFS_SMALL_BUFFER) 1075 if(resp_buf_type == CIFS_SMALL_BUFFER)
1048 cifs_small_buf_release(iov[0].iov_base); 1076 cifs_small_buf_release(iov[0].iov_base);
@@ -1246,7 +1274,7 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
1246 *nbytes += le16_to_cpu(pSMBr->Count); 1274 *nbytes += le16_to_cpu(pSMBr->Count);
1247 } 1275 }
1248 1276
1249 cifs_small_buf_release(pSMB); 1277/* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
1250 if(resp_buf_type == CIFS_SMALL_BUFFER) 1278 if(resp_buf_type == CIFS_SMALL_BUFFER)
1251 cifs_small_buf_release(iov[0].iov_base); 1279 cifs_small_buf_release(iov[0].iov_base);
1252 else if(resp_buf_type == CIFS_LARGE_BUFFER) 1280 else if(resp_buf_type == CIFS_LARGE_BUFFER)
@@ -1325,6 +1353,85 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
1325} 1353}
1326 1354
1327int 1355int
1356CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
1357 const __u16 smb_file_id, const int get_flag, const __u64 len,
1358 const __u64 lkoffset, const __u16 lock_type, const int waitFlag)
1359{
1360 struct smb_com_transaction2_sfi_req *pSMB = NULL;
1361 struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
1362 char *data_offset;
1363 struct cifs_posix_lock *parm_data;
1364 int rc = 0;
1365 int bytes_returned = 0;
1366 __u16 params, param_offset, offset, byte_count, count;
1367
1368 cFYI(1, ("Posix Lock"));
1369 rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
1370
1371 if (rc)
1372 return rc;
1373
1374 pSMBr = (struct smb_com_transaction2_sfi_rsp *)pSMB;
1375
1376 params = 6;
1377 pSMB->MaxSetupCount = 0;
1378 pSMB->Reserved = 0;
1379 pSMB->Flags = 0;
1380 pSMB->Timeout = 0;
1381 pSMB->Reserved2 = 0;
1382 param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
1383 offset = param_offset + params;
1384
1385 data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
1386
1387 count = sizeof(struct cifs_posix_lock);
1388 pSMB->MaxParameterCount = cpu_to_le16(2);
1389 pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB PDU from sess */
1390 pSMB->SetupCount = 1;
1391 pSMB->Reserved3 = 0;
1392 if(get_flag)
1393 pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
1394 else
1395 pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
1396 byte_count = 3 /* pad */ + params + count;
1397 pSMB->DataCount = cpu_to_le16(count);
1398 pSMB->ParameterCount = cpu_to_le16(params);
1399 pSMB->TotalDataCount = pSMB->DataCount;
1400 pSMB->TotalParameterCount = pSMB->ParameterCount;
1401 pSMB->ParameterOffset = cpu_to_le16(param_offset);
1402 parm_data = (struct cifs_posix_lock *)
1403 (((char *) &pSMB->hdr.Protocol) + offset);
1404
1405 parm_data->lock_type = cpu_to_le16(lock_type);
1406 if(waitFlag)
1407 parm_data->lock_flags = 1;
1408 parm_data->pid = cpu_to_le32(current->tgid);
1409 parm_data->start = lkoffset;
1410 parm_data->length = len; /* normalize negative numbers */
1411
1412 pSMB->DataOffset = cpu_to_le16(offset);
1413 pSMB->Fid = smb_file_id;
1414 pSMB->InformationLevel = cpu_to_le16(SMB_SET_POSIX_LOCK);
1415 pSMB->Reserved4 = 0;
1416 pSMB->hdr.smb_buf_length += byte_count;
1417 pSMB->ByteCount = cpu_to_le16(byte_count);
1418 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
1419 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
1420 if (rc) {
1421 cFYI(1, ("Send error in Posix Lock = %d", rc));
1422 }
1423
1424 if (pSMB)
1425 cifs_small_buf_release(pSMB);
1426
1427 /* Note: On -EAGAIN error only caller can retry on handle based calls
1428 since file handle passed in no longer valid */
1429
1430 return rc;
1431}
1432
1433
1434int
1328CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id) 1435CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
1329{ 1436{
1330 int rc = 0; 1437 int rc = 0;
@@ -2578,7 +2685,7 @@ qsec_out:
2578 cifs_small_buf_release(iov[0].iov_base); 2685 cifs_small_buf_release(iov[0].iov_base);
2579 else if(buf_type == CIFS_LARGE_BUFFER) 2686 else if(buf_type == CIFS_LARGE_BUFFER)
2580 cifs_buf_release(iov[0].iov_base); 2687 cifs_buf_release(iov[0].iov_base);
2581 cifs_small_buf_release(pSMB); 2688/* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
2582 return rc; 2689 return rc;
2583} 2690}
2584 2691
@@ -2954,7 +3061,8 @@ findFirstRetry:
2954 pSMB->TotalParameterCount = cpu_to_le16(params); 3061 pSMB->TotalParameterCount = cpu_to_le16(params);
2955 pSMB->ParameterCount = pSMB->TotalParameterCount; 3062 pSMB->ParameterCount = pSMB->TotalParameterCount;
2956 pSMB->ParameterOffset = cpu_to_le16( 3063 pSMB->ParameterOffset = cpu_to_le16(
2957 offsetof(struct smb_com_transaction2_ffirst_req, SearchAttributes) - 4); 3064 offsetof(struct smb_com_transaction2_ffirst_req, SearchAttributes)
3065 - 4);
2958 pSMB->DataCount = 0; 3066 pSMB->DataCount = 0;
2959 pSMB->DataOffset = 0; 3067 pSMB->DataOffset = 0;
2960 pSMB->SetupCount = 1; /* one byte, no need to make endian neutral */ 3068 pSMB->SetupCount = 1; /* one byte, no need to make endian neutral */
@@ -2977,12 +3085,12 @@ findFirstRetry:
2977 (struct smb_hdr *) pSMBr, &bytes_returned, 0); 3085 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
2978 cifs_stats_inc(&tcon->num_ffirst); 3086 cifs_stats_inc(&tcon->num_ffirst);
2979 3087
2980 if (rc) {/* BB add logic to retry regular search if Unix search rejected unexpectedly by server */ 3088 if (rc) {/* BB add logic to retry regular search if Unix search
3089 rejected unexpectedly by server */
2981 /* BB Add code to handle unsupported level rc */ 3090 /* BB Add code to handle unsupported level rc */
2982 cFYI(1, ("Error in FindFirst = %d", rc)); 3091 cFYI(1, ("Error in FindFirst = %d", rc));
2983 3092
2984 if (pSMB) 3093 cifs_buf_release(pSMB);
2985 cifs_buf_release(pSMB);
2986 3094
2987 /* BB eventually could optimize out free and realloc of buf */ 3095 /* BB eventually could optimize out free and realloc of buf */
2988 /* for this case */ 3096 /* for this case */
@@ -2998,6 +3106,7 @@ findFirstRetry:
2998 psrch_inf->unicode = FALSE; 3106 psrch_inf->unicode = FALSE;
2999 3107
3000 psrch_inf->ntwrk_buf_start = (char *)pSMBr; 3108 psrch_inf->ntwrk_buf_start = (char *)pSMBr;
3109 psrch_inf->smallBuf = 0;
3001 psrch_inf->srch_entries_start = 3110 psrch_inf->srch_entries_start =
3002 (char *) &pSMBr->hdr.Protocol + 3111 (char *) &pSMBr->hdr.Protocol +
3003 le16_to_cpu(pSMBr->t2.DataOffset); 3112 le16_to_cpu(pSMBr->t2.DataOffset);
@@ -3118,9 +3227,14 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
3118 parms = (T2_FNEXT_RSP_PARMS *)response_data; 3227 parms = (T2_FNEXT_RSP_PARMS *)response_data;
3119 response_data = (char *)&pSMBr->hdr.Protocol + 3228 response_data = (char *)&pSMBr->hdr.Protocol +
3120 le16_to_cpu(pSMBr->t2.DataOffset); 3229 le16_to_cpu(pSMBr->t2.DataOffset);
3121 cifs_buf_release(psrch_inf->ntwrk_buf_start); 3230 if(psrch_inf->smallBuf)
3231 cifs_small_buf_release(
3232 psrch_inf->ntwrk_buf_start);
3233 else
3234 cifs_buf_release(psrch_inf->ntwrk_buf_start);
3122 psrch_inf->srch_entries_start = response_data; 3235 psrch_inf->srch_entries_start = response_data;
3123 psrch_inf->ntwrk_buf_start = (char *)pSMB; 3236 psrch_inf->ntwrk_buf_start = (char *)pSMB;
3237 psrch_inf->smallBuf = 0;
3124 if(parms->EndofSearch) 3238 if(parms->EndofSearch)
3125 psrch_inf->endOfSearch = TRUE; 3239 psrch_inf->endOfSearch = TRUE;
3126 else 3240 else
@@ -3834,6 +3948,7 @@ CIFSSMBSetFSUnixInfo(const int xid, struct cifsTconInfo *tcon, __u64 cap)
3834 3948
3835 cFYI(1, ("In SETFSUnixInfo")); 3949 cFYI(1, ("In SETFSUnixInfo"));
3836SETFSUnixRetry: 3950SETFSUnixRetry:
3951 /* BB switch to small buf init to save memory */
3837 rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, 3952 rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
3838 (void **) &pSMBr); 3953 (void **) &pSMBr);
3839 if (rc) 3954 if (rc)
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 2a0c1f4ca0ae..0b86d5ca9014 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * fs/cifs/connect.c 2 * fs/cifs/connect.c
3 * 3 *
4 * Copyright (C) International Business Machines Corp., 2002,2005 4 * Copyright (C) International Business Machines Corp., 2002,2006
5 * Author(s): Steve French (sfrench@us.ibm.com) 5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * 6 *
7 * This library is free software; you can redistribute it and/or modify 7 * This library is free software; you can redistribute it and/or modify
@@ -564,7 +564,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
564 564
565 565
566 dump_smb(smb_buffer, length); 566 dump_smb(smb_buffer, length);
567 if (checkSMB (smb_buffer, smb_buffer->Mid, total_read+4)) { 567 if (checkSMB(smb_buffer, smb_buffer->Mid, total_read+4)) {
568 cifs_dump_mem("Bad SMB: ", smb_buffer, 48); 568 cifs_dump_mem("Bad SMB: ", smb_buffer, 48);
569 continue; 569 continue;
570 } 570 }
@@ -1476,6 +1476,14 @@ ipv4_connect(struct sockaddr_in *psin_server, struct socket **csocket,
1476 rc = smb_send(*csocket, smb_buf, 0x44, 1476 rc = smb_send(*csocket, smb_buf, 0x44,
1477 (struct sockaddr *)psin_server); 1477 (struct sockaddr *)psin_server);
1478 kfree(ses_init_buf); 1478 kfree(ses_init_buf);
1479 msleep(1); /* RFC1001 layer in at least one server
1480 requires very short break before negprot
1481 presumably because not expecting negprot
1482 to follow so fast. This is a simple
1483 solution that works without
1484 complicating the code and causes no
1485 significant slowing down on mount
1486 for everyone else */
1479 } 1487 }
1480 /* else the negprot may still work without this 1488 /* else the negprot may still work without this
1481 even though malloc failed */ 1489 even though malloc failed */
@@ -1920,27 +1928,34 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
1920 cifs_sb->tcon = tcon; 1928 cifs_sb->tcon = tcon;
1921 tcon->ses = pSesInfo; 1929 tcon->ses = pSesInfo;
1922 1930
1923 /* do not care if following two calls succeed - informational only */ 1931 /* do not care if following two calls succeed - informational */
1924 CIFSSMBQFSDeviceInfo(xid, tcon); 1932 CIFSSMBQFSDeviceInfo(xid, tcon);
1925 CIFSSMBQFSAttributeInfo(xid, tcon); 1933 CIFSSMBQFSAttributeInfo(xid, tcon);
1934
1926 if (tcon->ses->capabilities & CAP_UNIX) { 1935 if (tcon->ses->capabilities & CAP_UNIX) {
1927 if(!CIFSSMBQFSUnixInfo(xid, tcon)) { 1936 if(!CIFSSMBQFSUnixInfo(xid, tcon)) {
1928 if(!volume_info.no_psx_acl) { 1937 __u64 cap =
1929 if(CIFS_UNIX_POSIX_ACL_CAP & 1938 le64_to_cpu(tcon->fsUnixInfo.Capability);
1930 le64_to_cpu(tcon->fsUnixInfo.Capability)) 1939 cap &= CIFS_UNIX_CAP_MASK;
1931 cFYI(1,("server negotiated posix acl support")); 1940 if(volume_info.no_psx_acl)
1932 sb->s_flags |= MS_POSIXACL; 1941 cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
1942 else if(CIFS_UNIX_POSIX_ACL_CAP & cap) {
1943 cFYI(1,("negotiated posix acl support"));
1944 sb->s_flags |= MS_POSIXACL;
1933 } 1945 }
1934 1946
1935 /* Try and negotiate POSIX pathnames if we can. */ 1947 if(volume_info.posix_paths == 0)
1936 if (volume_info.posix_paths && (CIFS_UNIX_POSIX_PATHNAMES_CAP & 1948 cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
1937 le64_to_cpu(tcon->fsUnixInfo.Capability))) { 1949 else if(cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
1938 if (!CIFSSMBSetFSUnixInfo(xid, tcon, CIFS_UNIX_POSIX_PATHNAMES_CAP)) { 1950 cFYI(1,("negotiate posix pathnames"));
1939 cFYI(1,("negotiated posix pathnames support")); 1951 cifs_sb->mnt_cifs_flags |=
1940 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS; 1952 CIFS_MOUNT_POSIX_PATHS;
1941 } else { 1953 }
1942 cFYI(1,("posix pathnames support requested but not supported")); 1954
1943 } 1955 cFYI(1,("Negotiate caps 0x%x",(int)cap));
1956
1957 if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
1958 cFYI(1,("setting capabilities failed"));
1944 } 1959 }
1945 } 1960 }
1946 } 1961 }
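The mount-time negotiation above boils down to: take the server's advertised Unix capability bits, AND them with CIFS_UNIX_CAP_MASK, clear the bits that mount options disabled, then send the result back via CIFSSMBSetFSUnixInfo(). Below is a standalone sketch of just that mask computation; the bit values are copied from the cifspdu.h hunk earlier in this patch, the CONFIG_CIFS_POSIX mask value is assumed, and the MS_POSIXACL / mnt_cifs_flags side effects of the real code are omitted.

#include <stdio.h>
#include <stdint.h>

#define CIFS_UNIX_POSIX_ACL_CAP        0x00000002ULL
#define CIFS_UNIX_POSIX_PATHNAMES_CAP  0x00000010ULL
#define CIFS_UNIX_CAP_MASK             0x0000001bULL	/* CONFIG_CIFS_POSIX value */

/* Illustrative stand-in for the capability negotiation in cifs_mount() above. */
static uint64_t negotiate_caps(uint64_t server_caps, int no_psx_acl, int posix_paths)
{
	uint64_t cap = server_caps & CIFS_UNIX_CAP_MASK;

	if (no_psx_acl)
		cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
	if (!posix_paths)
		cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
	return cap;	/* what would be handed to CIFSSMBSetFSUnixInfo() */
}

int main(void)
{
	/* Server advertises every bit; mount used the noposixpaths option. */
	printf("negotiated caps: 0x%llx\n",
	       (unsigned long long)negotiate_caps(0xffULL, 0, 0));
	return 0;
}
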
@@ -2278,6 +2293,8 @@ CIFSSpnegoSessSetup(unsigned int xid, struct cifsSesInfo *ses,
2278 smb_buffer->Mid = GetNextMid(ses->server); 2293 smb_buffer->Mid = GetNextMid(ses->server);
2279 pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; 2294 pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
2280 pSMB->req.AndXCommand = 0xFF; 2295 pSMB->req.AndXCommand = 0xFF;
2296 if(ses->server->maxBuf > 64*1024)
2297 ses->server->maxBuf = (64*1023);
2281 pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf); 2298 pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
2282 pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq); 2299 pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
2283 2300
@@ -2525,7 +2542,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
2525 __u32 negotiate_flags, capabilities; 2542 __u32 negotiate_flags, capabilities;
2526 __u16 count; 2543 __u16 count;
2527 2544
2528 cFYI(1, ("In NTLMSSP sesssetup (negotiate) ")); 2545 cFYI(1, ("In NTLMSSP sesssetup (negotiate)"));
2529 if(ses == NULL) 2546 if(ses == NULL)
2530 return -EINVAL; 2547 return -EINVAL;
2531 domain = ses->domainName; 2548 domain = ses->domainName;
@@ -2575,7 +2592,8 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
2575 SecurityBlob->MessageType = NtLmNegotiate; 2592 SecurityBlob->MessageType = NtLmNegotiate;
2576 negotiate_flags = 2593 negotiate_flags =
2577 NTLMSSP_NEGOTIATE_UNICODE | NTLMSSP_NEGOTIATE_OEM | 2594 NTLMSSP_NEGOTIATE_UNICODE | NTLMSSP_NEGOTIATE_OEM |
2578 NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_NTLM | 0x80000000 | 2595 NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_NTLM |
2596 NTLMSSP_NEGOTIATE_56 |
2579 /* NTLMSSP_NEGOTIATE_ALWAYS_SIGN | */ NTLMSSP_NEGOTIATE_128; 2597 /* NTLMSSP_NEGOTIATE_ALWAYS_SIGN | */ NTLMSSP_NEGOTIATE_128;
2580 if(sign_CIFS_PDUs) 2598 if(sign_CIFS_PDUs)
2581 negotiate_flags |= NTLMSSP_NEGOTIATE_SIGN; 2599 negotiate_flags |= NTLMSSP_NEGOTIATE_SIGN;
@@ -2588,26 +2606,11 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
2588 SecurityBlob->WorkstationName.Length = 0; 2606 SecurityBlob->WorkstationName.Length = 0;
2589 SecurityBlob->WorkstationName.MaximumLength = 0; 2607 SecurityBlob->WorkstationName.MaximumLength = 0;
2590 2608
2591 if (domain == NULL) { 2609 /* Domain not sent on first Sesssetup in NTLMSSP, instead it is sent
2592 SecurityBlob->DomainName.Buffer = 0; 2610 along with username on auth request (ie the response to challenge) */
2593 SecurityBlob->DomainName.Length = 0; 2611 SecurityBlob->DomainName.Buffer = 0;
2594 SecurityBlob->DomainName.MaximumLength = 0; 2612 SecurityBlob->DomainName.Length = 0;
2595 } else { 2613 SecurityBlob->DomainName.MaximumLength = 0;
2596 __u16 len;
2597 negotiate_flags |= NTLMSSP_NEGOTIATE_DOMAIN_SUPPLIED;
2598 strncpy(bcc_ptr, domain, 63);
2599 len = strnlen(domain, 64);
2600 SecurityBlob->DomainName.MaximumLength =
2601 cpu_to_le16(len);
2602 SecurityBlob->DomainName.Buffer =
2603 cpu_to_le32((long) &SecurityBlob->
2604 DomainString -
2605 (long) &SecurityBlob->Signature);
2606 bcc_ptr += len;
2607 SecurityBlobLength += len;
2608 SecurityBlob->DomainName.Length =
2609 cpu_to_le16(len);
2610 }
2611 if (ses->capabilities & CAP_UNICODE) { 2614 if (ses->capabilities & CAP_UNICODE) {
2612 if ((long) bcc_ptr % 2) { 2615 if ((long) bcc_ptr % 2) {
2613 *bcc_ptr = 0; 2616 *bcc_ptr = 0;
@@ -2677,7 +2680,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
2677 SecurityBlob2->MessageType)); 2680 SecurityBlob2->MessageType));
2678 } else if (ses) { 2681 } else if (ses) {
2679 ses->Suid = smb_buffer_response->Uid; /* UID left in le format */ 2682 ses->Suid = smb_buffer_response->Uid; /* UID left in le format */
2680 cFYI(1, ("UID = %d ", ses->Suid)); 2683 cFYI(1, ("UID = %d", ses->Suid));
2681 if ((pSMBr->resp.hdr.WordCount == 3) 2684 if ((pSMBr->resp.hdr.WordCount == 3)
2682 || ((pSMBr->resp.hdr.WordCount == 4) 2685 || ((pSMBr->resp.hdr.WordCount == 4)
2683 && (blob_len < 2686 && (blob_len <
@@ -2685,17 +2688,17 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
2685 2688
2686 if (pSMBr->resp.hdr.WordCount == 4) { 2689 if (pSMBr->resp.hdr.WordCount == 4) {
2687 bcc_ptr += blob_len; 2690 bcc_ptr += blob_len;
2688 cFYI(1, 2691 cFYI(1, ("Security Blob Length %d",
2689 ("Security Blob Length %d ",
2690 blob_len)); 2692 blob_len));
2691 } 2693 }
2692 2694
2693 cFYI(1, ("NTLMSSP Challenge rcvd ")); 2695 cFYI(1, ("NTLMSSP Challenge rcvd"));
2694 2696
2695 memcpy(ses->server->cryptKey, 2697 memcpy(ses->server->cryptKey,
2696 SecurityBlob2->Challenge, 2698 SecurityBlob2->Challenge,
2697 CIFS_CRYPTO_KEY_SIZE); 2699 CIFS_CRYPTO_KEY_SIZE);
2698 if(SecurityBlob2->NegotiateFlags & cpu_to_le32(NTLMSSP_NEGOTIATE_NTLMV2)) 2700 if(SecurityBlob2->NegotiateFlags &
2701 cpu_to_le32(NTLMSSP_NEGOTIATE_NTLMV2))
2699 *pNTLMv2_flag = TRUE; 2702 *pNTLMv2_flag = TRUE;
2700 2703
2701 if((SecurityBlob2->NegotiateFlags & 2704 if((SecurityBlob2->NegotiateFlags &
@@ -2818,7 +2821,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
2818 bcc_ptr++; 2821 bcc_ptr++;
2819 } else 2822 } else
2820 cFYI(1, 2823 cFYI(1,
2821 ("Variable field of length %d extends beyond end of smb ", 2824 ("Variable field of length %d extends beyond end of smb",
2822 len)); 2825 len));
2823 } 2826 }
2824 } else { 2827 } else {
@@ -2830,7 +2833,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
2830 } 2833 }
2831 } else { 2834 } else {
2832 cERROR(1, 2835 cERROR(1,
2833 (" Invalid Word count %d: ", 2836 (" Invalid Word count %d:",
2834 smb_buffer_response->WordCount)); 2837 smb_buffer_response->WordCount));
2835 rc = -EIO; 2838 rc = -EIO;
2836 } 2839 }
@@ -3447,7 +3450,7 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
3447 if (extended_security 3450 if (extended_security
3448 && (pSesInfo->capabilities & CAP_EXTENDED_SECURITY) 3451 && (pSesInfo->capabilities & CAP_EXTENDED_SECURITY)
3449 && (pSesInfo->server->secType == NTLMSSP)) { 3452 && (pSesInfo->server->secType == NTLMSSP)) {
3450 cFYI(1, ("New style sesssetup ")); 3453 cFYI(1, ("New style sesssetup"));
3451 rc = CIFSSpnegoSessSetup(xid, pSesInfo, 3454 rc = CIFSSpnegoSessSetup(xid, pSesInfo,
3452 NULL /* security blob */, 3455 NULL /* security blob */,
3453 0 /* blob length */, 3456 0 /* blob length */,
@@ -3455,7 +3458,7 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
3455 } else if (extended_security 3458 } else if (extended_security
3456 && (pSesInfo->capabilities & CAP_EXTENDED_SECURITY) 3459 && (pSesInfo->capabilities & CAP_EXTENDED_SECURITY)
3457 && (pSesInfo->server->secType == RawNTLMSSP)) { 3460 && (pSesInfo->server->secType == RawNTLMSSP)) {
3458 cFYI(1, ("NTLMSSP sesssetup ")); 3461 cFYI(1, ("NTLMSSP sesssetup"));
3459 rc = CIFSNTLMSSPNegotiateSessSetup(xid, 3462 rc = CIFSNTLMSSPNegotiateSessSetup(xid,
3460 pSesInfo, 3463 pSesInfo,
3461 &ntlmv2_flag, 3464 &ntlmv2_flag,
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 632561dd9c50..1d0ca3eaaca5 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -48,13 +48,14 @@ build_path_from_dentry(struct dentry *direntry)
48 struct dentry *temp; 48 struct dentry *temp;
49 int namelen = 0; 49 int namelen = 0;
50 char *full_path; 50 char *full_path;
51 char dirsep = CIFS_DIR_SEP(CIFS_SB(direntry->d_sb)); 51 char dirsep;
52 52
53 if(direntry == NULL) 53 if(direntry == NULL)
54 return NULL; /* not much we can do if dentry is freed and 54 return NULL; /* not much we can do if dentry is freed and
55 we need to reopen the file after it was closed implicitly 55 we need to reopen the file after it was closed implicitly
56 when the server crashed */ 56 when the server crashed */
57 57
58 dirsep = CIFS_DIR_SEP(CIFS_SB(direntry->d_sb));
58cifs_bp_rename_retry: 59cifs_bp_rename_retry:
59 for (temp = direntry; !IS_ROOT(temp);) { 60 for (temp = direntry; !IS_ROOT(temp);) {
60 namelen += (1 + temp->d_name.len); 61 namelen += (1 + temp->d_name.len);
@@ -255,12 +256,10 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
255 CIFSSMBClose(xid, pTcon, fileHandle); 256 CIFSSMBClose(xid, pTcon, fileHandle);
256 } else if(newinode) { 257 } else if(newinode) {
257 pCifsFile = 258 pCifsFile =
258 kmalloc(sizeof (struct cifsFileInfo), GFP_KERNEL); 259 kzalloc(sizeof (struct cifsFileInfo), GFP_KERNEL);
259 260
260 if(pCifsFile == NULL) 261 if(pCifsFile == NULL)
261 goto cifs_create_out; 262 goto cifs_create_out;
262 memset((char *)pCifsFile, 0,
263 sizeof (struct cifsFileInfo));
264 pCifsFile->netfid = fileHandle; 263 pCifsFile->netfid = fileHandle;
265 pCifsFile->pid = current->tgid; 264 pCifsFile->pid = current->tgid;
266 pCifsFile->pInode = newinode; 265 pCifsFile->pInode = newinode;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index fb49aef1f2ec..5c497c529772 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -555,7 +555,10 @@ int cifs_closedir(struct inode *inode, struct file *file)
555 if (ptmp) { 555 if (ptmp) {
556 cFYI(1, ("closedir free smb buf in srch struct")); 556 cFYI(1, ("closedir free smb buf in srch struct"));
557 pCFileStruct->srch_inf.ntwrk_buf_start = NULL; 557 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
558 cifs_buf_release(ptmp); 558 if(pCFileStruct->srch_inf.smallBuf)
559 cifs_small_buf_release(ptmp);
560 else
561 cifs_buf_release(ptmp);
559 } 562 }
560 ptmp = pCFileStruct->search_resume_name; 563 ptmp = pCFileStruct->search_resume_name;
561 if (ptmp) { 564 if (ptmp) {
@@ -574,13 +577,14 @@ int cifs_closedir(struct inode *inode, struct file *file)
574int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) 577int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
575{ 578{
576 int rc, xid; 579 int rc, xid;
577 __u32 lockType = LOCKING_ANDX_LARGE_FILES;
578 __u32 numLock = 0; 580 __u32 numLock = 0;
579 __u32 numUnlock = 0; 581 __u32 numUnlock = 0;
580 __u64 length; 582 __u64 length;
581 int wait_flag = FALSE; 583 int wait_flag = FALSE;
582 struct cifs_sb_info *cifs_sb; 584 struct cifs_sb_info *cifs_sb;
583 struct cifsTconInfo *pTcon; 585 struct cifsTconInfo *pTcon;
586 __u16 netfid;
587 __u8 lockType = LOCKING_ANDX_LARGE_FILES;
584 588
585 length = 1 + pfLock->fl_end - pfLock->fl_start; 589 length = 1 + pfLock->fl_end - pfLock->fl_start;
586 rc = -EACCES; 590 rc = -EACCES;
@@ -592,11 +596,11 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
592 pfLock->fl_end)); 596 pfLock->fl_end));
593 597
594 if (pfLock->fl_flags & FL_POSIX) 598 if (pfLock->fl_flags & FL_POSIX)
595 cFYI(1, ("Posix ")); 599 cFYI(1, ("Posix"));
596 if (pfLock->fl_flags & FL_FLOCK) 600 if (pfLock->fl_flags & FL_FLOCK)
597 cFYI(1, ("Flock ")); 601 cFYI(1, ("Flock"));
598 if (pfLock->fl_flags & FL_SLEEP) { 602 if (pfLock->fl_flags & FL_SLEEP) {
599 cFYI(1, ("Blocking lock ")); 603 cFYI(1, ("Blocking lock"));
600 wait_flag = TRUE; 604 wait_flag = TRUE;
601 } 605 }
602 if (pfLock->fl_flags & FL_ACCESS) 606 if (pfLock->fl_flags & FL_ACCESS)
@@ -612,21 +616,23 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
612 cFYI(1, ("F_WRLCK ")); 616 cFYI(1, ("F_WRLCK "));
613 numLock = 1; 617 numLock = 1;
614 } else if (pfLock->fl_type == F_UNLCK) { 618 } else if (pfLock->fl_type == F_UNLCK) {
615 cFYI(1, ("F_UNLCK ")); 619 cFYI(1, ("F_UNLCK"));
616 numUnlock = 1; 620 numUnlock = 1;
621 /* Check if unlock includes more than
622 one lock range */
617 } else if (pfLock->fl_type == F_RDLCK) { 623 } else if (pfLock->fl_type == F_RDLCK) {
618 cFYI(1, ("F_RDLCK ")); 624 cFYI(1, ("F_RDLCK"));
619 lockType |= LOCKING_ANDX_SHARED_LOCK; 625 lockType |= LOCKING_ANDX_SHARED_LOCK;
620 numLock = 1; 626 numLock = 1;
621 } else if (pfLock->fl_type == F_EXLCK) { 627 } else if (pfLock->fl_type == F_EXLCK) {
622 cFYI(1, ("F_EXLCK ")); 628 cFYI(1, ("F_EXLCK"));
623 numLock = 1; 629 numLock = 1;
624 } else if (pfLock->fl_type == F_SHLCK) { 630 } else if (pfLock->fl_type == F_SHLCK) {
625 cFYI(1, ("F_SHLCK ")); 631 cFYI(1, ("F_SHLCK"));
626 lockType |= LOCKING_ANDX_SHARED_LOCK; 632 lockType |= LOCKING_ANDX_SHARED_LOCK;
627 numLock = 1; 633 numLock = 1;
628 } else 634 } else
629 cFYI(1, ("Unknown type of lock ")); 635 cFYI(1, ("Unknown type of lock"));
630 636
631 cifs_sb = CIFS_SB(file->f_dentry->d_sb); 637 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
632 pTcon = cifs_sb->tcon; 638 pTcon = cifs_sb->tcon;
@@ -635,27 +641,41 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
635 FreeXid(xid); 641 FreeXid(xid);
636 return -EBADF; 642 return -EBADF;
637 } 643 }
644 netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
645
638 646
647 /* BB add code here to normalize offset and length to
648 account for negative length which we can not accept over the
649 wire */
639 if (IS_GETLK(cmd)) { 650 if (IS_GETLK(cmd)) {
640 rc = CIFSSMBLock(xid, pTcon, 651 if(experimEnabled &&
641 ((struct cifsFileInfo *)file-> 652 (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
642 private_data)->netfid, 653 (CIFS_UNIX_FCNTL_CAP &
643 length, 654 le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) {
644 pfLock->fl_start, 0, 1, lockType, 655 int posix_lock_type;
645 0 /* wait flag */ ); 656 if(lockType & LOCKING_ANDX_SHARED_LOCK)
657 posix_lock_type = CIFS_RDLCK;
658 else
659 posix_lock_type = CIFS_WRLCK;
660 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */,
661 length, pfLock->fl_start,
662 posix_lock_type, wait_flag);
663 FreeXid(xid);
664 return rc;
665 }
666
667 /* BB we could chain these into one lock request BB */
668 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
669 0, 1, lockType, 0 /* wait flag */ );
646 if (rc == 0) { 670 if (rc == 0) {
647 rc = CIFSSMBLock(xid, pTcon, 671 rc = CIFSSMBLock(xid, pTcon, netfid, length,
648 ((struct cifsFileInfo *) file->
649 private_data)->netfid,
650 length,
651 pfLock->fl_start, 1 /* numUnlock */ , 672 pfLock->fl_start, 1 /* numUnlock */ ,
652 0 /* numLock */ , lockType, 673 0 /* numLock */ , lockType,
653 0 /* wait flag */ ); 674 0 /* wait flag */ );
654 pfLock->fl_type = F_UNLCK; 675 pfLock->fl_type = F_UNLCK;
655 if (rc != 0) 676 if (rc != 0)
656 cERROR(1, ("Error unlocking previously locked " 677 cERROR(1, ("Error unlocking previously locked "
657 "range %d during test of lock ", 678 "range %d during test of lock", rc));
658 rc));
659 rc = 0; 679 rc = 0;
660 680
661 } else { 681 } else {
@@ -667,12 +687,30 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
667 FreeXid(xid); 687 FreeXid(xid);
668 return rc; 688 return rc;
669 } 689 }
670 690 if (experimEnabled &&
671 rc = CIFSSMBLock(xid, pTcon, 691 (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
672 ((struct cifsFileInfo *) file->private_data)-> 692 (CIFS_UNIX_FCNTL_CAP &
673 netfid, length, 693 le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) {
674 pfLock->fl_start, numUnlock, numLock, lockType, 694 int posix_lock_type;
675 wait_flag); 695 if(lockType & LOCKING_ANDX_SHARED_LOCK)
696 posix_lock_type = CIFS_RDLCK;
697 else
698 posix_lock_type = CIFS_WRLCK;
699
700 if(numUnlock == 1)
701 posix_lock_type = CIFS_UNLCK;
702 else if(numLock == 0) {
703 /* if no lock or unlock then nothing
704 to do since we do not know what it is */
705 FreeXid(xid);
706 return -EOPNOTSUPP;
707 }
708 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
709 length, pfLock->fl_start,
710 posix_lock_type, wait_flag);
711 } else
712 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
713 numUnlock, numLock, lockType, wait_flag);
676 if (pfLock->fl_flags & FL_POSIX) 714 if (pfLock->fl_flags & FL_POSIX)
677 posix_lock_file_wait(file, pfLock); 715 posix_lock_file_wait(file, pfLock);
678 FreeXid(xid); 716 FreeXid(xid);
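The cifs_lock() rework above routes byte-range locks through CIFSSMBPosixLock() whenever the server advertises CAP_UNIX together with CIFS_UNIX_FCNTL_CAP, translating the SMB lock flags into a POSIX lock type. A hedged sketch of just that translation as a standalone helper; the name map_posix_lock_type is illustrative, the patch inlines this logic in the lock-setting branch of cifs_lock():

/* Mirrors the type selection in the SETLK branch of cifs_lock() above */
static int map_posix_lock_type(__u8 lockType, __u32 numLock, __u32 numUnlock)
{
	int posix_lock_type;

	if (lockType & LOCKING_ANDX_SHARED_LOCK)
		posix_lock_type = CIFS_RDLCK;	/* shared lock -> read lock */
	else
		posix_lock_type = CIFS_WRLCK;	/* exclusive lock -> write lock */

	if (numUnlock == 1)
		posix_lock_type = CIFS_UNLCK;	/* an unlock overrides either type */
	else if (numLock == 0)
		return -EOPNOTSUPP;		/* neither lock nor unlock: nothing to send */

	return posix_lock_type;
}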
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 598eec9778f6..957ddd1571c6 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -565,11 +565,14 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry)
565 struct cifsInodeInfo *cifsInode; 565 struct cifsInodeInfo *cifsInode;
566 FILE_BASIC_INFO *pinfo_buf; 566 FILE_BASIC_INFO *pinfo_buf;
567 567
568 cFYI(1, ("cifs_unlink, inode = 0x%p with ", inode)); 568 cFYI(1, ("cifs_unlink, inode = 0x%p", inode));
569 569
570 xid = GetXid(); 570 xid = GetXid();
571 571
572 cifs_sb = CIFS_SB(inode->i_sb); 572 if(inode)
573 cifs_sb = CIFS_SB(inode->i_sb);
574 else
575 cifs_sb = CIFS_SB(direntry->d_sb);
573 pTcon = cifs_sb->tcon; 576 pTcon = cifs_sb->tcon;
574 577
575 /* Unlink can be called from rename so we can not grab the sem here 578 /* Unlink can be called from rename so we can not grab the sem here
@@ -609,9 +612,8 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry)
609 } 612 }
610 } else if (rc == -EACCES) { 613 } else if (rc == -EACCES) {
611 /* try only if r/o attribute set in local lookup data? */ 614 /* try only if r/o attribute set in local lookup data? */
612 pinfo_buf = kmalloc(sizeof(FILE_BASIC_INFO), GFP_KERNEL); 615 pinfo_buf = kzalloc(sizeof(FILE_BASIC_INFO), GFP_KERNEL);
613 if (pinfo_buf) { 616 if (pinfo_buf) {
614 memset(pinfo_buf, 0, sizeof(FILE_BASIC_INFO));
615 /* ATTRS set to normal clears r/o bit */ 617 /* ATTRS set to normal clears r/o bit */
616 pinfo_buf->Attributes = cpu_to_le32(ATTR_NORMAL); 618 pinfo_buf->Attributes = cpu_to_le32(ATTR_NORMAL);
617 if (!(pTcon->ses->flags & CIFS_SES_NT4)) 619 if (!(pTcon->ses->flags & CIFS_SES_NT4))
@@ -693,9 +695,11 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry)
693 when needed */ 695 when needed */
694 direntry->d_inode->i_ctime = current_fs_time(inode->i_sb); 696 direntry->d_inode->i_ctime = current_fs_time(inode->i_sb);
695 } 697 }
696 inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb); 698 if(inode) {
697 cifsInode = CIFS_I(inode); 699 inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb);
698 cifsInode->time = 0; /* force revalidate of dir as well */ 700 cifsInode = CIFS_I(inode);
701 cifsInode->time = 0; /* force revalidate of dir as well */
702 }
699 703
700 kfree(full_path); 704 kfree(full_path);
701 FreeXid(xid); 705 FreeXid(xid);
@@ -1167,7 +1171,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
1167 nfid, npid, FALSE); 1171 nfid, npid, FALSE);
1168 atomic_dec(&open_file->wrtPending); 1172 atomic_dec(&open_file->wrtPending);
1169 cFYI(1,("SetFSize for attrs rc = %d", rc)); 1173 cFYI(1,("SetFSize for attrs rc = %d", rc));
1170 if(rc == -EINVAL) { 1174 if((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
1171 int bytes_written; 1175 int bytes_written;
1172 rc = CIFSSMBWrite(xid, pTcon, 1176 rc = CIFSSMBWrite(xid, pTcon,
1173 nfid, 0, attrs->ia_size, 1177 nfid, 0, attrs->ia_size,
@@ -1189,7 +1193,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
1189 cifs_sb->mnt_cifs_flags & 1193 cifs_sb->mnt_cifs_flags &
1190 CIFS_MOUNT_MAP_SPECIAL_CHR); 1194 CIFS_MOUNT_MAP_SPECIAL_CHR);
1191 cFYI(1, ("SetEOF by path (setattrs) rc = %d", rc)); 1195 cFYI(1, ("SetEOF by path (setattrs) rc = %d", rc));
1192 if(rc == -EINVAL) { 1196 if((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
1193 __u16 netfid; 1197 __u16 netfid;
1194 int oplock = FALSE; 1198 int oplock = FALSE;
1195 1199
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 8d0da7c87c7b..9562f5bba65c 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -67,7 +67,7 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
67 cifs_sb_target->local_nls, 67 cifs_sb_target->local_nls,
68 cifs_sb_target->mnt_cifs_flags & 68 cifs_sb_target->mnt_cifs_flags &
69 CIFS_MOUNT_MAP_SPECIAL_CHR); 69 CIFS_MOUNT_MAP_SPECIAL_CHR);
70 if(rc == -EIO) 70 if((rc == -EIO) || (rc == -EINVAL))
71 rc = -EOPNOTSUPP; 71 rc = -EOPNOTSUPP;
72 } 72 }
73 73
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 432ba15e2c2d..fafd056426e4 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -72,10 +72,9 @@ sesInfoAlloc(void)
72 struct cifsSesInfo *ret_buf; 72 struct cifsSesInfo *ret_buf;
73 73
74 ret_buf = 74 ret_buf =
75 (struct cifsSesInfo *) kmalloc(sizeof (struct cifsSesInfo), 75 (struct cifsSesInfo *) kzalloc(sizeof (struct cifsSesInfo),
76 GFP_KERNEL); 76 GFP_KERNEL);
77 if (ret_buf) { 77 if (ret_buf) {
78 memset(ret_buf, 0, sizeof (struct cifsSesInfo));
79 write_lock(&GlobalSMBSeslock); 78 write_lock(&GlobalSMBSeslock);
80 atomic_inc(&sesInfoAllocCount); 79 atomic_inc(&sesInfoAllocCount);
81 ret_buf->status = CifsNew; 80 ret_buf->status = CifsNew;
@@ -110,10 +109,9 @@ tconInfoAlloc(void)
110{ 109{
111 struct cifsTconInfo *ret_buf; 110 struct cifsTconInfo *ret_buf;
112 ret_buf = 111 ret_buf =
113 (struct cifsTconInfo *) kmalloc(sizeof (struct cifsTconInfo), 112 (struct cifsTconInfo *) kzalloc(sizeof (struct cifsTconInfo),
114 GFP_KERNEL); 113 GFP_KERNEL);
115 if (ret_buf) { 114 if (ret_buf) {
116 memset(ret_buf, 0, sizeof (struct cifsTconInfo));
117 write_lock(&GlobalSMBSeslock); 115 write_lock(&GlobalSMBSeslock);
118 atomic_inc(&tconInfoAllocCount); 116 atomic_inc(&tconInfoAllocCount);
119 list_add(&ret_buf->cifsConnectionList, 117 list_add(&ret_buf->cifsConnectionList,
@@ -423,9 +421,7 @@ checkSMB(struct smb_hdr *smb, __u16 mid, int length)
423{ 421{
424 __u32 len = smb->smb_buf_length; 422 __u32 len = smb->smb_buf_length;
425 __u32 clc_len; /* calculated length */ 423 __u32 clc_len; /* calculated length */
426 cFYI(0, 424 cFYI(0, ("checkSMB Length: 0x%x, smb_buf_length: 0x%x", length, len));
427 ("Entering checkSMB with Length: %x, smb_buf_length: %x",
428 length, len));
429 if (((unsigned int)length < 2 + sizeof (struct smb_hdr)) || 425 if (((unsigned int)length < 2 + sizeof (struct smb_hdr)) ||
430 (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4)) { 426 (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4)) {
431 if ((unsigned int)length < 2 + sizeof (struct smb_hdr)) { 427 if ((unsigned int)length < 2 + sizeof (struct smb_hdr)) {
@@ -433,29 +429,36 @@ checkSMB(struct smb_hdr *smb, __u16 mid, int length)
433 sizeof (struct smb_hdr) - 1) 429 sizeof (struct smb_hdr) - 1)
434 && (smb->Status.CifsError != 0)) { 430 && (smb->Status.CifsError != 0)) {
435 smb->WordCount = 0; 431 smb->WordCount = 0;
436 return 0; /* some error cases do not return wct and bcc */ 432 /* some error cases do not return wct and bcc */
433 return 0;
437 } else { 434 } else {
438 cERROR(1, ("Length less than smb header size")); 435 cERROR(1, ("Length less than smb header size"));
439 } 436 }
440
441 } 437 }
442 if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) 438 if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4)
443 cERROR(1, 439 cERROR(1, ("smb length greater than MaxBufSize, mid=%d",
444 ("smb_buf_length greater than MaxBufSize")); 440 smb->Mid));
445 cERROR(1,
446 ("bad smb detected. Illegal length. mid=%d",
447 smb->Mid));
448 return 1; 441 return 1;
449 } 442 }
450 443
451 if (checkSMBhdr(smb, mid)) 444 if (checkSMBhdr(smb, mid))
452 return 1; 445 return 1;
453 clc_len = smbCalcSize_LE(smb); 446 clc_len = smbCalcSize_LE(smb);
454 if ((4 + len != clc_len) 447
455 || (4 + len != (unsigned int)length)) { 448 if(4 + len != (unsigned int)length) {
456 cERROR(1, ("Calculated size 0x%x vs actual length 0x%x", 449 cERROR(1, ("Length read does not match RFC1001 length %d",len));
457 clc_len, 4 + len)); 450 return 1;
458 cERROR(1, ("bad smb size detected for Mid=%d", smb->Mid)); 451 }
452
453 if (4 + len != clc_len) {
454 /* check if bcc wrapped around for large read responses */
455 if((len > 64 * 1024) && (len > clc_len)) {
456 /* check if lengths match mod 64K */
457 if(((4 + len) & 0xFFFF) == (clc_len & 0xFFFF))
458 return 0; /* bcc wrapped */
459 }
460 cFYI(1, ("Calculated size %d vs length %d mismatch for mid %d",
461 clc_len, 4 + len, smb->Mid));
459 /* Windows XP can return a few bytes too much, presumably 462 /* Windows XP can return a few bytes too much, presumably
460 an illegal pad, at the end of byte range lock responses 463 an illegal pad, at the end of byte range lock responses
461 so we allow for that three byte pad, as long as actual 464 so we allow for that three byte pad, as long as actual
@@ -469,8 +472,11 @@ checkSMB(struct smb_hdr *smb, __u16 mid, int length)
469 wct and bcc to minimum size and drop the t2 parms and data */ 472 wct and bcc to minimum size and drop the t2 parms and data */
470 if((4+len > clc_len) && (len <= clc_len + 512)) 473 if((4+len > clc_len) && (len <= clc_len + 512))
471 return 0; 474 return 0;
472 else 475 else {
476 cERROR(1, ("RFC1001 size %d bigger than SMB for Mid=%d",
477 len, smb->Mid));
473 return 1; 478 return 1;
479 }
474 } 480 }
475 return 0; 481 return 0;
476} 482}
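The checkSMB() hunk above splits the old combined length test in two: the length read off the wire must match the RFC1001 header exactly, while the calculated SMB length may legitimately disagree when the byte-count field of a large read response wraps at 64K. A small self-contained version of that wrap test (plain C, compiles on its own; the helper name bcc_wrapped is illustrative):

#include <stdio.h>

/* Returns 1 when the only mismatch is a 16-bit wrap of the byte count,
   i.e. the two lengths still agree modulo 64K, as checkSMB() now allows. */
static int bcc_wrapped(unsigned int len, unsigned int clc_len)
{
	if (len > 64 * 1024 && len > clc_len)
		return ((4 + len) & 0xFFFF) == (clc_len & 0xFFFF);
	return 0;
}

int main(void)
{
	/* A 120KB read response whose 16-bit byte count wrapped */
	unsigned int len = 122880, clc_len = (4 + len) & 0xFFFF;

	printf("wrapped: %d\n", bcc_wrapped(len, clc_len));	/* prints 1 */
	return 0;
}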
diff --git a/fs/cifs/ntlmssp.c b/fs/cifs/ntlmssp.c
new file mode 100644
index 000000000000..78866f925747
--- /dev/null
+++ b/fs/cifs/ntlmssp.c
@@ -0,0 +1,129 @@
1/*
2 * fs/cifs/ntlmssp.h
3 *
4 * Copyright (c) International Business Machines Corp., 2006
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * This library is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU Lesser General Public License as published
9 * by the Free Software Foundation; either version 2.1 of the License, or
10 * (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
15 * the GNU Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public License
18 * along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include "cifspdu.h"
23#include "cifsglob.h"
24#include "cifsproto.h"
25#include "cifs_unicode.h"
26#include "cifs_debug.h"
27#include "ntlmssp.h"
28#include "nterr.h"
29
30#ifdef CONFIG_CIFS_EXPERIMENTAL
31static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB)
32{
33 __u32 capabilities = 0;
34
35 /* init fields common to all four types of SessSetup */
36 /* note that header is initialized to zero in header_assemble */
37 pSMB->req.AndXCommand = 0xFF;
38 pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
39 pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
40
41 /* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
42
43 /* BB verify whether signing required on neg or just on auth frame
44 (and NTLM case) */
45
46 capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
47 CAP_LARGE_WRITE_X | CAP_LARGE_READ_X;
48
49 if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
50 pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
51
52 if (ses->capabilities & CAP_UNICODE) {
53 pSMB->req.hdr.Flags2 |= SMBFLG2_UNICODE;
54 capabilities |= CAP_UNICODE;
55 }
56 if (ses->capabilities & CAP_STATUS32) {
57 pSMB->req.hdr.Flags2 |= SMBFLG2_ERR_STATUS;
58 capabilities |= CAP_STATUS32;
59 }
60 if (ses->capabilities & CAP_DFS) {
61 pSMB->req.hdr.Flags2 |= SMBFLG2_DFS;
62 capabilities |= CAP_DFS;
63 }
64
65 /* BB check whether to init vcnum BB */
66 return capabilities;
67}
68int
69CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, const int type,
70 int * pNTLMv2_flg, const struct nls_table *nls_cp)
71{
72 int rc = 0;
73 int wct;
74 struct smb_hdr *smb_buffer;
75 char *bcc_ptr;
76 SESSION_SETUP_ANDX *pSMB;
77 __u32 capabilities;
78
79 if(ses == NULL)
80 return -EINVAL;
81
82 cFYI(1,("SStp type: %d",type));
83 if(type < CIFS_NTLM) {
84#ifndef CONFIG_CIFS_WEAK_PW_HASH
85 /* LANMAN and plaintext are less secure and off by default.
86 So we make this explicitly be turned on in kconfig (in the
87 build) and turned on at runtime (changed from the default)
88 in proc/fs/cifs or via mount parm. Unfortunately this is
89 needed for old Win (e.g. Win95), some obscure NAS and OS/2 */
90 return -EOPNOTSUPP;
91#endif
92 wct = 10; /* lanman 2 style sessionsetup */
93 } else if(type < CIFS_NTLMSSP_NEG)
94 wct = 13; /* old style NTLM sessionsetup */
95 else /* same size for negotiate or auth, NTLMSSP or extended security */
96 wct = 12;
97
98 rc = small_smb_init_no_tc(SMB_COM_SESSION_SETUP_ANDX, wct, ses,
99 (void **)&smb_buffer);
100 if(rc)
101 return rc;
102
103 pSMB = (SESSION_SETUP_ANDX *)smb_buffer;
104
105 capabilities = cifs_ssetup_hdr(ses, pSMB);
106 bcc_ptr = pByteArea(smb_buffer);
107 if(type > CIFS_NTLM) {
108 pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
109 capabilities |= CAP_EXTENDED_SECURITY;
110 pSMB->req.Capabilities = cpu_to_le32(capabilities);
111 /* BB set password lengths */
112 } else if(type < CIFS_NTLM) /* lanman */ {
113 /* no capabilities flags in old lanman negotiation */
114 /* pSMB->old_req.PasswordLength = */ /* BB fixme BB */
115 } else /* type CIFS_NTLM */ {
116 pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
117 pSMB->req_no_secext.CaseInsensitivePasswordLength =
118 cpu_to_le16(CIFS_SESSION_KEY_SIZE);
119 pSMB->req_no_secext.CaseSensitivePasswordLength =
120 cpu_to_le16(CIFS_SESSION_KEY_SIZE);
121 }
122
123
124/* rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buf_type, 0); */
125 /* SMB request buf freed in SendReceive2 */
126
127 return rc;
128}
129#endif /* CONFIG_CIFS_EXPERIMENTAL */
diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
index 803389b64a2c..d39b712a11c5 100644
--- a/fs/cifs/ntlmssp.h
+++ b/fs/cifs/ntlmssp.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * fs/cifs/ntlmssp.h 2 * fs/cifs/ntlmssp.h
3 * 3 *
4 * Copyright (c) International Business Machines Corp., 2002 4 * Copyright (c) International Business Machines Corp., 2002,2006
5 * Author(s): Steve French (sfrench@us.ibm.com) 5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * 6 *
7 * This library is free software; you can redistribute it and/or modify 7 * This library is free software; you can redistribute it and/or modify
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 488bd0d81dcf..2f6e2825571e 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -604,7 +604,12 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
604 cifsFile->search_resume_name = NULL; 604 cifsFile->search_resume_name = NULL;
605 if(cifsFile->srch_inf.ntwrk_buf_start) { 605 if(cifsFile->srch_inf.ntwrk_buf_start) {
606 cFYI(1,("freeing SMB ff cache buf on search rewind")); 606 cFYI(1,("freeing SMB ff cache buf on search rewind"));
607 cifs_buf_release(cifsFile->srch_inf.ntwrk_buf_start); 607 if(cifsFile->srch_inf.smallBuf)
608 cifs_small_buf_release(cifsFile->srch_inf.
609 ntwrk_buf_start);
610 else
611 cifs_buf_release(cifsFile->srch_inf.
612 ntwrk_buf_start);
608 } 613 }
609 rc = initiate_cifs_search(xid,file); 614 rc = initiate_cifs_search(xid,file);
610 if(rc) { 615 if(rc) {
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index b12cb8a7da7c..3da80409466c 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -309,17 +309,16 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
309 309
310 *pRespBufType = CIFS_NO_BUFFER; /* no response buf yet */ 310 *pRespBufType = CIFS_NO_BUFFER; /* no response buf yet */
311 311
312 if (ses == NULL) { 312 if ((ses == NULL) || (ses->server == NULL)) {
313 cERROR(1,("Null smb session")); 313 cifs_small_buf_release(in_buf);
314 return -EIO; 314 cERROR(1,("Null session"));
315 }
316 if(ses->server == NULL) {
317 cERROR(1,("Null tcp session"));
318 return -EIO; 315 return -EIO;
319 } 316 }
320 317
321 if(ses->server->tcpStatus == CifsExiting) 318 if(ses->server->tcpStatus == CifsExiting) {
319 cifs_small_buf_release(in_buf);
322 return -ENOENT; 320 return -ENOENT;
321 }
323 322
324 /* Ensure that we do not send more than 50 overlapping requests 323 /* Ensure that we do not send more than 50 overlapping requests
325 to the same server. We may make this configurable later or 324 to the same server. We may make this configurable later or
@@ -346,6 +345,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
346 } else { 345 } else {
347 if(ses->server->tcpStatus == CifsExiting) { 346 if(ses->server->tcpStatus == CifsExiting) {
348 spin_unlock(&GlobalMid_Lock); 347 spin_unlock(&GlobalMid_Lock);
348 cifs_small_buf_release(in_buf);
349 return -ENOENT; 349 return -ENOENT;
350 } 350 }
351 351
@@ -385,6 +385,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
385 midQ = AllocMidQEntry(in_buf, ses); 385 midQ = AllocMidQEntry(in_buf, ses);
386 if (midQ == NULL) { 386 if (midQ == NULL) {
387 up(&ses->server->tcpSem); 387 up(&ses->server->tcpSem);
388 cifs_small_buf_release(in_buf);
388 /* If not lock req, update # of requests on wire to server */ 389 /* If not lock req, update # of requests on wire to server */
389 if(long_op < 3) { 390 if(long_op < 3) {
390 atomic_dec(&ses->server->inFlight); 391 atomic_dec(&ses->server->inFlight);
@@ -408,14 +409,18 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
408 if(rc < 0) { 409 if(rc < 0) {
409 DeleteMidQEntry(midQ); 410 DeleteMidQEntry(midQ);
410 up(&ses->server->tcpSem); 411 up(&ses->server->tcpSem);
412 cifs_small_buf_release(in_buf);
411 /* If not lock req, update # of requests on wire to server */ 413 /* If not lock req, update # of requests on wire to server */
412 if(long_op < 3) { 414 if(long_op < 3) {
413 atomic_dec(&ses->server->inFlight); 415 atomic_dec(&ses->server->inFlight);
414 wake_up(&ses->server->request_q); 416 wake_up(&ses->server->request_q);
415 } 417 }
416 return rc; 418 return rc;
417 } else 419 } else {
418 up(&ses->server->tcpSem); 420 up(&ses->server->tcpSem);
421 cifs_small_buf_release(in_buf);
422 }
423
419 if (long_op == -1) 424 if (long_op == -1)
420 goto cifs_no_response_exit2; 425 goto cifs_no_response_exit2;
421 else if (long_op == 2) /* writes past end of file can take loong time */ 426 else if (long_op == 2) /* writes past end of file can take loong time */
@@ -543,6 +548,7 @@ cifs_no_response_exit2:
543 548
544out_unlock2: 549out_unlock2:
545 up(&ses->server->tcpSem); 550 up(&ses->server->tcpSem);
551 cifs_small_buf_release(in_buf);
546 /* If not lock req, update # of requests on wire to server */ 552 /* If not lock req, update # of requests on wire to server */
547 if(long_op < 3) { 553 if(long_op < 3) {
548 atomic_dec(&ses->server->inFlight); 554 atomic_dec(&ses->server->inFlight);
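The SendReceive2() changes above make every early-return path release the small request buffer the caller handed in — on a NULL session, on CifsExiting, and on the mid-queue failures — rather than leaking it. A hedged, kernel-style sketch of that ownership rule; send_small_request is an illustrative name, and the real code repeats the release at each exit exactly as the hunks show:

/* Illustrative only: ownership of in_buf passes to the callee, so every
   failure exit must free it before returning. */
static int send_small_request(struct cifsSesInfo *ses, struct smb_hdr *in_buf)
{
	if (ses == NULL || ses->server == NULL) {
		cifs_small_buf_release(in_buf);
		return -EIO;
	}
	if (ses->server->tcpStatus == CifsExiting) {
		cifs_small_buf_release(in_buf);
		return -ENOENT;
	}
	/* ... queue and send the request; the buffer is released once it is
	   no longer needed, as in the hunks above ... */
	return 0;
}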
diff --git a/fs/dcache.c b/fs/dcache.c
index 19458d399502..940d188e5d14 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1101,6 +1101,32 @@ next:
1101} 1101}
1102 1102
1103/** 1103/**
1104 * d_hash_and_lookup - hash the qstr then search for a dentry
1105 * @dir: Directory to search in
1106 * @name: qstr of name we wish to find
1107 *
1108 * On hash failure or on lookup failure NULL is returned.
1109 */
1110struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
1111{
1112 struct dentry *dentry = NULL;
1113
1114 /*
1115 * Check for a fs-specific hash function. Note that we must
1116 * calculate the standard hash first, as the d_op->d_hash()
1117 * routine may choose to leave the hash value unchanged.
1118 */
1119 name->hash = full_name_hash(name->name, name->len);
1120 if (dir->d_op && dir->d_op->d_hash) {
1121 if (dir->d_op->d_hash(dir, name) < 0)
1122 goto out;
1123 }
1124 dentry = d_lookup(dir, name);
1125out:
1126 return dentry;
1127}
1128
1129/**
1104 * d_validate - verify dentry provided from insecure source 1130 * d_validate - verify dentry provided from insecure source
1105 * @dentry: The dentry alleged to be valid child of @dparent 1131 * @dentry: The dentry alleged to be valid child of @dparent
1106 * @dparent: The parent dentry (known to be valid) 1132 * @dparent: The parent dentry (known to be valid)
@@ -1172,11 +1198,11 @@ void d_delete(struct dentry * dentry)
1172 spin_lock(&dentry->d_lock); 1198 spin_lock(&dentry->d_lock);
1173 isdir = S_ISDIR(dentry->d_inode->i_mode); 1199 isdir = S_ISDIR(dentry->d_inode->i_mode);
1174 if (atomic_read(&dentry->d_count) == 1) { 1200 if (atomic_read(&dentry->d_count) == 1) {
1175 /* remove this and other inotify debug checks after 2.6.18 */
1176 dentry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
1177
1178 dentry_iput(dentry); 1201 dentry_iput(dentry);
1179 fsnotify_nameremove(dentry, isdir); 1202 fsnotify_nameremove(dentry, isdir);
1203
1204 /* remove this and other inotify debug checks after 2.6.18 */
1205 dentry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
1180 return; 1206 return;
1181 } 1207 }
1182 1208
@@ -1616,26 +1642,12 @@ ino_t find_inode_number(struct dentry *dir, struct qstr *name)
1616 struct dentry * dentry; 1642 struct dentry * dentry;
1617 ino_t ino = 0; 1643 ino_t ino = 0;
1618 1644
1619 /* 1645 dentry = d_hash_and_lookup(dir, name);
1620 * Check for a fs-specific hash function. Note that we must 1646 if (dentry) {
1621 * calculate the standard hash first, as the d_op->d_hash()
1622 * routine may choose to leave the hash value unchanged.
1623 */
1624 name->hash = full_name_hash(name->name, name->len);
1625 if (dir->d_op && dir->d_op->d_hash)
1626 {
1627 if (dir->d_op->d_hash(dir, name) != 0)
1628 goto out;
1629 }
1630
1631 dentry = d_lookup(dir, name);
1632 if (dentry)
1633 {
1634 if (dentry->d_inode) 1647 if (dentry->d_inode)
1635 ino = dentry->d_inode->i_ino; 1648 ino = dentry->d_inode->i_ino;
1636 dput(dentry); 1649 dput(dentry);
1637 } 1650 }
1638out:
1639 return ino; 1651 return ino;
1640} 1652}
1641 1653
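The dcache.c hunk above introduces d_hash_and_lookup(), which bundles the qstr hashing, the optional d_op->d_hash() callback, and the d_lookup() call, and then rewrites find_inode_number() on top of it. A hedged sketch of another caller using the new helper (lookup_child_ino is an illustrative name, not part of the patch):

/* Sketch: resolve a name under dir to an inode number, if it is cached */
static ino_t lookup_child_ino(struct dentry *dir, struct qstr *name)
{
	struct dentry *dentry;
	ino_t ino = 0;

	dentry = d_hash_and_lookup(dir, name);	/* NULL on hash or lookup failure */
	if (dentry) {
		if (dentry->d_inode)
			ino = dentry->d_inode->i_ino;
		dput(dentry);			/* d_lookup returned a referenced dentry */
	}
	return ino;
}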
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 910a8ed74b5d..b05d1b218776 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -929,8 +929,7 @@ do_holes:
929 block_in_page += this_chunk_blocks; 929 block_in_page += this_chunk_blocks;
930 dio->blocks_available -= this_chunk_blocks; 930 dio->blocks_available -= this_chunk_blocks;
931next_block: 931next_block:
932 if (dio->block_in_file > dio->final_block_in_request) 932 BUG_ON(dio->block_in_file > dio->final_block_in_request);
933 BUG();
934 if (dio->block_in_file == dio->final_block_in_request) 933 if (dio->block_in_file == dio->final_block_in_request)
935 break; 934 break;
936 } 935 }
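This hunk, like the ones that follow in dquot.c, exec.c, fcntl.c, freevxfs, hfsplus, inode.c, jffs2, locks.c and smbfs, collapses the open-coded "if (condition) BUG();" pair into a single BUG_ON(condition). The transformation keeps exactly the same check, taken verbatim from the hunk above:

	/* Before */
	if (dio->block_in_file > dio->final_block_in_request)
		BUG();

	/* After: same condition, one line, reads as an assertion */
	BUG_ON(dio->block_in_file > dio->final_block_in_request);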
diff --git a/fs/dquot.c b/fs/dquot.c
index 6b3886920939..81d87a413c68 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -590,8 +590,7 @@ we_slept:
590 atomic_dec(&dquot->dq_count); 590 atomic_dec(&dquot->dq_count);
591#ifdef __DQUOT_PARANOIA 591#ifdef __DQUOT_PARANOIA
592 /* sanity check */ 592 /* sanity check */
593 if (!list_empty(&dquot->dq_free)) 593 BUG_ON(!list_empty(&dquot->dq_free));
594 BUG();
595#endif 594#endif
596 put_dquot_last(dquot); 595 put_dquot_last(dquot);
597 spin_unlock(&dq_list_lock); 596 spin_unlock(&dq_list_lock);
@@ -666,8 +665,7 @@ we_slept:
666 return NODQUOT; 665 return NODQUOT;
667 } 666 }
668#ifdef __DQUOT_PARANOIA 667#ifdef __DQUOT_PARANOIA
669 if (!dquot->dq_sb) /* Has somebody invalidated entry under us? */ 668 BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */
670 BUG();
671#endif 669#endif
672 670
673 return dquot; 671 return dquot;
diff --git a/fs/exec.c b/fs/exec.c
index 950ebd43cdc3..0291a68a3626 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -561,7 +561,7 @@ static int exec_mmap(struct mm_struct *mm)
561 arch_pick_mmap_layout(mm); 561 arch_pick_mmap_layout(mm);
562 if (old_mm) { 562 if (old_mm) {
563 up_read(&old_mm->mmap_sem); 563 up_read(&old_mm->mmap_sem);
564 if (active_mm != old_mm) BUG(); 564 BUG_ON(active_mm != old_mm);
565 mmput(old_mm); 565 mmput(old_mm);
566 return 0; 566 return 0;
567 } 567 }
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 2a2479196f96..d35cbc6bc112 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -453,8 +453,7 @@ static void send_sigio_to_task(struct task_struct *p,
453 /* Make sure we are called with one of the POLL_* 453 /* Make sure we are called with one of the POLL_*
454 reasons, otherwise we could leak kernel stack into 454 reasons, otherwise we could leak kernel stack into
455 userspace. */ 455 userspace. */
456 if ((reason & __SI_MASK) != __SI_POLL) 456 BUG_ON((reason & __SI_MASK) != __SI_POLL);
457 BUG();
458 if (reason - POLL_IN >= NSIGPOLL) 457 if (reason - POLL_IN >= NSIGPOLL)
459 si.si_band = ~0L; 458 si.si_band = ~0L;
460 else 459 else
diff --git a/fs/freevxfs/vxfs_olt.c b/fs/freevxfs/vxfs_olt.c
index 76a0708ae978..049500847903 100644
--- a/fs/freevxfs/vxfs_olt.c
+++ b/fs/freevxfs/vxfs_olt.c
@@ -42,24 +42,21 @@
42static inline void 42static inline void
43vxfs_get_fshead(struct vxfs_oltfshead *fshp, struct vxfs_sb_info *infp) 43vxfs_get_fshead(struct vxfs_oltfshead *fshp, struct vxfs_sb_info *infp)
44{ 44{
45 if (infp->vsi_fshino) 45 BUG_ON(infp->vsi_fshino);
46 BUG();
47 infp->vsi_fshino = fshp->olt_fsino[0]; 46 infp->vsi_fshino = fshp->olt_fsino[0];
48} 47}
49 48
50static inline void 49static inline void
51vxfs_get_ilist(struct vxfs_oltilist *ilistp, struct vxfs_sb_info *infp) 50vxfs_get_ilist(struct vxfs_oltilist *ilistp, struct vxfs_sb_info *infp)
52{ 51{
53 if (infp->vsi_iext) 52 BUG_ON(infp->vsi_iext);
54 BUG();
55 infp->vsi_iext = ilistp->olt_iext[0]; 53 infp->vsi_iext = ilistp->olt_iext[0];
56} 54}
57 55
58static inline u_long 56static inline u_long
59vxfs_oblock(struct super_block *sbp, daddr_t block, u_long bsize) 57vxfs_oblock(struct super_block *sbp, daddr_t block, u_long bsize)
60{ 58{
61 if (sbp->s_blocksize % bsize) 59 BUG_ON(sbp->s_blocksize % bsize);
62 BUG();
63 return (block * (sbp->s_blocksize / bsize)); 60 return (block * (sbp->s_blocksize / bsize));
64} 61}
65 62
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 8f07e8fbd03d..746abc9ecf70 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -466,8 +466,7 @@ void hfs_bnode_unhash(struct hfs_bnode *node)
466 for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)]; 466 for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
467 *p && *p != node; p = &(*p)->next_hash) 467 *p && *p != node; p = &(*p)->next_hash)
468 ; 468 ;
469 if (!*p) 469 BUG_ON(!*p);
470 BUG();
471 *p = node->next_hash; 470 *p = node->next_hash;
472 node->tree->node_hash_cnt--; 471 node->tree->node_hash_cnt--;
473} 472}
@@ -622,8 +621,7 @@ void hfs_bnode_put(struct hfs_bnode *node)
622 621
623 dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n", 622 dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
624 node->tree->cnid, node->this, atomic_read(&node->refcnt)); 623 node->tree->cnid, node->this, atomic_read(&node->refcnt));
625 if (!atomic_read(&node->refcnt)) 624 BUG_ON(!atomic_read(&node->refcnt));
626 BUG();
627 if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock)) 625 if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
628 return; 626 return;
629 for (i = 0; i < tree->pages_per_bnode; i++) { 627 for (i = 0; i < tree->pages_per_bnode; i++) {
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index a67edfa34e9e..effa8991999c 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -269,8 +269,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
269 u8 *data, byte, m; 269 u8 *data, byte, m;
270 270
271 dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this); 271 dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this);
272 if (!node->this) 272 BUG_ON(!node->this);
273 BUG();
274 tree = node->tree; 273 tree = node->tree;
275 nidx = node->this; 274 nidx = node->this;
276 node = hfs_bnode_find(tree, 0); 275 node = hfs_bnode_find(tree, 0);
diff --git a/fs/hppfs/hppfs_kern.c b/fs/hppfs/hppfs_kern.c
index 2ba20cdb5baa..5e6363be246f 100644
--- a/fs/hppfs/hppfs_kern.c
+++ b/fs/hppfs/hppfs_kern.c
@@ -216,10 +216,10 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
216static struct inode_operations hppfs_file_iops = { 216static struct inode_operations hppfs_file_iops = {
217}; 217};
218 218
219static ssize_t read_proc(struct file *file, char *buf, ssize_t count, 219static ssize_t read_proc(struct file *file, char __user *buf, ssize_t count,
220 loff_t *ppos, int is_user) 220 loff_t *ppos, int is_user)
221{ 221{
222 ssize_t (*read)(struct file *, char *, size_t, loff_t *); 222 ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);
223 ssize_t n; 223 ssize_t n;
224 224
225 read = file->f_dentry->d_inode->i_fop->read; 225 read = file->f_dentry->d_inode->i_fop->read;
@@ -236,7 +236,7 @@ static ssize_t read_proc(struct file *file, char *buf, ssize_t count,
236 return n; 236 return n;
237} 237}
238 238
239static ssize_t hppfs_read_file(int fd, char *buf, ssize_t count) 239static ssize_t hppfs_read_file(int fd, char __user *buf, ssize_t count)
240{ 240{
241 ssize_t n; 241 ssize_t n;
242 int cur, err; 242 int cur, err;
@@ -274,7 +274,7 @@ static ssize_t hppfs_read_file(int fd, char *buf, ssize_t count)
274 return n; 274 return n;
275} 275}
276 276
277static ssize_t hppfs_read(struct file *file, char *buf, size_t count, 277static ssize_t hppfs_read(struct file *file, char __user *buf, size_t count,
278 loff_t *ppos) 278 loff_t *ppos)
279{ 279{
280 struct hppfs_private *hppfs = file->private_data; 280 struct hppfs_private *hppfs = file->private_data;
@@ -313,12 +313,12 @@ static ssize_t hppfs_read(struct file *file, char *buf, size_t count,
313 return(count); 313 return(count);
314} 314}
315 315
316static ssize_t hppfs_write(struct file *file, const char *buf, size_t len, 316static ssize_t hppfs_write(struct file *file, const char __user *buf, size_t len,
317 loff_t *ppos) 317 loff_t *ppos)
318{ 318{
319 struct hppfs_private *data = file->private_data; 319 struct hppfs_private *data = file->private_data;
320 struct file *proc_file = data->proc_file; 320 struct file *proc_file = data->proc_file;
321 ssize_t (*write)(struct file *, const char *, size_t, loff_t *); 321 ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *);
322 int err; 322 int err;
323 323
324 write = proc_file->f_dentry->d_inode->i_fop->write; 324 write = proc_file->f_dentry->d_inode->i_fop->write;
@@ -658,7 +658,7 @@ static struct super_operations hppfs_sbops = {
658 .statfs = hppfs_statfs, 658 .statfs = hppfs_statfs,
659}; 659};
660 660
661static int hppfs_readlink(struct dentry *dentry, char *buffer, int buflen) 661static int hppfs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
662{ 662{
663 struct file *proc_file; 663 struct file *proc_file;
664 struct dentry *proc_dentry; 664 struct dentry *proc_dentry;
diff --git a/fs/inode.c b/fs/inode.c
index 32b7c3375021..3a2446a27d2c 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -172,8 +172,7 @@ static struct inode *alloc_inode(struct super_block *sb)
172 172
173void destroy_inode(struct inode *inode) 173void destroy_inode(struct inode *inode)
174{ 174{
175 if (inode_has_buffers(inode)) 175 BUG_ON(inode_has_buffers(inode));
176 BUG();
177 security_inode_free(inode); 176 security_inode_free(inode);
178 if (inode->i_sb->s_op->destroy_inode) 177 if (inode->i_sb->s_op->destroy_inode)
179 inode->i_sb->s_op->destroy_inode(inode); 178 inode->i_sb->s_op->destroy_inode(inode);
@@ -249,12 +248,9 @@ void clear_inode(struct inode *inode)
249 might_sleep(); 248 might_sleep();
250 invalidate_inode_buffers(inode); 249 invalidate_inode_buffers(inode);
251 250
252 if (inode->i_data.nrpages) 251 BUG_ON(inode->i_data.nrpages);
253 BUG(); 252 BUG_ON(!(inode->i_state & I_FREEING));
254 if (!(inode->i_state & I_FREEING)) 253 BUG_ON(inode->i_state & I_CLEAR);
255 BUG();
256 if (inode->i_state & I_CLEAR)
257 BUG();
258 wait_on_inode(inode); 254 wait_on_inode(inode);
259 DQUOT_DROP(inode); 255 DQUOT_DROP(inode);
260 if (inode->i_sb && inode->i_sb->s_op->clear_inode) 256 if (inode->i_sb && inode->i_sb->s_op->clear_inode)
@@ -1054,8 +1050,7 @@ void generic_delete_inode(struct inode *inode)
1054 hlist_del_init(&inode->i_hash); 1050 hlist_del_init(&inode->i_hash);
1055 spin_unlock(&inode_lock); 1051 spin_unlock(&inode_lock);
1056 wake_up_inode(inode); 1052 wake_up_inode(inode);
1057 if (inode->i_state != I_CLEAR) 1053 BUG_ON(inode->i_state != I_CLEAR);
1058 BUG();
1059 destroy_inode(inode); 1054 destroy_inode(inode);
1060} 1055}
1061 1056
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 7b77a9541125..ff2a872e80e7 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -35,8 +35,7 @@ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c)
35 pid_t pid; 35 pid_t pid;
36 int ret = 0; 36 int ret = 0;
37 37
38 if (c->gc_task) 38 BUG_ON(c->gc_task);
39 BUG();
40 39
41 init_completion(&c->gc_thread_start); 40 init_completion(&c->gc_thread_start);
42 init_completion(&c->gc_thread_exit); 41 init_completion(&c->gc_thread_exit);
diff --git a/fs/locks.c b/fs/locks.c
index 4d9e71d43e7e..dda83d6cd48b 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -168,18 +168,9 @@ static void locks_release_private(struct file_lock *fl)
168/* Free a lock which is not in use. */ 168/* Free a lock which is not in use. */
169static void locks_free_lock(struct file_lock *fl) 169static void locks_free_lock(struct file_lock *fl)
170{ 170{
171 if (fl == NULL) { 171 BUG_ON(waitqueue_active(&fl->fl_wait));
172 BUG(); 172 BUG_ON(!list_empty(&fl->fl_block));
173 return; 173 BUG_ON(!list_empty(&fl->fl_link));
174 }
175 if (waitqueue_active(&fl->fl_wait))
176 panic("Attempting to free lock with active wait queue");
177
178 if (!list_empty(&fl->fl_block))
179 panic("Attempting to free lock with active block list");
180
181 if (!list_empty(&fl->fl_link))
182 panic("Attempting to free lock on active lock list");
183 174
184 locks_release_private(fl); 175 locks_release_private(fl);
185 kmem_cache_free(filelock_cache, fl); 176 kmem_cache_free(filelock_cache, fl);
@@ -735,8 +726,9 @@ EXPORT_SYMBOL(posix_locks_deadlock);
735 * at the head of the list, but that's secret knowledge known only to 726 * at the head of the list, but that's secret knowledge known only to
736 * flock_lock_file and posix_lock_file. 727 * flock_lock_file and posix_lock_file.
737 */ 728 */
738static int flock_lock_file(struct file *filp, struct file_lock *new_fl) 729static int flock_lock_file(struct file *filp, struct file_lock *request)
739{ 730{
731 struct file_lock *new_fl = NULL;
740 struct file_lock **before; 732 struct file_lock **before;
741 struct inode * inode = filp->f_dentry->d_inode; 733 struct inode * inode = filp->f_dentry->d_inode;
742 int error = 0; 734 int error = 0;
@@ -751,17 +743,19 @@ static int flock_lock_file(struct file *filp, struct file_lock *new_fl)
751 continue; 743 continue;
752 if (filp != fl->fl_file) 744 if (filp != fl->fl_file)
753 continue; 745 continue;
754 if (new_fl->fl_type == fl->fl_type) 746 if (request->fl_type == fl->fl_type)
755 goto out; 747 goto out;
756 found = 1; 748 found = 1;
757 locks_delete_lock(before); 749 locks_delete_lock(before);
758 break; 750 break;
759 } 751 }
760 unlock_kernel();
761 752
762 if (new_fl->fl_type == F_UNLCK) 753 if (request->fl_type == F_UNLCK)
763 return 0; 754 goto out;
764 755
756 new_fl = locks_alloc_lock();
757 if (new_fl == NULL)
758 goto out;
765 /* 759 /*
766 * If a higher-priority process was blocked on the old file lock, 760 * If a higher-priority process was blocked on the old file lock,
767 * give it the opportunity to lock the file. 761 * give it the opportunity to lock the file.
@@ -769,26 +763,27 @@ static int flock_lock_file(struct file *filp, struct file_lock *new_fl)
769 if (found) 763 if (found)
770 cond_resched(); 764 cond_resched();
771 765
772 lock_kernel();
773 for_each_lock(inode, before) { 766 for_each_lock(inode, before) {
774 struct file_lock *fl = *before; 767 struct file_lock *fl = *before;
775 if (IS_POSIX(fl)) 768 if (IS_POSIX(fl))
776 break; 769 break;
777 if (IS_LEASE(fl)) 770 if (IS_LEASE(fl))
778 continue; 771 continue;
779 if (!flock_locks_conflict(new_fl, fl)) 772 if (!flock_locks_conflict(request, fl))
780 continue; 773 continue;
781 error = -EAGAIN; 774 error = -EAGAIN;
782 if (new_fl->fl_flags & FL_SLEEP) { 775 if (request->fl_flags & FL_SLEEP)
783 locks_insert_block(fl, new_fl); 776 locks_insert_block(fl, request);
784 }
785 goto out; 777 goto out;
786 } 778 }
779 locks_copy_lock(new_fl, request);
787 locks_insert_lock(&inode->i_flock, new_fl); 780 locks_insert_lock(&inode->i_flock, new_fl);
788 error = 0; 781 new_fl = NULL;
789 782
790out: 783out:
791 unlock_kernel(); 784 unlock_kernel();
785 if (new_fl)
786 locks_free_lock(new_fl);
792 return error; 787 return error;
793} 788}
794 789
@@ -1569,9 +1564,7 @@ asmlinkage long sys_flock(unsigned int fd, unsigned int cmd)
1569 error = flock_lock_file_wait(filp, lock); 1564 error = flock_lock_file_wait(filp, lock);
1570 1565
1571 out_free: 1566 out_free:
1572 if (list_empty(&lock->fl_link)) { 1567 locks_free_lock(lock);
1573 locks_free_lock(lock);
1574 }
1575 1568
1576 out_putf: 1569 out_putf:
1577 fput(filp); 1570 fput(filp);
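The flock_lock_file() rework above no longer inserts the caller's request into the inode's lock list. It handles the pure-unlock case first, then allocates a private file_lock with locks_alloc_lock(), copies the request into it with locks_copy_lock(), and frees the copy at the out label if it was never inserted. A hedged sketch of that shape, with the conflict scan elided (flock_set_sketch is an illustrative name):

/* Shape of the new flock_lock_file(): the inserted lock is always a copy */
static int flock_set_sketch(struct inode *inode, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	int error = 0;

	/* ... a matching old FLOCK lock of this file was deleted above ... */
	if (request->fl_type == F_UNLCK)
		goto out;

	new_fl = locks_alloc_lock();
	if (new_fl == NULL)
		goto out;		/* note: the patch leaves error == 0 here */

	/* ... conflict scan against existing FLOCK locks goes here ... */

	locks_copy_lock(new_fl, request);
	locks_insert_lock(&inode->i_flock, new_fl);
	new_fl = NULL;			/* now owned by the inode's lock list */
out:
	if (new_fl)
		locks_free_lock(new_fl);	/* never inserted, so free it */
	return error;
}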
diff --git a/fs/msdos/namei.c b/fs/msdos/namei.c
index 626a367bcd81..5b76ccd19e3f 100644
--- a/fs/msdos/namei.c
+++ b/fs/msdos/namei.c
@@ -12,14 +12,6 @@
12#include <linux/msdos_fs.h> 12#include <linux/msdos_fs.h>
13#include <linux/smp_lock.h> 13#include <linux/smp_lock.h>
14 14
15/* MS-DOS "device special files" */
16static const unsigned char *reserved_names[] = {
17 "CON ", "PRN ", "NUL ", "AUX ",
18 "LPT1 ", "LPT2 ", "LPT3 ", "LPT4 ",
19 "COM1 ", "COM2 ", "COM3 ", "COM4 ",
20 NULL
21};
22
23/* Characters that are undesirable in an MS-DOS file name */ 15/* Characters that are undesirable in an MS-DOS file name */
24static unsigned char bad_chars[] = "*?<>|\""; 16static unsigned char bad_chars[] = "*?<>|\"";
25static unsigned char bad_if_strict_pc[] = "+=,; "; 17static unsigned char bad_if_strict_pc[] = "+=,; ";
@@ -40,7 +32,6 @@ static int msdos_format_name(const unsigned char *name, int len,
40 */ 32 */
41{ 33{
42 unsigned char *walk; 34 unsigned char *walk;
43 const unsigned char **reserved;
44 unsigned char c; 35 unsigned char c;
45 int space; 36 int space;
46 37
@@ -127,11 +118,7 @@ static int msdos_format_name(const unsigned char *name, int len,
127 } 118 }
128 while (walk - res < MSDOS_NAME) 119 while (walk - res < MSDOS_NAME)
129 *walk++ = ' '; 120 *walk++ = ' ';
130 if (!opts->atari) 121
131 /* GEMDOS is less stupid and has no reserved names */
132 for (reserved = reserved_names; *reserved; reserved++)
133 if (!strncmp(res, *reserved, 8))
134 return -EINVAL;
135 return 0; 122 return 0;
136} 123}
137 124
diff --git a/fs/namei.c b/fs/namei.c
index 22f6e8d16aa8..96723ae83c89 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1254,7 +1254,7 @@ out:
1254 return dentry; 1254 return dentry;
1255} 1255}
1256 1256
1257struct dentry * lookup_hash(struct nameidata *nd) 1257static struct dentry *lookup_hash(struct nameidata *nd)
1258{ 1258{
1259 return __lookup_hash(&nd->last, nd->dentry, nd); 1259 return __lookup_hash(&nd->last, nd->dentry, nd);
1260} 1260}
@@ -2697,7 +2697,6 @@ EXPORT_SYMBOL(follow_up);
2697EXPORT_SYMBOL(get_write_access); /* binfmt_aout */ 2697EXPORT_SYMBOL(get_write_access); /* binfmt_aout */
2698EXPORT_SYMBOL(getname); 2698EXPORT_SYMBOL(getname);
2699EXPORT_SYMBOL(lock_rename); 2699EXPORT_SYMBOL(lock_rename);
2700EXPORT_SYMBOL(lookup_hash);
2701EXPORT_SYMBOL(lookup_one_len); 2700EXPORT_SYMBOL(lookup_one_len);
2702EXPORT_SYMBOL(page_follow_link_light); 2701EXPORT_SYMBOL(page_follow_link_light);
2703EXPORT_SYMBOL(page_put_link); 2702EXPORT_SYMBOL(page_put_link);
diff --git a/fs/pipe.c b/fs/pipe.c
index 109a102c150d..795df987cd38 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -95,6 +95,8 @@ static void anon_pipe_buf_release(struct pipe_inode_info *info, struct pipe_buff
95{ 95{
96 struct page *page = buf->page; 96 struct page *page = buf->page;
97 97
98 buf->flags &= ~PIPE_BUF_FLAG_STOLEN;
99
98 /* 100 /*
99 * If nobody else uses this page, and we don't already have a 101 * If nobody else uses this page, and we don't already have a
100 * temporary page, let's keep track of it as a one-deep 102 * temporary page, let's keep track of it as a one-deep
@@ -124,7 +126,7 @@ static void anon_pipe_buf_unmap(struct pipe_inode_info *info, struct pipe_buffer
124static int anon_pipe_buf_steal(struct pipe_inode_info *info, 126static int anon_pipe_buf_steal(struct pipe_inode_info *info,
125 struct pipe_buffer *buf) 127 struct pipe_buffer *buf)
126{ 128{
127 buf->stolen = 1; 129 buf->flags |= PIPE_BUF_FLAG_STOLEN;
128 return 0; 130 return 0;
129} 131}
130 132
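The pipe.c hunks above retire the dedicated buf->stolen field in favour of a PIPE_BUF_FLAG_STOLEN bit in buf->flags, set by the steal callback and cleared again on release, so several per-buffer markers fit in one word. A throwaway userspace sketch of the set/clear/test idiom; the flag values and the struct name are illustrative, the real definitions live in include/linux/pipe_fs_i.h:

#include <stdio.h>

#define PIPE_BUF_FLAG_STOLEN	0x01	/* illustrative values only */
#define PIPE_BUF_FLAG_LRU	0x02

struct pipe_buffer_sketch { unsigned int flags; };

int main(void)
{
	struct pipe_buffer_sketch buf = { 0 };

	buf.flags |= PIPE_BUF_FLAG_STOLEN;	/* steal callback */
	printf("stolen? %d\n", !!(buf.flags & PIPE_BUF_FLAG_STOLEN));
	buf.flags &= ~PIPE_BUF_FLAG_STOLEN;	/* release callback */
	printf("stolen? %d\n", !!(buf.flags & PIPE_BUF_FLAG_STOLEN));
	return 0;
}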
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 8f1f49ceebec..a3a3eecef689 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -534,12 +534,15 @@ static int proc_oom_score(struct task_struct *task, char *buffer)
534 534
535/* If the process being read is separated by chroot from the reading process, 535/* If the process being read is separated by chroot from the reading process,
536 * don't let the reader access the threads. 536 * don't let the reader access the threads.
537 *
538 * note: this does dput(root) and mntput(vfsmnt) on exit.
537 */ 539 */
538static int proc_check_chroot(struct dentry *root, struct vfsmount *vfsmnt) 540static int proc_check_chroot(struct dentry *root, struct vfsmount *vfsmnt)
539{ 541{
540 struct dentry *de, *base; 542 struct dentry *de, *base;
541 struct vfsmount *our_vfsmnt, *mnt; 543 struct vfsmount *our_vfsmnt, *mnt;
542 int res = 0; 544 int res = 0;
545
543 read_lock(&current->fs->lock); 546 read_lock(&current->fs->lock);
544 our_vfsmnt = mntget(current->fs->rootmnt); 547 our_vfsmnt = mntget(current->fs->rootmnt);
545 base = dget(current->fs->root); 548 base = dget(current->fs->root);
@@ -549,11 +552,11 @@ static int proc_check_chroot(struct dentry *root, struct vfsmount *vfsmnt)
549 de = root; 552 de = root;
550 mnt = vfsmnt; 553 mnt = vfsmnt;
551 554
552 while (vfsmnt != our_vfsmnt) { 555 while (mnt != our_vfsmnt) {
553 if (vfsmnt == vfsmnt->mnt_parent) 556 if (mnt == mnt->mnt_parent)
554 goto out; 557 goto out;
555 de = vfsmnt->mnt_mountpoint; 558 de = mnt->mnt_mountpoint;
556 vfsmnt = vfsmnt->mnt_parent; 559 mnt = mnt->mnt_parent;
557 } 560 }
558 561
559 if (!is_subdir(de, base)) 562 if (!is_subdir(de, base))
@@ -564,7 +567,7 @@ exit:
564 dput(base); 567 dput(base);
565 mntput(our_vfsmnt); 568 mntput(our_vfsmnt);
566 dput(root); 569 dput(root);
567 mntput(mnt); 570 mntput(vfsmnt);
568 return res; 571 return res;
569out: 572out:
570 spin_unlock(&vfsmount_lock); 573 spin_unlock(&vfsmount_lock);
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index ef5a3323f4b5..5c10ea157425 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -249,144 +249,60 @@ static int cpuinfo_open(struct inode *inode, struct file *file)
249 return seq_open(file, &cpuinfo_op); 249 return seq_open(file, &cpuinfo_op);
250} 250}
251 251
252enum devinfo_states { 252static struct file_operations proc_cpuinfo_operations = {
253 CHR_HDR, 253 .open = cpuinfo_open,
254 CHR_LIST, 254 .read = seq_read,
255 BLK_HDR, 255 .llseek = seq_lseek,
256 BLK_LIST, 256 .release = seq_release,
257 DEVINFO_DONE
258};
259
260struct devinfo_state {
261 void *chrdev;
262 void *blkdev;
263 unsigned int num_records;
264 unsigned int cur_record;
265 enum devinfo_states state;
266}; 257};
267 258
268static void *devinfo_start(struct seq_file *f, loff_t *pos) 259static int devinfo_show(struct seq_file *f, void *v)
269{ 260{
270 struct devinfo_state *info = f->private; 261 int i = *(loff_t *) v;
271 262
272 if (*pos) { 263 if (i < CHRDEV_MAJOR_HASH_SIZE) {
273 if ((info) && (*pos <= info->num_records)) 264 if (i == 0)
274 return info; 265 seq_printf(f, "Character devices:\n");
275 return NULL; 266 chrdev_show(f, i);
267 } else {
268 i -= CHRDEV_MAJOR_HASH_SIZE;
269 if (i == 0)
270 seq_printf(f, "\nBlock devices:\n");
271 blkdev_show(f, i);
276 } 272 }
277 info = kmalloc(sizeof(*info), GFP_KERNEL); 273 return 0;
278 f->private = info;
279 info->chrdev = acquire_chrdev_list();
280 info->blkdev = acquire_blkdev_list();
281 info->state = CHR_HDR;
282 info->num_records = count_chrdev_list();
283 info->num_records += count_blkdev_list();
284 info->num_records += 2; /* Character and Block headers */
285 *pos = 1;
286 info->cur_record = *pos;
287 return info;
288} 274}
289 275
290static void *devinfo_next(struct seq_file *f, void *v, loff_t *pos) 276static void *devinfo_start(struct seq_file *f, loff_t *pos)
291{ 277{
292 int idummy; 278 if (*pos < (BLKDEV_MAJOR_HASH_SIZE + CHRDEV_MAJOR_HASH_SIZE))
293 char *ndummy; 279 return pos;
294 struct devinfo_state *info = f->private; 280 return NULL;
295
296 switch (info->state) {
297 case CHR_HDR:
298 info->state = CHR_LIST;
299 (*pos)++;
300 /*fallthrough*/
301 case CHR_LIST:
302 if (get_chrdev_info(info->chrdev,&idummy,&ndummy)) {
303 /*
304 * The character dev list is complete
305 */
306 info->state = BLK_HDR;
307 } else {
308 info->chrdev = get_next_chrdev(info->chrdev);
309 }
310 (*pos)++;
311 break;
312 case BLK_HDR:
313 info->state = BLK_LIST;
314 (*pos)++;
315 /*fallthrough*/
316 case BLK_LIST:
317 if (get_blkdev_info(info->blkdev,&idummy,&ndummy)) {
318 /*
319 * The block dev list is complete
320 */
321 info->state = DEVINFO_DONE;
322 } else {
323 info->blkdev = get_next_blkdev(info->blkdev);
324 }
325 (*pos)++;
326 break;
327 case DEVINFO_DONE:
328 (*pos)++;
329 info->cur_record = *pos;
330 info = NULL;
331 break;
332 default:
333 break;
334 }
335 if (info)
336 info->cur_record = *pos;
337 return info;
338} 281}
339 282
340static void devinfo_stop(struct seq_file *f, void *v) 283static void *devinfo_next(struct seq_file *f, void *v, loff_t *pos)
341{ 284{
342 struct devinfo_state *info = f->private; 285 (*pos)++;
343 286 if (*pos >= (BLKDEV_MAJOR_HASH_SIZE + CHRDEV_MAJOR_HASH_SIZE))
344 if (info) { 287 return NULL;
345 release_chrdev_list(info->chrdev); 288 return pos;
346 release_blkdev_list(info->blkdev);
347 f->private = NULL;
348 kfree(info);
349 }
350} 289}
351 290
352static int devinfo_show(struct seq_file *f, void *arg) 291static void devinfo_stop(struct seq_file *f, void *v)
353{ 292{
354 int major; 293 /* Nothing to do */
355 char *name;
356 struct devinfo_state *info = f->private;
357
358 switch(info->state) {
359 case CHR_HDR:
360 seq_printf(f,"Character devices:\n");
361 /* fallthrough */
362 case CHR_LIST:
363 if (!get_chrdev_info(info->chrdev,&major,&name))
364 seq_printf(f,"%3d %s\n",major,name);
365 break;
366 case BLK_HDR:
367 seq_printf(f,"\nBlock devices:\n");
368 /* fallthrough */
369 case BLK_LIST:
370 if (!get_blkdev_info(info->blkdev,&major,&name))
371 seq_printf(f,"%3d %s\n",major,name);
372 break;
373 default:
374 break;
375 }
376
377 return 0;
378} 294}
379 295
380static struct seq_operations devinfo_op = { 296static struct seq_operations devinfo_ops = {
381 .start = devinfo_start, 297 .start = devinfo_start,
382 .next = devinfo_next, 298 .next = devinfo_next,
383 .stop = devinfo_stop, 299 .stop = devinfo_stop,
384 .show = devinfo_show, 300 .show = devinfo_show
385}; 301};
386 302
387static int devinfo_open(struct inode *inode, struct file *file) 303static int devinfo_open(struct inode *inode, struct file *filp)
388{ 304{
389 return seq_open(file, &devinfo_op); 305 return seq_open(filp, &devinfo_ops);
390} 306}
391 307
392static struct file_operations proc_devinfo_operations = { 308static struct file_operations proc_devinfo_operations = {
@@ -396,13 +312,6 @@ static struct file_operations proc_devinfo_operations = {
396 .release = seq_release, 312 .release = seq_release,
397}; 313};
398 314
399static struct file_operations proc_cpuinfo_operations = {
400 .open = cpuinfo_open,
401 .read = seq_read,
402 .llseek = seq_lseek,
403 .release = seq_release,
404};
405
406extern struct seq_operations vmstat_op; 315extern struct seq_operations vmstat_op;
407static int vmstat_open(struct inode *inode, struct file *file) 316static int vmstat_open(struct inode *inode, struct file *file)
408{ 317{
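The proc_misc.c rewrite above drops the stateful devinfo iterator: the seq_file position is now just an index running over CHRDEV_MAJOR_HASH_SIZE + BLKDEV_MAJOR_HASH_SIZE slots, and devinfo_show() decides from the index alone whether to print a character-device or block-device line. A small runnable sketch of the same index-splitting idea; CHR_SLOTS, BLK_SLOTS, and the printed strings are illustrative stand-ins:

#include <stdio.h>

#define CHR_SLOTS 4	/* stands in for CHRDEV_MAJOR_HASH_SIZE */
#define BLK_SLOTS 3	/* stands in for BLKDEV_MAJOR_HASH_SIZE */

/* One show() call per position: the index alone picks the table */
static void show(int i)
{
	if (i < CHR_SLOTS) {
		if (i == 0)
			printf("Character devices:\n");
		printf("  chr slot %d\n", i);
	} else {
		i -= CHR_SLOTS;
		if (i == 0)
			printf("\nBlock devices:\n");
		printf("  blk slot %d\n", i);
	}
}

int main(void)
{
	for (int pos = 0; pos < CHR_SLOTS + BLK_SLOTS; pos++)
		show(pos);	/* mirrors devinfo_start/devinfo_next walking *pos */
	return 0;
}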
diff --git a/fs/select.c b/fs/select.c
index b3a3a1326af6..071660fa7b01 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -314,7 +314,7 @@ static int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
314 int ret, size, max_fdset; 314 int ret, size, max_fdset;
315 struct fdtable *fdt; 315 struct fdtable *fdt;
316 /* Allocate small arguments on the stack to save memory and be faster */ 316 /* Allocate small arguments on the stack to save memory and be faster */
317 char stack_fds[SELECT_STACK_ALLOC]; 317 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
318 318
319 ret = -EINVAL; 319 ret = -EINVAL;
320 if (n < 0) 320 if (n < 0)
@@ -639,8 +639,10 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, s64 *timeout)
639 struct poll_list *walk; 639 struct poll_list *walk;
640 struct fdtable *fdt; 640 struct fdtable *fdt;
641 int max_fdset; 641 int max_fdset;
642 /* Allocate small arguments on the stack to save memory and be faster */ 642 /* Allocate small arguments on the stack to save memory and be
643 char stack_pps[POLL_STACK_ALLOC]; 643 faster - use long to make sure the buffer is aligned properly
644 on 64 bit archs to avoid unaligned access */
645 long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
644 struct poll_list *stack_pp = NULL; 646 struct poll_list *stack_pp = NULL;
645 647
646 /* Do a sanity check on nfds ... */ 648 /* Do a sanity check on nfds ... */
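The select.c and do_sys_poll() hunks above turn the on-stack scratch buffers from char arrays into long arrays of the same byte size, so the buffer starts on a long boundary and the structures later built inside it are naturally aligned on 64-bit machines. A short C11 illustration of why the element type, not the size, determines the alignment (the buffer size here is illustrative):

#include <stdio.h>

#define POLL_STACK_ALLOC 256

int main(void)
{
	char stack_chars[POLL_STACK_ALLOC];			/* only 1-byte alignment guaranteed */
	long stack_longs[POLL_STACK_ALLOC / sizeof(long)];	/* aligned for long */

	printf("char buf alignment: %zu\n", _Alignof(char));	/* 1 */
	printf("long buf alignment: %zu\n", _Alignof(long));	/* 4 or 8, depending on arch */
	printf("same size in bytes: %zu vs %zu\n",
	       sizeof(stack_chars), sizeof(stack_longs));
	return 0;
}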
diff --git a/fs/smbfs/file.c b/fs/smbfs/file.c
index c56bd99a9701..ed9a24d19d7d 100644
--- a/fs/smbfs/file.c
+++ b/fs/smbfs/file.c
@@ -178,11 +178,9 @@ smb_writepage(struct page *page, struct writeback_control *wbc)
178 unsigned offset = PAGE_CACHE_SIZE; 178 unsigned offset = PAGE_CACHE_SIZE;
179 int err; 179 int err;
180 180
181 if (!mapping) 181 BUG_ON(!mapping);
182 BUG();
183 inode = mapping->host; 182 inode = mapping->host;
184 if (!inode) 183 BUG_ON(!inode);
185 BUG();
186 184
187 end_index = inode->i_size >> PAGE_CACHE_SHIFT; 185 end_index = inode->i_size >> PAGE_CACHE_SHIFT;
188 186
diff --git a/fs/splice.c b/fs/splice.c
index 7c2bbf18d7a7..bfa42a277bb8 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -22,7 +22,10 @@
22#include <linux/pipe_fs_i.h> 22#include <linux/pipe_fs_i.h>
23#include <linux/mm_inline.h> 23#include <linux/mm_inline.h>
24#include <linux/swap.h> 24#include <linux/swap.h>
25#include <linux/writeback.h>
26#include <linux/buffer_head.h>
25#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/syscalls.h>
26 29
27/* 30/*
28 * Passed to the actors 31 * Passed to the actors
@@ -34,28 +37,37 @@ struct splice_desc {
34 loff_t pos; /* file position */ 37 loff_t pos; /* file position */
35}; 38};
36 39
40/*
41 * Attempt to steal a page from a pipe buffer. This should perhaps go into
42 * a vm helper function, it's already simplified quite a bit by the
43 * addition of remove_mapping(). If success is returned, the caller may
44 * attempt to reuse this page for another destination.
45 */
37static int page_cache_pipe_buf_steal(struct pipe_inode_info *info, 46static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
38 struct pipe_buffer *buf) 47 struct pipe_buffer *buf)
39{ 48{
40 struct page *page = buf->page; 49 struct page *page = buf->page;
50 struct address_space *mapping = page_mapping(page);
41 51
42 WARN_ON(!PageLocked(page)); 52 WARN_ON(!PageLocked(page));
43 WARN_ON(!PageUptodate(page)); 53 WARN_ON(!PageUptodate(page));
44 54
45 if (!remove_mapping(page_mapping(page), page)) 55 /*
46 return 1; 56 * At least for ext2 with nobh option, we need to wait on writeback
57 * completing on this page, since we'll remove it from the pagecache.
58 * Otherwise truncate wont wait on the page, allowing the disk
59 * blocks to be reused by someone else before we actually wrote our
60 * data to them. fs corruption ensues.
61 */
62 wait_on_page_writeback(page);
47 63
48 if (PageLRU(page)) { 64 if (PagePrivate(page))
49 struct zone *zone = page_zone(page); 65 try_to_release_page(page, mapping_gfp_mask(mapping));
50 66
51 spin_lock_irq(&zone->lru_lock); 67 if (!remove_mapping(mapping, page))
52 BUG_ON(!PageLRU(page)); 68 return 1;
53 __ClearPageLRU(page);
54 del_page_from_lru(zone, page);
55 spin_unlock_irq(&zone->lru_lock);
56 }
57 69
58 buf->stolen = 1; 70 buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
59 return 0; 71 return 0;
60} 72}
61 73
@@ -64,7 +76,7 @@ static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
64{ 76{
65 page_cache_release(buf->page); 77 page_cache_release(buf->page);
66 buf->page = NULL; 78 buf->page = NULL;
67 buf->stolen = 0; 79 buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
68} 80}
69 81
70static void *page_cache_pipe_buf_map(struct file *file, 82static void *page_cache_pipe_buf_map(struct file *file,
@@ -91,8 +103,7 @@ static void *page_cache_pipe_buf_map(struct file *file,
91static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info, 103static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
92 struct pipe_buffer *buf) 104 struct pipe_buffer *buf)
93{ 105{
94 if (!buf->stolen) 106 unlock_page(buf->page);
95 unlock_page(buf->page);
96 kunmap(buf->page); 107 kunmap(buf->page);
97} 108}
98 109
@@ -104,9 +115,13 @@ static struct pipe_buf_operations page_cache_pipe_buf_ops = {
104 .steal = page_cache_pipe_buf_steal, 115 .steal = page_cache_pipe_buf_steal,
105}; 116};
106 117
118/*
119 * Pipe output worker. This sets up our pipe format with the page cache
120 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
121 */
107static ssize_t move_to_pipe(struct inode *inode, struct page **pages, 122static ssize_t move_to_pipe(struct inode *inode, struct page **pages,
108 int nr_pages, unsigned long offset, 123 int nr_pages, unsigned long offset,
109 unsigned long len) 124 unsigned long len, unsigned int flags)
110{ 125{
111 struct pipe_inode_info *info; 126 struct pipe_inode_info *info;
112 int ret, do_wakeup, i; 127 int ret, do_wakeup, i;
@@ -159,6 +174,12 @@ static ssize_t move_to_pipe(struct inode *inode, struct page **pages,
159 break; 174 break;
160 } 175 }
161 176
177 if (flags & SPLICE_F_NONBLOCK) {
178 if (!ret)
179 ret = -EAGAIN;
180 break;
181 }
182
162 if (signal_pending(current)) { 183 if (signal_pending(current)) {
163 if (!ret) 184 if (!ret)
164 ret = -ERESTARTSYS; 185 ret = -ERESTARTSYS;
@@ -191,7 +212,7 @@ static ssize_t move_to_pipe(struct inode *inode, struct page **pages,
191} 212}
192 213
193static int __generic_file_splice_read(struct file *in, struct inode *pipe, 214static int __generic_file_splice_read(struct file *in, struct inode *pipe,
194 size_t len) 215 size_t len, unsigned int flags)
195{ 216{
196 struct address_space *mapping = in->f_mapping; 217 struct address_space *mapping = in->f_mapping;
197 unsigned int offset, nr_pages; 218 unsigned int offset, nr_pages;
@@ -231,9 +252,9 @@ static int __generic_file_splice_read(struct file *in, struct inode *pipe,
231 * fill shadow[] with pages at the right locations, so we only 252 * fill shadow[] with pages at the right locations, so we only
232 * have to fill holes 253 * have to fill holes
233 */ 254 */
234 memset(shadow, 0, i * sizeof(struct page *)); 255 memset(shadow, 0, nr_pages * sizeof(struct page *));
235 for (j = 0, pidx = index; j < i; pidx++, j++) 256 for (j = 0; j < i; j++)
236 shadow[pages[j]->index - pidx] = pages[j]; 257 shadow[pages[j]->index - index] = pages[j];
237 258
238 /* 259 /*
239 * now fill in the holes 260 * now fill in the holes
@@ -279,9 +300,19 @@ static int __generic_file_splice_read(struct file *in, struct inode *pipe,
279 * Now we splice them into the pipe.. 300 * Now we splice them into the pipe..
280 */ 301 */
281splice_them: 302splice_them:
282 return move_to_pipe(pipe, pages, i, offset, len); 303 return move_to_pipe(pipe, pages, i, offset, len, flags);
283} 304}
284 305
306/**
307 * generic_file_splice_read - splice data from file to a pipe
308 * @in: file to splice from
309 * @pipe: pipe to splice to
310 * @len: number of bytes to splice
311 * @flags: splice modifier flags
312 *
313 * Will read pages from given file and fill them into a pipe.
314 *
315 */
285ssize_t generic_file_splice_read(struct file *in, struct inode *pipe, 316ssize_t generic_file_splice_read(struct file *in, struct inode *pipe,
286 size_t len, unsigned int flags) 317 size_t len, unsigned int flags)
287{ 318{
@@ -291,7 +322,7 @@ ssize_t generic_file_splice_read(struct file *in, struct inode *pipe,
291 ret = 0; 322 ret = 0;
292 spliced = 0; 323 spliced = 0;
293 while (len) { 324 while (len) {
294 ret = __generic_file_splice_read(in, pipe, len); 325 ret = __generic_file_splice_read(in, pipe, len, flags);
295 326
296 if (ret <= 0) 327 if (ret <= 0)
297 break; 328 break;
@@ -299,6 +330,11 @@ ssize_t generic_file_splice_read(struct file *in, struct inode *pipe,
299 in->f_pos += ret; 330 in->f_pos += ret;
300 len -= ret; 331 len -= ret;
301 spliced += ret; 332 spliced += ret;
333
334 if (!(flags & SPLICE_F_NONBLOCK))
335 continue;
336 ret = -EAGAIN;
337 break;
302 } 338 }
303 339
304 if (spliced) 340 if (spliced)
@@ -307,8 +343,11 @@ ssize_t generic_file_splice_read(struct file *in, struct inode *pipe,
307 return ret; 343 return ret;
308} 344}
309 345
346EXPORT_SYMBOL(generic_file_splice_read);
347
310/* 348/*
311 * Send 'len' bytes to socket from 'file' at position 'pos' using sendpage(). 349 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
350 * using sendpage().
312 */ 351 */
313static int pipe_to_sendpage(struct pipe_inode_info *info, 352static int pipe_to_sendpage(struct pipe_inode_info *info,
314 struct pipe_buffer *buf, struct splice_desc *sd) 353 struct pipe_buffer *buf, struct splice_desc *sd)
@@ -318,6 +357,7 @@ static int pipe_to_sendpage(struct pipe_inode_info *info,
318 unsigned int offset; 357 unsigned int offset;
319 ssize_t ret; 358 ssize_t ret;
320 void *ptr; 359 void *ptr;
360 int more;
321 361
322 /* 362 /*
323 * sub-optimal, but we are limited by the pipe ->map. we don't 363 * sub-optimal, but we are limited by the pipe ->map. we don't
@@ -330,9 +370,9 @@ static int pipe_to_sendpage(struct pipe_inode_info *info,
330 return PTR_ERR(ptr); 370 return PTR_ERR(ptr);
331 371
332 offset = pos & ~PAGE_CACHE_MASK; 372 offset = pos & ~PAGE_CACHE_MASK;
373 more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
333 374
334 ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos, 375 ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos,more);
335 sd->len < sd->total_len);
336 376
337 buf->ops->unmap(info, buf); 377 buf->ops->unmap(info, buf);
338 if (ret == sd->len) 378 if (ret == sd->len)
@@ -354,16 +394,19 @@ static int pipe_to_sendpage(struct pipe_inode_info *info,
354 * - Destination page does not exist, we can add the pipe page to 394 * - Destination page does not exist, we can add the pipe page to
355 * the page cache and avoid the copy. 395 * the page cache and avoid the copy.
356 * 396 *
357 * For now we just do the slower thing and always copy pages over, it's 397 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
358 * easier than migrating pages from the pipe to the target file. For the 398 * sd->flags), we attempt to migrate pages from the pipe to the output
359 * case of doing file | file splicing, the migrate approach had some LRU 399 * file address space page cache. This is possible if no one else has
360 * nastiness... 400 * the pipe page referenced outside of the pipe and page cache. If
401 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
402 * a new page in the output file page cache and fill/dirty that.
361 */ 403 */
362static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf, 404static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
363 struct splice_desc *sd) 405 struct splice_desc *sd)
364{ 406{
365 struct file *file = sd->file; 407 struct file *file = sd->file;
366 struct address_space *mapping = file->f_mapping; 408 struct address_space *mapping = file->f_mapping;
409 gfp_t gfp_mask = mapping_gfp_mask(mapping);
367 unsigned int offset; 410 unsigned int offset;
368 struct page *page; 411 struct page *page;
369 pgoff_t index; 412 pgoff_t index;
@@ -384,18 +427,23 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
384 * reuse buf page, if SPLICE_F_MOVE is set 427 * reuse buf page, if SPLICE_F_MOVE is set
385 */ 428 */
386 if (sd->flags & SPLICE_F_MOVE) { 429 if (sd->flags & SPLICE_F_MOVE) {
430 /*
431 * If steal succeeds, buf->page is now pruned from the vm
432 * side (LRU and page cache) and we can reuse it.
433 */
387 if (buf->ops->steal(info, buf)) 434 if (buf->ops->steal(info, buf))
388 goto find_page; 435 goto find_page;
389 436
390 page = buf->page; 437 page = buf->page;
391 if (add_to_page_cache_lru(page, mapping, index, 438 if (add_to_page_cache(page, mapping, index, gfp_mask))
392 mapping_gfp_mask(mapping)))
393 goto find_page; 439 goto find_page;
440
441 if (!(buf->flags & PIPE_BUF_FLAG_LRU))
442 lru_cache_add(page);
394 } else { 443 } else {
395find_page: 444find_page:
396 ret = -ENOMEM; 445 ret = -ENOMEM;
397 page = find_or_create_page(mapping, index, 446 page = find_or_create_page(mapping, index, gfp_mask);
398 mapping_gfp_mask(mapping));
399 if (!page) 447 if (!page)
400 goto out; 448 goto out;
401 449
@@ -432,10 +480,13 @@ find_page:
432 } 480 }
433 481
434 ret = mapping->a_ops->prepare_write(file, page, 0, sd->len); 482 ret = mapping->a_ops->prepare_write(file, page, 0, sd->len);
435 if (ret) 483 if (ret == AOP_TRUNCATED_PAGE) {
484 page_cache_release(page);
485 goto find_page;
486 } else if (ret)
436 goto out; 487 goto out;
437 488
438 if (!buf->stolen) { 489 if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
439 char *dst = kmap_atomic(page, KM_USER0); 490 char *dst = kmap_atomic(page, KM_USER0);
440 491
441 memcpy(dst + offset, src + buf->offset, sd->len); 492 memcpy(dst + offset, src + buf->offset, sd->len);
@@ -444,16 +495,18 @@ find_page:
444 } 495 }
445 496
446 ret = mapping->a_ops->commit_write(file, page, 0, sd->len); 497 ret = mapping->a_ops->commit_write(file, page, 0, sd->len);
447 if (ret < 0) 498 if (ret == AOP_TRUNCATED_PAGE) {
499 page_cache_release(page);
500 goto find_page;
501 } else if (ret)
448 goto out; 502 goto out;
449 503
450 set_page_dirty(page); 504 balance_dirty_pages_ratelimited(mapping);
451 ret = write_one_page(page, 0);
452out: 505out:
453 if (ret < 0) 506 if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
454 unlock_page(page);
455 if (!buf->stolen)
456 page_cache_release(page); 507 page_cache_release(page);
508 unlock_page(page);
509 }
457 buf->ops->unmap(info, buf); 510 buf->ops->unmap(info, buf);
458 return ret; 511 return ret;
459} 512}
@@ -461,6 +514,11 @@ out:
461typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *, 514typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
462 struct splice_desc *); 515 struct splice_desc *);
463 516
517/*
518 * Pipe input worker. Most of this logic works like a regular pipe, the
519 * key here is the 'actor' worker passed in that actually moves the data
520 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
521 */
464static ssize_t move_from_pipe(struct inode *inode, struct file *out, 522static ssize_t move_from_pipe(struct inode *inode, struct file *out,
465 size_t len, unsigned int flags, 523 size_t len, unsigned int flags,
466 splice_actor *actor) 524 splice_actor *actor)
@@ -527,6 +585,12 @@ static ssize_t move_from_pipe(struct inode *inode, struct file *out,
527 break; 585 break;
528 } 586 }
529 587
588 if (flags & SPLICE_F_NONBLOCK) {
589 if (!ret)
590 ret = -EAGAIN;
591 break;
592 }
593
530 if (signal_pending(current)) { 594 if (signal_pending(current)) {
531 if (!ret) 595 if (!ret)
532 ret = -ERESTARTSYS; 596 ret = -ERESTARTSYS;
@@ -556,21 +620,67 @@ static ssize_t move_from_pipe(struct inode *inode, struct file *out,
556 620
557} 621}
558 622
623/**
624 * generic_file_splice_write - splice data from a pipe to a file
625 * @inode: pipe inode
626 * @out: file to write to
627 * @len: number of bytes to splice
628 * @flags: splice modifier flags
629 *
630 * Will either move or copy pages (determined by @flags options) from
631 * the given pipe inode to the given file.
632 *
633 */
559ssize_t generic_file_splice_write(struct inode *inode, struct file *out, 634ssize_t generic_file_splice_write(struct inode *inode, struct file *out,
560 size_t len, unsigned int flags) 635 size_t len, unsigned int flags)
561{ 636{
562 return move_from_pipe(inode, out, len, flags, pipe_to_file); 637 struct address_space *mapping = out->f_mapping;
638 ssize_t ret = move_from_pipe(inode, out, len, flags, pipe_to_file);
639
640 /*
641 * if file or inode is SYNC and we actually wrote some data, sync it
642 */
643 if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(mapping->host))
644 && ret > 0) {
645 struct inode *inode = mapping->host;
646 int err;
647
648 mutex_lock(&inode->i_mutex);
649 err = generic_osync_inode(mapping->host, mapping,
650 OSYNC_METADATA|OSYNC_DATA);
651 mutex_unlock(&inode->i_mutex);
652
653 if (err)
654 ret = err;
655 }
656
657 return ret;
563} 658}
564 659
660EXPORT_SYMBOL(generic_file_splice_write);
661
662/**
663 * generic_splice_sendpage - splice data from a pipe to a socket
664 * @inode: pipe inode
665 * @out: socket to write to
666 * @len: number of bytes to splice
667 * @flags: splice modifier flags
668 *
669 * Will send @len bytes from the pipe to a network socket. No data copying
670 * is involved.
671 *
672 */
565ssize_t generic_splice_sendpage(struct inode *inode, struct file *out, 673ssize_t generic_splice_sendpage(struct inode *inode, struct file *out,
566 size_t len, unsigned int flags) 674 size_t len, unsigned int flags)
567{ 675{
568 return move_from_pipe(inode, out, len, flags, pipe_to_sendpage); 676 return move_from_pipe(inode, out, len, flags, pipe_to_sendpage);
569} 677}
570 678
571EXPORT_SYMBOL(generic_file_splice_write); 679EXPORT_SYMBOL(generic_splice_sendpage);
572EXPORT_SYMBOL(generic_file_splice_read);
573 680
681/*
682 * Attempt to initiate a splice from pipe to file.
683 */
574static long do_splice_from(struct inode *pipe, struct file *out, size_t len, 684static long do_splice_from(struct inode *pipe, struct file *out, size_t len,
575 unsigned int flags) 685 unsigned int flags)
576{ 686{
@@ -591,6 +701,9 @@ static long do_splice_from(struct inode *pipe, struct file *out, size_t len,
591 return out->f_op->splice_write(pipe, out, len, flags); 701 return out->f_op->splice_write(pipe, out, len, flags);
592} 702}
593 703
704/*
705 * Attempt to initiate a splice from a file to a pipe.
706 */
594static long do_splice_to(struct file *in, struct inode *pipe, size_t len, 707static long do_splice_to(struct file *in, struct inode *pipe, size_t len,
595 unsigned int flags) 708 unsigned int flags)
596{ 709{
@@ -619,6 +732,9 @@ static long do_splice_to(struct file *in, struct inode *pipe, size_t len,
619 return in->f_op->splice_read(in, pipe, len, flags); 732 return in->f_op->splice_read(in, pipe, len, flags);
620} 733}
621 734
735/*
736 * Determine where to splice to/from.
737 */
622static long do_splice(struct file *in, struct file *out, size_t len, 738static long do_splice(struct file *in, struct file *out, size_t len,
623 unsigned int flags) 739 unsigned int flags)
624{ 740{
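Editor's note: the fs/splice.c changes above add SPLICE_F_NONBLOCK handling, page stealing under SPLICE_F_MOVE, and O_SYNC handling in generic_file_splice_write(). A hedged userspace sketch of driving the syscall these helpers back: it assumes the four-argument sys_splice() form that do_splice() here services (no offset arguments), that __NR_splice is defined for the target architecture, and that the SPLICE_F_* values match the kernel header; none of that is shown in this hunk.

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#ifndef SPLICE_F_MOVE
	#define SPLICE_F_MOVE     0x01    /* assumed to match the kernel header */
	#endif
	#ifndef SPLICE_F_NONBLOCK
	#define SPLICE_F_NONBLOCK 0x02
	#endif

	/* early four-argument form: splice(fd_in, fd_out, len, flags) */
	static long xsplice(int fd_in, int fd_out, size_t len, unsigned int flags)
	{
		return syscall(__NR_splice, fd_in, fd_out, len, flags);
	}

	int main(void)
	{
		int in = open("in.dat", O_RDONLY);
		int out = open("out.dat", O_WRONLY | O_CREAT | O_TRUNC, 0644);
		int p[2];
		long n;

		if (in < 0 || out < 0 || pipe(p) < 0)
			return 1;

		/* file -> pipe, then pipe -> file; SPLICE_F_MOVE asks the kernel
		 * to steal the page cache pages instead of copying them */
		while ((n = xsplice(in, p[1], 65536, SPLICE_F_MOVE)) > 0)
			if (xsplice(p[0], out, n, SPLICE_F_MOVE) != n)
				return 1;

		return n < 0 ? 1 : 0;
	}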
diff --git a/fs/sync.c b/fs/sync.c
new file mode 100644
index 000000000000..8616006d2094
--- /dev/null
+++ b/fs/sync.c
@@ -0,0 +1,164 @@
1/*
2 * High-level sync()-related operations
3 */
4
5#include <linux/kernel.h>
6#include <linux/file.h>
7#include <linux/fs.h>
8#include <linux/module.h>
9#include <linux/writeback.h>
10#include <linux/syscalls.h>
11#include <linux/linkage.h>
12#include <linux/pagemap.h>
13
14#define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
15 SYNC_FILE_RANGE_WAIT_AFTER)
16
17/*
18 * sys_sync_file_range() permits finely controlled syncing over a segment of
19 * a file in the range offset .. (offset+nbytes-1) inclusive. If nbytes is
20 * zero then sys_sync_file_range() will operate from offset out to EOF.
21 *
22 * The flag bits are:
23 *
24 * SYNC_FILE_RANGE_WAIT_BEFORE: wait upon writeout of all pages in the range
25 * before performing the write.
26 *
27 * SYNC_FILE_RANGE_WRITE: initiate writeout of all those dirty pages in the
28 * range which are not presently under writeback.
29 *
30 * SYNC_FILE_RANGE_WAIT_AFTER: wait upon writeout of all pages in the range
31 * after performing the write.
32 *
33 * Useful combinations of the flag bits are:
34 *
35 * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE: ensures that all pages
36 * in the range which were dirty on entry to sys_sync_file_range() are placed
37 * under writeout. This is a start-write-for-data-integrity operation.
38 *
39 * SYNC_FILE_RANGE_WRITE: start writeout of all dirty pages in the range which
40 * are not presently under writeout. This is an asynchronous flush-to-disk
41 * operation. Not suitable for data integrity operations.
42 *
43 * SYNC_FILE_RANGE_WAIT_BEFORE (or SYNC_FILE_RANGE_WAIT_AFTER): wait for
44 * completion of writeout of all pages in the range. This will be used after an
45 * earlier SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE operation to wait
46 * for that operation to complete and to return the result.
47 *
48 * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE|SYNC_FILE_RANGE_WAIT_AFTER:
49 * a traditional sync() operation. This is a write-for-data-integrity operation
50 * which will ensure that all pages in the range which were dirty on entry to
51 * sys_sync_file_range() are committed to disk.
52 *
53 *
54 * SYNC_FILE_RANGE_WAIT_BEFORE and SYNC_FILE_RANGE_WAIT_AFTER will detect any
55 * I/O errors or ENOSPC conditions and will return those to the caller, after
56 * clearing the EIO and ENOSPC flags in the address_space.
57 *
58 * It should be noted that none of these operations write out the file's
59 * metadata. So unless the application is strictly performing overwrites of
60 * already-instantiated disk blocks, there are no guarantees here that the data
61 * will be available after a crash.
62 */
63asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
64 int flags)
65{
66 int ret;
67 struct file *file;
68 loff_t endbyte; /* inclusive */
69 int fput_needed;
70 umode_t i_mode;
71
72 ret = -EINVAL;
73 if (flags & ~VALID_FLAGS)
74 goto out;
75
76 endbyte = offset + nbytes;
77
78 if ((s64)offset < 0)
79 goto out;
80 if ((s64)endbyte < 0)
81 goto out;
82 if (endbyte < offset)
83 goto out;
84
85 if (sizeof(pgoff_t) == 4) {
86 if (offset >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
87 /*
88 * The range starts outside a 32 bit machine's
89 * pagecache addressing capabilities. Let it "succeed"
90 */
91 ret = 0;
92 goto out;
93 }
94 if (endbyte >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
95 /*
96 * Out to EOF
97 */
98 nbytes = 0;
99 }
100 }
101
102 if (nbytes == 0)
103 endbyte = -1;
104 else
105 endbyte--; /* inclusive */
106
107 ret = -EBADF;
108 file = fget_light(fd, &fput_needed);
109 if (!file)
110 goto out;
111
112 i_mode = file->f_dentry->d_inode->i_mode;
113 ret = -ESPIPE;
114 if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) &&
115 !S_ISLNK(i_mode))
116 goto out_put;
117
118 ret = do_sync_file_range(file, offset, endbyte, flags);
119out_put:
120 fput_light(file, fput_needed);
121out:
122 return ret;
123}
124
125/*
126 * `endbyte' is inclusive
127 */
128int do_sync_file_range(struct file *file, loff_t offset, loff_t endbyte,
129 int flags)
130{
131 int ret;
132 struct address_space *mapping;
133
134 mapping = file->f_mapping;
135 if (!mapping) {
136 ret = -EINVAL;
137 goto out;
138 }
139
140 ret = 0;
141 if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) {
142 ret = wait_on_page_writeback_range(mapping,
143 offset >> PAGE_CACHE_SHIFT,
144 endbyte >> PAGE_CACHE_SHIFT);
145 if (ret < 0)
146 goto out;
147 }
148
149 if (flags & SYNC_FILE_RANGE_WRITE) {
150 ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
151 WB_SYNC_NONE);
152 if (ret < 0)
153 goto out;
154 }
155
156 if (flags & SYNC_FILE_RANGE_WAIT_AFTER) {
157 ret = wait_on_page_writeback_range(mapping,
158 offset >> PAGE_CACHE_SHIFT,
159 endbyte >> PAGE_CACHE_SHIFT);
160 }
161out:
162 return ret;
163}
164EXPORT_SYMBOL_GPL(do_sync_file_range);
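Editor's note: the comment block in the new fs/sync.c spells out the flag semantics. A minimal userspace sketch of the "write-for-data-integrity" combination it describes, for a 64-bit architecture (32-bit ABIs split the 64-bit offset arguments), assuming __NR_sync_file_range is available and that the SYNC_FILE_RANGE_* values below match the kernel header; there was no libc wrapper when this went in.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#ifndef SYNC_FILE_RANGE_WAIT_BEFORE
	#define SYNC_FILE_RANGE_WAIT_BEFORE 1   /* assumed to match fs.h */
	#define SYNC_FILE_RANGE_WRITE       2
	#define SYNC_FILE_RANGE_WAIT_AFTER  4
	#endif

	int main(void)
	{
		int fd = open("data.bin", O_WRONLY);
		long ret;

		if (fd < 0)
			return 1;

		/* Flush and wait on the first 1 MiB only; as the comment above
		 * stresses, this never writes the file's metadata. */
		ret = syscall(__NR_sync_file_range, fd, 0LL,
			      (long long)(1 << 20),
			      SYNC_FILE_RANGE_WAIT_BEFORE |
			      SYNC_FILE_RANGE_WRITE |
			      SYNC_FILE_RANGE_WAIT_AFTER);

		printf("sync_file_range returned %ld\n", ret);
		close(fd);
		return ret ? 1 : 0;
	}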
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index f26880a4785e..6cfdc9a87772 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -50,7 +50,7 @@ static struct sysfs_dirent * sysfs_new_dirent(struct sysfs_dirent * parent_sd,
50 return sd; 50 return sd;
51} 51}
52 52
53/** 53/*
54 * 54 *
55 * Return -EEXIST if there is already a sysfs element with the same name for 55 * Return -EEXIST if there is already a sysfs element with the same name for
56 * the same parent. 56 * the same parent.
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 830f76fa098c..f1cb1ddde511 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -183,7 +183,7 @@ fill_write_buffer(struct sysfs_buffer * buffer, const char __user * buf, size_t
183 return -ENOMEM; 183 return -ENOMEM;
184 184
185 if (count >= PAGE_SIZE) 185 if (count >= PAGE_SIZE)
186 count = PAGE_SIZE; 186 count = PAGE_SIZE - 1;
187 error = copy_from_user(buffer->page,buf,count); 187 error = copy_from_user(buffer->page,buf,count);
188 buffer->needs_read_fill = 1; 188 buffer->needs_read_fill = 1;
189 return error ? -EFAULT : count; 189 return error ? -EFAULT : count;
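Editor's note: the one-character change above caps a sysfs write at PAGE_SIZE - 1 rather than PAGE_SIZE, presumably so the page-sized buffer always keeps room for a terminating NUL before it reaches the attribute's store() method, which typically treats it as a C string. A tiny userspace illustration of the off-by-one being avoided (names are stand-ins, not from the patch):

	#include <stdio.h>
	#include <string.h>

	#define PAGE_SIZE 4096

	int main(void)
	{
		char page[PAGE_SIZE];
		size_t count = PAGE_SIZE;       /* what userspace asked to write */

		if (count >= PAGE_SIZE)
			count = PAGE_SIZE - 1;  /* leave one byte spare */

		memset(page, 'x', count);
		page[count] = '\0';             /* safe only because of the cap */

		printf("stored %zu bytes\n", strlen(page));
		return 0;
	}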
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 4c29ac41ac3e..f0b347bd12ca 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -175,8 +175,7 @@ const unsigned char * sysfs_get_name(struct sysfs_dirent *sd)
175 struct bin_attribute * bin_attr; 175 struct bin_attribute * bin_attr;
176 struct sysfs_symlink * sl; 176 struct sysfs_symlink * sl;
177 177
178 if (!sd || !sd->s_element) 178 BUG_ON(!sd || !sd->s_element);
179 BUG();
180 179
181 switch (sd->s_type) { 180 switch (sd->s_type) {
182 case SYSFS_DIR: 181 case SYSFS_DIR:
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
index 8c66e9270dd6..d7074341ee87 100644
--- a/fs/sysv/dir.c
+++ b/fs/sysv/dir.c
@@ -253,8 +253,7 @@ int sysv_delete_entry(struct sysv_dir_entry *de, struct page *page)
253 253
254 lock_page(page); 254 lock_page(page);
255 err = mapping->a_ops->prepare_write(NULL, page, from, to); 255 err = mapping->a_ops->prepare_write(NULL, page, from, to);
256 if (err) 256 BUG_ON(err);
257 BUG();
258 de->inode = 0; 257 de->inode = 0;
259 err = dir_commit_chunk(page, from, to); 258 err = dir_commit_chunk(page, from, to);
260 dir_put_page(page); 259 dir_put_page(page);
@@ -353,8 +352,7 @@ void sysv_set_link(struct sysv_dir_entry *de, struct page *page,
353 352
354 lock_page(page); 353 lock_page(page);
355 err = page->mapping->a_ops->prepare_write(NULL, page, from, to); 354 err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
356 if (err) 355 BUG_ON(err);
357 BUG();
358 de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino); 356 de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
359 err = dir_commit_chunk(page, from, to); 357 err = dir_commit_chunk(page, from, to);
360 dir_put_page(page); 358 dir_put_page(page);
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 81e0e8459af1..2983afd5e7fd 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -312,12 +312,10 @@ static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head
312 err = 0; 312 err = 0;
313 313
314 bh = inode_getblk(inode, block, &err, &phys, &new); 314 bh = inode_getblk(inode, block, &err, &phys, &new);
315 if (bh) 315 BUG_ON(bh);
316 BUG();
317 if (err) 316 if (err)
318 goto abort; 317 goto abort;
319 if (!phys) 318 BUG_ON(!phys);
320 BUG();
321 319
322 if (new) 320 if (new)
323 set_buffer_new(bh_result); 321 set_buffer_new(bh_result);
diff --git a/fs/vfat/namei.c b/fs/vfat/namei.c
index ef46939c0c1a..a56cec3be5f0 100644
--- a/fs/vfat/namei.c
+++ b/fs/vfat/namei.c
@@ -185,24 +185,6 @@ static int vfat_valid_longname(const unsigned char *name, unsigned int len)
185 return -EINVAL; 185 return -EINVAL;
186 if (len >= 256) 186 if (len >= 256)
187 return -ENAMETOOLONG; 187 return -ENAMETOOLONG;
188
189 /* MS-DOS "device special files" */
190 if (len == 3 || (len > 3 && name[3] == '.')) { /* basename == 3 */
191 if (!strnicmp(name, "aux", 3) ||
192 !strnicmp(name, "con", 3) ||
193 !strnicmp(name, "nul", 3) ||
194 !strnicmp(name, "prn", 3))
195 return -EINVAL;
196 }
197 if (len == 4 || (len > 4 && name[4] == '.')) { /* basename == 4 */
198 /* "com1", "com2", ... */
199 if ('1' <= name[3] && name[3] <= '9') {
200 if (!strnicmp(name, "com", 3) ||
201 !strnicmp(name, "lpt", 3))
202 return -EINVAL;
203 }
204 }
205
206 return 0; 188 return 0;
207} 189}
208 190
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index 85997b1205f5..ae4c4754ed31 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -69,7 +69,6 @@ __xfs_file_read(
69 return rval; 69 return rval;
70} 70}
71 71
72
73STATIC ssize_t 72STATIC ssize_t
74xfs_file_aio_read( 73xfs_file_aio_read(
75 struct kiocb *iocb, 74 struct kiocb *iocb,
@@ -90,7 +89,6 @@ xfs_file_aio_read_invis(
90 return __xfs_file_read(iocb, buf, IO_ISAIO|IO_INVIS, count, pos); 89 return __xfs_file_read(iocb, buf, IO_ISAIO|IO_INVIS, count, pos);
91} 90}
92 91
93
94STATIC inline ssize_t 92STATIC inline ssize_t
95__xfs_file_write( 93__xfs_file_write(
96 struct kiocb *iocb, 94 struct kiocb *iocb,
@@ -113,7 +111,6 @@ __xfs_file_write(
113 return rval; 111 return rval;
114} 112}
115 113
116
117STATIC ssize_t 114STATIC ssize_t
118xfs_file_aio_write( 115xfs_file_aio_write(
119 struct kiocb *iocb, 116 struct kiocb *iocb,
@@ -134,7 +131,6 @@ xfs_file_aio_write_invis(
134 return __xfs_file_write(iocb, buf, IO_ISAIO|IO_INVIS, count, pos); 131 return __xfs_file_write(iocb, buf, IO_ISAIO|IO_INVIS, count, pos);
135} 132}
136 133
137
138STATIC inline ssize_t 134STATIC inline ssize_t
139__xfs_file_readv( 135__xfs_file_readv(
140 struct file *file, 136 struct file *file,
@@ -179,7 +175,6 @@ xfs_file_readv_invis(
179 return __xfs_file_readv(file, iov, IO_INVIS, nr_segs, ppos); 175 return __xfs_file_readv(file, iov, IO_INVIS, nr_segs, ppos);
180} 176}
181 177
182
183STATIC inline ssize_t 178STATIC inline ssize_t
184__xfs_file_writev( 179__xfs_file_writev(
185 struct file *file, 180 struct file *file,
@@ -204,7 +199,6 @@ __xfs_file_writev(
204 return rval; 199 return rval;
205} 200}
206 201
207
208STATIC ssize_t 202STATIC ssize_t
209xfs_file_writev( 203xfs_file_writev(
210 struct file *file, 204 struct file *file,
@@ -228,7 +222,7 @@ xfs_file_writev_invis(
228STATIC ssize_t 222STATIC ssize_t
229xfs_file_sendfile( 223xfs_file_sendfile(
230 struct file *filp, 224 struct file *filp,
231 loff_t *ppos, 225 loff_t *pos,
232 size_t count, 226 size_t count,
233 read_actor_t actor, 227 read_actor_t actor,
234 void *target) 228 void *target)
@@ -236,10 +230,80 @@ xfs_file_sendfile(
236 vnode_t *vp = vn_from_inode(filp->f_dentry->d_inode); 230 vnode_t *vp = vn_from_inode(filp->f_dentry->d_inode);
237 ssize_t rval; 231 ssize_t rval;
238 232
239 VOP_SENDFILE(vp, filp, ppos, 0, count, actor, target, NULL, rval); 233 VOP_SENDFILE(vp, filp, pos, 0, count, actor, target, NULL, rval);
240 return rval; 234 return rval;
241} 235}
242 236
237STATIC ssize_t
238xfs_file_sendfile_invis(
239 struct file *filp,
240 loff_t *pos,
241 size_t count,
242 read_actor_t actor,
243 void *target)
244{
245 vnode_t *vp = vn_from_inode(filp->f_dentry->d_inode);
246 ssize_t rval;
247
248 VOP_SENDFILE(vp, filp, pos, IO_INVIS, count, actor, target, NULL, rval);
249 return rval;
250}
251
252STATIC ssize_t
253xfs_file_splice_read(
254 struct file *infilp,
255 struct inode *pipe,
256 size_t len,
257 unsigned int flags)
258{
259 vnode_t *vp = vn_from_inode(infilp->f_dentry->d_inode);
260 ssize_t rval;
261
262 VOP_SPLICE_READ(vp, infilp, pipe, len, flags, 0, NULL, rval);
263 return rval;
264}
265
266STATIC ssize_t
267xfs_file_splice_read_invis(
268 struct file *infilp,
269 struct inode *pipe,
270 size_t len,
271 unsigned int flags)
272{
273 vnode_t *vp = vn_from_inode(infilp->f_dentry->d_inode);
274 ssize_t rval;
275
276 VOP_SPLICE_READ(vp, infilp, pipe, len, flags, IO_INVIS, NULL, rval);
277 return rval;
278}
279
280STATIC ssize_t
281xfs_file_splice_write(
282 struct inode *pipe,
283 struct file *outfilp,
284 size_t len,
285 unsigned int flags)
286{
287 vnode_t *vp = vn_from_inode(outfilp->f_dentry->d_inode);
288 ssize_t rval;
289
290 VOP_SPLICE_WRITE(vp, pipe, outfilp, len, flags, 0, NULL, rval);
291 return rval;
292}
293
294STATIC ssize_t
295xfs_file_splice_write_invis(
296 struct inode *pipe,
297 struct file *outfilp,
298 size_t len,
299 unsigned int flags)
300{
301 vnode_t *vp = vn_from_inode(outfilp->f_dentry->d_inode);
302 ssize_t rval;
303
304 VOP_SPLICE_WRITE(vp, pipe, outfilp, len, flags, IO_INVIS, NULL, rval);
305 return rval;
306}
243 307
244STATIC int 308STATIC int
245xfs_file_open( 309xfs_file_open(
@@ -251,13 +315,10 @@ xfs_file_open(
251 315
252 if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS) 316 if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
253 return -EFBIG; 317 return -EFBIG;
254
255 ASSERT(vp);
256 VOP_OPEN(vp, NULL, error); 318 VOP_OPEN(vp, NULL, error);
257 return -error; 319 return -error;
258} 320}
259 321
260
261STATIC int 322STATIC int
262xfs_file_release( 323xfs_file_release(
263 struct inode *inode, 324 struct inode *inode,
@@ -271,7 +332,6 @@ xfs_file_release(
271 return -error; 332 return -error;
272} 333}
273 334
274
275STATIC int 335STATIC int
276xfs_file_fsync( 336xfs_file_fsync(
277 struct file *filp, 337 struct file *filp,
@@ -285,21 +345,11 @@ xfs_file_fsync(
285 345
286 if (datasync) 346 if (datasync)
287 flags |= FSYNC_DATA; 347 flags |= FSYNC_DATA;
288
289 ASSERT(vp);
290 VOP_FSYNC(vp, flags, NULL, (xfs_off_t)0, (xfs_off_t)-1, error); 348 VOP_FSYNC(vp, flags, NULL, (xfs_off_t)0, (xfs_off_t)-1, error);
291 return -error; 349 return -error;
292} 350}
293 351
294/*
295 * xfs_file_readdir maps to VOP_READDIR().
296 * We need to build a uio, cred, ...
297 */
298
299#define nextdp(dp) ((struct xfs_dirent *)((char *)(dp) + (dp)->d_reclen))
300
301#ifdef CONFIG_XFS_DMAPI 352#ifdef CONFIG_XFS_DMAPI
302
303STATIC struct page * 353STATIC struct page *
304xfs_vm_nopage( 354xfs_vm_nopage(
305 struct vm_area_struct *area, 355 struct vm_area_struct *area,
@@ -319,10 +369,8 @@ xfs_vm_nopage(
319 369
320 return filemap_nopage(area, address, type); 370 return filemap_nopage(area, address, type);
321} 371}
322
323#endif /* CONFIG_XFS_DMAPI */ 372#endif /* CONFIG_XFS_DMAPI */
324 373
325
326STATIC int 374STATIC int
327xfs_file_readdir( 375xfs_file_readdir(
328 struct file *filp, 376 struct file *filp,
@@ -330,7 +378,7 @@ xfs_file_readdir(
330 filldir_t filldir) 378 filldir_t filldir)
331{ 379{
332 int error = 0; 380 int error = 0;
333 vnode_t *vp; 381 vnode_t *vp = vn_from_inode(filp->f_dentry->d_inode);
334 uio_t uio; 382 uio_t uio;
335 iovec_t iov; 383 iovec_t iov;
336 int eof = 0; 384 int eof = 0;
@@ -340,9 +388,6 @@ xfs_file_readdir(
340 xfs_off_t start_offset, curr_offset; 388 xfs_off_t start_offset, curr_offset;
341 xfs_dirent_t *dbp = NULL; 389 xfs_dirent_t *dbp = NULL;
342 390
343 vp = vn_from_inode(filp->f_dentry->d_inode);
344 ASSERT(vp);
345
346 /* Try fairly hard to get memory */ 391 /* Try fairly hard to get memory */
347 do { 392 do {
348 if ((read_buf = (caddr_t)kmalloc(rlen, GFP_KERNEL))) 393 if ((read_buf = (caddr_t)kmalloc(rlen, GFP_KERNEL)))
@@ -387,7 +432,7 @@ xfs_file_readdir(
387 } 432 }
388 size -= dbp->d_reclen; 433 size -= dbp->d_reclen;
389 curr_offset = (loff_t)dbp->d_off /* & 0x7fffffff */; 434 curr_offset = (loff_t)dbp->d_off /* & 0x7fffffff */;
390 dbp = nextdp(dbp); 435 dbp = (xfs_dirent_t *)((char *)dbp + dbp->d_reclen);
391 } 436 }
392 } 437 }
393done: 438done:
@@ -402,7 +447,6 @@ done:
402 return -error; 447 return -error;
403} 448}
404 449
405
406STATIC int 450STATIC int
407xfs_file_mmap( 451xfs_file_mmap(
408 struct file *filp, 452 struct file *filp,
@@ -457,11 +501,10 @@ xfs_file_ioctl_invis(
457 unsigned int cmd, 501 unsigned int cmd,
458 unsigned long arg) 502 unsigned long arg)
459{ 503{
460 int error;
461 struct inode *inode = filp->f_dentry->d_inode; 504 struct inode *inode = filp->f_dentry->d_inode;
462 vnode_t *vp = vn_from_inode(inode); 505 vnode_t *vp = vn_from_inode(inode);
506 int error;
463 507
464 ASSERT(vp);
465 VOP_IOCTL(vp, inode, filp, IO_INVIS, cmd, (void __user *)arg, error); 508 VOP_IOCTL(vp, inode, filp, IO_INVIS, cmd, (void __user *)arg, error);
466 VMODIFY(vp); 509 VMODIFY(vp);
467 510
@@ -537,6 +580,8 @@ const struct file_operations xfs_file_operations = {
537 .aio_read = xfs_file_aio_read, 580 .aio_read = xfs_file_aio_read,
538 .aio_write = xfs_file_aio_write, 581 .aio_write = xfs_file_aio_write,
539 .sendfile = xfs_file_sendfile, 582 .sendfile = xfs_file_sendfile,
583 .splice_read = xfs_file_splice_read,
584 .splice_write = xfs_file_splice_write,
540 .unlocked_ioctl = xfs_file_ioctl, 585 .unlocked_ioctl = xfs_file_ioctl,
541#ifdef CONFIG_COMPAT 586#ifdef CONFIG_COMPAT
542 .compat_ioctl = xfs_file_compat_ioctl, 587 .compat_ioctl = xfs_file_compat_ioctl,
@@ -558,7 +603,9 @@ const struct file_operations xfs_invis_file_operations = {
558 .writev = xfs_file_writev_invis, 603 .writev = xfs_file_writev_invis,
559 .aio_read = xfs_file_aio_read_invis, 604 .aio_read = xfs_file_aio_read_invis,
560 .aio_write = xfs_file_aio_write_invis, 605 .aio_write = xfs_file_aio_write_invis,
561 .sendfile = xfs_file_sendfile, 606 .sendfile = xfs_file_sendfile_invis,
607 .splice_read = xfs_file_splice_read_invis,
608 .splice_write = xfs_file_splice_write_invis,
562 .unlocked_ioctl = xfs_file_ioctl_invis, 609 .unlocked_ioctl = xfs_file_ioctl_invis,
563#ifdef CONFIG_COMPAT 610#ifdef CONFIG_COMPAT
564 .compat_ioctl = xfs_file_compat_invis_ioctl, 611 .compat_ioctl = xfs_file_compat_invis_ioctl,
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 1fe09f2d6519..e9fe43d74768 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -103,6 +103,7 @@
103 */ 103 */
104#undef HAVE_REFCACHE /* reference cache not needed for NFS in 2.6 */ 104#undef HAVE_REFCACHE /* reference cache not needed for NFS in 2.6 */
105#define HAVE_SENDFILE /* sendfile(2) exists in 2.6, but not in 2.4 */ 105#define HAVE_SENDFILE /* sendfile(2) exists in 2.6, but not in 2.4 */
106#define HAVE_SPLICE /* a splice(2) exists in 2.6, but not in 2.4 */
106#ifdef CONFIG_SMP 107#ifdef CONFIG_SMP
107#define HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */ 108#define HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */
108#else 109#else
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 84ddf1893894..90cd314acbaa 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -301,36 +301,23 @@ xfs_sendfile(
301 void *target, 301 void *target,
302 cred_t *credp) 302 cred_t *credp)
303{ 303{
304 xfs_inode_t *ip = XFS_BHVTOI(bdp);
305 xfs_mount_t *mp = ip->i_mount;
304 ssize_t ret; 306 ssize_t ret;
305 xfs_fsize_t n;
306 xfs_inode_t *ip;
307 xfs_mount_t *mp;
308 vnode_t *vp;
309
310 ip = XFS_BHVTOI(bdp);
311 vp = BHV_TO_VNODE(bdp);
312 mp = ip->i_mount;
313 307
314 XFS_STATS_INC(xs_read_calls); 308 XFS_STATS_INC(xs_read_calls);
315 309 if (XFS_FORCED_SHUTDOWN(mp))
316 n = XFS_MAXIOFFSET(mp) - *offset;
317 if ((n <= 0) || (count == 0))
318 return 0;
319
320 if (n < count)
321 count = n;
322
323 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
324 return -EIO; 310 return -EIO;
325 311
326 xfs_ilock(ip, XFS_IOLOCK_SHARED); 312 xfs_ilock(ip, XFS_IOLOCK_SHARED);
327 313
328 if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) && 314 if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_READ) &&
329 (!(ioflags & IO_INVIS))) { 315 (!(ioflags & IO_INVIS))) {
330 vrwlock_t locktype = VRWLOCK_READ; 316 vrwlock_t locktype = VRWLOCK_READ;
331 int error; 317 int error;
332 318
333 error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp), *offset, count, 319 error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp),
320 *offset, count,
334 FILP_DELAY_FLAG(filp), &locktype); 321 FILP_DELAY_FLAG(filp), &locktype);
335 if (error) { 322 if (error) {
336 xfs_iunlock(ip, XFS_IOLOCK_SHARED); 323 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
@@ -340,12 +327,96 @@ xfs_sendfile(
340 xfs_rw_enter_trace(XFS_SENDFILE_ENTER, &ip->i_iocore, 327 xfs_rw_enter_trace(XFS_SENDFILE_ENTER, &ip->i_iocore,
341 (void *)(unsigned long)target, count, *offset, ioflags); 328 (void *)(unsigned long)target, count, *offset, ioflags);
342 ret = generic_file_sendfile(filp, offset, count, actor, target); 329 ret = generic_file_sendfile(filp, offset, count, actor, target);
330 if (ret > 0)
331 XFS_STATS_ADD(xs_read_bytes, ret);
343 332
344 xfs_iunlock(ip, XFS_IOLOCK_SHARED); 333 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
334 return ret;
335}
345 336
337ssize_t
338xfs_splice_read(
339 bhv_desc_t *bdp,
340 struct file *infilp,
341 struct inode *pipe,
342 size_t count,
343 int flags,
344 int ioflags,
345 cred_t *credp)
346{
347 xfs_inode_t *ip = XFS_BHVTOI(bdp);
348 xfs_mount_t *mp = ip->i_mount;
349 ssize_t ret;
350
351 XFS_STATS_INC(xs_read_calls);
352 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
353 return -EIO;
354
355 xfs_ilock(ip, XFS_IOLOCK_SHARED);
356
357 if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_READ) &&
358 (!(ioflags & IO_INVIS))) {
359 vrwlock_t locktype = VRWLOCK_READ;
360 int error;
361
362 error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp),
363 infilp->f_pos, count,
364 FILP_DELAY_FLAG(infilp), &locktype);
365 if (error) {
366 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
367 return -error;
368 }
369 }
370 xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, &ip->i_iocore,
371 pipe, count, infilp->f_pos, ioflags);
372 ret = generic_file_splice_read(infilp, pipe, count, flags);
346 if (ret > 0) 373 if (ret > 0)
347 XFS_STATS_ADD(xs_read_bytes, ret); 374 XFS_STATS_ADD(xs_read_bytes, ret);
348 375
376 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
377 return ret;
378}
379
380ssize_t
381xfs_splice_write(
382 bhv_desc_t *bdp,
383 struct inode *pipe,
384 struct file *outfilp,
385 size_t count,
386 int flags,
387 int ioflags,
388 cred_t *credp)
389{
390 xfs_inode_t *ip = XFS_BHVTOI(bdp);
391 xfs_mount_t *mp = ip->i_mount;
392 ssize_t ret;
393
394 XFS_STATS_INC(xs_write_calls);
395 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
396 return -EIO;
397
398 xfs_ilock(ip, XFS_IOLOCK_EXCL);
399
400 if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_WRITE) &&
401 (!(ioflags & IO_INVIS))) {
402 vrwlock_t locktype = VRWLOCK_WRITE;
403 int error;
404
405 error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, BHV_TO_VNODE(bdp),
406 outfilp->f_pos, count,
407 FILP_DELAY_FLAG(outfilp), &locktype);
408 if (error) {
409 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
410 return -error;
411 }
412 }
413 xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, &ip->i_iocore,
414 pipe, count, outfilp->f_pos, ioflags);
415 ret = generic_file_splice_write(pipe, outfilp, count, flags);
416 if (ret > 0)
417 XFS_STATS_ADD(xs_write_bytes, ret);
418
419 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
349 return ret; 420 return ret;
350} 421}
351 422
@@ -363,7 +434,7 @@ xfs_zero_last_block(
363 xfs_fsize_t end_size) 434 xfs_fsize_t end_size)
364{ 435{
365 xfs_fileoff_t last_fsb; 436 xfs_fileoff_t last_fsb;
366 xfs_mount_t *mp; 437 xfs_mount_t *mp = io->io_mount;
367 int nimaps; 438 int nimaps;
368 int zero_offset; 439 int zero_offset;
369 int zero_len; 440 int zero_len;
@@ -373,8 +444,6 @@ xfs_zero_last_block(
373 444
374 ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0); 445 ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);
375 446
376 mp = io->io_mount;
377
378 zero_offset = XFS_B_FSB_OFFSET(mp, isize); 447 zero_offset = XFS_B_FSB_OFFSET(mp, isize);
379 if (zero_offset == 0) { 448 if (zero_offset == 0) {
380 /* 449 /*
@@ -405,10 +474,9 @@ xfs_zero_last_block(
405 * don't deadlock when the buffer cache calls back to us. 474 * don't deadlock when the buffer cache calls back to us.
406 */ 475 */
407 XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD); 476 XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD);
408 loff = XFS_FSB_TO_B(mp, last_fsb);
409 477
478 loff = XFS_FSB_TO_B(mp, last_fsb);
410 zero_len = mp->m_sb.sb_blocksize - zero_offset; 479 zero_len = mp->m_sb.sb_blocksize - zero_offset;
411
412 error = xfs_iozero(ip, loff + zero_offset, zero_len, end_size); 480 error = xfs_iozero(ip, loff + zero_offset, zero_len, end_size);
413 481
414 XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); 482 XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
@@ -441,7 +509,7 @@ xfs_zero_eof(
441 xfs_fileoff_t zero_count_fsb; 509 xfs_fileoff_t zero_count_fsb;
442 xfs_fileoff_t last_fsb; 510 xfs_fileoff_t last_fsb;
443 xfs_extlen_t buf_len_fsb; 511 xfs_extlen_t buf_len_fsb;
444 xfs_mount_t *mp; 512 xfs_mount_t *mp = io->io_mount;
445 int nimaps; 513 int nimaps;
446 int error = 0; 514 int error = 0;
447 xfs_bmbt_irec_t imap; 515 xfs_bmbt_irec_t imap;
@@ -450,8 +518,6 @@ xfs_zero_eof(
450 ASSERT(ismrlocked(io->io_iolock, MR_UPDATE)); 518 ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
451 ASSERT(offset > isize); 519 ASSERT(offset > isize);
452 520
453 mp = io->io_mount;
454
455 /* 521 /*
456 * First handle zeroing the block on which isize resides. 522 * First handle zeroing the block on which isize resides.
457 * We only zero a part of that block so it is handled specially. 523 * We only zero a part of that block so it is handled specially.
diff --git a/fs/xfs/linux-2.6/xfs_lrw.h b/fs/xfs/linux-2.6/xfs_lrw.h
index 38864a88d42d..eaa5659713fb 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.h
+++ b/fs/xfs/linux-2.6/xfs_lrw.h
@@ -60,6 +60,8 @@ struct xfs_iomap;
60#define XFS_IOMAP_ALLOC_ENTER 25 60#define XFS_IOMAP_ALLOC_ENTER 25
61#define XFS_IOMAP_ALLOC_MAP 26 61#define XFS_IOMAP_ALLOC_MAP 26
62#define XFS_IOMAP_UNWRITTEN 27 62#define XFS_IOMAP_UNWRITTEN 27
63#define XFS_SPLICE_READ_ENTER 28
64#define XFS_SPLICE_WRITE_ENTER 29
63extern void xfs_rw_enter_trace(int, struct xfs_iocore *, 65extern void xfs_rw_enter_trace(int, struct xfs_iocore *,
64 void *, size_t, loff_t, int); 66 void *, size_t, loff_t, int);
65extern void xfs_inval_cached_trace(struct xfs_iocore *, 67extern void xfs_inval_cached_trace(struct xfs_iocore *,
@@ -78,6 +80,7 @@ extern int xfs_bmap(struct bhv_desc *, xfs_off_t, ssize_t, int,
78 struct xfs_iomap *, int *); 80 struct xfs_iomap *, int *);
79extern int xfsbdstrat(struct xfs_mount *, struct xfs_buf *); 81extern int xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
80extern int xfs_bdstrat_cb(struct xfs_buf *); 82extern int xfs_bdstrat_cb(struct xfs_buf *);
83extern int xfs_dev_is_read_only(struct xfs_mount *, char *);
81 84
82extern int xfs_zero_eof(struct vnode *, struct xfs_iocore *, xfs_off_t, 85extern int xfs_zero_eof(struct vnode *, struct xfs_iocore *, xfs_off_t,
83 xfs_fsize_t, xfs_fsize_t); 86 xfs_fsize_t, xfs_fsize_t);
@@ -90,7 +93,11 @@ extern ssize_t xfs_write(struct bhv_desc *, struct kiocb *,
90extern ssize_t xfs_sendfile(struct bhv_desc *, struct file *, 93extern ssize_t xfs_sendfile(struct bhv_desc *, struct file *,
91 loff_t *, int, size_t, read_actor_t, 94 loff_t *, int, size_t, read_actor_t,
92 void *, struct cred *); 95 void *, struct cred *);
93 96extern ssize_t xfs_splice_read(struct bhv_desc *, struct file *,
94extern int xfs_dev_is_read_only(struct xfs_mount *, char *); 97 struct inode *, size_t, int, int,
98 struct cred *);
99extern ssize_t xfs_splice_write(struct bhv_desc *, struct inode *,
100 struct file *, size_t, int, int,
101 struct cred *);
95 102
96#endif /* __XFS_LRW_H__ */ 103#endif /* __XFS_LRW_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 1884300417e3..68f4793e8a11 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -67,7 +67,8 @@ mempool_t *xfs_ioend_pool;
67 67
68STATIC struct xfs_mount_args * 68STATIC struct xfs_mount_args *
69xfs_args_allocate( 69xfs_args_allocate(
70 struct super_block *sb) 70 struct super_block *sb,
71 int silent)
71{ 72{
72 struct xfs_mount_args *args; 73 struct xfs_mount_args *args;
73 74
@@ -80,8 +81,8 @@ xfs_args_allocate(
80 args->flags |= XFSMNT_DIRSYNC; 81 args->flags |= XFSMNT_DIRSYNC;
81 if (sb->s_flags & MS_SYNCHRONOUS) 82 if (sb->s_flags & MS_SYNCHRONOUS)
82 args->flags |= XFSMNT_WSYNC; 83 args->flags |= XFSMNT_WSYNC;
83 84 if (silent)
84 /* Default to 32 bit inodes on Linux all the time */ 85 args->flags |= XFSMNT_QUIET;
85 args->flags |= XFSMNT_32BITINODES; 86 args->flags |= XFSMNT_32BITINODES;
86 87
87 return args; 88 return args;
@@ -719,7 +720,7 @@ xfs_fs_remount(
719 char *options) 720 char *options)
720{ 721{
721 vfs_t *vfsp = vfs_from_sb(sb); 722 vfs_t *vfsp = vfs_from_sb(sb);
722 struct xfs_mount_args *args = xfs_args_allocate(sb); 723 struct xfs_mount_args *args = xfs_args_allocate(sb, 0);
723 int error; 724 int error;
724 725
725 VFS_PARSEARGS(vfsp, options, args, 1, error); 726 VFS_PARSEARGS(vfsp, options, args, 1, error);
@@ -825,7 +826,7 @@ xfs_fs_fill_super(
825{ 826{
826 vnode_t *rootvp; 827 vnode_t *rootvp;
827 struct vfs *vfsp = vfs_allocate(sb); 828 struct vfs *vfsp = vfs_allocate(sb);
828 struct xfs_mount_args *args = xfs_args_allocate(sb); 829 struct xfs_mount_args *args = xfs_args_allocate(sb, silent);
829 struct kstatfs statvfs; 830 struct kstatfs statvfs;
830 int error, error2; 831 int error, error2;
831 832
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index 06f5845e9568..6f1c79a28f8b 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -173,6 +173,12 @@ typedef ssize_t (*vop_write_t)(bhv_desc_t *, struct kiocb *,
173typedef ssize_t (*vop_sendfile_t)(bhv_desc_t *, struct file *, 173typedef ssize_t (*vop_sendfile_t)(bhv_desc_t *, struct file *,
174 loff_t *, int, size_t, read_actor_t, 174 loff_t *, int, size_t, read_actor_t,
175 void *, struct cred *); 175 void *, struct cred *);
176typedef ssize_t (*vop_splice_read_t)(bhv_desc_t *, struct file *,
177 struct inode *, size_t, int, int,
178 struct cred *);
179typedef ssize_t (*vop_splice_write_t)(bhv_desc_t *, struct inode *,
180 struct file *, size_t, int, int,
181 struct cred *);
176typedef int (*vop_ioctl_t)(bhv_desc_t *, struct inode *, struct file *, 182typedef int (*vop_ioctl_t)(bhv_desc_t *, struct inode *, struct file *,
177 int, unsigned int, void __user *); 183 int, unsigned int, void __user *);
178typedef int (*vop_getattr_t)(bhv_desc_t *, struct vattr *, int, 184typedef int (*vop_getattr_t)(bhv_desc_t *, struct vattr *, int,
@@ -231,6 +237,8 @@ typedef struct vnodeops {
231 vop_read_t vop_read; 237 vop_read_t vop_read;
232 vop_write_t vop_write; 238 vop_write_t vop_write;
233 vop_sendfile_t vop_sendfile; 239 vop_sendfile_t vop_sendfile;
240 vop_splice_read_t vop_splice_read;
241 vop_splice_write_t vop_splice_write;
234 vop_ioctl_t vop_ioctl; 242 vop_ioctl_t vop_ioctl;
235 vop_getattr_t vop_getattr; 243 vop_getattr_t vop_getattr;
236 vop_setattr_t vop_setattr; 244 vop_setattr_t vop_setattr;
@@ -276,6 +284,10 @@ typedef struct vnodeops {
276 rv = _VOP_(vop_write, vp)((vp)->v_fbhv,file,iov,segs,offset,ioflags,cr) 284 rv = _VOP_(vop_write, vp)((vp)->v_fbhv,file,iov,segs,offset,ioflags,cr)
277#define VOP_SENDFILE(vp,f,off,ioflags,cnt,act,targ,cr,rv) \ 285#define VOP_SENDFILE(vp,f,off,ioflags,cnt,act,targ,cr,rv) \
278 rv = _VOP_(vop_sendfile, vp)((vp)->v_fbhv,f,off,ioflags,cnt,act,targ,cr) 286 rv = _VOP_(vop_sendfile, vp)((vp)->v_fbhv,f,off,ioflags,cnt,act,targ,cr)
287#define VOP_SPLICE_READ(vp,f,pipe,cnt,fl,iofl,cr,rv) \
288 rv = _VOP_(vop_splice_read, vp)((vp)->v_fbhv,f,pipe,cnt,fl,iofl,cr)
289#define VOP_SPLICE_WRITE(vp,f,pipe,cnt,fl,iofl,cr,rv) \
290 rv = _VOP_(vop_splice_write, vp)((vp)->v_fbhv,f,pipe,cnt,fl,iofl,cr)
279#define VOP_BMAP(vp,of,sz,rw,b,n,rv) \ 291#define VOP_BMAP(vp,of,sz,rw,b,n,rv) \
280 rv = _VOP_(vop_bmap, vp)((vp)->v_fbhv,of,sz,rw,b,n) 292 rv = _VOP_(vop_bmap, vp)((vp)->v_fbhv,of,sz,rw,b,n)
281#define VOP_OPEN(vp, cr, rv) \ 293#define VOP_OPEN(vp, cr, rv) \
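Editor's note: the new VOP_SPLICE_READ/VOP_SPLICE_WRITE entries follow the existing XFS behaviour-layer pattern — a typedef, a slot in the vnodeops table, and a dispatch macro — with xfs_splice_read()/xfs_splice_write() in xfs_lrw.c as the functions the slots point at. A rough userspace analogue of that ops-table dispatch, with stand-in types and names that are not the real XFS definitions:

	#include <stdio.h>

	typedef long (*vop_splice_read_t)(void *bhv, const char *pipe, long len);

	typedef struct vnodeops {
		vop_splice_read_t vop_splice_read;
		/* ... other slots ... */
	} vnodeops_t;

	typedef struct vnode {
		vnodeops_t *ops;        /* stands in for the behaviour chain */
		void *v_fbhv;
	} vnode_t;

	#define VOP_SPLICE_READ(vp, pipe, len, rv) \
		((rv) = (vp)->ops->vop_splice_read((vp)->v_fbhv, (pipe), (len)))

	static long fake_splice_read(void *bhv, const char *pipe, long len)
	{
		printf("splice_read via ops table: %s, %ld bytes\n", pipe, len);
		return len;
	}

	int main(void)
	{
		vnodeops_t ops = { .vop_splice_read = fake_splice_read };
		vnode_t vn = { .ops = &ops, .v_fbhv = NULL };
		long rv;

		VOP_SPLICE_READ(&vn, "pipe0", 4096, rv);
		return rv == 4096 ? 0 : 1;
	}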
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 73c1e5e80c07..7fb5eca9bd50 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -2624,7 +2624,7 @@ xfs_qm_vop_chown_reserve(
2624{ 2624{
2625 int error; 2625 int error;
2626 xfs_mount_t *mp; 2626 xfs_mount_t *mp;
2627 uint delblks, blkflags; 2627 uint delblks, blkflags, prjflags = 0;
2628 xfs_dquot_t *unresudq, *unresgdq, *delblksudq, *delblksgdq; 2628 xfs_dquot_t *unresudq, *unresgdq, *delblksudq, *delblksgdq;
2629 2629
2630 ASSERT(XFS_ISLOCKED_INODE(ip)); 2630 ASSERT(XFS_ISLOCKED_INODE(ip));
@@ -2650,10 +2650,13 @@ xfs_qm_vop_chown_reserve(
2650 } 2650 }
2651 } 2651 }
2652 if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) { 2652 if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) {
2653 if ((XFS_IS_GQUOTA_ON(ip->i_mount) && 2653 if (XFS_IS_PQUOTA_ON(ip->i_mount) &&
2654 ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) || 2654 ip->i_d.di_projid != be32_to_cpu(gdqp->q_core.d_id))
2655 (XFS_IS_PQUOTA_ON(ip->i_mount) && 2655 prjflags = XFS_QMOPT_ENOSPC;
2656 ip->i_d.di_projid != be32_to_cpu(gdqp->q_core.d_id))) { 2656
2657 if (prjflags ||
2658 (XFS_IS_GQUOTA_ON(ip->i_mount) &&
2659 ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) {
2657 delblksgdq = gdqp; 2660 delblksgdq = gdqp;
2658 if (delblks) { 2661 if (delblks) {
2659 ASSERT(ip->i_gdquot); 2662 ASSERT(ip->i_gdquot);
@@ -2664,7 +2667,7 @@ xfs_qm_vop_chown_reserve(
2664 2667
2665 if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount, 2668 if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
2666 delblksudq, delblksgdq, ip->i_d.di_nblocks, 1, 2669 delblksudq, delblksgdq, ip->i_d.di_nblocks, 1,
2667 flags | blkflags))) 2670 flags | blkflags | prjflags)))
2668 return (error); 2671 return (error);
2669 2672
2670 /* 2673 /*
@@ -2681,7 +2684,7 @@ xfs_qm_vop_chown_reserve(
2681 ASSERT(unresudq || unresgdq); 2684 ASSERT(unresudq || unresgdq);
2682 if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, 2685 if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
2683 delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0, 2686 delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0,
2684 flags | blkflags))) 2687 flags | blkflags | prjflags)))
2685 return (error); 2688 return (error);
2686 xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, 2689 xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
2687 unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0, 2690 unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0,
diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c
index d8e131ec0aa8..9168918db252 100644
--- a/fs/xfs/quota/xfs_trans_dquot.c
+++ b/fs/xfs/quota/xfs_trans_dquot.c
@@ -595,12 +595,19 @@ xfs_trans_unreserve_and_mod_dquots(
595 } 595 }
596} 596}
597 597
598STATIC int
599xfs_quota_error(uint flags)
600{
601 if (flags & XFS_QMOPT_ENOSPC)
602 return ENOSPC;
603 return EDQUOT;
604}
605
598/* 606/*
599 * This reserves disk blocks and inodes against a dquot. 607 * This reserves disk blocks and inodes against a dquot.
600 * Flags indicate if the dquot is to be locked here and also 608 * Flags indicate if the dquot is to be locked here and also
601 * if the blk reservation is for RT or regular blocks. 609 * if the blk reservation is for RT or regular blocks.
602 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check. 610 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
603 * Returns EDQUOT if quota is exceeded.
604 */ 611 */
605STATIC int 612STATIC int
606xfs_trans_dqresv( 613xfs_trans_dqresv(
@@ -666,19 +673,15 @@ xfs_trans_dqresv(
666 */ 673 */
667 if (hardlimit > 0ULL && 674 if (hardlimit > 0ULL &&
668 (hardlimit <= nblks + *resbcountp)) { 675 (hardlimit <= nblks + *resbcountp)) {
669 error = EDQUOT; 676 error = xfs_quota_error(flags);
670 goto error_return; 677 goto error_return;
671 } 678 }
672 679
673 if (softlimit > 0ULL && 680 if (softlimit > 0ULL &&
674 (softlimit <= nblks + *resbcountp)) { 681 (softlimit <= nblks + *resbcountp)) {
675 /*
676 * If timer or warnings has expired,
677 * return EDQUOT
678 */
679 if ((timer != 0 && get_seconds() > timer) || 682 if ((timer != 0 && get_seconds() > timer) ||
680 (warns != 0 && warns >= warnlimit)) { 683 (warns != 0 && warns >= warnlimit)) {
681 error = EDQUOT; 684 error = xfs_quota_error(flags);
682 goto error_return; 685 goto error_return;
683 } 686 }
684 } 687 }
@@ -695,16 +698,12 @@ xfs_trans_dqresv(
695 if (!softlimit) 698 if (!softlimit)
696 softlimit = q->qi_isoftlimit; 699 softlimit = q->qi_isoftlimit;
697 if (hardlimit > 0ULL && count >= hardlimit) { 700 if (hardlimit > 0ULL && count >= hardlimit) {
698 error = EDQUOT; 701 error = xfs_quota_error(flags);
699 goto error_return; 702 goto error_return;
700 } else if (softlimit > 0ULL && count >= softlimit) { 703 } else if (softlimit > 0ULL && count >= softlimit) {
701 /*
702 * If timer or warnings has expired,
703 * return EDQUOT
704 */
705 if ((timer != 0 && get_seconds() > timer) || 704 if ((timer != 0 && get_seconds() > timer) ||
706 (warns != 0 && warns >= warnlimit)) { 705 (warns != 0 && warns >= warnlimit)) {
707 error = EDQUOT; 706 error = xfs_quota_error(flags);
708 goto error_return; 707 goto error_return;
709 } 708 }
710 } 709 }
@@ -751,13 +750,14 @@ error_return:
751 750
752 751
753/* 752/*
754 * Given a dquot(s), make disk block and/or inode reservations against them. 753 * Given dquot(s), make disk block and/or inode reservations against them.
755 * The fact that this does the reservation against both the usr and 754 * The fact that this does the reservation against both the usr and
756 * grp quotas is important, because this follows a both-or-nothing 755 * grp/prj quotas is important, because this follows a both-or-nothing
757 * approach. 756 * approach.
758 * 757 *
759 * flags = XFS_QMOPT_DQLOCK indicate if dquot(s) need to be locked. 758 * flags = XFS_QMOPT_DQLOCK indicate if dquot(s) need to be locked.
760 * XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown. 759 * XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
760 * XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT. Used by pquota.
761 * XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks 761 * XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
762 * XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks 762 * XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
763 * dquots are unlocked on return, if they were not locked by caller. 763 * dquots are unlocked on return, if they were not locked by caller.
@@ -772,25 +772,27 @@ xfs_trans_reserve_quota_bydquots(
772 long ninos, 772 long ninos,
773 uint flags) 773 uint flags)
774{ 774{
775 int resvd; 775 int resvd = 0, error;
776 776
777 if (! XFS_IS_QUOTA_ON(mp)) 777 if (!XFS_IS_QUOTA_ON(mp))
778 return (0); 778 return 0;
779 779
780 if (tp && tp->t_dqinfo == NULL) 780 if (tp && tp->t_dqinfo == NULL)
781 xfs_trans_alloc_dqinfo(tp); 781 xfs_trans_alloc_dqinfo(tp);
782 782
783 ASSERT(flags & XFS_QMOPT_RESBLK_MASK); 783 ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
784 resvd = 0;
785 784
786 if (udqp) { 785 if (udqp) {
787 if (xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, flags)) 786 error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos,
788 return (EDQUOT); 787 (flags & ~XFS_QMOPT_ENOSPC));
788 if (error)
789 return error;
789 resvd = 1; 790 resvd = 1;
790 } 791 }
791 792
792 if (gdqp) { 793 if (gdqp) {
793 if (xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags)) { 794 error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
795 if (error) {
794 /* 796 /*
795 * can't do it, so backout previous reservation 797 * can't do it, so backout previous reservation
796 */ 798 */
@@ -799,14 +801,14 @@ xfs_trans_reserve_quota_bydquots(
799 xfs_trans_dqresv(tp, mp, udqp, 801 xfs_trans_dqresv(tp, mp, udqp,
800 -nblks, -ninos, flags); 802 -nblks, -ninos, flags);
801 } 803 }
802 return (EDQUOT); 804 return error;
803 } 805 }
804 } 806 }
805 807
806 /* 808 /*
807 * Didn't change anything critical, so, no need to log 809 * Didn't change anything critical, so, no need to log
808 */ 810 */
809 return (0); 811 return 0;
810} 812}
811 813
812 814
@@ -814,8 +816,6 @@ xfs_trans_reserve_quota_bydquots(
814 * Lock the dquot and change the reservation if we can. 816 * Lock the dquot and change the reservation if we can.
815 * This doesn't change the actual usage, just the reservation. 817 * This doesn't change the actual usage, just the reservation.
816 * The inode sent in is locked. 818 * The inode sent in is locked.
817 *
818 * Returns 0 on success, EDQUOT or other errors otherwise
819 */ 819 */
820STATIC int 820STATIC int
821xfs_trans_reserve_quota_nblks( 821xfs_trans_reserve_quota_nblks(
@@ -824,20 +824,24 @@ xfs_trans_reserve_quota_nblks(
824 xfs_inode_t *ip, 824 xfs_inode_t *ip,
825 long nblks, 825 long nblks,
826 long ninos, 826 long ninos,
827 uint type) 827 uint flags)
828{ 828{
829 int error; 829 int error;
830 830
831 if (!XFS_IS_QUOTA_ON(mp)) 831 if (!XFS_IS_QUOTA_ON(mp))
832 return (0); 832 return 0;
833 if (XFS_IS_PQUOTA_ON(mp))
834 flags |= XFS_QMOPT_ENOSPC;
833 835
834 ASSERT(ip->i_ino != mp->m_sb.sb_uquotino); 836 ASSERT(ip->i_ino != mp->m_sb.sb_uquotino);
835 ASSERT(ip->i_ino != mp->m_sb.sb_gquotino); 837 ASSERT(ip->i_ino != mp->m_sb.sb_gquotino);
836 838
837 ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); 839 ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
838 ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount)); 840 ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
839 ASSERT((type & ~XFS_QMOPT_FORCE_RES) == XFS_TRANS_DQ_RES_RTBLKS || 841 ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
840 (type & ~XFS_QMOPT_FORCE_RES) == XFS_TRANS_DQ_RES_BLKS); 842 XFS_TRANS_DQ_RES_RTBLKS ||
843 (flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
844 XFS_TRANS_DQ_RES_BLKS);
841 845
842 /* 846 /*
843 * Reserve nblks against these dquots, with trans as the mediator. 847 * Reserve nblks against these dquots, with trans as the mediator.
@@ -845,8 +849,8 @@ xfs_trans_reserve_quota_nblks(
845 error = xfs_trans_reserve_quota_bydquots(tp, mp, 849 error = xfs_trans_reserve_quota_bydquots(tp, mp,
846 ip->i_udquot, ip->i_gdquot, 850 ip->i_udquot, ip->i_gdquot,
847 nblks, ninos, 851 nblks, ninos,
848 type); 852 flags);
849 return (error); 853 return error;
850} 854}
851 855
852/* 856/*
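Note on the error path above: the hard-coded EDQUOT is replaced by xfs_quota_error(flags), whose body lies outside the quoted hunks. A plausible minimal form, keyed purely off the new XFS_QMOPT_ENOSPC flag (illustrative sketch, not the committed implementation):

STATIC int
xfs_quota_error(uint flags)
{
	/* Project quota callers pass XFS_QMOPT_ENOSPC so that hitting a
	 * directory-tree quota looks like a full filesystem rather than a
	 * per-user quota failure. */
	if (flags & XFS_QMOPT_ENOSPC)
		return ENOSPC;
	return EDQUOT;
}

xfs_trans_reserve_quota_bydquots() then simply propagates whichever code comes back, after backing out the user-quota reservation if the group/project reservation failed.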
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index d384e489705f..26939d364bc4 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -4719,18 +4719,17 @@ xfs_bmapi(
4719 /* 4719 /*
4720 * Make a transaction-less quota reservation for 4720 * Make a transaction-less quota reservation for
4721 * delayed allocation blocks. This number gets 4721 * delayed allocation blocks. This number gets
4722 * adjusted later. 4722 * adjusted later. We return if we haven't
4723 * We return EDQUOT if we haven't allocated 4723 * allocated blocks already inside this loop.
4724 * blks already inside this loop;
4725 */ 4724 */
4726 if (XFS_TRANS_RESERVE_QUOTA_NBLKS( 4725 if ((error = XFS_TRANS_RESERVE_QUOTA_NBLKS(
4727 mp, NULL, ip, (long)alen, 0, 4726 mp, NULL, ip, (long)alen, 0,
4728 rt ? XFS_QMOPT_RES_RTBLKS : 4727 rt ? XFS_QMOPT_RES_RTBLKS :
4729 XFS_QMOPT_RES_REGBLKS)) { 4728 XFS_QMOPT_RES_REGBLKS))) {
4730 if (n == 0) { 4729 if (n == 0) {
4731 *nmap = 0; 4730 *nmap = 0;
4732 ASSERT(cur == NULL); 4731 ASSERT(cur == NULL);
4733 return XFS_ERROR(EDQUOT); 4732 return error;
4734 } 4733 }
4735 break; 4734 break;
4736 } 4735 }
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index f83399c89ce3..8e0d73d9ccc4 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -353,10 +353,11 @@ xfs_check_nostate_extents(
353 xfs_extnum_t num); 353 xfs_extnum_t num);
354 354
355/* 355/*
356 * Call xfs_bmap_do_search_extents() to search for the extent 356 * Search the extent records for the entry containing block bno.
357 * record containing block bno. If in multi-level in-core extent 357 * If bno lies in a hole, point to the next entry. If bno lies
358 * allocation mode, find and extract the target extent buffer, 358 * past eof, *eofp will be set, and *prevp will contain the last
359 * otherwise just use the direct extent list. 359 * entry (null if none). Else, *lastxp will be set to the index
360 * of the found entry; *gotp will contain the entry.
360 */ 361 */
361xfs_bmbt_rec_t * 362xfs_bmbt_rec_t *
362xfs_bmap_search_multi_extents(struct xfs_ifork *, xfs_fileoff_t, int *, 363xfs_bmap_search_multi_extents(struct xfs_ifork *, xfs_fileoff_t, int *,
diff --git a/fs/xfs/xfs_clnt.h b/fs/xfs/xfs_clnt.h
index 022fff62085b..5b7eb81453be 100644
--- a/fs/xfs/xfs_clnt.h
+++ b/fs/xfs/xfs_clnt.h
@@ -68,6 +68,7 @@ struct xfs_mount_args {
68 * enforcement */ 68 * enforcement */
69#define XFSMNT_PQUOTAENF 0x00000040 /* IRIX project quota limit 69#define XFSMNT_PQUOTAENF 0x00000040 /* IRIX project quota limit
70 * enforcement */ 70 * enforcement */
71#define XFSMNT_QUIET 0x00000080 /* don't report mount errors */
71#define XFSMNT_NOALIGN 0x00000200 /* don't allocate at 72#define XFSMNT_NOALIGN 0x00000200 /* don't allocate at
72 * stripe boundaries*/ 73 * stripe boundaries*/
73#define XFSMNT_RETERR 0x00000400 /* return error to user */ 74#define XFSMNT_RETERR 0x00000400 /* return error to user */
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index 26b8e709a569..bc43163456ef 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -186,4 +186,7 @@ extern void xfs_fs_cmn_err(int level, struct xfs_mount *mp, char *fmt, ...);
186#define xfs_fs_repair_cmn_err(level, mp, fmt, args...) \ 186#define xfs_fs_repair_cmn_err(level, mp, fmt, args...) \
187 xfs_fs_cmn_err(level, mp, fmt " Unmount and run xfs_repair.", ## args) 187 xfs_fs_cmn_err(level, mp, fmt " Unmount and run xfs_repair.", ## args)
188 188
189#define xfs_fs_mount_cmn_err(f, fmt, args...) \
190 ((f & XFS_MFSI_QUIET)? cmn_err(CE_WARN, "XFS: " fmt, ## args) : (void)0)
191
189#endif /* __XFS_ERROR_H__ */ 192#endif /* __XFS_ERROR_H__ */
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 72e7e78bfff8..049fabb7f7e0 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -213,7 +213,8 @@ xfs_mount_free(
213STATIC int 213STATIC int
214xfs_mount_validate_sb( 214xfs_mount_validate_sb(
215 xfs_mount_t *mp, 215 xfs_mount_t *mp,
216 xfs_sb_t *sbp) 216 xfs_sb_t *sbp,
217 int flags)
217{ 218{
218 /* 219 /*
219 * If the log device and data device have the 220 * If the log device and data device have the
@@ -223,33 +224,29 @@ xfs_mount_validate_sb(
223 * a volume filesystem in a non-volume manner. 224 * a volume filesystem in a non-volume manner.
224 */ 225 */
225 if (sbp->sb_magicnum != XFS_SB_MAGIC) { 226 if (sbp->sb_magicnum != XFS_SB_MAGIC) {
226 cmn_err(CE_WARN, "XFS: bad magic number"); 227 xfs_fs_mount_cmn_err(flags, "bad magic number");
227 return XFS_ERROR(EWRONGFS); 228 return XFS_ERROR(EWRONGFS);
228 } 229 }
229 230
230 if (!XFS_SB_GOOD_VERSION(sbp)) { 231 if (!XFS_SB_GOOD_VERSION(sbp)) {
231 cmn_err(CE_WARN, "XFS: bad version"); 232 xfs_fs_mount_cmn_err(flags, "bad version");
232 return XFS_ERROR(EWRONGFS); 233 return XFS_ERROR(EWRONGFS);
233 } 234 }
234 235
235 if (unlikely( 236 if (unlikely(
236 sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) { 237 sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) {
237 cmn_err(CE_WARN, 238 xfs_fs_mount_cmn_err(flags,
238 "XFS: filesystem is marked as having an external log; " 239 "filesystem is marked as having an external log; "
239 "specify logdev on the\nmount command line."); 240 "specify logdev on the\nmount command line.");
240 XFS_CORRUPTION_ERROR("xfs_mount_validate_sb(1)", 241 return XFS_ERROR(EINVAL);
241 XFS_ERRLEVEL_HIGH, mp, sbp);
242 return XFS_ERROR(EFSCORRUPTED);
243 } 242 }
244 243
245 if (unlikely( 244 if (unlikely(
246 sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) { 245 sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) {
247 cmn_err(CE_WARN, 246 xfs_fs_mount_cmn_err(flags,
248 "XFS: filesystem is marked as having an internal log; " 247 "filesystem is marked as having an internal log; "
249 "don't specify logdev on\nthe mount command line."); 248 "do not specify logdev on\nthe mount command line.");
250 XFS_CORRUPTION_ERROR("xfs_mount_validate_sb(2)", 249 return XFS_ERROR(EINVAL);
251 XFS_ERRLEVEL_HIGH, mp, sbp);
252 return XFS_ERROR(EFSCORRUPTED);
253 } 250 }
254 251
255 /* 252 /*
@@ -274,9 +271,7 @@ xfs_mount_validate_sb(
274 (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) || 271 (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) ||
275 (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) || 272 (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) ||
276 (sbp->sb_imax_pct > 100 || sbp->sb_imax_pct < 1))) { 273 (sbp->sb_imax_pct > 100 || sbp->sb_imax_pct < 1))) {
277 cmn_err(CE_WARN, "XFS: SB sanity check 1 failed"); 274 xfs_fs_mount_cmn_err(flags, "SB sanity check 1 failed");
278 XFS_CORRUPTION_ERROR("xfs_mount_validate_sb(3)",
279 XFS_ERRLEVEL_LOW, mp, sbp);
280 return XFS_ERROR(EFSCORRUPTED); 275 return XFS_ERROR(EFSCORRUPTED);
281 } 276 }
282 277
@@ -289,9 +284,7 @@ xfs_mount_validate_sb(
289 (xfs_drfsbno_t)sbp->sb_agcount * sbp->sb_agblocks || 284 (xfs_drfsbno_t)sbp->sb_agcount * sbp->sb_agblocks ||
290 sbp->sb_dblocks < (xfs_drfsbno_t)(sbp->sb_agcount - 1) * 285 sbp->sb_dblocks < (xfs_drfsbno_t)(sbp->sb_agcount - 1) *
291 sbp->sb_agblocks + XFS_MIN_AG_BLOCKS)) { 286 sbp->sb_agblocks + XFS_MIN_AG_BLOCKS)) {
292 cmn_err(CE_WARN, "XFS: SB sanity check 2 failed"); 287 xfs_fs_mount_cmn_err(flags, "SB sanity check 2 failed");
293 XFS_ERROR_REPORT("xfs_mount_validate_sb(4)",
294 XFS_ERRLEVEL_LOW, mp);
295 return XFS_ERROR(EFSCORRUPTED); 288 return XFS_ERROR(EFSCORRUPTED);
296 } 289 }
297 290
@@ -307,15 +300,13 @@ xfs_mount_validate_sb(
307 (sbp->sb_dblocks << (sbp->sb_blocklog - BBSHIFT)) > UINT_MAX || 300 (sbp->sb_dblocks << (sbp->sb_blocklog - BBSHIFT)) > UINT_MAX ||
308 (sbp->sb_rblocks << (sbp->sb_blocklog - BBSHIFT)) > UINT_MAX)) { 301 (sbp->sb_rblocks << (sbp->sb_blocklog - BBSHIFT)) > UINT_MAX)) {
309#endif 302#endif
310 cmn_err(CE_WARN, 303 xfs_fs_mount_cmn_err(flags,
311 "XFS: File system is too large to be mounted on this system."); 304 "file system too large to be mounted on this system.");
312 return XFS_ERROR(E2BIG); 305 return XFS_ERROR(E2BIG);
313 } 306 }
314 307
315 if (unlikely(sbp->sb_inprogress)) { 308 if (unlikely(sbp->sb_inprogress)) {
316 cmn_err(CE_WARN, "XFS: file system busy"); 309 xfs_fs_mount_cmn_err(flags, "file system busy");
317 XFS_ERROR_REPORT("xfs_mount_validate_sb(5)",
318 XFS_ERRLEVEL_LOW, mp);
319 return XFS_ERROR(EFSCORRUPTED); 310 return XFS_ERROR(EFSCORRUPTED);
320 } 311 }
321 312
@@ -323,8 +314,8 @@ xfs_mount_validate_sb(
323 * Version 1 directory format has never worked on Linux. 314 * Version 1 directory format has never worked on Linux.
324 */ 315 */
325 if (unlikely(!XFS_SB_VERSION_HASDIRV2(sbp))) { 316 if (unlikely(!XFS_SB_VERSION_HASDIRV2(sbp))) {
326 cmn_err(CE_WARN, 317 xfs_fs_mount_cmn_err(flags,
327 "XFS: Attempted to mount file system using version 1 directory format"); 318 "file system using version 1 directory format");
328 return XFS_ERROR(ENOSYS); 319 return XFS_ERROR(ENOSYS);
329 } 320 }
330 321
@@ -332,11 +323,11 @@ xfs_mount_validate_sb(
332 * Until this is fixed only page-sized or smaller data blocks work. 323 * Until this is fixed only page-sized or smaller data blocks work.
333 */ 324 */
334 if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) { 325 if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) {
335 cmn_err(CE_WARN, 326 xfs_fs_mount_cmn_err(flags,
336 "XFS: Attempted to mount file system with blocksize %d bytes", 327 "file system with blocksize %d bytes",
337 sbp->sb_blocksize); 328 sbp->sb_blocksize);
338 cmn_err(CE_WARN, 329 xfs_fs_mount_cmn_err(flags,
339 "XFS: Only page-sized (%ld) or less blocksizes currently work.", 330 "only pagesize (%ld) or less will currently work.",
340 PAGE_SIZE); 331 PAGE_SIZE);
341 return XFS_ERROR(ENOSYS); 332 return XFS_ERROR(ENOSYS);
342 } 333 }
@@ -484,7 +475,7 @@ xfs_xlatesb(
484 * Does the initial read of the superblock. 475 * Does the initial read of the superblock.
485 */ 476 */
486int 477int
487xfs_readsb(xfs_mount_t *mp) 478xfs_readsb(xfs_mount_t *mp, int flags)
488{ 479{
489 unsigned int sector_size; 480 unsigned int sector_size;
490 unsigned int extra_flags; 481 unsigned int extra_flags;
@@ -506,7 +497,7 @@ xfs_readsb(xfs_mount_t *mp)
506 bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR, 497 bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
507 BTOBB(sector_size), extra_flags); 498 BTOBB(sector_size), extra_flags);
508 if (!bp || XFS_BUF_ISERROR(bp)) { 499 if (!bp || XFS_BUF_ISERROR(bp)) {
509 cmn_err(CE_WARN, "XFS: SB read failed"); 500 xfs_fs_mount_cmn_err(flags, "SB read failed");
510 error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM; 501 error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
511 goto fail; 502 goto fail;
512 } 503 }
@@ -520,9 +511,9 @@ xfs_readsb(xfs_mount_t *mp)
520 sbp = XFS_BUF_TO_SBP(bp); 511 sbp = XFS_BUF_TO_SBP(bp);
521 xfs_xlatesb(XFS_BUF_PTR(bp), &(mp->m_sb), 1, XFS_SB_ALL_BITS); 512 xfs_xlatesb(XFS_BUF_PTR(bp), &(mp->m_sb), 1, XFS_SB_ALL_BITS);
522 513
523 error = xfs_mount_validate_sb(mp, &(mp->m_sb)); 514 error = xfs_mount_validate_sb(mp, &(mp->m_sb), flags);
524 if (error) { 515 if (error) {
525 cmn_err(CE_WARN, "XFS: SB validate failed"); 516 xfs_fs_mount_cmn_err(flags, "SB validate failed");
526 goto fail; 517 goto fail;
527 } 518 }
528 519
@@ -530,8 +521,8 @@ xfs_readsb(xfs_mount_t *mp)
530 * We must be able to do sector-sized and sector-aligned IO. 521 * We must be able to do sector-sized and sector-aligned IO.
531 */ 522 */
532 if (sector_size > mp->m_sb.sb_sectsize) { 523 if (sector_size > mp->m_sb.sb_sectsize) {
533 cmn_err(CE_WARN, 524 xfs_fs_mount_cmn_err(flags,
534 "XFS: device supports only %u byte sectors (not %u)", 525 "device supports only %u byte sectors (not %u)",
535 sector_size, mp->m_sb.sb_sectsize); 526 sector_size, mp->m_sb.sb_sectsize);
536 error = ENOSYS; 527 error = ENOSYS;
537 goto fail; 528 goto fail;
@@ -548,7 +539,7 @@ xfs_readsb(xfs_mount_t *mp)
548 bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR, 539 bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
549 BTOBB(sector_size), extra_flags); 540 BTOBB(sector_size), extra_flags);
550 if (!bp || XFS_BUF_ISERROR(bp)) { 541 if (!bp || XFS_BUF_ISERROR(bp)) {
551 cmn_err(CE_WARN, "XFS: SB re-read failed"); 542 xfs_fs_mount_cmn_err(flags, "SB re-read failed");
552 error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM; 543 error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
553 goto fail; 544 goto fail;
554 } 545 }
@@ -678,7 +669,7 @@ xfs_mountfs(
678 int error = 0; 669 int error = 0;
679 670
680 if (mp->m_sb_bp == NULL) { 671 if (mp->m_sb_bp == NULL) {
681 if ((error = xfs_readsb(mp))) { 672 if ((error = xfs_readsb(mp, mfsi_flags))) {
682 return error; 673 return error;
683 } 674 }
684 } 675 }
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 66cbee79864e..668ad23fd37c 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -510,9 +510,12 @@ xfs_preferred_iosize(xfs_mount_t *mp)
510 */ 510 */
511#define XFS_MFSI_SECOND 0x01 /* Secondary mount -- skip stuff */ 511#define XFS_MFSI_SECOND 0x01 /* Secondary mount -- skip stuff */
512#define XFS_MFSI_CLIENT 0x02 /* Is a client -- skip lots of stuff */ 512#define XFS_MFSI_CLIENT 0x02 /* Is a client -- skip lots of stuff */
513/* XFS_MFSI_RRINODES */
513#define XFS_MFSI_NOUNLINK 0x08 /* Skip unlinked inode processing in */ 514#define XFS_MFSI_NOUNLINK 0x08 /* Skip unlinked inode processing in */
514 /* log recovery */ 515 /* log recovery */
515#define XFS_MFSI_NO_QUOTACHECK 0x10 /* Skip quotacheck processing */ 516#define XFS_MFSI_NO_QUOTACHECK 0x10 /* Skip quotacheck processing */
517/* XFS_MFSI_CONVERT_SUNIT */
518#define XFS_MFSI_QUIET 0x40 /* Be silent if mount errors found */
516 519
517/* 520/*
518 * Macros for getting from mount to vfs and back. 521 * Macros for getting from mount to vfs and back.
@@ -581,7 +584,7 @@ extern int xfs_mod_incore_sb_unlocked(xfs_mount_t *, xfs_sb_field_t,
581extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *, 584extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *,
582 uint, int); 585 uint, int);
583extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int); 586extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int);
584extern int xfs_readsb(xfs_mount_t *mp); 587extern int xfs_readsb(xfs_mount_t *, int);
585extern void xfs_freesb(xfs_mount_t *); 588extern void xfs_freesb(xfs_mount_t *);
586extern void xfs_do_force_shutdown(bhv_desc_t *, int, char *, int); 589extern void xfs_do_force_shutdown(bhv_desc_t *, int, char *, int);
587extern int xfs_syncsub(xfs_mount_t *, int, int, int *); 590extern int xfs_syncsub(xfs_mount_t *, int, int, int *);
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index 4f6a034de7f7..7fbef974bce6 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -196,10 +196,11 @@ typedef struct xfs_qoff_logformat {
196#define XFS_QMOPT_QUOTAOFF 0x0000080 /* quotas are being turned off */ 196#define XFS_QMOPT_QUOTAOFF 0x0000080 /* quotas are being turned off */
197#define XFS_QMOPT_UMOUNTING 0x0000100 /* filesys is being unmounted */ 197#define XFS_QMOPT_UMOUNTING 0x0000100 /* filesys is being unmounted */
198#define XFS_QMOPT_DOLOG 0x0000200 /* log buf changes (in quotacheck) */ 198#define XFS_QMOPT_DOLOG 0x0000200 /* log buf changes (in quotacheck) */
199#define XFS_QMOPT_DOWARN 0x0000400 /* increase warning cnt if necessary */ 199#define XFS_QMOPT_DOWARN 0x0000400 /* increase warning cnt if needed */
200#define XFS_QMOPT_ILOCKED 0x0000800 /* inode is already locked (excl) */ 200#define XFS_QMOPT_ILOCKED 0x0000800 /* inode is already locked (excl) */
201#define XFS_QMOPT_DQREPAIR 0x0001000 /* repair dquot, if damaged. */ 201#define XFS_QMOPT_DQREPAIR 0x0001000 /* repair dquot if damaged */
202#define XFS_QMOPT_GQUOTA 0x0002000 /* group dquot requested */ 202#define XFS_QMOPT_GQUOTA 0x0002000 /* group dquot requested */
203#define XFS_QMOPT_ENOSPC 0x0004000 /* enospc instead of edquot (prj) */
203 204
204/* 205/*
205 * flags to xfs_trans_mod_dquot to indicate which field needs to be 206 * flags to xfs_trans_mod_dquot to indicate which field needs to be
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index 504d2a80747a..f0e09ca14139 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -442,6 +442,9 @@ xfs_mount(
442 p = vfs_bhv_lookup(vfsp, VFS_POSITION_IO); 442 p = vfs_bhv_lookup(vfsp, VFS_POSITION_IO);
443 mp->m_io_ops = p ? *(xfs_ioops_t *) vfs_bhv_custom(p) : xfs_iocore_xfs; 443 mp->m_io_ops = p ? *(xfs_ioops_t *) vfs_bhv_custom(p) : xfs_iocore_xfs;
444 444
445 if (args->flags & XFSMNT_QUIET)
446 flags |= XFS_MFSI_QUIET;
447
445 /* 448 /*
446 * Open real time and log devices - order is important. 449 * Open real time and log devices - order is important.
447 */ 450 */
@@ -492,7 +495,7 @@ xfs_mount(
492 error = xfs_start_flags(vfsp, args, mp); 495 error = xfs_start_flags(vfsp, args, mp);
493 if (error) 496 if (error)
494 goto error1; 497 goto error1;
495 error = xfs_readsb(mp); 498 error = xfs_readsb(mp, flags);
496 if (error) 499 if (error)
497 goto error1; 500 goto error1;
498 error = xfs_finish_flags(vfsp, args, mp); 501 error = xfs_finish_flags(vfsp, args, mp);
@@ -1697,8 +1700,9 @@ xfs_parseargs(
1697 int dsunit, dswidth, vol_dsunit, vol_dswidth; 1700 int dsunit, dswidth, vol_dsunit, vol_dswidth;
1698 int iosize; 1701 int iosize;
1699 1702
1700 args->flags2 |= XFSMNT2_COMPAT_IOSIZE;
1701 args->flags |= XFSMNT_IDELETE; 1703 args->flags |= XFSMNT_IDELETE;
1704 args->flags |= XFSMNT_BARRIER;
1705 args->flags2 |= XFSMNT2_COMPAT_IOSIZE;
1702 1706
1703 if (!options) 1707 if (!options)
1704 goto done; 1708 goto done;
@@ -1947,8 +1951,6 @@ xfs_showargs(
1947 seq_printf(m, "," MNTOPT_IKEEP); 1951 seq_printf(m, "," MNTOPT_IKEEP);
1948 if (!(mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE)) 1952 if (!(mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE))
1949 seq_printf(m, "," MNTOPT_LARGEIO); 1953 seq_printf(m, "," MNTOPT_LARGEIO);
1950 if (mp->m_flags & XFS_MOUNT_BARRIER)
1951 seq_printf(m, "," MNTOPT_BARRIER);
1952 1954
1953 if (!(vfsp->vfs_flag & VFS_32BITINODES)) 1955 if (!(vfsp->vfs_flag & VFS_32BITINODES))
1954 seq_printf(m, "," MNTOPT_64BITINODE); 1956 seq_printf(m, "," MNTOPT_64BITINODE);
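Taken together, the xfs_clnt.h, xfs_mount.h and xfs_vfsops.c hunks thread a single "quiet" bit from the mount arguments down into superblock validation. A condensed sketch of that flow, assuming a caller that already holds a populated struct xfs_mount_args (the function name below is illustrative only, not committed code):

static int
example_quiet_probe(struct xfs_mount_args *args, xfs_mount_t *mp)
{
	int	flags = 0;

	if (args->flags & XFSMNT_QUIET)		/* probe-style mount requested */
		flags |= XFS_MFSI_QUIET;

	/*
	 * xfs_readsb() now takes the flag word and hands it on to
	 * xfs_mount_validate_sb(), so every xfs_fs_mount_cmn_err() call in
	 * the validation path sees the same XFS_MFSI_QUIET bit.
	 */
	return xfs_readsb(mp, flags);
}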
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index de49601919c1..fa71b305ba5c 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -4649,6 +4649,10 @@ vnodeops_t xfs_vnodeops = {
4649#ifdef HAVE_SENDFILE 4649#ifdef HAVE_SENDFILE
4650 .vop_sendfile = xfs_sendfile, 4650 .vop_sendfile = xfs_sendfile,
4651#endif 4651#endif
4652#ifdef HAVE_SPLICE
4653 .vop_splice_read = xfs_splice_read,
4654 .vop_splice_write = xfs_splice_write,
4655#endif
4652 .vop_write = xfs_write, 4656 .vop_write = xfs_write,
4653 .vop_ioctl = xfs_ioctl, 4657 .vop_ioctl = xfs_ioctl,
4654 .vop_getattr = xfs_getattr, 4658 .vop_getattr = xfs_getattr,
diff --git a/include/asm-arm/arch-at91rm9200/at91rm9200_mci.h b/include/asm-arm/arch-at91rm9200/at91rm9200_mci.h
new file mode 100644
index 000000000000..f28636d61e39
--- /dev/null
+++ b/include/asm-arm/arch-at91rm9200/at91rm9200_mci.h
@@ -0,0 +1,104 @@
1/*
2 * include/asm-arm/arch-at91rm9200/at91rm9200_mci.h
3 *
4 * Copyright (C) 2005 Ivan Kokshaysky
5 * Copyright (C) SAN People
6 *
7 * MultiMedia Card Interface (MCI) registers.
8 * Based on AT91RM9200 datasheet revision E.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#ifndef AT91RM9200_MCI_H
17#define AT91RM9200_MCI_H
18
19#define AT91_MCI_CR 0x00 /* Control Register */
20#define AT91_MCI_MCIEN (1 << 0) /* Multi-Media Interface Enable */
21#define AT91_MCI_MCIDIS (1 << 1) /* Multi-Media Interface Disable */
22#define AT91_MCI_PWSEN (1 << 2) /* Power Save Mode Enable */
23#define AT91_MCI_PWSDIS (1 << 3) /* Power Save Mode Disable */
24#define AT91_MCI_SWRST (1 << 7) /* Software Reset */
25
26#define AT91_MCI_MR 0x04 /* Mode Register */
27#define AT91_MCI_CLKDIV (0xff << 0) /* Clock Divider */
28#define AT91_MCI_PWSDIV (3 << 8) /* Power Saving Divider */
29#define AT91_MCI_PDCPADV (1 << 14) /* PDC Padding Value */
30#define AT91_MCI_PDCMODE (1 << 15) /* PDC-orientated Mode */
31#define AT91_MCI_BLKLEN (0xfff << 18) /* Data Block Length */
32
33#define AT91_MCI_DTOR 0x08 /* Data Timeout Register */
34#define AT91_MCI_DTOCYC (0xf << 0) /* Data Timeout Cycle Number */
35#define AT91_MCI_DTOMUL (7 << 4) /* Data Timeout Multiplier */
36#define AT91_MCI_DTOMUL_1 (0 << 4)
37#define AT91_MCI_DTOMUL_16 (1 << 4)
38#define AT91_MCI_DTOMUL_128 (2 << 4)
39#define AT91_MCI_DTOMUL_256 (3 << 4)
40#define AT91_MCI_DTOMUL_1K (4 << 4)
41#define AT91_MCI_DTOMUL_4K (5 << 4)
42#define AT91_MCI_DTOMUL_64K (6 << 4)
43#define AT91_MCI_DTOMUL_1M (7 << 4)
44
45#define AT91_MCI_SDCR 0x0c /* SD Card Register */
46#define AT91_MCI_SDCSEL (0xf << 0) /* SD Card Selector */
47#define AT91_MCI_SDCBUS (1 << 7) /* 1-bit or 4-bit bus */
48
49#define AT91_MCI_ARGR 0x10 /* Argument Register */
50
51#define AT91_MCI_CMDR 0x14 /* Command Register */
52#define AT91_MCI_CMDNB (0x3f << 0) /* Command Number */
53#define AT91_MCI_RSPTYP (3 << 6) /* Response Type */
54#define AT91_MCI_RSPTYP_NONE (0 << 6)
55#define AT91_MCI_RSPTYP_48 (1 << 6)
56#define AT91_MCI_RSPTYP_136 (2 << 6)
57#define AT91_MCI_SPCMD (7 << 8) /* Special Command */
58#define AT91_MCI_SPCMD_NONE (0 << 8)
59#define AT91_MCI_SPCMD_INIT (1 << 8)
60#define AT91_MCI_SPCMD_SYNC (2 << 8)
61#define AT91_MCI_SPCMD_ICMD (4 << 8)
62#define AT91_MCI_SPCMD_IRESP (5 << 8)
63#define AT91_MCI_OPDCMD (1 << 11) /* Open Drain Command */
64#define AT91_MCI_MAXLAT (1 << 12) /* Max Latency for Command to Response */
65#define AT91_MCI_TRCMD (3 << 16) /* Transfer Command */
66#define AT91_MCI_TRCMD_NONE (0 << 16)
67#define AT91_MCI_TRCMD_START (1 << 16)
68#define AT91_MCI_TRCMD_STOP (2 << 16)
69#define AT91_MCI_TRDIR (1 << 18) /* Transfer Direction */
70#define AT91_MCI_TRTYP (3 << 19) /* Transfer Type */
71#define AT91_MCI_TRTYP_BLOCK (0 << 19)
72#define AT91_MCI_TRTYP_MULTIPLE (1 << 19)
73#define AT91_MCI_TRTYP_STREAM (2 << 19)
74
75#define AT91_MCI_RSPR(n) (0x20 + ((n) * 4)) /* Response Registers 0-3 */
76#define AT91_MCR_RDR 0x30 /* Receive Data Register */
77#define AT91_MCR_TDR 0x34 /* Transmit Data Register */
78
79#define AT91_MCI_SR 0x40 /* Status Register */
80#define AT91_MCI_CMDRDY (1 << 0) /* Command Ready */
81#define AT91_MCI_RXRDY (1 << 1) /* Receiver Ready */
82#define AT91_MCI_TXRDY (1 << 2) /* Transmit Ready */
83#define AT91_MCI_BLKE (1 << 3) /* Data Block Ended */
84#define AT91_MCI_DTIP (1 << 4) /* Data Transfer in Progress */
85#define AT91_MCI_NOTBUSY (1 << 5) /* Data Not Busy */
86#define AT91_MCI_ENDRX (1 << 6) /* End of RX Buffer */
 87#define AT91_MCI_ENDTX (1 << 7) /* End of TX Buffer */
88#define AT91_MCI_RXBUFF (1 << 14) /* RX Buffer Full */
89#define AT91_MCI_TXBUFE (1 << 15) /* TX Buffer Empty */
90#define AT91_MCI_RINDE (1 << 16) /* Response Index Error */
91#define AT91_MCI_RDIRE (1 << 17) /* Response Direction Error */
92#define AT91_MCI_RCRCE (1 << 18) /* Response CRC Error */
93#define AT91_MCI_RENDE (1 << 19) /* Response End Bit Error */
 94#define AT91_MCI_RTOE (1 << 20) /* Response Time-out Error */
95#define AT91_MCI_DCRCE (1 << 21) /* Data CRC Error */
96#define AT91_MCI_DTOE (1 << 22) /* Data Time-out Error */
97#define AT91_MCI_OVRE (1 << 30) /* Overrun */
98#define AT91_MCI_UNRE (1 << 31) /* Underrun */
99
100#define AT91_MCI_IER 0x44 /* Interrupt Enable Register */
101#define AT91_MCI_IDR 0x48 /* Interrupt Disable Register */
102#define AT91_MCI_IMR 0x4c /* Interrupt Mask Register */
103
104#endif
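The new at91rm9200_mci.h header only provides register offsets and bit masks; an MCI host driver is expected to compose them. A hedged sketch of building a command-register value for a single-block read from the masks defined above (the function and the particular bit choices are illustrative, not part of the patch):

static unsigned int at91_mci_example_read_cmdr(unsigned int cmd_number)
{
	unsigned int cmdr = 0;

	cmdr |= cmd_number & AT91_MCI_CMDNB;	/* command index (0..63) */
	cmdr |= AT91_MCI_RSPTYP_48;		/* expect a 48-bit response */
	cmdr |= AT91_MCI_MAXLAT;		/* allow maximum command-to-response latency */
	cmdr |= AT91_MCI_TRCMD_START;		/* start a data transfer with this command */
	cmdr |= AT91_MCI_TRDIR;			/* transfer direction bit (assumed: card-to-host read) */
	cmdr |= AT91_MCI_TRTYP_BLOCK;		/* single-block transfer */

	return cmdr;	/* value a driver would write at offset AT91_MCI_CMDR */
}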
diff --git a/include/asm-arm/arch-at91rm9200/board.h b/include/asm-arm/arch-at91rm9200/board.h
index 2e7d1139a799..4fdef13d01d4 100644
--- a/include/asm-arm/arch-at91rm9200/board.h
+++ b/include/asm-arm/arch-at91rm9200/board.h
@@ -38,6 +38,8 @@ extern unsigned long at91_master_clock;
38extern int at91_serial_map[AT91_NR_UART]; 38extern int at91_serial_map[AT91_NR_UART];
39extern int at91_console_port; 39extern int at91_console_port;
40 40
41#include <linux/mtd/partitions.h>
42
41 /* USB Device */ 43 /* USB Device */
42struct at91_udc_data { 44struct at91_udc_data {
43 u8 vbus_pin; /* high == host powering us */ 45 u8 vbus_pin; /* high == host powering us */
@@ -77,4 +79,26 @@ struct at91_usbh_data {
77}; 79};
78extern void __init at91_add_device_usbh(struct at91_usbh_data *data); 80extern void __init at91_add_device_usbh(struct at91_usbh_data *data);
79 81
82 /* NAND / SmartMedia */
83struct at91_nand_data {
84 u8 enable_pin; /* chip enable */
85 u8 det_pin; /* card detect */
86 u8 rdy_pin; /* ready/busy */
87 u8 ale; /* address line number connected to ALE */
88 u8 cle; /* address line number connected to CLE */
89 struct mtd_partition* (*partition_info)(int, int*);
90};
91extern void __init at91_add_device_nand(struct at91_nand_data *data);
92
93 /* I2C*/
94void __init at91_add_device_i2c(void);
95
96 /* RTC */
97void __init at91_add_device_rtc(void);
98
99 /* LEDs */
100extern u8 at91_leds_cpu;
101extern u8 at91_leds_timer;
102extern void __init at91_init_leds(u8 cpu_led, u8 timer_led);
103
80#endif 104#endif
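The additions to board.h are plain platform-data declarations: a board file fills in struct at91_nand_data and registers it. A minimal usage sketch with made-up pin numbers and a made-up partition helper (only the struct fields and at91_add_device_nand() come from the header above):

static struct mtd_partition example_nand_parts[] = {
	{ .name = "bootloader", .offset = 0,          .size = 0x00040000 },
	{ .name = "rootfs",     .offset = 0x00040000, .size = MTDPART_SIZ_FULL },
};

static struct mtd_partition * __init
example_nand_partitions(int size, int *num_partitions)
{
	*num_partitions = ARRAY_SIZE(example_nand_parts);
	return example_nand_parts;
}

static struct at91_nand_data __initdata example_nand_data = {
	.ale		= 21,	/* address line wired to ALE (board specific) */
	.cle		= 22,	/* address line wired to CLE (board specific) */
	.rdy_pin	= 53,	/* ready/busy GPIO (board specific) */
	.enable_pin	= 54,	/* chip-enable GPIO (board specific) */
	.partition_info	= example_nand_partitions,
};

static void __init example_board_add_nand(void)
{
	at91_add_device_nand(&example_nand_data);
}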
diff --git a/include/asm-arm/arch-at91rm9200/hardware.h b/include/asm-arm/arch-at91rm9200/hardware.h
index 2646c01f8e97..59e6f44d3a0d 100644
--- a/include/asm-arm/arch-at91rm9200/hardware.h
+++ b/include/asm-arm/arch-at91rm9200/hardware.h
@@ -65,6 +65,9 @@
65/* SmartMedia */ 65/* SmartMedia */
66#define AT91_SMARTMEDIA_BASE 0x40000000 /* NCS3: Smartmedia physical base address */ 66#define AT91_SMARTMEDIA_BASE 0x40000000 /* NCS3: Smartmedia physical base address */
67 67
68/* Compact Flash */
69#define AT91_CF_BASE 0x50000000 /* NCS4-NCS6: Compact Flash physical base address */
70
68/* Multi-Master Memory controller */ 71/* Multi-Master Memory controller */
69#define AT91_UHP_BASE 0x00300000 /* USB Host controller */ 72#define AT91_UHP_BASE 0x00300000 /* USB Host controller */
70 73
diff --git a/include/asm-arm/arch-ep93xx/ts72xx.h b/include/asm-arm/arch-ep93xx/ts72xx.h
index 412215e77f44..a94f63ff0535 100644
--- a/include/asm-arm/arch-ep93xx/ts72xx.h
+++ b/include/asm-arm/arch-ep93xx/ts72xx.h
@@ -12,6 +12,8 @@
12 * febfc000 [67]0000000 4K NAND data register 12 * febfc000 [67]0000000 4K NAND data register
13 * febfb000 [67]0400000 4K NAND control register 13 * febfb000 [67]0400000 4K NAND control register
14 * febfa000 [67]0800000 4K NAND busy register 14 * febfa000 [67]0800000 4K NAND busy register
15 * febf9000 10800000 4K TS-5620 RTC index register
16 * febf8000 11700000 4K TS-5620 RTC data register
15 */ 17 */
16 18
17#define TS72XX_MODEL_PHYS_BASE 0x22000000 19#define TS72XX_MODEL_PHYS_BASE 0x22000000
@@ -58,6 +60,15 @@
58#define TS72XX_NAND_BUSY_SIZE 0x00001000 60#define TS72XX_NAND_BUSY_SIZE 0x00001000
59 61
60 62
63#define TS72XX_RTC_INDEX_VIRT_BASE 0xfebf9000
64#define TS72XX_RTC_INDEX_PHYS_BASE 0x10800000
65#define TS72XX_RTC_INDEX_SIZE 0x00001000
66
67#define TS72XX_RTC_DATA_VIRT_BASE 0xfebf8000
68#define TS72XX_RTC_DATA_PHYS_BASE 0x11700000
69#define TS72XX_RTC_DATA_SIZE 0x00001000
70
71
61#ifndef __ASSEMBLY__ 72#ifndef __ASSEMBLY__
62#include <asm/io.h> 73#include <asm/io.h>
63 74
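The two TS-5620 windows added above follow the same static-map convention as the NAND registers in this header. Assuming the RTC is accessed as an index/data pair (write a register number into the index window, then read or write its value through the data window), a minimal access sketch could look like this; the helper names are illustrative:

#include <asm/io.h>

static unsigned char ts72xx_example_rtc_readb(unsigned long reg)
{
	__raw_writeb(reg, TS72XX_RTC_INDEX_VIRT_BASE);	/* select RTC register */
	return __raw_readb(TS72XX_RTC_DATA_VIRT_BASE);	/* read its value */
}

static void ts72xx_example_rtc_writeb(unsigned char value, unsigned long reg)
{
	__raw_writeb(reg, TS72XX_RTC_INDEX_VIRT_BASE);	/* select RTC register */
	__raw_writeb(value, TS72XX_RTC_DATA_VIRT_BASE);	/* write its value */
}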
diff --git a/include/asm-arm/arch-imx/dma.h b/include/asm-arm/arch-imx/dma.h
index b45fa367d71e..621ff2c730f2 100644
--- a/include/asm-arm/arch-imx/dma.h
+++ b/include/asm-arm/arch-imx/dma.h
@@ -17,27 +17,16 @@
17 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */ 19 */
20
20#ifndef __ASM_ARCH_DMA_H 21#ifndef __ASM_ARCH_DMA_H
21#define __ASM_ARCH_DMA_H 22#define __ASM_ARCH_DMA_H
22 23
23/*
24 * DMA registration
25 */
26
27typedef enum { 24typedef enum {
28 DMA_PRIO_HIGH = 0, 25 DMA_PRIO_HIGH = 0,
29 DMA_PRIO_MEDIUM = 3, 26 DMA_PRIO_MEDIUM = 1,
30 DMA_PRIO_LOW = 6 27 DMA_PRIO_LOW = 2
31} imx_dma_prio; 28} imx_dma_prio;
32 29
33int imx_request_dma(char *name, imx_dma_prio prio,
34 void (*irq_handler) (int, void *, struct pt_regs *),
35 void (*err_handler) (int, void *, struct pt_regs *),
36 void *data);
37
38void imx_free_dma(int dma_ch);
39
40
41#define DMA_REQ_UART3_T 2 30#define DMA_REQ_UART3_T 2
42#define DMA_REQ_UART3_R 3 31#define DMA_REQ_UART3_R 3
43#define DMA_REQ_SSI2_T 4 32#define DMA_REQ_SSI2_T 4
diff --git a/include/asm-arm/arch-imx/imx-dma.h b/include/asm-arm/arch-imx/imx-dma.h
new file mode 100644
index 000000000000..f2063c1d610d
--- /dev/null
+++ b/include/asm-arm/arch-imx/imx-dma.h
@@ -0,0 +1,90 @@
1/*
 2 * linux/include/asm-arm/arch-imx/imx-dma.h
3 *
4 * Copyright (C) 1997,1998 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <asm/dma.h>
22
23#ifndef __ASM_ARCH_IMX_DMA_H
24#define __ASM_ARCH_IMX_DMA_H
25
26#define IMX_DMA_CHANNELS 11
27
28/*
29 * struct imx_dma_channel - i.MX specific DMA extension
30 * @name: name specified by DMA client
31 * @irq_handler: client callback for end of transfer
32 * @err_handler: client callback for error condition
33 * @data: clients context data for callbacks
34 * @dma_mode: direction of the transfer %DMA_MODE_READ or %DMA_MODE_WRITE
35 * @sg: pointer to the actual read/written chunk for scatter-gather emulation
36 * @sgbc: counter of processed bytes in the actual read/written chunk
37 * @resbytes: total residual number of bytes to transfer
38 * (it can be lower or same as sum of SG mapped chunk sizes)
39 * @sgcount: number of chunks to be read/written
40 *
 41 * The structure is used for IMX DMA processing. It would probably be good
 42 * to use @struct dma_struct for external interfacing in the future and keep
 43 * @struct imx_dma_channel only as an extension to it.
44 */
45
46struct imx_dma_channel {
47 const char *name;
48 void (*irq_handler) (int, void *, struct pt_regs *);
49 void (*err_handler) (int, void *, struct pt_regs *);
50 void *data;
51 dmamode_t dma_mode;
52 struct scatterlist *sg;
53 unsigned int sgbc;
54 unsigned int sgcount;
55 unsigned int resbytes;
56 int dma_num;
57};
58
59extern struct imx_dma_channel imx_dma_channels[IMX_DMA_CHANNELS];
60
61
62/* The type to distinguish channel numbers parameter from ordinal int type */
63typedef int imx_dmach_t;
64
65int
66imx_dma_setup_single(imx_dmach_t dma_ch, dma_addr_t dma_address,
67 unsigned int dma_length, unsigned int dev_addr, dmamode_t dmamode);
68
69int
70imx_dma_setup_sg(imx_dmach_t dma_ch,
71 struct scatterlist *sg, unsigned int sgcount, unsigned int dma_length,
72 unsigned int dev_addr, dmamode_t dmamode);
73
74int
75imx_dma_setup_handlers(imx_dmach_t dma_ch,
76 void (*irq_handler) (int, void *, struct pt_regs *),
77 void (*err_handler) (int, void *, struct pt_regs *), void *data);
78
79void imx_dma_enable(imx_dmach_t dma_ch);
80
81void imx_dma_disable(imx_dmach_t dma_ch);
82
83int imx_dma_request(imx_dmach_t dma_ch, const char *name);
84
85void imx_dma_free(imx_dmach_t dma_ch);
86
87int imx_dma_request_by_prio(imx_dmach_t *pdma_ch, const char *name, imx_dma_prio prio);
88
89
 90#endif /* __ASM_ARCH_IMX_DMA_H */
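The prototypes above make up the whole client-facing side of the reworked i.MX DMA layer. A hedged usage sketch for a single peripheral-to-memory transfer, assuming the usual 0-on-success return convention and a device FIFO address supplied by the caller (everything other than the imx_dma_* calls, DMA_PRIO_HIGH and DMA_MODE_READ is illustrative):

static void example_dma_done(int dma_ch, void *data, struct pt_regs *regs)
{
	/* transfer complete: wake up whoever queued it */
}

static void example_dma_error(int dma_ch, void *data, struct pt_regs *regs)
{
	/* burst timeout or bus error on this channel */
}

static int example_start_read(dma_addr_t buf, unsigned int len,
			      unsigned int dev_fifo_addr, void *ctx)
{
	imx_dmach_t ch;
	int ret;

	ret = imx_dma_request_by_prio(&ch, "example", DMA_PRIO_HIGH);
	if (ret)
		return ret;

	ret = imx_dma_setup_single(ch, buf, len, dev_fifo_addr, DMA_MODE_READ);
	if (ret)
		goto out_free;

	ret = imx_dma_setup_handlers(ch, example_dma_done, example_dma_error, ctx);
	if (ret)
		goto out_free;

	imx_dma_enable(ch);
	return 0;

out_free:
	imx_dma_free(ch);
	return ret;
}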
diff --git a/include/asm-arm/arch-imx/mmc.h b/include/asm-arm/arch-imx/mmc.h
new file mode 100644
index 000000000000..1937151665c7
--- /dev/null
+++ b/include/asm-arm/arch-imx/mmc.h
@@ -0,0 +1,12 @@
1#ifndef ASMARM_ARCH_MMC_H
2#define ASMARM_ARCH_MMC_H
3
4#include <linux/mmc/protocol.h>
5
6struct imxmmc_platform_data {
7 int (*card_present)(void);
8};
9
10extern void imx_set_mmc_info(struct imxmmc_platform_data *info);
11
12#endif
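This header only introduces a card_present() hook and a registration call. A board-level usage sketch, with a trivial stand-in for real card-detect logic (the example_* names are invented):

static int example_mmc_card_present(void)
{
	/* A real board would sample a card-detect GPIO here; this sketch
	 * simply reports the card as always present. */
	return 1;
}

static struct imxmmc_platform_data example_mmc_info = {
	.card_present	= example_mmc_card_present,
};

static void __init example_board_init_mmc(void)
{
	imx_set_mmc_info(&example_mmc_info);
}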
diff --git a/include/asm-arm/arch-ixp23xx/memory.h b/include/asm-arm/arch-ixp23xx/memory.h
index bebcf0aa0d72..6e19f46d54d1 100644
--- a/include/asm-arm/arch-ixp23xx/memory.h
+++ b/include/asm-arm/arch-ixp23xx/memory.h
@@ -28,6 +28,7 @@
28 * to an address that the kernel can use. 28 * to an address that the kernel can use.
29 */ 29 */
30#ifndef __ASSEMBLY__ 30#ifndef __ASSEMBLY__
31#include <asm/mach-types.h>
31 32
32#define __virt_to_bus(v) \ 33#define __virt_to_bus(v) \
33 ({ unsigned int ret; \ 34 ({ unsigned int ret; \
@@ -40,6 +41,22 @@
40 data = *((volatile int *)IXP23XX_PCI_SDRAM_BAR); \ 41 data = *((volatile int *)IXP23XX_PCI_SDRAM_BAR); \
41 __phys_to_virt((((b - (data & 0xfffffff0)) + 0x00000000))); }) 42 __phys_to_virt((((b - (data & 0xfffffff0)) + 0x00000000))); })
42 43
44/*
45 * Coherency support. Only supported on A2 CPUs or on A1
46 * systems that have the cache coherency workaround.
47 */
48static inline int __ixp23xx_arch_is_coherent(void)
49{
50 extern unsigned int processor_id;
51
52 if (((processor_id & 15) >= 2) || machine_is_roadrunner())
53 return 1;
54
55 return 0;
56}
57
58#define arch_is_coherent() __ixp23xx_arch_is_coherent()
59
43#endif 60#endif
44 61
45 62
diff --git a/include/asm-arm/arch-ixp23xx/platform.h b/include/asm-arm/arch-ixp23xx/platform.h
index f85b4685a491..e4d99060a049 100644
--- a/include/asm-arm/arch-ixp23xx/platform.h
+++ b/include/asm-arm/arch-ixp23xx/platform.h
@@ -22,6 +22,7 @@ void ixp23xx_sys_init(void);
22int ixp23xx_pci_setup(int, struct pci_sys_data *); 22int ixp23xx_pci_setup(int, struct pci_sys_data *);
23void ixp23xx_pci_preinit(void); 23void ixp23xx_pci_preinit(void);
24struct pci_bus *ixp23xx_pci_scan_bus(int, struct pci_sys_data*); 24struct pci_bus *ixp23xx_pci_scan_bus(int, struct pci_sys_data*);
25void ixp23xx_pci_slave_init(void);
25 26
26extern struct sys_timer ixp23xx_timer; 27extern struct sys_timer ixp23xx_timer;
27 28
diff --git a/include/asm-arm/arch-ixp23xx/uncompress.h b/include/asm-arm/arch-ixp23xx/uncompress.h
index 62623fa9b2f7..013575e6a9a1 100644
--- a/include/asm-arm/arch-ixp23xx/uncompress.h
+++ b/include/asm-arm/arch-ixp23xx/uncompress.h
@@ -16,26 +16,21 @@
16 16
17#define UART_BASE ((volatile u32 *)IXP23XX_UART1_PHYS) 17#define UART_BASE ((volatile u32 *)IXP23XX_UART1_PHYS)
18 18
19static __inline__ void putc(char c) 19static inline void putc(char c)
20{ 20{
21 int j; 21 int j;
22 22
23 for (j = 0; j < 0x1000; j++) { 23 for (j = 0; j < 0x1000; j++) {
24 if (UART_BASE[UART_LSR] & UART_LSR_THRE) 24 if (UART_BASE[UART_LSR] & UART_LSR_THRE)
25 break; 25 break;
26 barrier();
26 } 27 }
27 28
28 UART_BASE[UART_TX] = c; 29 UART_BASE[UART_TX] = c;
29} 30}
30 31
31static void putstr(const char *s) 32static inline void flush(void)
32{ 33{
33 while (*s) {
34 putc(*s);
35 if (*s == '\n')
36 putc('\r');
37 s++;
38 }
39} 34}
40 35
41#define arch_decomp_setup() 36#define arch_decomp_setup()
diff --git a/include/asm-arm/arch-omap/aic23.h b/include/asm-arm/arch-omap/aic23.h
index 590bac25b7c4..6513065941d0 100644
--- a/include/asm-arm/arch-omap/aic23.h
+++ b/include/asm-arm/arch-omap/aic23.h
@@ -57,6 +57,7 @@
57#define LHV_MIN 0x0000 57#define LHV_MIN 0x0000
58 58
59// Analog audio path control register 59// Analog audio path control register
60#define STA_REG(x) ((x)<<6)
60#define STE_ENABLED 0x0020 61#define STE_ENABLED 0x0020
61#define DAC_SELECTED 0x0010 62#define DAC_SELECTED 0x0010
62#define BYPASS_ON 0x0008 63#define BYPASS_ON 0x0008
@@ -109,4 +110,7 @@
109#define TLV320AIC23ID1 (0x1a) // cs low 110#define TLV320AIC23ID1 (0x1a) // cs low
110#define TLV320AIC23ID2 (0x1b) // cs high 111#define TLV320AIC23ID2 (0x1b) // cs high
111 112
113void tlv320aic23_power_up(void);
114void tlv320aic23_power_down(void);
115
112#endif /* __ASM_ARCH_AIC23_H */ 116#endif /* __ASM_ARCH_AIC23_H */
diff --git a/include/asm-arm/arch-omap/board-ams-delta.h b/include/asm-arm/arch-omap/board-ams-delta.h
new file mode 100644
index 000000000000..0070f6d3b75c
--- /dev/null
+++ b/include/asm-arm/arch-omap/board-ams-delta.h
@@ -0,0 +1,65 @@
1/*
2 * linux/include/asm-arm/arch-omap/board-ams-delta.h
3 *
4 * Copyright (C) 2006 Jonathan McDowell <noodles@earth.li>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
12 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
14 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
15 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
16 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
17 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
18 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
19 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
20 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26#ifndef __ASM_ARCH_OMAP_AMS_DELTA_H
27#define __ASM_ARCH_OMAP_AMS_DELTA_H
28
29#if defined (CONFIG_MACH_AMS_DELTA)
30
31#define AMS_DELTA_LATCH1_PHYS 0x01000000
32#define AMS_DELTA_LATCH1_VIRT 0xEA000000
33#define AMS_DELTA_MODEM_PHYS 0x04000000
34#define AMS_DELTA_MODEM_VIRT 0xEB000000
35#define AMS_DELTA_LATCH2_PHYS 0x08000000
36#define AMS_DELTA_LATCH2_VIRT 0xEC000000
37
38#define AMS_DELTA_LATCH1_LED_CAMERA 0x01
39#define AMS_DELTA_LATCH1_LED_ADVERT 0x02
40#define AMS_DELTA_LATCH1_LED_EMAIL 0x04
41#define AMS_DELTA_LATCH1_LED_HANDSFREE 0x08
42#define AMS_DELTA_LATCH1_LED_VOICEMAIL 0x10
43#define AMS_DELTA_LATCH1_LED_VOICE 0x20
44
45#define AMS_DELTA_LATCH2_LCD_VBLEN 0x0001
46#define AMS_DELTA_LATCH2_LCD_NDISP 0x0002
47#define AMS_DELTA_LATCH2_NAND_NCE 0x0004
48#define AMS_DELTA_LATCH2_NAND_NRE 0x0008
49#define AMS_DELTA_LATCH2_NAND_NWP 0x0010
50#define AMS_DELTA_LATCH2_NAND_NWE 0x0020
51#define AMS_DELTA_LATCH2_NAND_ALE 0x0040
52#define AMS_DELTA_LATCH2_NAND_CLE 0x0080
53#define AMS_DELTA_LATCH2_MODEM_NRESET 0x1000
54#define AMS_DELTA_LATCH2_MODEM_CODEC 0x2000
55
56#define AMS_DELTA_GPIO_PIN_NAND_RB 12
57
58#ifndef __ASSEMBLY__
59void ams_delta_latch1_write(u8 mask, u8 value);
60void ams_delta_latch2_write(u16 mask, u16 value);
61#endif
62
63#endif /* CONFIG_MACH_AMS_DELTA */
64
65#endif /* __ASM_ARCH_OMAP_AMS_DELTA_H */
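ams_delta_latch1_write() and ams_delta_latch2_write() take a mask of latch bits to touch plus the value to set them to, so a single LED or NAND control line can be flipped without disturbing the rest of the latch. A small illustrative example (the modem reset is assumed active-low here):

static void example_email_led(int on)
{
	/* Only the EMAIL bit is in the mask, so other latch outputs keep
	 * their current state. */
	ams_delta_latch1_write(AMS_DELTA_LATCH1_LED_EMAIL,
			       on ? AMS_DELTA_LATCH1_LED_EMAIL : 0);
}

static void example_modem_reset(void)
{
	ams_delta_latch2_write(AMS_DELTA_LATCH2_MODEM_NRESET, 0);
	ams_delta_latch2_write(AMS_DELTA_LATCH2_MODEM_NRESET,
			       AMS_DELTA_LATCH2_MODEM_NRESET);
}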
diff --git a/include/asm-arm/arch-omap/board-apollon.h b/include/asm-arm/arch-omap/board-apollon.h
new file mode 100644
index 000000000000..de0c5b792c58
--- /dev/null
+++ b/include/asm-arm/arch-omap/board-apollon.h
@@ -0,0 +1,45 @@
1/*
2 * linux/include/asm-arm/arch-omap/board-apollon.h
3 *
4 * Hardware definitions for Samsung OMAP24XX Apollon board.
5 *
6 * Initial creation by Kyungmin Park <kyungmin.park@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
14 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
16 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
17 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
18 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
19 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
20 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
22 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23 *
24 * You should have received a copy of the GNU General Public License along
25 * with this program; if not, write to the Free Software Foundation, Inc.,
26 * 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#ifndef __ASM_ARCH_OMAP_APOLLON_H
30#define __ASM_ARCH_OMAP_APOLLON_H
31
32/* Placeholder for APOLLON specific defines */
33/* GPMC CS0 */
34#define APOLLON_CS0_BASE 0x00000000
35/* GPMC CS1 */
36#define APOLLON_CS1_BASE 0x08000000
37#define APOLLON_ETHR_START (APOLLON_CS1_BASE + 0x300)
38#define APOLLON_ETHR_GPIO_IRQ 74
39/* GPMC CS2 - reserved for OneNAND */
40#define APOLLON_CS2_BASE 0x10000000
41/* GPMC CS3 - reserved for NOR or NAND */
42#define APOLLON_CS3_BASE 0x18000000
43
44#endif /* __ASM_ARCH_OMAP_APOLLON_H */
45
diff --git a/include/asm-arm/arch-omap/board-h2.h b/include/asm-arm/arch-omap/board-h2.h
index 39ca5a31aeea..b2888ef9e9b4 100644
--- a/include/asm-arm/arch-omap/board-h2.h
+++ b/include/asm-arm/arch-omap/board-h2.h
@@ -34,9 +34,5 @@
34/* At OMAP1610 Innovator the Ethernet is directly connected to CS1 */ 34/* At OMAP1610 Innovator the Ethernet is directly connected to CS1 */
35#define OMAP1610_ETHR_START 0x04000300 35#define OMAP1610_ETHR_START 0x04000300
36 36
37/* Samsung NAND flash at CS2B or CS3(NAND Boot) */
38#define OMAP_NAND_FLASH_START1 0x0A000000 /* CS2B */
39#define OMAP_NAND_FLASH_START2 0x0C000000 /* CS3 */
40
41#endif /* __ASM_ARCH_OMAP_H2_H */ 37#endif /* __ASM_ARCH_OMAP_H2_H */
42 38
diff --git a/include/asm-arm/arch-omap/board-h3.h b/include/asm-arm/arch-omap/board-h3.h
index 1b12c1dcc2fa..761ea0a17897 100644
--- a/include/asm-arm/arch-omap/board-h3.h
+++ b/include/asm-arm/arch-omap/board-h3.h
@@ -30,10 +30,6 @@
30/* In OMAP1710 H3 the Ethernet is directly connected to CS1 */ 30/* In OMAP1710 H3 the Ethernet is directly connected to CS1 */
31#define OMAP1710_ETHR_START 0x04000300 31#define OMAP1710_ETHR_START 0x04000300
32 32
33/* Samsung NAND flash at CS2B or CS3(NAND Boot) */
34#define OMAP_NAND_FLASH_START1 0x0A000000 /* CS2B */
35#define OMAP_NAND_FLASH_START2 0x0C000000 /* CS3 */
36
37#define MAXIRQNUM (IH_BOARD_BASE) 33#define MAXIRQNUM (IH_BOARD_BASE)
38#define MAXFIQNUM MAXIRQNUM 34#define MAXFIQNUM MAXIRQNUM
39#define MAXSWINUM MAXIRQNUM 35#define MAXSWINUM MAXIRQNUM
diff --git a/include/asm-arm/arch-omap/board-h4.h b/include/asm-arm/arch-omap/board-h4.h
index 33ea29a41654..7ef664bc9e33 100644
--- a/include/asm-arm/arch-omap/board-h4.h
+++ b/include/asm-arm/arch-omap/board-h4.h
@@ -33,12 +33,6 @@
33/* GPMC CS1 */ 33/* GPMC CS1 */
34#define OMAP24XX_ETHR_START 0x08000300 34#define OMAP24XX_ETHR_START 0x08000300
35#define OMAP24XX_ETHR_GPIO_IRQ 92 35#define OMAP24XX_ETHR_GPIO_IRQ 92
36 36#define H4_CS0_BASE 0x04000000
37#define H4_CS0_BASE 0x04000000
38
39#define H4_CS0_BASE 0x04000000
40
41#define H4_CS0_BASE 0x04000000
42
43#endif /* __ASM_ARCH_OMAP_H4_H */ 37#endif /* __ASM_ARCH_OMAP_H4_H */
44 38
diff --git a/include/asm-arm/arch-omap/board-netstar.h b/include/asm-arm/arch-omap/board-netstar.h
deleted file mode 100644
index 77cc0fb54d54..000000000000
--- a/include/asm-arm/arch-omap/board-netstar.h
+++ /dev/null
@@ -1,19 +0,0 @@
1/*
2 * Copyright (C) 2004 2N Telekomunikace, Ladislav Michl <michl@2n.cz>
3 *
4 * Hardware definitions for OMAP5910 based NetStar board.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __ASM_ARCH_NETSTAR_H
12#define __ASM_ARCH_NETSTAR_H
13
14#include <asm/arch/tc.h>
15
16#define OMAP_NAND_FLASH_START1 OMAP_CS1_PHYS + (1 << 23)
17#define OMAP_NAND_FLASH_START2 OMAP_CS1_PHYS + (2 << 23)
18
19#endif /* __ASM_ARCH_NETSTAR_H */
diff --git a/include/asm-arm/arch-omap/board-nokia.h b/include/asm-arm/arch-omap/board-nokia.h
new file mode 100644
index 000000000000..72deea203493
--- /dev/null
+++ b/include/asm-arm/arch-omap/board-nokia.h
@@ -0,0 +1,54 @@
1/*
2 * linux/include/asm-arm/arch-omap/board-nokia.h
3 *
4 * Information structures for Nokia-specific board config data
5 *
6 * Copyright (C) 2005 Nokia Corporation
7 */
8
9#ifndef _OMAP_BOARD_NOKIA_H
10#define _OMAP_BOARD_NOKIA_H
11
12#include <linux/types.h>
13
14#define OMAP_TAG_NOKIA_BT 0x4e01
15#define OMAP_TAG_WLAN_CX3110X 0x4e02
16#define OMAP_TAG_CBUS 0x4e03
17#define OMAP_TAG_EM_ASIC_BB5 0x4e04
18
19
20#define BT_CHIP_CSR 1
21#define BT_CHIP_TI 2
22
23#define BT_SYSCLK_12 1
24#define BT_SYSCLK_38_4 2
25
26struct omap_bluetooth_config {
27 u8 chip_type;
28 u8 bt_wakeup_gpio;
29 u8 host_wakeup_gpio;
30 u8 reset_gpio;
31 u8 bt_uart;
32 u8 bd_addr[6];
33 u8 bt_sysclk;
34};
35
36struct omap_wlan_cx3110x_config {
37 u8 chip_type;
38 s16 power_gpio;
39 s16 irq_gpio;
40 s16 spi_cs_gpio;
41};
42
43struct omap_cbus_config {
44 s16 clk_gpio;
45 s16 dat_gpio;
46 s16 sel_gpio;
47};
48
49struct omap_em_asic_bb5_config {
50 s16 retu_irq_gpio;
51 s16 tahvo_irq_gpio;
52};
53
54#endif
diff --git a/include/asm-arm/arch-omap/board-perseus2.h b/include/asm-arm/arch-omap/board-perseus2.h
index 691e52a52b43..eb74420cb439 100644
--- a/include/asm-arm/arch-omap/board-perseus2.h
+++ b/include/asm-arm/arch-omap/board-perseus2.h
@@ -42,8 +42,4 @@
42 42
43#define NR_IRQS (MAXIRQNUM + 1) 43#define NR_IRQS (MAXIRQNUM + 1)
44 44
45/* Samsung NAND flash at CS2B or CS3(NAND Boot) */
46#define OMAP_NAND_FLASH_START1 0x0A000000 /* CS2B */
47#define OMAP_NAND_FLASH_START2 0x0C000000 /* CS3 */
48
49#endif 45#endif
diff --git a/include/asm-arm/arch-omap/board.h b/include/asm-arm/arch-omap/board.h
index a0040cd86639..6d6240a4681c 100644
--- a/include/asm-arm/arch-omap/board.h
+++ b/include/asm-arm/arch-omap/board.h
@@ -21,9 +21,12 @@
21#define OMAP_TAG_LCD 0x4f05 21#define OMAP_TAG_LCD 0x4f05
22#define OMAP_TAG_GPIO_SWITCH 0x4f06 22#define OMAP_TAG_GPIO_SWITCH 0x4f06
23#define OMAP_TAG_UART 0x4f07 23#define OMAP_TAG_UART 0x4f07
24#define OMAP_TAG_FBMEM 0x4f08
25#define OMAP_TAG_STI_CONSOLE 0x4f09
24 26
25#define OMAP_TAG_BOOT_REASON 0x4f80 27#define OMAP_TAG_BOOT_REASON 0x4f80
26#define OMAP_TAG_FLASH_PART 0x4f81 28#define OMAP_TAG_FLASH_PART 0x4f81
29#define OMAP_TAG_VERSION_STR 0x4f82
27 30
28struct omap_clock_config { 31struct omap_clock_config {
29 /* 0 for 12 MHz, 1 for 13 MHz and 2 for 19.2 MHz */ 32 /* 0 for 12 MHz, 1 for 13 MHz and 2 for 19.2 MHz */
@@ -54,6 +57,11 @@ struct omap_serial_console_config {
54 u32 console_speed; 57 u32 console_speed;
55}; 58};
56 59
60struct omap_sti_console_config {
61 unsigned enable:1;
62 u8 channel;
63};
64
57struct omap_usb_config { 65struct omap_usb_config {
58 /* Configure drivers according to the connectors on your board: 66 /* Configure drivers according to the connectors on your board:
59 * - "A" connector (rectagular) 67 * - "A" connector (rectagular)
@@ -87,6 +95,13 @@ struct omap_lcd_config {
87 char ctrl_name[16]; 95 char ctrl_name[16];
88}; 96};
89 97
98struct omap_fbmem_config {
99 u32 fb_sram_start;
100 u32 fb_sram_size;
101 u32 fb_sdram_start;
102 u32 fb_sdram_size;
103};
104
90/* Cover: 105/* Cover:
91 * high -> closed 106 * high -> closed
92 * low -> open 107 * low -> open
@@ -106,6 +121,12 @@ struct omap_gpio_switch_config {
106 int key_code:24; /* Linux key code */ 121 int key_code:24; /* Linux key code */
107}; 122};
108 123
124struct omap_uart_config {
125 /* Bit field of UARTs present; bit 0 --> UART1 */
126 unsigned int enabled_uarts;
127};
128
129
109struct omap_flash_part_config { 130struct omap_flash_part_config {
110 char part_table[0]; 131 char part_table[0];
111}; 132};
@@ -114,11 +135,14 @@ struct omap_boot_reason_config {
114 char reason_str[12]; 135 char reason_str[12];
115}; 136};
116 137
117struct omap_uart_config { 138struct omap_version_config {
118 /* Bit field of UARTs present; bit 0 --> UART1 */ 139 char component[12];
119 unsigned int enabled_uarts; 140 char version[12];
120}; 141};
121 142
143
144#include <asm-arm/arch-omap/board-nokia.h>
145
122struct omap_board_config_entry { 146struct omap_board_config_entry {
123 u16 tag; 147 u16 tag;
124 u16 len; 148 u16 len;
diff --git a/include/asm-arm/arch-omap/clock.h b/include/asm-arm/arch-omap/clock.h
index 46a0402696de..3c4eb9fbe48a 100644
--- a/include/asm-arm/arch-omap/clock.h
+++ b/include/asm-arm/arch-omap/clock.h
@@ -19,6 +19,7 @@ struct clk {
19 struct list_head node; 19 struct list_head node;
20 struct module *owner; 20 struct module *owner;
21 const char *name; 21 const char *name;
22 int id;
22 struct clk *parent; 23 struct clk *parent;
23 unsigned long rate; 24 unsigned long rate;
24 __u32 flags; 25 __u32 flags;
@@ -57,6 +58,7 @@ extern void propagate_rate(struct clk *clk);
57extern void followparent_recalc(struct clk * clk); 58extern void followparent_recalc(struct clk * clk);
58extern void clk_allow_idle(struct clk *clk); 59extern void clk_allow_idle(struct clk *clk);
59extern void clk_deny_idle(struct clk *clk); 60extern void clk_deny_idle(struct clk *clk);
61extern int clk_get_usecount(struct clk *clk);
60 62
61/* Clock flags */ 63/* Clock flags */
62#define RATE_CKCTL (1 << 0) /* Main fixed ratio clocks */ 64#define RATE_CKCTL (1 << 0) /* Main fixed ratio clocks */
@@ -80,10 +82,11 @@ extern void clk_deny_idle(struct clk *clk);
80#define CM_PLL_SEL1 (1 << 18) 82#define CM_PLL_SEL1 (1 << 18)
81#define CM_PLL_SEL2 (1 << 19) 83#define CM_PLL_SEL2 (1 << 19)
82#define CM_SYSCLKOUT_SEL1 (1 << 20) 84#define CM_SYSCLKOUT_SEL1 (1 << 20)
83#define CLOCK_IN_OMAP730 (1 << 21) 85#define CLOCK_IN_OMAP310 (1 << 21)
84#define CLOCK_IN_OMAP1510 (1 << 22) 86#define CLOCK_IN_OMAP730 (1 << 22)
85#define CLOCK_IN_OMAP16XX (1 << 23) 87#define CLOCK_IN_OMAP1510 (1 << 23)
86#define CLOCK_IN_OMAP242X (1 << 24) 88#define CLOCK_IN_OMAP16XX (1 << 24)
87#define CLOCK_IN_OMAP243X (1 << 25) 89#define CLOCK_IN_OMAP242X (1 << 25)
90#define CLOCK_IN_OMAP243X (1 << 26)
88 91
89#endif 92#endif
diff --git a/include/asm-arm/arch-omap/dma.h b/include/asm-arm/arch-omap/dma.h
index d4e73efcb816..ca1202312a45 100644
--- a/include/asm-arm/arch-omap/dma.h
+++ b/include/asm-arm/arch-omap/dma.h
@@ -404,6 +404,7 @@ extern void omap_free_lcd_dma(void);
404extern void omap_setup_lcd_dma(void); 404extern void omap_setup_lcd_dma(void);
405extern void omap_enable_lcd_dma(void); 405extern void omap_enable_lcd_dma(void);
406extern void omap_stop_lcd_dma(void); 406extern void omap_stop_lcd_dma(void);
407extern int omap_lcd_dma_ext_running(void);
407extern void omap_set_lcd_dma_ext_controller(int external); 408extern void omap_set_lcd_dma_ext_controller(int external);
408extern void omap_set_lcd_dma_single_transfer(int single); 409extern void omap_set_lcd_dma_single_transfer(int single);
409extern void omap_set_lcd_dma_b1(unsigned long addr, u16 fb_xres, u16 fb_yres, 410extern void omap_set_lcd_dma_b1(unsigned long addr, u16 fb_xres, u16 fb_yres,
diff --git a/include/asm-arm/arch-omap/dmtimer.h b/include/asm-arm/arch-omap/dmtimer.h
index 11772c792f3e..e6522e6a3834 100644
--- a/include/asm-arm/arch-omap/dmtimer.h
+++ b/include/asm-arm/arch-omap/dmtimer.h
@@ -88,5 +88,6 @@ unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer);
88void omap_dm_timer_reset_counter(struct omap_dm_timer *timer); 88void omap_dm_timer_reset_counter(struct omap_dm_timer *timer);
89 89
90int omap_dm_timers_active(void); 90int omap_dm_timers_active(void);
91u32 omap_dm_timer_modify_idlect_mask(u32 inputmask);
91 92
92#endif /* __ASM_ARCH_TIMER_H */ 93#endif /* __ASM_ARCH_TIMER_H */
diff --git a/include/asm-arm/arch-omap/dsp.h b/include/asm-arm/arch-omap/dsp.h
index 57bf4f39ca58..06dad83dd41f 100644
--- a/include/asm-arm/arch-omap/dsp.h
+++ b/include/asm-arm/arch-omap/dsp.h
@@ -181,10 +181,16 @@ struct omap_dsp_varinfo {
181#define OMAP_DSP_MBCMD_PM_ENABLE 0x01 181#define OMAP_DSP_MBCMD_PM_ENABLE 0x01
182 182
183#define OMAP_DSP_MBCMD_KFUNC_FBCTL 0x00 183#define OMAP_DSP_MBCMD_KFUNC_FBCTL 0x00
184#define OMAP_DSP_MBCMD_KFUNC_AUDIO_PWR 0x01
184 185
186#define OMAP_DSP_MBCMD_FBCTL_UPD 0x0000
185#define OMAP_DSP_MBCMD_FBCTL_ENABLE 0x0002 187#define OMAP_DSP_MBCMD_FBCTL_ENABLE 0x0002
186#define OMAP_DSP_MBCMD_FBCTL_DISABLE 0x0003 188#define OMAP_DSP_MBCMD_FBCTL_DISABLE 0x0003
187 189
190#define OMAP_DSP_MBCMD_AUDIO_PWR_UP 0x0000
191#define OMAP_DSP_MBCMD_AUDIO_PWR_DOWN1 0x0001
192#define OMAP_DSP_MBCMD_AUDIO_PWR_DOWN2 0x0002
193
188#define OMAP_DSP_MBCMD_TDEL_SAFE 0x0000 194#define OMAP_DSP_MBCMD_TDEL_SAFE 0x0000
189#define OMAP_DSP_MBCMD_TDEL_KILL 0x0001 195#define OMAP_DSP_MBCMD_TDEL_KILL 0x0001
190 196
diff --git a/include/asm-arm/arch-omap/dsp_common.h b/include/asm-arm/arch-omap/dsp_common.h
index 4fcce6944056..16a459dfa714 100644
--- a/include/asm-arm/arch-omap/dsp_common.h
+++ b/include/asm-arm/arch-omap/dsp_common.h
@@ -27,11 +27,12 @@
27#ifndef ASM_ARCH_DSP_COMMON_H 27#ifndef ASM_ARCH_DSP_COMMON_H
28#define ASM_ARCH_DSP_COMMON_H 28#define ASM_ARCH_DSP_COMMON_H
29 29
30void omap_dsp_pm_suspend(void); 30extern void omap_dsp_request_mpui(void);
31void omap_dsp_pm_resume(void); 31extern void omap_dsp_release_mpui(void);
32void omap_dsp_request_mpui(void); 32extern int omap_dsp_request_mem(void);
33void omap_dsp_release_mpui(void); 33extern int omap_dsp_release_mem(void);
34int omap_dsp_request_mem(void); 34
35int omap_dsp_release_mem(void); 35extern void (*omap_dsp_audio_pwr_up_request)(int stage);
36extern void (*omap_dsp_audio_pwr_down_request)(int stage);
36 37
37#endif /* ASM_ARCH_DSP_COMMON_H */ 38#endif /* ASM_ARCH_DSP_COMMON_H */
diff --git a/include/asm-arm/arch-omap/gpioexpander.h b/include/asm-arm/arch-omap/gpioexpander.h
new file mode 100644
index 000000000000..7a43b0a912e4
--- /dev/null
+++ b/include/asm-arm/arch-omap/gpioexpander.h
@@ -0,0 +1,24 @@
1/*
2 * linux/include/asm-arm/arch-omap/gpioexpander.h
3 *
4 *
5 * Copyright (C) 2004 Texas Instruments, Inc.
6 *
7 * This package is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
12 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 13 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
14 */
15
16#ifndef __ASM_ARCH_OMAP_GPIOEXPANDER_H
17#define __ASM_ARCH_OMAP_GPIOEXPANDER_H
18
19/* Function Prototypes for GPIO Expander functions */
20
21int read_gpio_expa(u8 *, int);
22int write_gpio_expa(u8 , int);
23
24#endif /* __ASM_ARCH_OMAP_GPIOEXPANDER_H */
diff --git a/include/asm-arm/arch-omap/hardware.h b/include/asm-arm/arch-omap/hardware.h
index 5406b875c422..7909b729826c 100644
--- a/include/asm-arm/arch-omap/hardware.h
+++ b/include/asm-arm/arch-omap/hardware.h
@@ -306,6 +306,10 @@
306#include "board-h4.h" 306#include "board-h4.h"
307#endif 307#endif
308 308
309#ifdef CONFIG_MACH_OMAP_APOLLON
310#include "board-apollon.h"
311#endif
312
309#ifdef CONFIG_MACH_OMAP_OSK 313#ifdef CONFIG_MACH_OMAP_OSK
310#include "board-osk.h" 314#include "board-osk.h"
311#endif 315#endif
@@ -314,10 +318,6 @@
314#include "board-voiceblue.h" 318#include "board-voiceblue.h"
315#endif 319#endif
316 320
317#ifdef CONFIG_MACH_NETSTAR
318#include "board-netstar.h"
319#endif
320
321#endif /* !__ASSEMBLER__ */ 321#endif /* !__ASSEMBLER__ */
322 322
323#endif /* __ASM_ARCH_OMAP_HARDWARE_H */ 323#endif /* __ASM_ARCH_OMAP_HARDWARE_H */
diff --git a/include/asm-arm/arch-omap/irda.h b/include/asm-arm/arch-omap/irda.h
new file mode 100644
index 000000000000..805ae3575e44
--- /dev/null
+++ b/include/asm-arm/arch-omap/irda.h
@@ -0,0 +1,36 @@
1/*
2 * linux/include/asm-arm/arch-omap/irda.h
3 *
4 * Copyright (C) 2005-2006 Komal Shah <komal_shah802003@yahoo.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef ASMARM_ARCH_IRDA_H
11#define ASMARM_ARCH_IRDA_H
12
13/* board specific transceiver capabilities */
14
15#define IR_SEL 1 /* Selects IrDA */
16#define IR_SIRMODE 2
17#define IR_FIRMODE 4
18#define IR_MIRMODE 8
19
20struct omap_irda_config {
21 int transceiver_cap;
22 int (*transceiver_mode)(struct device *dev, int mode);
23 int (*select_irda)(struct device *dev, int state);
24 /* Very specific to the needs of some platforms (h3, h4)
25 * that have calls which can sleep in irda_set_speed.
26 */
27 struct work_struct gpio_expa;
28 int rx_channel;
29 int tx_channel;
30 unsigned long dest_start;
31 unsigned long src_start;
32 int tx_trigger;
33 int rx_trigger;
34};
35
36#endif
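
A sketch of how a board file might fill this structure; the helper and every value below are placeholders, not taken from an actual h3/h4 board file:

#include <linux/device.h>
#include <asm/arch/irda.h>

static int example_transceiver_mode(struct device *dev, int mode)
{
	/* switch the external transceiver between SIR/MIR/FIR here */
	return 0;
}

static struct omap_irda_config example_irda_data = {
	.transceiver_cap	= IR_SIRMODE | IR_FIRMODE,
	.transceiver_mode	= example_transceiver_mode,
	.rx_channel		= 0,	/* DMA parameters: placeholders */
	.tx_channel		= 0,
	.rx_trigger		= 0,
	.tx_trigger		= 0,
};
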
diff --git a/include/asm-arm/arch-omap/irqs.h b/include/asm-arm/arch-omap/irqs.h
index 4ffce1d77759..42098d99f302 100644
--- a/include/asm-arm/arch-omap/irqs.h
+++ b/include/asm-arm/arch-omap/irqs.h
@@ -242,6 +242,11 @@
242#define INT_24XX_GPIO_BANK2 30 242#define INT_24XX_GPIO_BANK2 30
243#define INT_24XX_GPIO_BANK3 31 243#define INT_24XX_GPIO_BANK3 31
244#define INT_24XX_GPIO_BANK4 32 244#define INT_24XX_GPIO_BANK4 32
245#define INT_24XX_MCBSP1_IRQ_TX 59
246#define INT_24XX_MCBSP1_IRQ_RX 60
247#define INT_24XX_MCBSP2_IRQ_TX 62
248#define INT_24XX_MCBSP2_IRQ_RX 63
249#define INT_24XX_UART3_IRQ 74
245 250
246/* Max. 128 level 2 IRQs (OMAP1610), 192 GPIOs (OMAP730) and 251/* Max. 128 level 2 IRQs (OMAP1610), 192 GPIOs (OMAP730) and
247 * 16 MPUIO lines */ 252 * 16 MPUIO lines */
diff --git a/include/asm-arm/arch-omap/keypad.h b/include/asm-arm/arch-omap/keypad.h
new file mode 100644
index 000000000000..8a023a984acb
--- /dev/null
+++ b/include/asm-arm/arch-omap/keypad.h
@@ -0,0 +1,36 @@
1/*
2 * linux/include/asm-arm/arch-omap/keypad.h
3 *
4 * Copyright (C) 2006 Komal Shah <komal_shah802003@yahoo.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef ASMARM_ARCH_KEYPAD_H
11#define ASMARM_ARCH_KEYPAD_H
12
13struct omap_kp_platform_data {
14 int rows;
15 int cols;
16 int *keymap;
17 unsigned int rep:1;
18 /* specific to OMAP242x*/
19 unsigned int *row_gpios;
20 unsigned int *col_gpios;
21};
22
23/* Group (0..3) -- when multiple keys are pressed, only the
24 * keys pressed in the same group are considered pressed. This is
25 * done to work around certain crappy HW designs that produce ghost
26 * keypresses. */
27#define GROUP_0 (0 << 16)
28#define GROUP_1 (1 << 16)
29#define GROUP_2 (2 << 16)
30#define GROUP_3 (3 << 16)
31#define GROUP_MASK GROUP_3
32
33#define KEY(col, row, val) (((col) << 28) | ((row) << 24) | (val))
34
35#endif
36
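
An illustration of how the keymap encoding above is meant to be used; the matrix size and key choices are hypothetical:

#include <linux/input.h>
#include <asm/arch/keypad.h>

static int example_keymap[] = {
	KEY(0, 0, KEY_LEFT),
	KEY(0, 1, KEY_RIGHT),
	KEY(1, 0, KEY_UP),
	KEY(1, 1, KEY_DOWN),
	KEY(2, 2, GROUP_1 | KEY_ENTER),	/* grouped to suppress ghost keys */
	0
};

static struct omap_kp_platform_data example_kp_data = {
	.rows	= 3,
	.cols	= 3,
	.keymap	= example_keymap,
	.rep	= 1,	/* enable autorepeat */
};
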
diff --git a/include/asm-arm/arch-omap/lcd_lph8923.h b/include/asm-arm/arch-omap/lcd_lph8923.h
new file mode 100644
index 000000000000..004e67e22ca7
--- /dev/null
+++ b/include/asm-arm/arch-omap/lcd_lph8923.h
@@ -0,0 +1,14 @@
1#ifndef __LCD_LPH8923_H
2#define __LCD_LPH8923_H
3
4enum lcd_lph8923_test_num {
5 LCD_LPH8923_TEST_RGB_LINES,
6};
7
8enum lcd_lph8923_test_result {
9 LCD_LPH8923_TEST_SUCCESS,
10 LCD_LPH8923_TEST_INVALID,
11 LCD_LPH8923_TEST_FAILED,
12};
13
14#endif
diff --git a/include/asm-arm/arch-omap/mcbsp.h b/include/asm-arm/arch-omap/mcbsp.h
index e79d98ab2ab6..ed0dde4f7219 100644
--- a/include/asm-arm/arch-omap/mcbsp.h
+++ b/include/asm-arm/arch-omap/mcbsp.h
@@ -37,6 +37,11 @@
37#define OMAP1610_MCBSP2_BASE 0xfffb1000 37#define OMAP1610_MCBSP2_BASE 0xfffb1000
38#define OMAP1610_MCBSP3_BASE 0xe1017000 38#define OMAP1610_MCBSP3_BASE 0xe1017000
39 39
40#define OMAP24XX_MCBSP1_BASE 0x48074000
41#define OMAP24XX_MCBSP2_BASE 0x48076000
42
43#if defined(CONFIG_ARCH_OMAP15XX) || defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP730)
44
40#define OMAP_MCBSP_REG_DRR2 0x00 45#define OMAP_MCBSP_REG_DRR2 0x00
41#define OMAP_MCBSP_REG_DRR1 0x02 46#define OMAP_MCBSP_REG_DRR1 0x02
42#define OMAP_MCBSP_REG_DXR2 0x04 47#define OMAP_MCBSP_REG_DXR2 0x04
@@ -71,9 +76,62 @@
71 76
72#define OMAP_MAX_MCBSP_COUNT 3 77#define OMAP_MAX_MCBSP_COUNT 3
73 78
79#define AUDIO_MCBSP_DATAWRITE (OMAP1510_MCBSP1_BASE + OMAP_MCBSP_REG_DXR1)
80#define AUDIO_MCBSP_DATAREAD (OMAP1510_MCBSP1_BASE + OMAP_MCBSP_REG_DRR1)
81
82#define AUDIO_MCBSP OMAP_MCBSP1
83#define AUDIO_DMA_TX OMAP_DMA_MCBSP1_TX
84#define AUDIO_DMA_RX OMAP_DMA_MCBSP1_RX
85
86#elif defined(CONFIG_ARCH_OMAP24XX)
87
88#define OMAP_MCBSP_REG_DRR2 0x00
89#define OMAP_MCBSP_REG_DRR1 0x04
90#define OMAP_MCBSP_REG_DXR2 0x08
91#define OMAP_MCBSP_REG_DXR1 0x0C
92#define OMAP_MCBSP_REG_SPCR2 0x10
93#define OMAP_MCBSP_REG_SPCR1 0x14
94#define OMAP_MCBSP_REG_RCR2 0x18
95#define OMAP_MCBSP_REG_RCR1 0x1C
96#define OMAP_MCBSP_REG_XCR2 0x20
97#define OMAP_MCBSP_REG_XCR1 0x24
98#define OMAP_MCBSP_REG_SRGR2 0x28
99#define OMAP_MCBSP_REG_SRGR1 0x2C
100#define OMAP_MCBSP_REG_MCR2 0x30
101#define OMAP_MCBSP_REG_MCR1 0x34
102#define OMAP_MCBSP_REG_RCERA 0x38
103#define OMAP_MCBSP_REG_RCERB 0x3C
104#define OMAP_MCBSP_REG_XCERA 0x40
105#define OMAP_MCBSP_REG_XCERB 0x44
106#define OMAP_MCBSP_REG_PCR0 0x48
107#define OMAP_MCBSP_REG_RCERC 0x4C
108#define OMAP_MCBSP_REG_RCERD 0x50
109#define OMAP_MCBSP_REG_XCERC 0x54
110#define OMAP_MCBSP_REG_XCERD 0x58
111#define OMAP_MCBSP_REG_RCERE 0x5C
112#define OMAP_MCBSP_REG_RCERF 0x60
113#define OMAP_MCBSP_REG_XCERE 0x64
114#define OMAP_MCBSP_REG_XCERF 0x68
115#define OMAP_MCBSP_REG_RCERG 0x6C
116#define OMAP_MCBSP_REG_RCERH 0x70
117#define OMAP_MCBSP_REG_XCERG 0x74
118#define OMAP_MCBSP_REG_XCERH 0x78
119
120#define OMAP_MAX_MCBSP_COUNT 2
121
122#define AUDIO_MCBSP_DATAWRITE (OMAP24XX_MCBSP2_BASE + OMAP_MCBSP_REG_DXR1)
123#define AUDIO_MCBSP_DATAREAD (OMAP24XX_MCBSP2_BASE + OMAP_MCBSP_REG_DRR1)
124
125#define AUDIO_MCBSP OMAP_MCBSP2
126#define AUDIO_DMA_TX OMAP24XX_DMA_MCBSP2_TX
127#define AUDIO_DMA_RX OMAP24XX_DMA_MCBSP2_RX
128
129#endif
130
74#define OMAP_MCBSP_READ(base, reg) __raw_readw((base) + OMAP_MCBSP_REG_##reg) 131#define OMAP_MCBSP_READ(base, reg) __raw_readw((base) + OMAP_MCBSP_REG_##reg)
75#define OMAP_MCBSP_WRITE(base, reg, val) __raw_writew((val), (base) + OMAP_MCBSP_REG_##reg) 132#define OMAP_MCBSP_WRITE(base, reg, val) __raw_writew((val), (base) + OMAP_MCBSP_REG_##reg)
76 133
134
77/************************** McBSP SPCR1 bit definitions ***********************/ 135/************************** McBSP SPCR1 bit definitions ***********************/
78#define RRST 0x0001 136#define RRST 0x0001
79#define RRDY 0x0002 137#define RRDY 0x0002
@@ -195,6 +253,10 @@ typedef enum {
195 OMAP_MCBSP3, 253 OMAP_MCBSP3,
196} omap_mcbsp_id; 254} omap_mcbsp_id;
197 255
256typedef int __bitwise omap_mcbsp_io_type_t;
257#define OMAP_MCBSP_IRQ_IO ((__force omap_mcbsp_io_type_t) 1)
258#define OMAP_MCBSP_POLL_IO ((__force omap_mcbsp_io_type_t) 2)
259
198typedef enum { 260typedef enum {
199 OMAP_MCBSP_WORD_8 = 0, 261 OMAP_MCBSP_WORD_8 = 0,
200 OMAP_MCBSP_WORD_12, 262 OMAP_MCBSP_WORD_12,
@@ -246,6 +308,9 @@ u32 omap_mcbsp_recv_word(unsigned int id);
246 308
247int omap_mcbsp_xmit_buffer(unsigned int id, dma_addr_t buffer, unsigned int length); 309int omap_mcbsp_xmit_buffer(unsigned int id, dma_addr_t buffer, unsigned int length);
248int omap_mcbsp_recv_buffer(unsigned int id, dma_addr_t buffer, unsigned int length); 310int omap_mcbsp_recv_buffer(unsigned int id, dma_addr_t buffer, unsigned int length);
311int omap_mcbsp_spi_master_xmit_word_poll(unsigned int id, u32 word);
312int omap_mcbsp_spi_master_recv_word_poll(unsigned int id, u32 * word);
313
249 314
250/* SPI specific API */ 315/* SPI specific API */
251void omap_mcbsp_set_spi_mode(unsigned int id, const struct omap_mcbsp_spi_cfg * spi_cfg); 316void omap_mcbsp_set_spi_mode(unsigned int id, const struct omap_mcbsp_spi_cfg * spi_cfg);
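
To make the accessor macros concrete, a minimal sketch of the read/modify/write pattern they expand to; the function is illustrative and the real register handling lives in the McBSP driver:

/* OMAP_MCBSP_READ(base, SPCR1) expands to
 * __raw_readw((base) + OMAP_MCBSP_REG_SPCR1); note the offsets differ
 * between the OMAP1 and OMAP24XX definitions above. */
static void example_mcbsp_rx_out_of_reset(u32 io_base)
{
	u16 spcr1;

	spcr1 = OMAP_MCBSP_READ(io_base, SPCR1);
	OMAP_MCBSP_WRITE(io_base, SPCR1, spcr1 | RRST);	/* RRST=1: receiver running */
}
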
diff --git a/include/asm-arm/arch-omap/mcspi.h b/include/asm-arm/arch-omap/mcspi.h
new file mode 100644
index 000000000000..9e7f40a88e1b
--- /dev/null
+++ b/include/asm-arm/arch-omap/mcspi.h
@@ -0,0 +1,16 @@
1#ifndef _OMAP2_MCSPI_H
2#define _OMAP2_MCSPI_H
3
4struct omap2_mcspi_platform_config {
5 unsigned long base;
6 unsigned short num_cs;
7};
8
9struct omap2_mcspi_device_config {
10 unsigned turbo_mode:1;
11
12 /* Do we want only one channel enabled at a time? */
13 unsigned single_channel:1;
14};
15
16#endif
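
A hypothetical board-side instantiation of the two structures above; the base address and chip-select count are placeholders:

#include <asm/arch/mcspi.h>

static struct omap2_mcspi_platform_config example_mcspi_config = {
	.base	= 0x48098000,	/* placeholder controller base */
	.num_cs	= 4,
};

static struct omap2_mcspi_device_config example_spi_slave_config = {
	.turbo_mode	= 0,
	.single_channel	= 1,	/* keep only one channel enabled at a time */
};
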
diff --git a/include/asm-arm/arch-omap/menelaus.h b/include/asm-arm/arch-omap/menelaus.h
index 46be8b8d6346..88cd4c87f0de 100644
--- a/include/asm-arm/arch-omap/menelaus.h
+++ b/include/asm-arm/arch-omap/menelaus.h
@@ -7,7 +7,7 @@
7#ifndef __ASM_ARCH_MENELAUS_H 7#ifndef __ASM_ARCH_MENELAUS_H
8#define __ASM_ARCH_MENELAUS_H 8#define __ASM_ARCH_MENELAUS_H
9 9
10extern void menelaus_mmc_register(void (*callback)(u8 card_mask), 10extern void menelaus_mmc_register(void (*callback)(unsigned long data, u8 card_mask),
11 unsigned long data); 11 unsigned long data);
12extern void menelaus_mmc_remove(void); 12extern void menelaus_mmc_remove(void);
13extern void menelaus_mmc_opendrain(int enable); 13extern void menelaus_mmc_opendrain(int enable);
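
With the changed prototype, the registered callback now receives the caller's cookie as its first argument; a sketch with a hypothetical handler:

#include <asm/arch/menelaus.h>

static void example_mmc_event(unsigned long data, u8 card_mask)
{
	/* 'data' is the cookie passed below; 'card_mask' flags the slots
	 * whose card-detect state changed. */
}

static void example_register_mmc_events(void)
{
	menelaus_mmc_register(example_mmc_event, 0 /* cookie */);
}
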
diff --git a/include/asm-arm/arch-omap/mux.h b/include/asm-arm/arch-omap/mux.h
index 13415a9aab06..0dc24d4ba59c 100644
--- a/include/asm-arm/arch-omap/mux.h
+++ b/include/asm-arm/arch-omap/mux.h
@@ -112,14 +112,13 @@
112 * as mux config 112 * as mux config
113 */ 113 */
114#define MUX_CFG_730(desc, mux_reg, mode_offset, mode, \ 114#define MUX_CFG_730(desc, mux_reg, mode_offset, mode, \
115 pull_reg, pull_bit, pull_status, \ 115 pull_bit, pull_status, debug_status)\
116 pu_pd_reg, pu_pd_status, debug_status)\
117{ \ 116{ \
118 .name = desc, \ 117 .name = desc, \
119 .debug = debug_status, \ 118 .debug = debug_status, \
120 MUX_REG_730(mux_reg, mode_offset, mode) \ 119 MUX_REG_730(mux_reg, mode_offset, mode) \
121 PULL_REG_730(mux_reg, pull_bit, pull_status) \ 120 PULL_REG_730(mux_reg, pull_bit, pull_status) \
122 PU_PD_REG(pu_pd_reg, pu_pd_status) \ 121 PU_PD_REG(NA, 0) \
123}, 122},
124 123
125#define MUX_CFG_24XX(desc, reg_offset, mode, \ 124#define MUX_CFG_24XX(desc, reg_offset, mode, \
@@ -172,6 +171,11 @@ enum omap730_index {
172 E4_730_KBC2, 171 E4_730_KBC2,
173 F4_730_KBC3, 172 F4_730_KBC3,
174 E3_730_KBC4, 173 E3_730_KBC4,
174
175 /* USB */
176 AA17_730_USB_DM,
177 W16_730_USB_PU_EN,
178 W17_730_USB_VBUSI,
175}; 179};
176 180
177enum omap1xxx_index { 181enum omap1xxx_index {
@@ -403,9 +407,53 @@ enum omap24xx_index {
403 /* 24xx Menelaus interrupt */ 407 /* 24xx Menelaus interrupt */
404 W19_24XX_SYS_NIRQ, 408 W19_24XX_SYS_NIRQ,
405 409
410 /* 24xx clock */
411 W14_24XX_SYS_CLKOUT,
412
413 /* 242X McBSP */
414 Y15_24XX_MCBSP2_CLKX,
415 R14_24XX_MCBSP2_FSX,
416 W15_24XX_MCBSP2_DR,
417 V15_24XX_MCBSP2_DX,
418
406 /* 24xx GPIO */ 419 /* 24xx GPIO */
420 M21_242X_GPIO11,
421 AA10_242X_GPIO13,
422 AA6_242X_GPIO14,
423 AA4_242X_GPIO15,
424 Y11_242X_GPIO16,
425 AA12_242X_GPIO17,
426 AA8_242X_GPIO58,
407 Y20_24XX_GPIO60, 427 Y20_24XX_GPIO60,
428 W4__24XX_GPIO74,
408 M15_24XX_GPIO92, 429 M15_24XX_GPIO92,
430 V14_24XX_GPIO117,
431
432 P20_24XX_TSC_IRQ,
433
434 /* UART3 */
435 K15_24XX_UART3_TX,
436 K14_24XX_UART3_RX,
437
438 /* Keypad GPIO*/
439 T19_24XX_KBR0,
440 R19_24XX_KBR1,
441 V18_24XX_KBR2,
442 M21_24XX_KBR3,
443 E5__24XX_KBR4,
444 M18_24XX_KBR5,
445 R20_24XX_KBC0,
446 M14_24XX_KBC1,
447 H19_24XX_KBC2,
448 V17_24XX_KBC3,
449 P21_24XX_KBC4,
450 L14_24XX_KBC5,
451 N19_24XX_KBC6,
452
453 /* 24xx Menelaus Keypad GPIO */
454 B3__24XX_KBR5,
455 AA4_24XX_KBC2,
456 B13_24XX_KBC6,
409}; 457};
410 458
411#ifdef CONFIG_OMAP_MUX 459#ifdef CONFIG_OMAP_MUX
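
The new enum entries are consumed through omap_cfg_reg(), declared later in this header when CONFIG_OMAP_MUX is set; routing the 24xx UART3 pads added above might look like this sketch:

static void example_mux_uart3_pins(void)
{
	omap_cfg_reg(K15_24XX_UART3_TX);
	omap_cfg_reg(K14_24XX_UART3_RX);
}
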
diff --git a/include/asm-arm/arch-omap/omap-alsa.h b/include/asm-arm/arch-omap/omap-alsa.h
new file mode 100644
index 000000000000..df4695474e3d
--- /dev/null
+++ b/include/asm-arm/arch-omap/omap-alsa.h
@@ -0,0 +1,124 @@
1/*
2 * linux/include/asm-arm/arch-omap/omap-alsa.h
3 *
4 * ALSA driver for AIC23 and TSC2101 codecs on OMAP platform boards.
5 *
6 * Copyright (C) 2006 Mika Laitio <lamikr@cc.jyu.fi>
7 *
8 * Copyright (C) 2005 Instituto Nokia de Tecnologia - INdT - Manaus Brazil
9 * Written by Daniel Petrini, David Cohen, Anderson Briglia
10 * {daniel.petrini, david.cohen, anderson.briglia}@indt.org.br
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
18 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
20 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
23 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
24 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * You should have received a copy of the GNU General Public License along
29 * with this program; if not, write to the Free Software Foundation, Inc.,
30 * 675 Mass Ave, Cambridge, MA 02139, USA.
31 *
32 * History
33 * -------
34 *
35 * 2005/07/25 INdT-10LE Kernel Team - Alsa driver for omap osk,
36 * original version based in sa1100 driver
37 * and omap oss driver.
38 */
39
40#ifndef __OMAP_ALSA_H
41#define __OMAP_ALSA_H
42
43#include <sound/driver.h>
44#include <asm/arch/dma.h>
45#include <sound/core.h>
46#include <sound/pcm.h>
47#include <asm/arch/mcbsp.h>
48#include <linux/platform_device.h>
49
50#define DMA_BUF_SIZE (1024 * 8)
51
52/*
53 * Buffer management for alsa and dma
54 */
55struct audio_stream {
56 char *id; /* identification string */
57 int stream_id; /* numeric identification */
58 int dma_dev; /* dma number of that device */
59 int *lch; /* Chain of channels this stream is linked to */
60 char started; /* to store if the chain was started or not */
61 int dma_q_head; /* DMA Channel Q Head */
62 int dma_q_tail; /* DMA Channel Q Tail */
63 char dma_q_count; /* DMA Channel Q Count */
64 int active:1; /* we are using this stream for transfer now */
65 int period; /* current transfer period */
66 int periods; /* current count of periods registered in the DMA engine */
67 spinlock_t dma_lock; /* for locking in DMA operations */
68 snd_pcm_substream_t *stream; /* the pcm stream */
69 unsigned linked:1; /* dma channels linked */
70 int offset; /* store start position of the last period in the alsa buffer */
71 int (*hw_start)(void); /* interface to start HW interface, e.g. McBSP */
72 int (*hw_stop)(void); /* interface to stop HW interface, e.g. McBSP */
73};
74
75/*
76 * Alsa card structure for aic23
77 */
78struct snd_card_omap_codec {
79 snd_card_t *card;
80 snd_pcm_t *pcm;
81 long samplerate;
82 struct audio_stream s[2]; /* playback & capture */
83};
84
85/* Codec-specific information and function pointers.
86 * The codec files (omap-alsa-aic23.c and omap-alsa-tsc2101.c)
87 * are responsible for defining the function pointers.
88 */
89struct omap_alsa_codec_config {
90 char *name;
91 struct omap_mcbsp_reg_cfg *mcbsp_regs_alsa;
92 snd_pcm_hw_constraint_list_t *hw_constraints_rates;
93 snd_pcm_hardware_t *snd_omap_alsa_playback;
94 snd_pcm_hardware_t *snd_omap_alsa_capture;
95 void (*codec_configure_dev)(void);
96 void (*codec_set_samplerate)(long);
97 void (*codec_clock_setup)(void);
98 int (*codec_clock_on)(void);
99 int (*codec_clock_off)(void);
100 int (*get_default_samplerate)(void);
101};
102
103/*********** Mixer function prototypes *************************/
104int snd_omap_mixer(struct snd_card_omap_codec *);
105void snd_omap_init_mixer(void);
106
107#ifdef CONFIG_PM
108void snd_omap_suspend_mixer(void);
109void snd_omap_resume_mixer(void);
110#endif
111
112int snd_omap_alsa_post_probe(struct platform_device *pdev, struct omap_alsa_codec_config *config);
113int snd_omap_alsa_remove(struct platform_device *pdev);
114#ifdef CONFIG_PM
115int snd_omap_alsa_suspend(struct platform_device *pdev, pm_message_t state);
116int snd_omap_alsa_resume(struct platform_device *pdev);
117#else
118#define snd_omap_alsa_suspend NULL
119#define snd_omap_alsa_resume NULL
120#endif
121
122void callback_omap_alsa_sound_dma(void *);
123
124#endif
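
A trimmed-down sketch of the glue a codec file is expected to provide through omap_alsa_codec_config; every 'example_' symbol is hypothetical and most hooks are omitted:

static void example_set_samplerate(long rate)
{
	/* program the codec PLL/dividers for 'rate' */
}

static int example_clock_on(void)  { return 0; }
static int example_clock_off(void) { return 0; }
static int example_default_samplerate(void) { return 44100; }

static struct omap_alsa_codec_config example_codec_config = {
	.name			= "example-codec",
	.codec_set_samplerate	= example_set_samplerate,
	.codec_clock_on		= example_clock_on,
	.codec_clock_off	= example_clock_off,
	.get_default_samplerate	= example_default_samplerate,
};
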
diff --git a/include/asm-arm/arch-omap/omapfb.h b/include/asm-arm/arch-omap/omapfb.h
index 4ba2622cc142..fccdb3db025f 100644
--- a/include/asm-arm/arch-omap/omapfb.h
+++ b/include/asm-arm/arch-omap/omapfb.h
@@ -34,9 +34,10 @@
34#define OMAPFB_MIRROR OMAP_IOW(31, int) 34#define OMAPFB_MIRROR OMAP_IOW(31, int)
35#define OMAPFB_SYNC_GFX OMAP_IO(37) 35#define OMAPFB_SYNC_GFX OMAP_IO(37)
36#define OMAPFB_VSYNC OMAP_IO(38) 36#define OMAPFB_VSYNC OMAP_IO(38)
37#define OMAPFB_SET_UPDATE_MODE OMAP_IOW(40, enum omapfb_update_mode) 37#define OMAPFB_SET_UPDATE_MODE OMAP_IOW(40, int)
38#define OMAPFB_UPDATE_WINDOW_OLD OMAP_IOW(41, struct omapfb_update_window_old)
38#define OMAPFB_GET_CAPS OMAP_IOR(42, unsigned long) 39#define OMAPFB_GET_CAPS OMAP_IOR(42, unsigned long)
39#define OMAPFB_GET_UPDATE_MODE OMAP_IOW(43, enum omapfb_update_mode) 40#define OMAPFB_GET_UPDATE_MODE OMAP_IOW(43, int)
40#define OMAPFB_LCD_TEST OMAP_IOW(45, int) 41#define OMAPFB_LCD_TEST OMAP_IOW(45, int)
41#define OMAPFB_CTRL_TEST OMAP_IOW(46, int) 42#define OMAPFB_CTRL_TEST OMAP_IOW(46, int)
42#define OMAPFB_UPDATE_WINDOW OMAP_IOW(47, struct omapfb_update_window) 43#define OMAPFB_UPDATE_WINDOW OMAP_IOW(47, struct omapfb_update_window)
@@ -66,9 +67,14 @@ enum omapfb_color_format {
66}; 67};
67 68
68struct omapfb_update_window { 69struct omapfb_update_window {
69 u32 x, y; 70 __u32 x, y;
70 u32 width, height; 71 __u32 width, height;
71 u32 format; 72 __u32 format;
73};
74
75struct omapfb_update_window_old {
76 __u32 x, y;
77 __u32 width, height;
72}; 78};
73 79
74enum omapfb_plane { 80enum omapfb_plane {
@@ -83,17 +89,17 @@ enum omapfb_channel_out {
83}; 89};
84 90
85struct omapfb_setup_plane { 91struct omapfb_setup_plane {
86 u8 plane; 92 __u8 plane;
87 u8 channel_out; 93 __u8 channel_out;
88 u32 offset; 94 __u32 offset;
89 u32 pos_x, pos_y; 95 __u32 pos_x, pos_y;
90 u32 width, height; 96 __u32 width, height;
91 u32 color_mode; 97 __u32 color_mode;
92}; 98};
93 99
94struct omapfb_enable_plane { 100struct omapfb_enable_plane {
95 u8 plane; 101 __u8 plane;
96 u8 enable; 102 __u8 enable;
97}; 103};
98 104
99enum omapfb_color_key_type { 105enum omapfb_color_key_type {
@@ -103,10 +109,10 @@ enum omapfb_color_key_type {
103}; 109};
104 110
105struct omapfb_color_key { 111struct omapfb_color_key {
106 u8 channel_out; 112 __u8 channel_out;
107 u32 background; 113 __u32 background;
108 u32 trans_key; 114 __u32 trans_key;
109 u8 key_type; 115 __u8 key_type;
110}; 116};
111 117
112enum omapfb_update_mode { 118enum omapfb_update_mode {
@@ -120,6 +126,9 @@ enum omapfb_update_mode {
120#include <linux/completion.h> 126#include <linux/completion.h>
121#include <linux/interrupt.h> 127#include <linux/interrupt.h>
122#include <linux/fb.h> 128#include <linux/fb.h>
129#include <linux/mutex.h>
130
131#include <asm/arch/board.h>
123 132
124#define OMAP_LCDC_INV_VSYNC 0x0001 133#define OMAP_LCDC_INV_VSYNC 0x0001
125#define OMAP_LCDC_INV_HSYNC 0x0002 134#define OMAP_LCDC_INV_HSYNC 0x0002
@@ -184,19 +193,38 @@ struct extif_timings {
184 int re_cycle_time; 193 int re_cycle_time;
185 int cs_pulse_width; 194 int cs_pulse_width;
186 int access_time; 195 int access_time;
196
197 int clk_div;
198
199 u32 tim[5]; /* set by extif->convert_timings */
200
201 int converted;
187}; 202};
188 203
189struct lcd_ctrl_extif { 204struct lcd_ctrl_extif {
190 int (*init) (void); 205 int (*init) (void);
191 void (*cleanup) (void); 206 void (*cleanup) (void);
207 void (*get_clk_info) (u32 *clk_period, u32 *max_clk_div);
208 int (*convert_timings) (struct extif_timings *timings);
192 void (*set_timings) (const struct extif_timings *timings); 209 void (*set_timings) (const struct extif_timings *timings);
193 void (*write_command) (u32 cmd); 210 void (*set_bits_per_cycle)(int bpc);
194 u32 (*read_data) (void); 211 void (*write_command) (const void *buf, unsigned int len);
195 void (*write_data) (u32 data); 212 void (*read_data) (void *buf, unsigned int len);
213 void (*write_data) (const void *buf, unsigned int len);
196 void (*transfer_area) (int width, int height, 214 void (*transfer_area) (int width, int height,
197 void (callback)(void * data), void *data); 215 void (callback)(void * data), void *data);
216 unsigned long max_transmit_size;
198}; 217};
199 218
219struct omapfb_notifier_block {
220 struct notifier_block nb;
221 void *data;
222};
223
224typedef int (*omapfb_notifier_callback_t)(struct omapfb_notifier_block *,
225 unsigned long event,
226 struct omapfb_device *fbdev);
227
200struct lcd_ctrl { 228struct lcd_ctrl {
201 const char *name; 229 const char *name;
202 void *data; 230 void *data;
@@ -204,9 +232,11 @@ struct lcd_ctrl {
204 int (*init) (struct omapfb_device *fbdev, 232 int (*init) (struct omapfb_device *fbdev,
205 int ext_mode, int req_vram_size); 233 int ext_mode, int req_vram_size);
206 void (*cleanup) (void); 234 void (*cleanup) (void);
235 void (*bind_client) (struct omapfb_notifier_block *nb);
207 void (*get_vram_layout)(unsigned long *size, 236 void (*get_vram_layout)(unsigned long *size,
208 void **virt_base, 237 void **virt_base,
209 dma_addr_t *phys_base); 238 dma_addr_t *phys_base);
239 int (*mmap) (struct vm_area_struct *vma);
210 unsigned long (*get_caps) (void); 240 unsigned long (*get_caps) (void);
211 int (*set_update_mode)(enum omapfb_update_mode mode); 241 int (*set_update_mode)(enum omapfb_update_mode mode);
212 enum omapfb_update_mode (*get_update_mode)(void); 242 enum omapfb_update_mode (*get_update_mode)(void);
@@ -240,7 +270,7 @@ struct omapfb_device {
240 int state; 270 int state;
241 int ext_lcdc; /* Using external 271 int ext_lcdc; /* Using external
242 LCD controller */ 272 LCD controller */
243 struct semaphore rqueue_sema; 273 struct mutex rqueue_mutex;
244 274
245 void *vram_virt_base; 275 void *vram_virt_base;
246 dma_addr_t vram_phys_base; 276 dma_addr_t vram_phys_base;
@@ -261,12 +291,13 @@ struct omapfb_device {
261 struct device *dev; 291 struct device *dev;
262}; 292};
263 293
264extern struct lcd_panel h3_panel; 294struct omapfb_platform_data {
265extern struct lcd_panel h2_panel; 295 struct omap_lcd_config lcd;
266extern struct lcd_panel p2_panel; 296 struct omap_fbmem_config fbmem;
267extern struct lcd_panel osk_panel; 297};
268extern struct lcd_panel innovator1610_panel; 298
269extern struct lcd_panel innovator1510_panel; 299#define OMAPFB_EVENT_READY 1
300#define OMAPFB_EVENT_DISABLED 2
270 301
271#ifdef CONFIG_ARCH_OMAP1 302#ifdef CONFIG_ARCH_OMAP1
272extern struct lcd_ctrl omap1_lcd_ctrl; 303extern struct lcd_ctrl omap1_lcd_ctrl;
@@ -274,7 +305,20 @@ extern struct lcd_ctrl omap1_lcd_ctrl;
274extern struct lcd_ctrl omap2_disp_ctrl; 305extern struct lcd_ctrl omap2_disp_ctrl;
275#endif 306#endif
276 307
308extern void omapfb_register_panel(struct lcd_panel *panel);
277extern void omapfb_write_first_pixel(struct omapfb_device *fbdev, u16 pixval); 309extern void omapfb_write_first_pixel(struct omapfb_device *fbdev, u16 pixval);
310extern void omapfb_notify_clients(struct omapfb_device *fbdev,
311 unsigned long event);
312extern int omapfb_register_client(struct omapfb_notifier_block *nb,
313 omapfb_notifier_callback_t callback,
314 void *callback_data);
315extern int omapfb_unregister_client(struct omapfb_notifier_block *nb);
316extern int omapfb_update_window_async(struct omapfb_update_window *win,
317 void (*callback)(void *),
318 void *callback_data);
319
320/* in arch/arm/plat-omap/devices.c */
321extern void omapfb_reserve_mem(void);
278 322
279#endif /* __KERNEL__ */ 323#endif /* __KERNEL__ */
280 324
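
A sketch of how an LCD controller client could use the notifier interface declared above; the handler body and its reaction to the event codes are hypothetical:

static struct omapfb_notifier_block example_nb;

static int example_fb_event(struct omapfb_notifier_block *nb,
			    unsigned long event,
			    struct omapfb_device *fbdev)
{
	if (event == OMAPFB_EVENT_READY) {
		/* framebuffer usable again, e.g. resubmit a pending update */
	} else if (event == OMAPFB_EVENT_DISABLED) {
		/* stop issuing omapfb_update_window_async() calls */
	}
	return 0;
}

static int example_bind_to_omapfb(void)
{
	return omapfb_register_client(&example_nb, example_fb_event, NULL);
}
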
diff --git a/include/asm-arm/arch-omap/param.h b/include/asm-arm/arch-omap/param.h
new file mode 100644
index 000000000000..face9ad41e97
--- /dev/null
+++ b/include/asm-arm/arch-omap/param.h
@@ -0,0 +1,8 @@
1/*
2 * linux/include/asm-arm/arch-omap/param.h
3 *
4 */
5
6#ifdef CONFIG_OMAP_32K_TIMER_HZ
7#define HZ CONFIG_OMAP_32K_TIMER_HZ
8#endif
diff --git a/include/asm-arm/arch-omap/pm.h b/include/asm-arm/arch-omap/pm.h
index 7c790425e363..05b003f3a94c 100644
--- a/include/asm-arm/arch-omap/pm.h
+++ b/include/asm-arm/arch-omap/pm.h
@@ -49,7 +49,7 @@
49 49
50/* 50/*
51 * ---------------------------------------------------------------------------- 51 * ----------------------------------------------------------------------------
52 * Powermanagement bitmasks 52 * Power management bitmasks
53 * ---------------------------------------------------------------------------- 53 * ----------------------------------------------------------------------------
54 */ 54 */
55#define IDLE_WAIT_CYCLES 0x00000fff 55#define IDLE_WAIT_CYCLES 0x00000fff
@@ -112,32 +112,59 @@
112#endif 112#endif
113 113
114#ifndef __ASSEMBLER__ 114#ifndef __ASSEMBLER__
115
116#include <linux/clk.h>
117
118extern void prevent_idle_sleep(void);
119extern void allow_idle_sleep(void);
120
121/**
122 * clk_deny_idle - Prevents the clock from being idled during MPU idle
123 * @clk: clock signal handle
124 */
125void clk_deny_idle(struct clk *clk);
126
127/**
128 * clk_allow_idle - Counteracts a previous clk_deny_idle
129 * @clk: clock signal handle
130 */
131void clk_allow_idle(struct clk *clk);
132
115extern void omap_pm_idle(void); 133extern void omap_pm_idle(void);
116extern void omap_pm_suspend(void); 134extern void omap_pm_suspend(void);
117extern void omap730_cpu_suspend(unsigned short, unsigned short); 135extern void omap730_cpu_suspend(unsigned short, unsigned short);
118extern void omap1510_cpu_suspend(unsigned short, unsigned short); 136extern void omap1510_cpu_suspend(unsigned short, unsigned short);
119extern void omap1610_cpu_suspend(unsigned short, unsigned short); 137extern void omap1610_cpu_suspend(unsigned short, unsigned short);
138extern void omap24xx_cpu_suspend(u32 dll_ctrl, u32 cpu_revision);
120extern void omap730_idle_loop_suspend(void); 139extern void omap730_idle_loop_suspend(void);
121extern void omap1510_idle_loop_suspend(void); 140extern void omap1510_idle_loop_suspend(void);
122extern void omap1610_idle_loop_suspend(void); 141extern void omap1610_idle_loop_suspend(void);
142extern void omap24xx_idle_loop_suspend(void);
143
144extern unsigned int omap730_cpu_suspend_sz;
145extern unsigned int omap1510_cpu_suspend_sz;
146extern unsigned int omap1610_cpu_suspend_sz;
147extern unsigned int omap24xx_cpu_suspend_sz;
148extern unsigned int omap730_idle_loop_suspend_sz;
149extern unsigned int omap1510_idle_loop_suspend_sz;
150extern unsigned int omap1610_idle_loop_suspend_sz;
151extern unsigned int omap24xx_idle_loop_suspend_sz;
123 152
124#ifdef CONFIG_OMAP_SERIAL_WAKE 153#ifdef CONFIG_OMAP_SERIAL_WAKE
125extern void omap_serial_wake_trigger(int enable); 154extern void omap_serial_wake_trigger(int enable);
126#else 155#else
156#define omap_serial_wakeup_init() {}
127#define omap_serial_wake_trigger(x) {} 157#define omap_serial_wake_trigger(x) {}
128#endif /* CONFIG_OMAP_SERIAL_WAKE */ 158#endif /* CONFIG_OMAP_SERIAL_WAKE */
129 159
130extern unsigned int omap730_cpu_suspend_sz;
131extern unsigned int omap730_idle_loop_suspend_sz;
132extern unsigned int omap1510_cpu_suspend_sz;
133extern unsigned int omap1510_idle_loop_suspend_sz;
134extern unsigned int omap1610_cpu_suspend_sz;
135extern unsigned int omap1610_idle_loop_suspend_sz;
136
137#define ARM_SAVE(x) arm_sleep_save[ARM_SLEEP_SAVE_##x] = omap_readl(x) 160#define ARM_SAVE(x) arm_sleep_save[ARM_SLEEP_SAVE_##x] = omap_readl(x)
138#define ARM_RESTORE(x) omap_writel((arm_sleep_save[ARM_SLEEP_SAVE_##x]), (x)) 161#define ARM_RESTORE(x) omap_writel((arm_sleep_save[ARM_SLEEP_SAVE_##x]), (x))
139#define ARM_SHOW(x) arm_sleep_save[ARM_SLEEP_SAVE_##x] 162#define ARM_SHOW(x) arm_sleep_save[ARM_SLEEP_SAVE_##x]
140 163
164#define DSP_SAVE(x) dsp_sleep_save[DSP_SLEEP_SAVE_##x] = __raw_readw(x)
165#define DSP_RESTORE(x) __raw_writew((dsp_sleep_save[DSP_SLEEP_SAVE_##x]), (x))
166#define DSP_SHOW(x) dsp_sleep_save[DSP_SLEEP_SAVE_##x]
167
141#define ULPD_SAVE(x) ulpd_sleep_save[ULPD_SLEEP_SAVE_##x] = omap_readw(x) 168#define ULPD_SAVE(x) ulpd_sleep_save[ULPD_SLEEP_SAVE_##x] = omap_readw(x)
142#define ULPD_RESTORE(x) omap_writew((ulpd_sleep_save[ULPD_SLEEP_SAVE_##x]), (x)) 169#define ULPD_RESTORE(x) omap_writew((ulpd_sleep_save[ULPD_SLEEP_SAVE_##x]), (x))
143#define ULPD_SHOW(x) ulpd_sleep_save[ULPD_SLEEP_SAVE_##x] 170#define ULPD_SHOW(x) ulpd_sleep_save[ULPD_SLEEP_SAVE_##x]
@@ -154,6 +181,10 @@ extern unsigned int omap1610_idle_loop_suspend_sz;
154#define MPUI1610_RESTORE(x) omap_writel((mpui1610_sleep_save[MPUI1610_SLEEP_SAVE_##x]), (x)) 181#define MPUI1610_RESTORE(x) omap_writel((mpui1610_sleep_save[MPUI1610_SLEEP_SAVE_##x]), (x))
155#define MPUI1610_SHOW(x) mpui1610_sleep_save[MPUI1610_SLEEP_SAVE_##x] 182#define MPUI1610_SHOW(x) mpui1610_sleep_save[MPUI1610_SLEEP_SAVE_##x]
156 183
184#define OMAP24XX_SAVE(x) omap24xx_sleep_save[OMAP24XX_SLEEP_SAVE_##x] = x
185#define OMAP24XX_RESTORE(x) x = omap24xx_sleep_save[OMAP24XX_SLEEP_SAVE_##x]
186#define OMAP24XX_SHOW(x) omap24xx_sleep_save[OMAP24XX_SLEEP_SAVE_##x]
187
157/* 188/*
158 * List of global OMAP registers to preserve. 189 * List of global OMAP registers to preserve.
159 * More ones like CP and general purpose register values are preserved 190 * More ones like CP and general purpose register values are preserved
@@ -176,6 +207,15 @@ enum arm_save_state {
176 ARM_SLEEP_SAVE_SIZE 207 ARM_SLEEP_SAVE_SIZE
177}; 208};
178 209
210enum dsp_save_state {
211 DSP_SLEEP_SAVE_START = 0,
212 /*
213 * DSP registers 16 bits
214 */
215 DSP_SLEEP_SAVE_DSP_IDLECT2,
216 DSP_SLEEP_SAVE_SIZE
217};
218
179enum ulpd_save_state { 219enum ulpd_save_state {
180 ULPD_SLEEP_SAVE_START = 0, 220 ULPD_SLEEP_SAVE_START = 0,
181 /* 221 /*
@@ -254,5 +294,30 @@ enum mpui1610_save_state {
254#endif 294#endif
255}; 295};
256 296
297enum omap24xx_save_state {
298 OMAP24XX_SLEEP_SAVE_START = 0,
299 OMAP24XX_SLEEP_SAVE_INTC_MIR0,
300 OMAP24XX_SLEEP_SAVE_INTC_MIR1,
301 OMAP24XX_SLEEP_SAVE_INTC_MIR2,
302 OMAP24XX_SLEEP_SAVE_CM_FCLKEN1_CORE,
303 OMAP24XX_SLEEP_SAVE_CM_FCLKEN2_CORE,
304 OMAP24XX_SLEEP_SAVE_CM_ICLKEN1_CORE,
305 OMAP24XX_SLEEP_SAVE_CM_ICLKEN2_CORE,
306 OMAP24XX_SLEEP_SAVE_CM_ICLKEN4_CORE,
307 OMAP24XX_SLEEP_SAVE_GPIO1_IRQENABLE1,
308 OMAP24XX_SLEEP_SAVE_GPIO2_IRQENABLE1,
309 OMAP24XX_SLEEP_SAVE_GPIO3_IRQENABLE1,
310 OMAP24XX_SLEEP_SAVE_GPIO4_IRQENABLE1,
311 OMAP24XX_SLEEP_SAVE_GPIO3_OE,
312 OMAP24XX_SLEEP_SAVE_GPIO4_OE,
313 OMAP24XX_SLEEP_SAVE_GPIO3_RISINGDETECT,
314 OMAP24XX_SLEEP_SAVE_GPIO3_FALLINGDETECT,
315 OMAP24XX_SLEEP_SAVE_CONTROL_PADCONF_SPI1_NCS2,
316 OMAP24XX_SLEEP_SAVE_CONTROL_PADCONF_MCBSP1_DX,
317 OMAP24XX_SLEEP_SAVE_CONTROL_PADCONF_SSI1_FLAG_TX,
318 OMAP24XX_SLEEP_SAVE_CONTROL_PADCONF_SYS_NIRQW0,
319 OMAP24XX_SLEEP_SAVE_SIZE
320};
321
257#endif /* ASSEMBLER */ 322#endif /* ASSEMBLER */
258#endif /* __ASM_ARCH_OMAP_PM_H */ 323#endif /* __ASM_ARCH_OMAP_PM_H */
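
The *_SAVE/*_RESTORE/*_SHOW macro families pair with the matching *_save_state enums; a minimal sketch using the new DSP set (the array and the suspend-path helper are illustrative, and DSP_IDLECT2 is assumed to be defined elsewhere in the OMAP headers):

static unsigned short dsp_sleep_save[DSP_SLEEP_SAVE_SIZE];

static void example_dsp_context(int save)
{
	if (save)
		DSP_SAVE(DSP_IDLECT2);	/* dsp_sleep_save[DSP_SLEEP_SAVE_DSP_IDLECT2] = __raw_readw(DSP_IDLECT2) */
	else
		DSP_RESTORE(DSP_IDLECT2);	/* write the saved value back */
}
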
diff --git a/include/asm-arm/arch-omap/prcm.h b/include/asm-arm/arch-omap/prcm.h
index 7b48a5cbb15f..7bcaf94bde9f 100644
--- a/include/asm-arm/arch-omap/prcm.h
+++ b/include/asm-arm/arch-omap/prcm.h
@@ -1,5 +1,7 @@
1/* 1/*
2 * prcm.h - Access definations for use in OMAP24XX clock and power management 2 * linux/include/asm-arm/arch-omap/prcm.h
3 *
4 * Access definitions for use in OMAP24XX clock and power management
3 * 5 *
4 * Copyright (C) 2005 Texas Instruments, Inc. 6 * Copyright (C) 2005 Texas Instruments, Inc.
5 * 7 *
@@ -21,405 +23,7 @@
21#ifndef __ASM_ARM_ARCH_DPM_PRCM_H 23#ifndef __ASM_ARM_ARCH_DPM_PRCM_H
22#define __ASM_ARM_ARCH_DPM_PRCM_H 24#define __ASM_ARM_ARCH_DPM_PRCM_H
23 25
24/* SET_PERFORMANCE_LEVEL PARAMETERS */ 26u32 omap_prcm_get_reset_sources(void);
25#define PRCM_HALF_SPEED 1
26#define PRCM_FULL_SPEED 2
27
28#ifndef __ASSEMBLER__
29
30#define PRCM_REG32(offset) __REG32(OMAP24XX_PRCM_BASE + (offset))
31
32#define PRCM_REVISION PRCM_REG32(0x000)
33#define PRCM_SYSCONFIG PRCM_REG32(0x010)
34#define PRCM_IRQSTATUS_MPU PRCM_REG32(0x018)
35#define PRCM_IRQENABLE_MPU PRCM_REG32(0x01C)
36#define PRCM_VOLTCTRL PRCM_REG32(0x050)
37#define PRCM_VOLTST PRCM_REG32(0x054)
38#define PRCM_CLKSRC_CTRL PRCM_REG32(0x060)
39#define PRCM_CLKOUT_CTRL PRCM_REG32(0x070)
40#define PRCM_CLKEMUL_CTRL PRCM_REG32(0x078)
41#define PRCM_CLKCFG_CTRL PRCM_REG32(0x080)
42#define PRCM_CLKCFG_STATUS PRCM_REG32(0x084)
43#define PRCM_VOLTSETUP PRCM_REG32(0x090)
44#define PRCM_CLKSSETUP PRCM_REG32(0x094)
45#define PRCM_POLCTRL PRCM_REG32(0x098)
46
47/* GENERAL PURPOSE */
48#define GENERAL_PURPOSE1 PRCM_REG32(0x0B0)
49#define GENERAL_PURPOSE2 PRCM_REG32(0x0B4)
50#define GENERAL_PURPOSE3 PRCM_REG32(0x0B8)
51#define GENERAL_PURPOSE4 PRCM_REG32(0x0BC)
52#define GENERAL_PURPOSE5 PRCM_REG32(0x0C0)
53#define GENERAL_PURPOSE6 PRCM_REG32(0x0C4)
54#define GENERAL_PURPOSE7 PRCM_REG32(0x0C8)
55#define GENERAL_PURPOSE8 PRCM_REG32(0x0CC)
56#define GENERAL_PURPOSE9 PRCM_REG32(0x0D0)
57#define GENERAL_PURPOSE10 PRCM_REG32(0x0D4)
58#define GENERAL_PURPOSE11 PRCM_REG32(0x0D8)
59#define GENERAL_PURPOSE12 PRCM_REG32(0x0DC)
60#define GENERAL_PURPOSE13 PRCM_REG32(0x0E0)
61#define GENERAL_PURPOSE14 PRCM_REG32(0x0E4)
62#define GENERAL_PURPOSE15 PRCM_REG32(0x0E8)
63#define GENERAL_PURPOSE16 PRCM_REG32(0x0EC)
64#define GENERAL_PURPOSE17 PRCM_REG32(0x0F0)
65#define GENERAL_PURPOSE18 PRCM_REG32(0x0F4)
66#define GENERAL_PURPOSE19 PRCM_REG32(0x0F8)
67#define GENERAL_PURPOSE20 PRCM_REG32(0x0FC)
68
69/* MPU */
70#define CM_CLKSEL_MPU PRCM_REG32(0x140)
71#define CM_CLKSTCTRL_MPU PRCM_REG32(0x148)
72#define RM_RSTST_MPU PRCM_REG32(0x158)
73#define PM_WKDEP_MPU PRCM_REG32(0x1C8)
74#define PM_EVGENCTRL_MPU PRCM_REG32(0x1D4)
75#define PM_EVEGENONTIM_MPU PRCM_REG32(0x1D8)
76#define PM_EVEGENOFFTIM_MPU PRCM_REG32(0x1DC)
77#define PM_PWSTCTRL_MPU PRCM_REG32(0x1E0)
78#define PM_PWSTST_MPU PRCM_REG32(0x1E4)
79
80/* CORE */
81#define CM_FCLKEN1_CORE PRCM_REG32(0x200)
82#define CM_FCLKEN2_CORE PRCM_REG32(0x204)
83#define CM_FCLKEN3_CORE PRCM_REG32(0x208)
84#define CM_ICLKEN1_CORE PRCM_REG32(0x210)
85#define CM_ICLKEN2_CORE PRCM_REG32(0x214)
86#define CM_ICLKEN3_CORE PRCM_REG32(0x218)
87#define CM_ICLKEN4_CORE PRCM_REG32(0x21C)
88#define CM_IDLEST1_CORE PRCM_REG32(0x220)
89#define CM_IDLEST2_CORE PRCM_REG32(0x224)
90#define CM_IDLEST3_CORE PRCM_REG32(0x228)
91#define CM_IDLEST4_CORE PRCM_REG32(0x22C)
92#define CM_AUTOIDLE1_CORE PRCM_REG32(0x230)
93#define CM_AUTOIDLE2_CORE PRCM_REG32(0x234)
94#define CM_AUTOIDLE3_CORE PRCM_REG32(0x238)
95#define CM_AUTOIDLE4_CORE PRCM_REG32(0x23C)
96#define CM_CLKSEL1_CORE PRCM_REG32(0x240)
97#define CM_CLKSEL2_CORE PRCM_REG32(0x244)
98#define CM_CLKSTCTRL_CORE PRCM_REG32(0x248)
99#define PM_WKEN1_CORE PRCM_REG32(0x2A0)
100#define PM_WKEN2_CORE PRCM_REG32(0x2A4)
101#define PM_WKST1_CORE PRCM_REG32(0x2B0)
102#define PM_WKST2_CORE PRCM_REG32(0x2B4)
103#define PM_WKDEP_CORE PRCM_REG32(0x2C8)
104#define PM_PWSTCTRL_CORE PRCM_REG32(0x2E0)
105#define PM_PWSTST_CORE PRCM_REG32(0x2E4)
106
107/* GFX */
108#define CM_FCLKEN_GFX PRCM_REG32(0x300)
109#define CM_ICLKEN_GFX PRCM_REG32(0x310)
110#define CM_IDLEST_GFX PRCM_REG32(0x320)
111#define CM_CLKSEL_GFX PRCM_REG32(0x340)
112#define CM_CLKSTCTRL_GFX PRCM_REG32(0x348)
113#define RM_RSTCTRL_GFX PRCM_REG32(0x350)
114#define RM_RSTST_GFX PRCM_REG32(0x358)
115#define PM_WKDEP_GFX PRCM_REG32(0x3C8)
116#define PM_PWSTCTRL_GFX PRCM_REG32(0x3E0)
117#define PM_PWSTST_GFX PRCM_REG32(0x3E4)
118
119/* WAKE-UP */
120#define CM_FCLKEN_WKUP PRCM_REG32(0x400)
121#define CM_ICLKEN_WKUP PRCM_REG32(0x410)
122#define CM_IDLEST_WKUP PRCM_REG32(0x420)
123#define CM_AUTOIDLE_WKUP PRCM_REG32(0x430)
124#define CM_CLKSEL_WKUP PRCM_REG32(0x440)
125#define RM_RSTCTRL_WKUP PRCM_REG32(0x450)
126#define RM_RSTTIME_WKUP PRCM_REG32(0x454)
127#define RM_RSTST_WKUP PRCM_REG32(0x458)
128#define PM_WKEN_WKUP PRCM_REG32(0x4A0)
129#define PM_WKST_WKUP PRCM_REG32(0x4B0)
130
131/* CLOCKS */
132#define CM_CLKEN_PLL PRCM_REG32(0x500)
133#define CM_IDLEST_CKGEN PRCM_REG32(0x520)
134#define CM_AUTOIDLE_PLL PRCM_REG32(0x530)
135#define CM_CLKSEL1_PLL PRCM_REG32(0x540)
136#define CM_CLKSEL2_PLL PRCM_REG32(0x544)
137
138/* DSP */
139#define CM_FCLKEN_DSP PRCM_REG32(0x800)
140#define CM_ICLKEN_DSP PRCM_REG32(0x810)
141#define CM_IDLEST_DSP PRCM_REG32(0x820)
142#define CM_AUTOIDLE_DSP PRCM_REG32(0x830)
143#define CM_CLKSEL_DSP PRCM_REG32(0x840)
144#define CM_CLKSTCTRL_DSP PRCM_REG32(0x848)
145#define RM_RSTCTRL_DSP PRCM_REG32(0x850)
146#define RM_RSTST_DSP PRCM_REG32(0x858)
147#define PM_WKEN_DSP PRCM_REG32(0x8A0)
148#define PM_WKDEP_DSP PRCM_REG32(0x8C8)
149#define PM_PWSTCTRL_DSP PRCM_REG32(0x8E0)
150#define PM_PWSTST_DSP PRCM_REG32(0x8E4)
151#define PRCM_IRQSTATUS_DSP PRCM_REG32(0x8F0)
152#define PRCM_IRQENABLE_DSP PRCM_REG32(0x8F4)
153
154/* IVA */
155#define PRCM_IRQSTATUS_IVA PRCM_REG32(0x8F8)
156#define PRCM_IRQENABLE_IVA PRCM_REG32(0x8FC)
157
158/* Modem on 2430 */
159#define CM_FCLKEN_MDM PRCM_REG32(0xC00)
160#define CM_ICLKEN_MDM PRCM_REG32(0xC10)
161#define CM_IDLEST_MDM PRCM_REG32(0xC20)
162#define CM_CLKSEL_MDM PRCM_REG32(0xC40)
163
164/* FIXME: Move to header for 2430 */
165#define DISP_BASE (OMAP24XX_L4_IO_BASE+0x50000)
166#define DISP_REG32(offset) __REG32(DISP_BASE + (offset))
167
168#define OMAP24XX_GPMC_BASE (L3_24XX_BASE + 0xa000)
169#define GPMC_BASE (OMAP24XX_GPMC_BASE)
170#define GPMC_REG32(offset) __REG32(GPMC_BASE + (offset))
171
172#define GPT1_BASE (OMAP24XX_GPT1)
173#define GPT1_REG32(offset) __REG32(GPT1_BASE + (offset))
174
175/* Misc sysconfig */
176#define DISPC_SYSCONFIG DISP_REG32(0x410)
177#define SPI_BASE (OMAP24XX_L4_IO_BASE+0x98000)
178#define MCSPI1_SYSCONFIG __REG32(SPI_BASE + 0x10)
179#define MCSPI2_SYSCONFIG __REG32(SPI_BASE+0x2000 + 0x10)
180
181//#define DSP_MMU_SYSCONFIG 0x5A000010
182#define CAMERA_MMU_SYSCONFIG __REG32(DISP_BASE+0x2C10)
183//#define IVA_MMU_SYSCONFIG 0x5D000010
184//#define DSP_DMA_SYSCONFIG 0x00FCC02C
185#define CAMERA_DMA_SYSCONFIG __REG32(DISP_BASE+0x282C)
186#define SYSTEM_DMA_SYSCONFIG __REG32(DISP_BASE+0x602C)
187#define GPMC_SYSCONFIG GPMC_REG32(0x010)
188#define MAILBOXES_SYSCONFIG __REG32(OMAP24XX_L4_IO_BASE+0x94010)
189#define UART1_SYSCONFIG __REG32(OMAP24XX_L4_IO_BASE+0x6A054)
190#define UART2_SYSCONFIG __REG32(OMAP24XX_L4_IO_BASE+0x6C054)
191#define UART3_SYSCONFIG __REG32(OMAP24XX_L4_IO_BASE+0x6E054)
192//#define IVA_SYSCONFIG 0x5C060010
193#define SDRC_SYSCONFIG __REG32(OMAP24XX_SDRC_BASE+0x10)
194#define SMS_SYSCONFIG __REG32(OMAP24XX_SMS_BASE+0x10)
195#define SSI_SYSCONFIG __REG32(DISP_BASE+0x8010)
196//#define VLYNQ_SYSCONFIG 0x67FFFE10
197
198/* rkw - good cannidates for PM_ to start what nm was trying */
199#define OMAP24XX_GPT2 (OMAP24XX_L4_IO_BASE+0x2A000)
200#define OMAP24XX_GPT3 (OMAP24XX_L4_IO_BASE+0x78000)
201#define OMAP24XX_GPT4 (OMAP24XX_L4_IO_BASE+0x7A000)
202#define OMAP24XX_GPT5 (OMAP24XX_L4_IO_BASE+0x7C000)
203#define OMAP24XX_GPT6 (OMAP24XX_L4_IO_BASE+0x7E000)
204#define OMAP24XX_GPT7 (OMAP24XX_L4_IO_BASE+0x80000)
205#define OMAP24XX_GPT8 (OMAP24XX_L4_IO_BASE+0x82000)
206#define OMAP24XX_GPT9 (OMAP24XX_L4_IO_BASE+0x84000)
207#define OMAP24XX_GPT10 (OMAP24XX_L4_IO_BASE+0x86000)
208#define OMAP24XX_GPT11 (OMAP24XX_L4_IO_BASE+0x88000)
209#define OMAP24XX_GPT12 (OMAP24XX_L4_IO_BASE+0x8A000)
210
211#define GPTIMER1_SYSCONFIG GPT1_REG32(0x010)
212#define GPTIMER2_SYSCONFIG __REG32(OMAP24XX_GPT2 + 0x10)
213#define GPTIMER3_SYSCONFIG __REG32(OMAP24XX_GPT3 + 0x10)
214#define GPTIMER4_SYSCONFIG __REG32(OMAP24XX_GPT4 + 0x10)
215#define GPTIMER5_SYSCONFIG __REG32(OMAP24XX_GPT5 + 0x10)
216#define GPTIMER6_SYSCONFIG __REG32(OMAP24XX_GPT6 + 0x10)
217#define GPTIMER7_SYSCONFIG __REG32(OMAP24XX_GPT7 + 0x10)
218#define GPTIMER8_SYSCONFIG __REG32(OMAP24XX_GPT8 + 0x10)
219#define GPTIMER9_SYSCONFIG __REG32(OMAP24XX_GPT9 + 0x10)
220#define GPTIMER10_SYSCONFIG __REG32(OMAP24XX_GPT10 + 0x10)
221#define GPTIMER11_SYSCONFIG __REG32(OMAP24XX_GPT11 + 0x10)
222#define GPTIMER12_SYSCONFIG __REG32(OMAP24XX_GPT12 + 0x10)
223
224#define GPIOX_BASE(X) (OMAP24XX_GPIO_BASE+(0x2000*((X)-1)))
225
226#define GPIO1_SYSCONFIG __REG32((GPIOX_BASE(1)+0x10))
227#define GPIO2_SYSCONFIG __REG32((GPIOX_BASE(2)+0x10))
228#define GPIO3_SYSCONFIG __REG32((GPIOX_BASE(3)+0x10))
229#define GPIO4_SYSCONFIG __REG32((GPIOX_BASE(4)+0x10))
230
231/* GP TIMER 1 */
232#define GPTIMER1_TISTAT GPT1_REG32(0x014)
233#define GPTIMER1_TISR GPT1_REG32(0x018)
234#define GPTIMER1_TIER GPT1_REG32(0x01C)
235#define GPTIMER1_TWER GPT1_REG32(0x020)
236#define GPTIMER1_TCLR GPT1_REG32(0x024)
237#define GPTIMER1_TCRR GPT1_REG32(0x028)
238#define GPTIMER1_TLDR GPT1_REG32(0x02C)
239#define GPTIMER1_TTGR GPT1_REG32(0x030)
240#define GPTIMER1_TWPS GPT1_REG32(0x034)
241#define GPTIMER1_TMAR GPT1_REG32(0x038)
242#define GPTIMER1_TCAR1 GPT1_REG32(0x03C)
243#define GPTIMER1_TSICR GPT1_REG32(0x040)
244#define GPTIMER1_TCAR2 GPT1_REG32(0x044)
245
246/* rkw -- base fix up please... */
247#define GPTIMER3_TISR __REG32(OMAP24XX_L4_IO_BASE+0x78018)
248
249/* SDRC */
250#define SDRC_DLLA_CTRL __REG32(OMAP24XX_SDRC_BASE+0x060)
251#define SDRC_DLLA_STATUS __REG32(OMAP24XX_SDRC_BASE+0x064)
252#define SDRC_DLLB_CTRL __REG32(OMAP24XX_SDRC_BASE+0x068)
253#define SDRC_DLLB_STATUS __REG32(OMAP24XX_SDRC_BASE+0x06C)
254#define SDRC_POWER __REG32(OMAP24XX_SDRC_BASE+0x070)
255#define SDRC_MR_0 __REG32(OMAP24XX_SDRC_BASE+0x084)
256
257/* GPIO 1 */
258#define GPIO1_BASE GPIOX_BASE(1)
259#define GPIO1_REG32(offset) __REG32(GPIO1_BASE + (offset))
260#define GPIO1_IRQENABLE1 GPIO1_REG32(0x01C)
261#define GPIO1_IRQSTATUS1 GPIO1_REG32(0x018)
262#define GPIO1_IRQENABLE2 GPIO1_REG32(0x02C)
263#define GPIO1_IRQSTATUS2 GPIO1_REG32(0x028)
264#define GPIO1_WAKEUPENABLE GPIO1_REG32(0x020)
265#define GPIO1_RISINGDETECT GPIO1_REG32(0x048)
266#define GPIO1_DATAIN GPIO1_REG32(0x038)
267#define GPIO1_OE GPIO1_REG32(0x034)
268#define GPIO1_DATAOUT GPIO1_REG32(0x03C)
269
270/* GPIO2 */
271#define GPIO2_BASE GPIOX_BASE(2)
272#define GPIO2_REG32(offset) __REG32(GPIO2_BASE + (offset))
273#define GPIO2_IRQENABLE1 GPIO2_REG32(0x01C)
274#define GPIO2_IRQSTATUS1 GPIO2_REG32(0x018)
275#define GPIO2_IRQENABLE2 GPIO2_REG32(0x02C)
276#define GPIO2_IRQSTATUS2 GPIO2_REG32(0x028)
277#define GPIO2_WAKEUPENABLE GPIO2_REG32(0x020)
278#define GPIO2_RISINGDETECT GPIO2_REG32(0x048)
279#define GPIO2_DATAIN GPIO2_REG32(0x038)
280#define GPIO2_OE GPIO2_REG32(0x034)
281#define GPIO2_DATAOUT GPIO2_REG32(0x03C)
282
283/* GPIO 3 */
284#define GPIO3_BASE GPIOX_BASE(3)
285#define GPIO3_REG32(offset) __REG32(GPIO3_BASE + (offset))
286#define GPIO3_IRQENABLE1 GPIO3_REG32(0x01C)
287#define GPIO3_IRQSTATUS1 GPIO3_REG32(0x018)
288#define GPIO3_IRQENABLE2 GPIO3_REG32(0x02C)
289#define GPIO3_IRQSTATUS2 GPIO3_REG32(0x028)
290#define GPIO3_WAKEUPENABLE GPIO3_REG32(0x020)
291#define GPIO3_RISINGDETECT GPIO3_REG32(0x048)
292#define GPIO3_FALLINGDETECT GPIO3_REG32(0x04C)
293#define GPIO3_DATAIN GPIO3_REG32(0x038)
294#define GPIO3_OE GPIO3_REG32(0x034)
295#define GPIO3_DATAOUT GPIO3_REG32(0x03C)
296#define GPIO3_DEBOUNCENABLE GPIO3_REG32(0x050)
297#define GPIO3_DEBOUNCINGTIME GPIO3_REG32(0x054)
298
299/* GPIO 4 */
300#define GPIO4_BASE GPIOX_BASE(4)
301#define GPIO4_REG32(offset) __REG32(GPIO4_BASE + (offset))
302#define GPIO4_IRQENABLE1 GPIO4_REG32(0x01C)
303#define GPIO4_IRQSTATUS1 GPIO4_REG32(0x018)
304#define GPIO4_IRQENABLE2 GPIO4_REG32(0x02C)
305#define GPIO4_IRQSTATUS2 GPIO4_REG32(0x028)
306#define GPIO4_WAKEUPENABLE GPIO4_REG32(0x020)
307#define GPIO4_RISINGDETECT GPIO4_REG32(0x048)
308#define GPIO4_FALLINGDETECT GPIO4_REG32(0x04C)
309#define GPIO4_DATAIN GPIO4_REG32(0x038)
310#define GPIO4_OE GPIO4_REG32(0x034)
311#define GPIO4_DATAOUT GPIO4_REG32(0x03C)
312#define GPIO4_DEBOUNCENABLE GPIO4_REG32(0x050)
313#define GPIO4_DEBOUNCINGTIME GPIO4_REG32(0x054)
314
315
316/* IO CONFIG */
317#define CONTROL_BASE (OMAP24XX_CTRL_BASE)
318#define CONTROL_REG32(offset) __REG32(CONTROL_BASE + (offset))
319
320#define CONTROL_PADCONF_SPI1_NCS2 CONTROL_REG32(0x104)
321#define CONTROL_PADCONF_SYS_XTALOUT CONTROL_REG32(0x134)
322#define CONTROL_PADCONF_UART1_RX CONTROL_REG32(0x0C8)
323#define CONTROL_PADCONF_MCBSP1_DX CONTROL_REG32(0x10C)
324#define CONTROL_PADCONF_GPMC_NCS4 CONTROL_REG32(0x090)
325#define CONTROL_PADCONF_DSS_D5 CONTROL_REG32(0x0B8)
326#define CONTROL_PADCONF_DSS_D9 CONTROL_REG32(0x0BC)
327#define CONTROL_PADCONF_DSS_D13 CONTROL_REG32(0x0C0)
328#define CONTROL_PADCONF_DSS_VSYNC CONTROL_REG32(0x0CC)
329
330/* CONTROL */
331#define CONTROL_DEVCONF CONTROL_REG32(0x274)
332
333/* INTERRUPT CONTROLLER */
334#define INTC_BASE (OMAP24XX_L4_IO_BASE+0xfe000)
335#define INTC_REG32(offset) __REG32(INTC_BASE + (offset))
336
337#define INTC1_U_BASE INTC_REG32(0x000)
338#define INTC_MIR0 INTC_REG32(0x084)
339#define INTC_MIR_SET0 INTC_REG32(0x08C)
340#define INTC_MIR_CLEAR0 INTC_REG32(0x088)
341#define INTC_ISR_CLEAR0 INTC_REG32(0x094)
342#define INTC_MIR1 INTC_REG32(0x0A4)
343#define INTC_MIR_SET1 INTC_REG32(0x0AC)
344#define INTC_MIR_CLEAR1 INTC_REG32(0x0A8)
345#define INTC_ISR_CLEAR1 INTC_REG32(0x0B4)
346#define INTC_MIR2 INTC_REG32(0x0C4)
347#define INTC_MIR_SET2 INTC_REG32(0x0CC)
348#define INTC_MIR_CLEAR2 INTC_REG32(0x0C8)
349#define INTC_ISR_CLEAR2 INTC_REG32(0x0D4)
350#define INTC_SIR_IRQ INTC_REG32(0x040)
351#define INTC_CONTROL INTC_REG32(0x048)
352#define INTC_ILR11 INTC_REG32(0x12C)
353#define INTC_ILR32 INTC_REG32(0x180)
354#define INTC_ILR37 INTC_REG32(0x194)
355#define INTC_SYSCONFIG INTC_REG32(0x010)
356
357/* RAM FIREWALL */
358#define RAMFW_BASE (0x68005000)
359#define RAMFW_REG32(offset) __REG32(RAMFW_BASE + (offset))
360
361#define RAMFW_REQINFOPERM0 RAMFW_REG32(0x048)
362#define RAMFW_READPERM0 RAMFW_REG32(0x050)
363#define RAMFW_WRITEPERM0 RAMFW_REG32(0x058)
364
365/* GPMC CS1 FPGA ON USER INTERFACE MODULE */
366//#define DEBUG_BOARD_LED_REGISTER 0x04000014
367
368/* GPMC CS0 */
369#define GPMC_CONFIG1_0 GPMC_REG32(0x060)
370#define GPMC_CONFIG2_0 GPMC_REG32(0x064)
371#define GPMC_CONFIG3_0 GPMC_REG32(0x068)
372#define GPMC_CONFIG4_0 GPMC_REG32(0x06C)
373#define GPMC_CONFIG5_0 GPMC_REG32(0x070)
374#define GPMC_CONFIG6_0 GPMC_REG32(0x074)
375#define GPMC_CONFIG7_0 GPMC_REG32(0x078)
376
377/* GPMC CS1 */
378#define GPMC_CONFIG1_1 GPMC_REG32(0x090)
379#define GPMC_CONFIG2_1 GPMC_REG32(0x094)
380#define GPMC_CONFIG3_1 GPMC_REG32(0x098)
381#define GPMC_CONFIG4_1 GPMC_REG32(0x09C)
382#define GPMC_CONFIG5_1 GPMC_REG32(0x0a0)
383#define GPMC_CONFIG6_1 GPMC_REG32(0x0a4)
384#define GPMC_CONFIG7_1 GPMC_REG32(0x0a8)
385
386/* DSS */
387#define DSS_CONTROL DISP_REG32(0x040)
388#define DISPC_CONTROL DISP_REG32(0x440)
389#define DISPC_SYSSTATUS DISP_REG32(0x414)
390#define DISPC_IRQSTATUS DISP_REG32(0x418)
391#define DISPC_IRQENABLE DISP_REG32(0x41C)
392#define DISPC_CONFIG DISP_REG32(0x444)
393#define DISPC_DEFAULT_COLOR0 DISP_REG32(0x44C)
394#define DISPC_DEFAULT_COLOR1 DISP_REG32(0x450)
395#define DISPC_TRANS_COLOR0 DISP_REG32(0x454)
396#define DISPC_TRANS_COLOR1 DISP_REG32(0x458)
397#define DISPC_LINE_NUMBER DISP_REG32(0x460)
398#define DISPC_TIMING_H DISP_REG32(0x464)
399#define DISPC_TIMING_V DISP_REG32(0x468)
400#define DISPC_POL_FREQ DISP_REG32(0x46C)
401#define DISPC_DIVISOR DISP_REG32(0x470)
402#define DISPC_SIZE_DIG DISP_REG32(0x478)
403#define DISPC_SIZE_LCD DISP_REG32(0x47C)
404#define DISPC_GFX_BA0 DISP_REG32(0x480)
405#define DISPC_GFX_BA1 DISP_REG32(0x484)
406#define DISPC_GFX_POSITION DISP_REG32(0x488)
407#define DISPC_GFX_SIZE DISP_REG32(0x48C)
408#define DISPC_GFX_ATTRIBUTES DISP_REG32(0x4A0)
409#define DISPC_GFX_FIFO_THRESHOLD DISP_REG32(0x4A4)
410#define DISPC_GFX_ROW_INC DISP_REG32(0x4AC)
411#define DISPC_GFX_PIXEL_INC DISP_REG32(0x4B0)
412#define DISPC_GFX_WINDOW_SKIP DISP_REG32(0x4B4)
413#define DISPC_GFX_TABLE_BA DISP_REG32(0x4B8)
414#define DISPC_DATA_CYCLE1 DISP_REG32(0x5D4)
415#define DISPC_DATA_CYCLE2 DISP_REG32(0x5D8)
416#define DISPC_DATA_CYCLE3 DISP_REG32(0x5DC)
417
418/* Wake up define for board */
419#define GPIO97 (1 << 1)
420#define GPIO88 (1 << 24)
421
422#endif /* __ASSEMBLER__ */
423 27
424#endif 28#endif
425 29
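
With the register definitions moved out, prcm.h now exports only omap_prcm_get_reset_sources(); a usage sketch (the printk wording is illustrative):

#include <linux/kernel.h>
#include <asm/arch/prcm.h>

static void example_report_reset_sources(void)
{
	u32 sources = omap_prcm_get_reset_sources();

	printk(KERN_INFO "PRCM reset sources: 0x%08x\n", sources);
}
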
diff --git a/include/asm-arm/arch-omap/sram.h b/include/asm-arm/arch-omap/sram.h
index e72ccbf0fe06..6fc0dd57b7c3 100644
--- a/include/asm-arm/arch-omap/sram.h
+++ b/include/asm-arm/arch-omap/sram.h
@@ -20,6 +20,8 @@ extern void omap2_sram_reprogram_sdrc(u32 perf_level, u32 dll_val,
20 u32 mem_type); 20 u32 mem_type);
21extern u32 omap2_set_prcm(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass); 21extern u32 omap2_set_prcm(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass);
22 22
23extern unsigned long omap_fb_sram_start;
24extern unsigned long omap_fb_sram_size;
23 25
24/* Do not use these */ 26/* Do not use these */
25extern void sram_reprogram_clock(u32 ckctl, u32 dpllctl); 27extern void sram_reprogram_clock(u32 ckctl, u32 dpllctl);
diff --git a/include/asm-arm/arch-omap/system.h b/include/asm-arm/arch-omap/system.h
index 6724a81bd10b..67970d1a2020 100644
--- a/include/asm-arm/arch-omap/system.h
+++ b/include/asm-arm/arch-omap/system.h
@@ -9,12 +9,13 @@
9 9
10#include <asm/mach-types.h> 10#include <asm/mach-types.h>
11#include <asm/hardware.h> 11#include <asm/hardware.h>
12#include <asm/arch/prcm.h>
13 12
14#ifndef CONFIG_MACH_VOICEBLUE 13#ifndef CONFIG_MACH_VOICEBLUE
15#define voiceblue_reset() do {} while (0) 14#define voiceblue_reset() do {} while (0)
16#endif 15#endif
17 16
17extern void omap_prcm_arch_reset(char mode);
18
18static inline void arch_idle(void) 19static inline void arch_idle(void)
19{ 20{
20 cpu_do_idle(); 21 cpu_do_idle();
@@ -38,24 +39,12 @@ static inline void omap1_arch_reset(char mode)
38 omap_writew(1, ARM_RSTCT1); 39 omap_writew(1, ARM_RSTCT1);
39} 40}
40 41
41static inline void omap2_arch_reset(char mode)
42{
43 u32 rate;
44 struct clk *vclk, *sclk;
45
46 vclk = clk_get(NULL, "virt_prcm_set");
47 sclk = clk_get(NULL, "sys_ck");
48 rate = clk_get_rate(sclk);
49 clk_set_rate(vclk, rate); /* go to bypass for OMAP limitation */
50 RM_RSTCTRL_WKUP |= 2;
51}
52
53static inline void arch_reset(char mode) 42static inline void arch_reset(char mode)
54{ 43{
55 if (!cpu_is_omap24xx()) 44 if (!cpu_is_omap24xx())
56 omap1_arch_reset(mode); 45 omap1_arch_reset(mode);
57 else 46 else
58 omap2_arch_reset(mode); 47 omap_prcm_arch_reset(mode);
59} 48}
60 49
61#endif 50#endif
diff --git a/include/asm-arm/arch-pxa/pxa-regs.h b/include/asm-arm/arch-pxa/pxa-regs.h
index 1409c5bd703f..c8f53a71c076 100644
--- a/include/asm-arm/arch-pxa/pxa-regs.h
+++ b/include/asm-arm/arch-pxa/pxa-regs.h
@@ -485,7 +485,7 @@
485#define SACR1_ENLBF (1 << 5) /* Enable Loopback */ 485#define SACR1_ENLBF (1 << 5) /* Enable Loopback */
486#define SACR1_DRPL (1 << 4) /* Disable Replaying Function */ 486#define SACR1_DRPL (1 << 4) /* Disable Replaying Function */
487#define SACR1_DREC (1 << 3) /* Disable Recording Function */ 487#define SACR1_DREC (1 << 3) /* Disable Recording Function */
488#define SACR1_AMSL (1 << 1) /* Specify Alternate Mode */ 488#define SACR1_AMSL (1 << 0) /* Specify Alternate Mode */
489 489
490#define SASR0_I2SOFF (1 << 7) /* Controller Status */ 490#define SASR0_I2SOFF (1 << 7) /* Controller Status */
491#define SASR0_ROR (1 << 6) /* Rx FIFO Overrun */ 491#define SASR0_ROR (1 << 6) /* Rx FIFO Overrun */
diff --git a/include/asm-arm/arch-pxa/sharpsl.h b/include/asm-arm/arch-pxa/sharpsl.h
index 0b43495d24b4..94cb4982af82 100644
--- a/include/asm-arm/arch-pxa/sharpsl.h
+++ b/include/asm-arm/arch-pxa/sharpsl.h
@@ -27,6 +27,8 @@ struct corgits_machinfo {
27 */ 27 */
28struct corgibl_machinfo { 28struct corgibl_machinfo {
29 int max_intensity; 29 int max_intensity;
30 int default_intensity;
31 int limit_mask;
30 void (*set_bl_intensity)(int intensity); 32 void (*set_bl_intensity)(int intensity);
31}; 33};
32extern void corgibl_limit_intensity(int limit); 34extern void corgibl_limit_intensity(int limit);
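
A hypothetical machine-info block showing where the two new backlight fields slot in; the numeric values are placeholders, not real Corgi settings:

static void example_set_bl_intensity(int intensity)
{
	/* program the board's backlight controller / PWM here */
}

static struct corgibl_machinfo example_bl_machinfo = {
	.max_intensity		= 47,
	.default_intensity	= 31,
	.limit_mask		= 0x0b,	/* intensities still allowed when limited */
	.set_bl_intensity	= example_set_bl_intensity,
};
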
diff --git a/include/asm-arm/arch-s3c2410/entry-macro.S b/include/asm-arm/arch-s3c2410/entry-macro.S
index cc06b1bd37b2..894c35cf3b1e 100644
--- a/include/asm-arm/arch-s3c2410/entry-macro.S
+++ b/include/asm-arm/arch-s3c2410/entry-macro.S
@@ -6,116 +6,83 @@
6 * This file is licensed under the terms of the GNU General Public 6 * This file is licensed under the terms of the GNU General Public
7 * License version 2. This program is licensed "as is" without any 7 * License version 2. This program is licensed "as is" without any
8 * warranty of any kind, whether express or implied. 8 * warranty of any kind, whether express or implied.
9*/
10
11/* We have a problem that the INTOFFSET register does not always
12 * show one interrupt. Occasionally we get two interrupts through
13 * the prioritiser, and this causes the INTOFFSET register to show
14 * what looks like the logical-or of the two interrupt numbers.
15 *
16 * Thanks to Klaus, Shannon, et al for helping to debug this problem
17*/
18
19#define INTPND (0x10)
20#define INTOFFSET (0x14)
21#define EXTINTPEND (0xa8)
22#define EXTINTMASK (0xa4)
9 23
10 * Modifications:
11 * 10-Mar-2005 LCVR Changed S3C2410_VA to S3C24XX_VA
12 */
13#include <asm/hardware.h> 24#include <asm/hardware.h>
14#include <asm/arch/irqs.h> 25#include <asm/arch/irqs.h>
15 26
16
17 .macro get_irqnr_and_base, irqnr, irqstat, base, tmp 27 .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
18 28
19 mov \tmp, #S3C24XX_VA_IRQ 29 mov \base, #S3C24XX_VA_IRQ
20 ldr \irqnr, [ \tmp, #0x14 ] @ get irq no 30
2130000: 31 ldr \irqstat, [ \base, #INTPND]
22 teq \irqnr, #4 32 bics \irqnr, \irqstat, #3<<4 @@ only a GPIO IRQ
23 teqne \irqnr, #5 33 beq 2000f
24 beq 1002f @ external irq reg 34
25 35 @@ try the interrupt offset register, since it is there
26 @ debug check to see if interrupt reported is the same 36
27 @ as the offset.... 37 ldr \irqnr, [ \base, #INTOFFSET ]
28 38 mov \tmp, #1
29 teq \irqnr, #0 39 tst \irqstat, \tmp, lsl \irqnr
30 beq 20002f 40 addne \irqnr, \irqnr, #IRQ_EINT0
31 ldr \irqstat, [ \tmp, #0x10 ] @ INTPND 41 bne 1001f
32 mov \irqstat, \irqstat, lsr \irqnr 42
33 tst \irqstat, #1 43 @@ the number specified is not a valid irq, so try
34 bne 20002f 44 @@ and work it out for ourselves
35 45
36 /* debug/warning if we get an invalud response from the 46 mov \irqnr, #IRQ_EINT0 @@ start here
37 * INTOFFSET register */ 47 b 3000f
38#if 1 48
39 stmfd r13!, { r0 - r4 , r8-r12, r14 } 492000:
40 ldr r1, [ \tmp, #0x14 ] @ INTOFFSET 50 @@ load the GPIO interrupt register, and check it
41 ldr r2, [ \tmp, #0x10 ] @ INTPND 51
42 ldr r3, [ \tmp, #0x00 ] @ SRCPND 52 add \tmp, \base, #S3C24XX_VA_GPIO - S3C24XX_VA_IRQ
43 adr r0, 20003f 53 ldr \irqstat, [ \tmp, # EXTINTPEND ]
44 bl printk 54 ldr \irqnr, [ \tmp, # EXTINTMASK ]
45 b 20004f 55 bics \irqstat, \irqstat, \irqnr
46 56 beq 1001f
4720003: 57
48 .ascii "<7>irq: err - bad offset %d, intpnd=%08x, srcpnd=%08x\n" 58 mov \irqnr, #(IRQ_EINT4 - 4)
49 .byte 0 59
50 .align 4 60 @@ work out which irq (if any) we got
5120004: 613000:
52 mov r1, #1 62 movs \tmp, \irqstat, lsl#16
53 mov \tmp, #S3C24XX_VA_IRQ 63 addeq \irqnr, \irqnr, #16
54 ldmfd r13!, { r0 - r4 , r8-r12, r14 } 64 moveq \irqstat, \irqstat, lsr#16
55#endif 65 tst \irqstat, #0xff
56 66 addeq \irqnr, \irqnr, #8
57 @ try working out interrupt number for ourselves 67 moveq \irqstat, \irqstat, lsr#8
58 mov \irqnr, #0 68 tst \irqstat, #0xf
59 ldr \irqstat, [ \tmp, #0x10 ] @ INTPND 69 addeq \irqnr, \irqnr, #4
6010021: 70 moveq \irqstat, \irqstat, lsr#4
61 movs \irqstat, \irqstat, lsr#1 71 tst \irqstat, #0x3
62 bcs 30000b @ try and re-start the proccess 72 addeq \irqnr, \irqnr, #2
63 add \irqnr, \irqnr, #1 73 moveq \irqstat, \irqstat, lsr#2
64 cmp \irqnr, #32 74 tst \irqstat, #0x1
65 ble 10021b 75 addeq \irqnr, \irqnr, #1
66 76
67 @ found no interrupt, set Z flag and leave 77 @@ we have the value
68 movs \irqnr, #0 78 movs \irqnr, \irqnr
69 b 1001f 79
70
7120005:
7220002: @ exit
73 @ we base the s3c2410x interrupts at 16 and above to allow
74 @ isa peripherals to have their standard interrupts, also
75 @ ensure that Z flag is un-set on exit
76
77 @ note, we cannot be sure if we get IRQ_EINT0 (0) that
78 @ there is simply no interrupt pending, so in all other
79 @ cases we jump to say we have found something, otherwise
80 @ we check to see if the interrupt really is asserted
81 adds \irqnr, \irqnr, #IRQ_EINT0
82 teq \irqnr, #IRQ_EINT0
83 bne 1001f @ exit
84 ldr \irqstat, [ \tmp, #0x10 ] @ INTPND
85 teq \irqstat, #0
86 moveq \irqnr, #0
87 b 1001f
88
89 @ we get here from no main or external interrupts pending
901002:
91 add \tmp, \tmp, #S3C24XX_VA_GPIO - S3C24XX_VA_IRQ
92 ldr \irqstat, [ \tmp, # 0xa8 ] @ EXTINTPEND
93 ldr \irqnr, [ \tmp, # 0xa4 ] @ EXTINTMASK
94
95 bic \irqstat, \irqstat, \irqnr @ clear masked irqs
96
97 mov \irqnr, #IRQ_EINT4 @ start extint nos
98 mov \irqstat, \irqstat, lsr#4 @ ignore bottom 4 bits
9910021:
100 movs \irqstat, \irqstat, lsr#1
101 bcs 1004f
102 add \irqnr, \irqnr, #1
103 cmp \irqnr, #IRQ_EINT23
104 ble 10021b
105
106 @ found no interrupt, set Z flag and leave
107 movs \irqnr, #0
108
1091004: @ ensure Z flag clear in case our MOVS shifted out the last bit
110 teq \irqnr, #0
1111001: 801001:
112 @ exit irq routine 81 @@ exit here, Z flag unset if IRQ
113 .endm
114 82
83 .endm
115 84
116 /* currently don't need a disable_fiq macro */ 85 /* currently don't need a disable_fiq macro */
117 86
118 .macro disable_fiq 87 .macro disable_fiq
119 .endm 88 .endm
120
121
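
The rewritten macro above only trusts INTOFFSET when the bit it names is actually set in INTPND; otherwise it falls back to scanning INTPND (or EXTINTPEND for the external-interrupt bank) for the lowest pending bit, which is what the 3000: ladder of shift-and-test instructions computes. A hedged C restatement of that decode, where the __raw_readl() accesses, the helper name and the use of __ffs() are illustrative only and not part of the patch:

#define INTPND		(0x10)
#define INTOFFSET	(0x14)
#define EXTINTMASK	(0xa4)
#define EXTINTPEND	(0xa8)

static int s3c24xx_decode_irq(void)
{
	unsigned long pnd = __raw_readl(S3C24XX_VA_IRQ + INTPND);

	if (pnd & ~(3 << 4)) {			/* a non-GPIO source is pending */
		unsigned int off = __raw_readl(S3C24XX_VA_IRQ + INTOFFSET);

		if (pnd & (1UL << off))		/* INTOFFSET agrees with INTPND */
			return IRQ_EINT0 + off;

		/* INTOFFSET looked merged/stale: recover the number from INTPND */
		return IRQ_EINT0 + __ffs(pnd);
	}

	/* only the EINT4..EINT23 groups are left: decode the GPIO block */
	pnd = __raw_readl(S3C24XX_VA_GPIO + EXTINTPEND) &
	      ~__raw_readl(S3C24XX_VA_GPIO + EXTINTMASK);
	if (!pnd)
		return -1;			/* nothing pending; the asm exits with Z set */

	return (IRQ_EINT4 - 4) + __ffs(pnd);
}
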
diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h
index e3e8541ee63b..63ca7412a462 100644
--- a/include/asm-arm/dma-mapping.h
+++ b/include/asm-arm/dma-mapping.h
@@ -47,7 +47,7 @@ static inline int dma_get_cache_alignment(void)
47 47
48static inline int dma_is_consistent(dma_addr_t handle) 48static inline int dma_is_consistent(dma_addr_t handle)
49{ 49{
50 return 0; 50 return !!arch_is_coherent();
51} 51}
52 52
53/* 53/*
@@ -145,7 +145,9 @@ static inline dma_addr_t
145dma_map_single(struct device *dev, void *cpu_addr, size_t size, 145dma_map_single(struct device *dev, void *cpu_addr, size_t size,
146 enum dma_data_direction dir) 146 enum dma_data_direction dir)
147{ 147{
148 consistent_sync(cpu_addr, size, dir); 148 if (!arch_is_coherent())
149 consistent_sync(cpu_addr, size, dir);
150
149 return virt_to_dma(dev, (unsigned long)cpu_addr); 151 return virt_to_dma(dev, (unsigned long)cpu_addr);
150} 152}
151#else 153#else
@@ -255,7 +257,9 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
255 257
256 sg->dma_address = page_to_dma(dev, sg->page) + sg->offset; 258 sg->dma_address = page_to_dma(dev, sg->page) + sg->offset;
257 virt = page_address(sg->page) + sg->offset; 259 virt = page_address(sg->page) + sg->offset;
258 consistent_sync(virt, sg->length, dir); 260
261 if (!arch_is_coherent())
262 consistent_sync(virt, sg->length, dir);
259 } 263 }
260 264
261 return nents; 265 return nents;
@@ -310,14 +314,16 @@ static inline void
310dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size, 314dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
311 enum dma_data_direction dir) 315 enum dma_data_direction dir)
312{ 316{
313 consistent_sync((void *)dma_to_virt(dev, handle), size, dir); 317 if (!arch_is_coherent())
318 consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
314} 319}
315 320
316static inline void 321static inline void
317dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size, 322dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
318 enum dma_data_direction dir) 323 enum dma_data_direction dir)
319{ 324{
320 consistent_sync((void *)dma_to_virt(dev, handle), size, dir); 325 if (!arch_is_coherent())
326 consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
321} 327}
322#else 328#else
323extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction); 329extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
@@ -347,7 +353,8 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
347 353
348 for (i = 0; i < nents; i++, sg++) { 354 for (i = 0; i < nents; i++, sg++) {
349 char *virt = page_address(sg->page) + sg->offset; 355 char *virt = page_address(sg->page) + sg->offset;
350 consistent_sync(virt, sg->length, dir); 356 if (!arch_is_coherent())
357 consistent_sync(virt, sg->length, dir);
351 } 358 }
352} 359}
353 360
@@ -359,7 +366,8 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
359 366
360 for (i = 0; i < nents; i++, sg++) { 367 for (i = 0; i < nents; i++, sg++) {
361 char *virt = page_address(sg->page) + sg->offset; 368 char *virt = page_address(sg->page) + sg->offset;
362 consistent_sync(virt, sg->length, dir); 369 if (!arch_is_coherent())
370 consistent_sync(virt, sg->length, dir);
363 } 371 }
364} 372}
365#else 373#else
diff --git a/include/asm-arm/memory.h b/include/asm-arm/memory.h
index afa5c3ea077c..2b3cf69b3ed9 100644
--- a/include/asm-arm/memory.h
+++ b/include/asm-arm/memory.h
@@ -234,6 +234,14 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
234#define virt_to_dma(dev, addr) (__arch_virt_to_dma(dev, addr)) 234#define virt_to_dma(dev, addr) (__arch_virt_to_dma(dev, addr))
235#endif 235#endif
236 236
237/*
238 * Optional coherency support. Currently used only by selected
239 * Intel XSC3-based systems.
240 */
241#ifndef arch_is_coherent
242#define arch_is_coherent() 0
243#endif
244
237#endif 245#endif
238 246
239#include <asm-generic/memory_model.h> 247#include <asm-generic/memory_model.h>
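
With the fallback above in place, a platform that really is I/O-coherent opts in by providing its own definition before this header's #ifndef is reached. A hedged sketch of such an override; the header location and config symbol are assumptions for illustration, not part of this patch:

/* e.g. in the machine class's include/asm-arm/arch-xxx/memory.h */
#ifdef CONFIG_MY_XSC3_COHERENT_BOARD	/* hypothetical option */
#define arch_is_coherent()	1
#endif

Once arch_is_coherent() is a compile-time 1, every "if (!arch_is_coherent())" guard added to dma-mapping.h above lets the compiler drop the consistent_sync() cache maintenance entirely, leaving the streaming DMA calls as pure address translation.
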
diff --git a/include/asm-arm/pgtable-hwdef.h b/include/asm-arm/pgtable-hwdef.h
index 1d033495cc75..1bc1f997bda2 100644
--- a/include/asm-arm/pgtable-hwdef.h
+++ b/include/asm-arm/pgtable-hwdef.h
@@ -73,6 +73,7 @@
73#define PTE_EXT_AP_URW_SRW (PTE_EXT_AP1|PTE_EXT_AP0) 73#define PTE_EXT_AP_URW_SRW (PTE_EXT_AP1|PTE_EXT_AP0)
74#define PTE_EXT_TEX(x) ((x) << 6) /* v5 */ 74#define PTE_EXT_TEX(x) ((x) << 6) /* v5 */
75#define PTE_EXT_APX (1 << 9) /* v6 */ 75#define PTE_EXT_APX (1 << 9) /* v6 */
76#define PTE_EXT_COHERENT (1 << 9) /* XScale3 */
76#define PTE_EXT_SHARED (1 << 10) /* v6 */ 77#define PTE_EXT_SHARED (1 << 10) /* v6 */
77#define PTE_EXT_NG (1 << 11) /* v6 */ 78#define PTE_EXT_NG (1 << 11) /* v6 */
78 79
diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h
index e595ae24efe2..e85c08d78dda 100644
--- a/include/asm-arm/pgtable.h
+++ b/include/asm-arm/pgtable.h
@@ -156,6 +156,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
156#define L_PTE_WRITE (1 << 5) 156#define L_PTE_WRITE (1 << 5)
157#define L_PTE_EXEC (1 << 6) 157#define L_PTE_EXEC (1 << 6)
158#define L_PTE_DIRTY (1 << 7) 158#define L_PTE_DIRTY (1 << 7)
159#define L_PTE_COHERENT (1 << 9) /* I/O coherent (xsc3) */
159#define L_PTE_SHARED (1 << 10) /* shared between CPUs (v6) */ 160#define L_PTE_SHARED (1 << 10) /* shared between CPUs (v6) */
160#define L_PTE_ASID (1 << 11) /* non-global (use ASID, v6) */ 161#define L_PTE_ASID (1 << 11) /* non-global (use ASID, v6) */
161 162
diff --git a/include/asm-arm/unistd.h b/include/asm-arm/unistd.h
index 8f331bbd39a8..65ac305c2d45 100644
--- a/include/asm-arm/unistd.h
+++ b/include/asm-arm/unistd.h
@@ -308,8 +308,6 @@
308#define __NR_mq_notify (__NR_SYSCALL_BASE+278) 308#define __NR_mq_notify (__NR_SYSCALL_BASE+278)
309#define __NR_mq_getsetattr (__NR_SYSCALL_BASE+279) 309#define __NR_mq_getsetattr (__NR_SYSCALL_BASE+279)
310#define __NR_waitid (__NR_SYSCALL_BASE+280) 310#define __NR_waitid (__NR_SYSCALL_BASE+280)
311
312#if defined(__ARM_EABI__) /* reserve these for un-muxing socketcall */
313#define __NR_socket (__NR_SYSCALL_BASE+281) 311#define __NR_socket (__NR_SYSCALL_BASE+281)
314#define __NR_bind (__NR_SYSCALL_BASE+282) 312#define __NR_bind (__NR_SYSCALL_BASE+282)
315#define __NR_connect (__NR_SYSCALL_BASE+283) 313#define __NR_connect (__NR_SYSCALL_BASE+283)
@@ -327,9 +325,6 @@
327#define __NR_getsockopt (__NR_SYSCALL_BASE+295) 325#define __NR_getsockopt (__NR_SYSCALL_BASE+295)
328#define __NR_sendmsg (__NR_SYSCALL_BASE+296) 326#define __NR_sendmsg (__NR_SYSCALL_BASE+296)
329#define __NR_recvmsg (__NR_SYSCALL_BASE+297) 327#define __NR_recvmsg (__NR_SYSCALL_BASE+297)
330#endif
331
332#if defined(__ARM_EABI__) /* reserve these for un-muxing ipc */
333#define __NR_semop (__NR_SYSCALL_BASE+298) 328#define __NR_semop (__NR_SYSCALL_BASE+298)
334#define __NR_semget (__NR_SYSCALL_BASE+299) 329#define __NR_semget (__NR_SYSCALL_BASE+299)
335#define __NR_semctl (__NR_SYSCALL_BASE+300) 330#define __NR_semctl (__NR_SYSCALL_BASE+300)
@@ -341,16 +336,10 @@
341#define __NR_shmdt (__NR_SYSCALL_BASE+306) 336#define __NR_shmdt (__NR_SYSCALL_BASE+306)
342#define __NR_shmget (__NR_SYSCALL_BASE+307) 337#define __NR_shmget (__NR_SYSCALL_BASE+307)
343#define __NR_shmctl (__NR_SYSCALL_BASE+308) 338#define __NR_shmctl (__NR_SYSCALL_BASE+308)
344#endif
345
346#define __NR_add_key (__NR_SYSCALL_BASE+309) 339#define __NR_add_key (__NR_SYSCALL_BASE+309)
347#define __NR_request_key (__NR_SYSCALL_BASE+310) 340#define __NR_request_key (__NR_SYSCALL_BASE+310)
348#define __NR_keyctl (__NR_SYSCALL_BASE+311) 341#define __NR_keyctl (__NR_SYSCALL_BASE+311)
349
350#if defined(__ARM_EABI__) /* reserved for un-muxing ipc */
351#define __NR_semtimedop (__NR_SYSCALL_BASE+312) 342#define __NR_semtimedop (__NR_SYSCALL_BASE+312)
352#endif
353
354#define __NR_vserver (__NR_SYSCALL_BASE+313) 343#define __NR_vserver (__NR_SYSCALL_BASE+313)
355#define __NR_ioprio_set (__NR_SYSCALL_BASE+314) 344#define __NR_ioprio_set (__NR_SYSCALL_BASE+314)
356#define __NR_ioprio_get (__NR_SYSCALL_BASE+315) 345#define __NR_ioprio_get (__NR_SYSCALL_BASE+315)
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
index de4614840c2c..9291c24f5819 100644
--- a/include/asm-generic/local.h
+++ b/include/asm-generic/local.h
@@ -7,8 +7,15 @@
7#include <asm/atomic.h> 7#include <asm/atomic.h>
8#include <asm/types.h> 8#include <asm/types.h>
9 9
10/* An unsigned long type for operations which are atomic for a single 10/*
11 * CPU. Usually used in combination with per-cpu variables. */ 11 * A signed long type for operations which are atomic for a single CPU.
12 * Usually used in combination with per-cpu variables.
13 *
14 * This is the default implementation, which uses atomic_long_t. Which is
15 * rather pointless. The whole point behind local_t is that some processors
16 * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
17 * running on this CPU. local_t allows exploitation of such capabilities.
18 */
12 19
13/* Implement in terms of atomics. */ 20/* Implement in terms of atomics. */
14 21
@@ -20,7 +27,7 @@ typedef struct
20 27
21#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } 28#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
22 29
23#define local_read(l) ((unsigned long)atomic_long_read(&(l)->a)) 30#define local_read(l) atomic_long_read(&(l)->a)
24#define local_set(l,i) atomic_long_set((&(l)->a),(i)) 31#define local_set(l,i) atomic_long_set((&(l)->a),(i))
25#define local_inc(l) atomic_long_inc(&(l)->a) 32#define local_inc(l) atomic_long_inc(&(l)->a)
26#define local_dec(l) atomic_long_dec(&(l)->a) 33#define local_dec(l) atomic_long_dec(&(l)->a)
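
The comment above is the whole contract: a local_t is only atomic with respect to code on the same CPU (including interrupts), so it is normally paired with a per-CPU variable and used under get_cpu_var()/put_cpu_var(). A minimal sketch of that pattern, with the counter and helper names invented for illustration:

#include <linux/percpu.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, hit_count) = LOCAL_INIT(0);

static void count_hit(void)
{
	/* get_cpu_var() disables preemption, so we stay on one CPU */
	local_inc(&get_cpu_var(hit_count));
	put_cpu_var(hit_count);
}

static long total_hits(void)
{
	long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += local_read(&per_cpu(hit_count, cpu));
	return sum;
}

Note that local_read() now returns a signed long, which is why the unsigned long cast was dropped above.
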
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index 40c6d1f86598..29c6ac34e236 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -17,13 +17,14 @@
17 * it wasn't 1 originally. This function MUST leave the value lower than 17 * it wasn't 1 originally. This function MUST leave the value lower than
18 * 1 even when the "1" assertion wasn't true. 18 * 1 even when the "1" assertion wasn't true.
19 */ 19 */
20#define __mutex_fastpath_lock(count, fail_fn) \ 20static inline void
21do { \ 21__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
22 if (unlikely(atomic_dec_return(count) < 0)) \ 22{
23 fail_fn(count); \ 23 if (unlikely(atomic_dec_return(count) < 0))
24 else \ 24 fail_fn(count);
25 smp_mb(); \ 25 else
26} while (0) 26 smp_mb();
27}
27 28
28/** 29/**
29 * __mutex_fastpath_lock_retval - try to take the lock by moving the count 30 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
@@ -36,7 +37,7 @@ do { \
36 * or anything the slow path function returns. 37 * or anything the slow path function returns.
37 */ 38 */
38static inline int 39static inline int
39__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) 40__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
40{ 41{
41 if (unlikely(atomic_dec_return(count) < 0)) 42 if (unlikely(atomic_dec_return(count) < 0))
42 return fail_fn(count); 43 return fail_fn(count);
@@ -59,12 +60,13 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
59 * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs 60 * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
60 * to return 0 otherwise. 61 * to return 0 otherwise.
61 */ 62 */
62#define __mutex_fastpath_unlock(count, fail_fn) \ 63static inline void
63do { \ 64__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
64 smp_mb(); \ 65{
65 if (unlikely(atomic_inc_return(count) <= 0)) \ 66 smp_mb();
66 fail_fn(count); \ 67 if (unlikely(atomic_inc_return(count) <= 0))
67} while (0) 68 fail_fn(count);
69}
68 70
69#define __mutex_slowpath_needs_to_unlock() 1 71#define __mutex_slowpath_needs_to_unlock() 1
70 72
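
The counter protocol behind these fastpaths is: 1 means unlocked, 0 means locked, and a negative value means locked with possible waiters, which is why __mutex_slowpath_needs_to_unlock() is 1 for this variant. Turning the macros into inline functions also gives fail_fn a real (fastcall) prototype, so a mismatched slowpath signature is now a compile error. A hedged restatement of how the core pairs the fastpath with its slowpath; the bodies are simplified for illustration and do not reproduce kernel/mutex.c:

void example_mutex_lock(struct mutex *lock)
{
	/* fast path: 1 -> 0; any other starting value falls into the slowpath */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}

void example_mutex_unlock(struct mutex *lock)
{
	/* fast path: 0 -> 1; a negative count means waiters must be woken */
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}
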
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index 1d24f47e6c48..32a2100c1aeb 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Generic implementation of the mutex fastpath, based on xchg(). 4 * Generic implementation of the mutex fastpath, based on xchg().
5 * 5 *
6 * NOTE: An xchg based implementation is less optimal than an atomic 6 * NOTE: An xchg based implementation might be less optimal than an atomic
7 * decrement/increment based implementation. If your architecture 7 * decrement/increment based implementation. If your architecture
8 * has a reasonable atomic dec/inc then you should probably use 8 * has a reasonable atomic dec/inc then you should probably use
9 * asm-generic/mutex-dec.h instead, or you could open-code an 9 * asm-generic/mutex-dec.h instead, or you could open-code an
@@ -22,14 +22,14 @@
22 * wasn't 1 originally. This function MUST leave the value lower than 1 22 * wasn't 1 originally. This function MUST leave the value lower than 1
23 * even when the "1" assertion wasn't true. 23 * even when the "1" assertion wasn't true.
24 */ 24 */
25#define __mutex_fastpath_lock(count, fail_fn) \ 25static inline void
26do { \ 26__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
27 if (unlikely(atomic_xchg(count, 0) != 1)) \ 27{
28 fail_fn(count); \ 28 if (unlikely(atomic_xchg(count, 0) != 1))
29 else \ 29 fail_fn(count);
30 smp_mb(); \ 30 else
31} while (0) 31 smp_mb();
32 32}
33 33
34/** 34/**
35 * __mutex_fastpath_lock_retval - try to take the lock by moving the count 35 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
@@ -42,7 +42,7 @@ do { \
42 * or anything the slow path function returns 42 * or anything the slow path function returns
43 */ 43 */
44static inline int 44static inline int
45__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) 45__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
46{ 46{
47 if (unlikely(atomic_xchg(count, 0) != 1)) 47 if (unlikely(atomic_xchg(count, 0) != 1))
48 return fail_fn(count); 48 return fail_fn(count);
@@ -64,12 +64,13 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
64 * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs 64 * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
65 * to return 0 otherwise. 65 * to return 0 otherwise.
66 */ 66 */
67#define __mutex_fastpath_unlock(count, fail_fn) \ 67static inline void
68do { \ 68__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
69 smp_mb(); \ 69{
70 if (unlikely(atomic_xchg(count, 1) != 0)) \ 70 smp_mb();
71 fail_fn(count); \ 71 if (unlikely(atomic_xchg(count, 1) != 0))
72} while (0) 72 fail_fn(count);
73}
73 74
74#define __mutex_slowpath_needs_to_unlock() 0 75#define __mutex_slowpath_needs_to_unlock() 0
75 76
diff --git a/include/asm-i386/apicdef.h b/include/asm-i386/apicdef.h
index 03185cef8e0a..5e4a35af2921 100644
--- a/include/asm-i386/apicdef.h
+++ b/include/asm-i386/apicdef.h
@@ -37,6 +37,7 @@
37#define APIC_SPIV_FOCUS_DISABLED (1<<9) 37#define APIC_SPIV_FOCUS_DISABLED (1<<9)
38#define APIC_SPIV_APIC_ENABLED (1<<8) 38#define APIC_SPIV_APIC_ENABLED (1<<8)
39#define APIC_ISR 0x100 39#define APIC_ISR 0x100
40#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */
40#define APIC_TMR 0x180 41#define APIC_TMR 0x180
41#define APIC_IRR 0x200 42#define APIC_IRR 0x200
42#define APIC_ESR 0x280 43#define APIC_ESR 0x280
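
APIC_ISR_NR records that the in-service state is spread across eight 32-bit registers starting at APIC_ISR, each on a 16-byte stride. A hedged sketch of walking them for the highest vector currently in service; apic_read() is the existing accessor, the helper name is illustrative:

static int highest_isr_vector(void)
{
	int i;

	for (i = APIC_ISR_NR - 1; i >= 0; i--) {
		u32 isr = apic_read(APIC_ISR + i * 0x10);

		if (isr)
			return i * 32 + fls(isr) - 1;
	}

	return -1;		/* no interrupt in service */
}
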
diff --git a/include/asm-i386/floppy.h b/include/asm-i386/floppy.h
index 79727afb94c9..03403045c182 100644
--- a/include/asm-i386/floppy.h
+++ b/include/asm-i386/floppy.h
@@ -56,7 +56,6 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
56 register unsigned char st; 56 register unsigned char st;
57 57
58#undef TRACE_FLPY_INT 58#undef TRACE_FLPY_INT
59#define NO_FLOPPY_ASSEMBLER
60 59
61#ifdef TRACE_FLPY_INT 60#ifdef TRACE_FLPY_INT
62 static int calls=0; 61 static int calls=0;
@@ -71,38 +70,6 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
71 bytes = virtual_dma_count; 70 bytes = virtual_dma_count;
72#endif 71#endif
73 72
74#ifndef NO_FLOPPY_ASSEMBLER
75 __asm__ (
76 "testl %1,%1"
77 "je 3f"
78"1: inb %w4,%b0"
79 "andb $160,%b0"
80 "cmpb $160,%b0"
81 "jne 2f"
82 "incw %w4"
83 "testl %3,%3"
84 "jne 4f"
85 "inb %w4,%b0"
86 "movb %0,(%2)"
87 "jmp 5f"
88"4: movb (%2),%0"
89 "outb %b0,%w4"
90"5: decw %w4"
91 "outb %0,$0x80"
92 "decl %1"
93 "incl %2"
94 "testl %1,%1"
95 "jne 1b"
96"3: inb %w4,%b0"
97"2: "
98 : "=a" ((char) st),
99 "=c" ((long) virtual_dma_count),
100 "=S" ((long) virtual_dma_addr)
101 : "b" ((long) virtual_dma_mode),
102 "d" ((short) virtual_dma_port+4),
103 "1" ((long) virtual_dma_count),
104 "2" ((long) virtual_dma_addr));
105#else
106 { 73 {
107 register int lcount; 74 register int lcount;
108 register char *lptr; 75 register char *lptr;
@@ -122,7 +89,6 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
122 virtual_dma_addr = lptr; 89 virtual_dma_addr = lptr;
123 st = inb(virtual_dma_port+4); 90 st = inb(virtual_dma_port+4);
124 } 91 }
125#endif
126 92
127#ifdef TRACE_FLPY_INT 93#ifdef TRACE_FLPY_INT
128 calls++; 94 calls++;
diff --git a/include/asm-i386/local.h b/include/asm-i386/local.h
index 0177da80dde3..e67fa08260fe 100644
--- a/include/asm-i386/local.h
+++ b/include/asm-i386/local.h
@@ -5,7 +5,7 @@
5 5
6typedef struct 6typedef struct
7{ 7{
8 volatile unsigned long counter; 8 volatile long counter;
9} local_t; 9} local_t;
10 10
11#define LOCAL_INIT(i) { (i) } 11#define LOCAL_INIT(i) { (i) }
@@ -29,7 +29,7 @@ static __inline__ void local_dec(local_t *v)
29 :"m" (v->counter)); 29 :"m" (v->counter));
30} 30}
31 31
32static __inline__ void local_add(unsigned long i, local_t *v) 32static __inline__ void local_add(long i, local_t *v)
33{ 33{
34 __asm__ __volatile__( 34 __asm__ __volatile__(
35 "addl %1,%0" 35 "addl %1,%0"
@@ -37,7 +37,7 @@ static __inline__ void local_add(unsigned long i, local_t *v)
37 :"ir" (i), "m" (v->counter)); 37 :"ir" (i), "m" (v->counter));
38} 38}
39 39
40static __inline__ void local_sub(unsigned long i, local_t *v) 40static __inline__ void local_sub(long i, local_t *v)
41{ 41{
42 __asm__ __volatile__( 42 __asm__ __volatile__(
43 "subl %1,%0" 43 "subl %1,%0"
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index 789e9bdd0a40..2e7f3e257fdd 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -319,8 +319,9 @@
319#define __NR_set_robust_list 311 319#define __NR_set_robust_list 311
320#define __NR_get_robust_list 312 320#define __NR_get_robust_list 312
321#define __NR_sys_splice 313 321#define __NR_sys_splice 313
322#define __NR_sys_sync_file_range 314
322 323
323#define NR_syscalls 314 324#define NR_syscalls 315
324 325
325/* 326/*
326 * user-visible error numbers are in the range -1 - -128: see 327 * user-visible error numbers are in the range -1 - -128: see
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
index 4e7e6f23b08c..37e52a2836b0 100644
--- a/include/asm-ia64/pal.h
+++ b/include/asm-ia64/pal.h
@@ -68,6 +68,7 @@
68#define PAL_SHUTDOWN 40 /* enter processor shutdown state */ 68#define PAL_SHUTDOWN 40 /* enter processor shutdown state */
69#define PAL_PREFETCH_VISIBILITY 41 /* Make Processor Prefetches Visible */ 69#define PAL_PREFETCH_VISIBILITY 41 /* Make Processor Prefetches Visible */
70#define PAL_LOGICAL_TO_PHYSICAL 42 /* returns information on logical to physical processor mapping */ 70#define PAL_LOGICAL_TO_PHYSICAL 42 /* returns information on logical to physical processor mapping */
71#define PAL_CACHE_SHARED_INFO 43 /* returns information on caches shared by logical processor */
71 72
72#define PAL_COPY_PAL 256 /* relocate PAL procedures and PAL PMI */ 73#define PAL_COPY_PAL 256 /* relocate PAL procedures and PAL PMI */
73#define PAL_HALT_INFO 257 /* return the low power capabilities of processor */ 74#define PAL_HALT_INFO 257 /* return the low power capabilities of processor */
@@ -130,7 +131,7 @@ typedef u64 pal_cache_line_state_t;
130#define PAL_CACHE_LINE_STATE_MODIFIED 3 /* Modified */ 131#define PAL_CACHE_LINE_STATE_MODIFIED 3 /* Modified */
131 132
132typedef struct pal_freq_ratio { 133typedef struct pal_freq_ratio {
133 u64 den : 32, num : 32; /* numerator & denominator */ 134 u32 den, num; /* numerator & denominator */
134} itc_ratio, proc_ratio; 135} itc_ratio, proc_ratio;
135 136
136typedef union pal_cache_config_info_1_s { 137typedef union pal_cache_config_info_1_s {
@@ -151,10 +152,10 @@ typedef union pal_cache_config_info_1_s {
151 152
152typedef union pal_cache_config_info_2_s { 153typedef union pal_cache_config_info_2_s {
153 struct { 154 struct {
154 u64 cache_size : 32, /*cache size in bytes*/ 155 u32 cache_size; /*cache size in bytes*/
155 156
156 157
157 alias_boundary : 8, /* 39-32 aliased addr 158 u32 alias_boundary : 8, /* 39-32 aliased addr
158 * separation for max 159 * separation for max
159 * performance. 160 * performance.
160 */ 161 */
@@ -1647,6 +1648,33 @@ ia64_pal_logical_to_phys(u64 proc_number, pal_logical_to_physical_t *mapping)
1647 1648
1648 return iprv.status; 1649 return iprv.status;
1649} 1650}
1651
1652typedef struct pal_cache_shared_info_s
1653{
1654 u64 num_shared;
1655 pal_proc_n_log_info1_t ppli1;
1656 pal_proc_n_log_info2_t ppli2;
1657} pal_cache_shared_info_t;
1658
1659/* Get information on caches shared by a logical processor. */
1660static inline s64
1661ia64_pal_cache_shared_info(u64 level,
1662 u64 type,
1663 u64 proc_number,
1664 pal_cache_shared_info_t *info)
1665{
1666 struct ia64_pal_retval iprv;
1667
1668 PAL_CALL(iprv, PAL_CACHE_SHARED_INFO, level, type, proc_number);
1669
1670 if (iprv.status == PAL_STATUS_SUCCESS) {
1671 info->num_shared = iprv.v0;
1672 info->ppli1.ppli1_data = iprv.v1;
1673 info->ppli2.ppli2_data = iprv.v2;
1674 }
1675
1676 return iprv.status;
1677}
1650#endif /* __ASSEMBLY__ */ 1678#endif /* __ASSEMBLY__ */
1651 1679
1652#endif /* _ASM_IA64_PAL_H */ 1680#endif /* _ASM_IA64_PAL_H */
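
The new wrapper follows the same calling convention as ia64_pal_logical_to_phys() just above it: the PAL status is returned as-is and the three result registers are copied out only on success. A hedged usage sketch; the level and cache-type values a caller passes follow the PAL specification and are not constants defined in this header:

static void report_shared_cache(u64 level, u64 cache_type)
{
	pal_cache_shared_info_t info;
	u64 proc = 0;

	/* proc_number walks the logical processors sharing this cache */
	while (ia64_pal_cache_shared_info(level, cache_type, proc, &info) ==
	       PAL_STATUS_SUCCESS) {
		printk(KERN_INFO "L%lu cache shared by %lu logical cpus\n",
		       level, info.num_shared);
		if (++proc >= info.num_shared)
			break;
	}
}
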
diff --git a/include/asm-powerpc/eeh.h b/include/asm-powerpc/eeh.h
index 5207758a6dd9..868c7139dbff 100644
--- a/include/asm-powerpc/eeh.h
+++ b/include/asm-powerpc/eeh.h
@@ -60,24 +60,10 @@ void __init pci_addr_cache_build(void);
60 * device (including config space i/o). Call eeh_add_device_late 60 * device (including config space i/o). Call eeh_add_device_late
61 * to finish the eeh setup for this device. 61 * to finish the eeh setup for this device.
62 */ 62 */
63void eeh_add_device_early(struct device_node *);
64void eeh_add_device_late(struct pci_dev *dev);
65void eeh_add_device_tree_early(struct device_node *); 63void eeh_add_device_tree_early(struct device_node *);
66void eeh_add_device_tree_late(struct pci_bus *); 64void eeh_add_device_tree_late(struct pci_bus *);
67 65
68/** 66/**
69 * eeh_remove_device - undo EEH setup for the indicated pci device
70 * @dev: pci device to be removed
71 *
72 * This routine should be called when a device is removed from
73 * a running system (e.g. by hotplug or dlpar). It unregisters
74 * the PCI device from the EEH subsystem. I/O errors affecting
75 * this device will no longer be detected after this call; thus,
76 * i/o errors affecting this slot may leave this device unusable.
77 */
78void eeh_remove_device(struct pci_dev *);
79
80/**
81 * eeh_remove_device_recursive - undo EEH for device & children. 67 * eeh_remove_device_recursive - undo EEH for device & children.
82 * @dev: pci device to be removed 68 * @dev: pci device to be removed
83 * 69 *
@@ -116,12 +102,6 @@ static inline int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *d
116 102
117static inline void pci_addr_cache_build(void) { } 103static inline void pci_addr_cache_build(void) { }
118 104
119static inline void eeh_add_device_early(struct device_node *dn) { }
120
121static inline void eeh_add_device_late(struct pci_dev *dev) { }
122
123static inline void eeh_remove_device(struct pci_dev *dev) { }
124
125static inline void eeh_add_device_tree_early(struct device_node *dn) { } 105static inline void eeh_add_device_tree_early(struct device_node *dn) { }
126 106
127static inline void eeh_add_device_tree_late(struct pci_bus *bus) { } 107static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }
diff --git a/include/asm-powerpc/hvcall.h b/include/asm-powerpc/hvcall.h
index b72c04f3f551..6cc7e1fb7bfd 100644
--- a/include/asm-powerpc/hvcall.h
+++ b/include/asm-powerpc/hvcall.h
@@ -4,47 +4,88 @@
4 4
5#define HVSC .long 0x44000022 5#define HVSC .long 0x44000022
6 6
7#define H_Success 0 7#define H_SUCCESS 0
8#define H_Busy 1 /* Hardware busy -- retry later */ 8#define H_BUSY 1 /* Hardware busy -- retry later */
9#define H_Closed 2 /* Resource closed */ 9#define H_CLOSED 2 /* Resource closed */
10#define H_Constrained 4 /* Resource request constrained to max allowed */ 10#define H_NOT_AVAILABLE 3
11#define H_InProgress 14 /* Kind of like busy */ 11#define H_CONSTRAINED 4 /* Resource request constrained to max allowed */
12#define H_Pending 17 /* returned from H_POLL_PENDING */ 12#define H_PARTIAL 5
13#define H_Continue 18 /* Returned from H_Join on success */ 13#define H_IN_PROGRESS 14 /* Kind of like busy */
14#define H_LongBusyStartRange 9900 /* Start of long busy range */ 14#define H_PAGE_REGISTERED 15
15#define H_LongBusyOrder1msec 9900 /* Long busy, hint that 1msec is a good time to retry */ 15#define H_PARTIAL_STORE 16
16#define H_LongBusyOrder10msec 9901 /* Long busy, hint that 10msec is a good time to retry */ 16#define H_PENDING 17 /* returned from H_POLL_PENDING */
17#define H_LongBusyOrder100msec 9902 /* Long busy, hint that 100msec is a good time to retry */ 17#define H_CONTINUE 18 /* Returned from H_Join on success */
18#define H_LongBusyOrder1sec 9903 /* Long busy, hint that 1sec is a good time to retry */ 18#define H_LONG_BUSY_START_RANGE 9900 /* Start of long busy range */
19#define H_LongBusyOrder10sec 9904 /* Long busy, hint that 10sec is a good time to retry */ 19#define H_LONG_BUSY_ORDER_1_MSEC 9900 /* Long busy, hint that 1msec \
20#define H_LongBusyOrder100sec 9905 /* Long busy, hint that 100sec is a good time to retry */ 20 is a good time to retry */
21#define H_LongBusyEndRange 9905 /* End of long busy range */ 21#define H_LONG_BUSY_ORDER_10_MSEC 9901 /* Long busy, hint that 10msec \
22#define H_Hardware -1 /* Hardware error */ 22 is a good time to retry */
23#define H_Function -2 /* Function not supported */ 23#define H_LONG_BUSY_ORDER_100_MSEC 9902 /* Long busy, hint that 100msec \
24#define H_Privilege -3 /* Caller not privileged */ 24 is a good time to retry */
25#define H_Parameter -4 /* Parameter invalid, out-of-range or conflicting */ 25#define H_LONG_BUSY_ORDER_1_SEC 9903 /* Long busy, hint that 1sec \
26#define H_Bad_Mode -5 /* Illegal msr value */ 26 is a good time to retry */
27#define H_PTEG_Full -6 /* PTEG is full */ 27#define H_LONG_BUSY_ORDER_10_SEC 9904 /* Long busy, hint that 10sec \
28#define H_Not_Found -7 /* PTE was not found */ 28 is a good time to retry */
29#define H_Reserved_DABR -8 /* DABR address is reserved by the hypervisor on this processor */ 29#define H_LONG_BUSY_ORDER_100_SEC 9905 /* Long busy, hint that 100sec \
30#define H_NoMem -9 30 is a good time to retry */
31#define H_Authority -10 31#define H_LONG_BUSY_END_RANGE 9905 /* End of long busy range */
32#define H_Permission -11 32#define H_HARDWARE -1 /* Hardware error */
33#define H_Dropped -12 33#define H_FUNCTION -2 /* Function not supported */
34#define H_SourceParm -13 34#define H_PRIVILEGE -3 /* Caller not privileged */
35#define H_DestParm -14 35#define H_PARAMETER -4 /* Parameter invalid, out-of-range or conflicting */
36#define H_RemoteParm -15 36#define H_BAD_MODE -5 /* Illegal msr value */
37#define H_Resource -16 37#define H_PTEG_FULL -6 /* PTEG is full */
38#define H_NOT_FOUND -7 /* PTE was not found */
39#define H_RESERVED_DABR -8 /* DABR address is reserved by the hypervisor on this processor */
40#define H_NO_MEM -9
41#define H_AUTHORITY -10
42#define H_PERMISSION -11
43#define H_DROPPED -12
44#define H_SOURCE_PARM -13
45#define H_DEST_PARM -14
46#define H_REMOTE_PARM -15
47#define H_RESOURCE -16
48#define H_ADAPTER_PARM -17
49#define H_RH_PARM -18
50#define H_RCQ_PARM -19
51#define H_SCQ_PARM -20
52#define H_EQ_PARM -21
53#define H_RT_PARM -22
54#define H_ST_PARM -23
55#define H_SIGT_PARM -24
56#define H_TOKEN_PARM -25
57#define H_MLENGTH_PARM -27
58#define H_MEM_PARM -28
59#define H_MEM_ACCESS_PARM -29
60#define H_ATTR_PARM -30
61#define H_PORT_PARM -31
62#define H_MCG_PARM -32
63#define H_VL_PARM -33
64#define H_TSIZE_PARM -34
65#define H_TRACE_PARM -35
66
67#define H_MASK_PARM -37
68#define H_MCG_FULL -38
69#define H_ALIAS_EXIST -39
70#define H_P_COUNTER -40
71#define H_TABLE_FULL -41
72#define H_ALT_TABLE -42
73#define H_MR_CONDITION -43
74#define H_NOT_ENOUGH_RESOURCES -44
75#define H_R_STATE -45
76#define H_RESCINDEND -46
77
38 78
39/* Long Busy is a condition that can be returned by the firmware 79/* Long Busy is a condition that can be returned by the firmware
40 * when a call cannot be completed now, but the identical call 80 * when a call cannot be completed now, but the identical call
41 * should be retried later. This prevents calls blocking in the 81 * should be retried later. This prevents calls blocking in the
42 * firmware for long periods of time. Annoyingly the firmware can return 82 * firmware for long periods of time. Annoyingly the firmware can return
43 * a range of return codes, hinting at how long we should wait before 83 * a range of return codes, hinting at how long we should wait before
44 * retrying. If you don't care for the hint, the macro below is a good 84 * retrying. If you don't care for the hint, the macro below is a good
45 * way to check for the long_busy return codes 85 * way to check for the long_busy return codes
46 */ 86 */
47#define H_isLongBusy(x) ((x >= H_LongBusyStartRange) && (x <= H_LongBusyEndRange)) 87#define H_IS_LONG_BUSY(x) ((x >= H_LONG_BUSY_START_RANGE) \
88 && (x <= H_LONG_BUSY_END_RANGE))
48 89
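
A typical caller therefore retries on both the plain H_BUSY code and the long-busy range, sleeping for roughly the hinted interval before trying again. A hedged sketch of that loop; the opcode/argument, the delay helper and the use of msleep()/cpu_relax() are illustrative, only plpar_hcall_norets() and the H_* codes come from this header:

static unsigned int longbusy_msecs(long rc)
{
	switch (rc) {
	case H_LONG_BUSY_ORDER_1_MSEC:		return 1;
	case H_LONG_BUSY_ORDER_10_MSEC:		return 10;
	case H_LONG_BUSY_ORDER_100_MSEC:	return 100;
	case H_LONG_BUSY_ORDER_1_SEC:		return 1000;
	case H_LONG_BUSY_ORDER_10_SEC:		return 10000;
	default:				return 100000;
	}
}

static long hcall_with_retry(unsigned long opcode, unsigned long arg)
{
	long rc;

	for (;;) {
		rc = plpar_hcall_norets(opcode, arg);
		if (H_IS_LONG_BUSY(rc))
			msleep(longbusy_msecs(rc));
		else if (rc == H_BUSY)
			cpu_relax();
		else
			return rc;
	}
}
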
49/* Flags */ 90/* Flags */
50#define H_LARGE_PAGE (1UL<<(63-16)) 91#define H_LARGE_PAGE (1UL<<(63-16))
@@ -66,6 +107,9 @@
66#define H_DABRX_KERNEL (1UL<<(63-62)) 107#define H_DABRX_KERNEL (1UL<<(63-62))
67#define H_DABRX_USER (1UL<<(63-63)) 108#define H_DABRX_USER (1UL<<(63-63))
68 109
110/* Each control block has to be on a 4K boundary */
111#define H_CB_ALIGNMENT 4096
112
69/* pSeries hypervisor opcodes */ 113/* pSeries hypervisor opcodes */
70#define H_REMOVE 0x04 114#define H_REMOVE 0x04
71#define H_ENTER 0x08 115#define H_ENTER 0x08
@@ -99,25 +143,52 @@
99#define H_PERFMON 0x7c 143#define H_PERFMON 0x7c
100#define H_MIGRATE_DMA 0x78 144#define H_MIGRATE_DMA 0x78
101#define H_REGISTER_VPA 0xDC 145#define H_REGISTER_VPA 0xDC
102#define H_CEDE 0xE0 146#define H_CEDE 0xE0
103#define H_CONFER 0xE4 147#define H_CONFER 0xE4
104#define H_PROD 0xE8 148#define H_PROD 0xE8
105#define H_GET_PPP 0xEC 149#define H_GET_PPP 0xEC
106#define H_SET_PPP 0xF0 150#define H_SET_PPP 0xF0
107#define H_PURR 0xF4 151#define H_PURR 0xF4
108#define H_PIC 0xF8 152#define H_PIC 0xF8
109#define H_REG_CRQ 0xFC 153#define H_REG_CRQ 0xFC
110#define H_FREE_CRQ 0x100 154#define H_FREE_CRQ 0x100
111#define H_VIO_SIGNAL 0x104 155#define H_VIO_SIGNAL 0x104
112#define H_SEND_CRQ 0x108 156#define H_SEND_CRQ 0x108
113#define H_COPY_RDMA 0x110 157#define H_COPY_RDMA 0x110
114#define H_SET_XDABR 0x134 158#define H_SET_XDABR 0x134
115#define H_STUFF_TCE 0x138 159#define H_STUFF_TCE 0x138
116#define H_PUT_TCE_INDIRECT 0x13C 160#define H_PUT_TCE_INDIRECT 0x13C
117#define H_VTERM_PARTNER_INFO 0x150 161#define H_VTERM_PARTNER_INFO 0x150
118#define H_REGISTER_VTERM 0x154 162#define H_REGISTER_VTERM 0x154
119#define H_FREE_VTERM 0x158 163#define H_FREE_VTERM 0x158
120#define H_POLL_PENDING 0x1D8 164#define H_RESET_EVENTS 0x15C
165#define H_ALLOC_RESOURCE 0x160
166#define H_FREE_RESOURCE 0x164
167#define H_MODIFY_QP 0x168
168#define H_QUERY_QP 0x16C
169#define H_REREGISTER_PMR 0x170
170#define H_REGISTER_SMR 0x174
171#define H_QUERY_MR 0x178
172#define H_QUERY_MW 0x17C
173#define H_QUERY_HCA 0x180
174#define H_QUERY_PORT 0x184
175#define H_MODIFY_PORT 0x188
176#define H_DEFINE_AQP1 0x18C
177#define H_GET_TRACE_BUFFER 0x190
178#define H_DEFINE_AQP0 0x194
179#define H_RESIZE_MR 0x198
180#define H_ATTACH_MCQP 0x19C
181#define H_DETACH_MCQP 0x1A0
182#define H_CREATE_RPT 0x1A4
183#define H_REMOVE_RPT 0x1A8
184#define H_REGISTER_RPAGES 0x1AC
185#define H_DISABLE_AND_GETC 0x1B0
186#define H_ERROR_DATA 0x1B4
187#define H_GET_HCA_INFO 0x1B8
188#define H_GET_PERF_COUNT 0x1BC
189#define H_MANAGE_TRACE 0x1C0
190#define H_QUERY_INT_STATE 0x1E4
191#define H_POLL_PENDING 0x1D8
121#define H_JOIN 0x298 192#define H_JOIN 0x298
122#define H_ENABLE_CRQ 0x2B0 193#define H_ENABLE_CRQ 0x2B0
123 194
@@ -152,7 +223,7 @@ long plpar_hcall_norets(unsigned long opcode, ...);
152 */ 223 */
153long plpar_hcall_8arg_2ret(unsigned long opcode, 224long plpar_hcall_8arg_2ret(unsigned long opcode,
154 unsigned long arg1, 225 unsigned long arg1,
155 unsigned long arg2, 226 unsigned long arg2,
156 unsigned long arg3, 227 unsigned long arg3,
157 unsigned long arg4, 228 unsigned long arg4,
158 unsigned long arg5, 229 unsigned long arg5,
@@ -176,6 +247,42 @@ long plpar_hcall_4out(unsigned long opcode,
176 unsigned long *out3, 247 unsigned long *out3,
177 unsigned long *out4); 248 unsigned long *out4);
178 249
250long plpar_hcall_7arg_7ret(unsigned long opcode,
251 unsigned long arg1,
252 unsigned long arg2,
253 unsigned long arg3,
254 unsigned long arg4,
255 unsigned long arg5,
256 unsigned long arg6,
257 unsigned long arg7,
258 unsigned long *out1,
259 unsigned long *out2,
260 unsigned long *out3,
261 unsigned long *out4,
262 unsigned long *out5,
263 unsigned long *out6,
264 unsigned long *out7);
265
266long plpar_hcall_9arg_9ret(unsigned long opcode,
267 unsigned long arg1,
268 unsigned long arg2,
269 unsigned long arg3,
270 unsigned long arg4,
271 unsigned long arg5,
272 unsigned long arg6,
273 unsigned long arg7,
274 unsigned long arg8,
275 unsigned long arg9,
276 unsigned long *out1,
277 unsigned long *out2,
278 unsigned long *out3,
279 unsigned long *out4,
280 unsigned long *out5,
281 unsigned long *out6,
282 unsigned long *out7,
283 unsigned long *out8,
284 unsigned long *out9);
285
179#endif /* __ASSEMBLY__ */ 286#endif /* __ASSEMBLY__ */
180#endif /* __KERNEL__ */ 287#endif /* __KERNEL__ */
181#endif /* _ASM_POWERPC_HVCALL_H */ 288#endif /* _ASM_POWERPC_HVCALL_H */
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 65f5a7b2646b..d075725bf444 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -365,8 +365,11 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
365 * powers of 2 writes until it reaches sufficient alignment). 365 * powers of 2 writes until it reaches sufficient alignment).
366 * 366 *
367 * Based on this we disable the IP header alignment in network drivers. 367 * Based on this we disable the IP header alignment in network drivers.
368 * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
369 * cacheline alignment of buffers.
368 */ 370 */
369#define NET_IP_ALIGN 0 371#define NET_IP_ALIGN 0
372#define NET_SKB_PAD L1_CACHE_BYTES
370#endif 373#endif
371 374
372#define arch_align_stack(x) (x) 375#define arch_align_stack(x) (x)
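
The practical effect lands in network drivers' receive path: with NET_IP_ALIGN forced to 0 the usual two-byte reserve becomes a no-op, and growing NET_SKB_PAD to a full cache line keeps the headroom that dev_alloc_skb() adds cacheline-sized. A hedged sketch of the allocation pattern this is tuned for, with the helper and length names invented for illustration:

static struct sk_buff *rx_alloc_skb(unsigned int len)
{
	struct sk_buff *skb;

	/* dev_alloc_skb() already reserves NET_SKB_PAD bytes of headroom */
	skb = dev_alloc_skb(len + NET_IP_ALIGN);
	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);	/* 0 here, 2 on most arches */
	return skb;
}
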
diff --git a/include/asm-s390/percpu.h b/include/asm-s390/percpu.h
index e10ed87094f0..436d216601e5 100644
--- a/include/asm-s390/percpu.h
+++ b/include/asm-s390/percpu.h
@@ -46,7 +46,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
46#define percpu_modcopy(pcpudst, src, size) \ 46#define percpu_modcopy(pcpudst, src, size) \
47do { \ 47do { \
48 unsigned int __i; \ 48 unsigned int __i; \
49 for_each_cpu(__i) \ 49 for_each_possible_cpu(__i) \
50 memcpy((pcpudst)+__per_cpu_offset[__i], \ 50 memcpy((pcpudst)+__per_cpu_offset[__i], \
51 (src), (size)); \ 51 (src), (size)); \
52} while (0) 52} while (0)
diff --git a/include/asm-sparc/unistd.h b/include/asm-sparc/unistd.h
index 64ec640a40ee..264f0ebeaedc 100644
--- a/include/asm-sparc/unistd.h
+++ b/include/asm-sparc/unistd.h
@@ -180,7 +180,7 @@
180#define __NR_sched_get_affinity 161 /* Linux specific, getfh under SunOS */ 180#define __NR_sched_get_affinity 161 /* Linux specific, getfh under SunOS */
181#define __NR_getdomainname 162 /* SunOS Specific */ 181#define __NR_getdomainname 162 /* SunOS Specific */
182#define __NR_setdomainname 163 /* Common */ 182#define __NR_setdomainname 163 /* Common */
183/* #define __NR_ni_syscall 164 ENOSYS under SunOS */ 183/* #define __NR_utrap_install 164 Linux sparc64 specific */
184#define __NR_quotactl 165 /* Common */ 184#define __NR_quotactl 165 /* Common */
185#define __NR_set_tid_address 166 /* Linux specific, exportfs under SunOS */ 185#define __NR_set_tid_address 166 /* Linux specific, exportfs under SunOS */
186#define __NR_mount 167 /* Common */ 186#define __NR_mount 167 /* Common */
@@ -248,7 +248,7 @@
248#define __NR_setfsgid 229 /* Linux Specific */ 248#define __NR_setfsgid 229 /* Linux Specific */
249#define __NR__newselect 230 /* Linux Specific */ 249#define __NR__newselect 230 /* Linux Specific */
250#define __NR_time 231 /* Linux Specific */ 250#define __NR_time 231 /* Linux Specific */
251/* #define __NR_oldstat 232 Linux Specific */ 251#define __NR_sys_splice 232 /* Linux Specific */
252#define __NR_stime 233 /* Linux Specific */ 252#define __NR_stime 233 /* Linux Specific */
253#define __NR_statfs64 234 /* Linux Specific */ 253#define __NR_statfs64 234 /* Linux Specific */
254#define __NR_fstatfs64 235 /* Linux Specific */ 254#define __NR_fstatfs64 235 /* Linux Specific */
@@ -271,7 +271,7 @@
271#define __NR_getsid 252 271#define __NR_getsid 252
272#define __NR_fdatasync 253 272#define __NR_fdatasync 253
273#define __NR_nfsservctl 254 273#define __NR_nfsservctl 254
274#define __NR_aplib 255 274#define __NR_sys_sync_file_range 255
275#define __NR_clock_settime 256 275#define __NR_clock_settime 256
276#define __NR_clock_gettime 257 276#define __NR_clock_gettime 257
277#define __NR_clock_getres 258 277#define __NR_clock_getres 258
diff --git a/include/asm-sparc64/unistd.h b/include/asm-sparc64/unistd.h
index a284986b1541..d0544b4f47b7 100644
--- a/include/asm-sparc64/unistd.h
+++ b/include/asm-sparc64/unistd.h
@@ -250,7 +250,7 @@
250#ifdef __KERNEL__ 250#ifdef __KERNEL__
251#define __NR_time 231 /* Linux sparc32 */ 251#define __NR_time 231 /* Linux sparc32 */
252#endif 252#endif
253/* #define __NR_oldstat 232 Linux Specific */ 253#define __NR_sys_splice 232 /* Linux Specific */
254#define __NR_stime 233 /* Linux Specific */ 254#define __NR_stime 233 /* Linux Specific */
255#define __NR_statfs64 234 /* Linux Specific */ 255#define __NR_statfs64 234 /* Linux Specific */
256#define __NR_fstatfs64 235 /* Linux Specific */ 256#define __NR_fstatfs64 235 /* Linux Specific */
@@ -273,7 +273,7 @@
273#define __NR_getsid 252 273#define __NR_getsid 252
274#define __NR_fdatasync 253 274#define __NR_fdatasync 253
275#define __NR_nfsservctl 254 275#define __NR_nfsservctl 254
276#define __NR_aplib 255 276#define __NR_sys_sync_file_range 255
277#define __NR_clock_settime 256 277#define __NR_clock_settime 256
278#define __NR_clock_gettime 257 278#define __NR_clock_gettime 257
279#define __NR_clock_getres 258 279#define __NR_clock_getres 258
diff --git a/include/asm-um/desc.h b/include/asm-um/desc.h
index ac1d2a20d178..4ec34a51b62c 100644
--- a/include/asm-um/desc.h
+++ b/include/asm-um/desc.h
@@ -1,6 +1,16 @@
1#ifndef __UM_DESC_H 1#ifndef __UM_DESC_H
2#define __UM_DESC_H 2#define __UM_DESC_H
3 3
4#include "asm/arch/desc.h" 4/* Taken from asm-i386/desc.h, it's the only thing we need. The rest wouldn't
5 * compile, and has never been used. */
6#define LDT_empty(info) (\
7 (info)->base_addr == 0 && \
8 (info)->limit == 0 && \
9 (info)->contents == 0 && \
10 (info)->read_exec_only == 1 && \
11 (info)->seg_32bit == 0 && \
12 (info)->limit_in_pages == 0 && \
13 (info)->seg_not_present == 1 && \
14 (info)->useable == 0 )
5 15
6#endif 16#endif
diff --git a/include/asm-um/host_ldt-i386.h b/include/asm-um/host_ldt-i386.h
new file mode 100644
index 000000000000..b27cb0a9dd30
--- /dev/null
+++ b/include/asm-um/host_ldt-i386.h
@@ -0,0 +1,34 @@
1#ifndef __ASM_HOST_LDT_I386_H
2#define __ASM_HOST_LDT_I386_H
3
4#include "asm/arch/ldt.h"
5
6/*
7 * macros stolen from include/asm-i386/desc.h
8 */
9#define LDT_entry_a(info) \
10 ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
11
12#define LDT_entry_b(info) \
13 (((info)->base_addr & 0xff000000) | \
14 (((info)->base_addr & 0x00ff0000) >> 16) | \
15 ((info)->limit & 0xf0000) | \
16 (((info)->read_exec_only ^ 1) << 9) | \
17 ((info)->contents << 10) | \
18 (((info)->seg_not_present ^ 1) << 15) | \
19 ((info)->seg_32bit << 22) | \
20 ((info)->limit_in_pages << 23) | \
21 ((info)->useable << 20) | \
22 0x7000)
23
24#define LDT_empty(info) (\
25 (info)->base_addr == 0 && \
26 (info)->limit == 0 && \
27 (info)->contents == 0 && \
28 (info)->read_exec_only == 1 && \
29 (info)->seg_32bit == 0 && \
30 (info)->limit_in_pages == 0 && \
31 (info)->seg_not_present == 1 && \
32 (info)->useable == 0 )
33
34#endif
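
LDT_entry_a()/LDT_entry_b() pack a struct user_desc into the two 32-bit halves of a hardware descriptor, exactly as asm-i386/desc.h does, and LDT_empty() recognises the "clear this slot" encoding used by the TLS and modify_ldt interfaces. A minimal sketch of using them together; the helper is illustrative, struct ldt_entry is the two-word type declared in asm-um/ldt.h:

static void fill_ldt_entry(struct ldt_entry *e, const struct user_desc *info)
{
	if (LDT_empty(info)) {
		e->a = 0;
		e->b = 0;
		return;
	}

	e->a = LDT_entry_a(info);	/* limit 15..0 and base 15..0 */
	e->b = LDT_entry_b(info);	/* base 31..16, flags, limit 19..16 */
}
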
diff --git a/include/asm-um/ldt-x86_64.h b/include/asm-um/host_ldt-x86_64.h
index 96b35aada79a..74a63f7d9a90 100644
--- a/include/asm-um/ldt-x86_64.h
+++ b/include/asm-um/host_ldt-x86_64.h
@@ -1,43 +1,8 @@
1/* 1#ifndef __ASM_HOST_LDT_X86_64_H
2 * Copyright (C) 2004 Fujitsu Siemens Computers GmbH 2#define __ASM_HOST_LDT_X86_64_H
3 * Licensed under the GPL
4 *
5 * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
6 */
7 3
8#ifndef __ASM_LDT_X86_64_H
9#define __ASM_LDT_X86_64_H
10
11#include "asm/semaphore.h"
12#include "asm/arch/ldt.h" 4#include "asm/arch/ldt.h"
13 5
14struct mmu_context_skas;
15extern void ldt_host_info(void);
16extern long init_new_ldt(struct mmu_context_skas * to_mm,
17 struct mmu_context_skas * from_mm);
18extern void free_ldt(struct mmu_context_skas * mm);
19
20#define LDT_PAGES_MAX \
21 ((LDT_ENTRIES * LDT_ENTRY_SIZE)/PAGE_SIZE)
22#define LDT_ENTRIES_PER_PAGE \
23 (PAGE_SIZE/LDT_ENTRY_SIZE)
24#define LDT_DIRECT_ENTRIES \
25 ((LDT_PAGES_MAX*sizeof(void *))/LDT_ENTRY_SIZE)
26
27struct ldt_entry {
28 __u32 a;
29 __u32 b;
30};
31
32typedef struct uml_ldt {
33 int entry_count;
34 struct semaphore semaphore;
35 union {
36 struct ldt_entry * pages[LDT_PAGES_MAX];
37 struct ldt_entry entries[LDT_DIRECT_ENTRIES];
38 } u;
39} uml_ldt_t;
40
41/* 6/*
42 * macros stolen from include/asm-x86_64/desc.h 7 * macros stolen from include/asm-x86_64/desc.h
43 */ 8 */
diff --git a/include/asm-um/ldt-i386.h b/include/asm-um/ldt-i386.h
deleted file mode 100644
index 175722a91164..000000000000
--- a/include/asm-um/ldt-i386.h
+++ /dev/null
@@ -1,69 +0,0 @@
1/*
2 * Copyright (C) 2004 Fujitsu Siemens Computers GmbH
3 * Licensed under the GPL
4 *
5 * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
6 */
7
8#ifndef __ASM_LDT_I386_H
9#define __ASM_LDT_I386_H
10
11#include "asm/semaphore.h"
12#include "asm/arch/ldt.h"
13
14struct mmu_context_skas;
15extern void ldt_host_info(void);
16extern long init_new_ldt(struct mmu_context_skas * to_mm,
17 struct mmu_context_skas * from_mm);
18extern void free_ldt(struct mmu_context_skas * mm);
19
20#define LDT_PAGES_MAX \
21 ((LDT_ENTRIES * LDT_ENTRY_SIZE)/PAGE_SIZE)
22#define LDT_ENTRIES_PER_PAGE \
23 (PAGE_SIZE/LDT_ENTRY_SIZE)
24#define LDT_DIRECT_ENTRIES \
25 ((LDT_PAGES_MAX*sizeof(void *))/LDT_ENTRY_SIZE)
26
27struct ldt_entry {
28 __u32 a;
29 __u32 b;
30};
31
32typedef struct uml_ldt {
33 int entry_count;
34 struct semaphore semaphore;
35 union {
36 struct ldt_entry * pages[LDT_PAGES_MAX];
37 struct ldt_entry entries[LDT_DIRECT_ENTRIES];
38 } u;
39} uml_ldt_t;
40
41/*
42 * macros stolen from include/asm-i386/desc.h
43 */
44#define LDT_entry_a(info) \
45 ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
46
47#define LDT_entry_b(info) \
48 (((info)->base_addr & 0xff000000) | \
49 (((info)->base_addr & 0x00ff0000) >> 16) | \
50 ((info)->limit & 0xf0000) | \
51 (((info)->read_exec_only ^ 1) << 9) | \
52 ((info)->contents << 10) | \
53 (((info)->seg_not_present ^ 1) << 15) | \
54 ((info)->seg_32bit << 22) | \
55 ((info)->limit_in_pages << 23) | \
56 ((info)->useable << 20) | \
57 0x7000)
58
59#define LDT_empty(info) (\
60 (info)->base_addr == 0 && \
61 (info)->limit == 0 && \
62 (info)->contents == 0 && \
63 (info)->read_exec_only == 1 && \
64 (info)->seg_32bit == 0 && \
65 (info)->limit_in_pages == 0 && \
66 (info)->seg_not_present == 1 && \
67 (info)->useable == 0 )
68
69#endif
diff --git a/include/asm-um/ldt.h b/include/asm-um/ldt.h
new file mode 100644
index 000000000000..96f82a456ce6
--- /dev/null
+++ b/include/asm-um/ldt.h
@@ -0,0 +1,41 @@
1/*
2 * Copyright (C) 2004 Fujitsu Siemens Computers GmbH
3 * Licensed under the GPL
4 *
5 * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
6 */
7
8#ifndef __ASM_LDT_H
9#define __ASM_LDT_H
10
11#include "asm/semaphore.h"
12#include "asm/host_ldt.h"
13
14struct mmu_context_skas;
15extern void ldt_host_info(void);
16extern long init_new_ldt(struct mmu_context_skas * to_mm,
17 struct mmu_context_skas * from_mm);
18extern void free_ldt(struct mmu_context_skas * mm);
19
20#define LDT_PAGES_MAX \
21 ((LDT_ENTRIES * LDT_ENTRY_SIZE)/PAGE_SIZE)
22#define LDT_ENTRIES_PER_PAGE \
23 (PAGE_SIZE/LDT_ENTRY_SIZE)
24#define LDT_DIRECT_ENTRIES \
25 ((LDT_PAGES_MAX*sizeof(void *))/LDT_ENTRY_SIZE)
26
27struct ldt_entry {
28 __u32 a;
29 __u32 b;
30};
31
32typedef struct uml_ldt {
33 int entry_count;
34 struct semaphore semaphore;
35 union {
36 struct ldt_entry * pages[LDT_PAGES_MAX];
37 struct ldt_entry entries[LDT_DIRECT_ENTRIES];
38 } u;
39} uml_ldt_t;
40
41#endif
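
The union gives a small LDT an inline entries[] array and a large one an indirect pages[] table; entry_count measured against LDT_DIRECT_ENTRIES decides which view is live. A hedged sketch of the indexing that layout implies; the helper is illustrative and the real logic lives in the UML LDT code, not in this header:

static struct ldt_entry *uml_ldt_slot(uml_ldt_t *ldt, int idx)
{
	if (ldt->entry_count <= LDT_DIRECT_ENTRIES)
		return &ldt->u.entries[idx];

	return &ldt->u.pages[idx / LDT_ENTRIES_PER_PAGE]
			    [idx % LDT_ENTRIES_PER_PAGE];
}
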
diff --git a/include/asm-um/processor-i386.h b/include/asm-um/processor-i386.h
index 4108a579eb92..595f1c3e1e40 100644
--- a/include/asm-um/processor-i386.h
+++ b/include/asm-um/processor-i386.h
@@ -1,4 +1,4 @@
1/* 1/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) 2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
@@ -6,21 +6,48 @@
6#ifndef __UM_PROCESSOR_I386_H 6#ifndef __UM_PROCESSOR_I386_H
7#define __UM_PROCESSOR_I386_H 7#define __UM_PROCESSOR_I386_H
8 8
9#include "linux/string.h"
10#include "asm/host_ldt.h"
11#include "asm/segment.h"
12
9extern int host_has_xmm; 13extern int host_has_xmm;
10extern int host_has_cmov; 14extern int host_has_cmov;
11 15
12/* include faultinfo structure */ 16/* include faultinfo structure */
13#include "sysdep/faultinfo.h" 17#include "sysdep/faultinfo.h"
14 18
19struct uml_tls_struct {
20 struct user_desc tls;
21 unsigned flushed:1;
22 unsigned present:1;
23};
24
15struct arch_thread { 25struct arch_thread {
26 struct uml_tls_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
16 unsigned long debugregs[8]; 27 unsigned long debugregs[8];
17 int debugregs_seq; 28 int debugregs_seq;
18 struct faultinfo faultinfo; 29 struct faultinfo faultinfo;
19}; 30};
20 31
21#define INIT_ARCH_THREAD { .debugregs = { [ 0 ... 7 ] = 0 }, \ 32#define INIT_ARCH_THREAD { \
22 .debugregs_seq = 0, \ 33 .tls_array = { [ 0 ... GDT_ENTRY_TLS_ENTRIES - 1 ] = \
23 .faultinfo = { 0, 0, 0 } } 34 { .present = 0, .flushed = 0 } }, \
35 .debugregs = { [ 0 ... 7 ] = 0 }, \
36 .debugregs_seq = 0, \
37 .faultinfo = { 0, 0, 0 } \
38}
39
40static inline void arch_flush_thread(struct arch_thread *thread)
41{
42 /* Clear any TLS still hanging */
43 memset(&thread->tls_array, 0, sizeof(thread->tls_array));
44}
45
46static inline void arch_copy_thread(struct arch_thread *from,
47 struct arch_thread *to)
48{
49 memcpy(&to->tls_array, &from->tls_array, sizeof(from->tls_array));
50}
24 51
25#include "asm/arch/user.h" 52#include "asm/arch/user.h"
26 53
diff --git a/include/asm-um/processor-x86_64.h b/include/asm-um/processor-x86_64.h
index e1e1255a1d36..10609af376c0 100644
--- a/include/asm-um/processor-x86_64.h
+++ b/include/asm-um/processor-x86_64.h
@@ -28,6 +28,15 @@ extern inline void rep_nop(void)
28 .debugregs_seq = 0, \ 28 .debugregs_seq = 0, \
29 .faultinfo = { 0, 0, 0 } } 29 .faultinfo = { 0, 0, 0 } }
30 30
31static inline void arch_flush_thread(struct arch_thread *thread)
32{
33}
34
35static inline void arch_copy_thread(struct arch_thread *from,
36 struct arch_thread *to)
37{
38}
39
31#include "asm/arch/user.h" 40#include "asm/arch/user.h"
32 41
33#define current_text_addr() \ 42#define current_text_addr() \
diff --git a/include/asm-um/ptrace-generic.h b/include/asm-um/ptrace-generic.h
index 46599ac44037..503484305e67 100644
--- a/include/asm-um/ptrace-generic.h
+++ b/include/asm-um/ptrace-generic.h
@@ -28,7 +28,7 @@ struct pt_regs {
28 union uml_pt_regs regs; 28 union uml_pt_regs regs;
29}; 29};
30 30
31#define EMPTY_REGS { regs : EMPTY_UML_PT_REGS } 31#define EMPTY_REGS { .regs = EMPTY_UML_PT_REGS }
32 32
33#define PT_REGS_IP(r) UPT_IP(&(r)->regs) 33#define PT_REGS_IP(r) UPT_IP(&(r)->regs)
34#define PT_REGS_SP(r) UPT_SP(&(r)->regs) 34#define PT_REGS_SP(r) UPT_SP(&(r)->regs)
@@ -60,17 +60,9 @@ extern void show_regs(struct pt_regs *regs);
60extern void send_sigtrap(struct task_struct *tsk, union uml_pt_regs *regs, 60extern void send_sigtrap(struct task_struct *tsk, union uml_pt_regs *regs,
61 int error_code); 61 int error_code);
62 62
63#endif 63extern int arch_copy_tls(struct task_struct *new);
64extern void clear_flushed_tls(struct task_struct *task);
64 65
65#endif 66#endif
66 67
67/* 68#endif
68 * Overrides for Emacs so that we follow Linus's tabbing style.
69 * Emacs will notice this stuff at the end of the file and automatically
70 * adjust the settings for this buffer only. This must remain at the end
71 * of the file.
72 * ---------------------------------------------------------------------------
73 * Local variables:
74 * c-file-style: "linux"
75 * End:
76 */
diff --git a/include/asm-um/ptrace-i386.h b/include/asm-um/ptrace-i386.h
index fe882b9d917e..30656c962d74 100644
--- a/include/asm-um/ptrace-i386.h
+++ b/include/asm-um/ptrace-i386.h
@@ -8,8 +8,11 @@
8 8
9#define HOST_AUDIT_ARCH AUDIT_ARCH_I386 9#define HOST_AUDIT_ARCH AUDIT_ARCH_I386
10 10
11#include "linux/compiler.h"
11#include "sysdep/ptrace.h" 12#include "sysdep/ptrace.h"
12#include "asm/ptrace-generic.h" 13#include "asm/ptrace-generic.h"
14#include "asm/host_ldt.h"
15#include "choose-mode.h"
13 16
14#define PT_REGS_EAX(r) UPT_EAX(&(r)->regs) 17#define PT_REGS_EAX(r) UPT_EAX(&(r)->regs)
15#define PT_REGS_EBX(r) UPT_EBX(&(r)->regs) 18#define PT_REGS_EBX(r) UPT_EBX(&(r)->regs)
@@ -38,15 +41,31 @@
38 41
39#define user_mode(r) UPT_IS_USER(&(r)->regs) 42#define user_mode(r) UPT_IS_USER(&(r)->regs)
40 43
41#endif 44extern int ptrace_get_thread_area(struct task_struct *child, int idx,
45 struct user_desc __user *user_desc);
42 46
43/* 47extern int ptrace_set_thread_area(struct task_struct *child, int idx,
44 * Overrides for Emacs so that we follow Linus's tabbing style. 48 struct user_desc __user *user_desc);
45 * Emacs will notice this stuff at the end of the file and automatically 49
46 * adjust the settings for this buffer only. This must remain at the end 50extern int do_set_thread_area_skas(struct user_desc *info);
47 * of the file. 51extern int do_get_thread_area_skas(struct user_desc *info);
48 * --------------------------------------------------------------------------- 52
49 * Local variables: 53extern int do_set_thread_area_tt(struct user_desc *info);
50 * c-file-style: "linux" 54extern int do_get_thread_area_tt(struct user_desc *info);
51 * End: 55
52 */ 56extern int arch_switch_tls_skas(struct task_struct *from, struct task_struct *to);
57extern int arch_switch_tls_tt(struct task_struct *from, struct task_struct *to);
58
59static inline int do_get_thread_area(struct user_desc *info)
60{
61 return CHOOSE_MODE_PROC(do_get_thread_area_tt, do_get_thread_area_skas, info);
62}
63
64static inline int do_set_thread_area(struct user_desc *info)
65{
66 return CHOOSE_MODE_PROC(do_set_thread_area_tt, do_set_thread_area_skas, info);
67}
68
69struct task_struct;
70
71#endif
diff --git a/include/asm-um/ptrace-x86_64.h b/include/asm-um/ptrace-x86_64.h
index be51219a8ffe..c894e68b1f96 100644
--- a/include/asm-um/ptrace-x86_64.h
+++ b/include/asm-um/ptrace-x86_64.h
@@ -8,6 +8,8 @@
8#define __UM_PTRACE_X86_64_H 8#define __UM_PTRACE_X86_64_H
9 9
10#include "linux/compiler.h" 10#include "linux/compiler.h"
11#include "asm/errno.h"
12#include "asm/host_ldt.h"
11 13
12#define signal_fault signal_fault_x86_64 14#define signal_fault signal_fault_x86_64
13#define __FRAME_OFFSETS /* Needed to get the R* macros */ 15#define __FRAME_OFFSETS /* Needed to get the R* macros */
@@ -63,15 +65,26 @@ void signal_fault(struct pt_regs_subarch *regs, void *frame, char *where);
63 65
64#define profile_pc(regs) PT_REGS_IP(regs) 66#define profile_pc(regs) PT_REGS_IP(regs)
65 67
66#endif 68static inline int ptrace_get_thread_area(struct task_struct *child, int idx,
69 struct user_desc __user *user_desc)
70{
71 return -ENOSYS;
72}
67 73
68/* 74static inline int ptrace_set_thread_area(struct task_struct *child, int idx,
69 * Overrides for Emacs so that we follow Linus's tabbing style. 75 struct user_desc __user *user_desc)
70 * Emacs will notice this stuff at the end of the file and automatically 76{
71 * adjust the settings for this buffer only. This must remain at the end 77 return -ENOSYS;
72 * of the file. 78}
73 * --------------------------------------------------------------------------- 79
74 * Local variables: 80static inline void arch_switch_to_tt(struct task_struct *from,
75 * c-file-style: "linux" 81 struct task_struct *to)
76 * End: 82{
77 */ 83}
84
85static inline void arch_switch_to_skas(struct task_struct *from,
86 struct task_struct *to)
87{
88}
89
90#endif
diff --git a/include/asm-um/segment.h b/include/asm-um/segment.h
index 55e40301f625..45183fcd10b6 100644
--- a/include/asm-um/segment.h
+++ b/include/asm-um/segment.h
@@ -1,4 +1,10 @@
1#ifndef __UM_SEGMENT_H 1#ifndef __UM_SEGMENT_H
2#define __UM_SEGMENT_H 2#define __UM_SEGMENT_H
3 3
4extern int host_gdt_entry_tls_min;
5
6#define GDT_ENTRY_TLS_ENTRIES 3
7#define GDT_ENTRY_TLS_MIN host_gdt_entry_tls_min
8#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
9
4#endif 10#endif
diff --git a/include/asm-um/thread_info.h b/include/asm-um/thread_info.h
index 17b6b07c4332..f166b9837c6a 100644
--- a/include/asm-um/thread_info.h
+++ b/include/asm-um/thread_info.h
@@ -27,14 +27,14 @@ struct thread_info {
27 27
28#define INIT_THREAD_INFO(tsk) \ 28#define INIT_THREAD_INFO(tsk) \
29{ \ 29{ \
30 task: &tsk, \ 30 .task = &tsk, \
31 exec_domain: &default_exec_domain, \ 31 .exec_domain = &default_exec_domain, \
32 flags: 0, \ 32 .flags = 0, \
33 cpu: 0, \ 33 .cpu = 0, \
34 preempt_count: 1, \ 34 .preempt_count = 1, \
35 addr_limit: KERNEL_DS, \ 35 .addr_limit = KERNEL_DS, \
36 restart_block: { \ 36 .restart_block = { \
37 fn: do_no_restart_syscall, \ 37 .fn = do_no_restart_syscall, \
38 }, \ 38 }, \
39} 39}
40 40
diff --git a/include/asm-um/uaccess.h b/include/asm-um/uaccess.h
index 4e460d6f5ac8..bea5a015f667 100644
--- a/include/asm-um/uaccess.h
+++ b/include/asm-um/uaccess.h
@@ -57,7 +57,7 @@
57({ \ 57({ \
58 const __typeof__((*(ptr))) __user *private_ptr = (ptr); \ 58 const __typeof__((*(ptr))) __user *private_ptr = (ptr); \
59 (access_ok(VERIFY_READ, private_ptr, sizeof(*private_ptr)) ? \ 59 (access_ok(VERIFY_READ, private_ptr, sizeof(*private_ptr)) ? \
60 __get_user(x, private_ptr) : ((x) = 0, -EFAULT)); \ 60 __get_user(x, private_ptr) : ((x) = (__typeof__(*ptr))0, -EFAULT)); \
61}) 61})
62 62
63#define __put_user(x, ptr) \ 63#define __put_user(x, ptr) \
diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h
index bf148037d4e5..cd17945bf218 100644
--- a/include/asm-x86_64/local.h
+++ b/include/asm-x86_64/local.h
@@ -5,7 +5,7 @@
5 5
6typedef struct 6typedef struct
7{ 7{
8 volatile unsigned long counter; 8 volatile long counter;
9} local_t; 9} local_t;
10 10
11#define LOCAL_INIT(i) { (i) } 11#define LOCAL_INIT(i) { (i) }
@@ -13,7 +13,7 @@ typedef struct
13#define local_read(v) ((v)->counter) 13#define local_read(v) ((v)->counter)
14#define local_set(v,i) (((v)->counter) = (i)) 14#define local_set(v,i) (((v)->counter) = (i))
15 15
16static __inline__ void local_inc(local_t *v) 16static inline void local_inc(local_t *v)
17{ 17{
18 __asm__ __volatile__( 18 __asm__ __volatile__(
19 "incq %0" 19 "incq %0"
@@ -21,7 +21,7 @@ static __inline__ void local_inc(local_t *v)
21 :"m" (v->counter)); 21 :"m" (v->counter));
22} 22}
23 23
24static __inline__ void local_dec(local_t *v) 24static inline void local_dec(local_t *v)
25{ 25{
26 __asm__ __volatile__( 26 __asm__ __volatile__(
27 "decq %0" 27 "decq %0"
@@ -29,7 +29,7 @@ static __inline__ void local_dec(local_t *v)
29 :"m" (v->counter)); 29 :"m" (v->counter));
30} 30}
31 31
32static __inline__ void local_add(unsigned int i, local_t *v) 32static inline void local_add(long i, local_t *v)
33{ 33{
34 __asm__ __volatile__( 34 __asm__ __volatile__(
35 "addq %1,%0" 35 "addq %1,%0"
@@ -37,7 +37,7 @@ static __inline__ void local_add(unsigned int i, local_t *v)
37 :"ir" (i), "m" (v->counter)); 37 :"ir" (i), "m" (v->counter));
38} 38}
39 39
40static __inline__ void local_sub(unsigned int i, local_t *v) 40static inline void local_sub(long i, local_t *v)
41{ 41{
42 __asm__ __volatile__( 42 __asm__ __volatile__(
43 "subq %1,%0" 43 "subq %1,%0"
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index bb9e54322322..75e91f5b6a04 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -19,20 +19,25 @@ struct fb_info;
19struct backlight_properties { 19struct backlight_properties {
20 /* Owner module */ 20 /* Owner module */
21 struct module *owner; 21 struct module *owner;
22 /* Get the backlight power status (0: full on, 1..3: power saving 22
23 modes; 4: full off), see FB_BLANK_XXX */ 23 /* Notify the backlight driver some property has changed */
24 int (*get_power)(struct backlight_device *); 24 int (*update_status)(struct backlight_device *);
25 /* Enable or disable power to the LCD (0: on; 4: off, see FB_BLANK_XXX) */ 25 /* Return the current backlight brightness (accounting for power,
26 int (*set_power)(struct backlight_device *, int power); 26 fb_blank etc.) */
27 /* Maximal value for brightness (read-only) */
28 int max_brightness;
29 /* Get current backlight brightness */
30 int (*get_brightness)(struct backlight_device *); 27 int (*get_brightness)(struct backlight_device *);
31 /* Set backlight brightness (0..max_brightness) */
32 int (*set_brightness)(struct backlight_device *, int brightness);
33 /* Check if given framebuffer device is the one bound to this backlight; 28 /* Check if given framebuffer device is the one bound to this backlight;
34 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */ 29 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
35 int (*check_fb)(struct fb_info *); 30 int (*check_fb)(struct fb_info *);
31
32 /* Current User requested brightness (0 - max_brightness) */
33 int brightness;
34 /* Maximal value for brightness (read-only) */
35 int max_brightness;
36 /* Current FB Power mode (0: full on, 1..3: power saving
37 modes; 4: full off), see FB_BLANK_XXX */
38 int power;
39 /* FB Blanking active? (values as for power) */
40 int fb_blank;
36}; 41};
37 42
38struct backlight_device { 43struct backlight_device {
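The reworked backlight_properties folds the old get_power/set_power/set_brightness hooks into a single update_status() callback, with the requested brightness, power and fb_blank state now kept in the structure itself. A minimal driver-side sketch under those assumptions (mybl_write_hw() is a hypothetical hardware accessor, and registration via backlight_device_register() is omitted):

    #include <linux/module.h>
    #include <linux/fb.h>
    #include <linux/backlight.h>

    static void mybl_write_hw(int level);   /* hypothetical register write */
    static struct backlight_properties mybl_props;

    static int mybl_update_status(struct backlight_device *bd)
    {
            int level = mybl_props.brightness;

            /* Any power-saving or blanked state forces the panel off. */
            if (mybl_props.power != FB_BLANK_UNBLANK ||
                mybl_props.fb_blank != FB_BLANK_UNBLANK)
                    level = 0;

            mybl_write_hw(level);
            return 0;
    }

    static int mybl_get_brightness(struct backlight_device *bd)
    {
            return mybl_props.brightness;   /* nothing rescales it in hardware */
    }

    static struct backlight_properties mybl_props = {
            .owner          = THIS_MODULE,
            .update_status  = mybl_update_status,
            .get_brightness = mybl_get_brightness,
            .max_brightness = 255,
    };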
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index d10bd30c337e..836325ee0931 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -275,6 +275,7 @@ extern void d_move(struct dentry *, struct dentry *);
275/* appendix may either be NULL or be used for transname suffixes */ 275/* appendix may either be NULL or be used for transname suffixes */
276extern struct dentry * d_lookup(struct dentry *, struct qstr *); 276extern struct dentry * d_lookup(struct dentry *, struct qstr *);
277extern struct dentry * __d_lookup(struct dentry *, struct qstr *); 277extern struct dentry * __d_lookup(struct dentry *, struct qstr *);
278extern struct dentry * d_hash_and_lookup(struct dentry *, struct qstr *);
278 279
279/* validate "insecure" dentry pointer */ 280/* validate "insecure" dentry pointer */
280extern int d_validate(struct dentry *, struct dentry *); 281extern int d_validate(struct dentry *, struct dentry *);
diff --git a/include/linux/fadvise.h b/include/linux/fadvise.h
index b2913bba35d8..e8e747139b9a 100644
--- a/include/linux/fadvise.h
+++ b/include/linux/fadvise.h
@@ -18,10 +18,4 @@
18#define POSIX_FADV_NOREUSE 5 /* Data will be accessed once. */ 18#define POSIX_FADV_NOREUSE 5 /* Data will be accessed once. */
19#endif 19#endif
20 20
21/*
22 * Linux-specific fadvise() extensions:
23 */
24#define LINUX_FADV_ASYNC_WRITE 32 /* Start writeout on range */
25#define LINUX_FADV_WRITE_WAIT 33 /* Wait upon writeout to range */
26
27#endif /* FADVISE_H_INCLUDED */ 21#endif /* FADVISE_H_INCLUDED */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index d03fadfcafe3..315d89740ddf 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -839,12 +839,10 @@ struct fb_info {
839#define FB_LEFT_POS(bpp) (32 - bpp) 839#define FB_LEFT_POS(bpp) (32 - bpp)
840#define FB_SHIFT_HIGH(val, bits) ((val) >> (bits)) 840#define FB_SHIFT_HIGH(val, bits) ((val) >> (bits))
841#define FB_SHIFT_LOW(val, bits) ((val) << (bits)) 841#define FB_SHIFT_LOW(val, bits) ((val) << (bits))
842#define FB_BIT_NR(b) (7 - (b))
843#else 842#else
844#define FB_LEFT_POS(bpp) (0) 843#define FB_LEFT_POS(bpp) (0)
845#define FB_SHIFT_HIGH(val, bits) ((val) << (bits)) 844#define FB_SHIFT_HIGH(val, bits) ((val) << (bits))
846#define FB_SHIFT_LOW(val, bits) ((val) >> (bits)) 845#define FB_SHIFT_LOW(val, bits) ((val) >> (bits))
847#define FB_BIT_NR(b) (b)
848#endif 846#endif
849 847
850 /* 848 /*
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e5ce62e9676d..16be62041bfe 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -757,6 +757,13 @@ extern void send_sigio(struct fown_struct *fown, int fd, int band);
757extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg); 757extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
758extern int fcntl_getlease(struct file *filp); 758extern int fcntl_getlease(struct file *filp);
759 759
760/* fs/sync.c */
761#define SYNC_FILE_RANGE_WAIT_BEFORE 1
762#define SYNC_FILE_RANGE_WRITE 2
763#define SYNC_FILE_RANGE_WAIT_AFTER 4
764extern int do_sync_file_range(struct file *file, loff_t offset, loff_t endbyte,
765 int flags);
766
760/* fs/locks.c */ 767/* fs/locks.c */
761extern void locks_init_lock(struct file_lock *); 768extern void locks_init_lock(struct file_lock *);
762extern void locks_copy_lock(struct file_lock *, struct file_lock *); 769extern void locks_copy_lock(struct file_lock *, struct file_lock *);
@@ -857,7 +864,7 @@ struct super_block {
857 */ 864 */
858 struct mutex s_vfs_rename_mutex; /* Kludge */ 865 struct mutex s_vfs_rename_mutex; /* Kludge */
859 866
860 /* Granuality of c/m/atime in ns. 867 /* Granularity of c/m/atime in ns.
861 Cannot be worse than a second */ 868 Cannot be worse than a second */
862 u32 s_time_gran; 869 u32 s_time_gran;
863}; 870};
@@ -1416,6 +1423,7 @@ extern void bd_release_from_disk(struct block_device *, struct gendisk *);
1416#endif 1423#endif
1417 1424
1418/* fs/char_dev.c */ 1425/* fs/char_dev.c */
1426#define CHRDEV_MAJOR_HASH_SIZE 255
1419extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *); 1427extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
1420extern int register_chrdev_region(dev_t, unsigned, const char *); 1428extern int register_chrdev_region(dev_t, unsigned, const char *);
1421extern int register_chrdev(unsigned int, const char *, 1429extern int register_chrdev(unsigned int, const char *,
@@ -1423,25 +1431,17 @@ extern int register_chrdev(unsigned int, const char *,
1423extern int unregister_chrdev(unsigned int, const char *); 1431extern int unregister_chrdev(unsigned int, const char *);
1424extern void unregister_chrdev_region(dev_t, unsigned); 1432extern void unregister_chrdev_region(dev_t, unsigned);
1425extern int chrdev_open(struct inode *, struct file *); 1433extern int chrdev_open(struct inode *, struct file *);
1426extern int get_chrdev_list(char *); 1434extern void chrdev_show(struct seq_file *,off_t);
1427extern void *acquire_chrdev_list(void);
1428extern int count_chrdev_list(void);
1429extern void *get_next_chrdev(void *);
1430extern int get_chrdev_info(void *, int *, char **);
1431extern void release_chrdev_list(void *);
1432 1435
1433/* fs/block_dev.c */ 1436/* fs/block_dev.c */
1437#define BLKDEV_MAJOR_HASH_SIZE 255
1434#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */ 1438#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
1435extern const char *__bdevname(dev_t, char *buffer); 1439extern const char *__bdevname(dev_t, char *buffer);
1436extern const char *bdevname(struct block_device *bdev, char *buffer); 1440extern const char *bdevname(struct block_device *bdev, char *buffer);
1437extern struct block_device *lookup_bdev(const char *); 1441extern struct block_device *lookup_bdev(const char *);
1438extern struct block_device *open_bdev_excl(const char *, int, void *); 1442extern struct block_device *open_bdev_excl(const char *, int, void *);
1439extern void close_bdev_excl(struct block_device *); 1443extern void close_bdev_excl(struct block_device *);
1440extern void *acquire_blkdev_list(void); 1444extern void blkdev_show(struct seq_file *,off_t);
1441extern int count_blkdev_list(void);
1442extern void *get_next_blkdev(void *);
1443extern int get_blkdev_info(void *, int *, char **);
1444extern void release_blkdev_list(void *);
1445 1445
1446extern void init_special_inode(struct inode *, umode_t, dev_t); 1446extern void init_special_inode(struct inode *, umode_t, dev_t);
1447 1447
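The new SYNC_FILE_RANGE_* flags and do_sync_file_range() (paired with the sys_sync_file_range() declaration added to syscalls.h further down) let a caller start, and optionally wait for, writeout on a byte range rather than on the whole file. A rough in-kernel sketch, assuming endbyte names the last byte of the range and that combining the three flags gives write-and-wait semantics:

    #include <linux/fs.h>

    /* Sketch: push bytes [0, 1 MiB) of an already-open file to disk and
     * wait for the writeout to complete.  Using only SYNC_FILE_RANGE_WRITE
     * would start the I/O without blocking on it. */
    static int flush_first_megabyte(struct file *filp)
    {
            return do_sync_file_range(filp, 0, 1024 * 1024 - 1,
                                      SYNC_FILE_RANGE_WAIT_BEFORE |
                                      SYNC_FILE_RANGE_WRITE |
                                      SYNC_FILE_RANGE_WAIT_AFTER);
    }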
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
index 9c8e6da2393b..71e7b2847cb3 100644
--- a/include/linux/gameport.h
+++ b/include/linux/gameport.h
@@ -11,6 +11,7 @@
11 11
12#include <asm/io.h> 12#include <asm/io.h>
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/mutex.h>
14#include <linux/device.h> 15#include <linux/device.h>
15#include <linux/timer.h> 16#include <linux/timer.h>
16 17
@@ -40,7 +41,7 @@ struct gameport {
40 struct gameport *parent, *child; 41 struct gameport *parent, *child;
41 42
42 struct gameport_driver *drv; 43 struct gameport_driver *drv;
43 struct semaphore drv_sem; /* protects serio->drv so attributes can pin driver */ 44 struct mutex drv_mutex; /* protects serio->drv so attributes can pin driver */
44 45
45 struct device dev; 46 struct device dev;
46 unsigned int registered; /* port has been fully registered with driver core */ 47 unsigned int registered; /* port has been fully registered with driver core */
@@ -137,12 +138,12 @@ static inline void gameport_set_drvdata(struct gameport *gameport, void *data)
137 */ 138 */
138static inline int gameport_pin_driver(struct gameport *gameport) 139static inline int gameport_pin_driver(struct gameport *gameport)
139{ 140{
140 return down_interruptible(&gameport->drv_sem); 141 return mutex_lock_interruptible(&gameport->drv_mutex);
141} 142}
142 143
143static inline void gameport_unpin_driver(struct gameport *gameport) 144static inline void gameport_unpin_driver(struct gameport *gameport)
144{ 145{
145 up(&gameport->drv_sem); 146 mutex_unlock(&gameport->drv_mutex);
146} 147}
147 148
148void __gameport_register_driver(struct gameport_driver *drv, struct module *owner); 149void __gameport_register_driver(struct gameport_driver *drv, struct module *owner);
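With drv_sem converted to drv_mutex, the pin/unpin helpers keep their meaning but now sit on mutex_lock_interruptible()/mutex_unlock(). A hedged sketch of the intended use from attribute code (the description field on gameport_driver is assumed from memory, not taken from this patch):

    #include <linux/kernel.h>
    #include <linux/gameport.h>

    /* Sketch: report which driver is currently bound to a port without
     * racing against unbinding. */
    static ssize_t show_bound_driver(struct gameport *gameport, char *buf)
    {
            int error = gameport_pin_driver(gameport);  /* may return -EINTR */
            ssize_t len;

            if (error)
                    return error;
            len = sprintf(buf, "%s\n",
                          gameport->drv ? gameport->drv->description : "(none)");
            gameport_unpin_driver(gameport);
            return len;
    }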
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 93830158348e..306acf1dc6d5 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -58,6 +58,19 @@ struct hrtimer {
58}; 58};
59 59
60/** 60/**
61 * struct hrtimer_sleeper - simple sleeper structure
62 *
63 * @timer: embedded timer structure
64 * @task: task to wake up
65 *
66 * task is set to NULL, when the timer expires.
67 */
68struct hrtimer_sleeper {
69 struct hrtimer timer;
70 struct task_struct *task;
71};
72
73/**
61 * struct hrtimer_base - the timer base for a specific clock 74 * struct hrtimer_base - the timer base for a specific clock
62 * 75 *
63 * @index: clock type index for per_cpu support when moving a timer 76 * @index: clock type index for per_cpu support when moving a timer
@@ -67,7 +80,7 @@ struct hrtimer {
67 * @first: pointer to the timer node which expires first 80 * @first: pointer to the timer node which expires first
68 * @resolution: the resolution of the clock, in nanoseconds 81 * @resolution: the resolution of the clock, in nanoseconds
69 * @get_time: function to retrieve the current time of the clock 82 * @get_time: function to retrieve the current time of the clock
70 * @get_sofirq_time: function to retrieve the current time from the softirq 83 * @get_softirq_time: function to retrieve the current time from the softirq
71 * @curr_timer: the timer which is executing a callback right now 84 * @curr_timer: the timer which is executing a callback right now
72 * @softirq_time: the time when running the hrtimer queue in the softirq 85 * @softirq_time: the time when running the hrtimer queue in the softirq
73 */ 86 */
@@ -127,6 +140,9 @@ extern long hrtimer_nanosleep(struct timespec *rqtp,
127 const enum hrtimer_mode mode, 140 const enum hrtimer_mode mode,
128 const clockid_t clockid); 141 const clockid_t clockid);
129 142
143extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
144 struct task_struct *tsk);
145
130/* Soft interrupt function to run the hrtimer queues: */ 146/* Soft interrupt function to run the hrtimer queues: */
131extern void hrtimer_run_queues(void); 147extern void hrtimer_run_queues(void);
132 148
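hrtimer_sleeper ties a timer to the task it should wake, and hrtimer_init_sleeper() installs the wakeup callback; because the task pointer is cleared on expiry, sleep loops can test it to tell "timer fired" apart from "woken early". A rough sketch of the pattern (the HRTIMER_REL mode constant and the exact hrtimer_start()/hrtimer_cancel() shapes are assumptions from kernels of this era, not defined by this patch):

    #include <linux/hrtimer.h>
    #include <linux/sched.h>

    /* Sketch: put the current task to sleep for roughly 'delta'. */
    static void sleep_for(ktime_t delta)
    {
            struct hrtimer_sleeper sl;

            hrtimer_init(&sl.timer, CLOCK_MONOTONIC, HRTIMER_REL);
            hrtimer_init_sleeper(&sl, current);

            set_current_state(TASK_INTERRUPTIBLE);
            hrtimer_start(&sl.timer, delta, HRTIMER_REL);
            if (sl.task)                    /* timer has not expired yet */
                    schedule();
            hrtimer_cancel(&sl.timer);
            __set_current_state(TASK_RUNNING);
    }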
diff --git a/include/linux/input.h b/include/linux/input.h
index 1d4e341b72e6..b0e612dda0cf 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -421,7 +421,7 @@ struct input_absinfo {
421#define BTN_GEAR_UP 0x151 421#define BTN_GEAR_UP 0x151
422 422
423#define KEY_OK 0x160 423#define KEY_OK 0x160
424#define KEY_SELECT 0x161 424#define KEY_SELECT 0x161
425#define KEY_GOTO 0x162 425#define KEY_GOTO 0x162
426#define KEY_CLEAR 0x163 426#define KEY_CLEAR 0x163
427#define KEY_POWER2 0x164 427#define KEY_POWER2 0x164
@@ -512,6 +512,15 @@ struct input_absinfo {
512#define KEY_FN_S 0x1e3 512#define KEY_FN_S 0x1e3
513#define KEY_FN_B 0x1e4 513#define KEY_FN_B 0x1e4
514 514
515#define KEY_BRL_DOT1 0x1f1
516#define KEY_BRL_DOT2 0x1f2
517#define KEY_BRL_DOT3 0x1f3
518#define KEY_BRL_DOT4 0x1f4
519#define KEY_BRL_DOT5 0x1f5
520#define KEY_BRL_DOT6 0x1f6
521#define KEY_BRL_DOT7 0x1f7
522#define KEY_BRL_DOT8 0x1f8
523
515/* We avoid low common keys in module aliases so they don't get huge. */ 524/* We avoid low common keys in module aliases so they don't get huge. */
516#define KEY_MIN_INTERESTING KEY_MUTE 525#define KEY_MIN_INTERESTING KEY_MUTE
517#define KEY_MAX 0x1ff 526#define KEY_MAX 0x1ff
@@ -929,7 +938,7 @@ struct input_dev {
929 938
930 struct input_handle *grab; 939 struct input_handle *grab;
931 940
932 struct semaphore sem; /* serializes open and close operations */ 941 struct mutex mutex; /* serializes open and close operations */
933 unsigned int users; 942 unsigned int users;
934 943
935 struct class_device cdev; 944 struct class_device cdev;
@@ -995,11 +1004,6 @@ static inline void init_input_dev(struct input_dev *dev)
995 1004
996struct input_dev *input_allocate_device(void); 1005struct input_dev *input_allocate_device(void);
997 1006
998static inline void input_free_device(struct input_dev *dev)
999{
1000 kfree(dev);
1001}
1002
1003static inline struct input_dev *input_get_device(struct input_dev *dev) 1007static inline struct input_dev *input_get_device(struct input_dev *dev)
1004{ 1008{
1005 return to_input_dev(class_device_get(&dev->cdev)); 1009 return to_input_dev(class_device_get(&dev->cdev));
@@ -1010,6 +1014,11 @@ static inline void input_put_device(struct input_dev *dev)
1010 class_device_put(&dev->cdev); 1014 class_device_put(&dev->cdev);
1011} 1015}
1012 1016
1017static inline void input_free_device(struct input_dev *dev)
1018{
1019 input_put_device(dev);
1020}
1021
1013int input_register_device(struct input_dev *); 1022int input_register_device(struct input_dev *);
1014void input_unregister_device(struct input_dev *); 1023void input_unregister_device(struct input_dev *);
1015 1024
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 53571288a9fc..6d9c7e4da472 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -82,6 +82,13 @@ struct ipmi_smi_handlers
82{ 82{
83 struct module *owner; 83 struct module *owner;
84 84
85 /* The low-level interface cannot start sending messages to
86 the upper layer until this function is called. This may
87 not be NULL, the lower layer must take the interface from
88 this call. */
89 int (*start_processing)(void *send_info,
90 ipmi_smi_t new_intf);
91
85 /* Called to enqueue an SMI message to be sent. This 92 /* Called to enqueue an SMI message to be sent. This
86 operation is not allowed to fail. If an error occurs, it 93 operation is not allowed to fail. If an error occurs, it
87 should report back the error in a received message. It may 94 should report back the error in a received message. It may
@@ -157,13 +164,16 @@ static inline void ipmi_demangle_device_id(unsigned char *data,
157} 164}
158 165
159/* Add a low-level interface to the IPMI driver. Note that if the 166/* Add a low-level interface to the IPMI driver. Note that if the
160 interface doesn't know its slave address, it should pass in zero. */ 167 interface doesn't know its slave address, it should pass in zero.
168 The low-level interface should not deliver any messages to the
169 upper layer until the start_processing() function in the handlers
170 is called, and the lower layer must get the interface from that
171 call. */
161int ipmi_register_smi(struct ipmi_smi_handlers *handlers, 172int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
162 void *send_info, 173 void *send_info,
163 struct ipmi_device_id *device_id, 174 struct ipmi_device_id *device_id,
164 struct device *dev, 175 struct device *dev,
165 unsigned char slave_addr, 176 unsigned char slave_addr);
166 ipmi_smi_t *intf);
167 177
168/* 178/*
169 * Remove a low-level interface from the IPMI driver. This will 179 * Remove a low-level interface from the IPMI driver. This will
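The new start_processing() handler reverses the old hand-off: ipmi_register_smi() no longer returns the interface through an out parameter; the lower layer instead receives it in start_processing() and must not deliver messages upward before that call. A lower-layer sketch (struct my_smi_info and its fields are hypothetical):

    #include <linux/module.h>
    #include <linux/ipmi_smi.h>

    struct my_smi_info {
            ipmi_smi_t intf;        /* valid only after start_processing() */
            /* ... hardware state ... */
    };

    static int my_start_processing(void *send_info, ipmi_smi_t new_intf)
    {
            struct my_smi_info *info = send_info;

            info->intf = new_intf;  /* deliveries to the upper layer may begin now */
            return 0;
    }

    static struct ipmi_smi_handlers my_handlers = {
            .owner            = THIS_MODULE,
            .start_processing = my_start_processing,
            /* .sender, .request_events, ... unchanged from before */
    };

Registration then drops the trailing ipmi_smi_t pointer, becoming ipmi_register_smi(&my_handlers, info, &device_id, dev, slave_addr).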
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
index e87c32a5c86a..4eb851ece080 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -135,6 +135,8 @@ static inline void chg_vc_kbd_led(struct kbd_struct * kbd, int flag)
135 135
136#define U(x) ((x) ^ 0xf000) 136#define U(x) ((x) ^ 0xf000)
137 137
138#define BRL_UC_ROW 0x2800
139
138/* keyboard.c */ 140/* keyboard.c */
139 141
140struct console; 142struct console;
diff --git a/include/linux/keyboard.h b/include/linux/keyboard.h
index 08488042d74a..de76843bbe8a 100644
--- a/include/linux/keyboard.h
+++ b/include/linux/keyboard.h
@@ -44,6 +44,7 @@ extern unsigned short plain_map[NR_KEYS];
44#define KT_ASCII 9 44#define KT_ASCII 9
45#define KT_LOCK 10 45#define KT_LOCK 10
46#define KT_SLOCK 12 46#define KT_SLOCK 12
47#define KT_BRL 14
47 48
48#define K(t,v) (((t)<<8)|(v)) 49#define K(t,v) (((t)<<8)|(v))
49#define KTYP(x) ((x) >> 8) 50#define KTYP(x) ((x) >> 8)
@@ -427,5 +428,17 @@ extern unsigned short plain_map[NR_KEYS];
427 428
428#define NR_LOCK 8 429#define NR_LOCK 8
429 430
431#define K_BRL_BLANK K(KT_BRL, 0)
432#define K_BRL_DOT1 K(KT_BRL, 1)
433#define K_BRL_DOT2 K(KT_BRL, 2)
434#define K_BRL_DOT3 K(KT_BRL, 3)
435#define K_BRL_DOT4 K(KT_BRL, 4)
436#define K_BRL_DOT5 K(KT_BRL, 5)
437#define K_BRL_DOT6 K(KT_BRL, 6)
438#define K_BRL_DOT7 K(KT_BRL, 7)
439#define K_BRL_DOT8 K(KT_BRL, 8)
440
441#define NR_BRL 9
442
430#define MAX_DIACR 256 443#define MAX_DIACR 256
431#endif 444#endif
diff --git a/include/linux/leds.h b/include/linux/leds.h
new file mode 100644
index 000000000000..4617e75903b0
--- /dev/null
+++ b/include/linux/leds.h
@@ -0,0 +1,111 @@
1/*
2 * Driver model for leds and led triggers
3 *
4 * Copyright (C) 2005 John Lenz <lenz@cs.wisc.edu>
5 * Copyright (C) 2005 Richard Purdie <rpurdie@openedhand.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 */
12#ifndef __LINUX_LEDS_H_INCLUDED
13#define __LINUX_LEDS_H_INCLUDED
14
15struct device;
16struct class_device;
17/*
18 * LED Core
19 */
20
21enum led_brightness {
22 LED_OFF = 0,
23 LED_HALF = 127,
24 LED_FULL = 255,
25};
26
27struct led_classdev {
28 const char *name;
29 int brightness;
30 int flags;
31#define LED_SUSPENDED (1 << 0)
32
33 /* A function to set the brightness of the led */
34 void (*brightness_set)(struct led_classdev *led_cdev,
35 enum led_brightness brightness);
36
37 struct class_device *class_dev;
38 /* LED Device linked list */
39 struct list_head node;
40
41 /* Trigger data */
42 char *default_trigger;
43#ifdef CONFIG_LEDS_TRIGGERS
44 rwlock_t trigger_lock;
45 /* Protects the trigger data below */
46
47 struct led_trigger *trigger;
48 struct list_head trig_list;
49 void *trigger_data;
50#endif
51};
52
53extern int led_classdev_register(struct device *parent,
54 struct led_classdev *led_cdev);
55extern void led_classdev_unregister(struct led_classdev *led_cdev);
56extern void led_classdev_suspend(struct led_classdev *led_cdev);
57extern void led_classdev_resume(struct led_classdev *led_cdev);
58
59/*
60 * LED Triggers
61 */
62#ifdef CONFIG_LEDS_TRIGGERS
63
64#define TRIG_NAME_MAX 50
65
66struct led_trigger {
67 /* Trigger Properties */
68 const char *name;
69 void (*activate)(struct led_classdev *led_cdev);
70 void (*deactivate)(struct led_classdev *led_cdev);
71
72 /* LEDs under control by this trigger (for simple triggers) */
73 rwlock_t leddev_list_lock;
74 struct list_head led_cdevs;
75
76 /* Link to next registered trigger */
77 struct list_head next_trig;
78};
79
80/* Registration functions for complex triggers */
81extern int led_trigger_register(struct led_trigger *trigger);
82extern void led_trigger_unregister(struct led_trigger *trigger);
83
84/* Registration functions for simple triggers */
85#define DEFINE_LED_TRIGGER(x) static struct led_trigger *x;
86#define DEFINE_LED_TRIGGER_GLOBAL(x) struct led_trigger *x;
87extern void led_trigger_register_simple(const char *name,
88 struct led_trigger **trigger);
89extern void led_trigger_unregister_simple(struct led_trigger *trigger);
90extern void led_trigger_event(struct led_trigger *trigger,
91 enum led_brightness event);
92
93#else
94
95/* Triggers aren't active - null macros */
96#define DEFINE_LED_TRIGGER(x)
97#define DEFINE_LED_TRIGGER_GLOBAL(x)
98#define led_trigger_register_simple(x, y) do {} while(0)
99#define led_trigger_unregister_simple(x) do {} while(0)
100#define led_trigger_event(x, y) do {} while(0)
101
102#endif
103
104/* Trigger specific functions */
105#ifdef CONFIG_LEDS_TRIGGER_IDE_DISK
106extern void ledtrig_ide_activity(void);
107#else
108#define ledtrig_ide_activity() do {} while(0)
109#endif
110
111#endif /* __LINUX_LEDS_H_INCLUDED */
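The LED class reduces a driver to a name plus a brightness_set() hook, and triggers fan events out to every LED attached to them via led_trigger_event(). A minimal sketch of both halves (myled_set_hw() is a hypothetical hardware accessor and the NULL parent is for brevity only):

    #include <linux/module.h>
    #include <linux/leds.h>

    static void myled_set_hw(int level);    /* hypothetical register write */

    static void myled_brightness_set(struct led_classdev *led_cdev,
                                     enum led_brightness value)
    {
            myled_set_hw(value);
    }

    static struct led_classdev myled = {
            .name            = "mydev-status",
            .brightness_set  = myled_brightness_set,
            .default_trigger = "ide-disk",  /* picked up if that trigger exists */
    };

    DEFINE_LED_TRIGGER(my_activity_trigger);

    static int __init myled_init(void)
    {
            int ret = led_classdev_register(NULL, &myled);

            if (!ret)
                    led_trigger_register_simple("my-activity",
                                                &my_activity_trigger);
            return ret;
    }

    /* Called from the driver's I/O path to pulse any attached LEDs. */
    static void myled_note_activity(void)
    {
            led_trigger_event(my_activity_trigger, LED_FULL);
    }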
diff --git a/include/linux/libps2.h b/include/linux/libps2.h
index a710bddda4eb..08a450a9dbf7 100644
--- a/include/linux/libps2.h
+++ b/include/linux/libps2.h
@@ -28,7 +28,7 @@ struct ps2dev {
28 struct serio *serio; 28 struct serio *serio;
29 29
30 /* Ensures that only one command is executing at a time */ 30 /* Ensures that only one command is executing at a time */
31 struct semaphore cmd_sem; 31 struct mutex cmd_mutex;
32 32
33 /* Used to signal completion from interrupt handler */ 33 /* Used to signal completion from interrupt handler */
34 wait_queue_head_t wait; 34 wait_queue_head_t wait;
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 7d09962c3c0b..ff0a64073ebc 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -12,7 +12,7 @@ extern void migrate_page_copy(struct page *, struct page *);
12extern int migrate_page_remove_references(struct page *, struct page *, int); 12extern int migrate_page_remove_references(struct page *, struct page *, int);
13extern int migrate_pages(struct list_head *l, struct list_head *t, 13extern int migrate_pages(struct list_head *l, struct list_head *t,
14 struct list_head *moved, struct list_head *failed); 14 struct list_head *moved, struct list_head *failed);
15int migrate_pages_to(struct list_head *pagelist, 15extern int migrate_pages_to(struct list_head *pagelist,
16 struct vm_area_struct *vma, int dest); 16 struct vm_area_struct *vma, int dest);
17extern int fail_migrate_page(struct page *, struct page *); 17extern int fail_migrate_page(struct page *, struct page *);
18 18
@@ -26,6 +26,9 @@ static inline int putback_lru_pages(struct list_head *l) { return 0; }
26static inline int migrate_pages(struct list_head *l, struct list_head *t, 26static inline int migrate_pages(struct list_head *l, struct list_head *t,
27 struct list_head *moved, struct list_head *failed) { return -ENOSYS; } 27 struct list_head *moved, struct list_head *failed) { return -ENOSYS; }
28 28
29static inline int migrate_pages_to(struct list_head *pagelist,
30 struct vm_area_struct *vma, int dest) { return 0; }
31
29static inline int migrate_prep(void) { return -ENOSYS; } 32static inline int migrate_prep(void) { return -ENOSYS; }
30 33
31/* Possible settings for the migrate_page() method in address_operations */ 34/* Possible settings for the migrate_page() method in address_operations */
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
index f46afec6fbf8..72fc68c5ee96 100644
--- a/include/linux/mtd/blktrans.h
+++ b/include/linux/mtd/blktrans.h
@@ -10,7 +10,7 @@
10#ifndef __MTD_TRANS_H__ 10#ifndef __MTD_TRANS_H__
11#define __MTD_TRANS_H__ 11#define __MTD_TRANS_H__
12 12
13#include <asm/semaphore.h> 13#include <linux/mutex.h>
14 14
15struct hd_geometry; 15struct hd_geometry;
16struct mtd_info; 16struct mtd_info;
@@ -22,7 +22,7 @@ struct mtd_blktrans_dev {
22 struct mtd_blktrans_ops *tr; 22 struct mtd_blktrans_ops *tr;
23 struct list_head list; 23 struct list_head list;
24 struct mtd_info *mtd; 24 struct mtd_info *mtd;
25 struct semaphore sem; 25 struct mutex lock;
26 int devnum; 26 int devnum;
27 int blksize; 27 int blksize;
28 unsigned long size; 28 unsigned long size;
diff --git a/include/linux/mtd/doc2000.h b/include/linux/mtd/doc2000.h
index 386a52cf8b1b..9addd073bf15 100644
--- a/include/linux/mtd/doc2000.h
+++ b/include/linux/mtd/doc2000.h
@@ -15,7 +15,7 @@
15#define __MTD_DOC2000_H__ 15#define __MTD_DOC2000_H__
16 16
17#include <linux/mtd/mtd.h> 17#include <linux/mtd/mtd.h>
18#include <asm/semaphore.h> 18#include <linux/mutex.h>
19 19
20#define DoC_Sig1 0 20#define DoC_Sig1 0
21#define DoC_Sig2 1 21#define DoC_Sig2 1
@@ -187,7 +187,7 @@ struct DiskOnChip {
187 int numchips; 187 int numchips;
188 struct Nand *chips; 188 struct Nand *chips;
189 struct mtd_info *nextdoc; 189 struct mtd_info *nextdoc;
190 struct semaphore lock; 190 struct mutex lock;
191}; 191};
192 192
193int doc_decode_ecc(unsigned char sector[512], unsigned char ecc1[6]); 193int doc_decode_ecc(unsigned char sector[512], unsigned char ecc1[6]);
diff --git a/include/linux/mtd/inftl.h b/include/linux/mtd/inftl.h
index 0268125a6271..d7eaa40e5ab0 100644
--- a/include/linux/mtd/inftl.h
+++ b/include/linux/mtd/inftl.h
@@ -52,6 +52,11 @@ struct INFTLrecord {
52int INFTL_mount(struct INFTLrecord *s); 52int INFTL_mount(struct INFTLrecord *s);
53int INFTL_formatblock(struct INFTLrecord *s, int block); 53int INFTL_formatblock(struct INFTLrecord *s, int block);
54 54
55extern char inftlmountrev[];
56
57void INFTL_dumptables(struct INFTLrecord *s);
58void INFTL_dumpVUchains(struct INFTLrecord *s);
59
55#endif /* __KERNEL__ */ 60#endif /* __KERNEL__ */
56 61
57#endif /* __MTD_INFTL_H__ */ 62#endif /* __MTD_INFTL_H__ */
diff --git a/include/linux/namei.h b/include/linux/namei.h
index e6698013e4d0..58cb3d3d44b4 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -75,7 +75,6 @@ extern struct file *nameidata_to_filp(struct nameidata *nd, int flags);
75extern void release_open_intent(struct nameidata *); 75extern void release_open_intent(struct nameidata *);
76 76
77extern struct dentry * lookup_one_len(const char *, struct dentry *, int); 77extern struct dentry * lookup_one_len(const char *, struct dentry *, int);
78extern __deprecated_for_modules struct dentry * lookup_hash(struct nameidata *);
79 78
80extern int follow_down(struct vfsmount **, struct dentry **); 79extern int follow_down(struct vfsmount **, struct dentry **);
81extern int follow_up(struct vfsmount **, struct dentry **); 80extern int follow_up(struct vfsmount **, struct dentry **);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 950dc55e5192..40ccf8cc4239 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -598,20 +598,7 @@ DECLARE_PER_CPU(struct softnet_data,softnet_data);
598 598
599#define HAVE_NETIF_QUEUE 599#define HAVE_NETIF_QUEUE
600 600
601static inline void __netif_schedule(struct net_device *dev) 601extern void __netif_schedule(struct net_device *dev);
602{
603 if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
604 unsigned long flags;
605 struct softnet_data *sd;
606
607 local_irq_save(flags);
608 sd = &__get_cpu_var(softnet_data);
609 dev->next_sched = sd->output_queue;
610 sd->output_queue = dev;
611 raise_softirq_irqoff(NET_TX_SOFTIRQ);
612 local_irq_restore(flags);
613 }
614}
615 602
616static inline void netif_schedule(struct net_device *dev) 603static inline void netif_schedule(struct net_device *dev)
617{ 604{
@@ -675,13 +662,7 @@ static inline void dev_kfree_skb_irq(struct sk_buff *skb)
675/* Use this variant in places where it could be invoked 662/* Use this variant in places where it could be invoked
676 * either from interrupt or non-interrupt context. 663 * either from interrupt or non-interrupt context.
677 */ 664 */
678static inline void dev_kfree_skb_any(struct sk_buff *skb) 665extern void dev_kfree_skb_any(struct sk_buff *skb);
679{
680 if (in_irq() || irqs_disabled())
681 dev_kfree_skb_irq(skb);
682 else
683 dev_kfree_skb(skb);
684}
685 666
686#define HAVE_NETIF_RX 1 667#define HAVE_NETIF_RX 1
687extern int netif_rx(struct sk_buff *skb); 668extern int netif_rx(struct sk_buff *skb);
@@ -768,22 +749,9 @@ static inline int netif_device_present(struct net_device *dev)
768 return test_bit(__LINK_STATE_PRESENT, &dev->state); 749 return test_bit(__LINK_STATE_PRESENT, &dev->state);
769} 750}
770 751
771static inline void netif_device_detach(struct net_device *dev) 752extern void netif_device_detach(struct net_device *dev);
772{
773 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
774 netif_running(dev)) {
775 netif_stop_queue(dev);
776 }
777}
778 753
779static inline void netif_device_attach(struct net_device *dev) 754extern void netif_device_attach(struct net_device *dev);
780{
781 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
782 netif_running(dev)) {
783 netif_wake_queue(dev);
784 __netdev_watchdog_up(dev);
785 }
786}
787 755
788/* 756/*
789 * Network interface message level settings 757 * Network interface message level settings
@@ -851,20 +819,7 @@ static inline int netif_rx_schedule_prep(struct net_device *dev)
851 * already been called and returned 1. 819 * already been called and returned 1.
852 */ 820 */
853 821
854static inline void __netif_rx_schedule(struct net_device *dev) 822extern void __netif_rx_schedule(struct net_device *dev);
855{
856 unsigned long flags;
857
858 local_irq_save(flags);
859 dev_hold(dev);
860 list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
861 if (dev->quota < 0)
862 dev->quota += dev->weight;
863 else
864 dev->quota = dev->weight;
865 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
866 local_irq_restore(flags);
867}
868 823
869/* Try to reschedule poll. Called by irq handler. */ 824/* Try to reschedule poll. Called by irq handler. */
870 825
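__netif_schedule(), dev_kfree_skb_any(), netif_device_detach()/netif_device_attach() and __netif_rx_schedule() move from header inlines to out-of-line exports; callers keep the same API. The detach/attach pair is typically exercised from driver power management, roughly as below (the pci_driver suspend/resume hook shapes are from memory, not from this patch):

    #include <linux/pci.h>
    #include <linux/netdevice.h>

    static int mynet_suspend(struct pci_dev *pdev, pm_message_t state)
    {
            struct net_device *dev = pci_get_drvdata(pdev);

            netif_device_detach(dev);       /* stops the queue if running */
            /* ... quiesce the hardware, save PCI state ... */
            return 0;
    }

    static int mynet_resume(struct pci_dev *pdev)
    {
            struct net_device *dev = pci_get_drvdata(pdev);

            /* ... restore the hardware ... */
            netif_device_attach(dev);       /* re-wakes queue and watchdog */
            return 0;
    }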
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 1350e47b0234..f6bdef82a322 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -142,6 +142,12 @@ struct xt_counters_info
142#define ASSERT_WRITE_LOCK(x) 142#define ASSERT_WRITE_LOCK(x)
143#include <linux/netfilter_ipv4/listhelp.h> 143#include <linux/netfilter_ipv4/listhelp.h>
144 144
145#ifdef CONFIG_COMPAT
146#define COMPAT_TO_USER 1
147#define COMPAT_FROM_USER -1
148#define COMPAT_CALC_SIZE 0
149#endif
150
145struct xt_match 151struct xt_match
146{ 152{
147 struct list_head list; 153 struct list_head list;
@@ -175,6 +181,9 @@ struct xt_match
175 void (*destroy)(const struct xt_match *match, void *matchinfo, 181 void (*destroy)(const struct xt_match *match, void *matchinfo,
176 unsigned int matchinfosize); 182 unsigned int matchinfosize);
177 183
184 /* Called when userspace align differs from kernel space one */
185 int (*compat)(void *match, void **dstptr, int *size, int convert);
186
178 /* Set this to THIS_MODULE if you are a module, otherwise NULL */ 187 /* Set this to THIS_MODULE if you are a module, otherwise NULL */
179 struct module *me; 188 struct module *me;
180 189
@@ -220,6 +229,9 @@ struct xt_target
220 void (*destroy)(const struct xt_target *target, void *targinfo, 229 void (*destroy)(const struct xt_target *target, void *targinfo,
221 unsigned int targinfosize); 230 unsigned int targinfosize);
222 231
232 /* Called when userspace align differs from kernel space one */
233 int (*compat)(void *target, void **dstptr, int *size, int convert);
234
223 /* Set this to THIS_MODULE if you are a module, otherwise NULL */ 235 /* Set this to THIS_MODULE if you are a module, otherwise NULL */
224 struct module *me; 236 struct module *me;
225 237
@@ -314,6 +326,61 @@ extern void xt_proto_fini(int af);
314extern struct xt_table_info *xt_alloc_table_info(unsigned int size); 326extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
315extern void xt_free_table_info(struct xt_table_info *info); 327extern void xt_free_table_info(struct xt_table_info *info);
316 328
329#ifdef CONFIG_COMPAT
330#include <net/compat.h>
331
332struct compat_xt_entry_match
333{
334 union {
335 struct {
336 u_int16_t match_size;
337 char name[XT_FUNCTION_MAXNAMELEN - 1];
338 u_int8_t revision;
339 } user;
340 u_int16_t match_size;
341 } u;
342 unsigned char data[0];
343};
344
345struct compat_xt_entry_target
346{
347 union {
348 struct {
349 u_int16_t target_size;
350 char name[XT_FUNCTION_MAXNAMELEN - 1];
351 u_int8_t revision;
352 } user;
353 u_int16_t target_size;
354 } u;
355 unsigned char data[0];
356};
357
358/* FIXME: this works only on 32 bit tasks
359 * need to change whole approach in order to calculate align as function of
360 * current task alignment */
361
362struct compat_xt_counters
363{
364 u_int32_t cnt[4];
365};
366
367struct compat_xt_counters_info
368{
369 char name[XT_TABLE_MAXNAMELEN];
370 compat_uint_t num_counters;
371 struct compat_xt_counters counters[0];
372};
373
374#define COMPAT_XT_ALIGN(s) (((s) + (__alignof__(struct compat_xt_counters)-1)) \
375 & ~(__alignof__(struct compat_xt_counters)-1))
376
377extern void xt_compat_lock(int af);
378extern void xt_compat_unlock(int af);
379extern int xt_compat_match(void *match, void **dstptr, int *size, int convert);
380extern int xt_compat_target(void *target, void **dstptr, int *size,
381 int convert);
382
383#endif /* CONFIG_COMPAT */
317#endif /* __KERNEL__ */ 384#endif /* __KERNEL__ */
318 385
319#endif /* _X_TABLES_H */ 386#endif /* _X_TABLES_H */
diff --git a/include/linux/netfilter/xt_esp.h b/include/linux/netfilter/xt_esp.h
new file mode 100644
index 000000000000..9380fb1c27da
--- /dev/null
+++ b/include/linux/netfilter/xt_esp.h
@@ -0,0 +1,14 @@
1#ifndef _XT_ESP_H
2#define _XT_ESP_H
3
4struct xt_esp
5{
6 u_int32_t spis[2]; /* Security Parameter Index */
7 u_int8_t invflags; /* Inverse flags */
8};
9
10/* Values for "invflags" field in struct xt_esp. */
11#define XT_ESP_INV_SPI 0x01 /* Invert the sense of spi. */
12#define XT_ESP_INV_MASK 0x01 /* All possible flags. */
13
14#endif /*_XT_ESP_H*/
diff --git a/include/linux/netfilter/xt_multiport.h b/include/linux/netfilter/xt_multiport.h
new file mode 100644
index 000000000000..d49ee4183710
--- /dev/null
+++ b/include/linux/netfilter/xt_multiport.h
@@ -0,0 +1,30 @@
1#ifndef _XT_MULTIPORT_H
2#define _XT_MULTIPORT_H
3
4enum xt_multiport_flags
5{
6 XT_MULTIPORT_SOURCE,
7 XT_MULTIPORT_DESTINATION,
8 XT_MULTIPORT_EITHER
9};
10
11#define XT_MULTI_PORTS 15
12
13/* Must fit inside union xt_matchinfo: 16 bytes */
14struct xt_multiport
15{
16 u_int8_t flags; /* Type of comparison */
17 u_int8_t count; /* Number of ports */
18 u_int16_t ports[XT_MULTI_PORTS]; /* Ports */
19};
20
21struct xt_multiport_v1
22{
23 u_int8_t flags; /* Type of comparison */
24 u_int8_t count; /* Number of ports */
25 u_int16_t ports[XT_MULTI_PORTS]; /* Ports */
26 u_int8_t pflags[XT_MULTI_PORTS]; /* Port flags */
27 u_int8_t invert; /* Invert flag */
28};
29
30#endif /*_XT_MULTIPORT_H*/
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index d5b8c0d6a12b..c0dac16e1902 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -316,5 +316,23 @@ extern unsigned int ipt_do_table(struct sk_buff **pskb,
316 void *userdata); 316 void *userdata);
317 317
318#define IPT_ALIGN(s) XT_ALIGN(s) 318#define IPT_ALIGN(s) XT_ALIGN(s)
319
320#ifdef CONFIG_COMPAT
321#include <net/compat.h>
322
323struct compat_ipt_entry
324{
325 struct ipt_ip ip;
326 compat_uint_t nfcache;
327 u_int16_t target_offset;
328 u_int16_t next_offset;
329 compat_uint_t comefrom;
330 struct compat_xt_counters counters;
331 unsigned char elems[0];
332};
333
334#define COMPAT_IPT_ALIGN(s) COMPAT_XT_ALIGN(s)
335
336#endif /* CONFIG_COMPAT */
319#endif /*__KERNEL__*/ 337#endif /*__KERNEL__*/
320#endif /* _IPTABLES_H */ 338#endif /* _IPTABLES_H */
diff --git a/include/linux/netfilter_ipv4/ipt_esp.h b/include/linux/netfilter_ipv4/ipt_esp.h
index c782a83e53e0..78296e7eeff9 100644
--- a/include/linux/netfilter_ipv4/ipt_esp.h
+++ b/include/linux/netfilter_ipv4/ipt_esp.h
@@ -1,16 +1,10 @@
1#ifndef _IPT_ESP_H 1#ifndef _IPT_ESP_H
2#define _IPT_ESP_H 2#define _IPT_ESP_H
3 3
4struct ipt_esp 4#include <linux/netfilter/xt_esp.h>
5{
6 u_int32_t spis[2]; /* Security Parameter Index */
7 u_int8_t invflags; /* Inverse flags */
8};
9 5
10 6#define ipt_esp xt_esp
11 7#define IPT_ESP_INV_SPI XT_ESP_INV_SPI
12/* Values for "invflags" field in struct ipt_esp. */ 8#define IPT_ESP_INV_MASK XT_ESP_INV_MASK
13#define IPT_ESP_INV_SPI 0x01 /* Invert the sense of spi. */
14#define IPT_ESP_INV_MASK 0x01 /* All possible flags. */
15 9
16#endif /*_IPT_ESP_H*/ 10#endif /*_IPT_ESP_H*/
diff --git a/include/linux/netfilter_ipv4/ipt_multiport.h b/include/linux/netfilter_ipv4/ipt_multiport.h
index e6b6fff811df..55fe85eca88c 100644
--- a/include/linux/netfilter_ipv4/ipt_multiport.h
+++ b/include/linux/netfilter_ipv4/ipt_multiport.h
@@ -1,30 +1,15 @@
1#ifndef _IPT_MULTIPORT_H 1#ifndef _IPT_MULTIPORT_H
2#define _IPT_MULTIPORT_H 2#define _IPT_MULTIPORT_H
3#include <linux/netfilter_ipv4/ip_tables.h>
4 3
5enum ipt_multiport_flags 4#include <linux/netfilter/xt_multiport.h>
6{
7 IPT_MULTIPORT_SOURCE,
8 IPT_MULTIPORT_DESTINATION,
9 IPT_MULTIPORT_EITHER
10};
11 5
12#define IPT_MULTI_PORTS 15 6#define IPT_MULTIPORT_SOURCE XT_MULTIPORT_SOURCE
7#define IPT_MULTIPORT_DESTINATION XT_MULTIPORT_DESTINATION
8#define IPT_MULTIPORT_EITHER XT_MULTIPORT_EITHER
13 9
14/* Must fit inside union ipt_matchinfo: 16 bytes */ 10#define IPT_MULTI_PORTS XT_MULTI_PORTS
15struct ipt_multiport 11
16{ 12#define ipt_multiport xt_multiport
17 u_int8_t flags; /* Type of comparison */ 13#define ipt_multiport_v1 xt_multiport_v1
18 u_int8_t count; /* Number of ports */
19 u_int16_t ports[IPT_MULTI_PORTS]; /* Ports */
20};
21 14
22struct ipt_multiport_v1
23{
24 u_int8_t flags; /* Type of comparison */
25 u_int8_t count; /* Number of ports */
26 u_int16_t ports[IPT_MULTI_PORTS]; /* Ports */
27 u_int8_t pflags[IPT_MULTI_PORTS]; /* Port flags */
28 u_int8_t invert; /* Invert flag */
29};
30#endif /*_IPT_MULTIPORT_H*/ 15#endif /*_IPT_MULTIPORT_H*/
diff --git a/include/linux/netfilter_ipv6/ip6t_esp.h b/include/linux/netfilter_ipv6/ip6t_esp.h
index a91b6abc8079..f62eaf53c16c 100644
--- a/include/linux/netfilter_ipv6/ip6t_esp.h
+++ b/include/linux/netfilter_ipv6/ip6t_esp.h
@@ -1,14 +1,10 @@
1#ifndef _IP6T_ESP_H 1#ifndef _IP6T_ESP_H
2#define _IP6T_ESP_H 2#define _IP6T_ESP_H
3 3
4struct ip6t_esp 4#include <linux/netfilter/xt_esp.h>
5{
6 u_int32_t spis[2]; /* Security Parameter Index */
7 u_int8_t invflags; /* Inverse flags */
8};
9 5
10/* Values for "invflags" field in struct ip6t_esp. */ 6#define ip6t_esp xt_esp
11#define IP6T_ESP_INV_SPI 0x01 /* Invert the sense of spi. */ 7#define IP6T_ESP_INV_SPI XT_ESP_INV_SPI
12#define IP6T_ESP_INV_MASK 0x01 /* All possible flags. */ 8#define IP6T_ESP_INV_MASK XT_ESP_INV_MASK
13 9
14#endif /*_IP6T_ESP_H*/ 10#endif /*_IP6T_ESP_H*/
diff --git a/include/linux/netfilter_ipv6/ip6t_multiport.h b/include/linux/netfilter_ipv6/ip6t_multiport.h
index efe4954a8681..042c92661cee 100644
--- a/include/linux/netfilter_ipv6/ip6t_multiport.h
+++ b/include/linux/netfilter_ipv6/ip6t_multiport.h
@@ -1,21 +1,14 @@
1#ifndef _IP6T_MULTIPORT_H 1#ifndef _IP6T_MULTIPORT_H
2#define _IP6T_MULTIPORT_H 2#define _IP6T_MULTIPORT_H
3#include <linux/netfilter_ipv6/ip6_tables.h>
4 3
5enum ip6t_multiport_flags 4#include <linux/netfilter/xt_multiport.h>
6{
7 IP6T_MULTIPORT_SOURCE,
8 IP6T_MULTIPORT_DESTINATION,
9 IP6T_MULTIPORT_EITHER
10};
11 5
12#define IP6T_MULTI_PORTS 15 6#define IP6T_MULTIPORT_SOURCE XT_MULTIPORT_SOURCE
7#define IP6T_MULTIPORT_DESTINATION XT_MULTIPORT_DESTINATION
8#define IP6T_MULTIPORT_EITHER XT_MULTIPORT_EITHER
13 9
14/* Must fit inside union ip6t_matchinfo: 16 bytes */ 10#define IP6T_MULTI_PORTS XT_MULTI_PORTS
15struct ip6t_multiport 11
16{ 12#define ip6t_multiport xt_multiport
17 u_int8_t flags; /* Type of comparison */ 13
18 u_int8_t count; /* Number of ports */ 14#endif /*_IP6T_MULTIPORT_H*/
19 u_int16_t ports[IP6T_MULTI_PORTS]; /* Ports */
20};
21#endif /*_IPT_MULTIPORT_H*/
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 839f0b3c23aa..9539efd4f7e6 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -72,8 +72,8 @@ extern struct page * find_get_page(struct address_space *mapping,
72 unsigned long index); 72 unsigned long index);
73extern struct page * find_lock_page(struct address_space *mapping, 73extern struct page * find_lock_page(struct address_space *mapping,
74 unsigned long index); 74 unsigned long index);
75extern struct page * find_trylock_page(struct address_space *mapping, 75extern __deprecated_for_modules struct page * find_trylock_page(
76 unsigned long index); 76 struct address_space *mapping, unsigned long index);
77extern struct page * find_or_create_page(struct address_space *mapping, 77extern struct page * find_or_create_page(struct address_space *mapping,
78 unsigned long index, gfp_t gfp_mask); 78 unsigned long index, gfp_t gfp_mask);
79unsigned find_get_pages(struct address_space *mapping, pgoff_t start, 79unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 5b9082cc600f..29960b03bef7 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -1,6 +1,8 @@
1#ifndef _LINUX_PID_H 1#ifndef _LINUX_PID_H
2#define _LINUX_PID_H 2#define _LINUX_PID_H
3 3
4#include <linux/rcupdate.h>
5
4enum pid_type 6enum pid_type
5{ 7{
6 PIDTYPE_PID, 8 PIDTYPE_PID,
@@ -9,45 +11,109 @@ enum pid_type
9 PIDTYPE_MAX 11 PIDTYPE_MAX
10}; 12};
11 13
14/*
15 * What is struct pid?
16 *
17 * A struct pid is the kernel's internal notion of a process identifier.
18 * It refers to individual tasks, process groups, and sessions. While
19 * there are processes attached to it the struct pid lives in a hash
20 * table, so it and then the processes that it refers to can be found
21 * quickly from the numeric pid value. The attached processes may be
22 * quickly accessed by following pointers from struct pid.
23 *
24 * Storing pid_t values in the kernel and referring to them later has a
25 * problem. The process originally with that pid may have exited and the
26 * pid allocator wrapped, and another process could have come along
27 * and been assigned that pid.
28 *
29 * Referring to user space processes by holding a reference to struct
30 * task_struct has a problem. When the user space process exits
31 * the now useless task_struct is still kept. A task_struct plus a
32 * stack consumes around 10K of low kernel memory. More precisely
33 * this is THREAD_SIZE + sizeof(struct task_struct). By comparison
34 * a struct pid is about 64 bytes.
35 *
36 * Holding a reference to struct pid solves both of these problems.
37 * It is small so holding a reference does not consume a lot of
38 * resources, and since a new struct pid is allocated when the numeric
39 * pid value is reused we don't mistakenly refer to new processes.
40 */
41
12struct pid 42struct pid
13{ 43{
44 atomic_t count;
14 /* Try to keep pid_chain in the same cacheline as nr for find_pid */ 45 /* Try to keep pid_chain in the same cacheline as nr for find_pid */
15 int nr; 46 int nr;
16 struct hlist_node pid_chain; 47 struct hlist_node pid_chain;
17 /* list of pids with the same nr, only one of them is in the hash */ 48 /* lists of tasks that use this pid */
18 struct list_head pid_list; 49 struct hlist_head tasks[PIDTYPE_MAX];
50 struct rcu_head rcu;
19}; 51};
20 52
21#define pid_task(elem, type) \ 53struct pid_link
22 list_entry(elem, struct task_struct, pids[type].pid_list) 54{
55 struct hlist_node node;
56 struct pid *pid;
57};
58
59static inline struct pid *get_pid(struct pid *pid)
60{
61 if (pid)
62 atomic_inc(&pid->count);
63 return pid;
64}
65
66extern void FASTCALL(put_pid(struct pid *pid));
67extern struct task_struct *FASTCALL(pid_task(struct pid *pid, enum pid_type));
68extern struct task_struct *FASTCALL(get_pid_task(struct pid *pid,
69 enum pid_type));
23 70
24/* 71/*
25 * attach_pid() and detach_pid() must be called with the tasklist_lock 72 * attach_pid() and detach_pid() must be called with the tasklist_lock
26 * write-held. 73 * write-held.
27 */ 74 */
28extern int FASTCALL(attach_pid(struct task_struct *task, enum pid_type type, int nr)); 75extern int FASTCALL(attach_pid(struct task_struct *task,
76 enum pid_type type, int nr));
29 77
30extern void FASTCALL(detach_pid(struct task_struct *task, enum pid_type)); 78extern void FASTCALL(detach_pid(struct task_struct *task, enum pid_type));
31 79
32/* 80/*
33 * look up a PID in the hash table. Must be called with the tasklist_lock 81 * look up a PID in the hash table. Must be called with the tasklist_lock
34 * held. 82 * or rcu_read_lock() held.
83 */
84extern struct pid *FASTCALL(find_pid(int nr));
85
86/*
87 * Lookup a PID in the hash table, and return with it's count elevated.
35 */ 88 */
36extern struct pid *FASTCALL(find_pid(enum pid_type, int)); 89extern struct pid *find_get_pid(int nr);
37 90
38extern int alloc_pidmap(void); 91extern struct pid *alloc_pid(void);
39extern void FASTCALL(free_pidmap(int)); 92extern void FASTCALL(free_pid(struct pid *pid));
40 93
94#define pid_next(task, type) \
95 ((task)->pids[(type)].node.next)
96
97#define pid_next_task(task, type) \
98 hlist_entry(pid_next(task, type), struct task_struct, \
99 pids[(type)].node)
100
101
102/* We could use hlist_for_each_entry_rcu here but it takes more arguments
103 * than the do_each_task_pid/while_each_task_pid. So we roll our own
104 * to preserve the existing interface.
105 */
41#define do_each_task_pid(who, type, task) \ 106#define do_each_task_pid(who, type, task) \
42 if ((task = find_task_by_pid_type(type, who))) { \ 107 if ((task = find_task_by_pid_type(type, who))) { \
43 prefetch((task)->pids[type].pid_list.next); \ 108 prefetch(pid_next(task, type)); \
44 do { 109 do {
45 110
46#define while_each_task_pid(who, type, task) \ 111#define while_each_task_pid(who, type, task) \
47 } while (task = pid_task((task)->pids[type].pid_list.next,\ 112 } while (pid_next(task, type) && ({ \
48 type), \ 113 task = pid_next_task(task, type); \
49 prefetch((task)->pids[type].pid_list.next), \ 114 rcu_dereference(task); \
50 hlist_unhashed(&(task)->pids[type].pid_chain)); \ 115 prefetch(pid_next(task, type)); \
51 } \ 116 1; }) ); \
117 }
52 118
53#endif /* _LINUX_PID_H */ 119#endif /* _LINUX_PID_H */
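Because struct pid is now reference counted, code that needs to remember a process can hold a struct pid instead of a raw pid_t or a pinned task_struct, exactly as the comment block above argues. A usage sketch built on the new helpers (it assumes get_pid_task() hands back the task with its usage count raised, so it is balanced with put_task_struct()):

    #include <linux/pid.h>
    #include <linux/sched.h>

    /* Sketch: remember a process now, act on it safely later. */
    static struct pid *remember_process(int nr)
    {
            return find_get_pid(nr);        /* NULL if no such pid; count elevated */
    }

    static void poke_remembered(struct pid *pid)
    {
            struct task_struct *task = get_pid_task(pid, PIDTYPE_PID);

            if (task) {                     /* NULL once the task has exited */
                    /* ... act on the task ... */
                    put_task_struct(task);
            }
    }

    static void forget_process(struct pid *pid)
    {
            put_pid(pid);                   /* freed once the last user drops it */
    }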
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 75c7f55023ab..ec384958d509 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -5,11 +5,14 @@
5 5
6#define PIPE_BUFFERS (16) 6#define PIPE_BUFFERS (16)
7 7
8#define PIPE_BUF_FLAG_STOLEN 0x01
9#define PIPE_BUF_FLAG_LRU 0x02
10
8struct pipe_buffer { 11struct pipe_buffer {
9 struct page *page; 12 struct page *page;
10 unsigned int offset, len; 13 unsigned int offset, len;
11 struct pipe_buf_operations *ops; 14 struct pipe_buf_operations *ops;
12 unsigned int stolen; 15 unsigned int flags;
13}; 16};
14 17
15struct pipe_buf_operations { 18struct pipe_buf_operations {
@@ -60,5 +63,9 @@ void free_pipe_info(struct inode* inode);
60 * add the splice flags here. 63 * add the splice flags here.
61 */ 64 */
62#define SPLICE_F_MOVE (0x01) /* move pages instead of copying */ 65#define SPLICE_F_MOVE (0x01) /* move pages instead of copying */
66#define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */
67 /* we may still block on the fd we splice */
68 /* from/to, of course */
69#define SPLICE_F_MORE (0x04) /* expect more data */
63 70
64#endif 71#endif
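SPLICE_F_MOVE, SPLICE_F_NONBLOCK and SPLICE_F_MORE are the flag bits taken by the sys_splice() declaration added to syscalls.h below, which at this stage is just (fdin, fdout, len, flags). A hedged user-space sketch: the splice() wrapper is hypothetical (glibc had none yet) and one of the two descriptors is expected to be a pipe:

    #include <stddef.h>

    /* Hypothetical wrapper matching this patch's prototype:
     * sys_splice(int fdin, int fdout, size_t len, unsigned int flags). */
    long splice(int fdin, int fdout, size_t len, unsigned int flags);

    #define SPLICE_F_MOVE           (0x01)
    #define SPLICE_F_NONBLOCK       (0x02)
    #define SPLICE_F_MORE           (0x04)

    /* Sketch: shift up to 64 KiB from fd_in to fd_out, trying to move
     * pages instead of copying, without blocking on the pipe, and hinting
     * that more data will follow. */
    static long push_chunk(int fd_in, int fd_out)
    {
            return splice(fd_in, fd_out, 65536,
                          SPLICE_F_MOVE | SPLICE_F_NONBLOCK | SPLICE_F_MORE);
    }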
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d04186d8cc68..541f4828f5e7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -100,6 +100,7 @@ DECLARE_PER_CPU(unsigned long, process_counts);
100extern int nr_processes(void); 100extern int nr_processes(void);
101extern unsigned long nr_running(void); 101extern unsigned long nr_running(void);
102extern unsigned long nr_uninterruptible(void); 102extern unsigned long nr_uninterruptible(void);
103extern unsigned long nr_active(void);
103extern unsigned long nr_iowait(void); 104extern unsigned long nr_iowait(void);
104 105
105#include <linux/time.h> 106#include <linux/time.h>
@@ -483,6 +484,7 @@ struct signal_struct {
483#define MAX_PRIO (MAX_RT_PRIO + 40) 484#define MAX_PRIO (MAX_RT_PRIO + 40)
484 485
485#define rt_task(p) (unlikely((p)->prio < MAX_RT_PRIO)) 486#define rt_task(p) (unlikely((p)->prio < MAX_RT_PRIO))
487#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH))
486 488
487/* 489/*
488 * Some day this will be a full-fledged user tracking system.. 490 * Some day this will be a full-fledged user tracking system..
@@ -683,6 +685,13 @@ static inline void prefetch_stack(struct task_struct *t) { }
683struct audit_context; /* See audit.c */ 685struct audit_context; /* See audit.c */
684struct mempolicy; 686struct mempolicy;
685 687
688enum sleep_type {
689 SLEEP_NORMAL,
690 SLEEP_NONINTERACTIVE,
691 SLEEP_INTERACTIVE,
692 SLEEP_INTERRUPTED,
693};
694
686struct task_struct { 695struct task_struct {
687 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ 696 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
688 struct thread_info *thread_info; 697 struct thread_info *thread_info;
@@ -705,7 +714,7 @@ struct task_struct {
705 unsigned long sleep_avg; 714 unsigned long sleep_avg;
706 unsigned long long timestamp, last_ran; 715 unsigned long long timestamp, last_ran;
707 unsigned long long sched_time; /* sched_clock time spent running */ 716 unsigned long long sched_time; /* sched_clock time spent running */
708 int activated; 717 enum sleep_type sleep_type;
709 718
710 unsigned long policy; 719 unsigned long policy;
711 cpumask_t cpus_allowed; 720 cpumask_t cpus_allowed;
@@ -751,7 +760,7 @@ struct task_struct {
751 struct task_struct *group_leader; /* threadgroup leader */ 760 struct task_struct *group_leader; /* threadgroup leader */
752 761
753 /* PID/PID hash table linkage. */ 762 /* PID/PID hash table linkage. */
754 struct pid pids[PIDTYPE_MAX]; 763 struct pid_link pids[PIDTYPE_MAX];
755 struct list_head thread_group; 764 struct list_head thread_group;
756 765
757 struct completion *vfork_done; /* for vfork() */ 766 struct completion *vfork_done; /* for vfork() */
@@ -890,18 +899,19 @@ static inline pid_t process_group(struct task_struct *tsk)
890 */ 899 */
891static inline int pid_alive(struct task_struct *p) 900static inline int pid_alive(struct task_struct *p)
892{ 901{
893 return p->pids[PIDTYPE_PID].nr != 0; 902 return p->pids[PIDTYPE_PID].pid != NULL;
894} 903}
895 904
896extern void free_task(struct task_struct *tsk); 905extern void free_task(struct task_struct *tsk);
897#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) 906#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
898 907
899extern void __put_task_struct_cb(struct rcu_head *rhp); 908extern void __put_task_struct_cb(struct rcu_head *rhp);
909extern void __put_task_struct(struct task_struct *t);
900 910
901static inline void put_task_struct(struct task_struct *t) 911static inline void put_task_struct(struct task_struct *t)
902{ 912{
903 if (atomic_dec_and_test(&t->usage)) 913 if (atomic_dec_and_test(&t->usage))
904 call_rcu(&t->rcu, __put_task_struct_cb); 914 __put_task_struct(t);
905} 915}
906 916
907/* 917/*
diff --git a/include/linux/serio.h b/include/linux/serio.h
index 690aabca8ed0..6348e8330897 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -18,6 +18,7 @@
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/list.h> 19#include <linux/list.h>
20#include <linux/spinlock.h> 20#include <linux/spinlock.h>
21#include <linux/mutex.h>
21#include <linux/device.h> 22#include <linux/device.h>
22#include <linux/mod_devicetable.h> 23#include <linux/mod_devicetable.h>
23 24
@@ -42,7 +43,7 @@ struct serio {
42 struct serio *parent, *child; 43 struct serio *parent, *child;
43 44
44 struct serio_driver *drv; /* accessed from interrupt, must be protected by serio->lock and serio->sem */ 45 struct serio_driver *drv; /* accessed from interrupt, must be protected by serio->lock and serio->sem */
45 struct semaphore drv_sem; /* protects serio->drv so attributes can pin driver */ 46 struct mutex drv_mutex; /* protects serio->drv so attributes can pin driver */
46 47
47 struct device dev; 48 struct device dev;
48 unsigned int registered; /* port has been fully registered with driver core */ 49 unsigned int registered; /* port has been fully registered with driver core */
@@ -151,17 +152,17 @@ static inline void serio_continue_rx(struct serio *serio)
151 */ 152 */
152static inline int serio_pin_driver(struct serio *serio) 153static inline int serio_pin_driver(struct serio *serio)
153{ 154{
154 return down_interruptible(&serio->drv_sem); 155 return mutex_lock_interruptible(&serio->drv_mutex);
155} 156}
156 157
157static inline void serio_pin_driver_uninterruptible(struct serio *serio) 158static inline void serio_pin_driver_uninterruptible(struct serio *serio)
158{ 159{
159 down(&serio->drv_sem); 160 mutex_lock(&serio->drv_mutex);
160} 161}
161 162
162static inline void serio_unpin_driver(struct serio *serio) 163static inline void serio_unpin_driver(struct serio *serio)
163{ 164{
164 up(&serio->drv_sem); 165 mutex_unlock(&serio->drv_mutex);
165} 166}
166 167
167 168
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 613b9513f8b9..c4619a428d9b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -941,6 +941,25 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
941#define NET_IP_ALIGN 2 941#define NET_IP_ALIGN 2
942#endif 942#endif
943 943
944/*
945 * The networking layer reserves some headroom in skb data (via
946 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
947 * the header has to grow. In the default case, if the header has to grow
948 * 16 bytes or less we avoid the reallocation.
949 *
950 * Unfortunately this headroom changes the DMA alignment of the resulting
951 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
952 * on some architectures. An architecture can override this value,
953 * perhaps setting it to a cacheline in size (since that will maintain
954 * cacheline alignment of the DMA). It must be a power of 2.
955 *
956 * Various parts of the networking layer expect at least 16 bytes of
957 * headroom, you should not reduce this.
958 */
959#ifndef NET_SKB_PAD
960#define NET_SKB_PAD 16
961#endif
962
944extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc); 963extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
945 964
946static inline void __skb_trim(struct sk_buff *skb, unsigned int len) 965static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
@@ -1030,9 +1049,9 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
1030static inline struct sk_buff *__dev_alloc_skb(unsigned int length, 1049static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
1031 gfp_t gfp_mask) 1050 gfp_t gfp_mask)
1032{ 1051{
1033 struct sk_buff *skb = alloc_skb(length + 16, gfp_mask); 1052 struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
1034 if (likely(skb)) 1053 if (likely(skb))
1035 skb_reserve(skb, 16); 1054 skb_reserve(skb, NET_SKB_PAD);
1036 return skb; 1055 return skb;
1037} 1056}
1038#else 1057#else
@@ -1070,13 +1089,15 @@ static inline struct sk_buff *dev_alloc_skb(unsigned int length)
1070 */ 1089 */
1071static inline int skb_cow(struct sk_buff *skb, unsigned int headroom) 1090static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
1072{ 1091{
1073 int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb); 1092 int delta = (headroom > NET_SKB_PAD ? headroom : NET_SKB_PAD) -
1093 skb_headroom(skb);
1074 1094
1075 if (delta < 0) 1095 if (delta < 0)
1076 delta = 0; 1096 delta = 0;
1077 1097
1078 if (delta || skb_cloned(skb)) 1098 if (delta || skb_cloned(skb))
1079 return pskb_expand_head(skb, (delta + 15) & ~15, 0, GFP_ATOMIC); 1099 return pskb_expand_head(skb, (delta + (NET_SKB_PAD-1)) &
1100 ~(NET_SKB_PAD-1), 0, GFP_ATOMIC);
1080 return 0; 1101 return 0;
1081} 1102}
1082 1103
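
Per the new comment, an architecture that pays for unaligned DMA can override NET_SKB_PAD before this header is used, as long as the value stays a power of 2 and at least 16; skb_cow() then rounds the needed headroom up to a multiple of that value. A hypothetical override (the 32 is only an example):

/* e.g. in an architecture header, seen before <linux/skbuff.h> */
#define NET_SKB_PAD	32	/* example: one 32-byte cache line */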
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index e78ffc7d5b56..5717147596b6 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -571,5 +571,7 @@ asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename,
571asmlinkage long sys_unshare(unsigned long unshare_flags); 571asmlinkage long sys_unshare(unsigned long unshare_flags);
572asmlinkage long sys_splice(int fdin, int fdout, size_t len, 572asmlinkage long sys_splice(int fdin, int fdout, size_t len,
573 unsigned int flags); 573 unsigned int flags);
574asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
575 int flags);
574 576
575#endif 577#endif
diff --git a/include/linux/timer.h b/include/linux/timer.h
index b5caabca553c..0a485beba9f5 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -6,7 +6,7 @@
6#include <linux/spinlock.h> 6#include <linux/spinlock.h>
7#include <linux/stddef.h> 7#include <linux/stddef.h>
8 8
9struct timer_base_s; 9struct tvec_t_base_s;
10 10
11struct timer_list { 11struct timer_list {
12 struct list_head entry; 12 struct list_head entry;
@@ -15,16 +15,16 @@ struct timer_list {
15 void (*function)(unsigned long); 15 void (*function)(unsigned long);
16 unsigned long data; 16 unsigned long data;
17 17
18 struct timer_base_s *base; 18 struct tvec_t_base_s *base;
19}; 19};
20 20
21extern struct timer_base_s __init_timer_base; 21extern struct tvec_t_base_s boot_tvec_bases;
22 22
23#define TIMER_INITIALIZER(_function, _expires, _data) { \ 23#define TIMER_INITIALIZER(_function, _expires, _data) { \
24 .function = (_function), \ 24 .function = (_function), \
25 .expires = (_expires), \ 25 .expires = (_expires), \
26 .data = (_data), \ 26 .data = (_data), \
27 .base = &__init_timer_base, \ 27 .base = &boot_tvec_bases, \
28 } 28 }
29 29
30#define DEFINE_TIMER(_name, _function, _expires, _data) \ 30#define DEFINE_TIMER(_name, _function, _expires, _data) \
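
Statically initialized timers are unaffected by the rename; they simply start out pointing at boot_tvec_bases instead of __init_timer_base. The usual static-timer pattern, with my_timeout() as a placeholder:

static void my_timeout(unsigned long data);
static DEFINE_TIMER(my_timer, my_timeout, 0, 0);

static void my_timeout(unsigned long data)
{
	/* softirq context; re-arm for one second from now if still needed */
	mod_timer(&my_timer, jiffies + HZ);
}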
diff --git a/include/linux/tiocl.h b/include/linux/tiocl.h
index 2c9e847f6ed1..4756862c4ed4 100644
--- a/include/linux/tiocl.h
+++ b/include/linux/tiocl.h
@@ -34,5 +34,6 @@ struct tiocl_selection {
34#define TIOCL_SCROLLCONSOLE 13 /* scroll console */ 34#define TIOCL_SCROLLCONSOLE 13 /* scroll console */
35#define TIOCL_BLANKSCREEN 14 /* keep screen blank even if a key is pressed */ 35#define TIOCL_BLANKSCREEN 14 /* keep screen blank even if a key is pressed */
36#define TIOCL_BLANKEDSCREEN 15 /* return which vt was blanked */ 36#define TIOCL_BLANKEDSCREEN 15 /* return which vt was blanked */
37#define TIOCL_GETKMSGREDIRECT 17 /* get the vt the kernel messages are restricted to */
37 38
38#endif /* _LINUX_TIOCL_H */ 39#endif /* _LINUX_TIOCL_H */
diff --git a/include/linux/uinput.h b/include/linux/uinput.h
index 0ff7ca68e5c5..7168302f9844 100644
--- a/include/linux/uinput.h
+++ b/include/linux/uinput.h
@@ -20,7 +20,7 @@
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 * 21 *
22 * Author: Aristeu Sergio Rozanski Filho <aris@cathedrallabs.org> 22 * Author: Aristeu Sergio Rozanski Filho <aris@cathedrallabs.org>
23 * 23 *
24 * Changes/Revisions: 24 * Changes/Revisions:
25 * 0.2 16/10/2004 (Micah Dowty <micah@navi.cx>) 25 * 0.2 16/10/2004 (Micah Dowty <micah@navi.cx>)
26 * - added force feedback support 26 * - added force feedback support
@@ -51,7 +51,7 @@ struct uinput_request {
51 51
52struct uinput_device { 52struct uinput_device {
53 struct input_dev *dev; 53 struct input_dev *dev;
54 struct semaphore sem; 54 struct mutex mutex;
55 enum uinput_state state; 55 enum uinput_state state;
56 wait_queue_head_t waitq; 56 wait_queue_head_t waitq;
57 unsigned char ready; 57 unsigned char ready;
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index af2d6155d3fe..d7670ec1ec1e 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -966,66 +966,17 @@ struct v4l2_sliced_vbi_format
966 966
967/* Teletext World System Teletext 967/* Teletext World System Teletext
968 (WST), defined on ITU-R BT.653-2 */ 968 (WST), defined on ITU-R BT.653-2 */
969#define V4L2_SLICED_TELETEXT_PAL_B (0x000001) 969#define V4L2_SLICED_TELETEXT_B (0x0001)
970#define V4L2_SLICED_TELETEXT_PAL_C (0x000002)
971#define V4L2_SLICED_TELETEXT_NTSC_B (0x000010)
972#define V4L2_SLICED_TELETEXT_SECAM (0x000020)
973
974/* Teletext North American Broadcast Teletext Specification
975 (NABTS), defined on ITU-R BT.653-2 */
976#define V4L2_SLICED_TELETEXT_NTSC_C (0x000040)
977#define V4L2_SLICED_TELETEXT_NTSC_D (0x000080)
978
979/* Video Program System, defined on ETS 300 231*/ 970/* Video Program System, defined on ETS 300 231*/
980#define V4L2_SLICED_VPS (0x000400) 971#define V4L2_SLICED_VPS (0x0400)
981
982/* Closed Caption, defined on EIA-608 */ 972/* Closed Caption, defined on EIA-608 */
983#define V4L2_SLICED_CAPTION_525 (0x001000) 973#define V4L2_SLICED_CAPTION_525 (0x1000)
984#define V4L2_SLICED_CAPTION_625 (0x002000)
985
986/* Wide Screen System, defined on ITU-R BT1119.1 */ 974/* Wide Screen System, defined on ITU-R BT1119.1 */
987#define V4L2_SLICED_WSS_625 (0x004000) 975#define V4L2_SLICED_WSS_625 (0x4000)
988 976
989/* Wide Screen System, defined on IEC 61880 */ 977#define V4L2_SLICED_VBI_525 (V4L2_SLICED_CAPTION_525)
990#define V4L2_SLICED_WSS_525 (0x008000) 978#define V4L2_SLICED_VBI_625 (V4L2_SLICED_TELETEXT_B | V4L2_SLICED_VPS | V4L2_SLICED_WSS_625)
991 979
992/* Vertical Interval Timecode (VITC), defined on SMPTE 12M */
993#define V4l2_SLICED_VITC_625 (0x010000)
994#define V4l2_SLICED_VITC_525 (0x020000)
995
996#define V4L2_SLICED_TELETEXT_B (V4L2_SLICED_TELETEXT_PAL_B |\
997 V4L2_SLICED_TELETEXT_NTSC_B)
998
999#define V4L2_SLICED_TELETEXT (V4L2_SLICED_TELETEXT_PAL_B |\
1000 V4L2_SLICED_TELETEXT_PAL_C |\
1001 V4L2_SLICED_TELETEXT_SECAM |\
1002 V4L2_SLICED_TELETEXT_NTSC_B |\
1003 V4L2_SLICED_TELETEXT_NTSC_C |\
1004 V4L2_SLICED_TELETEXT_NTSC_D)
1005
1006#define V4L2_SLICED_CAPTION (V4L2_SLICED_CAPTION_525 |\
1007 V4L2_SLICED_CAPTION_625)
1008
1009#define V4L2_SLICED_WSS (V4L2_SLICED_WSS_525 |\
1010 V4L2_SLICED_WSS_625)
1011
1012#define V4L2_SLICED_VITC (V4L2_SLICED_VITC_525 |\
1013 V4L2_SLICED_VITC_625)
1014
1015#define V4L2_SLICED_VBI_525 (V4L2_SLICED_TELETEXT_NTSC_B |\
1016 V4L2_SLICED_TELETEXT_NTSC_C |\
1017 V4L2_SLICED_TELETEXT_NTSC_D |\
1018 V4L2_SLICED_CAPTION_525 |\
1019 V4L2_SLICED_WSS_525 |\
1020 V4l2_SLICED_VITC_525)
1021
1022#define V4L2_SLICED_VBI_625 (V4L2_SLICED_TELETEXT_PAL_B |\
1023 V4L2_SLICED_TELETEXT_PAL_C |\
1024 V4L2_SLICED_TELETEXT_SECAM |\
1025 V4L2_SLICED_VPS |\
1026 V4L2_SLICED_CAPTION_625 |\
1027 V4L2_SLICED_WSS_625 |\
1028 V4l2_SLICED_VITC_625)
1029 980
1030struct v4l2_sliced_vbi_cap 981struct v4l2_sliced_vbi_cap
1031{ 982{
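
An application asking for the consolidated 625-line services would now pass the single V4L2_SLICED_TELETEXT_B bit rather than one bit per standard; a sketch (error handling omitted, fd is an open V4L2 device):

struct v4l2_format fmt;

memset(&fmt, 0, sizeof(fmt));
fmt.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
fmt.fmt.sliced.service_set = V4L2_SLICED_VBI_625;	/* teletext B + VPS + WSS */
ioctl(fd, VIDIOC_S_FMT, &fmt);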
diff --git a/include/media/cx25840.h b/include/media/cx25840.h
new file mode 100644
index 000000000000..8e7e52d659a0
--- /dev/null
+++ b/include/media/cx25840.h
@@ -0,0 +1,64 @@
1/*
2 cx25840.h - definition for cx25840/1/2/3 inputs
3
4 Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef _CX25840_H_
22#define _CX25840_H_
23
24enum cx25840_video_input {
25 /* Composite video inputs In1-In8 */
26 CX25840_COMPOSITE1 = 1,
27 CX25840_COMPOSITE2,
28 CX25840_COMPOSITE3,
29 CX25840_COMPOSITE4,
30 CX25840_COMPOSITE5,
31 CX25840_COMPOSITE6,
32 CX25840_COMPOSITE7,
33 CX25840_COMPOSITE8,
34
35 /* S-Video inputs consist of one luma input (In1-In4) ORed with one
36 chroma input (In5-In8) */
37 CX25840_SVIDEO_LUMA1 = 0x10,
38 CX25840_SVIDEO_LUMA2 = 0x20,
39 CX25840_SVIDEO_LUMA3 = 0x30,
40 CX25840_SVIDEO_LUMA4 = 0x40,
41 CX25840_SVIDEO_CHROMA4 = 0x400,
42 CX25840_SVIDEO_CHROMA5 = 0x500,
43 CX25840_SVIDEO_CHROMA6 = 0x600,
44 CX25840_SVIDEO_CHROMA7 = 0x700,
45 CX25840_SVIDEO_CHROMA8 = 0x800,
46
47 /* S-Video aliases for common luma/chroma combinations */
48 CX25840_SVIDEO1 = 0x510,
49 CX25840_SVIDEO2 = 0x620,
50 CX25840_SVIDEO3 = 0x730,
51 CX25840_SVIDEO4 = 0x840,
52};
53
54enum cx25840_audio_input {
55 /* Audio inputs: serial or In4-In8 */
56 CX25840_AUDIO_SERIAL,
57 CX25840_AUDIO4 = 4,
58 CX25840_AUDIO5,
59 CX25840_AUDIO6,
60 CX25840_AUDIO7,
61 CX25840_AUDIO8,
62};
63
64#endif
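
As the header comment says, an S-Video input is one luma selection ORed with one chroma selection, and the aliases are just the common combinations; for example:

/* 0x10 | 0x500 == 0x510, i.e. exactly the CX25840_SVIDEO1 alias above */
int svideo_in = CX25840_SVIDEO_LUMA1 | CX25840_SVIDEO_CHROMA5;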
diff --git a/include/media/msp3400.h b/include/media/msp3400.h
index 0be61a021d45..6ab854931c05 100644
--- a/include/media/msp3400.h
+++ b/include/media/msp3400.h
@@ -80,16 +80,16 @@
80 */ 80 */
81 81
82/* SCART input to DSP selection */ 82/* SCART input to DSP selection */
83#define MSP_IN_SCART_1 0 /* Pin SC1_IN */ 83#define MSP_IN_SCART1 0 /* Pin SC1_IN */
84#define MSP_IN_SCART_2 1 /* Pin SC2_IN */ 84#define MSP_IN_SCART2 1 /* Pin SC2_IN */
85#define MSP_IN_SCART_3 2 /* Pin SC3_IN */ 85#define MSP_IN_SCART3 2 /* Pin SC3_IN */
86#define MSP_IN_SCART_4 3 /* Pin SC4_IN */ 86#define MSP_IN_SCART4 3 /* Pin SC4_IN */
87#define MSP_IN_MONO 6 /* Pin MONO_IN */ 87#define MSP_IN_MONO 6 /* Pin MONO_IN */
88#define MSP_IN_MUTE 7 /* Mute DSP input */ 88#define MSP_IN_MUTE 7 /* Mute DSP input */
89#define MSP_SCART_TO_DSP(in) (in) 89#define MSP_SCART_TO_DSP(in) (in)
90/* Tuner input to demodulator and DSP selection */ 90/* Tuner input to demodulator and DSP selection */
91#define MSP_IN_TUNER_1 0 /* Analog Sound IF input pin ANA_IN1 */ 91#define MSP_IN_TUNER1 0 /* Analog Sound IF input pin ANA_IN1 */
92#define MSP_IN_TUNER_2 1 /* Analog Sound IF input pin ANA_IN2 */ 92#define MSP_IN_TUNER2 1 /* Analog Sound IF input pin ANA_IN2 */
93#define MSP_TUNER_TO_DSP(in) ((in) << 3) 93#define MSP_TUNER_TO_DSP(in) ((in) << 3)
94 94
95/* The msp has up to 5 DSP outputs, each output can independently select 95/* The msp has up to 5 DSP outputs, each output can independently select
@@ -109,14 +109,14 @@
109 DSP. This is currently not implemented. Also not implemented is the 109 DSP. This is currently not implemented. Also not implemented is the
110 multi-channel capable I2S3 input of the 44x0G. If someone can demonstrate 110 multi-channel capable I2S3 input of the 44x0G. If someone can demonstrate
111 a need for one of those features then additional support can be added. */ 111 a need for one of those features then additional support can be added. */
112#define MSP_DSP_OUT_TUNER 0 /* Tuner output */ 112#define MSP_DSP_IN_TUNER 0 /* Tuner DSP input */
113#define MSP_DSP_OUT_SCART 2 /* SCART output */ 113#define MSP_DSP_IN_SCART 2 /* SCART DSP input */
114#define MSP_DSP_OUT_I2S1 5 /* I2S1 output */ 114#define MSP_DSP_IN_I2S1 5 /* I2S1 DSP input */
115#define MSP_DSP_OUT_I2S2 6 /* I2S2 output */ 115#define MSP_DSP_IN_I2S2 6 /* I2S2 DSP input */
116#define MSP_DSP_OUT_I2S3 7 /* I2S3 output */ 116#define MSP_DSP_IN_I2S3 7 /* I2S3 DSP input */
117#define MSP_DSP_OUT_MAIN_AVC 11 /* MAIN AVC processed output */ 117#define MSP_DSP_IN_MAIN_AVC 11 /* MAIN AVC processed DSP input */
118#define MSP_DSP_OUT_MAIN 12 /* MAIN output */ 118#define MSP_DSP_IN_MAIN 12 /* MAIN DSP input */
119#define MSP_DSP_OUT_AUX 13 /* AUX output */ 119#define MSP_DSP_IN_AUX 13 /* AUX DSP input */
120#define MSP_DSP_TO_MAIN(in) ((in) << 4) 120#define MSP_DSP_TO_MAIN(in) ((in) << 4)
121#define MSP_DSP_TO_AUX(in) ((in) << 8) 121#define MSP_DSP_TO_AUX(in) ((in) << 8)
122#define MSP_DSP_TO_SCART1(in) ((in) << 12) 122#define MSP_DSP_TO_SCART1(in) ((in) << 12)
@@ -125,16 +125,16 @@
125 125
126/* Output SCART select: the SCART outputs can select which input 126/* Output SCART select: the SCART outputs can select which input
127 to use. */ 127 to use. */
128#define MSP_OUT_SCART1 0 /* SCART1 input, bypassing the DSP */ 128#define MSP_SC_IN_SCART1 0 /* SCART1 input, bypassing the DSP */
129#define MSP_OUT_SCART2 1 /* SCART2 input, bypassing the DSP */ 129#define MSP_SC_IN_SCART2 1 /* SCART2 input, bypassing the DSP */
130#define MSP_OUT_SCART3 2 /* SCART3 input, bypassing the DSP */ 130#define MSP_SC_IN_SCART3 2 /* SCART3 input, bypassing the DSP */
131#define MSP_OUT_SCART4 3 /* SCART4 input, bypassing the DSP */ 131#define MSP_SC_IN_SCART4 3 /* SCART4 input, bypassing the DSP */
132#define MSP_OUT_SCART1_DA 4 /* DSP SCART1 output */ 132#define MSP_SC_IN_DSP_SCART1 4 /* DSP SCART1 input */
133#define MSP_OUT_SCART2_DA 5 /* DSP SCART2 output */ 133#define MSP_SC_IN_DSP_SCART2 5 /* DSP SCART2 input */
134#define MSP_OUT_MONO 6 /* MONO input, bypassing the DSP */ 134#define MSP_SC_IN_MONO 6 /* MONO input, bypassing the DSP */
135#define MSP_OUT_MUTE 7 /* MUTE output */ 135#define MSP_SC_IN_MUTE 7 /* MUTE output */
136#define MSP_OUT_TO_SCART1(in) (in) 136#define MSP_SC_TO_SCART1(in) (in)
137#define MSP_OUT_TO_SCART2(in) ((in) << 4) 137#define MSP_SC_TO_SCART2(in) ((in) << 4)
138 138
139/* Shortcut macros */ 139/* Shortcut macros */
140#define MSP_INPUT(sc, t, main_aux_src, sc_i2s_src) \ 140#define MSP_INPUT(sc, t, main_aux_src, sc_i2s_src) \
@@ -145,14 +145,14 @@
145 MSP_DSP_TO_SCART1(sc_i2s_src) | \ 145 MSP_DSP_TO_SCART1(sc_i2s_src) | \
146 MSP_DSP_TO_SCART2(sc_i2s_src) | \ 146 MSP_DSP_TO_SCART2(sc_i2s_src) | \
147 MSP_DSP_TO_I2S(sc_i2s_src)) 147 MSP_DSP_TO_I2S(sc_i2s_src))
148#define MSP_INPUT_DEFAULT MSP_INPUT(MSP_IN_SCART_1, MSP_IN_TUNER_1, \ 148#define MSP_INPUT_DEFAULT MSP_INPUT(MSP_IN_SCART1, MSP_IN_TUNER1, \
149 MSP_DSP_OUT_TUNER, MSP_DSP_OUT_TUNER) 149 MSP_DSP_IN_TUNER, MSP_DSP_IN_TUNER)
150#define MSP_OUTPUT(sc) \ 150#define MSP_OUTPUT(sc) \
151 (MSP_OUT_TO_SCART1(sc) | \ 151 (MSP_SC_TO_SCART1(sc) | \
152 MSP_OUT_TO_SCART2(sc)) 152 MSP_SC_TO_SCART2(sc))
153/* This equals the RESET position of the msp3400 ACB register */ 153/* This equals the RESET position of the msp3400 ACB register */
154#define MSP_OUTPUT_DEFAULT (MSP_OUT_TO_SCART1(MSP_OUT_SCART3) | \ 154#define MSP_OUTPUT_DEFAULT (MSP_SC_TO_SCART1(MSP_SC_IN_SCART3) | \
155 MSP_OUT_TO_SCART2(MSP_OUT_SCART1_DA)) 155 MSP_SC_TO_SCART2(MSP_SC_IN_DSP_SCART1))
156 156
157/* Tuner inputs vs. msp version */ 157/* Tuner inputs vs. msp version */
158/* Chip TUNER_1 TUNER_2 158/* Chip TUNER_1 TUNER_2
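
With the renamed constants a bridge driver builds its routing words through the shortcut macros, for instance (values only, no I2C access shown):

u32 in  = MSP_INPUT(MSP_IN_SCART2, MSP_IN_TUNER1,
		    MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);	/* SCART2 into the DSP */
u32 out = MSP_OUTPUT(MSP_SC_IN_DSP_SCART1);			/* DSP SCART1 to both SCART outputs */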
diff --git a/include/media/saa7115.h b/include/media/saa7115.h
new file mode 100644
index 000000000000..6b4836f3f057
--- /dev/null
+++ b/include/media/saa7115.h
@@ -0,0 +1,37 @@
1/*
2 saa7115.h - definition for saa7113/4/5 inputs
3
4 Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef _SAA7115_H_
22#define _SAA7115_H_
23
24/* SAA7113/4/5 HW inputs */
25#define SAA7115_COMPOSITE0 0
26#define SAA7115_COMPOSITE1 1
27#define SAA7115_COMPOSITE2 2
28#define SAA7115_COMPOSITE3 3
29#define SAA7115_COMPOSITE4 4 /* not available for the saa7113 */
30#define SAA7115_COMPOSITE5 5 /* not available for the saa7113 */
31#define SAA7115_SVIDEO0 6
32#define SAA7115_SVIDEO1 7
33#define SAA7115_SVIDEO2 8
34#define SAA7115_SVIDEO3 9
35
36#endif
37
diff --git a/include/media/saa7127.h b/include/media/saa7127.h
new file mode 100644
index 000000000000..bbcf862141af
--- /dev/null
+++ b/include/media/saa7127.h
@@ -0,0 +1,41 @@
1/*
2 saa7127.h - definition for saa7126/7/8/9 inputs/outputs
3
4 Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef _SAA7127_H_
22#define _SAA7127_H_
23
24/* Enumeration for the supported input types */
25enum saa7127_input_type {
26 SAA7127_INPUT_TYPE_NORMAL,
27 SAA7127_INPUT_TYPE_TEST_IMAGE
28};
29
30/* Enumeration for the supported output signal types */
31enum saa7127_output_type {
32 SAA7127_OUTPUT_TYPE_BOTH,
33 SAA7127_OUTPUT_TYPE_COMPOSITE,
34 SAA7127_OUTPUT_TYPE_SVIDEO,
35 SAA7127_OUTPUT_TYPE_RGB,
36 SAA7127_OUTPUT_TYPE_YUV_C,
37 SAA7127_OUTPUT_TYPE_YUV_V
38};
39
40#endif
41
diff --git a/include/media/upd64031a.h b/include/media/upd64031a.h
new file mode 100644
index 000000000000..3ad6a32e1bce
--- /dev/null
+++ b/include/media/upd64031a.h
@@ -0,0 +1,40 @@
1/*
2 * upd64031a - NEC Electronics Ghost Reduction input defines
3 *
4 * 2006 by Hans Verkuil (hverkuil@xs4all.nl)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
20
21#ifndef _UPD64031A_H_
22#define _UPD64031A_H_
23
24/* Ghost reduction modes */
25#define UPD64031A_GR_ON 0
26#define UPD64031A_GR_OFF 1
27#define UPD64031A_GR_THROUGH 3
28
29/* Direct 3D/YCS Connection */
30#define UPD64031A_3DYCS_DISABLE (0 << 2)
31#define UPD64031A_3DYCS_COMPOSITE (2 << 2)
32#define UPD64031A_3DYCS_SVIDEO (3 << 2)
33
34/* Composite sync digital separation circuit */
35#define UPD64031A_COMPOSITE_EXTERNAL (1 << 4)
36
37/* Vertical sync digital separation circuit */
38#define UPD64031A_VERTICAL_EXTERNAL (1 << 5)
39
40#endif
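
The defines combine by ORing, one value per functional block; a routing value a bridge driver might hand to the subdevice could look like:

/* ghost reduction enabled, 3D Y/C connection driven from composite */
u32 gr_route = UPD64031A_GR_ON | UPD64031A_3DYCS_COMPOSITE;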
diff --git a/include/media/upd64083.h b/include/media/upd64083.h
new file mode 100644
index 000000000000..59b6f32ba300
--- /dev/null
+++ b/include/media/upd64083.h
@@ -0,0 +1,58 @@
1/*
2 * upd6408x - NEC Electronics 3-Dimensional Y/C separation input defines
3 *
4 * 2006 by Hans Verkuil (hverkuil@xs4all.nl)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
20
21#ifndef _UPD64083_H_
22#define _UPD64083_H_
23
24/* There are two bits of information that the driver needs in order
25 to select the correct routing: the operating mode and the selection
26 of the Y input (external or internal).
27
28 The first two operating modes expect a composite signal on the Y input,
29 the second two operating modes use both the Y and C inputs.
30
31 Normally YCS_MODE is used for tuner and composite inputs, and the
32 YCNR mode is used for S-Video inputs.
33
34 The external Y-ADC is selected when the composite input comes from a
35 upd64031a ghost reduction device. If this device is not present, or
36 the input is a S-Video signal, then the internal Y-ADC input should
37 be used. */
38
39/* Operating modes: */
40
41/* YCS mode: Y/C separation (burst locked clocking) */
42#define UPD64083_YCS_MODE 0
43/* YCS+ mode: 2D Y/C separation and YCNR (burst locked clocking) */
44#define UPD64083_YCS_PLUS_MODE 1
45
46/* Note: the following two modes cannot be used in combination with the
47 external Y-ADC. */
48/* MNNR mode: frame comb type YNR+C delay (line locked clocking) */
49#define UPD64083_MNNR_MODE 2
50/* YCNR mode: frame recursive YCNR (burst locked clocking) */
51#define UPD64083_YCNR_MODE 3
52
53/* Select external Y-ADC: this should be set if this device is used in
54 combination with the upd64031a ghost reduction device.
55 Otherwise leave at 0 (use internal Y-ADC). */
56#define UPD64083_EXT_Y_ADC (1 << 2)
57
58#endif
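
Following the comment above, a board that routes its composite signal through a upd64031a ghost reducer first would pick, for example:

/* tuner/composite path behind the ghost reducer: YCS mode, external Y-ADC;
 * an S-Video input would instead use YCNR mode with the bit left clear */
u32 yc_route = UPD64083_YCS_MODE | UPD64083_EXT_Y_ADC;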
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 9418f4d1afbb..3c989db8a7aa 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -405,9 +405,6 @@ extern int tcp_disconnect(struct sock *sk, int flags);
405 405
406extern void tcp_unhash(struct sock *sk); 406extern void tcp_unhash(struct sock *sk);
407 407
408extern int tcp_v4_hash_connecting(struct sock *sk);
409
410
411/* From syncookies.c */ 408/* From syncookies.c */
412extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, 409extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
413 struct ip_options *opt); 410 struct ip_options *opt);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index e100291e43f4..0d5529c382e8 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -242,7 +242,6 @@ extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
242 242
243extern void xfrm_state_delete_tunnel(struct xfrm_state *x); 243extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
244 244
245struct xfrm_decap_state;
246struct xfrm_type 245struct xfrm_type
247{ 246{
248 char *description; 247 char *description;
@@ -251,7 +250,7 @@ struct xfrm_type
251 250
252 int (*init_state)(struct xfrm_state *x); 251 int (*init_state)(struct xfrm_state *x);
253 void (*destructor)(struct xfrm_state *); 252 void (*destructor)(struct xfrm_state *);
254 int (*input)(struct xfrm_state *, struct xfrm_decap_state *, struct sk_buff *skb); 253 int (*input)(struct xfrm_state *, struct sk_buff *skb);
255 int (*output)(struct xfrm_state *, struct sk_buff *pskb); 254 int (*output)(struct xfrm_state *, struct sk_buff *pskb);
256 /* Estimate maximal size of result of transformation of a dgram */ 255 /* Estimate maximal size of result of transformation of a dgram */
257 u32 (*get_max_size)(struct xfrm_state *, int size); 256 u32 (*get_max_size)(struct xfrm_state *, int size);
@@ -606,25 +605,11 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
606 605
607extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev); 606extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
608 607
609/* Decapsulation state, used by the input to store data during
610 * decapsulation procedure, to be used later (during the policy
611 * check
612 */
613struct xfrm_decap_state {
614 char decap_data[20];
615 __u16 decap_type;
616};
617
618struct sec_decap_state {
619 struct xfrm_state *xvec;
620 struct xfrm_decap_state decap;
621};
622
623struct sec_path 608struct sec_path
624{ 609{
625 atomic_t refcnt; 610 atomic_t refcnt;
626 int len; 611 int len;
627 struct sec_decap_state x[XFRM_MAX_DEPTH]; 612 struct xfrm_state *xvec[XFRM_MAX_DEPTH];
628}; 613};
629 614
630static inline struct sec_path * 615static inline struct sec_path *
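
A minimal sketch of what an xfrm_type implementation looks like with the decap state removed from the input hook; the my_* names are placeholders only:

static int my_input(struct xfrm_state *x, struct sk_buff *skb)
{
	/* decapsulate skb; per-packet decap state is no longer threaded through */
	return 0;
}

static struct xfrm_type my_type = {
	.description	= "example",
	.input		= my_input,	/* new two-argument signature */
	/* .init_state, .destructor, .output, ... unchanged */
};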
diff --git a/include/pcmcia/bulkmem.h b/include/pcmcia/bulkmem.h
index b53b78d497ba..6bc7472293b2 100644
--- a/include/pcmcia/bulkmem.h
+++ b/include/pcmcia/bulkmem.h
@@ -35,7 +35,7 @@ typedef struct region_info_t {
35#define REGION_BAR_MASK 0xe000 35#define REGION_BAR_MASK 0xe000
36#define REGION_BAR_SHIFT 13 36#define REGION_BAR_SHIFT 13
37 37
38int pcmcia_get_first_region(client_handle_t handle, region_info_t *rgn); 38int pcmcia_get_first_region(struct pcmcia_device *handle, region_info_t *rgn);
39int pcmcia_get_next_region(client_handle_t handle, region_info_t *rgn); 39int pcmcia_get_next_region(struct pcmcia_device *handle, region_info_t *rgn);
40 40
41#endif /* _LINUX_BULKMEM_H */ 41#endif /* _LINUX_BULKMEM_H */
diff --git a/include/pcmcia/ciscode.h b/include/pcmcia/ciscode.h
index da19c297dd65..c1da8558339a 100644
--- a/include/pcmcia/ciscode.h
+++ b/include/pcmcia/ciscode.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * ciscode.h -- Definitions for bulk memory services 2 * ciscode.h
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
@@ -122,4 +122,7 @@
122 122
123#define MANFID_XIRCOM 0x0105 123#define MANFID_XIRCOM 0x0105
124 124
125#define MANFID_POSSIO 0x030c
126#define PRODID_POSSIO_GCC 0x0003
127
125#endif /* _LINUX_CISCODE_H */ 128#endif /* _LINUX_CISCODE_H */
diff --git a/include/pcmcia/cistpl.h b/include/pcmcia/cistpl.h
index c6a069554fd7..d3bbb19caf81 100644
--- a/include/pcmcia/cistpl.h
+++ b/include/pcmcia/cistpl.h
@@ -586,12 +586,7 @@ typedef struct cisdump_t {
586 cisdata_t Data[CISTPL_MAX_CIS_SIZE]; 586 cisdata_t Data[CISTPL_MAX_CIS_SIZE];
587} cisdump_t; 587} cisdump_t;
588 588
589int pcmcia_get_first_tuple(client_handle_t handle, tuple_t *tuple);
590int pcmcia_get_next_tuple(client_handle_t handle, tuple_t *tuple);
591int pcmcia_get_tuple_data(client_handle_t handle, tuple_t *tuple);
592int pcmcia_parse_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse);
593 589
594int pcmcia_validate_cis(client_handle_t handle, cisinfo_t *info);
595int pcmcia_replace_cis(struct pcmcia_socket *s, cisdump_t *cis); 590int pcmcia_replace_cis(struct pcmcia_socket *s, cisdump_t *cis);
596 591
597/* don't use outside of PCMCIA core yet */ 592/* don't use outside of PCMCIA core yet */
@@ -602,4 +597,20 @@ int pccard_parse_tuple(tuple_t *tuple, cisparse_t *parse);
602 597
603int pccard_validate_cis(struct pcmcia_socket *s, unsigned int function, cisinfo_t *info); 598int pccard_validate_cis(struct pcmcia_socket *s, unsigned int function, cisinfo_t *info);
604 599
600/* ... but use these wrappers instead */
601#define pcmcia_get_first_tuple(p_dev, tuple) \
602 pccard_get_first_tuple(p_dev->socket, p_dev->func, tuple)
603
604#define pcmcia_get_next_tuple(p_dev, tuple) \
605 pccard_get_next_tuple(p_dev->socket, p_dev->func, tuple)
606
607#define pcmcia_get_tuple_data(p_dev, tuple) \
608 pccard_get_tuple_data(p_dev->socket, tuple)
609
610#define pcmcia_parse_tuple(p_dev, tuple, parse) \
611 pccard_parse_tuple(tuple, parse)
612
613#define pcmcia_validate_cis(p_dev, info) \
614 pccard_validate_cis(p_dev->socket, p_dev->func, info)
615
605#endif /* LINUX_CISTPL_H */ 616#endif /* LINUX_CISTPL_H */
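
Drivers keep the familiar tuple walk; the calls simply expand to the pccard_* helpers through the wrappers above. A sketch with error handling trimmed (p_dev is the driver's struct pcmcia_device *):

tuple_t tuple;
cisparse_t parse;
u_char buf[64];

tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
tuple.Attributes = 0;
tuple.TupleData = buf;
tuple.TupleDataMax = sizeof(buf);
tuple.TupleOffset = 0;

if (pcmcia_get_first_tuple(p_dev, &tuple) == 0) {
	do {
		if (pcmcia_get_tuple_data(p_dev, &tuple) ||
		    pcmcia_parse_tuple(p_dev, &tuple, &parse))
			break;
		/* ... inspect parse.cftable_entry ... */
	} while (pcmcia_get_next_tuple(p_dev, &tuple) == 0);
}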
diff --git a/include/pcmcia/cs.h b/include/pcmcia/cs.h
index 52660f32663d..d5838c30d20f 100644
--- a/include/pcmcia/cs.h
+++ b/include/pcmcia/cs.h
@@ -109,17 +109,6 @@ typedef struct client_req_t {
109 109
110#define CLIENT_THIS_SOCKET 0x01 110#define CLIENT_THIS_SOCKET 0x01
111 111
112/* For RegisterClient */
113typedef struct client_reg_t {
114 dev_info_t *dev_info;
115 u_int Attributes; /* UNUSED */
116 u_int EventMask;
117 int (*event_handler)(event_t event, int priority,
118 event_callback_args_t *);
119 event_callback_args_t event_callback_args;
120 u_int Version;
121} client_reg_t;
122
123/* ModifyConfiguration */ 112/* ModifyConfiguration */
124typedef struct modconf_t { 113typedef struct modconf_t {
125 u_int Attributes; 114 u_int Attributes;
@@ -127,15 +116,16 @@ typedef struct modconf_t {
127} modconf_t; 116} modconf_t;
128 117
129/* Attributes for ModifyConfiguration */ 118/* Attributes for ModifyConfiguration */
130#define CONF_IRQ_CHANGE_VALID 0x100 119#define CONF_IRQ_CHANGE_VALID 0x0100
131#define CONF_VCC_CHANGE_VALID 0x200 120#define CONF_VCC_CHANGE_VALID 0x0200
132#define CONF_VPP1_CHANGE_VALID 0x400 121#define CONF_VPP1_CHANGE_VALID 0x0400
133#define CONF_VPP2_CHANGE_VALID 0x800 122#define CONF_VPP2_CHANGE_VALID 0x0800
123#define CONF_IO_CHANGE_WIDTH 0x1000
134 124
135/* For RequestConfiguration */ 125/* For RequestConfiguration */
136typedef struct config_req_t { 126typedef struct config_req_t {
137 u_int Attributes; 127 u_int Attributes;
138 u_int Vcc, Vpp1, Vpp2; 128 u_int Vpp; /* both Vpp1 and Vpp2 */
139 u_int IntType; 129 u_int IntType;
140 u_int ConfigBase; 130 u_int ConfigBase;
141 u_char Status, Pin, Copy, ExtStatus; 131 u_char Status, Pin, Copy, ExtStatus;
@@ -389,23 +379,27 @@ int pcmcia_get_status(struct pcmcia_device *p_dev, cs_status_t *status);
389int pcmcia_get_mem_page(window_handle_t win, memreq_t *req); 379int pcmcia_get_mem_page(window_handle_t win, memreq_t *req);
390int pcmcia_map_mem_page(window_handle_t win, memreq_t *req); 380int pcmcia_map_mem_page(window_handle_t win, memreq_t *req);
391int pcmcia_modify_configuration(struct pcmcia_device *p_dev, modconf_t *mod); 381int pcmcia_modify_configuration(struct pcmcia_device *p_dev, modconf_t *mod);
392int pcmcia_release_configuration(struct pcmcia_device *p_dev);
393int pcmcia_release_io(struct pcmcia_device *p_dev, io_req_t *req);
394int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req);
395int pcmcia_release_window(window_handle_t win); 382int pcmcia_release_window(window_handle_t win);
396int pcmcia_request_configuration(struct pcmcia_device *p_dev, config_req_t *req); 383int pcmcia_request_configuration(struct pcmcia_device *p_dev, config_req_t *req);
397int pcmcia_request_io(struct pcmcia_device *p_dev, io_req_t *req); 384int pcmcia_request_io(struct pcmcia_device *p_dev, io_req_t *req);
398int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req); 385int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req);
399int pcmcia_request_window(struct pcmcia_device **p_dev, win_req_t *req, window_handle_t *wh); 386int pcmcia_request_window(struct pcmcia_device **p_dev, win_req_t *req, window_handle_t *wh);
400int pcmcia_reset_card(struct pcmcia_device *p_dev, client_req_t *req);
401int pcmcia_suspend_card(struct pcmcia_socket *skt); 387int pcmcia_suspend_card(struct pcmcia_socket *skt);
402int pcmcia_resume_card(struct pcmcia_socket *skt); 388int pcmcia_resume_card(struct pcmcia_socket *skt);
403int pcmcia_eject_card(struct pcmcia_socket *skt); 389int pcmcia_eject_card(struct pcmcia_socket *skt);
404int pcmcia_insert_card(struct pcmcia_socket *skt); 390int pcmcia_insert_card(struct pcmcia_socket *skt);
391int pccard_reset_card(struct pcmcia_socket *skt);
392
393struct pcmcia_device * pcmcia_dev_present(struct pcmcia_device *p_dev);
394void pcmcia_disable_device(struct pcmcia_device *p_dev);
405 395
406struct pcmcia_socket * pcmcia_get_socket(struct pcmcia_socket *skt); 396struct pcmcia_socket * pcmcia_get_socket(struct pcmcia_socket *skt);
407void pcmcia_put_socket(struct pcmcia_socket *skt); 397void pcmcia_put_socket(struct pcmcia_socket *skt);
408 398
399/* compatibility functions */
400#define pcmcia_reset_card(p_dev, req) \
401 pccard_reset_card(p_dev->socket)
402
409#endif /* __KERNEL__ */ 403#endif /* __KERNEL__ */
410 404
411#endif /* _LINUX_CS_H */ 405#endif /* _LINUX_CS_H */
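
Drivers that used to fill in Vpp1 and Vpp2 separately now set the single field; a typical probe-path assignment might read (link is a hypothetical struct pcmcia_device *, voltage in tenths of a volt as before):

link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.Vpp = 50;	/* 5.0 V; previously Vpp1 = Vpp2 = 50 */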
diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h
index 8e2a96396478..8c339f5678cf 100644
--- a/include/pcmcia/ds.h
+++ b/include/pcmcia/ds.h
@@ -39,7 +39,7 @@ typedef struct win_info_t {
39typedef struct bind_info_t { 39typedef struct bind_info_t {
40 dev_info_t dev_info; 40 dev_info_t dev_info;
41 u_char function; 41 u_char function;
42 struct dev_link_t *instance; 42 struct pcmcia_device *instance;
43 char name[DEV_NAME_LEN]; 43 char name[DEV_NAME_LEN];
44 u_short major, minor; 44 u_short major, minor;
45 void *next; 45 void *next;
@@ -96,6 +96,7 @@ typedef union ds_ioctl_arg_t {
96 96
97#ifdef __KERNEL__ 97#ifdef __KERNEL__
98#include <linux/device.h> 98#include <linux/device.h>
99#include <pcmcia/ss.h>
99 100
100typedef struct dev_node_t { 101typedef struct dev_node_t {
101 char dev_name[DEV_NAME_LEN]; 102 char dev_name[DEV_NAME_LEN];
@@ -103,34 +104,9 @@ typedef struct dev_node_t {
103 struct dev_node_t *next; 104 struct dev_node_t *next;
104} dev_node_t; 105} dev_node_t;
105 106
106typedef struct dev_link_t {
107 dev_node_t *dev;
108 u_int state, open;
109 wait_queue_head_t pending;
110 client_handle_t handle;
111 io_req_t io;
112 irq_req_t irq;
113 config_req_t conf;
114 window_handle_t win;
115 void *priv;
116 struct dev_link_t *next;
117} dev_link_t;
118
119/* Flags for device state */
120#define DEV_PRESENT 0x01
121#define DEV_CONFIG 0x02
122#define DEV_STALE_CONFIG 0x04 /* release on close */
123#define DEV_STALE_LINK 0x08 /* detach on release */
124#define DEV_CONFIG_PENDING 0x10
125#define DEV_RELEASE_PENDING 0x20
126#define DEV_SUSPEND 0x40
127#define DEV_BUSY 0x80
128
129#define DEV_OK(l) \
130 ((l) && ((l->state & ~DEV_BUSY) == (DEV_CONFIG|DEV_PRESENT)))
131
132 107
133struct pcmcia_socket; 108struct pcmcia_socket;
109struct config_t;
134 110
135struct pcmcia_driver { 111struct pcmcia_driver {
136 int (*probe) (struct pcmcia_device *dev); 112 int (*probe) (struct pcmcia_device *dev);
@@ -148,6 +124,7 @@ struct pcmcia_driver {
148int pcmcia_register_driver(struct pcmcia_driver *driver); 124int pcmcia_register_driver(struct pcmcia_driver *driver);
149void pcmcia_unregister_driver(struct pcmcia_driver *driver); 125void pcmcia_unregister_driver(struct pcmcia_driver *driver);
150 126
127
151struct pcmcia_device { 128struct pcmcia_device {
152 /* the socket and the device_no [for multifunction devices] 129 /* the socket and the device_no [for multifunction devices]
153 uniquely define a pcmcia_device */ 130 uniquely define a pcmcia_device */
@@ -160,21 +137,40 @@ struct pcmcia_device {
160 /* the hardware "function" device; certain subdevices can 137 /* the hardware "function" device; certain subdevices can
161 * share one hardware "function" device. */ 138 * share one hardware "function" device. */
162 u8 func; 139 u8 func;
140 struct config_t* function_config;
163 141
164 struct list_head socket_device_list; 142 struct list_head socket_device_list;
165 143
166 /* deprecated, a cleaned up version will be moved into this 144 /* deprecated, will be cleaned up soon */
167 struct soon */ 145 dev_node_t *dev_node;
168 dev_link_t *instance; 146 u_int open;
169 u_int state; 147 io_req_t io;
148 irq_req_t irq;
149 config_req_t conf;
150 window_handle_t win;
151
152 /* Is the device suspended, or in the process of
153 * being removed? */
154 u16 suspended:1;
155 u16 _removed:1;
156
157 /* Flags whether io, irq, win configurations were
158 * requested, and whether the configuration is "locked" */
159 u16 _irq:1;
160 u16 _io:1;
161 u16 _win:4;
162 u16 _locked:1;
163
164 /* Flag whether a "fuzzy" func_id based match is
165 * allowed. */
166 u16 allow_func_id_match:1;
170 167
171 /* information about this device */ 168 /* information about this device */
172 u8 has_manf_id:1; 169 u16 has_manf_id:1;
173 u8 has_card_id:1; 170 u16 has_card_id:1;
174 u8 has_func_id:1; 171 u16 has_func_id:1;
175 172
176 u8 allow_func_id_match:1; 173 u16 reserved:3;
177 u8 reserved:4;
178 174
179 u8 func_id; 175 u8 func_id;
180 u16 manf_id; 176 u16 manf_id;
@@ -182,22 +178,24 @@ struct pcmcia_device {
182 178
183 char * prod_id[4]; 179 char * prod_id[4];
184 180
181 struct device dev;
182
183#ifdef CONFIG_PCMCIA_IOCTL
185 /* device driver wanted by cardmgr */ 184 /* device driver wanted by cardmgr */
186 struct pcmcia_driver * cardmgr; 185 struct pcmcia_driver * cardmgr;
186#endif
187 187
188 struct device dev; 188 /* data private to drivers */
189 void *priv;
189}; 190};
190 191
191#define to_pcmcia_dev(n) container_of(n, struct pcmcia_device, dev) 192#define to_pcmcia_dev(n) container_of(n, struct pcmcia_device, dev)
192#define to_pcmcia_drv(n) container_of(n, struct pcmcia_driver, drv) 193#define to_pcmcia_drv(n) container_of(n, struct pcmcia_driver, drv)
193 194
194#define handle_to_pdev(handle) (handle)
195#define handle_to_dev(handle) (handle->dev) 195#define handle_to_dev(handle) (handle->dev)
196 196
197#define dev_to_instance(dev) (dev->instance)
198
199/* error reporting */ 197/* error reporting */
200void cs_error(client_handle_t handle, int func, int ret); 198void cs_error(struct pcmcia_device *handle, int func, int ret);
201 199
202#endif /* __KERNEL__ */ 200#endif /* __KERNEL__ */
203#endif /* _LINUX_DS_H */ 201#endif /* _LINUX_DS_H */
diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h
index 2889a69a7a8f..5e0a01ab2216 100644
--- a/include/pcmcia/ss.h
+++ b/include/pcmcia/ss.h
@@ -18,6 +18,7 @@
18#include <linux/config.h> 18#include <linux/config.h>
19#include <linux/device.h> 19#include <linux/device.h>
20#include <linux/sched.h> /* task_struct, completion */ 20#include <linux/sched.h> /* task_struct, completion */
21#include <linux/mutex.h>
21 22
22#include <pcmcia/cs_types.h> 23#include <pcmcia/cs_types.h>
23#include <pcmcia/cs.h> 24#include <pcmcia/cs.h>
@@ -146,14 +147,15 @@ extern struct pccard_resource_ops pccard_static_ops;
146/* !SS_CAP_STATIC_MAP */ 147/* !SS_CAP_STATIC_MAP */
147extern struct pccard_resource_ops pccard_nonstatic_ops; 148extern struct pccard_resource_ops pccard_nonstatic_ops;
148 149
150/* static mem, dynamic IO sockets */
151extern struct pccard_resource_ops pccard_iodyn_ops;
152
149/* 153/*
150 * Calls to set up low-level "Socket Services" drivers 154 * Calls to set up low-level "Socket Services" drivers
151 */ 155 */
152struct pcmcia_socket; 156struct pcmcia_socket;
153 157
154typedef struct io_window_t { 158typedef struct io_window_t {
155 u_int Attributes;
156 kio_addr_t BasePort, NumPorts;
157 kio_addr_t InUse, Config; 159 kio_addr_t InUse, Config;
158 struct resource *res; 160 struct resource *res;
159} io_window_t; 161} io_window_t;
@@ -162,7 +164,7 @@ typedef struct io_window_t {
162typedef struct window_t { 164typedef struct window_t {
163 u_short magic; 165 u_short magic;
164 u_short index; 166 u_short index;
165 client_handle_t handle; 167 struct pcmcia_device *handle;
166 struct pcmcia_socket *sock; 168 struct pcmcia_socket *sock;
167 pccard_mem_map ctl; 169 pccard_mem_map ctl;
168} window_t; 170} window_t;
@@ -186,7 +188,6 @@ struct pcmcia_socket {
186 u_short lock_count; 188 u_short lock_count;
187 pccard_mem_map cis_mem; 189 pccard_mem_map cis_mem;
188 void __iomem *cis_virt; 190 void __iomem *cis_virt;
189 struct config_t *config;
190 struct { 191 struct {
191 u_int AssignedIRQ; 192 u_int AssignedIRQ;
192 u_int Config; 193 u_int Config;
@@ -241,7 +242,7 @@ struct pcmcia_socket {
241#endif 242#endif
242 243
243 /* state thread */ 244 /* state thread */
244 struct semaphore skt_sem; /* protects socket h/w state */ 245 struct mutex skt_mutex; /* protects socket h/w state */
245 246
246 struct task_struct *thread; 247 struct task_struct *thread;
247 struct completion thread_done; 248 struct completion thread_done;
diff --git a/include/sound/core.h b/include/sound/core.h
index 7f32c12b4a0a..5135147f20e8 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -170,13 +170,13 @@ static inline void snd_power_change_state(struct snd_card *card, unsigned int st
170} 170}
171 171
172/* init.c */ 172/* init.c */
173int snd_power_wait(struct snd_card *card, unsigned int power_state, struct file *file); 173int snd_power_wait(struct snd_card *card, unsigned int power_state);
174 174
175#else /* ! CONFIG_PM */ 175#else /* ! CONFIG_PM */
176 176
177#define snd_power_lock(card) do { (void)(card); } while (0) 177#define snd_power_lock(card) do { (void)(card); } while (0)
178#define snd_power_unlock(card) do { (void)(card); } while (0) 178#define snd_power_unlock(card) do { (void)(card); } while (0)
179static inline int snd_power_wait(struct snd_card *card, unsigned int state, struct file *file) { return 0; } 179static inline int snd_power_wait(struct snd_card *card, unsigned int state) { return 0; }
180#define snd_power_get_state(card) SNDRV_CTL_POWER_D0 180#define snd_power_get_state(card) SNDRV_CTL_POWER_D0
181#define snd_power_change_state(card, state) do { (void)(card); } while (0) 181#define snd_power_change_state(card, state) do { (void)(card); } while (0)
182 182
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 15b885660bf0..66b1f08b42b9 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -369,6 +369,7 @@ struct snd_pcm_substream {
369 /* -- assigned files -- */ 369 /* -- assigned files -- */
370 struct snd_pcm_file *file; 370 struct snd_pcm_file *file;
371 struct file *ffile; 371 struct file *ffile;
372 void (*pcm_release)(struct snd_pcm_substream *);
372#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE) 373#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
373 /* -- OSS things -- */ 374 /* -- OSS things -- */
374 struct snd_pcm_oss_substream oss; 375 struct snd_pcm_oss_substream oss;
@@ -381,13 +382,10 @@ struct snd_pcm_substream {
381 struct snd_info_entry *proc_prealloc_entry; 382 struct snd_info_entry *proc_prealloc_entry;
382 /* misc flags */ 383 /* misc flags */
383 unsigned int no_mmap_ctrl: 1; 384 unsigned int no_mmap_ctrl: 1;
385 unsigned int hw_opened: 1;
384}; 386};
385 387
386#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
387#define SUBSTREAM_BUSY(substream) ((substream)->file != NULL || ((substream)->oss.file != NULL))
388#else
389#define SUBSTREAM_BUSY(substream) ((substream)->file != NULL) 388#define SUBSTREAM_BUSY(substream) ((substream)->file != NULL)
390#endif
391 389
392 390
393struct snd_pcm_str { 391struct snd_pcm_str {
@@ -460,7 +458,6 @@ int snd_pcm_info_user(struct snd_pcm_substream *substream,
460 struct snd_pcm_info __user *info); 458 struct snd_pcm_info __user *info);
461int snd_pcm_status(struct snd_pcm_substream *substream, 459int snd_pcm_status(struct snd_pcm_substream *substream,
462 struct snd_pcm_status *status); 460 struct snd_pcm_status *status);
463int snd_pcm_prepare(struct snd_pcm_substream *substream);
464int snd_pcm_start(struct snd_pcm_substream *substream); 461int snd_pcm_start(struct snd_pcm_substream *substream);
465int snd_pcm_stop(struct snd_pcm_substream *substream, int status); 462int snd_pcm_stop(struct snd_pcm_substream *substream, int status);
466int snd_pcm_drain_done(struct snd_pcm_substream *substream); 463int snd_pcm_drain_done(struct snd_pcm_substream *substream);
@@ -468,11 +465,13 @@ int snd_pcm_drain_done(struct snd_pcm_substream *substream);
468int snd_pcm_suspend(struct snd_pcm_substream *substream); 465int snd_pcm_suspend(struct snd_pcm_substream *substream);
469int snd_pcm_suspend_all(struct snd_pcm *pcm); 466int snd_pcm_suspend_all(struct snd_pcm *pcm);
470#endif 467#endif
471int snd_pcm_kernel_playback_ioctl(struct snd_pcm_substream *substream, unsigned int cmd, void *arg);
472int snd_pcm_kernel_capture_ioctl(struct snd_pcm_substream *substream, unsigned int cmd, void *arg);
473int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream, unsigned int cmd, void *arg); 468int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream, unsigned int cmd, void *arg);
474int snd_pcm_open_substream(struct snd_pcm *pcm, int stream, struct snd_pcm_substream **rsubstream); 469int snd_pcm_open_substream(struct snd_pcm *pcm, int stream, struct file *file,
470 struct snd_pcm_substream **rsubstream);
475void snd_pcm_release_substream(struct snd_pcm_substream *substream); 471void snd_pcm_release_substream(struct snd_pcm_substream *substream);
472int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream, struct file *file,
473 struct snd_pcm_substream **rsubstream);
474void snd_pcm_detach_substream(struct snd_pcm_substream *substream);
476void snd_pcm_vma_notify_data(void *client, void *data); 475void snd_pcm_vma_notify_data(void *client, void *data);
477int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area); 476int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area);
478 477
diff --git a/include/sound/pcm_oss.h b/include/sound/pcm_oss.h
index bff0778e1969..39df2baca18a 100644
--- a/include/sound/pcm_oss.h
+++ b/include/sound/pcm_oss.h
@@ -69,8 +69,7 @@ struct snd_pcm_oss_file {
69 69
70struct snd_pcm_oss_substream { 70struct snd_pcm_oss_substream {
71 unsigned oss: 1; /* oss mode */ 71 unsigned oss: 1; /* oss mode */
72 struct snd_pcm_oss_setup *setup; /* active setup */ 72 struct snd_pcm_oss_setup setup; /* active setup */
73 struct snd_pcm_oss_file *file;
74}; 73};
75 74
76struct snd_pcm_oss_stream { 75struct snd_pcm_oss_stream {
diff --git a/ipc/shm.c b/ipc/shm.c
index f806a2e314e0..6b0c9af5bbf7 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -91,8 +91,8 @@ static inline int shm_addid(struct shmid_kernel *shp)
91static inline void shm_inc (int id) { 91static inline void shm_inc (int id) {
92 struct shmid_kernel *shp; 92 struct shmid_kernel *shp;
93 93
94 if(!(shp = shm_lock(id))) 94 shp = shm_lock(id);
95 BUG(); 95 BUG_ON(!shp);
96 shp->shm_atim = get_seconds(); 96 shp->shm_atim = get_seconds();
97 shp->shm_lprid = current->tgid; 97 shp->shm_lprid = current->tgid;
98 shp->shm_nattch++; 98 shp->shm_nattch++;
@@ -142,8 +142,8 @@ static void shm_close (struct vm_area_struct *shmd)
142 142
143 mutex_lock(&shm_ids.mutex); 143 mutex_lock(&shm_ids.mutex);
144 /* remove from the list of attaches of the shm segment */ 144 /* remove from the list of attaches of the shm segment */
145 if(!(shp = shm_lock(id))) 145 shp = shm_lock(id);
146 BUG(); 146 BUG_ON(!shp);
147 shp->shm_lprid = current->tgid; 147 shp->shm_lprid = current->tgid;
148 shp->shm_dtim = get_seconds(); 148 shp->shm_dtim = get_seconds();
149 shp->shm_nattch--; 149 shp->shm_nattch--;
@@ -283,8 +283,7 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
283 err = -EEXIST; 283 err = -EEXIST;
284 } else { 284 } else {
285 shp = shm_lock(id); 285 shp = shm_lock(id);
286 if(shp==NULL) 286 BUG_ON(shp==NULL);
287 BUG();
288 if (shp->shm_segsz < size) 287 if (shp->shm_segsz < size)
289 err = -EINVAL; 288 err = -EINVAL;
290 else if (ipcperms(&shp->shm_perm, shmflg)) 289 else if (ipcperms(&shp->shm_perm, shmflg))
@@ -774,8 +773,8 @@ invalid:
774 up_write(&current->mm->mmap_sem); 773 up_write(&current->mm->mmap_sem);
775 774
776 mutex_lock(&shm_ids.mutex); 775 mutex_lock(&shm_ids.mutex);
777 if(!(shp = shm_lock(shmid))) 776 shp = shm_lock(shmid);
778 BUG(); 777 BUG_ON(!shp);
779 shp->shm_nattch--; 778 shp->shm_nattch--;
780 if(shp->shm_nattch == 0 && 779 if(shp->shm_nattch == 0 &&
781 shp->shm_perm.mode & SHM_DEST) 780 shp->shm_perm.mode & SHM_DEST)
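
The ipc conversions all follow the same shape: the side-effecting call keeps its own statement and only the check folds into BUG_ON(), so the code does not depend on BUG_ON() evaluating its argument:

/* before */
if (!(shp = shm_lock(id)))
	BUG();

/* after: the call keeps its side effect outside the macro */
shp = shm_lock(id);
BUG_ON(!shp);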
diff --git a/ipc/util.c b/ipc/util.c
index 23151ef32590..5e785a29e1e6 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -266,8 +266,7 @@ struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
266{ 266{
267 struct kern_ipc_perm* p; 267 struct kern_ipc_perm* p;
268 int lid = id % SEQ_MULTIPLIER; 268 int lid = id % SEQ_MULTIPLIER;
269 if(lid >= ids->entries->size) 269 BUG_ON(lid >= ids->entries->size);
270 BUG();
271 270
272 /* 271 /*
273 * do not need a rcu_dereference()() here to force ordering 272 * do not need a rcu_dereference()() here to force ordering
@@ -275,8 +274,7 @@ struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
275 */ 274 */
276 p = ids->entries->p[lid]; 275 p = ids->entries->p[lid];
277 ids->entries->p[lid] = NULL; 276 ids->entries->p[lid] = NULL;
278 if(p==NULL) 277 BUG_ON(p==NULL);
279 BUG();
280 ids->in_use--; 278 ids->in_use--;
281 279
282 if (lid == ids->max_id) { 280 if (lid == ids->max_id) {
diff --git a/kernel/acct.c b/kernel/acct.c
index 065d8b4e51ef..b327f4d20104 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -449,8 +449,8 @@ static void do_acct_process(long exitcode, struct file *file)
449 /* calculate run_time in nsec*/ 449 /* calculate run_time in nsec*/
450 do_posix_clock_monotonic_gettime(&uptime); 450 do_posix_clock_monotonic_gettime(&uptime);
451 run_time = (u64)uptime.tv_sec*NSEC_PER_SEC + uptime.tv_nsec; 451 run_time = (u64)uptime.tv_sec*NSEC_PER_SEC + uptime.tv_nsec;
452 run_time -= (u64)current->start_time.tv_sec*NSEC_PER_SEC 452 run_time -= (u64)current->group_leader->start_time.tv_sec * NSEC_PER_SEC
453 + current->start_time.tv_nsec; 453 + current->group_leader->start_time.tv_nsec;
454 /* convert nsec -> AHZ */ 454 /* convert nsec -> AHZ */
455 elapsed = nsec_to_AHZ(run_time); 455 elapsed = nsec_to_AHZ(run_time);
456#if ACCT_VERSION==3 456#if ACCT_VERSION==3
@@ -469,10 +469,10 @@ static void do_acct_process(long exitcode, struct file *file)
469#endif 469#endif
470 do_div(elapsed, AHZ); 470 do_div(elapsed, AHZ);
471 ac.ac_btime = xtime.tv_sec - elapsed; 471 ac.ac_btime = xtime.tv_sec - elapsed;
472 jiffies = cputime_to_jiffies(cputime_add(current->group_leader->utime, 472 jiffies = cputime_to_jiffies(cputime_add(current->utime,
473 current->signal->utime)); 473 current->signal->utime));
474 ac.ac_utime = encode_comp_t(jiffies_to_AHZ(jiffies)); 474 ac.ac_utime = encode_comp_t(jiffies_to_AHZ(jiffies));
475 jiffies = cputime_to_jiffies(cputime_add(current->group_leader->stime, 475 jiffies = cputime_to_jiffies(cputime_add(current->stime,
476 current->signal->stime)); 476 current->signal->stime));
477 ac.ac_stime = encode_comp_t(jiffies_to_AHZ(jiffies)); 477 ac.ac_stime = encode_comp_t(jiffies_to_AHZ(jiffies));
478 /* we really need to bite the bullet and change layout */ 478 /* we really need to bite the bullet and change layout */
@@ -522,9 +522,9 @@ static void do_acct_process(long exitcode, struct file *file)
522 ac.ac_io = encode_comp_t(0 /* current->io_usage */); /* %% */ 522 ac.ac_io = encode_comp_t(0 /* current->io_usage */); /* %% */
523 ac.ac_rw = encode_comp_t(ac.ac_io / 1024); 523 ac.ac_rw = encode_comp_t(ac.ac_io / 1024);
524 ac.ac_minflt = encode_comp_t(current->signal->min_flt + 524 ac.ac_minflt = encode_comp_t(current->signal->min_flt +
525 current->group_leader->min_flt); 525 current->min_flt);
526 ac.ac_majflt = encode_comp_t(current->signal->maj_flt + 526 ac.ac_majflt = encode_comp_t(current->signal->maj_flt +
527 current->group_leader->maj_flt); 527 current->maj_flt);
528 ac.ac_swaps = encode_comp_t(0); 528 ac.ac_swaps = encode_comp_t(0);
529 ac.ac_exitcode = exitcode; 529 ac.ac_exitcode = exitcode;
530 530
diff --git a/kernel/audit.c b/kernel/audit.c
index 04fe2e301b61..c8ccbd09048f 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -578,7 +578,7 @@ static int __init audit_enable(char *str)
578 audit_initialized ? "" : " (after initialization)"); 578 audit_initialized ? "" : " (after initialization)");
579 if (audit_initialized) 579 if (audit_initialized)
580 audit_enabled = audit_default; 580 audit_enabled = audit_default;
581 return 0; 581 return 1;
582} 582}
583 583
584__setup("audit=", audit_enable); 584__setup("audit=", audit_enable);
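
Returning 1 from a __setup() handler tells the boot-option parser that the option was consumed, so "audit=" is no longer also passed along as an unrecognized argument. The general shape, with a hypothetical option:

static int __init myopt_setup(char *str)
{
	/* parse str ... */
	return 1;	/* handled, do not treat "myopt=" as unknown */
}
__setup("myopt=", myopt_setup);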
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 18aea1bd1284..72248d1b9e3f 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -616,12 +616,10 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
616 * current->cpuset if a task has its memory placement changed. 616 * current->cpuset if a task has its memory placement changed.
617 * Do not call this routine if in_interrupt(). 617 * Do not call this routine if in_interrupt().
618 * 618 *
619 * Call without callback_mutex or task_lock() held. May be called 619 * Call without callback_mutex or task_lock() held. May be
620 * with or without manage_mutex held. Doesn't need task_lock to guard 620 * called with or without manage_mutex held. Thanks in part to
621 * against another task changing a non-NULL cpuset pointer to NULL, 621 * 'the_top_cpuset_hack', the tasks cpuset pointer will never
622 * as that is only done by a task on itself, and if the current task 622 * be NULL. This routine also might acquire callback_mutex and
623 * is here, it is not simultaneously in the exit code NULL'ing its
624 * cpuset pointer. This routine also might acquire callback_mutex and
625 * current->mm->mmap_sem during call. 623 * current->mm->mmap_sem during call.
626 * 624 *
627 * Reading current->cpuset->mems_generation doesn't need task_lock 625 * Reading current->cpuset->mems_generation doesn't need task_lock
@@ -836,6 +834,55 @@ static int update_cpumask(struct cpuset *cs, char *buf)
836} 834}
837 835
838/* 836/*
837 * cpuset_migrate_mm
838 *
839 * Migrate memory region from one set of nodes to another.
840 *
841 * Temporarilly set tasks mems_allowed to target nodes of migration,
842 * so that the migration code can allocate pages on these nodes.
843 *
844 * Call holding manage_mutex, so our current->cpuset won't change
845 * during this call, as manage_mutex holds off any attach_task()
846 * calls. Therefore we don't need to take task_lock around the
847 * call to guarantee_online_mems(), as we know no one is changing
848 * our tasks cpuset.
849 *
850 * Hold callback_mutex around the two modifications of our tasks
851 * mems_allowed to synchronize with cpuset_mems_allowed().
852 *
853 * While the mm_struct we are migrating is typically from some
854 * other task, the task_struct mems_allowed that we are hacking
855 * is for our current task, which must allocate new pages for that
856 * migrating memory region.
857 *
858 * We call cpuset_update_task_memory_state() before hacking
859 * our tasks mems_allowed, so that we are assured of being in
860 * sync with our tasks cpuset, and in particular, callbacks to
861 * cpuset_update_task_memory_state() from nested page allocations
862 * won't see any mismatch of our cpuset and task mems_generation
863 * values, so won't overwrite our hacked tasks mems_allowed
864 * nodemask.
865 */
866
867static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
868 const nodemask_t *to)
869{
870 struct task_struct *tsk = current;
871
872 cpuset_update_task_memory_state();
873
874 mutex_lock(&callback_mutex);
875 tsk->mems_allowed = *to;
876 mutex_unlock(&callback_mutex);
877
878 do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
879
880 mutex_lock(&callback_mutex);
881 guarantee_online_mems(tsk->cpuset, &tsk->mems_allowed);
882 mutex_unlock(&callback_mutex);
883}
884
885/*
839 * Handle user request to change the 'mems' memory placement 886 * Handle user request to change the 'mems' memory placement
840 * of a cpuset. Needs to validate the request, update the 887 * of a cpuset. Needs to validate the request, update the
841 * cpusets mems_allowed and mems_generation, and for each 888 * cpusets mems_allowed and mems_generation, and for each
@@ -947,10 +994,8 @@ static int update_nodemask(struct cpuset *cs, char *buf)
947 struct mm_struct *mm = mmarray[i]; 994 struct mm_struct *mm = mmarray[i];
948 995
949 mpol_rebind_mm(mm, &cs->mems_allowed); 996 mpol_rebind_mm(mm, &cs->mems_allowed);
950 if (migrate) { 997 if (migrate)
951 do_migrate_pages(mm, &oldmem, &cs->mems_allowed, 998 cpuset_migrate_mm(mm, &oldmem, &cs->mems_allowed);
952 MPOL_MF_MOVE_ALL);
953 }
954 mmput(mm); 999 mmput(mm);
955 } 1000 }
956 1001
@@ -1185,11 +1230,11 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
1185 mm = get_task_mm(tsk); 1230 mm = get_task_mm(tsk);
1186 if (mm) { 1231 if (mm) {
1187 mpol_rebind_mm(mm, &to); 1232 mpol_rebind_mm(mm, &to);
1233 if (is_memory_migrate(cs))
1234 cpuset_migrate_mm(mm, &from, &to);
1188 mmput(mm); 1235 mmput(mm);
1189 } 1236 }
1190 1237
1191 if (is_memory_migrate(cs))
1192 do_migrate_pages(tsk->mm, &from, &to, MPOL_MF_MOVE_ALL);
1193 put_task_struct(tsk); 1238 put_task_struct(tsk);
1194 synchronize_rcu(); 1239 synchronize_rcu();
1195 if (atomic_dec_and_test(&oldcs->count)) 1240 if (atomic_dec_and_test(&oldcs->count))
diff --git a/kernel/exit.c b/kernel/exit.c
index bc0ec674d3f4..6c2eeb8f6390 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -127,6 +127,11 @@ static void __exit_signal(struct task_struct *tsk)
127 } 127 }
128} 128}
129 129
130static void delayed_put_task_struct(struct rcu_head *rhp)
131{
132 put_task_struct(container_of(rhp, struct task_struct, rcu));
133}
134
130void release_task(struct task_struct * p) 135void release_task(struct task_struct * p)
131{ 136{
132 int zap_leader; 137 int zap_leader;
@@ -168,7 +173,7 @@ repeat:
168 spin_unlock(&p->proc_lock); 173 spin_unlock(&p->proc_lock);
169 proc_pid_flush(proc_dentry); 174 proc_pid_flush(proc_dentry);
170 release_thread(p); 175 release_thread(p);
171 put_task_struct(p); 176 call_rcu(&p->rcu, delayed_put_task_struct);
172 177
173 p = leader; 178 p = leader;
174 if (unlikely(zap_leader)) 179 if (unlikely(zap_leader))
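
release_task() now hands the final put_task_struct() to an RCU callback, so a task_struct reached through an RCU-protected lookup cannot be freed while a reader is still inside its rcu_read_lock() section.  A sketch of the reader-side pattern this enables (the helper name is hypothetical; the calls are interfaces that appear elsewhere in this diff):

	/* Sketch: look up a task and pin it without taking tasklist_lock. */
	static struct task_struct *grab_task(int nr)
	{
		struct task_struct *p;

		rcu_read_lock();
		p = find_task_by_pid_type(PIDTYPE_PID, nr);
		if (p)
			get_task_struct(p);	/* safe: the free is RCU-deferred */
		rcu_read_unlock();

		return p;	/* caller drops the reference with put_task_struct() */
	}
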
diff --git a/kernel/fork.c b/kernel/fork.c
index b3f7a1bb5e55..3384eb89cb1c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -108,10 +108,8 @@ void free_task(struct task_struct *tsk)
108} 108}
109EXPORT_SYMBOL(free_task); 109EXPORT_SYMBOL(free_task);
110 110
111void __put_task_struct_cb(struct rcu_head *rhp) 111void __put_task_struct(struct task_struct *tsk)
112{ 112{
113 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
114
115 WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE))); 113 WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)));
116 WARN_ON(atomic_read(&tsk->usage)); 114 WARN_ON(atomic_read(&tsk->usage));
117 WARN_ON(tsk == current); 115 WARN_ON(tsk == current);
@@ -126,6 +124,12 @@ void __put_task_struct_cb(struct rcu_head *rhp)
126 free_task(tsk); 124 free_task(tsk);
127} 125}
128 126
127void __put_task_struct_cb(struct rcu_head *rhp)
128{
129 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
130 __put_task_struct(tsk);
131}
132
129void __init fork_init(unsigned long mempages) 133void __init fork_init(unsigned long mempages)
130{ 134{
131#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR 135#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
@@ -721,7 +725,7 @@ out_release:
721 free_fdset (new_fdt->open_fds, new_fdt->max_fdset); 725 free_fdset (new_fdt->open_fds, new_fdt->max_fdset);
722 free_fd_array(new_fdt->fd, new_fdt->max_fds); 726 free_fd_array(new_fdt->fd, new_fdt->max_fds);
723 kmem_cache_free(files_cachep, newf); 727 kmem_cache_free(files_cachep, newf);
724 goto out; 728 return NULL;
725} 729}
726 730
727static int copy_files(unsigned long clone_flags, struct task_struct * tsk) 731static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
@@ -1311,17 +1315,19 @@ long do_fork(unsigned long clone_flags,
1311{ 1315{
1312 struct task_struct *p; 1316 struct task_struct *p;
1313 int trace = 0; 1317 int trace = 0;
1314 long pid = alloc_pidmap(); 1318 struct pid *pid = alloc_pid();
1319 long nr;
1315 1320
1316 if (pid < 0) 1321 if (!pid)
1317 return -EAGAIN; 1322 return -EAGAIN;
1323 nr = pid->nr;
1318 if (unlikely(current->ptrace)) { 1324 if (unlikely(current->ptrace)) {
1319 trace = fork_traceflag (clone_flags); 1325 trace = fork_traceflag (clone_flags);
1320 if (trace) 1326 if (trace)
1321 clone_flags |= CLONE_PTRACE; 1327 clone_flags |= CLONE_PTRACE;
1322 } 1328 }
1323 1329
1324 p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, pid); 1330 p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, nr);
1325 /* 1331 /*
1326 * Do this prior waking up the new thread - the thread pointer 1332 * Do this prior waking up the new thread - the thread pointer
1327 * might get invalid after that point, if the thread exits quickly. 1333 * might get invalid after that point, if the thread exits quickly.
@@ -1348,7 +1354,7 @@ long do_fork(unsigned long clone_flags,
1348 p->state = TASK_STOPPED; 1354 p->state = TASK_STOPPED;
1349 1355
1350 if (unlikely (trace)) { 1356 if (unlikely (trace)) {
1351 current->ptrace_message = pid; 1357 current->ptrace_message = nr;
1352 ptrace_notify ((trace << 8) | SIGTRAP); 1358 ptrace_notify ((trace << 8) | SIGTRAP);
1353 } 1359 }
1354 1360
@@ -1358,10 +1364,10 @@ long do_fork(unsigned long clone_flags,
1358 ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP); 1364 ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
1359 } 1365 }
1360 } else { 1366 } else {
1361 free_pidmap(pid); 1367 free_pid(pid);
1362 pid = PTR_ERR(p); 1368 nr = PTR_ERR(p);
1363 } 1369 }
1364 return pid; 1370 return nr;
1365} 1371}
1366 1372
1367#ifndef ARCH_MIN_MMSTRUCT_ALIGN 1373#ifndef ARCH_MIN_MMSTRUCT_ALIGN
diff --git a/kernel/futex.c b/kernel/futex.c
index 9c9b2b6b22dd..5699c512057b 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1039,9 +1039,11 @@ asmlinkage long sys_futex(u32 __user *uaddr, int op, int val,
1039 unsigned long timeout = MAX_SCHEDULE_TIMEOUT; 1039 unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
1040 int val2 = 0; 1040 int val2 = 0;
1041 1041
1042 if ((op == FUTEX_WAIT) && utime) { 1042 if (utime && (op == FUTEX_WAIT)) {
1043 if (copy_from_user(&t, utime, sizeof(t)) != 0) 1043 if (copy_from_user(&t, utime, sizeof(t)) != 0)
1044 return -EFAULT; 1044 return -EFAULT;
1045 if (!timespec_valid(&t))
1046 return -EINVAL;
1045 timeout = timespec_to_jiffies(&t) + 1; 1047 timeout = timespec_to_jiffies(&t) + 1;
1046 } 1048 }
1047 /* 1049 /*
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 54274fc85321..1ab6a0ea3d14 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -129,9 +129,11 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
129 unsigned long timeout = MAX_SCHEDULE_TIMEOUT; 129 unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
130 int val2 = 0; 130 int val2 = 0;
131 131
132 if ((op == FUTEX_WAIT) && utime) { 132 if (utime && (op == FUTEX_WAIT)) {
133 if (get_compat_timespec(&t, utime)) 133 if (get_compat_timespec(&t, utime))
134 return -EFAULT; 134 return -EFAULT;
135 if (!timespec_valid(&t))
136 return -EINVAL;
135 timeout = timespec_to_jiffies(&t) + 1; 137 timeout = timespec_to_jiffies(&t) + 1;
136 } 138 }
137 if (op >= FUTEX_REQUEUE) 139 if (op >= FUTEX_REQUEUE)
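
Both futex entry points now reject a malformed user timeout up front instead of folding it into a bogus jiffies value.  The property checked by timespec_valid() amounts to the following sketch (the helper below is only an illustration of that condition):

	/* Sketch: what a "valid" timespec means for these timeouts. */
	static int timespec_is_sane(const struct timespec *ts)
	{
		if (ts->tv_sec < 0)
			return 0;	/* negative seconds: timeout in the past */
		if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
			return 0;	/* nanoseconds must be normalized to < 1s */
		return 1;
	}
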
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 0237a556eb1f..f181ff4dd32e 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -606,6 +606,9 @@ static inline void run_hrtimer_queue(struct hrtimer_base *base)
606{ 606{
607 struct rb_node *node; 607 struct rb_node *node;
608 608
609 if (!base->first)
610 return;
611
609 if (base->get_softirq_time) 612 if (base->get_softirq_time)
610 base->softirq_time = base->get_softirq_time(); 613 base->softirq_time = base->get_softirq_time();
611 614
@@ -655,29 +658,28 @@ void hrtimer_run_queues(void)
655/* 658/*
656 * Sleep related functions: 659 * Sleep related functions:
657 */ 660 */
658 661static int hrtimer_wakeup(struct hrtimer *timer)
659struct sleep_hrtimer {
660 struct hrtimer timer;
661 struct task_struct *task;
662 int expired;
663};
664
665static int nanosleep_wakeup(struct hrtimer *timer)
666{ 662{
667 struct sleep_hrtimer *t = 663 struct hrtimer_sleeper *t =
668 container_of(timer, struct sleep_hrtimer, timer); 664 container_of(timer, struct hrtimer_sleeper, timer);
665 struct task_struct *task = t->task;
669 666
670 t->expired = 1; 667 t->task = NULL;
671 wake_up_process(t->task); 668 if (task)
669 wake_up_process(task);
672 670
673 return HRTIMER_NORESTART; 671 return HRTIMER_NORESTART;
674} 672}
675 673
676static int __sched do_nanosleep(struct sleep_hrtimer *t, enum hrtimer_mode mode) 674void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, task_t *task)
677{ 675{
678 t->timer.function = nanosleep_wakeup; 676 sl->timer.function = hrtimer_wakeup;
679 t->task = current; 677 sl->task = task;
680 t->expired = 0; 678}
679
680static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
681{
682 hrtimer_init_sleeper(t, current);
681 683
682 do { 684 do {
683 set_current_state(TASK_INTERRUPTIBLE); 685 set_current_state(TASK_INTERRUPTIBLE);
@@ -685,18 +687,17 @@ static int __sched do_nanosleep(struct sleep_hrtimer *t, enum hrtimer_mode mode)
685 687
686 schedule(); 688 schedule();
687 689
688 if (unlikely(!t->expired)) { 690 hrtimer_cancel(&t->timer);
689 hrtimer_cancel(&t->timer); 691 mode = HRTIMER_ABS;
690 mode = HRTIMER_ABS; 692
691 } 693 } while (t->task && !signal_pending(current));
692 } while (!t->expired && !signal_pending(current));
693 694
694 return t->expired; 695 return t->task == NULL;
695} 696}
696 697
697static long __sched nanosleep_restart(struct restart_block *restart) 698static long __sched nanosleep_restart(struct restart_block *restart)
698{ 699{
699 struct sleep_hrtimer t; 700 struct hrtimer_sleeper t;
700 struct timespec __user *rmtp; 701 struct timespec __user *rmtp;
701 struct timespec tu; 702 struct timespec tu;
702 ktime_t time; 703 ktime_t time;
@@ -729,7 +730,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
729 const enum hrtimer_mode mode, const clockid_t clockid) 730 const enum hrtimer_mode mode, const clockid_t clockid)
730{ 731{
731 struct restart_block *restart; 732 struct restart_block *restart;
732 struct sleep_hrtimer t; 733 struct hrtimer_sleeper t;
733 struct timespec tu; 734 struct timespec tu;
734 ktime_t rem; 735 ktime_t rem;
735 736
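
The sleeper's task pointer now doubles as the completion flag: hrtimer_wakeup() clears t->task before issuing the wakeup, and do_nanosleep() keeps waiting while t->task is still set, which is why the separate "expired" field disappears.  A condensed sketch of how a caller drives the new hrtimer_sleeper end to end (hrtimer_init()/hrtimer_start() are pre-existing hrtimer calls; the wrapper name is hypothetical):

	/* Sketch: signal-aware relative sleep built on hrtimer_sleeper.
	 * Returns 1 if the timer fired, 0 if a signal cut the sleep short. */
	static int sleep_for(ktime_t delta)
	{
		struct hrtimer_sleeper sl;
		enum hrtimer_mode mode = HRTIMER_REL;

		hrtimer_init(&sl.timer, CLOCK_MONOTONIC, mode);
		sl.timer.expires = delta;
		hrtimer_init_sleeper(&sl, current);	/* sl.task = current */

		do {
			set_current_state(TASK_INTERRUPTIBLE);
			hrtimer_start(&sl.timer, sl.timer.expires, mode);

			schedule();

			hrtimer_cancel(&sl.timer);
			mode = HRTIMER_ABS;	/* expires is absolute after the first start */
		} while (sl.task && !signal_pending(current));

		__set_current_state(TASK_RUNNING);
		return sl.task == NULL;
	}
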
diff --git a/kernel/module.c b/kernel/module.c
index bd088a7c1499..d24deb0dbbc9 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1254,6 +1254,7 @@ static inline int license_is_gpl_compatible(const char *license)
1254 || strcmp(license, "GPL v2") == 0 1254 || strcmp(license, "GPL v2") == 0
1255 || strcmp(license, "GPL and additional rights") == 0 1255 || strcmp(license, "GPL and additional rights") == 0
1256 || strcmp(license, "Dual BSD/GPL") == 0 1256 || strcmp(license, "Dual BSD/GPL") == 0
1257 || strcmp(license, "Dual MIT/GPL") == 0
1257 || strcmp(license, "Dual MPL/GPL") == 0); 1258 || strcmp(license, "Dual MPL/GPL") == 0);
1258} 1259}
1259 1260
diff --git a/kernel/pid.c b/kernel/pid.c
index a9f2dfd006d2..eeb836b65ca4 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -28,8 +28,9 @@
28#include <linux/hash.h> 28#include <linux/hash.h>
29 29
30#define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift) 30#define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
31static struct hlist_head *pid_hash[PIDTYPE_MAX]; 31static struct hlist_head *pid_hash;
32static int pidhash_shift; 32static int pidhash_shift;
33static kmem_cache_t *pid_cachep;
33 34
34int pid_max = PID_MAX_DEFAULT; 35int pid_max = PID_MAX_DEFAULT;
35int last_pid; 36int last_pid;
@@ -60,9 +61,22 @@ typedef struct pidmap {
60static pidmap_t pidmap_array[PIDMAP_ENTRIES] = 61static pidmap_t pidmap_array[PIDMAP_ENTRIES] =
61 { [ 0 ... PIDMAP_ENTRIES-1 ] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } }; 62 { [ 0 ... PIDMAP_ENTRIES-1 ] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } };
62 63
64/*
65 * Note: disable interrupts while the pidmap_lock is held as an
66 * interrupt might come in and do read_lock(&tasklist_lock).
67 *
68 * If we don't disable interrupts there is a nasty deadlock between
69 * detach_pid()->free_pid() and another cpu that does
70 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
71 * read_lock(&tasklist_lock);
72 *
73 * After we clean up the tasklist_lock and know there are no
74 * irq handlers that take it we can leave the interrupts enabled.
75 * For now it is easier to be safe than to prove it can't happen.
76 */
63static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock); 77static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
64 78
65fastcall void free_pidmap(int pid) 79static fastcall void free_pidmap(int pid)
66{ 80{
67 pidmap_t *map = pidmap_array + pid / BITS_PER_PAGE; 81 pidmap_t *map = pidmap_array + pid / BITS_PER_PAGE;
68 int offset = pid & BITS_PER_PAGE_MASK; 82 int offset = pid & BITS_PER_PAGE_MASK;
@@ -71,7 +85,7 @@ fastcall void free_pidmap(int pid)
71 atomic_inc(&map->nr_free); 85 atomic_inc(&map->nr_free);
72} 86}
73 87
74int alloc_pidmap(void) 88static int alloc_pidmap(void)
75{ 89{
76 int i, offset, max_scan, pid, last = last_pid; 90 int i, offset, max_scan, pid, last = last_pid;
77 pidmap_t *map; 91 pidmap_t *map;
@@ -89,12 +103,12 @@ int alloc_pidmap(void)
89 * Free the page if someone raced with us 103 * Free the page if someone raced with us
90 * installing it: 104 * installing it:
91 */ 105 */
92 spin_lock(&pidmap_lock); 106 spin_lock_irq(&pidmap_lock);
93 if (map->page) 107 if (map->page)
94 free_page(page); 108 free_page(page);
95 else 109 else
96 map->page = (void *)page; 110 map->page = (void *)page;
97 spin_unlock(&pidmap_lock); 111 spin_unlock_irq(&pidmap_lock);
98 if (unlikely(!map->page)) 112 if (unlikely(!map->page))
99 break; 113 break;
100 } 114 }
@@ -131,13 +145,73 @@ int alloc_pidmap(void)
131 return -1; 145 return -1;
132} 146}
133 147
134struct pid * fastcall find_pid(enum pid_type type, int nr) 148fastcall void put_pid(struct pid *pid)
149{
150 if (!pid)
151 return;
152 if ((atomic_read(&pid->count) == 1) ||
153 atomic_dec_and_test(&pid->count))
154 kmem_cache_free(pid_cachep, pid);
155}
156
157static void delayed_put_pid(struct rcu_head *rhp)
158{
159 struct pid *pid = container_of(rhp, struct pid, rcu);
160 put_pid(pid);
161}
162
163fastcall void free_pid(struct pid *pid)
164{
165 /* We can be called with write_lock_irq(&tasklist_lock) held */
166 unsigned long flags;
167
168 spin_lock_irqsave(&pidmap_lock, flags);
169 hlist_del_rcu(&pid->pid_chain);
170 spin_unlock_irqrestore(&pidmap_lock, flags);
171
172 free_pidmap(pid->nr);
173 call_rcu(&pid->rcu, delayed_put_pid);
174}
175
176struct pid *alloc_pid(void)
177{
178 struct pid *pid;
179 enum pid_type type;
180 int nr = -1;
181
182 pid = kmem_cache_alloc(pid_cachep, GFP_KERNEL);
183 if (!pid)
184 goto out;
185
186 nr = alloc_pidmap();
187 if (nr < 0)
188 goto out_free;
189
190 atomic_set(&pid->count, 1);
191 pid->nr = nr;
192 for (type = 0; type < PIDTYPE_MAX; ++type)
193 INIT_HLIST_HEAD(&pid->tasks[type]);
194
195 spin_lock_irq(&pidmap_lock);
196 hlist_add_head_rcu(&pid->pid_chain, &pid_hash[pid_hashfn(pid->nr)]);
197 spin_unlock_irq(&pidmap_lock);
198
199out:
200 return pid;
201
202out_free:
203 kmem_cache_free(pid_cachep, pid);
204 pid = NULL;
205 goto out;
206}
207
208struct pid * fastcall find_pid(int nr)
135{ 209{
136 struct hlist_node *elem; 210 struct hlist_node *elem;
137 struct pid *pid; 211 struct pid *pid;
138 212
139 hlist_for_each_entry_rcu(pid, elem, 213 hlist_for_each_entry_rcu(pid, elem,
140 &pid_hash[type][pid_hashfn(nr)], pid_chain) { 214 &pid_hash[pid_hashfn(nr)], pid_chain) {
141 if (pid->nr == nr) 215 if (pid->nr == nr)
142 return pid; 216 return pid;
143 } 217 }
@@ -146,77 +220,82 @@ struct pid * fastcall find_pid(enum pid_type type, int nr)
146 220
147int fastcall attach_pid(task_t *task, enum pid_type type, int nr) 221int fastcall attach_pid(task_t *task, enum pid_type type, int nr)
148{ 222{
149 struct pid *pid, *task_pid; 223 struct pid_link *link;
150 224 struct pid *pid;
151 task_pid = &task->pids[type]; 225
152 pid = find_pid(type, nr); 226 WARN_ON(!task->pid); /* to be removed soon */
153 task_pid->nr = nr; 227 WARN_ON(!nr); /* to be removed soon */
154 if (pid == NULL) { 228
155 INIT_LIST_HEAD(&task_pid->pid_list); 229 link = &task->pids[type];
156 hlist_add_head_rcu(&task_pid->pid_chain, 230 link->pid = pid = find_pid(nr);
157 &pid_hash[type][pid_hashfn(nr)]); 231 hlist_add_head_rcu(&link->node, &pid->tasks[type]);
158 } else {
159 INIT_HLIST_NODE(&task_pid->pid_chain);
160 list_add_tail_rcu(&task_pid->pid_list, &pid->pid_list);
161 }
162 232
163 return 0; 233 return 0;
164} 234}
165 235
166static fastcall int __detach_pid(task_t *task, enum pid_type type) 236void fastcall detach_pid(task_t *task, enum pid_type type)
167{ 237{
168 struct pid *pid, *pid_next; 238 struct pid_link *link;
169 int nr = 0; 239 struct pid *pid;
240 int tmp;
170 241
171 pid = &task->pids[type]; 242 link = &task->pids[type];
172 if (!hlist_unhashed(&pid->pid_chain)) { 243 pid = link->pid;
173 244
174 if (list_empty(&pid->pid_list)) { 245 hlist_del_rcu(&link->node);
175 nr = pid->nr; 246 link->pid = NULL;
176 hlist_del_rcu(&pid->pid_chain);
177 } else {
178 pid_next = list_entry(pid->pid_list.next,
179 struct pid, pid_list);
180 /* insert next pid from pid_list to hash */
181 hlist_replace_rcu(&pid->pid_chain,
182 &pid_next->pid_chain);
183 }
184 }
185 247
186 list_del_rcu(&pid->pid_list); 248 for (tmp = PIDTYPE_MAX; --tmp >= 0; )
187 pid->nr = 0; 249 if (!hlist_empty(&pid->tasks[tmp]))
250 return;
188 251
189 return nr; 252 free_pid(pid);
190} 253}
191 254
192void fastcall detach_pid(task_t *task, enum pid_type type) 255struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
193{ 256{
194 int tmp, nr; 257 struct task_struct *result = NULL;
258 if (pid) {
259 struct hlist_node *first;
260 first = rcu_dereference(pid->tasks[type].first);
261 if (first)
262 result = hlist_entry(first, struct task_struct, pids[(type)].node);
263 }
264 return result;
265}
195 266
196 nr = __detach_pid(task, type); 267/*
197 if (!nr) 268 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
198 return; 269 */
270task_t *find_task_by_pid_type(int type, int nr)
271{
272 return pid_task(find_pid(nr), type);
273}
199 274
200 for (tmp = PIDTYPE_MAX; --tmp >= 0; ) 275EXPORT_SYMBOL(find_task_by_pid_type);
201 if (tmp != type && find_pid(tmp, nr))
202 return;
203 276
204 free_pidmap(nr); 277struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type)
278{
279 struct task_struct *result;
280 rcu_read_lock();
281 result = pid_task(pid, type);
282 if (result)
283 get_task_struct(result);
284 rcu_read_unlock();
285 return result;
205} 286}
206 287
207task_t *find_task_by_pid_type(int type, int nr) 288struct pid *find_get_pid(pid_t nr)
208{ 289{
209 struct pid *pid; 290 struct pid *pid;
210 291
211 pid = find_pid(type, nr); 292 rcu_read_lock();
212 if (!pid) 293 pid = get_pid(find_pid(nr));
213 return NULL; 294 rcu_read_unlock();
214 295
215 return pid_task(&pid->pid_list, type); 296 return pid;
216} 297}
217 298
218EXPORT_SYMBOL(find_task_by_pid_type);
219
220/* 299/*
221 * The pid hash table is scaled according to the amount of memory in the 300 * The pid hash table is scaled according to the amount of memory in the
222 * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or 301 * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or
@@ -224,7 +303,7 @@ EXPORT_SYMBOL(find_task_by_pid_type);
224 */ 303 */
225void __init pidhash_init(void) 304void __init pidhash_init(void)
226{ 305{
227 int i, j, pidhash_size; 306 int i, pidhash_size;
228 unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT); 307 unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);
229 308
230 pidhash_shift = max(4, fls(megabytes * 4)); 309 pidhash_shift = max(4, fls(megabytes * 4));
@@ -233,16 +312,13 @@ void __init pidhash_init(void)
233 312
234 printk("PID hash table entries: %d (order: %d, %Zd bytes)\n", 313 printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
235 pidhash_size, pidhash_shift, 314 pidhash_size, pidhash_shift,
236 PIDTYPE_MAX * pidhash_size * sizeof(struct hlist_head)); 315 pidhash_size * sizeof(struct hlist_head));
237 316
238 for (i = 0; i < PIDTYPE_MAX; i++) { 317 pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
239 pid_hash[i] = alloc_bootmem(pidhash_size * 318 if (!pid_hash)
240 sizeof(*(pid_hash[i]))); 319 panic("Could not alloc pidhash!\n");
241 if (!pid_hash[i]) 320 for (i = 0; i < pidhash_size; i++)
242 panic("Could not alloc pidhash!\n"); 321 INIT_HLIST_HEAD(&pid_hash[i]);
243 for (j = 0; j < pidhash_size; j++)
244 INIT_HLIST_HEAD(&pid_hash[i][j]);
245 }
246} 322}
247 323
248void __init pidmap_init(void) 324void __init pidmap_init(void)
@@ -251,4 +327,8 @@ void __init pidmap_init(void)
251 /* Reserve PID 0. We never call free_pidmap(0) */ 327 /* Reserve PID 0. We never call free_pidmap(0) */
252 set_bit(0, pidmap_array->page); 328 set_bit(0, pidmap_array->page);
253 atomic_dec(&pidmap_array->nr_free); 329 atomic_dec(&pidmap_array->nr_free);
330
331 pid_cachep = kmem_cache_create("pid", sizeof(struct pid),
332 __alignof__(struct pid),
333 SLAB_PANIC, NULL, NULL);
254} 334}
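
A pid number now resolves to a single refcounted, RCU-freed struct pid carrying one task list per pid type, replacing the old per-type hash tables.  A sketch of the lookup pattern the new helpers support, using only functions defined in this hunk (the caller itself is hypothetical):

	/* Sketch: pin a struct pid across a sleep, resolve it to a task later. */
	static void example_pid_usage(pid_t nr)
	{
		struct pid *pid;
		struct task_struct *task;

		pid = find_get_pid(nr);		/* RCU lookup plus get_pid() */
		if (!pid)
			return;

		/* ... may sleep here; the refcount keeps the struct pid alive ... */

		task = get_pid_task(pid, PIDTYPE_PID);	/* NULL if it has exited */
		if (task) {
			/* ... use the task ... */
			put_task_struct(task);
		}
		put_pid(pid);			/* drop our reference */
	}
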
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 9fd8d4f03595..ce0dfb8f4a4e 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -41,7 +41,7 @@ config SOFTWARE_SUSPEND
41 depends on PM && SWAP && (X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP) 41 depends on PM && SWAP && (X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP)
42 ---help--- 42 ---help---
43 Enable the possibility of suspending the machine. 43 Enable the possibility of suspending the machine.
44 It doesn't need APM. 44 It doesn't need ACPI or APM.
45 You may suspend your machine by 'swsusp' or 'shutdown -z <time>' 45 You may suspend your machine by 'swsusp' or 'shutdown -z <time>'
46 (patch for sysvinit needed). 46 (patch for sysvinit needed).
47 47
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 8ac7c35fad77..b2a5f671d6cd 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -26,8 +26,7 @@ static inline int freezeable(struct task_struct * p)
26 (p->flags & PF_NOFREEZE) || 26 (p->flags & PF_NOFREEZE) ||
27 (p->exit_state == EXIT_ZOMBIE) || 27 (p->exit_state == EXIT_ZOMBIE) ||
28 (p->exit_state == EXIT_DEAD) || 28 (p->exit_state == EXIT_DEAD) ||
29 (p->state == TASK_STOPPED) || 29 (p->state == TASK_STOPPED))
30 (p->state == TASK_TRACED))
31 return 0; 30 return 0;
32 return 1; 31 return 1;
33} 32}
diff --git a/kernel/printk.c b/kernel/printk.c
index 891f7a714037..a33f342b31b7 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -360,8 +360,7 @@ static void call_console_drivers(unsigned long start, unsigned long end)
360 unsigned long cur_index, start_print; 360 unsigned long cur_index, start_print;
361 static int msg_level = -1; 361 static int msg_level = -1;
362 362
363 if (((long)(start - end)) > 0) 363 BUG_ON(((long)(start - end)) > 0);
364 BUG();
365 364
366 cur_index = start; 365 cur_index = start;
367 start_print = start; 366 start_print = start;
@@ -708,8 +707,7 @@ int __init add_preferred_console(char *name, int idx, char *options)
708 */ 707 */
709void acquire_console_sem(void) 708void acquire_console_sem(void)
710{ 709{
711 if (in_interrupt()) 710 BUG_ON(in_interrupt());
712 BUG();
713 down(&console_sem); 711 down(&console_sem);
714 console_locked = 1; 712 console_locked = 1;
715 console_may_schedule = 1; 713 console_may_schedule = 1;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 86a7f6c60cb2..0eeb7e66722c 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -30,8 +30,7 @@
30 */ 30 */
31void __ptrace_link(task_t *child, task_t *new_parent) 31void __ptrace_link(task_t *child, task_t *new_parent)
32{ 32{
33 if (!list_empty(&child->ptrace_list)) 33 BUG_ON(!list_empty(&child->ptrace_list));
34 BUG();
35 if (child->parent == new_parent) 34 if (child->parent == new_parent)
36 return; 35 return;
37 list_add(&child->ptrace_list, &child->parent->ptrace_children); 36 list_add(&child->ptrace_list, &child->parent->ptrace_children);
diff --git a/kernel/sched.c b/kernel/sched.c
index a9ecac398bb9..dd153d6f8a04 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -667,9 +667,13 @@ static int effective_prio(task_t *p)
667/* 667/*
668 * __activate_task - move a task to the runqueue. 668 * __activate_task - move a task to the runqueue.
669 */ 669 */
670static inline void __activate_task(task_t *p, runqueue_t *rq) 670static void __activate_task(task_t *p, runqueue_t *rq)
671{ 671{
672 enqueue_task(p, rq->active); 672 prio_array_t *target = rq->active;
673
674 if (batch_task(p))
675 target = rq->expired;
676 enqueue_task(p, target);
673 rq->nr_running++; 677 rq->nr_running++;
674} 678}
675 679
@@ -688,7 +692,7 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
688 unsigned long long __sleep_time = now - p->timestamp; 692 unsigned long long __sleep_time = now - p->timestamp;
689 unsigned long sleep_time; 693 unsigned long sleep_time;
690 694
691 if (unlikely(p->policy == SCHED_BATCH)) 695 if (batch_task(p))
692 sleep_time = 0; 696 sleep_time = 0;
693 else { 697 else {
694 if (__sleep_time > NS_MAX_SLEEP_AVG) 698 if (__sleep_time > NS_MAX_SLEEP_AVG)
@@ -700,21 +704,25 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
700 if (likely(sleep_time > 0)) { 704 if (likely(sleep_time > 0)) {
701 /* 705 /*
702 * User tasks that sleep a long time are categorised as 706 * User tasks that sleep a long time are categorised as
703 * idle and will get just interactive status to stay active & 707 * idle. They will only have their sleep_avg increased to a
704 * prevent them suddenly becoming cpu hogs and starving 708 * level that makes them just interactive priority to stay
705 * other processes. 709 * active yet prevent them suddenly becoming cpu hogs and
710 * starving other processes.
706 */ 711 */
707 if (p->mm && p->activated != -1 && 712 if (p->mm && sleep_time > INTERACTIVE_SLEEP(p)) {
708 sleep_time > INTERACTIVE_SLEEP(p)) { 713 unsigned long ceiling;
709 p->sleep_avg = JIFFIES_TO_NS(MAX_SLEEP_AVG - 714
710 DEF_TIMESLICE); 715 ceiling = JIFFIES_TO_NS(MAX_SLEEP_AVG -
716 DEF_TIMESLICE);
717 if (p->sleep_avg < ceiling)
718 p->sleep_avg = ceiling;
711 } else { 719 } else {
712 /* 720 /*
713 * Tasks waking from uninterruptible sleep are 721 * Tasks waking from uninterruptible sleep are
714 * limited in their sleep_avg rise as they 722 * limited in their sleep_avg rise as they
715 * are likely to be waiting on I/O 723 * are likely to be waiting on I/O
716 */ 724 */
717 if (p->activated == -1 && p->mm) { 725 if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) {
718 if (p->sleep_avg >= INTERACTIVE_SLEEP(p)) 726 if (p->sleep_avg >= INTERACTIVE_SLEEP(p))
719 sleep_time = 0; 727 sleep_time = 0;
720 else if (p->sleep_avg + sleep_time >= 728 else if (p->sleep_avg + sleep_time >=
@@ -769,7 +777,7 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
769 * This checks to make sure it's not an uninterruptible task 777 * This checks to make sure it's not an uninterruptible task
770 * that is now waking up. 778 * that is now waking up.
771 */ 779 */
772 if (!p->activated) { 780 if (p->sleep_type == SLEEP_NORMAL) {
773 /* 781 /*
774 * Tasks which were woken up by interrupts (ie. hw events) 782 * Tasks which were woken up by interrupts (ie. hw events)
775 * are most likely of interactive nature. So we give them 783 * are most likely of interactive nature. So we give them
@@ -778,13 +786,13 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
778 * on a CPU, first time around: 786 * on a CPU, first time around:
779 */ 787 */
780 if (in_interrupt()) 788 if (in_interrupt())
781 p->activated = 2; 789 p->sleep_type = SLEEP_INTERRUPTED;
782 else { 790 else {
783 /* 791 /*
784 * Normal first-time wakeups get a credit too for 792 * Normal first-time wakeups get a credit too for
785 * on-runqueue time, but it will be weighted down: 793 * on-runqueue time, but it will be weighted down:
786 */ 794 */
787 p->activated = 1; 795 p->sleep_type = SLEEP_INTERACTIVE;
788 } 796 }
789 } 797 }
790 p->timestamp = now; 798 p->timestamp = now;
@@ -1272,19 +1280,19 @@ out_activate:
1272 * Tasks on involuntary sleep don't earn 1280 * Tasks on involuntary sleep don't earn
1273 * sleep_avg beyond just interactive state. 1281 * sleep_avg beyond just interactive state.
1274 */ 1282 */
1275 p->activated = -1; 1283 p->sleep_type = SLEEP_NONINTERACTIVE;
1276 } 1284 } else
1277 1285
1278 /* 1286 /*
1279 * Tasks that have marked their sleep as noninteractive get 1287 * Tasks that have marked their sleep as noninteractive get
1280 * woken up without updating their sleep average. (i.e. their 1288 * woken up with their sleep average not weighted in an
1281 * sleep is handled in a priority-neutral manner, no priority 1289 * interactive way.
1282 * boost and no penalty.)
1283 */ 1290 */
1284 if (old_state & TASK_NONINTERACTIVE) 1291 if (old_state & TASK_NONINTERACTIVE)
1285 __activate_task(p, rq); 1292 p->sleep_type = SLEEP_NONINTERACTIVE;
1286 else 1293
1287 activate_task(p, rq, cpu == this_cpu); 1294
1295 activate_task(p, rq, cpu == this_cpu);
1288 /* 1296 /*
1289 * Sync wakeups (i.e. those types of wakeups where the waker 1297 * Sync wakeups (i.e. those types of wakeups where the waker
1290 * has indicated that it will leave the CPU in short order) 1298 * has indicated that it will leave the CPU in short order)
@@ -1658,6 +1666,21 @@ unsigned long nr_iowait(void)
1658 return sum; 1666 return sum;
1659} 1667}
1660 1668
1669unsigned long nr_active(void)
1670{
1671 unsigned long i, running = 0, uninterruptible = 0;
1672
1673 for_each_online_cpu(i) {
1674 running += cpu_rq(i)->nr_running;
1675 uninterruptible += cpu_rq(i)->nr_uninterruptible;
1676 }
1677
1678 if (unlikely((long)uninterruptible < 0))
1679 uninterruptible = 0;
1680
1681 return running + uninterruptible;
1682}
1683
1661#ifdef CONFIG_SMP 1684#ifdef CONFIG_SMP
1662 1685
1663/* 1686/*
@@ -2860,6 +2883,12 @@ EXPORT_SYMBOL(sub_preempt_count);
2860 2883
2861#endif 2884#endif
2862 2885
2886static inline int interactive_sleep(enum sleep_type sleep_type)
2887{
2888 return (sleep_type == SLEEP_INTERACTIVE ||
2889 sleep_type == SLEEP_INTERRUPTED);
2890}
2891
2863/* 2892/*
2864 * schedule() is the main scheduler function. 2893 * schedule() is the main scheduler function.
2865 */ 2894 */
@@ -2983,12 +3012,12 @@ go_idle:
2983 queue = array->queue + idx; 3012 queue = array->queue + idx;
2984 next = list_entry(queue->next, task_t, run_list); 3013 next = list_entry(queue->next, task_t, run_list);
2985 3014
2986 if (!rt_task(next) && next->activated > 0) { 3015 if (!rt_task(next) && interactive_sleep(next->sleep_type)) {
2987 unsigned long long delta = now - next->timestamp; 3016 unsigned long long delta = now - next->timestamp;
2988 if (unlikely((long long)(now - next->timestamp) < 0)) 3017 if (unlikely((long long)(now - next->timestamp) < 0))
2989 delta = 0; 3018 delta = 0;
2990 3019
2991 if (next->activated == 1) 3020 if (next->sleep_type == SLEEP_INTERACTIVE)
2992 delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128; 3021 delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
2993 3022
2994 array = next->array; 3023 array = next->array;
@@ -2998,10 +3027,9 @@ go_idle:
2998 dequeue_task(next, array); 3027 dequeue_task(next, array);
2999 next->prio = new_prio; 3028 next->prio = new_prio;
3000 enqueue_task(next, array); 3029 enqueue_task(next, array);
3001 } else 3030 }
3002 requeue_task(next, array);
3003 } 3031 }
3004 next->activated = 0; 3032 next->sleep_type = SLEEP_NORMAL;
3005switch_tasks: 3033switch_tasks:
3006 if (next == rq->idle) 3034 if (next == rq->idle)
3007 schedstat_inc(rq, sched_goidle); 3035 schedstat_inc(rq, sched_goidle);
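
The ad-hoc p->activated integer becomes an explicit enum sleep_type, and SCHED_BATCH tasks are queued on the expired array at activation so they cannot preempt interactive work.  The enum itself is declared in sched.h, outside this hunk; reconstructed from the substitutions above, the old encodings map roughly as follows:

	/* Sketch: how the former p->activated values read in the new scheme. */
	enum sleep_type_sketch {
		SLEEP_NORMAL,		/* was activated ==  0: ordinary wakeup        */
		SLEEP_NONINTERACTIVE,	/* was activated == -1: uninterruptible sleep  */
		SLEEP_INTERACTIVE,	/* was activated ==  1: normal runqueue credit */
		SLEEP_INTERRUPTED,	/* was activated ==  2: woken from interrupt   */
	};
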
diff --git a/kernel/signal.c b/kernel/signal.c
index 4922928d91f6..5ccaac505e8d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -769,8 +769,7 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
769{ 769{
770 int ret = 0; 770 int ret = 0;
771 771
772 if (!irqs_disabled()) 772 BUG_ON(!irqs_disabled());
773 BUG();
774 assert_spin_locked(&t->sighand->siglock); 773 assert_spin_locked(&t->sighand->siglock);
775 774
776 /* Short-circuit ignored signals. */ 775 /* Short-circuit ignored signals. */
@@ -1384,8 +1383,7 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1384 * the overrun count. Other uses should not try to 1383 * the overrun count. Other uses should not try to
1385 * send the signal multiple times. 1384 * send the signal multiple times.
1386 */ 1385 */
1387 if (q->info.si_code != SI_TIMER) 1386 BUG_ON(q->info.si_code != SI_TIMER);
1388 BUG();
1389 q->info.si_overrun++; 1387 q->info.si_overrun++;
1390 goto out; 1388 goto out;
1391 } 1389 }
@@ -1560,6 +1558,7 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1560 /* Let the debugger run. */ 1558 /* Let the debugger run. */
1561 set_current_state(TASK_TRACED); 1559 set_current_state(TASK_TRACED);
1562 spin_unlock_irq(&current->sighand->siglock); 1560 spin_unlock_irq(&current->sighand->siglock);
1561 try_to_freeze();
1563 read_lock(&tasklist_lock); 1562 read_lock(&tasklist_lock);
1564 if (likely(current->ptrace & PT_PTRACED) && 1563 if (likely(current->ptrace & PT_PTRACED) &&
1565 likely(current->parent != current->real_parent || 1564 likely(current->parent != current->real_parent ||
diff --git a/kernel/sys.c b/kernel/sys.c
index 7ef7f6054c28..0b6ec0e7936f 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1372,18 +1372,29 @@ asmlinkage long sys_getsid(pid_t pid)
1372asmlinkage long sys_setsid(void) 1372asmlinkage long sys_setsid(void)
1373{ 1373{
1374 struct task_struct *group_leader = current->group_leader; 1374 struct task_struct *group_leader = current->group_leader;
1375 struct pid *pid; 1375 pid_t session;
1376 int err = -EPERM; 1376 int err = -EPERM;
1377 1377
1378 mutex_lock(&tty_mutex); 1378 mutex_lock(&tty_mutex);
1379 write_lock_irq(&tasklist_lock); 1379 write_lock_irq(&tasklist_lock);
1380 1380
1381 pid = find_pid(PIDTYPE_PGID, group_leader->pid); 1381 /* Fail if I am already a session leader */
1382 if (pid) 1382 if (group_leader->signal->leader)
1383 goto out;
1384
1385 session = group_leader->pid;
1386 /* Fail if a process group id already exists that equals the
1387 * proposed session id.
1388 *
1389 * Don't check if session id == 1 because kernel threads use this
1390 * session id and so the check will always fail and make it so
1391 * init cannot successfully call setsid.
1392 */
1393 if (session > 1 && find_task_by_pid_type(PIDTYPE_PGID, session))
1383 goto out; 1394 goto out;
1384 1395
1385 group_leader->signal->leader = 1; 1396 group_leader->signal->leader = 1;
1386 __set_special_pids(group_leader->pid, group_leader->pid); 1397 __set_special_pids(session, session);
1387 group_leader->signal->tty = NULL; 1398 group_leader->signal->tty = NULL;
1388 group_leader->signal->tty_old_pgrp = 0; 1399 group_leader->signal->tty_old_pgrp = 0;
1389 err = process_group(group_leader); 1400 err = process_group(group_leader);
diff --git a/kernel/time.c b/kernel/time.c
index ff8e7019c4c4..b00ddc71cedb 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -410,7 +410,7 @@ EXPORT_SYMBOL(current_kernel_time);
410 * current_fs_time - Return FS time 410 * current_fs_time - Return FS time
411 * @sb: Superblock. 411 * @sb: Superblock.
412 * 412 *
413 * Return the current time truncated to the time granuality supported by 413 * Return the current time truncated to the time granularity supported by
414 * the fs. 414 * the fs.
415 */ 415 */
416struct timespec current_fs_time(struct super_block *sb) 416struct timespec current_fs_time(struct super_block *sb)
@@ -421,11 +421,11 @@ struct timespec current_fs_time(struct super_block *sb)
421EXPORT_SYMBOL(current_fs_time); 421EXPORT_SYMBOL(current_fs_time);
422 422
423/** 423/**
424 * timespec_trunc - Truncate timespec to a granuality 424 * timespec_trunc - Truncate timespec to a granularity
425 * @t: Timespec 425 * @t: Timespec
426 * @gran: Granuality in ns. 426 * @gran: Granularity in ns.
427 * 427 *
428 * Truncate a timespec to a granuality. gran must be smaller than a second. 428 * Truncate a timespec to a granularity. gran must be smaller than a second.
429 * Always rounds down. 429 * Always rounds down.
430 * 430 *
431 * This function should be only used for timestamps returned by 431 * This function should be only used for timestamps returned by
diff --git a/kernel/timer.c b/kernel/timer.c
index ab189dd187cb..c3a874f1393c 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -54,7 +54,6 @@ EXPORT_SYMBOL(jiffies_64);
54/* 54/*
55 * per-CPU timer vector definitions: 55 * per-CPU timer vector definitions:
56 */ 56 */
57
58#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6) 57#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
59#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8) 58#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
60#define TVN_SIZE (1 << TVN_BITS) 59#define TVN_SIZE (1 << TVN_BITS)
@@ -62,11 +61,6 @@ EXPORT_SYMBOL(jiffies_64);
62#define TVN_MASK (TVN_SIZE - 1) 61#define TVN_MASK (TVN_SIZE - 1)
63#define TVR_MASK (TVR_SIZE - 1) 62#define TVR_MASK (TVR_SIZE - 1)
64 63
65struct timer_base_s {
66 spinlock_t lock;
67 struct timer_list *running_timer;
68};
69
70typedef struct tvec_s { 64typedef struct tvec_s {
71 struct list_head vec[TVN_SIZE]; 65 struct list_head vec[TVN_SIZE];
72} tvec_t; 66} tvec_t;
@@ -76,7 +70,8 @@ typedef struct tvec_root_s {
76} tvec_root_t; 70} tvec_root_t;
77 71
78struct tvec_t_base_s { 72struct tvec_t_base_s {
79 struct timer_base_s t_base; 73 spinlock_t lock;
74 struct timer_list *running_timer;
80 unsigned long timer_jiffies; 75 unsigned long timer_jiffies;
81 tvec_root_t tv1; 76 tvec_root_t tv1;
82 tvec_t tv2; 77 tvec_t tv2;
@@ -87,13 +82,14 @@ struct tvec_t_base_s {
87 82
88typedef struct tvec_t_base_s tvec_base_t; 83typedef struct tvec_t_base_s tvec_base_t;
89static DEFINE_PER_CPU(tvec_base_t *, tvec_bases); 84static DEFINE_PER_CPU(tvec_base_t *, tvec_bases);
90static tvec_base_t boot_tvec_bases; 85tvec_base_t boot_tvec_bases;
86EXPORT_SYMBOL(boot_tvec_bases);
91 87
92static inline void set_running_timer(tvec_base_t *base, 88static inline void set_running_timer(tvec_base_t *base,
93 struct timer_list *timer) 89 struct timer_list *timer)
94{ 90{
95#ifdef CONFIG_SMP 91#ifdef CONFIG_SMP
96 base->t_base.running_timer = timer; 92 base->running_timer = timer;
97#endif 93#endif
98} 94}
99 95
@@ -139,15 +135,6 @@ static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
139 list_add_tail(&timer->entry, vec); 135 list_add_tail(&timer->entry, vec);
140} 136}
141 137
142typedef struct timer_base_s timer_base_t;
143/*
144 * Used by TIMER_INITIALIZER, we can't use per_cpu(tvec_bases)
145 * at compile time, and we need timer->base to lock the timer.
146 */
147timer_base_t __init_timer_base
148 ____cacheline_aligned_in_smp = { .lock = SPIN_LOCK_UNLOCKED };
149EXPORT_SYMBOL(__init_timer_base);
150
151/*** 138/***
152 * init_timer - initialize a timer. 139 * init_timer - initialize a timer.
153 * @timer: the timer to be initialized 140 * @timer: the timer to be initialized
@@ -158,7 +145,7 @@ EXPORT_SYMBOL(__init_timer_base);
158void fastcall init_timer(struct timer_list *timer) 145void fastcall init_timer(struct timer_list *timer)
159{ 146{
160 timer->entry.next = NULL; 147 timer->entry.next = NULL;
161 timer->base = &per_cpu(tvec_bases, raw_smp_processor_id())->t_base; 148 timer->base = per_cpu(tvec_bases, raw_smp_processor_id());
162} 149}
163EXPORT_SYMBOL(init_timer); 150EXPORT_SYMBOL(init_timer);
164 151
@@ -174,7 +161,7 @@ static inline void detach_timer(struct timer_list *timer,
174} 161}
175 162
176/* 163/*
177 * We are using hashed locking: holding per_cpu(tvec_bases).t_base.lock 164 * We are using hashed locking: holding per_cpu(tvec_bases).lock
178 * means that all timers which are tied to this base via timer->base are 165 * means that all timers which are tied to this base via timer->base are
179 * locked, and the base itself is locked too. 166 * locked, and the base itself is locked too.
180 * 167 *
@@ -185,10 +172,10 @@ static inline void detach_timer(struct timer_list *timer,
185 * possible to set timer->base = NULL and drop the lock: the timer remains 172 * possible to set timer->base = NULL and drop the lock: the timer remains
186 * locked. 173 * locked.
187 */ 174 */
188static timer_base_t *lock_timer_base(struct timer_list *timer, 175static tvec_base_t *lock_timer_base(struct timer_list *timer,
189 unsigned long *flags) 176 unsigned long *flags)
190{ 177{
191 timer_base_t *base; 178 tvec_base_t *base;
192 179
193 for (;;) { 180 for (;;) {
194 base = timer->base; 181 base = timer->base;
@@ -205,8 +192,7 @@ static timer_base_t *lock_timer_base(struct timer_list *timer,
205 192
206int __mod_timer(struct timer_list *timer, unsigned long expires) 193int __mod_timer(struct timer_list *timer, unsigned long expires)
207{ 194{
208 timer_base_t *base; 195 tvec_base_t *base, *new_base;
209 tvec_base_t *new_base;
210 unsigned long flags; 196 unsigned long flags;
211 int ret = 0; 197 int ret = 0;
212 198
@@ -221,7 +207,7 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
221 207
222 new_base = __get_cpu_var(tvec_bases); 208 new_base = __get_cpu_var(tvec_bases);
223 209
224 if (base != &new_base->t_base) { 210 if (base != new_base) {
225 /* 211 /*
226 * We are trying to schedule the timer on the local CPU. 212 * We are trying to schedule the timer on the local CPU.
227 * However we can't change timer's base while it is running, 213 * However we can't change timer's base while it is running,
@@ -229,21 +215,19 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
229 * handler yet has not finished. This also guarantees that 215 * handler yet has not finished. This also guarantees that
230 * the timer is serialized wrt itself. 216 * the timer is serialized wrt itself.
231 */ 217 */
232 if (unlikely(base->running_timer == timer)) { 218 if (likely(base->running_timer != timer)) {
233 /* The timer remains on a former base */
234 new_base = container_of(base, tvec_base_t, t_base);
235 } else {
236 /* See the comment in lock_timer_base() */ 219 /* See the comment in lock_timer_base() */
237 timer->base = NULL; 220 timer->base = NULL;
238 spin_unlock(&base->lock); 221 spin_unlock(&base->lock);
239 spin_lock(&new_base->t_base.lock); 222 base = new_base;
240 timer->base = &new_base->t_base; 223 spin_lock(&base->lock);
224 timer->base = base;
241 } 225 }
242 } 226 }
243 227
244 timer->expires = expires; 228 timer->expires = expires;
245 internal_add_timer(new_base, timer); 229 internal_add_timer(base, timer);
246 spin_unlock_irqrestore(&new_base->t_base.lock, flags); 230 spin_unlock_irqrestore(&base->lock, flags);
247 231
248 return ret; 232 return ret;
249} 233}
@@ -263,10 +247,10 @@ void add_timer_on(struct timer_list *timer, int cpu)
263 unsigned long flags; 247 unsigned long flags;
264 248
265 BUG_ON(timer_pending(timer) || !timer->function); 249 BUG_ON(timer_pending(timer) || !timer->function);
266 spin_lock_irqsave(&base->t_base.lock, flags); 250 spin_lock_irqsave(&base->lock, flags);
267 timer->base = &base->t_base; 251 timer->base = base;
268 internal_add_timer(base, timer); 252 internal_add_timer(base, timer);
269 spin_unlock_irqrestore(&base->t_base.lock, flags); 253 spin_unlock_irqrestore(&base->lock, flags);
270} 254}
271 255
272 256
@@ -319,7 +303,7 @@ EXPORT_SYMBOL(mod_timer);
319 */ 303 */
320int del_timer(struct timer_list *timer) 304int del_timer(struct timer_list *timer)
321{ 305{
322 timer_base_t *base; 306 tvec_base_t *base;
323 unsigned long flags; 307 unsigned long flags;
324 int ret = 0; 308 int ret = 0;
325 309
@@ -346,7 +330,7 @@ EXPORT_SYMBOL(del_timer);
346 */ 330 */
347int try_to_del_timer_sync(struct timer_list *timer) 331int try_to_del_timer_sync(struct timer_list *timer)
348{ 332{
349 timer_base_t *base; 333 tvec_base_t *base;
350 unsigned long flags; 334 unsigned long flags;
351 int ret = -1; 335 int ret = -1;
352 336
@@ -410,7 +394,7 @@ static int cascade(tvec_base_t *base, tvec_t *tv, int index)
410 struct timer_list *tmp; 394 struct timer_list *tmp;
411 395
412 tmp = list_entry(curr, struct timer_list, entry); 396 tmp = list_entry(curr, struct timer_list, entry);
413 BUG_ON(tmp->base != &base->t_base); 397 BUG_ON(tmp->base != base);
414 curr = curr->next; 398 curr = curr->next;
415 internal_add_timer(base, tmp); 399 internal_add_timer(base, tmp);
416 } 400 }
@@ -432,7 +416,7 @@ static inline void __run_timers(tvec_base_t *base)
432{ 416{
433 struct timer_list *timer; 417 struct timer_list *timer;
434 418
435 spin_lock_irq(&base->t_base.lock); 419 spin_lock_irq(&base->lock);
436 while (time_after_eq(jiffies, base->timer_jiffies)) { 420 while (time_after_eq(jiffies, base->timer_jiffies)) {
437 struct list_head work_list = LIST_HEAD_INIT(work_list); 421 struct list_head work_list = LIST_HEAD_INIT(work_list);
438 struct list_head *head = &work_list; 422 struct list_head *head = &work_list;
@@ -458,7 +442,7 @@ static inline void __run_timers(tvec_base_t *base)
458 442
459 set_running_timer(base, timer); 443 set_running_timer(base, timer);
460 detach_timer(timer, 1); 444 detach_timer(timer, 1);
461 spin_unlock_irq(&base->t_base.lock); 445 spin_unlock_irq(&base->lock);
462 { 446 {
463 int preempt_count = preempt_count(); 447 int preempt_count = preempt_count();
464 fn(data); 448 fn(data);
@@ -471,11 +455,11 @@ static inline void __run_timers(tvec_base_t *base)
471 BUG(); 455 BUG();
472 } 456 }
473 } 457 }
474 spin_lock_irq(&base->t_base.lock); 458 spin_lock_irq(&base->lock);
475 } 459 }
476 } 460 }
477 set_running_timer(base, NULL); 461 set_running_timer(base, NULL);
478 spin_unlock_irq(&base->t_base.lock); 462 spin_unlock_irq(&base->lock);
479} 463}
480 464
481#ifdef CONFIG_NO_IDLE_HZ 465#ifdef CONFIG_NO_IDLE_HZ
@@ -506,7 +490,7 @@ unsigned long next_timer_interrupt(void)
506 hr_expires += jiffies; 490 hr_expires += jiffies;
507 491
508 base = __get_cpu_var(tvec_bases); 492 base = __get_cpu_var(tvec_bases);
509 spin_lock(&base->t_base.lock); 493 spin_lock(&base->lock);
510 expires = base->timer_jiffies + (LONG_MAX >> 1); 494 expires = base->timer_jiffies + (LONG_MAX >> 1);
511 list = NULL; 495 list = NULL;
512 496
@@ -554,7 +538,7 @@ found:
554 expires = nte->expires; 538 expires = nte->expires;
555 } 539 }
556 } 540 }
557 spin_unlock(&base->t_base.lock); 541 spin_unlock(&base->lock);
558 542
559 if (time_before(hr_expires, expires)) 543 if (time_before(hr_expires, expires))
560 return hr_expires; 544 return hr_expires;
@@ -841,7 +825,7 @@ void update_process_times(int user_tick)
841 */ 825 */
842static unsigned long count_active_tasks(void) 826static unsigned long count_active_tasks(void)
843{ 827{
844 return (nr_running() + nr_uninterruptible()) * FIXED_1; 828 return nr_active() * FIXED_1;
845} 829}
846 830
847/* 831/*
@@ -1262,7 +1246,7 @@ static int __devinit init_timers_cpu(int cpu)
1262 } 1246 }
1263 per_cpu(tvec_bases, cpu) = base; 1247 per_cpu(tvec_bases, cpu) = base;
1264 } 1248 }
1265 spin_lock_init(&base->t_base.lock); 1249 spin_lock_init(&base->lock);
1266 for (j = 0; j < TVN_SIZE; j++) { 1250 for (j = 0; j < TVN_SIZE; j++) {
1267 INIT_LIST_HEAD(base->tv5.vec + j); 1251 INIT_LIST_HEAD(base->tv5.vec + j);
1268 INIT_LIST_HEAD(base->tv4.vec + j); 1252 INIT_LIST_HEAD(base->tv4.vec + j);
@@ -1284,7 +1268,7 @@ static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
1284 while (!list_empty(head)) { 1268 while (!list_empty(head)) {
1285 timer = list_entry(head->next, struct timer_list, entry); 1269 timer = list_entry(head->next, struct timer_list, entry);
1286 detach_timer(timer, 0); 1270 detach_timer(timer, 0);
1287 timer->base = &new_base->t_base; 1271 timer->base = new_base;
1288 internal_add_timer(new_base, timer); 1272 internal_add_timer(new_base, timer);
1289 } 1273 }
1290} 1274}
@@ -1300,11 +1284,11 @@ static void __devinit migrate_timers(int cpu)
1300 new_base = get_cpu_var(tvec_bases); 1284 new_base = get_cpu_var(tvec_bases);
1301 1285
1302 local_irq_disable(); 1286 local_irq_disable();
1303 spin_lock(&new_base->t_base.lock); 1287 spin_lock(&new_base->lock);
1304 spin_lock(&old_base->t_base.lock); 1288 spin_lock(&old_base->lock);
1289
1290 BUG_ON(old_base->running_timer);
1305 1291
1306 if (old_base->t_base.running_timer)
1307 BUG();
1308 for (i = 0; i < TVR_SIZE; i++) 1292 for (i = 0; i < TVR_SIZE; i++)
1309 migrate_timer_list(new_base, old_base->tv1.vec + i); 1293 migrate_timer_list(new_base, old_base->tv1.vec + i);
1310 for (i = 0; i < TVN_SIZE; i++) { 1294 for (i = 0; i < TVN_SIZE; i++) {
@@ -1314,8 +1298,8 @@ static void __devinit migrate_timers(int cpu)
1314 migrate_timer_list(new_base, old_base->tv5.vec + i); 1298 migrate_timer_list(new_base, old_base->tv5.vec + i);
1315 } 1299 }
1316 1300
1317 spin_unlock(&old_base->t_base.lock); 1301 spin_unlock(&old_base->lock);
1318 spin_unlock(&new_base->t_base.lock); 1302 spin_unlock(&new_base->lock);
1319 local_irq_enable(); 1303 local_irq_enable();
1320 put_cpu_var(tvec_bases); 1304 put_cpu_var(tvec_bases);
1321} 1305}
@@ -1495,8 +1479,7 @@ register_time_interpolator(struct time_interpolator *ti)
1495 unsigned long flags; 1479 unsigned long flags;
1496 1480
1497 /* Sanity check */ 1481 /* Sanity check */
1498 if (ti->frequency == 0 || ti->mask == 0) 1482 BUG_ON(ti->frequency == 0 || ti->mask == 0);
1499 BUG();
1500 1483
1501 ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency; 1484 ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
1502 spin_lock(&time_interpolator_lock); 1485 spin_lock(&time_interpolator_lock);
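
With the embedded timer_base_s gone, timer->base points straight at the per-CPU tvec_base_t and every t_base indirection drops out.  The "hashed locking" scheme described above depends on lock_timer_base() tolerating a transient NULL base while __mod_timer() migrates a timer to another CPU; only the head of that loop is visible in the hunk, and its body works roughly like this sketch:

	/* Sketch: lock whichever base the timer is currently tied to.
	 * A NULL base means a concurrent __mod_timer() is moving the timer,
	 * so spin until it has settled on its new base. */
	static tvec_base_t *lock_timer_base_sketch(struct timer_list *timer,
						   unsigned long *flags)
	{
		tvec_base_t *base;

		for (;;) {
			base = timer->base;
			if (likely(base != NULL)) {
				spin_lock_irqsave(&base->lock, *flags);
				if (likely(base == timer->base))
					return base;	/* base and timer are now locked */
				/* timer migrated while we took the lock; retry */
				spin_unlock_irqrestore(&base->lock, *flags);
			}
			cpu_relax();
		}
	}
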
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 907c39257ca0..0a03357a1f8e 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -35,17 +35,6 @@
35 * 35 *
36 * LINUX_FADV_ASYNC_WRITE: push some or all of the dirty pages at the disk. 36 * LINUX_FADV_ASYNC_WRITE: push some or all of the dirty pages at the disk.
37 * 37 *
38 * LINUX_FADV_WRITE_WAIT, LINUX_FADV_ASYNC_WRITE: push all of the currently
39 * dirty pages at the disk.
40 *
41 * LINUX_FADV_WRITE_WAIT, LINUX_FADV_ASYNC_WRITE, LINUX_FADV_WRITE_WAIT: push
42 * all of the currently dirty pages at the disk, wait until they have been
43 * written.
44 *
45 * It should be noted that none of these operations write out the file's
46 * metadata. So unless the application is strictly performing overwrites of
47 * already-instantiated disk blocks, there are no guarantees here that the data
48 * will be available after a crash.
49 */ 38 */
50asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) 39asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
51{ 40{
@@ -129,15 +118,6 @@ asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
129 invalidate_mapping_pages(mapping, start_index, 118 invalidate_mapping_pages(mapping, start_index,
130 end_index); 119 end_index);
131 break; 120 break;
132 case LINUX_FADV_ASYNC_WRITE:
133 ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
134 WB_SYNC_NONE);
135 break;
136 case LINUX_FADV_WRITE_WAIT:
137 ret = wait_on_page_writeback_range(mapping,
138 offset >> PAGE_CACHE_SHIFT,
139 endbyte >> PAGE_CACHE_SHIFT);
140 break;
141 default: 121 default:
142 ret = -EINVAL; 122 ret = -EINVAL;
143 } 123 }
diff --git a/mm/highmem.c b/mm/highmem.c
index 55885f64af40..9b274fdf9d08 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -74,8 +74,7 @@ static void flush_all_zero_pkmaps(void)
74 pkmap_count[i] = 0; 74 pkmap_count[i] = 0;
75 75
76 /* sanity check */ 76 /* sanity check */
77 if (pte_none(pkmap_page_table[i])) 77 BUG_ON(pte_none(pkmap_page_table[i]));
78 BUG();
79 78
80 /* 79 /*
81 * Don't need an atomic fetch-and-clear op here; 80 * Don't need an atomic fetch-and-clear op here;
@@ -158,8 +157,7 @@ void fastcall *kmap_high(struct page *page)
158 if (!vaddr) 157 if (!vaddr)
159 vaddr = map_new_virtual(page); 158 vaddr = map_new_virtual(page);
160 pkmap_count[PKMAP_NR(vaddr)]++; 159 pkmap_count[PKMAP_NR(vaddr)]++;
161 if (pkmap_count[PKMAP_NR(vaddr)] < 2) 160 BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
162 BUG();
163 spin_unlock(&kmap_lock); 161 spin_unlock(&kmap_lock);
164 return (void*) vaddr; 162 return (void*) vaddr;
165} 163}
@@ -174,8 +172,7 @@ void fastcall kunmap_high(struct page *page)
174 172
175 spin_lock(&kmap_lock); 173 spin_lock(&kmap_lock);
176 vaddr = (unsigned long)page_address(page); 174 vaddr = (unsigned long)page_address(page);
177 if (!vaddr) 175 BUG_ON(!vaddr);
178 BUG();
179 nr = PKMAP_NR(vaddr); 176 nr = PKMAP_NR(vaddr);
180 177
181 /* 178 /*
@@ -220,8 +217,7 @@ static __init int init_emergency_pool(void)
220 return 0; 217 return 0;
221 218
222 page_pool = mempool_create_page_pool(POOL_SIZE, 0); 219 page_pool = mempool_create_page_pool(POOL_SIZE, 0);
223 if (!page_pool) 220 BUG_ON(!page_pool);
224 BUG();
225 printk("highmem bounce pool size: %d pages\n", POOL_SIZE); 221 printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
226 222
227 return 0; 223 return 0;
@@ -264,8 +260,7 @@ int init_emergency_isa_pool(void)
264 260
265 isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa, 261 isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
266 mempool_free_pages, (void *) 0); 262 mempool_free_pages, (void *) 0);
267 if (!isa_page_pool) 263 BUG_ON(!isa_page_pool);
268 BUG();
269 264
270 printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE); 265 printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
271 return 0; 266 return 0;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ebad6bbb3501..832f676ca038 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -334,6 +334,7 @@ static unsigned long set_max_huge_pages(unsigned long count)
334 return nr_huge_pages; 334 return nr_huge_pages;
335 335
336 spin_lock(&hugetlb_lock); 336 spin_lock(&hugetlb_lock);
337 count = max(count, reserved_huge_pages);
337 try_to_free_low(count); 338 try_to_free_low(count);
338 while (count < nr_huge_pages) { 339 while (count < nr_huge_pages) {
339 struct page *page = dequeue_huge_page(NULL, 0); 340 struct page *page = dequeue_huge_page(NULL, 0);
@@ -697,9 +698,10 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
697 pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT; 698 pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
698 page = pte_page(*pte); 699 page = pte_page(*pte);
699same_page: 700same_page:
700 get_page(page); 701 if (pages) {
701 if (pages) 702 get_page(page);
702 pages[i] = page + pfn_offset; 703 pages[i] = page + pfn_offset;
704 }
703 705
704 if (vmas) 706 if (vmas)
705 vmas[i] = vma; 707 vmas[i] = vma;
diff --git a/mm/memory.c b/mm/memory.c
index 8d8f52569f32..0ec7bc644271 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -87,7 +87,7 @@ int randomize_va_space __read_mostly = 1;
87static int __init disable_randmaps(char *s) 87static int __init disable_randmaps(char *s)
88{ 88{
89 randomize_va_space = 0; 89 randomize_va_space = 0;
90 return 0; 90 return 1;
91} 91}
92__setup("norandmaps", disable_randmaps); 92__setup("norandmaps", disable_randmaps);
93 93
diff --git a/mm/mmap.c b/mm/mmap.c
index 4f5b5709136a..e780d19aa214 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -294,8 +294,7 @@ void validate_mm(struct mm_struct *mm)
294 i = browse_rb(&mm->mm_rb); 294 i = browse_rb(&mm->mm_rb);
295 if (i != mm->map_count) 295 if (i != mm->map_count)
296 printk("map_count %d rb %d\n", mm->map_count, i), bug = 1; 296 printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
297 if (bug) 297 BUG_ON(bug);
298 BUG();
299} 298}
300#else 299#else
301#define validate_mm(mm) do { } while (0) 300#define validate_mm(mm) do { } while (0)
@@ -432,8 +431,7 @@ __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
432 struct rb_node ** rb_link, * rb_parent; 431 struct rb_node ** rb_link, * rb_parent;
433 432
434 __vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent); 433 __vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
435 if (__vma && __vma->vm_start < vma->vm_end) 434 BUG_ON(__vma && __vma->vm_start < vma->vm_end);
436 BUG();
437 __vma_link(mm, vma, prev, rb_link, rb_parent); 435 __vma_link(mm, vma, prev, rb_link, rb_parent);
438 mm->map_count++; 436 mm->map_count++;
439} 437}
@@ -813,8 +811,7 @@ try_prev:
813 * (e.g. stash info in next's anon_vma_node when assigning 811 * (e.g. stash info in next's anon_vma_node when assigning
814 * an anon_vma, or when trying vma_merge). Another time. 812 * an anon_vma, or when trying vma_merge). Another time.
815 */ 813 */
816 if (find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma) 814 BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
817 BUG();
818 if (!near) 815 if (!near)
819 goto none; 816 goto none;
820 817
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 893d7677579e..6dcce3a4bbdc 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -258,7 +258,7 @@ static void balance_dirty_pages(struct address_space *mapping)
258/** 258/**
259 * balance_dirty_pages_ratelimited_nr - balance dirty memory state 259 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
260 * @mapping: address_space which was dirtied 260 * @mapping: address_space which was dirtied
261 * @nr_pages: number of pages which the caller has just dirtied 261 * @nr_pages_dirtied: number of pages which the caller has just dirtied
262 * 262 *
263 * Processes which are dirtying memory should call in here once for each page 263 * Processes which are dirtying memory should call in here once for each page
264 * which was newly dirtied. The function will periodically check the system's 264 * which was newly dirtied. The function will periodically check the system's
diff --git a/mm/slab.c b/mm/slab.c
index 4cbf8bb13557..f055c1420216 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1297,8 +1297,7 @@ void __init kmem_cache_init(void)
1297 if (cache_cache.num) 1297 if (cache_cache.num)
1298 break; 1298 break;
1299 } 1299 }
1300 if (!cache_cache.num) 1300 BUG_ON(!cache_cache.num);
1301 BUG();
1302 cache_cache.gfporder = order; 1301 cache_cache.gfporder = order;
1303 cache_cache.colour = left_over / cache_cache.colour_off; 1302 cache_cache.colour = left_over / cache_cache.colour_off;
1304 cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) + 1303 cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
@@ -1974,8 +1973,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
1974 * Always checks flags, a caller might be expecting debug support which 1973 * Always checks flags, a caller might be expecting debug support which
1975 * isn't available. 1974 * isn't available.
1976 */ 1975 */
1977 if (flags & ~CREATE_MASK) 1976 BUG_ON(flags & ~CREATE_MASK);
1978 BUG();
1979 1977
1980 /* 1978 /*
1981 * Check that size is in terms of words. This is needed to avoid 1979 * Check that size is in terms of words. This is needed to avoid
@@ -2206,8 +2204,7 @@ static int __node_shrink(struct kmem_cache *cachep, int node)
2206 2204
2207 slabp = list_entry(l3->slabs_free.prev, struct slab, list); 2205 slabp = list_entry(l3->slabs_free.prev, struct slab, list);
2208#if DEBUG 2206#if DEBUG
2209 if (slabp->inuse) 2207 BUG_ON(slabp->inuse);
2210 BUG();
2211#endif 2208#endif
2212 list_del(&slabp->list); 2209 list_del(&slabp->list);
2213 2210
@@ -2248,8 +2245,7 @@ static int __cache_shrink(struct kmem_cache *cachep)
2248 */ 2245 */
2249int kmem_cache_shrink(struct kmem_cache *cachep) 2246int kmem_cache_shrink(struct kmem_cache *cachep)
2250{ 2247{
2251 if (!cachep || in_interrupt()) 2248 BUG_ON(!cachep || in_interrupt());
2252 BUG();
2253 2249
2254 return __cache_shrink(cachep); 2250 return __cache_shrink(cachep);
2255} 2251}
@@ -2277,8 +2273,7 @@ int kmem_cache_destroy(struct kmem_cache *cachep)
2277 int i; 2273 int i;
2278 struct kmem_list3 *l3; 2274 struct kmem_list3 *l3;
2279 2275
2280 if (!cachep || in_interrupt()) 2276 BUG_ON(!cachep || in_interrupt());
2281 BUG();
2282 2277
2283 /* Don't let CPUs to come and go */ 2278 /* Don't let CPUs to come and go */
2284 lock_cpu_hotplug(); 2279 lock_cpu_hotplug();
@@ -2477,8 +2472,7 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
2477 * Be lazy and only check for valid flags here, keeping it out of the 2472 * Be lazy and only check for valid flags here, keeping it out of the
2478 * critical path in kmem_cache_alloc(). 2473 * critical path in kmem_cache_alloc().
2479 */ 2474 */
2480 if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW)) 2475 BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
2481 BUG();
2482 if (flags & SLAB_NO_GROW) 2476 if (flags & SLAB_NO_GROW)
2483 return 0; 2477 return 0;
2484 2478
diff --git a/mm/swap_state.c b/mm/swap_state.c
index d7af296833fc..e0e1583f32c2 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -148,8 +148,7 @@ int add_to_swap(struct page * page, gfp_t gfp_mask)
148 swp_entry_t entry; 148 swp_entry_t entry;
149 int err; 149 int err;
150 150
151 if (!PageLocked(page)) 151 BUG_ON(!PageLocked(page));
152 BUG();
153 152
154 for (;;) { 153 for (;;) {
155 entry = get_swap_page(); 154 entry = get_swap_page();
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 39aa9d129612..e5fd5385f0cc 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -397,18 +397,24 @@ void free_swap_and_cache(swp_entry_t entry)
397 397
398 p = swap_info_get(entry); 398 p = swap_info_get(entry);
399 if (p) { 399 if (p) {
400 if (swap_entry_free(p, swp_offset(entry)) == 1) 400 if (swap_entry_free(p, swp_offset(entry)) == 1) {
401 page = find_trylock_page(&swapper_space, entry.val); 401 page = find_get_page(&swapper_space, entry.val);
402 if (page && unlikely(TestSetPageLocked(page))) {
403 page_cache_release(page);
404 page = NULL;
405 }
406 }
402 spin_unlock(&swap_lock); 407 spin_unlock(&swap_lock);
403 } 408 }
404 if (page) { 409 if (page) {
405 int one_user; 410 int one_user;
406 411
407 BUG_ON(PagePrivate(page)); 412 BUG_ON(PagePrivate(page));
408 page_cache_get(page);
409 one_user = (page_count(page) == 2); 413 one_user = (page_count(page) == 2);
410 /* Only cache user (+us), or swap space full? Free it! */ 414 /* Only cache user (+us), or swap space full? Free it! */
411 if (!PageWriteback(page) && (one_user || vm_swap_full())) { 415 /* Also recheck PageSwapCache after page is locked (above) */
416 if (PageSwapCache(page) && !PageWriteback(page) &&
417 (one_user || vm_swap_full())) {
412 delete_from_swap_cache(page); 418 delete_from_swap_cache(page);
413 SetPageDirty(page); 419 SetPageDirty(page);
414 } 420 }
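
The free_swap_and_cache() hunk replaces find_trylock_page() with find_get_page() plus a TestSetPageLocked() trylock, drops the now-redundant page_cache_get(), and rechecks PageSwapCache once the page is locked, since the page can leave the swap cache while it is unlocked. A rough user-space sketch of that "take a reference, trylock, recheck under the lock" shape; a pthread mutex and a plain flag stand in for the page lock and PageSwapCache bit, the names are invented, and the refcount here is not atomic as it would be in the kernel:

#include <pthread.h>
#include <stdio.h>

struct cache_page {
        pthread_mutex_t lock;
        int refcount;
        int in_cache;                   /* analogue of PageSwapCache */
};

static struct cache_page *lookup_and_lock(struct cache_page *page)
{
        page->refcount++;                         /* find_get_page(): ref, no lock */
        if (pthread_mutex_trylock(&page->lock)) { /* TestSetPageLocked() failed */
                page->refcount--;                 /* page_cache_release()        */
                return NULL;
        }
        if (!page->in_cache) {                    /* recheck under the lock */
                pthread_mutex_unlock(&page->lock);
                page->refcount--;
                return NULL;
        }
        return page;                              /* locked, referenced, still cached */
}

int main(void)
{
        struct cache_page p = { PTHREAD_MUTEX_INITIALIZER, 0, 1 };
        struct cache_page *got = lookup_and_lock(&p);

        printf("locked: %s, refcount: %d\n", got ? "yes" : "no", p.refcount);
        if (got) {
                pthread_mutex_unlock(&got->lock);
                got->refcount--;
        }
        return 0;
}
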
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 729eb3eec75f..c0504f1e34eb 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -321,8 +321,7 @@ void __vunmap(void *addr, int deallocate_pages)
321 int i; 321 int i;
322 322
323 for (i = 0; i < area->nr_pages; i++) { 323 for (i = 0; i < area->nr_pages; i++) {
324 if (unlikely(!area->pages[i])) 324 BUG_ON(!area->pages[i]);
325 BUG();
326 __free_page(area->pages[i]); 325 __free_page(area->pages[i]);
327 } 326 }
328 327
diff --git a/net/compat.c b/net/compat.c
index 8fd37cd7b501..d5d69fa15d07 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -476,8 +476,7 @@ asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
476 int err; 476 int err;
477 struct socket *sock; 477 struct socket *sock;
478 478
479 /* SO_SET_REPLACE seems to be the same in all levels */ 479 if (level == SOL_IPV6 && optname == IPT_SO_SET_REPLACE)
480 if (optname == IPT_SO_SET_REPLACE)
481 return do_netfilter_replace(fd, level, optname, 480 return do_netfilter_replace(fd, level, optname,
482 optval, optlen); 481 optval, optlen);
483 482
diff --git a/net/core/dev.c b/net/core/dev.c
index a3ab11f34153..434220d093aa 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1080,6 +1080,70 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1080 rcu_read_unlock(); 1080 rcu_read_unlock();
1081} 1081}
1082 1082
1083
1084void __netif_schedule(struct net_device *dev)
1085{
1086 if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
1087 unsigned long flags;
1088 struct softnet_data *sd;
1089
1090 local_irq_save(flags);
1091 sd = &__get_cpu_var(softnet_data);
1092 dev->next_sched = sd->output_queue;
1093 sd->output_queue = dev;
1094 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1095 local_irq_restore(flags);
1096 }
1097}
1098EXPORT_SYMBOL(__netif_schedule);
1099
1100void __netif_rx_schedule(struct net_device *dev)
1101{
1102 unsigned long flags;
1103
1104 local_irq_save(flags);
1105 dev_hold(dev);
1106 list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
1107 if (dev->quota < 0)
1108 dev->quota += dev->weight;
1109 else
1110 dev->quota = dev->weight;
1111 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
1112 local_irq_restore(flags);
1113}
1114EXPORT_SYMBOL(__netif_rx_schedule);
1115
1116void dev_kfree_skb_any(struct sk_buff *skb)
1117{
1118 if (in_irq() || irqs_disabled())
1119 dev_kfree_skb_irq(skb);
1120 else
1121 dev_kfree_skb(skb);
1122}
1123EXPORT_SYMBOL(dev_kfree_skb_any);
1124
1125
1126/* Hot-plugging. */
1127void netif_device_detach(struct net_device *dev)
1128{
1129 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1130 netif_running(dev)) {
1131 netif_stop_queue(dev);
1132 }
1133}
1134EXPORT_SYMBOL(netif_device_detach);
1135
1136void netif_device_attach(struct net_device *dev)
1137{
1138 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1139 netif_running(dev)) {
1140 netif_wake_queue(dev);
1141 __netdev_watchdog_up(dev);
1142 }
1143}
1144EXPORT_SYMBOL(netif_device_attach);
1145
1146
1083/* 1147/*
1084 * Invalidate hardware checksum when packet is to be mangled, and 1148 * Invalidate hardware checksum when packet is to be mangled, and
1085 * complete checksum manually on outgoing path. 1149 * complete checksum manually on outgoing path.
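
The net/core/dev.c hunk moves __netif_schedule(), __netif_rx_schedule(), dev_kfree_skb_any() and the netif_device_detach()/attach() helpers out of line. Detach and attach gate on test_and_clear_bit()/test_and_set_bit() of __LINK_STATE_PRESENT, so the queue is stopped or woken only on a real presence transition. A compact stdatomic analogue of that edge-triggered bit handling; the device struct and the stop/wake stubs are made up for illustration:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define LINK_STATE_PRESENT (1u << 0)

struct net_device {
        atomic_uint state;
        bool running;
};

/* Return the previous value of the PRESENT bit while clearing/setting it. */
static bool test_and_clear_present(struct net_device *dev)
{
        return atomic_fetch_and(&dev->state, ~LINK_STATE_PRESENT) & LINK_STATE_PRESENT;
}

static bool test_and_set_present(struct net_device *dev)
{
        return atomic_fetch_or(&dev->state, LINK_STATE_PRESENT) & LINK_STATE_PRESENT;
}

static void device_detach(struct net_device *dev)
{
        if (test_and_clear_present(dev) && dev->running)
                printf("stop queue\n");           /* netif_stop_queue() */
}

static void device_attach(struct net_device *dev)
{
        if (!test_and_set_present(dev) && dev->running)
                printf("wake queue\n");           /* netif_wake_queue() + watchdog */
}

int main(void)
{
        struct net_device dev = { LINK_STATE_PRESENT, true };

        device_detach(&dev);    /* present -> absent: stops the queue */
        device_detach(&dev);    /* already absent: no second stop     */
        device_attach(&dev);    /* absent -> present: wakes the queue */
        return 0;
}
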
diff --git a/net/core/sock.c b/net/core/sock.c
index a96ea7dd0fc1..ed2afdb9ea2d 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -385,7 +385,21 @@ set_sndbuf:
385 val = sysctl_rmem_max; 385 val = sysctl_rmem_max;
386set_rcvbuf: 386set_rcvbuf:
387 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; 387 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
388 /* FIXME: is this lower bound the right one? */ 388 /*
389 * We double it on the way in to account for
390 * "struct sk_buff" etc. overhead. Applications
391 * assume that the SO_RCVBUF setting they make will
392 * allow that much actual data to be received on that
393 * socket.
394 *
395 * Applications are unaware that "struct sk_buff" and
396 * other overheads allocate from the receive buffer
397 * during socket buffer allocation.
398 *
399 * And after considering the possible alternatives,
400 * returning the value we actually used in getsockopt
401 * is the most desirable behavior.
402 */
389 if ((val * 2) < SOCK_MIN_RCVBUF) 403 if ((val * 2) < SOCK_MIN_RCVBUF)
390 sk->sk_rcvbuf = SOCK_MIN_RCVBUF; 404 sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
391 else 405 else
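
The new comment in sock.c explains why the kernel doubles the SO_RCVBUF value it is handed: the buffer also absorbs struct sk_buff and other per-packet overhead, and getsockopt() reports the doubled figure actually in use. That behaviour is visible from user space on Linux; the exact number also depends on SOCK_MIN_RCVBUF and the sysctl maximums:

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int set = 65536, got = 0;
        socklen_t len = sizeof(got);

        setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &set, sizeof(set));
        getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &got, &len);

        /* On Linux this typically prints roughly twice the requested size. */
        printf("asked for %d, kernel uses %d\n", set, got);
        close(fd);
        return 0;
}
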
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index e3dd30d36c8a..b39e2a597889 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -204,7 +204,7 @@ static int dccp_feat_reconcile(struct sock *sk, struct dccp_opt_pend *opt,
204 if (rc) { 204 if (rc) {
205 kfree(opt->dccpop_sc->dccpoc_val); 205 kfree(opt->dccpop_sc->dccpoc_val);
206 kfree(opt->dccpop_sc); 206 kfree(opt->dccpop_sc);
207 opt->dccpop_sc = 0; 207 opt->dccpop_sc = NULL;
208 return rc; 208 return rc;
209 } 209 }
210 210
@@ -322,7 +322,7 @@ static void dccp_feat_empty_confirm(struct dccp_minisock *dmsk,
322 opt->dccpop_type = type == DCCPO_CHANGE_L ? DCCPO_CONFIRM_R : 322 opt->dccpop_type = type == DCCPO_CHANGE_L ? DCCPO_CONFIRM_R :
323 DCCPO_CONFIRM_L; 323 DCCPO_CONFIRM_L;
324 opt->dccpop_feat = feature; 324 opt->dccpop_feat = feature;
325 opt->dccpop_val = 0; 325 opt->dccpop_val = NULL;
326 opt->dccpop_len = 0; 326 opt->dccpop_len = 0;
327 327
328 /* change feature */ 328 /* change feature */
@@ -523,7 +523,7 @@ int dccp_feat_clone(struct sock *oldsk, struct sock *newsk)
523 * once... 523 * once...
524 */ 524 */
525 /* the master socket no longer needs to worry about confirms */ 525 /* the master socket no longer needs to worry about confirms */
526 opt->dccpop_sc = 0; /* it's not a memleak---new socket has it */ 526 opt->dccpop_sc = NULL; /* it's not a memleak---new socket has it */
527 527
528 /* reset state for a new socket */ 528 /* reset state for a new socket */
529 opt->dccpop_conf = 0; 529 opt->dccpop_conf = 0;
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index d2ae9893ca17..a26ff9f44576 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -620,7 +620,7 @@ int dn_dev_set_default(struct net_device *dev, int force)
620 } 620 }
621 write_unlock(&dndev_lock); 621 write_unlock(&dndev_lock);
622 if (old) 622 if (old)
623 dev_put(dev); 623 dev_put(old);
624 return rv; 624 return rv;
625} 625}
626 626
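
The dn_dev.c hunk fixes a reference-count imbalance in dn_dev_set_default(): when the default device is replaced, the reference to drop is the one held on the old device, not on the device just passed in. A small stdatomic sketch of that "put the value you are replacing" rule; the struct and helpers are invented for illustration and do not reproduce the DECnet locking:

#include <stdatomic.h>
#include <stdio.h>

struct device {
        const char *name;
        atomic_int refcount;
};

static void dev_hold(struct device *d) { atomic_fetch_add(&d->refcount, 1); }
static void dev_put(struct device *d)  { atomic_fetch_sub(&d->refcount, 1); }

static struct device *default_dev;

static void set_default(struct device *dev)
{
        struct device *old = default_dev;

        dev_hold(dev);          /* the new default keeps a reference       */
        default_dev = dev;
        if (old)
                dev_put(old);   /* the bug dropped the ref on dev instead  */
}

int main(void)
{
        struct device a = { "eth0", 1 }, b = { "eth1", 1 };

        set_default(&a);
        set_default(&b);
        printf("eth0 refs %d, eth1 refs %d\n",
               atomic_load(&a.refcount), atomic_load(&b.refcount));  /* 1 and 2 */
        return 0;
}
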
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index e16d8b42b953..e2e4771fa4c6 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -116,7 +116,7 @@ error:
116 return err; 116 return err;
117} 117}
118 118
119static int ah_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb) 119static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
120{ 120{
121 int ah_hlen; 121 int ah_hlen;
122 struct iphdr *iph; 122 struct iphdr *iph;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index bf88c620a954..9d1881c07a32 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -133,7 +133,7 @@ error:
133 * expensive, so we only support truncated data, which is the recommended 133 * expensive, so we only support truncated data, which is the recommended
134 * and common case. 134 * and common case.
135 */ 135 */
136static int esp_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb) 136static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
137{ 137{
138 struct iphdr *iph; 138 struct iphdr *iph;
139 struct ip_esp_hdr *esph; 139 struct ip_esp_hdr *esph;
@@ -208,9 +208,6 @@ static int esp_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struc
208 struct xfrm_encap_tmpl *encap = x->encap; 208 struct xfrm_encap_tmpl *encap = x->encap;
209 struct udphdr *uh; 209 struct udphdr *uh;
210 210
211 if (encap->encap_type != decap->decap_type)
212 goto out;
213
214 uh = (struct udphdr *)(iph + 1); 211 uh = (struct udphdr *)(iph + 1);
215 encap_len = (void*)esph - (void*)uh; 212 encap_len = (void*)esph - (void*)uh;
216 213
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index c95020f7c81e..0a1d86a0f632 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -81,8 +81,7 @@ out:
81 return err; 81 return err;
82} 82}
83 83
84static int ipcomp_input(struct xfrm_state *x, 84static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
85 struct xfrm_decap_state *decap, struct sk_buff *skb)
86{ 85{
87 u8 nexthdr; 86 u8 nexthdr;
88 int err = 0; 87 int err = 0;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 882b842c25d4..77855ccd6b43 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -221,16 +221,6 @@ config IP_NF_MATCH_IPRANGE
221 221
222 To compile it as a module, choose M here. If unsure, say N. 222 To compile it as a module, choose M here. If unsure, say N.
223 223
224config IP_NF_MATCH_MULTIPORT
225 tristate "Multiple port match support"
226 depends on IP_NF_IPTABLES
227 help
228 Multiport matching allows you to match TCP or UDP packets based on
229 a series of source or destination ports: normally a rule can only
230 match a single range of ports.
231
232 To compile it as a module, choose M here. If unsure, say N.
233
234config IP_NF_MATCH_TOS 224config IP_NF_MATCH_TOS
235 tristate "TOS match support" 225 tristate "TOS match support"
236 depends on IP_NF_IPTABLES 226 depends on IP_NF_IPTABLES
@@ -272,12 +262,12 @@ config IP_NF_MATCH_DSCP
272 262
273 To compile it as a module, choose M here. If unsure, say N. 263 To compile it as a module, choose M here. If unsure, say N.
274 264
275config IP_NF_MATCH_AH_ESP 265config IP_NF_MATCH_AH
276 tristate "AH/ESP match support" 266 tristate "AH match support"
277 depends on IP_NF_IPTABLES 267 depends on IP_NF_IPTABLES
278 help 268 help
279 These two match extensions (`ah' and `esp') allow you to match a 269 This match extension allows you to match a range of SPIs
280 range of SPIs inside AH or ESP headers of IPSec packets. 270 inside AH header of IPSec packets.
281 271
282 To compile it as a module, choose M here. If unsure, say N. 272 To compile it as a module, choose M here. If unsure, say N.
283 273
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index f2cd9a6c5b91..461cb1eb5de7 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -53,13 +53,12 @@ obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
53# matches 53# matches
54obj-$(CONFIG_IP_NF_MATCH_HASHLIMIT) += ipt_hashlimit.o 54obj-$(CONFIG_IP_NF_MATCH_HASHLIMIT) += ipt_hashlimit.o
55obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o 55obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o
56obj-$(CONFIG_IP_NF_MATCH_MULTIPORT) += ipt_multiport.o
57obj-$(CONFIG_IP_NF_MATCH_OWNER) += ipt_owner.o 56obj-$(CONFIG_IP_NF_MATCH_OWNER) += ipt_owner.o
58obj-$(CONFIG_IP_NF_MATCH_TOS) += ipt_tos.o 57obj-$(CONFIG_IP_NF_MATCH_TOS) += ipt_tos.o
59obj-$(CONFIG_IP_NF_MATCH_RECENT) += ipt_recent.o 58obj-$(CONFIG_IP_NF_MATCH_RECENT) += ipt_recent.o
60obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o 59obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
61obj-$(CONFIG_IP_NF_MATCH_DSCP) += ipt_dscp.o 60obj-$(CONFIG_IP_NF_MATCH_DSCP) += ipt_dscp.o
62obj-$(CONFIG_IP_NF_MATCH_AH_ESP) += ipt_ah.o ipt_esp.o 61obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
63obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o 62obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
64obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o 63obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
65 64
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c
index 9b6e19bae90f..01bd7cab9367 100644
--- a/net/ipv4/netfilter/ip_conntrack_netlink.c
+++ b/net/ipv4/netfilter/ip_conntrack_netlink.c
@@ -1658,7 +1658,7 @@ static void __exit ctnetlink_exit(void)
1658 printk("ctnetlink: unregistering from nfnetlink.\n"); 1658 printk("ctnetlink: unregistering from nfnetlink.\n");
1659 1659
1660#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS 1660#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
1661 ip_conntrack_unregister_notifier(&ctnl_notifier_exp); 1661 ip_conntrack_expect_unregister_notifier(&ctnl_notifier_exp);
1662 ip_conntrack_unregister_notifier(&ctnl_notifier); 1662 ip_conntrack_unregister_notifier(&ctnl_notifier);
1663#endif 1663#endif
1664 1664
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 460fd905fad0..d5b8cdd361ce 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -24,6 +24,7 @@
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/icmp.h> 25#include <linux/icmp.h>
26#include <net/ip.h> 26#include <net/ip.h>
27#include <net/compat.h>
27#include <asm/uaccess.h> 28#include <asm/uaccess.h>
28#include <linux/mutex.h> 29#include <linux/mutex.h>
29#include <linux/proc_fs.h> 30#include <linux/proc_fs.h>
@@ -799,17 +800,11 @@ get_counters(const struct xt_table_info *t,
799 } 800 }
800} 801}
801 802
802static int 803static inline struct xt_counters * alloc_counters(struct ipt_table *table)
803copy_entries_to_user(unsigned int total_size,
804 struct ipt_table *table,
805 void __user *userptr)
806{ 804{
807 unsigned int off, num, countersize; 805 unsigned int countersize;
808 struct ipt_entry *e;
809 struct xt_counters *counters; 806 struct xt_counters *counters;
810 struct xt_table_info *private = table->private; 807 struct xt_table_info *private = table->private;
811 int ret = 0;
812 void *loc_cpu_entry;
813 808
814 /* We need atomic snapshot of counters: rest doesn't change 809 /* We need atomic snapshot of counters: rest doesn't change
815 (other than comefrom, which userspace doesn't care 810 (other than comefrom, which userspace doesn't care
@@ -818,13 +813,32 @@ copy_entries_to_user(unsigned int total_size,
818 counters = vmalloc_node(countersize, numa_node_id()); 813 counters = vmalloc_node(countersize, numa_node_id());
819 814
820 if (counters == NULL) 815 if (counters == NULL)
821 return -ENOMEM; 816 return ERR_PTR(-ENOMEM);
822 817
823 /* First, sum counters... */ 818 /* First, sum counters... */
824 write_lock_bh(&table->lock); 819 write_lock_bh(&table->lock);
825 get_counters(private, counters); 820 get_counters(private, counters);
826 write_unlock_bh(&table->lock); 821 write_unlock_bh(&table->lock);
827 822
823 return counters;
824}
825
826static int
827copy_entries_to_user(unsigned int total_size,
828 struct ipt_table *table,
829 void __user *userptr)
830{
831 unsigned int off, num;
832 struct ipt_entry *e;
833 struct xt_counters *counters;
834 struct xt_table_info *private = table->private;
835 int ret = 0;
836 void *loc_cpu_entry;
837
838 counters = alloc_counters(table);
839 if (IS_ERR(counters))
840 return PTR_ERR(counters);
841
828 /* choose the copy that is on our node/cpu, ... 842 /* choose the copy that is on our node/cpu, ...
829 * This choice is lazy (because current thread is 843 * This choice is lazy (because current thread is
830 * allowed to migrate to another cpu) 844 * allowed to migrate to another cpu)
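
The hunk above splits counter snapshotting into alloc_counters(), which reports failure through ERR_PTR(-ENOMEM) so that copy_entries_to_user() can test IS_ERR()/PTR_ERR() instead of threading a separate error code. A self-contained user-space rendition of that encoding; the kernel's versions live in include/linux/err.h, and the allocator below is a stand-in:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Encode small negative errno values at the top of the address space. */
#define MAX_ERRNO 4095
#define ERR_PTR(err)  ((void *)(long)(err))
#define PTR_ERR(ptr)  ((long)(ptr))
#define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *alloc_counters(size_t n)
{
        void *buf = calloc(n, sizeof(long));

        if (!buf)
                return ERR_PTR(-ENOMEM);    /* the failure travels in the pointer */
        return buf;
}

int main(void)
{
        void *counters = alloc_counters(16);

        if (IS_ERR(counters)) {
                printf("alloc failed: %ld\n", PTR_ERR(counters));
                return 1;
        }
        free(counters);
        return 0;
}
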
@@ -884,25 +898,278 @@ copy_entries_to_user(unsigned int total_size,
884 return ret; 898 return ret;
885} 899}
886 900
901#ifdef CONFIG_COMPAT
902struct compat_delta {
903 struct compat_delta *next;
904 u_int16_t offset;
905 short delta;
906};
907
908static struct compat_delta *compat_offsets = NULL;
909
910static int compat_add_offset(u_int16_t offset, short delta)
911{
912 struct compat_delta *tmp;
913
914 tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
915 if (!tmp)
916 return -ENOMEM;
917 tmp->offset = offset;
918 tmp->delta = delta;
919 if (compat_offsets) {
920 tmp->next = compat_offsets->next;
921 compat_offsets->next = tmp;
922 } else {
923 compat_offsets = tmp;
924 tmp->next = NULL;
925 }
926 return 0;
927}
928
929static void compat_flush_offsets(void)
930{
931 struct compat_delta *tmp, *next;
932
933 if (compat_offsets) {
934 for(tmp = compat_offsets; tmp; tmp = next) {
935 next = tmp->next;
936 kfree(tmp);
937 }
938 compat_offsets = NULL;
939 }
940}
941
942static short compat_calc_jump(u_int16_t offset)
943{
944 struct compat_delta *tmp;
945 short delta;
946
947 for(tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
948 if (tmp->offset < offset)
949 delta += tmp->delta;
950 return delta;
951}
952
953struct compat_ipt_standard_target
954{
955 struct compat_xt_entry_target target;
956 compat_int_t verdict;
957};
958
959#define IPT_ST_OFFSET (sizeof(struct ipt_standard_target) - \
960 sizeof(struct compat_ipt_standard_target))
961
962struct compat_ipt_standard
963{
964 struct compat_ipt_entry entry;
965 struct compat_ipt_standard_target target;
966};
967
968static int compat_ipt_standard_fn(void *target,
969 void **dstptr, int *size, int convert)
970{
971 struct compat_ipt_standard_target compat_st, *pcompat_st;
972 struct ipt_standard_target st, *pst;
973 int ret;
974
975 ret = 0;
976 switch (convert) {
977 case COMPAT_TO_USER:
978 pst = (struct ipt_standard_target *)target;
979 memcpy(&compat_st.target, &pst->target,
980 sizeof(struct ipt_entry_target));
981 compat_st.verdict = pst->verdict;
982 if (compat_st.verdict > 0)
983 compat_st.verdict -=
984 compat_calc_jump(compat_st.verdict);
985 compat_st.target.u.user.target_size =
986 sizeof(struct compat_ipt_standard_target);
987 if (__copy_to_user(*dstptr, &compat_st,
988 sizeof(struct compat_ipt_standard_target)))
989 ret = -EFAULT;
990 *size -= IPT_ST_OFFSET;
991 *dstptr += sizeof(struct compat_ipt_standard_target);
992 break;
993 case COMPAT_FROM_USER:
994 pcompat_st =
995 (struct compat_ipt_standard_target *)target;
996 memcpy(&st.target, &pcompat_st->target,
997 sizeof(struct ipt_entry_target));
998 st.verdict = pcompat_st->verdict;
999 if (st.verdict > 0)
1000 st.verdict += compat_calc_jump(st.verdict);
1001 st.target.u.user.target_size =
1002 sizeof(struct ipt_standard_target);
1003 memcpy(*dstptr, &st,
1004 sizeof(struct ipt_standard_target));
1005 *size += IPT_ST_OFFSET;
1006 *dstptr += sizeof(struct ipt_standard_target);
1007 break;
1008 case COMPAT_CALC_SIZE:
1009 *size += IPT_ST_OFFSET;
1010 break;
1011 default:
1012 ret = -ENOPROTOOPT;
1013 break;
1014 }
1015 return ret;
1016}
1017
1018static inline int
1019compat_calc_match(struct ipt_entry_match *m, int * size)
1020{
1021 if (m->u.kernel.match->compat)
1022 m->u.kernel.match->compat(m, NULL, size, COMPAT_CALC_SIZE);
1023 else
1024 xt_compat_match(m, NULL, size, COMPAT_CALC_SIZE);
1025 return 0;
1026}
1027
1028static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
1029 void *base, struct xt_table_info *newinfo)
1030{
1031 struct ipt_entry_target *t;
1032 u_int16_t entry_offset;
1033 int off, i, ret;
1034
1035 off = 0;
1036 entry_offset = (void *)e - base;
1037 IPT_MATCH_ITERATE(e, compat_calc_match, &off);
1038 t = ipt_get_target(e);
1039 if (t->u.kernel.target->compat)
1040 t->u.kernel.target->compat(t, NULL, &off, COMPAT_CALC_SIZE);
1041 else
1042 xt_compat_target(t, NULL, &off, COMPAT_CALC_SIZE);
1043 newinfo->size -= off;
1044 ret = compat_add_offset(entry_offset, off);
1045 if (ret)
1046 return ret;
1047
1048 for (i = 0; i< NF_IP_NUMHOOKS; i++) {
1049 if (info->hook_entry[i] && (e < (struct ipt_entry *)
1050 (base + info->hook_entry[i])))
1051 newinfo->hook_entry[i] -= off;
1052 if (info->underflow[i] && (e < (struct ipt_entry *)
1053 (base + info->underflow[i])))
1054 newinfo->underflow[i] -= off;
1055 }
1056 return 0;
1057}
1058
1059static int compat_table_info(struct xt_table_info *info,
1060 struct xt_table_info *newinfo)
1061{
1062 void *loc_cpu_entry;
1063 int i;
1064
1065 if (!newinfo || !info)
1066 return -EINVAL;
1067
1068 memset(newinfo, 0, sizeof(struct xt_table_info));
1069 newinfo->size = info->size;
1070 newinfo->number = info->number;
1071 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1072 newinfo->hook_entry[i] = info->hook_entry[i];
1073 newinfo->underflow[i] = info->underflow[i];
1074 }
1075 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1076 return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
1077 compat_calc_entry, info, loc_cpu_entry, newinfo);
1078}
1079#endif
1080
1081static int get_info(void __user *user, int *len, int compat)
1082{
1083 char name[IPT_TABLE_MAXNAMELEN];
1084 struct ipt_table *t;
1085 int ret;
1086
1087 if (*len != sizeof(struct ipt_getinfo)) {
1088 duprintf("length %u != %u\n", *len,
1089 (unsigned int)sizeof(struct ipt_getinfo));
1090 return -EINVAL;
1091 }
1092
1093 if (copy_from_user(name, user, sizeof(name)) != 0)
1094 return -EFAULT;
1095
1096 name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1097#ifdef CONFIG_COMPAT
1098 if (compat)
1099 xt_compat_lock(AF_INET);
1100#endif
1101 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1102 "iptable_%s", name);
1103 if (t && !IS_ERR(t)) {
1104 struct ipt_getinfo info;
1105 struct xt_table_info *private = t->private;
1106
1107#ifdef CONFIG_COMPAT
1108 if (compat) {
1109 struct xt_table_info tmp;
1110 ret = compat_table_info(private, &tmp);
1111 compat_flush_offsets();
1112 private = &tmp;
1113 }
1114#endif
1115 info.valid_hooks = t->valid_hooks;
1116 memcpy(info.hook_entry, private->hook_entry,
1117 sizeof(info.hook_entry));
1118 memcpy(info.underflow, private->underflow,
1119 sizeof(info.underflow));
1120 info.num_entries = private->number;
1121 info.size = private->size;
1122 strcpy(info.name, name);
1123
1124 if (copy_to_user(user, &info, *len) != 0)
1125 ret = -EFAULT;
1126 else
1127 ret = 0;
1128
1129 xt_table_unlock(t);
1130 module_put(t->me);
1131 } else
1132 ret = t ? PTR_ERR(t) : -ENOENT;
1133#ifdef CONFIG_COMPAT
1134 if (compat)
1135 xt_compat_unlock(AF_INET);
1136#endif
1137 return ret;
1138}
1139
887static int 1140static int
888get_entries(const struct ipt_get_entries *entries, 1141get_entries(struct ipt_get_entries __user *uptr, int *len)
889 struct ipt_get_entries __user *uptr)
890{ 1142{
891 int ret; 1143 int ret;
1144 struct ipt_get_entries get;
892 struct ipt_table *t; 1145 struct ipt_table *t;
893 1146
894 t = xt_find_table_lock(AF_INET, entries->name); 1147 if (*len < sizeof(get)) {
1148 duprintf("get_entries: %u < %d\n", *len,
1149 (unsigned int)sizeof(get));
1150 return -EINVAL;
1151 }
1152 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1153 return -EFAULT;
1154 if (*len != sizeof(struct ipt_get_entries) + get.size) {
1155 duprintf("get_entries: %u != %u\n", *len,
1156 (unsigned int)(sizeof(struct ipt_get_entries) +
1157 get.size));
1158 return -EINVAL;
1159 }
1160
1161 t = xt_find_table_lock(AF_INET, get.name);
895 if (t && !IS_ERR(t)) { 1162 if (t && !IS_ERR(t)) {
896 struct xt_table_info *private = t->private; 1163 struct xt_table_info *private = t->private;
897 duprintf("t->private->number = %u\n", 1164 duprintf("t->private->number = %u\n",
898 private->number); 1165 private->number);
899 if (entries->size == private->size) 1166 if (get.size == private->size)
900 ret = copy_entries_to_user(private->size, 1167 ret = copy_entries_to_user(private->size,
901 t, uptr->entrytable); 1168 t, uptr->entrytable);
902 else { 1169 else {
903 duprintf("get_entries: I've got %u not %u!\n", 1170 duprintf("get_entries: I've got %u not %u!\n",
904 private->size, 1171 private->size,
905 entries->size); 1172 get.size);
906 ret = -EINVAL; 1173 ret = -EINVAL;
907 } 1174 }
908 module_put(t->me); 1175 module_put(t->me);
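
The compat block above records, per rule, how much smaller the 32-bit layout is (compat_add_offset), and compat_calc_jump() then shifts a standard-target verdict by the accumulated delta of every entry that precedes the jump target, since verdicts are byte offsets into the ruleset blob. A small sketch of that adjustment over a list of (offset, delta) records; the data and helper name are invented:

#include <stdio.h>

struct delta_rec {
        unsigned int offset;    /* where an entry started in the native layout */
        int delta;              /* bytes saved when converted to compat layout */
};

/* Sum the shrinkage of every entry that lies before `offset'. */
static int calc_jump_delta(const struct delta_rec *recs, int n, unsigned int offset)
{
        int i, delta = 0;

        for (i = 0; i < n; i++)
                if (recs[i].offset < offset)
                        delta += recs[i].delta;
        return delta;
}

int main(void)
{
        /* Three rules, each 8 bytes smaller in the compat layout. */
        const struct delta_rec recs[] = { { 0, 8 }, { 112, 8 }, { 224, 8 } };
        unsigned int verdict = 224;     /* native jump target: start of rule 3 */

        /* When copying to a 32-bit user, the jump shrinks accordingly. */
        printf("compat verdict: %u\n",
               verdict - calc_jump_delta(recs, 3, verdict));   /* 224 - 16 = 208 */
        return 0;
}
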
@@ -914,79 +1181,47 @@ get_entries(const struct ipt_get_entries *entries,
914} 1181}
915 1182
916static int 1183static int
917do_replace(void __user *user, unsigned int len) 1184__do_replace(const char *name, unsigned int valid_hooks,
1185 struct xt_table_info *newinfo, unsigned int num_counters,
1186 void __user *counters_ptr)
918{ 1187{
919 int ret; 1188 int ret;
920 struct ipt_replace tmp;
921 struct ipt_table *t; 1189 struct ipt_table *t;
922 struct xt_table_info *newinfo, *oldinfo; 1190 struct xt_table_info *oldinfo;
923 struct xt_counters *counters; 1191 struct xt_counters *counters;
924 void *loc_cpu_entry, *loc_cpu_old_entry; 1192 void *loc_cpu_old_entry;
925 1193
926 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1194 ret = 0;
927 return -EFAULT; 1195 counters = vmalloc(num_counters * sizeof(struct xt_counters));
928
929 /* Hack: Causes ipchains to give correct error msg --RR */
930 if (len != sizeof(tmp) + tmp.size)
931 return -ENOPROTOOPT;
932
933 /* overflow check */
934 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
935 SMP_CACHE_BYTES)
936 return -ENOMEM;
937 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
938 return -ENOMEM;
939
940 newinfo = xt_alloc_table_info(tmp.size);
941 if (!newinfo)
942 return -ENOMEM;
943
944 /* choose the copy that is our node/cpu */
945 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
946 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
947 tmp.size) != 0) {
948 ret = -EFAULT;
949 goto free_newinfo;
950 }
951
952 counters = vmalloc(tmp.num_counters * sizeof(struct xt_counters));
953 if (!counters) { 1196 if (!counters) {
954 ret = -ENOMEM; 1197 ret = -ENOMEM;
955 goto free_newinfo; 1198 goto out;
956 } 1199 }
957 1200
958 ret = translate_table(tmp.name, tmp.valid_hooks, 1201 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
959 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries, 1202 "iptable_%s", name);
960 tmp.hook_entry, tmp.underflow);
961 if (ret != 0)
962 goto free_newinfo_counters;
963
964 duprintf("ip_tables: Translated table\n");
965
966 t = try_then_request_module(xt_find_table_lock(AF_INET, tmp.name),
967 "iptable_%s", tmp.name);
968 if (!t || IS_ERR(t)) { 1203 if (!t || IS_ERR(t)) {
969 ret = t ? PTR_ERR(t) : -ENOENT; 1204 ret = t ? PTR_ERR(t) : -ENOENT;
970 goto free_newinfo_counters_untrans; 1205 goto free_newinfo_counters_untrans;
971 } 1206 }
972 1207
973 /* You lied! */ 1208 /* You lied! */
974 if (tmp.valid_hooks != t->valid_hooks) { 1209 if (valid_hooks != t->valid_hooks) {
975 duprintf("Valid hook crap: %08X vs %08X\n", 1210 duprintf("Valid hook crap: %08X vs %08X\n",
976 tmp.valid_hooks, t->valid_hooks); 1211 valid_hooks, t->valid_hooks);
977 ret = -EINVAL; 1212 ret = -EINVAL;
978 goto put_module; 1213 goto put_module;
979 } 1214 }
980 1215
981 oldinfo = xt_replace_table(t, tmp.num_counters, newinfo, &ret); 1216 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
982 if (!oldinfo) 1217 if (!oldinfo)
983 goto put_module; 1218 goto put_module;
984 1219
985 /* Update module usage count based on number of rules */ 1220 /* Update module usage count based on number of rules */
986 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", 1221 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
987 oldinfo->number, oldinfo->initial_entries, newinfo->number); 1222 oldinfo->number, oldinfo->initial_entries, newinfo->number);
988 if ((oldinfo->number > oldinfo->initial_entries) || 1223 if ((oldinfo->number > oldinfo->initial_entries) ||
989 (newinfo->number <= oldinfo->initial_entries)) 1224 (newinfo->number <= oldinfo->initial_entries))
990 module_put(t->me); 1225 module_put(t->me);
991 if ((oldinfo->number > oldinfo->initial_entries) && 1226 if ((oldinfo->number > oldinfo->initial_entries) &&
992 (newinfo->number <= oldinfo->initial_entries)) 1227 (newinfo->number <= oldinfo->initial_entries))
@@ -998,8 +1233,8 @@ do_replace(void __user *user, unsigned int len)
998 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; 1233 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
999 IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL); 1234 IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
1000 xt_free_table_info(oldinfo); 1235 xt_free_table_info(oldinfo);
1001 if (copy_to_user(tmp.counters, counters, 1236 if (copy_to_user(counters_ptr, counters,
1002 sizeof(struct xt_counters) * tmp.num_counters) != 0) 1237 sizeof(struct xt_counters) * num_counters) != 0)
1003 ret = -EFAULT; 1238 ret = -EFAULT;
1004 vfree(counters); 1239 vfree(counters);
1005 xt_table_unlock(t); 1240 xt_table_unlock(t);
@@ -1009,9 +1244,62 @@ do_replace(void __user *user, unsigned int len)
1009 module_put(t->me); 1244 module_put(t->me);
1010 xt_table_unlock(t); 1245 xt_table_unlock(t);
1011 free_newinfo_counters_untrans: 1246 free_newinfo_counters_untrans:
1012 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1013 free_newinfo_counters:
1014 vfree(counters); 1247 vfree(counters);
1248 out:
1249 return ret;
1250}
1251
1252static int
1253do_replace(void __user *user, unsigned int len)
1254{
1255 int ret;
1256 struct ipt_replace tmp;
1257 struct xt_table_info *newinfo;
1258 void *loc_cpu_entry;
1259
1260 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1261 return -EFAULT;
1262
1263 /* Hack: Causes ipchains to give correct error msg --RR */
1264 if (len != sizeof(tmp) + tmp.size)
1265 return -ENOPROTOOPT;
1266
1267 /* overflow check */
1268 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1269 SMP_CACHE_BYTES)
1270 return -ENOMEM;
1271 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1272 return -ENOMEM;
1273
1274 newinfo = xt_alloc_table_info(tmp.size);
1275 if (!newinfo)
1276 return -ENOMEM;
1277
1278 /* choose the copy that is our node/cpu */
1279 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1280 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1281 tmp.size) != 0) {
1282 ret = -EFAULT;
1283 goto free_newinfo;
1284 }
1285
1286 ret = translate_table(tmp.name, tmp.valid_hooks,
1287 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1288 tmp.hook_entry, tmp.underflow);
1289 if (ret != 0)
1290 goto free_newinfo;
1291
1292 duprintf("ip_tables: Translated table\n");
1293
1294 ret = __do_replace(tmp.name, tmp.valid_hooks,
1295 newinfo, tmp.num_counters,
1296 tmp.counters);
1297 if (ret)
1298 goto free_newinfo_untrans;
1299 return 0;
1300
1301 free_newinfo_untrans:
1302 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1015 free_newinfo: 1303 free_newinfo:
1016 xt_free_table_info(newinfo); 1304 xt_free_table_info(newinfo);
1017 return ret; 1305 return ret;
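
do_replace() is reduced above to copying and translating the native ipt_replace, while the table swap, counter copy-out and module accounting move into __do_replace(); compat_do_replace() further down reuses the same core after translate_compat_table(). In miniature, the factoring is two thin front ends handing an already-validated request to one shared core; the types and functions below are illustrative only and do not mirror the real structures:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Shared core: assumes the header has already been parsed and validated. */
static int replace_core(const char *name, unsigned int num_rules)
{
        printf("installed %s with %u rules\n", name, num_rules);
        return 0;
}

/* Native front end: 64-bit wire header. */
struct replace64 { char name[16]; uint64_t num_rules; };
static int do_replace(const void *user)
{
        struct replace64 hdr;

        memcpy(&hdr, user, sizeof(hdr));                 /* copy_from_user() */
        return replace_core(hdr.name, (unsigned int)hdr.num_rules);
}

/* Compat front end: the same request from a 32-bit caller. */
struct replace32 { char name[16]; uint32_t num_rules; };
static int compat_do_replace(const void *user)
{
        struct replace32 hdr;

        memcpy(&hdr, user, sizeof(hdr));
        return replace_core(hdr.name, hdr.num_rules);
}

int main(void)
{
        struct replace64 n = { "filter", 4 };
        struct replace32 c = { "filter", 4 };

        do_replace(&n);
        compat_do_replace(&c);
        return 0;
}
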
@@ -1040,31 +1328,59 @@ add_counter_to_entry(struct ipt_entry *e,
1040} 1328}
1041 1329
1042static int 1330static int
1043do_add_counters(void __user *user, unsigned int len) 1331do_add_counters(void __user *user, unsigned int len, int compat)
1044{ 1332{
1045 unsigned int i; 1333 unsigned int i;
1046 struct xt_counters_info tmp, *paddc; 1334 struct xt_counters_info tmp;
1335 struct xt_counters *paddc;
1336 unsigned int num_counters;
1337 char *name;
1338 int size;
1339 void *ptmp;
1047 struct ipt_table *t; 1340 struct ipt_table *t;
1048 struct xt_table_info *private; 1341 struct xt_table_info *private;
1049 int ret = 0; 1342 int ret = 0;
1050 void *loc_cpu_entry; 1343 void *loc_cpu_entry;
1344#ifdef CONFIG_COMPAT
1345 struct compat_xt_counters_info compat_tmp;
1051 1346
1052 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1347 if (compat) {
1348 ptmp = &compat_tmp;
1349 size = sizeof(struct compat_xt_counters_info);
1350 } else
1351#endif
1352 {
1353 ptmp = &tmp;
1354 size = sizeof(struct xt_counters_info);
1355 }
1356
1357 if (copy_from_user(ptmp, user, size) != 0)
1053 return -EFAULT; 1358 return -EFAULT;
1054 1359
1055 if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct xt_counters)) 1360#ifdef CONFIG_COMPAT
1361 if (compat) {
1362 num_counters = compat_tmp.num_counters;
1363 name = compat_tmp.name;
1364 } else
1365#endif
1366 {
1367 num_counters = tmp.num_counters;
1368 name = tmp.name;
1369 }
1370
1371 if (len != size + num_counters * sizeof(struct xt_counters))
1056 return -EINVAL; 1372 return -EINVAL;
1057 1373
1058 paddc = vmalloc_node(len, numa_node_id()); 1374 paddc = vmalloc_node(len - size, numa_node_id());
1059 if (!paddc) 1375 if (!paddc)
1060 return -ENOMEM; 1376 return -ENOMEM;
1061 1377
1062 if (copy_from_user(paddc, user, len) != 0) { 1378 if (copy_from_user(paddc, user + size, len - size) != 0) {
1063 ret = -EFAULT; 1379 ret = -EFAULT;
1064 goto free; 1380 goto free;
1065 } 1381 }
1066 1382
1067 t = xt_find_table_lock(AF_INET, tmp.name); 1383 t = xt_find_table_lock(AF_INET, name);
1068 if (!t || IS_ERR(t)) { 1384 if (!t || IS_ERR(t)) {
1069 ret = t ? PTR_ERR(t) : -ENOENT; 1385 ret = t ? PTR_ERR(t) : -ENOENT;
1070 goto free; 1386 goto free;
@@ -1072,7 +1388,7 @@ do_add_counters(void __user *user, unsigned int len)
1072 1388
1073 write_lock_bh(&t->lock); 1389 write_lock_bh(&t->lock);
1074 private = t->private; 1390 private = t->private;
1075 if (private->number != paddc->num_counters) { 1391 if (private->number != num_counters) {
1076 ret = -EINVAL; 1392 ret = -EINVAL;
1077 goto unlock_up_free; 1393 goto unlock_up_free;
1078 } 1394 }
@@ -1083,7 +1399,7 @@ do_add_counters(void __user *user, unsigned int len)
1083 IPT_ENTRY_ITERATE(loc_cpu_entry, 1399 IPT_ENTRY_ITERATE(loc_cpu_entry,
1084 private->size, 1400 private->size,
1085 add_counter_to_entry, 1401 add_counter_to_entry,
1086 paddc->counters, 1402 paddc,
1087 &i); 1403 &i);
1088 unlock_up_free: 1404 unlock_up_free:
1089 write_unlock_bh(&t->lock); 1405 write_unlock_bh(&t->lock);
@@ -1095,8 +1411,438 @@ do_add_counters(void __user *user, unsigned int len)
1095 return ret; 1411 return ret;
1096} 1412}
1097 1413
1414#ifdef CONFIG_COMPAT
1415struct compat_ipt_replace {
1416 char name[IPT_TABLE_MAXNAMELEN];
1417 u32 valid_hooks;
1418 u32 num_entries;
1419 u32 size;
1420 u32 hook_entry[NF_IP_NUMHOOKS];
1421 u32 underflow[NF_IP_NUMHOOKS];
1422 u32 num_counters;
1423 compat_uptr_t counters; /* struct ipt_counters * */
1424 struct compat_ipt_entry entries[0];
1425};
1426
1427static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
1428 void __user **dstptr, compat_uint_t *size)
1429{
1430 if (m->u.kernel.match->compat)
1431 return m->u.kernel.match->compat(m, dstptr, size,
1432 COMPAT_TO_USER);
1433 else
1434 return xt_compat_match(m, dstptr, size, COMPAT_TO_USER);
1435}
1436
1437static int compat_copy_entry_to_user(struct ipt_entry *e,
1438 void __user **dstptr, compat_uint_t *size)
1439{
1440 struct ipt_entry_target __user *t;
1441 struct compat_ipt_entry __user *ce;
1442 u_int16_t target_offset, next_offset;
1443 compat_uint_t origsize;
1444 int ret;
1445
1446 ret = -EFAULT;
1447 origsize = *size;
1448 ce = (struct compat_ipt_entry __user *)*dstptr;
1449 if (__copy_to_user(ce, e, sizeof(struct ipt_entry)))
1450 goto out;
1451
1452 *dstptr += sizeof(struct compat_ipt_entry);
1453 ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
1454 target_offset = e->target_offset - (origsize - *size);
1455 if (ret)
1456 goto out;
1457 t = ipt_get_target(e);
1458 if (t->u.kernel.target->compat)
1459 ret = t->u.kernel.target->compat(t, dstptr, size,
1460 COMPAT_TO_USER);
1461 else
1462 ret = xt_compat_target(t, dstptr, size, COMPAT_TO_USER);
1463 if (ret)
1464 goto out;
1465 ret = -EFAULT;
1466 next_offset = e->next_offset - (origsize - *size);
1467 if (__put_user(target_offset, &ce->target_offset))
1468 goto out;
1469 if (__put_user(next_offset, &ce->next_offset))
1470 goto out;
1471 return 0;
1472out:
1473 return ret;
1474}
1475
1476static inline int
1477compat_check_calc_match(struct ipt_entry_match *m,
1478 const char *name,
1479 const struct ipt_ip *ip,
1480 unsigned int hookmask,
1481 int *size, int *i)
1482{
1483 struct ipt_match *match;
1484
1485 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
1486 m->u.user.revision),
1487 "ipt_%s", m->u.user.name);
1488 if (IS_ERR(match) || !match) {
1489 duprintf("compat_check_calc_match: `%s' not found\n",
1490 m->u.user.name);
1491 return match ? PTR_ERR(match) : -ENOENT;
1492 }
1493 m->u.kernel.match = match;
1494
1495 if (m->u.kernel.match->compat)
1496 m->u.kernel.match->compat(m, NULL, size, COMPAT_CALC_SIZE);
1497 else
1498 xt_compat_match(m, NULL, size, COMPAT_CALC_SIZE);
1499
1500 (*i)++;
1501 return 0;
1502}
1503
1504static inline int
1505check_compat_entry_size_and_hooks(struct ipt_entry *e,
1506 struct xt_table_info *newinfo,
1507 unsigned int *size,
1508 unsigned char *base,
1509 unsigned char *limit,
1510 unsigned int *hook_entries,
1511 unsigned int *underflows,
1512 unsigned int *i,
1513 const char *name)
1514{
1515 struct ipt_entry_target *t;
1516 struct ipt_target *target;
1517 u_int16_t entry_offset;
1518 int ret, off, h, j;
1519
1520 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1521 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
1522 || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
1523 duprintf("Bad offset %p, limit = %p\n", e, limit);
1524 return -EINVAL;
1525 }
1526
1527 if (e->next_offset < sizeof(struct compat_ipt_entry) +
1528 sizeof(struct compat_xt_entry_target)) {
1529 duprintf("checking: element %p size %u\n",
1530 e, e->next_offset);
1531 return -EINVAL;
1532 }
1533
1534 if (!ip_checkentry(&e->ip)) {
1535 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
1536 return -EINVAL;
1537 }
1538
1539 off = 0;
1540 entry_offset = (void *)e - (void *)base;
1541 j = 0;
1542 ret = IPT_MATCH_ITERATE(e, compat_check_calc_match, name, &e->ip,
1543 e->comefrom, &off, &j);
1544 if (ret != 0)
1545 goto out;
1546
1547 t = ipt_get_target(e);
1548 target = try_then_request_module(xt_find_target(AF_INET,
1549 t->u.user.name,
1550 t->u.user.revision),
1551 "ipt_%s", t->u.user.name);
1552 if (IS_ERR(target) || !target) {
1553 duprintf("check_entry: `%s' not found\n", t->u.user.name);
1554 ret = target ? PTR_ERR(target) : -ENOENT;
1555 goto out;
1556 }
1557 t->u.kernel.target = target;
1558
1559 if (t->u.kernel.target->compat)
1560 t->u.kernel.target->compat(t, NULL, &off, COMPAT_CALC_SIZE);
1561 else
1562 xt_compat_target(t, NULL, &off, COMPAT_CALC_SIZE);
1563 *size += off;
1564 ret = compat_add_offset(entry_offset, off);
1565 if (ret)
1566 goto out;
1567
1568 /* Check hooks & underflows */
1569 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
1570 if ((unsigned char *)e - base == hook_entries[h])
1571 newinfo->hook_entry[h] = hook_entries[h];
1572 if ((unsigned char *)e - base == underflows[h])
1573 newinfo->underflow[h] = underflows[h];
1574 }
1575
1576 /* Clear counters and comefrom */
1577 e->counters = ((struct ipt_counters) { 0, 0 });
1578 e->comefrom = 0;
1579
1580 (*i)++;
1581 return 0;
1582out:
1583 IPT_MATCH_ITERATE(e, cleanup_match, &j);
1584 return ret;
1585}
1586
1587static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
1588 void **dstptr, compat_uint_t *size, const char *name,
1589 const struct ipt_ip *ip, unsigned int hookmask)
1590{
1591 struct ipt_entry_match *dm;
1592 struct ipt_match *match;
1593 int ret;
1594
1595 dm = (struct ipt_entry_match *)*dstptr;
1596 match = m->u.kernel.match;
1597 if (match->compat)
1598 match->compat(m, dstptr, size, COMPAT_FROM_USER);
1599 else
1600 xt_compat_match(m, dstptr, size, COMPAT_FROM_USER);
1601
1602 ret = xt_check_match(match, AF_INET, dm->u.match_size - sizeof(*dm),
1603 name, hookmask, ip->proto,
1604 ip->invflags & IPT_INV_PROTO);
1605 if (ret)
1606 return ret;
1607
1608 if (m->u.kernel.match->checkentry
1609 && !m->u.kernel.match->checkentry(name, ip, match, dm->data,
1610 dm->u.match_size - sizeof(*dm),
1611 hookmask)) {
1612 duprintf("ip_tables: check failed for `%s'.\n",
1613 m->u.kernel.match->name);
1614 return -EINVAL;
1615 }
1616 return 0;
1617}
1618
1619static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
1620 unsigned int *size, const char *name,
1621 struct xt_table_info *newinfo, unsigned char *base)
1622{
1623 struct ipt_entry_target *t;
1624 struct ipt_target *target;
1625 struct ipt_entry *de;
1626 unsigned int origsize;
1627 int ret, h;
1628
1629 ret = 0;
1630 origsize = *size;
1631 de = (struct ipt_entry *)*dstptr;
1632 memcpy(de, e, sizeof(struct ipt_entry));
1633
1634 *dstptr += sizeof(struct compat_ipt_entry);
1635 ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
1636 name, &de->ip, de->comefrom);
1637 if (ret)
1638 goto out;
1639 de->target_offset = e->target_offset - (origsize - *size);
1640 t = ipt_get_target(e);
1641 target = t->u.kernel.target;
1642 if (target->compat)
1643 target->compat(t, dstptr, size, COMPAT_FROM_USER);
1644 else
1645 xt_compat_target(t, dstptr, size, COMPAT_FROM_USER);
1646
1647 de->next_offset = e->next_offset - (origsize - *size);
1648 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
1649 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1650 newinfo->hook_entry[h] -= origsize - *size;
1651 if ((unsigned char *)de - base < newinfo->underflow[h])
1652 newinfo->underflow[h] -= origsize - *size;
1653 }
1654
1655 t = ipt_get_target(de);
1656 target = t->u.kernel.target;
1657 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
1658 name, e->comefrom, e->ip.proto,
1659 e->ip.invflags & IPT_INV_PROTO);
1660 if (ret)
1661 goto out;
1662
1663 ret = -EINVAL;
1664 if (t->u.kernel.target == &ipt_standard_target) {
1665 if (!standard_check(t, *size))
1666 goto out;
1667 } else if (t->u.kernel.target->checkentry
1668 && !t->u.kernel.target->checkentry(name, de, target,
1669 t->data, t->u.target_size - sizeof(*t),
1670 de->comefrom)) {
1671 duprintf("ip_tables: compat: check failed for `%s'.\n",
1672 t->u.kernel.target->name);
1673 goto out;
1674 }
1675 ret = 0;
1676out:
1677 return ret;
1678}
1679
1098static int 1680static int
1099do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) 1681translate_compat_table(const char *name,
1682 unsigned int valid_hooks,
1683 struct xt_table_info **pinfo,
1684 void **pentry0,
1685 unsigned int total_size,
1686 unsigned int number,
1687 unsigned int *hook_entries,
1688 unsigned int *underflows)
1689{
1690 unsigned int i;
1691 struct xt_table_info *newinfo, *info;
1692 void *pos, *entry0, *entry1;
1693 unsigned int size;
1694 int ret;
1695
1696 info = *pinfo;
1697 entry0 = *pentry0;
1698 size = total_size;
1699 info->number = number;
1700
1701 /* Init all hooks to impossible value. */
1702 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1703 info->hook_entry[i] = 0xFFFFFFFF;
1704 info->underflow[i] = 0xFFFFFFFF;
1705 }
1706
1707 duprintf("translate_compat_table: size %u\n", info->size);
1708 i = 0;
1709 xt_compat_lock(AF_INET);
1710 /* Walk through entries, checking offsets. */
1711 ret = IPT_ENTRY_ITERATE(entry0, total_size,
1712 check_compat_entry_size_and_hooks,
1713 info, &size, entry0,
1714 entry0 + total_size,
1715 hook_entries, underflows, &i, name);
1716 if (ret != 0)
1717 goto out_unlock;
1718
1719 ret = -EINVAL;
1720 if (i != number) {
1721 duprintf("translate_compat_table: %u not %u entries\n",
1722 i, number);
1723 goto out_unlock;
1724 }
1725
1726 /* Check hooks all assigned */
1727 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1728 /* Only hooks which are valid */
1729 if (!(valid_hooks & (1 << i)))
1730 continue;
1731 if (info->hook_entry[i] == 0xFFFFFFFF) {
1732 duprintf("Invalid hook entry %u %u\n",
1733 i, hook_entries[i]);
1734 goto out_unlock;
1735 }
1736 if (info->underflow[i] == 0xFFFFFFFF) {
1737 duprintf("Invalid underflow %u %u\n",
1738 i, underflows[i]);
1739 goto out_unlock;
1740 }
1741 }
1742
1743 ret = -ENOMEM;
1744 newinfo = xt_alloc_table_info(size);
1745 if (!newinfo)
1746 goto out_unlock;
1747
1748 newinfo->number = number;
1749 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1750 newinfo->hook_entry[i] = info->hook_entry[i];
1751 newinfo->underflow[i] = info->underflow[i];
1752 }
1753 entry1 = newinfo->entries[raw_smp_processor_id()];
1754 pos = entry1;
1755 size = total_size;
1756 ret = IPT_ENTRY_ITERATE(entry0, total_size,
1757 compat_copy_entry_from_user, &pos, &size,
1758 name, newinfo, entry1);
1759 compat_flush_offsets();
1760 xt_compat_unlock(AF_INET);
1761 if (ret)
1762 goto free_newinfo;
1763
1764 ret = -ELOOP;
1765 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1766 goto free_newinfo;
1767
1768 /* And one copy for every other CPU */
1769 for_each_cpu(i)
1770 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1771 memcpy(newinfo->entries[i], entry1, newinfo->size);
1772
1773 *pinfo = newinfo;
1774 *pentry0 = entry1;
1775 xt_free_table_info(info);
1776 return 0;
1777
1778free_newinfo:
1779 xt_free_table_info(newinfo);
1780out:
1781 return ret;
1782out_unlock:
1783 xt_compat_unlock(AF_INET);
1784 goto out;
1785}
1786
1787static int
1788compat_do_replace(void __user *user, unsigned int len)
1789{
1790 int ret;
1791 struct compat_ipt_replace tmp;
1792 struct xt_table_info *newinfo;
1793 void *loc_cpu_entry;
1794
1795 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1796 return -EFAULT;
1797
1798 /* Hack: Causes ipchains to give correct error msg --RR */
1799 if (len != sizeof(tmp) + tmp.size)
1800 return -ENOPROTOOPT;
1801
1802 /* overflow check */
1803 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1804 SMP_CACHE_BYTES)
1805 return -ENOMEM;
1806 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1807 return -ENOMEM;
1808
1809 newinfo = xt_alloc_table_info(tmp.size);
1810 if (!newinfo)
1811 return -ENOMEM;
1812
1813 /* choose the copy that is our node/cpu */
1814 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1815 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1816 tmp.size) != 0) {
1817 ret = -EFAULT;
1818 goto free_newinfo;
1819 }
1820
1821 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1822 &newinfo, &loc_cpu_entry, tmp.size,
1823 tmp.num_entries, tmp.hook_entry, tmp.underflow);
1824 if (ret != 0)
1825 goto free_newinfo;
1826
1827 duprintf("compat_do_replace: Translated table\n");
1828
1829 ret = __do_replace(tmp.name, tmp.valid_hooks,
1830 newinfo, tmp.num_counters,
1831 compat_ptr(tmp.counters));
1832 if (ret)
1833 goto free_newinfo_untrans;
1834 return 0;
1835
1836 free_newinfo_untrans:
1837 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1838 free_newinfo:
1839 xt_free_table_info(newinfo);
1840 return ret;
1841}
1842
1843static int
1844compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
1845 unsigned int len)
1100{ 1846{
1101 int ret; 1847 int ret;
1102 1848
@@ -1105,11 +1851,11 @@ do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1105 1851
1106 switch (cmd) { 1852 switch (cmd) {
1107 case IPT_SO_SET_REPLACE: 1853 case IPT_SO_SET_REPLACE:
1108 ret = do_replace(user, len); 1854 ret = compat_do_replace(user, len);
1109 break; 1855 break;
1110 1856
1111 case IPT_SO_SET_ADD_COUNTERS: 1857 case IPT_SO_SET_ADD_COUNTERS:
1112 ret = do_add_counters(user, len); 1858 ret = do_add_counters(user, len, 1);
1113 break; 1859 break;
1114 1860
1115 default: 1861 default:
@@ -1120,75 +1866,196 @@ do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1120 return ret; 1866 return ret;
1121} 1867}
1122 1868
1869struct compat_ipt_get_entries
1870{
1871 char name[IPT_TABLE_MAXNAMELEN];
1872 compat_uint_t size;
1873 struct compat_ipt_entry entrytable[0];
1874};
1875
1876static int compat_copy_entries_to_user(unsigned int total_size,
1877 struct ipt_table *table, void __user *userptr)
1878{
1879 unsigned int off, num;
1880 struct compat_ipt_entry e;
1881 struct xt_counters *counters;
1882 struct xt_table_info *private = table->private;
1883 void __user *pos;
1884 unsigned int size;
1885 int ret = 0;
1886 void *loc_cpu_entry;
1887
1888 counters = alloc_counters(table);
1889 if (IS_ERR(counters))
1890 return PTR_ERR(counters);
1891
1892 /* choose the copy that is on our node/cpu, ...
1893 * This choice is lazy (because current thread is
1894 * allowed to migrate to another cpu)
1895 */
1896 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1897 pos = userptr;
1898 size = total_size;
1899 ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
1900 compat_copy_entry_to_user, &pos, &size);
1901 if (ret)
1902 goto free_counters;
1903
1904 /* ... then go back and fix counters and names */
1905 for (off = 0, num = 0; off < size; off += e.next_offset, num++) {
1906 unsigned int i;
1907 struct ipt_entry_match m;
1908 struct ipt_entry_target t;
1909
1910 ret = -EFAULT;
1911 if (copy_from_user(&e, userptr + off,
1912 sizeof(struct compat_ipt_entry)))
1913 goto free_counters;
1914 if (copy_to_user(userptr + off +
1915 offsetof(struct compat_ipt_entry, counters),
1916 &counters[num], sizeof(counters[num])))
1917 goto free_counters;
1918
1919 for (i = sizeof(struct compat_ipt_entry);
1920 i < e.target_offset; i += m.u.match_size) {
1921 if (copy_from_user(&m, userptr + off + i,
1922 sizeof(struct ipt_entry_match)))
1923 goto free_counters;
1924 if (copy_to_user(userptr + off + i +
1925 offsetof(struct ipt_entry_match, u.user.name),
1926 m.u.kernel.match->name,
1927 strlen(m.u.kernel.match->name) + 1))
1928 goto free_counters;
1929 }
1930
1931 if (copy_from_user(&t, userptr + off + e.target_offset,
1932 sizeof(struct ipt_entry_target)))
1933 goto free_counters;
1934 if (copy_to_user(userptr + off + e.target_offset +
1935 offsetof(struct ipt_entry_target, u.user.name),
1936 t.u.kernel.target->name,
1937 strlen(t.u.kernel.target->name) + 1))
1938 goto free_counters;
1939 }
1940 ret = 0;
1941free_counters:
1942 vfree(counters);
1943 return ret;
1944}
1945
1123static int 1946static int
1124do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) 1947compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
1125{ 1948{
1126 int ret; 1949 int ret;
1950 struct compat_ipt_get_entries get;
1951 struct ipt_table *t;
1127 1952
1128 if (!capable(CAP_NET_ADMIN))
1129 return -EPERM;
1130 1953
1131 switch (cmd) { 1954 if (*len < sizeof(get)) {
1132 case IPT_SO_GET_INFO: { 1955 duprintf("compat_get_entries: %u < %u\n",
1133 char name[IPT_TABLE_MAXNAMELEN]; 1956 *len, (unsigned int)sizeof(get));
1134 struct ipt_table *t; 1957 return -EINVAL;
1958 }
1959
1960 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1961 return -EFAULT;
1962
1963 if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
1964 duprintf("compat_get_entries: %u != %u\n", *len,
1965 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
1966 get.size));
1967 return -EINVAL;
1968 }
1135 1969
1136 if (*len != sizeof(struct ipt_getinfo)) { 1970 xt_compat_lock(AF_INET);
1137 duprintf("length %u != %u\n", *len, 1971 t = xt_find_table_lock(AF_INET, get.name);
1138 sizeof(struct ipt_getinfo)); 1972 if (t && !IS_ERR(t)) {
1973 struct xt_table_info *private = t->private;
1974 struct xt_table_info info;
1975 duprintf("t->private->number = %u\n",
1976 private->number);
1977 ret = compat_table_info(private, &info);
1978 if (!ret && get.size == info.size) {
1979 ret = compat_copy_entries_to_user(private->size,
1980 t, uptr->entrytable);
1981 } else if (!ret) {
1982 duprintf("compat_get_entries: I've got %u not %u!\n",
1983 private->size,
1984 get.size);
1139 ret = -EINVAL; 1985 ret = -EINVAL;
1140 break;
1141 } 1986 }
1987 compat_flush_offsets();
1988 module_put(t->me);
1989 xt_table_unlock(t);
1990 } else
1991 ret = t ? PTR_ERR(t) : -ENOENT;
1142 1992
1143 if (copy_from_user(name, user, sizeof(name)) != 0) { 1993 xt_compat_unlock(AF_INET);
1144 ret = -EFAULT; 1994 return ret;
1145 break; 1995}
1146 } 1996
1147 name[IPT_TABLE_MAXNAMELEN-1] = '\0'; 1997static int
1148 1998compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1149 t = try_then_request_module(xt_find_table_lock(AF_INET, name), 1999{
1150 "iptable_%s", name); 2000 int ret;
1151 if (t && !IS_ERR(t)) { 2001
1152 struct ipt_getinfo info; 2002 switch (cmd) {
1153 struct xt_table_info *private = t->private; 2003 case IPT_SO_GET_INFO:
1154 2004 ret = get_info(user, len, 1);
1155 info.valid_hooks = t->valid_hooks; 2005 break;
1156 memcpy(info.hook_entry, private->hook_entry, 2006 case IPT_SO_GET_ENTRIES:
1157 sizeof(info.hook_entry)); 2007 ret = compat_get_entries(user, len);
1158 memcpy(info.underflow, private->underflow, 2008 break;
1159 sizeof(info.underflow)); 2009 default:
1160 info.num_entries = private->number; 2010 duprintf("compat_do_ipt_get_ctl: unknown request %i\n", cmd);
1161 info.size = private->size; 2011 ret = -EINVAL;
1162 memcpy(info.name, name, sizeof(info.name));
1163
1164 if (copy_to_user(user, &info, *len) != 0)
1165 ret = -EFAULT;
1166 else
1167 ret = 0;
1168 xt_table_unlock(t);
1169 module_put(t->me);
1170 } else
1171 ret = t ? PTR_ERR(t) : -ENOENT;
1172 } 2012 }
1173 break; 2013 return ret;
2014}
2015#endif
1174 2016
1175 case IPT_SO_GET_ENTRIES: { 2017static int
1176 struct ipt_get_entries get; 2018do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2019{
2020 int ret;
1177 2021
1178 if (*len < sizeof(get)) { 2022 if (!capable(CAP_NET_ADMIN))
1179 duprintf("get_entries: %u < %u\n", *len, sizeof(get)); 2023 return -EPERM;
1180 ret = -EINVAL; 2024
1181 } else if (copy_from_user(&get, user, sizeof(get)) != 0) { 2025 switch (cmd) {
1182 ret = -EFAULT; 2026 case IPT_SO_SET_REPLACE:
1183 } else if (*len != sizeof(struct ipt_get_entries) + get.size) { 2027 ret = do_replace(user, len);
1184 duprintf("get_entries: %u != %u\n", *len,
1185 sizeof(struct ipt_get_entries) + get.size);
1186 ret = -EINVAL;
1187 } else
1188 ret = get_entries(&get, user);
1189 break; 2028 break;
2029
2030 case IPT_SO_SET_ADD_COUNTERS:
2031 ret = do_add_counters(user, len, 0);
2032 break;
2033
2034 default:
2035 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
2036 ret = -EINVAL;
1190 } 2037 }
1191 2038
2039 return ret;
2040}
2041
2042static int
2043do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2044{
2045 int ret;
2046
2047 if (!capable(CAP_NET_ADMIN))
2048 return -EPERM;
2049
2050 switch (cmd) {
2051 case IPT_SO_GET_INFO:
2052 ret = get_info(user, len, 0);
2053 break;
2054
2055 case IPT_SO_GET_ENTRIES:
2056 ret = get_entries(user, len);
2057 break;
2058
1192 case IPT_SO_GET_REVISION_MATCH: 2059 case IPT_SO_GET_REVISION_MATCH:
1193 case IPT_SO_GET_REVISION_TARGET: { 2060 case IPT_SO_GET_REVISION_TARGET: {
1194 struct ipt_get_revision rev; 2061 struct ipt_get_revision rev;
@@ -1336,6 +2203,9 @@ static struct ipt_target ipt_standard_target = {
1336 .name = IPT_STANDARD_TARGET, 2203 .name = IPT_STANDARD_TARGET,
1337 .targetsize = sizeof(int), 2204 .targetsize = sizeof(int),
1338 .family = AF_INET, 2205 .family = AF_INET,
2206#ifdef CONFIG_COMPAT
2207 .compat = &compat_ipt_standard_fn,
2208#endif
1339}; 2209};
1340 2210
1341static struct ipt_target ipt_error_target = { 2211static struct ipt_target ipt_error_target = {
@@ -1350,9 +2220,15 @@ static struct nf_sockopt_ops ipt_sockopts = {
1350 .set_optmin = IPT_BASE_CTL, 2220 .set_optmin = IPT_BASE_CTL,
1351 .set_optmax = IPT_SO_SET_MAX+1, 2221 .set_optmax = IPT_SO_SET_MAX+1,
1352 .set = do_ipt_set_ctl, 2222 .set = do_ipt_set_ctl,
2223#ifdef CONFIG_COMPAT
2224 .compat_set = compat_do_ipt_set_ctl,
2225#endif
1353 .get_optmin = IPT_BASE_CTL, 2226 .get_optmin = IPT_BASE_CTL,
1354 .get_optmax = IPT_SO_GET_MAX+1, 2227 .get_optmax = IPT_SO_GET_MAX+1,
1355 .get = do_ipt_get_ctl, 2228 .get = do_ipt_get_ctl,
2229#ifdef CONFIG_COMPAT
2230 .compat_get = compat_do_ipt_get_ctl,
2231#endif
1356}; 2232};
1357 2233
1358static struct ipt_match icmp_matchstruct = { 2234static struct ipt_match icmp_matchstruct = {
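The compat entry dump added above works in two passes: compat_copy_entry_to_user emits the converted entries first, then the loop re-reads each entry header from the destination buffer and patches the counters and the match/target names in place at their offsetof() positions. A minimal user-space sketch of that patch-at-offset pattern, using a made-up record type rather than the kernel structures:

#include <stdio.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical record type for the demo; not the kernel structures. */
struct record {
	char name[16];
	unsigned int counter;
};

int main(void)
{
	unsigned char buf[sizeof(struct record)];
	struct record in = { "placeholder", 0 }, out;
	unsigned int fixed = 42;

	/* first pass: copy the whole record into the destination buffer */
	memcpy(buf, &in, sizeof(in));

	/* second pass: patch one field in place, addressed by offsetof(),
	 * the way compat_copy_entries_to_user fixes counters and names */
	memcpy(buf + offsetof(struct record, counter), &fixed, sizeof(fixed));

	memcpy(&out, buf, sizeof(out));
	printf("patched counter: %u\n", out.counter);
	return 0;
}

The kernel case applies the same idea with the offsets of struct compat_ipt_entry and the embedded match/target headers.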
diff --git a/net/ipv4/netfilter/ipt_multiport.c b/net/ipv4/netfilter/ipt_multiport.c
deleted file mode 100644
index ac95d8390bcc..000000000000
--- a/net/ipv4/netfilter/ipt_multiport.c
+++ /dev/null
@@ -1,195 +0,0 @@
1/* Kernel module to match one of a list of TCP/UDP ports: ports are in
2 the same place so we can treat them as equal. */
3
4/* (C) 1999-2001 Paul `Rusty' Russell
5 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/types.h>
14#include <linux/udp.h>
15#include <linux/skbuff.h>
16
17#include <linux/netfilter_ipv4/ipt_multiport.h>
18#include <linux/netfilter_ipv4/ip_tables.h>
19
20MODULE_LICENSE("GPL");
21MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
22MODULE_DESCRIPTION("iptables multiple port match module");
23
24#if 0
25#define duprintf(format, args...) printk(format , ## args)
26#else
27#define duprintf(format, args...)
28#endif
29
30/* Returns 1 if the port is matched by the test, 0 otherwise. */
31static inline int
32ports_match(const u_int16_t *portlist, enum ipt_multiport_flags flags,
33 u_int8_t count, u_int16_t src, u_int16_t dst)
34{
35 unsigned int i;
36 for (i=0; i<count; i++) {
37 if (flags != IPT_MULTIPORT_DESTINATION
38 && portlist[i] == src)
39 return 1;
40
41 if (flags != IPT_MULTIPORT_SOURCE
42 && portlist[i] == dst)
43 return 1;
44 }
45
46 return 0;
47}
48
49/* Returns 1 if the port is matched by the test, 0 otherwise. */
50static inline int
51ports_match_v1(const struct ipt_multiport_v1 *minfo,
52 u_int16_t src, u_int16_t dst)
53{
54 unsigned int i;
55 u_int16_t s, e;
56
57 for (i=0; i < minfo->count; i++) {
58 s = minfo->ports[i];
59
60 if (minfo->pflags[i]) {
61 /* range port matching */
62 e = minfo->ports[++i];
63 duprintf("src or dst matches with %d-%d?\n", s, e);
64
65 if (minfo->flags == IPT_MULTIPORT_SOURCE
66 && src >= s && src <= e)
67 return 1 ^ minfo->invert;
68 if (minfo->flags == IPT_MULTIPORT_DESTINATION
69 && dst >= s && dst <= e)
70 return 1 ^ minfo->invert;
71 if (minfo->flags == IPT_MULTIPORT_EITHER
72 && ((dst >= s && dst <= e)
73 || (src >= s && src <= e)))
74 return 1 ^ minfo->invert;
75 } else {
76 /* exact port matching */
77 duprintf("src or dst matches with %d?\n", s);
78
79 if (minfo->flags == IPT_MULTIPORT_SOURCE
80 && src == s)
81 return 1 ^ minfo->invert;
82 if (minfo->flags == IPT_MULTIPORT_DESTINATION
83 && dst == s)
84 return 1 ^ minfo->invert;
85 if (minfo->flags == IPT_MULTIPORT_EITHER
86 && (src == s || dst == s))
87 return 1 ^ minfo->invert;
88 }
89 }
90
91 return minfo->invert;
92}
93
94static int
95match(const struct sk_buff *skb,
96 const struct net_device *in,
97 const struct net_device *out,
98 const struct xt_match *match,
99 const void *matchinfo,
100 int offset,
101 unsigned int protoff,
102 int *hotdrop)
103{
104 u16 _ports[2], *pptr;
105 const struct ipt_multiport *multiinfo = matchinfo;
106
107 if (offset)
108 return 0;
109
110 pptr = skb_header_pointer(skb, protoff,
111 sizeof(_ports), _ports);
112 if (pptr == NULL) {
113 /* We've been asked to examine this packet, and we
114 * can't. Hence, no choice but to drop.
115 */
116 duprintf("ipt_multiport:"
117 " Dropping evil offset=0 tinygram.\n");
118 *hotdrop = 1;
119 return 0;
120 }
121
122 return ports_match(multiinfo->ports,
123 multiinfo->flags, multiinfo->count,
124 ntohs(pptr[0]), ntohs(pptr[1]));
125}
126
127static int
128match_v1(const struct sk_buff *skb,
129 const struct net_device *in,
130 const struct net_device *out,
131 const struct xt_match *match,
132 const void *matchinfo,
133 int offset,
134 unsigned int protoff,
135 int *hotdrop)
136{
137 u16 _ports[2], *pptr;
138 const struct ipt_multiport_v1 *multiinfo = matchinfo;
139
140 if (offset)
141 return 0;
142
143 pptr = skb_header_pointer(skb, protoff,
144 sizeof(_ports), _ports);
145 if (pptr == NULL) {
146 /* We've been asked to examine this packet, and we
147 * can't. Hence, no choice but to drop.
148 */
149 duprintf("ipt_multiport:"
150 " Dropping evil offset=0 tinygram.\n");
151 *hotdrop = 1;
152 return 0;
153 }
154
155 return ports_match_v1(multiinfo, ntohs(pptr[0]), ntohs(pptr[1]));
156}
157
158static struct ipt_match multiport_match = {
159 .name = "multiport",
160 .revision = 0,
161 .match = match,
162 .matchsize = sizeof(struct ipt_multiport),
163 .me = THIS_MODULE,
164};
165
166static struct ipt_match multiport_match_v1 = {
167 .name = "multiport",
168 .revision = 1,
169 .match = match_v1,
170 .matchsize = sizeof(struct ipt_multiport_v1),
171 .me = THIS_MODULE,
172};
173
174static int __init ipt_multiport_init(void)
175{
176 int err;
177
178 err = ipt_register_match(&multiport_match);
179 if (!err) {
180 err = ipt_register_match(&multiport_match_v1);
181 if (err)
182 ipt_unregister_match(&multiport_match);
183 }
184
185 return err;
186}
187
188static void __exit ipt_multiport_fini(void)
189{
190 ipt_unregister_match(&multiport_match);
191 ipt_unregister_match(&multiport_match_v1);
192}
193
194module_init(ipt_multiport_init);
195module_exit(ipt_multiport_fini);
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index 850d919591d1..e1b8f4b90d80 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -68,7 +68,7 @@ int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
68{ 68{
69 int err; 69 int err;
70 u32 spi, seq; 70 u32 spi, seq;
71 struct sec_decap_state xfrm_vec[XFRM_MAX_DEPTH]; 71 struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH];
72 struct xfrm_state *x; 72 struct xfrm_state *x;
73 int xfrm_nr = 0; 73 int xfrm_nr = 0;
74 int decaps = 0; 74 int decaps = 0;
@@ -90,14 +90,16 @@ int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
90 if (unlikely(x->km.state != XFRM_STATE_VALID)) 90 if (unlikely(x->km.state != XFRM_STATE_VALID))
91 goto drop_unlock; 91 goto drop_unlock;
92 92
93 if (x->encap->encap_type != encap_type)
94 goto drop_unlock;
95
93 if (x->props.replay_window && xfrm_replay_check(x, seq)) 96 if (x->props.replay_window && xfrm_replay_check(x, seq))
94 goto drop_unlock; 97 goto drop_unlock;
95 98
96 if (xfrm_state_check_expire(x)) 99 if (xfrm_state_check_expire(x))
97 goto drop_unlock; 100 goto drop_unlock;
98 101
99 xfrm_vec[xfrm_nr].decap.decap_type = encap_type; 102 if (x->type->input(x, skb))
100 if (x->type->input(x, &(xfrm_vec[xfrm_nr].decap), skb))
101 goto drop_unlock; 103 goto drop_unlock;
102 104
103 /* only the first xfrm gets the encap type */ 105 /* only the first xfrm gets the encap type */
@@ -111,7 +113,7 @@ int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
111 113
112 spin_unlock(&x->lock); 114 spin_unlock(&x->lock);
113 115
114 xfrm_vec[xfrm_nr++].xvec = x; 116 xfrm_vec[xfrm_nr++] = x;
115 117
116 iph = skb->nh.iph; 118 iph = skb->nh.iph;
117 119
@@ -153,7 +155,8 @@ int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
153 if (xfrm_nr + skb->sp->len > XFRM_MAX_DEPTH) 155 if (xfrm_nr + skb->sp->len > XFRM_MAX_DEPTH)
154 goto drop; 156 goto drop;
155 157
156 memcpy(skb->sp->x+skb->sp->len, xfrm_vec, xfrm_nr*sizeof(struct sec_decap_state)); 158 memcpy(skb->sp->xvec + skb->sp->len, xfrm_vec,
159 xfrm_nr * sizeof(xfrm_vec[0]));
157 skb->sp->len += xfrm_nr; 160 skb->sp->len += xfrm_nr;
158 161
159 nf_reset(skb); 162 nf_reset(skb);
@@ -184,7 +187,7 @@ drop_unlock:
184 xfrm_state_put(x); 187 xfrm_state_put(x);
185drop: 188drop:
186 while (--xfrm_nr >= 0) 189 while (--xfrm_nr >= 0)
187 xfrm_state_put(xfrm_vec[xfrm_nr].xvec); 190 xfrm_state_put(xfrm_vec[xfrm_nr]);
188 191
189 kfree_skb(skb); 192 kfree_skb(skb);
190 return 0; 193 return 0;
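With sec_decap_state gone, the secpath carries plain xfrm_state pointers (sp->xvec[i]) and the append into skb->sp is sized per element rather than per wrapper struct. A toy user-space sketch of the simplified bookkeeping, using invented stand-in types:

#include <stdio.h>
#include <string.h>

/* Toy stand-ins; the real structures live in the kernel's xfrm code. */
struct state { int id; };

#define MAX_DEPTH 6

struct secpath {
	struct state *xvec[MAX_DEPTH];
	int len;
};

int main(void)
{
	struct state a = { 1 }, b = { 2 };
	struct state *local[MAX_DEPTH] = { &a, &b };	/* states collected per packet */
	int nr = 2;
	struct secpath sp = { { 0 }, 0 };

	/* append the locally collected pointers, sized per element just
	 * like the patched memcpy in xfrm4_rcv_encap/xfrm6_rcv_spi */
	memcpy(sp.xvec + sp.len, local, nr * sizeof(local[0]));
	sp.len += nr;

	printf("depth %d, last id %d\n", sp.len, sp.xvec[sp.len - 1]->id);
	return 0;
}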
diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c
index 2d670935c2b5..f8ceaa127c83 100644
--- a/net/ipv4/xfrm4_tunnel.c
+++ b/net/ipv4/xfrm4_tunnel.c
@@ -21,7 +21,7 @@ static int ipip_output(struct xfrm_state *x, struct sk_buff *skb)
21 return 0; 21 return 0;
22} 22}
23 23
24static int ipip_xfrm_rcv(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb) 24static int ipip_xfrm_rcv(struct xfrm_state *x, struct sk_buff *skb)
25{ 25{
26 return 0; 26 return 0;
27} 27}
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index cf58251df4b3..6778173a3dda 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -229,7 +229,7 @@ error:
229 return err; 229 return err;
230} 230}
231 231
232static int ah6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb) 232static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
233{ 233{
234 /* 234 /*
235 * Before process AH 235 * Before process AH
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 3dcaac7a0972..22f046079037 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -130,7 +130,7 @@ error:
130 return err; 130 return err;
131} 131}
132 132
133static int esp6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb) 133static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
134{ 134{
135 struct ipv6hdr *iph; 135 struct ipv6hdr *iph;
136 struct ipv6_esp_hdr *esph; 136 struct ipv6_esp_hdr *esph;
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index d4cfec3f414e..00f3fadfcca7 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -63,7 +63,7 @@ static void **ipcomp6_scratches;
63static int ipcomp6_scratch_users; 63static int ipcomp6_scratch_users;
64static LIST_HEAD(ipcomp6_tfms_list); 64static LIST_HEAD(ipcomp6_tfms_list);
65 65
66static int ipcomp6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb) 66static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb)
67{ 67{
68 int err = 0; 68 int err = 0;
69 u8 nexthdr = 0; 69 u8 nexthdr = 0;
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 98f78759f1ab..4bc4e5b33794 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -87,16 +87,6 @@ config IP6_NF_MATCH_HL
87 87
88 To compile it as a module, choose M here. If unsure, say N. 88 To compile it as a module, choose M here. If unsure, say N.
89 89
90config IP6_NF_MATCH_MULTIPORT
91 tristate "Multiple port match support"
92 depends on IP6_NF_IPTABLES
93 help
94 Multiport matching allows you to match TCP or UDP packets based on
95 a series of source or destination ports: normally a rule can only
96 match a single range of ports.
97
98 To compile it as a module, choose M here. If unsure, say N.
99
100config IP6_NF_MATCH_OWNER 90config IP6_NF_MATCH_OWNER
101 tristate "Owner match support" 91 tristate "Owner match support"
102 depends on IP6_NF_IPTABLES 92 depends on IP6_NF_IPTABLES
@@ -115,11 +105,11 @@ config IP6_NF_MATCH_IPV6HEADER
115 105
116 To compile it as a module, choose M here. If unsure, say N. 106 To compile it as a module, choose M here. If unsure, say N.
117 107
118config IP6_NF_MATCH_AHESP 108config IP6_NF_MATCH_AH
119 tristate "AH/ESP match support" 109 tristate "AH match support"
120 depends on IP6_NF_IPTABLES 110 depends on IP6_NF_IPTABLES
121 help 111 help
122 This module allows one to match AH and ESP packets. 112 This module allows one to match AH packets.
123 113
124 To compile it as a module, choose M here. If unsure, say N. 114 To compile it as a module, choose M here. If unsure, say N.
125 115
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 8436a1a1731f..eeeb57d4c9c5 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -8,9 +8,8 @@ obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
8obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o ip6t_dst.o 8obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o ip6t_dst.o
9obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o 9obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o
10obj-$(CONFIG_IP6_NF_MATCH_FRAG) += ip6t_frag.o 10obj-$(CONFIG_IP6_NF_MATCH_FRAG) += ip6t_frag.o
11obj-$(CONFIG_IP6_NF_MATCH_AHESP) += ip6t_esp.o ip6t_ah.o 11obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
12obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o 12obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o
13obj-$(CONFIG_IP6_NF_MATCH_MULTIPORT) += ip6t_multiport.o
14obj-$(CONFIG_IP6_NF_MATCH_OWNER) += ip6t_owner.o 13obj-$(CONFIG_IP6_NF_MATCH_OWNER) += ip6t_owner.o
15obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o 14obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
16obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o 15obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
diff --git a/net/ipv6/netfilter/ip6t_esp.c b/net/ipv6/netfilter/ip6t_esp.c
deleted file mode 100644
index 36bedad2c6f7..000000000000
--- a/net/ipv6/netfilter/ip6t_esp.c
+++ /dev/null
@@ -1,115 +0,0 @@
1/* Kernel module to match ESP parameters. */
2/* (C) 2001-2002 Andras Kis-Szabo <kisza@sch.bme.hu>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9
10#include <linux/module.h>
11#include <linux/skbuff.h>
12#include <linux/ip.h>
13#include <linux/ipv6.h>
14#include <linux/types.h>
15#include <net/checksum.h>
16#include <net/ipv6.h>
17
18#include <linux/netfilter_ipv6/ip6_tables.h>
19#include <linux/netfilter_ipv6/ip6t_esp.h>
20
21MODULE_LICENSE("GPL");
22MODULE_DESCRIPTION("IPv6 ESP match");
23MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
24
25#if 0
26#define DEBUGP printk
27#else
28#define DEBUGP(format, args...)
29#endif
30
31/* Returns 1 if the spi is matched by the range, 0 otherwise */
32static inline int
33spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, int invert)
34{
35 int r=0;
36 DEBUGP("esp spi_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ',
37 min,spi,max);
38 r=(spi >= min && spi <= max) ^ invert;
39 DEBUGP(" result %s\n",r? "PASS\n" : "FAILED\n");
40 return r;
41}
42
43static int
44match(const struct sk_buff *skb,
45 const struct net_device *in,
46 const struct net_device *out,
47 const struct xt_match *match,
48 const void *matchinfo,
49 int offset,
50 unsigned int protoff,
51 int *hotdrop)
52{
53 struct ip_esp_hdr _esp, *eh;
54 const struct ip6t_esp *espinfo = matchinfo;
55 unsigned int ptr;
56
57 /* Make sure this isn't an evil packet */
58 /*DEBUGP("ipv6_esp entered \n");*/
59
60 if (ipv6_find_hdr(skb, &ptr, NEXTHDR_ESP, NULL) < 0)
61 return 0;
62
63 eh = skb_header_pointer(skb, ptr, sizeof(_esp), &_esp);
64 if (eh == NULL) {
65 *hotdrop = 1;
66 return 0;
67 }
68
69 DEBUGP("IPv6 ESP SPI %u %08X\n", ntohl(eh->spi), ntohl(eh->spi));
70
71 return (eh != NULL)
72 && spi_match(espinfo->spis[0], espinfo->spis[1],
73 ntohl(eh->spi),
74 !!(espinfo->invflags & IP6T_ESP_INV_SPI));
75}
76
77/* Called when user tries to insert an entry of this type. */
78static int
79checkentry(const char *tablename,
80 const void *ip,
81 const struct xt_match *match,
82 void *matchinfo,
83 unsigned int matchinfosize,
84 unsigned int hook_mask)
85{
86 const struct ip6t_esp *espinfo = matchinfo;
87
88 if (espinfo->invflags & ~IP6T_ESP_INV_MASK) {
89 DEBUGP("ip6t_esp: unknown flags %X\n",
90 espinfo->invflags);
91 return 0;
92 }
93 return 1;
94}
95
96static struct ip6t_match esp_match = {
97 .name = "esp",
98 .match = match,
99 .matchsize = sizeof(struct ip6t_esp),
100 .checkentry = checkentry,
101 .me = THIS_MODULE,
102};
103
104static int __init ip6t_esp_init(void)
105{
106 return ip6t_register_match(&esp_match);
107}
108
109static void __exit ip6t_esp_fini(void)
110{
111 ip6t_unregister_match(&esp_match);
112}
113
114module_init(ip6t_esp_init);
115module_exit(ip6t_esp_fini);
diff --git a/net/ipv6/netfilter/ip6t_multiport.c b/net/ipv6/netfilter/ip6t_multiport.c
deleted file mode 100644
index 10c48ba596d6..000000000000
--- a/net/ipv6/netfilter/ip6t_multiport.c
+++ /dev/null
@@ -1,125 +0,0 @@
1/* Kernel module to match one of a list of TCP/UDP ports: ports are in
2 the same place so we can treat them as equal. */
3
4/* (C) 1999-2001 Paul `Rusty' Russell
5 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/types.h>
14#include <linux/udp.h>
15#include <linux/skbuff.h>
16#include <linux/in.h>
17
18#include <linux/netfilter_ipv6/ip6t_multiport.h>
19#include <linux/netfilter_ipv6/ip6_tables.h>
20
21MODULE_LICENSE("GPL");
22MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
23MODULE_DESCRIPTION("ip6tables match for multiple ports");
24
25#if 0
26#define duprintf(format, args...) printk(format , ## args)
27#else
28#define duprintf(format, args...)
29#endif
30
31/* Returns 1 if the port is matched by the test, 0 otherwise. */
32static inline int
33ports_match(const u_int16_t *portlist, enum ip6t_multiport_flags flags,
34 u_int8_t count, u_int16_t src, u_int16_t dst)
35{
36 unsigned int i;
37 for (i=0; i<count; i++) {
38 if (flags != IP6T_MULTIPORT_DESTINATION
39 && portlist[i] == src)
40 return 1;
41
42 if (flags != IP6T_MULTIPORT_SOURCE
43 && portlist[i] == dst)
44 return 1;
45 }
46
47 return 0;
48}
49
50static int
51match(const struct sk_buff *skb,
52 const struct net_device *in,
53 const struct net_device *out,
54 const struct xt_match *match,
55 const void *matchinfo,
56 int offset,
57 unsigned int protoff,
58 int *hotdrop)
59{
60 u16 _ports[2], *pptr;
61 const struct ip6t_multiport *multiinfo = matchinfo;
62
63 /* Must not be a fragment. */
64 if (offset)
65 return 0;
66
67 /* Must be big enough to read ports (both UDP and TCP have
68 them at the start). */
69 pptr = skb_header_pointer(skb, protoff, sizeof(_ports), &_ports[0]);
70 if (pptr == NULL) {
71 /* We've been asked to examine this packet, and we
72 * can't. Hence, no choice but to drop.
73 */
74 duprintf("ip6t_multiport:"
75 " Dropping evil offset=0 tinygram.\n");
76 *hotdrop = 1;
77 return 0;
78 }
79
80 return ports_match(multiinfo->ports,
81 multiinfo->flags, multiinfo->count,
82 ntohs(pptr[0]), ntohs(pptr[1]));
83}
84
85/* Called when user tries to insert an entry of this type. */
86static int
87checkentry(const char *tablename,
88 const void *info,
89 const struct xt_match *match,
90 void *matchinfo,
91 unsigned int matchsize,
92 unsigned int hook_mask)
93{
94 const struct ip6t_ip6 *ip = info;
95 const struct ip6t_multiport *multiinfo = matchinfo;
96
97 /* Must specify proto == TCP/UDP, no unknown flags or bad count */
98 return (ip->proto == IPPROTO_TCP || ip->proto == IPPROTO_UDP)
99 && !(ip->invflags & IP6T_INV_PROTO)
100 && (multiinfo->flags == IP6T_MULTIPORT_SOURCE
101 || multiinfo->flags == IP6T_MULTIPORT_DESTINATION
102 || multiinfo->flags == IP6T_MULTIPORT_EITHER)
103 && multiinfo->count <= IP6T_MULTI_PORTS;
104}
105
106static struct ip6t_match multiport_match = {
107 .name = "multiport",
108 .match = match,
109 .matchsize = sizeof(struct ip6t_multiport),
110 .checkentry = checkentry,
111 .me = THIS_MODULE,
112};
113
114static int __init ip6t_multiport_init(void)
115{
116 return ip6t_register_match(&multiport_match);
117}
118
119static void __exit ip6t_multiport_fini(void)
120{
121 ip6t_unregister_match(&multiport_match);
122}
123
124module_init(ip6t_multiport_init);
125module_exit(ip6t_multiport_fini);
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index cccf8b76f046..00cfdee18dca 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -32,7 +32,7 @@ int xfrm6_rcv_spi(struct sk_buff *skb, u32 spi)
32{ 32{
33 int err; 33 int err;
34 u32 seq; 34 u32 seq;
35 struct sec_decap_state xfrm_vec[XFRM_MAX_DEPTH]; 35 struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH];
36 struct xfrm_state *x; 36 struct xfrm_state *x;
37 int xfrm_nr = 0; 37 int xfrm_nr = 0;
38 int decaps = 0; 38 int decaps = 0;
@@ -65,7 +65,7 @@ int xfrm6_rcv_spi(struct sk_buff *skb, u32 spi)
65 if (xfrm_state_check_expire(x)) 65 if (xfrm_state_check_expire(x))
66 goto drop_unlock; 66 goto drop_unlock;
67 67
68 nexthdr = x->type->input(x, &(xfrm_vec[xfrm_nr].decap), skb); 68 nexthdr = x->type->input(x, skb);
69 if (nexthdr <= 0) 69 if (nexthdr <= 0)
70 goto drop_unlock; 70 goto drop_unlock;
71 71
@@ -79,7 +79,7 @@ int xfrm6_rcv_spi(struct sk_buff *skb, u32 spi)
79 79
80 spin_unlock(&x->lock); 80 spin_unlock(&x->lock);
81 81
82 xfrm_vec[xfrm_nr++].xvec = x; 82 xfrm_vec[xfrm_nr++] = x;
83 83
84 if (x->props.mode) { /* XXX */ 84 if (x->props.mode) { /* XXX */
85 if (nexthdr != IPPROTO_IPV6) 85 if (nexthdr != IPPROTO_IPV6)
@@ -118,7 +118,8 @@ int xfrm6_rcv_spi(struct sk_buff *skb, u32 spi)
118 if (xfrm_nr + skb->sp->len > XFRM_MAX_DEPTH) 118 if (xfrm_nr + skb->sp->len > XFRM_MAX_DEPTH)
119 goto drop; 119 goto drop;
120 120
121 memcpy(skb->sp->x+skb->sp->len, xfrm_vec, xfrm_nr*sizeof(struct sec_decap_state)); 121 memcpy(skb->sp->xvec + skb->sp->len, xfrm_vec,
122 xfrm_nr * sizeof(xfrm_vec[0]));
122 skb->sp->len += xfrm_nr; 123 skb->sp->len += xfrm_nr;
123 skb->ip_summed = CHECKSUM_NONE; 124 skb->ip_summed = CHECKSUM_NONE;
124 125
@@ -149,7 +150,7 @@ drop_unlock:
149 xfrm_state_put(x); 150 xfrm_state_put(x);
150drop: 151drop:
151 while (--xfrm_nr >= 0) 152 while (--xfrm_nr >= 0)
152 xfrm_state_put(xfrm_vec[xfrm_nr].xvec); 153 xfrm_state_put(xfrm_vec[xfrm_nr]);
153 kfree_skb(skb); 154 kfree_skb(skb);
154 return -1; 155 return -1;
155} 156}
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index a8f6776c518d..d37768e5064f 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -351,7 +351,7 @@ static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
351 return 0; 351 return 0;
352} 352}
353 353
354static int xfrm6_tunnel_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb) 354static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
355{ 355{
356 return 0; 356 return 0;
357} 357}
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 332acb37b385..e2893effdfaa 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -231,6 +231,15 @@ config NETFILTER_XT_MATCH_DCCP
231 If you want to compile it as a module, say M here and read 231 If you want to compile it as a module, say M here and read
232 <file:Documentation/modules.txt>. If unsure, say `N'. 232 <file:Documentation/modules.txt>. If unsure, say `N'.
233 233
234config NETFILTER_XT_MATCH_ESP
235 tristate '"ESP" match support'
236 depends on NETFILTER_XTABLES
237 help
238 This match extension allows you to match a range of SPIs
239 inside ESP header of IPSec packets.
240
241 To compile it as a module, choose M here. If unsure, say N.
242
234config NETFILTER_XT_MATCH_HELPER 243config NETFILTER_XT_MATCH_HELPER
235 tristate '"helper" match support' 244 tristate '"helper" match support'
236 depends on NETFILTER_XTABLES 245 depends on NETFILTER_XTABLES
@@ -289,6 +298,16 @@ config NETFILTER_XT_MATCH_POLICY
289 298
290 To compile it as a module, choose M here. If unsure, say N. 299 To compile it as a module, choose M here. If unsure, say N.
291 300
301config NETFILTER_XT_MATCH_MULTIPORT
302 tristate "Multiple port match support"
303 depends on NETFILTER_XTABLES
304 help
305 Multiport matching allows you to match TCP or UDP packets based on
306 a series of source or destination ports: normally a rule can only
307 match a single range of ports.
308
309 To compile it as a module, choose M here. If unsure, say N.
310
292config NETFILTER_XT_MATCH_PHYSDEV 311config NETFILTER_XT_MATCH_PHYSDEV
293 tristate '"physdev" match support' 312 tristate '"physdev" match support'
294 depends on NETFILTER_XTABLES && BRIDGE_NETFILTER 313 depends on NETFILTER_XTABLES && BRIDGE_NETFILTER
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 9558727f5e79..95b7e416512d 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -35,11 +35,13 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNBYTES) += xt_connbytes.o
35obj-$(CONFIG_NETFILTER_XT_MATCH_CONNMARK) += xt_connmark.o 35obj-$(CONFIG_NETFILTER_XT_MATCH_CONNMARK) += xt_connmark.o
36obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o 36obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
37obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o 37obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
38obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
38obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o 39obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
39obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o 40obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o
40obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o 41obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o
41obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o 42obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o
42obj-$(CONFIG_NETFILTER_XT_MATCH_MARK) += xt_mark.o 43obj-$(CONFIG_NETFILTER_XT_MATCH_MARK) += xt_mark.o
44obj-$(CONFIG_NETFILTER_XT_MATCH_MULTIPORT) += xt_multiport.o
43obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o 45obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o
44obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o 46obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o
45obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o 47obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 0e0e9d7b34c8..bd10eb944b65 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1022,7 +1022,7 @@ ctnetlink_change_conntrack(struct nf_conn *ct, struct nfattr *cda[])
1022 return err; 1022 return err;
1023 } 1023 }
1024 1024
1025#if defined(CONFIG_IP_NF_CONNTRACK_MARK) 1025#if defined(CONFIG_NF_CONNTRACK_MARK)
1026 if (cda[CTA_MARK-1]) 1026 if (cda[CTA_MARK-1])
1027 ct->mark = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_MARK-1])); 1027 ct->mark = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_MARK-1]));
1028#endif 1028#endif
@@ -1062,7 +1062,7 @@ ctnetlink_create_conntrack(struct nfattr *cda[],
1062 return err; 1062 return err;
1063 } 1063 }
1064 1064
1065#if defined(CONFIG_IP_NF_CONNTRACK_MARK) 1065#if defined(CONFIG_NF_CONNTRACK_MARK)
1066 if (cda[CTA_MARK-1]) 1066 if (cda[CTA_MARK-1])
1067 ct->mark = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_MARK-1])); 1067 ct->mark = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_MARK-1]));
1068#endif 1068#endif
@@ -1687,7 +1687,7 @@ static void __exit ctnetlink_exit(void)
1687 printk("ctnetlink: unregistering from nfnetlink.\n"); 1687 printk("ctnetlink: unregistering from nfnetlink.\n");
1688 1688
1689#ifdef CONFIG_NF_CONNTRACK_EVENTS 1689#ifdef CONFIG_NF_CONNTRACK_EVENTS
1690 nf_conntrack_unregister_notifier(&ctnl_notifier_exp); 1690 nf_conntrack_expect_unregister_notifier(&ctnl_notifier_exp);
1691 nf_conntrack_unregister_notifier(&ctnl_notifier); 1691 nf_conntrack_unregister_notifier(&ctnl_notifier);
1692#endif 1692#endif
1693 1693
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index a657ab5394c3..feb8a9e066b0 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -38,6 +38,7 @@ struct xt_af {
38 struct list_head match; 38 struct list_head match;
39 struct list_head target; 39 struct list_head target;
40 struct list_head tables; 40 struct list_head tables;
41 struct mutex compat_mutex;
41}; 42};
42 43
43static struct xt_af *xt; 44static struct xt_af *xt;
@@ -272,6 +273,54 @@ int xt_check_match(const struct xt_match *match, unsigned short family,
272} 273}
273EXPORT_SYMBOL_GPL(xt_check_match); 274EXPORT_SYMBOL_GPL(xt_check_match);
274 275
276#ifdef CONFIG_COMPAT
277int xt_compat_match(void *match, void **dstptr, int *size, int convert)
278{
279 struct xt_match *m;
280 struct compat_xt_entry_match *pcompat_m;
281 struct xt_entry_match *pm;
282 u_int16_t msize;
283 int off, ret;
284
285 ret = 0;
286 m = ((struct xt_entry_match *)match)->u.kernel.match;
287 off = XT_ALIGN(m->matchsize) - COMPAT_XT_ALIGN(m->matchsize);
288 switch (convert) {
289 case COMPAT_TO_USER:
290 pm = (struct xt_entry_match *)match;
291 msize = pm->u.user.match_size;
292 if (__copy_to_user(*dstptr, pm, msize)) {
293 ret = -EFAULT;
294 break;
295 }
296 msize -= off;
297 if (put_user(msize, (u_int16_t *)*dstptr))
298 ret = -EFAULT;
299 *size -= off;
300 *dstptr += msize;
301 break;
302 case COMPAT_FROM_USER:
303 pcompat_m = (struct compat_xt_entry_match *)match;
304 pm = (struct xt_entry_match *)*dstptr;
305 msize = pcompat_m->u.user.match_size;
306 memcpy(pm, pcompat_m, msize);
307 msize += off;
308 pm->u.user.match_size = msize;
309 *size += off;
310 *dstptr += msize;
311 break;
312 case COMPAT_CALC_SIZE:
313 *size += off;
314 break;
315 default:
316 ret = -ENOPROTOOPT;
317 break;
318 }
319 return ret;
320}
321EXPORT_SYMBOL_GPL(xt_compat_match);
322#endif
323
275int xt_check_target(const struct xt_target *target, unsigned short family, 324int xt_check_target(const struct xt_target *target, unsigned short family,
276 unsigned int size, const char *table, unsigned int hook_mask, 325 unsigned int size, const char *table, unsigned int hook_mask,
277 unsigned short proto, int inv_proto) 326 unsigned short proto, int inv_proto)
@@ -301,6 +350,54 @@ int xt_check_target(const struct xt_target *target, unsigned short family,
301} 350}
302EXPORT_SYMBOL_GPL(xt_check_target); 351EXPORT_SYMBOL_GPL(xt_check_target);
303 352
353#ifdef CONFIG_COMPAT
354int xt_compat_target(void *target, void **dstptr, int *size, int convert)
355{
356 struct xt_target *t;
357 struct compat_xt_entry_target *pcompat;
358 struct xt_entry_target *pt;
359 u_int16_t tsize;
360 int off, ret;
361
362 ret = 0;
363 t = ((struct xt_entry_target *)target)->u.kernel.target;
364 off = XT_ALIGN(t->targetsize) - COMPAT_XT_ALIGN(t->targetsize);
365 switch (convert) {
366 case COMPAT_TO_USER:
367 pt = (struct xt_entry_target *)target;
368 tsize = pt->u.user.target_size;
369 if (__copy_to_user(*dstptr, pt, tsize)) {
370 ret = -EFAULT;
371 break;
372 }
373 tsize -= off;
374 if (put_user(tsize, (u_int16_t *)*dstptr))
375 ret = -EFAULT;
376 *size -= off;
377 *dstptr += tsize;
378 break;
379 case COMPAT_FROM_USER:
380 pcompat = (struct compat_xt_entry_target *)target;
381 pt = (struct xt_entry_target *)*dstptr;
382 tsize = pcompat->u.user.target_size;
383 memcpy(pt, pcompat, tsize);
384 tsize += off;
385 pt->u.user.target_size = tsize;
386 *size += off;
387 *dstptr += tsize;
388 break;
389 case COMPAT_CALC_SIZE:
390 *size += off;
391 break;
392 default:
393 ret = -ENOPROTOOPT;
394 break;
395 }
396 return ret;
397}
398EXPORT_SYMBOL_GPL(xt_compat_target);
399#endif
400
304struct xt_table_info *xt_alloc_table_info(unsigned int size) 401struct xt_table_info *xt_alloc_table_info(unsigned int size)
305{ 402{
306 struct xt_table_info *newinfo; 403 struct xt_table_info *newinfo;
@@ -371,6 +468,19 @@ void xt_table_unlock(struct xt_table *table)
371} 468}
372EXPORT_SYMBOL_GPL(xt_table_unlock); 469EXPORT_SYMBOL_GPL(xt_table_unlock);
373 470
471#ifdef CONFIG_COMPAT
472void xt_compat_lock(int af)
473{
474 mutex_lock(&xt[af].compat_mutex);
475}
476EXPORT_SYMBOL_GPL(xt_compat_lock);
477
478void xt_compat_unlock(int af)
479{
480 mutex_unlock(&xt[af].compat_mutex);
481}
482EXPORT_SYMBOL_GPL(xt_compat_unlock);
483#endif
374 484
375struct xt_table_info * 485struct xt_table_info *
376xt_replace_table(struct xt_table *table, 486xt_replace_table(struct xt_table *table,
@@ -671,6 +781,9 @@ static int __init xt_init(void)
671 781
672 for (i = 0; i < NPROTO; i++) { 782 for (i = 0; i < NPROTO; i++) {
673 mutex_init(&xt[i].mutex); 783 mutex_init(&xt[i].mutex);
784#ifdef CONFIG_COMPAT
785 mutex_init(&xt[i].compat_mutex);
786#endif
674 INIT_LIST_HEAD(&xt[i].target); 787 INIT_LIST_HEAD(&xt[i].target);
675 INIT_LIST_HEAD(&xt[i].match); 788 INIT_LIST_HEAD(&xt[i].match);
676 INIT_LIST_HEAD(&xt[i].tables); 789 INIT_LIST_HEAD(&xt[i].tables);
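The new xt_compat_match()/xt_compat_target() helpers convert match and target blobs between the native and 32-bit layouts in three modes (COMPAT_TO_USER, COMPAT_FROM_USER, COMPAT_CALC_SIZE); in every mode the size adjustment is the difference between the native and compat alignment of the payload. A small user-space sketch of that delta calculation, with illustrative alignment macros standing in for XT_ALIGN/COMPAT_XT_ALIGN (the real values are architecture dependent):

#include <stdio.h>

/* Illustrative alignment macros: native padded to 8 bytes, compat to 4.
 * The kernel's XT_ALIGN/COMPAT_XT_ALIGN depend on the architecture. */
#define NATIVE_ALIGN(s)  (((s) + 7u) & ~7u)
#define COMPAT_ALIGN(s)  (((s) + 3u) & ~3u)

int main(void)
{
	unsigned int sizes[] = { 4, 8, 12, 16 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned int off = NATIVE_ALIGN(sizes[i]) - COMPAT_ALIGN(sizes[i]);

		/* COMPAT_CALC_SIZE accumulates exactly this per-match and
		 * per-target delta to predict the 32-bit blob size */
		printf("payload %2u: native %2u, compat %2u, off %u\n",
		       sizes[i], NATIVE_ALIGN(sizes[i]),
		       COMPAT_ALIGN(sizes[i]), off);
	}
	return 0;
}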
diff --git a/net/ipv4/netfilter/ipt_esp.c b/net/netfilter/xt_esp.c
index 3840b417a3c5..9dad6281e0c1 100644
--- a/net/ipv4/netfilter/ipt_esp.c
+++ b/net/netfilter/xt_esp.c
@@ -9,16 +9,22 @@
9 9
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/skbuff.h> 11#include <linux/skbuff.h>
12#include <linux/in.h>
12#include <linux/ip.h> 13#include <linux/ip.h>
13 14
14#include <linux/netfilter_ipv4/ipt_esp.h> 15#include <linux/netfilter/xt_esp.h>
16#include <linux/netfilter/x_tables.h>
17
15#include <linux/netfilter_ipv4/ip_tables.h> 18#include <linux/netfilter_ipv4/ip_tables.h>
19#include <linux/netfilter_ipv6/ip6_tables.h>
16 20
17MODULE_LICENSE("GPL"); 21MODULE_LICENSE("GPL");
18MODULE_AUTHOR("Yon Uriarte <yon@astaro.de>"); 22MODULE_AUTHOR("Yon Uriarte <yon@astaro.de>");
19MODULE_DESCRIPTION("iptables ESP SPI match module"); 23MODULE_DESCRIPTION("x_tables ESP SPI match module");
24MODULE_ALIAS("ipt_esp");
25MODULE_ALIAS("ip6t_esp");
20 26
21#ifdef DEBUG_CONNTRACK 27#if 0
22#define duprintf(format, args...) printk(format , ## args) 28#define duprintf(format, args...) printk(format , ## args)
23#else 29#else
24#define duprintf(format, args...) 30#define duprintf(format, args...)
@@ -28,11 +34,11 @@ MODULE_DESCRIPTION("iptables ESP SPI match module");
28static inline int 34static inline int
29spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, int invert) 35spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, int invert)
30{ 36{
31 int r=0; 37 int r = 0;
32 duprintf("esp spi_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ', 38 duprintf("esp spi_match:%c 0x%x <= 0x%x <= 0x%x", invert ? '!' : ' ',
33 min,spi,max); 39 min, spi, max);
34 r=(spi >= min && spi <= max) ^ invert; 40 r = (spi >= min && spi <= max) ^ invert;
35 duprintf(" result %s\n",r? "PASS" : "FAILED"); 41 duprintf(" result %s\n", r ? "PASS" : "FAILED");
36 return r; 42 return r;
37} 43}
38 44
@@ -47,14 +53,13 @@ match(const struct sk_buff *skb,
47 int *hotdrop) 53 int *hotdrop)
48{ 54{
49 struct ip_esp_hdr _esp, *eh; 55 struct ip_esp_hdr _esp, *eh;
50 const struct ipt_esp *espinfo = matchinfo; 56 const struct xt_esp *espinfo = matchinfo;
51 57
52 /* Must not be a fragment. */ 58 /* Must not be a fragment. */
53 if (offset) 59 if (offset)
54 return 0; 60 return 0;
55 61
56 eh = skb_header_pointer(skb, protoff, 62 eh = skb_header_pointer(skb, protoff, sizeof(_esp), &_esp);
57 sizeof(_esp), &_esp);
58 if (eh == NULL) { 63 if (eh == NULL) {
59 /* We've been asked to examine this packet, and we 64 /* We've been asked to examine this packet, and we
60 * can't. Hence, no choice but to drop. 65 * can't. Hence, no choice but to drop.
@@ -64,9 +69,8 @@ match(const struct sk_buff *skb,
64 return 0; 69 return 0;
65 } 70 }
66 71
67 return spi_match(espinfo->spis[0], espinfo->spis[1], 72 return spi_match(espinfo->spis[0], espinfo->spis[1], ntohl(eh->spi),
68 ntohl(eh->spi), 73 !!(espinfo->invflags & XT_ESP_INV_SPI));
69 !!(espinfo->invflags & IPT_ESP_INV_SPI));
70} 74}
71 75
72/* Called when user tries to insert an entry of this type. */ 76/* Called when user tries to insert an entry of this type. */
@@ -78,34 +82,55 @@ checkentry(const char *tablename,
78 unsigned int matchinfosize, 82 unsigned int matchinfosize,
79 unsigned int hook_mask) 83 unsigned int hook_mask)
80{ 84{
81 const struct ipt_esp *espinfo = matchinfo; 85 const struct xt_esp *espinfo = matchinfo;
82 86
83 /* Must specify no unknown invflags */ 87 if (espinfo->invflags & ~XT_ESP_INV_MASK) {
84 if (espinfo->invflags & ~IPT_ESP_INV_MASK) { 88 duprintf("xt_esp: unknown flags %X\n", espinfo->invflags);
85 duprintf("ipt_esp: unknown flags %X\n", espinfo->invflags);
86 return 0; 89 return 0;
87 } 90 }
91
88 return 1; 92 return 1;
89} 93}
90 94
91static struct ipt_match esp_match = { 95static struct xt_match esp_match = {
92 .name = "esp", 96 .name = "esp",
93 .match = match, 97 .family = AF_INET,
94 .matchsize = sizeof(struct ipt_esp),
95 .proto = IPPROTO_ESP, 98 .proto = IPPROTO_ESP,
96 .checkentry = checkentry, 99 .match = &match,
100 .matchsize = sizeof(struct xt_esp),
101 .checkentry = &checkentry,
97 .me = THIS_MODULE, 102 .me = THIS_MODULE,
98}; 103};
99 104
100static int __init ipt_esp_init(void) 105static struct xt_match esp6_match = {
106 .name = "esp",
107 .family = AF_INET6,
108 .proto = IPPROTO_ESP,
109 .match = &match,
110 .matchsize = sizeof(struct xt_esp),
111 .checkentry = &checkentry,
112 .me = THIS_MODULE,
113};
114
115static int __init xt_esp_init(void)
101{ 116{
102 return ipt_register_match(&esp_match); 117 int ret;
118 ret = xt_register_match(&esp_match);
119 if (ret)
120 return ret;
121
122 ret = xt_register_match(&esp6_match);
123 if (ret)
124 xt_unregister_match(&esp_match);
125
126 return ret;
103} 127}
104 128
105static void __exit ipt_esp_fini(void) 129static void __exit xt_esp_cleanup(void)
106{ 130{
107 ipt_unregister_match(&esp_match); 131 xt_unregister_match(&esp_match);
132 xt_unregister_match(&esp6_match);
108} 133}
109 134
110module_init(ipt_esp_init); 135module_init(xt_esp_init);
111module_exit(ipt_esp_fini); 136module_exit(xt_esp_cleanup);
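The relocated ESP match keeps the same core test: the SPI is checked against an inclusive [min, max] range and the result is XORed with the rule's invert flag. A standalone user-space copy of that predicate, for illustration only:

#include <stdio.h>

/* Same range-with-invert test the match uses: the XOR flips the
 * verdict when the rule was written with '!' */
static int spi_match(unsigned int min, unsigned int max,
		     unsigned int spi, int invert)
{
	return (spi >= min && spi <= max) ^ invert;
}

int main(void)
{
	printf("%d\n", spi_match(100, 200, 150, 0));	/* in range      -> 1 */
	printf("%d\n", spi_match(100, 200, 300, 0));	/* out of range  -> 0 */
	printf("%d\n", spi_match(100, 200, 150, 1));	/* inverted hit  -> 0 */
	printf("%d\n", spi_match(100, 200, 300, 1));	/* inverted miss -> 1 */
	return 0;
}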
diff --git a/net/netfilter/xt_multiport.c b/net/netfilter/xt_multiport.c
new file mode 100644
index 000000000000..b56cd2baaac2
--- /dev/null
+++ b/net/netfilter/xt_multiport.c
@@ -0,0 +1,314 @@
1/* Kernel module to match one of a list of TCP/UDP ports: ports are in
2 the same place so we can treat them as equal. */
3
4/* (C) 1999-2001 Paul `Rusty' Russell
5 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/types.h>
14#include <linux/udp.h>
15#include <linux/skbuff.h>
16#include <linux/in.h>
17
18#include <linux/netfilter/xt_multiport.h>
19#include <linux/netfilter/x_tables.h>
20#include <linux/netfilter_ipv4/ip_tables.h>
21#include <linux/netfilter_ipv6/ip6_tables.h>
22
23MODULE_LICENSE("GPL");
24MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
25MODULE_DESCRIPTION("x_tables multiple port match module");
26MODULE_ALIAS("ipt_multiport");
27MODULE_ALIAS("ip6t_multiport");
28
29#if 0
30#define duprintf(format, args...) printk(format , ## args)
31#else
32#define duprintf(format, args...)
33#endif
34
35/* Returns 1 if the port is matched by the test, 0 otherwise. */
36static inline int
37ports_match(const u_int16_t *portlist, enum xt_multiport_flags flags,
38 u_int8_t count, u_int16_t src, u_int16_t dst)
39{
40 unsigned int i;
41 for (i = 0; i < count; i++) {
42 if (flags != XT_MULTIPORT_DESTINATION && portlist[i] == src)
43 return 1;
44
45 if (flags != XT_MULTIPORT_SOURCE && portlist[i] == dst)
46 return 1;
47 }
48
49 return 0;
50}
51
52/* Returns 1 if the port is matched by the test, 0 otherwise. */
53static inline int
54ports_match_v1(const struct xt_multiport_v1 *minfo,
55 u_int16_t src, u_int16_t dst)
56{
57 unsigned int i;
58 u_int16_t s, e;
59
60 for (i = 0; i < minfo->count; i++) {
61 s = minfo->ports[i];
62
63 if (minfo->pflags[i]) {
64 /* range port matching */
65 e = minfo->ports[++i];
66 duprintf("src or dst matches with %d-%d?\n", s, e);
67
68 if (minfo->flags == XT_MULTIPORT_SOURCE
69 && src >= s && src <= e)
70 return 1 ^ minfo->invert;
71 if (minfo->flags == XT_MULTIPORT_DESTINATION
72 && dst >= s && dst <= e)
73 return 1 ^ minfo->invert;
74 if (minfo->flags == XT_MULTIPORT_EITHER
75 && ((dst >= s && dst <= e)
76 || (src >= s && src <= e)))
77 return 1 ^ minfo->invert;
78 } else {
79 /* exact port matching */
80 duprintf("src or dst matches with %d?\n", s);
81
82 if (minfo->flags == XT_MULTIPORT_SOURCE
83 && src == s)
84 return 1 ^ minfo->invert;
85 if (minfo->flags == XT_MULTIPORT_DESTINATION
86 && dst == s)
87 return 1 ^ minfo->invert;
88 if (minfo->flags == XT_MULTIPORT_EITHER
89 && (src == s || dst == s))
90 return 1 ^ minfo->invert;
91 }
92 }
93
94 return minfo->invert;
95}
96
97static int
98match(const struct sk_buff *skb,
99 const struct net_device *in,
100 const struct net_device *out,
101 const struct xt_match *match,
102 const void *matchinfo,
103 int offset,
104 unsigned int protoff,
105 int *hotdrop)
106{
107 u16 _ports[2], *pptr;
108 const struct xt_multiport *multiinfo = matchinfo;
109
110 if (offset)
111 return 0;
112
113 pptr = skb_header_pointer(skb, protoff, sizeof(_ports), _ports);
114 if (pptr == NULL) {
115 /* We've been asked to examine this packet, and we
116 * can't. Hence, no choice but to drop.
117 */
118 duprintf("xt_multiport: Dropping evil offset=0 tinygram.\n");
119 *hotdrop = 1;
120 return 0;
121 }
122
123 return ports_match(multiinfo->ports,
124 multiinfo->flags, multiinfo->count,
125 ntohs(pptr[0]), ntohs(pptr[1]));
126}
127
128static int
129match_v1(const struct sk_buff *skb,
130 const struct net_device *in,
131 const struct net_device *out,
132 const struct xt_match *match,
133 const void *matchinfo,
134 int offset,
135 unsigned int protoff,
136 int *hotdrop)
137{
138 u16 _ports[2], *pptr;
139 const struct xt_multiport_v1 *multiinfo = matchinfo;
140
141 if (offset)
142 return 0;
143
144 pptr = skb_header_pointer(skb, protoff, sizeof(_ports), _ports);
145 if (pptr == NULL) {
146 /* We've been asked to examine this packet, and we
147 * can't. Hence, no choice but to drop.
148 */
149 duprintf("xt_multiport: Dropping evil offset=0 tinygram.\n");
150 *hotdrop = 1;
151 return 0;
152 }
153
154 return ports_match_v1(multiinfo, ntohs(pptr[0]), ntohs(pptr[1]));
155}
156
157static inline int
158check(u_int16_t proto,
159 u_int8_t ip_invflags,
160 u_int8_t match_flags,
161 u_int8_t count)
162{
163 /* Must specify proto == TCP/UDP, no unknown flags or bad count */
164 return (proto == IPPROTO_TCP || proto == IPPROTO_UDP)
165 && !(ip_invflags & XT_INV_PROTO)
166 && (match_flags == XT_MULTIPORT_SOURCE
167 || match_flags == XT_MULTIPORT_DESTINATION
168 || match_flags == XT_MULTIPORT_EITHER)
169 && count <= XT_MULTI_PORTS;
170}
171
172/* Called when user tries to insert an entry of this type. */
173static int
174checkentry(const char *tablename,
175 const void *info,
176 const struct xt_match *match,
177 void *matchinfo,
178 unsigned int matchsize,
179 unsigned int hook_mask)
180{
181 const struct ipt_ip *ip = info;
182 const struct xt_multiport *multiinfo = matchinfo;
183
184 return check(ip->proto, ip->invflags, multiinfo->flags,
185 multiinfo->count);
186}
187
188static int
189checkentry_v1(const char *tablename,
190 const void *info,
191 const struct xt_match *match,
192 void *matchinfo,
193 unsigned int matchsize,
194 unsigned int hook_mask)
195{
196 const struct ipt_ip *ip = info;
197 const struct xt_multiport_v1 *multiinfo = matchinfo;
198
199 return check(ip->proto, ip->invflags, multiinfo->flags,
200 multiinfo->count);
201}
202
203static int
204checkentry6(const char *tablename,
205 const void *info,
206 const struct xt_match *match,
207 void *matchinfo,
208 unsigned int matchsize,
209 unsigned int hook_mask)
210{
211 const struct ip6t_ip6 *ip = info;
212 const struct xt_multiport *multiinfo = matchinfo;
213
214 return check(ip->proto, ip->invflags, multiinfo->flags,
215 multiinfo->count);
216}
217
218static int
219checkentry6_v1(const char *tablename,
220 const void *info,
221 const struct xt_match *match,
222 void *matchinfo,
223 unsigned int matchsize,
224 unsigned int hook_mask)
225{
226 const struct ip6t_ip6 *ip = info;
227 const struct xt_multiport_v1 *multiinfo = matchinfo;
228
229 return check(ip->proto, ip->invflags, multiinfo->flags,
230 multiinfo->count);
231}
232
233static struct xt_match multiport_match = {
234 .name = "multiport",
235 .revision = 0,
236 .matchsize = sizeof(struct xt_multiport),
237 .match = &match,
238 .checkentry = &checkentry,
239 .family = AF_INET,
240 .me = THIS_MODULE,
241};
242
243static struct xt_match multiport_match_v1 = {
244 .name = "multiport",
245 .revision = 1,
246 .matchsize = sizeof(struct xt_multiport_v1),
247 .match = &match_v1,
248 .checkentry = &checkentry_v1,
249 .family = AF_INET,
250 .me = THIS_MODULE,
251};
252
253static struct xt_match multiport6_match = {
254 .name = "multiport",
255 .revision = 0,
256 .matchsize = sizeof(struct xt_multiport),
257 .match = &match,
258 .checkentry = &checkentry6,
259 .family = AF_INET6,
260 .me = THIS_MODULE,
261};
262
263static struct xt_match multiport6_match_v1 = {
264 .name = "multiport",
265 .revision = 1,
266 .matchsize = sizeof(struct xt_multiport_v1),
267 .match = &match_v1,
268 .checkentry = &checkentry6_v1,
269 .family = AF_INET6,
270 .me = THIS_MODULE,
271};
272
273static int __init xt_multiport_init(void)
274{
275 int ret;
276
277 ret = xt_register_match(&multiport_match);
278 if (ret)
279 goto out;
280
281 ret = xt_register_match(&multiport_match_v1);
282 if (ret)
283 goto out_unreg_multi_v0;
284
285 ret = xt_register_match(&multiport6_match);
286 if (ret)
287 goto out_unreg_multi_v1;
288
289 ret = xt_register_match(&multiport6_match_v1);
290 if (ret)
291 goto out_unreg_multi6_v0;
292
293 return ret;
294
295out_unreg_multi6_v0:
296 xt_unregister_match(&multiport6_match);
297out_unreg_multi_v1:
298 xt_unregister_match(&multiport_match_v1);
299out_unreg_multi_v0:
300 xt_unregister_match(&multiport_match);
301out:
302 return ret;
303}
304
305static void __exit xt_multiport_fini(void)
306{
307 xt_unregister_match(&multiport_match);
308 xt_unregister_match(&multiport_match_v1);
309 xt_unregister_match(&multiport6_match);
310 xt_unregister_match(&multiport6_match_v1);
311}
312
313module_init(xt_multiport_init);
314module_exit(xt_multiport_fini);
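The merged xt_multiport keeps the revision-1 semantics: pflags[i] marks the start of a port range covering ports[i]..ports[i+1], anything else is an exact port, and a shared check() validates protocol, flags and count for both address families. A cut-down user-space version of the range/exact walk (destination ports only, invert handling omitted for brevity):

#include <stdio.h>

/* pflags[i] set: ports[i]..ports[i+1] is a range; otherwise ports[i]
 * is an exact port. Only destination-port matching is shown here. */
static int dst_match_v1(const unsigned short *ports,
			const unsigned char *pflags,
			unsigned int count, unsigned short dst)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (pflags[i]) {
			unsigned short s = ports[i], e = ports[++i];

			if (dst >= s && dst <= e)
				return 1;
		} else if (ports[i] == dst) {
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	unsigned short ports[] = { 22, 8000, 8100, 443 };
	unsigned char pflags[] = { 0, 1, 0, 0 };	/* 22, 8000:8100, 443 */

	printf("%d %d %d\n",
	       dst_match_v1(ports, pflags, 4, 22),	/* 1 */
	       dst_match_v1(ports, pflags, 4, 8050),	/* 1 */
	       dst_match_v1(ports, pflags, 4, 80));	/* 0 */
	return 0;
}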
diff --git a/net/netfilter/xt_policy.c b/net/netfilter/xt_policy.c
index 1099cb005fcc..a3aa62fbda6f 100644
--- a/net/netfilter/xt_policy.c
+++ b/net/netfilter/xt_policy.c
@@ -71,7 +71,7 @@ match_policy_in(const struct sk_buff *skb, const struct xt_policy_info *info,
71 return 0; 71 return 0;
72 e = &info->pol[pos]; 72 e = &info->pol[pos];
73 73
74 if (match_xfrm_state(sp->x[i].xvec, e, family)) { 74 if (match_xfrm_state(sp->xvec[i], e, family)) {
75 if (!strict) 75 if (!strict)
76 return 1; 76 return 1;
77 } else if (strict) 77 } else if (strict)
diff --git a/net/socket.c b/net/socket.c
index b13042f68c02..b807f360e02c 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1418,7 +1418,8 @@ asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr, int _
1418 newfd = sock_alloc_fd(&newfile); 1418 newfd = sock_alloc_fd(&newfile);
1419 if (unlikely(newfd < 0)) { 1419 if (unlikely(newfd < 0)) {
1420 err = newfd; 1420 err = newfd;
1421 goto out_release; 1421 sock_release(newsock);
1422 goto out_put;
1422 } 1423 }
1423 1424
1424 err = sock_attach_fd(newsock, newfile); 1425 err = sock_attach_fd(newsock, newfile);
@@ -1455,10 +1456,8 @@ out_put:
1455out: 1456out:
1456 return err; 1457 return err;
1457out_fd: 1458out_fd:
1458 put_filp(newfile); 1459 fput(newfile);
1459 put_unused_fd(newfd); 1460 put_unused_fd(newfd);
1460out_release:
1461 sock_release(newsock);
1462 goto out_put; 1461 goto out_put;
1463} 1462}
1464 1463
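The sys_accept() change tightens the error unwind: when sock_alloc_fd() fails the freshly accepted socket is released on the spot before jumping to out_put, and once the socket has been attached to the file the later failure path uses fput(), which tears the socket down through the file, instead of put_filp() plus a separate sock_release(). A toy user-space sketch of that ownership-transfer rule, with invented names only loosely modelled on the kernel objects:

#include <stdio.h>
#include <stdlib.h>

/* Invented types for illustration: once "attach" has run, the sock is
 * owned by the file and must be released through it. */
struct sockobj { int dummy; };
struct fileobj { struct sockobj *owned; };

static void release_sock(struct sockobj *s) { free(s); }

static void release_file(struct fileobj *f)
{
	if (f->owned)
		release_sock(f->owned);	/* file teardown drops the sock */
	free(f);
}

static int toy_accept(int fail_before_attach, int fail_after_attach)
{
	struct sockobj *s = calloc(1, sizeof(*s));
	struct fileobj *f = calloc(1, sizeof(*f));

	if (!s || !f) {
		free(s);
		free(f);
		return -1;
	}

	if (fail_before_attach) {	/* still our job to drop the sock */
		release_sock(s);
		free(f);
		return -1;
	}

	f->owned = s;			/* attach: ownership moves to the file */

	if (fail_after_attach) {	/* drop via the file only */
		release_file(f);
		return -1;
	}

	release_file(f);		/* normal teardown for the demo */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", toy_accept(1, 0), toy_accept(0, 1),
	       toy_accept(0, 0));
	return 0;
}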
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 2407a7072327..b54971059f16 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -18,7 +18,7 @@ void __secpath_destroy(struct sec_path *sp)
18{ 18{
19 int i; 19 int i;
20 for (i = 0; i < sp->len; i++) 20 for (i = 0; i < sp->len; i++)
21 xfrm_state_put(sp->x[i].xvec); 21 xfrm_state_put(sp->xvec[i]);
22 kmem_cache_free(secpath_cachep, sp); 22 kmem_cache_free(secpath_cachep, sp);
23} 23}
24EXPORT_SYMBOL(__secpath_destroy); 24EXPORT_SYMBOL(__secpath_destroy);
@@ -37,7 +37,7 @@ struct sec_path *secpath_dup(struct sec_path *src)
37 37
38 memcpy(sp, src, sizeof(*sp)); 38 memcpy(sp, src, sizeof(*sp));
39 for (i = 0; i < sp->len; i++) 39 for (i = 0; i < sp->len; i++)
40 xfrm_state_hold(sp->x[i].xvec); 40 xfrm_state_hold(sp->xvec[i]);
41 } 41 }
42 atomic_set(&sp->refcnt, 1); 42 atomic_set(&sp->refcnt, 1);
43 return sp; 43 return sp;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index f5eae9febd26..c3725fe2a8fb 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -943,9 +943,9 @@ xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
943 } else 943 } else
944 start = -1; 944 start = -1;
945 for (; idx < sp->len; idx++) { 945 for (; idx < sp->len; idx++) {
946 if (xfrm_state_ok(tmpl, sp->x[idx].xvec, family)) 946 if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
947 return ++idx; 947 return ++idx;
948 if (sp->x[idx].xvec->props.mode) 948 if (sp->xvec[idx]->props.mode)
949 break; 949 break;
950 } 950 }
951 return start; 951 return start;
@@ -968,7 +968,7 @@ EXPORT_SYMBOL(xfrm_decode_session);
968static inline int secpath_has_tunnel(struct sec_path *sp, int k) 968static inline int secpath_has_tunnel(struct sec_path *sp, int k)
969{ 969{
970 for (; k < sp->len; k++) { 970 for (; k < sp->len; k++) {
971 if (sp->x[k].xvec->props.mode) 971 if (sp->xvec[k]->props.mode)
972 return 1; 972 return 1;
973 } 973 }
974 974
@@ -994,8 +994,8 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
994 int i; 994 int i;
995 995
996 for (i=skb->sp->len-1; i>=0; i--) { 996 for (i=skb->sp->len-1; i>=0; i--) {
997 struct sec_decap_state *xvec = &(skb->sp->x[i]); 997 struct xfrm_state *x = skb->sp->xvec[i];
998 if (!xfrm_selector_match(&xvec->xvec->sel, &fl, family)) 998 if (!xfrm_selector_match(&x->sel, &fl, family))
999 return 0; 999 return 0;
1000 } 1000 }
1001 } 1001 }
diff --git a/sound/core/Kconfig b/sound/core/Kconfig
index 9dd121bb5638..8efc1b12f3a8 100644
--- a/sound/core/Kconfig
+++ b/sound/core/Kconfig
@@ -92,8 +92,9 @@ config SND_SEQUENCER_OSS
92 92
93 Many programs still use the OSS API, so say Y. 93 Many programs still use the OSS API, so say Y.
94 94
95 To compile this driver as a module, choose M here: the module 95 If you choose M in "Sequencer support" (SND_SEQUENCER),
96 will be called snd-seq-oss. 96 this will be compiled as a module. The module will be called
97 snd-seq-oss.
97 98
98config SND_RTCTIMER 99config SND_RTCTIMER
99 tristate "RTC Timer support" 100 tristate "RTC Timer support"
diff --git a/sound/core/control.c b/sound/core/control.c
index 574745314e70..22565c9b9603 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -664,7 +664,7 @@ static int snd_ctl_elem_info_user(struct snd_ctl_file *ctl,
664 if (copy_from_user(&info, _info, sizeof(info))) 664 if (copy_from_user(&info, _info, sizeof(info)))
665 return -EFAULT; 665 return -EFAULT;
666 snd_power_lock(ctl->card); 666 snd_power_lock(ctl->card);
667 result = snd_power_wait(ctl->card, SNDRV_CTL_POWER_D0, NULL); 667 result = snd_power_wait(ctl->card, SNDRV_CTL_POWER_D0);
668 if (result >= 0) 668 if (result >= 0)
669 result = snd_ctl_elem_info(ctl, &info); 669 result = snd_ctl_elem_info(ctl, &info);
670 snd_power_unlock(ctl->card); 670 snd_power_unlock(ctl->card);
@@ -718,7 +718,7 @@ static int snd_ctl_elem_read_user(struct snd_card *card,
718 return -EFAULT; 718 return -EFAULT;
719 } 719 }
720 snd_power_lock(card); 720 snd_power_lock(card);
721 result = snd_power_wait(card, SNDRV_CTL_POWER_D0, NULL); 721 result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
722 if (result >= 0) 722 if (result >= 0)
723 result = snd_ctl_elem_read(card, control); 723 result = snd_ctl_elem_read(card, control);
724 snd_power_unlock(card); 724 snd_power_unlock(card);
@@ -783,7 +783,7 @@ static int snd_ctl_elem_write_user(struct snd_ctl_file *file,
783 } 783 }
784 card = file->card; 784 card = file->card;
785 snd_power_lock(card); 785 snd_power_lock(card);
786 result = snd_power_wait(card, SNDRV_CTL_POWER_D0, NULL); 786 result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
787 if (result >= 0) 787 if (result >= 0)
788 result = snd_ctl_elem_write(card, file, control); 788 result = snd_ctl_elem_write(card, file, control);
789 snd_power_unlock(card); 789 snd_power_unlock(card);
diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
index 84fef5084e17..3c0161bb5ba4 100644
--- a/sound/core/control_compat.c
+++ b/sound/core/control_compat.c
@@ -109,7 +109,7 @@ static int snd_ctl_elem_info_compat(struct snd_ctl_file *ctl,
109 goto error; 109 goto error;
110 110
111 snd_power_lock(ctl->card); 111 snd_power_lock(ctl->card);
112 err = snd_power_wait(ctl->card, SNDRV_CTL_POWER_D0, NULL); 112 err = snd_power_wait(ctl->card, SNDRV_CTL_POWER_D0);
113 if (err >= 0) 113 if (err >= 0)
114 err = snd_ctl_elem_info(ctl, data); 114 err = snd_ctl_elem_info(ctl, data);
115 snd_power_unlock(ctl->card); 115 snd_power_unlock(ctl->card);
@@ -294,7 +294,7 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card,
294 goto error; 294 goto error;
295 295
296 snd_power_lock(card); 296 snd_power_lock(card);
297 err = snd_power_wait(card, SNDRV_CTL_POWER_D0, NULL); 297 err = snd_power_wait(card, SNDRV_CTL_POWER_D0);
298 if (err >= 0) 298 if (err >= 0)
299 err = snd_ctl_elem_read(card, data); 299 err = snd_ctl_elem_read(card, data);
300 snd_power_unlock(card); 300 snd_power_unlock(card);
@@ -320,7 +320,7 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
320 goto error; 320 goto error;
321 321
322 snd_power_lock(card); 322 snd_power_lock(card);
323 err = snd_power_wait(card, SNDRV_CTL_POWER_D0, NULL); 323 err = snd_power_wait(card, SNDRV_CTL_POWER_D0);
324 if (err >= 0) 324 if (err >= 0)
325 err = snd_ctl_elem_write(card, file, data); 325 err = snd_ctl_elem_write(card, file, data);
326 snd_power_unlock(card); 326 snd_power_unlock(card);
diff --git a/sound/core/init.c b/sound/core/init.c
index 5bb8a8b23d51..39ed2e5bb0af 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -722,13 +722,12 @@ int snd_card_file_remove(struct snd_card *card, struct file *file)
722 * snd_power_wait - wait until the power-state is changed. 722 * snd_power_wait - wait until the power-state is changed.
723 * @card: soundcard structure 723 * @card: soundcard structure
724 * @power_state: expected power state 724 * @power_state: expected power state
725 * @file: file structure for the O_NONBLOCK check (optional)
726 * 725 *
727 * Waits until the power-state is changed. 726 * Waits until the power-state is changed.
728 * 727 *
729 * Note: the power lock must be active before call. 728 * Note: the power lock must be active before call.
730 */ 729 */
731int snd_power_wait(struct snd_card *card, unsigned int power_state, struct file *file) 730int snd_power_wait(struct snd_card *card, unsigned int power_state)
732{ 731{
733 wait_queue_t wait; 732 wait_queue_t wait;
734 int result = 0; 733 int result = 0;
@@ -745,12 +744,6 @@ int snd_power_wait(struct snd_card *card, unsigned int power_state, struct file
745 } 744 }
746 if (snd_power_get_state(card) == power_state) 745 if (snd_power_get_state(card) == power_state)
747 break; 746 break;
748#if 0 /* block all devices */
749 if (file && (file->f_flags & O_NONBLOCK)) {
750 result = -EAGAIN;
751 break;
752 }
753#endif
754 set_current_state(TASK_UNINTERRUPTIBLE); 747 set_current_state(TASK_UNINTERRUPTIBLE);
755 snd_power_unlock(card); 748 snd_power_unlock(card);
756 schedule_timeout(30 * HZ); 749 schedule_timeout(30 * HZ);
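With the unused struct file argument removed, snd_power_wait() now takes only the card and the target power state and is still called with the power lock held. A minimal caller sketch mirroring the call sites updated in control.c above (do_ctl_op is a hypothetical name):

#include <sound/core.h>

static int do_ctl_op(struct snd_card *card)
{
	int err;

	snd_power_lock(card);				/* power lock must be held, as before */
	err = snd_power_wait(card, SNDRV_CTL_POWER_D0);	/* no struct file * argument any more */
	if (err >= 0)
		err = 0;	/* ... perform the actual control operation here ... */
	snd_power_unlock(card);
	return err;
}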
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index f8302b703a30..91114c7aeff5 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -208,9 +208,8 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
208 oss_buffer_size = runtime->oss.mmap_bytes; 208 oss_buffer_size = runtime->oss.mmap_bytes;
209 } 209 }
210 210
211 if (substream->oss.setup && 211 if (substream->oss.setup.period_size > 16)
212 substream->oss.setup->period_size > 16) 212 oss_period_size = substream->oss.setup.period_size;
213 oss_period_size = substream->oss.setup->period_size;
214 else if (runtime->oss.fragshift) { 213 else if (runtime->oss.fragshift) {
215 oss_period_size = 1 << runtime->oss.fragshift; 214 oss_period_size = 1 << runtime->oss.fragshift;
216 if (oss_period_size > oss_buffer_size / 2) 215 if (oss_period_size > oss_buffer_size / 2)
@@ -252,10 +251,8 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
252 251
253 oss_periods = oss_buffer_size / oss_period_size; 252 oss_periods = oss_buffer_size / oss_period_size;
254 253
255 if (substream->oss.setup) { 254 if (substream->oss.setup.periods > 1)
256 if (substream->oss.setup->periods > 1) 255 oss_periods = substream->oss.setup.periods;
257 oss_periods = substream->oss.setup->periods;
258 }
259 256
260 s = snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIODS, NULL); 257 s = snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIODS, NULL);
261 if (runtime->oss.maxfrags && s > runtime->oss.maxfrags) 258 if (runtime->oss.maxfrags && s > runtime->oss.maxfrags)
@@ -341,12 +338,10 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
341 goto failure; 338 goto failure;
342 } 339 }
343 340
344 if (atomic_read(&runtime->mmap_count)) { 341 if (atomic_read(&runtime->mmap_count))
345 direct = 1; 342 direct = 1;
346 } else { 343 else
347 struct snd_pcm_oss_setup *setup = substream->oss.setup; 344 direct = substream->oss.setup.direct;
348 direct = (setup != NULL && setup->direct);
349 }
350 345
351 _snd_pcm_hw_params_any(sparams); 346 _snd_pcm_hw_params_any(sparams);
352 _snd_pcm_hw_param_setinteger(sparams, SNDRV_PCM_HW_PARAM_PERIODS); 347 _snd_pcm_hw_param_setinteger(sparams, SNDRV_PCM_HW_PARAM_PERIODS);
@@ -482,7 +477,7 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
482 1 : runtime->period_size; 477 1 : runtime->period_size;
483 sw_params->xfer_align = 1; 478 sw_params->xfer_align = 1;
484 if (atomic_read(&runtime->mmap_count) || 479 if (atomic_read(&runtime->mmap_count) ||
485 (substream->oss.setup && substream->oss.setup->nosilence)) { 480 substream->oss.setup.nosilence) {
486 sw_params->silence_threshold = 0; 481 sw_params->silence_threshold = 0;
487 sw_params->silence_size = 0; 482 sw_params->silence_size = 0;
488 } else { 483 } else {
@@ -843,7 +838,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
843 buf += tmp; 838 buf += tmp;
844 bytes -= tmp; 839 bytes -= tmp;
845 xfer += tmp; 840 xfer += tmp;
846 if ((substream->oss.setup != NULL && substream->oss.setup->partialfrag) || 841 if (substream->oss.setup.partialfrag ||
847 runtime->oss.buffer_used == runtime->oss.period_bytes) { 842 runtime->oss.buffer_used == runtime->oss.period_bytes) {
848 tmp = snd_pcm_oss_write2(substream, runtime->oss.buffer + runtime->oss.period_ptr, 843 tmp = snd_pcm_oss_write2(substream, runtime->oss.buffer + runtime->oss.period_ptr,
849 runtime->oss.buffer_used - runtime->oss.period_ptr, 1); 844 runtime->oss.buffer_used - runtime->oss.period_ptr, 1);
@@ -959,12 +954,12 @@ static int snd_pcm_oss_reset(struct snd_pcm_oss_file *pcm_oss_file)
959 954
960 substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK]; 955 substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK];
961 if (substream != NULL) { 956 if (substream != NULL) {
962 snd_pcm_kernel_playback_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL); 957 snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
963 substream->runtime->oss.prepare = 1; 958 substream->runtime->oss.prepare = 1;
964 } 959 }
965 substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE]; 960 substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE];
966 if (substream != NULL) { 961 if (substream != NULL) {
967 snd_pcm_kernel_capture_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL); 962 snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
968 substream->runtime->oss.prepare = 1; 963 substream->runtime->oss.prepare = 1;
969 } 964 }
970 return 0; 965 return 0;
@@ -979,7 +974,7 @@ static int snd_pcm_oss_post(struct snd_pcm_oss_file *pcm_oss_file)
979 if (substream != NULL) { 974 if (substream != NULL) {
980 if ((err = snd_pcm_oss_make_ready(substream)) < 0) 975 if ((err = snd_pcm_oss_make_ready(substream)) < 0)
981 return err; 976 return err;
982 snd_pcm_kernel_playback_ioctl(substream, SNDRV_PCM_IOCTL_START, NULL); 977 snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_START, NULL);
983 } 978 }
984 /* note: all errors from the start action are ignored */ 979 /* note: all errors from the start action are ignored */
985 /* OSS apps do not know, how to handle them */ 980 /* OSS apps do not know, how to handle them */
@@ -1108,7 +1103,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
1108 __direct: 1103 __direct:
1109 saved_f_flags = substream->ffile->f_flags; 1104 saved_f_flags = substream->ffile->f_flags;
1110 substream->ffile->f_flags &= ~O_NONBLOCK; 1105 substream->ffile->f_flags &= ~O_NONBLOCK;
1111 err = snd_pcm_kernel_playback_ioctl(substream, SNDRV_PCM_IOCTL_DRAIN, NULL); 1106 err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DRAIN, NULL);
1112 substream->ffile->f_flags = saved_f_flags; 1107 substream->ffile->f_flags = saved_f_flags;
1113 if (err < 0) 1108 if (err < 0)
1114 return err; 1109 return err;
@@ -1120,7 +1115,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
1120 if ((err = snd_pcm_oss_make_ready(substream)) < 0) 1115 if ((err = snd_pcm_oss_make_ready(substream)) < 0)
1121 return err; 1116 return err;
1122 runtime = substream->runtime; 1117 runtime = substream->runtime;
1123 err = snd_pcm_kernel_capture_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL); 1118 err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
1124 if (err < 0) 1119 if (err < 0)
1125 return err; 1120 return err;
1126 runtime->oss.buffer_used = 0; 1121 runtime->oss.buffer_used = 0;
@@ -1214,12 +1209,10 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file)
1214 1209
1215 if ((err = snd_pcm_oss_get_active_substream(pcm_oss_file, &substream)) < 0) 1210 if ((err = snd_pcm_oss_get_active_substream(pcm_oss_file, &substream)) < 0)
1216 return err; 1211 return err;
1217 if (atomic_read(&substream->runtime->mmap_count)) { 1212 if (atomic_read(&substream->runtime->mmap_count))
1218 direct = 1; 1213 direct = 1;
1219 } else { 1214 else
1220 struct snd_pcm_oss_setup *setup = substream->oss.setup; 1215 direct = substream->oss.setup.direct;
1221 direct = (setup != NULL && setup->direct);
1222 }
1223 if (!direct) 1216 if (!direct)
1224 return AFMT_MU_LAW | AFMT_U8 | 1217 return AFMT_MU_LAW | AFMT_U8 |
1225 AFMT_S16_LE | AFMT_S16_BE | 1218 AFMT_S16_LE | AFMT_S16_BE |
@@ -1437,7 +1430,7 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
1437 cmd = SNDRV_PCM_IOCTL_DROP; 1430 cmd = SNDRV_PCM_IOCTL_DROP;
1438 runtime->oss.prepare = 1; 1431 runtime->oss.prepare = 1;
1439 } 1432 }
1440 err = snd_pcm_kernel_playback_ioctl(psubstream, cmd, NULL); 1433 err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL);
1441 if (err < 0) 1434 if (err < 0)
1442 return err; 1435 return err;
1443 } 1436 }
@@ -1458,7 +1451,7 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
1458 cmd = SNDRV_PCM_IOCTL_DROP; 1451 cmd = SNDRV_PCM_IOCTL_DROP;
1459 runtime->oss.prepare = 1; 1452 runtime->oss.prepare = 1;
1460 } 1453 }
1461 err = snd_pcm_kernel_capture_ioctl(csubstream, cmd, NULL); 1454 err = snd_pcm_kernel_ioctl(csubstream, cmd, NULL);
1462 if (err < 0) 1455 if (err < 0)
1463 return err; 1456 return err;
1464 } 1457 }
@@ -1495,7 +1488,7 @@ static int snd_pcm_oss_get_odelay(struct snd_pcm_oss_file *pcm_oss_file)
1495 runtime = substream->runtime; 1488 runtime = substream->runtime;
1496 if (runtime->oss.params || runtime->oss.prepare) 1489 if (runtime->oss.params || runtime->oss.prepare)
1497 return 0; 1490 return 0;
1498 err = snd_pcm_kernel_playback_ioctl(substream, SNDRV_PCM_IOCTL_DELAY, &delay); 1491 err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DELAY, &delay);
1499 if (err == -EPIPE) 1492 if (err == -EPIPE)
1500 delay = 0; /* hack for broken OSS applications */ 1493 delay = 0; /* hack for broken OSS applications */
1501 else if (err < 0) 1494 else if (err < 0)
@@ -1555,8 +1548,7 @@ static int snd_pcm_oss_get_ptr(struct snd_pcm_oss_file *pcm_oss_file, int stream
1555 } else { 1548 } else {
1556 delay = snd_pcm_oss_bytes(substream, delay); 1549 delay = snd_pcm_oss_bytes(substream, delay);
1557 if (stream == SNDRV_PCM_STREAM_PLAYBACK) { 1550 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1558 struct snd_pcm_oss_setup *setup = substream->oss.setup; 1551 if (substream->oss.setup.buggyptr)
1559 if (setup && setup->buggyptr)
1560 info.blocks = (runtime->oss.buffer_bytes - delay - fixup) / runtime->oss.period_bytes; 1552 info.blocks = (runtime->oss.buffer_bytes - delay - fixup) / runtime->oss.period_bytes;
1561 else 1553 else
1562 info.blocks = (delay + fixup) / runtime->oss.period_bytes; 1554 info.blocks = (delay + fixup) / runtime->oss.period_bytes;
@@ -1638,37 +1630,46 @@ static int snd_pcm_oss_get_mapbuf(struct snd_pcm_oss_file *pcm_oss_file, int str
1638 return -EINVAL; 1630 return -EINVAL;
1639} 1631}
1640 1632
1641static struct snd_pcm_oss_setup *snd_pcm_oss_look_for_setup(struct snd_pcm *pcm, int stream, const char *task_name) 1633static const char *strip_task_path(const char *path)
1642{ 1634{
1643 const char *ptr, *ptrl; 1635 const char *ptr, *ptrl = NULL;
1644 struct snd_pcm_oss_setup *setup; 1636 for (ptr = path; *ptr; ptr++) {
1645
1646 mutex_lock(&pcm->streams[stream].oss.setup_mutex);
1647 for (setup = pcm->streams[stream].oss.setup_list; setup; setup = setup->next) {
1648 if (!strcmp(setup->task_name, task_name)) {
1649 mutex_unlock(&pcm->streams[stream].oss.setup_mutex);
1650 return setup;
1651 }
1652 }
1653 ptr = ptrl = task_name;
1654 while (*ptr) {
1655 if (*ptr == '/') 1637 if (*ptr == '/')
1656 ptrl = ptr + 1; 1638 ptrl = ptr + 1;
1657 ptr++;
1658 } 1639 }
1659 if (ptrl == task_name) { 1640 return ptrl;
1660 goto __not_found; 1641}
1661 return NULL; 1642
1662 } 1643static void snd_pcm_oss_look_for_setup(struct snd_pcm *pcm, int stream,
1663 for (setup = pcm->streams[stream].oss.setup_list; setup; setup = setup->next) { 1644 const char *task_name,
1664 if (!strcmp(setup->task_name, ptrl)) { 1645 struct snd_pcm_oss_setup *rsetup)
1665 mutex_unlock(&pcm->streams[stream].oss.setup_mutex); 1646{
1666 return setup; 1647 struct snd_pcm_oss_setup *setup;
1648
1649 mutex_lock(&pcm->streams[stream].oss.setup_mutex);
1650 do {
1651 for (setup = pcm->streams[stream].oss.setup_list; setup;
1652 setup = setup->next) {
1653 if (!strcmp(setup->task_name, task_name))
1654 goto out;
1667 } 1655 }
1668 } 1656 } while ((task_name = strip_task_path(task_name)) != NULL);
1669 __not_found: 1657 out:
1658 if (setup)
1659 *rsetup = *setup;
1670 mutex_unlock(&pcm->streams[stream].oss.setup_mutex); 1660 mutex_unlock(&pcm->streams[stream].oss.setup_mutex);
1671 return NULL; 1661}
1662
1663static void snd_pcm_oss_release_substream(struct snd_pcm_substream *substream)
1664{
1665 struct snd_pcm_runtime *runtime;
1666 runtime = substream->runtime;
1667 vfree(runtime->oss.buffer);
1668 runtime->oss.buffer = NULL;
1669#ifdef CONFIG_SND_PCM_OSS_PLUGINS
1670 snd_pcm_oss_plugin_clear(substream);
1671#endif
1672 substream->oss.oss = 0;
1672} 1673}
1673 1674
1674static void snd_pcm_oss_init_substream(struct snd_pcm_substream *substream, 1675static void snd_pcm_oss_init_substream(struct snd_pcm_substream *substream,
@@ -1678,7 +1679,11 @@ static void snd_pcm_oss_init_substream(struct snd_pcm_substream *substream,
1678 struct snd_pcm_runtime *runtime; 1679 struct snd_pcm_runtime *runtime;
1679 1680
1680 substream->oss.oss = 1; 1681 substream->oss.oss = 1;
1681 substream->oss.setup = setup; 1682 substream->oss.setup = *setup;
1683 if (setup->nonblock)
1684 substream->ffile->f_flags |= O_NONBLOCK;
1685 else
1686 substream->ffile->f_flags &= ~O_NONBLOCK;
1682 runtime = substream->runtime; 1687 runtime = substream->runtime;
1683 runtime->oss.params = 1; 1688 runtime->oss.params = 1;
1684 runtime->oss.trigger = 1; 1689 runtime->oss.trigger = 1;
@@ -1697,18 +1702,7 @@ static void snd_pcm_oss_init_substream(struct snd_pcm_substream *substream,
1697 runtime->oss.fragshift = 0; 1702 runtime->oss.fragshift = 0;
1698 runtime->oss.maxfrags = 0; 1703 runtime->oss.maxfrags = 0;
1699 runtime->oss.subdivision = 0; 1704 runtime->oss.subdivision = 0;
1700} 1705 substream->pcm_release = snd_pcm_oss_release_substream;
1701
1702static void snd_pcm_oss_release_substream(struct snd_pcm_substream *substream)
1703{
1704 struct snd_pcm_runtime *runtime;
1705 runtime = substream->runtime;
1706 vfree(runtime->oss.buffer);
1707#ifdef CONFIG_SND_PCM_OSS_PLUGINS
1708 snd_pcm_oss_plugin_clear(substream);
1709#endif
1710 substream->oss.file = NULL;
1711 substream->oss.oss = 0;
1712} 1706}
1713 1707
1714static int snd_pcm_oss_release_file(struct snd_pcm_oss_file *pcm_oss_file) 1708static int snd_pcm_oss_release_file(struct snd_pcm_oss_file *pcm_oss_file)
@@ -1717,23 +1711,8 @@ static int snd_pcm_oss_release_file(struct snd_pcm_oss_file *pcm_oss_file)
1717 snd_assert(pcm_oss_file != NULL, return -ENXIO); 1711 snd_assert(pcm_oss_file != NULL, return -ENXIO);
1718 for (cidx = 0; cidx < 2; ++cidx) { 1712 for (cidx = 0; cidx < 2; ++cidx) {
1719 struct snd_pcm_substream *substream = pcm_oss_file->streams[cidx]; 1713 struct snd_pcm_substream *substream = pcm_oss_file->streams[cidx];
1720 struct snd_pcm_runtime *runtime; 1714 if (substream)
1721 if (substream == NULL) 1715 snd_pcm_release_substream(substream);
1722 continue;
1723 runtime = substream->runtime;
1724
1725 snd_pcm_stream_lock_irq(substream);
1726 if (snd_pcm_running(substream))
1727 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
1728 snd_pcm_stream_unlock_irq(substream);
1729 if (substream->ffile != NULL) {
1730 if (substream->ops->hw_free != NULL)
1731 substream->ops->hw_free(substream);
1732 substream->ops->close(substream);
1733 substream->ffile = NULL;
1734 }
1735 snd_pcm_oss_release_substream(substream);
1736 snd_pcm_release_substream(substream);
1737 } 1716 }
1738 kfree(pcm_oss_file); 1717 kfree(pcm_oss_file);
1739 return 0; 1718 return 0;
@@ -1743,12 +1722,11 @@ static int snd_pcm_oss_open_file(struct file *file,
1743 struct snd_pcm *pcm, 1722 struct snd_pcm *pcm,
1744 struct snd_pcm_oss_file **rpcm_oss_file, 1723 struct snd_pcm_oss_file **rpcm_oss_file,
1745 int minor, 1724 int minor,
1746 struct snd_pcm_oss_setup *psetup, 1725 struct snd_pcm_oss_setup *setup)
1747 struct snd_pcm_oss_setup *csetup)
1748{ 1726{
1749 int err = 0; 1727 int idx, err;
1750 struct snd_pcm_oss_file *pcm_oss_file; 1728 struct snd_pcm_oss_file *pcm_oss_file;
1751 struct snd_pcm_substream *psubstream = NULL, *csubstream = NULL; 1729 struct snd_pcm_substream *substream;
1752 unsigned int f_mode = file->f_mode; 1730 unsigned int f_mode = file->f_mode;
1753 1731
1754 snd_assert(rpcm_oss_file != NULL, return -EINVAL); 1732 snd_assert(rpcm_oss_file != NULL, return -EINVAL);
@@ -1761,73 +1739,31 @@ static int snd_pcm_oss_open_file(struct file *file,
1761 if ((f_mode & (FMODE_WRITE|FMODE_READ)) == (FMODE_WRITE|FMODE_READ) && 1739 if ((f_mode & (FMODE_WRITE|FMODE_READ)) == (FMODE_WRITE|FMODE_READ) &&
1762 (pcm->info_flags & SNDRV_PCM_INFO_HALF_DUPLEX)) 1740 (pcm->info_flags & SNDRV_PCM_INFO_HALF_DUPLEX))
1763 f_mode = FMODE_WRITE; 1741 f_mode = FMODE_WRITE;
1764 if ((f_mode & FMODE_WRITE) && !(psetup && psetup->disable)) { 1742
1765 if ((err = snd_pcm_open_substream(pcm, SNDRV_PCM_STREAM_PLAYBACK, 1743 for (idx = 0; idx < 2; idx++) {
1766 &psubstream)) < 0) { 1744 if (setup[idx].disable)
1745 continue;
1746 if (idx == SNDRV_PCM_STREAM_PLAYBACK) {
1747 if (! (f_mode & FMODE_WRITE))
1748 continue;
1749 } else {
1750 if (! (f_mode & FMODE_READ))
1751 continue;
1752 }
1753 err = snd_pcm_open_substream(pcm, idx, file, &substream);
1754 if (err < 0) {
1767 snd_pcm_oss_release_file(pcm_oss_file); 1755 snd_pcm_oss_release_file(pcm_oss_file);
1768 return err; 1756 return err;
1769 } 1757 }
1770 pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK] = psubstream; 1758
1771 } 1759 pcm_oss_file->streams[idx] = substream;
1772 if ((f_mode & FMODE_READ) && !(csetup && csetup->disable)) { 1760 snd_pcm_oss_init_substream(substream, &setup[idx], minor);
1773 if ((err = snd_pcm_open_substream(pcm, SNDRV_PCM_STREAM_CAPTURE,
1774 &csubstream)) < 0) {
1775 if (!(f_mode & FMODE_WRITE) || err != -ENODEV) {
1776 snd_pcm_oss_release_file(pcm_oss_file);
1777 return err;
1778 } else {
1779 csubstream = NULL;
1780 }
1781 }
1782 pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE] = csubstream;
1783 } 1761 }
1784 1762
1785 if (psubstream == NULL && csubstream == NULL) { 1763 if (! pcm_oss_file->streams[0] && ! pcm_oss_file->streams[1]) {
1786 snd_pcm_oss_release_file(pcm_oss_file); 1764 snd_pcm_oss_release_file(pcm_oss_file);
1787 return -EINVAL; 1765 return -EINVAL;
1788 } 1766 }
1789 if (psubstream != NULL) {
1790 psubstream->oss.file = pcm_oss_file;
1791 err = snd_pcm_hw_constraints_init(psubstream);
1792 if (err < 0) {
1793 snd_printd("snd_pcm_hw_constraint_init failed\n");
1794 snd_pcm_oss_release_file(pcm_oss_file);
1795 return err;
1796 }
1797 if ((err = psubstream->ops->open(psubstream)) < 0) {
1798 snd_pcm_oss_release_file(pcm_oss_file);
1799 return err;
1800 }
1801 psubstream->ffile = file;
1802 err = snd_pcm_hw_constraints_complete(psubstream);
1803 if (err < 0) {
1804 snd_printd("snd_pcm_hw_constraint_complete failed\n");
1805 snd_pcm_oss_release_file(pcm_oss_file);
1806 return err;
1807 }
1808 snd_pcm_oss_init_substream(psubstream, psetup, minor);
1809 }
1810 if (csubstream != NULL) {
1811 csubstream->oss.file = pcm_oss_file;
1812 err = snd_pcm_hw_constraints_init(csubstream);
1813 if (err < 0) {
1814 snd_printd("snd_pcm_hw_constraint_init failed\n");
1815 snd_pcm_oss_release_file(pcm_oss_file);
1816 return err;
1817 }
1818 if ((err = csubstream->ops->open(csubstream)) < 0) {
1819 snd_pcm_oss_release_file(pcm_oss_file);
1820 return err;
1821 }
1822 csubstream->ffile = file;
1823 err = snd_pcm_hw_constraints_complete(csubstream);
1824 if (err < 0) {
1825 snd_printd("snd_pcm_hw_constraint_complete failed\n");
1826 snd_pcm_oss_release_file(pcm_oss_file);
1827 return err;
1828 }
1829 snd_pcm_oss_init_substream(csubstream, csetup, minor);
1830 }
1831 1767
1832 file->private_data = pcm_oss_file; 1768 file->private_data = pcm_oss_file;
1833 *rpcm_oss_file = pcm_oss_file; 1769 *rpcm_oss_file = pcm_oss_file;
@@ -1852,7 +1788,7 @@ static int snd_pcm_oss_open(struct inode *inode, struct file *file)
1852 char task_name[32]; 1788 char task_name[32];
1853 struct snd_pcm *pcm; 1789 struct snd_pcm *pcm;
1854 struct snd_pcm_oss_file *pcm_oss_file; 1790 struct snd_pcm_oss_file *pcm_oss_file;
1855 struct snd_pcm_oss_setup *psetup = NULL, *csetup = NULL; 1791 struct snd_pcm_oss_setup setup[2];
1856 int nonblock; 1792 int nonblock;
1857 wait_queue_t wait; 1793 wait_queue_t wait;
1858 1794
@@ -1873,23 +1809,15 @@ static int snd_pcm_oss_open(struct inode *inode, struct file *file)
1873 err = -EFAULT; 1809 err = -EFAULT;
1874 goto __error; 1810 goto __error;
1875 } 1811 }
1812 memset(setup, 0, sizeof(setup));
1876 if (file->f_mode & FMODE_WRITE) 1813 if (file->f_mode & FMODE_WRITE)
1877 psetup = snd_pcm_oss_look_for_setup(pcm, SNDRV_PCM_STREAM_PLAYBACK, task_name); 1814 snd_pcm_oss_look_for_setup(pcm, SNDRV_PCM_STREAM_PLAYBACK,
1815 task_name, &setup[0]);
1878 if (file->f_mode & FMODE_READ) 1816 if (file->f_mode & FMODE_READ)
1879 csetup = snd_pcm_oss_look_for_setup(pcm, SNDRV_PCM_STREAM_CAPTURE, task_name); 1817 snd_pcm_oss_look_for_setup(pcm, SNDRV_PCM_STREAM_CAPTURE,
1818 task_name, &setup[1]);
1880 1819
1881 nonblock = !!(file->f_flags & O_NONBLOCK); 1820 nonblock = !!(file->f_flags & O_NONBLOCK);
1882 if (psetup && !psetup->disable) {
1883 if (psetup->nonblock)
1884 nonblock = 1;
1885 else if (psetup->block)
1886 nonblock = 0;
1887 } else if (csetup && !csetup->disable) {
1888 if (csetup->nonblock)
1889 nonblock = 1;
1890 else if (csetup->block)
1891 nonblock = 0;
1892 }
1893 if (!nonblock) 1821 if (!nonblock)
1894 nonblock = nonblock_open; 1822 nonblock = nonblock_open;
1895 1823
@@ -1898,7 +1826,7 @@ static int snd_pcm_oss_open(struct inode *inode, struct file *file)
1898 mutex_lock(&pcm->open_mutex); 1826 mutex_lock(&pcm->open_mutex);
1899 while (1) { 1827 while (1) {
1900 err = snd_pcm_oss_open_file(file, pcm, &pcm_oss_file, 1828 err = snd_pcm_oss_open_file(file, pcm, &pcm_oss_file,
1901 iminor(inode), psetup, csetup); 1829 iminor(inode), setup);
1902 if (err >= 0) 1830 if (err >= 0)
1903 break; 1831 break;
1904 if (err == -EAGAIN) { 1832 if (err == -EAGAIN) {
@@ -2312,13 +2240,8 @@ static void snd_pcm_oss_proc_read(struct snd_info_entry *entry,
2312 2240
2313static void snd_pcm_oss_proc_free_setup_list(struct snd_pcm_str * pstr) 2241static void snd_pcm_oss_proc_free_setup_list(struct snd_pcm_str * pstr)
2314{ 2242{
2315 unsigned int idx;
2316 struct snd_pcm_substream *substream;
2317 struct snd_pcm_oss_setup *setup, *setupn; 2243 struct snd_pcm_oss_setup *setup, *setupn;
2318 2244
2319 for (idx = 0, substream = pstr->substream;
2320 idx < pstr->substream_count; idx++, substream = substream->next)
2321 substream->oss.setup = NULL;
2322 for (setup = pstr->oss.setup_list, pstr->oss.setup_list = NULL; 2245 for (setup = pstr->oss.setup_list, pstr->oss.setup_list = NULL;
2323 setup; setup = setupn) { 2246 setup; setup = setupn) {
2324 setupn = setup->next; 2247 setupn = setup->next;
@@ -2379,21 +2302,28 @@ static void snd_pcm_oss_proc_write(struct snd_info_entry *entry,
2379 } 2302 }
2380 } while (*str); 2303 } while (*str);
2381 if (setup == NULL) { 2304 if (setup == NULL) {
2382 setup = kmalloc(sizeof(struct snd_pcm_oss_setup), GFP_KERNEL); 2305 setup = kmalloc(sizeof(*setup), GFP_KERNEL);
2383 if (setup) { 2306 if (! setup) {
2384 if (pstr->oss.setup_list == NULL) { 2307 buffer->error = -ENOMEM;
2385 pstr->oss.setup_list = setup; 2308 mutex_unlock(&pstr->oss.setup_mutex);
2386 } else { 2309 return;
2387 for (setup1 = pstr->oss.setup_list; setup1->next; setup1 = setup1->next); 2310 }
2388 setup1->next = setup; 2311 if (pstr->oss.setup_list == NULL)
2389 } 2312 pstr->oss.setup_list = setup;
2390 template.task_name = kstrdup(task_name, GFP_KERNEL); 2313 else {
2391 } else { 2314 for (setup1 = pstr->oss.setup_list;
2315 setup1->next; setup1 = setup1->next);
2316 setup1->next = setup;
2317 }
2318 template.task_name = kstrdup(task_name, GFP_KERNEL);
2319 if (! template.task_name) {
2320 kfree(setup);
2392 buffer->error = -ENOMEM; 2321 buffer->error = -ENOMEM;
2322 mutex_unlock(&pstr->oss.setup_mutex);
2323 return;
2393 } 2324 }
2394 } 2325 }
2395 if (setup) 2326 *setup = template;
2396 *setup = template;
2397 mutex_unlock(&pstr->oss.setup_mutex); 2327 mutex_unlock(&pstr->oss.setup_mutex);
2398 } 2328 }
2399} 2329}
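The pcm_oss.c rework turns substream->oss.setup from a pointer into an embedded copy: snd_pcm_oss_look_for_setup() writes into a caller-supplied, pre-zeroed struct, so readers use the fields directly and the NULL checks disappear. A condensed sketch of the read side, assuming CONFIG_SND_PCM_OSS is enabled (oss_wants_direct is an illustrative helper, not in the patch):

static int oss_wants_direct(struct snd_pcm_substream *substream)
{
	if (atomic_read(&substream->runtime->mmap_count))
		return 1;				/* mmap always forces direct mode */
	return substream->oss.setup.direct;		/* embedded copy; zero when nothing matched */
}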
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 3da6a38c2d0f..5d7eb123b999 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -777,8 +777,9 @@ static void snd_pcm_tick_timer_func(unsigned long data)
777 snd_pcm_tick_elapsed(substream); 777 snd_pcm_tick_elapsed(substream);
778} 778}
779 779
780int snd_pcm_open_substream(struct snd_pcm *pcm, int stream, 780int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream,
781 struct snd_pcm_substream **rsubstream) 781 struct file *file,
782 struct snd_pcm_substream **rsubstream)
782{ 783{
783 struct snd_pcm_str * pstr; 784 struct snd_pcm_str * pstr;
784 struct snd_pcm_substream *substream; 785 struct snd_pcm_substream *substream;
@@ -793,7 +794,7 @@ int snd_pcm_open_substream(struct snd_pcm *pcm, int stream,
793 *rsubstream = NULL; 794 *rsubstream = NULL;
794 snd_assert(pcm != NULL, return -ENXIO); 795 snd_assert(pcm != NULL, return -ENXIO);
795 pstr = &pcm->streams[stream]; 796 pstr = &pcm->streams[stream];
796 if (pstr->substream == NULL) 797 if (pstr->substream == NULL || pstr->substream_count == 0)
797 return -ENODEV; 798 return -ENODEV;
798 799
799 card = pcm->card; 800 card = pcm->card;
@@ -807,8 +808,6 @@ int snd_pcm_open_substream(struct snd_pcm *pcm, int stream,
807 } 808 }
808 up_read(&card->controls_rwsem); 809 up_read(&card->controls_rwsem);
809 810
810 if (pstr->substream_count == 0)
811 return -ENODEV;
812 switch (stream) { 811 switch (stream) {
813 case SNDRV_PCM_STREAM_PLAYBACK: 812 case SNDRV_PCM_STREAM_PLAYBACK:
814 if (pcm->info_flags & SNDRV_PCM_INFO_HALF_DUPLEX) { 813 if (pcm->info_flags & SNDRV_PCM_INFO_HALF_DUPLEX) {
@@ -874,12 +873,13 @@ int snd_pcm_open_substream(struct snd_pcm *pcm, int stream,
874 873
875 substream->runtime = runtime; 874 substream->runtime = runtime;
876 substream->private_data = pcm->private_data; 875 substream->private_data = pcm->private_data;
876 substream->ffile = file;
877 pstr->substream_opened++; 877 pstr->substream_opened++;
878 *rsubstream = substream; 878 *rsubstream = substream;
879 return 0; 879 return 0;
880} 880}
881 881
882void snd_pcm_release_substream(struct snd_pcm_substream *substream) 882void snd_pcm_detach_substream(struct snd_pcm_substream *substream)
883{ 883{
884 struct snd_pcm_runtime *runtime; 884 struct snd_pcm_runtime *runtime;
885 substream->file = NULL; 885 substream->file = NULL;
@@ -1111,8 +1111,6 @@ EXPORT_SYMBOL(snd_pcm_link_rwlock);
1111EXPORT_SYMBOL(snd_pcm_suspend); 1111EXPORT_SYMBOL(snd_pcm_suspend);
1112EXPORT_SYMBOL(snd_pcm_suspend_all); 1112EXPORT_SYMBOL(snd_pcm_suspend_all);
1113#endif 1113#endif
1114EXPORT_SYMBOL(snd_pcm_kernel_playback_ioctl);
1115EXPORT_SYMBOL(snd_pcm_kernel_capture_ioctl);
1116EXPORT_SYMBOL(snd_pcm_kernel_ioctl); 1114EXPORT_SYMBOL(snd_pcm_kernel_ioctl);
1117EXPORT_SYMBOL(snd_pcm_mmap_data); 1115EXPORT_SYMBOL(snd_pcm_mmap_data);
1118#if SNDRV_PCM_INFO_MMAP_IOMEM 1116#if SNDRV_PCM_INFO_MMAP_IOMEM
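In pcm.c the low-level open/close pair is renamed to snd_pcm_attach_substream()/snd_pcm_detach_substream(); attach now receives the struct file and records it in substream->ffile, and the empty-stream check is folded into the initial -ENODEV test. A rough caller sketch under those assumptions (attach_example is a hypothetical name; hardware open and constraint setup happen in the new snd_pcm_open_substream() in pcm_native.c):

static int attach_example(struct snd_pcm *pcm, struct file *file)
{
	struct snd_pcm_substream *substream;
	int err;

	err = snd_pcm_attach_substream(pcm, SNDRV_PCM_STREAM_PLAYBACK, file, &substream);
	if (err < 0)
		return err;
	/* substream->ffile is set here, so O_NONBLOCK can be tested later */
	snd_pcm_detach_substream(substream);
	return 0;
}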
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index eeba2f060955..230a940d00bd 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -2299,19 +2299,7 @@ snd_pcm_sframes_t snd_pcm_lib_write(struct snd_pcm_substream *substream, const v
2299 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) 2299 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2300 return -EBADFD; 2300 return -EBADFD;
2301 2301
2302 snd_assert(substream->ffile != NULL, return -ENXIO);
2303 nonblock = !!(substream->ffile->f_flags & O_NONBLOCK); 2302 nonblock = !!(substream->ffile->f_flags & O_NONBLOCK);
2304#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
2305 if (substream->oss.oss) {
2306 struct snd_pcm_oss_setup *setup = substream->oss.setup;
2307 if (setup != NULL) {
2308 if (setup->nonblock)
2309 nonblock = 1;
2310 else if (setup->block)
2311 nonblock = 0;
2312 }
2313 }
2314#endif
2315 2303
2316 if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED && 2304 if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
2317 runtime->channels > 1) 2305 runtime->channels > 1)
@@ -2374,19 +2362,7 @@ snd_pcm_sframes_t snd_pcm_lib_writev(struct snd_pcm_substream *substream,
2374 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) 2362 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2375 return -EBADFD; 2363 return -EBADFD;
2376 2364
2377 snd_assert(substream->ffile != NULL, return -ENXIO);
2378 nonblock = !!(substream->ffile->f_flags & O_NONBLOCK); 2365 nonblock = !!(substream->ffile->f_flags & O_NONBLOCK);
2379#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
2380 if (substream->oss.oss) {
2381 struct snd_pcm_oss_setup *setup = substream->oss.setup;
2382 if (setup != NULL) {
2383 if (setup->nonblock)
2384 nonblock = 1;
2385 else if (setup->block)
2386 nonblock = 0;
2387 }
2388 }
2389#endif
2390 2366
2391 if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) 2367 if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
2392 return -EINVAL; 2368 return -EINVAL;
@@ -2596,19 +2572,7 @@ snd_pcm_sframes_t snd_pcm_lib_read(struct snd_pcm_substream *substream, void __u
2596 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) 2572 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2597 return -EBADFD; 2573 return -EBADFD;
2598 2574
2599 snd_assert(substream->ffile != NULL, return -ENXIO);
2600 nonblock = !!(substream->ffile->f_flags & O_NONBLOCK); 2575 nonblock = !!(substream->ffile->f_flags & O_NONBLOCK);
2601#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
2602 if (substream->oss.oss) {
2603 struct snd_pcm_oss_setup *setup = substream->oss.setup;
2604 if (setup != NULL) {
2605 if (setup->nonblock)
2606 nonblock = 1;
2607 else if (setup->block)
2608 nonblock = 0;
2609 }
2610 }
2611#endif
2612 if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED) 2576 if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED)
2613 return -EINVAL; 2577 return -EINVAL;
2614 return snd_pcm_lib_read1(substream, (unsigned long)buf, size, nonblock, snd_pcm_lib_read_transfer); 2578 return snd_pcm_lib_read1(substream, (unsigned long)buf, size, nonblock, snd_pcm_lib_read_transfer);
@@ -2665,20 +2629,7 @@ snd_pcm_sframes_t snd_pcm_lib_readv(struct snd_pcm_substream *substream,
2665 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) 2629 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2666 return -EBADFD; 2630 return -EBADFD;
2667 2631
2668 snd_assert(substream->ffile != NULL, return -ENXIO);
2669 nonblock = !!(substream->ffile->f_flags & O_NONBLOCK); 2632 nonblock = !!(substream->ffile->f_flags & O_NONBLOCK);
2670#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
2671 if (substream->oss.oss) {
2672 struct snd_pcm_oss_setup *setup = substream->oss.setup;
2673 if (setup != NULL) {
2674 if (setup->nonblock)
2675 nonblock = 1;
2676 else if (setup->block)
2677 nonblock = 0;
2678 }
2679 }
2680#endif
2681
2682 if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) 2633 if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
2683 return -EINVAL; 2634 return -EINVAL;
2684 return snd_pcm_lib_read1(substream, (unsigned long)bufs, frames, nonblock, snd_pcm_lib_readv_transfer); 2635 return snd_pcm_lib_read1(substream, (unsigned long)bufs, frames, nonblock, snd_pcm_lib_readv_transfer);
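All four read/write entry points above lose their OSS-specific override blocks: blocking behaviour is now derived only from the flags of the file recorded on the substream, because the OSS layer sets or clears O_NONBLOCK once at open time (see snd_pcm_oss_init_substream earlier in this patch). The two surviving halves, shown side by side as a non-compilable sketch:

/* OSS open path (snd_pcm_oss_init_substream): decide once per open */
if (setup->nonblock)
	substream->ffile->f_flags |= O_NONBLOCK;
else
	substream->ffile->f_flags &= ~O_NONBLOCK;

/* pcm_lib.c read/write path: consult only the recorded file flags */
nonblock = !!(substream->ffile->f_flags & O_NONBLOCK);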
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 01f150f0990e..964e4c47a7f1 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1170,7 +1170,7 @@ static int snd_pcm_resume(struct snd_pcm_substream *substream)
1170 int res; 1170 int res;
1171 1171
1172 snd_power_lock(card); 1172 snd_power_lock(card);
1173 if ((res = snd_power_wait(card, SNDRV_CTL_POWER_D0, substream->ffile)) >= 0) 1173 if ((res = snd_power_wait(card, SNDRV_CTL_POWER_D0)) >= 0)
1174 res = snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream, 0); 1174 res = snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream, 0);
1175 snd_power_unlock(card); 1175 snd_power_unlock(card);
1176 return res; 1176 return res;
@@ -1198,7 +1198,7 @@ static int snd_pcm_xrun(struct snd_pcm_substream *substream)
1198 1198
1199 snd_power_lock(card); 1199 snd_power_lock(card);
1200 if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) { 1200 if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
1201 result = snd_power_wait(card, SNDRV_CTL_POWER_D0, substream->ffile); 1201 result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
1202 if (result < 0) 1202 if (result < 0)
1203 goto _unlock; 1203 goto _unlock;
1204 } 1204 }
@@ -1313,13 +1313,13 @@ static struct action_ops snd_pcm_action_prepare = {
1313 * 1313 *
1314 * Prepare the PCM substream to be triggerable. 1314 * Prepare the PCM substream to be triggerable.
1315 */ 1315 */
1316int snd_pcm_prepare(struct snd_pcm_substream *substream) 1316static int snd_pcm_prepare(struct snd_pcm_substream *substream)
1317{ 1317{
1318 int res; 1318 int res;
1319 struct snd_card *card = substream->pcm->card; 1319 struct snd_card *card = substream->pcm->card;
1320 1320
1321 snd_power_lock(card); 1321 snd_power_lock(card);
1322 if ((res = snd_power_wait(card, SNDRV_CTL_POWER_D0, substream->ffile)) >= 0) 1322 if ((res = snd_power_wait(card, SNDRV_CTL_POWER_D0)) >= 0)
1323 res = snd_pcm_action_nonatomic(&snd_pcm_action_prepare, substream, 0); 1323 res = snd_pcm_action_nonatomic(&snd_pcm_action_prepare, substream, 0);
1324 snd_power_unlock(card); 1324 snd_power_unlock(card);
1325 return res; 1325 return res;
@@ -1410,7 +1410,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream)
1410 1410
1411 snd_power_lock(card); 1411 snd_power_lock(card);
1412 if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) { 1412 if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
1413 result = snd_power_wait(card, SNDRV_CTL_POWER_D0, substream->ffile); 1413 result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
1414 if (result < 0) { 1414 if (result < 0) {
1415 snd_power_unlock(card); 1415 snd_power_unlock(card);
1416 return result; 1416 return result;
@@ -1533,7 +1533,7 @@ static int snd_pcm_drop(struct snd_pcm_substream *substream)
1533 1533
1534 snd_power_lock(card); 1534 snd_power_lock(card);
1535 if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) { 1535 if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
1536 result = snd_power_wait(card, SNDRV_CTL_POWER_D0, substream->ffile); 1536 result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
1537 if (result < 0) 1537 if (result < 0)
1538 goto _unlock; 1538 goto _unlock;
1539 } 1539 }
@@ -1995,28 +1995,63 @@ static void snd_pcm_remove_file(struct snd_pcm_str *str,
1995 } 1995 }
1996} 1996}
1997 1997
1998static int snd_pcm_release_file(struct snd_pcm_file * pcm_file) 1998static void pcm_release_private(struct snd_pcm_substream *substream)
1999{ 1999{
2000 struct snd_pcm_substream *substream; 2000 struct snd_pcm_file *pcm_file = substream->file;
2001 struct snd_pcm_runtime *runtime;
2002 struct snd_pcm_str * str;
2003 2001
2004 snd_assert(pcm_file != NULL, return -ENXIO);
2005 substream = pcm_file->substream;
2006 snd_assert(substream != NULL, return -ENXIO);
2007 runtime = substream->runtime;
2008 str = substream->pstr;
2009 snd_pcm_unlink(substream); 2002 snd_pcm_unlink(substream);
2010 if (substream->ffile != NULL) { 2003 snd_pcm_remove_file(substream->pstr, pcm_file);
2004 kfree(pcm_file);
2005}
2006
2007void snd_pcm_release_substream(struct snd_pcm_substream *substream)
2008{
2009 snd_pcm_drop(substream);
2010 if (substream->pcm_release)
2011 substream->pcm_release(substream);
2012 if (substream->hw_opened) {
2011 if (substream->ops->hw_free != NULL) 2013 if (substream->ops->hw_free != NULL)
2012 substream->ops->hw_free(substream); 2014 substream->ops->hw_free(substream);
2013 substream->ops->close(substream); 2015 substream->ops->close(substream);
2014 substream->ffile = NULL; 2016 substream->hw_opened = 0;
2015 } 2017 }
2016 snd_pcm_remove_file(str, pcm_file); 2018 snd_pcm_detach_substream(substream);
2017 snd_pcm_release_substream(substream); 2019}
2018 kfree(pcm_file); 2020
2021int snd_pcm_open_substream(struct snd_pcm *pcm, int stream,
2022 struct file *file,
2023 struct snd_pcm_substream **rsubstream)
2024{
2025 struct snd_pcm_substream *substream;
2026 int err;
2027
2028 err = snd_pcm_attach_substream(pcm, stream, file, &substream);
2029 if (err < 0)
2030 return err;
2031 substream->no_mmap_ctrl = 0;
2032 err = snd_pcm_hw_constraints_init(substream);
2033 if (err < 0) {
2034 snd_printd("snd_pcm_hw_constraints_init failed\n");
2035 goto error;
2036 }
2037
2038 if ((err = substream->ops->open(substream)) < 0)
2039 goto error;
2040
2041 substream->hw_opened = 1;
2042
2043 err = snd_pcm_hw_constraints_complete(substream);
2044 if (err < 0) {
2045 snd_printd("snd_pcm_hw_constraints_complete failed\n");
2046 goto error;
2047 }
2048
2049 *rsubstream = substream;
2019 return 0; 2050 return 0;
2051
2052 error:
2053 snd_pcm_release_substream(substream);
2054 return err;
2020} 2055}
2021 2056
2022static int snd_pcm_open_file(struct file *file, 2057static int snd_pcm_open_file(struct file *file,
@@ -2024,52 +2059,29 @@ static int snd_pcm_open_file(struct file *file,
2024 int stream, 2059 int stream,
2025 struct snd_pcm_file **rpcm_file) 2060 struct snd_pcm_file **rpcm_file)
2026{ 2061{
2027 int err = 0;
2028 struct snd_pcm_file *pcm_file; 2062 struct snd_pcm_file *pcm_file;
2029 struct snd_pcm_substream *substream; 2063 struct snd_pcm_substream *substream;
2030 struct snd_pcm_str *str; 2064 struct snd_pcm_str *str;
2065 int err;
2031 2066
2032 snd_assert(rpcm_file != NULL, return -EINVAL); 2067 snd_assert(rpcm_file != NULL, return -EINVAL);
2033 *rpcm_file = NULL; 2068 *rpcm_file = NULL;
2034 2069
2070 err = snd_pcm_open_substream(pcm, stream, file, &substream);
2071 if (err < 0)
2072 return err;
2073
2035 pcm_file = kzalloc(sizeof(*pcm_file), GFP_KERNEL); 2074 pcm_file = kzalloc(sizeof(*pcm_file), GFP_KERNEL);
2036 if (pcm_file == NULL) { 2075 if (pcm_file == NULL) {
2076 snd_pcm_release_substream(substream);
2037 return -ENOMEM; 2077 return -ENOMEM;
2038 } 2078 }
2039
2040 if ((err = snd_pcm_open_substream(pcm, stream, &substream)) < 0) {
2041 kfree(pcm_file);
2042 return err;
2043 }
2044
2045 str = substream->pstr; 2079 str = substream->pstr;
2046 substream->file = pcm_file; 2080 substream->file = pcm_file;
2047 substream->no_mmap_ctrl = 0; 2081 substream->pcm_release = pcm_release_private;
2048
2049 pcm_file->substream = substream; 2082 pcm_file->substream = substream;
2050
2051 snd_pcm_add_file(str, pcm_file); 2083 snd_pcm_add_file(str, pcm_file);
2052 2084
2053 err = snd_pcm_hw_constraints_init(substream);
2054 if (err < 0) {
2055 snd_printd("snd_pcm_hw_constraints_init failed\n");
2056 snd_pcm_release_file(pcm_file);
2057 return err;
2058 }
2059
2060 if ((err = substream->ops->open(substream)) < 0) {
2061 snd_pcm_release_file(pcm_file);
2062 return err;
2063 }
2064 substream->ffile = file;
2065
2066 err = snd_pcm_hw_constraints_complete(substream);
2067 if (err < 0) {
2068 snd_printd("snd_pcm_hw_constraints_complete failed\n");
2069 snd_pcm_release_file(pcm_file);
2070 return err;
2071 }
2072
2073 file->private_data = pcm_file; 2085 file->private_data = pcm_file;
2074 *rpcm_file = pcm_file; 2086 *rpcm_file = pcm_file;
2075 return 0; 2087 return 0;
@@ -2158,10 +2170,9 @@ static int snd_pcm_release(struct inode *inode, struct file *file)
2158 snd_assert(substream != NULL, return -ENXIO); 2170 snd_assert(substream != NULL, return -ENXIO);
2159 snd_assert(!atomic_read(&substream->runtime->mmap_count), ); 2171 snd_assert(!atomic_read(&substream->runtime->mmap_count), );
2160 pcm = substream->pcm; 2172 pcm = substream->pcm;
2161 snd_pcm_drop(substream);
2162 fasync_helper(-1, file, 0, &substream->runtime->fasync); 2173 fasync_helper(-1, file, 0, &substream->runtime->fasync);
2163 mutex_lock(&pcm->open_mutex); 2174 mutex_lock(&pcm->open_mutex);
2164 snd_pcm_release_file(pcm_file); 2175 snd_pcm_release_substream(substream);
2165 mutex_unlock(&pcm->open_mutex); 2176 mutex_unlock(&pcm->open_mutex);
2166 wake_up(&pcm->open_wait); 2177 wake_up(&pcm->open_wait);
2167 module_put(pcm->card->module); 2178 module_put(pcm->card->module);
@@ -2480,11 +2491,6 @@ static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
2480 return 0; 2491 return 0;
2481} 2492}
2482 2493
2483static int snd_pcm_playback_ioctl1(struct snd_pcm_substream *substream,
2484 unsigned int cmd, void __user *arg);
2485static int snd_pcm_capture_ioctl1(struct snd_pcm_substream *substream,
2486 unsigned int cmd, void __user *arg);
2487
2488static int snd_pcm_common_ioctl1(struct snd_pcm_substream *substream, 2494static int snd_pcm_common_ioctl1(struct snd_pcm_substream *substream,
2489 unsigned int cmd, void __user *arg) 2495 unsigned int cmd, void __user *arg)
2490{ 2496{
@@ -2736,41 +2742,28 @@ static long snd_pcm_capture_ioctl(struct file *file, unsigned int cmd,
2736 return snd_pcm_capture_ioctl1(pcm_file->substream, cmd, (void __user *)arg); 2742 return snd_pcm_capture_ioctl1(pcm_file->substream, cmd, (void __user *)arg);
2737} 2743}
2738 2744
2739int snd_pcm_kernel_playback_ioctl(struct snd_pcm_substream *substream, 2745int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
2740 unsigned int cmd, void *arg) 2746 unsigned int cmd, void *arg)
2741{
2742 mm_segment_t fs;
2743 int result;
2744
2745 fs = snd_enter_user();
2746 result = snd_pcm_playback_ioctl1(substream, cmd, (void __user *)arg);
2747 snd_leave_user(fs);
2748 return result;
2749}
2750
2751int snd_pcm_kernel_capture_ioctl(struct snd_pcm_substream *substream,
2752 unsigned int cmd, void *arg)
2753{ 2747{
2754 mm_segment_t fs; 2748 mm_segment_t fs;
2755 int result; 2749 int result;
2756 2750
2757 fs = snd_enter_user(); 2751 fs = snd_enter_user();
2758 result = snd_pcm_capture_ioctl1(substream, cmd, (void __user *)arg);
2759 snd_leave_user(fs);
2760 return result;
2761}
2762
2763int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
2764 unsigned int cmd, void *arg)
2765{
2766 switch (substream->stream) { 2752 switch (substream->stream) {
2767 case SNDRV_PCM_STREAM_PLAYBACK: 2753 case SNDRV_PCM_STREAM_PLAYBACK:
2768 return snd_pcm_kernel_playback_ioctl(substream, cmd, arg); 2754 result = snd_pcm_playback_ioctl1(substream,
2755 cmd, (void __user *)arg);
2756 break;
2769 case SNDRV_PCM_STREAM_CAPTURE: 2757 case SNDRV_PCM_STREAM_CAPTURE:
2770 return snd_pcm_kernel_capture_ioctl(substream, cmd, arg); 2758 result = snd_pcm_capture_ioctl1(substream,
2759 cmd, (void __user *)arg);
2760 break;
2771 default: 2761 default:
2772 return -EINVAL; 2762 result = -EINVAL;
2763 break;
2773 } 2764 }
2765 snd_leave_user(fs);
2766 return result;
2774} 2767}
2775 2768
2776static ssize_t snd_pcm_read(struct file *file, char __user *buf, size_t count, 2769static ssize_t snd_pcm_read(struct file *file, char __user *buf, size_t count,
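snd_pcm_kernel_playback_ioctl() and snd_pcm_kernel_capture_ioctl() are folded into snd_pcm_kernel_ioctl(), which switches on substream->stream internally, so in-kernel callers such as the OSS emulation no longer need to pick a direction-specific helper. Minimal caller sketch (stop_stream is an illustrative name):

static int stop_stream(struct snd_pcm_substream *substream)
{
	/* one entry point for playback and capture alike */
	return snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
}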
diff --git a/sound/isa/Kconfig b/sound/isa/Kconfig
index ff8fef932786..557c4de22960 100644
--- a/sound/isa/Kconfig
+++ b/sound/isa/Kconfig
@@ -11,6 +11,15 @@ config SND_CS4231_LIB
11 tristate 11 tristate
12 select SND_PCM 12 select SND_PCM
13 13
14config SND_ADLIB
15 tristate "AdLib FM card"
16 select SND_OPL3_LIB
17 help
18 Say Y here to include support for AdLib FM cards.
19
20 To compile this driver as a module, choose M here: the module
21 will be called snd-adlib.
22
14config SND_AD1816A 23config SND_AD1816A
15 tristate "Analog Devices SoundPort AD1816A" 24 tristate "Analog Devices SoundPort AD1816A"
16 depends on SND && PNP && ISA 25 depends on SND && PNP && ISA
@@ -292,6 +301,20 @@ config SND_OPTI93X
292 To compile this driver as a module, choose M here: the module 301 To compile this driver as a module, choose M here: the module
293 will be called snd-opti93x. 302 will be called snd-opti93x.
294 303
304config SND_MIRO
305 tristate "Miro miroSOUND PCM1pro/PCM12/PCM20radio driver"
306 depends on SND
307 select SND_OPL4_LIB
308 select SND_CS4231_LIB
309 select SND_MPU401_UART
310 select SND_PCM
311 help
312 Say 'Y' or 'M' to include support for Miro miroSOUND PCM1 pro,
313 miroSOUND PCM12 and miroSOUND PCM20 Radio soundcards.
314
315 To compile this driver as a module, choose M here: the module
316 will be called snd-miro.
317
295config SND_SB8 318config SND_SB8
296 tristate "Sound Blaster 1.0/2.0/Pro (8-bit)" 319 tristate "Sound Blaster 1.0/2.0/Pro (8-bit)"
297 depends on SND 320 depends on SND
diff --git a/sound/isa/Makefile b/sound/isa/Makefile
index 05724eb7bfe4..bb317ccc170f 100644
--- a/sound/isa/Makefile
+++ b/sound/isa/Makefile
@@ -3,6 +3,7 @@
3# Copyright (c) 2001 by Jaroslav Kysela <perex@suse.cz> 3# Copyright (c) 2001 by Jaroslav Kysela <perex@suse.cz>
4# 4#
5 5
6snd-adlib-objs := adlib.o
6snd-als100-objs := als100.o 7snd-als100-objs := als100.o
7snd-azt2320-objs := azt2320.o 8snd-azt2320-objs := azt2320.o
8snd-cmi8330-objs := cmi8330.o 9snd-cmi8330-objs := cmi8330.o
@@ -13,6 +14,7 @@ snd-sgalaxy-objs := sgalaxy.o
13snd-sscape-objs := sscape.o 14snd-sscape-objs := sscape.o
14 15
15# Toplevel Module Dependency 16# Toplevel Module Dependency
17obj-$(CONFIG_SND_ADLIB) += snd-adlib.o
16obj-$(CONFIG_SND_ALS100) += snd-als100.o 18obj-$(CONFIG_SND_ALS100) += snd-als100.o
17obj-$(CONFIG_SND_AZT2320) += snd-azt2320.o 19obj-$(CONFIG_SND_AZT2320) += snd-azt2320.o
18obj-$(CONFIG_SND_CMI8330) += snd-cmi8330.o 20obj-$(CONFIG_SND_CMI8330) += snd-cmi8330.o
diff --git a/sound/isa/adlib.c b/sound/isa/adlib.c
new file mode 100644
index 000000000000..a253a14e6a45
--- /dev/null
+++ b/sound/isa/adlib.c
@@ -0,0 +1,161 @@
1/*
2 * AdLib FM card driver.
3 */
4
5#include <sound/driver.h>
6#include <linux/kernel.h>
7#include <linux/module.h>
8#include <linux/platform_device.h>
9#include <sound/core.h>
10#include <sound/initval.h>
11#include <sound/opl3.h>
12
13#define CRD_NAME "AdLib FM"
14#define DRV_NAME "snd_adlib"
15
16MODULE_DESCRIPTION(CRD_NAME);
17MODULE_AUTHOR("Rene Herman");
18MODULE_LICENSE("GPL");
19
20static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
21static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
22static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE;
23static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
24
25module_param_array(index, int, NULL, 0444);
26MODULE_PARM_DESC(index, "Index value for " CRD_NAME " soundcard.");
27module_param_array(id, charp, NULL, 0444);
28MODULE_PARM_DESC(id, "ID string for " CRD_NAME " soundcard.");
29module_param_array(enable, bool, NULL, 0444);
30MODULE_PARM_DESC(enable, "Enable " CRD_NAME " soundcard.");
31module_param_array(port, long, NULL, 0444);
32MODULE_PARM_DESC(port, "Port # for " CRD_NAME " driver.");
33
34static struct platform_device *devices[SNDRV_CARDS];
35
36static void snd_adlib_free(struct snd_card *card)
37{
38 release_and_free_resource(card->private_data);
39}
40
41static int __devinit snd_adlib_probe(struct platform_device *device)
42{
43 struct snd_card *card;
44 struct snd_opl3 *opl3;
45
46 int error;
47 int i = device->id;
48
49 if (port[i] == SNDRV_AUTO_PORT) {
50 snd_printk(KERN_ERR DRV_NAME ": please specify port\n");
51 error = -EINVAL;
52 goto out0;
53 }
54
55 card = snd_card_new(index[i], id[i], THIS_MODULE, 0);
56 if (!card) {
57 snd_printk(KERN_ERR DRV_NAME ": could not create card\n");
58 error = -EINVAL;
59 goto out0;
60 }
61
62 card->private_data = request_region(port[i], 4, CRD_NAME);
63 if (!card->private_data) {
64 snd_printk(KERN_ERR DRV_NAME ": could not grab ports\n");
65 error = -EBUSY;
66 goto out1;
67 }
68 card->private_free = snd_adlib_free;
69
70 error = snd_opl3_create(card, port[i], port[i] + 2, OPL3_HW_AUTO, 1, &opl3);
71 if (error < 0) {
72 snd_printk(KERN_ERR DRV_NAME ": could not create OPL\n");
73 goto out1;
74 }
75
76 error = snd_opl3_hwdep_new(opl3, 0, 0, NULL);
77 if (error < 0) {
78 snd_printk(KERN_ERR DRV_NAME ": could not create FM\n");
79 goto out1;
80 }
81
82 strcpy(card->driver, DRV_NAME);
83 strcpy(card->shortname, CRD_NAME);
84 sprintf(card->longname, CRD_NAME " at %#lx", port[i]);
85
86 snd_card_set_dev(card, &device->dev);
87
88 error = snd_card_register(card);
89 if (error < 0) {
90 snd_printk(KERN_ERR DRV_NAME ": could not register card\n");
91 goto out1;
92 }
93
94 platform_set_drvdata(device, card);
95 return 0;
96
97out1: snd_card_free(card);
98 out0: error = -EINVAL; /* FIXME: should be the original error code */
99 return error;
100}
101
102static int __devexit snd_adlib_remove(struct platform_device *device)
103{
104 snd_card_free(platform_get_drvdata(device));
105 platform_set_drvdata(device, NULL);
106 return 0;
107}
108
109static struct platform_driver snd_adlib_driver = {
110 .probe = snd_adlib_probe,
111 .remove = __devexit_p(snd_adlib_remove),
112
113 .driver = {
114 .name = DRV_NAME
115 }
116};
117
118static int __init alsa_card_adlib_init(void)
119{
120 int i, cards;
121
122 if (platform_driver_register(&snd_adlib_driver) < 0) {
123 snd_printk(KERN_ERR DRV_NAME ": could not register driver\n");
124 return -ENODEV;
125 }
126
127 for (cards = 0, i = 0; i < SNDRV_CARDS; i++) {
128 struct platform_device *device;
129
130 if (!enable[i])
131 continue;
132
133 device = platform_device_register_simple(DRV_NAME, i, NULL, 0);
134 if (IS_ERR(device))
135 continue;
136
137 devices[i] = device;
138 cards++;
139 }
140
141 if (!cards) {
142#ifdef MODULE
143 printk(KERN_ERR CRD_NAME " soundcard not found or device busy\n");
144#endif
145 platform_driver_unregister(&snd_adlib_driver);
146 return -ENODEV;
147 }
148 return 0;
149}
150
151static void __exit alsa_card_adlib_exit(void)
152{
153 int i;
154
155 for (i = 0; i < SNDRV_CARDS; i++)
156 platform_device_unregister(devices[i]);
157 platform_driver_unregister(&snd_adlib_driver);
158}
159
160module_init(alsa_card_adlib_init);
161module_exit(alsa_card_adlib_exit);
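The heart of the new snd-adlib probe is the OPL3 bring-up: create the OPL3 object on the user-supplied port pair, then expose it as a hwdep device so user space can program the FM synth. A condensed fragment of that sequence, assuming port[i] was supplied on the module command line (the probe rejects SNDRV_AUTO_PORT):

error = snd_opl3_create(card, port[i], port[i] + 2, OPL3_HW_AUTO, 1, &opl3);
if (error < 0)
	return error;				/* no OPL3 responded at that port */
error = snd_opl3_hwdep_new(opl3, 0, 0, NULL);	/* FM access for user space via hwdep */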
diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
index fa63048a8b9d..bc0f5ebf5d3c 100644
--- a/sound/isa/cmi8330.c
+++ b/sound/isa/cmi8330.c
@@ -693,9 +693,9 @@ static int __init alsa_card_cmi8330_init(void)
693 if ((err = platform_driver_register(&snd_cmi8330_driver)) < 0) 693 if ((err = platform_driver_register(&snd_cmi8330_driver)) < 0)
694 return err; 694 return err;
695 695
696 for (i = 0; i < SNDRV_CARDS && enable[i]; i++) { 696 for (i = 0; i < SNDRV_CARDS; i++) {
697 struct platform_device *device; 697 struct platform_device *device;
698 if (is_isapnp_selected(i)) 698 if (! enable[i] || is_isapnp_selected(i))
699 continue; 699 continue;
700 device = platform_device_register_simple(CMI8330_DRIVER, 700 device = platform_device_register_simple(CMI8330_DRIVER,
701 i, NULL, 0); 701 i, NULL, 0);
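The cmi8330 hunk fixes an early-termination bug: the old loop header 'for (i = 0; i < SNDRV_CARDS && enable[i]; i++)' stopped scanning at the first disabled slot, so a setting such as enable=0,1 never probed card 1. The corrected pattern scans every slot and skips the uninteresting ones, as a sketch:

for (i = 0; i < SNDRV_CARDS; i++) {
	if (! enable[i] || is_isapnp_selected(i))
		continue;	/* skip disabled slots and those handled via ISAPnP */
	/* register the platform device for slot i, as in the hunk above */
}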
diff --git a/sound/isa/opti9xx/Makefile b/sound/isa/opti9xx/Makefile
index 28c64070cd56..0e41bfd5a403 100644
--- a/sound/isa/opti9xx/Makefile
+++ b/sound/isa/opti9xx/Makefile
@@ -6,8 +6,10 @@
6snd-opti92x-ad1848-objs := opti92x-ad1848.o 6snd-opti92x-ad1848-objs := opti92x-ad1848.o
7snd-opti92x-cs4231-objs := opti92x-cs4231.o 7snd-opti92x-cs4231-objs := opti92x-cs4231.o
8snd-opti93x-objs := opti93x.o 8snd-opti93x-objs := opti93x.o
9snd-miro-objs := miro.o
9 10
10# Toplevel Module Dependency 11# Toplevel Module Dependency
11obj-$(CONFIG_SND_OPTI92X_AD1848) += snd-opti92x-ad1848.o 12obj-$(CONFIG_SND_OPTI92X_AD1848) += snd-opti92x-ad1848.o
12obj-$(CONFIG_SND_OPTI92X_CS4231) += snd-opti92x-cs4231.o 13obj-$(CONFIG_SND_OPTI92X_CS4231) += snd-opti92x-cs4231.o
13obj-$(CONFIG_SND_OPTI93X) += snd-opti93x.o 14obj-$(CONFIG_SND_OPTI93X) += snd-opti93x.o
15obj-$(CONFIG_SND_MIRO) += snd-miro.o
diff --git a/sound/isa/opti9xx/miro.c b/sound/isa/opti9xx/miro.c
new file mode 100644
index 000000000000..09384d03dc31
--- /dev/null
+++ b/sound/isa/opti9xx/miro.c
@@ -0,0 +1,1455 @@
1/*
2 * ALSA soundcard driver for Miro miroSOUND PCM1 pro
3 * miroSOUND PCM12
4 * miroSOUND PCM20 Radio
5 *
6 * Copyright (C) 2004-2005 Martin Langer <martin-langer@gmx.de>
7 *
8 * Based on OSS ACI and ALSA OPTi9xx drivers
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24
25#include <sound/driver.h>
26#include <linux/init.h>
27#include <linux/err.h>
28#include <linux/platform_device.h>
29#include <linux/delay.h>
30#include <linux/slab.h>
31#include <linux/ioport.h>
32#include <linux/moduleparam.h>
33#include <asm/io.h>
34#include <asm/dma.h>
35#include <sound/core.h>
36#include <sound/cs4231.h>
37#include <sound/mpu401.h>
38#include <sound/opl4.h>
39#include <sound/control.h>
40#include <sound/info.h>
41#define SNDRV_LEGACY_FIND_FREE_IRQ
42#define SNDRV_LEGACY_FIND_FREE_DMA
43#include <sound/initval.h>
44#include "miro.h"
45
46MODULE_AUTHOR("Martin Langer <martin-langer@gmx.de>");
47MODULE_LICENSE("GPL");
48MODULE_DESCRIPTION("Miro miroSOUND PCM1 pro, PCM12, PCM20 Radio");
49MODULE_SUPPORTED_DEVICE("{{Miro,miroSOUND PCM1 pro}, "
50 "{Miro,miroSOUND PCM12}, "
51 "{Miro,miroSOUND PCM20 Radio}}");
52
53static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
54static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
55static long port = SNDRV_DEFAULT_PORT1; /* 0x530,0xe80,0xf40,0x604 */
56static long mpu_port = SNDRV_DEFAULT_PORT1; /* 0x300,0x310,0x320,0x330 */
57static long fm_port = SNDRV_DEFAULT_PORT1; /* 0x388 */
58static int irq = SNDRV_DEFAULT_IRQ1; /* 5,7,9,10,11 */
59static int mpu_irq = SNDRV_DEFAULT_IRQ1; /* 5,7,9,10 */
60static int dma1 = SNDRV_DEFAULT_DMA1; /* 0,1,3 */
61static int dma2 = SNDRV_DEFAULT_DMA1; /* 0,1,3 */
62static int wss;
63static int ide;
64
65module_param(index, int, 0444);
66MODULE_PARM_DESC(index, "Index value for miro soundcard.");
67module_param(id, charp, 0444);
68MODULE_PARM_DESC(id, "ID string for miro soundcard.");
69module_param(port, long, 0444);
70MODULE_PARM_DESC(port, "WSS port # for miro driver.");
71module_param(mpu_port, long, 0444);
72MODULE_PARM_DESC(mpu_port, "MPU-401 port # for miro driver.");
73module_param(fm_port, long, 0444);
74MODULE_PARM_DESC(fm_port, "FM Port # for miro driver.");
75module_param(irq, int, 0444);
76MODULE_PARM_DESC(irq, "WSS irq # for miro driver.");
77module_param(mpu_irq, int, 0444);
78MODULE_PARM_DESC(mpu_irq, "MPU-401 irq # for miro driver.");
79module_param(dma1, int, 0444);
80MODULE_PARM_DESC(dma1, "1st dma # for miro driver.");
81module_param(dma2, int, 0444);
82MODULE_PARM_DESC(dma2, "2nd dma # for miro driver.");
83module_param(wss, int, 0444);
84MODULE_PARM_DESC(wss, "wss mode");
85module_param(ide, int, 0444);
86MODULE_PARM_DESC(ide, "enable ide port");
87
88#define OPTi9XX_HW_DETECT 0
89#define OPTi9XX_HW_82C928 1
90#define OPTi9XX_HW_82C929 2
91#define OPTi9XX_HW_82C924 3
92#define OPTi9XX_HW_82C925 4
93#define OPTi9XX_HW_82C930 5
94#define OPTi9XX_HW_82C931 6
95#define OPTi9XX_HW_82C933 7
96#define OPTi9XX_HW_LAST OPTi9XX_HW_82C933
97
98#define OPTi9XX_MC_REG(n) n
99
100
101struct snd_miro {
102 unsigned short hardware;
103 unsigned char password;
104 char name[7];
105
106 struct resource *res_mc_base;
107 struct resource *res_aci_port;
108
109 unsigned long mc_base;
110 unsigned long mc_base_size;
111 unsigned long pwd_reg;
112
113 spinlock_t lock;
114 struct snd_card *card;
115 struct snd_pcm *pcm;
116
117 long wss_base;
118 int irq;
119 int dma1;
120 int dma2;
121
122 long fm_port;
123
124 long mpu_port;
125 int mpu_irq;
126
127 unsigned long aci_port;
128 int aci_vendor;
129 int aci_product;
130 int aci_version;
131 int aci_amp;
132 int aci_preamp;
133 int aci_solomode;
134
135 struct mutex aci_mutex;
136};
137
138static void snd_miro_proc_init(struct snd_miro * miro);
139
140#define DRIVER_NAME "snd-miro"
141
142static struct platform_device *device;
143
144static char * snd_opti9xx_names[] = {
145	"unknown",
146 "82C928", "82C929",
147 "82C924", "82C925",
148 "82C930", "82C931", "82C933"
149};
150
151/*
152 * ACI control
153 */
154
155static int aci_busy_wait(struct snd_miro * miro)
156{
157 long timeout;
158 unsigned char byte;
159
160 for (timeout = 1; timeout <= ACI_MINTIME+30; timeout++) {
161 if (((byte=inb(miro->aci_port + ACI_REG_BUSY)) & 1) == 0) {
162 if (timeout >= ACI_MINTIME)
163 snd_printd("aci ready in round %ld.\n",
164 timeout-ACI_MINTIME);
165 return byte;
166 }
167 if (timeout >= ACI_MINTIME) {
168 long out=10*HZ;
169 switch (timeout-ACI_MINTIME) {
170 case 0 ... 9:
171 out /= 10;
172 case 10 ... 19:
173 out /= 10;
174 case 20 ... 30:
175 out /= 10;
176 default:
177 set_current_state(TASK_UNINTERRUPTIBLE);
178 schedule_timeout(out);
179 break;
180 }
181 }
182 }
183 snd_printk(KERN_ERR "aci_busy_wait() time out\n");
184 return -EBUSY;
185}
186
187static inline int aci_write(struct snd_miro * miro, unsigned char byte)
188{
189 if (aci_busy_wait(miro) >= 0) {
190 outb(byte, miro->aci_port + ACI_REG_COMMAND);
191 return 0;
192 } else {
193 snd_printk(KERN_ERR "aci busy, aci_write(0x%x) stopped.\n", byte);
194 return -EBUSY;
195 }
196}
197
198static inline int aci_read(struct snd_miro * miro)
199{
200 unsigned char byte;
201
202 if (aci_busy_wait(miro) >= 0) {
203 byte=inb(miro->aci_port + ACI_REG_STATUS);
204 return byte;
205 } else {
206 snd_printk(KERN_ERR "aci busy, aci_read() stopped.\n");
207 return -EBUSY;
208 }
209}
210
211static int aci_cmd(struct snd_miro * miro, int write1, int write2, int write3)
212{
213 int write[] = {write1, write2, write3};
214 int value, i;
215
216 if (mutex_lock_interruptible(&miro->aci_mutex))
217 return -EINTR;
218
219 for (i=0; i<3; i++) {
220 if (write[i]< 0 || write[i] > 255)
221 break;
222 else {
223 value = aci_write(miro, write[i]);
224 if (value < 0)
225 goto out;
226 }
227 }
228
229 value = aci_read(miro);
230
231out: mutex_unlock(&miro->aci_mutex);
232 return value;
233}
234
235static int aci_getvalue(struct snd_miro * miro, unsigned char index)
236{
237 return aci_cmd(miro, ACI_STATUS, index, -1);
238}
239
240static int aci_setvalue(struct snd_miro * miro, unsigned char index, int value)
241{
242 return aci_cmd(miro, index, value, -1);
243}
244
245/*
246 * MIXER part
247 */
248
249static int snd_miro_info_capture(struct snd_kcontrol *kcontrol,
250 struct snd_ctl_elem_info *uinfo)
251{
252 uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
253 uinfo->count = 1;
254
255 return 0;
256}
257
258static int snd_miro_get_capture(struct snd_kcontrol *kcontrol,
259 struct snd_ctl_elem_value *ucontrol)
260{
261 struct snd_miro *miro = snd_kcontrol_chip(kcontrol);
262 int value;
263
264 if ((value = aci_getvalue(miro, ACI_S_GENERAL)) < 0) {
265 snd_printk(KERN_ERR "snd_miro_get_capture() failed: %d\n", value);
266 return value;
267 }
268
269 ucontrol->value.integer.value[0] = value & 0x20;
270
271 return 0;
272}
273
274static int snd_miro_put_capture(struct snd_kcontrol *kcontrol,
275 struct snd_ctl_elem_value *ucontrol)
276{
277 struct snd_miro *miro = snd_kcontrol_chip(kcontrol);
278 int change, value, error;
279
280 value = !(ucontrol->value.integer.value[0]);
281
282 if ((error = aci_setvalue(miro, ACI_SET_SOLOMODE, value)) < 0) {
283 snd_printk(KERN_ERR "snd_miro_put_capture() failed: %d\n", error);
284 return error;
285 }
286
287 change = (value != miro->aci_solomode);
288 miro->aci_solomode = value;
289
290 return change;
291}
292
293static int snd_miro_info_preamp(struct snd_kcontrol *kcontrol,
294 struct snd_ctl_elem_info *uinfo)
295{
296 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
297 uinfo->count = 1;
298 uinfo->value.integer.min = 0;
299 uinfo->value.integer.max = 3;
300
301 return 0;
302}
303
304static int snd_miro_get_preamp(struct snd_kcontrol *kcontrol,
305 struct snd_ctl_elem_value *ucontrol)
306{
307 struct snd_miro *miro = snd_kcontrol_chip(kcontrol);
308 int value;
309
310 if (miro->aci_version <= 176) {
311
312 /*
313 OSS says it's not readable with versions < 176.
314 But it doesn't work on my card,
315 which is a PCM12 with aci_version = 176.
316 */
317
318 ucontrol->value.integer.value[0] = miro->aci_preamp;
319 return 0;
320 }
321
322 if ((value = aci_getvalue(miro, ACI_GET_PREAMP)) < 0) {
323 snd_printk(KERN_ERR "snd_miro_get_preamp() failed: %d\n", value);
324 return value;
325 }
326
327 ucontrol->value.integer.value[0] = value;
328
329 return 0;
330}
331
332static int snd_miro_put_preamp(struct snd_kcontrol *kcontrol,
333 struct snd_ctl_elem_value *ucontrol)
334{
335 struct snd_miro *miro = snd_kcontrol_chip(kcontrol);
336 int error, value, change;
337
338 value = ucontrol->value.integer.value[0];
339
340 if ((error = aci_setvalue(miro, ACI_SET_PREAMP, value)) < 0) {
341 snd_printk(KERN_ERR "snd_miro_put_preamp() failed: %d\n", error);
342 return error;
343 }
344
345 change = (value != miro->aci_preamp);
346 miro->aci_preamp = value;
347
348 return change;
349}
350
351static int snd_miro_info_amp(struct snd_kcontrol *kcontrol,
352 struct snd_ctl_elem_info *uinfo)
353{
354 uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
355 uinfo->count = 1;
356
357 return 0;
358}
359
360static int snd_miro_get_amp(struct snd_kcontrol *kcontrol,
361 struct snd_ctl_elem_value *ucontrol)
362{
363 struct snd_miro *miro = snd_kcontrol_chip(kcontrol);
364 ucontrol->value.integer.value[0] = miro->aci_amp;
365
366 return 0;
367}
368
369static int snd_miro_put_amp(struct snd_kcontrol *kcontrol,
370 struct snd_ctl_elem_value *ucontrol)
371{
372 struct snd_miro *miro = snd_kcontrol_chip(kcontrol);
373 int error, value, change;
374
375 value = ucontrol->value.integer.value[0];
376
377 if ((error = aci_setvalue(miro, ACI_SET_POWERAMP, value)) < 0) {
378 snd_printk(KERN_ERR "snd_miro_put_amp() to %d failed: %d\n", value, error);
379 return error;
380 }
381
382 change = (value != miro->aci_amp);
383 miro->aci_amp = value;
384
385 return change;
386}
387
388#define MIRO_DOUBLE(ctl_name, ctl_index, get_right_reg, set_right_reg) \
389{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
390 .name = ctl_name, \
391 .index = ctl_index, \
392 .info = snd_miro_info_double, \
393 .get = snd_miro_get_double, \
394 .put = snd_miro_put_double, \
395 .private_value = get_right_reg | (set_right_reg << 8) \
396}
397
398static int snd_miro_info_double(struct snd_kcontrol *kcontrol,
399 struct snd_ctl_elem_info *uinfo)
400{
401 int reg = kcontrol->private_value & 0xff;
402
403 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
404 uinfo->count = 2;
405
406 if ((reg >= ACI_GET_EQ1) && (reg <= ACI_GET_EQ7)) {
407
408 /* equalizer elements */
409
410 uinfo->value.integer.min = - 0x7f;
411 uinfo->value.integer.max = 0x7f;
412 } else {
413
414 /* non-equalizer elements */
415
416 uinfo->value.integer.min = 0;
417 uinfo->value.integer.max = 0x20;
418 }
419
420 return 0;
421}
422
423static int snd_miro_get_double(struct snd_kcontrol *kcontrol,
424 struct snd_ctl_elem_value *uinfo)
425{
426 struct snd_miro *miro = snd_kcontrol_chip(kcontrol);
427 int left_val, right_val;
428
429 int right_reg = kcontrol->private_value & 0xff;
430 int left_reg = right_reg + 1;
431
432 if ((right_val = aci_getvalue(miro, right_reg)) < 0) {
433 snd_printk(KERN_ERR "aci_getvalue(%d) failed: %d\n", right_reg, right_val);
434 return right_val;
435 }
436
437 if ((left_val = aci_getvalue(miro, left_reg)) < 0) {
438 snd_printk(KERN_ERR "aci_getvalue(%d) failed: %d\n", left_reg, left_val);
439 return left_val;
440 }
441
442 if ((right_reg >= ACI_GET_EQ1) && (right_reg <= ACI_GET_EQ7)) {
443
444 /* equalizer elements */
445
446 if (left_val < 0x80) {
447 uinfo->value.integer.value[0] = left_val;
448 } else {
449 uinfo->value.integer.value[0] = 0x80 - left_val;
450 }
451
452 if (right_val < 0x80) {
453 uinfo->value.integer.value[1] = right_val;
454 } else {
455 uinfo->value.integer.value[1] = 0x80 - right_val;
456 }
457
458 } else {
459
460 /* non-equalizer elements */
461
462 uinfo->value.integer.value[0] = 0x20 - left_val;
463 uinfo->value.integer.value[1] = 0x20 - right_val;
464 }
465
466 return 0;
467}
468
469static int snd_miro_put_double(struct snd_kcontrol *kcontrol,
470 struct snd_ctl_elem_value *ucontrol)
471{
472 struct snd_miro *miro = snd_kcontrol_chip(kcontrol);
473 int left, right, left_old, right_old;
474 int setreg_left, setreg_right, getreg_left, getreg_right;
475 int change, error;
476
477 left = ucontrol->value.integer.value[0];
478 right = ucontrol->value.integer.value[1];
479
480 setreg_right = (kcontrol->private_value >> 8) & 0xff;
481 if (setreg_right == ACI_SET_MASTER) {
482 setreg_left = setreg_right + 1;
483 } else {
484 setreg_left = setreg_right + 8;
485 }
486
487 getreg_right = kcontrol->private_value & 0xff;
488 getreg_left = getreg_right + 1;
489
490 if ((left_old = aci_getvalue(miro, getreg_left)) < 0) {
491 snd_printk(KERN_ERR "aci_getvalue(%d) failed: %d\n", getreg_left, left_old);
492 return left_old;
493 }
494
495 if ((right_old = aci_getvalue(miro, getreg_right)) < 0) {
496 snd_printk(KERN_ERR "aci_getvalue(%d) failed: %d\n", getreg_right, right_old);
497 return right_old;
498 }
499
500 if ((getreg_right >= ACI_GET_EQ1) && (getreg_right <= ACI_GET_EQ7)) {
501
502 /* equalizer elements */
503
504 if (left_old > 0x80)
505 left_old = 0x80 - left_old;
506 if (right_old > 0x80)
507 right_old = 0x80 - right_old;
508
509 if (left >= 0) {
510 if ((error = aci_setvalue(miro, setreg_left, left)) < 0) {
511 snd_printk(KERN_ERR "aci_setvalue(%d) failed: %d\n",
512 left, error);
513 return error;
514 }
515 } else {
516 if ((error = aci_setvalue(miro, setreg_left, 0x80 - left)) < 0) {
517 snd_printk(KERN_ERR "aci_setvalue(%d) failed: %d\n",
518 0x80 - left, error);
519 return error;
520 }
521 }
522
523 if (right >= 0) {
524 if ((error = aci_setvalue(miro, setreg_right, right)) < 0) {
525 snd_printk(KERN_ERR "aci_setvalue(%d) failed: %d\n",
526 right, error);
527 return error;
528 }
529 } else {
530 if ((error = aci_setvalue(miro, setreg_right, 0x80 - right)) < 0) {
531 snd_printk(KERN_ERR "aci_setvalue(%d) failed: %d\n",
532 0x80 - right, error);
533 return error;
534 }
535 }
536
537 } else {
538
539 /* non-equalizer elements */
540
541 left_old = 0x20 - left_old;
542 right_old = 0x20 - right_old;
543
544 if ((error = aci_setvalue(miro, setreg_left, 0x20 - left)) < 0) {
545 snd_printk(KERN_ERR "aci_setvalue(%d) failed: %d\n",
546 0x20 - left, error);
547 return error;
548 }
549 if ((error = aci_setvalue(miro, setreg_right, 0x20 - right)) < 0) {
550 snd_printk(KERN_ERR "aci_setvalue(%d) failed: %d\n",
551 0x20 - right, error);
552 return error;
553 }
554 }
555
556 change = (left != left_old) || (right != right_old);
557
558 return change;
559}
560
561static struct snd_kcontrol_new snd_miro_controls[] = {
562MIRO_DOUBLE("Master Playback Volume", 0, ACI_GET_MASTER, ACI_SET_MASTER),
563MIRO_DOUBLE("Mic Playback Volume", 1, ACI_GET_MIC, ACI_SET_MIC),
564MIRO_DOUBLE("Line Playback Volume", 1, ACI_GET_LINE, ACI_SET_LINE),
565MIRO_DOUBLE("CD Playback Volume", 0, ACI_GET_CD, ACI_SET_CD),
566MIRO_DOUBLE("Synth Playback Volume", 0, ACI_GET_SYNTH, ACI_SET_SYNTH),
567MIRO_DOUBLE("PCM Playback Volume", 1, ACI_GET_PCM, ACI_SET_PCM),
568MIRO_DOUBLE("Aux Playback Volume", 2, ACI_GET_LINE2, ACI_SET_LINE2),
569};
570
571/* Equalizer with seven bands (only PCM20)
572 from -12dB up to +12dB on each band */
573static struct snd_kcontrol_new snd_miro_eq_controls[] = {
574MIRO_DOUBLE("Tone Control - 28 Hz", 0, ACI_GET_EQ1, ACI_SET_EQ1),
575MIRO_DOUBLE("Tone Control - 160 Hz", 0, ACI_GET_EQ2, ACI_SET_EQ2),
576MIRO_DOUBLE("Tone Control - 400 Hz", 0, ACI_GET_EQ3, ACI_SET_EQ3),
577MIRO_DOUBLE("Tone Control - 1 kHz", 0, ACI_GET_EQ4, ACI_SET_EQ4),
578MIRO_DOUBLE("Tone Control - 2.5 kHz", 0, ACI_GET_EQ5, ACI_SET_EQ5),
579MIRO_DOUBLE("Tone Control - 6.3 kHz", 0, ACI_GET_EQ6, ACI_SET_EQ6),
580MIRO_DOUBLE("Tone Control - 16 kHz", 0, ACI_GET_EQ7, ACI_SET_EQ7),
581};
582
583static struct snd_kcontrol_new snd_miro_radio_control[] = {
584MIRO_DOUBLE("Radio Playback Volume", 0, ACI_GET_LINE1, ACI_SET_LINE1),
585};
586
587static struct snd_kcontrol_new snd_miro_line_control[] = {
588MIRO_DOUBLE("Line Playback Volume", 2, ACI_GET_LINE1, ACI_SET_LINE1),
589};
590
591static struct snd_kcontrol_new snd_miro_preamp_control[] = {
592{
593 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
594 .name = "Mic Boost",
595 .index = 1,
596 .info = snd_miro_info_preamp,
597 .get = snd_miro_get_preamp,
598 .put = snd_miro_put_preamp,
599}};
600
601static struct snd_kcontrol_new snd_miro_amp_control[] = {
602{
603 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
604 .name = "Line Boost",
605 .index = 0,
606 .info = snd_miro_info_amp,
607 .get = snd_miro_get_amp,
608 .put = snd_miro_put_amp,
609}};
610
611static struct snd_kcontrol_new snd_miro_capture_control[] = {
612{
613 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
614 .name = "PCM Capture Switch",
615 .index = 0,
616 .info = snd_miro_info_capture,
617 .get = snd_miro_get_capture,
618 .put = snd_miro_put_capture,
619}};
620
621static unsigned char aci_init_values[][2] __initdata = {
622 { ACI_SET_MUTE, 0x00 },
623 { ACI_SET_POWERAMP, 0x00 },
624 { ACI_SET_PREAMP, 0x00 },
625 { ACI_SET_SOLOMODE, 0x00 },
626 { ACI_SET_MIC + 0, 0x20 },
627 { ACI_SET_MIC + 8, 0x20 },
628 { ACI_SET_LINE + 0, 0x20 },
629 { ACI_SET_LINE + 8, 0x20 },
630 { ACI_SET_CD + 0, 0x20 },
631 { ACI_SET_CD + 8, 0x20 },
632 { ACI_SET_PCM + 0, 0x20 },
633 { ACI_SET_PCM + 8, 0x20 },
634 { ACI_SET_LINE1 + 0, 0x20 },
635 { ACI_SET_LINE1 + 8, 0x20 },
636 { ACI_SET_LINE2 + 0, 0x20 },
637 { ACI_SET_LINE2 + 8, 0x20 },
638 { ACI_SET_SYNTH + 0, 0x20 },
639 { ACI_SET_SYNTH + 8, 0x20 },
640 { ACI_SET_MASTER + 0, 0x20 },
641 { ACI_SET_MASTER + 1, 0x20 },
642};
643
644static int __init snd_set_aci_init_values(struct snd_miro *miro)
645{
646 int idx, error;
647
648 /* enable WSS on PCM1 */
649
650 if ((miro->aci_product == 'A') && wss) {
651 if ((error = aci_setvalue(miro, ACI_SET_WSS, wss)) < 0) {
652 snd_printk(KERN_ERR "enabling WSS mode failed\n");
653 return error;
654 }
655 }
656
657 /* enable IDE port */
658
659 if (ide) {
660 if ((error = aci_setvalue(miro, ACI_SET_IDE, ide)) < 0) {
661 snd_printk(KERN_ERR "enabling IDE port failed\n");
662 return error;
663 }
664 }
665
666 /* set common aci values */
667
668 for (idx = 0; idx < ARRAY_SIZE(aci_init_values); idx++)
669 if ((error = aci_setvalue(miro, aci_init_values[idx][0],
670 aci_init_values[idx][1])) < 0) {
671 snd_printk(KERN_ERR "aci_setvalue(%d) failed: %d\n",
672 aci_init_values[idx][0], error);
673 return error;
674 }
675
676 miro->aci_amp = 0;
677 miro->aci_preamp = 0;
678 miro->aci_solomode = 1;
679
680 return 0;
681}
682
683static int snd_miro_mixer(struct snd_miro *miro)
684{
685 struct snd_card *card;
686 unsigned int idx;
687 int err;
688
689 snd_assert(miro != NULL && miro->card != NULL, return -EINVAL);
690
691 card = miro->card;
692
693 switch (miro->hardware) {
694 case OPTi9XX_HW_82C924:
695 strcpy(card->mixername, "ACI & OPTi924");
696 break;
697 case OPTi9XX_HW_82C929:
698 strcpy(card->mixername, "ACI & OPTi929");
699 break;
700 default:
701 snd_BUG();
702 break;
703 }
704
705 for (idx = 0; idx < ARRAY_SIZE(snd_miro_controls); idx++) {
706 if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_miro_controls[idx], miro))) < 0)
707 return err;
708 }
709
710 if ((miro->aci_product == 'A') || (miro->aci_product == 'B')) {
711 /* PCM1/PCM12 with power-amp and Line 2 */
712 if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_miro_line_control[0], miro))) < 0)
713 return err;
714 if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_miro_amp_control[0], miro))) < 0)
715 return err;
716 }
717
718 if ((miro->aci_product == 'B') || (miro->aci_product == 'C')) {
719 /* PCM12/PCM20 with mic-preamp */
720 if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_miro_preamp_control[0], miro))) < 0)
721 return err;
722 if (miro->aci_version >= 176)
723 if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_miro_capture_control[0], miro))) < 0)
724 return err;
725 }
726
727 if (miro->aci_product == 'C') {
728 /* PCM20 with radio and 7 band equalizer */
729 if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_miro_radio_control[0], miro))) < 0)
730 return err;
731 for (idx = 0; idx < ARRAY_SIZE(snd_miro_eq_controls); idx++) {
732 if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_miro_eq_controls[idx], miro))) < 0)
733 return err;
734 }
735 }
736
737 return 0;
738}
739
740static long snd_legacy_find_free_ioport(long *port_table, long size)
741{
742 while (*port_table != -1) {
743 struct resource *res;
744 if ((res = request_region(*port_table, size,
745 "ALSA test")) != NULL) {
746 release_and_free_resource(res);
747 return *port_table;
748 }
749 port_table++;
750 }
751 return -1;
752}
753
754static int __init snd_miro_init(struct snd_miro *chip, unsigned short hardware)
755{
756 static int opti9xx_mc_size[] = {7, 7, 10, 10, 2, 2, 2};
757
758 chip->hardware = hardware;
759 strcpy(chip->name, snd_opti9xx_names[hardware]);
760
761 chip->mc_base_size = opti9xx_mc_size[hardware];
762
763 spin_lock_init(&chip->lock);
764
765 chip->wss_base = -1;
766 chip->irq = -1;
767 chip->dma1 = -1;
768 chip->dma2 = -1;
769 chip->fm_port = -1;
770 chip->mpu_port = -1;
771 chip->mpu_irq = -1;
772
773 switch (hardware) {
774 case OPTi9XX_HW_82C929:
775 chip->mc_base = 0xf8c;
776 chip->password = 0xe3;
777 chip->pwd_reg = 3;
778 break;
779
780 case OPTi9XX_HW_82C924:
781 chip->mc_base = 0xf8c;
782 chip->password = 0xe5;
783 chip->pwd_reg = 3;
784 break;
785
786 default:
787 snd_printk(KERN_ERR "sorry, no support for %d\n", hardware);
788 return -ENODEV;
789 }
790
791 return 0;
792}
793
794static unsigned char snd_miro_read(struct snd_miro *chip,
795 unsigned char reg)
796{
797 unsigned long flags;
798 unsigned char retval = 0xff;
799
800 spin_lock_irqsave(&chip->lock, flags);
801 outb(chip->password, chip->mc_base + chip->pwd_reg);
802
803 switch (chip->hardware) {
804 case OPTi9XX_HW_82C924:
805 if (reg > 7) {
806 outb(reg, chip->mc_base + 8);
807 outb(chip->password, chip->mc_base + chip->pwd_reg);
808 retval = inb(chip->mc_base + 9);
809 break;
810 }
811
812 case OPTi9XX_HW_82C929:
813 retval = inb(chip->mc_base + reg);
814 break;
815
816 default:
817 snd_printk(KERN_ERR "sorry, no support for %d\n", chip->hardware);
818 }
819
820 spin_unlock_irqrestore(&chip->lock, flags);
821 return retval;
822}
823
824static void snd_miro_write(struct snd_miro *chip, unsigned char reg,
825 unsigned char value)
826{
827 unsigned long flags;
828
829 spin_lock_irqsave(&chip->lock, flags);
830 outb(chip->password, chip->mc_base + chip->pwd_reg);
831
832 switch (chip->hardware) {
833 case OPTi9XX_HW_82C924:
834 if (reg > 7) {
835 outb(reg, chip->mc_base + 8);
836 outb(chip->password, chip->mc_base + chip->pwd_reg);
837 outb(value, chip->mc_base + 9);
838 break;
839 }
840
841 case OPTi9XX_HW_82C929:
842 outb(value, chip->mc_base + reg);
843 break;
844
845 default:
846 snd_printk(KERN_ERR "sorry, no support for %d\n", chip->hardware);
847 }
848
849 spin_unlock_irqrestore(&chip->lock, flags);
850}
851
852
853#define snd_miro_write_mask(chip, reg, value, mask) \
854 snd_miro_write(chip, reg, \
855 (snd_miro_read(chip, reg) & ~(mask)) | ((value) & (mask)))
856
857/*
858 * Proc Interface
859 */
860
861static void snd_miro_proc_read(struct snd_info_entry * entry,
862 struct snd_info_buffer *buffer)
863{
864 struct snd_miro *miro = (struct snd_miro *) entry->private_data;
865 char* model = "unknown";
866
867 /* miroSOUND PCM1 pro, early PCM12 */
868
869 if ((miro->hardware == OPTi9XX_HW_82C929) &&
870 (miro->aci_vendor == 'm') &&
871 (miro->aci_product == 'A')) {
872 switch(miro->aci_version) {
873 case 3:
874 model = "miroSOUND PCM1 pro";
875 break;
876 default:
877 model = "miroSOUND PCM1 pro / (early) PCM12";
878 break;
879 }
880 }
881
882 /* miroSOUND PCM12, PCM12 (Rev. E), PCM12 pnp */
883
884 if ((miro->hardware == OPTi9XX_HW_82C924) &&
885 (miro->aci_vendor == 'm') &&
886 (miro->aci_product == 'B')) {
887 switch(miro->aci_version) {
888 case 4:
889 model = "miroSOUND PCM12";
890 break;
891 case 176:
892 model = "miroSOUND PCM12 (Rev. E)";
893 break;
894 default:
895 model = "miroSOUND PCM12 / PCM12 pnp";
896 break;
897 }
898 }
899
900 /* miroSOUND PCM20 radio */
901
902 if ((miro->hardware == OPTi9XX_HW_82C924) &&
903 (miro->aci_vendor == 'm') &&
904 (miro->aci_product == 'C')) {
905 switch(miro->aci_version) {
906 case 7:
907 model = "miroSOUND PCM20 radio (Rev. E)";
908 break;
909 default:
910 model = "miroSOUND PCM20 radio";
911 break;
912 }
913 }
914
915 snd_iprintf(buffer, "\nGeneral information:\n");
916 snd_iprintf(buffer, " model : %s\n", model);
917 snd_iprintf(buffer, " opti : %s\n", miro->name);
918 snd_iprintf(buffer, " codec : %s\n", miro->pcm->name);
919 snd_iprintf(buffer, " port : 0x%lx\n", miro->wss_base);
920 snd_iprintf(buffer, " irq : %d\n", miro->irq);
921 snd_iprintf(buffer, " dma : %d,%d\n\n", miro->dma1, miro->dma2);
922
923 snd_iprintf(buffer, "MPU-401:\n");
924 snd_iprintf(buffer, " port : 0x%lx\n", miro->mpu_port);
925 snd_iprintf(buffer, " irq : %d\n\n", miro->mpu_irq);
926
927 snd_iprintf(buffer, "ACI information:\n");
928 snd_iprintf(buffer, " vendor : ");
929 switch(miro->aci_vendor) {
930 case 'm':
931 snd_iprintf(buffer, "Miro\n");
932 break;
933 default:
934 snd_iprintf(buffer, "unknown (0x%x)\n", miro->aci_vendor);
935 break;
936 }
937
938 snd_iprintf(buffer, " product : ");
939 switch(miro->aci_product) {
940 case 'A':
941 snd_iprintf(buffer, "miroSOUND PCM1 pro / (early) PCM12\n");
942 break;
943 case 'B':
944 snd_iprintf(buffer, "miroSOUND PCM12\n");
945 break;
946 case 'C':
947 snd_iprintf(buffer, "miroSOUND PCM20 radio\n");
948 break;
949 default:
950 snd_iprintf(buffer, "unknown (0x%x)\n", miro->aci_product);
951 break;
952 }
953
954 snd_iprintf(buffer, " firmware: %d (0x%x)\n",
955 miro->aci_version, miro->aci_version);
956 snd_iprintf(buffer, " port : 0x%lx-0x%lx\n",
957 miro->aci_port, miro->aci_port+2);
958 snd_iprintf(buffer, " wss : 0x%x\n", wss);
959 snd_iprintf(buffer, " ide : 0x%x\n", ide);
960 snd_iprintf(buffer, " solomode: 0x%x\n", miro->aci_solomode);
961 snd_iprintf(buffer, " amp : 0x%x\n", miro->aci_amp);
962 snd_iprintf(buffer, " preamp : 0x%x\n", miro->aci_preamp);
963}
964
965static void __init snd_miro_proc_init(struct snd_miro * miro)
966{
967 struct snd_info_entry *entry;
968
969 if (! snd_card_proc_new(miro->card, "miro", &entry))
970 snd_info_set_text_ops(entry, miro, 1024, snd_miro_proc_read);
971}
972
973/*
974 * Init
975 */
976
977static int __init snd_miro_configure(struct snd_miro *chip)
978{
979 unsigned char wss_base_bits;
980 unsigned char irq_bits;
981 unsigned char dma_bits;
982 unsigned char mpu_port_bits = 0;
983 unsigned char mpu_irq_bits;
984 unsigned long flags;
985
986 switch (chip->hardware) {
987 case OPTi9XX_HW_82C924:
988 snd_miro_write_mask(chip, OPTi9XX_MC_REG(6), 0x02, 0x02);
989 snd_miro_write_mask(chip, OPTi9XX_MC_REG(1), 0x80, 0x80);
990 snd_miro_write_mask(chip, OPTi9XX_MC_REG(2), 0x20, 0x20); /* OPL4 */
991 snd_miro_write_mask(chip, OPTi9XX_MC_REG(3), 0xf0, 0xff);
992 snd_miro_write_mask(chip, OPTi9XX_MC_REG(5), 0x02, 0x02);
993 break;
994 case OPTi9XX_HW_82C929:
995 /* untested init commands for OPTi929 */
996 snd_miro_write_mask(chip, OPTi9XX_MC_REG(1), 0x80, 0x80);
997 snd_miro_write_mask(chip, OPTi9XX_MC_REG(2), 0x20, 0x20); /* OPL4 */
998 snd_miro_write_mask(chip, OPTi9XX_MC_REG(4), 0x00, 0x0c);
999 snd_miro_write_mask(chip, OPTi9XX_MC_REG(5), 0x02, 0x02);
1000 break;
1001 default:
1002 snd_printk(KERN_ERR "chip %d not supported\n", chip->hardware);
1003 return -EINVAL;
1004 }
1005
1006 switch (chip->wss_base) {
1007 case 0x530:
1008 wss_base_bits = 0x00;
1009 break;
1010 case 0x604:
1011 wss_base_bits = 0x03;
1012 break;
1013 case 0xe80:
1014 wss_base_bits = 0x01;
1015 break;
1016 case 0xf40:
1017 wss_base_bits = 0x02;
1018 break;
1019 default:
1020 snd_printk(KERN_ERR "WSS port 0x%lx not valid\n", chip->wss_base);
1021 goto __skip_base;
1022 }
1023 snd_miro_write_mask(chip, OPTi9XX_MC_REG(1), wss_base_bits << 4, 0x30);
1024
1025__skip_base:
1026 switch (chip->irq) {
1027 case 5:
1028 irq_bits = 0x05;
1029 break;
1030 case 7:
1031 irq_bits = 0x01;
1032 break;
1033 case 9:
1034 irq_bits = 0x02;
1035 break;
1036 case 10:
1037 irq_bits = 0x03;
1038 break;
1039 case 11:
1040 irq_bits = 0x04;
1041 break;
1042 default:
1043 snd_printk(KERN_ERR "WSS irq # %d not valid\n", chip->irq);
1044 goto __skip_resources;
1045 }
1046
1047 switch (chip->dma1) {
1048 case 0:
1049 dma_bits = 0x01;
1050 break;
1051 case 1:
1052 dma_bits = 0x02;
1053 break;
1054 case 3:
1055 dma_bits = 0x03;
1056 break;
1057 default:
1058 snd_printk(KERN_ERR "WSS dma1 # %d not valid\n", chip->dma1);
1059 goto __skip_resources;
1060 }
1061
1062 if (chip->dma1 == chip->dma2) {
1063 snd_printk(KERN_ERR "don't want to share dmas\n");
1064 return -EBUSY;
1065 }
1066
1067 switch (chip->dma2) {
1068 case 0:
1069 case 1:
1070 break;
1071 default:
1072 snd_printk(KERN_ERR "WSS dma2 # %d not valid\n", chip->dma2);
1073 goto __skip_resources;
1074 }
1075 dma_bits |= 0x04;
1076
1077 spin_lock_irqsave(&chip->lock, flags);
1078 outb(irq_bits << 3 | dma_bits, chip->wss_base);
1079 spin_unlock_irqrestore(&chip->lock, flags);
1080
1081__skip_resources:
1082 if (chip->hardware > OPTi9XX_HW_82C928) {
1083 switch (chip->mpu_port) {
1084 case 0:
1085 case -1:
1086 break;
1087 case 0x300:
1088 mpu_port_bits = 0x03;
1089 break;
1090 case 0x310:
1091 mpu_port_bits = 0x02;
1092 break;
1093 case 0x320:
1094 mpu_port_bits = 0x01;
1095 break;
1096 case 0x330:
1097 mpu_port_bits = 0x00;
1098 break;
1099 default:
1100 snd_printk(KERN_ERR "MPU-401 port 0x%lx not valid\n",
1101 chip->mpu_port);
1102 goto __skip_mpu;
1103 }
1104
1105 switch (chip->mpu_irq) {
1106 case 5:
1107 mpu_irq_bits = 0x02;
1108 break;
1109 case 7:
1110 mpu_irq_bits = 0x03;
1111 break;
1112 case 9:
1113 mpu_irq_bits = 0x00;
1114 break;
1115 case 10:
1116 mpu_irq_bits = 0x01;
1117 break;
1118 default:
1119 snd_printk(KERN_ERR "MPU-401 irq # %d not valid\n",
1120 chip->mpu_irq);
1121 goto __skip_mpu;
1122 }
1123
1124 snd_miro_write_mask(chip, OPTi9XX_MC_REG(6),
1125 (chip->mpu_port <= 0) ? 0x00 :
1126 0x80 | mpu_port_bits << 5 | mpu_irq_bits << 3,
1127 0xf8);
1128 }
1129__skip_mpu:
1130
1131 return 0;
1132}
1133
1134static int __init snd_card_miro_detect(struct snd_card *card, struct snd_miro *chip)
1135{
1136 int i, err;
1137 unsigned char value;
1138
1139 for (i = OPTi9XX_HW_82C929; i <= OPTi9XX_HW_82C924; i++) {
1140
1141 if ((err = snd_miro_init(chip, i)) < 0)
1142 return err;
1143
1144 if ((chip->res_mc_base = request_region(chip->mc_base, chip->mc_base_size, "OPTi9xx MC")) == NULL)
1145 continue;
1146
1147 value = snd_miro_read(chip, OPTi9XX_MC_REG(1));
1148 if ((value != 0xff) && (value != inb(chip->mc_base + 1)))
1149 if (value == snd_miro_read(chip, OPTi9XX_MC_REG(1)))
1150 return 1;
1151
1152 release_and_free_resource(chip->res_mc_base);
1153 chip->res_mc_base = NULL;
1154
1155 }
1156
1157 return -ENODEV;
1158}
1159
1160static int __init snd_card_miro_aci_detect(struct snd_card *card, struct snd_miro * miro)
1161{
1162 unsigned char regval;
1163 int i;
1164
1165 mutex_init(&miro->aci_mutex);
1166
1167 /* get ACI port from OPTi9xx MC 4 */
1168
1169 miro->mc_base = 0xf8c;
1170 regval=inb(miro->mc_base + 4);
1171 miro->aci_port = (regval & 0x10) ? 0x344: 0x354;
1172
1173 if ((miro->res_aci_port = request_region(miro->aci_port, 3, "miro aci")) == NULL) {
1174 snd_printk(KERN_ERR "aci i/o area 0x%lx-0x%lx already used.\n",
1175 miro->aci_port, miro->aci_port+2);
1176 return -ENOMEM;
1177 }
1178
1179 /* force ACI into a known state */
1180 for (i = 0; i < 3; i++)
1181 if (aci_cmd(miro, ACI_ERROR_OP, -1, -1) < 0) {
1182 snd_card_free(card);
1183 snd_printk(KERN_ERR "can't force aci into known state.\n");
1184 return -ENXIO;
1185 }
1186
1187 if ((miro->aci_vendor=aci_cmd(miro, ACI_READ_IDCODE, -1, -1)) < 0 ||
1188 (miro->aci_product=aci_cmd(miro, ACI_READ_IDCODE, -1, -1)) < 0) {
1189 snd_card_free(card);
1190 snd_printk(KERN_ERR "can't read aci id on 0x%lx.\n", miro->aci_port);
1191 return -ENXIO;
1192 }
1193
1194 if ((miro->aci_version=aci_cmd(miro, ACI_READ_VERSION, -1, -1)) < 0) {
1195 snd_card_free(card);
1196 snd_printk(KERN_ERR "can't read aci version on 0x%lx.\n",
1197 miro->aci_port);
1198 return -ENXIO;
1199 }
1200
1201 if (aci_cmd(miro, ACI_INIT, -1, -1) < 0 ||
1202 aci_cmd(miro, ACI_ERROR_OP, ACI_ERROR_OP, ACI_ERROR_OP) < 0 ||
1203 aci_cmd(miro, ACI_ERROR_OP, ACI_ERROR_OP, ACI_ERROR_OP) < 0) {
1204 snd_printk(KERN_ERR "can't initialize aci.\n");
1205 return -ENXIO;
1206 }
1207
1208 return 0;
1209}
1210
1211static void snd_card_miro_free(struct snd_card *card)
1212{
1213 struct snd_miro *miro = card->private_data;
1214
1215 release_and_free_resource(miro->res_aci_port);
1216 release_and_free_resource(miro->res_mc_base);
1217}
1218
1219static int __init snd_miro_probe(struct platform_device *devptr)
1220{
1221 static long possible_ports[] = {0x530, 0xe80, 0xf40, 0x604, -1};
1222 static long possible_mpu_ports[] = {0x330, 0x300, 0x310, 0x320, -1};
1223 static int possible_irqs[] = {11, 9, 10, 7, -1};
1224 static int possible_mpu_irqs[] = {10, 5, 9, 7, -1};
1225 static int possible_dma1s[] = {3, 1, 0, -1};
1226 static int possible_dma2s[][2] = {{1,-1}, {0,-1}, {-1,-1}, {0,-1}};
1227
1228 int error;
1229 struct snd_miro *miro;
1230 struct snd_cs4231 *codec;
1231 struct snd_timer *timer;
1232 struct snd_card *card;
1233 struct snd_pcm *pcm;
1234 struct snd_rawmidi *rmidi;
1235
1236 if (!(card = snd_card_new(index, id, THIS_MODULE,
1237 sizeof(struct snd_miro))))
1238 return -ENOMEM;
1239
1240 card->private_free = snd_card_miro_free;
1241 miro = card->private_data;
1242 miro->card = card;
1243
1244 if ((error = snd_card_miro_aci_detect(card, miro)) < 0) {
1245 snd_card_free(card);
1246 snd_printk(KERN_ERR "unable to detect aci chip\n");
1247 return -ENODEV;
1248 }
1249
1250 /* init proc interface */
1251 snd_miro_proc_init(miro);
1252
1253 if ((error = snd_card_miro_detect(card, miro)) < 0) {
1254 snd_card_free(card);
1255 snd_printk(KERN_ERR "unable to detect OPTi9xx chip\n");
1256 return -ENODEV;
1257 }
1258
1259 if (! miro->res_mc_base &&
1260 (miro->res_mc_base = request_region(miro->mc_base, miro->mc_base_size,
1261 "miro (OPTi9xx MC)")) == NULL) {
1262 snd_card_free(card);
1263 snd_printk(KERN_ERR "request for OPTI9xx MC failed\n");
1264 return -ENOMEM;
1265 }
1266
1267 miro->wss_base = port;
1268 miro->fm_port = fm_port;
1269 miro->mpu_port = mpu_port;
1270 miro->irq = irq;
1271 miro->mpu_irq = mpu_irq;
1272 miro->dma1 = dma1;
1273 miro->dma2 = dma2;
1274
1275 if (miro->wss_base == SNDRV_AUTO_PORT) {
1276 if ((miro->wss_base = snd_legacy_find_free_ioport(possible_ports, 4)) < 0) {
1277 snd_card_free(card);
1278 snd_printk(KERN_ERR "unable to find a free WSS port\n");
1279 return -EBUSY;
1280 }
1281 }
1282
1283 if (miro->mpu_port == SNDRV_AUTO_PORT) {
1284 if ((miro->mpu_port = snd_legacy_find_free_ioport(possible_mpu_ports, 2)) < 0) {
1285 snd_card_free(card);
1286 snd_printk(KERN_ERR "unable to find a free MPU401 port\n");
1287 return -EBUSY;
1288 }
1289 }
1290 if (miro->irq == SNDRV_AUTO_IRQ) {
1291 if ((miro->irq = snd_legacy_find_free_irq(possible_irqs)) < 0) {
1292 snd_card_free(card);
1293 snd_printk(KERN_ERR "unable to find a free IRQ\n");
1294 return -EBUSY;
1295 }
1296 }
1297 if (miro->mpu_irq == SNDRV_AUTO_IRQ) {
1298 if ((miro->mpu_irq = snd_legacy_find_free_irq(possible_mpu_irqs)) < 0) {
1299 snd_card_free(card);
1300 snd_printk(KERN_ERR "unable to find a free MPU401 IRQ\n");
1301 return -EBUSY;
1302 }
1303 }
1304 if (miro->dma1 == SNDRV_AUTO_DMA) {
1305 if ((miro->dma1 = snd_legacy_find_free_dma(possible_dma1s)) < 0) {
1306 snd_card_free(card);
1307 snd_printk(KERN_ERR "unable to find a free DMA1\n");
1308 return -EBUSY;
1309 }
1310 }
1311 if (miro->dma2 == SNDRV_AUTO_DMA) {
1312 if ((miro->dma2 = snd_legacy_find_free_dma(possible_dma2s[miro->dma1 % 4])) < 0) {
1313 snd_card_free(card);
1314 snd_printk(KERN_ERR "unable to find a free DMA2\n");
1315 return -EBUSY;
1316 }
1317 }
1318
1319 if ((error = snd_miro_configure(miro))) {
1320 snd_card_free(card);
1321 return error;
1322 }
1323
1324 if ((error = snd_cs4231_create(card, miro->wss_base + 4, -1,
1325 miro->irq, miro->dma1, miro->dma2,
1326 CS4231_HW_AD1845,
1327 0,
1328 &codec)) < 0) {
1329 snd_card_free(card);
1330 return error;
1331 }
1332
1333 if ((error = snd_cs4231_pcm(codec, 0, &pcm)) < 0) {
1334 snd_card_free(card);
1335 return error;
1336 }
1337 if ((error = snd_cs4231_mixer(codec)) < 0) {
1338 snd_card_free(card);
1339 return error;
1340 }
1341 if ((error = snd_cs4231_timer(codec, 0, &timer)) < 0) {
1342 snd_card_free(card);
1343 return error;
1344 }
1345
1346 miro->pcm = pcm;
1347
1348 if ((error = snd_miro_mixer(miro)) < 0) {
1349 snd_card_free(card);
1350 return error;
1351 }
1352
1353 if (miro->aci_vendor == 'm') {
1354 /* It looks like a miro sound card. */
1355 switch (miro->aci_product) {
1356 case 'A':
1357 sprintf(card->shortname,
1358 "miroSOUND PCM1 pro / PCM12");
1359 break;
1360 case 'B':
1361 sprintf(card->shortname,
1362 "miroSOUND PCM12");
1363 break;
1364 case 'C':
1365 sprintf(card->shortname,
1366 "miroSOUND PCM20 radio");
1367 break;
1368 default:
1369 sprintf(card->shortname,
1370 "unknown miro");
1371 snd_printk(KERN_INFO "unknown miro aci id\n");
1372 break;
1373 }
1374 } else {
1375 snd_printk(KERN_INFO "found unsupported aci card\n");
1376 sprintf(card->shortname, "unknown Cardinal Technologies");
1377 }
1378
1379 strcpy(card->driver, "miro");
1380 sprintf(card->longname, "%s: OPTi%s, %s at 0x%lx, irq %d, dma %d&%d",
1381 card->shortname, miro->name, pcm->name, miro->wss_base + 4,
1382 miro->irq, miro->dma1, miro->dma2);
1383
1384 if (miro->mpu_port <= 0 || miro->mpu_port == SNDRV_AUTO_PORT)
1385 rmidi = NULL;
1386 else
1387 if ((error = snd_mpu401_uart_new(card, 0, MPU401_HW_MPU401,
1388 miro->mpu_port, 0, miro->mpu_irq, SA_INTERRUPT,
1389 &rmidi)))
1390 snd_printk(KERN_WARNING "no MPU-401 device at 0x%lx?\n", miro->mpu_port);
1391
1392 if (miro->fm_port > 0 && miro->fm_port != SNDRV_AUTO_PORT) {
1393 struct snd_opl3 *opl3 = NULL;
1394 struct snd_opl4 *opl4;
1395 if (snd_opl4_create(card, miro->fm_port, miro->fm_port - 8,
1396 2, &opl3, &opl4) < 0)
1397 snd_printk(KERN_WARNING "no OPL4 device at 0x%lx\n", miro->fm_port);
1398 }
1399
1400 if ((error = snd_set_aci_init_values(miro)) < 0) {
1401 snd_card_free(card);
1402 return error;
1403 }
1404
1405 snd_card_set_dev(card, &devptr->dev);
1406
1407 if ((error = snd_card_register(card))) {
1408 snd_card_free(card);
1409 return error;
1410 }
1411
1412 platform_set_drvdata(devptr, card);
1413 return 0;
1414}
1415
1416static int __devexit snd_miro_remove(struct platform_device *devptr)
1417{
1418 snd_card_free(platform_get_drvdata(devptr));
1419 platform_set_drvdata(devptr, NULL);
1420 return 0;
1421}
1422
1423static struct platform_driver snd_miro_driver = {
1424 .probe = snd_miro_probe,
1425 .remove = __devexit_p(snd_miro_remove),
1426 /* FIXME: suspend/resume */
1427 .driver = {
1428 .name = DRIVER_NAME
1429 },
1430};
1431
1432static int __init alsa_card_miro_init(void)
1433{
1434 int error;
1435
1436 if ((error = platform_driver_register(&snd_miro_driver)) < 0)
1437 return error;
1438 device = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
1439 if (! IS_ERR(device))
1440 return 0;
1441#ifdef MODULE
1442 printk(KERN_ERR "no miro soundcard found\n");
1443#endif
1444 platform_driver_unregister(&snd_miro_driver);
1445 return PTR_ERR(device);
1446}
1447
1448static void __exit alsa_card_miro_exit(void)
1449{
1450 platform_device_unregister(device);
1451 platform_driver_unregister(&snd_miro_driver);
1452}
1453
1454module_init(alsa_card_miro_init)
1455module_exit(alsa_card_miro_exit)
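
The ACI command protocol implemented above by aci_busy_wait(), aci_write(), aci_read() and aci_cmd() reduces to: poll the busy register until the device is idle, send up to three command bytes (a negative argument ends the sequence early), then read one status byte back. The fragment below is a minimal, self-contained sketch of that exchange; the device is simulated with a small in-memory register file so the sketch compiles and runs on its own, and the names aci_exchange, rd and wr are illustrative, not part of the driver — only the register offsets mirror miro.h.

/* Hypothetical sketch of the exchange performed by aci_cmd() above:
 * poll busy, write each valid command byte, then read the status byte. */
#include <stdio.h>

#define ACI_REG_COMMAND 0	/* write register offset, as in miro.h */
#define ACI_REG_STATUS  1	/* read register offset */
#define ACI_REG_BUSY    2	/* busy register offset */

static unsigned char regs[3];	/* fake ACI register file for the sketch */

static unsigned char rd(int off)		{ return regs[off]; }
static void wr(unsigned char v, int off)	{ regs[off] = v; }

static int aci_wait(void)
{
	int i;

	for (i = 0; i < 10000; i++)		/* simplified busy poll */
		if ((rd(ACI_REG_BUSY) & 1) == 0)
			return 0;
	return -1;				/* timed out */
}

/* Send up to three bytes (a negative value terminates the sequence early),
 * then return the single status byte, mirroring aci_cmd(). */
static int aci_exchange(int b1, int b2, int b3)
{
	int bytes[3] = { b1, b2, b3 };
	int i;

	for (i = 0; i < 3; i++) {
		if (bytes[i] < 0 || bytes[i] > 255)
			break;
		if (aci_wait() < 0)
			return -1;
		wr((unsigned char)bytes[i], ACI_REG_COMMAND);
	}
	return aci_wait() < 0 ? -1 : rd(ACI_REG_STATUS);
}

int main(void)
{
	regs[ACI_REG_STATUS] = 0x21;		/* pretend the device answered */
	printf("status: 0x%x\n", aci_exchange(0xf0, 0x00, -1));
	return 0;
}

In the real driver the same pattern runs under miro->aci_mutex and uses inb()/outb() on miro->aci_port; aci_getvalue() and aci_setvalue() are just two- and three-byte specialisations of it.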
diff --git a/sound/isa/opti9xx/miro.h b/sound/isa/opti9xx/miro.h
new file mode 100644
index 000000000000..6e1385b8e07e
--- /dev/null
+++ b/sound/isa/opti9xx/miro.h
@@ -0,0 +1,73 @@
1#ifndef _MIRO_H_
2#define _MIRO_H_
3
4#define ACI_REG_COMMAND 0 /* write register offset */
5#define ACI_REG_STATUS 1 /* read register offset */
6#define ACI_REG_BUSY 2 /* busy register offset */
7#define ACI_REG_RDS 2 /* PCM20: RDS register offset */
8#define ACI_MINTIME 500 /* ACI time out limit */
9
10#define ACI_SET_MUTE 0x0d
11#define ACI_SET_POWERAMP 0x0f
12#define ACI_SET_TUNERMUTE 0xa3
13#define ACI_SET_TUNERMONO 0xa4
14#define ACI_SET_IDE 0xd0
15#define ACI_SET_WSS 0xd1
16#define ACI_SET_SOLOMODE 0xd2
17#define ACI_SET_PREAMP 0x03
18#define ACI_GET_PREAMP 0x21
19#define ACI_WRITE_TUNE 0xa7
20#define ACI_READ_TUNERSTEREO 0xa8
21#define ACI_READ_TUNERSTATION 0xa9
22#define ACI_READ_VERSION 0xf1
23#define ACI_READ_IDCODE 0xf2
24#define ACI_INIT 0xff
25#define ACI_STATUS 0xf0
26#define ACI_S_GENERAL 0x00
27#define ACI_ERROR_OP 0xdf
28
29/* ACI Mixer */
30
31/* These are the values for the right channel GET registers.
32 Add an offset of 0x01 for the left channel register.
33 (left=right+0x01) */
34
35#define ACI_GET_MASTER 0x03
36#define ACI_GET_MIC 0x05
37#define ACI_GET_LINE 0x07
38#define ACI_GET_CD 0x09
39#define ACI_GET_SYNTH 0x0b
40#define ACI_GET_PCM 0x0d
41#define ACI_GET_LINE1 0x10 /* Radio on PCM20 */
42#define ACI_GET_LINE2 0x12
43
44#define ACI_GET_EQ1 0x22 /* from Bass ... */
45#define ACI_GET_EQ2 0x24
46#define ACI_GET_EQ3 0x26
47#define ACI_GET_EQ4 0x28
48#define ACI_GET_EQ5 0x2a
49#define ACI_GET_EQ6 0x2c
50#define ACI_GET_EQ7 0x2e /* ... to Treble */
51
52/* And these are the values for the right channel SET registers.
53 For left channel access you have to add an offset of 0x08.
54 MASTER is an exception, which needs an offset of 0x01 */
55
56#define ACI_SET_MASTER 0x00
57#define ACI_SET_MIC 0x30
58#define ACI_SET_LINE 0x31
59#define ACI_SET_CD 0x34
60#define ACI_SET_SYNTH 0x33
61#define ACI_SET_PCM 0x32
62#define ACI_SET_LINE1 0x35 /* Radio on PCM20 */
63#define ACI_SET_LINE2 0x36
64
65#define ACI_SET_EQ1 0x40 /* from Bass ... */
66#define ACI_SET_EQ2 0x41
67#define ACI_SET_EQ3 0x42
68#define ACI_SET_EQ4 0x43
69#define ACI_SET_EQ5 0x44
70#define ACI_SET_EQ6 0x45
71#define ACI_SET_EQ7 0x46 /* ... to Treble */
72
73#endif /* _MIRO_H_ */
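
The comments above spell out how left and right channel registers pair up: for the GET registers the left channel is right + 0x01, while for the SET registers it is right + 0x08, with ACI_SET_MASTER the one exception at + 0x01. This is the rule snd_miro_put_double() in miro.c applies when it unpacks the registers from kcontrol->private_value. A short illustrative helper (not part of the driver) that encodes the same mapping:

/* Illustrative only: derive the left-channel register from the
 * right-channel one, per the pairing rules documented above. */
#define ACI_SET_MASTER 0x00	/* duplicated from miro.h above for self-containment */

static int aci_get_left(int get_right_reg)
{
	return get_right_reg + 0x01;	/* GET: left = right + 0x01 */
}

static int aci_set_left(int set_right_reg)
{
	/* SET: left = right + 0x08, except MASTER which uses + 0x01 */
	return set_right_reg + (set_right_reg == ACI_SET_MASTER ? 0x01 : 0x08);
}

For reference, MIRO_DOUBLE() in miro.c packs the right-channel GET register into the low byte of private_value and the right-channel SET register into the next byte.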
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index 1e2e19305e38..a2081803a827 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -15,6 +15,18 @@ config SND_AD1889
15 15 To compile this as a module, choose M here: the module
16 16 will be called snd-ad1889.
17 17
18config SND_ALS300
19 tristate "Avance Logic ALS300/ALS300+"
20 depends on SND
21 select SND_PCM
22 select SND_AC97_CODEC
23 select SND_OPL3_LIB
24 help
25 Say 'Y' or 'M' to include support for Avance Logic ALS300/ALS300+
26
27 To compile this driver as a module, choose M here: the module
28 will be called snd-als300
29
18 30 config SND_ALS4000
19 31 tristate "Avance Logic ALS4000"
20 32 depends on SND && ISA_DMA_API
@@ -195,8 +207,9 @@ config SND_CS46XX
195 207 will be called snd-cs46xx.
196 208
197 209 config SND_CS46XX_NEW_DSP
198 bool "Cirrus Logic (Sound Fusion) New DSP support (EXPERIMENTAL)" 210 bool "Cirrus Logic (Sound Fusion) New DSP support"
199 depends on SND_CS46XX && EXPERIMENTAL 211 depends on SND_CS46XX
212 default y
200 213 help
201 214 Say Y here to use a new DSP image for SPDIF and dual codecs.
202 215
@@ -466,6 +479,19 @@ config SND_PCXHR
466 479 To compile this driver as a module, choose M here: the module
467 480 will be called snd-pcxhr.
468 481
482config SND_RIPTIDE
483 tristate "Conexant Riptide"
484 depends on SND
485 depends on FW_LOADER
486 select SND_OPL3_LIB
487 select SND_MPU401_UART
488 select SND_AC97_CODEC
489 help
490 Say 'Y' or 'M' to include support for Conexant Riptide chip.
491
492 To compile this driver as a module, choose M here: the module
493 will be called snd-riptide
494
469 495 config SND_RME32
470 496 tristate "RME Digi32, 32/8, 32 PRO"
471 497 depends on SND
diff --git a/sound/pci/Makefile b/sound/pci/Makefile
index a6c3cd58fe94..cba5105aafea 100644
--- a/sound/pci/Makefile
+++ b/sound/pci/Makefile
@@ -4,6 +4,7 @@
4 4 #
5 5
6 6 snd-ad1889-objs := ad1889.o
7snd-als300-objs := als300.o
7 8 snd-als4000-objs := als4000.o
8 9 snd-atiixp-objs := atiixp.o
9 10 snd-atiixp-modem-objs := atiixp_modem.o
@@ -27,6 +28,7 @@ snd-via82xx-modem-objs := via82xx_modem.o
27 28
28 29 # Toplevel Module Dependency
29 30 obj-$(CONFIG_SND_AD1889) += snd-ad1889.o
31obj-$(CONFIG_SND_ALS300) += snd-als300.o
30 32 obj-$(CONFIG_SND_ALS4000) += snd-als4000.o
31 33 obj-$(CONFIG_SND_ATIIXP) += snd-atiixp.o
32 34 obj-$(CONFIG_SND_ATIIXP_MODEM) += snd-atiixp-modem.o
@@ -62,6 +64,7 @@ obj-$(CONFIG_SND) += \
62 64 mixart/ \
63 65 nm256/ \
64 66 pcxhr/ \
67 riptide/ \
65 68 rme9652/ \
66 69 trident/ \
67 70 ymfpci/ \
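
The NOTES comment near the top of the new als300.c below explains why its PCM hardware descriptions pin periods_min and periods_max to 2: the BLOCK_COUNTER registers only report a position inside the current period, so the interrupt handlers toggle a period_flipflop each time a period elapses and the driver deduces which half of the buffer the hardware is in from that flag. The helper below is a hedged illustration of that bookkeeping, not the driver's actual pointer callback; the function name and the exact flip-flop convention are assumptions made for the example.

/* Hedged illustration (not the driver's .pointer implementation): combine a
 * per-period position with the period flip-flop to get an absolute offset,
 * assuming exactly two periods and that flipflop == 1 means "first period". */
static unsigned int als300_buffer_offset(unsigned int pos_in_period,
					 unsigned int period_bytes,
					 int period_flipflop)
{
	unsigned int base = period_flipflop ? 0 : period_bytes;

	if (pos_in_period > period_bytes)	/* clamp bogus hardware values */
		pos_in_period = period_bytes;
	return base + pos_in_period;
}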
diff --git a/sound/pci/als300.c b/sound/pci/als300.c
new file mode 100644
index 000000000000..37b80570a5c6
--- /dev/null
+++ b/sound/pci/als300.c
@@ -0,0 +1,866 @@
1/*
2 * als300.c - driver for Avance Logic ALS300/ALS300+ soundcards.
3 * Copyright (C) 2005 by Ash Willis <ashwillis@programmer.net>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
19 * TODO
20 * 4 channel playback for ALS300+
21 * gameport
22 * mpu401
23 * opl3
24 *
25 * NOTES
26 * The BLOCK_COUNTER registers for the ALS300(+) return a figure related to
27 * the position in the current period, NOT the whole buffer. It is important
28 * to know which period we are in so we can calculate the correct pointer.
29 * This is why we always use 2 periods. We can then use a flip-flop variable
30 * to keep track of what period we are in.
31 */
32
33#include <sound/driver.h>
34#include <linux/delay.h>
35#include <linux/init.h>
36#include <linux/moduleparam.h>
37#include <linux/pci.h>
38#include <linux/interrupt.h>
39#include <linux/slab.h>
40
41#include <asm/io.h>
42
43#include <sound/core.h>
44#include <sound/control.h>
45#include <sound/initval.h>
46#include <sound/pcm.h>
47#include <sound/pcm_params.h>
48#include <sound/ac97_codec.h>
49#include <sound/opl3.h>
50
51/* snd_als300_set_irq_flag */
52#define IRQ_DISABLE 0
53#define IRQ_ENABLE 1
54
55/* I/O port layout */
56#define AC97_ACCESS 0x00
57#define AC97_READ 0x04
58#define AC97_STATUS 0x06
59#define AC97_DATA_AVAIL (1<<6)
60#define AC97_BUSY (1<<7)
61#define ALS300_IRQ_STATUS 0x07 /* ALS300 Only */
62#define IRQ_PLAYBACK (1<<3)
63#define IRQ_CAPTURE (1<<2)
64#define GCR_DATA 0x08
65#define GCR_INDEX 0x0C
66#define ALS300P_DRAM_IRQ_STATUS 0x0D /* ALS300+ Only */
67#define MPU_IRQ_STATUS 0x0E /* ALS300 Rev. E+, ALS300+ */
68#define ALS300P_IRQ_STATUS 0x0F /* ALS300+ Only */
69
70/* General Control Registers */
71#define PLAYBACK_START 0x80
72#define PLAYBACK_END 0x81
73#define PLAYBACK_CONTROL 0x82
74#define TRANSFER_START (1<<16)
75#define FIFO_PAUSE (1<<17)
76#define RECORD_START 0x83
77#define RECORD_END 0x84
78#define RECORD_CONTROL 0x85
79#define DRAM_WRITE_CONTROL 0x8B
80#define WRITE_TRANS_START (1<<16)
81#define DRAM_MODE_2 (1<<17)
82#define MISC_CONTROL 0x8C
83#define IRQ_SET_BIT (1<<15)
84#define VMUTE_NORMAL (1<<20)
85#define MMUTE_NORMAL (1<<21)
86#define MUS_VOC_VOL 0x8E
87#define PLAYBACK_BLOCK_COUNTER 0x9A
88#define RECORD_BLOCK_COUNTER 0x9B
89
90#define DEBUG_CALLS 1
91#define DEBUG_PLAY_REC 1
92
93#if DEBUG_CALLS
94#define snd_als300_dbgcalls(format, args...) printk(format, ##args)
95#define snd_als300_dbgcallenter() printk(KERN_ERR "--> %s\n", __FUNCTION__)
96#define snd_als300_dbgcallleave() printk(KERN_ERR "<-- %s\n", __FUNCTION__)
97#else
98#define snd_als300_dbgcalls(format, args...)
99#define snd_als300_dbgcallenter()
100#define snd_als300_dbgcallleave()
101#endif
102
103#if DEBUG_PLAY_REC
104#define snd_als300_dbgplay(format, args...) printk(KERN_ERR format, ##args)
105#else
106#define snd_als300_dbgplay(format, args...)
107#endif
108
109enum {DEVICE_ALS300, DEVICE_ALS300_PLUS};
110
111MODULE_AUTHOR("Ash Willis <ashwillis@programmer.net>");
112MODULE_DESCRIPTION("Avance Logic ALS300");
113MODULE_LICENSE("GPL");
114MODULE_SUPPORTED_DEVICE("{{Avance Logic,ALS300},{Avance Logic,ALS300+}}");
115
116static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
117static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
118static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
119
120struct snd_als300 {
121 unsigned long port;
122 spinlock_t reg_lock;
123 struct snd_card *card;
124 struct pci_dev *pci;
125
126 struct snd_pcm *pcm;
127 struct snd_pcm_substream *playback_substream;
128 struct snd_pcm_substream *capture_substream;
129
130 struct snd_ac97 *ac97;
131 struct snd_opl3 *opl3;
132
133 struct resource *res_port;
134
135 int irq;
136
137 int chip_type; /* ALS300 or ALS300+ */
138
139 char revision;
140};
141
142struct snd_als300_substream_data {
143 int period_flipflop;
144 int control_register;
145 int block_counter_register;
146};
147
148static struct pci_device_id snd_als300_ids[] = {
149 { 0x4005, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_ALS300 },
150 { 0x4005, 0x0308, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_ALS300_PLUS },
151 { 0, }
152};
153
154MODULE_DEVICE_TABLE(pci, snd_als300_ids);
155
156static inline u32 snd_als300_gcr_read(unsigned long port, unsigned short reg)
157{
158 outb(reg, port+GCR_INDEX);
159 return inl(port+GCR_DATA);
160}
161
162static inline void snd_als300_gcr_write(unsigned long port,
163 unsigned short reg, u32 val)
164{
165 outb(reg, port+GCR_INDEX);
166 outl(val, port+GCR_DATA);
167}
168
169/* Enable/Disable Interrupts */
170static void snd_als300_set_irq_flag(struct snd_als300 *chip, int cmd)
171{
172 u32 tmp = snd_als300_gcr_read(chip->port, MISC_CONTROL);
173 snd_als300_dbgcallenter();
174
175 /* boolean XOR check, since old vs. new hardware have
176 directly reversed bit setting for ENABLE and DISABLE.
177 ALS300+ acts like newer versions of ALS300 */
178 if (((chip->revision > 5 || chip->chip_type == DEVICE_ALS300_PLUS) ^
179 (cmd == IRQ_ENABLE)) == 0)
180 tmp |= IRQ_SET_BIT;
181 else
182 tmp &= ~IRQ_SET_BIT;
183 snd_als300_gcr_write(chip->port, MISC_CONTROL, tmp);
184 snd_als300_dbgcallleave();
185}
186
187static int snd_als300_free(struct snd_als300 *chip)
188{
189 snd_als300_dbgcallenter();
190 snd_als300_set_irq_flag(chip, IRQ_DISABLE);
191 if (chip->irq >= 0)
192 free_irq(chip->irq, (void *)chip);
193 pci_release_regions(chip->pci);
194 pci_disable_device(chip->pci);
195 kfree(chip);
196 snd_als300_dbgcallleave();
197 return 0;
198}
199
200static int snd_als300_dev_free(struct snd_device *device)
201{
202 struct snd_als300 *chip = device->device_data;
203 return snd_als300_free(chip);
204}
205
206static irqreturn_t snd_als300_interrupt(int irq, void *dev_id,
207 struct pt_regs *regs)
208{
209 u8 status;
210 struct snd_als300 *chip = dev_id;
211 struct snd_als300_substream_data *data;
212
213 status = inb(chip->port+ALS300_IRQ_STATUS);
214 if (!status) /* shared IRQ, for different device?? Exit ASAP! */
215 return IRQ_NONE;
216
217 /* ACK everything ASAP */
218 outb(status, chip->port+ALS300_IRQ_STATUS);
219 if (status & IRQ_PLAYBACK) {
220 if (chip->pcm && chip->playback_substream) {
221 data = chip->playback_substream->runtime->private_data;
222 data->period_flipflop ^= 1;
223 snd_pcm_period_elapsed(chip->playback_substream);
224 snd_als300_dbgplay("IRQ_PLAYBACK\n");
225 }
226 }
227 if (status & IRQ_CAPTURE) {
228 if (chip->pcm && chip->capture_substream) {
229 data = chip->capture_substream->runtime->private_data;
230 data->period_flipflop ^= 1;
231 snd_pcm_period_elapsed(chip->capture_substream);
232 snd_als300_dbgplay("IRQ_CAPTURE\n");
233 }
234 }
235 return IRQ_HANDLED;
236}
237
238static irqreturn_t snd_als300plus_interrupt(int irq, void *dev_id,
239 struct pt_regs *regs)
240{
241 u8 general, mpu, dram;
242 struct snd_als300 *chip = dev_id;
243 struct snd_als300_substream_data *data;
244
245 general = inb(chip->port+ALS300P_IRQ_STATUS);
246 mpu = inb(chip->port+MPU_IRQ_STATUS);
247 dram = inb(chip->port+ALS300P_DRAM_IRQ_STATUS);
248
249 /* shared IRQ, for different device?? Exit ASAP! */
250 if ((general == 0) && ((mpu & 0x80) == 0) && ((dram & 0x01) == 0))
251 return IRQ_NONE;
252
253 if (general & IRQ_PLAYBACK) {
254 if (chip->pcm && chip->playback_substream) {
255 outb(IRQ_PLAYBACK, chip->port+ALS300P_IRQ_STATUS);
256 data = chip->playback_substream->runtime->private_data;
257 data->period_flipflop ^= 1;
258 snd_pcm_period_elapsed(chip->playback_substream);
259 snd_als300_dbgplay("IRQ_PLAYBACK\n");
260 }
261 }
262 if (general & IRQ_CAPTURE) {
263 if (chip->pcm && chip->capture_substream) {
264 outb(IRQ_CAPTURE, chip->port+ALS300P_IRQ_STATUS);
265 data = chip->capture_substream->runtime->private_data;
266 data->period_flipflop ^= 1;
267 snd_pcm_period_elapsed(chip->capture_substream);
268 snd_als300_dbgplay("IRQ_CAPTURE\n");
269 }
270 }
271 /* FIXME: Ack other interrupt types. Not important right now as
272 * those other devices aren't enabled. */
273 return IRQ_HANDLED;
274}
275
276static void __devexit snd_als300_remove(struct pci_dev *pci)
277{
278 snd_als300_dbgcallenter();
279 snd_card_free(pci_get_drvdata(pci));
280 pci_set_drvdata(pci, NULL);
281 snd_als300_dbgcallleave();
282}
283
284static unsigned short snd_als300_ac97_read(struct snd_ac97 *ac97,
285 unsigned short reg)
286{
287 int i;
288 struct snd_als300 *chip = ac97->private_data;
289
290 for (i = 0; i < 1000; i++) {
291 if ((inb(chip->port+AC97_STATUS) & (AC97_BUSY)) == 0)
292 break;
293 udelay(10);
294 }
295 outl((reg << 24) | (1 << 31), chip->port+AC97_ACCESS);
296
297 for (i = 0; i < 1000; i++) {
298 if ((inb(chip->port+AC97_STATUS) & (AC97_DATA_AVAIL)) != 0)
299 break;
300 udelay(10);
301 }
302 return inw(chip->port+AC97_READ);
303}
304
305static void snd_als300_ac97_write(struct snd_ac97 *ac97,
306 unsigned short reg, unsigned short val)
307{
308 int i;
309 struct snd_als300 *chip = ac97->private_data;
310
311 for (i = 0; i < 1000; i++) {
312 if ((inb(chip->port+AC97_STATUS) & (AC97_BUSY)) == 0)
313 break;
314 udelay(10);
315 }
316 outl((reg << 24) | val, chip->port+AC97_ACCESS);
317}
318
319static int snd_als300_ac97(struct snd_als300 *chip)
320{
321 struct snd_ac97_bus *bus;
322 struct snd_ac97_template ac97;
323 int err;
324 static struct snd_ac97_bus_ops ops = {
325 .write = snd_als300_ac97_write,
326 .read = snd_als300_ac97_read,
327 };
328
329 snd_als300_dbgcallenter();
330 if ((err = snd_ac97_bus(chip->card, 0, &ops, NULL, &bus)) < 0)
331 return err;
332
333 memset(&ac97, 0, sizeof(ac97));
334 ac97.private_data = chip;
335
336 snd_als300_dbgcallleave();
337 return snd_ac97_mixer(bus, &ac97, &chip->ac97);
338}
339
340/* hardware definition
341 *
342 * In AC97 mode, we always use 48k/16bit/stereo.
343 * Any request to change data type is ignored by
344 * the card when it is running outside of legacy
345 * mode.
346 */
347static struct snd_pcm_hardware snd_als300_playback_hw =
348{
349 .info = (SNDRV_PCM_INFO_MMAP |
350 SNDRV_PCM_INFO_INTERLEAVED |
351 SNDRV_PCM_INFO_PAUSE |
352 SNDRV_PCM_INFO_MMAP_VALID),
353 .formats = SNDRV_PCM_FMTBIT_S16,
354 .rates = SNDRV_PCM_RATE_48000,
355 .rate_min = 48000,
356 .rate_max = 48000,
357 .channels_min = 2,
358 .channels_max = 2,
359 .buffer_bytes_max = 64 * 1024,
360 .period_bytes_min = 64,
361 .period_bytes_max = 32 * 1024,
362 .periods_min = 2,
363 .periods_max = 2,
364};
365
366static struct snd_pcm_hardware snd_als300_capture_hw =
367{
368 .info = (SNDRV_PCM_INFO_MMAP |
369 SNDRV_PCM_INFO_INTERLEAVED |
370 SNDRV_PCM_INFO_PAUSE |
371 SNDRV_PCM_INFO_MMAP_VALID),
372 .formats = SNDRV_PCM_FMTBIT_S16,
373 .rates = SNDRV_PCM_RATE_48000,
374 .rate_min = 48000,
375 .rate_max = 48000,
376 .channels_min = 2,
377 .channels_max = 2,
378 .buffer_bytes_max = 64 * 1024,
379 .period_bytes_min = 64,
380 .period_bytes_max = 32 * 1024,
381 .periods_min = 2,
382 .periods_max = 2,
383};
384
385static int snd_als300_playback_open(struct snd_pcm_substream *substream)
386{
387 struct snd_als300 *chip = snd_pcm_substream_chip(substream);
388 struct snd_pcm_runtime *runtime = substream->runtime;
389 struct snd_als300_substream_data *data = kzalloc(sizeof(*data),
390 GFP_KERNEL);
391
392 snd_als300_dbgcallenter();
393 chip->playback_substream = substream;
394 runtime->hw = snd_als300_playback_hw;
395 runtime->private_data = data;
396 data->control_register = PLAYBACK_CONTROL;
397 data->block_counter_register = PLAYBACK_BLOCK_COUNTER;
398 snd_als300_dbgcallleave();
399 return 0;
400}
401
402static int snd_als300_playback_close(struct snd_pcm_substream *substream)
403{
404 struct snd_als300 *chip = snd_pcm_substream_chip(substream);
405 struct snd_als300_substream_data *data;
406
407 data = substream->runtime->private_data;
408 snd_als300_dbgcallenter();
409 kfree(data);
410 chip->playback_substream = NULL;
411 snd_pcm_lib_free_pages(substream);
412 snd_als300_dbgcallleave();
413 return 0;
414}
415
416static int snd_als300_capture_open(struct snd_pcm_substream *substream)
417{
418 struct snd_als300 *chip = snd_pcm_substream_chip(substream);
419 struct snd_pcm_runtime *runtime = substream->runtime;
420 struct snd_als300_substream_data *data = kzalloc(sizeof(*data),
421 GFP_KERNEL);
422
423 snd_als300_dbgcallenter();
424 chip->capture_substream = substream;
425 runtime->hw = snd_als300_capture_hw;
426 runtime->private_data = data;
427 data->control_register = RECORD_CONTROL;
428 data->block_counter_register = RECORD_BLOCK_COUNTER;
429 snd_als300_dbgcallleave();
430 return 0;
431}
432
433static int snd_als300_capture_close(struct snd_pcm_substream *substream)
434{
435 struct snd_als300 *chip = snd_pcm_substream_chip(substream);
436 struct snd_als300_substream_data *data;
437
438 data = substream->runtime->private_data;
439 snd_als300_dbgcallenter();
440 kfree(data);
441 chip->capture_substream = NULL;
442 snd_pcm_lib_free_pages(substream);
443 snd_als300_dbgcallleave();
444 return 0;
445}
446
447static int snd_als300_pcm_hw_params(struct snd_pcm_substream *substream,
448				    struct snd_pcm_hw_params *hw_params)
449{
450 return snd_pcm_lib_malloc_pages(substream,
451 params_buffer_bytes(hw_params));
452}
453
454static int snd_als300_pcm_hw_free(struct snd_pcm_substream *substream)
455{
456 return snd_pcm_lib_free_pages(substream);
457}
458
459static int snd_als300_playback_prepare(struct snd_pcm_substream *substream)
460{
461 u32 tmp;
462 struct snd_als300 *chip = snd_pcm_substream_chip(substream);
463 struct snd_pcm_runtime *runtime = substream->runtime;
464	unsigned int period_bytes = snd_pcm_lib_period_bytes(substream);
465	unsigned int buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
466
467 snd_als300_dbgcallenter();
468 spin_lock_irq(&chip->reg_lock);
469 tmp = snd_als300_gcr_read(chip->port, PLAYBACK_CONTROL);
470 tmp &= ~TRANSFER_START;
471
472 snd_als300_dbgplay("Period bytes: %d Buffer bytes %d\n",
473 period_bytes, buffer_bytes);
474
475 /* set block size */
476 tmp &= 0xffff0000;
477 tmp |= period_bytes - 1;
478 snd_als300_gcr_write(chip->port, PLAYBACK_CONTROL, tmp);
479
480 /* set dma area */
481 snd_als300_gcr_write(chip->port, PLAYBACK_START,
482 runtime->dma_addr);
483 snd_als300_gcr_write(chip->port, PLAYBACK_END,
484 runtime->dma_addr + buffer_bytes - 1);
485 spin_unlock_irq(&chip->reg_lock);
486 snd_als300_dbgcallleave();
487 return 0;
488}
489
490static int snd_als300_capture_prepare(struct snd_pcm_substream *substream)
491{
492 u32 tmp;
493 struct snd_als300 *chip = snd_pcm_substream_chip(substream);
494 struct snd_pcm_runtime *runtime = substream->runtime;
495	unsigned int period_bytes = snd_pcm_lib_period_bytes(substream);
496	unsigned int buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
497
498 snd_als300_dbgcallenter();
499 spin_lock_irq(&chip->reg_lock);
500 tmp = snd_als300_gcr_read(chip->port, RECORD_CONTROL);
501 tmp &= ~TRANSFER_START;
502
503 snd_als300_dbgplay("Period bytes: %d Buffer bytes %d\n", period_bytes,
504 buffer_bytes);
505
506 /* set block size */
507 tmp &= 0xffff0000;
508 tmp |= period_bytes - 1;
509
510 /* set dma area */
511 snd_als300_gcr_write(chip->port, RECORD_CONTROL, tmp);
512 snd_als300_gcr_write(chip->port, RECORD_START,
513 runtime->dma_addr);
514 snd_als300_gcr_write(chip->port, RECORD_END,
515 runtime->dma_addr + buffer_bytes - 1);
516 spin_unlock_irq(&chip->reg_lock);
517 snd_als300_dbgcallleave();
518 return 0;
519}
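Both prepare callbacks above program the same register layout: the low 16 bits of the control register carry the block (period) length minus one, the upper 16 bits keep the existing control flags, and the START/END registers bound the DMA window at [dma_addr, dma_addr + buffer_bytes - 1]. A small helper restating that encoding (the helper name is ours, purely illustrative):

static u32 als300_encode_block_size(u32 control, unsigned int period_bytes)
{
	control &= 0xffff0000;			/* preserve the control flags */
	control |= (period_bytes - 1) & 0xffff;	/* block size, zero-based */
	return control;
}

For a 4096-byte period this stores 0x0fff in the low half of the register.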
520
521static int snd_als300_trigger(struct snd_pcm_substream *substream, int cmd)
522{
523 struct snd_als300 *chip = snd_pcm_substream_chip(substream);
524 u32 tmp;
525 struct snd_als300_substream_data *data;
526 unsigned short reg;
527 int ret = 0;
528
529 data = substream->runtime->private_data;
530 reg = data->control_register;
531
532 snd_als300_dbgcallenter();
533 spin_lock(&chip->reg_lock);
534 switch (cmd) {
535 case SNDRV_PCM_TRIGGER_START:
536 case SNDRV_PCM_TRIGGER_RESUME:
537 tmp = snd_als300_gcr_read(chip->port, reg);
538 data->period_flipflop = 1;
539 snd_als300_gcr_write(chip->port, reg, tmp | TRANSFER_START);
540 snd_als300_dbgplay("TRIGGER START\n");
541 break;
542 case SNDRV_PCM_TRIGGER_STOP:
543 case SNDRV_PCM_TRIGGER_SUSPEND:
544 tmp = snd_als300_gcr_read(chip->port, reg);
545 snd_als300_gcr_write(chip->port, reg, tmp & ~TRANSFER_START);
546 snd_als300_dbgplay("TRIGGER STOP\n");
547 break;
548 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
549 tmp = snd_als300_gcr_read(chip->port, reg);
550 snd_als300_gcr_write(chip->port, reg, tmp | FIFO_PAUSE);
551 snd_als300_dbgplay("TRIGGER PAUSE\n");
552 break;
553 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
554 tmp = snd_als300_gcr_read(chip->port, reg);
555 snd_als300_gcr_write(chip->port, reg, tmp & ~FIFO_PAUSE);
556 snd_als300_dbgplay("TRIGGER RELEASE\n");
557 break;
558 default:
559 snd_als300_dbgplay("TRIGGER INVALID\n");
560 ret = -EINVAL;
561 }
562 spin_unlock(&chip->reg_lock);
563 snd_als300_dbgcallleave();
564 return ret;
565}
566
567static snd_pcm_uframes_t snd_als300_pointer(struct snd_pcm_substream *substream)
568{
569 u16 current_ptr;
570 struct snd_als300 *chip = snd_pcm_substream_chip(substream);
571 struct snd_als300_substream_data *data;
572 unsigned short period_bytes;
573
574 data = substream->runtime->private_data;
575 period_bytes = snd_pcm_lib_period_bytes(substream);
576
577 snd_als300_dbgcallenter();
578 spin_lock(&chip->reg_lock);
579 current_ptr = (u16) snd_als300_gcr_read(chip->port,
580 data->block_counter_register) + 4;
581 spin_unlock(&chip->reg_lock);
582 if (current_ptr > period_bytes)
583 current_ptr = 0;
584 else
585 current_ptr = period_bytes - current_ptr;
586
587 if (data->period_flipflop == 0)
588 current_ptr += period_bytes;
589 snd_als300_dbgplay("Pointer (bytes): %d\n", current_ptr);
590 snd_als300_dbgcallleave();
591 return bytes_to_frames(substream->runtime, current_ptr);
592}
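A worked example of the pointer arithmetic above, with illustrative values: for a 4096-byte period, a raw block-counter read of 1020 becomes 1024 after the +4 adjustment, i.e. 1024 bytes appear to remain in the current period, so the offset within it is 4096 - 1024 = 3072 bytes. When period_flipflop is 0 the second period is the active one, giving 3072 + 4096 = 7168 bytes, which bytes_to_frames() turns into 1792 frames for 16-bit stereo.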
593
594static struct snd_pcm_ops snd_als300_playback_ops = {
595 .open = snd_als300_playback_open,
596 .close = snd_als300_playback_close,
597 .ioctl = snd_pcm_lib_ioctl,
598 .hw_params = snd_als300_pcm_hw_params,
599 .hw_free = snd_als300_pcm_hw_free,
600 .prepare = snd_als300_playback_prepare,
601 .trigger = snd_als300_trigger,
602 .pointer = snd_als300_pointer,
603};
604
605static struct snd_pcm_ops snd_als300_capture_ops = {
606 .open = snd_als300_capture_open,
607 .close = snd_als300_capture_close,
608 .ioctl = snd_pcm_lib_ioctl,
609 .hw_params = snd_als300_pcm_hw_params,
610 .hw_free = snd_als300_pcm_hw_free,
611 .prepare = snd_als300_capture_prepare,
612 .trigger = snd_als300_trigger,
613 .pointer = snd_als300_pointer,
614};
615
616static int __devinit snd_als300_new_pcm(struct snd_als300 *chip)
617{
618 struct snd_pcm *pcm;
619 int err;
620
621 snd_als300_dbgcallenter();
622 err = snd_pcm_new(chip->card, "ALS300", 0, 1, 1, &pcm);
623 if (err < 0)
624 return err;
625 pcm->private_data = chip;
626 strcpy(pcm->name, "ALS300");
627 chip->pcm = pcm;
628
629 /* set operators */
630 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
631 &snd_als300_playback_ops);
632 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
633 &snd_als300_capture_ops);
634
635 /* pre-allocation of buffers */
636 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
637 snd_dma_pci_data(chip->pci), 64*1024, 64*1024);
638 snd_als300_dbgcallleave();
639 return 0;
640}
641
642static void snd_als300_init(struct snd_als300 *chip)
643{
644 unsigned long flags;
645 u32 tmp;
646
647 snd_als300_dbgcallenter();
648 spin_lock_irqsave(&chip->reg_lock, flags);
649 chip->revision = (snd_als300_gcr_read(chip->port, MISC_CONTROL) >> 16)
650 & 0x0000000F;
651 /* Setup DRAM */
652 tmp = snd_als300_gcr_read(chip->port, DRAM_WRITE_CONTROL);
653 snd_als300_gcr_write(chip->port, DRAM_WRITE_CONTROL,
654 (tmp | DRAM_MODE_2)
655 & ~WRITE_TRANS_START);
656
657 /* Enable IRQ output */
658 snd_als300_set_irq_flag(chip, IRQ_ENABLE);
659
660 /* Unmute hardware devices so their outputs get routed to
661 * the onboard mixer */
662 tmp = snd_als300_gcr_read(chip->port, MISC_CONTROL);
663 snd_als300_gcr_write(chip->port, MISC_CONTROL,
664 tmp | VMUTE_NORMAL | MMUTE_NORMAL);
665
666 /* Reset volumes */
667 snd_als300_gcr_write(chip->port, MUS_VOC_VOL, 0);
668
669 /* Make sure playback transfer is stopped */
670 tmp = snd_als300_gcr_read(chip->port, PLAYBACK_CONTROL);
671 snd_als300_gcr_write(chip->port, PLAYBACK_CONTROL,
672 tmp & ~TRANSFER_START);
673 spin_unlock_irqrestore(&chip->reg_lock, flags);
674 snd_als300_dbgcallleave();
675}
676
677static int __devinit snd_als300_create(struct snd_card *card,
678 struct pci_dev *pci, int chip_type,
679 struct snd_als300 **rchip)
680{
681 struct snd_als300 *chip;
682 void *irq_handler;
683 int err;
684
685	static struct snd_device_ops ops = {
686 .dev_free = snd_als300_dev_free,
687 };
688 *rchip = NULL;
689
690 snd_als300_dbgcallenter();
691 if ((err = pci_enable_device(pci)) < 0)
692 return err;
693
694 if (pci_set_dma_mask(pci, 0x0fffffff) < 0 ||
695 pci_set_consistent_dma_mask(pci, 0x0fffffff) < 0) {
696 printk(KERN_ERR "error setting 28bit DMA mask\n");
697 pci_disable_device(pci);
698 return -ENXIO;
699 }
700 pci_set_master(pci);
701
702 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
703 if (chip == NULL) {
704 pci_disable_device(pci);
705 return -ENOMEM;
706 }
707
708 chip->card = card;
709 chip->pci = pci;
710 chip->irq = -1;
711 chip->chip_type = chip_type;
712 spin_lock_init(&chip->reg_lock);
713
714 if ((err = pci_request_regions(pci, "ALS300")) < 0) {
715 kfree(chip);
716 pci_disable_device(pci);
717 return err;
718 }
719 chip->port = pci_resource_start(pci, 0);
720
721 if (chip->chip_type == DEVICE_ALS300_PLUS)
722 irq_handler = snd_als300plus_interrupt;
723 else
724 irq_handler = snd_als300_interrupt;
725
726 if (request_irq(pci->irq, irq_handler, SA_INTERRUPT|SA_SHIRQ,
727 card->shortname, (void *)chip)) {
728 snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq);
729 snd_als300_free(chip);
730 return -EBUSY;
731 }
732 chip->irq = pci->irq;
733
734
735 snd_als300_init(chip);
736
737	if ((err = snd_als300_ac97(chip)) < 0) {
738 snd_printk(KERN_WARNING "Could not create ac97\n");
739 snd_als300_free(chip);
740 return err;
741 }
742
743 if ((err = snd_als300_new_pcm(chip)) < 0) {
744 snd_printk(KERN_WARNING "Could not create PCM\n");
745 snd_als300_free(chip);
746 return err;
747 }
748
749 if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL,
750 chip, &ops)) < 0) {
751 snd_als300_free(chip);
752 return err;
753 }
754
755 snd_card_set_dev(card, &pci->dev);
756
757 *rchip = chip;
758 snd_als300_dbgcallleave();
759 return 0;
760}
761
762#ifdef CONFIG_PM
763static int snd_als300_suspend(struct pci_dev *pci, pm_message_t state)
764{
765 struct snd_card *card = pci_get_drvdata(pci);
766 struct snd_als300 *chip = card->private_data;
767
768 snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
769 snd_pcm_suspend_all(chip->pcm);
770 snd_ac97_suspend(chip->ac97);
771
772 pci_set_power_state(pci, PCI_D3hot);
773 pci_disable_device(pci);
774 pci_save_state(pci);
775 return 0;
776}
777
778static int snd_als300_resume(struct pci_dev *pci)
779{
780 struct snd_card *card = pci_get_drvdata(pci);
781 struct snd_als300 *chip = card->private_data;
782
783 pci_restore_state(pci);
784 pci_enable_device(pci);
785 pci_set_power_state(pci, PCI_D0);
786 pci_set_master(pci);
787
788 snd_als300_init(chip);
789 snd_ac97_resume(chip->ac97);
790
791 snd_power_change_state(card, SNDRV_CTL_POWER_D0);
792 return 0;
793}
794#endif
795
796static int __devinit snd_als300_probe(struct pci_dev *pci,
797 const struct pci_device_id *pci_id)
798{
799 static int dev;
800 struct snd_card *card;
801 struct snd_als300 *chip;
802 int err, chip_type;
803
804 if (dev >= SNDRV_CARDS)
805 return -ENODEV;
806 if (!enable[dev]) {
807 dev++;
808 return -ENOENT;
809 }
810
811 card = snd_card_new(index[dev], id[dev], THIS_MODULE, 0);
812
813 if (card == NULL)
814 return -ENOMEM;
815
816 chip_type = pci_id->driver_data;
817
818 if ((err = snd_als300_create(card, pci, chip_type, &chip)) < 0) {
819 snd_card_free(card);
820 return err;
821 }
822 card->private_data = chip;
823
824 strcpy(card->driver, "ALS300");
825 if (chip->chip_type == DEVICE_ALS300_PLUS)
826 /* don't know much about ALS300+ yet
827 * print revision number for now */
828 sprintf(card->shortname, "ALS300+ (Rev. %d)", chip->revision);
829 else
830 sprintf(card->shortname, "ALS300 (Rev. %c)", 'A' +
831 chip->revision - 1);
832 sprintf(card->longname, "%s at 0x%lx irq %i",
833 card->shortname, chip->port, chip->irq);
834
835 if ((err = snd_card_register(card)) < 0) {
836 snd_card_free(card);
837 return err;
838 }
839 pci_set_drvdata(pci, card);
840 dev++;
841 return 0;
842}
843
844static struct pci_driver driver = {
845 .name = "ALS300",
846 .id_table = snd_als300_ids,
847 .probe = snd_als300_probe,
848 .remove = __devexit_p(snd_als300_remove),
849#ifdef CONFIG_PM
850 .suspend = snd_als300_suspend,
851 .resume = snd_als300_resume,
852#endif
853};
854
855static int __init alsa_card_als300_init(void)
856{
857 return pci_register_driver(&driver);
858}
859
860static void __exit alsa_card_als300_exit(void)
861{
862 pci_unregister_driver(&driver);
863}
864
865module_init(alsa_card_als300_init)
866module_exit(alsa_card_als300_exit)
diff --git a/sound/pci/cs4281.c b/sound/pci/cs4281.c
index 4f65ec56bf35..ac4e73f69c1d 100644
--- a/sound/pci/cs4281.c
+++ b/sound/pci/cs4281.c
@@ -1046,7 +1046,7 @@ static int snd_cs4281_put_volume(struct snd_kcontrol *kcontrol,
1046 snd_cs4281_pokeBA0(chip, regL, volL); 1046 snd_cs4281_pokeBA0(chip, regL, volL);
1047 change = 1; 1047 change = 1;
1048 } 1048 }
1049 if (ucontrol->value.integer.value[0] != volL) { 1049 if (ucontrol->value.integer.value[1] != volR) {
1050 volR = CS_VOL_MASK - (ucontrol->value.integer.value[1] & CS_VOL_MASK); 1050 volR = CS_VOL_MASK - (ucontrol->value.integer.value[1] & CS_VOL_MASK);
1051 snd_cs4281_pokeBA0(chip, regR, volR); 1051 snd_cs4281_pokeBA0(chip, regR, volR);
1052 change = 1; 1052 change = 1;
@@ -1416,7 +1416,7 @@ static int __devinit snd_cs4281_create(struct snd_card *card,
1416static int snd_cs4281_chip_init(struct cs4281 *chip) 1416static int snd_cs4281_chip_init(struct cs4281 *chip)
1417{ 1417{
1418 unsigned int tmp; 1418 unsigned int tmp;
1419 int timeout; 1419 unsigned long end_time;
1420 int retry_count = 2; 1420 int retry_count = 2;
1421 1421
1422 /* Having EPPMC.FPDN=1 prevent proper chip initialisation */ 1422 /* Having EPPMC.FPDN=1 prevent proper chip initialisation */
@@ -1496,7 +1496,7 @@ static int snd_cs4281_chip_init(struct cs4281 *chip)
1496 /* 1496 /*
1497 * Wait for the DLL ready signal from the clock logic. 1497 * Wait for the DLL ready signal from the clock logic.
1498 */ 1498 */
1499 timeout = 100; 1499 end_time = jiffies + HZ;
1500 do { 1500 do {
1501 /* 1501 /*
1502 * Read the AC97 status register to see if we've seen a CODEC 1502 * Read the AC97 status register to see if we've seen a CODEC
@@ -1504,8 +1504,8 @@ static int snd_cs4281_chip_init(struct cs4281 *chip)
1504 */ 1504 */
1505 if (snd_cs4281_peekBA0(chip, BA0_CLKCR1) & BA0_CLKCR1_DLLRDY) 1505 if (snd_cs4281_peekBA0(chip, BA0_CLKCR1) & BA0_CLKCR1_DLLRDY)
1506 goto __ok0; 1506 goto __ok0;
1507 msleep(1); 1507 schedule_timeout_uninterruptible(1);
1508 } while (timeout-- > 0); 1508 } while (time_after_eq(end_time, jiffies));
1509 1509
1510 snd_printk(KERN_ERR "DLLRDY not seen\n"); 1510 snd_printk(KERN_ERR "DLLRDY not seen\n");
1511 return -EIO; 1511 return -EIO;
@@ -1522,7 +1522,7 @@ static int snd_cs4281_chip_init(struct cs4281 *chip)
1522 /* 1522 /*
1523 * Wait for the codec ready signal from the AC97 codec. 1523 * Wait for the codec ready signal from the AC97 codec.
1524 */ 1524 */
1525 timeout = 100; 1525 end_time = jiffies + HZ;
1526 do { 1526 do {
1527 /* 1527 /*
1528 * Read the AC97 status register to see if we've seen a CODEC 1528 * Read the AC97 status register to see if we've seen a CODEC
@@ -1530,20 +1530,20 @@ static int snd_cs4281_chip_init(struct cs4281 *chip)
1530 */ 1530 */
1531 if (snd_cs4281_peekBA0(chip, BA0_ACSTS) & BA0_ACSTS_CRDY) 1531 if (snd_cs4281_peekBA0(chip, BA0_ACSTS) & BA0_ACSTS_CRDY)
1532 goto __ok1; 1532 goto __ok1;
1533 msleep(1); 1533 schedule_timeout_uninterruptible(1);
1534 } while (timeout-- > 0); 1534 } while (time_after_eq(end_time, jiffies));
1535 1535
1536 snd_printk(KERN_ERR "never read codec ready from AC'97 (0x%x)\n", snd_cs4281_peekBA0(chip, BA0_ACSTS)); 1536 snd_printk(KERN_ERR "never read codec ready from AC'97 (0x%x)\n", snd_cs4281_peekBA0(chip, BA0_ACSTS));
1537 return -EIO; 1537 return -EIO;
1538 1538
1539 __ok1: 1539 __ok1:
1540 if (chip->dual_codec) { 1540 if (chip->dual_codec) {
1541 timeout = 100; 1541 end_time = jiffies + HZ;
1542 do { 1542 do {
1543 if (snd_cs4281_peekBA0(chip, BA0_ACSTS2) & BA0_ACSTS_CRDY) 1543 if (snd_cs4281_peekBA0(chip, BA0_ACSTS2) & BA0_ACSTS_CRDY)
1544 goto __codec2_ok; 1544 goto __codec2_ok;
1545 msleep(1); 1545 schedule_timeout_uninterruptible(1);
1546 } while (timeout-- > 0); 1546 } while (time_after_eq(end_time, jiffies));
1547 snd_printk(KERN_INFO "secondary codec doesn't respond. disable it...\n"); 1547 snd_printk(KERN_INFO "secondary codec doesn't respond. disable it...\n");
1548 chip->dual_codec = 0; 1548 chip->dual_codec = 0;
1549 __codec2_ok: ; 1549 __codec2_ok: ;
@@ -1561,7 +1561,7 @@ static int snd_cs4281_chip_init(struct cs4281 *chip)
1561 * the codec is pumping ADC data across the AC-link. 1561 * the codec is pumping ADC data across the AC-link.
1562 */ 1562 */
1563 1563
1564 timeout = 100; 1564 end_time = jiffies + HZ;
1565 do { 1565 do {
1566 /* 1566 /*
1567 * Read the input slot valid register and see if input slots 3 1567 * Read the input slot valid register and see if input slots 3
@@ -1569,8 +1569,8 @@ static int snd_cs4281_chip_init(struct cs4281 *chip)
1569 */ 1569 */
1570 if ((snd_cs4281_peekBA0(chip, BA0_ACISV) & (BA0_ACISV_SLV(3) | BA0_ACISV_SLV(4))) == (BA0_ACISV_SLV(3) | BA0_ACISV_SLV(4))) 1570 if ((snd_cs4281_peekBA0(chip, BA0_ACISV) & (BA0_ACISV_SLV(3) | BA0_ACISV_SLV(4))) == (BA0_ACISV_SLV(3) | BA0_ACISV_SLV(4)))
1571 goto __ok2; 1571 goto __ok2;
1572 msleep(1); 1572 schedule_timeout_uninterruptible(1);
1573 } while (timeout-- > 0); 1573 } while (time_after_eq(end_time, jiffies));
1574 1574
1575 if (--retry_count > 0) 1575 if (--retry_count > 0)
1576 goto __retry; 1576 goto __retry;
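The cs4281 hunks above replace fixed retry counters with a jiffies deadline: schedule_timeout_uninterruptible(1) can sleep for well over one tick on a loaded system, so counting 100 iterations no longer guarantees the intended wait. A generic sketch of the deadline pattern, where poll_ready() stands in for the actual hardware readiness check:

static int wait_for_ready(struct cs4281 *chip)
{
	unsigned long end_time = jiffies + HZ;	/* one-second deadline */

	do {
		if (poll_ready(chip))		/* hypothetical predicate */
			return 0;
		schedule_timeout_uninterruptible(1);
	} while (time_after_eq(end_time, jiffies));
	return -EIO;				/* deadline expired */
}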
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index b42dff7ceed0..5bee3b536478 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -295,7 +295,7 @@ static int init_unsol_queue(struct hda_bus *bus)
295 snd_printk(KERN_ERR "hda_codec: can't allocate unsolicited queue\n"); 295 snd_printk(KERN_ERR "hda_codec: can't allocate unsolicited queue\n");
296 return -ENOMEM; 296 return -ENOMEM;
297 } 297 }
298 unsol->workq = create_workqueue("hda_codec"); 298 unsol->workq = create_singlethread_workqueue("hda_codec");
299 if (! unsol->workq) { 299 if (! unsol->workq) {
300 snd_printk(KERN_ERR "hda_codec: can't create workqueue\n"); 300 snd_printk(KERN_ERR "hda_codec: can't create workqueue\n");
301 kfree(unsol); 301 kfree(unsol);
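Unsolicited-event handling in hda_codec only needs a single, ordered worker, so the hunk above drops the per-CPU workqueue in favour of a single-threaded one. The lifecycle of that API, sketched with hypothetical wrapper names and assuming the work item itself is initialised elsewhere:

static struct workqueue_struct *unsol_wq;

static int unsol_wq_start(void)
{
	unsol_wq = create_singlethread_workqueue("hda_codec");
	return unsol_wq ? 0 : -ENOMEM;
}

static void unsol_wq_stop(void)
{
	flush_workqueue(unsol_wq);	/* drain any queued events */
	destroy_workqueue(unsol_wq);
}

Events would still be submitted with queue_work(); only the number of worker threads changes.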
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index c096606970ff..0ad60ae29011 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -81,6 +81,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
81 "{Intel, ESB2}," 81 "{Intel, ESB2},"
82 "{Intel, ICH8}," 82 "{Intel, ICH8},"
83 "{ATI, SB450}," 83 "{ATI, SB450},"
84 "{ATI, SB600},"
84 "{VIA, VT8251}," 85 "{VIA, VT8251},"
85 "{VIA, VT8237A}," 86 "{VIA, VT8237A},"
86 "{SiS, SIS966}," 87 "{SiS, SIS966},"
@@ -1619,6 +1620,7 @@ static struct pci_device_id azx_ids[] = {
1619 { 0x8086, 0x269a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ESB2 */ 1620 { 0x8086, 0x269a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ESB2 */
1620 { 0x8086, 0x284b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH8 */ 1621 { 0x8086, 0x284b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH8 */
1621 { 0x1002, 0x437b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ATI }, /* ATI SB450 */ 1622 { 0x1002, 0x437b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ATI }, /* ATI SB450 */
1623 { 0x1002, 0x4383, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ATI }, /* ATI SB600 */
1622 { 0x1106, 0x3288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_VIA }, /* VIA VT8251/VT8237A */ 1624 { 0x1106, 0x3288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_VIA }, /* VIA VT8251/VT8237A */
1623 { 0x1039, 0x7502, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_SIS }, /* SIS966 */ 1625 { 0x1039, 0x7502, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_SIS }, /* SIS966 */
1624 { 0x10b9, 0x5461, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ULI }, /* ULI M5461 */ 1626 { 0x10b9, 0x5461, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ULI }, /* ULI M5461 */
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 32401bd8c229..2bfe37e8543c 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -44,6 +44,7 @@ struct ad198x_spec {
44 * dig_out_nid and hp_nid are optional 44 * dig_out_nid and hp_nid are optional
45 */ 45 */
46 unsigned int cur_eapd; 46 unsigned int cur_eapd;
47 unsigned int need_dac_fix;
47 48
48 /* capture */ 49 /* capture */
49 unsigned int num_adc_nids; 50 unsigned int num_adc_nids;
@@ -836,10 +837,14 @@ static int patch_ad1986a(struct hda_codec *codec)
836 case AD1986A_3STACK: 837 case AD1986A_3STACK:
837 spec->num_mixers = 2; 838 spec->num_mixers = 2;
838 spec->mixers[1] = ad1986a_3st_mixers; 839 spec->mixers[1] = ad1986a_3st_mixers;
839 spec->num_init_verbs = 2; 840 spec->num_init_verbs = 3;
840 spec->init_verbs[1] = ad1986a_3st_init_verbs; 841 spec->init_verbs[1] = ad1986a_3st_init_verbs;
842 spec->init_verbs[2] = ad1986a_ch2_init;
841 spec->channel_mode = ad1986a_modes; 843 spec->channel_mode = ad1986a_modes;
842 spec->num_channel_mode = ARRAY_SIZE(ad1986a_modes); 844 spec->num_channel_mode = ARRAY_SIZE(ad1986a_modes);
845 spec->need_dac_fix = 1;
846 spec->multiout.max_channels = 2;
847 spec->multiout.num_dacs = 1;
843 break; 848 break;
844 case AD1986A_LAPTOP: 849 case AD1986A_LAPTOP:
845 spec->mixers[0] = ad1986a_laptop_mixers; 850 spec->mixers[0] = ad1986a_laptop_mixers;
@@ -1555,6 +1560,8 @@ static int ad198x_ch_mode_put(struct snd_kcontrol *kcontrol,
1555{ 1560{
1556 struct hda_codec *codec = snd_kcontrol_chip(kcontrol); 1561 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
1557 struct ad198x_spec *spec = codec->spec; 1562 struct ad198x_spec *spec = codec->spec;
1563 if (spec->need_dac_fix)
1564 spec->multiout.num_dacs = spec->multiout.max_channels / 2;
1558 return snd_hda_ch_mode_put(codec, ucontrol, spec->channel_mode, 1565 return snd_hda_ch_mode_put(codec, ucontrol, spec->channel_mode,
1559 spec->num_channel_mode, &spec->multiout.max_channels); 1566 spec->num_channel_mode, &spec->multiout.max_channels);
1560} 1567}
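The need_dac_fix handling above ties the DAC count to the channel-mode control via num_dacs = max_channels / 2: 2 channels correspond to 1 DAC and 6 channels to 3 DACs, so the AD1986A 3-stack preset (forced earlier to max_channels = 2, num_dacs = 1) stays consistent when the user changes the channel mode.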
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 4c6c9ec8ea5b..66bbdb60f50b 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -52,6 +52,7 @@ enum {
52 ALC880_CLEVO, 52 ALC880_CLEVO,
53 ALC880_TCL_S700, 53 ALC880_TCL_S700,
54 ALC880_LG, 54 ALC880_LG,
55 ALC880_LG_LW,
55#ifdef CONFIG_SND_DEBUG 56#ifdef CONFIG_SND_DEBUG
56 ALC880_TEST, 57 ALC880_TEST,
57#endif 58#endif
@@ -131,6 +132,7 @@ struct alc_spec {
131 hda_nid_t dig_in_nid; /* digital-in NID; optional */ 132 hda_nid_t dig_in_nid; /* digital-in NID; optional */
132 133
133 /* capture source */ 134 /* capture source */
135 unsigned int num_mux_defs;
134 const struct hda_input_mux *input_mux; 136 const struct hda_input_mux *input_mux;
135 unsigned int cur_mux[3]; 137 unsigned int cur_mux[3];
136 138
@@ -172,6 +174,7 @@ struct alc_config_preset {
172 hda_nid_t dig_in_nid; 174 hda_nid_t dig_in_nid;
173 unsigned int num_channel_mode; 175 unsigned int num_channel_mode;
174 const struct hda_channel_mode *channel_mode; 176 const struct hda_channel_mode *channel_mode;
177 unsigned int num_mux_defs;
175 const struct hda_input_mux *input_mux; 178 const struct hda_input_mux *input_mux;
176 void (*unsol_event)(struct hda_codec *, unsigned int); 179 void (*unsol_event)(struct hda_codec *, unsigned int);
177 void (*init_hook)(struct hda_codec *); 180 void (*init_hook)(struct hda_codec *);
@@ -185,7 +188,10 @@ static int alc_mux_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_
185{ 188{
186 struct hda_codec *codec = snd_kcontrol_chip(kcontrol); 189 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
187 struct alc_spec *spec = codec->spec; 190 struct alc_spec *spec = codec->spec;
188 return snd_hda_input_mux_info(spec->input_mux, uinfo); 191 unsigned int mux_idx = snd_ctl_get_ioffidx(kcontrol, &uinfo->id);
192 if (mux_idx >= spec->num_mux_defs)
193 mux_idx = 0;
194 return snd_hda_input_mux_info(&spec->input_mux[mux_idx], uinfo);
189} 195}
190 196
191static int alc_mux_enum_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) 197static int alc_mux_enum_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
@@ -203,7 +209,8 @@ static int alc_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_v
203 struct hda_codec *codec = snd_kcontrol_chip(kcontrol); 209 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
204 struct alc_spec *spec = codec->spec; 210 struct alc_spec *spec = codec->spec;
205 unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); 211 unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
206 return snd_hda_input_mux_put(codec, spec->input_mux, ucontrol, 212 unsigned int mux_idx = adc_idx >= spec->num_mux_defs ? 0 : adc_idx;
213 return snd_hda_input_mux_put(codec, &spec->input_mux[mux_idx], ucontrol,
207 spec->adc_nids[adc_idx], &spec->cur_mux[adc_idx]); 214 spec->adc_nids[adc_idx], &spec->cur_mux[adc_idx]);
208} 215}
209 216
@@ -245,7 +252,8 @@ static int alc_ch_mode_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_va
245 * states other than HiZ (eg: PIN_VREFxx) and revert to HiZ if any of these 252 * states other than HiZ (eg: PIN_VREFxx) and revert to HiZ if any of these
246 * are requested. Therefore order this list so that this behaviour will not 253 * are requested. Therefore order this list so that this behaviour will not
247 * cause problems when mixer clients move through the enum sequentially. 254 * cause problems when mixer clients move through the enum sequentially.
248 * NIDs 0x0f and 0x10 have been observed to have this behaviour. 255 * NIDs 0x0f and 0x10 have been observed to have this behaviour as of
256 * March 2006.
249 */ 257 */
250static char *alc_pin_mode_names[] = { 258static char *alc_pin_mode_names[] = {
251 "Mic 50pc bias", "Mic 80pc bias", 259 "Mic 50pc bias", "Mic 80pc bias",
@@ -255,19 +263,27 @@ static unsigned char alc_pin_mode_values[] = {
255 PIN_VREF50, PIN_VREF80, PIN_IN, PIN_OUT, PIN_HP, 263 PIN_VREF50, PIN_VREF80, PIN_IN, PIN_OUT, PIN_HP,
256}; 264};
257/* The control can present all 5 options, or it can limit the options based 265/* The control can present all 5 options, or it can limit the options based
258 * in the pin being assumed to be exclusively an input or an output pin. 266 * in the pin being assumed to be exclusively an input or an output pin. In
267 * addition, "input" pins may or may not process the mic bias option
268 * depending on actual widget capability (NIDs 0x0f and 0x10 don't seem to
269 * accept requests for bias as of chip versions up to March 2006) and/or
270 * wiring in the computer.
259 */ 271 */
260#define ALC_PIN_DIR_IN 0x00 272#define ALC_PIN_DIR_IN 0x00
261#define ALC_PIN_DIR_OUT 0x01 273#define ALC_PIN_DIR_OUT 0x01
262#define ALC_PIN_DIR_INOUT 0x02 274#define ALC_PIN_DIR_INOUT 0x02
275#define ALC_PIN_DIR_IN_NOMICBIAS 0x03
276#define ALC_PIN_DIR_INOUT_NOMICBIAS 0x04
263 277
264/* Info about the pin modes supported by the three different pin directions. 278/* Info about the pin modes supported by the different pin direction modes.
265 * For each direction the minimum and maximum values are given. 279 * For each direction the minimum and maximum values are given.
266 */ 280 */
267static signed char alc_pin_mode_dir_info[3][2] = { 281static signed char alc_pin_mode_dir_info[5][2] = {
268 { 0, 2 }, /* ALC_PIN_DIR_IN */ 282 { 0, 2 }, /* ALC_PIN_DIR_IN */
269 { 3, 4 }, /* ALC_PIN_DIR_OUT */ 283 { 3, 4 }, /* ALC_PIN_DIR_OUT */
270 { 0, 4 }, /* ALC_PIN_DIR_INOUT */ 284 { 0, 4 }, /* ALC_PIN_DIR_INOUT */
285 { 2, 2 }, /* ALC_PIN_DIR_IN_NOMICBIAS */
286 { 2, 4 }, /* ALC_PIN_DIR_INOUT_NOMICBIAS */
271}; 287};
272#define alc_pin_mode_min(_dir) (alc_pin_mode_dir_info[_dir][0]) 288#define alc_pin_mode_min(_dir) (alc_pin_mode_dir_info[_dir][0])
273#define alc_pin_mode_max(_dir) (alc_pin_mode_dir_info[_dir][1]) 289#define alc_pin_mode_max(_dir) (alc_pin_mode_dir_info[_dir][1])
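alc_pin_mode_dir_info[] above gives, for each pin-direction class, the first and last index into alc_pin_mode_values[] that the corresponding "... Jack Mode" control may select; the new _NOMICBIAS entries simply exclude the two mic-bias modes, so ALC_PIN_DIR_IN_NOMICBIAS allows only index 2 (plain input) and ALC_PIN_DIR_INOUT_NOMICBIAS spans indices 2-4. A purely illustrative helper (the function name is ours) that walks the allowed range for a given direction:

static void alc_dump_pin_modes(int dir)
{
	int i;

	for (i = alc_pin_mode_min(dir); i <= alc_pin_mode_max(dir); i++)
		printk(KERN_DEBUG "mode %d (%s) -> pinctl 0x%02x\n",
		       i, alc_pin_mode_names[i], alc_pin_mode_values[i]);
}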
@@ -329,9 +345,10 @@ static int alc_pin_mode_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_v
329 * input modes. 345 * input modes.
330 * 346 *
331 * Dynamically switching the input/output buffers probably 347 * Dynamically switching the input/output buffers probably
332 * reduces noise slightly, particularly on input. However, 348 * reduces noise slightly (particularly on input) so we'll
333 * havingboth input and output buffers enabled 349 * do it. However, having both input and output buffers
334 * simultaneously doesn't seem to be problematic. 350 * enabled simultaneously doesn't seem to be problematic if
351 * this turns out to be necessary in the future.
335 */ 352 */
336 if (val <= 2) { 353 if (val <= 2) {
337 snd_hda_codec_write(codec,nid,0,AC_VERB_SET_AMP_GAIN_MUTE, 354 snd_hda_codec_write(codec,nid,0,AC_VERB_SET_AMP_GAIN_MUTE,
@@ -483,6 +500,9 @@ static void setup_preset(struct alc_spec *spec, const struct alc_config_preset *
483 spec->multiout.dig_out_nid = preset->dig_out_nid; 500 spec->multiout.dig_out_nid = preset->dig_out_nid;
484 spec->multiout.hp_nid = preset->hp_nid; 501 spec->multiout.hp_nid = preset->hp_nid;
485 502
503 spec->num_mux_defs = preset->num_mux_defs;
504 if (! spec->num_mux_defs)
505 spec->num_mux_defs = 1;
486 spec->input_mux = preset->input_mux; 506 spec->input_mux = preset->input_mux;
487 507
488 spec->num_adc_nids = preset->num_adc_nids; 508 spec->num_adc_nids = preset->num_adc_nids;
@@ -1427,6 +1447,82 @@ static void alc880_lg_unsol_event(struct hda_codec *codec, unsigned int res)
1427} 1447}
1428 1448
1429/* 1449/*
1450 * LG LW20
1451 *
1452 * Pin assignment:
1453 * Speaker-out: 0x14
1454 * Mic-In: 0x18
1455 * Built-in Mic-In: 0x19 (?)
1456 * HP-Out: 0x1b
1457 * SPDIF-Out: 0x1e
1458 */
1459
1460/* seems analog CD is not working */
1461static struct hda_input_mux alc880_lg_lw_capture_source = {
1462 .num_items = 2,
1463 .items = {
1464 { "Mic", 0x0 },
1465 { "Internal Mic", 0x1 },
1466 },
1467};
1468
1469static struct snd_kcontrol_new alc880_lg_lw_mixer[] = {
1470 HDA_CODEC_VOLUME("Master Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
1471 HDA_BIND_MUTE("Master Playback Switch", 0x0c, 2, HDA_INPUT),
1472 HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
1473 HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
1474 HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
1475 HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
1476 { } /* end */
1477};
1478
1479static struct hda_verb alc880_lg_lw_init_verbs[] = {
1480 /* set capture source to mic-in */
1481 {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
1482 {0x08, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
1483 {0x09, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
1484 {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(7)},
1485 /* speaker-out */
1486 {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
1487 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
1488 /* HP-out */
1489 {0x13, AC_VERB_SET_CONNECT_SEL, 0x00},
1490 {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
1491 {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
1492 /* mic-in to input */
1493 {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
1494 {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
1495 /* built-in mic */
1496 {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
1497 {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
1498 /* jack sense */
1499 {0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | 0x1},
1500 { }
1501};
1502
1503/* toggle speaker-output according to the hp-jack state */
1504static void alc880_lg_lw_automute(struct hda_codec *codec)
1505{
1506 unsigned int present;
1507
1508 present = snd_hda_codec_read(codec, 0x1b, 0,
1509 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
1510 snd_hda_codec_amp_update(codec, 0x14, 0, HDA_OUTPUT, 0,
1511 0x80, present ? 0x80 : 0);
1512 snd_hda_codec_amp_update(codec, 0x14, 1, HDA_OUTPUT, 0,
1513 0x80, present ? 0x80 : 0);
1514}
1515
1516static void alc880_lg_lw_unsol_event(struct hda_codec *codec, unsigned int res)
1517{
1518 /* Looks like the unsol event is incompatible with the standard
1519 * definition. 4bit tag is placed at 28 bit!
1520 */
1521 if ((res >> 28) == 0x01)
1522 alc880_lg_lw_automute(codec);
1523}
1524
1525/*
1430 * Common callbacks 1526 * Common callbacks
1431 */ 1527 */
1432 1528
@@ -2078,6 +2174,9 @@ static struct hda_board_config alc880_cfg_tbl[] = {
2078 { .modelname = "lg", .config = ALC880_LG }, 2174 { .modelname = "lg", .config = ALC880_LG },
2079 { .pci_subvendor = 0x1854, .pci_subdevice = 0x003b, .config = ALC880_LG }, 2175 { .pci_subvendor = 0x1854, .pci_subdevice = 0x003b, .config = ALC880_LG },
2080 2176
2177 { .modelname = "lg-lw", .config = ALC880_LG_LW },
2178 { .pci_subvendor = 0x1854, .pci_subdevice = 0x0018, .config = ALC880_LG_LW },
2179
2081#ifdef CONFIG_SND_DEBUG 2180#ifdef CONFIG_SND_DEBUG
2082 { .modelname = "test", .config = ALC880_TEST }, 2181 { .modelname = "test", .config = ALC880_TEST },
2083#endif 2182#endif
@@ -2268,6 +2367,19 @@ static struct alc_config_preset alc880_presets[] = {
2268 .unsol_event = alc880_lg_unsol_event, 2367 .unsol_event = alc880_lg_unsol_event,
2269 .init_hook = alc880_lg_automute, 2368 .init_hook = alc880_lg_automute,
2270 }, 2369 },
2370 [ALC880_LG_LW] = {
2371 .mixers = { alc880_lg_lw_mixer },
2372 .init_verbs = { alc880_volume_init_verbs,
2373 alc880_lg_lw_init_verbs },
2374 .num_dacs = 1,
2375 .dac_nids = alc880_dac_nids,
2376 .dig_out_nid = ALC880_DIGOUT_NID,
2377 .num_channel_mode = ARRAY_SIZE(alc880_2_jack_modes),
2378 .channel_mode = alc880_2_jack_modes,
2379 .input_mux = &alc880_lg_lw_capture_source,
2380 .unsol_event = alc880_lg_lw_unsol_event,
2381 .init_hook = alc880_lg_lw_automute,
2382 },
2271#ifdef CONFIG_SND_DEBUG 2383#ifdef CONFIG_SND_DEBUG
2272 [ALC880_TEST] = { 2384 [ALC880_TEST] = {
2273 .mixers = { alc880_test_mixer }, 2385 .mixers = { alc880_test_mixer },
@@ -2593,6 +2705,7 @@ static int alc880_parse_auto_config(struct hda_codec *codec)
2593 2705
2594 spec->init_verbs[spec->num_init_verbs++] = alc880_volume_init_verbs; 2706 spec->init_verbs[spec->num_init_verbs++] = alc880_volume_init_verbs;
2595 2707
2708 spec->num_mux_defs = 1;
2596 spec->input_mux = &spec->private_imux; 2709 spec->input_mux = &spec->private_imux;
2597 2710
2598 return 1; 2711 return 1;
@@ -2722,30 +2835,56 @@ static struct hda_input_mux alc260_capture_source = {
2722}; 2835};
2723 2836
2724/* On Fujitsu S702x laptops capture only makes sense from Mic/LineIn jack, 2837/* On Fujitsu S702x laptops capture only makes sense from Mic/LineIn jack,
2725 * headphone jack and the internal CD lines. 2838 * headphone jack and the internal CD lines since these are the only pins at
2839 * which audio can appear. For flexibility, also allow the option of
2840 * recording the mixer output on the second ADC (ADC0 doesn't have a
2841 * connection to the mixer output).
2726 */ 2842 */
2727static struct hda_input_mux alc260_fujitsu_capture_source = { 2843static struct hda_input_mux alc260_fujitsu_capture_sources[2] = {
2728 .num_items = 3, 2844 {
2729 .items = { 2845 .num_items = 3,
2730 { "Mic/Line", 0x0 }, 2846 .items = {
2731 { "CD", 0x4 }, 2847 { "Mic/Line", 0x0 },
2732 { "Headphone", 0x2 }, 2848 { "CD", 0x4 },
2849 { "Headphone", 0x2 },
2850 },
2733 }, 2851 },
2852 {
2853 .num_items = 4,
2854 .items = {
2855 { "Mic/Line", 0x0 },
2856 { "CD", 0x4 },
2857 { "Headphone", 0x2 },
2858 { "Mixer", 0x5 },
2859 },
2860 },
2861
2734}; 2862};
2735 2863
2736/* Acer TravelMate(/Extensa/Aspire) notebooks have similar configutation to 2864/* Acer TravelMate(/Extensa/Aspire) notebooks have similar configuration to
2737 * the Fujitsu S702x, but jacks are marked differently. We won't allow 2865 * the Fujitsu S702x, but jacks are marked differently.
2738 * retasking the Headphone jack, so it won't be available here.
2739 */ 2866 */
2740static struct hda_input_mux alc260_acer_capture_source = { 2867static struct hda_input_mux alc260_acer_capture_sources[2] = {
2741 .num_items = 3, 2868 {
2742 .items = { 2869 .num_items = 4,
2743 { "Mic", 0x0 }, 2870 .items = {
2744 { "Line", 0x2 }, 2871 { "Mic", 0x0 },
2745 { "CD", 0x4 }, 2872 { "Line", 0x2 },
2873 { "CD", 0x4 },
2874 { "Headphone", 0x5 },
2875 },
2876 },
2877 {
2878 .num_items = 5,
2879 .items = {
2880 { "Mic", 0x0 },
2881 { "Line", 0x2 },
2882 { "CD", 0x4 },
2883 { "Headphone", 0x6 },
2884 { "Mixer", 0x5 },
2885 },
2746 }, 2886 },
2747}; 2887};
2748
2749/* 2888/*
2750 * This is just place-holder, so there's something for alc_build_pcms to look 2889 * This is just place-holder, so there's something for alc_build_pcms to look
2751 * at when it calculates the maximum number of channels. ALC260 has no mixer 2890 * at when it calculates the maximum number of channels. ALC260 has no mixer
@@ -2806,6 +2945,9 @@ static struct snd_kcontrol_new alc260_hp_3013_mixer[] = {
2806 { } /* end */ 2945 { } /* end */
2807}; 2946};
2808 2947
2948/* Fujitsu S702x series laptops. ALC260 pin usage: Mic/Line jack = 0x12,
2949 * HP jack = 0x14, CD audio = 0x16, internal speaker = 0x10.
2950 */
2809static struct snd_kcontrol_new alc260_fujitsu_mixer[] = { 2951static struct snd_kcontrol_new alc260_fujitsu_mixer[] = {
2810 HDA_CODEC_VOLUME("Headphone Playback Volume", 0x08, 0x0, HDA_OUTPUT), 2952 HDA_CODEC_VOLUME("Headphone Playback Volume", 0x08, 0x0, HDA_OUTPUT),
2811 HDA_BIND_MUTE("Headphone Playback Switch", 0x08, 2, HDA_INPUT), 2953 HDA_BIND_MUTE("Headphone Playback Switch", 0x08, 2, HDA_INPUT),
@@ -2822,9 +2964,28 @@ static struct snd_kcontrol_new alc260_fujitsu_mixer[] = {
2822 { } /* end */ 2964 { } /* end */
2823}; 2965};
2824 2966
2967/* Mixer for Acer TravelMate(/Extensa/Aspire) notebooks. Note that current
2968 * versions of the ALC260 don't act on requests to enable mic bias from NID
2969 * 0x0f (used to drive the headphone jack in these laptops). The ALC260
2970 * datasheet doesn't mention this restriction. At this stage it's not clear
2971 * whether this behaviour is intentional or is a hardware bug in chip
2972 * revisions available in early 2006. Therefore for now allow the
2973 * "Headphone Jack Mode" control to span all choices, but if it turns out
2974 * that the lack of mic bias for this NID is intentional we could change the
2975 * mode from ALC_PIN_DIR_INOUT to ALC_PIN_DIR_INOUT_NOMICBIAS.
2976 *
2977 * In addition, Acer TravelMate(/Extensa/Aspire) notebooks in early 2006
2978 * don't appear to make the mic bias available from the "line" jack, even
2979 * though the NID used for this jack (0x14) can supply it. The theory is
2980 * that perhaps Acer have included blocking capacitors between the ALC260
2981 * and the output jack. If this turns out to be the case for all such
2982 * models the "Line Jack Mode" mode could be changed from ALC_PIN_DIR_INOUT
2983 * to ALC_PIN_DIR_INOUT_NOMICBIAS.
2984 */
2825static struct snd_kcontrol_new alc260_acer_mixer[] = { 2985static struct snd_kcontrol_new alc260_acer_mixer[] = {
2826 HDA_CODEC_VOLUME("Master Playback Volume", 0x08, 0x0, HDA_OUTPUT), 2986 HDA_CODEC_VOLUME("Master Playback Volume", 0x08, 0x0, HDA_OUTPUT),
2827 HDA_BIND_MUTE("Master Playback Switch", 0x08, 2, HDA_INPUT), 2987 HDA_BIND_MUTE("Master Playback Switch", 0x08, 2, HDA_INPUT),
2988 ALC_PIN_MODE("Headphone Jack Mode", 0x0f, ALC_PIN_DIR_INOUT),
2828 HDA_CODEC_VOLUME("CD Playback Volume", 0x07, 0x04, HDA_INPUT), 2989 HDA_CODEC_VOLUME("CD Playback Volume", 0x07, 0x04, HDA_INPUT),
2829 HDA_CODEC_MUTE("CD Playback Switch", 0x07, 0x04, HDA_INPUT), 2990 HDA_CODEC_MUTE("CD Playback Switch", 0x07, 0x04, HDA_INPUT),
2830 HDA_CODEC_VOLUME("Mic Playback Volume", 0x07, 0x0, HDA_INPUT), 2991 HDA_CODEC_VOLUME("Mic Playback Volume", 0x07, 0x0, HDA_INPUT),
@@ -3038,7 +3199,8 @@ static struct hda_verb alc260_hp_3013_init_verbs[] = {
3038}; 3199};
3039 3200
3040/* Initialisation sequence for ALC260 as configured in Fujitsu S702x 3201/* Initialisation sequence for ALC260 as configured in Fujitsu S702x
3041 * laptops. 3202 * laptops. ALC260 pin usage: Mic/Line jack = 0x12, HP jack = 0x14, CD
3203 * audio = 0x16, internal speaker = 0x10.
3042 */ 3204 */
3043static struct hda_verb alc260_fujitsu_init_verbs[] = { 3205static struct hda_verb alc260_fujitsu_init_verbs[] = {
3044 /* Disable all GPIOs */ 3206 /* Disable all GPIOs */
@@ -3185,10 +3347,10 @@ static struct hda_verb alc260_acer_init_verbs[] = {
3185 {0x04, AC_VERB_SET_CONNECT_SEL, 0x00}, 3347 {0x04, AC_VERB_SET_CONNECT_SEL, 0x00},
3186 3348
3187 /* Do similar with the second ADC: mute capture input amp and 3349 /* Do similar with the second ADC: mute capture input amp and
3188 * set ADC connection to line (on line1 pin) 3350 * set ADC connection to mic to match ALSA's default state.
3189 */ 3351 */
3190 {0x05, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, 3352 {0x05, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
3191 {0x05, AC_VERB_SET_CONNECT_SEL, 0x02}, 3353 {0x05, AC_VERB_SET_CONNECT_SEL, 0x00},
3192 3354
3193 /* Mute all inputs to mixer widget (even unconnected ones) */ 3355 /* Mute all inputs to mixer widget (even unconnected ones) */
3194 {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, /* mic1 pin */ 3356 {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, /* mic1 pin */
@@ -3213,26 +3375,35 @@ static hda_nid_t alc260_test_dac_nids[1] = {
3213static hda_nid_t alc260_test_adc_nids[2] = { 3375static hda_nid_t alc260_test_adc_nids[2] = {
3214 0x04, 0x05, 3376 0x04, 0x05,
3215}; 3377};
3216/* This is a bit messy since the two input muxes in the ALC260 have slight 3378/* For testing the ALC260, each input MUX needs its own definition since
3217 * variations in their signal assignments. The ideal way to deal with this 3379 * the signal assignments are different. This assumes that the first ADC
3218 * is to extend alc_spec.input_mux to allow a different input MUX for each 3380 * is NID 0x04.
3219 * ADC. For the purposes of the test model it's sufficient to just list
3220 * both options for affected signal indices. The separate input mux
3221 * functionality only needs to be considered if a model comes along which
3222 * actually uses signals 0x5, 0x6 and 0x7 for something which makes sense to
3223 * record.
3224 */ 3381 */
3225static struct hda_input_mux alc260_test_capture_source = { 3382static struct hda_input_mux alc260_test_capture_sources[2] = {
3226 .num_items = 8, 3383 {
3227 .items = { 3384 .num_items = 7,
3228 { "MIC1 pin", 0x0 }, 3385 .items = {
3229 { "MIC2 pin", 0x1 }, 3386 { "MIC1 pin", 0x0 },
3230 { "LINE1 pin", 0x2 }, 3387 { "MIC2 pin", 0x1 },
3231 { "LINE2 pin", 0x3 }, 3388 { "LINE1 pin", 0x2 },
3232 { "CD pin", 0x4 }, 3389 { "LINE2 pin", 0x3 },
3233 { "LINE-OUT pin (cap1), Mixer (cap2)", 0x5 }, 3390 { "CD pin", 0x4 },
3234 { "HP-OUT pin (cap1), LINE-OUT pin (cap2)", 0x6 }, 3391 { "LINE-OUT pin", 0x5 },
3235 { "HP-OUT pin (cap2 only)", 0x7 }, 3392 { "HP-OUT pin", 0x6 },
3393 },
3394 },
3395 {
3396 .num_items = 8,
3397 .items = {
3398 { "MIC1 pin", 0x0 },
3399 { "MIC2 pin", 0x1 },
3400 { "LINE1 pin", 0x2 },
3401 { "LINE2 pin", 0x3 },
3402 { "CD pin", 0x4 },
3403 { "Mixer", 0x5 },
3404 { "LINE-OUT pin", 0x6 },
3405 { "HP-OUT pin", 0x7 },
3406 },
3236 }, 3407 },
3237}; 3408};
3238static struct snd_kcontrol_new alc260_test_mixer[] = { 3409static struct snd_kcontrol_new alc260_test_mixer[] = {
@@ -3244,7 +3415,17 @@ static struct snd_kcontrol_new alc260_test_mixer[] = {
3244 HDA_CODEC_VOLUME("LOUT1 Playback Volume", 0x08, 0x0, HDA_OUTPUT), 3415 HDA_CODEC_VOLUME("LOUT1 Playback Volume", 0x08, 0x0, HDA_OUTPUT),
3245 HDA_BIND_MUTE("LOUT1 Playback Switch", 0x08, 2, HDA_INPUT), 3416 HDA_BIND_MUTE("LOUT1 Playback Switch", 0x08, 2, HDA_INPUT),
3246 3417
3247 /* Modes for retasking pin widgets */ 3418 /* Modes for retasking pin widgets
3419 * Note: the ALC260 doesn't seem to act on requests to enable mic
3420 * bias from NIDs 0x0f and 0x10. The ALC260 datasheet doesn't
3421 * mention this restriction. At this stage it's not clear whether
3422 * this behaviour is intentional or is a hardware bug in chip
3423 * revisions available at least up until early 2006. Therefore for
3424 * now allow the "HP-OUT" and "LINE-OUT" Mode controls to span all
3425 * choices, but if it turns out that the lack of mic bias for these
3426 * NIDs is intentional we could change their modes from
3427 * ALC_PIN_DIR_INOUT to ALC_PIN_DIR_INOUT_NOMICBIAS.
3428 */
3248 ALC_PIN_MODE("HP-OUT pin mode", 0x10, ALC_PIN_DIR_INOUT), 3429 ALC_PIN_MODE("HP-OUT pin mode", 0x10, ALC_PIN_DIR_INOUT),
3249 ALC_PIN_MODE("LINE-OUT pin mode", 0x0f, ALC_PIN_DIR_INOUT), 3430 ALC_PIN_MODE("LINE-OUT pin mode", 0x0f, ALC_PIN_DIR_INOUT),
3250 ALC_PIN_MODE("LINE2 pin mode", 0x15, ALC_PIN_DIR_INOUT), 3431 ALC_PIN_MODE("LINE2 pin mode", 0x15, ALC_PIN_DIR_INOUT),
@@ -3606,6 +3787,7 @@ static int alc260_parse_auto_config(struct hda_codec *codec)
3606 3787
3607 spec->init_verbs[spec->num_init_verbs++] = alc260_volume_init_verbs; 3788 spec->init_verbs[spec->num_init_verbs++] = alc260_volume_init_verbs;
3608 3789
3790 spec->num_mux_defs = 1;
3609 spec->input_mux = &spec->private_imux; 3791 spec->input_mux = &spec->private_imux;
3610 3792
3611 /* check whether NID 0x04 is valid */ 3793 /* check whether NID 0x04 is valid */
@@ -3711,7 +3893,8 @@ static struct alc_config_preset alc260_presets[] = {
3711 .adc_nids = alc260_dual_adc_nids, 3893 .adc_nids = alc260_dual_adc_nids,
3712 .num_channel_mode = ARRAY_SIZE(alc260_modes), 3894 .num_channel_mode = ARRAY_SIZE(alc260_modes),
3713 .channel_mode = alc260_modes, 3895 .channel_mode = alc260_modes,
3714 .input_mux = &alc260_fujitsu_capture_source, 3896 .num_mux_defs = ARRAY_SIZE(alc260_fujitsu_capture_sources),
3897 .input_mux = alc260_fujitsu_capture_sources,
3715 }, 3898 },
3716 [ALC260_ACER] = { 3899 [ALC260_ACER] = {
3717 .mixers = { alc260_acer_mixer, 3900 .mixers = { alc260_acer_mixer,
@@ -3723,7 +3906,8 @@ static struct alc_config_preset alc260_presets[] = {
3723 .adc_nids = alc260_dual_adc_nids, 3906 .adc_nids = alc260_dual_adc_nids,
3724 .num_channel_mode = ARRAY_SIZE(alc260_modes), 3907 .num_channel_mode = ARRAY_SIZE(alc260_modes),
3725 .channel_mode = alc260_modes, 3908 .channel_mode = alc260_modes,
3726 .input_mux = &alc260_acer_capture_source, 3909 .num_mux_defs = ARRAY_SIZE(alc260_acer_capture_sources),
3910 .input_mux = alc260_acer_capture_sources,
3727 }, 3911 },
3728#ifdef CONFIG_SND_DEBUG 3912#ifdef CONFIG_SND_DEBUG
3729 [ALC260_TEST] = { 3913 [ALC260_TEST] = {
@@ -3736,7 +3920,8 @@ static struct alc_config_preset alc260_presets[] = {
3736 .adc_nids = alc260_test_adc_nids, 3920 .adc_nids = alc260_test_adc_nids,
3737 .num_channel_mode = ARRAY_SIZE(alc260_modes), 3921 .num_channel_mode = ARRAY_SIZE(alc260_modes),
3738 .channel_mode = alc260_modes, 3922 .channel_mode = alc260_modes,
3739 .input_mux = &alc260_test_capture_source, 3923 .num_mux_defs = ARRAY_SIZE(alc260_test_capture_sources),
3924 .input_mux = alc260_test_capture_sources,
3740 }, 3925 },
3741#endif 3926#endif
3742}; 3927};
@@ -3828,7 +4013,6 @@ static struct hda_input_mux alc882_capture_source = {
3828 { "CD", 0x4 }, 4013 { "CD", 0x4 },
3829 }, 4014 },
3830}; 4015};
3831
3832#define alc882_mux_enum_info alc_mux_enum_info 4016#define alc882_mux_enum_info alc_mux_enum_info
3833#define alc882_mux_enum_get alc_mux_enum_get 4017#define alc882_mux_enum_get alc_mux_enum_get
3834 4018
@@ -4730,6 +4914,7 @@ static int alc262_parse_auto_config(struct hda_codec *codec)
4730 spec->mixers[spec->num_mixers++] = spec->kctl_alloc; 4914 spec->mixers[spec->num_mixers++] = spec->kctl_alloc;
4731 4915
4732 spec->init_verbs[spec->num_init_verbs++] = alc262_volume_init_verbs; 4916 spec->init_verbs[spec->num_init_verbs++] = alc262_volume_init_verbs;
4917 spec->num_mux_defs = 1;
4733 spec->input_mux = &spec->private_imux; 4918 spec->input_mux = &spec->private_imux;
4734 4919
4735 return 1; 4920 return 1;
@@ -5406,6 +5591,7 @@ static int alc861_parse_auto_config(struct hda_codec *codec)
5406 5591
5407 spec->init_verbs[spec->num_init_verbs++] = alc861_auto_init_verbs; 5592 spec->init_verbs[spec->num_init_verbs++] = alc861_auto_init_verbs;
5408 5593
5594 spec->num_mux_defs = 1;
5409 spec->input_mux = &spec->private_imux; 5595 spec->input_mux = &spec->private_imux;
5410 5596
5411 spec->adc_nids = alc861_adc_nids; 5597 spec->adc_nids = alc861_adc_nids;
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index b56ca4019392..abe9493f0a2c 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -534,6 +534,22 @@ static int stac92xx_build_pcms(struct hda_codec *codec)
534 return 0; 534 return 0;
535} 535}
536 536
537static unsigned int stac92xx_get_vref(struct hda_codec *codec, hda_nid_t nid)
538{
539 unsigned int pincap = snd_hda_param_read(codec, nid,
540 AC_PAR_PIN_CAP);
541 pincap = (pincap & AC_PINCAP_VREF) >> AC_PINCAP_VREF_SHIFT;
542 if (pincap & AC_PINCAP_VREF_100)
543 return AC_PINCTL_VREF_100;
544 if (pincap & AC_PINCAP_VREF_80)
545 return AC_PINCTL_VREF_80;
546 if (pincap & AC_PINCAP_VREF_50)
547 return AC_PINCTL_VREF_50;
548 if (pincap & AC_PINCAP_VREF_GRD)
549 return AC_PINCTL_VREF_GRD;
550 return 0;
551}
552
537static void stac92xx_auto_set_pinctl(struct hda_codec *codec, hda_nid_t nid, int pin_type) 553static void stac92xx_auto_set_pinctl(struct hda_codec *codec, hda_nid_t nid, int pin_type)
538 554
539{ 555{
@@ -571,9 +587,12 @@ static int stac92xx_io_switch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_
571 587
572 if (val) 588 if (val)
573 stac92xx_auto_set_pinctl(codec, nid, AC_PINCTL_OUT_EN); 589 stac92xx_auto_set_pinctl(codec, nid, AC_PINCTL_OUT_EN);
574 else 590 else {
575 stac92xx_auto_set_pinctl(codec, nid, AC_PINCTL_IN_EN); 591 unsigned int pinctl = AC_PINCTL_IN_EN;
576 592 if (io_idx) /* set VREF for mic */
593 pinctl |= stac92xx_get_vref(codec, nid);
594 stac92xx_auto_set_pinctl(codec, nid, pinctl);
595 }
577 return 1; 596 return 1;
578} 597}
579 598
@@ -767,13 +786,8 @@ static int stac92xx_auto_create_hp_ctls(struct hda_codec *codec, struct auto_pin
767 return 0; 786 return 0;
768 787
769 wid_caps = get_wcaps(codec, pin); 788 wid_caps = get_wcaps(codec, pin);
770 if (wid_caps & AC_WCAP_UNSOL_CAP) { 789 if (wid_caps & AC_WCAP_UNSOL_CAP)
771 /* Enable unsolicited responses on the HP widget */
772 snd_hda_codec_write(codec, pin, 0,
773 AC_VERB_SET_UNSOLICITED_ENABLE,
774 STAC_UNSOL_ENABLE);
775 spec->hp_detect = 1; 790 spec->hp_detect = 1;
776 }
777 791
778 nid = snd_hda_codec_read(codec, pin, 0, AC_VERB_GET_CONNECT_LIST, 0) & 0xff; 792 nid = snd_hda_codec_read(codec, pin, 0, AC_VERB_GET_CONNECT_LIST, 0) & 0xff;
779 for (i = 0; i < cfg->line_outs; i++) { 793 for (i = 0; i < cfg->line_outs; i++) {
@@ -896,13 +910,8 @@ static int stac9200_auto_create_hp_ctls(struct hda_codec *codec,
896 return 0; 910 return 0;
897 911
898 wid_caps = get_wcaps(codec, pin); 912 wid_caps = get_wcaps(codec, pin);
899 if (wid_caps & AC_WCAP_UNSOL_CAP) { 913 if (wid_caps & AC_WCAP_UNSOL_CAP)
900 /* Enable unsolicited responses on the HP widget */
901 snd_hda_codec_write(codec, pin, 0,
902 AC_VERB_SET_UNSOLICITED_ENABLE,
903 STAC_UNSOL_ENABLE);
904 spec->hp_detect = 1; 914 spec->hp_detect = 1;
905 }
906 915
907 return 0; 916 return 0;
908} 917}
@@ -944,6 +953,10 @@ static int stac92xx_init(struct hda_codec *codec)
944 953
945 /* set up pins */ 954 /* set up pins */
946 if (spec->hp_detect) { 955 if (spec->hp_detect) {
956 /* Enable unsolicited responses on the HP widget */
957 snd_hda_codec_write(codec, cfg->hp_pin, 0,
958 AC_VERB_SET_UNSOLICITED_ENABLE,
959 STAC_UNSOL_ENABLE);
947 /* fake event to set up pins */ 960 /* fake event to set up pins */
948 codec->patch_ops.unsol_event(codec, STAC_HP_EVENT << 26); 961 codec->patch_ops.unsol_event(codec, STAC_HP_EVENT << 26);
949 } else { 962 } else {
@@ -951,9 +964,13 @@ static int stac92xx_init(struct hda_codec *codec)
951 stac92xx_auto_init_hp_out(codec); 964 stac92xx_auto_init_hp_out(codec);
952 } 965 }
953 for (i = 0; i < AUTO_PIN_LAST; i++) { 966 for (i = 0; i < AUTO_PIN_LAST; i++) {
954 if (cfg->input_pins[i]) 967 hda_nid_t nid = cfg->input_pins[i];
955 stac92xx_auto_set_pinctl(codec, cfg->input_pins[i], 968 if (nid) {
956 AC_PINCTL_IN_EN); 969 unsigned int pinctl = AC_PINCTL_IN_EN;
970 if (i == AUTO_PIN_MIC || i == AUTO_PIN_FRONT_MIC)
971 pinctl |= stac92xx_get_vref(codec, nid);
972 stac92xx_auto_set_pinctl(codec, nid, pinctl);
973 }
957 } 974 }
958 if (cfg->dig_out_pin) 975 if (cfg->dig_out_pin)
959 stac92xx_auto_set_pinctl(codec, cfg->dig_out_pin, 976 stac92xx_auto_set_pinctl(codec, cfg->dig_out_pin,
diff --git a/sound/pci/ice1712/aureon.c b/sound/pci/ice1712/aureon.c
index 7e6608b14abc..336dc489aee1 100644
--- a/sound/pci/ice1712/aureon.c
+++ b/sound/pci/ice1712/aureon.c
@@ -87,7 +87,151 @@
87#define CS8415_C_BUFFER 0x20 87#define CS8415_C_BUFFER 0x20
88#define CS8415_ID 0x7F 88#define CS8415_ID 0x7F
89 89
90static void aureon_ac97_write(struct snd_ice1712 *ice, unsigned short reg, unsigned short val) { 90/* PCA9554 registers */
91#define PCA9554_DEV 0x40 /* I2C device address */
92#define PCA9554_IN 0x00 /* input port */
93#define PCA9554_OUT 0x01 /* output port */
94#define PCA9554_INVERT 0x02 /* input invert */
95#define PCA9554_DIR 0x03 /* port directions */
96
97/*
98 * Aureon Universe additional controls using PCA9554
99 */
100
101/*
102 * Send data to pca9554
103 */
104static void aureon_pca9554_write(struct snd_ice1712 *ice, unsigned char reg,
105 unsigned char data)
106{
107 unsigned int tmp;
108 int i, j;
109 unsigned char dev = PCA9554_DEV; /* ID 0100000, write */
110 unsigned char val = 0;
111
112 tmp = snd_ice1712_gpio_read(ice);
113
114 snd_ice1712_gpio_set_mask(ice, ~(AUREON_SPI_MOSI|AUREON_SPI_CLK|
115 AUREON_WM_RW|AUREON_WM_CS|
116 AUREON_CS8415_CS));
117 tmp |= AUREON_WM_RW;
118 tmp |= AUREON_CS8415_CS | AUREON_WM_CS; /* disable SPI devices */
119
120 tmp &= ~AUREON_SPI_MOSI;
121 tmp &= ~AUREON_SPI_CLK;
122 snd_ice1712_gpio_write(ice, tmp);
123 udelay(50);
124
125 /*
126 * send i2c stop condition and start condition
127 * to obtain sane state
128 */
129 tmp |= AUREON_SPI_CLK;
130 snd_ice1712_gpio_write(ice, tmp);
131 udelay(50);
132 tmp |= AUREON_SPI_MOSI;
133 snd_ice1712_gpio_write(ice, tmp);
134 udelay(100);
135 tmp &= ~AUREON_SPI_MOSI;
136 snd_ice1712_gpio_write(ice, tmp);
137 udelay(50);
138 tmp &= ~AUREON_SPI_CLK;
139 snd_ice1712_gpio_write(ice, tmp);
140 udelay(100);
141 /*
142 * send device address, command and value,
143 * skipping ack cycles inbetween
144 */
145 for (j = 0; j < 3; j++) {
146 switch(j) {
147 case 0: val = dev; break;
148 case 1: val = reg; break;
149 case 2: val = data; break;
150 }
151 for (i = 7; i >= 0; i--) {
152 tmp &= ~AUREON_SPI_CLK;
153 snd_ice1712_gpio_write(ice, tmp);
154 udelay(40);
155 if (val & (1 << i))
156 tmp |= AUREON_SPI_MOSI;
157 else
158 tmp &= ~AUREON_SPI_MOSI;
159 snd_ice1712_gpio_write(ice, tmp);
160 udelay(40);
161 tmp |= AUREON_SPI_CLK;
162 snd_ice1712_gpio_write(ice, tmp);
163 udelay(40);
164 }
165 tmp &= ~AUREON_SPI_CLK;
166 snd_ice1712_gpio_write(ice, tmp);
167 udelay(40);
168 tmp |= AUREON_SPI_CLK;
169 snd_ice1712_gpio_write(ice, tmp);
170 udelay(40);
171 tmp &= ~AUREON_SPI_CLK;
172 snd_ice1712_gpio_write(ice, tmp);
173 udelay(40);
174 }
175 tmp &= ~AUREON_SPI_CLK;
176 snd_ice1712_gpio_write(ice, tmp);
177 udelay(40);
178 tmp &= ~AUREON_SPI_MOSI;
179 snd_ice1712_gpio_write(ice, tmp);
180 udelay(40);
181 tmp |= AUREON_SPI_CLK;
182 snd_ice1712_gpio_write(ice, tmp);
183 udelay(50);
184 tmp |= AUREON_SPI_MOSI;
185 snd_ice1712_gpio_write(ice, tmp);
186 udelay(100);
187}
188
189static int aureon_universe_inmux_info(struct snd_kcontrol *kcontrol,
190 struct snd_ctl_elem_info *uinfo)
191{
192 char *texts[3] = {"Internal Aux", "Wavetable", "Rear Line-In"};
193
194 uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
195 uinfo->count = 1;
196 uinfo->value.enumerated.items = 3;
197 if(uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
198 uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
199 strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
200 return 0;
201}
202
203static int aureon_universe_inmux_get(struct snd_kcontrol *kcontrol,
204 struct snd_ctl_elem_value *ucontrol)
205{
206 struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
207 ucontrol->value.integer.value[0] = ice->spec.aureon.pca9554_out;
208 return 0;
209}
210
211static int aureon_universe_inmux_put(struct snd_kcontrol *kcontrol,
212 struct snd_ctl_elem_value *ucontrol)
213{
214 struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
215 unsigned char oval, nval;
216 int change;
217
218 snd_ice1712_save_gpio_status(ice);
219
220 oval = ice->spec.aureon.pca9554_out;
221 nval = ucontrol->value.integer.value[0];
222 if ((change = (oval != nval))) {
223 aureon_pca9554_write(ice, PCA9554_OUT, nval);
224 ice->spec.aureon.pca9554_out = nval;
225 }
226 snd_ice1712_restore_gpio_status(ice);
227
228 return change;
229}
230
231
232static void aureon_ac97_write(struct snd_ice1712 *ice, unsigned short reg,
233 unsigned short val)
234{
91 unsigned int tmp; 235 unsigned int tmp;
92 236
93 /* Send address to XILINX chip */ 237 /* Send address to XILINX chip */
@@ -146,7 +290,8 @@ static unsigned short aureon_ac97_read(struct snd_ice1712 *ice, unsigned short r
146/* 290/*
147 * Initialize STAC9744 chip 291 * Initialize STAC9744 chip
148 */ 292 */
149static int aureon_ac97_init (struct snd_ice1712 *ice) { 293static int aureon_ac97_init (struct snd_ice1712 *ice)
294{
150 int i; 295 int i;
151 static unsigned short ac97_defaults[] = { 296 static unsigned short ac97_defaults[] = {
152 0x00, 0x9640, 297 0x00, 0x9640,
@@ -1598,7 +1743,15 @@ static struct snd_kcontrol_new universe_ac97_controls[] __devinitdata = {
1598 .get = aureon_ac97_vol_get, 1743 .get = aureon_ac97_vol_get,
1599 .put = aureon_ac97_vol_put, 1744 .put = aureon_ac97_vol_put,
1600 .private_value = AC97_VIDEO|AUREON_AC97_STEREO 1745 .private_value = AC97_VIDEO|AUREON_AC97_STEREO
1601 } 1746 },
1747 {
1748 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
1749 .name = "Aux Source",
1750 .info = aureon_universe_inmux_info,
1751 .get = aureon_universe_inmux_get,
1752 .put = aureon_universe_inmux_put
1753 }
1754
1602}; 1755};
1603 1756
1604 1757
@@ -1856,6 +2009,10 @@ static int __devinit aureon_init(struct snd_ice1712 *ice)
1856 } 2009 }
1857 2010
1858 snd_ice1712_restore_gpio_status(ice); 2011 snd_ice1712_restore_gpio_status(ice);
2012
2013 /* initialize PCA9554 pin directions & set default input */
2014 aureon_pca9554_write(ice, PCA9554_DIR, 0x00);
2015 aureon_pca9554_write(ice, PCA9554_OUT, 0x00); /* internal AUX */
1859 2016
1860 ice->spec.aureon.master[0] = WM_VOL_MUTE; 2017 ice->spec.aureon.master[0] = WM_VOL_MUTE;
1861 ice->spec.aureon.master[1] = WM_VOL_MUTE; 2018 ice->spec.aureon.master[1] = WM_VOL_MUTE;
diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
index b88eeba2f5d1..32f8415558a5 100644
--- a/sound/pci/ice1712/ice1712.c
+++ b/sound/pci/ice1712/ice1712.c
@@ -2402,7 +2402,7 @@ static int __devinit snd_ice1712_chip_init(struct snd_ice1712 *ice)
2402 if (ice->eeprom.subvendor == ICE1712_SUBDEVICE_DMX6FIRE && !ice->dxr_enable) { 2402 if (ice->eeprom.subvendor == ICE1712_SUBDEVICE_DMX6FIRE && !ice->dxr_enable) {
2403 /* Limit active ADCs and DACs to 6; */ 2403 /* Limit active ADCs and DACs to 6; */
2404 /* Note: DXR extension not supported */ 2404 /* Note: DXR extension not supported */
2405 pci_write_config_byte(ice->pci, 0x60, 0x0a); 2405 pci_write_config_byte(ice->pci, 0x60, 0x2a);
2406 } else { 2406 } else {
2407 pci_write_config_byte(ice->pci, 0x60, ice->eeprom.data[ICE_EEP1_CODEC]); 2407 pci_write_config_byte(ice->pci, 0x60, ice->eeprom.data[ICE_EEP1_CODEC]);
2408 } 2408 }
diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
index f9b22d4a3932..053f8e56fd68 100644
--- a/sound/pci/ice1712/ice1712.h
+++ b/sound/pci/ice1712/ice1712.h
@@ -373,6 +373,7 @@ struct snd_ice1712 {
373 unsigned int cs8415_mux; 373 unsigned int cs8415_mux;
374 unsigned short master[2]; 374 unsigned short master[2];
375 unsigned short vol[8]; 375 unsigned short vol[8];
376 unsigned char pca9554_out;
376 } aureon; 377 } aureon;
377 /* AC97 register cache for Phase28 */ 378 /* AC97 register cache for Phase28 */
378 struct phase28_spec { 379 struct phase28_spec {
diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
index 44393e190929..9c90d901e6b9 100644
--- a/sound/pci/maestro3.c
+++ b/sound/pci/maestro3.c
@@ -831,8 +831,8 @@ struct snd_m3 {
831 struct snd_pcm *pcm; 831 struct snd_pcm *pcm;
832 832
833 struct pci_dev *pci; 833 struct pci_dev *pci;
834 struct m3_quirk *quirk; 834 const struct m3_quirk *quirk;
835 struct m3_hv_quirk *hv_quirk; 835 const struct m3_hv_quirk *hv_quirk;
836 836
837 int dacs_active; 837 int dacs_active;
838 int timer_users; 838 int timer_users;
@@ -892,7 +892,7 @@ static struct pci_device_id snd_m3_ids[] = {
892 892
893MODULE_DEVICE_TABLE(pci, snd_m3_ids); 893MODULE_DEVICE_TABLE(pci, snd_m3_ids);
894 894
895static struct m3_quirk m3_quirk_list[] = { 895static const struct m3_quirk m3_quirk_list[] = {
896 /* panasonic CF-28 "toughbook" */ 896 /* panasonic CF-28 "toughbook" */
897 { 897 {
898 .name = "Panasonic CF-28", 898 .name = "Panasonic CF-28",
@@ -950,7 +950,7 @@ static struct m3_quirk m3_quirk_list[] = {
950}; 950};
951 951
952/* These values came from the Windows driver. */ 952/* These values came from the Windows driver. */
953static struct m3_hv_quirk m3_hv_quirk_list[] = { 953static const struct m3_hv_quirk m3_hv_quirk_list[] = {
954 /* Allegro chips */ 954 /* Allegro chips */
955 { 0x125D, 0x1988, 0x0E11, 0x002E, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD, 0 }, 955 { 0x125D, 0x1988, 0x0E11, 0x002E, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD, 0 },
956 { 0x125D, 0x1988, 0x0E11, 0x0094, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD, 0 }, 956 { 0x125D, 0x1988, 0x0E11, 0x0094, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD, 0 },
@@ -1361,7 +1361,7 @@ static void snd_m3_pcm_setup2(struct snd_m3 *chip, struct m3_dma *s,
1361} 1361}
1362 1362
1363 1363
1364static struct play_vals { 1364static const struct play_vals {
1365 u16 addr, val; 1365 u16 addr, val;
1366} pv[] = { 1366} pv[] = {
1367 {CDATA_LEFT_VOLUME, ARB_VOLUME}, 1367 {CDATA_LEFT_VOLUME, ARB_VOLUME},
@@ -1428,7 +1428,7 @@ snd_m3_playback_setup(struct snd_m3 *chip, struct m3_dma *s,
1428/* 1428/*
1429 * Native record driver 1429 * Native record driver
1430 */ 1430 */
1431static struct rec_vals { 1431static const struct rec_vals {
1432 u16 addr, val; 1432 u16 addr, val;
1433} rv[] = { 1433} rv[] = {
1434 {CDATA_LEFT_VOLUME, ARB_VOLUME}, 1434 {CDATA_LEFT_VOLUME, ARB_VOLUME},
@@ -1598,12 +1598,26 @@ static void snd_m3_update_ptr(struct snd_m3 *chip, struct m3_dma *s)
1598 if (! s->running) 1598 if (! s->running)
1599 return; 1599 return;
1600 1600
1601 hwptr = snd_m3_get_pointer(chip, s, subs) % s->dma_size; 1601 hwptr = snd_m3_get_pointer(chip, s, subs);
1602 diff = (s->dma_size + hwptr - s->hwptr) % s->dma_size; 1602
1603 /* try to avoid expensive modulo divisions */
1604 if (hwptr >= s->dma_size)
1605 hwptr %= s->dma_size;
1606
1607 diff = s->dma_size + hwptr - s->hwptr;
1608 if (diff >= s->dma_size)
1609 diff %= s->dma_size;
1610
1603 s->hwptr = hwptr; 1611 s->hwptr = hwptr;
1604 s->count += diff; 1612 s->count += diff;
1613
1605 if (s->count >= (signed)s->period_size) { 1614 if (s->count >= (signed)s->period_size) {
1606 s->count %= s->period_size; 1615
1616 if (s->count < 2 * (signed)s->period_size)
1617 s->count -= (signed)s->period_size;
1618 else
1619 s->count %= s->period_size;
1620
1607 spin_unlock(&chip->reg_lock); 1621 spin_unlock(&chip->reg_lock);
1608 snd_pcm_period_elapsed(subs); 1622 snd_pcm_period_elapsed(subs);
1609 spin_lock(&chip->reg_lock); 1623 spin_lock(&chip->reg_lock);
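The hunk above replaces the unconditional '%' in the pointer update with range checks, so the division only happens when a wrap actually occurred. In isolation the idiom is (a generic sketch, not code from this patch):

        /* guarded modulo: skip the division in the common in-range case */
        static inline unsigned int wrap_pos(unsigned int pos, unsigned int size)
        {
                if (pos >= size)
                        pos %= size;
                return pos;
        }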
@@ -1942,6 +1956,7 @@ static int snd_m3_ac97_wait(struct snd_m3 *chip)
1942 do { 1956 do {
1943 if (! (snd_m3_inb(chip, 0x30) & 1)) 1957 if (! (snd_m3_inb(chip, 0x30) & 1))
1944 return 0; 1958 return 0;
1959 cpu_relax();
1945 } while (i-- > 0); 1960 } while (i-- > 0);
1946 1961
1947 snd_printk(KERN_ERR "ac97 serial bus busy\n"); 1962 snd_printk(KERN_ERR "ac97 serial bus busy\n");
@@ -1953,16 +1968,18 @@ snd_m3_ac97_read(struct snd_ac97 *ac97, unsigned short reg)
1953{ 1968{
1954 struct snd_m3 *chip = ac97->private_data; 1969 struct snd_m3 *chip = ac97->private_data;
1955 unsigned long flags; 1970 unsigned long flags;
1956 unsigned short data; 1971 unsigned short data = 0xffff;
1957 1972
1958 if (snd_m3_ac97_wait(chip)) 1973 if (snd_m3_ac97_wait(chip))
1959 return 0xffff; 1974 goto fail;
1960 spin_lock_irqsave(&chip->ac97_lock, flags); 1975 spin_lock_irqsave(&chip->ac97_lock, flags);
1961 snd_m3_outb(chip, 0x80 | (reg & 0x7f), CODEC_COMMAND); 1976 snd_m3_outb(chip, 0x80 | (reg & 0x7f), CODEC_COMMAND);
1962 if (snd_m3_ac97_wait(chip)) 1977 if (snd_m3_ac97_wait(chip))
1963 return 0xffff; 1978 goto fail_unlock;
1964 data = snd_m3_inw(chip, CODEC_DATA); 1979 data = snd_m3_inw(chip, CODEC_DATA);
1980fail_unlock:
1965 spin_unlock_irqrestore(&chip->ac97_lock, flags); 1981 spin_unlock_irqrestore(&chip->ac97_lock, flags);
1982fail:
1966 return data; 1983 return data;
1967} 1984}
1968 1985
@@ -2121,7 +2138,7 @@ static int __devinit snd_m3_mixer(struct snd_m3 *chip)
2121 * DSP Code images 2138 * DSP Code images
2122 */ 2139 */
2123 2140
2124static u16 assp_kernel_image[] __devinitdata = { 2141static const u16 assp_kernel_image[] __devinitdata = {
2125 0x7980, 0x0030, 0x7980, 0x03B4, 0x7980, 0x03B4, 0x7980, 0x00FB, 0x7980, 0x00DD, 0x7980, 0x03B4, 2142 0x7980, 0x0030, 0x7980, 0x03B4, 0x7980, 0x03B4, 0x7980, 0x00FB, 0x7980, 0x00DD, 0x7980, 0x03B4,
2126 0x7980, 0x0332, 0x7980, 0x0287, 0x7980, 0x03B4, 0x7980, 0x03B4, 0x7980, 0x03B4, 0x7980, 0x03B4, 2143 0x7980, 0x0332, 0x7980, 0x0287, 0x7980, 0x03B4, 0x7980, 0x03B4, 0x7980, 0x03B4, 0x7980, 0x03B4,
2127 0x7980, 0x031A, 0x7980, 0x03B4, 0x7980, 0x022F, 0x7980, 0x03B4, 0x7980, 0x03B4, 0x7980, 0x03B4, 2144 0x7980, 0x031A, 0x7980, 0x03B4, 0x7980, 0x022F, 0x7980, 0x03B4, 0x7980, 0x03B4, 0x7980, 0x03B4,
@@ -2208,7 +2225,7 @@ static u16 assp_kernel_image[] __devinitdata = {
2208 * Mini sample rate converter code image 2225 * Mini sample rate converter code image
2209 * that is to be loaded at 0x400 on the DSP. 2226 * that is to be loaded at 0x400 on the DSP.
2210 */ 2227 */
2211static u16 assp_minisrc_image[] __devinitdata = { 2228static const u16 assp_minisrc_image[] __devinitdata = {
2212 2229
2213 0xBF80, 0x101E, 0x906E, 0x006E, 0x8B88, 0x6980, 0xEF88, 0x906F, 0x0D6F, 0x6900, 0xEB08, 0x0412, 2230 0xBF80, 0x101E, 0x906E, 0x006E, 0x8B88, 0x6980, 0xEF88, 0x906F, 0x0D6F, 0x6900, 0xEB08, 0x0412,
2214 0xBC20, 0x696E, 0xB801, 0x906E, 0x7980, 0x0403, 0xB90E, 0x8807, 0xBE43, 0xBF01, 0xBE47, 0xBE41, 2231 0xBC20, 0x696E, 0xB801, 0x906E, 0x7980, 0x0403, 0xB90E, 0x8807, 0xBE43, 0xBF01, 0xBE47, 0xBE41,
@@ -2251,7 +2268,7 @@ static u16 assp_minisrc_image[] __devinitdata = {
2251 */ 2268 */
2252 2269
2253#define MINISRC_LPF_LEN 10 2270#define MINISRC_LPF_LEN 10
2254static u16 minisrc_lpf[MINISRC_LPF_LEN] __devinitdata = { 2271static const u16 minisrc_lpf[MINISRC_LPF_LEN] __devinitdata = {
2255 0X0743, 0X1104, 0X0A4C, 0XF88D, 0X242C, 2272 0X0743, 0X1104, 0X0A4C, 0XF88D, 0X242C,
2256 0X1023, 0X1AA9, 0X0B60, 0XEFDD, 0X186F 2273 0X1023, 0X1AA9, 0X0B60, 0XEFDD, 0X186F
2257}; 2274};
@@ -2358,7 +2375,7 @@ static int __devinit snd_m3_assp_client_init(struct snd_m3 *chip, struct m3_dma
2358 */ 2375 */
2359 2376
2360 /* 2377 /*
2361 * align instance address to 256 bytes so that it's 2378 * align instance address to 256 bytes so that its
2362 * shifted list address is aligned. 2379 * shifted list address is aligned.
2363 * list address = (mem address >> 1) >> 7; 2380 * list address = (mem address >> 1) >> 7;
2364 */ 2381 */
@@ -2647,8 +2664,8 @@ snd_m3_create(struct snd_card *card, struct pci_dev *pci,
2647{ 2664{
2648 struct snd_m3 *chip; 2665 struct snd_m3 *chip;
2649 int i, err; 2666 int i, err;
2650 struct m3_quirk *quirk; 2667 const struct m3_quirk *quirk;
2651 struct m3_hv_quirk *hv_quirk; 2668 const struct m3_hv_quirk *hv_quirk;
2652 static struct snd_device_ops ops = { 2669 static struct snd_device_ops ops = {
2653 .dev_free = snd_m3_dev_free, 2670 .dev_free = snd_m3_dev_free,
2654 }; 2671 };
@@ -2843,12 +2860,12 @@ snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
2843 } 2860 }
2844 2861
2845#if 0 /* TODO: not supported yet */ 2862#if 0 /* TODO: not supported yet */
2846 /* TODO enable midi irq and i/o */ 2863 /* TODO enable MIDI IRQ and I/O */
2847 err = snd_mpu401_uart_new(chip->card, 0, MPU401_HW_MPU401, 2864 err = snd_mpu401_uart_new(chip->card, 0, MPU401_HW_MPU401,
2848 chip->iobase + MPU401_DATA_PORT, 1, 2865 chip->iobase + MPU401_DATA_PORT, 1,
2849 chip->irq, 0, &chip->rmidi); 2866 chip->irq, 0, &chip->rmidi);
2850 if (err < 0) 2867 if (err < 0)
2851 printk(KERN_WARNING "maestro3: no midi support.\n"); 2868 printk(KERN_WARNING "maestro3: no MIDI support.\n");
2852#endif 2869#endif
2853 2870
2854 pci_set_drvdata(pci, card); 2871 pci_set_drvdata(pci, card);
diff --git a/sound/pci/pcxhr/pcxhr_core.c b/sound/pci/pcxhr/pcxhr_core.c
index fdc652c6992d..c40f59062684 100644
--- a/sound/pci/pcxhr/pcxhr_core.c
+++ b/sound/pci/pcxhr/pcxhr_core.c
@@ -274,12 +274,9 @@ int pcxhr_load_xilinx_binary(struct pcxhr_mgr *mgr, const struct firmware *xilin
274 274
275 /* test first xilinx */ 275 /* test first xilinx */
276 chipsc = PCXHR_INPL(mgr, PCXHR_PLX_CHIPSC); 276 chipsc = PCXHR_INPL(mgr, PCXHR_PLX_CHIPSC);
277 if (!second) { 277 /* REV01 cards do not support the PCXHR_CHIPSC_GPI_USERI bit anymore */
278 if (chipsc & PCXHR_CHIPSC_GPI_USERI) { 278 /* this bit will always be 1; no possibility to test presence of first xilinx */
279 snd_printdd("no need to load first xilinx\n"); 279 if (second) {
280 return 0; /* first xilinx is already present and cannot be reset */
281 }
282 } else {
283 if ((chipsc & PCXHR_CHIPSC_GPI_USERI) == 0) { 280 if ((chipsc & PCXHR_CHIPSC_GPI_USERI) == 0) {
284 snd_printk(KERN_ERR "error loading first xilinx\n"); 281 snd_printk(KERN_ERR "error loading first xilinx\n");
285 return -EINVAL; 282 return -EINVAL;
diff --git a/sound/pci/riptide/Makefile b/sound/pci/riptide/Makefile
new file mode 100644
index 000000000000..dcd2e64e4818
--- /dev/null
+++ b/sound/pci/riptide/Makefile
@@ -0,0 +1,3 @@
1snd-riptide-objs := riptide.o
2
3obj-$(CONFIG_SND_RIPTIDE) += snd-riptide.o
diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
new file mode 100644
index 000000000000..f148ee434a6b
--- /dev/null
+++ b/sound/pci/riptide/riptide.c
@@ -0,0 +1,2223 @@
1/*
2 * Driver for the Conexant Riptide Soundchip
3 *
4 * Copyright (c) 2004 Peter Gruber <nokos@gmx.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21/*
22 History:
23 - 02/15/2004 first release
24
25 This driver is based on the OSS driver version from Linuxant (riptide-0.6lnxtbeta03111100)
26 credits from the original files:
27
28 MODULE NAME: cnxt_rt.h
29 AUTHOR: K. Lazarev (Transcribed by KNL)
30 HISTORY: Major Revision Date By
31 ----------------------------- -------- -----
32 Created 02/1/2000 KNL
33
34 MODULE NAME: int_mdl.c
35 AUTHOR: Konstantin Lazarev (Transcribed by KNL)
36 HISTORY: Major Revision Date By
37 ----------------------------- -------- -----
38 Created 10/01/99 KNL
39
40 MODULE NAME: riptide.h
41 AUTHOR: O. Druzhinin (Transcribed by OLD)
42 HISTORY: Major Revision Date By
43 ----------------------------- -------- -----
44 Created 10/16/97 OLD
45
46 MODULE NAME: Rp_Cmdif.cpp
47 AUTHOR: O. Druzhinin (Transcribed by OLD)
48 K. Lazarev (Transcribed by KNL)
49 HISTORY: Major Revision Date By
50 ----------------------------- -------- -----
51 Adopted from NT4 driver 6/22/99 OLD
52 Ported to Linux 9/01/99 KNL
53
54 MODULE NAME: rt_hw.c
55 AUTHOR: O. Druzhinin (Transcribed by OLD)
56 C. Lazarev (Transcribed by CNL)
57 HISTORY: Major Revision Date By
58 ----------------------------- -------- -----
59 Created 11/18/97 OLD
60 Hardware functions for RipTide 11/24/97 CNL
61 (ES1) are coded
62 Hardware functions for RipTide 12/24/97 CNL
63 (A0) are coded
64 Hardware functions for RipTide 03/20/98 CNL
65 (A1) are coded
66 Boot loader is included 05/07/98 CNL
67 Redesigned for WDM 07/27/98 CNL
68 Redesigned for Linux 09/01/99 CNL
69
70 MODULE NAME: rt_hw.h
71 AUTHOR: C. Lazarev (Transcribed by CNL)
72 HISTORY: Major Revision Date By
73 ----------------------------- -------- -----
74 Created 11/18/97 CNL
75
76 MODULE NAME: rt_mdl.c
77 AUTHOR: Konstantin Lazarev (Transcribed by KNL)
78 HISTORY: Major Revision Date By
79 ----------------------------- -------- -----
80 Created 10/01/99 KNL
81
82 MODULE NAME: mixer.h
83 AUTHOR: K. Kenney
84 HISTORY: Major Revision Date By
85 ----------------------------- -------- -----
86 Created from MS W95 Sample 11/28/95 KRS
87 RipTide 10/15/97 KRS
88 Adopted for Windows NT driver 01/20/98 CNL
89*/
90
91#include <sound/driver.h>
92#include <linux/delay.h>
93#include <linux/init.h>
94#include <linux/interrupt.h>
95#include <linux/pci.h>
96#include <linux/slab.h>
97#include <linux/wait.h>
98#include <linux/gameport.h>
99#include <linux/device.h>
100#include <linux/firmware.h>
101#include <asm/io.h>
102#include <sound/core.h>
103#include <sound/info.h>
104#include <sound/control.h>
105#include <sound/pcm.h>
106#include <sound/pcm_params.h>
107#include <sound/ac97_codec.h>
108#include <sound/mpu401.h>
109#include <sound/opl3.h>
110#include <sound/initval.h>
111
112#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
113#define SUPPORT_JOYSTICK 1
114#endif
115
116MODULE_AUTHOR("Peter Gruber <nokos@gmx.net>");
117MODULE_DESCRIPTION("riptide");
118MODULE_LICENSE("GPL");
119MODULE_SUPPORTED_DEVICE("{{Conexant,Riptide}}");
120
121static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
122static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
123static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE;
124
125#ifdef SUPPORT_JOYSTICK
126static int joystick_port[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS - 1)] = 0x200 };
127#endif
128static int mpu_port[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS - 1)] = 0x330 };
129static int opl3_port[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS - 1)] = 0x388 };
130
131module_param_array(index, int, NULL, 0444);
132MODULE_PARM_DESC(index, "Index value for Riptide soundcard.");
133module_param_array(id, charp, NULL, 0444);
134MODULE_PARM_DESC(id, "ID string for Riptide soundcard.");
135module_param_array(enable, bool, NULL, 0444);
136MODULE_PARM_DESC(enable, "Enable Riptide soundcard.");
137#ifdef SUPPORT_JOYSTICK
138module_param_array(joystick_port, int, NULL, 0444);
139MODULE_PARM_DESC(joystick_port, "Joystick port # for Riptide soundcard.");
140#endif
141module_param_array(mpu_port, int, NULL, 0444);
142MODULE_PARM_DESC(mpu_port, "MPU401 port # for Riptide driver.");
143module_param_array(opl3_port, int, NULL, 0444);
144MODULE_PARM_DESC(opl3_port, "OPL3 port # for Riptide driver.");
145
146/*
147 */
148
149#define MPU401_HW_RIPTIDE MPU401_HW_MPU401
150#define OPL3_HW_RIPTIDE OPL3_HW_OPL3
151
152#define PCI_EXT_CapId 0x40
153#define PCI_EXT_NextCapPrt 0x41
154#define PCI_EXT_PWMC 0x42
155#define PCI_EXT_PWSCR 0x44
156#define PCI_EXT_Data00 0x46
157#define PCI_EXT_PMSCR_BSE 0x47
158#define PCI_EXT_SB_Base 0x48
159#define PCI_EXT_FM_Base 0x4a
160#define PCI_EXT_MPU_Base 0x4C
161#define PCI_EXT_Game_Base 0x4E
162#define PCI_EXT_Legacy_Mask 0x50
163#define PCI_EXT_AsicRev 0x52
164#define PCI_EXT_Reserved3 0x53
165
166#define LEGACY_ENABLE_ALL 0x8000 /* legacy device options */
167#define LEGACY_ENABLE_SB 0x4000
168#define LEGACY_ENABLE_FM 0x2000
169#define LEGACY_ENABLE_MPU_INT 0x1000
170#define LEGACY_ENABLE_MPU 0x0800
171#define LEGACY_ENABLE_GAMEPORT 0x0400
172
173#define MAX_WRITE_RETRY 10 /* cmd interface limits */
174#define MAX_ERROR_COUNT 10
175#define CMDIF_TIMEOUT 500000
176#define RESET_TRIES 5
177
178#define READ_PORT_ULONG(p) inl((unsigned long)&(p))
179#define WRITE_PORT_ULONG(p,x) outl(x,(unsigned long)&(p))
180
181#define READ_AUDIO_CONTROL(p) READ_PORT_ULONG(p->audio_control)
182#define WRITE_AUDIO_CONTROL(p,x) WRITE_PORT_ULONG(p->audio_control,x)
183#define UMASK_AUDIO_CONTROL(p,x) WRITE_PORT_ULONG(p->audio_control,READ_PORT_ULONG(p->audio_control)|x)
184#define MASK_AUDIO_CONTROL(p,x) WRITE_PORT_ULONG(p->audio_control,READ_PORT_ULONG(p->audio_control)&x)
185#define READ_AUDIO_STATUS(p) READ_PORT_ULONG(p->audio_status)
186
187#define SET_GRESET(p) UMASK_AUDIO_CONTROL(p,0x0001) /* global reset switch */
188#define UNSET_GRESET(p) MASK_AUDIO_CONTROL(p,~0x0001)
189#define SET_AIE(p) UMASK_AUDIO_CONTROL(p,0x0004) /* interrupt enable */
190#define UNSET_AIE(p) MASK_AUDIO_CONTROL(p,~0x0004)
191#define SET_AIACK(p) UMASK_AUDIO_CONTROL(p,0x0008) /* interrupt acknowledge */
192#define UNSET_AIACK(p) MASK_AUDIO_CONTROL(p,~0x0008)
193#define SET_ECMDAE(p) UMASK_AUDIO_CONTROL(p,0x0010)
194#define UNSET_ECMDAE(p) MASK_AUDIO_CONTROL(p,~0x0010)
195#define SET_ECMDBE(p) UMASK_AUDIO_CONTROL(p,0x0020)
196#define UNSET_ECMDBE(p) MASK_AUDIO_CONTROL(p,~0x0020)
197#define SET_EDATAF(p) UMASK_AUDIO_CONTROL(p,0x0040)
198#define UNSET_EDATAF(p) MASK_AUDIO_CONTROL(p,~0x0040)
199#define SET_EDATBF(p) UMASK_AUDIO_CONTROL(p,0x0080)
200#define UNSET_EDATBF(p) MASK_AUDIO_CONTROL(p,~0x0080)
201#define SET_ESBIRQON(p) UMASK_AUDIO_CONTROL(p,0x0100)
202#define UNSET_ESBIRQON(p) MASK_AUDIO_CONTROL(p,~0x0100)
203#define SET_EMPUIRQ(p) UMASK_AUDIO_CONTROL(p,0x0200)
204#define UNSET_EMPUIRQ(p) MASK_AUDIO_CONTROL(p,~0x0200)
205#define IS_CMDE(a) (READ_PORT_ULONG(a->stat)&0x1) /* cmd empty */
206#define IS_DATF(a) (READ_PORT_ULONG(a->stat)&0x2) /* data filled */
207#define IS_READY(p) (READ_AUDIO_STATUS(p)&0x0001)
208#define IS_DLREADY(p) (READ_AUDIO_STATUS(p)&0x0002)
209#define IS_DLERR(p) (READ_AUDIO_STATUS(p)&0x0004)
210#define IS_GERR(p) (READ_AUDIO_STATUS(p)&0x0008) /* error ! */
211#define IS_CMDAEIRQ(p) (READ_AUDIO_STATUS(p)&0x0010)
212#define IS_CMDBEIRQ(p) (READ_AUDIO_STATUS(p)&0x0020)
213#define IS_DATAFIRQ(p) (READ_AUDIO_STATUS(p)&0x0040)
214#define IS_DATBFIRQ(p) (READ_AUDIO_STATUS(p)&0x0080)
215#define IS_EOBIRQ(p) (READ_AUDIO_STATUS(p)&0x0100) /* interrupt status */
216#define IS_EOSIRQ(p) (READ_AUDIO_STATUS(p)&0x0200)
217#define IS_EOCIRQ(p) (READ_AUDIO_STATUS(p)&0x0400)
218#define IS_UNSLIRQ(p) (READ_AUDIO_STATUS(p)&0x0800)
219#define IS_SBIRQ(p) (READ_AUDIO_STATUS(p)&0x1000)
220#define IS_MPUIRQ(p) (READ_AUDIO_STATUS(p)&0x2000)
221
222#define RESP 0x00000001 /* command flags */
223#define PARM 0x00000002
224#define CMDA 0x00000004
225#define CMDB 0x00000008
226#define NILL 0x00000000
227
228#define LONG0(a) ((u32)a) /* shifts and masks */
229#define BYTE0(a) (LONG0(a)&0xff)
230#define BYTE1(a) (BYTE0(a)<<8)
231#define BYTE2(a) (BYTE0(a)<<16)
232#define BYTE3(a) (BYTE0(a)<<24)
233#define WORD0(a) (LONG0(a)&0xffff)
234#define WORD1(a) (WORD0(a)<<8)
235#define WORD2(a) (WORD0(a)<<16)
236#define TRINIB0(a) (LONG0(a)&0xffffff)
237#define TRINIB1(a) (TRINIB0(a)<<8)
238
239#define RET(a) ((union cmdret *)(a))
240
241#define SEND_GETV(p,b) sendcmd(p,RESP,GETV,0,RET(b)) /* get version */
242#define SEND_GETC(p,b,c) sendcmd(p,PARM|RESP,GETC,c,RET(b))
243#define SEND_GUNS(p,b) sendcmd(p,RESP,GUNS,0,RET(b))
244#define SEND_SCID(p,b) sendcmd(p,RESP,SCID,0,RET(b))
245#define SEND_RMEM(p,b,c,d) sendcmd(p,PARM|RESP,RMEM|BYTE1(b),LONG0(c),RET(d)) /* memory access for firmware write */
246#define SEND_SMEM(p,b,c) sendcmd(p,PARM,SMEM|BYTE1(b),LONG0(c),RET(0)) /* memory access for firmware write */
247#define SEND_WMEM(p,b,c) sendcmd(p,PARM,WMEM|BYTE1(b),LONG0(c),RET(0)) /* memory access for firmware write */
248#define SEND_SDTM(p,b,c) sendcmd(p,PARM|RESP,SDTM|TRINIB1(b),0,RET(c)) /* memory access for firmware write */
249#define SEND_GOTO(p,b) sendcmd(p,PARM,GOTO,LONG0(b),RET(0)) /* memory access for firmware write */
250#define SEND_SETDPLL(p) sendcmd(p,0,ARM_SETDPLL,0,RET(0))
251#define SEND_SSTR(p,b,c) sendcmd(p,PARM,SSTR|BYTE3(b),LONG0(c),RET(0)) /* start stream */
252#define SEND_PSTR(p,b) sendcmd(p,PARM,PSTR,BYTE3(b),RET(0)) /* pause stream */
253#define SEND_KSTR(p,b) sendcmd(p,PARM,KSTR,BYTE3(b),RET(0)) /* stop stream */
254#define SEND_KDMA(p) sendcmd(p,0,KDMA,0,RET(0)) /* stop all dma */
255#define SEND_GPOS(p,b,c,d) sendcmd(p,PARM|RESP,GPOS,BYTE3(c)|BYTE2(b),RET(d)) /* get position in dma */
256#define SEND_SETF(p,b,c,d,e,f,g) sendcmd(p,PARM,SETF|WORD1(b)|BYTE3(c),d|BYTE1(e)|BYTE2(f)|BYTE3(g),RET(0)) /* set sample format at mixer */
257#define SEND_GSTS(p,b,c,d) sendcmd(p,PARM|RESP,GSTS,BYTE3(c)|BYTE2(b),RET(d))
258#define SEND_NGPOS(p,b,c,d) sendcmd(p,PARM|RESP,NGPOS,BYTE3(c)|BYTE2(b),RET(d))
259#define SEND_PSEL(p,b,c) sendcmd(p,PARM,PSEL,BYTE2(b)|BYTE3(c),RET(0)) /* activate lbus path */
260#define SEND_PCLR(p,b,c) sendcmd(p,PARM,PCLR,BYTE2(b)|BYTE3(c),RET(0)) /* deactivate lbus path */
261#define SEND_PLST(p,b) sendcmd(p,PARM,PLST,BYTE3(b),RET(0))
262#define SEND_RSSV(p,b,c,d) sendcmd(p,PARM|RESP,RSSV,BYTE2(b)|BYTE3(c),RET(d))
263#define SEND_LSEL(p,b,c,d,e,f,g,h) sendcmd(p,PARM,LSEL|BYTE1(b)|BYTE2(c)|BYTE3(d),BYTE0(e)|BYTE1(f)|BYTE2(g)|BYTE3(h),RET(0)) /* select paths for internal connections */
264#define SEND_SSRC(p,b,c,d,e) sendcmd(p,PARM,SSRC|BYTE1(b)|WORD2(c),WORD0(d)|WORD2(e),RET(0)) /* configure source */
265#define SEND_SLST(p,b) sendcmd(p,PARM,SLST,BYTE3(b),RET(0))
266#define SEND_RSRC(p,b,c) sendcmd(p,RESP,RSRC|BYTE1(b),0,RET(c)) /* read source config */
267#define SEND_SSRB(p,b,c) sendcmd(p,PARM,SSRB|BYTE1(b),WORD2(c),RET(0))
268#define SEND_SDGV(p,b,c,d,e) sendcmd(p,PARM,SDGV|BYTE2(b)|BYTE3(c),WORD0(d)|WORD2(e),RET(0)) /* set digital mixer */
269#define SEND_RDGV(p,b,c,d) sendcmd(p,PARM|RESP,RDGV|BYTE2(b)|BYTE3(c),0,RET(d)) /* read digital mixer */
270#define SEND_DLST(p,b) sendcmd(p,PARM,DLST,BYTE3(b),RET(0))
271#define SEND_SACR(p,b,c) sendcmd(p,PARM,SACR,WORD0(b)|WORD2(c),RET(0)) /* set AC97 register */
272#define SEND_RACR(p,b,c) sendcmd(p,PARM|RESP,RACR,WORD2(b),RET(c)) /* get AC97 register */
273#define SEND_ALST(p,b) sendcmd(p,PARM,ALST,BYTE3(b),RET(0))
274#define SEND_TXAC(p,b,c,d,e,f) sendcmd(p,PARM,TXAC|BYTE1(b)|WORD2(c),WORD0(d)|BYTE2(e)|BYTE3(f),RET(0))
275#define SEND_RXAC(p,b,c,d) sendcmd(p,PARM|RESP,RXAC,BYTE2(b)|BYTE3(c),RET(d))
276#define SEND_SI2S(p,b) sendcmd(p,PARM,SI2S,WORD2(b),RET(0))
277
278#define EOB_STATUS 0x80000000 /* status flags : block boundary */
279#define EOS_STATUS 0x40000000 /* : stopped */
280#define EOC_STATUS 0x20000000 /* : stream end */
281#define ERR_STATUS 0x10000000
282#define EMPTY_STATUS 0x08000000
283
284#define IEOB_ENABLE 0x1 /* enable interrupts for status notification above */
285#define IEOS_ENABLE 0x2
286#define IEOC_ENABLE 0x4
287#define RDONCE 0x8
288#define DESC_MAX_MASK 0xff
289
290#define ST_PLAY 0x1 /* stream states */
291#define ST_STOP 0x2
292#define ST_PAUSE 0x4
293
294#define I2S_INTDEC 3 /* config for I2S link */
295#define I2S_MERGER 0
296#define I2S_SPLITTER 0
297#define I2S_MIXER 7
298#define I2S_RATE 44100
299
300#define MODEM_INTDEC 4 /* config for modem link */
301#define MODEM_MERGER 3
302#define MODEM_SPLITTER 0
303#define MODEM_MIXER 11
304
305#define FM_INTDEC 3 /* config for FM/OPL3 link */
306#define FM_MERGER 0
307#define FM_SPLITTER 0
308#define FM_MIXER 9
309
310#define SPLIT_PATH 0x80 /* path splitting flag */
311
312enum FIRMWARE {
313 DATA_REC = 0, EXT_END_OF_FILE, EXT_SEG_ADDR_REC, EXT_GOTO_CMD_REC,
314 EXT_LIN_ADDR_REC,
315};
316
317enum CMDS {
318 GETV = 0x00, GETC, GUNS, SCID, RMEM =
319 0x10, SMEM, WMEM, SDTM, GOTO, SSTR =
320 0x20, PSTR, KSTR, KDMA, GPOS, SETF, GSTS, NGPOS, PSEL =
321 0x30, PCLR, PLST, RSSV, LSEL, SSRC = 0x40, SLST, RSRC, SSRB, SDGV =
322 0x50, RDGV, DLST, SACR = 0x60, RACR, ALST, TXAC, RXAC, SI2S =
323 0x70, ARM_SETDPLL = 0x72,
324};
325
326enum E1SOURCE {
327 ARM2LBUS_FIFO0 = 0, ARM2LBUS_FIFO1, ARM2LBUS_FIFO2, ARM2LBUS_FIFO3,
328 ARM2LBUS_FIFO4, ARM2LBUS_FIFO5, ARM2LBUS_FIFO6, ARM2LBUS_FIFO7,
329 ARM2LBUS_FIFO8, ARM2LBUS_FIFO9, ARM2LBUS_FIFO10, ARM2LBUS_FIFO11,
330 ARM2LBUS_FIFO12, ARM2LBUS_FIFO13, ARM2LBUS_FIFO14, ARM2LBUS_FIFO15,
331 INTER0_OUT, INTER1_OUT, INTER2_OUT, INTER3_OUT, INTER4_OUT,
332 INTERM0_OUT, INTERM1_OUT, INTERM2_OUT, INTERM3_OUT, INTERM4_OUT,
333 INTERM5_OUT, INTERM6_OUT, DECIMM0_OUT, DECIMM1_OUT, DECIMM2_OUT,
334 DECIMM3_OUT, DECIM0_OUT, SR3_4_OUT, OPL3_SAMPLE, ASRC0, ASRC1,
335 ACLNK2PADC, ACLNK2MODEM0RX, ACLNK2MIC, ACLNK2MODEM1RX, ACLNK2HNDMIC,
336 DIGITAL_MIXER_OUT0, GAINFUNC0_OUT, GAINFUNC1_OUT, GAINFUNC2_OUT,
337 GAINFUNC3_OUT, GAINFUNC4_OUT, SOFTMODEMTX, SPLITTER0_OUTL,
338 SPLITTER0_OUTR, SPLITTER1_OUTL, SPLITTER1_OUTR, SPLITTER2_OUTL,
339 SPLITTER2_OUTR, SPLITTER3_OUTL, SPLITTER3_OUTR, MERGER0_OUT,
340 MERGER1_OUT, MERGER2_OUT, MERGER3_OUT, ARM2LBUS_FIFO_DIRECT, NO_OUT
341};
342
343enum E2SINK {
344 LBUS2ARM_FIFO0 = 0, LBUS2ARM_FIFO1, LBUS2ARM_FIFO2, LBUS2ARM_FIFO3,
345 LBUS2ARM_FIFO4, LBUS2ARM_FIFO5, LBUS2ARM_FIFO6, LBUS2ARM_FIFO7,
346 INTER0_IN, INTER1_IN, INTER2_IN, INTER3_IN, INTER4_IN, INTERM0_IN,
347 INTERM1_IN, INTERM2_IN, INTERM3_IN, INTERM4_IN, INTERM5_IN, INTERM6_IN,
348 DECIMM0_IN, DECIMM1_IN, DECIMM2_IN, DECIMM3_IN, DECIM0_IN, SR3_4_IN,
349 PDAC2ACLNK, MODEM0TX2ACLNK, MODEM1TX2ACLNK, HNDSPK2ACLNK,
350 DIGITAL_MIXER_IN0, DIGITAL_MIXER_IN1, DIGITAL_MIXER_IN2,
351 DIGITAL_MIXER_IN3, DIGITAL_MIXER_IN4, DIGITAL_MIXER_IN5,
352 DIGITAL_MIXER_IN6, DIGITAL_MIXER_IN7, DIGITAL_MIXER_IN8,
353 DIGITAL_MIXER_IN9, DIGITAL_MIXER_IN10, DIGITAL_MIXER_IN11,
354 GAINFUNC0_IN, GAINFUNC1_IN, GAINFUNC2_IN, GAINFUNC3_IN, GAINFUNC4_IN,
355 SOFTMODEMRX, SPLITTER0_IN, SPLITTER1_IN, SPLITTER2_IN, SPLITTER3_IN,
356 MERGER0_INL, MERGER0_INR, MERGER1_INL, MERGER1_INR, MERGER2_INL,
357 MERGER2_INR, MERGER3_INL, MERGER3_INR, E2SINK_MAX
358};
359
360enum LBUS_SINK {
361 LS_SRC_INTERPOLATOR = 0, LS_SRC_INTERPOLATORM, LS_SRC_DECIMATOR,
362 LS_SRC_DECIMATORM, LS_MIXER_IN, LS_MIXER_GAIN_FUNCTION,
363 LS_SRC_SPLITTER, LS_SRC_MERGER, LS_NONE1, LS_NONE2,
364};
365
366enum RT_CHANNEL_IDS {
367 M0TX = 0, M1TX, TAMTX, HSSPKR, PDAC, DSNDTX0, DSNDTX1, DSNDTX2,
368 DSNDTX3, DSNDTX4, DSNDTX5, DSNDTX6, DSNDTX7, WVSTRTX, COP3DTX, SPARE,
369 M0RX, HSMIC, M1RX, CLEANRX, MICADC, PADC, COPRX1, COPRX2,
370 CHANNEL_ID_COUNTER
371};
372
373enum { SB_CMD = 0, MODEM_CMD, I2S_CMD0, I2S_CMD1, FM_CMD, MAX_CMD };
374
375struct lbuspath {
376 unsigned char *noconv;
377 unsigned char *stereo;
378 unsigned char *mono;
379};
380
381struct cmdport {
382 u32 data1; /* cmd,param */
383 u32 data2; /* param */
384 u32 stat; /* status */
385 u32 pad[5];
386};
387
388struct riptideport {
389 u32 audio_control; /* status registers */
390 u32 audio_status;
391 u32 pad[2];
392 struct cmdport port[2]; /* command ports */
393};
394
395struct cmdif {
396 struct riptideport *hwport;
397 spinlock_t lock;
398 unsigned int cmdcnt; /* cmd statistics */
399 unsigned int cmdtime;
400 unsigned int cmdtimemax;
401 unsigned int cmdtimemin;
402 unsigned int errcnt;
403 int is_reset;
404};
405
406struct riptide_firmware {
407 u16 ASIC;
408 u16 CODEC;
409 u16 AUXDSP;
410 u16 PROG;
411};
412
413union cmdret {
414 u8 retbytes[8];
415 u16 retwords[4];
416 u32 retlongs[2];
417};
418
419union firmware_version {
420 union cmdret ret;
421 struct riptide_firmware firmware;
422};
423
424#define get_pcmhwdev(substream) (struct pcmhw *)(substream->runtime->private_data)
425
426#define PLAYBACK_SUBSTREAMS 3
427struct snd_riptide {
428 struct snd_card *card;
429 struct pci_dev *pci;
430 const struct firmware *fw_entry;
431
432 struct cmdif *cif;
433
434 struct snd_pcm *pcm;
435 struct snd_pcm *pcm_i2s;
436 struct snd_rawmidi *rmidi;
437 struct snd_opl3 *opl3;
438 struct snd_ac97 *ac97;
439 struct snd_ac97_bus *ac97_bus;
440
441 struct snd_pcm_substream *playback_substream[PLAYBACK_SUBSTREAMS];
442 struct snd_pcm_substream *capture_substream;
443
444 int openstreams;
445
446 int irq;
447 unsigned long port;
448 unsigned short mpuaddr;
449 unsigned short opladdr;
450#ifdef SUPPORT_JOYSTICK
451 unsigned short gameaddr;
452#endif
453 struct resource *res_port;
454
455 unsigned short device_id;
456
457 union firmware_version firmware;
458
459 spinlock_t lock;
460 struct tasklet_struct riptide_tq;
461 struct snd_info_entry *proc_entry;
462
463 unsigned long received_irqs;
464 unsigned long handled_irqs;
465#ifdef CONFIG_PM
466 int in_suspend;
467#endif
468};
469
470struct sgd { /* scatter-gather descriptor */
471 u32 dwNextLink;
472 u32 dwSegPtrPhys;
473 u32 dwSegLen;
474 u32 dwStat_Ctl;
475};
476
477struct pcmhw { /* pcm descriptor */
478 struct lbuspath paths;
479 unsigned char *lbuspath;
480 unsigned char source;
481 unsigned char intdec[2];
482 unsigned char mixer;
483 unsigned char id;
484 unsigned char state;
485 unsigned int rate;
486 unsigned int channels;
487 snd_pcm_format_t format;
488 struct snd_dma_buffer sgdlist;
489 struct sgd *sgdbuf;
490 unsigned int size;
491 unsigned int pages;
492 unsigned int oldpos;
493 unsigned int pointer;
494};
495
496#define CMDRET_ZERO (union cmdret){{(u32)0, (u32) 0}}
497
498static int sendcmd(struct cmdif *cif, u32 flags, u32 cmd, u32 parm,
499 union cmdret *ret);
500static int getsourcesink(struct cmdif *cif, unsigned char source,
501 unsigned char sink, unsigned char *a,
502 unsigned char *b);
503static int snd_riptide_initialize(struct snd_riptide *chip);
504static int riptide_reset(struct cmdif *cif, struct snd_riptide *chip);
505
506/*
507 */
508
509static struct pci_device_id snd_riptide_ids[] = {
510 {
511 .vendor = 0x127a,.device = 0x4310,
512 .subvendor = PCI_ANY_ID,.subdevice = PCI_ANY_ID,
513 },
514 {
515 .vendor = 0x127a,.device = 0x4320,
516 .subvendor = PCI_ANY_ID,.subdevice = PCI_ANY_ID,
517 },
518 {
519 .vendor = 0x127a,.device = 0x4330,
520 .subvendor = PCI_ANY_ID,.subdevice = PCI_ANY_ID,
521 },
522 {
523 .vendor = 0x127a,.device = 0x4340,
524 .subvendor = PCI_ANY_ID,.subdevice = PCI_ANY_ID,
525 },
526 {0,},
527};
528
529#ifdef SUPPORT_JOYSTICK
530static struct pci_device_id snd_riptide_joystick_ids[] = {
531 {
532 .vendor = 0x127a,.device = 0x4312,
533 .subvendor = PCI_ANY_ID,.subdevice = PCI_ANY_ID,
534 },
535 {
536 .vendor = 0x127a,.device = 0x4322,
537 .subvendor = PCI_ANY_ID,.subdevice = PCI_ANY_ID,
538 },
539 {.vendor = 0x127a,.device = 0x4332,
540 .subvendor = PCI_ANY_ID,.subdevice = PCI_ANY_ID,
541 },
542 {.vendor = 0x127a,.device = 0x4342,
543 .subvendor = PCI_ANY_ID,.subdevice = PCI_ANY_ID,
544 },
545 {0,},
546};
547#endif
548
549MODULE_DEVICE_TABLE(pci, snd_riptide_ids);
550
551/*
552 */
553
554static unsigned char lbusin2out[E2SINK_MAX + 1][2] = {
555 {NO_OUT, LS_NONE1}, {NO_OUT, LS_NONE2}, {NO_OUT, LS_NONE1}, {NO_OUT,
556 LS_NONE2},
557 {NO_OUT, LS_NONE1}, {NO_OUT, LS_NONE2}, {NO_OUT, LS_NONE1}, {NO_OUT,
558 LS_NONE2},
559 {INTER0_OUT, LS_SRC_INTERPOLATOR}, {INTER1_OUT, LS_SRC_INTERPOLATOR},
560 {INTER2_OUT, LS_SRC_INTERPOLATOR}, {INTER3_OUT, LS_SRC_INTERPOLATOR},
561 {INTER4_OUT, LS_SRC_INTERPOLATOR}, {INTERM0_OUT, LS_SRC_INTERPOLATORM},
562 {INTERM1_OUT, LS_SRC_INTERPOLATORM}, {INTERM2_OUT,
563 LS_SRC_INTERPOLATORM},
564 {INTERM3_OUT, LS_SRC_INTERPOLATORM}, {INTERM4_OUT,
565 LS_SRC_INTERPOLATORM},
566 {INTERM5_OUT, LS_SRC_INTERPOLATORM}, {INTERM6_OUT,
567 LS_SRC_INTERPOLATORM},
568 {DECIMM0_OUT, LS_SRC_DECIMATORM}, {DECIMM1_OUT, LS_SRC_DECIMATORM},
569 {DECIMM2_OUT, LS_SRC_DECIMATORM}, {DECIMM3_OUT, LS_SRC_DECIMATORM},
570 {DECIM0_OUT, LS_SRC_DECIMATOR}, {SR3_4_OUT, LS_NONE1}, {NO_OUT,
571 LS_NONE2},
572 {NO_OUT, LS_NONE1}, {NO_OUT, LS_NONE2}, {NO_OUT, LS_NONE1},
573 {DIGITAL_MIXER_OUT0, LS_MIXER_IN}, {DIGITAL_MIXER_OUT0, LS_MIXER_IN},
574 {DIGITAL_MIXER_OUT0, LS_MIXER_IN}, {DIGITAL_MIXER_OUT0, LS_MIXER_IN},
575 {DIGITAL_MIXER_OUT0, LS_MIXER_IN}, {DIGITAL_MIXER_OUT0, LS_MIXER_IN},
576 {DIGITAL_MIXER_OUT0, LS_MIXER_IN}, {DIGITAL_MIXER_OUT0, LS_MIXER_IN},
577 {DIGITAL_MIXER_OUT0, LS_MIXER_IN}, {DIGITAL_MIXER_OUT0, LS_MIXER_IN},
578 {DIGITAL_MIXER_OUT0, LS_MIXER_IN}, {DIGITAL_MIXER_OUT0, LS_MIXER_IN},
579 {GAINFUNC0_OUT, LS_MIXER_GAIN_FUNCTION}, {GAINFUNC1_OUT,
580 LS_MIXER_GAIN_FUNCTION},
581 {GAINFUNC2_OUT, LS_MIXER_GAIN_FUNCTION}, {GAINFUNC3_OUT,
582 LS_MIXER_GAIN_FUNCTION},
583 {GAINFUNC4_OUT, LS_MIXER_GAIN_FUNCTION}, {SOFTMODEMTX, LS_NONE1},
584 {SPLITTER0_OUTL, LS_SRC_SPLITTER}, {SPLITTER1_OUTL, LS_SRC_SPLITTER},
585 {SPLITTER2_OUTL, LS_SRC_SPLITTER}, {SPLITTER3_OUTL, LS_SRC_SPLITTER},
586 {MERGER0_OUT, LS_SRC_MERGER}, {MERGER0_OUT, LS_SRC_MERGER},
587 {MERGER1_OUT, LS_SRC_MERGER},
588 {MERGER1_OUT, LS_SRC_MERGER}, {MERGER2_OUT, LS_SRC_MERGER},
589 {MERGER2_OUT, LS_SRC_MERGER},
590 {MERGER3_OUT, LS_SRC_MERGER}, {MERGER3_OUT, LS_SRC_MERGER}, {NO_OUT,
591 LS_NONE2},
592};
593
594static unsigned char lbus_play_opl3[] = {
595 DIGITAL_MIXER_IN0 + FM_MIXER, 0xff
596};
597static unsigned char lbus_play_modem[] = {
598 DIGITAL_MIXER_IN0 + MODEM_MIXER, 0xff
599};
600static unsigned char lbus_play_i2s[] = {
601 INTER0_IN + I2S_INTDEC, DIGITAL_MIXER_IN0 + I2S_MIXER, 0xff
602};
603static unsigned char lbus_play_out[] = {
604 PDAC2ACLNK, 0xff
605};
606static unsigned char lbus_play_outhp[] = {
607 HNDSPK2ACLNK, 0xff
608};
609static unsigned char lbus_play_noconv1[] = {
610 DIGITAL_MIXER_IN0, 0xff
611};
612static unsigned char lbus_play_stereo1[] = {
613 INTER0_IN, DIGITAL_MIXER_IN0, 0xff
614};
615static unsigned char lbus_play_mono1[] = {
616 INTERM0_IN, DIGITAL_MIXER_IN0, 0xff
617};
618static unsigned char lbus_play_noconv2[] = {
619 DIGITAL_MIXER_IN1, 0xff
620};
621static unsigned char lbus_play_stereo2[] = {
622 INTER1_IN, DIGITAL_MIXER_IN1, 0xff
623};
624static unsigned char lbus_play_mono2[] = {
625 INTERM1_IN, DIGITAL_MIXER_IN1, 0xff
626};
627static unsigned char lbus_play_noconv3[] = {
628 DIGITAL_MIXER_IN2, 0xff
629};
630static unsigned char lbus_play_stereo3[] = {
631 INTER2_IN, DIGITAL_MIXER_IN2, 0xff
632};
633static unsigned char lbus_play_mono3[] = {
634 INTERM2_IN, DIGITAL_MIXER_IN2, 0xff
635};
636static unsigned char lbus_rec_noconv1[] = {
637 LBUS2ARM_FIFO5, 0xff
638};
639static unsigned char lbus_rec_stereo1[] = {
640 DECIM0_IN, LBUS2ARM_FIFO5, 0xff
641};
642static unsigned char lbus_rec_mono1[] = {
643 DECIMM3_IN, LBUS2ARM_FIFO5, 0xff
644};
645
646static unsigned char play_ids[] = { 4, 1, 2, };
647static unsigned char play_sources[] = {
648 ARM2LBUS_FIFO4, ARM2LBUS_FIFO1, ARM2LBUS_FIFO2,
649};
650static struct lbuspath lbus_play_paths[] = {
651 {
652 .noconv = lbus_play_noconv1,
653 .stereo = lbus_play_stereo1,
654 .mono = lbus_play_mono1,
655 },
656 {
657 .noconv = lbus_play_noconv2,
658 .stereo = lbus_play_stereo2,
659 .mono = lbus_play_mono2,
660 },
661 {
662 .noconv = lbus_play_noconv3,
663 .stereo = lbus_play_stereo3,
664 .mono = lbus_play_mono3,
665 },
666};
667static struct lbuspath lbus_rec_path = {
668 .noconv = lbus_rec_noconv1,
669 .stereo = lbus_rec_stereo1,
670 .mono = lbus_rec_mono1,
671};
672
673#define FIRMWARE_VERSIONS 1
674static union firmware_version firmware_versions[] = {
675 {
676 .firmware.ASIC = 3,.firmware.CODEC = 2,
677 .firmware.AUXDSP = 3,.firmware.PROG = 773,
678 },
679};
680
681static u32 atoh(unsigned char *in, unsigned int len)
682{
683 u32 sum = 0;
684 unsigned int mult = 1;
685 unsigned char c;
686
687 while (len) {
688 c = in[len - 1];
689 if ((c >= '0') && (c <= '9'))
690 sum += mult * (c - '0');
691 else if ((c >= 'A') && (c <= 'F'))
692 sum += mult * (c - ('A' - 10));
693 else if ((c >= 'a') && (c <= 'f'))
694 sum += mult * (c - ('a' - 10));
695 mult *= 16;
696 --len;
697 }
698 return sum;
699}
700
701static int senddata(struct cmdif *cif, unsigned char *in, u32 offset)
702{
703 u32 addr;
704 u32 data;
705 u32 i;
706 unsigned char *p;
707
708 i = atoh(&in[1], 2);
709 addr = offset + atoh(&in[3], 4);
710 if (SEND_SMEM(cif, 0, addr) != 0)
711 return -EACCES;
712 p = in + 9;
713 while (i) {
714 data = atoh(p, 8);
715 if (SEND_WMEM(cif, 2,
716 ((data & 0x0f0f0f0f) << 4) | ((data & 0xf0f0f0f0)
717 >> 4)))
718 return -EACCES;
719 i -= 4;
720 p += 8;
721 }
722 return 0;
723}
724
725static int loadfirmware(struct cmdif *cif, unsigned char *img,
726 unsigned int size)
727{
728 unsigned char *in;
729 u32 laddr, saddr, t, val;
730 int err = 0;
731
732 laddr = saddr = 0;
733 while (size > 0 && err == 0) {
734 in = img;
735 if (in[0] == ':') {
736 t = atoh(&in[7], 2);
737 switch (t) {
738 case DATA_REC:
739 err = senddata(cif, in, laddr + saddr);
740 break;
741 case EXT_SEG_ADDR_REC:
742 saddr = atoh(&in[9], 4) << 4;
743 break;
744 case EXT_LIN_ADDR_REC:
745 laddr = atoh(&in[9], 4) << 16;
746 break;
747 case EXT_GOTO_CMD_REC:
748 val = atoh(&in[9], 8);
749 if (SEND_GOTO(cif, val) != 0)
750 err = -EACCES;
751 break;
752 case EXT_END_OF_FILE:
753 size = 0;
754 break;
755 default:
756 break;
757 }
758 while (size > 0) {
759 size--;
760 if (*img++ == '\n')
761 break;
762 }
763 }
764 }
765 snd_printdd("load firmware return %d\n", err);
766 return err;
767}
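The firmware image parsed above consists of Intel-HEX style records (":llaaaatt<data>..."), which is why senddata() and loadfirmware() index fixed offsets into each line. A decoding sketch using atoh(), with a made-up record rather than real riptide.hex contents:

        /* hypothetical record: 4 data bytes for offset 0x0010, type DATA_REC;
         * the trailing checksum is omitted for brevity */
        static unsigned char rec[] = ":0400100041424344";
        u32 len  = atoh(&rec[1], 2);    /* 0x04   byte count */
        u32 addr = atoh(&rec[3], 4);    /* 0x0010 load offset */
        u32 type = atoh(&rec[7], 2);    /* 0x00   record type (DATA_REC) */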
768
769static void
770alloclbuspath(struct cmdif *cif, unsigned char source,
771 unsigned char *path, unsigned char *mixer, unsigned char *s)
772{
773 while (*path != 0xff) {
774 unsigned char sink, type;
775
776 sink = *path & (~SPLIT_PATH);
777 if (sink != E2SINK_MAX) {
778 snd_printdd("alloc path 0x%x->0x%x\n", source, sink);
779 SEND_PSEL(cif, source, sink);
780 source = lbusin2out[sink][0];
781 type = lbusin2out[sink][1];
782 if (type == LS_MIXER_IN) {
783 if (mixer)
784 *mixer = sink - DIGITAL_MIXER_IN0;
785 }
786 if (type == LS_SRC_DECIMATORM ||
787 type == LS_SRC_DECIMATOR ||
788 type == LS_SRC_INTERPOLATORM ||
789 type == LS_SRC_INTERPOLATOR) {
790 if (s) {
791 if (s[0] != 0xff)
792 s[1] = sink;
793 else
794 s[0] = sink;
795 }
796 }
797 }
798 if (*path++ & SPLIT_PATH) {
799 unsigned char *npath = path;
800
801 while (*npath != 0xff)
802 npath++;
803 alloclbuspath(cif, source + 1, ++npath, mixer, s);
804 }
805 }
806}
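To make the path tables above concrete, here is an informal trace of one allocation (derived from the tables; pairing ARM2LBUS_FIFO4 with the first play path follows play_sources[] and lbus_play_paths[] above): a path is a 0xff-terminated list of sinks, and each hop's new source is looked up in lbusin2out[].

        /* alloclbuspath(cif, ARM2LBUS_FIFO4, lbus_play_stereo1, &mixer, s):
         *   hop 1: PSEL ARM2LBUS_FIFO4 -> INTER0_IN
         *          lbusin2out[INTER0_IN] = {INTER0_OUT, LS_SRC_INTERPOLATOR},
         *          so s[0] records the interpolator sink for rate setup
         *   hop 2: PSEL INTER0_OUT -> DIGITAL_MIXER_IN0
         *          an LS_MIXER_IN entry, so mixer = 0; the next byte is 0xff
         *          and the walk stops
         */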
807
808static void
809freelbuspath(struct cmdif *cif, unsigned char source, unsigned char *path)
810{
811 while (*path != 0xff) {
812 unsigned char sink;
813
814 sink = *path & (~SPLIT_PATH);
815 if (sink != E2SINK_MAX) {
816 snd_printdd("free path 0x%x->0x%x\n", source, sink);
817 SEND_PCLR(cif, source, sink);
818 source = lbusin2out[sink][0];
819 }
820 if (*path++ & SPLIT_PATH) {
821 unsigned char *npath = path;
822
823 while (*npath != 0xff)
824 npath++;
825 freelbuspath(cif, source + 1, ++npath);
826 }
827 }
828}
829
830static int writearm(struct cmdif *cif, u32 addr, u32 data, u32 mask)
831{
832 union cmdret rptr = CMDRET_ZERO;
833 unsigned int i = MAX_WRITE_RETRY;
834 int flag = 1;
835
836 SEND_RMEM(cif, 0x02, addr, &rptr);
837 rptr.retlongs[0] &= (~mask);
838
839 while (--i) {
840 SEND_SMEM(cif, 0x01, addr);
841 SEND_WMEM(cif, 0x02, (rptr.retlongs[0] | data));
842 SEND_RMEM(cif, 0x02, addr, &rptr);
843 if ((rptr.retlongs[0] & data) == data) {
844 flag = 0;
845 break;
846 } else
847 rptr.retlongs[0] &= ~mask;
848 }
849 snd_printdd("send arm 0x%x 0x%x 0x%x return %d\n", addr, data, mask,
850 flag);
851 return flag;
852}
853
854static int sendcmd(struct cmdif *cif, u32 flags, u32 cmd, u32 parm,
855 union cmdret *ret)
856{
857 int i, j;
858 int err;
859 unsigned int time = 0;
860 unsigned long irqflags;
861 struct riptideport *hwport;
862 struct cmdport *cmdport = NULL;
863
864 snd_assert(cif, return -EINVAL);
865
866 hwport = cif->hwport;
867 if (cif->errcnt > MAX_ERROR_COUNT) {
868 if (cif->is_reset) {
869 snd_printk(KERN_ERR
870 "Riptide: Too many failed cmds, reinitializing\n");
871 if (riptide_reset(cif, NULL) == 0) {
872 cif->errcnt = 0;
873 return -EIO;
874 }
875 }
876 snd_printk(KERN_ERR "Riptide: Initialization failed.\n");
877 return -EINVAL;
878 }
879 if (ret) {
880 ret->retlongs[0] = 0;
881 ret->retlongs[1] = 0;
882 }
883 i = 0;
884 spin_lock_irqsave(&cif->lock, irqflags);
885 while (i++ < CMDIF_TIMEOUT && !IS_READY(cif->hwport))
886 udelay(10);
887 if (i >= CMDIF_TIMEOUT) {
888 err = -EBUSY;
889 goto errout;
890 }
891
892 err = 0;
893 for (j = 0, time = 0; time < CMDIF_TIMEOUT; j++, time += 2) {
894 cmdport = &(hwport->port[j % 2]);
895 if (IS_DATF(cmdport)) { /* free pending data */
896 READ_PORT_ULONG(cmdport->data1);
897 READ_PORT_ULONG(cmdport->data2);
898 }
899 if (IS_CMDE(cmdport)) {
900 if (flags & PARM) /* put data */
901 WRITE_PORT_ULONG(cmdport->data2, parm);
902 WRITE_PORT_ULONG(cmdport->data1, cmd); /* write cmd */
903 if ((flags & RESP) && ret) {
904 while (!IS_DATF(cmdport) &&
905 time++ < CMDIF_TIMEOUT)
906 udelay(10);
907 if (time < CMDIF_TIMEOUT) { /* read response */
908 ret->retlongs[0] =
909 READ_PORT_ULONG(cmdport->data1);
910 ret->retlongs[1] =
911 READ_PORT_ULONG(cmdport->data2);
912 } else {
913 err = -ENOSYS;
914 goto errout;
915 }
916 }
917 break;
918 }
919 udelay(20);
920 }
921 if (time == CMDIF_TIMEOUT) {
922 err = -ENODATA;
923 goto errout;
924 }
925 spin_unlock_irqrestore(&cif->lock, irqflags);
926
927 cif->cmdcnt++; /* update command statistics */
928 cif->cmdtime += time;
929 if (time > cif->cmdtimemax)
930 cif->cmdtimemax = time;
931 if (time < cif->cmdtimemin)
932 cif->cmdtimemin = time;
933 if ((cif->cmdcnt) % 1000 == 0)
934 snd_printdd
935 ("send cmd %d time: %d mintime: %d maxtime %d err: %d\n",
936 cif->cmdcnt, cif->cmdtime, cif->cmdtimemin,
937 cif->cmdtimemax, cif->errcnt);
938 return 0;
939
940 errout:
941 cif->errcnt++;
942 spin_unlock_irqrestore(&cif->lock, irqflags);
943 snd_printdd
944 ("send cmd %d hw: 0x%x flag: 0x%x cmd: 0x%x parm: 0x%x ret: 0x%x 0x%x CMDE: %d DATF: %d failed %d\n",
945 cif->cmdcnt, (int)((void *)&(cmdport->stat) - (void *)hwport),
946 flags, cmd, parm, ret ? ret->retlongs[0] : 0,
947 ret ? ret->retlongs[1] : 0, IS_CMDE(cmdport), IS_DATF(cmdport),
948 err);
949 return err;
950}
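Summarising the handshake implemented above (a restatement of the code, no new behaviour):

        /* one command on a cmdport, as done in sendcmd():
         *   1. wait for IS_READY() on the interface, then pick a port whose
         *      IS_CMDE() reports the command register empty (stale response
         *      data is drained with READ_PORT_ULONG first if IS_DATF())
         *   2. if (flags & PARM) write the parameter to data2
         *   3. write the command word to data1 -- this issues the command
         *   4. if (flags & RESP) poll IS_DATF() and read data1/data2 back
         */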
951
952static int
953setmixer(struct cmdif *cif, short num, unsigned short rval, unsigned short lval)
954{
955 union cmdret rptr = CMDRET_ZERO;
956 int i = 0;
957
958 snd_printdd("sent mixer %d: 0x%x 0x%x\n", num, rval, lval);
959 do {
960 SEND_SDGV(cif, num, num, rval, lval);
961 SEND_RDGV(cif, num, num, &rptr);
962 if (rptr.retwords[0] == lval && rptr.retwords[1] == rval)
963 return 0;
964 } while (i++ < MAX_WRITE_RETRY);
965 snd_printdd("sent mixer failed\n");
966 return -EIO;
967}
968
969static int getpaths(struct cmdif *cif, unsigned char *o)
970{
971 unsigned char src[E2SINK_MAX];
972 unsigned char sink[E2SINK_MAX];
973 int i, j = 0;
974
975 for (i = 0; i < E2SINK_MAX; i++) {
976 getsourcesink(cif, i, i, &src[i], &sink[i]);
977 if (sink[i] < E2SINK_MAX) {
978 o[j++] = sink[i];
979 o[j++] = i;
980 }
981 }
982 return j;
983}
984
985static int
986getsourcesink(struct cmdif *cif, unsigned char source, unsigned char sink,
987 unsigned char *a, unsigned char *b)
988{
989 union cmdret rptr = CMDRET_ZERO;
990
991 if (SEND_RSSV(cif, source, sink, &rptr) &&
992 SEND_RSSV(cif, source, sink, &rptr))
993 return -EIO;
994 *a = rptr.retbytes[0];
995 *b = rptr.retbytes[1];
996 snd_printdd("getsourcesink 0x%x 0x%x\n", *a, *b);
997 return 0;
998}
999
1000static int
1001getsamplerate(struct cmdif *cif, unsigned char *intdec, unsigned int *rate)
1002{
1003 unsigned char *s;
1004 unsigned int p[2] = { 0, 0 };
1005 int i;
1006 union cmdret rptr = CMDRET_ZERO;
1007
1008 s = intdec;
1009 for (i = 0; i < 2; i++) {
1010 if (*s != 0xff) {
1011 if (SEND_RSRC(cif, *s, &rptr) &&
1012 SEND_RSRC(cif, *s, &rptr))
1013 return -EIO;
1014 p[i] += rptr.retwords[1];
1015 p[i] *= rptr.retwords[2];
1016 p[i] += rptr.retwords[3];
1017 p[i] /= 65536;
1018 }
1019 s++;
1020 }
1021 if (p[0]) {
1022 if (p[1] != p[0])
1023 snd_printdd("rates differ %d %d\n", p[0], p[1]);
1024 *rate = (unsigned int)p[0];
1025 } else
1026 *rate = (unsigned int)p[1];
1027 snd_printdd("getsamplerate %d %d %d\n", intdec[0], intdec[1], *rate);
1028 return 0;
1029}
1030
1031static int
1032setsampleformat(struct cmdif *cif,
1033 unsigned char mixer, unsigned char id,
1034 unsigned char channels, unsigned char format)
1035{
1036 unsigned char w, ch, sig, order;
1037
1038 snd_printdd
1039 ("setsampleformat mixer: %d id: %d channels: %d format: %d\n",
1040 mixer, id, channels, format);
1041 ch = channels == 1;
1042 w = snd_pcm_format_width(format) == 8;
1043 sig = snd_pcm_format_unsigned(format) != 0;
1044 order = snd_pcm_format_big_endian(format) != 0;
1045
1046 if (SEND_SETF(cif, mixer, w, ch, order, sig, id) &&
1047 SEND_SETF(cif, mixer, w, ch, order, sig, id)) {
1048 snd_printdd("setsampleformat failed\n");
1049 return -EIO;
1050 }
1051 return 0;
1052}
1053
1054static int
1055setsamplerate(struct cmdif *cif, unsigned char *intdec, unsigned int rate)
1056{
1057 u32 D, M, N;
1058 union cmdret rptr = CMDRET_ZERO;
1059 int i;
1060
1061 snd_printdd("setsamplerate intdec: %d,%d rate: %d\n", intdec[0],
1062 intdec[1], rate);
1063 D = 48000;
1064 M = ((rate == 48000) ? 47999 : rate) * 65536;
1065 N = M % D;
1066 M /= D;
1067 for (i = 0; i < 2; i++) {
1068 if (*intdec != 0xff) {
1069 do {
1070 SEND_SSRC(cif, *intdec, D, M, N);
1071 SEND_RSRC(cif, *intdec, &rptr);
1072 } while (rptr.retwords[1] != D &&
1073 rptr.retwords[2] != M &&
1074 rptr.retwords[3] != N &&
1075 i++ < MAX_WRITE_RETRY);
1076 if (i == MAX_WRITE_RETRY) {
1077 snd_printdd("sent samplerate %d: %d failed\n",
1078 *intdec, rate);
1079 return -EIO;
1080 }
1081 }
1082 intdec++;
1083 }
1084 return 0;
1085}
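A worked example of the D/M/N fixed-point encoding used above (plain arithmetic; the same values are programmed for the fixed 44.1 kHz I2S link in riptide_reset() below):

        /* rate = 44100:
         *   D = 48000
         *   M = 44100 * 65536 / 48000 = 60211   (quotient)
         *   N = 44100 * 65536 % 48000 =  9600   (remainder)
         * so (M + N/D) * 48000 / 65536 == 44100 exactly; a requested rate of
         * 48000 itself is nudged to 47999 before the division, as above.
         */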
1086
1087static int
1088getmixer(struct cmdif *cif, short num, unsigned short *rval,
1089 unsigned short *lval)
1090{
1091 union cmdret rptr = CMDRET_ZERO;
1092
1093 if (SEND_RDGV(cif, num, num, &rptr) && SEND_RDGV(cif, num, num, &rptr))
1094 return -EIO;
1095 *rval = rptr.retwords[0];
1096 *lval = rptr.retwords[1];
1097 snd_printdd("got mixer %d: 0x%x 0x%x\n", num, *rval, *lval);
1098 return 0;
1099}
1100
1101static void riptide_handleirq(unsigned long dev_id)
1102{
1103 struct snd_riptide *chip = (void *)dev_id;
1104 struct cmdif *cif = chip->cif;
1105 struct snd_pcm_substream *substream[PLAYBACK_SUBSTREAMS + 1];
1106 struct snd_pcm_runtime *runtime;
1107 struct pcmhw *data = NULL;
1108 unsigned int pos, period_bytes;
1109 struct sgd *c;
1110 int i, j;
1111 unsigned int flag;
1112
1113 if (!cif)
1114 return;
1115
1116 for (i = 0; i < PLAYBACK_SUBSTREAMS; i++)
1117 substream[i] = chip->playback_substream[i];
1118 substream[i] = chip->capture_substream;
1119 for (i = 0; i < PLAYBACK_SUBSTREAMS + 1; i++) {
1120 if (substream[i] &&
1121 (runtime = substream[i]->runtime) &&
1122 (data = runtime->private_data) && data->state != ST_STOP) {
1123 pos = 0;
1124 for (j = 0; j < data->pages; j++) {
1125 c = &data->sgdbuf[j];
1126 flag = le32_to_cpu(c->dwStat_Ctl);
1127 if (flag & EOB_STATUS)
1128 pos += le32_to_cpu(c->dwSegLen);
1129 if (flag & EOC_STATUS)
1130 pos += le32_to_cpu(c->dwSegLen);
1131 if ((flag & EOS_STATUS)
1132 && (data->state == ST_PLAY)) {
1133 data->state = ST_STOP;
1134 snd_printk(KERN_ERR
1135 "Riptide: DMA stopped unexpectedly\n");
1136 }
1137 c->dwStat_Ctl =
1138 cpu_to_le32(flag &
1139 ~(EOS_STATUS | EOB_STATUS |
1140 EOC_STATUS));
1141 }
1142 data->pointer += pos;
1143 pos += data->oldpos;
1144 if (data->state != ST_STOP) {
1145 period_bytes =
1146 frames_to_bytes(runtime,
1147 runtime->period_size);
1148 snd_printdd
1149 ("interrupt 0x%x after 0x%lx of 0x%lx frames in period\n",
1150 READ_AUDIO_STATUS(cif->hwport),
1151 bytes_to_frames(runtime, pos),
1152 runtime->period_size);
1153 j = 0;
1154 if (pos >= period_bytes) {
1155 j++;
1156 while (pos >= period_bytes)
1157 pos -= period_bytes;
1158 }
1159 data->oldpos = pos;
1160 if (j > 0)
1161 snd_pcm_period_elapsed(substream[i]);
1162 }
1163 }
1164 }
1165}
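Informally, the position bookkeeping in the tasklet above works as follows (a restatement of the code, not an addition to it):

        /* per stream, per interrupt:
         *   pos            = dwSegLen summed for every EOB/EOC flag seen
         *   data->pointer += pos;            absolute progress for pointer()
         *   pos           += data->oldpos;   bytes since the last period mark
         *   while (pos >= period_bytes) pos -= period_bytes;
         *   data->oldpos   = pos;
         * snd_pcm_period_elapsed() is called once if at least one period
         * boundary was crossed.
         */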
1166
1167#ifdef CONFIG_PM
1168static int riptide_suspend(struct pci_dev *pci, pm_message_t state)
1169{
1170 struct snd_card *card = pci_get_drvdata(pci);
1171 struct snd_riptide *chip = card->private_data;
1172
1173 chip->in_suspend = 1;
1174 snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
1175 snd_pcm_suspend_all(chip->pcm);
1176 snd_ac97_suspend(chip->ac97);
1177 pci_set_power_state(pci, PCI_D3hot);
1178 pci_disable_device(pci);
1179 pci_save_state(pci);
1180 return 0;
1181}
1182
1183static int riptide_resume(struct pci_dev *pci)
1184{
1185 struct snd_card *card = pci_get_drvdata(pci);
1186 struct snd_riptide *chip = card->private_data;
1187
1188 pci_restore_state(pci);
1189 pci_enable_device(pci);
1190 pci_set_power_state(pci, PCI_D0);
1191 pci_set_master(pci);
1192 snd_riptide_initialize(chip);
1193 snd_ac97_resume(chip->ac97);
1194 snd_power_change_state(card, SNDRV_CTL_POWER_D0);
1195 chip->in_suspend = 0;
1196 return 0;
1197}
1198#endif
1199
1200static int riptide_reset(struct cmdif *cif, struct snd_riptide *chip)
1201{
1202 int timeout, tries;
1203 union cmdret rptr = CMDRET_ZERO;
1204 union firmware_version firmware;
1205 int i, j, err, has_firmware;
1206
1207 if (!cif)
1208 return -EINVAL;
1209
1210 cif->cmdcnt = 0;
1211 cif->cmdtime = 0;
1212 cif->cmdtimemax = 0;
1213 cif->cmdtimemin = 0xffffffff;
1214 cif->errcnt = 0;
1215 cif->is_reset = 0;
1216
1217 tries = RESET_TRIES;
1218 has_firmware = 0;
1219 while (has_firmware == 0 && tries-- > 0) {
1220 for (i = 0; i < 2; i++) {
1221 WRITE_PORT_ULONG(cif->hwport->port[i].data1, 0);
1222 WRITE_PORT_ULONG(cif->hwport->port[i].data2, 0);
1223 }
1224 SET_GRESET(cif->hwport);
1225 udelay(100);
1226 UNSET_GRESET(cif->hwport);
1227 udelay(100);
1228
1229 for (timeout = 100000; --timeout; udelay(10)) {
1230 if (IS_READY(cif->hwport) && !IS_GERR(cif->hwport))
1231 break;
1232 }
1233 if (timeout == 0) {
1234 snd_printk(KERN_ERR
1235 "Riptide: device not ready, audio status: 0x%x ready: %d gerr: %d\n",
1236 READ_AUDIO_STATUS(cif->hwport),
1237 IS_READY(cif->hwport), IS_GERR(cif->hwport));
1238 return -EIO;
1239 } else {
1240 snd_printdd
1241 ("Riptide: audio status: 0x%x ready: %d gerr: %d\n",
1242 READ_AUDIO_STATUS(cif->hwport),
1243 IS_READY(cif->hwport), IS_GERR(cif->hwport));
1244 }
1245
1246 SEND_GETV(cif, &rptr);
1247 for (i = 0; i < 4; i++)
1248 firmware.ret.retwords[i] = rptr.retwords[i];
1249
1250 snd_printdd
1251 ("Firmware version: ASIC: %d CODEC %d AUXDSP %d PROG %d\n",
1252 firmware.firmware.ASIC, firmware.firmware.CODEC,
1253 firmware.firmware.AUXDSP, firmware.firmware.PROG);
1254
1255 for (j = 0; j < FIRMWARE_VERSIONS; j++) {
1256 has_firmware = 1;
1257 for (i = 0; i < 4; i++) {
1258 if (firmware_versions[j].ret.retwords[i] !=
1259 firmware.ret.retwords[i])
1260 has_firmware = 0;
1261 }
1262 if (has_firmware)
1263 break;
1264 }
1265
1266 if (chip != NULL && has_firmware == 0) {
1267 snd_printdd("Writing Firmware\n");
1268 if (!chip->fw_entry) {
1269 if ((err =
1270 request_firmware(&chip->fw_entry,
1271 "riptide.hex",
1272 &chip->pci->dev)) != 0) {
1273 snd_printk(KERN_ERR
1274 "Riptide: Firmware not available %d\n",
1275 err);
1276 return -EIO;
1277 }
1278 }
1279 err = loadfirmware(cif, chip->fw_entry->data,
1280 chip->fw_entry->size);
1281 if (err)
1282 snd_printk(KERN_ERR
1283 "Riptide: Could not load firmware %d\n",
1284 err);
1285 }
1286 }
1287
1288 SEND_SACR(cif, 0, AC97_RESET);
1289 SEND_RACR(cif, AC97_RESET, &rptr);
1290 snd_printdd("AC97: 0x%x 0x%x\n", rptr.retlongs[0], rptr.retlongs[1]);
1291
1292 SEND_PLST(cif, 0);
1293 SEND_SLST(cif, 0);
1294 SEND_DLST(cif, 0);
1295 SEND_ALST(cif, 0);
1296 SEND_KDMA(cif);
1297
1298 writearm(cif, 0x301F8, 1, 1);
1299 writearm(cif, 0x301F4, 1, 1);
1300
1301 SEND_LSEL(cif, MODEM_CMD, 0, 0, MODEM_INTDEC, MODEM_MERGER,
1302 MODEM_SPLITTER, MODEM_MIXER);
1303 setmixer(cif, MODEM_MIXER, 0x7fff, 0x7fff);
1304 alloclbuspath(cif, ARM2LBUS_FIFO13, lbus_play_modem, NULL, NULL);
1305
1306 SEND_LSEL(cif, FM_CMD, 0, 0, FM_INTDEC, FM_MERGER, FM_SPLITTER,
1307 FM_MIXER);
1308 setmixer(cif, FM_MIXER, 0x7fff, 0x7fff);
1309 writearm(cif, 0x30648 + FM_MIXER * 4, 0x01, 0x00000005);
1310 writearm(cif, 0x301A8, 0x02, 0x00000002);
1311 writearm(cif, 0x30264, 0x08, 0xffffffff);
1312 alloclbuspath(cif, OPL3_SAMPLE, lbus_play_opl3, NULL, NULL);
1313
1314 SEND_SSRC(cif, I2S_INTDEC, 48000,
1315 ((u32) I2S_RATE * 65536) / 48000,
1316 ((u32) I2S_RATE * 65536) % 48000);
1317 SEND_LSEL(cif, I2S_CMD0, 0, 0, I2S_INTDEC, I2S_MERGER, I2S_SPLITTER,
1318 I2S_MIXER);
1319 SEND_SI2S(cif, 1);
1320 alloclbuspath(cif, ARM2LBUS_FIFO0, lbus_play_i2s, NULL, NULL);
1321 alloclbuspath(cif, DIGITAL_MIXER_OUT0, lbus_play_out, NULL, NULL);
1322 alloclbuspath(cif, DIGITAL_MIXER_OUT0, lbus_play_outhp, NULL, NULL);
1323
1324 SET_AIACK(cif->hwport);
1325 SET_AIE(cif->hwport);
1326 SET_AIACK(cif->hwport);
1327 cif->is_reset = 1;
1328 if (chip) {
1329 for (i = 0; i < 4; i++)
1330 chip->firmware.ret.retwords[i] =
1331 firmware.ret.retwords[i];
1332 }
1333
1334 return 0;
1335}
1336
1337static struct snd_pcm_hardware snd_riptide_playback = {
1338 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
1339 SNDRV_PCM_INFO_BLOCK_TRANSFER |
1340 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP_VALID),
1341 .formats =
1342 SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8
1343 | SNDRV_PCM_FMTBIT_U16_LE,
1344 .rates = SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_8000_48000,
1345 .rate_min = 5500,
1346 .rate_max = 48000,
1347 .channels_min = 1,
1348 .channels_max = 2,
1349 .buffer_bytes_max = (64 * 1024),
1350 .period_bytes_min = PAGE_SIZE >> 1,
1351 .period_bytes_max = PAGE_SIZE << 8,
1352 .periods_min = 2,
1353 .periods_max = 64,
1354 .fifo_size = 0,
1355};
1356static struct snd_pcm_hardware snd_riptide_capture = {
1357 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
1358 SNDRV_PCM_INFO_BLOCK_TRANSFER |
1359 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP_VALID),
1360 .formats =
1361 SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8
1362 | SNDRV_PCM_FMTBIT_U16_LE,
1363 .rates = SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_8000_48000,
1364 .rate_min = 5500,
1365 .rate_max = 48000,
1366 .channels_min = 1,
1367 .channels_max = 2,
1368 .buffer_bytes_max = (64 * 1024),
1369 .period_bytes_min = PAGE_SIZE >> 1,
1370 .period_bytes_max = PAGE_SIZE << 3,
1371 .periods_min = 2,
1372 .periods_max = 64,
1373 .fifo_size = 0,
1374};
1375
1376static snd_pcm_uframes_t snd_riptide_pointer(struct snd_pcm_substream
1377 *substream)
1378{
1379 struct snd_riptide *chip = snd_pcm_substream_chip(substream);
1380 struct snd_pcm_runtime *runtime = substream->runtime;
1381 struct pcmhw *data = get_pcmhwdev(substream);
1382 struct cmdif *cif = chip->cif;
1383 union cmdret rptr = CMDRET_ZERO;
1384 snd_pcm_uframes_t ret;
1385
1386 SEND_GPOS(cif, 0, data->id, &rptr);
1387 if (data->size && runtime->period_size) {
1388 snd_printdd
1389 ("pointer stream %d position 0x%x(0x%x in buffer) bytes 0x%lx(0x%lx in period) frames\n",
1390 data->id, rptr.retlongs[1], rptr.retlongs[1] % data->size,
1391 bytes_to_frames(runtime, rptr.retlongs[1]),
1392 bytes_to_frames(runtime,
1393 rptr.retlongs[1]) % runtime->period_size);
1394 if (rptr.retlongs[1] > data->pointer)
1395 ret =
1396 bytes_to_frames(runtime,
1397 rptr.retlongs[1] % data->size);
1398 else
1399 ret =
1400 bytes_to_frames(runtime,
1401 data->pointer % data->size);
1402 } else {
1403 snd_printdd("stream not started or strange parms (%d %ld)\n",
1404 data->size, runtime->period_size);
1405 ret = bytes_to_frames(runtime, 0);
1406 }
1407 return ret;
1408}
1409
1410static int snd_riptide_trigger(struct snd_pcm_substream *substream, int cmd)
1411{
1412 int i, j;
1413 struct snd_riptide *chip = snd_pcm_substream_chip(substream);
1414 struct pcmhw *data = get_pcmhwdev(substream);
1415 struct cmdif *cif = chip->cif;
1416 union cmdret rptr = CMDRET_ZERO;
1417
1418 spin_lock(&chip->lock);
1419 switch (cmd) {
1420 case SNDRV_PCM_TRIGGER_START:
1421 case SNDRV_PCM_TRIGGER_RESUME:
1422 if (!(data->state & ST_PLAY)) {
1423 SEND_SSTR(cif, data->id, data->sgdlist.addr);
1424 SET_AIE(cif->hwport);
1425 data->state = ST_PLAY;
1426 if (data->mixer != 0xff)
1427 setmixer(cif, data->mixer, 0x7fff, 0x7fff);
1428 chip->openstreams++;
1429 data->oldpos = 0;
1430 data->pointer = 0;
1431 }
1432 break;
1433 case SNDRV_PCM_TRIGGER_STOP:
1434 case SNDRV_PCM_TRIGGER_SUSPEND:
1435 if (data->mixer != 0xff)
1436 setmixer(cif, data->mixer, 0, 0);
1437 setmixer(cif, data->mixer, 0, 0);
1438 SEND_KSTR(cif, data->id);
1439 data->state = ST_STOP;
1440 chip->openstreams--;
1441 j = 0;
1442 do {
1443 i = rptr.retlongs[1];
1444 SEND_GPOS(cif, 0, data->id, &rptr);
1445 udelay(1);
1446 } while (i != rptr.retlongs[1] && j++ < MAX_WRITE_RETRY);
1447 if (j >= MAX_WRITE_RETRY)
1448 snd_printk(KERN_ERR "Riptide: Could not stop stream!");
1449 break;
1450 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
1451 if (!(data->state & ST_PAUSE)) {
1452 SEND_PSTR(cif, data->id);
1453 data->state |= ST_PAUSE;
1454 chip->openstreams--;
1455 }
1456 break;
1457 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
1458 if (data->state & ST_PAUSE) {
1459 SEND_SSTR(cif, data->id, data->sgdlist.addr);
1460 data->state &= ~ST_PAUSE;
1461 chip->openstreams++;
1462 }
1463 break;
1464 default:
1465 spin_unlock(&chip->lock);
1466 return -EINVAL;
1467 }
1468 spin_unlock(&chip->lock);
1469 return 0;
1470}
1471
1472static int snd_riptide_prepare(struct snd_pcm_substream *substream)
1473{
1474 struct snd_riptide *chip = snd_pcm_substream_chip(substream);
1475 struct snd_pcm_runtime *runtime = substream->runtime;
1476 struct snd_sg_buf *sgbuf = snd_pcm_substream_sgbuf(substream);
1477 struct pcmhw *data = get_pcmhwdev(substream);
1478 struct cmdif *cif = chip->cif;
1479 unsigned char *lbuspath = NULL;
1480 unsigned int rate, channels;
1481 int err = 0;
1482 snd_pcm_format_t format;
1483
1484 snd_assert(cif && data, return -EINVAL);
1485
1486 snd_printdd("prepare id %d ch: %d f:0x%x r:%d\n", data->id,
1487 runtime->channels, runtime->format, runtime->rate);
1488
1489 spin_lock_irq(&chip->lock);
1490 channels = runtime->channels;
1491 format = runtime->format;
1492 rate = runtime->rate;
1493 switch (channels) {
1494 case 1:
1495 if (rate == 48000 && format == SNDRV_PCM_FORMAT_S16_LE)
1496 lbuspath = data->paths.noconv;
1497 else
1498 lbuspath = data->paths.mono;
1499 break;
1500 case 2:
1501 if (rate == 48000 && format == SNDRV_PCM_FORMAT_S16_LE)
1502 lbuspath = data->paths.noconv;
1503 else
1504 lbuspath = data->paths.stereo;
1505 break;
1506 }
1507 snd_printdd("use sgdlist at 0x%p and buffer at 0x%p\n",
1508 data->sgdlist.area, sgbuf);
1509 if (data->sgdlist.area && sgbuf) {
1510 unsigned int i, j, size, pages, f, pt, period;
1511 struct sgd *c, *p = NULL;
1512
1513 size = frames_to_bytes(runtime, runtime->buffer_size);
1514 period = frames_to_bytes(runtime, runtime->period_size);
1515 f = PAGE_SIZE;
1516 while ((size + (f >> 1) - 1) <= (f << 7) && (f << 1) > period)
1517 f = f >> 1;
1518 pages = (size + f - 1) / f;
1519 data->size = size;
1520 data->pages = pages;
1521 snd_printdd
1522 ("create sgd size: 0x%x pages %d of size 0x%x for period 0x%x\n",
1523 size, pages, f, period);
1524 pt = 0;
1525 j = 0;
1526 for (i = 0; i < pages; i++) {
1527 c = &data->sgdbuf[i];
1528 if (p)
1529 p->dwNextLink = cpu_to_le32(data->sgdlist.addr +
1530 (i *
1531 sizeof(struct
1532 sgd)));
1533 c->dwNextLink = cpu_to_le32(data->sgdlist.addr);
1534 c->dwSegPtrPhys =
1535 cpu_to_le32(sgbuf->table[j].addr + pt);
1536 pt = (pt + f) % PAGE_SIZE;
1537 if (pt == 0)
1538 j++;
1539 c->dwSegLen = cpu_to_le32(f);
1540 c->dwStat_Ctl =
1541 cpu_to_le32(IEOB_ENABLE | IEOS_ENABLE |
1542 IEOC_ENABLE);
1543 p = c;
1544 size -= f;
1545 }
1546 data->sgdbuf[i].dwSegLen = cpu_to_le32(size);
1547 }
1548 if (lbuspath && lbuspath != data->lbuspath) {
1549 if (data->lbuspath)
1550 freelbuspath(cif, data->source, data->lbuspath);
1551 alloclbuspath(cif, data->source, lbuspath,
1552 &data->mixer, data->intdec);
1553 data->lbuspath = lbuspath;
1554 data->rate = 0;
1555 }
1556 if (data->rate != rate || data->format != format ||
1557 data->channels != channels) {
1558 data->rate = rate;
1559 data->format = format;
1560 data->channels = channels;
1561 if (setsampleformat
1562 (cif, data->mixer, data->id, channels, format)
1563 || setsamplerate(cif, data->intdec, rate))
1564 err = -EIO;
1565 }
1566 spin_unlock_irq(&chip->lock);
1567 return err;
1568}
1569
1570static int
1571snd_riptide_hw_params(struct snd_pcm_substream *substream,
1572 struct snd_pcm_hw_params *hw_params)
1573{
1574 struct snd_riptide *chip = snd_pcm_substream_chip(substream);
1575 struct pcmhw *data = get_pcmhwdev(substream);
1576 struct snd_dma_buffer *sgdlist = &data->sgdlist;
1577 int err;
1578
1579 snd_printdd("hw params id %d (sgdlist: 0x%p 0x%lx %d)\n", data->id,
1580 sgdlist->area, (unsigned long)sgdlist->addr,
1581 (int)sgdlist->bytes);
1582 if (sgdlist->area)
1583 snd_dma_free_pages(sgdlist);
1584 if ((err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
1585 snd_dma_pci_data(chip->pci),
1586 sizeof(struct sgd) * (DESC_MAX_MASK + 1),
1587 sgdlist)) < 0) {
1588 snd_printk(KERN_ERR "Riptide: failed to alloc %d dma bytes\n",
1589 (int)sizeof(struct sgd) * (DESC_MAX_MASK + 1));
1590 return err;
1591 }
1592 data->sgdbuf = (struct sgd *)sgdlist->area;
1593 return snd_pcm_lib_malloc_pages(substream,
1594 params_buffer_bytes(hw_params));
1595}
1596
1597static int snd_riptide_hw_free(struct snd_pcm_substream *substream)
1598{
1599 struct snd_riptide *chip = snd_pcm_substream_chip(substream);
1600 struct pcmhw *data = get_pcmhwdev(substream);
1601 struct cmdif *cif = chip->cif;
1602
1603 if (cif && data) {
1604 if (data->lbuspath)
1605 freelbuspath(cif, data->source, data->lbuspath);
1606 data->lbuspath = NULL;
1607 data->source = 0xff;
1608 data->intdec[0] = 0xff;
1609 data->intdec[1] = 0xff;
1610
1611 if (data->sgdlist.area) {
1612 snd_dma_free_pages(&data->sgdlist);
1613 data->sgdlist.area = NULL;
1614 }
1615 }
1616 return snd_pcm_lib_free_pages(substream);
1617}
1618
1619static int snd_riptide_playback_open(struct snd_pcm_substream *substream)
1620{
1621 struct snd_riptide *chip = snd_pcm_substream_chip(substream);
1622 struct snd_pcm_runtime *runtime = substream->runtime;
1623 struct pcmhw *data;
1624 int index = substream->number;
1625
1626 chip->playback_substream[index] = substream;
1627 runtime->hw = snd_riptide_playback;
1628 data = kzalloc(sizeof(struct pcmhw), GFP_KERNEL);
1629 data->paths = lbus_play_paths[index];
1630 data->id = play_ids[index];
1631 data->source = play_sources[index];
1632 data->intdec[0] = 0xff;
1633 data->intdec[1] = 0xff;
1634 data->state = ST_STOP;
1635 runtime->private_data = data;
1636 return snd_pcm_hw_constraint_integer(runtime,
1637 SNDRV_PCM_HW_PARAM_PERIODS);
1638}
1639
1640static int snd_riptide_capture_open(struct snd_pcm_substream *substream)
1641{
1642 struct snd_riptide *chip = snd_pcm_substream_chip(substream);
1643 struct snd_pcm_runtime *runtime = substream->runtime;
1644 struct pcmhw *data;
1645
1646 chip->capture_substream = substream;
1647 runtime->hw = snd_riptide_capture;
1648 data = kzalloc(sizeof(struct pcmhw), GFP_KERNEL);
1649 data->paths = lbus_rec_path;
1650 data->id = PADC;
1651 data->source = ACLNK2PADC;
1652 data->intdec[0] = 0xff;
1653 data->intdec[1] = 0xff;
1654 data->state = ST_STOP;
1655 runtime->private_data = data;
1656 return snd_pcm_hw_constraint_integer(runtime,
1657 SNDRV_PCM_HW_PARAM_PERIODS);
1658}
1659
1660static int snd_riptide_playback_close(struct snd_pcm_substream *substream)
1661{
1662 struct snd_riptide *chip = snd_pcm_substream_chip(substream);
1663 struct pcmhw *data = get_pcmhwdev(substream);
1664 int index = substream->number;
1665
1666 substream->runtime->private_data = NULL;
1667 chip->playback_substream[index] = NULL;
1668 kfree(data);
1669 return 0;
1670}
1671
1672static int snd_riptide_capture_close(struct snd_pcm_substream *substream)
1673{
1674 struct snd_riptide *chip = snd_pcm_substream_chip(substream);
1675 struct pcmhw *data = get_pcmhwdev(substream);
1676
1677 substream->runtime->private_data = NULL;
1678 chip->capture_substream = NULL;
1679 kfree(data);
1680 return 0;
1681}
1682
1683static struct snd_pcm_ops snd_riptide_playback_ops = {
1684 .open = snd_riptide_playback_open,
1685 .close = snd_riptide_playback_close,
1686 .ioctl = snd_pcm_lib_ioctl,
1687 .hw_params = snd_riptide_hw_params,
1688 .hw_free = snd_riptide_hw_free,
1689 .prepare = snd_riptide_prepare,
1690 .page = snd_pcm_sgbuf_ops_page,
1691 .trigger = snd_riptide_trigger,
1692 .pointer = snd_riptide_pointer,
1693};
1694static struct snd_pcm_ops snd_riptide_capture_ops = {
1695 .open = snd_riptide_capture_open,
1696 .close = snd_riptide_capture_close,
1697 .ioctl = snd_pcm_lib_ioctl,
1698 .hw_params = snd_riptide_hw_params,
1699 .hw_free = snd_riptide_hw_free,
1700 .prepare = snd_riptide_prepare,
1701 .page = snd_pcm_sgbuf_ops_page,
1702 .trigger = snd_riptide_trigger,
1703 .pointer = snd_riptide_pointer,
1704};
1705
1706static int __devinit
1707snd_riptide_pcm(struct snd_riptide *chip, int device, struct snd_pcm **rpcm)
1708{
1709 struct snd_pcm *pcm;
1710 int err;
1711
1712 if (rpcm)
1713 *rpcm = NULL;
1714 if ((err =
1715 snd_pcm_new(chip->card, "RIPTIDE", device, PLAYBACK_SUBSTREAMS, 1,
1716 &pcm)) < 0)
1717 return err;
1718 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
1719 &snd_riptide_playback_ops);
1720 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
1721 &snd_riptide_capture_ops);
1722 pcm->private_data = chip;
1723 pcm->info_flags = 0;
1724 strcpy(pcm->name, "RIPTIDE");
1725 chip->pcm = pcm;
1726 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
1727 snd_dma_pci_data(chip->pci),
1728 64 * 1024, 128 * 1024);
1729 if (rpcm)
1730 *rpcm = pcm;
1731 return 0;
1732}
1733
1734static irqreturn_t
1735snd_riptide_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1736{
1737 struct snd_riptide *chip = dev_id;
1738 struct cmdif *cif = chip->cif;
1739
1740 if (cif) {
1741 chip->received_irqs++;
1742 if (IS_EOBIRQ(cif->hwport) || IS_EOSIRQ(cif->hwport) ||
1743 IS_EOCIRQ(cif->hwport)) {
1744 chip->handled_irqs++;
1745 tasklet_hi_schedule(&chip->riptide_tq);
1746 }
1747 if (chip->rmidi && IS_MPUIRQ(cif->hwport)) {
1748 chip->handled_irqs++;
1749 snd_mpu401_uart_interrupt(irq,
1750 chip->rmidi->private_data,
1751 regs);
1752 }
1753 SET_AIACK(cif->hwport);
1754 }
1755 return IRQ_HANDLED;
1756}
1757
1758static void
1759snd_riptide_codec_write(struct snd_ac97 *ac97, unsigned short reg,
1760 unsigned short val)
1761{
1762 struct snd_riptide *chip = ac97->private_data;
1763 struct cmdif *cif = chip->cif;
1764 union cmdret rptr = CMDRET_ZERO;
1765 int i = 0;
1766
1767 snd_assert(cif, return);
1768
1769 snd_printdd("Write AC97 reg 0x%x 0x%x\n", reg, val);
1770 do {
1771 SEND_SACR(cif, val, reg);
1772 SEND_RACR(cif, reg, &rptr);
1773 } while (rptr.retwords[1] != val && i++ < MAX_WRITE_RETRY);
1774 if (i == MAX_WRITE_RETRY)
1775 snd_printdd("Write AC97 reg failed\n");
1776}
1777
1778static unsigned short snd_riptide_codec_read(struct snd_ac97 *ac97,
1779 unsigned short reg)
1780{
1781 struct snd_riptide *chip = ac97->private_data;
1782 struct cmdif *cif = chip->cif;
1783 union cmdret rptr = CMDRET_ZERO;
1784
1785 snd_assert(cif, return 0);
1786
1787 if (SEND_RACR(cif, reg, &rptr) != 0)
1788 SEND_RACR(cif, reg, &rptr);
1789 snd_printdd("Read AC97 reg 0x%x got 0x%x\n", reg, rptr.retwords[1]);
1790 return rptr.retwords[1];
1791}
1792
1793static int snd_riptide_initialize(struct snd_riptide *chip)
1794{
1795 struct cmdif *cif;
1796 unsigned int device_id;
1797 int err;
1798
1799 snd_assert(chip, return -EINVAL);
1800
1801 cif = chip->cif;
1802 if (!cif) {
1803 if ((cif = kzalloc(sizeof(struct cmdif), GFP_KERNEL)) == NULL)
1804 return -ENOMEM;
1805 cif->hwport = (struct riptideport *)chip->port;
1806 spin_lock_init(&cif->lock);
1807 chip->cif = cif;
1808 }
1809 cif->is_reset = 0;
1810 if ((err = riptide_reset(cif, chip)) != 0)
1811 return err;
1812 device_id = chip->device_id;
1813 switch (device_id) {
1814 case 0x4310:
1815 case 0x4320:
1816 case 0x4330:
1817 snd_printdd("Modem enable?\n");
1818 SEND_SETDPLL(cif);
1819 break;
1820 }
1821 snd_printdd("Enabling MPU IRQs\n");
1822 if (chip->rmidi)
1823 SET_EMPUIRQ(cif->hwport);
1824 return err;
1825}
1826
1827static int snd_riptide_free(struct snd_riptide *chip)
1828{
1829 struct cmdif *cif;
1830
1831 snd_assert(chip, return 0);
1832
1833 if ((cif = chip->cif)) {
1834 SET_GRESET(cif->hwport);
1835 udelay(100);
1836 UNSET_GRESET(cif->hwport);
1837 kfree(chip->cif);
1838 }
1839 if (chip->fw_entry)
1840 release_firmware(chip->fw_entry);
1841 release_and_free_resource(chip->res_port);
1842 if (chip->irq >= 0)
1843 free_irq(chip->irq, chip);
1844 kfree(chip);
1845 return 0;
1846}
1847
1848static int snd_riptide_dev_free(struct snd_device *device)
1849{
1850 struct snd_riptide *chip = device->device_data;
1851
1852 return snd_riptide_free(chip);
1853}
1854
1855static int __devinit
1856snd_riptide_create(struct snd_card *card, struct pci_dev *pci,
1857 struct snd_riptide **rchip)
1858{
1859 struct snd_riptide *chip;
1860 struct riptideport *hwport;
1861 int err;
1862 static struct snd_device_ops ops = {
1863 .dev_free = snd_riptide_dev_free,
1864 };
1865
1866 *rchip = NULL;
1867 if ((err = pci_enable_device(pci)) < 0)
1868 return err;
1869 if (!(chip = kzalloc(sizeof(struct snd_riptide), GFP_KERNEL)))
1870 return -ENOMEM;
1871
1872 spin_lock_init(&chip->lock);
1873 chip->card = card;
1874 chip->pci = pci;
1875 chip->irq = -1;
1876 chip->openstreams = 0;
1877 chip->port = pci_resource_start(pci, 0);
1878 chip->received_irqs = 0;
1879 chip->handled_irqs = 0;
1880 chip->cif = NULL;
1881 tasklet_init(&chip->riptide_tq, riptide_handleirq, (unsigned long)chip);
1882
1883 if ((chip->res_port =
1884 request_region(chip->port, 64, "RIPTIDE")) == NULL) {
1885 snd_printk(KERN_ERR
1886 "Riptide: unable to grab region 0x%lx-0x%lx\n",
1887 chip->port, chip->port + 64 - 1);
1888 snd_riptide_free(chip);
1889 return -EBUSY;
1890 }
1891 hwport = (struct riptideport *)chip->port;
1892 UNSET_AIE(hwport);
1893
1894 if (request_irq
1895 (pci->irq, snd_riptide_interrupt, SA_INTERRUPT | SA_SHIRQ,
1896 "RIPTIDE", chip)) {
1897 snd_printk(KERN_ERR "Riptide: unable to grab IRQ %d\n",
1898 pci->irq);
1899 snd_riptide_free(chip);
1900 return -EBUSY;
1901 }
1902 chip->irq = pci->irq;
1903 chip->device_id = pci->device;
1904 pci_set_master(pci);
1905 if ((err = snd_riptide_initialize(chip)) < 0) {
1906 snd_riptide_free(chip);
1907 return err;
1908 }
1909
1910 if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
1911 snd_riptide_free(chip);
1912 return err;
1913 }
1914
1915 *rchip = chip;
1916 return 0;
1917}
1918
1919static void
1920snd_riptide_proc_read(struct snd_info_entry *entry,
1921 struct snd_info_buffer *buffer)
1922{
1923 struct snd_riptide *chip = entry->private_data;
1924 struct pcmhw *data;
1925 int i;
1926 struct cmdif *cif = NULL;
1927 unsigned char p[256];
1928 unsigned short rval = 0, lval = 0;
1929 unsigned int rate;
1930
1931 if (!chip)
1932 return;
1933
1934 snd_iprintf(buffer, "%s\n\n", chip->card->longname);
1935 snd_iprintf(buffer, "Device ID: 0x%x\nReceived IRQs: (%ld)%ld\nPorts:",
1936 chip->device_id, chip->handled_irqs, chip->received_irqs);
1937 for (i = 0; i < 64; i += 4)
1938 snd_iprintf(buffer, "%c%02x: %08x",
1939 (i % 16) ? ' ' : '\n', i, inl(chip->port + i));
1940 if ((cif = chip->cif)) {
1941 snd_iprintf(buffer,
1942 "\nVersion: ASIC: %d CODEC: %d AUXDSP: %d PROG: %d",
1943 chip->firmware.firmware.ASIC,
1944 chip->firmware.firmware.CODEC,
1945 chip->firmware.firmware.AUXDSP,
1946 chip->firmware.firmware.PROG);
1947 snd_iprintf(buffer, "\nDigital mixer:");
1948 for (i = 0; i < 12; i++) {
1949 getmixer(cif, i, &rval, &lval);
1950 snd_iprintf(buffer, "\n %d: %d %d", i, rval, lval);
1951 }
1952 snd_iprintf(buffer,
1953 "\nARM Commands num: %d failed: %d time: %d max: %d min: %d",
1954 cif->cmdcnt, cif->errcnt,
1955 cif->cmdtime, cif->cmdtimemax, cif->cmdtimemin);
1956 }
1957 snd_iprintf(buffer, "\nOpen streams %d:\n", chip->openstreams);
1958 for (i = 0; i < PLAYBACK_SUBSTREAMS; i++) {
1959 if (chip->playback_substream[i]
1960 && chip->playback_substream[i]->runtime
1961 && (data =
1962 chip->playback_substream[i]->runtime->private_data)) {
1963 snd_iprintf(buffer,
1964 "stream: %d mixer: %d source: %d (%d,%d)\n",
1965 data->id, data->mixer, data->source,
1966 data->intdec[0], data->intdec[1]);
1967 if (!(getsamplerate(cif, data->intdec, &rate)))
1968 snd_iprintf(buffer, "rate: %d\n", rate);
1969 }
1970 }
1971 if (chip->capture_substream
1972 && chip->capture_substream->runtime
1973 && (data = chip->capture_substream->runtime->private_data)) {
1974 snd_iprintf(buffer,
1975 "stream: %d mixer: %d source: %d (%d,%d)\n",
1976 data->id, data->mixer,
1977 data->source, data->intdec[0], data->intdec[1]);
1978 if (!(getsamplerate(cif, data->intdec, &rate)))
1979 snd_iprintf(buffer, "rate: %d\n", rate);
1980 }
1981 snd_iprintf(buffer, "Paths:\n");
1982 i = getpaths(cif, p);
1983 while (i--) {
1984 snd_iprintf(buffer, "%x->%x ", p[i - 1], p[i]);
1985 i--;
1986 }
1987 snd_iprintf(buffer, "\n");
1988}
1989
1990static void __devinit snd_riptide_proc_init(struct snd_riptide *chip)
1991{
1992 struct snd_info_entry *entry;
1993
1994 if (!snd_card_proc_new(chip->card, "riptide", &entry))
1995 snd_info_set_text_ops(entry, chip, 4096, snd_riptide_proc_read);
1996}
1997
1998static int __devinit snd_riptide_mixer(struct snd_riptide *chip)
1999{
2000 struct snd_ac97_bus *pbus;
2001 struct snd_ac97_template ac97;
2002 int err = 0;
2003 static struct snd_ac97_bus_ops ops = {
2004 .write = snd_riptide_codec_write,
2005 .read = snd_riptide_codec_read,
2006 };
2007
2008 memset(&ac97, 0, sizeof(ac97));
2009 ac97.private_data = chip;
2010 ac97.scaps = AC97_SCAP_SKIP_MODEM;
2011
2012 if ((err = snd_ac97_bus(chip->card, 0, &ops, chip, &pbus)) < 0)
2013 return err;
2014
2015 chip->ac97_bus = pbus;
2016 ac97.pci = chip->pci;
2017 if ((err = snd_ac97_mixer(pbus, &ac97, &chip->ac97)) < 0)
2018 return err;
2019 return err;
2020}
2021
2022#ifdef SUPPORT_JOYSTICK
2023static int have_joystick;
2024static struct pci_dev *riptide_gameport_pci;
2025static struct gameport *riptide_gameport;
2026
2027static int __devinit
2028snd_riptide_joystick_probe(struct pci_dev *pci, const struct pci_device_id *id)
2029{
2030 static int dev;
2031
2032 if (dev >= SNDRV_CARDS)
2033 return -ENODEV;
2034 if (!enable[dev]) {
2035 dev++;
2036 return -ENOENT;
2037 }
2038
2039 if (joystick_port[dev]) {
2040 riptide_gameport = gameport_allocate_port();
2041 if (riptide_gameport) {
2042 if (!request_region
2043 (joystick_port[dev], 8, "Riptide gameport")) {
2044 snd_printk(KERN_WARNING
2045 "Riptide: cannot grab gameport 0x%x\n",
2046 joystick_port[dev]);
2047 gameport_free_port(riptide_gameport);
2048 riptide_gameport = NULL;
2049 } else {
2050 riptide_gameport_pci = pci;
2051 riptide_gameport->io = joystick_port[dev];
2052 gameport_register_port(riptide_gameport);
2053 }
2054 }
2055 }
2056 dev++;
2057 return 0;
2058}
2059
2060static void __devexit snd_riptide_joystick_remove(struct pci_dev *pci)
2061{
2062 if (riptide_gameport) {
2063 if (riptide_gameport_pci == pci) {
2064 release_region(riptide_gameport->io, 8);
2065 riptide_gameport_pci = NULL;
2066 gameport_unregister_port(riptide_gameport);
2067 riptide_gameport = NULL;
2068 }
2069 }
2070}
2071#endif
2072
2073static int __devinit
2074snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
2075{
2076 static int dev;
2077 struct snd_card *card;
2078 struct snd_riptide *chip;
2079 unsigned short addr;
2080 int err = 0;
2081
2082 if (dev >= SNDRV_CARDS)
2083 return -ENODEV;
2084 if (!enable[dev]) {
2085 dev++;
2086 return -ENOENT;
2087 }
2088
2089 card = snd_card_new(index[dev], id[dev], THIS_MODULE, 0);
2090 if (card == NULL)
2091 return -ENOMEM;
2092 if ((err = snd_riptide_create(card, pci, &chip)) < 0) {
2093 snd_card_free(card);
2094 return err;
2095 }
2096 card->private_data = chip;
2097 if ((err = snd_riptide_pcm(chip, 0, NULL)) < 0) {
2098 snd_card_free(card);
2099 return err;
2100 }
2101 if ((err = snd_riptide_mixer(chip)) < 0) {
2102 snd_card_free(card);
2103 return err;
2104 }
2105 pci_write_config_word(chip->pci, PCI_EXT_Legacy_Mask, LEGACY_ENABLE_ALL
2106 | (opl3_port[dev] ? LEGACY_ENABLE_FM : 0)
2107#ifdef SUPPORT_JOYSTICK
2108 | (joystick_port[dev] ? LEGACY_ENABLE_GAMEPORT :
2109 0)
2110#endif
2111 | (mpu_port[dev]
2112 ? (LEGACY_ENABLE_MPU_INT | LEGACY_ENABLE_MPU) :
2113 0)
2114 | ((chip->irq << 4) & 0xF0));
2115 if ((addr = mpu_port[dev]) != 0) {
2116 pci_write_config_word(chip->pci, PCI_EXT_MPU_Base, addr);
2117 if ((err = snd_mpu401_uart_new(card, 0, MPU401_HW_RIPTIDE,
2118 addr, 0, chip->irq, 0,
2119 &chip->rmidi)) < 0)
2120 snd_printk(KERN_WARNING
2121 "Riptide: Can't Allocate MPU at 0x%x\n",
2122 addr);
2123 else
2124 chip->mpuaddr = addr;
2125 }
2126 if ((addr = opl3_port[dev]) != 0) {
2127 pci_write_config_word(chip->pci, PCI_EXT_FM_Base, addr);
2128 if ((err = snd_opl3_create(card, addr, addr + 2,
2129 OPL3_HW_RIPTIDE, 0,
2130 &chip->opl3)) < 0)
2131 snd_printk(KERN_WARNING
2132 "Riptide: Can't Allocate OPL3 at 0x%x\n",
2133 addr);
2134 else {
2135 chip->opladdr = addr;
2136 if ((err =
2137 snd_opl3_hwdep_new(chip->opl3, 0, 1, NULL)) < 0)
2138 snd_printk(KERN_WARNING
2139 "Riptide: Can't Allocate OPL3-HWDEP\n");
2140 }
2141 }
2142#ifdef SUPPORT_JOYSTICK
2143 if ((addr = joystick_port[dev]) != 0) {
2144 pci_write_config_word(chip->pci, PCI_EXT_Game_Base, addr);
2145 chip->gameaddr = addr;
2146 }
2147#endif
2148
2149 strcpy(card->driver, "RIPTIDE");
2150 strcpy(card->shortname, "Riptide");
2151#ifdef SUPPORT_JOYSTICK
2152 snprintf(card->longname, sizeof(card->longname),
2153 "%s at 0x%lx, irq %i mpu 0x%x opl3 0x%x gameport 0x%x",
2154 card->shortname, chip->port, chip->irq, chip->mpuaddr,
2155 chip->opladdr, chip->gameaddr);
2156#else
2157 snprintf(card->longname, sizeof(card->longname),
2158 "%s at 0x%lx, irq %i mpu 0x%x opl3 0x%x",
2159 card->shortname, chip->port, chip->irq, chip->mpuaddr,
2160 chip->opladdr);
2161#endif
2162 snd_riptide_proc_init(chip);
2163 if ((err = snd_card_register(card)) < 0) {
2164 snd_card_free(card);
2165 return err;
2166 }
2167 pci_set_drvdata(pci, card);
2168 dev++;
2169 return 0;
2170}
2171
2172static void __devexit snd_card_riptide_remove(struct pci_dev *pci)
2173{
2174 snd_card_free(pci_get_drvdata(pci));
2175 pci_set_drvdata(pci, NULL);
2176}
2177
2178static struct pci_driver driver = {
2179 .name = "RIPTIDE",
2180 .id_table = snd_riptide_ids,
2181 .probe = snd_card_riptide_probe,
2182 .remove = __devexit_p(snd_card_riptide_remove),
2183#ifdef CONFIG_PM
2184 .suspend = riptide_suspend,
2185 .resume = riptide_resume,
2186#endif
2187};
2188
2189#ifdef SUPPORT_JOYSTICK
2190static struct pci_driver joystick_driver = {
2191 .name = "Riptide Joystick",
2192 .id_table = snd_riptide_joystick_ids,
2193 .probe = snd_riptide_joystick_probe,
2194 .remove = __devexit_p(snd_riptide_joystick_remove),
2195};
2196#endif
2197
2198static int __init alsa_card_riptide_init(void)
2199{
2200 int err;
2201 if ((err = pci_register_driver(&driver)) < 0)
2202 return err;
2203#if defined(SUPPORT_JOYSTICK)
2204 if (pci_register_driver(&joystick_driver) < 0) {
2205 have_joystick = 0;
2206 snd_printk(KERN_INFO "no joystick found\n");
2207 } else
2208 have_joystick = 1;
2209#endif
2210 return 0;
2211}
2212
2213static void __exit alsa_card_riptide_exit(void)
2214{
2215 pci_unregister_driver(&driver);
2216#if defined(SUPPORT_JOYSTICK)
2217 if (have_joystick)
2218 pci_unregister_driver(&joystick_driver);
2219#endif
2220}
2221
2222module_init(alsa_card_riptide_init);
2223module_exit(alsa_card_riptide_exit);
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c
index 1957d29c119e..1e7398de2865 100644
--- a/sound/pci/via82xx.c
+++ b/sound/pci/via82xx.c
@@ -2373,6 +2373,7 @@ static int __devinit check_dxs_list(struct pci_dev *pci)
 	{ .subvendor = 0x161f, .subdevice = 0x2032, .action = VIA_DXS_48K }, /* m680x machines */
 	{ .subvendor = 0x1631, .subdevice = 0xe004, .action = VIA_DXS_ENABLE }, /* Easy Note 3174, Packard Bell */
 	{ .subvendor = 0x1695, .subdevice = 0x3005, .action = VIA_DXS_ENABLE }, /* EPoX EP-8K9A */
+	{ .subvendor = 0x1695, .subdevice = 0x300c, .action = VIA_DXS_SRC }, /* EPoX EP-8KRAI */
 	{ .subvendor = 0x1695, .subdevice = 0x300e, .action = VIA_DXS_SRC }, /* EPoX 9HEAI */
 	{ .subvendor = 0x16f3, .subdevice = 0x6405, .action = VIA_DXS_SRC }, /* Jetway K8M8MS */
 	{ .subvendor = 0x1734, .subdevice = 0x1078, .action = VIA_DXS_SRC }, /* FSC Amilo L7300 */
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf.c b/sound/pcmcia/pdaudiocf/pdaudiocf.c
index 77caf43a3109..adfdce7499d1 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf.c
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf.c
@@ -57,18 +57,12 @@ static struct snd_card *card_list[SNDRV_CARDS];
 /*
  * prototypes
  */
-static void pdacf_config(dev_link_t *link);
+static int pdacf_config(struct pcmcia_device *link);
 static void snd_pdacf_detach(struct pcmcia_device *p_dev);
 
-static void pdacf_release(dev_link_t *link)
+static void pdacf_release(struct pcmcia_device *link)
 {
-	if (link->state & DEV_CONFIG) {
-		/* release cs resources */
-		pcmcia_release_configuration(link->handle);
-		pcmcia_release_io(link->handle, &link->io);
-		pcmcia_release_irq(link->handle, &link->irq);
-		link->state &= ~DEV_CONFIG;
-	}
+	pcmcia_disable_device(link);
 }
 
 /*
@@ -76,7 +70,7 @@ static void pdacf_release(dev_link_t *link)
  */
 static int snd_pdacf_free(struct snd_pdacf *pdacf)
 {
-	dev_link_t *link = &pdacf->link;
+	struct pcmcia_device *link = pdacf->p_dev;
 
 	pdacf_release(link);
 
@@ -96,10 +90,9 @@ static int snd_pdacf_dev_free(struct snd_device *device)
 /*
  * snd_pdacf_attach - attach callback for cs
  */
-static int snd_pdacf_attach(struct pcmcia_device *p_dev)
+static int snd_pdacf_probe(struct pcmcia_device *link)
 {
 	int i;
-	dev_link_t *link; /* Info for cardmgr */
 	struct snd_pdacf *pdacf;
 	struct snd_card *card;
 	static struct snd_device_ops ops = {
@@ -139,7 +132,7 @@ static int snd_pdacf_attach(struct pcmcia_device *p_dev)
 	pdacf->index = i;
 	card_list[i] = card;
 
-	link = &pdacf->link;
+	pdacf->p_dev = link;
 	link->priv = pdacf;
 
 	link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
@@ -156,13 +149,7 @@ static int snd_pdacf_attach(struct pcmcia_device *p_dev)
 	link->conf.ConfigIndex = 1;
 	link->conf.Present = PRESENT_OPTION;
 
-	/* Chain drivers */
-	link->next = NULL;
-
-	link->handle = p_dev;
-	pdacf_config(link);
-
-	return 0;
+	return pdacf_config(link);
 }
 
 
@@ -209,9 +196,8 @@ static int snd_pdacf_assign_resources(struct snd_pdacf *pdacf, int port, int irq
 /*
  * snd_pdacf_detach - detach callback for cs
  */
-static void snd_pdacf_detach(struct pcmcia_device *p_dev)
+static void snd_pdacf_detach(struct pcmcia_device *link)
 {
-	dev_link_t *link = dev_to_instance(p_dev);
 	struct snd_pdacf *chip = link->priv;
 
 	snd_printdd(KERN_DEBUG "pdacf_detach called\n");
@@ -230,13 +216,11 @@ static void snd_pdacf_detach(struct pcmcia_device *p_dev)
 #define CS_CHECK(fn, ret) \
 do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
 
-static void pdacf_config(dev_link_t *link)
+static int pdacf_config(struct pcmcia_device *link)
 {
-	client_handle_t handle = link->handle;
 	struct snd_pdacf *pdacf = link->priv;
 	tuple_t tuple;
 	cisparse_t *parse = NULL;
-	config_info_t conf;
 	u_short buf[32];
 	int last_fn, last_ret;
 
@@ -244,7 +228,7 @@ static void pdacf_config(dev_link_t *link)
 	parse = kmalloc(sizeof(*parse), GFP_KERNEL);
 	if (! parse) {
 		snd_printk(KERN_ERR "pdacf_config: cannot allocate\n");
-		return;
+		return -ENOMEM;
 	}
 	tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
 	tuple.Attributes = 0;
@@ -252,71 +236,51 @@ static void pdacf_config(dev_link_t *link)
 	tuple.TupleDataMax = sizeof(buf);
 	tuple.TupleOffset = 0;
 	tuple.DesiredTuple = CISTPL_CONFIG;
-	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
-	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
-	CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, parse));
+	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+	CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, parse));
 	link->conf.ConfigBase = parse->config.base;
 	link->conf.ConfigIndex = 0x5;
 	kfree(parse);
 
-	CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(handle, &conf));
-	link->conf.Vcc = conf.Vcc;
-
-	/* Configure card */
-	link->state |= DEV_CONFIG;
-
-	CS_CHECK(RequestIO, pcmcia_request_io(handle, &link->io));
-	CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
-	CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+	CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
+	CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
+	CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
 
 	if (snd_pdacf_assign_resources(pdacf, link->io.BasePort1, link->irq.AssignedIRQ) < 0)
 		goto failed;
 
-	link->dev = &pdacf->node;
-	link->state &= ~DEV_CONFIG_PENDING;
-	return;
+	link->dev_node = &pdacf->node;
+	return 0;
 
 cs_failed:
-	cs_error(link->handle, last_fn, last_ret);
+	cs_error(link, last_fn, last_ret);
 failed:
-	pcmcia_release_configuration(link->handle);
-	pcmcia_release_io(link->handle, &link->io);
-	pcmcia_release_irq(link->handle, &link->irq);
+	pcmcia_disable_device(link);
+	return -ENODEV;
 }
 
 #ifdef CONFIG_PM
 
-static int pdacf_suspend(struct pcmcia_device *dev)
+static int pdacf_suspend(struct pcmcia_device *link)
 {
-	dev_link_t *link = dev_to_instance(dev);
 	struct snd_pdacf *chip = link->priv;
 
 	snd_printdd(KERN_DEBUG "SUSPEND\n");
-	link->state |= DEV_SUSPEND;
 	if (chip) {
 		snd_printdd(KERN_DEBUG "snd_pdacf_suspend calling\n");
 		snd_pdacf_suspend(chip, PMSG_SUSPEND);
 	}
 
-	snd_printdd(KERN_DEBUG "RESET_PHYSICAL\n");
-	if (link->state & DEV_CONFIG)
-		pcmcia_release_configuration(link->handle);
-
 	return 0;
 }
 
-static int pdacf_resume(struct pcmcia_device *dev)
+static int pdacf_resume(struct pcmcia_device *link)
 {
-	dev_link_t *link = dev_to_instance(dev);
 	struct snd_pdacf *chip = link->priv;
 
 	snd_printdd(KERN_DEBUG "RESUME\n");
-	link->state &= ~DEV_SUSPEND;
-
-	snd_printdd(KERN_DEBUG "CARD_RESET\n");
-	if (DEV_OK(link)) {
-		snd_printdd(KERN_DEBUG "requestconfig...\n");
-		pcmcia_request_configuration(link->handle, &link->conf);
+	if (pcmcia_dev_present(link)) {
 		if (chip) {
 			snd_printdd(KERN_DEBUG "calling snd_pdacf_resume\n");
 			snd_pdacf_resume(chip);
@@ -343,7 +307,7 @@ static struct pcmcia_driver pdacf_cs_driver = {
 	.drv = {
 		.name = "snd-pdaudiocf",
 	},
-	.probe = snd_pdacf_attach,
+	.probe = snd_pdacf_probe,
 	.remove = snd_pdacf_detach,
 	.id_table = snd_pdacf_ids,
 #ifdef CONFIG_PM
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf.h b/sound/pcmcia/pdaudiocf/pdaudiocf.h
index 2744f189a613..9a14a4f64bd3 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf.h
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf.h
@@ -116,7 +116,7 @@ struct snd_pdacf {
 	void *pcm_area;
 
 	/* pcmcia stuff */
-	dev_link_t link;
+	struct pcmcia_device *p_dev;
 	dev_node_t node;
 };
 
diff --git a/sound/pcmcia/vx/vxpocket.c b/sound/pcmcia/vx/vxpocket.c
index 66900d20a42f..7e0cda2b6ef9 100644
--- a/sound/pcmcia/vx/vxpocket.c
+++ b/sound/pcmcia/vx/vxpocket.c
@@ -59,15 +59,9 @@ static unsigned int card_alloc;
 
 /*
  */
-static void vxpocket_release(dev_link_t *link)
+static void vxpocket_release(struct pcmcia_device *link)
 {
-	if (link->state & DEV_CONFIG) {
-		/* release cs resources */
-		pcmcia_release_configuration(link->handle);
-		pcmcia_release_io(link->handle, &link->io);
-		pcmcia_release_irq(link->handle, &link->irq);
-		link->state &= ~DEV_CONFIG;
-	}
+	pcmcia_disable_device(link);
 }
 
 /*
@@ -132,9 +126,9 @@ static struct snd_vx_hardware vxp440_hw = {
 /*
  * create vxpocket instance
  */
-static struct snd_vxpocket *snd_vxpocket_new(struct snd_card *card, int ibl)
+static struct snd_vxpocket *snd_vxpocket_new(struct snd_card *card, int ibl,
+					     struct pcmcia_device *link)
 {
-	dev_link_t *link; /* Info for cardmgr */
 	struct vx_core *chip;
 	struct snd_vxpocket *vxp;
 	static struct snd_device_ops ops = {
@@ -154,7 +148,7 @@ static struct snd_vxpocket *snd_vxpocket_new(struct snd_card *card, int ibl)
 
 	vxp = (struct snd_vxpocket *)chip;
 
-	link = &vxp->link;
+	vxp->p_dev = link;
 	link->priv = chip;
 
 	link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
@@ -167,7 +161,6 @@ static struct snd_vxpocket *snd_vxpocket_new(struct snd_card *card, int ibl)
 	link->irq.Instance = chip;
 
 	link->conf.Attributes = CONF_ENABLE_IRQ;
-	link->conf.Vcc = 50;
 	link->conf.IntType = INT_MEMORY_AND_IO;
 	link->conf.ConfigIndex = 1;
 	link->conf.Present = PRESENT_OPTION;
@@ -215,9 +208,8 @@ static int snd_vxpocket_assign_resources(struct vx_core *chip, int port, int irq
 #define CS_CHECK(fn, ret) \
 do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
 
-static void vxpocket_config(dev_link_t *link)
+static int vxpocket_config(struct pcmcia_device *link)
 {
-	client_handle_t handle = link->handle;
 	struct vx_core *chip = link->priv;
 	struct snd_vxpocket *vxp = (struct snd_vxpocket *)chip;
 	tuple_t tuple;
@@ -229,24 +221,24 @@ static void vxpocket_config(dev_link_t *link)
 	parse = kmalloc(sizeof(*parse), GFP_KERNEL);
 	if (! parse) {
 		snd_printk(KERN_ERR "vx: cannot allocate\n");
-		return;
+		return -ENOMEM;
 	}
 	tuple.Attributes = 0;
 	tuple.TupleData = (cisdata_t *)buf;
 	tuple.TupleDataMax = sizeof(buf);
 	tuple.TupleOffset = 0;
 	tuple.DesiredTuple = CISTPL_CONFIG;
-	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
-	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
-	CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, parse));
+	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+	CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, parse));
 	link->conf.ConfigBase = parse->config.base;
 	link->conf.Present = parse->config.rmask[0];
 
 	/* redefine hardware record according to the VERSION1 string */
 	tuple.DesiredTuple = CISTPL_VERS_1;
-	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
-	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
-	CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, parse));
+	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
+	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
+	CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, parse));
 	if (! strcmp(parse->version_1.str + parse->version_1.ofs[1], "VX-POCKET")) {
 		snd_printdd("VX-pocket is detected\n");
 	} else {
@@ -257,67 +249,50 @@ static void vxpocket_config(dev_link_t *link)
 		strcpy(chip->card->driver, vxp440_hw.name);
 	}
 
-	/* Configure card */
-	link->state |= DEV_CONFIG;
-
-	CS_CHECK(RequestIO, pcmcia_request_io(handle, &link->io));
-	CS_CHECK(RequestIRQ, pcmcia_request_irq(link->handle, &link->irq));
-	CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
+	CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
+	CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
+	CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
 
-	chip->dev = &handle_to_dev(link->handle);
+	chip->dev = &handle_to_dev(link);
 	snd_card_set_dev(chip->card, chip->dev);
 
 	if (snd_vxpocket_assign_resources(chip, link->io.BasePort1, link->irq.AssignedIRQ) < 0)
 		goto failed;
 
-	link->dev = &vxp->node;
-	link->state &= ~DEV_CONFIG_PENDING;
+	link->dev_node = &vxp->node;
 	kfree(parse);
-	return;
+	return 0;
 
 cs_failed:
-	cs_error(link->handle, last_fn, last_ret);
+	cs_error(link, last_fn, last_ret);
 failed:
-	pcmcia_release_configuration(link->handle);
-	pcmcia_release_io(link->handle, &link->io);
-	pcmcia_release_irq(link->handle, &link->irq);
-	link->state &= ~DEV_CONFIG;
+	pcmcia_disable_device(link);
 	kfree(parse);
+	return -ENODEV;
 }
 
 #ifdef CONFIG_PM
 
-static int vxp_suspend(struct pcmcia_device *dev)
+static int vxp_suspend(struct pcmcia_device *link)
 {
-	dev_link_t *link = dev_to_instance(dev);
 	struct vx_core *chip = link->priv;
 
 	snd_printdd(KERN_DEBUG "SUSPEND\n");
-	link->state |= DEV_SUSPEND;
 	if (chip) {
 		snd_printdd(KERN_DEBUG "snd_vx_suspend calling\n");
 		snd_vx_suspend(chip, PMSG_SUSPEND);
 	}
-	snd_printdd(KERN_DEBUG "RESET_PHYSICAL\n");
-	if (link->state & DEV_CONFIG)
-		pcmcia_release_configuration(link->handle);
 
 	return 0;
 }
 
-static int vxp_resume(struct pcmcia_device *dev)
+static int vxp_resume(struct pcmcia_device *link)
 {
-	dev_link_t *link = dev_to_instance(dev);
 	struct vx_core *chip = link->priv;
 
 	snd_printdd(KERN_DEBUG "RESUME\n");
-	link->state &= ~DEV_SUSPEND;
-
-	snd_printdd(KERN_DEBUG "CARD_RESET\n");
-	if (DEV_OK(link)) {
+	if (pcmcia_dev_present(link)) {
 		//struct snd_vxpocket *vxp = (struct snd_vxpocket *)chip;
-		snd_printdd(KERN_DEBUG "requestconfig...\n");
-		pcmcia_request_configuration(link->handle, &link->conf);
 		if (chip) {
 			snd_printdd(KERN_DEBUG "calling snd_vx_resume\n");
 			snd_vx_resume(chip);
@@ -333,7 +308,7 @@ static int vxp_resume(struct pcmcia_device *dev)
 
 /*
  */
-static int vxpocket_attach(struct pcmcia_device *p_dev)
+static int vxpocket_probe(struct pcmcia_device *p_dev)
 {
 	struct snd_card *card;
 	struct snd_vxpocket *vxp;
@@ -358,7 +333,7 @@ static int vxpocket_attach(struct pcmcia_device *p_dev)
 		return -ENOMEM;
 	}
 
-	vxp = snd_vxpocket_new(card, ibl[i]);
+	vxp = snd_vxpocket_new(card, ibl[i], p_dev);
 	if (! vxp) {
 		snd_card_free(card);
 		return -ENODEV;
@@ -368,20 +343,13 @@ static int vxpocket_attach(struct pcmcia_device *p_dev)
 	vxp->index = i;
 	card_alloc |= 1 << i;
 
-	/* Chain drivers */
-	vxp->link.next = NULL;
-
-	vxp->link.handle = p_dev;
-	vxp->link.state |= DEV_PRESENT | DEV_CONFIG_PENDING;
-	p_dev->instance = &vxp->link;
-	vxpocket_config(&vxp->link);
+	vxp->p_dev = p_dev;
 
-	return 0;
+	return vxpocket_config(p_dev);
 }
 
-static void vxpocket_detach(struct pcmcia_device *p_dev)
+static void vxpocket_detach(struct pcmcia_device *link)
 {
-	dev_link_t *link = dev_to_instance(p_dev);
 	struct snd_vxpocket *vxp;
 	struct vx_core *chip;
 
@@ -413,7 +381,7 @@ static struct pcmcia_driver vxp_cs_driver = {
 	.drv = {
 		.name = "snd-vxpocket",
 	},
-	.probe = vxpocket_attach,
+	.probe = vxpocket_probe,
 	.remove = vxpocket_detach,
 	.id_table = vxp_ids,
 #ifdef CONFIG_PM
diff --git a/sound/pcmcia/vx/vxpocket.h b/sound/pcmcia/vx/vxpocket.h
index 67efae3f6c8d..27ea002294c0 100644
--- a/sound/pcmcia/vx/vxpocket.h
+++ b/sound/pcmcia/vx/vxpocket.h
@@ -42,7 +42,7 @@ struct snd_vxpocket {
 	int index;	/* card index */
 
 	/* pcmcia stuff */
-	dev_link_t link;
+	struct pcmcia_device *p_dev;
 	dev_node_t node;
 };
 
diff --git a/sound/usb/usbmixer.c b/sound/usb/usbmixer.c
index 8d08b34a1cb5..ce86283ee0fa 100644
--- a/sound/usb/usbmixer.c
+++ b/sound/usb/usbmixer.c
@@ -306,8 +306,8 @@ static int get_relative_value(struct usb_mixer_elem_info *cval, int val)
 		cval->res = 1;
 	if (val < cval->min)
 		return 0;
-	else if (val > cval->max)
-		return (cval->max - cval->min) / cval->res;
+	else if (val >= cval->max)
+		return (cval->max - cval->min + cval->res - 1) / cval->res;
 	else
 		return (val - cval->min) / cval->res;
 }
@@ -670,6 +670,36 @@ static int get_min_max(struct usb_mixer_elem_info *cval, int default_min)
 		}
 		if (cval->res == 0)
 			cval->res = 1;
+
+		/* Additional checks for the proper resolution
+		 *
+		 * Some devices report smaller resolutions than actually
+		 * reacting.  They don't return errors but simply clip
+		 * to the lower aligned value.
+		 */
+		if (cval->min + cval->res < cval->max) {
+			int last_valid_res = cval->res;
+			int saved, test, check;
+			get_cur_mix_value(cval, minchn, &saved);
+			for (;;) {
+				test = saved;
+				if (test < cval->max)
+					test += cval->res;
+				else
+					test -= cval->res;
+				if (test < cval->min || test > cval->max ||
+				    set_cur_mix_value(cval, minchn, test) ||
+				    get_cur_mix_value(cval, minchn, &check)) {
+					cval->res = last_valid_res;
+					break;
+				}
+				if (test == check)
+					break;
+				cval->res *= 2;
+			}
+			set_cur_mix_value(cval, minchn, saved);
+		}
+
 		cval->initialized = 1;
 	}
 	return 0;
@@ -695,7 +725,8 @@ static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol, struct snd_ctl_
 		if (! cval->initialized)
 			get_min_max(cval, 0);
 		uinfo->value.integer.min = 0;
-		uinfo->value.integer.max = (cval->max - cval->min) / cval->res;
+		uinfo->value.integer.max =
+			(cval->max - cval->min + cval->res - 1) / cval->res;
 	}
 	return 0;
 }