aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKumar Gala <galak@kernel.crashing.org>2007-02-12 22:28:39 -0500
committerKumar Gala <galak@kernel.crashing.org>2007-02-12 22:28:39 -0500
commit67c2b7d9d224232ee730b9c9444abed824b62e7a (patch)
tree61a48c2e3562f8f66c04fd02691390dec96466e1
parent49baa91d6863df480fa05eb57524a274f77fa886 (diff)
parent5986a2ec35836a878350c54af4bd91b1de6abc59 (diff)
Merge branch 'master' into 83xx
-rw-r--r--Documentation/HOWTO1
-rw-r--r--Documentation/feature-removal-schedule.txt17
-rw-r--r--Documentation/s390/Debugging390.txt2
-rw-r--r--Documentation/usb/proc_usb_info.txt21
-rw-r--r--Documentation/usb/usbmon.txt152
-rw-r--r--Documentation/video-output.txt34
-rw-r--r--MAINTAINERS34
-rw-r--r--Makefile17
-rw-r--r--arch/alpha/kernel/pci.c4
-rw-r--r--arch/i386/defconfig3
-rw-r--r--arch/i386/kernel/acpi/boot.c235
-rw-r--r--arch/i386/kernel/acpi/earlyquirk.c4
-rw-r--r--arch/i386/kernel/cpu/cpufreq/longhaul.c15
-rw-r--r--arch/i386/kernel/io_apic.c17
-rw-r--r--arch/i386/kernel/mpparse.c4
-rw-r--r--arch/i386/kernel/srat.c84
-rw-r--r--arch/i386/mach-es7000/es7000.h9
-rw-r--r--arch/i386/mach-es7000/es7000plat.c53
-rw-r--r--arch/i386/pci/mmconfig.c24
-rw-r--r--arch/ia64/Kconfig9
-rw-r--r--arch/ia64/hp/common/hwsw_iommu.c4
-rw-r--r--arch/ia64/kernel/acpi.c200
-rw-r--r--arch/ia64/kernel/crash.c16
-rw-r--r--arch/ia64/kernel/crash_dump.c3
-rw-r--r--arch/ia64/kernel/efi.c2
-rw-r--r--arch/ia64/kernel/entry.S2
-rw-r--r--arch/ia64/kernel/iosapic.c5
-rw-r--r--arch/ia64/kernel/machine_kexec.c15
-rw-r--r--arch/ia64/kernel/msi_ia64.c19
-rw-r--r--arch/ia64/kernel/process.c16
-rw-r--r--arch/ia64/kernel/ptrace.c14
-rw-r--r--arch/ia64/kernel/setup.c31
-rw-r--r--arch/ia64/kernel/vmlinux.lds.S1
-rw-r--r--arch/ia64/mm/contig.c76
-rw-r--r--arch/ia64/mm/discontig.c46
-rw-r--r--arch/ia64/mm/init.c38
-rw-r--r--arch/ia64/sn/kernel/huberror.c16
-rw-r--r--arch/ia64/sn/kernel/io_acpi_init.c314
-rw-r--r--arch/ia64/sn/kernel/io_common.c90
-rw-r--r--arch/ia64/sn/kernel/io_init.c54
-rw-r--r--arch/ia64/sn/kernel/iomv.c5
-rw-r--r--arch/ia64/sn/kernel/msi_sn.c20
-rw-r--r--arch/ia64/sn/pci/pcibr/pcibr_provider.c6
-rw-r--r--arch/mips/Kconfig103
-rw-r--r--arch/mips/Kconfig.debug8
-rw-r--r--arch/mips/arc/identify.c2
-rw-r--r--arch/mips/arc/memory.c18
-rw-r--r--arch/mips/au1000/common/irq.c8
-rw-r--r--arch/mips/au1000/common/pci.c18
-rw-r--r--arch/mips/au1000/common/prom.c3
-rw-r--r--arch/mips/au1000/common/setup.c19
-rw-r--r--arch/mips/au1000/pb1100/board_setup.c93
-rw-r--r--arch/mips/au1000/pb1200/irqmap.c32
-rw-r--r--arch/mips/basler/excite/excite_irq.c6
-rw-r--r--arch/mips/cobalt/irq.c2
-rw-r--r--arch/mips/cobalt/setup.c3
-rw-r--r--arch/mips/ddb5xxx/common/prom.c3
-rw-r--r--arch/mips/ddb5xxx/ddb5477/irq.c9
-rw-r--r--arch/mips/ddb5xxx/ddb5477/irq_5477.c2
-rw-r--r--arch/mips/dec/ioasic-irq.c4
-rw-r--r--arch/mips/dec/kn02-irq.c2
-rw-r--r--arch/mips/dec/prom/memory.c17
-rw-r--r--arch/mips/dec/setup.c12
-rw-r--r--arch/mips/emma2rh/common/irq_emma2rh.c2
-rw-r--r--arch/mips/emma2rh/markeins/irq.c2
-rw-r--r--arch/mips/emma2rh/markeins/irq_markeins.c4
-rw-r--r--arch/mips/gt64120/ev64120/irq.c2
-rw-r--r--arch/mips/gt64120/ev64120/setup.c3
-rw-r--r--arch/mips/gt64120/momenco_ocelot/dbg_io.c4
-rw-r--r--arch/mips/gt64120/momenco_ocelot/irq.c4
-rw-r--r--arch/mips/gt64120/momenco_ocelot/prom.c3
-rw-r--r--arch/mips/gt64120/wrppmc/irq.c2
-rw-r--r--arch/mips/gt64120/wrppmc/setup.c3
-rw-r--r--arch/mips/jazz/irq.c2
-rw-r--r--arch/mips/jmr3927/common/prom.c3
-rw-r--r--arch/mips/jmr3927/rbhma3100/irq.c2
-rw-r--r--arch/mips/jmr3927/rbhma3100/setup.c2
-rw-r--r--arch/mips/kernel/asm-offsets.c4
-rw-r--r--arch/mips/kernel/cpu-probe.c2
-rw-r--r--arch/mips/kernel/gdb-stub.c6
-rw-r--r--arch/mips/kernel/head.S25
-rw-r--r--arch/mips/kernel/i8259.c24
-rw-r--r--arch/mips/kernel/irixelf.c331
-rw-r--r--arch/mips/kernel/irq-msc01.c4
-rw-r--r--arch/mips/kernel/irq-mv6434x.c14
-rw-r--r--arch/mips/kernel/irq-rm7000.c13
-rw-r--r--arch/mips/kernel/irq-rm9000.c24
-rw-r--r--arch/mips/kernel/irq_cpu.c21
-rw-r--r--arch/mips/kernel/linux32.c28
-rw-r--r--arch/mips/kernel/mips-mt.c9
-rw-r--r--arch/mips/kernel/proc.c8
-rw-r--r--arch/mips/kernel/process.c6
-rw-r--r--arch/mips/kernel/r4k_fpu.S19
-rw-r--r--arch/mips/kernel/rtlx.c4
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/setup.c47
-rw-r--r--arch/mips/kernel/signal.c6
-rw-r--r--arch/mips/kernel/signal_n32.c4
-rw-r--r--arch/mips/kernel/smp-mt.c9
-rw-r--r--arch/mips/kernel/smtc.c54
-rw-r--r--arch/mips/kernel/sysirix.c4
-rw-r--r--arch/mips/kernel/vpe.c36
-rw-r--r--arch/mips/lasat/interrupt.c2
-rw-r--r--arch/mips/lasat/prom.c3
-rw-r--r--arch/mips/lib-32/Makefile2
-rw-r--r--arch/mips/lib-64/Makefile2
-rw-r--r--arch/mips/lib-64/memset.S142
-rw-r--r--arch/mips/lib/Makefile2
-rw-r--r--arch/mips/lib/memset.S (renamed from arch/mips/lib-32/memset.S)35
-rw-r--r--arch/mips/lib/uncached.c4
-rw-r--r--arch/mips/mips-boards/atlas/atlas_int.c9
-rw-r--r--arch/mips/mips-boards/generic/memory.c18
-rw-r--r--arch/mips/mips-boards/malta/malta_int.c7
-rw-r--r--arch/mips/mips-boards/sead/sead_int.c2
-rw-r--r--arch/mips/mips-boards/sim/sim_int.c6
-rw-r--r--arch/mips/mips-boards/sim/sim_mem.c16
-rw-r--r--arch/mips/mm/init.c50
-rw-r--r--arch/mips/momentum/jaguar_atx/Makefile2
-rw-r--r--arch/mips/momentum/jaguar_atx/irq.c4
-rw-r--r--arch/mips/momentum/jaguar_atx/jaguar_atx_fpga.h6
-rw-r--r--arch/mips/momentum/jaguar_atx/platform.c235
-rw-r--r--arch/mips/momentum/jaguar_atx/prom.c58
-rw-r--r--arch/mips/momentum/ocelot_3/irq.c2
-rw-r--r--arch/mips/momentum/ocelot_3/prom.c3
-rw-r--r--arch/mips/momentum/ocelot_c/cpci-irq.c2
-rw-r--r--arch/mips/momentum/ocelot_c/dbg_io.c4
-rw-r--r--arch/mips/momentum/ocelot_c/irq.c2
-rw-r--r--arch/mips/momentum/ocelot_c/prom.c3
-rw-r--r--arch/mips/momentum/ocelot_c/uart-irq.c2
-rw-r--r--arch/mips/momentum/ocelot_g/dbg_io.c4
-rw-r--r--arch/mips/momentum/ocelot_g/irq.c4
-rw-r--r--arch/mips/momentum/ocelot_g/prom.c3
-rw-r--r--arch/mips/oprofile/Kconfig2
-rw-r--r--arch/mips/pci/fixup-vr4133.c16
-rw-r--r--arch/mips/philips/pnx8550/common/int.c2
-rw-r--r--arch/mips/philips/pnx8550/common/prom.c3
-rw-r--r--arch/mips/pmc-sierra/yosemite/dbg_io.c2
-rw-r--r--arch/mips/pmc-sierra/yosemite/irq.c6
-rw-r--r--arch/mips/pmc-sierra/yosemite/prom.c3
-rw-r--r--arch/mips/pmc-sierra/yosemite/setup.c2
-rw-r--r--arch/mips/qemu/q-mem.c3
-rw-r--r--arch/mips/sgi-ip22/ip22-eisa.c4
-rw-r--r--arch/mips/sgi-ip22/ip22-int.c13
-rw-r--r--arch/mips/sgi-ip22/ip22-mc.c3
-rw-r--r--arch/mips/sgi-ip27/ip27-irq.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c3
-rw-r--r--arch/mips/sgi-ip27/ip27-timer.c2
-rw-r--r--arch/mips/sgi-ip32/ip32-irq.c10
-rw-r--r--arch/mips/sgi-ip32/ip32-memory.c3
-rw-r--r--arch/mips/sibyte/bcm1480/irq.c2
-rw-r--r--arch/mips/sibyte/cfe/setup.c3
-rw-r--r--arch/mips/sibyte/sb1250/irq.c2
-rw-r--r--arch/mips/sibyte/sb1250/prom.c3
-rw-r--r--arch/mips/sni/irq.c2
-rw-r--r--arch/mips/sni/sniprom.c3
-rw-r--r--arch/mips/tx4927/common/tx4927_irq.c4
-rw-r--r--arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c12
-rw-r--r--arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_prom.c3
-rw-r--r--arch/mips/tx4938/common/irq.c4
-rw-r--r--arch/mips/tx4938/toshiba_rbtx4938/irq.c2
-rw-r--r--arch/mips/tx4938/toshiba_rbtx4938/prom.c3
-rw-r--r--arch/mips/vr41xx/common/icu.c31
-rw-r--r--arch/mips/vr41xx/common/init.c3
-rw-r--r--arch/mips/vr41xx/common/irq.c18
-rw-r--r--arch/mips/vr41xx/nec-cmbvr4133/irq.c53
-rw-r--r--arch/powerpc/Kconfig5
-rw-r--r--arch/powerpc/kernel/pci_64.c2
-rw-r--r--arch/s390/Kconfig29
-rw-r--r--arch/s390/appldata/appldata_base.c2
-rw-r--r--arch/s390/appldata/appldata_mem.c2
-rw-r--r--arch/s390/appldata/appldata_net_sum.c2
-rw-r--r--arch/s390/crypto/Kconfig60
-rw-r--r--arch/s390/crypto/Makefile3
-rw-r--r--arch/s390/crypto/aes_s390.c47
-rw-r--r--arch/s390/crypto/crypt_s390.h281
-rw-r--r--arch/s390/crypto/crypt_s390_query.c129
-rw-r--r--arch/s390/crypto/des_check_key.c6
-rw-r--r--arch/s390/crypto/des_s390.c8
-rw-r--r--arch/s390/crypto/prng.c213
-rw-r--r--arch/s390/crypto/sha1_s390.c83
-rw-r--r--arch/s390/crypto/sha256_s390.c11
-rw-r--r--arch/s390/defconfig12
-rw-r--r--arch/s390/hypfs/Makefile2
-rw-r--r--arch/s390/hypfs/hypfs.h9
-rw-r--r--arch/s390/hypfs/hypfs_diag.h16
-rw-r--r--arch/s390/hypfs/hypfs_vm.c231
-rw-r--r--arch/s390/hypfs/inode.c31
-rw-r--r--arch/s390/kernel/Makefile4
-rw-r--r--arch/s390/kernel/base.S150
-rw-r--r--arch/s390/kernel/binfmt_elf32.c2
-rw-r--r--arch/s390/kernel/compat_exec_domain.c5
-rw-r--r--arch/s390/kernel/compat_linux.c24
-rw-r--r--arch/s390/kernel/compat_linux.h31
-rw-r--r--arch/s390/kernel/compat_signal.c8
-rw-r--r--arch/s390/kernel/cpcmd.c14
-rw-r--r--arch/s390/kernel/crash.c1
-rw-r--r--arch/s390/kernel/debug.c18
-rw-r--r--arch/s390/kernel/early.c306
-rw-r--r--arch/s390/kernel/ebcdic.c1
-rw-r--r--arch/s390/kernel/head31.S194
-rw-r--r--arch/s390/kernel/head64.S193
-rw-r--r--arch/s390/kernel/ipl.c106
-rw-r--r--arch/s390/kernel/irq.c15
-rw-r--r--arch/s390/kernel/kprobes.c32
-rw-r--r--arch/s390/kernel/machine_kexec.c1
-rw-r--r--arch/s390/kernel/module.c5
-rw-r--r--arch/s390/kernel/process.c4
-rw-r--r--arch/s390/kernel/profile.c20
-rw-r--r--arch/s390/kernel/ptrace.c46
-rw-r--r--arch/s390/kernel/reset.S90
-rw-r--r--arch/s390/kernel/s390_ext.c8
-rw-r--r--arch/s390/kernel/setup.c155
-rw-r--r--arch/s390/kernel/signal.c2
-rw-r--r--arch/s390/kernel/smp.c32
-rw-r--r--arch/s390/kernel/stacktrace.c10
-rw-r--r--arch/s390/kernel/time.c1185
-rw-r--r--arch/s390/kernel/traps.c24
-rw-r--r--arch/s390/kernel/vmlinux.lds.S13
-rw-r--r--arch/s390/kernel/vtime.c10
-rw-r--r--arch/s390/lib/Makefile2
-rw-r--r--arch/s390/lib/delay.c48
-rw-r--r--arch/s390/lib/qrnnd.S77
-rw-r--r--arch/s390/lib/uaccess.h23
-rw-r--r--arch/s390/lib/uaccess_mvcos.c78
-rw-r--r--arch/s390/lib/uaccess_pt.c329
-rw-r--r--arch/s390/lib/uaccess_std.c23
-rw-r--r--arch/s390/math-emu/Makefile2
-rw-r--r--arch/s390/math-emu/math.c2
-rw-r--r--arch/s390/math-emu/qrnnd.S77
-rw-r--r--arch/s390/mm/cmm.c4
-rw-r--r--arch/s390/mm/extmem.c66
-rw-r--r--arch/s390/mm/fault.c93
-rw-r--r--arch/s390/mm/init.c20
-rw-r--r--arch/s390/mm/vmem.c14
-rw-r--r--arch/x86_64/kernel/early-quirks.c4
-rw-r--r--arch/x86_64/kernel/genapic.c4
-rw-r--r--arch/x86_64/kernel/io_apic.c17
-rw-r--r--arch/x86_64/kernel/mpparse.c2
-rw-r--r--arch/x86_64/kernel/pci-swiotlb.c2
-rw-r--r--arch/x86_64/kernel/time.c18
-rw-r--r--arch/x86_64/mm/srat.c48
-rw-r--r--arch/x86_64/pci/mmconfig.c29
-rw-r--r--crypto/Kconfig49
-rw-r--r--drivers/acpi/Kconfig37
-rw-r--r--drivers/acpi/Makefile5
-rw-r--r--drivers/acpi/asus_acpi.c9
-rw-r--r--drivers/acpi/battery.c4
-rw-r--r--drivers/acpi/bay.c490
-rw-r--r--drivers/acpi/blacklist.c29
-rw-r--r--drivers/acpi/bus.c44
-rw-r--r--drivers/acpi/button.c2
-rw-r--r--drivers/acpi/container.c6
-rw-r--r--drivers/acpi/debug.c62
-rw-r--r--drivers/acpi/dispatcher/dsfield.c32
-rw-r--r--drivers/acpi/dispatcher/dsinit.c25
-rw-r--r--drivers/acpi/dispatcher/dsmethod.c55
-rw-r--r--drivers/acpi/dispatcher/dsmthdat.c2
-rw-r--r--drivers/acpi/dispatcher/dsobject.c78
-rw-r--r--drivers/acpi/dispatcher/dsopcode.c6
-rw-r--r--drivers/acpi/dispatcher/dsutils.c2
-rw-r--r--drivers/acpi/dispatcher/dswexec.c12
-rw-r--r--drivers/acpi/dispatcher/dswload.c19
-rw-r--r--drivers/acpi/dispatcher/dswscope.c2
-rw-r--r--drivers/acpi/dispatcher/dswstate.c2
-rw-r--r--drivers/acpi/dock.c16
-rw-r--r--drivers/acpi/ec.c13
-rw-r--r--drivers/acpi/events/evevent.c17
-rw-r--r--drivers/acpi/events/evgpe.c91
-rw-r--r--drivers/acpi/events/evgpeblk.c64
-rw-r--r--drivers/acpi/events/evmisc.c201
-rw-r--r--drivers/acpi/events/evregion.c17
-rw-r--r--drivers/acpi/events/evrgnini.c168
-rw-r--r--drivers/acpi/events/evsci.c14
-rw-r--r--drivers/acpi/events/evxface.c8
-rw-r--r--drivers/acpi/events/evxfevnt.c27
-rw-r--r--drivers/acpi/events/evxfregn.c2
-rw-r--r--drivers/acpi/executer/exconfig.c235
-rw-r--r--drivers/acpi/executer/exconvrt.c2
-rw-r--r--drivers/acpi/executer/excreate.c21
-rw-r--r--drivers/acpi/executer/exdump.c29
-rw-r--r--drivers/acpi/executer/exfield.c2
-rw-r--r--drivers/acpi/executer/exfldio.c7
-rw-r--r--drivers/acpi/executer/exmisc.c2
-rw-r--r--drivers/acpi/executer/exmutex.c86
-rw-r--r--drivers/acpi/executer/exnames.c2
-rw-r--r--drivers/acpi/executer/exoparg1.c4
-rw-r--r--drivers/acpi/executer/exoparg2.c2
-rw-r--r--drivers/acpi/executer/exoparg3.c2
-rw-r--r--drivers/acpi/executer/exoparg6.c2
-rw-r--r--drivers/acpi/executer/exprep.c2
-rw-r--r--drivers/acpi/executer/exregion.c16
-rw-r--r--drivers/acpi/executer/exresnte.c2
-rw-r--r--drivers/acpi/executer/exresolv.c10
-rw-r--r--drivers/acpi/executer/exresop.c12
-rw-r--r--drivers/acpi/executer/exstore.c2
-rw-r--r--drivers/acpi/executer/exstoren.c2
-rw-r--r--drivers/acpi/executer/exstorob.c2
-rw-r--r--drivers/acpi/executer/exsystem.c110
-rw-r--r--drivers/acpi/executer/exutils.c106
-rw-r--r--drivers/acpi/fan.c8
-rw-r--r--drivers/acpi/glue.c123
-rw-r--r--drivers/acpi/hardware/hwacpi.c56
-rw-r--r--drivers/acpi/hardware/hwgpe.c15
-rw-r--r--drivers/acpi/hardware/hwregs.c98
-rw-r--r--drivers/acpi/hardware/hwsleep.c81
-rw-r--r--drivers/acpi/hardware/hwtimer.c9
-rw-r--r--drivers/acpi/motherboard.c191
-rw-r--r--drivers/acpi/namespace/nsaccess.c36
-rw-r--r--drivers/acpi/namespace/nsalloc.c14
-rw-r--r--drivers/acpi/namespace/nsdump.c13
-rw-r--r--drivers/acpi/namespace/nsdumpdv.c2
-rw-r--r--drivers/acpi/namespace/nseval.c13
-rw-r--r--drivers/acpi/namespace/nsinit.c9
-rw-r--r--drivers/acpi/namespace/nsload.c160
-rw-r--r--drivers/acpi/namespace/nsnames.c2
-rw-r--r--drivers/acpi/namespace/nsobject.c2
-rw-r--r--drivers/acpi/namespace/nsparse.c52
-rw-r--r--drivers/acpi/namespace/nssearch.c9
-rw-r--r--drivers/acpi/namespace/nsutils.c9
-rw-r--r--drivers/acpi/namespace/nswalk.c65
-rw-r--r--drivers/acpi/namespace/nsxfeval.c13
-rw-r--r--drivers/acpi/namespace/nsxfname.c47
-rw-r--r--drivers/acpi/namespace/nsxfobj.c2
-rw-r--r--drivers/acpi/numa.c77
-rw-r--r--drivers/acpi/osl.c97
-rw-r--r--drivers/acpi/parser/psargs.c2
-rw-r--r--drivers/acpi/parser/psloop.c1408
-rw-r--r--drivers/acpi/parser/psopcode.c2
-rw-r--r--drivers/acpi/parser/psparse.c7
-rw-r--r--drivers/acpi/parser/psscope.c2
-rw-r--r--drivers/acpi/parser/pstree.c2
-rw-r--r--drivers/acpi/parser/psutils.c2
-rw-r--r--drivers/acpi/parser/pswalk.c2
-rw-r--r--drivers/acpi/parser/psxface.c116
-rw-r--r--drivers/acpi/pci_link.c4
-rw-r--r--drivers/acpi/pci_root.c38
-rw-r--r--drivers/acpi/processor_core.c189
-rw-r--r--drivers/acpi/processor_idle.c52
-rw-r--r--drivers/acpi/processor_perflib.c27
-rw-r--r--drivers/acpi/processor_throttling.c4
-rw-r--r--drivers/acpi/resources/rsaddr.c2
-rw-r--r--drivers/acpi/resources/rscalc.c2
-rw-r--r--drivers/acpi/resources/rscreate.c2
-rw-r--r--drivers/acpi/resources/rsdump.c2
-rw-r--r--drivers/acpi/resources/rsinfo.c2
-rw-r--r--drivers/acpi/resources/rsio.c2
-rw-r--r--drivers/acpi/resources/rsirq.c2
-rw-r--r--drivers/acpi/resources/rslist.c2
-rw-r--r--drivers/acpi/resources/rsmemory.c2
-rw-r--r--drivers/acpi/resources/rsmisc.c2
-rw-r--r--drivers/acpi/resources/rsutils.c2
-rw-r--r--drivers/acpi/resources/rsxface.c2
-rw-r--r--drivers/acpi/scan.c1265
-rw-r--r--drivers/acpi/sleep/proc.c36
-rw-r--r--drivers/acpi/system.c39
-rw-r--r--drivers/acpi/tables.c508
-rw-r--r--drivers/acpi/tables/Makefile3
-rw-r--r--drivers/acpi/tables/tbconvrt.c622
-rw-r--r--drivers/acpi/tables/tbfadt.c434
-rw-r--r--drivers/acpi/tables/tbfind.c126
-rw-r--r--drivers/acpi/tables/tbget.c471
-rw-r--r--drivers/acpi/tables/tbgetall.c311
-rw-r--r--drivers/acpi/tables/tbinstal.c664
-rw-r--r--drivers/acpi/tables/tbrsdt.c307
-rw-r--r--drivers/acpi/tables/tbutils.c513
-rw-r--r--drivers/acpi/tables/tbxface.c671
-rw-r--r--drivers/acpi/tables/tbxfroot.c552
-rw-r--r--drivers/acpi/thermal.c4
-rw-r--r--drivers/acpi/utilities/utalloc.c11
-rw-r--r--drivers/acpi/utilities/utcache.c10
-rw-r--r--drivers/acpi/utilities/utcopy.c11
-rw-r--r--drivers/acpi/utilities/utdebug.c7
-rw-r--r--drivers/acpi/utilities/utdelete.c16
-rw-r--r--drivers/acpi/utilities/uteval.c2
-rw-r--r--drivers/acpi/utilities/utglobal.c199
-rw-r--r--drivers/acpi/utilities/utinit.c114
-rw-r--r--drivers/acpi/utilities/utmath.c2
-rw-r--r--drivers/acpi/utilities/utmisc.c102
-rw-r--r--drivers/acpi/utilities/utmutex.c2
-rw-r--r--drivers/acpi/utilities/utobject.c2
-rw-r--r--drivers/acpi/utilities/utresrc.c2
-rw-r--r--drivers/acpi/utilities/utstate.c2
-rw-r--r--drivers/acpi/utilities/utxface.c29
-rw-r--r--drivers/acpi/video.c166
-rw-r--r--drivers/base/class.c21
-rw-r--r--drivers/base/core.c203
-rw-r--r--drivers/base/dd.c21
-rw-r--r--drivers/base/firmware_class.c2
-rw-r--r--drivers/base/platform.c11
-rw-r--r--drivers/char/drm/drmP.h36
-rw-r--r--drivers/char/drm/drm_bufs.c19
-rw-r--r--drivers/char/drm/drm_memory.c94
-rw-r--r--drivers/char/drm/drm_memory.h20
-rw-r--r--drivers/char/drm/drm_memory_debug.h70
-rw-r--r--drivers/char/drm/drm_mm.c183
-rw-r--r--drivers/char/drm/drm_pciids.h4
-rw-r--r--drivers/char/drm/drm_proc.c4
-rw-r--r--drivers/char/drm/drm_sman.c3
-rw-r--r--drivers/char/drm/drm_vm.c16
-rw-r--r--drivers/char/drm/i810_dma.c34
-rw-r--r--drivers/char/drm/i810_drv.h2
-rw-r--r--drivers/char/drm/i830_dma.c32
-rw-r--r--drivers/char/drm/i830_drv.h2
-rw-r--r--drivers/char/drm/via_dma.c9
-rw-r--r--drivers/char/drm/via_dmablit.c2
-rw-r--r--drivers/char/drm/via_drv.h11
-rw-r--r--drivers/char/drm/via_irq.c16
-rw-r--r--drivers/char/drm/via_map.c3
-rw-r--r--drivers/char/drm/via_verifier.c50
-rw-r--r--drivers/char/drm/via_verifier.h1
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c18
-rw-r--r--drivers/char/tpm/tpm_bios.c8
-rw-r--r--drivers/crypto/Kconfig2
-rw-r--r--drivers/firmware/pcdp.c2
-rw-r--r--drivers/hid/Kconfig14
-rw-r--r--drivers/hid/Makefile11
-rw-r--r--drivers/hid/hid-core.c8
-rw-r--r--drivers/hid/hid-debug.c764
-rw-r--r--drivers/hid/hid-input.c35
-rw-r--r--drivers/i2c/chips/isp1301_omap.c2
-rw-r--r--drivers/ide/Kconfig24
-rw-r--r--drivers/ide/Makefile1
-rw-r--r--drivers/ide/ide-acpi.c697
-rw-r--r--drivers/ide/ide-probe.c3
-rw-r--r--drivers/ide/ide.c36
-rw-r--r--drivers/ide/pci/Makefile4
-rw-r--r--drivers/ide/pci/delkin_cb.c140
-rw-r--r--drivers/ide/pci/hpt366.c1583
-rw-r--r--drivers/ide/pci/it8213.c362
-rw-r--r--drivers/ide/pci/pdc202xx_new.c56
-rw-r--r--drivers/ide/pci/pdc202xx_old.c27
-rw-r--r--drivers/ide/pci/piix.c31
-rw-r--r--drivers/ide/pci/slc90e66.c55
-rw-r--r--drivers/ide/pci/tc86c001.c309
-rw-r--r--drivers/ide/setup-pci.c7
-rw-r--r--drivers/infiniband/core/addr.c3
-rw-r--r--drivers/infiniband/core/mad.c11
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c2
-rw-r--r--drivers/infiniband/hw/amso1100/c2_cq.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h29
-rw-r--r--drivers/infiniband/hw/ehca/ehca_cq.c65
-rw-r--r--drivers/infiniband/hw/ehca/ehca_iverbs.h8
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c6
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c78
-rw-r--r--drivers/infiniband/hw/ehca/ehca_reqs.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_uverbs.c395
-rw-r--r--drivers/infiniband/hw/ipath/ipath_qp.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_rc.c8
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ruc.c8
-rw-r--r--drivers/infiniband/hw/ipath/ipath_uc.c4
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ud.c8
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cq.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c33
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c11
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c7
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h1
-rw-r--r--drivers/input/serio/serio.c6
-rw-r--r--drivers/media/video/zc0301/zc0301_sensor.h1
-rw-r--r--drivers/misc/Kconfig19
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/asus-laptop.c1165
-rw-r--r--drivers/misc/tifm_7xx1.c402
-rw-r--r--drivers/misc/tifm_core.c65
-rw-r--r--drivers/mmc/at91_mci.c3
-rw-r--r--drivers/mmc/au1xmmc.c13
-rw-r--r--drivers/mmc/imxmmc.c4
-rw-r--r--drivers/mmc/mmc.c182
-rw-r--r--drivers/mmc/mmc_block.c15
-rw-r--r--drivers/mmc/mmc_queue.c2
-rw-r--r--drivers/mmc/mmc_sysfs.c2
-rw-r--r--drivers/mmc/mmci.c15
-rw-r--r--drivers/mmc/omap.c6
-rw-r--r--drivers/mmc/pxamci.c10
-rw-r--r--drivers/mmc/sdhci.c91
-rw-r--r--drivers/mmc/sdhci.h2
-rw-r--r--drivers/mmc/tifm_sd.c487
-rw-r--r--drivers/mmc/wbsd.c102
-rw-r--r--drivers/mmc/wbsd.h1
-rw-r--r--drivers/net/3c59x.c3
-rw-r--r--drivers/net/Kconfig65
-rw-r--r--drivers/net/Makefile6
-rw-r--r--drivers/net/Space.c4
-rw-r--r--drivers/net/amd8111e.c3
-rw-r--r--drivers/net/arm/at91_ether.c2
-rw-r--r--drivers/net/arm/etherh.c2
-rw-r--r--drivers/net/b44.c8
-rw-r--r--drivers/net/b44.h10
-rw-r--r--drivers/net/bmac.c20
-rw-r--r--drivers/net/bnx2.c16
-rw-r--r--drivers/net/bonding/bond_main.c23
-rw-r--r--drivers/net/bonding/bond_sysfs.c302
-rw-r--r--drivers/net/bonding/bonding.h9
-rw-r--r--drivers/net/chelsio/common.h2
-rw-r--r--drivers/net/chelsio/cpl5_cmd.h18
-rw-r--r--drivers/net/chelsio/cxgb2.c149
-rw-r--r--drivers/net/chelsio/elmer0.h40
-rw-r--r--drivers/net/chelsio/espi.c44
-rw-r--r--drivers/net/chelsio/fpga_defs.h6
-rw-r--r--drivers/net/chelsio/gmac.h11
-rw-r--r--drivers/net/chelsio/ixf1010.c100
-rw-r--r--drivers/net/chelsio/mv88e1xxx.c27
-rw-r--r--drivers/net/chelsio/my3126.c16
-rw-r--r--drivers/net/chelsio/pm3393.c91
-rw-r--r--drivers/net/chelsio/sge.c328
-rw-r--r--drivers/net/chelsio/subr.c89
-rw-r--r--drivers/net/chelsio/tp.c62
-rw-r--r--drivers/net/chelsio/vsc7326.c139
-rw-r--r--drivers/net/chelsio/vsc7326_reg.h139
-rw-r--r--drivers/net/chelsio/vsc8244.c41
-rw-r--r--drivers/net/cxgb3/Makefile8
-rw-r--r--drivers/net/cxgb3/adapter.h279
-rw-r--r--drivers/net/cxgb3/ael1002.c251
-rw-r--r--drivers/net/cxgb3/common.h729
-rw-r--r--drivers/net/cxgb3/cxgb3_ctl_defs.h164
-rw-r--r--drivers/net/cxgb3/cxgb3_defs.h99
-rw-r--r--drivers/net/cxgb3/cxgb3_ioctl.h185
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c2515
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c1222
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.h193
-rw-r--r--drivers/net/cxgb3/firmware_exports.h177
-rw-r--r--drivers/net/cxgb3/l2t.c450
-rw-r--r--drivers/net/cxgb3/l2t.h143
-rw-r--r--drivers/net/cxgb3/mc5.c473
-rw-r--r--drivers/net/cxgb3/regs.h2195
-rw-r--r--drivers/net/cxgb3/sge.c2681
-rw-r--r--drivers/net/cxgb3/sge_defs.h251
-rw-r--r--drivers/net/cxgb3/t3_cpl.h1444
-rw-r--r--drivers/net/cxgb3/t3_hw.c3375
-rw-r--r--drivers/net/cxgb3/t3cdev.h73
-rw-r--r--drivers/net/cxgb3/version.h39
-rw-r--r--drivers/net/cxgb3/vsc8211.c228
-rw-r--r--drivers/net/cxgb3/xgmac.c409
-rw-r--r--drivers/net/declance.c164
-rw-r--r--drivers/net/e1000/e1000.h7
-rw-r--r--drivers/net/e1000/e1000_ethtool.c6
-rw-r--r--drivers/net/e1000/e1000_main.c130
-rw-r--r--drivers/net/e1000/e1000_osdep.h4
-rw-r--r--drivers/net/e1000/e1000_param.c15
-rw-r--r--drivers/net/forcedeth.c1342
-rw-r--r--drivers/net/hp100.c2
-rw-r--r--drivers/net/iseries_veth.c2
-rw-r--r--drivers/net/ixgb/ixgb.h2
-rw-r--r--drivers/net/ixgb/ixgb_ethtool.c6
-rw-r--r--drivers/net/ixgb/ixgb_main.c6
-rw-r--r--drivers/net/macb.c61
-rw-r--r--drivers/net/macb.h8
-rw-r--r--drivers/net/mace.c16
-rw-r--r--drivers/net/macmace.c18
-rw-r--r--drivers/net/macsonic.c6
-rw-r--r--drivers/net/myri10ge/myri10ge.c10
-rw-r--r--drivers/net/netxen/netxen_nic.h17
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c96
-rw-r--r--drivers/net/netxen/netxen_nic_init.c279
-rw-r--r--drivers/net/oaknet.c666
-rw-r--r--drivers/net/pasemi_mac.c1019
-rw-r--r--drivers/net/pasemi_mac.h460
-rwxr-xr-x[-rw-r--r--]drivers/net/qla3xxx.c363
-rwxr-xr-x[-rw-r--r--]drivers/net/qla3xxx.h88
-rw-r--r--drivers/net/s2io-regs.h7
-rw-r--r--drivers/net/s2io.c1178
-rw-r--r--drivers/net/s2io.h223
-rw-r--r--drivers/net/sc92031.c1620
-rw-r--r--drivers/net/sk_mca.c1216
-rw-r--r--drivers/net/sk_mca.h170
-rw-r--r--drivers/net/skfp/can.c83
-rw-r--r--drivers/net/skfp/drvfbi.c24
-rw-r--r--drivers/net/skfp/fplustm.c4
-rw-r--r--drivers/net/skfp/smt.c10
-rw-r--r--drivers/net/skge.c235
-rw-r--r--drivers/net/skge.h2
-rw-r--r--drivers/net/sky2.c543
-rw-r--r--drivers/net/sky2.h85
-rw-r--r--drivers/net/smc911x.c2
-rw-r--r--drivers/net/smc91x.c2
-rw-r--r--drivers/net/spider_net.c313
-rw-r--r--drivers/net/spider_net.h20
-rw-r--r--drivers/net/spider_net_ethtool.c4
-rw-r--r--drivers/net/tg3.c32
-rw-r--r--drivers/net/ucc_geth.c23
-rw-r--r--drivers/net/wan/Kconfig24
-rw-r--r--drivers/net/wan/Makefile1
-rw-r--r--drivers/net/wan/hdlc.c3
-rw-r--r--drivers/net/wan/pc300too.c565
-rw-r--r--drivers/net/wan/z85230.c14
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx.h7
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx_leds.c11
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx_main.c36
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx_radio.c2
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx_radio.h16
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c2
-rw-r--r--drivers/net/wireless/ipw2200.c4
-rw-r--r--drivers/net/wireless/orinoco.c6
-rw-r--r--drivers/net/wireless/orinoco_cs.c2
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c13
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.h4
-rw-r--r--drivers/net/wireless/prism54/islpci_hotplug.c3
-rw-r--r--drivers/net/wireless/spectrum_cs.c2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c126
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h158
-rw-r--r--drivers/net/wireless/zd1211rw/zd_def.h2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_ieee80211.h1
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf.h2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_types.h71
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c128
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.h6
-rw-r--r--drivers/pci/hotplug/Kconfig9
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c10
-rw-r--r--drivers/pci/hotplug/pciehp.h194
-rw-r--r--drivers/pci/hotplug/pciehp_core.c292
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c223
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c827
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c155
-rw-r--r--drivers/pci/hotplug/shpchp.h4
-rw-r--r--drivers/pci/hotplug/shpchp_core.c4
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c20
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c185
-rw-r--r--drivers/pci/msi.c325
-rw-r--r--drivers/pci/pci-driver.c7
-rw-r--r--drivers/pci/pci.c171
-rw-r--r--drivers/pci/pci.h14
-rw-r--r--drivers/pci/probe.c70
-rw-r--r--drivers/pci/quirks.c131
-rw-r--r--drivers/pci/search.c38
-rw-r--r--drivers/pcmcia/cs.c34
-rw-r--r--drivers/pcmcia/cs_internal.h4
-rw-r--r--drivers/pcmcia/ds.c14
-rw-r--r--drivers/pcmcia/i82092.c2
-rw-r--r--drivers/pcmcia/i82365.c2
-rw-r--r--drivers/pcmcia/pcmcia_ioctl.c1
-rw-r--r--drivers/pcmcia/pcmcia_resource.c1
-rw-r--r--drivers/pcmcia/pd6729.c2
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c56
-rw-r--r--drivers/pcmcia/soc_common.c6
-rw-r--r--drivers/pcmcia/socket_sysfs.c104
-rw-r--r--drivers/pcmcia/tcic.c2
-rw-r--r--drivers/pcmcia/yenta_socket.c2
-rw-r--r--drivers/pnp/pnpacpi/Kconfig4
-rw-r--r--drivers/pnp/system.c52
-rw-r--r--drivers/s390/Kconfig8
-rw-r--r--drivers/s390/Makefile2
-rw-r--r--drivers/s390/block/dasd.c33
-rw-r--r--drivers/s390/block/dasd_3990_erp.c5
-rw-r--r--drivers/s390/block/dasd_devmap.c6
-rw-r--r--drivers/s390/block/dasd_diag.c8
-rw-r--r--drivers/s390/block/dasd_eckd.c95
-rw-r--r--drivers/s390/block/dasd_eer.c24
-rw-r--r--drivers/s390/block/dasd_erp.c80
-rw-r--r--drivers/s390/block/dasd_fba.c4
-rw-r--r--drivers/s390/block/dasd_genhd.c2
-rw-r--r--drivers/s390/block/dasd_int.h1
-rw-r--r--drivers/s390/block/dasd_proc.c8
-rw-r--r--drivers/s390/block/dcssblk.c6
-rw-r--r--drivers/s390/char/Makefile4
-rw-r--r--drivers/s390/char/con3215.c2
-rw-r--r--drivers/s390/char/con3270.c3
-rw-r--r--drivers/s390/char/defkeymap.c2
-rw-r--r--drivers/s390/char/fs3270.c4
-rw-r--r--drivers/s390/char/keyboard.c2
-rw-r--r--drivers/s390/char/monwriter.c4
-rw-r--r--drivers/s390/char/raw3270.c4
-rw-r--r--drivers/s390/char/sclp.c93
-rw-r--r--drivers/s390/char/sclp.h18
-rw-r--r--drivers/s390/char/sclp_con.c2
-rw-r--r--drivers/s390/char/sclp_cpi.c2
-rw-r--r--drivers/s390/char/sclp_info.c57
-rw-r--r--drivers/s390/char/sclp_rw.c2
-rw-r--r--drivers/s390/char/sclp_tty.c2
-rw-r--r--drivers/s390/char/sclp_vt220.c4
-rw-r--r--drivers/s390/char/tape.h22
-rw-r--r--drivers/s390/char/tape_3590.c479
-rw-r--r--drivers/s390/char/tape_3590.h53
-rw-r--r--drivers/s390/char/tape_block.c4
-rw-r--r--drivers/s390/char/tape_char.c27
-rw-r--r--drivers/s390/char/tape_core.c69
-rw-r--r--drivers/s390/char/tty3270.c13
-rw-r--r--drivers/s390/char/vmlogrdr.c5
-rw-r--r--drivers/s390/cio/blacklist.c10
-rw-r--r--drivers/s390/cio/ccwgroup.c6
-rw-r--r--drivers/s390/cio/chsc.c270
-rw-r--r--drivers/s390/cio/chsc.h11
-rw-r--r--drivers/s390/cio/cio.c37
-rw-r--r--drivers/s390/cio/cmf.c4
-rw-r--r--drivers/s390/cio/css.c13
-rw-r--r--drivers/s390/cio/css.h2
-rw-r--r--drivers/s390/cio/device.c12
-rw-r--r--drivers/s390/cio/device.h2
-rw-r--r--drivers/s390/cio/device_fsm.c8
-rw-r--r--drivers/s390/cio/device_ops.c2
-rw-r--r--drivers/s390/cio/device_status.c8
-rw-r--r--drivers/s390/cio/qdio.c77
-rw-r--r--drivers/s390/crypto/ap_bus.c8
-rw-r--r--drivers/s390/crypto/zcrypt_api.c20
-rw-r--r--drivers/s390/crypto/zcrypt_pcica.c8
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c3
-rw-r--r--drivers/s390/net/claw.c16
-rw-r--r--drivers/s390/net/ctcmain.c8
-rw-r--r--drivers/s390/net/cu3088.c2
-rw-r--r--drivers/s390/net/lcs.c6
-rw-r--r--drivers/s390/net/netiucv.c4
-rw-r--r--drivers/s390/net/qeth_eddp.c28
-rw-r--r--drivers/s390/net/qeth_main.c92
-rw-r--r--drivers/s390/net/qeth_sys.c30
-rw-r--r--drivers/s390/s390mach.c37
-rw-r--r--drivers/s390/s390mach.h3
-rw-r--r--drivers/s390/scsi/zfcp_aux.c25
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c44
-rw-r--r--drivers/s390/scsi/zfcp_erp.c7
-rw-r--r--drivers/s390/scsi/zfcp_ext.h4
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c2
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c38
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c18
-rw-r--r--drivers/s390/sysinfo.c63
-rw-r--r--drivers/scsi/ipr.c3
-rw-r--r--drivers/usb/atm/speedtch.c2
-rw-r--r--drivers/usb/class/usblp.c16
-rw-r--r--drivers/usb/core/Kconfig13
-rw-r--r--drivers/usb/core/buffer.c36
-rw-r--r--drivers/usb/core/devices.c22
-rw-r--r--drivers/usb/core/devio.c25
-rw-r--r--drivers/usb/core/driver.c39
-rw-r--r--drivers/usb/core/file.c13
-rw-r--r--drivers/usb/core/generic.c28
-rw-r--r--drivers/usb/core/hcd.c137
-rw-r--r--drivers/usb/core/hcd.h6
-rw-r--r--drivers/usb/core/hub.c64
-rw-r--r--drivers/usb/core/message.c6
-rw-r--r--drivers/usb/core/sysfs.c98
-rw-r--r--drivers/usb/core/urb.c21
-rw-r--r--drivers/usb/core/usb.c96
-rw-r--r--drivers/usb/gadget/at91_udc.c21
-rw-r--r--drivers/usb/gadget/at91_udc.h1
-rw-r--r--drivers/usb/gadget/config.c2
-rw-r--r--drivers/usb/gadget/epautoconf.c2
-rw-r--r--drivers/usb/gadget/ether.c148
-rw-r--r--drivers/usb/gadget/file_storage.c33
-rw-r--r--drivers/usb/gadget/gadget_chips.h8
-rw-r--r--drivers/usb/gadget/gmidi.c2
-rw-r--r--drivers/usb/gadget/goku_udc.c2
-rw-r--r--drivers/usb/gadget/inode.c240
-rw-r--r--drivers/usb/gadget/lh7a40x_udc.h2
-rw-r--r--drivers/usb/gadget/net2280.c2
-rw-r--r--drivers/usb/gadget/omap_udc.c2
-rw-r--r--drivers/usb/gadget/pxa2xx_udc.c2
-rw-r--r--drivers/usb/gadget/serial.c2
-rw-r--r--drivers/usb/gadget/usbstring.c2
-rw-r--r--drivers/usb/gadget/zero.c2
-rw-r--r--drivers/usb/host/Kconfig38
-rw-r--r--drivers/usb/host/ehci-dbg.c24
-rw-r--r--drivers/usb/host/ehci-fsl.c8
-rw-r--r--drivers/usb/host/ehci-hcd.c127
-rw-r--r--drivers/usb/host/ehci-hub.c324
-rw-r--r--drivers/usb/host/ehci-pci.c38
-rw-r--r--drivers/usb/host/ehci-ps3.c193
-rw-r--r--drivers/usb/host/ehci-q.c16
-rw-r--r--drivers/usb/host/ehci-sched.c22
-rw-r--r--drivers/usb/host/ehci.h46
-rw-r--r--drivers/usb/host/ohci-at91.c23
-rw-r--r--drivers/usb/host/ohci-au1xxx.c16
-rw-r--r--drivers/usb/host/ohci-ep93xx.c12
-rw-r--r--drivers/usb/host/ohci-hcd.c128
-rw-r--r--drivers/usb/host/ohci-lh7a404.c16
-rw-r--r--drivers/usb/host/ohci-omap.c19
-rw-r--r--drivers/usb/host/ohci-pci.c219
-rw-r--r--drivers/usb/host/ohci-pnx4008.c12
-rw-r--r--drivers/usb/host/ohci-pnx8550.c16
-rw-r--r--drivers/usb/host/ohci-ppc-of.c232
-rw-r--r--drivers/usb/host/ohci-ppc-soc.c18
-rw-r--r--drivers/usb/host/ohci-ps3.c196
-rw-r--r--drivers/usb/host/ohci-pxa27x.c16
-rw-r--r--drivers/usb/host/ohci-s3c2410.c12
-rw-r--r--drivers/usb/host/ohci-sa1111.c16
-rw-r--r--drivers/usb/host/ohci.h155
-rw-r--r--drivers/usb/host/uhci-debug.c71
-rw-r--r--drivers/usb/host/uhci-hcd.c51
-rw-r--r--drivers/usb/host/uhci-hcd.h8
-rw-r--r--drivers/usb/host/uhci-q.c258
-rw-r--r--drivers/usb/image/mdc800.c4
-rw-r--r--drivers/usb/input/Kconfig20
-rw-r--r--drivers/usb/input/Makefile4
-rw-r--r--drivers/usb/input/gtco.c1104
-rw-r--r--drivers/usb/input/hid-core.c116
-rw-r--r--drivers/usb/input/hid-ff.c3
-rw-r--r--drivers/usb/input/hid-lgff.c4
-rw-r--r--drivers/usb/input/hid-plff.c129
-rw-r--r--drivers/usb/misc/idmouse.c10
-rw-r--r--drivers/usb/misc/rio500.c54
-rw-r--r--drivers/usb/mon/Makefile2
-rw-r--r--drivers/usb/mon/mon_bin.c1172
-rw-r--r--drivers/usb/mon/mon_dma.c39
-rw-r--r--drivers/usb/mon/mon_main.c97
-rw-r--r--drivers/usb/mon/mon_text.c67
-rw-r--r--drivers/usb/mon/usb_mon.h30
-rw-r--r--drivers/usb/net/Kconfig6
-rw-r--r--drivers/usb/net/asix.c4
-rw-r--r--drivers/usb/net/cdc_ether.c60
-rw-r--r--drivers/usb/net/kaweth.c37
-rw-r--r--drivers/usb/net/pegasus.h4
-rw-r--r--drivers/usb/net/rndis_host.c81
-rw-r--r--drivers/usb/serial/aircable.c21
-rw-r--r--drivers/usb/serial/airprime.c1
-rw-r--r--drivers/usb/serial/ark3116.c2
-rw-r--r--drivers/usb/serial/belkin_sa.c1
-rw-r--r--drivers/usb/serial/bus.c45
-rw-r--r--drivers/usb/serial/cp2101.c1
-rw-r--r--drivers/usb/serial/cyberjack.c3
-rw-r--r--drivers/usb/serial/cypress_m8.c3
-rw-r--r--drivers/usb/serial/digi_acceleport.c2
-rw-r--r--drivers/usb/serial/empeg.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.h1
-rw-r--r--drivers/usb/serial/funsoft.c1
-rw-r--r--drivers/usb/serial/garmin_gps.c1
-rw-r--r--drivers/usb/serial/generic.c35
-rw-r--r--drivers/usb/serial/hp4x.c1
-rw-r--r--drivers/usb/serial/io_edgeport.c417
-rw-r--r--drivers/usb/serial/io_edgeport.h6
-rw-r--r--drivers/usb/serial/io_tables.h61
-rw-r--r--drivers/usb/serial/io_ti.c2
-rw-r--r--drivers/usb/serial/io_usbvend.h5
-rw-r--r--drivers/usb/serial/ipaq.c1
-rw-r--r--drivers/usb/serial/ipw.c1
-rw-r--r--drivers/usb/serial/ir-usb.c1
-rw-r--r--drivers/usb/serial/keyspan.c49
-rw-r--r--drivers/usb/serial/keyspan.h7
-rw-r--r--drivers/usb/serial/keyspan_pda.c3
-rw-r--r--drivers/usb/serial/kl5kusb105.c1
-rw-r--r--drivers/usb/serial/kobil_sct.c1
-rw-r--r--drivers/usb/serial/mct_u232.c1
-rw-r--r--drivers/usb/serial/mos7720.c16
-rw-r--r--drivers/usb/serial/mos7840.c16
-rw-r--r--drivers/usb/serial/navman.c1
-rw-r--r--drivers/usb/serial/omninet.c1
-rw-r--r--drivers/usb/serial/option.c1
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/safe_serial.c1
-rw-r--r--drivers/usb/serial/sierra.c29
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c2
-rw-r--r--drivers/usb/serial/usb-serial.c102
-rw-r--r--drivers/usb/serial/visor.c6
-rw-r--r--drivers/usb/serial/visor.h1
-rw-r--r--drivers/usb/serial/whiteheat.c2
-rw-r--r--drivers/usb/storage/onetouch.c1
-rw-r--r--drivers/usb/storage/scsiglue.c31
-rw-r--r--drivers/usb/storage/unusual_devs.h9
-rw-r--r--drivers/usb/storage/usb.c23
-rw-r--r--drivers/video/output.c129
-rw-r--r--fs/Kconfig1
-rw-r--r--fs/cifs/CHANGES2
-rw-r--r--fs/cifs/cifsfs.c10
-rw-r--r--fs/cifs/file.c12
-rw-r--r--fs/cifs/readdir.c6
-rw-r--r--fs/cifs/smbdes.c10
-rw-r--r--fs/configfs/file.c9
-rw-r--r--fs/dlm/Kconfig18
-rw-r--r--fs/dlm/config.c154
-rw-r--r--fs/dlm/config.h17
-rw-r--r--fs/dlm/dlm_internal.h20
-rw-r--r--fs/dlm/lock.c87
-rw-r--r--fs/dlm/lockspace.c10
-rw-r--r--fs/dlm/lowcomms-sctp.c151
-rw-r--r--fs/dlm/lowcomms-tcp.c361
-rw-r--r--fs/dlm/midcomms.c4
-rw-r--r--fs/dlm/rcom.c85
-rw-r--r--fs/dlm/recover.c8
-rw-r--r--fs/dlm/recoverd.c22
-rw-r--r--fs/dlm/user.c9
-rw-r--r--fs/dlm/util.c4
-rw-r--r--fs/gfs2/Kconfig47
-rw-r--r--fs/gfs2/bmap.c10
-rw-r--r--fs/gfs2/dir.c25
-rw-r--r--fs/gfs2/dir.h21
-rw-r--r--fs/gfs2/eattr.c8
-rw-r--r--fs/gfs2/glock.c316
-rw-r--r--fs/gfs2/glock.h11
-rw-r--r--fs/gfs2/glops.c136
-rw-r--r--fs/gfs2/incore.h18
-rw-r--r--fs/gfs2/inode.c61
-rw-r--r--fs/gfs2/lm.c8
-rw-r--r--fs/gfs2/locking/dlm/lock_dlm.h2
-rw-r--r--fs/gfs2/locking/dlm/main.c6
-rw-r--r--fs/gfs2/locking/dlm/mount.c6
-rw-r--r--fs/gfs2/locking/dlm/sysfs.c13
-rw-r--r--fs/gfs2/lops.c14
-rw-r--r--fs/gfs2/ops_address.c134
-rw-r--r--fs/gfs2/ops_dentry.c16
-rw-r--r--fs/gfs2/ops_export.c15
-rw-r--r--fs/gfs2/ops_file.c52
-rw-r--r--fs/gfs2/ops_inode.c55
-rw-r--r--fs/gfs2/ops_super.c11
-rw-r--r--fs/gfs2/ops_vm.c24
-rw-r--r--fs/gfs2/super.c16
-rw-r--r--fs/gfs2/sys.c10
-rw-r--r--fs/jfs/inode.c6
-rw-r--r--fs/jfs/jfs_debug.h5
-rw-r--r--fs/jfs/jfs_dmap.c16
-rw-r--r--fs/jfs/jfs_imap.c16
-rw-r--r--fs/jfs/jfs_incore.h29
-rw-r--r--fs/jfs/jfs_lock.h2
-rw-r--r--fs/jfs/jfs_metapage.c2
-rw-r--r--fs/jfs/jfs_txnmgr.c2
-rw-r--r--fs/jfs/jfs_xtree.c15
-rw-r--r--fs/jfs/namei.c48
-rw-r--r--fs/ocfs2/cluster/heartbeat.c158
-rw-r--r--fs/ocfs2/cluster/tcp.c35
-rw-r--r--fs/ocfs2/cluster/tcp.h6
-rw-r--r--fs/ocfs2/cluster/tcp_internal.h12
-rw-r--r--fs/ocfs2/dlm/dlmast.c14
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h130
-rw-r--r--fs/ocfs2/dlm/dlmconvert.c40
-rw-r--r--fs/ocfs2/dlm/dlmdebug.c30
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c253
-rw-r--r--fs/ocfs2/dlm/dlmlock.c7
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c579
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c182
-rw-r--r--fs/ocfs2/dlm/dlmthread.c200
-rw-r--r--fs/ocfs2/dlm/dlmunlock.c15
-rw-r--r--fs/ocfs2/journal.h4
-rw-r--r--fs/ocfs2/vote.c8
-rw-r--r--fs/sysfs/bin.c6
-rw-r--r--fs/sysfs/dir.c214
-rw-r--r--fs/sysfs/file.c82
-rw-r--r--fs/sysfs/group.c2
-rw-r--r--fs/sysfs/inode.c36
-rw-r--r--fs/sysfs/mount.c11
-rw-r--r--fs/sysfs/symlink.c1
-rw-r--r--fs/sysfs/sysfs.h21
-rw-r--r--include/acpi/acconfig.h13
-rw-r--r--include/acpi/acdebug.h8
-rw-r--r--include/acpi/acdisasm.h22
-rw-r--r--include/acpi/acdispat.h4
-rw-r--r--include/acpi/acevents.h2
-rw-r--r--include/acpi/acexcep.h10
-rw-r--r--include/acpi/acglobal.h117
-rw-r--r--include/acpi/achware.h6
-rw-r--r--include/acpi/acinterp.h14
-rw-r--r--include/acpi/aclocal.h77
-rw-r--r--include/acpi/acmacros.h71
-rw-r--r--include/acpi/acnames.h2
-rw-r--r--include/acpi/acnamesp.h21
-rw-r--r--include/acpi/acobject.h19
-rw-r--r--include/acpi/acopcode.h4
-rw-r--r--include/acpi/acoutput.h2
-rw-r--r--include/acpi/acparser.h2
-rw-r--r--include/acpi/acpi.h2
-rw-r--r--include/acpi/acpi_bus.h23
-rw-r--r--include/acpi/acpi_drivers.h15
-rw-r--r--include/acpi/acpiosxf.h8
-rw-r--r--include/acpi/acpixf.h34
-rw-r--r--include/acpi/acresrc.h2
-rw-r--r--include/acpi/acstruct.h5
-rw-r--r--include/acpi/actables.h106
-rw-r--r--include/acpi/actbl.h333
-rw-r--r--include/acpi/actbl1.h568
-rw-r--r--include/acpi/actbl2.h49
-rw-r--r--include/acpi/actbl71.h134
-rw-r--r--include/acpi/actypes.h106
-rw-r--r--include/acpi/acutils.h8
-rw-r--r--include/acpi/amlcode.h4
-rw-r--r--include/acpi/amlresrc.h2
-rw-r--r--include/acpi/platform/acenv.h2
-rw-r--r--include/acpi/platform/acgcc.h2
-rw-r--r--include/acpi/platform/aclinux.h2
-rw-r--r--include/asm-alpha/pci.h2
-rw-r--r--include/asm-i386/acpi.h24
-rw-r--r--include/asm-i386/mach-es7000/mach_mpparse.h17
-rw-r--r--include/asm-ia64/acpi.h10
-rw-r--r--include/asm-ia64/dma.h2
-rw-r--r--include/asm-ia64/esi.h1
-rw-r--r--include/asm-ia64/machvec.h3
-rw-r--r--include/asm-ia64/meminit.h3
-rw-r--r--include/asm-ia64/pgalloc.h3
-rw-r--r--include/asm-ia64/sn/acpi.h3
-rw-r--r--include/asm-ia64/sn/pcibr_provider.h2
-rw-r--r--include/asm-ia64/sn/pcidev.h8
-rw-r--r--include/asm-ia64/swiotlb.h9
-rw-r--r--include/asm-ia64/thread_info.h4
-rw-r--r--include/asm-ia64/unistd.h4
-rw-r--r--include/asm-mips/bootinfo.h4
-rw-r--r--include/asm-mips/ddb5xxx/ddb5477.h41
-rw-r--r--include/asm-mips/dec/interrupts.h3
-rw-r--r--include/asm-mips/dma.h1
-rw-r--r--include/asm-mips/emma2rh/emma2rh.h5
-rw-r--r--include/asm-mips/emma2rh/markeins.h1
-rw-r--r--include/asm-mips/i8259.h3
-rw-r--r--include/asm-mips/io.h4
-rw-r--r--include/asm-mips/irq.h2
-rw-r--r--include/asm-mips/irq_cpu.h6
-rw-r--r--include/asm-mips/mach-au1x00/au1000.h1
-rw-r--r--include/asm-mips/mach-cobalt/cobalt.h4
-rw-r--r--include/asm-mips/mach-emma2rh/irq.h2
-rw-r--r--include/asm-mips/mach-generic/irq.h32
-rw-r--r--include/asm-mips/mach-mips/irq.h2
-rw-r--r--include/asm-mips/mach-vr41xx/irq.h11
-rw-r--r--include/asm-mips/mips-boards/atlasint.h4
-rw-r--r--include/asm-mips/mips-boards/maltaint.h4
-rw-r--r--include/asm-mips/mips-boards/prom.h1
-rw-r--r--include/asm-mips/mips-boards/seadint.h4
-rw-r--r--include/asm-mips/mips-boards/simint.h3
-rw-r--r--include/asm-mips/mipsmtregs.h2
-rw-r--r--include/asm-mips/page.h25
-rw-r--r--include/asm-mips/rtlx.h3
-rw-r--r--include/asm-mips/sections.h2
-rw-r--r--include/asm-mips/sgi/ip22.h13
-rw-r--r--include/asm-mips/smtc_ipi.h3
-rw-r--r--include/asm-mips/uaccess.h3
-rw-r--r--include/asm-mips/vr41xx/cmbvr4133.h5
-rw-r--r--include/asm-s390/compat.h28
-rw-r--r--include/asm-s390/etr.h219
-rw-r--r--include/asm-s390/hardirq.h2
-rw-r--r--include/asm-s390/io.h4
-rw-r--r--include/asm-s390/kdebug.h3
-rw-r--r--include/asm-s390/lowcore.h6
-rw-r--r--include/asm-s390/mmu_context.h50
-rw-r--r--include/asm-s390/pgalloc.h85
-rw-r--r--include/asm-s390/pgtable.h147
-rw-r--r--include/asm-s390/processor.h27
-rw-r--r--include/asm-s390/ptrace.h11
-rw-r--r--include/asm-s390/reset.h3
-rw-r--r--include/asm-s390/sclp.h39
-rw-r--r--include/asm-s390/sections.h2
-rw-r--r--include/asm-s390/setup.h23
-rw-r--r--include/asm-s390/sfp-util.h (renamed from arch/s390/math-emu/sfp-util.h)6
-rw-r--r--include/asm-s390/smp.h6
-rw-r--r--include/asm-s390/system.h4
-rw-r--r--include/asm-s390/tape390.h72
-rw-r--r--include/asm-s390/timer.h3
-rw-r--r--include/asm-s390/timex.h50
-rw-r--r--include/asm-s390/tlbflush.h9
-rw-r--r--include/asm-s390/uaccess.h2
-rw-r--r--include/asm-x86_64/acpi.h24
-rw-r--r--include/asm-x86_64/swiotlb.h8
-rw-r--r--include/linux/Kbuild2
-rw-r--r--include/linux/acpi.h339
-rw-r--r--include/linux/device.h12
-rw-r--r--include/linux/hid-debug.h749
-rw-r--r--include/linux/hid.h18
-rw-r--r--include/linux/ide.h31
-rw-r--r--include/linux/irq.h4
-rw-r--r--include/linux/kobject.h4
-rw-r--r--include/linux/mmc/card.h3
-rw-r--r--include/linux/mmc/host.h10
-rw-r--r--include/linux/mmc/mmc.h1
-rw-r--r--include/linux/mmc/protocol.h13
-rw-r--r--include/linux/module.h3
-rw-r--r--include/linux/msi.h5
-rw-r--r--include/linux/netdevice.h5
-rw-r--r--include/linux/pci.h27
-rw-r--r--include/linux/pci_ids.h7
-rw-r--r--include/linux/serio.h5
-rw-r--r--include/linux/sysfs.h24
-rw-r--r--include/linux/tifm.h35
-rw-r--r--include/linux/usb.h39
-rw-r--r--include/linux/usb/Kbuild5
-rw-r--r--include/linux/usb/ch9.h (renamed from include/linux/usb_ch9.h)1
-rw-r--r--include/linux/usb/serial.h5
-rw-r--r--include/linux/usb_gadgetfs.h2
-rw-r--r--include/linux/video_output.h42
-rw-r--r--include/pcmcia/ss.h2
-rw-r--r--include/rdma/ib_user_mad.h2
-rw-r--r--include/rdma/ib_verbs.h3
-rw-r--r--kernel/irq/chip.c28
-rw-r--r--kernel/module.c76
-rw-r--r--kernel/params.c28
-rw-r--r--lib/kobject.c73
-rw-r--r--lib/swiotlb.c290
-rw-r--r--net/bridge/br_if.c2
-rw-r--r--net/bridge/br_sysfs_br.c234
-rw-r--r--net/bridge/br_sysfs_if.c2
-rw-r--r--net/core/dev.c6
-rw-r--r--net/core/net-sysfs.c175
-rw-r--r--net/core/skbuff.c2
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_wx.c6
-rw-r--r--scripts/Kbuild.include102
-rw-r--r--scripts/gen_initramfs_list.sh43
-rwxr-xr-xscripts/makelst34
-rw-r--r--security/keys/key.c33
1078 files changed, 56733 insertions, 26156 deletions
diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index 8d51c148f721..48123dba5e6a 100644
--- a/Documentation/HOWTO
+++ b/Documentation/HOWTO
@@ -30,6 +30,7 @@ are not a good substitute for a solid C education and/or years of
30experience, the following books are good for, if anything, reference: 30experience, the following books are good for, if anything, reference:
31 - "The C Programming Language" by Kernighan and Ritchie [Prentice Hall] 31 - "The C Programming Language" by Kernighan and Ritchie [Prentice Hall]
32 - "Practical C Programming" by Steve Oualline [O'Reilly] 32 - "Practical C Programming" by Steve Oualline [O'Reilly]
33 - "C: A Reference Manual" by Harbison and Steele [Prentice Hall]
33 34
34The kernel is written using GNU C and the GNU toolchain. While it 35The kernel is written using GNU C and the GNU toolchain. While it
35adheres to the ISO C89 standard, it uses a number of extensions that are 36adheres to the ISO C89 standard, it uses a number of extensions that are
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 0ba6af02cdaf..2dc5e5da8f88 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -274,6 +274,7 @@ Who: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
274 274
275--------------------------- 275---------------------------
276 276
277<<<<<<< test:Documentation/feature-removal-schedule.txt
277What: ACPI hotkey driver (CONFIG_ACPI_HOTKEY) 278What: ACPI hotkey driver (CONFIG_ACPI_HOTKEY)
278When: 2.6.21 279When: 2.6.21
279Why: hotkey.c was an attempt to consolidate multiple drivers that use 280Why: hotkey.c was an attempt to consolidate multiple drivers that use
@@ -306,11 +307,18 @@ Why: The ACPI namespace is effectively the symbol list for
306 the BIOS can be extracted and disassembled with acpidump 307 the BIOS can be extracted and disassembled with acpidump
307 and iasl as documented in the pmtools package here: 308 and iasl as documented in the pmtools package here:
308 http://ftp.kernel.org/pub/linux/kernel/people/lenb/acpi/utils 309 http://ftp.kernel.org/pub/linux/kernel/people/lenb/acpi/utils
309
310Who: Len Brown <len.brown@intel.com> 310Who: Len Brown <len.brown@intel.com>
311 311
312--------------------------- 312---------------------------
313 313
314What: ACPI procfs interface
315When: July 2007
316Why: After ACPI sysfs conversion, ACPI attributes will be duplicated
317 in sysfs and the ACPI procfs interface should be removed.
318Who: Zhang Rui <rui.zhang@intel.com>
319
320---------------------------
321
314What: /proc/acpi/button 322What: /proc/acpi/button
315When: August 2007 323When: August 2007
316Why: /proc/acpi/button has been replaced by events to the input layer 324Why: /proc/acpi/button has been replaced by events to the input layer
@@ -325,3 +333,10 @@ Why: Unmaintained for years, superceded by JFFS2 for years.
325Who: Jeff Garzik <jeff@garzik.org> 333Who: Jeff Garzik <jeff@garzik.org>
326 334
327--------------------------- 335---------------------------
336
337What: sk98lin network driver
338When: July 2007
339Why: In kernel tree version of driver is unmaintained. Sk98lin driver
340 replaced by the skge driver.
341Who: Stephen Hemminger <shemminger@osdl.org>
342
diff --git a/Documentation/s390/Debugging390.txt b/Documentation/s390/Debugging390.txt
index 3f9ddbc23b27..0993969609cf 100644
--- a/Documentation/s390/Debugging390.txt
+++ b/Documentation/s390/Debugging390.txt
@@ -480,7 +480,7 @@ r2 argument 0 / return value 0 call-clobbered
480r3 argument 1 / return value 1 (if long long) call-clobbered 480r3 argument 1 / return value 1 (if long long) call-clobbered
481r4 argument 2 call-clobbered 481r4 argument 2 call-clobbered
482r5 argument 3 call-clobbered 482r5 argument 3 call-clobbered
483r6 argument 5 saved 483r6 argument 4 saved
484r7 pointer-to arguments 5 to ... saved 484r7 pointer-to arguments 5 to ... saved
485r8 this & that saved 485r8 this & that saved
486r9 this & that saved 486r9 this & that saved
diff --git a/Documentation/usb/proc_usb_info.txt b/Documentation/usb/proc_usb_info.txt
index 22c5331260ca..077e9032d0cd 100644
--- a/Documentation/usb/proc_usb_info.txt
+++ b/Documentation/usb/proc_usb_info.txt
@@ -213,15 +213,16 @@ C:* #Ifs=dd Cfg#=dd Atr=xx MPwr=dddmA
213 213
214Interface descriptor info (can be multiple per Config): 214Interface descriptor info (can be multiple per Config):
215 215
216I: If#=dd Alt=dd #EPs=dd Cls=xx(sssss) Sub=xx Prot=xx Driver=ssss 216I:* If#=dd Alt=dd #EPs=dd Cls=xx(sssss) Sub=xx Prot=xx Driver=ssss
217| | | | | | | |__Driver name 217| | | | | | | | |__Driver name
218| | | | | | | or "(none)" 218| | | | | | | | or "(none)"
219| | | | | | |__InterfaceProtocol 219| | | | | | | |__InterfaceProtocol
220| | | | | |__InterfaceSubClass 220| | | | | | |__InterfaceSubClass
221| | | | |__InterfaceClass 221| | | | | |__InterfaceClass
222| | | |__NumberOfEndpoints 222| | | | |__NumberOfEndpoints
223| | |__AlternateSettingNumber 223| | | |__AlternateSettingNumber
224| |__InterfaceNumber 224| | |__InterfaceNumber
225| |__ "*" indicates the active altsetting (others are " ")
225|__Interface info tag 226|__Interface info tag
226 227
227 A given interface may have one or more "alternate" settings. 228 A given interface may have one or more "alternate" settings.
@@ -277,7 +278,7 @@ of the USB devices on a system's root hub. (See more below
277on how to do this.) 278on how to do this.)
278 279
279The Interface lines can be used to determine what driver is 280The Interface lines can be used to determine what driver is
280being used for each device. 281being used for each device, and which altsetting it activated.
281 282
282The Configuration lines could be used to list maximum power 283The Configuration lines could be used to list maximum power
283(in milliamps) that a system's USB devices are using. 284(in milliamps) that a system's USB devices are using.
diff --git a/Documentation/usb/usbmon.txt b/Documentation/usb/usbmon.txt
index e65ec828d7aa..0f6808abd612 100644
--- a/Documentation/usb/usbmon.txt
+++ b/Documentation/usb/usbmon.txt
@@ -77,7 +77,7 @@ that the file size is not excessive for your favourite editor.
77 77
78The '1t' type data consists of a stream of events, such as URB submission, 78The '1t' type data consists of a stream of events, such as URB submission,
79URB callback, submission error. Every event is a text line, which consists 79URB callback, submission error. Every event is a text line, which consists
80of whitespace separated words. The number of position of words may depend 80of whitespace separated words. The number or position of words may depend
81on the event type, but there is a set of words, common for all types. 81on the event type, but there is a set of words, common for all types.
82 82
83Here is the list of words, from left to right: 83Here is the list of words, from left to right:
@@ -170,4 +170,152 @@ dd65f0e8 4128379808 C Bo:005:02 0 31 >
170 170
171* Raw binary format and API 171* Raw binary format and API
172 172
173TBD 173The overall architecture of the API is about the same as the one above,
174only the events are delivered in binary format. Each event is sent in
175the following structure (its name is made up, so that we can refer to it):
176
177struct usbmon_packet {
178 u64 id; /* 0: URB ID - from submission to callback */
179 unsigned char type; /* 8: Same as text; extensible. */
180 unsigned char xfer_type; /* ISO (0), Intr, Control, Bulk (3) */
181 unsigned char epnum; /* Endpoint number and transfer direction */
182 unsigned char devnum; /* Device address */
183 u16 busnum; /* 12: Bus number */
184 char flag_setup; /* 14: Same as text */
185 char flag_data; /* 15: Same as text; Binary zero is OK. */
186 s64 ts_sec; /* 16: gettimeofday */
187 s32 ts_usec; /* 24: gettimeofday */
188 int status; /* 28: */
189 unsigned int length; /* 32: Length of data (submitted or actual) */
190 unsigned int len_cap; /* 36: Delivered length */
191 unsigned char setup[8]; /* 40: Only for Control 'S' */
192}; /* 48 bytes total */
193
194These events can be received from a character device by reading with read(2),
195with an ioctl(2), or by accessing the buffer with mmap.
196
197The character device is usually called /dev/usbmonN, where N is the USB bus
198number. Number zero (/dev/usbmon0) is special and means "all buses".
199However, this feature is not implemented yet. Note that specific naming
200policy is set by your Linux distribution.
201
202If you create /dev/usbmon0 by hand, make sure that it is owned by root
203and has mode 0600. Otherwise, unpriviledged users will be able to snoop
204keyboard traffic.
205
206The following ioctl calls are available, with MON_IOC_MAGIC 0x92:
207
208 MON_IOCQ_URB_LEN, defined as _IO(MON_IOC_MAGIC, 1)
209
210This call returns the length of data in the next event. Note that majority of
211events contain no data, so if this call returns zero, it does not mean that
212no events are available.
213
214 MON_IOCG_STATS, defined as _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats)
215
216The argument is a pointer to the following structure:
217
218struct mon_bin_stats {
219 u32 queued;
220 u32 dropped;
221};
222
223The member "queued" refers to the number of events currently queued in the
224buffer (and not to the number of events processed since the last reset).
225
226The member "dropped" is the number of events lost since the last call
227to MON_IOCG_STATS.
228
229 MON_IOCT_RING_SIZE, defined as _IO(MON_IOC_MAGIC, 4)
230
231This call sets the buffer size. The argument is the size in bytes.
232The size may be rounded down to the next chunk (or page). If the requested
233size is out of [unspecified] bounds for this kernel, the call fails with
234-EINVAL.
235
236 MON_IOCQ_RING_SIZE, defined as _IO(MON_IOC_MAGIC, 5)
237
238This call returns the current size of the buffer in bytes.
239
240 MON_IOCX_GET, defined as _IOW(MON_IOC_MAGIC, 6, struct mon_get_arg)
241
242This call waits for events to arrive if none were in the kernel buffer,
243then returns the first event. Its argument is a pointer to the following
244structure:
245
246struct mon_get_arg {
247 struct usbmon_packet *hdr;
248 void *data;
249 size_t alloc; /* Length of data (can be zero) */
250};
251
252Before the call, hdr, data, and alloc should be filled. Upon return, the area
253pointed by hdr contains the next event structure, and the data buffer contains
254the data, if any. The event is removed from the kernel buffer.
255
256 MON_IOCX_MFETCH, defined as _IOWR(MON_IOC_MAGIC, 7, struct mon_mfetch_arg)
257
258This ioctl is primarily used when the application accesses the buffer
259with mmap(2). Its argument is a pointer to the following structure:
260
261struct mon_mfetch_arg {
262 uint32_t *offvec; /* Vector of events fetched */
263 uint32_t nfetch; /* Number of events to fetch (out: fetched) */
264 uint32_t nflush; /* Number of events to flush */
265};
266
267The ioctl operates in 3 stages.
268
269First, it removes and discards up to nflush events from the kernel buffer.
270The actual number of events discarded is returned in nflush.
271
272Second, it waits for an event to be present in the buffer, unless the pseudo-
273device is open with O_NONBLOCK.
274
275Third, it extracts up to nfetch offsets into the mmap buffer, and stores
276them into the offvec. The actual number of event offsets is stored into
277the nfetch.
278
279 MON_IOCH_MFLUSH, defined as _IO(MON_IOC_MAGIC, 8)
280
281This call removes a number of events from the kernel buffer. Its argument
282is the number of events to remove. If the buffer contains fewer events
283than requested, all events present are removed, and no error is reported.
284This works when no events are available too.
285
286 FIONBIO
287
288The ioctl FIONBIO may be implemented in the future, if there's a need.
289
290In addition to ioctl(2) and read(2), the special file of binary API can
291be polled with select(2) and poll(2). But lseek(2) does not work.
292
293* Memory-mapped access of the kernel buffer for the binary API
294
295The basic idea is simple:
296
297To prepare, map the buffer by getting the current size, then using mmap(2).
298Then, execute a loop similar to the one written in pseudo-code below:
299
300 struct mon_mfetch_arg fetch;
301 struct usbmon_packet *hdr;
302 int nflush = 0;
303 for (;;) {
304 fetch.offvec = vec; // Has N 32-bit words
305 fetch.nfetch = N; // Or less than N
306 fetch.nflush = nflush;
307 ioctl(fd, MON_IOCX_MFETCH, &fetch); // Process errors, too
308 nflush = fetch.nfetch; // This many packets to flush when done
309 for (i = 0; i < nflush; i++) {
310 hdr = (struct ubsmon_packet *) &mmap_area[vec[i]];
311 if (hdr->type == '@') // Filler packet
312 continue;
313 caddr_t data = &mmap_area[vec[i]] + 64;
314 process_packet(hdr, data);
315 }
316 }
317
318Thus, the main idea is to execute only one ioctl per N events.
319
320Although the buffer is circular, the returned headers and data do not cross
321the end of the buffer, so the above pseudo-code does not need any gathering.
diff --git a/Documentation/video-output.txt b/Documentation/video-output.txt
new file mode 100644
index 000000000000..e517011be4f9
--- /dev/null
+++ b/Documentation/video-output.txt
@@ -0,0 +1,34 @@
1
2 Video Output Switcher Control
3 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
4 2006 luming.yu@intel.com
5
6The output sysfs class driver provides an abstract video output layer that
7can be used to hook platform specific methods to enable/disable video output
8device through common sysfs interface. For example, on my IBM ThinkPad T42
9laptop, The ACPI video driver registered its output devices and read/write
10method for 'state' with output sysfs class. The user interface under sysfs is:
11
12linux:/sys/class/video_output # tree .
13.
14|-- CRT0
15| |-- device -> ../../../devices/pci0000:00/0000:00:01.0
16| |-- state
17| |-- subsystem -> ../../../class/video_output
18| `-- uevent
19|-- DVI0
20| |-- device -> ../../../devices/pci0000:00/0000:00:01.0
21| |-- state
22| |-- subsystem -> ../../../class/video_output
23| `-- uevent
24|-- LCD0
25| |-- device -> ../../../devices/pci0000:00/0000:00:01.0
26| |-- state
27| |-- subsystem -> ../../../class/video_output
28| `-- uevent
29`-- TV0
30 |-- device -> ../../../devices/pci0000:00/0000:00:01.0
31 |-- state
32 |-- subsystem -> ../../../class/video_output
33 `-- uevent
34
diff --git a/MAINTAINERS b/MAINTAINERS
index 0ad8803a0c75..fe35f3ac4cd3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -584,12 +584,30 @@ W: http://sourceforge.net/projects/acpi4asus
584W: http://xf.iksaif.net/acpi4asus 584W: http://xf.iksaif.net/acpi4asus
585S: Maintained 585S: Maintained
586 586
587ASUS LAPTOP EXTRAS DRIVER
588P: Corentin Chary
589M: corentincj@iksaif.net
590L: acpi4asus-user@lists.sourceforge.net
591W: http://sourceforge.net/projects/acpi4asus
592W: http://xf.iksaif.net/acpi4asus
593S: Maintained
594
587ATA OVER ETHERNET DRIVER 595ATA OVER ETHERNET DRIVER
588P: Ed L. Cashin 596P: Ed L. Cashin
589M: ecashin@coraid.com 597M: ecashin@coraid.com
590W: http://www.coraid.com/support/linux 598W: http://www.coraid.com/support/linux
591S: Supported 599S: Supported
592 600
601ATL1 ETHERNET DRIVER
602P: Jay Cliburn
603M: jcliburn@gmail.com
604P: Chris Snook
605M: csnook@redhat.com
606L: atl1-devel@lists.sourceforge.net
607W: http://sourceforge.net/projects/atl1
608W: http://atl1.sourceforge.net
609S: Maintained
610
593ATM 611ATM
594P: Chas Williams 612P: Chas Williams
595M: chas@cmf.nrl.navy.mil 613M: chas@cmf.nrl.navy.mil
@@ -2477,6 +2495,12 @@ L: orinoco-devel@lists.sourceforge.net
2477W: http://www.nongnu.org/orinoco/ 2495W: http://www.nongnu.org/orinoco/
2478S: Maintained 2496S: Maintained
2479 2497
2498PA SEMI ETHERNET DRIVER
2499P: Olof Johansson
2500M: olof@lixom.net
2501L: netdev@vger.kernel.org
2502S: Maintained
2503
2480PARALLEL PORT SUPPORT 2504PARALLEL PORT SUPPORT
2481P: Phil Blundell 2505P: Phil Blundell
2482M: philb@gnu.org 2506M: philb@gnu.org
@@ -2646,7 +2670,7 @@ S: Supported
2646 2670
2647PRISM54 WIRELESS DRIVER 2671PRISM54 WIRELESS DRIVER
2648P: Prism54 Development Team 2672P: Prism54 Development Team
2649M: prism54-private@prism54.org 2673M: developers@islsm.org
2650L: netdev@vger.kernel.org 2674L: netdev@vger.kernel.org
2651W: http://prism54.org 2675W: http://prism54.org
2652S: Maintained 2676S: Maintained
@@ -2791,7 +2815,7 @@ M: schwidefsky@de.ibm.com
2791P: Heiko Carstens 2815P: Heiko Carstens
2792M: heiko.carstens@de.ibm.com 2816M: heiko.carstens@de.ibm.com
2793M: linux390@de.ibm.com 2817M: linux390@de.ibm.com
2794L: linux-390@vm.marist.edu 2818L: linux-s390@vger.kernel.org
2795W: http://www.ibm.com/developerworks/linux/linux390/ 2819W: http://www.ibm.com/developerworks/linux/linux390/
2796S: Supported 2820S: Supported
2797 2821
@@ -2799,7 +2823,7 @@ S390 NETWORK DRIVERS
2799P: Frank Pavlic 2823P: Frank Pavlic
2800M: fpavlic@de.ibm.com 2824M: fpavlic@de.ibm.com
2801M: linux390@de.ibm.com 2825M: linux390@de.ibm.com
2802L: linux-390@vm.marist.edu 2826L: linux-s390@vger.kernel.org
2803W: http://www.ibm.com/developerworks/linux/linux390/ 2827W: http://www.ibm.com/developerworks/linux/linux390/
2804S: Supported 2828S: Supported
2805 2829
@@ -2807,7 +2831,7 @@ S390 ZFCP DRIVER
2807P: Swen Schillig 2831P: Swen Schillig
2808M: swen@vnet.ibm.com 2832M: swen@vnet.ibm.com
2809M: linux390@de.ibm.com 2833M: linux390@de.ibm.com
2810L: linux-390@vm.marist.edu 2834L: linux-s390@vger.kernel.org
2811W: http://www.ibm.com/developerworks/linux/linux390/ 2835W: http://www.ibm.com/developerworks/linux/linux390/
2812S: Supported 2836S: Supported
2813 2837
@@ -3647,7 +3671,7 @@ S: Maintained
3647W83L51xD SD/MMC CARD INTERFACE DRIVER 3671W83L51xD SD/MMC CARD INTERFACE DRIVER
3648P: Pierre Ossman 3672P: Pierre Ossman
3649M: drzeus-wbsd@drzeus.cx 3673M: drzeus-wbsd@drzeus.cx
3650L: wbsd-devel@list.drzeus.cx 3674L: linux-kernel@vger.kernel.org
3651W: http://projects.drzeus.cx/wbsd 3675W: http://projects.drzeus.cx/wbsd
3652S: Maintained 3676S: Maintained
3653 3677
diff --git a/Makefile b/Makefile
index 7e2750f4ca70..cdeda68cf2aa 100644
--- a/Makefile
+++ b/Makefile
@@ -776,7 +776,7 @@ $(vmlinux-dirs): prepare scripts
776# $(EXTRAVERSION) eg, -rc6 776# $(EXTRAVERSION) eg, -rc6
777# $(localver-full) 777# $(localver-full)
778# $(localver) 778# $(localver)
779# localversion* (all localversion* files) 779# localversion* (files without backups, containing '~')
780# $(CONFIG_LOCALVERSION) (from kernel config setting) 780# $(CONFIG_LOCALVERSION) (from kernel config setting)
781# $(localver-auto) (only if CONFIG_LOCALVERSION_AUTO is set) 781# $(localver-auto) (only if CONFIG_LOCALVERSION_AUTO is set)
782# ./scripts/setlocalversion (SCM tag, if one exists) 782# ./scripts/setlocalversion (SCM tag, if one exists)
@@ -787,17 +787,12 @@ $(vmlinux-dirs): prepare scripts
787# moment, only git is supported but other SCMs can edit the script 787# moment, only git is supported but other SCMs can edit the script
788# scripts/setlocalversion and add the appropriate checks as needed. 788# scripts/setlocalversion and add the appropriate checks as needed.
789 789
790nullstring := 790pattern = ".*/localversion[^~]*"
791space := $(nullstring) # end of line 791string = $(shell cat /dev/null \
792 `find $(objtree) $(srctree) -maxdepth 1 -regex $(pattern) | sort`)
792 793
793___localver = $(objtree)/localversion* $(srctree)/localversion* 794localver = $(subst $(space),, $(string) \
794__localver = $(sort $(wildcard $(___localver))) 795 $(patsubst "%",%,$(CONFIG_LOCALVERSION)))
795# skip backup files (containing '~')
796_localver = $(foreach f, $(__localver), $(if $(findstring ~, $(f)),,$(f)))
797
798localver = $(subst $(space),, \
799 $(shell cat /dev/null $(_localver)) \
800 $(patsubst "%",%,$(CONFIG_LOCALVERSION)))
801 796
802# If CONFIG_LOCALVERSION_AUTO is set scripts/setlocalversion is called 797# If CONFIG_LOCALVERSION_AUTO is set scripts/setlocalversion is called
803# and if the SCM is know a tag from the SCM is appended. 798# and if the SCM is know a tag from the SCM is appended.
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index 3c10b9a1ddf5..ab642a4f08de 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -575,3 +575,7 @@ void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
575 575
576EXPORT_SYMBOL(pci_iomap); 576EXPORT_SYMBOL(pci_iomap);
577EXPORT_SYMBOL(pci_iounmap); 577EXPORT_SYMBOL(pci_iounmap);
578
579/* FIXME: Some boxes have multiple ISA bridges! */
580struct pci_dev *isa_bridge;
581EXPORT_SYMBOL(isa_bridge);
diff --git a/arch/i386/defconfig b/arch/i386/defconfig
index 5d80edfc61b7..bb0c376b62b3 100644
--- a/arch/i386/defconfig
+++ b/arch/i386/defconfig
@@ -466,7 +466,8 @@ CONFIG_FW_LOADER=y
466# 466#
467# Plug and Play support 467# Plug and Play support
468# 468#
469# CONFIG_PNP is not set 469CONFIG_PNP=y
470CONFIG_PNPACPI=y
470 471
471# 472#
472# Block devices 473# Block devices
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index cbcb2c27f48b..e94aff6888ca 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -66,7 +66,7 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return
66 66
67#define BAD_MADT_ENTRY(entry, end) ( \ 67#define BAD_MADT_ENTRY(entry, end) ( \
68 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ 68 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
69 ((acpi_table_entry_header *)entry)->length < sizeof(*entry)) 69 ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
70 70
71#define PREFIX "ACPI: " 71#define PREFIX "ACPI: "
72 72
@@ -79,7 +79,7 @@ int acpi_ioapic;
79int acpi_strict; 79int acpi_strict;
80EXPORT_SYMBOL(acpi_strict); 80EXPORT_SYMBOL(acpi_strict);
81 81
82acpi_interrupt_flags acpi_sci_flags __initdata; 82u8 acpi_sci_flags __initdata;
83int acpi_sci_override_gsi __initdata; 83int acpi_sci_override_gsi __initdata;
84int acpi_skip_timer_override __initdata; 84int acpi_skip_timer_override __initdata;
85int acpi_use_timer_override __initdata; 85int acpi_use_timer_override __initdata;
@@ -92,11 +92,6 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
92#warning ACPI uses CMPXCHG, i486 and later hardware 92#warning ACPI uses CMPXCHG, i486 and later hardware
93#endif 93#endif
94 94
95#define MAX_MADT_ENTRIES 256
96u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
97 {[0 ... MAX_MADT_ENTRIES - 1] = 0xff };
98EXPORT_SYMBOL(x86_acpiid_to_apicid);
99
100/* -------------------------------------------------------------------------- 95/* --------------------------------------------------------------------------
101 Boot-time Configuration 96 Boot-time Configuration
102 -------------------------------------------------------------------------- */ 97 -------------------------------------------------------------------------- */
@@ -166,30 +161,26 @@ char *__acpi_map_table(unsigned long phys, unsigned long size)
166 161
167#ifdef CONFIG_PCI_MMCONFIG 162#ifdef CONFIG_PCI_MMCONFIG
168/* The physical address of the MMCONFIG aperture. Set from ACPI tables. */ 163/* The physical address of the MMCONFIG aperture. Set from ACPI tables. */
169struct acpi_table_mcfg_config *pci_mmcfg_config; 164struct acpi_mcfg_allocation *pci_mmcfg_config;
170int pci_mmcfg_config_num; 165int pci_mmcfg_config_num;
171 166
172int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size) 167int __init acpi_parse_mcfg(struct acpi_table_header *header)
173{ 168{
174 struct acpi_table_mcfg *mcfg; 169 struct acpi_table_mcfg *mcfg;
175 unsigned long i; 170 unsigned long i;
176 int config_size; 171 int config_size;
177 172
178 if (!phys_addr || !size) 173 if (!header)
179 return -EINVAL; 174 return -EINVAL;
180 175
181 mcfg = (struct acpi_table_mcfg *)__acpi_map_table(phys_addr, size); 176 mcfg = (struct acpi_table_mcfg *)header;
182 if (!mcfg) {
183 printk(KERN_WARNING PREFIX "Unable to map MCFG\n");
184 return -ENODEV;
185 }
186 177
187 /* how many config structures do we have */ 178 /* how many config structures do we have */
188 pci_mmcfg_config_num = 0; 179 pci_mmcfg_config_num = 0;
189 i = size - sizeof(struct acpi_table_mcfg); 180 i = header->length - sizeof(struct acpi_table_mcfg);
190 while (i >= sizeof(struct acpi_table_mcfg_config)) { 181 while (i >= sizeof(struct acpi_mcfg_allocation)) {
191 ++pci_mmcfg_config_num; 182 ++pci_mmcfg_config_num;
192 i -= sizeof(struct acpi_table_mcfg_config); 183 i -= sizeof(struct acpi_mcfg_allocation);
193 }; 184 };
194 if (pci_mmcfg_config_num == 0) { 185 if (pci_mmcfg_config_num == 0) {
195 printk(KERN_ERR PREFIX "MMCONFIG has no entries\n"); 186 printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
@@ -204,9 +195,9 @@ int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
204 return -ENOMEM; 195 return -ENOMEM;
205 } 196 }
206 197
207 memcpy(pci_mmcfg_config, &mcfg->config, config_size); 198 memcpy(pci_mmcfg_config, &mcfg[1], config_size);
208 for (i = 0; i < pci_mmcfg_config_num; ++i) { 199 for (i = 0; i < pci_mmcfg_config_num; ++i) {
209 if (mcfg->config[i].base_reserved) { 200 if (pci_mmcfg_config[i].address > 0xFFFFFFFF) {
210 printk(KERN_ERR PREFIX 201 printk(KERN_ERR PREFIX
211 "MMCONFIG not in low 4GB of memory\n"); 202 "MMCONFIG not in low 4GB of memory\n");
212 kfree(pci_mmcfg_config); 203 kfree(pci_mmcfg_config);
@@ -220,24 +211,24 @@ int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
220#endif /* CONFIG_PCI_MMCONFIG */ 211#endif /* CONFIG_PCI_MMCONFIG */
221 212
222#ifdef CONFIG_X86_LOCAL_APIC 213#ifdef CONFIG_X86_LOCAL_APIC
223static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size) 214static int __init acpi_parse_madt(struct acpi_table_header *table)
224{ 215{
225 struct acpi_table_madt *madt = NULL; 216 struct acpi_table_madt *madt = NULL;
226 217
227 if (!phys_addr || !size || !cpu_has_apic) 218 if (!cpu_has_apic)
228 return -EINVAL; 219 return -EINVAL;
229 220
230 madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size); 221 madt = (struct acpi_table_madt *)table;
231 if (!madt) { 222 if (!madt) {
232 printk(KERN_WARNING PREFIX "Unable to map MADT\n"); 223 printk(KERN_WARNING PREFIX "Unable to map MADT\n");
233 return -ENODEV; 224 return -ENODEV;
234 } 225 }
235 226
236 if (madt->lapic_address) { 227 if (madt->address) {
237 acpi_lapic_addr = (u64) madt->lapic_address; 228 acpi_lapic_addr = (u64) madt->address;
238 229
239 printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n", 230 printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
240 madt->lapic_address); 231 madt->address);
241 } 232 }
242 233
243 acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id); 234 acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
@@ -246,21 +237,17 @@ static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
246} 237}
247 238
248static int __init 239static int __init
249acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end) 240acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
250{ 241{
251 struct acpi_table_lapic *processor = NULL; 242 struct acpi_madt_local_apic *processor = NULL;
252 243
253 processor = (struct acpi_table_lapic *)header; 244 processor = (struct acpi_madt_local_apic *)header;
254 245
255 if (BAD_MADT_ENTRY(processor, end)) 246 if (BAD_MADT_ENTRY(processor, end))
256 return -EINVAL; 247 return -EINVAL;
257 248
258 acpi_table_print_madt_entry(header); 249 acpi_table_print_madt_entry(header);
259 250
260 /* Record local apic id only when enabled */
261 if (processor->flags.enabled)
262 x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
263
264 /* 251 /*
265 * We need to register disabled CPU as well to permit 252 * We need to register disabled CPU as well to permit
266 * counting disabled CPUs. This allows us to size 253 * counting disabled CPUs. This allows us to size
@@ -269,18 +256,18 @@ acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
269 * when we use CPU hotplug. 256 * when we use CPU hotplug.
270 */ 257 */
271 mp_register_lapic(processor->id, /* APIC ID */ 258 mp_register_lapic(processor->id, /* APIC ID */
272 processor->flags.enabled); /* Enabled? */ 259 processor->lapic_flags & ACPI_MADT_ENABLED); /* Enabled? */
273 260
274 return 0; 261 return 0;
275} 262}
276 263
277static int __init 264static int __init
278acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header, 265acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
279 const unsigned long end) 266 const unsigned long end)
280{ 267{
281 struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL; 268 struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL;
282 269
283 lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr *)header; 270 lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header;
284 271
285 if (BAD_MADT_ENTRY(lapic_addr_ovr, end)) 272 if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
286 return -EINVAL; 273 return -EINVAL;
@@ -291,11 +278,11 @@ acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
291} 278}
292 279
293static int __init 280static int __init
294acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end) 281acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
295{ 282{
296 struct acpi_table_lapic_nmi *lapic_nmi = NULL; 283 struct acpi_madt_local_apic_nmi *lapic_nmi = NULL;
297 284
298 lapic_nmi = (struct acpi_table_lapic_nmi *)header; 285 lapic_nmi = (struct acpi_madt_local_apic_nmi *)header;
299 286
300 if (BAD_MADT_ENTRY(lapic_nmi, end)) 287 if (BAD_MADT_ENTRY(lapic_nmi, end))
301 return -EINVAL; 288 return -EINVAL;
@@ -313,11 +300,11 @@ acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
313#ifdef CONFIG_X86_IO_APIC 300#ifdef CONFIG_X86_IO_APIC
314 301
315static int __init 302static int __init
316acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end) 303acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
317{ 304{
318 struct acpi_table_ioapic *ioapic = NULL; 305 struct acpi_madt_io_apic *ioapic = NULL;
319 306
320 ioapic = (struct acpi_table_ioapic *)header; 307 ioapic = (struct acpi_madt_io_apic *)header;
321 308
322 if (BAD_MADT_ENTRY(ioapic, end)) 309 if (BAD_MADT_ENTRY(ioapic, end))
323 return -EINVAL; 310 return -EINVAL;
@@ -342,11 +329,11 @@ static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
342 polarity = 3; 329 polarity = 3;
343 330
344 /* Command-line over-ride via acpi_sci= */ 331 /* Command-line over-ride via acpi_sci= */
345 if (acpi_sci_flags.trigger) 332 if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)
346 trigger = acpi_sci_flags.trigger; 333 trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2;
347 334
348 if (acpi_sci_flags.polarity) 335 if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK)
349 polarity = acpi_sci_flags.polarity; 336 polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
350 337
351 /* 338 /*
352 * mp_config_acpi_legacy_irqs() already setup IRQs < 16 339 * mp_config_acpi_legacy_irqs() already setup IRQs < 16
@@ -357,51 +344,52 @@ static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
357 344
358 /* 345 /*
359 * stash over-ride to indicate we've been here 346 * stash over-ride to indicate we've been here
360 * and for later update of acpi_fadt 347 * and for later update of acpi_gbl_FADT
361 */ 348 */
362 acpi_sci_override_gsi = gsi; 349 acpi_sci_override_gsi = gsi;
363 return; 350 return;
364} 351}
365 352
366static int __init 353static int __init
367acpi_parse_int_src_ovr(acpi_table_entry_header * header, 354acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
368 const unsigned long end) 355 const unsigned long end)
369{ 356{
370 struct acpi_table_int_src_ovr *intsrc = NULL; 357 struct acpi_madt_interrupt_override *intsrc = NULL;
371 358
372 intsrc = (struct acpi_table_int_src_ovr *)header; 359 intsrc = (struct acpi_madt_interrupt_override *)header;
373 360
374 if (BAD_MADT_ENTRY(intsrc, end)) 361 if (BAD_MADT_ENTRY(intsrc, end))
375 return -EINVAL; 362 return -EINVAL;
376 363
377 acpi_table_print_madt_entry(header); 364 acpi_table_print_madt_entry(header);
378 365
379 if (intsrc->bus_irq == acpi_fadt.sci_int) { 366 if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
380 acpi_sci_ioapic_setup(intsrc->global_irq, 367 acpi_sci_ioapic_setup(intsrc->global_irq,
381 intsrc->flags.polarity, 368 intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
382 intsrc->flags.trigger); 369 (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
383 return 0; 370 return 0;
384 } 371 }
385 372
386 if (acpi_skip_timer_override && 373 if (acpi_skip_timer_override &&
387 intsrc->bus_irq == 0 && intsrc->global_irq == 2) { 374 intsrc->source_irq == 0 && intsrc->global_irq == 2) {
388 printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); 375 printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
389 return 0; 376 return 0;
390 } 377 }
391 378
392 mp_override_legacy_irq(intsrc->bus_irq, 379 mp_override_legacy_irq(intsrc->source_irq,
393 intsrc->flags.polarity, 380 intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
394 intsrc->flags.trigger, intsrc->global_irq); 381 (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
382 intsrc->global_irq);
395 383
396 return 0; 384 return 0;
397} 385}
398 386
399static int __init 387static int __init
400acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end) 388acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
401{ 389{
402 struct acpi_table_nmi_src *nmi_src = NULL; 390 struct acpi_madt_nmi_source *nmi_src = NULL;
403 391
404 nmi_src = (struct acpi_table_nmi_src *)header; 392 nmi_src = (struct acpi_madt_nmi_source *)header;
405 393
406 if (BAD_MADT_ENTRY(nmi_src, end)) 394 if (BAD_MADT_ENTRY(nmi_src, end))
407 return -EINVAL; 395 return -EINVAL;
@@ -417,7 +405,7 @@ acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
417 405
418/* 406/*
419 * acpi_pic_sci_set_trigger() 407 * acpi_pic_sci_set_trigger()
420 * 408 *
421 * use ELCR to set PIC-mode trigger type for SCI 409 * use ELCR to set PIC-mode trigger type for SCI
422 * 410 *
423 * If a PIC-mode SCI is not recognized or gives spurious IRQ7's 411 * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
@@ -511,7 +499,7 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
511{ 499{
512 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 500 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
513 union acpi_object *obj; 501 union acpi_object *obj;
514 struct acpi_table_lapic *lapic; 502 struct acpi_madt_local_apic *lapic;
515 cpumask_t tmp_map, new_map; 503 cpumask_t tmp_map, new_map;
516 u8 physid; 504 u8 physid;
517 int cpu; 505 int cpu;
@@ -529,10 +517,10 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
529 return -EINVAL; 517 return -EINVAL;
530 } 518 }
531 519
532 lapic = (struct acpi_table_lapic *)obj->buffer.pointer; 520 lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;
533 521
534 if ((lapic->header.type != ACPI_MADT_LAPIC) || 522 if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
535 (!lapic->flags.enabled)) { 523 !(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
536 kfree(buffer.pointer); 524 kfree(buffer.pointer);
537 return -EINVAL; 525 return -EINVAL;
538 } 526 }
@@ -544,7 +532,7 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
544 buffer.pointer = NULL; 532 buffer.pointer = NULL;
545 533
546 tmp_map = cpu_present_map; 534 tmp_map = cpu_present_map;
547 mp_register_lapic(physid, lapic->flags.enabled); 535 mp_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
548 536
549 /* 537 /*
550 * If mp_register_lapic successfully generates a new logical cpu 538 * If mp_register_lapic successfully generates a new logical cpu
@@ -566,14 +554,6 @@ EXPORT_SYMBOL(acpi_map_lsapic);
566 554
567int acpi_unmap_lsapic(int cpu) 555int acpi_unmap_lsapic(int cpu)
568{ 556{
569 int i;
570
571 for_each_possible_cpu(i) {
572 if (x86_acpiid_to_apicid[i] == x86_cpu_to_apicid[cpu]) {
573 x86_acpiid_to_apicid[i] = -1;
574 break;
575 }
576 }
577 x86_cpu_to_apicid[cpu] = -1; 557 x86_cpu_to_apicid[cpu] = -1;
578 cpu_clear(cpu, cpu_present_map); 558 cpu_clear(cpu, cpu_present_map);
579 num_processors--; 559 num_processors--;
@@ -619,42 +599,36 @@ acpi_scan_rsdp(unsigned long start, unsigned long length)
619 return 0; 599 return 0;
620} 600}
621 601
622static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size) 602static int __init acpi_parse_sbf(struct acpi_table_header *table)
623{ 603{
624 struct acpi_table_sbf *sb; 604 struct acpi_table_boot *sb;
625
626 if (!phys_addr || !size)
627 return -EINVAL;
628 605
629 sb = (struct acpi_table_sbf *)__acpi_map_table(phys_addr, size); 606 sb = (struct acpi_table_boot *)table;
630 if (!sb) { 607 if (!sb) {
631 printk(KERN_WARNING PREFIX "Unable to map SBF\n"); 608 printk(KERN_WARNING PREFIX "Unable to map SBF\n");
632 return -ENODEV; 609 return -ENODEV;
633 } 610 }
634 611
635 sbf_port = sb->sbf_cmos; /* Save CMOS port */ 612 sbf_port = sb->cmos_index; /* Save CMOS port */
636 613
637 return 0; 614 return 0;
638} 615}
639 616
640#ifdef CONFIG_HPET_TIMER 617#ifdef CONFIG_HPET_TIMER
641 618
642static int __init acpi_parse_hpet(unsigned long phys, unsigned long size) 619static int __init acpi_parse_hpet(struct acpi_table_header *table)
643{ 620{
644 struct acpi_table_hpet *hpet_tbl; 621 struct acpi_table_hpet *hpet_tbl;
645 struct resource *hpet_res; 622 struct resource *hpet_res;
646 resource_size_t res_start; 623 resource_size_t res_start;
647 624
648 if (!phys || !size) 625 hpet_tbl = (struct acpi_table_hpet *)table;
649 return -EINVAL;
650
651 hpet_tbl = (struct acpi_table_hpet *)__acpi_map_table(phys, size);
652 if (!hpet_tbl) { 626 if (!hpet_tbl) {
653 printk(KERN_WARNING PREFIX "Unable to map HPET\n"); 627 printk(KERN_WARNING PREFIX "Unable to map HPET\n");
654 return -ENODEV; 628 return -ENODEV;
655 } 629 }
656 630
657 if (hpet_tbl->addr.space_id != ACPI_SPACE_MEM) { 631 if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
658 printk(KERN_WARNING PREFIX "HPET timers must be located in " 632 printk(KERN_WARNING PREFIX "HPET timers must be located in "
659 "memory.\n"); 633 "memory.\n");
660 return -1; 634 return -1;
@@ -667,29 +641,28 @@ static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
667 hpet_res->name = (void *)&hpet_res[1]; 641 hpet_res->name = (void *)&hpet_res[1];
668 hpet_res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; 642 hpet_res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
669 snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, 643 snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE,
670 "HPET %u", hpet_tbl->number); 644 "HPET %u", hpet_tbl->sequence);
671 hpet_res->end = (1 * 1024) - 1; 645 hpet_res->end = (1 * 1024) - 1;
672 } 646 }
673 647
674#ifdef CONFIG_X86_64 648#ifdef CONFIG_X86_64
675 vxtime.hpet_address = hpet_tbl->addr.addrl | 649 vxtime.hpet_address = hpet_tbl->address.address;
676 ((long)hpet_tbl->addr.addrh << 32);
677 650
678 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", 651 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
679 hpet_tbl->id, vxtime.hpet_address); 652 hpet_tbl->id, vxtime.hpet_address);
680 653
681 res_start = vxtime.hpet_address; 654 res_start = vxtime.hpet_address;
682#else /* X86 */ 655#else /* X86 */
683 { 656 {
684 extern unsigned long hpet_address; 657 extern unsigned long hpet_address;
685 658
686 hpet_address = hpet_tbl->addr.addrl; 659 hpet_address = hpet_tbl->address.address;
687 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", 660 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
688 hpet_tbl->id, hpet_address); 661 hpet_tbl->id, hpet_address);
689 662
690 res_start = hpet_address; 663 res_start = hpet_address;
691 } 664 }
692#endif /* X86 */ 665#endif /* X86 */
693 666
694 if (hpet_res) { 667 if (hpet_res) {
695 hpet_res->start = res_start; 668 hpet_res->start = res_start;
@@ -707,42 +680,28 @@ static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
707extern u32 pmtmr_ioport; 680extern u32 pmtmr_ioport;
708#endif 681#endif
709 682
710static int __init acpi_parse_fadt(unsigned long phys, unsigned long size) 683static int __init acpi_parse_fadt(struct acpi_table_header *table)
711{ 684{
712 struct fadt_descriptor *fadt = NULL;
713
714 fadt = (struct fadt_descriptor *)__acpi_map_table(phys, size);
715 if (!fadt) {
716 printk(KERN_WARNING PREFIX "Unable to map FADT\n");
717 return 0;
718 }
719 /* initialize sci_int early for INT_SRC_OVR MADT parsing */
720 acpi_fadt.sci_int = fadt->sci_int;
721
722 /* initialize rev and apic_phys_dest_mode for x86_64 genapic */
723 acpi_fadt.revision = fadt->revision;
724 acpi_fadt.force_apic_physical_destination_mode =
725 fadt->force_apic_physical_destination_mode;
726 685
727#ifdef CONFIG_X86_PM_TIMER 686#ifdef CONFIG_X86_PM_TIMER
728 /* detect the location of the ACPI PM Timer */ 687 /* detect the location of the ACPI PM Timer */
729 if (fadt->revision >= FADT2_REVISION_ID) { 688 if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) {
730 /* FADT rev. 2 */ 689 /* FADT rev. 2 */
731 if (fadt->xpm_tmr_blk.address_space_id != 690 if (acpi_gbl_FADT.xpm_timer_block.space_id !=
732 ACPI_ADR_SPACE_SYSTEM_IO) 691 ACPI_ADR_SPACE_SYSTEM_IO)
733 return 0; 692 return 0;
734 693
735 pmtmr_ioport = fadt->xpm_tmr_blk.address; 694 pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address;
736 /* 695 /*
737 * "X" fields are optional extensions to the original V1.0 696 * "X" fields are optional extensions to the original V1.0
738 * fields, so we must selectively expand V1.0 fields if the 697 * fields, so we must selectively expand V1.0 fields if the
739 * corresponding X field is zero. 698 * corresponding X field is zero.
740 */ 699 */
741 if (!pmtmr_ioport) 700 if (!pmtmr_ioport)
742 pmtmr_ioport = fadt->V1_pm_tmr_blk; 701 pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
743 } else { 702 } else {
744 /* FADT rev. 1 */ 703 /* FADT rev. 1 */
745 pmtmr_ioport = fadt->V1_pm_tmr_blk; 704 pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
746 } 705 }
747 if (pmtmr_ioport) 706 if (pmtmr_ioport)
748 printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n", 707 printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
@@ -784,13 +743,13 @@ static int __init acpi_parse_madt_lapic_entries(void)
784 if (!cpu_has_apic) 743 if (!cpu_has_apic)
785 return -ENODEV; 744 return -ENODEV;
786 745
787 /* 746 /*
788 * Note that the LAPIC address is obtained from the MADT (32-bit value) 747 * Note that the LAPIC address is obtained from the MADT (32-bit value)
789 * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value). 748 * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value).
790 */ 749 */
791 750
792 count = 751 count =
793 acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, 752 acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
794 acpi_parse_lapic_addr_ovr, 0); 753 acpi_parse_lapic_addr_ovr, 0);
795 if (count < 0) { 754 if (count < 0) {
796 printk(KERN_ERR PREFIX 755 printk(KERN_ERR PREFIX
@@ -800,7 +759,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
800 759
801 mp_register_lapic_address(acpi_lapic_addr); 760 mp_register_lapic_address(acpi_lapic_addr);
802 761
803 count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic, 762 count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic,
804 MAX_APICS); 763 MAX_APICS);
805 if (!count) { 764 if (!count) {
806 printk(KERN_ERR PREFIX "No LAPIC entries present\n"); 765 printk(KERN_ERR PREFIX "No LAPIC entries present\n");
@@ -813,7 +772,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
813 } 772 }
814 773
815 count = 774 count =
816 acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0); 775 acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0);
817 if (count < 0) { 776 if (count < 0) {
818 printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); 777 printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
819 /* TBD: Cleanup to allow fallback to MPS */ 778 /* TBD: Cleanup to allow fallback to MPS */
@@ -842,7 +801,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
842 return -ENODEV; 801 return -ENODEV;
843 } 802 }
844 803
845 if (!cpu_has_apic) 804 if (!cpu_has_apic)
846 return -ENODEV; 805 return -ENODEV;
847 806
848 /* 807 /*
@@ -855,7 +814,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
855 } 814 }
856 815
857 count = 816 count =
858 acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic, 817 acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
859 MAX_IO_APICS); 818 MAX_IO_APICS);
860 if (!count) { 819 if (!count) {
861 printk(KERN_ERR PREFIX "No IOAPIC entries present\n"); 820 printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
@@ -866,7 +825,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
866 } 825 }
867 826
868 count = 827 count =
869 acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, 828 acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr,
870 NR_IRQ_VECTORS); 829 NR_IRQ_VECTORS);
871 if (count < 0) { 830 if (count < 0) {
872 printk(KERN_ERR PREFIX 831 printk(KERN_ERR PREFIX
@@ -880,13 +839,13 @@ static int __init acpi_parse_madt_ioapic_entries(void)
880 * pretend we got one so we can set the SCI flags. 839 * pretend we got one so we can set the SCI flags.
881 */ 840 */
882 if (!acpi_sci_override_gsi) 841 if (!acpi_sci_override_gsi)
883 acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0); 842 acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0);
884 843
885 /* Fill in identity legacy mapings where no override */ 844 /* Fill in identity legacy mapings where no override */
886 mp_config_acpi_legacy_irqs(); 845 mp_config_acpi_legacy_irqs();
887 846
888 count = 847 count =
889 acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, 848 acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src,
890 NR_IRQ_VECTORS); 849 NR_IRQ_VECTORS);
891 if (count < 0) { 850 if (count < 0) {
892 printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); 851 printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
@@ -908,7 +867,7 @@ static void __init acpi_process_madt(void)
908#ifdef CONFIG_X86_LOCAL_APIC 867#ifdef CONFIG_X86_LOCAL_APIC
909 int count, error; 868 int count, error;
910 869
911 count = acpi_table_parse(ACPI_APIC, acpi_parse_madt); 870 count = acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt);
912 if (count >= 1) { 871 if (count >= 1) {
913 872
914 /* 873 /*
@@ -1195,7 +1154,7 @@ int __init acpi_boot_table_init(void)
1195 if (acpi_disabled && !acpi_ht) 1154 if (acpi_disabled && !acpi_ht)
1196 return 1; 1155 return 1;
1197 1156
1198 /* 1157 /*
1199 * Initialize the ACPI boot-time table parser. 1158 * Initialize the ACPI boot-time table parser.
1200 */ 1159 */
1201 error = acpi_table_init(); 1160 error = acpi_table_init();
@@ -1204,7 +1163,7 @@ int __init acpi_boot_table_init(void)
1204 return error; 1163 return error;
1205 } 1164 }
1206 1165
1207 acpi_table_parse(ACPI_BOOT, acpi_parse_sbf); 1166 acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
1208 1167
1209 /* 1168 /*
1210 * blacklist may disable ACPI entirely 1169 * blacklist may disable ACPI entirely
@@ -1232,19 +1191,19 @@ int __init acpi_boot_init(void)
1232 if (acpi_disabled && !acpi_ht) 1191 if (acpi_disabled && !acpi_ht)
1233 return 1; 1192 return 1;
1234 1193
1235 acpi_table_parse(ACPI_BOOT, acpi_parse_sbf); 1194 acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
1236 1195
1237 /* 1196 /*
1238 * set sci_int and PM timer address 1197 * set sci_int and PM timer address
1239 */ 1198 */
1240 acpi_table_parse(ACPI_FADT, acpi_parse_fadt); 1199 acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);
1241 1200
1242 /* 1201 /*
1243 * Process the Multiple APIC Description Table (MADT), if present 1202 * Process the Multiple APIC Description Table (MADT), if present
1244 */ 1203 */
1245 acpi_process_madt(); 1204 acpi_process_madt();
1246 1205
1247 acpi_table_parse(ACPI_HPET, acpi_parse_hpet); 1206 acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);
1248 1207
1249 return 0; 1208 return 0;
1250} 1209}
@@ -1315,13 +1274,17 @@ static int __init setup_acpi_sci(char *s)
1315 if (!s) 1274 if (!s)
1316 return -EINVAL; 1275 return -EINVAL;
1317 if (!strcmp(s, "edge")) 1276 if (!strcmp(s, "edge"))
1318 acpi_sci_flags.trigger = 1; 1277 acpi_sci_flags = ACPI_MADT_TRIGGER_EDGE |
1278 (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
1319 else if (!strcmp(s, "level")) 1279 else if (!strcmp(s, "level"))
1320 acpi_sci_flags.trigger = 3; 1280 acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL |
1281 (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
1321 else if (!strcmp(s, "high")) 1282 else if (!strcmp(s, "high"))
1322 acpi_sci_flags.polarity = 1; 1283 acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH |
1284 (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
1323 else if (!strcmp(s, "low")) 1285 else if (!strcmp(s, "low"))
1324 acpi_sci_flags.polarity = 3; 1286 acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW |
1287 (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
1325 else 1288 else
1326 return -EINVAL; 1289 return -EINVAL;
1327 return 0; 1290 return 0;
diff --git a/arch/i386/kernel/acpi/earlyquirk.c b/arch/i386/kernel/acpi/earlyquirk.c
index 4b60af7f91dd..bf86f7662d8b 100644
--- a/arch/i386/kernel/acpi/earlyquirk.c
+++ b/arch/i386/kernel/acpi/earlyquirk.c
@@ -16,7 +16,7 @@
16 16
17static int nvidia_hpet_detected __initdata; 17static int nvidia_hpet_detected __initdata;
18 18
19static int __init nvidia_hpet_check(unsigned long phys, unsigned long size) 19static int __init nvidia_hpet_check(struct acpi_table_header *header)
20{ 20{
21 nvidia_hpet_detected = 1; 21 nvidia_hpet_detected = 1;
22 return 0; 22 return 0;
@@ -30,7 +30,7 @@ static int __init check_bridge(int vendor, int device)
30 is enabled. */ 30 is enabled. */
31 if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) { 31 if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
32 nvidia_hpet_detected = 0; 32 nvidia_hpet_detected = 0;
33 acpi_table_parse(ACPI_HPET, nvidia_hpet_check); 33 acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check);
34 if (nvidia_hpet_detected == 0) { 34 if (nvidia_hpet_detected == 0) {
35 acpi_skip_timer_override = 1; 35 acpi_skip_timer_override = 1;
36 printk(KERN_INFO "Nvidia board " 36 printk(KERN_INFO "Nvidia board "
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index e940e00b96c9..a3db9332d652 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -190,7 +190,7 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
190 /* Invoke C3 */ 190 /* Invoke C3 */
191 inb(cx_address); 191 inb(cx_address);
192 /* Dummy op - must do something useless after P_LVL3 read */ 192 /* Dummy op - must do something useless after P_LVL3 read */
193 t = inl(acpi_fadt.xpm_tmr_blk.address); 193 t = inl(acpi_gbl_FADT.xpm_timer_block.address);
194 } 194 }
195 /* Disable bus ratio bit */ 195 /* Disable bus ratio bit */
196 local_irq_disable(); 196 local_irq_disable();
@@ -250,8 +250,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
250 outb(3, 0x22); 250 outb(3, 0x22);
251 } else if ((pr != NULL) && pr->flags.bm_control) { 251 } else if ((pr != NULL) && pr->flags.bm_control) {
252 /* Disable bus master arbitration */ 252 /* Disable bus master arbitration */
253 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1, 253 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
254 ACPI_MTX_DO_NOT_LOCK);
255 } 254 }
256 switch (longhaul_version) { 255 switch (longhaul_version) {
257 256
@@ -281,8 +280,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
281 case TYPE_POWERSAVER: 280 case TYPE_POWERSAVER:
282 if (longhaul_flags & USE_ACPI_C3) { 281 if (longhaul_flags & USE_ACPI_C3) {
283 /* Don't allow wakeup */ 282 /* Don't allow wakeup */
284 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0, 283 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
285 ACPI_MTX_DO_NOT_LOCK);
286 do_powersaver(cx->address, clock_ratio_index); 284 do_powersaver(cx->address, clock_ratio_index);
287 } else { 285 } else {
288 do_powersaver(0, clock_ratio_index); 286 do_powersaver(0, clock_ratio_index);
@@ -295,8 +293,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
295 outb(0, 0x22); 293 outb(0, 0x22);
296 } else if ((pr != NULL) && pr->flags.bm_control) { 294 } else if ((pr != NULL) && pr->flags.bm_control) {
297 /* Enable bus master arbitration */ 295 /* Enable bus master arbitration */
298 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, 296 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
299 ACPI_MTX_DO_NOT_LOCK);
300 } 297 }
301 outb(pic2_mask,0xA1); /* restore mask */ 298 outb(pic2_mask,0xA1); /* restore mask */
302 outb(pic1_mask,0x21); 299 outb(pic1_mask,0x21);
@@ -414,7 +411,7 @@ static int __init longhaul_get_ranges(void)
414 highest_speed = calc_speed(maxmult); 411 highest_speed = calc_speed(maxmult);
415 lowest_speed = calc_speed(minmult); 412 lowest_speed = calc_speed(minmult);
416 dprintk ("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb, 413 dprintk ("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb,
417 print_speed(lowest_speed/1000), 414 print_speed(lowest_speed/1000),
418 print_speed(highest_speed/1000)); 415 print_speed(highest_speed/1000));
419 416
420 if (lowest_speed == highest_speed) { 417 if (lowest_speed == highest_speed) {
@@ -498,7 +495,7 @@ static void __init longhaul_setup_voltagescaling(void)
498 maxvid.mV/1000, maxvid.mV%1000, 495 maxvid.mV/1000, maxvid.mV%1000,
499 minvid.mV/1000, minvid.mV%1000, 496 minvid.mV/1000, minvid.mV%1000,
500 numvscales); 497 numvscales);
501 498
502 j = 0; 499 j = 0;
503 while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) { 500 while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) {
504 speed = longhaul_table[j].frequency; 501 speed = longhaul_table[j].frequency;
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 6a3875f81a0a..5592fa6e1fa1 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -2606,25 +2606,32 @@ static struct irq_chip msi_chip = {
2606 .retrigger = ioapic_retrigger_irq, 2606 .retrigger = ioapic_retrigger_irq,
2607}; 2607};
2608 2608
2609int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev) 2609int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
2610{ 2610{
2611 struct msi_msg msg; 2611 struct msi_msg msg;
2612 int ret; 2612 int irq, ret;
2613 irq = create_irq();
2614 if (irq < 0)
2615 return irq;
2616
2617 set_irq_msi(irq, desc);
2613 ret = msi_compose_msg(dev, irq, &msg); 2618 ret = msi_compose_msg(dev, irq, &msg);
2614 if (ret < 0) 2619 if (ret < 0) {
2620 destroy_irq(irq);
2615 return ret; 2621 return ret;
2622 }
2616 2623
2617 write_msi_msg(irq, &msg); 2624 write_msi_msg(irq, &msg);
2618 2625
2619 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, 2626 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq,
2620 "edge"); 2627 "edge");
2621 2628
2622 return 0; 2629 return irq;
2623} 2630}
2624 2631
2625void arch_teardown_msi_irq(unsigned int irq) 2632void arch_teardown_msi_irq(unsigned int irq)
2626{ 2633{
2627 return; 2634 destroy_irq(irq);
2628} 2635}
2629 2636
2630#endif /* CONFIG_PCI_MSI */ 2637#endif /* CONFIG_PCI_MSI */
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 49bff3596bff..4f5983c98669 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -1057,7 +1057,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
1057 static int gsi_to_irq[MAX_GSI_NUM]; 1057 static int gsi_to_irq[MAX_GSI_NUM];
1058 1058
1059 /* Don't set up the ACPI SCI because it's already set up */ 1059 /* Don't set up the ACPI SCI because it's already set up */
1060 if (acpi_fadt.sci_int == gsi) 1060 if (acpi_gbl_FADT.sci_interrupt == gsi)
1061 return gsi; 1061 return gsi;
1062 1062
1063 ioapic = mp_find_ioapic(gsi); 1063 ioapic = mp_find_ioapic(gsi);
@@ -1114,7 +1114,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
1114 /* 1114 /*
1115 * Don't assign IRQ used by ACPI SCI 1115 * Don't assign IRQ used by ACPI SCI
1116 */ 1116 */
1117 if (gsi == acpi_fadt.sci_int) 1117 if (gsi == acpi_gbl_FADT.sci_interrupt)
1118 gsi = pci_irq++; 1118 gsi = pci_irq++;
1119 gsi_to_irq[irq] = gsi; 1119 gsi_to_irq[irq] = gsi;
1120 } else { 1120 } else {
diff --git a/arch/i386/kernel/srat.c b/arch/i386/kernel/srat.c
index f7e735c077c3..2a8713ec0f9a 100644
--- a/arch/i386/kernel/srat.c
+++ b/arch/i386/kernel/srat.c
@@ -62,19 +62,19 @@ extern void * boot_ioremap(unsigned long, unsigned long);
62/* Identify CPU proximity domains */ 62/* Identify CPU proximity domains */
63static void __init parse_cpu_affinity_structure(char *p) 63static void __init parse_cpu_affinity_structure(char *p)
64{ 64{
65 struct acpi_table_processor_affinity *cpu_affinity = 65 struct acpi_srat_cpu_affinity *cpu_affinity =
66 (struct acpi_table_processor_affinity *) p; 66 (struct acpi_srat_cpu_affinity *) p;
67 67
68 if (!cpu_affinity->flags.enabled) 68 if ((cpu_affinity->flags & ACPI_SRAT_CPU_ENABLED) == 0)
69 return; /* empty entry */ 69 return; /* empty entry */
70 70
71 /* mark this node as "seen" in node bitmap */ 71 /* mark this node as "seen" in node bitmap */
72 BMAP_SET(pxm_bitmap, cpu_affinity->proximity_domain); 72 BMAP_SET(pxm_bitmap, cpu_affinity->proximity_domain_lo);
73 73
74 apicid_to_pxm[cpu_affinity->apic_id] = cpu_affinity->proximity_domain; 74 apicid_to_pxm[cpu_affinity->apic_id] = cpu_affinity->proximity_domain_lo;
75 75
76 printk("CPU 0x%02X in proximity domain 0x%02X\n", 76 printk("CPU 0x%02X in proximity domain 0x%02X\n",
77 cpu_affinity->apic_id, cpu_affinity->proximity_domain); 77 cpu_affinity->apic_id, cpu_affinity->proximity_domain_lo);
78} 78}
79 79
80/* 80/*
@@ -84,28 +84,27 @@ static void __init parse_cpu_affinity_structure(char *p)
84static void __init parse_memory_affinity_structure (char *sratp) 84static void __init parse_memory_affinity_structure (char *sratp)
85{ 85{
86 unsigned long long paddr, size; 86 unsigned long long paddr, size;
87 unsigned long start_pfn, end_pfn; 87 unsigned long start_pfn, end_pfn;
88 u8 pxm; 88 u8 pxm;
89 struct node_memory_chunk_s *p, *q, *pend; 89 struct node_memory_chunk_s *p, *q, *pend;
90 struct acpi_table_memory_affinity *memory_affinity = 90 struct acpi_srat_mem_affinity *memory_affinity =
91 (struct acpi_table_memory_affinity *) sratp; 91 (struct acpi_srat_mem_affinity *) sratp;
92 92
93 if (!memory_affinity->flags.enabled) 93 if ((memory_affinity->flags & ACPI_SRAT_MEM_ENABLED) == 0)
94 return; /* empty entry */ 94 return; /* empty entry */
95 95
96 pxm = memory_affinity->proximity_domain & 0xff;
97
96 /* mark this node as "seen" in node bitmap */ 98 /* mark this node as "seen" in node bitmap */
97 BMAP_SET(pxm_bitmap, memory_affinity->proximity_domain); 99 BMAP_SET(pxm_bitmap, pxm);
98 100
99 /* calculate info for memory chunk structure */ 101 /* calculate info for memory chunk structure */
100 paddr = memory_affinity->base_addr_hi; 102 paddr = memory_affinity->base_address;
101 paddr = (paddr << 32) | memory_affinity->base_addr_lo; 103 size = memory_affinity->length;
102 size = memory_affinity->length_hi; 104
103 size = (size << 32) | memory_affinity->length_lo;
104
105 start_pfn = paddr >> PAGE_SHIFT; 105 start_pfn = paddr >> PAGE_SHIFT;
106 end_pfn = (paddr + size) >> PAGE_SHIFT; 106 end_pfn = (paddr + size) >> PAGE_SHIFT;
107 107
108 pxm = memory_affinity->proximity_domain;
109 108
110 if (num_memory_chunks >= MAXCHUNKS) { 109 if (num_memory_chunks >= MAXCHUNKS) {
111 printk("Too many mem chunks in SRAT. Ignoring %lld MBytes at %llx\n", 110 printk("Too many mem chunks in SRAT. Ignoring %lld MBytes at %llx\n",
@@ -132,8 +131,8 @@ static void __init parse_memory_affinity_structure (char *sratp)
132 printk("Memory range 0x%lX to 0x%lX (type 0x%X) in proximity domain 0x%02X %s\n", 131 printk("Memory range 0x%lX to 0x%lX (type 0x%X) in proximity domain 0x%02X %s\n",
133 start_pfn, end_pfn, 132 start_pfn, end_pfn,
134 memory_affinity->memory_type, 133 memory_affinity->memory_type,
135 memory_affinity->proximity_domain, 134 pxm,
136 (memory_affinity->flags.hot_pluggable ? 135 ((memory_affinity->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
137 "enabled and removable" : "enabled" ) ); 136 "enabled and removable" : "enabled" ) );
138} 137}
139 138
@@ -185,10 +184,10 @@ static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
185 num_memory_chunks = 0; 184 num_memory_chunks = 0;
186 while (p < end) { 185 while (p < end) {
187 switch (*p) { 186 switch (*p) {
188 case ACPI_SRAT_PROCESSOR_AFFINITY: 187 case ACPI_SRAT_TYPE_CPU_AFFINITY:
189 parse_cpu_affinity_structure(p); 188 parse_cpu_affinity_structure(p);
190 break; 189 break;
191 case ACPI_SRAT_MEMORY_AFFINITY: 190 case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
192 parse_memory_affinity_structure(p); 191 parse_memory_affinity_structure(p);
193 break; 192 break;
194 default: 193 default:
@@ -262,31 +261,30 @@ out_fail:
262 return 0; 261 return 0;
263} 262}
264 263
264struct acpi_static_rsdt {
265 struct acpi_table_rsdt table;
266 u32 padding[7]; /* Allow for 7 more table entries */
267};
268
265int __init get_memcfg_from_srat(void) 269int __init get_memcfg_from_srat(void)
266{ 270{
267 struct acpi_table_header *header = NULL; 271 struct acpi_table_header *header = NULL;
268 struct acpi_table_rsdp *rsdp = NULL; 272 struct acpi_table_rsdp *rsdp = NULL;
269 struct acpi_table_rsdt *rsdt = NULL; 273 struct acpi_table_rsdt *rsdt = NULL;
270 struct acpi_pointer *rsdp_address = NULL; 274 acpi_native_uint rsdp_address = 0;
271 struct acpi_table_rsdt saved_rsdt; 275 struct acpi_static_rsdt saved_rsdt;
272 int tables = 0; 276 int tables = 0;
273 int i = 0; 277 int i = 0;
274 278
275 if (ACPI_FAILURE(acpi_find_root_pointer(ACPI_PHYSICAL_ADDRESSING, 279 rsdp_address = acpi_find_rsdp();
276 rsdp_address))) { 280 if (!rsdp_address) {
277 printk("%s: System description tables not found\n", 281 printk("%s: System description tables not found\n",
278 __FUNCTION__); 282 __FUNCTION__);
279 goto out_err; 283 goto out_err;
280 } 284 }
281 285
282 if (rsdp_address->pointer_type == ACPI_PHYSICAL_POINTER) { 286 printk("%s: assigning address to rsdp\n", __FUNCTION__);
283 printk("%s: assigning address to rsdp\n", __FUNCTION__); 287 rsdp = (struct acpi_table_rsdp *)(u32)rsdp_address;
284 rsdp = (struct acpi_table_rsdp *)
285 (u32)rsdp_address->pointer.physical;
286 } else {
287 printk("%s: rsdp_address is not a physical pointer\n", __FUNCTION__);
288 goto out_err;
289 }
290 if (!rsdp) { 288 if (!rsdp) {
291 printk("%s: Didn't find ACPI root!\n", __FUNCTION__); 289 printk("%s: Didn't find ACPI root!\n", __FUNCTION__);
292 goto out_err; 290 goto out_err;
@@ -295,13 +293,13 @@ int __init get_memcfg_from_srat(void)
295 printk(KERN_INFO "%.8s v%d [%.6s]\n", rsdp->signature, rsdp->revision, 293 printk(KERN_INFO "%.8s v%d [%.6s]\n", rsdp->signature, rsdp->revision,
296 rsdp->oem_id); 294 rsdp->oem_id);
297 295
298 if (strncmp(rsdp->signature, RSDP_SIG,strlen(RSDP_SIG))) { 296 if (strncmp(rsdp->signature, ACPI_SIG_RSDP,strlen(ACPI_SIG_RSDP))) {
299 printk(KERN_WARNING "%s: RSDP table signature incorrect\n", __FUNCTION__); 297 printk(KERN_WARNING "%s: RSDP table signature incorrect\n", __FUNCTION__);
300 goto out_err; 298 goto out_err;
301 } 299 }
302 300
303 rsdt = (struct acpi_table_rsdt *) 301 rsdt = (struct acpi_table_rsdt *)
304 boot_ioremap(rsdp->rsdt_address, sizeof(struct acpi_table_rsdt)); 302 boot_ioremap(rsdp->rsdt_physical_address, sizeof(struct acpi_table_rsdt));
305 303
306 if (!rsdt) { 304 if (!rsdt) {
307 printk(KERN_WARNING 305 printk(KERN_WARNING
@@ -310,9 +308,9 @@ int __init get_memcfg_from_srat(void)
310 goto out_err; 308 goto out_err;
311 } 309 }
312 310
313 header = & rsdt->header; 311 header = &rsdt->header;
314 312
315 if (strncmp(header->signature, RSDT_SIG, strlen(RSDT_SIG))) { 313 if (strncmp(header->signature, ACPI_SIG_RSDT, strlen(ACPI_SIG_RSDT))) {
316 printk(KERN_WARNING "ACPI: RSDT signature incorrect\n"); 314 printk(KERN_WARNING "ACPI: RSDT signature incorrect\n");
317 goto out_err; 315 goto out_err;
318 } 316 }
@@ -330,9 +328,9 @@ int __init get_memcfg_from_srat(void)
330 328
331 memcpy(&saved_rsdt, rsdt, sizeof(saved_rsdt)); 329 memcpy(&saved_rsdt, rsdt, sizeof(saved_rsdt));
332 330
333 if (saved_rsdt.header.length > sizeof(saved_rsdt)) { 331 if (saved_rsdt.table.header.length > sizeof(saved_rsdt)) {
334 printk(KERN_WARNING "ACPI: Too big length in RSDT: %d\n", 332 printk(KERN_WARNING "ACPI: Too big length in RSDT: %d\n",
335 saved_rsdt.header.length); 333 saved_rsdt.table.header.length);
336 goto out_err; 334 goto out_err;
337 } 335 }
338 336
@@ -341,15 +339,15 @@ int __init get_memcfg_from_srat(void)
341 for (i = 0; i < tables; i++) { 339 for (i = 0; i < tables; i++) {
342 /* Map in header, then map in full table length. */ 340 /* Map in header, then map in full table length. */
343 header = (struct acpi_table_header *) 341 header = (struct acpi_table_header *)
344 boot_ioremap(saved_rsdt.entry[i], sizeof(struct acpi_table_header)); 342 boot_ioremap(saved_rsdt.table.table_offset_entry[i], sizeof(struct acpi_table_header));
345 if (!header) 343 if (!header)
346 break; 344 break;
347 header = (struct acpi_table_header *) 345 header = (struct acpi_table_header *)
348 boot_ioremap(saved_rsdt.entry[i], header->length); 346 boot_ioremap(saved_rsdt.table.table_offset_entry[i], header->length);
349 if (!header) 347 if (!header)
350 break; 348 break;
351 349
352 if (strncmp((char *) &header->signature, "SRAT", 4)) 350 if (strncmp((char *) &header->signature, ACPI_SIG_SRAT, 4))
353 continue; 351 continue;
354 352
355 /* we've found the srat table. don't need to look at any more tables */ 353 /* we've found the srat table. don't need to look at any more tables */
diff --git a/arch/i386/mach-es7000/es7000.h b/arch/i386/mach-es7000/es7000.h
index 80566ca4a80a..c8d5aa132fa0 100644
--- a/arch/i386/mach-es7000/es7000.h
+++ b/arch/i386/mach-es7000/es7000.h
@@ -84,15 +84,6 @@ struct es7000_oem_table {
84}; 84};
85 85
86#ifdef CONFIG_ACPI 86#ifdef CONFIG_ACPI
87struct acpi_table_sdt {
88 unsigned long pa;
89 unsigned long count;
90 struct {
91 unsigned long pa;
92 enum acpi_table_id id;
93 unsigned long size;
94 } entry[50];
95};
96 87
97struct oem_table { 88struct oem_table {
98 struct acpi_table_header Header; 89 struct acpi_table_header Header;
diff --git a/arch/i386/mach-es7000/es7000plat.c b/arch/i386/mach-es7000/es7000plat.c
index 3d0fc853516d..9be6ceabf042 100644
--- a/arch/i386/mach-es7000/es7000plat.c
+++ b/arch/i386/mach-es7000/es7000plat.c
@@ -160,51 +160,14 @@ parse_unisys_oem (char *oemptr)
160int __init 160int __init
161find_unisys_acpi_oem_table(unsigned long *oem_addr) 161find_unisys_acpi_oem_table(unsigned long *oem_addr)
162{ 162{
163 struct acpi_table_rsdp *rsdp = NULL; 163 struct acpi_table_header *header = NULL;
164 unsigned long rsdp_phys = 0; 164 int i = 0;
165 struct acpi_table_header *header = NULL; 165 while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) {
166 int i; 166 if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) {
167 struct acpi_table_sdt sdt; 167 struct oem_table *t = (struct oem_table *)header;
168 168 *oem_addr = (unsigned long)__acpi_map_table(t->OEMTableAddr,
169 rsdp_phys = acpi_find_rsdp(); 169 t->OEMTableSize);
170 rsdp = __va(rsdp_phys); 170 return 0;
171 if (rsdp->rsdt_address) {
172 struct acpi_table_rsdt *mapped_rsdt = NULL;
173 sdt.pa = rsdp->rsdt_address;
174
175 header = (struct acpi_table_header *)
176 __acpi_map_table(sdt.pa, sizeof(struct acpi_table_header));
177 if (!header)
178 return -ENODEV;
179
180 sdt.count = (header->length - sizeof(struct acpi_table_header)) >> 3;
181 mapped_rsdt = (struct acpi_table_rsdt *)
182 __acpi_map_table(sdt.pa, header->length);
183 if (!mapped_rsdt)
184 return -ENODEV;
185
186 header = &mapped_rsdt->header;
187
188 for (i = 0; i < sdt.count; i++)
189 sdt.entry[i].pa = (unsigned long) mapped_rsdt->entry[i];
190 };
191 for (i = 0; i < sdt.count; i++) {
192
193 header = (struct acpi_table_header *)
194 __acpi_map_table(sdt.entry[i].pa,
195 sizeof(struct acpi_table_header));
196 if (!header)
197 continue;
198 if (!strncmp((char *) &header->signature, "OEM1", 4)) {
199 if (!strncmp((char *) &header->oem_id, "UNISYS", 6)) {
200 void *addr;
201 struct oem_table *t;
202 acpi_table_print(header, sdt.entry[i].pa);
203 t = (struct oem_table *) __acpi_map_table(sdt.entry[i].pa, header->length);
204 addr = (void *) __acpi_map_table(t->OEMTableAddr, t->OEMTableSize);
205 *oem_addr = (unsigned long) addr;
206 return 0;
207 }
208 } 171 }
209 } 172 }
210 return -1; 173 return -1;
diff --git a/arch/i386/pci/mmconfig.c b/arch/i386/pci/mmconfig.c
index e2616a266e13..5700220dcf5f 100644
--- a/arch/i386/pci/mmconfig.c
+++ b/arch/i386/pci/mmconfig.c
@@ -36,7 +36,7 @@ static DECLARE_BITMAP(fallback_slots, MAX_CHECK_BUS*32);
36static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn) 36static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
37{ 37{
38 int cfg_num = -1; 38 int cfg_num = -1;
39 struct acpi_table_mcfg_config *cfg; 39 struct acpi_mcfg_allocation *cfg;
40 40
41 if (seg == 0 && bus < MAX_CHECK_BUS && 41 if (seg == 0 && bus < MAX_CHECK_BUS &&
42 test_bit(PCI_SLOT(devfn) + 32*bus, fallback_slots)) 42 test_bit(PCI_SLOT(devfn) + 32*bus, fallback_slots))
@@ -48,11 +48,11 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
48 break; 48 break;
49 } 49 }
50 cfg = &pci_mmcfg_config[cfg_num]; 50 cfg = &pci_mmcfg_config[cfg_num];
51 if (cfg->pci_segment_group_number != seg) 51 if (cfg->pci_segment != seg)
52 continue; 52 continue;
53 if ((cfg->start_bus_number <= bus) && 53 if ((cfg->start_bus_number <= bus) &&
54 (cfg->end_bus_number >= bus)) 54 (cfg->end_bus_number >= bus))
55 return cfg->base_address; 55 return cfg->address;
56 } 56 }
57 57
58 /* Handle more broken MCFG tables on Asus etc. 58 /* Handle more broken MCFG tables on Asus etc.
@@ -60,9 +60,9 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
60 this applies to all busses. */ 60 this applies to all busses. */
61 cfg = &pci_mmcfg_config[0]; 61 cfg = &pci_mmcfg_config[0];
62 if (pci_mmcfg_config_num == 1 && 62 if (pci_mmcfg_config_num == 1 &&
63 cfg->pci_segment_group_number == 0 && 63 cfg->pci_segment == 0 &&
64 (cfg->start_bus_number | cfg->end_bus_number) == 0) 64 (cfg->start_bus_number | cfg->end_bus_number) == 0)
65 return cfg->base_address; 65 return cfg->address;
66 66
67 /* Fall back to type 0 */ 67 /* Fall back to type 0 */
68 return 0; 68 return 0;
@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
125 unsigned long flags; 125 unsigned long flags;
126 u32 base; 126 u32 base;
127 127
128 if ((bus > 255) || (devfn > 255) || (reg > 4095)) 128 if ((bus > 255) || (devfn > 255) || (reg > 4095))
129 return -EINVAL; 129 return -EINVAL;
130 130
131 base = get_base_addr(seg, bus, devfn); 131 base = get_base_addr(seg, bus, devfn);
@@ -199,19 +199,19 @@ void __init pci_mmcfg_init(int type)
199 if ((pci_probe & PCI_PROBE_MMCONF) == 0) 199 if ((pci_probe & PCI_PROBE_MMCONF) == 0)
200 return; 200 return;
201 201
202 acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg); 202 acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
203 if ((pci_mmcfg_config_num == 0) || 203 if ((pci_mmcfg_config_num == 0) ||
204 (pci_mmcfg_config == NULL) || 204 (pci_mmcfg_config == NULL) ||
205 (pci_mmcfg_config[0].base_address == 0)) 205 (pci_mmcfg_config[0].address == 0))
206 return; 206 return;
207 207
208 /* Only do this check when type 1 works. If it doesn't work 208 /* Only do this check when type 1 works. If it doesn't work
209 assume we run on a Mac and always use MCFG */ 209 assume we run on a Mac and always use MCFG */
210 if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].base_address, 210 if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].address,
211 pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN, 211 pci_mmcfg_config[0].address + MMCONFIG_APER_MIN,
212 E820_RESERVED)) { 212 E820_RESERVED)) {
213 printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n", 213 printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %lx is not E820-reserved\n",
214 pci_mmcfg_config[0].base_address); 214 (unsigned long)pci_mmcfg_config[0].address);
215 printk(KERN_ERR "PCI: Not using MMCONFIG.\n"); 215 printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
216 return; 216 return;
217 } 217 }
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index fcacfe291b9b..f1d2899e9a62 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -11,6 +11,8 @@ menu "Processor type and features"
11 11
12config IA64 12config IA64
13 bool 13 bool
14 select PCI if (!IA64_HP_SIM)
15 select ACPI if (!IA64_HP_SIM)
14 default y 16 default y
15 help 17 help
16 The Itanium Processor Family is Intel's 64-bit successor to 18 The Itanium Processor Family is Intel's 64-bit successor to
@@ -28,7 +30,6 @@ config MMU
28 30
29config SWIOTLB 31config SWIOTLB
30 bool 32 bool
31 default y
32 33
33config RWSEM_XCHGADD_ALGORITHM 34config RWSEM_XCHGADD_ALGORITHM
34 bool 35 bool
@@ -84,10 +85,9 @@ choice
84 85
85config IA64_GENERIC 86config IA64_GENERIC
86 bool "generic" 87 bool "generic"
87 select ACPI
88 select PCI
89 select NUMA 88 select NUMA
90 select ACPI_NUMA 89 select ACPI_NUMA
90 select SWIOTLB
91 help 91 help
92 This selects the system type of your hardware. A "generic" kernel 92 This selects the system type of your hardware. A "generic" kernel
93 will run on any supported IA-64 system. However, if you configure 93 will run on any supported IA-64 system. However, if you configure
@@ -104,6 +104,7 @@ config IA64_GENERIC
104 104
105config IA64_DIG 105config IA64_DIG
106 bool "DIG-compliant" 106 bool "DIG-compliant"
107 select SWIOTLB
107 108
108config IA64_HP_ZX1 109config IA64_HP_ZX1
109 bool "HP-zx1/sx1000" 110 bool "HP-zx1/sx1000"
@@ -113,6 +114,7 @@ config IA64_HP_ZX1
113 114
114config IA64_HP_ZX1_SWIOTLB 115config IA64_HP_ZX1_SWIOTLB
115 bool "HP-zx1/sx1000 with software I/O TLB" 116 bool "HP-zx1/sx1000 with software I/O TLB"
117 select SWIOTLB
116 help 118 help
117 Build a kernel that runs on HP zx1 and sx1000 systems even when they 119 Build a kernel that runs on HP zx1 and sx1000 systems even when they
118 have broken PCI devices which cannot DMA to full 32 bits. Apart 120 have broken PCI devices which cannot DMA to full 32 bits. Apart
@@ -131,6 +133,7 @@ config IA64_SGI_SN2
131 133
132config IA64_HP_SIM 134config IA64_HP_SIM
133 bool "Ski-simulator" 135 bool "Ski-simulator"
136 select SWIOTLB
134 137
135endchoice 138endchoice
136 139
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index a5a5637507be..2153bcacbe6c 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -192,3 +192,7 @@ EXPORT_SYMBOL(hwsw_unmap_sg);
192EXPORT_SYMBOL(hwsw_dma_supported); 192EXPORT_SYMBOL(hwsw_dma_supported);
193EXPORT_SYMBOL(hwsw_alloc_coherent); 193EXPORT_SYMBOL(hwsw_alloc_coherent);
194EXPORT_SYMBOL(hwsw_free_coherent); 194EXPORT_SYMBOL(hwsw_free_coherent);
195EXPORT_SYMBOL(hwsw_sync_single_for_cpu);
196EXPORT_SYMBOL(hwsw_sync_single_for_device);
197EXPORT_SYMBOL(hwsw_sync_sg_for_cpu);
198EXPORT_SYMBOL(hwsw_sync_sg_for_device);
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 29f05d4b68cd..9197d7b361b3 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -55,7 +55,7 @@
55 55
56#define BAD_MADT_ENTRY(entry, end) ( \ 56#define BAD_MADT_ENTRY(entry, end) ( \
57 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ 57 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
58 ((acpi_table_entry_header *)entry)->length < sizeof(*entry)) 58 ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
59 59
60#define PREFIX "ACPI: " 60#define PREFIX "ACPI: "
61 61
@@ -67,16 +67,11 @@ EXPORT_SYMBOL(pm_power_off);
67unsigned int acpi_cpei_override; 67unsigned int acpi_cpei_override;
68unsigned int acpi_cpei_phys_cpuid; 68unsigned int acpi_cpei_phys_cpuid;
69 69
70#define MAX_SAPICS 256
71u16 ia64_acpiid_to_sapicid[MAX_SAPICS] = {[0 ... MAX_SAPICS - 1] = -1 };
72
73EXPORT_SYMBOL(ia64_acpiid_to_sapicid);
74
75const char *acpi_get_sysname(void) 70const char *acpi_get_sysname(void)
76{ 71{
77#ifdef CONFIG_IA64_GENERIC 72#ifdef CONFIG_IA64_GENERIC
78 unsigned long rsdp_phys; 73 unsigned long rsdp_phys;
79 struct acpi20_table_rsdp *rsdp; 74 struct acpi_table_rsdp *rsdp;
80 struct acpi_table_xsdt *xsdt; 75 struct acpi_table_xsdt *xsdt;
81 struct acpi_table_header *hdr; 76 struct acpi_table_header *hdr;
82 77
@@ -87,16 +82,16 @@ const char *acpi_get_sysname(void)
87 return "dig"; 82 return "dig";
88 } 83 }
89 84
90 rsdp = (struct acpi20_table_rsdp *)__va(rsdp_phys); 85 rsdp = (struct acpi_table_rsdp *)__va(rsdp_phys);
91 if (strncmp(rsdp->signature, RSDP_SIG, sizeof(RSDP_SIG) - 1)) { 86 if (strncmp(rsdp->signature, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1)) {
92 printk(KERN_ERR 87 printk(KERN_ERR
93 "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n"); 88 "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n");
94 return "dig"; 89 return "dig";
95 } 90 }
96 91
97 xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_address); 92 xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_physical_address);
98 hdr = &xsdt->header; 93 hdr = &xsdt->header;
99 if (strncmp(hdr->signature, XSDT_SIG, sizeof(XSDT_SIG) - 1)) { 94 if (strncmp(hdr->signature, ACPI_SIG_XSDT, sizeof(ACPI_SIG_XSDT) - 1)) {
100 printk(KERN_ERR 95 printk(KERN_ERR
101 "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n"); 96 "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n");
102 return "dig"; 97 return "dig";
@@ -169,12 +164,12 @@ struct acpi_table_madt *acpi_madt __initdata;
169static u8 has_8259; 164static u8 has_8259;
170 165
171static int __init 166static int __init
172acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header, 167acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
173 const unsigned long end) 168 const unsigned long end)
174{ 169{
175 struct acpi_table_lapic_addr_ovr *lapic; 170 struct acpi_madt_local_apic_override *lapic;
176 171
177 lapic = (struct acpi_table_lapic_addr_ovr *)header; 172 lapic = (struct acpi_madt_local_apic_override *)header;
178 173
179 if (BAD_MADT_ENTRY(lapic, end)) 174 if (BAD_MADT_ENTRY(lapic, end))
180 return -EINVAL; 175 return -EINVAL;
@@ -187,22 +182,19 @@ acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
187} 182}
188 183
189static int __init 184static int __init
190acpi_parse_lsapic(acpi_table_entry_header * header, const unsigned long end) 185acpi_parse_lsapic(struct acpi_subtable_header * header, const unsigned long end)
191{ 186{
192 struct acpi_table_lsapic *lsapic; 187 struct acpi_madt_local_sapic *lsapic;
193 188
194 lsapic = (struct acpi_table_lsapic *)header; 189 lsapic = (struct acpi_madt_local_sapic *)header;
195 190
196 if (BAD_MADT_ENTRY(lsapic, end)) 191 /*Skip BAD_MADT_ENTRY check, as lsapic size could vary */
197 return -EINVAL;
198 192
199 if (lsapic->flags.enabled) { 193 if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
200#ifdef CONFIG_SMP 194#ifdef CONFIG_SMP
201 smp_boot_data.cpu_phys_id[available_cpus] = 195 smp_boot_data.cpu_phys_id[available_cpus] =
202 (lsapic->id << 8) | lsapic->eid; 196 (lsapic->id << 8) | lsapic->eid;
203#endif 197#endif
204 ia64_acpiid_to_sapicid[lsapic->acpi_id] =
205 (lsapic->id << 8) | lsapic->eid;
206 ++available_cpus; 198 ++available_cpus;
207 } 199 }
208 200
@@ -211,11 +203,11 @@ acpi_parse_lsapic(acpi_table_entry_header * header, const unsigned long end)
211} 203}
212 204
213static int __init 205static int __init
214acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end) 206acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
215{ 207{
216 struct acpi_table_lapic_nmi *lacpi_nmi; 208 struct acpi_madt_local_apic_nmi *lacpi_nmi;
217 209
218 lacpi_nmi = (struct acpi_table_lapic_nmi *)header; 210 lacpi_nmi = (struct acpi_madt_local_apic_nmi *)header;
219 211
220 if (BAD_MADT_ENTRY(lacpi_nmi, end)) 212 if (BAD_MADT_ENTRY(lacpi_nmi, end))
221 return -EINVAL; 213 return -EINVAL;
@@ -225,11 +217,11 @@ acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
225} 217}
226 218
227static int __init 219static int __init
228acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end) 220acpi_parse_iosapic(struct acpi_subtable_header * header, const unsigned long end)
229{ 221{
230 struct acpi_table_iosapic *iosapic; 222 struct acpi_madt_io_sapic *iosapic;
231 223
232 iosapic = (struct acpi_table_iosapic *)header; 224 iosapic = (struct acpi_madt_io_sapic *)header;
233 225
234 if (BAD_MADT_ENTRY(iosapic, end)) 226 if (BAD_MADT_ENTRY(iosapic, end))
235 return -EINVAL; 227 return -EINVAL;
@@ -240,13 +232,13 @@ acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end)
240static unsigned int __initdata acpi_madt_rev; 232static unsigned int __initdata acpi_madt_rev;
241 233
242static int __init 234static int __init
243acpi_parse_plat_int_src(acpi_table_entry_header * header, 235acpi_parse_plat_int_src(struct acpi_subtable_header * header,
244 const unsigned long end) 236 const unsigned long end)
245{ 237{
246 struct acpi_table_plat_int_src *plintsrc; 238 struct acpi_madt_interrupt_source *plintsrc;
247 int vector; 239 int vector;
248 240
249 plintsrc = (struct acpi_table_plat_int_src *)header; 241 plintsrc = (struct acpi_madt_interrupt_source *)header;
250 242
251 if (BAD_MADT_ENTRY(plintsrc, end)) 243 if (BAD_MADT_ENTRY(plintsrc, end))
252 return -EINVAL; 244 return -EINVAL;
@@ -257,19 +249,19 @@ acpi_parse_plat_int_src(acpi_table_entry_header * header,
257 */ 249 */
258 vector = iosapic_register_platform_intr(plintsrc->type, 250 vector = iosapic_register_platform_intr(plintsrc->type,
259 plintsrc->global_irq, 251 plintsrc->global_irq,
260 plintsrc->iosapic_vector, 252 plintsrc->io_sapic_vector,
261 plintsrc->eid, 253 plintsrc->eid,
262 plintsrc->id, 254 plintsrc->id,
263 (plintsrc->flags.polarity == 255 ((plintsrc->inti_flags & ACPI_MADT_POLARITY_MASK) ==
264 1) ? IOSAPIC_POL_HIGH : 256 ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
265 IOSAPIC_POL_LOW, 257 IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
266 (plintsrc->flags.trigger == 258 ((plintsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
267 1) ? IOSAPIC_EDGE : 259 ACPI_MADT_TRIGGER_EDGE) ?
268 IOSAPIC_LEVEL); 260 IOSAPIC_EDGE : IOSAPIC_LEVEL);
269 261
270 platform_intr_list[plintsrc->type] = vector; 262 platform_intr_list[plintsrc->type] = vector;
271 if (acpi_madt_rev > 1) { 263 if (acpi_madt_rev > 1) {
272 acpi_cpei_override = plintsrc->plint_flags.cpei_override_flag; 264 acpi_cpei_override = plintsrc->flags & ACPI_MADT_CPEI_OVERRIDE;
273 } 265 }
274 266
275 /* 267 /*
@@ -324,30 +316,32 @@ unsigned int get_cpei_target_cpu(void)
324} 316}
325 317
326static int __init 318static int __init
327acpi_parse_int_src_ovr(acpi_table_entry_header * header, 319acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
328 const unsigned long end) 320 const unsigned long end)
329{ 321{
330 struct acpi_table_int_src_ovr *p; 322 struct acpi_madt_interrupt_override *p;
331 323
332 p = (struct acpi_table_int_src_ovr *)header; 324 p = (struct acpi_madt_interrupt_override *)header;
333 325
334 if (BAD_MADT_ENTRY(p, end)) 326 if (BAD_MADT_ENTRY(p, end))
335 return -EINVAL; 327 return -EINVAL;
336 328
337 iosapic_override_isa_irq(p->bus_irq, p->global_irq, 329 iosapic_override_isa_irq(p->source_irq, p->global_irq,
338 (p->flags.polarity == 330 ((p->inti_flags & ACPI_MADT_POLARITY_MASK) ==
339 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW, 331 ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
340 (p->flags.trigger == 332 IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
341 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL); 333 ((p->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
334 ACPI_MADT_TRIGGER_EDGE) ?
335 IOSAPIC_EDGE : IOSAPIC_LEVEL);
342 return 0; 336 return 0;
343} 337}
344 338
345static int __init 339static int __init
346acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end) 340acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
347{ 341{
348 struct acpi_table_nmi_src *nmi_src; 342 struct acpi_madt_nmi_source *nmi_src;
349 343
350 nmi_src = (struct acpi_table_nmi_src *)header; 344 nmi_src = (struct acpi_madt_nmi_source *)header;
351 345
352 if (BAD_MADT_ENTRY(nmi_src, end)) 346 if (BAD_MADT_ENTRY(nmi_src, end))
353 return -EINVAL; 347 return -EINVAL;
@@ -371,12 +365,12 @@ static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
371 } 365 }
372} 366}
373 367
374static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size) 368static int __init acpi_parse_madt(struct acpi_table_header *table)
375{ 369{
376 if (!phys_addr || !size) 370 if (!table)
377 return -EINVAL; 371 return -EINVAL;
378 372
379 acpi_madt = (struct acpi_table_madt *)__va(phys_addr); 373 acpi_madt = (struct acpi_table_madt *)table;
380 374
381 acpi_madt_rev = acpi_madt->header.revision; 375 acpi_madt_rev = acpi_madt->header.revision;
382 376
@@ -384,14 +378,14 @@ static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
384#ifdef CONFIG_ITANIUM 378#ifdef CONFIG_ITANIUM
385 has_8259 = 1; /* Firmware on old Itanium systems is broken */ 379 has_8259 = 1; /* Firmware on old Itanium systems is broken */
386#else 380#else
387 has_8259 = acpi_madt->flags.pcat_compat; 381 has_8259 = acpi_madt->flags & ACPI_MADT_PCAT_COMPAT;
388#endif 382#endif
389 iosapic_system_init(has_8259); 383 iosapic_system_init(has_8259);
390 384
391 /* Get base address of IPI Message Block */ 385 /* Get base address of IPI Message Block */
392 386
393 if (acpi_madt->lapic_address) 387 if (acpi_madt->address)
394 ipi_base_addr = ioremap(acpi_madt->lapic_address, 0); 388 ipi_base_addr = ioremap(acpi_madt->address, 0);
395 389
396 printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr); 390 printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr);
397 391
@@ -413,23 +407,24 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
413#define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag)) 407#define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag))
414static struct acpi_table_slit __initdata *slit_table; 408static struct acpi_table_slit __initdata *slit_table;
415 409
416static int get_processor_proximity_domain(struct acpi_table_processor_affinity *pa) 410static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
417{ 411{
418 int pxm; 412 int pxm;
419 413
420 pxm = pa->proximity_domain; 414 pxm = pa->proximity_domain_lo;
421 if (ia64_platform_is("sn2")) 415 if (ia64_platform_is("sn2"))
422 pxm += pa->reserved[0] << 8; 416 pxm += pa->proximity_domain_hi[0] << 8;
423 return pxm; 417 return pxm;
424} 418}
425 419
426static int get_memory_proximity_domain(struct acpi_table_memory_affinity *ma) 420static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
427{ 421{
428 int pxm; 422 int pxm;
429 423
430 pxm = ma->proximity_domain; 424 pxm = ma->proximity_domain;
431 if (ia64_platform_is("sn2")) 425 if (!ia64_platform_is("sn2"))
432 pxm += ma->reserved1[0] << 8; 426 pxm &= 0xff;
427
433 return pxm; 428 return pxm;
434} 429}
435 430
@@ -442,7 +437,7 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
442 u32 len; 437 u32 len;
443 438
444 len = sizeof(struct acpi_table_header) + 8 439 len = sizeof(struct acpi_table_header) + 8
445 + slit->localities * slit->localities; 440 + slit->locality_count * slit->locality_count;
446 if (slit->header.length != len) { 441 if (slit->header.length != len) {
447 printk(KERN_ERR 442 printk(KERN_ERR
448 "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n", 443 "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
@@ -454,11 +449,11 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
454} 449}
455 450
456void __init 451void __init
457acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa) 452acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
458{ 453{
459 int pxm; 454 int pxm;
460 455
461 if (!pa->flags.enabled) 456 if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
462 return; 457 return;
463 458
464 pxm = get_processor_proximity_domain(pa); 459 pxm = get_processor_proximity_domain(pa);
@@ -467,14 +462,14 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
467 pxm_bit_set(pxm); 462 pxm_bit_set(pxm);
468 463
469 node_cpuid[srat_num_cpus].phys_id = 464 node_cpuid[srat_num_cpus].phys_id =
470 (pa->apic_id << 8) | (pa->lsapic_eid); 465 (pa->apic_id << 8) | (pa->local_sapic_eid);
471 /* nid should be overridden as logical node id later */ 466 /* nid should be overridden as logical node id later */
472 node_cpuid[srat_num_cpus].nid = pxm; 467 node_cpuid[srat_num_cpus].nid = pxm;
473 srat_num_cpus++; 468 srat_num_cpus++;
474} 469}
475 470
476void __init 471void __init
477acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma) 472acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
478{ 473{
479 unsigned long paddr, size; 474 unsigned long paddr, size;
480 int pxm; 475 int pxm;
@@ -483,13 +478,11 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
483 pxm = get_memory_proximity_domain(ma); 478 pxm = get_memory_proximity_domain(ma);
484 479
485 /* fill node memory chunk structure */ 480 /* fill node memory chunk structure */
486 paddr = ma->base_addr_hi; 481 paddr = ma->base_address;
487 paddr = (paddr << 32) | ma->base_addr_lo; 482 size = ma->length;
488 size = ma->length_hi;
489 size = (size << 32) | ma->length_lo;
490 483
491 /* Ignore disabled entries */ 484 /* Ignore disabled entries */
492 if (!ma->flags.enabled) 485 if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
493 return; 486 return;
494 487
495 /* record this node in proximity bitmap */ 488 /* record this node in proximity bitmap */
@@ -560,16 +553,16 @@ void __init acpi_numa_arch_fixup(void)
560 if (!slit_table) 553 if (!slit_table)
561 return; 554 return;
562 memset(numa_slit, -1, sizeof(numa_slit)); 555 memset(numa_slit, -1, sizeof(numa_slit));
563 for (i = 0; i < slit_table->localities; i++) { 556 for (i = 0; i < slit_table->locality_count; i++) {
564 if (!pxm_bit_test(i)) 557 if (!pxm_bit_test(i))
565 continue; 558 continue;
566 node_from = pxm_to_node(i); 559 node_from = pxm_to_node(i);
567 for (j = 0; j < slit_table->localities; j++) { 560 for (j = 0; j < slit_table->locality_count; j++) {
568 if (!pxm_bit_test(j)) 561 if (!pxm_bit_test(j))
569 continue; 562 continue;
570 node_to = pxm_to_node(j); 563 node_to = pxm_to_node(j);
571 node_distance(node_from, node_to) = 564 node_distance(node_from, node_to) =
572 slit_table->entry[i * slit_table->localities + j]; 565 slit_table->entry[i * slit_table->locality_count + j];
573 } 566 }
574 } 567 }
575 568
@@ -617,21 +610,21 @@ void acpi_unregister_gsi(u32 gsi)
617 610
618EXPORT_SYMBOL(acpi_unregister_gsi); 611EXPORT_SYMBOL(acpi_unregister_gsi);
619 612
620static int __init acpi_parse_fadt(unsigned long phys_addr, unsigned long size) 613static int __init acpi_parse_fadt(struct acpi_table_header *table)
621{ 614{
622 struct acpi_table_header *fadt_header; 615 struct acpi_table_header *fadt_header;
623 struct fadt_descriptor *fadt; 616 struct acpi_table_fadt *fadt;
624 617
625 if (!phys_addr || !size) 618 if (!table)
626 return -EINVAL; 619 return -EINVAL;
627 620
628 fadt_header = (struct acpi_table_header *)__va(phys_addr); 621 fadt_header = (struct acpi_table_header *)table;
629 if (fadt_header->revision != 3) 622 if (fadt_header->revision != 3)
630 return -ENODEV; /* Only deal with ACPI 2.0 FADT */ 623 return -ENODEV; /* Only deal with ACPI 2.0 FADT */
631 624
632 fadt = (struct fadt_descriptor *)fadt_header; 625 fadt = (struct acpi_table_fadt *)fadt_header;
633 626
634 acpi_register_gsi(fadt->sci_int, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW); 627 acpi_register_gsi(fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
635 return 0; 628 return 0;
636} 629}
637 630
@@ -658,7 +651,7 @@ int __init acpi_boot_init(void)
658 * information -- the successor to MPS tables. 651 * information -- the successor to MPS tables.
659 */ 652 */
660 653
661 if (acpi_table_parse(ACPI_APIC, acpi_parse_madt) < 1) { 654 if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt) < 1) {
662 printk(KERN_ERR PREFIX "Can't find MADT\n"); 655 printk(KERN_ERR PREFIX "Can't find MADT\n");
663 goto skip_madt; 656 goto skip_madt;
664 } 657 }
@@ -666,40 +659,40 @@ int __init acpi_boot_init(void)
666 /* Local APIC */ 659 /* Local APIC */
667 660
668 if (acpi_table_parse_madt 661 if (acpi_table_parse_madt
669 (ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0) < 0) 662 (ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0) < 0)
670 printk(KERN_ERR PREFIX 663 printk(KERN_ERR PREFIX
671 "Error parsing LAPIC address override entry\n"); 664 "Error parsing LAPIC address override entry\n");
672 665
673 if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic, NR_CPUS) 666 if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_lsapic, NR_CPUS)
674 < 1) 667 < 1)
675 printk(KERN_ERR PREFIX 668 printk(KERN_ERR PREFIX
676 "Error parsing MADT - no LAPIC entries\n"); 669 "Error parsing MADT - no LAPIC entries\n");
677 670
678 if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0) 671 if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0)
679 < 0) 672 < 0)
680 printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); 673 printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
681 674
682 /* I/O APIC */ 675 /* I/O APIC */
683 676
684 if (acpi_table_parse_madt 677 if (acpi_table_parse_madt
685 (ACPI_MADT_IOSAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1) 678 (ACPI_MADT_TYPE_IO_SAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1)
686 printk(KERN_ERR PREFIX 679 printk(KERN_ERR PREFIX
687 "Error parsing MADT - no IOSAPIC entries\n"); 680 "Error parsing MADT - no IOSAPIC entries\n");
688 681
689 /* System-Level Interrupt Routing */ 682 /* System-Level Interrupt Routing */
690 683
691 if (acpi_table_parse_madt 684 if (acpi_table_parse_madt
692 (ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src, 685 (ACPI_MADT_TYPE_INTERRUPT_SOURCE, acpi_parse_plat_int_src,
693 ACPI_MAX_PLATFORM_INTERRUPTS) < 0) 686 ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
694 printk(KERN_ERR PREFIX 687 printk(KERN_ERR PREFIX
695 "Error parsing platform interrupt source entry\n"); 688 "Error parsing platform interrupt source entry\n");
696 689
697 if (acpi_table_parse_madt 690 if (acpi_table_parse_madt
698 (ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, 0) < 0) 691 (ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, 0) < 0)
699 printk(KERN_ERR PREFIX 692 printk(KERN_ERR PREFIX
700 "Error parsing interrupt source overrides entry\n"); 693 "Error parsing interrupt source overrides entry\n");
701 694
702 if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, 0) < 0) 695 if (acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, 0) < 0)
703 printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); 696 printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
704 skip_madt: 697 skip_madt:
705 698
@@ -709,7 +702,7 @@ int __init acpi_boot_init(void)
709 * gets interrupts such as power and sleep buttons. If it's not 702 * gets interrupts such as power and sleep buttons. If it's not
710 * on a Legacy interrupt, it needs to be setup. 703 * on a Legacy interrupt, it needs to be setup.
711 */ 704 */
712 if (acpi_table_parse(ACPI_FADT, acpi_parse_fadt) < 1) 705 if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt) < 1)
713 printk(KERN_ERR PREFIX "Can't find FADT\n"); 706 printk(KERN_ERR PREFIX "Can't find FADT\n");
714 707
715#ifdef CONFIG_SMP 708#ifdef CONFIG_SMP
@@ -842,7 +835,7 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
842{ 835{
843 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 836 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
844 union acpi_object *obj; 837 union acpi_object *obj;
845 struct acpi_table_lsapic *lsapic; 838 struct acpi_madt_local_sapic *lsapic;
846 cpumask_t tmp_map; 839 cpumask_t tmp_map;
847 long physid; 840 long physid;
848 int cpu; 841 int cpu;
@@ -854,16 +847,16 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
854 return -EINVAL; 847 return -EINVAL;
855 848
856 obj = buffer.pointer; 849 obj = buffer.pointer;
857 if (obj->type != ACPI_TYPE_BUFFER || 850 if (obj->type != ACPI_TYPE_BUFFER)
858 obj->buffer.length < sizeof(*lsapic)) { 851 {
859 kfree(buffer.pointer); 852 kfree(buffer.pointer);
860 return -EINVAL; 853 return -EINVAL;
861 } 854 }
862 855
863 lsapic = (struct acpi_table_lsapic *)obj->buffer.pointer; 856 lsapic = (struct acpi_madt_local_sapic *)obj->buffer.pointer;
864 857
865 if ((lsapic->header.type != ACPI_MADT_LSAPIC) || 858 if ((lsapic->header.type != ACPI_MADT_TYPE_LOCAL_SAPIC) ||
866 (!lsapic->flags.enabled)) { 859 (!lsapic->lapic_flags & ACPI_MADT_ENABLED)) {
867 kfree(buffer.pointer); 860 kfree(buffer.pointer);
868 return -EINVAL; 861 return -EINVAL;
869 } 862 }
@@ -883,7 +876,6 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
883 876
884 cpu_set(cpu, cpu_present_map); 877 cpu_set(cpu, cpu_present_map);
885 ia64_cpu_to_sapicid[cpu] = physid; 878 ia64_cpu_to_sapicid[cpu] = physid;
886 ia64_acpiid_to_sapicid[lsapic->acpi_id] = ia64_cpu_to_sapicid[cpu];
887 879
888 *pcpu = cpu; 880 *pcpu = cpu;
889 return (0); 881 return (0);
@@ -893,14 +885,6 @@ EXPORT_SYMBOL(acpi_map_lsapic);
893 885
894int acpi_unmap_lsapic(int cpu) 886int acpi_unmap_lsapic(int cpu)
895{ 887{
896 int i;
897
898 for (i = 0; i < MAX_SAPICS; i++) {
899 if (ia64_acpiid_to_sapicid[i] == ia64_cpu_to_sapicid[cpu]) {
900 ia64_acpiid_to_sapicid[i] = -1;
901 break;
902 }
903 }
904 ia64_cpu_to_sapicid[cpu] = -1; 888 ia64_cpu_to_sapicid[cpu] = -1;
905 cpu_clear(cpu, cpu_present_map); 889 cpu_clear(cpu, cpu_present_map);
906 890
@@ -920,7 +904,7 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
920{ 904{
921 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 905 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
922 union acpi_object *obj; 906 union acpi_object *obj;
923 struct acpi_table_iosapic *iosapic; 907 struct acpi_madt_io_sapic *iosapic;
924 unsigned int gsi_base; 908 unsigned int gsi_base;
925 int pxm, node; 909 int pxm, node;
926 910
@@ -938,9 +922,9 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
938 return AE_OK; 922 return AE_OK;
939 } 923 }
940 924
941 iosapic = (struct acpi_table_iosapic *)obj->buffer.pointer; 925 iosapic = (struct acpi_madt_io_sapic *)obj->buffer.pointer;
942 926
943 if (iosapic->header.type != ACPI_MADT_IOSAPIC) { 927 if (iosapic->header.type != ACPI_MADT_TYPE_IO_SAPIC) {
944 kfree(buffer.pointer); 928 kfree(buffer.pointer);
945 return AE_OK; 929 return AE_OK;
946 } 930 }
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index bc2f64d72244..9d92097ce96d 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -79,6 +79,7 @@ crash_save_this_cpu()
79 final_note(buf); 79 final_note(buf);
80} 80}
81 81
82#ifdef CONFIG_SMP
82static int 83static int
83kdump_wait_cpu_freeze(void) 84kdump_wait_cpu_freeze(void)
84{ 85{
@@ -91,6 +92,7 @@ kdump_wait_cpu_freeze(void)
91 } 92 }
92 return 1; 93 return 1;
93} 94}
95#endif
94 96
95void 97void
96machine_crash_shutdown(struct pt_regs *pt) 98machine_crash_shutdown(struct pt_regs *pt)
@@ -116,6 +118,11 @@ machine_crash_shutdown(struct pt_regs *pt)
116static void 118static void
117machine_kdump_on_init(void) 119machine_kdump_on_init(void)
118{ 120{
121 if (!ia64_kimage) {
122 printk(KERN_NOTICE "machine_kdump_on_init(): "
123 "kdump not configured\n");
124 return;
125 }
119 local_irq_disable(); 126 local_irq_disable();
120 kexec_disable_iosapic(); 127 kexec_disable_iosapic();
121 machine_kexec(ia64_kimage); 128 machine_kexec(ia64_kimage);
@@ -132,11 +139,12 @@ kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
132 atomic_inc(&kdump_cpu_freezed); 139 atomic_inc(&kdump_cpu_freezed);
133 kdump_status[cpuid] = 1; 140 kdump_status[cpuid] = 1;
134 mb(); 141 mb();
135 if (cpuid == 0) { 142#ifdef CONFIG_HOTPLUG_CPU
136 for (;;) 143 if (cpuid != 0)
137 cpu_relax();
138 } else
139 ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]); 144 ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]);
145#endif
146 for (;;)
147 cpu_relax();
140} 148}
141 149
142static int 150static int
diff --git a/arch/ia64/kernel/crash_dump.c b/arch/ia64/kernel/crash_dump.c
index 83b8c91c1408..da60e90eeeb1 100644
--- a/arch/ia64/kernel/crash_dump.c
+++ b/arch/ia64/kernel/crash_dump.c
@@ -9,7 +9,8 @@
9#include <linux/errno.h> 9#include <linux/errno.h>
10#include <linux/types.h> 10#include <linux/types.h>
11 11
12#include <linux/uaccess.h> 12#include <asm/page.h>
13#include <asm/uaccess.h>
13 14
14/** 15/**
15 * copy_oldmem_page - copy one page from "oldmem" 16 * copy_oldmem_page - copy one page from "oldmem"
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 0b25a7d4e1e4..6c03928544c2 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -380,7 +380,7 @@ efi_get_pal_addr (void)
380#endif 380#endif
381 return __va(md->phys_addr); 381 return __va(md->phys_addr);
382 } 382 }
383 printk(KERN_WARNING "%s: no PAL-code memory-descriptor found", 383 printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
384 __FUNCTION__); 384 __FUNCTION__);
385 return NULL; 385 return NULL;
386} 386}
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 15234ed3a341..e7873eeae448 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1610,5 +1610,7 @@ sys_call_table:
1610 data8 sys_sync_file_range // 1300 1610 data8 sys_sync_file_range // 1300
1611 data8 sys_tee 1611 data8 sys_tee
1612 data8 sys_vmsplice 1612 data8 sys_vmsplice
1613 data8 sys_ni_syscall // reserved for move_pages
1614 data8 sys_getcpu
1613 1615
1614 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls 1616 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 0fc5fb7865cf..d6aab40c6416 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -925,6 +925,11 @@ iosapic_unregister_intr (unsigned int gsi)
925 /* Clear the interrupt controller descriptor */ 925 /* Clear the interrupt controller descriptor */
926 idesc->chip = &no_irq_type; 926 idesc->chip = &no_irq_type;
927 927
928#ifdef CONFIG_SMP
929 /* Clear affinity */
930 cpus_setall(idesc->affinity);
931#endif
932
928 /* Clear the interrupt information */ 933 /* Clear the interrupt information */
929 memset(&iosapic_intr_info[vector], 0, 934 memset(&iosapic_intr_info[vector], 0,
930 sizeof(struct iosapic_intr_info)); 935 sizeof(struct iosapic_intr_info));
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
index e2ccc9f660c5..4f0f3b8c1ee2 100644
--- a/arch/ia64/kernel/machine_kexec.c
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -14,6 +14,7 @@
14#include <linux/kexec.h> 14#include <linux/kexec.h>
15#include <linux/cpu.h> 15#include <linux/cpu.h>
16#include <linux/irq.h> 16#include <linux/irq.h>
17#include <linux/efi.h>
17#include <asm/mmu_context.h> 18#include <asm/mmu_context.h>
18#include <asm/setup.h> 19#include <asm/setup.h>
19#include <asm/delay.h> 20#include <asm/delay.h>
@@ -68,22 +69,10 @@ void machine_kexec_cleanup(struct kimage *image)
68{ 69{
69} 70}
70 71
71void machine_shutdown(void)
72{
73 int cpu;
74
75 for_each_online_cpu(cpu) {
76 if (cpu != smp_processor_id())
77 cpu_down(cpu);
78 }
79 kexec_disable_iosapic();
80}
81
82/* 72/*
83 * Do not allocate memory (or fail in any way) in machine_kexec(). 73 * Do not allocate memory (or fail in any way) in machine_kexec().
84 * We are past the point of no return, committed to rebooting now. 74 * We are past the point of no return, committed to rebooting now.
85 */ 75 */
86extern void *efi_get_pal_addr(void);
87static void ia64_machine_kexec(struct unw_frame_info *info, void *arg) 76static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
88{ 77{
89 struct kimage *image = arg; 78 struct kimage *image = arg;
@@ -93,6 +82,7 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
93 unsigned long vector; 82 unsigned long vector;
94 int ii; 83 int ii;
95 84
85 BUG_ON(!image);
96 if (image->type == KEXEC_TYPE_CRASH) { 86 if (image->type == KEXEC_TYPE_CRASH) {
97 crash_save_this_cpu(); 87 crash_save_this_cpu();
98 current->thread.ksp = (__u64)info->sw - 16; 88 current->thread.ksp = (__u64)info->sw - 16;
@@ -131,6 +121,7 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
131 121
132void machine_kexec(struct kimage *image) 122void machine_kexec(struct kimage *image)
133{ 123{
124 BUG_ON(!image);
134 unw_init_running(ia64_machine_kexec, image); 125 unw_init_running(ia64_machine_kexec, image);
135 for(;;); 126 for(;;);
136} 127}
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 822e59a1b822..0d05450c91c4 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -64,12 +64,17 @@ static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
64} 64}
65#endif /* CONFIG_SMP */ 65#endif /* CONFIG_SMP */
66 66
67int ia64_setup_msi_irq(unsigned int irq, struct pci_dev *pdev) 67int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
68{ 68{
69 struct msi_msg msg; 69 struct msi_msg msg;
70 unsigned long dest_phys_id; 70 unsigned long dest_phys_id;
71 unsigned int vector; 71 unsigned int irq, vector;
72 72
73 irq = create_irq();
74 if (irq < 0)
75 return irq;
76
77 set_irq_msi(irq, desc);
73 dest_phys_id = cpu_physical_id(first_cpu(cpu_online_map)); 78 dest_phys_id = cpu_physical_id(first_cpu(cpu_online_map));
74 vector = irq; 79 vector = irq;
75 80
@@ -89,12 +94,12 @@ int ia64_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
89 write_msi_msg(irq, &msg); 94 write_msi_msg(irq, &msg);
90 set_irq_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq); 95 set_irq_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);
91 96
92 return 0; 97 return irq;
93} 98}
94 99
95void ia64_teardown_msi_irq(unsigned int irq) 100void ia64_teardown_msi_irq(unsigned int irq)
96{ 101{
97 return; /* no-op */ 102 destroy_irq(irq);
98} 103}
99 104
100static void ia64_ack_msi_irq(unsigned int irq) 105static void ia64_ack_msi_irq(unsigned int irq)
@@ -126,12 +131,12 @@ static struct irq_chip ia64_msi_chip = {
126}; 131};
127 132
128 133
129int arch_setup_msi_irq(unsigned int irq, struct pci_dev *pdev) 134int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
130{ 135{
131 if (platform_setup_msi_irq) 136 if (platform_setup_msi_irq)
132 return platform_setup_msi_irq(irq, pdev); 137 return platform_setup_msi_irq(pdev, desc);
133 138
134 return ia64_setup_msi_irq(irq, pdev); 139 return ia64_setup_msi_irq(pdev, desc);
135} 140}
136 141
137void arch_teardown_msi_irq(unsigned int irq) 142void arch_teardown_msi_irq(unsigned int irq)
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 17685abaf496..ae96d4176995 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -34,6 +34,7 @@
34#include <asm/ia32.h> 34#include <asm/ia32.h>
35#include <asm/irq.h> 35#include <asm/irq.h>
36#include <asm/kdebug.h> 36#include <asm/kdebug.h>
37#include <asm/kexec.h>
37#include <asm/pgalloc.h> 38#include <asm/pgalloc.h>
38#include <asm/processor.h> 39#include <asm/processor.h>
39#include <asm/sal.h> 40#include <asm/sal.h>
@@ -803,6 +804,21 @@ cpu_halt (void)
803 ia64_pal_halt(min_power_state); 804 ia64_pal_halt(min_power_state);
804} 805}
805 806
807void machine_shutdown(void)
808{
809#ifdef CONFIG_HOTPLUG_CPU
810 int cpu;
811
812 for_each_online_cpu(cpu) {
813 if (cpu != smp_processor_id())
814 cpu_down(cpu);
815 }
816#endif
817#ifdef CONFIG_KEXEC
818 kexec_disable_iosapic();
819#endif
820}
821
806void 822void
807machine_restart (char *restart_cmd) 823machine_restart (char *restart_cmd)
808{ 824{
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index aa705e46b974..3f8918782e0c 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -607,7 +607,7 @@ find_thread_for_addr (struct task_struct *child, unsigned long addr)
607 */ 607 */
608 list_for_each_safe(this, next, &current->children) { 608 list_for_each_safe(this, next, &current->children) {
609 p = list_entry(this, struct task_struct, sibling); 609 p = list_entry(this, struct task_struct, sibling);
610 if (p->mm != mm) 610 if (p->tgid != child->tgid)
611 continue; 611 continue;
612 if (thread_matches(p, addr)) { 612 if (thread_matches(p, addr)) {
613 child = p; 613 child = p;
@@ -1405,6 +1405,7 @@ ptrace_disable (struct task_struct *child)
1405 struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child)); 1405 struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1406 1406
1407 /* make sure the single step/taken-branch trap bits are not set: */ 1407 /* make sure the single step/taken-branch trap bits are not set: */
1408 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
1408 child_psr->ss = 0; 1409 child_psr->ss = 0;
1409 child_psr->tb = 0; 1410 child_psr->tb = 0;
1410} 1411}
@@ -1525,6 +1526,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
1525 * Make sure the single step/taken-branch trap bits 1526 * Make sure the single step/taken-branch trap bits
1526 * are not set: 1527 * are not set:
1527 */ 1528 */
1529 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
1528 ia64_psr(pt)->ss = 0; 1530 ia64_psr(pt)->ss = 0;
1529 ia64_psr(pt)->tb = 0; 1531 ia64_psr(pt)->tb = 0;
1530 1532
@@ -1556,6 +1558,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
1556 goto out_tsk; 1558 goto out_tsk;
1557 1559
1558 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); 1560 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
1561 set_tsk_thread_flag(child, TIF_SINGLESTEP);
1559 if (request == PTRACE_SINGLESTEP) { 1562 if (request == PTRACE_SINGLESTEP) {
1560 ia64_psr(pt)->ss = 1; 1563 ia64_psr(pt)->ss = 1;
1561 } else { 1564 } else {
@@ -1595,13 +1598,9 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
1595} 1598}
1596 1599
1597 1600
1598void 1601static void
1599syscall_trace (void) 1602syscall_trace (void)
1600{ 1603{
1601 if (!test_thread_flag(TIF_SYSCALL_TRACE))
1602 return;
1603 if (!(current->ptrace & PT_PTRACED))
1604 return;
1605 /* 1604 /*
1606 * The 0x80 provides a way for the tracing parent to 1605 * The 0x80 provides a way for the tracing parent to
1607 * distinguish between a syscall stop and SIGTRAP delivery. 1606 * distinguish between a syscall stop and SIGTRAP delivery.
@@ -1664,7 +1663,8 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
1664 audit_syscall_exit(success, result); 1663 audit_syscall_exit(success, result);
1665 } 1664 }
1666 1665
1667 if (test_thread_flag(TIF_SYSCALL_TRACE) 1666 if ((test_thread_flag(TIF_SYSCALL_TRACE)
1667 || test_thread_flag(TIF_SINGLESTEP))
1668 && (current->ptrace & PT_PTRACED)) 1668 && (current->ptrace & PT_PTRACED))
1669 syscall_trace(); 1669 syscall_trace();
1670} 1670}
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index ad567b8d432e..83c2629e1c4c 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -569,34 +569,31 @@ show_cpuinfo (struct seq_file *m, void *v)
569 { 1UL << 1, "spontaneous deferral"}, 569 { 1UL << 1, "spontaneous deferral"},
570 { 1UL << 2, "16-byte atomic ops" } 570 { 1UL << 2, "16-byte atomic ops" }
571 }; 571 };
572 char features[128], *cp, sep; 572 char features[128], *cp, *sep;
573 struct cpuinfo_ia64 *c = v; 573 struct cpuinfo_ia64 *c = v;
574 unsigned long mask; 574 unsigned long mask;
575 unsigned long proc_freq; 575 unsigned long proc_freq;
576 int i; 576 int i, size;
577 577
578 mask = c->features; 578 mask = c->features;
579 579
580 /* build the feature string: */ 580 /* build the feature string: */
581 memcpy(features, " standard", 10); 581 memcpy(features, "standard", 9);
582 cp = features; 582 cp = features;
583 sep = 0; 583 size = sizeof(features);
584 for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) { 584 sep = "";
585 for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
585 if (mask & feature_bits[i].mask) { 586 if (mask & feature_bits[i].mask) {
586 if (sep) 587 cp += snprintf(cp, size, "%s%s", sep,
587 *cp++ = sep; 588 feature_bits[i].feature_name),
588 sep = ','; 589 sep = ", ";
589 *cp++ = ' ';
590 strcpy(cp, feature_bits[i].feature_name);
591 cp += strlen(feature_bits[i].feature_name);
592 mask &= ~feature_bits[i].mask; 590 mask &= ~feature_bits[i].mask;
591 size = sizeof(features) - (cp - features);
593 } 592 }
594 } 593 }
595 if (mask) { 594 if (mask && size > 1) {
596 /* print unknown features as a hex value: */ 595 /* print unknown features as a hex value */
597 if (sep) 596 snprintf(cp, size, "%s0x%lx", sep, mask);
598 *cp++ = sep;
599 sprintf(cp, " 0x%lx", mask);
600 } 597 }
601 598
602 proc_freq = cpufreq_quick_get(cpunum); 599 proc_freq = cpufreq_quick_get(cpunum);
@@ -612,7 +609,7 @@ show_cpuinfo (struct seq_file *m, void *v)
612 "model name : %s\n" 609 "model name : %s\n"
613 "revision : %u\n" 610 "revision : %u\n"
614 "archrev : %u\n" 611 "archrev : %u\n"
615 "features :%s\n" /* don't change this---it _is_ right! */ 612 "features : %s\n"
616 "cpu number : %lu\n" 613 "cpu number : %lu\n"
617 "cpu regs : %u\n" 614 "cpu regs : %u\n"
618 "cpu MHz : %lu.%06lu\n" 615 "cpu MHz : %lu.%06lu\n"
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index d6083a0936f4..8f3d0066f446 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -157,6 +157,7 @@ SECTIONS
157 } 157 }
158#endif 158#endif
159 159
160 . = ALIGN(8);
160 __con_initcall_start = .; 161 __con_initcall_start = .;
161 .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) 162 .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET)
162 { *(.con_initcall.init) } 163 { *(.con_initcall.init) }
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 1e79551231b9..63e6d49c5813 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -30,47 +30,69 @@ static unsigned long max_gap;
30#endif 30#endif
31 31
32/** 32/**
33 * show_mem - display a memory statistics summary 33 * show_mem - give short summary of memory stats
34 * 34 *
35 * Just walks the pages in the system and describes where they're allocated. 35 * Shows a simple page count of reserved and used pages in the system.
36 * For discontig machines, it does this on a per-pgdat basis.
36 */ 37 */
37void 38void show_mem(void)
38show_mem (void)
39{ 39{
40 int i, total = 0, reserved = 0; 40 int i, total_reserved = 0;
41 int shared = 0, cached = 0; 41 int total_shared = 0, total_cached = 0;
42 unsigned long total_present = 0;
43 pg_data_t *pgdat;
42 44
43 printk(KERN_INFO "Mem-info:\n"); 45 printk(KERN_INFO "Mem-info:\n");
44 show_free_areas(); 46 show_free_areas();
45
46 printk(KERN_INFO "Free swap: %6ldkB\n", 47 printk(KERN_INFO "Free swap: %6ldkB\n",
47 nr_swap_pages<<(PAGE_SHIFT-10)); 48 nr_swap_pages<<(PAGE_SHIFT-10));
48 i = max_mapnr; 49 printk(KERN_INFO "Node memory in pages:\n");
49 for (i = 0; i < max_mapnr; i++) { 50 for_each_online_pgdat(pgdat) {
50 if (!pfn_valid(i)) { 51 unsigned long present;
52 unsigned long flags;
53 int shared = 0, cached = 0, reserved = 0;
54
55 pgdat_resize_lock(pgdat, &flags);
56 present = pgdat->node_present_pages;
57 for(i = 0; i < pgdat->node_spanned_pages; i++) {
58 struct page *page;
59 if (pfn_valid(pgdat->node_start_pfn + i))
60 page = pfn_to_page(pgdat->node_start_pfn + i);
61 else {
51#ifdef CONFIG_VIRTUAL_MEM_MAP 62#ifdef CONFIG_VIRTUAL_MEM_MAP
52 if (max_gap < LARGE_GAP) 63 if (max_gap < LARGE_GAP)
53 continue; 64 continue;
54 i = vmemmap_find_next_valid_pfn(0, i) - 1;
55#endif 65#endif
56 continue; 66 i = vmemmap_find_next_valid_pfn(pgdat->node_id,
67 i) - 1;
68 continue;
69 }
70 if (PageReserved(page))
71 reserved++;
72 else if (PageSwapCache(page))
73 cached++;
74 else if (page_count(page))
75 shared += page_count(page)-1;
57 } 76 }
58 total++; 77 pgdat_resize_unlock(pgdat, &flags);
59 if (PageReserved(mem_map+i)) 78 total_present += present;
60 reserved++; 79 total_reserved += reserved;
61 else if (PageSwapCache(mem_map+i)) 80 total_cached += cached;
62 cached++; 81 total_shared += shared;
63 else if (page_count(mem_map + i)) 82 printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, "
64 shared += page_count(mem_map + i) - 1; 83 "shrd: %10d, swpd: %10d\n", pgdat->node_id,
84 present, reserved, shared, cached);
65 } 85 }
66 printk(KERN_INFO "%d pages of RAM\n", total); 86 printk(KERN_INFO "%ld pages of RAM\n", total_present);
67 printk(KERN_INFO "%d reserved pages\n", reserved); 87 printk(KERN_INFO "%d reserved pages\n", total_reserved);
68 printk(KERN_INFO "%d pages shared\n", shared); 88 printk(KERN_INFO "%d pages shared\n", total_shared);
69 printk(KERN_INFO "%d pages swap cached\n", cached); 89 printk(KERN_INFO "%d pages swap cached\n", total_cached);
70 printk(KERN_INFO "%ld pages in page table cache\n", 90 printk(KERN_INFO "Total of %ld pages in page table cache\n",
71 pgtable_quicklist_total_size()); 91 pgtable_quicklist_total_size());
92 printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
72} 93}
73 94
95
74/* physical address where the bootmem map is located */ 96/* physical address where the bootmem map is located */
75unsigned long bootmap_start; 97unsigned long bootmap_start;
76 98
@@ -177,7 +199,7 @@ find_memory (void)
177 199
178#ifdef CONFIG_CRASH_DUMP 200#ifdef CONFIG_CRASH_DUMP
179 /* If we are doing a crash dump, we still need to know the real mem 201 /* If we are doing a crash dump, we still need to know the real mem
180 * size before original memory map is * reset. */ 202 * size before original memory map is reset. */
181 saved_max_pfn = max_pfn; 203 saved_max_pfn = max_pfn;
182#endif 204#endif
183} 205}
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 96722cb1b49d..6eae596c509d 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -412,37 +412,6 @@ static void __init memory_less_nodes(void)
412 return; 412 return;
413} 413}
414 414
415#ifdef CONFIG_SPARSEMEM
416/**
417 * register_sparse_mem - notify SPARSEMEM that this memory range exists.
418 * @start: physical start of range
419 * @end: physical end of range
420 * @arg: unused
421 *
422 * Simply calls SPARSEMEM to register memory section(s).
423 */
424static int __init register_sparse_mem(unsigned long start, unsigned long end,
425 void *arg)
426{
427 int nid;
428
429 start = __pa(start) >> PAGE_SHIFT;
430 end = __pa(end) >> PAGE_SHIFT;
431 nid = early_pfn_to_nid(start);
432 memory_present(nid, start, end);
433
434 return 0;
435}
436
437static void __init arch_sparse_init(void)
438{
439 efi_memmap_walk(register_sparse_mem, NULL);
440 sparse_init();
441}
442#else
443#define arch_sparse_init() do {} while (0)
444#endif
445
446/** 415/**
447 * find_memory - walk the EFI memory map and setup the bootmem allocator 416 * find_memory - walk the EFI memory map and setup the bootmem allocator
448 * 417 *
@@ -473,6 +442,9 @@ void __init find_memory(void)
473 node_clear(node, memory_less_mask); 442 node_clear(node, memory_less_mask);
474 mem_data[node].min_pfn = ~0UL; 443 mem_data[node].min_pfn = ~0UL;
475 } 444 }
445
446 efi_memmap_walk(register_active_ranges, NULL);
447
476 /* 448 /*
477 * Initialize the boot memory maps in reverse order since that's 449 * Initialize the boot memory maps in reverse order since that's
478 * what the bootmem allocator expects 450 * what the bootmem allocator expects
@@ -506,6 +478,12 @@ void __init find_memory(void)
506 max_pfn = max_low_pfn; 478 max_pfn = max_low_pfn;
507 479
508 find_initrd(); 480 find_initrd();
481
482#ifdef CONFIG_CRASH_DUMP
483 /* If we are doing a crash dump, we still need to know the real mem
484 * size before original memory map is reset. */
485 saved_max_pfn = max_pfn;
486#endif
509} 487}
510 488
511#ifdef CONFIG_SMP 489#ifdef CONFIG_SMP
@@ -654,7 +632,6 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
654{ 632{
655 unsigned long end = start + len; 633 unsigned long end = start + len;
656 634
657 add_active_range(node, start >> PAGE_SHIFT, end >> PAGE_SHIFT);
658 mem_data[node].num_physpages += len >> PAGE_SHIFT; 635 mem_data[node].num_physpages += len >> PAGE_SHIFT;
659 if (start <= __pa(MAX_DMA_ADDRESS)) 636 if (start <= __pa(MAX_DMA_ADDRESS))
660 mem_data[node].num_dma_physpages += 637 mem_data[node].num_dma_physpages +=
@@ -686,10 +663,11 @@ void __init paging_init(void)
686 663
687 max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT; 664 max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
688 665
689 arch_sparse_init();
690
691 efi_memmap_walk(filter_rsvd_memory, count_node_pages); 666 efi_memmap_walk(filter_rsvd_memory, count_node_pages);
692 667
668 sparse_memory_present_with_active_regions(MAX_NUMNODES);
669 sparse_init();
670
693#ifdef CONFIG_VIRTUAL_MEM_MAP 671#ifdef CONFIG_VIRTUAL_MEM_MAP
694 vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * 672 vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
695 sizeof(struct page)); 673 sizeof(struct page));
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1373fae7657f..faaca21a3718 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -19,6 +19,7 @@
19#include <linux/swap.h> 19#include <linux/swap.h>
20#include <linux/proc_fs.h> 20#include <linux/proc_fs.h>
21#include <linux/bitops.h> 21#include <linux/bitops.h>
22#include <linux/kexec.h>
22 23
23#include <asm/a.out.h> 24#include <asm/a.out.h>
24#include <asm/dma.h> 25#include <asm/dma.h>
@@ -128,6 +129,25 @@ lazy_mmu_prot_update (pte_t pte)
128 set_bit(PG_arch_1, &page->flags); /* mark page as clean */ 129 set_bit(PG_arch_1, &page->flags); /* mark page as clean */
129} 130}
130 131
132/*
133 * Since DMA is i-cache coherent, any (complete) pages that were written via
134 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
135 * flush them when they get mapped into an executable vm-area.
136 */
137void
138dma_mark_clean(void *addr, size_t size)
139{
140 unsigned long pg_addr, end;
141
142 pg_addr = PAGE_ALIGN((unsigned long) addr);
143 end = (unsigned long) addr + size;
144 while (pg_addr + PAGE_SIZE <= end) {
145 struct page *page = virt_to_page(pg_addr);
146 set_bit(PG_arch_1, &page->flags);
147 pg_addr += PAGE_SIZE;
148 }
149}
150
131inline void 151inline void
132ia64_set_rbs_bot (void) 152ia64_set_rbs_bot (void)
133{ 153{
@@ -595,13 +615,27 @@ find_largest_hole (u64 start, u64 end, void *arg)
595 return 0; 615 return 0;
596} 616}
597 617
618#endif /* CONFIG_VIRTUAL_MEM_MAP */
619
598int __init 620int __init
599register_active_ranges(u64 start, u64 end, void *arg) 621register_active_ranges(u64 start, u64 end, void *arg)
600{ 622{
601 add_active_range(0, __pa(start) >> PAGE_SHIFT, __pa(end) >> PAGE_SHIFT); 623 int nid = paddr_to_nid(__pa(start));
624
625 if (nid < 0)
626 nid = 0;
627#ifdef CONFIG_KEXEC
628 if (start > crashk_res.start && start < crashk_res.end)
629 start = crashk_res.end;
630 if (end > crashk_res.start && end < crashk_res.end)
631 end = crashk_res.start;
632#endif
633
634 if (start < end)
635 add_active_range(nid, __pa(start) >> PAGE_SHIFT,
636 __pa(end) >> PAGE_SHIFT);
602 return 0; 637 return 0;
603} 638}
604#endif /* CONFIG_VIRTUAL_MEM_MAP */
605 639
606static int __init 640static int __init
607count_reserved_pages (u64 start, u64 end, void *arg) 641count_reserved_pages (u64 start, u64 end, void *arg)
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c
index abca6bd7962f..fcf7f93c4b61 100644
--- a/arch/ia64/sn/kernel/huberror.c
+++ b/arch/ia64/sn/kernel/huberror.c
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1992 - 1997, 2000,2002-2005 Silicon Graphics, Inc. All rights reserved. 6 * Copyright (C) 1992 - 1997, 2000,2002-2007 Silicon Graphics, Inc. All rights reserved.
7 */ 7 */
8 8
9#include <linux/types.h> 9#include <linux/types.h>
@@ -38,12 +38,20 @@ static irqreturn_t hub_eint_handler(int irq, void *arg)
38 (u64) nasid, 0, 0, 0, 0, 0, 0); 38 (u64) nasid, 0, 0, 0, 0, 0, 0);
39 39
40 if ((int)ret_stuff.v0) 40 if ((int)ret_stuff.v0)
41 panic("hubii_eint_handler(): Fatal TIO Error"); 41 panic("%s: Fatal %s Error", __FUNCTION__,
42 ((nasid & 1) ? "TIO" : "HUBII"));
42 43
43 if (!(nasid & 1)) /* Not a TIO, handle CRB errors */ 44 if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
44 (void)hubiio_crb_error_handler(hubdev_info); 45 (void)hubiio_crb_error_handler(hubdev_info);
45 } else 46 } else
46 bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid))); 47 if (nasid & 1) { /* TIO errors */
48 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
49 (u64) nasid, 0, 0, 0, 0, 0, 0);
50
51 if ((int)ret_stuff.v0)
52 panic("%s: Fatal TIO Error", __FUNCTION__);
53 } else
54 bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid)));
47 55
48 return IRQ_HANDLED; 56 return IRQ_HANDLED;
49} 57}
diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c
index cb96b4ea7df6..8c331ca6e5c9 100644
--- a/arch/ia64/sn/kernel/io_acpi_init.c
+++ b/arch/ia64/sn/kernel/io_acpi_init.c
@@ -13,6 +13,7 @@
13#include <asm/sn/sn_sal.h> 13#include <asm/sn/sn_sal.h>
14#include "xtalk/hubdev.h" 14#include "xtalk/hubdev.h"
15#include <linux/acpi.h> 15#include <linux/acpi.h>
16#include <acpi/acnamesp.h>
16 17
17 18
18/* 19/*
@@ -31,6 +32,12 @@ struct acpi_vendor_uuid sn_uuid = {
31 0xa2, 0x7c, 0x08, 0x00, 0x69, 0x13, 0xea, 0x51 }, 32 0xa2, 0x7c, 0x08, 0x00, 0x69, 0x13, 0xea, 0x51 },
32}; 33};
33 34
35struct sn_pcidev_match {
36 u8 bus;
37 unsigned int devfn;
38 acpi_handle handle;
39};
40
34/* 41/*
35 * Perform the early IO init in PROM. 42 * Perform the early IO init in PROM.
36 */ 43 */
@@ -119,9 +126,11 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
119 status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS, 126 status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
120 &sn_uuid, &buffer); 127 &sn_uuid, &buffer);
121 if (ACPI_FAILURE(status)) { 128 if (ACPI_FAILURE(status)) {
122 printk(KERN_ERR "get_acpi_pcibus_ptr: " 129 printk(KERN_ERR "%s: "
123 "get_acpi_bussoft_info() failed: %d\n", 130 "acpi_get_vendor_resource() failed (0x%x) for: ",
124 status); 131 __FUNCTION__, status);
132 acpi_ns_print_node_pathname(handle, NULL);
133 printk("\n");
125 return NULL; 134 return NULL;
126 } 135 }
127 resource = buffer.pointer; 136 resource = buffer.pointer;
@@ -130,8 +139,8 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
130 if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) != 139 if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
131 sizeof(struct pcibus_bussoft *)) { 140 sizeof(struct pcibus_bussoft *)) {
132 printk(KERN_ERR 141 printk(KERN_ERR
133 "get_acpi_bussoft_ptr: Invalid vendor data " 142 "%s: Invalid vendor data length %d\n",
134 "length %d\n", vendor->byte_length); 143 __FUNCTION__, vendor->byte_length);
135 kfree(buffer.pointer); 144 kfree(buffer.pointer);
136 return NULL; 145 return NULL;
137 } 146 }
@@ -143,34 +152,254 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
143} 152}
144 153
145/* 154/*
146 * sn_acpi_bus_fixup 155 * sn_extract_device_info - Extract the pcidev_info and the sn_irq_info
156 * pointers from the vendor resource using the
157 * provided acpi handle, and copy the structures
158 * into the argument buffers.
147 */ 159 */
148void 160static int
149sn_acpi_bus_fixup(struct pci_bus *bus) 161sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info,
162 struct sn_irq_info **sn_irq_info)
150{ 163{
151 struct pci_dev *pci_dev = NULL; 164 u64 addr;
152 struct pcibus_bussoft *prom_bussoft_ptr; 165 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
153 extern void sn_common_bus_fixup(struct pci_bus *, 166 struct sn_irq_info *irq_info, *irq_info_prom;
154 struct pcibus_bussoft *); 167 struct pcidev_info *pcidev_ptr, *pcidev_prom_ptr;
168 struct acpi_resource *resource;
169 int ret = 0;
170 acpi_status status;
171 struct acpi_resource_vendor_typed *vendor;
155 172
156 if (!bus->parent) { /* If root bus */ 173 /*
157 prom_bussoft_ptr = sn_get_bussoft_ptr(bus); 174 * The pointer to this device's pcidev_info structure in
158 if (prom_bussoft_ptr == NULL) { 175 * the PROM, is in the vendor resource.
176 */
177 status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
178 &sn_uuid, &buffer);
179 if (ACPI_FAILURE(status)) {
180 printk(KERN_ERR
181 "%s: acpi_get_vendor_resource() failed (0x%x) for: ",
182 __FUNCTION__, status);
183 acpi_ns_print_node_pathname(handle, NULL);
184 printk("\n");
185 return 1;
186 }
187
188 resource = buffer.pointer;
189 vendor = &resource->data.vendor_typed;
190 if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
191 sizeof(struct pci_devdev_info *)) {
192 printk(KERN_ERR
193 "%s: Invalid vendor data length: %d for: ",
194 __FUNCTION__, vendor->byte_length);
195 acpi_ns_print_node_pathname(handle, NULL);
196 printk("\n");
197 ret = 1;
198 goto exit;
199 }
200
201 pcidev_ptr = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
202 if (!pcidev_ptr)
203 panic("%s: Unable to alloc memory for pcidev_info", __FUNCTION__);
204
205 memcpy(&addr, vendor->byte_data, sizeof(struct pcidev_info *));
206 pcidev_prom_ptr = __va(addr);
207 memcpy(pcidev_ptr, pcidev_prom_ptr, sizeof(struct pcidev_info));
208
209 /* Get the IRQ info */
210 irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
211 if (!irq_info)
212 panic("%s: Unable to alloc memory for sn_irq_info", __FUNCTION__);
213
214 if (pcidev_ptr->pdi_sn_irq_info) {
215 irq_info_prom = __va(pcidev_ptr->pdi_sn_irq_info);
216 memcpy(irq_info, irq_info_prom, sizeof(struct sn_irq_info));
217 }
218
219 *pcidev_info = pcidev_ptr;
220 *sn_irq_info = irq_info;
221
222exit:
223 kfree(buffer.pointer);
224 return ret;
225}
226
227static unsigned int
228get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle)
229{
230 unsigned long adr;
231 acpi_handle child;
232 unsigned int devfn;
233 int function;
234 acpi_handle parent;
235 int slot;
236 acpi_status status;
237
238 /*
239 * Do an upward search to find the root bus device, and
240 * obtain the host devfn from the previous child device.
241 */
242 child = device_handle;
243 while (child) {
244 status = acpi_get_parent(child, &parent);
245 if (ACPI_FAILURE(status)) {
246 printk(KERN_ERR "%s: acpi_get_parent() failed "
247 "(0x%x) for: ", __FUNCTION__, status);
248 acpi_ns_print_node_pathname(child, NULL);
249 printk("\n");
250 panic("%s: Unable to find host devfn\n", __FUNCTION__);
251 }
252 if (parent == rootbus_handle)
253 break;
254 child = parent;
255 }
256 if (!child) {
257 printk(KERN_ERR "%s: Unable to find root bus for: ",
258 __FUNCTION__);
259 acpi_ns_print_node_pathname(device_handle, NULL);
260 printk("\n");
261 BUG();
262 }
263
264 status = acpi_evaluate_integer(child, METHOD_NAME__ADR, NULL, &adr);
265 if (ACPI_FAILURE(status)) {
266 printk(KERN_ERR "%s: Unable to get _ADR (0x%x) for: ",
267 __FUNCTION__, status);
268 acpi_ns_print_node_pathname(child, NULL);
269 printk("\n");
270 panic("%s: Unable to find host devfn\n", __FUNCTION__);
271 }
272
273 slot = (adr >> 16) & 0xffff;
274 function = adr & 0xffff;
275 devfn = PCI_DEVFN(slot, function);
276 return devfn;
277}
278
279/*
280 * find_matching_device - Callback routine to find the ACPI device
281 * that matches up with our pci_dev device.
282 * Matching is done on bus number and devfn.
283 * To find the bus number for a particular
284 * ACPI device, we must look at the _BBN method
285 * of its parent.
286 */
287static acpi_status
288find_matching_device(acpi_handle handle, u32 lvl, void *context, void **rv)
289{
290 unsigned long bbn = -1;
291 unsigned long adr;
292 acpi_handle parent = NULL;
293 acpi_status status;
294 unsigned int devfn;
295 int function;
296 int slot;
297 struct sn_pcidev_match *info = context;
298
299 status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
300 &adr);
301 if (ACPI_SUCCESS(status)) {
302 status = acpi_get_parent(handle, &parent);
303 if (ACPI_FAILURE(status)) {
159 printk(KERN_ERR 304 printk(KERN_ERR
160 "sn_pci_fixup_bus: 0x%04x:0x%02x Unable to " 305 "%s: acpi_get_parent() failed (0x%x) for: ",
161 "obtain prom_bussoft_ptr\n", 306 __FUNCTION__, status);
162 pci_domain_nr(bus), bus->number); 307 acpi_ns_print_node_pathname(handle, NULL);
163 return; 308 printk("\n");
309 return AE_OK;
310 }
311 status = acpi_evaluate_integer(parent, METHOD_NAME__BBN,
312 NULL, &bbn);
313 if (ACPI_FAILURE(status)) {
314 printk(KERN_ERR
315 "%s: Failed to find _BBN in parent of: ",
316 __FUNCTION__);
317 acpi_ns_print_node_pathname(handle, NULL);
318 printk("\n");
319 return AE_OK;
320 }
321
322 slot = (adr >> 16) & 0xffff;
323 function = adr & 0xffff;
324 devfn = PCI_DEVFN(slot, function);
325 if ((info->devfn == devfn) && (info->bus == bbn)) {
326 /* We have a match! */
327 info->handle = handle;
328 return 1;
164 } 329 }
165 sn_common_bus_fixup(bus, prom_bussoft_ptr);
166 } 330 }
167 list_for_each_entry(pci_dev, &bus->devices, bus_list) { 331 return AE_OK;
168 sn_pci_fixup_slot(pci_dev); 332}
333
334/*
335 * sn_acpi_get_pcidev_info - Search ACPI namespace for the acpi
336 * device matching the specified pci_dev,
337 * and return the pcidev info and irq info.
338 */
339int
340sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info,
341 struct sn_irq_info **sn_irq_info)
342{
343 unsigned int host_devfn;
344 struct sn_pcidev_match pcidev_match;
345 acpi_handle rootbus_handle;
346 unsigned long segment;
347 acpi_status status;
348
349 rootbus_handle = PCI_CONTROLLER(dev)->acpi_handle;
350 status = acpi_evaluate_integer(rootbus_handle, METHOD_NAME__SEG, NULL,
351 &segment);
352 if (ACPI_SUCCESS(status)) {
353 if (segment != pci_domain_nr(dev)) {
354 printk(KERN_ERR
355 "%s: Segment number mismatch, 0x%lx vs 0x%x for: ",
356 __FUNCTION__, segment, pci_domain_nr(dev));
357 acpi_ns_print_node_pathname(rootbus_handle, NULL);
358 printk("\n");
359 return 1;
360 }
361 } else {
362 printk(KERN_ERR "%s: Unable to get __SEG from: ",
363 __FUNCTION__);
364 acpi_ns_print_node_pathname(rootbus_handle, NULL);
365 printk("\n");
366 return 1;
367 }
368
369 /*
370 * We want to search all devices in this segment/domain
371 * of the ACPI namespace for the matching ACPI device,
372 * which holds the pcidev_info pointer in its vendor resource.
373 */
374 pcidev_match.bus = dev->bus->number;
375 pcidev_match.devfn = dev->devfn;
376 pcidev_match.handle = NULL;
377
378 acpi_walk_namespace(ACPI_TYPE_DEVICE, rootbus_handle, ACPI_UINT32_MAX,
379 find_matching_device, &pcidev_match, NULL);
380
381 if (!pcidev_match.handle) {
382 printk(KERN_ERR
383 "%s: Could not find matching ACPI device for %s.\n",
384 __FUNCTION__, pci_name(dev));
385 return 1;
169 } 386 }
387
388 if (sn_extract_device_info(pcidev_match.handle, pcidev_info, sn_irq_info))
389 return 1;
390
391 /* Build up the pcidev_info.pdi_slot_host_handle */
392 host_devfn = get_host_devfn(pcidev_match.handle, rootbus_handle);
393 (*pcidev_info)->pdi_slot_host_handle =
394 ((unsigned long) pci_domain_nr(dev) << 40) |
395 /* bus == 0 */
396 host_devfn;
397 return 0;
170} 398}
171 399
172/* 400/*
173 * sn_acpi_slot_fixup - Perform any SN specific slot fixup. 401 * sn_acpi_slot_fixup - Obtain the pcidev_info and sn_irq_info.
402 * Perform any SN specific slot fixup.
174 * At present there does not appear to be 403 * At present there does not appear to be
175 * any generic way to handle a ROM image 404 * any generic way to handle a ROM image
176 * that has been shadowed by the PROM, so 405 * that has been shadowed by the PROM, so
@@ -179,11 +408,18 @@ sn_acpi_bus_fixup(struct pci_bus *bus)
179 */ 408 */
180 409
181void 410void
182sn_acpi_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info) 411sn_acpi_slot_fixup(struct pci_dev *dev)
183{ 412{
184 void __iomem *addr; 413 void __iomem *addr;
414 struct pcidev_info *pcidev_info = NULL;
415 struct sn_irq_info *sn_irq_info = NULL;
185 size_t size; 416 size_t size;
186 417
418 if (sn_acpi_get_pcidev_info(dev, &pcidev_info, &sn_irq_info)) {
419 panic("%s: Failure obtaining pcidev_info for %s\n",
420 __FUNCTION__, pci_name(dev));
421 }
422
187 if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) { 423 if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) {
188 /* 424 /*
189 * A valid ROM image exists and has been shadowed by the 425 * A valid ROM image exists and has been shadowed by the
@@ -200,8 +436,11 @@ sn_acpi_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
200 (unsigned long) addr + size; 436 (unsigned long) addr + size;
201 dev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_BIOS_COPY; 437 dev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_BIOS_COPY;
202 } 438 }
439 sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
203} 440}
204 441
442EXPORT_SYMBOL(sn_acpi_slot_fixup);
443
205static struct acpi_driver acpi_sn_hubdev_driver = { 444static struct acpi_driver acpi_sn_hubdev_driver = {
206 .name = "SGI HUBDEV Driver", 445 .name = "SGI HUBDEV Driver",
207 .ids = "SGIHUB,SGITIO", 446 .ids = "SGIHUB,SGITIO",
@@ -212,6 +451,33 @@ static struct acpi_driver acpi_sn_hubdev_driver = {
212 451
213 452
214/* 453/*
454 * sn_acpi_bus_fixup - Perform SN specific setup of software structs
455 * (pcibus_bussoft, pcidev_info) and hardware
456 * registers, for the specified bus and devices under it.
457 */
458void
459sn_acpi_bus_fixup(struct pci_bus *bus)
460{
461 struct pci_dev *pci_dev = NULL;
462 struct pcibus_bussoft *prom_bussoft_ptr;
463
464 if (!bus->parent) { /* If root bus */
465 prom_bussoft_ptr = sn_get_bussoft_ptr(bus);
466 if (prom_bussoft_ptr == NULL) {
467 printk(KERN_ERR
468 "%s: 0x%04x:0x%02x Unable to "
469 "obtain prom_bussoft_ptr\n",
470 __FUNCTION__, pci_domain_nr(bus), bus->number);
471 return;
472 }
473 sn_common_bus_fixup(bus, prom_bussoft_ptr);
474 }
475 list_for_each_entry(pci_dev, &bus->devices, bus_list) {
476 sn_acpi_slot_fixup(pci_dev);
477 }
478}
479
480/*
215 * sn_io_acpi_init - PROM has ACPI support for IO, defining at a minimum the 481 * sn_io_acpi_init - PROM has ACPI support for IO, defining at a minimum the
216 * nodes and root buses in the DSDT. As a result, bus scanning 482 * nodes and root buses in the DSDT. As a result, bus scanning
217 * will be initiated by the Linux ACPI code. 483 * will be initiated by the Linux ACPI code.
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index d4dd8f4b6b8d..d48bcd83253c 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -26,14 +26,10 @@
26#include <linux/acpi.h> 26#include <linux/acpi.h>
27#include <asm/sn/sn2/sn_hwperf.h> 27#include <asm/sn/sn2/sn_hwperf.h>
28#include <asm/sn/acpi.h> 28#include <asm/sn/acpi.h>
29#include "acpi/acglobal.h"
29 30
30extern void sn_init_cpei_timer(void); 31extern void sn_init_cpei_timer(void);
31extern void register_sn_procfs(void); 32extern void register_sn_procfs(void);
32extern void sn_acpi_bus_fixup(struct pci_bus *);
33extern void sn_bus_fixup(struct pci_bus *);
34extern void sn_acpi_slot_fixup(struct pci_dev *, struct pcidev_info *);
35extern void sn_more_slot_fixup(struct pci_dev *, struct pcidev_info *);
36extern void sn_legacy_pci_window_fixup(struct pci_controller *, u64, u64);
37extern void sn_io_acpi_init(void); 33extern void sn_io_acpi_init(void);
38extern void sn_io_init(void); 34extern void sn_io_init(void);
39 35
@@ -48,6 +44,9 @@ struct sysdata_el {
48 44
49int sn_ioif_inited; /* SN I/O infrastructure initialized? */ 45int sn_ioif_inited; /* SN I/O infrastructure initialized? */
50 46
47int sn_acpi_rev; /* SN ACPI revision */
48EXPORT_SYMBOL_GPL(sn_acpi_rev);
49
51struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */ 50struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
52 51
53/* 52/*
@@ -99,25 +98,6 @@ sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
99} 98}
100 99
101/* 100/*
102 * Retrieve the pci device information given the bus and device|function number.
103 */
104static inline u64
105sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
106 u64 sn_irq_info)
107{
108 struct ia64_sal_retval ret_stuff;
109 ret_stuff.status = 0;
110 ret_stuff.v0 = 0;
111
112 SAL_CALL_NOLOCK(ret_stuff,
113 (u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
114 (u64) segment, (u64) bus_number, (u64) devfn,
115 (u64) pci_dev,
116 sn_irq_info, 0, 0);
117 return ret_stuff.v0;
118}
119
120/*
121 * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified 101 * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
122 * device. 102 * device.
123 */ 103 */
@@ -249,50 +229,25 @@ void sn_pci_unfixup_slot(struct pci_dev *dev)
249} 229}
250 230
251/* 231/*
252 * sn_pci_fixup_slot() - This routine sets up a slot's resources consistent 232 * sn_pci_fixup_slot()
253 * with the Linux PCI abstraction layer. Resources
254 * acquired from our PCI provider include PIO maps
255 * to BAR space and interrupt objects.
256 */ 233 */
257void sn_pci_fixup_slot(struct pci_dev *dev) 234void sn_pci_fixup_slot(struct pci_dev *dev, struct pcidev_info *pcidev_info,
235 struct sn_irq_info *sn_irq_info)
258{ 236{
259 int segment = pci_domain_nr(dev->bus); 237 int segment = pci_domain_nr(dev->bus);
260 int status = 0;
261 struct pcibus_bussoft *bs; 238 struct pcibus_bussoft *bs;
262 struct pci_bus *host_pci_bus; 239 struct pci_bus *host_pci_bus;
263 struct pci_dev *host_pci_dev; 240 struct pci_dev *host_pci_dev;
264 struct pcidev_info *pcidev_info; 241 unsigned int bus_no, devfn;
265 struct sn_irq_info *sn_irq_info;
266 unsigned int bus_no, devfn;
267 242
268 pci_dev_get(dev); /* for the sysdata pointer */ 243 pci_dev_get(dev); /* for the sysdata pointer */
269 pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
270 if (!pcidev_info)
271 BUG(); /* Cannot afford to run out of memory */
272
273 sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
274 if (!sn_irq_info)
275 BUG(); /* Cannot afford to run out of memory */
276
277 /* Call to retrieve pci device information needed by kernel. */
278 status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
279 dev->devfn,
280 (u64) __pa(pcidev_info),
281 (u64) __pa(sn_irq_info));
282 if (status)
283 BUG(); /* Cannot get platform pci device information */
284 244
285 /* Add pcidev_info to list in pci_controller.platform_data */ 245 /* Add pcidev_info to list in pci_controller.platform_data */
286 list_add_tail(&pcidev_info->pdi_list, 246 list_add_tail(&pcidev_info->pdi_list,
287 &(SN_PLATFORM_DATA(dev->bus)->pcidev_info)); 247 &(SN_PLATFORM_DATA(dev->bus)->pcidev_info));
288
289 if (SN_ACPI_BASE_SUPPORT())
290 sn_acpi_slot_fixup(dev, pcidev_info);
291 else
292 sn_more_slot_fixup(dev, pcidev_info);
293 /* 248 /*
294 * Using the PROMs values for the PCI host bus, get the Linux 249 * Using the PROMs values for the PCI host bus, get the Linux
295 * PCI host_pci_dev struct and set up host bus linkages 250 * PCI host_pci_dev struct and set up host bus linkages
296 */ 251 */
297 252
298 bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff; 253 bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff;
@@ -489,11 +444,6 @@ void sn_generate_path(struct pci_bus *pci_bus, char *address)
489 sprintf(address, "%s^%d", address, geo_slot(geoid)); 444 sprintf(address, "%s^%d", address, geo_slot(geoid));
490} 445}
491 446
492/*
493 * sn_pci_fixup_bus() - Perform SN specific setup of software structs
494 * (pcibus_bussoft, pcidev_info) and hardware
495 * registers, for the specified bus and devices under it.
496 */
497void __devinit 447void __devinit
498sn_pci_fixup_bus(struct pci_bus *bus) 448sn_pci_fixup_bus(struct pci_bus *bus)
499{ 449{
@@ -519,6 +469,15 @@ sn_io_early_init(void)
519 if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM()) 469 if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
520 return 0; 470 return 0;
521 471
472 /* we set the acpi revision to that of the DSDT table OEM rev. */
473 {
474 struct acpi_table_header *header = NULL;
475
476 acpi_get_table_by_index(ACPI_TABLE_INDEX_DSDT, &header);
477 BUG_ON(header == NULL);
478 sn_acpi_rev = header->oem_revision;
479 }
480
522 /* 481 /*
523 * prime sn_pci_provider[]. Individial provider init routines will 482 * prime sn_pci_provider[]. Individial provider init routines will
524 * override their respective default entries. 483 * override their respective default entries.
@@ -544,8 +503,12 @@ sn_io_early_init(void)
544 register_sn_procfs(); 503 register_sn_procfs();
545#endif 504#endif
546 505
547 printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n", 506 {
548 acpi_gbl_DSDT->oem_revision); 507 struct acpi_table_header *header;
508 (void)acpi_get_table_by_index(ACPI_TABLE_INDEX_DSDT, &header);
509 printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n",
510 header->oem_revision);
511 }
549 if (SN_ACPI_BASE_SUPPORT()) 512 if (SN_ACPI_BASE_SUPPORT())
550 sn_io_acpi_init(); 513 sn_io_acpi_init();
551 else 514 else
@@ -605,7 +568,6 @@ sn_io_late_init(void)
605 568
606fs_initcall(sn_io_late_init); 569fs_initcall(sn_io_late_init);
607 570
608EXPORT_SYMBOL(sn_pci_fixup_slot);
609EXPORT_SYMBOL(sn_pci_unfixup_slot); 571EXPORT_SYMBOL(sn_pci_unfixup_slot);
610EXPORT_SYMBOL(sn_bus_store_sysdata); 572EXPORT_SYMBOL(sn_bus_store_sysdata);
611EXPORT_SYMBOL(sn_bus_free_sysdata); 573EXPORT_SYMBOL(sn_bus_free_sysdata);
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 9ad843e0383b..600be3ebae05 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -56,6 +56,25 @@ static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
56 return ret_stuff.v0; 56 return ret_stuff.v0;
57} 57}
58 58
59/*
60 * Retrieve the pci device information given the bus and device|function number.
61 */
62static inline u64
63sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
64 u64 sn_irq_info)
65{
66 struct ia64_sal_retval ret_stuff;
67 ret_stuff.status = 0;
68 ret_stuff.v0 = 0;
69
70 SAL_CALL_NOLOCK(ret_stuff,
71 (u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
72 (u64) segment, (u64) bus_number, (u64) devfn,
73 (u64) pci_dev,
74 sn_irq_info, 0, 0);
75 return ret_stuff.v0;
76}
77
59 78
60/* 79/*
61 * sn_fixup_ionodes() - This routine initializes the HUB data structure for 80 * sn_fixup_ionodes() - This routine initializes the HUB data structure for
@@ -172,18 +191,40 @@ sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
172} 191}
173 192
174/* 193/*
175 * sn_more_slot_fixup() - We are not running with an ACPI capable PROM, 194 * sn_io_slot_fixup() - We are not running with an ACPI capable PROM,
176 * and need to convert the pci_dev->resource 195 * and need to convert the pci_dev->resource
177 * 'start' and 'end' addresses to mapped addresses, 196 * 'start' and 'end' addresses to mapped addresses,
178 * and setup the pci_controller->window array entries. 197 * and setup the pci_controller->window array entries.
179 */ 198 */
180void 199void
181sn_more_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info) 200sn_io_slot_fixup(struct pci_dev *dev)
182{ 201{
183 unsigned int count = 0; 202 unsigned int count = 0;
184 int idx; 203 int idx;
185 s64 pci_addrs[PCI_ROM_RESOURCE + 1]; 204 s64 pci_addrs[PCI_ROM_RESOURCE + 1];
186 unsigned long addr, end, size, start; 205 unsigned long addr, end, size, start;
206 struct pcidev_info *pcidev_info;
207 struct sn_irq_info *sn_irq_info;
208 int status;
209
210 pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
211 if (!pcidev_info)
212 panic("%s: Unable to alloc memory for pcidev_info", __FUNCTION__);
213
214 sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
215 if (!sn_irq_info)
216 panic("%s: Unable to alloc memory for sn_irq_info", __FUNCTION__);
217
218 /* Call to retrieve pci device information needed by kernel. */
219 status = sal_get_pcidev_info((u64) pci_domain_nr(dev),
220 (u64) dev->bus->number,
221 dev->devfn,
222 (u64) __pa(pcidev_info),
223 (u64) __pa(sn_irq_info));
224
225 if (status)
226 BUG(); /* Cannot get platform pci device information */
227
187 228
188 /* Copy over PIO Mapped Addresses */ 229 /* Copy over PIO Mapped Addresses */
189 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) { 230 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
@@ -219,8 +260,12 @@ sn_more_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
219 */ 260 */
220 if (count > 0) 261 if (count > 0)
221 sn_pci_window_fixup(dev, count, pci_addrs); 262 sn_pci_window_fixup(dev, count, pci_addrs);
263
264 sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
222} 265}
223 266
267EXPORT_SYMBOL(sn_io_slot_fixup);
268
224/* 269/*
225 * sn_pci_controller_fixup() - This routine sets up a bus's resources 270 * sn_pci_controller_fixup() - This routine sets up a bus's resources
226 * consistent with the Linux PCI abstraction layer. 271 * consistent with the Linux PCI abstraction layer.
@@ -272,9 +317,6 @@ sn_bus_fixup(struct pci_bus *bus)
272{ 317{
273 struct pci_dev *pci_dev = NULL; 318 struct pci_dev *pci_dev = NULL;
274 struct pcibus_bussoft *prom_bussoft_ptr; 319 struct pcibus_bussoft *prom_bussoft_ptr;
275 extern void sn_common_bus_fixup(struct pci_bus *,
276 struct pcibus_bussoft *);
277
278 320
279 if (!bus->parent) { /* If root bus */ 321 if (!bus->parent) { /* If root bus */
280 prom_bussoft_ptr = PCI_CONTROLLER(bus)->platform_data; 322 prom_bussoft_ptr = PCI_CONTROLLER(bus)->platform_data;
@@ -291,7 +333,7 @@ sn_bus_fixup(struct pci_bus *bus)
291 prom_bussoft_ptr->bs_legacy_mem); 333 prom_bussoft_ptr->bs_legacy_mem);
292 } 334 }
293 list_for_each_entry(pci_dev, &bus->devices, bus_list) { 335 list_for_each_entry(pci_dev, &bus->devices, bus_list) {
294 sn_pci_fixup_slot(pci_dev); 336 sn_io_slot_fixup(pci_dev);
295 } 337 }
296 338
297} 339}
diff --git a/arch/ia64/sn/kernel/iomv.c b/arch/ia64/sn/kernel/iomv.c
index 4aa4f301d56d..ab7e2fd40798 100644
--- a/arch/ia64/sn/kernel/iomv.c
+++ b/arch/ia64/sn/kernel/iomv.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
@@ -26,9 +26,10 @@
26 * @port: port to convert 26 * @port: port to convert
27 * 27 *
28 * Legacy in/out instructions are converted to ld/st instructions 28 * Legacy in/out instructions are converted to ld/st instructions
29 * on IA64. This routine will convert a port number into a valid 29 * on IA64. This routine will convert a port number into a valid
30 * SN i/o address. Used by sn_in*() and sn_out*(). 30 * SN i/o address. Used by sn_in*() and sn_out*().
31 */ 31 */
32
32void *sn_io_addr(unsigned long port) 33void *sn_io_addr(unsigned long port)
33{ 34{
34 if (!IS_RUNNING_ON_SIMULATOR()) { 35 if (!IS_RUNNING_ON_SIMULATOR()) {
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index b3a435fd70fb..ea3dc38d73fd 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -59,13 +59,12 @@ void sn_teardown_msi_irq(unsigned int irq)
59 sn_intr_free(nasid, widget, sn_irq_info); 59 sn_intr_free(nasid, widget, sn_irq_info);
60 sn_msi_info[irq].sn_irq_info = NULL; 60 sn_msi_info[irq].sn_irq_info = NULL;
61 61
62 return; 62 destroy_irq(irq);
63} 63}
64 64
65int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev) 65int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
66{ 66{
67 struct msi_msg msg; 67 struct msi_msg msg;
68 struct msi_desc *entry;
69 int widget; 68 int widget;
70 int status; 69 int status;
71 nasid_t nasid; 70 nasid_t nasid;
@@ -73,8 +72,8 @@ int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
73 struct sn_irq_info *sn_irq_info; 72 struct sn_irq_info *sn_irq_info;
74 struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev); 73 struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev);
75 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); 74 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
75 int irq;
76 76
77 entry = get_irq_data(irq);
78 if (!entry->msi_attrib.is_64) 77 if (!entry->msi_attrib.is_64)
79 return -EINVAL; 78 return -EINVAL;
80 79
@@ -84,6 +83,11 @@ int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
84 if (provider == NULL || provider->dma_map_consistent == NULL) 83 if (provider == NULL || provider->dma_map_consistent == NULL)
85 return -EINVAL; 84 return -EINVAL;
86 85
86 irq = create_irq();
87 if (irq < 0)
88 return irq;
89
90 set_irq_msi(irq, entry);
87 /* 91 /*
88 * Set up the vector plumbing. Let the prom (via sn_intr_alloc) 92 * Set up the vector plumbing. Let the prom (via sn_intr_alloc)
89 * decide which cpu to direct this msi at by default. 93 * decide which cpu to direct this msi at by default.
@@ -95,12 +99,15 @@ int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
95 SWIN_WIDGETNUM(bussoft->bs_base); 99 SWIN_WIDGETNUM(bussoft->bs_base);
96 100
97 sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL); 101 sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
98 if (! sn_irq_info) 102 if (! sn_irq_info) {
103 destroy_irq(irq);
99 return -ENOMEM; 104 return -ENOMEM;
105 }
100 106
101 status = sn_intr_alloc(nasid, widget, sn_irq_info, irq, -1, -1); 107 status = sn_intr_alloc(nasid, widget, sn_irq_info, irq, -1, -1);
102 if (status) { 108 if (status) {
103 kfree(sn_irq_info); 109 kfree(sn_irq_info);
110 destroy_irq(irq);
104 return -ENOMEM; 111 return -ENOMEM;
105 } 112 }
106 113
@@ -121,6 +128,7 @@ int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
121 if (! bus_addr) { 128 if (! bus_addr) {
122 sn_intr_free(nasid, widget, sn_irq_info); 129 sn_intr_free(nasid, widget, sn_irq_info);
123 kfree(sn_irq_info); 130 kfree(sn_irq_info);
131 destroy_irq(irq);
124 return -ENOMEM; 132 return -ENOMEM;
125 } 133 }
126 134
@@ -139,7 +147,7 @@ int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
139 write_msi_msg(irq, &msg); 147 write_msi_msg(irq, &msg);
140 set_irq_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq); 148 set_irq_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);
141 149
142 return 0; 150 return irq;
143} 151}
144 152
145#ifdef CONFIG_SMP 153#ifdef CONFIG_SMP
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index 6846dc9b432d..04a8256017eb 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -20,7 +20,8 @@
20#include "xtalk/hubdev.h" 20#include "xtalk/hubdev.h"
21 21
22int 22int
23sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp) 23sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp,
24 char **ssdt)
24{ 25{
25 struct ia64_sal_retval ret_stuff; 26 struct ia64_sal_retval ret_stuff;
26 u64 busnum; 27 u64 busnum;
@@ -32,7 +33,8 @@ sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp)
32 segment = soft->pbi_buscommon.bs_persist_segment; 33 segment = soft->pbi_buscommon.bs_persist_segment;
33 busnum = soft->pbi_buscommon.bs_persist_busnum; 34 busnum = soft->pbi_buscommon.bs_persist_busnum;
34 SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment, 35 SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment,
35 busnum, (u64) device, (u64) resp, 0, 0, 0); 36 busnum, (u64) device, (u64) resp, (u64)ia64_tpa(ssdt),
37 0, 0);
36 38
37 return (int)ret_stuff.v0; 39 return (int)ret_stuff.v0;
38} 40}
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index bbd386f572d9..44a0224c32dd 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -575,6 +575,7 @@ config SGI_IP27
575 select DMA_IP27 575 select DMA_IP27
576 select EARLY_PRINTK 576 select EARLY_PRINTK
577 select HW_HAS_PCI 577 select HW_HAS_PCI
578 select NR_CPUS_DEFAULT_64
578 select PCI_DOMAINS 579 select PCI_DOMAINS
579 select SYS_HAS_CPU_R10000 580 select SYS_HAS_CPU_R10000
580 select SYS_SUPPORTS_64BIT_KERNEL 581 select SYS_SUPPORTS_64BIT_KERNEL
@@ -612,6 +613,7 @@ config SIBYTE_BIGSUR
612 bool "Sibyte BCM91480B-BigSur" 613 bool "Sibyte BCM91480B-BigSur"
613 select BOOT_ELF32 614 select BOOT_ELF32
614 select DMA_COHERENT 615 select DMA_COHERENT
616 select NR_CPUS_DEFAULT_4
615 select PCI_DOMAINS 617 select PCI_DOMAINS
616 select SIBYTE_BCM1x80 618 select SIBYTE_BCM1x80
617 select SWAP_IO_SPACE 619 select SWAP_IO_SPACE
@@ -623,6 +625,7 @@ config SIBYTE_SWARM
623 bool "Sibyte BCM91250A-SWARM" 625 bool "Sibyte BCM91250A-SWARM"
624 select BOOT_ELF32 626 select BOOT_ELF32
625 select DMA_COHERENT 627 select DMA_COHERENT
628 select NR_CPUS_DEFAULT_2
626 select SIBYTE_SB1250 629 select SIBYTE_SB1250
627 select SWAP_IO_SPACE 630 select SWAP_IO_SPACE
628 select SYS_HAS_CPU_SB1 631 select SYS_HAS_CPU_SB1
@@ -635,6 +638,7 @@ config SIBYTE_SENTOSA
635 depends on EXPERIMENTAL 638 depends on EXPERIMENTAL
636 select BOOT_ELF32 639 select BOOT_ELF32
637 select DMA_COHERENT 640 select DMA_COHERENT
641 select NR_CPUS_DEFAULT_2
638 select SIBYTE_SB1250 642 select SIBYTE_SB1250
639 select SWAP_IO_SPACE 643 select SWAP_IO_SPACE
640 select SYS_HAS_CPU_SB1 644 select SYS_HAS_CPU_SB1
@@ -668,6 +672,7 @@ config SIBYTE_PTSWARM
668 depends on EXPERIMENTAL 672 depends on EXPERIMENTAL
669 select BOOT_ELF32 673 select BOOT_ELF32
670 select DMA_COHERENT 674 select DMA_COHERENT
675 select NR_CPUS_DEFAULT_2
671 select SIBYTE_SB1250 676 select SIBYTE_SB1250
672 select SWAP_IO_SPACE 677 select SWAP_IO_SPACE
673 select SYS_HAS_CPU_SB1 678 select SYS_HAS_CPU_SB1
@@ -680,6 +685,7 @@ config SIBYTE_LITTLESUR
680 depends on EXPERIMENTAL 685 depends on EXPERIMENTAL
681 select BOOT_ELF32 686 select BOOT_ELF32
682 select DMA_COHERENT 687 select DMA_COHERENT
688 select NR_CPUS_DEFAULT_2
683 select SIBYTE_SB1250 689 select SIBYTE_SB1250
684 select SWAP_IO_SPACE 690 select SWAP_IO_SPACE
685 select SYS_HAS_CPU_SB1 691 select SYS_HAS_CPU_SB1
@@ -790,23 +796,6 @@ config TOSHIBA_RBTX4938
790 796
791endchoice 797endchoice
792 798
793config KEXEC
794 bool "Kexec system call (EXPERIMENTAL)"
795 depends on EXPERIMENTAL
796 help
797 kexec is a system call that implements the ability to shutdown your
798 current kernel, and to start another kernel. It is like a reboot
799 but it is indepedent of the system firmware. And like a reboot
800 you can start any kernel with it, not just Linux.
801
802 The name comes from the similiarity to the exec system call.
803
804 It is an ongoing process to be certain the hardware in a machine
805 is properly shutdown, so do not be surprised if this code does not
806 initially work for you. It may help to enable device hotplugging
807 support. As of this writing the exact hardware interface is
808 strongly in flux, so no good recommendation can be made.
809
810source "arch/mips/ddb5xxx/Kconfig" 799source "arch/mips/ddb5xxx/Kconfig"
811source "arch/mips/gt64120/ev64120/Kconfig" 800source "arch/mips/gt64120/ev64120/Kconfig"
812source "arch/mips/jazz/Kconfig" 801source "arch/mips/jazz/Kconfig"
@@ -1541,6 +1530,8 @@ config MIPS_MT_SMTC
1541 select CPU_MIPSR2_IRQ_VI 1530 select CPU_MIPSR2_IRQ_VI
1542 select CPU_MIPSR2_SRS 1531 select CPU_MIPSR2_SRS
1543 select MIPS_MT 1532 select MIPS_MT
1533 select NR_CPUS_DEFAULT_2
1534 select NR_CPUS_DEFAULT_8
1544 select SMP 1535 select SMP
1545 select SYS_SUPPORTS_SMP 1536 select SYS_SUPPORTS_SMP
1546 help 1537 help
@@ -1756,13 +1747,34 @@ config SMP
1756config SYS_SUPPORTS_SMP 1747config SYS_SUPPORTS_SMP
1757 bool 1748 bool
1758 1749
1750config NR_CPUS_DEFAULT_2
1751 bool
1752
1753config NR_CPUS_DEFAULT_4
1754 bool
1755
1756config NR_CPUS_DEFAULT_8
1757 bool
1758
1759config NR_CPUS_DEFAULT_16
1760 bool
1761
1762config NR_CPUS_DEFAULT_32
1763 bool
1764
1765config NR_CPUS_DEFAULT_64
1766 bool
1767
1759config NR_CPUS 1768config NR_CPUS
1760 int "Maximum number of CPUs (2-64)" 1769 int "Maximum number of CPUs (2-64)"
1761 range 2 64 1770 range 2 64
1762 depends on SMP 1771 depends on SMP
1763 default "64" if SGI_IP27 1772 default "2" if NR_CPUS_DEFAULT_2
1764 default "2" 1773 default "4" if NR_CPUS_DEFAULT_4
1765 default "8" if MIPS_MT_SMTC 1774 default "8" if NR_CPUS_DEFAULT_8
1775 default "16" if NR_CPUS_DEFAULT_16
1776 default "32" if NR_CPUS_DEFAULT_32
1777 default "64" if NR_CPUS_DEFAULT_64
1766 help 1778 help
1767 This allows you to specify the maximum number of CPUs which this 1779 This allows you to specify the maximum number of CPUs which this
1768 kernel will support. The maximum supported value is 32 for 32-bit 1780 kernel will support. The maximum supported value is 32 for 32-bit
@@ -1859,6 +1871,40 @@ config MIPS_INSANE_LARGE
1859 This will result in additional memory usage, so it is not 1871 This will result in additional memory usage, so it is not
1860 recommended for normal users. 1872 recommended for normal users.
1861 1873
1874config KEXEC
1875 bool "Kexec system call (EXPERIMENTAL)"
1876 depends on EXPERIMENTAL
1877 help
1878 kexec is a system call that implements the ability to shutdown your
1879 current kernel, and to start another kernel. It is like a reboot
1880 but it is indepedent of the system firmware. And like a reboot
1881 you can start any kernel with it, not just Linux.
1882
1883 The name comes from the similiarity to the exec system call.
1884
1885 It is an ongoing process to be certain the hardware in a machine
1886 is properly shutdown, so do not be surprised if this code does not
1887 initially work for you. It may help to enable device hotplugging
1888 support. As of this writing the exact hardware interface is
1889 strongly in flux, so no good recommendation can be made.
1890
1891config SECCOMP
1892 bool "Enable seccomp to safely compute untrusted bytecode"
1893 depends on PROC_FS && BROKEN
1894 default y
1895 help
1896 This kernel feature is useful for number crunching applications
1897 that may need to compute untrusted bytecode during their
1898 execution. By using pipes or other transports made available to
1899 the process as file descriptors supporting the read/write
1900 syscalls, it's possible to isolate those applications in
1901 their own address space using seccomp. Once seccomp is
1902 enabled via /proc/<pid>/seccomp, it cannot be disabled
1903 and the task is only allowed to execute a few safe syscalls
1904 defined by each seccomp mode.
1905
1906 If unsure, say Y. Only embedded should say N here.
1907
1862endmenu 1908endmenu
1863 1909
1864config RWSEM_GENERIC_SPINLOCK 1910config RWSEM_GENERIC_SPINLOCK
@@ -2025,23 +2071,6 @@ config BINFMT_ELF32
2025 bool 2071 bool
2026 default y if MIPS32_O32 || MIPS32_N32 2072 default y if MIPS32_O32 || MIPS32_N32
2027 2073
2028config SECCOMP
2029 bool "Enable seccomp to safely compute untrusted bytecode"
2030 depends on PROC_FS && BROKEN
2031 default y
2032 help
2033 This kernel feature is useful for number crunching applications
2034 that may need to compute untrusted bytecode during their
2035 execution. By using pipes or other transports made available to
2036 the process as file descriptors supporting the read/write
2037 syscalls, it's possible to isolate those applications in
2038 their own address space using seccomp. Once seccomp is
2039 enabled via /proc/<pid>/seccomp, it cannot be disabled
2040 and the task is only allowed to execute a few safe syscalls
2041 defined by each seccomp mode.
2042
2043 If unsure, say Y. Only embedded should say N here.
2044
2045config PM 2074config PM
2046 bool "Power Management support (EXPERIMENTAL)" 2075 bool "Power Management support (EXPERIMENTAL)"
2047 depends on EXPERIMENTAL && SOC_AU1X00 2076 depends on EXPERIMENTAL && SOC_AU1X00
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 5d6afb52d904..9351f1c04a9d 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -22,10 +22,10 @@ config CMDLINE
22 string "Default kernel command string" 22 string "Default kernel command string"
23 default "" 23 default ""
24 help 24 help
25 On some platforms, there is currently no way for the boot loader to 25 On some platforms, there is currently no way for the boot loader to
26 pass arguments to the kernel. For these platforms, you can supply 26 pass arguments to the kernel. For these platforms, you can supply
27 some command-line options at build time by entering them here. In 27 some command-line options at build time by entering them here. In
28 other cases you can specify kernel args so that you don't have 28 other cases you can specify kernel args so that you don't have
29 to set them up in board prom initialization routines. 29 to set them up in board prom initialization routines.
30 30
31config DEBUG_STACK_USAGE 31config DEBUG_STACK_USAGE
diff --git a/arch/mips/arc/identify.c b/arch/mips/arc/identify.c
index 3ba7c47f9f23..4b907369b0f9 100644
--- a/arch/mips/arc/identify.c
+++ b/arch/mips/arc/identify.c
@@ -77,7 +77,7 @@ static struct smatch * __init string_to_mach(const char *s)
77{ 77{
78 int i; 78 int i;
79 79
80 for (i = 0; i < (sizeof(mach_table) / sizeof (mach_table[0])); i++) { 80 for (i = 0; i < ARRAY_SIZE(mach_table); i++) {
81 if (!strcmp(s, mach_table[i].arcname)) 81 if (!strcmp(s, mach_table[i].arcname))
82 return &mach_table[i]; 82 return &mach_table[i];
83 } 83 }
diff --git a/arch/mips/arc/memory.c b/arch/mips/arc/memory.c
index 8a9ef58cc399..456cb81a32d9 100644
--- a/arch/mips/arc/memory.c
+++ b/arch/mips/arc/memory.c
@@ -141,30 +141,20 @@ void __init prom_meminit(void)
141 } 141 }
142} 142}
143 143
144unsigned long __init prom_free_prom_memory(void) 144void __init prom_free_prom_memory(void)
145{ 145{
146 unsigned long freed = 0;
147 unsigned long addr; 146 unsigned long addr;
148 int i; 147 int i;
149 148
150 if (prom_flags & PROM_FLAG_DONT_FREE_TEMP) 149 if (prom_flags & PROM_FLAG_DONT_FREE_TEMP)
151 return 0; 150 return;
152 151
153 for (i = 0; i < boot_mem_map.nr_map; i++) { 152 for (i = 0; i < boot_mem_map.nr_map; i++) {
154 if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA) 153 if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
155 continue; 154 continue;
156 155
157 addr = boot_mem_map.map[i].addr; 156 addr = boot_mem_map.map[i].addr;
158 while (addr < boot_mem_map.map[i].addr 157 free_init_pages("prom memory",
159 + boot_mem_map.map[i].size) { 158 addr, addr + boot_mem_map.map[i].size);
160 ClearPageReserved(virt_to_page(__va(addr)));
161 init_page_count(virt_to_page(__va(addr)));
162 free_page((unsigned long)__va(addr));
163 addr += PAGE_SIZE;
164 freed += PAGE_SIZE;
165 }
166 } 159 }
167 printk(KERN_INFO "Freeing prom memory: %ldkb freed\n", freed >> 10);
168
169 return freed;
170} 160}
diff --git a/arch/mips/au1000/common/irq.c b/arch/mips/au1000/common/irq.c
index 9cf7b6715836..ea6e99fbe2f7 100644
--- a/arch/mips/au1000/common/irq.c
+++ b/arch/mips/au1000/common/irq.c
@@ -233,7 +233,7 @@ void restore_local_and_enable(int controller, unsigned long mask)
233 233
234 234
235static struct irq_chip rise_edge_irq_type = { 235static struct irq_chip rise_edge_irq_type = {
236 .typename = "Au1000 Rise Edge", 236 .name = "Au1000 Rise Edge",
237 .ack = mask_and_ack_rise_edge_irq, 237 .ack = mask_and_ack_rise_edge_irq,
238 .mask = local_disable_irq, 238 .mask = local_disable_irq,
239 .mask_ack = mask_and_ack_rise_edge_irq, 239 .mask_ack = mask_and_ack_rise_edge_irq,
@@ -242,7 +242,7 @@ static struct irq_chip rise_edge_irq_type = {
242}; 242};
243 243
244static struct irq_chip fall_edge_irq_type = { 244static struct irq_chip fall_edge_irq_type = {
245 .typename = "Au1000 Fall Edge", 245 .name = "Au1000 Fall Edge",
246 .ack = mask_and_ack_fall_edge_irq, 246 .ack = mask_and_ack_fall_edge_irq,
247 .mask = local_disable_irq, 247 .mask = local_disable_irq,
248 .mask_ack = mask_and_ack_fall_edge_irq, 248 .mask_ack = mask_and_ack_fall_edge_irq,
@@ -251,7 +251,7 @@ static struct irq_chip fall_edge_irq_type = {
251}; 251};
252 252
253static struct irq_chip either_edge_irq_type = { 253static struct irq_chip either_edge_irq_type = {
254 .typename = "Au1000 Rise or Fall Edge", 254 .name = "Au1000 Rise or Fall Edge",
255 .ack = mask_and_ack_either_edge_irq, 255 .ack = mask_and_ack_either_edge_irq,
256 .mask = local_disable_irq, 256 .mask = local_disable_irq,
257 .mask_ack = mask_and_ack_either_edge_irq, 257 .mask_ack = mask_and_ack_either_edge_irq,
@@ -260,7 +260,7 @@ static struct irq_chip either_edge_irq_type = {
260}; 260};
261 261
262static struct irq_chip level_irq_type = { 262static struct irq_chip level_irq_type = {
263 .typename = "Au1000 Level", 263 .name = "Au1000 Level",
264 .ack = mask_and_ack_level_irq, 264 .ack = mask_and_ack_level_irq,
265 .mask = local_disable_irq, 265 .mask = local_disable_irq,
266 .mask_ack = mask_and_ack_level_irq, 266 .mask_ack = mask_and_ack_level_irq,
diff --git a/arch/mips/au1000/common/pci.c b/arch/mips/au1000/common/pci.c
index 9f8ce08e173b..6c25e6c09f78 100644
--- a/arch/mips/au1000/common/pci.c
+++ b/arch/mips/au1000/common/pci.c
@@ -76,13 +76,17 @@ static int __init au1x_pci_setup(void)
76 } 76 }
77 77
78#ifdef CONFIG_DMA_NONCOHERENT 78#ifdef CONFIG_DMA_NONCOHERENT
79 /* 79 {
80 * Set the NC bit in controller for Au1500 pre-AC silicon 80 /*
81 */ 81 * Set the NC bit in controller for Au1500 pre-AC silicon
82 u32 prid = read_c0_prid(); 82 */
83 if ( (prid & 0xFF000000) == 0x01000000 && prid < 0x01030202) { 83 u32 prid = read_c0_prid();
84 au_writel( 1<<16 | au_readl(Au1500_PCI_CFG), Au1500_PCI_CFG); 84
85 printk("Non-coherent PCI accesses enabled\n"); 85 if ((prid & 0xFF000000) == 0x01000000 && prid < 0x01030202) {
86 au_writel((1 << 16) | au_readl(Au1500_PCI_CFG),
87 Au1500_PCI_CFG);
88 printk("Non-coherent PCI accesses enabled\n");
89 }
86 } 90 }
87#endif 91#endif
88 92
diff --git a/arch/mips/au1000/common/prom.c b/arch/mips/au1000/common/prom.c
index 6fce60af005d..a8637cdb5b4b 100644
--- a/arch/mips/au1000/common/prom.c
+++ b/arch/mips/au1000/common/prom.c
@@ -149,9 +149,8 @@ int get_ethernet_addr(char *ethernet_addr)
149 return 0; 149 return 0;
150} 150}
151 151
152unsigned long __init prom_free_prom_memory(void) 152void __init prom_free_prom_memory(void)
153{ 153{
154 return 0;
155} 154}
156 155
157EXPORT_SYMBOL(prom_getcmdline); 156EXPORT_SYMBOL(prom_getcmdline);
diff --git a/arch/mips/au1000/common/setup.c b/arch/mips/au1000/common/setup.c
index 919172db560c..13fe187f35d6 100644
--- a/arch/mips/au1000/common/setup.c
+++ b/arch/mips/au1000/common/setup.c
@@ -141,17 +141,20 @@ void __init plat_mem_setup(void)
141/* This routine should be valid for all Au1x based boards */ 141/* This routine should be valid for all Au1x based boards */
142phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size) 142phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size)
143{ 143{
144 u32 start, end;
145
146 /* Don't fixup 36 bit addresses */ 144 /* Don't fixup 36 bit addresses */
147 if ((phys_addr >> 32) != 0) return phys_addr; 145 if ((phys_addr >> 32) != 0)
146 return phys_addr;
148 147
149#ifdef CONFIG_PCI 148#ifdef CONFIG_PCI
150 start = (u32)Au1500_PCI_MEM_START; 149 {
151 end = (u32)Au1500_PCI_MEM_END; 150 u32 start, end;
152 /* check for pci memory window */ 151
153 if ((phys_addr >= start) && ((phys_addr + size) < end)) { 152 start = (u32)Au1500_PCI_MEM_START;
154 return (phys_t)((phys_addr - start) + Au1500_PCI_MEM_START); 153 end = (u32)Au1500_PCI_MEM_END;
154 /* check for pci memory window */
155 if ((phys_addr >= start) && ((phys_addr + size) < end))
156 return (phys_t)
157 ((phys_addr - start) + Au1500_PCI_MEM_START);
155 } 158 }
156#endif 159#endif
157 160
diff --git a/arch/mips/au1000/pb1100/board_setup.c b/arch/mips/au1000/pb1100/board_setup.c
index 2d1533f116c0..6bc1f8e1b608 100644
--- a/arch/mips/au1000/pb1100/board_setup.c
+++ b/arch/mips/au1000/pb1100/board_setup.c
@@ -47,8 +47,7 @@ void board_reset (void)
47 47
48void __init board_setup(void) 48void __init board_setup(void)
49{ 49{
50 u32 pin_func; 50 volatile void __iomem * base = (volatile void __iomem *) 0xac000000UL;
51 u32 sys_freqctrl, sys_clksrc;
52 51
53 // set AUX clock to 12MHz * 8 = 96 MHz 52 // set AUX clock to 12MHz * 8 = 96 MHz
54 au_writel(8, SYS_AUXPLL); 53 au_writel(8, SYS_AUXPLL);
@@ -56,58 +55,62 @@ void __init board_setup(void)
56 udelay(100); 55 udelay(100);
57 56
58#ifdef CONFIG_USB_OHCI 57#ifdef CONFIG_USB_OHCI
59 // configure pins GPIO[14:9] as GPIO 58 {
60 pin_func = au_readl(SYS_PINFUNC) & (u32)(~0x80); 59 u32 pin_func, sys_freqctrl, sys_clksrc;
61 60
62 /* zero and disable FREQ2 */ 61 // configure pins GPIO[14:9] as GPIO
63 sys_freqctrl = au_readl(SYS_FREQCTRL0); 62 pin_func = au_readl(SYS_PINFUNC) & (u32)(~0x80);
64 sys_freqctrl &= ~0xFFF00000; 63
65 au_writel(sys_freqctrl, SYS_FREQCTRL0); 64 /* zero and disable FREQ2 */
66 65 sys_freqctrl = au_readl(SYS_FREQCTRL0);
67 /* zero and disable USBH/USBD/IrDA clock */ 66 sys_freqctrl &= ~0xFFF00000;
68 sys_clksrc = au_readl(SYS_CLKSRC); 67 au_writel(sys_freqctrl, SYS_FREQCTRL0);
69 sys_clksrc &= ~0x0000001F; 68
70 au_writel(sys_clksrc, SYS_CLKSRC); 69 /* zero and disable USBH/USBD/IrDA clock */
71 70 sys_clksrc = au_readl(SYS_CLKSRC);
72 sys_freqctrl = au_readl(SYS_FREQCTRL0); 71 sys_clksrc &= ~0x0000001F;
73 sys_freqctrl &= ~0xFFF00000; 72 au_writel(sys_clksrc, SYS_CLKSRC);
74 73
75 sys_clksrc = au_readl(SYS_CLKSRC); 74 sys_freqctrl = au_readl(SYS_FREQCTRL0);
76 sys_clksrc &= ~0x0000001F; 75 sys_freqctrl &= ~0xFFF00000;
77 76
78 // FREQ2 = aux/2 = 48 MHz 77 sys_clksrc = au_readl(SYS_CLKSRC);
79 sys_freqctrl |= ((0<<22) | (1<<21) | (1<<20)); 78 sys_clksrc &= ~0x0000001F;
80 au_writel(sys_freqctrl, SYS_FREQCTRL0); 79
81 80 // FREQ2 = aux/2 = 48 MHz
82 /* 81 sys_freqctrl |= ((0<<22) | (1<<21) | (1<<20));
83 * Route 48MHz FREQ2 into USBH/USBD/IrDA 82 au_writel(sys_freqctrl, SYS_FREQCTRL0);
84 */ 83
85 sys_clksrc |= ((4<<2) | (0<<1) | 0 ); 84 /*
86 au_writel(sys_clksrc, SYS_CLKSRC); 85 * Route 48MHz FREQ2 into USBH/USBD/IrDA
87 86 */
88 /* setup the static bus controller */ 87 sys_clksrc |= ((4<<2) | (0<<1) | 0 );
89 au_writel(0x00000002, MEM_STCFG3); /* type = PCMCIA */ 88 au_writel(sys_clksrc, SYS_CLKSRC);
90 au_writel(0x280E3D07, MEM_STTIME3); /* 250ns cycle time */ 89
91 au_writel(0x10000000, MEM_STADDR3); /* any PCMCIA select */ 90 /* setup the static bus controller */
92 91 au_writel(0x00000002, MEM_STCFG3); /* type = PCMCIA */
93 // get USB Functionality pin state (device vs host drive pins) 92 au_writel(0x280E3D07, MEM_STTIME3); /* 250ns cycle time */
94 pin_func = au_readl(SYS_PINFUNC) & (u32)(~0x8000); 93 au_writel(0x10000000, MEM_STADDR3); /* any PCMCIA select */
95 // 2nd USB port is USB host 94
96 pin_func |= 0x8000; 95 // get USB Functionality pin state (device vs host drive pins)
97 au_writel(pin_func, SYS_PINFUNC); 96 pin_func = au_readl(SYS_PINFUNC) & (u32)(~0x8000);
97 // 2nd USB port is USB host
98 pin_func |= 0x8000;
99 au_writel(pin_func, SYS_PINFUNC);
100 }
98#endif // defined (CONFIG_USB_OHCI) 101#endif // defined (CONFIG_USB_OHCI)
99 102
100 /* Enable sys bus clock divider when IDLE state or no bus activity. */ 103 /* Enable sys bus clock divider when IDLE state or no bus activity. */
101 au_writel(au_readl(SYS_POWERCTRL) | (0x3 << 5), SYS_POWERCTRL); 104 au_writel(au_readl(SYS_POWERCTRL) | (0x3 << 5), SYS_POWERCTRL);
102 105
103 // Enable the RTC if not already enabled 106 // Enable the RTC if not already enabled
104 if (!(readb(0xac000028) & 0x20)) { 107 if (!(readb(base + 0x28) & 0x20)) {
105 writeb(readb(0xac000028) | 0x20, 0xac000028); 108 writeb(readb(base + 0x28) | 0x20, base + 0x28);
106 au_sync(); 109 au_sync();
107 } 110 }
108 // Put the clock in BCD mode 111 // Put the clock in BCD mode
109 if (readb(0xac00002C) & 0x4) { /* reg B */ 112 if (readb(base + 0x2C) & 0x4) { /* reg B */
110 writeb(readb(0xac00002c) & ~0x4, 0xac00002c); 113 writeb(readb(base + 0x2c) & ~0x4, base + 0x2c);
111 au_sync(); 114 au_sync();
112 } 115 }
113} 116}
diff --git a/arch/mips/au1000/pb1200/irqmap.c b/arch/mips/au1000/pb1200/irqmap.c
index 91983ba407c4..b73b2d18bf56 100644
--- a/arch/mips/au1000/pb1200/irqmap.c
+++ b/arch/mips/au1000/pb1200/irqmap.c
@@ -137,33 +137,20 @@ static void pb1200_shutdown_irq( unsigned int irq_nr )
137 return; 137 return;
138} 138}
139 139
140static inline void pb1200_mask_and_ack_irq(unsigned int irq_nr)
141{
142 pb1200_disable_irq( irq_nr );
143}
144
145static void pb1200_end_irq(unsigned int irq_nr)
146{
147 if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))) {
148 pb1200_enable_irq(irq_nr);
149 }
150}
151
152static struct irq_chip external_irq_type = 140static struct irq_chip external_irq_type =
153{ 141{
154#ifdef CONFIG_MIPS_PB1200 142#ifdef CONFIG_MIPS_PB1200
155 "Pb1200 Ext", 143 .name = "Pb1200 Ext",
156#endif 144#endif
157#ifdef CONFIG_MIPS_DB1200 145#ifdef CONFIG_MIPS_DB1200
158 "Db1200 Ext", 146 .name = "Db1200 Ext",
159#endif 147#endif
160 pb1200_startup_irq, 148 .startup = pb1200_startup_irq,
161 pb1200_shutdown_irq, 149 .shutdown = pb1200_shutdown_irq,
162 pb1200_enable_irq, 150 .ack = pb1200_disable_irq,
163 pb1200_disable_irq, 151 .mask = pb1200_disable_irq,
164 pb1200_mask_and_ack_irq, 152 .mask_ack = pb1200_disable_irq,
165 pb1200_end_irq, 153 .unmask = pb1200_enable_irq,
166 NULL
167}; 154};
168 155
169void _board_init_irq(void) 156void _board_init_irq(void)
@@ -172,7 +159,8 @@ void _board_init_irq(void)
172 159
173 for (irq_nr = PB1200_INT_BEGIN; irq_nr <= PB1200_INT_END; irq_nr++) 160 for (irq_nr = PB1200_INT_BEGIN; irq_nr <= PB1200_INT_END; irq_nr++)
174 { 161 {
175 irq_desc[irq_nr].chip = &external_irq_type; 162 set_irq_chip_and_handler(irq_nr, &external_irq_type,
163 handle_level_irq);
176 pb1200_disable_irq(irq_nr); 164 pb1200_disable_irq(irq_nr);
177 } 165 }
178 166
diff --git a/arch/mips/basler/excite/excite_irq.c b/arch/mips/basler/excite/excite_irq.c
index 2e2061a286c5..1ecab6350421 100644
--- a/arch/mips/basler/excite/excite_irq.c
+++ b/arch/mips/basler/excite/excite_irq.c
@@ -47,9 +47,9 @@ extern asmlinkage void excite_handle_int(void);
47 */ 47 */
48void __init arch_init_irq(void) 48void __init arch_init_irq(void)
49{ 49{
50 mips_cpu_irq_init(0); 50 mips_cpu_irq_init();
51 rm7k_cpu_irq_init(8); 51 rm7k_cpu_irq_init();
52 rm9k_cpu_irq_init(12); 52 rm9k_cpu_irq_init();
53 53
54#ifdef CONFIG_KGDB 54#ifdef CONFIG_KGDB
55 excite_kgdb_init(); 55 excite_kgdb_init();
diff --git a/arch/mips/cobalt/irq.c b/arch/mips/cobalt/irq.c
index 4c46f0e73783..fe93b846923b 100644
--- a/arch/mips/cobalt/irq.c
+++ b/arch/mips/cobalt/irq.c
@@ -104,7 +104,7 @@ void __init arch_init_irq(void)
104 GT_WRITE(GT_INTRMASK_OFS, 0); 104 GT_WRITE(GT_INTRMASK_OFS, 0);
105 105
106 init_i8259_irqs(); /* 0 ... 15 */ 106 init_i8259_irqs(); /* 0 ... 15 */
107 mips_cpu_irq_init(COBALT_CPU_IRQ); /* 16 ... 23 */ 107 mips_cpu_irq_init(); /* 16 ... 23 */
108 108
109 /* 109 /*
110 * Mask all cpu interrupts 110 * Mask all cpu interrupts
diff --git a/arch/mips/cobalt/setup.c b/arch/mips/cobalt/setup.c
index e8f0f20b852d..a4b69b543bd9 100644
--- a/arch/mips/cobalt/setup.c
+++ b/arch/mips/cobalt/setup.c
@@ -204,8 +204,7 @@ void __init prom_init(void)
204 add_memory_region(0x0, memsz, BOOT_MEM_RAM); 204 add_memory_region(0x0, memsz, BOOT_MEM_RAM);
205} 205}
206 206
207unsigned long __init prom_free_prom_memory(void) 207void __init prom_free_prom_memory(void)
208{ 208{
209 /* Nothing to do! */ 209 /* Nothing to do! */
210 return 0;
211} 210}
diff --git a/arch/mips/ddb5xxx/common/prom.c b/arch/mips/ddb5xxx/common/prom.c
index efef0f57ce1e..54a857b5e3ba 100644
--- a/arch/mips/ddb5xxx/common/prom.c
+++ b/arch/mips/ddb5xxx/common/prom.c
@@ -59,9 +59,8 @@ void __init prom_init(void)
59#endif 59#endif
60} 60}
61 61
62unsigned long __init prom_free_prom_memory(void) 62void __init prom_free_prom_memory(void)
63{ 63{
64 return 0;
65} 64}
66 65
67#if defined(CONFIG_DDB5477) 66#if defined(CONFIG_DDB5477)
diff --git a/arch/mips/ddb5xxx/ddb5477/irq.c b/arch/mips/ddb5xxx/ddb5477/irq.c
index a8bd2e66705c..2b23234a5b95 100644
--- a/arch/mips/ddb5xxx/ddb5477/irq.c
+++ b/arch/mips/ddb5xxx/ddb5477/irq.c
@@ -17,6 +17,7 @@
17#include <linux/ptrace.h> 17#include <linux/ptrace.h>
18 18
19#include <asm/i8259.h> 19#include <asm/i8259.h>
20#include <asm/irq_cpu.h>
20#include <asm/system.h> 21#include <asm/system.h>
21#include <asm/mipsregs.h> 22#include <asm/mipsregs.h>
22#include <asm/debug.h> 23#include <asm/debug.h>
@@ -73,7 +74,6 @@ set_pci_int_attr(u32 pci, u32 intn, u32 active, u32 trigger)
73} 74}
74 75
75extern void vrc5477_irq_init(u32 base); 76extern void vrc5477_irq_init(u32 base);
76extern void mips_cpu_irq_init(u32 base);
77static struct irqaction irq_cascade = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL }; 77static struct irqaction irq_cascade = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL };
78 78
79void __init arch_init_irq(void) 79void __init arch_init_irq(void)
@@ -125,7 +125,7 @@ void __init arch_init_irq(void)
125 125
126 /* init all controllers */ 126 /* init all controllers */
127 init_i8259_irqs(); 127 init_i8259_irqs();
128 mips_cpu_irq_init(CPU_IRQ_BASE); 128 mips_cpu_irq_init();
129 vrc5477_irq_init(VRC5477_IRQ_BASE); 129 vrc5477_irq_init(VRC5477_IRQ_BASE);
130 130
131 131
@@ -146,8 +146,7 @@ u8 i8259_interrupt_ack(void)
146 irq = *(volatile u8 *) KSEG1ADDR(DDB_PCI_IACK_BASE); 146 irq = *(volatile u8 *) KSEG1ADDR(DDB_PCI_IACK_BASE);
147 ddb_out32(DDB_PCIINIT10, reg); 147 ddb_out32(DDB_PCIINIT10, reg);
148 148
149 /* i8259.c set the base vector to be 0x0 */ 149 return irq;
150 return irq + I8259_IRQ_BASE;
151} 150}
152/* 151/*
153 * the first level int-handler will jump here if it is a vrc5477 irq 152 * the first level int-handler will jump here if it is a vrc5477 irq
@@ -177,7 +176,7 @@ static void vrc5477_irq_dispatch(void)
177 /* check for i8259 interrupts */ 176 /* check for i8259 interrupts */
178 if (intStatus & (1 << VRC5477_I8259_CASCADE)) { 177 if (intStatus & (1 << VRC5477_I8259_CASCADE)) {
179 int i8259_irq = i8259_interrupt_ack(); 178 int i8259_irq = i8259_interrupt_ack();
180 do_IRQ(I8259_IRQ_BASE + i8259_irq); 179 do_IRQ(i8259_irq);
181 return; 180 return;
182 } 181 }
183 } 182 }
diff --git a/arch/mips/ddb5xxx/ddb5477/irq_5477.c b/arch/mips/ddb5xxx/ddb5477/irq_5477.c
index 96249aa5df5d..98c3b15eb369 100644
--- a/arch/mips/ddb5xxx/ddb5477/irq_5477.c
+++ b/arch/mips/ddb5xxx/ddb5477/irq_5477.c
@@ -82,7 +82,7 @@ vrc5477_irq_end(unsigned int irq)
82} 82}
83 83
84struct irq_chip vrc5477_irq_controller = { 84struct irq_chip vrc5477_irq_controller = {
85 .typename = "vrc5477_irq", 85 .name = "vrc5477_irq",
86 .ack = vrc5477_irq_ack, 86 .ack = vrc5477_irq_ack,
87 .mask = vrc5477_irq_disable, 87 .mask = vrc5477_irq_disable,
88 .mask_ack = vrc5477_irq_ack, 88 .mask_ack = vrc5477_irq_ack,
diff --git a/arch/mips/dec/ioasic-irq.c b/arch/mips/dec/ioasic-irq.c
index 4c7cb4048d35..3acb133668dc 100644
--- a/arch/mips/dec/ioasic-irq.c
+++ b/arch/mips/dec/ioasic-irq.c
@@ -62,7 +62,7 @@ static inline void end_ioasic_irq(unsigned int irq)
62} 62}
63 63
64static struct irq_chip ioasic_irq_type = { 64static struct irq_chip ioasic_irq_type = {
65 .typename = "IO-ASIC", 65 .name = "IO-ASIC",
66 .ack = ack_ioasic_irq, 66 .ack = ack_ioasic_irq,
67 .mask = mask_ioasic_irq, 67 .mask = mask_ioasic_irq,
68 .mask_ack = ack_ioasic_irq, 68 .mask_ack = ack_ioasic_irq,
@@ -84,7 +84,7 @@ static inline void end_ioasic_dma_irq(unsigned int irq)
84} 84}
85 85
86static struct irq_chip ioasic_dma_irq_type = { 86static struct irq_chip ioasic_dma_irq_type = {
87 .typename = "IO-ASIC-DMA", 87 .name = "IO-ASIC-DMA",
88 .ack = ack_ioasic_dma_irq, 88 .ack = ack_ioasic_dma_irq,
89 .mask = mask_ioasic_dma_irq, 89 .mask = mask_ioasic_dma_irq,
90 .mask_ack = ack_ioasic_dma_irq, 90 .mask_ack = ack_ioasic_dma_irq,
diff --git a/arch/mips/dec/kn02-irq.c b/arch/mips/dec/kn02-irq.c
index 916e46b8ccd8..02439dc0ba83 100644
--- a/arch/mips/dec/kn02-irq.c
+++ b/arch/mips/dec/kn02-irq.c
@@ -58,7 +58,7 @@ static void ack_kn02_irq(unsigned int irq)
58} 58}
59 59
60static struct irq_chip kn02_irq_type = { 60static struct irq_chip kn02_irq_type = {
61 .typename = "KN02-CSR", 61 .name = "KN02-CSR",
62 .ack = ack_kn02_irq, 62 .ack = ack_kn02_irq,
63 .mask = mask_kn02_irq, 63 .mask = mask_kn02_irq,
64 .mask_ack = ack_kn02_irq, 64 .mask_ack = ack_kn02_irq,
diff --git a/arch/mips/dec/prom/memory.c b/arch/mips/dec/prom/memory.c
index 3aa01d268f2d..5a557e268f78 100644
--- a/arch/mips/dec/prom/memory.c
+++ b/arch/mips/dec/prom/memory.c
@@ -92,9 +92,9 @@ void __init prom_meminit(u32 magic)
92 rex_setup_memory_region(); 92 rex_setup_memory_region();
93} 93}
94 94
95unsigned long __init prom_free_prom_memory(void) 95void __init prom_free_prom_memory(void)
96{ 96{
97 unsigned long addr, end; 97 unsigned long end;
98 98
99 /* 99 /*
100 * Free everything below the kernel itself but leave 100 * Free everything below the kernel itself but leave
@@ -114,16 +114,5 @@ unsigned long __init prom_free_prom_memory(void)
114#endif 114#endif
115 end = __pa(&_text); 115 end = __pa(&_text);
116 116
117 addr = PAGE_SIZE; 117 free_init_pages("unused PROM memory", PAGE_SIZE, end);
118 while (addr < end) {
119 ClearPageReserved(virt_to_page(__va(addr)));
120 init_page_count(virt_to_page(__va(addr)));
121 free_page((unsigned long)__va(addr));
122 addr += PAGE_SIZE;
123 }
124
125 printk("Freeing unused PROM memory: %ldkb freed\n",
126 (end - PAGE_SIZE) >> 10);
127
128 return end - PAGE_SIZE;
129} 118}
diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c
index d34032ac492a..1058e2f409bb 100644
--- a/arch/mips/dec/setup.c
+++ b/arch/mips/dec/setup.c
@@ -234,7 +234,7 @@ static void __init dec_init_kn01(void)
234 memcpy(&cpu_mask_nr_tbl, &kn01_cpu_mask_nr_tbl, 234 memcpy(&cpu_mask_nr_tbl, &kn01_cpu_mask_nr_tbl,
235 sizeof(kn01_cpu_mask_nr_tbl)); 235 sizeof(kn01_cpu_mask_nr_tbl));
236 236
237 mips_cpu_irq_init(DEC_CPU_IRQ_BASE); 237 mips_cpu_irq_init();
238 238
239} /* dec_init_kn01 */ 239} /* dec_init_kn01 */
240 240
@@ -309,7 +309,7 @@ static void __init dec_init_kn230(void)
309 memcpy(&cpu_mask_nr_tbl, &kn230_cpu_mask_nr_tbl, 309 memcpy(&cpu_mask_nr_tbl, &kn230_cpu_mask_nr_tbl,
310 sizeof(kn230_cpu_mask_nr_tbl)); 310 sizeof(kn230_cpu_mask_nr_tbl));
311 311
312 mips_cpu_irq_init(DEC_CPU_IRQ_BASE); 312 mips_cpu_irq_init();
313 313
314} /* dec_init_kn230 */ 314} /* dec_init_kn230 */
315 315
@@ -403,7 +403,7 @@ static void __init dec_init_kn02(void)
403 memcpy(&asic_mask_nr_tbl, &kn02_asic_mask_nr_tbl, 403 memcpy(&asic_mask_nr_tbl, &kn02_asic_mask_nr_tbl,
404 sizeof(kn02_asic_mask_nr_tbl)); 404 sizeof(kn02_asic_mask_nr_tbl));
405 405
406 mips_cpu_irq_init(DEC_CPU_IRQ_BASE); 406 mips_cpu_irq_init();
407 init_kn02_irqs(KN02_IRQ_BASE); 407 init_kn02_irqs(KN02_IRQ_BASE);
408 408
409} /* dec_init_kn02 */ 409} /* dec_init_kn02 */
@@ -504,7 +504,7 @@ static void __init dec_init_kn02ba(void)
504 memcpy(&asic_mask_nr_tbl, &kn02ba_asic_mask_nr_tbl, 504 memcpy(&asic_mask_nr_tbl, &kn02ba_asic_mask_nr_tbl,
505 sizeof(kn02ba_asic_mask_nr_tbl)); 505 sizeof(kn02ba_asic_mask_nr_tbl));
506 506
507 mips_cpu_irq_init(DEC_CPU_IRQ_BASE); 507 mips_cpu_irq_init();
508 init_ioasic_irqs(IO_IRQ_BASE); 508 init_ioasic_irqs(IO_IRQ_BASE);
509 509
510} /* dec_init_kn02ba */ 510} /* dec_init_kn02ba */
@@ -601,7 +601,7 @@ static void __init dec_init_kn02ca(void)
601 memcpy(&asic_mask_nr_tbl, &kn02ca_asic_mask_nr_tbl, 601 memcpy(&asic_mask_nr_tbl, &kn02ca_asic_mask_nr_tbl,
602 sizeof(kn02ca_asic_mask_nr_tbl)); 602 sizeof(kn02ca_asic_mask_nr_tbl));
603 603
604 mips_cpu_irq_init(DEC_CPU_IRQ_BASE); 604 mips_cpu_irq_init();
605 init_ioasic_irqs(IO_IRQ_BASE); 605 init_ioasic_irqs(IO_IRQ_BASE);
606 606
607} /* dec_init_kn02ca */ 607} /* dec_init_kn02ca */
@@ -702,7 +702,7 @@ static void __init dec_init_kn03(void)
702 memcpy(&asic_mask_nr_tbl, &kn03_asic_mask_nr_tbl, 702 memcpy(&asic_mask_nr_tbl, &kn03_asic_mask_nr_tbl,
703 sizeof(kn03_asic_mask_nr_tbl)); 703 sizeof(kn03_asic_mask_nr_tbl));
704 704
705 mips_cpu_irq_init(DEC_CPU_IRQ_BASE); 705 mips_cpu_irq_init();
706 init_ioasic_irqs(IO_IRQ_BASE); 706 init_ioasic_irqs(IO_IRQ_BASE);
707 707
708} /* dec_init_kn03 */ 708} /* dec_init_kn03 */
diff --git a/arch/mips/emma2rh/common/irq_emma2rh.c b/arch/mips/emma2rh/common/irq_emma2rh.c
index 8d880f0b06ec..96df37b77759 100644
--- a/arch/mips/emma2rh/common/irq_emma2rh.c
+++ b/arch/mips/emma2rh/common/irq_emma2rh.c
@@ -57,7 +57,7 @@ static void emma2rh_irq_disable(unsigned int irq)
57} 57}
58 58
59struct irq_chip emma2rh_irq_controller = { 59struct irq_chip emma2rh_irq_controller = {
60 .typename = "emma2rh_irq", 60 .name = "emma2rh_irq",
61 .ack = emma2rh_irq_disable, 61 .ack = emma2rh_irq_disable,
62 .mask = emma2rh_irq_disable, 62 .mask = emma2rh_irq_disable,
63 .mask_ack = emma2rh_irq_disable, 63 .mask_ack = emma2rh_irq_disable,
diff --git a/arch/mips/emma2rh/markeins/irq.c b/arch/mips/emma2rh/markeins/irq.c
index c93369cb4115..3299b6dfe764 100644
--- a/arch/mips/emma2rh/markeins/irq.c
+++ b/arch/mips/emma2rh/markeins/irq.c
@@ -106,7 +106,7 @@ void __init arch_init_irq(void)
106 emma2rh_irq_init(EMMA2RH_IRQ_BASE); 106 emma2rh_irq_init(EMMA2RH_IRQ_BASE);
107 emma2rh_sw_irq_init(EMMA2RH_SW_IRQ_BASE); 107 emma2rh_sw_irq_init(EMMA2RH_SW_IRQ_BASE);
108 emma2rh_gpio_irq_init(EMMA2RH_GPIO_IRQ_BASE); 108 emma2rh_gpio_irq_init(EMMA2RH_GPIO_IRQ_BASE);
109 mips_cpu_irq_init(CPU_IRQ_BASE); 109 mips_cpu_irq_init();
110 110
111 /* setup cascade interrupts */ 111 /* setup cascade interrupts */
112 setup_irq(EMMA2RH_IRQ_BASE + EMMA2RH_SW_CASCADE, &irq_cascade); 112 setup_irq(EMMA2RH_IRQ_BASE + EMMA2RH_SW_CASCADE, &irq_cascade);
diff --git a/arch/mips/emma2rh/markeins/irq_markeins.c b/arch/mips/emma2rh/markeins/irq_markeins.c
index 2116d9be5fa9..fba5c156f472 100644
--- a/arch/mips/emma2rh/markeins/irq_markeins.c
+++ b/arch/mips/emma2rh/markeins/irq_markeins.c
@@ -49,7 +49,7 @@ static void emma2rh_sw_irq_disable(unsigned int irq)
49} 49}
50 50
51struct irq_chip emma2rh_sw_irq_controller = { 51struct irq_chip emma2rh_sw_irq_controller = {
52 .typename = "emma2rh_sw_irq", 52 .name = "emma2rh_sw_irq",
53 .ack = emma2rh_sw_irq_disable, 53 .ack = emma2rh_sw_irq_disable,
54 .mask = emma2rh_sw_irq_disable, 54 .mask = emma2rh_sw_irq_disable,
55 .mask_ack = emma2rh_sw_irq_disable, 55 .mask_ack = emma2rh_sw_irq_disable,
@@ -115,7 +115,7 @@ static void emma2rh_gpio_irq_end(unsigned int irq)
115} 115}
116 116
117struct irq_chip emma2rh_gpio_irq_controller = { 117struct irq_chip emma2rh_gpio_irq_controller = {
118 .typename = "emma2rh_gpio_irq", 118 .name = "emma2rh_gpio_irq",
119 .ack = emma2rh_gpio_irq_ack, 119 .ack = emma2rh_gpio_irq_ack,
120 .mask = emma2rh_gpio_irq_disable, 120 .mask = emma2rh_gpio_irq_disable,
121 .mask_ack = emma2rh_gpio_irq_ack, 121 .mask_ack = emma2rh_gpio_irq_ack,
diff --git a/arch/mips/gt64120/ev64120/irq.c b/arch/mips/gt64120/ev64120/irq.c
index b3e5796c81d7..04572b9c9642 100644
--- a/arch/mips/gt64120/ev64120/irq.c
+++ b/arch/mips/gt64120/ev64120/irq.c
@@ -88,7 +88,7 @@ static void end_ev64120_irq(unsigned int irq)
88} 88}
89 89
90static struct irq_chip ev64120_irq_type = { 90static struct irq_chip ev64120_irq_type = {
91 .typename = "EV64120", 91 .name = "EV64120",
92 .ack = disable_ev64120_irq, 92 .ack = disable_ev64120_irq,
93 .mask = disable_ev64120_irq, 93 .mask = disable_ev64120_irq,
94 .mask_ack = disable_ev64120_irq, 94 .mask_ack = disable_ev64120_irq,
diff --git a/arch/mips/gt64120/ev64120/setup.c b/arch/mips/gt64120/ev64120/setup.c
index 99c8d42212e2..477848c22a2c 100644
--- a/arch/mips/gt64120/ev64120/setup.c
+++ b/arch/mips/gt64120/ev64120/setup.c
@@ -59,9 +59,8 @@ extern void galileo_machine_power_off(void);
59 */ 59 */
60extern struct pci_ops galileo_pci_ops; 60extern struct pci_ops galileo_pci_ops;
61 61
62unsigned long __init prom_free_prom_memory(void) 62void __init prom_free_prom_memory(void)
63{ 63{
64 return 0;
65} 64}
66 65
67/* 66/*
diff --git a/arch/mips/gt64120/momenco_ocelot/dbg_io.c b/arch/mips/gt64120/momenco_ocelot/dbg_io.c
index 2128684584f5..32d6fb4ee679 100644
--- a/arch/mips/gt64120/momenco_ocelot/dbg_io.c
+++ b/arch/mips/gt64120/momenco_ocelot/dbg_io.c
@@ -1,6 +1,4 @@
1 1
2#ifdef CONFIG_KGDB
3
4#include <asm/serial.h> /* For the serial port location and base baud */ 2#include <asm/serial.h> /* For the serial port location and base baud */
5 3
6/* --- CONFIG --- */ 4/* --- CONFIG --- */
@@ -121,5 +119,3 @@ int putDebugChar(uint8 byte)
121 UART16550_WRITE(OFS_SEND_BUFFER, byte); 119 UART16550_WRITE(OFS_SEND_BUFFER, byte);
122 return 1; 120 return 1;
123} 121}
124
125#endif
diff --git a/arch/mips/gt64120/momenco_ocelot/irq.c b/arch/mips/gt64120/momenco_ocelot/irq.c
index d9294401ccb0..2585d9dbda33 100644
--- a/arch/mips/gt64120/momenco_ocelot/irq.c
+++ b/arch/mips/gt64120/momenco_ocelot/irq.c
@@ -90,6 +90,6 @@ void __init arch_init_irq(void)
90 clear_c0_status(ST0_IM); 90 clear_c0_status(ST0_IM);
91 local_irq_disable(); 91 local_irq_disable();
92 92
93 mips_cpu_irq_init(0); 93 mips_cpu_irq_init();
94 rm7k_cpu_irq_init(8); 94 rm7k_cpu_irq_init();
95} 95}
diff --git a/arch/mips/gt64120/momenco_ocelot/prom.c b/arch/mips/gt64120/momenco_ocelot/prom.c
index 8677b6d3ada7..78f393b2afd9 100644
--- a/arch/mips/gt64120/momenco_ocelot/prom.c
+++ b/arch/mips/gt64120/momenco_ocelot/prom.c
@@ -67,7 +67,6 @@ void __init prom_init(void)
67 add_memory_region(0, 64 << 20, BOOT_MEM_RAM); 67 add_memory_region(0, 64 << 20, BOOT_MEM_RAM);
68} 68}
69 69
70unsigned long __init prom_free_prom_memory(void) 70void __init prom_free_prom_memory(void)
71{ 71{
72 return 0;
73} 72}
diff --git a/arch/mips/gt64120/wrppmc/irq.c b/arch/mips/gt64120/wrppmc/irq.c
index eedfc24e1eae..d3d96591780e 100644
--- a/arch/mips/gt64120/wrppmc/irq.c
+++ b/arch/mips/gt64120/wrppmc/irq.c
@@ -63,7 +63,7 @@ void gt64120_init_pic(void)
63void __init arch_init_irq(void) 63void __init arch_init_irq(void)
64{ 64{
65 /* IRQ 0 - 7 are for MIPS common irq_cpu controller */ 65 /* IRQ 0 - 7 are for MIPS common irq_cpu controller */
66 mips_cpu_irq_init(0); 66 mips_cpu_irq_init();
67 67
68 gt64120_init_pic(); 68 gt64120_init_pic();
69} 69}
diff --git a/arch/mips/gt64120/wrppmc/setup.c b/arch/mips/gt64120/wrppmc/setup.c
index 429afc400cb4..121188d5ec4a 100644
--- a/arch/mips/gt64120/wrppmc/setup.c
+++ b/arch/mips/gt64120/wrppmc/setup.c
@@ -93,9 +93,8 @@ void __init wrppmc_early_printk(const char *fmt, ...)
93} 93}
94#endif /* WRPPMC_EARLY_DEBUG */ 94#endif /* WRPPMC_EARLY_DEBUG */
95 95
96unsigned long __init prom_free_prom_memory(void) 96void __init prom_free_prom_memory(void)
97{ 97{
98 return 0;
99} 98}
100 99
101#ifdef CONFIG_SERIAL_8250 100#ifdef CONFIG_SERIAL_8250
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index f8d417b5c2bb..295892e4ce53 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -40,7 +40,7 @@ void disable_r4030_irq(unsigned int irq)
40} 40}
41 41
42static struct irq_chip r4030_irq_type = { 42static struct irq_chip r4030_irq_type = {
43 .typename = "R4030", 43 .name = "R4030",
44 .ack = disable_r4030_irq, 44 .ack = disable_r4030_irq,
45 .mask = disable_r4030_irq, 45 .mask = disable_r4030_irq,
46 .mask_ack = disable_r4030_irq, 46 .mask_ack = disable_r4030_irq,
diff --git a/arch/mips/jmr3927/common/prom.c b/arch/mips/jmr3927/common/prom.c
index 5d5838f41d23..aa481b774c42 100644
--- a/arch/mips/jmr3927/common/prom.c
+++ b/arch/mips/jmr3927/common/prom.c
@@ -75,7 +75,6 @@ void __init prom_init_cmdline(void)
75 *cp = '\0'; 75 *cp = '\0';
76} 76}
77 77
78unsigned long __init prom_free_prom_memory(void) 78void __init prom_free_prom_memory(void)
79{ 79{
80 return 0;
81} 80}
diff --git a/arch/mips/jmr3927/rbhma3100/irq.c b/arch/mips/jmr3927/rbhma3100/irq.c
index 3da49c5aaf49..7d2c203cb406 100644
--- a/arch/mips/jmr3927/rbhma3100/irq.c
+++ b/arch/mips/jmr3927/rbhma3100/irq.c
@@ -439,7 +439,7 @@ void __init arch_init_irq(void)
439} 439}
440 440
441static struct irq_chip jmr3927_irq_controller = { 441static struct irq_chip jmr3927_irq_controller = {
442 .typename = "jmr3927_irq", 442 .name = "jmr3927_irq",
443 .ack = jmr3927_irq_ack, 443 .ack = jmr3927_irq_ack,
444 .mask = jmr3927_irq_disable, 444 .mask = jmr3927_irq_disable,
445 .mask_ack = jmr3927_irq_ack, 445 .mask_ack = jmr3927_irq_ack,
diff --git a/arch/mips/jmr3927/rbhma3100/setup.c b/arch/mips/jmr3927/rbhma3100/setup.c
index 138f25efe38a..7ca3d6d07b34 100644
--- a/arch/mips/jmr3927/rbhma3100/setup.c
+++ b/arch/mips/jmr3927/rbhma3100/setup.c
@@ -434,7 +434,7 @@ void __init tx3927_setup(void)
434 434
435 /* DMA */ 435 /* DMA */
436 tx3927_dmaptr->mcr = 0; 436 tx3927_dmaptr->mcr = 0;
437 for (i = 0; i < sizeof(tx3927_dmaptr->ch) / sizeof(tx3927_dmaptr->ch[0]); i++) { 437 for (i = 0; i < ARRAY_SIZE(tx3927_dmaptr->ch); i++) {
438 /* reset channel */ 438 /* reset channel */
439 tx3927_dmaptr->ch[i].ccr = TX3927_DMA_CCR_CHRST; 439 tx3927_dmaptr->ch[i].ccr = TX3927_DMA_CCR_CHRST;
440 tx3927_dmaptr->ch[i].ccr = 0; 440 tx3927_dmaptr->ch[i].ccr = 0;
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index ff88b06f89df..ea7df4b8da33 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -234,10 +234,6 @@ void output_mm_defines(void)
234 constant("#define _PMD_SHIFT ", PMD_SHIFT); 234 constant("#define _PMD_SHIFT ", PMD_SHIFT);
235 constant("#define _PGDIR_SHIFT ", PGDIR_SHIFT); 235 constant("#define _PGDIR_SHIFT ", PGDIR_SHIFT);
236 linefeed; 236 linefeed;
237 constant("#define _PGD_ORDER ", PGD_ORDER);
238 constant("#define _PMD_ORDER ", PMD_ORDER);
239 constant("#define _PTE_ORDER ", PTE_ORDER);
240 linefeed;
241 constant("#define _PTRS_PER_PGD ", PTRS_PER_PGD); 237 constant("#define _PTRS_PER_PGD ", PTRS_PER_PGD);
242 constant("#define _PTRS_PER_PMD ", PTRS_PER_PMD); 238 constant("#define _PTRS_PER_PMD ", PTRS_PER_PMD);
243 constant("#define _PTRS_PER_PTE ", PTRS_PER_PTE); 239 constant("#define _PTRS_PER_PTE ", PTRS_PER_PTE);
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 442839e9578c..f59ef271d247 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -565,7 +565,7 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
565 if (config3 & MIPS_CONF3_VEIC) 565 if (config3 & MIPS_CONF3_VEIC)
566 c->options |= MIPS_CPU_VEIC; 566 c->options |= MIPS_CPU_VEIC;
567 if (config3 & MIPS_CONF3_MT) 567 if (config3 & MIPS_CONF3_MT)
568 c->ases |= MIPS_ASE_MIPSMT; 568 c->ases |= MIPS_ASE_MIPSMT;
569 569
570 return config3 & MIPS_CONF_M; 570 return config3 & MIPS_CONF_M;
571} 571}
diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c
index 719d26968cb2..7bc882049269 100644
--- a/arch/mips/kernel/gdb-stub.c
+++ b/arch/mips/kernel/gdb-stub.c
@@ -505,13 +505,13 @@ void show_gdbregs(struct gdb_regs * regs)
505 */ 505 */
506 printk("$0 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 506 printk("$0 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
507 regs->reg0, regs->reg1, regs->reg2, regs->reg3, 507 regs->reg0, regs->reg1, regs->reg2, regs->reg3,
508 regs->reg4, regs->reg5, regs->reg6, regs->reg7); 508 regs->reg4, regs->reg5, regs->reg6, regs->reg7);
509 printk("$8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 509 printk("$8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
510 regs->reg8, regs->reg9, regs->reg10, regs->reg11, 510 regs->reg8, regs->reg9, regs->reg10, regs->reg11,
511 regs->reg12, regs->reg13, regs->reg14, regs->reg15); 511 regs->reg12, regs->reg13, regs->reg14, regs->reg15);
512 printk("$16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 512 printk("$16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
513 regs->reg16, regs->reg17, regs->reg18, regs->reg19, 513 regs->reg16, regs->reg17, regs->reg18, regs->reg19,
514 regs->reg20, regs->reg21, regs->reg22, regs->reg23); 514 regs->reg20, regs->reg21, regs->reg22, regs->reg23);
515 printk("$24: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 515 printk("$24: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
516 regs->reg24, regs->reg25, regs->reg26, regs->reg27, 516 regs->reg24, regs->reg25, regs->reg26, regs->reg27,
517 regs->reg28, regs->reg29, regs->reg30, regs->reg31); 517 regs->reg28, regs->reg29, regs->reg30, regs->reg31);
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 9a7811d13db2..6f57ca44291f 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -231,28 +231,3 @@ NESTED(smp_bootstrap, 16, sp)
231#endif /* CONFIG_SMP */ 231#endif /* CONFIG_SMP */
232 232
233 __FINIT 233 __FINIT
234
235 .comm kernelsp, NR_CPUS * 8, 8
236 .comm pgd_current, NR_CPUS * 8, 8
237
238 .comm fw_arg0, SZREG, SZREG # firmware arguments
239 .comm fw_arg1, SZREG, SZREG
240 .comm fw_arg2, SZREG, SZREG
241 .comm fw_arg3, SZREG, SZREG
242
243 .macro page name, order
244 .comm \name, (_PAGE_SIZE << \order), (_PAGE_SIZE << \order)
245 .endm
246
247 /*
248 * On 64-bit we've got three-level pagetables with a slightly
249 * different layout ...
250 */
251 page swapper_pg_dir, _PGD_ORDER
252#ifdef CONFIG_64BIT
253#if defined(CONFIG_MODULES) && !defined(CONFIG_BUILD_ELF64)
254 page module_pg_dir, _PGD_ORDER
255#endif
256 page invalid_pmd_table, _PMD_ORDER
257#endif
258 page invalid_pte_table, _PTE_ORDER
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index b59a676c6d0e..b33ba6cd7f5b 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -54,9 +54,11 @@ static unsigned int cached_irq_mask = 0xffff;
54 54
55void disable_8259A_irq(unsigned int irq) 55void disable_8259A_irq(unsigned int irq)
56{ 56{
57 unsigned int mask = 1 << irq; 57 unsigned int mask;
58 unsigned long flags; 58 unsigned long flags;
59 59
60 irq -= I8259A_IRQ_BASE;
61 mask = 1 << irq;
60 spin_lock_irqsave(&i8259A_lock, flags); 62 spin_lock_irqsave(&i8259A_lock, flags);
61 cached_irq_mask |= mask; 63 cached_irq_mask |= mask;
62 if (irq & 8) 64 if (irq & 8)
@@ -68,9 +70,11 @@ void disable_8259A_irq(unsigned int irq)
68 70
69void enable_8259A_irq(unsigned int irq) 71void enable_8259A_irq(unsigned int irq)
70{ 72{
71 unsigned int mask = ~(1 << irq); 73 unsigned int mask;
72 unsigned long flags; 74 unsigned long flags;
73 75
76 irq -= I8259A_IRQ_BASE;
77 mask = ~(1 << irq);
74 spin_lock_irqsave(&i8259A_lock, flags); 78 spin_lock_irqsave(&i8259A_lock, flags);
75 cached_irq_mask &= mask; 79 cached_irq_mask &= mask;
76 if (irq & 8) 80 if (irq & 8)
@@ -82,10 +86,12 @@ void enable_8259A_irq(unsigned int irq)
82 86
83int i8259A_irq_pending(unsigned int irq) 87int i8259A_irq_pending(unsigned int irq)
84{ 88{
85 unsigned int mask = 1 << irq; 89 unsigned int mask;
86 unsigned long flags; 90 unsigned long flags;
87 int ret; 91 int ret;
88 92
93 irq -= I8259A_IRQ_BASE;
94 mask = 1 << irq;
89 spin_lock_irqsave(&i8259A_lock, flags); 95 spin_lock_irqsave(&i8259A_lock, flags);
90 if (irq < 8) 96 if (irq < 8)
91 ret = inb(PIC_MASTER_CMD) & mask; 97 ret = inb(PIC_MASTER_CMD) & mask;
@@ -134,9 +140,11 @@ static inline int i8259A_irq_real(unsigned int irq)
134 */ 140 */
135void mask_and_ack_8259A(unsigned int irq) 141void mask_and_ack_8259A(unsigned int irq)
136{ 142{
137 unsigned int irqmask = 1 << irq; 143 unsigned int irqmask;
138 unsigned long flags; 144 unsigned long flags;
139 145
146 irq -= I8259A_IRQ_BASE;
147 irqmask = 1 << irq;
140 spin_lock_irqsave(&i8259A_lock, flags); 148 spin_lock_irqsave(&i8259A_lock, flags);
141 /* 149 /*
142 * Lightweight spurious IRQ detection. We do not want 150 * Lightweight spurious IRQ detection. We do not want
@@ -169,8 +177,8 @@ handle_real_irq:
169 outb(0x60+irq,PIC_MASTER_CMD); /* 'Specific EOI to master */ 177 outb(0x60+irq,PIC_MASTER_CMD); /* 'Specific EOI to master */
170 } 178 }
171#ifdef CONFIG_MIPS_MT_SMTC 179#ifdef CONFIG_MIPS_MT_SMTC
172 if (irq_hwmask[irq] & ST0_IM) 180 if (irq_hwmask[irq] & ST0_IM)
173 set_c0_status(irq_hwmask[irq] & ST0_IM); 181 set_c0_status(irq_hwmask[irq] & ST0_IM);
174#endif /* CONFIG_MIPS_MT_SMTC */ 182#endif /* CONFIG_MIPS_MT_SMTC */
175 spin_unlock_irqrestore(&i8259A_lock, flags); 183 spin_unlock_irqrestore(&i8259A_lock, flags);
176 return; 184 return;
@@ -322,8 +330,8 @@ void __init init_i8259_irqs (void)
322 330
323 init_8259A(0); 331 init_8259A(0);
324 332
325 for (i = 0; i < 16; i++) 333 for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++)
326 set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq); 334 set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq);
327 335
328 setup_irq(PIC_CASCADE_IR, &irq2); 336 setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
329} 337}
diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c
index 37cad5de515c..3cc25c05d367 100644
--- a/arch/mips/kernel/irixelf.c
+++ b/arch/mips/kernel/irixelf.c
@@ -10,6 +10,8 @@
10 * Copyright (C) 1996 - 2004 David S. Miller <dm@engr.sgi.com> 10 * Copyright (C) 1996 - 2004 David S. Miller <dm@engr.sgi.com>
11 * Copyright (C) 2004 - 2005 Steven J. Hill <sjhill@realitydiluted.com> 11 * Copyright (C) 2004 - 2005 Steven J. Hill <sjhill@realitydiluted.com>
12 */ 12 */
13#undef DEBUG
14
13#include <linux/module.h> 15#include <linux/module.h>
14#include <linux/fs.h> 16#include <linux/fs.h>
15#include <linux/stat.h> 17#include <linux/stat.h>
@@ -40,8 +42,6 @@
40 42
41#include <linux/elf.h> 43#include <linux/elf.h>
42 44
43#undef DEBUG
44
45static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs); 45static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs);
46static int load_irix_library(struct file *); 46static int load_irix_library(struct file *);
47static int irix_core_dump(long signr, struct pt_regs * regs, 47static int irix_core_dump(long signr, struct pt_regs * regs,
@@ -52,72 +52,102 @@ static struct linux_binfmt irix_format = {
52 irix_core_dump, PAGE_SIZE 52 irix_core_dump, PAGE_SIZE
53}; 53};
54 54
55#ifdef DEBUG
56/* Debugging routines. */ 55/* Debugging routines. */
57static char *get_elf_p_type(Elf32_Word p_type) 56static char *get_elf_p_type(Elf32_Word p_type)
58{ 57{
59 int i = (int) p_type; 58#ifdef DEBUG
60 59 switch (p_type) {
61 switch(i) { 60 case PT_NULL:
62 case PT_NULL: return("PT_NULL"); break; 61 return "PT_NULL";
63 case PT_LOAD: return("PT_LOAD"); break; 62 break;
64 case PT_DYNAMIC: return("PT_DYNAMIC"); break; 63
65 case PT_INTERP: return("PT_INTERP"); break; 64 case PT_LOAD:
66 case PT_NOTE: return("PT_NOTE"); break; 65 return "PT_LOAD";
67 case PT_SHLIB: return("PT_SHLIB"); break; 66 break;
68 case PT_PHDR: return("PT_PHDR"); break; 67
69 case PT_LOPROC: return("PT_LOPROC/REGINFO"); break; 68 case PT_DYNAMIC:
70 case PT_HIPROC: return("PT_HIPROC"); break; 69 return "PT_DYNAMIC";
71 default: return("PT_BOGUS"); break; 70 break;
71
72 case PT_INTERP:
73 return "PT_INTERP";
74 break;
75
76 case PT_NOTE:
77 return "PT_NOTE";
78 break;
79
80 case PT_SHLIB:
81 return "PT_SHLIB";
82 break;
83
84 case PT_PHDR:
85 return "PT_PHDR";
86 break;
87
88 case PT_LOPROC:
89 return "PT_LOPROC/REGINFO";
90 break;
91
92 case PT_HIPROC:
93 return "PT_HIPROC";
94 break;
95
96 default:
97 return "PT_BOGUS";
98 break;
72 } 99 }
100#endif
73} 101}
74 102
75static void print_elfhdr(struct elfhdr *ehp) 103static void print_elfhdr(struct elfhdr *ehp)
76{ 104{
77 int i; 105 int i;
78 106
79 printk("ELFHDR: e_ident<"); 107 pr_debug("ELFHDR: e_ident<");
80 for(i = 0; i < (EI_NIDENT - 1); i++) printk("%x ", ehp->e_ident[i]); 108 for (i = 0; i < (EI_NIDENT - 1); i++)
81 printk("%x>\n", ehp->e_ident[i]); 109 pr_debug("%x ", ehp->e_ident[i]);
82 printk(" e_type[%04x] e_machine[%04x] e_version[%08lx]\n", 110 pr_debug("%x>\n", ehp->e_ident[i]);
83 (unsigned short) ehp->e_type, (unsigned short) ehp->e_machine, 111 pr_debug(" e_type[%04x] e_machine[%04x] e_version[%08lx]\n",
84 (unsigned long) ehp->e_version); 112 (unsigned short) ehp->e_type, (unsigned short) ehp->e_machine,
85 printk(" e_entry[%08lx] e_phoff[%08lx] e_shoff[%08lx] " 113 (unsigned long) ehp->e_version);
86 "e_flags[%08lx]\n", 114 pr_debug(" e_entry[%08lx] e_phoff[%08lx] e_shoff[%08lx] "
87 (unsigned long) ehp->e_entry, (unsigned long) ehp->e_phoff, 115 "e_flags[%08lx]\n",
88 (unsigned long) ehp->e_shoff, (unsigned long) ehp->e_flags); 116 (unsigned long) ehp->e_entry, (unsigned long) ehp->e_phoff,
89 printk(" e_ehsize[%04x] e_phentsize[%04x] e_phnum[%04x]\n", 117 (unsigned long) ehp->e_shoff, (unsigned long) ehp->e_flags);
90 (unsigned short) ehp->e_ehsize, (unsigned short) ehp->e_phentsize, 118 pr_debug(" e_ehsize[%04x] e_phentsize[%04x] e_phnum[%04x]\n",
91 (unsigned short) ehp->e_phnum); 119 (unsigned short) ehp->e_ehsize,
92 printk(" e_shentsize[%04x] e_shnum[%04x] e_shstrndx[%04x]\n", 120 (unsigned short) ehp->e_phentsize,
93 (unsigned short) ehp->e_shentsize, (unsigned short) ehp->e_shnum, 121 (unsigned short) ehp->e_phnum);
94 (unsigned short) ehp->e_shstrndx); 122 pr_debug(" e_shentsize[%04x] e_shnum[%04x] e_shstrndx[%04x]\n",
123 (unsigned short) ehp->e_shentsize,
124 (unsigned short) ehp->e_shnum,
125 (unsigned short) ehp->e_shstrndx);
95} 126}
96 127
97static void print_phdr(int i, struct elf_phdr *ep) 128static void print_phdr(int i, struct elf_phdr *ep)
98{ 129{
99 printk("PHDR[%d]: p_type[%s] p_offset[%08lx] p_vaddr[%08lx] " 130 pr_debug("PHDR[%d]: p_type[%s] p_offset[%08lx] p_vaddr[%08lx] "
100 "p_paddr[%08lx]\n", i, get_elf_p_type(ep->p_type), 131 "p_paddr[%08lx]\n", i, get_elf_p_type(ep->p_type),
101 (unsigned long) ep->p_offset, (unsigned long) ep->p_vaddr, 132 (unsigned long) ep->p_offset, (unsigned long) ep->p_vaddr,
102 (unsigned long) ep->p_paddr); 133 (unsigned long) ep->p_paddr);
103 printk(" p_filesz[%08lx] p_memsz[%08lx] p_flags[%08lx] " 134 pr_debug(" p_filesz[%08lx] p_memsz[%08lx] p_flags[%08lx] "
104 "p_align[%08lx]\n", (unsigned long) ep->p_filesz, 135 "p_align[%08lx]\n", (unsigned long) ep->p_filesz,
105 (unsigned long) ep->p_memsz, (unsigned long) ep->p_flags, 136 (unsigned long) ep->p_memsz, (unsigned long) ep->p_flags,
106 (unsigned long) ep->p_align); 137 (unsigned long) ep->p_align);
107} 138}
108 139
109static void dump_phdrs(struct elf_phdr *ep, int pnum) 140static void dump_phdrs(struct elf_phdr *ep, int pnum)
110{ 141{
111 int i; 142 int i;
112 143
113 for(i = 0; i < pnum; i++, ep++) { 144 for (i = 0; i < pnum; i++, ep++) {
114 if((ep->p_type == PT_LOAD) || 145 if ((ep->p_type == PT_LOAD) ||
115 (ep->p_type == PT_INTERP) || 146 (ep->p_type == PT_INTERP) ||
116 (ep->p_type == PT_PHDR)) 147 (ep->p_type == PT_PHDR))
117 print_phdr(i, ep); 148 print_phdr(i, ep);
118 } 149 }
119} 150}
120#endif /* DEBUG */
121 151
122static void set_brk(unsigned long start, unsigned long end) 152static void set_brk(unsigned long start, unsigned long end)
123{ 153{
@@ -156,11 +186,10 @@ static unsigned long * create_irix_tables(char * p, int argc, int envc,
156 elf_addr_t *envp; 186 elf_addr_t *envp;
157 elf_addr_t *sp, *csp; 187 elf_addr_t *sp, *csp;
158 188
159#ifdef DEBUG 189 pr_debug("create_irix_tables: p[%p] argc[%d] envc[%d] "
160 printk("create_irix_tables: p[%p] argc[%d] envc[%d] " 190 "load_addr[%08x] interp_load_addr[%08x]\n",
161 "load_addr[%08x] interp_load_addr[%08x]\n", 191 p, argc, envc, load_addr, interp_load_addr);
162 p, argc, envc, load_addr, interp_load_addr); 192
163#endif
164 sp = (elf_addr_t *) (~15UL & (unsigned long) p); 193 sp = (elf_addr_t *) (~15UL & (unsigned long) p);
165 csp = sp; 194 csp = sp;
166 csp -= exec ? DLINFO_ITEMS*2 : 2; 195 csp -= exec ? DLINFO_ITEMS*2 : 2;
@@ -181,7 +210,7 @@ static unsigned long * create_irix_tables(char * p, int argc, int envc,
181 sp -= 2; 210 sp -= 2;
182 NEW_AUX_ENT(0, AT_NULL, 0); 211 NEW_AUX_ENT(0, AT_NULL, 0);
183 212
184 if(exec) { 213 if (exec) {
185 sp -= 11*2; 214 sp -= 11*2;
186 215
187 NEW_AUX_ENT (0, AT_PHDR, load_addr + exec->e_phoff); 216 NEW_AUX_ENT (0, AT_PHDR, load_addr + exec->e_phoff);
@@ -245,9 +274,7 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
245 last_bss = 0; 274 last_bss = 0;
246 error = load_addr = 0; 275 error = load_addr = 0;
247 276
248#ifdef DEBUG
249 print_elfhdr(interp_elf_ex); 277 print_elfhdr(interp_elf_ex);
250#endif
251 278
252 /* First of all, some simple consistency checks */ 279 /* First of all, some simple consistency checks */
253 if ((interp_elf_ex->e_type != ET_EXEC && 280 if ((interp_elf_ex->e_type != ET_EXEC &&
@@ -258,7 +285,7 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
258 } 285 }
259 286
260 /* Now read in all of the header information */ 287 /* Now read in all of the header information */
261 if(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > PAGE_SIZE) { 288 if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > PAGE_SIZE) {
262 printk("IRIX interp header bigger than a page (%d)\n", 289 printk("IRIX interp header bigger than a page (%d)\n",
263 (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum)); 290 (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum));
264 return 0xffffffff; 291 return 0xffffffff;
@@ -267,15 +294,15 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
267 elf_phdata = kmalloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum, 294 elf_phdata = kmalloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum,
268 GFP_KERNEL); 295 GFP_KERNEL);
269 296
270 if(!elf_phdata) { 297 if (!elf_phdata) {
271 printk("Cannot kmalloc phdata for IRIX interp.\n"); 298 printk("Cannot kmalloc phdata for IRIX interp.\n");
272 return 0xffffffff; 299 return 0xffffffff;
273 } 300 }
274 301
275 /* If the size of this structure has changed, then punt, since 302 /* If the size of this structure has changed, then punt, since
276 * we will be doing the wrong thing. 303 * we will be doing the wrong thing.
277 */ 304 */
278 if(interp_elf_ex->e_phentsize != 32) { 305 if (interp_elf_ex->e_phentsize != 32) {
279 printk("IRIX interp e_phentsize == %d != 32 ", 306 printk("IRIX interp e_phentsize == %d != 32 ",
280 interp_elf_ex->e_phentsize); 307 interp_elf_ex->e_phentsize);
281 kfree(elf_phdata); 308 kfree(elf_phdata);
@@ -286,61 +313,71 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
286 (char *) elf_phdata, 313 (char *) elf_phdata,
287 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum); 314 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
288 315
289#ifdef DEBUG
290 dump_phdrs(elf_phdata, interp_elf_ex->e_phnum); 316 dump_phdrs(elf_phdata, interp_elf_ex->e_phnum);
291#endif
292 317
293 eppnt = elf_phdata; 318 eppnt = elf_phdata;
294 for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) { 319 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
295 if(eppnt->p_type == PT_LOAD) { 320 if (eppnt->p_type == PT_LOAD) {
296 int elf_type = MAP_PRIVATE | MAP_DENYWRITE; 321 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
297 int elf_prot = 0; 322 int elf_prot = 0;
298 unsigned long vaddr = 0; 323 unsigned long vaddr = 0;
299 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ; 324 if (eppnt->p_flags & PF_R)
300 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE; 325 elf_prot = PROT_READ;
301 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC; 326 if (eppnt->p_flags & PF_W)
302 elf_type |= MAP_FIXED; 327 elf_prot |= PROT_WRITE;
303 vaddr = eppnt->p_vaddr; 328 if (eppnt->p_flags & PF_X)
304 329 elf_prot |= PROT_EXEC;
305 pr_debug("INTERP do_mmap(%p, %08lx, %08lx, %08lx, %08lx, %08lx) ", 330 elf_type |= MAP_FIXED;
306 interpreter, vaddr, 331 vaddr = eppnt->p_vaddr;
307 (unsigned long) (eppnt->p_filesz + (eppnt->p_vaddr & 0xfff)), 332
308 (unsigned long) elf_prot, (unsigned long) elf_type, 333 pr_debug("INTERP do_mmap"
309 (unsigned long) (eppnt->p_offset & 0xfffff000)); 334 "(%p, %08lx, %08lx, %08lx, %08lx, %08lx) ",
310 down_write(&current->mm->mmap_sem); 335 interpreter, vaddr,
311 error = do_mmap(interpreter, vaddr, 336 (unsigned long)
312 eppnt->p_filesz + (eppnt->p_vaddr & 0xfff), 337 (eppnt->p_filesz + (eppnt->p_vaddr & 0xfff)),
313 elf_prot, elf_type, 338 (unsigned long)
314 eppnt->p_offset & 0xfffff000); 339 elf_prot, (unsigned long) elf_type,
315 up_write(&current->mm->mmap_sem); 340 (unsigned long)
316 341 (eppnt->p_offset & 0xfffff000));
317 if(error < 0 && error > -1024) { 342
318 printk("Aieee IRIX interp mmap error=%d\n", error); 343 down_write(&current->mm->mmap_sem);
319 break; /* Real error */ 344 error = do_mmap(interpreter, vaddr,
320 } 345 eppnt->p_filesz + (eppnt->p_vaddr & 0xfff),
321 pr_debug("error=%08lx ", (unsigned long) error); 346 elf_prot, elf_type,
322 if(!load_addr && interp_elf_ex->e_type == ET_DYN) { 347 eppnt->p_offset & 0xfffff000);
323 load_addr = error; 348 up_write(&current->mm->mmap_sem);
324 pr_debug("load_addr = error "); 349
325 } 350 if (error < 0 && error > -1024) {
326 351 printk("Aieee IRIX interp mmap error=%d\n",
327 /* Find the end of the file mapping for this phdr, and keep 352 error);
328 * track of the largest address we see for this. 353 break; /* Real error */
329 */ 354 }
330 k = eppnt->p_vaddr + eppnt->p_filesz; 355 pr_debug("error=%08lx ", (unsigned long) error);
331 if(k > elf_bss) elf_bss = k; 356 if (!load_addr && interp_elf_ex->e_type == ET_DYN) {
332 357 load_addr = error;
333 /* Do the same thing for the memory mapping - between 358 pr_debug("load_addr = error ");
334 * elf_bss and last_bss is the bss section. 359 }
335 */ 360
336 k = eppnt->p_memsz + eppnt->p_vaddr; 361 /*
337 if(k > last_bss) last_bss = k; 362 * Find the end of the file mapping for this phdr, and
338 pr_debug("\n"); 363 * keep track of the largest address we see for this.
339 } 364 */
365 k = eppnt->p_vaddr + eppnt->p_filesz;
366 if (k > elf_bss)
367 elf_bss = k;
368
369 /* Do the same thing for the memory mapping - between
370 * elf_bss and last_bss is the bss section.
371 */
372 k = eppnt->p_memsz + eppnt->p_vaddr;
373 if (k > last_bss)
374 last_bss = k;
375 pr_debug("\n");
376 }
340 } 377 }
341 378
342 /* Now use mmap to map the library into memory. */ 379 /* Now use mmap to map the library into memory. */
343 if(error < 0 && error > -1024) { 380 if (error < 0 && error > -1024) {
344 pr_debug("got error %d\n", error); 381 pr_debug("got error %d\n", error);
345 kfree(elf_phdata); 382 kfree(elf_phdata);
346 return 0xffffffff; 383 return 0xffffffff;
@@ -377,7 +414,7 @@ static int verify_binary(struct elfhdr *ehp, struct linux_binprm *bprm)
377 return -ENOEXEC; 414 return -ENOEXEC;
378 415
379 /* First of all, some simple consistency checks */ 416 /* First of all, some simple consistency checks */
380 if((ehp->e_type != ET_EXEC && ehp->e_type != ET_DYN) || 417 if ((ehp->e_type != ET_EXEC && ehp->e_type != ET_DYN) ||
381 !bprm->file->f_op->mmap) { 418 !bprm->file->f_op->mmap) {
382 return -ENOEXEC; 419 return -ENOEXEC;
383 } 420 }
@@ -388,7 +425,7 @@ static int verify_binary(struct elfhdr *ehp, struct linux_binprm *bprm)
388 * XXX all registers as 64bits on cpu's capable of this at 425 * XXX all registers as 64bits on cpu's capable of this at
389 * XXX exception time plus frob the XTLB exception vector. 426 * XXX exception time plus frob the XTLB exception vector.
390 */ 427 */
391 if((ehp->e_flags & EF_MIPS_ABI2)) 428 if ((ehp->e_flags & EF_MIPS_ABI2))
392 return -ENOEXEC; 429 return -ENOEXEC;
393 430
394 return 0; 431 return 0;
@@ -410,7 +447,7 @@ static inline int look_for_irix_interpreter(char **name,
410 struct file *file = NULL; 447 struct file *file = NULL;
411 448
412 *name = NULL; 449 *name = NULL;
413 for(i = 0; i < pnum; i++, epp++) { 450 for (i = 0; i < pnum; i++, epp++) {
414 if (epp->p_type != PT_INTERP) 451 if (epp->p_type != PT_INTERP)
415 continue; 452 continue;
416 453
@@ -467,8 +504,8 @@ static inline void map_executable(struct file *fp, struct elf_phdr *epp, int pnu
467 unsigned int tmp; 504 unsigned int tmp;
468 int i, prot; 505 int i, prot;
469 506
470 for(i = 0; i < pnum; i++, epp++) { 507 for (i = 0; i < pnum; i++, epp++) {
471 if(epp->p_type != PT_LOAD) 508 if (epp->p_type != PT_LOAD)
472 continue; 509 continue;
473 510
474 /* Map it. */ 511 /* Map it. */
@@ -483,23 +520,23 @@ static inline void map_executable(struct file *fp, struct elf_phdr *epp, int pnu
483 up_write(&current->mm->mmap_sem); 520 up_write(&current->mm->mmap_sem);
484 521
485 /* Fixup location tracking vars. */ 522 /* Fixup location tracking vars. */
486 if((epp->p_vaddr & 0xfffff000) < *estack) 523 if ((epp->p_vaddr & 0xfffff000) < *estack)
487 *estack = (epp->p_vaddr & 0xfffff000); 524 *estack = (epp->p_vaddr & 0xfffff000);
488 if(!*laddr) 525 if (!*laddr)
489 *laddr = epp->p_vaddr - epp->p_offset; 526 *laddr = epp->p_vaddr - epp->p_offset;
490 if(epp->p_vaddr < *scode) 527 if (epp->p_vaddr < *scode)
491 *scode = epp->p_vaddr; 528 *scode = epp->p_vaddr;
492 529
493 tmp = epp->p_vaddr + epp->p_filesz; 530 tmp = epp->p_vaddr + epp->p_filesz;
494 if(tmp > *ebss) 531 if (tmp > *ebss)
495 *ebss = tmp; 532 *ebss = tmp;
496 if((epp->p_flags & PF_X) && *ecode < tmp) 533 if ((epp->p_flags & PF_X) && *ecode < tmp)
497 *ecode = tmp; 534 *ecode = tmp;
498 if(*edata < tmp) 535 if (*edata < tmp)
499 *edata = tmp; 536 *edata = tmp;
500 537
501 tmp = epp->p_vaddr + epp->p_memsz; 538 tmp = epp->p_vaddr + epp->p_memsz;
502 if(tmp > *ebrk) 539 if (tmp > *ebrk)
503 *ebrk = tmp; 540 *ebrk = tmp;
504 } 541 }
505 542
@@ -513,12 +550,12 @@ static inline int map_interpreter(struct elf_phdr *epp, struct elfhdr *ihp,
513 int i; 550 int i;
514 551
515 *eentry = 0xffffffff; 552 *eentry = 0xffffffff;
516 for(i = 0; i < pnum; i++, epp++) { 553 for (i = 0; i < pnum; i++, epp++) {
517 if(epp->p_type != PT_INTERP) 554 if (epp->p_type != PT_INTERP)
518 continue; 555 continue;
519 556
520 /* We should have fielded this error elsewhere... */ 557 /* We should have fielded this error elsewhere... */
521 if(*eentry != 0xffffffff) 558 if (*eentry != 0xffffffff)
522 return -1; 559 return -1;
523 560
524 set_fs(old_fs); 561 set_fs(old_fs);
@@ -604,9 +641,7 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
604 if (elf_ex.e_shnum > 20) 641 if (elf_ex.e_shnum > 20)
605 goto out; 642 goto out;
606 643
607#ifdef DEBUG
608 print_elfhdr(&elf_ex); 644 print_elfhdr(&elf_ex);
609#endif
610 645
611 /* Now read in all of the header information */ 646 /* Now read in all of the header information */
612 size = elf_ex.e_phentsize * elf_ex.e_phnum; 647 size = elf_ex.e_phentsize * elf_ex.e_phnum;
@@ -622,13 +657,11 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
622 if (retval < 0) 657 if (retval < 0)
623 goto out_free_ph; 658 goto out_free_ph;
624 659
625#ifdef DEBUG
626 dump_phdrs(elf_phdata, elf_ex.e_phnum); 660 dump_phdrs(elf_phdata, elf_ex.e_phnum);
627#endif
628 661
629 /* Set some things for later. */ 662 /* Set some things for later. */
630 for(i = 0; i < elf_ex.e_phnum; i++) { 663 for (i = 0; i < elf_ex.e_phnum; i++) {
631 switch(elf_phdata[i].p_type) { 664 switch (elf_phdata[i].p_type) {
632 case PT_INTERP: 665 case PT_INTERP:
633 has_interp = 1; 666 has_interp = 1;
634 elf_ihdr = &elf_phdata[i]; 667 elf_ihdr = &elf_phdata[i];
@@ -667,7 +700,7 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
667 700
668 if (elf_interpreter) { 701 if (elf_interpreter) {
669 retval = verify_irix_interpreter(&interp_elf_ex); 702 retval = verify_irix_interpreter(&interp_elf_ex);
670 if(retval) 703 if (retval)
671 goto out_free_interp; 704 goto out_free_interp;
672 } 705 }
673 706
@@ -706,12 +739,12 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
706 &load_addr, &start_code, &elf_bss, &end_code, 739 &load_addr, &start_code, &elf_bss, &end_code,
707 &end_data, &elf_brk); 740 &end_data, &elf_brk);
708 741
709 if(elf_interpreter) { 742 if (elf_interpreter) {
710 retval = map_interpreter(elf_phdata, &interp_elf_ex, 743 retval = map_interpreter(elf_phdata, &interp_elf_ex,
711 interpreter, &interp_load_addr, 744 interpreter, &interp_load_addr,
712 elf_ex.e_phnum, old_fs, &elf_entry); 745 elf_ex.e_phnum, old_fs, &elf_entry);
713 kfree(elf_interpreter); 746 kfree(elf_interpreter);
714 if(retval) { 747 if (retval) {
715 set_fs(old_fs); 748 set_fs(old_fs);
716 printk("Unable to load IRIX ELF interpreter\n"); 749 printk("Unable to load IRIX ELF interpreter\n");
717 send_sig(SIGSEGV, current, 0); 750 send_sig(SIGSEGV, current, 0);
@@ -809,12 +842,12 @@ static int load_irix_library(struct file *file)
809 return -ENOEXEC; 842 return -ENOEXEC;
810 843
811 /* First of all, some simple consistency checks. */ 844 /* First of all, some simple consistency checks. */
812 if(elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 || 845 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
813 !file->f_op->mmap) 846 !file->f_op->mmap)
814 return -ENOEXEC; 847 return -ENOEXEC;
815 848
816 /* Now read in all of the header information. */ 849 /* Now read in all of the header information. */
817 if(sizeof(struct elf_phdr) * elf_ex.e_phnum > PAGE_SIZE) 850 if (sizeof(struct elf_phdr) * elf_ex.e_phnum > PAGE_SIZE)
818 return -ENOEXEC; 851 return -ENOEXEC;
819 852
820 elf_phdata = kmalloc(sizeof(struct elf_phdr) * elf_ex.e_phnum, GFP_KERNEL); 853 elf_phdata = kmalloc(sizeof(struct elf_phdr) * elf_ex.e_phnum, GFP_KERNEL);
@@ -825,15 +858,15 @@ static int load_irix_library(struct file *file)
825 sizeof(struct elf_phdr) * elf_ex.e_phnum); 858 sizeof(struct elf_phdr) * elf_ex.e_phnum);
826 859
827 j = 0; 860 j = 0;
828 for(i=0; i<elf_ex.e_phnum; i++) 861 for (i=0; i<elf_ex.e_phnum; i++)
829 if((elf_phdata + i)->p_type == PT_LOAD) j++; 862 if ((elf_phdata + i)->p_type == PT_LOAD) j++;
830 863
831 if(j != 1) { 864 if (j != 1) {
832 kfree(elf_phdata); 865 kfree(elf_phdata);
833 return -ENOEXEC; 866 return -ENOEXEC;
834 } 867 }
835 868
836 while(elf_phdata->p_type != PT_LOAD) elf_phdata++; 869 while (elf_phdata->p_type != PT_LOAD) elf_phdata++;
837 870
838 /* Now use mmap to map the library into memory. */ 871 /* Now use mmap to map the library into memory. */
839 down_write(&current->mm->mmap_sem); 872 down_write(&current->mm->mmap_sem);
@@ -889,9 +922,7 @@ unsigned long irix_mapelf(int fd, struct elf_phdr __user *user_phdrp, int cnt)
889 return -EFAULT; 922 return -EFAULT;
890 } 923 }
891 924
892#ifdef DEBUG
893 dump_phdrs(user_phdrp, cnt); 925 dump_phdrs(user_phdrp, cnt);
894#endif
895 926
896 for (i = 0; i < cnt; i++, hp++) { 927 for (i = 0; i < cnt; i++, hp++) {
897 if (__get_user(type, &hp->p_type)) 928 if (__get_user(type, &hp->p_type))
@@ -905,14 +936,14 @@ unsigned long irix_mapelf(int fd, struct elf_phdr __user *user_phdrp, int cnt)
905 filp = fget(fd); 936 filp = fget(fd);
906 if (!filp) 937 if (!filp)
907 return -EACCES; 938 return -EACCES;
908 if(!filp->f_op) { 939 if (!filp->f_op) {
909 printk("irix_mapelf: Bogon filp!\n"); 940 printk("irix_mapelf: Bogon filp!\n");
910 fput(filp); 941 fput(filp);
911 return -EACCES; 942 return -EACCES;
912 } 943 }
913 944
914 hp = user_phdrp; 945 hp = user_phdrp;
915 for(i = 0; i < cnt; i++, hp++) { 946 for (i = 0; i < cnt; i++, hp++) {
916 int prot; 947 int prot;
917 948
918 retval = __get_user(vaddr, &hp->p_vaddr); 949 retval = __get_user(vaddr, &hp->p_vaddr);
@@ -1015,8 +1046,6 @@ static int notesize(struct memelfnote *en)
1015 return sz; 1046 return sz;
1016} 1047}
1017 1048
1018/* #define DEBUG */
1019
1020#define DUMP_WRITE(addr, nr) \ 1049#define DUMP_WRITE(addr, nr) \
1021 if (!dump_write(file, (addr), (nr))) \ 1050 if (!dump_write(file, (addr), (nr))) \
1022 goto end_coredump; 1051 goto end_coredump;
@@ -1093,9 +1122,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1093 1122
1094 segs++; 1123 segs++;
1095 } 1124 }
1096#ifdef DEBUG 1125 pr_debug("irix_core_dump: %d segs taking %d bytes\n", segs, size);
1097 printk("irix_core_dump: %d segs taking %d bytes\n", segs, size);
1098#endif
1099 1126
1100 /* Set up header. */ 1127 /* Set up header. */
1101 memcpy(elf.e_ident, ELFMAG, SELFMAG); 1128 memcpy(elf.e_ident, ELFMAG, SELFMAG);
@@ -1221,7 +1248,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1221 struct elf_phdr phdr; 1248 struct elf_phdr phdr;
1222 int sz = 0; 1249 int sz = 0;
1223 1250
1224 for(i = 0; i < numnote; i++) 1251 for (i = 0; i < numnote; i++)
1225 sz += notesize(&notes[i]); 1252 sz += notesize(&notes[i]);
1226 1253
1227 phdr.p_type = PT_NOTE; 1254 phdr.p_type = PT_NOTE;
@@ -1241,7 +1268,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1241 dataoff = offset = roundup(offset, PAGE_SIZE); 1268 dataoff = offset = roundup(offset, PAGE_SIZE);
1242 1269
1243 /* Write program headers for segments dump. */ 1270 /* Write program headers for segments dump. */
1244 for(vma = current->mm->mmap, i = 0; 1271 for (vma = current->mm->mmap, i = 0;
1245 i < segs && vma != NULL; vma = vma->vm_next) { 1272 i < segs && vma != NULL; vma = vma->vm_next) {
1246 struct elf_phdr phdr; 1273 struct elf_phdr phdr;
1247 size_t sz; 1274 size_t sz;
@@ -1267,7 +1294,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1267 DUMP_WRITE(&phdr, sizeof(phdr)); 1294 DUMP_WRITE(&phdr, sizeof(phdr));
1268 } 1295 }
1269 1296
1270 for(i = 0; i < numnote; i++) 1297 for (i = 0; i < numnote; i++)
1271 if (!writenote(&notes[i], file)) 1298 if (!writenote(&notes[i], file))
1272 goto end_coredump; 1299 goto end_coredump;
1273 1300
@@ -1275,7 +1302,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1275 1302
1276 DUMP_SEEK(dataoff); 1303 DUMP_SEEK(dataoff);
1277 1304
1278 for(i = 0, vma = current->mm->mmap; 1305 for (i = 0, vma = current->mm->mmap;
1279 i < segs && vma != NULL; 1306 i < segs && vma != NULL;
1280 vma = vma->vm_next) { 1307 vma = vma->vm_next) {
1281 unsigned long addr = vma->vm_start; 1308 unsigned long addr = vma->vm_start;
@@ -1284,9 +1311,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1284 if (!maydump(vma)) 1311 if (!maydump(vma))
1285 continue; 1312 continue;
1286 i++; 1313 i++;
1287#ifdef DEBUG 1314 pr_debug("elf_core_dump: writing %08lx %lx\n", addr, len);
1288 printk("elf_core_dump: writing %08lx %lx\n", addr, len);
1289#endif
1290 DUMP_WRITE((void __user *)addr, len); 1315 DUMP_WRITE((void __user *)addr, len);
1291 } 1316 }
1292 1317
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index bcaad6696082..2967537221e2 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -112,7 +112,7 @@ msc_bind_eic_interrupt (unsigned int irq, unsigned int set)
112} 112}
113 113
114struct irq_chip msc_levelirq_type = { 114struct irq_chip msc_levelirq_type = {
115 .typename = "SOC-it-Level", 115 .name = "SOC-it-Level",
116 .ack = level_mask_and_ack_msc_irq, 116 .ack = level_mask_and_ack_msc_irq,
117 .mask = mask_msc_irq, 117 .mask = mask_msc_irq,
118 .mask_ack = level_mask_and_ack_msc_irq, 118 .mask_ack = level_mask_and_ack_msc_irq,
@@ -122,7 +122,7 @@ struct irq_chip msc_levelirq_type = {
122}; 122};
123 123
124struct irq_chip msc_edgeirq_type = { 124struct irq_chip msc_edgeirq_type = {
125 .typename = "SOC-it-Edge", 125 .name = "SOC-it-Edge",
126 .ack = edge_mask_and_ack_msc_irq, 126 .ack = edge_mask_and_ack_msc_irq,
127 .mask = mask_msc_irq, 127 .mask = mask_msc_irq,
128 .mask_ack = edge_mask_and_ack_msc_irq, 128 .mask_ack = edge_mask_and_ack_msc_irq,
diff --git a/arch/mips/kernel/irq-mv6434x.c b/arch/mips/kernel/irq-mv6434x.c
index efbd219845b5..3dd561832e4c 100644
--- a/arch/mips/kernel/irq-mv6434x.c
+++ b/arch/mips/kernel/irq-mv6434x.c
@@ -23,13 +23,13 @@ static unsigned int irq_base;
23 23
24static inline int ls1bit32(unsigned int x) 24static inline int ls1bit32(unsigned int x)
25{ 25{
26 int b = 31, s; 26 int b = 31, s;
27 27
28 s = 16; if (x << 16 == 0) s = 0; b -= s; x <<= s; 28 s = 16; if (x << 16 == 0) s = 0; b -= s; x <<= s;
29 s = 8; if (x << 8 == 0) s = 0; b -= s; x <<= s; 29 s = 8; if (x << 8 == 0) s = 0; b -= s; x <<= s;
30 s = 4; if (x << 4 == 0) s = 0; b -= s; x <<= s; 30 s = 4; if (x << 4 == 0) s = 0; b -= s; x <<= s;
31 s = 2; if (x << 2 == 0) s = 0; b -= s; x <<= s; 31 s = 2; if (x << 2 == 0) s = 0; b -= s; x <<= s;
32 s = 1; if (x << 1 == 0) s = 0; b -= s; 32 s = 1; if (x << 1 == 0) s = 0; b -= s;
33 33
34 return b; 34 return b;
35} 35}
@@ -92,7 +92,7 @@ void ll_mv64340_irq(void)
92} 92}
93 93
94struct irq_chip mv64340_irq_type = { 94struct irq_chip mv64340_irq_type = {
95 .typename = "MV-64340", 95 .name = "MV-64340",
96 .ack = mask_mv64340_irq, 96 .ack = mask_mv64340_irq,
97 .mask = mask_mv64340_irq, 97 .mask = mask_mv64340_irq,
98 .mask_ack = mask_mv64340_irq, 98 .mask_ack = mask_mv64340_irq,
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c
index 123324ba8c14..250732883488 100644
--- a/arch/mips/kernel/irq-rm7000.c
+++ b/arch/mips/kernel/irq-rm7000.c
@@ -17,28 +17,27 @@
17#include <asm/mipsregs.h> 17#include <asm/mipsregs.h>
18#include <asm/system.h> 18#include <asm/system.h>
19 19
20static int irq_base;
21
22static inline void unmask_rm7k_irq(unsigned int irq) 20static inline void unmask_rm7k_irq(unsigned int irq)
23{ 21{
24 set_c0_intcontrol(0x100 << (irq - irq_base)); 22 set_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE));
25} 23}
26 24
27static inline void mask_rm7k_irq(unsigned int irq) 25static inline void mask_rm7k_irq(unsigned int irq)
28{ 26{
29 clear_c0_intcontrol(0x100 << (irq - irq_base)); 27 clear_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE));
30} 28}
31 29
32static struct irq_chip rm7k_irq_controller = { 30static struct irq_chip rm7k_irq_controller = {
33 .typename = "RM7000", 31 .name = "RM7000",
34 .ack = mask_rm7k_irq, 32 .ack = mask_rm7k_irq,
35 .mask = mask_rm7k_irq, 33 .mask = mask_rm7k_irq,
36 .mask_ack = mask_rm7k_irq, 34 .mask_ack = mask_rm7k_irq,
37 .unmask = unmask_rm7k_irq, 35 .unmask = unmask_rm7k_irq,
38}; 36};
39 37
40void __init rm7k_cpu_irq_init(int base) 38void __init rm7k_cpu_irq_init(void)
41{ 39{
40 int base = RM7K_CPU_IRQ_BASE;
42 int i; 41 int i;
43 42
44 clear_c0_intcontrol(0x00000f00); /* Mask all */ 43 clear_c0_intcontrol(0x00000f00); /* Mask all */
@@ -46,6 +45,4 @@ void __init rm7k_cpu_irq_init(int base)
46 for (i = base; i < base + 4; i++) 45 for (i = base; i < base + 4; i++)
47 set_irq_chip_and_handler(i, &rm7k_irq_controller, 46 set_irq_chip_and_handler(i, &rm7k_irq_controller,
48 handle_level_irq); 47 handle_level_irq);
49
50 irq_base = base;
51} 48}
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
index 0e6f4c5349d2..ae83d2df6f31 100644
--- a/arch/mips/kernel/irq-rm9000.c
+++ b/arch/mips/kernel/irq-rm9000.c
@@ -18,16 +18,14 @@
18#include <asm/mipsregs.h> 18#include <asm/mipsregs.h>
19#include <asm/system.h> 19#include <asm/system.h>
20 20
21static int irq_base;
22
23static inline void unmask_rm9k_irq(unsigned int irq) 21static inline void unmask_rm9k_irq(unsigned int irq)
24{ 22{
25 set_c0_intcontrol(0x1000 << (irq - irq_base)); 23 set_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE));
26} 24}
27 25
28static inline void mask_rm9k_irq(unsigned int irq) 26static inline void mask_rm9k_irq(unsigned int irq)
29{ 27{
30 clear_c0_intcontrol(0x1000 << (irq - irq_base)); 28 clear_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE));
31} 29}
32 30
33static inline void rm9k_cpu_irq_enable(unsigned int irq) 31static inline void rm9k_cpu_irq_enable(unsigned int irq)
@@ -39,15 +37,6 @@ static inline void rm9k_cpu_irq_enable(unsigned int irq)
39 local_irq_restore(flags); 37 local_irq_restore(flags);
40} 38}
41 39
42static void rm9k_cpu_irq_disable(unsigned int irq)
43{
44 unsigned long flags;
45
46 local_irq_save(flags);
47 mask_rm9k_irq(irq);
48 local_irq_restore(flags);
49}
50
51/* 40/*
52 * Performance counter interrupts are global on all processors. 41 * Performance counter interrupts are global on all processors.
53 */ 42 */
@@ -81,7 +70,7 @@ static void rm9k_perfcounter_irq_shutdown(unsigned int irq)
81} 70}
82 71
83static struct irq_chip rm9k_irq_controller = { 72static struct irq_chip rm9k_irq_controller = {
84 .typename = "RM9000", 73 .name = "RM9000",
85 .ack = mask_rm9k_irq, 74 .ack = mask_rm9k_irq,
86 .mask = mask_rm9k_irq, 75 .mask = mask_rm9k_irq,
87 .mask_ack = mask_rm9k_irq, 76 .mask_ack = mask_rm9k_irq,
@@ -89,7 +78,7 @@ static struct irq_chip rm9k_irq_controller = {
89}; 78};
90 79
91static struct irq_chip rm9k_perfcounter_irq = { 80static struct irq_chip rm9k_perfcounter_irq = {
92 .typename = "RM9000", 81 .name = "RM9000",
93 .startup = rm9k_perfcounter_irq_startup, 82 .startup = rm9k_perfcounter_irq_startup,
94 .shutdown = rm9k_perfcounter_irq_shutdown, 83 .shutdown = rm9k_perfcounter_irq_shutdown,
95 .ack = mask_rm9k_irq, 84 .ack = mask_rm9k_irq,
@@ -102,8 +91,9 @@ unsigned int rm9000_perfcount_irq;
102 91
103EXPORT_SYMBOL(rm9000_perfcount_irq); 92EXPORT_SYMBOL(rm9000_perfcount_irq);
104 93
105void __init rm9k_cpu_irq_init(int base) 94void __init rm9k_cpu_irq_init(void)
106{ 95{
96 int base = RM9K_CPU_IRQ_BASE;
107 int i; 97 int i;
108 98
109 clear_c0_intcontrol(0x0000f000); /* Mask all */ 99 clear_c0_intcontrol(0x0000f000); /* Mask all */
@@ -115,6 +105,4 @@ void __init rm9k_cpu_irq_init(int base)
115 rm9000_perfcount_irq = base + 1; 105 rm9000_perfcount_irq = base + 1;
116 set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq, 106 set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq,
117 handle_level_irq); 107 handle_level_irq);
118
119 irq_base = base;
120} 108}
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index fcc86b96ccf6..7b66e03b5899 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -25,7 +25,7 @@
25 * Don't even think about using this on SMP. You have been warned. 25 * Don't even think about using this on SMP. You have been warned.
26 * 26 *
27 * This file exports one global function: 27 * This file exports one global function:
28 * void mips_cpu_irq_init(int irq_base); 28 * void mips_cpu_irq_init(void);
29 */ 29 */
30#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/interrupt.h> 31#include <linux/interrupt.h>
@@ -36,22 +36,20 @@
36#include <asm/mipsmtregs.h> 36#include <asm/mipsmtregs.h>
37#include <asm/system.h> 37#include <asm/system.h>
38 38
39static int mips_cpu_irq_base;
40
41static inline void unmask_mips_irq(unsigned int irq) 39static inline void unmask_mips_irq(unsigned int irq)
42{ 40{
43 set_c0_status(0x100 << (irq - mips_cpu_irq_base)); 41 set_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE));
44 irq_enable_hazard(); 42 irq_enable_hazard();
45} 43}
46 44
47static inline void mask_mips_irq(unsigned int irq) 45static inline void mask_mips_irq(unsigned int irq)
48{ 46{
49 clear_c0_status(0x100 << (irq - mips_cpu_irq_base)); 47 clear_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE));
50 irq_disable_hazard(); 48 irq_disable_hazard();
51} 49}
52 50
53static struct irq_chip mips_cpu_irq_controller = { 51static struct irq_chip mips_cpu_irq_controller = {
54 .typename = "MIPS", 52 .name = "MIPS",
55 .ack = mask_mips_irq, 53 .ack = mask_mips_irq,
56 .mask = mask_mips_irq, 54 .mask = mask_mips_irq,
57 .mask_ack = mask_mips_irq, 55 .mask_ack = mask_mips_irq,
@@ -70,7 +68,7 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
70{ 68{
71 unsigned int vpflags = dvpe(); 69 unsigned int vpflags = dvpe();
72 70
73 clear_c0_cause(0x100 << (irq - mips_cpu_irq_base)); 71 clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE));
74 evpe(vpflags); 72 evpe(vpflags);
75 unmask_mips_mt_irq(irq); 73 unmask_mips_mt_irq(irq);
76 74
@@ -84,13 +82,13 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
84static void mips_mt_cpu_irq_ack(unsigned int irq) 82static void mips_mt_cpu_irq_ack(unsigned int irq)
85{ 83{
86 unsigned int vpflags = dvpe(); 84 unsigned int vpflags = dvpe();
87 clear_c0_cause(0x100 << (irq - mips_cpu_irq_base)); 85 clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE));
88 evpe(vpflags); 86 evpe(vpflags);
89 mask_mips_mt_irq(irq); 87 mask_mips_mt_irq(irq);
90} 88}
91 89
92static struct irq_chip mips_mt_cpu_irq_controller = { 90static struct irq_chip mips_mt_cpu_irq_controller = {
93 .typename = "MIPS", 91 .name = "MIPS",
94 .startup = mips_mt_cpu_irq_startup, 92 .startup = mips_mt_cpu_irq_startup,
95 .ack = mips_mt_cpu_irq_ack, 93 .ack = mips_mt_cpu_irq_ack,
96 .mask = mask_mips_mt_irq, 94 .mask = mask_mips_mt_irq,
@@ -99,8 +97,9 @@ static struct irq_chip mips_mt_cpu_irq_controller = {
99 .eoi = unmask_mips_mt_irq, 97 .eoi = unmask_mips_mt_irq,
100}; 98};
101 99
102void __init mips_cpu_irq_init(int irq_base) 100void __init mips_cpu_irq_init(void)
103{ 101{
102 int irq_base = MIPS_CPU_IRQ_BASE;
104 int i; 103 int i;
105 104
106 /* Mask interrupts. */ 105 /* Mask interrupts. */
@@ -118,6 +117,4 @@ void __init mips_cpu_irq_init(int irq_base)
118 for (i = irq_base + 2; i < irq_base + 8; i++) 117 for (i = irq_base + 2; i < irq_base + 8; i++)
119 set_irq_chip_and_handler(i, &mips_cpu_irq_controller, 118 set_irq_chip_and_handler(i, &mips_cpu_irq_controller,
120 handle_level_irq); 119 handle_level_irq);
121
122 mips_cpu_irq_base = irq_base;
123} 120}
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index de3fae260ff8..0b8ce59429a8 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -194,15 +194,15 @@ sysn32_waitid(int which, compat_pid_t pid,
194} 194}
195 195
196struct sysinfo32 { 196struct sysinfo32 {
197 s32 uptime; 197 s32 uptime;
198 u32 loads[3]; 198 u32 loads[3];
199 u32 totalram; 199 u32 totalram;
200 u32 freeram; 200 u32 freeram;
201 u32 sharedram; 201 u32 sharedram;
202 u32 bufferram; 202 u32 bufferram;
203 u32 totalswap; 203 u32 totalswap;
204 u32 freeswap; 204 u32 freeswap;
205 u16 procs; 205 u16 procs;
206 u32 totalhigh; 206 u32 totalhigh;
207 u32 freehigh; 207 u32 freehigh;
208 u32 mem_unit; 208 u32 mem_unit;
@@ -558,7 +558,7 @@ extern asmlinkage long sys_ustat(dev_t dev, struct ustat __user * ubuf);
558asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32) 558asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32)
559{ 559{
560 int err; 560 int err;
561 struct ustat tmp; 561 struct ustat tmp;
562 struct ustat32 tmp32; 562 struct ustat32 tmp32;
563 mm_segment_t old_fs = get_fs(); 563 mm_segment_t old_fs = get_fs();
564 564
@@ -569,11 +569,11 @@ asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32)
569 if (err) 569 if (err)
570 goto out; 570 goto out;
571 571
572 memset(&tmp32,0,sizeof(struct ustat32)); 572 memset(&tmp32,0,sizeof(struct ustat32));
573 tmp32.f_tfree = tmp.f_tfree; 573 tmp32.f_tfree = tmp.f_tfree;
574 tmp32.f_tinode = tmp.f_tinode; 574 tmp32.f_tinode = tmp.f_tinode;
575 575
576 err = copy_to_user(ubuf32,&tmp32,sizeof(struct ustat32)) ? -EFAULT : 0; 576 err = copy_to_user(ubuf32,&tmp32,sizeof(struct ustat32)) ? -EFAULT : 0;
577 577
578out: 578out:
579 return err; 579 return err;
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index c1373a6e668b..a32f6797353a 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -96,6 +96,10 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
96 goto out_unlock; 96 goto out_unlock;
97 } 97 }
98 98
99 retval = security_task_setscheduler(p, 0, NULL);
100 if (retval)
101 goto out_unlock;
102
99 /* Record new user-specified CPU set for future reference */ 103 /* Record new user-specified CPU set for future reference */
100 p->thread.user_cpus_allowed = new_mask; 104 p->thread.user_cpus_allowed = new_mask;
101 105
@@ -141,8 +145,9 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
141 p = find_process_by_pid(pid); 145 p = find_process_by_pid(pid);
142 if (!p) 146 if (!p)
143 goto out_unlock; 147 goto out_unlock;
144 148 retval = security_task_getscheduler(p);
145 retval = 0; 149 if (retval)
150 goto out_unlock;
146 151
147 cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map); 152 cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
148 153
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 4ed37ba19731..5ddc2e9deecf 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -31,13 +31,13 @@ static const char *cpu_name[] = {
31 [CPU_R4000PC] = "R4000PC", 31 [CPU_R4000PC] = "R4000PC",
32 [CPU_R4000SC] = "R4000SC", 32 [CPU_R4000SC] = "R4000SC",
33 [CPU_R4000MC] = "R4000MC", 33 [CPU_R4000MC] = "R4000MC",
34 [CPU_R4200] = "R4200", 34 [CPU_R4200] = "R4200",
35 [CPU_R4400PC] = "R4400PC", 35 [CPU_R4400PC] = "R4400PC",
36 [CPU_R4400SC] = "R4400SC", 36 [CPU_R4400SC] = "R4400SC",
37 [CPU_R4400MC] = "R4400MC", 37 [CPU_R4400MC] = "R4400MC",
38 [CPU_R4600] = "R4600", 38 [CPU_R4600] = "R4600",
39 [CPU_R6000] = "R6000", 39 [CPU_R6000] = "R6000",
40 [CPU_R6000A] = "R6000A", 40 [CPU_R6000A] = "R6000A",
41 [CPU_R8000] = "R8000", 41 [CPU_R8000] = "R8000",
42 [CPU_R10000] = "R10000", 42 [CPU_R10000] = "R10000",
43 [CPU_R12000] = "R12000", 43 [CPU_R12000] = "R12000",
@@ -46,14 +46,14 @@ static const char *cpu_name[] = {
46 [CPU_R4650] = "R4650", 46 [CPU_R4650] = "R4650",
47 [CPU_R4700] = "R4700", 47 [CPU_R4700] = "R4700",
48 [CPU_R5000] = "R5000", 48 [CPU_R5000] = "R5000",
49 [CPU_R5000A] = "R5000A", 49 [CPU_R5000A] = "R5000A",
50 [CPU_R4640] = "R4640", 50 [CPU_R4640] = "R4640",
51 [CPU_NEVADA] = "Nevada", 51 [CPU_NEVADA] = "Nevada",
52 [CPU_RM7000] = "RM7000", 52 [CPU_RM7000] = "RM7000",
53 [CPU_RM9000] = "RM9000", 53 [CPU_RM9000] = "RM9000",
54 [CPU_R5432] = "R5432", 54 [CPU_R5432] = "R5432",
55 [CPU_4KC] = "MIPS 4Kc", 55 [CPU_4KC] = "MIPS 4Kc",
56 [CPU_5KC] = "MIPS 5Kc", 56 [CPU_5KC] = "MIPS 5Kc",
57 [CPU_R4310] = "R4310", 57 [CPU_R4310] = "R4310",
58 [CPU_SB1] = "SiByte SB1", 58 [CPU_SB1] = "SiByte SB1",
59 [CPU_SB1A] = "SiByte SB1A", 59 [CPU_SB1A] = "SiByte SB1A",
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index ec8209f3a0c6..04e5b38d327d 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -41,10 +41,6 @@
41#include <asm/isadep.h> 41#include <asm/isadep.h>
42#include <asm/inst.h> 42#include <asm/inst.h>
43#include <asm/stacktrace.h> 43#include <asm/stacktrace.h>
44#ifdef CONFIG_MIPS_MT_SMTC
45#include <asm/mipsmtregs.h>
46extern void smtc_idle_loop_hook(void);
47#endif /* CONFIG_MIPS_MT_SMTC */
48 44
49/* 45/*
50 * The idle thread. There's no useful work to be done, so just try to conserve 46 * The idle thread. There's no useful work to be done, so just try to conserve
@@ -57,6 +53,8 @@ ATTRIB_NORET void cpu_idle(void)
57 while (1) { 53 while (1) {
58 while (!need_resched()) { 54 while (!need_resched()) {
59#ifdef CONFIG_MIPS_MT_SMTC 55#ifdef CONFIG_MIPS_MT_SMTC
56 extern void smtc_idle_loop_hook(void);
57
60 smtc_idle_loop_hook(); 58 smtc_idle_loop_hook();
61#endif /* CONFIG_MIPS_MT_SMTC */ 59#endif /* CONFIG_MIPS_MT_SMTC */
62 if (cpu_wait) 60 if (cpu_wait)
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 880fa6e841ee..59c1577ecbb3 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -114,6 +114,14 @@ LEAF(_save_fp_context32)
114 */ 114 */
115LEAF(_restore_fp_context) 115LEAF(_restore_fp_context)
116 EX lw t0, SC_FPC_CSR(a0) 116 EX lw t0, SC_FPC_CSR(a0)
117
118 /* Fail if the CSR has exceptions pending */
119 srl t1, t0, 5
120 and t1, t0
121 andi t1, 0x1f << 7
122 bnez t1, fault
123 nop
124
117#ifdef CONFIG_64BIT 125#ifdef CONFIG_64BIT
118 EX ldc1 $f1, SC_FPREGS+8(a0) 126 EX ldc1 $f1, SC_FPREGS+8(a0)
119 EX ldc1 $f3, SC_FPREGS+24(a0) 127 EX ldc1 $f3, SC_FPREGS+24(a0)
@@ -157,6 +165,14 @@ LEAF(_restore_fp_context)
157LEAF(_restore_fp_context32) 165LEAF(_restore_fp_context32)
158 /* Restore an o32 sigcontext. */ 166 /* Restore an o32 sigcontext. */
159 EX lw t0, SC32_FPC_CSR(a0) 167 EX lw t0, SC32_FPC_CSR(a0)
168
169 /* Fail if the CSR has exceptions pending */
170 srl t1, t0, 5
171 and t1, t0
172 andi t1, 0x1f << 7
173 bnez t1, fault
174 nop
175
160 EX ldc1 $f0, SC32_FPREGS+0(a0) 176 EX ldc1 $f0, SC32_FPREGS+0(a0)
161 EX ldc1 $f2, SC32_FPREGS+16(a0) 177 EX ldc1 $f2, SC32_FPREGS+16(a0)
162 EX ldc1 $f4, SC32_FPREGS+32(a0) 178 EX ldc1 $f4, SC32_FPREGS+32(a0)
@@ -177,9 +193,10 @@ LEAF(_restore_fp_context32)
177 jr ra 193 jr ra
178 li v0, 0 # success 194 li v0, 0 # success
179 END(_restore_fp_context32) 195 END(_restore_fp_context32)
180 .set reorder
181#endif 196#endif
182 197
198 .set reorder
199
183 .type fault@function 200 .type fault@function
184 .ent fault 201 .ent fault
185fault: li v0, -EFAULT # failure 202fault: li v0, -EFAULT # failure
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 5a99e3e0c96d..8610f4a925e9 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -63,7 +63,7 @@ extern void *vpe_get_shared(int index);
63 63
64static void rtlx_dispatch(void) 64static void rtlx_dispatch(void)
65{ 65{
66 do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ); 66 do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ);
67} 67}
68 68
69 69
@@ -491,7 +491,7 @@ static struct irqaction rtlx_irq = {
491 .name = "RTLX", 491 .name = "RTLX",
492}; 492};
493 493
494static int rtlx_irq_num = MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ; 494static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ;
495 495
496static char register_chrdev_failed[] __initdata = 496static char register_chrdev_failed[] __initdata =
497 KERN_ERR "rtlx_module_init: unable to register device\n"; 497 KERN_ERR "rtlx_module_init: unable to register device\n";
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index a7bff2a54723..39add2341aa2 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -384,7 +384,7 @@ EXPORT(sysn32_call_table)
384 PTR sys_readlinkat 384 PTR sys_readlinkat
385 PTR sys_fchmodat 385 PTR sys_fchmodat
386 PTR sys_faccessat 386 PTR sys_faccessat
387 PTR sys_pselect6 387 PTR compat_sys_pselect6
388 PTR sys_ppoll /* 6265 */ 388 PTR sys_ppoll /* 6265 */
389 PTR sys_unshare 389 PTR sys_unshare
390 PTR sys_splice 390 PTR sys_splice
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index e91379c1be1d..c58b8e0105ea 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -506,7 +506,7 @@ sys_call_table:
506 PTR sys_readlinkat 506 PTR sys_readlinkat
507 PTR sys_fchmodat 507 PTR sys_fchmodat
508 PTR sys_faccessat /* 4300 */ 508 PTR sys_faccessat /* 4300 */
509 PTR sys_pselect6 509 PTR compat_sys_pselect6
510 PTR sys_ppoll 510 PTR sys_ppoll
511 PTR sys_unshare 511 PTR sys_unshare
512 PTR sys_splice 512 PTR sys_splice
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 89440a0d8528..d2e01e7167b8 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -271,8 +271,7 @@ static void __init bootmem_init(void)
271static void __init bootmem_init(void) 271static void __init bootmem_init(void)
272{ 272{
273 unsigned long reserved_end; 273 unsigned long reserved_end;
274 unsigned long highest = 0; 274 unsigned long mapstart = ~0UL;
275 unsigned long mapstart = -1UL;
276 unsigned long bootmap_size; 275 unsigned long bootmap_size;
277 int i; 276 int i;
278 277
@@ -284,6 +283,13 @@ static void __init bootmem_init(void)
284 reserved_end = max(init_initrd(), PFN_UP(__pa_symbol(&_end))); 283 reserved_end = max(init_initrd(), PFN_UP(__pa_symbol(&_end)));
285 284
286 /* 285 /*
286 * max_low_pfn is not a number of pages. The number of pages
287 * of the system is given by 'max_low_pfn - min_low_pfn'.
288 */
289 min_low_pfn = ~0UL;
290 max_low_pfn = 0;
291
292 /*
287 * Find the highest page frame number we have available. 293 * Find the highest page frame number we have available.
288 */ 294 */
289 for (i = 0; i < boot_mem_map.nr_map; i++) { 295 for (i = 0; i < boot_mem_map.nr_map; i++) {
@@ -296,8 +302,10 @@ static void __init bootmem_init(void)
296 end = PFN_DOWN(boot_mem_map.map[i].addr 302 end = PFN_DOWN(boot_mem_map.map[i].addr
297 + boot_mem_map.map[i].size); 303 + boot_mem_map.map[i].size);
298 304
299 if (end > highest) 305 if (end > max_low_pfn)
300 highest = end; 306 max_low_pfn = end;
307 if (start < min_low_pfn)
308 min_low_pfn = start;
301 if (end <= reserved_end) 309 if (end <= reserved_end)
302 continue; 310 continue;
303 if (start >= mapstart) 311 if (start >= mapstart)
@@ -305,22 +313,36 @@ static void __init bootmem_init(void)
305 mapstart = max(reserved_end, start); 313 mapstart = max(reserved_end, start);
306 } 314 }
307 315
316 if (min_low_pfn >= max_low_pfn)
317 panic("Incorrect memory mapping !!!");
318 if (min_low_pfn > ARCH_PFN_OFFSET) {
319 printk(KERN_INFO
320 "Wasting %lu bytes for tracking %lu unused pages\n",
321 (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
322 min_low_pfn - ARCH_PFN_OFFSET);
323 } else if (min_low_pfn < ARCH_PFN_OFFSET) {
324 printk(KERN_INFO
325 "%lu free pages won't be used\n",
326 ARCH_PFN_OFFSET - min_low_pfn);
327 }
328 min_low_pfn = ARCH_PFN_OFFSET;
329
308 /* 330 /*
309 * Determine low and high memory ranges 331 * Determine low and high memory ranges
310 */ 332 */
311 if (highest > PFN_DOWN(HIGHMEM_START)) { 333 if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
312#ifdef CONFIG_HIGHMEM 334#ifdef CONFIG_HIGHMEM
313 highstart_pfn = PFN_DOWN(HIGHMEM_START); 335 highstart_pfn = PFN_DOWN(HIGHMEM_START);
314 highend_pfn = highest; 336 highend_pfn = max_low_pfn;
315#endif 337#endif
316 highest = PFN_DOWN(HIGHMEM_START); 338 max_low_pfn = PFN_DOWN(HIGHMEM_START);
317 } 339 }
318 340
319 /* 341 /*
320 * Initialize the boot-time allocator with low memory only. 342 * Initialize the boot-time allocator with low memory only.
321 */ 343 */
322 bootmap_size = init_bootmem(mapstart, highest); 344 bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
323 345 min_low_pfn, max_low_pfn);
324 /* 346 /*
325 * Register fully available low RAM pages with the bootmem allocator. 347 * Register fully available low RAM pages with the bootmem allocator.
326 */ 348 */
@@ -507,9 +529,9 @@ void __init setup_arch(char **cmdline_p)
507 529
508#if defined(CONFIG_VT) 530#if defined(CONFIG_VT)
509#if defined(CONFIG_VGA_CONSOLE) 531#if defined(CONFIG_VGA_CONSOLE)
510 conswitchp = &vga_con; 532 conswitchp = &vga_con;
511#elif defined(CONFIG_DUMMY_CONSOLE) 533#elif defined(CONFIG_DUMMY_CONSOLE)
512 conswitchp = &dummy_con; 534 conswitchp = &dummy_con;
513#endif 535#endif
514#endif 536#endif
515 537
@@ -541,3 +563,6 @@ int __init dsp_disable(char *s)
541} 563}
542 564
543__setup("nodsp", dsp_disable); 565__setup("nodsp", dsp_disable);
566
567unsigned long kernelsp[NR_CPUS];
568unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index b9d358e05214..9a44053cd9f1 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -89,7 +89,7 @@ _sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
89 spin_lock_irq(&current->sighand->siglock); 89 spin_lock_irq(&current->sighand->siglock);
90 current->saved_sigmask = current->blocked; 90 current->saved_sigmask = current->blocked;
91 current->blocked = newset; 91 current->blocked = newset;
92 recalc_sigpending(); 92 recalc_sigpending();
93 spin_unlock_irq(&current->sighand->siglock); 93 spin_unlock_irq(&current->sighand->siglock);
94 94
95 current->state = TASK_INTERRUPTIBLE; 95 current->state = TASK_INTERRUPTIBLE;
@@ -124,7 +124,7 @@ asmlinkage int sys_sigaction(int sig, const struct sigaction __user *act,
124 124
125 if (!ret && oact) { 125 if (!ret && oact) {
126 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact))) 126 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
127 return -EFAULT; 127 return -EFAULT;
128 err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); 128 err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
129 err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler); 129 err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
130 err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig); 130 err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
@@ -304,7 +304,7 @@ int setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
304 current->comm, current->pid, 304 current->comm, current->pid,
305 frame, regs->cp0_epc, frame->regs[31]); 305 frame, regs->cp0_epc, frame->regs[31]);
306#endif 306#endif
307 return 0; 307 return 0;
308 308
309give_sigsegv: 309give_sigsegv:
310 force_sigsegv(signr, current); 310 force_sigsegv(signr, current);
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index a67c18555ed3..b28646b3ceae 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -105,7 +105,7 @@ _sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
105 spin_lock_irq(&current->sighand->siglock); 105 spin_lock_irq(&current->sighand->siglock);
106 current->saved_sigmask = current->blocked; 106 current->saved_sigmask = current->blocked;
107 current->blocked = newset; 107 current->blocked = newset;
108 recalc_sigpending(); 108 recalc_sigpending();
109 spin_unlock_irq(&current->sighand->siglock); 109 spin_unlock_irq(&current->sighand->siglock);
110 110
111 current->state = TASK_INTERRUPTIBLE; 111 current->state = TASK_INTERRUPTIBLE;
@@ -184,7 +184,7 @@ int setup_rt_frame_n32(struct k_sigaction * ka,
184 /* Create the ucontext. */ 184 /* Create the ucontext. */
185 err |= __put_user(0, &frame->rs_uc.uc_flags); 185 err |= __put_user(0, &frame->rs_uc.uc_flags);
186 err |= __put_user(0, &frame->rs_uc.uc_link); 186 err |= __put_user(0, &frame->rs_uc.uc_link);
187 sp = (int) (long) current->sas_ss_sp; 187 sp = (int) (long) current->sas_ss_sp;
188 err |= __put_user(sp, 188 err |= __put_user(sp,
189 &frame->rs_uc.uc_stack.ss_sp); 189 &frame->rs_uc.uc_stack.ss_sp);
190 err |= __put_user(sas_ss_flags(regs->regs[29]), 190 err |= __put_user(sas_ss_flags(regs->regs[29]),
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 1ee689c0e0c9..64b62bdfb4f6 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -35,7 +35,6 @@
35#include <asm/mipsregs.h> 35#include <asm/mipsregs.h>
36#include <asm/mipsmtregs.h> 36#include <asm/mipsmtregs.h>
37#include <asm/mips_mt.h> 37#include <asm/mips_mt.h>
38#include <asm/mips-boards/maltaint.h> /* This is f*cking wrong */
39 38
40#define MIPS_CPU_IPI_RESCHED_IRQ 0 39#define MIPS_CPU_IPI_RESCHED_IRQ 0
41#define MIPS_CPU_IPI_CALL_IRQ 1 40#define MIPS_CPU_IPI_CALL_IRQ 1
@@ -108,12 +107,12 @@ void __init sanitize_tlb_entries(void)
108 107
109static void ipi_resched_dispatch(void) 108static void ipi_resched_dispatch(void)
110{ 109{
111 do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ); 110 do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
112} 111}
113 112
114static void ipi_call_dispatch(void) 113static void ipi_call_dispatch(void)
115{ 114{
116 do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ); 115 do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
117} 116}
118 117
119static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) 118static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
@@ -270,8 +269,8 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
270 set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch); 269 set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
271 } 270 }
272 271
273 cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ; 272 cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
274 cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ; 273 cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
275 274
276 setup_irq(cpu_ipi_resched_irq, &irq_resched); 275 setup_irq(cpu_ipi_resched_irq, &irq_resched);
277 setup_irq(cpu_ipi_call_irq, &irq_call); 276 setup_irq(cpu_ipi_call_irq, &irq_call);
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 6a857bf030b0..9251ea824937 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -26,16 +26,6 @@
26 * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. 26 * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
27 */ 27 */
28 28
29/*
30 * MIPSCPU_INT_BASE is identically defined in both
31 * asm-mips/mips-boards/maltaint.h and asm-mips/mips-boards/simint.h,
32 * but as yet there's no properly organized include structure that
33 * will ensure that the right *int.h file will be included for a
34 * given platform build.
35 */
36
37#define MIPSCPU_INT_BASE 16
38
39#define MIPS_CPU_IPI_IRQ 1 29#define MIPS_CPU_IPI_IRQ 1
40 30
41#define LOCK_MT_PRA() \ 31#define LOCK_MT_PRA() \
@@ -77,15 +67,15 @@ unsigned int ipi_timer_latch[NR_CPUS];
77 67
78#define IPIBUF_PER_CPU 4 68#define IPIBUF_PER_CPU 4
79 69
80struct smtc_ipi_q IPIQ[NR_CPUS]; 70static struct smtc_ipi_q IPIQ[NR_CPUS];
81struct smtc_ipi_q freeIPIq; 71static struct smtc_ipi_q freeIPIq;
82 72
83 73
84/* Forward declarations */ 74/* Forward declarations */
85 75
86void ipi_decode(struct smtc_ipi *); 76void ipi_decode(struct smtc_ipi *);
87void post_direct_ipi(int cpu, struct smtc_ipi *pipi); 77static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
88void setup_cross_vpe_interrupts(void); 78static void setup_cross_vpe_interrupts(void);
89void init_smtc_stats(void); 79void init_smtc_stats(void);
90 80
91/* Global SMTC Status */ 81/* Global SMTC Status */
@@ -200,7 +190,7 @@ void __init sanitize_tlb_entries(void)
200 * Configure shared TLB - VPC configuration bit must be set by caller 190 * Configure shared TLB - VPC configuration bit must be set by caller
201 */ 191 */
202 192
203void smtc_configure_tlb(void) 193static void smtc_configure_tlb(void)
204{ 194{
205 int i,tlbsiz,vpes; 195 int i,tlbsiz,vpes;
206 unsigned long mvpconf0; 196 unsigned long mvpconf0;
@@ -648,7 +638,7 @@ int setup_irq_smtc(unsigned int irq, struct irqaction * new,
648 * the VPE. 638 * the VPE.
649 */ 639 */
650 640
651void smtc_ipi_qdump(void) 641static void smtc_ipi_qdump(void)
652{ 642{
653 int i; 643 int i;
654 644
@@ -686,28 +676,6 @@ static __inline__ int atomic_postincrement(unsigned int *pv)
686 return result; 676 return result;
687} 677}
688 678
689/* No longer used in IPI dispatch, but retained for future recycling */
690
691static __inline__ int atomic_postclear(unsigned int *pv)
692{
693 unsigned long result;
694
695 unsigned long temp;
696
697 __asm__ __volatile__(
698 "1: ll %0, %2 \n"
699 " or %1, $0, $0 \n"
700 " sc %1, %2 \n"
701 " beqz %1, 1b \n"
702 " sync \n"
703 : "=&r" (result), "=&r" (temp), "=m" (*pv)
704 : "m" (*pv)
705 : "memory");
706
707 return result;
708}
709
710
711void smtc_send_ipi(int cpu, int type, unsigned int action) 679void smtc_send_ipi(int cpu, int type, unsigned int action)
712{ 680{
713 int tcstatus; 681 int tcstatus;
@@ -781,7 +749,7 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
781/* 749/*
782 * Send IPI message to Halted TC, TargTC/TargVPE already having been set 750 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
783 */ 751 */
784void post_direct_ipi(int cpu, struct smtc_ipi *pipi) 752static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
785{ 753{
786 struct pt_regs *kstack; 754 struct pt_regs *kstack;
787 unsigned long tcstatus; 755 unsigned long tcstatus;
@@ -921,7 +889,7 @@ void smtc_timer_broadcast(int vpe)
921 * interrupts. 889 * interrupts.
922 */ 890 */
923 891
924static int cpu_ipi_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_IRQ; 892static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;
925 893
926static irqreturn_t ipi_interrupt(int irq, void *dev_idm) 894static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
927{ 895{
@@ -1000,7 +968,7 @@ static void ipi_irq_dispatch(void)
1000 968
1001static struct irqaction irq_ipi; 969static struct irqaction irq_ipi;
1002 970
1003void setup_cross_vpe_interrupts(void) 971static void setup_cross_vpe_interrupts(void)
1004{ 972{
1005 if (!cpu_has_vint) 973 if (!cpu_has_vint)
1006 panic("SMTC Kernel requires Vectored Interupt support"); 974 panic("SMTC Kernel requires Vectored Interupt support");
@@ -1191,7 +1159,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1191 * It would be nice to be able to use a spinlock here, 1159 * It would be nice to be able to use a spinlock here,
1192 * but this is invoked from within TLB flush routines 1160 * but this is invoked from within TLB flush routines
1193 * that protect themselves with DVPE, so if a lock is 1161 * that protect themselves with DVPE, so if a lock is
1194 * held by another TC, it'll never be freed. 1162 * held by another TC, it'll never be freed.
1195 * 1163 *
1196 * DVPE/DMT must not be done with interrupts enabled, 1164 * DVPE/DMT must not be done with interrupts enabled,
1197 * so even so most callers will already have disabled 1165 * so even so most callers will already have disabled
@@ -1296,7 +1264,7 @@ void smtc_flush_tlb_asid(unsigned long asid)
1296 * Support for single-threading cache flush operations. 1264 * Support for single-threading cache flush operations.
1297 */ 1265 */
1298 1266
1299int halt_state_save[NR_CPUS]; 1267static int halt_state_save[NR_CPUS];
1300 1268
1301/* 1269/*
1302 * To really, really be sure that nothing is being done 1270 * To really, really be sure that nothing is being done
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index 6c2406a93f2b..93a148486f88 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -669,7 +669,7 @@ asmlinkage int irix_mount(char __user *dev_name, char __user *dir_name,
669 669
670struct irix_statfs { 670struct irix_statfs {
671 short f_type; 671 short f_type;
672 long f_bsize, f_frsize, f_blocks, f_bfree, f_files, f_ffree; 672 long f_bsize, f_frsize, f_blocks, f_bfree, f_files, f_ffree;
673 char f_fname[6], f_fpack[6]; 673 char f_fname[6], f_fpack[6];
674}; 674};
675 675
@@ -959,7 +959,7 @@ static inline loff_t llseek(struct file *file, loff_t offset, int origin)
959 959
960 fn = default_llseek; 960 fn = default_llseek;
961 if (file->f_op && file->f_op->llseek) 961 if (file->f_op && file->f_op->llseek)
962 fn = file->f_op->llseek; 962 fn = file->f_op->llseek;
963 lock_kernel(); 963 lock_kernel();
964 retval = fn(file, offset, origin); 964 retval = fn(file, offset, origin);
965 unlock_kernel(); 965 unlock_kernel();
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 458fccf87c54..459624969c99 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -522,7 +522,7 @@ static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
522}; 522};
523 523
524static char *rstrs[] = { 524static char *rstrs[] = {
525 [R_MIPS_NONE] = "MIPS_NONE", 525 [R_MIPS_NONE] = "MIPS_NONE",
526 [R_MIPS_32] = "MIPS_32", 526 [R_MIPS_32] = "MIPS_32",
527 [R_MIPS_26] = "MIPS_26", 527 [R_MIPS_26] = "MIPS_26",
528 [R_MIPS_HI16] = "MIPS_HI16", 528 [R_MIPS_HI16] = "MIPS_HI16",
@@ -695,7 +695,7 @@ static void dump_tclist(void)
695} 695}
696 696
697/* We are prepared so configure and start the VPE... */ 697/* We are prepared so configure and start the VPE... */
698int vpe_run(struct vpe * v) 698static int vpe_run(struct vpe * v)
699{ 699{
700 struct vpe_notifications *n; 700 struct vpe_notifications *n;
701 unsigned long val, dmt_flag; 701 unsigned long val, dmt_flag;
@@ -713,16 +713,16 @@ int vpe_run(struct vpe * v)
713 dvpe(); 713 dvpe();
714 714
715 if (!list_empty(&v->tc)) { 715 if (!list_empty(&v->tc)) {
716 if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) { 716 if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
717 printk(KERN_WARNING "VPE loader: TC %d is already in use.\n", 717 printk(KERN_WARNING "VPE loader: TC %d is already in use.\n",
718 t->index); 718 t->index);
719 return -ENOEXEC; 719 return -ENOEXEC;
720 } 720 }
721 } else { 721 } else {
722 printk(KERN_WARNING "VPE loader: No TC's associated with VPE %d\n", 722 printk(KERN_WARNING "VPE loader: No TC's associated with VPE %d\n",
723 v->minor); 723 v->minor);
724 return -ENOEXEC; 724 return -ENOEXEC;
725 } 725 }
726 726
727 /* Put MVPE's into 'configuration state' */ 727 /* Put MVPE's into 'configuration state' */
728 set_c0_mvpcontrol(MVPCONTROL_VPC); 728 set_c0_mvpcontrol(MVPCONTROL_VPC);
@@ -775,14 +775,14 @@ int vpe_run(struct vpe * v)
775 775
776 back_to_back_c0_hazard(); 776 back_to_back_c0_hazard();
777 777
778 /* Set up the XTC bit in vpeconf0 to point at our tc */ 778 /* Set up the XTC bit in vpeconf0 to point at our tc */
779 write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC)) 779 write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
780 | (t->index << VPECONF0_XTC_SHIFT)); 780 | (t->index << VPECONF0_XTC_SHIFT));
781 781
782 back_to_back_c0_hazard(); 782 back_to_back_c0_hazard();
783 783
784 /* enable this VPE */ 784 /* enable this VPE */
785 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); 785 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
786 786
787 /* clear out any left overs from a previous program */ 787 /* clear out any left overs from a previous program */
788 write_vpe_c0_status(0); 788 write_vpe_c0_status(0);
@@ -832,7 +832,7 @@ static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
832 * contents of the program (p)buffer performing relocatations/etc, free's it 832 * contents of the program (p)buffer performing relocatations/etc, free's it
833 * when finished. 833 * when finished.
834 */ 834 */
835int vpe_elfload(struct vpe * v) 835static int vpe_elfload(struct vpe * v)
836{ 836{
837 Elf_Ehdr *hdr; 837 Elf_Ehdr *hdr;
838 Elf_Shdr *sechdrs; 838 Elf_Shdr *sechdrs;
diff --git a/arch/mips/lasat/interrupt.c b/arch/mips/lasat/interrupt.c
index 2affa5ff171c..9a622b9a1051 100644
--- a/arch/mips/lasat/interrupt.c
+++ b/arch/mips/lasat/interrupt.c
@@ -45,7 +45,7 @@ void enable_lasat_irq(unsigned int irq_nr)
45} 45}
46 46
47static struct irq_chip lasat_irq_type = { 47static struct irq_chip lasat_irq_type = {
48 .typename = "Lasat", 48 .name = "Lasat",
49 .ack = disable_lasat_irq, 49 .ack = disable_lasat_irq,
50 .mask = disable_lasat_irq, 50 .mask = disable_lasat_irq,
51 .mask_ack = disable_lasat_irq, 51 .mask_ack = disable_lasat_irq,
diff --git a/arch/mips/lasat/prom.c b/arch/mips/lasat/prom.c
index 88c7ab871ec4..d47692f73a26 100644
--- a/arch/mips/lasat/prom.c
+++ b/arch/mips/lasat/prom.c
@@ -132,9 +132,8 @@ void __init prom_init(void)
132 add_memory_region(0, lasat_board_info.li_memsize, BOOT_MEM_RAM); 132 add_memory_region(0, lasat_board_info.li_memsize, BOOT_MEM_RAM);
133} 133}
134 134
135unsigned long __init prom_free_prom_memory(void) 135void __init prom_free_prom_memory(void)
136{ 136{
137 return 0;
138} 137}
139 138
140const char *get_system_type(void) 139const char *get_system_type(void)
diff --git a/arch/mips/lib-32/Makefile b/arch/mips/lib-32/Makefile
index dcd4d2ed2ac4..2036cf5e6857 100644
--- a/arch/mips/lib-32/Makefile
+++ b/arch/mips/lib-32/Makefile
@@ -2,7 +2,7 @@
2# Makefile for MIPS-specific library files.. 2# Makefile for MIPS-specific library files..
3# 3#
4 4
5lib-y += memset.o watch.o 5lib-y += watch.o
6 6
7obj-$(CONFIG_CPU_MIPS32) += dump_tlb.o 7obj-$(CONFIG_CPU_MIPS32) += dump_tlb.o
8obj-$(CONFIG_CPU_MIPS64) += dump_tlb.o 8obj-$(CONFIG_CPU_MIPS64) += dump_tlb.o
diff --git a/arch/mips/lib-64/Makefile b/arch/mips/lib-64/Makefile
index dcd4d2ed2ac4..2036cf5e6857 100644
--- a/arch/mips/lib-64/Makefile
+++ b/arch/mips/lib-64/Makefile
@@ -2,7 +2,7 @@
2# Makefile for MIPS-specific library files.. 2# Makefile for MIPS-specific library files..
3# 3#
4 4
5lib-y += memset.o watch.o 5lib-y += watch.o
6 6
7obj-$(CONFIG_CPU_MIPS32) += dump_tlb.o 7obj-$(CONFIG_CPU_MIPS32) += dump_tlb.o
8obj-$(CONFIG_CPU_MIPS64) += dump_tlb.o 8obj-$(CONFIG_CPU_MIPS64) += dump_tlb.o
diff --git a/arch/mips/lib-64/memset.S b/arch/mips/lib-64/memset.S
deleted file mode 100644
index e2c42c85113b..000000000000
--- a/arch/mips/lib-64/memset.S
+++ /dev/null
@@ -1,142 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1998, 1999, 2000 by Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 */
9#include <asm/asm.h>
10#include <asm/asm-offsets.h>
11#include <asm/regdef.h>
12
13#define EX(insn,reg,addr,handler) \
149: insn reg, addr; \
15 .section __ex_table,"a"; \
16 PTR 9b, handler; \
17 .previous
18
19 .macro f_fill64 dst, offset, val, fixup
20 EX(LONG_S, \val, (\offset + 0 * LONGSIZE)(\dst), \fixup)
21 EX(LONG_S, \val, (\offset + 1 * LONGSIZE)(\dst), \fixup)
22 EX(LONG_S, \val, (\offset + 2 * LONGSIZE)(\dst), \fixup)
23 EX(LONG_S, \val, (\offset + 3 * LONGSIZE)(\dst), \fixup)
24 EX(LONG_S, \val, (\offset + 4 * LONGSIZE)(\dst), \fixup)
25 EX(LONG_S, \val, (\offset + 5 * LONGSIZE)(\dst), \fixup)
26 EX(LONG_S, \val, (\offset + 6 * LONGSIZE)(\dst), \fixup)
27 EX(LONG_S, \val, (\offset + 7 * LONGSIZE)(\dst), \fixup)
28 .endm
29
30/*
31 * memset(void *s, int c, size_t n)
32 *
33 * a0: start of area to clear
34 * a1: char to fill with
35 * a2: size of area to clear
36 */
37 .set noreorder
38 .align 5
39LEAF(memset)
40 beqz a1, 1f
41 move v0, a0 /* result */
42
43 andi a1, 0xff /* spread fillword */
44 dsll t1, a1, 8
45 or a1, t1
46 dsll t1, a1, 16
47 or a1, t1
48 dsll t1, a1, 32
49 or a1, t1
501:
51
52FEXPORT(__bzero)
53 sltiu t0, a2, LONGSIZE /* very small region? */
54 bnez t0, small_memset
55 andi t0, a0, LONGMASK /* aligned? */
56
57 beqz t0, 1f
58 PTR_SUBU t0, LONGSIZE /* alignment in bytes */
59
60#ifdef __MIPSEB__
61 EX(sdl, a1, (a0), first_fixup) /* make dword aligned */
62#endif
63#ifdef __MIPSEL__
64 EX(sdr, a1, (a0), first_fixup) /* make dword aligned */
65#endif
66 PTR_SUBU a0, t0 /* long align ptr */
67 PTR_ADDU a2, t0 /* correct size */
68
691: ori t1, a2, 0x3f /* # of full blocks */
70 xori t1, 0x3f
71 beqz t1, memset_partial /* no block to fill */
72 andi t0, a2, 0x38
73
74 PTR_ADDU t1, a0 /* end address */
75 .set reorder
761: PTR_ADDIU a0, 64
77 f_fill64 a0, -64, a1, fwd_fixup
78 bne t1, a0, 1b
79 .set noreorder
80
81memset_partial:
82 PTR_LA t1, 2f /* where to start */
83 .set noat
84 dsrl AT, t0, 1
85 PTR_SUBU t1, AT
86 .set noat
87 jr t1
88 PTR_ADDU a0, t0 /* dest ptr */
89
90 .set push
91 .set noreorder
92 .set nomacro
93 f_fill64 a0, -64, a1, partial_fixup /* ... but first do longs ... */
942: .set pop
95 andi a2, LONGMASK /* At most one long to go */
96
97 beqz a2, 1f
98 PTR_ADDU a0, a2 /* What's left */
99#ifdef __MIPSEB__
100 EX(sdr, a1, -1(a0), last_fixup)
101#endif
102#ifdef __MIPSEL__
103 EX(sdl, a1, -1(a0), last_fixup)
104#endif
1051: jr ra
106 move a2, zero
107
108small_memset:
109 beqz a2, 2f
110 PTR_ADDU t1, a0, a2
111
1121: PTR_ADDIU a0, 1 /* fill bytewise */
113 bne t1, a0, 1b
114 sb a1, -1(a0)
115
1162: jr ra /* done */
117 move a2, zero
118 END(memset)
119
120first_fixup:
121 jr ra
122 nop
123
124fwd_fixup:
125 PTR_L t0, TI_TASK($28)
126 LONG_L t0, THREAD_BUADDR(t0)
127 andi a2, 0x3f
128 LONG_ADDU a2, t1
129 jr ra
130 LONG_SUBU a2, t0
131
132partial_fixup:
133 PTR_L t0, TI_TASK($28)
134 LONG_L t0, THREAD_BUADDR(t0)
135 andi a2, LONGMASK
136 LONG_ADDU a2, t1
137 jr ra
138 LONG_SUBU a2, t0
139
140last_fixup:
141 jr ra
142 andi v1, a2, LONGMASK
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index 989c900b8b14..5ad501b30b43 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -2,7 +2,7 @@
2# Makefile for MIPS-specific library files.. 2# Makefile for MIPS-specific library files..
3# 3#
4 4
5lib-y += csum_partial.o memcpy.o promlib.o \ 5lib-y += csum_partial.o memcpy.o memset.o promlib.o \
6 strlen_user.o strncpy_user.o strnlen_user.o uncached.o 6 strlen_user.o strncpy_user.o strnlen_user.o uncached.o
7 7
8obj-y += iomap.o 8obj-y += iomap.o
diff --git a/arch/mips/lib-32/memset.S b/arch/mips/lib/memset.S
index 1981485bd48b..3f8b8b3d0b23 100644
--- a/arch/mips/lib-32/memset.S
+++ b/arch/mips/lib/memset.S
@@ -10,6 +10,14 @@
10#include <asm/asm-offsets.h> 10#include <asm/asm-offsets.h>
11#include <asm/regdef.h> 11#include <asm/regdef.h>
12 12
13#if LONGSIZE == 4
14#define LONG_S_L swl
15#define LONG_S_R swr
16#else
17#define LONG_S_L sdl
18#define LONG_S_R sdr
19#endif
20
13#define EX(insn,reg,addr,handler) \ 21#define EX(insn,reg,addr,handler) \
149: insn reg, addr; \ 229: insn reg, addr; \
15 .section __ex_table,"a"; \ 23 .section __ex_table,"a"; \
@@ -25,6 +33,7 @@
25 EX(LONG_S, \val, (\offset + 5 * LONGSIZE)(\dst), \fixup) 33 EX(LONG_S, \val, (\offset + 5 * LONGSIZE)(\dst), \fixup)
26 EX(LONG_S, \val, (\offset + 6 * LONGSIZE)(\dst), \fixup) 34 EX(LONG_S, \val, (\offset + 6 * LONGSIZE)(\dst), \fixup)
27 EX(LONG_S, \val, (\offset + 7 * LONGSIZE)(\dst), \fixup) 35 EX(LONG_S, \val, (\offset + 7 * LONGSIZE)(\dst), \fixup)
36#if LONGSIZE == 4
28 EX(LONG_S, \val, (\offset + 8 * LONGSIZE)(\dst), \fixup) 37 EX(LONG_S, \val, (\offset + 8 * LONGSIZE)(\dst), \fixup)
29 EX(LONG_S, \val, (\offset + 9 * LONGSIZE)(\dst), \fixup) 38 EX(LONG_S, \val, (\offset + 9 * LONGSIZE)(\dst), \fixup)
30 EX(LONG_S, \val, (\offset + 10 * LONGSIZE)(\dst), \fixup) 39 EX(LONG_S, \val, (\offset + 10 * LONGSIZE)(\dst), \fixup)
@@ -33,6 +42,7 @@
33 EX(LONG_S, \val, (\offset + 13 * LONGSIZE)(\dst), \fixup) 42 EX(LONG_S, \val, (\offset + 13 * LONGSIZE)(\dst), \fixup)
34 EX(LONG_S, \val, (\offset + 14 * LONGSIZE)(\dst), \fixup) 43 EX(LONG_S, \val, (\offset + 14 * LONGSIZE)(\dst), \fixup)
35 EX(LONG_S, \val, (\offset + 15 * LONGSIZE)(\dst), \fixup) 44 EX(LONG_S, \val, (\offset + 15 * LONGSIZE)(\dst), \fixup)
45#endif
36 .endm 46 .endm
37 47
38/* 48/*
@@ -49,9 +59,13 @@ LEAF(memset)
49 move v0, a0 /* result */ 59 move v0, a0 /* result */
50 60
51 andi a1, 0xff /* spread fillword */ 61 andi a1, 0xff /* spread fillword */
52 sll t1, a1, 8 62 LONG_SLL t1, a1, 8
53 or a1, t1 63 or a1, t1
54 sll t1, a1, 16 64 LONG_SLL t1, a1, 16
65#if LONGSIZE == 8
66 or a1, t1
67 LONG_SLL t1, a1, 32
68#endif
55 or a1, t1 69 or a1, t1
561: 701:
57 71
@@ -64,10 +78,10 @@ FEXPORT(__bzero)
64 PTR_SUBU t0, LONGSIZE /* alignment in bytes */ 78 PTR_SUBU t0, LONGSIZE /* alignment in bytes */
65 79
66#ifdef __MIPSEB__ 80#ifdef __MIPSEB__
67 EX(swl, a1, (a0), first_fixup) /* make word aligned */ 81 EX(LONG_S_L, a1, (a0), first_fixup) /* make word/dword aligned */
68#endif 82#endif
69#ifdef __MIPSEL__ 83#ifdef __MIPSEL__
70 EX(swr, a1, (a0), first_fixup) /* make word aligned */ 84 EX(LONG_S_R, a1, (a0), first_fixup) /* make word/dword aligned */
71#endif 85#endif
72 PTR_SUBU a0, t0 /* long align ptr */ 86 PTR_SUBU a0, t0 /* long align ptr */
73 PTR_ADDU a2, t0 /* correct size */ 87 PTR_ADDU a2, t0 /* correct size */
@@ -75,7 +89,7 @@ FEXPORT(__bzero)
751: ori t1, a2, 0x3f /* # of full blocks */ 891: ori t1, a2, 0x3f /* # of full blocks */
76 xori t1, 0x3f 90 xori t1, 0x3f
77 beqz t1, memset_partial /* no block to fill */ 91 beqz t1, memset_partial /* no block to fill */
78 andi t0, a2, 0x3c 92 andi t0, a2, 0x40-LONGSIZE
79 93
80 PTR_ADDU t1, a0 /* end address */ 94 PTR_ADDU t1, a0 /* end address */
81 .set reorder 95 .set reorder
@@ -86,7 +100,14 @@ FEXPORT(__bzero)
86 100
87memset_partial: 101memset_partial:
88 PTR_LA t1, 2f /* where to start */ 102 PTR_LA t1, 2f /* where to start */
103#if LONGSIZE == 4
89 PTR_SUBU t1, t0 104 PTR_SUBU t1, t0
105#else
106 .set noat
107 LONG_SRL AT, t0, 1
108 PTR_SUBU t1, AT
109 .set noat
110#endif
90 jr t1 111 jr t1
91 PTR_ADDU a0, t0 /* dest ptr */ 112 PTR_ADDU a0, t0 /* dest ptr */
92 113
@@ -100,10 +121,10 @@ memset_partial:
100 beqz a2, 1f 121 beqz a2, 1f
101 PTR_ADDU a0, a2 /* What's left */ 122 PTR_ADDU a0, a2 /* What's left */
102#ifdef __MIPSEB__ 123#ifdef __MIPSEB__
103 EX(swr, a1, -1(a0), last_fixup) 124 EX(LONG_S_R, a1, -1(a0), last_fixup)
104#endif 125#endif
105#ifdef __MIPSEL__ 126#ifdef __MIPSEL__
106 EX(swl, a1, -1(a0), last_fixup) 127 EX(LONG_S_L, a1, -1(a0), last_fixup)
107#endif 128#endif
1081: jr ra 1291: jr ra
109 move a2, zero 130 move a2, zero
diff --git a/arch/mips/lib/uncached.c b/arch/mips/lib/uncached.c
index 98ce89f8068b..2388f7f3ffde 100644
--- a/arch/mips/lib/uncached.c
+++ b/arch/mips/lib/uncached.c
@@ -44,20 +44,24 @@ unsigned long __init run_uncached(void *func)
44 44
45 if (sp >= (long)CKSEG0 && sp < (long)CKSEG2) 45 if (sp >= (long)CKSEG0 && sp < (long)CKSEG2)
46 usp = CKSEG1ADDR(sp); 46 usp = CKSEG1ADDR(sp);
47#ifdef CONFIG_64BIT
47 else if ((long long)sp >= (long long)PHYS_TO_XKPHYS(0LL, 0) && 48 else if ((long long)sp >= (long long)PHYS_TO_XKPHYS(0LL, 0) &&
48 (long long)sp < (long long)PHYS_TO_XKPHYS(8LL, 0)) 49 (long long)sp < (long long)PHYS_TO_XKPHYS(8LL, 0))
49 usp = PHYS_TO_XKPHYS((long long)K_CALG_UNCACHED, 50 usp = PHYS_TO_XKPHYS((long long)K_CALG_UNCACHED,
50 XKPHYS_TO_PHYS((long long)sp)); 51 XKPHYS_TO_PHYS((long long)sp));
52#endif
51 else { 53 else {
52 BUG(); 54 BUG();
53 usp = sp; 55 usp = sp;
54 } 56 }
55 if (lfunc >= (long)CKSEG0 && lfunc < (long)CKSEG2) 57 if (lfunc >= (long)CKSEG0 && lfunc < (long)CKSEG2)
56 ufunc = CKSEG1ADDR(lfunc); 58 ufunc = CKSEG1ADDR(lfunc);
59#ifdef CONFIG_64BIT
57 else if ((long long)lfunc >= (long long)PHYS_TO_XKPHYS(0LL, 0) && 60 else if ((long long)lfunc >= (long long)PHYS_TO_XKPHYS(0LL, 0) &&
58 (long long)lfunc < (long long)PHYS_TO_XKPHYS(8LL, 0)) 61 (long long)lfunc < (long long)PHYS_TO_XKPHYS(8LL, 0))
59 ufunc = PHYS_TO_XKPHYS((long long)K_CALG_UNCACHED, 62 ufunc = PHYS_TO_XKPHYS((long long)K_CALG_UNCACHED,
60 XKPHYS_TO_PHYS((long long)lfunc)); 63 XKPHYS_TO_PHYS((long long)lfunc));
64#endif
61 else { 65 else {
62 BUG(); 66 BUG();
63 ufunc = lfunc; 67 ufunc = lfunc;
diff --git a/arch/mips/mips-boards/atlas/atlas_int.c b/arch/mips/mips-boards/atlas/atlas_int.c
index 43dba6ce6603..dfa0acbd7fc2 100644
--- a/arch/mips/mips-boards/atlas/atlas_int.c
+++ b/arch/mips/mips-boards/atlas/atlas_int.c
@@ -32,6 +32,7 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/interrupt.h> 33#include <linux/interrupt.h>
34#include <linux/kernel_stat.h> 34#include <linux/kernel_stat.h>
35#include <linux/kernel.h>
35 36
36#include <asm/gdb-stub.h> 37#include <asm/gdb-stub.h>
37#include <asm/io.h> 38#include <asm/io.h>
@@ -69,7 +70,7 @@ static void end_atlas_irq(unsigned int irq)
69} 70}
70 71
71static struct irq_chip atlas_irq_type = { 72static struct irq_chip atlas_irq_type = {
72 .typename = "Atlas", 73 .name = "Atlas",
73 .ack = disable_atlas_irq, 74 .ack = disable_atlas_irq,
74 .mask = disable_atlas_irq, 75 .mask = disable_atlas_irq,
75 .mask_ack = disable_atlas_irq, 76 .mask_ack = disable_atlas_irq,
@@ -220,7 +221,7 @@ msc_irqmap_t __initdata msc_irqmap[] = {
220 {MSC01C_INT_TMR, MSC01_IRQ_EDGE, 0}, 221 {MSC01C_INT_TMR, MSC01_IRQ_EDGE, 0},
221 {MSC01C_INT_PCI, MSC01_IRQ_LEVEL, 0}, 222 {MSC01C_INT_PCI, MSC01_IRQ_LEVEL, 0},
222}; 223};
223int __initdata msc_nr_irqs = sizeof(msc_irqmap) / sizeof(*msc_irqmap); 224int __initdata msc_nr_irqs = ARRAY_SIZE(msc_irqmap);
224 225
225msc_irqmap_t __initdata msc_eicirqmap[] = { 226msc_irqmap_t __initdata msc_eicirqmap[] = {
226 {MSC01E_INT_SW0, MSC01_IRQ_LEVEL, 0}, 227 {MSC01E_INT_SW0, MSC01_IRQ_LEVEL, 0},
@@ -231,14 +232,14 @@ msc_irqmap_t __initdata msc_eicirqmap[] = {
231 {MSC01E_INT_PERFCTR, MSC01_IRQ_LEVEL, 0}, 232 {MSC01E_INT_PERFCTR, MSC01_IRQ_LEVEL, 0},
232 {MSC01E_INT_CPUCTR, MSC01_IRQ_LEVEL, 0} 233 {MSC01E_INT_CPUCTR, MSC01_IRQ_LEVEL, 0}
233}; 234};
234int __initdata msc_nr_eicirqs = sizeof(msc_eicirqmap) / sizeof(*msc_eicirqmap); 235int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
235 236
236void __init arch_init_irq(void) 237void __init arch_init_irq(void)
237{ 238{
238 init_atlas_irqs(ATLAS_INT_BASE); 239 init_atlas_irqs(ATLAS_INT_BASE);
239 240
240 if (!cpu_has_veic) 241 if (!cpu_has_veic)
241 mips_cpu_irq_init(MIPSCPU_INT_BASE); 242 mips_cpu_irq_init();
242 243
243 switch(mips_revision_corid) { 244 switch(mips_revision_corid) {
244 case MIPS_REVISION_CORID_CORE_MSC: 245 case MIPS_REVISION_CORID_CORE_MSC:
diff --git a/arch/mips/mips-boards/generic/memory.c b/arch/mips/mips-boards/generic/memory.c
index eeed944e0f83..ebf0e16c5a0d 100644
--- a/arch/mips/mips-boards/generic/memory.c
+++ b/arch/mips/mips-boards/generic/memory.c
@@ -166,9 +166,8 @@ void __init prom_meminit(void)
166 } 166 }
167} 167}
168 168
169unsigned long __init prom_free_prom_memory(void) 169void __init prom_free_prom_memory(void)
170{ 170{
171 unsigned long freed = 0;
172 unsigned long addr; 171 unsigned long addr;
173 int i; 172 int i;
174 173
@@ -176,17 +175,8 @@ unsigned long __init prom_free_prom_memory(void)
176 if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA) 175 if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
177 continue; 176 continue;
178 177
179 addr = PAGE_ALIGN(boot_mem_map.map[i].addr); 178 addr = boot_mem_map.map[i].addr;
180 while (addr < boot_mem_map.map[i].addr 179 free_init_pages("prom memory",
181 + boot_mem_map.map[i].size) { 180 addr, addr + boot_mem_map.map[i].size);
182 ClearPageReserved(virt_to_page(__va(addr)));
183 init_page_count(virt_to_page(__va(addr)));
184 free_page((unsigned long)__va(addr));
185 addr += PAGE_SIZE;
186 freed += PAGE_SIZE;
187 }
188 } 181 }
189 printk("Freeing prom memory: %ldkb freed\n", freed >> 10);
190
191 return freed;
192} 182}
diff --git a/arch/mips/mips-boards/malta/malta_int.c b/arch/mips/mips-boards/malta/malta_int.c
index 90ad5bf3e2f1..3c206bb17160 100644
--- a/arch/mips/mips-boards/malta/malta_int.c
+++ b/arch/mips/mips-boards/malta/malta_int.c
@@ -27,6 +27,7 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/interrupt.h> 28#include <linux/interrupt.h>
29#include <linux/kernel_stat.h> 29#include <linux/kernel_stat.h>
30#include <linux/kernel.h>
30#include <linux/random.h> 31#include <linux/random.h>
31 32
32#include <asm/i8259.h> 33#include <asm/i8259.h>
@@ -289,7 +290,7 @@ msc_irqmap_t __initdata msc_irqmap[] = {
289 {MSC01C_INT_TMR, MSC01_IRQ_EDGE, 0}, 290 {MSC01C_INT_TMR, MSC01_IRQ_EDGE, 0},
290 {MSC01C_INT_PCI, MSC01_IRQ_LEVEL, 0}, 291 {MSC01C_INT_PCI, MSC01_IRQ_LEVEL, 0},
291}; 292};
292int __initdata msc_nr_irqs = sizeof(msc_irqmap)/sizeof(msc_irqmap_t); 293int __initdata msc_nr_irqs = ARRAY_SIZE(msc_irqmap);
293 294
294msc_irqmap_t __initdata msc_eicirqmap[] = { 295msc_irqmap_t __initdata msc_eicirqmap[] = {
295 {MSC01E_INT_SW0, MSC01_IRQ_LEVEL, 0}, 296 {MSC01E_INT_SW0, MSC01_IRQ_LEVEL, 0},
@@ -303,14 +304,14 @@ msc_irqmap_t __initdata msc_eicirqmap[] = {
303 {MSC01E_INT_PERFCTR, MSC01_IRQ_LEVEL, 0}, 304 {MSC01E_INT_PERFCTR, MSC01_IRQ_LEVEL, 0},
304 {MSC01E_INT_CPUCTR, MSC01_IRQ_LEVEL, 0} 305 {MSC01E_INT_CPUCTR, MSC01_IRQ_LEVEL, 0}
305}; 306};
306int __initdata msc_nr_eicirqs = sizeof(msc_eicirqmap)/sizeof(msc_irqmap_t); 307int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
307 308
308void __init arch_init_irq(void) 309void __init arch_init_irq(void)
309{ 310{
310 init_i8259_irqs(); 311 init_i8259_irqs();
311 312
312 if (!cpu_has_veic) 313 if (!cpu_has_veic)
313 mips_cpu_irq_init (MIPSCPU_INT_BASE); 314 mips_cpu_irq_init();
314 315
315 switch(mips_revision_corid) { 316 switch(mips_revision_corid) {
316 case MIPS_REVISION_CORID_CORE_MSC: 317 case MIPS_REVISION_CORID_CORE_MSC:
diff --git a/arch/mips/mips-boards/sead/sead_int.c b/arch/mips/mips-boards/sead/sead_int.c
index 874ccb0066b8..c4b9de3a7f27 100644
--- a/arch/mips/mips-boards/sead/sead_int.c
+++ b/arch/mips/mips-boards/sead/sead_int.c
@@ -113,5 +113,5 @@ asmlinkage void plat_irq_dispatch(void)
113 113
114void __init arch_init_irq(void) 114void __init arch_init_irq(void)
115{ 115{
116 mips_cpu_irq_init(MIPSCPU_INT_BASE); 116 mips_cpu_irq_init();
117} 117}
diff --git a/arch/mips/mips-boards/sim/sim_int.c b/arch/mips/mips-boards/sim/sim_int.c
index 2ce449dce6f2..15ac0655c1ff 100644
--- a/arch/mips/mips-boards/sim/sim_int.c
+++ b/arch/mips/mips-boards/sim/sim_int.c
@@ -21,9 +21,7 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/kernel_stat.h> 22#include <linux/kernel_stat.h>
23#include <asm/mips-boards/simint.h> 23#include <asm/mips-boards/simint.h>
24 24#include <asm/irq_cpu.h>
25
26extern void mips_cpu_irq_init(int);
27 25
28static inline int clz(unsigned long x) 26static inline int clz(unsigned long x)
29{ 27{
@@ -86,5 +84,5 @@ asmlinkage void plat_irq_dispatch(void)
86 84
87void __init arch_init_irq(void) 85void __init arch_init_irq(void)
88{ 86{
89 mips_cpu_irq_init(MIPSCPU_INT_BASE); 87 mips_cpu_irq_init();
90} 88}
diff --git a/arch/mips/mips-boards/sim/sim_mem.c b/arch/mips/mips-boards/sim/sim_mem.c
index f7ce76983328..46bc16f8b15d 100644
--- a/arch/mips/mips-boards/sim/sim_mem.c
+++ b/arch/mips/mips-boards/sim/sim_mem.c
@@ -99,10 +99,9 @@ void __init prom_meminit(void)
99 } 99 }
100} 100}
101 101
102unsigned long __init prom_free_prom_memory(void) 102void __init prom_free_prom_memory(void)
103{ 103{
104 int i; 104 int i;
105 unsigned long freed = 0;
106 unsigned long addr; 105 unsigned long addr;
107 106
108 for (i = 0; i < boot_mem_map.nr_map; i++) { 107 for (i = 0; i < boot_mem_map.nr_map; i++) {
@@ -110,16 +109,7 @@ unsigned long __init prom_free_prom_memory(void)
110 continue; 109 continue;
111 110
112 addr = boot_mem_map.map[i].addr; 111 addr = boot_mem_map.map[i].addr;
113 while (addr < boot_mem_map.map[i].addr 112 free_init_pages("prom memory",
114 + boot_mem_map.map[i].size) { 113 addr, addr + boot_mem_map.map[i].size);
115 ClearPageReserved(virt_to_page(__va(addr)));
116 init_page_count(virt_to_page(__va(addr)));
117 free_page((unsigned long)__va(addr));
118 addr += PAGE_SIZE;
119 freed += PAGE_SIZE;
120 }
121 } 114 }
122 printk("Freeing prom memory: %ldkb freed\n", freed >> 10);
123
124 return freed;
125} 115}
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 49065c133ebf..125a4a85ec05 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -341,7 +341,6 @@ static int __init page_is_ram(unsigned long pagenr)
341void __init paging_init(void) 341void __init paging_init(void)
342{ 342{
343 unsigned long zones_size[MAX_NR_ZONES] = { 0, }; 343 unsigned long zones_size[MAX_NR_ZONES] = { 0, };
344 unsigned long max_dma, low;
345#ifndef CONFIG_FLATMEM 344#ifndef CONFIG_FLATMEM
346 unsigned long zholes_size[MAX_NR_ZONES] = { 0, }; 345 unsigned long zholes_size[MAX_NR_ZONES] = { 0, };
347 unsigned long i, j, pfn; 346 unsigned long i, j, pfn;
@@ -354,19 +353,19 @@ void __init paging_init(void)
354#endif 353#endif
355 kmap_coherent_init(); 354 kmap_coherent_init();
356 355
357 max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
358 low = max_low_pfn;
359
360#ifdef CONFIG_ISA 356#ifdef CONFIG_ISA
361 if (low < max_dma) 357 if (max_low_pfn >= MAX_DMA_PFN)
362 zones_size[ZONE_DMA] = low; 358 if (min_low_pfn >= MAX_DMA_PFN) {
363 else { 359 zones_size[ZONE_DMA] = 0;
364 zones_size[ZONE_DMA] = max_dma; 360 zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
365 zones_size[ZONE_NORMAL] = low - max_dma; 361 } else {
366 } 362 zones_size[ZONE_DMA] = MAX_DMA_PFN - min_low_pfn;
367#else 363 zones_size[ZONE_NORMAL] = max_low_pfn - MAX_DMA_PFN;
368 zones_size[ZONE_DMA] = low; 364 }
365 else
369#endif 366#endif
367 zones_size[ZONE_DMA] = max_low_pfn - min_low_pfn;
368
370#ifdef CONFIG_HIGHMEM 369#ifdef CONFIG_HIGHMEM
371 zones_size[ZONE_HIGHMEM] = highend_pfn - highstart_pfn; 370 zones_size[ZONE_HIGHMEM] = highend_pfn - highstart_pfn;
372 371
@@ -467,7 +466,7 @@ void __init mem_init(void)
467} 466}
468#endif /* !CONFIG_NEED_MULTIPLE_NODES */ 467#endif /* !CONFIG_NEED_MULTIPLE_NODES */
469 468
470static void free_init_pages(char *what, unsigned long begin, unsigned long end) 469void free_init_pages(const char *what, unsigned long begin, unsigned long end)
471{ 470{
472 unsigned long pfn; 471 unsigned long pfn;
473 472
@@ -493,18 +492,25 @@ void free_initrd_mem(unsigned long start, unsigned long end)
493} 492}
494#endif 493#endif
495 494
496extern unsigned long prom_free_prom_memory(void);
497
498void free_initmem(void) 495void free_initmem(void)
499{ 496{
500 unsigned long freed; 497 prom_free_prom_memory();
501
502 freed = prom_free_prom_memory();
503 if (freed)
504 printk(KERN_INFO "Freeing firmware memory: %ldkb freed\n",
505 freed >> 10);
506
507 free_init_pages("unused kernel memory", 498 free_init_pages("unused kernel memory",
508 __pa_symbol(&__init_begin), 499 __pa_symbol(&__init_begin),
509 __pa_symbol(&__init_end)); 500 __pa_symbol(&__init_end));
510} 501}
502
503unsigned long pgd_current[NR_CPUS];
504/*
505 * On 64-bit we've got three-level pagetables with a slightly
506 * different layout ...
507 */
508#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))
509pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
510#ifdef CONFIG_64BIT
511#ifdef MODULE_START
512pgd_t module_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
513#endif
514pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
515#endif
516pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
diff --git a/arch/mips/momentum/jaguar_atx/Makefile b/arch/mips/momentum/jaguar_atx/Makefile
index 67372f3f9654..2e8cebd49bc0 100644
--- a/arch/mips/momentum/jaguar_atx/Makefile
+++ b/arch/mips/momentum/jaguar_atx/Makefile
@@ -6,7 +6,7 @@
6# unless it's something special (ie not a .c file). 6# unless it's something special (ie not a .c file).
7# 7#
8 8
9obj-y += irq.o prom.o reset.o setup.o 9obj-y += irq.o platform.o prom.o reset.o setup.o
10 10
11obj-$(CONFIG_SERIAL_8250_CONSOLE) += ja-console.o 11obj-$(CONFIG_SERIAL_8250_CONSOLE) += ja-console.o
12obj-$(CONFIG_REMOTE_DEBUG) += dbg_io.o 12obj-$(CONFIG_REMOTE_DEBUG) += dbg_io.o
diff --git a/arch/mips/momentum/jaguar_atx/irq.c b/arch/mips/momentum/jaguar_atx/irq.c
index 2efb25aa1aed..f2b432585df2 100644
--- a/arch/mips/momentum/jaguar_atx/irq.c
+++ b/arch/mips/momentum/jaguar_atx/irq.c
@@ -82,8 +82,8 @@ void __init arch_init_irq(void)
82 */ 82 */
83 clear_c0_status(ST0_IM); 83 clear_c0_status(ST0_IM);
84 84
85 mips_cpu_irq_init(0); 85 mips_cpu_irq_init();
86 rm7k_cpu_irq_init(8); 86 rm7k_cpu_irq_init();
87 87
88 /* set up the cascading interrupts */ 88 /* set up the cascading interrupts */
89 setup_irq(8, &cascade_mv64340); 89 setup_irq(8, &cascade_mv64340);
diff --git a/arch/mips/momentum/jaguar_atx/jaguar_atx_fpga.h b/arch/mips/momentum/jaguar_atx/jaguar_atx_fpga.h
index 6978654c712b..022f6974b76e 100644
--- a/arch/mips/momentum/jaguar_atx/jaguar_atx_fpga.h
+++ b/arch/mips/momentum/jaguar_atx/jaguar_atx_fpga.h
@@ -46,7 +46,9 @@
46 46
47extern unsigned long ja_fpga_base; 47extern unsigned long ja_fpga_base;
48 48
49#define JAGUAR_FPGA_WRITE(x,y) writeb(x, ja_fpga_base + JAGUAR_ATX_REG_##y) 49#define __FPGA_REG_TO_ADDR(reg) \
50#define JAGUAR_FPGA_READ(x) readb(ja_fpga_base + JAGUAR_ATX_REG_##x) 50 ((void *) ja_fpga_base + JAGUAR_ATX_REG_##reg)
51#define JAGUAR_FPGA_WRITE(x, reg) writeb(x, __FPGA_REG_TO_ADDR(reg))
52#define JAGUAR_FPGA_READ(reg) readb(__FPGA_REG_TO_ADDR(reg))
51 53
52#endif 54#endif
diff --git a/arch/mips/momentum/jaguar_atx/platform.c b/arch/mips/momentum/jaguar_atx/platform.c
new file mode 100644
index 000000000000..035ea5137c71
--- /dev/null
+++ b/arch/mips/momentum/jaguar_atx/platform.c
@@ -0,0 +1,235 @@
1#include <linux/delay.h>
2#include <linux/if_ether.h>
3#include <linux/ioport.h>
4#include <linux/mv643xx.h>
5#include <linux/platform_device.h>
6
7#include "jaguar_atx_fpga.h"
8
9#if defined(CONFIG_MV643XX_ETH) || defined(CONFIG_MV643XX_ETH_MODULE)
10
11static struct resource mv643xx_eth_shared_resources[] = {
12 [0] = {
13 .name = "ethernet shared base",
14 .start = 0xf1000000 + MV643XX_ETH_SHARED_REGS,
15 .end = 0xf1000000 + MV643XX_ETH_SHARED_REGS +
16 MV643XX_ETH_SHARED_REGS_SIZE - 1,
17 .flags = IORESOURCE_MEM,
18 },
19};
20
21static struct platform_device mv643xx_eth_shared_device = {
22 .name = MV643XX_ETH_SHARED_NAME,
23 .id = 0,
24 .num_resources = ARRAY_SIZE(mv643xx_eth_shared_resources),
25 .resource = mv643xx_eth_shared_resources,
26};
27
28#define MV_SRAM_BASE 0xfe000000UL
29#define MV_SRAM_SIZE (256 * 1024)
30
31#define MV_SRAM_RXRING_SIZE (MV_SRAM_SIZE / 4)
32#define MV_SRAM_TXRING_SIZE (MV_SRAM_SIZE / 4)
33
34#define MV_SRAM_BASE_ETH0 MV_SRAM_BASE
35#define MV_SRAM_BASE_ETH1 (MV_SRAM_BASE + (MV_SRAM_SIZE / 2))
36
37#define MV64x60_IRQ_ETH_0 48
38#define MV64x60_IRQ_ETH_1 49
39#define MV64x60_IRQ_ETH_2 50
40
41#ifdef CONFIG_MV643XX_ETH_0
42
43static struct resource mv64x60_eth0_resources[] = {
44 [0] = {
45 .name = "eth0 irq",
46 .start = MV64x60_IRQ_ETH_0,
47 .end = MV64x60_IRQ_ETH_0,
48 .flags = IORESOURCE_IRQ,
49 },
50};
51
52static char eth0_mac_addr[ETH_ALEN];
53
54static struct mv643xx_eth_platform_data eth0_pd = {
55 .mac_addr = eth0_mac_addr,
56
57 .tx_sram_addr = MV_SRAM_BASE_ETH0,
58 .tx_sram_size = MV_SRAM_TXRING_SIZE,
59 .tx_queue_size = MV_SRAM_TXRING_SIZE / 16,
60
61 .rx_sram_addr = MV_SRAM_BASE_ETH0 + MV_SRAM_TXRING_SIZE,
62 .rx_sram_size = MV_SRAM_RXRING_SIZE,
63 .rx_queue_size = MV_SRAM_RXRING_SIZE / 16,
64};
65
66static struct platform_device eth0_device = {
67 .name = MV643XX_ETH_NAME,
68 .id = 0,
69 .num_resources = ARRAY_SIZE(mv64x60_eth0_resources),
70 .resource = mv64x60_eth0_resources,
71 .dev = {
72 .platform_data = &eth0_pd,
73 },
74};
75#endif /* CONFIG_MV643XX_ETH_0 */
76
77#ifdef CONFIG_MV643XX_ETH_1
78
79static struct resource mv64x60_eth1_resources[] = {
80 [0] = {
81 .name = "eth1 irq",
82 .start = MV64x60_IRQ_ETH_1,
83 .end = MV64x60_IRQ_ETH_1,
84 .flags = IORESOURCE_IRQ,
85 },
86};
87
88static char eth1_mac_addr[ETH_ALEN];
89
90static struct mv643xx_eth_platform_data eth1_pd = {
91 .mac_addr = eth1_mac_addr,
92
93 .tx_sram_addr = MV_SRAM_BASE_ETH1,
94 .tx_sram_size = MV_SRAM_TXRING_SIZE,
95 .tx_queue_size = MV_SRAM_TXRING_SIZE / 16,
96
97 .rx_sram_addr = MV_SRAM_BASE_ETH1 + MV_SRAM_TXRING_SIZE,
98 .rx_sram_size = MV_SRAM_RXRING_SIZE,
99 .rx_queue_size = MV_SRAM_RXRING_SIZE / 16,
100};
101
102static struct platform_device eth1_device = {
103 .name = MV643XX_ETH_NAME,
104 .id = 1,
105 .num_resources = ARRAY_SIZE(mv64x60_eth1_resources),
106 .resource = mv64x60_eth1_resources,
107 .dev = {
108 .platform_data = &eth1_pd,
109 },
110};
111#endif /* CONFIG_MV643XX_ETH_1 */
112
113#ifdef CONFIG_MV643XX_ETH_2
114
115static struct resource mv64x60_eth2_resources[] = {
116 [0] = {
117 .name = "eth2 irq",
118 .start = MV64x60_IRQ_ETH_2,
119 .end = MV64x60_IRQ_ETH_2,
120 .flags = IORESOURCE_IRQ,
121 },
122};
123
124static char eth2_mac_addr[ETH_ALEN];
125
126static struct mv643xx_eth_platform_data eth2_pd = {
127 .mac_addr = eth2_mac_addr,
128};
129
130static struct platform_device eth2_device = {
131 .name = MV643XX_ETH_NAME,
132 .id = 1,
133 .num_resources = ARRAY_SIZE(mv64x60_eth2_resources),
134 .resource = mv64x60_eth2_resources,
135 .dev = {
136 .platform_data = &eth2_pd,
137 },
138};
139#endif /* CONFIG_MV643XX_ETH_2 */
140
141static struct platform_device *mv643xx_eth_pd_devs[] __initdata = {
142 &mv643xx_eth_shared_device,
143#ifdef CONFIG_MV643XX_ETH_0
144 &eth0_device,
145#endif
146#ifdef CONFIG_MV643XX_ETH_1
147 &eth1_device,
148#endif
149#ifdef CONFIG_MV643XX_ETH_2
150 &eth2_device,
151#endif
152};
153
154static u8 __init exchange_bit(u8 val, u8 cs)
155{
156 /* place the data */
157 JAGUAR_FPGA_WRITE((val << 2) | cs, EEPROM_MODE);
158 udelay(1);
159
160 /* turn the clock on */
161 JAGUAR_FPGA_WRITE((val << 2) | cs | 0x2, EEPROM_MODE);
162 udelay(1);
163
164 /* turn the clock off and read-strobe */
165 JAGUAR_FPGA_WRITE((val << 2) | cs | 0x10, EEPROM_MODE);
166
167 /* return the data */
168 return (JAGUAR_FPGA_READ(EEPROM_MODE) >> 3) & 0x1;
169}
170
171static void __init get_mac(char dest[6])
172{
173 u8 read_opcode[12] = {1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
174 int i,j;
175
176 for (i = 0; i < 12; i++)
177 exchange_bit(read_opcode[i], 1);
178
179 for (j = 0; j < 6; j++) {
180 dest[j] = 0;
181 for (i = 0; i < 8; i++) {
182 dest[j] <<= 1;
183 dest[j] |= exchange_bit(0, 1);
184 }
185 }
186
187 /* turn off CS */
188 exchange_bit(0,0);
189}
190
191/*
192 * Copy and increment ethernet MAC address by a small value.
193 *
194 * This is useful for systems where the only one MAC address is stored in
195 * non-volatile memory for multiple ports.
196 */
197static inline void eth_mac_add(unsigned char *dst, unsigned char *src,
198 unsigned int add)
199{
200 int i;
201
202 BUG_ON(add >= 256);
203
204 for (i = ETH_ALEN; i >= 0; i--) {
205 dst[i] = src[i] + add;
206 add = dst[i] < src[i]; /* compute carry */
207 }
208
209 WARN_ON(add);
210}
211
212static int __init mv643xx_eth_add_pds(void)
213{
214 unsigned char mac[ETH_ALEN];
215 int ret;
216
217 get_mac(mac);
218#ifdef CONFIG_MV643XX_ETH_0
219 eth_mac_add(eth1_mac_addr, mac, 0);
220#endif
221#ifdef CONFIG_MV643XX_ETH_1
222 eth_mac_add(eth1_mac_addr, mac, 1);
223#endif
224#ifdef CONFIG_MV643XX_ETH_2
225 eth_mac_add(eth2_mac_addr, mac, 2);
226#endif
227 ret = platform_add_devices(mv643xx_eth_pd_devs,
228 ARRAY_SIZE(mv643xx_eth_pd_devs));
229
230 return ret;
231}
232
233device_initcall(mv643xx_eth_add_pds);
234
235#endif /* defined(CONFIG_MV643XX_ETH) || defined(CONFIG_MV643XX_ETH_MODULE) */
diff --git a/arch/mips/momentum/jaguar_atx/prom.c b/arch/mips/momentum/jaguar_atx/prom.c
index 3d2712929293..5dd154ee58f6 100644
--- a/arch/mips/momentum/jaguar_atx/prom.c
+++ b/arch/mips/momentum/jaguar_atx/prom.c
@@ -39,56 +39,6 @@ const char *get_system_type(void)
39 return "Momentum Jaguar-ATX"; 39 return "Momentum Jaguar-ATX";
40} 40}
41 41
42#ifdef CONFIG_MV643XX_ETH
43extern unsigned char prom_mac_addr_base[6];
44
45static void burn_clocks(void)
46{
47 int i;
48
49 /* this loop should burn at least 1us -- this should be plenty */
50 for (i = 0; i < 0x10000; i++)
51 ;
52}
53
54static u8 exchange_bit(u8 val, u8 cs)
55{
56 /* place the data */
57 JAGUAR_FPGA_WRITE((val << 2) | cs, EEPROM_MODE);
58 burn_clocks();
59
60 /* turn the clock on */
61 JAGUAR_FPGA_WRITE((val << 2) | cs | 0x2, EEPROM_MODE);
62 burn_clocks();
63
64 /* turn the clock off and read-strobe */
65 JAGUAR_FPGA_WRITE((val << 2) | cs | 0x10, EEPROM_MODE);
66
67 /* return the data */
68 return ((JAGUAR_FPGA_READ(EEPROM_MODE) >> 3) & 0x1);
69}
70
71void get_mac(char dest[6])
72{
73 u8 read_opcode[12] = {1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
74 int i,j;
75
76 for (i = 0; i < 12; i++)
77 exchange_bit(read_opcode[i], 1);
78
79 for (j = 0; j < 6; j++) {
80 dest[j] = 0;
81 for (i = 0; i < 8; i++) {
82 dest[j] <<= 1;
83 dest[j] |= exchange_bit(0, 1);
84 }
85 }
86
87 /* turn off CS */
88 exchange_bit(0,0);
89}
90#endif
91
92#ifdef CONFIG_64BIT 42#ifdef CONFIG_64BIT
93 43
94unsigned long signext(unsigned long addr) 44unsigned long signext(unsigned long addr)
@@ -228,16 +178,10 @@ void __init prom_init(void)
228#endif /* CONFIG_64BIT */ 178#endif /* CONFIG_64BIT */
229 mips_machgroup = MACH_GROUP_MOMENCO; 179 mips_machgroup = MACH_GROUP_MOMENCO;
230 mips_machtype = MACH_MOMENCO_JAGUAR_ATX; 180 mips_machtype = MACH_MOMENCO_JAGUAR_ATX;
231
232#ifdef CONFIG_MV643XX_ETH
233 /* get the base MAC address for on-board ethernet ports */
234 get_mac(prom_mac_addr_base);
235#endif
236} 181}
237 182
238unsigned long __init prom_free_prom_memory(void) 183void __init prom_free_prom_memory(void)
239{ 184{
240 return 0;
241} 185}
242 186
243void __init prom_fixup_mem_map(unsigned long start, unsigned long end) 187void __init prom_fixup_mem_map(unsigned long start, unsigned long end)
diff --git a/arch/mips/momentum/ocelot_3/irq.c b/arch/mips/momentum/ocelot_3/irq.c
index cea0e5deb80e..3862d1d1add4 100644
--- a/arch/mips/momentum/ocelot_3/irq.c
+++ b/arch/mips/momentum/ocelot_3/irq.c
@@ -65,7 +65,7 @@ void __init arch_init_irq(void)
65 */ 65 */
66 clear_c0_status(ST0_IM | ST0_BEV); 66 clear_c0_status(ST0_IM | ST0_BEV);
67 67
68 rm7k_cpu_irq_init(8); 68 rm7k_cpu_irq_init();
69 69
70 /* set up the cascading interrupts */ 70 /* set up the cascading interrupts */
71 setup_irq(8, &cascade_mv64340); /* unmask intControl IM8, IRQ 9 */ 71 setup_irq(8, &cascade_mv64340); /* unmask intControl IM8, IRQ 9 */
diff --git a/arch/mips/momentum/ocelot_3/prom.c b/arch/mips/momentum/ocelot_3/prom.c
index 6ce9b7fdb824..8e02df63578a 100644
--- a/arch/mips/momentum/ocelot_3/prom.c
+++ b/arch/mips/momentum/ocelot_3/prom.c
@@ -180,9 +180,8 @@ void __init prom_init(void)
180#endif 180#endif
181} 181}
182 182
183unsigned long __init prom_free_prom_memory(void) 183void __init prom_free_prom_memory(void)
184{ 184{
185 return 0;
186} 185}
187 186
188void __init prom_fixup_mem_map(unsigned long start, unsigned long end) 187void __init prom_fixup_mem_map(unsigned long start, unsigned long end)
diff --git a/arch/mips/momentum/ocelot_c/cpci-irq.c b/arch/mips/momentum/ocelot_c/cpci-irq.c
index bb11fef08472..186a140fd2a9 100644
--- a/arch/mips/momentum/ocelot_c/cpci-irq.c
+++ b/arch/mips/momentum/ocelot_c/cpci-irq.c
@@ -84,7 +84,7 @@ void ll_cpci_irq(void)
84} 84}
85 85
86struct irq_chip cpci_irq_type = { 86struct irq_chip cpci_irq_type = {
87 .typename = "CPCI/FPGA", 87 .name = "CPCI/FPGA",
88 .ack = mask_cpci_irq, 88 .ack = mask_cpci_irq,
89 .mask = mask_cpci_irq, 89 .mask = mask_cpci_irq,
90 .mask_ack = mask_cpci_irq, 90 .mask_ack = mask_cpci_irq,
diff --git a/arch/mips/momentum/ocelot_c/dbg_io.c b/arch/mips/momentum/ocelot_c/dbg_io.c
index 2128684584f5..32d6fb4ee679 100644
--- a/arch/mips/momentum/ocelot_c/dbg_io.c
+++ b/arch/mips/momentum/ocelot_c/dbg_io.c
@@ -1,6 +1,4 @@
1 1
2#ifdef CONFIG_KGDB
3
4#include <asm/serial.h> /* For the serial port location and base baud */ 2#include <asm/serial.h> /* For the serial port location and base baud */
5 3
6/* --- CONFIG --- */ 4/* --- CONFIG --- */
@@ -121,5 +119,3 @@ int putDebugChar(uint8 byte)
121 UART16550_WRITE(OFS_SEND_BUFFER, byte); 119 UART16550_WRITE(OFS_SEND_BUFFER, byte);
122 return 1; 120 return 1;
123} 121}
124
125#endif
diff --git a/arch/mips/momentum/ocelot_c/irq.c b/arch/mips/momentum/ocelot_c/irq.c
index ea65223a6d2c..40472f7944d7 100644
--- a/arch/mips/momentum/ocelot_c/irq.c
+++ b/arch/mips/momentum/ocelot_c/irq.c
@@ -94,7 +94,7 @@ void __init arch_init_irq(void)
94 */ 94 */
95 clear_c0_status(ST0_IM); 95 clear_c0_status(ST0_IM);
96 96
97 mips_cpu_irq_init(0); 97 mips_cpu_irq_init();
98 98
99 /* set up the cascading interrupts */ 99 /* set up the cascading interrupts */
100 setup_irq(3, &cascade_fpga); 100 setup_irq(3, &cascade_fpga);
diff --git a/arch/mips/momentum/ocelot_c/prom.c b/arch/mips/momentum/ocelot_c/prom.c
index d0b77e101d74..b689ceea8cfb 100644
--- a/arch/mips/momentum/ocelot_c/prom.c
+++ b/arch/mips/momentum/ocelot_c/prom.c
@@ -178,7 +178,6 @@ void __init prom_init(void)
178#endif 178#endif
179} 179}
180 180
181unsigned long __init prom_free_prom_memory(void) 181void __init prom_free_prom_memory(void)
182{ 182{
183 return 0;
184} 183}
diff --git a/arch/mips/momentum/ocelot_c/uart-irq.c b/arch/mips/momentum/ocelot_c/uart-irq.c
index a7a80c0da569..de1a31ee52f3 100644
--- a/arch/mips/momentum/ocelot_c/uart-irq.c
+++ b/arch/mips/momentum/ocelot_c/uart-irq.c
@@ -77,7 +77,7 @@ void ll_uart_irq(void)
77} 77}
78 78
79struct irq_chip uart_irq_type = { 79struct irq_chip uart_irq_type = {
80 .typename = "UART/FPGA", 80 .name = "UART/FPGA",
81 .ack = mask_uart_irq, 81 .ack = mask_uart_irq,
82 .mask = mask_uart_irq, 82 .mask = mask_uart_irq,
83 .mask_ack = mask_uart_irq, 83 .mask_ack = mask_uart_irq,
diff --git a/arch/mips/momentum/ocelot_g/dbg_io.c b/arch/mips/momentum/ocelot_g/dbg_io.c
index 2128684584f5..32d6fb4ee679 100644
--- a/arch/mips/momentum/ocelot_g/dbg_io.c
+++ b/arch/mips/momentum/ocelot_g/dbg_io.c
@@ -1,6 +1,4 @@
1 1
2#ifdef CONFIG_KGDB
3
4#include <asm/serial.h> /* For the serial port location and base baud */ 2#include <asm/serial.h> /* For the serial port location and base baud */
5 3
6/* --- CONFIG --- */ 4/* --- CONFIG --- */
@@ -121,5 +119,3 @@ int putDebugChar(uint8 byte)
121 UART16550_WRITE(OFS_SEND_BUFFER, byte); 119 UART16550_WRITE(OFS_SEND_BUFFER, byte);
122 return 1; 120 return 1;
123} 121}
124
125#endif
diff --git a/arch/mips/momentum/ocelot_g/irq.c b/arch/mips/momentum/ocelot_g/irq.c
index da46524e87cb..273541fe7087 100644
--- a/arch/mips/momentum/ocelot_g/irq.c
+++ b/arch/mips/momentum/ocelot_g/irq.c
@@ -94,8 +94,8 @@ void __init arch_init_irq(void)
94 clear_c0_status(ST0_IM); 94 clear_c0_status(ST0_IM);
95 local_irq_disable(); 95 local_irq_disable();
96 96
97 mips_cpu_irq_init(0); 97 mips_cpu_irq_init();
98 rm7k_cpu_irq_init(8); 98 rm7k_cpu_irq_init();
99 99
100 gt64240_irq_init(); 100 gt64240_irq_init();
101} 101}
diff --git a/arch/mips/momentum/ocelot_g/prom.c b/arch/mips/momentum/ocelot_g/prom.c
index 2f75c6b91ec5..836d0830720d 100644
--- a/arch/mips/momentum/ocelot_g/prom.c
+++ b/arch/mips/momentum/ocelot_g/prom.c
@@ -79,7 +79,6 @@ void __init prom_init(void)
79 } 79 }
80} 80}
81 81
82unsigned long __init prom_free_prom_memory(void) 82void __init prom_free_prom_memory(void)
83{ 83{
84 return 0;
85} 84}
diff --git a/arch/mips/oprofile/Kconfig b/arch/mips/oprofile/Kconfig
index 55feaf798596..ca395ef06d4e 100644
--- a/arch/mips/oprofile/Kconfig
+++ b/arch/mips/oprofile/Kconfig
@@ -11,7 +11,7 @@ config PROFILING
11 11
12config OPROFILE 12config OPROFILE
13 tristate "OProfile system profiling (EXPERIMENTAL)" 13 tristate "OProfile system profiling (EXPERIMENTAL)"
14 depends on PROFILING && EXPERIMENTAL 14 depends on PROFILING && !!MIPS_MT_SMTC && EXPERIMENTAL
15 help 15 help
16 OProfile is a profiling system capable of profiling the 16 OProfile is a profiling system capable of profiling the
17 whole system, include the kernel, kernel modules, libraries, 17 whole system, include the kernel, kernel modules, libraries,
diff --git a/arch/mips/pci/fixup-vr4133.c b/arch/mips/pci/fixup-vr4133.c
index 597b89764ba1..a8d9d22b13df 100644
--- a/arch/mips/pci/fixup-vr4133.c
+++ b/arch/mips/pci/fixup-vr4133.c
@@ -17,8 +17,10 @@
17 */ 17 */
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/pci.h> 19#include <linux/pci.h>
20#include <linux/kernel.h>
20 21
21#include <asm/io.h> 22#include <asm/io.h>
23#include <asm/i8259.h>
22#include <asm/vr41xx/cmbvr4133.h> 24#include <asm/vr41xx/cmbvr4133.h>
23 25
24extern int vr4133_rockhopper; 26extern int vr4133_rockhopper;
@@ -142,7 +144,7 @@ int rockhopper_get_irq(struct pci_dev *dev, u8 pin, u8 slot)
142 if (bus == NULL) 144 if (bus == NULL)
143 return -1; 145 return -1;
144 146
145 for (i = 0; i < sizeof (int_map) / sizeof (int_map[0]); i++) { 147 for (i = 0; i < ARRAY_SIZE(int_map); i++) {
146 if (int_map[i].bus == bus->number && int_map[i].slot == slot) { 148 if (int_map[i].bus == bus->number && int_map[i].slot == slot) {
147 int line; 149 int line;
148 for (line = 0; line < 4; line++) 150 for (line = 0; line < 4; line++)
@@ -160,17 +162,7 @@ int rockhopper_get_irq(struct pci_dev *dev, u8 pin, u8 slot)
160#ifdef CONFIG_ROCKHOPPER 162#ifdef CONFIG_ROCKHOPPER
161void i8259_init(void) 163void i8259_init(void)
162{ 164{
163 outb(0x11, 0x20); /* Master ICW1 */ 165 init_i8259_irqs();
164 outb(I8259_IRQ_BASE, 0x21); /* Master ICW2 */
165 outb(0x04, 0x21); /* Master ICW3 */
166 outb(0x01, 0x21); /* Master ICW4 */
167 outb(0xff, 0x21); /* Master IMW */
168
169 outb(0x11, 0xa0); /* Slave ICW1 */
170 outb(I8259_IRQ_BASE + 8, 0xa1); /* Slave ICW2 */
171 outb(0x02, 0xa1); /* Slave ICW3 */
172 outb(0x01, 0xa1); /* Slave ICW4 */
173 outb(0xff, 0xa1); /* Slave IMW */
174 166
175 outb(0x00, 0x4d0); 167 outb(0x00, 0x4d0);
176 outb(0x02, 0x4d1); /* USB IRQ9 is level */ 168 outb(0x02, 0x4d1); /* USB IRQ9 is level */
diff --git a/arch/mips/philips/pnx8550/common/int.c b/arch/mips/philips/pnx8550/common/int.c
index 2c36c108c4d6..d48665ebd33c 100644
--- a/arch/mips/philips/pnx8550/common/int.c
+++ b/arch/mips/philips/pnx8550/common/int.c
@@ -159,7 +159,7 @@ int pnx8550_set_gic_priority(int irq, int priority)
159} 159}
160 160
161static struct irq_chip level_irq_type = { 161static struct irq_chip level_irq_type = {
162 .typename = "PNX Level IRQ", 162 .name = "PNX Level IRQ",
163 .ack = mask_irq, 163 .ack = mask_irq,
164 .mask = mask_irq, 164 .mask = mask_irq,
165 .mask_ack = mask_irq, 165 .mask_ack = mask_irq,
diff --git a/arch/mips/philips/pnx8550/common/prom.c b/arch/mips/philips/pnx8550/common/prom.c
index eb6ec11fef07..8aeed6c2b8c3 100644
--- a/arch/mips/philips/pnx8550/common/prom.c
+++ b/arch/mips/philips/pnx8550/common/prom.c
@@ -106,9 +106,8 @@ int get_ethernet_addr(char *ethernet_addr)
106 return 0; 106 return 0;
107} 107}
108 108
109unsigned long __init prom_free_prom_memory(void) 109void __init prom_free_prom_memory(void)
110{ 110{
111 return 0;
112} 111}
113 112
114extern int pnx8550_console_port; 113extern int pnx8550_console_port;
diff --git a/arch/mips/pmc-sierra/yosemite/dbg_io.c b/arch/mips/pmc-sierra/yosemite/dbg_io.c
index 0f659c9106ac..6362c702e389 100644
--- a/arch/mips/pmc-sierra/yosemite/dbg_io.c
+++ b/arch/mips/pmc-sierra/yosemite/dbg_io.c
@@ -93,7 +93,7 @@
93 * Functions to READ and WRITE to serial port 1 93 * Functions to READ and WRITE to serial port 1
94 */ 94 */
95#define SERIAL_READ_1(ofs) (*((volatile unsigned char*) \ 95#define SERIAL_READ_1(ofs) (*((volatile unsigned char*) \
96 (TITAN_SERIAL_BASE_1 + ofs) 96 (TITAN_SERIAL_BASE_1 + ofs)))
97 97
98#define SERIAL_WRITE_1(ofs, val) ((*((volatile unsigned char*) \ 98#define SERIAL_WRITE_1(ofs, val) ((*((volatile unsigned char*) \
99 (TITAN_SERIAL_BASE_1 + ofs))) = val) 99 (TITAN_SERIAL_BASE_1 + ofs))) = val)
diff --git a/arch/mips/pmc-sierra/yosemite/irq.c b/arch/mips/pmc-sierra/yosemite/irq.c
index adb048527e76..428d1f45a287 100644
--- a/arch/mips/pmc-sierra/yosemite/irq.c
+++ b/arch/mips/pmc-sierra/yosemite/irq.c
@@ -148,9 +148,9 @@ void __init arch_init_irq(void)
148{ 148{
149 clear_c0_status(ST0_IM); 149 clear_c0_status(ST0_IM);
150 150
151 mips_cpu_irq_init(0); 151 mips_cpu_irq_init();
152 rm7k_cpu_irq_init(8); 152 rm7k_cpu_irq_init();
153 rm9k_cpu_irq_init(12); 153 rm9k_cpu_irq_init();
154 154
155#ifdef CONFIG_KGDB 155#ifdef CONFIG_KGDB
156 /* At this point, initialize the second serial port */ 156 /* At this point, initialize the second serial port */
diff --git a/arch/mips/pmc-sierra/yosemite/prom.c b/arch/mips/pmc-sierra/yosemite/prom.c
index 9fe4973377c3..1e1685e415a4 100644
--- a/arch/mips/pmc-sierra/yosemite/prom.c
+++ b/arch/mips/pmc-sierra/yosemite/prom.c
@@ -132,9 +132,8 @@ void __init prom_init(void)
132 prom_grab_secondary(); 132 prom_grab_secondary();
133} 133}
134 134
135unsigned long __init prom_free_prom_memory(void) 135void __init prom_free_prom_memory(void)
136{ 136{
137 return 0;
138} 137}
139 138
140void __init prom_fixup_mem_map(unsigned long start, unsigned long end) 139void __init prom_fixup_mem_map(unsigned long start, unsigned long end)
diff --git a/arch/mips/pmc-sierra/yosemite/setup.c b/arch/mips/pmc-sierra/yosemite/setup.c
index 1b9b0d396d3e..6a6e15e40009 100644
--- a/arch/mips/pmc-sierra/yosemite/setup.c
+++ b/arch/mips/pmc-sierra/yosemite/setup.c
@@ -171,6 +171,7 @@ static void __init py_map_ocd(void)
171 171
172static void __init py_uart_setup(void) 172static void __init py_uart_setup(void)
173{ 173{
174#ifdef CONFIG_SERIAL_8250
174 struct uart_port up; 175 struct uart_port up;
175 176
176 /* 177 /*
@@ -188,6 +189,7 @@ static void __init py_uart_setup(void)
188 189
189 if (early_serial_setup(&up)) 190 if (early_serial_setup(&up))
190 printk(KERN_ERR "Early serial init of port 0 failed\n"); 191 printk(KERN_ERR "Early serial init of port 0 failed\n");
192#endif /* CONFIG_SERIAL_8250 */
191} 193}
192 194
193static void __init py_rtc_setup(void) 195static void __init py_rtc_setup(void)
diff --git a/arch/mips/qemu/q-mem.c b/arch/mips/qemu/q-mem.c
index d174fac43031..dae39b59de15 100644
--- a/arch/mips/qemu/q-mem.c
+++ b/arch/mips/qemu/q-mem.c
@@ -1,6 +1,5 @@
1#include <linux/init.h> 1#include <linux/init.h>
2 2
3unsigned long __init prom_free_prom_memory(void) 3void __init prom_free_prom_memory(void)
4{ 4{
5 return 0UL;
6} 5}
diff --git a/arch/mips/sgi-ip22/ip22-eisa.c b/arch/mips/sgi-ip22/ip22-eisa.c
index a1a9af6da7bf..6b6e97b90c6e 100644
--- a/arch/mips/sgi-ip22/ip22-eisa.c
+++ b/arch/mips/sgi-ip22/ip22-eisa.c
@@ -139,7 +139,7 @@ static void end_eisa1_irq(unsigned int irq)
139} 139}
140 140
141static struct irq_chip ip22_eisa1_irq_type = { 141static struct irq_chip ip22_eisa1_irq_type = {
142 .typename = "IP22 EISA", 142 .name = "IP22 EISA",
143 .startup = startup_eisa1_irq, 143 .startup = startup_eisa1_irq,
144 .ack = mask_and_ack_eisa1_irq, 144 .ack = mask_and_ack_eisa1_irq,
145 .mask = disable_eisa1_irq, 145 .mask = disable_eisa1_irq,
@@ -194,7 +194,7 @@ static void end_eisa2_irq(unsigned int irq)
194} 194}
195 195
196static struct irq_chip ip22_eisa2_irq_type = { 196static struct irq_chip ip22_eisa2_irq_type = {
197 .typename = "IP22 EISA", 197 .name = "IP22 EISA",
198 .startup = startup_eisa2_irq, 198 .startup = startup_eisa2_irq,
199 .ack = mask_and_ack_eisa2_irq, 199 .ack = mask_and_ack_eisa2_irq,
200 .mask = disable_eisa2_irq, 200 .mask = disable_eisa2_irq,
diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c
index c44f8be0644f..b454924aeb56 100644
--- a/arch/mips/sgi-ip22/ip22-int.c
+++ b/arch/mips/sgi-ip22/ip22-int.c
@@ -19,6 +19,7 @@
19 19
20#include <asm/mipsregs.h> 20#include <asm/mipsregs.h>
21#include <asm/addrspace.h> 21#include <asm/addrspace.h>
22#include <asm/irq_cpu.h>
22 23
23#include <asm/sgi/ioc.h> 24#include <asm/sgi/ioc.h>
24#include <asm/sgi/hpc3.h> 25#include <asm/sgi/hpc3.h>
@@ -52,7 +53,7 @@ static void disable_local0_irq(unsigned int irq)
52} 53}
53 54
54static struct irq_chip ip22_local0_irq_type = { 55static struct irq_chip ip22_local0_irq_type = {
55 .typename = "IP22 local 0", 56 .name = "IP22 local 0",
56 .ack = disable_local0_irq, 57 .ack = disable_local0_irq,
57 .mask = disable_local0_irq, 58 .mask = disable_local0_irq,
58 .mask_ack = disable_local0_irq, 59 .mask_ack = disable_local0_irq,
@@ -73,7 +74,7 @@ void disable_local1_irq(unsigned int irq)
73} 74}
74 75
75static struct irq_chip ip22_local1_irq_type = { 76static struct irq_chip ip22_local1_irq_type = {
76 .typename = "IP22 local 1", 77 .name = "IP22 local 1",
77 .ack = disable_local1_irq, 78 .ack = disable_local1_irq,
78 .mask = disable_local1_irq, 79 .mask = disable_local1_irq,
79 .mask_ack = disable_local1_irq, 80 .mask_ack = disable_local1_irq,
@@ -94,7 +95,7 @@ void disable_local2_irq(unsigned int irq)
94} 95}
95 96
96static struct irq_chip ip22_local2_irq_type = { 97static struct irq_chip ip22_local2_irq_type = {
97 .typename = "IP22 local 2", 98 .name = "IP22 local 2",
98 .ack = disable_local2_irq, 99 .ack = disable_local2_irq,
99 .mask = disable_local2_irq, 100 .mask = disable_local2_irq,
100 .mask_ack = disable_local2_irq, 101 .mask_ack = disable_local2_irq,
@@ -115,7 +116,7 @@ void disable_local3_irq(unsigned int irq)
115} 116}
116 117
117static struct irq_chip ip22_local3_irq_type = { 118static struct irq_chip ip22_local3_irq_type = {
118 .typename = "IP22 local 3", 119 .name = "IP22 local 3",
119 .ack = disable_local3_irq, 120 .ack = disable_local3_irq,
120 .mask = disable_local3_irq, 121 .mask = disable_local3_irq,
121 .mask_ack = disable_local3_irq, 122 .mask_ack = disable_local3_irq,
@@ -253,8 +254,6 @@ asmlinkage void plat_irq_dispatch(void)
253 indy_8254timer_irq(); 254 indy_8254timer_irq();
254} 255}
255 256
256extern void mips_cpu_irq_init(unsigned int irq_base);
257
258void __init arch_init_irq(void) 257void __init arch_init_irq(void)
259{ 258{
260 int i; 259 int i;
@@ -316,7 +315,7 @@ void __init arch_init_irq(void)
316 sgint->cmeimask1 = 0; 315 sgint->cmeimask1 = 0;
317 316
318 /* init CPU irqs */ 317 /* init CPU irqs */
319 mips_cpu_irq_init(SGINT_CPU); 318 mips_cpu_irq_init();
320 319
321 for (i = SGINT_LOCAL0; i < SGI_INTERRUPTS; i++) { 320 for (i = SGINT_LOCAL0; i < SGI_INTERRUPTS; i++) {
322 struct irq_chip *handler; 321 struct irq_chip *handler;
diff --git a/arch/mips/sgi-ip22/ip22-mc.c b/arch/mips/sgi-ip22/ip22-mc.c
index b58bd522262b..ddb6506d8341 100644
--- a/arch/mips/sgi-ip22/ip22-mc.c
+++ b/arch/mips/sgi-ip22/ip22-mc.c
@@ -202,7 +202,6 @@ void __init sgimc_init(void)
202} 202}
203 203
204void __init prom_meminit(void) {} 204void __init prom_meminit(void) {}
205unsigned long __init prom_free_prom_memory(void) 205void __init prom_free_prom_memory(void)
206{ 206{
207 return 0;
208} 207}
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 319f8803ef6f..60ade7690e09 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -333,7 +333,7 @@ static inline void disable_bridge_irq(unsigned int irq)
333} 333}
334 334
335static struct irq_chip bridge_irq_type = { 335static struct irq_chip bridge_irq_type = {
336 .typename = "bridge", 336 .name = "bridge",
337 .startup = startup_bridge_irq, 337 .startup = startup_bridge_irq,
338 .shutdown = shutdown_bridge_irq, 338 .shutdown = shutdown_bridge_irq,
339 .ack = disable_bridge_irq, 339 .ack = disable_bridge_irq,
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 16e5682b01f1..0e3d535e9f43 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -498,10 +498,9 @@ void __init prom_meminit(void)
498 } 498 }
499} 499}
500 500
501unsigned long __init prom_free_prom_memory(void) 501void __init prom_free_prom_memory(void)
502{ 502{
503 /* We got nothing to free here ... */ 503 /* We got nothing to free here ... */
504 return 0;
505} 504}
506 505
507extern void pagetable_init(void); 506extern void pagetable_init(void);
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index c20e9899b34b..9ce513629b14 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -181,7 +181,7 @@ static void disable_rt_irq(unsigned int irq)
181} 181}
182 182
183static struct irq_chip rt_irq_type = { 183static struct irq_chip rt_irq_type = {
184 .typename = "SN HUB RT timer", 184 .name = "SN HUB RT timer",
185 .ack = disable_rt_irq, 185 .ack = disable_rt_irq,
186 .mask = disable_rt_irq, 186 .mask = disable_rt_irq,
187 .mask_ack = disable_rt_irq, 187 .mask_ack = disable_rt_irq,
diff --git a/arch/mips/sgi-ip32/ip32-irq.c b/arch/mips/sgi-ip32/ip32-irq.c
index ae063864c026..8c450d9e8696 100644
--- a/arch/mips/sgi-ip32/ip32-irq.c
+++ b/arch/mips/sgi-ip32/ip32-irq.c
@@ -144,7 +144,7 @@ static void end_cpu_irq(unsigned int irq)
144} 144}
145 145
146static struct irq_chip ip32_cpu_interrupt = { 146static struct irq_chip ip32_cpu_interrupt = {
147 .typename = "IP32 CPU", 147 .name = "IP32 CPU",
148 .ack = disable_cpu_irq, 148 .ack = disable_cpu_irq,
149 .mask = disable_cpu_irq, 149 .mask = disable_cpu_irq,
150 .mask_ack = disable_cpu_irq, 150 .mask_ack = disable_cpu_irq,
@@ -193,7 +193,7 @@ static void end_crime_irq(unsigned int irq)
193} 193}
194 194
195static struct irq_chip ip32_crime_interrupt = { 195static struct irq_chip ip32_crime_interrupt = {
196 .typename = "IP32 CRIME", 196 .name = "IP32 CRIME",
197 .ack = mask_and_ack_crime_irq, 197 .ack = mask_and_ack_crime_irq,
198 .mask = disable_crime_irq, 198 .mask = disable_crime_irq,
199 .mask_ack = mask_and_ack_crime_irq, 199 .mask_ack = mask_and_ack_crime_irq,
@@ -234,7 +234,7 @@ static void end_macepci_irq(unsigned int irq)
234} 234}
235 235
236static struct irq_chip ip32_macepci_interrupt = { 236static struct irq_chip ip32_macepci_interrupt = {
237 .typename = "IP32 MACE PCI", 237 .name = "IP32 MACE PCI",
238 .ack = disable_macepci_irq, 238 .ack = disable_macepci_irq,
239 .mask = disable_macepci_irq, 239 .mask = disable_macepci_irq,
240 .mask_ack = disable_macepci_irq, 240 .mask_ack = disable_macepci_irq,
@@ -347,7 +347,7 @@ static void end_maceisa_irq(unsigned irq)
347} 347}
348 348
349static struct irq_chip ip32_maceisa_interrupt = { 349static struct irq_chip ip32_maceisa_interrupt = {
350 .typename = "IP32 MACE ISA", 350 .name = "IP32 MACE ISA",
351 .ack = mask_and_ack_maceisa_irq, 351 .ack = mask_and_ack_maceisa_irq,
352 .mask = disable_maceisa_irq, 352 .mask = disable_maceisa_irq,
353 .mask_ack = mask_and_ack_maceisa_irq, 353 .mask_ack = mask_and_ack_maceisa_irq,
@@ -379,7 +379,7 @@ static void end_mace_irq(unsigned int irq)
379} 379}
380 380
381static struct irq_chip ip32_mace_interrupt = { 381static struct irq_chip ip32_mace_interrupt = {
382 .typename = "IP32 MACE", 382 .name = "IP32 MACE",
383 .ack = disable_mace_irq, 383 .ack = disable_mace_irq,
384 .mask = disable_mace_irq, 384 .mask = disable_mace_irq,
385 .mask_ack = disable_mace_irq, 385 .mask_ack = disable_mace_irq,
diff --git a/arch/mips/sgi-ip32/ip32-memory.c b/arch/mips/sgi-ip32/ip32-memory.c
index d37d40a3cdae..849d392a0013 100644
--- a/arch/mips/sgi-ip32/ip32-memory.c
+++ b/arch/mips/sgi-ip32/ip32-memory.c
@@ -43,7 +43,6 @@ void __init prom_meminit (void)
43} 43}
44 44
45 45
46unsigned long __init prom_free_prom_memory (void) 46void __init prom_free_prom_memory(void)
47{ 47{
48 return 0;
49} 48}
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c
index 2e8f6b2e2420..1dc5d05d8962 100644
--- a/arch/mips/sibyte/bcm1480/irq.c
+++ b/arch/mips/sibyte/bcm1480/irq.c
@@ -82,7 +82,7 @@ extern char sb1250_duart_present[];
82#endif 82#endif
83 83
84static struct irq_chip bcm1480_irq_type = { 84static struct irq_chip bcm1480_irq_type = {
85 .typename = "BCM1480-IMR", 85 .name = "BCM1480-IMR",
86 .ack = ack_bcm1480_irq, 86 .ack = ack_bcm1480_irq,
87 .mask = disable_bcm1480_irq, 87 .mask = disable_bcm1480_irq,
88 .mask_ack = ack_bcm1480_irq, 88 .mask_ack = ack_bcm1480_irq,
diff --git a/arch/mips/sibyte/cfe/setup.c b/arch/mips/sibyte/cfe/setup.c
index 6e8952da6e2a..9e6099e69622 100644
--- a/arch/mips/sibyte/cfe/setup.c
+++ b/arch/mips/sibyte/cfe/setup.c
@@ -343,10 +343,9 @@ void __init prom_init(void)
343 prom_meminit(); 343 prom_meminit();
344} 344}
345 345
346unsigned long __init prom_free_prom_memory(void) 346void __init prom_free_prom_memory(void)
347{ 347{
348 /* Not sure what I'm supposed to do here. Nothing, I think */ 348 /* Not sure what I'm supposed to do here. Nothing, I think */
349 return 0;
350} 349}
351 350
352void prom_putchar(char c) 351void prom_putchar(char c)
diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c
index 82ce7533053f..148239446e6e 100644
--- a/arch/mips/sibyte/sb1250/irq.c
+++ b/arch/mips/sibyte/sb1250/irq.c
@@ -67,7 +67,7 @@ extern char sb1250_duart_present[];
67#endif 67#endif
68 68
69static struct irq_chip sb1250_irq_type = { 69static struct irq_chip sb1250_irq_type = {
70 .typename = "SB1250-IMR", 70 .name = "SB1250-IMR",
71 .ack = ack_sb1250_irq, 71 .ack = ack_sb1250_irq,
72 .mask = disable_sb1250_irq, 72 .mask = disable_sb1250_irq,
73 .mask_ack = ack_sb1250_irq, 73 .mask_ack = ack_sb1250_irq,
diff --git a/arch/mips/sibyte/sb1250/prom.c b/arch/mips/sibyte/sb1250/prom.c
index 3c33a4517bc3..257c4e674353 100644
--- a/arch/mips/sibyte/sb1250/prom.c
+++ b/arch/mips/sibyte/sb1250/prom.c
@@ -87,10 +87,9 @@ void __init prom_init(void)
87 prom_meminit(); 87 prom_meminit();
88} 88}
89 89
90unsigned long __init prom_free_prom_memory(void) 90void __init prom_free_prom_memory(void)
91{ 91{
92 /* Not sure what I'm supposed to do here. Nothing, I think */ 92 /* Not sure what I'm supposed to do here. Nothing, I think */
93 return 0;
94} 93}
95 94
96void prom_putchar(char c) 95void prom_putchar(char c)
diff --git a/arch/mips/sni/irq.c b/arch/mips/sni/irq.c
index 8511bcc6d99d..039e8e540508 100644
--- a/arch/mips/sni/irq.c
+++ b/arch/mips/sni/irq.c
@@ -37,7 +37,7 @@ static void end_pciasic_irq(unsigned int irq)
37} 37}
38 38
39static struct irq_chip pciasic_irq_type = { 39static struct irq_chip pciasic_irq_type = {
40 .typename = "ASIC-PCI", 40 .name = "ASIC-PCI",
41 .ack = disable_pciasic_irq, 41 .ack = disable_pciasic_irq,
42 .mask = disable_pciasic_irq, 42 .mask = disable_pciasic_irq,
43 .mask_ack = disable_pciasic_irq, 43 .mask_ack = disable_pciasic_irq,
diff --git a/arch/mips/sni/sniprom.c b/arch/mips/sni/sniprom.c
index d1d0f1f493b4..1213d166f22e 100644
--- a/arch/mips/sni/sniprom.c
+++ b/arch/mips/sni/sniprom.c
@@ -67,9 +67,8 @@ void prom_printf(char *fmt, ...)
67 va_end(args); 67 va_end(args);
68} 68}
69 69
70unsigned long prom_free_prom_memory(void) 70void __init prom_free_prom_memory(void)
71{ 71{
72 return 0;
73} 72}
74 73
75/* 74/*
diff --git a/arch/mips/tx4927/common/tx4927_irq.c b/arch/mips/tx4927/common/tx4927_irq.c
index ed4a19adf361..e7f3e5b84dcf 100644
--- a/arch/mips/tx4927/common/tx4927_irq.c
+++ b/arch/mips/tx4927/common/tx4927_irq.c
@@ -120,7 +120,7 @@ static void tx4927_irq_pic_disable(unsigned int irq);
120 120
121#define TX4927_CP0_NAME "TX4927-CP0" 121#define TX4927_CP0_NAME "TX4927-CP0"
122static struct irq_chip tx4927_irq_cp0_type = { 122static struct irq_chip tx4927_irq_cp0_type = {
123 .typename = TX4927_CP0_NAME, 123 .name = TX4927_CP0_NAME,
124 .ack = tx4927_irq_cp0_disable, 124 .ack = tx4927_irq_cp0_disable,
125 .mask = tx4927_irq_cp0_disable, 125 .mask = tx4927_irq_cp0_disable,
126 .mask_ack = tx4927_irq_cp0_disable, 126 .mask_ack = tx4927_irq_cp0_disable,
@@ -129,7 +129,7 @@ static struct irq_chip tx4927_irq_cp0_type = {
129 129
130#define TX4927_PIC_NAME "TX4927-PIC" 130#define TX4927_PIC_NAME "TX4927-PIC"
131static struct irq_chip tx4927_irq_pic_type = { 131static struct irq_chip tx4927_irq_pic_type = {
132 .typename = TX4927_PIC_NAME, 132 .name = TX4927_PIC_NAME,
133 .ack = tx4927_irq_pic_disable, 133 .ack = tx4927_irq_pic_disable,
134 .mask = tx4927_irq_pic_disable, 134 .mask = tx4927_irq_pic_disable,
135 .mask_ack = tx4927_irq_pic_disable, 135 .mask_ack = tx4927_irq_pic_disable,
diff --git a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
index b54b529a29f9..dcce88f403c9 100644
--- a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
+++ b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
@@ -228,7 +228,7 @@ static void toshiba_rbtx4927_irq_isa_mask_and_ack(unsigned int irq);
228 228
229#define TOSHIBA_RBTX4927_IOC_NAME "RBTX4927-IOC" 229#define TOSHIBA_RBTX4927_IOC_NAME "RBTX4927-IOC"
230static struct irq_chip toshiba_rbtx4927_irq_ioc_type = { 230static struct irq_chip toshiba_rbtx4927_irq_ioc_type = {
231 .typename = TOSHIBA_RBTX4927_IOC_NAME, 231 .name = TOSHIBA_RBTX4927_IOC_NAME,
232 .ack = toshiba_rbtx4927_irq_ioc_disable, 232 .ack = toshiba_rbtx4927_irq_ioc_disable,
233 .mask = toshiba_rbtx4927_irq_ioc_disable, 233 .mask = toshiba_rbtx4927_irq_ioc_disable,
234 .mask_ack = toshiba_rbtx4927_irq_ioc_disable, 234 .mask_ack = toshiba_rbtx4927_irq_ioc_disable,
@@ -241,7 +241,7 @@ static struct irq_chip toshiba_rbtx4927_irq_ioc_type = {
241#ifdef CONFIG_TOSHIBA_FPCIB0 241#ifdef CONFIG_TOSHIBA_FPCIB0
242#define TOSHIBA_RBTX4927_ISA_NAME "RBTX4927-ISA" 242#define TOSHIBA_RBTX4927_ISA_NAME "RBTX4927-ISA"
243static struct irq_chip toshiba_rbtx4927_irq_isa_type = { 243static struct irq_chip toshiba_rbtx4927_irq_isa_type = {
244 .typename = TOSHIBA_RBTX4927_ISA_NAME, 244 .name = TOSHIBA_RBTX4927_ISA_NAME,
245 .ack = toshiba_rbtx4927_irq_isa_mask_and_ack, 245 .ack = toshiba_rbtx4927_irq_isa_mask_and_ack,
246 .mask = toshiba_rbtx4927_irq_isa_disable, 246 .mask = toshiba_rbtx4927_irq_isa_disable,
247 .mask_ack = toshiba_rbtx4927_irq_isa_mask_and_ack, 247 .mask_ack = toshiba_rbtx4927_irq_isa_mask_and_ack,
@@ -490,13 +490,13 @@ void toshiba_rbtx4927_irq_dump(char *key)
490 { 490 {
491 u32 i, j = 0; 491 u32 i, j = 0;
492 for (i = 0; i < NR_IRQS; i++) { 492 for (i = 0; i < NR_IRQS; i++) {
493 if (strcmp(irq_desc[i].chip->typename, "none") 493 if (strcmp(irq_desc[i].chip->name, "none")
494 == 0) 494 == 0)
495 continue; 495 continue;
496 496
497 if ((i >= 1) 497 if ((i >= 1)
498 && (irq_desc[i - 1].chip->typename == 498 && (irq_desc[i - 1].chip->name ==
499 irq_desc[i].chip->typename)) { 499 irq_desc[i].chip->name)) {
500 j++; 500 j++;
501 } else { 501 } else {
502 j = 0; 502 j = 0;
@@ -510,7 +510,7 @@ void toshiba_rbtx4927_irq_dump(char *key)
510 (u32) (irq_desc[i].action ? irq_desc[i]. 510 (u32) (irq_desc[i].action ? irq_desc[i].
511 action->handler : 0), 511 action->handler : 0),
512 irq_desc[i].depth, 512 irq_desc[i].depth,
513 irq_desc[i].chip->typename, j); 513 irq_desc[i].chip->name, j);
514 } 514 }
515 } 515 }
516#endif 516#endif
diff --git a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_prom.c b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_prom.c
index efe50562f0ce..9a3a5babd1fb 100644
--- a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_prom.c
+++ b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_prom.c
@@ -80,9 +80,8 @@ void __init prom_init(void)
80 add_memory_region(0, msize << 20, BOOT_MEM_RAM); 80 add_memory_region(0, msize << 20, BOOT_MEM_RAM);
81} 81}
82 82
83unsigned long __init prom_free_prom_memory(void) 83void __init prom_free_prom_memory(void)
84{ 84{
85 return 0;
86} 85}
87 86
88const char *get_system_type(void) 87const char *get_system_type(void)
diff --git a/arch/mips/tx4938/common/irq.c b/arch/mips/tx4938/common/irq.c
index a347b424d91c..3a2dbfc25014 100644
--- a/arch/mips/tx4938/common/irq.c
+++ b/arch/mips/tx4938/common/irq.c
@@ -49,7 +49,7 @@ static void tx4938_irq_pic_disable(unsigned int irq);
49 49
50#define TX4938_CP0_NAME "TX4938-CP0" 50#define TX4938_CP0_NAME "TX4938-CP0"
51static struct irq_chip tx4938_irq_cp0_type = { 51static struct irq_chip tx4938_irq_cp0_type = {
52 .typename = TX4938_CP0_NAME, 52 .name = TX4938_CP0_NAME,
53 .ack = tx4938_irq_cp0_disable, 53 .ack = tx4938_irq_cp0_disable,
54 .mask = tx4938_irq_cp0_disable, 54 .mask = tx4938_irq_cp0_disable,
55 .mask_ack = tx4938_irq_cp0_disable, 55 .mask_ack = tx4938_irq_cp0_disable,
@@ -58,7 +58,7 @@ static struct irq_chip tx4938_irq_cp0_type = {
58 58
59#define TX4938_PIC_NAME "TX4938-PIC" 59#define TX4938_PIC_NAME "TX4938-PIC"
60static struct irq_chip tx4938_irq_pic_type = { 60static struct irq_chip tx4938_irq_pic_type = {
61 .typename = TX4938_PIC_NAME, 61 .name = TX4938_PIC_NAME,
62 .ack = tx4938_irq_pic_disable, 62 .ack = tx4938_irq_pic_disable,
63 .mask = tx4938_irq_pic_disable, 63 .mask = tx4938_irq_pic_disable,
64 .mask_ack = tx4938_irq_pic_disable, 64 .mask_ack = tx4938_irq_pic_disable,
diff --git a/arch/mips/tx4938/toshiba_rbtx4938/irq.c b/arch/mips/tx4938/toshiba_rbtx4938/irq.c
index b6f363d08011..2e96dbb248b1 100644
--- a/arch/mips/tx4938/toshiba_rbtx4938/irq.c
+++ b/arch/mips/tx4938/toshiba_rbtx4938/irq.c
@@ -92,7 +92,7 @@ static void toshiba_rbtx4938_irq_ioc_disable(unsigned int irq);
92 92
93#define TOSHIBA_RBTX4938_IOC_NAME "RBTX4938-IOC" 93#define TOSHIBA_RBTX4938_IOC_NAME "RBTX4938-IOC"
94static struct irq_chip toshiba_rbtx4938_irq_ioc_type = { 94static struct irq_chip toshiba_rbtx4938_irq_ioc_type = {
95 .typename = TOSHIBA_RBTX4938_IOC_NAME, 95 .name = TOSHIBA_RBTX4938_IOC_NAME,
96 .ack = toshiba_rbtx4938_irq_ioc_disable, 96 .ack = toshiba_rbtx4938_irq_ioc_disable,
97 .mask = toshiba_rbtx4938_irq_ioc_disable, 97 .mask = toshiba_rbtx4938_irq_ioc_disable,
98 .mask_ack = toshiba_rbtx4938_irq_ioc_disable, 98 .mask_ack = toshiba_rbtx4938_irq_ioc_disable,
diff --git a/arch/mips/tx4938/toshiba_rbtx4938/prom.c b/arch/mips/tx4938/toshiba_rbtx4938/prom.c
index e44daf30a7c1..7dc6a0aae21c 100644
--- a/arch/mips/tx4938/toshiba_rbtx4938/prom.c
+++ b/arch/mips/tx4938/toshiba_rbtx4938/prom.c
@@ -56,9 +56,8 @@ void __init prom_init(void)
56 return; 56 return;
57} 57}
58 58
59unsigned long __init prom_free_prom_memory(void) 59void __init prom_free_prom_memory(void)
60{ 60{
61 return 0;
62} 61}
63 62
64void __init prom_fixup_mem_map(unsigned long start, unsigned long end) 63void __init prom_fixup_mem_map(unsigned long start, unsigned long end)
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
index c075261976c5..adabc6bad440 100644
--- a/arch/mips/vr41xx/common/icu.c
+++ b/arch/mips/vr41xx/common/icu.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2001-2002 MontaVista Software Inc. 4 * Copyright (C) 2001-2002 MontaVista Software Inc.
5 * Author: Yoichi Yuasa <yyuasa@mvista.com or source@mvista.com> 5 * Author: Yoichi Yuasa <yyuasa@mvista.com or source@mvista.com>
6 * Copyright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> 6 * Copyright (C) 2003-2006 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -68,6 +68,7 @@ static unsigned char sysint2_assign[16] = {
68#define MPIUINTREG 0x0e 68#define MPIUINTREG 0x0e
69#define MAIUINTREG 0x10 69#define MAIUINTREG 0x10
70#define MKIUINTREG 0x12 70#define MKIUINTREG 0x12
71#define MMACINTREG 0x12
71#define MGIUINTLREG 0x14 72#define MGIUINTLREG 0x14
72#define MDSIUINTREG 0x16 73#define MDSIUINTREG 0x16
73#define NMIREG 0x18 74#define NMIREG 0x18
@@ -241,6 +242,30 @@ void vr41xx_disable_kiuint(uint16_t mask)
241 242
242EXPORT_SYMBOL(vr41xx_disable_kiuint); 243EXPORT_SYMBOL(vr41xx_disable_kiuint);
243 244
245void vr41xx_enable_macint(uint16_t mask)
246{
247 struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
248 unsigned long flags;
249
250 spin_lock_irqsave(&desc->lock, flags);
251 icu1_set(MMACINTREG, mask);
252 spin_unlock_irqrestore(&desc->lock, flags);
253}
254
255EXPORT_SYMBOL(vr41xx_enable_macint);
256
257void vr41xx_disable_macint(uint16_t mask)
258{
259 struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
260 unsigned long flags;
261
262 spin_lock_irqsave(&desc->lock, flags);
263 icu1_clear(MMACINTREG, mask);
264 spin_unlock_irqrestore(&desc->lock, flags);
265}
266
267EXPORT_SYMBOL(vr41xx_disable_macint);
268
244void vr41xx_enable_dsiuint(uint16_t mask) 269void vr41xx_enable_dsiuint(uint16_t mask)
245{ 270{
246 struct irq_desc *desc = irq_desc + DSIU_IRQ; 271 struct irq_desc *desc = irq_desc + DSIU_IRQ;
@@ -428,7 +453,7 @@ static void enable_sysint1_irq(unsigned int irq)
428} 453}
429 454
430static struct irq_chip sysint1_irq_type = { 455static struct irq_chip sysint1_irq_type = {
431 .typename = "SYSINT1", 456 .name = "SYSINT1",
432 .ack = disable_sysint1_irq, 457 .ack = disable_sysint1_irq,
433 .mask = disable_sysint1_irq, 458 .mask = disable_sysint1_irq,
434 .mask_ack = disable_sysint1_irq, 459 .mask_ack = disable_sysint1_irq,
@@ -446,7 +471,7 @@ static void enable_sysint2_irq(unsigned int irq)
446} 471}
447 472
448static struct irq_chip sysint2_irq_type = { 473static struct irq_chip sysint2_irq_type = {
449 .typename = "SYSINT2", 474 .name = "SYSINT2",
450 .ack = disable_sysint2_irq, 475 .ack = disable_sysint2_irq,
451 .mask = disable_sysint2_irq, 476 .mask = disable_sysint2_irq,
452 .mask_ack = disable_sysint2_irq, 477 .mask_ack = disable_sysint2_irq,
diff --git a/arch/mips/vr41xx/common/init.c b/arch/mips/vr41xx/common/init.c
index a2e285c1d4d5..4f97e0ba9e24 100644
--- a/arch/mips/vr41xx/common/init.c
+++ b/arch/mips/vr41xx/common/init.c
@@ -81,7 +81,6 @@ void __init prom_init(void)
81 } 81 }
82} 82}
83 83
84unsigned long __init prom_free_prom_memory (void) 84void __init prom_free_prom_memory(void)
85{ 85{
86 return 0UL;
87} 86}
diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
index 16decf4ac2f4..cba36a247e32 100644
--- a/arch/mips/vr41xx/common/irq.c
+++ b/arch/mips/vr41xx/common/irq.c
@@ -95,27 +95,27 @@ asmlinkage void plat_irq_dispatch(void)
95 unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM; 95 unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
96 96
97 if (pending & CAUSEF_IP7) 97 if (pending & CAUSEF_IP7)
98 do_IRQ(7); 98 do_IRQ(TIMER_IRQ);
99 else if (pending & 0x7800) { 99 else if (pending & 0x7800) {
100 if (pending & CAUSEF_IP3) 100 if (pending & CAUSEF_IP3)
101 irq_dispatch(3); 101 irq_dispatch(INT1_IRQ);
102 else if (pending & CAUSEF_IP4) 102 else if (pending & CAUSEF_IP4)
103 irq_dispatch(4); 103 irq_dispatch(INT2_IRQ);
104 else if (pending & CAUSEF_IP5) 104 else if (pending & CAUSEF_IP5)
105 irq_dispatch(5); 105 irq_dispatch(INT3_IRQ);
106 else if (pending & CAUSEF_IP6) 106 else if (pending & CAUSEF_IP6)
107 irq_dispatch(6); 107 irq_dispatch(INT4_IRQ);
108 } else if (pending & CAUSEF_IP2) 108 } else if (pending & CAUSEF_IP2)
109 irq_dispatch(2); 109 irq_dispatch(INT0_IRQ);
110 else if (pending & CAUSEF_IP0) 110 else if (pending & CAUSEF_IP0)
111 do_IRQ(0); 111 do_IRQ(MIPS_SOFTINT0_IRQ);
112 else if (pending & CAUSEF_IP1) 112 else if (pending & CAUSEF_IP1)
113 do_IRQ(1); 113 do_IRQ(MIPS_SOFTINT1_IRQ);
114 else 114 else
115 spurious_interrupt(); 115 spurious_interrupt();
116} 116}
117 117
118void __init arch_init_irq(void) 118void __init arch_init_irq(void)
119{ 119{
120 mips_cpu_irq_init(MIPS_CPU_IRQ_BASE); 120 mips_cpu_irq_init();
121} 121}
diff --git a/arch/mips/vr41xx/nec-cmbvr4133/irq.c b/arch/mips/vr41xx/nec-cmbvr4133/irq.c
index 128ed8d6f111..7d2d076b0f54 100644
--- a/arch/mips/vr41xx/nec-cmbvr4133/irq.c
+++ b/arch/mips/vr41xx/nec-cmbvr4133/irq.c
@@ -21,60 +21,16 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22 22
23#include <asm/io.h> 23#include <asm/io.h>
24#include <asm/i8259.h>
24#include <asm/vr41xx/cmbvr4133.h> 25#include <asm/vr41xx/cmbvr4133.h>
25 26
26extern void enable_8259A_irq(unsigned int irq);
27extern void disable_8259A_irq(unsigned int irq);
28extern void mask_and_ack_8259A(unsigned int irq);
29extern void init_8259A(int hoge);
30
31extern int vr4133_rockhopper; 27extern int vr4133_rockhopper;
32 28
33static void enable_i8259_irq(unsigned int irq)
34{
35 enable_8259A_irq(irq - I8259_IRQ_BASE);
36}
37
38static void disable_i8259_irq(unsigned int irq)
39{
40 disable_8259A_irq(irq - I8259_IRQ_BASE);
41}
42
43static void ack_i8259_irq(unsigned int irq)
44{
45 mask_and_ack_8259A(irq - I8259_IRQ_BASE);
46}
47
48static struct irq_chip i8259_irq_type = {
49 .typename = "XT-PIC",
50 .ack = ack_i8259_irq,
51 .mask = disable_i8259_irq,
52 .mask_ack = ack_i8259_irq,
53 .unmask = enable_i8259_irq,
54};
55
56static int i8259_get_irq_number(int irq) 29static int i8259_get_irq_number(int irq)
57{ 30{
58 unsigned long isr; 31 return i8259_irq();
59
60 isr = inb(0x20);
61 irq = ffz(~isr);
62 if (irq == 2) {
63 isr = inb(0xa0);
64 irq = 8 + ffz(~isr);
65 }
66
67 if (irq < 0 || irq > 15)
68 return -EINVAL;
69
70 return I8259_IRQ_BASE + irq;
71} 32}
72 33
73static struct irqaction i8259_slave_cascade = {
74 .handler = &no_action,
75 .name = "cascade",
76};
77
78void __init rockhopper_init_irq(void) 34void __init rockhopper_init_irq(void)
79{ 35{
80 int i; 36 int i;
@@ -84,11 +40,6 @@ void __init rockhopper_init_irq(void)
84 return; 40 return;
85 } 41 }
86 42
87 for (i = I8259_IRQ_BASE; i <= I8259_IRQ_LAST; i++)
88 set_irq_chip_and_handler(i, &i8259_irq_type, handle_level_irq);
89
90 setup_irq(I8259_SLAVE_IRQ, &i8259_slave_cascade);
91
92 vr41xx_set_irq_trigger(CMBVR41XX_INTC_PIN, TRIGGER_LEVEL, SIGNAL_THROUGH); 43 vr41xx_set_irq_trigger(CMBVR41XX_INTC_PIN, TRIGGER_LEVEL, SIGNAL_THROUGH);
93 vr41xx_set_irq_level(CMBVR41XX_INTC_PIN, LEVEL_HIGH); 44 vr41xx_set_irq_level(CMBVR41XX_INTC_PIN, LEVEL_HIGH);
94 vr41xx_cascade_irq(CMBVR41XX_INTC_IRQ, i8259_get_irq_number); 45 vr41xx_cascade_irq(CMBVR41XX_INTC_IRQ, i8259_get_irq_number);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index abc6bd2f858e..f08e80a0bf0a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -552,6 +552,11 @@ config PPC_PS3
552 bool "Sony PS3 (incomplete)" 552 bool "Sony PS3 (incomplete)"
553 depends on PPC_MULTIPLATFORM && PPC64 553 depends on PPC_MULTIPLATFORM && PPC64
554 select PPC_CELL 554 select PPC_CELL
555 select USB_ARCH_HAS_OHCI
556 select USB_OHCI_LITTLE_ENDIAN
557 select USB_OHCI_BIG_ENDIAN_MMIO
558 select USB_ARCH_HAS_EHCI
559 select USB_EHCI_BIG_ENDIAN_MMIO
555 help 560 help
556 This option enables support for the Sony PS3 game console 561 This option enables support for the Sony PS3 game console
557 and other platforms using the PS3 hypervisor. 562 and other platforms using the PS3 hypervisor.
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 6828df4afd99..7e97d71a5f8f 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -381,8 +381,6 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
381 381
382 pci_device_add(dev, bus); 382 pci_device_add(dev, bus);
383 383
384 /* XXX pci_scan_msi_device(dev); */
385
386 return dev; 384 return dev;
387} 385}
388EXPORT_SYMBOL(of_create_pci_dev); 386EXPORT_SYMBOL(of_create_pci_dev);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 12272361c018..eaed402ad346 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -34,10 +34,6 @@ config GENERIC_HWEIGHT
34 bool 34 bool
35 default y 35 default y
36 36
37config GENERIC_CALIBRATE_DELAY
38 bool
39 default y
40
41config GENERIC_TIME 37config GENERIC_TIME
42 def_bool y 38 def_bool y
43 39
@@ -134,6 +130,31 @@ config AUDIT_ARCH
134 bool 130 bool
135 default y 131 default y
136 132
133config S390_SWITCH_AMODE
134 bool "Switch kernel/user addressing modes"
135 help
136 This option allows to switch the addressing modes of kernel and user
137 space. The kernel parameter switch_amode=on will enable this feature,
138 default is disabled. Enabling this (via kernel parameter) on machines
139 earlier than IBM System z9-109 EC/BC will reduce system performance.
140
141 Note that this option will also be selected by selecting the execute
142 protection option below. Enabling the execute protection via the
143 noexec kernel parameter will also switch the addressing modes,
144 independent of the switch_amode kernel parameter.
145
146
147config S390_EXEC_PROTECT
148 bool "Data execute protection"
149 select S390_SWITCH_AMODE
150 help
151 This option allows to enable a buffer overflow protection for user
152 space programs and it also selects the addressing mode option above.
153 The kernel parameter noexec=on will enable this feature and also
154 switch the addressing modes, default is disabled. Enabling this (via
155 kernel parameter) on machines earlier than IBM System z9-109 EC/BC
156 will reduce system performance.
157
137comment "Code generation options" 158comment "Code generation options"
138 159
139choice 160choice
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index b8c237290263..c9da7d16145e 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -81,7 +81,7 @@ static struct ctl_table appldata_dir_table[] = {
81/* 81/*
82 * Timer 82 * Timer
83 */ 83 */
84DEFINE_PER_CPU(struct vtimer_list, appldata_timer); 84static DEFINE_PER_CPU(struct vtimer_list, appldata_timer);
85static atomic_t appldata_expire_count = ATOMIC_INIT(0); 85static atomic_t appldata_expire_count = ATOMIC_INIT(0);
86 86
87static DEFINE_SPINLOCK(appldata_timer_lock); 87static DEFINE_SPINLOCK(appldata_timer_lock);
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index 8aea3698a77b..4ca615788702 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -36,7 +36,7 @@
36 * book: 36 * book:
37 * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml 37 * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
38 */ 38 */
39struct appldata_mem_data { 39static struct appldata_mem_data {
40 u64 timestamp; 40 u64 timestamp;
41 u32 sync_count_1; /* after VM collected the record data, */ 41 u32 sync_count_1; /* after VM collected the record data, */
42 u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the 42 u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index 075e619bf37d..f64b8c867ae2 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -34,7 +34,7 @@
34 * book: 34 * book:
35 * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml 35 * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
36 */ 36 */
37struct appldata_net_sum_data { 37static struct appldata_net_sum_data {
38 u64 timestamp; 38 u64 timestamp;
39 u32 sync_count_1; /* after VM collected the record data, */ 39 u32 sync_count_1; /* after VM collected the record data, */
40 u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the 40 u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the
diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig
new file mode 100644
index 000000000000..99ff9f08e4d7
--- /dev/null
+++ b/arch/s390/crypto/Kconfig
@@ -0,0 +1,60 @@
1config CRYPTO_SHA1_S390
2 tristate "SHA1 digest algorithm"
3 depends on S390
4 select CRYPTO_ALGAPI
5 help
6 This is the s390 hardware accelerated implementation of the
7 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
8
9config CRYPTO_SHA256_S390
10 tristate "SHA256 digest algorithm"
11 depends on S390
12 select CRYPTO_ALGAPI
13 help
14 This is the s390 hardware accelerated implementation of the
15 SHA256 secure hash standard (DFIPS 180-2).
16
17 This version of SHA implements a 256 bit hash with 128 bits of
18 security against collision attacks.
19
20config CRYPTO_DES_S390
21 tristate "DES and Triple DES cipher algorithms"
22 depends on S390
23 select CRYPTO_ALGAPI
24 select CRYPTO_BLKCIPHER
25 help
26 This us the s390 hardware accelerated implementation of the
27 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
28
29config CRYPTO_AES_S390
30 tristate "AES cipher algorithms"
31 depends on S390
32 select CRYPTO_ALGAPI
33 select CRYPTO_BLKCIPHER
34 help
35 This is the s390 hardware accelerated implementation of the
36 AES cipher algorithms (FIPS-197). AES uses the Rijndael
37 algorithm.
38
39 Rijndael appears to be consistently a very good performer in
40 both hardware and software across a wide range of computing
41 environments regardless of its use in feedback or non-feedback
42 modes. Its key setup time is excellent, and its key agility is
43 good. Rijndael's very low memory requirements make it very well
44 suited for restricted-space environments, in which it also
45 demonstrates excellent performance. Rijndael's operations are
46 among the easiest to defend against power and timing attacks.
47
48 On s390 the System z9-109 currently only supports the key size
49 of 128 bit.
50
51config S390_PRNG
52 tristate "Pseudo random number generator device driver"
53 depends on S390
54 default "m"
55 help
56 Select this option if you want to use the s390 pseudo random number
57 generator. The PRNG is part of the cryptograhic processor functions
58 and uses triple-DES to generate secure random numbers like the
59 ANSI X9.17 standard. The PRNG is usable via the char device
60 /dev/prandom.
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
index bfe2541dc5cf..14e552c5cc43 100644
--- a/arch/s390/crypto/Makefile
+++ b/arch/s390/crypto/Makefile
@@ -6,5 +6,4 @@ obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o
6obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o 6obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o
7obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o des_check_key.o 7obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o des_check_key.o
8obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o 8obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
9 9obj-$(CONFIG_S390_PRNG) += prng.o
10obj-$(CONFIG_CRYPTO_TEST) += crypt_s390_query.o
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 15c9eec02928..91636353f6f0 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -4,7 +4,7 @@
4 * s390 implementation of the AES Cipher Algorithm. 4 * s390 implementation of the AES Cipher Algorithm.
5 * 5 *
6 * s390 Version: 6 * s390 Version:
7 * Copyright (C) 2005 IBM Deutschland GmbH, IBM Corporation 7 * Copyright IBM Corp. 2005,2007
8 * Author(s): Jan Glauber (jang@de.ibm.com) 8 * Author(s): Jan Glauber (jang@de.ibm.com)
9 * 9 *
10 * Derived from "crypto/aes.c" 10 * Derived from "crypto/aes.c"
@@ -27,9 +27,11 @@
27/* data block size for all key lengths */ 27/* data block size for all key lengths */
28#define AES_BLOCK_SIZE 16 28#define AES_BLOCK_SIZE 16
29 29
30int has_aes_128 = 0; 30#define AES_KEYLEN_128 1
31int has_aes_192 = 0; 31#define AES_KEYLEN_192 2
32int has_aes_256 = 0; 32#define AES_KEYLEN_256 4
33
34static char keylen_flag = 0;
33 35
34struct s390_aes_ctx { 36struct s390_aes_ctx {
35 u8 iv[AES_BLOCK_SIZE]; 37 u8 iv[AES_BLOCK_SIZE];
@@ -47,20 +49,19 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
47 49
48 switch (key_len) { 50 switch (key_len) {
49 case 16: 51 case 16:
50 if (!has_aes_128) 52 if (!(keylen_flag & AES_KEYLEN_128))
51 goto fail; 53 goto fail;
52 break; 54 break;
53 case 24: 55 case 24:
54 if (!has_aes_192) 56 if (!(keylen_flag & AES_KEYLEN_192))
55 goto fail; 57 goto fail;
56 58
57 break; 59 break;
58 case 32: 60 case 32:
59 if (!has_aes_256) 61 if (!(keylen_flag & AES_KEYLEN_256))
60 goto fail; 62 goto fail;
61 break; 63 break;
62 default: 64 default:
63 /* invalid key length */
64 goto fail; 65 goto fail;
65 break; 66 break;
66 } 67 }
@@ -322,34 +323,32 @@ static int __init aes_init(void)
322 int ret; 323 int ret;
323 324
324 if (crypt_s390_func_available(KM_AES_128_ENCRYPT)) 325 if (crypt_s390_func_available(KM_AES_128_ENCRYPT))
325 has_aes_128 = 1; 326 keylen_flag |= AES_KEYLEN_128;
326 if (crypt_s390_func_available(KM_AES_192_ENCRYPT)) 327 if (crypt_s390_func_available(KM_AES_192_ENCRYPT))
327 has_aes_192 = 1; 328 keylen_flag |= AES_KEYLEN_192;
328 if (crypt_s390_func_available(KM_AES_256_ENCRYPT)) 329 if (crypt_s390_func_available(KM_AES_256_ENCRYPT))
329 has_aes_256 = 1; 330 keylen_flag |= AES_KEYLEN_256;
331
332 if (!keylen_flag)
333 return -EOPNOTSUPP;
330 334
331 if (!has_aes_128 && !has_aes_192 && !has_aes_256) 335 /* z9 109 and z9 BC/EC only support 128 bit key length */
332 return -ENOSYS; 336 if (keylen_flag == AES_KEYLEN_128)
337 printk(KERN_INFO
338 "aes_s390: hardware acceleration only available for"
339 "128 bit keys\n");
333 340
334 ret = crypto_register_alg(&aes_alg); 341 ret = crypto_register_alg(&aes_alg);
335 if (ret != 0) { 342 if (ret)
336 printk(KERN_INFO "crypt_s390: aes-s390 couldn't be loaded.\n");
337 goto aes_err; 343 goto aes_err;
338 }
339 344
340 ret = crypto_register_alg(&ecb_aes_alg); 345 ret = crypto_register_alg(&ecb_aes_alg);
341 if (ret != 0) { 346 if (ret)
342 printk(KERN_INFO
343 "crypt_s390: ecb-aes-s390 couldn't be loaded.\n");
344 goto ecb_aes_err; 347 goto ecb_aes_err;
345 }
346 348
347 ret = crypto_register_alg(&cbc_aes_alg); 349 ret = crypto_register_alg(&cbc_aes_alg);
348 if (ret != 0) { 350 if (ret)
349 printk(KERN_INFO
350 "crypt_s390: cbc-aes-s390 couldn't be loaded.\n");
351 goto cbc_aes_err; 351 goto cbc_aes_err;
352 }
353 352
354out: 353out:
355 return ret; 354 return ret;
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index 2b137089f625..2775d2618332 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -3,8 +3,9 @@
3 * 3 *
4 * Support for s390 cryptographic instructions. 4 * Support for s390 cryptographic instructions.
5 * 5 *
6 * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation 6 * Copyright IBM Corp. 2003,2007
7 * Author(s): Thomas Spatzier (tspat@de.ibm.com) 7 * Author(s): Thomas Spatzier
8 * Jan Glauber (jan.glauber@de.ibm.com)
8 * 9 *
9 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free 11 * under the terms of the GNU General Public License as published by the Free
@@ -32,7 +33,8 @@ enum crypt_s390_operations {
32 CRYPT_S390_KMAC = 0x0500 33 CRYPT_S390_KMAC = 0x0500
33}; 34};
34 35
35/* function codes for KM (CIPHER MESSAGE) instruction 36/*
37 * function codes for KM (CIPHER MESSAGE) instruction
36 * 0x80 is the decipher modifier bit 38 * 0x80 is the decipher modifier bit
37 */ 39 */
38enum crypt_s390_km_func { 40enum crypt_s390_km_func {
@@ -51,7 +53,8 @@ enum crypt_s390_km_func {
51 KM_AES_256_DECRYPT = CRYPT_S390_KM | 0x14 | 0x80, 53 KM_AES_256_DECRYPT = CRYPT_S390_KM | 0x14 | 0x80,
52}; 54};
53 55
54/* function codes for KMC (CIPHER MESSAGE WITH CHAINING) 56/*
57 * function codes for KMC (CIPHER MESSAGE WITH CHAINING)
55 * instruction 58 * instruction
56 */ 59 */
57enum crypt_s390_kmc_func { 60enum crypt_s390_kmc_func {
@@ -68,9 +71,11 @@ enum crypt_s390_kmc_func {
68 KMC_AES_192_DECRYPT = CRYPT_S390_KMC | 0x13 | 0x80, 71 KMC_AES_192_DECRYPT = CRYPT_S390_KMC | 0x13 | 0x80,
69 KMC_AES_256_ENCRYPT = CRYPT_S390_KMC | 0x14, 72 KMC_AES_256_ENCRYPT = CRYPT_S390_KMC | 0x14,
70 KMC_AES_256_DECRYPT = CRYPT_S390_KMC | 0x14 | 0x80, 73 KMC_AES_256_DECRYPT = CRYPT_S390_KMC | 0x14 | 0x80,
74 KMC_PRNG = CRYPT_S390_KMC | 0x43,
71}; 75};
72 76
73/* function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) 77/*
78 * function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
74 * instruction 79 * instruction
75 */ 80 */
76enum crypt_s390_kimd_func { 81enum crypt_s390_kimd_func {
@@ -79,7 +84,8 @@ enum crypt_s390_kimd_func {
79 KIMD_SHA_256 = CRYPT_S390_KIMD | 2, 84 KIMD_SHA_256 = CRYPT_S390_KIMD | 2,
80}; 85};
81 86
82/* function codes for KLMD (COMPUTE LAST MESSAGE DIGEST) 87/*
88 * function codes for KLMD (COMPUTE LAST MESSAGE DIGEST)
83 * instruction 89 * instruction
84 */ 90 */
85enum crypt_s390_klmd_func { 91enum crypt_s390_klmd_func {
@@ -88,7 +94,8 @@ enum crypt_s390_klmd_func {
88 KLMD_SHA_256 = CRYPT_S390_KLMD | 2, 94 KLMD_SHA_256 = CRYPT_S390_KLMD | 2,
89}; 95};
90 96
91/* function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) 97/*
98 * function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
92 * instruction 99 * instruction
93 */ 100 */
94enum crypt_s390_kmac_func { 101enum crypt_s390_kmac_func {
@@ -98,229 +105,219 @@ enum crypt_s390_kmac_func {
98 KMAC_TDEA_192 = CRYPT_S390_KMAC | 3 105 KMAC_TDEA_192 = CRYPT_S390_KMAC | 3
99}; 106};
100 107
101/* status word for s390 crypto instructions' QUERY functions */ 108/**
102struct crypt_s390_query_status { 109 * crypt_s390_km:
103 u64 high; 110 * @func: the function code passed to KM; see crypt_s390_km_func
104 u64 low; 111 * @param: address of parameter block; see POP for details on each func
105}; 112 * @dest: address of destination memory area
106 113 * @src: address of source memory area
107/* 114 * @src_len: length of src operand in bytes
115 *
108 * Executes the KM (CIPHER MESSAGE) operation of the CPU. 116 * Executes the KM (CIPHER MESSAGE) operation of the CPU.
109 * @param func: the function code passed to KM; see crypt_s390_km_func 117 *
110 * @param param: address of parameter block; see POP for details on each func 118 * Returns -1 for failure, 0 for the query func, number of processed
111 * @param dest: address of destination memory area 119 * bytes for encryption/decryption funcs
112 * @param src: address of source memory area
113 * @param src_len: length of src operand in bytes
114 * @returns < zero for failure, 0 for the query func, number of processed bytes
115 * for encryption/decryption funcs
116 */ 120 */
117static inline int 121static inline int crypt_s390_km(long func, void *param,
118crypt_s390_km(long func, void* param, u8* dest, const u8* src, long src_len) 122 u8 *dest, const u8 *src, long src_len)
119{ 123{
120 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; 124 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
121 register void* __param asm("1") = param; 125 register void *__param asm("1") = param;
122 register const u8* __src asm("2") = src; 126 register const u8 *__src asm("2") = src;
123 register long __src_len asm("3") = src_len; 127 register long __src_len asm("3") = src_len;
124 register u8* __dest asm("4") = dest; 128 register u8 *__dest asm("4") = dest;
125 int ret; 129 int ret;
126 130
127 asm volatile( 131 asm volatile(
128 "0: .insn rre,0xb92e0000,%3,%1 \n" /* KM opcode */ 132 "0: .insn rre,0xb92e0000,%3,%1 \n" /* KM opcode */
129 "1: brc 1,0b \n" /* handle partial completion */ 133 "1: brc 1,0b \n" /* handle partial completion */
130 " ahi %0,%h7\n" 134 " la %0,0\n"
131 "2: ahi %0,%h8\n" 135 "2:\n"
132 "3:\n" 136 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
133 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
134 : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) 137 : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
135 : "d" (__func), "a" (__param), "0" (-EFAULT), 138 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
136 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
137 if (ret < 0) 139 if (ret < 0)
138 return ret; 140 return ret;
139 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; 141 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
140} 142}
141 143
142/* 144/**
145 * crypt_s390_kmc:
146 * @func: the function code passed to KM; see crypt_s390_kmc_func
147 * @param: address of parameter block; see POP for details on each func
148 * @dest: address of destination memory area
149 * @src: address of source memory area
150 * @src_len: length of src operand in bytes
151 *
143 * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the CPU. 152 * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the CPU.
144 * @param func: the function code passed to KM; see crypt_s390_kmc_func 153 *
145 * @param param: address of parameter block; see POP for details on each func 154 * Returns -1 for failure, 0 for the query func, number of processed
146 * @param dest: address of destination memory area 155 * bytes for encryption/decryption funcs
147 * @param src: address of source memory area
148 * @param src_len: length of src operand in bytes
149 * @returns < zero for failure, 0 for the query func, number of processed bytes
150 * for encryption/decryption funcs
151 */ 156 */
152static inline int 157static inline int crypt_s390_kmc(long func, void *param,
153crypt_s390_kmc(long func, void* param, u8* dest, const u8* src, long src_len) 158 u8 *dest, const u8 *src, long src_len)
154{ 159{
155 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; 160 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
156 register void* __param asm("1") = param; 161 register void *__param asm("1") = param;
157 register const u8* __src asm("2") = src; 162 register const u8 *__src asm("2") = src;
158 register long __src_len asm("3") = src_len; 163 register long __src_len asm("3") = src_len;
159 register u8* __dest asm("4") = dest; 164 register u8 *__dest asm("4") = dest;
160 int ret; 165 int ret;
161 166
162 asm volatile( 167 asm volatile(
163 "0: .insn rre,0xb92f0000,%3,%1 \n" /* KMC opcode */ 168 "0: .insn rre,0xb92f0000,%3,%1 \n" /* KMC opcode */
164 "1: brc 1,0b \n" /* handle partial completion */ 169 "1: brc 1,0b \n" /* handle partial completion */
165 " ahi %0,%h7\n" 170 " la %0,0\n"
166 "2: ahi %0,%h8\n" 171 "2:\n"
167 "3:\n" 172 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
168 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
169 : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) 173 : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
170 : "d" (__func), "a" (__param), "0" (-EFAULT), 174 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
171 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
172 if (ret < 0) 175 if (ret < 0)
173 return ret; 176 return ret;
174 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; 177 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
175} 178}
176 179
177/* 180/**
181 * crypt_s390_kimd:
182 * @func: the function code passed to KM; see crypt_s390_kimd_func
183 * @param: address of parameter block; see POP for details on each func
184 * @src: address of source memory area
185 * @src_len: length of src operand in bytes
186 *
178 * Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation 187 * Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation
179 * of the CPU. 188 * of the CPU.
180 * @param func: the function code passed to KM; see crypt_s390_kimd_func 189 *
181 * @param param: address of parameter block; see POP for details on each func 190 * Returns -1 for failure, 0 for the query func, number of processed
182 * @param src: address of source memory area 191 * bytes for digest funcs
183 * @param src_len: length of src operand in bytes
184 * @returns < zero for failure, 0 for the query func, number of processed bytes
185 * for digest funcs
186 */ 192 */
187static inline int 193static inline int crypt_s390_kimd(long func, void *param,
188crypt_s390_kimd(long func, void* param, const u8* src, long src_len) 194 const u8 *src, long src_len)
189{ 195{
190 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; 196 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
191 register void* __param asm("1") = param; 197 register void *__param asm("1") = param;
192 register const u8* __src asm("2") = src; 198 register const u8 *__src asm("2") = src;
193 register long __src_len asm("3") = src_len; 199 register long __src_len asm("3") = src_len;
194 int ret; 200 int ret;
195 201
196 asm volatile( 202 asm volatile(
197 "0: .insn rre,0xb93e0000,%1,%1 \n" /* KIMD opcode */ 203 "0: .insn rre,0xb93e0000,%1,%1 \n" /* KIMD opcode */
198 "1: brc 1,0b \n" /* handle partial completion */ 204 "1: brc 1,0b \n" /* handle partial completion */
199 " ahi %0,%h6\n" 205 " la %0,0\n"
200 "2: ahi %0,%h7\n" 206 "2:\n"
201 "3:\n" 207 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
202 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
203 : "=d" (ret), "+a" (__src), "+d" (__src_len) 208 : "=d" (ret), "+a" (__src), "+d" (__src_len)
204 : "d" (__func), "a" (__param), "0" (-EFAULT), 209 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
205 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
206 if (ret < 0) 210 if (ret < 0)
207 return ret; 211 return ret;
208 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; 212 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
209} 213}
210 214
211/* 215/**
216 * crypt_s390_klmd:
217 * @func: the function code passed to KM; see crypt_s390_klmd_func
218 * @param: address of parameter block; see POP for details on each func
219 * @src: address of source memory area
220 * @src_len: length of src operand in bytes
221 *
212 * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the CPU. 222 * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the CPU.
213 * @param func: the function code passed to KM; see crypt_s390_klmd_func 223 *
214 * @param param: address of parameter block; see POP for details on each func 224 * Returns -1 for failure, 0 for the query func, number of processed
215 * @param src: address of source memory area 225 * bytes for digest funcs
216 * @param src_len: length of src operand in bytes
217 * @returns < zero for failure, 0 for the query func, number of processed bytes
218 * for digest funcs
219 */ 226 */
220static inline int 227static inline int crypt_s390_klmd(long func, void *param,
221crypt_s390_klmd(long func, void* param, const u8* src, long src_len) 228 const u8 *src, long src_len)
222{ 229{
223 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; 230 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
224 register void* __param asm("1") = param; 231 register void *__param asm("1") = param;
225 register const u8* __src asm("2") = src; 232 register const u8 *__src asm("2") = src;
226 register long __src_len asm("3") = src_len; 233 register long __src_len asm("3") = src_len;
227 int ret; 234 int ret;
228 235
229 asm volatile( 236 asm volatile(
230 "0: .insn rre,0xb93f0000,%1,%1 \n" /* KLMD opcode */ 237 "0: .insn rre,0xb93f0000,%1,%1 \n" /* KLMD opcode */
231 "1: brc 1,0b \n" /* handle partial completion */ 238 "1: brc 1,0b \n" /* handle partial completion */
232 " ahi %0,%h6\n" 239 " la %0,0\n"
233 "2: ahi %0,%h7\n" 240 "2:\n"
234 "3:\n" 241 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
235 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
236 : "=d" (ret), "+a" (__src), "+d" (__src_len) 242 : "=d" (ret), "+a" (__src), "+d" (__src_len)
237 : "d" (__func), "a" (__param), "0" (-EFAULT), 243 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
238 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
239 if (ret < 0) 244 if (ret < 0)
240 return ret; 245 return ret;
241 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; 246 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
242} 247}
243 248
244/* 249/**
250 * crypt_s390_kmac:
251 * @func: the function code passed to KM; see crypt_s390_klmd_func
252 * @param: address of parameter block; see POP for details on each func
253 * @src: address of source memory area
254 * @src_len: length of src operand in bytes
255 *
245 * Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation 256 * Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation
246 * of the CPU. 257 * of the CPU.
247 * @param func: the function code passed to KM; see crypt_s390_klmd_func 258 *
248 * @param param: address of parameter block; see POP for details on each func 259 * Returns -1 for failure, 0 for the query func, number of processed
249 * @param src: address of source memory area 260 * bytes for digest funcs
250 * @param src_len: length of src operand in bytes
251 * @returns < zero for failure, 0 for the query func, number of processed bytes
252 * for digest funcs
253 */ 261 */
254static inline int 262static inline int crypt_s390_kmac(long func, void *param,
255crypt_s390_kmac(long func, void* param, const u8* src, long src_len) 263 const u8 *src, long src_len)
256{ 264{
257 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; 265 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
258 register void* __param asm("1") = param; 266 register void *__param asm("1") = param;
259 register const u8* __src asm("2") = src; 267 register const u8 *__src asm("2") = src;
260 register long __src_len asm("3") = src_len; 268 register long __src_len asm("3") = src_len;
261 int ret; 269 int ret;
262 270
263 asm volatile( 271 asm volatile(
264 "0: .insn rre,0xb91e0000,%1,%1 \n" /* KLAC opcode */ 272 "0: .insn rre,0xb91e0000,%1,%1 \n" /* KLAC opcode */
265 "1: brc 1,0b \n" /* handle partial completion */ 273 "1: brc 1,0b \n" /* handle partial completion */
266 " ahi %0,%h6\n" 274 " la %0,0\n"
267 "2: ahi %0,%h7\n" 275 "2:\n"
268 "3:\n" 276 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
269 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
270 : "=d" (ret), "+a" (__src), "+d" (__src_len) 277 : "=d" (ret), "+a" (__src), "+d" (__src_len)
271 : "d" (__func), "a" (__param), "0" (-EFAULT), 278 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
272 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
273 if (ret < 0) 279 if (ret < 0)
274 return ret; 280 return ret;
275 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; 281 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
276} 282}
277 283
278/** 284/**
285 * crypt_s390_func_available:
286 * @func: the function code of the specific function; 0 if op in general
287 *
279 * Tests if a specific crypto function is implemented on the machine. 288 * Tests if a specific crypto function is implemented on the machine.
280 * @param func: the function code of the specific function; 0 if op in general 289 *
281 * @return 1 if func available; 0 if func or op in general not available 290 * Returns 1 if func available; 0 if func or op in general not available
282 */ 291 */
283static inline int 292static inline int crypt_s390_func_available(int func)
284crypt_s390_func_available(int func)
285{ 293{
294 unsigned char status[16];
286 int ret; 295 int ret;
287 296
288 struct crypt_s390_query_status status = { 297 switch (func & CRYPT_S390_OP_MASK) {
289 .high = 0, 298 case CRYPT_S390_KM:
290 .low = 0 299 ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
291 }; 300 break;
292 switch (func & CRYPT_S390_OP_MASK){ 301 case CRYPT_S390_KMC:
293 case CRYPT_S390_KM: 302 ret = crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0);
294 ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0); 303 break;
295 break; 304 case CRYPT_S390_KIMD:
296 case CRYPT_S390_KMC: 305 ret = crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0);
297 ret = crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0); 306 break;
298 break; 307 case CRYPT_S390_KLMD:
299 case CRYPT_S390_KIMD: 308 ret = crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0);
300 ret = crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0); 309 break;
301 break; 310 case CRYPT_S390_KMAC:
302 case CRYPT_S390_KLMD: 311 ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
303 ret = crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0); 312 break;
304 break; 313 default:
305 case CRYPT_S390_KMAC: 314 return 0;
306 ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
307 break;
308 default:
309 ret = 0;
310 return ret;
311 }
312 if (ret >= 0){
313 func &= CRYPT_S390_FUNC_MASK;
314 func &= 0x7f; //mask modifier bit
315 if (func < 64){
316 ret = (status.high >> (64 - func - 1)) & 0x1;
317 } else {
318 ret = (status.low >> (128 - func - 1)) & 0x1;
319 }
320 } else {
321 ret = 0;
322 } 315 }
323 return ret; 316 if (ret < 0)
317 return 0;
318 func &= CRYPT_S390_FUNC_MASK;
319 func &= 0x7f; /* mask modifier bit */
320 return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
324} 321}
325 322
326#endif // _CRYPTO_ARCH_S390_CRYPT_S390_H 323#endif /* _CRYPTO_ARCH_S390_CRYPT_S390_H */
diff --git a/arch/s390/crypto/crypt_s390_query.c b/arch/s390/crypto/crypt_s390_query.c
deleted file mode 100644
index 54fb11d7fadd..000000000000
--- a/arch/s390/crypto/crypt_s390_query.c
+++ /dev/null
@@ -1,129 +0,0 @@
1/*
2 * Cryptographic API.
3 *
4 * Support for s390 cryptographic instructions.
5 * Testing module for querying processor crypto capabilities.
6 *
7 * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
8 * Author(s): Thomas Spatzier (tspat@de.ibm.com)
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 */
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/kernel.h>
19#include <asm/errno.h>
20#include "crypt_s390.h"
21
22static void query_available_functions(void)
23{
24 printk(KERN_INFO "#####################\n");
25
26 /* query available KM functions */
27 printk(KERN_INFO "KM_QUERY: %d\n",
28 crypt_s390_func_available(KM_QUERY));
29 printk(KERN_INFO "KM_DEA: %d\n",
30 crypt_s390_func_available(KM_DEA_ENCRYPT));
31 printk(KERN_INFO "KM_TDEA_128: %d\n",
32 crypt_s390_func_available(KM_TDEA_128_ENCRYPT));
33 printk(KERN_INFO "KM_TDEA_192: %d\n",
34 crypt_s390_func_available(KM_TDEA_192_ENCRYPT));
35 printk(KERN_INFO "KM_AES_128: %d\n",
36 crypt_s390_func_available(KM_AES_128_ENCRYPT));
37 printk(KERN_INFO "KM_AES_192: %d\n",
38 crypt_s390_func_available(KM_AES_192_ENCRYPT));
39 printk(KERN_INFO "KM_AES_256: %d\n",
40 crypt_s390_func_available(KM_AES_256_ENCRYPT));
41
42 /* query available KMC functions */
43 printk(KERN_INFO "KMC_QUERY: %d\n",
44 crypt_s390_func_available(KMC_QUERY));
45 printk(KERN_INFO "KMC_DEA: %d\n",
46 crypt_s390_func_available(KMC_DEA_ENCRYPT));
47 printk(KERN_INFO "KMC_TDEA_128: %d\n",
48 crypt_s390_func_available(KMC_TDEA_128_ENCRYPT));
49 printk(KERN_INFO "KMC_TDEA_192: %d\n",
50 crypt_s390_func_available(KMC_TDEA_192_ENCRYPT));
51 printk(KERN_INFO "KMC_AES_128: %d\n",
52 crypt_s390_func_available(KMC_AES_128_ENCRYPT));
53 printk(KERN_INFO "KMC_AES_192: %d\n",
54 crypt_s390_func_available(KMC_AES_192_ENCRYPT));
55 printk(KERN_INFO "KMC_AES_256: %d\n",
56 crypt_s390_func_available(KMC_AES_256_ENCRYPT));
57
58 /* query available KIMD functions */
59 printk(KERN_INFO "KIMD_QUERY: %d\n",
60 crypt_s390_func_available(KIMD_QUERY));
61 printk(KERN_INFO "KIMD_SHA_1: %d\n",
62 crypt_s390_func_available(KIMD_SHA_1));
63 printk(KERN_INFO "KIMD_SHA_256: %d\n",
64 crypt_s390_func_available(KIMD_SHA_256));
65
66 /* query available KLMD functions */
67 printk(KERN_INFO "KLMD_QUERY: %d\n",
68 crypt_s390_func_available(KLMD_QUERY));
69 printk(KERN_INFO "KLMD_SHA_1: %d\n",
70 crypt_s390_func_available(KLMD_SHA_1));
71 printk(KERN_INFO "KLMD_SHA_256: %d\n",
72 crypt_s390_func_available(KLMD_SHA_256));
73
74 /* query available KMAC functions */
75 printk(KERN_INFO "KMAC_QUERY: %d\n",
76 crypt_s390_func_available(KMAC_QUERY));
77 printk(KERN_INFO "KMAC_DEA: %d\n",
78 crypt_s390_func_available(KMAC_DEA));
79 printk(KERN_INFO "KMAC_TDEA_128: %d\n",
80 crypt_s390_func_available(KMAC_TDEA_128));
81 printk(KERN_INFO "KMAC_TDEA_192: %d\n",
82 crypt_s390_func_available(KMAC_TDEA_192));
83}
84
85static int init(void)
86{
87 struct crypt_s390_query_status status = {
88 .high = 0,
89 .low = 0
90 };
91
92 printk(KERN_INFO "crypt_s390: querying available crypto functions\n");
93 crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
94 printk(KERN_INFO "KM:\t%016llx %016llx\n",
95 (unsigned long long) status.high,
96 (unsigned long long) status.low);
97 status.high = status.low = 0;
98 crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0);
99 printk(KERN_INFO "KMC:\t%016llx %016llx\n",
100 (unsigned long long) status.high,
101 (unsigned long long) status.low);
102 status.high = status.low = 0;
103 crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0);
104 printk(KERN_INFO "KIMD:\t%016llx %016llx\n",
105 (unsigned long long) status.high,
106 (unsigned long long) status.low);
107 status.high = status.low = 0;
108 crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0);
109 printk(KERN_INFO "KLMD:\t%016llx %016llx\n",
110 (unsigned long long) status.high,
111 (unsigned long long) status.low);
112 status.high = status.low = 0;
113 crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
114 printk(KERN_INFO "KMAC:\t%016llx %016llx\n",
115 (unsigned long long) status.high,
116 (unsigned long long) status.low);
117
118 query_available_functions();
119 return -ECANCELED;
120}
121
122static void __exit cleanup(void)
123{
124}
125
126module_init(init);
127module_exit(cleanup);
128
129MODULE_LICENSE("GPL");
diff --git a/arch/s390/crypto/des_check_key.c b/arch/s390/crypto/des_check_key.c
index e3f5c5f238fe..5706af266442 100644
--- a/arch/s390/crypto/des_check_key.c
+++ b/arch/s390/crypto/des_check_key.c
@@ -10,8 +10,9 @@
10 * scatterlist interface. Changed LGPL to GPL per section 3 of the LGPL. 10 * scatterlist interface. Changed LGPL to GPL per section 3 of the LGPL.
11 * 11 *
12 * s390 Version: 12 * s390 Version:
13 * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation 13 * Copyright IBM Corp. 2003
14 * Author(s): Thomas Spatzier (tspat@de.ibm.com) 14 * Author(s): Thomas Spatzier
15 * Jan Glauber (jan.glauber@de.ibm.com)
15 * 16 *
16 * Derived from "crypto/des.c" 17 * Derived from "crypto/des.c"
17 * Copyright (c) 1992 Dana L. How. 18 * Copyright (c) 1992 Dana L. How.
@@ -30,6 +31,7 @@
30#include <linux/module.h> 31#include <linux/module.h>
31#include <linux/errno.h> 32#include <linux/errno.h>
32#include <linux/crypto.h> 33#include <linux/crypto.h>
34#include "crypto_des.h"
33 35
34#define ROR(d,c,o) ((d) = (d) >> (c) | (d) << (o)) 36#define ROR(d,c,o) ((d) = (d) >> (c) | (d) << (o))
35 37
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 2aba04852fe3..ea22707f435f 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -3,9 +3,9 @@
3 * 3 *
4 * s390 implementation of the DES Cipher Algorithm. 4 * s390 implementation of the DES Cipher Algorithm.
5 * 5 *
6 * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright IBM Corp. 2003,2007
7 * Author(s): Thomas Spatzier (tspat@de.ibm.com) 7 * Author(s): Thomas Spatzier
8 * 8 * Jan Glauber (jan.glauber@de.ibm.com)
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
@@ -557,7 +557,7 @@ static int init(void)
557 if (!crypt_s390_func_available(KM_DEA_ENCRYPT) || 557 if (!crypt_s390_func_available(KM_DEA_ENCRYPT) ||
558 !crypt_s390_func_available(KM_TDEA_128_ENCRYPT) || 558 !crypt_s390_func_available(KM_TDEA_128_ENCRYPT) ||
559 !crypt_s390_func_available(KM_TDEA_192_ENCRYPT)) 559 !crypt_s390_func_available(KM_TDEA_192_ENCRYPT))
560 return -ENOSYS; 560 return -EOPNOTSUPP;
561 561
562 ret = crypto_register_alg(&des_alg); 562 ret = crypto_register_alg(&des_alg);
563 if (ret) 563 if (ret)
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
new file mode 100644
index 000000000000..8eb3a1aedc22
--- /dev/null
+++ b/arch/s390/crypto/prng.c
@@ -0,0 +1,213 @@
1/*
2 * Copyright IBM Corp. 2006,2007
3 * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
4 * Driver for the s390 pseudo random number generator
5 */
6#include <linux/fs.h>
7#include <linux/init.h>
8#include <linux/kernel.h>
9#include <linux/miscdevice.h>
10#include <linux/module.h>
11#include <linux/moduleparam.h>
12#include <linux/random.h>
13#include <asm/debug.h>
14#include <asm/uaccess.h>
15
16#include "crypt_s390.h"
17
18MODULE_LICENSE("GPL");
19MODULE_AUTHOR("Jan Glauber <jan.glauber@de.ibm.com>");
20MODULE_DESCRIPTION("s390 PRNG interface");
21
22static int prng_chunk_size = 256;
23module_param(prng_chunk_size, int, S_IRUSR | S_IRGRP | S_IROTH);
24MODULE_PARM_DESC(prng_chunk_size, "PRNG read chunk size in bytes");
25
26static int prng_entropy_limit = 4096;
27module_param(prng_entropy_limit, int, S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR);
28MODULE_PARM_DESC(prng_entropy_limit,
29 "PRNG add entropy after that much bytes were produced");
30
31/*
32 * Any one who considers arithmetical methods of producing random digits is,
33 * of course, in a state of sin. -- John von Neumann
34 */
35
36struct s390_prng_data {
37 unsigned long count; /* how many bytes were produced */
38 char *buf;
39};
40
41static struct s390_prng_data *p;
42
43/* copied from libica, use a non-zero initial parameter block */
44static unsigned char parm_block[32] = {
450x0F,0x2B,0x8E,0x63,0x8C,0x8E,0xD2,0x52,0x64,0xB7,0xA0,0x7B,0x75,0x28,0xB8,0xF4,
460x75,0x5F,0xD2,0xA6,0x8D,0x97,0x11,0xFF,0x49,0xD8,0x23,0xF3,0x7E,0x21,0xEC,0xA0,
47};
48
49static int prng_open(struct inode *inode, struct file *file)
50{
51 return nonseekable_open(inode, file);
52}
53
54static void prng_add_entropy(void)
55{
56 __u64 entropy[4];
57 unsigned int i;
58 int ret;
59
60 for (i = 0; i < 16; i++) {
61 ret = crypt_s390_kmc(KMC_PRNG, parm_block, (char *)entropy,
62 (char *)entropy, sizeof(entropy));
63 BUG_ON(ret < 0 || ret != sizeof(entropy));
64 memcpy(parm_block, entropy, sizeof(entropy));
65 }
66}
67
68static void prng_seed(int nbytes)
69{
70 char buf[16];
71 int i = 0;
72
73 BUG_ON(nbytes > 16);
74 get_random_bytes(buf, nbytes);
75
76 /* Add the entropy */
77 while (nbytes >= 8) {
78 *((__u64 *)parm_block) ^= *((__u64 *)buf+i*8);
79 prng_add_entropy();
80 i += 8;
81 nbytes -= 8;
82 }
83 prng_add_entropy();
84}
85
86static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
87 loff_t *ppos)
88{
89 int chunk, n;
90 int ret = 0;
91 int tmp;
92
93 /* nbytes can be arbitrary long, we spilt it into chunks */
94 while (nbytes) {
95 /* same as in extract_entropy_user in random.c */
96 if (need_resched()) {
97 if (signal_pending(current)) {
98 if (ret == 0)
99 ret = -ERESTARTSYS;
100 break;
101 }
102 schedule();
103 }
104
105 /*
106 * we lose some random bytes if an attacker issues
107 * reads < 8 bytes, but we don't care
108 */
109 chunk = min_t(int, nbytes, prng_chunk_size);
110
111 /* PRNG only likes multiples of 8 bytes */
112 n = (chunk + 7) & -8;
113
114 if (p->count > prng_entropy_limit)
115 prng_seed(8);
116
117 /* if the CPU supports PRNG stckf is present too */
118 asm volatile(".insn s,0xb27c0000,%0"
119 : "=m" (*((unsigned long long *)p->buf)) : : "cc");
120
121 /*
122 * Beside the STCKF the input for the TDES-EDE is the output
123 * of the last operation. We differ here from X9.17 since we
124 * only store one timestamp into the buffer. Padding the whole
125 * buffer with timestamps does not improve security, since
126 * successive stckf have nearly constant offsets.
127 * If an attacker knows the first timestamp it would be
128 * trivial to guess the additional values. One timestamp
129 * is therefore enough and still guarantees unique input values.
130 *
131 * Note: you can still get strict X9.17 conformity by setting
132 * prng_chunk_size to 8 bytes.
133 */
134 tmp = crypt_s390_kmc(KMC_PRNG, parm_block, p->buf, p->buf, n);
135 BUG_ON((tmp < 0) || (tmp != n));
136
137 p->count += n;
138
139 if (copy_to_user(ubuf, p->buf, chunk))
140 return -EFAULT;
141
142 nbytes -= chunk;
143 ret += chunk;
144 ubuf += chunk;
145 }
146 return ret;
147}
148
149static struct file_operations prng_fops = {
150 .owner = THIS_MODULE,
151 .open = &prng_open,
152 .release = NULL,
153 .read = &prng_read,
154};
155
156static struct miscdevice prng_dev = {
157 .name = "prandom",
158 .minor = MISC_DYNAMIC_MINOR,
159 .fops = &prng_fops,
160};
161
162static int __init prng_init(void)
163{
164 int ret;
165
166 /* check if the CPU has a PRNG */
167 if (!crypt_s390_func_available(KMC_PRNG))
168 return -EOPNOTSUPP;
169
170 if (prng_chunk_size < 8)
171 return -EINVAL;
172
173 p = kmalloc(sizeof(struct s390_prng_data), GFP_KERNEL);
174 if (!p)
175 return -ENOMEM;
176 p->count = 0;
177
178 p->buf = kmalloc(prng_chunk_size, GFP_KERNEL);
179 if (!p->buf) {
180 ret = -ENOMEM;
181 goto out_free;
182 }
183
184 /* initialize the PRNG, add 128 bits of entropy */
185 prng_seed(16);
186
187 ret = misc_register(&prng_dev);
188 if (ret) {
189 printk(KERN_WARNING
190 "Could not register misc device for PRNG.\n");
191 goto out_buf;
192 }
193 return 0;
194
195out_buf:
196 kfree(p->buf);
197out_free:
198 kfree(p);
199 return ret;
200}
201
202static void __exit prng_exit(void)
203{
204 /* wipe me */
205 memset(p->buf, 0, prng_chunk_size);
206 kfree(p->buf);
207 kfree(p);
208
209 misc_deregister(&prng_dev);
210}
211
212module_init(prng_init);
213module_exit(prng_exit);
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 49ca8690ee39..969639f31977 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -8,8 +8,9 @@
8 * implementation written by Steve Reid. 8 * implementation written by Steve Reid.
9 * 9 *
10 * s390 Version: 10 * s390 Version:
11 * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation 11 * Copyright IBM Corp. 2003,2007
12 * Author(s): Thomas Spatzier (tspat@de.ibm.com) 12 * Author(s): Thomas Spatzier
13 * Jan Glauber (jan.glauber@de.ibm.com)
13 * 14 *
14 * Derived from "crypto/sha1.c" 15 * Derived from "crypto/sha1.c"
15 * Copyright (c) Alan Smithee. 16 * Copyright (c) Alan Smithee.
@@ -43,16 +44,14 @@ struct crypt_s390_sha1_ctx {
43static void sha1_init(struct crypto_tfm *tfm) 44static void sha1_init(struct crypto_tfm *tfm)
44{ 45{
45 struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm); 46 struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm);
46 static const u32 initstate[5] = { 47
47 0x67452301, 48 ctx->state[0] = 0x67452301;
48 0xEFCDAB89, 49 ctx->state[1] = 0xEFCDAB89;
49 0x98BADCFE, 50 ctx->state[2] = 0x98BADCFE;
50 0x10325476, 51 ctx->state[3] = 0x10325476;
51 0xC3D2E1F0 52 ctx->state[4] = 0xC3D2E1F0;
52 };
53 53
54 ctx->count = 0; 54 ctx->count = 0;
55 memcpy(ctx->state, &initstate, sizeof(initstate));
56 ctx->buf_len = 0; 55 ctx->buf_len = 0;
57} 56}
58 57
@@ -63,13 +62,13 @@ static void sha1_update(struct crypto_tfm *tfm, const u8 *data,
63 long imd_len; 62 long imd_len;
64 63
65 sctx = crypto_tfm_ctx(tfm); 64 sctx = crypto_tfm_ctx(tfm);
66 sctx->count += len * 8; //message bit length 65 sctx->count += len * 8; /* message bit length */
67 66
68 //anything in buffer yet? -> must be completed 67 /* anything in buffer yet? -> must be completed */
69 if (sctx->buf_len && (sctx->buf_len + len) >= SHA1_BLOCK_SIZE) { 68 if (sctx->buf_len && (sctx->buf_len + len) >= SHA1_BLOCK_SIZE) {
70 //complete full block and hash 69 /* complete full block and hash */
71 memcpy(sctx->buffer + sctx->buf_len, data, 70 memcpy(sctx->buffer + sctx->buf_len, data,
72 SHA1_BLOCK_SIZE - sctx->buf_len); 71 SHA1_BLOCK_SIZE - sctx->buf_len);
73 crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, 72 crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer,
74 SHA1_BLOCK_SIZE); 73 SHA1_BLOCK_SIZE);
75 data += SHA1_BLOCK_SIZE - sctx->buf_len; 74 data += SHA1_BLOCK_SIZE - sctx->buf_len;
@@ -77,37 +76,36 @@ static void sha1_update(struct crypto_tfm *tfm, const u8 *data,
77 sctx->buf_len = 0; 76 sctx->buf_len = 0;
78 } 77 }
79 78
80 //rest of data contains full blocks? 79 /* rest of data contains full blocks? */
81 imd_len = len & ~0x3ful; 80 imd_len = len & ~0x3ful;
82 if (imd_len){ 81 if (imd_len) {
83 crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len); 82 crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len);
84 data += imd_len; 83 data += imd_len;
85 len -= imd_len; 84 len -= imd_len;
86 } 85 }
87 //anything left? store in buffer 86 /* anything left? store in buffer */
88 if (len){ 87 if (len) {
89 memcpy(sctx->buffer + sctx->buf_len , data, len); 88 memcpy(sctx->buffer + sctx->buf_len , data, len);
90 sctx->buf_len += len; 89 sctx->buf_len += len;
91 } 90 }
92} 91}
93 92
94 93
95static void 94static void pad_message(struct crypt_s390_sha1_ctx* sctx)
96pad_message(struct crypt_s390_sha1_ctx* sctx)
97{ 95{
98 int index; 96 int index;
99 97
100 index = sctx->buf_len; 98 index = sctx->buf_len;
101 sctx->buf_len = (sctx->buf_len < 56)? 99 sctx->buf_len = (sctx->buf_len < 56) ?
102 SHA1_BLOCK_SIZE:2 * SHA1_BLOCK_SIZE; 100 SHA1_BLOCK_SIZE:2 * SHA1_BLOCK_SIZE;
103 //start pad with 1 101 /* start pad with 1 */
104 sctx->buffer[index] = 0x80; 102 sctx->buffer[index] = 0x80;
105 //pad with zeros 103 /* pad with zeros */
106 index++; 104 index++;
107 memset(sctx->buffer + index, 0x00, sctx->buf_len - index); 105 memset(sctx->buffer + index, 0x00, sctx->buf_len - index);
108 //append length 106 /* append length */
109 memcpy(sctx->buffer + sctx->buf_len - 8, &sctx->count, 107 memcpy(sctx->buffer + sctx->buf_len - 8, &sctx->count,
110 sizeof sctx->count); 108 sizeof sctx->count);
111} 109}
112 110
113/* Add padding and return the message digest. */ 111/* Add padding and return the message digest. */
@@ -115,47 +113,40 @@ static void sha1_final(struct crypto_tfm *tfm, u8 *out)
115{ 113{
116 struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); 114 struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
117 115
118 //must perform manual padding 116 /* must perform manual padding */
119 pad_message(sctx); 117 pad_message(sctx);
120 crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len); 118 crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len);
121 //copy digest to out 119 /* copy digest to out */
122 memcpy(out, sctx->state, SHA1_DIGEST_SIZE); 120 memcpy(out, sctx->state, SHA1_DIGEST_SIZE);
123 /* Wipe context */ 121 /* wipe context */
124 memset(sctx, 0, sizeof *sctx); 122 memset(sctx, 0, sizeof *sctx);
125} 123}
126 124
127static struct crypto_alg alg = { 125static struct crypto_alg alg = {
128 .cra_name = "sha1", 126 .cra_name = "sha1",
129 .cra_driver_name = "sha1-s390", 127 .cra_driver_name= "sha1-s390",
130 .cra_priority = CRYPT_S390_PRIORITY, 128 .cra_priority = CRYPT_S390_PRIORITY,
131 .cra_flags = CRYPTO_ALG_TYPE_DIGEST, 129 .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
132 .cra_blocksize = SHA1_BLOCK_SIZE, 130 .cra_blocksize = SHA1_BLOCK_SIZE,
133 .cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx), 131 .cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx),
134 .cra_module = THIS_MODULE, 132 .cra_module = THIS_MODULE,
135 .cra_list = LIST_HEAD_INIT(alg.cra_list), 133 .cra_list = LIST_HEAD_INIT(alg.cra_list),
136 .cra_u = { .digest = { 134 .cra_u = { .digest = {
137 .dia_digestsize = SHA1_DIGEST_SIZE, 135 .dia_digestsize = SHA1_DIGEST_SIZE,
138 .dia_init = sha1_init, 136 .dia_init = sha1_init,
139 .dia_update = sha1_update, 137 .dia_update = sha1_update,
140 .dia_final = sha1_final } } 138 .dia_final = sha1_final } }
141}; 139};
142 140
143static int 141static int __init init(void)
144init(void)
145{ 142{
146 int ret = -ENOSYS; 143 if (!crypt_s390_func_available(KIMD_SHA_1))
144 return -EOPNOTSUPP;
147 145
148 if (crypt_s390_func_available(KIMD_SHA_1)){ 146 return crypto_register_alg(&alg);
149 ret = crypto_register_alg(&alg);
150 if (ret == 0){
151 printk(KERN_INFO "crypt_s390: sha1_s390 loaded.\n");
152 }
153 }
154 return ret;
155} 147}
156 148
157static void __exit 149static void __exit fini(void)
158fini(void)
159{ 150{
160 crypto_unregister_alg(&alg); 151 crypto_unregister_alg(&alg);
161} 152}
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 8e4e67503fe7..78436c696d37 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -4,7 +4,7 @@
4 * s390 implementation of the SHA256 Secure Hash Algorithm. 4 * s390 implementation of the SHA256 Secure Hash Algorithm.
5 * 5 *
6 * s390 Version: 6 * s390 Version:
7 * Copyright (C) 2005 IBM Deutschland GmbH, IBM Corporation 7 * Copyright IBM Corp. 2005,2007
8 * Author(s): Jan Glauber (jang@de.ibm.com) 8 * Author(s): Jan Glauber (jang@de.ibm.com)
9 * 9 *
10 * Derived from "crypto/sha256.c" 10 * Derived from "crypto/sha256.c"
@@ -143,15 +143,10 @@ static struct crypto_alg alg = {
143 143
144static int init(void) 144static int init(void)
145{ 145{
146 int ret;
147
148 if (!crypt_s390_func_available(KIMD_SHA_256)) 146 if (!crypt_s390_func_available(KIMD_SHA_256))
149 return -ENOSYS; 147 return -EOPNOTSUPP;
150 148
151 ret = crypto_register_alg(&alg); 149 return crypto_register_alg(&alg);
152 if (ret != 0)
153 printk(KERN_INFO "crypt_s390: sha256_s390 couldn't be loaded.");
154 return ret;
155} 150}
156 151
157static void __exit fini(void) 152static void __exit fini(void)
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 5368cf4a350e..7c621b8ef683 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -108,6 +108,8 @@ CONFIG_DEFAULT_MIGRATION_COST=1000000
108CONFIG_COMPAT=y 108CONFIG_COMPAT=y
109CONFIG_SYSVIPC_COMPAT=y 109CONFIG_SYSVIPC_COMPAT=y
110CONFIG_AUDIT_ARCH=y 110CONFIG_AUDIT_ARCH=y
111CONFIG_S390_SWITCH_AMODE=y
112CONFIG_S390_EXEC_PROTECT=y
111 113
112# 114#
113# Code generation options 115# Code generation options
@@ -431,7 +433,6 @@ CONFIG_TN3270_CONSOLE=y
431CONFIG_TN3215=y 433CONFIG_TN3215=y
432CONFIG_TN3215_CONSOLE=y 434CONFIG_TN3215_CONSOLE=y
433CONFIG_CCW_CONSOLE=y 435CONFIG_CCW_CONSOLE=y
434CONFIG_SCLP=y
435CONFIG_SCLP_TTY=y 436CONFIG_SCLP_TTY=y
436CONFIG_SCLP_CONSOLE=y 437CONFIG_SCLP_CONSOLE=y
437CONFIG_SCLP_VT220_TTY=y 438CONFIG_SCLP_VT220_TTY=y
@@ -724,9 +725,7 @@ CONFIG_CRYPTO_MANAGER=y
724# CONFIG_CRYPTO_MD4 is not set 725# CONFIG_CRYPTO_MD4 is not set
725# CONFIG_CRYPTO_MD5 is not set 726# CONFIG_CRYPTO_MD5 is not set
726# CONFIG_CRYPTO_SHA1 is not set 727# CONFIG_CRYPTO_SHA1 is not set
727# CONFIG_CRYPTO_SHA1_S390 is not set
728# CONFIG_CRYPTO_SHA256 is not set 728# CONFIG_CRYPTO_SHA256 is not set
729# CONFIG_CRYPTO_SHA256_S390 is not set
730# CONFIG_CRYPTO_SHA512 is not set 729# CONFIG_CRYPTO_SHA512 is not set
731# CONFIG_CRYPTO_WP512 is not set 730# CONFIG_CRYPTO_WP512 is not set
732# CONFIG_CRYPTO_TGR192 is not set 731# CONFIG_CRYPTO_TGR192 is not set
@@ -735,12 +734,10 @@ CONFIG_CRYPTO_ECB=m
735CONFIG_CRYPTO_CBC=y 734CONFIG_CRYPTO_CBC=y
736# CONFIG_CRYPTO_LRW is not set 735# CONFIG_CRYPTO_LRW is not set
737# CONFIG_CRYPTO_DES is not set 736# CONFIG_CRYPTO_DES is not set
738# CONFIG_CRYPTO_DES_S390 is not set
739# CONFIG_CRYPTO_BLOWFISH is not set 737# CONFIG_CRYPTO_BLOWFISH is not set
740# CONFIG_CRYPTO_TWOFISH is not set 738# CONFIG_CRYPTO_TWOFISH is not set
741# CONFIG_CRYPTO_SERPENT is not set 739# CONFIG_CRYPTO_SERPENT is not set
742# CONFIG_CRYPTO_AES is not set 740# CONFIG_CRYPTO_AES is not set
743# CONFIG_CRYPTO_AES_S390 is not set
744# CONFIG_CRYPTO_CAST5 is not set 741# CONFIG_CRYPTO_CAST5 is not set
745# CONFIG_CRYPTO_CAST6 is not set 742# CONFIG_CRYPTO_CAST6 is not set
746# CONFIG_CRYPTO_TEA is not set 743# CONFIG_CRYPTO_TEA is not set
@@ -755,6 +752,11 @@ CONFIG_CRYPTO_CBC=y
755# 752#
756# Hardware crypto devices 753# Hardware crypto devices
757# 754#
755# CONFIG_CRYPTO_SHA1_S390 is not set
756# CONFIG_CRYPTO_SHA256_S390 is not set
757# CONFIG_CRYPTO_DES_S390 is not set
758# CONFIG_CRYPTO_AES_S390 is not set
759CONFIG_S390_PRNG=m
758 760
759# 761#
760# Library routines 762# Library routines
diff --git a/arch/s390/hypfs/Makefile b/arch/s390/hypfs/Makefile
index f4b00cd81f7c..b08d2abf6178 100644
--- a/arch/s390/hypfs/Makefile
+++ b/arch/s390/hypfs/Makefile
@@ -4,4 +4,4 @@
4 4
5obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o 5obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o
6 6
7s390_hypfs-objs := inode.o hypfs_diag.o 7s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index f3dbd91965c6..aea572009d60 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -27,4 +27,13 @@ extern struct dentry *hypfs_create_str(struct super_block *sb,
27 struct dentry *dir, const char *name, 27 struct dentry *dir, const char *name,
28 char *string); 28 char *string);
29 29
30/* LPAR Hypervisor */
31extern int hypfs_diag_init(void);
32extern void hypfs_diag_exit(void);
33extern int hypfs_diag_create_files(struct super_block *sb, struct dentry *root);
34
35/* VM Hypervisor */
36extern int hypfs_vm_init(void);
37extern int hypfs_vm_create_files(struct super_block *sb, struct dentry *root);
38
30#endif /* _HYPFS_H_ */ 39#endif /* _HYPFS_H_ */
diff --git a/arch/s390/hypfs/hypfs_diag.h b/arch/s390/hypfs/hypfs_diag.h
deleted file mode 100644
index 256b384aebe1..000000000000
--- a/arch/s390/hypfs/hypfs_diag.h
+++ /dev/null
@@ -1,16 +0,0 @@
1/*
2 * arch/s390/hypfs_diag.h
3 * Hypervisor filesystem for Linux on s390.
4 *
5 * Copyright (C) IBM Corp. 2006
6 * Author(s): Michael Holzheu <holzheu@de.ibm.com>
7 */
8
9#ifndef _HYPFS_DIAG_H_
10#define _HYPFS_DIAG_H_
11
12extern int hypfs_diag_init(void);
13extern void hypfs_diag_exit(void);
14extern int hypfs_diag_create_files(struct super_block *sb, struct dentry *root);
15
16#endif /* _HYPFS_DIAG_H_ */
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
new file mode 100644
index 000000000000..d01fc8f799f0
--- /dev/null
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -0,0 +1,231 @@
1/*
2 * Hypervisor filesystem for Linux on s390. z/VM implementation.
3 *
4 * Copyright (C) IBM Corp. 2006
5 * Author(s): Michael Holzheu <holzheu@de.ibm.com>
6 */
7
8#include <linux/types.h>
9#include <linux/errno.h>
10#include <linux/string.h>
11#include <linux/vmalloc.h>
12#include <asm/ebcdic.h>
13#include "hypfs.h"
14
15#define NAME_LEN 8
16
17static char local_guest[] = " ";
18static char all_guests[] = "* ";
19static char *guest_query;
20
21struct diag2fc_data {
22 __u32 version;
23 __u32 flags;
24 __u64 used_cpu;
25 __u64 el_time;
26 __u64 mem_min_kb;
27 __u64 mem_max_kb;
28 __u64 mem_share_kb;
29 __u64 mem_used_kb;
30 __u32 pcpus;
31 __u32 lcpus;
32 __u32 vcpus;
33 __u32 cpu_min;
34 __u32 cpu_max;
35 __u32 cpu_shares;
36 __u32 cpu_use_samp;
37 __u32 cpu_delay_samp;
38 __u32 page_wait_samp;
39 __u32 idle_samp;
40 __u32 other_samp;
41 __u32 total_samp;
42 char guest_name[NAME_LEN];
43};
44
45struct diag2fc_parm_list {
46 char userid[NAME_LEN];
47 char aci_grp[NAME_LEN];
48 __u64 addr;
49 __u32 size;
50 __u32 fmt;
51};
52
53static int diag2fc(int size, char* query, void *addr)
54{
55 unsigned long residual_cnt;
56 unsigned long rc;
57 struct diag2fc_parm_list parm_list;
58
59 memcpy(parm_list.userid, query, NAME_LEN);
60 ASCEBC(parm_list.userid, NAME_LEN);
61 parm_list.addr = (unsigned long) addr ;
62 parm_list.size = size;
63 parm_list.fmt = 0x02;
64 memset(parm_list.aci_grp, 0x40, NAME_LEN);
65 rc = -1;
66
67 asm volatile(
68 " diag %0,%1,0x2fc\n"
69 "0:\n"
70 EX_TABLE(0b,0b)
71 : "=d" (residual_cnt), "+d" (rc) : "0" (&parm_list) : "memory");
72
73 if ((rc != 0 ) && (rc != -2))
74 return rc;
75 else
76 return -residual_cnt;
77}
78
79static struct diag2fc_data *diag2fc_store(char *query, int *count)
80{
81 int size;
82 struct diag2fc_data *data;
83
84 do {
85 size = diag2fc(0, query, NULL);
86 if (size < 0)
87 return ERR_PTR(-EACCES);
88 data = vmalloc(size);
89 if (!data)
90 return ERR_PTR(-ENOMEM);
91 if (diag2fc(size, query, data) == 0)
92 break;
93 vfree(data);
94 } while (1);
95 *count = (size / sizeof(*data));
96
97 return data;
98}
99
100static void diag2fc_free(void *data)
101{
102 vfree(data);
103}
104
105#define ATTRIBUTE(sb, dir, name, member) \
106do { \
107 void *rc; \
108 rc = hypfs_create_u64(sb, dir, name, member); \
109 if (IS_ERR(rc)) \
110 return PTR_ERR(rc); \
111} while(0)
112
113static int hpyfs_vm_create_guest(struct super_block *sb,
114 struct dentry *systems_dir,
115 struct diag2fc_data *data)
116{
117 char guest_name[NAME_LEN + 1] = {};
118 struct dentry *guest_dir, *cpus_dir, *samples_dir, *mem_dir;
119 int dedicated_flag, capped_value;
120
121 capped_value = (data->flags & 0x00000006) >> 1;
122 dedicated_flag = (data->flags & 0x00000008) >> 3;
123
124 /* guest dir */
125 memcpy(guest_name, data->guest_name, NAME_LEN);
126 EBCASC(guest_name, NAME_LEN);
127 strstrip(guest_name);
128 guest_dir = hypfs_mkdir(sb, systems_dir, guest_name);
129 if (IS_ERR(guest_dir))
130 return PTR_ERR(guest_dir);
131 ATTRIBUTE(sb, guest_dir, "onlinetime_us", data->el_time);
132
133 /* logical cpu information */
134 cpus_dir = hypfs_mkdir(sb, guest_dir, "cpus");
135 if (IS_ERR(cpus_dir))
136 return PTR_ERR(cpus_dir);
137 ATTRIBUTE(sb, cpus_dir, "cputime_us", data->used_cpu);
138 ATTRIBUTE(sb, cpus_dir, "capped", capped_value);
139 ATTRIBUTE(sb, cpus_dir, "dedicated", dedicated_flag);
140 ATTRIBUTE(sb, cpus_dir, "count", data->vcpus);
141 ATTRIBUTE(sb, cpus_dir, "weight_min", data->cpu_min);
142 ATTRIBUTE(sb, cpus_dir, "weight_max", data->cpu_max);
143 ATTRIBUTE(sb, cpus_dir, "weight_cur", data->cpu_shares);
144
145 /* memory information */
146 mem_dir = hypfs_mkdir(sb, guest_dir, "mem");
147 if (IS_ERR(mem_dir))
148 return PTR_ERR(mem_dir);
149 ATTRIBUTE(sb, mem_dir, "min_KiB", data->mem_min_kb);
150 ATTRIBUTE(sb, mem_dir, "max_KiB", data->mem_max_kb);
151 ATTRIBUTE(sb, mem_dir, "used_KiB", data->mem_used_kb);
152 ATTRIBUTE(sb, mem_dir, "share_KiB", data->mem_share_kb);
153
154 /* samples */
155 samples_dir = hypfs_mkdir(sb, guest_dir, "samples");
156 if (IS_ERR(samples_dir))
157 return PTR_ERR(samples_dir);
158 ATTRIBUTE(sb, samples_dir, "cpu_using", data->cpu_use_samp);
159 ATTRIBUTE(sb, samples_dir, "cpu_delay", data->cpu_delay_samp);
160 ATTRIBUTE(sb, samples_dir, "mem_delay", data->page_wait_samp);
161 ATTRIBUTE(sb, samples_dir, "idle", data->idle_samp);
162 ATTRIBUTE(sb, samples_dir, "other", data->other_samp);
163 ATTRIBUTE(sb, samples_dir, "total", data->total_samp);
164 return 0;
165}
166
167int hypfs_vm_create_files(struct super_block *sb, struct dentry *root)
168{
169 struct dentry *dir, *file;
170 struct diag2fc_data *data;
171 int rc, i, count = 0;
172
173 data = diag2fc_store(guest_query, &count);
174 if (IS_ERR(data))
175 return PTR_ERR(data);
176
177 /* Hpervisor Info */
178 dir = hypfs_mkdir(sb, root, "hyp");
179 if (IS_ERR(dir)) {
180 rc = PTR_ERR(dir);
181 goto failed;
182 }
183 file = hypfs_create_str(sb, dir, "type", "z/VM Hypervisor");
184 if (IS_ERR(file)) {
185 rc = PTR_ERR(file);
186 goto failed;
187 }
188
189 /* physical cpus */
190 dir = hypfs_mkdir(sb, root, "cpus");
191 if (IS_ERR(dir)) {
192 rc = PTR_ERR(dir);
193 goto failed;
194 }
195 file = hypfs_create_u64(sb, dir, "count", data->lcpus);
196 if (IS_ERR(file)) {
197 rc = PTR_ERR(file);
198 goto failed;
199 }
200
201 /* guests */
202 dir = hypfs_mkdir(sb, root, "systems");
203 if (IS_ERR(dir)) {
204 rc = PTR_ERR(dir);
205 goto failed;
206 }
207
208 for (i = 0; i < count; i++) {
209 rc = hpyfs_vm_create_guest(sb, dir, &(data[i]));
210 if (rc)
211 goto failed;
212 }
213 diag2fc_free(data);
214 return 0;
215
216failed:
217 diag2fc_free(data);
218 return rc;
219}
220
221int hypfs_vm_init(void)
222{
223 if (diag2fc(0, all_guests, NULL) > 0)
224 guest_query = all_guests;
225 else if (diag2fc(0, local_guest, NULL) > 0)
226 guest_query = local_guest;
227 else
228 return -EACCES;
229
230 return 0;
231}
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index b6716c4b9934..a4fda7b53640 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -19,7 +19,6 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <asm/ebcdic.h> 20#include <asm/ebcdic.h>
21#include "hypfs.h" 21#include "hypfs.h"
22#include "hypfs_diag.h"
23 22
24#define HYPFS_MAGIC 0x687970 /* ASCII 'hyp' */ 23#define HYPFS_MAGIC 0x687970 /* ASCII 'hyp' */
25#define TMP_SIZE 64 /* size of temporary buffers */ 24#define TMP_SIZE 64 /* size of temporary buffers */
@@ -192,7 +191,10 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
192 goto out; 191 goto out;
193 } 192 }
194 hypfs_delete_tree(sb->s_root); 193 hypfs_delete_tree(sb->s_root);
195 rc = hypfs_diag_create_files(sb, sb->s_root); 194 if (MACHINE_IS_VM)
195 rc = hypfs_vm_create_files(sb, sb->s_root);
196 else
197 rc = hypfs_diag_create_files(sb, sb->s_root);
196 if (rc) { 198 if (rc) {
197 printk(KERN_ERR "hypfs: Update failed\n"); 199 printk(KERN_ERR "hypfs: Update failed\n");
198 hypfs_delete_tree(sb->s_root); 200 hypfs_delete_tree(sb->s_root);
@@ -289,7 +291,10 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
289 rc = -ENOMEM; 291 rc = -ENOMEM;
290 goto err_alloc; 292 goto err_alloc;
291 } 293 }
292 rc = hypfs_diag_create_files(sb, root_dentry); 294 if (MACHINE_IS_VM)
295 rc = hypfs_vm_create_files(sb, root_dentry);
296 else
297 rc = hypfs_diag_create_files(sb, root_dentry);
293 if (rc) 298 if (rc)
294 goto err_tree; 299 goto err_tree;
295 sbi->update_file = hypfs_create_update_file(sb, root_dentry); 300 sbi->update_file = hypfs_create_update_file(sb, root_dentry);
@@ -462,11 +467,15 @@ static int __init hypfs_init(void)
462{ 467{
463 int rc; 468 int rc;
464 469
465 if (MACHINE_IS_VM) 470 if (MACHINE_IS_VM) {
466 return -ENODATA; 471 if (hypfs_vm_init())
467 if (hypfs_diag_init()) { 472 /* no diag 2fc, just exit */
468 rc = -ENODATA; 473 return -ENODATA;
469 goto fail_diag; 474 } else {
475 if (hypfs_diag_init()) {
476 rc = -ENODATA;
477 goto fail_diag;
478 }
470 } 479 }
471 kset_set_kset_s(&s390_subsys, hypervisor_subsys); 480 kset_set_kset_s(&s390_subsys, hypervisor_subsys);
472 rc = subsystem_register(&s390_subsys); 481 rc = subsystem_register(&s390_subsys);
@@ -480,7 +489,8 @@ static int __init hypfs_init(void)
480fail_filesystem: 489fail_filesystem:
481 subsystem_unregister(&s390_subsys); 490 subsystem_unregister(&s390_subsys);
482fail_sysfs: 491fail_sysfs:
483 hypfs_diag_exit(); 492 if (!MACHINE_IS_VM)
493 hypfs_diag_exit();
484fail_diag: 494fail_diag:
485 printk(KERN_ERR "hypfs: Initialization failed with rc = %i.\n", rc); 495 printk(KERN_ERR "hypfs: Initialization failed with rc = %i.\n", rc);
486 return rc; 496 return rc;
@@ -488,7 +498,8 @@ fail_diag:
488 498
489static void __exit hypfs_exit(void) 499static void __exit hypfs_exit(void)
490{ 500{
491 hypfs_diag_exit(); 501 if (!MACHINE_IS_VM)
502 hypfs_diag_exit();
492 unregister_filesystem(&hypfs_type); 503 unregister_filesystem(&hypfs_type);
493 subsystem_unregister(&s390_subsys); 504 subsystem_unregister(&s390_subsys);
494} 505}
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index a81881c9b297..5492d25d7d69 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -4,9 +4,9 @@
4 4
5EXTRA_AFLAGS := -traditional 5EXTRA_AFLAGS := -traditional
6 6
7obj-y := bitmap.o traps.o time.o process.o reset.o \ 7obj-y := bitmap.o traps.o time.o process.o base.o early.o \
8 setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ 8 setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
9 semaphore.o s390_ext.o debug.o profile.o irq.o ipl.o 9 semaphore.o s390_ext.o debug.o irq.o ipl.o
10 10
11obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 11obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
12obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) 12obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
new file mode 100644
index 000000000000..dc7e5259770f
--- /dev/null
+++ b/arch/s390/kernel/base.S
@@ -0,0 +1,150 @@
1/*
2 * arch/s390/kernel/base.S
3 *
4 * Copyright IBM Corp. 2006,2007
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 * Michael Holzheu <holzheu@de.ibm.com>
7 */
8
9#include <asm/ptrace.h>
10#include <asm/lowcore.h>
11
12#ifdef CONFIG_64BIT
13
14 .globl s390_base_mcck_handler
15s390_base_mcck_handler:
16 basr %r13,0
170: lg %r15,__LC_PANIC_STACK # load panic stack
18 aghi %r15,-STACK_FRAME_OVERHEAD
19 larl %r1,s390_base_mcck_handler_fn
20 lg %r1,0(%r1)
21 ltgr %r1,%r1
22 jz 1f
23 basr %r14,%r1
241: la %r1,4095
25 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
26 lpswe __LC_MCK_OLD_PSW
27
28 .section .bss
29 .globl s390_base_mcck_handler_fn
30s390_base_mcck_handler_fn:
31 .quad 0
32 .previous
33
34 .globl s390_base_ext_handler
35s390_base_ext_handler:
36 stmg %r0,%r15,__LC_SAVE_AREA
37 basr %r13,0
380: aghi %r15,-STACK_FRAME_OVERHEAD
39 larl %r1,s390_base_ext_handler_fn
40 lg %r1,0(%r1)
41 ltgr %r1,%r1
42 jz 1f
43 basr %r14,%r1
441: lmg %r0,%r15,__LC_SAVE_AREA
45 ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
46 lpswe __LC_EXT_OLD_PSW
47
48 .section .bss
49 .globl s390_base_ext_handler_fn
50s390_base_ext_handler_fn:
51 .quad 0
52 .previous
53
54 .globl s390_base_pgm_handler
55s390_base_pgm_handler:
56 stmg %r0,%r15,__LC_SAVE_AREA
57 basr %r13,0
580: aghi %r15,-STACK_FRAME_OVERHEAD
59 larl %r1,s390_base_pgm_handler_fn
60 lg %r1,0(%r1)
61 ltgr %r1,%r1
62 jz 1f
63 basr %r14,%r1
64 lmg %r0,%r15,__LC_SAVE_AREA
65 lpswe __LC_PGM_OLD_PSW
661: lpswe disabled_wait_psw-0b(%r13)
67
68 .align 8
69disabled_wait_psw:
70 .quad 0x0002000180000000,0x0000000000000000 + s390_base_pgm_handler
71
72 .section .bss
73 .globl s390_base_pgm_handler_fn
74s390_base_pgm_handler_fn:
75 .quad 0
76 .previous
77
78#else /* CONFIG_64BIT */
79
80 .globl s390_base_mcck_handler
81s390_base_mcck_handler:
82 basr %r13,0
830: l %r15,__LC_PANIC_STACK # load panic stack
84 ahi %r15,-STACK_FRAME_OVERHEAD
85 l %r1,2f-0b(%r13)
86 l %r1,0(%r1)
87 ltr %r1,%r1
88 jz 1f
89 basr %r14,%r1
901: lm %r0,%r15,__LC_GPREGS_SAVE_AREA
91 lpsw __LC_MCK_OLD_PSW
92
932: .long s390_base_mcck_handler_fn
94
95 .section .bss
96 .globl s390_base_mcck_handler_fn
97s390_base_mcck_handler_fn:
98 .long 0
99 .previous
100
101 .globl s390_base_ext_handler
102s390_base_ext_handler:
103 stm %r0,%r15,__LC_SAVE_AREA
104 basr %r13,0
1050: ahi %r15,-STACK_FRAME_OVERHEAD
106 l %r1,2f-0b(%r13)
107 l %r1,0(%r1)
108 ltr %r1,%r1
109 jz 1f
110 basr %r14,%r1
1111: lm %r0,%r15,__LC_SAVE_AREA
112 ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
113 lpsw __LC_EXT_OLD_PSW
114
1152: .long s390_base_ext_handler_fn
116
117 .section .bss
118 .globl s390_base_ext_handler_fn
119s390_base_ext_handler_fn:
120 .long 0
121 .previous
122
123 .globl s390_base_pgm_handler
124s390_base_pgm_handler:
125 stm %r0,%r15,__LC_SAVE_AREA
126 basr %r13,0
1270: ahi %r15,-STACK_FRAME_OVERHEAD
128 l %r1,2f-0b(%r13)
129 l %r1,0(%r1)
130 ltr %r1,%r1
131 jz 1f
132 basr %r14,%r1
133 lm %r0,%r15,__LC_SAVE_AREA
134 lpsw __LC_PGM_OLD_PSW
135
1361: lpsw disabled_wait_psw-0b(%r13)
137
1382: .long s390_base_pgm_handler_fn
139
140disabled_wait_psw:
141 .align 8
142 .long 0x000a0000,0x00000000 + s390_base_pgm_handler
143
144 .section .bss
145 .globl s390_base_pgm_handler_fn
146s390_base_pgm_handler_fn:
147 .long 0
148 .previous
149
150#endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c
index 5c46054195cb..f1e40ca00d8d 100644
--- a/arch/s390/kernel/binfmt_elf32.c
+++ b/arch/s390/kernel/binfmt_elf32.c
@@ -192,7 +192,7 @@ MODULE_AUTHOR("Gerhard Tonn <ton@de.ibm.com>");
192 192
193#undef cputime_to_timeval 193#undef cputime_to_timeval
194#define cputime_to_timeval cputime_to_compat_timeval 194#define cputime_to_timeval cputime_to_compat_timeval
195static __inline__ void 195static inline void
196cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value) 196cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
197{ 197{
198 value->tv_usec = cputime % 1000000; 198 value->tv_usec = cputime % 1000000;
diff --git a/arch/s390/kernel/compat_exec_domain.c b/arch/s390/kernel/compat_exec_domain.c
index 71d27c493568..914d49444f92 100644
--- a/arch/s390/kernel/compat_exec_domain.c
+++ b/arch/s390/kernel/compat_exec_domain.c
@@ -12,10 +12,9 @@
12#include <linux/personality.h> 12#include <linux/personality.h>
13#include <linux/sched.h> 13#include <linux/sched.h>
14 14
15struct exec_domain s390_exec_domain; 15static struct exec_domain s390_exec_domain;
16 16
17static int __init 17static int __init s390_init (void)
18s390_init (void)
19{ 18{
20 s390_exec_domain.name = "Linux/s390"; 19 s390_exec_domain.name = "Linux/s390";
21 s390_exec_domain.handler = NULL; 20 s390_exec_domain.handler = NULL;
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 5b33f823863a..666bb6daa148 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -69,6 +69,12 @@
69 69
70#include "compat_linux.h" 70#include "compat_linux.h"
71 71
72long psw_user32_bits = (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
73 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
74 PSW_MASK_PSTATE | PSW_DEFAULT_KEY);
75long psw32_user_bits = (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME |
76 PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
77 PSW32_MASK_PSTATE);
72 78
73/* For this source file, we want overflow handling. */ 79/* For this source file, we want overflow handling. */
74 80
@@ -416,7 +422,7 @@ asmlinkage long sys32_sysinfo(struct sysinfo32 __user *info)
416 mm_segment_t old_fs = get_fs (); 422 mm_segment_t old_fs = get_fs ();
417 423
418 set_fs (KERNEL_DS); 424 set_fs (KERNEL_DS);
419 ret = sys_sysinfo((struct sysinfo __user *) &s); 425 ret = sys_sysinfo((struct sysinfo __force __user *) &s);
420 set_fs (old_fs); 426 set_fs (old_fs);
421 err = put_user (s.uptime, &info->uptime); 427 err = put_user (s.uptime, &info->uptime);
422 err |= __put_user (s.loads[0], &info->loads[0]); 428 err |= __put_user (s.loads[0], &info->loads[0]);
@@ -445,7 +451,8 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
445 mm_segment_t old_fs = get_fs (); 451 mm_segment_t old_fs = get_fs ();
446 452
447 set_fs (KERNEL_DS); 453 set_fs (KERNEL_DS);
448 ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t); 454 ret = sys_sched_rr_get_interval(pid,
455 (struct timespec __force __user *) &t);
449 set_fs (old_fs); 456 set_fs (old_fs);
450 if (put_compat_timespec(&t, interval)) 457 if (put_compat_timespec(&t, interval))
451 return -EFAULT; 458 return -EFAULT;
@@ -472,8 +479,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
472 } 479 }
473 set_fs (KERNEL_DS); 480 set_fs (KERNEL_DS);
474 ret = sys_rt_sigprocmask(how, 481 ret = sys_rt_sigprocmask(how,
475 set ? (sigset_t __user *) &s : NULL, 482 set ? (sigset_t __force __user *) &s : NULL,
476 oset ? (sigset_t __user *) &s : NULL, 483 oset ? (sigset_t __force __user *) &s : NULL,
477 sigsetsize); 484 sigsetsize);
478 set_fs (old_fs); 485 set_fs (old_fs);
479 if (ret) return ret; 486 if (ret) return ret;
@@ -499,7 +506,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
499 mm_segment_t old_fs = get_fs(); 506 mm_segment_t old_fs = get_fs();
500 507
501 set_fs (KERNEL_DS); 508 set_fs (KERNEL_DS);
502 ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize); 509 ret = sys_rt_sigpending((sigset_t __force __user *) &s, sigsetsize);
503 set_fs (old_fs); 510 set_fs (old_fs);
504 if (!ret) { 511 if (!ret) {
505 switch (_NSIG_WORDS) { 512 switch (_NSIG_WORDS) {
@@ -524,7 +531,7 @@ sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo)
524 if (copy_siginfo_from_user32(&info, uinfo)) 531 if (copy_siginfo_from_user32(&info, uinfo))
525 return -EFAULT; 532 return -EFAULT;
526 set_fs (KERNEL_DS); 533 set_fs (KERNEL_DS);
527 ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info); 534 ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force __user *) &info);
528 set_fs (old_fs); 535 set_fs (old_fs);
529 return ret; 536 return ret;
530} 537}
@@ -682,7 +689,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offse
682 689
683 set_fs(KERNEL_DS); 690 set_fs(KERNEL_DS);
684 ret = sys_sendfile(out_fd, in_fd, 691 ret = sys_sendfile(out_fd, in_fd,
685 offset ? (off_t __user *) &of : NULL, count); 692 offset ? (off_t __force __user *) &of : NULL, count);
686 set_fs(old_fs); 693 set_fs(old_fs);
687 694
688 if (offset && put_user(of, offset)) 695 if (offset && put_user(of, offset))
@@ -703,7 +710,8 @@ asmlinkage long sys32_sendfile64(int out_fd, int in_fd,
703 710
704 set_fs(KERNEL_DS); 711 set_fs(KERNEL_DS);
705 ret = sys_sendfile64(out_fd, in_fd, 712 ret = sys_sendfile64(out_fd, in_fd,
706 offset ? (loff_t __user *) &lof : NULL, count); 713 offset ? (loff_t __force __user *) &lof : NULL,
714 count);
707 set_fs(old_fs); 715 set_fs(old_fs);
708 716
709 if (offset && put_user(lof, offset)) 717 if (offset && put_user(lof, offset))
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index 1a18e29668ef..e89f8c0c42a0 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -115,37 +115,6 @@ typedef struct
115 __u32 addr; 115 __u32 addr;
116} _psw_t32 __attribute__ ((aligned(8))); 116} _psw_t32 __attribute__ ((aligned(8)));
117 117
118#define PSW32_MASK_PER 0x40000000UL
119#define PSW32_MASK_DAT 0x04000000UL
120#define PSW32_MASK_IO 0x02000000UL
121#define PSW32_MASK_EXT 0x01000000UL
122#define PSW32_MASK_KEY 0x00F00000UL
123#define PSW32_MASK_MCHECK 0x00040000UL
124#define PSW32_MASK_WAIT 0x00020000UL
125#define PSW32_MASK_PSTATE 0x00010000UL
126#define PSW32_MASK_ASC 0x0000C000UL
127#define PSW32_MASK_CC 0x00003000UL
128#define PSW32_MASK_PM 0x00000f00UL
129
130#define PSW32_ADDR_AMODE31 0x80000000UL
131#define PSW32_ADDR_INSN 0x7FFFFFFFUL
132
133#define PSW32_BASE_BITS 0x00080000UL
134
135#define PSW32_ASC_PRIMARY 0x00000000UL
136#define PSW32_ASC_ACCREG 0x00004000UL
137#define PSW32_ASC_SECONDARY 0x00008000UL
138#define PSW32_ASC_HOME 0x0000C000UL
139
140#define PSW32_USER_BITS (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME | \
141 PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | \
142 PSW32_MASK_PSTATE)
143
144#define PSW32_MASK_MERGE(CURRENT,NEW) \
145 (((CURRENT) & ~(PSW32_MASK_CC|PSW32_MASK_PM)) | \
146 ((NEW) & (PSW32_MASK_CC|PSW32_MASK_PM)))
147
148
149typedef struct 118typedef struct
150{ 119{
151 _psw_t32 psw; 120 _psw_t32 psw;
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 861888ab8c13..887a9881d0d0 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -275,8 +275,8 @@ sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss,
275 } 275 }
276 276
277 set_fs (KERNEL_DS); 277 set_fs (KERNEL_DS);
278 ret = do_sigaltstack((stack_t __user *) (uss ? &kss : NULL), 278 ret = do_sigaltstack((stack_t __force __user *) (uss ? &kss : NULL),
279 (stack_t __user *) (uoss ? &koss : NULL), 279 (stack_t __force __user *) (uoss ? &koss : NULL),
280 regs->gprs[15]); 280 regs->gprs[15]);
281 set_fs (old_fs); 281 set_fs (old_fs);
282 282
@@ -298,7 +298,7 @@ static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
298 _s390_regs_common32 regs32; 298 _s390_regs_common32 regs32;
299 int err, i; 299 int err, i;
300 300
301 regs32.psw.mask = PSW32_MASK_MERGE(PSW32_USER_BITS, 301 regs32.psw.mask = PSW32_MASK_MERGE(psw32_user_bits,
302 (__u32)(regs->psw.mask >> 32)); 302 (__u32)(regs->psw.mask >> 32));
303 regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr; 303 regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr;
304 for (i = 0; i < NUM_GPRS; i++) 304 for (i = 0; i < NUM_GPRS; i++)
@@ -401,7 +401,7 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
401 goto badframe; 401 goto badframe;
402 402
403 set_fs (KERNEL_DS); 403 set_fs (KERNEL_DS);
404 do_sigaltstack((stack_t __user *)&st, NULL, regs->gprs[15]); 404 do_sigaltstack((stack_t __force __user *)&st, NULL, regs->gprs[15]);
405 set_fs (old_fs); 405 set_fs (old_fs);
406 406
407 return regs->gprs[2]; 407 return regs->gprs[2];
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index a5972f1541fe..6c89f30c8e31 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -16,6 +16,7 @@
16#include <asm/ebcdic.h> 16#include <asm/ebcdic.h>
17#include <asm/cpcmd.h> 17#include <asm/cpcmd.h>
18#include <asm/system.h> 18#include <asm/system.h>
19#include <asm/io.h>
19 20
20static DEFINE_SPINLOCK(cpcmd_lock); 21static DEFINE_SPINLOCK(cpcmd_lock);
21static char cpcmd_buf[241]; 22static char cpcmd_buf[241];
@@ -88,13 +89,8 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
88 int len; 89 int len;
89 unsigned long flags; 90 unsigned long flags;
90 91
91 if ((rlen == 0) || (response == NULL) 92 if ((virt_to_phys(response) != (unsigned long) response) ||
92 || !((unsigned long)response >> 31)) { 93 (((unsigned long)response + rlen) >> 31)) {
93 spin_lock_irqsave(&cpcmd_lock, flags);
94 len = __cpcmd(cmd, response, rlen, response_code);
95 spin_unlock_irqrestore(&cpcmd_lock, flags);
96 }
97 else {
98 lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA); 94 lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA);
99 if (!lowbuf) { 95 if (!lowbuf) {
100 printk(KERN_WARNING 96 printk(KERN_WARNING
@@ -106,6 +102,10 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
106 spin_unlock_irqrestore(&cpcmd_lock, flags); 102 spin_unlock_irqrestore(&cpcmd_lock, flags);
107 memcpy(response, lowbuf, rlen); 103 memcpy(response, lowbuf, rlen);
108 kfree(lowbuf); 104 kfree(lowbuf);
105 } else {
106 spin_lock_irqsave(&cpcmd_lock, flags);
107 len = __cpcmd(cmd, response, rlen, response_code);
108 spin_unlock_irqrestore(&cpcmd_lock, flags);
109 } 109 }
110 return len; 110 return len;
111} 111}
diff --git a/arch/s390/kernel/crash.c b/arch/s390/kernel/crash.c
index 926cceeae0fa..8cc7c9fa64f5 100644
--- a/arch/s390/kernel/crash.c
+++ b/arch/s390/kernel/crash.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/threads.h> 10#include <linux/threads.h>
11#include <linux/kexec.h> 11#include <linux/kexec.h>
12#include <linux/reboot.h>
12 13
13void machine_crash_shutdown(struct pt_regs *regs) 14void machine_crash_shutdown(struct pt_regs *regs)
14{ 15{
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index bb57bc0e3fc8..f4b62df02aa2 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -120,7 +120,7 @@ struct debug_view debug_hex_ascii_view = {
120 NULL 120 NULL
121}; 121};
122 122
123struct debug_view debug_level_view = { 123static struct debug_view debug_level_view = {
124 "level", 124 "level",
125 &debug_prolog_level_fn, 125 &debug_prolog_level_fn,
126 NULL, 126 NULL,
@@ -129,7 +129,7 @@ struct debug_view debug_level_view = {
129 NULL 129 NULL
130}; 130};
131 131
132struct debug_view debug_pages_view = { 132static struct debug_view debug_pages_view = {
133 "pages", 133 "pages",
134 &debug_prolog_pages_fn, 134 &debug_prolog_pages_fn,
135 NULL, 135 NULL,
@@ -138,7 +138,7 @@ struct debug_view debug_pages_view = {
138 NULL 138 NULL
139}; 139};
140 140
141struct debug_view debug_flush_view = { 141static struct debug_view debug_flush_view = {
142 "flush", 142 "flush",
143 NULL, 143 NULL,
144 NULL, 144 NULL,
@@ -156,14 +156,14 @@ struct debug_view debug_sprintf_view = {
156 NULL 156 NULL
157}; 157};
158 158
159 159/* used by dump analysis tools to determine version of debug feature */
160unsigned int debug_feature_version = __DEBUG_FEATURE_VERSION; 160unsigned int debug_feature_version = __DEBUG_FEATURE_VERSION;
161 161
162/* static globals */ 162/* static globals */
163 163
164static debug_info_t *debug_area_first = NULL; 164static debug_info_t *debug_area_first = NULL;
165static debug_info_t *debug_area_last = NULL; 165static debug_info_t *debug_area_last = NULL;
166DECLARE_MUTEX(debug_lock); 166static DECLARE_MUTEX(debug_lock);
167 167
168static int initialized; 168static int initialized;
169 169
@@ -905,7 +905,7 @@ static struct ctl_table s390dbf_dir_table[] = {
905 { .ctl_name = 0 } 905 { .ctl_name = 0 }
906}; 906};
907 907
908struct ctl_table_header *s390dbf_sysctl_header; 908static struct ctl_table_header *s390dbf_sysctl_header;
909 909
910void 910void
911debug_stop_all(void) 911debug_stop_all(void)
@@ -1300,8 +1300,7 @@ out:
1300 * flushes debug areas 1300 * flushes debug areas
1301 */ 1301 */
1302 1302
1303void 1303static void debug_flush(debug_info_t* id, int area)
1304debug_flush(debug_info_t* id, int area)
1305{ 1304{
1306 unsigned long flags; 1305 unsigned long flags;
1307 int i,j; 1306 int i,j;
@@ -1511,8 +1510,7 @@ out:
1511/* 1510/*
1512 * clean up module 1511 * clean up module
1513 */ 1512 */
1514void 1513static void __exit debug_exit(void)
1515__exit debug_exit(void)
1516{ 1514{
1517 debugfs_remove(debug_debugfs_root_entry); 1515 debugfs_remove(debug_debugfs_root_entry);
1518 unregister_sysctl_table(s390dbf_sysctl_header); 1516 unregister_sysctl_table(s390dbf_sysctl_header);
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
new file mode 100644
index 000000000000..e518dd53eff5
--- /dev/null
+++ b/arch/s390/kernel/early.c
@@ -0,0 +1,306 @@
1/*
2 * arch/s390/kernel/early.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Hongjie Yang <hongjie@us.ibm.com>,
6 * Heiko Carstens <heiko.carstens@de.ibm.com>
7 */
8
9#include <linux/init.h>
10#include <linux/errno.h>
11#include <linux/string.h>
12#include <linux/ctype.h>
13#include <linux/lockdep.h>
14#include <linux/module.h>
15#include <linux/pfn.h>
16#include <linux/uaccess.h>
17#include <asm/lowcore.h>
18#include <asm/processor.h>
19#include <asm/sections.h>
20#include <asm/setup.h>
21#include <asm/cpcmd.h>
22#include <asm/sclp.h>
23
24/*
25 * Create a Kernel NSS if the SAVESYS= parameter is defined
26 */
27#define DEFSYS_CMD_SIZE 96
28#define SAVESYS_CMD_SIZE 32
29
30char kernel_nss_name[NSS_NAME_SIZE + 1];
31
32#ifdef CONFIG_SHARED_KERNEL
33static noinline __init void create_kernel_nss(void)
34{
35 unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
36#ifdef CONFIG_BLK_DEV_INITRD
37 unsigned int sinitrd_pfn, einitrd_pfn;
38#endif
39 int response;
40 char *savesys_ptr;
41 char upper_command_line[COMMAND_LINE_SIZE];
42 char defsys_cmd[DEFSYS_CMD_SIZE];
43 char savesys_cmd[SAVESYS_CMD_SIZE];
44
45 /* Do nothing if we are not running under VM */
46 if (!MACHINE_IS_VM)
47 return;
48
49 /* Convert COMMAND_LINE to upper case */
50 for (i = 0; i < strlen(COMMAND_LINE); i++)
51 upper_command_line[i] = toupper(COMMAND_LINE[i]);
52
53 savesys_ptr = strstr(upper_command_line, "SAVESYS=");
54
55 if (!savesys_ptr)
56 return;
57
58 savesys_ptr += 8; /* Point to the beginning of the NSS name */
59 for (i = 0; i < NSS_NAME_SIZE; i++) {
60 if (savesys_ptr[i] == ' ' || savesys_ptr[i] == '\0')
61 break;
62 kernel_nss_name[i] = savesys_ptr[i];
63 }
64
65 stext_pfn = PFN_DOWN(__pa(&_stext));
66 eshared_pfn = PFN_DOWN(__pa(&_eshared));
67 end_pfn = PFN_UP(__pa(&_end));
68 min_size = end_pfn << 2;
69
70 sprintf(defsys_cmd, "DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
71 kernel_nss_name, stext_pfn - 1, stext_pfn, eshared_pfn - 1,
72 eshared_pfn, end_pfn);
73
74#ifdef CONFIG_BLK_DEV_INITRD
75 if (INITRD_START && INITRD_SIZE) {
76 sinitrd_pfn = PFN_DOWN(__pa(INITRD_START));
77 einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE));
78 min_size = einitrd_pfn << 2;
79 sprintf(defsys_cmd, "%s EW %.5X-%.5X", defsys_cmd,
80 sinitrd_pfn, einitrd_pfn);
81 }
82#endif
83
84 sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK", defsys_cmd, min_size);
85 sprintf(savesys_cmd, "SAVESYS %s \n IPL %s",
86 kernel_nss_name, kernel_nss_name);
87
88 __cpcmd(defsys_cmd, NULL, 0, &response);
89
90 if (response != 0)
91 return;
92
93 __cpcmd(savesys_cmd, NULL, 0, &response);
94
95 if (response != strlen(savesys_cmd))
96 return;
97
98 ipl_flags = IPL_NSS_VALID;
99}
100
101#else /* CONFIG_SHARED_KERNEL */
102
103static inline void create_kernel_nss(void) { }
104
105#endif /* CONFIG_SHARED_KERNEL */
106
107/*
108 * Clear bss memory
109 */
110static noinline __init void clear_bss_section(void)
111{
112 memset(__bss_start, 0, _end - __bss_start);
113}
114
115/*
116 * Initialize storage key for kernel pages
117 */
118static noinline __init void init_kernel_storage_key(void)
119{
120 unsigned long end_pfn, init_pfn;
121
122 end_pfn = PFN_UP(__pa(&_end));
123
124 for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
125 page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
126}
127
128static noinline __init void detect_machine_type(void)
129{
130 struct cpuinfo_S390 *cpuinfo = &S390_lowcore.cpu_data;
131
132 asm volatile("stidp %0" : "=m" (S390_lowcore.cpu_data.cpu_id));
133
134 /* Running under z/VM ? */
135 if (cpuinfo->cpu_id.version == 0xff)
136 machine_flags |= 1;
137
138 /* Running on a P/390 ? */
139 if (cpuinfo->cpu_id.machine == 0x7490)
140 machine_flags |= 4;
141}
142
143static noinline __init int memory_fast_detect(void)
144{
145
146 unsigned long val0 = 0;
147 unsigned long val1 = 0xc;
148 int ret = -ENOSYS;
149
150 if (ipl_flags & IPL_NSS_VALID)
151 return -ENOSYS;
152
153 asm volatile(
154 " diag %1,%2,0x260\n"
155 "0: lhi %0,0\n"
156 "1:\n"
157 EX_TABLE(0b,1b)
158 : "+d" (ret), "+d" (val0), "+d" (val1) : : "cc");
159
160 if (ret || val0 != val1)
161 return -ENOSYS;
162
163 memory_chunk[0].size = val0;
164 return 0;
165}
166
167#define ADDR2G (1UL << 31)
168
169static noinline __init unsigned long sclp_memory_detect(void)
170{
171 struct sclp_readinfo_sccb *sccb;
172 unsigned long long memsize;
173
174 sccb = &s390_readinfo_sccb;
175
176 if (sccb->header.response_code != 0x10)
177 return 0;
178
179 if (sccb->rnsize)
180 memsize = sccb->rnsize << 20;
181 else
182 memsize = sccb->rnsize2 << 20;
183 if (sccb->rnmax)
184 memsize *= sccb->rnmax;
185 else
186 memsize *= sccb->rnmax2;
187#ifndef CONFIG_64BIT
188 /*
189 * Can't deal with more than 2G in 31 bit addressing mode, so
190 * limit the value in order to avoid strange side effects.
191 */
192 if (memsize > ADDR2G)
193 memsize = ADDR2G;
194#endif
195 return (unsigned long) memsize;
196}
197
198static inline __init unsigned long __tprot(unsigned long addr)
199{
200 int cc = -1;
201
202 asm volatile(
203 " tprot 0(%1),0\n"
204 "0: ipm %0\n"
205 " srl %0,28\n"
206 "1:\n"
207 EX_TABLE(0b,1b)
208 : "+d" (cc) : "a" (addr) : "cc");
209 return (unsigned long)cc;
210}
211
212/* Checking memory in 128KB increments. */
213#define CHUNK_INCR (1UL << 17)
214
215static noinline __init void find_memory_chunks(unsigned long memsize)
216{
217 unsigned long addr = 0, old_addr = 0;
218 unsigned long old_cc = CHUNK_READ_WRITE;
219 unsigned long cc;
220 int chunk = 0;
221
222 while (chunk < MEMORY_CHUNKS) {
223 cc = __tprot(addr);
224 while (cc == old_cc) {
225 addr += CHUNK_INCR;
226 cc = __tprot(addr);
227#ifndef CONFIG_64BIT
228 if (addr == ADDR2G)
229 break;
230#endif
231 }
232
233 if (old_addr != addr &&
234 (old_cc == CHUNK_READ_WRITE || old_cc == CHUNK_READ_ONLY)) {
235 memory_chunk[chunk].addr = old_addr;
236 memory_chunk[chunk].size = addr - old_addr;
237 memory_chunk[chunk].type = old_cc;
238 chunk++;
239 }
240
241 old_addr = addr;
242 old_cc = cc;
243
244#ifndef CONFIG_64BIT
245 if (addr == ADDR2G)
246 break;
247#endif
248 /*
249 * Finish memory detection at the first hole, unless
250 * - we reached the hsa -> skip it.
251 * - we know there must be more.
252 */
253 if (cc == -1UL && !memsize && old_addr != ADDR2G)
254 break;
255 if (memsize && addr >= memsize)
256 break;
257 }
258}
259
260static __init void early_pgm_check_handler(void)
261{
262 unsigned long addr;
263 const struct exception_table_entry *fixup;
264
265 addr = S390_lowcore.program_old_psw.addr;
266 fixup = search_exception_tables(addr & PSW_ADDR_INSN);
267 if (!fixup)
268 disabled_wait(0);
269 S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE;
270}
271
272static noinline __init void setup_lowcore_early(void)
273{
274 psw_t psw;
275
276 psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
277 psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler;
278 S390_lowcore.external_new_psw = psw;
279 psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
280 S390_lowcore.program_new_psw = psw;
281 s390_base_pgm_handler_fn = early_pgm_check_handler;
282}
283
284/*
285 * Save ipl parameters, clear bss memory, initialize storage keys
286 * and create a kernel NSS at startup if the SAVESYS= parm is defined
287 */
288void __init startup_init(void)
289{
290 unsigned long memsize;
291
292 ipl_save_parameters();
293 clear_bss_section();
294 init_kernel_storage_key();
295 lockdep_init();
296 lockdep_off();
297 detect_machine_type();
298 create_kernel_nss();
299 sort_main_extable();
300 setup_lowcore_early();
301 sclp_readinfo_early();
302 memsize = sclp_memory_detect();
303 if (memory_fast_detect() < 0)
304 find_memory_chunks(memsize);
305 lockdep_on();
306}
diff --git a/arch/s390/kernel/ebcdic.c b/arch/s390/kernel/ebcdic.c
index bb0f973137f0..cc0dc609d738 100644
--- a/arch/s390/kernel/ebcdic.c
+++ b/arch/s390/kernel/ebcdic.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <asm/types.h> 13#include <asm/types.h>
14#include <asm/ebcdic.h>
14 15
15/* 16/*
16 * ASCII (IBM PC 437) -> EBCDIC 037 17 * ASCII (IBM PC 437) -> EBCDIC 037
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index eca507050e47..453fd3b4edea 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -51,176 +51,15 @@ startup_continue:
51 st %r15,__LC_KERNEL_STACK # set end of kernel stack 51 st %r15,__LC_KERNEL_STACK # set end of kernel stack
52 ahi %r15,-96 52 ahi %r15,-96
53 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain 53 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
54
55 l %r14,.Lipl_save_parameters-.LPG1(%r13)
56 basr %r14,%r14
57# 54#
58# clear bss memory 55# Save ipl parameters, clear bss memory, initialize storage key for kernel pages,
56# and create a kernel NSS if the SAVESYS= parm is defined
59# 57#
60 l %r2,.Lbss_bgn-.LPG1(%r13) # start of bss 58 l %r14,.Lstartup_init-.LPG1(%r13)
61 l %r3,.Lbss_end-.LPG1(%r13) # end of bss 59 basr %r14,%r14
62 sr %r3,%r2 # length of bss
63 sr %r4,%r4
64 sr %r5,%r5 # set src,length and pad to zero
65 sr %r0,%r0
66 mvcle %r2,%r4,0 # clear mem
67 jo .-4 # branch back, if not finish
68
69 l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word
70.Lservicecall:
71 stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts
72
73 stctl %r0, %r0,.Lcr-.LPG1(%r13) # get cr0
74 la %r1,0x200 # set bit 22
75 o %r1,.Lcr-.LPG1(%r13) # or old cr0 with r1
76 st %r1,.Lcr-.LPG1(%r13)
77 lctl %r0, %r0,.Lcr-.LPG1(%r13) # load modified cr0
78
79 mvc __LC_EXT_NEW_PSW(8),.Lpcext-.LPG1(%r13) # set postcall psw
80 la %r1, .Lsclph-.LPG1(%r13)
81 a %r1,__LC_EXT_NEW_PSW+4 # set handler
82 st %r1,__LC_EXT_NEW_PSW+4
83
84 l %r4,.Lsccbaddr-.LPG1(%r13) # %r4 is our index for sccb stuff
85 lr %r1,%r4 # our sccb
86 .insn rre,0xb2200000,%r2,%r1 # service call
87 ipm %r1
88 srl %r1,28 # get cc code
89 xr %r3, %r3
90 chi %r1,3
91 be .Lfchunk-.LPG1(%r13) # leave
92 chi %r1,2
93 be .Lservicecall-.LPG1(%r13)
94 lpsw .Lwaitsclp-.LPG1(%r13)
95.Lsclph:
96 lh %r1,.Lsccbr-.Lsccb(%r4)
97 chi %r1,0x10 # 0x0010 is the sucess code
98 je .Lprocsccb # let's process the sccb
99 chi %r1,0x1f0
100 bne .Lfchunk-.LPG1(%r13) # unhandled error code
101 c %r2, .Lrcp-.LPG1(%r13) # Did we try Read SCP forced
102 bne .Lfchunk-.LPG1(%r13) # if no, give up
103 l %r2, .Lrcp2-.LPG1(%r13) # try with Read SCP
104 b .Lservicecall-.LPG1(%r13)
105.Lprocsccb:
106 lhi %r1,0
107 icm %r1,3,.Lscpincr1-.Lsccb(%r4) # use this one if != 0
108 jnz .Lscnd
109 lhi %r1,0x800 # otherwise report 2GB
110.Lscnd:
111 lhi %r3,0x800 # limit reported memory size to 2GB
112 cr %r1,%r3
113 jl .Lno2gb
114 lr %r1,%r3
115.Lno2gb:
116 xr %r3,%r3 # same logic
117 ic %r3,.Lscpa1-.Lsccb(%r4)
118 chi %r3,0x00
119 jne .Lcompmem
120 l %r3,.Lscpa2-.Lsccb(%r4)
121.Lcompmem:
122 mr %r2,%r1 # mem in MB on 128-bit
123 l %r1,.Lonemb-.LPG1(%r13)
124 mr %r2,%r1 # mem size in bytes in %r3
125 b .Lfchunk-.LPG1(%r13)
126
127 .align 4
128.Lipl_save_parameters:
129 .long ipl_save_parameters
130.Linittu:
131 .long init_thread_union
132.Lpmask:
133 .byte 0
134 .align 8
135.Lpcext:.long 0x00080000,0x80000000
136.Lcr:
137 .long 0x00 # place holder for cr0
138 .align 8
139.Lwaitsclp:
140 .long 0x010a0000,0x80000000 + .Lsclph
141.Lrcp:
142 .int 0x00120001 # Read SCP forced code
143.Lrcp2:
144 .int 0x00020001 # Read SCP code
145.Lonemb:
146 .int 0x100000
147.Lfchunk:
148 60
149#
150# find memory chunks.
151#
152 lr %r9,%r3 # end of mem
153 mvc __LC_PGM_NEW_PSW(8),.Lpcmem-.LPG1(%r13)
154 la %r1,1 # test in increments of 128KB
155 sll %r1,17
156 l %r3,.Lmchunk-.LPG1(%r13) # get pointer to memory_chunk array
157 slr %r4,%r4 # set start of chunk to zero
158 slr %r5,%r5 # set end of chunk to zero
159 slr %r6,%r6 # set access code to zero
160 la %r10,MEMORY_CHUNKS # number of chunks
161.Lloop:
162 tprot 0(%r5),0 # test protection of first byte
163 ipm %r7
164 srl %r7,28
165 clr %r6,%r7 # compare cc with last access code
166 be .Lsame-.LPG1(%r13)
167 lhi %r8,0 # no program checks
168 b .Lsavchk-.LPG1(%r13)
169.Lsame:
170 ar %r5,%r1 # add 128KB to end of chunk
171 bno .Lloop-.LPG1(%r13) # r1 < 0x80000000 -> loop
172.Lchkmem: # > 2GB or tprot got a program check
173 lhi %r8,1 # set program check flag
174.Lsavchk:
175 clr %r4,%r5 # chunk size > 0?
176 be .Lchkloop-.LPG1(%r13)
177 st %r4,0(%r3) # store start address of chunk
178 lr %r0,%r5
179 slr %r0,%r4
180 st %r0,4(%r3) # store size of chunk
181 st %r6,8(%r3) # store type of chunk
182 la %r3,12(%r3)
183 ahi %r10,-1 # update chunk number
184.Lchkloop:
185 lr %r6,%r7 # set access code to last cc
186 # we got an exception or we're starting a new
187 # chunk , we must check if we should
188 # still try to find valid memory (if we detected
189 # the amount of available storage), and if we
190 # have chunks left
191 xr %r0,%r0
192 clr %r0,%r9 # did we detect memory?
193 je .Ldonemem # if not, leave
194 chi %r10,0 # do we have chunks left?
195 je .Ldonemem
196 chi %r8,1 # program check ?
197 je .Lpgmchk
198 lr %r4,%r5 # potential new chunk
199 alr %r5,%r1 # add 128KB to end of chunk
200 j .Llpcnt
201.Lpgmchk:
202 alr %r5,%r1 # add 128KB to end of chunk
203 lr %r4,%r5 # potential new chunk
204.Llpcnt:
205 clr %r5,%r9 # should we go on?
206 jl .Lloop
207.Ldonemem:
208 l %r12,.Lmflags-.LPG1(%r13) # get address of machine_flags 61 l %r12,.Lmflags-.LPG1(%r13) # get address of machine_flags
209# 62#
210# find out if we are running under VM
211#
212 stidp __LC_CPUID # store cpuid
213 tm __LC_CPUID,0xff # running under VM ?
214 bno .Lnovm-.LPG1(%r13)
215 oi 3(%r12),1 # set VM flag
216.Lnovm:
217 lh %r0,__LC_CPUID+4 # get cpu version
218 chi %r0,0x7490 # running on a P/390 ?
219 bne .Lnop390-.LPG1(%r13)
220 oi 3(%r12),4 # set P/390 flag
221.Lnop390:
222
223#
224# find out if we have an IEEE fpu 63# find out if we have an IEEE fpu
225# 64#
226 mvc __LC_PGM_NEW_PSW(8),.Lpcfpu-.LPG1(%r13) 65 mvc __LC_PGM_NEW_PSW(8),.Lpcfpu-.LPG1(%r13)
@@ -295,7 +134,6 @@ startup_continue:
295 .long 0 # cr15: linkage stack operations 134 .long 0 # cr15: linkage stack operations
296.Lduct: .long 0,0,0,0,0,0,0,0 135.Lduct: .long 0,0,0,0,0,0,0,0
297 .long 0,0,0,0,0,0,0,0 136 .long 0,0,0,0,0,0,0,0
298.Lpcmem:.long 0x00080000,0x80000000 + .Lchkmem
299.Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu 137.Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu
300.Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp 138.Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp
301.Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg 139.Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg
@@ -306,7 +144,9 @@ startup_continue:
306.Lbss_bgn: .long __bss_start 144.Lbss_bgn: .long __bss_start
307.Lbss_end: .long _end 145.Lbss_end: .long _end
308.Lparmaddr: .long PARMAREA 146.Lparmaddr: .long PARMAREA
309.Lsccbaddr: .long .Lsccb 147.Linittu: .long init_thread_union
148.Lstartup_init:
149 .long startup_init
310 150
311 .globl ipl_schib 151 .globl ipl_schib
312ipl_schib: 152ipl_schib:
@@ -322,26 +162,6 @@ ipl_devno:
322 .word 0 162 .word 0
323 163
324 .org 0x12000 164 .org 0x12000
325.globl s390_readinfo_sccb
326s390_readinfo_sccb:
327.Lsccb:
328 .hword 0x1000 # length, one page
329 .byte 0x00,0x00,0x00
330 .byte 0x80 # variable response bit set
331.Lsccbr:
332 .hword 0x00 # response code
333.Lscpincr1:
334 .hword 0x00
335.Lscpa1:
336 .byte 0x00
337 .fill 89,1,0
338.Lscpa2:
339 .int 0x00
340.Lscpincr2:
341 .quad 0x00
342 .fill 3984,1,0
343 .org 0x13000
344
345#ifdef CONFIG_SHARED_KERNEL 165#ifdef CONFIG_SHARED_KERNEL
346 .org 0x100000 166 .org 0x100000
347#endif 167#endif
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 6ba3f4512dd1..b8fec4e5c5d4 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -58,183 +58,15 @@ startup_continue:
58 stg %r15,__LC_KERNEL_STACK # set end of kernel stack 58 stg %r15,__LC_KERNEL_STACK # set end of kernel stack
59 aghi %r15,-160 59 aghi %r15,-160
60 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain 60 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
61
62 brasl %r14,ipl_save_parameters
63# 61#
64# clear bss memory 62# Save ipl parameters, clear bss memory, initialize storage key for kernel pages,
63# and create a kernel NSS if the SAVESYS= parm is defined
65# 64#
66 larl %r2,__bss_start # start of bss segment 65 brasl %r14,startup_init
67 larl %r3,_end # end of bss segment
68 sgr %r3,%r2 # length of bss
69 sgr %r4,%r4 #
70 sgr %r5,%r5 # set src,length and pad to zero
71 mvcle %r2,%r4,0 # clear mem
72 jo .-4 # branch back, if not finish
73 # set program check new psw mask 66 # set program check new psw mask
74 mvc __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) 67 mvc __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13)
75 larl %r1,.Lslowmemdetect # set program check address
76 stg %r1,__LC_PGM_NEW_PSW+8
77 lghi %r1,0xc
78 diag %r0,%r1,0x260 # get memory size of virtual machine
79 cgr %r0,%r1 # different? -> old detection routine
80 jne .Lslowmemdetect
81 aghi %r1,1 # size is one more than end
82 larl %r2,memory_chunk
83 stg %r1,8(%r2) # store size of chunk
84 j .Ldonemem
85
86.Lslowmemdetect:
87 l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word
88.Lservicecall:
89 stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts
90
91 stctg %r0,%r0,.Lcr-.LPG1(%r13) # get cr0
92 la %r1,0x200 # set bit 22
93 og %r1,.Lcr-.LPG1(%r13) # or old cr0 with r1
94 stg %r1,.Lcr-.LPG1(%r13)
95 lctlg %r0,%r0,.Lcr-.LPG1(%r13) # load modified cr0
96
97 mvc __LC_EXT_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) # set postcall psw
98 larl %r1,.Lsclph
99 stg %r1,__LC_EXT_NEW_PSW+8 # set handler
100
101 larl %r4,.Lsccb # %r4 is our index for sccb stuff
102 lgr %r1,%r4 # our sccb
103 .insn rre,0xb2200000,%r2,%r1 # service call
104 ipm %r1
105 srl %r1,28 # get cc code
106 xr %r3,%r3
107 chi %r1,3
108 be .Lfchunk-.LPG1(%r13) # leave
109 chi %r1,2
110 be .Lservicecall-.LPG1(%r13)
111 lpswe .Lwaitsclp-.LPG1(%r13)
112.Lsclph:
113 lh %r1,.Lsccbr-.Lsccb(%r4)
114 chi %r1,0x10 # 0x0010 is the sucess code
115 je .Lprocsccb # let's process the sccb
116 chi %r1,0x1f0
117 bne .Lfchunk-.LPG1(%r13) # unhandled error code
118 c %r2,.Lrcp-.LPG1(%r13) # Did we try Read SCP forced
119 bne .Lfchunk-.LPG1(%r13) # if no, give up
120 l %r2,.Lrcp2-.LPG1(%r13) # try with Read SCP
121 b .Lservicecall-.LPG1(%r13)
122.Lprocsccb:
123 lghi %r1,0
124 icm %r1,3,.Lscpincr1-.Lsccb(%r4) # use this one if != 0
125 jnz .Lscnd
126 lg %r1,.Lscpincr2-.Lsccb(%r4) # otherwise use this one
127.Lscnd:
128 xr %r3,%r3 # same logic
129 ic %r3,.Lscpa1-.Lsccb(%r4)
130 chi %r3,0x00
131 jne .Lcompmem
132 l %r3,.Lscpa2-.Lsccb(%r4)
133.Lcompmem:
134 mlgr %r2,%r1 # mem in MB on 128-bit
135 l %r1,.Lonemb-.LPG1(%r13)
136 mlgr %r2,%r1 # mem size in bytes in %r3
137 b .Lfchunk-.LPG1(%r13)
138
139 .align 4
140.Lpmask:
141 .byte 0
142 .align 8
143.Lcr:
144 .quad 0x00 # place holder for cr0
145.Lwaitsclp:
146 .quad 0x0102000180000000,.Lsclph
147.Lrcp:
148 .int 0x00120001 # Read SCP forced code
149.Lrcp2:
150 .int 0x00020001 # Read SCP code
151.Lonemb:
152 .int 0x100000
153
154.Lfchunk:
155
156#
157# find memory chunks.
158#
159 lgr %r9,%r3 # end of mem
160 larl %r1,.Lchkmem # set program check address
161 stg %r1,__LC_PGM_NEW_PSW+8
162 la %r1,1 # test in increments of 128KB
163 sllg %r1,%r1,17
164 larl %r3,memory_chunk
165 slgr %r4,%r4 # set start of chunk to zero
166 slgr %r5,%r5 # set end of chunk to zero
167 slr %r6,%r6 # set access code to zero
168 la %r10,MEMORY_CHUNKS # number of chunks
169.Lloop:
170 tprot 0(%r5),0 # test protection of first byte
171 ipm %r7
172 srl %r7,28
173 clr %r6,%r7 # compare cc with last access code
174 je .Lsame
175 lghi %r8,0 # no program checks
176 j .Lsavchk
177.Lsame:
178 algr %r5,%r1 # add 128KB to end of chunk
179 # no need to check here,
180 brc 12,.Lloop # this is the same chunk
181.Lchkmem: # > 16EB or tprot got a program check
182 lghi %r8,1 # set program check flag
183.Lsavchk:
184 clgr %r4,%r5 # chunk size > 0?
185 je .Lchkloop
186 stg %r4,0(%r3) # store start address of chunk
187 lgr %r0,%r5
188 slgr %r0,%r4
189 stg %r0,8(%r3) # store size of chunk
190 st %r6,20(%r3) # store type of chunk
191 la %r3,24(%r3)
192 ahi %r10,-1 # update chunk number
193.Lchkloop:
194 lr %r6,%r7 # set access code to last cc
195 # we got an exception or we're starting a new
196 # chunk , we must check if we should
197 # still try to find valid memory (if we detected
198 # the amount of available storage), and if we
199 # have chunks left
200 lghi %r4,1
201 sllg %r4,%r4,31
202 clgr %r5,%r4
203 je .Lhsaskip
204 xr %r0, %r0
205 clgr %r0, %r9 # did we detect memory?
206 je .Ldonemem # if not, leave
207 chi %r10, 0 # do we have chunks left?
208 je .Ldonemem
209.Lhsaskip:
210 chi %r8,1 # program check ?
211 je .Lpgmchk
212 lgr %r4,%r5 # potential new chunk
213 algr %r5,%r1 # add 128KB to end of chunk
214 j .Llpcnt
215.Lpgmchk:
216 algr %r5,%r1 # add 128KB to end of chunk
217 lgr %r4,%r5 # potential new chunk
218.Llpcnt:
219 clgr %r5,%r9 # should we go on?
220 jl .Lloop
221.Ldonemem:
222
223 larl %r12,machine_flags 68 larl %r12,machine_flags
224# 69#
225# find out if we are running under VM
226#
227 stidp __LC_CPUID # store cpuid
228 tm __LC_CPUID,0xff # running under VM ?
229 bno 0f-.LPG1(%r13)
230 oi 7(%r12),1 # set VM flag
2310: lh %r0,__LC_CPUID+4 # get cpu version
232 chi %r0,0x7490 # running on a P/390 ?
233 bne 1f-.LPG1(%r13)
234 oi 7(%r12),4 # set P/390 flag
2351:
236
237#
238# find out if we have the MVPG instruction 70# find out if we have the MVPG instruction
239# 71#
240 la %r1,0f-.LPG1(%r13) # set program check address 72 la %r1,0f-.LPG1(%r13) # set program check address
@@ -336,25 +168,6 @@ ipl_devno:
336 .word 0 168 .word 0
337 169
338 .org 0x12000 170 .org 0x12000
339.globl s390_readinfo_sccb
340s390_readinfo_sccb:
341.Lsccb:
342 .hword 0x1000 # length, one page
343 .byte 0x00,0x00,0x00
344 .byte 0x80 # variable response bit set
345.Lsccbr:
346 .hword 0x00 # response code
347.Lscpincr1:
348 .hword 0x00
349.Lscpa1:
350 .byte 0x00
351 .fill 89,1,0
352.Lscpa2:
353 .int 0x00
354.Lscpincr2:
355 .quad 0x00
356 .fill 3984,1,0
357 .org 0x13000
358 171
359#ifdef CONFIG_SHARED_KERNEL 172#ifdef CONFIG_SHARED_KERNEL
360 .org 0x100000 173 .org 0x100000
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 9e9972e8a52b..052259530651 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -20,26 +20,27 @@
20#include <asm/cio.h> 20#include <asm/cio.h>
21#include <asm/ebcdic.h> 21#include <asm/ebcdic.h>
22#include <asm/reset.h> 22#include <asm/reset.h>
23#include <asm/sclp.h>
23 24
24#define IPL_PARM_BLOCK_VERSION 0 25#define IPL_PARM_BLOCK_VERSION 0
25#define LOADPARM_LEN 8
26 26
27extern char s390_readinfo_sccb[]; 27#define SCCB_VALID (s390_readinfo_sccb.header.response_code == 0x10)
28#define SCCB_VALID (*((__u16*)&s390_readinfo_sccb[6]) == 0x0010) 28#define SCCB_LOADPARM (&s390_readinfo_sccb.loadparm)
29#define SCCB_LOADPARM (&s390_readinfo_sccb[24]) 29#define SCCB_FLAG (s390_readinfo_sccb.flags)
30#define SCCB_FLAG (s390_readinfo_sccb[91])
31 30
32enum ipl_type { 31enum ipl_type {
33 IPL_TYPE_NONE = 1, 32 IPL_TYPE_NONE = 1,
34 IPL_TYPE_UNKNOWN = 2, 33 IPL_TYPE_UNKNOWN = 2,
35 IPL_TYPE_CCW = 4, 34 IPL_TYPE_CCW = 4,
36 IPL_TYPE_FCP = 8, 35 IPL_TYPE_FCP = 8,
36 IPL_TYPE_NSS = 16,
37}; 37};
38 38
39#define IPL_NONE_STR "none" 39#define IPL_NONE_STR "none"
40#define IPL_UNKNOWN_STR "unknown" 40#define IPL_UNKNOWN_STR "unknown"
41#define IPL_CCW_STR "ccw" 41#define IPL_CCW_STR "ccw"
42#define IPL_FCP_STR "fcp" 42#define IPL_FCP_STR "fcp"
43#define IPL_NSS_STR "nss"
43 44
44static char *ipl_type_str(enum ipl_type type) 45static char *ipl_type_str(enum ipl_type type)
45{ 46{
@@ -50,6 +51,8 @@ static char *ipl_type_str(enum ipl_type type)
50 return IPL_CCW_STR; 51 return IPL_CCW_STR;
51 case IPL_TYPE_FCP: 52 case IPL_TYPE_FCP:
52 return IPL_FCP_STR; 53 return IPL_FCP_STR;
54 case IPL_TYPE_NSS:
55 return IPL_NSS_STR;
53 case IPL_TYPE_UNKNOWN: 56 case IPL_TYPE_UNKNOWN:
54 default: 57 default:
55 return IPL_UNKNOWN_STR; 58 return IPL_UNKNOWN_STR;
@@ -64,6 +67,7 @@ enum ipl_method {
64 IPL_METHOD_FCP_RO_DIAG, 67 IPL_METHOD_FCP_RO_DIAG,
65 IPL_METHOD_FCP_RW_DIAG, 68 IPL_METHOD_FCP_RW_DIAG,
66 IPL_METHOD_FCP_RO_VM, 69 IPL_METHOD_FCP_RO_VM,
70 IPL_METHOD_NSS,
67}; 71};
68 72
69enum shutdown_action { 73enum shutdown_action {
@@ -114,11 +118,14 @@ enum diag308_rc {
114static int diag308_set_works = 0; 118static int diag308_set_works = 0;
115 119
116static int reipl_capabilities = IPL_TYPE_UNKNOWN; 120static int reipl_capabilities = IPL_TYPE_UNKNOWN;
121
117static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN; 122static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN;
118static enum ipl_method reipl_method = IPL_METHOD_NONE; 123static enum ipl_method reipl_method = IPL_METHOD_NONE;
119static struct ipl_parameter_block *reipl_block_fcp; 124static struct ipl_parameter_block *reipl_block_fcp;
120static struct ipl_parameter_block *reipl_block_ccw; 125static struct ipl_parameter_block *reipl_block_ccw;
121 126
127static char reipl_nss_name[NSS_NAME_SIZE + 1];
128
122static int dump_capabilities = IPL_TYPE_NONE; 129static int dump_capabilities = IPL_TYPE_NONE;
123static enum ipl_type dump_type = IPL_TYPE_NONE; 130static enum ipl_type dump_type = IPL_TYPE_NONE;
124static enum ipl_method dump_method = IPL_METHOD_NONE; 131static enum ipl_method dump_method = IPL_METHOD_NONE;
@@ -173,6 +180,24 @@ static struct subsys_attribute sys_##_prefix##_##_name##_attr = \
173 sys_##_prefix##_##_name##_show, \ 180 sys_##_prefix##_##_name##_show, \
174 sys_##_prefix##_##_name##_store); 181 sys_##_prefix##_##_name##_store);
175 182
183#define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\
184static ssize_t sys_##_prefix##_##_name##_show(struct subsystem *subsys, \
185 char *page) \
186{ \
187 return sprintf(page, _fmt_out, _value); \
188} \
189static ssize_t sys_##_prefix##_##_name##_store(struct subsystem *subsys,\
190 const char *buf, size_t len) \
191{ \
192 if (sscanf(buf, _fmt_in, _value) != 1) \
193 return -EINVAL; \
194 return len; \
195} \
196static struct subsys_attribute sys_##_prefix##_##_name##_attr = \
197 __ATTR(_name,(S_IRUGO | S_IWUSR), \
198 sys_##_prefix##_##_name##_show, \
199 sys_##_prefix##_##_name##_store);
200
176static void make_attrs_ro(struct attribute **attrs) 201static void make_attrs_ro(struct attribute **attrs)
177{ 202{
178 while (*attrs) { 203 while (*attrs) {
@@ -189,6 +214,8 @@ static enum ipl_type ipl_get_type(void)
189{ 214{
190 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; 215 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
191 216
217 if (ipl_flags & IPL_NSS_VALID)
218 return IPL_TYPE_NSS;
192 if (!(ipl_flags & IPL_DEVNO_VALID)) 219 if (!(ipl_flags & IPL_DEVNO_VALID))
193 return IPL_TYPE_UNKNOWN; 220 return IPL_TYPE_UNKNOWN;
194 if (!(ipl_flags & IPL_PARMBLOCK_VALID)) 221 if (!(ipl_flags & IPL_PARMBLOCK_VALID))
@@ -324,6 +351,20 @@ static struct attribute_group ipl_ccw_attr_group = {
324 .attrs = ipl_ccw_attrs, 351 .attrs = ipl_ccw_attrs,
325}; 352};
326 353
354/* NSS ipl device attributes */
355
356DEFINE_IPL_ATTR_RO(ipl_nss, name, "%s\n", kernel_nss_name);
357
358static struct attribute *ipl_nss_attrs[] = {
359 &sys_ipl_type_attr.attr,
360 &sys_ipl_nss_name_attr.attr,
361 NULL,
362};
363
364static struct attribute_group ipl_nss_attr_group = {
365 .attrs = ipl_nss_attrs,
366};
367
327/* UNKNOWN ipl device attributes */ 368/* UNKNOWN ipl device attributes */
328 369
329static struct attribute *ipl_unknown_attrs[] = { 370static struct attribute *ipl_unknown_attrs[] = {
@@ -432,6 +473,21 @@ static struct attribute_group reipl_ccw_attr_group = {
432 .attrs = reipl_ccw_attrs, 473 .attrs = reipl_ccw_attrs,
433}; 474};
434 475
476
477/* NSS reipl device attributes */
478
479DEFINE_IPL_ATTR_STR_RW(reipl_nss, name, "%s\n", "%s\n", reipl_nss_name);
480
481static struct attribute *reipl_nss_attrs[] = {
482 &sys_reipl_nss_name_attr.attr,
483 NULL,
484};
485
486static struct attribute_group reipl_nss_attr_group = {
487 .name = IPL_NSS_STR,
488 .attrs = reipl_nss_attrs,
489};
490
435/* reipl type */ 491/* reipl type */
436 492
437static int reipl_set_type(enum ipl_type type) 493static int reipl_set_type(enum ipl_type type)
@@ -454,6 +510,9 @@ static int reipl_set_type(enum ipl_type type)
454 else 510 else
455 reipl_method = IPL_METHOD_FCP_RO_DIAG; 511 reipl_method = IPL_METHOD_FCP_RO_DIAG;
456 break; 512 break;
513 case IPL_TYPE_NSS:
514 reipl_method = IPL_METHOD_NSS;
515 break;
457 default: 516 default:
458 reipl_method = IPL_METHOD_NONE; 517 reipl_method = IPL_METHOD_NONE;
459 } 518 }
@@ -475,6 +534,8 @@ static ssize_t reipl_type_store(struct subsystem *subsys, const char *buf,
475 rc = reipl_set_type(IPL_TYPE_CCW); 534 rc = reipl_set_type(IPL_TYPE_CCW);
476 else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0) 535 else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0)
477 rc = reipl_set_type(IPL_TYPE_FCP); 536 rc = reipl_set_type(IPL_TYPE_FCP);
537 else if (strncmp(buf, IPL_NSS_STR, strlen(IPL_NSS_STR)) == 0)
538 rc = reipl_set_type(IPL_TYPE_NSS);
478 return (rc != 0) ? rc : len; 539 return (rc != 0) ? rc : len;
479} 540}
480 541
@@ -647,6 +708,10 @@ void do_reipl(void)
647 case IPL_METHOD_FCP_RO_VM: 708 case IPL_METHOD_FCP_RO_VM:
648 __cpcmd("IPL", NULL, 0, NULL); 709 __cpcmd("IPL", NULL, 0, NULL);
649 break; 710 break;
711 case IPL_METHOD_NSS:
712 sprintf(buf, "IPL %s", reipl_nss_name);
713 __cpcmd(buf, NULL, 0, NULL);
714 break;
650 case IPL_METHOD_NONE: 715 case IPL_METHOD_NONE:
651 default: 716 default:
652 if (MACHINE_IS_VM) 717 if (MACHINE_IS_VM)
@@ -733,6 +798,10 @@ static int __init ipl_init(void)
733 case IPL_TYPE_FCP: 798 case IPL_TYPE_FCP:
734 rc = ipl_register_fcp_files(); 799 rc = ipl_register_fcp_files();
735 break; 800 break;
801 case IPL_TYPE_NSS:
802 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
803 &ipl_nss_attr_group);
804 break;
736 default: 805 default:
737 rc = sysfs_create_group(&ipl_subsys.kset.kobj, 806 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
738 &ipl_unknown_attr_group); 807 &ipl_unknown_attr_group);
@@ -755,6 +824,20 @@ static void __init reipl_probe(void)
755 free_page((unsigned long)buffer); 824 free_page((unsigned long)buffer);
756} 825}
757 826
827static int __init reipl_nss_init(void)
828{
829 int rc;
830
831 if (!MACHINE_IS_VM)
832 return 0;
833 rc = sysfs_create_group(&reipl_subsys.kset.kobj, &reipl_nss_attr_group);
834 if (rc)
835 return rc;
836 strncpy(reipl_nss_name, kernel_nss_name, NSS_NAME_SIZE + 1);
837 reipl_capabilities |= IPL_TYPE_NSS;
838 return 0;
839}
840
758static int __init reipl_ccw_init(void) 841static int __init reipl_ccw_init(void)
759{ 842{
760 int rc; 843 int rc;
@@ -837,6 +920,9 @@ static int __init reipl_init(void)
837 rc = reipl_fcp_init(); 920 rc = reipl_fcp_init();
838 if (rc) 921 if (rc)
839 return rc; 922 return rc;
923 rc = reipl_nss_init();
924 if (rc)
925 return rc;
840 rc = reipl_set_type(ipl_get_type()); 926 rc = reipl_set_type(ipl_get_type());
841 if (rc) 927 if (rc)
842 return rc; 928 return rc;
@@ -993,8 +1079,6 @@ static void do_reset_calls(void)
993 reset->fn(); 1079 reset->fn();
994} 1080}
995 1081
996extern void reset_mcck_handler(void);
997extern void reset_pgm_handler(void);
998extern __u32 dump_prefix_page; 1082extern __u32 dump_prefix_page;
999 1083
1000void s390_reset_system(void) 1084void s390_reset_system(void)
@@ -1016,14 +1100,14 @@ void s390_reset_system(void)
1016 __ctl_clear_bit(0,28); 1100 __ctl_clear_bit(0,28);
1017 1101
1018 /* Set new machine check handler */ 1102 /* Set new machine check handler */
1019 S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_MCHECK; 1103 S390_lowcore.mcck_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
1020 S390_lowcore.mcck_new_psw.addr = 1104 S390_lowcore.mcck_new_psw.addr =
1021 PSW_ADDR_AMODE | (unsigned long) &reset_mcck_handler; 1105 PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler;
1022 1106
1023 /* Set new program check handler */ 1107 /* Set new program check handler */
1024 S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_MCHECK; 1108 S390_lowcore.program_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
1025 S390_lowcore.program_new_psw.addr = 1109 S390_lowcore.program_new_psw.addr =
1026 PSW_ADDR_AMODE | (unsigned long) &reset_pgm_handler; 1110 PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
1027 1111
1028 do_reset_calls(); 1112 do_reset_calls();
1029} 1113}
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 1eef50918615..8f0cbca31203 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -1,9 +1,9 @@
1/* 1/*
2 * arch/s390/kernel/irq.c 2 * arch/s390/kernel/irq.c
3 * 3 *
4 * S390 version 4 * Copyright IBM Corp. 2004,2007
5 * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
6 * Thomas Spatzier (tspat@de.ibm.com)
7 * 7 *
8 * This file contains interrupt related functions. 8 * This file contains interrupt related functions.
9 */ 9 */
@@ -14,6 +14,8 @@
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/seq_file.h> 15#include <linux/seq_file.h>
16#include <linux/cpu.h> 16#include <linux/cpu.h>
17#include <linux/proc_fs.h>
18#include <linux/profile.h>
17 19
18/* 20/*
19 * show_interrupts is needed by /proc/interrupts. 21 * show_interrupts is needed by /proc/interrupts.
@@ -93,5 +95,12 @@ asmlinkage void do_softirq(void)
93 95
94 local_irq_restore(flags); 96 local_irq_restore(flags);
95} 97}
96
97EXPORT_SYMBOL(do_softirq); 98EXPORT_SYMBOL(do_softirq);
99
100void init_irq_proc(void)
101{
102 struct proc_dir_entry *root_irq_dir;
103
104 root_irq_dir = proc_mkdir("irq", NULL);
105 create_prof_cpu_mask(root_irq_dir);
106}
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 576368c4f605..a466bab6677e 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -155,15 +155,34 @@ void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
155static int __kprobes swap_instruction(void *aref) 155static int __kprobes swap_instruction(void *aref)
156{ 156{
157 struct ins_replace_args *args = aref; 157 struct ins_replace_args *args = aref;
158 u32 *addr;
159 u32 instr;
158 int err = -EFAULT; 160 int err = -EFAULT;
159 161
162 /*
163 * Text segment is read-only, hence we use stura to bypass dynamic
164 * address translation to exchange the instruction. Since stura
165 * always operates on four bytes, but we only want to exchange two
166 * bytes do some calculations to get things right. In addition we
167 * shall not cross any page boundaries (vmalloc area!) when writing
168 * the new instruction.
169 */
170 addr = (u32 *)ALIGN((unsigned long)args->ptr, 4);
171 if ((unsigned long)args->ptr & 2)
172 instr = ((*addr) & 0xffff0000) | args->new;
173 else
174 instr = ((*addr) & 0x0000ffff) | args->new << 16;
175
160 asm volatile( 176 asm volatile(
161 "0: mvc 0(2,%2),0(%3)\n" 177 " lra %1,0(%1)\n"
162 "1: la %0,0\n" 178 "0: stura %2,%1\n"
179 "1: la %0,0\n"
163 "2:\n" 180 "2:\n"
164 EX_TABLE(0b,2b) 181 EX_TABLE(0b,2b)
165 : "+d" (err), "=m" (*args->ptr) 182 : "+d" (err)
166 : "a" (args->ptr), "a" (&args->new), "m" (args->new)); 183 : "a" (addr), "d" (instr)
184 : "memory", "cc");
185
167 return err; 186 return err;
168} 187}
169 188
@@ -356,7 +375,7 @@ no_kprobe:
356 * - When the probed function returns, this probe 375 * - When the probed function returns, this probe
357 * causes the handlers to fire 376 * causes the handlers to fire
358 */ 377 */
359void __kprobes kretprobe_trampoline_holder(void) 378void kretprobe_trampoline_holder(void)
360{ 379{
361 asm volatile(".global kretprobe_trampoline\n" 380 asm volatile(".global kretprobe_trampoline\n"
362 "kretprobe_trampoline: bcr 0,0\n"); 381 "kretprobe_trampoline: bcr 0,0\n");
@@ -365,7 +384,8 @@ void __kprobes kretprobe_trampoline_holder(void)
365/* 384/*
366 * Called when the probe at kretprobe trampoline is hit 385 * Called when the probe at kretprobe trampoline is hit
367 */ 386 */
368int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) 387static int __kprobes trampoline_probe_handler(struct kprobe *p,
388 struct pt_regs *regs)
369{ 389{
370 struct kretprobe_instance *ri = NULL; 390 struct kretprobe_instance *ri = NULL;
371 struct hlist_head *head, empty_rp; 391 struct hlist_head *head, empty_rp;
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index f6d9bcc0f75b..52f57af252b4 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -11,6 +11,7 @@
11#include <linux/mm.h> 11#include <linux/mm.h>
12#include <linux/kexec.h> 12#include <linux/kexec.h>
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/reboot.h>
14#include <asm/cio.h> 15#include <asm/cio.h>
15#include <asm/setup.h> 16#include <asm/setup.h>
16#include <asm/pgtable.h> 17#include <asm/pgtable.h>
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index d989ed45a7aa..39d1dd752529 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -30,6 +30,7 @@
30#include <linux/fs.h> 30#include <linux/fs.h>
31#include <linux/string.h> 31#include <linux/string.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/moduleloader.h>
33 34
34#if 0 35#if 0
35#define DEBUGP printk 36#define DEBUGP printk
@@ -58,7 +59,7 @@ void module_free(struct module *mod, void *module_region)
58 table entries. */ 59 table entries. */
59} 60}
60 61
61static inline void 62static void
62check_rela(Elf_Rela *rela, struct module *me) 63check_rela(Elf_Rela *rela, struct module *me)
63{ 64{
64 struct mod_arch_syminfo *info; 65 struct mod_arch_syminfo *info;
@@ -181,7 +182,7 @@ apply_relocate(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex,
181 return -ENOEXEC; 182 return -ENOEXEC;
182} 183}
183 184
184static inline int 185static int
185apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, 186apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
186 struct module *me) 187 struct module *me)
187{ 188{
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 6603fbb41d07..5acfac654f9d 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -144,7 +144,7 @@ static void default_idle(void)
144 144
145 trace_hardirqs_on(); 145 trace_hardirqs_on();
146 /* Wait for external, I/O or machine check interrupt. */ 146 /* Wait for external, I/O or machine check interrupt. */
147 __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_WAIT | 147 __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
148 PSW_MASK_IO | PSW_MASK_EXT); 148 PSW_MASK_IO | PSW_MASK_EXT);
149} 149}
150 150
@@ -190,7 +190,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
190 struct pt_regs regs; 190 struct pt_regs regs;
191 191
192 memset(&regs, 0, sizeof(regs)); 192 memset(&regs, 0, sizeof(regs));
193 regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT; 193 regs.psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
194 regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE; 194 regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE;
195 regs.gprs[9] = (unsigned long) fn; 195 regs.gprs[9] = (unsigned long) fn;
196 regs.gprs[10] = (unsigned long) arg; 196 regs.gprs[10] = (unsigned long) arg;
diff --git a/arch/s390/kernel/profile.c b/arch/s390/kernel/profile.c
deleted file mode 100644
index b81aa1f569ca..000000000000
--- a/arch/s390/kernel/profile.c
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 * arch/s390/kernel/profile.c
3 *
4 * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Thomas Spatzier (tspat@de.ibm.com)
6 *
7 */
8#include <linux/proc_fs.h>
9#include <linux/profile.h>
10
11static struct proc_dir_entry * root_irq_dir;
12
13void init_irq_proc(void)
14{
15 /* create /proc/irq */
16 root_irq_dir = proc_mkdir("irq", NULL);
17
18 /* create /proc/irq/prof_cpu_mask */
19 create_prof_cpu_mask(root_irq_dir);
20}
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 8f36504075ed..2a8f0872ea8b 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -86,15 +86,13 @@ FixPerRegisters(struct task_struct *task)
86 per_info->control_regs.bits.storage_alt_space_ctl = 0; 86 per_info->control_regs.bits.storage_alt_space_ctl = 0;
87} 87}
88 88
89void 89static void set_single_step(struct task_struct *task)
90set_single_step(struct task_struct *task)
91{ 90{
92 task->thread.per_info.single_step = 1; 91 task->thread.per_info.single_step = 1;
93 FixPerRegisters(task); 92 FixPerRegisters(task);
94} 93}
95 94
96void 95static void clear_single_step(struct task_struct *task)
97clear_single_step(struct task_struct *task)
98{ 96{
99 task->thread.per_info.single_step = 0; 97 task->thread.per_info.single_step = 0;
100 FixPerRegisters(task); 98 FixPerRegisters(task);
@@ -232,9 +230,9 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
232 */ 230 */
233 if (addr == (addr_t) &dummy->regs.psw.mask && 231 if (addr == (addr_t) &dummy->regs.psw.mask &&
234#ifdef CONFIG_COMPAT 232#ifdef CONFIG_COMPAT
235 data != PSW_MASK_MERGE(PSW_USER32_BITS, data) && 233 data != PSW_MASK_MERGE(psw_user32_bits, data) &&
236#endif 234#endif
237 data != PSW_MASK_MERGE(PSW_USER_BITS, data)) 235 data != PSW_MASK_MERGE(psw_user_bits, data))
238 /* Invalid psw mask. */ 236 /* Invalid psw mask. */
239 return -EINVAL; 237 return -EINVAL;
240#ifndef CONFIG_64BIT 238#ifndef CONFIG_64BIT
@@ -309,7 +307,7 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
309 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); 307 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
310 if (copied != sizeof(tmp)) 308 if (copied != sizeof(tmp))
311 return -EIO; 309 return -EIO;
312 return put_user(tmp, (unsigned long __user *) data); 310 return put_user(tmp, (unsigned long __force __user *) data);
313 311
314 case PTRACE_PEEKUSR: 312 case PTRACE_PEEKUSR:
315 /* read the word at location addr in the USER area. */ 313 /* read the word at location addr in the USER area. */
@@ -331,7 +329,7 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
331 329
332 case PTRACE_PEEKUSR_AREA: 330 case PTRACE_PEEKUSR_AREA:
333 case PTRACE_POKEUSR_AREA: 331 case PTRACE_POKEUSR_AREA:
334 if (copy_from_user(&parea, (void __user *) addr, 332 if (copy_from_user(&parea, (void __force __user *) addr,
335 sizeof(parea))) 333 sizeof(parea)))
336 return -EFAULT; 334 return -EFAULT;
337 addr = parea.kernel_addr; 335 addr = parea.kernel_addr;
@@ -341,10 +339,11 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
341 if (request == PTRACE_PEEKUSR_AREA) 339 if (request == PTRACE_PEEKUSR_AREA)
342 ret = peek_user(child, addr, data); 340 ret = peek_user(child, addr, data);
343 else { 341 else {
344 addr_t tmp; 342 addr_t utmp;
345 if (get_user (tmp, (addr_t __user *) data)) 343 if (get_user(utmp,
344 (addr_t __force __user *) data))
346 return -EFAULT; 345 return -EFAULT;
347 ret = poke_user(child, addr, tmp); 346 ret = poke_user(child, addr, utmp);
348 } 347 }
349 if (ret) 348 if (ret)
350 return ret; 349 return ret;
@@ -394,7 +393,7 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
394 if (addr == (addr_t) &dummy32->regs.psw.mask) { 393 if (addr == (addr_t) &dummy32->regs.psw.mask) {
395 /* Fake a 31 bit psw mask. */ 394 /* Fake a 31 bit psw mask. */
396 tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32); 395 tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32);
397 tmp = PSW32_MASK_MERGE(PSW32_USER_BITS, tmp); 396 tmp = PSW32_MASK_MERGE(psw32_user_bits, tmp);
398 } else if (addr == (addr_t) &dummy32->regs.psw.addr) { 397 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
399 /* Fake a 31 bit psw address. */ 398 /* Fake a 31 bit psw address. */
400 tmp = (__u32) task_pt_regs(child)->psw.addr | 399 tmp = (__u32) task_pt_regs(child)->psw.addr |
@@ -469,11 +468,11 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
469 */ 468 */
470 if (addr == (addr_t) &dummy32->regs.psw.mask) { 469 if (addr == (addr_t) &dummy32->regs.psw.mask) {
471 /* Build a 64 bit psw mask from 31 bit mask. */ 470 /* Build a 64 bit psw mask from 31 bit mask. */
472 if (tmp != PSW32_MASK_MERGE(PSW32_USER_BITS, tmp)) 471 if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp))
473 /* Invalid psw mask. */ 472 /* Invalid psw mask. */
474 return -EINVAL; 473 return -EINVAL;
475 task_pt_regs(child)->psw.mask = 474 task_pt_regs(child)->psw.mask =
476 PSW_MASK_MERGE(PSW_USER32_BITS, (__u64) tmp << 32); 475 PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32);
477 } else if (addr == (addr_t) &dummy32->regs.psw.addr) { 476 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
478 /* Build a 64 bit psw address from 31 bit address. */ 477 /* Build a 64 bit psw address from 31 bit address. */
479 task_pt_regs(child)->psw.addr = 478 task_pt_regs(child)->psw.addr =
@@ -550,7 +549,7 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
550 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); 549 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
551 if (copied != sizeof(tmp)) 550 if (copied != sizeof(tmp))
552 return -EIO; 551 return -EIO;
553 return put_user(tmp, (unsigned int __user *) data); 552 return put_user(tmp, (unsigned int __force __user *) data);
554 553
555 case PTRACE_PEEKUSR: 554 case PTRACE_PEEKUSR:
556 /* read the word at location addr in the USER area. */ 555 /* read the word at location addr in the USER area. */
@@ -571,7 +570,7 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
571 570
572 case PTRACE_PEEKUSR_AREA: 571 case PTRACE_PEEKUSR_AREA:
573 case PTRACE_POKEUSR_AREA: 572 case PTRACE_POKEUSR_AREA:
574 if (copy_from_user(&parea, (void __user *) addr, 573 if (copy_from_user(&parea, (void __force __user *) addr,
575 sizeof(parea))) 574 sizeof(parea)))
576 return -EFAULT; 575 return -EFAULT;
577 addr = parea.kernel_addr; 576 addr = parea.kernel_addr;
@@ -581,10 +580,11 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
581 if (request == PTRACE_PEEKUSR_AREA) 580 if (request == PTRACE_PEEKUSR_AREA)
582 ret = peek_user_emu31(child, addr, data); 581 ret = peek_user_emu31(child, addr, data);
583 else { 582 else {
584 __u32 tmp; 583 __u32 utmp;
585 if (get_user (tmp, (__u32 __user *) data)) 584 if (get_user(utmp,
585 (__u32 __force __user *) data))
586 return -EFAULT; 586 return -EFAULT;
587 ret = poke_user_emu31(child, addr, tmp); 587 ret = poke_user_emu31(child, addr, utmp);
588 } 588 }
589 if (ret) 589 if (ret)
590 return ret; 590 return ret;
@@ -595,17 +595,19 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
595 return 0; 595 return 0;
596 case PTRACE_GETEVENTMSG: 596 case PTRACE_GETEVENTMSG:
597 return put_user((__u32) child->ptrace_message, 597 return put_user((__u32) child->ptrace_message,
598 (unsigned int __user *) data); 598 (unsigned int __force __user *) data);
599 case PTRACE_GETSIGINFO: 599 case PTRACE_GETSIGINFO:
600 if (child->last_siginfo == NULL) 600 if (child->last_siginfo == NULL)
601 return -EINVAL; 601 return -EINVAL;
602 return copy_siginfo_to_user32((compat_siginfo_t __user *) data, 602 return copy_siginfo_to_user32((compat_siginfo_t
603 __force __user *) data,
603 child->last_siginfo); 604 child->last_siginfo);
604 case PTRACE_SETSIGINFO: 605 case PTRACE_SETSIGINFO:
605 if (child->last_siginfo == NULL) 606 if (child->last_siginfo == NULL)
606 return -EINVAL; 607 return -EINVAL;
607 return copy_siginfo_from_user32(child->last_siginfo, 608 return copy_siginfo_from_user32(child->last_siginfo,
608 (compat_siginfo_t __user *) data); 609 (compat_siginfo_t
610 __force __user *) data);
609 } 611 }
610 return ptrace_request(child, request, addr, data); 612 return ptrace_request(child, request, addr, data);
611} 613}
diff --git a/arch/s390/kernel/reset.S b/arch/s390/kernel/reset.S
deleted file mode 100644
index 8a87355161fa..000000000000
--- a/arch/s390/kernel/reset.S
+++ /dev/null
@@ -1,90 +0,0 @@
1/*
2 * arch/s390/kernel/reset.S
3 *
4 * Copyright (C) IBM Corp. 2006
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 * Michael Holzheu <holzheu@de.ibm.com>
7 */
8
9#include <asm/ptrace.h>
10#include <asm/lowcore.h>
11
12#ifdef CONFIG_64BIT
13
14 .globl reset_mcck_handler
15reset_mcck_handler:
16 basr %r13,0
170: lg %r15,__LC_PANIC_STACK # load panic stack
18 aghi %r15,-STACK_FRAME_OVERHEAD
19 lg %r1,s390_reset_mcck_handler-0b(%r13)
20 ltgr %r1,%r1
21 jz 1f
22 basr %r14,%r1
231: la %r1,4095
24 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
25 lpswe __LC_MCK_OLD_PSW
26
27 .globl s390_reset_mcck_handler
28s390_reset_mcck_handler:
29 .quad 0
30
31 .globl reset_pgm_handler
32reset_pgm_handler:
33 stmg %r0,%r15,__LC_SAVE_AREA
34 basr %r13,0
350: lg %r15,__LC_PANIC_STACK # load panic stack
36 aghi %r15,-STACK_FRAME_OVERHEAD
37 lg %r1,s390_reset_pgm_handler-0b(%r13)
38 ltgr %r1,%r1
39 jz 1f
40 basr %r14,%r1
41 lmg %r0,%r15,__LC_SAVE_AREA
42 lpswe __LC_PGM_OLD_PSW
431: lpswe disabled_wait_psw-0b(%r13)
44 .globl s390_reset_pgm_handler
45s390_reset_pgm_handler:
46 .quad 0
47 .align 8
48disabled_wait_psw:
49 .quad 0x0002000180000000,0x0000000000000000 + reset_pgm_handler
50
51#else /* CONFIG_64BIT */
52
53 .globl reset_mcck_handler
54reset_mcck_handler:
55 basr %r13,0
560: l %r15,__LC_PANIC_STACK # load panic stack
57 ahi %r15,-STACK_FRAME_OVERHEAD
58 l %r1,s390_reset_mcck_handler-0b(%r13)
59 ltr %r1,%r1
60 jz 1f
61 basr %r14,%r1
621: lm %r0,%r15,__LC_GPREGS_SAVE_AREA
63 lpsw __LC_MCK_OLD_PSW
64
65 .globl s390_reset_mcck_handler
66s390_reset_mcck_handler:
67 .long 0
68
69 .globl reset_pgm_handler
70reset_pgm_handler:
71 stm %r0,%r15,__LC_SAVE_AREA
72 basr %r13,0
730: l %r15,__LC_PANIC_STACK # load panic stack
74 ahi %r15,-STACK_FRAME_OVERHEAD
75 l %r1,s390_reset_pgm_handler-0b(%r13)
76 ltr %r1,%r1
77 jz 1f
78 basr %r14,%r1
79 lm %r0,%r15,__LC_SAVE_AREA
80 lpsw __LC_PGM_OLD_PSW
81
821: lpsw disabled_wait_psw-0b(%r13)
83 .globl s390_reset_pgm_handler
84s390_reset_pgm_handler:
85 .long 0
86disabled_wait_psw:
87 .align 8
88 .long 0x000a0000,0x00000000 + reset_pgm_handler
89
90#endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index bc5beaa8f98e..acf93dba7727 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -125,14 +125,12 @@ void do_extint(struct pt_regs *regs, unsigned short code)
125 * Make sure that the i/o interrupt did not "overtake" 125 * Make sure that the i/o interrupt did not "overtake"
126 * the last HZ timer interrupt. 126 * the last HZ timer interrupt.
127 */ 127 */
128 account_ticks(); 128 account_ticks(S390_lowcore.int_clock);
129 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; 129 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
130 index = ext_hash(code); 130 index = ext_hash(code);
131 for (p = ext_int_hash[index]; p; p = p->next) { 131 for (p = ext_int_hash[index]; p; p = p->next) {
132 if (likely(p->code == code)) { 132 if (likely(p->code == code))
133 if (likely(p->handler)) 133 p->handler(code);
134 p->handler(code);
135 }
136 } 134 }
137 irq_exit(); 135 irq_exit();
138 set_irq_regs(old_regs); 136 set_irq_regs(old_regs);
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 5d8ee3baac14..03739813d3bf 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -38,6 +38,8 @@
38#include <linux/device.h> 38#include <linux/device.h>
39#include <linux/notifier.h> 39#include <linux/notifier.h>
40#include <linux/pfn.h> 40#include <linux/pfn.h>
41#include <linux/ctype.h>
42#include <linux/reboot.h>
41 43
42#include <asm/uaccess.h> 44#include <asm/uaccess.h>
43#include <asm/system.h> 45#include <asm/system.h>
@@ -49,6 +51,14 @@
49#include <asm/page.h> 51#include <asm/page.h>
50#include <asm/ptrace.h> 52#include <asm/ptrace.h>
51#include <asm/sections.h> 53#include <asm/sections.h>
54#include <asm/ebcdic.h>
55#include <asm/compat.h>
56
57long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY |
58 PSW_MASK_MCHECK | PSW_DEFAULT_KEY);
59long psw_user_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
60 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
61 PSW_MASK_PSTATE | PSW_DEFAULT_KEY);
52 62
53/* 63/*
54 * User copy operations. 64 * User copy operations.
@@ -117,9 +127,9 @@ void __devinit cpu_init (void)
117 */ 127 */
118char vmhalt_cmd[128] = ""; 128char vmhalt_cmd[128] = "";
119char vmpoff_cmd[128] = ""; 129char vmpoff_cmd[128] = "";
120char vmpanic_cmd[128] = ""; 130static char vmpanic_cmd[128] = "";
121 131
122static inline void strncpy_skip_quote(char *dst, char *src, int n) 132static void strncpy_skip_quote(char *dst, char *src, int n)
123{ 133{
124 int sx, dx; 134 int sx, dx;
125 135
@@ -275,10 +285,6 @@ static void __init conmode_default(void)
275} 285}
276 286
277#ifdef CONFIG_SMP 287#ifdef CONFIG_SMP
278extern void machine_restart_smp(char *);
279extern void machine_halt_smp(void);
280extern void machine_power_off_smp(void);
281
282void (*_machine_restart)(char *command) = machine_restart_smp; 288void (*_machine_restart)(char *command) = machine_restart_smp;
283void (*_machine_halt)(void) = machine_halt_smp; 289void (*_machine_halt)(void) = machine_halt_smp;
284void (*_machine_power_off)(void) = machine_power_off_smp; 290void (*_machine_power_off)(void) = machine_power_off_smp;
@@ -386,6 +392,84 @@ static int __init early_parse_ipldelay(char *p)
386} 392}
387early_param("ipldelay", early_parse_ipldelay); 393early_param("ipldelay", early_parse_ipldelay);
388 394
395#ifdef CONFIG_S390_SWITCH_AMODE
396unsigned int switch_amode = 0;
397EXPORT_SYMBOL_GPL(switch_amode);
398
399static void set_amode_and_uaccess(unsigned long user_amode,
400 unsigned long user32_amode)
401{
402 psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode |
403 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
404 PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
405#ifdef CONFIG_COMPAT
406 psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode |
407 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
408 PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
409 psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode |
410 PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
411 PSW32_MASK_PSTATE;
412#endif
413 psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
414 PSW_MASK_MCHECK | PSW_DEFAULT_KEY;
415
416 if (MACHINE_HAS_MVCOS) {
417 printk("mvcos available.\n");
418 memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
419 } else {
420 printk("mvcos not available.\n");
421 memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
422 }
423}
424
425/*
426 * Switch kernel/user addressing modes?
427 */
428static int __init early_parse_switch_amode(char *p)
429{
430 switch_amode = 1;
431 return 0;
432}
433early_param("switch_amode", early_parse_switch_amode);
434
435#else /* CONFIG_S390_SWITCH_AMODE */
436static inline void set_amode_and_uaccess(unsigned long user_amode,
437 unsigned long user32_amode)
438{
439}
440#endif /* CONFIG_S390_SWITCH_AMODE */
441
442#ifdef CONFIG_S390_EXEC_PROTECT
443unsigned int s390_noexec = 0;
444EXPORT_SYMBOL_GPL(s390_noexec);
445
446/*
447 * Enable execute protection?
448 */
449static int __init early_parse_noexec(char *p)
450{
451 if (!strncmp(p, "off", 3))
452 return 0;
453 switch_amode = 1;
454 s390_noexec = 1;
455 return 0;
456}
457early_param("noexec", early_parse_noexec);
458#endif /* CONFIG_S390_EXEC_PROTECT */
459
460static void setup_addressing_mode(void)
461{
462 if (s390_noexec) {
463 printk("S390 execute protection active, ");
464 set_amode_and_uaccess(PSW_ASC_SECONDARY, PSW32_ASC_SECONDARY);
465 return;
466 }
467 if (switch_amode) {
468 printk("S390 address spaces switched, ");
469 set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY);
470 }
471}
472
389static void __init 473static void __init
390setup_lowcore(void) 474setup_lowcore(void)
391{ 475{
@@ -402,19 +486,21 @@ setup_lowcore(void)
402 lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; 486 lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
403 lc->restart_psw.addr = 487 lc->restart_psw.addr =
404 PSW_ADDR_AMODE | (unsigned long) restart_int_handler; 488 PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
405 lc->external_new_psw.mask = PSW_KERNEL_BITS; 489 if (switch_amode)
490 lc->restart_psw.mask |= PSW_ASC_HOME;
491 lc->external_new_psw.mask = psw_kernel_bits;
406 lc->external_new_psw.addr = 492 lc->external_new_psw.addr =
407 PSW_ADDR_AMODE | (unsigned long) ext_int_handler; 493 PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
408 lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT; 494 lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
409 lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; 495 lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
410 lc->program_new_psw.mask = PSW_KERNEL_BITS; 496 lc->program_new_psw.mask = psw_kernel_bits;
411 lc->program_new_psw.addr = 497 lc->program_new_psw.addr =
412 PSW_ADDR_AMODE | (unsigned long)pgm_check_handler; 498 PSW_ADDR_AMODE | (unsigned long)pgm_check_handler;
413 lc->mcck_new_psw.mask = 499 lc->mcck_new_psw.mask =
414 PSW_KERNEL_BITS & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT; 500 psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
415 lc->mcck_new_psw.addr = 501 lc->mcck_new_psw.addr =
416 PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; 502 PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
417 lc->io_new_psw.mask = PSW_KERNEL_BITS; 503 lc->io_new_psw.mask = psw_kernel_bits;
418 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; 504 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
419 lc->ipl_device = S390_lowcore.ipl_device; 505 lc->ipl_device = S390_lowcore.ipl_device;
420 lc->jiffy_timer = -1LL; 506 lc->jiffy_timer = -1LL;
@@ -439,7 +525,7 @@ setup_lowcore(void)
439static void __init 525static void __init
440setup_resources(void) 526setup_resources(void)
441{ 527{
442 struct resource *res; 528 struct resource *res, *sub_res;
443 int i; 529 int i;
444 530
445 code_resource.start = (unsigned long) &_text; 531 code_resource.start = (unsigned long) &_text;
@@ -464,8 +550,38 @@ setup_resources(void)
464 res->start = memory_chunk[i].addr; 550 res->start = memory_chunk[i].addr;
465 res->end = memory_chunk[i].addr + memory_chunk[i].size - 1; 551 res->end = memory_chunk[i].addr + memory_chunk[i].size - 1;
466 request_resource(&iomem_resource, res); 552 request_resource(&iomem_resource, res);
467 request_resource(res, &code_resource); 553
468 request_resource(res, &data_resource); 554 if (code_resource.start >= res->start &&
555 code_resource.start <= res->end &&
556 code_resource.end > res->end) {
557 sub_res = alloc_bootmem_low(sizeof(struct resource));
558 memcpy(sub_res, &code_resource,
559 sizeof(struct resource));
560 sub_res->end = res->end;
561 code_resource.start = res->end + 1;
562 request_resource(res, sub_res);
563 }
564
565 if (code_resource.start >= res->start &&
566 code_resource.start <= res->end &&
567 code_resource.end <= res->end)
568 request_resource(res, &code_resource);
569
570 if (data_resource.start >= res->start &&
571 data_resource.start <= res->end &&
572 data_resource.end > res->end) {
573 sub_res = alloc_bootmem_low(sizeof(struct resource));
574 memcpy(sub_res, &data_resource,
575 sizeof(struct resource));
576 sub_res->end = res->end;
577 data_resource.start = res->end + 1;
578 request_resource(res, sub_res);
579 }
580
581 if (data_resource.start >= res->start &&
582 data_resource.start <= res->end &&
583 data_resource.end <= res->end)
584 request_resource(res, &data_resource);
469 } 585 }
470} 586}
471 587
@@ -495,16 +611,13 @@ static void __init setup_memory_end(void)
495 } 611 }
496 if (!memory_end) 612 if (!memory_end)
497 memory_end = memory_size; 613 memory_end = memory_size;
498 if (real_size > memory_end)
499 printk("More memory detected than supported. Unused: %luk\n",
500 (real_size - memory_end) >> 10);
501} 614}
502 615
503static void __init 616static void __init
504setup_memory(void) 617setup_memory(void)
505{ 618{
506 unsigned long bootmap_size; 619 unsigned long bootmap_size;
507 unsigned long start_pfn, end_pfn, init_pfn; 620 unsigned long start_pfn, end_pfn;
508 int i; 621 int i;
509 622
510 /* 623 /*
@@ -514,10 +627,6 @@ setup_memory(void)
514 start_pfn = PFN_UP(__pa(&_end)); 627 start_pfn = PFN_UP(__pa(&_end));
515 end_pfn = max_pfn = PFN_DOWN(memory_end); 628 end_pfn = max_pfn = PFN_DOWN(memory_end);
516 629
517 /* Initialize storage key for kernel pages */
518 for (init_pfn = 0 ; init_pfn < start_pfn; init_pfn++)
519 page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
520
521#ifdef CONFIG_BLK_DEV_INITRD 630#ifdef CONFIG_BLK_DEV_INITRD
522 /* 631 /*
523 * Move the initrd in case the bitmap of the bootmem allocater 632 * Move the initrd in case the bitmap of the bootmem allocater
@@ -651,6 +760,7 @@ setup_arch(char **cmdline_p)
651 parse_early_param(); 760 parse_early_param();
652 761
653 setup_memory_end(); 762 setup_memory_end();
763 setup_addressing_mode();
654 setup_memory(); 764 setup_memory();
655 setup_resources(); 765 setup_resources();
656 setup_lowcore(); 766 setup_lowcore();
@@ -694,6 +804,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
694 struct cpuinfo_S390 *cpuinfo; 804 struct cpuinfo_S390 *cpuinfo;
695 unsigned long n = (unsigned long) v - 1; 805 unsigned long n = (unsigned long) v - 1;
696 806
807 s390_adjust_jiffies();
697 preempt_disable(); 808 preempt_disable();
698 if (!n) { 809 if (!n) {
699 seq_printf(m, "vendor_id : IBM/S390\n" 810 seq_printf(m, "vendor_id : IBM/S390\n"
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 4c8a7954ef48..554f9cf7499c 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -119,7 +119,7 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
119 119
120 /* Copy a 'clean' PSW mask to the user to avoid leaking 120 /* Copy a 'clean' PSW mask to the user to avoid leaking
121 information about whether PER is currently on. */ 121 information about whether PER is currently on. */
122 user_sregs.regs.psw.mask = PSW_MASK_MERGE(PSW_USER_BITS, regs->psw.mask); 122 user_sregs.regs.psw.mask = PSW_MASK_MERGE(psw_user_bits, regs->psw.mask);
123 user_sregs.regs.psw.addr = regs->psw.addr; 123 user_sregs.regs.psw.addr = regs->psw.addr;
124 memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs)); 124 memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
125 memcpy(&user_sregs.regs.acrs, current->thread.acrs, 125 memcpy(&user_sregs.regs.acrs, current->thread.acrs,
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index c0cd255fddbd..65b52320d145 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -22,23 +22,23 @@
22 22
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/init.h> 24#include <linux/init.h>
25
26#include <linux/mm.h> 25#include <linux/mm.h>
27#include <linux/spinlock.h> 26#include <linux/spinlock.h>
28#include <linux/kernel_stat.h> 27#include <linux/kernel_stat.h>
29#include <linux/smp_lock.h> 28#include <linux/smp_lock.h>
30
31#include <linux/delay.h> 29#include <linux/delay.h>
32#include <linux/cache.h> 30#include <linux/cache.h>
33#include <linux/interrupt.h> 31#include <linux/interrupt.h>
34#include <linux/cpu.h> 32#include <linux/cpu.h>
35 33#include <linux/timex.h>
34#include <asm/setup.h>
36#include <asm/sigp.h> 35#include <asm/sigp.h>
37#include <asm/pgalloc.h> 36#include <asm/pgalloc.h>
38#include <asm/irq.h> 37#include <asm/irq.h>
39#include <asm/s390_ext.h> 38#include <asm/s390_ext.h>
40#include <asm/cpcmd.h> 39#include <asm/cpcmd.h>
41#include <asm/tlbflush.h> 40#include <asm/tlbflush.h>
41#include <asm/timer.h>
42 42
43extern volatile int __cpu_logical_map[]; 43extern volatile int __cpu_logical_map[];
44 44
@@ -53,12 +53,6 @@ cpumask_t cpu_possible_map = CPU_MASK_NONE;
53 53
54static struct task_struct *current_set[NR_CPUS]; 54static struct task_struct *current_set[NR_CPUS];
55 55
56/*
57 * Reboot, halt and power_off routines for SMP.
58 */
59extern char vmhalt_cmd[];
60extern char vmpoff_cmd[];
61
62static void smp_ext_bitcall(int, ec_bit_sig); 56static void smp_ext_bitcall(int, ec_bit_sig);
63static void smp_ext_bitcall_others(ec_bit_sig); 57static void smp_ext_bitcall_others(ec_bit_sig);
64 58
@@ -200,7 +194,7 @@ int smp_call_function_on(void (*func) (void *info), void *info,
200} 194}
201EXPORT_SYMBOL(smp_call_function_on); 195EXPORT_SYMBOL(smp_call_function_on);
202 196
203static inline void do_send_stop(void) 197static void do_send_stop(void)
204{ 198{
205 int cpu, rc; 199 int cpu, rc;
206 200
@@ -214,7 +208,7 @@ static inline void do_send_stop(void)
214 } 208 }
215} 209}
216 210
217static inline void do_store_status(void) 211static void do_store_status(void)
218{ 212{
219 int cpu, rc; 213 int cpu, rc;
220 214
@@ -230,7 +224,7 @@ static inline void do_store_status(void)
230 } 224 }
231} 225}
232 226
233static inline void do_wait_for_stop(void) 227static void do_wait_for_stop(void)
234{ 228{
235 int cpu; 229 int cpu;
236 230
@@ -250,7 +244,7 @@ static inline void do_wait_for_stop(void)
250void smp_send_stop(void) 244void smp_send_stop(void)
251{ 245{
252 /* Disable all interrupts/machine checks */ 246 /* Disable all interrupts/machine checks */
253 __load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK); 247 __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
254 248
255 /* write magic number to zero page (absolute 0) */ 249 /* write magic number to zero page (absolute 0) */
256 lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC; 250 lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;
@@ -298,7 +292,7 @@ void machine_power_off_smp(void)
298 * cpus are handled. 292 * cpus are handled.
299 */ 293 */
300 294
301void do_ext_call_interrupt(__u16 code) 295static void do_ext_call_interrupt(__u16 code)
302{ 296{
303 unsigned long bits; 297 unsigned long bits;
304 298
@@ -385,7 +379,7 @@ struct ec_creg_mask_parms {
385/* 379/*
386 * callback for setting/clearing control bits 380 * callback for setting/clearing control bits
387 */ 381 */
388void smp_ctl_bit_callback(void *info) { 382static void smp_ctl_bit_callback(void *info) {
389 struct ec_creg_mask_parms *pp = info; 383 struct ec_creg_mask_parms *pp = info;
390 unsigned long cregs[16]; 384 unsigned long cregs[16];
391 int i; 385 int i;
@@ -458,17 +452,15 @@ __init smp_count_cpus(void)
458/* 452/*
459 * Activate a secondary processor. 453 * Activate a secondary processor.
460 */ 454 */
461extern void init_cpu_timer(void);
462extern void init_cpu_vtimer(void);
463
464int __devinit start_secondary(void *cpuvoid) 455int __devinit start_secondary(void *cpuvoid)
465{ 456{
466 /* Setup the cpu */ 457 /* Setup the cpu */
467 cpu_init(); 458 cpu_init();
468 preempt_disable(); 459 preempt_disable();
469 /* init per CPU timer */ 460 /* Enable TOD clock interrupts on the secondary cpu. */
470 init_cpu_timer(); 461 init_cpu_timer();
471#ifdef CONFIG_VIRT_TIMER 462#ifdef CONFIG_VIRT_TIMER
463 /* Enable cpu timer interrupts on the secondary cpu. */
472 init_cpu_vtimer(); 464 init_cpu_vtimer();
473#endif 465#endif
474 /* Enable pfault pseudo page faults on this cpu. */ 466 /* Enable pfault pseudo page faults on this cpu. */
@@ -542,7 +534,7 @@ smp_put_cpu(int cpu)
542 spin_unlock_irqrestore(&smp_reserve_lock, flags); 534 spin_unlock_irqrestore(&smp_reserve_lock, flags);
543} 535}
544 536
545static inline int 537static int
546cpu_stopped(int cpu) 538cpu_stopped(int cpu)
547{ 539{
548 __u32 status; 540 __u32 status;
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 0d14a4789bf2..2e5c65a1863e 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -11,11 +11,11 @@
11#include <linux/stacktrace.h> 11#include <linux/stacktrace.h>
12#include <linux/kallsyms.h> 12#include <linux/kallsyms.h>
13 13
14static inline unsigned long save_context_stack(struct stack_trace *trace, 14static unsigned long save_context_stack(struct stack_trace *trace,
15 unsigned int *skip, 15 unsigned int *skip,
16 unsigned long sp, 16 unsigned long sp,
17 unsigned long low, 17 unsigned long low,
18 unsigned long high) 18 unsigned long high)
19{ 19{
20 struct stack_frame *sf; 20 struct stack_frame *sf;
21 struct pt_regs *regs; 21 struct pt_regs *regs;
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 6cceed4df73e..3b91f27ab202 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -37,11 +37,15 @@
37#include <asm/irq.h> 37#include <asm/irq.h>
38#include <asm/irq_regs.h> 38#include <asm/irq_regs.h>
39#include <asm/timer.h> 39#include <asm/timer.h>
40#include <asm/etr.h>
40 41
41/* change this if you have some constant time drift */ 42/* change this if you have some constant time drift */
42#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) 43#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
43#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12) 44#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
44 45
46/* The value of the TOD clock for 1.1.1970. */
47#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
48
45/* 49/*
46 * Create a small time difference between the timer interrupts 50 * Create a small time difference between the timer interrupts
47 * on the different cpus to avoid lock contention. 51 * on the different cpus to avoid lock contention.
@@ -51,6 +55,7 @@
51#define TICK_SIZE tick 55#define TICK_SIZE tick
52 56
53static ext_int_info_t ext_int_info_cc; 57static ext_int_info_t ext_int_info_cc;
58static ext_int_info_t ext_int_etr_cc;
54static u64 init_timer_cc; 59static u64 init_timer_cc;
55static u64 jiffies_timer_cc; 60static u64 jiffies_timer_cc;
56static u64 xtime_cc; 61static u64 xtime_cc;
@@ -89,29 +94,21 @@ void tod_to_timeval(__u64 todval, struct timespec *xtime)
89#define s390_do_profile() do { ; } while(0) 94#define s390_do_profile() do { ; } while(0)
90#endif /* CONFIG_PROFILING */ 95#endif /* CONFIG_PROFILING */
91 96
92
93/* 97/*
94 * timer_interrupt() needs to keep up the real-time clock, 98 * Advance the per cpu tick counter up to the time given with the
95 * as well as call the "do_timer()" routine every clocktick 99 * "time" argument. The per cpu update consists of accounting
100 * the virtual cpu time, calling update_process_times and calling
101 * the profiling hook. If xtime is before time it is advanced as well.
96 */ 102 */
97void account_ticks(void) 103void account_ticks(u64 time)
98{ 104{
99 __u64 tmp;
100 __u32 ticks; 105 __u32 ticks;
106 __u64 tmp;
101 107
102 /* Calculate how many ticks have passed. */ 108 /* Calculate how many ticks have passed. */
103 if (S390_lowcore.int_clock < S390_lowcore.jiffy_timer) { 109 if (time < S390_lowcore.jiffy_timer)
104 /*
105 * We have to program the clock comparator even if
106 * no tick has passed. That happens if e.g. an i/o
107 * interrupt wakes up an idle processor that has
108 * switched off its hz timer.
109 */
110 tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
111 asm volatile ("SCKC %0" : : "m" (tmp));
112 return; 110 return;
113 } 111 tmp = time - S390_lowcore.jiffy_timer;
114 tmp = S390_lowcore.int_clock - S390_lowcore.jiffy_timer;
115 if (tmp >= 2*CLK_TICKS_PER_JIFFY) { /* more than two ticks ? */ 112 if (tmp >= 2*CLK_TICKS_PER_JIFFY) { /* more than two ticks ? */
116 ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1; 113 ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1;
117 S390_lowcore.jiffy_timer += 114 S390_lowcore.jiffy_timer +=
@@ -124,10 +121,6 @@ void account_ticks(void)
124 S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY; 121 S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
125 } 122 }
126 123
127 /* set clock comparator for next tick */
128 tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
129 asm volatile ("SCKC %0" : : "m" (tmp));
130
131#ifdef CONFIG_SMP 124#ifdef CONFIG_SMP
132 /* 125 /*
133 * Do not rely on the boot cpu to do the calls to do_timer. 126 * Do not rely on the boot cpu to do the calls to do_timer.
@@ -173,7 +166,7 @@ int sysctl_hz_timer = 1;
173 * Stop the HZ tick on the current CPU. 166 * Stop the HZ tick on the current CPU.
174 * Only cpu_idle may call this function. 167 * Only cpu_idle may call this function.
175 */ 168 */
176static inline void stop_hz_timer(void) 169static void stop_hz_timer(void)
177{ 170{
178 unsigned long flags; 171 unsigned long flags;
179 unsigned long seq, next; 172 unsigned long seq, next;
@@ -210,20 +203,21 @@ static inline void stop_hz_timer(void)
210 if (timer >= jiffies_timer_cc) 203 if (timer >= jiffies_timer_cc)
211 todval = timer; 204 todval = timer;
212 } 205 }
213 asm volatile ("SCKC %0" : : "m" (todval)); 206 set_clock_comparator(todval);
214} 207}
215 208
216/* 209/*
217 * Start the HZ tick on the current CPU. 210 * Start the HZ tick on the current CPU.
218 * Only cpu_idle may call this function. 211 * Only cpu_idle may call this function.
219 */ 212 */
220static inline void start_hz_timer(void) 213static void start_hz_timer(void)
221{ 214{
222 BUG_ON(!in_interrupt()); 215 BUG_ON(!in_interrupt());
223 216
224 if (!cpu_isset(smp_processor_id(), nohz_cpu_mask)) 217 if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
225 return; 218 return;
226 account_ticks(); 219 account_ticks(get_clock());
220 set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
227 cpu_clear(smp_processor_id(), nohz_cpu_mask); 221 cpu_clear(smp_processor_id(), nohz_cpu_mask);
228} 222}
229 223
@@ -245,7 +239,7 @@ static struct notifier_block nohz_idle_nb = {
245 .notifier_call = nohz_idle_notify, 239 .notifier_call = nohz_idle_notify,
246}; 240};
247 241
248void __init nohz_init(void) 242static void __init nohz_init(void)
249{ 243{
250 if (register_idle_notifier(&nohz_idle_nb)) 244 if (register_idle_notifier(&nohz_idle_nb))
251 panic("Couldn't register idle notifier"); 245 panic("Couldn't register idle notifier");
@@ -254,24 +248,57 @@ void __init nohz_init(void)
254#endif 248#endif
255 249
256/* 250/*
257 * Start the clock comparator on the current CPU. 251 * Set up per cpu jiffy timer and set the clock comparator.
252 */
253static void setup_jiffy_timer(void)
254{
255 /* Set up clock comparator to next jiffy. */
256 S390_lowcore.jiffy_timer =
257 jiffies_timer_cc + (jiffies_64 + 1) * CLK_TICKS_PER_JIFFY;
258 set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
259}
260
261/*
262 * Set up lowcore and control register of the current cpu to
263 * enable TOD clock and clock comparator interrupts.
258 */ 264 */
259void init_cpu_timer(void) 265void init_cpu_timer(void)
260{ 266{
261 unsigned long cr0; 267 setup_jiffy_timer();
262 __u64 timer;
263 268
264 timer = jiffies_timer_cc + jiffies_64 * CLK_TICKS_PER_JIFFY; 269 /* Enable clock comparator timer interrupt. */
265 S390_lowcore.jiffy_timer = timer + CLK_TICKS_PER_JIFFY; 270 __ctl_set_bit(0,11);
266 timer += CLK_TICKS_PER_JIFFY + CPU_DEVIATION; 271
267 asm volatile ("SCKC %0" : : "m" (timer)); 272 /* Always allow ETR external interrupts, even without an ETR. */
268 /* allow clock comparator timer interrupt */ 273 __ctl_set_bit(0, 4);
269 __ctl_store(cr0, 0, 0);
270 cr0 |= 0x800;
271 __ctl_load(cr0, 0, 0);
272} 274}
273 275
274extern void vtime_init(void); 276static void clock_comparator_interrupt(__u16 code)
277{
278 /* set clock comparator for next tick */
279 set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
280}
281
282static void etr_reset(void);
283static void etr_init(void);
284static void etr_ext_handler(__u16);
285
286/*
287 * Get the TOD clock running.
288 */
289static u64 __init reset_tod_clock(void)
290{
291 u64 time;
292
293 etr_reset();
294 if (store_clock(&time) == 0)
295 return time;
296 /* TOD clock not running. Set the clock to Unix Epoch. */
297 if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0)
298 panic("TOD clock not operational.");
299
300 return TOD_UNIX_EPOCH;
301}
275 302
276static cycle_t read_tod_clock(void) 303static cycle_t read_tod_clock(void)
277{ 304{
@@ -295,48 +322,31 @@ static struct clocksource clocksource_tod = {
295 */ 322 */
296void __init time_init(void) 323void __init time_init(void)
297{ 324{
298 __u64 set_time_cc; 325 init_timer_cc = reset_tod_clock();
299 int cc; 326 xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY;
300
301 /* kick the TOD clock */
302 asm volatile(
303 " stck 0(%2)\n"
304 " ipm %0\n"
305 " srl %0,28"
306 : "=d" (cc), "=m" (init_timer_cc)
307 : "a" (&init_timer_cc) : "cc");
308 switch (cc) {
309 case 0: /* clock in set state: all is fine */
310 break;
311 case 1: /* clock in non-set state: FIXME */
312 printk("time_init: TOD clock in non-set state\n");
313 break;
314 case 2: /* clock in error state: FIXME */
315 printk("time_init: TOD clock in error state\n");
316 break;
317 case 3: /* clock in stopped or not-operational state: FIXME */
318 printk("time_init: TOD clock stopped/non-operational\n");
319 break;
320 }
321 jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY; 327 jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY;
322 328
323 /* set xtime */ 329 /* set xtime */
324 xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY; 330 tod_to_timeval(init_timer_cc - TOD_UNIX_EPOCH, &xtime);
325 set_time_cc = init_timer_cc - 0x8126d60e46000000LL +
326 (0x3c26700LL*1000000*4096);
327 tod_to_timeval(set_time_cc, &xtime);
328 set_normalized_timespec(&wall_to_monotonic, 331 set_normalized_timespec(&wall_to_monotonic,
329 -xtime.tv_sec, -xtime.tv_nsec); 332 -xtime.tv_sec, -xtime.tv_nsec);
330 333
331 /* request the clock comparator external interrupt */ 334 /* request the clock comparator external interrupt */
332 if (register_early_external_interrupt(0x1004, NULL, 335 if (register_early_external_interrupt(0x1004,
336 clock_comparator_interrupt,
333 &ext_int_info_cc) != 0) 337 &ext_int_info_cc) != 0)
334 panic("Couldn't request external interrupt 0x1004"); 338 panic("Couldn't request external interrupt 0x1004");
335 339
336 if (clocksource_register(&clocksource_tod) != 0) 340 if (clocksource_register(&clocksource_tod) != 0)
337 panic("Could not register TOD clock source"); 341 panic("Could not register TOD clock source");
338 342
339 init_cpu_timer(); 343 /* request the etr external interrupt */
344 if (register_early_external_interrupt(0x1406, etr_ext_handler,
345 &ext_int_etr_cc) != 0)
346 panic("Couldn't request external interrupt 0x1406");
347
348 /* Enable TOD clock interrupts on the boot cpu. */
349 init_cpu_timer();
340 350
341#ifdef CONFIG_NO_IDLE_HZ 351#ifdef CONFIG_NO_IDLE_HZ
342 nohz_init(); 352 nohz_init();
@@ -345,5 +355,1048 @@ void __init time_init(void)
345#ifdef CONFIG_VIRT_TIMER 355#ifdef CONFIG_VIRT_TIMER
346 vtime_init(); 356 vtime_init();
347#endif 357#endif
358 etr_init();
359}
360
361/*
362 * External Time Reference (ETR) code.
363 */
364static int etr_port0_online;
365static int etr_port1_online;
366
367static int __init early_parse_etr(char *p)
368{
369 if (strncmp(p, "off", 3) == 0)
370 etr_port0_online = etr_port1_online = 0;
371 else if (strncmp(p, "port0", 5) == 0)
372 etr_port0_online = 1;
373 else if (strncmp(p, "port1", 5) == 0)
374 etr_port1_online = 1;
375 else if (strncmp(p, "on", 2) == 0)
376 etr_port0_online = etr_port1_online = 1;
377 return 0;
378}
379early_param("etr", early_parse_etr);
380
/* Events processed by the ETR tasklet; used as bit numbers in etr_events. */
enum etr_event {
	ETR_EVENT_PORT0_CHANGE,
	ETR_EVENT_PORT1_CHANGE,
	ETR_EVENT_PORT_ALERT,
	ETR_EVENT_SYNC_CHECK,
	ETR_EVENT_SWITCH_LOCAL,
	ETR_EVENT_UPDATE,
};

/* Global ETR state flags; used as bit numbers in etr_flags. */
enum etr_flags {
	ETR_FLAG_ENOSYS,	/* machine has no usable ETR attachment */
	ETR_FLAG_EACCES,	/* no usable port is online */
	ETR_FLAG_STEAI,		/* the steai instruction is available */
};
395
396/*
397 * Valid bit combinations of the eacr register are (x = don't care):
398 * e0 e1 dp p0 p1 ea es sl
399 * 0 0 x 0 0 0 0 0 initial, disabled state
400 * 0 0 x 0 1 1 0 0 port 1 online
401 * 0 0 x 1 0 1 0 0 port 0 online
402 * 0 0 x 1 1 1 0 0 both ports online
403 * 0 1 x 0 1 1 0 0 port 1 online and usable, ETR or PPS mode
404 * 0 1 x 0 1 1 0 1 port 1 online, usable and ETR mode
405 * 0 1 x 0 1 1 1 0 port 1 online, usable, PPS mode, in-sync
406 * 0 1 x 0 1 1 1 1 port 1 online, usable, ETR mode, in-sync
407 * 0 1 x 1 1 1 0 0 both ports online, port 1 usable
408 * 0 1 x 1 1 1 1 0 both ports online, port 1 usable, PPS mode, in-sync
409 * 0 1 x 1 1 1 1 1 both ports online, port 1 usable, ETR mode, in-sync
410 * 1 0 x 1 0 1 0 0 port 0 online and usable, ETR or PPS mode
411 * 1 0 x 1 0 1 0 1 port 0 online, usable and ETR mode
412 * 1 0 x 1 0 1 1 0 port 0 online, usable, PPS mode, in-sync
413 * 1 0 x 1 0 1 1 1 port 0 online, usable, ETR mode, in-sync
414 * 1 0 x 1 1 1 0 0 both ports online, port 0 usable
415 * 1 0 x 1 1 1 1 0 both ports online, port 0 usable, PPS mode, in-sync
416 * 1 0 x 1 1 1 1 1 both ports online, port 0 usable, ETR mode, in-sync
417 * 1 1 x 1 1 1 1 0 both ports online & usable, ETR, in-sync
418 * 1 1 x 1 1 1 1 1 both ports online & usable, ETR, in-sync
419 */
420static struct etr_eacr etr_eacr;
421static u64 etr_tolec; /* time of last eacr update */
422static unsigned long etr_flags;
423static struct etr_aib etr_port0;
424static int etr_port0_uptodate;
425static struct etr_aib etr_port1;
426static int etr_port1_uptodate;
427static unsigned long etr_events;
428static struct timer_list etr_timer;
429static struct tasklet_struct etr_tasklet;
430static DEFINE_PER_CPU(atomic_t, etr_sync_word);
431
432static void etr_timeout(unsigned long dummy);
433static void etr_tasklet_fn(unsigned long dummy);
434
435/*
436 * The etr get_clock function. It will write the current clock value
437 * to the clock pointer and return 0 if the clock is in sync with the
438 * external time source. If the clock mode is local it will return
439 * -ENOSYS and -EAGAIN if the clock is not in sync with the external
440 * reference. This function is what ETR is all about..
441 */
442int get_sync_clock(unsigned long long *clock)
443{
444 atomic_t *sw_ptr;
445 unsigned int sw0, sw1;
446
447 sw_ptr = &get_cpu_var(etr_sync_word);
448 sw0 = atomic_read(sw_ptr);
449 *clock = get_clock();
450 sw1 = atomic_read(sw_ptr);
451 put_cpu_var(etr_sync_sync);
452 if (sw0 == sw1 && (sw0 & 0x80000000U))
453 /* Success: time is in sync. */
454 return 0;
455 if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
456 return -ENOSYS;
457 if (test_bit(ETR_FLAG_EACCES, &etr_flags))
458 return -EACCES;
459 return -EAGAIN;
460}
461EXPORT_SYMBOL(get_sync_clock);
462
463/*
464 * Make get_sync_clock return -EAGAIN.
465 */
466static void etr_disable_sync_clock(void *dummy)
467{
468 atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word);
469 /*
470 * Clear the in-sync bit 2^31. All get_sync_clock calls will
471 * fail until the sync bit is turned back on. In addition
472 * increase the "sequence" counter to avoid the race of an
473 * etr event and the complete recovery against get_sync_clock.
474 */
475 atomic_clear_mask(0x80000000, sw_ptr);
476 atomic_inc(sw_ptr);
477}
478
479/*
480 * Make get_sync_clock return 0 again.
481 * Needs to be called from a context disabled for preemption.
482 */
483static void etr_enable_sync_clock(void)
484{
485 atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word);
486 atomic_set_mask(0x80000000, sw_ptr);
487}
488
489/*
490 * Reset ETR attachment.
491 */
492static void etr_reset(void)
493{
494 etr_eacr = (struct etr_eacr) {
495 .e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0,
496 .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0,
497 .es = 0, .sl = 0 };
498 if (etr_setr(&etr_eacr) == 0)
499 etr_tolec = get_clock();
500 else {
501 set_bit(ETR_FLAG_ENOSYS, &etr_flags);
502 if (etr_port0_online || etr_port1_online) {
503 printk(KERN_WARNING "Running on non ETR capable "
504 "machine, only local mode available.\n");
505 etr_port0_online = etr_port1_online = 0;
506 }
507 }
508}
509
/*
 * Initialize the ETR code: detect the steai instruction, set up the
 * retry timer and the tasklet, and schedule an initial port update
 * for each port that was requested online. Called from time_init().
 */
static void etr_init(void)
{
	struct etr_aib aib;

	/* etr_reset() found no ETR attachment; nothing to do. */
	if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
		return;
	/* Check if this machine has the steai instruction. */
	if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0)
		set_bit(ETR_FLAG_STEAI, &etr_flags);
	setup_timer(&etr_timer, etr_timeout, 0UL);
	tasklet_init(&etr_tasklet, etr_tasklet_fn, 0);
	if (!etr_port0_online && !etr_port1_online)
		/* Neither port requested: the clock stays in local mode. */
		set_bit(ETR_FLAG_EACCES, &etr_flags);
	if (etr_port0_online) {
		set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
		tasklet_hi_schedule(&etr_tasklet);
	}
	if (etr_port1_online) {
		set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
		tasklet_hi_schedule(&etr_tasklet);
	}
}
532
533/*
534 * Two sorts of ETR machine checks. The architecture reads:
535 * "When a machine-check niterruption occurs and if a switch-to-local or
536 * ETR-sync-check interrupt request is pending but disabled, this pending
537 * disabled interruption request is indicated and is cleared".
538 * Which means that we can get etr_switch_to_local events from the machine
539 * check handler although the interruption condition is disabled. Lovely..
540 */
541
542/*
543 * Switch to local machine check. This is called when the last usable
544 * ETR port goes inactive. After switch to local the clock is not in sync.
545 */
546void etr_switch_to_local(void)
547{
548 if (!etr_eacr.sl)
549 return;
550 etr_disable_sync_clock(NULL);
551 set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
552 tasklet_hi_schedule(&etr_tasklet);
553}
554
555/*
556 * ETR sync check machine check. This is called when the ETR OTE and the
557 * local clock OTE are farther apart than the ETR sync check tolerance.
558 * After a ETR sync check the clock is not in sync. The machine check
559 * is broadcasted to all cpus at the same time.
560 */
561void etr_sync_check(void)
562{
563 if (!etr_eacr.es)
564 return;
565 etr_disable_sync_clock(NULL);
566 set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
567 tasklet_hi_schedule(&etr_tasklet);
568}
569
570/*
571 * ETR external interrupt. There are two causes:
572 * 1) port state change, check the usability of the port
573 * 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the
574 * sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3)
575 * or ETR-data word 4 (edf4) has changed.
576 */
577static void etr_ext_handler(__u16 code)
578{
579 struct etr_interruption_parameter *intparm =
580 (struct etr_interruption_parameter *) &S390_lowcore.ext_params;
581
582 if (intparm->pc0)
583 /* ETR port 0 state change. */
584 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
585 if (intparm->pc1)
586 /* ETR port 1 state change. */
587 set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
588 if (intparm->eai)
589 /*
590 * ETR port alert on either port 0, 1 or both.
591 * Both ports are not up-to-date now.
592 */
593 set_bit(ETR_EVENT_PORT_ALERT, &etr_events);
594 tasklet_hi_schedule(&etr_tasklet);
595}
596
/*
 * Timer callback: request a port information update and reschedule the
 * ETR tasklet. Armed by etr_set_tolec_timeout/etr_set_sync_timeout.
 */
static void etr_timeout(unsigned long dummy)
{
	set_bit(ETR_EVENT_UPDATE, &etr_events);
	tasklet_hi_schedule(&etr_tasklet);
}
602
603/*
604 * Check if the etr mode is pss.
605 */
606static inline int etr_mode_is_pps(struct etr_eacr eacr)
607{
608 return eacr.es && !eacr.sl;
609}
610
611/*
612 * Check if the etr mode is etr.
613 */
614static inline int etr_mode_is_etr(struct etr_eacr eacr)
615{
616 return eacr.es && eacr.sl;
617}
618
619/*
620 * Check if the port can be used for TOD synchronization.
621 * For PPS mode the port has to receive OTEs. For ETR mode
622 * the port has to receive OTEs, the ETR stepping bit has to
623 * be zero and the validity bits for data frame 1, 2, and 3
624 * have to be 1.
625 */
626static int etr_port_valid(struct etr_aib *aib, int port)
627{
628 unsigned int psc;
629
630 /* Check that this port is receiving OTEs. */
631 if (aib->tsp == 0)
632 return 0;
633
634 psc = port ? aib->esw.psc1 : aib->esw.psc0;
635 if (psc == etr_lpsc_pps_mode)
636 return 1;
637 if (psc == etr_lpsc_operational_step)
638 return !aib->esw.y && aib->slsw.v1 &&
639 aib->slsw.v2 && aib->slsw.v3;
640 return 0;
641}
642
643/*
644 * Check if two ports are on the same network.
645 */
646static int etr_compare_network(struct etr_aib *aib1, struct etr_aib *aib2)
647{
648 // FIXME: any other fields we have to compare?
649 return aib1->edf1.net_id == aib2->edf1.net_id;
650}
651
652/*
653 * Wrapper for etr_stei that converts physical port states
654 * to logical port states to be consistent with the output
655 * of stetr (see etr_psc vs. etr_lpsc).
656 */
657static void etr_steai_cv(struct etr_aib *aib, unsigned int func)
658{
659 BUG_ON(etr_steai(aib, func) != 0);
660 /* Convert port state to logical port state. */
661 if (aib->esw.psc0 == 1)
662 aib->esw.psc0 = 2;
663 else if (aib->esw.psc0 == 0 && aib->esw.p == 0)
664 aib->esw.psc0 = 1;
665 if (aib->esw.psc1 == 1)
666 aib->esw.psc1 = 2;
667 else if (aib->esw.psc1 == 0 && aib->esw.p == 1)
668 aib->esw.psc1 = 1;
669}
670
671/*
672 * Check if the aib a2 is still connected to the same attachment as
673 * aib a1, the etv values differ by one and a2 is valid.
674 */
675static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p)
676{
677 int state_a1, state_a2;
678
679 /* Paranoia check: e0/e1 should better be the same. */
680 if (a1->esw.eacr.e0 != a2->esw.eacr.e0 ||
681 a1->esw.eacr.e1 != a2->esw.eacr.e1)
682 return 0;
683
684 /* Still connected to the same etr ? */
685 state_a1 = p ? a1->esw.psc1 : a1->esw.psc0;
686 state_a2 = p ? a2->esw.psc1 : a2->esw.psc0;
687 if (state_a1 == etr_lpsc_operational_step) {
688 if (state_a2 != etr_lpsc_operational_step ||
689 a1->edf1.net_id != a2->edf1.net_id ||
690 a1->edf1.etr_id != a2->edf1.etr_id ||
691 a1->edf1.etr_pn != a2->edf1.etr_pn)
692 return 0;
693 } else if (state_a2 != etr_lpsc_pps_mode)
694 return 0;
695
696 /* The ETV value of a2 needs to be ETV of a1 + 1. */
697 if (a1->edf2.etv + 1 != a2->edf2.etv)
698 return 0;
699
700 if (!etr_port_valid(a2, p))
701 return 0;
702
703 return 1;
704}
705
706/*
707 * The time is "clock". xtime is what we think the time is.
708 * Adjust the value by a multiple of jiffies and add the delta to ntp.
709 * "delay" is an approximation how long the synchronization took. If
710 * the time correction is positive, then "delay" is subtracted from
711 * the time difference and only the remaining part is passed to ntp.
712 */
713static void etr_adjust_time(unsigned long long clock, unsigned long long delay)
714{
715 unsigned long long delta, ticks;
716 struct timex adjust;
717
718 /*
719 * We don't have to take the xtime lock because the cpu
720 * executing etr_adjust_time is running disabled in
721 * tasklet context and all other cpus are looping in
722 * etr_sync_cpu_start.
723 */
724 if (clock > xtime_cc) {
725 /* It is later than we thought. */
726 delta = ticks = clock - xtime_cc;
727 delta = ticks = (delta < delay) ? 0 : delta - delay;
728 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
729 init_timer_cc = init_timer_cc + delta;
730 jiffies_timer_cc = jiffies_timer_cc + delta;
731 xtime_cc = xtime_cc + delta;
732 adjust.offset = ticks * (1000000 / HZ);
733 } else {
734 /* It is earlier than we thought. */
735 delta = ticks = xtime_cc - clock;
736 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
737 init_timer_cc = init_timer_cc - delta;
738 jiffies_timer_cc = jiffies_timer_cc - delta;
739 xtime_cc = xtime_cc - delta;
740 adjust.offset = -ticks * (1000000 / HZ);
741 }
742 if (adjust.offset != 0) {
743 printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n",
744 adjust.offset);
745 adjust.modes = ADJ_OFFSET_SINGLESHOT;
746 do_adjtimex(&adjust);
747 }
748}
749
/*
 * Executed via smp_call_function on all other cpus while etr_sync_clock
 * adjusts the TOD; parks them until *in_sync (passed through dummy)
 * becomes non-zero (1 = success, negative = sync failed).
 */
static void etr_sync_cpu_start(void *dummy)
{
	int *in_sync = dummy;

	etr_enable_sync_clock();
	/*
	 * This looks like a busy wait loop but it isn't. etr_sync_cpus
	 * is called on all other cpus while the TOD clocks is stopped.
	 * __udelay will stop the cpu on an enabled wait psw until the
	 * TOD is running again.
	 */
	while (*in_sync == 0)
		__udelay(1);
	if (*in_sync != 1)
		/* Didn't work. Clear per-cpu in sync bit again. */
		etr_disable_sync_clock(NULL);
	/*
	 * This round of TOD syncing is done. Set the clock comparator
	 * to the next tick and let the processor continue.
	 */
	setup_jiffy_timer();
}
772
/*
 * Intentionally empty second smp_call_function rendezvous: it makes the
 * syncing cpu in etr_sync_clock wait until all other cpus have left
 * etr_sync_cpu_start before re-enabling preemption.
 */
static void etr_sync_cpu_end(void *dummy)
{
}
776
777/*
778 * Sync the TOD clock using the port refered to by aibp. This port
779 * has to be enabled and the other port has to be disabled. The
780 * last eacr update has to be more than 1.6 seconds in the past.
781 */
782static int etr_sync_clock(struct etr_aib *aib, int port)
783{
784 struct etr_aib *sync_port;
785 unsigned long long clock, delay;
786 int in_sync, follows;
787 int rc;
788
789 /* Check if the current aib is adjacent to the sync port aib. */
790 sync_port = (port == 0) ? &etr_port0 : &etr_port1;
791 follows = etr_aib_follows(sync_port, aib, port);
792 memcpy(sync_port, aib, sizeof(*aib));
793 if (!follows)
794 return -EAGAIN;
795
796 /*
797 * Catch all other cpus and make them wait until we have
798 * successfully synced the clock. smp_call_function will
799 * return after all other cpus are in etr_sync_cpu_start.
800 */
801 in_sync = 0;
802 preempt_disable();
803 smp_call_function(etr_sync_cpu_start,&in_sync,0,0);
804 local_irq_disable();
805 etr_enable_sync_clock();
806
807 /* Set clock to next OTE. */
808 __ctl_set_bit(14, 21);
809 __ctl_set_bit(0, 29);
810 clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32;
811 if (set_clock(clock) == 0) {
812 __udelay(1); /* Wait for the clock to start. */
813 __ctl_clear_bit(0, 29);
814 __ctl_clear_bit(14, 21);
815 etr_stetr(aib);
816 /* Adjust Linux timing variables. */
817 delay = (unsigned long long)
818 (aib->edf2.etv - sync_port->edf2.etv) << 32;
819 etr_adjust_time(clock, delay);
820 setup_jiffy_timer();
821 /* Verify that the clock is properly set. */
822 if (!etr_aib_follows(sync_port, aib, port)) {
823 /* Didn't work. */
824 etr_disable_sync_clock(NULL);
825 in_sync = -EAGAIN;
826 rc = -EAGAIN;
827 } else {
828 in_sync = 1;
829 rc = 0;
830 }
831 } else {
832 /* Could not set the clock ?!? */
833 __ctl_clear_bit(0, 29);
834 __ctl_clear_bit(14, 21);
835 etr_disable_sync_clock(NULL);
836 in_sync = -EAGAIN;
837 rc = -EAGAIN;
838 }
839 local_irq_enable();
840 smp_call_function(etr_sync_cpu_end,NULL,0,0);
841 preempt_enable();
842 return rc;
843}
844
845/*
846 * Handle the immediate effects of the different events.
847 * The port change event is used for online/offline changes.
848 */
849static struct etr_eacr etr_handle_events(struct etr_eacr eacr)
850{
851 if (test_and_clear_bit(ETR_EVENT_SYNC_CHECK, &etr_events))
852 eacr.es = 0;
853 if (test_and_clear_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events))
854 eacr.es = eacr.sl = 0;
855 if (test_and_clear_bit(ETR_EVENT_PORT_ALERT, &etr_events))
856 etr_port0_uptodate = etr_port1_uptodate = 0;
857
858 if (test_and_clear_bit(ETR_EVENT_PORT0_CHANGE, &etr_events)) {
859 if (eacr.e0)
860 /*
861 * Port change of an enabled port. We have to
862 * assume that this can have caused an stepping
863 * port switch.
864 */
865 etr_tolec = get_clock();
866 eacr.p0 = etr_port0_online;
867 if (!eacr.p0)
868 eacr.e0 = 0;
869 etr_port0_uptodate = 0;
870 }
871 if (test_and_clear_bit(ETR_EVENT_PORT1_CHANGE, &etr_events)) {
872 if (eacr.e1)
873 /*
874 * Port change of an enabled port. We have to
875 * assume that this can have caused an stepping
876 * port switch.
877 */
878 etr_tolec = get_clock();
879 eacr.p1 = etr_port1_online;
880 if (!eacr.p1)
881 eacr.e1 = 0;
882 etr_port1_uptodate = 0;
883 }
884 clear_bit(ETR_EVENT_UPDATE, &etr_events);
885 return eacr;
886}
887
888/*
889 * Set up a timer that expires after the etr_tolec + 1.6 seconds if
890 * one of the ports needs an update.
891 */
892static void etr_set_tolec_timeout(unsigned long long now)
893{
894 unsigned long micros;
895
896 if ((!etr_eacr.p0 || etr_port0_uptodate) &&
897 (!etr_eacr.p1 || etr_port1_uptodate))
898 return;
899 micros = (now > etr_tolec) ? ((now - etr_tolec) >> 12) : 0;
900 micros = (micros > 1600000) ? 0 : 1600000 - micros;
901 mod_timer(&etr_timer, jiffies + (micros * HZ) / 1000000 + 1);
902}
903
904/*
905 * Set up a time that expires after 1/2 second.
906 */
907static void etr_set_sync_timeout(void)
908{
909 mod_timer(&etr_timer, jiffies + HZ/2);
910}
911
912/*
913 * Update the aib information for one or both ports.
914 */
915static struct etr_eacr etr_handle_update(struct etr_aib *aib,
916 struct etr_eacr eacr)
917{
918 /* With both ports disabled the aib information is useless. */
919 if (!eacr.e0 && !eacr.e1)
920 return eacr;
921
922 /* Update port0 or port1 with aib stored in etr_tasklet_fn. */
923 if (aib->esw.q == 0) {
924 /* Information for port 0 stored. */
925 if (eacr.p0 && !etr_port0_uptodate) {
926 etr_port0 = *aib;
927 if (etr_port0_online)
928 etr_port0_uptodate = 1;
929 }
930 } else {
931 /* Information for port 1 stored. */
932 if (eacr.p1 && !etr_port1_uptodate) {
933 etr_port1 = *aib;
934 if (etr_port0_online)
935 etr_port1_uptodate = 1;
936 }
937 }
938
939 /*
940 * Do not try to get the alternate port aib if the clock
941 * is not in sync yet.
942 */
943 if (!eacr.es)
944 return eacr;
945
946 /*
947 * If steai is available we can get the information about
948 * the other port immediately. If only stetr is available the
949 * data-port bit toggle has to be used.
950 */
951 if (test_bit(ETR_FLAG_STEAI, &etr_flags)) {
952 if (eacr.p0 && !etr_port0_uptodate) {
953 etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0);
954 etr_port0_uptodate = 1;
955 }
956 if (eacr.p1 && !etr_port1_uptodate) {
957 etr_steai_cv(&etr_port1, ETR_STEAI_PORT_1);
958 etr_port1_uptodate = 1;
959 }
960 } else {
961 /*
962 * One port was updated above, if the other
963 * port is not uptodate toggle dp bit.
964 */
965 if ((eacr.p0 && !etr_port0_uptodate) ||
966 (eacr.p1 && !etr_port1_uptodate))
967 eacr.dp ^= 1;
968 else
969 eacr.dp = 0;
970 }
971 return eacr;
972}
973
974/*
975 * Write new etr control register if it differs from the current one.
976 * Return 1 if etr_tolec has been updated as well.
977 */
978static void etr_update_eacr(struct etr_eacr eacr)
979{
980 int dp_changed;
981
982 if (memcmp(&etr_eacr, &eacr, sizeof(eacr)) == 0)
983 /* No change, return. */
984 return;
985 /*
986 * The disable of an active port of the change of the data port
987 * bit can/will cause a change in the data port.
988 */
989 dp_changed = etr_eacr.e0 > eacr.e0 || etr_eacr.e1 > eacr.e1 ||
990 (etr_eacr.dp ^ eacr.dp) != 0;
991 etr_eacr = eacr;
992 etr_setr(&etr_eacr);
993 if (dp_changed)
994 etr_tolec = get_clock();
995}
996
997/*
998 * ETR tasklet. In this function you'll find the main logic. In
999 * particular this is the only function that calls etr_update_eacr(),
1000 * it "controls" the etr control register.
1001 */
1002static void etr_tasklet_fn(unsigned long dummy)
1003{
1004 unsigned long long now;
1005 struct etr_eacr eacr;
1006 struct etr_aib aib;
1007 int sync_port;
1008
1009 /* Create working copy of etr_eacr. */
1010 eacr = etr_eacr;
1011
1012 /* Check for the different events and their immediate effects. */
1013 eacr = etr_handle_events(eacr);
1014
1015 /* Check if ETR is supposed to be active. */
1016 eacr.ea = eacr.p0 || eacr.p1;
1017 if (!eacr.ea) {
1018 /* Both ports offline. Reset everything. */
1019 eacr.dp = eacr.es = eacr.sl = 0;
1020 on_each_cpu(etr_disable_sync_clock, NULL, 0, 1);
1021 del_timer_sync(&etr_timer);
1022 etr_update_eacr(eacr);
1023 set_bit(ETR_FLAG_EACCES, &etr_flags);
1024 return;
1025 }
1026
1027 /* Store aib to get the current ETR status word. */
1028 BUG_ON(etr_stetr(&aib) != 0);
1029 etr_port0.esw = etr_port1.esw = aib.esw; /* Copy status word. */
1030 now = get_clock();
1031
1032 /*
1033 * Update the port information if the last stepping port change
1034 * or data port change is older than 1.6 seconds.
1035 */
1036 if (now >= etr_tolec + (1600000 << 12))
1037 eacr = etr_handle_update(&aib, eacr);
1038
1039 /*
1040 * Select ports to enable. The prefered synchronization mode is PPS.
1041 * If a port can be enabled depends on a number of things:
1042 * 1) The port needs to be online and uptodate. A port is not
1043 * disabled just because it is not uptodate, but it is only
1044 * enabled if it is uptodate.
1045 * 2) The port needs to have the same mode (pps / etr).
1046 * 3) The port needs to be usable -> etr_port_valid() == 1
1047 * 4) To enable the second port the clock needs to be in sync.
1048 * 5) If both ports are useable and are ETR ports, the network id
1049 * has to be the same.
1050 * The eacr.sl bit is used to indicate etr mode vs. pps mode.
1051 */
1052 if (eacr.p0 && aib.esw.psc0 == etr_lpsc_pps_mode) {
1053 eacr.sl = 0;
1054 eacr.e0 = 1;
1055 if (!etr_mode_is_pps(etr_eacr))
1056 eacr.es = 0;
1057 if (!eacr.es || !eacr.p1 || aib.esw.psc1 != etr_lpsc_pps_mode)
1058 eacr.e1 = 0;
1059 // FIXME: uptodate checks ?
1060 else if (etr_port0_uptodate && etr_port1_uptodate)
1061 eacr.e1 = 1;
1062 sync_port = (etr_port0_uptodate &&
1063 etr_port_valid(&etr_port0, 0)) ? 0 : -1;
1064 clear_bit(ETR_FLAG_EACCES, &etr_flags);
1065 } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) {
1066 eacr.sl = 0;
1067 eacr.e0 = 0;
1068 eacr.e1 = 1;
1069 if (!etr_mode_is_pps(etr_eacr))
1070 eacr.es = 0;
1071 sync_port = (etr_port1_uptodate &&
1072 etr_port_valid(&etr_port1, 1)) ? 1 : -1;
1073 clear_bit(ETR_FLAG_EACCES, &etr_flags);
1074 } else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) {
1075 eacr.sl = 1;
1076 eacr.e0 = 1;
1077 if (!etr_mode_is_etr(etr_eacr))
1078 eacr.es = 0;
1079 if (!eacr.es || !eacr.p1 ||
1080 aib.esw.psc1 != etr_lpsc_operational_alt)
1081 eacr.e1 = 0;
1082 else if (etr_port0_uptodate && etr_port1_uptodate &&
1083 etr_compare_network(&etr_port0, &etr_port1))
1084 eacr.e1 = 1;
1085 sync_port = (etr_port0_uptodate &&
1086 etr_port_valid(&etr_port0, 0)) ? 0 : -1;
1087 clear_bit(ETR_FLAG_EACCES, &etr_flags);
1088 } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) {
1089 eacr.sl = 1;
1090 eacr.e0 = 0;
1091 eacr.e1 = 1;
1092 if (!etr_mode_is_etr(etr_eacr))
1093 eacr.es = 0;
1094 sync_port = (etr_port1_uptodate &&
1095 etr_port_valid(&etr_port1, 1)) ? 1 : -1;
1096 clear_bit(ETR_FLAG_EACCES, &etr_flags);
1097 } else {
1098 /* Both ports not usable. */
1099 eacr.es = eacr.sl = 0;
1100 sync_port = -1;
1101 set_bit(ETR_FLAG_EACCES, &etr_flags);
1102 }
1103
1104 /*
1105 * If the clock is in sync just update the eacr and return.
1106 * If there is no valid sync port wait for a port update.
1107 */
1108 if (eacr.es || sync_port < 0) {
1109 etr_update_eacr(eacr);
1110 etr_set_tolec_timeout(now);
1111 return;
1112 }
1113
1114 /*
1115 * Prepare control register for clock syncing
1116 * (reset data port bit, set sync check control.
1117 */
1118 eacr.dp = 0;
1119 eacr.es = 1;
1120
1121 /*
1122 * Update eacr and try to synchronize the clock. If the update
1123 * of eacr caused a stepping port switch (or if we have to
1124 * assume that a stepping port switch has occured) or the
1125 * clock syncing failed, reset the sync check control bit
1126 * and set up a timer to try again after 0.5 seconds
1127 */
1128 etr_update_eacr(eacr);
1129 if (now < etr_tolec + (1600000 << 12) ||
1130 etr_sync_clock(&aib, sync_port) != 0) {
1131 /* Sync failed. Try again in 1/2 second. */
1132 eacr.es = 0;
1133 etr_update_eacr(eacr);
1134 etr_set_sync_timeout();
1135 } else
1136 etr_set_tolec_timeout(now);
1137}
1138
1139/*
1140 * Sysfs interface functions
1141 */
1142static struct sysdev_class etr_sysclass = {
1143 set_kset_name("etr")
1144};
1145
1146static struct sys_device etr_port0_dev = {
1147 .id = 0,
1148 .cls = &etr_sysclass,
1149};
1150
1151static struct sys_device etr_port1_dev = {
1152 .id = 1,
1153 .cls = &etr_sysclass,
1154};
1155
1156/*
1157 * ETR class attributes
1158 */
1159static ssize_t etr_stepping_port_show(struct sysdev_class *class, char *buf)
1160{
1161 return sprintf(buf, "%i\n", etr_port0.esw.p);
1162}
1163
1164static SYSDEV_CLASS_ATTR(stepping_port, 0400, etr_stepping_port_show, NULL);
1165
1166static ssize_t etr_stepping_mode_show(struct sysdev_class *class, char *buf)
1167{
1168 char *mode_str;
1169
1170 if (etr_mode_is_pps(etr_eacr))
1171 mode_str = "pps";
1172 else if (etr_mode_is_etr(etr_eacr))
1173 mode_str = "etr";
1174 else
1175 mode_str = "local";
1176 return sprintf(buf, "%s\n", mode_str);
1177}
1178
1179static SYSDEV_CLASS_ATTR(stepping_mode, 0400, etr_stepping_mode_show, NULL);
1180
1181/*
1182 * ETR port attributes
1183 */
1184static inline struct etr_aib *etr_aib_from_dev(struct sys_device *dev)
1185{
1186 if (dev == &etr_port0_dev)
1187 return etr_port0_online ? &etr_port0 : NULL;
1188 else
1189 return etr_port1_online ? &etr_port1 : NULL;
1190}
1191
1192static ssize_t etr_online_show(struct sys_device *dev, char *buf)
1193{
1194 unsigned int online;
1195
1196 online = (dev == &etr_port0_dev) ? etr_port0_online : etr_port1_online;
1197 return sprintf(buf, "%i\n", online);
1198}
1199
/*
 * Store "0" or "1" to take an ETR port offline or online. Schedules
 * the ETR tasklet with a port-change event. Returns count on success,
 * -EINVAL for other values, -ENOSYS without an ETR attachment.
 */
static ssize_t etr_online_store(struct sys_device *dev,
			const char *buf, size_t count)
{
	unsigned int value;

	value = simple_strtoul(buf, NULL, 0);
	if (value != 0 && value != 1)
		return -EINVAL;
	if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
		return -ENOSYS;
	if (dev == &etr_port0_dev) {
		if (etr_port0_online == value)
			return count;	/* Nothing to do. */
		etr_port0_online = value;
		set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
		tasklet_hi_schedule(&etr_tasklet);
	} else {
		if (etr_port1_online == value)
			return count;	/* Nothing to do. */
		etr_port1_online = value;
		set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
		tasklet_hi_schedule(&etr_tasklet);
	}
	return count;
}

static SYSDEV_ATTR(online, 0600, etr_online_show, etr_online_store);
1227
/* Show whether stepping control (eacr bit e0/e1) is set for the port. */
static ssize_t etr_stepping_control_show(struct sys_device *dev, char *buf)
{
	return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ?
		       etr_eacr.e0 : etr_eacr.e1);
}

static SYSDEV_ATTR(stepping_control, 0400, etr_stepping_control_show, NULL);
1235
/*
 * Show the logical port state code of the port. The status word is
 * shared: etr_tasklet_fn copies aib.esw into both etr_port0 and
 * etr_port1, so both psc fields can be read from etr_port0 here.
 */
static ssize_t etr_mode_code_show(struct sys_device *dev, char *buf)
{
	if (!etr_port0_online && !etr_port1_online)
		/* Status word is not uptodate if both ports are offline. */
		return -ENODATA;
	return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ?
		       etr_port0.esw.psc0 : etr_port0.esw.psc1);
}

static SYSDEV_ATTR(state_code, 0400, etr_mode_code_show, NULL);
1246
1247static ssize_t etr_untuned_show(struct sys_device *dev, char *buf)
1248{
1249 struct etr_aib *aib = etr_aib_from_dev(dev);
1250
1251 if (!aib || !aib->slsw.v1)
1252 return -ENODATA;
1253 return sprintf(buf, "%i\n", aib->edf1.u);
1254}
1255
1256static SYSDEV_ATTR(untuned, 0400, etr_untuned_show, NULL);
1257
1258static ssize_t etr_network_id_show(struct sys_device *dev, char *buf)
1259{
1260 struct etr_aib *aib = etr_aib_from_dev(dev);
1261
1262 if (!aib || !aib->slsw.v1)
1263 return -ENODATA;
1264 return sprintf(buf, "%i\n", aib->edf1.net_id);
1265}
1266
1267static SYSDEV_ATTR(network, 0400, etr_network_id_show, NULL);
1268
/* Show the ETR id (edf1.etr_id); valid only if data frame 1 is valid. */
static ssize_t etr_id_show(struct sys_device *dev, char *buf)
{
	struct etr_aib *aib = etr_aib_from_dev(dev);

	if (!aib || !aib->slsw.v1)
		return -ENODATA;
	return sprintf(buf, "%i\n", aib->edf1.etr_id);
}

static SYSDEV_ATTR(id, 0400, etr_id_show, NULL);
1279
/* Show the ETR port number (edf1.etr_pn); needs data frame 1 valid. */
static ssize_t etr_port_number_show(struct sys_device *dev, char *buf)
{
	struct etr_aib *aib = etr_aib_from_dev(dev);

	if (!aib || !aib->slsw.v1)
		return -ENODATA;
	return sprintf(buf, "%i\n", aib->edf1.etr_pn);
}

static SYSDEV_ATTR(port, 0400, etr_port_number_show, NULL);
1290
1291static ssize_t etr_coupled_show(struct sys_device *dev, char *buf)
1292{
1293 struct etr_aib *aib = etr_aib_from_dev(dev);
1294
1295 if (!aib || !aib->slsw.v3)
1296 return -ENODATA;
1297 return sprintf(buf, "%i\n", aib->edf3.c);
1298}
1299
1300static SYSDEV_ATTR(coupled, 0400, etr_coupled_show, NULL);
1301
/* Show the local time offset (edf3.blto); needs data frame 3 valid. */
static ssize_t etr_local_time_show(struct sys_device *dev, char *buf)
{
	struct etr_aib *aib = etr_aib_from_dev(dev);

	if (!aib || !aib->slsw.v3)
		return -ENODATA;
	return sprintf(buf, "%i\n", aib->edf3.blto);
}

static SYSDEV_ATTR(local_time, 0400, etr_local_time_show, NULL);
1312
/* Show the UTC offset (edf3.buo); needs data frame 3 valid. */
static ssize_t etr_utc_offset_show(struct sys_device *dev, char *buf)
{
	struct etr_aib *aib = etr_aib_from_dev(dev);

	if (!aib || !aib->slsw.v3)
		return -ENODATA;
	return sprintf(buf, "%i\n", aib->edf3.buo);
}

static SYSDEV_ATTR(utc_offset, 0400, etr_utc_offset_show, NULL);
1323
/* NULL-terminated list of attribute files created per ETR port sysdev. */
static struct sysdev_attribute *etr_port_attributes[] = {
	&attr_online,
	&attr_stepping_control,
	&attr_state_code,
	&attr_untuned,
	&attr_network,
	&attr_id,
	&attr_port,
	&attr_coupled,
	&attr_local_time,
	&attr_utc_offset,
	NULL
};
1337
/*
 * Register an ETR port sysdev and create its attribute files.
 * On failure everything created so far is torn down again.
 * Returns 0 on success or the sysdev error code.
 */
static int __init etr_register_port(struct sys_device *dev)
{
	struct sysdev_attribute **attr;
	int rc;

	rc = sysdev_register(dev);
	if (rc)
		goto out;
	for (attr = etr_port_attributes; *attr; attr++) {
		rc = sysdev_create_file(dev, *attr);
		if (rc)
			goto out_unreg;
	}
	return 0;
out_unreg:
	/* Walk back over the files created so far; removing the one
	 * that just failed to be created is harmless. */
	for (; attr >= etr_port_attributes; attr--)
		sysdev_remove_file(dev, *attr);
	sysdev_unregister(dev);
out:
	return rc;
}
1359
1360static void __init etr_unregister_port(struct sys_device *dev)
1361{
1362 struct sysdev_attribute **attr;
1363
1364 for (attr = etr_port_attributes; *attr; attr++)
1365 sysdev_remove_file(dev, *attr);
1366 sysdev_unregister(dev);
1367}
1368
/*
 * Create the "etr" sysdev class with its two class attributes and
 * register both port devices. Everything is undone in reverse order
 * on any failure. Run as a device_initcall.
 */
static int __init etr_init_sysfs(void)
{
	int rc;

	rc = sysdev_class_register(&etr_sysclass);
	if (rc)
		goto out;
	rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_port);
	if (rc)
		goto out_unreg_class;
	rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_mode);
	if (rc)
		goto out_remove_stepping_port;
	rc = etr_register_port(&etr_port0_dev);
	if (rc)
		goto out_remove_stepping_mode;
	rc = etr_register_port(&etr_port1_dev);
	if (rc)
		goto out_remove_port0;
	return 0;

out_remove_port0:
	etr_unregister_port(&etr_port0_dev);
out_remove_stepping_mode:
	sysdev_class_remove_file(&etr_sysclass, &attr_stepping_mode);
out_remove_stepping_port:
	sysdev_class_remove_file(&etr_sysclass, &attr_stepping_port);
out_unreg_class:
	sysdev_class_unregister(&etr_sysclass);
out:
	return rc;
}

device_initcall(etr_init_sysfs);
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 3cbb0dcf1f1d..f0e5a320e2ec 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -283,7 +283,7 @@ char *task_show_regs(struct task_struct *task, char *buffer)
283 return buffer; 283 return buffer;
284} 284}
285 285
286DEFINE_SPINLOCK(die_lock); 286static DEFINE_SPINLOCK(die_lock);
287 287
288void die(const char * str, struct pt_regs * regs, long err) 288void die(const char * str, struct pt_regs * regs, long err)
289{ 289{
@@ -364,8 +364,7 @@ void __kprobes do_single_step(struct pt_regs *regs)
364 force_sig(SIGTRAP, current); 364 force_sig(SIGTRAP, current);
365} 365}
366 366
367asmlinkage void 367static void default_trap_handler(struct pt_regs * regs, long interruption_code)
368default_trap_handler(struct pt_regs * regs, long interruption_code)
369{ 368{
370 if (regs->psw.mask & PSW_MASK_PSTATE) { 369 if (regs->psw.mask & PSW_MASK_PSTATE) {
371 local_irq_enable(); 370 local_irq_enable();
@@ -376,7 +375,7 @@ default_trap_handler(struct pt_regs * regs, long interruption_code)
376} 375}
377 376
378#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \ 377#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
379asmlinkage void name(struct pt_regs * regs, long interruption_code) \ 378static void name(struct pt_regs * regs, long interruption_code) \
380{ \ 379{ \
381 siginfo_t info; \ 380 siginfo_t info; \
382 info.si_signo = signr; \ 381 info.si_signo = signr; \
@@ -442,7 +441,7 @@ do_fp_trap(struct pt_regs *regs, void __user *location,
442 "floating point exception", regs, &si); 441 "floating point exception", regs, &si);
443} 442}
444 443
445asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code) 444static void illegal_op(struct pt_regs * regs, long interruption_code)
446{ 445{
447 siginfo_t info; 446 siginfo_t info;
448 __u8 opcode[6]; 447 __u8 opcode[6];
@@ -491,8 +490,15 @@ asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code)
491#endif 490#endif
492 } else 491 } else
493 signal = SIGILL; 492 signal = SIGILL;
494 } else 493 } else {
495 signal = SIGILL; 494 /*
495 * If we get an illegal op in kernel mode, send it through the
496 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
497 */
498 if (notify_die(DIE_BPT, "bpt", regs, interruption_code,
499 3, SIGTRAP) != NOTIFY_STOP)
500 signal = SIGILL;
501 }
496 502
497#ifdef CONFIG_MATHEMU 503#ifdef CONFIG_MATHEMU
498 if (signal == SIGFPE) 504 if (signal == SIGFPE)
@@ -585,7 +591,7 @@ DO_ERROR_INFO(SIGILL, "specification exception", specification_exception,
585 ILL_ILLOPN, get_check_address(regs)); 591 ILL_ILLOPN, get_check_address(regs));
586#endif 592#endif
587 593
588asmlinkage void data_exception(struct pt_regs * regs, long interruption_code) 594static void data_exception(struct pt_regs * regs, long interruption_code)
589{ 595{
590 __u16 __user *location; 596 __u16 __user *location;
591 int signal = 0; 597 int signal = 0;
@@ -675,7 +681,7 @@ asmlinkage void data_exception(struct pt_regs * regs, long interruption_code)
675 } 681 }
676} 682}
677 683
678asmlinkage void space_switch_exception(struct pt_regs * regs, long int_code) 684static void space_switch_exception(struct pt_regs * regs, long int_code)
679{ 685{
680 siginfo_t info; 686 siginfo_t info;
681 687
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index fe0f2e97ba7b..a48907392522 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -31,18 +31,19 @@ SECTIONS
31 31
32 _etext = .; /* End of text section */ 32 _etext = .; /* End of text section */
33 33
34 . = ALIGN(16); /* Exception table */
35 __start___ex_table = .;
36 __ex_table : { *(__ex_table) }
37 __stop___ex_table = .;
38
39 RODATA 34 RODATA
40 35
41#ifdef CONFIG_SHARED_KERNEL 36#ifdef CONFIG_SHARED_KERNEL
42 . = ALIGN(1048576); /* VM shared segments are 1MB aligned */ 37 . = ALIGN(1048576); /* VM shared segments are 1MB aligned */
38#endif
43 39
40 . = ALIGN(4096);
44 _eshared = .; /* End of shareable data */ 41 _eshared = .; /* End of shareable data */
45#endif 42
43 . = ALIGN(16); /* Exception table */
44 __start___ex_table = .;
45 __ex_table : { *(__ex_table) }
46 __stop___ex_table = .;
46 47
47 .data : { /* Data */ 48 .data : { /* Data */
48 *(.data) 49 *(.data)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 21baaf5496d6..9d5b02801b46 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -25,7 +25,7 @@
25#include <asm/irq_regs.h> 25#include <asm/irq_regs.h>
26 26
27static ext_int_info_t ext_int_info_timer; 27static ext_int_info_t ext_int_info_timer;
28DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); 28static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
29 29
30#ifdef CONFIG_VIRT_CPU_ACCOUNTING 30#ifdef CONFIG_VIRT_CPU_ACCOUNTING
31/* 31/*
@@ -524,16 +524,15 @@ EXPORT_SYMBOL(del_virt_timer);
524void init_cpu_vtimer(void) 524void init_cpu_vtimer(void)
525{ 525{
526 struct vtimer_queue *vt_list; 526 struct vtimer_queue *vt_list;
527 unsigned long cr0;
528 527
529 /* kick the virtual timer */ 528 /* kick the virtual timer */
530 S390_lowcore.exit_timer = VTIMER_MAX_SLICE; 529 S390_lowcore.exit_timer = VTIMER_MAX_SLICE;
531 S390_lowcore.last_update_timer = VTIMER_MAX_SLICE; 530 S390_lowcore.last_update_timer = VTIMER_MAX_SLICE;
532 asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer)); 531 asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
533 asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock)); 532 asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock));
534 __ctl_store(cr0, 0, 0); 533
535 cr0 |= 0x400; 534 /* enable cpu timer interrupts */
536 __ctl_load(cr0, 0, 0); 535 __ctl_set_bit(0,10);
537 536
538 vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); 537 vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
539 INIT_LIST_HEAD(&vt_list->list); 538 INIT_LIST_HEAD(&vt_list->list);
@@ -572,6 +571,7 @@ void __init vtime_init(void)
572 if (register_idle_notifier(&vtimer_idle_nb)) 571 if (register_idle_notifier(&vtimer_idle_nb))
573 panic("Couldn't register idle notifier"); 572 panic("Couldn't register idle notifier");
574 573
574 /* Enable cpu timer interrupts on the boot cpu. */
575 init_cpu_vtimer(); 575 init_cpu_vtimer();
576} 576}
577 577
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index b5f94cf3bde8..7a44fed21b35 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -4,7 +4,7 @@
4 4
5EXTRA_AFLAGS := -traditional 5EXTRA_AFLAGS := -traditional
6 6
7lib-y += delay.o string.o uaccess_std.o uaccess_pt.o 7lib-y += delay.o string.o uaccess_std.o uaccess_pt.o qrnnd.o
8lib-$(CONFIG_32BIT) += div64.o 8lib-$(CONFIG_32BIT) += div64.o
9lib-$(CONFIG_64BIT) += uaccess_mvcos.o 9lib-$(CONFIG_64BIT) += uaccess_mvcos.o
10lib-$(CONFIG_SMP) += spinlock.o 10lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 027c4742a001..02854449b74b 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/s390/kernel/delay.c 2 * arch/s390/lib/delay.c
3 * Precise Delay Loops for S390 3 * Precise Delay Loops for S390
4 * 4 *
5 * S390 version 5 * S390 version
@@ -13,10 +13,8 @@
13 13
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16 16#include <linux/timex.h>
17#ifdef CONFIG_SMP 17#include <linux/irqflags.h>
18#include <asm/smp.h>
19#endif
20 18
21void __delay(unsigned long loops) 19void __delay(unsigned long loops)
22{ 20{
@@ -31,17 +29,39 @@ void __delay(unsigned long loops)
31} 29}
32 30
33/* 31/*
34 * Waits for 'usecs' microseconds using the tod clock, giving up the time slice 32 * Waits for 'usecs' microseconds using the TOD clock comparator.
35 * of the virtual PU inbetween to avoid congestion.
36 */ 33 */
37void __udelay(unsigned long usecs) 34void __udelay(unsigned long usecs)
38{ 35{
39 uint64_t start_cc; 36 u64 end, time, jiffy_timer = 0;
37 unsigned long flags, cr0, mask, dummy;
38
39 local_irq_save(flags);
40 if (raw_irqs_disabled_flags(flags)) {
41 jiffy_timer = S390_lowcore.jiffy_timer;
42 S390_lowcore.jiffy_timer = -1ULL - (4096 << 12);
43 __ctl_store(cr0, 0, 0);
44 dummy = (cr0 & 0xffff00e0) | 0x00000800;
45 __ctl_load(dummy , 0, 0);
46 mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
47 } else
48 mask = psw_kernel_bits | PSW_MASK_WAIT |
49 PSW_MASK_EXT | PSW_MASK_IO;
50
51 end = get_clock() + ((u64) usecs << 12);
52 do {
53 time = end < S390_lowcore.jiffy_timer ?
54 end : S390_lowcore.jiffy_timer;
55 set_clock_comparator(time);
56 trace_hardirqs_on();
57 __load_psw_mask(mask);
58 local_irq_disable();
59 } while (get_clock() < end);
40 60
41 if (usecs == 0) 61 if (raw_irqs_disabled_flags(flags)) {
42 return; 62 __ctl_load(cr0, 0, 0);
43 start_cc = get_clock(); 63 S390_lowcore.jiffy_timer = jiffy_timer;
44 do { 64 }
45 cpu_relax(); 65 set_clock_comparator(S390_lowcore.jiffy_timer);
46 } while (((get_clock() - start_cc)/4096) < usecs); 66 local_irq_restore(flags);
47} 67}
diff --git a/arch/s390/lib/qrnnd.S b/arch/s390/lib/qrnnd.S
new file mode 100644
index 000000000000..eb1df632e749
--- /dev/null
+++ b/arch/s390/lib/qrnnd.S
@@ -0,0 +1,77 @@
1# S/390 __udiv_qrnnd
2
3# r2 : &__r
4# r3 : upper half of 64 bit word n
5# r4 : lower half of 64 bit word n
6# r5 : divisor d
7# the reminder r of the division is to be stored to &__r and
8# the quotient q is to be returned
9
10 .text
11 .globl __udiv_qrnnd
12__udiv_qrnnd:
13 st %r2,24(%r15) # store pointer to reminder for later
14 lr %r0,%r3 # reload n
15 lr %r1,%r4
16 ltr %r2,%r5 # reload and test divisor
17 jp 5f
18 # divisor >= 0x80000000
19 srdl %r0,2 # n/4
20 srl %r2,1 # d/2
21 slr %r1,%r2 # special case if last bit of d is set
22 brc 3,0f # (n/4) div (n/2) can overflow by 1
23 ahi %r0,-1 # trick: subtract n/2, then divide
240: dr %r0,%r2 # signed division
25 ahi %r1,1 # trick part 2: add 1 to the quotient
26 # now (n >> 2) = (d >> 1) * %r1 + %r0
27 lhi %r3,1
28 nr %r3,%r1 # test last bit of q
29 jz 1f
30 alr %r0,%r2 # add (d>>1) to r
311: srl %r1,1 # q >>= 1
32 # now (n >> 2) = (d&-2) * %r1 + %r0
33 lhi %r3,1
34 nr %r3,%r5 # test last bit of d
35 jz 2f
36 slr %r0,%r1 # r -= q
37 brc 3,2f # borrow ?
38 alr %r0,%r5 # r += d
39 ahi %r1,-1
402: # now (n >> 2) = d * %r1 + %r0
41 alr %r1,%r1 # q <<= 1
42 alr %r0,%r0 # r <<= 1
43 brc 12,3f # overflow on r ?
44 slr %r0,%r5 # r -= d
45 ahi %r1,1 # q += 1
463: lhi %r3,2
47 nr %r3,%r4 # test next to last bit of n
48 jz 4f
49 ahi %r0,1 # r += 1
504: clr %r0,%r5 # r >= d ?
51 jl 6f
52 slr %r0,%r5 # r -= d
53 ahi %r1,1 # q += 1
54 # now (n >> 1) = d * %r1 + %r0
55 j 6f
565: # divisor < 0x80000000
57 srdl %r0,1
58 dr %r0,%r2 # signed division
59 # now (n >> 1) = d * %r1 + %r0
606: alr %r1,%r1 # q <<= 1
61 alr %r0,%r0 # r <<= 1
62 brc 12,7f # overflow on r ?
63 slr %r0,%r5 # r -= d
64 ahi %r1,1 # q += 1
657: lhi %r3,1
66 nr %r3,%r4 # isolate last bit of n
67 alr %r0,%r3 # r += (n & 1)
68 clr %r0,%r5 # r >= d ?
69 jl 8f
70 slr %r0,%r5 # r -= d
71 ahi %r1,1 # q += 1
728: # now n = d * %r1 + %r0
73 l %r2,24(%r15)
74 st %r0,0(%r2)
75 lr %r2,%r1
76 br %r14
77 .end __udiv_qrnnd
diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h
new file mode 100644
index 000000000000..126011df14f1
--- /dev/null
+++ b/arch/s390/lib/uaccess.h
@@ -0,0 +1,23 @@
1/*
2 * arch/s390/uaccess.h
3 *
4 * Copyright IBM Corp. 2007
5 *
6 */
7
8#ifndef __ARCH_S390_LIB_UACCESS_H
9#define __ARCH_S390_LIB_UACCESS_H
10
11extern size_t copy_from_user_std(size_t, const void __user *, void *);
12extern size_t copy_to_user_std(size_t, void __user *, const void *);
13extern size_t strnlen_user_std(size_t, const char __user *);
14extern size_t strncpy_from_user_std(size_t, const char __user *, char *);
15extern int futex_atomic_cmpxchg_std(int __user *, int, int);
16extern int futex_atomic_op_std(int, int __user *, int, int *);
17
18extern size_t copy_from_user_pt(size_t, const void __user *, void *);
19extern size_t copy_to_user_pt(size_t, void __user *, const void *);
20extern int futex_atomic_op_pt(int, int __user *, int, int *);
21extern int futex_atomic_cmpxchg_pt(int __user *, int, int);
22
23#endif /* __ARCH_S390_LIB_UACCESS_H */
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index f9a23d57eb79..6d8772339d76 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -12,6 +12,7 @@
12#include <linux/mm.h> 12#include <linux/mm.h>
13#include <asm/uaccess.h> 13#include <asm/uaccess.h>
14#include <asm/futex.h> 14#include <asm/futex.h>
15#include "uaccess.h"
15 16
16#ifndef __s390x__ 17#ifndef __s390x__
17#define AHI "ahi" 18#define AHI "ahi"
@@ -27,10 +28,7 @@
27#define SLR "slgr" 28#define SLR "slgr"
28#endif 29#endif
29 30
30extern size_t copy_from_user_std(size_t, const void __user *, void *); 31static size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
31extern size_t copy_to_user_std(size_t, void __user *, const void *);
32
33size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
34{ 32{
35 register unsigned long reg0 asm("0") = 0x81UL; 33 register unsigned long reg0 asm("0") = 0x81UL;
36 unsigned long tmp1, tmp2; 34 unsigned long tmp1, tmp2;
@@ -69,14 +67,14 @@ size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
69 return size; 67 return size;
70} 68}
71 69
72size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x) 70static size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x)
73{ 71{
74 if (size <= 256) 72 if (size <= 256)
75 return copy_from_user_std(size, ptr, x); 73 return copy_from_user_std(size, ptr, x);
76 return copy_from_user_mvcos(size, ptr, x); 74 return copy_from_user_mvcos(size, ptr, x);
77} 75}
78 76
79size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x) 77static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
80{ 78{
81 register unsigned long reg0 asm("0") = 0x810000UL; 79 register unsigned long reg0 asm("0") = 0x810000UL;
82 unsigned long tmp1, tmp2; 80 unsigned long tmp1, tmp2;
@@ -105,14 +103,16 @@ size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
105 return size; 103 return size;
106} 104}
107 105
108size_t copy_to_user_mvcos_check(size_t size, void __user *ptr, const void *x) 106static size_t copy_to_user_mvcos_check(size_t size, void __user *ptr,
107 const void *x)
109{ 108{
110 if (size <= 256) 109 if (size <= 256)
111 return copy_to_user_std(size, ptr, x); 110 return copy_to_user_std(size, ptr, x);
112 return copy_to_user_mvcos(size, ptr, x); 111 return copy_to_user_mvcos(size, ptr, x);
113} 112}
114 113
115size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from) 114static size_t copy_in_user_mvcos(size_t size, void __user *to,
115 const void __user *from)
116{ 116{
117 register unsigned long reg0 asm("0") = 0x810081UL; 117 register unsigned long reg0 asm("0") = 0x810081UL;
118 unsigned long tmp1, tmp2; 118 unsigned long tmp1, tmp2;
@@ -134,7 +134,7 @@ size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from)
134 return size; 134 return size;
135} 135}
136 136
137size_t clear_user_mvcos(size_t size, void __user *to) 137static size_t clear_user_mvcos(size_t size, void __user *to)
138{ 138{
139 register unsigned long reg0 asm("0") = 0x810000UL; 139 register unsigned long reg0 asm("0") = 0x810000UL;
140 unsigned long tmp1, tmp2; 140 unsigned long tmp1, tmp2;
@@ -162,10 +162,43 @@ size_t clear_user_mvcos(size_t size, void __user *to)
162 return size; 162 return size;
163} 163}
164 164
165extern size_t strnlen_user_std(size_t, const char __user *); 165static size_t strnlen_user_mvcos(size_t count, const char __user *src)
166extern size_t strncpy_from_user_std(size_t, const char __user *, char *); 166{
167extern int futex_atomic_op(int, int __user *, int, int *); 167 char buf[256];
168extern int futex_atomic_cmpxchg(int __user *, int, int); 168 int rc;
169 size_t done, len, len_str;
170
171 done = 0;
172 do {
173 len = min(count - done, (size_t) 256);
174 rc = uaccess.copy_from_user(len, src + done, buf);
175 if (unlikely(rc == len))
176 return 0;
177 len -= rc;
178 len_str = strnlen(buf, len);
179 done += len_str;
180 } while ((len_str == len) && (done < count));
181 return done + 1;
182}
183
184static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
185 char *dst)
186{
187 int rc;
188 size_t done, len, len_str;
189
190 done = 0;
191 do {
192 len = min(count - done, (size_t) 4096);
193 rc = uaccess.copy_from_user(len, src + done, dst);
194 if (unlikely(rc == len))
195 return -EFAULT;
196 len -= rc;
197 len_str = strnlen(dst, len);
198 done += len_str;
199 } while ((len_str == len) && (done < count));
200 return done;
201}
169 202
170struct uaccess_ops uaccess_mvcos = { 203struct uaccess_ops uaccess_mvcos = {
171 .copy_from_user = copy_from_user_mvcos_check, 204 .copy_from_user = copy_from_user_mvcos_check,
@@ -176,6 +209,21 @@ struct uaccess_ops uaccess_mvcos = {
176 .clear_user = clear_user_mvcos, 209 .clear_user = clear_user_mvcos,
177 .strnlen_user = strnlen_user_std, 210 .strnlen_user = strnlen_user_std,
178 .strncpy_from_user = strncpy_from_user_std, 211 .strncpy_from_user = strncpy_from_user_std,
179 .futex_atomic_op = futex_atomic_op, 212 .futex_atomic_op = futex_atomic_op_std,
180 .futex_atomic_cmpxchg = futex_atomic_cmpxchg, 213 .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
214};
215
216#ifdef CONFIG_S390_SWITCH_AMODE
217struct uaccess_ops uaccess_mvcos_switch = {
218 .copy_from_user = copy_from_user_mvcos,
219 .copy_from_user_small = copy_from_user_mvcos,
220 .copy_to_user = copy_to_user_mvcos,
221 .copy_to_user_small = copy_to_user_mvcos,
222 .copy_in_user = copy_in_user_mvcos,
223 .clear_user = clear_user_mvcos,
224 .strnlen_user = strnlen_user_mvcos,
225 .strncpy_from_user = strncpy_from_user_mvcos,
226 .futex_atomic_op = futex_atomic_op_pt,
227 .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
181}; 228};
229#endif
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 49c3e46b4065..63181671e3e3 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * arch/s390/lib/uaccess_pt.c 2 * arch/s390/lib/uaccess_pt.c
3 * 3 *
4 * User access functions based on page table walks. 4 * User access functions based on page table walks for enhanced
5 * system layout without hardware support.
5 * 6 *
6 * Copyright IBM Corp. 2006 7 * Copyright IBM Corp. 2006
7 * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com) 8 * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
@@ -12,9 +13,10 @@
12#include <linux/mm.h> 13#include <linux/mm.h>
13#include <asm/uaccess.h> 14#include <asm/uaccess.h>
14#include <asm/futex.h> 15#include <asm/futex.h>
16#include "uaccess.h"
15 17
16static inline int __handle_fault(struct mm_struct *mm, unsigned long address, 18static int __handle_fault(struct mm_struct *mm, unsigned long address,
17 int write_access) 19 int write_access)
18{ 20{
19 struct vm_area_struct *vma; 21 struct vm_area_struct *vma;
20 int ret = -EFAULT; 22 int ret = -EFAULT;
@@ -79,8 +81,8 @@ out_sigbus:
79 return ret; 81 return ret;
80} 82}
81 83
82static inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, 84static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
83 size_t n, int write_user) 85 size_t n, int write_user)
84{ 86{
85 struct mm_struct *mm = current->mm; 87 struct mm_struct *mm = current->mm;
86 unsigned long offset, pfn, done, size; 88 unsigned long offset, pfn, done, size;
@@ -133,6 +135,49 @@ fault:
133 goto retry; 135 goto retry;
134} 136}
135 137
138/*
139 * Do DAT for user address by page table walk, return kernel address.
140 * This function needs to be called with current->mm->page_table_lock held.
141 */
142static unsigned long __dat_user_addr(unsigned long uaddr)
143{
144 struct mm_struct *mm = current->mm;
145 unsigned long pfn, ret;
146 pgd_t *pgd;
147 pmd_t *pmd;
148 pte_t *pte;
149 int rc;
150
151 ret = 0;
152retry:
153 pgd = pgd_offset(mm, uaddr);
154 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
155 goto fault;
156
157 pmd = pmd_offset(pgd, uaddr);
158 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
159 goto fault;
160
161 pte = pte_offset_map(pmd, uaddr);
162 if (!pte || !pte_present(*pte))
163 goto fault;
164
165 pfn = pte_pfn(*pte);
166 if (!pfn_valid(pfn))
167 goto out;
168
169 ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
170out:
171 return ret;
172fault:
173 spin_unlock(&mm->page_table_lock);
174 rc = __handle_fault(mm, uaddr, 0);
175 spin_lock(&mm->page_table_lock);
176 if (rc)
177 goto out;
178 goto retry;
179}
180
136size_t copy_from_user_pt(size_t n, const void __user *from, void *to) 181size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
137{ 182{
138 size_t rc; 183 size_t rc;
@@ -155,3 +200,277 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
155 } 200 }
156 return __user_copy_pt((unsigned long) to, (void *) from, n, 1); 201 return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
157} 202}
203
204static size_t clear_user_pt(size_t n, void __user *to)
205{
206 long done, size, ret;
207
208 if (segment_eq(get_fs(), KERNEL_DS)) {
209 memset((void __kernel __force *) to, 0, n);
210 return 0;
211 }
212 done = 0;
213 do {
214 if (n - done > PAGE_SIZE)
215 size = PAGE_SIZE;
216 else
217 size = n - done;
218 ret = __user_copy_pt((unsigned long) to + done,
219 &empty_zero_page, size, 1);
220 done += size;
221 if (ret)
222 return ret + n - done;
223 } while (done < n);
224 return 0;
225}
226
227static size_t strnlen_user_pt(size_t count, const char __user *src)
228{
229 char *addr;
230 unsigned long uaddr = (unsigned long) src;
231 struct mm_struct *mm = current->mm;
232 unsigned long offset, pfn, done, len;
233 pgd_t *pgd;
234 pmd_t *pmd;
235 pte_t *pte;
236 size_t len_str;
237
238 if (segment_eq(get_fs(), KERNEL_DS))
239 return strnlen((const char __kernel __force *) src, count) + 1;
240 done = 0;
241retry:
242 spin_lock(&mm->page_table_lock);
243 do {
244 pgd = pgd_offset(mm, uaddr);
245 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
246 goto fault;
247
248 pmd = pmd_offset(pgd, uaddr);
249 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
250 goto fault;
251
252 pte = pte_offset_map(pmd, uaddr);
253 if (!pte || !pte_present(*pte))
254 goto fault;
255
256 pfn = pte_pfn(*pte);
257 if (!pfn_valid(pfn)) {
258 done = -1;
259 goto out;
260 }
261
262 offset = uaddr & (PAGE_SIZE-1);
263 addr = (char *)(pfn << PAGE_SHIFT) + offset;
264 len = min(count - done, PAGE_SIZE - offset);
265 len_str = strnlen(addr, len);
266 done += len_str;
267 uaddr += len_str;
268 } while ((len_str == len) && (done < count));
269out:
270 spin_unlock(&mm->page_table_lock);
271 return done + 1;
272fault:
273 spin_unlock(&mm->page_table_lock);
274 if (__handle_fault(mm, uaddr, 0)) {
275 return 0;
276 }
277 goto retry;
278}
279
280static size_t strncpy_from_user_pt(size_t count, const char __user *src,
281 char *dst)
282{
283 size_t n = strnlen_user_pt(count, src);
284
285 if (!n)
286 return -EFAULT;
287 if (n > count)
288 n = count;
289 if (segment_eq(get_fs(), KERNEL_DS)) {
290 memcpy(dst, (const char __kernel __force *) src, n);
291 if (dst[n-1] == '\0')
292 return n-1;
293 else
294 return n;
295 }
296 if (__user_copy_pt((unsigned long) src, dst, n, 0))
297 return -EFAULT;
298 if (dst[n-1] == '\0')
299 return n-1;
300 else
301 return n;
302}
303
304static size_t copy_in_user_pt(size_t n, void __user *to,
305 const void __user *from)
306{
307 struct mm_struct *mm = current->mm;
308 unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
309 uaddr, done, size;
310 unsigned long uaddr_from = (unsigned long) from;
311 unsigned long uaddr_to = (unsigned long) to;
312 pgd_t *pgd_from, *pgd_to;
313 pmd_t *pmd_from, *pmd_to;
314 pte_t *pte_from, *pte_to;
315 int write_user;
316
317 done = 0;
318retry:
319 spin_lock(&mm->page_table_lock);
320 do {
321 pgd_from = pgd_offset(mm, uaddr_from);
322 if (pgd_none(*pgd_from) || unlikely(pgd_bad(*pgd_from))) {
323 uaddr = uaddr_from;
324 write_user = 0;
325 goto fault;
326 }
327 pgd_to = pgd_offset(mm, uaddr_to);
328 if (pgd_none(*pgd_to) || unlikely(pgd_bad(*pgd_to))) {
329 uaddr = uaddr_to;
330 write_user = 1;
331 goto fault;
332 }
333
334 pmd_from = pmd_offset(pgd_from, uaddr_from);
335 if (pmd_none(*pmd_from) || unlikely(pmd_bad(*pmd_from))) {
336 uaddr = uaddr_from;
337 write_user = 0;
338 goto fault;
339 }
340 pmd_to = pmd_offset(pgd_to, uaddr_to);
341 if (pmd_none(*pmd_to) || unlikely(pmd_bad(*pmd_to))) {
342 uaddr = uaddr_to;
343 write_user = 1;
344 goto fault;
345 }
346
347 pte_from = pte_offset_map(pmd_from, uaddr_from);
348 if (!pte_from || !pte_present(*pte_from)) {
349 uaddr = uaddr_from;
350 write_user = 0;
351 goto fault;
352 }
353 pte_to = pte_offset_map(pmd_to, uaddr_to);
354 if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
355 uaddr = uaddr_to;
356 write_user = 1;
357 goto fault;
358 }
359
360 pfn_from = pte_pfn(*pte_from);
361 if (!pfn_valid(pfn_from))
362 goto out;
363 pfn_to = pte_pfn(*pte_to);
364 if (!pfn_valid(pfn_to))
365 goto out;
366
367 offset_from = uaddr_from & (PAGE_SIZE-1);
368 offset_to = uaddr_from & (PAGE_SIZE-1);
369 offset_max = max(offset_from, offset_to);
370 size = min(n - done, PAGE_SIZE - offset_max);
371
372 memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
373 (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
374 done += size;
375 uaddr_from += size;
376 uaddr_to += size;
377 } while (done < n);
378out:
379 spin_unlock(&mm->page_table_lock);
380 return n - done;
381fault:
382 spin_unlock(&mm->page_table_lock);
383 if (__handle_fault(mm, uaddr, write_user))
384 return n - done;
385 goto retry;
386}
387
388#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
389 asm volatile("0: l %1,0(%6)\n" \
390 "1: " insn \
391 "2: cs %1,%2,0(%6)\n" \
392 "3: jl 1b\n" \
393 " lhi %0,0\n" \
394 "4:\n" \
395 EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
396 : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
397 "=m" (*uaddr) \
398 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
399 "m" (*uaddr) : "cc" );
400
401int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
402{
403 int oldval = 0, newval, ret;
404
405 spin_lock(&current->mm->page_table_lock);
406 uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
407 if (!uaddr) {
408 spin_unlock(&current->mm->page_table_lock);
409 return -EFAULT;
410 }
411 get_page(virt_to_page(uaddr));
412 spin_unlock(&current->mm->page_table_lock);
413 switch (op) {
414 case FUTEX_OP_SET:
415 __futex_atomic_op("lr %2,%5\n",
416 ret, oldval, newval, uaddr, oparg);
417 break;
418 case FUTEX_OP_ADD:
419 __futex_atomic_op("lr %2,%1\nar %2,%5\n",
420 ret, oldval, newval, uaddr, oparg);
421 break;
422 case FUTEX_OP_OR:
423 __futex_atomic_op("lr %2,%1\nor %2,%5\n",
424 ret, oldval, newval, uaddr, oparg);
425 break;
426 case FUTEX_OP_ANDN:
427 __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
428 ret, oldval, newval, uaddr, oparg);
429 break;
430 case FUTEX_OP_XOR:
431 __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
432 ret, oldval, newval, uaddr, oparg);
433 break;
434 default:
435 ret = -ENOSYS;
436 }
437 put_page(virt_to_page(uaddr));
438 *old = oldval;
439 return ret;
440}
441
442int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
443{
444 int ret;
445
446 spin_lock(&current->mm->page_table_lock);
447 uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
448 if (!uaddr) {
449 spin_unlock(&current->mm->page_table_lock);
450 return -EFAULT;
451 }
452 get_page(virt_to_page(uaddr));
453 spin_unlock(&current->mm->page_table_lock);
454 asm volatile(" cs %1,%4,0(%5)\n"
455 "0: lr %0,%1\n"
456 "1:\n"
457 EX_TABLE(0b,1b)
458 : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
459 : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
460 : "cc", "memory" );
461 put_page(virt_to_page(uaddr));
462 return ret;
463}
464
465struct uaccess_ops uaccess_pt = {
466 .copy_from_user = copy_from_user_pt,
467 .copy_from_user_small = copy_from_user_pt,
468 .copy_to_user = copy_to_user_pt,
469 .copy_to_user_small = copy_to_user_pt,
470 .copy_in_user = copy_in_user_pt,
471 .clear_user = clear_user_pt,
472 .strnlen_user = strnlen_user_pt,
473 .strncpy_from_user = strncpy_from_user_pt,
474 .futex_atomic_op = futex_atomic_op_pt,
475 .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
476};
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
index 56a0214e9928..28c4500a58d0 100644
--- a/arch/s390/lib/uaccess_std.c
+++ b/arch/s390/lib/uaccess_std.c
@@ -13,6 +13,7 @@
13#include <linux/mm.h> 13#include <linux/mm.h>
14#include <linux/uaccess.h> 14#include <linux/uaccess.h>
15#include <asm/futex.h> 15#include <asm/futex.h>
16#include "uaccess.h"
16 17
17#ifndef __s390x__ 18#ifndef __s390x__
18#define AHI "ahi" 19#define AHI "ahi"
@@ -28,9 +29,6 @@
28#define SLR "slgr" 29#define SLR "slgr"
29#endif 30#endif
30 31
31extern size_t copy_from_user_pt(size_t n, const void __user *from, void *to);
32extern size_t copy_to_user_pt(size_t n, void __user *to, const void *from);
33
34size_t copy_from_user_std(size_t size, const void __user *ptr, void *x) 32size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
35{ 33{
36 unsigned long tmp1, tmp2; 34 unsigned long tmp1, tmp2;
@@ -72,7 +70,8 @@ size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
72 return size; 70 return size;
73} 71}
74 72
75size_t copy_from_user_std_check(size_t size, const void __user *ptr, void *x) 73static size_t copy_from_user_std_check(size_t size, const void __user *ptr,
74 void *x)
76{ 75{
77 if (size <= 1024) 76 if (size <= 1024)
78 return copy_from_user_std(size, ptr, x); 77 return copy_from_user_std(size, ptr, x);
@@ -110,14 +109,16 @@ size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
110 return size; 109 return size;
111} 110}
112 111
113size_t copy_to_user_std_check(size_t size, void __user *ptr, const void *x) 112static size_t copy_to_user_std_check(size_t size, void __user *ptr,
113 const void *x)
114{ 114{
115 if (size <= 1024) 115 if (size <= 1024)
116 return copy_to_user_std(size, ptr, x); 116 return copy_to_user_std(size, ptr, x);
117 return copy_to_user_pt(size, ptr, x); 117 return copy_to_user_pt(size, ptr, x);
118} 118}
119 119
120size_t copy_in_user_std(size_t size, void __user *to, const void __user *from) 120static size_t copy_in_user_std(size_t size, void __user *to,
121 const void __user *from)
121{ 122{
122 unsigned long tmp1; 123 unsigned long tmp1;
123 124
@@ -148,7 +149,7 @@ size_t copy_in_user_std(size_t size, void __user *to, const void __user *from)
148 return size; 149 return size;
149} 150}
150 151
151size_t clear_user_std(size_t size, void __user *to) 152static size_t clear_user_std(size_t size, void __user *to)
152{ 153{
153 unsigned long tmp1, tmp2; 154 unsigned long tmp1, tmp2;
154 155
@@ -254,7 +255,7 @@ size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst)
254 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ 255 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
255 "m" (*uaddr) : "cc"); 256 "m" (*uaddr) : "cc");
256 257
257int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old) 258int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old)
258{ 259{
259 int oldval = 0, newval, ret; 260 int oldval = 0, newval, ret;
260 261
@@ -286,7 +287,7 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old)
286 return ret; 287 return ret;
287} 288}
288 289
289int futex_atomic_cmpxchg(int __user *uaddr, int oldval, int newval) 290int futex_atomic_cmpxchg_std(int __user *uaddr, int oldval, int newval)
290{ 291{
291 int ret; 292 int ret;
292 293
@@ -311,6 +312,6 @@ struct uaccess_ops uaccess_std = {
311 .clear_user = clear_user_std, 312 .clear_user = clear_user_std,
312 .strnlen_user = strnlen_user_std, 313 .strnlen_user = strnlen_user_std,
313 .strncpy_from_user = strncpy_from_user_std, 314 .strncpy_from_user = strncpy_from_user_std,
314 .futex_atomic_op = futex_atomic_op, 315 .futex_atomic_op = futex_atomic_op_std,
315 .futex_atomic_cmpxchg = futex_atomic_cmpxchg, 316 .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
316}; 317};
diff --git a/arch/s390/math-emu/Makefile b/arch/s390/math-emu/Makefile
index c10df144f2ab..73b3e72efc46 100644
--- a/arch/s390/math-emu/Makefile
+++ b/arch/s390/math-emu/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the FPU instruction emulation. 2# Makefile for the FPU instruction emulation.
3# 3#
4 4
5obj-$(CONFIG_MATHEMU) := math.o qrnnd.o 5obj-$(CONFIG_MATHEMU) := math.o
6 6
7EXTRA_CFLAGS := -I$(src) -Iinclude/math-emu -w 7EXTRA_CFLAGS := -I$(src) -Iinclude/math-emu -w
8EXTRA_AFLAGS := -traditional 8EXTRA_AFLAGS := -traditional
diff --git a/arch/s390/math-emu/math.c b/arch/s390/math-emu/math.c
index 6b9aec5a2c18..3ee78ccb617d 100644
--- a/arch/s390/math-emu/math.c
+++ b/arch/s390/math-emu/math.c
@@ -15,7 +15,7 @@
15#include <asm/uaccess.h> 15#include <asm/uaccess.h>
16#include <asm/lowcore.h> 16#include <asm/lowcore.h>
17 17
18#include "sfp-util.h" 18#include <asm/sfp-util.h>
19#include <math-emu/soft-fp.h> 19#include <math-emu/soft-fp.h>
20#include <math-emu/single.h> 20#include <math-emu/single.h>
21#include <math-emu/double.h> 21#include <math-emu/double.h>
diff --git a/arch/s390/math-emu/qrnnd.S b/arch/s390/math-emu/qrnnd.S
deleted file mode 100644
index b01c2b648e22..000000000000
--- a/arch/s390/math-emu/qrnnd.S
+++ /dev/null
@@ -1,77 +0,0 @@
1# S/390 __udiv_qrnnd
2
3# r2 : &__r
4# r3 : upper half of 64 bit word n
5# r4 : lower half of 64 bit word n
6# r5 : divisor d
7# the reminder r of the division is to be stored to &__r and
8# the quotient q is to be returned
9
10 .text
11 .globl __udiv_qrnnd
12__udiv_qrnnd:
13 st %r2,24(%r15) # store pointer to reminder for later
14 lr %r0,%r3 # reload n
15 lr %r1,%r4
16 ltr %r2,%r5 # reload and test divisor
17 jp 5f
18 # divisor >= 0x80000000
19 srdl %r0,2 # n/4
20 srl %r2,1 # d/2
21 slr %r1,%r2 # special case if last bit of d is set
22 brc 3,0f # (n/4) div (n/2) can overflow by 1
23 ahi %r0,-1 # trick: subtract n/2, then divide
240: dr %r0,%r2 # signed division
25 ahi %r1,1 # trick part 2: add 1 to the quotient
26 # now (n >> 2) = (d >> 1) * %r1 + %r0
27 lhi %r3,1
28 nr %r3,%r1 # test last bit of q
29 jz 1f
30 alr %r0,%r2 # add (d>>1) to r
311: srl %r1,1 # q >>= 1
32 # now (n >> 2) = (d&-2) * %r1 + %r0
33 lhi %r3,1
34 nr %r3,%r5 # test last bit of d
35 jz 2f
36 slr %r0,%r1 # r -= q
37 brc 3,2f # borrow ?
38 alr %r0,%r5 # r += d
39 ahi %r1,-1
402: # now (n >> 2) = d * %r1 + %r0
41 alr %r1,%r1 # q <<= 1
42 alr %r0,%r0 # r <<= 1
43 brc 12,3f # overflow on r ?
44 slr %r0,%r5 # r -= d
45 ahi %r1,1 # q += 1
463: lhi %r3,2
47 nr %r3,%r4 # test next to last bit of n
48 jz 4f
49 ahi %r0,1 # r += 1
504: clr %r0,%r5 # r >= d ?
51 jl 6f
52 slr %r0,%r5 # r -= d
53 ahi %r1,1 # q += 1
54 # now (n >> 1) = d * %r1 + %r0
55 j 6f
565: # divisor < 0x80000000
57 srdl %r0,1
58 dr %r0,%r2 # signed division
59 # now (n >> 1) = d * %r1 + %r0
606: alr %r1,%r1 # q <<= 1
61 alr %r0,%r0 # r <<= 1
62 brc 12,7f # overflow on r ?
63 slr %r0,%r5 # r -= d
64 ahi %r1,1 # q += 1
657: lhi %r3,1
66 nr %r3,%r4 # isolate last bit of n
67 alr %r0,%r3 # r += (n & 1)
68 clr %r0,%r5 # r >= d ?
69 jl 8f
70 slr %r0,%r5 # r -= d
71 ahi %r1,1 # q += 1
728: # now n = d * %r1 + %r0
73 l %r2,24(%r15)
74 st %r0,0(%r2)
75 lr %r2,%r1
76 br %r14
77 .end __udiv_qrnnd
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 607f50ead1fd..f93a056869bc 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -245,7 +245,7 @@ cmm_set_timeout(long nr, long seconds)
245 cmm_set_timer(); 245 cmm_set_timer();
246} 246}
247 247
248static inline int 248static int
249cmm_skip_blanks(char *cp, char **endp) 249cmm_skip_blanks(char *cp, char **endp)
250{ 250{
251 char *str; 251 char *str;
@@ -414,7 +414,7 @@ cmm_smsg_target(char *from, char *msg)
414} 414}
415#endif 415#endif
416 416
417struct ctl_table_header *cmm_sysctl_header; 417static struct ctl_table_header *cmm_sysctl_header;
418 418
419static int 419static int
420cmm_init (void) 420cmm_init (void)
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 775bf19e742b..394980b05e6f 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -15,6 +15,7 @@
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/bootmem.h> 16#include <linux/bootmem.h>
17#include <linux/ctype.h> 17#include <linux/ctype.h>
18#include <linux/ioport.h>
18#include <asm/page.h> 19#include <asm/page.h>
19#include <asm/pgtable.h> 20#include <asm/pgtable.h>
20#include <asm/ebcdic.h> 21#include <asm/ebcdic.h>
@@ -70,6 +71,7 @@ struct qin64 {
70struct dcss_segment { 71struct dcss_segment {
71 struct list_head list; 72 struct list_head list;
72 char dcss_name[8]; 73 char dcss_name[8];
74 char res_name[15];
73 unsigned long start_addr; 75 unsigned long start_addr;
74 unsigned long end; 76 unsigned long end;
75 atomic_t ref_count; 77 atomic_t ref_count;
@@ -77,6 +79,7 @@ struct dcss_segment {
77 unsigned int vm_segtype; 79 unsigned int vm_segtype;
78 struct qrange range[6]; 80 struct qrange range[6];
79 int segcnt; 81 int segcnt;
82 struct resource *res;
80}; 83};
81 84
82static DEFINE_MUTEX(dcss_lock); 85static DEFINE_MUTEX(dcss_lock);
@@ -88,7 +91,7 @@ static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC",
88 * Create the 8 bytes, ebcdic VM segment name from 91 * Create the 8 bytes, ebcdic VM segment name from
89 * an ascii name. 92 * an ascii name.
90 */ 93 */
91static void inline 94static void
92dcss_mkname(char *name, char *dcss_name) 95dcss_mkname(char *name, char *dcss_name)
93{ 96{
94 int i; 97 int i;
@@ -303,6 +306,29 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
303 goto out_free; 306 goto out_free;
304 } 307 }
305 308
309 seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL);
310 if (seg->res == NULL) {
311 rc = -ENOMEM;
312 goto out_shared;
313 }
314 seg->res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
315 seg->res->start = seg->start_addr;
316 seg->res->end = seg->end;
317 memcpy(&seg->res_name, seg->dcss_name, 8);
318 EBCASC(seg->res_name, 8);
319 seg->res_name[8] = '\0';
320 strncat(seg->res_name, " (DCSS)", 7);
321 seg->res->name = seg->res_name;
322 rc = seg->vm_segtype;
323 if (rc == SEG_TYPE_SC ||
324 ((rc == SEG_TYPE_SR || rc == SEG_TYPE_ER) && !do_nonshared))
325 seg->res->flags |= IORESOURCE_READONLY;
326 if (request_resource(&iomem_resource, seg->res)) {
327 rc = -EBUSY;
328 kfree(seg->res);
329 goto out_shared;
330 }
331
306 if (do_nonshared) 332 if (do_nonshared)
307 dcss_command = DCSS_LOADNSR; 333 dcss_command = DCSS_LOADNSR;
308 else 334 else
@@ -316,12 +342,11 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
316 rc = dcss_diag_translate_rc (seg->end); 342 rc = dcss_diag_translate_rc (seg->end);
317 dcss_diag(DCSS_PURGESEG, seg->dcss_name, 343 dcss_diag(DCSS_PURGESEG, seg->dcss_name,
318 &seg->start_addr, &seg->end); 344 &seg->start_addr, &seg->end);
319 goto out_shared; 345 goto out_resource;
320 } 346 }
321 seg->do_nonshared = do_nonshared; 347 seg->do_nonshared = do_nonshared;
322 atomic_set(&seg->ref_count, 1); 348 atomic_set(&seg->ref_count, 1);
323 list_add(&seg->list, &dcss_list); 349 list_add(&seg->list, &dcss_list);
324 rc = seg->vm_segtype;
325 *addr = seg->start_addr; 350 *addr = seg->start_addr;
326 *end = seg->end; 351 *end = seg->end;
327 if (do_nonshared) 352 if (do_nonshared)
@@ -329,12 +354,16 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
329 "type %s in non-shared mode\n", name, 354 "type %s in non-shared mode\n", name,
330 (void*)seg->start_addr, (void*)seg->end, 355 (void*)seg->start_addr, (void*)seg->end,
331 segtype_string[seg->vm_segtype]); 356 segtype_string[seg->vm_segtype]);
332 else 357 else {
333 PRINT_INFO ("segment_load: loaded segment %s range %p .. %p " 358 PRINT_INFO ("segment_load: loaded segment %s range %p .. %p "
334 "type %s in shared mode\n", name, 359 "type %s in shared mode\n", name,
335 (void*)seg->start_addr, (void*)seg->end, 360 (void*)seg->start_addr, (void*)seg->end,
336 segtype_string[seg->vm_segtype]); 361 segtype_string[seg->vm_segtype]);
362 }
337 goto out; 363 goto out;
364 out_resource:
365 release_resource(seg->res);
366 kfree(seg->res);
338 out_shared: 367 out_shared:
339 remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); 368 remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
340 out_free: 369 out_free:
@@ -401,6 +430,7 @@ segment_load (char *name, int do_nonshared, unsigned long *addr,
401 * -ENOENT : no such segment (segment gone!) 430 * -ENOENT : no such segment (segment gone!)
402 * -EAGAIN : segment is in use by other exploiters, try later 431 * -EAGAIN : segment is in use by other exploiters, try later
403 * -EINVAL : no segment with the given name is currently loaded - name invalid 432 * -EINVAL : no segment with the given name is currently loaded - name invalid
433 * -EBUSY : segment can temporarily not be used (overlaps with dcss)
404 * 0 : operation succeeded 434 * 0 : operation succeeded
405 */ 435 */
406int 436int
@@ -428,12 +458,24 @@ segment_modify_shared (char *name, int do_nonshared)
428 rc = -EAGAIN; 458 rc = -EAGAIN;
429 goto out_unlock; 459 goto out_unlock;
430 } 460 }
431 dcss_diag(DCSS_PURGESEG, seg->dcss_name, 461 release_resource(seg->res);
432 &dummy, &dummy); 462 if (do_nonshared) {
433 if (do_nonshared)
434 dcss_command = DCSS_LOADNSR; 463 dcss_command = DCSS_LOADNSR;
435 else 464 seg->res->flags &= ~IORESOURCE_READONLY;
436 dcss_command = DCSS_LOADNOLY; 465 } else {
466 dcss_command = DCSS_LOADNOLY;
467 if (seg->vm_segtype == SEG_TYPE_SR ||
468 seg->vm_segtype == SEG_TYPE_ER)
469 seg->res->flags |= IORESOURCE_READONLY;
470 }
471 if (request_resource(&iomem_resource, seg->res)) {
472 PRINT_WARN("segment_modify_shared: could not reload segment %s"
473 " - overlapping resources\n", name);
474 rc = -EBUSY;
475 kfree(seg->res);
476 goto out_del;
477 }
478 dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
437 diag_cc = dcss_diag(dcss_command, seg->dcss_name, 479 diag_cc = dcss_diag(dcss_command, seg->dcss_name,
438 &seg->start_addr, &seg->end); 480 &seg->start_addr, &seg->end);
439 if (diag_cc > 1) { 481 if (diag_cc > 1) {
@@ -446,9 +488,9 @@ segment_modify_shared (char *name, int do_nonshared)
446 rc = 0; 488 rc = 0;
447 goto out_unlock; 489 goto out_unlock;
448 out_del: 490 out_del:
491 remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
449 list_del(&seg->list); 492 list_del(&seg->list);
450 dcss_diag(DCSS_PURGESEG, seg->dcss_name, 493 dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
451 &dummy, &dummy);
452 kfree(seg); 494 kfree(seg);
453 out_unlock: 495 out_unlock:
454 mutex_unlock(&dcss_lock); 496 mutex_unlock(&dcss_lock);
@@ -478,6 +520,8 @@ segment_unload(char *name)
478 } 520 }
479 if (atomic_dec_return(&seg->ref_count) != 0) 521 if (atomic_dec_return(&seg->ref_count) != 0)
480 goto out_unlock; 522 goto out_unlock;
523 release_resource(seg->res);
524 kfree(seg->res);
481 remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); 525 remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
482 list_del(&seg->list); 526 list_del(&seg->list);
483 dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy); 527 dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index cd85e34d8703..9ff143e87746 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -52,7 +52,7 @@ extern int sysctl_userprocess_debug;
52extern void die(const char *,struct pt_regs *,long); 52extern void die(const char *,struct pt_regs *,long);
53 53
54#ifdef CONFIG_KPROBES 54#ifdef CONFIG_KPROBES
55ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain); 55static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
56int register_page_fault_notifier(struct notifier_block *nb) 56int register_page_fault_notifier(struct notifier_block *nb)
57{ 57{
58 return atomic_notifier_chain_register(&notify_page_fault_chain, nb); 58 return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
@@ -137,7 +137,9 @@ static int __check_access_register(struct pt_regs *regs, int error_code)
137 137
138/* 138/*
139 * Check which address space the address belongs to. 139 * Check which address space the address belongs to.
140 * Returns 1 for user space and 0 for kernel space. 140 * May return 1 or 2 for user space and 0 for kernel space.
141 * Returns 2 for user space in primary addressing mode with
142 * CONFIG_S390_EXEC_PROTECT on and kernel parameter noexec=on.
141 */ 143 */
142static inline int check_user_space(struct pt_regs *regs, int error_code) 144static inline int check_user_space(struct pt_regs *regs, int error_code)
143{ 145{
@@ -154,7 +156,7 @@ static inline int check_user_space(struct pt_regs *regs, int error_code)
154 return __check_access_register(regs, error_code); 156 return __check_access_register(regs, error_code);
155 if (descriptor == 2) 157 if (descriptor == 2)
156 return current->thread.mm_segment.ar4; 158 return current->thread.mm_segment.ar4;
157 return descriptor != 0; 159 return ((descriptor != 0) ^ (switch_amode)) << s390_noexec;
158} 160}
159 161
160/* 162/*
@@ -183,6 +185,77 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
183 force_sig_info(SIGSEGV, &si, current); 185 force_sig_info(SIGSEGV, &si, current);
184} 186}
185 187
188#ifdef CONFIG_S390_EXEC_PROTECT
189extern long sys_sigreturn(struct pt_regs *regs);
190extern long sys_rt_sigreturn(struct pt_regs *regs);
191extern long sys32_sigreturn(struct pt_regs *regs);
192extern long sys32_rt_sigreturn(struct pt_regs *regs);
193
194static inline void do_sigreturn(struct mm_struct *mm, struct pt_regs *regs,
195 int rt)
196{
197 up_read(&mm->mmap_sem);
198 clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
199#ifdef CONFIG_COMPAT
200 if (test_tsk_thread_flag(current, TIF_31BIT)) {
201 if (rt)
202 sys32_rt_sigreturn(regs);
203 else
204 sys32_sigreturn(regs);
205 return;
206 }
207#endif /* CONFIG_COMPAT */
208 if (rt)
209 sys_rt_sigreturn(regs);
210 else
211 sys_sigreturn(regs);
212 return;
213}
214
215static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
216 unsigned long address, unsigned long error_code)
217{
218 pgd_t *pgd;
219 pmd_t *pmd;
220 pte_t *pte;
221 u16 *instruction;
222 unsigned long pfn, uaddr = regs->psw.addr;
223
224 spin_lock(&mm->page_table_lock);
225 pgd = pgd_offset(mm, uaddr);
226 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
227 goto out_fault;
228 pmd = pmd_offset(pgd, uaddr);
229 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
230 goto out_fault;
231 pte = pte_offset_map(pmd_offset(pgd_offset(mm, uaddr), uaddr), uaddr);
232 if (!pte || !pte_present(*pte))
233 goto out_fault;
234 pfn = pte_pfn(*pte);
235 if (!pfn_valid(pfn))
236 goto out_fault;
237 spin_unlock(&mm->page_table_lock);
238
239 instruction = (u16 *) ((pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE-1)));
240 if (*instruction == 0x0a77)
241 do_sigreturn(mm, regs, 0);
242 else if (*instruction == 0x0aad)
243 do_sigreturn(mm, regs, 1);
244 else {
245 printk("- XXX - do_exception: task = %s, primary, NO EXEC "
246 "-> SIGSEGV\n", current->comm);
247 up_read(&mm->mmap_sem);
248 current->thread.prot_addr = address;
249 current->thread.trap_no = error_code;
250 do_sigsegv(regs, error_code, SEGV_MAPERR, address);
251 }
252 return 0;
253out_fault:
254 spin_unlock(&mm->page_table_lock);
255 return -EFAULT;
256}
257#endif /* CONFIG_S390_EXEC_PROTECT */
258
186/* 259/*
187 * This routine handles page faults. It determines the address, 260 * This routine handles page faults. It determines the address,
188 * and the problem, and then passes it off to one of the appropriate 261 * and the problem, and then passes it off to one of the appropriate
@@ -260,6 +333,17 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
260 vma = find_vma(mm, address); 333 vma = find_vma(mm, address);
261 if (!vma) 334 if (!vma)
262 goto bad_area; 335 goto bad_area;
336
337#ifdef CONFIG_S390_EXEC_PROTECT
338 if (unlikely((user_address == 2) && !(vma->vm_flags & VM_EXEC)))
339 if (!signal_return(mm, regs, address, error_code))
340 /*
341 * signal_return() has done an up_read(&mm->mmap_sem)
342 * if it returns 0.
343 */
344 return;
345#endif
346
263 if (vma->vm_start <= address) 347 if (vma->vm_start <= address)
264 goto good_area; 348 goto good_area;
265 if (!(vma->vm_flags & VM_GROWSDOWN)) 349 if (!(vma->vm_flags & VM_GROWSDOWN))
@@ -452,8 +536,7 @@ void pfault_fini(void)
452 : : "a" (&refbk), "m" (refbk) : "cc"); 536 : : "a" (&refbk), "m" (refbk) : "cc");
453} 537}
454 538
455asmlinkage void 539static void pfault_interrupt(__u16 error_code)
456pfault_interrupt(__u16 error_code)
457{ 540{
458 struct task_struct *tsk; 541 struct task_struct *tsk;
459 __u16 subcode; 542 __u16 subcode;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 4bb21be3b007..b3e7c45efb63 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -25,7 +25,7 @@
25#include <linux/bootmem.h> 25#include <linux/bootmem.h>
26#include <linux/pfn.h> 26#include <linux/pfn.h>
27#include <linux/poison.h> 27#include <linux/poison.h>
28 28#include <linux/initrd.h>
29#include <asm/processor.h> 29#include <asm/processor.h>
30#include <asm/system.h> 30#include <asm/system.h>
31#include <asm/uaccess.h> 31#include <asm/uaccess.h>
@@ -95,20 +95,18 @@ static void __init setup_ro_region(void)
95 pte_t new_pte; 95 pte_t new_pte;
96 unsigned long address, end; 96 unsigned long address, end;
97 97
98 address = ((unsigned long)&__start_rodata) & PAGE_MASK; 98 address = ((unsigned long)&_stext) & PAGE_MASK;
99 end = PFN_ALIGN((unsigned long)&__end_rodata); 99 end = PFN_ALIGN((unsigned long)&_eshared);
100 100
101 for (; address < end; address += PAGE_SIZE) { 101 for (; address < end; address += PAGE_SIZE) {
102 pgd = pgd_offset_k(address); 102 pgd = pgd_offset_k(address);
103 pmd = pmd_offset(pgd, address); 103 pmd = pmd_offset(pgd, address);
104 pte = pte_offset_kernel(pmd, address); 104 pte = pte_offset_kernel(pmd, address);
105 new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO)); 105 new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO));
106 set_pte(pte, new_pte); 106 *pte = new_pte;
107 } 107 }
108} 108}
109 109
110extern void vmem_map_init(void);
111
112/* 110/*
113 * paging_init() sets up the page tables 111 * paging_init() sets up the page tables
114 */ 112 */
@@ -125,11 +123,11 @@ void __init paging_init(void)
125#ifdef CONFIG_64BIT 123#ifdef CONFIG_64BIT
126 pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE; 124 pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE;
127 for (i = 0; i < PTRS_PER_PGD; i++) 125 for (i = 0; i < PTRS_PER_PGD; i++)
128 pgd_clear(pg_dir + i); 126 pgd_clear_kernel(pg_dir + i);
129#else 127#else
130 pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE; 128 pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
131 for (i = 0; i < PTRS_PER_PGD; i++) 129 for (i = 0; i < PTRS_PER_PGD; i++)
132 pmd_clear((pmd_t *)(pg_dir + i)); 130 pmd_clear_kernel((pmd_t *)(pg_dir + i));
133#endif 131#endif
134 vmem_map_init(); 132 vmem_map_init();
135 setup_ro_region(); 133 setup_ro_region();
@@ -174,10 +172,8 @@ void __init mem_init(void)
174 datasize >>10, 172 datasize >>10,
175 initsize >> 10); 173 initsize >> 10);
176 printk("Write protected kernel read-only data: %#lx - %#lx\n", 174 printk("Write protected kernel read-only data: %#lx - %#lx\n",
177 (unsigned long)&__start_rodata, 175 (unsigned long)&_stext,
178 PFN_ALIGN((unsigned long)&__end_rodata) - 1); 176 PFN_ALIGN((unsigned long)&_eshared) - 1);
179 printk("Virtual memmap size: %ldk\n",
180 (max_pfn * sizeof(struct page)) >> 10);
181} 177}
182 178
183void free_initmem(void) 179void free_initmem(void)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index cd3d93e8c211..92a565190028 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -82,7 +82,7 @@ static inline pmd_t *vmem_pmd_alloc(void)
82 if (!pmd) 82 if (!pmd)
83 return NULL; 83 return NULL;
84 for (i = 0; i < PTRS_PER_PMD; i++) 84 for (i = 0; i < PTRS_PER_PMD; i++)
85 pmd_clear(pmd + i); 85 pmd_clear_kernel(pmd + i);
86 return pmd; 86 return pmd;
87} 87}
88 88
@@ -97,7 +97,7 @@ static inline pte_t *vmem_pte_alloc(void)
97 return NULL; 97 return NULL;
98 pte_val(empty_pte) = _PAGE_TYPE_EMPTY; 98 pte_val(empty_pte) = _PAGE_TYPE_EMPTY;
99 for (i = 0; i < PTRS_PER_PTE; i++) 99 for (i = 0; i < PTRS_PER_PTE; i++)
100 set_pte(pte + i, empty_pte); 100 pte[i] = empty_pte;
101 return pte; 101 return pte;
102} 102}
103 103
@@ -119,7 +119,7 @@ static int vmem_add_range(unsigned long start, unsigned long size)
119 pm_dir = vmem_pmd_alloc(); 119 pm_dir = vmem_pmd_alloc();
120 if (!pm_dir) 120 if (!pm_dir)
121 goto out; 121 goto out;
122 pgd_populate(&init_mm, pg_dir, pm_dir); 122 pgd_populate_kernel(&init_mm, pg_dir, pm_dir);
123 } 123 }
124 124
125 pm_dir = pmd_offset(pg_dir, address); 125 pm_dir = pmd_offset(pg_dir, address);
@@ -132,7 +132,7 @@ static int vmem_add_range(unsigned long start, unsigned long size)
132 132
133 pt_dir = pte_offset_kernel(pm_dir, address); 133 pt_dir = pte_offset_kernel(pm_dir, address);
134 pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL); 134 pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
135 set_pte(pt_dir, pte); 135 *pt_dir = pte;
136 } 136 }
137 ret = 0; 137 ret = 0;
138out: 138out:
@@ -161,7 +161,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
161 if (pmd_none(*pm_dir)) 161 if (pmd_none(*pm_dir))
162 continue; 162 continue;
163 pt_dir = pte_offset_kernel(pm_dir, address); 163 pt_dir = pte_offset_kernel(pm_dir, address);
164 set_pte(pt_dir, pte); 164 *pt_dir = pte;
165 } 165 }
166 flush_tlb_kernel_range(start, start + size); 166 flush_tlb_kernel_range(start, start + size);
167} 167}
@@ -191,7 +191,7 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
191 pm_dir = vmem_pmd_alloc(); 191 pm_dir = vmem_pmd_alloc();
192 if (!pm_dir) 192 if (!pm_dir)
193 goto out; 193 goto out;
194 pgd_populate(&init_mm, pg_dir, pm_dir); 194 pgd_populate_kernel(&init_mm, pg_dir, pm_dir);
195 } 195 }
196 196
197 pm_dir = pmd_offset(pg_dir, address); 197 pm_dir = pmd_offset(pg_dir, address);
@@ -210,7 +210,7 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
210 if (!new_page) 210 if (!new_page)
211 goto out; 211 goto out;
212 pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL); 212 pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
213 set_pte(pt_dir, pte); 213 *pt_dir = pte;
214 } 214 }
215 } 215 }
216 ret = 0; 216 ret = 0;
diff --git a/arch/x86_64/kernel/early-quirks.c b/arch/x86_64/kernel/early-quirks.c
index 49802f1bee94..bd30d138113f 100644
--- a/arch/x86_64/kernel/early-quirks.c
+++ b/arch/x86_64/kernel/early-quirks.c
@@ -32,7 +32,7 @@ static void via_bugs(void)
32 32
33static int nvidia_hpet_detected __initdata; 33static int nvidia_hpet_detected __initdata;
34 34
35static int __init nvidia_hpet_check(unsigned long phys, unsigned long size) 35static int __init nvidia_hpet_check(struct acpi_table_header *header)
36{ 36{
37 nvidia_hpet_detected = 1; 37 nvidia_hpet_detected = 1;
38 return 0; 38 return 0;
@@ -53,7 +53,7 @@ static void nvidia_bugs(void)
53 return; 53 return;
54 54
55 nvidia_hpet_detected = 0; 55 nvidia_hpet_detected = 0;
56 acpi_table_parse(ACPI_HPET, nvidia_hpet_check); 56 acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check);
57 if (nvidia_hpet_detected == 0) { 57 if (nvidia_hpet_detected == 0) {
58 acpi_skip_timer_override = 1; 58 acpi_skip_timer_override = 1;
59 printk(KERN_INFO "Nvidia board " 59 printk(KERN_INFO "Nvidia board "
diff --git a/arch/x86_64/kernel/genapic.c b/arch/x86_64/kernel/genapic.c
index b007433f96bb..0b3603adf56d 100644
--- a/arch/x86_64/kernel/genapic.c
+++ b/arch/x86_64/kernel/genapic.c
@@ -58,8 +58,8 @@ void __init clustered_apic_check(void)
58 * Some x86_64 machines use physical APIC mode regardless of how many 58 * Some x86_64 machines use physical APIC mode regardless of how many
59 * procs/clusters are present (x86_64 ES7000 is an example). 59 * procs/clusters are present (x86_64 ES7000 is an example).
60 */ 60 */
61 if (acpi_fadt.revision > FADT2_REVISION_ID) 61 if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID)
62 if (acpi_fadt.force_apic_physical_destination_mode) { 62 if (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) {
63 genapic = &apic_cluster; 63 genapic = &apic_cluster;
64 goto print; 64 goto print;
65 } 65 }
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index d7bad90a5ad8..6be6730acb5c 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -1956,24 +1956,31 @@ static struct irq_chip msi_chip = {
1956 .retrigger = ioapic_retrigger_irq, 1956 .retrigger = ioapic_retrigger_irq,
1957}; 1957};
1958 1958
1959int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev) 1959int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
1960{ 1960{
1961 struct msi_msg msg; 1961 struct msi_msg msg;
1962 int ret; 1962 int irq, ret;
1963 irq = create_irq();
1964 if (irq < 0)
1965 return irq;
1966
1967 set_irq_msi(irq, desc);
1963 ret = msi_compose_msg(dev, irq, &msg); 1968 ret = msi_compose_msg(dev, irq, &msg);
1964 if (ret < 0) 1969 if (ret < 0) {
1970 destroy_irq(irq);
1965 return ret; 1971 return ret;
1972 }
1966 1973
1967 write_msi_msg(irq, &msg); 1974 write_msi_msg(irq, &msg);
1968 1975
1969 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); 1976 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
1970 1977
1971 return 0; 1978 return irq;
1972} 1979}
1973 1980
1974void arch_teardown_msi_irq(unsigned int irq) 1981void arch_teardown_msi_irq(unsigned int irq)
1975{ 1982{
1976 return; 1983 destroy_irq(irq);
1977} 1984}
1978 1985
1979#endif /* CONFIG_PCI_MSI */ 1986#endif /* CONFIG_PCI_MSI */
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 08072568847d..50dd8bef850e 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -798,7 +798,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
798 return gsi; 798 return gsi;
799 799
800 /* Don't set up the ACPI SCI because it's already set up */ 800 /* Don't set up the ACPI SCI because it's already set up */
801 if (acpi_fadt.sci_int == gsi) 801 if (acpi_gbl_FADT.sci_interrupt == gsi)
802 return gsi; 802 return gsi;
803 803
804 ioapic = mp_find_ioapic(gsi); 804 ioapic = mp_find_ioapic(gsi);
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
index 697f0aa794b9..eb18be5a6569 100644
--- a/arch/x86_64/kernel/pci-swiotlb.c
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -29,7 +29,7 @@ struct dma_mapping_ops swiotlb_dma_ops = {
29 .dma_supported = NULL, 29 .dma_supported = NULL,
30}; 30};
31 31
32void pci_swiotlb_init(void) 32void __init pci_swiotlb_init(void)
33{ 33{
34 /* don't initialize swiotlb if iommu=off (no_iommu=1) */ 34 /* don't initialize swiotlb if iommu=off (no_iommu=1) */
35 if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN) 35 if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN)
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 5cc76d0d331f..335cc91c49b7 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -498,7 +498,7 @@ static unsigned long get_cmos_time(void)
498{ 498{
499 unsigned int year, mon, day, hour, min, sec; 499 unsigned int year, mon, day, hour, min, sec;
500 unsigned long flags; 500 unsigned long flags;
501 unsigned extyear = 0; 501 unsigned century = 0;
502 502
503 spin_lock_irqsave(&rtc_lock, flags); 503 spin_lock_irqsave(&rtc_lock, flags);
504 504
@@ -510,9 +510,9 @@ static unsigned long get_cmos_time(void)
510 mon = CMOS_READ(RTC_MONTH); 510 mon = CMOS_READ(RTC_MONTH);
511 year = CMOS_READ(RTC_YEAR); 511 year = CMOS_READ(RTC_YEAR);
512#ifdef CONFIG_ACPI 512#ifdef CONFIG_ACPI
513 if (acpi_fadt.revision >= FADT2_REVISION_ID && 513 if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
514 acpi_fadt.century) 514 acpi_gbl_FADT.century)
515 extyear = CMOS_READ(acpi_fadt.century); 515 century = CMOS_READ(acpi_gbl_FADT.century);
516#endif 516#endif
517 } while (sec != CMOS_READ(RTC_SECONDS)); 517 } while (sec != CMOS_READ(RTC_SECONDS));
518 518
@@ -530,10 +530,10 @@ static unsigned long get_cmos_time(void)
530 BCD_TO_BIN(mon); 530 BCD_TO_BIN(mon);
531 BCD_TO_BIN(year); 531 BCD_TO_BIN(year);
532 532
533 if (extyear) { 533 if (century) {
534 BCD_TO_BIN(extyear); 534 BCD_TO_BIN(century);
535 year += extyear; 535 year += century * 100;
536 printk(KERN_INFO "Extended CMOS year: %d\n", extyear); 536 printk(KERN_INFO "Extended CMOS year: %d\n", century * 100);
537 } else { 537 } else {
538 /* 538 /*
539 * x86-64 systems only exists since 2002. 539 * x86-64 systems only exists since 2002.
@@ -954,7 +954,7 @@ __cpuinit int unsynchronized_tsc(void)
954 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { 954 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
955#ifdef CONFIG_ACPI 955#ifdef CONFIG_ACPI
956 /* But TSC doesn't tick in C3 so don't use it there */ 956 /* But TSC doesn't tick in C3 so don't use it there */
957 if (acpi_fadt.length > 0 && acpi_fadt.plvl3_lat < 1000) 957 if (acpi_gbl_FADT.header.length > 0 && acpi_gbl_FADT.C3latency < 1000)
958 return 1; 958 return 1;
959#endif 959#endif
960 return 0; 960 return 0;
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 1087e150a218..2efe215fc76a 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -101,7 +101,7 @@ static __init inline int srat_disabled(void)
101static __init int slit_valid(struct acpi_table_slit *slit) 101static __init int slit_valid(struct acpi_table_slit *slit)
102{ 102{
103 int i, j; 103 int i, j;
104 int d = slit->localities; 104 int d = slit->locality_count;
105 for (i = 0; i < d; i++) { 105 for (i = 0; i < d; i++) {
106 for (j = 0; j < d; j++) { 106 for (j = 0; j < d; j++) {
107 u8 val = slit->entry[d*i + j]; 107 u8 val = slit->entry[d*i + j];
@@ -127,18 +127,18 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
127 127
128/* Callback for Proximity Domain -> LAPIC mapping */ 128/* Callback for Proximity Domain -> LAPIC mapping */
129void __init 129void __init
130acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa) 130acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
131{ 131{
132 int pxm, node; 132 int pxm, node;
133 if (srat_disabled()) 133 if (srat_disabled())
134 return; 134 return;
135 if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) { 135 if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
136 bad_srat(); 136 bad_srat();
137 return; 137 return;
138 } 138 }
139 if (pa->flags.enabled == 0) 139 if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
140 return; 140 return;
141 pxm = pa->proximity_domain; 141 pxm = pa->proximity_domain_lo;
142 node = setup_node(pxm); 142 node = setup_node(pxm);
143 if (node < 0) { 143 if (node < 0) {
144 printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm); 144 printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
@@ -254,21 +254,21 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end)
254 /* Looks good */ 254 /* Looks good */
255 255
256 if (nd->start == nd->end) { 256 if (nd->start == nd->end) {
257 nd->start = start; 257 nd->start = start;
258 nd->end = end; 258 nd->end = end;
259 changed = 1; 259 changed = 1;
260 } else { 260 } else {
261 if (nd->start == end) { 261 if (nd->start == end) {
262 nd->start = start; 262 nd->start = start;
263 changed = 1; 263 changed = 1;
264 } 264 }
265 if (nd->end == start) { 265 if (nd->end == start) {
266 nd->end = end; 266 nd->end = end;
267 changed = 1; 267 changed = 1;
268 } 268 }
269 if (!changed) 269 if (!changed)
270 printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n"); 270 printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
271 } 271 }
272 272
273 ret = update_end_of_memory(nd->end); 273 ret = update_end_of_memory(nd->end);
274 274
@@ -279,7 +279,7 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end)
279 279
280/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */ 280/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
281void __init 281void __init
282acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma) 282acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
283{ 283{
284 struct bootnode *nd, oldnode; 284 struct bootnode *nd, oldnode;
285 unsigned long start, end; 285 unsigned long start, end;
@@ -288,16 +288,17 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
288 288
289 if (srat_disabled()) 289 if (srat_disabled())
290 return; 290 return;
291 if (ma->header.length != sizeof(struct acpi_table_memory_affinity)) { 291 if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
292 bad_srat(); 292 bad_srat();
293 return; 293 return;
294 } 294 }
295 if (ma->flags.enabled == 0) 295 if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
296 return; 296 return;
297 if (ma->flags.hot_pluggable && !save_add_info()) 297
298 if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
298 return; 299 return;
299 start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32); 300 start = ma->base_address;
300 end = start + (ma->length_lo | ((u64)ma->length_hi << 32)); 301 end = start + ma->length;
301 pxm = ma->proximity_domain; 302 pxm = ma->proximity_domain;
302 node = setup_node(pxm); 303 node = setup_node(pxm);
303 if (node < 0) { 304 if (node < 0) {
@@ -337,7 +338,8 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
337 push_node_boundaries(node, nd->start >> PAGE_SHIFT, 338 push_node_boundaries(node, nd->start >> PAGE_SHIFT,
338 nd->end >> PAGE_SHIFT); 339 nd->end >> PAGE_SHIFT);
339 340
340 if (ma->flags.hot_pluggable && (reserve_hotadd(node, start, end) < 0)) { 341 if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) &&
342 (reserve_hotadd(node, start, end) < 0)) {
341 /* Ignore hotadd region. Undo damage */ 343 /* Ignore hotadd region. Undo damage */
342 printk(KERN_NOTICE "SRAT: Hotplug region ignored\n"); 344 printk(KERN_NOTICE "SRAT: Hotplug region ignored\n");
343 *nd = oldnode; 345 *nd = oldnode;
@@ -394,7 +396,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
394 396
395 /* First clean up the node list */ 397 /* First clean up the node list */
396 for (i = 0; i < MAX_NUMNODES; i++) { 398 for (i = 0; i < MAX_NUMNODES; i++) {
397 cutoff_node(i, start, end); 399 cutoff_node(i, start, end);
398 if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) { 400 if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) {
399 unparse_node(i); 401 unparse_node(i);
400 node_set_offline(i); 402 node_set_offline(i);
@@ -426,7 +428,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
426 if (!node_online(i)) 428 if (!node_online(i))
427 setup_node_bootmem(i, nodes[i].start, nodes[i].end); 429 setup_node_bootmem(i, nodes[i].start, nodes[i].end);
428 430
429 for (i = 0; i < NR_CPUS; i++) { 431 for (i = 0; i < NR_CPUS; i++) {
430 if (cpu_to_node[i] == NUMA_NO_NODE) 432 if (cpu_to_node[i] == NUMA_NO_NODE)
431 continue; 433 continue;
432 if (!node_isset(cpu_to_node[i], nodes_parsed)) 434 if (!node_isset(cpu_to_node[i], nodes_parsed))
@@ -461,7 +463,7 @@ int __node_distance(int a, int b)
461 463
462 if (!acpi_slit) 464 if (!acpi_slit)
463 return a == b ? 10 : 20; 465 return a == b ? 10 : 20;
464 index = acpi_slit->localities * node_to_pxm(a); 466 index = acpi_slit->locality_count * node_to_pxm(a);
465 return acpi_slit->entry[index + node_to_pxm(b)]; 467 return acpi_slit->entry[index + node_to_pxm(b)];
466} 468}
467 469
diff --git a/arch/x86_64/pci/mmconfig.c b/arch/x86_64/pci/mmconfig.c
index f8b6b2800a62..faabb6e87f12 100644
--- a/arch/x86_64/pci/mmconfig.c
+++ b/arch/x86_64/pci/mmconfig.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * mmconfig.c - Low-level direct PCI config space access via MMCONFIG 2 * mmconfig.c - Low-level direct PCI config space access via MMCONFIG
3 * 3 *
4 * This is an 64bit optimized version that always keeps the full mmconfig 4 * This is an 64bit optimized version that always keeps the full mmconfig
5 * space mapped. This allows lockless config space operation. 5 * space mapped. This allows lockless config space operation.
6 */ 6 */
@@ -25,7 +25,7 @@ static DECLARE_BITMAP(fallback_slots, 32*MAX_CHECK_BUS);
25 25
26/* Static virtual mapping of the MMCONFIG aperture */ 26/* Static virtual mapping of the MMCONFIG aperture */
27struct mmcfg_virt { 27struct mmcfg_virt {
28 struct acpi_table_mcfg_config *cfg; 28 struct acpi_mcfg_allocation *cfg;
29 char __iomem *virt; 29 char __iomem *virt;
30}; 30};
31static struct mmcfg_virt *pci_mmcfg_virt; 31static struct mmcfg_virt *pci_mmcfg_virt;
@@ -33,14 +33,14 @@ static struct mmcfg_virt *pci_mmcfg_virt;
33static char __iomem *get_virt(unsigned int seg, unsigned bus) 33static char __iomem *get_virt(unsigned int seg, unsigned bus)
34{ 34{
35 int cfg_num = -1; 35 int cfg_num = -1;
36 struct acpi_table_mcfg_config *cfg; 36 struct acpi_mcfg_allocation *cfg;
37 37
38 while (1) { 38 while (1) {
39 ++cfg_num; 39 ++cfg_num;
40 if (cfg_num >= pci_mmcfg_config_num) 40 if (cfg_num >= pci_mmcfg_config_num)
41 break; 41 break;
42 cfg = pci_mmcfg_virt[cfg_num].cfg; 42 cfg = pci_mmcfg_virt[cfg_num].cfg;
43 if (cfg->pci_segment_group_number != seg) 43 if (cfg->pci_segment != seg)
44 continue; 44 continue;
45 if ((cfg->start_bus_number <= bus) && 45 if ((cfg->start_bus_number <= bus) &&
46 (cfg->end_bus_number >= bus)) 46 (cfg->end_bus_number >= bus))
@@ -52,7 +52,7 @@ static char __iomem *get_virt(unsigned int seg, unsigned bus)
52 this applies to all busses. */ 52 this applies to all busses. */
53 cfg = &pci_mmcfg_config[0]; 53 cfg = &pci_mmcfg_config[0];
54 if (pci_mmcfg_config_num == 1 && 54 if (pci_mmcfg_config_num == 1 &&
55 cfg->pci_segment_group_number == 0 && 55 cfg->pci_segment == 0 &&
56 (cfg->start_bus_number | cfg->end_bus_number) == 0) 56 (cfg->start_bus_number | cfg->end_bus_number) == 0)
57 return pci_mmcfg_virt[0].virt; 57 return pci_mmcfg_virt[0].virt;
58 58
@@ -170,19 +170,19 @@ void __init pci_mmcfg_init(int type)
170 if ((pci_probe & PCI_PROBE_MMCONF) == 0) 170 if ((pci_probe & PCI_PROBE_MMCONF) == 0)
171 return; 171 return;
172 172
173 acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg); 173 acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
174 if ((pci_mmcfg_config_num == 0) || 174 if ((pci_mmcfg_config_num == 0) ||
175 (pci_mmcfg_config == NULL) || 175 (pci_mmcfg_config == NULL) ||
176 (pci_mmcfg_config[0].base_address == 0)) 176 (pci_mmcfg_config[0].address == 0))
177 return; 177 return;
178 178
179 /* Only do this check when type 1 works. If it doesn't work 179 /* Only do this check when type 1 works. If it doesn't work
180 assume we run on a Mac and always use MCFG */ 180 assume we run on a Mac and always use MCFG */
181 if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].base_address, 181 if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].address,
182 pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN, 182 pci_mmcfg_config[0].address + MMCONFIG_APER_MIN,
183 E820_RESERVED)) { 183 E820_RESERVED)) {
184 printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n", 184 printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %lx is not E820-reserved\n",
185 pci_mmcfg_config[0].base_address); 185 (unsigned long)pci_mmcfg_config[0].address);
186 printk(KERN_ERR "PCI: Not using MMCONFIG.\n"); 186 printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
187 return; 187 return;
188 } 188 }
@@ -194,15 +194,16 @@ void __init pci_mmcfg_init(int type)
194 } 194 }
195 for (i = 0; i < pci_mmcfg_config_num; ++i) { 195 for (i = 0; i < pci_mmcfg_config_num; ++i) {
196 pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i]; 196 pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
197 pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].base_address, 197 pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].address,
198 MMCONFIG_APER_MAX); 198 MMCONFIG_APER_MAX);
199 if (!pci_mmcfg_virt[i].virt) { 199 if (!pci_mmcfg_virt[i].virt) {
200 printk(KERN_ERR "PCI: Cannot map mmconfig aperture for " 200 printk(KERN_ERR "PCI: Cannot map mmconfig aperture for "
201 "segment %d\n", 201 "segment %d\n",
202 pci_mmcfg_config[i].pci_segment_group_number); 202 pci_mmcfg_config[i].pci_segment);
203 return; 203 return;
204 } 204 }
205 printk(KERN_INFO "PCI: Using MMCONFIG at %x\n", pci_mmcfg_config[i].base_address); 205 printk(KERN_INFO "PCI: Using MMCONFIG at %lx\n",
206 (unsigned long)pci_mmcfg_config[i].address);
206 } 207 }
207 208
208 unreachable_devices(); 209 unreachable_devices();
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 92ba249f3a5b..918b4d845f93 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -74,14 +74,6 @@ config CRYPTO_SHA1
74 help 74 help
75 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). 75 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
76 76
77config CRYPTO_SHA1_S390
78 tristate "SHA1 digest algorithm (s390)"
79 depends on S390
80 select CRYPTO_ALGAPI
81 help
82 This is the s390 hardware accelerated implementation of the
83 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
84
85config CRYPTO_SHA256 77config CRYPTO_SHA256
86 tristate "SHA256 digest algorithm" 78 tristate "SHA256 digest algorithm"
87 select CRYPTO_ALGAPI 79 select CRYPTO_ALGAPI
@@ -91,17 +83,6 @@ config CRYPTO_SHA256
91 This version of SHA implements a 256 bit hash with 128 bits of 83 This version of SHA implements a 256 bit hash with 128 bits of
92 security against collision attacks. 84 security against collision attacks.
93 85
94config CRYPTO_SHA256_S390
95 tristate "SHA256 digest algorithm (s390)"
96 depends on S390
97 select CRYPTO_ALGAPI
98 help
99 This is the s390 hardware accelerated implementation of the
100 SHA256 secure hash standard (DFIPS 180-2).
101
102 This version of SHA implements a 256 bit hash with 128 bits of
103 security against collision attacks.
104
105config CRYPTO_SHA512 86config CRYPTO_SHA512
106 tristate "SHA384 and SHA512 digest algorithms" 87 tristate "SHA384 and SHA512 digest algorithms"
107 select CRYPTO_ALGAPI 88 select CRYPTO_ALGAPI
@@ -187,14 +168,6 @@ config CRYPTO_DES
187 help 168 help
188 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). 169 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
189 170
190config CRYPTO_DES_S390
191 tristate "DES and Triple DES cipher algorithms (s390)"
192 depends on S390
193 select CRYPTO_ALGAPI
194 select CRYPTO_BLKCIPHER
195 help
196 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
197
198config CRYPTO_BLOWFISH 171config CRYPTO_BLOWFISH
199 tristate "Blowfish cipher algorithm" 172 tristate "Blowfish cipher algorithm"
200 select CRYPTO_ALGAPI 173 select CRYPTO_ALGAPI
@@ -336,28 +309,6 @@ config CRYPTO_AES_X86_64
336 309
337 See <http://csrc.nist.gov/encryption/aes/> for more information. 310 See <http://csrc.nist.gov/encryption/aes/> for more information.
338 311
339config CRYPTO_AES_S390
340 tristate "AES cipher algorithms (s390)"
341 depends on S390
342 select CRYPTO_ALGAPI
343 select CRYPTO_BLKCIPHER
344 help
345 This is the s390 hardware accelerated implementation of the
346 AES cipher algorithms (FIPS-197). AES uses the Rijndael
347 algorithm.
348
349 Rijndael appears to be consistently a very good performer in
350 both hardware and software across a wide range of computing
351 environments regardless of its use in feedback or non-feedback
352 modes. Its key setup time is excellent, and its key agility is
353 good. Rijndael's very low memory requirements make it very well
354 suited for restricted-space environments, in which it also
355 demonstrates excellent performance. Rijndael's operations are
356 among the easiest to defend against power and timing attacks.
357
358 On s390 the System z9-109 currently only supports the key size
359 of 128 bit.
360
361config CRYPTO_CAST5 312config CRYPTO_CAST5
362 tristate "CAST5 (CAST-128) cipher algorithm" 313 tristate "CAST5 (CAST-128) cipher algorithm"
363 select CRYPTO_ALGAPI 314 select CRYPTO_ALGAPI
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index f4f000abc4e9..20eacc2c9e0e 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "ACPI (Advanced Configuration and Power Interface) Support" 5menu "ACPI (Advanced Configuration and Power Interface) Support"
6 depends on !X86_NUMAQ
6 depends on !X86_VISWS 7 depends on !X86_VISWS
7 depends on !IA64_HP_SIM 8 depends on !IA64_HP_SIM
8 depends on IA64 || X86 9 depends on IA64 || X86
@@ -77,6 +78,20 @@ config ACPI_SLEEP_PROC_SLEEP
77 Create /proc/acpi/sleep 78 Create /proc/acpi/sleep
78 Deprecated by /sys/power/state 79 Deprecated by /sys/power/state
79 80
81config ACPI_PROCFS
82 bool "Procfs interface (deprecated)"
83 depends on ACPI
84 default y
85 ---help---
86 Procfs interface for ACPI is made optional for back-compatible.
87 As the same functions are duplicated in sysfs interface
88 and this proc interface will be removed some time later,
89 it's marked as deprecated.
90 ( /proc/acpi/debug_layer && debug_level are deprecated by
91 /sys/module/acpi/parameters/debug_layer && debug_level.
92 /proc/acpi/info is deprecated by
93 /sys/module/acpi/parameters/acpica_version )
94
80config ACPI_AC 95config ACPI_AC
81 tristate "AC Adapter" 96 tristate "AC Adapter"
82 depends on X86 97 depends on X86
@@ -107,7 +122,7 @@ config ACPI_BUTTON
107 122
108config ACPI_VIDEO 123config ACPI_VIDEO
109 tristate "Video" 124 tristate "Video"
110 depends on X86 125 depends on X86 && BACKLIGHT_CLASS_DEVICE
111 help 126 help
112 This driver implement the ACPI Extensions For Display Adapters 127 This driver implement the ACPI Extensions For Display Adapters
113 for integrated graphics devices on motherboard, as specified in 128 for integrated graphics devices on motherboard, as specified in
@@ -139,6 +154,13 @@ config ACPI_DOCK
139 help 154 help
140 This driver adds support for ACPI controlled docking stations 155 This driver adds support for ACPI controlled docking stations
141 156
157config ACPI_BAY
158 tristate "Removable Drive Bay (EXPERIMENTAL)"
159 depends on EXPERIMENTAL
160 help
161 This driver adds support for ACPI controlled removable drive
162 bays such as the IBM ultrabay or the Dell Module Bay.
163
142config ACPI_PROCESSOR 164config ACPI_PROCESSOR
143 tristate "Processor" 165 tristate "Processor"
144 default y 166 default y
@@ -186,19 +208,22 @@ config ACPI_ASUS
186 208
187 Note: display switching code is currently considered EXPERIMENTAL, 209 Note: display switching code is currently considered EXPERIMENTAL,
188 toying with these values may even lock your machine. 210 toying with these values may even lock your machine.
189 211
190 All settings are changed via /proc/acpi/asus directory entries. Owner 212 All settings are changed via /proc/acpi/asus directory entries. Owner
191 and group for these entries can be set with asus_uid and asus_gid 213 and group for these entries can be set with asus_uid and asus_gid
192 parameters. 214 parameters.
193 215
194 More information and a userspace daemon for handling the extra buttons 216 More information and a userspace daemon for handling the extra buttons
195 at <http://sourceforge.net/projects/acpi4asus/>. 217 at <http://sourceforge.net/projects/acpi4asus/>.
196 218
197 If you have an ACPI-compatible ASUS laptop, say Y or M here. This 219 If you have an ACPI-compatible ASUS laptop, say Y or M here. This
198 driver is still under development, so if your laptop is unsupported or 220 driver is still under development, so if your laptop is unsupported or
199 something works not quite as expected, please use the mailing list 221 something works not quite as expected, please use the mailing list
200 available on the above page (acpi4asus-user@lists.sourceforge.net) 222 available on the above page (acpi4asus-user@lists.sourceforge.net).
201 223
224 NOTE: This driver is deprecated and will probably be removed soon,
225 use asus-laptop instead.
226
202config ACPI_IBM 227config ACPI_IBM
203 tristate "IBM ThinkPad Laptop Extras" 228 tristate "IBM ThinkPad Laptop Extras"
204 depends on X86 229 depends on X86
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index bce7ca27b429..856c32bccacb 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -37,13 +37,15 @@ endif
37 37
38obj-y += sleep/ 38obj-y += sleep/
39obj-y += bus.o glue.o 39obj-y += bus.o glue.o
40obj-y += scan.o
40obj-$(CONFIG_ACPI_AC) += ac.o 41obj-$(CONFIG_ACPI_AC) += ac.o
41obj-$(CONFIG_ACPI_BATTERY) += battery.o 42obj-$(CONFIG_ACPI_BATTERY) += battery.o
42obj-$(CONFIG_ACPI_BUTTON) += button.o 43obj-$(CONFIG_ACPI_BUTTON) += button.o
43obj-$(CONFIG_ACPI_EC) += ec.o 44obj-$(CONFIG_ACPI_EC) += ec.o
44obj-$(CONFIG_ACPI_FAN) += fan.o 45obj-$(CONFIG_ACPI_FAN) += fan.o
45obj-$(CONFIG_ACPI_DOCK) += dock.o 46obj-$(CONFIG_ACPI_DOCK) += dock.o
46obj-$(CONFIG_ACPI_VIDEO) += video.o 47obj-$(CONFIG_ACPI_BAY) += bay.o
48obj-$(CONFIG_ACPI_VIDEO) += video.o
47obj-$(CONFIG_ACPI_HOTKEY) += hotkey.o 49obj-$(CONFIG_ACPI_HOTKEY) += hotkey.o
48obj-y += pci_root.o pci_link.o pci_irq.o pci_bind.o 50obj-y += pci_root.o pci_link.o pci_irq.o pci_bind.o
49obj-$(CONFIG_ACPI_POWER) += power.o 51obj-$(CONFIG_ACPI_POWER) += power.o
@@ -56,7 +58,6 @@ obj-$(CONFIG_ACPI_NUMA) += numa.o
56obj-$(CONFIG_ACPI_ASUS) += asus_acpi.o 58obj-$(CONFIG_ACPI_ASUS) += asus_acpi.o
57obj-$(CONFIG_ACPI_IBM) += ibm_acpi.o 59obj-$(CONFIG_ACPI_IBM) += ibm_acpi.o
58obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o 60obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
59obj-y += scan.o motherboard.o
60obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o 61obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o
61obj-y += cm_sbs.o 62obj-y += cm_sbs.o
62obj-$(CONFIG_ACPI_SBS) += i2c_ec.o sbs.o 63obj-$(CONFIG_ACPI_SBS) += i2c_ec.o sbs.o
diff --git a/drivers/acpi/asus_acpi.c b/drivers/acpi/asus_acpi.c
index 396140bbbe57..31ad70a6e22e 100644
--- a/drivers/acpi/asus_acpi.c
+++ b/drivers/acpi/asus_acpi.c
@@ -26,7 +26,7 @@
26 * Pontus Fuchs - Helper functions, cleanup 26 * Pontus Fuchs - Helper functions, cleanup
27 * Johann Wiesner - Small compile fixes 27 * Johann Wiesner - Small compile fixes
28 * John Belmonte - ACPI code for Toshiba laptop was a good starting point. 28 * John Belmonte - ACPI code for Toshiba laptop was a good starting point.
 29 * Éric Burghard - LED display support for W1N 29 * Éric Burghard - LED display support for W1N
30 * 30 *
31 */ 31 */
32 32
@@ -1128,7 +1128,6 @@ static int asus_model_match(char *model)
1128static int asus_hotk_get_info(void) 1128static int asus_hotk_get_info(void)
1129{ 1129{
1130 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 1130 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
1131 struct acpi_buffer dsdt = { ACPI_ALLOCATE_BUFFER, NULL };
1132 union acpi_object *model = NULL; 1131 union acpi_object *model = NULL;
1133 int bsts_result; 1132 int bsts_result;
1134 char *string = NULL; 1133 char *string = NULL;
@@ -1142,11 +1141,9 @@ static int asus_hotk_get_info(void)
1142 * HID), this bit will be moved. A global variable asus_info contains 1141 * HID), this bit will be moved. A global variable asus_info contains
1143 * the DSDT header. 1142 * the DSDT header.
1144 */ 1143 */
1145 status = acpi_get_table(ACPI_TABLE_ID_DSDT, 1, &dsdt); 1144 status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info);
1146 if (ACPI_FAILURE(status)) 1145 if (ACPI_FAILURE(status))
1147 printk(KERN_WARNING " Couldn't get the DSDT table header\n"); 1146 printk(KERN_WARNING " Couldn't get the DSDT table header\n");
1148 else
1149 asus_info = dsdt.pointer;
1150 1147
1151 /* We have to write 0 on init this far for all ASUS models */ 1148 /* We have to write 0 on init this far for all ASUS models */
1152 if (!write_acpi_int(hotk->handle, "INIT", 0, &buffer)) { 1149 if (!write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
@@ -1358,8 +1355,6 @@ static void __exit asus_acpi_exit(void)
1358 acpi_bus_unregister_driver(&asus_hotk_driver); 1355 acpi_bus_unregister_driver(&asus_hotk_driver);
1359 remove_proc_entry(PROC_ASUS, acpi_root_dir); 1356 remove_proc_entry(PROC_ASUS, acpi_root_dir);
1360 1357
1361 kfree(asus_info);
1362
1363 return; 1358 return;
1364} 1359}
1365 1360
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 5f43e0d14899..2f4521a48fe7 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -64,7 +64,7 @@ extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
64 64
65static int acpi_battery_add(struct acpi_device *device); 65static int acpi_battery_add(struct acpi_device *device);
66static int acpi_battery_remove(struct acpi_device *device, int type); 66static int acpi_battery_remove(struct acpi_device *device, int type);
67static int acpi_battery_resume(struct acpi_device *device, int status); 67static int acpi_battery_resume(struct acpi_device *device);
68 68
69static struct acpi_driver acpi_battery_driver = { 69static struct acpi_driver acpi_battery_driver = {
70 .name = ACPI_BATTERY_DRIVER_NAME, 70 .name = ACPI_BATTERY_DRIVER_NAME,
@@ -753,7 +753,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
753} 753}
754 754
755/* this is needed to learn about changes made in suspended state */ 755/* this is needed to learn about changes made in suspended state */
756static int acpi_battery_resume(struct acpi_device *device, int state) 756static int acpi_battery_resume(struct acpi_device *device)
757{ 757{
758 struct acpi_battery *battery; 758 struct acpi_battery *battery;
759 759
diff --git a/drivers/acpi/bay.c b/drivers/acpi/bay.c
new file mode 100644
index 000000000000..667fa1dfa1a3
--- /dev/null
+++ b/drivers/acpi/bay.c
@@ -0,0 +1,490 @@
1/*
2 * bay.c - ACPI removable drive bay driver
3 *
4 * Copyright (C) 2006 Kristen Carlson Accardi <kristen.c.accardi@intel.com>
5 *
6 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or (at
11 * your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
21 *
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 */
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/types.h>
28#include <linux/notifier.h>
29#include <acpi/acpi_bus.h>
30#include <acpi/acpi_drivers.h>
31#include <linux/seq_file.h>
32#include <asm/uaccess.h>
33#include <linux/platform_device.h>
34
35#define ACPI_BAY_DRIVER_NAME "ACPI Removable Drive Bay Driver"
36
37ACPI_MODULE_NAME("bay")
38MODULE_AUTHOR("Kristen Carlson Accardi");
39MODULE_DESCRIPTION(ACPI_BAY_DRIVER_NAME);
40MODULE_LICENSE("GPL");
41#define ACPI_BAY_CLASS "bay"
42#define ACPI_BAY_COMPONENT 0x10000000
43#define _COMPONENT ACPI_BAY_COMPONENT
44#define bay_dprintk(h,s) {\
45 char prefix[80] = {'\0'};\
46 struct acpi_buffer buffer = {sizeof(prefix), prefix};\
47 acpi_get_name(h, ACPI_FULL_PATHNAME, &buffer);\
48 printk(KERN_DEBUG PREFIX "%s: %s\n", prefix, s); }
49static void bay_notify(acpi_handle handle, u32 event, void *data);
50static int acpi_bay_add(struct acpi_device *device);
51static int acpi_bay_remove(struct acpi_device *device, int type);
52
53static struct acpi_driver acpi_bay_driver = {
54 .name = ACPI_BAY_DRIVER_NAME,
55 .class = ACPI_BAY_CLASS,
56 .ids = ACPI_BAY_HID,
57 .ops = {
58 .add = acpi_bay_add,
59 .remove = acpi_bay_remove,
60 },
61};
62
63struct bay {
64 acpi_handle handle;
65 char *name;
66 struct list_head list;
67 struct platform_device *pdev;
68};
69
70static LIST_HEAD(drive_bays);
71
72
73/*****************************************************************************
74 * Drive Bay functions *
75 *****************************************************************************/
76/**
77 * is_ejectable - see if a device is ejectable
78 * @handle: acpi handle of the device
79 *
80 * If an acpi object has a _EJ0 method, then it is ejectable
81 */
82static int is_ejectable(acpi_handle handle)
83{
84 acpi_status status;
85 acpi_handle tmp;
86
87 status = acpi_get_handle(handle, "_EJ0", &tmp);
88 if (ACPI_FAILURE(status))
89 return 0;
90 return 1;
91}
92
93/**
94 * bay_present - see if the bay device is present
95 * @bay: the drive bay
96 *
97 * execute the _STA method.
98 */
99static int bay_present(struct bay *bay)
100{
101 unsigned long sta;
102 acpi_status status;
103
104 if (bay) {
105 status = acpi_evaluate_integer(bay->handle, "_STA", NULL, &sta);
106 if (ACPI_SUCCESS(status) && sta)
107 return 1;
108 }
109 return 0;
110}
111
112/**
113 * eject_device - respond to an eject request
114 * @handle - the device to eject
115 *
116 * Call this devices _EJ0 method.
117 */
118static void eject_device(acpi_handle handle)
119{
120 struct acpi_object_list arg_list;
121 union acpi_object arg;
122
123 bay_dprintk(handle, "Ejecting device");
124
125 arg_list.count = 1;
126 arg_list.pointer = &arg;
127 arg.type = ACPI_TYPE_INTEGER;
128 arg.integer.value = 1;
129
130 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_EJ0",
131 &arg_list, NULL)))
132 pr_debug("Failed to evaluate _EJ0!\n");
133}
134
135/*
136 * show_present - read method for "present" file in sysfs
137 */
138static ssize_t show_present(struct device *dev,
139 struct device_attribute *attr, char *buf)
140{
141 struct bay *bay = dev_get_drvdata(dev);
142 return snprintf(buf, PAGE_SIZE, "%d\n", bay_present(bay));
143
144}
145DEVICE_ATTR(present, S_IRUGO, show_present, NULL);
146
147/*
148 * write_eject - write method for "eject" file in sysfs
149 */
150static ssize_t write_eject(struct device *dev, struct device_attribute *attr,
151 const char *buf, size_t count)
152{
153 struct bay *bay = dev_get_drvdata(dev);
154
155 if (!count)
156 return -EINVAL;
157
158 eject_device(bay->handle);
159 return count;
160}
161DEVICE_ATTR(eject, S_IWUSR, NULL, write_eject);
162
163/**
164 * is_ata - see if a device is an ata device
165 * @handle: acpi handle of the device
166 *
167 * If an acpi object has one of 4 ATA ACPI methods defined,
168 * then it is an ATA device
169 */
170static int is_ata(acpi_handle handle)
171{
172 acpi_handle tmp;
173
174 if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) ||
175 (ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) ||
176 (ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) ||
177 (ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp))))
178 return 1;
179
180 return 0;
181}
182
183/**
184 * parent_is_ata(acpi_handle handle)
185 *
186 */
187static int parent_is_ata(acpi_handle handle)
188{
189 acpi_handle phandle;
190
191 if (acpi_get_parent(handle, &phandle))
192 return 0;
193
194 return is_ata(phandle);
195}
196
197/**
198 * is_ejectable_bay - see if a device is an ejectable drive bay
199 * @handle: acpi handle of the device
200 *
201 * If an acpi object is ejectable and has one of the ACPI ATA
202 * methods defined, then we can safely call it an ejectable
203 * drive bay
204 */
205static int is_ejectable_bay(acpi_handle handle)
206{
207 if ((is_ata(handle) || parent_is_ata(handle)) && is_ejectable(handle))
208 return 1;
209 return 0;
210}
211
212/**
213 * eject_removable_drive - try to eject this drive
214 * @dev : the device structure of the drive
215 *
216 * If a device is a removable drive that requires an _EJ0 method
217 * to be executed in order to safely remove from the system, do
218 * it. ATM - always returns success
219 */
220int eject_removable_drive(struct device *dev)
221{
222 acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
223
224 if (handle) {
225 bay_dprintk(handle, "Got device handle");
226 if (is_ejectable_bay(handle))
227 eject_device(handle);
228 } else {
229 printk("No acpi handle for device\n");
230 }
231
232 /* should I return an error code? */
233 return 0;
234}
235EXPORT_SYMBOL_GPL(eject_removable_drive);
236
237static int acpi_bay_add(struct acpi_device *device)
238{
239 bay_dprintk(device->handle, "adding bay device");
240 strcpy(acpi_device_name(device), "Dockable Bay");
241 strcpy(acpi_device_class(device), "bay");
242 return 0;
243}
244
245static int acpi_bay_add_fs(struct bay *bay)
246{
247 int ret;
248 struct device *dev = &bay->pdev->dev;
249
250 ret = device_create_file(dev, &dev_attr_present);
251 if (ret)
252 goto add_fs_err;
253 ret = device_create_file(dev, &dev_attr_eject);
254 if (ret) {
255 device_remove_file(dev, &dev_attr_present);
256 goto add_fs_err;
257 }
258 return 0;
259
260 add_fs_err:
261 bay_dprintk(bay->handle, "Error adding sysfs files\n");
262 return ret;
263}
264
265static void acpi_bay_remove_fs(struct bay *bay)
266{
267 struct device *dev = &bay->pdev->dev;
268
269 /* cleanup sysfs */
270 device_remove_file(dev, &dev_attr_present);
271 device_remove_file(dev, &dev_attr_eject);
272}
273
274static int bay_is_dock_device(acpi_handle handle)
275{
276 acpi_handle parent;
277
278 acpi_get_parent(handle, &parent);
279
280 /* if the device or it's parent is dependent on the
281 * dock, then we are a dock device
282 */
283 return (is_dock_device(handle) || is_dock_device(parent));
284}
285
286static int bay_add(acpi_handle handle, int id)
287{
288 acpi_status status;
289 struct bay *new_bay;
290 struct platform_device *pdev;
291 struct acpi_buffer nbuffer = {ACPI_ALLOCATE_BUFFER, NULL};
292 acpi_get_name(handle, ACPI_FULL_PATHNAME, &nbuffer);
293
294 bay_dprintk(handle, "Adding notify handler");
295
296 /*
297 * Initialize bay device structure
298 */
299 new_bay = kzalloc(GFP_ATOMIC, sizeof(*new_bay));
300 INIT_LIST_HEAD(&new_bay->list);
301 new_bay->handle = handle;
302 new_bay->name = (char *)nbuffer.pointer;
303
304 /* initialize platform device stuff */
305 pdev = platform_device_register_simple(ACPI_BAY_CLASS, id, NULL, 0);
306 if (pdev == NULL) {
307 printk(KERN_ERR PREFIX "Error registering bay device\n");
308 goto bay_add_err;
309 }
310 new_bay->pdev = pdev;
311 platform_set_drvdata(pdev, new_bay);
312
313 if (acpi_bay_add_fs(new_bay)) {
314 platform_device_unregister(new_bay->pdev);
315 goto bay_add_err;
316 }
317
318 /* register for events on this device */
319 status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
320 bay_notify, new_bay);
321 if (ACPI_FAILURE(status)) {
322 printk(KERN_ERR PREFIX "Error installing bay notify handler\n");
323 }
324
325 /* if we are on a dock station, we should register for dock
326 * notifications.
327 */
328 if (bay_is_dock_device(handle)) {
329 bay_dprintk(handle, "Is dependent on dock\n");
330 register_hotplug_dock_device(handle, bay_notify, new_bay);
331 }
332 list_add(&new_bay->list, &drive_bays);
333 printk(KERN_INFO PREFIX "Bay [%s] Added\n", new_bay->name);
334 return 0;
335
336bay_add_err:
337 kfree(new_bay->name);
338 kfree(new_bay);
339 return -ENODEV;
340}
341
/* ACPI driver .remove callback. Per-device teardown is still
 * unimplemented (FIXME in the original); all cleanup currently
 * happens in bulk in bay_exit(). */
static int acpi_bay_remove(struct acpi_device *device, int type)
{
	return 0;
}
347
348/**
349 * bay_create_acpi_device - add new devices to acpi
350 * @handle - handle of the device to add
351 *
352 * This function will create a new acpi_device for the given
353 * handle if one does not exist already. This should cause
354 * acpi to scan for drivers for the given devices, and call
355 * matching driver's add routine.
356 *
357 * Returns a pointer to the acpi_device corresponding to the handle.
358 */
359static struct acpi_device * bay_create_acpi_device(acpi_handle handle)
360{
361 struct acpi_device *device = NULL;
362 struct acpi_device *parent_device;
363 acpi_handle parent;
364 int ret;
365
366 bay_dprintk(handle, "Trying to get device");
367 if (acpi_bus_get_device(handle, &device)) {
368 /*
369 * no device created for this object,
370 * so we should create one.
371 */
372 bay_dprintk(handle, "No device for handle");
373 acpi_get_parent(handle, &parent);
374 if (acpi_bus_get_device(parent, &parent_device))
375 parent_device = NULL;
376
377 ret = acpi_bus_add(&device, parent_device, handle,
378 ACPI_BUS_TYPE_DEVICE);
379 if (ret) {
380 pr_debug("error adding bus, %x\n",
381 -ret);
382 return NULL;
383 }
384 }
385 return device;
386}
387
388/**
389 * bay_notify - act upon an acpi bay notification
390 * @handle: the bay handle
391 * @event: the acpi event
392 * @data: our driver data struct
393 *
394 */
395static void bay_notify(acpi_handle handle, u32 event, void *data)
396{
397 struct acpi_device *dev;
398
399 bay_dprintk(handle, "Bay event");
400
401 switch(event) {
402 case ACPI_NOTIFY_BUS_CHECK:
403 printk("Bus Check\n");
404 case ACPI_NOTIFY_DEVICE_CHECK:
405 printk("Device Check\n");
406 dev = bay_create_acpi_device(handle);
407 if (dev)
408 acpi_bus_generate_event(dev, event, 0);
409 else
410 printk("No device for generating event\n");
411 /* wouldn't it be a good idea to just rescan SATA
412 * right here?
413 */
414 break;
415 case ACPI_NOTIFY_EJECT_REQUEST:
416 printk("Eject request\n");
417 dev = bay_create_acpi_device(handle);
418 if (dev)
419 acpi_bus_generate_event(dev, event, 0);
420 else
421 printk("No device for generating eventn");
422
423 /* wouldn't it be a good idea to just call the
424 * eject_device here if we were a SATA device?
425 */
426 break;
427 default:
428 printk("unknown event %d\n", event);
429 }
430}
431
432static acpi_status
433find_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
434{
435 int *count = (int *)context;
436
437 /*
438 * there could be more than one ejectable bay.
439 * so, just return AE_OK always so that every object
440 * will be checked.
441 */
442 if (is_ejectable_bay(handle)) {
443 bay_dprintk(handle, "found ejectable bay");
444 if (!bay_add(handle, *count))
445 (*count)++;
446 }
447 return AE_OK;
448}
449
450static int __init bay_init(void)
451{
452 int bays = 0;
453
454 INIT_LIST_HEAD(&drive_bays);
455
456 /* look for dockable drive bays */
457 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
458 ACPI_UINT32_MAX, find_bay, &bays, NULL);
459
460 if (bays)
461 if ((acpi_bus_register_driver(&acpi_bay_driver) < 0))
462 printk(KERN_ERR "Unable to register bay driver\n");
463
464 if (!bays)
465 return -ENODEV;
466
467 return 0;
468}
469
470static void __exit bay_exit(void)
471{
472 struct bay *bay, *tmp;
473
474 list_for_each_entry_safe(bay, tmp, &drive_bays, list) {
475 if (is_dock_device(bay->handle))
476 unregister_hotplug_dock_device(bay->handle);
477 acpi_bay_remove_fs(bay);
478 acpi_remove_notify_handler(bay->handle, ACPI_SYSTEM_NOTIFY,
479 bay_notify);
480 platform_device_unregister(bay->pdev);
481 kfree(bay->name);
482 kfree(bay);
483 }
484
485 acpi_bus_unregister_driver(&acpi_bay_driver);
486}
487
/* module entry/exit points */
postcore_initcall(bay_init);
module_exit(bay_exit);
490
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index f9c972b26f4f..f289fd41e77d 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -44,7 +44,7 @@ struct acpi_blacklist_item {
44 char oem_id[7]; 44 char oem_id[7];
45 char oem_table_id[9]; 45 char oem_table_id[9];
46 u32 oem_revision; 46 u32 oem_revision;
47 acpi_table_type table; 47 char *table;
48 enum acpi_blacklist_predicates oem_revision_predicate; 48 enum acpi_blacklist_predicates oem_revision_predicate;
49 char *reason; 49 char *reason;
50 u32 is_critical_error; 50 u32 is_critical_error;
@@ -56,18 +56,18 @@ struct acpi_blacklist_item {
56 */ 56 */
57static struct acpi_blacklist_item acpi_blacklist[] __initdata = { 57static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
58 /* Compaq Presario 1700 */ 58 /* Compaq Presario 1700 */
59 {"PTLTD ", " DSDT ", 0x06040000, ACPI_DSDT, less_than_or_equal, 59 {"PTLTD ", " DSDT ", 0x06040000, ACPI_SIG_DSDT, less_than_or_equal,
60 "Multiple problems", 1}, 60 "Multiple problems", 1},
61 /* Sony FX120, FX140, FX150? */ 61 /* Sony FX120, FX140, FX150? */
62 {"SONY ", "U0 ", 0x20010313, ACPI_DSDT, less_than_or_equal, 62 {"SONY ", "U0 ", 0x20010313, ACPI_SIG_DSDT, less_than_or_equal,
63 "ACPI driver problem", 1}, 63 "ACPI driver problem", 1},
64 /* Compaq Presario 800, Insyde BIOS */ 64 /* Compaq Presario 800, Insyde BIOS */
65 {"INT440", "SYSFexxx", 0x00001001, ACPI_DSDT, less_than_or_equal, 65 {"INT440", "SYSFexxx", 0x00001001, ACPI_SIG_DSDT, less_than_or_equal,
66 "Does not use _REG to protect EC OpRegions", 1}, 66 "Does not use _REG to protect EC OpRegions", 1},
67 /* IBM 600E - _ADR should return 7, but it returns 1 */ 67 /* IBM 600E - _ADR should return 7, but it returns 1 */
68 {"IBM ", "TP600E ", 0x00000105, ACPI_DSDT, less_than_or_equal, 68 {"IBM ", "TP600E ", 0x00000105, ACPI_SIG_DSDT, less_than_or_equal,
69 "Incorrect _ADR", 1}, 69 "Incorrect _ADR", 1},
70 {"ASUS\0\0", "P2B-S ", 0, ACPI_DSDT, all_versions, 70 {"ASUS\0\0", "P2B-S ", 0, ACPI_SIG_DSDT, all_versions,
71 "Bogus PCI routing", 1}, 71 "Bogus PCI routing", 1},
72 72
73 {""} 73 {""}
@@ -79,7 +79,7 @@ static int __init blacklist_by_year(void)
79{ 79{
80 int year = dmi_get_year(DMI_BIOS_DATE); 80 int year = dmi_get_year(DMI_BIOS_DATE);
81 /* Doesn't exist? Likely an old system */ 81 /* Doesn't exist? Likely an old system */
82 if (year == -1) 82 if (year == -1)
83 return 1; 83 return 1;
84 /* 0? Likely a buggy new BIOS */ 84 /* 0? Likely a buggy new BIOS */
85 if (year == 0) 85 if (year == 0)
@@ -103,22 +103,21 @@ int __init acpi_blacklisted(void)
103{ 103{
104 int i = 0; 104 int i = 0;
105 int blacklisted = 0; 105 int blacklisted = 0;
106 struct acpi_table_header *table_header; 106 struct acpi_table_header table_header;
107 107
108 while (acpi_blacklist[i].oem_id[0] != '\0') { 108 while (acpi_blacklist[i].oem_id[0] != '\0') {
109 if (acpi_get_table_header_early 109 if (acpi_get_table_header(acpi_blacklist[i].table, 0, &table_header)) {
110 (acpi_blacklist[i].table, &table_header)) {
111 i++; 110 i++;
112 continue; 111 continue;
113 } 112 }
114 113
115 if (strncmp(acpi_blacklist[i].oem_id, table_header->oem_id, 6)) { 114 if (strncmp(acpi_blacklist[i].oem_id, table_header.oem_id, 6)) {
116 i++; 115 i++;
117 continue; 116 continue;
118 } 117 }
119 118
120 if (strncmp 119 if (strncmp
121 (acpi_blacklist[i].oem_table_id, table_header->oem_table_id, 120 (acpi_blacklist[i].oem_table_id, table_header.oem_table_id,
122 8)) { 121 8)) {
123 i++; 122 i++;
124 continue; 123 continue;
@@ -127,14 +126,14 @@ int __init acpi_blacklisted(void)
127 if ((acpi_blacklist[i].oem_revision_predicate == all_versions) 126 if ((acpi_blacklist[i].oem_revision_predicate == all_versions)
128 || (acpi_blacklist[i].oem_revision_predicate == 127 || (acpi_blacklist[i].oem_revision_predicate ==
129 less_than_or_equal 128 less_than_or_equal
130 && table_header->oem_revision <= 129 && table_header.oem_revision <=
131 acpi_blacklist[i].oem_revision) 130 acpi_blacklist[i].oem_revision)
132 || (acpi_blacklist[i].oem_revision_predicate == 131 || (acpi_blacklist[i].oem_revision_predicate ==
133 greater_than_or_equal 132 greater_than_or_equal
134 && table_header->oem_revision >= 133 && table_header.oem_revision >=
135 acpi_blacklist[i].oem_revision) 134 acpi_blacklist[i].oem_revision)
136 || (acpi_blacklist[i].oem_revision_predicate == equal 135 || (acpi_blacklist[i].oem_revision_predicate == equal
137 && table_header->oem_revision == 136 && table_header.oem_revision ==
138 acpi_blacklist[i].oem_revision)) { 137 acpi_blacklist[i].oem_revision)) {
139 138
140 printk(KERN_ERR PREFIX 139 printk(KERN_ERR PREFIX
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 766332e45592..c26468da4295 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -44,9 +44,6 @@ ACPI_MODULE_NAME("acpi_bus")
44extern void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger); 44extern void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger);
45#endif 45#endif
46 46
47struct fadt_descriptor acpi_fadt;
48EXPORT_SYMBOL(acpi_fadt);
49
50struct acpi_device *acpi_root; 47struct acpi_device *acpi_root;
51struct proc_dir_entry *acpi_root_dir; 48struct proc_dir_entry *acpi_root_dir;
52EXPORT_SYMBOL(acpi_root_dir); 49EXPORT_SYMBOL(acpi_root_dir);
@@ -195,7 +192,7 @@ int acpi_bus_set_power(acpi_handle handle, int state)
195 192
196 if (!device->flags.power_manageable) { 193 if (!device->flags.power_manageable) {
197 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n", 194 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n",
198 device->kobj.name)); 195 device->dev.kobj.name));
199 return -ENODEV; 196 return -ENODEV;
200 } 197 }
201 /* 198 /*
@@ -582,11 +579,12 @@ static int __init acpi_bus_init_irq(void)
582 return 0; 579 return 0;
583} 580}
584 581
582acpi_native_uint acpi_gbl_permanent_mmap;
583
584
585void __init acpi_early_init(void) 585void __init acpi_early_init(void)
586{ 586{
587 acpi_status status = AE_OK; 587 acpi_status status = AE_OK;
588 struct acpi_buffer buffer = { sizeof(acpi_fadt), &acpi_fadt };
589
590 588
591 if (acpi_disabled) 589 if (acpi_disabled)
592 return; 590 return;
@@ -597,6 +595,15 @@ void __init acpi_early_init(void)
597 if (!acpi_strict) 595 if (!acpi_strict)
598 acpi_gbl_enable_interpreter_slack = TRUE; 596 acpi_gbl_enable_interpreter_slack = TRUE;
599 597
598 acpi_gbl_permanent_mmap = 1;
599
600 status = acpi_reallocate_root_table();
601 if (ACPI_FAILURE(status)) {
602 printk(KERN_ERR PREFIX
603 "Unable to reallocate ACPI tables\n");
604 goto error0;
605 }
606
600 status = acpi_initialize_subsystem(); 607 status = acpi_initialize_subsystem();
601 if (ACPI_FAILURE(status)) { 608 if (ACPI_FAILURE(status)) {
602 printk(KERN_ERR PREFIX 609 printk(KERN_ERR PREFIX
@@ -611,32 +618,25 @@ void __init acpi_early_init(void)
611 goto error0; 618 goto error0;
612 } 619 }
613 620
614 /*
615 * Get a separate copy of the FADT for use by other drivers.
616 */
617 status = acpi_get_table(ACPI_TABLE_ID_FADT, 1, &buffer);
618 if (ACPI_FAILURE(status)) {
619 printk(KERN_ERR PREFIX "Unable to get the FADT\n");
620 goto error0;
621 }
622#ifdef CONFIG_X86 621#ifdef CONFIG_X86
623 if (!acpi_ioapic) { 622 if (!acpi_ioapic) {
624 extern acpi_interrupt_flags acpi_sci_flags; 623 extern u8 acpi_sci_flags;
625 624
626 /* compatible (0) means level (3) */ 625 /* compatible (0) means level (3) */
627 if (acpi_sci_flags.trigger == 0) 626 if (!(acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)) {
628 acpi_sci_flags.trigger = 3; 627 acpi_sci_flags &= ~ACPI_MADT_TRIGGER_MASK;
629 628 acpi_sci_flags |= ACPI_MADT_TRIGGER_LEVEL;
629 }
630 /* Set PIC-mode SCI trigger type */ 630 /* Set PIC-mode SCI trigger type */
631 acpi_pic_sci_set_trigger(acpi_fadt.sci_int, 631 acpi_pic_sci_set_trigger(acpi_gbl_FADT.sci_interrupt,
632 acpi_sci_flags.trigger); 632 (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
633 } else { 633 } else {
634 extern int acpi_sci_override_gsi; 634 extern int acpi_sci_override_gsi;
635 /* 635 /*
636 * now that acpi_fadt is initialized, 636 * now that acpi_gbl_FADT is initialized,
637 * update it with result from INT_SRC_OVR parsing 637 * update it with result from INT_SRC_OVR parsing
638 */ 638 */
639 acpi_fadt.sci_int = acpi_sci_override_gsi; 639 acpi_gbl_FADT.sci_interrupt = acpi_sci_override_gsi;
640 } 640 }
641#endif 641#endif
642 642
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index ac860583c203..c726612fafb6 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -75,7 +75,7 @@ static int acpi_button_state_open_fs(struct inode *inode, struct file *file);
75static struct acpi_driver acpi_button_driver = { 75static struct acpi_driver acpi_button_driver = {
76 .name = ACPI_BUTTON_DRIVER_NAME, 76 .name = ACPI_BUTTON_DRIVER_NAME,
77 .class = ACPI_BUTTON_CLASS, 77 .class = ACPI_BUTTON_CLASS,
78 .ids = "ACPI_FPB,ACPI_FSB,PNP0C0D,PNP0C0C,PNP0C0E", 78 .ids = "button_power,button_sleep,PNP0C0D,PNP0C0C,PNP0C0E",
79 .ops = { 79 .ops = {
80 .add = acpi_button_add, 80 .add = acpi_button_add,
81 .remove = acpi_button_remove, 81 .remove = acpi_button_remove,
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 0a1863ec91f3..69a68fd394cf 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -167,7 +167,7 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
167 if (ACPI_FAILURE(status) || !device) { 167 if (ACPI_FAILURE(status) || !device) {
168 result = container_device_add(&device, handle); 168 result = container_device_add(&device, handle);
169 if (!result) 169 if (!result)
170 kobject_uevent(&device->kobj, 170 kobject_uevent(&device->dev.kobj,
171 KOBJ_ONLINE); 171 KOBJ_ONLINE);
172 else 172 else
173 printk("Failed to add container\n"); 173 printk("Failed to add container\n");
@@ -175,13 +175,13 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
175 } else { 175 } else {
176 if (ACPI_SUCCESS(status)) { 176 if (ACPI_SUCCESS(status)) {
177 /* device exist and this is a remove request */ 177 /* device exist and this is a remove request */
178 kobject_uevent(&device->kobj, KOBJ_OFFLINE); 178 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
179 } 179 }
180 } 180 }
181 break; 181 break;
182 case ACPI_NOTIFY_EJECT_REQUEST: 182 case ACPI_NOTIFY_EJECT_REQUEST:
183 if (!acpi_bus_get_device(handle, &device) && device) { 183 if (!acpi_bus_get_device(handle, &device) && device) {
184 kobject_uevent(&device->kobj, KOBJ_OFFLINE); 184 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
185 } 185 }
186 break; 186 break;
187 default: 187 default:
diff --git a/drivers/acpi/debug.c b/drivers/acpi/debug.c
index 35c6af8a83cd..d48f65a8f658 100644
--- a/drivers/acpi/debug.c
+++ b/drivers/acpi/debug.c
@@ -13,14 +13,11 @@
13 13
14#define _COMPONENT ACPI_SYSTEM_COMPONENT 14#define _COMPONENT ACPI_SYSTEM_COMPONENT
15ACPI_MODULE_NAME("debug") 15ACPI_MODULE_NAME("debug")
16#define ACPI_SYSTEM_FILE_DEBUG_LAYER "debug_layer" 16
17#define ACPI_SYSTEM_FILE_DEBUG_LEVEL "debug_level"
18#ifdef MODULE_PARAM_PREFIX 17#ifdef MODULE_PARAM_PREFIX
19#undef MODULE_PARAM_PREFIX 18#undef MODULE_PARAM_PREFIX
20#endif 19#endif
21#define MODULE_PARAM_PREFIX 20#define MODULE_PARAM_PREFIX "acpi."
22 module_param(acpi_dbg_layer, uint, 0400);
23module_param(acpi_dbg_level, uint, 0400);
24 21
25struct acpi_dlayer { 22struct acpi_dlayer {
26 const char *name; 23 const char *name;
@@ -86,6 +83,60 @@ static const struct acpi_dlevel acpi_debug_levels[] = {
86 ACPI_DEBUG_INIT(ACPI_LV_EVENTS), 83 ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
87}; 84};
88 85
86/* --------------------------------------------------------------------------
87 FS Interface (/sys)
88 -------------------------------------------------------------------------- */
89static int param_get_debug_layer(char *buffer, struct kernel_param *kp) {
90 int result = 0;
91 int i;
92
93 result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
94
95 for(i = 0; i <ARRAY_SIZE(acpi_debug_layers); i++) {
96 result += sprintf(buffer+result, "%-25s\t0x%08lX [%c]\n",
97 acpi_debug_layers[i].name,
98 acpi_debug_layers[i].value,
99 (acpi_dbg_layer & acpi_debug_layers[i].value) ? '*' : ' ');
100 }
101 result += sprintf(buffer+result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
102 ACPI_ALL_DRIVERS,
103 (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
104 ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer &
105 ACPI_ALL_DRIVERS) == 0 ? ' ' : '-');
106 result += sprintf(buffer+result, "--\ndebug_layer = 0x%08X ( * = enabled)\n", acpi_dbg_layer);
107
108 return result;
109}
110
111static int param_get_debug_level(char *buffer, struct kernel_param *kp) {
112 int result = 0;
113 int i;
114
115 result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
116
117 for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
118 result += sprintf(buffer+result, "%-25s\t0x%08lX [%c]\n",
119 acpi_debug_levels[i].name,
120 acpi_debug_levels[i].value,
121 (acpi_dbg_level & acpi_debug_levels[i].
122 value) ? '*' : ' ');
123 }
124 result += sprintf(buffer+result, "--\ndebug_level = 0x%08X (* = enabled)\n",
125 acpi_dbg_level);
126
127 return result;
128}
129
130module_param_call(debug_layer, param_set_uint, param_get_debug_layer, &acpi_dbg_layer, 0644);
131module_param_call(debug_level, param_set_uint, param_get_debug_level, &acpi_dbg_level, 0644);
132
133/* --------------------------------------------------------------------------
134 FS Interface (/proc)
135 -------------------------------------------------------------------------- */
136#ifdef CONFIG_ACPI_PROCFS
137#define ACPI_SYSTEM_FILE_DEBUG_LAYER "debug_layer"
138#define ACPI_SYSTEM_FILE_DEBUG_LEVEL "debug_level"
139
89static int 140static int
90acpi_system_read_debug(char *page, 141acpi_system_read_debug(char *page,
91 char **start, off_t off, int count, int *eof, void *data) 142 char **start, off_t off, int count, int *eof, void *data)
@@ -221,3 +272,4 @@ static int __init acpi_debug_init(void)
221} 272}
222 273
223subsys_initcall(acpi_debug_init); 274subsys_initcall(acpi_debug_init);
275#endif
diff --git a/drivers/acpi/dispatcher/dsfield.c b/drivers/acpi/dispatcher/dsfield.c
index a6d77efb41a0..f049639bac35 100644
--- a/drivers/acpi/dispatcher/dsfield.c
+++ b/drivers/acpi/dispatcher/dsfield.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -133,7 +133,8 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
133 } 133 }
134 } 134 }
135 135
136 /* We could put the returned object (Node) on the object stack for later, 136 /*
137 * We could put the returned object (Node) on the object stack for later,
137 * but for now, we will put it in the "op" object that the parser uses, 138 * but for now, we will put it in the "op" object that the parser uses,
138 * so we can get it again at the end of this scope 139 * so we can get it again at the end of this scope
139 */ 140 */
@@ -514,8 +515,33 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
514 515
515 /* Third arg is the bank_value */ 516 /* Third arg is the bank_value */
516 517
518 /* TBD: This arg is a term_arg, not a constant, and must be evaluated */
519
517 arg = arg->common.next; 520 arg = arg->common.next;
518 info.bank_value = (u32) arg->common.value.integer; 521
522 /* Currently, only the following constants are supported */
523
524 switch (arg->common.aml_opcode) {
525 case AML_ZERO_OP:
526 info.bank_value = 0;
527 break;
528
529 case AML_ONE_OP:
530 info.bank_value = 1;
531 break;
532
533 case AML_BYTE_OP:
534 case AML_WORD_OP:
535 case AML_DWORD_OP:
536 case AML_QWORD_OP:
537 info.bank_value = (u32) arg->common.value.integer;
538 break;
539
540 default:
541 info.bank_value = 0;
542 ACPI_ERROR((AE_INFO,
543 "Non-constant BankValue for BankField is not implemented"));
544 }
519 545
520 /* Fourth arg is the field flags */ 546 /* Fourth arg is the field flags */
521 547
diff --git a/drivers/acpi/dispatcher/dsinit.c b/drivers/acpi/dispatcher/dsinit.c
index 1888c055d10f..af923c388520 100644
--- a/drivers/acpi/dispatcher/dsinit.c
+++ b/drivers/acpi/dispatcher/dsinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -44,6 +44,7 @@
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acdispat.h> 45#include <acpi/acdispat.h>
46#include <acpi/acnamesp.h> 46#include <acpi/acnamesp.h>
47#include <acpi/actables.h>
47 48
48#define _COMPONENT ACPI_DISPATCHER 49#define _COMPONENT ACPI_DISPATCHER
49ACPI_MODULE_NAME("dsinit") 50ACPI_MODULE_NAME("dsinit")
@@ -90,7 +91,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
90 * We are only interested in NS nodes owned by the table that 91 * We are only interested in NS nodes owned by the table that
91 * was just loaded 92 * was just loaded
92 */ 93 */
93 if (node->owner_id != info->table_desc->owner_id) { 94 if (node->owner_id != info->owner_id) {
94 return (AE_OK); 95 return (AE_OK);
95 } 96 }
96 97
@@ -150,14 +151,21 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
150 ******************************************************************************/ 151 ******************************************************************************/
151 152
152acpi_status 153acpi_status
153acpi_ds_initialize_objects(struct acpi_table_desc * table_desc, 154acpi_ds_initialize_objects(acpi_native_uint table_index,
154 struct acpi_namespace_node * start_node) 155 struct acpi_namespace_node * start_node)
155{ 156{
156 acpi_status status; 157 acpi_status status;
157 struct acpi_init_walk_info info; 158 struct acpi_init_walk_info info;
159 struct acpi_table_header *table;
160 acpi_owner_id owner_id;
158 161
159 ACPI_FUNCTION_TRACE(ds_initialize_objects); 162 ACPI_FUNCTION_TRACE(ds_initialize_objects);
160 163
164 status = acpi_tb_get_owner_id(table_index, &owner_id);
165 if (ACPI_FAILURE(status)) {
166 return_ACPI_STATUS(status);
167 }
168
161 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, 169 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
162 "**** Starting initialization of namespace objects ****\n")); 170 "**** Starting initialization of namespace objects ****\n"));
163 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "Parsing all Control Methods:")); 171 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "Parsing all Control Methods:"));
@@ -166,7 +174,8 @@ acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
166 info.op_region_count = 0; 174 info.op_region_count = 0;
167 info.object_count = 0; 175 info.object_count = 0;
168 info.device_count = 0; 176 info.device_count = 0;
169 info.table_desc = table_desc; 177 info.table_index = table_index;
178 info.owner_id = owner_id;
170 179
171 /* Walk entire namespace from the supplied root */ 180 /* Walk entire namespace from the supplied root */
172 181
@@ -176,10 +185,14 @@ acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
176 ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace")); 185 ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
177 } 186 }
178 187
188 status = acpi_get_table_by_index(table_index, &table);
189 if (ACPI_FAILURE(status)) {
190 return_ACPI_STATUS(status);
191 }
192
179 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, 193 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
180 "\nTable [%4.4s](id %4.4X) - %hd Objects with %hd Devices %hd Methods %hd Regions\n", 194 "\nTable [%4.4s](id %4.4X) - %hd Objects with %hd Devices %hd Methods %hd Regions\n",
181 table_desc->pointer->signature, 195 table->signature, owner_id, info.object_count,
182 table_desc->owner_id, info.object_count,
183 info.device_count, info.method_count, 196 info.device_count, info.method_count,
184 info.op_region_count)); 197 info.op_region_count));
185 198
diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c
index cf888add3191..1cbe61905824 100644
--- a/drivers/acpi/dispatcher/dsmethod.c
+++ b/drivers/acpi/dispatcher/dsmethod.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -327,7 +327,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
327 ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state); 327 ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);
328 328
329 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, 329 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
330 "Execute method %p, currentstate=%p\n", 330 "Calling method %p, currentstate=%p\n",
331 this_walk_state->prev_op, this_walk_state)); 331 this_walk_state->prev_op, this_walk_state));
332 332
333 /* 333 /*
@@ -351,49 +351,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
351 return_ACPI_STATUS(status); 351 return_ACPI_STATUS(status);
352 } 352 }
353 353
354 /* 354 /* Begin method parse/execution. Create a new walk state */
355 * 1) Parse the method. All "normal" methods are parsed for each execution.
356 * Internal methods (_OSI, etc.) do not require parsing.
357 */
358 if (!(obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY)) {
359
360 /* Create a new walk state for the parse */
361
362 next_walk_state =
363 acpi_ds_create_walk_state(obj_desc->method.owner_id, op,
364 obj_desc, NULL);
365 if (!next_walk_state) {
366 status = AE_NO_MEMORY;
367 goto cleanup;
368 }
369
370 /* Create and init a parse tree root */
371
372 op = acpi_ps_create_scope_op();
373 if (!op) {
374 status = AE_NO_MEMORY;
375 goto cleanup;
376 }
377
378 status = acpi_ds_init_aml_walk(next_walk_state, op, method_node,
379 obj_desc->method.aml_start,
380 obj_desc->method.aml_length,
381 NULL, 1);
382 if (ACPI_FAILURE(status)) {
383 acpi_ps_delete_parse_tree(op);
384 goto cleanup;
385 }
386
387 /* Begin AML parse (deletes next_walk_state) */
388
389 status = acpi_ps_parse_aml(next_walk_state);
390 acpi_ps_delete_parse_tree(op);
391 if (ACPI_FAILURE(status)) {
392 goto cleanup;
393 }
394 }
395
396 /* 2) Begin method execution. Create a new walk state */
397 355
398 next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id, 356 next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id,
399 NULL, obj_desc, thread); 357 NULL, obj_desc, thread);
@@ -424,7 +382,8 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
424 382
425 status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node, 383 status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
426 obj_desc->method.aml_start, 384 obj_desc->method.aml_start,
427 obj_desc->method.aml_length, info, 3); 385 obj_desc->method.aml_length, info,
386 ACPI_IMODE_EXECUTE);
428 387
429 ACPI_FREE(info); 388 ACPI_FREE(info);
430 if (ACPI_FAILURE(status)) { 389 if (ACPI_FAILURE(status)) {
@@ -445,8 +404,8 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
445 this_walk_state->num_operands = 0; 404 this_walk_state->num_operands = 0;
446 405
447 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, 406 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
448 "Starting nested execution, newstate=%p\n", 407 "**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
449 next_walk_state)); 408 method_node->name.ascii, next_walk_state));
450 409
451 /* Invoke an internal method if necessary */ 410 /* Invoke an internal method if necessary */
452 411
diff --git a/drivers/acpi/dispatcher/dsmthdat.c b/drivers/acpi/dispatcher/dsmthdat.c
index 459160ff9058..ba4626e06a5e 100644
--- a/drivers/acpi/dispatcher/dsmthdat.c
+++ b/drivers/acpi/dispatcher/dsmthdat.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/dispatcher/dsobject.c b/drivers/acpi/dispatcher/dsobject.c
index 72190abb1d59..a474ca2334d5 100644
--- a/drivers/acpi/dispatcher/dsobject.c
+++ b/drivers/acpi/dispatcher/dsobject.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -260,7 +260,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
260 } 260 }
261 261
262 obj_desc->buffer.flags |= AOPOBJ_DATA_VALID; 262 obj_desc->buffer.flags |= AOPOBJ_DATA_VALID;
263 op->common.node = (struct acpi_namespace_node *)obj_desc; 263 op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
264 return_ACPI_STATUS(AE_OK); 264 return_ACPI_STATUS(AE_OK);
265} 265}
266 266
@@ -270,7 +270,8 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
270 * 270 *
271 * PARAMETERS: walk_state - Current walk state 271 * PARAMETERS: walk_state - Current walk state
272 * Op - Parser object to be translated 272 * Op - Parser object to be translated
273 * package_length - Number of elements in the package 273 * element_count - Number of elements in the package - this is
274 * the num_elements argument to Package()
274 * obj_desc_ptr - Where the ACPI internal object is returned 275 * obj_desc_ptr - Where the ACPI internal object is returned
275 * 276 *
276 * RETURN: Status 277 * RETURN: Status
@@ -278,18 +279,29 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
278 * DESCRIPTION: Translate a parser Op package object to the equivalent 279 * DESCRIPTION: Translate a parser Op package object to the equivalent
279 * namespace object 280 * namespace object
280 * 281 *
282 * NOTE: The number of elements in the package will be always be the num_elements
283 * count, regardless of the number of elements in the package list. If
284 * num_elements is smaller, only that many package list elements are used.
285 * if num_elements is larger, the Package object is padded out with
286 * objects of type Uninitialized (as per ACPI spec.)
287 *
288 * Even though the ASL compilers do not allow num_elements to be smaller
289 * than the Package list length (for the fixed length package opcode), some
290 * BIOS code modifies the AML on the fly to adjust the num_elements, and
291 * this code compensates for that. This also provides compatibility with
292 * other AML interpreters.
293 *
281 ******************************************************************************/ 294 ******************************************************************************/
282 295
283acpi_status 296acpi_status
284acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state, 297acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
285 union acpi_parse_object *op, 298 union acpi_parse_object *op,
286 u32 package_length, 299 u32 element_count,
287 union acpi_operand_object **obj_desc_ptr) 300 union acpi_operand_object **obj_desc_ptr)
288{ 301{
289 union acpi_parse_object *arg; 302 union acpi_parse_object *arg;
290 union acpi_parse_object *parent; 303 union acpi_parse_object *parent;
291 union acpi_operand_object *obj_desc = NULL; 304 union acpi_operand_object *obj_desc = NULL;
292 u32 package_list_length;
293 acpi_status status = AE_OK; 305 acpi_status status = AE_OK;
294 acpi_native_uint i; 306 acpi_native_uint i;
295 307
@@ -318,32 +330,13 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
318 obj_desc->package.node = parent->common.node; 330 obj_desc->package.node = parent->common.node;
319 } 331 }
320 332
321 obj_desc->package.count = package_length;
322
323 /* Count the number of items in the package list */
324
325 arg = op->common.value.arg;
326 arg = arg->common.next;
327 for (package_list_length = 0; arg; package_list_length++) {
328 arg = arg->common.next;
329 }
330
331 /*
332 * The package length (number of elements) will be the greater
333 * of the specified length and the length of the initializer list
334 */
335 if (package_list_length > package_length) {
336 obj_desc->package.count = package_list_length;
337 }
338
339 /* 333 /*
340 * Allocate the pointer array (array of pointers to the 334 * Allocate the element array (array of pointers to the individual
341 * individual objects). Add an extra pointer slot so 335 * objects) based on the num_elements parameter. Add an extra pointer slot
342 * that the list is always null terminated. 336 * so that the list is always null terminated.
343 */ 337 */
344 obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size) 338 obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
345 obj_desc->package. 339 element_count +
346 count +
347 1) * sizeof(void *)); 340 1) * sizeof(void *));
348 341
349 if (!obj_desc->package.elements) { 342 if (!obj_desc->package.elements) {
@@ -351,15 +344,20 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
351 return_ACPI_STATUS(AE_NO_MEMORY); 344 return_ACPI_STATUS(AE_NO_MEMORY);
352 } 345 }
353 346
347 obj_desc->package.count = element_count;
348
354 /* 349 /*
355 * Initialize all elements of the package 350 * Initialize the elements of the package, up to the num_elements count.
351 * Package is automatically padded with uninitialized (NULL) elements
352 * if num_elements is greater than the package list length. Likewise,
353 * Package is truncated if num_elements is less than the list length.
356 */ 354 */
357 arg = op->common.value.arg; 355 arg = op->common.value.arg;
358 arg = arg->common.next; 356 arg = arg->common.next;
359 for (i = 0; arg; i++) { 357 for (i = 0; arg && (i < element_count); i++) {
360 if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) { 358 if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
361 359
362 /* Object (package or buffer) is already built */ 360 /* This package element is already built, just get it */
363 361
364 obj_desc->package.elements[i] = 362 obj_desc->package.elements[i] =
365 ACPI_CAST_PTR(union acpi_operand_object, 363 ACPI_CAST_PTR(union acpi_operand_object,
@@ -373,8 +371,14 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
373 arg = arg->common.next; 371 arg = arg->common.next;
374 } 372 }
375 373
374 if (!arg) {
375 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
376 "Package List length larger than NumElements count (%X), truncated\n",
377 element_count));
378 }
379
376 obj_desc->package.flags |= AOPOBJ_DATA_VALID; 380 obj_desc->package.flags |= AOPOBJ_DATA_VALID;
377 op->common.node = (struct acpi_namespace_node *)obj_desc; 381 op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
378 return_ACPI_STATUS(status); 382 return_ACPI_STATUS(status);
379} 383}
380 384
@@ -488,8 +492,9 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
488 /* 492 /*
489 * Defer evaluation of Buffer term_arg operand 493 * Defer evaluation of Buffer term_arg operand
490 */ 494 */
491 obj_desc->buffer.node = (struct acpi_namespace_node *) 495 obj_desc->buffer.node =
492 walk_state->operands[0]; 496 ACPI_CAST_PTR(struct acpi_namespace_node,
497 walk_state->operands[0]);
493 obj_desc->buffer.aml_start = op->named.data; 498 obj_desc->buffer.aml_start = op->named.data;
494 obj_desc->buffer.aml_length = op->named.length; 499 obj_desc->buffer.aml_length = op->named.length;
495 break; 500 break;
@@ -499,8 +504,9 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
499 /* 504 /*
500 * Defer evaluation of Package term_arg operand 505 * Defer evaluation of Package term_arg operand
501 */ 506 */
502 obj_desc->package.node = (struct acpi_namespace_node *) 507 obj_desc->package.node =
503 walk_state->operands[0]; 508 ACPI_CAST_PTR(struct acpi_namespace_node,
509 walk_state->operands[0]);
504 obj_desc->package.aml_start = op->named.data; 510 obj_desc->package.aml_start = op->named.data;
505 obj_desc->package.aml_length = op->named.length; 511 obj_desc->package.aml_length = op->named.length;
506 break; 512 break;
diff --git a/drivers/acpi/dispatcher/dsopcode.c b/drivers/acpi/dispatcher/dsopcode.c
index 5b974a8fe614..6c6104a7a247 100644
--- a/drivers/acpi/dispatcher/dsopcode.c
+++ b/drivers/acpi/dispatcher/dsopcode.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -114,7 +114,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
114 } 114 }
115 115
116 status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start, 116 status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
117 aml_length, NULL, 1); 117 aml_length, NULL, ACPI_IMODE_LOAD_PASS1);
118 if (ACPI_FAILURE(status)) { 118 if (ACPI_FAILURE(status)) {
119 acpi_ds_delete_walk_state(walk_state); 119 acpi_ds_delete_walk_state(walk_state);
120 goto cleanup; 120 goto cleanup;
@@ -157,7 +157,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
157 /* Execute the opcode and arguments */ 157 /* Execute the opcode and arguments */
158 158
159 status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start, 159 status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
160 aml_length, NULL, 3); 160 aml_length, NULL, ACPI_IMODE_EXECUTE);
161 if (ACPI_FAILURE(status)) { 161 if (ACPI_FAILURE(status)) {
162 acpi_ds_delete_walk_state(walk_state); 162 acpi_ds_delete_walk_state(walk_state);
163 goto cleanup; 163 goto cleanup;
diff --git a/drivers/acpi/dispatcher/dsutils.c b/drivers/acpi/dispatcher/dsutils.c
index 05230baf5de8..e4073e05a75c 100644
--- a/drivers/acpi/dispatcher/dsutils.c
+++ b/drivers/acpi/dispatcher/dsutils.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/dispatcher/dswexec.c b/drivers/acpi/dispatcher/dswexec.c
index d7a616c3104e..69693fa07224 100644
--- a/drivers/acpi/dispatcher/dswexec.c
+++ b/drivers/acpi/dispatcher/dswexec.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -219,7 +219,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
219 if (!op) { 219 if (!op) {
220 status = acpi_ds_load2_begin_op(walk_state, out_op); 220 status = acpi_ds_load2_begin_op(walk_state, out_op);
221 if (ACPI_FAILURE(status)) { 221 if (ACPI_FAILURE(status)) {
222 return_ACPI_STATUS(status); 222 goto error_exit;
223 } 223 }
224 224
225 op = *out_op; 225 op = *out_op;
@@ -238,7 +238,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
238 238
239 status = acpi_ds_scope_stack_pop(walk_state); 239 status = acpi_ds_scope_stack_pop(walk_state);
240 if (ACPI_FAILURE(status)) { 240 if (ACPI_FAILURE(status)) {
241 return_ACPI_STATUS(status); 241 goto error_exit;
242 } 242 }
243 } 243 }
244 } 244 }
@@ -287,7 +287,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
287 287
288 status = acpi_ds_result_stack_push(walk_state); 288 status = acpi_ds_result_stack_push(walk_state);
289 if (ACPI_FAILURE(status)) { 289 if (ACPI_FAILURE(status)) {
290 return_ACPI_STATUS(status); 290 goto error_exit;
291 } 291 }
292 292
293 status = acpi_ds_exec_begin_control_op(walk_state, op); 293 status = acpi_ds_exec_begin_control_op(walk_state, op);
@@ -328,6 +328,10 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
328 /* Nothing to do here during method execution */ 328 /* Nothing to do here during method execution */
329 329
330 return_ACPI_STATUS(status); 330 return_ACPI_STATUS(status);
331
332 error_exit:
333 status = acpi_ds_method_error(status, walk_state);
334 return_ACPI_STATUS(status);
331} 335}
332 336
333/***************************************************************************** 337/*****************************************************************************
diff --git a/drivers/acpi/dispatcher/dswload.c b/drivers/acpi/dispatcher/dswload.c
index e3ca7f6539c1..8ab9d1b29a4c 100644
--- a/drivers/acpi/dispatcher/dswload.c
+++ b/drivers/acpi/dispatcher/dswload.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -196,6 +196,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
196 * one of the opcodes that actually opens a scope 196 * one of the opcodes that actually opens a scope
197 */ 197 */
198 switch (node->type) { 198 switch (node->type) {
199 case ACPI_TYPE_ANY:
199 case ACPI_TYPE_LOCAL_SCOPE: /* Scope */ 200 case ACPI_TYPE_LOCAL_SCOPE: /* Scope */
200 case ACPI_TYPE_DEVICE: 201 case ACPI_TYPE_DEVICE:
201 case ACPI_TYPE_POWER: 202 case ACPI_TYPE_POWER:
@@ -546,6 +547,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
546 acpi_status status; 547 acpi_status status;
547 acpi_object_type object_type; 548 acpi_object_type object_type;
548 char *buffer_ptr; 549 char *buffer_ptr;
550 u32 flags;
549 551
550 ACPI_FUNCTION_TRACE(ds_load2_begin_op); 552 ACPI_FUNCTION_TRACE(ds_load2_begin_op);
551 553
@@ -669,6 +671,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
669 * one of the opcodes that actually opens a scope 671 * one of the opcodes that actually opens a scope
670 */ 672 */
671 switch (node->type) { 673 switch (node->type) {
674 case ACPI_TYPE_ANY:
672 case ACPI_TYPE_LOCAL_SCOPE: /* Scope */ 675 case ACPI_TYPE_LOCAL_SCOPE: /* Scope */
673 case ACPI_TYPE_DEVICE: 676 case ACPI_TYPE_DEVICE:
674 case ACPI_TYPE_POWER: 677 case ACPI_TYPE_POWER:
@@ -750,12 +753,20 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
750 break; 753 break;
751 } 754 }
752 755
753 /* Add new entry into namespace */ 756 flags = ACPI_NS_NO_UPSEARCH;
757 if (walk_state->pass_number == ACPI_IMODE_EXECUTE) {
758
759 /* Execution mode, node cannot already exist, node is temporary */
760
761 flags |= (ACPI_NS_ERROR_IF_FOUND | ACPI_NS_TEMPORARY);
762 }
763
764 /* Add new entry or lookup existing entry */
754 765
755 status = 766 status =
756 acpi_ns_lookup(walk_state->scope_info, buffer_ptr, 767 acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
757 object_type, ACPI_IMODE_LOAD_PASS2, 768 object_type, ACPI_IMODE_LOAD_PASS2, flags,
758 ACPI_NS_NO_UPSEARCH, walk_state, &(node)); 769 walk_state, &node);
759 break; 770 break;
760 } 771 }
761 772
diff --git a/drivers/acpi/dispatcher/dswscope.c b/drivers/acpi/dispatcher/dswscope.c
index c9228972f5f6..3927c495e4bf 100644
--- a/drivers/acpi/dispatcher/dswscope.c
+++ b/drivers/acpi/dispatcher/dswscope.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/dispatcher/dswstate.c b/drivers/acpi/dispatcher/dswstate.c
index 7817e5522679..16c8e38b51ef 100644
--- a/drivers/acpi/dispatcher/dswstate.c
+++ b/drivers/acpi/dispatcher/dswstate.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 90990a4b6526..688e83a16906 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -615,20 +615,28 @@ static acpi_status
615find_dock_devices(acpi_handle handle, u32 lvl, void *context, void **rv) 615find_dock_devices(acpi_handle handle, u32 lvl, void *context, void **rv)
616{ 616{
617 acpi_status status; 617 acpi_status status;
618 acpi_handle tmp; 618 acpi_handle tmp, parent;
619 struct dock_station *ds = context; 619 struct dock_station *ds = context;
620 struct dock_dependent_device *dd; 620 struct dock_dependent_device *dd;
621 621
622 status = acpi_bus_get_ejd(handle, &tmp); 622 status = acpi_bus_get_ejd(handle, &tmp);
623 if (ACPI_FAILURE(status)) 623 if (ACPI_FAILURE(status)) {
624 return AE_OK; 624 /* try the parent device as well */
625 status = acpi_get_parent(handle, &parent);
626 if (ACPI_FAILURE(status))
627 goto fdd_out;
628 /* see if parent is dependent on dock */
629 status = acpi_bus_get_ejd(parent, &tmp);
630 if (ACPI_FAILURE(status))
631 goto fdd_out;
632 }
625 633
626 if (tmp == ds->handle) { 634 if (tmp == ds->handle) {
627 dd = alloc_dock_dependent_device(handle); 635 dd = alloc_dock_dependent_device(handle);
628 if (dd) 636 if (dd)
629 add_dock_dependent_device(ds, dd); 637 add_dock_dependent_device(ds, dd);
630 } 638 }
631 639fdd_out:
632 return AE_OK; 640 return AE_OK;
633} 641}
634 642
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index cbdf031f3c09..743ce27fa0bb 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -872,9 +872,8 @@ static int __init acpi_ec_get_real_ecdt(void)
872 acpi_status status; 872 acpi_status status;
873 struct acpi_table_ecdt *ecdt_ptr; 873 struct acpi_table_ecdt *ecdt_ptr;
874 874
875 status = acpi_get_firmware_table("ECDT", 1, ACPI_LOGICAL_ADDRESSING, 875 status = acpi_get_table(ACPI_SIG_ECDT, 1,
876 (struct acpi_table_header **) 876 (struct acpi_table_header **)&ecdt_ptr);
877 &ecdt_ptr);
878 if (ACPI_FAILURE(status)) 877 if (ACPI_FAILURE(status))
879 return -ENODEV; 878 return -ENODEV;
880 879
@@ -891,14 +890,14 @@ static int __init acpi_ec_get_real_ecdt(void)
891 if (acpi_ec_mode == EC_INTR) { 890 if (acpi_ec_mode == EC_INTR) {
892 init_waitqueue_head(&ec_ecdt->wait); 891 init_waitqueue_head(&ec_ecdt->wait);
893 } 892 }
894 ec_ecdt->command_addr = ecdt_ptr->ec_control.address; 893 ec_ecdt->command_addr = ecdt_ptr->control.address;
895 ec_ecdt->data_addr = ecdt_ptr->ec_data.address; 894 ec_ecdt->data_addr = ecdt_ptr->data.address;
896 ec_ecdt->gpe = ecdt_ptr->gpe_bit; 895 ec_ecdt->gpe = ecdt_ptr->gpe;
897 /* use the GL just to be safe */ 896 /* use the GL just to be safe */
898 ec_ecdt->global_lock = TRUE; 897 ec_ecdt->global_lock = TRUE;
899 ec_ecdt->uid = ecdt_ptr->uid; 898 ec_ecdt->uid = ecdt_ptr->uid;
900 899
901 status = acpi_get_handle(NULL, ecdt_ptr->ec_id, &ec_ecdt->handle); 900 status = acpi_get_handle(NULL, ecdt_ptr->id, &ec_ecdt->handle);
902 if (ACPI_FAILURE(status)) { 901 if (ACPI_FAILURE(status)) {
903 goto error; 902 goto error;
904 } 903 }
diff --git a/drivers/acpi/events/evevent.c b/drivers/acpi/events/evevent.c
index 919037d6acff..a1f87b5def2a 100644
--- a/drivers/acpi/events/evevent.c
+++ b/drivers/acpi/events/evevent.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -70,13 +70,6 @@ acpi_status acpi_ev_initialize_events(void)
70 70
71 ACPI_FUNCTION_TRACE(ev_initialize_events); 71 ACPI_FUNCTION_TRACE(ev_initialize_events);
72 72
73 /* Make sure we have ACPI tables */
74
75 if (!acpi_gbl_DSDT) {
76 ACPI_WARNING((AE_INFO, "No ACPI tables present!"));
77 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
78 }
79
80 /* 73 /*
81 * Initialize the Fixed and General Purpose Events. This is done prior to 74 * Initialize the Fixed and General Purpose Events. This is done prior to
82 * enabling SCIs to prevent interrupts from occurring before the handlers are 75 * enabling SCIs to prevent interrupts from occurring before the handlers are
@@ -211,8 +204,7 @@ static acpi_status acpi_ev_fixed_event_initialize(void)
211 if (acpi_gbl_fixed_event_info[i].enable_register_id != 0xFF) { 204 if (acpi_gbl_fixed_event_info[i].enable_register_id != 0xFF) {
212 status = 205 status =
213 acpi_set_register(acpi_gbl_fixed_event_info[i]. 206 acpi_set_register(acpi_gbl_fixed_event_info[i].
214 enable_register_id, 0, 207 enable_register_id, 0);
215 ACPI_MTX_LOCK);
216 if (ACPI_FAILURE(status)) { 208 if (ACPI_FAILURE(status)) {
217 return (status); 209 return (status);
218 } 210 }
@@ -298,7 +290,7 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
298 /* Clear the status bit */ 290 /* Clear the status bit */
299 291
300 (void)acpi_set_register(acpi_gbl_fixed_event_info[event]. 292 (void)acpi_set_register(acpi_gbl_fixed_event_info[event].
301 status_register_id, 1, ACPI_MTX_DO_NOT_LOCK); 293 status_register_id, 1);
302 294
303 /* 295 /*
304 * Make sure we've got a handler. If not, report an error. 296 * Make sure we've got a handler. If not, report an error.
@@ -306,8 +298,7 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
306 */ 298 */
307 if (NULL == acpi_gbl_fixed_event_handlers[event].handler) { 299 if (NULL == acpi_gbl_fixed_event_handlers[event].handler) {
308 (void)acpi_set_register(acpi_gbl_fixed_event_info[event]. 300 (void)acpi_set_register(acpi_gbl_fixed_event_info[event].
309 enable_register_id, 0, 301 enable_register_id, 0);
310 ACPI_MTX_DO_NOT_LOCK);
311 302
312 ACPI_ERROR((AE_INFO, 303 ACPI_ERROR((AE_INFO,
313 "No installed handler for fixed event [%08X]", 304 "No installed handler for fixed event [%08X]",
diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c
index c76c0583ca6a..dfac3ecc596e 100644
--- a/drivers/acpi/events/evgpe.c
+++ b/drivers/acpi/events/evgpe.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -121,7 +121,9 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
121 if (!gpe_register_info) { 121 if (!gpe_register_info) {
122 return_ACPI_STATUS(AE_NOT_EXIST); 122 return_ACPI_STATUS(AE_NOT_EXIST);
123 } 123 }
124 register_bit = gpe_event_info->register_bit; 124 register_bit = (u8)
125 (1 <<
126 (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));
125 127
126 /* 1) Disable case. Simply clear all enable bits */ 128 /* 1) Disable case. Simply clear all enable bits */
127 129
@@ -458,8 +460,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
458 460
459 /* Examine one GPE bit */ 461 /* Examine one GPE bit */
460 462
461 if (enabled_status_byte & 463 if (enabled_status_byte & (1 << j)) {
462 acpi_gbl_decode_to8bit[j]) {
463 /* 464 /*
464 * Found an active GPE. Dispatch the event to a handler 465 * Found an active GPE. Dispatch the event to a handler
465 * or method. 466 * or method.
@@ -570,7 +571,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
570 571
571 if (ACPI_FAILURE(status)) { 572 if (ACPI_FAILURE(status)) {
572 ACPI_EXCEPTION((AE_INFO, status, 573 ACPI_EXCEPTION((AE_INFO, status,
573 "While evaluating GPE method [%4.4s]", 574 "while evaluating GPE method [%4.4s]",
574 acpi_ut_get_node_name 575 acpi_ut_get_node_name
575 (local_gpe_event_info.dispatch. 576 (local_gpe_event_info.dispatch.
576 method_node))); 577 method_node)));
@@ -618,6 +619,8 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
618 619
619 ACPI_FUNCTION_TRACE(ev_gpe_dispatch); 620 ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
620 621
622 acpi_gpe_count++;
623
621 /* 624 /*
622 * If edge-triggered, clear the GPE status bit now. Note that 625 * If edge-triggered, clear the GPE status bit now. Note that
623 * level-triggered events are cleared after the GPE is serviced. 626 * level-triggered events are cleared after the GPE is serviced.
@@ -633,20 +636,23 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
633 } 636 }
634 } 637 }
635 638
636 /* Save current system state */ 639 if (!acpi_gbl_system_awake_and_running) {
637 640 /*
638 if (acpi_gbl_system_awake_and_running) { 641 * We just woke up because of a wake GPE. Disable any further GPEs
639 ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_SYSTEM_RUNNING); 642 * until we are fully up and running (Only wake GPEs should be enabled
640 } else { 643 * at this time, but we just brute-force disable them all.)
641 ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_SYSTEM_RUNNING); 644 * 1) We must disable this particular wake GPE so it won't fire again
645 * 2) We want to disable all wake GPEs, since we are now awake
646 */
647 (void)acpi_hw_disable_all_gpes();
642 } 648 }
643 649
644 /* 650 /*
645 * Dispatch the GPE to either an installed handler, or the control 651 * Dispatch the GPE to either an installed handler, or the control method
646 * method associated with this GPE (_Lxx or _Exx). 652 * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
647 * If a handler exists, we invoke it and do not attempt to run the method. 653 * it and do not attempt to run the method. If there is neither a handler
648 * If there is neither a handler nor a method, we disable the level to 654 * nor a method, we disable this GPE to prevent further such pointless
649 * prevent further events from coming in here. 655 * events from firing.
650 */ 656 */
651 switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { 657 switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
652 case ACPI_GPE_DISPATCH_HANDLER: 658 case ACPI_GPE_DISPATCH_HANDLER:
@@ -677,8 +683,8 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
677 case ACPI_GPE_DISPATCH_METHOD: 683 case ACPI_GPE_DISPATCH_METHOD:
678 684
679 /* 685 /*
680 * Disable GPE, so it doesn't keep firing before the method has a 686 * Disable the GPE, so it doesn't keep firing before the method has a
681 * chance to run. 687 * chance to run (it runs asynchronously with interrupts enabled).
682 */ 688 */
683 status = acpi_ev_disable_gpe(gpe_event_info); 689 status = acpi_ev_disable_gpe(gpe_event_info);
684 if (ACPI_FAILURE(status)) { 690 if (ACPI_FAILURE(status)) {
@@ -711,7 +717,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
711 gpe_number)); 717 gpe_number));
712 718
713 /* 719 /*
714 * Disable the GPE. The GPE will remain disabled until the ACPI 720 * Disable the GPE. The GPE will remain disabled until the ACPI
715 * Core Subsystem is restarted, or a handler is installed. 721 * Core Subsystem is restarted, or a handler is installed.
716 */ 722 */
717 status = acpi_ev_disable_gpe(gpe_event_info); 723 status = acpi_ev_disable_gpe(gpe_event_info);
@@ -726,50 +732,3 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
726 732
727 return_UINT32(ACPI_INTERRUPT_HANDLED); 733 return_UINT32(ACPI_INTERRUPT_HANDLED);
728} 734}
729
730#ifdef ACPI_GPE_NOTIFY_CHECK
731/*******************************************************************************
732 * TBD: NOT USED, PROTOTYPE ONLY AND WILL PROBABLY BE REMOVED
733 *
734 * FUNCTION: acpi_ev_check_for_wake_only_gpe
735 *
736 * PARAMETERS: gpe_event_info - info for this GPE
737 *
738 * RETURN: Status
739 *
740 * DESCRIPTION: Determine if a a GPE is "wake-only".
741 *
742 * Called from Notify() code in interpreter when a "DeviceWake"
743 * Notify comes in.
744 *
745 ******************************************************************************/
746
747acpi_status
748acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info)
749{
750 acpi_status status;
751
752 ACPI_FUNCTION_TRACE(ev_check_for_wake_only_gpe);
753
754 if ((gpe_event_info) && /* Only >0 for _Lxx/_Exx */
755 ((gpe_event_info->flags & ACPI_GPE_SYSTEM_MASK) == ACPI_GPE_SYSTEM_RUNNING)) { /* System state at GPE time */
756 /* This must be a wake-only GPE, disable it */
757
758 status = acpi_ev_disable_gpe(gpe_event_info);
759
760 /* Set GPE to wake-only. Do not change wake disabled/enabled status */
761
762 acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE);
763
764 ACPI_INFO((AE_INFO,
765 "GPE %p was updated from wake/run to wake-only",
766 gpe_event_info));
767
768 /* This was a wake-only GPE */
769
770 return_ACPI_STATUS(AE_WAKE_ONLY_GPE);
771 }
772
773 return_ACPI_STATUS(AE_OK);
774}
775#endif
diff --git a/drivers/acpi/events/evgpeblk.c b/drivers/acpi/events/evgpeblk.c
index 95ddeb48bc0f..ad5bc76edf46 100644
--- a/drivers/acpi/events/evgpeblk.c
+++ b/drivers/acpi/events/evgpeblk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -529,7 +529,7 @@ static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
529 529
530 /* Install new interrupt handler if not SCI_INT */ 530 /* Install new interrupt handler if not SCI_INT */
531 531
532 if (interrupt_number != acpi_gbl_FADT->sci_int) { 532 if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
533 status = acpi_os_install_interrupt_handler(interrupt_number, 533 status = acpi_os_install_interrupt_handler(interrupt_number,
534 acpi_ev_gpe_xrupt_handler, 534 acpi_ev_gpe_xrupt_handler,
535 gpe_xrupt); 535 gpe_xrupt);
@@ -567,7 +567,7 @@ acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
567 567
568 /* We never want to remove the SCI interrupt handler */ 568 /* We never want to remove the SCI interrupt handler */
569 569
570 if (gpe_xrupt->interrupt_number == acpi_gbl_FADT->sci_int) { 570 if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
571 gpe_xrupt->gpe_block_list_head = NULL; 571 gpe_xrupt->gpe_block_list_head = NULL;
572 return_ACPI_STATUS(AE_OK); 572 return_ACPI_STATUS(AE_OK);
573 } 573 }
@@ -796,30 +796,31 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
796 (u8) (gpe_block->block_base_number + 796 (u8) (gpe_block->block_base_number +
797 (i * ACPI_GPE_REGISTER_WIDTH)); 797 (i * ACPI_GPE_REGISTER_WIDTH));
798 798
799 ACPI_STORE_ADDRESS(this_register->status_address.address, 799 this_register->status_address.address =
800 (gpe_block->block_address.address + i)); 800 gpe_block->block_address.address + i;
801 801
802 ACPI_STORE_ADDRESS(this_register->enable_address.address, 802 this_register->enable_address.address =
803 (gpe_block->block_address.address 803 gpe_block->block_address.address + i +
804 + i + gpe_block->register_count)); 804 gpe_block->register_count;
805 805
806 this_register->status_address.address_space_id = 806 this_register->status_address.space_id =
807 gpe_block->block_address.address_space_id; 807 gpe_block->block_address.space_id;
808 this_register->enable_address.address_space_id = 808 this_register->enable_address.space_id =
809 gpe_block->block_address.address_space_id; 809 gpe_block->block_address.space_id;
810 this_register->status_address.register_bit_width = 810 this_register->status_address.bit_width =
811 ACPI_GPE_REGISTER_WIDTH; 811 ACPI_GPE_REGISTER_WIDTH;
812 this_register->enable_address.register_bit_width = 812 this_register->enable_address.bit_width =
813 ACPI_GPE_REGISTER_WIDTH; 813 ACPI_GPE_REGISTER_WIDTH;
814 this_register->status_address.register_bit_offset = 814 this_register->status_address.bit_offset =
815 ACPI_GPE_REGISTER_WIDTH; 815 ACPI_GPE_REGISTER_WIDTH;
816 this_register->enable_address.register_bit_offset = 816 this_register->enable_address.bit_offset =
817 ACPI_GPE_REGISTER_WIDTH; 817 ACPI_GPE_REGISTER_WIDTH;
818 818
819 /* Init the event_info for each GPE within this register */ 819 /* Init the event_info for each GPE within this register */
820 820
821 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { 821 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
822 this_event->register_bit = acpi_gbl_decode_to8bit[j]; 822 this_event->gpe_number =
823 (u8) (this_register->base_gpe_number + j);
823 this_event->register_info = this_register; 824 this_event->register_info = this_register;
824 this_event++; 825 this_event++;
825 } 826 }
@@ -1109,11 +1110,12 @@ acpi_status acpi_ev_gpe_initialize(void)
1109 * If EITHER the register length OR the block address are zero, then that 1110 * If EITHER the register length OR the block address are zero, then that
1110 * particular block is not supported. 1111 * particular block is not supported.
1111 */ 1112 */
1112 if (acpi_gbl_FADT->gpe0_blk_len && acpi_gbl_FADT->xgpe0_blk.address) { 1113 if (acpi_gbl_FADT.gpe0_block_length &&
1114 acpi_gbl_FADT.xgpe0_block.address) {
1113 1115
1114 /* GPE block 0 exists (has both length and address > 0) */ 1116 /* GPE block 0 exists (has both length and address > 0) */
1115 1117
1116 register_count0 = (u16) (acpi_gbl_FADT->gpe0_blk_len / 2); 1118 register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2);
1117 1119
1118 gpe_number_max = 1120 gpe_number_max =
1119 (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1; 1121 (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
@@ -1121,9 +1123,9 @@ acpi_status acpi_ev_gpe_initialize(void)
1121 /* Install GPE Block 0 */ 1123 /* Install GPE Block 0 */
1122 1124
1123 status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device, 1125 status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
1124 &acpi_gbl_FADT->xgpe0_blk, 1126 &acpi_gbl_FADT.xgpe0_block,
1125 register_count0, 0, 1127 register_count0, 0,
1126 acpi_gbl_FADT->sci_int, 1128 acpi_gbl_FADT.sci_interrupt,
1127 &acpi_gbl_gpe_fadt_blocks[0]); 1129 &acpi_gbl_gpe_fadt_blocks[0]);
1128 1130
1129 if (ACPI_FAILURE(status)) { 1131 if (ACPI_FAILURE(status)) {
@@ -1132,20 +1134,21 @@ acpi_status acpi_ev_gpe_initialize(void)
1132 } 1134 }
1133 } 1135 }
1134 1136
1135 if (acpi_gbl_FADT->gpe1_blk_len && acpi_gbl_FADT->xgpe1_blk.address) { 1137 if (acpi_gbl_FADT.gpe1_block_length &&
1138 acpi_gbl_FADT.xgpe1_block.address) {
1136 1139
1137 /* GPE block 1 exists (has both length and address > 0) */ 1140 /* GPE block 1 exists (has both length and address > 0) */
1138 1141
1139 register_count1 = (u16) (acpi_gbl_FADT->gpe1_blk_len / 2); 1142 register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2);
1140 1143
1141 /* Check for GPE0/GPE1 overlap (if both banks exist) */ 1144 /* Check for GPE0/GPE1 overlap (if both banks exist) */
1142 1145
1143 if ((register_count0) && 1146 if ((register_count0) &&
1144 (gpe_number_max >= acpi_gbl_FADT->gpe1_base)) { 1147 (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
1145 ACPI_ERROR((AE_INFO, 1148 ACPI_ERROR((AE_INFO,
1146 "GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1", 1149 "GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1",
1147 gpe_number_max, acpi_gbl_FADT->gpe1_base, 1150 gpe_number_max, acpi_gbl_FADT.gpe1_base,
1148 acpi_gbl_FADT->gpe1_base + 1151 acpi_gbl_FADT.gpe1_base +
1149 ((register_count1 * 1152 ((register_count1 *
1150 ACPI_GPE_REGISTER_WIDTH) - 1))); 1153 ACPI_GPE_REGISTER_WIDTH) - 1)));
1151 1154
@@ -1157,10 +1160,11 @@ acpi_status acpi_ev_gpe_initialize(void)
1157 1160
1158 status = 1161 status =
1159 acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device, 1162 acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
1160 &acpi_gbl_FADT->xgpe1_blk, 1163 &acpi_gbl_FADT.xgpe1_block,
1161 register_count1, 1164 register_count1,
1162 acpi_gbl_FADT->gpe1_base, 1165 acpi_gbl_FADT.gpe1_base,
1163 acpi_gbl_FADT->sci_int, 1166 acpi_gbl_FADT.
1167 sci_interrupt,
1164 &acpi_gbl_gpe_fadt_blocks 1168 &acpi_gbl_gpe_fadt_blocks
1165 [1]); 1169 [1]);
1166 1170
@@ -1173,7 +1177,7 @@ acpi_status acpi_ev_gpe_initialize(void)
1173 * GPE0 and GPE1 do not have to be contiguous in the GPE number 1177 * GPE0 and GPE1 do not have to be contiguous in the GPE number
1174 * space. However, GPE0 always starts at GPE number zero. 1178 * space. However, GPE0 always starts at GPE number zero.
1175 */ 1179 */
1176 gpe_number_max = acpi_gbl_FADT->gpe1_base + 1180 gpe_number_max = acpi_gbl_FADT.gpe1_base +
1177 ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1); 1181 ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
1178 } 1182 }
1179 } 1183 }
diff --git a/drivers/acpi/events/evmisc.c b/drivers/acpi/events/evmisc.c
index bf63edc6608d..1b784ffe54c3 100644
--- a/drivers/acpi/events/evmisc.c
+++ b/drivers/acpi/events/evmisc.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -63,14 +63,18 @@ static const char *acpi_notify_value_names[] = {
63}; 63};
64#endif 64#endif
65 65
66/* Pointer to FACS needed for the Global Lock */
67
68static struct acpi_table_facs *facs = NULL;
69
66/* Local prototypes */ 70/* Local prototypes */
67 71
68static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context); 72static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context);
69 73
70static void ACPI_SYSTEM_XFACE acpi_ev_global_lock_thread(void *context);
71
72static u32 acpi_ev_global_lock_handler(void *context); 74static u32 acpi_ev_global_lock_handler(void *context);
73 75
76static acpi_status acpi_ev_remove_global_lock_handler(void);
77
74/******************************************************************************* 78/*******************************************************************************
75 * 79 *
76 * FUNCTION: acpi_ev_is_notify_object 80 * FUNCTION: acpi_ev_is_notify_object
@@ -282,49 +286,19 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
282 286
283/******************************************************************************* 287/*******************************************************************************
284 * 288 *
285 * FUNCTION: acpi_ev_global_lock_thread
286 *
287 * PARAMETERS: Context - From thread interface, not used
288 *
289 * RETURN: None
290 *
291 * DESCRIPTION: Invoked by SCI interrupt handler upon acquisition of the
292 * Global Lock. Simply signal all threads that are waiting
293 * for the lock.
294 *
295 ******************************************************************************/
296
297static void ACPI_SYSTEM_XFACE acpi_ev_global_lock_thread(void *context)
298{
299 acpi_status status;
300
301 /* Signal threads that are waiting for the lock */
302
303 if (acpi_gbl_global_lock_thread_count) {
304
305 /* Send sufficient units to the semaphore */
306
307 status =
308 acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore,
309 acpi_gbl_global_lock_thread_count);
310 if (ACPI_FAILURE(status)) {
311 ACPI_ERROR((AE_INFO,
312 "Could not signal Global Lock semaphore"));
313 }
314 }
315}
316
317/*******************************************************************************
318 *
319 * FUNCTION: acpi_ev_global_lock_handler 289 * FUNCTION: acpi_ev_global_lock_handler
320 * 290 *
321 * PARAMETERS: Context - From thread interface, not used 291 * PARAMETERS: Context - From thread interface, not used
322 * 292 *
323 * RETURN: ACPI_INTERRUPT_HANDLED or ACPI_INTERRUPT_NOT_HANDLED 293 * RETURN: ACPI_INTERRUPT_HANDLED
324 * 294 *
325 * DESCRIPTION: Invoked directly from the SCI handler when a global lock 295 * DESCRIPTION: Invoked directly from the SCI handler when a global lock
326 * release interrupt occurs. Grab the global lock and queue 296 * release interrupt occurs. Attempt to acquire the global lock,
327 * the global lock thread for execution 297 * if successful, signal the thread waiting for the lock.
298 *
299 * NOTE: Assumes that the semaphore can be signaled from interrupt level. If
300 * this is not possible for some reason, a separate thread will have to be
301 * scheduled to do this.
328 * 302 *
329 ******************************************************************************/ 303 ******************************************************************************/
330 304
@@ -333,16 +307,24 @@ static u32 acpi_ev_global_lock_handler(void *context)
333 u8 acquired = FALSE; 307 u8 acquired = FALSE;
334 308
335 /* 309 /*
336 * Attempt to get the lock 310 * Attempt to get the lock.
311 *
337 * If we don't get it now, it will be marked pending and we will 312 * If we don't get it now, it will be marked pending and we will
338 * take another interrupt when it becomes free. 313 * take another interrupt when it becomes free.
339 */ 314 */
340 ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_common_fACS.global_lock, acquired); 315 ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired);
341 if (acquired) { 316 if (acquired) {
342 317
343 /* Got the lock, now wake all threads waiting for it */ 318 /* Got the lock, now wake all threads waiting for it */
319
344 acpi_gbl_global_lock_acquired = TRUE; 320 acpi_gbl_global_lock_acquired = TRUE;
345 acpi_ev_global_lock_thread(context); 321 /* Send a unit to the semaphore */
322
323 if (ACPI_FAILURE(acpi_os_signal_semaphore(
324 acpi_gbl_global_lock_semaphore, 1))) {
325 ACPI_ERROR((AE_INFO,
326 "Could not signal Global Lock semaphore"));
327 }
346 } 328 }
347 329
348 return (ACPI_INTERRUPT_HANDLED); 330 return (ACPI_INTERRUPT_HANDLED);
@@ -366,6 +348,13 @@ acpi_status acpi_ev_init_global_lock_handler(void)
366 348
367 ACPI_FUNCTION_TRACE(ev_init_global_lock_handler); 349 ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
368 350
351 status =
352 acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
353 (struct acpi_table_header **)&facs);
354 if (ACPI_FAILURE(status)) {
355 return_ACPI_STATUS(status);
356 }
357
369 acpi_gbl_global_lock_present = TRUE; 358 acpi_gbl_global_lock_present = TRUE;
370 status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL, 359 status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
371 acpi_ev_global_lock_handler, 360 acpi_ev_global_lock_handler,
@@ -389,6 +378,31 @@ acpi_status acpi_ev_init_global_lock_handler(void)
389 return_ACPI_STATUS(status); 378 return_ACPI_STATUS(status);
390} 379}
391 380
381/*******************************************************************************
382 *
383 * FUNCTION: acpi_ev_remove_global_lock_handler
384 *
385 * PARAMETERS: None
386 *
387 * RETURN: Status
388 *
389 * DESCRIPTION: Remove the handler for the Global Lock
390 *
391 ******************************************************************************/
392
393static acpi_status acpi_ev_remove_global_lock_handler(void)
394{
395 acpi_status status;
396
397 ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler);
398
399 acpi_gbl_global_lock_present = FALSE;
400 status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL,
401 acpi_ev_global_lock_handler);
402
403 return_ACPI_STATUS(status);
404}
405
392/****************************************************************************** 406/******************************************************************************
393 * 407 *
394 * FUNCTION: acpi_ev_acquire_global_lock 408 * FUNCTION: acpi_ev_acquire_global_lock
@@ -399,6 +413,16 @@ acpi_status acpi_ev_init_global_lock_handler(void)
399 * 413 *
400 * DESCRIPTION: Attempt to gain ownership of the Global Lock. 414 * DESCRIPTION: Attempt to gain ownership of the Global Lock.
401 * 415 *
416 * MUTEX: Interpreter must be locked
417 *
418 * Note: The original implementation allowed multiple threads to "acquire" the
419 * Global Lock, and the OS would hold the lock until the last thread had
420 * released it. However, this could potentially starve the BIOS out of the
421 * lock, especially in the case where there is a tight handshake between the
422 * Embedded Controller driver and the BIOS. Therefore, this implementation
423 * allows only one thread to acquire the HW Global Lock at a time, and makes
424 * the global lock appear as a standard mutex on the OS side.
425 *
402 *****************************************************************************/ 426 *****************************************************************************/
403 427
404acpi_status acpi_ev_acquire_global_lock(u16 timeout) 428acpi_status acpi_ev_acquire_global_lock(u16 timeout)
@@ -408,53 +432,51 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
408 432
409 ACPI_FUNCTION_TRACE(ev_acquire_global_lock); 433 ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
410 434
411#ifndef ACPI_APPLICATION 435 /*
412 /* Make sure that we actually have a global lock */ 436 * Only one thread can acquire the GL at a time, the global_lock_mutex
413 437 * enforces this. This interface releases the interpreter if we must wait.
414 if (!acpi_gbl_global_lock_present) { 438 */
415 return_ACPI_STATUS(AE_NO_GLOBAL_LOCK); 439 status = acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex, timeout);
440 if (ACPI_FAILURE(status)) {
441 return_ACPI_STATUS(status);
416 } 442 }
417#endif
418
419 /* One more thread wants the global lock */
420
421 acpi_gbl_global_lock_thread_count++;
422 443
423 /* 444 /*
424 * If we (OS side vs. BIOS side) have the hardware lock already, 445 * Make sure that a global lock actually exists. If not, just treat
425 * we are done 446 * the lock as a standard mutex.
426 */ 447 */
427 if (acpi_gbl_global_lock_acquired) { 448 if (!acpi_gbl_global_lock_present) {
449 acpi_gbl_global_lock_acquired = TRUE;
428 return_ACPI_STATUS(AE_OK); 450 return_ACPI_STATUS(AE_OK);
429 } 451 }
430 452
431 /* We must acquire the actual hardware lock */ 453 /* Attempt to acquire the actual hardware lock */
432 454
433 ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_common_fACS.global_lock, acquired); 455 ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired);
434 if (acquired) { 456 if (acquired) {
435 457
436 /* We got the lock */ 458 /* We got the lock */
437 459
438 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 460 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
439 "Acquired the HW Global Lock\n")); 461 "Acquired hardware Global Lock\n"));
440 462
441 acpi_gbl_global_lock_acquired = TRUE; 463 acpi_gbl_global_lock_acquired = TRUE;
442 return_ACPI_STATUS(AE_OK); 464 return_ACPI_STATUS(AE_OK);
443 } 465 }
444 466
445 /* 467 /*
446 * Did not get the lock. The pending bit was set above, and we must now 468 * Did not get the lock. The pending bit was set above, and we must now
447 * wait until we get the global lock released interrupt. 469 * wait until we get the global lock released interrupt.
448 */ 470 */
449 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for the HW Global Lock\n")); 471 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n"));
450 472
451 /* 473 /*
452 * Acquire the global lock semaphore first. 474 * Wait for handshake with the global lock interrupt handler.
453 * Since this wait will block, we must release the interpreter 475 * This interface releases the interpreter if we must wait.
454 */ 476 */
455 status = 477 status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
456 acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore, 478 ACPI_WAIT_FOREVER);
457 timeout); 479
458 return_ACPI_STATUS(status); 480 return_ACPI_STATUS(status);
459} 481}
460 482
@@ -477,38 +499,39 @@ acpi_status acpi_ev_release_global_lock(void)
477 499
478 ACPI_FUNCTION_TRACE(ev_release_global_lock); 500 ACPI_FUNCTION_TRACE(ev_release_global_lock);
479 501
480 if (!acpi_gbl_global_lock_thread_count) { 502 /* Lock must be already acquired */
503
504 if (!acpi_gbl_global_lock_acquired) {
481 ACPI_WARNING((AE_INFO, 505 ACPI_WARNING((AE_INFO,
482 "Cannot release HW Global Lock, it has not been acquired")); 506 "Cannot release the ACPI Global Lock, it has not been acquired"));
483 return_ACPI_STATUS(AE_NOT_ACQUIRED); 507 return_ACPI_STATUS(AE_NOT_ACQUIRED);
484 } 508 }
485 509
486 /* One fewer thread has the global lock */ 510 if (acpi_gbl_global_lock_present) {
487 511
488 acpi_gbl_global_lock_thread_count--; 512 /* Allow any thread to release the lock */
489 if (acpi_gbl_global_lock_thread_count) {
490 513
491 /* There are still some threads holding the lock, cannot release */ 514 ACPI_RELEASE_GLOBAL_LOCK(facs, pending);
492 515
493 return_ACPI_STATUS(AE_OK); 516 /*
517 * If the pending bit was set, we must write GBL_RLS to the control
518 * register
519 */
520 if (pending) {
521 status =
522 acpi_set_register(ACPI_BITREG_GLOBAL_LOCK_RELEASE,
523 1);
524 }
525
526 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
527 "Released hardware Global Lock\n"));
494 } 528 }
495 529
496 /*
497 * No more threads holding lock, we can do the actual hardware
498 * release
499 */
500 ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_common_fACS.global_lock, pending);
501 acpi_gbl_global_lock_acquired = FALSE; 530 acpi_gbl_global_lock_acquired = FALSE;
502 531
503 /* 532 /* Release the local GL mutex */
504 * If the pending bit was set, we must write GBL_RLS to the control
505 * register
506 */
507 if (pending) {
508 status = acpi_set_register(ACPI_BITREG_GLOBAL_LOCK_RELEASE,
509 1, ACPI_MTX_LOCK);
510 }
511 533
534 acpi_os_release_mutex(acpi_gbl_global_lock_mutex);
512 return_ACPI_STATUS(status); 535 return_ACPI_STATUS(status);
513} 536}
514 537
@@ -558,6 +581,12 @@ void acpi_ev_terminate(void)
558 if (ACPI_FAILURE(status)) { 581 if (ACPI_FAILURE(status)) {
559 ACPI_ERROR((AE_INFO, "Could not remove SCI handler")); 582 ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
560 } 583 }
584
585 status = acpi_ev_remove_global_lock_handler();
586 if (ACPI_FAILURE(status)) {
587 ACPI_ERROR((AE_INFO,
588 "Could not remove Global Lock handler"));
589 }
561 } 590 }
562 591
563 /* Deallocate all handler objects installed within GPE info structs */ 592 /* Deallocate all handler objects installed within GPE info structs */
diff --git a/drivers/acpi/events/evregion.c b/drivers/acpi/events/evregion.c
index 21caae04fe85..e99f0c435a47 100644
--- a/drivers/acpi/events/evregion.c
+++ b/drivers/acpi/events/evregion.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -291,7 +291,6 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
291 u32 bit_width, acpi_integer * value) 291 u32 bit_width, acpi_integer * value)
292{ 292{
293 acpi_status status; 293 acpi_status status;
294 acpi_status status2;
295 acpi_adr_space_handler handler; 294 acpi_adr_space_handler handler;
296 acpi_adr_space_setup region_setup; 295 acpi_adr_space_setup region_setup;
297 union acpi_operand_object *handler_desc; 296 union acpi_operand_object *handler_desc;
@@ -345,7 +344,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
345 * setup will potentially execute control methods 344 * setup will potentially execute control methods
346 * (e.g., _REG method for this region) 345 * (e.g., _REG method for this region)
347 */ 346 */
348 acpi_ex_exit_interpreter(); 347 acpi_ex_relinquish_interpreter();
349 348
350 status = region_setup(region_obj, ACPI_REGION_ACTIVATE, 349 status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
351 handler_desc->address_space.context, 350 handler_desc->address_space.context,
@@ -353,10 +352,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
353 352
354 /* Re-enter the interpreter */ 353 /* Re-enter the interpreter */
355 354
356 status2 = acpi_ex_enter_interpreter(); 355 acpi_ex_reacquire_interpreter();
357 if (ACPI_FAILURE(status2)) {
358 return_ACPI_STATUS(status2);
359 }
360 356
361 /* Check for failure of the Region Setup */ 357 /* Check for failure of the Region Setup */
362 358
@@ -409,7 +405,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
409 * exit the interpreter because the handler *might* block -- we don't 405 * exit the interpreter because the handler *might* block -- we don't
410 * know what it will do, so we can't hold the lock on the intepreter. 406 * know what it will do, so we can't hold the lock on the intepreter.
411 */ 407 */
412 acpi_ex_exit_interpreter(); 408 acpi_ex_relinquish_interpreter();
413 } 409 }
414 410
415 /* Call the handler */ 411 /* Call the handler */
@@ -430,10 +426,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
430 * We just returned from a non-default handler, we must re-enter the 426 * We just returned from a non-default handler, we must re-enter the
431 * interpreter 427 * interpreter
432 */ 428 */
433 status2 = acpi_ex_enter_interpreter(); 429 acpi_ex_reacquire_interpreter();
434 if (ACPI_FAILURE(status2)) {
435 return_ACPI_STATUS(status2);
436 }
437 } 430 }
438 431
439 return_ACPI_STATUS(status); 432 return_ACPI_STATUS(status);
diff --git a/drivers/acpi/events/evrgnini.c b/drivers/acpi/events/evrgnini.c
index 203d1359190a..a4fa7e6822a3 100644
--- a/drivers/acpi/events/evrgnini.c
+++ b/drivers/acpi/events/evrgnini.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -48,6 +48,11 @@
48#define _COMPONENT ACPI_EVENTS 48#define _COMPONENT ACPI_EVENTS
49ACPI_MODULE_NAME("evrgnini") 49ACPI_MODULE_NAME("evrgnini")
50 50
51/* Local prototypes */
52static u8 acpi_ev_match_pci_root_bridge(char *id);
53
54static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
55
51/******************************************************************************* 56/*******************************************************************************
52 * 57 *
53 * FUNCTION: acpi_ev_system_memory_region_setup 58 * FUNCTION: acpi_ev_system_memory_region_setup
@@ -62,6 +67,7 @@ ACPI_MODULE_NAME("evrgnini")
62 * DESCRIPTION: Setup a system_memory operation region 67 * DESCRIPTION: Setup a system_memory operation region
63 * 68 *
64 ******************************************************************************/ 69 ******************************************************************************/
70
65acpi_status 71acpi_status
66acpi_ev_system_memory_region_setup(acpi_handle handle, 72acpi_ev_system_memory_region_setup(acpi_handle handle,
67 u32 function, 73 u32 function,
@@ -168,9 +174,9 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
168 union acpi_operand_object *handler_obj; 174 union acpi_operand_object *handler_obj;
169 struct acpi_namespace_node *parent_node; 175 struct acpi_namespace_node *parent_node;
170 struct acpi_namespace_node *pci_root_node; 176 struct acpi_namespace_node *pci_root_node;
177 struct acpi_namespace_node *pci_device_node;
171 union acpi_operand_object *region_obj = 178 union acpi_operand_object *region_obj =
172 (union acpi_operand_object *)handle; 179 (union acpi_operand_object *)handle;
173 struct acpi_device_id object_hID;
174 180
175 ACPI_FUNCTION_TRACE(ev_pci_config_region_setup); 181 ACPI_FUNCTION_TRACE(ev_pci_config_region_setup);
176 182
@@ -215,45 +221,30 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
215 221
216 pci_root_node = parent_node; 222 pci_root_node = parent_node;
217 while (pci_root_node != acpi_gbl_root_node) { 223 while (pci_root_node != acpi_gbl_root_node) {
218 status =
219 acpi_ut_execute_HID(pci_root_node, &object_hID);
220 if (ACPI_SUCCESS(status)) {
221 /*
222 * Got a valid _HID string, check if this is a PCI root.
223 * New for ACPI 3.0: check for a PCI Express root also.
224 */
225 if (!
226 (ACPI_STRNCMP
227 (object_hID.value, PCI_ROOT_HID_STRING,
228 sizeof(PCI_ROOT_HID_STRING)))
229 ||
230 !(ACPI_STRNCMP
231 (object_hID.value,
232 PCI_EXPRESS_ROOT_HID_STRING,
233 sizeof(PCI_EXPRESS_ROOT_HID_STRING)))) {
234
235 /* Install a handler for this PCI root bridge */
236 224
237 status = 225 /* Get the _HID/_CID in order to detect a root_bridge */
238 acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL); 226
239 if (ACPI_FAILURE(status)) { 227 if (acpi_ev_is_pci_root_bridge(pci_root_node)) {
240 if (status == AE_SAME_HANDLER) { 228
241 /* 229 /* Install a handler for this PCI root bridge */
242 * It is OK if the handler is already installed on the root 230
243 * bridge. Still need to return a context object for the 231 status = acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
244 * new PCI_Config operation region, however. 232 if (ACPI_FAILURE(status)) {
245 */ 233 if (status == AE_SAME_HANDLER) {
246 status = AE_OK; 234 /*
247 } else { 235 * It is OK if the handler is already installed on the root
248 ACPI_EXCEPTION((AE_INFO, 236 * bridge. Still need to return a context object for the
249 status, 237 * new PCI_Config operation region, however.
250 "Could not install PciConfig handler for Root Bridge %4.4s", 238 */
251 acpi_ut_get_node_name 239 status = AE_OK;
252 (pci_root_node))); 240 } else {
253 } 241 ACPI_EXCEPTION((AE_INFO, status,
242 "Could not install PciConfig handler for Root Bridge %4.4s",
243 acpi_ut_get_node_name
244 (pci_root_node)));
254 } 245 }
255 break;
256 } 246 }
247 break;
257 } 248 }
258 249
259 pci_root_node = acpi_ns_get_parent_node(pci_root_node); 250 pci_root_node = acpi_ns_get_parent_node(pci_root_node);
@@ -282,14 +273,25 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
282 /* 273 /*
283 * For PCI_Config space access, we need the segment, bus, 274 * For PCI_Config space access, we need the segment, bus,
284 * device and function numbers. Acquire them here. 275 * device and function numbers. Acquire them here.
276 *
277 * Find the parent device object. (This allows the operation region to be
278 * within a subscope under the device, such as a control method.)
285 */ 279 */
280 pci_device_node = region_obj->region.node;
281 while (pci_device_node && (pci_device_node->type != ACPI_TYPE_DEVICE)) {
282 pci_device_node = acpi_ns_get_parent_node(pci_device_node);
283 }
284
285 if (!pci_device_node) {
286 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
287 }
286 288
287 /* 289 /*
288 * Get the PCI device and function numbers from the _ADR object 290 * Get the PCI device and function numbers from the _ADR object
289 * contained in the parent's scope. 291 * contained in the parent's scope.
290 */ 292 */
291 status = 293 status =
292 acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, parent_node, 294 acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, pci_device_node,
293 &pci_value); 295 &pci_value);
294 296
295 /* 297 /*
@@ -329,6 +331,91 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
329 331
330/******************************************************************************* 332/*******************************************************************************
331 * 333 *
334 * FUNCTION: acpi_ev_match_pci_root_bridge
335 *
336 * PARAMETERS: Id - The HID/CID in string format
337 *
338 * RETURN: TRUE if the Id is a match for a PCI/PCI-Express Root Bridge
339 *
340 * DESCRIPTION: Determine if the input ID is a PCI Root Bridge ID.
341 *
342 ******************************************************************************/
343
344static u8 acpi_ev_match_pci_root_bridge(char *id)
345{
346
347 /*
348 * Check if this is a PCI root.
349 * ACPI 3.0+: check for a PCI Express root also.
350 */
351 if (!(ACPI_STRNCMP(id,
352 PCI_ROOT_HID_STRING,
353 sizeof(PCI_ROOT_HID_STRING))) ||
354 !(ACPI_STRNCMP(id,
355 PCI_EXPRESS_ROOT_HID_STRING,
356 sizeof(PCI_EXPRESS_ROOT_HID_STRING)))) {
357 return (TRUE);
358 }
359
360 return (FALSE);
361}
362
363/*******************************************************************************
364 *
365 * FUNCTION: acpi_ev_is_pci_root_bridge
366 *
367 * PARAMETERS: Node - Device node being examined
368 *
369 * RETURN: TRUE if device is a PCI/PCI-Express Root Bridge
370 *
371 * DESCRIPTION: Determine if the input device represents a PCI Root Bridge by
372 * examining the _HID and _CID for the device.
373 *
374 ******************************************************************************/
375
376static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
377{
378 acpi_status status;
379 struct acpi_device_id hid;
380 struct acpi_compatible_id_list *cid;
381 acpi_native_uint i;
382
383 /*
384 * Get the _HID and check for a PCI Root Bridge
385 */
386 status = acpi_ut_execute_HID(node, &hid);
387 if (ACPI_FAILURE(status)) {
388 return (FALSE);
389 }
390
391 if (acpi_ev_match_pci_root_bridge(hid.value)) {
392 return (TRUE);
393 }
394
395 /*
396 * The _HID did not match.
397 * Get the _CID and check for a PCI Root Bridge
398 */
399 status = acpi_ut_execute_CID(node, &cid);
400 if (ACPI_FAILURE(status)) {
401 return (FALSE);
402 }
403
404 /* Check all _CIDs in the returned list */
405
406 for (i = 0; i < cid->count; i++) {
407 if (acpi_ev_match_pci_root_bridge(cid->id[i].value)) {
408 ACPI_FREE(cid);
409 return (TRUE);
410 }
411 }
412
413 ACPI_FREE(cid);
414 return (FALSE);
415}
416
417/*******************************************************************************
418 *
332 * FUNCTION: acpi_ev_pci_bar_region_setup 419 * FUNCTION: acpi_ev_pci_bar_region_setup
333 * 420 *
334 * PARAMETERS: Handle - Region we are interested in 421 * PARAMETERS: Handle - Region we are interested in
@@ -432,6 +519,9 @@ acpi_ev_default_region_setup(acpi_handle handle,
432 * a PCI address in the scope of the definition. This address is 519 * a PCI address in the scope of the definition. This address is
433 * required to perform an access to PCI config space. 520 * required to perform an access to PCI config space.
434 * 521 *
522 * MUTEX: Interpreter should be unlocked, because we may run the _REG
523 * method for this region.
524 *
435 ******************************************************************************/ 525 ******************************************************************************/
436 526
437acpi_status 527acpi_status
diff --git a/drivers/acpi/events/evsci.c b/drivers/acpi/events/evsci.c
index 8106215ad554..7e5d15ce2395 100644
--- a/drivers/acpi/events/evsci.c
+++ b/drivers/acpi/events/evsci.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -142,9 +142,10 @@ u32 acpi_ev_install_sci_handler(void)
142 142
143 ACPI_FUNCTION_TRACE(ev_install_sci_handler); 143 ACPI_FUNCTION_TRACE(ev_install_sci_handler);
144 144
145 status = acpi_os_install_interrupt_handler((u32) acpi_gbl_FADT->sci_int, 145 status =
146 acpi_ev_sci_xrupt_handler, 146 acpi_os_install_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
147 acpi_gbl_gpe_xrupt_list_head); 147 acpi_ev_sci_xrupt_handler,
148 acpi_gbl_gpe_xrupt_list_head);
148 return_ACPI_STATUS(status); 149 return_ACPI_STATUS(status);
149} 150}
150 151
@@ -175,8 +176,9 @@ acpi_status acpi_ev_remove_sci_handler(void)
175 176
176 /* Just let the OS remove the handler and disable the level */ 177 /* Just let the OS remove the handler and disable the level */
177 178
178 status = acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT->sci_int, 179 status =
179 acpi_ev_sci_xrupt_handler); 180 acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
181 acpi_ev_sci_xrupt_handler);
180 182
181 return_ACPI_STATUS(status); 183 return_ACPI_STATUS(status);
182} 184}
diff --git a/drivers/acpi/events/evxface.c b/drivers/acpi/events/evxface.c
index 923fd2b46955..685a103a3587 100644
--- a/drivers/acpi/events/evxface.c
+++ b/drivers/acpi/events/evxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -768,11 +768,9 @@ acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
768 return (AE_BAD_PARAMETER); 768 return (AE_BAD_PARAMETER);
769 } 769 }
770 770
771 status = acpi_ex_enter_interpreter(); 771 /* Must lock interpreter to prevent race conditions */
772 if (ACPI_FAILURE(status)) {
773 return (status);
774 }
775 772
773 acpi_ex_enter_interpreter();
776 status = acpi_ev_acquire_global_lock(timeout); 774 status = acpi_ev_acquire_global_lock(timeout);
777 acpi_ex_exit_interpreter(); 775 acpi_ex_exit_interpreter();
778 776
diff --git a/drivers/acpi/events/evxfevnt.c b/drivers/acpi/events/evxfevnt.c
index 7ebc2efac936..17065e98807c 100644
--- a/drivers/acpi/events/evxfevnt.c
+++ b/drivers/acpi/events/evxfevnt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -44,6 +44,7 @@
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acevents.h> 45#include <acpi/acevents.h>
46#include <acpi/acnamesp.h> 46#include <acpi/acnamesp.h>
47#include <acpi/actables.h>
47 48
48#define _COMPONENT ACPI_EVENTS 49#define _COMPONENT ACPI_EVENTS
49ACPI_MODULE_NAME("evxfevnt") 50ACPI_MODULE_NAME("evxfevnt")
@@ -65,13 +66,14 @@ acpi_status acpi_enable(void)
65 66
66 ACPI_FUNCTION_TRACE(acpi_enable); 67 ACPI_FUNCTION_TRACE(acpi_enable);
67 68
68 /* Make sure we have the FADT */ 69 /* ACPI tables must be present */
69 70
70 if (!acpi_gbl_FADT) { 71 if (!acpi_tb_tables_loaded()) {
71 ACPI_WARNING((AE_INFO, "No FADT information present!"));
72 return_ACPI_STATUS(AE_NO_ACPI_TABLES); 72 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
73 } 73 }
74 74
75 /* Check current mode */
76
75 if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) { 77 if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
76 ACPI_DEBUG_PRINT((ACPI_DB_INIT, 78 ACPI_DEBUG_PRINT((ACPI_DB_INIT,
77 "System is already in ACPI mode\n")); 79 "System is already in ACPI mode\n"));
@@ -111,11 +113,6 @@ acpi_status acpi_disable(void)
111 113
112 ACPI_FUNCTION_TRACE(acpi_disable); 114 ACPI_FUNCTION_TRACE(acpi_disable);
113 115
114 if (!acpi_gbl_FADT) {
115 ACPI_WARNING((AE_INFO, "No FADT information present!"));
116 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
117 }
118
119 if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) { 116 if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) {
120 ACPI_DEBUG_PRINT((ACPI_DB_INIT, 117 ACPI_DEBUG_PRINT((ACPI_DB_INIT,
121 "System is already in legacy (non-ACPI) mode\n")); 118 "System is already in legacy (non-ACPI) mode\n"));
@@ -169,7 +166,7 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
169 */ 166 */
170 status = 167 status =
171 acpi_set_register(acpi_gbl_fixed_event_info[event]. 168 acpi_set_register(acpi_gbl_fixed_event_info[event].
172 enable_register_id, 1, ACPI_MTX_LOCK); 169 enable_register_id, 1);
173 if (ACPI_FAILURE(status)) { 170 if (ACPI_FAILURE(status)) {
174 return_ACPI_STATUS(status); 171 return_ACPI_STATUS(status);
175 } 172 }
@@ -178,7 +175,7 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
178 175
179 status = 176 status =
180 acpi_get_register(acpi_gbl_fixed_event_info[event]. 177 acpi_get_register(acpi_gbl_fixed_event_info[event].
181 enable_register_id, &value, ACPI_MTX_LOCK); 178 enable_register_id, &value);
182 if (ACPI_FAILURE(status)) { 179 if (ACPI_FAILURE(status)) {
183 return_ACPI_STATUS(status); 180 return_ACPI_STATUS(status);
184 } 181 }
@@ -368,14 +365,14 @@ acpi_status acpi_disable_event(u32 event, u32 flags)
368 */ 365 */
369 status = 366 status =
370 acpi_set_register(acpi_gbl_fixed_event_info[event]. 367 acpi_set_register(acpi_gbl_fixed_event_info[event].
371 enable_register_id, 0, ACPI_MTX_LOCK); 368 enable_register_id, 0);
372 if (ACPI_FAILURE(status)) { 369 if (ACPI_FAILURE(status)) {
373 return_ACPI_STATUS(status); 370 return_ACPI_STATUS(status);
374 } 371 }
375 372
376 status = 373 status =
377 acpi_get_register(acpi_gbl_fixed_event_info[event]. 374 acpi_get_register(acpi_gbl_fixed_event_info[event].
378 enable_register_id, &value, ACPI_MTX_LOCK); 375 enable_register_id, &value);
379 if (ACPI_FAILURE(status)) { 376 if (ACPI_FAILURE(status)) {
380 return_ACPI_STATUS(status); 377 return_ACPI_STATUS(status);
381 } 378 }
@@ -421,7 +418,7 @@ acpi_status acpi_clear_event(u32 event)
421 */ 418 */
422 status = 419 status =
423 acpi_set_register(acpi_gbl_fixed_event_info[event]. 420 acpi_set_register(acpi_gbl_fixed_event_info[event].
424 status_register_id, 1, ACPI_MTX_LOCK); 421 status_register_id, 1);
425 422
426 return_ACPI_STATUS(status); 423 return_ACPI_STATUS(status);
427} 424}
@@ -510,7 +507,7 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
510 507
511 status = 508 status =
512 acpi_get_register(acpi_gbl_fixed_event_info[event]. 509 acpi_get_register(acpi_gbl_fixed_event_info[event].
513 status_register_id, event_status, ACPI_MTX_LOCK); 510 status_register_id, event_status);
514 511
515 return_ACPI_STATUS(status); 512 return_ACPI_STATUS(status);
516} 513}
diff --git a/drivers/acpi/events/evxfregn.c b/drivers/acpi/events/evxfregn.c
index 83b12a9afa32..7bf09c5fb242 100644
--- a/drivers/acpi/events/evxfregn.c
+++ b/drivers/acpi/events/evxfregn.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exconfig.c b/drivers/acpi/executer/exconfig.c
index c8341fa5fe01..25802f302ffe 100644
--- a/drivers/acpi/executer/exconfig.c
+++ b/drivers/acpi/executer/exconfig.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -54,7 +54,7 @@ ACPI_MODULE_NAME("exconfig")
54 54
55/* Local prototypes */ 55/* Local prototypes */
56static acpi_status 56static acpi_status
57acpi_ex_add_table(struct acpi_table_header *table, 57acpi_ex_add_table(acpi_native_uint table_index,
58 struct acpi_namespace_node *parent_node, 58 struct acpi_namespace_node *parent_node,
59 union acpi_operand_object **ddb_handle); 59 union acpi_operand_object **ddb_handle);
60 60
@@ -74,12 +74,11 @@ acpi_ex_add_table(struct acpi_table_header *table,
74 ******************************************************************************/ 74 ******************************************************************************/
75 75
76static acpi_status 76static acpi_status
77acpi_ex_add_table(struct acpi_table_header *table, 77acpi_ex_add_table(acpi_native_uint table_index,
78 struct acpi_namespace_node *parent_node, 78 struct acpi_namespace_node *parent_node,
79 union acpi_operand_object **ddb_handle) 79 union acpi_operand_object **ddb_handle)
80{ 80{
81 acpi_status status; 81 acpi_status status;
82 struct acpi_table_desc table_info;
83 union acpi_operand_object *obj_desc; 82 union acpi_operand_object *obj_desc;
84 83
85 ACPI_FUNCTION_TRACE(ex_add_table); 84 ACPI_FUNCTION_TRACE(ex_add_table);
@@ -98,42 +97,16 @@ acpi_ex_add_table(struct acpi_table_header *table,
98 97
99 /* Install the new table into the local data structures */ 98 /* Install the new table into the local data structures */
100 99
101 ACPI_MEMSET(&table_info, 0, sizeof(struct acpi_table_desc)); 100 obj_desc->reference.object = ACPI_CAST_PTR(void, table_index);
102
103 table_info.type = ACPI_TABLE_ID_SSDT;
104 table_info.pointer = table;
105 table_info.length = (acpi_size) table->length;
106 table_info.allocation = ACPI_MEM_ALLOCATED;
107
108 status = acpi_tb_install_table(&table_info);
109 obj_desc->reference.object = table_info.installed_desc;
110
111 if (ACPI_FAILURE(status)) {
112 if (status == AE_ALREADY_EXISTS) {
113
114 /* Table already exists, just return the handle */
115
116 return_ACPI_STATUS(AE_OK);
117 }
118 goto cleanup;
119 }
120 101
121 /* Add the table to the namespace */ 102 /* Add the table to the namespace */
122 103
123 status = acpi_ns_load_table(table_info.installed_desc, parent_node); 104 status = acpi_ns_load_table(table_index, parent_node);
124 if (ACPI_FAILURE(status)) { 105 if (ACPI_FAILURE(status)) {
125 106 acpi_ut_remove_reference(obj_desc);
126 /* Uninstall table on error */ 107 *ddb_handle = NULL;
127
128 (void)acpi_tb_uninstall_table(table_info.installed_desc);
129 goto cleanup;
130 } 108 }
131 109
132 return_ACPI_STATUS(AE_OK);
133
134 cleanup:
135 acpi_ut_remove_reference(obj_desc);
136 *ddb_handle = NULL;
137 return_ACPI_STATUS(status); 110 return_ACPI_STATUS(status);
138} 111}
139 112
@@ -146,7 +119,7 @@ acpi_ex_add_table(struct acpi_table_header *table,
146 * 119 *
147 * RETURN: Status 120 * RETURN: Status
148 * 121 *
149 * DESCRIPTION: Load an ACPI table 122 * DESCRIPTION: Load an ACPI table from the RSDT/XSDT
150 * 123 *
151 ******************************************************************************/ 124 ******************************************************************************/
152 125
@@ -156,33 +129,20 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
156{ 129{
157 acpi_status status; 130 acpi_status status;
158 union acpi_operand_object **operand = &walk_state->operands[0]; 131 union acpi_operand_object **operand = &walk_state->operands[0];
159 struct acpi_table_header *table; 132 acpi_native_uint table_index;
160 struct acpi_namespace_node *parent_node; 133 struct acpi_namespace_node *parent_node;
161 struct acpi_namespace_node *start_node; 134 struct acpi_namespace_node *start_node;
162 struct acpi_namespace_node *parameter_node = NULL; 135 struct acpi_namespace_node *parameter_node = NULL;
163 union acpi_operand_object *ddb_handle; 136 union acpi_operand_object *ddb_handle;
137 struct acpi_table_header *table;
164 138
165 ACPI_FUNCTION_TRACE(ex_load_table_op); 139 ACPI_FUNCTION_TRACE(ex_load_table_op);
166 140
167#if 0 141 /* Find the ACPI table in the RSDT/XSDT */
168 /*
169 * Make sure that the signature does not match one of the tables that
170 * is already loaded.
171 */
172 status = acpi_tb_match_signature(operand[0]->string.pointer, NULL);
173 if (status == AE_OK) {
174
175 /* Signature matched -- don't allow override */
176
177 return_ACPI_STATUS(AE_ALREADY_EXISTS);
178 }
179#endif
180
181 /* Find the ACPI table */
182 142
183 status = acpi_tb_find_table(operand[0]->string.pointer, 143 status = acpi_tb_find_table(operand[0]->string.pointer,
184 operand[1]->string.pointer, 144 operand[1]->string.pointer,
185 operand[2]->string.pointer, &table); 145 operand[2]->string.pointer, &table_index);
186 if (ACPI_FAILURE(status)) { 146 if (ACPI_FAILURE(status)) {
187 if (status != AE_NOT_FOUND) { 147 if (status != AE_NOT_FOUND) {
188 return_ACPI_STATUS(status); 148 return_ACPI_STATUS(status);
@@ -245,7 +205,7 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
245 205
246 /* Load the table into the namespace */ 206 /* Load the table into the namespace */
247 207
248 status = acpi_ex_add_table(table, parent_node, &ddb_handle); 208 status = acpi_ex_add_table(table_index, parent_node, &ddb_handle);
249 if (ACPI_FAILURE(status)) { 209 if (ACPI_FAILURE(status)) {
250 return_ACPI_STATUS(status); 210 return_ACPI_STATUS(status);
251 } 211 }
@@ -266,9 +226,13 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
266 } 226 }
267 } 227 }
268 228
269 ACPI_INFO((AE_INFO, 229 status = acpi_get_table_by_index(table_index, &table);
270 "Dynamic OEM Table Load - [%4.4s] OemId [%6.6s] OemTableId [%8.8s]", 230 if (ACPI_SUCCESS(status)) {
271 table->signature, table->oem_id, table->oem_table_id)); 231 ACPI_INFO((AE_INFO,
232 "Dynamic OEM Table Load - [%4.4s] OemId [%6.6s] OemTableId [%8.8s]",
233 table->signature, table->oem_id,
234 table->oem_table_id));
235 }
272 236
273 *return_desc = ddb_handle; 237 *return_desc = ddb_handle;
274 return_ACPI_STATUS(status); 238 return_ACPI_STATUS(status);
@@ -278,7 +242,7 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
278 * 242 *
279 * FUNCTION: acpi_ex_load_op 243 * FUNCTION: acpi_ex_load_op
280 * 244 *
281 * PARAMETERS: obj_desc - Region or Field where the table will be 245 * PARAMETERS: obj_desc - Region or Buffer/Field where the table will be
282 * obtained 246 * obtained
283 * Target - Where a handle to the table will be stored 247 * Target - Where a handle to the table will be stored
284 * walk_state - Current state 248 * walk_state - Current state
@@ -287,6 +251,12 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
287 * 251 *
288 * DESCRIPTION: Load an ACPI table from a field or operation region 252 * DESCRIPTION: Load an ACPI table from a field or operation region
289 * 253 *
254 * NOTE: Region Fields (Field, bank_field, index_fields) are resolved to buffer
255 * objects before this code is reached.
256 *
257 * If source is an operation region, it must refer to system_memory, as
258 * per the ACPI specification.
259 *
290 ******************************************************************************/ 260 ******************************************************************************/
291 261
292acpi_status 262acpi_status
@@ -294,22 +264,26 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
294 union acpi_operand_object *target, 264 union acpi_operand_object *target,
295 struct acpi_walk_state *walk_state) 265 struct acpi_walk_state *walk_state)
296{ 266{
297 acpi_status status;
298 union acpi_operand_object *ddb_handle; 267 union acpi_operand_object *ddb_handle;
299 union acpi_operand_object *buffer_desc = NULL; 268 struct acpi_table_desc table_desc;
300 struct acpi_table_header *table_ptr = NULL; 269 acpi_native_uint table_index;
301 acpi_physical_address address; 270 acpi_status status;
302 struct acpi_table_header table_header;
303 acpi_integer temp;
304 u32 i;
305 271
306 ACPI_FUNCTION_TRACE(ex_load_op); 272 ACPI_FUNCTION_TRACE(ex_load_op);
307 273
308 /* Object can be either an op_region or a Field */ 274 ACPI_MEMSET(&table_desc, 0, sizeof(struct acpi_table_desc));
275
276 /* Source Object can be either an op_region or a Buffer/Field */
309 277
310 switch (ACPI_GET_OBJECT_TYPE(obj_desc)) { 278 switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
311 case ACPI_TYPE_REGION: 279 case ACPI_TYPE_REGION:
312 280
281 /* Region must be system_memory (from ACPI spec) */
282
283 if (obj_desc->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
284 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
285 }
286
313 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Load from Region %p %s\n", 287 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Load from Region %p %s\n",
314 obj_desc, 288 obj_desc,
315 acpi_ut_get_object_type_name(obj_desc))); 289 acpi_ut_get_object_type_name(obj_desc)));
@@ -325,113 +299,41 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
325 } 299 }
326 } 300 }
327 301
328 /* Get the base physical address of the region */ 302 table_desc.address = obj_desc->region.address;
329 303 table_desc.length = obj_desc->region.length;
330 address = obj_desc->region.address; 304 table_desc.flags = ACPI_TABLE_ORIGIN_MAPPED;
331
332 /* Get part of the table header to get the table length */
333
334 table_header.length = 0;
335 for (i = 0; i < 8; i++) {
336 status =
337 acpi_ev_address_space_dispatch(obj_desc, ACPI_READ,
338 (acpi_physical_address)
339 (i + address), 8,
340 &temp);
341 if (ACPI_FAILURE(status)) {
342 return_ACPI_STATUS(status);
343 }
344
345 /* Get the one valid byte of the returned 64-bit value */
346
347 ACPI_CAST_PTR(u8, &table_header)[i] = (u8) temp;
348 }
349
350 /* Sanity check the table length */
351
352 if (table_header.length < sizeof(struct acpi_table_header)) {
353 return_ACPI_STATUS(AE_BAD_HEADER);
354 }
355
356 /* Allocate a buffer for the entire table */
357
358 table_ptr = ACPI_ALLOCATE(table_header.length);
359 if (!table_ptr) {
360 return_ACPI_STATUS(AE_NO_MEMORY);
361 }
362
363 /* Get the entire table from the op region */
364
365 for (i = 0; i < table_header.length; i++) {
366 status =
367 acpi_ev_address_space_dispatch(obj_desc, ACPI_READ,
368 (acpi_physical_address)
369 (i + address), 8,
370 &temp);
371 if (ACPI_FAILURE(status)) {
372 goto cleanup;
373 }
374
375 /* Get the one valid byte of the returned 64-bit value */
376
377 ACPI_CAST_PTR(u8, table_ptr)[i] = (u8) temp;
378 }
379 break; 305 break;
380 306
381 case ACPI_TYPE_LOCAL_REGION_FIELD: 307 case ACPI_TYPE_BUFFER: /* Buffer or resolved region_field */
382 case ACPI_TYPE_LOCAL_BANK_FIELD:
383 case ACPI_TYPE_LOCAL_INDEX_FIELD:
384 308
385 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Load from Field %p %s\n", 309 /* Simply extract the buffer from the buffer object */
386 obj_desc,
387 acpi_ut_get_object_type_name(obj_desc)));
388 310
389 /* 311 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
390 * The length of the field must be at least as large as the table. 312 "Load from Buffer or Field %p %s\n", obj_desc,
391 * Read the entire field and thus the entire table. Buffer is 313 acpi_ut_get_object_type_name(obj_desc)));
392 * allocated during the read.
393 */
394 status =
395 acpi_ex_read_data_from_field(walk_state, obj_desc,
396 &buffer_desc);
397 if (ACPI_FAILURE(status)) {
398 return_ACPI_STATUS(status);
399 }
400
401 table_ptr = ACPI_CAST_PTR(struct acpi_table_header,
402 buffer_desc->buffer.pointer);
403
404 /* All done with the buffer_desc, delete it */
405
406 buffer_desc->buffer.pointer = NULL;
407 acpi_ut_remove_reference(buffer_desc);
408 314
409 /* Sanity check the table length */ 315 table_desc.pointer = ACPI_CAST_PTR(struct acpi_table_header,
316 obj_desc->buffer.pointer);
317 table_desc.length = table_desc.pointer->length;
318 table_desc.flags = ACPI_TABLE_ORIGIN_ALLOCATED;
410 319
411 if (table_ptr->length < sizeof(struct acpi_table_header)) { 320 obj_desc->buffer.pointer = NULL;
412 status = AE_BAD_HEADER;
413 goto cleanup;
414 }
415 break; 321 break;
416 322
417 default: 323 default:
418 return_ACPI_STATUS(AE_AML_OPERAND_TYPE); 324 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
419 } 325 }
420 326
421 /* The table must be either an SSDT or a PSDT */ 327 /*
422 328 * Install the new table into the local data structures
423 if ((!ACPI_COMPARE_NAME(table_ptr->signature, PSDT_SIG)) && 329 */
424 (!ACPI_COMPARE_NAME(table_ptr->signature, SSDT_SIG))) { 330 status = acpi_tb_add_table(&table_desc, &table_index);
425 ACPI_ERROR((AE_INFO, 331 if (ACPI_FAILURE(status)) {
426 "Table has invalid signature [%4.4s], must be SSDT or PSDT",
427 table_ptr->signature));
428 status = AE_BAD_SIGNATURE;
429 goto cleanup; 332 goto cleanup;
430 } 333 }
431 334
432 /* Install the new table into the local data structures */ 335 status =
433 336 acpi_ex_add_table(table_index, acpi_gbl_root_node, &ddb_handle);
434 status = acpi_ex_add_table(table_ptr, acpi_gbl_root_node, &ddb_handle);
435 if (ACPI_FAILURE(status)) { 337 if (ACPI_FAILURE(status)) {
436 338
437 /* On error, table_ptr was deallocated above */ 339 /* On error, table_ptr was deallocated above */
@@ -450,13 +352,9 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
450 return_ACPI_STATUS(status); 352 return_ACPI_STATUS(status);
451 } 353 }
452 354
453 ACPI_INFO((AE_INFO,
454 "Dynamic SSDT Load - OemId [%6.6s] OemTableId [%8.8s]",
455 table_ptr->oem_id, table_ptr->oem_table_id));
456
457 cleanup: 355 cleanup:
458 if (ACPI_FAILURE(status)) { 356 if (ACPI_FAILURE(status)) {
459 ACPI_FREE(table_ptr); 357 acpi_tb_delete_table(&table_desc);
460 } 358 }
461 return_ACPI_STATUS(status); 359 return_ACPI_STATUS(status);
462} 360}
@@ -477,7 +375,7 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
477{ 375{
478 acpi_status status = AE_OK; 376 acpi_status status = AE_OK;
479 union acpi_operand_object *table_desc = ddb_handle; 377 union acpi_operand_object *table_desc = ddb_handle;
480 struct acpi_table_desc *table_info; 378 acpi_native_uint table_index;
481 379
482 ACPI_FUNCTION_TRACE(ex_unload_table); 380 ACPI_FUNCTION_TRACE(ex_unload_table);
483 381
@@ -493,19 +391,18 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
493 return_ACPI_STATUS(AE_BAD_PARAMETER); 391 return_ACPI_STATUS(AE_BAD_PARAMETER);
494 } 392 }
495 393
496 /* Get the actual table descriptor from the ddb_handle */ 394 /* Get the table index from the ddb_handle */
497 395
498 table_info = (struct acpi_table_desc *)table_desc->reference.object; 396 table_index = (acpi_native_uint) table_desc->reference.object;
499 397
500 /* 398 /*
501 * Delete the entire namespace under this table Node 399 * Delete the entire namespace under this table Node
502 * (Offset contains the table_id) 400 * (Offset contains the table_id)
503 */ 401 */
504 acpi_ns_delete_namespace_by_owner(table_info->owner_id); 402 acpi_tb_delete_namespace_by_owner(table_index);
505 403 acpi_tb_release_owner_id(table_index);
506 /* Delete the table itself */
507 404
508 (void)acpi_tb_uninstall_table(table_info->installed_desc); 405 acpi_tb_set_table_loaded_flag(table_index, FALSE);
509 406
510 /* Delete the table descriptor (ddb_handle) */ 407 /* Delete the table descriptor (ddb_handle) */
511 408
diff --git a/drivers/acpi/executer/exconvrt.c b/drivers/acpi/executer/exconvrt.c
index 544e81a6a438..d470e8b1f4ea 100644
--- a/drivers/acpi/executer/exconvrt.c
+++ b/drivers/acpi/executer/exconvrt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/excreate.c b/drivers/acpi/executer/excreate.c
index 34eec82c1b1e..7c38528a7e83 100644
--- a/drivers/acpi/executer/excreate.c
+++ b/drivers/acpi/executer/excreate.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -359,8 +359,9 @@ acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state)
359 union acpi_operand_object **operand = &walk_state->operands[0]; 359 union acpi_operand_object **operand = &walk_state->operands[0];
360 union acpi_operand_object *obj_desc; 360 union acpi_operand_object *obj_desc;
361 struct acpi_namespace_node *node; 361 struct acpi_namespace_node *node;
362 struct acpi_table_header *table;
363 union acpi_operand_object *region_obj2; 362 union acpi_operand_object *region_obj2;
363 acpi_native_uint table_index;
364 struct acpi_table_header *table;
364 365
365 ACPI_FUNCTION_TRACE(ex_create_table_region); 366 ACPI_FUNCTION_TRACE(ex_create_table_region);
366 367
@@ -380,7 +381,7 @@ acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state)
380 381
381 status = acpi_tb_find_table(operand[1]->string.pointer, 382 status = acpi_tb_find_table(operand[1]->string.pointer,
382 operand[2]->string.pointer, 383 operand[2]->string.pointer,
383 operand[3]->string.pointer, &table); 384 operand[3]->string.pointer, &table_index);
384 if (ACPI_FAILURE(status)) { 385 if (ACPI_FAILURE(status)) {
385 return_ACPI_STATUS(status); 386 return_ACPI_STATUS(status);
386 } 387 }
@@ -395,6 +396,11 @@ acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state)
395 region_obj2 = obj_desc->common.next_object; 396 region_obj2 = obj_desc->common.next_object;
396 region_obj2->extra.region_context = NULL; 397 region_obj2->extra.region_context = NULL;
397 398
399 status = acpi_get_table_by_index(table_index, &table);
400 if (ACPI_FAILURE(status)) {
401 return_ACPI_STATUS(status);
402 }
403
398 /* Init the region from the operands */ 404 /* Init the region from the operands */
399 405
400 obj_desc->region.space_id = REGION_DATA_TABLE; 406 obj_desc->region.space_id = REGION_DATA_TABLE;
@@ -553,7 +559,8 @@ acpi_ex_create_method(u8 * aml_start,
553 559
554 obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_METHOD); 560 obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_METHOD);
555 if (!obj_desc) { 561 if (!obj_desc) {
556 return_ACPI_STATUS(AE_NO_MEMORY); 562 status = AE_NO_MEMORY;
563 goto exit;
557 } 564 }
558 565
559 /* Save the method's AML pointer and length */ 566 /* Save the method's AML pointer and length */
@@ -576,10 +583,7 @@ acpi_ex_create_method(u8 * aml_start,
576 * Get the sync_level. If method is serialized, a mutex will be 583 * Get the sync_level. If method is serialized, a mutex will be
577 * created for this method when it is parsed. 584 * created for this method when it is parsed.
578 */ 585 */
579 if (acpi_gbl_all_methods_serialized) { 586 if (method_flags & AML_METHOD_SERIALIZED) {
580 obj_desc->method.sync_level = 0;
581 obj_desc->method.method_flags |= AML_METHOD_SERIALIZED;
582 } else if (method_flags & AML_METHOD_SERIALIZED) {
583 /* 587 /*
584 * ACPI 1.0: sync_level = 0 588 * ACPI 1.0: sync_level = 0
585 * ACPI 2.0: sync_level = sync_level in method declaration 589 * ACPI 2.0: sync_level = sync_level in method declaration
@@ -597,6 +601,7 @@ acpi_ex_create_method(u8 * aml_start,
597 601
598 acpi_ut_remove_reference(obj_desc); 602 acpi_ut_remove_reference(obj_desc);
599 603
604 exit:
600 /* Remove a reference to the operand */ 605 /* Remove a reference to the operand */
601 606
602 acpi_ut_remove_reference(operand[1]); 607 acpi_ut_remove_reference(operand[1]);
diff --git a/drivers/acpi/executer/exdump.c b/drivers/acpi/executer/exdump.c
index 2450943add33..68d283fd60e7 100644
--- a/drivers/acpi/executer/exdump.c
+++ b/drivers/acpi/executer/exdump.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -59,8 +59,6 @@ static void acpi_ex_out_string(char *title, char *value);
59 59
60static void acpi_ex_out_pointer(char *title, void *value); 60static void acpi_ex_out_pointer(char *title, void *value);
61 61
62static void acpi_ex_out_address(char *title, acpi_physical_address value);
63
64static void 62static void
65acpi_ex_dump_object(union acpi_operand_object *obj_desc, 63acpi_ex_dump_object(union acpi_operand_object *obj_desc,
66 struct acpi_exdump_info *info); 64 struct acpi_exdump_info *info);
@@ -92,10 +90,11 @@ static struct acpi_exdump_info acpi_ex_dump_string[4] = {
92 {ACPI_EXD_STRING, 0, NULL} 90 {ACPI_EXD_STRING, 0, NULL}
93}; 91};
94 92
95static struct acpi_exdump_info acpi_ex_dump_buffer[4] = { 93static struct acpi_exdump_info acpi_ex_dump_buffer[5] = {
96 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_buffer), NULL}, 94 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_buffer), NULL},
97 {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(buffer.length), "Length"}, 95 {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(buffer.length), "Length"},
98 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.pointer), "Pointer"}, 96 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.pointer), "Pointer"},
97 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.node), "Parent Node"},
99 {ACPI_EXD_BUFFER, 0, NULL} 98 {ACPI_EXD_BUFFER, 0, NULL}
100}; 99};
101 100
@@ -165,8 +164,8 @@ static struct acpi_exdump_info acpi_ex_dump_power[5] = {
165 164
166static struct acpi_exdump_info acpi_ex_dump_processor[7] = { 165static struct acpi_exdump_info acpi_ex_dump_processor[7] = {
167 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_processor), NULL}, 166 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_processor), NULL},
168 {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(processor.proc_id), "Processor ID"}, 167 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.proc_id), "Processor ID"},
169 {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(processor.length), "Length"}, 168 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.length), "Length"},
170 {ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(processor.address), "Address"}, 169 {ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(processor.address), "Address"},
171 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.system_notify), 170 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.system_notify),
172 "System Notify"}, 171 "System Notify"},
@@ -379,18 +378,12 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
379 break; 378 break;
380 379
381 case ACPI_EXD_POINTER: 380 case ACPI_EXD_POINTER:
381 case ACPI_EXD_ADDRESS:
382 382
383 acpi_ex_out_pointer(name, 383 acpi_ex_out_pointer(name,
384 *ACPI_CAST_PTR(void *, target)); 384 *ACPI_CAST_PTR(void *, target));
385 break; 385 break;
386 386
387 case ACPI_EXD_ADDRESS:
388
389 acpi_ex_out_address(name,
390 *ACPI_CAST_PTR
391 (acpi_physical_address, target));
392 break;
393
394 case ACPI_EXD_STRING: 387 case ACPI_EXD_STRING:
395 388
396 acpi_ut_print_string(obj_desc->string.pointer, 389 acpi_ut_print_string(obj_desc->string.pointer,
@@ -834,16 +827,6 @@ static void acpi_ex_out_pointer(char *title, void *value)
834 acpi_os_printf("%20s : %p\n", title, value); 827 acpi_os_printf("%20s : %p\n", title, value);
835} 828}
836 829
837static void acpi_ex_out_address(char *title, acpi_physical_address value)
838{
839
840#if ACPI_MACHINE_WIDTH == 16
841 acpi_os_printf("%20s : %p\n", title, value);
842#else
843 acpi_os_printf("%20s : %8.8X%8.8X\n", title, ACPI_FORMAT_UINT64(value));
844#endif
845}
846
847/******************************************************************************* 830/*******************************************************************************
848 * 831 *
849 * FUNCTION: acpi_ex_dump_namespace_node 832 * FUNCTION: acpi_ex_dump_namespace_node
diff --git a/drivers/acpi/executer/exfield.c b/drivers/acpi/executer/exfield.c
index 9ea9c3a67ca9..2d88a3d8d1ad 100644
--- a/drivers/acpi/executer/exfield.c
+++ b/drivers/acpi/executer/exfield.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exfldio.c b/drivers/acpi/executer/exfldio.c
index 40f0bee6faa5..65a48b6170ee 100644
--- a/drivers/acpi/executer/exfldio.c
+++ b/drivers/acpi/executer/exfldio.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -257,14 +257,13 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
257 } 257 }
258 258
259 ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD, 259 ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD,
260 " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %8.8X%8.8X\n", 260 " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %p\n",
261 acpi_ut_get_region_name(rgn_desc->region. 261 acpi_ut_get_region_name(rgn_desc->region.
262 space_id), 262 space_id),
263 rgn_desc->region.space_id, 263 rgn_desc->region.space_id,
264 obj_desc->common_field.access_byte_width, 264 obj_desc->common_field.access_byte_width,
265 obj_desc->common_field.base_byte_offset, 265 obj_desc->common_field.base_byte_offset,
266 field_datum_byte_offset, 266 field_datum_byte_offset, (void *)address));
267 ACPI_FORMAT_UINT64(address)));
268 267
269 /* Invoke the appropriate address_space/op_region handler */ 268 /* Invoke the appropriate address_space/op_region handler */
270 269
diff --git a/drivers/acpi/executer/exmisc.c b/drivers/acpi/executer/exmisc.c
index bd98aab017cf..f13d1cec2d6d 100644
--- a/drivers/acpi/executer/exmisc.c
+++ b/drivers/acpi/executer/exmisc.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exmutex.c b/drivers/acpi/executer/exmutex.c
index bf90f04f2c60..5101bad5baf8 100644
--- a/drivers/acpi/executer/exmutex.c
+++ b/drivers/acpi/executer/exmutex.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -44,6 +44,7 @@
44 44
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include <acpi/acinterp.h> 46#include <acpi/acinterp.h>
47#include <acpi/acevents.h>
47 48
48#define _COMPONENT ACPI_EXECUTER 49#define _COMPONENT ACPI_EXECUTER
49ACPI_MODULE_NAME("exmutex") 50ACPI_MODULE_NAME("exmutex")
@@ -150,7 +151,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
150 return_ACPI_STATUS(AE_BAD_PARAMETER); 151 return_ACPI_STATUS(AE_BAD_PARAMETER);
151 } 152 }
152 153
153 /* Sanity check -- we must have a valid thread ID */ 154 /* Sanity check: we must have a valid thread ID */
154 155
155 if (!walk_state->thread) { 156 if (!walk_state->thread) {
156 ACPI_ERROR((AE_INFO, 157 ACPI_ERROR((AE_INFO,
@@ -174,24 +175,28 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
174 /* Support for multiple acquires by the owning thread */ 175 /* Support for multiple acquires by the owning thread */
175 176
176 if (obj_desc->mutex.owner_thread) { 177 if (obj_desc->mutex.owner_thread) {
177 178 if (obj_desc->mutex.owner_thread->thread_id ==
178 /* Special case for Global Lock, allow all threads */ 179 walk_state->thread->thread_id) {
179
180 if ((obj_desc->mutex.owner_thread->thread_id ==
181 walk_state->thread->thread_id) ||
182 (obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK)) {
183 /* 180 /*
184 * The mutex is already owned by this thread, 181 * The mutex is already owned by this thread, just increment the
185 * just increment the acquisition depth 182 * acquisition depth
186 */ 183 */
187 obj_desc->mutex.acquisition_depth++; 184 obj_desc->mutex.acquisition_depth++;
188 return_ACPI_STATUS(AE_OK); 185 return_ACPI_STATUS(AE_OK);
189 } 186 }
190 } 187 }
191 188
192 /* Acquire the mutex, wait if necessary */ 189 /* Acquire the mutex, wait if necessary. Special case for Global Lock */
190
191 if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
192 status =
193 acpi_ev_acquire_global_lock((u16) time_desc->integer.value);
194 } else {
195 status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
196 (u16) time_desc->integer.
197 value);
198 }
193 199
194 status = acpi_ex_system_acquire_mutex(time_desc, obj_desc);
195 if (ACPI_FAILURE(status)) { 200 if (ACPI_FAILURE(status)) {
196 201
197 /* Includes failure from a timeout on time_desc */ 202 /* Includes failure from a timeout on time_desc */
@@ -211,7 +216,6 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
211 /* Link the mutex to the current thread for force-unlock at method exit */ 216 /* Link the mutex to the current thread for force-unlock at method exit */
212 217
213 acpi_ex_link_mutex(obj_desc, walk_state->thread); 218 acpi_ex_link_mutex(obj_desc, walk_state->thread);
214
215 return_ACPI_STATUS(AE_OK); 219 return_ACPI_STATUS(AE_OK);
216} 220}
217 221
@@ -232,7 +236,7 @@ acpi_status
232acpi_ex_release_mutex(union acpi_operand_object *obj_desc, 236acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
233 struct acpi_walk_state *walk_state) 237 struct acpi_walk_state *walk_state)
234{ 238{
235 acpi_status status; 239 acpi_status status = AE_OK;
236 240
237 ACPI_FUNCTION_TRACE(ex_release_mutex); 241 ACPI_FUNCTION_TRACE(ex_release_mutex);
238 242
@@ -249,7 +253,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
249 return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED); 253 return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED);
250 } 254 }
251 255
252 /* Sanity check -- we must have a valid thread ID */ 256 /* Sanity check: we must have a valid thread ID */
253 257
254 if (!walk_state->thread) { 258 if (!walk_state->thread) {
255 ACPI_ERROR((AE_INFO, 259 ACPI_ERROR((AE_INFO,
@@ -264,7 +268,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
264 */ 268 */
265 if ((obj_desc->mutex.owner_thread->thread_id != 269 if ((obj_desc->mutex.owner_thread->thread_id !=
266 walk_state->thread->thread_id) 270 walk_state->thread->thread_id)
267 && (obj_desc->mutex.os_mutex != ACPI_GLOBAL_LOCK)) { 271 && (obj_desc->mutex.os_mutex != acpi_gbl_global_lock_mutex)) {
268 ACPI_ERROR((AE_INFO, 272 ACPI_ERROR((AE_INFO,
269 "Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX", 273 "Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX",
270 (unsigned long)walk_state->thread->thread_id, 274 (unsigned long)walk_state->thread->thread_id,
@@ -274,8 +278,8 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
274 } 278 }
275 279
276 /* 280 /*
277 * The sync level of the mutex must be less than or 281 * The sync level of the mutex must be less than or equal to the current
278 * equal to the current sync level 282 * sync level
279 */ 283 */
280 if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) { 284 if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) {
281 ACPI_ERROR((AE_INFO, 285 ACPI_ERROR((AE_INFO,
@@ -298,11 +302,15 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
298 302
299 acpi_ex_unlink_mutex(obj_desc); 303 acpi_ex_unlink_mutex(obj_desc);
300 304
301 /* Release the mutex */ 305 /* Release the mutex, special case for Global Lock */
302 306
303 status = acpi_ex_system_release_mutex(obj_desc); 307 if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
308 status = acpi_ev_release_global_lock();
309 } else {
310 acpi_os_release_mutex(obj_desc->mutex.os_mutex);
311 }
304 312
305 /* Update the mutex and walk state, restore sync_level before acquire */ 313 /* Update the mutex and restore sync_level */
306 314
307 obj_desc->mutex.owner_thread = NULL; 315 obj_desc->mutex.owner_thread = NULL;
308 walk_state->thread->current_sync_level = 316 walk_state->thread->current_sync_level =
@@ -321,39 +329,49 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
321 * 329 *
322 * DESCRIPTION: Release all mutexes held by this thread 330 * DESCRIPTION: Release all mutexes held by this thread
323 * 331 *
332 * NOTE: This function is called as the thread is exiting the interpreter.
333 * Mutexes are not released when an individual control method is exited, but
334 * only when the parent thread actually exits the interpreter. This allows one
335 * method to acquire a mutex, and a different method to release it, as long as
336 * this is performed underneath a single parent control method.
337 *
324 ******************************************************************************/ 338 ******************************************************************************/
325 339
326void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread) 340void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
327{ 341{
328 union acpi_operand_object *next = thread->acquired_mutex_list; 342 union acpi_operand_object *next = thread->acquired_mutex_list;
329 union acpi_operand_object *this; 343 union acpi_operand_object *obj_desc;
330 acpi_status status;
331 344
332 ACPI_FUNCTION_ENTRY(); 345 ACPI_FUNCTION_ENTRY();
333 346
334 /* Traverse the list of owned mutexes, releasing each one */ 347 /* Traverse the list of owned mutexes, releasing each one */
335 348
336 while (next) { 349 while (next) {
337 this = next; 350 obj_desc = next;
338 next = this->mutex.next; 351 next = obj_desc->mutex.next;
352
353 obj_desc->mutex.prev = NULL;
354 obj_desc->mutex.next = NULL;
355 obj_desc->mutex.acquisition_depth = 0;
356
357 /* Release the mutex, special case for Global Lock */
339 358
340 this->mutex.acquisition_depth = 1; 359 if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
341 this->mutex.prev = NULL;
342 this->mutex.next = NULL;
343 360
344 /* Release the mutex */ 361 /* Ignore errors */
345 362
346 status = acpi_ex_system_release_mutex(this); 363 (void)acpi_ev_release_global_lock();
347 if (ACPI_FAILURE(status)) { 364 } else {
348 continue; 365 acpi_os_release_mutex(obj_desc->mutex.os_mutex);
349 } 366 }
350 367
351 /* Mark mutex unowned */ 368 /* Mark mutex unowned */
352 369
353 this->mutex.owner_thread = NULL; 370 obj_desc->mutex.owner_thread = NULL;
354 371
355 /* Update Thread sync_level (Last mutex is the important one) */ 372 /* Update Thread sync_level (Last mutex is the important one) */
356 373
357 thread->current_sync_level = this->mutex.original_sync_level; 374 thread->current_sync_level =
375 obj_desc->mutex.original_sync_level;
358 } 376 }
359} 377}
diff --git a/drivers/acpi/executer/exnames.c b/drivers/acpi/executer/exnames.c
index d3d70364626c..1ee4fb1175c6 100644
--- a/drivers/acpi/executer/exnames.c
+++ b/drivers/acpi/executer/exnames.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exoparg1.c b/drivers/acpi/executer/exoparg1.c
index 6374d8be88e0..252f10acbbcc 100644
--- a/drivers/acpi/executer/exoparg1.c
+++ b/drivers/acpi/executer/exoparg1.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -104,9 +104,7 @@ acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state)
104 status = AE_NO_MEMORY; 104 status = AE_NO_MEMORY;
105 goto cleanup; 105 goto cleanup;
106 } 106 }
107#if ACPI_MACHINE_WIDTH != 16
108 return_desc->integer.value = acpi_os_get_timer(); 107 return_desc->integer.value = acpi_os_get_timer();
109#endif
110 break; 108 break;
111 109
112 default: /* Unknown opcode */ 110 default: /* Unknown opcode */
diff --git a/drivers/acpi/executer/exoparg2.c b/drivers/acpi/executer/exoparg2.c
index 7d2cbc113160..17e652e65379 100644
--- a/drivers/acpi/executer/exoparg2.c
+++ b/drivers/acpi/executer/exoparg2.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exoparg3.c b/drivers/acpi/executer/exoparg3.c
index e2d945dfd509..7fe67cf82cee 100644
--- a/drivers/acpi/executer/exoparg3.c
+++ b/drivers/acpi/executer/exoparg3.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exoparg6.c b/drivers/acpi/executer/exoparg6.c
index f0c0ba6eb408..bd80a9cb3d65 100644
--- a/drivers/acpi/executer/exoparg6.c
+++ b/drivers/acpi/executer/exoparg6.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exprep.c b/drivers/acpi/executer/exprep.c
index 44d064f427b9..a6696621ff1b 100644
--- a/drivers/acpi/executer/exprep.c
+++ b/drivers/acpi/executer/exprep.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exregion.c b/drivers/acpi/executer/exregion.c
index 3cc97ba48b36..2e9ce94798c7 100644
--- a/drivers/acpi/executer/exregion.c
+++ b/drivers/acpi/executer/exregion.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -155,16 +155,15 @@ acpi_ex_system_memory_space_handler(u32 function,
155 155
156 /* Create a new mapping starting at the address given */ 156 /* Create a new mapping starting at the address given */
157 157
158 status = acpi_os_map_memory(address, window_size, 158 mem_info->mapped_logical_address =
159 (void **)&mem_info-> 159 acpi_os_map_memory((acpi_native_uint) address, window_size);
160 mapped_logical_address); 160 if (!mem_info->mapped_logical_address) {
161 if (ACPI_FAILURE(status)) {
162 ACPI_ERROR((AE_INFO, 161 ACPI_ERROR((AE_INFO,
163 "Could not map memory at %8.8X%8.8X, size %X", 162 "Could not map memory at %8.8X%8.8X, size %X",
164 ACPI_FORMAT_UINT64(address), 163 ACPI_FORMAT_UINT64(address),
165 (u32) window_size)); 164 (u32) window_size));
166 mem_info->mapped_length = 0; 165 mem_info->mapped_length = 0;
167 return_ACPI_STATUS(status); 166 return_ACPI_STATUS(AE_NO_MEMORY);
168 } 167 }
169 168
170 /* Save the physical address and mapping size */ 169 /* Save the physical address and mapping size */
@@ -210,11 +209,10 @@ acpi_ex_system_memory_space_handler(u32 function,
210 *value = (acpi_integer) ACPI_GET32(logical_addr_ptr); 209 *value = (acpi_integer) ACPI_GET32(logical_addr_ptr);
211 break; 210 break;
212 211
213#if ACPI_MACHINE_WIDTH != 16
214 case 64: 212 case 64:
215 *value = (acpi_integer) ACPI_GET64(logical_addr_ptr); 213 *value = (acpi_integer) ACPI_GET64(logical_addr_ptr);
216 break; 214 break;
217#endif 215
218 default: 216 default:
219 /* bit_width was already validated */ 217 /* bit_width was already validated */
220 break; 218 break;
@@ -236,11 +234,9 @@ acpi_ex_system_memory_space_handler(u32 function,
236 ACPI_SET32(logical_addr_ptr) = (u32) * value; 234 ACPI_SET32(logical_addr_ptr) = (u32) * value;
237 break; 235 break;
238 236
239#if ACPI_MACHINE_WIDTH != 16
240 case 64: 237 case 64:
241 ACPI_SET64(logical_addr_ptr) = (u64) * value; 238 ACPI_SET64(logical_addr_ptr) = (u64) * value;
242 break; 239 break;
243#endif
244 240
245 default: 241 default:
246 /* bit_width was already validated */ 242 /* bit_width was already validated */
diff --git a/drivers/acpi/executer/exresnte.c b/drivers/acpi/executer/exresnte.c
index 3089b05a1368..2b3a01cc4929 100644
--- a/drivers/acpi/executer/exresnte.c
+++ b/drivers/acpi/executer/exresnte.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exresolv.c b/drivers/acpi/executer/exresolv.c
index 6499de878017..6c64e55dab0e 100644
--- a/drivers/acpi/executer/exresolv.c
+++ b/drivers/acpi/executer/exresolv.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -141,7 +141,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
141 acpi_status status = AE_OK; 141 acpi_status status = AE_OK;
142 union acpi_operand_object *stack_desc; 142 union acpi_operand_object *stack_desc;
143 void *temp_node; 143 void *temp_node;
144 union acpi_operand_object *obj_desc; 144 union acpi_operand_object *obj_desc = NULL;
145 u16 opcode; 145 u16 opcode;
146 146
147 ACPI_FUNCTION_TRACE(ex_resolve_object_to_value); 147 ACPI_FUNCTION_TRACE(ex_resolve_object_to_value);
@@ -299,8 +299,6 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
299 status = acpi_ds_get_package_arguments(stack_desc); 299 status = acpi_ds_get_package_arguments(stack_desc);
300 break; 300 break;
301 301
302 /* These cases may never happen here, but just in case.. */
303
304 case ACPI_TYPE_BUFFER_FIELD: 302 case ACPI_TYPE_BUFFER_FIELD:
305 case ACPI_TYPE_LOCAL_REGION_FIELD: 303 case ACPI_TYPE_LOCAL_REGION_FIELD:
306 case ACPI_TYPE_LOCAL_BANK_FIELD: 304 case ACPI_TYPE_LOCAL_BANK_FIELD:
@@ -314,6 +312,10 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
314 status = 312 status =
315 acpi_ex_read_data_from_field(walk_state, stack_desc, 313 acpi_ex_read_data_from_field(walk_state, stack_desc,
316 &obj_desc); 314 &obj_desc);
315
316 /* Remove a reference to the original operand, then override */
317
318 acpi_ut_remove_reference(*stack_ptr);
317 *stack_ptr = (void *)obj_desc; 319 *stack_ptr = (void *)obj_desc;
318 break; 320 break;
319 321
diff --git a/drivers/acpi/executer/exresop.c b/drivers/acpi/executer/exresop.c
index 4c93d0972333..ba761862a599 100644
--- a/drivers/acpi/executer/exresop.c
+++ b/drivers/acpi/executer/exresop.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -611,22 +611,20 @@ acpi_ex_resolve_operands(u16 opcode,
611 } 611 }
612 goto next_operand; 612 goto next_operand;
613 613
614 case ARGI_REGION_OR_FIELD: 614 case ARGI_REGION_OR_BUFFER: /* Used by Load() only */
615 615
616 /* Need an operand of type REGION or a FIELD in a region */ 616 /* Need an operand of type REGION or a BUFFER (which could be a resolved region field) */
617 617
618 switch (ACPI_GET_OBJECT_TYPE(obj_desc)) { 618 switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
619 case ACPI_TYPE_BUFFER:
619 case ACPI_TYPE_REGION: 620 case ACPI_TYPE_REGION:
620 case ACPI_TYPE_LOCAL_REGION_FIELD:
621 case ACPI_TYPE_LOCAL_BANK_FIELD:
622 case ACPI_TYPE_LOCAL_INDEX_FIELD:
623 621
624 /* Valid operand */ 622 /* Valid operand */
625 break; 623 break;
626 624
627 default: 625 default:
628 ACPI_ERROR((AE_INFO, 626 ACPI_ERROR((AE_INFO,
629 "Needed [Region/RegionField], found [%s] %p", 627 "Needed [Region/Buffer], found [%s] %p",
630 acpi_ut_get_object_type_name 628 acpi_ut_get_object_type_name
631 (obj_desc), obj_desc)); 629 (obj_desc), obj_desc));
632 630
diff --git a/drivers/acpi/executer/exstore.c b/drivers/acpi/executer/exstore.c
index 0456405ba019..f4b69a637820 100644
--- a/drivers/acpi/executer/exstore.c
+++ b/drivers/acpi/executer/exstore.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exstoren.c b/drivers/acpi/executer/exstoren.c
index 591aaf0e18b3..1d622c625c64 100644
--- a/drivers/acpi/executer/exstoren.c
+++ b/drivers/acpi/executer/exstoren.c
@@ -7,7 +7,7 @@
7 *****************************************************************************/ 7 *****************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2006, R. Byron Moore 10 * Copyright (C) 2000 - 2007, R. Byron Moore
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exstorob.c b/drivers/acpi/executer/exstorob.c
index 99ebe5adfcda..8233d40178ee 100644
--- a/drivers/acpi/executer/exstorob.c
+++ b/drivers/acpi/executer/exstorob.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exsystem.c b/drivers/acpi/executer/exsystem.c
index 28aef3e69ecc..9460baff3032 100644
--- a/drivers/acpi/executer/exsystem.c
+++ b/drivers/acpi/executer/exsystem.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -66,7 +66,6 @@ ACPI_MODULE_NAME("exsystem")
66acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout) 66acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
67{ 67{
68 acpi_status status; 68 acpi_status status;
69 acpi_status status2;
70 69
71 ACPI_FUNCTION_TRACE(ex_system_wait_semaphore); 70 ACPI_FUNCTION_TRACE(ex_system_wait_semaphore);
72 71
@@ -79,7 +78,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
79 78
80 /* We must wait, so unlock the interpreter */ 79 /* We must wait, so unlock the interpreter */
81 80
82 acpi_ex_exit_interpreter(); 81 acpi_ex_relinquish_interpreter();
83 82
84 status = acpi_os_wait_semaphore(semaphore, 1, timeout); 83 status = acpi_os_wait_semaphore(semaphore, 1, timeout);
85 84
@@ -89,13 +88,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
89 88
90 /* Reacquire the interpreter */ 89 /* Reacquire the interpreter */
91 90
92 status2 = acpi_ex_enter_interpreter(); 91 acpi_ex_reacquire_interpreter();
93 if (ACPI_FAILURE(status2)) {
94
95 /* Report fatal error, could not acquire interpreter */
96
97 return_ACPI_STATUS(status2);
98 }
99 } 92 }
100 93
101 return_ACPI_STATUS(status); 94 return_ACPI_STATUS(status);
@@ -119,7 +112,6 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
119acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout) 112acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
120{ 113{
121 acpi_status status; 114 acpi_status status;
122 acpi_status status2;
123 115
124 ACPI_FUNCTION_TRACE(ex_system_wait_mutex); 116 ACPI_FUNCTION_TRACE(ex_system_wait_mutex);
125 117
@@ -132,7 +124,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
132 124
133 /* We must wait, so unlock the interpreter */ 125 /* We must wait, so unlock the interpreter */
134 126
135 acpi_ex_exit_interpreter(); 127 acpi_ex_relinquish_interpreter();
136 128
137 status = acpi_os_acquire_mutex(mutex, timeout); 129 status = acpi_os_acquire_mutex(mutex, timeout);
138 130
@@ -142,13 +134,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
142 134
143 /* Reacquire the interpreter */ 135 /* Reacquire the interpreter */
144 136
145 status2 = acpi_ex_enter_interpreter(); 137 acpi_ex_reacquire_interpreter();
146 if (ACPI_FAILURE(status2)) {
147
148 /* Report fatal error, could not acquire interpreter */
149
150 return_ACPI_STATUS(status2);
151 }
152 } 138 }
153 139
154 return_ACPI_STATUS(status); 140 return_ACPI_STATUS(status);
@@ -209,96 +195,18 @@ acpi_status acpi_ex_system_do_stall(u32 how_long)
209 195
210acpi_status acpi_ex_system_do_suspend(acpi_integer how_long) 196acpi_status acpi_ex_system_do_suspend(acpi_integer how_long)
211{ 197{
212 acpi_status status;
213
214 ACPI_FUNCTION_ENTRY(); 198 ACPI_FUNCTION_ENTRY();
215 199
216 /* Since this thread will sleep, we must release the interpreter */ 200 /* Since this thread will sleep, we must release the interpreter */
217 201
218 acpi_ex_exit_interpreter(); 202 acpi_ex_relinquish_interpreter();
219 203
220 acpi_os_sleep(how_long); 204 acpi_os_sleep(how_long);
221 205
222 /* And now we must get the interpreter again */ 206 /* And now we must get the interpreter again */
223 207
224 status = acpi_ex_enter_interpreter(); 208 acpi_ex_reacquire_interpreter();
225 return (status); 209 return (AE_OK);
226}
227
228/*******************************************************************************
229 *
230 * FUNCTION: acpi_ex_system_acquire_mutex
231 *
232 * PARAMETERS: time_desc - Maximum time to wait for the mutex
233 * obj_desc - The object descriptor for this op
234 *
235 * RETURN: Status
236 *
237 * DESCRIPTION: Provides an access point to perform synchronization operations
238 * within the AML. This function will cause a lock to be generated
239 * for the Mutex pointed to by obj_desc.
240 *
241 ******************************************************************************/
242
243acpi_status
244acpi_ex_system_acquire_mutex(union acpi_operand_object * time_desc,
245 union acpi_operand_object * obj_desc)
246{
247 acpi_status status = AE_OK;
248
249 ACPI_FUNCTION_TRACE_PTR(ex_system_acquire_mutex, obj_desc);
250
251 if (!obj_desc) {
252 return_ACPI_STATUS(AE_BAD_PARAMETER);
253 }
254
255 /* Support for the _GL_ Mutex object -- go get the global lock */
256
257 if (obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK) {
258 status =
259 acpi_ev_acquire_global_lock((u16) time_desc->integer.value);
260 return_ACPI_STATUS(status);
261 }
262
263 status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
264 (u16) time_desc->integer.value);
265 return_ACPI_STATUS(status);
266}
267
268/*******************************************************************************
269 *
270 * FUNCTION: acpi_ex_system_release_mutex
271 *
272 * PARAMETERS: obj_desc - The object descriptor for this op
273 *
274 * RETURN: Status
275 *
276 * DESCRIPTION: Provides an access point to perform synchronization operations
277 * within the AML. This operation is a request to release a
278 * previously acquired Mutex. If the Mutex variable is set then
279 * it will be decremented.
280 *
281 ******************************************************************************/
282
283acpi_status acpi_ex_system_release_mutex(union acpi_operand_object *obj_desc)
284{
285 acpi_status status = AE_OK;
286
287 ACPI_FUNCTION_TRACE(ex_system_release_mutex);
288
289 if (!obj_desc) {
290 return_ACPI_STATUS(AE_BAD_PARAMETER);
291 }
292
293 /* Support for the _GL_ Mutex object -- release the global lock */
294
295 if (obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK) {
296 status = acpi_ev_release_global_lock();
297 return_ACPI_STATUS(status);
298 }
299
300 acpi_os_release_mutex(obj_desc->mutex.os_mutex);
301 return_ACPI_STATUS(AE_OK);
302} 210}
303 211
304/******************************************************************************* 212/*******************************************************************************
@@ -314,7 +222,7 @@ acpi_status acpi_ex_system_release_mutex(union acpi_operand_object *obj_desc)
314 * 222 *
315 ******************************************************************************/ 223 ******************************************************************************/
316 224
317acpi_status acpi_ex_system_signal_event(union acpi_operand_object *obj_desc) 225acpi_status acpi_ex_system_signal_event(union acpi_operand_object * obj_desc)
318{ 226{
319 acpi_status status = AE_OK; 227 acpi_status status = AE_OK;
320 228
diff --git a/drivers/acpi/executer/exutils.c b/drivers/acpi/executer/exutils.c
index 982c8b65876f..6b0aeccbb69b 100644
--- a/drivers/acpi/executer/exutils.c
+++ b/drivers/acpi/executer/exutils.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -76,14 +76,15 @@ static u32 acpi_ex_digits_needed(acpi_integer value, u32 base);
76 * 76 *
77 * PARAMETERS: None 77 * PARAMETERS: None
78 * 78 *
79 * RETURN: Status 79 * RETURN: None
80 * 80 *
81 * DESCRIPTION: Enter the interpreter execution region. Failure to enter 81 * DESCRIPTION: Enter the interpreter execution region. Failure to enter
82 * the interpreter region is a fatal system error 82 * the interpreter region is a fatal system error. Used in
83 * conjunction with exit_interpreter.
83 * 84 *
84 ******************************************************************************/ 85 ******************************************************************************/
85 86
86acpi_status acpi_ex_enter_interpreter(void) 87void acpi_ex_enter_interpreter(void)
87{ 88{
88 acpi_status status; 89 acpi_status status;
89 90
@@ -91,31 +92,55 @@ acpi_status acpi_ex_enter_interpreter(void)
91 92
92 status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER); 93 status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
93 if (ACPI_FAILURE(status)) { 94 if (ACPI_FAILURE(status)) {
94 ACPI_ERROR((AE_INFO, "Could not acquire interpreter mutex")); 95 ACPI_ERROR((AE_INFO,
96 "Could not acquire AML Interpreter mutex"));
95 } 97 }
96 98
97 return_ACPI_STATUS(status); 99 return_VOID;
98} 100}
99 101
100/******************************************************************************* 102/*******************************************************************************
101 * 103 *
102 * FUNCTION: acpi_ex_exit_interpreter 104 * FUNCTION: acpi_ex_reacquire_interpreter
103 * 105 *
104 * PARAMETERS: None 106 * PARAMETERS: None
105 * 107 *
106 * RETURN: None 108 * RETURN: None
107 * 109 *
108 * DESCRIPTION: Exit the interpreter execution region 110 * DESCRIPTION: Reacquire the interpreter execution region from within the
111 * interpreter code. Failure to enter the interpreter region is a
112 * fatal system error. Used in conjuction with
113 * relinquish_interpreter
114 *
115 ******************************************************************************/
116
117void acpi_ex_reacquire_interpreter(void)
118{
119 ACPI_FUNCTION_TRACE(ex_reacquire_interpreter);
120
121 /*
122 * If the global serialized flag is set, do not release the interpreter,
123 * since it was not actually released by acpi_ex_relinquish_interpreter.
124 * This forces the interpreter to be single threaded.
125 */
126 if (!acpi_gbl_all_methods_serialized) {
127 acpi_ex_enter_interpreter();
128 }
129
130 return_VOID;
131}
132
133/*******************************************************************************
134 *
135 * FUNCTION: acpi_ex_exit_interpreter
136 *
137 * PARAMETERS: None
138 *
139 * RETURN: None
109 * 140 *
110 * Cases where the interpreter is unlocked: 141 * DESCRIPTION: Exit the interpreter execution region. This is the top level
111 * 1) Completion of the execution of a control method 142 * routine used to exit the interpreter when all processing has
112 * 2) Method blocked on a Sleep() AML opcode 143 * been completed.
113 * 3) Method blocked on an Acquire() AML opcode
114 * 4) Method blocked on a Wait() AML opcode
115 * 5) Method blocked to acquire the global lock
116 * 6) Method blocked to execute a serialized control method that is
117 * already executing
118 * 7) About to invoke a user-installed opregion handler
119 * 144 *
120 ******************************************************************************/ 145 ******************************************************************************/
121 146
@@ -127,7 +152,46 @@ void acpi_ex_exit_interpreter(void)
127 152
128 status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER); 153 status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
129 if (ACPI_FAILURE(status)) { 154 if (ACPI_FAILURE(status)) {
130 ACPI_ERROR((AE_INFO, "Could not release interpreter mutex")); 155 ACPI_ERROR((AE_INFO,
156 "Could not release AML Interpreter mutex"));
157 }
158
159 return_VOID;
160}
161
162/*******************************************************************************
163 *
164 * FUNCTION: acpi_ex_relinquish_interpreter
165 *
166 * PARAMETERS: None
167 *
168 * RETURN: None
169 *
170 * DESCRIPTION: Exit the interpreter execution region, from within the
171 * interpreter - before attempting an operation that will possibly
172 * block the running thread.
173 *
174 * Cases where the interpreter is unlocked internally
175 * 1) Method to be blocked on a Sleep() AML opcode
176 * 2) Method to be blocked on an Acquire() AML opcode
177 * 3) Method to be blocked on a Wait() AML opcode
178 * 4) Method to be blocked to acquire the global lock
179 * 5) Method to be blocked waiting to execute a serialized control method
180 * that is currently executing
181 * 6) About to invoke a user-installed opregion handler
182 *
183 ******************************************************************************/
184
185void acpi_ex_relinquish_interpreter(void)
186{
187 ACPI_FUNCTION_TRACE(ex_relinquish_interpreter);
188
189 /*
190 * If the global serialized flag is set, do not release the interpreter.
191 * This forces the interpreter to be single threaded.
192 */
193 if (!acpi_gbl_all_methods_serialized) {
194 acpi_ex_exit_interpreter();
131 } 195 }
132 196
133 return_VOID; 197 return_VOID;
@@ -141,8 +205,8 @@ void acpi_ex_exit_interpreter(void)
141 * 205 *
142 * RETURN: none 206 * RETURN: none
143 * 207 *
144 * DESCRIPTION: Truncate a number to 32-bits if the currently executing method 208 * DESCRIPTION: Truncate an ACPI Integer to 32 bits if the execution mode is
145 * belongs to a 32-bit ACPI table. 209 * 32-bit, as determined by the revision of the DSDT.
146 * 210 *
147 ******************************************************************************/ 211 ******************************************************************************/
148 212
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index f305a826ca2d..af22fdf73413 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -48,8 +48,8 @@ MODULE_LICENSE("GPL");
48 48
49static int acpi_fan_add(struct acpi_device *device); 49static int acpi_fan_add(struct acpi_device *device);
50static int acpi_fan_remove(struct acpi_device *device, int type); 50static int acpi_fan_remove(struct acpi_device *device, int type);
51static int acpi_fan_suspend(struct acpi_device *device, int state); 51static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state);
52static int acpi_fan_resume(struct acpi_device *device, int state); 52static int acpi_fan_resume(struct acpi_device *device);
53 53
54static struct acpi_driver acpi_fan_driver = { 54static struct acpi_driver acpi_fan_driver = {
55 .name = ACPI_FAN_DRIVER_NAME, 55 .name = ACPI_FAN_DRIVER_NAME,
@@ -237,7 +237,7 @@ static int acpi_fan_remove(struct acpi_device *device, int type)
237 return 0; 237 return 0;
238} 238}
239 239
240static int acpi_fan_suspend(struct acpi_device *device, int state) 240static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state)
241{ 241{
242 if (!device) 242 if (!device)
243 return -EINVAL; 243 return -EINVAL;
@@ -247,7 +247,7 @@ static int acpi_fan_suspend(struct acpi_device *device, int state)
247 return AE_OK; 247 return AE_OK;
248} 248}
249 249
250static int acpi_fan_resume(struct acpi_device *device, int state) 250static int acpi_fan_resume(struct acpi_device *device)
251{ 251{
252 int result = 0; 252 int result = 0;
253 int power_state = 0; 253 int power_state = 0;
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 8a0324b43e53..7b6c9ff9bebe 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -86,129 +86,6 @@ static int acpi_find_bridge_device(struct device *dev, acpi_handle * handle)
86 return ret; 86 return ret;
87} 87}
88 88
89/* Get PCI root bridge's handle from its segment and bus number */
90struct acpi_find_pci_root {
91 unsigned int seg;
92 unsigned int bus;
93 acpi_handle handle;
94};
95
96static acpi_status
97do_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
98{
99 unsigned long *busnr = data;
100 struct acpi_resource_address64 address;
101
102 if (resource->type != ACPI_RESOURCE_TYPE_ADDRESS16 &&
103 resource->type != ACPI_RESOURCE_TYPE_ADDRESS32 &&
104 resource->type != ACPI_RESOURCE_TYPE_ADDRESS64)
105 return AE_OK;
106
107 acpi_resource_to_address64(resource, &address);
108 if ((address.address_length > 0) &&
109 (address.resource_type == ACPI_BUS_NUMBER_RANGE))
110 *busnr = address.minimum;
111
112 return AE_OK;
113}
114
115static int get_root_bridge_busnr(acpi_handle handle)
116{
117 acpi_status status;
118 unsigned long bus, bbn;
119 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
120
121 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
122
123 status = acpi_evaluate_integer(handle, METHOD_NAME__BBN, NULL,
124 &bbn);
125 if (status == AE_NOT_FOUND) {
126 /* Assume bus = 0 */
127 printk(KERN_INFO PREFIX
128 "Assume root bridge [%s] bus is 0\n",
129 (char *)buffer.pointer);
130 status = AE_OK;
131 bbn = 0;
132 }
133 if (ACPI_FAILURE(status)) {
134 bbn = -ENODEV;
135 goto exit;
136 }
137 if (bbn > 0)
138 goto exit;
139
140 /* _BBN in some systems return 0 for all root bridges */
141 bus = -1;
142 status = acpi_walk_resources(handle, METHOD_NAME__CRS,
143 do_root_bridge_busnr_callback, &bus);
144 /* If _CRS failed, we just use _BBN */
145 if (ACPI_FAILURE(status) || (bus == -1))
146 goto exit;
147 /* We select _CRS */
148 if (bbn != bus) {
149 printk(KERN_INFO PREFIX
150 "_BBN and _CRS returns different value for %s. Select _CRS\n",
151 (char *)buffer.pointer);
152 bbn = bus;
153 }
154 exit:
155 kfree(buffer.pointer);
156 return (int)bbn;
157}
158
159static acpi_status
160find_pci_rootbridge(acpi_handle handle, u32 lvl, void *context, void **rv)
161{
162 struct acpi_find_pci_root *find = (struct acpi_find_pci_root *)context;
163 unsigned long seg, bus;
164 acpi_status status;
165 int tmp;
166 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
167
168 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
169
170 status = acpi_evaluate_integer(handle, METHOD_NAME__SEG, NULL, &seg);
171 if (status == AE_NOT_FOUND) {
172 /* Assume seg = 0 */
173 status = AE_OK;
174 seg = 0;
175 }
176 if (ACPI_FAILURE(status)) {
177 status = AE_CTRL_DEPTH;
178 goto exit;
179 }
180
181 tmp = get_root_bridge_busnr(handle);
182 if (tmp < 0) {
183 printk(KERN_ERR PREFIX
184 "Find root bridge failed for %s\n",
185 (char *)buffer.pointer);
186 status = AE_CTRL_DEPTH;
187 goto exit;
188 }
189 bus = tmp;
190
191 if (seg == find->seg && bus == find->bus)
192 {
193 find->handle = handle;
194 status = AE_CTRL_TERMINATE;
195 }
196 else
197 status = AE_OK;
198 exit:
199 kfree(buffer.pointer);
200 return status;
201}
202
203acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
204{
205 struct acpi_find_pci_root find = { seg, bus, NULL };
206
207 acpi_get_devices(PCI_ROOT_HID_STRING, find_pci_rootbridge, &find, NULL);
208 return find.handle;
209}
210EXPORT_SYMBOL_GPL(acpi_get_pci_rootbridge_handle);
211
212/* Get device's handler per its address under its parent */ 89/* Get device's handler per its address under its parent */
213struct acpi_find_child { 90struct acpi_find_child {
214 acpi_handle handle; 91 acpi_handle handle;
diff --git a/drivers/acpi/hardware/hwacpi.c b/drivers/acpi/hardware/hwacpi.c
index de50fab2a910..6031ca13dd2f 100644
--- a/drivers/acpi/hardware/hwacpi.c
+++ b/drivers/acpi/hardware/hwacpi.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -49,41 +49,6 @@ ACPI_MODULE_NAME("hwacpi")
49 49
50/****************************************************************************** 50/******************************************************************************
51 * 51 *
52 * FUNCTION: acpi_hw_initialize
53 *
54 * PARAMETERS: None
55 *
56 * RETURN: Status
57 *
58 * DESCRIPTION: Initialize and validate the various ACPI registers defined in
59 * the FADT.
60 *
61 ******************************************************************************/
62acpi_status acpi_hw_initialize(void)
63{
64 acpi_status status;
65
66 ACPI_FUNCTION_TRACE(hw_initialize);
67
68 /* We must have the ACPI tables by the time we get here */
69
70 if (!acpi_gbl_FADT) {
71 ACPI_ERROR((AE_INFO, "No FADT is present"));
72 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
73 }
74
75 /* Sanity check the FADT for valid values */
76
77 status = acpi_ut_validate_fadt();
78 if (ACPI_FAILURE(status)) {
79 return_ACPI_STATUS(status);
80 }
81
82 return_ACPI_STATUS(AE_OK);
83}
84
85/******************************************************************************
86 *
87 * FUNCTION: acpi_hw_set_mode 52 * FUNCTION: acpi_hw_set_mode
88 * 53 *
89 * PARAMETERS: Mode - SYS_MODE_ACPI or SYS_MODE_LEGACY 54 * PARAMETERS: Mode - SYS_MODE_ACPI or SYS_MODE_LEGACY
@@ -93,7 +58,6 @@ acpi_status acpi_hw_initialize(void)
93 * DESCRIPTION: Transitions the system into the requested mode. 58 * DESCRIPTION: Transitions the system into the requested mode.
94 * 59 *
95 ******************************************************************************/ 60 ******************************************************************************/
96
97acpi_status acpi_hw_set_mode(u32 mode) 61acpi_status acpi_hw_set_mode(u32 mode)
98{ 62{
99 63
@@ -106,7 +70,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
106 * ACPI 2.0 clarified that if SMI_CMD in FADT is zero, 70 * ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
107 * system does not support mode transition. 71 * system does not support mode transition.
108 */ 72 */
109 if (!acpi_gbl_FADT->smi_cmd) { 73 if (!acpi_gbl_FADT.smi_command) {
110 ACPI_ERROR((AE_INFO, 74 ACPI_ERROR((AE_INFO,
111 "No SMI_CMD in FADT, mode transition failed")); 75 "No SMI_CMD in FADT, mode transition failed"));
112 return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE); 76 return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
@@ -119,7 +83,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
119 * we make sure both the numbers are zero to determine these 83 * we make sure both the numbers are zero to determine these
120 * transitions are not supported. 84 * transitions are not supported.
121 */ 85 */
122 if (!acpi_gbl_FADT->acpi_enable && !acpi_gbl_FADT->acpi_disable) { 86 if (!acpi_gbl_FADT.acpi_enable && !acpi_gbl_FADT.acpi_disable) {
123 ACPI_ERROR((AE_INFO, 87 ACPI_ERROR((AE_INFO,
124 "No ACPI mode transition supported in this system (enable/disable both zero)")); 88 "No ACPI mode transition supported in this system (enable/disable both zero)"));
125 return_ACPI_STATUS(AE_OK); 89 return_ACPI_STATUS(AE_OK);
@@ -130,9 +94,8 @@ acpi_status acpi_hw_set_mode(u32 mode)
130 94
131 /* BIOS should have disabled ALL fixed and GP events */ 95 /* BIOS should have disabled ALL fixed and GP events */
132 96
133 status = acpi_os_write_port(acpi_gbl_FADT->smi_cmd, 97 status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
134 (u32) acpi_gbl_FADT->acpi_enable, 98 (u32) acpi_gbl_FADT.acpi_enable, 8);
135 8);
136 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 99 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
137 "Attempting to enable ACPI mode\n")); 100 "Attempting to enable ACPI mode\n"));
138 break; 101 break;
@@ -143,8 +106,8 @@ acpi_status acpi_hw_set_mode(u32 mode)
143 * BIOS should clear all fixed status bits and restore fixed event 106 * BIOS should clear all fixed status bits and restore fixed event
144 * enable bits to default 107 * enable bits to default
145 */ 108 */
146 status = acpi_os_write_port(acpi_gbl_FADT->smi_cmd, 109 status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
147 (u32) acpi_gbl_FADT->acpi_disable, 110 (u32) acpi_gbl_FADT.acpi_disable,
148 8); 111 8);
149 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 112 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
150 "Attempting to enable Legacy (non-ACPI) mode\n")); 113 "Attempting to enable Legacy (non-ACPI) mode\n"));
@@ -204,12 +167,11 @@ u32 acpi_hw_get_mode(void)
204 * ACPI 2.0 clarified that if SMI_CMD in FADT is zero, 167 * ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
205 * system does not support mode transition. 168 * system does not support mode transition.
206 */ 169 */
207 if (!acpi_gbl_FADT->smi_cmd) { 170 if (!acpi_gbl_FADT.smi_command) {
208 return_UINT32(ACPI_SYS_MODE_ACPI); 171 return_UINT32(ACPI_SYS_MODE_ACPI);
209 } 172 }
210 173
211 status = 174 status = acpi_get_register(ACPI_BITREG_SCI_ENABLE, &value);
212 acpi_get_register(ACPI_BITREG_SCI_ENABLE, &value, ACPI_MTX_LOCK);
213 if (ACPI_FAILURE(status)) { 175 if (ACPI_FAILURE(status)) {
214 return_UINT32(ACPI_SYS_MODE_LEGACY); 176 return_UINT32(ACPI_SYS_MODE_LEGACY);
215 } 177 }
diff --git a/drivers/acpi/hardware/hwgpe.c b/drivers/acpi/hardware/hwgpe.c
index 608a3a60ee11..117a05cadaaa 100644
--- a/drivers/acpi/hardware/hwgpe.c
+++ b/drivers/acpi/hardware/hwgpe.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -105,14 +105,20 @@ acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info *gpe_event_info)
105acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info) 105acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
106{ 106{
107 acpi_status status; 107 acpi_status status;
108 u8 register_bit;
108 109
109 ACPI_FUNCTION_ENTRY(); 110 ACPI_FUNCTION_ENTRY();
110 111
112 register_bit = (u8)
113 (1 <<
114 (gpe_event_info->gpe_number -
115 gpe_event_info->register_info->base_gpe_number));
116
111 /* 117 /*
112 * Write a one to the appropriate bit in the status register to 118 * Write a one to the appropriate bit in the status register to
113 * clear this GPE. 119 * clear this GPE.
114 */ 120 */
115 status = acpi_hw_low_level_write(8, gpe_event_info->register_bit, 121 status = acpi_hw_low_level_write(8, register_bit,
116 &gpe_event_info->register_info-> 122 &gpe_event_info->register_info->
117 status_address); 123 status_address);
118 124
@@ -155,7 +161,10 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
155 161
156 /* Get the register bitmask for this GPE */ 162 /* Get the register bitmask for this GPE */
157 163
158 register_bit = gpe_event_info->register_bit; 164 register_bit = (u8)
165 (1 <<
166 (gpe_event_info->gpe_number -
167 gpe_event_info->register_info->base_gpe_number));
159 168
160 /* GPE currently enabled? (enabled for runtime?) */ 169 /* GPE currently enabled? (enabled for runtime?) */
161 170
diff --git a/drivers/acpi/hardware/hwregs.c b/drivers/acpi/hardware/hwregs.c
index fa58c1edce1e..1d371fa663f2 100644
--- a/drivers/acpi/hardware/hwregs.c
+++ b/drivers/acpi/hardware/hwregs.c
@@ -7,7 +7,7 @@
7 ******************************************************************************/ 7 ******************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2006, R. Byron Moore 10 * Copyright (C) 2000 - 2007, R. Byron Moore
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
@@ -54,17 +54,15 @@ ACPI_MODULE_NAME("hwregs")
54 * 54 *
55 * FUNCTION: acpi_hw_clear_acpi_status 55 * FUNCTION: acpi_hw_clear_acpi_status
56 * 56 *
57 * PARAMETERS: Flags - Lock the hardware or not 57 * PARAMETERS: None
58 * 58 *
59 * RETURN: none 59 * RETURN: None
60 * 60 *
61 * DESCRIPTION: Clears all fixed and general purpose status bits 61 * DESCRIPTION: Clears all fixed and general purpose status bits
62 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED 62 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
63 * 63 *
64 * NOTE: TBD: Flags parameter is obsolete, to be removed
65 *
66 ******************************************************************************/ 64 ******************************************************************************/
67acpi_status acpi_hw_clear_acpi_status(u32 flags) 65acpi_status acpi_hw_clear_acpi_status(void)
68{ 66{
69 acpi_status status; 67 acpi_status status;
70 acpi_cpu_flags lock_flags = 0; 68 acpi_cpu_flags lock_flags = 0;
@@ -73,7 +71,7 @@ acpi_status acpi_hw_clear_acpi_status(u32 flags)
73 71
74 ACPI_DEBUG_PRINT((ACPI_DB_IO, "About to write %04X to %04X\n", 72 ACPI_DEBUG_PRINT((ACPI_DB_IO, "About to write %04X to %04X\n",
75 ACPI_BITMASK_ALL_FIXED_STATUS, 73 ACPI_BITMASK_ALL_FIXED_STATUS,
76 (u16) acpi_gbl_FADT->xpm1a_evt_blk.address)); 74 (u16) acpi_gbl_FADT.xpm1a_event_block.address));
77 75
78 lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); 76 lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
79 77
@@ -86,10 +84,10 @@ acpi_status acpi_hw_clear_acpi_status(u32 flags)
86 84
87 /* Clear the fixed events */ 85 /* Clear the fixed events */
88 86
89 if (acpi_gbl_FADT->xpm1b_evt_blk.address) { 87 if (acpi_gbl_FADT.xpm1b_event_block.address) {
90 status = 88 status =
91 acpi_hw_low_level_write(16, ACPI_BITMASK_ALL_FIXED_STATUS, 89 acpi_hw_low_level_write(16, ACPI_BITMASK_ALL_FIXED_STATUS,
92 &acpi_gbl_FADT->xpm1b_evt_blk); 90 &acpi_gbl_FADT.xpm1b_event_block);
93 if (ACPI_FAILURE(status)) { 91 if (ACPI_FAILURE(status)) {
94 goto unlock_and_exit; 92 goto unlock_and_exit;
95 } 93 }
@@ -253,18 +251,15 @@ struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)
253 * 251 *
254 * PARAMETERS: register_id - ID of ACPI bit_register to access 252 * PARAMETERS: register_id - ID of ACPI bit_register to access
255 * return_value - Value that was read from the register 253 * return_value - Value that was read from the register
256 * Flags - Lock the hardware or not
257 * 254 *
258 * RETURN: Status and the value read from specified Register. Value 255 * RETURN: Status and the value read from specified Register. Value
259 * returned is normalized to bit0 (is shifted all the way right) 256 * returned is normalized to bit0 (is shifted all the way right)
260 * 257 *
261 * DESCRIPTION: ACPI bit_register read function. 258 * DESCRIPTION: ACPI bit_register read function.
262 * 259 *
263 * NOTE: TBD: Flags parameter is obsolete, to be removed
264 *
265 ******************************************************************************/ 260 ******************************************************************************/
266 261
267acpi_status acpi_get_register(u32 register_id, u32 * return_value, u32 flags) 262acpi_status acpi_get_register(u32 register_id, u32 * return_value)
268{ 263{
269 u32 register_value = 0; 264 u32 register_value = 0;
270 struct acpi_bit_register_info *bit_reg_info; 265 struct acpi_bit_register_info *bit_reg_info;
@@ -312,16 +307,13 @@ ACPI_EXPORT_SYMBOL(acpi_get_register)
312 * PARAMETERS: register_id - ID of ACPI bit_register to access 307 * PARAMETERS: register_id - ID of ACPI bit_register to access
313 * Value - (only used on write) value to write to the 308 * Value - (only used on write) value to write to the
314 * Register, NOT pre-normalized to the bit pos 309 * Register, NOT pre-normalized to the bit pos
315 * Flags - Lock the hardware or not
316 * 310 *
317 * RETURN: Status 311 * RETURN: Status
318 * 312 *
319 * DESCRIPTION: ACPI Bit Register write function. 313 * DESCRIPTION: ACPI Bit Register write function.
320 * 314 *
321 * NOTE: TBD: Flags parameter is obsolete, to be removed
322 *
323 ******************************************************************************/ 315 ******************************************************************************/
324acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags) 316acpi_status acpi_set_register(u32 register_id, u32 value)
325{ 317{
326 u32 register_value = 0; 318 u32 register_value = 0;
327 struct acpi_bit_register_info *bit_reg_info; 319 struct acpi_bit_register_info *bit_reg_info;
@@ -422,8 +414,9 @@ acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags)
422 ACPI_DEBUG_PRINT((ACPI_DB_IO, 414 ACPI_DEBUG_PRINT((ACPI_DB_IO,
423 "PM2 control: Read %X from %8.8X%8.8X\n", 415 "PM2 control: Read %X from %8.8X%8.8X\n",
424 register_value, 416 register_value,
425 ACPI_FORMAT_UINT64(acpi_gbl_FADT-> 417 ACPI_FORMAT_UINT64(acpi_gbl_FADT.
426 xpm2_cnt_blk.address))); 418 xpm2_control_block.
419 address)));
427 420
428 ACPI_REGISTER_INSERT_VALUE(register_value, 421 ACPI_REGISTER_INSERT_VALUE(register_value,
429 bit_reg_info->bit_position, 422 bit_reg_info->bit_position,
@@ -433,8 +426,9 @@ acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags)
433 ACPI_DEBUG_PRINT((ACPI_DB_IO, 426 ACPI_DEBUG_PRINT((ACPI_DB_IO,
434 "About to write %4.4X to %8.8X%8.8X\n", 427 "About to write %4.4X to %8.8X%8.8X\n",
435 register_value, 428 register_value,
436 ACPI_FORMAT_UINT64(acpi_gbl_FADT-> 429 ACPI_FORMAT_UINT64(acpi_gbl_FADT.
437 xpm2_cnt_blk.address))); 430 xpm2_control_block.
431 address)));
438 432
439 status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK, 433 status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
440 ACPI_REGISTER_PM2_CONTROL, 434 ACPI_REGISTER_PM2_CONTROL,
@@ -495,7 +489,7 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
495 489
496 status = 490 status =
497 acpi_hw_low_level_read(16, &value1, 491 acpi_hw_low_level_read(16, &value1,
498 &acpi_gbl_FADT->xpm1a_evt_blk); 492 &acpi_gbl_FADT.xpm1a_event_block);
499 if (ACPI_FAILURE(status)) { 493 if (ACPI_FAILURE(status)) {
500 goto unlock_and_exit; 494 goto unlock_and_exit;
501 } 495 }
@@ -504,7 +498,7 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
504 498
505 status = 499 status =
506 acpi_hw_low_level_read(16, &value2, 500 acpi_hw_low_level_read(16, &value2,
507 &acpi_gbl_FADT->xpm1b_evt_blk); 501 &acpi_gbl_FADT.xpm1b_event_block);
508 value1 |= value2; 502 value1 |= value2;
509 break; 503 break;
510 504
@@ -527,14 +521,14 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
527 521
528 status = 522 status =
529 acpi_hw_low_level_read(16, &value1, 523 acpi_hw_low_level_read(16, &value1,
530 &acpi_gbl_FADT->xpm1a_cnt_blk); 524 &acpi_gbl_FADT.xpm1a_control_block);
531 if (ACPI_FAILURE(status)) { 525 if (ACPI_FAILURE(status)) {
532 goto unlock_and_exit; 526 goto unlock_and_exit;
533 } 527 }
534 528
535 status = 529 status =
536 acpi_hw_low_level_read(16, &value2, 530 acpi_hw_low_level_read(16, &value2,
537 &acpi_gbl_FADT->xpm1b_cnt_blk); 531 &acpi_gbl_FADT.xpm1b_control_block);
538 value1 |= value2; 532 value1 |= value2;
539 break; 533 break;
540 534
@@ -542,19 +536,20 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
542 536
543 status = 537 status =
544 acpi_hw_low_level_read(8, &value1, 538 acpi_hw_low_level_read(8, &value1,
545 &acpi_gbl_FADT->xpm2_cnt_blk); 539 &acpi_gbl_FADT.xpm2_control_block);
546 break; 540 break;
547 541
548 case ACPI_REGISTER_PM_TIMER: /* 32-bit access */ 542 case ACPI_REGISTER_PM_TIMER: /* 32-bit access */
549 543
550 status = 544 status =
551 acpi_hw_low_level_read(32, &value1, 545 acpi_hw_low_level_read(32, &value1,
552 &acpi_gbl_FADT->xpm_tmr_blk); 546 &acpi_gbl_FADT.xpm_timer_block);
553 break; 547 break;
554 548
555 case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */ 549 case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */
556 550
557 status = acpi_os_read_port(acpi_gbl_FADT->smi_cmd, &value1, 8); 551 status =
552 acpi_os_read_port(acpi_gbl_FADT.smi_command, &value1, 8);
558 break; 553 break;
559 554
560 default: 555 default:
@@ -635,7 +630,7 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
635 630
636 status = 631 status =
637 acpi_hw_low_level_write(16, value, 632 acpi_hw_low_level_write(16, value,
638 &acpi_gbl_FADT->xpm1a_evt_blk); 633 &acpi_gbl_FADT.xpm1a_event_block);
639 if (ACPI_FAILURE(status)) { 634 if (ACPI_FAILURE(status)) {
640 goto unlock_and_exit; 635 goto unlock_and_exit;
641 } 636 }
@@ -644,7 +639,7 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
644 639
645 status = 640 status =
646 acpi_hw_low_level_write(16, value, 641 acpi_hw_low_level_write(16, value,
647 &acpi_gbl_FADT->xpm1b_evt_blk); 642 &acpi_gbl_FADT.xpm1b_event_block);
648 break; 643 break;
649 644
650 case ACPI_REGISTER_PM1_ENABLE: /* 16-bit access */ 645 case ACPI_REGISTER_PM1_ENABLE: /* 16-bit access */
@@ -682,49 +677,50 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
682 677
683 status = 678 status =
684 acpi_hw_low_level_write(16, value, 679 acpi_hw_low_level_write(16, value,
685 &acpi_gbl_FADT->xpm1a_cnt_blk); 680 &acpi_gbl_FADT.xpm1a_control_block);
686 if (ACPI_FAILURE(status)) { 681 if (ACPI_FAILURE(status)) {
687 goto unlock_and_exit; 682 goto unlock_and_exit;
688 } 683 }
689 684
690 status = 685 status =
691 acpi_hw_low_level_write(16, value, 686 acpi_hw_low_level_write(16, value,
692 &acpi_gbl_FADT->xpm1b_cnt_blk); 687 &acpi_gbl_FADT.xpm1b_control_block);
693 break; 688 break;
694 689
695 case ACPI_REGISTER_PM1A_CONTROL: /* 16-bit access */ 690 case ACPI_REGISTER_PM1A_CONTROL: /* 16-bit access */
696 691
697 status = 692 status =
698 acpi_hw_low_level_write(16, value, 693 acpi_hw_low_level_write(16, value,
699 &acpi_gbl_FADT->xpm1a_cnt_blk); 694 &acpi_gbl_FADT.xpm1a_control_block);
700 break; 695 break;
701 696
702 case ACPI_REGISTER_PM1B_CONTROL: /* 16-bit access */ 697 case ACPI_REGISTER_PM1B_CONTROL: /* 16-bit access */
703 698
704 status = 699 status =
705 acpi_hw_low_level_write(16, value, 700 acpi_hw_low_level_write(16, value,
706 &acpi_gbl_FADT->xpm1b_cnt_blk); 701 &acpi_gbl_FADT.xpm1b_control_block);
707 break; 702 break;
708 703
709 case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */ 704 case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */
710 705
711 status = 706 status =
712 acpi_hw_low_level_write(8, value, 707 acpi_hw_low_level_write(8, value,
713 &acpi_gbl_FADT->xpm2_cnt_blk); 708 &acpi_gbl_FADT.xpm2_control_block);
714 break; 709 break;
715 710
716 case ACPI_REGISTER_PM_TIMER: /* 32-bit access */ 711 case ACPI_REGISTER_PM_TIMER: /* 32-bit access */
717 712
718 status = 713 status =
719 acpi_hw_low_level_write(32, value, 714 acpi_hw_low_level_write(32, value,
720 &acpi_gbl_FADT->xpm_tmr_blk); 715 &acpi_gbl_FADT.xpm_timer_block);
721 break; 716 break;
722 717
723 case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */ 718 case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */
724 719
725 /* SMI_CMD is currently always in IO space */ 720 /* SMI_CMD is currently always in IO space */
726 721
727 status = acpi_os_write_port(acpi_gbl_FADT->smi_cmd, value, 8); 722 status =
723 acpi_os_write_port(acpi_gbl_FADT.smi_command, value, 8);
728 break; 724 break;
729 725
730 default: 726 default:
@@ -783,7 +779,7 @@ acpi_hw_low_level_read(u32 width, u32 * value, struct acpi_generic_address *reg)
783 * Two address spaces supported: Memory or IO. 779 * Two address spaces supported: Memory or IO.
784 * PCI_Config is not supported here because the GAS struct is insufficient 780 * PCI_Config is not supported here because the GAS struct is insufficient
785 */ 781 */
786 switch (reg->address_space_id) { 782 switch (reg->space_id) {
787 case ACPI_ADR_SPACE_SYSTEM_MEMORY: 783 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
788 784
789 status = acpi_os_read_memory((acpi_physical_address) address, 785 status = acpi_os_read_memory((acpi_physical_address) address,
@@ -792,22 +788,20 @@ acpi_hw_low_level_read(u32 width, u32 * value, struct acpi_generic_address *reg)
792 788
793 case ACPI_ADR_SPACE_SYSTEM_IO: 789 case ACPI_ADR_SPACE_SYSTEM_IO:
794 790
795 status = acpi_os_read_port((acpi_io_address) address, 791 status =
796 value, width); 792 acpi_os_read_port((acpi_io_address) address, value, width);
797 break; 793 break;
798 794
799 default: 795 default:
800 ACPI_ERROR((AE_INFO, 796 ACPI_ERROR((AE_INFO,
801 "Unsupported address space: %X", 797 "Unsupported address space: %X", reg->space_id));
802 reg->address_space_id));
803 return (AE_BAD_PARAMETER); 798 return (AE_BAD_PARAMETER);
804 } 799 }
805 800
806 ACPI_DEBUG_PRINT((ACPI_DB_IO, 801 ACPI_DEBUG_PRINT((ACPI_DB_IO,
807 "Read: %8.8X width %2d from %8.8X%8.8X (%s)\n", 802 "Read: %8.8X width %2d from %8.8X%8.8X (%s)\n",
808 *value, width, 803 *value, width, ACPI_FORMAT_UINT64(address),
809 ACPI_FORMAT_UINT64(address), 804 acpi_ut_get_region_name(reg->space_id)));
810 acpi_ut_get_region_name(reg->address_space_id)));
811 805
812 return (status); 806 return (status);
813} 807}
@@ -854,7 +848,7 @@ acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address * reg)
854 * Two address spaces supported: Memory or IO. 848 * Two address spaces supported: Memory or IO.
855 * PCI_Config is not supported here because the GAS struct is insufficient 849 * PCI_Config is not supported here because the GAS struct is insufficient
856 */ 850 */
857 switch (reg->address_space_id) { 851 switch (reg->space_id) {
858 case ACPI_ADR_SPACE_SYSTEM_MEMORY: 852 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
859 853
860 status = acpi_os_write_memory((acpi_physical_address) address, 854 status = acpi_os_write_memory((acpi_physical_address) address,
@@ -863,22 +857,20 @@ acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address * reg)
863 857
864 case ACPI_ADR_SPACE_SYSTEM_IO: 858 case ACPI_ADR_SPACE_SYSTEM_IO:
865 859
866 status = acpi_os_write_port((acpi_io_address) address, 860 status = acpi_os_write_port((acpi_io_address) address, value,
867 value, width); 861 width);
868 break; 862 break;
869 863
870 default: 864 default:
871 ACPI_ERROR((AE_INFO, 865 ACPI_ERROR((AE_INFO,
872 "Unsupported address space: %X", 866 "Unsupported address space: %X", reg->space_id));
873 reg->address_space_id));
874 return (AE_BAD_PARAMETER); 867 return (AE_BAD_PARAMETER);
875 } 868 }
876 869
877 ACPI_DEBUG_PRINT((ACPI_DB_IO, 870 ACPI_DEBUG_PRINT((ACPI_DB_IO,
878 "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n", 871 "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n",
879 value, width, 872 value, width, ACPI_FORMAT_UINT64(address),
880 ACPI_FORMAT_UINT64(address), 873 acpi_ut_get_region_name(reg->space_id)));
881 acpi_ut_get_region_name(reg->address_space_id)));
882 874
883 return (status); 875 return (status);
884} 876}
diff --git a/drivers/acpi/hardware/hwsleep.c b/drivers/acpi/hardware/hwsleep.c
index 8bb43cae60c2..57901ca3ade9 100644
--- a/drivers/acpi/hardware/hwsleep.c
+++ b/drivers/acpi/hardware/hwsleep.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -43,6 +43,7 @@
43 */ 43 */
44 44
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include <acpi/actables.h>
46 47
47#define _COMPONENT ACPI_HARDWARE 48#define _COMPONENT ACPI_HARDWARE
48ACPI_MODULE_NAME("hwsleep") 49ACPI_MODULE_NAME("hwsleep")
@@ -62,17 +63,32 @@ ACPI_MODULE_NAME("hwsleep")
62acpi_status 63acpi_status
63acpi_set_firmware_waking_vector(acpi_physical_address physical_address) 64acpi_set_firmware_waking_vector(acpi_physical_address physical_address)
64{ 65{
66 struct acpi_table_facs *facs;
67 acpi_status status;
65 68
66 ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector); 69 ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);
67 70
71 /* Get the FACS */
72
73 status =
74 acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
75 (struct acpi_table_header **)&facs);
76 if (ACPI_FAILURE(status)) {
77 return_ACPI_STATUS(status);
78 }
79
68 /* Set the vector */ 80 /* Set the vector */
69 81
70 if (acpi_gbl_common_fACS.vector_width == 32) { 82 if ((facs->length < 32) || (!(facs->xfirmware_waking_vector))) {
71 *(ACPI_CAST_PTR 83 /*
72 (u32, acpi_gbl_common_fACS.firmware_waking_vector)) 84 * ACPI 1.0 FACS or short table or optional X_ field is zero
73 = (u32) physical_address; 85 */
86 facs->firmware_waking_vector = (u32) physical_address;
74 } else { 87 } else {
75 *acpi_gbl_common_fACS.firmware_waking_vector = physical_address; 88 /*
89 * ACPI 2.0 FACS with valid X_ field
90 */
91 facs->xfirmware_waking_vector = physical_address;
76 } 92 }
77 93
78 return_ACPI_STATUS(AE_OK); 94 return_ACPI_STATUS(AE_OK);
@@ -97,6 +113,8 @@ ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
97acpi_status 113acpi_status
98acpi_get_firmware_waking_vector(acpi_physical_address * physical_address) 114acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
99{ 115{
116 struct acpi_table_facs *facs;
117 acpi_status status;
100 118
101 ACPI_FUNCTION_TRACE(acpi_get_firmware_waking_vector); 119 ACPI_FUNCTION_TRACE(acpi_get_firmware_waking_vector);
102 120
@@ -104,16 +122,29 @@ acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
104 return_ACPI_STATUS(AE_BAD_PARAMETER); 122 return_ACPI_STATUS(AE_BAD_PARAMETER);
105 } 123 }
106 124
125 /* Get the FACS */
126
127 status =
128 acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
129 (struct acpi_table_header **)&facs);
130 if (ACPI_FAILURE(status)) {
131 return_ACPI_STATUS(status);
132 }
133
107 /* Get the vector */ 134 /* Get the vector */
108 135
109 if (acpi_gbl_common_fACS.vector_width == 32) { 136 if ((facs->length < 32) || (!(facs->xfirmware_waking_vector))) {
110 *physical_address = (acpi_physical_address) 137 /*
111 * 138 * ACPI 1.0 FACS or short table or optional X_ field is zero
112 (ACPI_CAST_PTR 139 */
113 (u32, acpi_gbl_common_fACS.firmware_waking_vector)); 140 *physical_address =
141 (acpi_physical_address) facs->firmware_waking_vector;
114 } else { 142 } else {
143 /*
144 * ACPI 2.0 FACS with valid X_ field
145 */
115 *physical_address = 146 *physical_address =
116 *acpi_gbl_common_fACS.firmware_waking_vector; 147 (acpi_physical_address) facs->xfirmware_waking_vector;
117 } 148 }
118 149
119 return_ACPI_STATUS(AE_OK); 150 return_ACPI_STATUS(AE_OK);
@@ -246,15 +277,14 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
246 277
247 /* Clear wake status */ 278 /* Clear wake status */
248 279
249 status = 280 status = acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1);
250 acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1, ACPI_MTX_DO_NOT_LOCK);
251 if (ACPI_FAILURE(status)) { 281 if (ACPI_FAILURE(status)) {
252 return_ACPI_STATUS(status); 282 return_ACPI_STATUS(status);
253 } 283 }
254 284
255 /* Clear all fixed and general purpose status bits */ 285 /* Clear all fixed and general purpose status bits */
256 286
257 status = acpi_hw_clear_acpi_status(ACPI_MTX_DO_NOT_LOCK); 287 status = acpi_hw_clear_acpi_status();
258 if (ACPI_FAILURE(status)) { 288 if (ACPI_FAILURE(status)) {
259 return_ACPI_STATUS(status); 289 return_ACPI_STATUS(status);
260 } 290 }
@@ -367,8 +397,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
367 /* Wait until we enter sleep state */ 397 /* Wait until we enter sleep state */
368 398
369 do { 399 do {
370 status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value, 400 status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value);
371 ACPI_MTX_DO_NOT_LOCK);
372 if (ACPI_FAILURE(status)) { 401 if (ACPI_FAILURE(status)) {
373 return_ACPI_STATUS(status); 402 return_ACPI_STATUS(status);
374 } 403 }
@@ -401,13 +430,12 @@ acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
401 430
402 ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_s4bios); 431 ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_s4bios);
403 432
404 status = 433 status = acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1);
405 acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1, ACPI_MTX_DO_NOT_LOCK);
406 if (ACPI_FAILURE(status)) { 434 if (ACPI_FAILURE(status)) {
407 return_ACPI_STATUS(status); 435 return_ACPI_STATUS(status);
408 } 436 }
409 437
410 status = acpi_hw_clear_acpi_status(ACPI_MTX_DO_NOT_LOCK); 438 status = acpi_hw_clear_acpi_status();
411 if (ACPI_FAILURE(status)) { 439 if (ACPI_FAILURE(status)) {
412 return_ACPI_STATUS(status); 440 return_ACPI_STATUS(status);
413 } 441 }
@@ -429,13 +457,12 @@ acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
429 457
430 ACPI_FLUSH_CPU_CACHE(); 458 ACPI_FLUSH_CPU_CACHE();
431 459
432 status = acpi_os_write_port(acpi_gbl_FADT->smi_cmd, 460 status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
433 (u32) acpi_gbl_FADT->S4bios_req, 8); 461 (u32) acpi_gbl_FADT.S4bios_request, 8);
434 462
435 do { 463 do {
436 acpi_os_stall(1000); 464 acpi_os_stall(1000);
437 status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value, 465 status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value);
438 ACPI_MTX_DO_NOT_LOCK);
439 if (ACPI_FAILURE(status)) { 466 if (ACPI_FAILURE(status)) {
440 return_ACPI_STATUS(status); 467 return_ACPI_STATUS(status);
441 } 468 }
@@ -568,13 +595,11 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
568 595
569 (void) 596 (void)
570 acpi_set_register(acpi_gbl_fixed_event_info 597 acpi_set_register(acpi_gbl_fixed_event_info
571 [ACPI_EVENT_POWER_BUTTON].enable_register_id, 1, 598 [ACPI_EVENT_POWER_BUTTON].enable_register_id, 1);
572 ACPI_MTX_DO_NOT_LOCK);
573 599
574 (void) 600 (void)
575 acpi_set_register(acpi_gbl_fixed_event_info 601 acpi_set_register(acpi_gbl_fixed_event_info
576 [ACPI_EVENT_POWER_BUTTON].status_register_id, 1, 602 [ACPI_EVENT_POWER_BUTTON].status_register_id, 1);
577 ACPI_MTX_DO_NOT_LOCK);
578 603
579 arg.integer.value = ACPI_SST_WORKING; 604 arg.integer.value = ACPI_SST_WORKING;
580 status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL); 605 status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
diff --git a/drivers/acpi/hardware/hwtimer.c b/drivers/acpi/hardware/hwtimer.c
index c4ec47c939fd..c32eab696acd 100644
--- a/drivers/acpi/hardware/hwtimer.c
+++ b/drivers/acpi/hardware/hwtimer.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -66,7 +66,7 @@ acpi_status acpi_get_timer_resolution(u32 * resolution)
66 return_ACPI_STATUS(AE_BAD_PARAMETER); 66 return_ACPI_STATUS(AE_BAD_PARAMETER);
67 } 67 }
68 68
69 if (acpi_gbl_FADT->tmr_val_ext == 0) { 69 if ((acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) == 0) {
70 *resolution = 24; 70 *resolution = 24;
71 } else { 71 } else {
72 *resolution = 32; 72 *resolution = 32;
@@ -98,7 +98,8 @@ acpi_status acpi_get_timer(u32 * ticks)
98 return_ACPI_STATUS(AE_BAD_PARAMETER); 98 return_ACPI_STATUS(AE_BAD_PARAMETER);
99 } 99 }
100 100
101 status = acpi_hw_low_level_read(32, ticks, &acpi_gbl_FADT->xpm_tmr_blk); 101 status =
102 acpi_hw_low_level_read(32, ticks, &acpi_gbl_FADT.xpm_timer_block);
102 103
103 return_ACPI_STATUS(status); 104 return_ACPI_STATUS(status);
104} 105}
@@ -153,7 +154,7 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
153 if (start_ticks < end_ticks) { 154 if (start_ticks < end_ticks) {
154 delta_ticks = end_ticks - start_ticks; 155 delta_ticks = end_ticks - start_ticks;
155 } else if (start_ticks > end_ticks) { 156 } else if (start_ticks > end_ticks) {
156 if (acpi_gbl_FADT->tmr_val_ext == 0) { 157 if ((acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) == 0) {
157 158
158 /* 24-bit Timer */ 159 /* 24-bit Timer */
159 160
diff --git a/drivers/acpi/motherboard.c b/drivers/acpi/motherboard.c
deleted file mode 100644
index 2e17ec75af03..000000000000
--- a/drivers/acpi/motherboard.c
+++ /dev/null
@@ -1,191 +0,0 @@
1/*
2 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or (at
6 * your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
18 */
19
20/* Purpose: Prevent PCMCIA cards from using motherboard resources. */
21
22#include <linux/kernel.h>
23#include <linux/init.h>
24#include <linux/types.h>
25#include <linux/pci.h>
26#include <linux/ioport.h>
27#include <asm/io.h>
28
29#include <acpi/acpi_bus.h>
30#include <acpi/acpi_drivers.h>
31
32#define _COMPONENT ACPI_SYSTEM_COMPONENT
33ACPI_MODULE_NAME("acpi_motherboard")
34
35/* Dell use PNP0C01 instead of PNP0C02 */
36#define ACPI_MB_HID1 "PNP0C01"
37#define ACPI_MB_HID2 "PNP0C02"
38/**
39 * Doesn't care about legacy IO ports, only IO ports beyond 0x1000 are reserved
40 * Doesn't care about the failure of 'request_region', since other may reserve
41 * the io ports as well
42 */
43#define IS_RESERVED_ADDR(base, len) \
44 (((len) > 0) && ((base) > 0) && ((base) + (len) < IO_SPACE_LIMIT) \
45 && ((base) + (len) > PCIBIOS_MIN_IO))
46/*
47 * Clearing the flag (IORESOURCE_BUSY) allows drivers to use
48 * the io ports if they really know they can use it, while
49 * still preventing hotplug PCI devices from using it.
50 */
51
52/*
53 * When CONFIG_PNP is enabled, pnp/system.c binds to PNP0C01
54 * and PNP0C02, redundant with acpi_reserve_io_ranges().
55 * But acpi_reserve_io_ranges() is necessary for !CONFIG_PNP.
56 */
57static acpi_status acpi_reserve_io_ranges(struct acpi_resource *res, void *data)
58{
59 struct resource *requested_res = NULL;
60
61
62 if (res->type == ACPI_RESOURCE_TYPE_IO) {
63 struct acpi_resource_io *io_res = &res->data.io;
64
65 if (io_res->minimum != io_res->maximum)
66 return AE_OK;
67 if (IS_RESERVED_ADDR
68 (io_res->minimum, io_res->address_length)) {
69 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
70 "Motherboard resources 0x%08x - 0x%08x\n",
71 io_res->minimum,
72 io_res->minimum +
73 io_res->address_length));
74 requested_res =
75 request_region(io_res->minimum,
76 io_res->address_length, "motherboard");
77 }
78 } else if (res->type == ACPI_RESOURCE_TYPE_FIXED_IO) {
79 struct acpi_resource_fixed_io *fixed_io_res =
80 &res->data.fixed_io;
81
82 if (IS_RESERVED_ADDR
83 (fixed_io_res->address, fixed_io_res->address_length)) {
84 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
85 "Motherboard resources 0x%08x - 0x%08x\n",
86 fixed_io_res->address,
87 fixed_io_res->address +
88 fixed_io_res->address_length));
89 requested_res =
90 request_region(fixed_io_res->address,
91 fixed_io_res->address_length,
92 "motherboard");
93 }
94 } else {
95 /* Memory mapped IO? */
96 }
97
98 if (requested_res)
99 requested_res->flags &= ~IORESOURCE_BUSY;
100 return AE_OK;
101}
102
103static int acpi_motherboard_add(struct acpi_device *device)
104{
105 if (!device)
106 return -EINVAL;
107 acpi_walk_resources(device->handle, METHOD_NAME__CRS,
108 acpi_reserve_io_ranges, NULL);
109
110 return 0;
111}
112
113static struct acpi_driver acpi_motherboard_driver1 = {
114 .name = "motherboard",
115 .class = "",
116 .ids = ACPI_MB_HID1,
117 .ops = {
118 .add = acpi_motherboard_add,
119 },
120};
121
122static struct acpi_driver acpi_motherboard_driver2 = {
123 .name = "motherboard",
124 .class = "",
125 .ids = ACPI_MB_HID2,
126 .ops = {
127 .add = acpi_motherboard_add,
128 },
129};
130
131static void __init acpi_request_region (struct acpi_generic_address *addr,
132 unsigned int length, char *desc)
133{
134 if (!addr->address || !length)
135 return;
136
137 if (addr->address_space_id == ACPI_ADR_SPACE_SYSTEM_IO)
138 request_region(addr->address, length, desc);
139 else if (addr->address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
140 request_mem_region(addr->address, length, desc);
141}
142
143static void __init acpi_reserve_resources(void)
144{
145 acpi_request_region(&acpi_gbl_FADT->xpm1a_evt_blk,
146 acpi_gbl_FADT->pm1_evt_len, "ACPI PM1a_EVT_BLK");
147
148 acpi_request_region(&acpi_gbl_FADT->xpm1b_evt_blk,
149 acpi_gbl_FADT->pm1_evt_len, "ACPI PM1b_EVT_BLK");
150
151 acpi_request_region(&acpi_gbl_FADT->xpm1a_cnt_blk,
152 acpi_gbl_FADT->pm1_cnt_len, "ACPI PM1a_CNT_BLK");
153
154 acpi_request_region(&acpi_gbl_FADT->xpm1b_cnt_blk,
155 acpi_gbl_FADT->pm1_cnt_len, "ACPI PM1b_CNT_BLK");
156
157 if (acpi_gbl_FADT->pm_tm_len == 4)
158 acpi_request_region(&acpi_gbl_FADT->xpm_tmr_blk, 4, "ACPI PM_TMR");
159
160 acpi_request_region(&acpi_gbl_FADT->xpm2_cnt_blk,
161 acpi_gbl_FADT->pm2_cnt_len, "ACPI PM2_CNT_BLK");
162
163 /* Length of GPE blocks must be a non-negative multiple of 2 */
164
165 if (!(acpi_gbl_FADT->gpe0_blk_len & 0x1))
166 acpi_request_region(&acpi_gbl_FADT->xgpe0_blk,
167 acpi_gbl_FADT->gpe0_blk_len, "ACPI GPE0_BLK");
168
169 if (!(acpi_gbl_FADT->gpe1_blk_len & 0x1))
170 acpi_request_region(&acpi_gbl_FADT->xgpe1_blk,
171 acpi_gbl_FADT->gpe1_blk_len, "ACPI GPE1_BLK");
172}
173
174static int __init acpi_motherboard_init(void)
175{
176 acpi_bus_register_driver(&acpi_motherboard_driver1);
177 acpi_bus_register_driver(&acpi_motherboard_driver2);
178 /*
179 * Guarantee motherboard IO reservation first
180 * This module must run after scan.c
181 */
182 if (!acpi_disabled)
183 acpi_reserve_resources();
184 return 0;
185}
186
187/**
188 * Reserve motherboard resources after PCI claim BARs,
189 * but before PCI assign resources for uninitialized PCI devices
190 */
191fs_initcall(acpi_motherboard_init);
diff --git a/drivers/acpi/namespace/nsaccess.c b/drivers/acpi/namespace/nsaccess.c
index c1c6c236df9a..57faf598bad8 100644
--- a/drivers/acpi/namespace/nsaccess.c
+++ b/drivers/acpi/namespace/nsaccess.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -195,31 +195,27 @@ acpi_status acpi_ns_root_initialize(void)
195 obj_desc->mutex.sync_level = 195 obj_desc->mutex.sync_level =
196 (u8) (ACPI_TO_INTEGER(val) - 1); 196 (u8) (ACPI_TO_INTEGER(val) - 1);
197 197
198 if (ACPI_STRCMP(init_val->name, "_GL_") == 0) { 198 /* Create a mutex */
199 199
200 /* Create a counting semaphore for the global lock */ 200 status =
201 acpi_os_create_mutex(&obj_desc->mutex.
202 os_mutex);
203 if (ACPI_FAILURE(status)) {
204 acpi_ut_remove_reference(obj_desc);
205 goto unlock_and_exit;
206 }
201 207
202 status = 208 /* Special case for ACPI Global Lock */
203 acpi_os_create_semaphore
204 (ACPI_NO_UNIT_LIMIT, 1,
205 &acpi_gbl_global_lock_semaphore);
206 if (ACPI_FAILURE(status)) {
207 acpi_ut_remove_reference
208 (obj_desc);
209 goto unlock_and_exit;
210 }
211 209
212 /* Mark this mutex as very special */ 210 if (ACPI_STRCMP(init_val->name, "_GL_") == 0) {
211 acpi_gbl_global_lock_mutex =
212 obj_desc->mutex.os_mutex;
213 213
214 obj_desc->mutex.os_mutex = 214 /* Create additional counting semaphore for global lock */
215 ACPI_GLOBAL_LOCK;
216 } else {
217 /* Create a mutex */
218 215
219 status = 216 status =
220 acpi_os_create_mutex(&obj_desc-> 217 acpi_os_create_semaphore(1, 0,
221 mutex. 218 &acpi_gbl_global_lock_semaphore);
222 os_mutex);
223 if (ACPI_FAILURE(status)) { 219 if (ACPI_FAILURE(status)) {
224 acpi_ut_remove_reference 220 acpi_ut_remove_reference
225 (obj_desc); 221 (obj_desc);
diff --git a/drivers/acpi/namespace/nsalloc.c b/drivers/acpi/namespace/nsalloc.c
index 55b407aae266..1d693d8ad2d8 100644
--- a/drivers/acpi/namespace/nsalloc.c
+++ b/drivers/acpi/namespace/nsalloc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -61,6 +61,9 @@ ACPI_MODULE_NAME("nsalloc")
61struct acpi_namespace_node *acpi_ns_create_node(u32 name) 61struct acpi_namespace_node *acpi_ns_create_node(u32 name)
62{ 62{
63 struct acpi_namespace_node *node; 63 struct acpi_namespace_node *node;
64#ifdef ACPI_DBG_TRACK_ALLOCATIONS
65 u32 temp;
66#endif
64 67
65 ACPI_FUNCTION_TRACE(ns_create_node); 68 ACPI_FUNCTION_TRACE(ns_create_node);
66 69
@@ -71,6 +74,15 @@ struct acpi_namespace_node *acpi_ns_create_node(u32 name)
71 74
72 ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_allocated++); 75 ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_allocated++);
73 76
77#ifdef ACPI_DBG_TRACK_ALLOCATIONS
78 temp =
79 acpi_gbl_ns_node_list->total_allocated -
80 acpi_gbl_ns_node_list->total_freed;
81 if (temp > acpi_gbl_ns_node_list->max_occupied) {
82 acpi_gbl_ns_node_list->max_occupied = temp;
83 }
84#endif
85
74 node->name.integer = name; 86 node->name.integer = name;
75 ACPI_SET_DESCRIPTOR_TYPE(node, ACPI_DESC_TYPE_NAMED); 87 ACPI_SET_DESCRIPTOR_TYPE(node, ACPI_DESC_TYPE_NAMED);
76 return_PTR(node); 88 return_PTR(node);
diff --git a/drivers/acpi/namespace/nsdump.c b/drivers/acpi/namespace/nsdump.c
index d72df66aa965..1fc4f86676e1 100644
--- a/drivers/acpi/namespace/nsdump.c
+++ b/drivers/acpi/namespace/nsdump.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -205,7 +205,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
205 205
206 if (!acpi_ut_valid_acpi_name(this_node->name.integer)) { 206 if (!acpi_ut_valid_acpi_name(this_node->name.integer)) {
207 this_node->name.integer = 207 this_node->name.integer =
208 acpi_ut_repair_name(this_node->name.integer); 208 acpi_ut_repair_name(this_node->name.ascii);
209 209
210 ACPI_WARNING((AE_INFO, "Invalid ACPI Name %08X", 210 ACPI_WARNING((AE_INFO, "Invalid ACPI Name %08X",
211 this_node->name.integer)); 211 this_node->name.integer));
@@ -226,6 +226,12 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
226 obj_desc = acpi_ns_get_attached_object(this_node); 226 obj_desc = acpi_ns_get_attached_object(this_node);
227 acpi_dbg_level = dbg_level; 227 acpi_dbg_level = dbg_level;
228 228
229 /* Temp nodes are those nodes created by a control method */
230
231 if (this_node->flags & ANOBJ_TEMPORARY) {
232 acpi_os_printf("(T) ");
233 }
234
229 switch (info->display_type & ACPI_DISPLAY_MASK) { 235 switch (info->display_type & ACPI_DISPLAY_MASK) {
230 case ACPI_DISPLAY_SUMMARY: 236 case ACPI_DISPLAY_SUMMARY:
231 237
@@ -623,7 +629,8 @@ acpi_ns_dump_objects(acpi_object_type type,
623 info.display_type = display_type; 629 info.display_type = display_type;
624 630
625 (void)acpi_ns_walk_namespace(type, start_handle, max_depth, 631 (void)acpi_ns_walk_namespace(type, start_handle, max_depth,
626 ACPI_NS_WALK_NO_UNLOCK, 632 ACPI_NS_WALK_NO_UNLOCK |
633 ACPI_NS_WALK_TEMP_NODES,
627 acpi_ns_dump_one_object, (void *)&info, 634 acpi_ns_dump_one_object, (void *)&info,
628 NULL); 635 NULL);
629} 636}
diff --git a/drivers/acpi/namespace/nsdumpdv.c b/drivers/acpi/namespace/nsdumpdv.c
index c6bf5d30fca3..5097e167939e 100644
--- a/drivers/acpi/namespace/nsdumpdv.c
+++ b/drivers/acpi/namespace/nsdumpdv.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/namespace/nseval.c b/drivers/acpi/namespace/nseval.c
index 4b0a4a8c9843..aa6370c67ec1 100644
--- a/drivers/acpi/namespace/nseval.c
+++ b/drivers/acpi/namespace/nseval.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -154,11 +154,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
154 * Execute the method via the interpreter. The interpreter is locked 154 * Execute the method via the interpreter. The interpreter is locked
155 * here before calling into the AML parser 155 * here before calling into the AML parser
156 */ 156 */
157 status = acpi_ex_enter_interpreter(); 157 acpi_ex_enter_interpreter();
158 if (ACPI_FAILURE(status)) {
159 return_ACPI_STATUS(status);
160 }
161
162 status = acpi_ps_execute_method(info); 158 status = acpi_ps_execute_method(info);
163 acpi_ex_exit_interpreter(); 159 acpi_ex_exit_interpreter();
164 } else { 160 } else {
@@ -182,10 +178,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
182 * resolution, we must lock it because we could access an opregion. 178 * resolution, we must lock it because we could access an opregion.
183 * The opregion access code assumes that the interpreter is locked. 179 * The opregion access code assumes that the interpreter is locked.
184 */ 180 */
185 status = acpi_ex_enter_interpreter(); 181 acpi_ex_enter_interpreter();
186 if (ACPI_FAILURE(status)) {
187 return_ACPI_STATUS(status);
188 }
189 182
190 /* Function has a strange interface */ 183 /* Function has a strange interface */
191 184
diff --git a/drivers/acpi/namespace/nsinit.c b/drivers/acpi/namespace/nsinit.c
index aec8488c0019..326af8fc0ce7 100644
--- a/drivers/acpi/namespace/nsinit.c
+++ b/drivers/acpi/namespace/nsinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -213,7 +213,7 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
213 u32 level, void *context, void **return_value) 213 u32 level, void *context, void **return_value)
214{ 214{
215 acpi_object_type type; 215 acpi_object_type type;
216 acpi_status status; 216 acpi_status status = AE_OK;
217 struct acpi_init_walk_info *info = 217 struct acpi_init_walk_info *info =
218 (struct acpi_init_walk_info *)context; 218 (struct acpi_init_walk_info *)context;
219 struct acpi_namespace_node *node = 219 struct acpi_namespace_node *node =
@@ -267,10 +267,7 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
267 /* 267 /*
268 * Must lock the interpreter before executing AML code 268 * Must lock the interpreter before executing AML code
269 */ 269 */
270 status = acpi_ex_enter_interpreter(); 270 acpi_ex_enter_interpreter();
271 if (ACPI_FAILURE(status)) {
272 return (status);
273 }
274 271
275 /* 272 /*
276 * Each of these types can contain executable AML code within the 273 * Each of these types can contain executable AML code within the
diff --git a/drivers/acpi/namespace/nsload.c b/drivers/acpi/namespace/nsload.c
index fe75d888e183..d4f9654fd20f 100644
--- a/drivers/acpi/namespace/nsload.c
+++ b/drivers/acpi/namespace/nsload.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -44,13 +44,12 @@
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acnamesp.h> 45#include <acpi/acnamesp.h>
46#include <acpi/acdispat.h> 46#include <acpi/acdispat.h>
47#include <acpi/actables.h>
47 48
48#define _COMPONENT ACPI_NAMESPACE 49#define _COMPONENT ACPI_NAMESPACE
49ACPI_MODULE_NAME("nsload") 50ACPI_MODULE_NAME("nsload")
50 51
51/* Local prototypes */ 52/* Local prototypes */
52static acpi_status acpi_ns_load_table_by_type(acpi_table_type table_type);
53
54#ifdef ACPI_FUTURE_IMPLEMENTATION 53#ifdef ACPI_FUTURE_IMPLEMENTATION
55acpi_status acpi_ns_unload_namespace(acpi_handle handle); 54acpi_status acpi_ns_unload_namespace(acpi_handle handle);
56 55
@@ -62,7 +61,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle);
62 * 61 *
63 * FUNCTION: acpi_ns_load_table 62 * FUNCTION: acpi_ns_load_table
64 * 63 *
65 * PARAMETERS: table_desc - Descriptor for table to be loaded 64 * PARAMETERS: table_index - Index for table to be loaded
66 * Node - Owning NS node 65 * Node - Owning NS node
67 * 66 *
68 * RETURN: Status 67 * RETURN: Status
@@ -72,42 +71,13 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle);
72 ******************************************************************************/ 71 ******************************************************************************/
73 72
74acpi_status 73acpi_status
75acpi_ns_load_table(struct acpi_table_desc *table_desc, 74acpi_ns_load_table(acpi_native_uint table_index,
76 struct acpi_namespace_node *node) 75 struct acpi_namespace_node *node)
77{ 76{
78 acpi_status status; 77 acpi_status status;
79 78
80 ACPI_FUNCTION_TRACE(ns_load_table); 79 ACPI_FUNCTION_TRACE(ns_load_table);
81 80
82 /* Check if table contains valid AML (must be DSDT, PSDT, SSDT, etc.) */
83
84 if (!
85 (acpi_gbl_table_data[table_desc->type].
86 flags & ACPI_TABLE_EXECUTABLE)) {
87
88 /* Just ignore this table */
89
90 return_ACPI_STATUS(AE_OK);
91 }
92
93 /* Check validity of the AML start and length */
94
95 if (!table_desc->aml_start) {
96 ACPI_ERROR((AE_INFO, "Null AML pointer"));
97 return_ACPI_STATUS(AE_BAD_PARAMETER);
98 }
99
100 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AML block at %p\n",
101 table_desc->aml_start));
102
103 /* Ignore table if there is no AML contained within */
104
105 if (!table_desc->aml_length) {
106 ACPI_WARNING((AE_INFO, "Zero-length AML block in table [%4.4s]",
107 table_desc->pointer->signature));
108 return_ACPI_STATUS(AE_OK);
109 }
110
111 /* 81 /*
112 * Parse the table and load the namespace with all named 82 * Parse the table and load the namespace with all named
113 * objects found within. Control methods are NOT parsed 83 * objects found within. Control methods are NOT parsed
@@ -117,15 +87,34 @@ acpi_ns_load_table(struct acpi_table_desc *table_desc,
117 * to another control method, we can't continue parsing 87 * to another control method, we can't continue parsing
118 * because we don't know how many arguments to parse next! 88 * because we don't know how many arguments to parse next!
119 */ 89 */
90 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
91 if (ACPI_FAILURE(status)) {
92 return_ACPI_STATUS(status);
93 }
94
95 /* If table already loaded into namespace, just return */
96
97 if (acpi_tb_is_table_loaded(table_index)) {
98 status = AE_ALREADY_EXISTS;
99 goto unlock;
100 }
101
120 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 102 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
121 "**** Loading table into namespace ****\n")); 103 "**** Loading table into namespace ****\n"));
122 104
123 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); 105 status = acpi_tb_allocate_owner_id(table_index);
124 if (ACPI_FAILURE(status)) { 106 if (ACPI_FAILURE(status)) {
125 return_ACPI_STATUS(status); 107 goto unlock;
108 }
109
110 status = acpi_ns_parse_table(table_index, node->child);
111 if (ACPI_SUCCESS(status)) {
112 acpi_tb_set_table_loaded_flag(table_index, TRUE);
113 } else {
114 acpi_tb_release_owner_id(table_index);
126 } 115 }
127 116
128 status = acpi_ns_parse_table(table_desc, node->child); 117 unlock:
129 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 118 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
130 119
131 if (ACPI_FAILURE(status)) { 120 if (ACPI_FAILURE(status)) {
@@ -141,7 +130,7 @@ acpi_ns_load_table(struct acpi_table_desc *table_desc,
141 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 130 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
142 "**** Begin Table Method Parsing and Object Initialization ****\n")); 131 "**** Begin Table Method Parsing and Object Initialization ****\n"));
143 132
144 status = acpi_ds_initialize_objects(table_desc, node); 133 status = acpi_ds_initialize_objects(table_index, node);
145 134
146 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 135 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
147 "**** Completed Table Method Parsing and Object Initialization ****\n")); 136 "**** Completed Table Method Parsing and Object Initialization ****\n"));
@@ -149,99 +138,7 @@ acpi_ns_load_table(struct acpi_table_desc *table_desc,
149 return_ACPI_STATUS(status); 138 return_ACPI_STATUS(status);
150} 139}
151 140
152/******************************************************************************* 141#ifdef ACPI_OBSOLETE_FUNCTIONS
153 *
154 * FUNCTION: acpi_ns_load_table_by_type
155 *
156 * PARAMETERS: table_type - Id of the table type to load
157 *
158 * RETURN: Status
159 *
160 * DESCRIPTION: Load an ACPI table or tables into the namespace. All tables
161 * of the given type are loaded. The mechanism allows this
162 * routine to be called repeatedly.
163 *
164 ******************************************************************************/
165
166static acpi_status acpi_ns_load_table_by_type(acpi_table_type table_type)
167{
168 u32 i;
169 acpi_status status;
170 struct acpi_table_desc *table_desc;
171
172 ACPI_FUNCTION_TRACE(ns_load_table_by_type);
173
174 status = acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
175 if (ACPI_FAILURE(status)) {
176 return_ACPI_STATUS(status);
177 }
178
179 /*
180 * Table types supported are:
181 * DSDT (one), SSDT/PSDT (multiple)
182 */
183 switch (table_type) {
184 case ACPI_TABLE_ID_DSDT:
185
186 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace load: DSDT\n"));
187
188 table_desc = acpi_gbl_table_lists[ACPI_TABLE_ID_DSDT].next;
189
190 /* If table already loaded into namespace, just return */
191
192 if (table_desc->loaded_into_namespace) {
193 goto unlock_and_exit;
194 }
195
196 /* Now load the single DSDT */
197
198 status = acpi_ns_load_table(table_desc, acpi_gbl_root_node);
199 if (ACPI_SUCCESS(status)) {
200 table_desc->loaded_into_namespace = TRUE;
201 }
202 break;
203
204 case ACPI_TABLE_ID_SSDT:
205 case ACPI_TABLE_ID_PSDT:
206
207 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
208 "Namespace load: %d SSDT or PSDTs\n",
209 acpi_gbl_table_lists[table_type].count));
210
211 /*
212 * Traverse list of SSDT or PSDT tables
213 */
214 table_desc = acpi_gbl_table_lists[table_type].next;
215 for (i = 0; i < acpi_gbl_table_lists[table_type].count; i++) {
216 /*
217 * Only attempt to load table into namespace if it is not
218 * already loaded!
219 */
220 if (!table_desc->loaded_into_namespace) {
221 status =
222 acpi_ns_load_table(table_desc,
223 acpi_gbl_root_node);
224 if (ACPI_FAILURE(status)) {
225 break;
226 }
227
228 table_desc->loaded_into_namespace = TRUE;
229 }
230
231 table_desc = table_desc->next;
232 }
233 break;
234
235 default:
236 status = AE_SUPPORT;
237 break;
238 }
239
240 unlock_and_exit:
241 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
242 return_ACPI_STATUS(status);
243}
244
245/******************************************************************************* 142/*******************************************************************************
246 * 143 *
247 * FUNCTION: acpi_load_namespace 144 * FUNCTION: acpi_load_namespace
@@ -288,6 +185,7 @@ acpi_status acpi_ns_load_namespace(void)
288 185
289 return_ACPI_STATUS(status); 186 return_ACPI_STATUS(status);
290} 187}
188#endif
291 189
292#ifdef ACPI_FUTURE_IMPLEMENTATION 190#ifdef ACPI_FUTURE_IMPLEMENTATION
293/******************************************************************************* 191/*******************************************************************************
diff --git a/drivers/acpi/namespace/nsnames.c b/drivers/acpi/namespace/nsnames.c
index 97b8332c9746..cbd94af08cc5 100644
--- a/drivers/acpi/namespace/nsnames.c
+++ b/drivers/acpi/namespace/nsnames.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/namespace/nsobject.c b/drivers/acpi/namespace/nsobject.c
index aabe8794b908..d9d7377bc6e6 100644
--- a/drivers/acpi/namespace/nsobject.c
+++ b/drivers/acpi/namespace/nsobject.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/namespace/nsparse.c b/drivers/acpi/namespace/nsparse.c
index 155505a4ef69..e696aa847990 100644
--- a/drivers/acpi/namespace/nsparse.c
+++ b/drivers/acpi/namespace/nsparse.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -45,6 +45,7 @@
45#include <acpi/acnamesp.h> 45#include <acpi/acnamesp.h>
46#include <acpi/acparser.h> 46#include <acpi/acparser.h>
47#include <acpi/acdispat.h> 47#include <acpi/acdispat.h>
48#include <acpi/actables.h>
48 49
49#define _COMPONENT ACPI_NAMESPACE 50#define _COMPONENT ACPI_NAMESPACE
50ACPI_MODULE_NAME("nsparse") 51ACPI_MODULE_NAME("nsparse")
@@ -62,14 +63,24 @@ ACPI_MODULE_NAME("nsparse")
62 * 63 *
63 ******************************************************************************/ 64 ******************************************************************************/
64acpi_status 65acpi_status
65acpi_ns_one_complete_parse(u8 pass_number, struct acpi_table_desc *table_desc) 66acpi_ns_one_complete_parse(acpi_native_uint pass_number,
67 acpi_native_uint table_index)
66{ 68{
67 union acpi_parse_object *parse_root; 69 union acpi_parse_object *parse_root;
68 acpi_status status; 70 acpi_status status;
71 acpi_native_uint aml_length;
72 u8 *aml_start;
69 struct acpi_walk_state *walk_state; 73 struct acpi_walk_state *walk_state;
74 struct acpi_table_header *table;
75 acpi_owner_id owner_id;
70 76
71 ACPI_FUNCTION_TRACE(ns_one_complete_parse); 77 ACPI_FUNCTION_TRACE(ns_one_complete_parse);
72 78
79 status = acpi_tb_get_owner_id(table_index, &owner_id);
80 if (ACPI_FAILURE(status)) {
81 return_ACPI_STATUS(status);
82 }
83
73 /* Create and init a Root Node */ 84 /* Create and init a Root Node */
74 85
75 parse_root = acpi_ps_create_scope_op(); 86 parse_root = acpi_ps_create_scope_op();
@@ -79,26 +90,41 @@ acpi_ns_one_complete_parse(u8 pass_number, struct acpi_table_desc *table_desc)
79 90
80 /* Create and initialize a new walk state */ 91 /* Create and initialize a new walk state */
81 92
82 walk_state = acpi_ds_create_walk_state(table_desc->owner_id, 93 walk_state = acpi_ds_create_walk_state(owner_id, NULL, NULL, NULL);
83 NULL, NULL, NULL);
84 if (!walk_state) { 94 if (!walk_state) {
85 acpi_ps_free_op(parse_root); 95 acpi_ps_free_op(parse_root);
86 return_ACPI_STATUS(AE_NO_MEMORY); 96 return_ACPI_STATUS(AE_NO_MEMORY);
87 } 97 }
88 98
89 status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL, 99 status = acpi_get_table_by_index(table_index, &table);
90 table_desc->aml_start, 100 if (ACPI_FAILURE(status)) {
91 table_desc->aml_length, NULL, 101 acpi_ds_delete_walk_state(walk_state);
92 pass_number); 102 acpi_ps_free_op(parse_root);
103 return_ACPI_STATUS(status);
104 }
105
106 /* Table must consist of at least a complete header */
107
108 if (table->length < sizeof(struct acpi_table_header)) {
109 status = AE_BAD_HEADER;
110 } else {
111 aml_start = (u8 *) table + sizeof(struct acpi_table_header);
112 aml_length = table->length - sizeof(struct acpi_table_header);
113 status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL,
114 aml_start, aml_length, NULL,
115 (u8) pass_number);
116 }
117
93 if (ACPI_FAILURE(status)) { 118 if (ACPI_FAILURE(status)) {
94 acpi_ds_delete_walk_state(walk_state); 119 acpi_ds_delete_walk_state(walk_state);
120 acpi_ps_delete_parse_tree(parse_root);
95 return_ACPI_STATUS(status); 121 return_ACPI_STATUS(status);
96 } 122 }
97 123
98 /* Parse the AML */ 124 /* Parse the AML */
99 125
100 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "*PARSE* pass %d parse\n", 126 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "*PARSE* pass %d parse\n",
101 pass_number)); 127 (unsigned)pass_number));
102 status = acpi_ps_parse_aml(walk_state); 128 status = acpi_ps_parse_aml(walk_state);
103 129
104 acpi_ps_delete_parse_tree(parse_root); 130 acpi_ps_delete_parse_tree(parse_root);
@@ -119,7 +145,7 @@ acpi_ns_one_complete_parse(u8 pass_number, struct acpi_table_desc *table_desc)
119 ******************************************************************************/ 145 ******************************************************************************/
120 146
121acpi_status 147acpi_status
122acpi_ns_parse_table(struct acpi_table_desc *table_desc, 148acpi_ns_parse_table(acpi_native_uint table_index,
123 struct acpi_namespace_node *start_node) 149 struct acpi_namespace_node *start_node)
124{ 150{
125 acpi_status status; 151 acpi_status status;
@@ -134,10 +160,10 @@ acpi_ns_parse_table(struct acpi_table_desc *table_desc,
134 * each Parser Op subtree is deleted when it is finished. This saves 160 * each Parser Op subtree is deleted when it is finished. This saves
135 * a great deal of memory, and allows a small cache of parse objects 161 * a great deal of memory, and allows a small cache of parse objects
136 * to service the entire parse. The second pass of the parse then 162 * to service the entire parse. The second pass of the parse then
137 * performs another complete parse of the AML.. 163 * performs another complete parse of the AML.
138 */ 164 */
139 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n")); 165 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n"));
140 status = acpi_ns_one_complete_parse(1, table_desc); 166 status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1, table_index);
141 if (ACPI_FAILURE(status)) { 167 if (ACPI_FAILURE(status)) {
142 return_ACPI_STATUS(status); 168 return_ACPI_STATUS(status);
143 } 169 }
@@ -152,7 +178,7 @@ acpi_ns_parse_table(struct acpi_table_desc *table_desc,
152 * parse objects are all cached. 178 * parse objects are all cached.
153 */ 179 */
154 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 2\n")); 180 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 2\n"));
155 status = acpi_ns_one_complete_parse(2, table_desc); 181 status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2, table_index);
156 if (ACPI_FAILURE(status)) { 182 if (ACPI_FAILURE(status)) {
157 return_ACPI_STATUS(status); 183 return_ACPI_STATUS(status);
158 } 184 }
diff --git a/drivers/acpi/namespace/nssearch.c b/drivers/acpi/namespace/nssearch.c
index 500e2bbcfaf7..e863be665ce8 100644
--- a/drivers/acpi/namespace/nssearch.c
+++ b/drivers/acpi/namespace/nssearch.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -321,7 +321,8 @@ acpi_ns_search_and_enter(u32 target_name,
321 * even though there are a few bad names. 321 * even though there are a few bad names.
322 */ 322 */
323 if (!acpi_ut_valid_acpi_name(target_name)) { 323 if (!acpi_ut_valid_acpi_name(target_name)) {
324 target_name = acpi_ut_repair_name(target_name); 324 target_name =
325 acpi_ut_repair_name(ACPI_CAST_PTR(char, &target_name));
325 326
326 /* Report warning only if in strict mode or debug mode */ 327 /* Report warning only if in strict mode or debug mode */
327 328
@@ -401,6 +402,10 @@ acpi_ns_search_and_enter(u32 target_name,
401 } 402 }
402#endif 403#endif
403 404
405 if (flags & ACPI_NS_TEMPORARY) {
406 new_node->flags |= ANOBJ_TEMPORARY;
407 }
408
404 /* Install the new object into the parent's list of children */ 409 /* Install the new object into the parent's list of children */
405 410
406 acpi_ns_install_node(walk_state, node, new_node, type); 411 acpi_ns_install_node(walk_state, node, new_node, type);
diff --git a/drivers/acpi/namespace/nsutils.c b/drivers/acpi/namespace/nsutils.c
index aa4e799d9a8c..90fd059615ff 100644
--- a/drivers/acpi/namespace/nsutils.c
+++ b/drivers/acpi/namespace/nsutils.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -770,13 +770,6 @@ void acpi_ns_terminate(void)
770 } 770 }
771 771
772 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace freed\n")); 772 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace freed\n"));
773
774 /*
775 * 2) Now we can delete the ACPI tables
776 */
777 acpi_tb_delete_all_tables();
778 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n"));
779
780 return_VOID; 773 return_VOID;
781} 774}
782 775
diff --git a/drivers/acpi/namespace/nswalk.c b/drivers/acpi/namespace/nswalk.c
index c8f6bef16ed0..94eb8f332d94 100644
--- a/drivers/acpi/namespace/nswalk.c
+++ b/drivers/acpi/namespace/nswalk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -126,7 +126,7 @@ struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type,
126 * PARAMETERS: Type - acpi_object_type to search for 126 * PARAMETERS: Type - acpi_object_type to search for
127 * start_node - Handle in namespace where search begins 127 * start_node - Handle in namespace where search begins
128 * max_depth - Depth to which search is to reach 128 * max_depth - Depth to which search is to reach
129 * unlock_before_callback- Whether to unlock the NS before invoking 129 * Flags - Whether to unlock the NS before invoking
130 * the callback routine 130 * the callback routine
131 * user_function - Called when an object of "Type" is found 131 * user_function - Called when an object of "Type" is found
132 * Context - Passed to user function 132 * Context - Passed to user function
@@ -153,7 +153,7 @@ acpi_status
153acpi_ns_walk_namespace(acpi_object_type type, 153acpi_ns_walk_namespace(acpi_object_type type,
154 acpi_handle start_node, 154 acpi_handle start_node,
155 u32 max_depth, 155 u32 max_depth,
156 u8 unlock_before_callback, 156 u32 flags,
157 acpi_walk_callback user_function, 157 acpi_walk_callback user_function,
158 void *context, void **return_value) 158 void *context, void **return_value)
159{ 159{
@@ -193,20 +193,34 @@ acpi_ns_walk_namespace(acpi_object_type type,
193 acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node, 193 acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
194 child_node); 194 child_node);
195 if (child_node) { 195 if (child_node) {
196 /* 196
197 * Found node, Get the type if we are not 197 /* Found next child, get the type if we are not searching for ANY */
198 * searching for ANY 198
199 */
200 if (type != ACPI_TYPE_ANY) { 199 if (type != ACPI_TYPE_ANY) {
201 child_type = child_node->type; 200 child_type = child_node->type;
202 } 201 }
203 202
204 if (child_type == type) { 203 /*
204 * Ignore all temporary namespace nodes (created during control
205 * method execution) unless told otherwise. These temporary nodes
206 * can cause a race condition because they can be deleted during the
207 * execution of the user function (if the namespace is unlocked before
208 * invocation of the user function.) Only the debugger namespace dump
209 * will examine the temporary nodes.
210 */
211 if ((child_node->flags & ANOBJ_TEMPORARY) &&
212 !(flags & ACPI_NS_WALK_TEMP_NODES)) {
213 status = AE_CTRL_DEPTH;
214 }
215
216 /* Type must match requested type */
217
218 else if (child_type == type) {
205 /* 219 /*
206 * Found a matching node, invoke the user 220 * Found a matching node, invoke the user callback function.
207 * callback function 221 * Unlock the namespace if flag is set.
208 */ 222 */
209 if (unlock_before_callback) { 223 if (flags & ACPI_NS_WALK_UNLOCK) {
210 mutex_status = 224 mutex_status =
211 acpi_ut_release_mutex 225 acpi_ut_release_mutex
212 (ACPI_MTX_NAMESPACE); 226 (ACPI_MTX_NAMESPACE);
@@ -216,10 +230,11 @@ acpi_ns_walk_namespace(acpi_object_type type,
216 } 230 }
217 } 231 }
218 232
219 status = user_function(child_node, level, 233 status =
220 context, return_value); 234 user_function(child_node, level, context,
235 return_value);
221 236
222 if (unlock_before_callback) { 237 if (flags & ACPI_NS_WALK_UNLOCK) {
223 mutex_status = 238 mutex_status =
224 acpi_ut_acquire_mutex 239 acpi_ut_acquire_mutex
225 (ACPI_MTX_NAMESPACE); 240 (ACPI_MTX_NAMESPACE);
@@ -251,20 +266,17 @@ acpi_ns_walk_namespace(acpi_object_type type,
251 } 266 }
252 267
253 /* 268 /*
254 * Depth first search: 269 * Depth first search: Attempt to go down another level in the
255 * Attempt to go down another level in the namespace 270 * namespace if we are allowed to. Don't go any further if we have
256 * if we are allowed to. Don't go any further if we 271 * reached the caller specified maximum depth or if the user
257 * have reached the caller specified maximum depth 272 * function has specified that the maximum depth has been reached.
258 * or if the user function has specified that the
259 * maximum depth has been reached.
260 */ 273 */
261 if ((level < max_depth) && (status != AE_CTRL_DEPTH)) { 274 if ((level < max_depth) && (status != AE_CTRL_DEPTH)) {
262 if (acpi_ns_get_next_node 275 if (acpi_ns_get_next_node
263 (ACPI_TYPE_ANY, child_node, NULL)) { 276 (ACPI_TYPE_ANY, child_node, NULL)) {
264 /* 277
265 * There is at least one child of this 278 /* There is at least one child of this node, visit it */
266 * node, visit the onde 279
267 */
268 level++; 280 level++;
269 parent_node = child_node; 281 parent_node = child_node;
270 child_node = NULL; 282 child_node = NULL;
@@ -272,9 +284,8 @@ acpi_ns_walk_namespace(acpi_object_type type,
272 } 284 }
273 } else { 285 } else {
274 /* 286 /*
275 * No more children of this node (acpi_ns_get_next_node 287 * No more children of this node (acpi_ns_get_next_node failed), go
276 * failed), go back upwards in the namespace tree to 288 * back upwards in the namespace tree to the node's parent.
277 * the node's parent.
278 */ 289 */
279 level--; 290 level--;
280 child_node = parent_node; 291 child_node = parent_node;
diff --git a/drivers/acpi/namespace/nsxfeval.c b/drivers/acpi/namespace/nsxfeval.c
index dca6799ac678..7ac6ace50059 100644
--- a/drivers/acpi/namespace/nsxfeval.c
+++ b/drivers/acpi/namespace/nsxfeval.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -170,7 +170,6 @@ acpi_evaluate_object(acpi_handle handle,
170 struct acpi_buffer *return_buffer) 170 struct acpi_buffer *return_buffer)
171{ 171{
172 acpi_status status; 172 acpi_status status;
173 acpi_status status2;
174 struct acpi_evaluate_info *info; 173 struct acpi_evaluate_info *info;
175 acpi_size buffer_space_needed; 174 acpi_size buffer_space_needed;
176 u32 i; 175 u32 i;
@@ -329,14 +328,12 @@ acpi_evaluate_object(acpi_handle handle,
329 * Delete the internal return object. NOTE: Interpreter must be 328 * Delete the internal return object. NOTE: Interpreter must be
330 * locked to avoid race condition. 329 * locked to avoid race condition.
331 */ 330 */
332 status2 = acpi_ex_enter_interpreter(); 331 acpi_ex_enter_interpreter();
333 if (ACPI_SUCCESS(status2)) {
334 332
335 /* Remove one reference on the return object (should delete it) */ 333 /* Remove one reference on the return object (should delete it) */
336 334
337 acpi_ut_remove_reference(info->return_object); 335 acpi_ut_remove_reference(info->return_object);
338 acpi_ex_exit_interpreter(); 336 acpi_ex_exit_interpreter();
339 }
340 } 337 }
341 338
342 cleanup: 339 cleanup:
diff --git a/drivers/acpi/namespace/nsxfname.c b/drivers/acpi/namespace/nsxfname.c
index 978213a6c19f..b489781b22a8 100644
--- a/drivers/acpi/namespace/nsxfname.c
+++ b/drivers/acpi/namespace/nsxfname.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -84,38 +84,41 @@ acpi_get_handle(acpi_handle parent,
84 /* Convert a parent handle to a prefix node */ 84 /* Convert a parent handle to a prefix node */
85 85
86 if (parent) { 86 if (parent) {
87 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
88 if (ACPI_FAILURE(status)) {
89 return (status);
90 }
91
92 prefix_node = acpi_ns_map_handle_to_node(parent); 87 prefix_node = acpi_ns_map_handle_to_node(parent);
93 if (!prefix_node) { 88 if (!prefix_node) {
94 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
95 return (AE_BAD_PARAMETER); 89 return (AE_BAD_PARAMETER);
96 } 90 }
91 }
92
93 /*
94 * Valid cases are:
95 * 1) Fully qualified pathname
96 * 2) Parent + Relative pathname
97 *
98 * Error for <null Parent + relative path>
99 */
100 if (acpi_ns_valid_root_prefix(pathname[0])) {
97 101
98 status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 102 /* Pathname is fully qualified (starts with '\') */
99 if (ACPI_FAILURE(status)) { 103
100 return (status); 104 /* Special case for root-only, since we can't search for it */
105
106 if (!ACPI_STRCMP(pathname, ACPI_NS_ROOT_PATH)) {
107 *ret_handle =
108 acpi_ns_convert_entry_to_handle(acpi_gbl_root_node);
109 return (AE_OK);
101 } 110 }
102 } 111 } else if (!prefix_node) {
103 112
104 /* Special case for root, since we can't search for it */ 113 /* Relative path with null prefix is disallowed */
105 114
106 if (ACPI_STRCMP(pathname, ACPI_NS_ROOT_PATH) == 0) { 115 return (AE_BAD_PARAMETER);
107 *ret_handle =
108 acpi_ns_convert_entry_to_handle(acpi_gbl_root_node);
109 return (AE_OK);
110 } 116 }
111 117
112 /* 118 /* Find the Node and convert to a handle */
113 * Find the Node and convert to a handle
114 */
115 status = acpi_ns_get_node(prefix_node, pathname, ACPI_NS_NO_UPSEARCH,
116 &node);
117 119
118 *ret_handle = NULL; 120 status =
121 acpi_ns_get_node(prefix_node, pathname, ACPI_NS_NO_UPSEARCH, &node);
119 if (ACPI_SUCCESS(status)) { 122 if (ACPI_SUCCESS(status)) {
120 *ret_handle = acpi_ns_convert_entry_to_handle(node); 123 *ret_handle = acpi_ns_convert_entry_to_handle(node);
121 } 124 }
diff --git a/drivers/acpi/namespace/nsxfobj.c b/drivers/acpi/namespace/nsxfobj.c
index a18b1c223129..faa375887201 100644
--- a/drivers/acpi/namespace/nsxfobj.c
+++ b/drivers/acpi/namespace/nsxfobj.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index bd96a7045925..4a9faff4c01d 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -45,7 +45,7 @@ int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS]
45int __cpuinitdata node_to_pxm_map[MAX_NUMNODES] 45int __cpuinitdata node_to_pxm_map[MAX_NUMNODES]
46 = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL }; 46 = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
47 47
48extern int __init acpi_table_parse_madt_family(enum acpi_table_id id, 48extern int __init acpi_table_parse_madt_family(char *id,
49 unsigned long madt_size, 49 unsigned long madt_size,
50 int entry_id, 50 int entry_id,
51 acpi_madt_entry_handler handler, 51 acpi_madt_entry_handler handler,
@@ -89,7 +89,7 @@ void __cpuinit acpi_unmap_pxm_to_node(int node)
89 node_clear(node, nodes_found_map); 89 node_clear(node, nodes_found_map);
90} 90}
91 91
92void __init acpi_table_print_srat_entry(acpi_table_entry_header * header) 92void __init acpi_table_print_srat_entry(struct acpi_subtable_header * header)
93{ 93{
94 94
95 ACPI_FUNCTION_NAME("acpi_table_print_srat_entry"); 95 ACPI_FUNCTION_NAME("acpi_table_print_srat_entry");
@@ -99,36 +99,35 @@ void __init acpi_table_print_srat_entry(acpi_table_entry_header * header)
99 99
100 switch (header->type) { 100 switch (header->type) {
101 101
102 case ACPI_SRAT_PROCESSOR_AFFINITY: 102 case ACPI_SRAT_TYPE_CPU_AFFINITY:
103#ifdef ACPI_DEBUG_OUTPUT 103#ifdef ACPI_DEBUG_OUTPUT
104 { 104 {
105 struct acpi_table_processor_affinity *p = 105 struct acpi_srat_cpu_affinity *p =
106 (struct acpi_table_processor_affinity *)header; 106 (struct acpi_srat_cpu_affinity *)header;
107 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 107 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
108 "SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n", 108 "SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n",
109 p->apic_id, p->lsapic_eid, 109 p->apic_id, p->local_sapic_eid,
110 p->proximity_domain, 110 p->proximity_domain_lo,
111 p->flags. 111 (p->flags & ACPI_SRAT_CPU_ENABLED)?
112 enabled ? "enabled" : "disabled")); 112 "enabled" : "disabled"));
113 } 113 }
114#endif /* ACPI_DEBUG_OUTPUT */ 114#endif /* ACPI_DEBUG_OUTPUT */
115 break; 115 break;
116 116
117 case ACPI_SRAT_MEMORY_AFFINITY: 117 case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
118#ifdef ACPI_DEBUG_OUTPUT 118#ifdef ACPI_DEBUG_OUTPUT
119 { 119 {
120 struct acpi_table_memory_affinity *p = 120 struct acpi_srat_mem_affinity *p =
121 (struct acpi_table_memory_affinity *)header; 121 (struct acpi_srat_mem_affinity *)header;
122 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 122 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
123 "SRAT Memory (0x%08x%08x length 0x%08x%08x type 0x%x) in proximity domain %d %s%s\n", 123 "SRAT Memory (0x%lx length 0x%lx type 0x%x) in proximity domain %d %s%s\n",
124 p->base_addr_hi, p->base_addr_lo, 124 (unsigned long)p->base_address,
125 p->length_hi, p->length_lo, 125 (unsigned long)p->length,
126 p->memory_type, p->proximity_domain, 126 p->memory_type, p->proximity_domain,
127 p->flags. 127 (p->flags & ACPI_SRAT_MEM_ENABLED)?
128 enabled ? "enabled" : "disabled", 128 "enabled" : "disabled",
129 p->flags. 129 (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)?
130 hot_pluggable ? " hot-pluggable" : 130 " hot-pluggable" : ""));
131 ""));
132 } 131 }
133#endif /* ACPI_DEBUG_OUTPUT */ 132#endif /* ACPI_DEBUG_OUTPUT */
134 break; 133 break;
@@ -141,18 +140,18 @@ void __init acpi_table_print_srat_entry(acpi_table_entry_header * header)
141 } 140 }
142} 141}
143 142
144static int __init acpi_parse_slit(unsigned long phys_addr, unsigned long size) 143static int __init acpi_parse_slit(struct acpi_table_header *table)
145{ 144{
146 struct acpi_table_slit *slit; 145 struct acpi_table_slit *slit;
147 u32 localities; 146 u32 localities;
148 147
149 if (!phys_addr || !size) 148 if (!table)
150 return -EINVAL; 149 return -EINVAL;
151 150
152 slit = (struct acpi_table_slit *)__va(phys_addr); 151 slit = (struct acpi_table_slit *)table;
153 152
154 /* downcast just for %llu vs %lu for i386/ia64 */ 153 /* downcast just for %llu vs %lu for i386/ia64 */
155 localities = (u32) slit->localities; 154 localities = (u32) slit->locality_count;
156 155
157 acpi_numa_slit_init(slit); 156 acpi_numa_slit_init(slit);
158 157
@@ -160,12 +159,12 @@ static int __init acpi_parse_slit(unsigned long phys_addr, unsigned long size)
160} 159}
161 160
162static int __init 161static int __init
163acpi_parse_processor_affinity(acpi_table_entry_header * header, 162acpi_parse_processor_affinity(struct acpi_subtable_header * header,
164 const unsigned long end) 163 const unsigned long end)
165{ 164{
166 struct acpi_table_processor_affinity *processor_affinity; 165 struct acpi_srat_cpu_affinity *processor_affinity;
167 166
168 processor_affinity = (struct acpi_table_processor_affinity *)header; 167 processor_affinity = (struct acpi_srat_cpu_affinity *)header;
169 if (!processor_affinity) 168 if (!processor_affinity)
170 return -EINVAL; 169 return -EINVAL;
171 170
@@ -178,12 +177,12 @@ acpi_parse_processor_affinity(acpi_table_entry_header * header,
178} 177}
179 178
180static int __init 179static int __init
181acpi_parse_memory_affinity(acpi_table_entry_header * header, 180acpi_parse_memory_affinity(struct acpi_subtable_header * header,
182 const unsigned long end) 181 const unsigned long end)
183{ 182{
184 struct acpi_table_memory_affinity *memory_affinity; 183 struct acpi_srat_mem_affinity *memory_affinity;
185 184
186 memory_affinity = (struct acpi_table_memory_affinity *)header; 185 memory_affinity = (struct acpi_srat_mem_affinity *)header;
187 if (!memory_affinity) 186 if (!memory_affinity)
188 return -EINVAL; 187 return -EINVAL;
189 188
@@ -195,23 +194,23 @@ acpi_parse_memory_affinity(acpi_table_entry_header * header,
195 return 0; 194 return 0;
196} 195}
197 196
198static int __init acpi_parse_srat(unsigned long phys_addr, unsigned long size) 197static int __init acpi_parse_srat(struct acpi_table_header *table)
199{ 198{
200 struct acpi_table_srat *srat; 199 struct acpi_table_srat *srat;
201 200
202 if (!phys_addr || !size) 201 if (!table)
203 return -EINVAL; 202 return -EINVAL;
204 203
205 srat = (struct acpi_table_srat *)__va(phys_addr); 204 srat = (struct acpi_table_srat *)table;
206 205
207 return 0; 206 return 0;
208} 207}
209 208
210int __init 209int __init
211acpi_table_parse_srat(enum acpi_srat_entry_id id, 210acpi_table_parse_srat(enum acpi_srat_type id,
212 acpi_madt_entry_handler handler, unsigned int max_entries) 211 acpi_madt_entry_handler handler, unsigned int max_entries)
213{ 212{
214 return acpi_table_parse_madt_family(ACPI_SRAT, 213 return acpi_table_parse_madt_family(ACPI_SIG_SRAT,
215 sizeof(struct acpi_table_srat), id, 214 sizeof(struct acpi_table_srat), id,
216 handler, max_entries); 215 handler, max_entries);
217} 216}
@@ -221,17 +220,17 @@ int __init acpi_numa_init(void)
221 int result; 220 int result;
222 221
223 /* SRAT: Static Resource Affinity Table */ 222 /* SRAT: Static Resource Affinity Table */
224 result = acpi_table_parse(ACPI_SRAT, acpi_parse_srat); 223 result = acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat);
225 224
226 if (result > 0) { 225 if (result > 0) {
227 result = acpi_table_parse_srat(ACPI_SRAT_PROCESSOR_AFFINITY, 226 result = acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
228 acpi_parse_processor_affinity, 227 acpi_parse_processor_affinity,
229 NR_CPUS); 228 NR_CPUS);
230 result = acpi_table_parse_srat(ACPI_SRAT_MEMORY_AFFINITY, acpi_parse_memory_affinity, NR_NODE_MEMBLKS); // IA64 specific 229 result = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY, acpi_parse_memory_affinity, NR_NODE_MEMBLKS); // IA64 specific
231 } 230 }
232 231
233 /* SLIT: System Locality Information Table */ 232 /* SLIT: System Locality Information Table */
234 result = acpi_table_parse(ACPI_SLIT, acpi_parse_slit); 233 result = acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
235 234
236 acpi_numa_arch_fixup(); 235 acpi_numa_arch_fixup();
237 return 0; 236 return 0;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 57ae1e5cde0a..0f6f3bcbc8eb 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -36,6 +36,7 @@
36#include <linux/delay.h> 36#include <linux/delay.h>
37#include <linux/workqueue.h> 37#include <linux/workqueue.h>
38#include <linux/nmi.h> 38#include <linux/nmi.h>
39#include <linux/acpi.h>
39#include <acpi/acpi.h> 40#include <acpi/acpi.h>
40#include <asm/io.h> 41#include <asm/io.h>
41#include <acpi/acpi_bus.h> 42#include <acpi/acpi_bus.h>
@@ -75,6 +76,54 @@ static acpi_osd_handler acpi_irq_handler;
75static void *acpi_irq_context; 76static void *acpi_irq_context;
76static struct workqueue_struct *kacpid_wq; 77static struct workqueue_struct *kacpid_wq;
77 78
79static void __init acpi_request_region (struct acpi_generic_address *addr,
80 unsigned int length, char *desc)
81{
82 struct resource *res;
83
84 if (!addr->address || !length)
85 return;
86
87 if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
88 res = request_region(addr->address, length, desc);
89 else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
90 res = request_mem_region(addr->address, length, desc);
91}
92
93static int __init acpi_reserve_resources(void)
94{
95 acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
96 "ACPI PM1a_EVT_BLK");
97
98 acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
99 "ACPI PM1b_EVT_BLK");
100
101 acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
102 "ACPI PM1a_CNT_BLK");
103
104 acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
105 "ACPI PM1b_CNT_BLK");
106
107 if (acpi_gbl_FADT.pm_timer_length == 4)
108 acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");
109
110 acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
111 "ACPI PM2_CNT_BLK");
112
113 /* Length of GPE blocks must be a non-negative multiple of 2 */
114
115 if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
116 acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
117 acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");
118
119 if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
120 acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
121 acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
122
123 return 0;
124}
125device_initcall(acpi_reserve_resources);
126
78acpi_status acpi_os_initialize(void) 127acpi_status acpi_os_initialize(void)
79{ 128{
80 return AE_OK; 129 return AE_OK;
@@ -136,53 +185,43 @@ void acpi_os_vprintf(const char *fmt, va_list args)
136#endif 185#endif
137} 186}
138 187
139acpi_status acpi_os_get_root_pointer(u32 flags, struct acpi_pointer *addr) 188acpi_physical_address __init acpi_os_get_root_pointer(void)
140{ 189{
141 if (efi_enabled) { 190 if (efi_enabled) {
142 addr->pointer_type = ACPI_PHYSICAL_POINTER;
143 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) 191 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
144 addr->pointer.physical = efi.acpi20; 192 return efi.acpi20;
145 else if (efi.acpi != EFI_INVALID_TABLE_ADDR) 193 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
146 addr->pointer.physical = efi.acpi; 194 return efi.acpi;
147 else { 195 else {
148 printk(KERN_ERR PREFIX 196 printk(KERN_ERR PREFIX
149 "System description tables not found\n"); 197 "System description tables not found\n");
150 return AE_NOT_FOUND; 198 return 0;
151 } 199 }
152 } else { 200 } else
153 if (ACPI_FAILURE(acpi_find_root_pointer(flags, addr))) { 201 return acpi_find_rsdp();
154 printk(KERN_ERR PREFIX
155 "System description tables not found\n");
156 return AE_NOT_FOUND;
157 }
158 }
159
160 return AE_OK;
161} 202}
162 203
163acpi_status 204void __iomem *acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
164acpi_os_map_memory(acpi_physical_address phys, acpi_size size,
165 void __iomem ** virt)
166{ 205{
167 if (phys > ULONG_MAX) { 206 if (phys > ULONG_MAX) {
168 printk(KERN_ERR PREFIX "Cannot map memory that high\n"); 207 printk(KERN_ERR PREFIX "Cannot map memory that high\n");
169 return AE_BAD_PARAMETER; 208 return 0;
170 } 209 }
171 /* 210 if (acpi_gbl_permanent_mmap)
172 * ioremap checks to ensure this is in reserved space 211 /*
173 */ 212 * ioremap checks to ensure this is in reserved space
174 *virt = ioremap((unsigned long)phys, size); 213 */
175 214 return ioremap((unsigned long)phys, size);
176 if (!*virt) 215 else
177 return AE_NO_MEMORY; 216 return __acpi_map_table((unsigned long)phys, size);
178
179 return AE_OK;
180} 217}
181EXPORT_SYMBOL_GPL(acpi_os_map_memory); 218EXPORT_SYMBOL_GPL(acpi_os_map_memory);
182 219
183void acpi_os_unmap_memory(void __iomem * virt, acpi_size size) 220void acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
184{ 221{
185 iounmap(virt); 222 if (acpi_gbl_permanent_mmap) {
223 iounmap(virt);
224 }
186} 225}
187EXPORT_SYMBOL_GPL(acpi_os_unmap_memory); 226EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
188 227
@@ -254,7 +293,7 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
254 * FADT. It may not be the same if an interrupt source override exists 293 * FADT. It may not be the same if an interrupt source override exists
255 * for the SCI. 294 * for the SCI.
256 */ 295 */
257 gsi = acpi_fadt.sci_int; 296 gsi = acpi_gbl_FADT.sci_interrupt;
258 if (acpi_gsi_to_irq(gsi, &irq) < 0) { 297 if (acpi_gsi_to_irq(gsi, &irq) < 0) {
259 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n", 298 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
260 gsi); 299 gsi);
diff --git a/drivers/acpi/parser/psargs.c b/drivers/acpi/parser/psargs.c
index bf88e076c3e9..c2b9835c890b 100644
--- a/drivers/acpi/parser/psargs.c
+++ b/drivers/acpi/parser/psargs.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/parser/psloop.c b/drivers/acpi/parser/psloop.c
index e1541db3753a..773aee82fbb8 100644
--- a/drivers/acpi/parser/psloop.c
+++ b/drivers/acpi/parser/psloop.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -42,12 +42,11 @@
42 */ 42 */
43 43
44/* 44/*
45 * Parse the AML and build an operation tree as most interpreters, 45 * Parse the AML and build an operation tree as most interpreters, (such as
46 * like Perl, do. Parsing is done by hand rather than with a YACC 46 * Perl) do. Parsing is done by hand rather than with a YACC generated parser
47 * generated parser to tightly constrain stack and dynamic memory 47 * to tightly constrain stack and dynamic memory usage. Parsing is kept
48 * usage. At the same time, parsing is kept flexible and the code 48 * flexible and the code fairly compact by parsing based on a list of AML
49 * fairly compact by parsing based on a list of AML opcode 49 * opcode templates in aml_op_info[].
50 * templates in aml_op_info[]
51 */ 50 */
52 51
53#include <acpi/acpi.h> 52#include <acpi/acpi.h>
@@ -60,766 +59,679 @@ ACPI_MODULE_NAME("psloop")
60 59
61static u32 acpi_gbl_depth = 0; 60static u32 acpi_gbl_depth = 0;
62 61
62/* Local prototypes */
63
64static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state);
65
66static acpi_status
67acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
68 u8 * aml_op_start,
69 union acpi_parse_object *unnamed_op,
70 union acpi_parse_object **op);
71
72static acpi_status
73acpi_ps_create_op(struct acpi_walk_state *walk_state,
74 u8 * aml_op_start, union acpi_parse_object **new_op);
75
76static acpi_status
77acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
78 u8 * aml_op_start, union acpi_parse_object *op);
79
80static acpi_status
81acpi_ps_complete_op(struct acpi_walk_state *walk_state,
82 union acpi_parse_object **op, acpi_status status);
83
84static acpi_status
85acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
86 union acpi_parse_object *op, acpi_status status);
87
63/******************************************************************************* 88/*******************************************************************************
64 * 89 *
65 * FUNCTION: acpi_ps_parse_loop 90 * FUNCTION: acpi_ps_get_aml_opcode
66 * 91 *
67 * PARAMETERS: walk_state - Current state 92 * PARAMETERS: walk_state - Current state
68 * 93 *
69 * RETURN: Status 94 * RETURN: Status
70 * 95 *
71 * DESCRIPTION: Parse AML (pointed to by the current parser state) and return 96 * DESCRIPTION: Extract the next AML opcode from the input stream.
72 * a tree of ops.
73 * 97 *
74 ******************************************************************************/ 98 ******************************************************************************/
75 99
76acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state) 100static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
77{ 101{
78 acpi_status status = AE_OK;
79 acpi_status status2;
80 union acpi_parse_object *op = NULL; /* current op */
81 union acpi_parse_object *arg = NULL;
82 union acpi_parse_object *pre_op = NULL;
83 struct acpi_parse_state *parser_state;
84 u8 *aml_op_start = NULL;
85 102
86 ACPI_FUNCTION_TRACE_PTR(ps_parse_loop, walk_state); 103 ACPI_FUNCTION_TRACE_PTR(ps_get_aml_opcode, walk_state);
87 104
88 if (walk_state->descending_callback == NULL) { 105 walk_state->aml_offset =
89 return_ACPI_STATUS(AE_BAD_PARAMETER); 106 (u32) ACPI_PTR_DIFF(walk_state->parser_state.aml,
90 } 107 walk_state->parser_state.aml_start);
108 walk_state->opcode = acpi_ps_peek_opcode(&(walk_state->parser_state));
91 109
92 parser_state = &walk_state->parser_state; 110 /*
93 walk_state->arg_types = 0; 111 * First cut to determine what we have found:
112 * 1) A valid AML opcode
113 * 2) A name string
114 * 3) An unknown/invalid opcode
115 */
116 walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
94 117
95#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY)) 118 switch (walk_state->op_info->class) {
119 case AML_CLASS_ASCII:
120 case AML_CLASS_PREFIX:
121 /*
122 * Starts with a valid prefix or ASCII char, this is a name
123 * string. Convert the bare name string to a namepath.
124 */
125 walk_state->opcode = AML_INT_NAMEPATH_OP;
126 walk_state->arg_types = ARGP_NAMESTRING;
127 break;
96 128
97 if (walk_state->walk_type & ACPI_WALK_METHOD_RESTART) { 129 case AML_CLASS_UNKNOWN:
98 130
99 /* We are restarting a preempted control method */ 131 /* The opcode is unrecognized. Just skip unknown opcodes */
100 132
101 if (acpi_ps_has_completed_scope(parser_state)) { 133 ACPI_ERROR((AE_INFO,
102 /* 134 "Found unknown opcode %X at AML address %p offset %X, ignoring",
103 * We must check if a predicate to an IF or WHILE statement 135 walk_state->opcode, walk_state->parser_state.aml,
104 * was just completed 136 walk_state->aml_offset));
105 */
106 if ((parser_state->scope->parse_scope.op) &&
107 ((parser_state->scope->parse_scope.op->common.
108 aml_opcode == AML_IF_OP)
109 || (parser_state->scope->parse_scope.op->common.
110 aml_opcode == AML_WHILE_OP))
111 && (walk_state->control_state)
112 && (walk_state->control_state->common.state ==
113 ACPI_CONTROL_PREDICATE_EXECUTING)) {
114 /*
115 * A predicate was just completed, get the value of the
116 * predicate and branch based on that value
117 */
118 walk_state->op = NULL;
119 status =
120 acpi_ds_get_predicate_value(walk_state,
121 ACPI_TO_POINTER
122 (TRUE));
123 if (ACPI_FAILURE(status)
124 && ((status & AE_CODE_MASK) !=
125 AE_CODE_CONTROL)) {
126 if (status == AE_AML_NO_RETURN_VALUE) {
127 ACPI_EXCEPTION((AE_INFO, status,
128 "Invoked method did not return a value"));
129 137
130 } 138 ACPI_DUMP_BUFFER(walk_state->parser_state.aml, 128);
131 ACPI_EXCEPTION((AE_INFO, status,
132 "GetPredicate Failed"));
133 return_ACPI_STATUS(status);
134 }
135 139
136 status = 140 /* Assume one-byte bad opcode */
137 acpi_ps_next_parse_state(walk_state, op,
138 status);
139 }
140 141
141 acpi_ps_pop_scope(parser_state, &op, 142 walk_state->parser_state.aml++;
142 &walk_state->arg_types, 143 return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
143 &walk_state->arg_count);
144 ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
145 "Popped scope, Op=%p\n", op));
146 } else if (walk_state->prev_op) {
147 144
148 /* We were in the middle of an op */ 145 default:
149 146
150 op = walk_state->prev_op; 147 /* Found opcode info, this is a normal opcode */
151 walk_state->arg_types = walk_state->prev_arg_types; 148
152 } 149 walk_state->parser_state.aml +=
150 acpi_ps_get_opcode_size(walk_state->opcode);
151 walk_state->arg_types = walk_state->op_info->parse_args;
152 break;
153 } 153 }
154#endif
155 154
156 /* Iterative parsing loop, while there is more AML to process: */ 155 return_ACPI_STATUS(AE_OK);
156}
157 157
158 while ((parser_state->aml < parser_state->aml_end) || (op)) { 158/*******************************************************************************
159 aml_op_start = parser_state->aml; 159 *
160 if (!op) { 160 * FUNCTION: acpi_ps_build_named_op
161 *
162 * PARAMETERS: walk_state - Current state
163 * aml_op_start - Begin of named Op in AML
164 * unnamed_op - Early Op (not a named Op)
165 * Op - Returned Op
166 *
167 * RETURN: Status
168 *
169 * DESCRIPTION: Parse a named Op
170 *
171 ******************************************************************************/
161 172
162 /* Get the next opcode from the AML stream */ 173static acpi_status
174acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
175 u8 * aml_op_start,
176 union acpi_parse_object *unnamed_op,
177 union acpi_parse_object **op)
178{
179 acpi_status status = AE_OK;
180 union acpi_parse_object *arg = NULL;
163 181
164 walk_state->aml_offset = 182 ACPI_FUNCTION_TRACE_PTR(ps_build_named_op, walk_state);
165 (u32) ACPI_PTR_DIFF(parser_state->aml,
166 parser_state->aml_start);
167 walk_state->opcode = acpi_ps_peek_opcode(parser_state);
168 183
169 /* 184 unnamed_op->common.value.arg = NULL;
170 * First cut to determine what we have found: 185 unnamed_op->common.aml_opcode = walk_state->opcode;
171 * 1) A valid AML opcode
172 * 2) A name string
173 * 3) An unknown/invalid opcode
174 */
175 walk_state->op_info =
176 acpi_ps_get_opcode_info(walk_state->opcode);
177 switch (walk_state->op_info->class) {
178 case AML_CLASS_ASCII:
179 case AML_CLASS_PREFIX:
180 /*
181 * Starts with a valid prefix or ASCII char, this is a name
182 * string. Convert the bare name string to a namepath.
183 */
184 walk_state->opcode = AML_INT_NAMEPATH_OP;
185 walk_state->arg_types = ARGP_NAMESTRING;
186 break;
187 186
188 case AML_CLASS_UNKNOWN: 187 /*
188 * Get and append arguments until we find the node that contains
189 * the name (the type ARGP_NAME).
190 */
191 while (GET_CURRENT_ARG_TYPE(walk_state->arg_types) &&
192 (GET_CURRENT_ARG_TYPE(walk_state->arg_types) != ARGP_NAME)) {
193 status =
194 acpi_ps_get_next_arg(walk_state,
195 &(walk_state->parser_state),
196 GET_CURRENT_ARG_TYPE(walk_state->
197 arg_types), &arg);
198 if (ACPI_FAILURE(status)) {
199 return_ACPI_STATUS(status);
200 }
189 201
190 /* The opcode is unrecognized. Just skip unknown opcodes */ 202 acpi_ps_append_arg(unnamed_op, arg);
203 INCREMENT_ARG_LIST(walk_state->arg_types);
204 }
191 205
192 ACPI_ERROR((AE_INFO, 206 /*
193 "Found unknown opcode %X at AML address %p offset %X, ignoring", 207 * Make sure that we found a NAME and didn't run out of arguments
194 walk_state->opcode, 208 */
195 parser_state->aml, 209 if (!GET_CURRENT_ARG_TYPE(walk_state->arg_types)) {
196 walk_state->aml_offset)); 210 return_ACPI_STATUS(AE_AML_NO_OPERAND);
211 }
197 212
198 ACPI_DUMP_BUFFER(parser_state->aml, 128); 213 /* We know that this arg is a name, move to next arg */
199 214
200 /* Assume one-byte bad opcode */ 215 INCREMENT_ARG_LIST(walk_state->arg_types);
201 216
202 parser_state->aml++; 217 /*
203 continue; 218 * Find the object. This will either insert the object into
219 * the namespace or simply look it up
220 */
221 walk_state->op = NULL;
204 222
205 default: 223 status = walk_state->descending_callback(walk_state, op);
224 if (ACPI_FAILURE(status)) {
225 ACPI_EXCEPTION((AE_INFO, status, "During name lookup/catalog"));
226 return_ACPI_STATUS(status);
227 }
206 228
207 /* Found opcode info, this is a normal opcode */ 229 if (!*op) {
230 return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
231 }
208 232
209 parser_state->aml += 233 status = acpi_ps_next_parse_state(walk_state, *op, status);
210 acpi_ps_get_opcode_size(walk_state->opcode); 234 if (ACPI_FAILURE(status)) {
211 walk_state->arg_types = 235 if (status == AE_CTRL_PENDING) {
212 walk_state->op_info->parse_args; 236 return_ACPI_STATUS(AE_CTRL_PARSE_PENDING);
213 break; 237 }
214 } 238 return_ACPI_STATUS(status);
239 }
215 240
216 /* Create Op structure and append to parent's argument list */ 241 acpi_ps_append_arg(*op, unnamed_op->common.value.arg);
242 acpi_gbl_depth++;
217 243
218 if (walk_state->op_info->flags & AML_NAMED) { 244 if ((*op)->common.aml_opcode == AML_REGION_OP) {
245 /*
246 * Defer final parsing of an operation_region body, because we don't
247 * have enough info in the first pass to parse it correctly (i.e.,
248 * there may be method calls within the term_arg elements of the body.)
249 *
250 * However, we must continue parsing because the opregion is not a
251 * standalone package -- we don't know where the end is at this point.
252 *
253 * (Length is unknown until parse of the body complete)
254 */
255 (*op)->named.data = aml_op_start;
256 (*op)->named.length = 0;
257 }
219 258
220 /* Allocate a new pre_op if necessary */ 259 return_ACPI_STATUS(AE_OK);
260}
221 261
222 if (!pre_op) { 262/*******************************************************************************
223 pre_op = 263 *
224 acpi_ps_alloc_op(walk_state-> 264 * FUNCTION: acpi_ps_create_op
225 opcode); 265 *
226 if (!pre_op) { 266 * PARAMETERS: walk_state - Current state
227 status = AE_NO_MEMORY; 267 * aml_op_start - Op start in AML
228 goto close_this_op; 268 * new_op - Returned Op
229 } 269 *
230 } 270 * RETURN: Status
271 *
272 * DESCRIPTION: Get Op from AML
273 *
274 ******************************************************************************/
231 275
232 pre_op->common.value.arg = NULL; 276static acpi_status
233 pre_op->common.aml_opcode = walk_state->opcode; 277acpi_ps_create_op(struct acpi_walk_state *walk_state,
278 u8 * aml_op_start, union acpi_parse_object **new_op)
279{
280 acpi_status status = AE_OK;
281 union acpi_parse_object *op;
282 union acpi_parse_object *named_op = NULL;
234 283
235 /* 284 ACPI_FUNCTION_TRACE_PTR(ps_create_op, walk_state);
236 * Get and append arguments until we find the node that contains
237 * the name (the type ARGP_NAME).
238 */
239 while (GET_CURRENT_ARG_TYPE
240 (walk_state->arg_types)
241 &&
242 (GET_CURRENT_ARG_TYPE
243 (walk_state->arg_types) != ARGP_NAME)) {
244 status =
245 acpi_ps_get_next_arg(walk_state,
246 parser_state,
247 GET_CURRENT_ARG_TYPE
248 (walk_state->
249 arg_types),
250 &arg);
251 if (ACPI_FAILURE(status)) {
252 goto close_this_op;
253 }
254 285
255 acpi_ps_append_arg(pre_op, arg); 286 status = acpi_ps_get_aml_opcode(walk_state);
256 INCREMENT_ARG_LIST(walk_state-> 287 if (status == AE_CTRL_PARSE_CONTINUE) {
257 arg_types); 288 return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
258 } 289 }
259 290
260 /* 291 /* Create Op structure and append to parent's argument list */
261 * Make sure that we found a NAME and didn't run out of
262 * arguments
263 */
264 if (!GET_CURRENT_ARG_TYPE
265 (walk_state->arg_types)) {
266 status = AE_AML_NO_OPERAND;
267 goto close_this_op;
268 }
269 292
270 /* We know that this arg is a name, move to next arg */ 293 walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
294 op = acpi_ps_alloc_op(walk_state->opcode);
295 if (!op) {
296 return_ACPI_STATUS(AE_NO_MEMORY);
297 }
271 298
272 INCREMENT_ARG_LIST(walk_state->arg_types); 299 if (walk_state->op_info->flags & AML_NAMED) {
300 status =
301 acpi_ps_build_named_op(walk_state, aml_op_start, op,
302 &named_op);
303 acpi_ps_free_op(op);
304 if (ACPI_FAILURE(status)) {
305 return_ACPI_STATUS(status);
306 }
273 307
274 /* 308 *new_op = named_op;
275 * Find the object. This will either insert the object into 309 return_ACPI_STATUS(AE_OK);
276 * the namespace or simply look it up 310 }
277 */
278 walk_state->op = NULL;
279 311
280 status = 312 /* Not a named opcode, just allocate Op and append to parent */
281 walk_state->descending_callback(walk_state,
282 &op);
283 if (ACPI_FAILURE(status)) {
284 ACPI_EXCEPTION((AE_INFO, status,
285 "During name lookup/catalog"));
286 goto close_this_op;
287 }
288 313
289 if (!op) { 314 if (walk_state->op_info->flags & AML_CREATE) {
290 continue; 315 /*
291 } 316 * Backup to beginning of create_xXXfield declaration
317 * body_length is unknown until we parse the body
318 */
319 op->named.data = aml_op_start;
320 op->named.length = 0;
321 }
292 322
293 status = 323 acpi_ps_append_arg(acpi_ps_get_parent_scope
294 acpi_ps_next_parse_state(walk_state, op, 324 (&(walk_state->parser_state)), op);
295 status);
296 if (status == AE_CTRL_PENDING) {
297 status = AE_OK;
298 goto close_this_op;
299 }
300 325
301 if (ACPI_FAILURE(status)) { 326 if (walk_state->descending_callback != NULL) {
302 goto close_this_op; 327 /*
303 } 328 * Find the object. This will either insert the object into
329 * the namespace or simply look it up
330 */
331 walk_state->op = *new_op = op;
304 332
305 acpi_ps_append_arg(op, 333 status = walk_state->descending_callback(walk_state, &op);
306 pre_op->common.value.arg); 334 status = acpi_ps_next_parse_state(walk_state, op, status);
307 acpi_gbl_depth++; 335 if (status == AE_CTRL_PENDING) {
308 336 status = AE_CTRL_PARSE_PENDING;
309 if (op->common.aml_opcode == AML_REGION_OP) { 337 }
310 /* 338 }
311 * Defer final parsing of an operation_region body,
312 * because we don't have enough info in the first pass
313 * to parse it correctly (i.e., there may be method
314 * calls within the term_arg elements of the body.)
315 *
316 * However, we must continue parsing because
317 * the opregion is not a standalone package --
318 * we don't know where the end is at this point.
319 *
320 * (Length is unknown until parse of the body complete)
321 */
322 op->named.data = aml_op_start;
323 op->named.length = 0;
324 }
325 } else {
326 /* Not a named opcode, just allocate Op and append to parent */
327 339
328 walk_state->op_info = 340 return_ACPI_STATUS(status);
329 acpi_ps_get_opcode_info(walk_state->opcode); 341}
330 op = acpi_ps_alloc_op(walk_state->opcode);
331 if (!op) {
332 status = AE_NO_MEMORY;
333 goto close_this_op;
334 }
335 342
336 if (walk_state->op_info->flags & AML_CREATE) { 343/*******************************************************************************
337 /* 344 *
338 * Backup to beginning of create_xXXfield declaration 345 * FUNCTION: acpi_ps_get_arguments
339 * body_length is unknown until we parse the body 346 *
340 */ 347 * PARAMETERS: walk_state - Current state
341 op->named.data = aml_op_start; 348 * aml_op_start - Op start in AML
342 op->named.length = 0; 349 * Op - Current Op
343 } 350 *
351 * RETURN: Status
352 *
353 * DESCRIPTION: Get arguments for passed Op.
354 *
355 ******************************************************************************/
344 356
345 acpi_ps_append_arg(acpi_ps_get_parent_scope 357static acpi_status
346 (parser_state), op); 358acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
359 u8 * aml_op_start, union acpi_parse_object *op)
360{
361 acpi_status status = AE_OK;
362 union acpi_parse_object *arg = NULL;
347 363
348 if ((walk_state->descending_callback != NULL)) { 364 ACPI_FUNCTION_TRACE_PTR(ps_get_arguments, walk_state);
349 /*
350 * Find the object. This will either insert the object into
351 * the namespace or simply look it up
352 */
353 walk_state->op = op;
354 365
355 status = 366 switch (op->common.aml_opcode) {
356 walk_state-> 367 case AML_BYTE_OP: /* AML_BYTEDATA_ARG */
357 descending_callback(walk_state, 368 case AML_WORD_OP: /* AML_WORDDATA_ARG */
358 &op); 369 case AML_DWORD_OP: /* AML_DWORDATA_ARG */
359 status = 370 case AML_QWORD_OP: /* AML_QWORDATA_ARG */
360 acpi_ps_next_parse_state(walk_state, 371 case AML_STRING_OP: /* AML_ASCIICHARLIST_ARG */
361 op,
362 status);
363 if (status == AE_CTRL_PENDING) {
364 status = AE_OK;
365 goto close_this_op;
366 }
367 372
368 if (ACPI_FAILURE(status)) { 373 /* Fill in constant or string argument directly */
369 goto close_this_op;
370 }
371 }
372 }
373 374
374 op->common.aml_offset = walk_state->aml_offset; 375 acpi_ps_get_next_simple_arg(&(walk_state->parser_state),
376 GET_CURRENT_ARG_TYPE(walk_state->
377 arg_types),
378 op);
379 break;
375 380
376 if (walk_state->op_info) { 381 case AML_INT_NAMEPATH_OP: /* AML_NAMESTRING_ARG */
377 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, 382
378 "Opcode %4.4X [%s] Op %p Aml %p AmlOffset %5.5X\n", 383 status =
379 (u32) op->common.aml_opcode, 384 acpi_ps_get_next_namepath(walk_state,
380 walk_state->op_info->name, op, 385 &(walk_state->parser_state), op,
381 parser_state->aml, 386 1);
382 op->common.aml_offset)); 387 if (ACPI_FAILURE(status)) {
383 } 388 return_ACPI_STATUS(status);
384 } 389 }
385 390
391 walk_state->arg_types = 0;
392 break;
393
394 default:
386 /* 395 /*
387 * Start arg_count at zero because we don't know if there are 396 * Op is not a constant or string, append each argument to the Op
388 * any args yet
389 */ 397 */
390 walk_state->arg_count = 0; 398 while (GET_CURRENT_ARG_TYPE(walk_state->arg_types)
391 399 && !walk_state->arg_count) {
392 /* Are there any arguments that must be processed? */ 400 walk_state->aml_offset =
393 401 (u32) ACPI_PTR_DIFF(walk_state->parser_state.aml,
394 if (walk_state->arg_types) { 402 walk_state->parser_state.
395 403 aml_start);
396 /* Get arguments */
397
398 switch (op->common.aml_opcode) {
399 case AML_BYTE_OP: /* AML_BYTEDATA_ARG */
400 case AML_WORD_OP: /* AML_WORDDATA_ARG */
401 case AML_DWORD_OP: /* AML_DWORDATA_ARG */
402 case AML_QWORD_OP: /* AML_QWORDATA_ARG */
403 case AML_STRING_OP: /* AML_ASCIICHARLIST_ARG */
404
405 /* Fill in constant or string argument directly */
406
407 acpi_ps_get_next_simple_arg(parser_state,
408 GET_CURRENT_ARG_TYPE
409 (walk_state->
410 arg_types), op);
411 break;
412
413 case AML_INT_NAMEPATH_OP: /* AML_NAMESTRING_ARG */
414
415 status =
416 acpi_ps_get_next_namepath(walk_state,
417 parser_state, op,
418 1);
419 if (ACPI_FAILURE(status)) {
420 goto close_this_op;
421 }
422
423 walk_state->arg_types = 0;
424 break;
425 404
426 default: 405 status =
427 /* 406 acpi_ps_get_next_arg(walk_state,
428 * Op is not a constant or string, append each argument 407 &(walk_state->parser_state),
429 * to the Op 408 GET_CURRENT_ARG_TYPE
430 */ 409 (walk_state->arg_types), &arg);
431 while (GET_CURRENT_ARG_TYPE 410 if (ACPI_FAILURE(status)) {
432 (walk_state->arg_types) 411 return_ACPI_STATUS(status);
433 && !walk_state->arg_count) { 412 }
434 walk_state->aml_offset = (u32)
435 ACPI_PTR_DIFF(parser_state->aml,
436 parser_state->
437 aml_start);
438 413
439 status = 414 if (arg) {
440 acpi_ps_get_next_arg(walk_state, 415 arg->common.aml_offset = walk_state->aml_offset;
441 parser_state, 416 acpi_ps_append_arg(op, arg);
442 GET_CURRENT_ARG_TYPE 417 }
443 (walk_state->
444 arg_types),
445 &arg);
446 if (ACPI_FAILURE(status)) {
447 goto close_this_op;
448 }
449 418
450 if (arg) { 419 INCREMENT_ARG_LIST(walk_state->arg_types);
451 arg->common.aml_offset = 420 }
452 walk_state->aml_offset;
453 acpi_ps_append_arg(op, arg);
454 }
455 INCREMENT_ARG_LIST(walk_state->
456 arg_types);
457 }
458 421
459 /* Special processing for certain opcodes */ 422 /* Special processing for certain opcodes */
460 423
461 /* TBD (remove): Temporary mechanism to disable this code if needed */ 424 /* TBD (remove): Temporary mechanism to disable this code if needed */
462 425
463#ifdef ACPI_ENABLE_MODULE_LEVEL_CODE 426#ifdef ACPI_ENABLE_MODULE_LEVEL_CODE
464 427
465 if ((walk_state->pass_number <= 428 if ((walk_state->pass_number <= ACPI_IMODE_LOAD_PASS1) &&
466 ACPI_IMODE_LOAD_PASS1) 429 ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) == 0)) {
467 && 430 /*
468 ((walk_state-> 431 * We want to skip If/Else/While constructs during Pass1 because we
469 parse_flags & ACPI_PARSE_DISASSEMBLE) == 432 * want to actually conditionally execute the code during Pass2.
470 0)) { 433 *
471 /* 434 * Except for disassembly, where we always want to walk the
472 * We want to skip If/Else/While constructs during Pass1 435 * If/Else/While packages
473 * because we want to actually conditionally execute the 436 */
474 * code during Pass2. 437 switch (op->common.aml_opcode) {
475 * 438 case AML_IF_OP:
476 * Except for disassembly, where we always want to 439 case AML_ELSE_OP:
477 * walk the If/Else/While packages 440 case AML_WHILE_OP:
478 */
479 switch (op->common.aml_opcode) {
480 case AML_IF_OP:
481 case AML_ELSE_OP:
482 case AML_WHILE_OP:
483
484 ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
485 "Pass1: Skipping an If/Else/While body\n"));
486
487 /* Skip body of if/else/while in pass 1 */
488
489 parser_state->aml =
490 parser_state->pkg_end;
491 walk_state->arg_count = 0;
492 break;
493
494 default:
495 break;
496 }
497 }
498#endif
499 switch (op->common.aml_opcode) {
500 case AML_METHOD_OP:
501
502 /*
503 * Skip parsing of control method
504 * because we don't have enough info in the first pass
505 * to parse it correctly.
506 *
507 * Save the length and address of the body
508 */
509 op->named.data = parser_state->aml;
510 op->named.length =
511 (u32) (parser_state->pkg_end -
512 parser_state->aml);
513
514 /* Skip body of method */
515
516 parser_state->aml =
517 parser_state->pkg_end;
518 walk_state->arg_count = 0;
519 break;
520
521 case AML_BUFFER_OP:
522 case AML_PACKAGE_OP:
523 case AML_VAR_PACKAGE_OP:
524
525 if ((op->common.parent) &&
526 (op->common.parent->common.
527 aml_opcode == AML_NAME_OP)
528 && (walk_state->pass_number <=
529 ACPI_IMODE_LOAD_PASS2)) {
530 /*
531 * Skip parsing of Buffers and Packages
532 * because we don't have enough info in the first pass
533 * to parse them correctly.
534 */
535 op->named.data = aml_op_start;
536 op->named.length =
537 (u32) (parser_state->
538 pkg_end -
539 aml_op_start);
540
541 /* Skip body */
542
543 parser_state->aml =
544 parser_state->pkg_end;
545 walk_state->arg_count = 0;
546 }
547 break;
548 441
549 case AML_WHILE_OP: 442 ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
443 "Pass1: Skipping an If/Else/While body\n"));
550 444
551 if (walk_state->control_state) { 445 /* Skip body of if/else/while in pass 1 */
552 walk_state->control_state->
553 control.package_end =
554 parser_state->pkg_end;
555 }
556 break;
557 446
558 default: 447 walk_state->parser_state.aml =
448 walk_state->parser_state.pkg_end;
449 walk_state->arg_count = 0;
450 break;
559 451
560 /* No action for all other opcodes */ 452 default:
561 break;
562 }
563 break; 453 break;
564 } 454 }
565 } 455 }
456#endif
566 457
567 /* Check for arguments that need to be processed */ 458 switch (op->common.aml_opcode) {
568 459 case AML_METHOD_OP:
569 if (walk_state->arg_count) {
570 /* 460 /*
571 * There are arguments (complex ones), push Op and 461 * Skip parsing of control method because we don't have enough
572 * prepare for argument 462 * info in the first pass to parse it correctly.
463 *
464 * Save the length and address of the body
573 */ 465 */
574 status = acpi_ps_push_scope(parser_state, op, 466 op->named.data = walk_state->parser_state.aml;
575 walk_state->arg_types, 467 op->named.length = (u32)
576 walk_state->arg_count); 468 (walk_state->parser_state.pkg_end -
577 if (ACPI_FAILURE(status)) { 469 walk_state->parser_state.aml);
578 goto close_this_op;
579 }
580 op = NULL;
581 continue;
582 }
583 470
584 /* 471 /* Skip body of method */
585 * All arguments have been processed -- Op is complete,
586 * prepare for next
587 */
588 walk_state->op_info =
589 acpi_ps_get_opcode_info(op->common.aml_opcode);
590 if (walk_state->op_info->flags & AML_NAMED) {
591 if (acpi_gbl_depth) {
592 acpi_gbl_depth--;
593 }
594 472
595 if (op->common.aml_opcode == AML_REGION_OP) { 473 walk_state->parser_state.aml =
474 walk_state->parser_state.pkg_end;
475 walk_state->arg_count = 0;
476 break;
477
478 case AML_BUFFER_OP:
479 case AML_PACKAGE_OP:
480 case AML_VAR_PACKAGE_OP:
481
482 if ((op->common.parent) &&
483 (op->common.parent->common.aml_opcode ==
484 AML_NAME_OP)
485 && (walk_state->pass_number <=
486 ACPI_IMODE_LOAD_PASS2)) {
596 /* 487 /*
597 * Skip parsing of control method or opregion body, 488 * Skip parsing of Buffers and Packages because we don't have
598 * because we don't have enough info in the first pass 489 * enough info in the first pass to parse them correctly.
599 * to parse them correctly.
600 *
601 * Completed parsing an op_region declaration, we now
602 * know the length.
603 */ 490 */
604 op->named.length = 491 op->named.data = aml_op_start;
605 (u32) (parser_state->aml - op->named.data); 492 op->named.length = (u32)
606 } 493 (walk_state->parser_state.pkg_end -
607 } 494 aml_op_start);
608 495
609 if (walk_state->op_info->flags & AML_CREATE) { 496 /* Skip body */
610 /*
611 * Backup to beginning of create_xXXfield declaration (1 for
612 * Opcode)
613 *
614 * body_length is unknown until we parse the body
615 */
616 op->named.length =
617 (u32) (parser_state->aml - op->named.data);
618 }
619 497
620 /* This op complete, notify the dispatcher */ 498 walk_state->parser_state.aml =
499 walk_state->parser_state.pkg_end;
500 walk_state->arg_count = 0;
501 }
502 break;
621 503
622 if (walk_state->ascending_callback != NULL) { 504 case AML_WHILE_OP:
623 walk_state->op = op;
624 walk_state->opcode = op->common.aml_opcode;
625 505
626 status = walk_state->ascending_callback(walk_state); 506 if (walk_state->control_state) {
627 status = 507 walk_state->control_state->control.package_end =
628 acpi_ps_next_parse_state(walk_state, op, status); 508 walk_state->parser_state.pkg_end;
629 if (status == AE_CTRL_PENDING) {
630 status = AE_OK;
631 goto close_this_op;
632 } 509 }
633 } 510 break;
634
635 close_this_op:
636 /*
637 * Finished one argument of the containing scope
638 */
639 parser_state->scope->parse_scope.arg_count--;
640 511
641 /* Finished with pre_op */ 512 default:
642 513
643 if (pre_op) { 514 /* No action for all other opcodes */
644 acpi_ps_free_op(pre_op); 515 break;
645 pre_op = NULL;
646 } 516 }
647 517
648 /* Close this Op (will result in parse subtree deletion) */ 518 break;
519 }
649 520
650 status2 = acpi_ps_complete_this_op(walk_state, op); 521 return_ACPI_STATUS(AE_OK);
651 if (ACPI_FAILURE(status2)) { 522}
652 return_ACPI_STATUS(status2);
653 }
654 op = NULL;
655 523
656 switch (status) { 524/*******************************************************************************
657 case AE_OK: 525 *
658 break; 526 * FUNCTION: acpi_ps_complete_op
527 *
528 * PARAMETERS: walk_state - Current state
529 * Op - Returned Op
530 * Status - Parse status before complete Op
531 *
532 * RETURN: Status
533 *
534 * DESCRIPTION: Complete Op
535 *
536 ******************************************************************************/
659 537
660 case AE_CTRL_TRANSFER: 538static acpi_status
539acpi_ps_complete_op(struct acpi_walk_state *walk_state,
540 union acpi_parse_object **op, acpi_status status)
541{
542 acpi_status status2;
661 543
662 /* We are about to transfer to a called method. */ 544 ACPI_FUNCTION_TRACE_PTR(ps_complete_op, walk_state);
663 545
664 walk_state->prev_op = op; 546 /*
665 walk_state->prev_arg_types = walk_state->arg_types; 547 * Finished one argument of the containing scope
666 return_ACPI_STATUS(status); 548 */
549 walk_state->parser_state.scope->parse_scope.arg_count--;
667 550
668 case AE_CTRL_END: 551 /* Close this Op (will result in parse subtree deletion) */
669 552
670 acpi_ps_pop_scope(parser_state, &op, 553 status2 = acpi_ps_complete_this_op(walk_state, *op);
671 &walk_state->arg_types, 554 if (ACPI_FAILURE(status2)) {
672 &walk_state->arg_count); 555 return_ACPI_STATUS(status2);
556 }
673 557
674 if (op) { 558 *op = NULL;
675 walk_state->op = op;
676 walk_state->op_info =
677 acpi_ps_get_opcode_info(op->common.
678 aml_opcode);
679 walk_state->opcode = op->common.aml_opcode;
680 559
681 status = 560 switch (status) {
682 walk_state->ascending_callback(walk_state); 561 case AE_OK:
683 status = 562 break;
684 acpi_ps_next_parse_state(walk_state, op,
685 status);
686 563
687 status2 = 564 case AE_CTRL_TRANSFER:
688 acpi_ps_complete_this_op(walk_state, op);
689 if (ACPI_FAILURE(status2)) {
690 return_ACPI_STATUS(status2);
691 }
692 op = NULL;
693 }
694 status = AE_OK;
695 break;
696 565
697 case AE_CTRL_BREAK: 566 /* We are about to transfer to a called method */
698 case AE_CTRL_CONTINUE:
699 567
700 /* Pop off scopes until we find the While */ 568 walk_state->prev_op = NULL;
569 walk_state->prev_arg_types = walk_state->arg_types;
570 return_ACPI_STATUS(status);
701 571
702 while (!op || (op->common.aml_opcode != AML_WHILE_OP)) { 572 case AE_CTRL_END:
703 acpi_ps_pop_scope(parser_state, &op,
704 &walk_state->arg_types,
705 &walk_state->arg_count);
706 573
707 if (op->common.aml_opcode != AML_WHILE_OP) { 574 acpi_ps_pop_scope(&(walk_state->parser_state), op,
708 status2 = 575 &walk_state->arg_types,
709 acpi_ds_result_stack_pop 576 &walk_state->arg_count);
710 (walk_state);
711 if (ACPI_FAILURE(status2)) {
712 return_ACPI_STATUS(status2);
713 }
714 }
715 }
716
717 /* Close this iteration of the While loop */
718 577
719 walk_state->op = op; 578 if (*op) {
579 walk_state->op = *op;
720 walk_state->op_info = 580 walk_state->op_info =
721 acpi_ps_get_opcode_info(op->common.aml_opcode); 581 acpi_ps_get_opcode_info((*op)->common.aml_opcode);
722 walk_state->opcode = op->common.aml_opcode; 582 walk_state->opcode = (*op)->common.aml_opcode;
723 583
724 status = walk_state->ascending_callback(walk_state); 584 status = walk_state->ascending_callback(walk_state);
725 status = 585 status =
726 acpi_ps_next_parse_state(walk_state, op, status); 586 acpi_ps_next_parse_state(walk_state, *op, status);
727 587
728 status2 = acpi_ps_complete_this_op(walk_state, op); 588 status2 = acpi_ps_complete_this_op(walk_state, *op);
729 if (ACPI_FAILURE(status2)) { 589 if (ACPI_FAILURE(status2)) {
730 return_ACPI_STATUS(status2); 590 return_ACPI_STATUS(status2);
731 } 591 }
732 op = NULL; 592 }
733
734 status = AE_OK;
735 break;
736 593
737 case AE_CTRL_TERMINATE: 594 status = AE_OK;
595 break;
738 596
739 status = AE_OK; 597 case AE_CTRL_BREAK:
598 case AE_CTRL_CONTINUE:
740 599
741 /* Clean up */ 600 /* Pop off scopes until we find the While */
742 do {
743 if (op) {
744 status2 =
745 acpi_ps_complete_this_op(walk_state,
746 op);
747 if (ACPI_FAILURE(status2)) {
748 return_ACPI_STATUS(status2);
749 }
750 601
751 status2 = 602 while (!(*op) || ((*op)->common.aml_opcode != AML_WHILE_OP)) {
752 acpi_ds_result_stack_pop 603 acpi_ps_pop_scope(&(walk_state->parser_state), op,
753 (walk_state); 604 &walk_state->arg_types,
754 if (ACPI_FAILURE(status2)) { 605 &walk_state->arg_count);
755 return_ACPI_STATUS(status2);
756 }
757 606
758 acpi_ut_delete_generic_state 607 if ((*op)->common.aml_opcode != AML_WHILE_OP) {
759 (acpi_ut_pop_generic_state 608 status2 = acpi_ds_result_stack_pop(walk_state);
760 (&walk_state->control_state)); 609 if (ACPI_FAILURE(status2)) {
610 return_ACPI_STATUS(status2);
761 } 611 }
612 }
613 }
762 614
763 acpi_ps_pop_scope(parser_state, &op, 615 /* Close this iteration of the While loop */
764 &walk_state->arg_types,
765 &walk_state->arg_count);
766 616
767 } while (op); 617 walk_state->op = *op;
618 walk_state->op_info =
619 acpi_ps_get_opcode_info((*op)->common.aml_opcode);
620 walk_state->opcode = (*op)->common.aml_opcode;
768 621
769 return_ACPI_STATUS(status); 622 status = walk_state->ascending_callback(walk_state);
623 status = acpi_ps_next_parse_state(walk_state, *op, status);
770 624
771 default: /* All other non-AE_OK status */ 625 status2 = acpi_ps_complete_this_op(walk_state, *op);
626 if (ACPI_FAILURE(status2)) {
627 return_ACPI_STATUS(status2);
628 }
772 629
773 do { 630 status = AE_OK;
774 if (op) { 631 break;
775 status2 = 632
776 acpi_ps_complete_this_op(walk_state, 633 case AE_CTRL_TERMINATE:
777 op); 634
778 if (ACPI_FAILURE(status2)) { 635 /* Clean up */
779 return_ACPI_STATUS(status2); 636 do {
780 } 637 if (*op) {
638 status2 =
639 acpi_ps_complete_this_op(walk_state, *op);
640 if (ACPI_FAILURE(status2)) {
641 return_ACPI_STATUS(status2);
642 }
643 status2 = acpi_ds_result_stack_pop(walk_state);
644 if (ACPI_FAILURE(status2)) {
645 return_ACPI_STATUS(status2);
781 } 646 }
782 647
783 acpi_ps_pop_scope(parser_state, &op, 648 acpi_ut_delete_generic_state
784 &walk_state->arg_types, 649 (acpi_ut_pop_generic_state
785 &walk_state->arg_count); 650 (&walk_state->control_state));
651 }
786 652
787 } while (op); 653 acpi_ps_pop_scope(&(walk_state->parser_state), op,
654 &walk_state->arg_types,
655 &walk_state->arg_count);
788 656
789 /* 657 } while (*op);
790 * TBD: Cleanup parse ops on error 658
791 */ 659 return_ACPI_STATUS(AE_OK);
792#if 0 660
793 if (op == NULL) { 661 default: /* All other non-AE_OK status */
794 acpi_ps_pop_scope(parser_state, &op, 662
795 &walk_state->arg_types, 663 do {
796 &walk_state->arg_count); 664 if (*op) {
665 status2 =
666 acpi_ps_complete_this_op(walk_state, *op);
667 if (ACPI_FAILURE(status2)) {
668 return_ACPI_STATUS(status2);
669 }
797 } 670 }
798#endif
799 walk_state->prev_op = op;
800 walk_state->prev_arg_types = walk_state->arg_types;
801 return_ACPI_STATUS(status);
802 }
803 671
804 /* This scope complete? */ 672 acpi_ps_pop_scope(&(walk_state->parser_state), op,
673 &walk_state->arg_types,
674 &walk_state->arg_count);
805 675
806 if (acpi_ps_has_completed_scope(parser_state)) { 676 } while (*op);
807 acpi_ps_pop_scope(parser_state, &op, 677
678#if 0
679 /*
680 * TBD: Cleanup parse ops on error
681 */
682 if (*op == NULL) {
683 acpi_ps_pop_scope(parser_state, op,
808 &walk_state->arg_types, 684 &walk_state->arg_types,
809 &walk_state->arg_count); 685 &walk_state->arg_count);
810 ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
811 "Popped scope, Op=%p\n", op));
812 } else {
813 op = NULL;
814 } 686 }
687#endif
688 walk_state->prev_op = NULL;
689 walk_state->prev_arg_types = walk_state->arg_types;
690 return_ACPI_STATUS(status);
691 }
815 692
816 } /* while parser_state->Aml */ 693 /* This scope complete? */
694
695 if (acpi_ps_has_completed_scope(&(walk_state->parser_state))) {
696 acpi_ps_pop_scope(&(walk_state->parser_state), op,
697 &walk_state->arg_types,
698 &walk_state->arg_count);
699 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Popped scope, Op=%p\n", *op));
700 } else {
701 *op = NULL;
702 }
703
704 return_ACPI_STATUS(AE_OK);
705}
706
707/*******************************************************************************
708 *
709 * FUNCTION: acpi_ps_complete_final_op
710 *
711 * PARAMETERS: walk_state - Current state
712 * Op - Current Op
713 * Status - Current parse status before complete last
714 * Op
715 *
716 * RETURN: Status
717 *
718 * DESCRIPTION: Complete last Op.
719 *
720 ******************************************************************************/
721
722static acpi_status
723acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
724 union acpi_parse_object *op, acpi_status status)
725{
726 acpi_status status2;
727
728 ACPI_FUNCTION_TRACE_PTR(ps_complete_final_op, walk_state);
817 729
818 /* 730 /*
819 * Complete the last Op (if not completed), and clear the scope stack. 731 * Complete the last Op (if not completed), and clear the scope stack.
820 * It is easily possible to end an AML "package" with an unbounded number 732 * It is easily possible to end an AML "package" with an unbounded number
821 * of open scopes (such as when several ASL blocks are closed with 733 * of open scopes (such as when several ASL blocks are closed with
822 * sequential closing braces). We want to terminate each one cleanly. 734 * sequential closing braces). We want to terminate each one cleanly.
823 */ 735 */
824 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "AML package complete at Op %p\n", 736 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "AML package complete at Op %p\n",
825 op)); 737 op));
@@ -838,8 +750,12 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
838 acpi_ps_next_parse_state(walk_state, op, 750 acpi_ps_next_parse_state(walk_state, op,
839 status); 751 status);
840 if (status == AE_CTRL_PENDING) { 752 if (status == AE_CTRL_PENDING) {
841 status = AE_OK; 753 status =
842 goto close_this_op; 754 acpi_ps_complete_op(walk_state, &op,
755 AE_OK);
756 if (ACPI_FAILURE(status)) {
757 return_ACPI_STATUS(status);
758 }
843 } 759 }
844 760
845 if (status == AE_CTRL_TERMINATE) { 761 if (status == AE_CTRL_TERMINATE) {
@@ -858,7 +774,9 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
858 } 774 }
859 } 775 }
860 776
861 acpi_ps_pop_scope(parser_state, 777 acpi_ps_pop_scope(&
778 (walk_state->
779 parser_state),
862 &op, 780 &op,
863 &walk_state-> 781 &walk_state->
864 arg_types, 782 arg_types,
@@ -887,10 +805,252 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
887 } 805 }
888 } 806 }
889 807
890 acpi_ps_pop_scope(parser_state, &op, &walk_state->arg_types, 808 acpi_ps_pop_scope(&(walk_state->parser_state), &op,
809 &walk_state->arg_types,
891 &walk_state->arg_count); 810 &walk_state->arg_count);
892 811
893 } while (op); 812 } while (op);
894 813
895 return_ACPI_STATUS(status); 814 return_ACPI_STATUS(status);
896} 815}
816
817/*******************************************************************************
818 *
819 * FUNCTION: acpi_ps_parse_loop
820 *
821 * PARAMETERS: walk_state - Current state
822 *
823 * RETURN: Status
824 *
825 * DESCRIPTION: Parse AML (pointed to by the current parser state) and return
826 * a tree of ops.
827 *
828 ******************************************************************************/
829
830acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
831{
832 acpi_status status = AE_OK;
833 union acpi_parse_object *op = NULL; /* current op */
834 struct acpi_parse_state *parser_state;
835 u8 *aml_op_start = NULL;
836
837 ACPI_FUNCTION_TRACE_PTR(ps_parse_loop, walk_state);
838
839 if (walk_state->descending_callback == NULL) {
840 return_ACPI_STATUS(AE_BAD_PARAMETER);
841 }
842
843 parser_state = &walk_state->parser_state;
844 walk_state->arg_types = 0;
845
846#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY))
847
848 if (walk_state->walk_type & ACPI_WALK_METHOD_RESTART) {
849
850 /* We are restarting a preempted control method */
851
852 if (acpi_ps_has_completed_scope(parser_state)) {
853 /*
854 * We must check if a predicate to an IF or WHILE statement
855 * was just completed
856 */
857 if ((parser_state->scope->parse_scope.op) &&
858 ((parser_state->scope->parse_scope.op->common.
859 aml_opcode == AML_IF_OP)
860 || (parser_state->scope->parse_scope.op->common.
861 aml_opcode == AML_WHILE_OP))
862 && (walk_state->control_state)
863 && (walk_state->control_state->common.state ==
864 ACPI_CONTROL_PREDICATE_EXECUTING)) {
865 /*
866 * A predicate was just completed, get the value of the
867 * predicate and branch based on that value
868 */
869 walk_state->op = NULL;
870 status =
871 acpi_ds_get_predicate_value(walk_state,
872 ACPI_TO_POINTER
873 (TRUE));
874 if (ACPI_FAILURE(status)
875 && ((status & AE_CODE_MASK) !=
876 AE_CODE_CONTROL)) {
877 if (status == AE_AML_NO_RETURN_VALUE) {
878 ACPI_EXCEPTION((AE_INFO, status,
879 "Invoked method did not return a value"));
880
881 }
882
883 ACPI_EXCEPTION((AE_INFO, status,
884 "GetPredicate Failed"));
885 return_ACPI_STATUS(status);
886 }
887
888 status =
889 acpi_ps_next_parse_state(walk_state, op,
890 status);
891 }
892
893 acpi_ps_pop_scope(parser_state, &op,
894 &walk_state->arg_types,
895 &walk_state->arg_count);
896 ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
897 "Popped scope, Op=%p\n", op));
898 } else if (walk_state->prev_op) {
899
900 /* We were in the middle of an op */
901
902 op = walk_state->prev_op;
903 walk_state->arg_types = walk_state->prev_arg_types;
904 }
905 }
906#endif
907
908 /* Iterative parsing loop, while there is more AML to process: */
909
910 while ((parser_state->aml < parser_state->aml_end) || (op)) {
911 aml_op_start = parser_state->aml;
912 if (!op) {
913 status =
914 acpi_ps_create_op(walk_state, aml_op_start, &op);
915 if (ACPI_FAILURE(status)) {
916 if (status == AE_CTRL_PARSE_CONTINUE) {
917 continue;
918 }
919
920 if (status == AE_CTRL_PARSE_PENDING) {
921 status = AE_OK;
922 }
923
924 status =
925 acpi_ps_complete_op(walk_state, &op,
926 status);
927 if (ACPI_FAILURE(status)) {
928 return_ACPI_STATUS(status);
929 }
930
931 continue;
932 }
933
934 op->common.aml_offset = walk_state->aml_offset;
935
936 if (walk_state->op_info) {
937 ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
938 "Opcode %4.4X [%s] Op %p Aml %p AmlOffset %5.5X\n",
939 (u32) op->common.aml_opcode,
940 walk_state->op_info->name, op,
941 parser_state->aml,
942 op->common.aml_offset));
943 }
944 }
945
946 /*
947 * Start arg_count at zero because we don't know if there are
948 * any args yet
949 */
950 walk_state->arg_count = 0;
951
952 /* Are there any arguments that must be processed? */
953
954 if (walk_state->arg_types) {
955
956 /* Get arguments */
957
958 status =
959 acpi_ps_get_arguments(walk_state, aml_op_start, op);
960 if (ACPI_FAILURE(status)) {
961 status =
962 acpi_ps_complete_op(walk_state, &op,
963 status);
964 if (ACPI_FAILURE(status)) {
965 return_ACPI_STATUS(status);
966 }
967
968 continue;
969 }
970 }
971
972 /* Check for arguments that need to be processed */
973
974 if (walk_state->arg_count) {
975 /*
976 * There are arguments (complex ones), push Op and
977 * prepare for argument
978 */
979 status = acpi_ps_push_scope(parser_state, op,
980 walk_state->arg_types,
981 walk_state->arg_count);
982 if (ACPI_FAILURE(status)) {
983 status =
984 acpi_ps_complete_op(walk_state, &op,
985 status);
986 if (ACPI_FAILURE(status)) {
987 return_ACPI_STATUS(status);
988 }
989
990 continue;
991 }
992
993 op = NULL;
994 continue;
995 }
996
997 /*
998 * All arguments have been processed -- Op is complete,
999 * prepare for next
1000 */
1001 walk_state->op_info =
1002 acpi_ps_get_opcode_info(op->common.aml_opcode);
1003 if (walk_state->op_info->flags & AML_NAMED) {
1004 if (acpi_gbl_depth) {
1005 acpi_gbl_depth--;
1006 }
1007
1008 if (op->common.aml_opcode == AML_REGION_OP) {
1009 /*
1010 * Skip parsing of control method or opregion body,
1011 * because we don't have enough info in the first pass
1012 * to parse them correctly.
1013 *
1014 * Completed parsing an op_region declaration, we now
1015 * know the length.
1016 */
1017 op->named.length =
1018 (u32) (parser_state->aml - op->named.data);
1019 }
1020 }
1021
1022 if (walk_state->op_info->flags & AML_CREATE) {
1023 /*
1024 * Backup to beginning of create_xXXfield declaration (1 for
1025 * Opcode)
1026 *
1027 * body_length is unknown until we parse the body
1028 */
1029 op->named.length =
1030 (u32) (parser_state->aml - op->named.data);
1031 }
1032
1033 /* This op complete, notify the dispatcher */
1034
1035 if (walk_state->ascending_callback != NULL) {
1036 walk_state->op = op;
1037 walk_state->opcode = op->common.aml_opcode;
1038
1039 status = walk_state->ascending_callback(walk_state);
1040 status =
1041 acpi_ps_next_parse_state(walk_state, op, status);
1042 if (status == AE_CTRL_PENDING) {
1043 status = AE_OK;
1044 }
1045 }
1046
1047 status = acpi_ps_complete_op(walk_state, &op, status);
1048 if (ACPI_FAILURE(status)) {
1049 return_ACPI_STATUS(status);
1050 }
1051
1052 } /* while parser_state->Aml */
1053
1054 status = acpi_ps_complete_final_op(walk_state, op, status);
1055 return_ACPI_STATUS(status);
1056}
diff --git a/drivers/acpi/parser/psopcode.c b/drivers/acpi/parser/psopcode.c
index 4bd25e32769f..16d8b6cc3c22 100644
--- a/drivers/acpi/parser/psopcode.c
+++ b/drivers/acpi/parser/psopcode.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/parser/psparse.c b/drivers/acpi/parser/psparse.c
index a02aa62fe1e5..5d63f48e56b5 100644
--- a/drivers/acpi/parser/psparse.c
+++ b/drivers/acpi/parser/psparse.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -540,6 +540,11 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
540 540
541 if ((status == AE_ALREADY_EXISTS) && 541 if ((status == AE_ALREADY_EXISTS) &&
542 (!walk_state->method_desc->method.mutex)) { 542 (!walk_state->method_desc->method.mutex)) {
543 ACPI_INFO((AE_INFO,
544 "Marking method %4.4s as Serialized",
545 walk_state->method_node->name.
546 ascii));
547
543 /* 548 /*
544 * Method tried to create an object twice. The probable cause is 549 * Method tried to create an object twice. The probable cause is
545 * that the method cannot handle reentrancy. 550 * that the method cannot handle reentrancy.
diff --git a/drivers/acpi/parser/psscope.c b/drivers/acpi/parser/psscope.c
index a3e0314de24d..77cfa4ed0cfe 100644
--- a/drivers/acpi/parser/psscope.c
+++ b/drivers/acpi/parser/psscope.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/parser/pstree.c b/drivers/acpi/parser/pstree.c
index 0015717ef096..966e7ea2a0c4 100644
--- a/drivers/acpi/parser/pstree.c
+++ b/drivers/acpi/parser/pstree.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/parser/psutils.c b/drivers/acpi/parser/psutils.c
index d405387b7414..8ca52002db55 100644
--- a/drivers/acpi/parser/psutils.c
+++ b/drivers/acpi/parser/psutils.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/parser/pswalk.c b/drivers/acpi/parser/pswalk.c
index a84a547a0f1b..49f9757434e4 100644
--- a/drivers/acpi/parser/pswalk.c
+++ b/drivers/acpi/parser/pswalk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/parser/psxface.c b/drivers/acpi/parser/psxface.c
index 5d996c1140af..94103bced75e 100644
--- a/drivers/acpi/parser/psxface.c
+++ b/drivers/acpi/parser/psxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -54,8 +54,6 @@ static void acpi_ps_start_trace(struct acpi_evaluate_info *info);
54 54
55static void acpi_ps_stop_trace(struct acpi_evaluate_info *info); 55static void acpi_ps_stop_trace(struct acpi_evaluate_info *info);
56 56
57static acpi_status acpi_ps_execute_pass(struct acpi_evaluate_info *info);
58
59static void 57static void
60acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action); 58acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action);
61 59
@@ -215,6 +213,8 @@ static void acpi_ps_stop_trace(struct acpi_evaluate_info *info)
215acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info) 213acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
216{ 214{
217 acpi_status status; 215 acpi_status status;
216 union acpi_parse_object *op;
217 struct acpi_walk_state *walk_state;
218 218
219 ACPI_FUNCTION_TRACE(ps_execute_method); 219 ACPI_FUNCTION_TRACE(ps_execute_method);
220 220
@@ -234,8 +234,7 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
234 } 234 }
235 235
236 /* 236 /*
237 * The caller "owns" the parameters, so give each one an extra 237 * The caller "owns" the parameters, so give each one an extra reference
238 * reference
239 */ 238 */
240 acpi_ps_update_parameter_list(info, REF_INCREMENT); 239 acpi_ps_update_parameter_list(info, REF_INCREMENT);
241 240
@@ -244,30 +243,50 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
244 acpi_ps_start_trace(info); 243 acpi_ps_start_trace(info);
245 244
246 /* 245 /*
247 * 1) Perform the first pass parse of the method to enter any 246 * Execute the method. Performs parse simultaneously
248 * named objects that it creates into the namespace
249 */ 247 */
250 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, 248 ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
251 "**** Begin Method Parse **** Entry=%p obj=%p\n", 249 "**** Begin Method Parse/Execute [%4.4s] **** Node=%p Obj=%p\n",
252 info->resolved_node, info->obj_desc)); 250 info->resolved_node->name.ascii, info->resolved_node,
251 info->obj_desc));
252
253 /* Create and init a Root Node */
254
255 op = acpi_ps_create_scope_op();
256 if (!op) {
257 status = AE_NO_MEMORY;
258 goto cleanup;
259 }
260
261 /* Create and initialize a new walk state */
262
263 info->pass_number = ACPI_IMODE_EXECUTE;
264 walk_state =
265 acpi_ds_create_walk_state(info->obj_desc->method.owner_id, NULL,
266 NULL, NULL);
267 if (!walk_state) {
268 status = AE_NO_MEMORY;
269 goto cleanup;
270 }
253 271
254 info->pass_number = 1; 272 status = acpi_ds_init_aml_walk(walk_state, op, info->resolved_node,
255 status = acpi_ps_execute_pass(info); 273 info->obj_desc->method.aml_start,
274 info->obj_desc->method.aml_length, info,
275 info->pass_number);
256 if (ACPI_FAILURE(status)) { 276 if (ACPI_FAILURE(status)) {
277 acpi_ds_delete_walk_state(walk_state);
257 goto cleanup; 278 goto cleanup;
258 } 279 }
259 280
260 /* 281 /* Parse the AML */
261 * 2) Execute the method. Performs second pass parse simultaneously
262 */
263 ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
264 "**** Begin Method Execution **** Entry=%p obj=%p\n",
265 info->resolved_node, info->obj_desc));
266 282
267 info->pass_number = 3; 283 status = acpi_ps_parse_aml(walk_state);
268 status = acpi_ps_execute_pass(info); 284
285 /* walk_state was deleted by parse_aml */
269 286
270 cleanup: 287 cleanup:
288 acpi_ps_delete_parse_tree(op);
289
271 /* End optional tracing */ 290 /* End optional tracing */
272 291
273 acpi_ps_stop_trace(info); 292 acpi_ps_stop_trace(info);
@@ -330,62 +349,3 @@ acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action)
330 } 349 }
331 } 350 }
332} 351}
333
334/*******************************************************************************
335 *
336 * FUNCTION: acpi_ps_execute_pass
337 *
338 * PARAMETERS: Info - See struct acpi_evaluate_info
339 * (Used: pass_number, Node, and obj_desc)
340 *
341 * RETURN: Status
342 *
343 * DESCRIPTION: Single AML pass: Parse or Execute a control method
344 *
345 ******************************************************************************/
346
347static acpi_status acpi_ps_execute_pass(struct acpi_evaluate_info *info)
348{
349 acpi_status status;
350 union acpi_parse_object *op;
351 struct acpi_walk_state *walk_state;
352
353 ACPI_FUNCTION_TRACE(ps_execute_pass);
354
355 /* Create and init a Root Node */
356
357 op = acpi_ps_create_scope_op();
358 if (!op) {
359 return_ACPI_STATUS(AE_NO_MEMORY);
360 }
361
362 /* Create and initialize a new walk state */
363
364 walk_state =
365 acpi_ds_create_walk_state(info->obj_desc->method.owner_id, NULL,
366 NULL, NULL);
367 if (!walk_state) {
368 status = AE_NO_MEMORY;
369 goto cleanup;
370 }
371
372 status = acpi_ds_init_aml_walk(walk_state, op, info->resolved_node,
373 info->obj_desc->method.aml_start,
374 info->obj_desc->method.aml_length,
375 info->pass_number == 1 ? NULL : info,
376 info->pass_number);
377 if (ACPI_FAILURE(status)) {
378 acpi_ds_delete_walk_state(walk_state);
379 goto cleanup;
380 }
381
382 /* Parse the AML */
383
384 status = acpi_ps_parse_aml(walk_state);
385
386 /* Walk state was deleted by parse_aml */
387
388 cleanup:
389 acpi_ps_delete_parse_tree(op);
390 return_ACPI_STATUS(status);
391}
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 481e633bbf41..0f683c8c6fbc 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -513,7 +513,7 @@ int __init acpi_irq_penalty_init(void)
513 } 513 }
514 } 514 }
515 /* Add a penalty for the SCI */ 515 /* Add a penalty for the SCI */
516 acpi_irq_penalty[acpi_fadt.sci_int] += PIRQ_PENALTY_PCI_USING; 516 acpi_irq_penalty[acpi_gbl_FADT.sci_interrupt] += PIRQ_PENALTY_PCI_USING;
517 517
518 return 0; 518 return 0;
519} 519}
@@ -785,7 +785,7 @@ static int irqrouter_resume(struct sys_device *dev)
785 785
786 786
787 /* Make sure SCI is enabled again (Apple firmware bug?) */ 787 /* Make sure SCI is enabled again (Apple firmware bug?) */
788 acpi_set_register(ACPI_BITREG_SCI_ENABLE, 1, ACPI_MTX_DO_NOT_LOCK); 788 acpi_set_register(ACPI_BITREG_SCI_ENABLE, 1);
789 789
790 list_for_each(node, &acpi_link.entries) { 790 list_for_each(node, &acpi_link.entries) {
791 link = list_entry(node, struct acpi_pci_link, node); 791 link = list_entry(node, struct acpi_pci_link, node);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index a860efa2c562..4ecf701687e8 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -117,6 +117,19 @@ void acpi_pci_unregister_driver(struct acpi_pci_driver *driver)
117 117
118EXPORT_SYMBOL(acpi_pci_unregister_driver); 118EXPORT_SYMBOL(acpi_pci_unregister_driver);
119 119
120acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
121{
122 struct acpi_pci_root *tmp;
123
124 list_for_each_entry(tmp, &acpi_pci_roots, node) {
125 if ((tmp->id.segment == (u16) seg) && (tmp->id.bus == (u16) bus))
126 return tmp->device->handle;
127 }
128 return NULL;
129}
130
131EXPORT_SYMBOL_GPL(acpi_get_pci_rootbridge_handle);
132
120static acpi_status 133static acpi_status
121get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data) 134get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
122{ 135{
@@ -152,6 +165,21 @@ static acpi_status try_get_root_bridge_busnr(acpi_handle handle, int *busnum)
152 return AE_OK; 165 return AE_OK;
153} 166}
154 167
168static void acpi_pci_bridge_scan(struct acpi_device *device)
169{
170 int status;
171 struct acpi_device *child = NULL;
172
173 if (device->flags.bus_address)
174 if (device->parent && device->parent->ops.bind) {
175 status = device->parent->ops.bind(device);
176 if (!status) {
177 list_for_each_entry(child, &device->children, node)
178 acpi_pci_bridge_scan(child);
179 }
180 }
181}
182
155static int acpi_pci_root_add(struct acpi_device *device) 183static int acpi_pci_root_add(struct acpi_device *device)
156{ 184{
157 int result = 0; 185 int result = 0;
@@ -160,6 +188,7 @@ static int acpi_pci_root_add(struct acpi_device *device)
160 acpi_status status = AE_OK; 188 acpi_status status = AE_OK;
161 unsigned long value = 0; 189 unsigned long value = 0;
162 acpi_handle handle = NULL; 190 acpi_handle handle = NULL;
191 struct acpi_device *child;
163 192
164 193
165 if (!device) 194 if (!device)
@@ -175,9 +204,6 @@ static int acpi_pci_root_add(struct acpi_device *device)
175 strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS); 204 strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
176 acpi_driver_data(device) = root; 205 acpi_driver_data(device) = root;
177 206
178 /*
179 * TBD: Doesn't the bus driver automatically set this?
180 */
181 device->ops.bind = acpi_pci_bind; 207 device->ops.bind = acpi_pci_bind;
182 208
183 /* 209 /*
@@ -299,6 +325,12 @@ static int acpi_pci_root_add(struct acpi_device *device)
299 result = acpi_pci_irq_add_prt(device->handle, root->id.segment, 325 result = acpi_pci_irq_add_prt(device->handle, root->id.segment,
300 root->id.bus); 326 root->id.bus);
301 327
328 /*
329 * Scan and bind all _ADR-Based Devices
330 */
331 list_for_each_entry(child, &device->children, node)
332 acpi_pci_bridge_scan(child);
333
302 end: 334 end:
303 if (result) { 335 if (result) {
304 if (!list_empty(&root->node)) 336 if (!list_empty(&root->node))
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 5f9496d59ed6..0079bc51082c 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -375,30 +375,126 @@ static int acpi_processor_remove_fs(struct acpi_device *device)
375} 375}
376 376
377/* Use the acpiid in MADT to map cpus in case of SMP */ 377/* Use the acpiid in MADT to map cpus in case of SMP */
378
378#ifndef CONFIG_SMP 379#ifndef CONFIG_SMP
379#define convert_acpiid_to_cpu(acpi_id) (-1) 380static int get_cpu_id(acpi_handle handle, u32 acpi_id) {return -1;}
380#else 381#else
381 382
383static struct acpi_table_madt *madt;
384
385static int map_lapic_id(struct acpi_subtable_header *entry,
386 u32 acpi_id, int *apic_id)
387{
388 struct acpi_madt_local_apic *lapic =
389 (struct acpi_madt_local_apic *)entry;
390 if ((lapic->lapic_flags & ACPI_MADT_ENABLED) &&
391 lapic->processor_id == acpi_id) {
392 *apic_id = lapic->id;
393 return 1;
394 }
395 return 0;
396}
397
398static int map_lsapic_id(struct acpi_subtable_header *entry,
399 u32 acpi_id, int *apic_id)
400{
401 struct acpi_madt_local_sapic *lsapic =
402 (struct acpi_madt_local_sapic *)entry;
403 /* Only check enabled APICs*/
404 if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
405 /* First check against id */
406 if (lsapic->processor_id == acpi_id) {
407 *apic_id = lsapic->id;
408 return 1;
409 /* Check against optional uid */
410 } else if (entry->length >= 16 &&
411 lsapic->uid == acpi_id) {
412 *apic_id = lsapic->uid;
413 return 1;
414 }
415 }
416 return 0;
417}
418
382#ifdef CONFIG_IA64 419#ifdef CONFIG_IA64
383#define arch_acpiid_to_apicid ia64_acpiid_to_sapicid
384#define arch_cpu_to_apicid ia64_cpu_to_sapicid 420#define arch_cpu_to_apicid ia64_cpu_to_sapicid
385#define ARCH_BAD_APICID (0xffff)
386#else 421#else
387#define arch_acpiid_to_apicid x86_acpiid_to_apicid
388#define arch_cpu_to_apicid x86_cpu_to_apicid 422#define arch_cpu_to_apicid x86_cpu_to_apicid
389#define ARCH_BAD_APICID (0xff)
390#endif 423#endif
391 424
392static int convert_acpiid_to_cpu(u8 acpi_id) 425static int map_madt_entry(u32 acpi_id)
426{
427 unsigned long madt_end, entry;
428 int apic_id = -1;
429
430 if (!madt)
431 return apic_id;
432
433 entry = (unsigned long)madt;
434 madt_end = entry + madt->header.length;
435
436 /* Parse all entries looking for a match. */
437
438 entry += sizeof(struct acpi_table_madt);
439 while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
440 struct acpi_subtable_header *header =
441 (struct acpi_subtable_header *)entry;
442 if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
443 if (map_lapic_id(header, acpi_id, &apic_id))
444 break;
445 } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
446 if (map_lsapic_id(header, acpi_id, &apic_id))
447 break;
448 }
449 entry += header->length;
450 }
451 return apic_id;
452}
453
454static int map_mat_entry(acpi_handle handle, u32 acpi_id)
455{
456 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
457 union acpi_object *obj;
458 struct acpi_subtable_header *header;
459 int apic_id = -1;
460
461 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
462 goto exit;
463
464 if (!buffer.length || !buffer.pointer)
465 goto exit;
466
467 obj = buffer.pointer;
468 if (obj->type != ACPI_TYPE_BUFFER ||
469 obj->buffer.length < sizeof(struct acpi_subtable_header)) {
470 goto exit;
471 }
472
473 header = (struct acpi_subtable_header *)obj->buffer.pointer;
474 if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
475 map_lapic_id(header, acpi_id, &apic_id);
476 } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
477 map_lsapic_id(header, acpi_id, &apic_id);
478 }
479
480exit:
481 if (buffer.pointer)
482 kfree(buffer.pointer);
483 return apic_id;
484}
485
486static int get_cpu_id(acpi_handle handle, u32 acpi_id)
393{ 487{
394 u16 apic_id;
395 int i; 488 int i;
489 int apic_id = -1;
396 490
397 apic_id = arch_acpiid_to_apicid[acpi_id]; 491 apic_id = map_mat_entry(handle, acpi_id);
398 if (apic_id == ARCH_BAD_APICID) 492 if (apic_id == -1)
399 return -1; 493 apic_id = map_madt_entry(acpi_id);
494 if (apic_id == -1)
495 return apic_id;
400 496
401 for (i = 0; i < NR_CPUS; i++) { 497 for (i = 0; i < NR_CPUS; ++i) {
402 if (arch_cpu_to_apicid[i] == apic_id) 498 if (arch_cpu_to_apicid[i] == apic_id)
403 return i; 499 return i;
404 } 500 }
@@ -410,7 +506,7 @@ static int convert_acpiid_to_cpu(u8 acpi_id)
410 Driver Interface 506 Driver Interface
411 -------------------------------------------------------------------------- */ 507 -------------------------------------------------------------------------- */
412 508
413static int acpi_processor_get_info(struct acpi_processor *pr) 509static int acpi_processor_get_info(struct acpi_processor *pr, unsigned has_uid)
414{ 510{
415 acpi_status status = 0; 511 acpi_status status = 0;
416 union acpi_object object = { 0 }; 512 union acpi_object object = { 0 };
@@ -431,7 +527,7 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
431 * Check to see if we have bus mastering arbitration control. This 527 * Check to see if we have bus mastering arbitration control. This
432 * is required for proper C3 usage (to maintain cache coherency). 528 * is required for proper C3 usage (to maintain cache coherency).
433 */ 529 */
434 if (acpi_fadt.V1_pm2_cnt_blk && acpi_fadt.pm2_cnt_len) { 530 if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
435 pr->flags.bm_control = 1; 531 pr->flags.bm_control = 1;
436 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 532 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
437 "Bus mastering arbitration control present\n")); 533 "Bus mastering arbitration control present\n"));
@@ -439,24 +535,35 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
439 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 535 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
440 "No bus mastering arbitration control\n")); 536 "No bus mastering arbitration control\n"));
441 537
442 /* 538 /* Check if it is a Device with HID and UID */
443 * Evalute the processor object. Note that it is common on SMP to 539 if (has_uid) {
444 * have the first (boot) processor with a valid PBLK address while 540 unsigned long value;
445 * all others have a NULL address. 541 status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
446 */ 542 NULL, &value);
447 status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer); 543 if (ACPI_FAILURE(status)) {
448 if (ACPI_FAILURE(status)) { 544 printk(KERN_ERR PREFIX "Evaluating processor _UID\n");
449 printk(KERN_ERR PREFIX "Evaluating processor object\n"); 545 return -ENODEV;
450 return -ENODEV; 546 }
451 } 547 pr->acpi_id = value;
452 548 } else {
453 /* 549 /*
454 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP. 550 * Evalute the processor object. Note that it is common on SMP to
455 * >>> 'acpi_get_processor_id(acpi_id, &id)' in arch/xxx/acpi.c 551 * have the first (boot) processor with a valid PBLK address while
456 */ 552 * all others have a NULL address.
457 pr->acpi_id = object.processor.proc_id; 553 */
554 status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
555 if (ACPI_FAILURE(status)) {
556 printk(KERN_ERR PREFIX "Evaluating processor object\n");
557 return -ENODEV;
558 }
458 559
459 cpu_index = convert_acpiid_to_cpu(pr->acpi_id); 560 /*
561 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
562 * >>> 'acpi_get_processor_id(acpi_id, &id)' in arch/xxx/acpi.c
563 */
564 pr->acpi_id = object.processor.proc_id;
565 }
566 cpu_index = get_cpu_id(pr->handle, pr->acpi_id);
460 567
461 /* Handle UP system running SMP kernel, with no LAPIC in MADT */ 568 /* Handle UP system running SMP kernel, with no LAPIC in MADT */
462 if (!cpu0_initialized && (cpu_index == -1) && 569 if (!cpu0_initialized && (cpu_index == -1) &&
@@ -473,7 +580,7 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
473 * less than the max # of CPUs. They should be ignored _iff 580 * less than the max # of CPUs. They should be ignored _iff
474 * they are physically not present. 581 * they are physically not present.
475 */ 582 */
476 if (cpu_index == -1) { 583 if (pr->id == -1) {
477 if (ACPI_FAILURE 584 if (ACPI_FAILURE
478 (acpi_processor_hotadd_init(pr->handle, &pr->id))) { 585 (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
479 return -ENODEV; 586 return -ENODEV;
@@ -490,8 +597,8 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
490 object.processor.pblk_length); 597 object.processor.pblk_length);
491 else { 598 else {
492 pr->throttling.address = object.processor.pblk_address; 599 pr->throttling.address = object.processor.pblk_address;
493 pr->throttling.duty_offset = acpi_fadt.duty_offset; 600 pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
494 pr->throttling.duty_width = acpi_fadt.duty_width; 601 pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
495 602
496 pr->pblk = object.processor.pblk_address; 603 pr->pblk = object.processor.pblk_address;
497 604
@@ -525,7 +632,7 @@ static int __cpuinit acpi_processor_start(struct acpi_device *device)
525 632
526 pr = acpi_driver_data(device); 633 pr = acpi_driver_data(device);
527 634
528 result = acpi_processor_get_info(pr); 635 result = acpi_processor_get_info(pr, device->flags.unique_id);
529 if (result) { 636 if (result) {
530 /* Processor is physically not present */ 637 /* Processor is physically not present */
531 return 0; 638 return 0;
@@ -707,7 +814,7 @@ int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
707 return -ENODEV; 814 return -ENODEV;
708 815
709 if ((pr->id >= 0) && (pr->id < NR_CPUS)) { 816 if ((pr->id >= 0) && (pr->id < NR_CPUS)) {
710 kobject_uevent(&(*device)->kobj, KOBJ_ONLINE); 817 kobject_uevent(&(*device)->dev.kobj, KOBJ_ONLINE);
711 } 818 }
712 return 0; 819 return 0;
713} 820}
@@ -745,13 +852,13 @@ acpi_processor_hotplug_notify(acpi_handle handle, u32 event, void *data)
745 } 852 }
746 853
747 if (pr->id >= 0 && (pr->id < NR_CPUS)) { 854 if (pr->id >= 0 && (pr->id < NR_CPUS)) {
748 kobject_uevent(&device->kobj, KOBJ_OFFLINE); 855 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
749 break; 856 break;
750 } 857 }
751 858
752 result = acpi_processor_start(device); 859 result = acpi_processor_start(device);
753 if ((!result) && ((pr->id >= 0) && (pr->id < NR_CPUS))) { 860 if ((!result) && ((pr->id >= 0) && (pr->id < NR_CPUS))) {
754 kobject_uevent(&device->kobj, KOBJ_ONLINE); 861 kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
755 } else { 862 } else {
756 printk(KERN_ERR PREFIX "Device [%s] failed to start\n", 863 printk(KERN_ERR PREFIX "Device [%s] failed to start\n",
757 acpi_device_bid(device)); 864 acpi_device_bid(device));
@@ -774,7 +881,7 @@ acpi_processor_hotplug_notify(acpi_handle handle, u32 event, void *data)
774 } 881 }
775 882
776 if ((pr->id < NR_CPUS) && (cpu_present(pr->id))) 883 if ((pr->id < NR_CPUS) && (cpu_present(pr->id)))
777 kobject_uevent(&device->kobj, KOBJ_OFFLINE); 884 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
778 break; 885 break;
779 default: 886 default:
780 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 887 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -895,6 +1002,12 @@ static int __init acpi_processor_init(void)
895 memset(&processors, 0, sizeof(processors)); 1002 memset(&processors, 0, sizeof(processors));
896 memset(&errata, 0, sizeof(errata)); 1003 memset(&errata, 0, sizeof(errata));
897 1004
1005#ifdef CONFIG_SMP
1006 if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
1007 (struct acpi_table_header **)&madt)))
1008 madt = 0;
1009#endif
1010
898 acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir); 1011 acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
899 if (!acpi_processor_dir) 1012 if (!acpi_processor_dir)
900 return -ENOMEM; 1013 return -ENOMEM;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 3f30af21574e..6c6751b1405b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -160,7 +160,7 @@ static inline u32 ticks_elapsed(u32 t1, u32 t2)
160{ 160{
161 if (t2 >= t1) 161 if (t2 >= t1)
162 return (t2 - t1); 162 return (t2 - t1);
163 else if (!acpi_fadt.tmr_val_ext) 163 else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
164 return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF); 164 return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
165 else 165 else
166 return ((0xFFFFFFFF - t1) + t2); 166 return ((0xFFFFFFFF - t1) + t2);
@@ -187,8 +187,7 @@ acpi_processor_power_activate(struct acpi_processor *pr,
187 case ACPI_STATE_C3: 187 case ACPI_STATE_C3:
188 /* Disable bus master reload */ 188 /* Disable bus master reload */
189 if (new->type != ACPI_STATE_C3 && pr->flags.bm_check) 189 if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
190 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0, 190 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
191 ACPI_MTX_DO_NOT_LOCK);
192 break; 191 break;
193 } 192 }
194 } 193 }
@@ -198,8 +197,7 @@ acpi_processor_power_activate(struct acpi_processor *pr,
198 case ACPI_STATE_C3: 197 case ACPI_STATE_C3:
199 /* Enable bus master reload */ 198 /* Enable bus master reload */
200 if (old->type != ACPI_STATE_C3 && pr->flags.bm_check) 199 if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
201 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1, 200 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
202 ACPI_MTX_DO_NOT_LOCK);
203 break; 201 break;
204 } 202 }
205 203
@@ -236,7 +234,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
236 /* Dummy wait op - must do something useless after P_LVL2 read 234 /* Dummy wait op - must do something useless after P_LVL2 read
237 because chipsets cannot guarantee that STPCLK# signal 235 because chipsets cannot guarantee that STPCLK# signal
238 gets asserted in time to freeze execution properly. */ 236 gets asserted in time to freeze execution properly. */
239 unused = inl(acpi_fadt.xpm_tmr_blk.address); 237 unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
240 } 238 }
241} 239}
242 240
@@ -291,12 +289,10 @@ static void acpi_processor_idle(void)
291 289
292 pr->power.bm_activity <<= diff; 290 pr->power.bm_activity <<= diff;
293 291
294 acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, 292 acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
295 &bm_status, ACPI_MTX_DO_NOT_LOCK);
296 if (bm_status) { 293 if (bm_status) {
297 pr->power.bm_activity |= 0x1; 294 pr->power.bm_activity |= 0x1;
298 acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 295 acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
299 1, ACPI_MTX_DO_NOT_LOCK);
300 } 296 }
301 /* 297 /*
302 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect 298 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
@@ -338,7 +334,7 @@ static void acpi_processor_idle(void)
338 * detection phase, to work cleanly with logical CPU hotplug. 334 * detection phase, to work cleanly with logical CPU hotplug.
339 */ 335 */
340 if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && 336 if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
341 !pr->flags.has_cst && !acpi_fadt.plvl2_up) 337 !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
342 cx = &pr->power.states[ACPI_STATE_C1]; 338 cx = &pr->power.states[ACPI_STATE_C1];
343#endif 339#endif
344 340
@@ -384,11 +380,11 @@ static void acpi_processor_idle(void)
384 380
385 case ACPI_STATE_C2: 381 case ACPI_STATE_C2:
386 /* Get start time (ticks) */ 382 /* Get start time (ticks) */
387 t1 = inl(acpi_fadt.xpm_tmr_blk.address); 383 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
388 /* Invoke C2 */ 384 /* Invoke C2 */
389 acpi_cstate_enter(cx); 385 acpi_cstate_enter(cx);
390 /* Get end time (ticks) */ 386 /* Get end time (ticks) */
391 t2 = inl(acpi_fadt.xpm_tmr_blk.address); 387 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
392 388
393#ifdef CONFIG_GENERIC_TIME 389#ifdef CONFIG_GENERIC_TIME
394 /* TSC halts in C2, so notify users */ 390 /* TSC halts in C2, so notify users */
@@ -411,8 +407,7 @@ static void acpi_processor_idle(void)
411 * All CPUs are trying to go to C3 407 * All CPUs are trying to go to C3
412 * Disable bus master arbitration 408 * Disable bus master arbitration
413 */ 409 */
414 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1, 410 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
415 ACPI_MTX_DO_NOT_LOCK);
416 } 411 }
417 } else { 412 } else {
418 /* SMP with no shared cache... Invalidate cache */ 413 /* SMP with no shared cache... Invalidate cache */
@@ -420,16 +415,15 @@ static void acpi_processor_idle(void)
420 } 415 }
421 416
422 /* Get start time (ticks) */ 417 /* Get start time (ticks) */
423 t1 = inl(acpi_fadt.xpm_tmr_blk.address); 418 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
424 /* Invoke C3 */ 419 /* Invoke C3 */
425 acpi_cstate_enter(cx); 420 acpi_cstate_enter(cx);
426 /* Get end time (ticks) */ 421 /* Get end time (ticks) */
427 t2 = inl(acpi_fadt.xpm_tmr_blk.address); 422 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
428 if (pr->flags.bm_check) { 423 if (pr->flags.bm_check) {
429 /* Enable bus master arbitration */ 424 /* Enable bus master arbitration */
430 atomic_dec(&c3_cpu_count); 425 atomic_dec(&c3_cpu_count);
431 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, 426 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
432 ACPI_MTX_DO_NOT_LOCK);
433 } 427 }
434 428
435#ifdef CONFIG_GENERIC_TIME 429#ifdef CONFIG_GENERIC_TIME
@@ -457,7 +451,7 @@ static void acpi_processor_idle(void)
457#ifdef CONFIG_HOTPLUG_CPU 451#ifdef CONFIG_HOTPLUG_CPU
458 /* Don't do promotion/demotion */ 452 /* Don't do promotion/demotion */
459 if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) && 453 if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
460 !pr->flags.has_cst && !acpi_fadt.plvl2_up) { 454 !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
461 next_state = cx; 455 next_state = cx;
462 goto end; 456 goto end;
463 } 457 }
@@ -627,7 +621,8 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
627 * Check for P_LVL2_UP flag before entering C2 and above on 621 * Check for P_LVL2_UP flag before entering C2 and above on
628 * an SMP system. 622 * an SMP system.
629 */ 623 */
630 if ((num_online_cpus() > 1) && !acpi_fadt.plvl2_up) 624 if ((num_online_cpus() > 1) &&
625 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
631 return -ENODEV; 626 return -ENODEV;
632#endif 627#endif
633 628
@@ -636,8 +631,8 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
636 pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5; 631 pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
637 632
638 /* determine latencies from FADT */ 633 /* determine latencies from FADT */
639 pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat; 634 pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
640 pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat; 635 pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;
641 636
642 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 637 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
643 "lvl2[0x%08x] lvl3[0x%08x]\n", 638 "lvl2[0x%08x] lvl3[0x%08x]\n",
@@ -883,14 +878,13 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
883 * WBINVD should be set in fadt, for C3 state to be 878 * WBINVD should be set in fadt, for C3 state to be
884 * supported on when bm_check is not required. 879 * supported on when bm_check is not required.
885 */ 880 */
886 if (acpi_fadt.wb_invd != 1) { 881 if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
887 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 882 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
888 "Cache invalidation should work properly" 883 "Cache invalidation should work properly"
889 " for C3 to be enabled on SMP systems\n")); 884 " for C3 to be enabled on SMP systems\n"));
890 return; 885 return;
891 } 886 }
892 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 887 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
893 0, ACPI_MTX_DO_NOT_LOCK);
894 } 888 }
895 889
896 /* 890 /*
@@ -1096,7 +1090,7 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
1096 seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n", 1090 seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
1097 pr->power.states[i].latency, 1091 pr->power.states[i].latency,
1098 pr->power.states[i].usage, 1092 pr->power.states[i].usage,
1099 pr->power.states[i].time); 1093 (unsigned long long)pr->power.states[i].time);
1100 } 1094 }
1101 1095
1102 end: 1096 end:
@@ -1164,9 +1158,9 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1164 if (!pr) 1158 if (!pr)
1165 return -EINVAL; 1159 return -EINVAL;
1166 1160
1167 if (acpi_fadt.cst_cnt && !nocst) { 1161 if (acpi_gbl_FADT.cst_control && !nocst) {
1168 status = 1162 status =
1169 acpi_os_write_port(acpi_fadt.smi_cmd, acpi_fadt.cst_cnt, 8); 1163 acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
1170 if (ACPI_FAILURE(status)) { 1164 if (ACPI_FAILURE(status)) {
1171 ACPI_EXCEPTION((AE_INFO, status, 1165 ACPI_EXCEPTION((AE_INFO, status,
1172 "Notifying BIOS of _CST ability failed")); 1166 "Notifying BIOS of _CST ability failed"));
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index cbb6f0814ce2..058f13cf3b79 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -352,31 +352,24 @@ int acpi_processor_notify_smm(struct module *calling_module)
352 352
353 is_done = -EIO; 353 is_done = -EIO;
354 354
355 /* Can't write pstate_cnt to smi_cmd if either value is zero */ 355 /* Can't write pstate_control to smi_command if either value is zero */
356 if ((!acpi_fadt.smi_cmd) || (!acpi_fadt.pstate_cnt)) { 356 if ((!acpi_gbl_FADT.smi_command) || (!acpi_gbl_FADT.pstate_control)) {
357 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_cnt\n")); 357 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_control\n"));
358 module_put(calling_module); 358 module_put(calling_module);
359 return 0; 359 return 0;
360 } 360 }
361 361
362 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 362 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
363 "Writing pstate_cnt [0x%x] to smi_cmd [0x%x]\n", 363 "Writing pstate_control [0x%x] to smi_command [0x%x]\n",
364 acpi_fadt.pstate_cnt, acpi_fadt.smi_cmd)); 364 acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));
365 365
366 /* FADT v1 doesn't support pstate_cnt, many BIOS vendors use 366 status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
367 * it anyway, so we need to support it... */ 367 (u32) acpi_gbl_FADT.pstate_control, 8);
368 if (acpi_fadt_is_v1) {
369 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
370 "Using v1.0 FADT reserved value for pstate_cnt\n"));
371 }
372
373 status = acpi_os_write_port(acpi_fadt.smi_cmd,
374 (u32) acpi_fadt.pstate_cnt, 8);
375 if (ACPI_FAILURE(status)) { 368 if (ACPI_FAILURE(status)) {
376 ACPI_EXCEPTION((AE_INFO, status, 369 ACPI_EXCEPTION((AE_INFO, status,
377 "Failed to write pstate_cnt [0x%x] to " 370 "Failed to write pstate_control [0x%x] to "
378 "smi_cmd [0x%x]", acpi_fadt.pstate_cnt, 371 "smi_command [0x%x]", acpi_gbl_FADT.pstate_control,
379 acpi_fadt.smi_cmd)); 372 acpi_gbl_FADT.smi_command));
380 module_put(calling_module); 373 module_put(calling_module);
381 return status; 374 return status;
382 } 375 }
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 0ec7dcde0063..89dff3639abe 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -125,7 +125,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
125 /* Used to clear all duty_value bits */ 125 /* Used to clear all duty_value bits */
126 duty_mask = pr->throttling.state_count - 1; 126 duty_mask = pr->throttling.state_count - 1;
127 127
128 duty_mask <<= acpi_fadt.duty_offset; 128 duty_mask <<= acpi_gbl_FADT.duty_offset;
129 duty_mask = ~duty_mask; 129 duty_mask = ~duty_mask;
130 } 130 }
131 131
@@ -208,7 +208,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
208 return 0; 208 return 0;
209 } 209 }
210 210
211 pr->throttling.state_count = 1 << acpi_fadt.duty_width; 211 pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;
212 212
213 /* 213 /*
214 * Compute state values. Note that throttling displays a linear power/ 214 * Compute state values. Note that throttling displays a linear power/
diff --git a/drivers/acpi/resources/rsaddr.c b/drivers/acpi/resources/rsaddr.c
index 8fa3213ce000..271e61509eeb 100644
--- a/drivers/acpi/resources/rsaddr.c
+++ b/drivers/acpi/resources/rsaddr.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c
index cf87b0230026..8c6d3fdec38a 100644
--- a/drivers/acpi/resources/rscalc.c
+++ b/drivers/acpi/resources/rscalc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rscreate.c b/drivers/acpi/resources/rscreate.c
index 008058acdd39..1358c06a969c 100644
--- a/drivers/acpi/resources/rscreate.c
+++ b/drivers/acpi/resources/rscreate.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsdump.c b/drivers/acpi/resources/rsdump.c
index 9c99a723a860..de20a5d6decf 100644
--- a/drivers/acpi/resources/rsdump.c
+++ b/drivers/acpi/resources/rsdump.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsinfo.c b/drivers/acpi/resources/rsinfo.c
index 9e7ae2f8a1d3..7e3c335ab320 100644
--- a/drivers/acpi/resources/rsinfo.c
+++ b/drivers/acpi/resources/rsinfo.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsio.c b/drivers/acpi/resources/rsio.c
index ea567167c4f2..b297bc3e4419 100644
--- a/drivers/acpi/resources/rsio.c
+++ b/drivers/acpi/resources/rsio.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsirq.c b/drivers/acpi/resources/rsirq.c
index 1fa63bc2e36f..5657f7b95039 100644
--- a/drivers/acpi/resources/rsirq.c
+++ b/drivers/acpi/resources/rsirq.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rslist.c b/drivers/acpi/resources/rslist.c
index 29423ce030ca..a92755c8877d 100644
--- a/drivers/acpi/resources/rslist.c
+++ b/drivers/acpi/resources/rslist.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsmemory.c b/drivers/acpi/resources/rsmemory.c
index a5131936d690..521eab7dd8df 100644
--- a/drivers/acpi/resources/rsmemory.c
+++ b/drivers/acpi/resources/rsmemory.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsmisc.c b/drivers/acpi/resources/rsmisc.c
index faf6e106b785..3b63b561b94e 100644
--- a/drivers/acpi/resources/rsmisc.c
+++ b/drivers/acpi/resources/rsmisc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsutils.c b/drivers/acpi/resources/rsutils.c
index a9cbee8e8b44..2442a8f8df57 100644
--- a/drivers/acpi/resources/rsutils.c
+++ b/drivers/acpi/resources/rsutils.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsxface.c b/drivers/acpi/resources/rsxface.c
index 1999e2ab7daa..991f8901498c 100644
--- a/drivers/acpi/resources/rsxface.c
+++ b/drivers/acpi/resources/rsxface.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 283d87522c5d..64f26db10c8e 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -21,101 +21,305 @@ extern struct acpi_device *acpi_root;
21#define ACPI_BUS_DEVICE_NAME "System Bus" 21#define ACPI_BUS_DEVICE_NAME "System Bus"
22 22
23static LIST_HEAD(acpi_device_list); 23static LIST_HEAD(acpi_device_list);
24static LIST_HEAD(acpi_bus_id_list);
24DEFINE_SPINLOCK(acpi_device_lock); 25DEFINE_SPINLOCK(acpi_device_lock);
25LIST_HEAD(acpi_wakeup_device_list); 26LIST_HEAD(acpi_wakeup_device_list);
26 27
28struct acpi_device_bus_id{
29 char bus_id[15];
30 unsigned int instance_no;
31 struct list_head node;
32};
33static int acpi_eject_operation(acpi_handle handle, int lockable)
34{
35 struct acpi_object_list arg_list;
36 union acpi_object arg;
37 acpi_status status = AE_OK;
38
39 /*
40 * TBD: evaluate _PS3?
41 */
42
43 if (lockable) {
44 arg_list.count = 1;
45 arg_list.pointer = &arg;
46 arg.type = ACPI_TYPE_INTEGER;
47 arg.integer.value = 0;
48 acpi_evaluate_object(handle, "_LCK", &arg_list, NULL);
49 }
50
51 arg_list.count = 1;
52 arg_list.pointer = &arg;
53 arg.type = ACPI_TYPE_INTEGER;
54 arg.integer.value = 1;
55
56 /*
57 * TBD: _EJD support.
58 */
59
60 status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL);
61 if (ACPI_FAILURE(status)) {
62 return (-ENODEV);
63 }
64
65 return (0);
66}
27 67
28static void acpi_device_release(struct kobject *kobj) 68static ssize_t
69acpi_eject_store(struct device *d, struct device_attribute *attr,
70 const char *buf, size_t count)
29{ 71{
30 struct acpi_device *dev = container_of(kobj, struct acpi_device, kobj); 72 int result;
31 kfree(dev->pnp.cid_list); 73 int ret = count;
32 kfree(dev); 74 int islockable;
75 acpi_status status;
76 acpi_handle handle;
77 acpi_object_type type = 0;
78 struct acpi_device *acpi_device = to_acpi_device(d);
79
80 if ((!count) || (buf[0] != '1')) {
81 return -EINVAL;
82 }
83#ifndef FORCE_EJECT
84 if (acpi_device->driver == NULL) {
85 ret = -ENODEV;
86 goto err;
87 }
88#endif
89 status = acpi_get_type(acpi_device->handle, &type);
90 if (ACPI_FAILURE(status) || (!acpi_device->flags.ejectable)) {
91 ret = -ENODEV;
92 goto err;
93 }
94
95 islockable = acpi_device->flags.lockable;
96 handle = acpi_device->handle;
97
98 result = acpi_bus_trim(acpi_device, 1);
99
100 if (!result)
101 result = acpi_eject_operation(handle, islockable);
102
103 if (result) {
104 ret = -EBUSY;
105 }
106 err:
107 return ret;
33} 108}
34 109
35struct acpi_device_attribute { 110static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
36 struct attribute attr; 111
37 ssize_t(*show) (struct acpi_device *, char *); 112static ssize_t
38 ssize_t(*store) (struct acpi_device *, const char *, size_t); 113acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) {
39}; 114 struct acpi_device *acpi_dev = to_acpi_device(dev);
115
116 return sprintf(buf, "%s\n", acpi_dev->pnp.hardware_id);
117}
118static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
119
120static ssize_t
121acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) {
122 struct acpi_device *acpi_dev = to_acpi_device(dev);
123 struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL};
124 int result;
125
126 result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path);
127 if(result)
128 goto end;
129
130 result = sprintf(buf, "%s\n", (char*)path.pointer);
131 kfree(path.pointer);
132 end:
133 return result;
134}
135static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL);
136
137static int acpi_device_setup_files(struct acpi_device *dev)
138{
139 acpi_status status;
140 acpi_handle temp;
141 int result = 0;
142
143 /*
144 * Devices gotten from FADT don't have a "path" attribute
145 */
146 if(dev->handle) {
147 result = device_create_file(&dev->dev, &dev_attr_path);
148 if(result)
149 goto end;
150 }
40 151
41typedef void acpi_device_sysfs_files(struct kobject *, 152 if(dev->flags.hardware_id) {
42 const struct attribute *); 153 result = device_create_file(&dev->dev, &dev_attr_hid);
154 if(result)
155 goto end;
156 }
43 157
44static void setup_sys_fs_device_files(struct acpi_device *dev, 158 /*
45 acpi_device_sysfs_files * func); 159 * If device has _EJ0, 'eject' file is created that is used to trigger
160 * hot-removal function from userland.
161 */
162 status = acpi_get_handle(dev->handle, "_EJ0", &temp);
163 if (ACPI_SUCCESS(status))
164 result = device_create_file(&dev->dev, &dev_attr_eject);
165 end:
166 return result;
167}
46 168
47#define create_sysfs_device_files(dev) \ 169static void acpi_device_remove_files(struct acpi_device *dev)
48 setup_sys_fs_device_files(dev, (acpi_device_sysfs_files *)&sysfs_create_file) 170{
49#define remove_sysfs_device_files(dev) \ 171 acpi_status status;
50 setup_sys_fs_device_files(dev, (acpi_device_sysfs_files *)&sysfs_remove_file) 172 acpi_handle temp;
51 173
52#define to_acpi_device(n) container_of(n, struct acpi_device, kobj) 174 /*
53#define to_handle_attr(n) container_of(n, struct acpi_device_attribute, attr); 175 * If device has _EJ0, 'eject' file is created that is used to trigger
176 * hot-removal function from userland.
177 */
178 status = acpi_get_handle(dev->handle, "_EJ0", &temp);
179 if (ACPI_SUCCESS(status))
180 device_remove_file(&dev->dev, &dev_attr_eject);
54 181
55static ssize_t acpi_device_attr_show(struct kobject *kobj, 182 if(dev->flags.hardware_id)
56 struct attribute *attr, char *buf) 183 device_remove_file(&dev->dev, &dev_attr_hid);
184 if(dev->handle)
185 device_remove_file(&dev->dev, &dev_attr_path);
186}
187/* --------------------------------------------------------------------------
188 ACPI Bus operations
189 -------------------------------------------------------------------------- */
190static void acpi_device_release(struct device *dev)
57{ 191{
58 struct acpi_device *device = to_acpi_device(kobj); 192 struct acpi_device *acpi_dev = to_acpi_device(dev);
59 struct acpi_device_attribute *attribute = to_handle_attr(attr); 193
60 return attribute->show ? attribute->show(device, buf) : -EIO; 194 kfree(acpi_dev->pnp.cid_list);
195 kfree(acpi_dev);
61} 196}
62static ssize_t acpi_device_attr_store(struct kobject *kobj, 197
63 struct attribute *attr, const char *buf, 198static int acpi_device_suspend(struct device *dev, pm_message_t state)
64 size_t len)
65{ 199{
66 struct acpi_device *device = to_acpi_device(kobj); 200 struct acpi_device *acpi_dev = to_acpi_device(dev);
67 struct acpi_device_attribute *attribute = to_handle_attr(attr); 201 struct acpi_driver *acpi_drv = acpi_dev->driver;
68 return attribute->store ? attribute->store(device, buf, len) : -EIO; 202
203 if (acpi_drv && acpi_drv->ops.suspend)
204 return acpi_drv->ops.suspend(acpi_dev, state);
205 return 0;
69} 206}
70 207
71static struct sysfs_ops acpi_device_sysfs_ops = { 208static int acpi_device_resume(struct device *dev)
72 .show = acpi_device_attr_show, 209{
73 .store = acpi_device_attr_store, 210 struct acpi_device *acpi_dev = to_acpi_device(dev);
74}; 211 struct acpi_driver *acpi_drv = acpi_dev->driver;
75 212
76static struct kobj_type ktype_acpi_ns = { 213 if (acpi_drv && acpi_drv->ops.resume)
77 .sysfs_ops = &acpi_device_sysfs_ops, 214 return acpi_drv->ops.resume(acpi_dev);
78 .release = acpi_device_release, 215 return 0;
79}; 216}
80 217
81static int namespace_uevent(struct kset *kset, struct kobject *kobj, 218static int acpi_bus_match(struct device *dev, struct device_driver *drv)
82 char **envp, int num_envp, char *buffer,
83 int buffer_size)
84{ 219{
85 struct acpi_device *dev = to_acpi_device(kobj); 220 struct acpi_device *acpi_dev = to_acpi_device(dev);
86 int i = 0; 221 struct acpi_driver *acpi_drv = to_acpi_driver(drv);
87 int len = 0;
88 222
89 if (!dev->driver) 223 return !acpi_match_ids(acpi_dev, acpi_drv->ids);
90 return 0; 224}
91 225
92 if (add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, 226static int acpi_device_uevent(struct device *dev, char **envp, int num_envp,
93 "PHYSDEVDRIVER=%s", dev->driver->name)) 227 char *buffer, int buffer_size)
228{
229 struct acpi_device *acpi_dev = to_acpi_device(dev);
230 int i = 0, length = 0, ret = 0;
231
232 if (acpi_dev->flags.hardware_id)
233 ret = add_uevent_var(envp, num_envp, &i,
234 buffer, buffer_size, &length,
235 "HWID=%s", acpi_dev->pnp.hardware_id);
236 if (ret)
94 return -ENOMEM; 237 return -ENOMEM;
238 if (acpi_dev->flags.compatible_ids) {
239 int j;
240 struct acpi_compatible_id_list *cid_list;
241
242 cid_list = acpi_dev->pnp.cid_list;
243
244 for (j = 0; j < cid_list->count; j++) {
245 ret = add_uevent_var(envp, num_envp, &i, buffer,
246 buffer_size, &length, "COMPTID=%s",
247 cid_list->id[j].value);
248 if (ret)
249 return -ENOMEM;
250 }
251 }
95 252
96 envp[i] = NULL; 253 envp[i] = NULL;
254 return 0;
255}
256
257static int acpi_bus_driver_init(struct acpi_device *, struct acpi_driver *);
258static int acpi_start_single_object(struct acpi_device *);
259static int acpi_device_probe(struct device * dev)
260{
261 struct acpi_device *acpi_dev = to_acpi_device(dev);
262 struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
263 int ret;
264
265 ret = acpi_bus_driver_init(acpi_dev, acpi_drv);
266 if (!ret) {
267 if (acpi_dev->bus_ops.acpi_op_start)
268 acpi_start_single_object(acpi_dev);
269 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
270 "Found driver [%s] for device [%s]\n",
271 acpi_drv->name, acpi_dev->pnp.bus_id));
272 get_device(dev);
273 }
274 return ret;
275}
97 276
277static int acpi_device_remove(struct device * dev)
278{
279 struct acpi_device *acpi_dev = to_acpi_device(dev);
280 struct acpi_driver *acpi_drv = acpi_dev->driver;
281
282 if (acpi_drv) {
283 if (acpi_drv->ops.stop)
284 acpi_drv->ops.stop(acpi_dev, acpi_dev->removal_type);
285 if (acpi_drv->ops.remove)
286 acpi_drv->ops.remove(acpi_dev, acpi_dev->removal_type);
287 }
288 acpi_dev->driver = NULL;
289 acpi_driver_data(dev) = NULL;
290
291 put_device(dev);
98 return 0; 292 return 0;
99} 293}
100 294
101static struct kset_uevent_ops namespace_uevent_ops = { 295static void acpi_device_shutdown(struct device *dev)
102 .uevent = &namespace_uevent, 296{
103}; 297 struct acpi_device *acpi_dev = to_acpi_device(dev);
298 struct acpi_driver *acpi_drv = acpi_dev->driver;
299
300 if (acpi_drv && acpi_drv->ops.shutdown)
301 acpi_drv->ops.shutdown(acpi_dev);
104 302
105static struct kset acpi_namespace_kset = { 303 return ;
106 .kobj = { 304}
107 .name = "namespace", 305
108 }, 306static struct bus_type acpi_bus_type = {
109 .subsys = &acpi_subsys, 307 .name = "acpi",
110 .ktype = &ktype_acpi_ns, 308 .suspend = acpi_device_suspend,
111 .uevent_ops = &namespace_uevent_ops, 309 .resume = acpi_device_resume,
310 .shutdown = acpi_device_shutdown,
311 .match = acpi_bus_match,
312 .probe = acpi_device_probe,
313 .remove = acpi_device_remove,
314 .uevent = acpi_device_uevent,
112}; 315};
113 316
114static void acpi_device_register(struct acpi_device *device, 317static int acpi_device_register(struct acpi_device *device,
115 struct acpi_device *parent) 318 struct acpi_device *parent)
116{ 319{
117 int err; 320 int result;
118 321 struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id;
322 int found = 0;
119 /* 323 /*
120 * Linkage 324 * Linkage
121 * ------- 325 * -------
@@ -126,7 +330,33 @@ static void acpi_device_register(struct acpi_device *device,
126 INIT_LIST_HEAD(&device->g_list); 330 INIT_LIST_HEAD(&device->g_list);
127 INIT_LIST_HEAD(&device->wakeup_list); 331 INIT_LIST_HEAD(&device->wakeup_list);
128 332
333 new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
334 if (!new_bus_id) {
335 printk(KERN_ERR PREFIX "Memory allocation error\n");
336 return -ENOMEM;
337 }
338
129 spin_lock(&acpi_device_lock); 339 spin_lock(&acpi_device_lock);
340 /*
341 * Find suitable bus_id and instance number in acpi_bus_id_list
342 * If failed, create one and link it into acpi_bus_id_list
343 */
344 list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
345 if(!strcmp(acpi_device_bus_id->bus_id, device->flags.hardware_id? device->pnp.hardware_id : "device")) {
346 acpi_device_bus_id->instance_no ++;
347 found = 1;
348 kfree(new_bus_id);
349 break;
350 }
351 }
352 if(!found) {
353 acpi_device_bus_id = new_bus_id;
354 strcpy(acpi_device_bus_id->bus_id, device->flags.hardware_id ? device->pnp.hardware_id : "device");
355 acpi_device_bus_id->instance_no = 0;
356 list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
357 }
358 sprintf(device->dev.bus_id, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no);
359
130 if (device->parent) { 360 if (device->parent) {
131 list_add_tail(&device->node, &device->parent->children); 361 list_add_tail(&device->node, &device->parent->children);
132 list_add_tail(&device->g_list, &device->parent->g_list); 362 list_add_tail(&device->g_list, &device->parent->g_list);
@@ -136,16 +366,33 @@ static void acpi_device_register(struct acpi_device *device,
136 list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list); 366 list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list);
137 spin_unlock(&acpi_device_lock); 367 spin_unlock(&acpi_device_lock);
138 368
139 strlcpy(device->kobj.name, device->pnp.bus_id, KOBJ_NAME_LEN); 369 if (device->parent)
140 if (parent) 370 device->dev.parent = &parent->dev;
141 device->kobj.parent = &parent->kobj; 371 device->dev.bus = &acpi_bus_type;
142 device->kobj.ktype = &ktype_acpi_ns; 372 device_initialize(&device->dev);
143 device->kobj.kset = &acpi_namespace_kset; 373 device->dev.release = &acpi_device_release;
144 err = kobject_register(&device->kobj); 374 result = device_add(&device->dev);
145 if (err < 0) 375 if(result) {
146 printk(KERN_WARNING "%s: kobject_register error: %d\n", 376 printk("Error adding device %s", device->dev.bus_id);
147 __FUNCTION__, err); 377 goto end;
148 create_sysfs_device_files(device); 378 }
379
380 result = acpi_device_setup_files(device);
381 if(result)
382 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error creating sysfs interface for device %s\n", device->dev.bus_id));
383
384 device->removal_type = ACPI_BUS_REMOVAL_NORMAL;
385 return 0;
386 end:
387 spin_lock(&acpi_device_lock);
388 if (device->parent) {
389 list_del(&device->node);
390 list_del(&device->g_list);
391 } else
392 list_del(&device->g_list);
393 list_del(&device->wakeup_list);
394 spin_unlock(&acpi_device_lock);
395 return result;
149} 396}
150 397
151static void acpi_device_unregister(struct acpi_device *device, int type) 398static void acpi_device_unregister(struct acpi_device *device, int type)
@@ -158,81 +405,143 @@ static void acpi_device_unregister(struct acpi_device *device, int type)
158 list_del(&device->g_list); 405 list_del(&device->g_list);
159 406
160 list_del(&device->wakeup_list); 407 list_del(&device->wakeup_list);
161
162 spin_unlock(&acpi_device_lock); 408 spin_unlock(&acpi_device_lock);
163 409
164 acpi_detach_data(device->handle, acpi_bus_data_handler); 410 acpi_detach_data(device->handle, acpi_bus_data_handler);
165 remove_sysfs_device_files(device); 411
166 kobject_unregister(&device->kobj); 412 acpi_device_remove_files(device);
413 device_unregister(&device->dev);
167} 414}
168 415
169void acpi_bus_data_handler(acpi_handle handle, u32 function, void *context) 416/* --------------------------------------------------------------------------
417 Driver Management
418 -------------------------------------------------------------------------- */
419/**
420 * acpi_bus_driver_init - add a device to a driver
421 * @device: the device to add and initialize
422 * @driver: driver for the device
423 *
424 * Used to initialize a device via its device driver. Called whenever a
425 * driver is bound to a device. Invokes the driver's add() ops.
426 */
427static int
428acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver)
170{ 429{
430 int result = 0;
171 431
172 /* TBD */
173 432
174 return; 433 if (!device || !driver)
175} 434 return -EINVAL;
176 435
177static int acpi_bus_get_power_flags(struct acpi_device *device) 436 if (!driver->ops.add)
178{ 437 return -ENOSYS;
179 acpi_status status = 0;
180 acpi_handle handle = NULL;
181 u32 i = 0;
182 438
439 result = driver->ops.add(device);
440 if (result) {
441 device->driver = NULL;
442 acpi_driver_data(device) = NULL;
443 return result;
444 }
183 445
184 /* 446 device->driver = driver;
185 * Power Management Flags
186 */
187 status = acpi_get_handle(device->handle, "_PSC", &handle);
188 if (ACPI_SUCCESS(status))
189 device->power.flags.explicit_get = 1;
190 status = acpi_get_handle(device->handle, "_IRC", &handle);
191 if (ACPI_SUCCESS(status))
192 device->power.flags.inrush_current = 1;
193 447
194 /* 448 /*
195 * Enumerate supported power management states 449 * TBD - Configuration Management: Assign resources to device based
450 * upon possible configuration and currently allocated resources.
196 */ 451 */
197 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3; i++) {
198 struct acpi_device_power_state *ps = &device->power.states[i];
199 char object_name[5] = { '_', 'P', 'R', '0' + i, '\0' };
200 452
201 /* Evaluate "_PRx" to se if power resources are referenced */ 453 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
202 acpi_evaluate_reference(device->handle, object_name, NULL, 454 "Driver successfully bound to device\n"));
203 &ps->resources); 455 return 0;
204 if (ps->resources.count) { 456}
205 device->power.flags.power_resources = 1;
206 ps->flags.valid = 1;
207 }
208 457
209 /* Evaluate "_PSx" to see if we can do explicit sets */ 458static int acpi_start_single_object(struct acpi_device *device)
210 object_name[2] = 'S'; 459{
211 status = acpi_get_handle(device->handle, object_name, &handle); 460 int result = 0;
212 if (ACPI_SUCCESS(status)) { 461 struct acpi_driver *driver;
213 ps->flags.explicit_set = 1;
214 ps->flags.valid = 1;
215 }
216 462
217 /* State is valid if we have some power control */
218 if (ps->resources.count || ps->flags.explicit_set)
219 ps->flags.valid = 1;
220 463
221 ps->power = -1; /* Unknown - driver assigned */ 464 if (!(driver = device->driver))
222 ps->latency = -1; /* Unknown - driver assigned */ 465 return 0;
466
467 if (driver->ops.start) {
468 result = driver->ops.start(device);
469 if (result && driver->ops.remove)
470 driver->ops.remove(device, ACPI_BUS_REMOVAL_NORMAL);
223 } 471 }
224 472
225 /* Set defaults for D0 and D3 states (always valid) */ 473 return result;
226 device->power.states[ACPI_STATE_D0].flags.valid = 1; 474}
227 device->power.states[ACPI_STATE_D0].power = 100;
228 device->power.states[ACPI_STATE_D3].flags.valid = 1;
229 device->power.states[ACPI_STATE_D3].power = 0;
230 475
231 /* TBD: System wake support and resource requirements. */ 476/**
477 * acpi_bus_register_driver - register a driver with the ACPI bus
478 * @driver: driver being registered
479 *
480 * Registers a driver with the ACPI bus. Searches the namespace for all
481 * devices that match the driver's criteria and binds. Returns zero for
482 * success or a negative error status for failure.
483 */
484int acpi_bus_register_driver(struct acpi_driver *driver)
485{
486 int ret;
232 487
233 device->power.state = ACPI_STATE_UNKNOWN; 488 if (acpi_disabled)
489 return -ENODEV;
490 driver->drv.name = driver->name;
491 driver->drv.bus = &acpi_bus_type;
492 driver->drv.owner = driver->owner;
234 493
235 return 0; 494 ret = driver_register(&driver->drv);
495 return ret;
496}
497
498EXPORT_SYMBOL(acpi_bus_register_driver);
499
500/**
501 * acpi_bus_unregister_driver - unregisters a driver with the APIC bus
502 * @driver: driver to unregister
503 *
504 * Unregisters a driver with the ACPI bus. Searches the namespace for all
505 * devices that match the driver's criteria and unbinds.
506 */
507void acpi_bus_unregister_driver(struct acpi_driver *driver)
508{
509 driver_unregister(&driver->drv);
510}
511
512EXPORT_SYMBOL(acpi_bus_unregister_driver);
513
514/* --------------------------------------------------------------------------
515 Device Enumeration
516 -------------------------------------------------------------------------- */
517acpi_status
518acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
519{
520 acpi_status status;
521 acpi_handle tmp;
522 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
523 union acpi_object *obj;
524
525 status = acpi_get_handle(handle, "_EJD", &tmp);
526 if (ACPI_FAILURE(status))
527 return status;
528
529 status = acpi_evaluate_object(handle, "_EJD", NULL, &buffer);
530 if (ACPI_SUCCESS(status)) {
531 obj = buffer.pointer;
532 status = acpi_get_handle(NULL, obj->string.pointer, ejd);
533 kfree(buffer.pointer);
534 }
535 return status;
536}
537EXPORT_SYMBOL_GPL(acpi_bus_get_ejd);
538
539void acpi_bus_data_handler(acpi_handle handle, u32 function, void *context)
540{
541
542 /* TBD */
543
544 return;
236} 545}
237 546
238int acpi_match_ids(struct acpi_device *device, char *ids) 547int acpi_match_ids(struct acpi_device *device, char *ids)
@@ -254,6 +563,12 @@ int acpi_match_ids(struct acpi_device *device, char *ids)
254 return -ENOENT; 563 return -ENOENT;
255} 564}
256 565
566static int acpi_bus_get_perf_flags(struct acpi_device *device)
567{
568 device->performance.state = ACPI_STATE_UNKNOWN;
569 return 0;
570}
571
257static acpi_status 572static acpi_status
258acpi_bus_extract_wakeup_device_power_package(struct acpi_device *device, 573acpi_bus_extract_wakeup_device_power_package(struct acpi_device *device,
259 union acpi_object *package) 574 union acpi_object *package)
@@ -338,359 +653,66 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
338 return 0; 653 return 0;
339} 654}
340 655
341/* -------------------------------------------------------------------------- 656static int acpi_bus_get_power_flags(struct acpi_device *device)
342 ACPI sysfs device file support
343 -------------------------------------------------------------------------- */
344static ssize_t acpi_eject_store(struct acpi_device *device,
345 const char *buf, size_t count);
346
347#define ACPI_DEVICE_ATTR(_name,_mode,_show,_store) \
348static struct acpi_device_attribute acpi_device_attr_##_name = \
349 __ATTR(_name, _mode, _show, _store)
350
351ACPI_DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
352
353/**
354 * setup_sys_fs_device_files - sets up the device files under device namespace
355 * @dev: acpi_device object
356 * @func: function pointer to create or destroy the device file
357 */
358static void
359setup_sys_fs_device_files(struct acpi_device *dev,
360 acpi_device_sysfs_files * func)
361{
362 acpi_status status;
363 acpi_handle temp = NULL;
364
365 /*
366 * If device has _EJ0, 'eject' file is created that is used to trigger
367 * hot-removal function from userland.
368 */
369 status = acpi_get_handle(dev->handle, "_EJ0", &temp);
370 if (ACPI_SUCCESS(status))
371 (*(func)) (&dev->kobj, &acpi_device_attr_eject.attr);
372}
373
374static int acpi_eject_operation(acpi_handle handle, int lockable)
375{ 657{
376 struct acpi_object_list arg_list; 658 acpi_status status = 0;
377 union acpi_object arg; 659 acpi_handle handle = NULL;
378 acpi_status status = AE_OK; 660 u32 i = 0;
379
380 /*
381 * TBD: evaluate _PS3?
382 */
383
384 if (lockable) {
385 arg_list.count = 1;
386 arg_list.pointer = &arg;
387 arg.type = ACPI_TYPE_INTEGER;
388 arg.integer.value = 0;
389 acpi_evaluate_object(handle, "_LCK", &arg_list, NULL);
390 }
391 661
392 arg_list.count = 1;
393 arg_list.pointer = &arg;
394 arg.type = ACPI_TYPE_INTEGER;
395 arg.integer.value = 1;
396 662
397 /* 663 /*
398 * TBD: _EJD support. 664 * Power Management Flags
399 */ 665 */
400 666 status = acpi_get_handle(device->handle, "_PSC", &handle);
401 status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL); 667 if (ACPI_SUCCESS(status))
402 if (ACPI_FAILURE(status)) { 668 device->power.flags.explicit_get = 1;
403 return (-ENODEV); 669 status = acpi_get_handle(device->handle, "_IRC", &handle);
404 } 670 if (ACPI_SUCCESS(status))
405 671 device->power.flags.inrush_current = 1;
406 return (0);
407}
408
409static ssize_t
410acpi_eject_store(struct acpi_device *device, const char *buf, size_t count)
411{
412 int result;
413 int ret = count;
414 int islockable;
415 acpi_status status;
416 acpi_handle handle;
417 acpi_object_type type = 0;
418
419 if ((!count) || (buf[0] != '1')) {
420 return -EINVAL;
421 }
422#ifndef FORCE_EJECT
423 if (device->driver == NULL) {
424 ret = -ENODEV;
425 goto err;
426 }
427#endif
428 status = acpi_get_type(device->handle, &type);
429 if (ACPI_FAILURE(status) || (!device->flags.ejectable)) {
430 ret = -ENODEV;
431 goto err;
432 }
433
434 islockable = device->flags.lockable;
435 handle = device->handle;
436
437 result = acpi_bus_trim(device, 1);
438
439 if (!result)
440 result = acpi_eject_operation(handle, islockable);
441
442 if (result) {
443 ret = -EBUSY;
444 }
445 err:
446 return ret;
447}
448
449/* --------------------------------------------------------------------------
450 Performance Management
451 -------------------------------------------------------------------------- */
452
453static int acpi_bus_get_perf_flags(struct acpi_device *device)
454{
455 device->performance.state = ACPI_STATE_UNKNOWN;
456 return 0;
457}
458
459/* --------------------------------------------------------------------------
460 Driver Management
461 -------------------------------------------------------------------------- */
462
463static LIST_HEAD(acpi_bus_drivers);
464
465/**
466 * acpi_bus_match - match device IDs to driver's supported IDs
467 * @device: the device that we are trying to match to a driver
468 * @driver: driver whose device id table is being checked
469 *
470 * Checks the device's hardware (_HID) or compatible (_CID) ids to see if it
471 * matches the specified driver's criteria.
472 */
473static int
474acpi_bus_match(struct acpi_device *device, struct acpi_driver *driver)
475{
476 if (driver && driver->ops.match)
477 return driver->ops.match(device, driver);
478 return acpi_match_ids(device, driver->ids);
479}
480
481/**
482 * acpi_bus_driver_init - add a device to a driver
483 * @device: the device to add and initialize
484 * @driver: driver for the device
485 *
486 * Used to initialize a device via its device driver. Called whenever a
487 * driver is bound to a device. Invokes the driver's add() and start() ops.
488 */
489static int
490acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver)
491{
492 int result = 0;
493
494
495 if (!device || !driver)
496 return -EINVAL;
497
498 if (!driver->ops.add)
499 return -ENOSYS;
500
501 result = driver->ops.add(device);
502 if (result) {
503 device->driver = NULL;
504 acpi_driver_data(device) = NULL;
505 return result;
506 }
507
508 device->driver = driver;
509 672
510 /* 673 /*
511 * TBD - Configuration Management: Assign resources to device based 674 * Enumerate supported power management states
512 * upon possible configuration and currently allocated resources.
513 */ 675 */
676 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3; i++) {
677 struct acpi_device_power_state *ps = &device->power.states[i];
678 char object_name[5] = { '_', 'P', 'R', '0' + i, '\0' };
514 679
515 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 680 /* Evaluate "_PRx" to se if power resources are referenced */
516 "Driver successfully bound to device\n")); 681 acpi_evaluate_reference(device->handle, object_name, NULL,
517 return 0; 682 &ps->resources);
518} 683 if (ps->resources.count) {
519 684 device->power.flags.power_resources = 1;
520static int acpi_start_single_object(struct acpi_device *device) 685 ps->flags.valid = 1;
521{
522 int result = 0;
523 struct acpi_driver *driver;
524
525
526 if (!(driver = device->driver))
527 return 0;
528
529 if (driver->ops.start) {
530 result = driver->ops.start(device);
531 if (result && driver->ops.remove)
532 driver->ops.remove(device, ACPI_BUS_REMOVAL_NORMAL);
533 }
534
535 return result;
536}
537
538static void acpi_driver_attach(struct acpi_driver *drv)
539{
540 struct list_head *node, *next;
541
542
543 spin_lock(&acpi_device_lock);
544 list_for_each_safe(node, next, &acpi_device_list) {
545 struct acpi_device *dev =
546 container_of(node, struct acpi_device, g_list);
547
548 if (dev->driver || !dev->status.present)
549 continue;
550 spin_unlock(&acpi_device_lock);
551
552 if (!acpi_bus_match(dev, drv)) {
553 if (!acpi_bus_driver_init(dev, drv)) {
554 acpi_start_single_object(dev);
555 atomic_inc(&drv->references);
556 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
557 "Found driver [%s] for device [%s]\n",
558 drv->name, dev->pnp.bus_id));
559 }
560 } 686 }
561 spin_lock(&acpi_device_lock);
562 }
563 spin_unlock(&acpi_device_lock);
564}
565
566static void acpi_driver_detach(struct acpi_driver *drv)
567{
568 struct list_head *node, *next;
569 687
570 688 /* Evaluate "_PSx" to see if we can do explicit sets */
571 spin_lock(&acpi_device_lock); 689 object_name[2] = 'S';
572 list_for_each_safe(node, next, &acpi_device_list) { 690 status = acpi_get_handle(device->handle, object_name, &handle);
573 struct acpi_device *dev = 691 if (ACPI_SUCCESS(status)) {
574 container_of(node, struct acpi_device, g_list); 692 ps->flags.explicit_set = 1;
575 693 ps->flags.valid = 1;
576 if (dev->driver == drv) {
577 spin_unlock(&acpi_device_lock);
578 if (drv->ops.remove)
579 drv->ops.remove(dev, ACPI_BUS_REMOVAL_NORMAL);
580 spin_lock(&acpi_device_lock);
581 dev->driver = NULL;
582 dev->driver_data = NULL;
583 atomic_dec(&drv->references);
584 } 694 }
585 }
586 spin_unlock(&acpi_device_lock);
587}
588
589/**
590 * acpi_bus_register_driver - register a driver with the ACPI bus
591 * @driver: driver being registered
592 *
593 * Registers a driver with the ACPI bus. Searches the namespace for all
594 * devices that match the driver's criteria and binds. Returns zero for
595 * success or a negative error status for failure.
596 */
597int acpi_bus_register_driver(struct acpi_driver *driver)
598{
599
600 if (acpi_disabled)
601 return -ENODEV;
602
603 spin_lock(&acpi_device_lock);
604 list_add_tail(&driver->node, &acpi_bus_drivers);
605 spin_unlock(&acpi_device_lock);
606 acpi_driver_attach(driver);
607
608 return 0;
609}
610
611EXPORT_SYMBOL(acpi_bus_register_driver);
612
613/**
614 * acpi_bus_unregister_driver - unregisters a driver with the APIC bus
615 * @driver: driver to unregister
616 *
617 * Unregisters a driver with the ACPI bus. Searches the namespace for all
618 * devices that match the driver's criteria and unbinds.
619 */
620void acpi_bus_unregister_driver(struct acpi_driver *driver)
621{
622 acpi_driver_detach(driver);
623
624 if (!atomic_read(&driver->references)) {
625 spin_lock(&acpi_device_lock);
626 list_del_init(&driver->node);
627 spin_unlock(&acpi_device_lock);
628 }
629 return;
630}
631
632EXPORT_SYMBOL(acpi_bus_unregister_driver);
633
634/**
635 * acpi_bus_find_driver - check if there is a driver installed for the device
636 * @device: device that we are trying to find a supporting driver for
637 *
638 * Parses the list of registered drivers looking for a driver applicable for
639 * the specified device.
640 */
641static int acpi_bus_find_driver(struct acpi_device *device)
642{
643 int result = 0;
644 struct list_head *node, *next;
645 695
696 /* State is valid if we have some power control */
697 if (ps->resources.count || ps->flags.explicit_set)
698 ps->flags.valid = 1;
646 699
647 spin_lock(&acpi_device_lock); 700 ps->power = -1; /* Unknown - driver assigned */
648 list_for_each_safe(node, next, &acpi_bus_drivers) { 701 ps->latency = -1; /* Unknown - driver assigned */
649 struct acpi_driver *driver =
650 container_of(node, struct acpi_driver, node);
651
652 atomic_inc(&driver->references);
653 spin_unlock(&acpi_device_lock);
654 if (!acpi_bus_match(device, driver)) {
655 result = acpi_bus_driver_init(device, driver);
656 if (!result)
657 goto Done;
658 }
659 atomic_dec(&driver->references);
660 spin_lock(&acpi_device_lock);
661 } 702 }
662 spin_unlock(&acpi_device_lock);
663
664 Done:
665 return result;
666}
667 703
668/* -------------------------------------------------------------------------- 704 /* Set defaults for D0 and D3 states (always valid) */
669 Device Enumeration 705 device->power.states[ACPI_STATE_D0].flags.valid = 1;
670 -------------------------------------------------------------------------- */ 706 device->power.states[ACPI_STATE_D0].power = 100;
707 device->power.states[ACPI_STATE_D3].flags.valid = 1;
708 device->power.states[ACPI_STATE_D3].power = 0;
671 709
672acpi_status 710 /* TBD: System wake support and resource requirements. */
673acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
674{
675 acpi_status status;
676 acpi_handle tmp;
677 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
678 union acpi_object *obj;
679 711
680 status = acpi_get_handle(handle, "_EJD", &tmp); 712 device->power.state = ACPI_STATE_UNKNOWN;
681 if (ACPI_FAILURE(status))
682 return status;
683 713
684 status = acpi_evaluate_object(handle, "_EJD", NULL, &buffer); 714 return 0;
685 if (ACPI_SUCCESS(status)) {
686 obj = buffer.pointer;
687 status = acpi_get_handle(NULL, obj->string.pointer, ejd);
688 kfree(buffer.pointer);
689 }
690 return status;
691} 715}
692EXPORT_SYMBOL_GPL(acpi_bus_get_ejd);
693
694 716
695static int acpi_bus_get_flags(struct acpi_device *device) 717static int acpi_bus_get_flags(struct acpi_device *device)
696{ 718{
@@ -782,6 +804,75 @@ static void acpi_device_get_busid(struct acpi_device *device,
782 } 804 }
783} 805}
784 806
807static int
808acpi_video_bus_match(struct acpi_device *device)
809{
810 acpi_handle h_dummy1;
811 acpi_handle h_dummy2;
812 acpi_handle h_dummy3;
813
814
815 if (!device)
816 return -EINVAL;
817
818 /* Since there is no HID, CID for ACPI Video drivers, we have
819 * to check well known required nodes for each feature we support.
820 */
821
822 /* Does this device able to support video switching ? */
823 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy1)) &&
824 ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy2)))
825 return 0;
826
827 /* Does this device able to retrieve a video ROM ? */
828 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy1)))
829 return 0;
830
831 /* Does this device able to configure which video head to be POSTed ? */
832 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_VPO", &h_dummy1)) &&
833 ACPI_SUCCESS(acpi_get_handle(device->handle, "_GPD", &h_dummy2)) &&
834 ACPI_SUCCESS(acpi_get_handle(device->handle, "_SPD", &h_dummy3)))
835 return 0;
836
837 return -ENODEV;
838}
839
840/*
841 * acpi_bay_match - see if a device is an ejectable driver bay
842 *
843 * If an acpi object is ejectable and has one of the ACPI ATA methods defined,
844 * then we can safely call it an ejectable drive bay
845 */
846static int acpi_bay_match(struct acpi_device *device){
847 acpi_status status;
848 acpi_handle handle;
849 acpi_handle tmp;
850 acpi_handle phandle;
851
852 handle = device->handle;
853
854 status = acpi_get_handle(handle, "_EJ0", &tmp);
855 if (ACPI_FAILURE(status))
856 return -ENODEV;
857
858 if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) ||
859 (ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) ||
860 (ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) ||
861 (ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp))))
862 return 0;
863
864 if (acpi_get_parent(handle, &phandle))
865 return -ENODEV;
866
867 if ((ACPI_SUCCESS(acpi_get_handle(phandle, "_GTF", &tmp))) ||
868 (ACPI_SUCCESS(acpi_get_handle(phandle, "_GTM", &tmp))) ||
869 (ACPI_SUCCESS(acpi_get_handle(phandle, "_STM", &tmp))) ||
870 (ACPI_SUCCESS(acpi_get_handle(phandle, "_SDD", &tmp))))
871 return 0;
872
873 return -ENODEV;
874}
875
785static void acpi_device_set_id(struct acpi_device *device, 876static void acpi_device_set_id(struct acpi_device *device,
786 struct acpi_device *parent, acpi_handle handle, 877 struct acpi_device *parent, acpi_handle handle,
787 int type) 878 int type)
@@ -812,6 +903,16 @@ static void acpi_device_set_id(struct acpi_device *device,
812 device->pnp.bus_address = info->address; 903 device->pnp.bus_address = info->address;
813 device->flags.bus_address = 1; 904 device->flags.bus_address = 1;
814 } 905 }
906
907 if(!(info->valid & (ACPI_VALID_HID | ACPI_VALID_CID))){
908 status = acpi_video_bus_match(device);
909 if(ACPI_SUCCESS(status))
910 hid = ACPI_VIDEO_HID;
911
912 status = acpi_bay_match(device);
913 if (ACPI_SUCCESS(status))
914 hid = ACPI_BAY_HID;
915 }
815 break; 916 break;
816 case ACPI_BUS_TYPE_POWER: 917 case ACPI_BUS_TYPE_POWER:
817 hid = ACPI_POWER_HID; 918 hid = ACPI_POWER_HID;
@@ -888,86 +989,24 @@ static int acpi_device_set_context(struct acpi_device *device, int type)
888 return result; 989 return result;
889} 990}
890 991
891static void acpi_device_get_debug_info(struct acpi_device *device,
892 acpi_handle handle, int type)
893{
894#ifdef CONFIG_ACPI_DEBUG_OUTPUT
895 char *type_string = NULL;
896 char name[80] = { '?', '\0' };
897 struct acpi_buffer buffer = { sizeof(name), name };
898
899 switch (type) {
900 case ACPI_BUS_TYPE_DEVICE:
901 type_string = "Device";
902 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
903 break;
904 case ACPI_BUS_TYPE_POWER:
905 type_string = "Power Resource";
906 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
907 break;
908 case ACPI_BUS_TYPE_PROCESSOR:
909 type_string = "Processor";
910 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
911 break;
912 case ACPI_BUS_TYPE_SYSTEM:
913 type_string = "System";
914 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
915 break;
916 case ACPI_BUS_TYPE_THERMAL:
917 type_string = "Thermal Zone";
918 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
919 break;
920 case ACPI_BUS_TYPE_POWER_BUTTON:
921 type_string = "Power Button";
922 sprintf(name, "PWRB");
923 break;
924 case ACPI_BUS_TYPE_SLEEP_BUTTON:
925 type_string = "Sleep Button";
926 sprintf(name, "SLPB");
927 break;
928 }
929
930 printk(KERN_DEBUG "Found %s %s [%p]\n", type_string, name, handle);
931#endif /*CONFIG_ACPI_DEBUG_OUTPUT */
932}
933
934static int acpi_bus_remove(struct acpi_device *dev, int rmdevice) 992static int acpi_bus_remove(struct acpi_device *dev, int rmdevice)
935{ 993{
936 int result = 0;
937 struct acpi_driver *driver;
938
939
940 if (!dev) 994 if (!dev)
941 return -EINVAL; 995 return -EINVAL;
942 996
943 driver = dev->driver; 997 dev->removal_type = ACPI_BUS_REMOVAL_EJECT;
944 998 device_release_driver(&dev->dev);
945 if ((driver) && (driver->ops.remove)) {
946
947 if (driver->ops.stop) {
948 result = driver->ops.stop(dev, ACPI_BUS_REMOVAL_EJECT);
949 if (result)
950 return result;
951 }
952
953 result = dev->driver->ops.remove(dev, ACPI_BUS_REMOVAL_EJECT);
954 if (result) {
955 return result;
956 }
957
958 atomic_dec(&dev->driver->references);
959 dev->driver = NULL;
960 acpi_driver_data(dev) = NULL;
961 }
962 999
963 if (!rmdevice) 1000 if (!rmdevice)
964 return 0; 1001 return 0;
965 1002
1003 /*
1004 * unbind _ADR-Based Devices when hot removal
1005 */
966 if (dev->flags.bus_address) { 1006 if (dev->flags.bus_address) {
967 if ((dev->parent) && (dev->parent->ops.unbind)) 1007 if ((dev->parent) && (dev->parent->ops.unbind))
968 dev->parent->ops.unbind(dev); 1008 dev->parent->ops.unbind(dev);
969 } 1009 }
970
971 acpi_device_unregister(dev, ACPI_BUS_REMOVAL_EJECT); 1010 acpi_device_unregister(dev, ACPI_BUS_REMOVAL_EJECT);
972 1011
973 return 0; 1012 return 0;
@@ -975,7 +1014,8 @@ static int acpi_bus_remove(struct acpi_device *dev, int rmdevice)
975 1014
976static int 1015static int
977acpi_add_single_object(struct acpi_device **child, 1016acpi_add_single_object(struct acpi_device **child,
978 struct acpi_device *parent, acpi_handle handle, int type) 1017 struct acpi_device *parent, acpi_handle handle, int type,
1018 struct acpi_bus_ops *ops)
979{ 1019{
980 int result = 0; 1020 int result = 0;
981 struct acpi_device *device = NULL; 1021 struct acpi_device *device = NULL;
@@ -992,6 +1032,8 @@ acpi_add_single_object(struct acpi_device **child,
992 1032
993 device->handle = handle; 1033 device->handle = handle;
994 device->parent = parent; 1034 device->parent = parent;
1035 device->bus_ops = *ops; /* workround for not call .start */
1036
995 1037
996 acpi_device_get_busid(device, handle, type); 1038 acpi_device_get_busid(device, handle, type);
997 1039
@@ -1076,33 +1118,16 @@ acpi_add_single_object(struct acpi_device **child,
1076 if ((result = acpi_device_set_context(device, type))) 1118 if ((result = acpi_device_set_context(device, type)))
1077 goto end; 1119 goto end;
1078 1120
1079 acpi_device_get_debug_info(device, handle, type); 1121 result = acpi_device_register(device, parent);
1080
1081 acpi_device_register(device, parent);
1082 1122
1083 /* 1123 /*
1084 * Bind _ADR-Based Devices 1124 * Bind _ADR-Based Devices when hot add
1085 * -----------------------
1086 * If there's a a bus address (_ADR) then we utilize the parent's
1087 * 'bind' function (if exists) to bind the ACPI- and natively-
1088 * enumerated device representations.
1089 */ 1125 */
1090 if (device->flags.bus_address) { 1126 if (device->flags.bus_address) {
1091 if (device->parent && device->parent->ops.bind) 1127 if (device->parent && device->parent->ops.bind)
1092 device->parent->ops.bind(device); 1128 device->parent->ops.bind(device);
1093 } 1129 }
1094 1130
1095 /*
1096 * Locate & Attach Driver
1097 * ----------------------
1098 * If there's a hardware id (_HID) or compatible ids (_CID) we check
1099 * to see if there's a driver installed for this kind of device. Note
1100 * that drivers can install before or after a device is enumerated.
1101 *
1102 * TBD: Assumes LDM provides driver hot-plug capability.
1103 */
1104 acpi_bus_find_driver(device);
1105
1106 end: 1131 end:
1107 if (!result) 1132 if (!result)
1108 *child = device; 1133 *child = device;
@@ -1188,14 +1213,14 @@ static int acpi_bus_scan(struct acpi_device *start, struct acpi_bus_ops *ops)
1188 1213
1189 if (ops->acpi_op_add) 1214 if (ops->acpi_op_add)
1190 status = acpi_add_single_object(&child, parent, 1215 status = acpi_add_single_object(&child, parent,
1191 chandle, type); 1216 chandle, type, ops);
1192 else 1217 else
1193 status = acpi_bus_get_device(chandle, &child); 1218 status = acpi_bus_get_device(chandle, &child);
1194 1219
1195 if (ACPI_FAILURE(status)) 1220 if (ACPI_FAILURE(status))
1196 continue; 1221 continue;
1197 1222
1198 if (ops->acpi_op_start) { 1223 if (ops->acpi_op_start && !(ops->acpi_op_add)) {
1199 status = acpi_start_single_object(child); 1224 status = acpi_start_single_object(child);
1200 if (ACPI_FAILURE(status)) 1225 if (ACPI_FAILURE(status))
1201 continue; 1226 continue;
@@ -1233,13 +1258,13 @@ acpi_bus_add(struct acpi_device **child,
1233 int result; 1258 int result;
1234 struct acpi_bus_ops ops; 1259 struct acpi_bus_ops ops;
1235 1260
1261 memset(&ops, 0, sizeof(ops));
1262 ops.acpi_op_add = 1;
1236 1263
1237 result = acpi_add_single_object(child, parent, handle, type); 1264 result = acpi_add_single_object(child, parent, handle, type, &ops);
1238 if (!result) { 1265 if (!result)
1239 memset(&ops, 0, sizeof(ops));
1240 ops.acpi_op_add = 1;
1241 result = acpi_bus_scan(*child, &ops); 1266 result = acpi_bus_scan(*child, &ops);
1242 } 1267
1243 return result; 1268 return result;
1244} 1269}
1245 1270
@@ -1325,127 +1350,35 @@ static int acpi_bus_scan_fixed(struct acpi_device *root)
1325{ 1350{
1326 int result = 0; 1351 int result = 0;
1327 struct acpi_device *device = NULL; 1352 struct acpi_device *device = NULL;
1328 1353 struct acpi_bus_ops ops;
1329 1354
1330 if (!root) 1355 if (!root)
1331 return -ENODEV; 1356 return -ENODEV;
1332 1357
1358 memset(&ops, 0, sizeof(ops));
1359 ops.acpi_op_add = 1;
1360 ops.acpi_op_start = 1;
1361
1333 /* 1362 /*
1334 * Enumerate all fixed-feature devices. 1363 * Enumerate all fixed-feature devices.
1335 */ 1364 */
1336 if (acpi_fadt.pwr_button == 0) { 1365 if ((acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON) == 0) {
1337 result = acpi_add_single_object(&device, acpi_root, 1366 result = acpi_add_single_object(&device, acpi_root,
1338 NULL, 1367 NULL,
1339 ACPI_BUS_TYPE_POWER_BUTTON); 1368 ACPI_BUS_TYPE_POWER_BUTTON,
1340 if (!result) 1369 &ops);
1341 result = acpi_start_single_object(device);
1342 } 1370 }
1343 1371
1344 if (acpi_fadt.sleep_button == 0) { 1372 if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
1345 result = acpi_add_single_object(&device, acpi_root, 1373 result = acpi_add_single_object(&device, acpi_root,
1346 NULL, 1374 NULL,
1347 ACPI_BUS_TYPE_SLEEP_BUTTON); 1375 ACPI_BUS_TYPE_SLEEP_BUTTON,
1348 if (!result) 1376 &ops);
1349 result = acpi_start_single_object(device);
1350 } 1377 }
1351 1378
1352 return result; 1379 return result;
1353} 1380}
1354 1381
1355
1356static inline struct acpi_device * to_acpi_dev(struct device * dev)
1357{
1358 return container_of(dev, struct acpi_device, dev);
1359}
1360
1361
1362static int root_suspend(struct acpi_device * acpi_dev, pm_message_t state)
1363{
1364 struct acpi_device * dev, * next;
1365 int result;
1366
1367 spin_lock(&acpi_device_lock);
1368 list_for_each_entry_safe_reverse(dev, next, &acpi_device_list, g_list) {
1369 if (dev->driver && dev->driver->ops.suspend) {
1370 spin_unlock(&acpi_device_lock);
1371 result = dev->driver->ops.suspend(dev, 0);
1372 if (result) {
1373 printk(KERN_ERR PREFIX "[%s - %s] Suspend failed: %d\n",
1374 acpi_device_name(dev),
1375 acpi_device_bid(dev), result);
1376 }
1377 spin_lock(&acpi_device_lock);
1378 }
1379 }
1380 spin_unlock(&acpi_device_lock);
1381 return 0;
1382}
1383
1384
1385static int acpi_device_suspend(struct device * dev, pm_message_t state)
1386{
1387 struct acpi_device * acpi_dev = to_acpi_dev(dev);
1388
1389 /*
1390 * For now, we should only register 1 generic device -
1391 * the ACPI root device - and from there, we walk the
1392 * tree of ACPI devices to suspend each one using the
1393 * ACPI driver methods.
1394 */
1395 if (acpi_dev->handle == ACPI_ROOT_OBJECT)
1396 root_suspend(acpi_dev, state);
1397 return 0;
1398}
1399
1400
1401
1402static int root_resume(struct acpi_device * acpi_dev)
1403{
1404 struct acpi_device * dev, * next;
1405 int result;
1406
1407 spin_lock(&acpi_device_lock);
1408 list_for_each_entry_safe(dev, next, &acpi_device_list, g_list) {
1409 if (dev->driver && dev->driver->ops.resume) {
1410 spin_unlock(&acpi_device_lock);
1411 result = dev->driver->ops.resume(dev, 0);
1412 if (result) {
1413 printk(KERN_ERR PREFIX "[%s - %s] resume failed: %d\n",
1414 acpi_device_name(dev),
1415 acpi_device_bid(dev), result);
1416 }
1417 spin_lock(&acpi_device_lock);
1418 }
1419 }
1420 spin_unlock(&acpi_device_lock);
1421 return 0;
1422}
1423
1424
1425static int acpi_device_resume(struct device * dev)
1426{
1427 struct acpi_device * acpi_dev = to_acpi_dev(dev);
1428
1429 /*
1430 * For now, we should only register 1 generic device -
1431 * the ACPI root device - and from there, we walk the
1432 * tree of ACPI devices to resume each one using the
1433 * ACPI driver methods.
1434 */
1435 if (acpi_dev->handle == ACPI_ROOT_OBJECT)
1436 root_resume(acpi_dev);
1437 return 0;
1438}
1439
1440
1441static struct bus_type acpi_bus_type = {
1442 .name = "acpi",
1443 .suspend = acpi_device_suspend,
1444 .resume = acpi_device_resume,
1445};
1446
1447
1448
1449static int __init acpi_scan_init(void) 1382static int __init acpi_scan_init(void)
1450{ 1383{
1451 int result; 1384 int result;
@@ -1455,9 +1388,9 @@ static int __init acpi_scan_init(void)
1455 if (acpi_disabled) 1388 if (acpi_disabled)
1456 return 0; 1389 return 0;
1457 1390
1458 result = kset_register(&acpi_namespace_kset); 1391 memset(&ops, 0, sizeof(ops));
1459 if (result < 0) 1392 ops.acpi_op_add = 1;
1460 printk(KERN_ERR PREFIX "kset_register error: %d\n", result); 1393 ops.acpi_op_start = 1;
1461 1394
1462 result = bus_register(&acpi_bus_type); 1395 result = bus_register(&acpi_bus_type);
1463 if (result) { 1396 if (result) {
@@ -1469,32 +1402,16 @@ static int __init acpi_scan_init(void)
1469 * Create the root device in the bus's device tree 1402 * Create the root device in the bus's device tree
1470 */ 1403 */
1471 result = acpi_add_single_object(&acpi_root, NULL, ACPI_ROOT_OBJECT, 1404 result = acpi_add_single_object(&acpi_root, NULL, ACPI_ROOT_OBJECT,
1472 ACPI_BUS_TYPE_SYSTEM); 1405 ACPI_BUS_TYPE_SYSTEM, &ops);
1473 if (result) 1406 if (result)
1474 goto Done; 1407 goto Done;
1475 1408
1476 result = acpi_start_single_object(acpi_root);
1477 if (result)
1478 goto Done;
1479
1480 acpi_root->dev.bus = &acpi_bus_type;
1481 snprintf(acpi_root->dev.bus_id, BUS_ID_SIZE, "%s", acpi_bus_type.name);
1482 result = device_register(&acpi_root->dev);
1483 if (result) {
1484 /* We don't want to quit even if we failed to add suspend/resume */
1485 printk(KERN_ERR PREFIX "Could not register device\n");
1486 }
1487
1488 /* 1409 /*
1489 * Enumerate devices in the ACPI namespace. 1410 * Enumerate devices in the ACPI namespace.
1490 */ 1411 */
1491 result = acpi_bus_scan_fixed(acpi_root); 1412 result = acpi_bus_scan_fixed(acpi_root);
1492 if (!result) { 1413 if (!result)
1493 memset(&ops, 0, sizeof(ops));
1494 ops.acpi_op_add = 1;
1495 ops.acpi_op_start = 1;
1496 result = acpi_bus_scan(acpi_root, &ops); 1414 result = acpi_bus_scan(acpi_root, &ops);
1497 }
1498 1415
1499 if (result) 1416 if (result)
1500 acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL); 1417 acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
index 34962578039d..ccc11b33d89c 100644
--- a/drivers/acpi/sleep/proc.c
+++ b/drivers/acpi/sleep/proc.c
@@ -73,7 +73,7 @@ acpi_system_write_sleep(struct file *file,
73static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset) 73static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
74{ 74{
75 u32 sec, min, hr; 75 u32 sec, min, hr;
76 u32 day, mo, yr; 76 u32 day, mo, yr, cent = 0;
77 unsigned char rtc_control = 0; 77 unsigned char rtc_control = 0;
78 unsigned long flags; 78 unsigned long flags;
79 79
@@ -87,20 +87,19 @@ static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
87 rtc_control = CMOS_READ(RTC_CONTROL); 87 rtc_control = CMOS_READ(RTC_CONTROL);
88 88
89 /* If we ever get an FACP with proper values... */ 89 /* If we ever get an FACP with proper values... */
90 if (acpi_gbl_FADT->day_alrm) 90 if (acpi_gbl_FADT.day_alarm)
91 /* ACPI spec: only low 6 its should be cared */ 91 /* ACPI spec: only low 6 its should be cared */
92 day = CMOS_READ(acpi_gbl_FADT->day_alrm) & 0x3F; 92 day = CMOS_READ(acpi_gbl_FADT.day_alarm) & 0x3F;
93 else 93 else
94 day = CMOS_READ(RTC_DAY_OF_MONTH); 94 day = CMOS_READ(RTC_DAY_OF_MONTH);
95 if (acpi_gbl_FADT->mon_alrm) 95 if (acpi_gbl_FADT.month_alarm)
96 mo = CMOS_READ(acpi_gbl_FADT->mon_alrm); 96 mo = CMOS_READ(acpi_gbl_FADT.month_alarm);
97 else 97 else
98 mo = CMOS_READ(RTC_MONTH); 98 mo = CMOS_READ(RTC_MONTH);
99 if (acpi_gbl_FADT->century) 99 if (acpi_gbl_FADT.century)
100 yr = CMOS_READ(acpi_gbl_FADT->century) * 100 + 100 cent = CMOS_READ(acpi_gbl_FADT.century);
101 CMOS_READ(RTC_YEAR); 101
102 else 102 yr = CMOS_READ(RTC_YEAR);
103 yr = CMOS_READ(RTC_YEAR);
104 103
105 spin_unlock_irqrestore(&rtc_lock, flags); 104 spin_unlock_irqrestore(&rtc_lock, flags);
106 105
@@ -111,10 +110,11 @@ static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
111 BCD_TO_BIN(day); 110 BCD_TO_BIN(day);
112 BCD_TO_BIN(mo); 111 BCD_TO_BIN(mo);
113 BCD_TO_BIN(yr); 112 BCD_TO_BIN(yr);
113 BCD_TO_BIN(cent);
114 } 114 }
115 115
116 /* we're trusting the FADT (see above) */ 116 /* we're trusting the FADT (see above) */
117 if (!acpi_gbl_FADT->century) 117 if (!acpi_gbl_FADT.century)
118 /* If we're not trusting the FADT, we should at least make it 118 /* If we're not trusting the FADT, we should at least make it
119 * right for _this_ century... ehm, what is _this_ century? 119 * right for _this_ century... ehm, what is _this_ century?
120 * 120 *
@@ -134,6 +134,8 @@ static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
134 * 134 *
135 */ 135 */
136 yr += 2000; 136 yr += 2000;
137 else
138 yr += cent * 100;
137 139
138 seq_printf(seq, "%4.4u-", yr); 140 seq_printf(seq, "%4.4u-", yr);
139 (mo > 12) ? seq_puts(seq, "**-") : seq_printf(seq, "%2.2u-", mo); 141 (mo > 12) ? seq_puts(seq, "**-") : seq_printf(seq, "%2.2u-", mo);
@@ -317,12 +319,12 @@ acpi_system_write_alarm(struct file *file,
317 * offsets into the CMOS RAM here -- which for some reason are pointing 319 * offsets into the CMOS RAM here -- which for some reason are pointing
318 * to the RTC area of memory. 320 * to the RTC area of memory.
319 */ 321 */
320 if (acpi_gbl_FADT->day_alrm) 322 if (acpi_gbl_FADT.day_alarm)
321 CMOS_WRITE(day, acpi_gbl_FADT->day_alrm); 323 CMOS_WRITE(day, acpi_gbl_FADT.day_alarm);
322 if (acpi_gbl_FADT->mon_alrm) 324 if (acpi_gbl_FADT.month_alarm)
323 CMOS_WRITE(mo, acpi_gbl_FADT->mon_alrm); 325 CMOS_WRITE(mo, acpi_gbl_FADT.month_alarm);
324 if (acpi_gbl_FADT->century) 326 if (acpi_gbl_FADT.century)
325 CMOS_WRITE(yr / 100, acpi_gbl_FADT->century); 327 CMOS_WRITE(yr / 100, acpi_gbl_FADT.century);
326 /* enable the rtc alarm interrupt */ 328 /* enable the rtc alarm interrupt */
327 rtc_control |= RTC_AIE; 329 rtc_control |= RTC_AIE;
328 CMOS_WRITE(rtc_control, RTC_CONTROL); 330 CMOS_WRITE(rtc_control, RTC_CONTROL);
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index d86dcb3c2366..7147b0bdab0a 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -32,6 +32,11 @@
32 32
33#define _COMPONENT ACPI_SYSTEM_COMPONENT 33#define _COMPONENT ACPI_SYSTEM_COMPONENT
34ACPI_MODULE_NAME("acpi_system") 34ACPI_MODULE_NAME("acpi_system")
35#ifdef MODULE_PARAM_PREFIX
36#undef MODULE_PARAM_PREFIX
37#endif
38#define MODULE_PARAM_PREFIX "acpi."
39
35#define ACPI_SYSTEM_CLASS "system" 40#define ACPI_SYSTEM_CLASS "system"
36#define ACPI_SYSTEM_DRIVER_NAME "ACPI System Driver" 41#define ACPI_SYSTEM_DRIVER_NAME "ACPI System Driver"
37#define ACPI_SYSTEM_DEVICE_NAME "System" 42#define ACPI_SYSTEM_DEVICE_NAME "System"
@@ -39,11 +44,24 @@ ACPI_MODULE_NAME("acpi_system")
39#define ACPI_SYSTEM_FILE_EVENT "event" 44#define ACPI_SYSTEM_FILE_EVENT "event"
40#define ACPI_SYSTEM_FILE_DSDT "dsdt" 45#define ACPI_SYSTEM_FILE_DSDT "dsdt"
41#define ACPI_SYSTEM_FILE_FADT "fadt" 46#define ACPI_SYSTEM_FILE_FADT "fadt"
42extern struct fadt_descriptor acpi_fadt; 47
48/*
49 * Make ACPICA version work as module param
50 */
51static int param_get_acpica_version(char *buffer, struct kernel_param *kp) {
52 int result;
53
54 result = sprintf(buffer, "%x", ACPI_CA_VERSION);
55
56 return result;
57}
58
59module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
43 60
44/* -------------------------------------------------------------------------- 61/* --------------------------------------------------------------------------
45 FS Interface (/proc) 62 FS Interface (/proc)
46 -------------------------------------------------------------------------- */ 63 -------------------------------------------------------------------------- */
64#ifdef CONFIG_ACPI_PROCFS
47 65
48static int acpi_system_read_info(struct seq_file *seq, void *offset) 66static int acpi_system_read_info(struct seq_file *seq, void *offset)
49{ 67{
@@ -63,6 +81,7 @@ static const struct file_operations acpi_system_info_ops = {
63 .llseek = seq_lseek, 81 .llseek = seq_lseek,
64 .release = single_release, 82 .release = single_release,
65}; 83};
84#endif
66 85
67static ssize_t acpi_system_read_dsdt(struct file *, char __user *, size_t, 86static ssize_t acpi_system_read_dsdt(struct file *, char __user *, size_t,
68 loff_t *); 87 loff_t *);
@@ -76,17 +95,16 @@ acpi_system_read_dsdt(struct file *file,
76 char __user * buffer, size_t count, loff_t * ppos) 95 char __user * buffer, size_t count, loff_t * ppos)
77{ 96{
78 acpi_status status = AE_OK; 97 acpi_status status = AE_OK;
79 struct acpi_buffer dsdt = { ACPI_ALLOCATE_BUFFER, NULL }; 98 struct acpi_table_header *dsdt = NULL;
80 ssize_t res; 99 ssize_t res;
81 100
82 101
83 status = acpi_get_table(ACPI_TABLE_ID_DSDT, 1, &dsdt); 102 status = acpi_get_table(ACPI_SIG_DSDT, 1, &dsdt);
84 if (ACPI_FAILURE(status)) 103 if (ACPI_FAILURE(status))
85 return -ENODEV; 104 return -ENODEV;
86 105
87 res = simple_read_from_buffer(buffer, count, ppos, 106 res = simple_read_from_buffer(buffer, count, ppos,
88 dsdt.pointer, dsdt.length); 107 dsdt, dsdt->length);
89 kfree(dsdt.pointer);
90 108
91 return res; 109 return res;
92} 110}
@@ -103,17 +121,16 @@ acpi_system_read_fadt(struct file *file,
103 char __user * buffer, size_t count, loff_t * ppos) 121 char __user * buffer, size_t count, loff_t * ppos)
104{ 122{
105 acpi_status status = AE_OK; 123 acpi_status status = AE_OK;
106 struct acpi_buffer fadt = { ACPI_ALLOCATE_BUFFER, NULL }; 124 struct acpi_table_header *fadt = NULL;
107 ssize_t res; 125 ssize_t res;
108 126
109 127
110 status = acpi_get_table(ACPI_TABLE_ID_FADT, 1, &fadt); 128 status = acpi_get_table(ACPI_SIG_FADT, 1, &fadt);
111 if (ACPI_FAILURE(status)) 129 if (ACPI_FAILURE(status))
112 return -ENODEV; 130 return -ENODEV;
113 131
114 res = simple_read_from_buffer(buffer, count, ppos, 132 res = simple_read_from_buffer(buffer, count, ppos,
115 fadt.pointer, fadt.length); 133 fadt, fadt->length);
116 kfree(fadt.pointer);
117 134
118 return res; 135 return res;
119} 136}
@@ -128,6 +145,7 @@ static int __init acpi_system_init(void)
128 if (acpi_disabled) 145 if (acpi_disabled)
129 return 0; 146 return 0;
130 147
148#ifdef CONFIG_ACPI_PROCFS
131 /* 'info' [R] */ 149 /* 'info' [R] */
132 name = ACPI_SYSTEM_FILE_INFO; 150 name = ACPI_SYSTEM_FILE_INFO;
133 entry = create_proc_entry(name, S_IRUGO, acpi_root_dir); 151 entry = create_proc_entry(name, S_IRUGO, acpi_root_dir);
@@ -136,6 +154,7 @@ static int __init acpi_system_init(void)
136 else { 154 else {
137 entry->proc_fops = &acpi_system_info_ops; 155 entry->proc_fops = &acpi_system_info_ops;
138 } 156 }
157#endif
139 158
140 /* 'dsdt' [R] */ 159 /* 'dsdt' [R] */
141 name = ACPI_SYSTEM_FILE_DSDT; 160 name = ACPI_SYSTEM_FILE_DSDT;
@@ -159,7 +178,9 @@ static int __init acpi_system_init(void)
159 Error: 178 Error:
160 remove_proc_entry(ACPI_SYSTEM_FILE_FADT, acpi_root_dir); 179 remove_proc_entry(ACPI_SYSTEM_FILE_FADT, acpi_root_dir);
161 remove_proc_entry(ACPI_SYSTEM_FILE_DSDT, acpi_root_dir); 180 remove_proc_entry(ACPI_SYSTEM_FILE_DSDT, acpi_root_dir);
181#ifdef CONFIG_ACPI_PROCFS
162 remove_proc_entry(ACPI_SYSTEM_FILE_INFO, acpi_root_dir); 182 remove_proc_entry(ACPI_SYSTEM_FILE_INFO, acpi_root_dir);
183#endif
163 184
164 error = -EFAULT; 185 error = -EFAULT;
165 goto Done; 186 goto Done;
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index ffa30c9fccbf..ba4cb200314a 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -38,154 +38,97 @@
38 38
39#define ACPI_MAX_TABLES 128 39#define ACPI_MAX_TABLES 128
40 40
41static char *acpi_table_signatures[ACPI_TABLE_COUNT] = {
42 [ACPI_TABLE_UNKNOWN] = "????",
43 [ACPI_APIC] = "APIC",
44 [ACPI_BOOT] = "BOOT",
45 [ACPI_DBGP] = "DBGP",
46 [ACPI_DSDT] = "DSDT",
47 [ACPI_ECDT] = "ECDT",
48 [ACPI_ETDT] = "ETDT",
49 [ACPI_FADT] = "FACP",
50 [ACPI_FACS] = "FACS",
51 [ACPI_OEMX] = "OEM",
52 [ACPI_PSDT] = "PSDT",
53 [ACPI_SBST] = "SBST",
54 [ACPI_SLIT] = "SLIT",
55 [ACPI_SPCR] = "SPCR",
56 [ACPI_SRAT] = "SRAT",
57 [ACPI_SSDT] = "SSDT",
58 [ACPI_SPMI] = "SPMI",
59 [ACPI_HPET] = "HPET",
60 [ACPI_MCFG] = "MCFG",
61};
62
63static char *mps_inti_flags_polarity[] = { "dfl", "high", "res", "low" }; 41static char *mps_inti_flags_polarity[] = { "dfl", "high", "res", "low" };
64static char *mps_inti_flags_trigger[] = { "dfl", "edge", "res", "level" }; 42static char *mps_inti_flags_trigger[] = { "dfl", "edge", "res", "level" };
65 43
66/* System Description Table (RSDT/XSDT) */ 44static struct acpi_table_desc initial_tables[ACPI_MAX_TABLES] __initdata;
67struct acpi_table_sdt {
68 unsigned long pa;
69 enum acpi_table_id id;
70 unsigned long size;
71} __attribute__ ((packed));
72
73static unsigned long sdt_pa; /* Physical Address */
74static unsigned long sdt_count; /* Table count */
75 45
76static struct acpi_table_sdt sdt_entry[ACPI_MAX_TABLES] __initdata; 46void acpi_table_print_madt_entry(struct acpi_subtable_header * header)
77
78void acpi_table_print(struct acpi_table_header *header, unsigned long phys_addr)
79{
80 char *name = NULL;
81
82 if (!header)
83 return;
84
85 /* Some table signatures aren't good table names */
86
87 if (!strncmp((char *)&header->signature,
88 acpi_table_signatures[ACPI_APIC],
89 sizeof(header->signature))) {
90 name = "MADT";
91 } else if (!strncmp((char *)&header->signature,
92 acpi_table_signatures[ACPI_FADT],
93 sizeof(header->signature))) {
94 name = "FADT";
95 } else
96 name = header->signature;
97
98 printk(KERN_DEBUG PREFIX
99 "%.4s (v%3.3d %6.6s %8.8s 0x%08x %.4s 0x%08x) @ 0x%p\n", name,
100 header->revision, header->oem_id, header->oem_table_id,
101 header->oem_revision, header->asl_compiler_id,
102 header->asl_compiler_revision, (void *)phys_addr);
103}
104
105void acpi_table_print_madt_entry(acpi_table_entry_header * header)
106{ 47{
107 if (!header) 48 if (!header)
108 return; 49 return;
109 50
110 switch (header->type) { 51 switch (header->type) {
111 52
112 case ACPI_MADT_LAPIC: 53 case ACPI_MADT_TYPE_LOCAL_APIC:
113 { 54 {
114 struct acpi_table_lapic *p = 55 struct acpi_madt_local_apic *p =
115 (struct acpi_table_lapic *)header; 56 (struct acpi_madt_local_apic *)header;
116 printk(KERN_INFO PREFIX 57 printk(KERN_INFO PREFIX
117 "LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n", 58 "LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
118 p->acpi_id, p->id, 59 p->processor_id, p->id,
119 p->flags.enabled ? "enabled" : "disabled"); 60 (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
120 } 61 }
121 break; 62 break;
122 63
123 case ACPI_MADT_IOAPIC: 64 case ACPI_MADT_TYPE_IO_APIC:
124 { 65 {
125 struct acpi_table_ioapic *p = 66 struct acpi_madt_io_apic *p =
126 (struct acpi_table_ioapic *)header; 67 (struct acpi_madt_io_apic *)header;
127 printk(KERN_INFO PREFIX 68 printk(KERN_INFO PREFIX
128 "IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n", 69 "IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n",
129 p->id, p->address, p->global_irq_base); 70 p->id, p->address, p->global_irq_base);
130 } 71 }
131 break; 72 break;
132 73
133 case ACPI_MADT_INT_SRC_OVR: 74 case ACPI_MADT_TYPE_INTERRUPT_OVERRIDE:
134 { 75 {
135 struct acpi_table_int_src_ovr *p = 76 struct acpi_madt_interrupt_override *p =
136 (struct acpi_table_int_src_ovr *)header; 77 (struct acpi_madt_interrupt_override *)header;
137 printk(KERN_INFO PREFIX 78 printk(KERN_INFO PREFIX
138 "INT_SRC_OVR (bus %d bus_irq %d global_irq %d %s %s)\n", 79 "INT_SRC_OVR (bus %d bus_irq %d global_irq %d %s %s)\n",
139 p->bus, p->bus_irq, p->global_irq, 80 p->bus, p->source_irq, p->global_irq,
140 mps_inti_flags_polarity[p->flags.polarity], 81 mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
141 mps_inti_flags_trigger[p->flags.trigger]); 82 mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2]);
142 if (p->flags.reserved) 83 if (p->inti_flags &
84 ~(ACPI_MADT_POLARITY_MASK | ACPI_MADT_TRIGGER_MASK))
143 printk(KERN_INFO PREFIX 85 printk(KERN_INFO PREFIX
144 "INT_SRC_OVR unexpected reserved flags: 0x%x\n", 86 "INT_SRC_OVR unexpected reserved flags: 0x%x\n",
145 p->flags.reserved); 87 p->inti_flags &
88 ~(ACPI_MADT_POLARITY_MASK | ACPI_MADT_TRIGGER_MASK));
146 89
147 } 90 }
148 break; 91 break;
149 92
150 case ACPI_MADT_NMI_SRC: 93 case ACPI_MADT_TYPE_NMI_SOURCE:
151 { 94 {
152 struct acpi_table_nmi_src *p = 95 struct acpi_madt_nmi_source *p =
153 (struct acpi_table_nmi_src *)header; 96 (struct acpi_madt_nmi_source *)header;
154 printk(KERN_INFO PREFIX 97 printk(KERN_INFO PREFIX
155 "NMI_SRC (%s %s global_irq %d)\n", 98 "NMI_SRC (%s %s global_irq %d)\n",
156 mps_inti_flags_polarity[p->flags.polarity], 99 mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
157 mps_inti_flags_trigger[p->flags.trigger], 100 mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
158 p->global_irq); 101 p->global_irq);
159 } 102 }
160 break; 103 break;
161 104
162 case ACPI_MADT_LAPIC_NMI: 105 case ACPI_MADT_TYPE_LOCAL_APIC_NMI:
163 { 106 {
164 struct acpi_table_lapic_nmi *p = 107 struct acpi_madt_local_apic_nmi *p =
165 (struct acpi_table_lapic_nmi *)header; 108 (struct acpi_madt_local_apic_nmi *)header;
166 printk(KERN_INFO PREFIX 109 printk(KERN_INFO PREFIX
167 "LAPIC_NMI (acpi_id[0x%02x] %s %s lint[0x%x])\n", 110 "LAPIC_NMI (acpi_id[0x%02x] %s %s lint[0x%x])\n",
168 p->acpi_id, 111 p->processor_id,
169 mps_inti_flags_polarity[p->flags.polarity], 112 mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK ],
170 mps_inti_flags_trigger[p->flags.trigger], 113 mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
171 p->lint); 114 p->lint);
172 } 115 }
173 break; 116 break;
174 117
175 case ACPI_MADT_LAPIC_ADDR_OVR: 118 case ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE:
176 { 119 {
177 struct acpi_table_lapic_addr_ovr *p = 120 struct acpi_madt_local_apic_override *p =
178 (struct acpi_table_lapic_addr_ovr *)header; 121 (struct acpi_madt_local_apic_override *)header;
179 printk(KERN_INFO PREFIX 122 printk(KERN_INFO PREFIX
180 "LAPIC_ADDR_OVR (address[%p])\n", 123 "LAPIC_ADDR_OVR (address[%p])\n",
181 (void *)(unsigned long)p->address); 124 (void *)(unsigned long)p->address);
182 } 125 }
183 break; 126 break;
184 127
185 case ACPI_MADT_IOSAPIC: 128 case ACPI_MADT_TYPE_IO_SAPIC:
186 { 129 {
187 struct acpi_table_iosapic *p = 130 struct acpi_madt_io_sapic *p =
188 (struct acpi_table_iosapic *)header; 131 (struct acpi_madt_io_sapic *)header;
189 printk(KERN_INFO PREFIX 132 printk(KERN_INFO PREFIX
190 "IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n", 133 "IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n",
191 p->id, (void *)(unsigned long)p->address, 134 p->id, (void *)(unsigned long)p->address,
@@ -193,26 +136,26 @@ void acpi_table_print_madt_entry(acpi_table_entry_header * header)
193 } 136 }
194 break; 137 break;
195 138
196 case ACPI_MADT_LSAPIC: 139 case ACPI_MADT_TYPE_LOCAL_SAPIC:
197 { 140 {
198 struct acpi_table_lsapic *p = 141 struct acpi_madt_local_sapic *p =
199 (struct acpi_table_lsapic *)header; 142 (struct acpi_madt_local_sapic *)header;
200 printk(KERN_INFO PREFIX 143 printk(KERN_INFO PREFIX
201 "LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n", 144 "LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
202 p->acpi_id, p->id, p->eid, 145 p->processor_id, p->id, p->eid,
203 p->flags.enabled ? "enabled" : "disabled"); 146 (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
204 } 147 }
205 break; 148 break;
206 149
207 case ACPI_MADT_PLAT_INT_SRC: 150 case ACPI_MADT_TYPE_INTERRUPT_SOURCE:
208 { 151 {
209 struct acpi_table_plat_int_src *p = 152 struct acpi_madt_interrupt_source *p =
210 (struct acpi_table_plat_int_src *)header; 153 (struct acpi_madt_interrupt_source *)header;
211 printk(KERN_INFO PREFIX 154 printk(KERN_INFO PREFIX
212 "PLAT_INT_SRC (%s %s type[0x%x] id[0x%04x] eid[0x%x] iosapic_vector[0x%x] global_irq[0x%x]\n", 155 "PLAT_INT_SRC (%s %s type[0x%x] id[0x%04x] eid[0x%x] iosapic_vector[0x%x] global_irq[0x%x]\n",
213 mps_inti_flags_polarity[p->flags.polarity], 156 mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
214 mps_inti_flags_trigger[p->flags.trigger], 157 mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
215 p->type, p->id, p->eid, p->iosapic_vector, 158 p->type, p->id, p->eid, p->io_sapic_vector,
216 p->global_irq); 159 p->global_irq);
217 } 160 }
218 break; 161 break;
@@ -225,342 +168,76 @@ void acpi_table_print_madt_entry(acpi_table_entry_header * header)
225 } 168 }
226} 169}
227 170
228static int
229acpi_table_compute_checksum(void *table_pointer, unsigned long length)
230{
231 u8 *p = table_pointer;
232 unsigned long remains = length;
233 unsigned long sum = 0;
234
235 if (!p || !length)
236 return -EINVAL;
237
238 while (remains--)
239 sum += *p++;
240
241 return (sum & 0xFF);
242}
243 171
244/*
245 * acpi_get_table_header_early()
246 * for acpi_blacklisted(), acpi_table_get_sdt()
247 */
248int __init 172int __init
249acpi_get_table_header_early(enum acpi_table_id id, 173acpi_table_parse_madt_family(char *id,
250 struct acpi_table_header **header)
251{
252 unsigned int i;
253 enum acpi_table_id temp_id;
254
255 /* DSDT is different from the rest */
256 if (id == ACPI_DSDT)
257 temp_id = ACPI_FADT;
258 else
259 temp_id = id;
260
261 /* Locate the table. */
262
263 for (i = 0; i < sdt_count; i++) {
264 if (sdt_entry[i].id != temp_id)
265 continue;
266 *header = (void *)
267 __acpi_map_table(sdt_entry[i].pa, sdt_entry[i].size);
268 if (!*header) {
269 printk(KERN_WARNING PREFIX "Unable to map %s\n",
270 acpi_table_signatures[temp_id]);
271 return -ENODEV;
272 }
273 break;
274 }
275
276 if (!*header) {
277 printk(KERN_WARNING PREFIX "%s not present\n",
278 acpi_table_signatures[id]);
279 return -ENODEV;
280 }
281
282 /* Map the DSDT header via the pointer in the FADT */
283 if (id == ACPI_DSDT) {
284 struct fadt_descriptor *fadt =
285 (struct fadt_descriptor *)*header;
286
287 if (fadt->revision == 3 && fadt->Xdsdt) {
288 *header = (void *)__acpi_map_table(fadt->Xdsdt,
289 sizeof(struct
290 acpi_table_header));
291 } else if (fadt->V1_dsdt) {
292 *header = (void *)__acpi_map_table(fadt->V1_dsdt,
293 sizeof(struct
294 acpi_table_header));
295 } else
296 *header = NULL;
297
298 if (!*header) {
299 printk(KERN_WARNING PREFIX "Unable to map DSDT\n");
300 return -ENODEV;
301 }
302 }
303
304 return 0;
305}
306
307int __init
308acpi_table_parse_madt_family(enum acpi_table_id id,
309 unsigned long madt_size, 174 unsigned long madt_size,
310 int entry_id, 175 int entry_id,
311 acpi_madt_entry_handler handler, 176 acpi_madt_entry_handler handler,
312 unsigned int max_entries) 177 unsigned int max_entries)
313{ 178{
314 void *madt = NULL; 179 struct acpi_table_header *madt = NULL;
315 acpi_table_entry_header *entry; 180 struct acpi_subtable_header *entry;
316 unsigned int count = 0; 181 unsigned int count = 0;
317 unsigned long madt_end; 182 unsigned long madt_end;
318 unsigned int i;
319 183
320 if (!handler) 184 if (!handler)
321 return -EINVAL; 185 return -EINVAL;
322 186
323 /* Locate the MADT (if exists). There should only be one. */ 187 /* Locate the MADT (if exists). There should only be one. */
324 188 acpi_get_table(id, 0, &madt);
325 for (i = 0; i < sdt_count; i++) {
326 if (sdt_entry[i].id != id)
327 continue;
328 madt = (void *)
329 __acpi_map_table(sdt_entry[i].pa, sdt_entry[i].size);
330 if (!madt) {
331 printk(KERN_WARNING PREFIX "Unable to map %s\n",
332 acpi_table_signatures[id]);
333 return -ENODEV;
334 }
335 break;
336 }
337 189
338 if (!madt) { 190 if (!madt) {
339 printk(KERN_WARNING PREFIX "%s not present\n", 191 printk(KERN_WARNING PREFIX "%4.4s not present\n", id);
340 acpi_table_signatures[id]);
341 return -ENODEV; 192 return -ENODEV;
342 } 193 }
343 194
344 madt_end = (unsigned long)madt + sdt_entry[i].size; 195 madt_end = (unsigned long)madt + madt->length;
345 196
346 /* Parse all entries looking for a match. */ 197 /* Parse all entries looking for a match. */
347 198
348 entry = (acpi_table_entry_header *) 199 entry = (struct acpi_subtable_header *)
349 ((unsigned long)madt + madt_size); 200 ((unsigned long)madt + madt_size);
350 201
351 while (((unsigned long)entry) + sizeof(acpi_table_entry_header) < 202 while (((unsigned long)entry) + sizeof(struct acpi_subtable_header) <
352 madt_end) { 203 madt_end) {
353 if (entry->type == entry_id 204 if (entry->type == entry_id
354 && (!max_entries || count++ < max_entries)) 205 && (!max_entries || count++ < max_entries))
355 if (handler(entry, madt_end)) 206 if (handler(entry, madt_end))
356 return -EINVAL; 207 return -EINVAL;
357 208
358 entry = (acpi_table_entry_header *) 209 entry = (struct acpi_subtable_header *)
359 ((unsigned long)entry + entry->length); 210 ((unsigned long)entry + entry->length);
360 } 211 }
361 if (max_entries && count > max_entries) { 212 if (max_entries && count > max_entries) {
362 printk(KERN_WARNING PREFIX "[%s:0x%02x] ignored %i entries of " 213 printk(KERN_WARNING PREFIX "[%4.4s:0x%02x] ignored %i entries of "
363 "%i found\n", acpi_table_signatures[id], entry_id, 214 "%i found\n", id, entry_id, count - max_entries, count);
364 count - max_entries, count);
365 } 215 }
366 216
367 return count; 217 return count;
368} 218}
369 219
370int __init 220int __init
371acpi_table_parse_madt(enum acpi_madt_entry_id id, 221acpi_table_parse_madt(enum acpi_madt_type id,
372 acpi_madt_entry_handler handler, unsigned int max_entries) 222 acpi_madt_entry_handler handler, unsigned int max_entries)
373{ 223{
374 return acpi_table_parse_madt_family(ACPI_APIC, 224 return acpi_table_parse_madt_family(ACPI_SIG_MADT,
375 sizeof(struct acpi_table_madt), id, 225 sizeof(struct acpi_table_madt), id,
376 handler, max_entries); 226 handler, max_entries);
377} 227}
378 228
379int __init acpi_table_parse(enum acpi_table_id id, acpi_table_handler handler) 229int __init acpi_table_parse(char *id, acpi_table_handler handler)
380{ 230{
381 int count = 0; 231 struct acpi_table_header *table = NULL;
382 unsigned int i = 0;
383
384 if (!handler) 232 if (!handler)
385 return -EINVAL; 233 return -EINVAL;
386 234
387 for (i = 0; i < sdt_count; i++) { 235 acpi_get_table(id, 0, &table);
388 if (sdt_entry[i].id != id) 236 if (table) {
389 continue; 237 handler(table);
390 count++; 238 return 1;
391 if (count == 1) 239 } else
392 handler(sdt_entry[i].pa, sdt_entry[i].size); 240 return 0;
393
394 else
395 printk(KERN_WARNING PREFIX
396 "%d duplicate %s table ignored.\n", count,
397 acpi_table_signatures[id]);
398 }
399
400 return count;
401}
402
403static int __init acpi_table_get_sdt(struct acpi_table_rsdp *rsdp)
404{
405 struct acpi_table_header *header = NULL;
406 unsigned int i, id = 0;
407
408 if (!rsdp)
409 return -EINVAL;
410
411 /* First check XSDT (but only on ACPI 2.0-compatible systems) */
412
413 if ((rsdp->revision >= 2) &&
414 (((struct acpi20_table_rsdp *)rsdp)->xsdt_address)) {
415
416 struct acpi_table_xsdt *mapped_xsdt = NULL;
417
418 sdt_pa = ((struct acpi20_table_rsdp *)rsdp)->xsdt_address;
419
420 /* map in just the header */
421 header = (struct acpi_table_header *)
422 __acpi_map_table(sdt_pa, sizeof(struct acpi_table_header));
423
424 if (!header) {
425 printk(KERN_WARNING PREFIX
426 "Unable to map XSDT header\n");
427 return -ENODEV;
428 }
429
430 /* remap in the entire table before processing */
431 mapped_xsdt = (struct acpi_table_xsdt *)
432 __acpi_map_table(sdt_pa, header->length);
433 if (!mapped_xsdt) {
434 printk(KERN_WARNING PREFIX "Unable to map XSDT\n");
435 return -ENODEV;
436 }
437 header = &mapped_xsdt->header;
438
439 if (strncmp(header->signature, "XSDT", 4)) {
440 printk(KERN_WARNING PREFIX
441 "XSDT signature incorrect\n");
442 return -ENODEV;
443 }
444
445 if (acpi_table_compute_checksum(header, header->length)) {
446 printk(KERN_WARNING PREFIX "Invalid XSDT checksum\n");
447 return -ENODEV;
448 }
449
450 sdt_count =
451 (header->length - sizeof(struct acpi_table_header)) >> 3;
452 if (sdt_count > ACPI_MAX_TABLES) {
453 printk(KERN_WARNING PREFIX
454 "Truncated %lu XSDT entries\n",
455 (sdt_count - ACPI_MAX_TABLES));
456 sdt_count = ACPI_MAX_TABLES;
457 }
458
459 for (i = 0; i < sdt_count; i++)
460 sdt_entry[i].pa = (unsigned long)mapped_xsdt->entry[i];
461 }
462
463 /* Then check RSDT */
464
465 else if (rsdp->rsdt_address) {
466
467 struct acpi_table_rsdt *mapped_rsdt = NULL;
468
469 sdt_pa = rsdp->rsdt_address;
470
471 /* map in just the header */
472 header = (struct acpi_table_header *)
473 __acpi_map_table(sdt_pa, sizeof(struct acpi_table_header));
474 if (!header) {
475 printk(KERN_WARNING PREFIX
476 "Unable to map RSDT header\n");
477 return -ENODEV;
478 }
479
480 /* remap in the entire table before processing */
481 mapped_rsdt = (struct acpi_table_rsdt *)
482 __acpi_map_table(sdt_pa, header->length);
483 if (!mapped_rsdt) {
484 printk(KERN_WARNING PREFIX "Unable to map RSDT\n");
485 return -ENODEV;
486 }
487 header = &mapped_rsdt->header;
488
489 if (strncmp(header->signature, "RSDT", 4)) {
490 printk(KERN_WARNING PREFIX
491 "RSDT signature incorrect\n");
492 return -ENODEV;
493 }
494
495 if (acpi_table_compute_checksum(header, header->length)) {
496 printk(KERN_WARNING PREFIX "Invalid RSDT checksum\n");
497 return -ENODEV;
498 }
499
500 sdt_count =
501 (header->length - sizeof(struct acpi_table_header)) >> 2;
502 if (sdt_count > ACPI_MAX_TABLES) {
503 printk(KERN_WARNING PREFIX
504 "Truncated %lu RSDT entries\n",
505 (sdt_count - ACPI_MAX_TABLES));
506 sdt_count = ACPI_MAX_TABLES;
507 }
508
509 for (i = 0; i < sdt_count; i++)
510 sdt_entry[i].pa = (unsigned long)mapped_rsdt->entry[i];
511 }
512
513 else {
514 printk(KERN_WARNING PREFIX
515 "No System Description Table (RSDT/XSDT) specified in RSDP\n");
516 return -ENODEV;
517 }
518
519 acpi_table_print(header, sdt_pa);
520
521 for (i = 0; i < sdt_count; i++) {
522
523 /* map in just the header */
524 header = (struct acpi_table_header *)
525 __acpi_map_table(sdt_entry[i].pa,
526 sizeof(struct acpi_table_header));
527 if (!header)
528 continue;
529
530 /* remap in the entire table before processing */
531 header = (struct acpi_table_header *)
532 __acpi_map_table(sdt_entry[i].pa, header->length);
533 if (!header)
534 continue;
535
536 acpi_table_print(header, sdt_entry[i].pa);
537
538 if (acpi_table_compute_checksum(header, header->length)) {
539 printk(KERN_WARNING " >>> ERROR: Invalid checksum\n");
540 continue;
541 }
542
543 sdt_entry[i].size = header->length;
544
545 for (id = 0; id < ACPI_TABLE_COUNT; id++) {
546 if (!strncmp((char *)&header->signature,
547 acpi_table_signatures[id],
548 sizeof(header->signature))) {
549 sdt_entry[i].id = id;
550 }
551 }
552 }
553
554 /*
555 * The DSDT is *not* in the RSDT (why not? no idea.) but we want
556 * to print its info, because this is what people usually blacklist
557 * against. Unfortunately, we don't know the phys_addr, so just
558 * print 0. Maybe no one will notice.
559 */
560 if (!acpi_get_table_header_early(ACPI_DSDT, &header))
561 acpi_table_print(header, 0);
562
563 return 0;
564} 241}
565 242
566/* 243/*
@@ -568,54 +245,13 @@ static int __init acpi_table_get_sdt(struct acpi_table_rsdp *rsdp)
568 * 245 *
569 * find RSDP, find and checksum SDT/XSDT. 246 * find RSDP, find and checksum SDT/XSDT.
570 * checksum all tables, print SDT/XSDT 247 * checksum all tables, print SDT/XSDT
571 * 248 *
572 * result: sdt_entry[] is initialized 249 * result: sdt_entry[] is initialized
573 */ 250 */
574 251
252
575int __init acpi_table_init(void) 253int __init acpi_table_init(void)
576{ 254{
577 struct acpi_table_rsdp *rsdp = NULL; 255 acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
578 unsigned long rsdp_phys = 0;
579 int result = 0;
580
581 /* Locate and map the Root System Description Table (RSDP) */
582
583 rsdp_phys = acpi_find_rsdp();
584 if (!rsdp_phys) {
585 printk(KERN_ERR PREFIX "Unable to locate RSDP\n");
586 return -ENODEV;
587 }
588
589 rsdp = (struct acpi_table_rsdp *)__acpi_map_table(rsdp_phys,
590 sizeof(struct acpi_table_rsdp));
591 if (!rsdp) {
592 printk(KERN_WARNING PREFIX "Unable to map RSDP\n");
593 return -ENODEV;
594 }
595
596 printk(KERN_DEBUG PREFIX
597 "RSDP (v%3.3d %6.6s ) @ 0x%p\n",
598 rsdp->revision, rsdp->oem_id, (void *)rsdp_phys);
599
600 if (rsdp->revision < 2)
601 result =
602 acpi_table_compute_checksum(rsdp,
603 sizeof(struct acpi_table_rsdp));
604 else
605 result =
606 acpi_table_compute_checksum(rsdp,
607 ((struct acpi20_table_rsdp *)
608 rsdp)->length);
609
610 if (result) {
611 printk(KERN_WARNING " >>> ERROR: Invalid checksum\n");
612 return -ENODEV;
613 }
614
615 /* Locate and map the System Description table (RSDT/XSDT) */
616
617 if (acpi_table_get_sdt(rsdp))
618 return -ENODEV;
619
620 return 0; 256 return 0;
621} 257}
diff --git a/drivers/acpi/tables/Makefile b/drivers/acpi/tables/Makefile
index aa4c69594d97..0a7d7afac255 100644
--- a/drivers/acpi/tables/Makefile
+++ b/drivers/acpi/tables/Makefile
@@ -2,7 +2,6 @@
2# Makefile for all Linux ACPI interpreter subdirectories 2# Makefile for all Linux ACPI interpreter subdirectories
3# 3#
4 4
5obj-y := tbconvrt.o tbget.o tbrsdt.o tbxface.o \ 5obj-y := tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o
6 tbgetall.o tbinstal.o tbutils.o tbxfroot.o
7 6
8EXTRA_CFLAGS += $(ACPI_CFLAGS) 7EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/tables/tbconvrt.c b/drivers/acpi/tables/tbconvrt.c
deleted file mode 100644
index d697fcb35d52..000000000000
--- a/drivers/acpi/tables/tbconvrt.c
+++ /dev/null
@@ -1,622 +0,0 @@
1/******************************************************************************
2 *
3 * Module Name: tbconvrt - ACPI Table conversion utilities
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include <acpi/actables.h>
46
47#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbconvrt")
49
50/* Local prototypes */
51static void
52acpi_tb_init_generic_address(struct acpi_generic_address *new_gas_struct,
53 u8 register_bit_width,
54 acpi_physical_address address);
55
56static void
57acpi_tb_convert_fadt1(struct fadt_descriptor *local_fadt,
58 struct fadt_descriptor_rev1 *original_fadt);
59
60static void
61acpi_tb_convert_fadt2(struct fadt_descriptor *local_fadt,
62 struct fadt_descriptor *original_fadt);
63
64u8 acpi_fadt_is_v1;
65ACPI_EXPORT_SYMBOL(acpi_fadt_is_v1)
66
67/*******************************************************************************
68 *
69 * FUNCTION: acpi_tb_get_table_count
70 *
71 * PARAMETERS: RSDP - Pointer to the RSDP
72 * RSDT - Pointer to the RSDT/XSDT
73 *
74 * RETURN: The number of tables pointed to by the RSDT or XSDT.
75 *
76 * DESCRIPTION: Calculate the number of tables. Automatically handles either
77 * an RSDT or XSDT.
78 *
79 ******************************************************************************/
80
81u32
82acpi_tb_get_table_count(struct rsdp_descriptor *RSDP,
83 struct acpi_table_header *RSDT)
84{
85 u32 pointer_size;
86
87 ACPI_FUNCTION_ENTRY();
88
89 /* RSDT pointers are 32 bits, XSDT pointers are 64 bits */
90
91 if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
92 pointer_size = sizeof(u32);
93 } else {
94 pointer_size = sizeof(u64);
95 }
96
97 /*
98 * Determine the number of tables pointed to by the RSDT/XSDT.
99 * This is defined by the ACPI Specification to be the number of
100 * pointers contained within the RSDT/XSDT. The size of the pointers
101 * is architecture-dependent.
102 */
103 return ((RSDT->length -
104 sizeof(struct acpi_table_header)) / pointer_size);
105}
106
107/*******************************************************************************
108 *
109 * FUNCTION: acpi_tb_convert_to_xsdt
110 *
111 * PARAMETERS: table_info - Info about the RSDT
112 *
113 * RETURN: Status
114 *
115 * DESCRIPTION: Convert an RSDT to an XSDT (internal common format)
116 *
117 ******************************************************************************/
118
119acpi_status acpi_tb_convert_to_xsdt(struct acpi_table_desc *table_info)
120{
121 acpi_size table_size;
122 u32 i;
123 struct xsdt_descriptor *new_table;
124
125 ACPI_FUNCTION_ENTRY();
126
127 /* Compute size of the converted XSDT */
128
129 table_size = ((acpi_size) acpi_gbl_rsdt_table_count * sizeof(u64)) +
130 sizeof(struct acpi_table_header);
131
132 /* Allocate an XSDT */
133
134 new_table = ACPI_ALLOCATE_ZEROED(table_size);
135 if (!new_table) {
136 return (AE_NO_MEMORY);
137 }
138
139 /* Copy the header and set the length */
140
141 ACPI_MEMCPY(new_table, table_info->pointer,
142 sizeof(struct acpi_table_header));
143 new_table->length = (u32) table_size;
144
145 /* Copy the table pointers */
146
147 for (i = 0; i < acpi_gbl_rsdt_table_count; i++) {
148
149 /* RSDT pointers are 32 bits, XSDT pointers are 64 bits */
150
151 if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
152 ACPI_STORE_ADDRESS(new_table->table_offset_entry[i],
153 (ACPI_CAST_PTR
154 (struct rsdt_descriptor,
155 table_info->pointer))->
156 table_offset_entry[i]);
157 } else {
158 new_table->table_offset_entry[i] =
159 (ACPI_CAST_PTR(struct xsdt_descriptor,
160 table_info->pointer))->
161 table_offset_entry[i];
162 }
163 }
164
165 /* Delete the original table (either mapped or in a buffer) */
166
167 acpi_tb_delete_single_table(table_info);
168
169 /* Point the table descriptor to the new table */
170
171 table_info->pointer =
172 ACPI_CAST_PTR(struct acpi_table_header, new_table);
173 table_info->length = table_size;
174 table_info->allocation = ACPI_MEM_ALLOCATED;
175
176 return (AE_OK);
177}
178
179/*******************************************************************************
180 *
181 * FUNCTION: acpi_tb_init_generic_address
182 *
183 * PARAMETERS: new_gas_struct - GAS struct to be initialized
184 * register_bit_width - Width of this register
185 * Address - Address of the register
186 *
187 * RETURN: None
188 *
189 * DESCRIPTION: Initialize a GAS structure.
190 *
191 ******************************************************************************/
192
193static void
194acpi_tb_init_generic_address(struct acpi_generic_address *new_gas_struct,
195 u8 register_bit_width,
196 acpi_physical_address address)
197{
198
199 ACPI_STORE_ADDRESS(new_gas_struct->address, address);
200
201 new_gas_struct->address_space_id = ACPI_ADR_SPACE_SYSTEM_IO;
202 new_gas_struct->register_bit_width = register_bit_width;
203 new_gas_struct->register_bit_offset = 0;
204 new_gas_struct->access_width = 0;
205}
206
207/*******************************************************************************
208 *
209 * FUNCTION: acpi_tb_convert_fadt1
210 *
211 * PARAMETERS: local_fadt - Pointer to new FADT
212 * original_fadt - Pointer to old FADT
213 *
214 * RETURN: None, populates local_fadt
215 *
216 * DESCRIPTION: Convert an ACPI 1.0 FADT to common internal format
217 *
218 ******************************************************************************/
219
220static void
221acpi_tb_convert_fadt1(struct fadt_descriptor *local_fadt,
222 struct fadt_descriptor_rev1 *original_fadt)
223{
224
225 /* ACPI 1.0 FACS */
226 /* The BIOS stored FADT should agree with Revision 1.0 */
227 acpi_fadt_is_v1 = 1;
228
229 /*
230 * Copy the table header and the common part of the tables.
231 *
232 * The 2.0 table is an extension of the 1.0 table, so the entire 1.0
233 * table can be copied first, then expand some fields to 64 bits.
234 */
235 ACPI_MEMCPY(local_fadt, original_fadt,
236 sizeof(struct fadt_descriptor_rev1));
237
238 /* Convert table pointers to 64-bit fields */
239
240 ACPI_STORE_ADDRESS(local_fadt->xfirmware_ctrl,
241 local_fadt->V1_firmware_ctrl);
242 ACPI_STORE_ADDRESS(local_fadt->Xdsdt, local_fadt->V1_dsdt);
243
244 /*
245 * System Interrupt Model isn't used in ACPI 2.0
246 * (local_fadt->Reserved1 = 0;)
247 */
248
249 /*
250 * This field is set by the OEM to convey the preferred power management
251 * profile to OSPM. It doesn't have any 1.0 equivalence. Since we don't
252 * know what kind of 32-bit system this is, we will use "unspecified".
253 */
254 local_fadt->prefer_PM_profile = PM_UNSPECIFIED;
255
256 /*
257 * Processor Performance State Control. This is the value OSPM writes to
258 * the SMI_CMD register to assume processor performance state control
259 * responsibility. There isn't any equivalence in 1.0, but as many 1.x
260 * ACPI tables contain _PCT and _PSS we also keep this value, unless
261 * acpi_strict is set.
262 */
263 if (acpi_strict)
264 local_fadt->pstate_cnt = 0;
265
266 /*
267 * Support for the _CST object and C States change notification.
268 * This data item hasn't any 1.0 equivalence so leave it zero.
269 */
270 local_fadt->cst_cnt = 0;
271
272 /*
273 * FADT Rev 2 was an interim FADT released between ACPI 1.0 and ACPI 2.0.
274 * It primarily adds the FADT reset mechanism.
275 */
276 if ((original_fadt->revision == 2) &&
277 (original_fadt->length ==
278 sizeof(struct fadt_descriptor_rev2_minus))) {
279 /*
280 * Grab the entire generic address struct, plus the 1-byte reset value
281 * that immediately follows.
282 */
283 ACPI_MEMCPY(&local_fadt->reset_register,
284 &(ACPI_CAST_PTR(struct fadt_descriptor_rev2_minus,
285 original_fadt))->reset_register,
286 sizeof(struct acpi_generic_address) + 1);
287 } else {
288 /*
289 * Since there isn't any equivalence in 1.0 and since it is highly
290 * likely that a 1.0 system has legacy support.
291 */
292 local_fadt->iapc_boot_arch = BAF_LEGACY_DEVICES;
293 }
294
295 /*
296 * Convert the V1.0 block addresses to V2.0 GAS structures
297 */
298 acpi_tb_init_generic_address(&local_fadt->xpm1a_evt_blk,
299 local_fadt->pm1_evt_len,
300 (acpi_physical_address) local_fadt->
301 V1_pm1a_evt_blk);
302 acpi_tb_init_generic_address(&local_fadt->xpm1b_evt_blk,
303 local_fadt->pm1_evt_len,
304 (acpi_physical_address) local_fadt->
305 V1_pm1b_evt_blk);
306 acpi_tb_init_generic_address(&local_fadt->xpm1a_cnt_blk,
307 local_fadt->pm1_cnt_len,
308 (acpi_physical_address) local_fadt->
309 V1_pm1a_cnt_blk);
310 acpi_tb_init_generic_address(&local_fadt->xpm1b_cnt_blk,
311 local_fadt->pm1_cnt_len,
312 (acpi_physical_address) local_fadt->
313 V1_pm1b_cnt_blk);
314 acpi_tb_init_generic_address(&local_fadt->xpm2_cnt_blk,
315 local_fadt->pm2_cnt_len,
316 (acpi_physical_address) local_fadt->
317 V1_pm2_cnt_blk);
318 acpi_tb_init_generic_address(&local_fadt->xpm_tmr_blk,
319 local_fadt->pm_tm_len,
320 (acpi_physical_address) local_fadt->
321 V1_pm_tmr_blk);
322 acpi_tb_init_generic_address(&local_fadt->xgpe0_blk, 0,
323 (acpi_physical_address) local_fadt->
324 V1_gpe0_blk);
325 acpi_tb_init_generic_address(&local_fadt->xgpe1_blk, 0,
326 (acpi_physical_address) local_fadt->
327 V1_gpe1_blk);
328
329 /* Create separate GAS structs for the PM1 Enable registers */
330
331 acpi_tb_init_generic_address(&acpi_gbl_xpm1a_enable,
332 (u8) ACPI_DIV_2(acpi_gbl_FADT->
333 pm1_evt_len),
334 (acpi_physical_address)
335 (local_fadt->xpm1a_evt_blk.address +
336 ACPI_DIV_2(acpi_gbl_FADT->pm1_evt_len)));
337
338 /* PM1B is optional; leave null if not present */
339
340 if (local_fadt->xpm1b_evt_blk.address) {
341 acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable,
342 (u8) ACPI_DIV_2(acpi_gbl_FADT->
343 pm1_evt_len),
344 (acpi_physical_address)
345 (local_fadt->xpm1b_evt_blk.
346 address +
347 ACPI_DIV_2(acpi_gbl_FADT->
348 pm1_evt_len)));
349 }
350}
351
352/*******************************************************************************
353 *
354 * FUNCTION: acpi_tb_convert_fadt2
355 *
356 * PARAMETERS: local_fadt - Pointer to new FADT
357 * original_fadt - Pointer to old FADT
358 *
359 * RETURN: None, populates local_fadt
360 *
361 * DESCRIPTION: Convert an ACPI 2.0 FADT to common internal format.
362 * Handles optional "X" fields.
363 *
364 ******************************************************************************/
365
366static void
367acpi_tb_convert_fadt2(struct fadt_descriptor *local_fadt,
368 struct fadt_descriptor *original_fadt)
369{
370
371 /* We have an ACPI 2.0 FADT but we must copy it to our local buffer */
372
373 ACPI_MEMCPY(local_fadt, original_fadt, sizeof(struct fadt_descriptor));
374
375 /*
376 * "X" fields are optional extensions to the original V1.0 fields, so
377 * we must selectively expand V1.0 fields if the corresponding X field
378 * is zero.
379 */
380 if (!(local_fadt->xfirmware_ctrl)) {
381 ACPI_STORE_ADDRESS(local_fadt->xfirmware_ctrl,
382 local_fadt->V1_firmware_ctrl);
383 }
384
385 if (!(local_fadt->Xdsdt)) {
386 ACPI_STORE_ADDRESS(local_fadt->Xdsdt, local_fadt->V1_dsdt);
387 }
388
389 if (!(local_fadt->xpm1a_evt_blk.address)) {
390 acpi_tb_init_generic_address(&local_fadt->xpm1a_evt_blk,
391 local_fadt->pm1_evt_len,
392 (acpi_physical_address)
393 local_fadt->V1_pm1a_evt_blk);
394 }
395
396 if (!(local_fadt->xpm1b_evt_blk.address)) {
397 acpi_tb_init_generic_address(&local_fadt->xpm1b_evt_blk,
398 local_fadt->pm1_evt_len,
399 (acpi_physical_address)
400 local_fadt->V1_pm1b_evt_blk);
401 }
402
403 if (!(local_fadt->xpm1a_cnt_blk.address)) {
404 acpi_tb_init_generic_address(&local_fadt->xpm1a_cnt_blk,
405 local_fadt->pm1_cnt_len,
406 (acpi_physical_address)
407 local_fadt->V1_pm1a_cnt_blk);
408 }
409
410 if (!(local_fadt->xpm1b_cnt_blk.address)) {
411 acpi_tb_init_generic_address(&local_fadt->xpm1b_cnt_blk,
412 local_fadt->pm1_cnt_len,
413 (acpi_physical_address)
414 local_fadt->V1_pm1b_cnt_blk);
415 }
416
417 if (!(local_fadt->xpm2_cnt_blk.address)) {
418 acpi_tb_init_generic_address(&local_fadt->xpm2_cnt_blk,
419 local_fadt->pm2_cnt_len,
420 (acpi_physical_address)
421 local_fadt->V1_pm2_cnt_blk);
422 }
423
424 if (!(local_fadt->xpm_tmr_blk.address)) {
425 acpi_tb_init_generic_address(&local_fadt->xpm_tmr_blk,
426 local_fadt->pm_tm_len,
427 (acpi_physical_address)
428 local_fadt->V1_pm_tmr_blk);
429 }
430
431 if (!(local_fadt->xgpe0_blk.address)) {
432 acpi_tb_init_generic_address(&local_fadt->xgpe0_blk,
433 0,
434 (acpi_physical_address)
435 local_fadt->V1_gpe0_blk);
436 }
437
438 if (!(local_fadt->xgpe1_blk.address)) {
439 acpi_tb_init_generic_address(&local_fadt->xgpe1_blk,
440 0,
441 (acpi_physical_address)
442 local_fadt->V1_gpe1_blk);
443 }
444
445 /* Create separate GAS structs for the PM1 Enable registers */
446
447 acpi_tb_init_generic_address(&acpi_gbl_xpm1a_enable,
448 (u8) ACPI_DIV_2(acpi_gbl_FADT->
449 pm1_evt_len),
450 (acpi_physical_address)
451 (local_fadt->xpm1a_evt_blk.address +
452 ACPI_DIV_2(acpi_gbl_FADT->pm1_evt_len)));
453
454 acpi_gbl_xpm1a_enable.address_space_id =
455 local_fadt->xpm1a_evt_blk.address_space_id;
456
457 /* PM1B is optional; leave null if not present */
458
459 if (local_fadt->xpm1b_evt_blk.address) {
460 acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable,
461 (u8) ACPI_DIV_2(acpi_gbl_FADT->
462 pm1_evt_len),
463 (acpi_physical_address)
464 (local_fadt->xpm1b_evt_blk.
465 address +
466 ACPI_DIV_2(acpi_gbl_FADT->
467 pm1_evt_len)));
468
469 acpi_gbl_xpm1b_enable.address_space_id =
470 local_fadt->xpm1b_evt_blk.address_space_id;
471 }
472}
473
474/*******************************************************************************
475 *
476 * FUNCTION: acpi_tb_convert_table_fadt
477 *
478 * PARAMETERS: None
479 *
480 * RETURN: Status
481 *
482 * DESCRIPTION: Converts a BIOS supplied ACPI 1.0 FADT to a local
483 * ACPI 2.0 FADT. If the BIOS supplied a 2.0 FADT then it is simply
484 * copied to the local FADT. The ACPI CA software uses this
485 * local FADT. Thus a significant amount of special #ifdef
486 * type codeing is saved.
487 *
488 ******************************************************************************/
489
490acpi_status acpi_tb_convert_table_fadt(void)
491{
492 struct fadt_descriptor *local_fadt;
493 struct acpi_table_desc *table_desc;
494
495 ACPI_FUNCTION_TRACE(tb_convert_table_fadt);
496
497 /*
498 * acpi_gbl_FADT is valid. Validate the FADT length. The table must be
499 * at least as long as the version 1.0 FADT
500 */
501 if (acpi_gbl_FADT->length < sizeof(struct fadt_descriptor_rev1)) {
502 ACPI_ERROR((AE_INFO, "FADT is invalid, too short: 0x%X",
503 acpi_gbl_FADT->length));
504 return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
505 }
506
507 /* Allocate buffer for the ACPI 2.0(+) FADT */
508
509 local_fadt = ACPI_ALLOCATE_ZEROED(sizeof(struct fadt_descriptor));
510 if (!local_fadt) {
511 return_ACPI_STATUS(AE_NO_MEMORY);
512 }
513
514 if (acpi_gbl_FADT->revision >= FADT2_REVISION_ID) {
515 if (acpi_gbl_FADT->length < sizeof(struct fadt_descriptor)) {
516
517 /* Length is too short to be a V2.0 table */
518
519 ACPI_WARNING((AE_INFO,
520 "Inconsistent FADT length (0x%X) and revision (0x%X), using FADT V1.0 portion of table",
521 acpi_gbl_FADT->length,
522 acpi_gbl_FADT->revision));
523
524 acpi_tb_convert_fadt1(local_fadt,
525 (void *)acpi_gbl_FADT);
526 } else {
527 /* Valid V2.0 table */
528
529 acpi_tb_convert_fadt2(local_fadt, acpi_gbl_FADT);
530 }
531 } else {
532 /* Valid V1.0 table */
533
534 acpi_tb_convert_fadt1(local_fadt, (void *)acpi_gbl_FADT);
535 }
536
537 /* Global FADT pointer will point to the new common V2.0 FADT */
538
539 acpi_gbl_FADT = local_fadt;
540 acpi_gbl_FADT->length = sizeof(struct fadt_descriptor);
541
542 /* Free the original table */
543
544 table_desc = acpi_gbl_table_lists[ACPI_TABLE_ID_FADT].next;
545 acpi_tb_delete_single_table(table_desc);
546
547 /* Install the new table */
548
549 table_desc->pointer =
550 ACPI_CAST_PTR(struct acpi_table_header, acpi_gbl_FADT);
551 table_desc->allocation = ACPI_MEM_ALLOCATED;
552 table_desc->length = sizeof(struct fadt_descriptor);
553
554 /* Dump the entire FADT */
555
556 ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
557 "Hex dump of common internal FADT, size %d (%X)\n",
558 acpi_gbl_FADT->length, acpi_gbl_FADT->length));
559
560 ACPI_DUMP_BUFFER(ACPI_CAST_PTR(u8, acpi_gbl_FADT),
561 acpi_gbl_FADT->length);
562
563 return_ACPI_STATUS(AE_OK);
564}
565
566/*******************************************************************************
567 *
568 * FUNCTION: acpi_tb_build_common_facs
569 *
570 * PARAMETERS: table_info - Info for currently installed FACS
571 *
572 * RETURN: Status
573 *
574 * DESCRIPTION: Convert ACPI 1.0 and ACPI 2.0 FACS to a common internal
575 * table format.
576 *
577 ******************************************************************************/
578
579acpi_status acpi_tb_build_common_facs(struct acpi_table_desc *table_info)
580{
581
582 ACPI_FUNCTION_TRACE(tb_build_common_facs);
583
584 /* Absolute minimum length is 24, but the ACPI spec says 64 */
585
586 if (acpi_gbl_FACS->length < 24) {
587 ACPI_ERROR((AE_INFO, "Invalid FACS table length: 0x%X",
588 acpi_gbl_FACS->length));
589 return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
590 }
591
592 if (acpi_gbl_FACS->length < 64) {
593 ACPI_WARNING((AE_INFO,
594 "FACS is shorter than the ACPI specification allows: 0x%X, using anyway",
595 acpi_gbl_FACS->length));
596 }
597
598 /* Copy fields to the new FACS */
599
600 acpi_gbl_common_fACS.global_lock = &(acpi_gbl_FACS->global_lock);
601
602 if ((acpi_gbl_RSDP->revision < 2) ||
603 (acpi_gbl_FACS->length < 32) ||
604 (!(acpi_gbl_FACS->xfirmware_waking_vector))) {
605
606 /* ACPI 1.0 FACS or short table or optional X_ field is zero */
607
608 acpi_gbl_common_fACS.firmware_waking_vector = ACPI_CAST_PTR(u64,
609 &
610 (acpi_gbl_FACS->
611 firmware_waking_vector));
612 acpi_gbl_common_fACS.vector_width = 32;
613 } else {
614 /* ACPI 2.0 FACS with valid X_ field */
615
616 acpi_gbl_common_fACS.firmware_waking_vector =
617 &acpi_gbl_FACS->xfirmware_waking_vector;
618 acpi_gbl_common_fACS.vector_width = 64;
619 }
620
621 return_ACPI_STATUS(AE_OK);
622}
diff --git a/drivers/acpi/tables/tbfadt.c b/drivers/acpi/tables/tbfadt.c
new file mode 100644
index 000000000000..807c7116e94b
--- /dev/null
+++ b/drivers/acpi/tables/tbfadt.c
@@ -0,0 +1,434 @@
1/******************************************************************************
2 *
3 * Module Name: tbfadt - FADT table utilities
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include <acpi/actables.h>
46
47#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbfadt")
49
50/* Local prototypes */
51static void inline
52acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
53 u8 bit_width, u64 address);
54
55static void acpi_tb_convert_fadt(void);
56
57static void acpi_tb_validate_fadt(void);
58
59/* Table for conversion of FADT to common internal format and FADT validation */
60
61typedef struct acpi_fadt_info {
62 char *name;
63 u8 target;
64 u8 source;
65 u8 length;
66 u8 type;
67
68} acpi_fadt_info;
69
70#define ACPI_FADT_REQUIRED 1
71#define ACPI_FADT_SEPARATE_LENGTH 2
72
73static struct acpi_fadt_info fadt_info_table[] = {
74 {"Pm1aEventBlock", ACPI_FADT_OFFSET(xpm1a_event_block),
75 ACPI_FADT_OFFSET(pm1a_event_block),
76 ACPI_FADT_OFFSET(pm1_event_length), ACPI_FADT_REQUIRED},
77
78 {"Pm1bEventBlock", ACPI_FADT_OFFSET(xpm1b_event_block),
79 ACPI_FADT_OFFSET(pm1b_event_block),
80 ACPI_FADT_OFFSET(pm1_event_length), 0},
81
82 {"Pm1aControlBlock", ACPI_FADT_OFFSET(xpm1a_control_block),
83 ACPI_FADT_OFFSET(pm1a_control_block),
84 ACPI_FADT_OFFSET(pm1_control_length), ACPI_FADT_REQUIRED},
85
86 {"Pm1bControlBlock", ACPI_FADT_OFFSET(xpm1b_control_block),
87 ACPI_FADT_OFFSET(pm1b_control_block),
88 ACPI_FADT_OFFSET(pm1_control_length), 0},
89
90 {"Pm2ControlBlock", ACPI_FADT_OFFSET(xpm2_control_block),
91 ACPI_FADT_OFFSET(pm2_control_block),
92 ACPI_FADT_OFFSET(pm2_control_length), ACPI_FADT_SEPARATE_LENGTH},
93
94 {"PmTimerBlock", ACPI_FADT_OFFSET(xpm_timer_block),
95 ACPI_FADT_OFFSET(pm_timer_block),
96 ACPI_FADT_OFFSET(pm_timer_length), ACPI_FADT_REQUIRED},
97
98 {"Gpe0Block", ACPI_FADT_OFFSET(xgpe0_block),
99 ACPI_FADT_OFFSET(gpe0_block),
100 ACPI_FADT_OFFSET(gpe0_block_length), ACPI_FADT_SEPARATE_LENGTH},
101
102 {"Gpe1Block", ACPI_FADT_OFFSET(xgpe1_block),
103 ACPI_FADT_OFFSET(gpe1_block),
104 ACPI_FADT_OFFSET(gpe1_block_length), ACPI_FADT_SEPARATE_LENGTH}
105};
106
107#define ACPI_FADT_INFO_ENTRIES (sizeof (fadt_info_table) / sizeof (struct acpi_fadt_info))
108
109/*******************************************************************************
110 *
111 * FUNCTION: acpi_tb_init_generic_address
112 *
113 * PARAMETERS: generic_address - GAS struct to be initialized
114 * bit_width - Width of this register
115 * Address - Address of the register
116 *
117 * RETURN: None
118 *
119 * DESCRIPTION: Initialize a Generic Address Structure (GAS)
120 * See the ACPI specification for a full description and
121 * definition of this structure.
122 *
123 ******************************************************************************/
124
125static void inline
126acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
127 u8 bit_width, u64 address)
128{
129
130 /*
131 * The 64-bit Address field is non-aligned in the byte packed
132 * GAS struct.
133 */
134 ACPI_MOVE_64_TO_64(&generic_address->address, &address);
135
136 /* All other fields are byte-wide */
137
138 generic_address->space_id = ACPI_ADR_SPACE_SYSTEM_IO;
139 generic_address->bit_width = bit_width;
140 generic_address->bit_offset = 0;
141 generic_address->access_width = 0;
142}
143
144/*******************************************************************************
145 *
146 * FUNCTION: acpi_tb_parse_fadt
147 *
148 * PARAMETERS: table_index - Index for the FADT
149 * Flags - Flags
150 *
151 * RETURN: None
152 *
153 * DESCRIPTION: Initialize the FADT, DSDT and FACS tables
154 * (FADT contains the addresses of the DSDT and FACS)
155 *
156 ******************************************************************************/
157
158void acpi_tb_parse_fadt(acpi_native_uint table_index, u8 flags)
159{
160 u32 length;
161 struct acpi_table_header *table;
162
163 /*
164 * The FADT has multiple versions with different lengths,
165 * and it contains pointers to both the DSDT and FACS tables.
166 *
167 * Get a local copy of the FADT and convert it to a common format
168 * Map entire FADT, assumed to be smaller than one page.
169 */
170 length = acpi_gbl_root_table_list.tables[table_index].length;
171
172 table =
173 acpi_os_map_memory(acpi_gbl_root_table_list.tables[table_index].
174 address, length);
175 if (!table) {
176 return;
177 }
178
179 /*
180 * Validate the FADT checksum before we copy the table. Ignore
181 * checksum error as we want to try to get the DSDT and FACS.
182 */
183 (void)acpi_tb_verify_checksum(table, length);
184
185 /* Obtain a local copy of the FADT in common ACPI 2.0+ format */
186
187 acpi_tb_create_local_fadt(table, length);
188
189 /* All done with the real FADT, unmap it */
190
191 acpi_os_unmap_memory(table, length);
192
193 /* Obtain the DSDT and FACS tables via their addresses within the FADT */
194
195 acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt,
196 flags, ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT);
197
198 acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xfacs,
199 flags, ACPI_SIG_FACS, ACPI_TABLE_INDEX_FACS);
200}
201
202/*******************************************************************************
203 *
204 * FUNCTION: acpi_tb_create_local_fadt
205 *
206 * PARAMETERS: Table - Pointer to BIOS FADT
207 * Length - Length of the table
208 *
209 * RETURN: None
210 *
211 * DESCRIPTION: Get a local copy of the FADT and convert it to a common format.
212 * Performs validation on some important FADT fields.
213 *
214 ******************************************************************************/
215
216void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
217{
218
219 /*
220 * Check if the FADT is larger than what we know about (ACPI 2.0 version).
221 * Truncate the table, but make some noise.
222 */
223 if (length > sizeof(struct acpi_table_fadt)) {
224 ACPI_WARNING((AE_INFO,
225 "FADT (revision %u) is longer than ACPI 2.0 version, truncating length 0x%X to 0x%zX",
226 table->revision, (unsigned)length,
227 sizeof(struct acpi_table_fadt)));
228 }
229
230 /* Copy the entire FADT locally. Zero first for tb_convert_fadt */
231
232 ACPI_MEMSET(&acpi_gbl_FADT, 0, sizeof(struct acpi_table_fadt));
233
234 ACPI_MEMCPY(&acpi_gbl_FADT, table,
235 ACPI_MIN(length, sizeof(struct acpi_table_fadt)));
236
237 /*
238 * 1) Convert the local copy of the FADT to the common internal format
239 * 2) Validate some of the important values within the FADT
240 */
241 acpi_tb_convert_fadt();
242 acpi_tb_validate_fadt();
243}
244
245/*******************************************************************************
246 *
247 * FUNCTION: acpi_tb_convert_fadt
248 *
249 * PARAMETERS: None, uses acpi_gbl_FADT
250 *
251 * RETURN: None
252 *
253 * DESCRIPTION: Converts all versions of the FADT to a common internal format.
254 * -> Expand all 32-bit addresses to 64-bit.
255 *
256 * NOTE: acpi_gbl_FADT must be of size (struct acpi_table_fadt),
257 * and must contain a copy of the actual FADT.
258 *
259 * ACPICA will use the "X" fields of the FADT for all addresses.
260 *
261 * "X" fields are optional extensions to the original V1.0 fields. Even if
262 * they are present in the structure, they can be optionally not used by
263 * setting them to zero. Therefore, we must selectively expand V1.0 fields
264 * if the corresponding X field is zero.
265 *
266 * For ACPI 1.0 FADTs, all address fields are expanded to the corresponding
267 * "X" fields.
268 *
269 * For ACPI 2.0 FADTs, any "X" fields that are NULL are filled in by
270 * expanding the corresponding ACPI 1.0 field.
271 *
272 ******************************************************************************/
273
274static void acpi_tb_convert_fadt(void)
275{
276 u8 pm1_register_length;
277 struct acpi_generic_address *target;
278 acpi_native_uint i;
279
280 /* Update the local FADT table header length */
281
282 acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
283
284 /* Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary */
285
286 if (!acpi_gbl_FADT.Xfacs) {
287 acpi_gbl_FADT.Xfacs = (u64) acpi_gbl_FADT.facs;
288 }
289
290 if (!acpi_gbl_FADT.Xdsdt) {
291 acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt;
292 }
293
294 /*
295 * Expand the 32-bit V1.0 addresses to the 64-bit "X" generic address
296 * structures as necessary.
297 */
298 for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
299 target =
300 ACPI_ADD_PTR(struct acpi_generic_address, &acpi_gbl_FADT,
301 fadt_info_table[i].target);
302
303 /* Expand only if the X target is null */
304
305 if (!target->address) {
306 acpi_tb_init_generic_address(target,
307 *ACPI_ADD_PTR(u8,
308 &acpi_gbl_FADT,
309 fadt_info_table
310 [i].length),
311 (u64) * ACPI_ADD_PTR(u32,
312 &acpi_gbl_FADT,
313 fadt_info_table
314 [i].
315 source));
316 }
317 }
318
319 /*
320 * Calculate separate GAS structs for the PM1 Enable registers.
321 * These addresses do not appear (directly) in the FADT, so it is
322 * useful to calculate them once, here.
323 *
324 * The PM event blocks are split into two register blocks, first is the
325 * PM Status Register block, followed immediately by the PM Enable Register
326 * block. Each is of length (pm1_event_length/2)
327 */
328 pm1_register_length = (u8) ACPI_DIV_2(acpi_gbl_FADT.pm1_event_length);
329
330 /* The PM1A register block is required */
331
332 acpi_tb_init_generic_address(&acpi_gbl_xpm1a_enable,
333 pm1_register_length,
334 (acpi_gbl_FADT.xpm1a_event_block.address +
335 pm1_register_length));
336 /* Don't forget to copy space_id of the GAS */
337 acpi_gbl_xpm1a_enable.space_id = acpi_gbl_FADT.xpm1a_event_block.space_id;
338
339 /* The PM1B register block is optional, ignore if not present */
340
341 if (acpi_gbl_FADT.xpm1b_event_block.address) {
342 acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable,
343 pm1_register_length,
344 (acpi_gbl_FADT.xpm1b_event_block.
345 address + pm1_register_length));
346 /* Don't forget to copy space_id of the GAS */
347 acpi_gbl_xpm1b_enable.space_id = acpi_gbl_FADT.xpm1a_event_block.space_id;
348
349 }
350}
351
352/******************************************************************************
353 *
354 * FUNCTION: acpi_tb_validate_fadt
355 *
356 * PARAMETERS: Table - Pointer to the FADT to be validated
357 *
358 * RETURN: None
359 *
360 * DESCRIPTION: Validate various important fields within the FADT. If a problem
361 * is found, issue a message, but no status is returned.
362 * Used by both the table manager and the disassembler.
363 *
364 * Possible additional checks:
365 * (acpi_gbl_FADT.pm1_event_length >= 4)
366 * (acpi_gbl_FADT.pm1_control_length >= 2)
367 * (acpi_gbl_FADT.pm_timer_length >= 4)
368 * Gpe block lengths must be multiple of 2
369 *
370 ******************************************************************************/
371
372static void acpi_tb_validate_fadt(void)
373{
374 u32 *address32;
375 struct acpi_generic_address *address64;
376 u8 length;
377 acpi_native_uint i;
378
379 /* Examine all of the 64-bit extended address fields (X fields) */
380
381 for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
382
383 /* Generate pointers to the 32-bit and 64-bit addresses and get the length */
384
385 address64 =
386 ACPI_ADD_PTR(struct acpi_generic_address, &acpi_gbl_FADT,
387 fadt_info_table[i].target);
388 address32 =
389 ACPI_ADD_PTR(u32, &acpi_gbl_FADT,
390 fadt_info_table[i].source);
391 length =
392 *ACPI_ADD_PTR(u8, &acpi_gbl_FADT,
393 fadt_info_table[i].length);
394
395 if (fadt_info_table[i].type & ACPI_FADT_REQUIRED) {
396 /*
397 * Field is required (Pm1a_event, Pm1a_control, pm_timer).
398 * Both the address and length must be non-zero.
399 */
400 if (!address64->address || !length) {
401 ACPI_ERROR((AE_INFO,
402 "Required field \"%s\" has zero address and/or length: %8.8X%8.8X/%X",
403 fadt_info_table[i].name,
404 ACPI_FORMAT_UINT64(address64->
405 address),
406 length));
407 }
408 } else if (fadt_info_table[i].type & ACPI_FADT_SEPARATE_LENGTH) {
409 /*
410 * Field is optional (PM2Control, GPE0, GPE1) AND has its own
411 * length field. If present, both the address and length must be valid.
412 */
413 if ((address64->address && !length)
414 || (!address64->address && length)) {
415 ACPI_WARNING((AE_INFO,
416 "Optional field \"%s\" has zero address or length: %8.8X%8.8X/%X",
417 fadt_info_table[i].name,
418 ACPI_FORMAT_UINT64(address64->
419 address),
420 length));
421 }
422 }
423
424 /* If both 32- and 64-bit addresses are valid (non-zero), they must match */
425
426 if (address64->address && *address32 &&
427 (address64->address != (u64) * address32)) {
428 ACPI_ERROR((AE_INFO,
429 "32/64X address mismatch in \"%s\": [%8.8X] [%8.8X%8.8X], using 64X",
430 fadt_info_table[i].name, *address32,
431 ACPI_FORMAT_UINT64(address64->address)));
432 }
433 }
434}
diff --git a/drivers/acpi/tables/tbfind.c b/drivers/acpi/tables/tbfind.c
new file mode 100644
index 000000000000..058c064948e1
--- /dev/null
+++ b/drivers/acpi/tables/tbfind.c
@@ -0,0 +1,126 @@
1/******************************************************************************
2 *
3 * Module Name: tbfind - find table
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include <acpi/actables.h>
46
47#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbfind")
49
50/*******************************************************************************
51 *
52 * FUNCTION: acpi_tb_find_table
53 *
54 * PARAMETERS: Signature - String with ACPI table signature
55 * oem_id - String with the table OEM ID
56 * oem_table_id - String with the OEM Table ID
57 * table_index - Where the table index is returned
58 *
59 * RETURN: Status and table index
60 *
61 * DESCRIPTION: Find an ACPI table (in the RSDT/XSDT) that matches the
62 * Signature, OEM ID and OEM Table ID. Returns an index that can
63 * be used to get the table header or entire table.
64 *
65 ******************************************************************************/
66acpi_status
67acpi_tb_find_table(char *signature,
68 char *oem_id,
69 char *oem_table_id, acpi_native_uint * table_index)
70{
71 acpi_native_uint i;
72 acpi_status status;
73
74 ACPI_FUNCTION_TRACE(tb_find_table);
75
76 for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
77 if (ACPI_MEMCMP(&(acpi_gbl_root_table_list.tables[i].signature),
78 signature, ACPI_NAME_SIZE)) {
79
80 /* Not the requested table */
81
82 continue;
83 }
84
85 /* Table with matching signature has been found */
86
87 if (!acpi_gbl_root_table_list.tables[i].pointer) {
88
89 /* Table is not currently mapped, map it */
90
91 status =
92 acpi_tb_verify_table(&acpi_gbl_root_table_list.
93 tables[i]);
94 if (ACPI_FAILURE(status)) {
95 return_ACPI_STATUS(status);
96 }
97
98 if (!acpi_gbl_root_table_list.tables[i].pointer) {
99 continue;
100 }
101 }
102
103 /* Check for table match on all IDs */
104
105 if (!ACPI_MEMCMP
106 (acpi_gbl_root_table_list.tables[i].pointer->signature,
107 signature, ACPI_NAME_SIZE) && (!oem_id[0]
108 ||
109 !ACPI_MEMCMP
110 (acpi_gbl_root_table_list.
111 tables[i].pointer->oem_id,
112 oem_id, ACPI_OEM_ID_SIZE))
113 && (!oem_table_id[0]
114 || !ACPI_MEMCMP(acpi_gbl_root_table_list.tables[i].
115 pointer->oem_table_id, oem_table_id,
116 ACPI_OEM_TABLE_ID_SIZE))) {
117 *table_index = i;
118
119 ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
120 "Found table [%4.4s]\n", signature));
121 return_ACPI_STATUS(AE_OK);
122 }
123 }
124
125 return_ACPI_STATUS(AE_NOT_FOUND);
126}
diff --git a/drivers/acpi/tables/tbget.c b/drivers/acpi/tables/tbget.c
deleted file mode 100644
index 11e2d4454e05..000000000000
--- a/drivers/acpi/tables/tbget.c
+++ /dev/null
@@ -1,471 +0,0 @@
1/******************************************************************************
2 *
3 * Module Name: tbget - ACPI Table get* routines
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include <acpi/actables.h>
46
47#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbget")
49
50/* Local prototypes */
51static acpi_status
52acpi_tb_get_this_table(struct acpi_pointer *address,
53 struct acpi_table_header *header,
54 struct acpi_table_desc *table_info);
55
56static acpi_status
57acpi_tb_table_override(struct acpi_table_header *header,
58 struct acpi_table_desc *table_info);
59
60/*******************************************************************************
61 *
62 * FUNCTION: acpi_tb_get_table
63 *
64 * PARAMETERS: Address - Address of table to retrieve. Can be
65 * Logical or Physical
66 * table_info - Where table info is returned
67 *
68 * RETURN: None
69 *
70 * DESCRIPTION: Get entire table of unknown size.
71 *
72 ******************************************************************************/
73
74acpi_status
75acpi_tb_get_table(struct acpi_pointer *address,
76 struct acpi_table_desc *table_info)
77{
78 acpi_status status;
79 struct acpi_table_header header;
80
81 ACPI_FUNCTION_TRACE(tb_get_table);
82
83 /* Get the header in order to get signature and table size */
84
85 status = acpi_tb_get_table_header(address, &header);
86 if (ACPI_FAILURE(status)) {
87 return_ACPI_STATUS(status);
88 }
89
90 /* Get the entire table */
91
92 status = acpi_tb_get_table_body(address, &header, table_info);
93 if (ACPI_FAILURE(status)) {
94 ACPI_EXCEPTION((AE_INFO, status,
95 "Could not get ACPI table (size %X)",
96 header.length));
97 return_ACPI_STATUS(status);
98 }
99
100 return_ACPI_STATUS(AE_OK);
101}
102
103/*******************************************************************************
104 *
105 * FUNCTION: acpi_tb_get_table_header
106 *
107 * PARAMETERS: Address - Address of table to retrieve. Can be
108 * Logical or Physical
109 * return_header - Where the table header is returned
110 *
111 * RETURN: Status
112 *
113 * DESCRIPTION: Get an ACPI table header. Works in both physical or virtual
114 * addressing mode. Works with both physical or logical pointers.
115 * Table is either copied or mapped, depending on the pointer
116 * type and mode of the processor.
117 *
118 ******************************************************************************/
119
120acpi_status
121acpi_tb_get_table_header(struct acpi_pointer *address,
122 struct acpi_table_header *return_header)
123{
124 acpi_status status = AE_OK;
125 struct acpi_table_header *header = NULL;
126
127 ACPI_FUNCTION_TRACE(tb_get_table_header);
128
129 /*
130 * Flags contains the current processor mode (Virtual or Physical
131 * addressing) The pointer_type is either Logical or Physical
132 */
133 switch (address->pointer_type) {
134 case ACPI_PHYSMODE_PHYSPTR:
135 case ACPI_LOGMODE_LOGPTR:
136
137 /* Pointer matches processor mode, copy the header */
138
139 ACPI_MEMCPY(return_header, address->pointer.logical,
140 sizeof(struct acpi_table_header));
141 break;
142
143 case ACPI_LOGMODE_PHYSPTR:
144
145 /* Create a logical address for the physical pointer */
146
147 status = acpi_os_map_memory(address->pointer.physical,
148 sizeof(struct acpi_table_header),
149 (void *)&header);
150 if (ACPI_FAILURE(status)) {
151 ACPI_ERROR((AE_INFO,
152 "Could not map memory at %8.8X%8.8X for table header",
153 ACPI_FORMAT_UINT64(address->pointer.
154 physical)));
155 return_ACPI_STATUS(status);
156 }
157
158 /* Copy header and delete mapping */
159
160 ACPI_MEMCPY(return_header, header,
161 sizeof(struct acpi_table_header));
162 acpi_os_unmap_memory(header, sizeof(struct acpi_table_header));
163 break;
164
165 default:
166
167 ACPI_ERROR((AE_INFO, "Invalid address flags %X",
168 address->pointer_type));
169 return_ACPI_STATUS(AE_BAD_PARAMETER);
170 }
171
172 ACPI_DEBUG_PRINT((ACPI_DB_TABLES, "Table Signature: [%4.4s]\n",
173 return_header->signature));
174
175 return_ACPI_STATUS(AE_OK);
176}
177
178/*******************************************************************************
179 *
180 * FUNCTION: acpi_tb_get_table_body
181 *
182 * PARAMETERS: Address - Address of table to retrieve. Can be
183 * Logical or Physical
184 * Header - Header of the table to retrieve
185 * table_info - Where the table info is returned
186 *
187 * RETURN: Status
188 *
189 * DESCRIPTION: Get an entire ACPI table with support to allow the host OS to
190 * replace the table with a newer version (table override.)
191 * Works in both physical or virtual
192 * addressing mode. Works with both physical or logical pointers.
193 * Table is either copied or mapped, depending on the pointer
194 * type and mode of the processor.
195 *
196 ******************************************************************************/
197
198acpi_status
199acpi_tb_get_table_body(struct acpi_pointer *address,
200 struct acpi_table_header *header,
201 struct acpi_table_desc *table_info)
202{
203 acpi_status status;
204
205 ACPI_FUNCTION_TRACE(tb_get_table_body);
206
207 if (!table_info || !address) {
208 return_ACPI_STATUS(AE_BAD_PARAMETER);
209 }
210
211 /* Attempt table override. */
212
213 status = acpi_tb_table_override(header, table_info);
214 if (ACPI_SUCCESS(status)) {
215
216 /* Table was overridden by the host OS */
217
218 return_ACPI_STATUS(status);
219 }
220
221 /* No override, get the original table */
222
223 status = acpi_tb_get_this_table(address, header, table_info);
224 return_ACPI_STATUS(status);
225}
226
227/*******************************************************************************
228 *
229 * FUNCTION: acpi_tb_table_override
230 *
231 * PARAMETERS: Header - Pointer to table header
232 * table_info - Return info if table is overridden
233 *
234 * RETURN: None
235 *
236 * DESCRIPTION: Attempts override of current table with a new one if provided
237 * by the host OS.
238 *
239 ******************************************************************************/
240
241static acpi_status
242acpi_tb_table_override(struct acpi_table_header *header,
243 struct acpi_table_desc *table_info)
244{
245 struct acpi_table_header *new_table;
246 acpi_status status;
247 struct acpi_pointer address;
248
249 ACPI_FUNCTION_TRACE(tb_table_override);
250
251 /*
252 * The OSL will examine the header and decide whether to override this
253 * table. If it decides to override, a table will be returned in new_table,
254 * which we will then copy.
255 */
256 status = acpi_os_table_override(header, &new_table);
257 if (ACPI_FAILURE(status)) {
258
259 /* Some severe error from the OSL, but we basically ignore it */
260
261 ACPI_EXCEPTION((AE_INFO, status,
262 "Could not override ACPI table"));
263 return_ACPI_STATUS(status);
264 }
265
266 if (!new_table) {
267
268 /* No table override */
269
270 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
271 }
272
273 /*
274 * We have a new table to override the old one. Get a copy of
275 * the new one. We know that the new table has a logical pointer.
276 */
277 address.pointer_type = ACPI_LOGICAL_POINTER | ACPI_LOGICAL_ADDRESSING;
278 address.pointer.logical = new_table;
279
280 status = acpi_tb_get_this_table(&address, new_table, table_info);
281 if (ACPI_FAILURE(status)) {
282 ACPI_EXCEPTION((AE_INFO, status, "Could not copy ACPI table"));
283 return_ACPI_STATUS(status);
284 }
285
286 /* Copy the table info */
287
288 ACPI_INFO((AE_INFO, "Table [%4.4s] replaced by host OS",
289 table_info->pointer->signature));
290
291 return_ACPI_STATUS(AE_OK);
292}
293
294/*******************************************************************************
295 *
296 * FUNCTION: acpi_tb_get_this_table
297 *
298 * PARAMETERS: Address - Address of table to retrieve. Can be
299 * Logical or Physical
300 * Header - Header of the table to retrieve
301 * table_info - Where the table info is returned
302 *
303 * RETURN: Status
304 *
305 * DESCRIPTION: Get an entire ACPI table. Works in both physical or virtual
306 * addressing mode. Works with both physical or logical pointers.
307 * Table is either copied or mapped, depending on the pointer
308 * type and mode of the processor.
309 *
310 ******************************************************************************/
311
312static acpi_status
313acpi_tb_get_this_table(struct acpi_pointer *address,
314 struct acpi_table_header *header,
315 struct acpi_table_desc *table_info)
316{
317 struct acpi_table_header *full_table = NULL;
318 u8 allocation;
319 acpi_status status = AE_OK;
320
321 ACPI_FUNCTION_TRACE(tb_get_this_table);
322
323 /* Validate minimum length */
324
325 if (header->length < sizeof(struct acpi_table_header)) {
326 ACPI_ERROR((AE_INFO,
327 "Table length (%X) is smaller than minimum (%zX)",
328 header->length, sizeof(struct acpi_table_header)));
329
330 return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
331 }
332
333 /*
334 * Flags contains the current processor mode (Virtual or Physical
335 * addressing) The pointer_type is either Logical or Physical
336 */
337 switch (address->pointer_type) {
338 case ACPI_PHYSMODE_PHYSPTR:
339 case ACPI_LOGMODE_LOGPTR:
340
341 /* Pointer matches processor mode, copy the table to a new buffer */
342
343 full_table = ACPI_ALLOCATE(header->length);
344 if (!full_table) {
345 ACPI_ERROR((AE_INFO,
346 "Could not allocate table memory for [%4.4s] length %X",
347 header->signature, header->length));
348 return_ACPI_STATUS(AE_NO_MEMORY);
349 }
350
351 /* Copy the entire table (including header) to the local buffer */
352
353 ACPI_MEMCPY(full_table, address->pointer.logical,
354 header->length);
355
356 /* Save allocation type */
357
358 allocation = ACPI_MEM_ALLOCATED;
359 break;
360
361 case ACPI_LOGMODE_PHYSPTR:
362
363 /*
364 * Just map the table's physical memory
365 * into our address space.
366 */
367 status = acpi_os_map_memory(address->pointer.physical,
368 (acpi_size) header->length,
369 ACPI_CAST_PTR(void, &full_table));
370 if (ACPI_FAILURE(status)) {
371 ACPI_ERROR((AE_INFO,
372 "Could not map memory for table [%4.4s] at %8.8X%8.8X for length %X",
373 header->signature,
374 ACPI_FORMAT_UINT64(address->pointer.
375 physical),
376 header->length));
377 return (status);
378 }
379
380 /* Save allocation type */
381
382 allocation = ACPI_MEM_MAPPED;
383 break;
384
385 default:
386
387 ACPI_ERROR((AE_INFO, "Invalid address flags %X",
388 address->pointer_type));
389 return_ACPI_STATUS(AE_BAD_PARAMETER);
390 }
391
392 /*
393 * Validate checksum for _most_ tables,
394 * even the ones whose signature we don't recognize
395 */
396 if (table_info->type != ACPI_TABLE_ID_FACS) {
397 status = acpi_tb_verify_table_checksum(full_table);
398
399#if (!ACPI_CHECKSUM_ABORT)
400 if (ACPI_FAILURE(status)) {
401
402 /* Ignore the error if configuration says so */
403
404 status = AE_OK;
405 }
406#endif
407 }
408
409 /* Return values */
410
411 table_info->pointer = full_table;
412 table_info->length = (acpi_size) header->length;
413 table_info->allocation = allocation;
414
415 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
416 "Found table [%4.4s] at %8.8X%8.8X, mapped/copied to %p\n",
417 full_table->signature,
418 ACPI_FORMAT_UINT64(address->pointer.physical),
419 full_table));
420
421 return_ACPI_STATUS(status);
422}
423
424/*******************************************************************************
425 *
426 * FUNCTION: acpi_tb_get_table_ptr
427 *
428 * PARAMETERS: table_type - one of the defined table types
429 * Instance - Which table of this type
430 * return_table - pointer to location to place the pointer for
431 * return
432 *
433 * RETURN: Status
434 *
435 * DESCRIPTION: This function is called to get the pointer to an ACPI table.
436 *
437 ******************************************************************************/
438
439acpi_status
440acpi_tb_get_table_ptr(acpi_table_type table_type,
441 u32 instance, struct acpi_table_header **return_table)
442{
443 struct acpi_table_desc *table_desc;
444 u32 i;
445
446 ACPI_FUNCTION_TRACE(tb_get_table_ptr);
447
448 if (table_type > ACPI_TABLE_ID_MAX) {
449 return_ACPI_STATUS(AE_BAD_PARAMETER);
450 }
451
452 /* Check for instance out of range of the current table count */
453
454 if (instance > acpi_gbl_table_lists[table_type].count) {
455 return_ACPI_STATUS(AE_NOT_EXIST);
456 }
457
458 /*
459 * Walk the list to get the desired table
460 * Note: Instance is one-based
461 */
462 table_desc = acpi_gbl_table_lists[table_type].next;
463 for (i = 1; i < instance; i++) {
464 table_desc = table_desc->next;
465 }
466
467 /* We are now pointing to the requested table's descriptor */
468
469 *return_table = table_desc->pointer;
470 return_ACPI_STATUS(AE_OK);
471}
diff --git a/drivers/acpi/tables/tbgetall.c b/drivers/acpi/tables/tbgetall.c
deleted file mode 100644
index ad982112e4c6..000000000000
--- a/drivers/acpi/tables/tbgetall.c
+++ /dev/null
@@ -1,311 +0,0 @@
1/******************************************************************************
2 *
3 * Module Name: tbgetall - Get all required ACPI tables
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include <acpi/actables.h>
46
47#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbgetall")
49
50/* Local prototypes */
51static acpi_status
52acpi_tb_get_primary_table(struct acpi_pointer *address,
53 struct acpi_table_desc *table_info);
54
55static acpi_status
56acpi_tb_get_secondary_table(struct acpi_pointer *address,
57 acpi_string signature,
58 struct acpi_table_desc *table_info);
59
60/*******************************************************************************
61 *
62 * FUNCTION: acpi_tb_get_primary_table
63 *
64 * PARAMETERS: Address - Physical address of table to retrieve
65 * *table_info - Where the table info is returned
66 *
67 * RETURN: Status
68 *
69 * DESCRIPTION: Maps the physical address of table into a logical address
70 *
71 ******************************************************************************/
72
73static acpi_status
74acpi_tb_get_primary_table(struct acpi_pointer *address,
75 struct acpi_table_desc *table_info)
76{
77 acpi_status status;
78 struct acpi_table_header header;
79
80 ACPI_FUNCTION_TRACE(tb_get_primary_table);
81
82 /* Ignore a NULL address in the RSDT */
83
84 if (!address->pointer.value) {
85 return_ACPI_STATUS(AE_OK);
86 }
87
88 /* Get the header in order to get signature and table size */
89
90 status = acpi_tb_get_table_header(address, &header);
91 if (ACPI_FAILURE(status)) {
92 return_ACPI_STATUS(status);
93 }
94
95 /* Clear the table_info */
96
97 ACPI_MEMSET(table_info, 0, sizeof(struct acpi_table_desc));
98
99 /*
100 * Check the table signature and make sure it is recognized.
101 * Also checks the header checksum
102 */
103 table_info->pointer = &header;
104 status = acpi_tb_recognize_table(table_info, ACPI_TABLE_PRIMARY);
105 if (ACPI_FAILURE(status)) {
106 return_ACPI_STATUS(status);
107 }
108
109 /* Get the entire table */
110
111 status = acpi_tb_get_table_body(address, &header, table_info);
112 if (ACPI_FAILURE(status)) {
113 return_ACPI_STATUS(status);
114 }
115
116 /* Install the table */
117
118 status = acpi_tb_install_table(table_info);
119 return_ACPI_STATUS(status);
120}
121
122/*******************************************************************************
123 *
124 * FUNCTION: acpi_tb_get_secondary_table
125 *
126 * PARAMETERS: Address - Physical address of table to retrieve
127 * *table_info - Where the table info is returned
128 *
129 * RETURN: Status
130 *
131 * DESCRIPTION: Maps the physical address of table into a logical address
132 *
133 ******************************************************************************/
134
135static acpi_status
136acpi_tb_get_secondary_table(struct acpi_pointer *address,
137 acpi_string signature,
138 struct acpi_table_desc *table_info)
139{
140 acpi_status status;
141 struct acpi_table_header header;
142
143 ACPI_FUNCTION_TRACE_STR(tb_get_secondary_table, signature);
144
145 /* Get the header in order to match the signature */
146
147 status = acpi_tb_get_table_header(address, &header);
148 if (ACPI_FAILURE(status)) {
149 return_ACPI_STATUS(status);
150 }
151
152 /* Signature must match request */
153
154 if (!ACPI_COMPARE_NAME(header.signature, signature)) {
155 ACPI_ERROR((AE_INFO,
156 "Incorrect table signature - wanted [%s] found [%4.4s]",
157 signature, header.signature));
158 return_ACPI_STATUS(AE_BAD_SIGNATURE);
159 }
160
161 /*
162 * Check the table signature and make sure it is recognized.
163 * Also checks the header checksum
164 */
165 table_info->pointer = &header;
166 status = acpi_tb_recognize_table(table_info, ACPI_TABLE_SECONDARY);
167 if (ACPI_FAILURE(status)) {
168 return_ACPI_STATUS(status);
169 }
170
171 /* Get the entire table */
172
173 status = acpi_tb_get_table_body(address, &header, table_info);
174 if (ACPI_FAILURE(status)) {
175 return_ACPI_STATUS(status);
176 }
177
178 /* Install the table */
179
180 status = acpi_tb_install_table(table_info);
181 return_ACPI_STATUS(status);
182}
183
184/*******************************************************************************
185 *
186 * FUNCTION: acpi_tb_get_required_tables
187 *
188 * PARAMETERS: None
189 *
190 * RETURN: Status
191 *
192 * DESCRIPTION: Load and validate tables other than the RSDT. The RSDT must
193 * already be loaded and validated.
194 *
195 * Get the minimum set of ACPI tables, namely:
196 *
197 * 1) FADT (via RSDT in loop below)
198 * 2) FACS (via FADT)
199 * 3) DSDT (via FADT)
200 *
201 ******************************************************************************/
202
203acpi_status acpi_tb_get_required_tables(void)
204{
205 acpi_status status = AE_OK;
206 u32 i;
207 struct acpi_table_desc table_info;
208 struct acpi_pointer address;
209
210 ACPI_FUNCTION_TRACE(tb_get_required_tables);
211
212 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "%d ACPI tables in RSDT\n",
213 acpi_gbl_rsdt_table_count));
214
215 address.pointer_type = acpi_gbl_table_flags | ACPI_LOGICAL_ADDRESSING;
216
217 /*
218 * Loop through all table pointers found in RSDT.
219 * This will NOT include the FACS and DSDT - we must get
220 * them after the loop.
221 *
222 * The only tables we are interested in getting here is the FADT and
223 * any SSDTs.
224 */
225 for (i = 0; i < acpi_gbl_rsdt_table_count; i++) {
226
227 /* Get the table address from the common internal XSDT */
228
229 address.pointer.value = acpi_gbl_XSDT->table_offset_entry[i];
230
231 /*
232 * Get the tables needed by this subsystem (FADT and any SSDTs).
233 * NOTE: All other tables are completely ignored at this time.
234 */
235 status = acpi_tb_get_primary_table(&address, &table_info);
236 if ((status != AE_OK) && (status != AE_TABLE_NOT_SUPPORTED)) {
237 ACPI_WARNING((AE_INFO,
238 "%s, while getting table at %8.8X%8.8X",
239 acpi_format_exception(status),
240 ACPI_FORMAT_UINT64(address.pointer.
241 value)));
242 }
243 }
244
245 /* We must have a FADT to continue */
246
247 if (!acpi_gbl_FADT) {
248 ACPI_ERROR((AE_INFO, "No FADT present in RSDT/XSDT"));
249 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
250 }
251
252 /*
253 * Convert the FADT to a common format. This allows earlier revisions of
254 * the table to coexist with newer versions, using common access code.
255 */
256 status = acpi_tb_convert_table_fadt();
257 if (ACPI_FAILURE(status)) {
258 ACPI_ERROR((AE_INFO,
259 "Could not convert FADT to internal common format"));
260 return_ACPI_STATUS(status);
261 }
262
263 /* Get the FACS (Pointed to by the FADT) */
264
265 address.pointer.value = acpi_gbl_FADT->xfirmware_ctrl;
266
267 status = acpi_tb_get_secondary_table(&address, FACS_SIG, &table_info);
268 if (ACPI_FAILURE(status)) {
269 ACPI_EXCEPTION((AE_INFO, status,
270 "Could not get/install the FACS"));
271 return_ACPI_STATUS(status);
272 }
273
274 /*
275 * Create the common FACS pointer table
276 * (Contains pointers to the original table)
277 */
278 status = acpi_tb_build_common_facs(&table_info);
279 if (ACPI_FAILURE(status)) {
280 return_ACPI_STATUS(status);
281 }
282
283 /* Get/install the DSDT (Pointed to by the FADT) */
284
285 address.pointer.value = acpi_gbl_FADT->Xdsdt;
286
287 status = acpi_tb_get_secondary_table(&address, DSDT_SIG, &table_info);
288 if (ACPI_FAILURE(status)) {
289 ACPI_ERROR((AE_INFO, "Could not get/install the DSDT"));
290 return_ACPI_STATUS(status);
291 }
292
293 /* Set Integer Width (32/64) based upon DSDT revision */
294
295 acpi_ut_set_integer_width(acpi_gbl_DSDT->revision);
296
297 /* Dump the entire DSDT */
298
299 ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
300 "Hex dump of entire DSDT, size %d (0x%X), Integer width = %d\n",
301 acpi_gbl_DSDT->length, acpi_gbl_DSDT->length,
302 acpi_gbl_integer_bit_width));
303
304 ACPI_DUMP_BUFFER(ACPI_CAST_PTR(u8, acpi_gbl_DSDT),
305 acpi_gbl_DSDT->length);
306
307 /* Always delete the RSDP mapping, we are done with it */
308
309 acpi_tb_delete_tables_by_type(ACPI_TABLE_ID_RSDP);
310 return_ACPI_STATUS(status);
311}
diff --git a/drivers/acpi/tables/tbinstal.c b/drivers/acpi/tables/tbinstal.c
index 1668a232fb67..0e7b121a99ce 100644
--- a/drivers/acpi/tables/tbinstal.c
+++ b/drivers/acpi/tables/tbinstal.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -42,510 +42,498 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acnamesp.h>
45#include <acpi/actables.h> 46#include <acpi/actables.h>
46 47
47#define _COMPONENT ACPI_TABLES 48#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbinstal") 49ACPI_MODULE_NAME("tbinstal")
49 50
50/* Local prototypes */ 51/******************************************************************************
51static acpi_status
52acpi_tb_match_signature(char *signature,
53 struct acpi_table_desc *table_info, u8 search_type);
54
55/*******************************************************************************
56 * 52 *
57 * FUNCTION: acpi_tb_match_signature 53 * FUNCTION: acpi_tb_verify_table
58 * 54 *
59 * PARAMETERS: Signature - Table signature to match 55 * PARAMETERS: table_desc - table
60 * table_info - Return data
61 * search_type - Table type to match (primary/secondary)
62 * 56 *
63 * RETURN: Status 57 * RETURN: Status
64 * 58 *
65 * DESCRIPTION: Compare signature against the list of "ACPI-subsystem-owned" 59 * DESCRIPTION: this function is called to verify and map table
66 * tables (DSDT/FADT/SSDT, etc.) Returns the table_type_iD on match.
67 * 60 *
68 ******************************************************************************/ 61 *****************************************************************************/
69 62acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc)
70static acpi_status
71acpi_tb_match_signature(char *signature,
72 struct acpi_table_desc *table_info, u8 search_type)
73{ 63{
74 acpi_native_uint i; 64 acpi_status status = AE_OK;
75 65
76 ACPI_FUNCTION_TRACE(tb_match_signature); 66 ACPI_FUNCTION_TRACE(tb_verify_table);
77 67
78 /* Search for a signature match among the known table types */ 68 /* Map the table if necessary */
79 69
80 for (i = 0; i < (ACPI_TABLE_ID_MAX + 1); i++) { 70 if (!table_desc->pointer) {
81 if (!(acpi_gbl_table_data[i].flags & search_type)) { 71 if ((table_desc->flags & ACPI_TABLE_ORIGIN_MASK) ==
82 continue; 72 ACPI_TABLE_ORIGIN_MAPPED) {
73 table_desc->pointer =
74 acpi_os_map_memory(table_desc->address,
75 table_desc->length);
83 } 76 }
77 if (!table_desc->pointer) {
78 return_ACPI_STATUS(AE_NO_MEMORY);
79 }
80 }
84 81
85 if (!ACPI_STRNCMP(signature, acpi_gbl_table_data[i].signature, 82 /* FACS is the odd table, has no standard ACPI header and no checksum */
86 acpi_gbl_table_data[i].sig_length)) {
87
88 /* Found a signature match, return index if requested */
89 83
90 if (table_info) { 84 if (!ACPI_COMPARE_NAME(&table_desc->signature, ACPI_SIG_FACS)) {
91 table_info->type = (u8) i;
92 }
93 85
94 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 86 /* Always calculate checksum, ignore bad checksum if requested */
95 "Table [%4.4s] is an ACPI table consumed by the core subsystem\n",
96 (char *)acpi_gbl_table_data[i].
97 signature));
98 87
99 return_ACPI_STATUS(AE_OK); 88 status =
100 } 89 acpi_tb_verify_checksum(table_desc->pointer,
90 table_desc->length);
101 } 91 }
102 92
103 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 93 return_ACPI_STATUS(status);
104 "Table [%4.4s] is not an ACPI table consumed by the core subsystem - ignored\n",
105 (char *)signature));
106
107 return_ACPI_STATUS(AE_TABLE_NOT_SUPPORTED);
108} 94}
109 95
110/******************************************************************************* 96/*******************************************************************************
111 * 97 *
112 * FUNCTION: acpi_tb_install_table 98 * FUNCTION: acpi_tb_add_table
113 * 99 *
114 * PARAMETERS: table_info - Return value from acpi_tb_get_table_body 100 * PARAMETERS: table_desc - Table descriptor
101 * table_index - Where the table index is returned
115 * 102 *
116 * RETURN: Status 103 * RETURN: Status
117 * 104 *
118 * DESCRIPTION: Install the table into the global data structures. 105 * DESCRIPTION: This function is called to add the ACPI table
119 * 106 *
120 ******************************************************************************/ 107 ******************************************************************************/
121 108
122acpi_status acpi_tb_install_table(struct acpi_table_desc *table_info) 109acpi_status
110acpi_tb_add_table(struct acpi_table_desc *table_desc,
111 acpi_native_uint * table_index)
123{ 112{
124 acpi_status status; 113 acpi_native_uint i;
125 114 acpi_native_uint length;
126 ACPI_FUNCTION_TRACE(tb_install_table); 115 acpi_status status = AE_OK;
127 116
128 /* Lock tables while installing */ 117 ACPI_FUNCTION_TRACE(tb_add_table);
129 118
130 status = acpi_ut_acquire_mutex(ACPI_MTX_TABLES); 119 if (!table_desc->pointer) {
131 if (ACPI_FAILURE(status)) { 120 status = acpi_tb_verify_table(table_desc);
132 ACPI_EXCEPTION((AE_INFO, status, 121 if (ACPI_FAILURE(status) || !table_desc->pointer) {
133 "Could not acquire table mutex")); 122 return_ACPI_STATUS(status);
134 return_ACPI_STATUS(status); 123 }
135 } 124 }
136 125
137 /* 126 /* The table must be either an SSDT or a PSDT */
138 * Ignore a table that is already installed. For example, some BIOS 127
139 * ASL code will repeatedly attempt to load the same SSDT. 128 if ((!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_PSDT))
140 */ 129 &&
141 status = acpi_tb_is_table_installed(table_info); 130 (!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_SSDT)))
142 if (ACPI_FAILURE(status)) { 131 {
143 goto unlock_and_exit; 132 ACPI_ERROR((AE_INFO,
133 "Table has invalid signature [%4.4s], must be SSDT or PSDT",
134 table_desc->pointer->signature));
135 return_ACPI_STATUS(AE_BAD_SIGNATURE);
144 } 136 }
145 137
146 /* Install the table into the global data structure */ 138 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
139
140 /* Check if table is already registered */
141
142 for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
143 if (!acpi_gbl_root_table_list.tables[i].pointer) {
144 status =
145 acpi_tb_verify_table(&acpi_gbl_root_table_list.
146 tables[i]);
147 if (ACPI_FAILURE(status)
148 || !acpi_gbl_root_table_list.tables[i].pointer) {
149 continue;
150 }
151 }
152
153 length = ACPI_MIN(table_desc->length,
154 acpi_gbl_root_table_list.tables[i].length);
155 if (ACPI_MEMCMP(table_desc->pointer,
156 acpi_gbl_root_table_list.tables[i].pointer,
157 length)) {
158 continue;
159 }
160
161 /* Table is already registered */
162
163 acpi_tb_delete_table(table_desc);
164 *table_index = i;
165 goto release;
166 }
147 167
148 status = acpi_tb_init_table_descriptor(table_info->type, table_info); 168 /*
169 * Add the table to the global table list
170 */
171 status = acpi_tb_store_table(table_desc->address, table_desc->pointer,
172 table_desc->length, table_desc->flags,
173 table_index);
149 if (ACPI_FAILURE(status)) { 174 if (ACPI_FAILURE(status)) {
150 ACPI_EXCEPTION((AE_INFO, status, 175 goto release;
151 "Could not install table [%4.4s]",
152 table_info->pointer->signature));
153 } 176 }
154 177
155 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "%s located at %p\n", 178 acpi_tb_print_table_header(table_desc->address, table_desc->pointer);
156 acpi_gbl_table_data[table_info->type].name,
157 table_info->pointer));
158 179
159 unlock_and_exit: 180 release:
160 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); 181 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
161 return_ACPI_STATUS(status); 182 return_ACPI_STATUS(status);
162} 183}
163 184
164/******************************************************************************* 185/*******************************************************************************
165 * 186 *
166 * FUNCTION: acpi_tb_recognize_table 187 * FUNCTION: acpi_tb_resize_root_table_list
167 * 188 *
168 * PARAMETERS: table_info - Return value from acpi_tb_get_table_body 189 * PARAMETERS: None
169 * search_type - Table type to match (primary/secondary)
170 * 190 *
171 * RETURN: Status 191 * RETURN: Status
172 * 192 *
173 * DESCRIPTION: Check a table signature for a match against known table types 193 * DESCRIPTION: Expand the size of global table array
174 *
175 * NOTE: All table pointers are validated as follows:
176 * 1) Table pointer must point to valid physical memory
177 * 2) Signature must be 4 ASCII chars, even if we don't recognize the
178 * name
179 * 3) Table must be readable for length specified in the header
180 * 4) Table checksum must be valid (with the exception of the FACS
181 * which has no checksum for some odd reason)
182 * 194 *
183 ******************************************************************************/ 195 ******************************************************************************/
184 196
185acpi_status 197acpi_status acpi_tb_resize_root_table_list(void)
186acpi_tb_recognize_table(struct acpi_table_desc *table_info, u8 search_type)
187{ 198{
188 struct acpi_table_header *table_header; 199 struct acpi_table_desc *tables;
189 acpi_status status;
190 200
191 ACPI_FUNCTION_TRACE(tb_recognize_table); 201 ACPI_FUNCTION_TRACE(tb_resize_root_table_list);
192 202
193 /* Ensure that we have a valid table pointer */ 203 /* allow_resize flag is a parameter to acpi_initialize_tables */
194 204
195 table_header = (struct acpi_table_header *)table_info->pointer; 205 if (!(acpi_gbl_root_table_list.flags & ACPI_ROOT_ALLOW_RESIZE)) {
196 if (!table_header) { 206 ACPI_ERROR((AE_INFO,
197 return_ACPI_STATUS(AE_BAD_PARAMETER); 207 "Resize of Root Table Array is not allowed"));
208 return_ACPI_STATUS(AE_SUPPORT);
198 } 209 }
199 210
200 /* 211 /* Increase the Table Array size */
201 * We only "recognize" a limited number of ACPI tables -- namely, the 212
202 * ones that are used by the subsystem (DSDT, FADT, etc.) 213 tables = ACPI_ALLOCATE_ZEROED((acpi_gbl_root_table_list.size +
203 * 214 ACPI_ROOT_TABLE_SIZE_INCREMENT)
204 * An AE_TABLE_NOT_SUPPORTED means that the table was not recognized. 215 * sizeof(struct acpi_table_desc));
205 * This can be any one of many valid ACPI tables, it just isn't one of 216 if (!tables) {
206 * the tables that is consumed by the core subsystem 217 ACPI_ERROR((AE_INFO,
207 */ 218 "Could not allocate new root table array"));
208 status = acpi_tb_match_signature(table_header->signature, 219 return_ACPI_STATUS(AE_NO_MEMORY);
209 table_info, search_type);
210 if (ACPI_FAILURE(status)) {
211 return_ACPI_STATUS(status);
212 } 220 }
213 221
214 status = acpi_tb_validate_table_header(table_header); 222 /* Copy and free the previous table array */
215 if (ACPI_FAILURE(status)) { 223
216 return_ACPI_STATUS(status); 224 if (acpi_gbl_root_table_list.tables) {
225 ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables,
226 acpi_gbl_root_table_list.size *
227 sizeof(struct acpi_table_desc));
228
229 if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
230 ACPI_FREE(acpi_gbl_root_table_list.tables);
231 }
217 } 232 }
218 233
219 /* Return the table type and length via the info struct */ 234 acpi_gbl_root_table_list.tables = tables;
235 acpi_gbl_root_table_list.size += ACPI_ROOT_TABLE_SIZE_INCREMENT;
236 acpi_gbl_root_table_list.flags |= (u8) ACPI_ROOT_ORIGIN_ALLOCATED;
220 237
221 table_info->length = (acpi_size) table_header->length; 238 return_ACPI_STATUS(AE_OK);
222 return_ACPI_STATUS(status);
223} 239}
224 240
225/******************************************************************************* 241/*******************************************************************************
226 * 242 *
227 * FUNCTION: acpi_tb_init_table_descriptor 243 * FUNCTION: acpi_tb_store_table
228 * 244 *
229 * PARAMETERS: table_type - The type of the table 245 * PARAMETERS: Address - Table address
230 * table_info - A table info struct 246 * Table - Table header
247 * Length - Table length
248 * Flags - flags
231 * 249 *
232 * RETURN: None. 250 * RETURN: Status and table index.
233 * 251 *
234 * DESCRIPTION: Install a table into the global data structs. 252 * DESCRIPTION: Add an ACPI table to the global table list
235 * 253 *
236 ******************************************************************************/ 254 ******************************************************************************/
237 255
238acpi_status 256acpi_status
239acpi_tb_init_table_descriptor(acpi_table_type table_type, 257acpi_tb_store_table(acpi_physical_address address,
240 struct acpi_table_desc *table_info) 258 struct acpi_table_header *table,
259 u32 length, u8 flags, acpi_native_uint * table_index)
241{ 260{
242 struct acpi_table_list *list_head; 261 acpi_status status = AE_OK;
243 struct acpi_table_desc *table_desc;
244 acpi_status status;
245
246 ACPI_FUNCTION_TRACE_U32(tb_init_table_descriptor, table_type);
247 262
248 /* Allocate a descriptor for this table */ 263 /* Ensure that there is room for the table in the Root Table List */
249 264
250 table_desc = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_table_desc)); 265 if (acpi_gbl_root_table_list.count >= acpi_gbl_root_table_list.size) {
251 if (!table_desc) { 266 status = acpi_tb_resize_root_table_list();
252 return_ACPI_STATUS(AE_NO_MEMORY); 267 if (ACPI_FAILURE(status)) {
268 return (status);
269 }
253 } 270 }
254 271
255 /* Get a new owner ID for the table */ 272 /* Initialize added table */
273
274 acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
275 address = address;
276 acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
277 pointer = table;
278 acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].length =
279 length;
280 acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
281 owner_id = 0;
282 acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].flags =
283 flags;
284
285 ACPI_MOVE_32_TO_32(&
286 (acpi_gbl_root_table_list.
287 tables[acpi_gbl_root_table_list.count].signature),
288 table->signature);
289
290 *table_index = acpi_gbl_root_table_list.count;
291 acpi_gbl_root_table_list.count++;
292 return (status);
293}
256 294
257 status = acpi_ut_allocate_owner_id(&table_desc->owner_id); 295/*******************************************************************************
258 if (ACPI_FAILURE(status)) { 296 *
259 goto error_exit1; 297 * FUNCTION: acpi_tb_delete_table
260 } 298 *
299 * PARAMETERS: table_index - Table index
300 *
301 * RETURN: None
302 *
303 * DESCRIPTION: Delete one internal ACPI table
304 *
305 ******************************************************************************/
261 306
262 /* Install the table into the global data structure */ 307void acpi_tb_delete_table(struct acpi_table_desc *table_desc)
308{
309 /* Table must be mapped or allocated */
310 if (!table_desc->pointer) {
311 return;
312 }
313 switch (table_desc->flags & ACPI_TABLE_ORIGIN_MASK) {
314 case ACPI_TABLE_ORIGIN_MAPPED:
315 acpi_os_unmap_memory(table_desc->pointer, table_desc->length);
316 break;
317 case ACPI_TABLE_ORIGIN_ALLOCATED:
318 ACPI_FREE(table_desc->pointer);
319 break;
320 default:;
321 }
263 322
264 list_head = &acpi_gbl_table_lists[table_type]; 323 table_desc->pointer = NULL;
324}
265 325
266 /* 326/*******************************************************************************
267 * Two major types of tables: 1) Only one instance is allowed. This 327 *
268 * includes most ACPI tables such as the DSDT. 2) Multiple instances of 328 * FUNCTION: acpi_tb_terminate
269 * the table are allowed. This includes SSDT and PSDTs. 329 *
270 */ 330 * PARAMETERS: None
271 if (ACPI_IS_SINGLE_TABLE(acpi_gbl_table_data[table_type].flags)) { 331 *
272 /* 332 * RETURN: None
273 * Only one table allowed, and a table has alread been installed 333 *
274 * at this location, so return an error. 334 * DESCRIPTION: Delete all internal ACPI tables
275 */ 335 *
276 if (list_head->next) { 336 ******************************************************************************/
277 status = AE_ALREADY_EXISTS;
278 goto error_exit2;
279 }
280 337
281 table_desc->next = list_head->next; 338void acpi_tb_terminate(void)
282 list_head->next = table_desc; 339{
340 acpi_native_uint i;
283 341
284 if (table_desc->next) { 342 ACPI_FUNCTION_TRACE(tb_terminate);
285 table_desc->next->prev = table_desc;
286 }
287 343
288 list_head->count++; 344 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
289 } else {
290 /*
291 * Link the new table in to the list of tables of this type.
292 * Insert at the end of the list, order IS IMPORTANT.
293 *
294 * table_desc->Prev & Next are already NULL from calloc()
295 */
296 list_head->count++;
297
298 if (!list_head->next) {
299 list_head->next = table_desc;
300 } else {
301 table_desc->next = list_head->next;
302 345
303 while (table_desc->next->next) { 346 /* Delete the individual tables */
304 table_desc->next = table_desc->next->next;
305 }
306 347
307 table_desc->next->next = table_desc; 348 for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
308 table_desc->prev = table_desc->next; 349 acpi_tb_delete_table(&acpi_gbl_root_table_list.tables[i]);
309 table_desc->next = NULL;
310 }
311 } 350 }
312 351
313 /* Finish initialization of the table descriptor */
314
315 table_desc->loaded_into_namespace = FALSE;
316 table_desc->type = (u8) table_type;
317 table_desc->pointer = table_info->pointer;
318 table_desc->length = table_info->length;
319 table_desc->allocation = table_info->allocation;
320 table_desc->aml_start = (u8 *) (table_desc->pointer + 1),
321 table_desc->aml_length = (u32)
322 (table_desc->length - (u32) sizeof(struct acpi_table_header));
323
324 /* 352 /*
325 * Set the appropriate global pointer (if there is one) to point to the 353 * Delete the root table array if allocated locally. Array cannot be
326 * newly installed table 354 * mapped, so we don't need to check for that flag.
327 */ 355 */
328 if (acpi_gbl_table_data[table_type].global_ptr) { 356 if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
329 *(acpi_gbl_table_data[table_type].global_ptr) = 357 ACPI_FREE(acpi_gbl_root_table_list.tables);
330 table_info->pointer;
331 } 358 }
332 359
333 /* Return Data */ 360 acpi_gbl_root_table_list.tables = NULL;
334 361 acpi_gbl_root_table_list.flags = 0;
335 table_info->owner_id = table_desc->owner_id; 362 acpi_gbl_root_table_list.count = 0;
336 table_info->installed_desc = table_desc;
337 return_ACPI_STATUS(AE_OK);
338
339 /* Error exit with cleanup */
340
341 error_exit2:
342
343 acpi_ut_release_owner_id(&table_desc->owner_id);
344 363
345 error_exit1: 364 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n"));
346 365 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
347 ACPI_FREE(table_desc);
348 return_ACPI_STATUS(status);
349} 366}
350 367
351/******************************************************************************* 368/*******************************************************************************
352 * 369 *
353 * FUNCTION: acpi_tb_delete_all_tables 370 * FUNCTION: acpi_tb_delete_namespace_by_owner
354 * 371 *
355 * PARAMETERS: None. 372 * PARAMETERS: table_index - Table index
356 * 373 *
357 * RETURN: None. 374 * RETURN: None
358 * 375 *
359 * DESCRIPTION: Delete all internal ACPI tables 376 * DESCRIPTION: Delete all namespace objects created when this table was loaded.
360 * 377 *
361 ******************************************************************************/ 378 ******************************************************************************/
362 379
363void acpi_tb_delete_all_tables(void) 380void acpi_tb_delete_namespace_by_owner(acpi_native_uint table_index)
364{ 381{
365 acpi_table_type type; 382 acpi_owner_id owner_id;
366 383
367 /* 384 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
368 * Free memory allocated for ACPI tables 385 if (table_index < acpi_gbl_root_table_list.count) {
369 * Memory can either be mapped or allocated 386 owner_id =
370 */ 387 acpi_gbl_root_table_list.tables[table_index].owner_id;
371 for (type = 0; type < (ACPI_TABLE_ID_MAX + 1); type++) { 388 } else {
372 acpi_tb_delete_tables_by_type(type); 389 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
390 return;
373 } 391 }
392
393 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
394 acpi_ns_delete_namespace_by_owner(owner_id);
374} 395}
375 396
376/******************************************************************************* 397/*******************************************************************************
377 * 398 *
378 * FUNCTION: acpi_tb_delete_tables_by_type 399 * FUNCTION: acpi_tb_allocate_owner_id
379 * 400 *
380 * PARAMETERS: Type - The table type to be deleted 401 * PARAMETERS: table_index - Table index
381 * 402 *
382 * RETURN: None. 403 * RETURN: Status
383 * 404 *
384 * DESCRIPTION: Delete an internal ACPI table 405 * DESCRIPTION: Allocates owner_id in table_desc
385 * Locks the ACPI table mutex
386 * 406 *
387 ******************************************************************************/ 407 ******************************************************************************/
388 408
389void acpi_tb_delete_tables_by_type(acpi_table_type type) 409acpi_status acpi_tb_allocate_owner_id(acpi_native_uint table_index)
390{ 410{
391 struct acpi_table_desc *table_desc; 411 acpi_status status = AE_BAD_PARAMETER;
392 u32 count;
393 u32 i;
394 412
395 ACPI_FUNCTION_TRACE_U32(tb_delete_tables_by_type, type); 413 ACPI_FUNCTION_TRACE(tb_allocate_owner_id);
396 414
397 if (type > ACPI_TABLE_ID_MAX) { 415 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
398 return_VOID; 416 if (table_index < acpi_gbl_root_table_list.count) {
399 } 417 status = acpi_ut_allocate_owner_id
400 418 (&(acpi_gbl_root_table_list.tables[table_index].owner_id));
401 if (ACPI_FAILURE(acpi_ut_acquire_mutex(ACPI_MTX_TABLES))) {
402 return;
403 }
404
405 /* Clear the appropriate "typed" global table pointer */
406
407 switch (type) {
408 case ACPI_TABLE_ID_RSDP:
409 acpi_gbl_RSDP = NULL;
410 break;
411
412 case ACPI_TABLE_ID_DSDT:
413 acpi_gbl_DSDT = NULL;
414 break;
415
416 case ACPI_TABLE_ID_FADT:
417 acpi_gbl_FADT = NULL;
418 break;
419
420 case ACPI_TABLE_ID_FACS:
421 acpi_gbl_FACS = NULL;
422 break;
423
424 case ACPI_TABLE_ID_XSDT:
425 acpi_gbl_XSDT = NULL;
426 break;
427
428 case ACPI_TABLE_ID_SSDT:
429 case ACPI_TABLE_ID_PSDT:
430 default:
431 break;
432 }
433
434 /*
435 * Free the table
436 * 1) Get the head of the list
437 */
438 table_desc = acpi_gbl_table_lists[type].next;
439 count = acpi_gbl_table_lists[type].count;
440
441 /*
442 * 2) Walk the entire list, deleting both the allocated tables
443 * and the table descriptors
444 */
445 for (i = 0; i < count; i++) {
446 table_desc = acpi_tb_uninstall_table(table_desc);
447 } 419 }
448 420
449 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); 421 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
450 return_VOID; 422 return_ACPI_STATUS(status);
451} 423}
452 424
453/******************************************************************************* 425/*******************************************************************************
454 * 426 *
455 * FUNCTION: acpi_tb_delete_single_table 427 * FUNCTION: acpi_tb_release_owner_id
456 * 428 *
457 * PARAMETERS: table_info - A table info struct 429 * PARAMETERS: table_index - Table index
458 * 430 *
459 * RETURN: None. 431 * RETURN: Status
460 * 432 *
461 * DESCRIPTION: Low-level free for a single ACPI table. Handles cases where 433 * DESCRIPTION: Releases owner_id in table_desc
462 * the table was allocated a buffer or was mapped.
463 * 434 *
464 ******************************************************************************/ 435 ******************************************************************************/
465 436
466void acpi_tb_delete_single_table(struct acpi_table_desc *table_desc) 437acpi_status acpi_tb_release_owner_id(acpi_native_uint table_index)
467{ 438{
439 acpi_status status = AE_BAD_PARAMETER;
468 440
469 /* Must have a valid table descriptor and pointer */ 441 ACPI_FUNCTION_TRACE(tb_release_owner_id);
470 442
471 if ((!table_desc) || (!table_desc->pointer)) { 443 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
472 return; 444 if (table_index < acpi_gbl_root_table_list.count) {
445 acpi_ut_release_owner_id(&
446 (acpi_gbl_root_table_list.
447 tables[table_index].owner_id));
448 status = AE_OK;
473 } 449 }
474 450
475 /* Valid table, determine type of memory allocation */ 451 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
476 452 return_ACPI_STATUS(status);
477 switch (table_desc->allocation) {
478 case ACPI_MEM_NOT_ALLOCATED:
479 break;
480
481 case ACPI_MEM_ALLOCATED:
482
483 ACPI_FREE(table_desc->pointer);
484 break;
485
486 case ACPI_MEM_MAPPED:
487
488 acpi_os_unmap_memory(table_desc->pointer, table_desc->length);
489 break;
490
491 default:
492 break;
493 }
494} 453}
495 454
496/******************************************************************************* 455/*******************************************************************************
497 * 456 *
498 * FUNCTION: acpi_tb_uninstall_table 457 * FUNCTION: acpi_tb_get_owner_id
499 * 458 *
500 * PARAMETERS: table_info - A table info struct 459 * PARAMETERS: table_index - Table index
460 * owner_id - Where the table owner_id is returned
501 * 461 *
502 * RETURN: Pointer to the next table in the list (of same type) 462 * RETURN: Status
503 * 463 *
504 * DESCRIPTION: Free the memory associated with an internal ACPI table that 464 * DESCRIPTION: returns owner_id for the ACPI table
505 * is either installed or has never been installed.
506 * Table mutex should be locked.
507 * 465 *
508 ******************************************************************************/ 466 ******************************************************************************/
509 467
510struct acpi_table_desc *acpi_tb_uninstall_table(struct acpi_table_desc 468acpi_status
511 *table_desc) 469acpi_tb_get_owner_id(acpi_native_uint table_index, acpi_owner_id * owner_id)
512{ 470{
513 struct acpi_table_desc *next_desc; 471 acpi_status status = AE_BAD_PARAMETER;
514 472
515 ACPI_FUNCTION_TRACE_PTR(tb_uninstall_table, table_desc); 473 ACPI_FUNCTION_TRACE(tb_get_owner_id);
516 474
517 if (!table_desc) { 475 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
518 return_PTR(NULL); 476 if (table_index < acpi_gbl_root_table_list.count) {
477 *owner_id =
478 acpi_gbl_root_table_list.tables[table_index].owner_id;
479 status = AE_OK;
519 } 480 }
520 481
521 /* Unlink the descriptor from the doubly linked list */ 482 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
483 return_ACPI_STATUS(status);
484}
522 485
523 if (table_desc->prev) { 486/*******************************************************************************
524 table_desc->prev->next = table_desc->next; 487 *
525 } else { 488 * FUNCTION: acpi_tb_is_table_loaded
526 /* Is first on list, update list head */ 489 *
490 * PARAMETERS: table_index - Table index
491 *
492 * RETURN: Table Loaded Flag
493 *
494 ******************************************************************************/
527 495
528 acpi_gbl_table_lists[table_desc->type].next = table_desc->next; 496u8 acpi_tb_is_table_loaded(acpi_native_uint table_index)
529 } 497{
498 u8 is_loaded = FALSE;
530 499
531 if (table_desc->next) { 500 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
532 table_desc->next->prev = table_desc->prev; 501 if (table_index < acpi_gbl_root_table_list.count) {
502 is_loaded = (u8)
503 (acpi_gbl_root_table_list.tables[table_index].
504 flags & ACPI_TABLE_IS_LOADED);
533 } 505 }
534 506
535 /* Free the memory allocated for the table itself */ 507 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
536 508 return (is_loaded);
537 acpi_tb_delete_single_table(table_desc); 509}
538
539 /* Free the owner ID associated with this table */
540
541 acpi_ut_release_owner_id(&table_desc->owner_id);
542 510
543 /* Free the table descriptor */ 511/*******************************************************************************
512 *
513 * FUNCTION: acpi_tb_set_table_loaded_flag
514 *
515 * PARAMETERS: table_index - Table index
516 * is_loaded - TRUE if table is loaded, FALSE otherwise
517 *
518 * RETURN: None
519 *
520 * DESCRIPTION: Sets the table loaded flag to either TRUE or FALSE.
521 *
522 ******************************************************************************/
544 523
545 next_desc = table_desc->next; 524void acpi_tb_set_table_loaded_flag(acpi_native_uint table_index, u8 is_loaded)
546 ACPI_FREE(table_desc); 525{
547 526
548 /* Return pointer to the next descriptor */ 527 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
528 if (table_index < acpi_gbl_root_table_list.count) {
529 if (is_loaded) {
530 acpi_gbl_root_table_list.tables[table_index].flags |=
531 ACPI_TABLE_IS_LOADED;
532 } else {
533 acpi_gbl_root_table_list.tables[table_index].flags &=
534 ~ACPI_TABLE_IS_LOADED;
535 }
536 }
549 537
550 return_PTR(next_desc); 538 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
551} 539}
diff --git a/drivers/acpi/tables/tbrsdt.c b/drivers/acpi/tables/tbrsdt.c
deleted file mode 100644
index 86a5fca9b739..000000000000
--- a/drivers/acpi/tables/tbrsdt.c
+++ /dev/null
@@ -1,307 +0,0 @@
1/******************************************************************************
2 *
3 * Module Name: tbrsdt - ACPI RSDT table utilities
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include <acpi/actables.h>
46
47#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbrsdt")
49
50/*******************************************************************************
51 *
52 * FUNCTION: acpi_tb_verify_rsdp
53 *
54 * PARAMETERS: Address - RSDP (Pointer to RSDT)
55 *
56 * RETURN: Status
57 *
58 * DESCRIPTION: Load and validate the RSDP (ptr) and RSDT (table)
59 *
60 ******************************************************************************/
61acpi_status acpi_tb_verify_rsdp(struct acpi_pointer *address)
62{
63 struct acpi_table_desc table_info;
64 acpi_status status;
65 struct rsdp_descriptor *rsdp;
66
67 ACPI_FUNCTION_TRACE(tb_verify_rsdp);
68
69 switch (address->pointer_type) {
70 case ACPI_LOGICAL_POINTER:
71
72 rsdp = address->pointer.logical;
73 break;
74
75 case ACPI_PHYSICAL_POINTER:
76 /*
77 * Obtain access to the RSDP structure
78 */
79 status = acpi_os_map_memory(address->pointer.physical,
80 sizeof(struct rsdp_descriptor),
81 ACPI_CAST_PTR(void, &rsdp));
82 if (ACPI_FAILURE(status)) {
83 return_ACPI_STATUS(status);
84 }
85 break;
86
87 default:
88 return_ACPI_STATUS(AE_BAD_PARAMETER);
89 }
90
91 /* Verify RSDP signature and checksum */
92
93 status = acpi_tb_validate_rsdp(rsdp);
94 if (ACPI_FAILURE(status)) {
95 goto cleanup;
96 }
97
98 /* RSDP is ok. Init the table info */
99
100 table_info.pointer = ACPI_CAST_PTR(struct acpi_table_header, rsdp);
101 table_info.length = sizeof(struct rsdp_descriptor);
102
103 if (address->pointer_type == ACPI_PHYSICAL_POINTER) {
104 table_info.allocation = ACPI_MEM_MAPPED;
105 } else {
106 table_info.allocation = ACPI_MEM_NOT_ALLOCATED;
107 }
108
109 /* Save the table pointers and allocation info */
110
111 status = acpi_tb_init_table_descriptor(ACPI_TABLE_ID_RSDP, &table_info);
112 if (ACPI_FAILURE(status)) {
113 goto cleanup;
114 }
115
116 /* Save the RSDP in a global for easy access */
117
118 acpi_gbl_RSDP =
119 ACPI_CAST_PTR(struct rsdp_descriptor, table_info.pointer);
120 return_ACPI_STATUS(status);
121
122 /* Error exit */
123 cleanup:
124
125 if (acpi_gbl_table_flags & ACPI_PHYSICAL_POINTER) {
126 acpi_os_unmap_memory(rsdp, sizeof(struct rsdp_descriptor));
127 }
128 return_ACPI_STATUS(status);
129}
130
131/*******************************************************************************
132 *
133 * FUNCTION: acpi_tb_get_rsdt_address
134 *
135 * PARAMETERS: out_address - Where the address is returned
136 *
137 * RETURN: None, Address
138 *
139 * DESCRIPTION: Extract the address of either the RSDT or XSDT, depending on the
140 * version of the RSDP and whether the XSDT pointer is valid
141 *
142 ******************************************************************************/
143
144void acpi_tb_get_rsdt_address(struct acpi_pointer *out_address)
145{
146
147 ACPI_FUNCTION_ENTRY();
148
149 out_address->pointer_type =
150 acpi_gbl_table_flags | ACPI_LOGICAL_ADDRESSING;
151
152 /* Use XSDT if it is present */
153
154 if ((acpi_gbl_RSDP->revision >= 2) &&
155 acpi_gbl_RSDP->xsdt_physical_address) {
156 out_address->pointer.value =
157 acpi_gbl_RSDP->xsdt_physical_address;
158 acpi_gbl_root_table_type = ACPI_TABLE_TYPE_XSDT;
159 } else {
160 /* No XSDT, use the RSDT */
161
162 out_address->pointer.value =
163 acpi_gbl_RSDP->rsdt_physical_address;
164 acpi_gbl_root_table_type = ACPI_TABLE_TYPE_RSDT;
165 }
166}
167
168/*******************************************************************************
169 *
170 * FUNCTION: acpi_tb_validate_rsdt
171 *
172 * PARAMETERS: table_ptr - Addressable pointer to the RSDT.
173 *
174 * RETURN: Status
175 *
176 * DESCRIPTION: Validate signature for the RSDT or XSDT
177 *
178 ******************************************************************************/
179
180acpi_status acpi_tb_validate_rsdt(struct acpi_table_header *table_ptr)
181{
182 char *signature;
183
184 ACPI_FUNCTION_ENTRY();
185
186 /* Validate minimum length */
187
188 if (table_ptr->length < sizeof(struct acpi_table_header)) {
189 ACPI_ERROR((AE_INFO,
190 "RSDT/XSDT length (%X) is smaller than minimum (%zX)",
191 table_ptr->length,
192 sizeof(struct acpi_table_header)));
193
194 return (AE_INVALID_TABLE_LENGTH);
195 }
196
197 /* Search for appropriate signature, RSDT or XSDT */
198
199 if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
200 signature = RSDT_SIG;
201 } else {
202 signature = XSDT_SIG;
203 }
204
205 if (!ACPI_COMPARE_NAME(table_ptr->signature, signature)) {
206
207 /* Invalid RSDT or XSDT signature */
208
209 ACPI_ERROR((AE_INFO,
210 "Invalid signature where RSDP indicates RSDT/XSDT should be located. RSDP:"));
211
212 ACPI_DUMP_BUFFER(acpi_gbl_RSDP, 20);
213
214 ACPI_ERROR((AE_INFO,
215 "RSDT/XSDT signature at %X is invalid",
216 acpi_gbl_RSDP->rsdt_physical_address));
217
218 if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
219 ACPI_ERROR((AE_INFO, "Looking for RSDT"));
220 } else {
221 ACPI_ERROR((AE_INFO, "Looking for XSDT"));
222 }
223
224 ACPI_DUMP_BUFFER(ACPI_CAST_PTR(char, table_ptr), 48);
225 return (AE_BAD_SIGNATURE);
226 }
227
228 return (AE_OK);
229}
230
231/*******************************************************************************
232 *
233 * FUNCTION: acpi_tb_get_table_rsdt
234 *
235 * PARAMETERS: None
236 *
237 * RETURN: Status
238 *
239 * DESCRIPTION: Load and validate the RSDP (ptr) and RSDT (table)
240 *
241 ******************************************************************************/
242
243acpi_status acpi_tb_get_table_rsdt(void)
244{
245 struct acpi_table_desc table_info;
246 acpi_status status;
247 struct acpi_pointer address;
248
249 ACPI_FUNCTION_TRACE(tb_get_table_rsdt);
250
251 /* Get the RSDT/XSDT via the RSDP */
252
253 acpi_tb_get_rsdt_address(&address);
254
255 table_info.type = ACPI_TABLE_ID_XSDT;
256 status = acpi_tb_get_table(&address, &table_info);
257 if (ACPI_FAILURE(status)) {
258 ACPI_EXCEPTION((AE_INFO, status,
259 "Could not get the RSDT/XSDT"));
260 return_ACPI_STATUS(status);
261 }
262
263 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
264 "RSDP located at %p, points to RSDT physical=%8.8X%8.8X\n",
265 acpi_gbl_RSDP,
266 ACPI_FORMAT_UINT64(address.pointer.value)));
267
268 /* Check the RSDT or XSDT signature */
269
270 status = acpi_tb_validate_rsdt(table_info.pointer);
271 if (ACPI_FAILURE(status)) {
272 goto error_cleanup;
273 }
274
275 /* Get the number of tables defined in the RSDT or XSDT */
276
277 acpi_gbl_rsdt_table_count = acpi_tb_get_table_count(acpi_gbl_RSDP,
278 table_info.pointer);
279
280 /* Convert and/or copy to an XSDT structure */
281
282 status = acpi_tb_convert_to_xsdt(&table_info);
283 if (ACPI_FAILURE(status)) {
284 goto error_cleanup;
285 }
286
287 /* Save the table pointers and allocation info */
288
289 status = acpi_tb_init_table_descriptor(ACPI_TABLE_ID_XSDT, &table_info);
290 if (ACPI_FAILURE(status)) {
291 goto error_cleanup;
292 }
293
294 acpi_gbl_XSDT =
295 ACPI_CAST_PTR(struct xsdt_descriptor, table_info.pointer);
296
297 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "XSDT located at %p\n", acpi_gbl_XSDT));
298 return_ACPI_STATUS(status);
299
300 error_cleanup:
301
302 /* Free table allocated by acpi_tb_get_table */
303
304 acpi_tb_delete_single_table(&table_info);
305
306 return_ACPI_STATUS(status);
307}
diff --git a/drivers/acpi/tables/tbutils.c b/drivers/acpi/tables/tbutils.c
index 209a401801e3..1da64b4518c0 100644
--- a/drivers/acpi/tables/tbutils.c
+++ b/drivers/acpi/tables/tbutils.c
@@ -1,11 +1,11 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Module Name: tbutils - Table manipulation utilities 3 * Module Name: tbutils - table utilities
4 * 4 *
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -48,137 +48,119 @@
48ACPI_MODULE_NAME("tbutils") 48ACPI_MODULE_NAME("tbutils")
49 49
50/* Local prototypes */ 50/* Local prototypes */
51#ifdef ACPI_OBSOLETE_FUNCTIONS 51static acpi_physical_address
52acpi_status 52acpi_tb_get_root_table_entry(u8 * table_entry,
53acpi_tb_handle_to_object(u16 table_id, struct acpi_table_desc **table_desc); 53 acpi_native_uint table_entry_size);
54#endif
55 54
56/******************************************************************************* 55/*******************************************************************************
57 * 56 *
58 * FUNCTION: acpi_tb_is_table_installed 57 * FUNCTION: acpi_tb_tables_loaded
59 *
60 * PARAMETERS: new_table_desc - Descriptor for new table being installed
61 * 58 *
62 * RETURN: Status - AE_ALREADY_EXISTS if the table is already installed 59 * PARAMETERS: None
63 * 60 *
64 * DESCRIPTION: Determine if an ACPI table is already installed 61 * RETURN: TRUE if required ACPI tables are loaded
65 * 62 *
66 * MUTEX: Table data structures should be locked 63 * DESCRIPTION: Determine if the minimum required ACPI tables are present
64 * (FADT, FACS, DSDT)
67 * 65 *
68 ******************************************************************************/ 66 ******************************************************************************/
69 67
70acpi_status acpi_tb_is_table_installed(struct acpi_table_desc *new_table_desc) 68u8 acpi_tb_tables_loaded(void)
71{ 69{
72 struct acpi_table_desc *table_desc;
73
74 ACPI_FUNCTION_TRACE(tb_is_table_installed);
75 70
76 /* Get the list descriptor and first table descriptor */ 71 if (acpi_gbl_root_table_list.count >= 3) {
77 72 return (TRUE);
78 table_desc = acpi_gbl_table_lists[new_table_desc->type].next; 73 }
79 74
80 /* Examine all installed tables of this type */ 75 return (FALSE);
76}
81 77
82 while (table_desc) { 78/*******************************************************************************
83 /* 79 *
84 * If the table lengths match, perform a full bytewise compare. This 80 * FUNCTION: acpi_tb_print_table_header
85 * means that we will allow tables with duplicate oem_table_id(s), as 81 *
86 * long as the tables are different in some way. 82 * PARAMETERS: Address - Table physical address
87 * 83 * Header - Table header
88 * Checking if the table has been loaded into the namespace means that 84 *
89 * we don't check for duplicate tables during the initial installation 85 * RETURN: None
90 * of tables within the RSDT/XSDT. 86 *
91 */ 87 * DESCRIPTION: Print an ACPI table header. Special cases for FACS and RSDP.
92 if ((table_desc->loaded_into_namespace) && 88 *
93 (table_desc->pointer->length == 89 ******************************************************************************/
94 new_table_desc->pointer->length)
95 &&
96 (!ACPI_MEMCMP
97 (table_desc->pointer, new_table_desc->pointer,
98 new_table_desc->pointer->length))) {
99
100 /* Match: this table is already installed */
101
102 ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
103 "Table [%4.4s] already installed: Rev %X OemTableId [%8.8s]\n",
104 new_table_desc->pointer->signature,
105 new_table_desc->pointer->revision,
106 new_table_desc->pointer->
107 oem_table_id));
108
109 new_table_desc->owner_id = table_desc->owner_id;
110 new_table_desc->installed_desc = table_desc;
111
112 return_ACPI_STATUS(AE_ALREADY_EXISTS);
113 }
114 90
115 /* Get next table on the list */ 91void
92acpi_tb_print_table_header(acpi_physical_address address,
93 struct acpi_table_header *header)
94{
116 95
117 table_desc = table_desc->next; 96 if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {
97
98 /* FACS only has signature and length fields of common table header */
99
100 ACPI_INFO((AE_INFO, "%4.4s %08lX, %04X",
101 header->signature, (unsigned long)address,
102 header->length));
103 } else if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_RSDP)) {
104
105 /* RSDP has no common fields */
106
107 ACPI_INFO((AE_INFO, "RSDP %08lX, %04X (r%d %6.6s)",
108 (unsigned long)address,
109 (ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
110 revision >
111 0) ? ACPI_CAST_PTR(struct acpi_table_rsdp,
112 header)->length : 20,
113 ACPI_CAST_PTR(struct acpi_table_rsdp,
114 header)->revision,
115 ACPI_CAST_PTR(struct acpi_table_rsdp,
116 header)->oem_id));
117 } else {
118 /* Standard ACPI table with full common header */
119
120 ACPI_INFO((AE_INFO,
121 "%4.4s %08lX, %04X (r%d %6.6s %8.8s %8X %4.4s %8X)",
122 header->signature, (unsigned long)address,
123 header->length, header->revision, header->oem_id,
124 header->oem_table_id, header->oem_revision,
125 header->asl_compiler_id,
126 header->asl_compiler_revision));
118 } 127 }
119
120 return_ACPI_STATUS(AE_OK);
121} 128}
122 129
123/******************************************************************************* 130/*******************************************************************************
124 * 131 *
125 * FUNCTION: acpi_tb_validate_table_header 132 * FUNCTION: acpi_tb_validate_checksum
126 * 133 *
127 * PARAMETERS: table_header - Logical pointer to the table 134 * PARAMETERS: Table - ACPI table to verify
135 * Length - Length of entire table
128 * 136 *
129 * RETURN: Status 137 * RETURN: Status
130 * 138 *
131 * DESCRIPTION: Check an ACPI table header for validity 139 * DESCRIPTION: Verifies that the table checksums to zero. Optionally returns
132 * 140 * exception on bad checksum.
133 * NOTE: Table pointers are validated as follows:
134 * 1) Table pointer must point to valid physical memory
135 * 2) Signature must be 4 ASCII chars, even if we don't recognize the
136 * name
137 * 3) Table must be readable for length specified in the header
138 * 4) Table checksum must be valid (with the exception of the FACS
139 * which has no checksum because it contains variable fields)
140 * 141 *
141 ******************************************************************************/ 142 ******************************************************************************/
142 143
143acpi_status 144acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
144acpi_tb_validate_table_header(struct acpi_table_header *table_header)
145{ 145{
146 acpi_name signature; 146 u8 checksum;
147
148 ACPI_FUNCTION_ENTRY();
149
150 /* Verify that this is a valid address */
151
152 if (!acpi_os_readable(table_header, sizeof(struct acpi_table_header))) {
153 ACPI_ERROR((AE_INFO,
154 "Cannot read table header at %p", table_header));
155
156 return (AE_BAD_ADDRESS);
157 }
158 147
159 /* Ensure that the signature is 4 ASCII characters */ 148 /* Compute the checksum on the table */
160 149
161 ACPI_MOVE_32_TO_32(&signature, table_header->signature); 150 checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, table), length);
162 if (!acpi_ut_valid_acpi_name(signature)) {
163 ACPI_ERROR((AE_INFO, "Invalid table signature 0x%8.8X",
164 signature));
165 151
166 ACPI_DUMP_BUFFER(table_header, 152 /* Checksum ok? (should be zero) */
167 sizeof(struct acpi_table_header));
168 return (AE_BAD_SIGNATURE);
169 }
170 153
171 /* Validate the table length */ 154 if (checksum) {
155 ACPI_WARNING((AE_INFO,
156 "Incorrect checksum in table [%4.4s] - %2.2X, should be %2.2X",
157 table->signature, table->checksum,
158 (u8) (table->checksum - checksum)));
172 159
173 if (table_header->length < sizeof(struct acpi_table_header)) { 160#if (ACPI_CHECKSUM_ABORT)
174 ACPI_ERROR((AE_INFO,
175 "Invalid length 0x%X in table with signature %4.4s",
176 (u32) table_header->length,
177 ACPI_CAST_PTR(char, &signature)));
178 161
179 ACPI_DUMP_BUFFER(table_header, 162 return (AE_BAD_CHECKSUM);
180 sizeof(struct acpi_table_header)); 163#endif
181 return (AE_BAD_HEADER);
182 } 164 }
183 165
184 return (AE_OK); 166 return (AE_OK);
@@ -186,157 +168,320 @@ acpi_tb_validate_table_header(struct acpi_table_header *table_header)
186 168
187/******************************************************************************* 169/*******************************************************************************
188 * 170 *
189 * FUNCTION: acpi_tb_sum_table 171 * FUNCTION: acpi_tb_checksum
190 * 172 *
191 * PARAMETERS: Buffer - Buffer to sum 173 * PARAMETERS: Buffer - Pointer to memory region to be checked
192 * Length - Size of the buffer 174 * Length - Length of this memory region
193 * 175 *
194 * RETURN: 8 bit sum of buffer 176 * RETURN: Checksum (u8)
195 * 177 *
196 * DESCRIPTION: Computes an 8 bit sum of the buffer(length) and returns it. 178 * DESCRIPTION: Calculates circular checksum of memory region.
197 * 179 *
198 ******************************************************************************/ 180 ******************************************************************************/
199 181
200u8 acpi_tb_sum_table(void *buffer, u32 length) 182u8 acpi_tb_checksum(u8 * buffer, acpi_native_uint length)
201{ 183{
202 acpi_native_uint i;
203 u8 sum = 0; 184 u8 sum = 0;
185 u8 *end = buffer + length;
204 186
205 if (!buffer || !length) { 187 while (buffer < end) {
206 return (0); 188 sum = (u8) (sum + *(buffer++));
207 } 189 }
208 190
209 for (i = 0; i < length; i++) { 191 return sum;
210 sum = (u8) (sum + ((u8 *) buffer)[i]);
211 }
212 return (sum);
213} 192}
214 193
215/******************************************************************************* 194/*******************************************************************************
216 * 195 *
217 * FUNCTION: acpi_tb_generate_checksum 196 * FUNCTION: acpi_tb_install_table
218 * 197 *
219 * PARAMETERS: Table - Pointer to a valid ACPI table (with a 198 * PARAMETERS: Address - Physical address of DSDT or FACS
220 * standard ACPI header) 199 * Flags - Flags
200 * Signature - Table signature, NULL if no need to
201 * match
202 * table_index - Index into root table array
221 * 203 *
222 * RETURN: 8 bit checksum of buffer 204 * RETURN: None
223 * 205 *
224 * DESCRIPTION: Computes an 8 bit checksum of the table. 206 * DESCRIPTION: Install an ACPI table into the global data structure.
225 * 207 *
226 ******************************************************************************/ 208 ******************************************************************************/
227 209
228u8 acpi_tb_generate_checksum(struct acpi_table_header * table) 210void
211acpi_tb_install_table(acpi_physical_address address,
212 u8 flags, char *signature, acpi_native_uint table_index)
229{ 213{
230 u8 checksum; 214 struct acpi_table_header *table;
215
216 if (!address) {
217 ACPI_ERROR((AE_INFO,
218 "Null physical address for ACPI table [%s]",
219 signature));
220 return;
221 }
222
223 /* Map just the table header */
224
225 table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
226 if (!table) {
227 return;
228 }
229
230 /* If a particular signature is expected, signature must match */
231
232 if (signature && !ACPI_COMPARE_NAME(table->signature, signature)) {
233 ACPI_ERROR((AE_INFO,
234 "Invalid signature 0x%X for ACPI table [%s]",
235 *ACPI_CAST_PTR(u32, table->signature), signature));
236 goto unmap_and_exit;
237 }
231 238
232 /* Sum the entire table as-is */ 239 /* Initialize the table entry */
233 240
234 checksum = acpi_tb_sum_table(table, table->length); 241 acpi_gbl_root_table_list.tables[table_index].address = address;
242 acpi_gbl_root_table_list.tables[table_index].length = table->length;
243 acpi_gbl_root_table_list.tables[table_index].flags = flags;
235 244
236 /* Subtract off the existing checksum value in the table */ 245 ACPI_MOVE_32_TO_32(&
246 (acpi_gbl_root_table_list.tables[table_index].
247 signature), table->signature);
237 248
238 checksum = (u8) (checksum - table->checksum); 249 acpi_tb_print_table_header(address, table);
239 250
240 /* Compute the final checksum */ 251 if (table_index == ACPI_TABLE_INDEX_DSDT) {
241 252
242 checksum = (u8) (0 - checksum); 253 /* Global integer width is based upon revision of the DSDT */
243 return (checksum); 254
255 acpi_ut_set_integer_width(table->revision);
256 }
257
258 unmap_and_exit:
259 acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
244} 260}
245 261
246/******************************************************************************* 262/*******************************************************************************
247 * 263 *
248 * FUNCTION: acpi_tb_set_checksum 264 * FUNCTION: acpi_tb_get_root_table_entry
249 * 265 *
250 * PARAMETERS: Table - Pointer to a valid ACPI table (with a 266 * PARAMETERS: table_entry - Pointer to the RSDT/XSDT table entry
251 * standard ACPI header) 267 * table_entry_size - sizeof 32 or 64 (RSDT or XSDT)
252 * 268 *
253 * RETURN: None. Sets the table checksum field 269 * RETURN: Physical address extracted from the root table
254 * 270 *
255 * DESCRIPTION: Computes an 8 bit checksum of the table and inserts the 271 * DESCRIPTION: Get one root table entry. Handles 32-bit and 64-bit cases on
256 * checksum into the table header. 272 * both 32-bit and 64-bit platforms
273 *
274 * NOTE: acpi_physical_address is 32-bit on 32-bit platforms, 64-bit on
275 * 64-bit platforms.
257 * 276 *
258 ******************************************************************************/ 277 ******************************************************************************/
259 278
260void acpi_tb_set_checksum(struct acpi_table_header *table) 279static acpi_physical_address
280acpi_tb_get_root_table_entry(u8 * table_entry,
281 acpi_native_uint table_entry_size)
261{ 282{
283 u64 address64;
284
285 /*
286 * Get the table physical address (32-bit for RSDT, 64-bit for XSDT):
287 * Note: Addresses are 32-bit aligned (not 64) in both RSDT and XSDT
288 */
289 if (table_entry_size == sizeof(u32)) {
290 /*
291 * 32-bit platform, RSDT: Return 32-bit table entry
292 * 64-bit platform, RSDT: Expand 32-bit to 64-bit and return
293 */
294 return ((acpi_physical_address)
295 (*ACPI_CAST_PTR(u32, table_entry)));
296 } else {
297 /*
298 * 32-bit platform, XSDT: Truncate 64-bit to 32-bit and return
299 * 64-bit platform, XSDT: Move (unaligned) 64-bit to local, return 64-bit
300 */
301 ACPI_MOVE_64_TO_64(&address64, table_entry);
262 302
263 table->checksum = acpi_tb_generate_checksum(table); 303#if ACPI_MACHINE_WIDTH == 32
304 if (address64 > ACPI_UINT32_MAX) {
305
306 /* Will truncate 64-bit address to 32 bits, issue warning */
307
308 ACPI_WARNING((AE_INFO,
309 "64-bit Physical Address in XSDT is too large (%8.8X%8.8X), truncating",
310 ACPI_FORMAT_UINT64(address64)));
311 }
312#endif
313 return ((acpi_physical_address) (address64));
314 }
264} 315}
265 316
266/******************************************************************************* 317/*******************************************************************************
267 * 318 *
268 * FUNCTION: acpi_tb_verify_table_checksum 319 * FUNCTION: acpi_tb_parse_root_table
320 *
321 * PARAMETERS: Rsdp - Pointer to the RSDP
322 * Flags - Flags
269 * 323 *
270 * PARAMETERS: *table_header - ACPI table to verify 324 * RETURN: Status
271 * 325 *
272 * RETURN: 8 bit checksum of table 326 * DESCRIPTION: This function is called to parse the Root System Description
327 * Table (RSDT or XSDT)
273 * 328 *
274 * DESCRIPTION: Generates an 8 bit checksum of table and returns and compares 329 * NOTE: Tables are mapped (not copied) for efficiency. The FACS must
275 * it to the existing checksum value. 330 * be mapped and cannot be copied because it contains the actual
331 * memory location of the ACPI Global Lock.
276 * 332 *
277 ******************************************************************************/ 333 ******************************************************************************/
278 334
279acpi_status 335acpi_status __init
280acpi_tb_verify_table_checksum(struct acpi_table_header *table_header) 336acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags)
281{ 337{
282 u8 checksum; 338 struct acpi_table_rsdp *rsdp;
339 acpi_native_uint table_entry_size;
340 acpi_native_uint i;
341 u32 table_count;
342 struct acpi_table_header *table;
343 acpi_physical_address address;
344 u32 length;
345 u8 *table_entry;
346 acpi_status status;
347
348 ACPI_FUNCTION_TRACE(tb_parse_root_table);
349
350 /*
351 * Map the entire RSDP and extract the address of the RSDT or XSDT
352 */
353 rsdp = acpi_os_map_memory(rsdp_address, sizeof(struct acpi_table_rsdp));
354 if (!rsdp) {
355 return_ACPI_STATUS(AE_NO_MEMORY);
356 }
283 357
284 ACPI_FUNCTION_TRACE(tb_verify_table_checksum); 358 acpi_tb_print_table_header(rsdp_address,
359 ACPI_CAST_PTR(struct acpi_table_header,
360 rsdp));
285 361
286 /* Compute the checksum on the table */ 362 /* Differentiate between RSDT and XSDT root tables */
287 363
288 checksum = acpi_tb_generate_checksum(table_header); 364 if (rsdp->revision > 1 && rsdp->xsdt_physical_address) {
365 /*
366 * Root table is an XSDT (64-bit physical addresses). We must use the
367 * XSDT if the revision is > 1 and the XSDT pointer is present, as per
368 * the ACPI specification.
369 */
370 address = (acpi_physical_address) rsdp->xsdt_physical_address;
371 table_entry_size = sizeof(u64);
372 } else {
373 /* Root table is an RSDT (32-bit physical addresses) */
289 374
290 /* Checksum ok? */ 375 address = (acpi_physical_address) rsdp->rsdt_physical_address;
376 table_entry_size = sizeof(u32);
377 }
291 378
292 if (checksum == table_header->checksum) { 379 /*
293 return_ACPI_STATUS(AE_OK); 380 * It is not possible to map more than one entry in some environments,
381 * so unmap the RSDP here before mapping other tables
382 */
383 acpi_os_unmap_memory(rsdp, sizeof(struct acpi_table_rsdp));
384
385 /* Map the RSDT/XSDT table header to get the full table length */
386
387 table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
388 if (!table) {
389 return_ACPI_STATUS(AE_NO_MEMORY);
294 } 390 }
295 391
296 ACPI_WARNING((AE_INFO, 392 acpi_tb_print_table_header(address, table);
297 "Incorrect checksum in table [%4.4s] - is %2.2X, should be %2.2X",
298 table_header->signature, table_header->checksum,
299 checksum));
300 393
301 return_ACPI_STATUS(AE_BAD_CHECKSUM); 394 /* Get the length of the full table, verify length and map entire table */
302}
303 395
304#ifdef ACPI_OBSOLETE_FUNCTIONS 396 length = table->length;
305/******************************************************************************* 397 acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
306 *
307 * FUNCTION: acpi_tb_handle_to_object
308 *
309 * PARAMETERS: table_id - Id for which the function is searching
310 * table_desc - Pointer to return the matching table
311 * descriptor.
312 *
313 * RETURN: Search the tables to find one with a matching table_id and
314 * return a pointer to that table descriptor.
315 *
316 ******************************************************************************/
317 398
318acpi_status 399 if (length < sizeof(struct acpi_table_header)) {
319acpi_tb_handle_to_object(u16 table_id, 400 ACPI_ERROR((AE_INFO, "Invalid length 0x%X in RSDT/XSDT",
320 struct acpi_table_desc **return_table_desc) 401 length));
321{ 402 return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
322 u32 i; 403 }
323 struct acpi_table_desc *table_desc;
324 404
325 ACPI_FUNCTION_NAME(tb_handle_to_object); 405 table = acpi_os_map_memory(address, length);
406 if (!table) {
407 return_ACPI_STATUS(AE_NO_MEMORY);
408 }
409
410 /* Validate the root table checksum */
411
412 status = acpi_tb_verify_checksum(table, length);
413 if (ACPI_FAILURE(status)) {
414 acpi_os_unmap_memory(table, length);
415 return_ACPI_STATUS(status);
416 }
326 417
327 for (i = 0; i < ACPI_TABLE_MAX; i++) { 418 /* Calculate the number of tables described in the root table */
328 table_desc = acpi_gbl_table_lists[i].next; 419
329 while (table_desc) { 420 table_count =
330 if (table_desc->table_id == table_id) { 421 (u32) ((table->length -
331 *return_table_desc = table_desc; 422 sizeof(struct acpi_table_header)) / table_entry_size);
332 return (AE_OK); 423
424 /*
425 * First two entries in the table array are reserved for the DSDT and FACS,
426 * which are not actually present in the RSDT/XSDT - they come from the FADT
427 */
428 table_entry =
429 ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header);
430 acpi_gbl_root_table_list.count = 2;
431
432 /*
433 * Initialize the root table array from the RSDT/XSDT
434 */
435 for (i = 0; i < table_count; i++) {
436 if (acpi_gbl_root_table_list.count >=
437 acpi_gbl_root_table_list.size) {
438
439 /* There is no more room in the root table array, attempt resize */
440
441 status = acpi_tb_resize_root_table_list();
442 if (ACPI_FAILURE(status)) {
443 ACPI_WARNING((AE_INFO,
444 "Truncating %u table entries!",
445 (unsigned)
446 (acpi_gbl_root_table_list.size -
447 acpi_gbl_root_table_list.
448 count)));
449 break;
333 } 450 }
451 }
452
453 /* Get the table physical address (32-bit for RSDT, 64-bit for XSDT) */
334 454
335 table_desc = table_desc->next; 455 acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
456 address =
457 acpi_tb_get_root_table_entry(table_entry, table_entry_size);
458
459 table_entry += table_entry_size;
460 acpi_gbl_root_table_list.count++;
461 }
462
463 /*
464 * It is not possible to map more than one entry in some environments,
465 * so unmap the root table here before mapping other tables
466 */
467 acpi_os_unmap_memory(table, length);
468
469 /*
470 * Complete the initialization of the root table array by examining
471 * the header of each table
472 */
473 for (i = 2; i < acpi_gbl_root_table_list.count; i++) {
474 acpi_tb_install_table(acpi_gbl_root_table_list.tables[i].
475 address, flags, NULL, i);
476
477 /* Special case for FADT - get the DSDT and FACS */
478
479 if (ACPI_COMPARE_NAME
480 (&acpi_gbl_root_table_list.tables[i].signature,
481 ACPI_SIG_FADT)) {
482 acpi_tb_parse_fadt(i, flags);
336 } 483 }
337 } 484 }
338 485
339 ACPI_ERROR((AE_INFO, "TableId=%X does not exist", table_id)); 486 return_ACPI_STATUS(AE_OK);
340 return (AE_BAD_PARAMETER);
341} 487}
342#endif
diff --git a/drivers/acpi/tables/tbxface.c b/drivers/acpi/tables/tbxface.c
index 5ba9303293ad..807978d5381a 100644
--- a/drivers/acpi/tables/tbxface.c
+++ b/drivers/acpi/tables/tbxface.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -49,80 +49,158 @@
49#define _COMPONENT ACPI_TABLES 49#define _COMPONENT ACPI_TABLES
50ACPI_MODULE_NAME("tbxface") 50ACPI_MODULE_NAME("tbxface")
51 51
52/* Local prototypes */
53static acpi_status acpi_tb_load_namespace(void);
54
52/******************************************************************************* 55/*******************************************************************************
53 * 56 *
54 * FUNCTION: acpi_load_tables 57 * FUNCTION: acpi_allocate_root_table
55 * 58 *
56 * PARAMETERS: None 59 * PARAMETERS: initial_table_count - Size of initial_table_array, in number of
60 * struct acpi_table_desc structures
57 * 61 *
58 * RETURN: Status 62 * RETURN: Status
59 * 63 *
60 * DESCRIPTION: This function is called to load the ACPI tables from the 64 * DESCRIPTION: Allocate a root table array. Used by i_aSL compiler and
61 * provided RSDT 65 * acpi_initialize_tables.
62 * 66 *
63 ******************************************************************************/ 67 ******************************************************************************/
64acpi_status acpi_load_tables(void) 68
69acpi_status acpi_allocate_root_table(u32 initial_table_count)
65{ 70{
66 struct acpi_pointer rsdp_address;
67 acpi_status status;
68 71
69 ACPI_FUNCTION_TRACE(acpi_load_tables); 72 acpi_gbl_root_table_list.size = initial_table_count;
73 acpi_gbl_root_table_list.flags = ACPI_ROOT_ALLOW_RESIZE;
70 74
71 /* Get the RSDP */ 75 return (acpi_tb_resize_root_table_list());
76}
72 77
73 status = acpi_os_get_root_pointer(ACPI_LOGICAL_ADDRESSING, 78/*******************************************************************************
74 &rsdp_address); 79 *
75 if (ACPI_FAILURE(status)) { 80 * FUNCTION: acpi_initialize_tables
76 ACPI_EXCEPTION((AE_INFO, status, "Could not get the RSDP")); 81 *
77 goto error_exit; 82 * PARAMETERS: initial_table_array - Pointer to an array of pre-allocated
78 } 83 * struct acpi_table_desc structures. If NULL, the
84 * array is dynamically allocated.
85 * initial_table_count - Size of initial_table_array, in number of
86 * struct acpi_table_desc structures
87 * allow_realloc - Flag to tell Table Manager if resize of
88 * pre-allocated array is allowed. Ignored
89 * if initial_table_array is NULL.
90 *
91 * RETURN: Status
92 *
93 * DESCRIPTION: Initialize the table manager, get the RSDP and RSDT/XSDT.
94 *
95 * NOTE: Allows static allocation of the initial table array in order
96 * to avoid the use of dynamic memory in confined environments
97 * such as the kernel boot sequence where it may not be available.
98 *
99 * If the host OS memory managers are initialized, use NULL for
100 * initial_table_array, and the table will be dynamically allocated.
101 *
102 ******************************************************************************/
79 103
80 /* Map and validate the RSDP */ 104acpi_status __init
105acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
106 u32 initial_table_count, u8 allow_resize)
107{
108 acpi_physical_address rsdp_address;
109 acpi_status status;
81 110
82 acpi_gbl_table_flags = rsdp_address.pointer_type; 111 ACPI_FUNCTION_TRACE(acpi_initialize_tables);
83 112
84 status = acpi_tb_verify_rsdp(&rsdp_address); 113 /*
85 if (ACPI_FAILURE(status)) { 114 * Set up the Root Table Array
86 ACPI_EXCEPTION((AE_INFO, status, "During RSDP validation")); 115 * Allocate the table array if requested
87 goto error_exit; 116 */
117 if (!initial_table_array) {
118 status = acpi_allocate_root_table(initial_table_count);
119 if (ACPI_FAILURE(status)) {
120 return_ACPI_STATUS(status);
121 }
122 } else {
123 /* Root Table Array has been statically allocated by the host */
124
125 ACPI_MEMSET(initial_table_array, 0,
126 initial_table_count *
127 sizeof(struct acpi_table_desc));
128
129 acpi_gbl_root_table_list.tables = initial_table_array;
130 acpi_gbl_root_table_list.size = initial_table_count;
131 acpi_gbl_root_table_list.flags = ACPI_ROOT_ORIGIN_UNKNOWN;
132 if (allow_resize) {
133 acpi_gbl_root_table_list.flags |=
134 ACPI_ROOT_ALLOW_RESIZE;
135 }
88 } 136 }
89 137
90 /* Get the RSDT via the RSDP */ 138 /* Get the address of the RSDP */
91 139
92 status = acpi_tb_get_table_rsdt(); 140 rsdp_address = acpi_os_get_root_pointer();
93 if (ACPI_FAILURE(status)) { 141 if (!rsdp_address) {
94 ACPI_EXCEPTION((AE_INFO, status, "Could not load RSDT")); 142 return_ACPI_STATUS(AE_NOT_FOUND);
95 goto error_exit;
96 } 143 }
97 144
98 /* Now get the tables needed by this subsystem (FADT, DSDT, etc.) */ 145 /*
146 * Get the root table (RSDT or XSDT) and extract all entries to the local
147 * Root Table Array. This array contains the information of the RSDT/XSDT
148 * in a common, more useable format.
149 */
150 status =
151 acpi_tb_parse_root_table(rsdp_address, ACPI_TABLE_ORIGIN_MAPPED);
152 return_ACPI_STATUS(status);
153}
99 154
100 status = acpi_tb_get_required_tables(); 155/*******************************************************************************
101 if (ACPI_FAILURE(status)) { 156 *
102 ACPI_EXCEPTION((AE_INFO, status, 157 * FUNCTION: acpi_reallocate_root_table
103 "Could not get all required tables (DSDT/FADT/FACS)")); 158 *
104 goto error_exit; 159 * PARAMETERS: None
160 *
161 * RETURN: Status
162 *
163 * DESCRIPTION: Reallocate Root Table List into dynamic memory. Copies the
164 * root list from the previously provided scratch area. Should
165 * be called once dynamic memory allocation is available in the
166 * kernel
167 *
168 ******************************************************************************/
169acpi_status acpi_reallocate_root_table(void)
170{
171 struct acpi_table_desc *tables;
172 acpi_size new_size;
173
174 ACPI_FUNCTION_TRACE(acpi_reallocate_root_table);
175
176 /*
177 * Only reallocate the root table if the host provided a static buffer
178 * for the table array in the call to acpi_initialize_tables.
179 */
180 if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
181 return_ACPI_STATUS(AE_SUPPORT);
105 } 182 }
106 183
107 ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI Tables successfully acquired\n")); 184 new_size =
185 (acpi_gbl_root_table_list.count +
186 ACPI_ROOT_TABLE_SIZE_INCREMENT) * sizeof(struct acpi_table_desc);
108 187
109 /* Load the namespace from the tables */ 188 /* Create new array and copy the old array */
110 189
111 status = acpi_ns_load_namespace(); 190 tables = ACPI_ALLOCATE_ZEROED(new_size);
112 if (ACPI_FAILURE(status)) { 191 if (!tables) {
113 ACPI_EXCEPTION((AE_INFO, status, "Could not load namespace")); 192 return_ACPI_STATUS(AE_NO_MEMORY);
114 goto error_exit;
115 } 193 }
116 194
117 return_ACPI_STATUS(AE_OK); 195 ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, new_size);
118 196
119 error_exit: 197 acpi_gbl_root_table_list.size = acpi_gbl_root_table_list.count;
120 ACPI_EXCEPTION((AE_INFO, status, "Could not load tables")); 198 acpi_gbl_root_table_list.tables = tables;
121 return_ACPI_STATUS(status); 199 acpi_gbl_root_table_list.flags =
122} 200 ACPI_ROOT_ORIGIN_ALLOCATED | ACPI_ROOT_ALLOW_RESIZE;
123
124ACPI_EXPORT_SYMBOL(acpi_load_tables)
125 201
202 return_ACPI_STATUS(AE_OK);
203}
126/******************************************************************************* 204/*******************************************************************************
127 * 205 *
128 * FUNCTION: acpi_load_table 206 * FUNCTION: acpi_load_table
@@ -141,342 +219,405 @@ ACPI_EXPORT_SYMBOL(acpi_load_tables)
141acpi_status acpi_load_table(struct acpi_table_header *table_ptr) 219acpi_status acpi_load_table(struct acpi_table_header *table_ptr)
142{ 220{
143 acpi_status status; 221 acpi_status status;
144 struct acpi_table_desc table_info; 222 acpi_native_uint table_index;
145 struct acpi_pointer address; 223 struct acpi_table_desc table_desc;
146
147 ACPI_FUNCTION_TRACE(acpi_load_table);
148
149 if (!table_ptr) {
150 return_ACPI_STATUS(AE_BAD_PARAMETER);
151 }
152
153 /* Copy the table to a local buffer */
154 224
155 address.pointer_type = ACPI_LOGICAL_POINTER | ACPI_LOGICAL_ADDRESSING; 225 if (!table_ptr)
156 address.pointer.logical = table_ptr; 226 return AE_BAD_PARAMETER;
157
158 status = acpi_tb_get_table_body(&address, table_ptr, &table_info);
159 if (ACPI_FAILURE(status)) {
160 return_ACPI_STATUS(status);
161 }
162
163 /* Check signature for a valid table type */
164
165 status = acpi_tb_recognize_table(&table_info, ACPI_TABLE_ALL);
166 if (ACPI_FAILURE(status)) {
167 return_ACPI_STATUS(status);
168 }
169 227
170 /* Install the new table into the local data structures */ 228 ACPI_MEMSET(&table_desc, 0, sizeof(struct acpi_table_desc));
229 table_desc.pointer = table_ptr;
230 table_desc.length = table_ptr->length;
231 table_desc.flags = ACPI_TABLE_ORIGIN_UNKNOWN;
171 232
172 status = acpi_tb_install_table(&table_info); 233 /*
234 * Install the new table into the local data structures
235 */
236 status = acpi_tb_add_table(&table_desc, &table_index);
173 if (ACPI_FAILURE(status)) { 237 if (ACPI_FAILURE(status)) {
174 if (status == AE_ALREADY_EXISTS) { 238 return status;
175
176 /* Table already exists, no error */
177
178 status = AE_OK;
179 }
180
181 /* Free table allocated by acpi_tb_get_table_body */
182
183 acpi_tb_delete_single_table(&table_info);
184 return_ACPI_STATUS(status);
185 } 239 }
240 status = acpi_ns_load_table(table_index, acpi_gbl_root_node);
241 return status;
242}
186 243
187 /* Convert the table to common format if necessary */ 244ACPI_EXPORT_SYMBOL(acpi_load_table)
188
189 switch (table_info.type) {
190 case ACPI_TABLE_ID_FADT:
191
192 status = acpi_tb_convert_table_fadt();
193 break;
194
195 case ACPI_TABLE_ID_FACS:
196 245
197 status = acpi_tb_build_common_facs(&table_info); 246/******************************************************************************
198 break; 247 *
248 * FUNCTION: acpi_get_table_header
249 *
250 * PARAMETERS: Signature - ACPI signature of needed table
251 * Instance - Which instance (for SSDTs)
252 * out_table_header - The pointer to the table header to fill
253 *
254 * RETURN: Status and pointer to mapped table header
255 *
256 * DESCRIPTION: Finds an ACPI table header.
257 *
258 * NOTE: Caller is responsible in unmapping the header with
259 * acpi_os_unmap_memory
260 *
261 *****************************************************************************/
262acpi_status
263acpi_get_table_header(char *signature,
264 acpi_native_uint instance,
265 struct acpi_table_header *out_table_header)
266{
267 acpi_native_uint i;
268 acpi_native_uint j;
269 struct acpi_table_header *header;
199 270
200 default: 271 /* Parameter validation */
201 /* Load table into namespace if it contains executable AML */
202 272
203 status = 273 if (!signature || !out_table_header) {
204 acpi_ns_load_table(table_info.installed_desc, 274 return (AE_BAD_PARAMETER);
205 acpi_gbl_root_node);
206 break;
207 } 275 }
208 276
209 if (ACPI_FAILURE(status)) { 277 /*
278 * Walk the root table list
279 */
280 for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) {
281 if (!ACPI_COMPARE_NAME
282 (&(acpi_gbl_root_table_list.tables[i].signature),
283 signature)) {
284 continue;
285 }
210 286
211 /* Uninstall table and free the buffer */ 287 if (++j < instance) {
288 continue;
289 }
212 290
213 (void)acpi_tb_uninstall_table(table_info.installed_desc); 291 if (!acpi_gbl_root_table_list.tables[i].pointer) {
292 if ((acpi_gbl_root_table_list.tables[i].
293 flags & ACPI_TABLE_ORIGIN_MASK) ==
294 ACPI_TABLE_ORIGIN_MAPPED) {
295 header =
296 acpi_os_map_memory(acpi_gbl_root_table_list.
297 tables[i].address,
298 sizeof(struct
299 acpi_table_header));
300 if (!header) {
301 return AE_NO_MEMORY;
302 }
303 ACPI_MEMCPY(out_table_header, header,
304 sizeof(struct acpi_table_header));
305 acpi_os_unmap_memory(header,
306 sizeof(struct
307 acpi_table_header));
308 } else {
309 return AE_NOT_FOUND;
310 }
311 } else {
312 ACPI_MEMCPY(out_table_header,
313 acpi_gbl_root_table_list.tables[i].pointer,
314 sizeof(struct acpi_table_header));
315 }
316 return (AE_OK);
214 } 317 }
215 318
216 return_ACPI_STATUS(status); 319 return (AE_NOT_FOUND);
217} 320}
218 321
219ACPI_EXPORT_SYMBOL(acpi_load_table) 322ACPI_EXPORT_SYMBOL(acpi_get_table_header)
220 323
221/******************************************************************************* 324
325/******************************************************************************
222 * 326 *
223 * FUNCTION: acpi_unload_table_id 327 * FUNCTION: acpi_unload_table_id
224 * 328 *
225 * PARAMETERS: table_type - Type of table to be unloaded 329 * PARAMETERS: id - Owner ID of the table to be removed.
226 * id - Owner ID of the table to be removed.
227 * 330 *
228 * RETURN: Status 331 * RETURN: Status
229 * 332 *
230 * DESCRIPTION: This routine is used to force the unload of a table (by id) 333 * DESCRIPTION: This routine is used to force the unload of a table (by id)
231 * 334 *
232 ******************************************************************************/ 335 ******************************************************************************/
233acpi_status acpi_unload_table_id(acpi_table_type table_type, acpi_owner_id id) 336acpi_status acpi_unload_table_id(acpi_owner_id id)
234{ 337{
235 struct acpi_table_desc *table_desc; 338 int i;
236 acpi_status status; 339 acpi_status status = AE_NOT_EXIST;
237 340
238 ACPI_FUNCTION_TRACE(acpi_unload_table); 341 ACPI_FUNCTION_TRACE(acpi_unload_table);
239 342
240 /* Parameter validation */
241 if (table_type > ACPI_TABLE_ID_MAX)
242 return_ACPI_STATUS(AE_BAD_PARAMETER);
243
244 /* Find table from the requested type list */ 343 /* Find table from the requested type list */
245 table_desc = acpi_gbl_table_lists[table_type].next; 344 for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
246 while (table_desc && table_desc->owner_id != id) 345 if (id != acpi_gbl_root_table_list.tables[i].owner_id) {
247 table_desc = table_desc->next; 346 continue;
248 347 }
249 if (!table_desc) 348 /*
250 return_ACPI_STATUS(AE_NOT_EXIST); 349 * Delete all namespace objects owned by this table. Note that these
251 350 * objects can appear anywhere in the namespace by virtue of the AML
252 /* 351 * "Scope" operator. Thus, we need to track ownership by an ID, not
253 * Delete all namespace objects owned by this table. Note that these 352 * simply a position within the hierarchy
254 * objects can appear anywhere in the namespace by virtue of the AML 353 */
255 * "Scope" operator. Thus, we need to track ownership by an ID, not 354 acpi_tb_delete_namespace_by_owner(i);
256 * simply a position within the hierarchy 355 acpi_tb_release_owner_id(i);
257 */ 356 acpi_tb_set_table_loaded_flag(i, FALSE);
258 acpi_ns_delete_namespace_by_owner(table_desc->owner_id); 357 }
259 358 return_ACPI_STATUS(status);
260 status = acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
261 if (ACPI_FAILURE(status))
262 return_ACPI_STATUS(status);
263
264 (void)acpi_tb_uninstall_table(table_desc);
265
266 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
267
268 return_ACPI_STATUS(AE_OK);
269} 359}
270 360
271ACPI_EXPORT_SYMBOL(acpi_unload_table_id) 361ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
272 362
273#ifdef ACPI_FUTURE_USAGE
274/******************************************************************************* 363/*******************************************************************************
275 * 364 *
276 * FUNCTION: acpi_unload_table 365 * FUNCTION: acpi_get_table
277 * 366 *
278 * PARAMETERS: table_type - Type of table to be unloaded 367 * PARAMETERS: Signature - ACPI signature of needed table
368 * Instance - Which instance (for SSDTs)
369 * out_table - Where the pointer to the table is returned
279 * 370 *
280 * RETURN: Status 371 * RETURN: Status and pointer to table
281 * 372 *
282 * DESCRIPTION: This routine is used to force the unload of a table 373 * DESCRIPTION: Finds and verifies an ACPI table.
283 * 374 *
284 ******************************************************************************/ 375 *****************************************************************************/
285acpi_status acpi_unload_table(acpi_table_type table_type) 376acpi_status
377acpi_get_table(char *signature,
378 acpi_native_uint instance, struct acpi_table_header ** out_table)
286{ 379{
287 struct acpi_table_desc *table_desc; 380 acpi_native_uint i;
288 381 acpi_native_uint j;
289 ACPI_FUNCTION_TRACE(acpi_unload_table); 382 acpi_status status;
290 383
291 /* Parameter validation */ 384 /* Parameter validation */
292 385
293 if (table_type > ACPI_TABLE_ID_MAX) { 386 if (!signature || !out_table) {
294 return_ACPI_STATUS(AE_BAD_PARAMETER); 387 return (AE_BAD_PARAMETER);
295 } 388 }
296 389
297 /* Find all tables of the requested type */ 390 /*
391 * Walk the root table list
392 */
393 for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) {
394 if (!ACPI_COMPARE_NAME
395 (&(acpi_gbl_root_table_list.tables[i].signature),
396 signature)) {
397 continue;
398 }
298 399
299 table_desc = acpi_gbl_table_lists[table_type].next; 400 if (++j < instance) {
300 if (!table_desc) { 401 continue;
301 return_ACPI_STATUS(AE_NOT_EXIST); 402 }
302 }
303 403
304 while (table_desc) { 404 status =
305 /* 405 acpi_tb_verify_table(&acpi_gbl_root_table_list.tables[i]);
306 * Delete all namespace objects owned by this table. Note that these 406 if (ACPI_SUCCESS(status)) {
307 * objects can appear anywhere in the namespace by virtue of the AML 407 *out_table = acpi_gbl_root_table_list.tables[i].pointer;
308 * "Scope" operator. Thus, we need to track ownership by an ID, not 408 }
309 * simply a position within the hierarchy
310 */
311 acpi_ns_delete_namespace_by_owner(table_desc->owner_id);
312 table_desc = table_desc->next;
313 }
314 409
315 /* Delete (or unmap) all tables of this type */ 410 if (!acpi_gbl_permanent_mmap) {
411 acpi_gbl_root_table_list.tables[i].pointer = 0;
412 }
316 413
317 acpi_tb_delete_tables_by_type(table_type); 414 return (status);
318 return_ACPI_STATUS(AE_OK); 415 }
416
417 return (AE_NOT_FOUND);
319} 418}
320 419
321ACPI_EXPORT_SYMBOL(acpi_unload_table) 420ACPI_EXPORT_SYMBOL(acpi_get_table)
322 421
323/******************************************************************************* 422/*******************************************************************************
324 * 423 *
325 * FUNCTION: acpi_get_table_header 424 * FUNCTION: acpi_get_table_by_index
326 * 425 *
327 * PARAMETERS: table_type - one of the defined table types 426 * PARAMETERS: table_index - Table index
328 * Instance - the non zero instance of the table, allows 427 * Table - Where the pointer to the table is returned
329 * support for multiple tables of the same type
330 * see acpi_gbl_acpi_table_flag
331 * out_table_header - pointer to the struct acpi_table_header if successful
332 * 428 *
333 * DESCRIPTION: This function is called to get an ACPI table header. The caller 429 * RETURN: Status and pointer to the table
334 * supplies an pointer to a data area sufficient to contain an ACPI
335 * struct acpi_table_header structure.
336 * 430 *
337 * The header contains a length field that can be used to determine 431 * DESCRIPTION: Obtain a table by an index into the global table list.
338 * the size of the buffer needed to contain the entire table. This
339 * function is not valid for the RSD PTR table since it does not
340 * have a standard header and is fixed length.
341 * 432 *
342 ******************************************************************************/ 433 ******************************************************************************/
343acpi_status 434acpi_status
344acpi_get_table_header(acpi_table_type table_type, 435acpi_get_table_by_index(acpi_native_uint table_index,
345 u32 instance, struct acpi_table_header *out_table_header) 436 struct acpi_table_header ** table)
346{ 437{
347 struct acpi_table_header *tbl_ptr;
348 acpi_status status; 438 acpi_status status;
349 439
350 ACPI_FUNCTION_TRACE(acpi_get_table_header); 440 ACPI_FUNCTION_TRACE(acpi_get_table_by_index);
441
442 /* Parameter validation */
351 443
352 if ((instance == 0) || 444 if (!table) {
353 (table_type == ACPI_TABLE_ID_RSDP) || (!out_table_header)) {
354 return_ACPI_STATUS(AE_BAD_PARAMETER); 445 return_ACPI_STATUS(AE_BAD_PARAMETER);
355 } 446 }
356 447
357 /* Check the table type and instance */ 448 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
449
450 /* Validate index */
358 451
359 if ((table_type > ACPI_TABLE_ID_MAX) || 452 if (table_index >= acpi_gbl_root_table_list.count) {
360 (ACPI_IS_SINGLE_TABLE(acpi_gbl_table_data[table_type].flags) && 453 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
361 instance > 1)) {
362 return_ACPI_STATUS(AE_BAD_PARAMETER); 454 return_ACPI_STATUS(AE_BAD_PARAMETER);
363 } 455 }
364 456
365 /* Get a pointer to the entire table */ 457 if (!acpi_gbl_root_table_list.tables[table_index].pointer) {
366 458
367 status = acpi_tb_get_table_ptr(table_type, instance, &tbl_ptr); 459 /* Table is not mapped, map it */
368 if (ACPI_FAILURE(status)) {
369 return_ACPI_STATUS(status);
370 }
371 460
372 /* The function will return a NULL pointer if the table is not loaded */ 461 status =
373 462 acpi_tb_verify_table(&acpi_gbl_root_table_list.
374 if (tbl_ptr == NULL) { 463 tables[table_index]);
375 return_ACPI_STATUS(AE_NOT_EXIST); 464 if (ACPI_FAILURE(status)) {
465 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
466 return_ACPI_STATUS(status);
467 }
376 } 468 }
377 469
378 /* Copy the header to the caller's buffer */ 470 *table = acpi_gbl_root_table_list.tables[table_index].pointer;
379 471 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
380 ACPI_MEMCPY(ACPI_CAST_PTR(void, out_table_header), 472 return_ACPI_STATUS(AE_OK);
381 ACPI_CAST_PTR(void, tbl_ptr),
382 sizeof(struct acpi_table_header));
383
384 return_ACPI_STATUS(status);
385} 473}
386 474
387ACPI_EXPORT_SYMBOL(acpi_get_table_header) 475ACPI_EXPORT_SYMBOL(acpi_get_table_by_index)
388#endif /* ACPI_FUTURE_USAGE */
389 476
390/******************************************************************************* 477/*******************************************************************************
391 * 478 *
392 * FUNCTION: acpi_get_table 479 * FUNCTION: acpi_tb_load_namespace
393 * 480 *
394 * PARAMETERS: table_type - one of the defined table types 481 * PARAMETERS: None
395 * Instance - the non zero instance of the table, allows
396 * support for multiple tables of the same type
397 * see acpi_gbl_acpi_table_flag
398 * ret_buffer - pointer to a structure containing a buffer to
399 * receive the table
400 * 482 *
401 * RETURN: Status 483 * RETURN: Status
402 * 484 *
403 * DESCRIPTION: This function is called to get an ACPI table. The caller 485 * DESCRIPTION: Load the namespace from the DSDT and all SSDTs/PSDTs found in
404 * supplies an out_buffer large enough to contain the entire ACPI 486 * the RSDT/XSDT.
405 * table. The caller should call the acpi_get_table_header function
406 * first to determine the buffer size needed. Upon completion
407 * the out_buffer->Length field will indicate the number of bytes
408 * copied into the out_buffer->buf_ptr buffer. This table will be
409 * a complete table including the header.
410 * 487 *
411 ******************************************************************************/ 488 ******************************************************************************/
412acpi_status 489static acpi_status acpi_tb_load_namespace(void)
413acpi_get_table(acpi_table_type table_type,
414 u32 instance, struct acpi_buffer *ret_buffer)
415{ 490{
416 struct acpi_table_header *tbl_ptr;
417 acpi_status status; 491 acpi_status status;
418 acpi_size table_length; 492 struct acpi_table_header *table;
493 acpi_native_uint i;
419 494
420 ACPI_FUNCTION_TRACE(acpi_get_table); 495 ACPI_FUNCTION_TRACE(tb_load_namespace);
421 496
422 /* Parameter validation */ 497 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
423 498
424 if (instance == 0) { 499 /*
425 return_ACPI_STATUS(AE_BAD_PARAMETER); 500 * Load the namespace. The DSDT is required, but any SSDT and PSDT tables
501 * are optional.
502 */
503 if (!acpi_gbl_root_table_list.count ||
504 !ACPI_COMPARE_NAME(&
505 (acpi_gbl_root_table_list.
506 tables[ACPI_TABLE_INDEX_DSDT].signature),
507 ACPI_SIG_DSDT)
508 ||
509 ACPI_FAILURE(acpi_tb_verify_table
510 (&acpi_gbl_root_table_list.
511 tables[ACPI_TABLE_INDEX_DSDT]))) {
512 status = AE_NO_ACPI_TABLES;
513 goto unlock_and_exit;
426 } 514 }
427 515
428 status = acpi_ut_validate_buffer(ret_buffer); 516 /*
429 if (ACPI_FAILURE(status)) { 517 * Find DSDT table
430 return_ACPI_STATUS(status); 518 */
519 status =
520 acpi_os_table_override(acpi_gbl_root_table_list.
521 tables[ACPI_TABLE_INDEX_DSDT].pointer,
522 &table);
523 if (ACPI_SUCCESS(status) && table) {
524 /*
525 * DSDT table has been found
526 */
527 acpi_tb_delete_table(&acpi_gbl_root_table_list.
528 tables[ACPI_TABLE_INDEX_DSDT]);
529 acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer =
530 table;
531 acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].length =
532 table->length;
533 acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].flags =
534 ACPI_TABLE_ORIGIN_UNKNOWN;
535
536 ACPI_INFO((AE_INFO, "Table DSDT replaced by host OS"));
537 acpi_tb_print_table_header(0, table);
431 } 538 }
432 539
433 /* Check the table type and instance */ 540 status =
541 acpi_tb_verify_table(&acpi_gbl_root_table_list.
542 tables[ACPI_TABLE_INDEX_DSDT]);
543 if (ACPI_FAILURE(status)) {
434 544
435 if ((table_type > ACPI_TABLE_ID_MAX) || 545 /* A valid DSDT is required */
436 (ACPI_IS_SINGLE_TABLE(acpi_gbl_table_data[table_type].flags) && 546
437 instance > 1)) { 547 status = AE_NO_ACPI_TABLES;
438 return_ACPI_STATUS(AE_BAD_PARAMETER); 548 goto unlock_and_exit;
439 } 549 }
440 550
441 /* Get a pointer to the entire table */ 551 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
442 552
443 status = acpi_tb_get_table_ptr(table_type, instance, &tbl_ptr); 553 /*
554 * Load and parse tables.
555 */
556 status = acpi_ns_load_table(ACPI_TABLE_INDEX_DSDT, acpi_gbl_root_node);
444 if (ACPI_FAILURE(status)) { 557 if (ACPI_FAILURE(status)) {
445 return_ACPI_STATUS(status); 558 return_ACPI_STATUS(status);
446 } 559 }
447 560
448 /* 561 /*
449 * acpi_tb_get_table_ptr will return a NULL pointer if the 562 * Load any SSDT or PSDT tables. Note: Loop leaves tables locked
450 * table is not loaded.
451 */ 563 */
452 if (tbl_ptr == NULL) { 564 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
453 return_ACPI_STATUS(AE_NOT_EXIST); 565 for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
566 if ((!ACPI_COMPARE_NAME
567 (&(acpi_gbl_root_table_list.tables[i].signature),
568 ACPI_SIG_SSDT)
569 &&
570 !ACPI_COMPARE_NAME(&
571 (acpi_gbl_root_table_list.tables[i].
572 signature), ACPI_SIG_PSDT))
573 ||
574 ACPI_FAILURE(acpi_tb_verify_table
575 (&acpi_gbl_root_table_list.tables[i]))) {
576 continue;
577 }
578
579 /* Ignore errors while loading tables, get as many as possible */
580
581 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
582 (void)acpi_ns_load_table(i, acpi_gbl_root_node);
583 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
454 } 584 }
455 585
456 /* Get the table length */ 586 ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI Tables successfully acquired\n"));
457 587
458 if (table_type == ACPI_TABLE_ID_RSDP) { 588 unlock_and_exit:
589 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
590 return_ACPI_STATUS(status);
591}
459 592
460 /* RSD PTR is the only "table" without a header */ 593/*******************************************************************************
594 *
595 * FUNCTION: acpi_load_tables
596 *
597 * PARAMETERS: None
598 *
599 * RETURN: Status
600 *
601 * DESCRIPTION: Load the ACPI tables from the RSDT/XSDT
602 *
603 ******************************************************************************/
461 604
462 table_length = sizeof(struct rsdp_descriptor); 605acpi_status acpi_load_tables(void)
463 } else { 606{
464 table_length = (acpi_size) tbl_ptr->length; 607 acpi_status status;
465 }
466 608
467 /* Validate/Allocate/Clear caller buffer */ 609 ACPI_FUNCTION_TRACE(acpi_load_tables);
468 610
469 status = acpi_ut_initialize_buffer(ret_buffer, table_length); 611 /*
612 * Load the namespace from the tables
613 */
614 status = acpi_tb_load_namespace();
470 if (ACPI_FAILURE(status)) { 615 if (ACPI_FAILURE(status)) {
471 return_ACPI_STATUS(status); 616 ACPI_EXCEPTION((AE_INFO, status,
617 "While loading namespace from ACPI tables"));
472 } 618 }
473 619
474 /* Copy the table to the buffer */ 620 return_ACPI_STATUS(status);
475
476 ACPI_MEMCPY(ACPI_CAST_PTR(void, ret_buffer->pointer),
477 ACPI_CAST_PTR(void, tbl_ptr), table_length);
478
479 return_ACPI_STATUS(AE_OK);
480} 621}
481 622
482ACPI_EXPORT_SYMBOL(acpi_get_table) 623ACPI_EXPORT_SYMBOL(acpi_load_tables)
diff --git a/drivers/acpi/tables/tbxfroot.c b/drivers/acpi/tables/tbxfroot.c
index da2648bbdbc0..cf8fa514189f 100644
--- a/drivers/acpi/tables/tbxfroot.c
+++ b/drivers/acpi/tables/tbxfroot.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -48,16 +48,15 @@
48ACPI_MODULE_NAME("tbxfroot") 48ACPI_MODULE_NAME("tbxfroot")
49 49
50/* Local prototypes */ 50/* Local prototypes */
51static acpi_status
52acpi_tb_find_rsdp(struct acpi_table_desc *table_info, u32 flags);
53
54static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length); 51static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length);
55 52
53static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp);
54
56/******************************************************************************* 55/*******************************************************************************
57 * 56 *
58 * FUNCTION: acpi_tb_validate_rsdp 57 * FUNCTION: acpi_tb_validate_rsdp
59 * 58 *
60 * PARAMETERS: Rsdp - Pointer to unvalidated RSDP 59 * PARAMETERS: Rsdp - Pointer to unvalidated RSDP
61 * 60 *
62 * RETURN: Status 61 * RETURN: Status
63 * 62 *
@@ -65,14 +64,18 @@ static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length);
65 * 64 *
66 ******************************************************************************/ 65 ******************************************************************************/
67 66
68acpi_status acpi_tb_validate_rsdp(struct rsdp_descriptor *rsdp) 67static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
69{ 68{
70 ACPI_FUNCTION_ENTRY(); 69 ACPI_FUNCTION_ENTRY();
71 70
72 /* 71 /*
73 * The signature and checksum must both be correct 72 * The signature and checksum must both be correct
73 *
74 * Note: Sometimes there exists more than one RSDP in memory; the valid
75 * RSDP has a valid checksum, all others have an invalid checksum.
74 */ 76 */
75 if (ACPI_STRNCMP((char *)rsdp, RSDP_SIG, sizeof(RSDP_SIG) - 1) != 0) { 77 if (ACPI_STRNCMP((char *)rsdp, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1)
78 != 0) {
76 79
77 /* Nope, BAD Signature */ 80 /* Nope, BAD Signature */
78 81
@@ -81,14 +84,14 @@ acpi_status acpi_tb_validate_rsdp(struct rsdp_descriptor *rsdp)
81 84
82 /* Check the standard checksum */ 85 /* Check the standard checksum */
83 86
84 if (acpi_tb_sum_table(rsdp, ACPI_RSDP_CHECKSUM_LENGTH) != 0) { 87 if (acpi_tb_checksum((u8 *) rsdp, ACPI_RSDP_CHECKSUM_LENGTH) != 0) {
85 return (AE_BAD_CHECKSUM); 88 return (AE_BAD_CHECKSUM);
86 } 89 }
87 90
88 /* Check extended checksum if table version >= 2 */ 91 /* Check extended checksum if table version >= 2 */
89 92
90 if ((rsdp->revision >= 2) && 93 if ((rsdp->revision >= 2) &&
91 (acpi_tb_sum_table(rsdp, ACPI_RSDP_XCHECKSUM_LENGTH) != 0)) { 94 (acpi_tb_checksum((u8 *) rsdp, ACPI_RSDP_XCHECKSUM_LENGTH) != 0)) {
92 return (AE_BAD_CHECKSUM); 95 return (AE_BAD_CHECKSUM);
93 } 96 }
94 97
@@ -97,314 +100,123 @@ acpi_status acpi_tb_validate_rsdp(struct rsdp_descriptor *rsdp)
97 100
98/******************************************************************************* 101/*******************************************************************************
99 * 102 *
100 * FUNCTION: acpi_tb_find_table 103 * FUNCTION: acpi_tb_find_rsdp
101 *
102 * PARAMETERS: Signature - String with ACPI table signature
103 * oem_id - String with the table OEM ID
104 * oem_table_id - String with the OEM Table ID
105 * table_ptr - Where the table pointer is returned
106 *
107 * RETURN: Status
108 * 104 *
109 * DESCRIPTION: Find an ACPI table (in the RSDT/XSDT) that matches the 105 * PARAMETERS: table_address - Where the table pointer is returned
110 * Signature, OEM ID and OEM Table ID.
111 * 106 *
112 ******************************************************************************/ 107 * RETURN: Status, RSDP physical address
113
114acpi_status
115acpi_tb_find_table(char *signature,
116 char *oem_id,
117 char *oem_table_id, struct acpi_table_header ** table_ptr)
118{
119 acpi_status status;
120 struct acpi_table_header *table;
121
122 ACPI_FUNCTION_TRACE(tb_find_table);
123
124 /* Validate string lengths */
125
126 if ((ACPI_STRLEN(signature) > ACPI_NAME_SIZE) ||
127 (ACPI_STRLEN(oem_id) > sizeof(table->oem_id)) ||
128 (ACPI_STRLEN(oem_table_id) > sizeof(table->oem_table_id))) {
129 return_ACPI_STATUS(AE_AML_STRING_LIMIT);
130 }
131
132 if (ACPI_COMPARE_NAME(signature, DSDT_SIG)) {
133 /*
134 * The DSDT pointer is contained in the FADT, not the RSDT.
135 * This code should suffice, because the only code that would perform
136 * a "find" on the DSDT is the data_table_region() AML opcode -- in
137 * which case, the DSDT is guaranteed to be already loaded.
138 * If this becomes insufficient, the FADT will have to be found first.
139 */
140 if (!acpi_gbl_DSDT) {
141 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
142 }
143 table = acpi_gbl_DSDT;
144 } else {
145 /* Find the table */
146
147 status = acpi_get_firmware_table(signature, 1,
148 ACPI_LOGICAL_ADDRESSING,
149 &table);
150 if (ACPI_FAILURE(status)) {
151 return_ACPI_STATUS(status);
152 }
153 }
154
155 /* Check oem_id and oem_table_id */
156
157 if ((oem_id[0] &&
158 ACPI_STRNCMP(oem_id, table->oem_id,
159 sizeof(table->oem_id))) ||
160 (oem_table_id[0] &&
161 ACPI_STRNCMP(oem_table_id, table->oem_table_id,
162 sizeof(table->oem_table_id)))) {
163 return_ACPI_STATUS(AE_AML_NAME_NOT_FOUND);
164 }
165
166 ACPI_DEBUG_PRINT((ACPI_DB_TABLES, "Found table [%4.4s]\n",
167 table->signature));
168
169 *table_ptr = table;
170 return_ACPI_STATUS(AE_OK);
171}
172
173/*******************************************************************************
174 *
175 * FUNCTION: acpi_get_firmware_table
176 * 108 *
177 * PARAMETERS: Signature - Any ACPI table signature 109 * DESCRIPTION: Search lower 1_mbyte of memory for the root system descriptor
178 * Instance - the non zero instance of the table, allows 110 * pointer structure. If it is found, set *RSDP to point to it.
179 * support for multiple tables of the same type
180 * Flags - Physical/Virtual support
181 * table_pointer - Where a buffer containing the table is
182 * returned
183 * 111 *
184 * RETURN: Status 112 * NOTE1: The RSDP must be either in the first 1_k of the Extended
113 * BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.)
114 * Only a 32-bit physical address is necessary.
185 * 115 *
186 * DESCRIPTION: This function is called to get an ACPI table. A buffer is 116 * NOTE2: This function is always available, regardless of the
187 * allocated for the table and returned in table_pointer. 117 * initialization state of the rest of ACPI.
188 * This table will be a complete table including the header.
189 * 118 *
190 ******************************************************************************/ 119 ******************************************************************************/
191 120
192acpi_status 121acpi_status acpi_find_root_pointer(acpi_native_uint * table_address)
193acpi_get_firmware_table(acpi_string signature,
194 u32 instance,
195 u32 flags, struct acpi_table_header **table_pointer)
196{ 122{
197 acpi_status status; 123 u8 *table_ptr;
198 struct acpi_pointer address; 124 u8 *mem_rover;
199 struct acpi_table_header *header = NULL; 125 u32 physical_address;
200 struct acpi_table_desc *table_info = NULL;
201 struct acpi_table_desc *rsdt_info;
202 u32 table_count;
203 u32 i;
204 u32 j;
205
206 ACPI_FUNCTION_TRACE(acpi_get_firmware_table);
207
208 /*
209 * Ensure that at least the table manager is initialized. We don't
210 * require that the entire ACPI subsystem is up for this interface.
211 * If we have a buffer, we must have a length too
212 */
213 if ((instance == 0) || (!signature) || (!table_pointer)) {
214 return_ACPI_STATUS(AE_BAD_PARAMETER);
215 }
216
217 /* Ensure that we have a RSDP */
218
219 if (!acpi_gbl_RSDP) {
220
221 /* Get the RSDP */
222
223 status = acpi_os_get_root_pointer(flags, &address);
224 if (ACPI_FAILURE(status)) {
225 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "RSDP not found\n"));
226 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
227 }
228
229 /* Map and validate the RSDP */
230
231 if ((flags & ACPI_MEMORY_MODE) == ACPI_LOGICAL_ADDRESSING) {
232 status = acpi_os_map_memory(address.pointer.physical,
233 sizeof(struct
234 rsdp_descriptor),
235 (void *)&acpi_gbl_RSDP);
236 if (ACPI_FAILURE(status)) {
237 return_ACPI_STATUS(status);
238 }
239 } else {
240 acpi_gbl_RSDP = address.pointer.logical;
241 }
242
243 /* The RDSP signature and checksum must both be correct */
244
245 status = acpi_tb_validate_rsdp(acpi_gbl_RSDP);
246 if (ACPI_FAILURE(status)) {
247 return_ACPI_STATUS(status);
248 }
249 }
250
251 /* Get the RSDT address via the RSDP */
252
253 acpi_tb_get_rsdt_address(&address);
254 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
255 "RSDP located at %p, RSDT physical=%8.8X%8.8X\n",
256 acpi_gbl_RSDP,
257 ACPI_FORMAT_UINT64(address.pointer.value)));
258 126
259 /* Insert processor_mode flags */ 127 ACPI_FUNCTION_TRACE(acpi_find_root_pointer);
260 128
261 address.pointer_type |= flags; 129 /* 1a) Get the location of the Extended BIOS Data Area (EBDA) */
262 130
263 /* Get and validate the RSDT */ 131 table_ptr = acpi_os_map_memory((acpi_physical_address)
132 ACPI_EBDA_PTR_LOCATION,
133 ACPI_EBDA_PTR_LENGTH);
134 if (!table_ptr) {
135 ACPI_ERROR((AE_INFO,
136 "Could not map memory at %8.8X for length %X",
137 ACPI_EBDA_PTR_LOCATION, ACPI_EBDA_PTR_LENGTH));
264 138
265 rsdt_info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_table_desc));
266 if (!rsdt_info) {
267 return_ACPI_STATUS(AE_NO_MEMORY); 139 return_ACPI_STATUS(AE_NO_MEMORY);
268 } 140 }
269 141
270 status = acpi_tb_get_table(&address, rsdt_info); 142 ACPI_MOVE_16_TO_32(&physical_address, table_ptr);
271 if (ACPI_FAILURE(status)) {
272 goto cleanup;
273 }
274
275 status = acpi_tb_validate_rsdt(rsdt_info->pointer);
276 if (ACPI_FAILURE(status)) {
277 goto cleanup;
278 }
279 143
280 /* Allocate a scratch table header and table descriptor */ 144 /* Convert segment part to physical address */
281 145
282 header = ACPI_ALLOCATE(sizeof(struct acpi_table_header)); 146 physical_address <<= 4;
283 if (!header) { 147 acpi_os_unmap_memory(table_ptr, ACPI_EBDA_PTR_LENGTH);
284 status = AE_NO_MEMORY;
285 goto cleanup;
286 }
287 148
288 table_info = ACPI_ALLOCATE(sizeof(struct acpi_table_desc)); 149 /* EBDA present? */
289 if (!table_info) {
290 status = AE_NO_MEMORY;
291 goto cleanup;
292 }
293 150
294 /* Get the number of table pointers within the RSDT */ 151 if (physical_address > 0x400) {
295
296 table_count =
297 acpi_tb_get_table_count(acpi_gbl_RSDP, rsdt_info->pointer);
298 address.pointer_type = acpi_gbl_table_flags | flags;
299
300 /*
301 * Search the RSDT/XSDT for the correct instance of the
302 * requested table
303 */
304 for (i = 0, j = 0; i < table_count; i++) {
305 /* 152 /*
306 * Get the next table pointer, handle RSDT vs. XSDT 153 * 1b) Search EBDA paragraphs (EBDA is required to be a
307 * RSDT pointers are 32 bits, XSDT pointers are 64 bits 154 * minimum of 1_k length)
308 */ 155 */
309 if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) { 156 table_ptr = acpi_os_map_memory((acpi_native_uint)
310 address.pointer.value = 157 physical_address,
311 (ACPI_CAST_PTR 158 ACPI_EBDA_WINDOW_SIZE);
312 (struct rsdt_descriptor, 159 if (!table_ptr) {
313 rsdt_info->pointer))->table_offset_entry[i]; 160 ACPI_ERROR((AE_INFO,
314 } else { 161 "Could not map memory at %8.8X for length %X",
315 address.pointer.value = 162 physical_address, ACPI_EBDA_WINDOW_SIZE));
316 (ACPI_CAST_PTR
317 (struct xsdt_descriptor,
318 rsdt_info->pointer))->table_offset_entry[i];
319 }
320
321 /* Get the table header */
322 163
323 status = acpi_tb_get_table_header(&address, header); 164 return_ACPI_STATUS(AE_NO_MEMORY);
324 if (ACPI_FAILURE(status)) {
325 goto cleanup;
326 } 165 }
327 166
328 /* Compare table signatures and table instance */ 167 mem_rover =
329 168 acpi_tb_scan_memory_for_rsdp(table_ptr,
330 if (ACPI_COMPARE_NAME(header->signature, signature)) { 169 ACPI_EBDA_WINDOW_SIZE);
331 170 acpi_os_unmap_memory(table_ptr, ACPI_EBDA_WINDOW_SIZE);
332 /* An instance of the table was found */
333 171
334 j++; 172 if (mem_rover) {
335 if (j >= instance) {
336 173
337 /* Found the correct instance, get the entire table */ 174 /* Return the physical address */
338 175
339 status = 176 physical_address +=
340 acpi_tb_get_table_body(&address, header, 177 (u32) ACPI_PTR_DIFF(mem_rover, table_ptr);
341 table_info);
342 if (ACPI_FAILURE(status)) {
343 goto cleanup;
344 }
345 178
346 *table_pointer = table_info->pointer; 179 *table_address = physical_address;
347 goto cleanup; 180 return_ACPI_STATUS(AE_OK);
348 }
349 } 181 }
350 } 182 }
351 183
352 /* Did not find the table */ 184 /*
185 * 2) Search upper memory: 16-byte boundaries in E0000h-FFFFFh
186 */
187 table_ptr = acpi_os_map_memory((acpi_physical_address)
188 ACPI_HI_RSDP_WINDOW_BASE,
189 ACPI_HI_RSDP_WINDOW_SIZE);
353 190
354 status = AE_NOT_EXIST; 191 if (!table_ptr) {
192 ACPI_ERROR((AE_INFO,
193 "Could not map memory at %8.8X for length %X",
194 ACPI_HI_RSDP_WINDOW_BASE,
195 ACPI_HI_RSDP_WINDOW_SIZE));
355 196
356 cleanup: 197 return_ACPI_STATUS(AE_NO_MEMORY);
357 if (rsdt_info->pointer) {
358 acpi_os_unmap_memory(rsdt_info->pointer,
359 (acpi_size) rsdt_info->pointer->length);
360 } 198 }
361 ACPI_FREE(rsdt_info);
362 199
363 if (header) { 200 mem_rover =
364 ACPI_FREE(header); 201 acpi_tb_scan_memory_for_rsdp(table_ptr, ACPI_HI_RSDP_WINDOW_SIZE);
365 } 202 acpi_os_unmap_memory(table_ptr, ACPI_HI_RSDP_WINDOW_SIZE);
366 if (table_info) {
367 ACPI_FREE(table_info);
368 }
369 return_ACPI_STATUS(status);
370}
371 203
372ACPI_EXPORT_SYMBOL(acpi_get_firmware_table) 204 if (mem_rover) {
373 205
374/* TBD: Move to a new file */ 206 /* Return the physical address */
375#if ACPI_MACHINE_WIDTH != 16
376/*******************************************************************************
377 *
378 * FUNCTION: acpi_find_root_pointer
379 *
380 * PARAMETERS: Flags - Logical/Physical addressing
381 * rsdp_address - Where to place the RSDP address
382 *
383 * RETURN: Status, Physical address of the RSDP
384 *
385 * DESCRIPTION: Find the RSDP
386 *
387 ******************************************************************************/
388acpi_status acpi_find_root_pointer(u32 flags, struct acpi_pointer *rsdp_address)
389{
390 struct acpi_table_desc table_info;
391 acpi_status status;
392
393 ACPI_FUNCTION_TRACE(acpi_find_root_pointer);
394
395 /* Get the RSDP */
396 207
397 status = acpi_tb_find_rsdp(&table_info, flags); 208 physical_address = (u32)
398 if (ACPI_FAILURE(status)) { 209 (ACPI_HI_RSDP_WINDOW_BASE +
399 ACPI_EXCEPTION((AE_INFO, status, 210 ACPI_PTR_DIFF(mem_rover, table_ptr));
400 "RSDP structure not found - Flags=%X", flags));
401 211
402 return_ACPI_STATUS(AE_NO_ACPI_TABLES); 212 *table_address = physical_address;
213 return_ACPI_STATUS(AE_OK);
403 } 214 }
404 215
405 rsdp_address->pointer_type = ACPI_PHYSICAL_POINTER; 216 /* A valid RSDP was not found */
406 rsdp_address->pointer.physical = table_info.physical_address; 217
407 return_ACPI_STATUS(AE_OK); 218 ACPI_ERROR((AE_INFO, "A valid RSDP was not found"));
219 return_ACPI_STATUS(AE_NOT_FOUND);
408} 220}
409 221
410ACPI_EXPORT_SYMBOL(acpi_find_root_pointer) 222ACPI_EXPORT_SYMBOL(acpi_find_root_pointer)
@@ -440,7 +252,7 @@ static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length)
440 252
441 status = 253 status =
442 acpi_tb_validate_rsdp(ACPI_CAST_PTR 254 acpi_tb_validate_rsdp(ACPI_CAST_PTR
443 (struct rsdp_descriptor, mem_rover)); 255 (struct acpi_table_rsdp, mem_rover));
444 if (ACPI_SUCCESS(status)) { 256 if (ACPI_SUCCESS(status)) {
445 257
446 /* Sig and checksum valid, we have found a real RSDP */ 258 /* Sig and checksum valid, we have found a real RSDP */
@@ -461,189 +273,3 @@ static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length)
461 start_address)); 273 start_address));
462 return_PTR(NULL); 274 return_PTR(NULL);
463} 275}
464
465/*******************************************************************************
466 *
467 * FUNCTION: acpi_tb_find_rsdp
468 *
469 * PARAMETERS: table_info - Where the table info is returned
470 * Flags - Current memory mode (logical vs.
471 * physical addressing)
472 *
473 * RETURN: Status, RSDP physical address
474 *
475 * DESCRIPTION: Search lower 1_mbyte of memory for the root system descriptor
476 * pointer structure. If it is found, set *RSDP to point to it.
477 *
478 * NOTE1: The RSDP must be either in the first 1_k of the Extended
479 * BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.)
480 * Only a 32-bit physical address is necessary.
481 *
482 * NOTE2: This function is always available, regardless of the
483 * initialization state of the rest of ACPI.
484 *
485 ******************************************************************************/
486
487static acpi_status
488acpi_tb_find_rsdp(struct acpi_table_desc *table_info, u32 flags)
489{
490 u8 *table_ptr;
491 u8 *mem_rover;
492 u32 physical_address;
493 acpi_status status;
494
495 ACPI_FUNCTION_TRACE(tb_find_rsdp);
496
497 /*
498 * Scan supports either logical addressing or physical addressing
499 */
500 if ((flags & ACPI_MEMORY_MODE) == ACPI_LOGICAL_ADDRESSING) {
501
502 /* 1a) Get the location of the Extended BIOS Data Area (EBDA) */
503
504 status = acpi_os_map_memory((acpi_physical_address)
505 ACPI_EBDA_PTR_LOCATION,
506 ACPI_EBDA_PTR_LENGTH,
507 (void *)&table_ptr);
508 if (ACPI_FAILURE(status)) {
509 ACPI_ERROR((AE_INFO,
510 "Could not map memory at %8.8X for length %X",
511 ACPI_EBDA_PTR_LOCATION,
512 ACPI_EBDA_PTR_LENGTH));
513
514 return_ACPI_STATUS(status);
515 }
516
517 ACPI_MOVE_16_TO_32(&physical_address, table_ptr);
518
519 /* Convert segment part to physical address */
520
521 physical_address <<= 4;
522 acpi_os_unmap_memory(table_ptr, ACPI_EBDA_PTR_LENGTH);
523
524 /* EBDA present? */
525
526 if (physical_address > 0x400) {
527 /*
528 * 1b) Search EBDA paragraphs (EBDA is required to be a
529 * minimum of 1_k length)
530 */
531 status = acpi_os_map_memory((acpi_physical_address)
532 physical_address,
533 ACPI_EBDA_WINDOW_SIZE,
534 (void *)&table_ptr);
535 if (ACPI_FAILURE(status)) {
536 ACPI_ERROR((AE_INFO,
537 "Could not map memory at %8.8X for length %X",
538 physical_address,
539 ACPI_EBDA_WINDOW_SIZE));
540
541 return_ACPI_STATUS(status);
542 }
543
544 mem_rover = acpi_tb_scan_memory_for_rsdp(table_ptr,
545 ACPI_EBDA_WINDOW_SIZE);
546 acpi_os_unmap_memory(table_ptr, ACPI_EBDA_WINDOW_SIZE);
547
548 if (mem_rover) {
549
550 /* Return the physical address */
551
552 physical_address +=
553 (u32) ACPI_PTR_DIFF(mem_rover, table_ptr);
554
555 table_info->physical_address =
556 (acpi_physical_address) physical_address;
557 return_ACPI_STATUS(AE_OK);
558 }
559 }
560
561 /*
562 * 2) Search upper memory: 16-byte boundaries in E0000h-FFFFFh
563 */
564 status = acpi_os_map_memory((acpi_physical_address)
565 ACPI_HI_RSDP_WINDOW_BASE,
566 ACPI_HI_RSDP_WINDOW_SIZE,
567 (void *)&table_ptr);
568
569 if (ACPI_FAILURE(status)) {
570 ACPI_ERROR((AE_INFO,
571 "Could not map memory at %8.8X for length %X",
572 ACPI_HI_RSDP_WINDOW_BASE,
573 ACPI_HI_RSDP_WINDOW_SIZE));
574
575 return_ACPI_STATUS(status);
576 }
577
578 mem_rover =
579 acpi_tb_scan_memory_for_rsdp(table_ptr,
580 ACPI_HI_RSDP_WINDOW_SIZE);
581 acpi_os_unmap_memory(table_ptr, ACPI_HI_RSDP_WINDOW_SIZE);
582
583 if (mem_rover) {
584
585 /* Return the physical address */
586
587 physical_address = (u32)
588 (ACPI_HI_RSDP_WINDOW_BASE +
589 ACPI_PTR_DIFF(mem_rover, table_ptr));
590
591 table_info->physical_address =
592 (acpi_physical_address) physical_address;
593 return_ACPI_STATUS(AE_OK);
594 }
595 }
596
597 /*
598 * Physical addressing
599 */
600 else {
601 /* 1a) Get the location of the EBDA */
602
603 ACPI_MOVE_16_TO_32(&physical_address, ACPI_EBDA_PTR_LOCATION);
604 physical_address <<= 4; /* Convert segment to physical address */
605
606 /* EBDA present? */
607
608 if (physical_address > 0x400) {
609 /*
610 * 1b) Search EBDA paragraphs (EBDA is required to be a minimum of
611 * 1_k length)
612 */
613 mem_rover =
614 acpi_tb_scan_memory_for_rsdp(ACPI_PHYSADDR_TO_PTR
615 (physical_address),
616 ACPI_EBDA_WINDOW_SIZE);
617 if (mem_rover) {
618
619 /* Return the physical address */
620
621 table_info->physical_address =
622 ACPI_TO_INTEGER(mem_rover);
623 return_ACPI_STATUS(AE_OK);
624 }
625 }
626
627 /* 2) Search upper memory: 16-byte boundaries in E0000h-FFFFFh */
628
629 mem_rover =
630 acpi_tb_scan_memory_for_rsdp(ACPI_PHYSADDR_TO_PTR
631 (ACPI_HI_RSDP_WINDOW_BASE),
632 ACPI_HI_RSDP_WINDOW_SIZE);
633 if (mem_rover) {
634
635 /* Found it, return the physical address */
636
637 table_info->physical_address =
638 ACPI_TO_INTEGER(mem_rover);
639 return_ACPI_STATUS(AE_OK);
640 }
641 }
642
643 /* A valid RSDP was not found */
644
645 ACPI_ERROR((AE_INFO, "No valid RSDP was found"));
646 return_ACPI_STATUS(AE_NOT_FOUND);
647}
648
649#endif
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 40ddb4dd9631..f76d3168c2b2 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -82,7 +82,7 @@ MODULE_PARM_DESC(tzp, "Thermal zone polling frequency, in 1/10 seconds.\n");
82 82
83static int acpi_thermal_add(struct acpi_device *device); 83static int acpi_thermal_add(struct acpi_device *device);
84static int acpi_thermal_remove(struct acpi_device *device, int type); 84static int acpi_thermal_remove(struct acpi_device *device, int type);
85static int acpi_thermal_resume(struct acpi_device *device, int state); 85static int acpi_thermal_resume(struct acpi_device *device);
86static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file); 86static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file);
87static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file); 87static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file);
88static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file); 88static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file);
@@ -1353,7 +1353,7 @@ static int acpi_thermal_remove(struct acpi_device *device, int type)
1353 return 0; 1353 return 0;
1354} 1354}
1355 1355
1356static int acpi_thermal_resume(struct acpi_device *device, int state) 1356static int acpi_thermal_resume(struct acpi_device *device)
1357{ 1357{
1358 struct acpi_thermal *tz = NULL; 1358 struct acpi_thermal *tz = NULL;
1359 int i; 1359 int i;
diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/utilities/utalloc.c
index f6cbc0b1bfd0..55a764807499 100644
--- a/drivers/acpi/utilities/utalloc.c
+++ b/drivers/acpi/utilities/utalloc.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -42,6 +42,7 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acdebug.h>
45 46
46#define _COMPONENT ACPI_UTILITIES 47#define _COMPONENT ACPI_UTILITIES
47ACPI_MODULE_NAME("utalloc") 48ACPI_MODULE_NAME("utalloc")
@@ -142,6 +143,14 @@ acpi_status acpi_ut_create_caches(void)
142 143
143acpi_status acpi_ut_delete_caches(void) 144acpi_status acpi_ut_delete_caches(void)
144{ 145{
146#ifdef ACPI_DBG_TRACK_ALLOCATIONS
147 char buffer[7];
148
149 if (acpi_gbl_display_final_mem_stats) {
150 ACPI_STRCPY(buffer, "MEMORY");
151 acpi_db_display_statistics(buffer);
152 }
153#endif
145 154
146 (void)acpi_os_delete_cache(acpi_gbl_namespace_cache); 155 (void)acpi_os_delete_cache(acpi_gbl_namespace_cache);
147 acpi_gbl_namespace_cache = NULL; 156 acpi_gbl_namespace_cache = NULL;
diff --git a/drivers/acpi/utilities/utcache.c b/drivers/acpi/utilities/utcache.c
index 1a1f8109159c..870f6edeb5f2 100644
--- a/drivers/acpi/utilities/utcache.c
+++ b/drivers/acpi/utilities/utcache.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -289,6 +289,14 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
289 289
290 ACPI_MEM_TRACKING(cache->total_allocated++); 290 ACPI_MEM_TRACKING(cache->total_allocated++);
291 291
292#ifdef ACPI_DBG_TRACK_ALLOCATIONS
293 if ((cache->total_allocated - cache->total_freed) >
294 cache->max_occupied) {
295 cache->max_occupied =
296 cache->total_allocated - cache->total_freed;
297 }
298#endif
299
292 /* Avoid deadlock with ACPI_ALLOCATE_ZEROED */ 300 /* Avoid deadlock with ACPI_ALLOCATE_ZEROED */
293 301
294 status = acpi_ut_release_mutex(ACPI_MTX_CACHES); 302 status = acpi_ut_release_mutex(ACPI_MTX_CACHES);
diff --git a/drivers/acpi/utilities/utcopy.c b/drivers/acpi/utilities/utcopy.c
index 5e1a80d1bc36..84d529db0a66 100644
--- a/drivers/acpi/utilities/utcopy.c
+++ b/drivers/acpi/utilities/utcopy.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -719,6 +719,15 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
719 acpi_ut_add_reference(source_desc->reference.object); 719 acpi_ut_add_reference(source_desc->reference.object);
720 break; 720 break;
721 721
722 case ACPI_TYPE_REGION:
723 /*
724 * We copied the Region Handler, so we now must add a reference
725 */
726 if (dest_desc->region.handler) {
727 acpi_ut_add_reference(dest_desc->region.handler);
728 }
729 break;
730
722 default: 731 default:
723 /* Nothing to do for other simple objects */ 732 /* Nothing to do for other simple objects */
724 break; 733 break;
diff --git a/drivers/acpi/utilities/utdebug.c b/drivers/acpi/utilities/utdebug.c
index 9e9054e155c1..61ad4f2daee2 100644
--- a/drivers/acpi/utilities/utdebug.c
+++ b/drivers/acpi/utilities/utdebug.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -181,8 +181,7 @@ acpi_ut_debug_print(u32 requested_debug_level,
181 if (ACPI_LV_THREADS & acpi_dbg_level) { 181 if (ACPI_LV_THREADS & acpi_dbg_level) {
182 acpi_os_printf 182 acpi_os_printf
183 ("\n**** Context Switch from TID %lX to TID %lX ****\n\n", 183 ("\n**** Context Switch from TID %lX to TID %lX ****\n\n",
184 (unsigned long) acpi_gbl_prev_thread_id, 184 (unsigned long)acpi_gbl_prev_thread_id, (unsigned long)thread_id);
185 (unsigned long) thread_id);
186 } 185 }
187 186
188 acpi_gbl_prev_thread_id = thread_id; 187 acpi_gbl_prev_thread_id = thread_id;
@@ -195,7 +194,7 @@ acpi_ut_debug_print(u32 requested_debug_level,
195 acpi_os_printf("%8s-%04ld ", module_name, line_number); 194 acpi_os_printf("%8s-%04ld ", module_name, line_number);
196 195
197 if (ACPI_LV_THREADS & acpi_dbg_level) { 196 if (ACPI_LV_THREADS & acpi_dbg_level) {
198 acpi_os_printf("[%04lX] ", thread_id); 197 acpi_os_printf("[%04lX] ", (unsigned long)thread_id);
199 } 198 }
200 199
201 acpi_os_printf("[%02ld] %-22.22s: ", 200 acpi_os_printf("[%02ld] %-22.22s: ",
diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c
index 9d3f1149ba21..f777cebdc46d 100644
--- a/drivers/acpi/utilities/utdelete.c
+++ b/drivers/acpi/utilities/utdelete.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -158,16 +158,20 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
158 "***** Mutex %p, OS Mutex %p\n", 158 "***** Mutex %p, OS Mutex %p\n",
159 object, object->mutex.os_mutex)); 159 object, object->mutex.os_mutex));
160 160
161 if (object->mutex.os_mutex != ACPI_GLOBAL_LOCK) { 161 if (object->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
162 acpi_ex_unlink_mutex(object); 162
163 acpi_os_delete_mutex(object->mutex.os_mutex); 163 /* Global Lock has extra semaphore */
164 } else {
165 /* Global Lock "mutex" is actually a counting semaphore */
166 164
167 (void) 165 (void)
168 acpi_os_delete_semaphore 166 acpi_os_delete_semaphore
169 (acpi_gbl_global_lock_semaphore); 167 (acpi_gbl_global_lock_semaphore);
170 acpi_gbl_global_lock_semaphore = NULL; 168 acpi_gbl_global_lock_semaphore = NULL;
169
170 acpi_os_delete_mutex(object->mutex.os_mutex);
171 acpi_gbl_global_lock_mutex = NULL;
172 } else {
173 acpi_ex_unlink_mutex(object);
174 acpi_os_delete_mutex(object->mutex.os_mutex);
171 } 175 }
172 break; 176 break;
173 177
diff --git a/drivers/acpi/utilities/uteval.c b/drivers/acpi/utilities/uteval.c
index d6d7121583c0..13d5879cd98b 100644
--- a/drivers/acpi/utilities/uteval.c
+++ b/drivers/acpi/utilities/uteval.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/utilities/utglobal.c b/drivers/acpi/utilities/utglobal.c
index 014030af8b50..af33358a964b 100644
--- a/drivers/acpi/utilities/utglobal.c
+++ b/drivers/acpi/utilities/utglobal.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -46,89 +46,9 @@
46#include <acpi/acpi.h> 46#include <acpi/acpi.h>
47#include <acpi/acnamesp.h> 47#include <acpi/acnamesp.h>
48 48
49ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
49#define _COMPONENT ACPI_UTILITIES 50#define _COMPONENT ACPI_UTILITIES
50ACPI_MODULE_NAME("utglobal") 51 ACPI_MODULE_NAME("utglobal")
51
52/*******************************************************************************
53 *
54 * FUNCTION: acpi_format_exception
55 *
56 * PARAMETERS: Status - The acpi_status code to be formatted
57 *
58 * RETURN: A string containing the exception text. A valid pointer is
59 * always returned.
60 *
61 * DESCRIPTION: This function translates an ACPI exception into an ASCII string.
62 *
63 ******************************************************************************/
64const char *acpi_format_exception(acpi_status status)
65{
66 acpi_status sub_status;
67 const char *exception = NULL;
68
69 ACPI_FUNCTION_ENTRY();
70
71 /*
72 * Status is composed of two parts, a "type" and an actual code
73 */
74 sub_status = (status & ~AE_CODE_MASK);
75
76 switch (status & AE_CODE_MASK) {
77 case AE_CODE_ENVIRONMENTAL:
78
79 if (sub_status <= AE_CODE_ENV_MAX) {
80 exception = acpi_gbl_exception_names_env[sub_status];
81 }
82 break;
83
84 case AE_CODE_PROGRAMMER:
85
86 if (sub_status <= AE_CODE_PGM_MAX) {
87 exception =
88 acpi_gbl_exception_names_pgm[sub_status - 1];
89 }
90 break;
91
92 case AE_CODE_ACPI_TABLES:
93
94 if (sub_status <= AE_CODE_TBL_MAX) {
95 exception =
96 acpi_gbl_exception_names_tbl[sub_status - 1];
97 }
98 break;
99
100 case AE_CODE_AML:
101
102 if (sub_status <= AE_CODE_AML_MAX) {
103 exception =
104 acpi_gbl_exception_names_aml[sub_status - 1];
105 }
106 break;
107
108 case AE_CODE_CONTROL:
109
110 if (sub_status <= AE_CODE_CTRL_MAX) {
111 exception =
112 acpi_gbl_exception_names_ctrl[sub_status - 1];
113 }
114 break;
115
116 default:
117 break;
118 }
119
120 if (!exception) {
121
122 /* Exception code was not recognized */
123
124 ACPI_ERROR((AE_INFO,
125 "Unknown exception code: 0x%8.8X", status));
126
127 exception = "UNKNOWN_STATUS_CODE";
128 }
129
130 return (ACPI_CAST_PTR(const char, exception));
131}
132 52
133/******************************************************************************* 53/*******************************************************************************
134 * 54 *
@@ -163,8 +83,6 @@ u32 acpi_gbl_startup_flags = 0;
163 83
164u8 acpi_gbl_shutdown = TRUE; 84u8 acpi_gbl_shutdown = TRUE;
165 85
166const u8 acpi_gbl_decode_to8bit[8] = { 1, 2, 4, 8, 16, 32, 64, 128 };
167
168const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT] = { 86const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT] = {
169 "\\_S0_", 87 "\\_S0_",
170 "\\_S1_", 88 "\\_S1_",
@@ -183,10 +101,45 @@ const char *acpi_gbl_highest_dstate_names[4] = {
183 101
184/******************************************************************************* 102/*******************************************************************************
185 * 103 *
186 * Namespace globals 104 * FUNCTION: acpi_format_exception
105 *
106 * PARAMETERS: Status - The acpi_status code to be formatted
107 *
108 * RETURN: A string containing the exception text. A valid pointer is
109 * always returned.
110 *
111 * DESCRIPTION: This function translates an ACPI exception into an ASCII string
112 * It is here instead of utxface.c so it is always present.
187 * 113 *
188 ******************************************************************************/ 114 ******************************************************************************/
189 115
116const char *acpi_format_exception(acpi_status status)
117{
118 const char *exception = NULL;
119
120 ACPI_FUNCTION_ENTRY();
121
122 exception = acpi_ut_validate_exception(status);
123 if (!exception) {
124
125 /* Exception code was not recognized */
126
127 ACPI_ERROR((AE_INFO,
128 "Unknown exception code: 0x%8.8X", status));
129
130 exception = "UNKNOWN_STATUS_CODE";
131 }
132
133 return (ACPI_CAST_PTR(const char, exception));
134}
135
136ACPI_EXPORT_SYMBOL(acpi_format_exception)
137
138/*******************************************************************************
139 *
140 * Namespace globals
141 *
142 ******************************************************************************/
190/* 143/*
191 * Predefined ACPI Names (Built-in to the Interpreter) 144 * Predefined ACPI Names (Built-in to the Interpreter)
192 * 145 *
@@ -280,53 +233,6 @@ char acpi_ut_hex_to_ascii_char(acpi_integer integer, u32 position)
280 return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]); 233 return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]);
281} 234}
282 235
283/*******************************************************************************
284 *
285 * Table name globals
286 *
287 * NOTE: This table includes ONLY the ACPI tables that the subsystem consumes.
288 * it is NOT an exhaustive list of all possible ACPI tables. All ACPI tables
289 * that are not used by the subsystem are simply ignored.
290 *
291 * Do NOT add any table to this list that is not consumed directly by this
292 * subsystem (No MADT, ECDT, SBST, etc.)
293 *
294 ******************************************************************************/
295
296struct acpi_table_list acpi_gbl_table_lists[ACPI_TABLE_ID_MAX + 1];
297
298struct acpi_table_support acpi_gbl_table_data[ACPI_TABLE_ID_MAX + 1] = {
299 /*********** Name, Signature, Global typed pointer Signature size, Type How many allowed?, Contains valid AML? */
300
301 /* RSDP 0 */ {RSDP_NAME, RSDP_SIG, NULL, sizeof(RSDP_SIG) - 1,
302 ACPI_TABLE_ROOT | ACPI_TABLE_SINGLE}
303 ,
304 /* DSDT 1 */ {DSDT_SIG, DSDT_SIG, (void *)&acpi_gbl_DSDT,
305 sizeof(DSDT_SIG) - 1,
306 ACPI_TABLE_SECONDARY | ACPI_TABLE_SINGLE |
307 ACPI_TABLE_EXECUTABLE}
308 ,
309 /* FADT 2 */ {FADT_SIG, FADT_SIG, (void *)&acpi_gbl_FADT,
310 sizeof(FADT_SIG) - 1,
311 ACPI_TABLE_PRIMARY | ACPI_TABLE_SINGLE}
312 ,
313 /* FACS 3 */ {FACS_SIG, FACS_SIG, (void *)&acpi_gbl_FACS,
314 sizeof(FACS_SIG) - 1,
315 ACPI_TABLE_SECONDARY | ACPI_TABLE_SINGLE}
316 ,
317 /* PSDT 4 */ {PSDT_SIG, PSDT_SIG, NULL, sizeof(PSDT_SIG) - 1,
318 ACPI_TABLE_PRIMARY | ACPI_TABLE_MULTIPLE |
319 ACPI_TABLE_EXECUTABLE}
320 ,
321 /* SSDT 5 */ {SSDT_SIG, SSDT_SIG, NULL, sizeof(SSDT_SIG) - 1,
322 ACPI_TABLE_PRIMARY | ACPI_TABLE_MULTIPLE |
323 ACPI_TABLE_EXECUTABLE}
324 ,
325 /* XSDT 6 */ {XSDT_SIG, XSDT_SIG, NULL, sizeof(RSDT_SIG) - 1,
326 ACPI_TABLE_ROOT | ACPI_TABLE_SINGLE}
327 ,
328};
329
330/****************************************************************************** 236/******************************************************************************
331 * 237 *
332 * Event and Hardware globals 238 * Event and Hardware globals
@@ -612,7 +518,7 @@ char *acpi_ut_get_node_name(void *object)
612 /* Name must be a valid ACPI name */ 518 /* Name must be a valid ACPI name */
613 519
614 if (!acpi_ut_valid_acpi_name(node->name.integer)) { 520 if (!acpi_ut_valid_acpi_name(node->name.integer)) {
615 node->name.integer = acpi_ut_repair_name(node->name.integer); 521 node->name.integer = acpi_ut_repair_name(node->name.ascii);
616 } 522 }
617 523
618 /* Return the name */ 524 /* Return the name */
@@ -751,13 +657,6 @@ void acpi_ut_init_globals(void)
751 return; 657 return;
752 } 658 }
753 659
754 /* ACPI table structure */
755
756 for (i = 0; i < (ACPI_TABLE_ID_MAX + 1); i++) {
757 acpi_gbl_table_lists[i].next = NULL;
758 acpi_gbl_table_lists[i].count = 0;
759 }
760
761 /* Mutex locked flags */ 660 /* Mutex locked flags */
762 661
763 for (i = 0; i < ACPI_NUM_MUTEX; i++) { 662 for (i = 0; i < ACPI_NUM_MUTEX; i++) {
@@ -773,6 +672,7 @@ void acpi_ut_init_globals(void)
773 672
774 /* GPE support */ 673 /* GPE support */
775 674
675 acpi_gpe_count = 0;
776 acpi_gbl_gpe_xrupt_list_head = NULL; 676 acpi_gbl_gpe_xrupt_list_head = NULL;
777 acpi_gbl_gpe_fadt_blocks[0] = NULL; 677 acpi_gbl_gpe_fadt_blocks[0] = NULL;
778 acpi_gbl_gpe_fadt_blocks[1] = NULL; 678 acpi_gbl_gpe_fadt_blocks[1] = NULL;
@@ -784,25 +684,15 @@ void acpi_ut_init_globals(void)
784 acpi_gbl_exception_handler = NULL; 684 acpi_gbl_exception_handler = NULL;
785 acpi_gbl_init_handler = NULL; 685 acpi_gbl_init_handler = NULL;
786 686
787 /* Global "typed" ACPI table pointers */
788
789 acpi_gbl_RSDP = NULL;
790 acpi_gbl_XSDT = NULL;
791 acpi_gbl_FACS = NULL;
792 acpi_gbl_FADT = NULL;
793 acpi_gbl_DSDT = NULL;
794
795 /* Global Lock support */ 687 /* Global Lock support */
796 688
797 acpi_gbl_global_lock_semaphore = NULL; 689 acpi_gbl_global_lock_semaphore = NULL;
690 acpi_gbl_global_lock_mutex = NULL;
798 acpi_gbl_global_lock_acquired = FALSE; 691 acpi_gbl_global_lock_acquired = FALSE;
799 acpi_gbl_global_lock_thread_count = 0;
800 acpi_gbl_global_lock_handle = 0; 692 acpi_gbl_global_lock_handle = 0;
801 693
802 /* Miscellaneous variables */ 694 /* Miscellaneous variables */
803 695
804 acpi_gbl_table_flags = ACPI_PHYSICAL_POINTER;
805 acpi_gbl_rsdp_original_location = 0;
806 acpi_gbl_cm_single_step = FALSE; 696 acpi_gbl_cm_single_step = FALSE;
807 acpi_gbl_db_terminate_threads = FALSE; 697 acpi_gbl_db_terminate_threads = FALSE;
808 acpi_gbl_shutdown = FALSE; 698 acpi_gbl_shutdown = FALSE;
@@ -837,8 +727,13 @@ void acpi_ut_init_globals(void)
837 acpi_gbl_lowest_stack_pointer = ACPI_SIZE_MAX; 727 acpi_gbl_lowest_stack_pointer = ACPI_SIZE_MAX;
838#endif 728#endif
839 729
730#ifdef ACPI_DBG_TRACK_ALLOCATIONS
731 acpi_gbl_display_final_mem_stats = FALSE;
732#endif
733
840 return_VOID; 734 return_VOID;
841} 735}
842 736
843ACPI_EXPORT_SYMBOL(acpi_dbg_level) 737ACPI_EXPORT_SYMBOL(acpi_dbg_level)
844ACPI_EXPORT_SYMBOL(acpi_dbg_layer) 738ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
739ACPI_EXPORT_SYMBOL(acpi_gpe_count)
diff --git a/drivers/acpi/utilities/utinit.c b/drivers/acpi/utilities/utinit.c
index ff76055eb7d6..ad3c0d0a5cf8 100644
--- a/drivers/acpi/utilities/utinit.c
+++ b/drivers/acpi/utilities/utinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -44,119 +44,14 @@
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acnamesp.h> 45#include <acpi/acnamesp.h>
46#include <acpi/acevents.h> 46#include <acpi/acevents.h>
47#include <acpi/actables.h>
47 48
48#define _COMPONENT ACPI_UTILITIES 49#define _COMPONENT ACPI_UTILITIES
49ACPI_MODULE_NAME("utinit") 50ACPI_MODULE_NAME("utinit")
50 51
51/* Local prototypes */ 52/* Local prototypes */
52static void
53acpi_ut_fadt_register_error(char *register_name, u32 value, u8 offset);
54
55static void acpi_ut_terminate(void); 53static void acpi_ut_terminate(void);
56 54
57/*******************************************************************************
58 *
59 * FUNCTION: acpi_ut_fadt_register_error
60 *
61 * PARAMETERS: register_name - Pointer to string identifying register
62 * Value - Actual register contents value
63 * Offset - Byte offset in the FADT
64 *
65 * RETURN: AE_BAD_VALUE
66 *
67 * DESCRIPTION: Display failure message
68 *
69 ******************************************************************************/
70
71static void
72acpi_ut_fadt_register_error(char *register_name, u32 value, u8 offset)
73{
74
75 ACPI_WARNING((AE_INFO,
76 "Invalid FADT value %s=%X at offset %X FADT=%p",
77 register_name, value, offset, acpi_gbl_FADT));
78}
79
80/******************************************************************************
81 *
82 * FUNCTION: acpi_ut_validate_fadt
83 *
84 * PARAMETERS: None
85 *
86 * RETURN: Status
87 *
88 * DESCRIPTION: Validate various ACPI registers in the FADT
89 *
90 ******************************************************************************/
91
92acpi_status acpi_ut_validate_fadt(void)
93{
94
95 /*
96 * Verify Fixed ACPI Description Table fields,
97 * but don't abort on any problems, just display error
98 */
99 if (acpi_gbl_FADT->pm1_evt_len < 4) {
100 acpi_ut_fadt_register_error("PM1_EVT_LEN",
101 (u32) acpi_gbl_FADT->pm1_evt_len,
102 ACPI_FADT_OFFSET(pm1_evt_len));
103 }
104
105 if (!acpi_gbl_FADT->pm1_cnt_len) {
106 acpi_ut_fadt_register_error("PM1_CNT_LEN", 0,
107 ACPI_FADT_OFFSET(pm1_cnt_len));
108 }
109
110 if (!acpi_gbl_FADT->xpm1a_evt_blk.address) {
111 acpi_ut_fadt_register_error("X_PM1a_EVT_BLK", 0,
112 ACPI_FADT_OFFSET(xpm1a_evt_blk.
113 address));
114 }
115
116 if (!acpi_gbl_FADT->xpm1a_cnt_blk.address) {
117 acpi_ut_fadt_register_error("X_PM1a_CNT_BLK", 0,
118 ACPI_FADT_OFFSET(xpm1a_cnt_blk.
119 address));
120 }
121
122 if (!acpi_gbl_FADT->xpm_tmr_blk.address) {
123 acpi_ut_fadt_register_error("X_PM_TMR_BLK", 0,
124 ACPI_FADT_OFFSET(xpm_tmr_blk.
125 address));
126 }
127
128 if ((acpi_gbl_FADT->xpm2_cnt_blk.address &&
129 !acpi_gbl_FADT->pm2_cnt_len)) {
130 acpi_ut_fadt_register_error("PM2_CNT_LEN",
131 (u32) acpi_gbl_FADT->pm2_cnt_len,
132 ACPI_FADT_OFFSET(pm2_cnt_len));
133 }
134
135 if (acpi_gbl_FADT->pm_tm_len < 4) {
136 acpi_ut_fadt_register_error("PM_TM_LEN",
137 (u32) acpi_gbl_FADT->pm_tm_len,
138 ACPI_FADT_OFFSET(pm_tm_len));
139 }
140
141 /* Length of GPE blocks must be a multiple of 2 */
142
143 if (acpi_gbl_FADT->xgpe0_blk.address &&
144 (acpi_gbl_FADT->gpe0_blk_len & 1)) {
145 acpi_ut_fadt_register_error("(x)GPE0_BLK_LEN",
146 (u32) acpi_gbl_FADT->gpe0_blk_len,
147 ACPI_FADT_OFFSET(gpe0_blk_len));
148 }
149
150 if (acpi_gbl_FADT->xgpe1_blk.address &&
151 (acpi_gbl_FADT->gpe1_blk_len & 1)) {
152 acpi_ut_fadt_register_error("(x)GPE1_BLK_LEN",
153 (u32) acpi_gbl_FADT->gpe1_blk_len,
154 ACPI_FADT_OFFSET(gpe1_blk_len));
155 }
156
157 return (AE_OK);
158}
159
160/****************************************************************************** 55/******************************************************************************
161 * 56 *
162 * FUNCTION: acpi_ut_terminate 57 * FUNCTION: acpi_ut_terminate
@@ -178,7 +73,6 @@ static void acpi_ut_terminate(void)
178 73
179 ACPI_FUNCTION_TRACE(ut_terminate); 74 ACPI_FUNCTION_TRACE(ut_terminate);
180 75
181 /* Free global tables, etc. */
182 /* Free global GPE blocks and related info structures */ 76 /* Free global GPE blocks and related info structures */
183 77
184 gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head; 78 gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
@@ -239,6 +133,10 @@ void acpi_ut_subsystem_shutdown(void)
239 133
240 acpi_ns_terminate(); 134 acpi_ns_terminate();
241 135
136 /* Delete the ACPI tables */
137
138 acpi_tb_terminate();
139
242 /* Close the globals */ 140 /* Close the globals */
243 141
244 acpi_ut_terminate(); 142 acpi_ut_terminate();
diff --git a/drivers/acpi/utilities/utmath.c b/drivers/acpi/utilities/utmath.c
index 19d74bedce27..0c56a0d20b29 100644
--- a/drivers/acpi/utilities/utmath.c
+++ b/drivers/acpi/utilities/utmath.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/utilities/utmisc.c b/drivers/acpi/utilities/utmisc.c
index 6d8a8211be90..50133fffe420 100644
--- a/drivers/acpi/utilities/utmisc.c
+++ b/drivers/acpi/utilities/utmisc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -51,6 +51,78 @@ ACPI_MODULE_NAME("utmisc")
51 51
52/******************************************************************************* 52/*******************************************************************************
53 * 53 *
54 * FUNCTION: acpi_ut_validate_exception
55 *
56 * PARAMETERS: Status - The acpi_status code to be formatted
57 *
58 * RETURN: A string containing the exception text. NULL if exception is
59 * not valid.
60 *
61 * DESCRIPTION: This function validates and translates an ACPI exception into
62 * an ASCII string.
63 *
64 ******************************************************************************/
65const char *acpi_ut_validate_exception(acpi_status status)
66{
67 acpi_status sub_status;
68 const char *exception = NULL;
69
70 ACPI_FUNCTION_ENTRY();
71
72 /*
73 * Status is composed of two parts, a "type" and an actual code
74 */
75 sub_status = (status & ~AE_CODE_MASK);
76
77 switch (status & AE_CODE_MASK) {
78 case AE_CODE_ENVIRONMENTAL:
79
80 if (sub_status <= AE_CODE_ENV_MAX) {
81 exception = acpi_gbl_exception_names_env[sub_status];
82 }
83 break;
84
85 case AE_CODE_PROGRAMMER:
86
87 if (sub_status <= AE_CODE_PGM_MAX) {
88 exception =
89 acpi_gbl_exception_names_pgm[sub_status - 1];
90 }
91 break;
92
93 case AE_CODE_ACPI_TABLES:
94
95 if (sub_status <= AE_CODE_TBL_MAX) {
96 exception =
97 acpi_gbl_exception_names_tbl[sub_status - 1];
98 }
99 break;
100
101 case AE_CODE_AML:
102
103 if (sub_status <= AE_CODE_AML_MAX) {
104 exception =
105 acpi_gbl_exception_names_aml[sub_status - 1];
106 }
107 break;
108
109 case AE_CODE_CONTROL:
110
111 if (sub_status <= AE_CODE_CTRL_MAX) {
112 exception =
113 acpi_gbl_exception_names_ctrl[sub_status - 1];
114 }
115 break;
116
117 default:
118 break;
119 }
120
121 return (ACPI_CAST_PTR(const char, exception));
122}
123
124/*******************************************************************************
125 *
54 * FUNCTION: acpi_ut_is_aml_table 126 * FUNCTION: acpi_ut_is_aml_table
55 * 127 *
56 * PARAMETERS: Table - An ACPI table 128 * PARAMETERS: Table - An ACPI table
@@ -62,14 +134,15 @@ ACPI_MODULE_NAME("utmisc")
62 * data tables that do not contain AML code. 134 * data tables that do not contain AML code.
63 * 135 *
64 ******************************************************************************/ 136 ******************************************************************************/
137
65u8 acpi_ut_is_aml_table(struct acpi_table_header *table) 138u8 acpi_ut_is_aml_table(struct acpi_table_header *table)
66{ 139{
67 140
68 /* These are the only tables that contain executable AML */ 141 /* These are the only tables that contain executable AML */
69 142
70 if (ACPI_COMPARE_NAME(table->signature, DSDT_SIG) || 143 if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) ||
71 ACPI_COMPARE_NAME(table->signature, PSDT_SIG) || 144 ACPI_COMPARE_NAME(table->signature, ACPI_SIG_PSDT) ||
72 ACPI_COMPARE_NAME(table->signature, SSDT_SIG)) { 145 ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT)) {
73 return (TRUE); 146 return (TRUE);
74 } 147 }
75 148
@@ -418,7 +491,7 @@ u32 acpi_ut_dword_byte_swap(u32 value)
418void acpi_ut_set_integer_width(u8 revision) 491void acpi_ut_set_integer_width(u8 revision)
419{ 492{
420 493
421 if (revision <= 1) { 494 if (revision < 2) {
422 495
423 /* 32-bit case */ 496 /* 32-bit case */
424 497
@@ -582,26 +655,25 @@ u8 acpi_ut_valid_acpi_name(u32 name)
582 * 655 *
583 ******************************************************************************/ 656 ******************************************************************************/
584 657
585acpi_name acpi_ut_repair_name(acpi_name name) 658acpi_name acpi_ut_repair_name(char *name)
586{ 659{
587 char *name_ptr = ACPI_CAST_PTR(char, &name);
588 char new_name[ACPI_NAME_SIZE];
589 acpi_native_uint i; 660 acpi_native_uint i;
661 char new_name[ACPI_NAME_SIZE];
590 662
591 for (i = 0; i < ACPI_NAME_SIZE; i++) { 663 for (i = 0; i < ACPI_NAME_SIZE; i++) {
592 new_name[i] = name_ptr[i]; 664 new_name[i] = name[i];
593 665
594 /* 666 /*
595 * Replace a bad character with something printable, yet technically 667 * Replace a bad character with something printable, yet technically
596 * still invalid. This prevents any collisions with existing "good" 668 * still invalid. This prevents any collisions with existing "good"
597 * names in the namespace. 669 * names in the namespace.
598 */ 670 */
599 if (!acpi_ut_valid_acpi_char(name_ptr[i], i)) { 671 if (!acpi_ut_valid_acpi_char(name[i], i)) {
600 new_name[i] = '*'; 672 new_name[i] = '*';
601 } 673 }
602 } 674 }
603 675
604 return (*ACPI_CAST_PTR(u32, new_name)); 676 return (*(u32 *) new_name);
605} 677}
606 678
607/******************************************************************************* 679/*******************************************************************************
@@ -996,9 +1068,13 @@ acpi_ut_info(char *module_name, u32 line_number, char *format, ...)
996{ 1068{
997 va_list args; 1069 va_list args;
998 1070
999 acpi_os_printf("ACPI (%s-%04d): ", module_name, line_number); 1071 /*
1072 * Removed module_name, line_number, and acpica version, not needed
1073 * for info output
1074 */
1075 acpi_os_printf("ACPI: ");
1000 1076
1001 va_start(args, format); 1077 va_start(args, format);
1002 acpi_os_vprintf(format, args); 1078 acpi_os_vprintf(format, args);
1003 acpi_os_printf(" [%X]\n", ACPI_CA_VERSION); 1079 acpi_os_printf("\n");
1004} 1080}
diff --git a/drivers/acpi/utilities/utmutex.c b/drivers/acpi/utilities/utmutex.c
index 180e73ceb6e2..cbad2ef5987d 100644
--- a/drivers/acpi/utilities/utmutex.c
+++ b/drivers/acpi/utilities/utmutex.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/utilities/utobject.c b/drivers/acpi/utilities/utobject.c
index ba7d8ac702df..4696124759e1 100644
--- a/drivers/acpi/utilities/utobject.c
+++ b/drivers/acpi/utilities/utobject.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/utilities/utresrc.c b/drivers/acpi/utilities/utresrc.c
index 5a2de92831d3..e8fe1ba6cc24 100644
--- a/drivers/acpi/utilities/utresrc.c
+++ b/drivers/acpi/utilities/utresrc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/utilities/utstate.c b/drivers/acpi/utilities/utstate.c
index eaa13d05c859..edcaafad0a31 100644
--- a/drivers/acpi/utilities/utstate.c
+++ b/drivers/acpi/utilities/utstate.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/utilities/utxface.c b/drivers/acpi/utilities/utxface.c
index 3538f69c82a1..de3276f4f468 100644
--- a/drivers/acpi/utilities/utxface.c
+++ b/drivers/acpi/utilities/utxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -67,6 +67,7 @@ acpi_status acpi_initialize_subsystem(void)
67 67
68 ACPI_FUNCTION_TRACE(acpi_initialize_subsystem); 68 ACPI_FUNCTION_TRACE(acpi_initialize_subsystem);
69 69
70 acpi_gbl_startup_flags = ACPI_SUBSYSTEM_INITIALIZE;
70 ACPI_DEBUG_EXEC(acpi_ut_init_stack_ptr_trace()); 71 ACPI_DEBUG_EXEC(acpi_ut_init_stack_ptr_trace());
71 72
72 /* Initialize the OS-Dependent layer */ 73 /* Initialize the OS-Dependent layer */
@@ -127,20 +128,6 @@ acpi_status acpi_enable_subsystem(u32 flags)
127 128
128 ACPI_FUNCTION_TRACE(acpi_enable_subsystem); 129 ACPI_FUNCTION_TRACE(acpi_enable_subsystem);
129 130
130 /*
131 * We must initialize the hardware before we can enable ACPI.
132 * The values from the FADT are validated here.
133 */
134 if (!(flags & ACPI_NO_HARDWARE_INIT)) {
135 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
136 "[Init] Initializing ACPI hardware\n"));
137
138 status = acpi_hw_initialize();
139 if (ACPI_FAILURE(status)) {
140 return_ACPI_STATUS(status);
141 }
142 }
143
144 /* Enable ACPI mode */ 131 /* Enable ACPI mode */
145 132
146 if (!(flags & ACPI_NO_ACPI_ENABLE)) { 133 if (!(flags & ACPI_NO_ACPI_ENABLE)) {
@@ -398,7 +385,6 @@ acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer)
398{ 385{
399 struct acpi_system_info *info_ptr; 386 struct acpi_system_info *info_ptr;
400 acpi_status status; 387 acpi_status status;
401 u32 i;
402 388
403 ACPI_FUNCTION_TRACE(acpi_get_system_info); 389 ACPI_FUNCTION_TRACE(acpi_get_system_info);
404 390
@@ -431,9 +417,7 @@ acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer)
431 417
432 /* Timer resolution - 24 or 32 bits */ 418 /* Timer resolution - 24 or 32 bits */
433 419
434 if (!acpi_gbl_FADT) { 420 if (acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) {
435 info_ptr->timer_resolution = 0;
436 } else if (acpi_gbl_FADT->tmr_val_ext == 0) {
437 info_ptr->timer_resolution = 24; 421 info_ptr->timer_resolution = 24;
438 } else { 422 } else {
439 info_ptr->timer_resolution = 32; 423 info_ptr->timer_resolution = 32;
@@ -449,13 +433,6 @@ acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer)
449 info_ptr->debug_layer = acpi_dbg_layer; 433 info_ptr->debug_layer = acpi_dbg_layer;
450 info_ptr->debug_level = acpi_dbg_level; 434 info_ptr->debug_level = acpi_dbg_level;
451 435
452 /* Current status of the ACPI tables, per table type */
453
454 info_ptr->num_table_types = ACPI_TABLE_ID_MAX + 1;
455 for (i = 0; i < (ACPI_TABLE_ID_MAX + 1); i++) {
456 info_ptr->table_info[i].count = acpi_gbl_table_lists[i].count;
457 }
458
459 return_ACPI_STATUS(AE_OK); 436 return_ACPI_STATUS(AE_OK);
460} 437}
461 438
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 3d54680d0333..e0b97add8c63 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -32,6 +32,7 @@
32#include <linux/proc_fs.h> 32#include <linux/proc_fs.h>
33#include <linux/seq_file.h> 33#include <linux/seq_file.h>
34 34
35#include <linux/backlight.h>
35#include <asm/uaccess.h> 36#include <asm/uaccess.h>
36 37
37#include <acpi/acpi_bus.h> 38#include <acpi/acpi_bus.h>
@@ -56,6 +57,12 @@
56 57
57#define ACPI_VIDEO_HEAD_INVALID (~0u - 1) 58#define ACPI_VIDEO_HEAD_INVALID (~0u - 1)
58#define ACPI_VIDEO_HEAD_END (~0u) 59#define ACPI_VIDEO_HEAD_END (~0u)
60#define MAX_NAME_LEN 20
61
62#define ACPI_VIDEO_DISPLAY_CRT 1
63#define ACPI_VIDEO_DISPLAY_TV 2
64#define ACPI_VIDEO_DISPLAY_DVI 3
65#define ACPI_VIDEO_DISPLAY_LCD 4
59 66
60#define _COMPONENT ACPI_VIDEO_COMPONENT 67#define _COMPONENT ACPI_VIDEO_COMPONENT
61ACPI_MODULE_NAME("acpi_video") 68ACPI_MODULE_NAME("acpi_video")
@@ -66,16 +73,14 @@ MODULE_LICENSE("GPL");
66 73
67static int acpi_video_bus_add(struct acpi_device *device); 74static int acpi_video_bus_add(struct acpi_device *device);
68static int acpi_video_bus_remove(struct acpi_device *device, int type); 75static int acpi_video_bus_remove(struct acpi_device *device, int type);
69static int acpi_video_bus_match(struct acpi_device *device,
70 struct acpi_driver *driver);
71 76
72static struct acpi_driver acpi_video_bus = { 77static struct acpi_driver acpi_video_bus = {
73 .name = ACPI_VIDEO_DRIVER_NAME, 78 .name = ACPI_VIDEO_DRIVER_NAME,
74 .class = ACPI_VIDEO_CLASS, 79 .class = ACPI_VIDEO_CLASS,
80 .ids = ACPI_VIDEO_HID,
75 .ops = { 81 .ops = {
76 .add = acpi_video_bus_add, 82 .add = acpi_video_bus_add,
77 .remove = acpi_video_bus_remove, 83 .remove = acpi_video_bus_remove,
78 .match = acpi_video_bus_match,
79 }, 84 },
80}; 85};
81 86
@@ -133,20 +138,21 @@ struct acpi_video_device_flags {
133 u8 crt:1; 138 u8 crt:1;
134 u8 lcd:1; 139 u8 lcd:1;
135 u8 tvout:1; 140 u8 tvout:1;
141 u8 dvi:1;
136 u8 bios:1; 142 u8 bios:1;
137 u8 unknown:1; 143 u8 unknown:1;
138 u8 reserved:3; 144 u8 reserved:2;
139}; 145};
140 146
141struct acpi_video_device_cap { 147struct acpi_video_device_cap {
142 u8 _ADR:1; /*Return the unique ID */ 148 u8 _ADR:1; /*Return the unique ID */
143 u8 _BCL:1; /*Query list of brightness control levels supported */ 149 u8 _BCL:1; /*Query list of brightness control levels supported */
144 u8 _BCM:1; /*Set the brightness level */ 150 u8 _BCM:1; /*Set the brightness level */
151 u8 _BQC:1; /* Get current brightness level */
145 u8 _DDC:1; /*Return the EDID for this device */ 152 u8 _DDC:1; /*Return the EDID for this device */
146 u8 _DCS:1; /*Return status of output device */ 153 u8 _DCS:1; /*Return status of output device */
147 u8 _DGS:1; /*Query graphics state */ 154 u8 _DGS:1; /*Query graphics state */
148 u8 _DSS:1; /*Device state set */ 155 u8 _DSS:1; /*Device state set */
149 u8 _reserved:1;
150}; 156};
151 157
152struct acpi_video_device_brightness { 158struct acpi_video_device_brightness {
@@ -163,6 +169,8 @@ struct acpi_video_device {
163 struct acpi_video_bus *video; 169 struct acpi_video_bus *video;
164 struct acpi_device *dev; 170 struct acpi_device *dev;
165 struct acpi_video_device_brightness *brightness; 171 struct acpi_video_device_brightness *brightness;
172 struct backlight_device *backlight;
173 struct backlight_properties *data;
166}; 174};
167 175
168/* bus */ 176/* bus */
@@ -257,11 +265,35 @@ static void acpi_video_device_bind(struct acpi_video_bus *video,
257 struct acpi_video_device *device); 265 struct acpi_video_device *device);
258static int acpi_video_device_enumerate(struct acpi_video_bus *video); 266static int acpi_video_device_enumerate(struct acpi_video_bus *video);
259static int acpi_video_switch_output(struct acpi_video_bus *video, int event); 267static int acpi_video_switch_output(struct acpi_video_bus *video, int event);
268static int acpi_video_device_lcd_set_level(struct acpi_video_device *device,
269 int level);
270static int acpi_video_device_lcd_get_level_current(
271 struct acpi_video_device *device,
272 unsigned long *level);
260static int acpi_video_get_next_level(struct acpi_video_device *device, 273static int acpi_video_get_next_level(struct acpi_video_device *device,
261 u32 level_current, u32 event); 274 u32 level_current, u32 event);
262static void acpi_video_switch_brightness(struct acpi_video_device *device, 275static void acpi_video_switch_brightness(struct acpi_video_device *device,
263 int event); 276 int event);
264 277
278/*backlight device sysfs support*/
279static int acpi_video_get_brightness(struct backlight_device *bd)
280{
281 unsigned long cur_level;
282 struct acpi_video_device *vd =
283 (struct acpi_video_device *)class_get_devdata(&bd->class_dev);
284 acpi_video_device_lcd_get_level_current(vd, &cur_level);
285 return (int) cur_level;
286}
287
288static int acpi_video_set_brightness(struct backlight_device *bd)
289{
290 int request_level = bd->props->brightness;
291 struct acpi_video_device *vd =
292 (struct acpi_video_device *)class_get_devdata(&bd->class_dev);
293 acpi_video_device_lcd_set_level(vd, request_level);
294 return 0;
295}
296
265/* -------------------------------------------------------------------------- 297/* --------------------------------------------------------------------------
266 Video Management 298 Video Management
267 -------------------------------------------------------------------------- */ 299 -------------------------------------------------------------------------- */
@@ -499,6 +531,7 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
499 acpi_integer status; 531 acpi_integer status;
500 acpi_handle h_dummy1; 532 acpi_handle h_dummy1;
501 int i; 533 int i;
534 u32 max_level = 0;
502 union acpi_object *obj = NULL; 535 union acpi_object *obj = NULL;
503 struct acpi_video_device_brightness *br = NULL; 536 struct acpi_video_device_brightness *br = NULL;
504 537
@@ -514,6 +547,8 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
514 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_BCM", &h_dummy1))) { 547 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_BCM", &h_dummy1))) {
515 device->cap._BCM = 1; 548 device->cap._BCM = 1;
516 } 549 }
550 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle,"_BQC",&h_dummy1)))
551 device->cap._BQC = 1;
517 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DDC", &h_dummy1))) { 552 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DDC", &h_dummy1))) {
518 device->cap._DDC = 1; 553 device->cap._DDC = 1;
519 } 554 }
@@ -550,6 +585,8 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
550 continue; 585 continue;
551 } 586 }
552 br->levels[count] = (u32) o->integer.value; 587 br->levels[count] = (u32) o->integer.value;
588 if (br->levels[count] > max_level)
589 max_level = br->levels[count];
553 count++; 590 count++;
554 } 591 }
555 out: 592 out:
@@ -568,6 +605,37 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
568 605
569 kfree(obj); 606 kfree(obj);
570 607
608 if (device->cap._BCL && device->cap._BCM && device->cap._BQC){
609 unsigned long tmp;
610 static int count = 0;
611 char *name;
612 struct backlight_properties *acpi_video_data;
613
614 name = kzalloc(MAX_NAME_LEN, GFP_KERNEL);
615 if (!name)
616 return;
617
618 acpi_video_data = kzalloc(
619 sizeof(struct backlight_properties),
620 GFP_KERNEL);
621 if (!acpi_video_data){
622 kfree(name);
623 return;
624 }
625 acpi_video_data->owner = THIS_MODULE;
626 acpi_video_data->get_brightness =
627 acpi_video_get_brightness;
628 acpi_video_data->update_status =
629 acpi_video_set_brightness;
630 sprintf(name, "acpi_video%d", count++);
631 device->data = acpi_video_data;
632 acpi_video_data->max_brightness = max_level;
633 acpi_video_device_lcd_get_level_current(device, &tmp);
634 acpi_video_data->brightness = (int)tmp;
635 device->backlight = backlight_device_register(name,
636 NULL, device, acpi_video_data);
637 kfree(name);
638 }
571 return; 639 return;
572} 640}
573 641
@@ -668,6 +736,8 @@ static int acpi_video_device_info_seq_show(struct seq_file *seq, void *offset)
668 seq_printf(seq, "LCD\n"); 736 seq_printf(seq, "LCD\n");
669 else if (dev->flags.tvout) 737 else if (dev->flags.tvout)
670 seq_printf(seq, "TVOUT\n"); 738 seq_printf(seq, "TVOUT\n");
739 else if (dev->flags.dvi)
740 seq_printf(seq, "DVI\n");
671 else 741 else
672 seq_printf(seq, "UNKNOWN\n"); 742 seq_printf(seq, "UNKNOWN\n");
673 743
@@ -1242,6 +1312,16 @@ static int acpi_video_bus_remove_fs(struct acpi_device *device)
1242 -------------------------------------------------------------------------- */ 1312 -------------------------------------------------------------------------- */
1243 1313
1244/* device interface */ 1314/* device interface */
1315static struct acpi_video_device_attrib*
1316acpi_video_get_device_attr(struct acpi_video_bus *video, unsigned long device_id)
1317{
1318 int count;
1319
1320 for(count = 0; count < video->attached_count; count++)
1321 if((video->attached_array[count].value.int_val & 0xffff) == device_id)
1322 return &(video->attached_array[count].value.attrib);
1323 return NULL;
1324}
1245 1325
1246static int 1326static int
1247acpi_video_bus_get_one_device(struct acpi_device *device, 1327acpi_video_bus_get_one_device(struct acpi_device *device,
@@ -1250,7 +1330,7 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
1250 unsigned long device_id; 1330 unsigned long device_id;
1251 int status; 1331 int status;
1252 struct acpi_video_device *data; 1332 struct acpi_video_device *data;
1253 1333 struct acpi_video_device_attrib* attribute;
1254 1334
1255 if (!device || !video) 1335 if (!device || !video)
1256 return -EINVAL; 1336 return -EINVAL;
@@ -1271,20 +1351,30 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
1271 data->video = video; 1351 data->video = video;
1272 data->dev = device; 1352 data->dev = device;
1273 1353
1274 switch (device_id & 0xffff) { 1354 attribute = acpi_video_get_device_attr(video, device_id);
1275 case 0x0100: 1355
1276 data->flags.crt = 1; 1356 if((attribute != NULL) && attribute->device_id_scheme) {
1277 break; 1357 switch (attribute->display_type) {
1278 case 0x0400: 1358 case ACPI_VIDEO_DISPLAY_CRT:
1279 data->flags.lcd = 1; 1359 data->flags.crt = 1;
1280 break; 1360 break;
1281 case 0x0200: 1361 case ACPI_VIDEO_DISPLAY_TV:
1282 data->flags.tvout = 1; 1362 data->flags.tvout = 1;
1283 break; 1363 break;
1284 default: 1364 case ACPI_VIDEO_DISPLAY_DVI:
1365 data->flags.dvi = 1;
1366 break;
1367 case ACPI_VIDEO_DISPLAY_LCD:
1368 data->flags.lcd = 1;
1369 break;
1370 default:
1371 data->flags.unknown = 1;
1372 break;
1373 }
1374 if(attribute->bios_can_detect)
1375 data->flags.bios = 1;
1376 } else
1285 data->flags.unknown = 1; 1377 data->flags.unknown = 1;
1286 break;
1287 }
1288 1378
1289 acpi_video_device_bind(video, data); 1379 acpi_video_device_bind(video, data);
1290 acpi_video_device_find_cap(data); 1380 acpi_video_device_find_cap(data);
@@ -1588,7 +1678,10 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
1588 status = acpi_remove_notify_handler(device->dev->handle, 1678 status = acpi_remove_notify_handler(device->dev->handle,
1589 ACPI_DEVICE_NOTIFY, 1679 ACPI_DEVICE_NOTIFY,
1590 acpi_video_device_notify); 1680 acpi_video_device_notify);
1591 1681 if (device->backlight){
1682 backlight_device_unregister(device->backlight);
1683 kfree(device->data);
1684 }
1592 return 0; 1685 return 0;
1593} 1686}
1594 1687
@@ -1790,39 +1883,6 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type)
1790 return 0; 1883 return 0;
1791} 1884}
1792 1885
1793static int
1794acpi_video_bus_match(struct acpi_device *device, struct acpi_driver *driver)
1795{
1796 acpi_handle h_dummy1;
1797 acpi_handle h_dummy2;
1798 acpi_handle h_dummy3;
1799
1800
1801 if (!device || !driver)
1802 return -EINVAL;
1803
1804 /* Since there is no HID, CID for ACPI Video drivers, we have
1805 * to check well known required nodes for each feature we support.
1806 */
1807
1808 /* Does this device able to support video switching ? */
1809 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy1)) &&
1810 ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy2)))
1811 return 0;
1812
1813 /* Does this device able to retrieve a video ROM ? */
1814 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy1)))
1815 return 0;
1816
1817 /* Does this device able to configure which video head to be POSTed ? */
1818 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_VPO", &h_dummy1)) &&
1819 ACPI_SUCCESS(acpi_get_handle(device->handle, "_GPD", &h_dummy2)) &&
1820 ACPI_SUCCESS(acpi_get_handle(device->handle, "_SPD", &h_dummy3)))
1821 return 0;
1822
1823 return -ENODEV;
1824}
1825
1826static int __init acpi_video_init(void) 1886static int __init acpi_video_init(void)
1827{ 1887{
1828 int result = 0; 1888 int result = 0;
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 8bf2ca2e56b5..96def1ddba19 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -364,7 +364,7 @@ char *make_class_name(const char *name, struct kobject *kobj)
364 364
365 class_name = kmalloc(size, GFP_KERNEL); 365 class_name = kmalloc(size, GFP_KERNEL);
366 if (!class_name) 366 if (!class_name)
367 return ERR_PTR(-ENOMEM); 367 return NULL;
368 368
369 strcpy(class_name, name); 369 strcpy(class_name, name);
370 strcat(class_name, ":"); 370 strcat(class_name, ":");
@@ -411,8 +411,11 @@ static int make_deprecated_class_device_links(struct class_device *class_dev)
411 return 0; 411 return 0;
412 412
413 class_name = make_class_name(class_dev->class->name, &class_dev->kobj); 413 class_name = make_class_name(class_dev->class->name, &class_dev->kobj);
414 error = sysfs_create_link(&class_dev->dev->kobj, &class_dev->kobj, 414 if (class_name)
415 class_name); 415 error = sysfs_create_link(&class_dev->dev->kobj,
416 &class_dev->kobj, class_name);
417 else
418 error = -ENOMEM;
416 kfree(class_name); 419 kfree(class_name);
417 return error; 420 return error;
418} 421}
@@ -425,7 +428,8 @@ static void remove_deprecated_class_device_links(struct class_device *class_dev)
425 return; 428 return;
426 429
427 class_name = make_class_name(class_dev->class->name, &class_dev->kobj); 430 class_name = make_class_name(class_dev->class->name, &class_dev->kobj);
428 sysfs_remove_link(&class_dev->dev->kobj, class_name); 431 if (class_name)
432 sysfs_remove_link(&class_dev->dev->kobj, class_name);
429 kfree(class_name); 433 kfree(class_name);
430} 434}
431#else 435#else
@@ -863,9 +867,12 @@ int class_device_rename(struct class_device *class_dev, char *new_name)
863 if (class_dev->dev) { 867 if (class_dev->dev) {
864 new_class_name = make_class_name(class_dev->class->name, 868 new_class_name = make_class_name(class_dev->class->name,
865 &class_dev->kobj); 869 &class_dev->kobj);
866 sysfs_create_link(&class_dev->dev->kobj, &class_dev->kobj, 870 if (new_class_name)
867 new_class_name); 871 sysfs_create_link(&class_dev->dev->kobj,
868 sysfs_remove_link(&class_dev->dev->kobj, old_class_name); 872 &class_dev->kobj, new_class_name);
873 if (old_class_name)
874 sysfs_remove_link(&class_dev->dev->kobj,
875 old_class_name);
869 } 876 }
870#endif 877#endif
871 class_device_put(class_dev); 878 class_device_put(class_dev);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 67b79a7592a9..e13614241c9e 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -95,6 +95,8 @@ static void device_release(struct kobject * kobj)
95 95
96 if (dev->release) 96 if (dev->release)
97 dev->release(dev); 97 dev->release(dev);
98 else if (dev->type && dev->type->release)
99 dev->type->release(dev);
98 else if (dev->class && dev->class->dev_release) 100 else if (dev->class && dev->class->dev_release)
99 dev->class->dev_release(dev); 101 dev->class->dev_release(dev);
100 else { 102 else {
@@ -154,25 +156,47 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj, char **envp,
154 "MINOR=%u", MINOR(dev->devt)); 156 "MINOR=%u", MINOR(dev->devt));
155 } 157 }
156 158
157#ifdef CONFIG_SYSFS_DEPRECATED 159 if (dev->driver)
158 /* add bus name (same as SUBSYSTEM, deprecated) */
159 if (dev->bus)
160 add_uevent_var(envp, num_envp, &i,
161 buffer, buffer_size, &length,
162 "PHYSDEVBUS=%s", dev->bus->name);
163#endif
164
165 /* add driver name (PHYSDEV* values are deprecated)*/
166 if (dev->driver) {
167 add_uevent_var(envp, num_envp, &i, 160 add_uevent_var(envp, num_envp, &i,
168 buffer, buffer_size, &length, 161 buffer, buffer_size, &length,
169 "DRIVER=%s", dev->driver->name); 162 "DRIVER=%s", dev->driver->name);
163
170#ifdef CONFIG_SYSFS_DEPRECATED 164#ifdef CONFIG_SYSFS_DEPRECATED
165 if (dev->class) {
166 struct device *parent = dev->parent;
167
168 /* find first bus device in parent chain */
169 while (parent && !parent->bus)
170 parent = parent->parent;
171 if (parent && parent->bus) {
172 const char *path;
173
174 path = kobject_get_path(&parent->kobj, GFP_KERNEL);
175 add_uevent_var(envp, num_envp, &i,
176 buffer, buffer_size, &length,
177 "PHYSDEVPATH=%s", path);
178 kfree(path);
179
180 add_uevent_var(envp, num_envp, &i,
181 buffer, buffer_size, &length,
182 "PHYSDEVBUS=%s", parent->bus->name);
183
184 if (parent->driver)
185 add_uevent_var(envp, num_envp, &i,
186 buffer, buffer_size, &length,
187 "PHYSDEVDRIVER=%s", parent->driver->name);
188 }
189 } else if (dev->bus) {
171 add_uevent_var(envp, num_envp, &i, 190 add_uevent_var(envp, num_envp, &i,
172 buffer, buffer_size, &length, 191 buffer, buffer_size, &length,
173 "PHYSDEVDRIVER=%s", dev->driver->name); 192 "PHYSDEVBUS=%s", dev->bus->name);
174#endif 193
194 if (dev->driver)
195 add_uevent_var(envp, num_envp, &i,
196 buffer, buffer_size, &length,
197 "PHYSDEVDRIVER=%s", dev->driver->name);
175 } 198 }
199#endif
176 200
177 /* terminate, set to next free slot, shrink available space */ 201 /* terminate, set to next free slot, shrink available space */
178 envp[i] = NULL; 202 envp[i] = NULL;
@@ -184,19 +208,25 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj, char **envp,
184 if (dev->bus && dev->bus->uevent) { 208 if (dev->bus && dev->bus->uevent) {
185 /* have the bus specific function add its stuff */ 209 /* have the bus specific function add its stuff */
186 retval = dev->bus->uevent(dev, envp, num_envp, buffer, buffer_size); 210 retval = dev->bus->uevent(dev, envp, num_envp, buffer, buffer_size);
187 if (retval) { 211 if (retval)
188 pr_debug ("%s - uevent() returned %d\n", 212 pr_debug ("%s: bus uevent() returned %d\n",
189 __FUNCTION__, retval); 213 __FUNCTION__, retval);
190 }
191 } 214 }
192 215
193 if (dev->class && dev->class->dev_uevent) { 216 if (dev->class && dev->class->dev_uevent) {
194 /* have the class specific function add its stuff */ 217 /* have the class specific function add its stuff */
195 retval = dev->class->dev_uevent(dev, envp, num_envp, buffer, buffer_size); 218 retval = dev->class->dev_uevent(dev, envp, num_envp, buffer, buffer_size);
196 if (retval) { 219 if (retval)
197 pr_debug("%s - dev_uevent() returned %d\n", 220 pr_debug("%s: class uevent() returned %d\n",
198 __FUNCTION__, retval); 221 __FUNCTION__, retval);
199 } 222 }
223
224 if (dev->type && dev->type->uevent) {
225 /* have the device type specific fuction add its stuff */
226 retval = dev->type->uevent(dev, envp, num_envp, buffer, buffer_size);
227 if (retval)
228 pr_debug("%s: dev_type uevent() returned %d\n",
229 __FUNCTION__, retval);
200 } 230 }
201 231
202 return retval; 232 return retval;
@@ -247,37 +277,50 @@ static void device_remove_groups(struct device *dev)
247static int device_add_attrs(struct device *dev) 277static int device_add_attrs(struct device *dev)
248{ 278{
249 struct class *class = dev->class; 279 struct class *class = dev->class;
280 struct device_type *type = dev->type;
250 int error = 0; 281 int error = 0;
251 int i; 282 int i;
252 283
253 if (!class) 284 if (class && class->dev_attrs) {
254 return 0;
255
256 if (class->dev_attrs) {
257 for (i = 0; attr_name(class->dev_attrs[i]); i++) { 285 for (i = 0; attr_name(class->dev_attrs[i]); i++) {
258 error = device_create_file(dev, &class->dev_attrs[i]); 286 error = device_create_file(dev, &class->dev_attrs[i]);
259 if (error) 287 if (error)
260 break; 288 break;
261 } 289 }
290 if (error)
291 while (--i >= 0)
292 device_remove_file(dev, &class->dev_attrs[i]);
262 } 293 }
263 if (error) 294
264 while (--i >= 0) 295 if (type && type->attrs) {
265 device_remove_file(dev, &class->dev_attrs[i]); 296 for (i = 0; attr_name(type->attrs[i]); i++) {
297 error = device_create_file(dev, &type->attrs[i]);
298 if (error)
299 break;
300 }
301 if (error)
302 while (--i >= 0)
303 device_remove_file(dev, &type->attrs[i]);
304 }
305
266 return error; 306 return error;
267} 307}
268 308
269static void device_remove_attrs(struct device *dev) 309static void device_remove_attrs(struct device *dev)
270{ 310{
271 struct class *class = dev->class; 311 struct class *class = dev->class;
312 struct device_type *type = dev->type;
272 int i; 313 int i;
273 314
274 if (!class) 315 if (class && class->dev_attrs) {
275 return;
276
277 if (class->dev_attrs) {
278 for (i = 0; attr_name(class->dev_attrs[i]); i++) 316 for (i = 0; attr_name(class->dev_attrs[i]); i++)
279 device_remove_file(dev, &class->dev_attrs[i]); 317 device_remove_file(dev, &class->dev_attrs[i]);
280 } 318 }
319
320 if (type && type->attrs) {
321 for (i = 0; attr_name(type->attrs[i]); i++)
322 device_remove_file(dev, &type->attrs[i]);
323 }
281} 324}
282 325
283 326
@@ -390,22 +433,23 @@ void device_initialize(struct device *dev)
390} 433}
391 434
392#ifdef CONFIG_SYSFS_DEPRECATED 435#ifdef CONFIG_SYSFS_DEPRECATED
393static int setup_parent(struct device *dev, struct device *parent) 436static struct kobject * get_device_parent(struct device *dev,
437 struct device *parent)
394{ 438{
395 /* Set the parent to the class, not the parent device */ 439 /* Set the parent to the class, not the parent device */
396 /* this keeps sysfs from having a symlink to make old udevs happy */ 440 /* this keeps sysfs from having a symlink to make old udevs happy */
397 if (dev->class) 441 if (dev->class)
398 dev->kobj.parent = &dev->class->subsys.kset.kobj; 442 return &dev->class->subsys.kset.kobj;
399 else if (parent) 443 else if (parent)
400 dev->kobj.parent = &parent->kobj; 444 return &parent->kobj;
401 445
402 return 0; 446 return NULL;
403} 447}
404#else 448#else
405static int virtual_device_parent(struct device *dev) 449static struct kobject * virtual_device_parent(struct device *dev)
406{ 450{
407 if (!dev->class) 451 if (!dev->class)
408 return -ENODEV; 452 return ERR_PTR(-ENODEV);
409 453
410 if (!dev->class->virtual_dir) { 454 if (!dev->class->virtual_dir) {
411 static struct kobject *virtual_dir = NULL; 455 static struct kobject *virtual_dir = NULL;
@@ -415,25 +459,31 @@ static int virtual_device_parent(struct device *dev)
415 dev->class->virtual_dir = kobject_add_dir(virtual_dir, dev->class->name); 459 dev->class->virtual_dir = kobject_add_dir(virtual_dir, dev->class->name);
416 } 460 }
417 461
418 dev->kobj.parent = dev->class->virtual_dir; 462 return dev->class->virtual_dir;
419 return 0;
420} 463}
421 464
422static int setup_parent(struct device *dev, struct device *parent) 465static struct kobject * get_device_parent(struct device *dev,
466 struct device *parent)
423{ 467{
424 int error;
425
426 /* if this is a class device, and has no parent, create one */ 468 /* if this is a class device, and has no parent, create one */
427 if ((dev->class) && (parent == NULL)) { 469 if ((dev->class) && (parent == NULL)) {
428 error = virtual_device_parent(dev); 470 return virtual_device_parent(dev);
429 if (error)
430 return error;
431 } else if (parent) 471 } else if (parent)
432 dev->kobj.parent = &parent->kobj; 472 return &parent->kobj;
473 return NULL;
474}
433 475
476#endif
477static int setup_parent(struct device *dev, struct device *parent)
478{
479 struct kobject *kobj;
480 kobj = get_device_parent(dev, parent);
481 if (IS_ERR(kobj))
482 return PTR_ERR(kobj);
483 if (kobj)
484 dev->kobj.parent = kobj;
434 return 0; 485 return 0;
435} 486}
436#endif
437 487
438/** 488/**
439 * device_add - add device to device hierarchy. 489 * device_add - add device to device hierarchy.
@@ -520,9 +570,13 @@ int device_add(struct device *dev)
520 &dev->kobj, dev->bus_id); 570 &dev->kobj, dev->bus_id);
521#ifdef CONFIG_SYSFS_DEPRECATED 571#ifdef CONFIG_SYSFS_DEPRECATED
522 if (parent) { 572 if (parent) {
523 sysfs_create_link(&dev->kobj, &dev->parent->kobj, "device"); 573 sysfs_create_link(&dev->kobj, &dev->parent->kobj,
524 class_name = make_class_name(dev->class->name, &dev->kobj); 574 "device");
525 sysfs_create_link(&dev->parent->kobj, &dev->kobj, class_name); 575 class_name = make_class_name(dev->class->name,
576 &dev->kobj);
577 if (class_name)
578 sysfs_create_link(&dev->parent->kobj,
579 &dev->kobj, class_name);
526 } 580 }
527#endif 581#endif
528 } 582 }
@@ -535,7 +589,8 @@ int device_add(struct device *dev)
535 goto PMError; 589 goto PMError;
536 if ((error = bus_add_device(dev))) 590 if ((error = bus_add_device(dev)))
537 goto BusError; 591 goto BusError;
538 kobject_uevent(&dev->kobj, KOBJ_ADD); 592 if (!dev->uevent_suppress)
593 kobject_uevent(&dev->kobj, KOBJ_ADD);
539 if ((error = bus_attach_device(dev))) 594 if ((error = bus_attach_device(dev)))
540 goto AttachError; 595 goto AttachError;
541 if (parent) 596 if (parent)
@@ -665,7 +720,9 @@ void device_del(struct device * dev)
665 if (parent) { 720 if (parent) {
666 char *class_name = make_class_name(dev->class->name, 721 char *class_name = make_class_name(dev->class->name,
667 &dev->kobj); 722 &dev->kobj);
668 sysfs_remove_link(&dev->parent->kobj, class_name); 723 if (class_name)
724 sysfs_remove_link(&dev->parent->kobj,
725 class_name);
669 kfree(class_name); 726 kfree(class_name);
670 sysfs_remove_link(&dev->kobj, "device"); 727 sysfs_remove_link(&dev->kobj, "device");
671 } 728 }
@@ -968,20 +1025,25 @@ static int device_move_class_links(struct device *dev,
968 1025
969 class_name = make_class_name(dev->class->name, &dev->kobj); 1026 class_name = make_class_name(dev->class->name, &dev->kobj);
970 if (!class_name) { 1027 if (!class_name) {
971 error = PTR_ERR(class_name); 1028 error = -ENOMEM;
972 class_name = NULL;
973 goto out; 1029 goto out;
974 } 1030 }
975 if (old_parent) { 1031 if (old_parent) {
976 sysfs_remove_link(&dev->kobj, "device"); 1032 sysfs_remove_link(&dev->kobj, "device");
977 sysfs_remove_link(&old_parent->kobj, class_name); 1033 sysfs_remove_link(&old_parent->kobj, class_name);
978 } 1034 }
979 error = sysfs_create_link(&dev->kobj, &new_parent->kobj, "device"); 1035 if (new_parent) {
980 if (error) 1036 error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
981 goto out; 1037 "device");
982 error = sysfs_create_link(&new_parent->kobj, &dev->kobj, class_name); 1038 if (error)
983 if (error) 1039 goto out;
984 sysfs_remove_link(&dev->kobj, "device"); 1040 error = sysfs_create_link(&new_parent->kobj, &dev->kobj,
1041 class_name);
1042 if (error)
1043 sysfs_remove_link(&dev->kobj, "device");
1044 }
1045 else
1046 error = 0;
985out: 1047out:
986 kfree(class_name); 1048 kfree(class_name);
987 return error; 1049 return error;
@@ -993,29 +1055,28 @@ out:
993/** 1055/**
994 * device_move - moves a device to a new parent 1056 * device_move - moves a device to a new parent
995 * @dev: the pointer to the struct device to be moved 1057 * @dev: the pointer to the struct device to be moved
996 * @new_parent: the new parent of the device 1058 * @new_parent: the new parent of the device (can by NULL)
997 */ 1059 */
998int device_move(struct device *dev, struct device *new_parent) 1060int device_move(struct device *dev, struct device *new_parent)
999{ 1061{
1000 int error; 1062 int error;
1001 struct device *old_parent; 1063 struct device *old_parent;
1064 struct kobject *new_parent_kobj;
1002 1065
1003 dev = get_device(dev); 1066 dev = get_device(dev);
1004 if (!dev) 1067 if (!dev)
1005 return -EINVAL; 1068 return -EINVAL;
1006 1069
1007 if (!device_is_registered(dev)) {
1008 error = -EINVAL;
1009 goto out;
1010 }
1011 new_parent = get_device(new_parent); 1070 new_parent = get_device(new_parent);
1012 if (!new_parent) { 1071 new_parent_kobj = get_device_parent (dev, new_parent);
1013 error = -EINVAL; 1072 if (IS_ERR(new_parent_kobj)) {
1073 error = PTR_ERR(new_parent_kobj);
1074 put_device(new_parent);
1014 goto out; 1075 goto out;
1015 } 1076 }
1016 pr_debug("DEVICE: moving '%s' to '%s'\n", dev->bus_id, 1077 pr_debug("DEVICE: moving '%s' to '%s'\n", dev->bus_id,
1017 new_parent->bus_id); 1078 new_parent ? new_parent->bus_id : "<NULL>");
1018 error = kobject_move(&dev->kobj, &new_parent->kobj); 1079 error = kobject_move(&dev->kobj, new_parent_kobj);
1019 if (error) { 1080 if (error) {
1020 put_device(new_parent); 1081 put_device(new_parent);
1021 goto out; 1082 goto out;
@@ -1024,7 +1085,8 @@ int device_move(struct device *dev, struct device *new_parent)
1024 dev->parent = new_parent; 1085 dev->parent = new_parent;
1025 if (old_parent) 1086 if (old_parent)
1026 klist_remove(&dev->knode_parent); 1087 klist_remove(&dev->knode_parent);
1027 klist_add_tail(&dev->knode_parent, &new_parent->klist_children); 1088 if (new_parent)
1089 klist_add_tail(&dev->knode_parent, &new_parent->klist_children);
1028 if (!dev->class) 1090 if (!dev->class)
1029 goto out_put; 1091 goto out_put;
1030 error = device_move_class_links(dev, old_parent, new_parent); 1092 error = device_move_class_links(dev, old_parent, new_parent);
@@ -1032,7 +1094,8 @@ int device_move(struct device *dev, struct device *new_parent)
1032 /* We ignore errors on cleanup since we're hosed anyway... */ 1094 /* We ignore errors on cleanup since we're hosed anyway... */
1033 device_move_class_links(dev, new_parent, old_parent); 1095 device_move_class_links(dev, new_parent, old_parent);
1034 if (!kobject_move(&dev->kobj, &old_parent->kobj)) { 1096 if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
1035 klist_remove(&dev->knode_parent); 1097 if (new_parent)
1098 klist_remove(&dev->knode_parent);
1036 if (old_parent) 1099 if (old_parent)
1037 klist_add_tail(&dev->knode_parent, 1100 klist_add_tail(&dev->knode_parent,
1038 &old_parent->klist_children); 1101 &old_parent->klist_children);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 510e7884975f..b5bf243d9cd6 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -86,8 +86,12 @@ static void driver_sysfs_remove(struct device *dev)
86 */ 86 */
87int device_bind_driver(struct device *dev) 87int device_bind_driver(struct device *dev)
88{ 88{
89 driver_bound(dev); 89 int ret;
90 return driver_sysfs_add(dev); 90
91 ret = driver_sysfs_add(dev);
92 if (!ret)
93 driver_bound(dev);
94 return ret;
91} 95}
92 96
93struct stupid_thread_structure { 97struct stupid_thread_structure {
@@ -136,18 +140,17 @@ probe_failed:
136 driver_sysfs_remove(dev); 140 driver_sysfs_remove(dev);
137 dev->driver = NULL; 141 dev->driver = NULL;
138 142
139 if (ret == -ENODEV || ret == -ENXIO) { 143 if (ret != -ENODEV && ret != -ENXIO) {
140 /* Driver matched, but didn't support device
141 * or device not found.
142 * Not an error; keep going.
143 */
144 ret = 0;
145 } else {
146 /* driver matched but the probe failed */ 144 /* driver matched but the probe failed */
147 printk(KERN_WARNING 145 printk(KERN_WARNING
148 "%s: probe of %s failed with error %d\n", 146 "%s: probe of %s failed with error %d\n",
149 drv->name, dev->bus_id, ret); 147 drv->name, dev->bus_id, ret);
150 } 148 }
149 /*
150 * Ignore errors returned by ->probe so that the next driver can try
151 * its luck.
152 */
153 ret = 0;
151done: 154done:
152 kfree(data); 155 kfree(data);
153 atomic_dec(&probe_count); 156 atomic_dec(&probe_count);
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 64558f45e6bc..c0a979a5074b 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -35,7 +35,7 @@ enum {
35 FW_STATUS_READY_NOHOTPLUG, 35 FW_STATUS_READY_NOHOTPLUG,
36}; 36};
37 37
38static int loading_timeout = 10; /* In seconds */ 38static int loading_timeout = 60; /* In seconds */
39 39
40/* fw_lock could be moved to 'struct firmware_priv' but since it is just 40/* fw_lock could be moved to 'struct firmware_priv' but since it is just
41 * guarding for corner cases a global lock should be OK */ 41 * guarding for corner cases a global lock should be OK */
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index f9c903ba9fcd..30480f6f2af2 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -611,8 +611,15 @@ EXPORT_SYMBOL_GPL(platform_bus_type);
611 611
612int __init platform_bus_init(void) 612int __init platform_bus_init(void)
613{ 613{
614 device_register(&platform_bus); 614 int error;
615 return bus_register(&platform_bus_type); 615
616 error = device_register(&platform_bus);
617 if (error)
618 return error;
619 error = bus_register(&platform_bus_type);
620 if (error)
621 device_unregister(&platform_bus);
622 return error;
616} 623}
617 624
618#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK 625#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index 6dcdceb81203..85d99e21e188 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -532,11 +532,13 @@ typedef struct drm_mm_node {
532 int free; 532 int free;
533 unsigned long start; 533 unsigned long start;
534 unsigned long size; 534 unsigned long size;
535 struct drm_mm *mm;
535 void *private; 536 void *private;
536} drm_mm_node_t; 537} drm_mm_node_t;
537 538
538typedef struct drm_mm { 539typedef struct drm_mm {
539 drm_mm_node_t root_node; 540 struct list_head fl_entry;
541 struct list_head ml_entry;
540} drm_mm_t; 542} drm_mm_t;
541 543
542/** 544/**
@@ -843,9 +845,6 @@ extern void drm_mem_init(void);
843extern int drm_mem_info(char *buf, char **start, off_t offset, 845extern int drm_mem_info(char *buf, char **start, off_t offset,
844 int request, int *eof, void *data); 846 int request, int *eof, void *data);
845extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area); 847extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
846extern void *drm_ioremap(unsigned long offset, unsigned long size,
847 drm_device_t * dev);
848extern void drm_ioremapfree(void *pt, unsigned long size, drm_device_t * dev);
849 848
850extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type); 849extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type);
851extern int drm_free_agp(DRM_AGP_MEM * handle, int pages); 850extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
@@ -1053,33 +1052,18 @@ extern void drm_sysfs_device_remove(struct class_device *class_dev);
1053extern drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent, 1052extern drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
1054 unsigned long size, 1053 unsigned long size,
1055 unsigned alignment); 1054 unsigned alignment);
1056extern void drm_mm_put_block(drm_mm_t *mm, drm_mm_node_t *cur); 1055void drm_mm_put_block(drm_mm_node_t * cur);
1057extern drm_mm_node_t *drm_mm_search_free(const drm_mm_t *mm, unsigned long size, 1056extern drm_mm_node_t *drm_mm_search_free(const drm_mm_t *mm, unsigned long size,
1058 unsigned alignment, int best_match); 1057 unsigned alignment, int best_match);
1059extern int drm_mm_init(drm_mm_t *mm, unsigned long start, unsigned long size); 1058extern int drm_mm_init(drm_mm_t *mm, unsigned long start, unsigned long size);
1060extern void drm_mm_takedown(drm_mm_t *mm); 1059extern void drm_mm_takedown(drm_mm_t *mm);
1060extern int drm_mm_clean(drm_mm_t *mm);
1061extern unsigned long drm_mm_tail_space(drm_mm_t *mm);
1062extern int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size);
1063extern int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size);
1061 1064
1062/* Inline replacements for DRM_IOREMAP macros */ 1065extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
1063static __inline__ void drm_core_ioremap(struct drm_map *map, 1066extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
1064 struct drm_device *dev)
1065{
1066 map->handle = drm_ioremap(map->offset, map->size, dev);
1067}
1068
1069#if 0
1070static __inline__ void drm_core_ioremap_nocache(struct drm_map *map,
1071 struct drm_device *dev)
1072{
1073 map->handle = drm_ioremap_nocache(map->offset, map->size, dev);
1074}
1075#endif /* 0 */
1076
1077static __inline__ void drm_core_ioremapfree(struct drm_map *map,
1078 struct drm_device *dev)
1079{
1080 if (map->handle && map->size)
1081 drm_ioremapfree(map->handle, map->size, dev);
1082}
1083 1067
1084static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, 1068static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
1085 unsigned int token) 1069 unsigned int token)
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index 9f65f5697ba8..a6828cc14e58 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -79,14 +79,14 @@ static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
79 79
80 if (!use_hashed_handle) { 80 if (!use_hashed_handle) {
81 int ret; 81 int ret;
82 hash->key = user_token; 82 hash->key = user_token >> PAGE_SHIFT;
83 ret = drm_ht_insert_item(&dev->map_hash, hash); 83 ret = drm_ht_insert_item(&dev->map_hash, hash);
84 if (ret != -EINVAL) 84 if (ret != -EINVAL)
85 return ret; 85 return ret;
86 } 86 }
87 return drm_ht_just_insert_please(&dev->map_hash, hash, 87 return drm_ht_just_insert_please(&dev->map_hash, hash,
88 user_token, 32 - PAGE_SHIFT - 3, 88 user_token, 32 - PAGE_SHIFT - 3,
89 PAGE_SHIFT, DRM_MAP_HASH_OFFSET); 89 0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
90} 90}
91 91
92/** 92/**
@@ -178,11 +178,11 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
178 } 178 }
179 } 179 }
180 if (map->type == _DRM_REGISTERS) 180 if (map->type == _DRM_REGISTERS)
181 map->handle = drm_ioremap(map->offset, map->size, dev); 181 map->handle = ioremap(map->offset, map->size);
182 break; 182 break;
183 183
184 case _DRM_SHM: 184 case _DRM_SHM:
185 map->handle = vmalloc_32(map->size); 185 map->handle = vmalloc_user(map->size);
186 DRM_DEBUG("%lu %d %p\n", 186 DRM_DEBUG("%lu %d %p\n",
187 map->size, drm_order(map->size), map->handle); 187 map->size, drm_order(map->size), map->handle);
188 if (!map->handle) { 188 if (!map->handle) {
@@ -238,7 +238,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
238 list = drm_alloc(sizeof(*list), DRM_MEM_MAPS); 238 list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
239 if (!list) { 239 if (!list) {
240 if (map->type == _DRM_REGISTERS) 240 if (map->type == _DRM_REGISTERS)
241 drm_ioremapfree(map->handle, map->size, dev); 241 iounmap(map->handle);
242 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 242 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
243 return -EINVAL; 243 return -EINVAL;
244 } 244 }
@@ -255,14 +255,14 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
255 ret = drm_map_handle(dev, &list->hash, user_token, 0); 255 ret = drm_map_handle(dev, &list->hash, user_token, 0);
256 if (ret) { 256 if (ret) {
257 if (map->type == _DRM_REGISTERS) 257 if (map->type == _DRM_REGISTERS)
258 drm_ioremapfree(map->handle, map->size, dev); 258 iounmap(map->handle);
259 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 259 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
260 drm_free(list, sizeof(*list), DRM_MEM_MAPS); 260 drm_free(list, sizeof(*list), DRM_MEM_MAPS);
261 mutex_unlock(&dev->struct_mutex); 261 mutex_unlock(&dev->struct_mutex);
262 return ret; 262 return ret;
263 } 263 }
264 264
265 list->user_token = list->hash.key; 265 list->user_token = list->hash.key << PAGE_SHIFT;
266 mutex_unlock(&dev->struct_mutex); 266 mutex_unlock(&dev->struct_mutex);
267 267
268 *maplist = list; 268 *maplist = list;
@@ -347,7 +347,8 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
347 347
348 if (r_list->map == map) { 348 if (r_list->map == map) {
349 list_del(list); 349 list_del(list);
350 drm_ht_remove_key(&dev->map_hash, r_list->user_token); 350 drm_ht_remove_key(&dev->map_hash,
351 r_list->user_token >> PAGE_SHIFT);
351 drm_free(list, sizeof(*list), DRM_MEM_MAPS); 352 drm_free(list, sizeof(*list), DRM_MEM_MAPS);
352 break; 353 break;
353 } 354 }
@@ -362,7 +363,7 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
362 363
363 switch (map->type) { 364 switch (map->type) {
364 case _DRM_REGISTERS: 365 case _DRM_REGISTERS:
365 drm_ioremapfree(map->handle, map->size, dev); 366 iounmap(map->handle);
366 /* FALLTHROUGH */ 367 /* FALLTHROUGH */
367 case _DRM_FRAME_BUFFER: 368 case _DRM_FRAME_BUFFER:
368 if (drm_core_has_MTRR(dev) && map->mtrr >= 0) { 369 if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
diff --git a/drivers/char/drm/drm_memory.c b/drivers/char/drm/drm_memory.c
index 5681cae1d404..92a867082376 100644
--- a/drivers/char/drm/drm_memory.c
+++ b/drivers/char/drm/drm_memory.c
@@ -79,28 +79,6 @@ void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
79} 79}
80 80
81#if __OS_HAS_AGP 81#if __OS_HAS_AGP
82/*
83 * Find the drm_map that covers the range [offset, offset+size).
84 */
85static drm_map_t *drm_lookup_map(unsigned long offset,
86 unsigned long size, drm_device_t * dev)
87{
88 struct list_head *list;
89 drm_map_list_t *r_list;
90 drm_map_t *map;
91
92 list_for_each(list, &dev->maplist->head) {
93 r_list = (drm_map_list_t *) list;
94 map = r_list->map;
95 if (!map)
96 continue;
97 if (map->offset <= offset
98 && (offset + size) <= (map->offset + map->size))
99 return map;
100 }
101 return NULL;
102}
103
104static void *agp_remap(unsigned long offset, unsigned long size, 82static void *agp_remap(unsigned long offset, unsigned long size,
105 drm_device_t * dev) 83 drm_device_t * dev)
106{ 84{
@@ -169,13 +147,6 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
169} 147}
170 148
171#else /* __OS_HAS_AGP */ 149#else /* __OS_HAS_AGP */
172
173static inline drm_map_t *drm_lookup_map(unsigned long offset,
174 unsigned long size, drm_device_t * dev)
175{
176 return NULL;
177}
178
179static inline void *agp_remap(unsigned long offset, unsigned long size, 150static inline void *agp_remap(unsigned long offset, unsigned long size,
180 drm_device_t * dev) 151 drm_device_t * dev)
181{ 152{
@@ -184,57 +155,28 @@ static inline void *agp_remap(unsigned long offset, unsigned long size,
184 155
185#endif /* agp */ 156#endif /* agp */
186 157
187void *drm_ioremap(unsigned long offset, unsigned long size, 158#endif /* debug_memory */
188 drm_device_t * dev)
189{
190 if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
191 drm_map_t *map = drm_lookup_map(offset, size, dev);
192
193 if (map && map->type == _DRM_AGP)
194 return agp_remap(offset, size, dev);
195 }
196 return ioremap(offset, size);
197}
198EXPORT_SYMBOL(drm_ioremap);
199 159
200#if 0 160void drm_core_ioremap(struct drm_map *map, struct drm_device *dev)
201void *drm_ioremap_nocache(unsigned long offset,
202 unsigned long size, drm_device_t * dev)
203{ 161{
204 if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) { 162 if (drm_core_has_AGP(dev) &&
205 drm_map_t *map = drm_lookup_map(offset, size, dev); 163 dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
206 164 map->handle = agp_remap(map->offset, map->size, dev);
207 if (map && map->type == _DRM_AGP) 165 else
208 return agp_remap(offset, size, dev); 166 map->handle = ioremap(map->offset, map->size);
209 }
210 return ioremap_nocache(offset, size);
211} 167}
212#endif /* 0 */ 168EXPORT_SYMBOL(drm_core_ioremap);
213 169
214void drm_ioremapfree(void *pt, unsigned long size, 170void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
215 drm_device_t * dev)
216{ 171{
217 /* 172 if (!map->handle || !map->size)
218 * This is a bit ugly. It would be much cleaner if the DRM API would use separate 173 return;
219 * routines for handling mappings in the AGP space. Hopefully this can be done in 174
220 * a future revision of the interface... 175 if (drm_core_has_AGP(dev) &&
221 */ 176 dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
222 if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture 177 vunmap(map->handle);
223 && ((unsigned long)pt >= VMALLOC_START 178 else
224 && (unsigned long)pt < VMALLOC_END)) { 179 iounmap(map->handle);
225 unsigned long offset;
226 drm_map_t *map;
227
228 offset = drm_follow_page(pt) | ((unsigned long)pt & ~PAGE_MASK);
229 map = drm_lookup_map(offset, size, dev);
230 if (map && map->type == _DRM_AGP) {
231 vunmap(pt);
232 return;
233 }
234 }
235
236 iounmap(pt);
237} 180}
238EXPORT_SYMBOL(drm_ioremapfree); 181EXPORT_SYMBOL(drm_core_ioremapfree);
239 182
240#endif /* debug_memory */
diff --git a/drivers/char/drm/drm_memory.h b/drivers/char/drm/drm_memory.h
index f1b97aff10cf..63e425b5ea82 100644
--- a/drivers/char/drm/drm_memory.h
+++ b/drivers/char/drm/drm_memory.h
@@ -56,26 +56,6 @@
56# endif 56# endif
57#endif 57#endif
58 58
59static inline unsigned long drm_follow_page(void *vaddr)
60{
61 pgd_t *pgd = pgd_offset_k((unsigned long)vaddr);
62 pud_t *pud = pud_offset(pgd, (unsigned long)vaddr);
63 pmd_t *pmd = pmd_offset(pud, (unsigned long)vaddr);
64 pte_t *ptep = pte_offset_kernel(pmd, (unsigned long)vaddr);
65 return pte_pfn(*ptep) << PAGE_SHIFT;
66}
67
68#else /* __OS_HAS_AGP */ 59#else /* __OS_HAS_AGP */
69 60
70static inline unsigned long drm_follow_page(void *vaddr)
71{
72 return 0;
73}
74
75#endif 61#endif
76
77void *drm_ioremap(unsigned long offset, unsigned long size,
78 drm_device_t * dev);
79
80void drm_ioremapfree(void *pt, unsigned long size,
81 drm_device_t * dev);
diff --git a/drivers/char/drm/drm_memory_debug.h b/drivers/char/drm/drm_memory_debug.h
index 74581af806e1..6463271deea8 100644
--- a/drivers/char/drm/drm_memory_debug.h
+++ b/drivers/char/drm/drm_memory_debug.h
@@ -205,76 +205,6 @@ void drm_free (void *pt, size_t size, int area) {
205 } 205 }
206} 206}
207 207
208void *drm_ioremap (unsigned long offset, unsigned long size,
209 drm_device_t * dev) {
210 void *pt;
211
212 if (!size) {
213 DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
214 "Mapping 0 bytes at 0x%08lx\n", offset);
215 return NULL;
216 }
217
218 if (!(pt = drm_ioremap(offset, size, dev))) {
219 spin_lock(&drm_mem_lock);
220 ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
221 spin_unlock(&drm_mem_lock);
222 return NULL;
223 }
224 spin_lock(&drm_mem_lock);
225 ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
226 drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
227 spin_unlock(&drm_mem_lock);
228 return pt;
229}
230
231#if 0
232void *drm_ioremap_nocache (unsigned long offset, unsigned long size,
233 drm_device_t * dev) {
234 void *pt;
235
236 if (!size) {
237 DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
238 "Mapping 0 bytes at 0x%08lx\n", offset);
239 return NULL;
240 }
241
242 if (!(pt = drm_ioremap_nocache(offset, size, dev))) {
243 spin_lock(&drm_mem_lock);
244 ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
245 spin_unlock(&drm_mem_lock);
246 return NULL;
247 }
248 spin_lock(&drm_mem_lock);
249 ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
250 drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
251 spin_unlock(&drm_mem_lock);
252 return pt;
253}
254#endif /* 0 */
255
256void drm_ioremapfree (void *pt, unsigned long size, drm_device_t * dev) {
257 int alloc_count;
258 int free_count;
259
260 if (!pt)
261 DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
262 "Attempt to free NULL pointer\n");
263 else
264 drm_ioremapfree(pt, size, dev);
265
266 spin_lock(&drm_mem_lock);
267 drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size;
268 free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count;
269 alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
270 spin_unlock(&drm_mem_lock);
271 if (free_count > alloc_count) {
272 DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
273 "Excess frees: %d frees, %d allocs\n",
274 free_count, alloc_count);
275 }
276}
277
278#if __OS_HAS_AGP 208#if __OS_HAS_AGP
279 209
280DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, int pages, u32 type) { 210DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, int pages, u32 type) {
diff --git a/drivers/char/drm/drm_mm.c b/drivers/char/drm/drm_mm.c
index 617526bd5b0c..9b46b85027d0 100644
--- a/drivers/char/drm/drm_mm.c
+++ b/drivers/char/drm/drm_mm.c
@@ -42,36 +42,131 @@
42 */ 42 */
43 43
44#include "drmP.h" 44#include "drmP.h"
45#include <linux/slab.h>
46
47unsigned long drm_mm_tail_space(drm_mm_t *mm)
48{
49 struct list_head *tail_node;
50 drm_mm_node_t *entry;
51
52 tail_node = mm->ml_entry.prev;
53 entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
54 if (!entry->free)
55 return 0;
56
57 return entry->size;
58}
59
60int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size)
61{
62 struct list_head *tail_node;
63 drm_mm_node_t *entry;
64
65 tail_node = mm->ml_entry.prev;
66 entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
67 if (!entry->free)
68 return -ENOMEM;
69
70 if (entry->size <= size)
71 return -ENOMEM;
72
73 entry->size -= size;
74 return 0;
75}
76
77
78static int drm_mm_create_tail_node(drm_mm_t *mm,
79 unsigned long start,
80 unsigned long size)
81{
82 drm_mm_node_t *child;
83
84 child = (drm_mm_node_t *)
85 drm_alloc(sizeof(*child), DRM_MEM_MM);
86 if (!child)
87 return -ENOMEM;
88
89 child->free = 1;
90 child->size = size;
91 child->start = start;
92 child->mm = mm;
93
94 list_add_tail(&child->ml_entry, &mm->ml_entry);
95 list_add_tail(&child->fl_entry, &mm->fl_entry);
96
97 return 0;
98}
99
100
101int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size)
102{
103 struct list_head *tail_node;
104 drm_mm_node_t *entry;
105
106 tail_node = mm->ml_entry.prev;
107 entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
108 if (!entry->free) {
109 return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
110 }
111 entry->size += size;
112 return 0;
113}
114
115static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent,
116 unsigned long size)
117{
118 drm_mm_node_t *child;
119
120 child = (drm_mm_node_t *)
121 drm_alloc(sizeof(*child), DRM_MEM_MM);
122 if (!child)
123 return NULL;
124
125 INIT_LIST_HEAD(&child->fl_entry);
126
127 child->free = 0;
128 child->size = size;
129 child->start = parent->start;
130 child->mm = parent->mm;
131
132 list_add_tail(&child->ml_entry, &parent->ml_entry);
133 INIT_LIST_HEAD(&child->fl_entry);
134
135 parent->size -= size;
136 parent->start += size;
137 return child;
138}
139
140
45 141
46drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent, 142drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
47 unsigned long size, unsigned alignment) 143 unsigned long size, unsigned alignment)
48{ 144{
49 145
146 drm_mm_node_t *align_splitoff = NULL;
50 drm_mm_node_t *child; 147 drm_mm_node_t *child;
148 unsigned tmp = 0;
51 149
52 if (alignment) 150 if (alignment)
53 size += alignment - 1; 151 tmp = parent->start % alignment;
152
153 if (tmp) {
154 align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
155 if (!align_splitoff)
156 return NULL;
157 }
54 158
55 if (parent->size == size) { 159 if (parent->size == size) {
56 list_del_init(&parent->fl_entry); 160 list_del_init(&parent->fl_entry);
57 parent->free = 0; 161 parent->free = 0;
58 return parent; 162 return parent;
59 } else { 163 } else {
60 child = (drm_mm_node_t *) drm_alloc(sizeof(*child), DRM_MEM_MM); 164 child = drm_mm_split_at_start(parent, size);
61 if (!child) 165 }
62 return NULL;
63
64 INIT_LIST_HEAD(&child->ml_entry);
65 INIT_LIST_HEAD(&child->fl_entry);
66 166
67 child->free = 0; 167 if (align_splitoff)
68 child->size = size; 168 drm_mm_put_block(align_splitoff);
69 child->start = parent->start;
70 169
71 list_add_tail(&child->ml_entry, &parent->ml_entry);
72 parent->size -= size;
73 parent->start += size;
74 }
75 return child; 170 return child;
76} 171}
77 172
@@ -80,12 +175,12 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
80 * Otherwise add to the free stack. 175 * Otherwise add to the free stack.
81 */ 176 */
82 177
83void drm_mm_put_block(drm_mm_t * mm, drm_mm_node_t * cur) 178void drm_mm_put_block(drm_mm_node_t * cur)
84{ 179{
85 180
86 drm_mm_node_t *list_root = &mm->root_node; 181 drm_mm_t *mm = cur->mm;
87 struct list_head *cur_head = &cur->ml_entry; 182 struct list_head *cur_head = &cur->ml_entry;
88 struct list_head *root_head = &list_root->ml_entry; 183 struct list_head *root_head = &mm->ml_entry;
89 drm_mm_node_t *prev_node = NULL; 184 drm_mm_node_t *prev_node = NULL;
90 drm_mm_node_t *next_node; 185 drm_mm_node_t *next_node;
91 186
@@ -116,7 +211,7 @@ void drm_mm_put_block(drm_mm_t * mm, drm_mm_node_t * cur)
116 } 211 }
117 if (!merged) { 212 if (!merged) {
118 cur->free = 1; 213 cur->free = 1;
119 list_add(&cur->fl_entry, &list_root->fl_entry); 214 list_add(&cur->fl_entry, &mm->fl_entry);
120 } else { 215 } else {
121 list_del(&cur->ml_entry); 216 list_del(&cur->ml_entry);
122 drm_free(cur, sizeof(*cur), DRM_MEM_MM); 217 drm_free(cur, sizeof(*cur), DRM_MEM_MM);
@@ -128,20 +223,30 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
128 unsigned alignment, int best_match) 223 unsigned alignment, int best_match)
129{ 224{
130 struct list_head *list; 225 struct list_head *list;
131 const struct list_head *free_stack = &mm->root_node.fl_entry; 226 const struct list_head *free_stack = &mm->fl_entry;
132 drm_mm_node_t *entry; 227 drm_mm_node_t *entry;
133 drm_mm_node_t *best; 228 drm_mm_node_t *best;
134 unsigned long best_size; 229 unsigned long best_size;
230 unsigned wasted;
135 231
136 best = NULL; 232 best = NULL;
137 best_size = ~0UL; 233 best_size = ~0UL;
138 234
139 if (alignment)
140 size += alignment - 1;
141
142 list_for_each(list, free_stack) { 235 list_for_each(list, free_stack) {
143 entry = list_entry(list, drm_mm_node_t, fl_entry); 236 entry = list_entry(list, drm_mm_node_t, fl_entry);
144 if (entry->size >= size) { 237 wasted = 0;
238
239 if (entry->size < size)
240 continue;
241
242 if (alignment) {
243 register unsigned tmp = entry->start % alignment;
244 if (tmp)
245 wasted += alignment - tmp;
246 }
247
248
249 if (entry->size >= size + wasted) {
145 if (!best_match) 250 if (!best_match)
146 return entry; 251 return entry;
147 if (size < best_size) { 252 if (size < best_size) {
@@ -154,40 +259,32 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
154 return best; 259 return best;
155} 260}
156 261
157int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size) 262int drm_mm_clean(drm_mm_t * mm)
158{ 263{
159 drm_mm_node_t *child; 264 struct list_head *head = &mm->ml_entry;
160
161 INIT_LIST_HEAD(&mm->root_node.ml_entry);
162 INIT_LIST_HEAD(&mm->root_node.fl_entry);
163 child = (drm_mm_node_t *) drm_alloc(sizeof(*child), DRM_MEM_MM);
164 if (!child)
165 return -ENOMEM;
166
167 INIT_LIST_HEAD(&child->ml_entry);
168 INIT_LIST_HEAD(&child->fl_entry);
169 265
170 child->start = start; 266 return (head->next->next == head);
171 child->size = size; 267}
172 child->free = 1;
173 268
174 list_add(&child->fl_entry, &mm->root_node.fl_entry); 269int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)
175 list_add(&child->ml_entry, &mm->root_node.ml_entry); 270{
271 INIT_LIST_HEAD(&mm->ml_entry);
272 INIT_LIST_HEAD(&mm->fl_entry);
176 273
177 return 0; 274 return drm_mm_create_tail_node(mm, start, size);
178} 275}
179 276
180EXPORT_SYMBOL(drm_mm_init); 277EXPORT_SYMBOL(drm_mm_init);
181 278
182void drm_mm_takedown(drm_mm_t * mm) 279void drm_mm_takedown(drm_mm_t * mm)
183{ 280{
184 struct list_head *bnode = mm->root_node.fl_entry.next; 281 struct list_head *bnode = mm->fl_entry.next;
185 drm_mm_node_t *entry; 282 drm_mm_node_t *entry;
186 283
187 entry = list_entry(bnode, drm_mm_node_t, fl_entry); 284 entry = list_entry(bnode, drm_mm_node_t, fl_entry);
188 285
189 if (entry->ml_entry.next != &mm->root_node.ml_entry || 286 if (entry->ml_entry.next != &mm->ml_entry ||
190 entry->fl_entry.next != &mm->root_node.fl_entry) { 287 entry->fl_entry.next != &mm->fl_entry) {
191 DRM_ERROR("Memory manager not clean. Delaying takedown\n"); 288 DRM_ERROR("Memory manager not clean. Delaying takedown\n");
192 return; 289 return;
193 } 290 }
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
index 09398d5fbd3f..ad54b845978b 100644
--- a/drivers/char/drm/drm_pciids.h
+++ b/drivers/char/drm/drm_pciids.h
@@ -226,12 +226,14 @@
226 {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 226 {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
227 {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \ 227 {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
228 {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 228 {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
229 {0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
229 {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 230 {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
230 {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 231 {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
231 {0x1106, 0x3304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 232 {0x1106, 0x3304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
232 {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 233 {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
233 {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 234 {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
234 {0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 235 {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
236 {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \
235 {0, 0, 0} 237 {0, 0, 0}
236 238
237#define i810_PCI_IDS \ 239#define i810_PCI_IDS \
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
index 62d5fe15f046..7fd0da712142 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -500,7 +500,7 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
500 for (pt = dev->vmalist; pt; pt = pt->next) { 500 for (pt = dev->vmalist; pt; pt = pt->next) {
501 if (!(vma = pt->vma)) 501 if (!(vma = pt->vma))
502 continue; 502 continue;
503 DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx", 503 DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
504 pt->pid, 504 pt->pid,
505 vma->vm_start, 505 vma->vm_start,
506 vma->vm_end, 506 vma->vm_end,
@@ -510,7 +510,7 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
510 vma->vm_flags & VM_MAYSHARE ? 's' : 'p', 510 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
511 vma->vm_flags & VM_LOCKED ? 'l' : '-', 511 vma->vm_flags & VM_LOCKED ? 'l' : '-',
512 vma->vm_flags & VM_IO ? 'i' : '-', 512 vma->vm_flags & VM_IO ? 'i' : '-',
513 vma->vm_pgoff << PAGE_SHIFT); 513 vma->vm_pgoff);
514 514
515#if defined(__i386__) 515#if defined(__i386__)
516 pgprot = pgprot_val(vma->vm_page_prot); 516 pgprot = pgprot_val(vma->vm_page_prot);
diff --git a/drivers/char/drm/drm_sman.c b/drivers/char/drm/drm_sman.c
index 19c81d2e13d0..e15db6d6bea9 100644
--- a/drivers/char/drm/drm_sman.c
+++ b/drivers/char/drm/drm_sman.c
@@ -101,10 +101,9 @@ static void *drm_sman_mm_allocate(void *private, unsigned long size,
101 101
102static void drm_sman_mm_free(void *private, void *ref) 102static void drm_sman_mm_free(void *private, void *ref)
103{ 103{
104 drm_mm_t *mm = (drm_mm_t *) private;
105 drm_mm_node_t *node = (drm_mm_node_t *) ref; 104 drm_mm_node_t *node = (drm_mm_node_t *) ref;
106 105
107 drm_mm_put_block(mm, node); 106 drm_mm_put_block(node);
108} 107}
109 108
110static void drm_sman_mm_destroy(void *private) 109static void drm_sman_mm_destroy(void *private)
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index b9cfc077f6bc..54a632848955 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -70,7 +70,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
70 if (!dev->agp || !dev->agp->cant_use_aperture) 70 if (!dev->agp || !dev->agp->cant_use_aperture)
71 goto vm_nopage_error; 71 goto vm_nopage_error;
72 72
73 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff << PAGE_SHIFT, &hash)) 73 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
74 goto vm_nopage_error; 74 goto vm_nopage_error;
75 75
76 r_list = drm_hash_entry(hash, drm_map_list_t, hash); 76 r_list = drm_hash_entry(hash, drm_map_list_t, hash);
@@ -227,7 +227,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
227 map->size); 227 map->size);
228 DRM_DEBUG("mtrr_del = %d\n", retcode); 228 DRM_DEBUG("mtrr_del = %d\n", retcode);
229 } 229 }
230 drm_ioremapfree(map->handle, map->size, dev); 230 iounmap(map->handle);
231 break; 231 break;
232 case _DRM_SHM: 232 case _DRM_SHM:
233 vfree(map->handle); 233 vfree(map->handle);
@@ -463,8 +463,8 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
463 lock_kernel(); 463 lock_kernel();
464 dev = priv->head->dev; 464 dev = priv->head->dev;
465 dma = dev->dma; 465 dma = dev->dma;
466 DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n", 466 DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
467 vma->vm_start, vma->vm_end, vma->vm_pgoff << PAGE_SHIFT); 467 vma->vm_start, vma->vm_end, vma->vm_pgoff);
468 468
469 /* Length must match exact page count */ 469 /* Length must match exact page count */
470 if (!dma || (length >> PAGE_SHIFT) != dma->page_count) { 470 if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
@@ -537,8 +537,8 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
537 unsigned long offset = 0; 537 unsigned long offset = 0;
538 drm_hash_item_t *hash; 538 drm_hash_item_t *hash;
539 539
540 DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n", 540 DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
541 vma->vm_start, vma->vm_end, vma->vm_pgoff << PAGE_SHIFT); 541 vma->vm_start, vma->vm_end, vma->vm_pgoff);
542 542
543 if (!priv->authenticated) 543 if (!priv->authenticated)
544 return -EACCES; 544 return -EACCES;
@@ -547,7 +547,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
547 * the AGP mapped at physical address 0 547 * the AGP mapped at physical address 0
548 * --BenH. 548 * --BenH.
549 */ 549 */
550 if (!(vma->vm_pgoff << PAGE_SHIFT) 550 if (!vma->vm_pgoff
551#if __OS_HAS_AGP 551#if __OS_HAS_AGP
552 && (!dev->agp 552 && (!dev->agp
553 || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE) 553 || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
@@ -555,7 +555,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
555 ) 555 )
556 return drm_mmap_dma(filp, vma); 556 return drm_mmap_dma(filp, vma);
557 557
558 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff << PAGE_SHIFT, &hash)) { 558 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
559 DRM_ERROR("Could not find map\n"); 559 DRM_ERROR("Could not find map\n");
560 return -EINVAL; 560 return -EINVAL;
561 } 561 }
diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c
index fa2de70f7401..60cb4e45a75e 100644
--- a/drivers/char/drm/i810_dma.c
+++ b/drivers/char/drm/i810_dma.c
@@ -219,8 +219,7 @@ static int i810_dma_cleanup(drm_device_t * dev)
219 (drm_i810_private_t *) dev->dev_private; 219 (drm_i810_private_t *) dev->dev_private;
220 220
221 if (dev_priv->ring.virtual_start) { 221 if (dev_priv->ring.virtual_start) {
222 drm_ioremapfree((void *)dev_priv->ring.virtual_start, 222 drm_core_ioremapfree(&dev_priv->ring.map, dev);
223 dev_priv->ring.Size, dev);
224 } 223 }
225 if (dev_priv->hw_status_page) { 224 if (dev_priv->hw_status_page) {
226 pci_free_consistent(dev->pdev, PAGE_SIZE, 225 pci_free_consistent(dev->pdev, PAGE_SIZE,
@@ -236,9 +235,9 @@ static int i810_dma_cleanup(drm_device_t * dev)
236 for (i = 0; i < dma->buf_count; i++) { 235 for (i = 0; i < dma->buf_count; i++) {
237 drm_buf_t *buf = dma->buflist[i]; 236 drm_buf_t *buf = dma->buflist[i];
238 drm_i810_buf_priv_t *buf_priv = buf->dev_private; 237 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
238
239 if (buf_priv->kernel_virtual && buf->total) 239 if (buf_priv->kernel_virtual && buf->total)
240 drm_ioremapfree(buf_priv->kernel_virtual, 240 drm_core_ioremapfree(&buf_priv->map, dev);
241 buf->total, dev);
242 } 241 }
243 } 242 }
244 return 0; 243 return 0;
@@ -311,8 +310,15 @@ static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv)
311 310
312 *buf_priv->in_use = I810_BUF_FREE; 311 *buf_priv->in_use = I810_BUF_FREE;
313 312
314 buf_priv->kernel_virtual = drm_ioremap(buf->bus_address, 313 buf_priv->map.offset = buf->bus_address;
315 buf->total, dev); 314 buf_priv->map.size = buf->total;
315 buf_priv->map.type = _DRM_AGP;
316 buf_priv->map.flags = 0;
317 buf_priv->map.mtrr = 0;
318
319 drm_core_ioremap(&buf_priv->map, dev);
320 buf_priv->kernel_virtual = buf_priv->map.handle;
321
316 } 322 }
317 return 0; 323 return 0;
318} 324}
@@ -363,18 +369,24 @@ static int i810_dma_initialize(drm_device_t * dev,
363 dev_priv->ring.End = init->ring_end; 369 dev_priv->ring.End = init->ring_end;
364 dev_priv->ring.Size = init->ring_size; 370 dev_priv->ring.Size = init->ring_size;
365 371
366 dev_priv->ring.virtual_start = drm_ioremap(dev->agp->base + 372 dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
367 init->ring_start, 373 dev_priv->ring.map.size = init->ring_size;
368 init->ring_size, dev); 374 dev_priv->ring.map.type = _DRM_AGP;
375 dev_priv->ring.map.flags = 0;
376 dev_priv->ring.map.mtrr = 0;
369 377
370 if (dev_priv->ring.virtual_start == NULL) { 378 drm_core_ioremap(&dev_priv->ring.map, dev);
379
380 if (dev_priv->ring.map.handle == NULL) {
371 dev->dev_private = (void *)dev_priv; 381 dev->dev_private = (void *)dev_priv;
372 i810_dma_cleanup(dev); 382 i810_dma_cleanup(dev);
373 DRM_ERROR("can not ioremap virtual address for" 383 DRM_ERROR("can not ioremap virtual address for"
374 " ring buffer\n"); 384 " ring buffer\n");
375 return -ENOMEM; 385 return DRM_ERR(ENOMEM);
376 } 386 }
377 387
388 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
389
378 dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; 390 dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
379 391
380 dev_priv->w = init->w; 392 dev_priv->w = init->w;
diff --git a/drivers/char/drm/i810_drv.h b/drivers/char/drm/i810_drv.h
index e8cf3ff606f0..e6df49f4928a 100644
--- a/drivers/char/drm/i810_drv.h
+++ b/drivers/char/drm/i810_drv.h
@@ -61,6 +61,7 @@ typedef struct drm_i810_buf_priv {
61 int currently_mapped; 61 int currently_mapped;
62 void *virtual; 62 void *virtual;
63 void *kernel_virtual; 63 void *kernel_virtual;
64 drm_local_map_t map;
64} drm_i810_buf_priv_t; 65} drm_i810_buf_priv_t;
65 66
66typedef struct _drm_i810_ring_buffer { 67typedef struct _drm_i810_ring_buffer {
@@ -72,6 +73,7 @@ typedef struct _drm_i810_ring_buffer {
72 int head; 73 int head;
73 int tail; 74 int tail;
74 int space; 75 int space;
76 drm_local_map_t map;
75} drm_i810_ring_buffer_t; 77} drm_i810_ring_buffer_t;
76 78
77typedef struct drm_i810_private { 79typedef struct drm_i810_private {
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c
index 4f0e5746ab33..95224455ec0c 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/char/drm/i830_dma.c
@@ -223,8 +223,7 @@ static int i830_dma_cleanup(drm_device_t * dev)
223 (drm_i830_private_t *) dev->dev_private; 223 (drm_i830_private_t *) dev->dev_private;
224 224
225 if (dev_priv->ring.virtual_start) { 225 if (dev_priv->ring.virtual_start) {
226 drm_ioremapfree((void *)dev_priv->ring.virtual_start, 226 drm_core_ioremapfree(&dev_priv->ring.map, dev);
227 dev_priv->ring.Size, dev);
228 } 227 }
229 if (dev_priv->hw_status_page) { 228 if (dev_priv->hw_status_page) {
230 pci_free_consistent(dev->pdev, PAGE_SIZE, 229 pci_free_consistent(dev->pdev, PAGE_SIZE,
@@ -242,8 +241,7 @@ static int i830_dma_cleanup(drm_device_t * dev)
242 drm_buf_t *buf = dma->buflist[i]; 241 drm_buf_t *buf = dma->buflist[i];
243 drm_i830_buf_priv_t *buf_priv = buf->dev_private; 242 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
244 if (buf_priv->kernel_virtual && buf->total) 243 if (buf_priv->kernel_virtual && buf->total)
245 drm_ioremapfree(buf_priv->kernel_virtual, 244 drm_core_ioremapfree(&buf_priv->map, dev);
246 buf->total, dev);
247 } 245 }
248 } 246 }
249 return 0; 247 return 0;
@@ -320,8 +318,14 @@ static int i830_freelist_init(drm_device_t * dev, drm_i830_private_t * dev_priv)
320 318
321 *buf_priv->in_use = I830_BUF_FREE; 319 *buf_priv->in_use = I830_BUF_FREE;
322 320
323 buf_priv->kernel_virtual = drm_ioremap(buf->bus_address, 321 buf_priv->map.offset = buf->bus_address;
324 buf->total, dev); 322 buf_priv->map.size = buf->total;
323 buf_priv->map.type = _DRM_AGP;
324 buf_priv->map.flags = 0;
325 buf_priv->map.mtrr = 0;
326
327 drm_core_ioremap(&buf_priv->map, dev);
328 buf_priv->kernel_virtual = buf_priv->map.handle;
325 } 329 }
326 return 0; 330 return 0;
327} 331}
@@ -373,18 +377,24 @@ static int i830_dma_initialize(drm_device_t * dev,
373 dev_priv->ring.End = init->ring_end; 377 dev_priv->ring.End = init->ring_end;
374 dev_priv->ring.Size = init->ring_size; 378 dev_priv->ring.Size = init->ring_size;
375 379
376 dev_priv->ring.virtual_start = drm_ioremap(dev->agp->base + 380 dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
377 init->ring_start, 381 dev_priv->ring.map.size = init->ring_size;
378 init->ring_size, dev); 382 dev_priv->ring.map.type = _DRM_AGP;
383 dev_priv->ring.map.flags = 0;
384 dev_priv->ring.map.mtrr = 0;
385
386 drm_core_ioremap(&dev_priv->ring.map, dev);
379 387
380 if (dev_priv->ring.virtual_start == NULL) { 388 if (dev_priv->ring.map.handle == NULL) {
381 dev->dev_private = (void *)dev_priv; 389 dev->dev_private = (void *)dev_priv;
382 i830_dma_cleanup(dev); 390 i830_dma_cleanup(dev);
383 DRM_ERROR("can not ioremap virtual address for" 391 DRM_ERROR("can not ioremap virtual address for"
384 " ring buffer\n"); 392 " ring buffer\n");
385 return -ENOMEM; 393 return DRM_ERR(ENOMEM);
386 } 394 }
387 395
396 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
397
388 dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; 398 dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
389 399
390 dev_priv->w = init->w; 400 dev_priv->w = init->w;
diff --git a/drivers/char/drm/i830_drv.h b/drivers/char/drm/i830_drv.h
index 85bc5be6f916..e91f94afb4bb 100644
--- a/drivers/char/drm/i830_drv.h
+++ b/drivers/char/drm/i830_drv.h
@@ -68,6 +68,7 @@ typedef struct drm_i830_buf_priv {
68 int currently_mapped; 68 int currently_mapped;
69 void __user *virtual; 69 void __user *virtual;
70 void *kernel_virtual; 70 void *kernel_virtual;
71 drm_local_map_t map;
71} drm_i830_buf_priv_t; 72} drm_i830_buf_priv_t;
72 73
73typedef struct _drm_i830_ring_buffer { 74typedef struct _drm_i830_ring_buffer {
@@ -79,6 +80,7 @@ typedef struct _drm_i830_ring_buffer {
79 int head; 80 int head;
80 int tail; 81 int tail;
81 int space; 82 int space;
83 drm_local_map_t map;
82} drm_i830_ring_buffer_t; 84} drm_i830_ring_buffer_t;
83 85
84typedef struct drm_i830_private { 86typedef struct drm_i830_private {
diff --git a/drivers/char/drm/via_dma.c b/drivers/char/drm/via_dma.c
index a691ae74129d..c0539c6299cf 100644
--- a/drivers/char/drm/via_dma.c
+++ b/drivers/char/drm/via_dma.c
@@ -190,6 +190,11 @@ static int via_initialize(drm_device_t * dev,
190 return DRM_ERR(EFAULT); 190 return DRM_ERR(EFAULT);
191 } 191 }
192 192
193 if (dev_priv->chipset == VIA_DX9_0) {
194 DRM_ERROR("AGP DMA is not supported on this chip\n");
195 return DRM_ERR(EINVAL);
196 }
197
193 dev_priv->ring.map.offset = dev->agp->base + init->offset; 198 dev_priv->ring.map.offset = dev->agp->base + init->offset;
194 dev_priv->ring.map.size = init->size; 199 dev_priv->ring.map.size = init->size;
195 dev_priv->ring.map.type = 0; 200 dev_priv->ring.map.type = 0;
@@ -480,6 +485,7 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
480 VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16)); 485 VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
481 VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi); 486 VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
482 VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo); 487 VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
488 VIA_READ(VIA_REG_TRANSPACE);
483 } 489 }
484 } 490 }
485 return paused; 491 return paused;
@@ -557,8 +563,9 @@ static void via_cmdbuf_start(drm_via_private_t * dev_priv)
557 563
558 VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi); 564 VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
559 VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo); 565 VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
560 566 DRM_WRITEMEMORYBARRIER();
561 VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK); 567 VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
568 VIA_READ(VIA_REG_TRANSPACE);
562} 569}
563 570
564static void via_pad_cache(drm_via_private_t * dev_priv, int qwords) 571static void via_pad_cache(drm_via_private_t * dev_priv, int qwords)
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
index 806f9ce5f47b..2054d5773717 100644
--- a/drivers/char/drm/via_dmablit.c
+++ b/drivers/char/drm/via_dmablit.c
@@ -218,7 +218,9 @@ via_fire_dmablit(drm_device_t *dev, drm_via_sg_info_t *vsg, int engine)
218 VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE); 218 VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
219 VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0); 219 VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
220 VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start); 220 VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
221 DRM_WRITEMEMORYBARRIER();
221 VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS); 222 VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
223 VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
222} 224}
223 225
224/* 226/*
diff --git a/drivers/char/drm/via_drv.h b/drivers/char/drm/via_drv.h
index d21b5b75da0f..8b8778d4a423 100644
--- a/drivers/char/drm/via_drv.h
+++ b/drivers/char/drm/via_drv.h
@@ -29,10 +29,10 @@
29 29
30#define DRIVER_NAME "via" 30#define DRIVER_NAME "via"
31#define DRIVER_DESC "VIA Unichrome / Pro" 31#define DRIVER_DESC "VIA Unichrome / Pro"
32#define DRIVER_DATE "20060529" 32#define DRIVER_DATE "20061227"
33 33
34#define DRIVER_MAJOR 2 34#define DRIVER_MAJOR 2
35#define DRIVER_MINOR 10 35#define DRIVER_MINOR 11
36#define DRIVER_PATCHLEVEL 0 36#define DRIVER_PATCHLEVEL 0
37 37
38#include "via_verifier.h" 38#include "via_verifier.h"
@@ -79,7 +79,7 @@ typedef struct drm_via_private {
79 char pci_buf[VIA_PCI_BUF_SIZE]; 79 char pci_buf[VIA_PCI_BUF_SIZE];
80 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE]; 80 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
81 uint32_t num_fire_offsets; 81 uint32_t num_fire_offsets;
82 int pro_group_a; 82 int chipset;
83 drm_via_irq_t via_irqs[VIA_NUM_IRQS]; 83 drm_via_irq_t via_irqs[VIA_NUM_IRQS];
84 unsigned num_irqs; 84 unsigned num_irqs;
85 maskarray_t *irq_masks; 85 maskarray_t *irq_masks;
@@ -96,8 +96,9 @@ typedef struct drm_via_private {
96} drm_via_private_t; 96} drm_via_private_t;
97 97
98enum via_family { 98enum via_family {
99 VIA_OTHER = 0, 99 VIA_OTHER = 0, /* Baseline */
100 VIA_PRO_GROUP_A, 100 VIA_PRO_GROUP_A, /* Another video engine and DMA commands */
101 VIA_DX9_0 /* Same video as pro_group_a, but 3D is unsupported */
101}; 102};
102 103
103/* VIA MMIO register access */ 104/* VIA MMIO register access */
diff --git a/drivers/char/drm/via_irq.c b/drivers/char/drm/via_irq.c
index c33d068cde19..1ac5941ad237 100644
--- a/drivers/char/drm/via_irq.c
+++ b/drivers/char/drm/via_irq.c
@@ -258,12 +258,16 @@ void via_driver_irq_preinstall(drm_device_t * dev)
258 dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE; 258 dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
259 dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING; 259 dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;
260 260
261 dev_priv->irq_masks = (dev_priv->pro_group_a) ? 261 if (dev_priv->chipset == VIA_PRO_GROUP_A ||
262 via_pro_group_a_irqs : via_unichrome_irqs; 262 dev_priv->chipset == VIA_DX9_0) {
263 dev_priv->num_irqs = (dev_priv->pro_group_a) ? 263 dev_priv->irq_masks = via_pro_group_a_irqs;
264 via_num_pro_group_a : via_num_unichrome; 264 dev_priv->num_irqs = via_num_pro_group_a;
265 dev_priv->irq_map = (dev_priv->pro_group_a) ? 265 dev_priv->irq_map = via_irqmap_pro_group_a;
266 via_irqmap_pro_group_a : via_irqmap_unichrome; 266 } else {
267 dev_priv->irq_masks = via_unichrome_irqs;
268 dev_priv->num_irqs = via_num_unichrome;
269 dev_priv->irq_map = via_irqmap_unichrome;
270 }
267 271
268 for (i = 0; i < dev_priv->num_irqs; ++i) { 272 for (i = 0; i < dev_priv->num_irqs; ++i) {
269 atomic_set(&cur_irq->irq_received, 0); 273 atomic_set(&cur_irq->irq_received, 0);
diff --git a/drivers/char/drm/via_map.c b/drivers/char/drm/via_map.c
index 782011e0a58d..4e3fc072aa3b 100644
--- a/drivers/char/drm/via_map.c
+++ b/drivers/char/drm/via_map.c
@@ -106,8 +106,7 @@ int via_driver_load(drm_device_t *dev, unsigned long chipset)
106 106
107 dev->dev_private = (void *)dev_priv; 107 dev->dev_private = (void *)dev_priv;
108 108
109 if (chipset == VIA_PRO_GROUP_A) 109 dev_priv->chipset = chipset;
110 dev_priv->pro_group_a = 1;
111 110
112 ret = drm_sman_init(&dev_priv->sman, 2, 12, 8); 111 ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
113 if (ret) { 112 if (ret) {
diff --git a/drivers/char/drm/via_verifier.c b/drivers/char/drm/via_verifier.c
index 70c897c88766..2e7e08078287 100644
--- a/drivers/char/drm/via_verifier.c
+++ b/drivers/char/drm/via_verifier.c
@@ -306,6 +306,7 @@ static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
306 unsigned long lo = ~0, hi = 0, tmp; 306 unsigned long lo = ~0, hi = 0, tmp;
307 uint32_t *addr, *pitch, *height, tex; 307 uint32_t *addr, *pitch, *height, tex;
308 unsigned i; 308 unsigned i;
309 int npot;
309 310
310 if (end > 9) 311 if (end > 9)
311 end = 9; 312 end = 9;
@@ -316,12 +317,15 @@ static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
316 &(cur_seq->t_addr[tex = cur_seq->texture][start]); 317 &(cur_seq->t_addr[tex = cur_seq->texture][start]);
317 pitch = &(cur_seq->pitch[tex][start]); 318 pitch = &(cur_seq->pitch[tex][start]);
318 height = &(cur_seq->height[tex][start]); 319 height = &(cur_seq->height[tex][start]);
319 320 npot = cur_seq->tex_npot[tex];
320 for (i = start; i <= end; ++i) { 321 for (i = start; i <= end; ++i) {
321 tmp = *addr++; 322 tmp = *addr++;
322 if (tmp < lo) 323 if (tmp < lo)
323 lo = tmp; 324 lo = tmp;
324 tmp += (*height++ << *pitch++); 325 if (i == 0 && npot)
326 tmp += (*height++ * *pitch++);
327 else
328 tmp += (*height++ << *pitch++);
325 if (tmp > hi) 329 if (tmp > hi)
326 hi = tmp; 330 hi = tmp;
327 } 331 }
@@ -443,13 +447,21 @@ investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t * cur_seq)
443 return 0; 447 return 0;
444 case check_texture_addr3: 448 case check_texture_addr3:
445 cur_seq->unfinished = tex_address; 449 cur_seq->unfinished = tex_address;
446 tmp = ((cmd >> 24) - 0x2B); 450 tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit);
447 cur_seq->pitch[cur_seq->texture][tmp] = 451 if (tmp == 0 &&
448 (cmd & 0x00F00000) >> 20; 452 (cmd & HC_HTXnEnPit_MASK)) {
449 if (!tmp && (cmd & 0x000FFFFF)) { 453 cur_seq->pitch[cur_seq->texture][tmp] =
450 DRM_ERROR 454 (cmd & HC_HTXnLnPit_MASK);
451 ("Unimplemented texture level 0 pitch mode.\n"); 455 cur_seq->tex_npot[cur_seq->texture] = 1;
452 return 2; 456 } else {
457 cur_seq->pitch[cur_seq->texture][tmp] =
458 (cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT;
459 cur_seq->tex_npot[cur_seq->texture] = 0;
460 if (cmd & 0x000FFFFF) {
461 DRM_ERROR
462 ("Unimplemented texture level 0 pitch mode.\n");
463 return 2;
464 }
453 } 465 }
454 return 0; 466 return 0;
455 case check_texture_addr4: 467 case check_texture_addr4:
@@ -961,7 +973,13 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size,
961 uint32_t cmd; 973 uint32_t cmd;
962 const uint32_t *buf_end = buf + (size >> 2); 974 const uint32_t *buf_end = buf + (size >> 2);
963 verifier_state_t state = state_command; 975 verifier_state_t state = state_command;
964 int pro_group_a = dev_priv->pro_group_a; 976 int cme_video;
977 int supported_3d;
978
979 cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A ||
980 dev_priv->chipset == VIA_DX9_0);
981
982 supported_3d = dev_priv->chipset != VIA_DX9_0;
965 983
966 hc_state->dev = dev; 984 hc_state->dev = dev;
967 hc_state->unfinished = no_sequence; 985 hc_state->unfinished = no_sequence;
@@ -986,17 +1004,21 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size,
986 state = via_check_vheader6(&buf, buf_end); 1004 state = via_check_vheader6(&buf, buf_end);
987 break; 1005 break;
988 case state_command: 1006 case state_command:
989 if (HALCYON_HEADER2 == (cmd = *buf)) 1007 if ((HALCYON_HEADER2 == (cmd = *buf)) &&
1008 supported_3d)
990 state = state_header2; 1009 state = state_header2;
991 else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1) 1010 else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
992 state = state_header1; 1011 state = state_header1;
993 else if (pro_group_a 1012 else if (cme_video
994 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5) 1013 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
995 state = state_vheader5; 1014 state = state_vheader5;
996 else if (pro_group_a 1015 else if (cme_video
997 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6) 1016 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
998 state = state_vheader6; 1017 state = state_vheader6;
999 else { 1018 else if ((cmd == HALCYON_HEADER2) && !supported_3d) {
1019 DRM_ERROR("Accelerated 3D is not supported on this chipset yet.\n");
1020 state = state_error;
1021 } else {
1000 DRM_ERROR 1022 DRM_ERROR
1001 ("Invalid / Unimplemented DMA HEADER command. 0x%x\n", 1023 ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
1002 cmd); 1024 cmd);
diff --git a/drivers/char/drm/via_verifier.h b/drivers/char/drm/via_verifier.h
index 256590fcc22a..b77f59df0278 100644
--- a/drivers/char/drm/via_verifier.h
+++ b/drivers/char/drm/via_verifier.h
@@ -43,6 +43,7 @@ typedef struct {
43 uint32_t tex_level_lo[2]; 43 uint32_t tex_level_lo[2];
44 uint32_t tex_level_hi[2]; 44 uint32_t tex_level_hi[2];
45 uint32_t tex_palette_size[2]; 45 uint32_t tex_palette_size[2];
46 uint32_t tex_npot[2];
46 drm_via_sequence_t unfinished; 47 drm_via_sequence_t unfinished;
47 int agp_texture; 48 int agp_texture;
48 int multitex; 49 int multitex;
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index f1afd26a509f..a7b33d2f5991 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1802,7 +1802,7 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
1802 return -ENODEV; 1802 return -ENODEV;
1803 } 1803 }
1804 1804
1805 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) 1805 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1806 addr_space = IPMI_MEM_ADDR_SPACE; 1806 addr_space = IPMI_MEM_ADDR_SPACE;
1807 else 1807 else
1808 addr_space = IPMI_IO_ADDR_SPACE; 1808 addr_space = IPMI_IO_ADDR_SPACE;
@@ -1848,19 +1848,19 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
1848 info->irq_setup = NULL; 1848 info->irq_setup = NULL;
1849 } 1849 }
1850 1850
1851 if (spmi->addr.register_bit_width) { 1851 if (spmi->addr.bit_width) {
1852 /* A (hopefully) properly formed register bit width. */ 1852 /* A (hopefully) properly formed register bit width. */
1853 info->io.regspacing = spmi->addr.register_bit_width / 8; 1853 info->io.regspacing = spmi->addr.bit_width / 8;
1854 } else { 1854 } else {
1855 info->io.regspacing = DEFAULT_REGSPACING; 1855 info->io.regspacing = DEFAULT_REGSPACING;
1856 } 1856 }
1857 info->io.regsize = info->io.regspacing; 1857 info->io.regsize = info->io.regspacing;
1858 info->io.regshift = spmi->addr.register_bit_offset; 1858 info->io.regshift = spmi->addr.bit_offset;
1859 1859
1860 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { 1860 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1861 info->io_setup = mem_setup; 1861 info->io_setup = mem_setup;
1862 info->io.addr_type = IPMI_IO_ADDR_SPACE; 1862 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1863 } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) { 1863 } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1864 info->io_setup = port_setup; 1864 info->io_setup = port_setup;
1865 info->io.addr_type = IPMI_MEM_ADDR_SPACE; 1865 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1866 } else { 1866 } else {
@@ -1888,10 +1888,8 @@ static __devinit void acpi_find_bmc(void)
1888 return; 1888 return;
1889 1889
1890 for (i = 0; ; i++) { 1890 for (i = 0; ; i++) {
1891 status = acpi_get_firmware_table("SPMI", i+1, 1891 status = acpi_get_table(ACPI_SIG_SPMI, i+1,
1892 ACPI_LOGICAL_ADDRESSING, 1892 (struct acpi_table_header **)&spmi);
1893 (struct acpi_table_header **)
1894 &spmi);
1895 if (status != AE_OK) 1893 if (status != AE_OK)
1896 return; 1894 return;
1897 1895
diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
index a611972024e6..7fca5f470beb 100644
--- a/drivers/char/tpm/tpm_bios.c
+++ b/drivers/char/tpm/tpm_bios.c
@@ -372,10 +372,8 @@ static int read_log(struct tpm_bios_log *log)
372 } 372 }
373 373
374 /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */ 374 /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
375 status = acpi_get_firmware_table(ACPI_TCPA_SIG, 1, 375 status = acpi_get_table(ACPI_SIG_TCPA, 1,
376 ACPI_LOGICAL_ADDRESSING, 376 (struct acpi_table_header **)&buff);
377 (struct acpi_table_header **)
378 &buff);
379 377
380 if (ACPI_FAILURE(status)) { 378 if (ACPI_FAILURE(status)) {
381 printk(KERN_ERR "%s: ERROR - Could not get TCPA table\n", 379 printk(KERN_ERR "%s: ERROR - Could not get TCPA table\n",
@@ -409,7 +407,7 @@ static int read_log(struct tpm_bios_log *log)
409 407
410 log->bios_event_log_end = log->bios_event_log + len; 408 log->bios_event_log_end = log->bios_event_log + len;
411 409
412 acpi_os_map_memory(start, len, (void *) &virt); 410 virt = acpi_os_map_memory(start, len);
413 411
414 memcpy(log->bios_event_log, virt, len); 412 memcpy(log->bios_event_log, virt, len);
415 413
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 879250d3d069..ff8c4beaace4 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -51,6 +51,8 @@ config CRYPTO_DEV_PADLOCK_SHA
51 If unsure say M. The compiled module will be 51 If unsure say M. The compiled module will be
52 called padlock-sha.ko 52 called padlock-sha.ko
53 53
54source "arch/s390/crypto/Kconfig"
55
54config CRYPTO_DEV_GEODE 56config CRYPTO_DEV_GEODE
55 tristate "Support for the Geode LX AES engine" 57 tristate "Support for the Geode LX AES engine"
56 depends on CRYPTO && X86_32 && PCI 58 depends on CRYPTO && X86_32 && PCI
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
index c2ad72fefd9d..2b4b76e8bd72 100644
--- a/drivers/firmware/pcdp.c
+++ b/drivers/firmware/pcdp.c
@@ -26,7 +26,7 @@ setup_serial_console(struct pcdp_uart *uart)
26 static char options[64], *p = options; 26 static char options[64], *p = options;
27 char parity; 27 char parity;
28 28
29 mmio = (uart->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY); 29 mmio = (uart->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY);
30 p += sprintf(p, "console=uart,%s,0x%lx", 30 p += sprintf(p, "console=uart,%s,0x%lx",
31 mmio ? "mmio" : "io", uart->addr.address); 31 mmio ? "mmio" : "io", uart->addr.address);
32 if (uart->baud) { 32 if (uart->baud) {
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index ec796ad087df..850788f4dd2e 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -22,5 +22,19 @@ config HID
22 22
23 If unsure, say Y 23 If unsure, say Y
24 24
25config HID_DEBUG
26 bool "HID debugging support"
27 depends on HID
28 ---help---
29 This option lets the HID layer output diagnostics about its internal
30 state, resolve HID usages, dump HID fields, etc. Individual HID drivers
31 use this debugging facility to output information about individual HID
32 devices, etc.
33
34 This feature is useful for those who are either debugging the HID parser
35 or any HID hardware device.
36
37 If unsure, say N
38
25endmenu 39endmenu
26 40
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 6432392110bf..52e97d8f3c95 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -1,15 +1,8 @@
1# 1#
2# Makefile for the HID driver 2# Makefile for the HID driver
3# 3#
4 4hid-objs := hid-core.o hid-input.o
5# Multipart objects.
6hid-objs := hid-core.o hid-input.o
7
8# Optional parts of multipart objects.
9 5
10obj-$(CONFIG_HID) += hid.o 6obj-$(CONFIG_HID) += hid.o
11 7hid-$(CONFIG_HID_DEBUG) += hid-debug.o
12ifeq ($(CONFIG_INPUT_DEBUG),y)
13EXTRA_CFLAGS += -DDEBUG
14endif
15 8
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 49f18f5b2514..8c7d48eff7b7 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -28,11 +28,9 @@
28#include <linux/input.h> 28#include <linux/input.h>
29#include <linux/wait.h> 29#include <linux/wait.h>
30 30
31#undef DEBUG
32#undef DEBUG_DATA
33
34#include <linux/hid.h> 31#include <linux/hid.h>
35#include <linux/hiddev.h> 32#include <linux/hiddev.h>
33#include <linux/hid-debug.h>
36 34
37/* 35/*
38 * Version Information 36 * Version Information
@@ -951,7 +949,7 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
951 return -1; 949 return -1;
952 } 950 }
953 951
954#ifdef DEBUG_DATA 952#ifdef CONFIG_HID_DEBUG
955 printk(KERN_DEBUG __FILE__ ": report (size %u) (%snumbered)\n", size, report_enum->numbered ? "" : "un"); 953 printk(KERN_DEBUG __FILE__ ": report (size %u) (%snumbered)\n", size, report_enum->numbered ? "" : "un");
956#endif 954#endif
957 955
@@ -961,7 +959,7 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
961 size--; 959 size--;
962 } 960 }
963 961
964#ifdef DEBUG_DATA 962#ifdef CONFIG_HID_DEBUG
965 { 963 {
966 int i; 964 int i;
967 printk(KERN_DEBUG __FILE__ ": report %d (size %u) = ", n, size); 965 printk(KERN_DEBUG __FILE__ ": report %d (size %u) = ", n, size);
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
new file mode 100644
index 000000000000..89241be4ec9b
--- /dev/null
+++ b/drivers/hid/hid-debug.c
@@ -0,0 +1,764 @@
1/*
2 * $Id: hid-debug.h,v 1.8 2001/09/25 09:37:57 vojtech Exp $
3 *
4 * (c) 1999 Andreas Gal <gal@cs.uni-magdeburg.de>
5 * (c) 2000-2001 Vojtech Pavlik <vojtech@ucw.cz>
6 * (c) 2007 Jiri Kosina
7 *
8 * Some debug stuff for the HID parser.
9 */
10
11/*
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 *
26 * Should you need to contact me, the author, you can do so either by
27 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
28 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
29 */
30
31#include <linux/hid.h>
32
33struct hid_usage_entry {
34 unsigned page;
35 unsigned usage;
36 char *description;
37};
38
39static const struct hid_usage_entry hid_usage_table[] = {
40 { 0, 0, "Undefined" },
41 { 1, 0, "GenericDesktop" },
42 {0, 0x01, "Pointer"},
43 {0, 0x02, "Mouse"},
44 {0, 0x04, "Joystick"},
45 {0, 0x05, "GamePad"},
46 {0, 0x06, "Keyboard"},
47 {0, 0x07, "Keypad"},
48 {0, 0x08, "MultiAxis"},
49 {0, 0x30, "X"},
50 {0, 0x31, "Y"},
51 {0, 0x32, "Z"},
52 {0, 0x33, "Rx"},
53 {0, 0x34, "Ry"},
54 {0, 0x35, "Rz"},
55 {0, 0x36, "Slider"},
56 {0, 0x37, "Dial"},
57 {0, 0x38, "Wheel"},
58 {0, 0x39, "HatSwitch"},
59 {0, 0x3a, "CountedBuffer"},
60 {0, 0x3b, "ByteCount"},
61 {0, 0x3c, "MotionWakeup"},
62 {0, 0x3d, "Start"},
63 {0, 0x3e, "Select"},
64 {0, 0x40, "Vx"},
65 {0, 0x41, "Vy"},
66 {0, 0x42, "Vz"},
67 {0, 0x43, "Vbrx"},
68 {0, 0x44, "Vbry"},
69 {0, 0x45, "Vbrz"},
70 {0, 0x46, "Vno"},
71 {0, 0x80, "SystemControl"},
72 {0, 0x81, "SystemPowerDown"},
73 {0, 0x82, "SystemSleep"},
74 {0, 0x83, "SystemWakeUp"},
75 {0, 0x84, "SystemContextMenu"},
76 {0, 0x85, "SystemMainMenu"},
77 {0, 0x86, "SystemAppMenu"},
78 {0, 0x87, "SystemMenuHelp"},
79 {0, 0x88, "SystemMenuExit"},
80 {0, 0x89, "SystemMenuSelect"},
81 {0, 0x8a, "SystemMenuRight"},
82 {0, 0x8b, "SystemMenuLeft"},
83 {0, 0x8c, "SystemMenuUp"},
84 {0, 0x8d, "SystemMenuDown"},
85 {0, 0x90, "D-PadUp"},
86 {0, 0x91, "D-PadDown"},
87 {0, 0x92, "D-PadRight"},
88 {0, 0x93, "D-PadLeft"},
89 { 2, 0, "Simulation" },
90 {0, 0xb0, "Aileron"},
91 {0, 0xb1, "AileronTrim"},
92 {0, 0xb2, "Anti-Torque"},
93 {0, 0xb3, "Autopilot"},
94 {0, 0xb4, "Chaff"},
95 {0, 0xb5, "Collective"},
96 {0, 0xb6, "DiveBrake"},
97 {0, 0xb7, "ElectronicCountermeasures"},
98 {0, 0xb8, "Elevator"},
99 {0, 0xb9, "ElevatorTrim"},
100 {0, 0xba, "Rudder"},
101 {0, 0xbb, "Throttle"},
102 {0, 0xbc, "FlightCommunications"},
103 {0, 0xbd, "FlareRelease"},
104 {0, 0xbe, "LandingGear"},
105 {0, 0xbf, "ToeBrake"},
106 { 7, 0, "Keyboard" },
107 { 8, 0, "LED" },
108 {0, 0x01, "NumLock"},
109 {0, 0x02, "CapsLock"},
110 {0, 0x03, "ScrollLock"},
111 {0, 0x04, "Compose"},
112 {0, 0x05, "Kana"},
113 {0, 0x4b, "GenericIndicator"},
114 { 9, 0, "Button" },
115 { 10, 0, "Ordinal" },
116 { 12, 0, "Consumer" },
117 {0, 0x238, "HorizontalWheel"},
118 { 13, 0, "Digitizers" },
119 {0, 0x01, "Digitizer"},
120 {0, 0x02, "Pen"},
121 {0, 0x03, "LightPen"},
122 {0, 0x04, "TouchScreen"},
123 {0, 0x05, "TouchPad"},
124 {0, 0x20, "Stylus"},
125 {0, 0x21, "Puck"},
126 {0, 0x22, "Finger"},
127 {0, 0x30, "TipPressure"},
128 {0, 0x31, "BarrelPressure"},
129 {0, 0x32, "InRange"},
130 {0, 0x33, "Touch"},
131 {0, 0x34, "UnTouch"},
132 {0, 0x35, "Tap"},
133 {0, 0x39, "TabletFunctionKey"},
134 {0, 0x3a, "ProgramChangeKey"},
135 {0, 0x3c, "Invert"},
136 {0, 0x42, "TipSwitch"},
137 {0, 0x43, "SecondaryTipSwitch"},
138 {0, 0x44, "BarrelSwitch"},
139 {0, 0x45, "Eraser"},
140 {0, 0x46, "TabletPick"},
141 { 15, 0, "PhysicalInterfaceDevice" },
142 {0, 0x00, "Undefined"},
143 {0, 0x01, "Physical_Interface_Device"},
144 {0, 0x20, "Normal"},
145 {0, 0x21, "Set_Effect_Report"},
146 {0, 0x22, "Effect_Block_Index"},
147 {0, 0x23, "Parameter_Block_Offset"},
148 {0, 0x24, "ROM_Flag"},
149 {0, 0x25, "Effect_Type"},
150 {0, 0x26, "ET_Constant_Force"},
151 {0, 0x27, "ET_Ramp"},
152 {0, 0x28, "ET_Custom_Force_Data"},
153 {0, 0x30, "ET_Square"},
154 {0, 0x31, "ET_Sine"},
155 {0, 0x32, "ET_Triangle"},
156 {0, 0x33, "ET_Sawtooth_Up"},
157 {0, 0x34, "ET_Sawtooth_Down"},
158 {0, 0x40, "ET_Spring"},
159 {0, 0x41, "ET_Damper"},
160 {0, 0x42, "ET_Inertia"},
161 {0, 0x43, "ET_Friction"},
162 {0, 0x50, "Duration"},
163 {0, 0x51, "Sample_Period"},
164 {0, 0x52, "Gain"},
165 {0, 0x53, "Trigger_Button"},
166 {0, 0x54, "Trigger_Repeat_Interval"},
167 {0, 0x55, "Axes_Enable"},
168 {0, 0x56, "Direction_Enable"},
169 {0, 0x57, "Direction"},
170 {0, 0x58, "Type_Specific_Block_Offset"},
171 {0, 0x59, "Block_Type"},
172 {0, 0x5A, "Set_Envelope_Report"},
173 {0, 0x5B, "Attack_Level"},
174 {0, 0x5C, "Attack_Time"},
175 {0, 0x5D, "Fade_Level"},
176 {0, 0x5E, "Fade_Time"},
177 {0, 0x5F, "Set_Condition_Report"},
178 {0, 0x60, "CP_Offset"},
179 {0, 0x61, "Positive_Coefficient"},
180 {0, 0x62, "Negative_Coefficient"},
181 {0, 0x63, "Positive_Saturation"},
182 {0, 0x64, "Negative_Saturation"},
183 {0, 0x65, "Dead_Band"},
184 {0, 0x66, "Download_Force_Sample"},
185 {0, 0x67, "Isoch_Custom_Force_Enable"},
186 {0, 0x68, "Custom_Force_Data_Report"},
187 {0, 0x69, "Custom_Force_Data"},
188 {0, 0x6A, "Custom_Force_Vendor_Defined_Data"},
189 {0, 0x6B, "Set_Custom_Force_Report"},
190 {0, 0x6C, "Custom_Force_Data_Offset"},
191 {0, 0x6D, "Sample_Count"},
192 {0, 0x6E, "Set_Periodic_Report"},
193 {0, 0x6F, "Offset"},
194 {0, 0x70, "Magnitude"},
195 {0, 0x71, "Phase"},
196 {0, 0x72, "Period"},
197 {0, 0x73, "Set_Constant_Force_Report"},
198 {0, 0x74, "Set_Ramp_Force_Report"},
199 {0, 0x75, "Ramp_Start"},
200 {0, 0x76, "Ramp_End"},
201 {0, 0x77, "Effect_Operation_Report"},
202 {0, 0x78, "Effect_Operation"},
203 {0, 0x79, "Op_Effect_Start"},
204 {0, 0x7A, "Op_Effect_Start_Solo"},
205 {0, 0x7B, "Op_Effect_Stop"},
206 {0, 0x7C, "Loop_Count"},
207 {0, 0x7D, "Device_Gain_Report"},
208 {0, 0x7E, "Device_Gain"},
209 {0, 0x7F, "PID_Pool_Report"},
210 {0, 0x80, "RAM_Pool_Size"},
211 {0, 0x81, "ROM_Pool_Size"},
212 {0, 0x82, "ROM_Effect_Block_Count"},
213 {0, 0x83, "Simultaneous_Effects_Max"},
214 {0, 0x84, "Pool_Alignment"},
215 {0, 0x85, "PID_Pool_Move_Report"},
216 {0, 0x86, "Move_Source"},
217 {0, 0x87, "Move_Destination"},
218 {0, 0x88, "Move_Length"},
219 {0, 0x89, "PID_Block_Load_Report"},
220 {0, 0x8B, "Block_Load_Status"},
221 {0, 0x8C, "Block_Load_Success"},
222 {0, 0x8D, "Block_Load_Full"},
223 {0, 0x8E, "Block_Load_Error"},
224 {0, 0x8F, "Block_Handle"},
225 {0, 0x90, "PID_Block_Free_Report"},
226 {0, 0x91, "Type_Specific_Block_Handle"},
227 {0, 0x92, "PID_State_Report"},
228 {0, 0x94, "Effect_Playing"},
229 {0, 0x95, "PID_Device_Control_Report"},
230 {0, 0x96, "PID_Device_Control"},
231 {0, 0x97, "DC_Enable_Actuators"},
232 {0, 0x98, "DC_Disable_Actuators"},
233 {0, 0x99, "DC_Stop_All_Effects"},
234 {0, 0x9A, "DC_Device_Reset"},
235 {0, 0x9B, "DC_Device_Pause"},
236 {0, 0x9C, "DC_Device_Continue"},
237 {0, 0x9F, "Device_Paused"},
238 {0, 0xA0, "Actuators_Enabled"},
239 {0, 0xA4, "Safety_Switch"},
240 {0, 0xA5, "Actuator_Override_Switch"},
241 {0, 0xA6, "Actuator_Power"},
242 {0, 0xA7, "Start_Delay"},
243 {0, 0xA8, "Parameter_Block_Size"},
244 {0, 0xA9, "Device_Managed_Pool"},
245 {0, 0xAA, "Shared_Parameter_Blocks"},
246 {0, 0xAB, "Create_New_Effect_Report"},
247 {0, 0xAC, "RAM_Pool_Available"},
248 { 0x84, 0, "Power Device" },
249 { 0x84, 0x02, "PresentStatus" },
250 { 0x84, 0x03, "ChangeStatus" },
251 { 0x84, 0x04, "UPS" },
252 { 0x84, 0x05, "PowerSupply" },
253 { 0x84, 0x10, "BatterySystem" },
254 { 0x84, 0x11, "BatterySystemID" },
255 { 0x84, 0x12, "Battery" },
256 { 0x84, 0x13, "BatteryID" },
257 { 0x84, 0x14, "Charger" },
258 { 0x84, 0x15, "ChargerID" },
259 { 0x84, 0x16, "PowerConverter" },
260 { 0x84, 0x17, "PowerConverterID" },
261 { 0x84, 0x18, "OutletSystem" },
262 { 0x84, 0x19, "OutletSystemID" },
263 { 0x84, 0x1a, "Input" },
264 { 0x84, 0x1b, "InputID" },
265 { 0x84, 0x1c, "Output" },
266 { 0x84, 0x1d, "OutputID" },
267 { 0x84, 0x1e, "Flow" },
268 { 0x84, 0x1f, "FlowID" },
269 { 0x84, 0x20, "Outlet" },
270 { 0x84, 0x21, "OutletID" },
271 { 0x84, 0x22, "Gang" },
272 { 0x84, 0x24, "PowerSummary" },
273 { 0x84, 0x25, "PowerSummaryID" },
274 { 0x84, 0x30, "Voltage" },
275 { 0x84, 0x31, "Current" },
276 { 0x84, 0x32, "Frequency" },
277 { 0x84, 0x33, "ApparentPower" },
278 { 0x84, 0x35, "PercentLoad" },
279 { 0x84, 0x40, "ConfigVoltage" },
280 { 0x84, 0x41, "ConfigCurrent" },
281 { 0x84, 0x43, "ConfigApparentPower" },
282 { 0x84, 0x53, "LowVoltageTransfer" },
283 { 0x84, 0x54, "HighVoltageTransfer" },
284 { 0x84, 0x56, "DelayBeforeStartup" },
285 { 0x84, 0x57, "DelayBeforeShutdown" },
286 { 0x84, 0x58, "Test" },
287 { 0x84, 0x5a, "AudibleAlarmControl" },
288 { 0x84, 0x60, "Present" },
289 { 0x84, 0x61, "Good" },
290 { 0x84, 0x62, "InternalFailure" },
291 { 0x84, 0x65, "Overload" },
292 { 0x84, 0x66, "OverCharged" },
293 { 0x84, 0x67, "OverTemperature" },
294 { 0x84, 0x68, "ShutdownRequested" },
295 { 0x84, 0x69, "ShutdownImminent" },
296 { 0x84, 0x6b, "SwitchOn/Off" },
297 { 0x84, 0x6c, "Switchable" },
298 { 0x84, 0x6d, "Used" },
299 { 0x84, 0x6e, "Boost" },
300 { 0x84, 0x73, "CommunicationLost" },
301 { 0x84, 0xfd, "iManufacturer" },
302 { 0x84, 0xfe, "iProduct" },
303 { 0x84, 0xff, "iSerialNumber" },
304 { 0x85, 0, "Battery System" },
305 { 0x85, 0x01, "SMBBatteryMode" },
306 { 0x85, 0x02, "SMBBatteryStatus" },
307 { 0x85, 0x03, "SMBAlarmWarning" },
308 { 0x85, 0x04, "SMBChargerMode" },
309 { 0x85, 0x05, "SMBChargerStatus" },
310 { 0x85, 0x06, "SMBChargerSpecInfo" },
311 { 0x85, 0x07, "SMBSelectorState" },
312 { 0x85, 0x08, "SMBSelectorPresets" },
313 { 0x85, 0x09, "SMBSelectorInfo" },
314 { 0x85, 0x29, "RemainingCapacityLimit" },
315 { 0x85, 0x2c, "CapacityMode" },
316 { 0x85, 0x42, "BelowRemainingCapacityLimit" },
317 { 0x85, 0x44, "Charging" },
318 { 0x85, 0x45, "Discharging" },
319 { 0x85, 0x4b, "NeedReplacement" },
320 { 0x85, 0x66, "RemainingCapacity" },
321 { 0x85, 0x68, "RunTimeToEmpty" },
322 { 0x85, 0x6a, "AverageTimeToFull" },
323 { 0x85, 0x83, "DesignCapacity" },
324 { 0x85, 0x85, "ManufacturerDate" },
325 { 0x85, 0x89, "iDeviceChemistry" },
326 { 0x85, 0x8b, "Rechargable" },
327 { 0x85, 0x8f, "iOEMInformation" },
328 { 0x85, 0x8d, "CapacityGranularity1" },
329 { 0x85, 0xd0, "ACPresent" },
330 /* pages 0xff00 to 0xffff are vendor-specific */
331 { 0xffff, 0, "Vendor-specific-FF" },
332 { 0, 0, NULL }
333};
334
335static void resolv_usage_page(unsigned page) {
336 const struct hid_usage_entry *p;
337
338 for (p = hid_usage_table; p->description; p++)
339 if (p->page == page) {
340 printk("%s", p->description);
341 return;
342 }
343 printk("%04x", page);
344}
345
346void hid_resolv_usage(unsigned usage) {
347 const struct hid_usage_entry *p;
348
349 resolv_usage_page(usage >> 16);
350 printk(".");
351 for (p = hid_usage_table; p->description; p++)
352 if (p->page == (usage >> 16)) {
353 for(++p; p->description && p->usage != 0; p++)
354 if (p->usage == (usage & 0xffff)) {
355 printk("%s", p->description);
356 return;
357 }
358 break;
359 }
360 printk("%04x", usage & 0xffff);
361}
362EXPORT_SYMBOL_GPL(hid_resolv_usage);
363
364__inline__ static void tab(int n) {
365 while (n--) printk(" ");
366}
367
368void hid_dump_field(struct hid_field *field, int n) {
369 int j;
370
371 if (field->physical) {
372 tab(n);
373 printk("Physical(");
374 hid_resolv_usage(field->physical); printk(")\n");
375 }
376 if (field->logical) {
377 tab(n);
378 printk("Logical(");
379 hid_resolv_usage(field->logical); printk(")\n");
380 }
381 tab(n); printk("Usage(%d)\n", field->maxusage);
382 for (j = 0; j < field->maxusage; j++) {
383 tab(n+2); hid_resolv_usage(field->usage[j].hid); printk("\n");
384 }
385 if (field->logical_minimum != field->logical_maximum) {
386 tab(n); printk("Logical Minimum(%d)\n", field->logical_minimum);
387 tab(n); printk("Logical Maximum(%d)\n", field->logical_maximum);
388 }
389 if (field->physical_minimum != field->physical_maximum) {
390 tab(n); printk("Physical Minimum(%d)\n", field->physical_minimum);
391 tab(n); printk("Physical Maximum(%d)\n", field->physical_maximum);
392 }
393 if (field->unit_exponent) {
394 tab(n); printk("Unit Exponent(%d)\n", field->unit_exponent);
395 }
396 if (field->unit) {
397 char *systems[5] = { "None", "SI Linear", "SI Rotation", "English Linear", "English Rotation" };
398 char *units[5][8] = {
399 { "None", "None", "None", "None", "None", "None", "None", "None" },
400 { "None", "Centimeter", "Gram", "Seconds", "Kelvin", "Ampere", "Candela", "None" },
401 { "None", "Radians", "Gram", "Seconds", "Kelvin", "Ampere", "Candela", "None" },
402 { "None", "Inch", "Slug", "Seconds", "Fahrenheit", "Ampere", "Candela", "None" },
403 { "None", "Degrees", "Slug", "Seconds", "Fahrenheit", "Ampere", "Candela", "None" }
404 };
405
406 int i;
407 int sys;
408 __u32 data = field->unit;
409
410 /* First nibble tells us which system we're in. */
411 sys = data & 0xf;
412 data >>= 4;
413
414 if(sys > 4) {
415 tab(n); printk("Unit(Invalid)\n");
416 }
417 else {
418 int earlier_unit = 0;
419
420 tab(n); printk("Unit(%s : ", systems[sys]);
421
422 for (i=1 ; i<sizeof(__u32)*2 ; i++) {
423 char nibble = data & 0xf;
424 data >>= 4;
425 if (nibble != 0) {
426 if(earlier_unit++ > 0)
427 printk("*");
428 printk("%s", units[sys][i]);
429 if(nibble != 1) {
430 /* This is a _signed_ nibble(!) */
431
432 int val = nibble & 0x7;
433 if(nibble & 0x08)
434 val = -((0x7 & ~val) +1);
435 printk("^%d", val);
436 }
437 }
438 }
439 printk(")\n");
440 }
441 }
442 tab(n); printk("Report Size(%u)\n", field->report_size);
443 tab(n); printk("Report Count(%u)\n", field->report_count);
444 tab(n); printk("Report Offset(%u)\n", field->report_offset);
445
446 tab(n); printk("Flags( ");
447 j = field->flags;
448 printk("%s", HID_MAIN_ITEM_CONSTANT & j ? "Constant " : "");
449 printk("%s", HID_MAIN_ITEM_VARIABLE & j ? "Variable " : "Array ");
450 printk("%s", HID_MAIN_ITEM_RELATIVE & j ? "Relative " : "Absolute ");
451 printk("%s", HID_MAIN_ITEM_WRAP & j ? "Wrap " : "");
452 printk("%s", HID_MAIN_ITEM_NONLINEAR & j ? "NonLinear " : "");
453 printk("%s", HID_MAIN_ITEM_NO_PREFERRED & j ? "NoPrefferedState " : "");
454 printk("%s", HID_MAIN_ITEM_NULL_STATE & j ? "NullState " : "");
455 printk("%s", HID_MAIN_ITEM_VOLATILE & j ? "Volatile " : "");
456 printk("%s", HID_MAIN_ITEM_BUFFERED_BYTE & j ? "BufferedByte " : "");
457 printk(")\n");
458}
459EXPORT_SYMBOL_GPL(hid_dump_field);
460
461void hid_dump_device(struct hid_device *device) {
462 struct hid_report_enum *report_enum;
463 struct hid_report *report;
464 struct list_head *list;
465 unsigned i,k;
466 static char *table[] = {"INPUT", "OUTPUT", "FEATURE"};
467
468 for (i = 0; i < HID_REPORT_TYPES; i++) {
469 report_enum = device->report_enum + i;
470 list = report_enum->report_list.next;
471 while (list != &report_enum->report_list) {
472 report = (struct hid_report *) list;
473 tab(2);
474 printk("%s", table[i]);
475 if (report->id)
476 printk("(%d)", report->id);
477 printk("[%s]", table[report->type]);
478 printk("\n");
479 for (k = 0; k < report->maxfield; k++) {
480 tab(4);
481 printk("Field(%d)\n", k);
482 hid_dump_field(report->field[k], 6);
483 }
484 list = list->next;
485 }
486 }
487}
488EXPORT_SYMBOL_GPL(hid_dump_device);
489
490void hid_dump_input(struct hid_usage *usage, __s32 value) {
491 printk("hid-debug: input ");
492 hid_resolv_usage(usage->hid);
493 printk(" = %d\n", value);
494}
495EXPORT_SYMBOL_GPL(hid_dump_input);
496
497static char *events[EV_MAX + 1] = {
498 [EV_SYN] = "Sync", [EV_KEY] = "Key",
499 [EV_REL] = "Relative", [EV_ABS] = "Absolute",
500 [EV_MSC] = "Misc", [EV_LED] = "LED",
501 [EV_SND] = "Sound", [EV_REP] = "Repeat",
502 [EV_FF] = "ForceFeedback", [EV_PWR] = "Power",
503 [EV_FF_STATUS] = "ForceFeedbackStatus",
504};
505
506static char *syncs[2] = {
507 [SYN_REPORT] = "Report", [SYN_CONFIG] = "Config",
508};
509static char *keys[KEY_MAX + 1] = {
510 [KEY_RESERVED] = "Reserved", [KEY_ESC] = "Esc",
511 [KEY_1] = "1", [KEY_2] = "2",
512 [KEY_3] = "3", [KEY_4] = "4",
513 [KEY_5] = "5", [KEY_6] = "6",
514 [KEY_7] = "7", [KEY_8] = "8",
515 [KEY_9] = "9", [KEY_0] = "0",
516 [KEY_MINUS] = "Minus", [KEY_EQUAL] = "Equal",
517 [KEY_BACKSPACE] = "Backspace", [KEY_TAB] = "Tab",
518 [KEY_Q] = "Q", [KEY_W] = "W",
519 [KEY_E] = "E", [KEY_R] = "R",
520 [KEY_T] = "T", [KEY_Y] = "Y",
521 [KEY_U] = "U", [KEY_I] = "I",
522 [KEY_O] = "O", [KEY_P] = "P",
523 [KEY_LEFTBRACE] = "LeftBrace", [KEY_RIGHTBRACE] = "RightBrace",
524 [KEY_ENTER] = "Enter", [KEY_LEFTCTRL] = "LeftControl",
525 [KEY_A] = "A", [KEY_S] = "S",
526 [KEY_D] = "D", [KEY_F] = "F",
527 [KEY_G] = "G", [KEY_H] = "H",
528 [KEY_J] = "J", [KEY_K] = "K",
529 [KEY_L] = "L", [KEY_SEMICOLON] = "Semicolon",
530 [KEY_APOSTROPHE] = "Apostrophe", [KEY_GRAVE] = "Grave",
531 [KEY_LEFTSHIFT] = "LeftShift", [KEY_BACKSLASH] = "BackSlash",
532 [KEY_Z] = "Z", [KEY_X] = "X",
533 [KEY_C] = "C", [KEY_V] = "V",
534 [KEY_B] = "B", [KEY_N] = "N",
535 [KEY_M] = "M", [KEY_COMMA] = "Comma",
536 [KEY_DOT] = "Dot", [KEY_SLASH] = "Slash",
537 [KEY_RIGHTSHIFT] = "RightShift", [KEY_KPASTERISK] = "KPAsterisk",
538 [KEY_LEFTALT] = "LeftAlt", [KEY_SPACE] = "Space",
539 [KEY_CAPSLOCK] = "CapsLock", [KEY_F1] = "F1",
540 [KEY_F2] = "F2", [KEY_F3] = "F3",
541 [KEY_F4] = "F4", [KEY_F5] = "F5",
542 [KEY_F6] = "F6", [KEY_F7] = "F7",
543 [KEY_F8] = "F8", [KEY_F9] = "F9",
544 [KEY_F10] = "F10", [KEY_NUMLOCK] = "NumLock",
545 [KEY_SCROLLLOCK] = "ScrollLock", [KEY_KP7] = "KP7",
546 [KEY_KP8] = "KP8", [KEY_KP9] = "KP9",
547 [KEY_KPMINUS] = "KPMinus", [KEY_KP4] = "KP4",
548 [KEY_KP5] = "KP5", [KEY_KP6] = "KP6",
549 [KEY_KPPLUS] = "KPPlus", [KEY_KP1] = "KP1",
550 [KEY_KP2] = "KP2", [KEY_KP3] = "KP3",
551 [KEY_KP0] = "KP0", [KEY_KPDOT] = "KPDot",
552 [KEY_ZENKAKUHANKAKU] = "Zenkaku/Hankaku", [KEY_102ND] = "102nd",
553 [KEY_F11] = "F11", [KEY_F12] = "F12",
554 [KEY_RO] = "RO", [KEY_KATAKANA] = "Katakana",
555 [KEY_HIRAGANA] = "HIRAGANA", [KEY_HENKAN] = "Henkan",
556 [KEY_KATAKANAHIRAGANA] = "Katakana/Hiragana", [KEY_MUHENKAN] = "Muhenkan",
557 [KEY_KPJPCOMMA] = "KPJpComma", [KEY_KPENTER] = "KPEnter",
558 [KEY_RIGHTCTRL] = "RightCtrl", [KEY_KPSLASH] = "KPSlash",
559 [KEY_SYSRQ] = "SysRq", [KEY_RIGHTALT] = "RightAlt",
560 [KEY_LINEFEED] = "LineFeed", [KEY_HOME] = "Home",
561 [KEY_UP] = "Up", [KEY_PAGEUP] = "PageUp",
562 [KEY_LEFT] = "Left", [KEY_RIGHT] = "Right",
563 [KEY_END] = "End", [KEY_DOWN] = "Down",
564 [KEY_PAGEDOWN] = "PageDown", [KEY_INSERT] = "Insert",
565 [KEY_DELETE] = "Delete", [KEY_MACRO] = "Macro",
566 [KEY_MUTE] = "Mute", [KEY_VOLUMEDOWN] = "VolumeDown",
567 [KEY_VOLUMEUP] = "VolumeUp", [KEY_POWER] = "Power",
568 [KEY_KPEQUAL] = "KPEqual", [KEY_KPPLUSMINUS] = "KPPlusMinus",
569 [KEY_PAUSE] = "Pause", [KEY_KPCOMMA] = "KPComma",
570 [KEY_HANGUEL] = "Hangeul", [KEY_HANJA] = "Hanja",
571 [KEY_YEN] = "Yen", [KEY_LEFTMETA] = "LeftMeta",
572 [KEY_RIGHTMETA] = "RightMeta", [KEY_COMPOSE] = "Compose",
573 [KEY_STOP] = "Stop", [KEY_AGAIN] = "Again",
574 [KEY_PROPS] = "Props", [KEY_UNDO] = "Undo",
575 [KEY_FRONT] = "Front", [KEY_COPY] = "Copy",
576 [KEY_OPEN] = "Open", [KEY_PASTE] = "Paste",
577 [KEY_FIND] = "Find", [KEY_CUT] = "Cut",
578 [KEY_HELP] = "Help", [KEY_MENU] = "Menu",
579 [KEY_CALC] = "Calc", [KEY_SETUP] = "Setup",
580 [KEY_SLEEP] = "Sleep", [KEY_WAKEUP] = "WakeUp",
581 [KEY_FILE] = "File", [KEY_SENDFILE] = "SendFile",
582 [KEY_DELETEFILE] = "DeleteFile", [KEY_XFER] = "X-fer",
583 [KEY_PROG1] = "Prog1", [KEY_PROG2] = "Prog2",
584 [KEY_WWW] = "WWW", [KEY_MSDOS] = "MSDOS",
585 [KEY_COFFEE] = "Coffee", [KEY_DIRECTION] = "Direction",
586 [KEY_CYCLEWINDOWS] = "CycleWindows", [KEY_MAIL] = "Mail",
587 [KEY_BOOKMARKS] = "Bookmarks", [KEY_COMPUTER] = "Computer",
588 [KEY_BACK] = "Back", [KEY_FORWARD] = "Forward",
589 [KEY_CLOSECD] = "CloseCD", [KEY_EJECTCD] = "EjectCD",
590 [KEY_EJECTCLOSECD] = "EjectCloseCD", [KEY_NEXTSONG] = "NextSong",
591 [KEY_PLAYPAUSE] = "PlayPause", [KEY_PREVIOUSSONG] = "PreviousSong",
592 [KEY_STOPCD] = "StopCD", [KEY_RECORD] = "Record",
593 [KEY_REWIND] = "Rewind", [KEY_PHONE] = "Phone",
594 [KEY_ISO] = "ISOKey", [KEY_CONFIG] = "Config",
595 [KEY_HOMEPAGE] = "HomePage", [KEY_REFRESH] = "Refresh",
596 [KEY_EXIT] = "Exit", [KEY_MOVE] = "Move",
597 [KEY_EDIT] = "Edit", [KEY_SCROLLUP] = "ScrollUp",
598 [KEY_SCROLLDOWN] = "ScrollDown", [KEY_KPLEFTPAREN] = "KPLeftParenthesis",
599 [KEY_KPRIGHTPAREN] = "KPRightParenthesis", [KEY_NEW] = "New",
600 [KEY_REDO] = "Redo", [KEY_F13] = "F13",
601 [KEY_F14] = "F14", [KEY_F15] = "F15",
602 [KEY_F16] = "F16", [KEY_F17] = "F17",
603 [KEY_F18] = "F18", [KEY_F19] = "F19",
604 [KEY_F20] = "F20", [KEY_F21] = "F21",
605 [KEY_F22] = "F22", [KEY_F23] = "F23",
606 [KEY_F24] = "F24", [KEY_PLAYCD] = "PlayCD",
607 [KEY_PAUSECD] = "PauseCD", [KEY_PROG3] = "Prog3",
608 [KEY_PROG4] = "Prog4", [KEY_SUSPEND] = "Suspend",
609 [KEY_CLOSE] = "Close", [KEY_PLAY] = "Play",
610 [KEY_FASTFORWARD] = "FastForward", [KEY_BASSBOOST] = "BassBoost",
611 [KEY_PRINT] = "Print", [KEY_HP] = "HP",
612 [KEY_CAMERA] = "Camera", [KEY_SOUND] = "Sound",
613 [KEY_QUESTION] = "Question", [KEY_EMAIL] = "Email",
614 [KEY_CHAT] = "Chat", [KEY_SEARCH] = "Search",
615 [KEY_CONNECT] = "Connect", [KEY_FINANCE] = "Finance",
616 [KEY_SPORT] = "Sport", [KEY_SHOP] = "Shop",
617 [KEY_ALTERASE] = "AlternateErase", [KEY_CANCEL] = "Cancel",
618 [KEY_BRIGHTNESSDOWN] = "BrightnessDown", [KEY_BRIGHTNESSUP] = "BrightnessUp",
619 [KEY_MEDIA] = "Media", [KEY_UNKNOWN] = "Unknown",
620 [BTN_0] = "Btn0", [BTN_1] = "Btn1",
621 [BTN_2] = "Btn2", [BTN_3] = "Btn3",
622 [BTN_4] = "Btn4", [BTN_5] = "Btn5",
623 [BTN_6] = "Btn6", [BTN_7] = "Btn7",
624 [BTN_8] = "Btn8", [BTN_9] = "Btn9",
625 [BTN_LEFT] = "LeftBtn", [BTN_RIGHT] = "RightBtn",
626 [BTN_MIDDLE] = "MiddleBtn", [BTN_SIDE] = "SideBtn",
627 [BTN_EXTRA] = "ExtraBtn", [BTN_FORWARD] = "ForwardBtn",
628 [BTN_BACK] = "BackBtn", [BTN_TASK] = "TaskBtn",
629 [BTN_TRIGGER] = "Trigger", [BTN_THUMB] = "ThumbBtn",
630 [BTN_THUMB2] = "ThumbBtn2", [BTN_TOP] = "TopBtn",
631 [BTN_TOP2] = "TopBtn2", [BTN_PINKIE] = "PinkieBtn",
632 [BTN_BASE] = "BaseBtn", [BTN_BASE2] = "BaseBtn2",
633 [BTN_BASE3] = "BaseBtn3", [BTN_BASE4] = "BaseBtn4",
634 [BTN_BASE5] = "BaseBtn5", [BTN_BASE6] = "BaseBtn6",
635 [BTN_DEAD] = "BtnDead", [BTN_A] = "BtnA",
636 [BTN_B] = "BtnB", [BTN_C] = "BtnC",
637 [BTN_X] = "BtnX", [BTN_Y] = "BtnY",
638 [BTN_Z] = "BtnZ", [BTN_TL] = "BtnTL",
639 [BTN_TR] = "BtnTR", [BTN_TL2] = "BtnTL2",
640 [BTN_TR2] = "BtnTR2", [BTN_SELECT] = "BtnSelect",
641 [BTN_START] = "BtnStart", [BTN_MODE] = "BtnMode",
642 [BTN_THUMBL] = "BtnThumbL", [BTN_THUMBR] = "BtnThumbR",
643 [BTN_TOOL_PEN] = "ToolPen", [BTN_TOOL_RUBBER] = "ToolRubber",
644 [BTN_TOOL_BRUSH] = "ToolBrush", [BTN_TOOL_PENCIL] = "ToolPencil",
645 [BTN_TOOL_AIRBRUSH] = "ToolAirbrush", [BTN_TOOL_FINGER] = "ToolFinger",
646 [BTN_TOOL_MOUSE] = "ToolMouse", [BTN_TOOL_LENS] = "ToolLens",
647 [BTN_TOUCH] = "Touch", [BTN_STYLUS] = "Stylus",
648 [BTN_STYLUS2] = "Stylus2", [BTN_TOOL_DOUBLETAP] = "ToolDoubleTap",
649 [BTN_TOOL_TRIPLETAP] = "ToolTripleTap", [BTN_GEAR_DOWN] = "WheelBtn",
650 [BTN_GEAR_UP] = "Gear up", [KEY_OK] = "Ok",
651 [KEY_SELECT] = "Select", [KEY_GOTO] = "Goto",
652 [KEY_CLEAR] = "Clear", [KEY_POWER2] = "Power2",
653 [KEY_OPTION] = "Option", [KEY_INFO] = "Info",
654 [KEY_TIME] = "Time", [KEY_VENDOR] = "Vendor",
655 [KEY_ARCHIVE] = "Archive", [KEY_PROGRAM] = "Program",
656 [KEY_CHANNEL] = "Channel", [KEY_FAVORITES] = "Favorites",
657 [KEY_EPG] = "EPG", [KEY_PVR] = "PVR",
658 [KEY_MHP] = "MHP", [KEY_LANGUAGE] = "Language",
659 [KEY_TITLE] = "Title", [KEY_SUBTITLE] = "Subtitle",
660 [KEY_ANGLE] = "Angle", [KEY_ZOOM] = "Zoom",
661 [KEY_MODE] = "Mode", [KEY_KEYBOARD] = "Keyboard",
662 [KEY_SCREEN] = "Screen", [KEY_PC] = "PC",
663 [KEY_TV] = "TV", [KEY_TV2] = "TV2",
664 [KEY_VCR] = "VCR", [KEY_VCR2] = "VCR2",
665 [KEY_SAT] = "Sat", [KEY_SAT2] = "Sat2",
666 [KEY_CD] = "CD", [KEY_TAPE] = "Tape",
667 [KEY_RADIO] = "Radio", [KEY_TUNER] = "Tuner",
668 [KEY_PLAYER] = "Player", [KEY_TEXT] = "Text",
669 [KEY_DVD] = "DVD", [KEY_AUX] = "Aux",
670 [KEY_MP3] = "MP3", [KEY_AUDIO] = "Audio",
671 [KEY_VIDEO] = "Video", [KEY_DIRECTORY] = "Directory",
672 [KEY_LIST] = "List", [KEY_MEMO] = "Memo",
673 [KEY_CALENDAR] = "Calendar", [KEY_RED] = "Red",
674 [KEY_GREEN] = "Green", [KEY_YELLOW] = "Yellow",
675 [KEY_BLUE] = "Blue", [KEY_CHANNELUP] = "ChannelUp",
676 [KEY_CHANNELDOWN] = "ChannelDown", [KEY_FIRST] = "First",
677 [KEY_LAST] = "Last", [KEY_AB] = "AB",
678 [KEY_NEXT] = "Next", [KEY_RESTART] = "Restart",
679 [KEY_SLOW] = "Slow", [KEY_SHUFFLE] = "Shuffle",
680 [KEY_BREAK] = "Break", [KEY_PREVIOUS] = "Previous",
681 [KEY_DIGITS] = "Digits", [KEY_TEEN] = "TEEN",
682 [KEY_TWEN] = "TWEN", [KEY_DEL_EOL] = "DeleteEOL",
683 [KEY_DEL_EOS] = "DeleteEOS", [KEY_INS_LINE] = "InsertLine",
684 [KEY_DEL_LINE] = "DeleteLine",
685 [KEY_SEND] = "Send", [KEY_REPLY] = "Reply",
686 [KEY_FORWARDMAIL] = "ForwardMail", [KEY_SAVE] = "Save",
687 [KEY_DOCUMENTS] = "Documents",
688 [KEY_FN] = "Fn", [KEY_FN_ESC] = "Fn+ESC",
689 [KEY_FN_1] = "Fn+1", [KEY_FN_2] = "Fn+2",
690 [KEY_FN_B] = "Fn+B", [KEY_FN_D] = "Fn+D",
691 [KEY_FN_E] = "Fn+E", [KEY_FN_F] = "Fn+F",
692 [KEY_FN_S] = "Fn+S",
693 [KEY_FN_F1] = "Fn+F1", [KEY_FN_F2] = "Fn+F2",
694 [KEY_FN_F3] = "Fn+F3", [KEY_FN_F4] = "Fn+F4",
695 [KEY_FN_F5] = "Fn+F5", [KEY_FN_F6] = "Fn+F6",
696 [KEY_FN_F7] = "Fn+F7", [KEY_FN_F8] = "Fn+F8",
697 [KEY_FN_F9] = "Fn+F9", [KEY_FN_F10] = "Fn+F10",
698 [KEY_FN_F11] = "Fn+F11", [KEY_FN_F12] = "Fn+F12",
699 [KEY_KBDILLUMTOGGLE] = "KbdIlluminationToggle",
700 [KEY_KBDILLUMDOWN] = "KbdIlluminationDown",
701 [KEY_KBDILLUMUP] = "KbdIlluminationUp",
702 [KEY_SWITCHVIDEOMODE] = "SwitchVideoMode",
703};
704
705static char *relatives[REL_MAX + 1] = {
706 [REL_X] = "X", [REL_Y] = "Y",
707 [REL_Z] = "Z", [REL_RX] = "Rx",
708 [REL_RY] = "Ry", [REL_RZ] = "Rz",
709 [REL_HWHEEL] = "HWheel", [REL_DIAL] = "Dial",
710 [REL_WHEEL] = "Wheel", [REL_MISC] = "Misc",
711};
712
713static char *absolutes[ABS_MAX + 1] = {
714 [ABS_X] = "X", [ABS_Y] = "Y",
715 [ABS_Z] = "Z", [ABS_RX] = "Rx",
716 [ABS_RY] = "Ry", [ABS_RZ] = "Rz",
717 [ABS_THROTTLE] = "Throttle", [ABS_RUDDER] = "Rudder",
718 [ABS_WHEEL] = "Wheel", [ABS_GAS] = "Gas",
719 [ABS_BRAKE] = "Brake", [ABS_HAT0X] = "Hat0X",
720 [ABS_HAT0Y] = "Hat0Y", [ABS_HAT1X] = "Hat1X",
721 [ABS_HAT1Y] = "Hat1Y", [ABS_HAT2X] = "Hat2X",
722 [ABS_HAT2Y] = "Hat2Y", [ABS_HAT3X] = "Hat3X",
723 [ABS_HAT3Y] = "Hat 3Y", [ABS_PRESSURE] = "Pressure",
724 [ABS_DISTANCE] = "Distance", [ABS_TILT_X] = "XTilt",
725 [ABS_TILT_Y] = "YTilt", [ABS_TOOL_WIDTH] = "Tool Width",
726 [ABS_VOLUME] = "Volume", [ABS_MISC] = "Misc",
727};
728
729static char *misc[MSC_MAX + 1] = {
730 [MSC_SERIAL] = "Serial", [MSC_PULSELED] = "Pulseled",
731 [MSC_GESTURE] = "Gesture", [MSC_RAW] = "RawData"
732};
733
734static char *leds[LED_MAX + 1] = {
735 [LED_NUML] = "NumLock", [LED_CAPSL] = "CapsLock",
736 [LED_SCROLLL] = "ScrollLock", [LED_COMPOSE] = "Compose",
737 [LED_KANA] = "Kana", [LED_SLEEP] = "Sleep",
738 [LED_SUSPEND] = "Suspend", [LED_MUTE] = "Mute",
739 [LED_MISC] = "Misc",
740};
741
742static char *repeats[REP_MAX + 1] = {
743 [REP_DELAY] = "Delay", [REP_PERIOD] = "Period"
744};
745
746static char *sounds[SND_MAX + 1] = {
747 [SND_CLICK] = "Click", [SND_BELL] = "Bell",
748 [SND_TONE] = "Tone"
749};
750
751static char **names[EV_MAX + 1] = {
752 [EV_SYN] = syncs, [EV_KEY] = keys,
753 [EV_REL] = relatives, [EV_ABS] = absolutes,
754 [EV_MSC] = misc, [EV_LED] = leds,
755 [EV_SND] = sounds, [EV_REP] = repeats,
756};
757
758void hid_resolv_event(__u8 type, __u16 code) {
759
760 printk("%s.%s", events[type] ? events[type] : "?",
761 names[type] ? (names[type][code] ? names[type][code] : "?") : "?");
762}
763EXPORT_SYMBOL_GPL(hid_resolv_event);
764
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index c7a6833f6821..25d180a24fc4 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -31,9 +31,8 @@
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33 33
34#undef DEBUG
35
36#include <linux/hid.h> 34#include <linux/hid.h>
35#include <linux/hid-debug.h>
37 36
38static int hid_pb_fnmode = 1; 37static int hid_pb_fnmode = 1;
39module_param_named(pb_fnmode, hid_pb_fnmode, int, 0644); 38module_param_named(pb_fnmode, hid_pb_fnmode, int, 0644);
@@ -252,9 +251,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
252 251
253 field->hidinput = hidinput; 252 field->hidinput = hidinput;
254 253
255#ifdef DEBUG 254#ifdef CONFIG_HID_DEBUG
256 printk(KERN_DEBUG "Mapping: "); 255 printk(KERN_DEBUG "Mapping: ");
257 resolv_usage(usage->hid); 256 hid_resolv_usage(usage->hid);
258 printk(" ---> "); 257 printk(" ---> ");
259#endif 258#endif
260 259
@@ -682,14 +681,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
682 field->dpad = usage->code; 681 field->dpad = usage->code;
683 } 682 }
684 683
685#ifdef DEBUG 684 hid_resolv_event(usage->type, usage->code);
686 resolv_event(usage->type, usage->code); 685#ifdef CONFIG_HID_DEBUG
687 printk("\n"); 686 printk("\n");
688#endif 687#endif
689 return; 688 return;
690 689
691ignore: 690ignore:
692#ifdef DEBUG 691#ifdef CONFIG_HID_DEBUG
693 printk("IGNORED\n"); 692 printk("IGNORED\n");
694#endif 693#endif
695 return; 694 return;
@@ -804,6 +803,18 @@ int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int
804} 803}
805EXPORT_SYMBOL_GPL(hidinput_find_field); 804EXPORT_SYMBOL_GPL(hidinput_find_field);
806 805
806static int hidinput_open(struct input_dev *dev)
807{
808 struct hid_device *hid = dev->private;
809 return hid->hid_open(hid);
810}
811
812static void hidinput_close(struct input_dev *dev)
813{
814 struct hid_device *hid = dev->private;
815 hid->hid_close(hid);
816}
817
807/* 818/*
808 * Register the input device; print a message. 819 * Register the input device; print a message.
809 * Configure the input layer interface 820 * Configure the input layer interface
@@ -816,6 +827,7 @@ int hidinput_connect(struct hid_device *hid)
816 struct hid_input *hidinput = NULL; 827 struct hid_input *hidinput = NULL;
817 struct input_dev *input_dev; 828 struct input_dev *input_dev;
818 int i, j, k; 829 int i, j, k;
830 int max_report_type = HID_OUTPUT_REPORT;
819 831
820 INIT_LIST_HEAD(&hid->inputs); 832 INIT_LIST_HEAD(&hid->inputs);
821 833
@@ -828,7 +840,10 @@ int hidinput_connect(struct hid_device *hid)
828 if (i == hid->maxcollection) 840 if (i == hid->maxcollection)
829 return -1; 841 return -1;
830 842
831 for (k = HID_INPUT_REPORT; k <= HID_OUTPUT_REPORT; k++) 843 if (hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS)
844 max_report_type = HID_INPUT_REPORT;
845
846 for (k = HID_INPUT_REPORT; k <= max_report_type; k++)
832 list_for_each_entry(report, &hid->report_enum[k].report_list, list) { 847 list_for_each_entry(report, &hid->report_enum[k].report_list, list) {
833 848
834 if (!report->maxfield) 849 if (!report->maxfield)
@@ -846,8 +861,8 @@ int hidinput_connect(struct hid_device *hid)
846 861
847 input_dev->private = hid; 862 input_dev->private = hid;
848 input_dev->event = hid->hidinput_input_event; 863 input_dev->event = hid->hidinput_input_event;
849 input_dev->open = hid->hidinput_open; 864 input_dev->open = hidinput_open;
850 input_dev->close = hid->hidinput_close; 865 input_dev->close = hidinput_close;
851 866
852 input_dev->name = hid->name; 867 input_dev->name = hid->name;
853 input_dev->phys = hid->phys; 868 input_dev->phys = hid->phys;
diff --git a/drivers/i2c/chips/isp1301_omap.c b/drivers/i2c/chips/isp1301_omap.c
index ccdf3e90862b..9fafadb92510 100644
--- a/drivers/i2c/chips/isp1301_omap.c
+++ b/drivers/i2c/chips/isp1301_omap.c
@@ -27,7 +27,7 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/interrupt.h> 28#include <linux/interrupt.h>
29#include <linux/platform_device.h> 29#include <linux/platform_device.h>
30#include <linux/usb_ch9.h> 30#include <linux/usb/ch9.h>
31#include <linux/usb_gadget.h> 31#include <linux/usb_gadget.h>
32#include <linux/usb.h> 32#include <linux/usb.h>
33#include <linux/usb/otg.h> 33#include <linux/usb/otg.h>
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 3f828052f8d2..ec03341d2bd8 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -167,6 +167,13 @@ config BLK_DEV_IDECS
167 Support for Compact Flash cards, outboard IDE disks, tape drives, 167 Support for Compact Flash cards, outboard IDE disks, tape drives,
168 and CD-ROM drives connected through a PCMCIA card. 168 and CD-ROM drives connected through a PCMCIA card.
169 169
170config BLK_DEV_DELKIN
171 tristate "Cardbus IDE support (Delkin/ASKA/Workbit)"
172 depends on CARDBUS && PCI
173 help
174 Support for Delkin, ASKA, and Workbit Cardbus CompactFlash
175 Adapters. This may also work for similar SD and XD adapters.
176
170config BLK_DEV_IDECD 177config BLK_DEV_IDECD
171 tristate "Include IDE/ATAPI CDROM support" 178 tristate "Include IDE/ATAPI CDROM support"
172 ---help--- 179 ---help---
@@ -264,6 +271,13 @@ config BLK_DEV_IDESCSI
264 If both this SCSI emulation and native ATAPI support are compiled 271 If both this SCSI emulation and native ATAPI support are compiled
265 into the kernel, the native support will be used. 272 into the kernel, the native support will be used.
266 273
274config BLK_DEV_IDEACPI
275 bool "IDE ACPI support"
276 depends on ACPI
277 ---help---
278 Implement ACPI support for generic IDE devices. On modern
279 machines ACPI support is required to properly handle ACPI S3 states.
280
267config IDE_TASK_IOCTL 281config IDE_TASK_IOCTL
268 bool "IDE Taskfile Access" 282 bool "IDE Taskfile Access"
269 help 283 help
@@ -606,6 +620,11 @@ config BLK_DEV_PIIX
606 the kernel to change PIO, DMA and UDMA speeds and to configure 620 the kernel to change PIO, DMA and UDMA speeds and to configure
607 the chip to optimum performance. 621 the chip to optimum performance.
608 622
623config BLK_DEV_IT8213
624 tristate "IT8213 IDE support"
625 help
626 This driver adds support for the ITE 8213 IDE controller.
627
609config BLK_DEV_IT821X 628config BLK_DEV_IT821X
610 tristate "IT821X IDE support" 629 tristate "IT821X IDE support"
611 help 630 help
@@ -742,6 +761,11 @@ config BLK_DEV_VIA82CXXX
742 This allows the kernel to change PIO, DMA and UDMA speeds and to 761 This allows the kernel to change PIO, DMA and UDMA speeds and to
743 configure the chip to optimum performance. 762 configure the chip to optimum performance.
744 763
764config BLK_DEV_TC86C001
765 tristate "Toshiba TC86C001 support"
766 help
767 This driver adds support for Toshiba TC86C001 GOKU-S chip.
768
745endif 769endif
746 770
747config BLK_DEV_IDE_PMAC 771config BLK_DEV_IDE_PMAC
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index 569fae717503..d9f029e8ff74 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -22,6 +22,7 @@ ide-core-$(CONFIG_BLK_DEV_IDEPCI) += setup-pci.o
22ide-core-$(CONFIG_BLK_DEV_IDEDMA) += ide-dma.o 22ide-core-$(CONFIG_BLK_DEV_IDEDMA) += ide-dma.o
23ide-core-$(CONFIG_PROC_FS) += ide-proc.o 23ide-core-$(CONFIG_PROC_FS) += ide-proc.o
24ide-core-$(CONFIG_BLK_DEV_IDEPNP) += ide-pnp.o 24ide-core-$(CONFIG_BLK_DEV_IDEPNP) += ide-pnp.o
25ide-core-$(CONFIG_BLK_DEV_IDEACPI) += ide-acpi.o
25 26
26# built-in only drivers from arm/ 27# built-in only drivers from arm/
27ide-core-$(CONFIG_IDE_ARM) += arm/ide_arm.o 28ide-core-$(CONFIG_IDE_ARM) += arm/ide_arm.o
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
new file mode 100644
index 000000000000..17aea65d7dd2
--- /dev/null
+++ b/drivers/ide/ide-acpi.c
@@ -0,0 +1,697 @@
1/*
2 * ide-acpi.c
3 * Provides ACPI support for IDE drives.
4 *
5 * Copyright (C) 2005 Intel Corp.
6 * Copyright (C) 2005 Randy Dunlap
7 * Copyright (C) 2006 SUSE Linux Products GmbH
8 * Copyright (C) 2006 Hannes Reinecke
9 */
10
11#include <linux/ata.h>
12#include <linux/delay.h>
13#include <linux/device.h>
14#include <linux/errno.h>
15#include <linux/kernel.h>
16#include <acpi/acpi.h>
17#include <linux/ide.h>
18#include <linux/pci.h>
19
20#include <acpi/acpi_bus.h>
21#include <acpi/acnames.h>
22#include <acpi/acnamesp.h>
23#include <acpi/acparser.h>
24#include <acpi/acexcep.h>
25#include <acpi/acmacros.h>
26#include <acpi/actypes.h>
27
28#define REGS_PER_GTF 7
29struct taskfile_array {
30 u8 tfa[REGS_PER_GTF]; /* regs. 0x1f1 - 0x1f7 */
31};
32
33struct GTM_buffer {
34 u32 PIO_speed0;
35 u32 DMA_speed0;
36 u32 PIO_speed1;
37 u32 DMA_speed1;
38 u32 GTM_flags;
39};
40
41struct ide_acpi_drive_link {
42 ide_drive_t *drive;
43 acpi_handle obj_handle;
44 u8 idbuff[512];
45};
46
47struct ide_acpi_hwif_link {
48 ide_hwif_t *hwif;
49 acpi_handle obj_handle;
50 struct GTM_buffer gtm;
51 struct ide_acpi_drive_link master;
52 struct ide_acpi_drive_link slave;
53};
54
55#undef DEBUGGING
56/* note: adds function name and KERN_DEBUG */
57#ifdef DEBUGGING
58#define DEBPRINT(fmt, args...) \
59 printk(KERN_DEBUG "%s: " fmt, __FUNCTION__, ## args)
60#else
61#define DEBPRINT(fmt, args...) do {} while (0)
62#endif /* DEBUGGING */
63
64extern int ide_noacpi;
65extern int ide_noacpitfs;
66extern int ide_noacpionboot;
67
68/**
69 * ide_get_dev_handle - finds acpi_handle and PCI device.function
70 * @dev: device to locate
71 * @handle: returned acpi_handle for @dev
72 * @pcidevfn: return PCI device.func for @dev
73 *
74 * Returns the ACPI object handle to the corresponding PCI device.
75 *
76 * Returns 0 on success, <0 on error.
77 */
78static int ide_get_dev_handle(struct device *dev, acpi_handle *handle,
79 acpi_integer *pcidevfn)
80{
81 struct pci_dev *pdev = to_pci_dev(dev);
82 unsigned int bus, devnum, func;
83 acpi_integer addr;
84 acpi_handle dev_handle;
85 struct acpi_buffer buffer = {.length = ACPI_ALLOCATE_BUFFER,
86 .pointer = NULL};
87 acpi_status status;
88 struct acpi_device_info *dinfo = NULL;
89 int ret = -ENODEV;
90
91 bus = pdev->bus->number;
92 devnum = PCI_SLOT(pdev->devfn);
93 func = PCI_FUNC(pdev->devfn);
94 /* ACPI _ADR encoding for PCI bus: */
95 addr = (acpi_integer)(devnum << 16 | func);
96
97 DEBPRINT("ENTER: pci %02x:%02x.%01x\n", bus, devnum, func);
98
99 dev_handle = DEVICE_ACPI_HANDLE(dev);
100 if (!dev_handle) {
101 DEBPRINT("no acpi handle for device\n");
102 goto err;
103 }
104
105 status = acpi_get_object_info(dev_handle, &buffer);
106 if (ACPI_FAILURE(status)) {
107 DEBPRINT("get_object_info for device failed\n");
108 goto err;
109 }
110 dinfo = buffer.pointer;
111 if (dinfo && (dinfo->valid & ACPI_VALID_ADR) &&
112 dinfo->address == addr) {
113 *pcidevfn = addr;
114 *handle = dev_handle;
115 } else {
116 DEBPRINT("get_object_info for device has wrong "
117 " address: %llu, should be %u\n",
118 dinfo ? (unsigned long long)dinfo->address : -1ULL,
119 (unsigned int)addr);
120 goto err;
121 }
122
123 DEBPRINT("for dev=0x%x.%x, addr=0x%llx, *handle=0x%p\n",
124 devnum, func, (unsigned long long)addr, *handle);
125 ret = 0;
126err:
127 kfree(dinfo);
128 return ret;
129}
130
131/**
132 * ide_acpi_hwif_get_handle - Get ACPI object handle for a given hwif
133 * @hwif: device to locate
134 *
135 * Retrieves the object handle for a given hwif.
136 *
137 * Returns handle on success, 0 on error.
138 */
139static acpi_handle ide_acpi_hwif_get_handle(ide_hwif_t *hwif)
140{
141 struct device *dev = hwif->gendev.parent;
142 acpi_handle dev_handle;
143 acpi_integer pcidevfn;
144 acpi_handle chan_handle;
145 int err;
146
147 DEBPRINT("ENTER: device %s\n", hwif->name);
148
149 if (!dev) {
150 DEBPRINT("no PCI device for %s\n", hwif->name);
151 return NULL;
152 }
153
154 err = ide_get_dev_handle(dev, &dev_handle, &pcidevfn);
155 if (err < 0) {
156 DEBPRINT("ide_get_dev_handle failed (%d)\n", err);
157 return NULL;
158 }
159
160 /* get child objects of dev_handle == channel objects,
161 * + _their_ children == drive objects */
162 /* channel is hwif->channel */
163 chan_handle = acpi_get_child(dev_handle, hwif->channel);
164 DEBPRINT("chan adr=%d: handle=0x%p\n",
165 hwif->channel, chan_handle);
166
167 return chan_handle;
168}
169
170/**
171 * ide_acpi_drive_get_handle - Get ACPI object handle for a given drive
172 * @drive: device to locate
173 *
174 * Retrieves the object handle of a given drive. According to the ACPI
175 * spec the drive is a child of the hwif.
176 *
177 * Returns handle on success, 0 on error.
178 */
179static acpi_handle ide_acpi_drive_get_handle(ide_drive_t *drive)
180{
181 ide_hwif_t *hwif = HWIF(drive);
182 int port;
183 acpi_handle drive_handle;
184
185 if (!hwif->acpidata)
186 return NULL;
187
188 if (!hwif->acpidata->obj_handle)
189 return NULL;
190
191 port = hwif->channel ? drive->dn - 2: drive->dn;
192
193 DEBPRINT("ENTER: %s at channel#: %d port#: %d\n",
194 drive->name, hwif->channel, port);
195
196
197 /* TBD: could also check ACPI object VALID bits */
198 drive_handle = acpi_get_child(hwif->acpidata->obj_handle, port);
199 DEBPRINT("drive %s handle 0x%p\n", drive->name, drive_handle);
200
201 return drive_handle;
202}
203
204/**
205 * do_drive_get_GTF - get the drive bootup default taskfile settings
206 * @drive: the drive for which the taskfile settings should be retrieved
207 * @gtf_length: number of bytes of _GTF data returned at @gtf_address
208 * @gtf_address: buffer containing _GTF taskfile arrays
209 *
210 * The _GTF method has no input parameters.
211 * It returns a variable number of register set values (registers
212 * hex 1F1..1F7, taskfiles).
213 * The <variable number> is not known in advance, so have ACPI-CA
214 * allocate the buffer as needed and return it, then free it later.
215 *
216 * The returned @gtf_length and @gtf_address are only valid if the
217 * function return value is 0.
218 */
219static int do_drive_get_GTF(ide_drive_t *drive,
220 unsigned int *gtf_length, unsigned long *gtf_address,
221 unsigned long *obj_loc)
222{
223 acpi_status status;
224 struct acpi_buffer output;
225 union acpi_object *out_obj;
226 ide_hwif_t *hwif = HWIF(drive);
227 struct device *dev = hwif->gendev.parent;
228 int err = -ENODEV;
229 int port;
230
231 *gtf_length = 0;
232 *gtf_address = 0UL;
233 *obj_loc = 0UL;
234
235 if (ide_noacpi)
236 return 0;
237
238 if (!dev) {
239 DEBPRINT("no PCI device for %s\n", hwif->name);
240 goto out;
241 }
242
243 if (!hwif->acpidata) {
244 DEBPRINT("no ACPI data for %s\n", hwif->name);
245 goto out;
246 }
247
248 port = hwif->channel ? drive->dn - 2: drive->dn;
249
250 if (!drive->acpidata) {
251 if (port == 0) {
252 drive->acpidata = &hwif->acpidata->master;
253 hwif->acpidata->master.drive = drive;
254 } else {
255 drive->acpidata = &hwif->acpidata->slave;
256 hwif->acpidata->slave.drive = drive;
257 }
258 }
259
260 DEBPRINT("ENTER: %s at %s, port#: %d, hard_port#: %d\n",
261 hwif->name, dev->bus_id, port, hwif->channel);
262
263 if (!drive->present) {
264 DEBPRINT("%s drive %d:%d not present\n",
265 hwif->name, hwif->channel, port);
266 goto out;
267 }
268
269 /* Get this drive's _ADR info. if not already known. */
270 if (!drive->acpidata->obj_handle) {
271 drive->acpidata->obj_handle = ide_acpi_drive_get_handle(drive);
272 if (!drive->acpidata->obj_handle) {
273 DEBPRINT("No ACPI object found for %s\n",
274 drive->name);
275 goto out;
276 }
277 }
278
279 /* Setting up output buffer */
280 output.length = ACPI_ALLOCATE_BUFFER;
281 output.pointer = NULL; /* ACPI-CA sets this; save/free it later */
282
283 /* _GTF has no input parameters */
284 err = -EIO;
285 status = acpi_evaluate_object(drive->acpidata->obj_handle, "_GTF",
286 NULL, &output);
287 if (ACPI_FAILURE(status)) {
288 printk(KERN_DEBUG
289 "%s: Run _GTF error: status = 0x%x\n",
290 __FUNCTION__, status);
291 goto out;
292 }
293
294 if (!output.length || !output.pointer) {
295 DEBPRINT("Run _GTF: "
296 "length or ptr is NULL (0x%llx, 0x%p)\n",
297 (unsigned long long)output.length,
298 output.pointer);
299 goto out;
300 }
301
302 out_obj = output.pointer;
303 if (out_obj->type != ACPI_TYPE_BUFFER) {
304 DEBPRINT("Run _GTF: error: "
305 "expected object type of ACPI_TYPE_BUFFER, "
306 "got 0x%x\n", out_obj->type);
307 err = -ENOENT;
308 kfree(output.pointer);
309 goto out;
310 }
311
312 if (!out_obj->buffer.length || !out_obj->buffer.pointer ||
313 out_obj->buffer.length % REGS_PER_GTF) {
314 printk(KERN_ERR
315 "%s: unexpected GTF length (%d) or addr (0x%p)\n",
316 __FUNCTION__, out_obj->buffer.length,
317 out_obj->buffer.pointer);
318 err = -ENOENT;
319 kfree(output.pointer);
320 goto out;
321 }
322
323 *gtf_length = out_obj->buffer.length;
324 *gtf_address = (unsigned long)out_obj->buffer.pointer;
325 *obj_loc = (unsigned long)out_obj;
326 DEBPRINT("returning gtf_length=%d, gtf_address=0x%lx, obj_loc=0x%lx\n",
327 *gtf_length, *gtf_address, *obj_loc);
328 err = 0;
329out:
330 return err;
331}
332
333/**
334 * taskfile_load_raw - send taskfile registers to drive
335 * @drive: drive to which output is sent
336 * @gtf: raw ATA taskfile register set (0x1f1 - 0x1f7)
337 *
338 * Outputs IDE taskfile to the drive.
339 */
340static int taskfile_load_raw(ide_drive_t *drive,
341 const struct taskfile_array *gtf)
342{
343 ide_task_t args;
344 int err = 0;
345
346 DEBPRINT("(0x1f1-1f7): hex: "
347 "%02x %02x %02x %02x %02x %02x %02x\n",
348 gtf->tfa[0], gtf->tfa[1], gtf->tfa[2],
349 gtf->tfa[3], gtf->tfa[4], gtf->tfa[5], gtf->tfa[6]);
350
351 memset(&args, 0, sizeof(ide_task_t));
352 args.command_type = IDE_DRIVE_TASK_NO_DATA;
353 args.data_phase = TASKFILE_IN;
354 args.handler = &task_no_data_intr;
355
356 /* convert gtf to IDE Taskfile */
357 args.tfRegister[1] = gtf->tfa[0]; /* 0x1f1 */
358 args.tfRegister[2] = gtf->tfa[1]; /* 0x1f2 */
359 args.tfRegister[3] = gtf->tfa[2]; /* 0x1f3 */
360 args.tfRegister[4] = gtf->tfa[3]; /* 0x1f4 */
361 args.tfRegister[5] = gtf->tfa[4]; /* 0x1f5 */
362 args.tfRegister[6] = gtf->tfa[5]; /* 0x1f6 */
363 args.tfRegister[7] = gtf->tfa[6]; /* 0x1f7 */
364
365 if (ide_noacpitfs) {
366 DEBPRINT("_GTF execution disabled\n");
367 return err;
368 }
369
370 err = ide_raw_taskfile(drive, &args, NULL);
371 if (err)
372 printk(KERN_ERR "%s: ide_raw_taskfile failed: %u\n",
373 __FUNCTION__, err);
374
375 return err;
376}
377
378/**
379 * do_drive_set_taskfiles - write the drive taskfile settings from _GTF
380 * @drive: the drive to which the taskfile command should be sent
381 * @gtf_length: total number of bytes of _GTF taskfiles
382 * @gtf_address: location of _GTF taskfile arrays
383 *
384 * Write {gtf_address, length gtf_length} in groups of
385 * REGS_PER_GTF bytes.
386 */
387static int do_drive_set_taskfiles(ide_drive_t *drive,
388 unsigned int gtf_length,
389 unsigned long gtf_address)
390{
391 int rc = -ENODEV, err;
392 int gtf_count = gtf_length / REGS_PER_GTF;
393 int ix;
394 struct taskfile_array *gtf;
395
396 if (ide_noacpi)
397 return 0;
398
399 DEBPRINT("ENTER: %s, hard_port#: %d\n", drive->name, drive->dn);
400
401 if (!drive->present)
402 goto out;
403 if (!gtf_count) /* shouldn't be here */
404 goto out;
405
406 DEBPRINT("total GTF bytes=%u (0x%x), gtf_count=%d, addr=0x%lx\n",
407 gtf_length, gtf_length, gtf_count, gtf_address);
408
409 if (gtf_length % REGS_PER_GTF) {
410 printk(KERN_ERR "%s: unexpected GTF length (%d)\n",
411 __FUNCTION__, gtf_length);
412 goto out;
413 }
414
415 rc = 0;
416 for (ix = 0; ix < gtf_count; ix++) {
417 gtf = (struct taskfile_array *)
418 (gtf_address + ix * REGS_PER_GTF);
419
420 /* send all TaskFile registers (0x1f1-0x1f7) *in*that*order* */
421 err = taskfile_load_raw(drive, gtf);
422 if (err)
423 rc = err;
424 }
425
426out:
427 return rc;
428}
429
430/**
431 * ide_acpi_exec_tfs - get then write drive taskfile settings
432 * @drive: the drive for which the taskfile settings should be
433 * written.
434 *
435 * According to the ACPI spec this should be called after _STM
436 * has been evaluated for the interface. Some ACPI vendors interpret
437 * that as a hard requirement and modify the taskfile according
438 * to the Identify Drive information passed down with _STM.
439 * So one should really make sure to call this only after _STM has
440 * been executed.
441 */
442int ide_acpi_exec_tfs(ide_drive_t *drive)
443{
444 int ret;
445 unsigned int gtf_length;
446 unsigned long gtf_address;
447 unsigned long obj_loc;
448
449 if (ide_noacpi)
450 return 0;
451
452 DEBPRINT("call get_GTF, drive=%s port=%d\n", drive->name, drive->dn);
453
454 ret = do_drive_get_GTF(drive, &gtf_length, &gtf_address, &obj_loc);
455 if (ret < 0) {
456 DEBPRINT("get_GTF error (%d)\n", ret);
457 return ret;
458 }
459
460 DEBPRINT("call set_taskfiles, drive=%s\n", drive->name);
461
462 ret = do_drive_set_taskfiles(drive, gtf_length, gtf_address);
463 kfree((void *)obj_loc);
464 if (ret < 0) {
465 DEBPRINT("set_taskfiles error (%d)\n", ret);
466 }
467
468 DEBPRINT("ret=%d\n", ret);
469
470 return ret;
471}
472EXPORT_SYMBOL_GPL(ide_acpi_exec_tfs);
473
474/**
475 * ide_acpi_get_timing - get the channel (controller) timings
476 * @hwif: target IDE interface (channel)
477 *
478 * This function executes the _GTM ACPI method for the target channel.
479 *
480 */
481void ide_acpi_get_timing(ide_hwif_t *hwif)
482{
483 acpi_status status;
484 struct acpi_buffer output;
485 union acpi_object *out_obj;
486
487 if (ide_noacpi)
488 return;
489
490 DEBPRINT("ENTER:\n");
491
492 if (!hwif->acpidata) {
493 DEBPRINT("no ACPI data for %s\n", hwif->name);
494 return;
495 }
496
497 /* Setting up output buffer for _GTM */
498 output.length = ACPI_ALLOCATE_BUFFER;
499 output.pointer = NULL; /* ACPI-CA sets this; save/free it later */
500
501 /* _GTM has no input parameters */
502 status = acpi_evaluate_object(hwif->acpidata->obj_handle, "_GTM",
503 NULL, &output);
504
505 DEBPRINT("_GTM status: %d, outptr: 0x%p, outlen: 0x%llx\n",
506 status, output.pointer,
507 (unsigned long long)output.length);
508
509 if (ACPI_FAILURE(status)) {
510 DEBPRINT("Run _GTM error: status = 0x%x\n", status);
511 return;
512 }
513
514 if (!output.length || !output.pointer) {
515 DEBPRINT("Run _GTM: length or ptr is NULL (0x%llx, 0x%p)\n",
516 (unsigned long long)output.length,
517 output.pointer);
518 kfree(output.pointer);
519 return;
520 }
521
522 out_obj = output.pointer;
523 if (out_obj->type != ACPI_TYPE_BUFFER) {
524 kfree(output.pointer);
525 DEBPRINT("Run _GTM: error: "
526 "expected object type of ACPI_TYPE_BUFFER, "
527 "got 0x%x\n", out_obj->type);
528 return;
529 }
530
531 if (!out_obj->buffer.length || !out_obj->buffer.pointer ||
532 out_obj->buffer.length != sizeof(struct GTM_buffer)) {
533 kfree(output.pointer);
534 printk(KERN_ERR
535 "%s: unexpected _GTM length (0x%x)[should be 0x%zx] or "
536 "addr (0x%p)\n",
537 __FUNCTION__, out_obj->buffer.length,
538 sizeof(struct GTM_buffer), out_obj->buffer.pointer);
539 return;
540 }
541
542 memcpy(&hwif->acpidata->gtm, out_obj->buffer.pointer,
543 sizeof(struct GTM_buffer));
544
545 DEBPRINT("_GTM info: ptr: 0x%p, len: 0x%x, exp.len: 0x%Zx\n",
546 out_obj->buffer.pointer, out_obj->buffer.length,
547 sizeof(struct GTM_buffer));
548
549 DEBPRINT("_GTM fields: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
550 hwif->acpidata->gtm.PIO_speed0,
551 hwif->acpidata->gtm.DMA_speed0,
552 hwif->acpidata->gtm.PIO_speed1,
553 hwif->acpidata->gtm.DMA_speed1,
554 hwif->acpidata->gtm.GTM_flags);
555
556 kfree(output.pointer);
557}
558EXPORT_SYMBOL_GPL(ide_acpi_get_timing);
559
560/**
561 * ide_acpi_push_timing - set the channel (controller) timings
562 * @hwif: target IDE interface (channel)
563 *
564 * This function executes the _STM ACPI method for the target channel.
565 *
566 * _STM requires Identify Drive data, which has to passed as an argument.
567 * Unfortunately hd_driveid is a mangled version which we can't readily
568 * use; hence we'll get the information afresh.
569 */
570void ide_acpi_push_timing(ide_hwif_t *hwif)
571{
572 acpi_status status;
573 struct acpi_object_list input;
574 union acpi_object in_params[3];
575 struct ide_acpi_drive_link *master = &hwif->acpidata->master;
576 struct ide_acpi_drive_link *slave = &hwif->acpidata->slave;
577
578 if (ide_noacpi)
579 return;
580
581 DEBPRINT("ENTER:\n");
582
583 if (!hwif->acpidata) {
584 DEBPRINT("no ACPI data for %s\n", hwif->name);
585 return;
586 }
587
588 /* Give the GTM buffer + drive Identify data to the channel via the
589 * _STM method: */
590 /* setup input parameters buffer for _STM */
591 input.count = 3;
592 input.pointer = in_params;
593 in_params[0].type = ACPI_TYPE_BUFFER;
594 in_params[0].buffer.length = sizeof(struct GTM_buffer);
595 in_params[0].buffer.pointer = (u8 *)&hwif->acpidata->gtm;
596 in_params[1].type = ACPI_TYPE_BUFFER;
597 in_params[1].buffer.length = sizeof(struct hd_driveid);
598 in_params[1].buffer.pointer = (u8 *)&master->idbuff;
599 in_params[2].type = ACPI_TYPE_BUFFER;
600 in_params[2].buffer.length = sizeof(struct hd_driveid);
601 in_params[2].buffer.pointer = (u8 *)&slave->idbuff;
602 /* Output buffer: _STM has no output */
603
604 status = acpi_evaluate_object(hwif->acpidata->obj_handle, "_STM",
605 &input, NULL);
606
607 if (ACPI_FAILURE(status)) {
608 DEBPRINT("Run _STM error: status = 0x%x\n", status);
609 }
610 DEBPRINT("_STM status: %d\n", status);
611}
612EXPORT_SYMBOL_GPL(ide_acpi_push_timing);
613
614/**
615 * ide_acpi_init - initialize the ACPI link for an IDE interface
616 * @hwif: target IDE interface (channel)
617 *
618 * The ACPI spec is not quite clear when the drive identify buffer
619 * should be obtained. Calling IDENTIFY DEVICE during shutdown
620 * is not the best of ideas as the drive might already being put to
621 * sleep. And obviously we can't call it during resume.
622 * So we get the information during startup; but this means that
623 * any changes during run-time will be lost after resume.
624 */
625void ide_acpi_init(ide_hwif_t *hwif)
626{
627 int unit;
628 int err;
629 struct ide_acpi_drive_link *master;
630 struct ide_acpi_drive_link *slave;
631
632 hwif->acpidata = kzalloc(sizeof(struct ide_acpi_hwif_link), GFP_KERNEL);
633 if (!hwif->acpidata)
634 return;
635
636 hwif->acpidata->obj_handle = ide_acpi_hwif_get_handle(hwif);
637 if (!hwif->acpidata->obj_handle) {
638 DEBPRINT("no ACPI object for %s found\n", hwif->name);
639 kfree(hwif->acpidata);
640 hwif->acpidata = NULL;
641 return;
642 }
643
644 /*
645 * The ACPI spec mandates that we send information
646 * for both drives, regardless whether they are connected
647 * or not.
648 */
649 hwif->acpidata->master.drive = &hwif->drives[0];
650 hwif->drives[0].acpidata = &hwif->acpidata->master;
651 master = &hwif->acpidata->master;
652
653 hwif->acpidata->slave.drive = &hwif->drives[1];
654 hwif->drives[1].acpidata = &hwif->acpidata->slave;
655 slave = &hwif->acpidata->slave;
656
657
658 /*
659 * Send IDENTIFY for each drive
660 */
661 if (master->drive->present) {
662 err = taskfile_lib_get_identify(master->drive, master->idbuff);
663 if (err) {
664 DEBPRINT("identify device %s failed (%d)\n",
665 master->drive->name, err);
666 }
667 }
668
669 if (slave->drive->present) {
670 err = taskfile_lib_get_identify(slave->drive, slave->idbuff);
671 if (err) {
672 DEBPRINT("identify device %s failed (%d)\n",
673 slave->drive->name, err);
674 }
675 }
676
677 if (ide_noacpionboot) {
678 DEBPRINT("ACPI methods disabled on boot\n");
679 return;
680 }
681
682 /*
683 * ACPI requires us to call _STM on startup
684 */
685 ide_acpi_get_timing(hwif);
686 ide_acpi_push_timing(hwif);
687
688 for (unit = 0; unit < MAX_DRIVES; ++unit) {
689 ide_drive_t *drive = &hwif->drives[unit];
690
691 if (drive->present) {
692 /* Execute ACPI startup code */
693 ide_acpi_exec_tfs(drive);
694 }
695 }
696}
697EXPORT_SYMBOL_GPL(ide_acpi_init);
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 5a5c565a32a8..176bbc850d6b 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1384,6 +1384,9 @@ static int hwif_init(ide_hwif_t *hwif)
1384 1384
1385done: 1385done:
1386 init_gendisk(hwif); 1386 init_gendisk(hwif);
1387
1388 ide_acpi_init(hwif);
1389
1387 hwif->present = 1; /* success */ 1390 hwif->present = 1; /* success */
1388 return 1; 1391 return 1;
1389 1392
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 6c9bd5165bdb..c750f6ce770a 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -187,6 +187,12 @@ int noautodma = 1;
187 187
188EXPORT_SYMBOL(noautodma); 188EXPORT_SYMBOL(noautodma);
189 189
190#ifdef CONFIG_BLK_DEV_IDEACPI
191int ide_noacpi = 0;
192int ide_noacpitfs = 1;
193int ide_noacpionboot = 1;
194#endif
195
190/* 196/*
191 * This is declared extern in ide.h, for access by other IDE modules: 197 * This is declared extern in ide.h, for access by other IDE modules:
192 */ 198 */
@@ -1214,10 +1220,15 @@ EXPORT_SYMBOL(system_bus_clock);
1214static int generic_ide_suspend(struct device *dev, pm_message_t mesg) 1220static int generic_ide_suspend(struct device *dev, pm_message_t mesg)
1215{ 1221{
1216 ide_drive_t *drive = dev->driver_data; 1222 ide_drive_t *drive = dev->driver_data;
1223 ide_hwif_t *hwif = HWIF(drive);
1217 struct request rq; 1224 struct request rq;
1218 struct request_pm_state rqpm; 1225 struct request_pm_state rqpm;
1219 ide_task_t args; 1226 ide_task_t args;
1220 1227
1228 /* Call ACPI _GTM only once */
1229 if (!(drive->dn % 2))
1230 ide_acpi_get_timing(hwif);
1231
1221 memset(&rq, 0, sizeof(rq)); 1232 memset(&rq, 0, sizeof(rq));
1222 memset(&rqpm, 0, sizeof(rqpm)); 1233 memset(&rqpm, 0, sizeof(rqpm));
1223 memset(&args, 0, sizeof(args)); 1234 memset(&args, 0, sizeof(args));
@@ -1235,10 +1246,17 @@ static int generic_ide_suspend(struct device *dev, pm_message_t mesg)
1235static int generic_ide_resume(struct device *dev) 1246static int generic_ide_resume(struct device *dev)
1236{ 1247{
1237 ide_drive_t *drive = dev->driver_data; 1248 ide_drive_t *drive = dev->driver_data;
1249 ide_hwif_t *hwif = HWIF(drive);
1238 struct request rq; 1250 struct request rq;
1239 struct request_pm_state rqpm; 1251 struct request_pm_state rqpm;
1240 ide_task_t args; 1252 ide_task_t args;
1241 1253
1254 /* Call ACPI _STM only once */
1255 if (!(drive->dn % 2))
1256 ide_acpi_push_timing(hwif);
1257
1258 ide_acpi_exec_tfs(drive);
1259
1242 memset(&rq, 0, sizeof(rq)); 1260 memset(&rq, 0, sizeof(rq));
1243 memset(&rqpm, 0, sizeof(rqpm)); 1261 memset(&rqpm, 0, sizeof(rqpm));
1244 memset(&args, 0, sizeof(args)); 1262 memset(&args, 0, sizeof(args));
@@ -1543,6 +1561,24 @@ static int __init ide_setup(char *s)
1543 } 1561 }
1544#endif /* CONFIG_BLK_DEV_IDEPCI */ 1562#endif /* CONFIG_BLK_DEV_IDEPCI */
1545 1563
1564#ifdef CONFIG_BLK_DEV_IDEACPI
1565 if (!strcmp(s, "ide=noacpi")) {
1566 //printk(" : Disable IDE ACPI support.\n");
1567 ide_noacpi = 1;
1568 return 1;
1569 }
1570 if (!strcmp(s, "ide=acpigtf")) {
1571 //printk(" : Enable IDE ACPI _GTF support.\n");
1572 ide_noacpitfs = 0;
1573 return 1;
1574 }
1575 if (!strcmp(s, "ide=acpionboot")) {
1576 //printk(" : Call IDE ACPI methods on boot.\n");
1577 ide_noacpionboot = 0;
1578 return 1;
1579 }
1580#endif /* CONFIG_BLK_DEV_IDEACPI */
1581
1546 /* 1582 /*
1547 * Look for drive options: "hdx=" 1583 * Look for drive options: "hdx="
1548 */ 1584 */
diff --git a/drivers/ide/pci/Makefile b/drivers/ide/pci/Makefile
index fef08960aa4c..6591ff4753cb 100644
--- a/drivers/ide/pci/Makefile
+++ b/drivers/ide/pci/Makefile
@@ -9,9 +9,10 @@ obj-$(CONFIG_BLK_DEV_CS5530) += cs5530.o
9obj-$(CONFIG_BLK_DEV_CS5535) += cs5535.o 9obj-$(CONFIG_BLK_DEV_CS5535) += cs5535.o
10obj-$(CONFIG_BLK_DEV_SC1200) += sc1200.o 10obj-$(CONFIG_BLK_DEV_SC1200) += sc1200.o
11obj-$(CONFIG_BLK_DEV_CY82C693) += cy82c693.o 11obj-$(CONFIG_BLK_DEV_CY82C693) += cy82c693.o
12obj-$(CONFIG_BLK_DEV_DELKIN) += delkin_cb.o
12obj-$(CONFIG_BLK_DEV_HPT34X) += hpt34x.o 13obj-$(CONFIG_BLK_DEV_HPT34X) += hpt34x.o
13obj-$(CONFIG_BLK_DEV_HPT366) += hpt366.o 14obj-$(CONFIG_BLK_DEV_HPT366) += hpt366.o
14#obj-$(CONFIG_BLK_DEV_HPT37X) += hpt37x.o 15obj-$(CONFIG_BLK_DEV_IT8213) += it8213.o
15obj-$(CONFIG_BLK_DEV_IT821X) += it821x.o 16obj-$(CONFIG_BLK_DEV_IT821X) += it821x.o
16obj-$(CONFIG_BLK_DEV_JMICRON) += jmicron.o 17obj-$(CONFIG_BLK_DEV_JMICRON) += jmicron.o
17obj-$(CONFIG_BLK_DEV_NS87415) += ns87415.o 18obj-$(CONFIG_BLK_DEV_NS87415) += ns87415.o
@@ -26,6 +27,7 @@ obj-$(CONFIG_BLK_DEV_SIIMAGE) += siimage.o
26obj-$(CONFIG_BLK_DEV_SIS5513) += sis5513.o 27obj-$(CONFIG_BLK_DEV_SIS5513) += sis5513.o
27obj-$(CONFIG_BLK_DEV_SL82C105) += sl82c105.o 28obj-$(CONFIG_BLK_DEV_SL82C105) += sl82c105.o
28obj-$(CONFIG_BLK_DEV_SLC90E66) += slc90e66.o 29obj-$(CONFIG_BLK_DEV_SLC90E66) += slc90e66.o
30obj-$(CONFIG_BLK_DEV_TC86C001) += tc86c001.o
29obj-$(CONFIG_BLK_DEV_TRIFLEX) += triflex.o 31obj-$(CONFIG_BLK_DEV_TRIFLEX) += triflex.o
30obj-$(CONFIG_BLK_DEV_TRM290) += trm290.o 32obj-$(CONFIG_BLK_DEV_TRM290) += trm290.o
31obj-$(CONFIG_BLK_DEV_VIA82CXXX) += via82cxxx.o 33obj-$(CONFIG_BLK_DEV_VIA82CXXX) += via82cxxx.o
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c
new file mode 100644
index 000000000000..e2672fc65d30
--- /dev/null
+++ b/drivers/ide/pci/delkin_cb.c
@@ -0,0 +1,140 @@
1/*
2 * linux/drivers/ide/pci/delkin_cb.c
3 *
4 * Created 20 Oct 2004 by Mark Lord
5 *
6 * Basic support for Delkin/ASKA/Workbit Cardbus CompactFlash adapter
7 *
8 * Modeled after the 16-bit PCMCIA driver: ide-cs.c
9 *
10 * This is slightly peculiar, in that it is a PCI driver,
11 * but is NOT an IDE PCI driver -- the IDE layer does not directly
12 * support hot insertion/removal of PCI interfaces, so this driver
13 * is unable to use the IDE PCI interfaces. Instead, it uses the
14 * same interfaces as the ide-cs (PCMCIA) driver uses.
15 * On the plus side, the driver is also smaller/simpler this way.
16 *
17 * This file is subject to the terms and conditions of the GNU General Public
18 * License. See the file COPYING in the main directory of this archive for
19 * more details.
20 */
21#include <linux/autoconf.h>
22#include <linux/types.h>
23#include <linux/module.h>
24#include <linux/mm.h>
25#include <linux/blkdev.h>
26#include <linux/hdreg.h>
27#include <linux/ide.h>
28#include <linux/init.h>
29#include <linux/pci.h>
30#include <asm/io.h>
31
32/*
33 * No chip documentation has yet been found,
34 * so these configuration values were pulled from
35 * a running Win98 system using "debug".
36 * This gives around 3MByte/second read performance,
37 * which is about 2/3 of what the chip is capable of.
38 *
39 * There is also a 4KByte mmio region on the card,
40 * but its purpose has yet to be reverse-engineered.
41 */
42static const u8 setup[] = {
43 0x00, 0x05, 0xbe, 0x01, 0x20, 0x8f, 0x00, 0x00,
44 0xa4, 0x1f, 0xb3, 0x1b, 0x00, 0x00, 0x00, 0x80,
45 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
46 0x00, 0x00, 0x00, 0x00, 0xa4, 0x83, 0x02, 0x13,
47};
48
49static int __devinit
50delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
51{
52 unsigned long base;
53 hw_regs_t hw;
54 ide_hwif_t *hwif = NULL;
55 ide_drive_t *drive;
56 int i, rc;
57
58 rc = pci_enable_device(dev);
59 if (rc) {
60 printk(KERN_ERR "delkin_cb: pci_enable_device failed (%d)\n", rc);
61 return rc;
62 }
63 rc = pci_request_regions(dev, "delkin_cb");
64 if (rc) {
65 printk(KERN_ERR "delkin_cb: pci_request_regions failed (%d)\n", rc);
66 pci_disable_device(dev);
67 return rc;
68 }
69 base = pci_resource_start(dev, 0);
70 outb(0x02, base + 0x1e); /* set nIEN to block interrupts */
71 inb(base + 0x17); /* read status to clear interrupts */
72 for (i = 0; i < sizeof(setup); ++i) {
73 if (setup[i])
74 outb(setup[i], base + i);
75 }
76 pci_release_regions(dev); /* IDE layer handles regions itself */
77
78 memset(&hw, 0, sizeof(hw));
79 ide_std_init_ports(&hw, base + 0x10, base + 0x1e);
80 hw.irq = dev->irq;
81 hw.chipset = ide_pci; /* this enables IRQ sharing */
82
83 rc = ide_register_hw_with_fixup(&hw, &hwif, ide_undecoded_slave);
84 if (rc < 0) {
85 printk(KERN_ERR "delkin_cb: ide_register_hw failed (%d)\n", rc);
86 pci_disable_device(dev);
87 return -ENODEV;
88 }
89 pci_set_drvdata(dev, hwif);
90 hwif->pci_dev = dev;
91 drive = &hwif->drives[0];
92 if (drive->present) {
93 drive->io_32bit = 1;
94 drive->unmask = 1;
95 }
96 return 0;
97}
98
99static void
100delkin_cb_remove (struct pci_dev *dev)
101{
102 ide_hwif_t *hwif = pci_get_drvdata(dev);
103
104 if (hwif)
105 ide_unregister(hwif->index);
106 pci_disable_device(dev);
107}
108
109static struct pci_device_id delkin_cb_pci_tbl[] __devinitdata = {
110 { 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
111 { 0, },
112};
113MODULE_DEVICE_TABLE(pci, delkin_cb_pci_tbl);
114
115static struct pci_driver driver = {
116 .name = "Delkin-ASKA-Workbit Cardbus IDE",
117 .id_table = delkin_cb_pci_tbl,
118 .probe = delkin_cb_probe,
119 .remove = delkin_cb_remove,
120};
121
122static int
123delkin_cb_init (void)
124{
125 return pci_module_init(&driver);
126}
127
128static void
129delkin_cb_exit (void)
130{
131 pci_unregister_driver(&driver);
132}
133
134module_init(delkin_cb_init);
135module_exit(delkin_cb_exit);
136
137MODULE_AUTHOR("Mark Lord");
138MODULE_DESCRIPTION("Basic support for Delkin/ASKA/Workbit Cardbus IDE");
139MODULE_LICENSE("GPL");
140
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index b486442dd5d7..05be8fadda7a 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/ide/pci/hpt366.c Version 0.36 April 25, 2003 2 * linux/drivers/ide/pci/hpt366.c Version 1.01 Dec 23, 2006
3 * 3 *
4 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org> 4 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
5 * Portions Copyright (C) 2001 Sun Microsystems, Inc. 5 * Portions Copyright (C) 2001 Sun Microsystems, Inc.
@@ -60,13 +60,10 @@
60 * channel caused the cached register value to get out of sync with the 60 * channel caused the cached register value to get out of sync with the
61 * actual one, the channels weren't serialized, the turnaround shouldn't 61 * actual one, the channels weren't serialized, the turnaround shouldn't
62 * be done on 66 MHz PCI bus 62 * be done on 66 MHz PCI bus
63 * - avoid calibrating PLL twice as the second time results in a wrong PCI 63 * - disable UltraATA/100 for HPT370 by default as the 33 MHz clock being used
64 * frequency and thus in the wrong timings for the secondary channel 64 * does not allow for this speed anyway
65 * - disable UltraATA/133 for HPT372 by default (50 MHz DPLL clock do not 65 * - avoid touching disabled channels (e.g. HPT371/N are single channel chips,
66 * allow for this speed anyway) 66 * their primary channel is kind of virtual, it isn't tied to any pins)
67 * - add support for HPT302N and HPT371N clocking (the same as for HPT372N)
68 * - HPT371/N are single channel chips, so avoid touching the primary channel
69 * which exists only virtually (there's no pins for it)
70 * - fix/remove bad/unused timing tables and use one set of tables for the whole 67 * - fix/remove bad/unused timing tables and use one set of tables for the whole
71 * HPT37x chip family; save space by introducing the separate transfer mode 68 * HPT37x chip family; save space by introducing the separate transfer mode
72 * table in which the mode lookup is done 69 * table in which the mode lookup is done
@@ -76,11 +73,47 @@
76 * and for HPT36x the obsolete HDIO_TRISTATE_HWIF handler was called instead 73 * and for HPT36x the obsolete HDIO_TRISTATE_HWIF handler was called instead
77 * - pass to init_chipset() handlers a copy of the IDE PCI device structure as 74 * - pass to init_chipset() handlers a copy of the IDE PCI device structure as
78 * they tamper with its fields 75 * they tamper with its fields
79 * <source@mvista.com> 76 * - pass to the init_setup handlers a copy of the ide_pci_device_t structure
80 * 77 * since they may tamper with its fields
78 * - prefix the driver startup messages with the real chip name
79 * - claim the extra 240 bytes of I/O space for all chips
80 * - optimize the rate masking/filtering and the drive list lookup code
81 * - use pci_get_slot() to get to the function 1 of HPT36x/374
82 * - cache offset of the channel's misc. control registers (MCRs) being used
83 * throughout the driver
84 * - only touch the relevant MCR when detecting the cable type on HPT374's
85 * function 1
86 * - rename all the register related variables consistently
87 * - move all the interrupt twiddling code from the speedproc handlers into
88 * init_hwif_hpt366(), also grouping all the DMA related code together there
89 * - merge two HPT37x speedproc handlers, fix the PIO timing register mask and
90 * separate the UltraDMA and MWDMA masks there to avoid changing PIO timings
91 * when setting an UltraDMA mode
92 * - fix hpt3xx_tune_drive() to set the PIO mode requested, not always select
93 * the best possible one
94 * - clean up DMA timeout handling for HPT370
95 * - switch to using the enumeration type to differ between the numerous chip
96 * variants, matching PCI device/revision ID with the chip type early, at the
97 * init_setup stage
98 * - extend the hpt_info structure to hold the DPLL and PCI clock frequencies,
99 * stop duplicating it for each channel by storing the pointer in the pci_dev
100 * structure: first, at the init_setup stage, point it to a static "template"
101 * with only the chip type and its specific base DPLL frequency, the highest
102 * supported DMA mode, and the chip settings table pointer filled, then, at
103 * the init_chipset stage, allocate per-chip instance and fill it with the
104 * rest of the necessary information
105 * - get rid of the constant thresholds in the HPT37x PCI clock detection code,
106 * switch to calculating PCI clock frequency based on the chip's base DPLL
107 * frequency
108 * - switch to using the DPLL clock and enable UltraATA/133 mode by default on
109 * anything newer than HPT370/A
110 * - fold PCI clock detection and DPLL setup code into init_chipset_hpt366(),
111 * also fixing the interchanged 25/40 MHz PCI clock cases for HPT36x chips;
112 * unify HPT36x/37x timing setup code and the speedproc handlers by joining
113 * the register setting lists into the table indexed by the clock selected
114 * Sergei Shtylyov, <sshtylyov@ru.mvista.com> or <source@mvista.com>
81 */ 115 */
82 116
83
84#include <linux/types.h> 117#include <linux/types.h>
85#include <linux/module.h> 118#include <linux/module.h>
86#include <linux/kernel.h> 119#include <linux/kernel.h>
@@ -332,93 +365,159 @@ static u32 sixty_six_base_hpt37x[] = {
332}; 365};
333 366
334#define HPT366_DEBUG_DRIVE_INFO 0 367#define HPT366_DEBUG_DRIVE_INFO 0
335#define HPT374_ALLOW_ATA133_6 0 368#define HPT374_ALLOW_ATA133_6 1
336#define HPT371_ALLOW_ATA133_6 0 369#define HPT371_ALLOW_ATA133_6 1
337#define HPT302_ALLOW_ATA133_6 0 370#define HPT302_ALLOW_ATA133_6 1
338#define HPT372_ALLOW_ATA133_6 0 371#define HPT372_ALLOW_ATA133_6 1
339#define HPT370_ALLOW_ATA100_5 1 372#define HPT370_ALLOW_ATA100_5 0
340#define HPT366_ALLOW_ATA66_4 1 373#define HPT366_ALLOW_ATA66_4 1
341#define HPT366_ALLOW_ATA66_3 1 374#define HPT366_ALLOW_ATA66_3 1
342#define HPT366_MAX_DEVS 8 375#define HPT366_MAX_DEVS 8
343 376
344#define F_LOW_PCI_33 0x23 377/* Supported ATA clock frequencies */
345#define F_LOW_PCI_40 0x29 378enum ata_clock {
346#define F_LOW_PCI_50 0x2d 379 ATA_CLOCK_25MHZ,
347#define F_LOW_PCI_66 0x42 380 ATA_CLOCK_33MHZ,
381 ATA_CLOCK_40MHZ,
382 ATA_CLOCK_50MHZ,
383 ATA_CLOCK_66MHZ,
384 NUM_ATA_CLOCKS
385};
348 386
349/* 387/*
350 * Hold all the highpoint quirks and revision information in one 388 * Hold all the HighPoint chip information in one place.
351 * place.
352 */ 389 */
353 390
354struct hpt_info 391struct hpt_info {
355{ 392 u8 chip_type; /* Chip type */
356 u8 max_mode; /* Speeds allowed */ 393 u8 max_mode; /* Speeds allowed */
357 int revision; /* Chipset revision */ 394 u8 dpll_clk; /* DPLL clock in MHz */
358 int flags; /* Chipset properties */ 395 u8 pci_clk; /* PCI clock in MHz */
359#define PLL_MODE 1 396 u32 **settings; /* Chipset settings table */
360#define IS_3xxN 2
361#define PCI_66MHZ 4
362 /* Speed table */
363 u32 *speed;
364}; 397};
365 398
366/* 399/* Supported HighPoint chips */
367 * This wants fixing so that we do everything not by classrev 400enum {
368 * (which breaks on the newest chips) but by creating an 401 HPT36x,
369 * enumeration of chip variants and using that 402 HPT370,
370 */ 403 HPT370A,
404 HPT374,
405 HPT372,
406 HPT372A,
407 HPT302,
408 HPT371,
409 HPT372N,
410 HPT302N,
411 HPT371N
412};
413
414static u32 *hpt36x_settings[NUM_ATA_CLOCKS] = {
415 twenty_five_base_hpt36x,
416 thirty_three_base_hpt36x,
417 forty_base_hpt36x,
418 NULL,
419 NULL
420};
421
422static u32 *hpt37x_settings[NUM_ATA_CLOCKS] = {
423 NULL,
424 thirty_three_base_hpt37x,
425 NULL,
426 fifty_base_hpt37x,
427 sixty_six_base_hpt37x
428};
429
430static struct hpt_info hpt36x __devinitdata = {
431 .chip_type = HPT36x,
432 .max_mode = (HPT366_ALLOW_ATA66_4 || HPT366_ALLOW_ATA66_3) ? 2 : 1,
433 .dpll_clk = 0, /* no DPLL */
434 .settings = hpt36x_settings
435};
436
437static struct hpt_info hpt370 __devinitdata = {
438 .chip_type = HPT370,
439 .max_mode = HPT370_ALLOW_ATA100_5 ? 3 : 2,
440 .dpll_clk = 48,
441 .settings = hpt37x_settings
442};
443
444static struct hpt_info hpt370a __devinitdata = {
445 .chip_type = HPT370A,
446 .max_mode = HPT370_ALLOW_ATA100_5 ? 3 : 2,
447 .dpll_clk = 48,
448 .settings = hpt37x_settings
449};
450
451static struct hpt_info hpt374 __devinitdata = {
452 .chip_type = HPT374,
453 .max_mode = HPT374_ALLOW_ATA133_6 ? 4 : 3,
454 .dpll_clk = 48,
455 .settings = hpt37x_settings
456};
457
458static struct hpt_info hpt372 __devinitdata = {
459 .chip_type = HPT372,
460 .max_mode = HPT372_ALLOW_ATA133_6 ? 4 : 3,
461 .dpll_clk = 55,
462 .settings = hpt37x_settings
463};
464
465static struct hpt_info hpt372a __devinitdata = {
466 .chip_type = HPT372A,
467 .max_mode = HPT372_ALLOW_ATA133_6 ? 4 : 3,
468 .dpll_clk = 66,
469 .settings = hpt37x_settings
470};
471
472static struct hpt_info hpt302 __devinitdata = {
473 .chip_type = HPT302,
474 .max_mode = HPT302_ALLOW_ATA133_6 ? 4 : 3,
475 .dpll_clk = 66,
476 .settings = hpt37x_settings
477};
478
479static struct hpt_info hpt371 __devinitdata = {
480 .chip_type = HPT371,
481 .max_mode = HPT371_ALLOW_ATA133_6 ? 4 : 3,
482 .dpll_clk = 66,
483 .settings = hpt37x_settings
484};
485
486static struct hpt_info hpt372n __devinitdata = {
487 .chip_type = HPT372N,
488 .max_mode = HPT372_ALLOW_ATA133_6 ? 4 : 3,
489 .dpll_clk = 77,
490 .settings = hpt37x_settings
491};
492
493static struct hpt_info hpt302n __devinitdata = {
494 .chip_type = HPT302N,
495 .max_mode = HPT302_ALLOW_ATA133_6 ? 4 : 3,
496 .dpll_clk = 77,
497};
371 498
372static __devinit u32 hpt_revision (struct pci_dev *dev) 499static struct hpt_info hpt371n __devinitdata = {
500 .chip_type = HPT371N,
501 .max_mode = HPT371_ALLOW_ATA133_6 ? 4 : 3,
502 .dpll_clk = 77,
503 .settings = hpt37x_settings
504};
505
506static int check_in_drive_list(ide_drive_t *drive, const char **list)
373{ 507{
374 u32 class_rev; 508 struct hd_driveid *id = drive->id;
375 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
376 class_rev &= 0xff;
377
378 switch(dev->device) {
379 /* Remap new 372N onto 372 */
380 case PCI_DEVICE_ID_TTI_HPT372N:
381 class_rev = PCI_DEVICE_ID_TTI_HPT372; break;
382 case PCI_DEVICE_ID_TTI_HPT374:
383 class_rev = PCI_DEVICE_ID_TTI_HPT374; break;
384 case PCI_DEVICE_ID_TTI_HPT371:
385 class_rev = PCI_DEVICE_ID_TTI_HPT371; break;
386 case PCI_DEVICE_ID_TTI_HPT302:
387 class_rev = PCI_DEVICE_ID_TTI_HPT302; break;
388 case PCI_DEVICE_ID_TTI_HPT372:
389 class_rev = PCI_DEVICE_ID_TTI_HPT372; break;
390 default:
391 break;
392 }
393 return class_rev;
394}
395 509
396static int check_in_drive_lists(ide_drive_t *drive, const char **list); 510 while (*list)
511 if (!strcmp(*list++,id->model))
512 return 1;
513 return 0;
514}
397 515
398static u8 hpt3xx_ratemask (ide_drive_t *drive) 516static u8 hpt3xx_ratemask(ide_drive_t *drive)
399{ 517{
400 ide_hwif_t *hwif = drive->hwif; 518 struct hpt_info *info = pci_get_drvdata(HWIF(drive)->pci_dev);
401 struct hpt_info *info = ide_get_hwifdata(hwif); 519 u8 mode = info->max_mode;
402 u8 mode = 0; 520
403
404 /* FIXME: TODO - move this to set info->mode once at boot */
405
406 if (info->revision >= 8) { /* HPT374 */
407 mode = (HPT374_ALLOW_ATA133_6) ? 4 : 3;
408 } else if (info->revision >= 7) { /* HPT371 */
409 mode = (HPT371_ALLOW_ATA133_6) ? 4 : 3;
410 } else if (info->revision >= 6) { /* HPT302 */
411 mode = (HPT302_ALLOW_ATA133_6) ? 4 : 3;
412 } else if (info->revision >= 5) { /* HPT372 */
413 mode = (HPT372_ALLOW_ATA133_6) ? 4 : 3;
414 } else if (info->revision >= 4) { /* HPT370A */
415 mode = (HPT370_ALLOW_ATA100_5) ? 3 : 2;
416 } else if (info->revision >= 3) { /* HPT370 */
417 mode = (HPT370_ALLOW_ATA100_5) ? 3 : 2;
418 mode = (check_in_drive_lists(drive, bad_ata33)) ? 0 : mode;
419 } else { /* HPT366 and HPT368 */
420 mode = (check_in_drive_lists(drive, bad_ata33)) ? 0 : 2;
421 }
422 if (!eighty_ninty_three(drive) && mode) 521 if (!eighty_ninty_three(drive) && mode)
423 mode = min(mode, (u8)1); 522 mode = min(mode, (u8)1);
424 return mode; 523 return mode;
@@ -429,75 +528,61 @@ static u8 hpt3xx_ratemask (ide_drive_t *drive)
429 * either PIO or UDMA modes 0,4,5 528 * either PIO or UDMA modes 0,4,5
430 */ 529 */
431 530
432static u8 hpt3xx_ratefilter (ide_drive_t *drive, u8 speed) 531static u8 hpt3xx_ratefilter(ide_drive_t *drive, u8 speed)
433{ 532{
434 ide_hwif_t *hwif = drive->hwif; 533 struct hpt_info *info = pci_get_drvdata(HWIF(drive)->pci_dev);
435 struct hpt_info *info = ide_get_hwifdata(hwif); 534 u8 chip_type = info->chip_type;
436 u8 mode = hpt3xx_ratemask(drive); 535 u8 mode = hpt3xx_ratemask(drive);
437 536
438 if (drive->media != ide_disk) 537 if (drive->media != ide_disk)
439 return min(speed, (u8)XFER_PIO_4); 538 return min(speed, (u8)XFER_PIO_4);
440 539
441 switch(mode) { 540 switch (mode) {
442 case 0x04: 541 case 0x04:
443 speed = min(speed, (u8)XFER_UDMA_6); 542 speed = min_t(u8, speed, XFER_UDMA_6);
444 break; 543 break;
445 case 0x03: 544 case 0x03:
446 speed = min(speed, (u8)XFER_UDMA_5); 545 speed = min_t(u8, speed, XFER_UDMA_5);
447 if (info->revision >= 5) 546 if (chip_type >= HPT374)
448 break; 547 break;
449 if (check_in_drive_lists(drive, bad_ata100_5)) 548 if (!check_in_drive_list(drive, bad_ata100_5))
450 speed = min(speed, (u8)XFER_UDMA_4); 549 goto check_bad_ata33;
451 break; 550 /* fall thru */
452 case 0x02: 551 case 0x02:
453 speed = min(speed, (u8)XFER_UDMA_4); 552 speed = min_t(u8, speed, XFER_UDMA_4);
454 /* 553
455 * CHECK ME, Does this need to be set to 5 ?? 554 /*
456 */ 555 * CHECK ME, Does this need to be changed to HPT374 ??
457 if (info->revision >= 3) 556 */
458 break; 557 if (chip_type >= HPT370)
459 if ((check_in_drive_lists(drive, bad_ata66_4)) || 558 goto check_bad_ata33;
460 (!(HPT366_ALLOW_ATA66_4))) 559 if (HPT366_ALLOW_ATA66_4 &&
461 speed = min(speed, (u8)XFER_UDMA_3); 560 !check_in_drive_list(drive, bad_ata66_4))
462 if ((check_in_drive_lists(drive, bad_ata66_3)) || 561 goto check_bad_ata33;
463 (!(HPT366_ALLOW_ATA66_3))) 562
464 speed = min(speed, (u8)XFER_UDMA_2); 563 speed = min_t(u8, speed, XFER_UDMA_3);
465 break; 564 if (HPT366_ALLOW_ATA66_3 &&
565 !check_in_drive_list(drive, bad_ata66_3))
566 goto check_bad_ata33;
567 /* fall thru */
466 case 0x01: 568 case 0x01:
467 speed = min(speed, (u8)XFER_UDMA_2); 569 speed = min_t(u8, speed, XFER_UDMA_2);
468 /* 570
469 * CHECK ME, Does this need to be set to 5 ?? 571 check_bad_ata33:
470 */ 572 if (chip_type >= HPT370A)
471 if (info->revision >= 3)
472 break; 573 break;
473 if (check_in_drive_lists(drive, bad_ata33)) 574 if (!check_in_drive_list(drive, bad_ata33))
474 speed = min(speed, (u8)XFER_MW_DMA_2); 575 break;
475 break; 576 /* fall thru */
476 case 0x00: 577 case 0x00:
477 default: 578 default:
478 speed = min(speed, (u8)XFER_MW_DMA_2); 579 speed = min_t(u8, speed, XFER_MW_DMA_2);
479 break; 580 break;
480 } 581 }
481 return speed; 582 return speed;
482} 583}
483 584
484static int check_in_drive_lists (ide_drive_t *drive, const char **list) 585static u32 get_speed_setting(u8 speed, struct hpt_info *info)
485{
486 struct hd_driveid *id = drive->id;
487
488 if (quirk_drives == list) {
489 while (*list)
490 if (strstr(id->model, *list++))
491 return 1;
492 } else {
493 while (*list)
494 if (!strcmp(*list++,id->model))
495 return 1;
496 }
497 return 0;
498}
499
500static u32 pci_bus_clock_list(u8 speed, u32 *chipset_table)
501{ 586{
502 int i; 587 int i;
503 588
@@ -510,228 +595,161 @@ static u32 pci_bus_clock_list(u8 speed, u32 *chipset_table)
510 for (i = 0; i < ARRAY_SIZE(xfer_speeds) - 1; i++) 595 for (i = 0; i < ARRAY_SIZE(xfer_speeds) - 1; i++)
511 if (xfer_speeds[i] == speed) 596 if (xfer_speeds[i] == speed)
512 break; 597 break;
513 return chipset_table[i]; 598 /*
599 * NOTE: info->settings only points to the pointer
600 * to the list of the actual register values
601 */
602 return (*info->settings)[i];
514} 603}
515 604
516static int hpt36x_tune_chipset(ide_drive_t *drive, u8 xferspeed) 605static int hpt36x_tune_chipset(ide_drive_t *drive, u8 xferspeed)
517{ 606{
518 ide_hwif_t *hwif = drive->hwif; 607 ide_hwif_t *hwif = HWIF(drive);
519 struct pci_dev *dev = hwif->pci_dev; 608 struct pci_dev *dev = hwif->pci_dev;
520 struct hpt_info *info = ide_get_hwifdata(hwif); 609 struct hpt_info *info = pci_get_drvdata(dev);
521 u8 speed = hpt3xx_ratefilter(drive, xferspeed); 610 u8 speed = hpt3xx_ratefilter(drive, xferspeed);
522 u8 regtime = (drive->select.b.unit & 0x01) ? 0x44 : 0x40; 611 u8 itr_addr = drive->dn ? 0x44 : 0x40;
523 u8 regfast = (hwif->channel) ? 0x55 : 0x51; 612 u32 itr_mask = speed < XFER_MW_DMA_0 ? 0x30070000 :
524 u8 drive_fast = 0; 613 (speed < XFER_UDMA_0 ? 0xc0070000 : 0xc03800ff);
525 u32 reg1 = 0, reg2 = 0; 614 u32 new_itr = get_speed_setting(speed, info);
526 615 u32 old_itr = 0;
527 /*
528 * Disable the "fast interrupt" prediction.
529 */
530 pci_read_config_byte(dev, regfast, &drive_fast);
531 if (drive_fast & 0x80)
532 pci_write_config_byte(dev, regfast, drive_fast & ~0x80);
533
534 reg2 = pci_bus_clock_list(speed, info->speed);
535 616
536 /* 617 /*
537 * Disable on-chip PIO FIFO/buffer 618 * Disable on-chip PIO FIFO/buffer (and PIO MST mode as well)
538 * (to avoid problems handling I/O errors later) 619 * to avoid problems handling I/O errors later
539 */ 620 */
540 pci_read_config_dword(dev, regtime, &reg1); 621 pci_read_config_dword(dev, itr_addr, &old_itr);
541 if (speed >= XFER_MW_DMA_0) { 622 new_itr = (new_itr & ~itr_mask) | (old_itr & itr_mask);
542 reg2 = (reg2 & ~0xc0000000) | (reg1 & 0xc0000000); 623 new_itr &= ~0xc0000000;
543 } else {
544 reg2 = (reg2 & ~0x30070000) | (reg1 & 0x30070000);
545 }
546 reg2 &= ~0x80000000;
547 624
548 pci_write_config_dword(dev, regtime, reg2); 625 pci_write_config_dword(dev, itr_addr, new_itr);
549 626
550 return ide_config_drive_speed(drive, speed); 627 return ide_config_drive_speed(drive, speed);
551} 628}
552 629
553static int hpt370_tune_chipset(ide_drive_t *drive, u8 xferspeed) 630static int hpt37x_tune_chipset(ide_drive_t *drive, u8 xferspeed)
554{ 631{
555 ide_hwif_t *hwif = drive->hwif; 632 ide_hwif_t *hwif = HWIF(drive);
556 struct pci_dev *dev = hwif->pci_dev; 633 struct pci_dev *dev = hwif->pci_dev;
557 struct hpt_info *info = ide_get_hwifdata(hwif); 634 struct hpt_info *info = pci_get_drvdata(dev);
558 u8 speed = hpt3xx_ratefilter(drive, xferspeed); 635 u8 speed = hpt3xx_ratefilter(drive, xferspeed);
559 u8 regfast = (drive->hwif->channel) ? 0x55 : 0x51; 636 u8 itr_addr = 0x40 + (drive->dn * 4);
560 u8 drive_pci = 0x40 + (drive->dn * 4); 637 u32 itr_mask = speed < XFER_MW_DMA_0 ? 0x303c0000 :
561 u8 new_fast = 0, drive_fast = 0; 638 (speed < XFER_UDMA_0 ? 0xc03c0000 : 0xc1c001ff);
562 u32 list_conf = 0, drive_conf = 0; 639 u32 new_itr = get_speed_setting(speed, info);
563 u32 conf_mask = (speed >= XFER_MW_DMA_0) ? 0xc0000000 : 0x30070000; 640 u32 old_itr = 0;
564 641
565 /* 642 pci_read_config_dword(dev, itr_addr, &old_itr);
566 * Disable the "fast interrupt" prediction. 643 new_itr = (new_itr & ~itr_mask) | (old_itr & itr_mask);
567 * don't holdoff on interrupts. (== 0x01 despite what the docs say)
568 */
569 pci_read_config_byte(dev, regfast, &drive_fast);
570 new_fast = drive_fast;
571 if (new_fast & 0x02)
572 new_fast &= ~0x02;
573
574#ifdef HPT_DELAY_INTERRUPT
575 if (new_fast & 0x01)
576 new_fast &= ~0x01;
577#else
578 if ((new_fast & 0x01) == 0)
579 new_fast |= 0x01;
580#endif
581 if (new_fast != drive_fast)
582 pci_write_config_byte(dev, regfast, new_fast);
583
584 list_conf = pci_bus_clock_list(speed, info->speed);
585
586 pci_read_config_dword(dev, drive_pci, &drive_conf);
587 list_conf = (list_conf & ~conf_mask) | (drive_conf & conf_mask);
588 644
589 if (speed < XFER_MW_DMA_0) 645 if (speed < XFER_MW_DMA_0)
590 list_conf &= ~0x80000000; /* Disable on-chip PIO FIFO/buffer */ 646 new_itr &= ~0x80000000; /* Disable on-chip PIO FIFO/buffer */
591 pci_write_config_dword(dev, drive_pci, list_conf); 647 pci_write_config_dword(dev, itr_addr, new_itr);
592 648
593 return ide_config_drive_speed(drive, speed); 649 return ide_config_drive_speed(drive, speed);
594} 650}
595 651
596static int hpt372_tune_chipset(ide_drive_t *drive, u8 xferspeed) 652static int hpt3xx_tune_chipset(ide_drive_t *drive, u8 speed)
597{ 653{
598 ide_hwif_t *hwif = drive->hwif; 654 ide_hwif_t *hwif = HWIF(drive);
599 struct pci_dev *dev = hwif->pci_dev; 655 struct hpt_info *info = pci_get_drvdata(hwif->pci_dev);
600 struct hpt_info *info = ide_get_hwifdata(hwif);
601 u8 speed = hpt3xx_ratefilter(drive, xferspeed);
602 u8 regfast = (drive->hwif->channel) ? 0x55 : 0x51;
603 u8 drive_fast = 0, drive_pci = 0x40 + (drive->dn * 4);
604 u32 list_conf = 0, drive_conf = 0;
605 u32 conf_mask = (speed >= XFER_MW_DMA_0) ? 0xc0000000 : 0x30070000;
606
607 /*
608 * Disable the "fast interrupt" prediction.
609 * don't holdoff on interrupts. (== 0x01 despite what the docs say)
610 */
611 pci_read_config_byte(dev, regfast, &drive_fast);
612 drive_fast &= ~0x07;
613 pci_write_config_byte(dev, regfast, drive_fast);
614
615 list_conf = pci_bus_clock_list(speed, info->speed);
616 pci_read_config_dword(dev, drive_pci, &drive_conf);
617 list_conf = (list_conf & ~conf_mask) | (drive_conf & conf_mask);
618 if (speed < XFER_MW_DMA_0)
619 list_conf &= ~0x80000000; /* Disable on-chip PIO FIFO/buffer */
620 pci_write_config_dword(dev, drive_pci, list_conf);
621
622 return ide_config_drive_speed(drive, speed);
623}
624 656
625static int hpt3xx_tune_chipset (ide_drive_t *drive, u8 speed) 657 if (info->chip_type >= HPT370)
626{ 658 return hpt37x_tune_chipset(drive, speed);
627 ide_hwif_t *hwif = drive->hwif;
628 struct hpt_info *info = ide_get_hwifdata(hwif);
629
630 if (info->revision >= 8)
631 return hpt372_tune_chipset(drive, speed); /* not a typo */
632 else if (info->revision >= 5)
633 return hpt372_tune_chipset(drive, speed);
634 else if (info->revision >= 3)
635 return hpt370_tune_chipset(drive, speed);
636 else /* hpt368: hpt_minimum_revision(dev, 2) */ 659 else /* hpt368: hpt_minimum_revision(dev, 2) */
637 return hpt36x_tune_chipset(drive, speed); 660 return hpt36x_tune_chipset(drive, speed);
638} 661}
639 662
640static void hpt3xx_tune_drive (ide_drive_t *drive, u8 pio) 663static void hpt3xx_tune_drive(ide_drive_t *drive, u8 pio)
641{ 664{
642 pio = ide_get_best_pio_mode(drive, 255, pio, NULL); 665 pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
643 (void) hpt3xx_tune_chipset(drive, (XFER_PIO_0 + pio)); 666 (void) hpt3xx_tune_chipset (drive, XFER_PIO_0 + pio);
644} 667}
645 668
646/* 669/*
647 * This allows the configuration of ide_pci chipset registers 670 * This allows the configuration of ide_pci chipset registers
648 * for cards that learn about the drive's UDMA, DMA, PIO capabilities 671 * for cards that learn about the drive's UDMA, DMA, PIO capabilities
649 * after the drive is reported by the OS. Initially for designed for 672 * after the drive is reported by the OS. Initially designed for
650 * HPT366 UDMA chipset by HighPoint|Triones Technologies, Inc. 673 * HPT366 UDMA chipset by HighPoint|Triones Technologies, Inc.
651 * 674 *
652 * check_in_drive_lists(drive, bad_ata66_4)
653 * check_in_drive_lists(drive, bad_ata66_3)
654 * check_in_drive_lists(drive, bad_ata33)
655 *
656 */ 675 */
657static int config_chipset_for_dma (ide_drive_t *drive) 676static int config_chipset_for_dma(ide_drive_t *drive)
658{ 677{
659 u8 speed = ide_dma_speed(drive, hpt3xx_ratemask(drive)); 678 u8 speed = ide_dma_speed(drive, hpt3xx_ratemask(drive));
660 ide_hwif_t *hwif = drive->hwif;
661 struct hpt_info *info = ide_get_hwifdata(hwif);
662 679
663 if (!speed) 680 if (!speed)
664 return 0; 681 return 0;
665 682
666 /* If we don't have any timings we can't do a lot */
667 if (info->speed == NULL)
668 return 0;
669
670 (void) hpt3xx_tune_chipset(drive, speed); 683 (void) hpt3xx_tune_chipset(drive, speed);
671 return ide_dma_enable(drive); 684 return ide_dma_enable(drive);
672} 685}
673 686
674static int hpt3xx_quirkproc (ide_drive_t *drive) 687static int hpt3xx_quirkproc(ide_drive_t *drive)
675{ 688{
676 return ((int) check_in_drive_lists(drive, quirk_drives)); 689 struct hd_driveid *id = drive->id;
690 const char **list = quirk_drives;
691
692 while (*list)
693 if (strstr(id->model, *list++))
694 return 1;
695 return 0;
677} 696}
678 697
679static void hpt3xx_intrproc (ide_drive_t *drive) 698static void hpt3xx_intrproc(ide_drive_t *drive)
680{ 699{
681 ide_hwif_t *hwif = drive->hwif; 700 ide_hwif_t *hwif = HWIF(drive);
682 701
683 if (drive->quirk_list) 702 if (drive->quirk_list)
684 return; 703 return;
685 /* drives in the quirk_list may not like intr setups/cleanups */ 704 /* drives in the quirk_list may not like intr setups/cleanups */
686 hwif->OUTB(drive->ctl|2, IDE_CONTROL_REG); 705 hwif->OUTB(drive->ctl | 2, IDE_CONTROL_REG);
687} 706}
688 707
689static void hpt3xx_maskproc (ide_drive_t *drive, int mask) 708static void hpt3xx_maskproc(ide_drive_t *drive, int mask)
690{ 709{
691 ide_hwif_t *hwif = drive->hwif; 710 ide_hwif_t *hwif = HWIF(drive);
692 struct hpt_info *info = ide_get_hwifdata(hwif); 711 struct pci_dev *dev = hwif->pci_dev;
693 struct pci_dev *dev = hwif->pci_dev; 712 struct hpt_info *info = pci_get_drvdata(dev);
694 713
695 if (drive->quirk_list) { 714 if (drive->quirk_list) {
696 if (info->revision >= 3) { 715 if (info->chip_type >= HPT370) {
697 u8 reg5a = 0; 716 u8 scr1 = 0;
698 pci_read_config_byte(dev, 0x5a, &reg5a); 717
699 if (((reg5a & 0x10) >> 4) != mask) 718 pci_read_config_byte(dev, 0x5a, &scr1);
700 pci_write_config_byte(dev, 0x5a, mask ? (reg5a | 0x10) : (reg5a & ~0x10)); 719 if (((scr1 & 0x10) >> 4) != mask) {
720 if (mask)
721 scr1 |= 0x10;
722 else
723 scr1 &= ~0x10;
724 pci_write_config_byte(dev, 0x5a, scr1);
725 }
701 } else { 726 } else {
702 if (mask) { 727 if (mask)
703 disable_irq(hwif->irq); 728 disable_irq(hwif->irq);
704 } else { 729 else
705 enable_irq(hwif->irq); 730 enable_irq (hwif->irq);
706 }
707 } 731 }
708 } else { 732 } else
709 if (IDE_CONTROL_REG) 733 hwif->OUTB(mask ? (drive->ctl | 2) : (drive->ctl & ~2),
710 hwif->OUTB(mask ? (drive->ctl | 2) : 734 IDE_CONTROL_REG);
711 (drive->ctl & ~2),
712 IDE_CONTROL_REG);
713 }
714} 735}
715 736
716static int hpt366_config_drive_xfer_rate (ide_drive_t *drive) 737static int hpt366_config_drive_xfer_rate(ide_drive_t *drive)
717{ 738{
718 ide_hwif_t *hwif = drive->hwif; 739 ide_hwif_t *hwif = HWIF(drive);
719 struct hd_driveid *id = drive->id; 740 struct hd_driveid *id = drive->id;
720 741
721 drive->init_speed = 0; 742 drive->init_speed = 0;
722 743
723 if ((id->capability & 1) && drive->autodma) { 744 if ((id->capability & 1) && drive->autodma) {
724 745 if (ide_use_dma(drive) && config_chipset_for_dma(drive))
725 if (ide_use_dma(drive)) { 746 return hwif->ide_dma_on(drive);
726 if (config_chipset_for_dma(drive))
727 return hwif->ide_dma_on(drive);
728 }
729 747
730 goto fast_ata_pio; 748 goto fast_ata_pio;
731 749
732 } else if ((id->capability & 8) || (id->field_valid & 2)) { 750 } else if ((id->capability & 8) || (id->field_valid & 2)) {
733fast_ata_pio: 751fast_ata_pio:
734 hpt3xx_tune_drive(drive, 5); 752 hpt3xx_tune_drive(drive, 255);
735 return hwif->ide_dma_off_quietly(drive); 753 return hwif->ide_dma_off_quietly(drive);
736 } 754 }
737 /* IORDY not supported */ 755 /* IORDY not supported */
@@ -739,31 +757,48 @@ fast_ata_pio:
739} 757}
740 758
741/* 759/*
742 * This is specific to the HPT366 UDMA bios chipset 760 * This is specific to the HPT366 UDMA chipset
743 * by HighPoint|Triones Technologies, Inc. 761 * by HighPoint|Triones Technologies, Inc.
744 */ 762 */
745static int hpt366_ide_dma_lostirq (ide_drive_t *drive) 763static int hpt366_ide_dma_lostirq(ide_drive_t *drive)
746{ 764{
747 struct pci_dev *dev = HWIF(drive)->pci_dev; 765 struct pci_dev *dev = HWIF(drive)->pci_dev;
748 u8 reg50h = 0, reg52h = 0, reg5ah = 0; 766 u8 mcr1 = 0, mcr3 = 0, scr1 = 0;
749 767
750 pci_read_config_byte(dev, 0x50, &reg50h); 768 pci_read_config_byte(dev, 0x50, &mcr1);
751 pci_read_config_byte(dev, 0x52, &reg52h); 769 pci_read_config_byte(dev, 0x52, &mcr3);
752 pci_read_config_byte(dev, 0x5a, &reg5ah); 770 pci_read_config_byte(dev, 0x5a, &scr1);
753 printk("%s: (%s) reg50h=0x%02x, reg52h=0x%02x, reg5ah=0x%02x\n", 771 printk("%s: (%s) mcr1=0x%02x, mcr3=0x%02x, scr1=0x%02x\n",
754 drive->name, __FUNCTION__, reg50h, reg52h, reg5ah); 772 drive->name, __FUNCTION__, mcr1, mcr3, scr1);
755 if (reg5ah & 0x10) 773 if (scr1 & 0x10)
756 pci_write_config_byte(dev, 0x5a, reg5ah & ~0x10); 774 pci_write_config_byte(dev, 0x5a, scr1 & ~0x10);
757 return __ide_dma_lostirq(drive); 775 return __ide_dma_lostirq(drive);
758} 776}
759 777
760static void hpt370_clear_engine (ide_drive_t *drive) 778static void hpt370_clear_engine(ide_drive_t *drive)
761{ 779{
762 u8 regstate = HWIF(drive)->channel ? 0x54 : 0x50; 780 ide_hwif_t *hwif = HWIF(drive);
763 pci_write_config_byte(HWIF(drive)->pci_dev, regstate, 0x37); 781
782 pci_write_config_byte(hwif->pci_dev, hwif->select_data, 0x37);
764 udelay(10); 783 udelay(10);
765} 784}
766 785
786static void hpt370_irq_timeout(ide_drive_t *drive)
787{
788 ide_hwif_t *hwif = HWIF(drive);
789 u16 bfifo = 0;
790 u8 dma_cmd;
791
792 pci_read_config_word(hwif->pci_dev, hwif->select_data + 2, &bfifo);
793 printk(KERN_DEBUG "%s: %d bytes in FIFO\n", drive->name, bfifo & 0x1ff);
794
795 /* get DMA command mode */
796 dma_cmd = hwif->INB(hwif->dma_command);
797 /* stop DMA */
798 hwif->OUTB(dma_cmd & ~0x1, hwif->dma_command);
799 hpt370_clear_engine(drive);
800}
801
767static void hpt370_ide_dma_start(ide_drive_t *drive) 802static void hpt370_ide_dma_start(ide_drive_t *drive)
768{ 803{
769#ifdef HPT_RESET_STATE_ENGINE 804#ifdef HPT_RESET_STATE_ENGINE
@@ -772,64 +807,35 @@ static void hpt370_ide_dma_start(ide_drive_t *drive)
772 ide_dma_start(drive); 807 ide_dma_start(drive);
773} 808}
774 809
775static int hpt370_ide_dma_end (ide_drive_t *drive) 810static int hpt370_ide_dma_end(ide_drive_t *drive)
776{ 811{
777 ide_hwif_t *hwif = HWIF(drive); 812 ide_hwif_t *hwif = HWIF(drive);
778 u8 dma_stat = hwif->INB(hwif->dma_status); 813 u8 dma_stat = hwif->INB(hwif->dma_status);
779 814
780 if (dma_stat & 0x01) { 815 if (dma_stat & 0x01) {
781 /* wait a little */ 816 /* wait a little */
782 udelay(20); 817 udelay(20);
783 dma_stat = hwif->INB(hwif->dma_status); 818 dma_stat = hwif->INB(hwif->dma_status);
819 if (dma_stat & 0x01)
820 hpt370_irq_timeout(drive);
784 } 821 }
785 if ((dma_stat & 0x01) != 0)
786 /* fallthrough */
787 (void) HWIF(drive)->ide_dma_timeout(drive);
788
789 return __ide_dma_end(drive); 822 return __ide_dma_end(drive);
790} 823}
791 824
792static void hpt370_lostirq_timeout (ide_drive_t *drive) 825static int hpt370_ide_dma_timeout(ide_drive_t *drive)
793{ 826{
794 ide_hwif_t *hwif = HWIF(drive); 827 hpt370_irq_timeout(drive);
795 u8 bfifo = 0, reginfo = hwif->channel ? 0x56 : 0x52;
796 u8 dma_stat = 0, dma_cmd = 0;
797
798 pci_read_config_byte(HWIF(drive)->pci_dev, reginfo, &bfifo);
799 printk(KERN_DEBUG "%s: %d bytes in FIFO\n", drive->name, bfifo);
800 hpt370_clear_engine(drive);
801 /* get dma command mode */
802 dma_cmd = hwif->INB(hwif->dma_command);
803 /* stop dma */
804 hwif->OUTB(dma_cmd & ~0x1, hwif->dma_command);
805 dma_stat = hwif->INB(hwif->dma_status);
806 /* clear errors */
807 hwif->OUTB(dma_stat | 0x6, hwif->dma_status);
808}
809
810static int hpt370_ide_dma_timeout (ide_drive_t *drive)
811{
812 hpt370_lostirq_timeout(drive);
813 hpt370_clear_engine(drive);
814 return __ide_dma_timeout(drive); 828 return __ide_dma_timeout(drive);
815} 829}
816 830
817static int hpt370_ide_dma_lostirq (ide_drive_t *drive)
818{
819 hpt370_lostirq_timeout(drive);
820 hpt370_clear_engine(drive);
821 return __ide_dma_lostirq(drive);
822}
823
824/* returns 1 if DMA IRQ issued, 0 otherwise */ 831/* returns 1 if DMA IRQ issued, 0 otherwise */
825static int hpt374_ide_dma_test_irq(ide_drive_t *drive) 832static int hpt374_ide_dma_test_irq(ide_drive_t *drive)
826{ 833{
827 ide_hwif_t *hwif = HWIF(drive); 834 ide_hwif_t *hwif = HWIF(drive);
828 u16 bfifo = 0; 835 u16 bfifo = 0;
829 u8 reginfo = hwif->channel ? 0x56 : 0x52; 836 u8 dma_stat;
830 u8 dma_stat;
831 837
832 pci_read_config_word(hwif->pci_dev, reginfo, &bfifo); 838 pci_read_config_word(hwif->pci_dev, hwif->select_data + 2, &bfifo);
833 if (bfifo & 0x1FF) { 839 if (bfifo & 0x1FF) {
834// printk("%s: %d bytes in FIFO\n", drive->name, bfifo); 840// printk("%s: %d bytes in FIFO\n", drive->name, bfifo);
835 return 0; 841 return 0;
@@ -837,7 +843,7 @@ static int hpt374_ide_dma_test_irq(ide_drive_t *drive)
837 843
838 dma_stat = hwif->INB(hwif->dma_status); 844 dma_stat = hwif->INB(hwif->dma_status);
839 /* return 1 if INTR asserted */ 845 /* return 1 if INTR asserted */
840 if ((dma_stat & 4) == 4) 846 if (dma_stat & 4)
841 return 1; 847 return 1;
842 848
843 if (!drive->waiting_for_dma) 849 if (!drive->waiting_for_dma)
@@ -846,17 +852,17 @@ static int hpt374_ide_dma_test_irq(ide_drive_t *drive)
846 return 0; 852 return 0;
847} 853}
848 854
849static int hpt374_ide_dma_end (ide_drive_t *drive) 855static int hpt374_ide_dma_end(ide_drive_t *drive)
850{ 856{
851 struct pci_dev *dev = HWIF(drive)->pci_dev;
852 ide_hwif_t *hwif = HWIF(drive); 857 ide_hwif_t *hwif = HWIF(drive);
853 u8 msc_stat = 0, mscreg = hwif->channel ? 0x54 : 0x50; 858 struct pci_dev *dev = hwif->pci_dev;
854 u8 bwsr_stat = 0, bwsr_mask = hwif->channel ? 0x02 : 0x01; 859 u8 mcr = 0, mcr_addr = hwif->select_data;
855 860 u8 bwsr = 0, mask = hwif->channel ? 0x02 : 0x01;
856 pci_read_config_byte(dev, 0x6a, &bwsr_stat); 861
857 pci_read_config_byte(dev, mscreg, &msc_stat); 862 pci_read_config_byte(dev, 0x6a, &bwsr);
858 if ((bwsr_stat & bwsr_mask) == bwsr_mask) 863 pci_read_config_byte(dev, mcr_addr, &mcr);
859 pci_write_config_byte(dev, mscreg, msc_stat|0x30); 864 if (bwsr & mask)
865 pci_write_config_byte(dev, mcr_addr, mcr | 0x30);
860 return __ide_dma_end(drive); 866 return __ide_dma_end(drive);
861} 867}
862 868
@@ -866,40 +872,37 @@ static int hpt374_ide_dma_end (ide_drive_t *drive)
866 * @mode: clocking mode (0x21 for write, 0x23 otherwise) 872 * @mode: clocking mode (0x21 for write, 0x23 otherwise)
867 * 873 *
868 * Switch the DPLL clock on the HPT3xxN devices. This is a right mess. 874 * Switch the DPLL clock on the HPT3xxN devices. This is a right mess.
869 * NOTE: avoid touching the disabled primary channel on HPT371N -- it
870 * doesn't physically exist anyway...
871 */ 875 */
872 876
873static void hpt3xxn_set_clock(ide_hwif_t *hwif, u8 mode) 877static void hpt3xxn_set_clock(ide_hwif_t *hwif, u8 mode)
874{ 878{
875 u8 mcr1, scr2 = hwif->INB(hwif->dma_master + 0x7b); 879 u8 scr2 = hwif->INB(hwif->dma_master + 0x7b);
876 880
877 if ((scr2 & 0x7f) == mode) 881 if ((scr2 & 0x7f) == mode)
878 return; 882 return;
879 883
880 /* MISC. control register 1 has the channel enable bit... */
881 mcr1 = hwif->INB(hwif->dma_master + 0x70);
882
883 /* Tristate the bus */ 884 /* Tristate the bus */
884 if (mcr1 & 0x04) 885 hwif->OUTB(0x80, hwif->dma_master + 0x73);
885 hwif->OUTB(0x80, hwif->dma_master + 0x73);
886 hwif->OUTB(0x80, hwif->dma_master + 0x77); 886 hwif->OUTB(0x80, hwif->dma_master + 0x77);
887 887
888 /* Switch clock and reset channels */ 888 /* Switch clock and reset channels */
889 hwif->OUTB(mode, hwif->dma_master + 0x7b); 889 hwif->OUTB(mode, hwif->dma_master + 0x7b);
890 hwif->OUTB(0xc0, hwif->dma_master + 0x79); 890 hwif->OUTB(0xc0, hwif->dma_master + 0x79);
891 891
892 /* Reset state machines */ 892 /*
893 if (mcr1 & 0x04) 893 * Reset the state machines.
894 hwif->OUTB(0x37, hwif->dma_master + 0x70); 894 * NOTE: avoid accidentally enabling the disabled channels.
895 hwif->OUTB(0x37, hwif->dma_master + 0x74); 895 */
896 hwif->OUTB(hwif->INB(hwif->dma_master + 0x70) | 0x32,
897 hwif->dma_master + 0x70);
898 hwif->OUTB(hwif->INB(hwif->dma_master + 0x74) | 0x32,
899 hwif->dma_master + 0x74);
896 900
897 /* Complete reset */ 901 /* Complete reset */
898 hwif->OUTB(0x00, hwif->dma_master + 0x79); 902 hwif->OUTB(0x00, hwif->dma_master + 0x79);
899 903
900 /* Reconnect channels to bus */ 904 /* Reconnect channels to bus */
901 if (mcr1 & 0x04) 905 hwif->OUTB(0x00, hwif->dma_master + 0x73);
902 hwif->OUTB(0x00, hwif->dma_master + 0x73);
903 hwif->OUTB(0x00, hwif->dma_master + 0x77); 906 hwif->OUTB(0x00, hwif->dma_master + 0x77);
904} 907}
905 908
@@ -914,14 +917,12 @@ static void hpt3xxn_set_clock(ide_hwif_t *hwif, u8 mode)
914 917
915static void hpt3xxn_rw_disk(ide_drive_t *drive, struct request *rq) 918static void hpt3xxn_rw_disk(ide_drive_t *drive, struct request *rq)
916{ 919{
917 ide_hwif_t *hwif = HWIF(drive); 920 hpt3xxn_set_clock(HWIF(drive), rq_data_dir(rq) ? 0x23 : 0x21);
918 u8 wantclock = rq_data_dir(rq) ? 0x23 : 0x21;
919
920 hpt3xxn_set_clock(hwif, wantclock);
921} 921}
922 922
923/* 923/*
924 * Set/get power state for a drive. 924 * Set/get power state for a drive.
925 * NOTE: affects both drives on each channel.
925 * 926 *
926 * When we turn the power back on, we need to re-initialize things. 927 * When we turn the power back on, we need to re-initialize things.
927 */ 928 */
@@ -929,26 +930,18 @@ static void hpt3xxn_rw_disk(ide_drive_t *drive, struct request *rq)
929 930
930static int hpt3xx_busproc(ide_drive_t *drive, int state) 931static int hpt3xx_busproc(ide_drive_t *drive, int state)
931{ 932{
932 ide_hwif_t *hwif = drive->hwif; 933 ide_hwif_t *hwif = HWIF(drive);
933 struct pci_dev *dev = hwif->pci_dev; 934 struct pci_dev *dev = hwif->pci_dev;
934 u8 tristate, resetmask, bus_reg = 0; 935 u8 mcr_addr = hwif->select_data + 2;
935 u16 tri_reg = 0; 936 u8 resetmask = hwif->channel ? 0x80 : 0x40;
937 u8 bsr2 = 0;
938 u16 mcr = 0;
936 939
937 hwif->bus_state = state; 940 hwif->bus_state = state;
938 941
939 if (hwif->channel) {
940 /* secondary channel */
941 tristate = 0x56;
942 resetmask = 0x80;
943 } else {
944 /* primary channel */
945 tristate = 0x52;
946 resetmask = 0x40;
947 }
948
949 /* Grab the status. */ 942 /* Grab the status. */
950 pci_read_config_word(dev, tristate, &tri_reg); 943 pci_read_config_word(dev, mcr_addr, &mcr);
951 pci_read_config_byte(dev, 0x59, &bus_reg); 944 pci_read_config_byte(dev, 0x59, &bsr2);
952 945
953 /* 946 /*
954 * Set the state. We don't set it if we don't need to do so. 947 * Set the state. We don't set it if we don't need to do so.
@@ -956,22 +949,22 @@ static int hpt3xx_busproc(ide_drive_t *drive, int state)
956 */ 949 */
957 switch (state) { 950 switch (state) {
958 case BUSSTATE_ON: 951 case BUSSTATE_ON:
959 if (!(bus_reg & resetmask)) 952 if (!(bsr2 & resetmask))
960 return 0; 953 return 0;
961 hwif->drives[0].failures = hwif->drives[1].failures = 0; 954 hwif->drives[0].failures = hwif->drives[1].failures = 0;
962 955
963 pci_write_config_byte(dev, 0x59, bus_reg & ~resetmask); 956 pci_write_config_byte(dev, 0x59, bsr2 & ~resetmask);
964 pci_write_config_word(dev, tristate, tri_reg & ~TRISTATE_BIT); 957 pci_write_config_word(dev, mcr_addr, mcr & ~TRISTATE_BIT);
965 return 0; 958 return 0;
966 case BUSSTATE_OFF: 959 case BUSSTATE_OFF:
967 if ((bus_reg & resetmask) && !(tri_reg & TRISTATE_BIT)) 960 if ((bsr2 & resetmask) && !(mcr & TRISTATE_BIT))
968 return 0; 961 return 0;
969 tri_reg &= ~TRISTATE_BIT; 962 mcr &= ~TRISTATE_BIT;
970 break; 963 break;
971 case BUSSTATE_TRISTATE: 964 case BUSSTATE_TRISTATE:
972 if ((bus_reg & resetmask) && (tri_reg & TRISTATE_BIT)) 965 if ((bsr2 & resetmask) && (mcr & TRISTATE_BIT))
973 return 0; 966 return 0;
974 tri_reg |= TRISTATE_BIT; 967 mcr |= TRISTATE_BIT;
975 break; 968 break;
976 default: 969 default:
977 return -EINVAL; 970 return -EINVAL;
@@ -980,268 +973,320 @@ static int hpt3xx_busproc(ide_drive_t *drive, int state)
980 hwif->drives[0].failures = hwif->drives[0].max_failures + 1; 973 hwif->drives[0].failures = hwif->drives[0].max_failures + 1;
981 hwif->drives[1].failures = hwif->drives[1].max_failures + 1; 974 hwif->drives[1].failures = hwif->drives[1].max_failures + 1;
982 975
983 pci_write_config_word(dev, tristate, tri_reg); 976 pci_write_config_word(dev, mcr_addr, mcr);
984 pci_write_config_byte(dev, 0x59, bus_reg | resetmask); 977 pci_write_config_byte(dev, 0x59, bsr2 | resetmask);
985 return 0; 978 return 0;
986} 979}
987 980
988static void __devinit hpt366_clocking(ide_hwif_t *hwif) 981/**
982 * hpt37x_calibrate_dpll - calibrate the DPLL
983 * @dev: PCI device
984 *
985 * Perform a calibration cycle on the DPLL.
986 * Returns 1 if this succeeds
987 */
988static int __devinit hpt37x_calibrate_dpll(struct pci_dev *dev, u16 f_low, u16 f_high)
989{ 989{
990 u32 reg1 = 0; 990 u32 dpll = (f_high << 16) | f_low | 0x100;
991 struct hpt_info *info = ide_get_hwifdata(hwif); 991 u8 scr2;
992 int i;
992 993
993 pci_read_config_dword(hwif->pci_dev, 0x40, &reg1); 994 pci_write_config_dword(dev, 0x5c, dpll);
994 995
995 /* detect bus speed by looking at control reg timing: */ 996 /* Wait for oscillator ready */
996 switch((reg1 >> 8) & 7) { 997 for(i = 0; i < 0x5000; ++i) {
997 case 5: 998 udelay(50);
998 info->speed = forty_base_hpt36x; 999 pci_read_config_byte(dev, 0x5b, &scr2);
999 break; 1000 if (scr2 & 0x80)
1000 case 9:
1001 info->speed = twenty_five_base_hpt36x;
1002 break;
1003 case 7:
1004 default:
1005 info->speed = thirty_three_base_hpt36x;
1006 break; 1001 break;
1007 } 1002 }
1003 /* See if it stays ready (we'll just bail out if it's not yet) */
1004 for(i = 0; i < 0x1000; ++i) {
1005 pci_read_config_byte(dev, 0x5b, &scr2);
1006 /* DPLL destabilized? */
1007 if(!(scr2 & 0x80))
1008 return 0;
1009 }
1010 /* Turn off tuning, we have the DPLL set */
1011 pci_read_config_dword (dev, 0x5c, &dpll);
1012 pci_write_config_dword(dev, 0x5c, (dpll & ~0x100));
1013 return 1;
1008} 1014}
1009 1015
1010static void __devinit hpt37x_clocking(ide_hwif_t *hwif) 1016static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const char *name)
1011{ 1017{
1012 struct hpt_info *info = ide_get_hwifdata(hwif); 1018 struct hpt_info *info = kmalloc(sizeof(struct hpt_info), GFP_KERNEL);
1013 struct pci_dev *dev = hwif->pci_dev; 1019 unsigned long io_base = pci_resource_start(dev, 4);
1014 int adjust, i; 1020 u8 pci_clk, dpll_clk = 0; /* PCI and DPLL clock in MHz */
1015 u16 freq = 0; 1021 enum ata_clock clock;
1016 u32 pll, temp = 0; 1022
1017 u8 reg5bh = 0, mcr1 = 0; 1023 if (info == NULL) {
1018 1024 printk(KERN_ERR "%s: out of memory!\n", name);
1025 return -ENOMEM;
1026 }
1027
1019 /* 1028 /*
1020 * default to pci clock. make sure MA15/16 are set to output 1029 * Copy everything from a static "template" structure
1021 * to prevent drives having problems with 40-pin cables. Needed 1030 * to just allocated per-chip hpt_info structure.
1022 * for some drives such as IBM-DTLA which will not enter ready
1023 * state on reset when PDIAG is a input.
1024 *
1025 * ToDo: should we set 0x21 when using PLL mode ?
1026 */ 1031 */
1027 pci_write_config_byte(dev, 0x5b, 0x23); 1032 *info = *(struct hpt_info *)pci_get_drvdata(dev);
1028 1033
1029 /* 1034 /*
1030 * We'll have to read f_CNT value in order to determine 1035 * FIXME: Not portable. Also, why do we enable the ROM in the first place?
1031 * the PCI clock frequency according to the following ratio: 1036 * We don't seem to be using it.
1032 *
1033 * f_CNT = Fpci * 192 / Fdpll
1034 *
1035 * First try reading the register in which the HighPoint BIOS
1036 * saves f_CNT value before reprogramming the DPLL from its
1037 * default setting (which differs for the various chips).
1038 * NOTE: This register is only accessible via I/O space.
1039 *
1040 * In case the signature check fails, we'll have to resort to
1041 * reading the f_CNT register itself in hopes that nobody has
1042 * touched the DPLL yet...
1043 */ 1037 */
1044 temp = inl(pci_resource_start(dev, 4) + 0x90); 1038 if (dev->resource[PCI_ROM_RESOURCE].start)
1045 if ((temp & 0xFFFFF000) != 0xABCDE000) { 1039 pci_write_config_dword(dev, PCI_ROM_ADDRESS,
1046 printk(KERN_WARNING "HPT37X: no clock data saved by BIOS\n"); 1040 dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE);
1047 1041
1048 /* Calculate the average value of f_CNT */ 1042 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
1049 for (temp = i = 0; i < 128; i++) { 1043 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
1050 pci_read_config_word(dev, 0x78, &freq); 1044 pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
1051 temp += freq & 0x1ff; 1045 pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
1052 mdelay(1);
1053 }
1054 freq = temp / 128;
1055 } else
1056 freq = temp & 0x1ff;
1057 1046
1058 /* 1047 /*
1059 * HPT3xxN chips use different PCI clock information. 1048 * First, try to estimate the PCI clock frequency...
1060 * Currently we always set up the PLL for them.
1061 */ 1049 */
1050 if (info->chip_type >= HPT370) {
1051 u8 scr1 = 0;
1052 u16 f_cnt = 0;
1053 u32 temp = 0;
1062 1054
1063 if (info->flags & IS_3xxN) { 1055 /* Interrupt force enable. */
1064 if(freq < 0x55) 1056 pci_read_config_byte(dev, 0x5a, &scr1);
1065 pll = F_LOW_PCI_33; 1057 if (scr1 & 0x10)
1066 else if(freq < 0x70) 1058 pci_write_config_byte(dev, 0x5a, scr1 & ~0x10);
1067 pll = F_LOW_PCI_40;
1068 else if(freq < 0x7F)
1069 pll = F_LOW_PCI_50;
1070 else
1071 pll = F_LOW_PCI_66;
1072 1059
1073 printk(KERN_INFO "HPT3xxN detected, FREQ: %d, PLL: %d\n", freq, pll); 1060 /*
1074 } 1061 * HighPoint does this for HPT372A.
1075 else 1062 * NOTE: This register is only writeable via I/O space.
1076 { 1063 */
1077 if(freq < 0x9C) 1064 if (info->chip_type == HPT372A)
1078 pll = F_LOW_PCI_33; 1065 outb(0x0e, io_base + 0x9c);
1079 else if(freq < 0xb0) 1066
1080 pll = F_LOW_PCI_40; 1067 /*
1081 else if(freq <0xc8) 1068 * Default to PCI clock. Make sure MA15/16 are set to output
1082 pll = F_LOW_PCI_50; 1069 * to prevent drives having problems with 40-pin cables.
1070 */
1071 pci_write_config_byte(dev, 0x5b, 0x23);
1072
1073 /*
1074 * We'll have to read f_CNT value in order to determine
1075 * the PCI clock frequency according to the following ratio:
1076 *
1077 * f_CNT = Fpci * 192 / Fdpll
1078 *
1079 * First try reading the register in which the HighPoint BIOS
1080 * saves f_CNT value before reprogramming the DPLL from its
1081 * default setting (which differs for the various chips).
1082 * NOTE: This register is only accessible via I/O space.
1083 *
1084 * In case the signature check fails, we'll have to resort to
1085 * reading the f_CNT register itself in hopes that nobody has
1086 * touched the DPLL yet...
1087 */
1088 temp = inl(io_base + 0x90);
1089 if ((temp & 0xFFFFF000) != 0xABCDE000) {
1090 int i;
1091
1092 printk(KERN_WARNING "%s: no clock data saved by BIOS\n",
1093 name);
1094
1095 /* Calculate the average value of f_CNT. */
1096 for (temp = i = 0; i < 128; i++) {
1097 pci_read_config_word(dev, 0x78, &f_cnt);
1098 temp += f_cnt & 0x1ff;
1099 mdelay(1);
1100 }
1101 f_cnt = temp / 128;
1102 } else
1103 f_cnt = temp & 0x1ff;
1104
1105 dpll_clk = info->dpll_clk;
1106 pci_clk = (f_cnt * dpll_clk) / 192;
1107
1108 /* Clamp PCI clock to bands. */
1109 if (pci_clk < 40)
1110 pci_clk = 33;
1111 else if(pci_clk < 45)
1112 pci_clk = 40;
1113 else if(pci_clk < 55)
1114 pci_clk = 50;
1083 else 1115 else
1084 pll = F_LOW_PCI_66; 1116 pci_clk = 66;
1085 1117
1086 if (pll == F_LOW_PCI_33) { 1118 printk(KERN_INFO "%s: DPLL base: %d MHz, f_CNT: %d, "
1087 info->speed = thirty_three_base_hpt37x; 1119 "assuming %d MHz PCI\n", name, dpll_clk, f_cnt, pci_clk);
1088 printk(KERN_DEBUG "HPT37X: using 33MHz PCI clock\n"); 1120 } else {
1089 } else if (pll == F_LOW_PCI_40) { 1121 u32 itr1 = 0;
1090 /* Unsupported */ 1122
1091 } else if (pll == F_LOW_PCI_50) { 1123 pci_read_config_dword(dev, 0x40, &itr1);
1092 info->speed = fifty_base_hpt37x; 1124
1093 printk(KERN_DEBUG "HPT37X: using 50MHz PCI clock\n"); 1125 /* Detect PCI clock by looking at cmd_high_time. */
1094 } else { 1126 switch((itr1 >> 8) & 0x07) {
1095 info->speed = sixty_six_base_hpt37x; 1127 case 0x09:
1096 printk(KERN_DEBUG "HPT37X: using 66MHz PCI clock\n"); 1128 pci_clk = 40;
1129 break;
1130 case 0x05:
1131 pci_clk = 25;
1132 break;
1133 case 0x07:
1134 default:
1135 pci_clk = 33;
1136 break;
1097 } 1137 }
1098 } 1138 }
1099 1139
1100 if (pll == F_LOW_PCI_66) 1140 /* Let's assume we'll use PCI clock for the ATA clock... */
1101 info->flags |= PCI_66MHZ; 1141 switch (pci_clk) {
1142 case 25:
1143 clock = ATA_CLOCK_25MHZ;
1144 break;
1145 case 33:
1146 default:
1147 clock = ATA_CLOCK_33MHZ;
1148 break;
1149 case 40:
1150 clock = ATA_CLOCK_40MHZ;
1151 break;
1152 case 50:
1153 clock = ATA_CLOCK_50MHZ;
1154 break;
1155 case 66:
1156 clock = ATA_CLOCK_66MHZ;
1157 break;
1158 }
1102 1159
1103 /* 1160 /*
1104 * only try the pll if we don't have a table for the clock 1161 * Only try the DPLL if we don't have a table for the PCI clock that
1105 * speed that we're running at. NOTE: the internal PLL will 1162 * we are running at for HPT370/A, always use it for anything newer...
1106 * result in slow reads when using a 33MHz PCI clock. we also
1107 * don't like to use the PLL because it will cause glitches
1108 * on PRST/SRST when the HPT state engine gets reset.
1109 * 1163 *
1110 * ToDo: Use 66MHz PLL when ATA133 devices are present on a 1164 * NOTE: Using the internal DPLL results in slow reads on 33 MHz PCI.
1111 * 372 device so we can get ATA133 support 1165 * We also don't like using the DPLL because this causes glitches
1166 * on PRST-/SRST- when the state engine gets reset...
1112 */ 1167 */
1113 if (info->speed) 1168 if (info->chip_type >= HPT374 || info->settings[clock] == NULL) {
1114 goto init_hpt37X_done; 1169 u16 f_low, delta = pci_clk < 50 ? 2 : 4;
1170 int adjust;
1171
1172 /*
1173 * Select 66 MHz DPLL clock only if UltraATA/133 mode is
1174 * supported/enabled, use 50 MHz DPLL clock otherwise...
1175 */
1176 if (info->max_mode == 0x04) {
1177 dpll_clk = 66;
1178 clock = ATA_CLOCK_66MHZ;
1179 } else if (dpll_clk) { /* HPT36x chips don't have DPLL */
1180 dpll_clk = 50;
1181 clock = ATA_CLOCK_50MHZ;
1182 }
1115 1183
1116 info->flags |= PLL_MODE; 1184 if (info->settings[clock] == NULL) {
1117 1185 printk(KERN_ERR "%s: unknown bus timing!\n", name);
1118 /* 1186 kfree(info);
1119 * Adjust the PLL based upon the PCI clock, enable it, and 1187 return -EIO;
1120 * wait for stabilization...
1121 */
1122 adjust = 0;
1123 freq = (pll < F_LOW_PCI_50) ? 2 : 4;
1124 while (adjust++ < 6) {
1125 pci_write_config_dword(dev, 0x5c, (freq + pll) << 16 |
1126 pll | 0x100);
1127
1128 /* wait for clock stabilization */
1129 for (i = 0; i < 0x50000; i++) {
1130 pci_read_config_byte(dev, 0x5b, &reg5bh);
1131 if (reg5bh & 0x80) {
1132 /* spin looking for the clock to destabilize */
1133 for (i = 0; i < 0x1000; ++i) {
1134 pci_read_config_byte(dev, 0x5b,
1135 &reg5bh);
1136 if ((reg5bh & 0x80) == 0)
1137 goto pll_recal;
1138 }
1139 pci_read_config_dword(dev, 0x5c, &pll);
1140 pci_write_config_dword(dev, 0x5c,
1141 pll & ~0x100);
1142 pci_write_config_byte(dev, 0x5b, 0x21);
1143
1144 info->speed = fifty_base_hpt37x;
1145 printk("HPT37X: using 50MHz internal PLL\n");
1146 goto init_hpt37X_done;
1147 }
1148 } 1188 }
1149pll_recal:
1150 if (adjust & 1)
1151 pll -= (adjust >> 1);
1152 else
1153 pll += (adjust >> 1);
1154 }
1155 1189
1156init_hpt37X_done: 1190 /* Select the DPLL clock. */
1157 if (!info->speed) 1191 pci_write_config_byte(dev, 0x5b, 0x21);
1158 printk(KERN_ERR "HPT37x%s: unknown bus timing [%d %d].\n",
1159 (info->flags & IS_3xxN) ? "N" : "", pll, freq);
1160 /*
1161 * Reset the state engines.
1162 * NOTE: avoid accidentally enabling the primary channel on HPT371N.
1163 */
1164 pci_read_config_byte(dev, 0x50, &mcr1);
1165 if (mcr1 & 0x04)
1166 pci_write_config_byte(dev, 0x50, 0x37);
1167 pci_write_config_byte(dev, 0x54, 0x37);
1168 udelay(100);
1169}
1170 1192
1171static int __devinit init_hpt37x(struct pci_dev *dev) 1193 /*
1172{ 1194 * Adjust the DPLL based upon PCI clock, enable it,
1173 u8 reg5ah; 1195 * and wait for stabilization...
1196 */
1197 f_low = (pci_clk * 48) / dpll_clk;
1174 1198
1175 pci_read_config_byte(dev, 0x5a, &reg5ah); 1199 for (adjust = 0; adjust < 8; adjust++) {
1176 /* interrupt force enable */ 1200 if(hpt37x_calibrate_dpll(dev, f_low, f_low + delta))
1177 pci_write_config_byte(dev, 0x5a, (reg5ah & ~0x10)); 1201 break;
1178 return 0;
1179}
1180 1202
1181static int __devinit init_hpt366(struct pci_dev *dev) 1203 /*
1182{ 1204 * See if it'll settle at a fractionally different clock
1183 u32 reg1 = 0; 1205 */
1184 u8 drive_fast = 0; 1206 if (adjust & 1)
1207 f_low -= adjust >> 1;
1208 else
1209 f_low += adjust >> 1;
1210 }
1211 if (adjust == 8) {
1212 printk(KERN_ERR "%s: DPLL did not stabilize!\n", name);
1213 kfree(info);
1214 return -EIO;
1215 }
1185 1216
1186 /* 1217 printk("%s: using %d MHz DPLL clock\n", name, dpll_clk);
1187 * Disable the "fast interrupt" prediction. 1218 } else {
1188 */ 1219 /* Mark the fact that we're not using the DPLL. */
1189 pci_read_config_byte(dev, 0x51, &drive_fast); 1220 dpll_clk = 0;
1190 if (drive_fast & 0x80)
1191 pci_write_config_byte(dev, 0x51, drive_fast & ~0x80);
1192 pci_read_config_dword(dev, 0x40, &reg1);
1193
1194 return 0;
1195}
1196 1221
1197static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const char *name) 1222 printk("%s: using %d MHz PCI clock\n", name, pci_clk);
1198{ 1223 }
1199 int ret = 0;
1200 1224
1201 /* 1225 /*
1202 * FIXME: Not portable. Also, why do we enable the ROM in the first place? 1226 * Advance the table pointer to a slot which points to the list
1203 * We don't seem to be using it. 1227 * of the register values settings matching the clock being used.
1204 */ 1228 */
1205 if (dev->resource[PCI_ROM_RESOURCE].start) 1229 info->settings += clock;
1206 pci_write_config_dword(dev, PCI_ROM_ADDRESS,
1207 dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE);
1208 1230
1209 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4)); 1231 /* Store the clock frequencies. */
1210 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78); 1232 info->dpll_clk = dpll_clk;
1211 pci_write_config_byte(dev, PCI_MIN_GNT, 0x08); 1233 info->pci_clk = pci_clk;
1212 pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
1213 1234
1214 if (hpt_revision(dev) >= 3) 1235 /* Point to this chip's own instance of the hpt_info structure. */
1215 ret = init_hpt37x(dev); 1236 pci_set_drvdata(dev, info);
1216 else
1217 ret = init_hpt366(dev);
1218 1237
1219 if (ret) 1238 if (info->chip_type >= HPT370) {
1220 return ret; 1239 u8 mcr1, mcr4;
1240
1241 /*
1242 * Reset the state engines.
1243 * NOTE: Avoid accidentally enabling the disabled channels.
1244 */
1245 pci_read_config_byte (dev, 0x50, &mcr1);
1246 pci_read_config_byte (dev, 0x54, &mcr4);
1247 pci_write_config_byte(dev, 0x50, (mcr1 | 0x32));
1248 pci_write_config_byte(dev, 0x54, (mcr4 | 0x32));
1249 udelay(100);
1250 }
1251
1252 /*
1253 * On HPT371N, if ATA clock is 66 MHz we must set bit 2 in
1254 * the MISC. register to stretch the UltraDMA Tss timing.
1255 * NOTE: This register is only writeable via I/O space.
1256 */
1257 if (info->chip_type == HPT371N && clock == ATA_CLOCK_66MHZ)
1258
1259 outb(inb(io_base + 0x9c) | 0x04, io_base + 0x9c);
1221 1260
1222 return dev->irq; 1261 return dev->irq;
1223} 1262}
1224 1263
1225static void __devinit init_hwif_hpt366(ide_hwif_t *hwif) 1264static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
1226{ 1265{
1227 struct pci_dev *dev = hwif->pci_dev; 1266 struct pci_dev *dev = hwif->pci_dev;
1228 struct hpt_info *info = ide_get_hwifdata(hwif); 1267 struct hpt_info *info = pci_get_drvdata(dev);
1229 u8 ata66 = 0, regmask = (hwif->channel) ? 0x01 : 0x02;
1230 int serialize = HPT_SERIALIZE_IO; 1268 int serialize = HPT_SERIALIZE_IO;
1231 1269 u8 scr1 = 0, ata66 = (hwif->channel) ? 0x01 : 0x02;
1270 u8 chip_type = info->chip_type;
1271 u8 new_mcr, old_mcr = 0;
1272
1273 /* Cache the channel's MISC. control registers' offset */
1274 hwif->select_data = hwif->channel ? 0x54 : 0x50;
1275
1232 hwif->tuneproc = &hpt3xx_tune_drive; 1276 hwif->tuneproc = &hpt3xx_tune_drive;
1233 hwif->speedproc = &hpt3xx_tune_chipset; 1277 hwif->speedproc = &hpt3xx_tune_chipset;
1234 hwif->quirkproc = &hpt3xx_quirkproc; 1278 hwif->quirkproc = &hpt3xx_quirkproc;
1235 hwif->intrproc = &hpt3xx_intrproc; 1279 hwif->intrproc = &hpt3xx_intrproc;
1236 hwif->maskproc = &hpt3xx_maskproc; 1280 hwif->maskproc = &hpt3xx_maskproc;
1237 1281 hwif->busproc = &hpt3xx_busproc;
1282
1238 /* 1283 /*
1239 * HPT3xxN chips have some complications: 1284 * HPT3xxN chips have some complications:
1240 * 1285 *
1241 * - on 33 MHz PCI we must clock switch 1286 * - on 33 MHz PCI we must clock switch
1242 * - on 66 MHz PCI we must NOT use the PCI clock 1287 * - on 66 MHz PCI we must NOT use the PCI clock
1243 */ 1288 */
1244 if ((info->flags & (IS_3xxN | PCI_66MHZ)) == IS_3xxN) { 1289 if (chip_type >= HPT372N && info->dpll_clk && info->pci_clk < 66) {
1245 /* 1290 /*
1246 * Clock is shared between the channels, 1291 * Clock is shared between the channels,
1247 * so we'll have to serialize them... :-( 1292 * so we'll have to serialize them... :-(
@@ -1250,200 +1295,171 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
1250 hwif->rw_disk = &hpt3xxn_rw_disk; 1295 hwif->rw_disk = &hpt3xxn_rw_disk;
1251 } 1296 }
1252 1297
1298 /* Serialize access to this device if needed */
1299 if (serialize && hwif->mate)
1300 hwif->serialized = hwif->mate->serialized = 1;
1301
1302 /*
1303 * Disable the "fast interrupt" prediction. Don't hold off
1304 * on interrupts. (== 0x01 despite what the docs say)
1305 */
1306 pci_read_config_byte(dev, hwif->select_data + 1, &old_mcr);
1307
1308 if (info->chip_type >= HPT374)
1309 new_mcr = old_mcr & ~0x07;
1310 else if (info->chip_type >= HPT370) {
1311 new_mcr = old_mcr;
1312 new_mcr &= ~0x02;
1313
1314#ifdef HPT_DELAY_INTERRUPT
1315 new_mcr &= ~0x01;
1316#else
1317 new_mcr |= 0x01;
1318#endif
1319 } else /* HPT366 and HPT368 */
1320 new_mcr = old_mcr & ~0x80;
1321
1322 if (new_mcr != old_mcr)
1323 pci_write_config_byte(dev, hwif->select_data + 1, new_mcr);
1324
1325 if (!hwif->dma_base) {
1326 hwif->drives[0].autotune = hwif->drives[1].autotune = 1;
1327 return;
1328 }
1329
1330 hwif->ultra_mask = 0x7f;
1331 hwif->mwdma_mask = 0x07;
1332
1253 /* 1333 /*
1254 * The HPT37x uses the CBLID pins as outputs for MA15/MA16 1334 * The HPT37x uses the CBLID pins as outputs for MA15/MA16
1255 * address lines to access an external eeprom. To read valid 1335 * address lines to access an external EEPROM. To read valid
1256 * cable detect state the pins must be enabled as inputs. 1336 * cable detect state the pins must be enabled as inputs.
1257 */ 1337 */
1258 if (info->revision >= 8 && (PCI_FUNC(dev->devfn) & 1)) { 1338 if (chip_type == HPT374 && (PCI_FUNC(dev->devfn) & 1)) {
1259 /* 1339 /*
1260 * HPT374 PCI function 1 1340 * HPT374 PCI function 1
1261 * - set bit 15 of reg 0x52 to enable TCBLID as input 1341 * - set bit 15 of reg 0x52 to enable TCBLID as input
1262 * - set bit 15 of reg 0x56 to enable FCBLID as input 1342 * - set bit 15 of reg 0x56 to enable FCBLID as input
1263 */ 1343 */
1264 u16 mcr3, mcr6; 1344 u8 mcr_addr = hwif->select_data + 2;
1265 pci_read_config_word(dev, 0x52, &mcr3); 1345 u16 mcr;
1266 pci_read_config_word(dev, 0x56, &mcr6); 1346
1267 pci_write_config_word(dev, 0x52, mcr3 | 0x8000); 1347 pci_read_config_word (dev, mcr_addr, &mcr);
1268 pci_write_config_word(dev, 0x56, mcr6 | 0x8000); 1348 pci_write_config_word(dev, mcr_addr, (mcr | 0x8000));
1269 /* now read cable id register */ 1349 /* now read cable id register */
1270 pci_read_config_byte(dev, 0x5a, &ata66); 1350 pci_read_config_byte (dev, 0x5a, &scr1);
1271 pci_write_config_word(dev, 0x52, mcr3); 1351 pci_write_config_word(dev, mcr_addr, mcr);
1272 pci_write_config_word(dev, 0x56, mcr6); 1352 } else if (chip_type >= HPT370) {
1273 } else if (info->revision >= 3) {
1274 /* 1353 /*
1275 * HPT370/372 and 374 pcifn 0 1354 * HPT370/372 and 374 pcifn 0
1276 * - clear bit 0 of 0x5b to enable P/SCBLID as inputs 1355 * - clear bit 0 of reg 0x5b to enable P/SCBLID as inputs
1277 */ 1356 */
1278 u8 scr2; 1357 u8 scr2 = 0;
1279 pci_read_config_byte(dev, 0x5b, &scr2);
1280 pci_write_config_byte(dev, 0x5b, scr2 & ~1);
1281 /* now read cable id register */
1282 pci_read_config_byte(dev, 0x5a, &ata66);
1283 pci_write_config_byte(dev, 0x5b, scr2);
1284 } else {
1285 pci_read_config_byte(dev, 0x5a, &ata66);
1286 }
1287 1358
1288#ifdef DEBUG 1359 pci_read_config_byte (dev, 0x5b, &scr2);
1289 printk("HPT366: reg5ah=0x%02x ATA-%s Cable Port%d\n", 1360 pci_write_config_byte(dev, 0x5b, (scr2 & ~1));
1290 ata66, (ata66 & regmask) ? "33" : "66", 1361 /* now read cable id register */
1291 PCI_FUNC(hwif->pci_dev->devfn)); 1362 pci_read_config_byte (dev, 0x5a, &scr1);
1292#endif /* DEBUG */ 1363 pci_write_config_byte(dev, 0x5b, scr2);
1293 1364 } else
1294 /* Serialize access to this device */ 1365 pci_read_config_byte (dev, 0x5a, &scr1);
1295 if (serialize && hwif->mate)
1296 hwif->serialized = hwif->mate->serialized = 1;
1297 1366
1298 /* 1367 if (!hwif->udma_four)
1299 * Set up ioctl for power status. 1368 hwif->udma_four = (scr1 & ata66) ? 0 : 1;
1300 * NOTE: power affects both drives on each channel.
1301 */
1302 hwif->busproc = &hpt3xx_busproc;
1303 1369
1304 if (!hwif->dma_base) { 1370 hwif->ide_dma_check = &hpt366_config_drive_xfer_rate;
1305 hwif->drives[0].autotune = 1;
1306 hwif->drives[1].autotune = 1;
1307 return;
1308 }
1309 1371
1310 hwif->ultra_mask = 0x7f; 1372 if (chip_type >= HPT374) {
1311 hwif->mwdma_mask = 0x07; 1373 hwif->ide_dma_test_irq = &hpt374_ide_dma_test_irq;
1312 1374 hwif->ide_dma_end = &hpt374_ide_dma_end;
1313 if (!(hwif->udma_four)) 1375 } else if (chip_type >= HPT370) {
1314 hwif->udma_four = ((ata66 & regmask) ? 0 : 1); 1376 hwif->dma_start = &hpt370_ide_dma_start;
1315 hwif->ide_dma_check = &hpt366_config_drive_xfer_rate; 1377 hwif->ide_dma_end = &hpt370_ide_dma_end;
1316 1378 hwif->ide_dma_timeout = &hpt370_ide_dma_timeout;
1317 if (info->revision >= 8) { 1379 } else
1318 hwif->ide_dma_test_irq = &hpt374_ide_dma_test_irq; 1380 hwif->ide_dma_lostirq = &hpt366_ide_dma_lostirq;
1319 hwif->ide_dma_end = &hpt374_ide_dma_end;
1320 } else if (info->revision >= 5) {
1321 hwif->ide_dma_test_irq = &hpt374_ide_dma_test_irq;
1322 hwif->ide_dma_end = &hpt374_ide_dma_end;
1323 } else if (info->revision >= 3) {
1324 hwif->dma_start = &hpt370_ide_dma_start;
1325 hwif->ide_dma_end = &hpt370_ide_dma_end;
1326 hwif->ide_dma_timeout = &hpt370_ide_dma_timeout;
1327 hwif->ide_dma_lostirq = &hpt370_ide_dma_lostirq;
1328 } else if (info->revision >= 2)
1329 hwif->ide_dma_lostirq = &hpt366_ide_dma_lostirq;
1330 else
1331 hwif->ide_dma_lostirq = &hpt366_ide_dma_lostirq;
1332 1381
1333 if (!noautodma) 1382 if (!noautodma)
1334 hwif->autodma = 1; 1383 hwif->autodma = 1;
1335 hwif->drives[0].autodma = hwif->autodma; 1384 hwif->drives[0].autodma = hwif->drives[1].autodma = hwif->autodma;
1336 hwif->drives[1].autodma = hwif->autodma;
1337} 1385}
1338 1386
1339static void __devinit init_dma_hpt366(ide_hwif_t *hwif, unsigned long dmabase) 1387static void __devinit init_dma_hpt366(ide_hwif_t *hwif, unsigned long dmabase)
1340{ 1388{
1341 struct hpt_info *info = ide_get_hwifdata(hwif); 1389 struct pci_dev *dev = hwif->pci_dev;
1342 u8 masterdma = 0, slavedma = 0; 1390 u8 masterdma = 0, slavedma = 0;
1343 u8 dma_new = 0, dma_old = 0; 1391 u8 dma_new = 0, dma_old = 0;
1344 u8 primary = hwif->channel ? 0x4b : 0x43;
1345 u8 secondary = hwif->channel ? 0x4f : 0x47;
1346 unsigned long flags; 1392 unsigned long flags;
1347 1393
1348 if (!dmabase) 1394 if (!dmabase)
1349 return; 1395 return;
1350 1396
1351 if(info->speed == NULL) { 1397 dma_old = hwif->INB(dmabase + 2);
1352 printk(KERN_WARNING "hpt366: no known IDE timings, disabling DMA.\n");
1353 return;
1354 }
1355
1356 dma_old = hwif->INB(dmabase+2);
1357 1398
1358 local_irq_save(flags); 1399 local_irq_save(flags);
1359 1400
1360 dma_new = dma_old; 1401 dma_new = dma_old;
1361 pci_read_config_byte(hwif->pci_dev, primary, &masterdma); 1402 pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma);
1362 pci_read_config_byte(hwif->pci_dev, secondary, &slavedma); 1403 pci_read_config_byte(dev, hwif->channel ? 0x4f : 0x47, &slavedma);
1363 1404
1364 if (masterdma & 0x30) dma_new |= 0x20; 1405 if (masterdma & 0x30) dma_new |= 0x20;
1365 if (slavedma & 0x30) dma_new |= 0x40; 1406 if ( slavedma & 0x30) dma_new |= 0x40;
1366 if (dma_new != dma_old) 1407 if (dma_new != dma_old)
1367 hwif->OUTB(dma_new, dmabase+2); 1408 hwif->OUTB(dma_new, dmabase + 2);
1368 1409
1369 local_irq_restore(flags); 1410 local_irq_restore(flags);
1370 1411
1371 ide_setup_dma(hwif, dmabase, 8); 1412 ide_setup_dma(hwif, dmabase, 8);
1372} 1413}
1373 1414
1374/*
1375 * We "borrow" this hook in order to set the data structures
1376 * up early enough before dma or init_hwif calls are made.
1377 */
1378
1379static void __devinit init_iops_hpt366(ide_hwif_t *hwif)
1380{
1381 struct hpt_info *info = kzalloc(sizeof(struct hpt_info), GFP_KERNEL);
1382 struct pci_dev *dev = hwif->pci_dev;
1383 u16 did = dev->device;
1384 u8 rid = 0;
1385
1386 if(info == NULL) {
1387 printk(KERN_WARNING "hpt366: out of memory.\n");
1388 return;
1389 }
1390 ide_set_hwifdata(hwif, info);
1391
1392 /* Avoid doing the same thing twice. */
1393 if (hwif->channel && hwif->mate) {
1394 memcpy(info, ide_get_hwifdata(hwif->mate), sizeof(struct hpt_info));
1395 return;
1396 }
1397
1398 pci_read_config_byte(dev, PCI_CLASS_REVISION, &rid);
1399
1400 if (( did == PCI_DEVICE_ID_TTI_HPT366 && rid == 6) ||
1401 ((did == PCI_DEVICE_ID_TTI_HPT372 ||
1402 did == PCI_DEVICE_ID_TTI_HPT302 ||
1403 did == PCI_DEVICE_ID_TTI_HPT371) && rid > 1) ||
1404 did == PCI_DEVICE_ID_TTI_HPT372N)
1405 info->flags |= IS_3xxN;
1406
1407 info->revision = hpt_revision(dev);
1408
1409 if (info->revision >= 3)
1410 hpt37x_clocking(hwif);
1411 else
1412 hpt366_clocking(hwif);
1413}
1414
1415static int __devinit init_setup_hpt374(struct pci_dev *dev, ide_pci_device_t *d) 1415static int __devinit init_setup_hpt374(struct pci_dev *dev, ide_pci_device_t *d)
1416{ 1416{
1417 struct pci_dev *findev = NULL; 1417 struct pci_dev *dev2;
1418 1418
1419 if (PCI_FUNC(dev->devfn) & 1) 1419 if (PCI_FUNC(dev->devfn) & 1)
1420 return -ENODEV; 1420 return -ENODEV;
1421 1421
1422 while ((findev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, findev)) != NULL) { 1422 pci_set_drvdata(dev, &hpt374);
1423 if ((findev->vendor == dev->vendor) && 1423
1424 (findev->device == dev->device) && 1424 if ((dev2 = pci_get_slot(dev->bus, dev->devfn + 1)) != NULL) {
1425 ((findev->devfn - dev->devfn) == 1) && 1425 int ret;
1426 (PCI_FUNC(findev->devfn) & 1)) { 1426
1427 if (findev->irq != dev->irq) { 1427 pci_set_drvdata(dev2, &hpt374);
1428 /* FIXME: we need a core pci_set_interrupt() */ 1428
1429 findev->irq = dev->irq; 1429 if (dev2->irq != dev->irq) {
1430 printk(KERN_WARNING "%s: pci-config space interrupt " 1430 /* FIXME: we need a core pci_set_interrupt() */
1431 "fixed.\n", d->name); 1431 dev2->irq = dev->irq;
1432 } 1432 printk(KERN_WARNING "%s: PCI config space interrupt "
1433 return ide_setup_pci_devices(dev, findev, d); 1433 "fixed.\n", d->name);
1434 } 1434 }
1435 ret = ide_setup_pci_devices(dev, dev2, d);
1436 if (ret < 0)
1437 pci_dev_put(dev2);
1438 return ret;
1435 } 1439 }
1436 return ide_setup_pci_device(dev, d); 1440 return ide_setup_pci_device(dev, d);
1437} 1441}
1438 1442
1439static int __devinit init_setup_hpt37x(struct pci_dev *dev, ide_pci_device_t *d) 1443static int __devinit init_setup_hpt372n(struct pci_dev *dev, ide_pci_device_t *d)
1440{ 1444{
1445 pci_set_drvdata(dev, &hpt372n);
1446
1441 return ide_setup_pci_device(dev, d); 1447 return ide_setup_pci_device(dev, d);
1442} 1448}
1443 1449
1444static int __devinit init_setup_hpt371(struct pci_dev *dev, ide_pci_device_t *d) 1450static int __devinit init_setup_hpt371(struct pci_dev *dev, ide_pci_device_t *d)
1445{ 1451{
1446 u8 mcr1 = 0; 1452 struct hpt_info *info;
1453 u8 rev = 0, mcr1 = 0;
1454
1455 pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
1456
1457 if (rev > 1) {
1458 d->name = "HPT371N";
1459
1460 info = &hpt371n;
1461 } else
1462 info = &hpt371;
1447 1463
1448 /* 1464 /*
1449 * HPT371 chips physically have only one channel, the secondary one, 1465 * HPT371 chips physically have only one channel, the secondary one,
@@ -1453,59 +1469,94 @@ static int __devinit init_setup_hpt371(struct pci_dev *dev, ide_pci_device_t *d)
1453 */ 1469 */
1454 pci_read_config_byte(dev, 0x50, &mcr1); 1470 pci_read_config_byte(dev, 0x50, &mcr1);
1455 if (mcr1 & 0x04) 1471 if (mcr1 & 0x04)
1456 pci_write_config_byte(dev, 0x50, (mcr1 & ~0x04)); 1472 pci_write_config_byte(dev, 0x50, mcr1 & ~0x04);
1473
1474 pci_set_drvdata(dev, info);
1475
1476 return ide_setup_pci_device(dev, d);
1477}
1478
1479static int __devinit init_setup_hpt372a(struct pci_dev *dev, ide_pci_device_t *d)
1480{
1481 struct hpt_info *info;
1482 u8 rev = 0;
1483
1484 pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
1485
1486 if (rev > 1) {
1487 d->name = "HPT372N";
1488
1489 info = &hpt372n;
1490 } else
1491 info = &hpt372a;
1492 pci_set_drvdata(dev, info);
1493
1494 return ide_setup_pci_device(dev, d);
1495}
1496
1497static int __devinit init_setup_hpt302(struct pci_dev *dev, ide_pci_device_t *d)
1498{
1499 struct hpt_info *info;
1500 u8 rev = 0;
1501
1502 pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
1503
1504 if (rev > 1) {
1505 d->name = "HPT302N";
1506
1507 info = &hpt302n;
1508 } else
1509 info = &hpt302;
1510 pci_set_drvdata(dev, info);
1457 1511
1458 return ide_setup_pci_device(dev, d); 1512 return ide_setup_pci_device(dev, d);
1459} 1513}
1460 1514
1461static int __devinit init_setup_hpt366(struct pci_dev *dev, ide_pci_device_t *d) 1515static int __devinit init_setup_hpt366(struct pci_dev *dev, ide_pci_device_t *d)
1462{ 1516{
1463 struct pci_dev *findev = NULL; 1517 struct pci_dev *dev2;
1464 u8 pin1 = 0, pin2 = 0; 1518 u8 rev = 0;
1465 unsigned int class_rev; 1519 static char *chipset_names[] = { "HPT366", "HPT366", "HPT368",
1466 char *chipset_names[] = {"HPT366", "HPT366", "HPT368", 1520 "HPT370", "HPT370A", "HPT372",
1467 "HPT370", "HPT370A", "HPT372", 1521 "HPT372N" };
1468 "HPT372N" }; 1522 static struct hpt_info *info[] = { &hpt36x, &hpt36x, &hpt36x,
1523 &hpt370, &hpt370a, &hpt372,
1524 &hpt372n };
1469 1525
1470 if (PCI_FUNC(dev->devfn) & 1) 1526 if (PCI_FUNC(dev->devfn) & 1)
1471 return -ENODEV; 1527 return -ENODEV;
1472 1528
1473 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev); 1529 pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
1474 class_rev &= 0xff;
1475 1530
1476 if(dev->device == PCI_DEVICE_ID_TTI_HPT372N) 1531 if (rev > 6)
1477 class_rev = 6; 1532 rev = 6;
1478 1533
1479 if(class_rev <= 6) 1534 d->name = chipset_names[rev];
1480 d->name = chipset_names[class_rev]; 1535
1481 1536 pci_set_drvdata(dev, info[rev]);
1482 switch(class_rev) { 1537
1483 case 6: 1538 if (rev > 2)
1484 case 5: 1539 goto init_single;
1485 case 4:
1486 case 3:
1487 goto init_single;
1488 default:
1489 break;
1490 }
1491 1540
1492 d->channels = 1; 1541 d->channels = 1;
1493 1542
1494 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin1); 1543 if ((dev2 = pci_get_slot(dev->bus, dev->devfn + 1)) != NULL) {
1495 while ((findev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, findev)) != NULL) { 1544 u8 pin1 = 0, pin2 = 0;
1496 if ((findev->vendor == dev->vendor) && 1545 int ret;
1497 (findev->device == dev->device) && 1546
1498 ((findev->devfn - dev->devfn) == 1) && 1547 pci_set_drvdata(dev2, info[rev]);
1499 (PCI_FUNC(findev->devfn) & 1)) { 1548
1500 pci_read_config_byte(findev, PCI_INTERRUPT_PIN, &pin2); 1549 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin1);
1501 if ((pin1 != pin2) && (dev->irq == findev->irq)) { 1550 pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin2);
1502 d->bootable = ON_BOARD; 1551 if (pin1 != pin2 && dev->irq == dev2->irq) {
1503 printk("%s: onboard version of chipset, " 1552 d->bootable = ON_BOARD;
1504 "pin1=%d pin2=%d\n", d->name, 1553 printk("%s: onboard version of chipset, pin1=%d pin2=%d\n",
1505 pin1, pin2); 1554 d->name, pin1, pin2);
1506 }
1507 return ide_setup_pci_devices(dev, findev, d);
1508 } 1555 }
1556 ret = ide_setup_pci_devices(dev, dev2, d);
1557 if (ret < 0)
1558 pci_dev_put(dev2);
1559 return ret;
1509 } 1560 }
1510init_single: 1561init_single:
1511 return ide_setup_pci_device(dev, d); 1562 return ide_setup_pci_device(dev, d);
@@ -1516,64 +1567,68 @@ static ide_pci_device_t hpt366_chipsets[] __devinitdata = {
1516 .name = "HPT366", 1567 .name = "HPT366",
1517 .init_setup = init_setup_hpt366, 1568 .init_setup = init_setup_hpt366,
1518 .init_chipset = init_chipset_hpt366, 1569 .init_chipset = init_chipset_hpt366,
1519 .init_iops = init_iops_hpt366,
1520 .init_hwif = init_hwif_hpt366, 1570 .init_hwif = init_hwif_hpt366,
1521 .init_dma = init_dma_hpt366, 1571 .init_dma = init_dma_hpt366,
1522 .channels = 2, 1572 .channels = 2,
1523 .autodma = AUTODMA, 1573 .autodma = AUTODMA,
1574 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1524 .bootable = OFF_BOARD, 1575 .bootable = OFF_BOARD,
1525 .extra = 240 1576 .extra = 240
1526 },{ /* 1 */ 1577 },{ /* 1 */
1527 .name = "HPT372A", 1578 .name = "HPT372A",
1528 .init_setup = init_setup_hpt37x, 1579 .init_setup = init_setup_hpt372a,
1529 .init_chipset = init_chipset_hpt366, 1580 .init_chipset = init_chipset_hpt366,
1530 .init_iops = init_iops_hpt366,
1531 .init_hwif = init_hwif_hpt366, 1581 .init_hwif = init_hwif_hpt366,
1532 .init_dma = init_dma_hpt366, 1582 .init_dma = init_dma_hpt366,
1533 .channels = 2, 1583 .channels = 2,
1534 .autodma = AUTODMA, 1584 .autodma = AUTODMA,
1585 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1535 .bootable = OFF_BOARD, 1586 .bootable = OFF_BOARD,
1587 .extra = 240
1536 },{ /* 2 */ 1588 },{ /* 2 */
1537 .name = "HPT302", 1589 .name = "HPT302",
1538 .init_setup = init_setup_hpt37x, 1590 .init_setup = init_setup_hpt302,
1539 .init_chipset = init_chipset_hpt366, 1591 .init_chipset = init_chipset_hpt366,
1540 .init_iops = init_iops_hpt366,
1541 .init_hwif = init_hwif_hpt366, 1592 .init_hwif = init_hwif_hpt366,
1542 .init_dma = init_dma_hpt366, 1593 .init_dma = init_dma_hpt366,
1543 .channels = 2, 1594 .channels = 2,
1544 .autodma = AUTODMA, 1595 .autodma = AUTODMA,
1596 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1545 .bootable = OFF_BOARD, 1597 .bootable = OFF_BOARD,
1598 .extra = 240
1546 },{ /* 3 */ 1599 },{ /* 3 */
1547 .name = "HPT371", 1600 .name = "HPT371",
1548 .init_setup = init_setup_hpt371, 1601 .init_setup = init_setup_hpt371,
1549 .init_chipset = init_chipset_hpt366, 1602 .init_chipset = init_chipset_hpt366,
1550 .init_iops = init_iops_hpt366,
1551 .init_hwif = init_hwif_hpt366, 1603 .init_hwif = init_hwif_hpt366,
1552 .init_dma = init_dma_hpt366, 1604 .init_dma = init_dma_hpt366,
1553 .channels = 2, 1605 .channels = 2,
1554 .autodma = AUTODMA, 1606 .autodma = AUTODMA,
1555 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, 1607 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1556 .bootable = OFF_BOARD, 1608 .bootable = OFF_BOARD,
1609 .extra = 240
1557 },{ /* 4 */ 1610 },{ /* 4 */
1558 .name = "HPT374", 1611 .name = "HPT374",
1559 .init_setup = init_setup_hpt374, 1612 .init_setup = init_setup_hpt374,
1560 .init_chipset = init_chipset_hpt366, 1613 .init_chipset = init_chipset_hpt366,
1561 .init_iops = init_iops_hpt366,
1562 .init_hwif = init_hwif_hpt366, 1614 .init_hwif = init_hwif_hpt366,
1563 .init_dma = init_dma_hpt366, 1615 .init_dma = init_dma_hpt366,
1564 .channels = 2, /* 4 */ 1616 .channels = 2, /* 4 */
1565 .autodma = AUTODMA, 1617 .autodma = AUTODMA,
1618 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1566 .bootable = OFF_BOARD, 1619 .bootable = OFF_BOARD,
1620 .extra = 240
1567 },{ /* 5 */ 1621 },{ /* 5 */
1568 .name = "HPT372N", 1622 .name = "HPT372N",
1569 .init_setup = init_setup_hpt37x, 1623 .init_setup = init_setup_hpt372n,
1570 .init_chipset = init_chipset_hpt366, 1624 .init_chipset = init_chipset_hpt366,
1571 .init_iops = init_iops_hpt366,
1572 .init_hwif = init_hwif_hpt366, 1625 .init_hwif = init_hwif_hpt366,
1573 .init_dma = init_dma_hpt366, 1626 .init_dma = init_dma_hpt366,
1574 .channels = 2, /* 4 */ 1627 .channels = 2, /* 4 */
1575 .autodma = AUTODMA, 1628 .autodma = AUTODMA,
1629 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1576 .bootable = OFF_BOARD, 1630 .bootable = OFF_BOARD,
1631 .extra = 240
1577 } 1632 }
1578}; 1633};
1579 1634
diff --git a/drivers/ide/pci/it8213.c b/drivers/ide/pci/it8213.c
new file mode 100644
index 000000000000..63248b6909fa
--- /dev/null
+++ b/drivers/ide/pci/it8213.c
@@ -0,0 +1,362 @@
1/*
2 * ITE 8213 IDE driver
3 *
4 * Copyright (C) 2006 Jack Lee
5 * Copyright (C) 2006 Alan Cox
6 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
7 */
8
9#include <linux/kernel.h>
10#include <linux/types.h>
11#include <linux/module.h>
12#include <linux/pci.h>
13#include <linux/delay.h>
14#include <linux/hdreg.h>
15#include <linux/ide.h>
16#include <linux/init.h>
17
18#include <asm/io.h>
19
20/*
21 * it8213_ratemask - Compute available modes
22 * @drive: IDE drive
23 *
24 * Compute the available speeds for the devices on the interface. This
25 * is all modes to ATA133 clipped by drive cable setup.
26 */
27
28static u8 it8213_ratemask (ide_drive_t *drive)
29{
30 u8 mode = 4;
31 if (!eighty_ninty_three(drive))
32 mode = min_t(u8, mode, 1);
33 return mode;
34}
35
36/**
37 * it8213_dma_2_pio - return the PIO mode matching DMA
38 * @xfer_rate: transfer speed
39 *
40 * Returns the nearest equivalent PIO timing for the PIO or DMA
41 * mode requested by the controller.
42 */
43
44static u8 it8213_dma_2_pio (u8 xfer_rate) {
45 switch(xfer_rate) {
46 case XFER_UDMA_6:
47 case XFER_UDMA_5:
48 case XFER_UDMA_4:
49 case XFER_UDMA_3:
50 case XFER_UDMA_2:
51 case XFER_UDMA_1:
52 case XFER_UDMA_0:
53 case XFER_MW_DMA_2:
54 case XFER_PIO_4:
55 return 4;
56 case XFER_MW_DMA_1:
57 case XFER_PIO_3:
58 return 3;
59 case XFER_SW_DMA_2:
60 case XFER_PIO_2:
61 return 2;
62 case XFER_MW_DMA_0:
63 case XFER_SW_DMA_1:
64 case XFER_SW_DMA_0:
65 case XFER_PIO_1:
66 case XFER_PIO_0:
67 case XFER_PIO_SLOW:
68 default:
69 return 0;
70 }
71}
72
73/*
74 * it8213_tuneproc - tune a drive
75 * @drive: drive to tune
76 * @pio: desired PIO mode
77 *
78 * Set the interface PIO mode.
79 */
80
81static void it8213_tuneproc (ide_drive_t *drive, u8 pio)
82{
83 ide_hwif_t *hwif = HWIF(drive);
84 struct pci_dev *dev = hwif->pci_dev;
85 int is_slave = drive->dn & 1;
86 int master_port = 0x40;
87 int slave_port = 0x44;
88 unsigned long flags;
89 u16 master_data;
90 u8 slave_data;
91 static DEFINE_SPINLOCK(tune_lock);
92 int control = 0;
93
94 static const u8 timings[][2]= {
95 { 0, 0 },
96 { 0, 0 },
97 { 1, 0 },
98 { 2, 1 },
99 { 2, 3 }, };
100
101 pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
102
103 spin_lock_irqsave(&tune_lock, flags);
104 pci_read_config_word(dev, master_port, &master_data);
105
106 if (pio > 1)
107 control |= 1; /* Programmable timing on */
108 if (drive->media != ide_disk)
109 control |= 4; /* ATAPI */
110 if (pio > 2)
111 control |= 2; /* IORDY */
112 if (is_slave) {
113 master_data |= 0x4000;
114 master_data &= ~0x0070;
115 if (pio > 1)
116 master_data = master_data | (control << 4);
117 pci_read_config_byte(dev, slave_port, &slave_data);
118 slave_data = slave_data & 0xf0;
119 slave_data = slave_data | (timings[pio][0] << 2) | timings[pio][1];
120 } else {
121 master_data &= ~0x3307;
122 if (pio > 1)
123 master_data = master_data | control;
124 master_data = master_data | (timings[pio][0] << 12) | (timings[pio][1] << 8);
125 }
126 pci_write_config_word(dev, master_port, master_data);
127 if (is_slave)
128 pci_write_config_byte(dev, slave_port, slave_data);
129 spin_unlock_irqrestore(&tune_lock, flags);
130}
131
132/**
133 * it8213_tune_chipset - set controller timings
134 * @drive: Drive to set up
135 * @xferspeed: speed we want to achieve
136 *
137 * Tune the ITE chipset for the desired mode. If we can't achieve
138 * the desired mode then tune for a lower one, but ultimately
139 * make the thing work.
140 */
141
142static int it8213_tune_chipset (ide_drive_t *drive, u8 xferspeed)
143{
144
145 ide_hwif_t *hwif = HWIF(drive);
146 struct pci_dev *dev = hwif->pci_dev;
147 u8 maslave = 0x40;
148 u8 speed = ide_rate_filter(it8213_ratemask(drive), xferspeed);
149 int a_speed = 3 << (drive->dn * 4);
150 int u_flag = 1 << drive->dn;
151 int v_flag = 0x01 << drive->dn;
152 int w_flag = 0x10 << drive->dn;
153 int u_speed = 0;
154 u16 reg4042, reg4a;
155 u8 reg48, reg54, reg55;
156
157 pci_read_config_word(dev, maslave, &reg4042);
158 pci_read_config_byte(dev, 0x48, &reg48);
159 pci_read_config_word(dev, 0x4a, &reg4a);
160 pci_read_config_byte(dev, 0x54, &reg54);
161 pci_read_config_byte(dev, 0x55, &reg55);
162
163 switch(speed) {
164 case XFER_UDMA_6:
165 case XFER_UDMA_4:
166 case XFER_UDMA_2: u_speed = 2 << (drive->dn * 4); break;
167 case XFER_UDMA_5:
168 case XFER_UDMA_3:
169 case XFER_UDMA_1: u_speed = 1 << (drive->dn * 4); break;
170 case XFER_UDMA_0: u_speed = 0 << (drive->dn * 4); break;
171 break;
172 case XFER_MW_DMA_2:
173 case XFER_MW_DMA_1:
174 case XFER_SW_DMA_2:
175 break;
176 case XFER_PIO_4:
177 case XFER_PIO_3:
178 case XFER_PIO_2:
179 case XFER_PIO_1:
180 case XFER_PIO_0:
181 break;
182 default:
183 return -1;
184 }
185
186 if (speed >= XFER_UDMA_0) {
187 if (!(reg48 & u_flag))
188 pci_write_config_byte(dev, 0x48, reg48 | u_flag);
189 if (speed >= XFER_UDMA_5) {
190 pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag);
191 } else {
192 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
193 }
194
195 if ((reg4a & a_speed) != u_speed)
196 pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed);
197 if (speed > XFER_UDMA_2) {
198 if (!(reg54 & v_flag))
199 pci_write_config_byte(dev, 0x54, reg54 | v_flag);
200 } else
201 pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
202 } else {
203 if (reg48 & u_flag)
204 pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
205 if (reg4a & a_speed)
206 pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
207 if (reg54 & v_flag)
208 pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
209 if (reg55 & w_flag)
210 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
211 }
212 it8213_tuneproc(drive, it8213_dma_2_pio(speed));
213 return ide_config_drive_speed(drive, speed);
214}
215
216/*
217 * config_chipset_for_dma - configure for DMA
218 * @drive: drive to configure
219 *
220 * Called by the IDE layer when it wants the timings set up.
221 */
222
223static int config_chipset_for_dma (ide_drive_t *drive)
224{
225 u8 speed = ide_dma_speed(drive, it8213_ratemask(drive));
226
227 if (!speed)
228 return 0;
229
230 it8213_tune_chipset(drive, speed);
231
232 return ide_dma_enable(drive);
233}
234
235/**
236 * it8213_configure_drive_for_dma - set up for DMA transfers
237 * @drive: drive we are going to set up
238 *
239 * Set up the drive for DMA, tune the controller and drive as
240 * required. If the drive isn't suitable for DMA or we hit
241 * other problems then we will drop down to PIO and set up
242 * PIO appropriately
243 */
244
245static int it8213_config_drive_for_dma (ide_drive_t *drive)
246{
247 ide_hwif_t *hwif = drive->hwif;
248
249 if (ide_use_dma(drive)) {
250 if (config_chipset_for_dma(drive))
251 return hwif->ide_dma_on(drive);
252 }
253
254 hwif->speedproc(drive, XFER_PIO_0
255 + ide_get_best_pio_mode(drive, 255, 4, NULL));
256
257 return hwif->ide_dma_off_quietly(drive);
258}
259
260/**
261 * init_hwif_it8213 - set up hwif structs
262 * @hwif: interface to set up
263 *
264 * We do the basic set up of the interface structure. The IT8212
265 * requires several custom handlers so we override the default
266 * ide DMA handlers appropriately
267 */
268
269static void __devinit init_hwif_it8213(ide_hwif_t *hwif)
270{
271 u8 reg42h = 0, ata66 = 0;
272
273 hwif->speedproc = &it8213_tune_chipset;
274 hwif->tuneproc = &it8213_tuneproc;
275
276 hwif->autodma = 0;
277
278 hwif->drives[0].autotune = 1;
279 hwif->drives[1].autotune = 1;
280
281 if (!hwif->dma_base)
282 return;
283
284 hwif->atapi_dma = 1;
285 hwif->ultra_mask = 0x7f;
286 hwif->mwdma_mask = 0x06;
287 hwif->swdma_mask = 0x04;
288
289 pci_read_config_byte(hwif->pci_dev, 0x42, &reg42h);
290 ata66 = (reg42h & 0x02) ? 0 : 1;
291
292 hwif->ide_dma_check = &it8213_config_drive_for_dma;
293 if (!(hwif->udma_four))
294 hwif->udma_four = ata66;
295
296 /*
297 * The BIOS often doesn't set up DMA on this controller
298 * so we always do it.
299 */
300 if (!noautodma)
301 hwif->autodma = 1;
302
303 hwif->drives[0].autodma = hwif->autodma;
304 hwif->drives[1].autodma = hwif->autodma;
305}
306
307
308#define DECLARE_ITE_DEV(name_str) \
309 { \
310 .name = name_str, \
311 .init_hwif = init_hwif_it8213, \
312 .channels = 1, \
313 .autodma = AUTODMA, \
314 .enablebits = {{0x41,0x80,0x80}}, \
315 .bootable = ON_BOARD, \
316 }
317
318static ide_pci_device_t it8213_chipsets[] __devinitdata = {
319 /* 0 */ DECLARE_ITE_DEV("IT8213"),
320};
321
322
323/**
324 * it8213_init_one - pci layer discovery entry
325 * @dev: PCI device
326 * @id: ident table entry
327 *
328 * Called by the PCI code when it finds an ITE8213 controller. As
329 * this device follows the standard interfaces we can use the
330 * standard helper functions to do almost all the work for us.
331 */
332
333static int __devinit it8213_init_one(struct pci_dev *dev, const struct pci_device_id *id)
334{
335 ide_setup_pci_device(dev, &it8213_chipsets[id->driver_data]);
336 return 0;
337}
338
339
340static struct pci_device_id it8213_pci_tbl[] = {
341 { PCI_VENDOR_ID_ITE, 0x8213, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
342 { 0, },
343};
344
345MODULE_DEVICE_TABLE(pci, it8213_pci_tbl);
346
347static struct pci_driver driver = {
348 .name = "ITE8213_IDE",
349 .id_table = it8213_pci_tbl,
350 .probe = it8213_init_one,
351};
352
353static int __init it8213_ide_init(void)
354{
355 return ide_pci_register_driver(&driver);
356}
357
358module_init(it8213_ide_init);
359
360MODULE_AUTHOR("Jack Lee, Alan Cox");
361MODULE_DESCRIPTION("PCI driver module for the ITE 8213");
362MODULE_LICENSE("GPL");
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
index 77a9aaa7dab9..236a03144a27 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pci/pdc202xx_new.c
@@ -92,26 +92,6 @@ static u8 pdcnew_ratemask(ide_drive_t *drive)
92 return mode; 92 return mode;
93} 93}
94 94
95static int check_in_drive_lists(ide_drive_t *drive, const char **list)
96{
97 struct hd_driveid *id = drive->id;
98
99 if (pdc_quirk_drives == list) {
100 while (*list) {
101 if (strstr(id->model, *list++)) {
102 return 2;
103 }
104 }
105 } else {
106 while (*list) {
107 if (!strcmp(*list++,id->model)) {
108 return 1;
109 }
110 }
111 }
112 return 0;
113}
114
115/** 95/**
116 * get_indexed_reg - Get indexed register 96 * get_indexed_reg - Get indexed register
117 * @hwif: for the port address 97 * @hwif: for the port address
@@ -249,13 +229,6 @@ static int pdcnew_tune_chipset(ide_drive_t *drive, u8 speed)
249 return err; 229 return err;
250} 230}
251 231
252/* 0 1 2 3 4 5 6 7 8
253 * 960, 480, 390, 300, 240, 180, 120, 90, 60
254 * 180, 150, 120, 90, 60
255 * DMA_Speed
256 * 180, 120, 90, 90, 90, 60, 30
257 * 11, 5, 4, 3, 2, 1, 0
258 */
259static void pdcnew_tune_drive(ide_drive_t *drive, u8 pio) 232static void pdcnew_tune_drive(ide_drive_t *drive, u8 pio)
260{ 233{
261 pio = ide_get_best_pio_mode(drive, pio, 4, NULL); 234 pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
@@ -313,12 +286,10 @@ static int pdcnew_config_drive_xfer_rate(ide_drive_t *drive)
313 286
314 drive->init_speed = 0; 287 drive->init_speed = 0;
315 288
316 if (id && (id->capability & 1) && drive->autodma) { 289 if ((id->capability & 1) && drive->autodma) {
317 290
318 if (ide_use_dma(drive)) { 291 if (ide_use_dma(drive) && config_chipset_for_dma(drive))
319 if (config_chipset_for_dma(drive)) 292 return hwif->ide_dma_on(drive);
320 return hwif->ide_dma_on(drive);
321 }
322 293
323 goto fast_ata_pio; 294 goto fast_ata_pio;
324 295
@@ -333,21 +304,12 @@ fast_ata_pio:
333 304
334static int pdcnew_quirkproc(ide_drive_t *drive) 305static int pdcnew_quirkproc(ide_drive_t *drive)
335{ 306{
336 return check_in_drive_lists(drive, pdc_quirk_drives); 307 const char **list, *model = drive->id->model;
337}
338 308
339static int pdcnew_ide_dma_lostirq(ide_drive_t *drive) 309 for (list = pdc_quirk_drives; *list != NULL; list++)
340{ 310 if (strstr(model, *list) != NULL)
341 if (HWIF(drive)->resetproc != NULL) 311 return 2;
342 HWIF(drive)->resetproc(drive); 312 return 0;
343 return __ide_dma_lostirq(drive);
344}
345
346static int pdcnew_ide_dma_timeout(ide_drive_t *drive)
347{
348 if (HWIF(drive)->resetproc != NULL)
349 HWIF(drive)->resetproc(drive);
350 return __ide_dma_timeout(drive);
351} 313}
352 314
353static void pdcnew_reset(ide_drive_t *drive) 315static void pdcnew_reset(ide_drive_t *drive)
@@ -599,8 +561,6 @@ static void __devinit init_hwif_pdc202new(ide_hwif_t *hwif)
599 hwif->err_stops_fifo = 1; 561 hwif->err_stops_fifo = 1;
600 562
601 hwif->ide_dma_check = &pdcnew_config_drive_xfer_rate; 563 hwif->ide_dma_check = &pdcnew_config_drive_xfer_rate;
602 hwif->ide_dma_lostirq = &pdcnew_ide_dma_lostirq;
603 hwif->ide_dma_timeout = &pdcnew_ide_dma_timeout;
604 564
605 if (!hwif->udma_four) 565 if (!hwif->udma_four)
606 hwif->udma_four = pdcnew_cable_detect(hwif) ? 0 : 1; 566 hwif->udma_four = pdcnew_cable_detect(hwif) ? 0 : 1;
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
index 143239c093d5..730e8d1ec2f5 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pci/pdc202xx_old.c
@@ -123,26 +123,6 @@ static u8 pdc202xx_ratemask (ide_drive_t *drive)
123 return mode; 123 return mode;
124} 124}
125 125
126static int check_in_drive_lists (ide_drive_t *drive, const char **list)
127{
128 struct hd_driveid *id = drive->id;
129
130 if (pdc_quirk_drives == list) {
131 while (*list) {
132 if (strstr(id->model, *list++)) {
133 return 2;
134 }
135 }
136 } else {
137 while (*list) {
138 if (!strcmp(*list++,id->model)) {
139 return 1;
140 }
141 }
142 }
143 return 0;
144}
145
146static int pdc202xx_tune_chipset (ide_drive_t *drive, u8 xferspeed) 126static int pdc202xx_tune_chipset (ide_drive_t *drive, u8 xferspeed)
147{ 127{
148 ide_hwif_t *hwif = HWIF(drive); 128 ide_hwif_t *hwif = HWIF(drive);
@@ -377,7 +357,12 @@ fast_ata_pio:
377 357
378static int pdc202xx_quirkproc (ide_drive_t *drive) 358static int pdc202xx_quirkproc (ide_drive_t *drive)
379{ 359{
380 return ((int) check_in_drive_lists(drive, pdc_quirk_drives)); 360 const char **list, *model = drive->id->model;
361
362 for (list = pdc_quirk_drives; *list != NULL; list++)
363 if (strstr(model, *list) != NULL)
364 return 2;
365 return 0;
381} 366}
382 367
383static void pdc202xx_old_ide_dma_start(ide_drive_t *drive) 368static void pdc202xx_old_ide_dma_start(ide_drive_t *drive)
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
index edb37f3d558d..52cfc2ac22c1 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/pci/piix.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/ide/pci/piix.c Version 0.45 May 12, 2006 2 * linux/drivers/ide/pci/piix.c Version 0.46 December 3, 2006
3 * 3 *
4 * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer 4 * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
5 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> 5 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
@@ -163,7 +163,7 @@ static u8 piix_ratemask (ide_drive_t *drive)
163 * if the drive cannot see an 80pin cable. 163 * if the drive cannot see an 80pin cable.
164 */ 164 */
165 if (!eighty_ninty_three(drive)) 165 if (!eighty_ninty_three(drive))
166 mode = min(mode, (u8)1); 166 mode = min_t(u8, mode, 1);
167 return mode; 167 return mode;
168} 168}
169 169
@@ -216,7 +216,7 @@ static void piix_tune_drive (ide_drive_t *drive, u8 pio)
216{ 216{
217 ide_hwif_t *hwif = HWIF(drive); 217 ide_hwif_t *hwif = HWIF(drive);
218 struct pci_dev *dev = hwif->pci_dev; 218 struct pci_dev *dev = hwif->pci_dev;
219 int is_slave = (&hwif->drives[1] == drive); 219 int is_slave = drive->dn & 1;
220 int master_port = hwif->channel ? 0x42 : 0x40; 220 int master_port = hwif->channel ? 0x42 : 0x40;
221 int slave_port = 0x44; 221 int slave_port = 0x44;
222 unsigned long flags; 222 unsigned long flags;
@@ -225,7 +225,7 @@ static void piix_tune_drive (ide_drive_t *drive, u8 pio)
225 static DEFINE_SPINLOCK(tune_lock); 225 static DEFINE_SPINLOCK(tune_lock);
226 int control = 0; 226 int control = 0;
227 227
228 /* ISP RTC */ 228 /* ISP RTC */
229 static const u8 timings[][2]= { 229 static const u8 timings[][2]= {
230 { 0, 0 }, 230 { 0, 0 },
231 { 0, 0 }, 231 { 0, 0 },
@@ -233,7 +233,7 @@ static void piix_tune_drive (ide_drive_t *drive, u8 pio)
233 { 2, 1 }, 233 { 2, 1 },
234 { 2, 3 }, }; 234 { 2, 3 }, };
235 235
236 pio = ide_get_best_pio_mode(drive, pio, 5, NULL); 236 pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
237 237
238 /* 238 /*
239 * Master vs slave is synchronized above us but the slave register is 239 * Master vs slave is synchronized above us but the slave register is
@@ -243,25 +243,24 @@ static void piix_tune_drive (ide_drive_t *drive, u8 pio)
243 spin_lock_irqsave(&tune_lock, flags); 243 spin_lock_irqsave(&tune_lock, flags);
244 pci_read_config_word(dev, master_port, &master_data); 244 pci_read_config_word(dev, master_port, &master_data);
245 245
246 if (pio >= 2) 246 if (pio > 1)
247 control |= 1; /* Programmable timing on */ 247 control |= 1; /* Programmable timing on */
248 if (drive->media == ide_disk) 248 if (drive->media == ide_disk)
249 control |= 4; /* Prefetch, post write */ 249 control |= 4; /* Prefetch, post write */
250 if (pio >= 3) 250 if (pio > 2)
251 control |= 2; /* IORDY */ 251 control |= 2; /* IORDY */
252 if (is_slave) { 252 if (is_slave) {
253 master_data = master_data | 0x4000; 253 master_data |= 0x4000;
254 master_data &= ~0x0070;
254 if (pio > 1) { 255 if (pio > 1) {
255 /* enable PPE, IE and TIME */ 256 /* enable PPE, IE and TIME */
256 master_data = master_data | (control << 4); 257 master_data = master_data | (control << 4);
257 } else {
258 master_data &= ~0x0070;
259 } 258 }
260 pci_read_config_byte(dev, slave_port, &slave_data); 259 pci_read_config_byte(dev, slave_port, &slave_data);
261 slave_data = slave_data & (hwif->channel ? 0x0f : 0xf0); 260 slave_data = slave_data & (hwif->channel ? 0x0f : 0xf0);
262 slave_data = slave_data | (((timings[pio][0] << 2) | timings[pio][1]) << (hwif->channel ? 4 : 0)); 261 slave_data = slave_data | (((timings[pio][0] << 2) | timings[pio][1]) << (hwif->channel ? 4 : 0));
263 } else { 262 } else {
264 master_data = master_data & 0xccf8; 263 master_data &= ~0x3307;
265 if (pio > 1) { 264 if (pio > 1) {
266 /* enable PPE, IE and TIME */ 265 /* enable PPE, IE and TIME */
267 master_data = master_data | control; 266 master_data = master_data | control;
@@ -539,13 +538,19 @@ static ide_pci_device_t piix_pci_info[] __devinitdata = {
539 /* 0 */ DECLARE_PIIX_DEV("PIIXa"), 538 /* 0 */ DECLARE_PIIX_DEV("PIIXa"),
540 /* 1 */ DECLARE_PIIX_DEV("PIIXb"), 539 /* 1 */ DECLARE_PIIX_DEV("PIIXb"),
541 540
542 { /* 2 */ 541 /* 2 */
542 { /*
543 * MPIIX actually has only a single IDE channel mapped to
544 * the primary or secondary ports depending on the value
545 * of the bit 14 of the IDETIM register at offset 0x6c
546 */
543 .name = "MPIIX", 547 .name = "MPIIX",
544 .init_hwif = init_hwif_piix, 548 .init_hwif = init_hwif_piix,
545 .channels = 2, 549 .channels = 2,
546 .autodma = NODMA, 550 .autodma = NODMA,
547 .enablebits = {{0x6D,0x80,0x80}, {0x6F,0x80,0x80}}, 551 .enablebits = {{0x6d,0xc0,0x80}, {0x6d,0xc0,0xc0}},
548 .bootable = ON_BOARD, 552 .bootable = ON_BOARD,
553 .flags = IDEPCI_FLAG_ISA_PORTS
549 }, 554 },
550 555
551 /* 3 */ DECLARE_PIIX_DEV("PIIX3"), 556 /* 3 */ DECLARE_PIIX_DEV("PIIX3"),
diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/pci/slc90e66.c
index 90e79c0844d2..2663ddbd9b67 100644
--- a/drivers/ide/pci/slc90e66.c
+++ b/drivers/ide/pci/slc90e66.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/ide/pci/slc90e66.c Version 0.12 May 12, 2006 2 * linux/drivers/ide/pci/slc90e66.c Version 0.13 December 30, 2006
3 * 3 *
4 * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org> 4 * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org>
5 * Copyright (C) 2006 MontaVista Software, Inc. <source@mvista.com> 5 * Copyright (C) 2006 MontaVista Software, Inc. <source@mvista.com>
@@ -26,7 +26,7 @@ static u8 slc90e66_ratemask (ide_drive_t *drive)
26 u8 mode = 2; 26 u8 mode = 2;
27 27
28 if (!eighty_ninty_three(drive)) 28 if (!eighty_ninty_three(drive))
29 mode = min(mode, (u8)1); 29 mode = min_t(u8, mode, 1);
30 return mode; 30 return mode;
31} 31}
32 32
@@ -65,36 +65,47 @@ static void slc90e66_tune_drive (ide_drive_t *drive, u8 pio)
65{ 65{
66 ide_hwif_t *hwif = HWIF(drive); 66 ide_hwif_t *hwif = HWIF(drive);
67 struct pci_dev *dev = hwif->pci_dev; 67 struct pci_dev *dev = hwif->pci_dev;
68 int is_slave = (&hwif->drives[1] == drive); 68 int is_slave = drive->dn & 1;
69 int master_port = hwif->channel ? 0x42 : 0x40; 69 int master_port = hwif->channel ? 0x42 : 0x40;
70 int slave_port = 0x44; 70 int slave_port = 0x44;
71 unsigned long flags; 71 unsigned long flags;
72 u16 master_data; 72 u16 master_data;
73 u8 slave_data; 73 u8 slave_data;
74 /* ISP RTC */ 74 int control = 0;
75 /* ISP RTC */
75 static const u8 timings[][2]= { 76 static const u8 timings[][2]= {
76 { 0, 0 }, 77 { 0, 0 },
77 { 0, 0 }, 78 { 0, 0 },
78 { 1, 0 }, 79 { 1, 0 },
79 { 2, 1 }, 80 { 2, 1 },
80 { 2, 3 }, }; 81 { 2, 3 }, };
81 82
82 pio = ide_get_best_pio_mode(drive, pio, 5, NULL); 83 pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
83 spin_lock_irqsave(&ide_lock, flags); 84 spin_lock_irqsave(&ide_lock, flags);
84 pci_read_config_word(dev, master_port, &master_data); 85 pci_read_config_word(dev, master_port, &master_data);
86
87 if (pio > 1)
88 control |= 1; /* Programmable timing on */
89 if (drive->media == ide_disk)
90 control |= 4; /* Prefetch, post write */
91 if (pio > 2)
92 control |= 2; /* IORDY */
85 if (is_slave) { 93 if (is_slave) {
86 master_data = master_data | 0x4000; 94 master_data |= 0x4000;
87 if (pio > 1) 95 master_data &= ~0x0070;
96 if (pio > 1) {
88 /* enable PPE, IE and TIME */ 97 /* enable PPE, IE and TIME */
89 master_data = master_data | 0x0070; 98 master_data = master_data | (control << 4);
99 }
90 pci_read_config_byte(dev, slave_port, &slave_data); 100 pci_read_config_byte(dev, slave_port, &slave_data);
91 slave_data = slave_data & (hwif->channel ? 0x0f : 0xf0); 101 slave_data = slave_data & (hwif->channel ? 0x0f : 0xf0);
92 slave_data = slave_data | (((timings[pio][0] << 2) | timings[pio][1]) << (hwif->channel ? 4 : 0)); 102 slave_data = slave_data | (((timings[pio][0] << 2) | timings[pio][1]) << (hwif->channel ? 4 : 0));
93 } else { 103 } else {
94 master_data = master_data & 0xccf8; 104 master_data &= ~0x3307;
95 if (pio > 1) 105 if (pio > 1) {
96 /* enable PPE, IE and TIME */ 106 /* enable PPE, IE and TIME */
97 master_data = master_data | 0x0007; 107 master_data = master_data | control;
108 }
98 master_data = master_data | (timings[pio][0] << 12) | (timings[pio][1] << 8); 109 master_data = master_data | (timings[pio][0] << 12) | (timings[pio][1] << 8);
99 } 110 }
100 pci_write_config_word(dev, master_port, master_data); 111 pci_write_config_word(dev, master_port, master_data);
@@ -173,7 +184,7 @@ static int slc90e66_config_drive_xfer_rate (ide_drive_t *drive)
173 184
174 drive->init_speed = 0; 185 drive->init_speed = 0;
175 186
176 if (id && (id->capability & 1) && drive->autodma) { 187 if ((id->capability & 1) && drive->autodma) {
177 188
178 if (ide_use_dma(drive) && slc90e66_config_drive_for_dma(drive)) 189 if (ide_use_dma(drive) && slc90e66_config_drive_for_dma(drive))
179 return hwif->ide_dma_on(drive); 190 return hwif->ide_dma_on(drive);
@@ -201,7 +212,7 @@ static void __devinit init_hwif_slc90e66 (ide_hwif_t *hwif)
201 hwif->irq = hwif->channel ? 15 : 14; 212 hwif->irq = hwif->channel ? 15 : 14;
202 213
203 hwif->speedproc = &slc90e66_tune_chipset; 214 hwif->speedproc = &slc90e66_tune_chipset;
204 hwif->tuneproc = &slc90e66_tune_drive; 215 hwif->tuneproc = &slc90e66_tune_drive;
205 216
206 pci_read_config_byte(hwif->pci_dev, 0x47, &reg47); 217 pci_read_config_byte(hwif->pci_dev, 0x47, &reg47);
207 218
@@ -213,14 +224,16 @@ static void __devinit init_hwif_slc90e66 (ide_hwif_t *hwif)
213 224
214 hwif->atapi_dma = 1; 225 hwif->atapi_dma = 1;
215 hwif->ultra_mask = 0x1f; 226 hwif->ultra_mask = 0x1f;
216 hwif->mwdma_mask = 0x07; 227 hwif->mwdma_mask = 0x06;
217 hwif->swdma_mask = 0x07; 228 hwif->swdma_mask = 0x04;
218 229
219 if (!(hwif->udma_four)) 230 if (!hwif->udma_four) {
220 /* bit[0(1)]: 0:80, 1:40 */ 231 /* bit[0(1)]: 0:80, 1:40 */
221 hwif->udma_four = (reg47 & mask) ? 0 : 1; 232 hwif->udma_four = (reg47 & mask) ? 0 : 1;
233 }
222 234
223 hwif->ide_dma_check = &slc90e66_config_drive_xfer_rate; 235 hwif->ide_dma_check = &slc90e66_config_drive_xfer_rate;
236
224 if (!noautodma) 237 if (!noautodma)
225 hwif->autodma = 1; 238 hwif->autodma = 1;
226 hwif->drives[0].autodma = hwif->autodma; 239 hwif->drives[0].autodma = hwif->autodma;
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c
new file mode 100644
index 000000000000..2ad72bbda342
--- /dev/null
+++ b/drivers/ide/pci/tc86c001.c
@@ -0,0 +1,309 @@
1/*
2 * drivers/ide/pci/tc86c001.c Version 1.00 Dec 12, 2006
3 *
4 * Copyright (C) 2002 Toshiba Corporation
5 * Copyright (C) 2005-2006 MontaVista Software, Inc. <source@mvista.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#include <linux/types.h>
13#include <linux/pci.h>
14#include <linux/ide.h>
15
16static inline u8 tc86c001_ratemask(ide_drive_t *drive)
17{
18 return eighty_ninty_three(drive) ? 2 : 1;
19}
20
21static int tc86c001_tune_chipset(ide_drive_t *drive, u8 speed)
22{
23 ide_hwif_t *hwif = HWIF(drive);
24 unsigned long scr_port = hwif->config_data + (drive->dn ? 0x02 : 0x00);
25 u16 mode, scr = hwif->INW(scr_port);
26
27 speed = ide_rate_filter(tc86c001_ratemask(drive), speed);
28
29 switch (speed) {
30 case XFER_UDMA_4: mode = 0x00c0; break;
31 case XFER_UDMA_3: mode = 0x00b0; break;
32 case XFER_UDMA_2: mode = 0x00a0; break;
33 case XFER_UDMA_1: mode = 0x0090; break;
34 case XFER_UDMA_0: mode = 0x0080; break;
35 case XFER_MW_DMA_2: mode = 0x0070; break;
36 case XFER_MW_DMA_1: mode = 0x0060; break;
37 case XFER_MW_DMA_0: mode = 0x0050; break;
38 case XFER_PIO_4: mode = 0x0400; break;
39 case XFER_PIO_3: mode = 0x0300; break;
40 case XFER_PIO_2: mode = 0x0200; break;
41 case XFER_PIO_1: mode = 0x0100; break;
42 case XFER_PIO_0:
43 default: mode = 0x0000; break;
44 }
45
46 scr &= (speed < XFER_MW_DMA_0) ? 0xf8ff : 0xff0f;
47 scr |= mode;
48 hwif->OUTW(scr, scr_port);
49
50 return ide_config_drive_speed(drive, speed);
51}
52
53static void tc86c001_tune_drive(ide_drive_t *drive, u8 pio)
54{
55 pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
56 (void) tc86c001_tune_chipset(drive, XFER_PIO_0 + pio);
57}
58
59/*
60 * HACKITY HACK
61 *
62 * This is a workaround for the limitation 5 of the TC86C001 IDE controller:
63 * if a DMA transfer terminates prematurely, the controller leaves the device's
64 * interrupt request (INTRQ) pending and does not generate a PCI interrupt (or
65 * set the interrupt bit in the DMA status register), thus no PCI interrupt
66 * will occur until a DMA transfer has been successfully completed.
67 *
68 * We work around this by initiating dummy, zero-length DMA transfer on
69 * a DMA timeout expiration. I found no better way to do this with the current
70 * IDE core than to temporarily replace a higher level driver's timer expiry
71 * handler with our own backing up to that handler in case our recovery fails.
72 */
73static int tc86c001_timer_expiry(ide_drive_t *drive)
74{
75 ide_hwif_t *hwif = HWIF(drive);
76 ide_expiry_t *expiry = ide_get_hwifdata(hwif);
77 ide_hwgroup_t *hwgroup = HWGROUP(drive);
78 u8 dma_stat = hwif->INB(hwif->dma_status);
79
80 /* Restore a higher level driver's expiry handler first. */
81 hwgroup->expiry = expiry;
82
83 if ((dma_stat & 5) == 1) { /* DMA active and no interrupt */
84 unsigned long sc_base = hwif->config_data;
85 unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04);
86 u8 dma_cmd = hwif->INB(hwif->dma_command);
87
88 printk(KERN_WARNING "%s: DMA interrupt possibly stuck, "
89 "attempting recovery...\n", drive->name);
90
91 /* Stop DMA */
92 hwif->OUTB(dma_cmd & ~0x01, hwif->dma_command);
93
94 /* Setup the dummy DMA transfer */
95 hwif->OUTW(0, sc_base + 0x0a); /* Sector Count */
96 hwif->OUTW(0, twcr_port); /* Transfer Word Count 1 or 2 */
97
98 /* Start the dummy DMA transfer */
99 hwif->OUTB(0x00, hwif->dma_command); /* clear R_OR_WCTR for write */
100 hwif->OUTB(0x01, hwif->dma_command); /* set START_STOPBM */
101
102 /*
103 * If an interrupt was pending, it should come thru shortly.
104 * If not, a higher level driver's expiry handler should
105 * eventually cause some kind of recovery from the DMA stall.
106 */
107 return WAIT_MIN_SLEEP;
108 }
109
110 /* Chain to the restored expiry handler if DMA wasn't active. */
111 if (likely(expiry != NULL))
112 return expiry(drive);
113
114 /* If there was no handler, "emulate" that for ide_timer_expiry()... */
115 return -1;
116}
117
118static void tc86c001_dma_start(ide_drive_t *drive)
119{
120 ide_hwif_t *hwif = HWIF(drive);
121 ide_hwgroup_t *hwgroup = HWGROUP(drive);
122 unsigned long sc_base = hwif->config_data;
123 unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04);
124 unsigned long nsectors = hwgroup->rq->nr_sectors;
125
126 /*
127 * We have to manually load the sector count and size into
128 * the appropriate system control registers for DMA to work
129 * with LBA48 and ATAPI devices...
130 */
131 hwif->OUTW(nsectors, sc_base + 0x0a); /* Sector Count */
132 hwif->OUTW(SECTOR_SIZE / 2, twcr_port); /* Transfer Word Count 1/2 */
133
134 /* Install our timeout expiry hook, saving the current handler... */
135 ide_set_hwifdata(hwif, hwgroup->expiry);
136 hwgroup->expiry = &tc86c001_timer_expiry;
137
138 ide_dma_start(drive);
139}
140
141static int tc86c001_busproc(ide_drive_t *drive, int state)
142{
143 ide_hwif_t *hwif = HWIF(drive);
144 unsigned long sc_base = hwif->config_data;
145 u16 scr1;
146
147 /* System Control 1 Register bit 11 (ATA Hard Reset) read */
148 scr1 = hwif->INW(sc_base + 0x00);
149
150 switch (state) {
151 case BUSSTATE_ON:
152 if (!(scr1 & 0x0800))
153 return 0;
154 scr1 &= ~0x0800;
155
156 hwif->drives[0].failures = hwif->drives[1].failures = 0;
157 break;
158 case BUSSTATE_OFF:
159 if (scr1 & 0x0800)
160 return 0;
161 scr1 |= 0x0800;
162
163 hwif->drives[0].failures = hwif->drives[0].max_failures + 1;
164 hwif->drives[1].failures = hwif->drives[1].max_failures + 1;
165 break;
166 default:
167 return -EINVAL;
168 }
169
170 /* System Control 1 Register bit 11 (ATA Hard Reset) write */
171 hwif->OUTW(scr1, sc_base + 0x00);
172 return 0;
173}
174
175static int config_chipset_for_dma(ide_drive_t *drive)
176{
177 u8 speed = ide_dma_speed(drive, tc86c001_ratemask(drive));
178
179 if (!speed)
180 return 0;
181
182 (void) tc86c001_tune_chipset(drive, speed);
183 return ide_dma_enable(drive);
184}
185
186static int tc86c001_config_drive_xfer_rate(ide_drive_t *drive)
187{
188 ide_hwif_t *hwif = HWIF(drive);
189 struct hd_driveid *id = drive->id;
190
191 if ((id->capability & 1) && drive->autodma) {
192
193 if (ide_use_dma(drive) && config_chipset_for_dma(drive))
194 return hwif->ide_dma_on(drive);
195
196 goto fast_ata_pio;
197
198 } else if ((id->capability & 8) || (id->field_valid & 2)) {
199fast_ata_pio:
200 tc86c001_tune_drive(drive, 255);
201 return hwif->ide_dma_off_quietly(drive);
202 }
203 /* IORDY not supported */
204 return 0;
205}
206
207static void __devinit init_hwif_tc86c001(ide_hwif_t *hwif)
208{
209 unsigned long sc_base = pci_resource_start(hwif->pci_dev, 5);
210 u16 scr1 = hwif->INW(sc_base + 0x00);;
211
212 /* System Control 1 Register bit 15 (Soft Reset) set */
213 hwif->OUTW(scr1 | 0x8000, sc_base + 0x00);
214
215 /* System Control 1 Register bit 14 (FIFO Reset) set */
216 hwif->OUTW(scr1 | 0x4000, sc_base + 0x00);
217
218 /* System Control 1 Register: reset clear */
219 hwif->OUTW(scr1 & ~0xc000, sc_base + 0x00);
220
221 /* Store the system control register base for convenience... */
222 hwif->config_data = sc_base;
223
224 hwif->tuneproc = &tc86c001_tune_drive;
225 hwif->speedproc = &tc86c001_tune_chipset;
226 hwif->busproc = &tc86c001_busproc;
227
228 hwif->drives[0].autotune = hwif->drives[1].autotune = 1;
229
230 if (!hwif->dma_base)
231 return;
232
233 /*
234 * Sector Count Control Register bits 0 and 1 set:
235 * software sets Sector Count Register for master and slave device
236 */
237 hwif->OUTW(0x0003, sc_base + 0x0c);
238
239 /* Sector Count Register limit */
240 hwif->rqsize = 0xffff;
241
242 hwif->atapi_dma = 1;
243 hwif->ultra_mask = 0x1f;
244 hwif->mwdma_mask = 0x07;
245
246 hwif->ide_dma_check = &tc86c001_config_drive_xfer_rate;
247 hwif->dma_start = &tc86c001_dma_start;
248
249 if (!hwif->udma_four) {
250 /*
251 * System Control 1 Register bit 13 (PDIAGN):
252 * 0=80-pin cable, 1=40-pin cable
253 */
254 scr1 = hwif->INW(sc_base + 0x00);
255 hwif->udma_four = (scr1 & 0x2000) ? 0 : 1;
256 }
257
258 if (!noautodma)
259 hwif->autodma = 1;
260 hwif->drives[0].autodma = hwif->drives[1].autodma = hwif->autodma;
261}
262
263static unsigned int __devinit init_chipset_tc86c001(struct pci_dev *dev,
264 const char *name)
265{
266 int err = pci_request_region(dev, 5, name);
267
268 if (err)
269 printk(KERN_ERR "%s: system control regs already in use", name);
270 return err;
271}
272
273static ide_pci_device_t tc86c001_chipset __devinitdata = {
274 .name = "TC86C001",
275 .init_chipset = init_chipset_tc86c001,
276 .init_hwif = init_hwif_tc86c001,
277 .channels = 1,
278 .autodma = AUTODMA,
279 .bootable = OFF_BOARD
280};
281
282static int __devinit tc86c001_init_one(struct pci_dev *dev,
283 const struct pci_device_id *id)
284{
285 return ide_setup_pci_device(dev, &tc86c001_chipset);
286}
287
288static struct pci_device_id tc86c001_pci_tbl[] = {
289 { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE,
290 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
291 { 0, }
292};
293MODULE_DEVICE_TABLE(pci, tc86c001_pci_tbl);
294
295static struct pci_driver driver = {
296 .name = "TC86C001",
297 .id_table = tc86c001_pci_tbl,
298 .probe = tc86c001_init_one
299};
300
301static int __init tc86c001_ide_init(void)
302{
303 return ide_pci_register_driver(&driver);
304}
305module_init(tc86c001_ide_init);
306
307MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
308MODULE_DESCRIPTION("PCI driver module for TC86C001 IDE");
309MODULE_LICENSE("GPL");
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index 695e23904d30..a52c80fe7d3e 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -783,10 +783,11 @@ static LIST_HEAD(ide_pci_drivers);
783 * Returns are the same as for pci_register_driver 783 * Returns are the same as for pci_register_driver
784 */ 784 */
785 785
786int __ide_pci_register_driver(struct pci_driver *driver, struct module *module) 786int __ide_pci_register_driver(struct pci_driver *driver, struct module *module,
787 const char *mod_name)
787{ 788{
788 if(!pre_init) 789 if(!pre_init)
789 return __pci_register_driver(driver, module); 790 return __pci_register_driver(driver, module, mod_name);
790 driver->driver.owner = module; 791 driver->driver.owner = module;
791 list_add_tail(&driver->node, &ide_pci_drivers); 792 list_add_tail(&driver->node, &ide_pci_drivers);
792 return 0; 793 return 0;
@@ -862,6 +863,6 @@ void __init ide_scan_pcibus (int scan_direction)
862 { 863 {
863 list_del(l); 864 list_del(l);
864 d = list_entry(l, struct pci_driver, node); 865 d = list_entry(l, struct pci_driver, node);
865 __pci_register_driver(d, d->driver.owner); 866 __pci_register_driver(d, d->driver.owner, d->driver.mod_name);
866 } 867 }
867} 868}
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index af939796750d..d2bb5a9a303f 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -360,8 +360,7 @@ static int netevent_callback(struct notifier_block *self, unsigned long event,
360 if (event == NETEVENT_NEIGH_UPDATE) { 360 if (event == NETEVENT_NEIGH_UPDATE) {
361 struct neighbour *neigh = ctx; 361 struct neighbour *neigh = ctx;
362 362
363 if (neigh->dev->type == ARPHRD_INFINIBAND && 363 if (neigh->nud_state & NUD_VALID) {
364 (neigh->nud_state & NUD_VALID)) {
365 set_timeout(jiffies); 364 set_timeout(jiffies);
366 } 365 }
367 } 366 }
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 5ed141ebd1c8..13efd4170349 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -642,7 +642,8 @@ static void snoop_recv(struct ib_mad_qp_info *qp_info,
642 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 642 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
643} 643}
644 644
645static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num, 645static void build_smp_wc(struct ib_qp *qp,
646 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
646 struct ib_wc *wc) 647 struct ib_wc *wc)
647{ 648{
648 memset(wc, 0, sizeof *wc); 649 memset(wc, 0, sizeof *wc);
@@ -652,7 +653,7 @@ static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
652 wc->pkey_index = pkey_index; 653 wc->pkey_index = pkey_index;
653 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh); 654 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
654 wc->src_qp = IB_QP0; 655 wc->src_qp = IB_QP0;
655 wc->qp_num = IB_QP0; 656 wc->qp = qp;
656 wc->slid = slid; 657 wc->slid = slid;
657 wc->sl = 0; 658 wc->sl = 0;
658 wc->dlid_path_bits = 0; 659 wc->dlid_path_bits = 0;
@@ -713,7 +714,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
713 goto out; 714 goto out;
714 } 715 }
715 716
716 build_smp_wc(send_wr->wr_id, be16_to_cpu(smp->dr_slid), 717 build_smp_wc(mad_agent_priv->agent.qp,
718 send_wr->wr_id, be16_to_cpu(smp->dr_slid),
717 send_wr->wr.ud.pkey_index, 719 send_wr->wr.ud.pkey_index,
718 send_wr->wr.ud.port_num, &mad_wc); 720 send_wr->wr.ud.port_num, &mad_wc);
719 721
@@ -2355,7 +2357,8 @@ static void local_completions(struct work_struct *work)
2355 * Defined behavior is to complete response 2357 * Defined behavior is to complete response
2356 * before request 2358 * before request
2357 */ 2359 */
2358 build_smp_wc((unsigned long) local->mad_send_wr, 2360 build_smp_wc(recv_mad_agent->agent.qp,
2361 (unsigned long) local->mad_send_wr,
2359 be16_to_cpu(IB_LID_PERMISSIVE), 2362 be16_to_cpu(IB_LID_PERMISSIVE),
2360 0, recv_mad_agent->agent.port_num, &wc); 2363 0, recv_mad_agent->agent.port_num, &wc);
2361 2364
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 743247ec065e..df1efbc10882 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -933,7 +933,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
933 resp->wc[i].vendor_err = wc[i].vendor_err; 933 resp->wc[i].vendor_err = wc[i].vendor_err;
934 resp->wc[i].byte_len = wc[i].byte_len; 934 resp->wc[i].byte_len = wc[i].byte_len;
935 resp->wc[i].imm_data = (__u32 __force) wc[i].imm_data; 935 resp->wc[i].imm_data = (__u32 __force) wc[i].imm_data;
936 resp->wc[i].qp_num = wc[i].qp_num; 936 resp->wc[i].qp_num = wc[i].qp->qp_num;
937 resp->wc[i].src_qp = wc[i].src_qp; 937 resp->wc[i].src_qp = wc[i].src_qp;
938 resp->wc[i].wc_flags = wc[i].wc_flags; 938 resp->wc[i].wc_flags = wc[i].wc_flags;
939 resp->wc[i].pkey_index = wc[i].pkey_index; 939 resp->wc[i].pkey_index = wc[i].pkey_index;
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index 05c9154d46f4..5175c99ee586 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -153,7 +153,7 @@ static inline int c2_poll_one(struct c2_dev *c2dev,
153 153
154 entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce)); 154 entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce));
155 entry->wr_id = ce->hdr.context; 155 entry->wr_id = ce->hdr.context;
156 entry->qp_num = ce->handle; 156 entry->qp = &qp->ibqp;
157 entry->wc_flags = 0; 157 entry->wc_flags = 0;
158 entry->slid = 0; 158 entry->slid = 0;
159 entry->sl = 0; 159 entry->sl = 0;
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 1c722032319c..cf95ee474b0f 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -119,13 +119,14 @@ struct ehca_qp {
119 struct ipz_qp_handle ipz_qp_handle; 119 struct ipz_qp_handle ipz_qp_handle;
120 struct ehca_pfqp pf; 120 struct ehca_pfqp pf;
121 struct ib_qp_init_attr init_attr; 121 struct ib_qp_init_attr init_attr;
122 u64 uspace_squeue;
123 u64 uspace_rqueue;
124 u64 uspace_fwh;
125 struct ehca_cq *send_cq; 122 struct ehca_cq *send_cq;
126 struct ehca_cq *recv_cq; 123 struct ehca_cq *recv_cq;
127 unsigned int sqerr_purgeflag; 124 unsigned int sqerr_purgeflag;
128 struct hlist_node list_entries; 125 struct hlist_node list_entries;
126 /* mmap counter for resources mapped into user space */
127 u32 mm_count_squeue;
128 u32 mm_count_rqueue;
129 u32 mm_count_galpa;
129}; 130};
130 131
131/* must be power of 2 */ 132/* must be power of 2 */
@@ -142,13 +143,14 @@ struct ehca_cq {
142 struct ipz_cq_handle ipz_cq_handle; 143 struct ipz_cq_handle ipz_cq_handle;
143 struct ehca_pfcq pf; 144 struct ehca_pfcq pf;
144 spinlock_t cb_lock; 145 spinlock_t cb_lock;
145 u64 uspace_queue;
146 u64 uspace_fwh;
147 struct hlist_head qp_hashtab[QP_HASHTAB_LEN]; 146 struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
148 struct list_head entry; 147 struct list_head entry;
149 u32 nr_callbacks; 148 u32 nr_callbacks;
150 spinlock_t task_lock; 149 spinlock_t task_lock;
151 u32 ownpid; 150 u32 ownpid;
151 /* mmap counter for resources mapped into user space */
152 u32 mm_count_queue;
153 u32 mm_count_galpa;
152}; 154};
153 155
154enum ehca_mr_flag { 156enum ehca_mr_flag {
@@ -248,20 +250,6 @@ struct ehca_ucontext {
248 struct ib_ucontext ib_ucontext; 250 struct ib_ucontext ib_ucontext;
249}; 251};
250 252
251struct ehca_module *ehca_module_new(void);
252
253int ehca_module_delete(struct ehca_module *me);
254
255int ehca_eq_ctor(struct ehca_eq *eq);
256
257int ehca_eq_dtor(struct ehca_eq *eq);
258
259struct ehca_shca *ehca_shca_new(void);
260
261int ehca_shca_delete(struct ehca_shca *me);
262
263struct ehca_sport *ehca_sport_new(struct ehca_shca *anchor);
264
265int ehca_init_pd_cache(void); 253int ehca_init_pd_cache(void);
266void ehca_cleanup_pd_cache(void); 254void ehca_cleanup_pd_cache(void);
267int ehca_init_cq_cache(void); 255int ehca_init_cq_cache(void);
@@ -283,7 +271,6 @@ extern int ehca_port_act_time;
283extern int ehca_use_hp_mr; 271extern int ehca_use_hp_mr;
284 272
285struct ipzu_queue_resp { 273struct ipzu_queue_resp {
286 u64 queue; /* points to first queue entry */
287 u32 qe_size; /* queue entry size */ 274 u32 qe_size; /* queue entry size */
288 u32 act_nr_of_sg; 275 u32 act_nr_of_sg;
289 u32 queue_length; /* queue length allocated in bytes */ 276 u32 queue_length; /* queue length allocated in bytes */
@@ -296,7 +283,6 @@ struct ehca_create_cq_resp {
296 u32 cq_number; 283 u32 cq_number;
297 u32 token; 284 u32 token;
298 struct ipzu_queue_resp ipz_queue; 285 struct ipzu_queue_resp ipz_queue;
299 struct h_galpas galpas;
300}; 286};
301 287
302struct ehca_create_qp_resp { 288struct ehca_create_qp_resp {
@@ -309,7 +295,6 @@ struct ehca_create_qp_resp {
309 u32 dummy; /* padding for 8 byte alignment */ 295 u32 dummy; /* padding for 8 byte alignment */
310 struct ipzu_queue_resp ipz_squeue; 296 struct ipzu_queue_resp ipz_squeue;
311 struct ipzu_queue_resp ipz_rqueue; 297 struct ipzu_queue_resp ipz_rqueue;
312 struct h_galpas galpas;
313}; 298};
314 299
315struct ehca_alloc_cq_parms { 300struct ehca_alloc_cq_parms {
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 6074c897f51c..9291a86ca053 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -267,7 +267,6 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
267 if (context) { 267 if (context) {
268 struct ipz_queue *ipz_queue = &my_cq->ipz_queue; 268 struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
269 struct ehca_create_cq_resp resp; 269 struct ehca_create_cq_resp resp;
270 struct vm_area_struct *vma;
271 memset(&resp, 0, sizeof(resp)); 270 memset(&resp, 0, sizeof(resp));
272 resp.cq_number = my_cq->cq_number; 271 resp.cq_number = my_cq->cq_number;
273 resp.token = my_cq->token; 272 resp.token = my_cq->token;
@@ -276,40 +275,14 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
276 resp.ipz_queue.queue_length = ipz_queue->queue_length; 275 resp.ipz_queue.queue_length = ipz_queue->queue_length;
277 resp.ipz_queue.pagesize = ipz_queue->pagesize; 276 resp.ipz_queue.pagesize = ipz_queue->pagesize;
278 resp.ipz_queue.toggle_state = ipz_queue->toggle_state; 277 resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
279 ret = ehca_mmap_nopage(((u64)(my_cq->token) << 32) | 0x12000000,
280 ipz_queue->queue_length,
281 (void**)&resp.ipz_queue.queue,
282 &vma);
283 if (ret) {
284 ehca_err(device, "Could not mmap queue pages");
285 cq = ERR_PTR(ret);
286 goto create_cq_exit4;
287 }
288 my_cq->uspace_queue = resp.ipz_queue.queue;
289 resp.galpas = my_cq->galpas;
290 ret = ehca_mmap_register(my_cq->galpas.user.fw_handle,
291 (void**)&resp.galpas.kernel.fw_handle,
292 &vma);
293 if (ret) {
294 ehca_err(device, "Could not mmap fw_handle");
295 cq = ERR_PTR(ret);
296 goto create_cq_exit5;
297 }
298 my_cq->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
299 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { 278 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
300 ehca_err(device, "Copy to udata failed."); 279 ehca_err(device, "Copy to udata failed.");
301 goto create_cq_exit6; 280 goto create_cq_exit4;
302 } 281 }
303 } 282 }
304 283
305 return cq; 284 return cq;
306 285
307create_cq_exit6:
308 ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
309
310create_cq_exit5:
311 ehca_munmap(my_cq->uspace_queue, my_cq->ipz_queue.queue_length);
312
313create_cq_exit4: 286create_cq_exit4:
314 ipz_queue_dtor(&my_cq->ipz_queue); 287 ipz_queue_dtor(&my_cq->ipz_queue);
315 288
@@ -333,7 +306,6 @@ create_cq_exit1:
333int ehca_destroy_cq(struct ib_cq *cq) 306int ehca_destroy_cq(struct ib_cq *cq)
334{ 307{
335 u64 h_ret; 308 u64 h_ret;
336 int ret;
337 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); 309 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
338 int cq_num = my_cq->cq_number; 310 int cq_num = my_cq->cq_number;
339 struct ib_device *device = cq->device; 311 struct ib_device *device = cq->device;
@@ -343,6 +315,20 @@ int ehca_destroy_cq(struct ib_cq *cq)
343 u32 cur_pid = current->tgid; 315 u32 cur_pid = current->tgid;
344 unsigned long flags; 316 unsigned long flags;
345 317
318 if (cq->uobject) {
319 if (my_cq->mm_count_galpa || my_cq->mm_count_queue) {
320 ehca_err(device, "Resources still referenced in "
321 "user space cq_num=%x", my_cq->cq_number);
322 return -EINVAL;
323 }
324 if (my_cq->ownpid != cur_pid) {
325 ehca_err(device, "Invalid caller pid=%x ownpid=%x "
326 "cq_num=%x",
327 cur_pid, my_cq->ownpid, my_cq->cq_number);
328 return -EINVAL;
329 }
330 }
331
346 spin_lock_irqsave(&ehca_cq_idr_lock, flags); 332 spin_lock_irqsave(&ehca_cq_idr_lock, flags);
347 while (my_cq->nr_callbacks) { 333 while (my_cq->nr_callbacks) {
348 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags); 334 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
@@ -353,25 +339,6 @@ int ehca_destroy_cq(struct ib_cq *cq)
353 idr_remove(&ehca_cq_idr, my_cq->token); 339 idr_remove(&ehca_cq_idr, my_cq->token);
354 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags); 340 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
355 341
356 if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
357 ehca_err(device, "Invalid caller pid=%x ownpid=%x",
358 cur_pid, my_cq->ownpid);
359 return -EINVAL;
360 }
361
362 /* un-mmap if vma alloc */
363 if (my_cq->uspace_queue ) {
364 ret = ehca_munmap(my_cq->uspace_queue,
365 my_cq->ipz_queue.queue_length);
366 if (ret)
367 ehca_err(device, "Could not munmap queue ehca_cq=%p "
368 "cq_num=%x", my_cq, cq_num);
369 ret = ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
370 if (ret)
371 ehca_err(device, "Could not munmap fwh ehca_cq=%p "
372 "cq_num=%x", my_cq, cq_num);
373 }
374
375 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0); 342 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
376 if (h_ret == H_R_STATE) { 343 if (h_ret == H_R_STATE) {
377 /* cq in err: read err data and destroy it forcibly */ 344 /* cq in err: read err data and destroy it forcibly */
@@ -400,7 +367,7 @@ int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
400 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); 367 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
401 u32 cur_pid = current->tgid; 368 u32 cur_pid = current->tgid;
402 369
403 if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) { 370 if (cq->uobject && my_cq->ownpid != cur_pid) {
404 ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x", 371 ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x",
405 cur_pid, my_cq->ownpid); 372 cur_pid, my_cq->ownpid);
406 return -EINVAL; 373 return -EINVAL;
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index cd7789f0d08e..95fd59fb4528 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -171,14 +171,6 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
171 171
172void ehca_poll_eqs(unsigned long data); 172void ehca_poll_eqs(unsigned long data);
173 173
174int ehca_mmap_nopage(u64 foffset,u64 length,void **mapped,
175 struct vm_area_struct **vma);
176
177int ehca_mmap_register(u64 physical,void **mapped,
178 struct vm_area_struct **vma);
179
180int ehca_munmap(unsigned long addr, size_t len);
181
182#ifdef CONFIG_PPC_64K_PAGES 174#ifdef CONFIG_PPC_64K_PAGES
183void *ehca_alloc_fw_ctrlblock(gfp_t flags); 175void *ehca_alloc_fw_ctrlblock(gfp_t flags);
184void ehca_free_fw_ctrlblock(void *ptr); 176void ehca_free_fw_ctrlblock(void *ptr);
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 6574fbbaead5..1155bcf48212 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
52MODULE_LICENSE("Dual BSD/GPL"); 52MODULE_LICENSE("Dual BSD/GPL");
53MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); 53MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
54MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver"); 54MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
55MODULE_VERSION("SVNEHCA_0019"); 55MODULE_VERSION("SVNEHCA_0020");
56 56
57int ehca_open_aqp1 = 0; 57int ehca_open_aqp1 = 0;
58int ehca_debug_level = 0; 58int ehca_debug_level = 0;
@@ -288,7 +288,7 @@ int ehca_init_device(struct ehca_shca *shca)
288 strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX); 288 strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX);
289 shca->ib_device.owner = THIS_MODULE; 289 shca->ib_device.owner = THIS_MODULE;
290 290
291 shca->ib_device.uverbs_abi_ver = 5; 291 shca->ib_device.uverbs_abi_ver = 6;
292 shca->ib_device.uverbs_cmd_mask = 292 shca->ib_device.uverbs_cmd_mask =
293 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | 293 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
294 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | 294 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
@@ -790,7 +790,7 @@ int __init ehca_module_init(void)
790 int ret; 790 int ret;
791 791
792 printk(KERN_INFO "eHCA Infiniband Device Driver " 792 printk(KERN_INFO "eHCA Infiniband Device Driver "
793 "(Rel.: SVNEHCA_0019)\n"); 793 "(Rel.: SVNEHCA_0020)\n");
794 idr_init(&ehca_qp_idr); 794 idr_init(&ehca_qp_idr);
795 idr_init(&ehca_cq_idr); 795 idr_init(&ehca_cq_idr);
796 spin_lock_init(&ehca_qp_idr_lock); 796 spin_lock_init(&ehca_qp_idr_lock);
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 34b85556d01e..95efef921f1d 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -637,7 +637,6 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
637 struct ipz_queue *ipz_rqueue = &my_qp->ipz_rqueue; 637 struct ipz_queue *ipz_rqueue = &my_qp->ipz_rqueue;
638 struct ipz_queue *ipz_squeue = &my_qp->ipz_squeue; 638 struct ipz_queue *ipz_squeue = &my_qp->ipz_squeue;
639 struct ehca_create_qp_resp resp; 639 struct ehca_create_qp_resp resp;
640 struct vm_area_struct * vma;
641 memset(&resp, 0, sizeof(resp)); 640 memset(&resp, 0, sizeof(resp));
642 641
643 resp.qp_num = my_qp->real_qp_num; 642 resp.qp_num = my_qp->real_qp_num;
@@ -651,59 +650,21 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
651 resp.ipz_rqueue.queue_length = ipz_rqueue->queue_length; 650 resp.ipz_rqueue.queue_length = ipz_rqueue->queue_length;
652 resp.ipz_rqueue.pagesize = ipz_rqueue->pagesize; 651 resp.ipz_rqueue.pagesize = ipz_rqueue->pagesize;
653 resp.ipz_rqueue.toggle_state = ipz_rqueue->toggle_state; 652 resp.ipz_rqueue.toggle_state = ipz_rqueue->toggle_state;
654 ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x22000000,
655 ipz_rqueue->queue_length,
656 (void**)&resp.ipz_rqueue.queue,
657 &vma);
658 if (ret) {
659 ehca_err(pd->device, "Could not mmap rqueue pages");
660 goto create_qp_exit3;
661 }
662 my_qp->uspace_rqueue = resp.ipz_rqueue.queue;
663 /* squeue properties */ 653 /* squeue properties */
664 resp.ipz_squeue.qe_size = ipz_squeue->qe_size; 654 resp.ipz_squeue.qe_size = ipz_squeue->qe_size;
665 resp.ipz_squeue.act_nr_of_sg = ipz_squeue->act_nr_of_sg; 655 resp.ipz_squeue.act_nr_of_sg = ipz_squeue->act_nr_of_sg;
666 resp.ipz_squeue.queue_length = ipz_squeue->queue_length; 656 resp.ipz_squeue.queue_length = ipz_squeue->queue_length;
667 resp.ipz_squeue.pagesize = ipz_squeue->pagesize; 657 resp.ipz_squeue.pagesize = ipz_squeue->pagesize;
668 resp.ipz_squeue.toggle_state = ipz_squeue->toggle_state; 658 resp.ipz_squeue.toggle_state = ipz_squeue->toggle_state;
669 ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x23000000,
670 ipz_squeue->queue_length,
671 (void**)&resp.ipz_squeue.queue,
672 &vma);
673 if (ret) {
674 ehca_err(pd->device, "Could not mmap squeue pages");
675 goto create_qp_exit4;
676 }
677 my_qp->uspace_squeue = resp.ipz_squeue.queue;
678 /* fw_handle */
679 resp.galpas = my_qp->galpas;
680 ret = ehca_mmap_register(my_qp->galpas.user.fw_handle,
681 (void**)&resp.galpas.kernel.fw_handle,
682 &vma);
683 if (ret) {
684 ehca_err(pd->device, "Could not mmap fw_handle");
685 goto create_qp_exit5;
686 }
687 my_qp->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
688
689 if (ib_copy_to_udata(udata, &resp, sizeof resp)) { 659 if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
690 ehca_err(pd->device, "Copy to udata failed"); 660 ehca_err(pd->device, "Copy to udata failed");
691 ret = -EINVAL; 661 ret = -EINVAL;
692 goto create_qp_exit6; 662 goto create_qp_exit3;
693 } 663 }
694 } 664 }
695 665
696 return &my_qp->ib_qp; 666 return &my_qp->ib_qp;
697 667
698create_qp_exit6:
699 ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);
700
701create_qp_exit5:
702 ehca_munmap(my_qp->uspace_squeue, my_qp->ipz_squeue.queue_length);
703
704create_qp_exit4:
705 ehca_munmap(my_qp->uspace_rqueue, my_qp->ipz_rqueue.queue_length);
706
707create_qp_exit3: 668create_qp_exit3:
708 ipz_queue_dtor(&my_qp->ipz_rqueue); 669 ipz_queue_dtor(&my_qp->ipz_rqueue);
709 ipz_queue_dtor(&my_qp->ipz_squeue); 670 ipz_queue_dtor(&my_qp->ipz_squeue);
@@ -931,7 +892,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
931 my_qp->qp_type == IB_QPT_SMI) && 892 my_qp->qp_type == IB_QPT_SMI) &&
932 statetrans == IB_QPST_SQE2RTS) { 893 statetrans == IB_QPST_SQE2RTS) {
933 /* mark next free wqe if kernel */ 894 /* mark next free wqe if kernel */
934 if (my_qp->uspace_squeue == 0) { 895 if (!ibqp->uobject) {
935 struct ehca_wqe *wqe; 896 struct ehca_wqe *wqe;
936 /* lock send queue */ 897 /* lock send queue */
937 spin_lock_irqsave(&my_qp->spinlock_s, spl_flags); 898 spin_lock_irqsave(&my_qp->spinlock_s, spl_flags);
@@ -1417,11 +1378,18 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
1417 enum ib_qp_type qp_type; 1378 enum ib_qp_type qp_type;
1418 unsigned long flags; 1379 unsigned long flags;
1419 1380
1420 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && 1381 if (ibqp->uobject) {
1421 my_pd->ownpid != cur_pid) { 1382 if (my_qp->mm_count_galpa ||
1422 ehca_err(ibqp->device, "Invalid caller pid=%x ownpid=%x", 1383 my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
1423 cur_pid, my_pd->ownpid); 1384 ehca_err(ibqp->device, "Resources still referenced in "
1424 return -EINVAL; 1385 "user space qp_num=%x", ibqp->qp_num);
1386 return -EINVAL;
1387 }
1388 if (my_pd->ownpid != cur_pid) {
1389 ehca_err(ibqp->device, "Invalid caller pid=%x ownpid=%x",
1390 cur_pid, my_pd->ownpid);
1391 return -EINVAL;
1392 }
1425 } 1393 }
1426 1394
1427 if (my_qp->send_cq) { 1395 if (my_qp->send_cq) {
@@ -1439,24 +1407,6 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
1439 idr_remove(&ehca_qp_idr, my_qp->token); 1407 idr_remove(&ehca_qp_idr, my_qp->token);
1440 spin_unlock_irqrestore(&ehca_qp_idr_lock, flags); 1408 spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
1441 1409
1442 /* un-mmap if vma alloc */
1443 if (my_qp->uspace_rqueue) {
1444 ret = ehca_munmap(my_qp->uspace_rqueue,
1445 my_qp->ipz_rqueue.queue_length);
1446 if (ret)
1447 ehca_err(ibqp->device, "Could not munmap rqueue "
1448 "qp_num=%x", qp_num);
1449 ret = ehca_munmap(my_qp->uspace_squeue,
1450 my_qp->ipz_squeue.queue_length);
1451 if (ret)
1452 ehca_err(ibqp->device, "Could not munmap squeue "
1453 "qp_num=%x", qp_num);
1454 ret = ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);
1455 if (ret)
1456 ehca_err(ibqp->device, "Could not munmap fwh qp_num=%x",
1457 qp_num);
1458 }
1459
1460 h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp); 1410 h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
1461 if (h_ret != H_SUCCESS) { 1411 if (h_ret != H_SUCCESS) {
1462 ehca_err(ibqp->device, "hipz_h_destroy_qp() failed rc=%lx " 1412 ehca_err(ibqp->device, "hipz_h_destroy_qp() failed rc=%lx "
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index b46bda1bf85d..08d3f892d9f3 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -579,7 +579,7 @@ poll_cq_one_read_cqe:
579 } else 579 } else
580 wc->status = IB_WC_SUCCESS; 580 wc->status = IB_WC_SUCCESS;
581 581
582 wc->qp_num = cqe->local_qp_number; 582 wc->qp = NULL;
583 wc->byte_len = cqe->nr_bytes_transferred; 583 wc->byte_len = cqe->nr_bytes_transferred;
584 wc->pkey_index = cqe->pkey_index; 584 wc->pkey_index = cqe->pkey_index;
585 wc->slid = cqe->rlid; 585 wc->slid = cqe->rlid;
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index e08764e4aef2..73db920b6945 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -68,105 +68,183 @@ int ehca_dealloc_ucontext(struct ib_ucontext *context)
68 return 0; 68 return 0;
69} 69}
70 70
71struct page *ehca_nopage(struct vm_area_struct *vma, 71static void ehca_mm_open(struct vm_area_struct *vma)
72 unsigned long address, int *type)
73{ 72{
74 struct page *mypage = NULL; 73 u32 *count = (u32*)vma->vm_private_data;
75 u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT; 74 if (!count) {
76 u32 idr_handle = fileoffset >> 32; 75 ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
77 u32 q_type = (fileoffset >> 28) & 0xF; /* CQ, QP,... */ 76 vma->vm_start, vma->vm_end);
78 u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */ 77 return;
79 u32 cur_pid = current->tgid; 78 }
80 unsigned long flags; 79 (*count)++;
81 struct ehca_cq *cq; 80 if (!(*count))
82 struct ehca_qp *qp; 81 ehca_gen_err("Use count overflow vm_start=%lx vm_end=%lx",
83 struct ehca_pd *pd; 82 vma->vm_start, vma->vm_end);
84 u64 offset; 83 ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
85 void *vaddr; 84 vma->vm_start, vma->vm_end, *count);
85}
86 86
87 switch (q_type) { 87static void ehca_mm_close(struct vm_area_struct *vma)
88 case 1: /* CQ */ 88{
89 spin_lock_irqsave(&ehca_cq_idr_lock, flags); 89 u32 *count = (u32*)vma->vm_private_data;
90 cq = idr_find(&ehca_cq_idr, idr_handle); 90 if (!count) {
91 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags); 91 ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
92 vma->vm_start, vma->vm_end);
93 return;
94 }
95 (*count)--;
96 ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
97 vma->vm_start, vma->vm_end, *count);
98}
92 99
93 /* make sure this mmap really belongs to the authorized user */ 100static struct vm_operations_struct vm_ops = {
94 if (!cq) { 101 .open = ehca_mm_open,
95 ehca_gen_err("cq is NULL ret=NOPAGE_SIGBUS"); 102 .close = ehca_mm_close,
96 return NOPAGE_SIGBUS; 103};
104
105static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
106 u32 *mm_count)
107{
108 int ret;
109 u64 vsize, physical;
110
111 vsize = vma->vm_end - vma->vm_start;
112 if (vsize != EHCA_PAGESIZE) {
113 ehca_gen_err("invalid vsize=%lx", vma->vm_end - vma->vm_start);
114 return -EINVAL;
115 }
116
117 physical = galpas->user.fw_handle;
118 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
119 ehca_gen_dbg("vsize=%lx physical=%lx", vsize, physical);
120 /* VM_IO | VM_RESERVED are set by remap_pfn_range() */
121 ret = remap_pfn_range(vma, vma->vm_start, physical >> PAGE_SHIFT,
122 vsize, vma->vm_page_prot);
123 if (unlikely(ret)) {
124 ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
125 return -ENOMEM;
126 }
127
128 vma->vm_private_data = mm_count;
129 (*mm_count)++;
130 vma->vm_ops = &vm_ops;
131
132 return 0;
133}
134
135static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
136 u32 *mm_count)
137{
138 int ret;
139 u64 start, ofs;
140 struct page *page;
141
142 vma->vm_flags |= VM_RESERVED;
143 start = vma->vm_start;
144 for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
145 u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
146 page = virt_to_page(virt_addr);
147 ret = vm_insert_page(vma, start, page);
148 if (unlikely(ret)) {
149 ehca_gen_err("vm_insert_page() failed rc=%x", ret);
150 return ret;
97 } 151 }
152 start += PAGE_SIZE;
153 }
154 vma->vm_private_data = mm_count;
155 (*mm_count)++;
156 vma->vm_ops = &vm_ops;
98 157
99 if (cq->ownpid != cur_pid) { 158 return 0;
159}
160
161static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
162 u32 rsrc_type)
163{
164 int ret;
165
166 switch (rsrc_type) {
167 case 1: /* galpa fw handle */
168 ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number);
169 ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa);
170 if (unlikely(ret)) {
100 ehca_err(cq->ib_cq.device, 171 ehca_err(cq->ib_cq.device,
101 "Invalid caller pid=%x ownpid=%x", 172 "ehca_mmap_fw() failed rc=%x cq_num=%x",
102 cur_pid, cq->ownpid); 173 ret, cq->cq_number);
103 return NOPAGE_SIGBUS; 174 return ret;
104 } 175 }
176 break;
105 177
106 if (rsrc_type == 2) { 178 case 2: /* cq queue_addr */
107 ehca_dbg(cq->ib_cq.device, "cq=%p cq queuearea", cq); 179 ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number);
108 offset = address - vma->vm_start; 180 ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue);
109 vaddr = ipz_qeit_calc(&cq->ipz_queue, offset); 181 if (unlikely(ret)) {
110 ehca_dbg(cq->ib_cq.device, "offset=%lx vaddr=%p", 182 ehca_err(cq->ib_cq.device,
111 offset, vaddr); 183 "ehca_mmap_queue() failed rc=%x cq_num=%x",
112 mypage = virt_to_page(vaddr); 184 ret, cq->cq_number);
185 return ret;
113 } 186 }
114 break; 187 break;
115 188
116 case 2: /* QP */ 189 default:
117 spin_lock_irqsave(&ehca_qp_idr_lock, flags); 190 ehca_err(cq->ib_cq.device, "bad resource type=%x cq_num=%x",
118 qp = idr_find(&ehca_qp_idr, idr_handle); 191 rsrc_type, cq->cq_number);
119 spin_unlock_irqrestore(&ehca_qp_idr_lock, flags); 192 return -EINVAL;
193 }
120 194
121 /* make sure this mmap really belongs to the authorized user */ 195 return 0;
122 if (!qp) { 196}
123 ehca_gen_err("qp is NULL ret=NOPAGE_SIGBUS"); 197
124 return NOPAGE_SIGBUS; 198static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
199 u32 rsrc_type)
200{
201 int ret;
202
203 switch (rsrc_type) {
204 case 1: /* galpa fw handle */
205 ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
206 ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
207 if (unlikely(ret)) {
208 ehca_err(qp->ib_qp.device,
209 "remap_pfn_range() failed ret=%x qp_num=%x",
210 ret, qp->ib_qp.qp_num);
211 return -ENOMEM;
125 } 212 }
213 break;
126 214
127 pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd); 215 case 2: /* qp rqueue_addr */
128 if (pd->ownpid != cur_pid) { 216 ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue",
217 qp->ib_qp.qp_num);
218 ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, &qp->mm_count_rqueue);
219 if (unlikely(ret)) {
129 ehca_err(qp->ib_qp.device, 220 ehca_err(qp->ib_qp.device,
130 "Invalid caller pid=%x ownpid=%x", 221 "ehca_mmap_queue(rq) failed rc=%x qp_num=%x",
131 cur_pid, pd->ownpid); 222 ret, qp->ib_qp.qp_num);
132 return NOPAGE_SIGBUS; 223 return ret;
133 } 224 }
225 break;
134 226
135 if (rsrc_type == 2) { /* rqueue */ 227 case 3: /* qp squeue_addr */
136 ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueuearea", qp); 228 ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue",
137 offset = address - vma->vm_start; 229 qp->ib_qp.qp_num);
138 vaddr = ipz_qeit_calc(&qp->ipz_rqueue, offset); 230 ret = ehca_mmap_queue(vma, &qp->ipz_squeue, &qp->mm_count_squeue);
139 ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p", 231 if (unlikely(ret)) {
140 offset, vaddr); 232 ehca_err(qp->ib_qp.device,
141 mypage = virt_to_page(vaddr); 233 "ehca_mmap_queue(sq) failed rc=%x qp_num=%x",
142 } else if (rsrc_type == 3) { /* squeue */ 234 ret, qp->ib_qp.qp_num);
143 ehca_dbg(qp->ib_qp.device, "qp=%p qp squeuearea", qp); 235 return ret;
144 offset = address - vma->vm_start;
145 vaddr = ipz_qeit_calc(&qp->ipz_squeue, offset);
146 ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
147 offset, vaddr);
148 mypage = virt_to_page(vaddr);
149 } 236 }
150 break; 237 break;
151 238
152 default: 239 default:
153 ehca_gen_err("bad queue type %x", q_type); 240 ehca_err(qp->ib_qp.device, "bad resource type=%x qp=num=%x",
154 return NOPAGE_SIGBUS; 241 rsrc_type, qp->ib_qp.qp_num);
155 } 242 return -EINVAL;
156
157 if (!mypage) {
158 ehca_gen_err("Invalid page adr==NULL ret=NOPAGE_SIGBUS");
159 return NOPAGE_SIGBUS;
160 } 243 }
161 get_page(mypage);
162 244
163 return mypage; 245 return 0;
164} 246}
165 247
166static struct vm_operations_struct ehcau_vm_ops = {
167 .nopage = ehca_nopage,
168};
169
170int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) 248int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
171{ 249{
172 u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT; 250 u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
@@ -175,7 +253,6 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
175 u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */ 253 u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
176 u32 cur_pid = current->tgid; 254 u32 cur_pid = current->tgid;
177 u32 ret; 255 u32 ret;
178 u64 vsize, physical;
179 unsigned long flags; 256 unsigned long flags;
180 struct ehca_cq *cq; 257 struct ehca_cq *cq;
181 struct ehca_qp *qp; 258 struct ehca_qp *qp;
@@ -201,44 +278,12 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
201 if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context) 278 if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
202 return -EINVAL; 279 return -EINVAL;
203 280
204 switch (rsrc_type) { 281 ret = ehca_mmap_cq(vma, cq, rsrc_type);
205 case 1: /* galpa fw handle */ 282 if (unlikely(ret)) {
206 ehca_dbg(cq->ib_cq.device, "cq=%p cq triggerarea", cq); 283 ehca_err(cq->ib_cq.device,
207 vma->vm_flags |= VM_RESERVED; 284 "ehca_mmap_cq() failed rc=%x cq_num=%x",
208 vsize = vma->vm_end - vma->vm_start; 285 ret, cq->cq_number);
209 if (vsize != EHCA_PAGESIZE) { 286 return ret;
210 ehca_err(cq->ib_cq.device, "invalid vsize=%lx",
211 vma->vm_end - vma->vm_start);
212 return -EINVAL;
213 }
214
215 physical = cq->galpas.user.fw_handle;
216 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
217 vma->vm_flags |= VM_IO | VM_RESERVED;
218
219 ehca_dbg(cq->ib_cq.device,
220 "vsize=%lx physical=%lx", vsize, physical);
221 ret = remap_pfn_range(vma, vma->vm_start,
222 physical >> PAGE_SHIFT, vsize,
223 vma->vm_page_prot);
224 if (ret) {
225 ehca_err(cq->ib_cq.device,
226 "remap_pfn_range() failed ret=%x",
227 ret);
228 return -ENOMEM;
229 }
230 break;
231
232 case 2: /* cq queue_addr */
233 ehca_dbg(cq->ib_cq.device, "cq=%p cq q_addr", cq);
234 vma->vm_flags |= VM_RESERVED;
235 vma->vm_ops = &ehcau_vm_ops;
236 break;
237
238 default:
239 ehca_err(cq->ib_cq.device, "bad resource type %x",
240 rsrc_type);
241 return -EINVAL;
242 } 287 }
243 break; 288 break;
244 289
@@ -262,50 +307,12 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
262 if (!qp->ib_qp.uobject || qp->ib_qp.uobject->context != context) 307 if (!qp->ib_qp.uobject || qp->ib_qp.uobject->context != context)
263 return -EINVAL; 308 return -EINVAL;
264 309
265 switch (rsrc_type) { 310 ret = ehca_mmap_qp(vma, qp, rsrc_type);
266 case 1: /* galpa fw handle */ 311 if (unlikely(ret)) {
267 ehca_dbg(qp->ib_qp.device, "qp=%p qp triggerarea", qp); 312 ehca_err(qp->ib_qp.device,
268 vma->vm_flags |= VM_RESERVED; 313 "ehca_mmap_qp() failed rc=%x qp_num=%x",
269 vsize = vma->vm_end - vma->vm_start; 314 ret, qp->ib_qp.qp_num);
270 if (vsize != EHCA_PAGESIZE) { 315 return ret;
271 ehca_err(qp->ib_qp.device, "invalid vsize=%lx",
272 vma->vm_end - vma->vm_start);
273 return -EINVAL;
274 }
275
276 physical = qp->galpas.user.fw_handle;
277 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
278 vma->vm_flags |= VM_IO | VM_RESERVED;
279
280 ehca_dbg(qp->ib_qp.device, "vsize=%lx physical=%lx",
281 vsize, physical);
282 ret = remap_pfn_range(vma, vma->vm_start,
283 physical >> PAGE_SHIFT, vsize,
284 vma->vm_page_prot);
285 if (ret) {
286 ehca_err(qp->ib_qp.device,
287 "remap_pfn_range() failed ret=%x",
288 ret);
289 return -ENOMEM;
290 }
291 break;
292
293 case 2: /* qp rqueue_addr */
294 ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueue_addr", qp);
295 vma->vm_flags |= VM_RESERVED;
296 vma->vm_ops = &ehcau_vm_ops;
297 break;
298
299 case 3: /* qp squeue_addr */
300 ehca_dbg(qp->ib_qp.device, "qp=%p qp squeue_addr", qp);
301 vma->vm_flags |= VM_RESERVED;
302 vma->vm_ops = &ehcau_vm_ops;
303 break;
304
305 default:
306 ehca_err(qp->ib_qp.device, "bad resource type %x",
307 rsrc_type);
308 return -EINVAL;
309 } 316 }
310 break; 317 break;
311 318
@@ -316,77 +323,3 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
316 323
317 return 0; 324 return 0;
318} 325}
319
320int ehca_mmap_nopage(u64 foffset, u64 length, void **mapped,
321 struct vm_area_struct **vma)
322{
323 down_write(&current->mm->mmap_sem);
324 *mapped = (void*)do_mmap(NULL,0, length, PROT_WRITE,
325 MAP_SHARED | MAP_ANONYMOUS,
326 foffset);
327 up_write(&current->mm->mmap_sem);
328 if (!(*mapped)) {
329 ehca_gen_err("couldn't mmap foffset=%lx length=%lx",
330 foffset, length);
331 return -EINVAL;
332 }
333
334 *vma = find_vma(current->mm, (u64)*mapped);
335 if (!(*vma)) {
336 down_write(&current->mm->mmap_sem);
337 do_munmap(current->mm, 0, length);
338 up_write(&current->mm->mmap_sem);
339 ehca_gen_err("couldn't find vma queue=%p", *mapped);
340 return -EINVAL;
341 }
342 (*vma)->vm_flags |= VM_RESERVED;
343 (*vma)->vm_ops = &ehcau_vm_ops;
344
345 return 0;
346}
347
348int ehca_mmap_register(u64 physical, void **mapped,
349 struct vm_area_struct **vma)
350{
351 int ret;
352 unsigned long vsize;
353 /* ehca hw supports only 4k page */
354 ret = ehca_mmap_nopage(0, EHCA_PAGESIZE, mapped, vma);
355 if (ret) {
356 ehca_gen_err("could'nt mmap physical=%lx", physical);
357 return ret;
358 }
359
360 (*vma)->vm_flags |= VM_RESERVED;
361 vsize = (*vma)->vm_end - (*vma)->vm_start;
362 if (vsize != EHCA_PAGESIZE) {
363 ehca_gen_err("invalid vsize=%lx",
364 (*vma)->vm_end - (*vma)->vm_start);
365 return -EINVAL;
366 }
367
368 (*vma)->vm_page_prot = pgprot_noncached((*vma)->vm_page_prot);
369 (*vma)->vm_flags |= VM_IO | VM_RESERVED;
370
371 ret = remap_pfn_range((*vma), (*vma)->vm_start,
372 physical >> PAGE_SHIFT, vsize,
373 (*vma)->vm_page_prot);
374 if (ret) {
375 ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
376 return -ENOMEM;
377 }
378
379 return 0;
380
381}
382
383int ehca_munmap(unsigned long addr, size_t len) {
384 int ret = 0;
385 struct mm_struct *mm = current->mm;
386 if (mm) {
387 down_write(&mm->mmap_sem);
388 ret = do_munmap(mm, addr, len);
389 up_write(&mm->mmap_sem);
390 }
391 return ret;
392}
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 46c1c89bf6ae..64f07b19349f 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -379,7 +379,7 @@ void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
379 wc.vendor_err = 0; 379 wc.vendor_err = 0;
380 wc.byte_len = 0; 380 wc.byte_len = 0;
381 wc.imm_data = 0; 381 wc.imm_data = 0;
382 wc.qp_num = qp->ibqp.qp_num; 382 wc.qp = &qp->ibqp;
383 wc.src_qp = 0; 383 wc.src_qp = 0;
384 wc.wc_flags = 0; 384 wc.wc_flags = 0;
385 wc.pkey_index = 0; 385 wc.pkey_index = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index ce6038743c5c..5ff20cb04494 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -702,7 +702,7 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
702 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 702 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
703 wc->vendor_err = 0; 703 wc->vendor_err = 0;
704 wc->byte_len = 0; 704 wc->byte_len = 0;
705 wc->qp_num = qp->ibqp.qp_num; 705 wc->qp = &qp->ibqp;
706 wc->src_qp = qp->remote_qpn; 706 wc->src_qp = qp->remote_qpn;
707 wc->pkey_index = 0; 707 wc->pkey_index = 0;
708 wc->slid = qp->remote_ah_attr.dlid; 708 wc->slid = qp->remote_ah_attr.dlid;
@@ -836,7 +836,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
836 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 836 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
837 wc.vendor_err = 0; 837 wc.vendor_err = 0;
838 wc.byte_len = wqe->length; 838 wc.byte_len = wqe->length;
839 wc.qp_num = qp->ibqp.qp_num; 839 wc.qp = &qp->ibqp;
840 wc.src_qp = qp->remote_qpn; 840 wc.src_qp = qp->remote_qpn;
841 wc.pkey_index = 0; 841 wc.pkey_index = 0;
842 wc.slid = qp->remote_ah_attr.dlid; 842 wc.slid = qp->remote_ah_attr.dlid;
@@ -951,7 +951,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
951 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 951 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
952 wc.vendor_err = 0; 952 wc.vendor_err = 0;
953 wc.byte_len = 0; 953 wc.byte_len = 0;
954 wc.qp_num = qp->ibqp.qp_num; 954 wc.qp = &qp->ibqp;
955 wc.src_qp = qp->remote_qpn; 955 wc.src_qp = qp->remote_qpn;
956 wc.pkey_index = 0; 956 wc.pkey_index = 0;
957 wc.slid = qp->remote_ah_attr.dlid; 957 wc.slid = qp->remote_ah_attr.dlid;
@@ -1511,7 +1511,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
1511 wc.status = IB_WC_SUCCESS; 1511 wc.status = IB_WC_SUCCESS;
1512 wc.opcode = IB_WC_RECV; 1512 wc.opcode = IB_WC_RECV;
1513 wc.vendor_err = 0; 1513 wc.vendor_err = 0;
1514 wc.qp_num = qp->ibqp.qp_num; 1514 wc.qp = &qp->ibqp;
1515 wc.src_qp = qp->remote_qpn; 1515 wc.src_qp = qp->remote_qpn;
1516 wc.pkey_index = 0; 1516 wc.pkey_index = 0;
1517 wc.slid = qp->remote_ah_attr.dlid; 1517 wc.slid = qp->remote_ah_attr.dlid;
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index f7530512045d..e86cb171872e 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -137,7 +137,7 @@ bad_lkey:
137 wc.vendor_err = 0; 137 wc.vendor_err = 0;
138 wc.byte_len = 0; 138 wc.byte_len = 0;
139 wc.imm_data = 0; 139 wc.imm_data = 0;
140 wc.qp_num = qp->ibqp.qp_num; 140 wc.qp = &qp->ibqp;
141 wc.src_qp = 0; 141 wc.src_qp = 0;
142 wc.wc_flags = 0; 142 wc.wc_flags = 0;
143 wc.pkey_index = 0; 143 wc.pkey_index = 0;
@@ -336,7 +336,7 @@ again:
336 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 336 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
337 wc.vendor_err = 0; 337 wc.vendor_err = 0;
338 wc.byte_len = 0; 338 wc.byte_len = 0;
339 wc.qp_num = sqp->ibqp.qp_num; 339 wc.qp = &sqp->ibqp;
340 wc.src_qp = sqp->remote_qpn; 340 wc.src_qp = sqp->remote_qpn;
341 wc.pkey_index = 0; 341 wc.pkey_index = 0;
342 wc.slid = sqp->remote_ah_attr.dlid; 342 wc.slid = sqp->remote_ah_attr.dlid;
@@ -426,7 +426,7 @@ again:
426 wc.status = IB_WC_SUCCESS; 426 wc.status = IB_WC_SUCCESS;
427 wc.vendor_err = 0; 427 wc.vendor_err = 0;
428 wc.byte_len = wqe->length; 428 wc.byte_len = wqe->length;
429 wc.qp_num = qp->ibqp.qp_num; 429 wc.qp = &qp->ibqp;
430 wc.src_qp = qp->remote_qpn; 430 wc.src_qp = qp->remote_qpn;
431 /* XXX do we know which pkey matched? Only needed for GSI. */ 431 /* XXX do we know which pkey matched? Only needed for GSI. */
432 wc.pkey_index = 0; 432 wc.pkey_index = 0;
@@ -447,7 +447,7 @@ send_comp:
447 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 447 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
448 wc.vendor_err = 0; 448 wc.vendor_err = 0;
449 wc.byte_len = wqe->length; 449 wc.byte_len = wqe->length;
450 wc.qp_num = sqp->ibqp.qp_num; 450 wc.qp = &sqp->ibqp;
451 wc.src_qp = 0; 451 wc.src_qp = 0;
452 wc.pkey_index = 0; 452 wc.pkey_index = 0;
453 wc.slid = 0; 453 wc.slid = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index e636cfd67a82..325d6634ff53 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -49,7 +49,7 @@ static void complete_last_send(struct ipath_qp *qp, struct ipath_swqe *wqe,
49 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 49 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
50 wc->vendor_err = 0; 50 wc->vendor_err = 0;
51 wc->byte_len = wqe->length; 51 wc->byte_len = wqe->length;
52 wc->qp_num = qp->ibqp.qp_num; 52 wc->qp = &qp->ibqp;
53 wc->src_qp = qp->remote_qpn; 53 wc->src_qp = qp->remote_qpn;
54 wc->pkey_index = 0; 54 wc->pkey_index = 0;
55 wc->slid = qp->remote_ah_attr.dlid; 55 wc->slid = qp->remote_ah_attr.dlid;
@@ -411,7 +411,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
411 wc.status = IB_WC_SUCCESS; 411 wc.status = IB_WC_SUCCESS;
412 wc.opcode = IB_WC_RECV; 412 wc.opcode = IB_WC_RECV;
413 wc.vendor_err = 0; 413 wc.vendor_err = 0;
414 wc.qp_num = qp->ibqp.qp_num; 414 wc.qp = &qp->ibqp;
415 wc.src_qp = qp->remote_qpn; 415 wc.src_qp = qp->remote_qpn;
416 wc.pkey_index = 0; 416 wc.pkey_index = 0;
417 wc.slid = qp->remote_ah_attr.dlid; 417 wc.slid = qp->remote_ah_attr.dlid;
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 49f1102af8b3..9a3e54664ee4 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -66,7 +66,7 @@ bad_lkey:
66 wc.vendor_err = 0; 66 wc.vendor_err = 0;
67 wc.byte_len = 0; 67 wc.byte_len = 0;
68 wc.imm_data = 0; 68 wc.imm_data = 0;
69 wc.qp_num = qp->ibqp.qp_num; 69 wc.qp = &qp->ibqp;
70 wc.src_qp = 0; 70 wc.src_qp = 0;
71 wc.wc_flags = 0; 71 wc.wc_flags = 0;
72 wc.pkey_index = 0; 72 wc.pkey_index = 0;
@@ -255,7 +255,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
255 wc->status = IB_WC_SUCCESS; 255 wc->status = IB_WC_SUCCESS;
256 wc->opcode = IB_WC_RECV; 256 wc->opcode = IB_WC_RECV;
257 wc->vendor_err = 0; 257 wc->vendor_err = 0;
258 wc->qp_num = qp->ibqp.qp_num; 258 wc->qp = &qp->ibqp;
259 wc->src_qp = sqp->ibqp.qp_num; 259 wc->src_qp = sqp->ibqp.qp_num;
260 /* XXX do we know which pkey matched? Only needed for GSI. */ 260 /* XXX do we know which pkey matched? Only needed for GSI. */
261 wc->pkey_index = 0; 261 wc->pkey_index = 0;
@@ -474,7 +474,7 @@ done:
474 wc.vendor_err = 0; 474 wc.vendor_err = 0;
475 wc.opcode = IB_WC_SEND; 475 wc.opcode = IB_WC_SEND;
476 wc.byte_len = len; 476 wc.byte_len = len;
477 wc.qp_num = qp->ibqp.qp_num; 477 wc.qp = &qp->ibqp;
478 wc.src_qp = 0; 478 wc.src_qp = 0;
479 wc.wc_flags = 0; 479 wc.wc_flags = 0;
480 /* XXX initialize other fields? */ 480 /* XXX initialize other fields? */
@@ -651,7 +651,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
651 wc.status = IB_WC_SUCCESS; 651 wc.status = IB_WC_SUCCESS;
652 wc.opcode = IB_WC_RECV; 652 wc.opcode = IB_WC_RECV;
653 wc.vendor_err = 0; 653 wc.vendor_err = 0;
654 wc.qp_num = qp->ibqp.qp_num; 654 wc.qp = &qp->ibqp;
655 wc.src_qp = src_qp; 655 wc.src_qp = src_qp;
656 /* XXX do we know which pkey matched? Only needed for GSI. */ 656 /* XXX do we know which pkey matched? Only needed for GSI. */
657 wc.pkey_index = 0; 657 wc.pkey_index = 0;
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 768df7265b81..968d1519761c 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1854,7 +1854,7 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
1854 1854
1855 memset(inbox + 256, 0, 256); 1855 memset(inbox + 256, 0, 256);
1856 1856
1857 MTHCA_PUT(inbox, in_wc->qp_num, MAD_IFC_MY_QPN_OFFSET); 1857 MTHCA_PUT(inbox, in_wc->qp->qp_num, MAD_IFC_MY_QPN_OFFSET);
1858 MTHCA_PUT(inbox, in_wc->src_qp, MAD_IFC_RQPN_OFFSET); 1858 MTHCA_PUT(inbox, in_wc->src_qp, MAD_IFC_RQPN_OFFSET);
1859 1859
1860 val = in_wc->sl << 4; 1860 val = in_wc->sl << 4;
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 1159c8a0f2c5..efd79ef109a6 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -534,7 +534,7 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
534 } 534 }
535 } 535 }
536 536
537 entry->qp_num = (*cur_qp)->qpn; 537 entry->qp = &(*cur_qp)->ibqp;
538 538
539 if (is_send) { 539 if (is_send) {
540 wq = &(*cur_qp)->sq; 540 wq = &(*cur_qp)->sq;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 705eb1d0e554..af5ee2ec4499 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -958,16 +958,17 @@ struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
958 return netdev_priv(dev); 958 return netdev_priv(dev);
959} 959}
960 960
961static ssize_t show_pkey(struct class_device *cdev, char *buf) 961static ssize_t show_pkey(struct device *dev,
962 struct device_attribute *attr, char *buf)
962{ 963{
963 struct ipoib_dev_priv *priv = 964 struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));
964 netdev_priv(container_of(cdev, struct net_device, class_dev));
965 965
966 return sprintf(buf, "0x%04x\n", priv->pkey); 966 return sprintf(buf, "0x%04x\n", priv->pkey);
967} 967}
968static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); 968static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
969 969
970static ssize_t create_child(struct class_device *cdev, 970static ssize_t create_child(struct device *dev,
971 struct device_attribute *attr,
971 const char *buf, size_t count) 972 const char *buf, size_t count)
972{ 973{
973 int pkey; 974 int pkey;
@@ -985,14 +986,14 @@ static ssize_t create_child(struct class_device *cdev,
985 */ 986 */
986 pkey |= 0x8000; 987 pkey |= 0x8000;
987 988
988 ret = ipoib_vlan_add(container_of(cdev, struct net_device, class_dev), 989 ret = ipoib_vlan_add(to_net_dev(dev), pkey);
989 pkey);
990 990
991 return ret ? ret : count; 991 return ret ? ret : count;
992} 992}
993static CLASS_DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child); 993static DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);
994 994
995static ssize_t delete_child(struct class_device *cdev, 995static ssize_t delete_child(struct device *dev,
996 struct device_attribute *attr,
996 const char *buf, size_t count) 997 const char *buf, size_t count)
997{ 998{
998 int pkey; 999 int pkey;
@@ -1004,18 +1005,16 @@ static ssize_t delete_child(struct class_device *cdev,
1004 if (pkey < 0 || pkey > 0xffff) 1005 if (pkey < 0 || pkey > 0xffff)
1005 return -EINVAL; 1006 return -EINVAL;
1006 1007
1007 ret = ipoib_vlan_delete(container_of(cdev, struct net_device, class_dev), 1008 ret = ipoib_vlan_delete(to_net_dev(dev), pkey);
1008 pkey);
1009 1009
1010 return ret ? ret : count; 1010 return ret ? ret : count;
1011 1011
1012} 1012}
1013static CLASS_DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child); 1013static DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);
1014 1014
1015int ipoib_add_pkey_attr(struct net_device *dev) 1015int ipoib_add_pkey_attr(struct net_device *dev)
1016{ 1016{
1017 return class_device_create_file(&dev->class_dev, 1017 return device_create_file(&dev->dev, &dev_attr_pkey);
1018 &class_device_attr_pkey);
1019} 1018}
1020 1019
1021static struct net_device *ipoib_add_port(const char *format, 1020static struct net_device *ipoib_add_port(const char *format,
@@ -1083,11 +1082,9 @@ static struct net_device *ipoib_add_port(const char *format,
1083 1082
1084 if (ipoib_add_pkey_attr(priv->dev)) 1083 if (ipoib_add_pkey_attr(priv->dev))
1085 goto sysfs_failed; 1084 goto sysfs_failed;
1086 if (class_device_create_file(&priv->dev->class_dev, 1085 if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
1087 &class_device_attr_create_child))
1088 goto sysfs_failed; 1086 goto sysfs_failed;
1089 if (class_device_create_file(&priv->dev->class_dev, 1087 if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
1090 &class_device_attr_delete_child))
1091 goto sysfs_failed; 1088 goto sysfs_failed;
1092 1089
1093 return priv->dev; 1090 return priv->dev;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index f887780e8093..085eafe6667c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -42,15 +42,15 @@
42 42
43#include "ipoib.h" 43#include "ipoib.h"
44 44
45static ssize_t show_parent(struct class_device *class_dev, char *buf) 45static ssize_t show_parent(struct device *d, struct device_attribute *attr,
46 char *buf)
46{ 47{
47 struct net_device *dev = 48 struct net_device *dev = to_net_dev(d);
48 container_of(class_dev, struct net_device, class_dev);
49 struct ipoib_dev_priv *priv = netdev_priv(dev); 49 struct ipoib_dev_priv *priv = netdev_priv(dev);
50 50
51 return sprintf(buf, "%s\n", priv->parent->name); 51 return sprintf(buf, "%s\n", priv->parent->name);
52} 52}
53static CLASS_DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL); 53static DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL);
54 54
55int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) 55int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
56{ 56{
@@ -118,8 +118,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
118 if (ipoib_add_pkey_attr(priv->dev)) 118 if (ipoib_add_pkey_attr(priv->dev))
119 goto sysfs_failed; 119 goto sysfs_failed;
120 120
121 if (class_device_create_file(&priv->dev->class_dev, 121 if (device_create_file(&priv->dev->dev, &dev_attr_parent))
122 &class_device_attr_parent))
123 goto sysfs_failed; 122 goto sysfs_failed;
124 123
125 list_add_tail(&priv->list, &ppriv->child_intfs); 124 list_add_tail(&priv->list, &ppriv->child_intfs);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 72611fd15103..5e8ac577f0ad 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -548,6 +548,7 @@ static int srp_reconnect_target(struct srp_target_port *target)
548 target->tx_head = 0; 548 target->tx_head = 0;
549 target->tx_tail = 0; 549 target->tx_tail = 0;
550 550
551 target->qp_in_error = 0;
551 ret = srp_connect_target(target); 552 ret = srp_connect_target(target);
552 if (ret) 553 if (ret)
553 goto err; 554 goto err;
@@ -878,6 +879,7 @@ static void srp_completion(struct ib_cq *cq, void *target_ptr)
878 printk(KERN_ERR PFX "failed %s status %d\n", 879 printk(KERN_ERR PFX "failed %s status %d\n",
879 wc.wr_id & SRP_OP_RECV ? "receive" : "send", 880 wc.wr_id & SRP_OP_RECV ? "receive" : "send",
880 wc.status); 881 wc.status);
882 target->qp_in_error = 1;
881 break; 883 break;
882 } 884 }
883 885
@@ -1337,6 +1339,8 @@ static int srp_abort(struct scsi_cmnd *scmnd)
1337 1339
1338 printk(KERN_ERR "SRP abort called\n"); 1340 printk(KERN_ERR "SRP abort called\n");
1339 1341
1342 if (target->qp_in_error)
1343 return FAILED;
1340 if (srp_find_req(target, scmnd, &req)) 1344 if (srp_find_req(target, scmnd, &req))
1341 return FAILED; 1345 return FAILED;
1342 if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK)) 1346 if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
@@ -1365,6 +1369,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
1365 1369
1366 printk(KERN_ERR "SRP reset_device called\n"); 1370 printk(KERN_ERR "SRP reset_device called\n");
1367 1371
1372 if (target->qp_in_error)
1373 return FAILED;
1368 if (srp_find_req(target, scmnd, &req)) 1374 if (srp_find_req(target, scmnd, &req))
1369 return FAILED; 1375 return FAILED;
1370 if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET)) 1376 if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
@@ -1801,6 +1807,7 @@ static ssize_t srp_create_target(struct class_device *class_dev,
1801 goto err_free; 1807 goto err_free;
1802 } 1808 }
1803 1809
1810 target->qp_in_error = 0;
1804 ret = srp_connect_target(target); 1811 ret = srp_connect_target(target);
1805 if (ret) { 1812 if (ret) {
1806 printk(KERN_ERR PFX "Connection failed\n"); 1813 printk(KERN_ERR PFX "Connection failed\n");
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index c21772317b86..2f3319c719a5 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -158,6 +158,7 @@ struct srp_target_port {
158 struct completion done; 158 struct completion done;
159 int status; 159 int status;
160 enum srp_target_state state; 160 enum srp_target_state state;
161 int qp_in_error;
161}; 162};
162 163
163struct srp_iu { 164struct srp_iu {
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index f0ce822c1028..17c8c63cbe1a 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -45,7 +45,7 @@ EXPORT_SYMBOL(serio_interrupt);
45EXPORT_SYMBOL(__serio_register_port); 45EXPORT_SYMBOL(__serio_register_port);
46EXPORT_SYMBOL(serio_unregister_port); 46EXPORT_SYMBOL(serio_unregister_port);
47EXPORT_SYMBOL(serio_unregister_child_port); 47EXPORT_SYMBOL(serio_unregister_child_port);
48EXPORT_SYMBOL(serio_register_driver); 48EXPORT_SYMBOL(__serio_register_driver);
49EXPORT_SYMBOL(serio_unregister_driver); 49EXPORT_SYMBOL(serio_unregister_driver);
50EXPORT_SYMBOL(serio_open); 50EXPORT_SYMBOL(serio_open);
51EXPORT_SYMBOL(serio_close); 51EXPORT_SYMBOL(serio_close);
@@ -789,12 +789,14 @@ static void serio_attach_driver(struct serio_driver *drv)
789 drv->driver.name, error); 789 drv->driver.name, error);
790} 790}
791 791
792int serio_register_driver(struct serio_driver *drv) 792int __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name)
793{ 793{
794 int manual_bind = drv->manual_bind; 794 int manual_bind = drv->manual_bind;
795 int error; 795 int error;
796 796
797 drv->driver.bus = &serio_bus; 797 drv->driver.bus = &serio_bus;
798 drv->driver.owner = owner;
799 drv->driver.mod_name = mod_name;
798 800
799 /* 801 /*
800 * Temporarily disable automatic binding because probing 802 * Temporarily disable automatic binding because probing
diff --git a/drivers/media/video/zc0301/zc0301_sensor.h b/drivers/media/video/zc0301/zc0301_sensor.h
index 4363a915b1f4..3daf049a288a 100644
--- a/drivers/media/video/zc0301/zc0301_sensor.h
+++ b/drivers/media/video/zc0301/zc0301_sensor.h
@@ -75,7 +75,6 @@ static const struct usb_device_id zc0301_id_table[] = { \
75 { ZC0301_USB_DEVICE(0x046d, 0x08ae, 0xff), }, /* PAS202 */ \ 75 { ZC0301_USB_DEVICE(0x046d, 0x08ae, 0xff), }, /* PAS202 */ \
76 { ZC0301_USB_DEVICE(0x055f, 0xd003, 0xff), }, /* TAS5130 */ \ 76 { ZC0301_USB_DEVICE(0x055f, 0xd003, 0xff), }, /* TAS5130 */ \
77 { ZC0301_USB_DEVICE(0x055f, 0xd004, 0xff), }, /* TAS5130 */ \ 77 { ZC0301_USB_DEVICE(0x055f, 0xd004, 0xff), }, /* TAS5130 */ \
78 { ZC0301_USB_DEVICE(0x046d, 0x08ae, 0xff), }, /* PAS202 */ \
79 { ZC0301_USB_DEVICE(0x0ac8, 0x0301, 0xff), }, \ 78 { ZC0301_USB_DEVICE(0x0ac8, 0x0301, 0xff), }, \
80 { ZC0301_USB_DEVICE(0x0ac8, 0x301b, 0xff), }, /* PB-0330/HV7131 */ \ 79 { ZC0301_USB_DEVICE(0x0ac8, 0x301b, 0xff), }, /* PB-0330/HV7131 */ \
81 { ZC0301_USB_DEVICE(0x0ac8, 0x303b, 0xff), }, /* PB-0330 */ \ 80 { ZC0301_USB_DEVICE(0x0ac8, 0x303b, 0xff), }, /* PB-0330 */ \
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 00db31c314e0..89bba277da5f 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -69,6 +69,25 @@ config TIFM_7XX1
69 To compile this driver as a module, choose M here: the module will 69 To compile this driver as a module, choose M here: the module will
70 be called tifm_7xx1. 70 be called tifm_7xx1.
71 71
72config ASUS_LAPTOP
73 tristate "Asus Laptop Extras (EXPERIMENTAL)"
74 depends on X86
75 depends on ACPI
76 depends on EXPERIMENTAL && !ACPI_ASUS
77 depends on LEDS_CLASS
78 depends on BACKLIGHT_CLASS_DEVICE
79 ---help---
80 This is the new Linux driver for Asus laptops. It may also support some
81 MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate
82 standard ACPI events that go through /proc/acpi/events. It also adds
83 support for video output switching, LCD backlight control, Bluetooth and
84 Wlan control, and most importantly, allows you to blink those fancy LEDs.
85
86 For more information and a userspace daemon for handling the extra
87 buttons see <http://acpi4asus.sf.net/>.
88
89 If you have an ACPI-compatible ASUS laptop, say Y or M here.
90
72config MSI_LAPTOP 91config MSI_LAPTOP
73 tristate "MSI Laptop Extras" 92 tristate "MSI Laptop Extras"
74 depends on X86 93 depends on X86
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index c9e98ab021c5..35da53c409c0 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -6,6 +6,7 @@ obj- := misc.o # Dummy rule to force built-in.o to be made
6obj-$(CONFIG_IBM_ASM) += ibmasm/ 6obj-$(CONFIG_IBM_ASM) += ibmasm/
7obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/ 7obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/
8obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o 8obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
9obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o
9obj-$(CONFIG_LKDTM) += lkdtm.o 10obj-$(CONFIG_LKDTM) += lkdtm.o
10obj-$(CONFIG_TIFM_CORE) += tifm_core.o 11obj-$(CONFIG_TIFM_CORE) += tifm_core.o
11obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o 12obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
diff --git a/drivers/misc/asus-laptop.c b/drivers/misc/asus-laptop.c
new file mode 100644
index 000000000000..861c39935f99
--- /dev/null
+++ b/drivers/misc/asus-laptop.c
@@ -0,0 +1,1165 @@
1/*
2 * asus-laptop.c - Asus Laptop Support
3 *
4 *
5 * Copyright (C) 2002-2005 Julien Lerouge, 2003-2006 Karol Kozimor
6 * Copyright (C) 2006 Corentin Chary
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 *
23 * The development page for this driver is located at
24 * http://sourceforge.net/projects/acpi4asus/
25 *
26 * Credits:
27 * Pontus Fuchs - Helper functions, cleanup
28 * Johann Wiesner - Small compile fixes
29 * John Belmonte - ACPI code for Toshiba laptop was a good starting point.
30 * Eric Burghard - LED display support for W1N
31 * Josh Green - Light Sens support
32 * Thomas Tuttle - His first patch for led support was very helpfull
33 *
34 */
35
36#include <linux/autoconf.h>
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/init.h>
40#include <linux/types.h>
41#include <linux/err.h>
42#include <linux/proc_fs.h>
43#include <linux/backlight.h>
44#include <linux/fb.h>
45#include <linux/leds.h>
46#include <linux/platform_device.h>
47#include <acpi/acpi_drivers.h>
48#include <acpi/acpi_bus.h>
49#include <asm/uaccess.h>
50
51#define ASUS_LAPTOP_VERSION "0.40"
52
53#define ASUS_HOTK_NAME "Asus Laptop Support"
54#define ASUS_HOTK_CLASS "hotkey"
55#define ASUS_HOTK_DEVICE_NAME "Hotkey"
56#define ASUS_HOTK_HID "ATK0100"
57#define ASUS_HOTK_FILE "asus-laptop"
58#define ASUS_HOTK_PREFIX "\\_SB.ATKD."
59
60/*
61 * Some events we use, same for all Asus
62 */
63#define ATKD_BR_UP 0x10
64#define ATKD_BR_DOWN 0x20
65#define ATKD_LCD_ON 0x33
66#define ATKD_LCD_OFF 0x34
67
68/*
69 * Known bits returned by \_SB.ATKD.HWRS
70 */
71#define WL_HWRS 0x80
72#define BT_HWRS 0x100
73
74/*
75 * Flags for hotk status
76 * WL_ON and BT_ON are also used for wireless_status()
77 */
78#define WL_ON 0x01 //internal Wifi
79#define BT_ON 0x02 //internal Bluetooth
80#define MLED_ON 0x04 //mail LED
81#define TLED_ON 0x08 //touchpad LED
82#define RLED_ON 0x10 //Record LED
83#define PLED_ON 0x20 //Phone LED
84#define LCD_ON 0x40 //LCD backlight
85
86#define ASUS_LOG ASUS_HOTK_FILE ": "
87#define ASUS_ERR KERN_ERR ASUS_LOG
88#define ASUS_WARNING KERN_WARNING ASUS_LOG
89#define ASUS_NOTICE KERN_NOTICE ASUS_LOG
90#define ASUS_INFO KERN_INFO ASUS_LOG
91#define ASUS_DEBUG KERN_DEBUG ASUS_LOG
92
93MODULE_AUTHOR("Julien Lerouge, Karol Kozimor, Corentin Chary");
94MODULE_DESCRIPTION(ASUS_HOTK_NAME);
95MODULE_LICENSE("GPL");
96
97#define ASUS_HANDLE(object, paths...) \
98 static acpi_handle object##_handle = NULL; \
99 static char *object##_paths[] = { paths }
100
101/* LED */
102ASUS_HANDLE(mled_set, ASUS_HOTK_PREFIX "MLED");
103ASUS_HANDLE(tled_set, ASUS_HOTK_PREFIX "TLED");
104ASUS_HANDLE(rled_set, ASUS_HOTK_PREFIX "RLED"); /* W1JC */
105ASUS_HANDLE(pled_set, ASUS_HOTK_PREFIX "PLED"); /* A7J */
106
107/* LEDD */
108ASUS_HANDLE(ledd_set, ASUS_HOTK_PREFIX "SLCM");
109
110/* Bluetooth and WLAN
111 * WLED and BLED are not handled like other XLED, because in some dsdt
112 * they also control the WLAN/Bluetooth device.
113 */
114ASUS_HANDLE(wl_switch, ASUS_HOTK_PREFIX "WLED");
115ASUS_HANDLE(bt_switch, ASUS_HOTK_PREFIX "BLED");
116ASUS_HANDLE(wireless_status, ASUS_HOTK_PREFIX "RSTS"); /* All new models */
117
118/* Brightness */
119ASUS_HANDLE(brightness_set, ASUS_HOTK_PREFIX "SPLV");
120ASUS_HANDLE(brightness_get, ASUS_HOTK_PREFIX "GPLV");
121
122/* Backlight */
123ASUS_HANDLE(lcd_switch, "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */
124 "\\_SB.PCI0.ISA.EC0._Q10", /* A1x */
125 "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */
126 "\\_SB.PCI0.PX40.EC0.Q10", /* M1A */
127 "\\_SB.PCI0.LPCB.EC0._Q10", /* P30 */
128 "\\_SB.PCI0.PX40.Q10", /* S1x */
129 "\\Q10"); /* A2x, L2D, L3D, M2E */
130
131/* Display */
132ASUS_HANDLE(display_set, ASUS_HOTK_PREFIX "SDSP");
133ASUS_HANDLE(display_get, "\\_SB.PCI0.P0P1.VGA.GETD", /* A6B, A6K A6R A7D F3JM L4R M6R A3G
134 M6A M6V VX-1 V6J V6V W3Z */
135 "\\_SB.PCI0.P0P2.VGA.GETD", /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V
136 S5A M5A z33A W1Jc W2V */
137 "\\_SB.PCI0.P0P3.VGA.GETD", /* A6V A6Q */
138 "\\_SB.PCI0.P0PA.VGA.GETD", /* A6T, A6M */
139 "\\_SB.PCI0.PCI1.VGAC.NMAP", /* L3C */
140 "\\_SB.PCI0.VGA.GETD", /* Z96F */
141 "\\ACTD", /* A2D */
142 "\\ADVG", /* A4G Z71A W1N W5A W5F M2N M3N M5N M6N S1N S5N */
143 "\\DNXT", /* P30 */
144 "\\INFB", /* A2H D1 L2D L3D L3H L2E L5D L5C M1A M2E L4L W3V */
145 "\\SSTE"); /* A3F A6F A3N A3L M6N W3N W6A */
146
147ASUS_HANDLE(ls_switch, ASUS_HOTK_PREFIX "ALSC"); /* Z71A Z71V */
148ASUS_HANDLE(ls_level, ASUS_HOTK_PREFIX "ALSL"); /* Z71A Z71V */
149
150/*
151 * This is the main structure, we can use it to store anything interesting
152 * about the hotk device
153 */
154struct asus_hotk {
155 char *name; //laptop name
156 struct acpi_device *device; //the device we are in
157 acpi_handle handle; //the handle of the hotk device
158 char status; //status of the hotk, for LEDs, ...
159 u32 ledd_status; //status of the LED display
160 u8 light_level; //light sensor level
161 u8 light_switch; //light sensor switch value
162 u16 event_count[128]; //count for each event TODO make this better
163};
164
165/*
166 * This header is made available to allow proper configuration given model,
167 * revision number , ... this info cannot go in struct asus_hotk because it is
168 * available before the hotk
169 */
170static struct acpi_table_header *asus_info;
171
172/* The actual device the driver binds to */
173static struct asus_hotk *hotk;
174
175/*
176 * The hotkey driver declaration
177 */
178static int asus_hotk_add(struct acpi_device *device);
179static int asus_hotk_remove(struct acpi_device *device, int type);
180static struct acpi_driver asus_hotk_driver = {
181 .name = ASUS_HOTK_NAME,
182 .class = ASUS_HOTK_CLASS,
183 .ids = ASUS_HOTK_HID,
184 .ops = {
185 .add = asus_hotk_add,
186 .remove = asus_hotk_remove,
187 },
188};
189
190/* The backlight device /sys/class/backlight */
191static struct backlight_device *asus_backlight_device;
192
193/*
194 * The backlight class declaration
195 */
196static int read_brightness(struct backlight_device *bd);
197static int update_bl_status(struct backlight_device *bd);
198static struct backlight_properties asusbl_data = {
199 .owner = THIS_MODULE,
200 .get_brightness = read_brightness,
201 .update_status = update_bl_status,
202 .max_brightness = 15,
203};
204
205/* These functions actually update the LED's, and are called from a
206 * workqueue. By doing this as separate work rather than when the LED
207 * subsystem asks, we avoid messing with the Asus ACPI stuff during a
208 * potentially bad time, such as a timer interrupt. */
209static struct workqueue_struct *led_workqueue;
210
211#define ASUS_LED(object, ledname) \
212 static void object##_led_set(struct led_classdev *led_cdev, \
213 enum led_brightness value); \
214 static void object##_led_update(struct work_struct *ignored); \
215 static int object##_led_wk; \
216 DECLARE_WORK(object##_led_work, object##_led_update); \
217 static struct led_classdev object##_led = { \
218 .name = "asus:" ledname, \
219 .brightness_set = object##_led_set, \
220 }
221
222ASUS_LED(mled, "mail");
223ASUS_LED(tled, "touchpad");
224ASUS_LED(rled, "record");
225ASUS_LED(pled, "phone");
226
227/*
228 * This function evaluates an ACPI method, given an int as parameter, the
229 * method is searched within the scope of the handle, can be NULL. The output
230 * of the method is written is output, which can also be NULL
231 *
232 * returns 1 if write is successful, 0 else.
233 */
234static int write_acpi_int(acpi_handle handle, const char *method, int val,
235 struct acpi_buffer *output)
236{
237 struct acpi_object_list params; //list of input parameters (an int here)
238 union acpi_object in_obj; //the only param we use
239 acpi_status status;
240
241 params.count = 1;
242 params.pointer = &in_obj;
243 in_obj.type = ACPI_TYPE_INTEGER;
244 in_obj.integer.value = val;
245
246 status = acpi_evaluate_object(handle, (char *)method, &params, output);
247 return (status == AE_OK);
248}
249
250static int read_acpi_int(acpi_handle handle, const char *method, int *val,
251 struct acpi_object_list *params)
252{
253 struct acpi_buffer output;
254 union acpi_object out_obj;
255 acpi_status status;
256
257 output.length = sizeof(out_obj);
258 output.pointer = &out_obj;
259
260 status = acpi_evaluate_object(handle, (char *)method, params, &output);
261 *val = out_obj.integer.value;
262 return (status == AE_OK) && (out_obj.type == ACPI_TYPE_INTEGER);
263}
264
265static int read_wireless_status(int mask)
266{
267 int status;
268
269 if (!wireless_status_handle)
270 return (hotk->status & mask) ? 1 : 0;
271
272 if (read_acpi_int(wireless_status_handle, NULL, &status, NULL)) {
273 return (status & mask) ? 1 : 0;
274 } else
275 printk(ASUS_WARNING "Error reading Wireless status\n");
276
277 return (hotk->status & mask) ? 1 : 0;
278}
279
280/* Generic LED functions */
281static int read_status(int mask)
282{
283 /* There is a special method for both wireless devices */
284 if (mask == BT_ON || mask == WL_ON)
285 return read_wireless_status(mask);
286
287 return (hotk->status & mask) ? 1 : 0;
288}
289
290static void write_status(acpi_handle handle, int out, int mask, int invert)
291{
292 hotk->status = (out) ? (hotk->status | mask) : (hotk->status & ~mask);
293
294 if (invert) /* invert target value */
295 out = !out & 0x1;
296
297 if (handle && !write_acpi_int(handle, NULL, out, NULL))
298 printk(ASUS_WARNING " write failed\n");
299}
300
301/* /sys/class/led handlers */
302#define ASUS_LED_HANDLER(object, mask, invert) \
303 static void object##_led_set(struct led_classdev *led_cdev, \
304 enum led_brightness value) \
305 { \
306 object##_led_wk = value; \
307 queue_work(led_workqueue, &object##_led_work); \
308 } \
309 static void object##_led_update(struct work_struct *ignored) \
310 { \
311 int value = object##_led_wk; \
312 write_status(object##_set_handle, value, (mask), (invert)); \
313 }
314
315ASUS_LED_HANDLER(mled, MLED_ON, 1);
316ASUS_LED_HANDLER(pled, PLED_ON, 0);
317ASUS_LED_HANDLER(rled, RLED_ON, 0);
318ASUS_LED_HANDLER(tled, TLED_ON, 0);
319
320static int get_lcd_state(void)
321{
322 return read_status(LCD_ON);
323}
324
325static int set_lcd_state(int value)
326{
327 int lcd = 0;
328 acpi_status status = 0;
329
330 lcd = value ? 1 : 0;
331
332 if (lcd == get_lcd_state())
333 return 0;
334
335 if (lcd_switch_handle) {
336 status = acpi_evaluate_object(lcd_switch_handle,
337 NULL, NULL, NULL);
338
339 if (ACPI_FAILURE(status))
340 printk(ASUS_WARNING "Error switching LCD\n");
341 }
342
343 write_status(NULL, lcd, LCD_ON, 0);
344 return 0;
345}
346
347static void lcd_blank(int blank)
348{
349 struct backlight_device *bd = asus_backlight_device;
350
351 if (bd) {
352 down(&bd->sem);
353 if (likely(bd->props)) {
354 bd->props->power = blank;
355 if (likely(bd->props->update_status))
356 bd->props->update_status(bd);
357 }
358 up(&bd->sem);
359 }
360}
361
362static int read_brightness(struct backlight_device *bd)
363{
364 int value;
365
366 if (!read_acpi_int(brightness_get_handle, NULL, &value, NULL))
367 printk(ASUS_WARNING "Error reading brightness\n");
368
369 return value;
370}
371
372static int set_brightness(struct backlight_device *bd, int value)
373{
374 int ret = 0;
375
376 value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
377 /* 0 <= value <= 15 */
378
379 if (!write_acpi_int(brightness_set_handle, NULL, value, NULL)) {
380 printk(ASUS_WARNING "Error changing brightness\n");
381 ret = -EIO;
382 }
383
384 return ret;
385}
386
387static int update_bl_status(struct backlight_device *bd)
388{
389 int rv;
390 int value = bd->props->brightness;
391
392 rv = set_brightness(bd, value);
393 if (rv)
394 return rv;
395
396 value = (bd->props->power == FB_BLANK_UNBLANK) ? 1 : 0;
397 return set_lcd_state(value);
398}
399
400/*
401 * Platform device handlers
402 */
403
404/*
405 * We write our info in page, we begin at offset off and cannot write more
406 * than count bytes. We set eof to 1 if we handle those 2 values. We return the
407 * number of bytes written in page
408 */
409static ssize_t show_infos(struct device *dev,
410 struct device_attribute *attr, char *page)
411{
412 int len = 0;
413 int temp;
414 char buf[16]; //enough for all info
415 /*
416 * We use the easy way, we don't care of off and count, so we don't set eof
417 * to 1
418 */
419
420 len += sprintf(page, ASUS_HOTK_NAME " " ASUS_LAPTOP_VERSION "\n");
421 len += sprintf(page + len, "Model reference : %s\n", hotk->name);
422 /*
423 * The SFUN method probably allows the original driver to get the list
424 * of features supported by a given model. For now, 0x0100 or 0x0800
425 * bit signifies that the laptop is equipped with a Wi-Fi MiniPCI card.
426 * The significance of others is yet to be found.
427 */
428 if (read_acpi_int(hotk->handle, "SFUN", &temp, NULL))
429 len +=
430 sprintf(page + len, "SFUN value : 0x%04x\n", temp);
431 /*
432 * Another value for userspace: the ASYM method returns 0x02 for
433 * battery low and 0x04 for battery critical, its readings tend to be
434 * more accurate than those provided by _BST.
435 * Note: since not all the laptops provide this method, errors are
436 * silently ignored.
437 */
438 if (read_acpi_int(hotk->handle, "ASYM", &temp, NULL))
439 len +=
440 sprintf(page + len, "ASYM value : 0x%04x\n", temp);
441 if (asus_info) {
442 snprintf(buf, 16, "%d", asus_info->length);
443 len += sprintf(page + len, "DSDT length : %s\n", buf);
444 snprintf(buf, 16, "%d", asus_info->checksum);
445 len += sprintf(page + len, "DSDT checksum : %s\n", buf);
446 snprintf(buf, 16, "%d", asus_info->revision);
447 len += sprintf(page + len, "DSDT revision : %s\n", buf);
448 snprintf(buf, 7, "%s", asus_info->oem_id);
449 len += sprintf(page + len, "OEM id : %s\n", buf);
450 snprintf(buf, 9, "%s", asus_info->oem_table_id);
451 len += sprintf(page + len, "OEM table id : %s\n", buf);
452 snprintf(buf, 16, "%x", asus_info->oem_revision);
453 len += sprintf(page + len, "OEM revision : 0x%s\n", buf);
454 snprintf(buf, 5, "%s", asus_info->asl_compiler_id);
455 len += sprintf(page + len, "ASL comp vendor id : %s\n", buf);
456 snprintf(buf, 16, "%x", asus_info->asl_compiler_revision);
457 len += sprintf(page + len, "ASL comp revision : 0x%s\n", buf);
458 }
459
460 return len;
461}
462
463static int parse_arg(const char *buf, unsigned long count, int *val)
464{
465 if (!count)
466 return 0;
467 if (count > 31)
468 return -EINVAL;
469 if (sscanf(buf, "%i", val) != 1)
470 return -EINVAL;
471 return count;
472}
473
474static ssize_t store_status(const char *buf, size_t count,
475 acpi_handle handle, int mask, int invert)
476{
477 int rv, value;
478 int out = 0;
479
480 rv = parse_arg(buf, count, &value);
481 if (rv > 0)
482 out = value ? 1 : 0;
483
484 write_status(handle, out, mask, invert);
485
486 return rv;
487}
488
489/*
490 * LEDD display
491 */
492static ssize_t show_ledd(struct device *dev,
493 struct device_attribute *attr, char *buf)
494{
495 return sprintf(buf, "0x%08x\n", hotk->ledd_status);
496}
497
498static ssize_t store_ledd(struct device *dev, struct device_attribute *attr,
499 const char *buf, size_t count)
500{
501 int rv, value;
502
503 rv = parse_arg(buf, count, &value);
504 if (rv > 0) {
505 if (!write_acpi_int(ledd_set_handle, NULL, value, NULL))
506 printk(ASUS_WARNING "LED display write failed\n");
507 else
508 hotk->ledd_status = (u32) value;
509 }
510 return rv;
511}
512
513/*
514 * WLAN
515 */
516static ssize_t show_wlan(struct device *dev,
517 struct device_attribute *attr, char *buf)
518{
519 return sprintf(buf, "%d\n", read_status(WL_ON));
520}
521
522static ssize_t store_wlan(struct device *dev, struct device_attribute *attr,
523 const char *buf, size_t count)
524{
525 return store_status(buf, count, wl_switch_handle, WL_ON, 0);
526}
527
528/*
529 * Bluetooth
530 */
531static ssize_t show_bluetooth(struct device *dev,
532 struct device_attribute *attr, char *buf)
533{
534 return sprintf(buf, "%d\n", read_status(BT_ON));
535}
536
537static ssize_t store_bluetooth(struct device *dev,
538 struct device_attribute *attr, const char *buf,
539 size_t count)
540{
541 return store_status(buf, count, bt_switch_handle, BT_ON, 0);
542}
543
544/*
545 * Display
546 */
547static void set_display(int value)
548{
549 /* no sanity check needed for now */
550 if (!write_acpi_int(display_set_handle, NULL, value, NULL))
551 printk(ASUS_WARNING "Error setting display\n");
552 return;
553}
554
555static int read_display(void)
556{
557 int value = 0;
558
559 /* In most of the case, we know how to set the display, but sometime
560 we can't read it */
561 if (display_get_handle) {
562 if (!read_acpi_int(display_get_handle, NULL, &value, NULL))
563 printk(ASUS_WARNING "Error reading display status\n");
564 }
565
566 value &= 0x0F; /* needed for some models, shouldn't hurt others */
567
568 return value;
569}
570
571/*
572 * Now, *this* one could be more user-friendly, but so far, no-one has
573 * complained. The significance of bits is the same as in store_disp()
574 */
575static ssize_t show_disp(struct device *dev,
576 struct device_attribute *attr, char *buf)
577{
578 return sprintf(buf, "%d\n", read_display());
579}
580
581/*
582 * Experimental support for display switching. As of now: 1 should activate
583 * the LCD output, 2 should do for CRT, 4 for TV-Out and 8 for DVI.
584 * Any combination (bitwise) of these will suffice. I never actually tested 4
585 * displays hooked up simultaneously, so be warned. See the acpi4asus README
586 * for more info.
587 */
588static ssize_t store_disp(struct device *dev, struct device_attribute *attr,
589 const char *buf, size_t count)
590{
591 int rv, value;
592
593 rv = parse_arg(buf, count, &value);
594 if (rv > 0)
595 set_display(value);
596 return rv;
597}
598
599/*
600 * Light Sens
601 */
602static void set_light_sens_switch(int value)
603{
604 if (!write_acpi_int(ls_switch_handle, NULL, value, NULL))
605 printk(ASUS_WARNING "Error setting light sensor switch\n");
606 hotk->light_switch = value;
607}
608
609static ssize_t show_lssw(struct device *dev,
610 struct device_attribute *attr, char *buf)
611{
612 return sprintf(buf, "%d\n", hotk->light_switch);
613}
614
615static ssize_t store_lssw(struct device *dev, struct device_attribute *attr,
616 const char *buf, size_t count)
617{
618 int rv, value;
619
620 rv = parse_arg(buf, count, &value);
621 if (rv > 0)
622 set_light_sens_switch(value ? 1 : 0);
623
624 return rv;
625}
626
627static void set_light_sens_level(int value)
628{
629 if (!write_acpi_int(ls_level_handle, NULL, value, NULL))
630 printk(ASUS_WARNING "Error setting light sensor level\n");
631 hotk->light_level = value;
632}
633
634static ssize_t show_lslvl(struct device *dev,
635 struct device_attribute *attr, char *buf)
636{
637 return sprintf(buf, "%d\n", hotk->light_level);
638}
639
640static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr,
641 const char *buf, size_t count)
642{
643 int rv, value;
644
645 rv = parse_arg(buf, count, &value);
646 if (rv > 0) {
647 value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
648 /* 0 <= value <= 15 */
649 set_light_sens_level(value);
650 }
651
652 return rv;
653}
654
655static void asus_hotk_notify(acpi_handle handle, u32 event, void *data)
656{
657 /* TODO Find a better way to handle events count. */
658 if (!hotk)
659 return;
660
661 /*
662 * We need to tell the backlight device when the backlight power is
663 * switched
664 */
665 if (event == ATKD_LCD_ON) {
666 write_status(NULL, 1, LCD_ON, 0);
667 lcd_blank(FB_BLANK_UNBLANK);
668 } else if (event == ATKD_LCD_OFF) {
669 write_status(NULL, 0, LCD_ON, 0);
670 lcd_blank(FB_BLANK_POWERDOWN);
671 }
672
673 acpi_bus_generate_event(hotk->device, event,
674 hotk->event_count[event % 128]++);
675
676 return;
677}
678
679#define ASUS_CREATE_DEVICE_ATTR(_name) \
680 struct device_attribute dev_attr_##_name = { \
681 .attr = { \
682 .name = __stringify(_name), \
683 .mode = 0, \
684 .owner = THIS_MODULE }, \
685 .show = NULL, \
686 .store = NULL, \
687 }
688
689#define ASUS_SET_DEVICE_ATTR(_name, _mode, _show, _store) \
690 do { \
691 dev_attr_##_name.attr.mode = _mode; \
692 dev_attr_##_name.show = _show; \
693 dev_attr_##_name.store = _store; \
694 } while(0)
695
696static ASUS_CREATE_DEVICE_ATTR(infos);
697static ASUS_CREATE_DEVICE_ATTR(wlan);
698static ASUS_CREATE_DEVICE_ATTR(bluetooth);
699static ASUS_CREATE_DEVICE_ATTR(display);
700static ASUS_CREATE_DEVICE_ATTR(ledd);
701static ASUS_CREATE_DEVICE_ATTR(ls_switch);
702static ASUS_CREATE_DEVICE_ATTR(ls_level);
703
704static struct attribute *asuspf_attributes[] = {
705 &dev_attr_infos.attr,
706 &dev_attr_wlan.attr,
707 &dev_attr_bluetooth.attr,
708 &dev_attr_display.attr,
709 &dev_attr_ledd.attr,
710 &dev_attr_ls_switch.attr,
711 &dev_attr_ls_level.attr,
712 NULL
713};
714
715static struct attribute_group asuspf_attribute_group = {
716 .attrs = asuspf_attributes
717};
718
719static struct platform_driver asuspf_driver = {
720 .driver = {
721 .name = ASUS_HOTK_FILE,
722 .owner = THIS_MODULE,
723 }
724};
725
726static struct platform_device *asuspf_device;
727
728static void asus_hotk_add_fs(void)
729{
730 ASUS_SET_DEVICE_ATTR(infos, 0444, show_infos, NULL);
731
732 if (wl_switch_handle)
733 ASUS_SET_DEVICE_ATTR(wlan, 0644, show_wlan, store_wlan);
734
735 if (bt_switch_handle)
736 ASUS_SET_DEVICE_ATTR(bluetooth, 0644,
737 show_bluetooth, store_bluetooth);
738
739 if (display_set_handle && display_get_handle)
740 ASUS_SET_DEVICE_ATTR(display, 0644, show_disp, store_disp);
741 else if (display_set_handle)
742 ASUS_SET_DEVICE_ATTR(display, 0200, NULL, store_disp);
743
744 if (ledd_set_handle)
745 ASUS_SET_DEVICE_ATTR(ledd, 0644, show_ledd, store_ledd);
746
747 if (ls_switch_handle && ls_level_handle) {
748 ASUS_SET_DEVICE_ATTR(ls_level, 0644, show_lslvl, store_lslvl);
749 ASUS_SET_DEVICE_ATTR(ls_switch, 0644, show_lssw, store_lssw);
750 }
751}
752
753static int asus_handle_init(char *name, acpi_handle * handle,
754 char **paths, int num_paths)
755{
756 int i;
757 acpi_status status;
758
759 for (i = 0; i < num_paths; i++) {
760 status = acpi_get_handle(NULL, paths[i], handle);
761 if (ACPI_SUCCESS(status))
762 return 0;
763 }
764
765 *handle = NULL;
766 return -ENODEV;
767}
768
769#define ASUS_HANDLE_INIT(object) \
770 asus_handle_init(#object, &object##_handle, object##_paths, \
771 ARRAY_SIZE(object##_paths))
772
773/*
774 * This function is used to initialize the hotk with right values. In this
775 * method, we can make all the detection we want, and modify the hotk struct
776 */
777static int asus_hotk_get_info(void)
778{
779 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
780 union acpi_object *model = NULL;
781 int bsts_result, hwrs_result;
782 char *string = NULL;
783 acpi_status status;
784
785 /*
786 * Get DSDT headers early enough to allow for differentiating between
787 * models, but late enough to allow acpi_bus_register_driver() to fail
788 * before doing anything ACPI-specific. Should we encounter a machine,
789 * which needs special handling (i.e. its hotkey device has a different
790 * HID), this bit will be moved. A global variable asus_info contains
791 * the DSDT header.
792 */
793 status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info);
794 if (ACPI_FAILURE(status))
795 printk(ASUS_WARNING "Couldn't get the DSDT table header\n");
796
797 /* We have to write 0 on init this far for all ASUS models */
798 if (!write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
799 printk(ASUS_ERR "Hotkey initialization failed\n");
800 return -ENODEV;
801 }
802
803 /* This needs to be called for some laptops to init properly */
804 if (!read_acpi_int(hotk->handle, "BSTS", &bsts_result, NULL))
805 printk(ASUS_WARNING "Error calling BSTS\n");
806 else if (bsts_result)
807 printk(ASUS_NOTICE "BSTS called, 0x%02x returned\n",
808 bsts_result);
809
810 /*
811 * Try to match the object returned by INIT to the specific model.
812 * Handle every possible object (or the lack of thereof) the DSDT
813 * writers might throw at us. When in trouble, we pass NULL to
814 * asus_model_match() and try something completely different.
815 */
816 if (buffer.pointer) {
817 model = buffer.pointer;
818 switch (model->type) {
819 case ACPI_TYPE_STRING:
820 string = model->string.pointer;
821 break;
822 case ACPI_TYPE_BUFFER:
823 string = model->buffer.pointer;
824 break;
825 default:
826 string = "";
827 break;
828 }
829 }
830 hotk->name = kstrdup(string, GFP_KERNEL);
831 if (!hotk->name)
832 return -ENOMEM;
833
834 if (*string)
835 printk(ASUS_NOTICE " %s model detected\n", string);
836
837 ASUS_HANDLE_INIT(mled_set);
838 ASUS_HANDLE_INIT(tled_set);
839 ASUS_HANDLE_INIT(rled_set);
840 ASUS_HANDLE_INIT(pled_set);
841
842 ASUS_HANDLE_INIT(ledd_set);
843
844 /*
845 * The HWRS method return informations about the hardware.
846 * 0x80 bit is for WLAN, 0x100 for Bluetooth.
847 * The significance of others is yet to be found.
848 * If we don't find the method, we assume the device are present.
849 */
850 if (!read_acpi_int(hotk->handle, "HRWS", &hwrs_result, NULL))
851 hwrs_result = WL_HWRS | BT_HWRS;
852
853 if (hwrs_result & WL_HWRS)
854 ASUS_HANDLE_INIT(wl_switch);
855 if (hwrs_result & BT_HWRS)
856 ASUS_HANDLE_INIT(bt_switch);
857
858 ASUS_HANDLE_INIT(wireless_status);
859
860 ASUS_HANDLE_INIT(brightness_set);
861 ASUS_HANDLE_INIT(brightness_get);
862
863 ASUS_HANDLE_INIT(lcd_switch);
864
865 ASUS_HANDLE_INIT(display_set);
866 ASUS_HANDLE_INIT(display_get);
867
868 /* There is a lot of models with "ALSL", but a few get
869 a real light sens, so we need to check it. */
870 if (ASUS_HANDLE_INIT(ls_switch))
871 ASUS_HANDLE_INIT(ls_level);
872
873 kfree(model);
874
875 return AE_OK;
876}
877
878static int asus_hotk_check(void)
879{
880 int result = 0;
881
882 result = acpi_bus_get_status(hotk->device);
883 if (result)
884 return result;
885
886 if (hotk->device->status.present) {
887 result = asus_hotk_get_info();
888 } else {
889 printk(ASUS_ERR "Hotkey device not present, aborting\n");
890 return -EINVAL;
891 }
892
893 return result;
894}
895
/* Non-zero once asus_hotk_add() has successfully bound a hotkey device */
static int asus_hotk_found;
897
898static int asus_hotk_add(struct acpi_device *device)
899{
900 acpi_status status = AE_OK;
901 int result;
902
903 if (!device)
904 return -EINVAL;
905
906 printk(ASUS_NOTICE "Asus Laptop Support version %s\n",
907 ASUS_LAPTOP_VERSION);
908
909 hotk = kmalloc(sizeof(struct asus_hotk), GFP_KERNEL);
910 if (!hotk)
911 return -ENOMEM;
912 memset(hotk, 0, sizeof(struct asus_hotk));
913
914 hotk->handle = device->handle;
915 strcpy(acpi_device_name(device), ASUS_HOTK_DEVICE_NAME);
916 strcpy(acpi_device_class(device), ASUS_HOTK_CLASS);
917 acpi_driver_data(device) = hotk;
918 hotk->device = device;
919
920 result = asus_hotk_check();
921 if (result)
922 goto end;
923
924 asus_hotk_add_fs();
925
926 /*
927 * We install the handler, it will receive the hotk in parameter, so, we
928 * could add other data to the hotk struct
929 */
930 status = acpi_install_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY,
931 asus_hotk_notify, hotk);
932 if (ACPI_FAILURE(status))
933 printk(ASUS_ERR "Error installing notify handler\n");
934
935 asus_hotk_found = 1;
936
937 /* WLED and BLED are on by default */
938 write_status(bt_switch_handle, 1, BT_ON, 0);
939 write_status(wl_switch_handle, 1, WL_ON, 0);
940
941 /* LCD Backlight is on by default */
942 write_status(NULL, 1, LCD_ON, 0);
943
944 /* LED display is off by default */
945 hotk->ledd_status = 0xFFF;
946
947 /* Set initial values of light sensor and level */
948 hotk->light_switch = 1; /* Default to light sensor disabled */
949 hotk->light_level = 0; /* level 5 for sensor sensitivity */
950
951 if (ls_switch_handle)
952 set_light_sens_switch(hotk->light_switch);
953
954 if (ls_level_handle)
955 set_light_sens_level(hotk->light_level);
956
957 end:
958 if (result) {
959 kfree(hotk->name);
960 kfree(hotk);
961 }
962
963 return result;
964}
965
966static int asus_hotk_remove(struct acpi_device *device, int type)
967{
968 acpi_status status = 0;
969
970 if (!device || !acpi_driver_data(device))
971 return -EINVAL;
972
973 status = acpi_remove_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY,
974 asus_hotk_notify);
975 if (ACPI_FAILURE(status))
976 printk(ASUS_ERR "Error removing notify handler\n");
977
978 kfree(hotk->name);
979 kfree(hotk);
980
981 return 0;
982}
983
984static void asus_backlight_exit(void)
985{
986 if (asus_backlight_device)
987 backlight_device_unregister(asus_backlight_device);
988}
989
/*
 * Unregister a LED class device, but only if it was actually
 * registered: an unregistered led_classdev has a NULL or ERR_PTR
 * class_dev.  Wrapped in do { } while (0) so the macro expands to a
 * single statement and stays safe inside unbraced if/else bodies.
 */
#define ASUS_LED_UNREGISTER(object)				\
	do {							\
		if (object##_led.class_dev			\
		    && !IS_ERR(object##_led.class_dev))		\
			led_classdev_unregister(&object##_led);	\
	} while (0)
994
995static void asus_led_exit(void)
996{
997 ASUS_LED_UNREGISTER(mled);
998 ASUS_LED_UNREGISTER(tled);
999 ASUS_LED_UNREGISTER(pled);
1000 ASUS_LED_UNREGISTER(rled);
1001
1002 destroy_workqueue(led_workqueue);
1003}
1004
/*
 * Module exit: unregister the backlight and LED devices, drop the ACPI
 * driver, then remove the platform sysfs group, device and driver that
 * asus_laptop_init() created.
 */
static void __exit asus_laptop_exit(void)
{
	asus_backlight_exit();
	asus_led_exit();

	acpi_bus_unregister_driver(&asus_hotk_driver);
	/* Remove the attribute group before the device that hosts it */
	sysfs_remove_group(&asuspf_device->dev.kobj, &asuspf_attribute_group);
	platform_device_unregister(asuspf_device);
	platform_driver_unregister(&asuspf_driver);
}
1015
1016static int asus_backlight_init(struct device *dev)
1017{
1018 struct backlight_device *bd;
1019
1020 if (brightness_set_handle && lcd_switch_handle) {
1021 bd = backlight_device_register(ASUS_HOTK_FILE, dev,
1022 NULL, &asusbl_data);
1023 if (IS_ERR(bd)) {
1024 printk(ASUS_ERR
1025 "Could not register asus backlight device\n");
1026 asus_backlight_device = NULL;
1027 return PTR_ERR(bd);
1028 }
1029
1030 asus_backlight_device = bd;
1031
1032 down(&bd->sem);
1033 if (likely(bd->props)) {
1034 bd->props->brightness = read_brightness(NULL);
1035 bd->props->power = FB_BLANK_UNBLANK;
1036 if (likely(bd->props->update_status))
1037 bd->props->update_status(bd);
1038 }
1039 up(&bd->sem);
1040 }
1041 return 0;
1042}
1043
1044static int asus_led_register(acpi_handle handle,
1045 struct led_classdev *ldev, struct device *dev)
1046{
1047 if (!handle)
1048 return 0;
1049
1050 return led_classdev_register(dev, ldev);
1051}
1052
/* Register <object>_led iff the matching <object>_set_handle exists. */
#define ASUS_LED_REGISTER(object, device) \
	asus_led_register(object##_set_handle, &object##_led, device)
1055
1056static int asus_led_init(struct device *dev)
1057{
1058 int rv;
1059
1060 rv = ASUS_LED_REGISTER(mled, dev);
1061 if (rv)
1062 return rv;
1063
1064 rv = ASUS_LED_REGISTER(tled, dev);
1065 if (rv)
1066 return rv;
1067
1068 rv = ASUS_LED_REGISTER(rled, dev);
1069 if (rv)
1070 return rv;
1071
1072 rv = ASUS_LED_REGISTER(pled, dev);
1073 if (rv)
1074 return rv;
1075
1076 led_workqueue = create_singlethread_workqueue("led_workqueue");
1077 if (!led_workqueue)
1078 return -ENOMEM;
1079
1080 return 0;
1081}
1082
/*
 * Module init: register the ACPI driver and, if an ASUS hotkey device
 * was bound by asus_hotk_add(), set up the backlight, the LEDs and the
 * platform device with its sysfs attribute group.  Returns 0 on
 * success or a negative errno value.
 */
static int __init asus_laptop_init(void)
{
	struct device *dev;
	int result;

	if (acpi_disabled)
		return -ENODEV;

	if (!acpi_specific_hotkey_enabled) {
		printk(ASUS_ERR "Using generic hotkey driver\n");
		return -ENODEV;
	}

	result = acpi_bus_register_driver(&asus_hotk_driver);
	if (result < 0)
		return result;

	/*
	 * This is a bit of a kludge. We only want this module loaded
	 * for ASUS systems, but there's currently no way to probe the
	 * ACPI namespace for ASUS HIDs. So we just return failure if
	 * we didn't find one, which will cause the module to be
	 * unloaded.
	 */
	if (!asus_hotk_found) {
		acpi_bus_unregister_driver(&asus_hotk_driver);
		return -ENODEV;
	}

	/* NOTE(review): acpi_get_physical_device() may return a
	 * referenced struct device that is never put here -- confirm
	 * against the ACPI core and add a put_device() if needed. */
	dev = acpi_get_physical_device(hotk->device->handle);

	result = asus_backlight_init(dev);
	if (result)
		goto fail_backlight;

	result = asus_led_init(dev);
	if (result)
		goto fail_led;

	/* Register platform stuff */
	result = platform_driver_register(&asuspf_driver);
	if (result)
		goto fail_platform_driver;

	asuspf_device = platform_device_alloc(ASUS_HOTK_FILE, -1);
	if (!asuspf_device) {
		result = -ENOMEM;
		goto fail_platform_device1;
	}

	result = platform_device_add(asuspf_device);
	if (result)
		goto fail_platform_device2;

	result = sysfs_create_group(&asuspf_device->dev.kobj,
				    &asuspf_attribute_group);
	if (result)
		goto fail_sysfs;

	return 0;

	/* Error unwinding: each label undoes the step above it. */
      fail_sysfs:
	platform_device_del(asuspf_device);

      fail_platform_device2:
	platform_device_put(asuspf_device);

      fail_platform_device1:
	platform_driver_unregister(&asuspf_driver);

      fail_platform_driver:
	asus_led_exit();

      fail_led:
	asus_backlight_exit();

      fail_backlight:
	/* NOTE(review): the ACPI driver registered above is not
	 * unregistered on these failure paths -- verify whether
	 * acpi_bus_unregister_driver() should be called here too. */
	return result;
}
1163
/* Standard module entry and exit points. */
module_init(asus_laptop_init);
module_exit(asus_laptop_exit);
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index 2ab7add78f94..e21e490fedb0 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -11,66 +11,25 @@
11 11
12#include <linux/tifm.h> 12#include <linux/tifm.h>
13#include <linux/dma-mapping.h> 13#include <linux/dma-mapping.h>
14#include <linux/freezer.h>
14 15
15#define DRIVER_NAME "tifm_7xx1" 16#define DRIVER_NAME "tifm_7xx1"
16#define DRIVER_VERSION "0.6" 17#define DRIVER_VERSION "0.7"
17 18
18static void tifm_7xx1_eject(struct tifm_adapter *fm, struct tifm_dev *sock) 19static void tifm_7xx1_eject(struct tifm_adapter *fm, struct tifm_dev *sock)
19{ 20{
20 int cnt;
21 unsigned long flags;
22
23 spin_lock_irqsave(&fm->lock, flags);
24 if (!fm->inhibit_new_cards) {
25 for (cnt = 0; cnt < fm->max_sockets; cnt++) {
26 if (fm->sockets[cnt] == sock) {
27 fm->remove_mask |= (1 << cnt);
28 queue_work(fm->wq, &fm->media_remover);
29 break;
30 }
31 }
32 }
33 spin_unlock_irqrestore(&fm->lock, flags);
34}
35
36static void tifm_7xx1_remove_media(struct work_struct *work)
37{
38 struct tifm_adapter *fm =
39 container_of(work, struct tifm_adapter, media_remover);
40 unsigned long flags; 21 unsigned long flags;
41 int cnt;
42 struct tifm_dev *sock;
43 22
44 if (!class_device_get(&fm->cdev))
45 return;
46 spin_lock_irqsave(&fm->lock, flags); 23 spin_lock_irqsave(&fm->lock, flags);
47 for (cnt = 0; cnt < fm->max_sockets; cnt++) { 24 fm->socket_change_set |= 1 << sock->socket_id;
48 if (fm->sockets[cnt] && (fm->remove_mask & (1 << cnt))) { 25 wake_up_all(&fm->change_set_notify);
49 printk(KERN_INFO DRIVER_NAME
50 ": demand removing card from socket %d\n", cnt);
51 sock = fm->sockets[cnt];
52 fm->sockets[cnt] = NULL;
53 fm->remove_mask &= ~(1 << cnt);
54
55 writel(0x0e00, sock->addr + SOCK_CONTROL);
56
57 writel((TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK) << cnt,
58 fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
59 writel((TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK) << cnt,
60 fm->addr + FM_SET_INTERRUPT_ENABLE);
61
62 spin_unlock_irqrestore(&fm->lock, flags);
63 device_unregister(&sock->dev);
64 spin_lock_irqsave(&fm->lock, flags);
65 }
66 }
67 spin_unlock_irqrestore(&fm->lock, flags); 26 spin_unlock_irqrestore(&fm->lock, flags);
68 class_device_put(&fm->cdev);
69} 27}
70 28
71static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id) 29static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id)
72{ 30{
73 struct tifm_adapter *fm = dev_id; 31 struct tifm_adapter *fm = dev_id;
32 struct tifm_dev *sock;
74 unsigned int irq_status; 33 unsigned int irq_status;
75 unsigned int sock_irq_status, cnt; 34 unsigned int sock_irq_status, cnt;
76 35
@@ -84,42 +43,32 @@ static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id)
84 if (irq_status & TIFM_IRQ_ENABLE) { 43 if (irq_status & TIFM_IRQ_ENABLE) {
85 writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); 44 writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
86 45
87 for (cnt = 0; cnt < fm->max_sockets; cnt++) { 46 for (cnt = 0; cnt < fm->num_sockets; cnt++) {
88 sock_irq_status = (irq_status >> cnt) & 47 sock = fm->sockets[cnt];
89 (TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK); 48 sock_irq_status = (irq_status >> cnt)
90 49 & (TIFM_IRQ_FIFOMASK(1)
91 if (fm->sockets[cnt]) { 50 | TIFM_IRQ_CARDMASK(1));
92 if (sock_irq_status &&
93 fm->sockets[cnt]->signal_irq)
94 sock_irq_status = fm->sockets[cnt]->
95 signal_irq(fm->sockets[cnt],
96 sock_irq_status);
97 51
98 if (irq_status & (1 << cnt)) 52 if (sock && sock_irq_status)
99 fm->remove_mask |= 1 << cnt; 53 sock->signal_irq(sock, sock_irq_status);
100 } else {
101 if (irq_status & (1 << cnt))
102 fm->insert_mask |= 1 << cnt;
103 }
104 } 54 }
55
56 fm->socket_change_set |= irq_status
57 & ((1 << fm->num_sockets) - 1);
105 } 58 }
106 writel(irq_status, fm->addr + FM_INTERRUPT_STATUS); 59 writel(irq_status, fm->addr + FM_INTERRUPT_STATUS);
107 60
108 if (!fm->inhibit_new_cards) { 61 if (!fm->socket_change_set)
109 if (!fm->remove_mask && !fm->insert_mask) { 62 writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE);
110 writel(TIFM_IRQ_ENABLE, 63 else
111 fm->addr + FM_SET_INTERRUPT_ENABLE); 64 wake_up_all(&fm->change_set_notify);
112 } else {
113 queue_work(fm->wq, &fm->media_remover);
114 queue_work(fm->wq, &fm->media_inserter);
115 }
116 }
117 65
118 spin_unlock(&fm->lock); 66 spin_unlock(&fm->lock);
119 return IRQ_HANDLED; 67 return IRQ_HANDLED;
120} 68}
121 69
122static tifm_media_id tifm_7xx1_toggle_sock_power(char __iomem *sock_addr, int is_x2) 70static tifm_media_id tifm_7xx1_toggle_sock_power(char __iomem *sock_addr,
71 int is_x2)
123{ 72{
124 unsigned int s_state; 73 unsigned int s_state;
125 int cnt; 74 int cnt;
@@ -127,8 +76,8 @@ static tifm_media_id tifm_7xx1_toggle_sock_power(char __iomem *sock_addr, int is
127 writel(0x0e00, sock_addr + SOCK_CONTROL); 76 writel(0x0e00, sock_addr + SOCK_CONTROL);
128 77
129 for (cnt = 0; cnt < 100; cnt++) { 78 for (cnt = 0; cnt < 100; cnt++) {
130 if (!(TIFM_SOCK_STATE_POWERED & 79 if (!(TIFM_SOCK_STATE_POWERED
131 readl(sock_addr + SOCK_PRESENT_STATE))) 80 & readl(sock_addr + SOCK_PRESENT_STATE)))
132 break; 81 break;
133 msleep(10); 82 msleep(10);
134 } 83 }
@@ -151,8 +100,8 @@ static tifm_media_id tifm_7xx1_toggle_sock_power(char __iomem *sock_addr, int is
151 } 100 }
152 101
153 for (cnt = 0; cnt < 100; cnt++) { 102 for (cnt = 0; cnt < 100; cnt++) {
154 if ((TIFM_SOCK_STATE_POWERED & 103 if ((TIFM_SOCK_STATE_POWERED
155 readl(sock_addr + SOCK_PRESENT_STATE))) 104 & readl(sock_addr + SOCK_PRESENT_STATE)))
156 break; 105 break;
157 msleep(10); 106 msleep(10);
158 } 107 }
@@ -170,130 +119,209 @@ tifm_7xx1_sock_addr(char __iomem *base_addr, unsigned int sock_num)
170 return base_addr + ((sock_num + 1) << 10); 119 return base_addr + ((sock_num + 1) << 10);
171} 120}
172 121
173static void tifm_7xx1_insert_media(struct work_struct *work) 122static int tifm_7xx1_switch_media(void *data)
174{ 123{
175 struct tifm_adapter *fm = 124 struct tifm_adapter *fm = data;
176 container_of(work, struct tifm_adapter, media_inserter);
177 unsigned long flags; 125 unsigned long flags;
178 tifm_media_id media_id; 126 tifm_media_id media_id;
179 char *card_name = "xx"; 127 char *card_name = "xx";
180 int cnt, ok_to_register; 128 int cnt, rc;
181 unsigned int insert_mask; 129 struct tifm_dev *sock;
182 struct tifm_dev *new_sock = NULL; 130 unsigned int socket_change_set;
183 131
184 if (!class_device_get(&fm->cdev)) 132 while (1) {
185 return; 133 rc = wait_event_interruptible(fm->change_set_notify,
186 spin_lock_irqsave(&fm->lock, flags); 134 fm->socket_change_set);
187 insert_mask = fm->insert_mask; 135 if (rc == -ERESTARTSYS)
188 fm->insert_mask = 0; 136 try_to_freeze();
189 if (fm->inhibit_new_cards) { 137
138 spin_lock_irqsave(&fm->lock, flags);
139 socket_change_set = fm->socket_change_set;
140 fm->socket_change_set = 0;
141
142 dev_dbg(fm->dev, "checking media set %x\n",
143 socket_change_set);
144
145 if (kthread_should_stop())
146 socket_change_set = (1 << fm->num_sockets) - 1;
190 spin_unlock_irqrestore(&fm->lock, flags); 147 spin_unlock_irqrestore(&fm->lock, flags);
191 class_device_put(&fm->cdev);
192 return;
193 }
194 spin_unlock_irqrestore(&fm->lock, flags);
195 148
196 for (cnt = 0; cnt < fm->max_sockets; cnt++) { 149 if (!socket_change_set)
197 if (!(insert_mask & (1 << cnt)))
198 continue; 150 continue;
199 151
200 media_id = tifm_7xx1_toggle_sock_power(tifm_7xx1_sock_addr(fm->addr, cnt), 152 spin_lock_irqsave(&fm->lock, flags);
201 fm->max_sockets == 2); 153 for (cnt = 0; cnt < fm->num_sockets; cnt++) {
202 if (media_id) { 154 if (!(socket_change_set & (1 << cnt)))
203 ok_to_register = 0; 155 continue;
204 new_sock = tifm_alloc_device(fm, cnt); 156 sock = fm->sockets[cnt];
205 if (new_sock) { 157 if (sock) {
206 new_sock->addr = tifm_7xx1_sock_addr(fm->addr,
207 cnt);
208 new_sock->media_id = media_id;
209 switch (media_id) {
210 case 1:
211 card_name = "xd";
212 break;
213 case 2:
214 card_name = "ms";
215 break;
216 case 3:
217 card_name = "sd";
218 break;
219 default:
220 break;
221 }
222 snprintf(new_sock->dev.bus_id, BUS_ID_SIZE,
223 "tifm_%s%u:%u", card_name, fm->id, cnt);
224 printk(KERN_INFO DRIVER_NAME 158 printk(KERN_INFO DRIVER_NAME
225 ": %s card detected in socket %d\n", 159 ": demand removing card from socket %d\n",
226 card_name, cnt); 160 cnt);
161 fm->sockets[cnt] = NULL;
162 spin_unlock_irqrestore(&fm->lock, flags);
163 device_unregister(&sock->dev);
227 spin_lock_irqsave(&fm->lock, flags); 164 spin_lock_irqsave(&fm->lock, flags);
228 if (!fm->sockets[cnt]) { 165 writel(0x0e00,
229 fm->sockets[cnt] = new_sock; 166 tifm_7xx1_sock_addr(fm->addr, cnt)
230 ok_to_register = 1; 167 + SOCK_CONTROL);
168 }
169 if (kthread_should_stop())
170 continue;
171
172 spin_unlock_irqrestore(&fm->lock, flags);
173 media_id = tifm_7xx1_toggle_sock_power(
174 tifm_7xx1_sock_addr(fm->addr, cnt),
175 fm->num_sockets == 2);
176 if (media_id) {
177 sock = tifm_alloc_device(fm);
178 if (sock) {
179 sock->addr = tifm_7xx1_sock_addr(fm->addr,
180 cnt);
181 sock->media_id = media_id;
182 sock->socket_id = cnt;
183 switch (media_id) {
184 case 1:
185 card_name = "xd";
186 break;
187 case 2:
188 card_name = "ms";
189 break;
190 case 3:
191 card_name = "sd";
192 break;
193 default:
194 tifm_free_device(&sock->dev);
195 spin_lock_irqsave(&fm->lock, flags);
196 continue;
197 }
198 snprintf(sock->dev.bus_id, BUS_ID_SIZE,
199 "tifm_%s%u:%u", card_name,
200 fm->id, cnt);
201 printk(KERN_INFO DRIVER_NAME
202 ": %s card detected in socket %d\n",
203 card_name, cnt);
204 if (!device_register(&sock->dev)) {
205 spin_lock_irqsave(&fm->lock, flags);
206 if (!fm->sockets[cnt]) {
207 fm->sockets[cnt] = sock;
208 sock = NULL;
209 }
210 spin_unlock_irqrestore(&fm->lock, flags);
211 }
212 if (sock)
213 tifm_free_device(&sock->dev);
231 } 214 }
215 spin_lock_irqsave(&fm->lock, flags);
216 }
217 }
218
219 if (!kthread_should_stop()) {
220 writel(TIFM_IRQ_FIFOMASK(socket_change_set)
221 | TIFM_IRQ_CARDMASK(socket_change_set),
222 fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
223 writel(TIFM_IRQ_FIFOMASK(socket_change_set)
224 | TIFM_IRQ_CARDMASK(socket_change_set),
225 fm->addr + FM_SET_INTERRUPT_ENABLE);
226 writel(TIFM_IRQ_ENABLE,
227 fm->addr + FM_SET_INTERRUPT_ENABLE);
228 spin_unlock_irqrestore(&fm->lock, flags);
229 } else {
230 for (cnt = 0; cnt < fm->num_sockets; cnt++) {
231 if (fm->sockets[cnt])
232 fm->socket_change_set |= 1 << cnt;
233 }
234 if (!fm->socket_change_set) {
235 spin_unlock_irqrestore(&fm->lock, flags);
236 return 0;
237 } else {
232 spin_unlock_irqrestore(&fm->lock, flags); 238 spin_unlock_irqrestore(&fm->lock, flags);
233 if (!ok_to_register ||
234 device_register(&new_sock->dev)) {
235 spin_lock_irqsave(&fm->lock, flags);
236 fm->sockets[cnt] = NULL;
237 spin_unlock_irqrestore(&fm->lock,
238 flags);
239 tifm_free_device(&new_sock->dev);
240 }
241 } 239 }
242 } 240 }
243 writel((TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK) << cnt,
244 fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
245 writel((TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK) << cnt,
246 fm->addr + FM_SET_INTERRUPT_ENABLE);
247 } 241 }
248 242 return 0;
249 writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE);
250 class_device_put(&fm->cdev);
251} 243}
252 244
245#ifdef CONFIG_PM
246
253static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state) 247static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state)
254{ 248{
255 struct tifm_adapter *fm = pci_get_drvdata(dev); 249 dev_dbg(&dev->dev, "suspending host\n");
256 unsigned long flags;
257 250
258 spin_lock_irqsave(&fm->lock, flags); 251 pci_save_state(dev);
259 fm->inhibit_new_cards = 1; 252 pci_enable_wake(dev, pci_choose_state(dev, state), 0);
260 fm->remove_mask = 0xf; 253 pci_disable_device(dev);
261 fm->insert_mask = 0; 254 pci_set_power_state(dev, pci_choose_state(dev, state));
262 writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
263 spin_unlock_irqrestore(&fm->lock, flags);
264 flush_workqueue(fm->wq);
265
266 tifm_7xx1_remove_media(&fm->media_remover);
267
268 pci_set_power_state(dev, PCI_D3hot);
269 pci_disable_device(dev);
270 pci_save_state(dev);
271 return 0; 255 return 0;
272} 256}
273 257
274static int tifm_7xx1_resume(struct pci_dev *dev) 258static int tifm_7xx1_resume(struct pci_dev *dev)
275{ 259{
276 struct tifm_adapter *fm = pci_get_drvdata(dev); 260 struct tifm_adapter *fm = pci_get_drvdata(dev);
261 int cnt, rc;
277 unsigned long flags; 262 unsigned long flags;
263 tifm_media_id new_ids[fm->num_sockets];
278 264
265 pci_set_power_state(dev, PCI_D0);
279 pci_restore_state(dev); 266 pci_restore_state(dev);
280 pci_enable_device(dev); 267 rc = pci_enable_device(dev);
281 pci_set_power_state(dev, PCI_D0); 268 if (rc)
282 pci_set_master(dev); 269 return rc;
270 pci_set_master(dev);
283 271
272 dev_dbg(&dev->dev, "resuming host\n");
273
274 for (cnt = 0; cnt < fm->num_sockets; cnt++)
275 new_ids[cnt] = tifm_7xx1_toggle_sock_power(
276 tifm_7xx1_sock_addr(fm->addr, cnt),
277 fm->num_sockets == 2);
284 spin_lock_irqsave(&fm->lock, flags); 278 spin_lock_irqsave(&fm->lock, flags);
285 fm->inhibit_new_cards = 0; 279 fm->socket_change_set = 0;
286 writel(TIFM_IRQ_SETALL, fm->addr + FM_INTERRUPT_STATUS); 280 for (cnt = 0; cnt < fm->num_sockets; cnt++) {
287 writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); 281 if (fm->sockets[cnt]) {
288 writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SETALLSOCK, 282 if (fm->sockets[cnt]->media_id == new_ids[cnt])
289 fm->addr + FM_SET_INTERRUPT_ENABLE); 283 fm->socket_change_set |= 1 << cnt;
290 fm->insert_mask = 0xf; 284
285 fm->sockets[cnt]->media_id = new_ids[cnt];
286 }
287 }
288
289 writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1),
290 fm->addr + FM_SET_INTERRUPT_ENABLE);
291 if (!fm->socket_change_set) {
292 spin_unlock_irqrestore(&fm->lock, flags);
293 return 0;
294 } else {
295 fm->socket_change_set = 0;
296 spin_unlock_irqrestore(&fm->lock, flags);
297 }
298
299 wait_event_timeout(fm->change_set_notify, fm->socket_change_set, HZ);
300
301 spin_lock_irqsave(&fm->lock, flags);
302 writel(TIFM_IRQ_FIFOMASK(fm->socket_change_set)
303 | TIFM_IRQ_CARDMASK(fm->socket_change_set),
304 fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
305 writel(TIFM_IRQ_FIFOMASK(fm->socket_change_set)
306 | TIFM_IRQ_CARDMASK(fm->socket_change_set),
307 fm->addr + FM_SET_INTERRUPT_ENABLE);
308 writel(TIFM_IRQ_ENABLE,
309 fm->addr + FM_SET_INTERRUPT_ENABLE);
310 fm->socket_change_set = 0;
311
291 spin_unlock_irqrestore(&fm->lock, flags); 312 spin_unlock_irqrestore(&fm->lock, flags);
292 return 0; 313 return 0;
293} 314}
294 315
316#else
317
318#define tifm_7xx1_suspend NULL
319#define tifm_7xx1_resume NULL
320
321#endif /* CONFIG_PM */
322
295static int tifm_7xx1_probe(struct pci_dev *dev, 323static int tifm_7xx1_probe(struct pci_dev *dev,
296 const struct pci_device_id *dev_id) 324 const struct pci_device_id *dev_id)
297{ 325{
298 struct tifm_adapter *fm; 326 struct tifm_adapter *fm;
299 int pci_dev_busy = 0; 327 int pci_dev_busy = 0;
@@ -324,19 +352,18 @@ static int tifm_7xx1_probe(struct pci_dev *dev,
324 } 352 }
325 353
326 fm->dev = &dev->dev; 354 fm->dev = &dev->dev;
327 fm->max_sockets = (dev->device == 0x803B) ? 2 : 4; 355 fm->num_sockets = (dev->device == PCI_DEVICE_ID_TI_XX21_XX11_FM)
328 fm->sockets = kzalloc(sizeof(struct tifm_dev*) * fm->max_sockets, 356 ? 4 : 2;
329 GFP_KERNEL); 357 fm->sockets = kzalloc(sizeof(struct tifm_dev*) * fm->num_sockets,
358 GFP_KERNEL);
330 if (!fm->sockets) 359 if (!fm->sockets)
331 goto err_out_free; 360 goto err_out_free;
332 361
333 INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media);
334 INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media);
335 fm->eject = tifm_7xx1_eject; 362 fm->eject = tifm_7xx1_eject;
336 pci_set_drvdata(dev, fm); 363 pci_set_drvdata(dev, fm);
337 364
338 fm->addr = ioremap(pci_resource_start(dev, 0), 365 fm->addr = ioremap(pci_resource_start(dev, 0),
339 pci_resource_len(dev, 0)); 366 pci_resource_len(dev, 0));
340 if (!fm->addr) 367 if (!fm->addr)
341 goto err_out_free; 368 goto err_out_free;
342 369
@@ -344,16 +371,15 @@ static int tifm_7xx1_probe(struct pci_dev *dev,
344 if (rc) 371 if (rc)
345 goto err_out_unmap; 372 goto err_out_unmap;
346 373
347 rc = tifm_add_adapter(fm); 374 init_waitqueue_head(&fm->change_set_notify);
375 rc = tifm_add_adapter(fm, tifm_7xx1_switch_media);
348 if (rc) 376 if (rc)
349 goto err_out_irq; 377 goto err_out_irq;
350 378
351 writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); 379 writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
352 writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SETALLSOCK, 380 writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1),
353 fm->addr + FM_SET_INTERRUPT_ENABLE); 381 fm->addr + FM_SET_INTERRUPT_ENABLE);
354 382 wake_up_process(fm->media_switcher);
355 fm->insert_mask = 0xf;
356
357 return 0; 383 return 0;
358 384
359err_out_irq: 385err_out_irq:
@@ -377,19 +403,15 @@ static void tifm_7xx1_remove(struct pci_dev *dev)
377 struct tifm_adapter *fm = pci_get_drvdata(dev); 403 struct tifm_adapter *fm = pci_get_drvdata(dev);
378 unsigned long flags; 404 unsigned long flags;
379 405
406 writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
407 mmiowb();
408 free_irq(dev->irq, fm);
409
380 spin_lock_irqsave(&fm->lock, flags); 410 spin_lock_irqsave(&fm->lock, flags);
381 fm->inhibit_new_cards = 1; 411 fm->socket_change_set = (1 << fm->num_sockets) - 1;
382 fm->remove_mask = 0xf;
383 fm->insert_mask = 0;
384 writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
385 spin_unlock_irqrestore(&fm->lock, flags); 412 spin_unlock_irqrestore(&fm->lock, flags);
386 413
387 flush_workqueue(fm->wq); 414 kthread_stop(fm->media_switcher);
388
389 tifm_7xx1_remove_media(&fm->media_remover);
390
391 writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
392 free_irq(dev->irq, fm);
393 415
394 tifm_remove_adapter(fm); 416 tifm_remove_adapter(fm);
395 417
@@ -404,10 +426,12 @@ static void tifm_7xx1_remove(struct pci_dev *dev)
404} 426}
405 427
406static struct pci_device_id tifm_7xx1_pci_tbl [] = { 428static struct pci_device_id tifm_7xx1_pci_tbl [] = {
407 { PCI_VENDOR_ID_TI, 0x8033, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 429 { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX21_XX11_FM, PCI_ANY_ID,
408 0 }, /* xx21 - the one I have */ 430 PCI_ANY_ID, 0, 0, 0 }, /* xx21 - the one I have */
409 { PCI_VENDOR_ID_TI, 0x803B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 431 { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX12_FM, PCI_ANY_ID,
410 0 }, /* xx12 - should be also supported */ 432 PCI_ANY_ID, 0, 0, 0 },
433 { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX20_FM, PCI_ANY_ID,
434 PCI_ANY_ID, 0, 0, 0 },
411 { } 435 { }
412}; 436};
413 437
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index d61df5c3ac36..6b10ebe9d936 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -14,7 +14,7 @@
14#include <linux/idr.h> 14#include <linux/idr.h>
15 15
16#define DRIVER_NAME "tifm_core" 16#define DRIVER_NAME "tifm_core"
17#define DRIVER_VERSION "0.6" 17#define DRIVER_VERSION "0.7"
18 18
19static DEFINE_IDR(tifm_adapter_idr); 19static DEFINE_IDR(tifm_adapter_idr);
20static DEFINE_SPINLOCK(tifm_adapter_lock); 20static DEFINE_SPINLOCK(tifm_adapter_lock);
@@ -60,10 +60,41 @@ static int tifm_uevent(struct device *dev, char **envp, int num_envp,
60 return 0; 60 return 0;
61} 61}
62 62
63#ifdef CONFIG_PM
64
65static int tifm_device_suspend(struct device *dev, pm_message_t state)
66{
67 struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev);
68 struct tifm_driver *drv = fm_dev->drv;
69
70 if (drv && drv->suspend)
71 return drv->suspend(fm_dev, state);
72 return 0;
73}
74
75static int tifm_device_resume(struct device *dev)
76{
77 struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev);
78 struct tifm_driver *drv = fm_dev->drv;
79
80 if (drv && drv->resume)
81 return drv->resume(fm_dev);
82 return 0;
83}
84
85#else
86
87#define tifm_device_suspend NULL
88#define tifm_device_resume NULL
89
90#endif /* CONFIG_PM */
91
63static struct bus_type tifm_bus_type = { 92static struct bus_type tifm_bus_type = {
64 .name = "tifm", 93 .name = "tifm",
65 .match = tifm_match, 94 .match = tifm_match,
66 .uevent = tifm_uevent, 95 .uevent = tifm_uevent,
96 .suspend = tifm_device_suspend,
97 .resume = tifm_device_resume
67}; 98};
68 99
69static void tifm_free(struct class_device *cdev) 100static void tifm_free(struct class_device *cdev)
@@ -71,8 +102,6 @@ static void tifm_free(struct class_device *cdev)
71 struct tifm_adapter *fm = container_of(cdev, struct tifm_adapter, cdev); 102 struct tifm_adapter *fm = container_of(cdev, struct tifm_adapter, cdev);
72 103
73 kfree(fm->sockets); 104 kfree(fm->sockets);
74 if (fm->wq)
75 destroy_workqueue(fm->wq);
76 kfree(fm); 105 kfree(fm);
77} 106}
78 107
@@ -101,7 +130,8 @@ void tifm_free_adapter(struct tifm_adapter *fm)
101} 130}
102EXPORT_SYMBOL(tifm_free_adapter); 131EXPORT_SYMBOL(tifm_free_adapter);
103 132
104int tifm_add_adapter(struct tifm_adapter *fm) 133int tifm_add_adapter(struct tifm_adapter *fm,
134 int (*mediathreadfn)(void *data))
105{ 135{
106 int rc; 136 int rc;
107 137
@@ -113,10 +143,10 @@ int tifm_add_adapter(struct tifm_adapter *fm)
113 spin_unlock(&tifm_adapter_lock); 143 spin_unlock(&tifm_adapter_lock);
114 if (!rc) { 144 if (!rc) {
115 snprintf(fm->cdev.class_id, BUS_ID_SIZE, "tifm%u", fm->id); 145 snprintf(fm->cdev.class_id, BUS_ID_SIZE, "tifm%u", fm->id);
116 strncpy(fm->wq_name, fm->cdev.class_id, KOBJ_NAME_LEN); 146 fm->media_switcher = kthread_create(mediathreadfn,
147 fm, "tifm/%u", fm->id);
117 148
118 fm->wq = create_singlethread_workqueue(fm->wq_name); 149 if (!IS_ERR(fm->media_switcher))
119 if (fm->wq)
120 return class_device_add(&fm->cdev); 150 return class_device_add(&fm->cdev);
121 151
122 spin_lock(&tifm_adapter_lock); 152 spin_lock(&tifm_adapter_lock);
@@ -141,27 +171,27 @@ EXPORT_SYMBOL(tifm_remove_adapter);
141void tifm_free_device(struct device *dev) 171void tifm_free_device(struct device *dev)
142{ 172{
143 struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev); 173 struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev);
144 if (fm_dev->wq)
145 destroy_workqueue(fm_dev->wq);
146 kfree(fm_dev); 174 kfree(fm_dev);
147} 175}
148EXPORT_SYMBOL(tifm_free_device); 176EXPORT_SYMBOL(tifm_free_device);
149 177
150struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm, unsigned int id) 178static void tifm_dummy_signal_irq(struct tifm_dev *sock,
179 unsigned int sock_irq_status)
180{
181 return;
182}
183
184struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm)
151{ 185{
152 struct tifm_dev *dev = kzalloc(sizeof(struct tifm_dev), GFP_KERNEL); 186 struct tifm_dev *dev = kzalloc(sizeof(struct tifm_dev), GFP_KERNEL);
153 187
154 if (dev) { 188 if (dev) {
155 spin_lock_init(&dev->lock); 189 spin_lock_init(&dev->lock);
156 snprintf(dev->wq_name, KOBJ_NAME_LEN, "tifm%u:%u", fm->id, id); 190
157 dev->wq = create_singlethread_workqueue(dev->wq_name);
158 if (!dev->wq) {
159 kfree(dev);
160 return NULL;
161 }
162 dev->dev.parent = fm->dev; 191 dev->dev.parent = fm->dev;
163 dev->dev.bus = &tifm_bus_type; 192 dev->dev.bus = &tifm_bus_type;
164 dev->dev.release = tifm_free_device; 193 dev->dev.release = tifm_free_device;
194 dev->signal_irq = tifm_dummy_signal_irq;
165 } 195 }
166 return dev; 196 return dev;
167} 197}
@@ -219,6 +249,7 @@ static int tifm_device_remove(struct device *dev)
219 struct tifm_driver *drv = fm_dev->drv; 249 struct tifm_driver *drv = fm_dev->drv;
220 250
221 if (drv) { 251 if (drv) {
252 fm_dev->signal_irq = tifm_dummy_signal_irq;
222 if (drv->remove) 253 if (drv->remove)
223 drv->remove(fm_dev); 254 drv->remove(fm_dev);
224 fm_dev->drv = NULL; 255 fm_dev->drv = NULL;
@@ -233,6 +264,8 @@ int tifm_register_driver(struct tifm_driver *drv)
233 drv->driver.bus = &tifm_bus_type; 264 drv->driver.bus = &tifm_bus_type;
234 drv->driver.probe = tifm_device_probe; 265 drv->driver.probe = tifm_device_probe;
235 drv->driver.remove = tifm_device_remove; 266 drv->driver.remove = tifm_device_remove;
267 drv->driver.suspend = tifm_device_suspend;
268 drv->driver.resume = tifm_device_resume;
236 269
237 return driver_register(&drv->driver); 270 return driver_register(&drv->driver);
238} 271}
diff --git a/drivers/mmc/at91_mci.c b/drivers/mmc/at91_mci.c
index aa152f31851e..2ce50f38e3c7 100644
--- a/drivers/mmc/at91_mci.c
+++ b/drivers/mmc/at91_mci.c
@@ -823,6 +823,9 @@ static int __init at91_mci_probe(struct platform_device *pdev)
823 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 823 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
824 mmc->caps = MMC_CAP_BYTEBLOCK; 824 mmc->caps = MMC_CAP_BYTEBLOCK;
825 825
826 mmc->max_blk_size = 4095;
827 mmc->max_blk_count = mmc->max_req_size;
828
826 host = mmc_priv(mmc); 829 host = mmc_priv(mmc);
827 host->mmc = mmc; 830 host->mmc = mmc;
828 host->buffer = NULL; 831 host->buffer = NULL;
diff --git a/drivers/mmc/au1xmmc.c b/drivers/mmc/au1xmmc.c
index 800527cf40d5..b834be261ab7 100644
--- a/drivers/mmc/au1xmmc.c
+++ b/drivers/mmc/au1xmmc.c
@@ -152,8 +152,9 @@ static inline int au1xmmc_card_inserted(struct au1xmmc_host *host)
152 ? 1 : 0; 152 ? 1 : 0;
153} 153}
154 154
155static inline int au1xmmc_card_readonly(struct au1xmmc_host *host) 155static int au1xmmc_card_readonly(struct mmc_host *mmc)
156{ 156{
157 struct au1xmmc_host *host = mmc_priv(mmc);
157 return (bcsr->status & au1xmmc_card_table[host->id].wpstatus) 158 return (bcsr->status & au1xmmc_card_table[host->id].wpstatus)
158 ? 1 : 0; 159 ? 1 : 0;
159} 160}
@@ -193,6 +194,8 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
193 u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT); 194 u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);
194 195
195 switch (mmc_resp_type(cmd)) { 196 switch (mmc_resp_type(cmd)) {
197 case MMC_RSP_NONE:
198 break;
196 case MMC_RSP_R1: 199 case MMC_RSP_R1:
197 mmccmd |= SD_CMD_RT_1; 200 mmccmd |= SD_CMD_RT_1;
198 break; 201 break;
@@ -205,6 +208,10 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
205 case MMC_RSP_R3: 208 case MMC_RSP_R3:
206 mmccmd |= SD_CMD_RT_3; 209 mmccmd |= SD_CMD_RT_3;
207 break; 210 break;
211 default:
212 printk(KERN_INFO "au1xmmc: unhandled response type %02x\n",
213 mmc_resp_type(cmd));
214 return MMC_ERR_INVALID;
208 } 215 }
209 216
210 switch(cmd->opcode) { 217 switch(cmd->opcode) {
@@ -878,6 +885,7 @@ static void au1xmmc_init_dma(struct au1xmmc_host *host)
878static const struct mmc_host_ops au1xmmc_ops = { 885static const struct mmc_host_ops au1xmmc_ops = {
879 .request = au1xmmc_request, 886 .request = au1xmmc_request,
880 .set_ios = au1xmmc_set_ios, 887 .set_ios = au1xmmc_set_ios,
888 .get_ro = au1xmmc_card_readonly,
881}; 889};
882 890
883static int __devinit au1xmmc_probe(struct platform_device *pdev) 891static int __devinit au1xmmc_probe(struct platform_device *pdev)
@@ -914,6 +922,9 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
914 mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE; 922 mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE;
915 mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT; 923 mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT;
916 924
925 mmc->max_blk_size = 2048;
926 mmc->max_blk_count = 512;
927
917 mmc->ocr_avail = AU1XMMC_OCR; 928 mmc->ocr_avail = AU1XMMC_OCR;
918 929
919 host = mmc_priv(mmc); 930 host = mmc_priv(mmc);
diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c
index bfb9ff693208..b060d4bfba29 100644
--- a/drivers/mmc/imxmmc.c
+++ b/drivers/mmc/imxmmc.c
@@ -958,8 +958,10 @@ static int imxmci_probe(struct platform_device *pdev)
958 /* MMC core transfer sizes tunable parameters */ 958 /* MMC core transfer sizes tunable parameters */
959 mmc->max_hw_segs = 64; 959 mmc->max_hw_segs = 64;
960 mmc->max_phys_segs = 64; 960 mmc->max_phys_segs = 64;
961 mmc->max_sectors = 64; /* default 1 << (PAGE_CACHE_SHIFT - 9) */
962 mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */ 961 mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */
962 mmc->max_req_size = 64*512; /* default PAGE_CACHE_SIZE */
963 mmc->max_blk_size = 2048;
964 mmc->max_blk_count = 65535;
963 965
964 host = mmc_priv(mmc); 966 host = mmc_priv(mmc);
965 host->mmc = mmc; 967 host->mmc = mmc;
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 6f2a282e2b97..5046a1661342 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -103,11 +103,16 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
103 mmc_hostname(host), mrq->cmd->opcode, 103 mmc_hostname(host), mrq->cmd->opcode,
104 mrq->cmd->arg, mrq->cmd->flags); 104 mrq->cmd->arg, mrq->cmd->flags);
105 105
106 WARN_ON(host->card_busy == NULL); 106 WARN_ON(!host->claimed);
107 107
108 mrq->cmd->error = 0; 108 mrq->cmd->error = 0;
109 mrq->cmd->mrq = mrq; 109 mrq->cmd->mrq = mrq;
110 if (mrq->data) { 110 if (mrq->data) {
111 BUG_ON(mrq->data->blksz > host->max_blk_size);
112 BUG_ON(mrq->data->blocks > host->max_blk_count);
113 BUG_ON(mrq->data->blocks * mrq->data->blksz >
114 host->max_req_size);
115
111 mrq->cmd->data = mrq->data; 116 mrq->cmd->data = mrq->data;
112 mrq->data->error = 0; 117 mrq->data->error = 0;
113 mrq->data->mrq = mrq; 118 mrq->data->mrq = mrq;
@@ -157,7 +162,7 @@ int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries
157{ 162{
158 struct mmc_request mrq; 163 struct mmc_request mrq;
159 164
160 BUG_ON(host->card_busy == NULL); 165 BUG_ON(!host->claimed);
161 166
162 memset(&mrq, 0, sizeof(struct mmc_request)); 167 memset(&mrq, 0, sizeof(struct mmc_request));
163 168
@@ -195,7 +200,7 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, unsigned int rca,
195 200
196 int i, err; 201 int i, err;
197 202
198 BUG_ON(host->card_busy == NULL); 203 BUG_ON(!host->claimed);
199 BUG_ON(retries < 0); 204 BUG_ON(retries < 0);
200 205
201 err = MMC_ERR_INVALID; 206 err = MMC_ERR_INVALID;
@@ -289,7 +294,10 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card,
289 else 294 else
290 limit_us = 100000; 295 limit_us = 100000;
291 296
292 if (timeout_us > limit_us) { 297 /*
298 * SDHC cards always use these fixed values.
299 */
300 if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
293 data->timeout_ns = limit_us * 1000; 301 data->timeout_ns = limit_us * 1000;
294 data->timeout_clks = 0; 302 data->timeout_clks = 0;
295 } 303 }
@@ -320,14 +328,14 @@ int __mmc_claim_host(struct mmc_host *host, struct mmc_card *card)
320 spin_lock_irqsave(&host->lock, flags); 328 spin_lock_irqsave(&host->lock, flags);
321 while (1) { 329 while (1) {
322 set_current_state(TASK_UNINTERRUPTIBLE); 330 set_current_state(TASK_UNINTERRUPTIBLE);
323 if (host->card_busy == NULL) 331 if (!host->claimed)
324 break; 332 break;
325 spin_unlock_irqrestore(&host->lock, flags); 333 spin_unlock_irqrestore(&host->lock, flags);
326 schedule(); 334 schedule();
327 spin_lock_irqsave(&host->lock, flags); 335 spin_lock_irqsave(&host->lock, flags);
328 } 336 }
329 set_current_state(TASK_RUNNING); 337 set_current_state(TASK_RUNNING);
330 host->card_busy = card; 338 host->claimed = 1;
331 spin_unlock_irqrestore(&host->lock, flags); 339 spin_unlock_irqrestore(&host->lock, flags);
332 remove_wait_queue(&host->wq, &wait); 340 remove_wait_queue(&host->wq, &wait);
333 341
@@ -353,10 +361,10 @@ void mmc_release_host(struct mmc_host *host)
353{ 361{
354 unsigned long flags; 362 unsigned long flags;
355 363
356 BUG_ON(host->card_busy == NULL); 364 BUG_ON(!host->claimed);
357 365
358 spin_lock_irqsave(&host->lock, flags); 366 spin_lock_irqsave(&host->lock, flags);
359 host->card_busy = NULL; 367 host->claimed = 0;
360 spin_unlock_irqrestore(&host->lock, flags); 368 spin_unlock_irqrestore(&host->lock, flags);
361 369
362 wake_up(&host->wq); 370 wake_up(&host->wq);
@@ -372,7 +380,7 @@ static inline void mmc_set_ios(struct mmc_host *host)
372 mmc_hostname(host), ios->clock, ios->bus_mode, 380 mmc_hostname(host), ios->clock, ios->bus_mode,
373 ios->power_mode, ios->chip_select, ios->vdd, 381 ios->power_mode, ios->chip_select, ios->vdd,
374 ios->bus_width); 382 ios->bus_width);
375 383
376 host->ops->set_ios(host, ios); 384 host->ops->set_ios(host, ios);
377} 385}
378 386
@@ -381,7 +389,7 @@ static int mmc_select_card(struct mmc_host *host, struct mmc_card *card)
381 int err; 389 int err;
382 struct mmc_command cmd; 390 struct mmc_command cmd;
383 391
384 BUG_ON(host->card_busy == NULL); 392 BUG_ON(!host->claimed);
385 393
386 if (host->card_selected == card) 394 if (host->card_selected == card)
387 return MMC_ERR_NONE; 395 return MMC_ERR_NONE;
@@ -588,34 +596,65 @@ static void mmc_decode_csd(struct mmc_card *card)
588 596
589 if (mmc_card_sd(card)) { 597 if (mmc_card_sd(card)) {
590 csd_struct = UNSTUFF_BITS(resp, 126, 2); 598 csd_struct = UNSTUFF_BITS(resp, 126, 2);
591 if (csd_struct != 0) { 599
600 switch (csd_struct) {
601 case 0:
602 m = UNSTUFF_BITS(resp, 115, 4);
603 e = UNSTUFF_BITS(resp, 112, 3);
604 csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
605 csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
606
607 m = UNSTUFF_BITS(resp, 99, 4);
608 e = UNSTUFF_BITS(resp, 96, 3);
609 csd->max_dtr = tran_exp[e] * tran_mant[m];
610 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
611
612 e = UNSTUFF_BITS(resp, 47, 3);
613 m = UNSTUFF_BITS(resp, 62, 12);
614 csd->capacity = (1 + m) << (e + 2);
615
616 csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
617 csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
618 csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
619 csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
620 csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
621 csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
622 csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
623 break;
624 case 1:
625 /*
626 * This is a block-addressed SDHC card. Most
627 * interesting fields are unused and have fixed
628 * values. To avoid getting tripped by buggy cards,
629 * we assume those fixed values ourselves.
630 */
631 mmc_card_set_blockaddr(card);
632
633 csd->tacc_ns = 0; /* Unused */
634 csd->tacc_clks = 0; /* Unused */
635
636 m = UNSTUFF_BITS(resp, 99, 4);
637 e = UNSTUFF_BITS(resp, 96, 3);
638 csd->max_dtr = tran_exp[e] * tran_mant[m];
639 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
640
641 m = UNSTUFF_BITS(resp, 48, 22);
642 csd->capacity = (1 + m) << 10;
643
644 csd->read_blkbits = 9;
645 csd->read_partial = 0;
646 csd->write_misalign = 0;
647 csd->read_misalign = 0;
648 csd->r2w_factor = 4; /* Unused */
649 csd->write_blkbits = 9;
650 csd->write_partial = 0;
651 break;
652 default:
592 printk("%s: unrecognised CSD structure version %d\n", 653 printk("%s: unrecognised CSD structure version %d\n",
593 mmc_hostname(card->host), csd_struct); 654 mmc_hostname(card->host), csd_struct);
594 mmc_card_set_bad(card); 655 mmc_card_set_bad(card);
595 return; 656 return;
596 } 657 }
597
598 m = UNSTUFF_BITS(resp, 115, 4);
599 e = UNSTUFF_BITS(resp, 112, 3);
600 csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
601 csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
602
603 m = UNSTUFF_BITS(resp, 99, 4);
604 e = UNSTUFF_BITS(resp, 96, 3);
605 csd->max_dtr = tran_exp[e] * tran_mant[m];
606 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
607
608 e = UNSTUFF_BITS(resp, 47, 3);
609 m = UNSTUFF_BITS(resp, 62, 12);
610 csd->capacity = (1 + m) << (e + 2);
611
612 csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
613 csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
614 csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
615 csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
616 csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
617 csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
618 csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
619 } else { 658 } else {
620 /* 659 /*
621 * We only understand CSD structure v1.1 and v1.2. 660 * We only understand CSD structure v1.1 and v1.2.
@@ -848,6 +887,41 @@ static int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
848 return err; 887 return err;
849} 888}
850 889
890static int mmc_send_if_cond(struct mmc_host *host, u32 ocr, int *rsd2)
891{
892 struct mmc_command cmd;
893 int err, sd2;
894 static const u8 test_pattern = 0xAA;
895
896 /*
897 * To support SD 2.0 cards, we must always invoke SD_SEND_IF_COND
898 * before SD_APP_OP_COND. This command will harmlessly fail for
899 * SD 1.0 cards.
900 */
901 cmd.opcode = SD_SEND_IF_COND;
902 cmd.arg = ((ocr & 0xFF8000) != 0) << 8 | test_pattern;
903 cmd.flags = MMC_RSP_R7 | MMC_CMD_BCR;
904
905 err = mmc_wait_for_cmd(host, &cmd, 0);
906 if (err == MMC_ERR_NONE) {
907 if ((cmd.resp[0] & 0xFF) == test_pattern) {
908 sd2 = 1;
909 } else {
910 sd2 = 0;
911 err = MMC_ERR_FAILED;
912 }
913 } else {
914 /*
915 * Treat errors as SD 1.0 card.
916 */
917 sd2 = 0;
918 err = MMC_ERR_NONE;
919 }
920 if (rsd2)
921 *rsd2 = sd2;
922 return err;
923}
924
851/* 925/*
852 * Discover cards by requesting their CID. If this command 926 * Discover cards by requesting their CID. If this command
853 * times out, it is not an error; there are no further cards 927 * times out, it is not an error; there are no further cards
@@ -1018,7 +1092,8 @@ static void mmc_process_ext_csds(struct mmc_host *host)
1018 mmc_wait_for_req(host, &mrq); 1092 mmc_wait_for_req(host, &mrq);
1019 1093
1020 if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { 1094 if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) {
1021 mmc_card_set_dead(card); 1095 printk("%s: unable to read EXT_CSD, performance "
1096 "might suffer.\n", mmc_hostname(card->host));
1022 continue; 1097 continue;
1023 } 1098 }
1024 1099
@@ -1034,7 +1109,6 @@ static void mmc_process_ext_csds(struct mmc_host *host)
1034 printk("%s: card is mmc v4 but doesn't support " 1109 printk("%s: card is mmc v4 but doesn't support "
1035 "any high-speed modes.\n", 1110 "any high-speed modes.\n",
1036 mmc_hostname(card->host)); 1111 mmc_hostname(card->host));
1037 mmc_card_set_bad(card);
1038 continue; 1112 continue;
1039 } 1113 }
1040 1114
@@ -1215,7 +1289,9 @@ static void mmc_read_switch_caps(struct mmc_host *host)
1215 mmc_wait_for_req(host, &mrq); 1289 mmc_wait_for_req(host, &mrq);
1216 1290
1217 if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { 1291 if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) {
1218 mmc_card_set_dead(card); 1292 printk("%s: unable to read switch capabilities, "
1293 "performance might suffer.\n",
1294 mmc_hostname(card->host));
1219 continue; 1295 continue;
1220 } 1296 }
1221 1297
@@ -1247,12 +1323,8 @@ static void mmc_read_switch_caps(struct mmc_host *host)
1247 1323
1248 mmc_wait_for_req(host, &mrq); 1324 mmc_wait_for_req(host, &mrq);
1249 1325
1250 if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { 1326 if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE ||
1251 mmc_card_set_dead(card); 1327 (status[16] & 0xF) != 1) {
1252 continue;
1253 }
1254
1255 if ((status[16] & 0xF) != 1) {
1256 printk(KERN_WARNING "%s: Problem switching card " 1328 printk(KERN_WARNING "%s: Problem switching card "
1257 "into high-speed mode!\n", 1329 "into high-speed mode!\n",
1258 mmc_hostname(host)); 1330 mmc_hostname(host));
@@ -1334,6 +1406,10 @@ static void mmc_setup(struct mmc_host *host)
1334 mmc_power_up(host); 1406 mmc_power_up(host);
1335 mmc_idle_cards(host); 1407 mmc_idle_cards(host);
1336 1408
1409 err = mmc_send_if_cond(host, host->ocr_avail, NULL);
1410 if (err != MMC_ERR_NONE) {
1411 return;
1412 }
1337 err = mmc_send_app_op_cond(host, 0, &ocr); 1413 err = mmc_send_app_op_cond(host, 0, &ocr);
1338 1414
1339 /* 1415 /*
@@ -1386,10 +1462,21 @@ static void mmc_setup(struct mmc_host *host)
1386 * all get the idea that they should be ready for CMD2. 1462 * all get the idea that they should be ready for CMD2.
1387 * (My SanDisk card seems to need this.) 1463 * (My SanDisk card seems to need this.)
1388 */ 1464 */
1389 if (host->mode == MMC_MODE_SD) 1465 if (host->mode == MMC_MODE_SD) {
1390 mmc_send_app_op_cond(host, host->ocr, NULL); 1466 int err, sd2;
1391 else 1467 err = mmc_send_if_cond(host, host->ocr, &sd2);
1468 if (err == MMC_ERR_NONE) {
1469 /*
1470 * If SD_SEND_IF_COND indicates an SD 2.0
1471 * compliant card and we should set bit 30
1472 * of the ocr to indicate that we can handle
1473 * block-addressed SDHC cards.
1474 */
1475 mmc_send_app_op_cond(host, host->ocr | (sd2 << 30), NULL);
1476 }
1477 } else {
1392 mmc_send_op_cond(host, host->ocr, NULL); 1478 mmc_send_op_cond(host, host->ocr, NULL);
1479 }
1393 1480
1394 mmc_discover_cards(host); 1481 mmc_discover_cards(host);
1395 1482
@@ -1519,8 +1606,11 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
1519 */ 1606 */
1520 host->max_hw_segs = 1; 1607 host->max_hw_segs = 1;
1521 host->max_phys_segs = 1; 1608 host->max_phys_segs = 1;
1522 host->max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
1523 host->max_seg_size = PAGE_CACHE_SIZE; 1609 host->max_seg_size = PAGE_CACHE_SIZE;
1610
1611 host->max_req_size = PAGE_CACHE_SIZE;
1612 host->max_blk_size = 512;
1613 host->max_blk_count = PAGE_CACHE_SIZE / 512;
1524 } 1614 }
1525 1615
1526 return host; 1616 return host;
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index 87713572293f..05ba8ace70e7 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -237,13 +237,17 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
237 brq.mrq.cmd = &brq.cmd; 237 brq.mrq.cmd = &brq.cmd;
238 brq.mrq.data = &brq.data; 238 brq.mrq.data = &brq.data;
239 239
240 brq.cmd.arg = req->sector << 9; 240 brq.cmd.arg = req->sector;
241 if (!mmc_card_blockaddr(card))
242 brq.cmd.arg <<= 9;
241 brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 243 brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
242 brq.data.blksz = 1 << md->block_bits; 244 brq.data.blksz = 1 << md->block_bits;
243 brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
244 brq.stop.opcode = MMC_STOP_TRANSMISSION; 245 brq.stop.opcode = MMC_STOP_TRANSMISSION;
245 brq.stop.arg = 0; 246 brq.stop.arg = 0;
246 brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC; 247 brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
248 brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
249 if (brq.data.blocks > card->host->max_blk_count)
250 brq.data.blocks = card->host->max_blk_count;
247 251
248 mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ); 252 mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);
249 253
@@ -375,9 +379,10 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
375 spin_unlock_irq(&md->lock); 379 spin_unlock_irq(&md->lock);
376 } 380 }
377 381
382flush_queue:
383
378 mmc_card_release_host(card); 384 mmc_card_release_host(card);
379 385
380flush_queue:
381 spin_lock_irq(&md->lock); 386 spin_lock_irq(&md->lock);
382 while (ret) { 387 while (ret) {
383 ret = end_that_request_chunk(req, 0, 388 ret = end_that_request_chunk(req, 0,
@@ -494,6 +499,10 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
494 struct mmc_command cmd; 499 struct mmc_command cmd;
495 int err; 500 int err;
496 501
502 /* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
503 if (mmc_card_blockaddr(card))
504 return 0;
505
497 mmc_card_claim_host(card); 506 mmc_card_claim_host(card);
498 cmd.opcode = MMC_SET_BLOCKLEN; 507 cmd.opcode = MMC_SET_BLOCKLEN;
499 cmd.arg = 1 << md->block_bits; 508 cmd.arg = 1 << md->block_bits;
diff --git a/drivers/mmc/mmc_queue.c b/drivers/mmc/mmc_queue.c
index 3e35a43819fb..c27e42645cdb 100644
--- a/drivers/mmc/mmc_queue.c
+++ b/drivers/mmc/mmc_queue.c
@@ -147,7 +147,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
147 147
148 blk_queue_prep_rq(mq->queue, mmc_prep_request); 148 blk_queue_prep_rq(mq->queue, mmc_prep_request);
149 blk_queue_bounce_limit(mq->queue, limit); 149 blk_queue_bounce_limit(mq->queue, limit);
150 blk_queue_max_sectors(mq->queue, host->max_sectors); 150 blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
151 blk_queue_max_phys_segments(mq->queue, host->max_phys_segs); 151 blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
152 blk_queue_max_hw_segments(mq->queue, host->max_hw_segs); 152 blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
153 blk_queue_max_segment_size(mq->queue, host->max_seg_size); 153 blk_queue_max_segment_size(mq->queue, host->max_seg_size);
diff --git a/drivers/mmc/mmc_sysfs.c b/drivers/mmc/mmc_sysfs.c
index e334acd045bc..d32698b02d7f 100644
--- a/drivers/mmc/mmc_sysfs.c
+++ b/drivers/mmc/mmc_sysfs.c
@@ -199,7 +199,7 @@ void mmc_init_card(struct mmc_card *card, struct mmc_host *host)
199 memset(card, 0, sizeof(struct mmc_card)); 199 memset(card, 0, sizeof(struct mmc_card));
200 card->host = host; 200 card->host = host;
201 device_initialize(&card->dev); 201 device_initialize(&card->dev);
202 card->dev.parent = mmc_dev(host); 202 card->dev.parent = mmc_classdev(host);
203 card->dev.bus = &mmc_bus_type; 203 card->dev.bus = &mmc_bus_type;
204 card->dev.release = mmc_release_card; 204 card->dev.release = mmc_release_card;
205} 205}
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c
index ccfe6561be24..5941dd951e82 100644
--- a/drivers/mmc/mmci.c
+++ b/drivers/mmc/mmci.c
@@ -524,15 +524,24 @@ static int mmci_probe(struct amba_device *dev, void *id)
524 /* 524 /*
525 * Since we only have a 16-bit data length register, we must 525 * Since we only have a 16-bit data length register, we must
526 * ensure that we don't exceed 2^16-1 bytes in a single request. 526 * ensure that we don't exceed 2^16-1 bytes in a single request.
527 * Choose 64 (512-byte) sectors as the limit.
528 */ 527 */
529 mmc->max_sectors = 64; 528 mmc->max_req_size = 65535;
530 529
531 /* 530 /*
532 * Set the maximum segment size. Since we aren't doing DMA 531 * Set the maximum segment size. Since we aren't doing DMA
533 * (yet) we are only limited by the data length register. 532 * (yet) we are only limited by the data length register.
534 */ 533 */
535 mmc->max_seg_size = mmc->max_sectors << 9; 534 mmc->max_seg_size = mmc->max_req_size;
535
536 /*
537 * Block size can be up to 2048 bytes, but must be a power of two.
538 */
539 mmc->max_blk_size = 2048;
540
541 /*
542 * No limit on the number of blocks transferred.
543 */
544 mmc->max_blk_count = mmc->max_req_size;
536 545
537 spin_lock_init(&host->lock); 546 spin_lock_init(&host->lock);
538 547
diff --git a/drivers/mmc/omap.c b/drivers/mmc/omap.c
index d30540b27614..1e96a2f65022 100644
--- a/drivers/mmc/omap.c
+++ b/drivers/mmc/omap.c
@@ -1099,8 +1099,10 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1099 */ 1099 */
1100 mmc->max_phys_segs = 32; 1100 mmc->max_phys_segs = 32;
1101 mmc->max_hw_segs = 32; 1101 mmc->max_hw_segs = 32;
1102 mmc->max_sectors = 256; /* NBLK max 11-bits, OMAP also limited by DMA */ 1102 mmc->max_blk_size = 2048; /* BLEN is 11 bits (+1) */
1103 mmc->max_seg_size = mmc->max_sectors * 512; 1103 mmc->max_blk_count = 2048; /* NBLK is 11 bits (+1) */
1104 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1105 mmc->max_seg_size = mmc->max_req_size;
1104 1106
1105 if (host->power_pin >= 0) { 1107 if (host->power_pin >= 0) {
1106 if ((ret = omap_request_gpio(host->power_pin)) != 0) { 1108 if ((ret = omap_request_gpio(host->power_pin)) != 0) {
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c
index 6073d998b11f..9774fc68b61a 100644
--- a/drivers/mmc/pxamci.c
+++ b/drivers/mmc/pxamci.c
@@ -450,6 +450,16 @@ static int pxamci_probe(struct platform_device *pdev)
450 */ 450 */
451 mmc->max_seg_size = PAGE_SIZE; 451 mmc->max_seg_size = PAGE_SIZE;
452 452
453 /*
454 * Block length register is 10 bits.
455 */
456 mmc->max_blk_size = 1023;
457
458 /*
459 * Block count register is 16 bits.
460 */
461 mmc->max_blk_count = 65535;
462
453 host = mmc_priv(mmc); 463 host = mmc_priv(mmc);
454 host->mmc = mmc; 464 host->mmc = mmc;
455 host->dma = -1; 465 host->dma = -1;
diff --git a/drivers/mmc/sdhci.c b/drivers/mmc/sdhci.c
index c2d13d7e9911..4bf1fea5e2c4 100644
--- a/drivers/mmc/sdhci.c
+++ b/drivers/mmc/sdhci.c
@@ -37,6 +37,7 @@ static unsigned int debug_quirks = 0;
37#define SDHCI_QUIRK_FORCE_DMA (1<<1) 37#define SDHCI_QUIRK_FORCE_DMA (1<<1)
38/* Controller doesn't like some resets when there is no card inserted. */ 38/* Controller doesn't like some resets when there is no card inserted. */
39#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2) 39#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2)
40#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3)
40 41
41static const struct pci_device_id pci_ids[] __devinitdata = { 42static const struct pci_device_id pci_ids[] __devinitdata = {
42 { 43 {
@@ -65,6 +66,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
65 .driver_data = SDHCI_QUIRK_FORCE_DMA, 66 .driver_data = SDHCI_QUIRK_FORCE_DMA,
66 }, 67 },
67 68
69 {
70 .vendor = PCI_VENDOR_ID_ENE,
71 .device = PCI_DEVICE_ID_ENE_CB712_SD,
72 .subvendor = PCI_ANY_ID,
73 .subdevice = PCI_ANY_ID,
74 .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE,
75 },
76
68 { /* Generic SD host controller */ 77 { /* Generic SD host controller */
69 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) 78 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
70 }, 79 },
@@ -197,15 +206,9 @@ static void sdhci_deactivate_led(struct sdhci_host *host)
197 * * 206 * *
198\*****************************************************************************/ 207\*****************************************************************************/
199 208
200static inline char* sdhci_kmap_sg(struct sdhci_host* host) 209static inline char* sdhci_sg_to_buffer(struct sdhci_host* host)
201{ 210{
202 host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ); 211 return page_address(host->cur_sg->page) + host->cur_sg->offset;
203 return host->mapped_sg + host->cur_sg->offset;
204}
205
206static inline void sdhci_kunmap_sg(struct sdhci_host* host)
207{
208 kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ);
209} 212}
210 213
211static inline int sdhci_next_sg(struct sdhci_host* host) 214static inline int sdhci_next_sg(struct sdhci_host* host)
@@ -240,7 +243,7 @@ static void sdhci_read_block_pio(struct sdhci_host *host)
240 chunk_remain = 0; 243 chunk_remain = 0;
241 data = 0; 244 data = 0;
242 245
243 buffer = sdhci_kmap_sg(host) + host->offset; 246 buffer = sdhci_sg_to_buffer(host) + host->offset;
244 247
245 while (blksize) { 248 while (blksize) {
246 if (chunk_remain == 0) { 249 if (chunk_remain == 0) {
@@ -264,16 +267,13 @@ static void sdhci_read_block_pio(struct sdhci_host *host)
264 } 267 }
265 268
266 if (host->remain == 0) { 269 if (host->remain == 0) {
267 sdhci_kunmap_sg(host);
268 if (sdhci_next_sg(host) == 0) { 270 if (sdhci_next_sg(host) == 0) {
269 BUG_ON(blksize != 0); 271 BUG_ON(blksize != 0);
270 return; 272 return;
271 } 273 }
272 buffer = sdhci_kmap_sg(host); 274 buffer = sdhci_sg_to_buffer(host);
273 } 275 }
274 } 276 }
275
276 sdhci_kunmap_sg(host);
277} 277}
278 278
279static void sdhci_write_block_pio(struct sdhci_host *host) 279static void sdhci_write_block_pio(struct sdhci_host *host)
@@ -290,7 +290,7 @@ static void sdhci_write_block_pio(struct sdhci_host *host)
290 data = 0; 290 data = 0;
291 291
292 bytes = 0; 292 bytes = 0;
293 buffer = sdhci_kmap_sg(host) + host->offset; 293 buffer = sdhci_sg_to_buffer(host) + host->offset;
294 294
295 while (blksize) { 295 while (blksize) {
296 size = min(host->size, host->remain); 296 size = min(host->size, host->remain);
@@ -314,16 +314,13 @@ static void sdhci_write_block_pio(struct sdhci_host *host)
314 } 314 }
315 315
316 if (host->remain == 0) { 316 if (host->remain == 0) {
317 sdhci_kunmap_sg(host);
318 if (sdhci_next_sg(host) == 0) { 317 if (sdhci_next_sg(host) == 0) {
319 BUG_ON(blksize != 0); 318 BUG_ON(blksize != 0);
320 return; 319 return;
321 } 320 }
322 buffer = sdhci_kmap_sg(host); 321 buffer = sdhci_sg_to_buffer(host);
323 } 322 }
324 } 323 }
325
326 sdhci_kunmap_sg(host);
327} 324}
328 325
329static void sdhci_transfer_pio(struct sdhci_host *host) 326static void sdhci_transfer_pio(struct sdhci_host *host)
@@ -372,7 +369,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
372 369
373 /* Sanity checks */ 370 /* Sanity checks */
374 BUG_ON(data->blksz * data->blocks > 524288); 371 BUG_ON(data->blksz * data->blocks > 524288);
375 BUG_ON(data->blksz > host->max_block); 372 BUG_ON(data->blksz > host->mmc->max_blk_size);
376 BUG_ON(data->blocks > 65535); 373 BUG_ON(data->blocks > 65535);
377 374
378 /* timeout in us */ 375 /* timeout in us */
@@ -674,10 +671,17 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
674 if (host->power == power) 671 if (host->power == power)
675 return; 672 return;
676 673
677 writeb(0, host->ioaddr + SDHCI_POWER_CONTROL); 674 if (power == (unsigned short)-1) {
678 675 writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
679 if (power == (unsigned short)-1)
680 goto out; 676 goto out;
677 }
678
679 /*
680 * Spec says that we should clear the power reg before setting
681 * a new value. Some controllers don't seem to like this though.
682 */
683 if (!(host->chip->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
684 writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
681 685
682 pwr = SDHCI_POWER_ON; 686 pwr = SDHCI_POWER_ON;
683 687
@@ -1109,7 +1113,9 @@ static int sdhci_resume (struct pci_dev *pdev)
1109 1113
1110 pci_set_power_state(pdev, PCI_D0); 1114 pci_set_power_state(pdev, PCI_D0);
1111 pci_restore_state(pdev); 1115 pci_restore_state(pdev);
1112 pci_enable_device(pdev); 1116 ret = pci_enable_device(pdev);
1117 if (ret)
1118 return ret;
1113 1119
1114 for (i = 0;i < chip->num_slots;i++) { 1120 for (i = 0;i < chip->num_slots;i++) {
1115 if (!chip->hosts[i]) 1121 if (!chip->hosts[i])
@@ -1274,15 +1280,6 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1274 if (caps & SDHCI_TIMEOUT_CLK_UNIT) 1280 if (caps & SDHCI_TIMEOUT_CLK_UNIT)
1275 host->timeout_clk *= 1000; 1281 host->timeout_clk *= 1000;
1276 1282
1277 host->max_block = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT;
1278 if (host->max_block >= 3) {
1279 printk(KERN_ERR "%s: Invalid maximum block size.\n",
1280 host->slot_descr);
1281 ret = -ENODEV;
1282 goto unmap;
1283 }
1284 host->max_block = 512 << host->max_block;
1285
1286 /* 1283 /*
1287 * Set host parameters. 1284 * Set host parameters.
1288 */ 1285 */
@@ -1294,9 +1291,9 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1294 mmc->ocr_avail = 0; 1291 mmc->ocr_avail = 0;
1295 if (caps & SDHCI_CAN_VDD_330) 1292 if (caps & SDHCI_CAN_VDD_330)
1296 mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34; 1293 mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
1297 else if (caps & SDHCI_CAN_VDD_300) 1294 if (caps & SDHCI_CAN_VDD_300)
1298 mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31; 1295 mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
1299 else if (caps & SDHCI_CAN_VDD_180) 1296 if (caps & SDHCI_CAN_VDD_180)
1300 mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19; 1297 mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19;
1301 1298
1302 if ((host->max_clk > 25000000) && !(caps & SDHCI_CAN_DO_HISPD)) { 1299 if ((host->max_clk > 25000000) && !(caps & SDHCI_CAN_DO_HISPD)) {
@@ -1326,15 +1323,33 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1326 1323
1327 /* 1324 /*
1328 * Maximum number of sectors in one transfer. Limited by DMA boundary 1325 * Maximum number of sectors in one transfer. Limited by DMA boundary
1329 * size (512KiB), which means (512 KiB/512=) 1024 entries. 1326 * size (512KiB).
1330 */ 1327 */
1331 mmc->max_sectors = 1024; 1328 mmc->max_req_size = 524288;
1332 1329
1333 /* 1330 /*
1334 * Maximum segment size. Could be one segment with the maximum number 1331 * Maximum segment size. Could be one segment with the maximum number
1335 * of sectors. 1332 * of bytes.
1333 */
1334 mmc->max_seg_size = mmc->max_req_size;
1335
1336 /*
1337 * Maximum block size. This varies from controller to controller and
1338 * is specified in the capabilities register.
1339 */
1340 mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT;
1341 if (mmc->max_blk_size >= 3) {
1342 printk(KERN_ERR "%s: Invalid maximum block size.\n",
1343 host->slot_descr);
1344 ret = -ENODEV;
1345 goto unmap;
1346 }
1347 mmc->max_blk_size = 512 << mmc->max_blk_size;
1348
1349 /*
1350 * Maximum block count.
1336 */ 1351 */
1337 mmc->max_seg_size = mmc->max_sectors * 512; 1352 mmc->max_blk_count = 65535;
1338 1353
1339 /* 1354 /*
1340 * Init tasklets. 1355 * Init tasklets.
diff --git a/drivers/mmc/sdhci.h b/drivers/mmc/sdhci.h
index f9d1a0a6f03a..e324f0a623dc 100644
--- a/drivers/mmc/sdhci.h
+++ b/drivers/mmc/sdhci.h
@@ -174,7 +174,6 @@ struct sdhci_host {
174 174
175 unsigned int max_clk; /* Max possible freq (MHz) */ 175 unsigned int max_clk; /* Max possible freq (MHz) */
176 unsigned int timeout_clk; /* Timeout freq (KHz) */ 176 unsigned int timeout_clk; /* Timeout freq (KHz) */
177 unsigned int max_block; /* Max block size (bytes) */
178 177
179 unsigned int clock; /* Current clock (MHz) */ 178 unsigned int clock; /* Current clock (MHz) */
180 unsigned short power; /* Current voltage */ 179 unsigned short power; /* Current voltage */
@@ -184,7 +183,6 @@ struct sdhci_host {
184 struct mmc_data *data; /* Current data request */ 183 struct mmc_data *data; /* Current data request */
185 184
186 struct scatterlist *cur_sg; /* We're working on this */ 185 struct scatterlist *cur_sg; /* We're working on this */
187 char *mapped_sg; /* This is where it's mapped */
188 int num_sg; /* Entries left */ 186 int num_sg; /* Entries left */
189 int offset; /* Offset into current sg */ 187 int offset; /* Offset into current sg */
190 int remain; /* Bytes left in current */ 188 int remain; /* Bytes left in current */
diff --git a/drivers/mmc/tifm_sd.c b/drivers/mmc/tifm_sd.c
index fa4a52886b97..e65f8a0a9349 100644
--- a/drivers/mmc/tifm_sd.c
+++ b/drivers/mmc/tifm_sd.c
@@ -17,7 +17,7 @@
17#include <asm/io.h> 17#include <asm/io.h>
18 18
19#define DRIVER_NAME "tifm_sd" 19#define DRIVER_NAME "tifm_sd"
20#define DRIVER_VERSION "0.6" 20#define DRIVER_VERSION "0.7"
21 21
22static int no_dma = 0; 22static int no_dma = 0;
23static int fixed_timeout = 0; 23static int fixed_timeout = 0;
@@ -79,7 +79,6 @@ typedef enum {
79 79
80enum { 80enum {
81 FIFO_RDY = 0x0001, /* hardware dependent value */ 81 FIFO_RDY = 0x0001, /* hardware dependent value */
82 HOST_REG = 0x0002,
83 EJECT = 0x0004, 82 EJECT = 0x0004,
84 EJECT_DONE = 0x0008, 83 EJECT_DONE = 0x0008,
85 CARD_BUSY = 0x0010, 84 CARD_BUSY = 0x0010,
@@ -95,46 +94,53 @@ struct tifm_sd {
95 card_state_t state; 94 card_state_t state;
96 unsigned int clk_freq; 95 unsigned int clk_freq;
97 unsigned int clk_div; 96 unsigned int clk_div;
98 unsigned long timeout_jiffies; // software timeout - 2 sec 97 unsigned long timeout_jiffies;
99 98
99 struct tasklet_struct finish_tasklet;
100 struct timer_list timer;
100 struct mmc_request *req; 101 struct mmc_request *req;
101 struct work_struct cmd_handler; 102 wait_queue_head_t notify;
102 struct delayed_work abort_handler;
103 wait_queue_head_t can_eject;
104 103
105 size_t written_blocks; 104 size_t written_blocks;
106 char *buffer;
107 size_t buffer_size; 105 size_t buffer_size;
108 size_t buffer_pos; 106 size_t buffer_pos;
109 107
110}; 108};
111 109
110static char* tifm_sd_data_buffer(struct mmc_data *data)
111{
112 return page_address(data->sg->page) + data->sg->offset;
113}
114
112static int tifm_sd_transfer_data(struct tifm_dev *sock, struct tifm_sd *host, 115static int tifm_sd_transfer_data(struct tifm_dev *sock, struct tifm_sd *host,
113 unsigned int host_status) 116 unsigned int host_status)
114{ 117{
115 struct mmc_command *cmd = host->req->cmd; 118 struct mmc_command *cmd = host->req->cmd;
116 unsigned int t_val = 0, cnt = 0; 119 unsigned int t_val = 0, cnt = 0;
120 char *buffer;
117 121
118 if (host_status & TIFM_MMCSD_BRS) { 122 if (host_status & TIFM_MMCSD_BRS) {
119 /* in non-dma rx mode BRS fires when fifo is still not empty */ 123 /* in non-dma rx mode BRS fires when fifo is still not empty */
120 if (host->buffer && (cmd->data->flags & MMC_DATA_READ)) { 124 if (no_dma && (cmd->data->flags & MMC_DATA_READ)) {
125 buffer = tifm_sd_data_buffer(host->req->data);
121 while (host->buffer_size > host->buffer_pos) { 126 while (host->buffer_size > host->buffer_pos) {
122 t_val = readl(sock->addr + SOCK_MMCSD_DATA); 127 t_val = readl(sock->addr + SOCK_MMCSD_DATA);
123 host->buffer[host->buffer_pos++] = t_val & 0xff; 128 buffer[host->buffer_pos++] = t_val & 0xff;
124 host->buffer[host->buffer_pos++] = 129 buffer[host->buffer_pos++] =
125 (t_val >> 8) & 0xff; 130 (t_val >> 8) & 0xff;
126 } 131 }
127 } 132 }
128 return 1; 133 return 1;
129 } else if (host->buffer) { 134 } else if (no_dma) {
135 buffer = tifm_sd_data_buffer(host->req->data);
130 if ((cmd->data->flags & MMC_DATA_READ) && 136 if ((cmd->data->flags & MMC_DATA_READ) &&
131 (host_status & TIFM_MMCSD_AF)) { 137 (host_status & TIFM_MMCSD_AF)) {
132 for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) { 138 for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) {
133 t_val = readl(sock->addr + SOCK_MMCSD_DATA); 139 t_val = readl(sock->addr + SOCK_MMCSD_DATA);
134 if (host->buffer_size > host->buffer_pos) { 140 if (host->buffer_size > host->buffer_pos) {
135 host->buffer[host->buffer_pos++] = 141 buffer[host->buffer_pos++] =
136 t_val & 0xff; 142 t_val & 0xff;
137 host->buffer[host->buffer_pos++] = 143 buffer[host->buffer_pos++] =
138 (t_val >> 8) & 0xff; 144 (t_val >> 8) & 0xff;
139 } 145 }
140 } 146 }
@@ -142,11 +148,12 @@ static int tifm_sd_transfer_data(struct tifm_dev *sock, struct tifm_sd *host,
142 && (host_status & TIFM_MMCSD_AE)) { 148 && (host_status & TIFM_MMCSD_AE)) {
143 for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) { 149 for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) {
144 if (host->buffer_size > host->buffer_pos) { 150 if (host->buffer_size > host->buffer_pos) {
145 t_val = host->buffer[host->buffer_pos++] & 0x00ff; 151 t_val = buffer[host->buffer_pos++]
146 t_val |= ((host->buffer[host->buffer_pos++]) << 8) 152 & 0x00ff;
147 & 0xff00; 153 t_val |= ((buffer[host->buffer_pos++])
154 << 8) & 0xff00;
148 writel(t_val, 155 writel(t_val,
149 sock->addr + SOCK_MMCSD_DATA); 156 sock->addr + SOCK_MMCSD_DATA);
150 } 157 }
151 } 158 }
152 } 159 }
@@ -206,7 +213,7 @@ static void tifm_sd_exec(struct tifm_sd *host, struct mmc_command *cmd)
206 cmd_mask |= TIFM_MMCSD_READ; 213 cmd_mask |= TIFM_MMCSD_READ;
207 214
208 dev_dbg(&sock->dev, "executing opcode 0x%x, arg: 0x%x, mask: 0x%x\n", 215 dev_dbg(&sock->dev, "executing opcode 0x%x, arg: 0x%x, mask: 0x%x\n",
209 cmd->opcode, cmd->arg, cmd_mask); 216 cmd->opcode, cmd->arg, cmd_mask);
210 217
211 writel((cmd->arg >> 16) & 0xffff, sock->addr + SOCK_MMCSD_ARG_HIGH); 218 writel((cmd->arg >> 16) & 0xffff, sock->addr + SOCK_MMCSD_ARG_HIGH);
212 writel(cmd->arg & 0xffff, sock->addr + SOCK_MMCSD_ARG_LOW); 219 writel(cmd->arg & 0xffff, sock->addr + SOCK_MMCSD_ARG_LOW);
@@ -239,65 +246,78 @@ change_state:
239 tifm_sd_fetch_resp(cmd, sock); 246 tifm_sd_fetch_resp(cmd, sock);
240 if (cmd->data) { 247 if (cmd->data) {
241 host->state = BRS; 248 host->state = BRS;
242 } else 249 } else {
243 host->state = READY; 250 host->state = READY;
251 }
244 goto change_state; 252 goto change_state;
245 } 253 }
246 break; 254 break;
247 case BRS: 255 case BRS:
248 if (tifm_sd_transfer_data(sock, host, host_status)) { 256 if (tifm_sd_transfer_data(sock, host, host_status)) {
249 if (!host->req->stop) { 257 if (cmd->data->flags & MMC_DATA_WRITE) {
250 if (cmd->data->flags & MMC_DATA_WRITE) { 258 host->state = CARD;
251 host->state = CARD; 259 } else {
260 if (no_dma) {
261 if (host->req->stop) {
262 tifm_sd_exec(host, host->req->stop);
263 host->state = SCMD;
264 } else {
265 host->state = READY;
266 }
252 } else { 267 } else {
253 host->state = 268 host->state = FIFO;
254 host->buffer ? READY : FIFO;
255 } 269 }
256 goto change_state;
257 } 270 }
258 tifm_sd_exec(host, host->req->stop); 271 goto change_state;
259 host->state = SCMD;
260 } 272 }
261 break; 273 break;
262 case SCMD: 274 case SCMD:
263 if (host_status & TIFM_MMCSD_EOC) { 275 if (host_status & TIFM_MMCSD_EOC) {
264 tifm_sd_fetch_resp(host->req->stop, sock); 276 tifm_sd_fetch_resp(host->req->stop, sock);
265 if (cmd->error) { 277 host->state = READY;
266 host->state = READY;
267 } else if (cmd->data->flags & MMC_DATA_WRITE) {
268 host->state = CARD;
269 } else {
270 host->state = host->buffer ? READY : FIFO;
271 }
272 goto change_state; 278 goto change_state;
273 } 279 }
274 break; 280 break;
275 case CARD: 281 case CARD:
282 dev_dbg(&sock->dev, "waiting for CARD, have %zd blocks\n",
283 host->written_blocks);
276 if (!(host->flags & CARD_BUSY) 284 if (!(host->flags & CARD_BUSY)
277 && (host->written_blocks == cmd->data->blocks)) { 285 && (host->written_blocks == cmd->data->blocks)) {
278 host->state = host->buffer ? READY : FIFO; 286 if (no_dma) {
287 if (host->req->stop) {
288 tifm_sd_exec(host, host->req->stop);
289 host->state = SCMD;
290 } else {
291 host->state = READY;
292 }
293 } else {
294 host->state = FIFO;
295 }
279 goto change_state; 296 goto change_state;
280 } 297 }
281 break; 298 break;
282 case FIFO: 299 case FIFO:
283 if (host->flags & FIFO_RDY) { 300 if (host->flags & FIFO_RDY) {
284 host->state = READY;
285 host->flags &= ~FIFO_RDY; 301 host->flags &= ~FIFO_RDY;
302 if (host->req->stop) {
303 tifm_sd_exec(host, host->req->stop);
304 host->state = SCMD;
305 } else {
306 host->state = READY;
307 }
286 goto change_state; 308 goto change_state;
287 } 309 }
288 break; 310 break;
289 case READY: 311 case READY:
290 queue_work(sock->wq, &host->cmd_handler); 312 tasklet_schedule(&host->finish_tasklet);
291 return; 313 return;
292 } 314 }
293 315
294 queue_delayed_work(sock->wq, &host->abort_handler,
295 host->timeout_jiffies);
296} 316}
297 317
298/* Called from interrupt handler */ 318/* Called from interrupt handler */
299static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock, 319static void tifm_sd_signal_irq(struct tifm_dev *sock,
300 unsigned int sock_irq_status) 320 unsigned int sock_irq_status)
301{ 321{
302 struct tifm_sd *host; 322 struct tifm_sd *host;
303 unsigned int host_status = 0, fifo_status = 0; 323 unsigned int host_status = 0, fifo_status = 0;
@@ -305,7 +325,6 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock,
305 325
306 spin_lock(&sock->lock); 326 spin_lock(&sock->lock);
307 host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock)); 327 host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock));
308 cancel_delayed_work(&host->abort_handler);
309 328
310 if (sock_irq_status & FIFO_EVENT) { 329 if (sock_irq_status & FIFO_EVENT) {
311 fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS); 330 fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS);
@@ -318,19 +337,17 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock,
318 host_status = readl(sock->addr + SOCK_MMCSD_STATUS); 337 host_status = readl(sock->addr + SOCK_MMCSD_STATUS);
319 writel(host_status, sock->addr + SOCK_MMCSD_STATUS); 338 writel(host_status, sock->addr + SOCK_MMCSD_STATUS);
320 339
321 if (!(host->flags & HOST_REG))
322 queue_work(sock->wq, &host->cmd_handler);
323 if (!host->req) 340 if (!host->req)
324 goto done; 341 goto done;
325 342
326 if (host_status & TIFM_MMCSD_ERRMASK) { 343 if (host_status & TIFM_MMCSD_ERRMASK) {
327 if (host_status & TIFM_MMCSD_CERR) 344 if (host_status & TIFM_MMCSD_CERR)
328 error_code = MMC_ERR_FAILED; 345 error_code = MMC_ERR_FAILED;
329 else if (host_status & 346 else if (host_status
330 (TIFM_MMCSD_CTO | TIFM_MMCSD_DTO)) 347 & (TIFM_MMCSD_CTO | TIFM_MMCSD_DTO))
331 error_code = MMC_ERR_TIMEOUT; 348 error_code = MMC_ERR_TIMEOUT;
332 else if (host_status & 349 else if (host_status
333 (TIFM_MMCSD_CCRC | TIFM_MMCSD_DCRC)) 350 & (TIFM_MMCSD_CCRC | TIFM_MMCSD_DCRC))
334 error_code = MMC_ERR_BADCRC; 351 error_code = MMC_ERR_BADCRC;
335 352
336 writel(TIFM_FIFO_INT_SETALL, 353 writel(TIFM_FIFO_INT_SETALL,
@@ -340,12 +357,11 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock,
340 if (host->req->stop) { 357 if (host->req->stop) {
341 if (host->state == SCMD) { 358 if (host->state == SCMD) {
342 host->req->stop->error = error_code; 359 host->req->stop->error = error_code;
343 } else if(host->state == BRS) { 360 } else if (host->state == BRS
361 || host->state == CARD
362 || host->state == FIFO) {
344 host->req->cmd->error = error_code; 363 host->req->cmd->error = error_code;
345 tifm_sd_exec(host, host->req->stop); 364 tifm_sd_exec(host, host->req->stop);
346 queue_delayed_work(sock->wq,
347 &host->abort_handler,
348 host->timeout_jiffies);
349 host->state = SCMD; 365 host->state = SCMD;
350 goto done; 366 goto done;
351 } else { 367 } else {
@@ -359,8 +375,8 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock,
359 375
360 if (host_status & TIFM_MMCSD_CB) 376 if (host_status & TIFM_MMCSD_CB)
361 host->flags |= CARD_BUSY; 377 host->flags |= CARD_BUSY;
362 if ((host_status & TIFM_MMCSD_EOFB) && 378 if ((host_status & TIFM_MMCSD_EOFB)
363 (host->flags & CARD_BUSY)) { 379 && (host->flags & CARD_BUSY)) {
364 host->written_blocks++; 380 host->written_blocks++;
365 host->flags &= ~CARD_BUSY; 381 host->flags &= ~CARD_BUSY;
366 } 382 }
@@ -370,22 +386,22 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock,
370 tifm_sd_process_cmd(sock, host, host_status); 386 tifm_sd_process_cmd(sock, host, host_status);
371done: 387done:
372 dev_dbg(&sock->dev, "host_status %x, fifo_status %x\n", 388 dev_dbg(&sock->dev, "host_status %x, fifo_status %x\n",
373 host_status, fifo_status); 389 host_status, fifo_status);
374 spin_unlock(&sock->lock); 390 spin_unlock(&sock->lock);
375 return sock_irq_status;
376} 391}
377 392
378static void tifm_sd_prepare_data(struct tifm_sd *card, struct mmc_command *cmd) 393static void tifm_sd_prepare_data(struct tifm_sd *host, struct mmc_command *cmd)
379{ 394{
380 struct tifm_dev *sock = card->dev; 395 struct tifm_dev *sock = host->dev;
381 unsigned int dest_cnt; 396 unsigned int dest_cnt;
382 397
383 /* DMA style IO */ 398 /* DMA style IO */
384 399 dev_dbg(&sock->dev, "setting dma for %d blocks\n",
400 cmd->data->blocks);
385 writel(TIFM_FIFO_INT_SETALL, 401 writel(TIFM_FIFO_INT_SETALL,
386 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); 402 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
387 writel(ilog2(cmd->data->blksz) - 2, 403 writel(ilog2(cmd->data->blksz) - 2,
388 sock->addr + SOCK_FIFO_PAGE_SIZE); 404 sock->addr + SOCK_FIFO_PAGE_SIZE);
389 writel(TIFM_FIFO_ENABLE, sock->addr + SOCK_FIFO_CONTROL); 405 writel(TIFM_FIFO_ENABLE, sock->addr + SOCK_FIFO_CONTROL);
390 writel(TIFM_FIFO_INTMASK, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); 406 writel(TIFM_FIFO_INTMASK, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
391 407
@@ -399,7 +415,7 @@ static void tifm_sd_prepare_data(struct tifm_sd *card, struct mmc_command *cmd)
399 if (cmd->data->flags & MMC_DATA_WRITE) { 415 if (cmd->data->flags & MMC_DATA_WRITE) {
400 writel(TIFM_MMCSD_TXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); 416 writel(TIFM_MMCSD_TXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
401 writel(dest_cnt | TIFM_DMA_TX | TIFM_DMA_EN, 417 writel(dest_cnt | TIFM_DMA_TX | TIFM_DMA_EN,
402 sock->addr + SOCK_DMA_CONTROL); 418 sock->addr + SOCK_DMA_CONTROL);
403 } else { 419 } else {
404 writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); 420 writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
405 writel(dest_cnt | TIFM_DMA_EN, sock->addr + SOCK_DMA_CONTROL); 421 writel(dest_cnt | TIFM_DMA_EN, sock->addr + SOCK_DMA_CONTROL);
@@ -407,7 +423,7 @@ static void tifm_sd_prepare_data(struct tifm_sd *card, struct mmc_command *cmd)
407} 423}
408 424
409static void tifm_sd_set_data_timeout(struct tifm_sd *host, 425static void tifm_sd_set_data_timeout(struct tifm_sd *host,
410 struct mmc_data *data) 426 struct mmc_data *data)
411{ 427{
412 struct tifm_dev *sock = host->dev; 428 struct tifm_dev *sock = host->dev;
413 unsigned int data_timeout = data->timeout_clks; 429 unsigned int data_timeout = data->timeout_clks;
@@ -416,22 +432,21 @@ static void tifm_sd_set_data_timeout(struct tifm_sd *host,
416 return; 432 return;
417 433
418 data_timeout += data->timeout_ns / 434 data_timeout += data->timeout_ns /
419 ((1000000000 / host->clk_freq) * host->clk_div); 435 ((1000000000UL / host->clk_freq) * host->clk_div);
420 data_timeout *= 10; // call it fudge factor for now
421 436
422 if (data_timeout < 0xffff) { 437 if (data_timeout < 0xffff) {
423 writel((~TIFM_MMCSD_DPE) &
424 readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
425 sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
426 writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO); 438 writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO);
439 writel((~TIFM_MMCSD_DPE)
440 & readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
441 sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
427 } else { 442 } else {
428 writel(TIFM_MMCSD_DPE |
429 readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
430 sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
431 data_timeout = (data_timeout >> 10) + 1; 443 data_timeout = (data_timeout >> 10) + 1;
432 if(data_timeout > 0xffff) 444 if (data_timeout > 0xffff)
433 data_timeout = 0; /* set to unlimited */ 445 data_timeout = 0; /* set to unlimited */
434 writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO); 446 writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO);
447 writel(TIFM_MMCSD_DPE
448 | readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
449 sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
435 } 450 }
436} 451}
437 452
@@ -474,11 +489,10 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
474 } 489 }
475 490
476 host->req = mrq; 491 host->req = mrq;
492 mod_timer(&host->timer, jiffies + host->timeout_jiffies);
477 host->state = CMD; 493 host->state = CMD;
478 queue_delayed_work(sock->wq, &host->abort_handler,
479 host->timeout_jiffies);
480 writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL), 494 writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL),
481 sock->addr + SOCK_CONTROL); 495 sock->addr + SOCK_CONTROL);
482 tifm_sd_exec(host, mrq->cmd); 496 tifm_sd_exec(host, mrq->cmd);
483 spin_unlock_irqrestore(&sock->lock, flags); 497 spin_unlock_irqrestore(&sock->lock, flags);
484 return; 498 return;
@@ -493,9 +507,9 @@ err_out:
493 mmc_request_done(mmc, mrq); 507 mmc_request_done(mmc, mrq);
494} 508}
495 509
496static void tifm_sd_end_cmd(struct work_struct *work) 510static void tifm_sd_end_cmd(unsigned long data)
497{ 511{
498 struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); 512 struct tifm_sd *host = (struct tifm_sd*)data;
499 struct tifm_dev *sock = host->dev; 513 struct tifm_dev *sock = host->dev;
500 struct mmc_host *mmc = tifm_get_drvdata(sock); 514 struct mmc_host *mmc = tifm_get_drvdata(sock);
501 struct mmc_request *mrq; 515 struct mmc_request *mrq;
@@ -504,6 +518,7 @@ static void tifm_sd_end_cmd(struct work_struct *work)
504 518
505 spin_lock_irqsave(&sock->lock, flags); 519 spin_lock_irqsave(&sock->lock, flags);
506 520
521 del_timer(&host->timer);
507 mrq = host->req; 522 mrq = host->req;
508 host->req = NULL; 523 host->req = NULL;
509 host->state = IDLE; 524 host->state = IDLE;
@@ -517,8 +532,8 @@ static void tifm_sd_end_cmd(struct work_struct *work)
517 r_data = mrq->cmd->data; 532 r_data = mrq->cmd->data;
518 if (r_data) { 533 if (r_data) {
519 if (r_data->flags & MMC_DATA_WRITE) { 534 if (r_data->flags & MMC_DATA_WRITE) {
520 r_data->bytes_xfered = host->written_blocks * 535 r_data->bytes_xfered = host->written_blocks
521 r_data->blksz; 536 * r_data->blksz;
522 } else { 537 } else {
523 r_data->bytes_xfered = r_data->blocks - 538 r_data->bytes_xfered = r_data->blocks -
524 readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1; 539 readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1;
@@ -532,7 +547,7 @@ static void tifm_sd_end_cmd(struct work_struct *work)
532 } 547 }
533 548
534 writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL), 549 writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL),
535 sock->addr + SOCK_CONTROL); 550 sock->addr + SOCK_CONTROL);
536 551
537 spin_unlock_irqrestore(&sock->lock, flags); 552 spin_unlock_irqrestore(&sock->lock, flags);
538 mmc_request_done(mmc, mrq); 553 mmc_request_done(mmc, mrq);
@@ -544,15 +559,6 @@ static void tifm_sd_request_nodma(struct mmc_host *mmc, struct mmc_request *mrq)
544 struct tifm_dev *sock = host->dev; 559 struct tifm_dev *sock = host->dev;
545 unsigned long flags; 560 unsigned long flags;
546 struct mmc_data *r_data = mrq->cmd->data; 561 struct mmc_data *r_data = mrq->cmd->data;
547 char *t_buffer = NULL;
548
549 if (r_data) {
550 t_buffer = kmap(r_data->sg->page);
551 if (!t_buffer) {
552 printk(KERN_ERR DRIVER_NAME ": kmap failed\n");
553 goto err_out;
554 }
555 }
556 562
557 spin_lock_irqsave(&sock->lock, flags); 563 spin_lock_irqsave(&sock->lock, flags);
558 if (host->flags & EJECT) { 564 if (host->flags & EJECT) {
@@ -569,15 +575,14 @@ static void tifm_sd_request_nodma(struct mmc_host *mmc, struct mmc_request *mrq)
569 if (r_data) { 575 if (r_data) {
570 tifm_sd_set_data_timeout(host, r_data); 576 tifm_sd_set_data_timeout(host, r_data);
571 577
572 host->buffer = t_buffer + r_data->sg->offset; 578 host->buffer_size = mrq->cmd->data->blocks
573 host->buffer_size = mrq->cmd->data->blocks * 579 * mrq->cmd->data->blksz;
574 mrq->cmd->data->blksz;
575 580
576 writel(TIFM_MMCSD_BUFINT | 581 writel(TIFM_MMCSD_BUFINT
577 readl(sock->addr + SOCK_MMCSD_INT_ENABLE), 582 | readl(sock->addr + SOCK_MMCSD_INT_ENABLE),
578 sock->addr + SOCK_MMCSD_INT_ENABLE); 583 sock->addr + SOCK_MMCSD_INT_ENABLE);
579 writel(((TIFM_MMCSD_FIFO_SIZE - 1) << 8) | 584 writel(((TIFM_MMCSD_FIFO_SIZE - 1) << 8)
580 (TIFM_MMCSD_FIFO_SIZE - 1), 585 | (TIFM_MMCSD_FIFO_SIZE - 1),
581 sock->addr + SOCK_MMCSD_BUFFER_CONFIG); 586 sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
582 587
583 host->written_blocks = 0; 588 host->written_blocks = 0;
@@ -588,26 +593,22 @@ static void tifm_sd_request_nodma(struct mmc_host *mmc, struct mmc_request *mrq)
588 } 593 }
589 594
590 host->req = mrq; 595 host->req = mrq;
596 mod_timer(&host->timer, jiffies + host->timeout_jiffies);
591 host->state = CMD; 597 host->state = CMD;
592 queue_delayed_work(sock->wq, &host->abort_handler,
593 host->timeout_jiffies);
594 writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL), 598 writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL),
595 sock->addr + SOCK_CONTROL); 599 sock->addr + SOCK_CONTROL);
596 tifm_sd_exec(host, mrq->cmd); 600 tifm_sd_exec(host, mrq->cmd);
597 spin_unlock_irqrestore(&sock->lock, flags); 601 spin_unlock_irqrestore(&sock->lock, flags);
598 return; 602 return;
599 603
600err_out: 604err_out:
601 if (t_buffer)
602 kunmap(r_data->sg->page);
603
604 mrq->cmd->error = MMC_ERR_TIMEOUT; 605 mrq->cmd->error = MMC_ERR_TIMEOUT;
605 mmc_request_done(mmc, mrq); 606 mmc_request_done(mmc, mrq);
606} 607}
607 608
608static void tifm_sd_end_cmd_nodma(struct work_struct *work) 609static void tifm_sd_end_cmd_nodma(unsigned long data)
609{ 610{
610 struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); 611 struct tifm_sd *host = (struct tifm_sd*)data;
611 struct tifm_dev *sock = host->dev; 612 struct tifm_dev *sock = host->dev;
612 struct mmc_host *mmc = tifm_get_drvdata(sock); 613 struct mmc_host *mmc = tifm_get_drvdata(sock);
613 struct mmc_request *mrq; 614 struct mmc_request *mrq;
@@ -616,6 +617,7 @@ static void tifm_sd_end_cmd_nodma(struct work_struct *work)
616 617
617 spin_lock_irqsave(&sock->lock, flags); 618 spin_lock_irqsave(&sock->lock, flags);
618 619
620 del_timer(&host->timer);
619 mrq = host->req; 621 mrq = host->req;
620 host->req = NULL; 622 host->req = NULL;
621 host->state = IDLE; 623 host->state = IDLE;
@@ -633,8 +635,8 @@ static void tifm_sd_end_cmd_nodma(struct work_struct *work)
633 sock->addr + SOCK_MMCSD_INT_ENABLE); 635 sock->addr + SOCK_MMCSD_INT_ENABLE);
634 636
635 if (r_data->flags & MMC_DATA_WRITE) { 637 if (r_data->flags & MMC_DATA_WRITE) {
636 r_data->bytes_xfered = host->written_blocks * 638 r_data->bytes_xfered = host->written_blocks
637 r_data->blksz; 639 * r_data->blksz;
638 } else { 640 } else {
639 r_data->bytes_xfered = r_data->blocks - 641 r_data->bytes_xfered = r_data->blocks -
640 readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1; 642 readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1;
@@ -642,29 +644,44 @@ static void tifm_sd_end_cmd_nodma(struct work_struct *work)
642 r_data->bytes_xfered += r_data->blksz - 644 r_data->bytes_xfered += r_data->blksz -
643 readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) + 1; 645 readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) + 1;
644 } 646 }
645 host->buffer = NULL;
646 host->buffer_pos = 0; 647 host->buffer_pos = 0;
647 host->buffer_size = 0; 648 host->buffer_size = 0;
648 } 649 }
649 650
650 writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL), 651 writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL),
651 sock->addr + SOCK_CONTROL); 652 sock->addr + SOCK_CONTROL);
652 653
653 spin_unlock_irqrestore(&sock->lock, flags); 654 spin_unlock_irqrestore(&sock->lock, flags);
654 655
655 if (r_data)
656 kunmap(r_data->sg->page);
657
658 mmc_request_done(mmc, mrq); 656 mmc_request_done(mmc, mrq);
659} 657}
660 658
661static void tifm_sd_abort(struct work_struct *work) 659static void tifm_sd_terminate(struct tifm_sd *host)
660{
661 struct tifm_dev *sock = host->dev;
662 unsigned long flags;
663
664 writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
665 mmiowb();
666 spin_lock_irqsave(&sock->lock, flags);
667 host->flags |= EJECT;
668 if (host->req) {
669 writel(TIFM_FIFO_INT_SETALL,
670 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
671 writel(0, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
672 tasklet_schedule(&host->finish_tasklet);
673 }
674 spin_unlock_irqrestore(&sock->lock, flags);
675}
676
677static void tifm_sd_abort(unsigned long data)
662{ 678{
663 struct tifm_sd *host = 679 struct tifm_sd *host = (struct tifm_sd*)data;
664 container_of(work, struct tifm_sd, abort_handler.work);
665 680
666 printk(KERN_ERR DRIVER_NAME 681 printk(KERN_ERR DRIVER_NAME
667 ": card failed to respond for a long period of time"); 682 ": card failed to respond for a long period of time");
683
684 tifm_sd_terminate(host);
668 tifm_eject(host->dev); 685 tifm_eject(host->dev);
669} 686}
670 687
@@ -683,9 +700,9 @@ static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
683 writel(TIFM_MMCSD_4BBUS | readl(sock->addr + SOCK_MMCSD_CONFIG), 700 writel(TIFM_MMCSD_4BBUS | readl(sock->addr + SOCK_MMCSD_CONFIG),
684 sock->addr + SOCK_MMCSD_CONFIG); 701 sock->addr + SOCK_MMCSD_CONFIG);
685 } else { 702 } else {
686 writel((~TIFM_MMCSD_4BBUS) & 703 writel((~TIFM_MMCSD_4BBUS)
687 readl(sock->addr + SOCK_MMCSD_CONFIG), 704 & readl(sock->addr + SOCK_MMCSD_CONFIG),
688 sock->addr + SOCK_MMCSD_CONFIG); 705 sock->addr + SOCK_MMCSD_CONFIG);
689 } 706 }
690 707
691 if (ios->clock) { 708 if (ios->clock) {
@@ -704,23 +721,24 @@ static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
704 if ((20000000 / clk_div1) > (24000000 / clk_div2)) { 721 if ((20000000 / clk_div1) > (24000000 / clk_div2)) {
705 host->clk_freq = 20000000; 722 host->clk_freq = 20000000;
706 host->clk_div = clk_div1; 723 host->clk_div = clk_div1;
707 writel((~TIFM_CTRL_FAST_CLK) & 724 writel((~TIFM_CTRL_FAST_CLK)
708 readl(sock->addr + SOCK_CONTROL), 725 & readl(sock->addr + SOCK_CONTROL),
709 sock->addr + SOCK_CONTROL); 726 sock->addr + SOCK_CONTROL);
710 } else { 727 } else {
711 host->clk_freq = 24000000; 728 host->clk_freq = 24000000;
712 host->clk_div = clk_div2; 729 host->clk_div = clk_div2;
713 writel(TIFM_CTRL_FAST_CLK | 730 writel(TIFM_CTRL_FAST_CLK
714 readl(sock->addr + SOCK_CONTROL), 731 | readl(sock->addr + SOCK_CONTROL),
715 sock->addr + SOCK_CONTROL); 732 sock->addr + SOCK_CONTROL);
716 } 733 }
717 } else { 734 } else {
718 host->clk_div = 0; 735 host->clk_div = 0;
719 } 736 }
720 host->clk_div &= TIFM_MMCSD_CLKMASK; 737 host->clk_div &= TIFM_MMCSD_CLKMASK;
721 writel(host->clk_div | ((~TIFM_MMCSD_CLKMASK) & 738 writel(host->clk_div
722 readl(sock->addr + SOCK_MMCSD_CONFIG)), 739 | ((~TIFM_MMCSD_CLKMASK)
723 sock->addr + SOCK_MMCSD_CONFIG); 740 & readl(sock->addr + SOCK_MMCSD_CONFIG)),
741 sock->addr + SOCK_MMCSD_CONFIG);
724 742
725 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) 743 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
726 host->flags |= OPENDRAIN; 744 host->flags |= OPENDRAIN;
@@ -734,7 +752,7 @@ static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
734 // allow removal. 752 // allow removal.
735 if ((host->flags & EJECT) && ios->power_mode == MMC_POWER_OFF) { 753 if ((host->flags & EJECT) && ios->power_mode == MMC_POWER_OFF) {
736 host->flags |= EJECT_DONE; 754 host->flags |= EJECT_DONE;
737 wake_up_all(&host->can_eject); 755 wake_up_all(&host->notify);
738 } 756 }
739 757
740 spin_unlock_irqrestore(&sock->lock, flags); 758 spin_unlock_irqrestore(&sock->lock, flags);
@@ -762,20 +780,67 @@ static struct mmc_host_ops tifm_sd_ops = {
762 .get_ro = tifm_sd_ro 780 .get_ro = tifm_sd_ro
763}; 781};
764 782
765static void tifm_sd_register_host(struct work_struct *work) 783static int tifm_sd_initialize_host(struct tifm_sd *host)
766{ 784{
767 struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); 785 int rc;
786 unsigned int host_status = 0;
768 struct tifm_dev *sock = host->dev; 787 struct tifm_dev *sock = host->dev;
769 struct mmc_host *mmc = tifm_get_drvdata(sock);
770 unsigned long flags;
771 788
772 spin_lock_irqsave(&sock->lock, flags); 789 writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
773 host->flags |= HOST_REG; 790 mmiowb();
774 PREPARE_WORK(&host->cmd_handler, 791 host->clk_div = 61;
775 no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd); 792 host->clk_freq = 20000000;
776 spin_unlock_irqrestore(&sock->lock, flags); 793 writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL);
777 dev_dbg(&sock->dev, "adding host\n"); 794 writel(host->clk_div | TIFM_MMCSD_POWER,
778 mmc_add_host(mmc); 795 sock->addr + SOCK_MMCSD_CONFIG);
796
797 /* wait up to 0.51 sec for reset */
798 for (rc = 2; rc <= 256; rc <<= 1) {
799 if (1 & readl(sock->addr + SOCK_MMCSD_SYSTEM_STATUS)) {
800 rc = 0;
801 break;
802 }
803 msleep(rc);
804 }
805
806 if (rc) {
807 printk(KERN_ERR DRIVER_NAME
808 ": controller failed to reset\n");
809 return -ENODEV;
810 }
811
812 writel(0, sock->addr + SOCK_MMCSD_NUM_BLOCKS);
813 writel(host->clk_div | TIFM_MMCSD_POWER,
814 sock->addr + SOCK_MMCSD_CONFIG);
815 writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
816
817 // command timeout fixed to 64 clocks for now
818 writel(64, sock->addr + SOCK_MMCSD_COMMAND_TO);
819 writel(TIFM_MMCSD_INAB, sock->addr + SOCK_MMCSD_COMMAND);
820
821 /* INAB should take much less than reset */
822 for (rc = 1; rc <= 16; rc <<= 1) {
823 host_status = readl(sock->addr + SOCK_MMCSD_STATUS);
824 writel(host_status, sock->addr + SOCK_MMCSD_STATUS);
825 if (!(host_status & TIFM_MMCSD_ERRMASK)
826 && (host_status & TIFM_MMCSD_EOC)) {
827 rc = 0;
828 break;
829 }
830 msleep(rc);
831 }
832
833 if (rc) {
834 printk(KERN_ERR DRIVER_NAME
835 ": card not ready - probe failed on initialization\n");
836 return -ENODEV;
837 }
838
839 writel(TIFM_MMCSD_DATAMASK | TIFM_MMCSD_ERRMASK,
840 sock->addr + SOCK_MMCSD_INT_ENABLE);
841 mmiowb();
842
843 return 0;
779} 844}
780 845
781static int tifm_sd_probe(struct tifm_dev *sock) 846static int tifm_sd_probe(struct tifm_dev *sock)
@@ -784,8 +849,8 @@ static int tifm_sd_probe(struct tifm_dev *sock)
784 struct tifm_sd *host; 849 struct tifm_sd *host;
785 int rc = -EIO; 850 int rc = -EIO;
786 851
787 if (!(TIFM_SOCK_STATE_OCCUPIED & 852 if (!(TIFM_SOCK_STATE_OCCUPIED
788 readl(sock->addr + SOCK_PRESENT_STATE))) { 853 & readl(sock->addr + SOCK_PRESENT_STATE))) {
789 printk(KERN_WARNING DRIVER_NAME ": card gone, unexpectedly\n"); 854 printk(KERN_WARNING DRIVER_NAME ": card gone, unexpectedly\n");
790 return rc; 855 return rc;
791 } 856 }
@@ -795,109 +860,99 @@ static int tifm_sd_probe(struct tifm_dev *sock)
795 return -ENOMEM; 860 return -ENOMEM;
796 861
797 host = mmc_priv(mmc); 862 host = mmc_priv(mmc);
798 host->dev = sock;
799 host->clk_div = 61;
800 init_waitqueue_head(&host->can_eject);
801 INIT_WORK(&host->cmd_handler, tifm_sd_register_host);
802 INIT_DELAYED_WORK(&host->abort_handler, tifm_sd_abort);
803
804 tifm_set_drvdata(sock, mmc); 863 tifm_set_drvdata(sock, mmc);
805 sock->signal_irq = tifm_sd_signal_irq; 864 host->dev = sock;
806
807 host->clk_freq = 20000000;
808 host->timeout_jiffies = msecs_to_jiffies(1000); 865 host->timeout_jiffies = msecs_to_jiffies(1000);
809 866
867 init_waitqueue_head(&host->notify);
868 tasklet_init(&host->finish_tasklet,
869 no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd,
870 (unsigned long)host);
871 setup_timer(&host->timer, tifm_sd_abort, (unsigned long)host);
872
810 tifm_sd_ops.request = no_dma ? tifm_sd_request_nodma : tifm_sd_request; 873 tifm_sd_ops.request = no_dma ? tifm_sd_request_nodma : tifm_sd_request;
811 mmc->ops = &tifm_sd_ops; 874 mmc->ops = &tifm_sd_ops;
812 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 875 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
813 mmc->caps = MMC_CAP_4_BIT_DATA; 876 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE;
814 mmc->f_min = 20000000 / 60; 877 mmc->f_min = 20000000 / 60;
815 mmc->f_max = 24000000; 878 mmc->f_max = 24000000;
816 mmc->max_hw_segs = 1; 879 mmc->max_hw_segs = 1;
817 mmc->max_phys_segs = 1; 880 mmc->max_phys_segs = 1;
818 mmc->max_sectors = 127; 881 // limited by DMA counter - it's safer to stick with
819 mmc->max_seg_size = mmc->max_sectors << 11; //2k maximum hw block length 882 // block counter has 11 bits though
820 883 mmc->max_blk_count = 256;
821 writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE); 884 // 2k maximum hw block length
822 writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL); 885 mmc->max_blk_size = 2048;
823 writel(host->clk_div | TIFM_MMCSD_POWER, 886 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
824 sock->addr + SOCK_MMCSD_CONFIG); 887 mmc->max_seg_size = mmc->max_req_size;
888 sock->signal_irq = tifm_sd_signal_irq;
889 rc = tifm_sd_initialize_host(host);
825 890
826 for (rc = 0; rc < 50; rc++) { 891 if (!rc)
827 /* Wait for reset ack */ 892 rc = mmc_add_host(mmc);
828 if (1 & readl(sock->addr + SOCK_MMCSD_SYSTEM_STATUS)) { 893 if (rc)
829 rc = 0; 894 goto out_free_mmc;
830 break;
831 }
832 msleep(10);
833 }
834 895
835 if (rc) { 896 return 0;
836 printk(KERN_ERR DRIVER_NAME 897out_free_mmc:
837 ": card not ready - probe failed\n"); 898 mmc_free_host(mmc);
838 mmc_free_host(mmc); 899 return rc;
839 return -ENODEV; 900}
840 }
841 901
842 writel(0, sock->addr + SOCK_MMCSD_NUM_BLOCKS); 902static void tifm_sd_remove(struct tifm_dev *sock)
843 writel(host->clk_div | TIFM_MMCSD_POWER, 903{
844 sock->addr + SOCK_MMCSD_CONFIG); 904 struct mmc_host *mmc = tifm_get_drvdata(sock);
845 writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); 905 struct tifm_sd *host = mmc_priv(mmc);
846 writel(TIFM_MMCSD_DATAMASK | TIFM_MMCSD_ERRMASK,
847 sock->addr + SOCK_MMCSD_INT_ENABLE);
848 906
849 writel(64, sock->addr + SOCK_MMCSD_COMMAND_TO); // command timeout 64 clocks for now 907 del_timer_sync(&host->timer);
850 writel(TIFM_MMCSD_INAB, sock->addr + SOCK_MMCSD_COMMAND); 908 tifm_sd_terminate(host);
851 writel(host->clk_div | TIFM_MMCSD_POWER, 909 wait_event_timeout(host->notify, host->flags & EJECT_DONE,
852 sock->addr + SOCK_MMCSD_CONFIG); 910 host->timeout_jiffies);
911 tasklet_kill(&host->finish_tasklet);
912 mmc_remove_host(mmc);
853 913
854 queue_delayed_work(sock->wq, &host->abort_handler, 914 /* The meaning of the bit majority in this constant is unknown. */
855 host->timeout_jiffies); 915 writel(0xfff8 & readl(sock->addr + SOCK_CONTROL),
916 sock->addr + SOCK_CONTROL);
856 917
857 return 0; 918 tifm_set_drvdata(sock, NULL);
919 mmc_free_host(mmc);
858} 920}
859 921
860static int tifm_sd_host_is_down(struct tifm_dev *sock) 922#ifdef CONFIG_PM
923
924static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state)
861{ 925{
862 struct mmc_host *mmc = tifm_get_drvdata(sock); 926 struct mmc_host *mmc = tifm_get_drvdata(sock);
863 struct tifm_sd *host = mmc_priv(mmc); 927 int rc;
864 unsigned long flags;
865 int rc = 0;
866 928
867 spin_lock_irqsave(&sock->lock, flags); 929 rc = mmc_suspend_host(mmc, state);
868 rc = (host->flags & EJECT_DONE); 930 /* The meaning of the bit majority in this constant is unknown. */
869 spin_unlock_irqrestore(&sock->lock, flags); 931 writel(0xfff8 & readl(sock->addr + SOCK_CONTROL),
932 sock->addr + SOCK_CONTROL);
870 return rc; 933 return rc;
871} 934}
872 935
873static void tifm_sd_remove(struct tifm_dev *sock) 936static int tifm_sd_resume(struct tifm_dev *sock)
874{ 937{
875 struct mmc_host *mmc = tifm_get_drvdata(sock); 938 struct mmc_host *mmc = tifm_get_drvdata(sock);
876 struct tifm_sd *host = mmc_priv(mmc); 939 struct tifm_sd *host = mmc_priv(mmc);
877 unsigned long flags;
878 940
879 spin_lock_irqsave(&sock->lock, flags); 941 if (sock->media_id != FM_SD
880 host->flags |= EJECT; 942 || tifm_sd_initialize_host(host)) {
881 if (host->req) 943 tifm_eject(sock);
882 queue_work(sock->wq, &host->cmd_handler); 944 return 0;
883 spin_unlock_irqrestore(&sock->lock, flags); 945 } else {
884 wait_event_timeout(host->can_eject, tifm_sd_host_is_down(sock), 946 return mmc_resume_host(mmc);
885 host->timeout_jiffies); 947 }
948}
886 949
887 if (host->flags & HOST_REG) 950#else
888 mmc_remove_host(mmc);
889 951
890 /* The meaning of the bit majority in this constant is unknown. */ 952#define tifm_sd_suspend NULL
891 writel(0xfff8 & readl(sock->addr + SOCK_CONTROL), 953#define tifm_sd_resume NULL
892 sock->addr + SOCK_CONTROL);
893 writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
894 writel(TIFM_FIFO_INT_SETALL,
895 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
896 writel(0, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
897 954
898 tifm_set_drvdata(sock, NULL); 955#endif /* CONFIG_PM */
899 mmc_free_host(mmc);
900}
901 956
902static tifm_media_id tifm_sd_id_tbl[] = { 957static tifm_media_id tifm_sd_id_tbl[] = {
903 FM_SD, 0 958 FM_SD, 0
@@ -910,7 +965,9 @@ static struct tifm_driver tifm_sd_driver = {
910 }, 965 },
911 .id_table = tifm_sd_id_tbl, 966 .id_table = tifm_sd_id_tbl,
912 .probe = tifm_sd_probe, 967 .probe = tifm_sd_probe,
913 .remove = tifm_sd_remove 968 .remove = tifm_sd_remove,
969 .suspend = tifm_sd_suspend,
970 .resume = tifm_sd_resume
914}; 971};
915 972
916static int __init tifm_sd_init(void) 973static int __init tifm_sd_init(void)
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
index 7a282672f8e9..a44d8777ab9f 100644
--- a/drivers/mmc/wbsd.c
+++ b/drivers/mmc/wbsd.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver 2 * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver
3 * 3 *
4 * Copyright (C) 2004-2005 Pierre Ossman, All Rights Reserved. 4 * Copyright (C) 2004-2006 Pierre Ossman, All Rights Reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -272,16 +272,9 @@ static inline int wbsd_next_sg(struct wbsd_host *host)
272 return host->num_sg; 272 return host->num_sg;
273} 273}
274 274
275static inline char *wbsd_kmap_sg(struct wbsd_host *host) 275static inline char *wbsd_sg_to_buffer(struct wbsd_host *host)
276{ 276{
277 host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ) + 277 return page_address(host->cur_sg->page) + host->cur_sg->offset;
278 host->cur_sg->offset;
279 return host->mapped_sg;
280}
281
282static inline void wbsd_kunmap_sg(struct wbsd_host *host)
283{
284 kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ);
285} 278}
286 279
287static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data) 280static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
@@ -302,12 +295,11 @@ static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
302 * we do not transfer too much. 295 * we do not transfer too much.
303 */ 296 */
304 for (i = 0; i < len; i++) { 297 for (i = 0; i < len; i++) {
305 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset; 298 sgbuf = page_address(sg[i].page) + sg[i].offset;
306 if (size < sg[i].length) 299 if (size < sg[i].length)
307 memcpy(dmabuf, sgbuf, size); 300 memcpy(dmabuf, sgbuf, size);
308 else 301 else
309 memcpy(dmabuf, sgbuf, sg[i].length); 302 memcpy(dmabuf, sgbuf, sg[i].length);
310 kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
311 dmabuf += sg[i].length; 303 dmabuf += sg[i].length;
312 304
313 if (size < sg[i].length) 305 if (size < sg[i].length)
@@ -347,7 +339,7 @@ static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data)
347 * we do not transfer too much. 339 * we do not transfer too much.
348 */ 340 */
349 for (i = 0; i < len; i++) { 341 for (i = 0; i < len; i++) {
350 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset; 342 sgbuf = page_address(sg[i].page) + sg[i].offset;
351 if (size < sg[i].length) 343 if (size < sg[i].length)
352 memcpy(sgbuf, dmabuf, size); 344 memcpy(sgbuf, dmabuf, size);
353 else 345 else
@@ -497,7 +489,7 @@ static void wbsd_empty_fifo(struct wbsd_host *host)
497 if (data->bytes_xfered == host->size) 489 if (data->bytes_xfered == host->size)
498 return; 490 return;
499 491
500 buffer = wbsd_kmap_sg(host) + host->offset; 492 buffer = wbsd_sg_to_buffer(host) + host->offset;
501 493
502 /* 494 /*
503 * Drain the fifo. This has a tendency to loop longer 495 * Drain the fifo. This has a tendency to loop longer
@@ -526,17 +518,13 @@ static void wbsd_empty_fifo(struct wbsd_host *host)
526 /* 518 /*
527 * Transfer done? 519 * Transfer done?
528 */ 520 */
529 if (data->bytes_xfered == host->size) { 521 if (data->bytes_xfered == host->size)
530 wbsd_kunmap_sg(host);
531 return; 522 return;
532 }
533 523
534 /* 524 /*
535 * End of scatter list entry? 525 * End of scatter list entry?
536 */ 526 */
537 if (host->remain == 0) { 527 if (host->remain == 0) {
538 wbsd_kunmap_sg(host);
539
540 /* 528 /*
541 * Get next entry. Check if last. 529 * Get next entry. Check if last.
542 */ 530 */
@@ -554,13 +542,11 @@ static void wbsd_empty_fifo(struct wbsd_host *host)
554 return; 542 return;
555 } 543 }
556 544
557 buffer = wbsd_kmap_sg(host); 545 buffer = wbsd_sg_to_buffer(host);
558 } 546 }
559 } 547 }
560 } 548 }
561 549
562 wbsd_kunmap_sg(host);
563
564 /* 550 /*
565 * This is a very dirty hack to solve a 551 * This is a very dirty hack to solve a
566 * hardware problem. The chip doesn't trigger 552 * hardware problem. The chip doesn't trigger
@@ -583,7 +569,7 @@ static void wbsd_fill_fifo(struct wbsd_host *host)
583 if (data->bytes_xfered == host->size) 569 if (data->bytes_xfered == host->size)
584 return; 570 return;
585 571
586 buffer = wbsd_kmap_sg(host) + host->offset; 572 buffer = wbsd_sg_to_buffer(host) + host->offset;
587 573
588 /* 574 /*
589 * Fill the fifo. This has a tendency to loop longer 575 * Fill the fifo. This has a tendency to loop longer
@@ -612,17 +598,13 @@ static void wbsd_fill_fifo(struct wbsd_host *host)
612 /* 598 /*
613 * Transfer done? 599 * Transfer done?
614 */ 600 */
615 if (data->bytes_xfered == host->size) { 601 if (data->bytes_xfered == host->size)
616 wbsd_kunmap_sg(host);
617 return; 602 return;
618 }
619 603
620 /* 604 /*
621 * End of scatter list entry? 605 * End of scatter list entry?
622 */ 606 */
623 if (host->remain == 0) { 607 if (host->remain == 0) {
624 wbsd_kunmap_sg(host);
625
626 /* 608 /*
627 * Get next entry. Check if last. 609 * Get next entry. Check if last.
628 */ 610 */
@@ -640,13 +622,11 @@ static void wbsd_fill_fifo(struct wbsd_host *host)
640 return; 622 return;
641 } 623 }
642 624
643 buffer = wbsd_kmap_sg(host); 625 buffer = wbsd_sg_to_buffer(host);
644 } 626 }
645 } 627 }
646 } 628 }
647 629
648 wbsd_kunmap_sg(host);
649
650 /* 630 /*
651 * The controller stops sending interrupts for 631 * The controller stops sending interrupts for
652 * 'FIFO empty' under certain conditions. So we 632 * 'FIFO empty' under certain conditions. So we
@@ -910,6 +890,45 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
910 */ 890 */
911 if (cmd->data && (cmd->error == MMC_ERR_NONE)) { 891 if (cmd->data && (cmd->error == MMC_ERR_NONE)) {
912 /* 892 /*
893 * The hardware is so delightfully stupid that it has a list
894 * of "data" commands. If a command isn't on this list, it'll
895 * just go back to the idle state and won't send any data
896 * interrupts.
897 */
898 switch (cmd->opcode) {
899 case 11:
900 case 17:
901 case 18:
902 case 20:
903 case 24:
904 case 25:
905 case 26:
906 case 27:
907 case 30:
908 case 42:
909 case 56:
910 break;
911
912 /* ACMDs. We don't keep track of state, so we just treat them
913 * like any other command. */
914 case 51:
915 break;
916
917 default:
918#ifdef CONFIG_MMC_DEBUG
919 printk(KERN_WARNING "%s: Data command %d is not "
920 "supported by this controller.\n",
921 mmc_hostname(host->mmc), cmd->opcode);
922#endif
923 cmd->data->error = MMC_ERR_INVALID;
924
925 if (cmd->data->stop)
926 wbsd_send_command(host, cmd->data->stop);
927
928 goto done;
929 };
930
931 /*
913 * Dirty fix for hardware bug. 932 * Dirty fix for hardware bug.
914 */ 933 */
915 if (host->dma == -1) 934 if (host->dma == -1)
@@ -1343,16 +1362,27 @@ static int __devinit wbsd_alloc_mmc(struct device *dev)
1343 mmc->max_phys_segs = 128; 1362 mmc->max_phys_segs = 128;
1344 1363
1345 /* 1364 /*
1346 * Maximum number of sectors in one transfer. Also limited by 64kB 1365 * Maximum request size. Also limited by 64KiB buffer.
1347 * buffer.
1348 */ 1366 */
1349 mmc->max_sectors = 128; 1367 mmc->max_req_size = 65536;
1350 1368
1351 /* 1369 /*
1352 * Maximum segment size. Could be one segment with the maximum number 1370 * Maximum segment size. Could be one segment with the maximum number
1353 * of segments. 1371 * of bytes.
1372 */
1373 mmc->max_seg_size = mmc->max_req_size;
1374
1375 /*
1376 * Maximum block size. We have 12 bits (= 4095) but have to subtract
1377 * space for CRC. So the maximum is 4095 - 4*2 = 4087.
1378 */
1379 mmc->max_blk_size = 4087;
1380
1381 /*
1382 * Maximum block count. There is no real limit so the maximum
1383 * request size will be the only restriction.
1354 */ 1384 */
1355 mmc->max_seg_size = mmc->max_sectors * 512; 1385 mmc->max_blk_count = mmc->max_req_size;
1356 1386
1357 dev_set_drvdata(dev, mmc); 1387 dev_set_drvdata(dev, mmc);
1358 1388
diff --git a/drivers/mmc/wbsd.h b/drivers/mmc/wbsd.h
index 6072993f01e3..d06718b0e2ab 100644
--- a/drivers/mmc/wbsd.h
+++ b/drivers/mmc/wbsd.h
@@ -154,7 +154,6 @@ struct wbsd_host
154 154
155 struct scatterlist* cur_sg; /* Current SG entry */ 155 struct scatterlist* cur_sg; /* Current SG entry */
156 unsigned int num_sg; /* Number of entries left */ 156 unsigned int num_sg; /* Number of entries left */
157 void* mapped_sg; /* vaddr of mapped sg */
158 157
159 unsigned int offset; /* Offset into current entry */ 158 unsigned int offset; /* Offset into current entry */
160 unsigned int remain; /* Data left in curren entry */ 159 unsigned int remain; /* Data left in curren entry */
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 80bdcf846234..716a47210aa3 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -792,8 +792,7 @@ static void poll_vortex(struct net_device *dev)
792{ 792{
793 struct vortex_private *vp = netdev_priv(dev); 793 struct vortex_private *vp = netdev_priv(dev);
794 unsigned long flags; 794 unsigned long flags;
795 local_save_flags(flags); 795 local_irq_save(flags);
796 local_irq_disable();
797 (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev); 796 (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
798 local_irq_restore(flags); 797 local_irq_restore(flags);
799} 798}
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8aa8dd02b910..ad92b6a76ee6 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -190,7 +190,7 @@ config MII
190 190
191config MACB 191config MACB
192 tristate "Atmel MACB support" 192 tristate "Atmel MACB support"
193 depends on NET_ETHERNET && AVR32 193 depends on NET_ETHERNET && (AVR32 || ARCH_AT91SAM9260 || ARCH_AT91SAM9263)
194 select MII 194 select MII
195 help 195 help
196 The Atmel MACB ethernet interface is found on many AT32 and AT91 196 The Atmel MACB ethernet interface is found on many AT32 and AT91
@@ -235,16 +235,6 @@ config BMAC
235 To compile this driver as a module, choose M here: the module 235 To compile this driver as a module, choose M here: the module
236 will be called bmac. 236 will be called bmac.
237 237
238config OAKNET
239 tristate "National DP83902AV (Oak ethernet) support"
240 depends on NET_ETHERNET && PPC && BROKEN
241 select CRC32
242 help
243 Say Y if your machine has this type of Ethernet network card.
244
245 To compile this driver as a module, choose M here: the module
246 will be called oaknet.
247
248config ARIADNE 238config ARIADNE
249 tristate "Ariadne support" 239 tristate "Ariadne support"
250 depends on NET_ETHERNET && ZORRO 240 depends on NET_ETHERNET && ZORRO
@@ -1155,21 +1145,6 @@ config SEEQ8005
1155 <file:Documentation/networking/net-modules.txt>. The module 1145 <file:Documentation/networking/net-modules.txt>. The module
1156 will be called seeq8005. 1146 will be called seeq8005.
1157 1147
1158config SKMC
1159 tristate "SKnet MCA support"
1160 depends on NET_ETHERNET && MCA && BROKEN
1161 ---help---
1162 These are Micro Channel Ethernet adapters. You need to say Y to "MCA
1163 support" in order to use this driver. Supported cards are the SKnet
1164 Junior MC2 and the SKnet MC2(+). The driver automatically
1165 distinguishes between the two cards. Note that using multiple boards
1166 of different type hasn't been tested with this driver. Say Y if you
1167 have one of these Ethernet adapters.
1168
1169 To compile this driver as a module, choose M here and read
1170 <file:Documentation/networking/net-modules.txt>. The module
1171 will be called sk_mca.
1172
1173config NE2_MCA 1148config NE2_MCA
1174 tristate "NE/2 (ne2000 MCA version) support" 1149 tristate "NE/2 (ne2000 MCA version) support"
1175 depends on NET_ETHERNET && MCA_LEGACY 1150 depends on NET_ETHERNET && MCA_LEGACY
@@ -1788,6 +1763,18 @@ config LAN_SAA9730
1788 workstations. 1763 workstations.
1789 See <http://www.semiconductors.philips.com/pip/SAA9730_flyer_1>. 1764 See <http://www.semiconductors.philips.com/pip/SAA9730_flyer_1>.
1790 1765
1766config SC92031
1767 tristate "Silan SC92031 PCI Fast Ethernet Adapter driver (EXPERIMENTAL)"
1768 depends on NET_PCI && PCI && EXPERIMENTAL
1769 select CRC32
1770 ---help---
1771 This is a driver for the Fast Ethernet PCI network cards based on
1772 the Silan SC92031 chip (sometimes also called Rsltek 8139D). If you
1773 have one of these, say Y here.
1774
1775 To compile this driver as a module, choose M here: the module
1776 will be called sc92031. This is recommended.
1777
1791config NET_POCKET 1778config NET_POCKET
1792 bool "Pocket and portable adapters" 1779 bool "Pocket and portable adapters"
1793 depends on NET_ETHERNET && PARPORT 1780 depends on NET_ETHERNET && PARPORT
@@ -2392,6 +2379,24 @@ config CHELSIO_T1_NAPI
2392 NAPI is a driver API designed to reduce CPU and interrupt load 2379 NAPI is a driver API designed to reduce CPU and interrupt load
2393 when the driver is receiving lots of packets from the card. 2380 when the driver is receiving lots of packets from the card.
2394 2381
2382config CHELSIO_T3
2383 tristate "Chelsio Communications T3 10Gb Ethernet support"
2384 depends on PCI
2385 help
2386 This driver supports Chelsio T3-based gigabit and 10Gb Ethernet
2387 adapters.
2388
2389 For general information about Chelsio and our products, visit
2390 our website at <http://www.chelsio.com>.
2391
2392 For customer support, please visit our customer support page at
2393 <http://www.chelsio.com/support.htm>.
2394
2395 Please send feedback to <linux-bugs@chelsio.com>.
2396
2397 To compile this driver as a module, choose M here: the module
2398 will be called cxgb3.
2399
2395config EHEA 2400config EHEA
2396 tristate "eHEA Ethernet support" 2401 tristate "eHEA Ethernet support"
2397 depends on IBMEBUS 2402 depends on IBMEBUS
@@ -2488,6 +2493,13 @@ config NETXEN_NIC
2488 help 2493 help
2489 This enables the support for NetXen's Gigabit Ethernet card. 2494 This enables the support for NetXen's Gigabit Ethernet card.
2490 2495
2496config PASEMI_MAC
2497 tristate "PA Semi 1/10Gbit MAC"
2498 depends on PPC64 && PCI
2499 help
2500 This driver supports the on-chip 1/10Gbit Ethernet controller on
2501 PA Semi's PWRficient line of chips.
2502
2491endmenu 2503endmenu
2492 2504
2493source "drivers/net/tokenring/Kconfig" 2505source "drivers/net/tokenring/Kconfig"
@@ -2541,6 +2553,7 @@ config DEFXX
2541config SKFP 2553config SKFP
2542 tristate "SysKonnect FDDI PCI support" 2554 tristate "SysKonnect FDDI PCI support"
2543 depends on FDDI && PCI 2555 depends on FDDI && PCI
2556 select BITREVERSE
2544 ---help--- 2557 ---help---
2545 Say Y here if you have a SysKonnect FDDI PCI adapter. 2558 Say Y here if you have a SysKonnect FDDI PCI adapter.
2546 The following adapters are supported by this driver: 2559 The following adapters are supported by this driver:
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 4c0d4e5ce42b..0878e3df5174 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_E1000) += e1000/
6obj-$(CONFIG_IBM_EMAC) += ibm_emac/ 6obj-$(CONFIG_IBM_EMAC) += ibm_emac/
7obj-$(CONFIG_IXGB) += ixgb/ 7obj-$(CONFIG_IXGB) += ixgb/
8obj-$(CONFIG_CHELSIO_T1) += chelsio/ 8obj-$(CONFIG_CHELSIO_T1) += chelsio/
9obj-$(CONFIG_CHELSIO_T3) += cxgb3/
9obj-$(CONFIG_EHEA) += ehea/ 10obj-$(CONFIG_EHEA) += ehea/
10obj-$(CONFIG_BONDING) += bonding/ 11obj-$(CONFIG_BONDING) += bonding/
11obj-$(CONFIG_GIANFAR) += gianfar_driver.o 12obj-$(CONFIG_GIANFAR) += gianfar_driver.o
@@ -36,8 +37,6 @@ obj-$(CONFIG_CASSINI) += cassini.o
36obj-$(CONFIG_MACE) += mace.o 37obj-$(CONFIG_MACE) += mace.o
37obj-$(CONFIG_BMAC) += bmac.o 38obj-$(CONFIG_BMAC) += bmac.o
38 39
39obj-$(CONFIG_OAKNET) += oaknet.o 8390.o
40
41obj-$(CONFIG_DGRS) += dgrs.o 40obj-$(CONFIG_DGRS) += dgrs.o
42obj-$(CONFIG_VORTEX) += 3c59x.o 41obj-$(CONFIG_VORTEX) += 3c59x.o
43obj-$(CONFIG_TYPHOON) += typhoon.o 42obj-$(CONFIG_TYPHOON) += typhoon.o
@@ -137,7 +136,6 @@ obj-$(CONFIG_AT1700) += at1700.o
137obj-$(CONFIG_EL1) += 3c501.o 136obj-$(CONFIG_EL1) += 3c501.o
138obj-$(CONFIG_EL16) += 3c507.o 137obj-$(CONFIG_EL16) += 3c507.o
139obj-$(CONFIG_ELMC) += 3c523.o 138obj-$(CONFIG_ELMC) += 3c523.o
140obj-$(CONFIG_SKMC) += sk_mca.o
141obj-$(CONFIG_IBMLANA) += ibmlana.o 139obj-$(CONFIG_IBMLANA) += ibmlana.o
142obj-$(CONFIG_ELMC_II) += 3c527.o 140obj-$(CONFIG_ELMC_II) += 3c527.o
143obj-$(CONFIG_EL3) += 3c509.o 141obj-$(CONFIG_EL3) += 3c509.o
@@ -160,6 +158,7 @@ obj-$(CONFIG_APRICOT) += 82596.o
160obj-$(CONFIG_LASI_82596) += lasi_82596.o 158obj-$(CONFIG_LASI_82596) += lasi_82596.o
161obj-$(CONFIG_MVME16x_NET) += 82596.o 159obj-$(CONFIG_MVME16x_NET) += 82596.o
162obj-$(CONFIG_BVME6000_NET) += 82596.o 160obj-$(CONFIG_BVME6000_NET) += 82596.o
161obj-$(CONFIG_SC92031) += sc92031.o
163 162
164# This is also a 82596 and should probably be merged 163# This is also a 82596 and should probably be merged
165obj-$(CONFIG_LP486E) += lp486e.o 164obj-$(CONFIG_LP486E) += lp486e.o
@@ -196,6 +195,7 @@ obj-$(CONFIG_SMC91X) += smc91x.o
196obj-$(CONFIG_SMC911X) += smc911x.o 195obj-$(CONFIG_SMC911X) += smc911x.o
197obj-$(CONFIG_DM9000) += dm9000.o 196obj-$(CONFIG_DM9000) += dm9000.o
198obj-$(CONFIG_FEC_8XX) += fec_8xx/ 197obj-$(CONFIG_FEC_8XX) += fec_8xx/
198obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o
199 199
200obj-$(CONFIG_MACB) += macb.o 200obj-$(CONFIG_MACB) += macb.o
201 201
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 9305eb9b1b98..dd8ed456c8b2 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -59,7 +59,6 @@ extern struct net_device *wavelan_probe(int unit);
59extern struct net_device *arlan_probe(int unit); 59extern struct net_device *arlan_probe(int unit);
60extern struct net_device *el16_probe(int unit); 60extern struct net_device *el16_probe(int unit);
61extern struct net_device *elmc_probe(int unit); 61extern struct net_device *elmc_probe(int unit);
62extern struct net_device *skmca_probe(int unit);
63extern struct net_device *elplus_probe(int unit); 62extern struct net_device *elplus_probe(int unit);
64extern struct net_device *ac3200_probe(int unit); 63extern struct net_device *ac3200_probe(int unit);
65extern struct net_device *es_probe(int unit); 64extern struct net_device *es_probe(int unit);
@@ -153,9 +152,6 @@ static struct devprobe2 mca_probes[] __initdata = {
153#ifdef CONFIG_ELMC_II /* 3c527 */ 152#ifdef CONFIG_ELMC_II /* 3c527 */
154 {mc32_probe, 0}, 153 {mc32_probe, 0},
155#endif 154#endif
156#ifdef CONFIG_SKMC /* SKnet Microchannel */
157 {skmca_probe, 0},
158#endif
159 {NULL, 0}, 155 {NULL, 0},
160}; 156};
161 157
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 18896f24d407..9c399aaefbdd 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -1334,8 +1334,7 @@ err_no_interrupt:
1334static void amd8111e_poll(struct net_device *dev) 1334static void amd8111e_poll(struct net_device *dev)
1335{ 1335{
1336 unsigned long flags; 1336 unsigned long flags;
1337 local_save_flags(flags); 1337 local_irq_save(flags);
1338 local_irq_disable();
1339 amd8111e_interrupt(0, dev); 1338 amd8111e_interrupt(0, dev);
1340 local_irq_restore(flags); 1339 local_irq_restore(flags);
1341} 1340}
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index fada15d959de..1621b8fe35cf 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -641,7 +641,7 @@ static void at91ether_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo
641{ 641{
642 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 642 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
643 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 643 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
644 strlcpy(info->bus_info, dev->class_dev.dev->bus_id, sizeof(info->bus_info)); 644 strlcpy(info->bus_info, dev->dev.parent->bus_id, sizeof(info->bus_info));
645} 645}
646 646
647static const struct ethtool_ops at91ether_ethtool_ops = { 647static const struct ethtool_ops at91ether_ethtool_ops = {
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
index f3faa4fe58e7..72c41f5907f2 100644
--- a/drivers/net/arm/etherh.c
+++ b/drivers/net/arm/etherh.c
@@ -587,7 +587,7 @@ static void etherh_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
587{ 587{
588 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 588 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
589 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 589 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
590 strlcpy(info->bus_info, dev->class_dev.dev->bus_id, 590 strlcpy(info->bus_info, dev->dev.parent->bus_id,
591 sizeof(info->bus_info)); 591 sizeof(info->bus_info));
592} 592}
593 593
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 303a8d94ad4b..5ff7882297d6 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -721,7 +721,7 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
721 struct ring_info *src_map, *dest_map; 721 struct ring_info *src_map, *dest_map;
722 struct rx_header *rh; 722 struct rx_header *rh;
723 int dest_idx; 723 int dest_idx;
724 u32 ctrl; 724 __le32 ctrl;
725 725
726 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1); 726 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
727 dest_desc = &bp->rx_ring[dest_idx]; 727 dest_desc = &bp->rx_ring[dest_idx];
@@ -783,7 +783,7 @@ static int b44_rx(struct b44 *bp, int budget)
783 RX_PKT_BUF_SZ, 783 RX_PKT_BUF_SZ,
784 PCI_DMA_FROMDEVICE); 784 PCI_DMA_FROMDEVICE);
785 rh = (struct rx_header *) skb->data; 785 rh = (struct rx_header *) skb->data;
786 len = cpu_to_le16(rh->len); 786 len = le16_to_cpu(rh->len);
787 if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) || 787 if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
788 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) { 788 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
789 drop_it: 789 drop_it:
@@ -799,7 +799,7 @@ static int b44_rx(struct b44 *bp, int budget)
799 do { 799 do {
800 udelay(2); 800 udelay(2);
801 barrier(); 801 barrier();
802 len = cpu_to_le16(rh->len); 802 len = le16_to_cpu(rh->len);
803 } while (len == 0 && i++ < 5); 803 } while (len == 0 && i++ < 5);
804 if (len == 0) 804 if (len == 0)
805 goto drop_it; 805 goto drop_it;
@@ -2061,7 +2061,7 @@ out:
2061static int b44_read_eeprom(struct b44 *bp, u8 *data) 2061static int b44_read_eeprom(struct b44 *bp, u8 *data)
2062{ 2062{
2063 long i; 2063 long i;
2064 u16 *ptr = (u16 *) data; 2064 __le16 *ptr = (__le16 *) data;
2065 2065
2066 for (i = 0; i < 128; i += 2) 2066 for (i = 0; i < 128; i += 2)
2067 ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i)); 2067 ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
diff --git a/drivers/net/b44.h b/drivers/net/b44.h
index 4944507fad23..18fc13336628 100644
--- a/drivers/net/b44.h
+++ b/drivers/net/b44.h
@@ -308,8 +308,8 @@
308#define MII_TLEDCTRL_ENABLE 0x0040 308#define MII_TLEDCTRL_ENABLE 0x0040
309 309
310struct dma_desc { 310struct dma_desc {
311 u32 ctrl; 311 __le32 ctrl;
312 u32 addr; 312 __le32 addr;
313}; 313};
314 314
315/* There are only 12 bits in the DMA engine for descriptor offsetting 315/* There are only 12 bits in the DMA engine for descriptor offsetting
@@ -327,9 +327,9 @@ struct dma_desc {
327#define RX_COPY_THRESHOLD 256 327#define RX_COPY_THRESHOLD 256
328 328
329struct rx_header { 329struct rx_header {
330 u16 len; 330 __le16 len;
331 u16 flags; 331 __le16 flags;
332 u16 pad[12]; 332 __le16 pad[12];
333}; 333};
334#define RX_HEADER_LEN 28 334#define RX_HEADER_LEN 28
335 335
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 4528ce9c4e43..c143304dcff5 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -18,6 +18,7 @@
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/spinlock.h> 19#include <linux/spinlock.h>
20#include <linux/crc32.h> 20#include <linux/crc32.h>
21#include <linux/bitrev.h>
21#include <asm/prom.h> 22#include <asm/prom.h>
22#include <asm/dbdma.h> 23#include <asm/dbdma.h>
23#include <asm/io.h> 24#include <asm/io.h>
@@ -140,7 +141,6 @@ static unsigned char *bmac_emergency_rxbuf;
140 + (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \ 141 + (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
141 + sizeof(struct sk_buff_head)) 142 + sizeof(struct sk_buff_head))
142 143
143static unsigned char bitrev(unsigned char b);
144static int bmac_open(struct net_device *dev); 144static int bmac_open(struct net_device *dev);
145static int bmac_close(struct net_device *dev); 145static int bmac_close(struct net_device *dev);
146static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev); 146static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
@@ -586,18 +586,6 @@ bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
586 virt_to_bus(addr), 0); 586 virt_to_bus(addr), 0);
587} 587}
588 588
589/* Bit-reverse one byte of an ethernet hardware address. */
590static unsigned char
591bitrev(unsigned char b)
592{
593 int d = 0, i;
594
595 for (i = 0; i < 8; ++i, b >>= 1)
596 d = (d << 1) | (b & 1);
597 return d;
598}
599
600
601static void 589static void
602bmac_init_tx_ring(struct bmac_data *bp) 590bmac_init_tx_ring(struct bmac_data *bp)
603{ 591{
@@ -1224,8 +1212,8 @@ bmac_get_station_address(struct net_device *dev, unsigned char *ea)
1224 { 1212 {
1225 reset_and_select_srom(dev); 1213 reset_and_select_srom(dev);
1226 data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits); 1214 data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
1227 ea[2*i] = bitrev(data & 0x0ff); 1215 ea[2*i] = bitrev8(data & 0x0ff);
1228 ea[2*i+1] = bitrev((data >> 8) & 0x0ff); 1216 ea[2*i+1] = bitrev8((data >> 8) & 0x0ff);
1229 } 1217 }
1230} 1218}
1231 1219
@@ -1315,7 +1303,7 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i
1315 1303
1316 rev = addr[0] == 0 && addr[1] == 0xA0; 1304 rev = addr[0] == 0 && addr[1] == 0xA0;
1317 for (j = 0; j < 6; ++j) 1305 for (j = 0; j < 6; ++j)
1318 dev->dev_addr[j] = rev? bitrev(addr[j]): addr[j]; 1306 dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
1319 1307
1320 /* Enable chip without interrupts for now */ 1308 /* Enable chip without interrupts for now */
1321 bmac_enable_and_reset_chip(dev); 1309 bmac_enable_and_reset_chip(dev);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index ee7b75b976b5..5a96d7611af1 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -39,12 +39,9 @@
39#include <linux/if_vlan.h> 39#include <linux/if_vlan.h>
40#define BCM_VLAN 1 40#define BCM_VLAN 1
41#endif 41#endif
42#ifdef NETIF_F_TSO
43#include <net/ip.h> 42#include <net/ip.h>
44#include <net/tcp.h> 43#include <net/tcp.h>
45#include <net/checksum.h> 44#include <net/checksum.h>
46#define BCM_TSO 1
47#endif
48#include <linux/workqueue.h> 45#include <linux/workqueue.h>
49#include <linux/crc32.h> 46#include <linux/crc32.h>
50#include <linux/prefetch.h> 47#include <linux/prefetch.h>
@@ -1728,7 +1725,7 @@ bnx2_tx_int(struct bnx2 *bp)
1728 1725
1729 tx_buf = &bp->tx_buf_ring[sw_ring_cons]; 1726 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1730 skb = tx_buf->skb; 1727 skb = tx_buf->skb;
1731#ifdef BCM_TSO 1728
1732 /* partial BD completions possible with TSO packets */ 1729 /* partial BD completions possible with TSO packets */
1733 if (skb_is_gso(skb)) { 1730 if (skb_is_gso(skb)) {
1734 u16 last_idx, last_ring_idx; 1731 u16 last_idx, last_ring_idx;
@@ -1744,7 +1741,7 @@ bnx2_tx_int(struct bnx2 *bp)
1744 break; 1741 break;
1745 } 1742 }
1746 } 1743 }
1747#endif 1744
1748 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping), 1745 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1749 skb_headlen(skb), PCI_DMA_TODEVICE); 1746 skb_headlen(skb), PCI_DMA_TODEVICE);
1750 1747
@@ -4514,7 +4511,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4514 vlan_tag_flags |= 4511 vlan_tag_flags |=
4515 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16)); 4512 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4516 } 4513 }
4517#ifdef BCM_TSO
4518 if ((mss = skb_shinfo(skb)->gso_size) && 4514 if ((mss = skb_shinfo(skb)->gso_size) &&
4519 (skb->len > (bp->dev->mtu + ETH_HLEN))) { 4515 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4520 u32 tcp_opt_len, ip_tcp_len; 4516 u32 tcp_opt_len, ip_tcp_len;
@@ -4547,7 +4543,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4547 } 4543 }
4548 } 4544 }
4549 else 4545 else
4550#endif
4551 { 4546 {
4552 mss = 0; 4547 mss = 0;
4553 } 4548 }
@@ -5544,10 +5539,8 @@ static const struct ethtool_ops bnx2_ethtool_ops = {
5544 .set_tx_csum = ethtool_op_set_tx_csum, 5539 .set_tx_csum = ethtool_op_set_tx_csum,
5545 .get_sg = ethtool_op_get_sg, 5540 .get_sg = ethtool_op_get_sg,
5546 .set_sg = ethtool_op_set_sg, 5541 .set_sg = ethtool_op_set_sg,
5547#ifdef BCM_TSO
5548 .get_tso = ethtool_op_get_tso, 5542 .get_tso = ethtool_op_get_tso,
5549 .set_tso = bnx2_set_tso, 5543 .set_tso = bnx2_set_tso,
5550#endif
5551 .self_test_count = bnx2_self_test_count, 5544 .self_test_count = bnx2_self_test_count,
5552 .self_test = bnx2_self_test, 5545 .self_test = bnx2_self_test,
5553 .get_strings = bnx2_get_strings, 5546 .get_strings = bnx2_get_strings,
@@ -5954,8 +5947,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5954 * responding after a while. 5947 * responding after a while.
5955 * 5948 *
5956 * AMD believes this incompatibility is unique to the 5706, and 5949 * AMD believes this incompatibility is unique to the 5706, and
5957 * prefers to locally disable MSI rather than globally disabling it 5950 * prefers to locally disable MSI rather than globally disabling it.
5958 * using pci_msi_quirk.
5959 */ 5951 */
5960 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) { 5952 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
5961 struct pci_dev *amd_8132 = NULL; 5953 struct pci_dev *amd_8132 = NULL;
@@ -6104,9 +6096,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6104#ifdef BCM_VLAN 6096#ifdef BCM_VLAN
6105 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 6097 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6106#endif 6098#endif
6107#ifdef BCM_TSO
6108 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN; 6099 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6109#endif
6110 6100
6111 netif_carrier_off(bp->dev); 6101 netif_carrier_off(bp->dev);
6112 6102
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6482aed4bb7c..d3801a00d3d5 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4704,6 +4704,7 @@ static int bond_check_params(struct bond_params *params)
4704static struct lock_class_key bonding_netdev_xmit_lock_key; 4704static struct lock_class_key bonding_netdev_xmit_lock_key;
4705 4705
4706/* Create a new bond based on the specified name and bonding parameters. 4706/* Create a new bond based on the specified name and bonding parameters.
4707 * If name is NULL, obtain a suitable "bond%d" name for us.
4707 * Caller must NOT hold rtnl_lock; we need to release it here before we 4708 * Caller must NOT hold rtnl_lock; we need to release it here before we
4708 * set up our sysfs entries. 4709 * set up our sysfs entries.
4709 */ 4710 */
@@ -4713,7 +4714,8 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond
4713 int res; 4714 int res;
4714 4715
4715 rtnl_lock(); 4716 rtnl_lock();
4716 bond_dev = alloc_netdev(sizeof(struct bonding), name, ether_setup); 4717 bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "",
4718 ether_setup);
4717 if (!bond_dev) { 4719 if (!bond_dev) {
4718 printk(KERN_ERR DRV_NAME 4720 printk(KERN_ERR DRV_NAME
4719 ": %s: eek! can't alloc netdev!\n", 4721 ": %s: eek! can't alloc netdev!\n",
@@ -4722,6 +4724,12 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond
4722 goto out_rtnl; 4724 goto out_rtnl;
4723 } 4725 }
4724 4726
4727 if (!name) {
4728 res = dev_alloc_name(bond_dev, "bond%d");
4729 if (res < 0)
4730 goto out_netdev;
4731 }
4732
4725 /* bond_init() must be called after dev_alloc_name() (for the 4733 /* bond_init() must be called after dev_alloc_name() (for the
4726 * /proc files), but before register_netdevice(), because we 4734 * /proc files), but before register_netdevice(), because we
4727 * need to set function pointers. 4735 * need to set function pointers.
@@ -4748,14 +4756,19 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond
4748 4756
4749 rtnl_unlock(); /* allows sysfs registration of net device */ 4757 rtnl_unlock(); /* allows sysfs registration of net device */
4750 res = bond_create_sysfs_entry(bond_dev->priv); 4758 res = bond_create_sysfs_entry(bond_dev->priv);
4751 goto done; 4759 if (res < 0) {
4760 rtnl_lock();
4761 goto out_bond;
4762 }
4763
4764 return 0;
4765
4752out_bond: 4766out_bond:
4753 bond_deinit(bond_dev); 4767 bond_deinit(bond_dev);
4754out_netdev: 4768out_netdev:
4755 free_netdev(bond_dev); 4769 free_netdev(bond_dev);
4756out_rtnl: 4770out_rtnl:
4757 rtnl_unlock(); 4771 rtnl_unlock();
4758done:
4759 return res; 4772 return res;
4760} 4773}
4761 4774
@@ -4763,7 +4776,6 @@ static int __init bonding_init(void)
4763{ 4776{
4764 int i; 4777 int i;
4765 int res; 4778 int res;
4766 char new_bond_name[8]; /* Enough room for 999 bonds at init. */
4767 4779
4768 printk(KERN_INFO "%s", version); 4780 printk(KERN_INFO "%s", version);
4769 4781
@@ -4776,8 +4788,7 @@ static int __init bonding_init(void)
4776 bond_create_proc_dir(); 4788 bond_create_proc_dir();
4777#endif 4789#endif
4778 for (i = 0; i < max_bonds; i++) { 4790 for (i = 0; i < max_bonds; i++) {
4779 sprintf(new_bond_name, "bond%d",i); 4791 res = bond_create(NULL, &bonding_defaults, NULL);
4780 res = bond_create(new_bond_name,&bonding_defaults, NULL);
4781 if (res) 4792 if (res)
4782 goto err; 4793 goto err;
4783 } 4794 }
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index ced9ed8f995a..878f7aabeeac 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -39,8 +39,7 @@
39 39
40/* #define BONDING_DEBUG 1 */ 40/* #define BONDING_DEBUG 1 */
41#include "bonding.h" 41#include "bonding.h"
42#define to_class_dev(obj) container_of(obj,struct class_device,kobj) 42#define to_dev(obj) container_of(obj,struct device,kobj)
43#define to_net_dev(class) container_of(class, struct net_device, class_dev)
44#define to_bond(cd) ((struct bonding *)(to_net_dev(cd)->priv)) 43#define to_bond(cd) ((struct bonding *)(to_net_dev(cd)->priv))
45 44
46/*---------------------------- Declarations -------------------------------*/ 45/*---------------------------- Declarations -------------------------------*/
@@ -154,7 +153,7 @@ static ssize_t bonding_store_bonds(struct class *cls, const char *buffer, size_t
154 * If it's > expected, then there's a file open, 153 * If it's > expected, then there's a file open,
155 * and we have to fail. 154 * and we have to fail.
156 */ 155 */
157 if (atomic_read(&bond->dev->class_dev.kobj.kref.refcount) 156 if (atomic_read(&bond->dev->dev.kobj.kref.refcount)
158 > expected_refcount){ 157 > expected_refcount){
159 rtnl_unlock(); 158 rtnl_unlock();
160 printk(KERN_INFO DRV_NAME 159 printk(KERN_INFO DRV_NAME
@@ -201,13 +200,13 @@ int bond_create_slave_symlinks(struct net_device *master, struct net_device *sla
201 int ret = 0; 200 int ret = 0;
202 201
203 /* first, create a link from the slave back to the master */ 202 /* first, create a link from the slave back to the master */
204 ret = sysfs_create_link(&(slave->class_dev.kobj), &(master->class_dev.kobj), 203 ret = sysfs_create_link(&(slave->dev.kobj), &(master->dev.kobj),
205 "master"); 204 "master");
206 if (ret) 205 if (ret)
207 return ret; 206 return ret;
208 /* next, create a link from the master to the slave */ 207 /* next, create a link from the master to the slave */
209 sprintf(linkname,"slave_%s",slave->name); 208 sprintf(linkname,"slave_%s",slave->name);
210 ret = sysfs_create_link(&(master->class_dev.kobj), &(slave->class_dev.kobj), 209 ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj),
211 linkname); 210 linkname);
212 return ret; 211 return ret;
213 212
@@ -217,20 +216,21 @@ void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *s
217{ 216{
218 char linkname[IFNAMSIZ+7]; 217 char linkname[IFNAMSIZ+7];
219 218
220 sysfs_remove_link(&(slave->class_dev.kobj), "master"); 219 sysfs_remove_link(&(slave->dev.kobj), "master");
221 sprintf(linkname,"slave_%s",slave->name); 220 sprintf(linkname,"slave_%s",slave->name);
222 sysfs_remove_link(&(master->class_dev.kobj), linkname); 221 sysfs_remove_link(&(master->dev.kobj), linkname);
223} 222}
224 223
225 224
226/* 225/*
227 * Show the slaves in the current bond. 226 * Show the slaves in the current bond.
228 */ 227 */
229static ssize_t bonding_show_slaves(struct class_device *cd, char *buf) 228static ssize_t bonding_show_slaves(struct device *d,
229 struct device_attribute *attr, char *buf)
230{ 230{
231 struct slave *slave; 231 struct slave *slave;
232 int i, res = 0; 232 int i, res = 0;
233 struct bonding *bond = to_bond(cd); 233 struct bonding *bond = to_bond(d);
234 234
235 read_lock_bh(&bond->lock); 235 read_lock_bh(&bond->lock);
236 bond_for_each_slave(bond, slave, i) { 236 bond_for_each_slave(bond, slave, i) {
@@ -254,14 +254,16 @@ static ssize_t bonding_show_slaves(struct class_device *cd, char *buf)
254 * up for this to succeed. 254 * up for this to succeed.
255 * This function is largely the same flow as bonding_update_bonds(). 255 * This function is largely the same flow as bonding_update_bonds().
256 */ 256 */
257static ssize_t bonding_store_slaves(struct class_device *cd, const char *buffer, size_t count) 257static ssize_t bonding_store_slaves(struct device *d,
258 struct device_attribute *attr,
259 const char *buffer, size_t count)
258{ 260{
259 char command[IFNAMSIZ + 1] = { 0, }; 261 char command[IFNAMSIZ + 1] = { 0, };
260 char *ifname; 262 char *ifname;
261 int i, res, found, ret = count; 263 int i, res, found, ret = count;
262 struct slave *slave; 264 struct slave *slave;
263 struct net_device *dev = NULL; 265 struct net_device *dev = NULL;
264 struct bonding *bond = to_bond(cd); 266 struct bonding *bond = to_bond(d);
265 267
266 /* Quick sanity check -- is the bond interface up? */ 268 /* Quick sanity check -- is the bond interface up? */
267 if (!(bond->dev->flags & IFF_UP)) { 269 if (!(bond->dev->flags & IFF_UP)) {
@@ -387,25 +389,28 @@ out:
387 return ret; 389 return ret;
388} 390}
389 391
390static CLASS_DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves, bonding_store_slaves); 392static DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves, bonding_store_slaves);
391 393
392/* 394/*
393 * Show and set the bonding mode. The bond interface must be down to 395 * Show and set the bonding mode. The bond interface must be down to
394 * change the mode. 396 * change the mode.
395 */ 397 */
396static ssize_t bonding_show_mode(struct class_device *cd, char *buf) 398static ssize_t bonding_show_mode(struct device *d,
399 struct device_attribute *attr, char *buf)
397{ 400{
398 struct bonding *bond = to_bond(cd); 401 struct bonding *bond = to_bond(d);
399 402
400 return sprintf(buf, "%s %d\n", 403 return sprintf(buf, "%s %d\n",
401 bond_mode_tbl[bond->params.mode].modename, 404 bond_mode_tbl[bond->params.mode].modename,
402 bond->params.mode) + 1; 405 bond->params.mode) + 1;
403} 406}
404 407
405static ssize_t bonding_store_mode(struct class_device *cd, const char *buf, size_t count) 408static ssize_t bonding_store_mode(struct device *d,
409 struct device_attribute *attr,
410 const char *buf, size_t count)
406{ 411{
407 int new_value, ret = count; 412 int new_value, ret = count;
408 struct bonding *bond = to_bond(cd); 413 struct bonding *bond = to_bond(d);
409 414
410 if (bond->dev->flags & IFF_UP) { 415 if (bond->dev->flags & IFF_UP) {
411 printk(KERN_ERR DRV_NAME 416 printk(KERN_ERR DRV_NAME
@@ -438,16 +443,18 @@ static ssize_t bonding_store_mode(struct class_device *cd, const char *buf, size
438out: 443out:
439 return ret; 444 return ret;
440} 445}
441static CLASS_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, bonding_show_mode, bonding_store_mode); 446static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, bonding_show_mode, bonding_store_mode);
442 447
443/* 448/*
444 * Show and set the bonding transmit hash method. The bond interface must be down to 449 * Show and set the bonding transmit hash method. The bond interface must be down to
445 * change the xmit hash policy. 450 * change the xmit hash policy.
446 */ 451 */
447static ssize_t bonding_show_xmit_hash(struct class_device *cd, char *buf) 452static ssize_t bonding_show_xmit_hash(struct device *d,
453 struct device_attribute *attr,
454 char *buf)
448{ 455{
449 int count; 456 int count;
450 struct bonding *bond = to_bond(cd); 457 struct bonding *bond = to_bond(d);
451 458
452 if ((bond->params.mode != BOND_MODE_XOR) && 459 if ((bond->params.mode != BOND_MODE_XOR) &&
453 (bond->params.mode != BOND_MODE_8023AD)) { 460 (bond->params.mode != BOND_MODE_8023AD)) {
@@ -462,10 +469,12 @@ static ssize_t bonding_show_xmit_hash(struct class_device *cd, char *buf)
462 return count; 469 return count;
463} 470}
464 471
465static ssize_t bonding_store_xmit_hash(struct class_device *cd, const char *buf, size_t count) 472static ssize_t bonding_store_xmit_hash(struct device *d,
473 struct device_attribute *attr,
474 const char *buf, size_t count)
466{ 475{
467 int new_value, ret = count; 476 int new_value, ret = count;
468 struct bonding *bond = to_bond(cd); 477 struct bonding *bond = to_bond(d);
469 478
470 if (bond->dev->flags & IFF_UP) { 479 if (bond->dev->flags & IFF_UP) {
471 printk(KERN_ERR DRV_NAME 480 printk(KERN_ERR DRV_NAME
@@ -501,24 +510,28 @@ static ssize_t bonding_store_xmit_hash(struct class_device *cd, const char *buf,
501out: 510out:
502 return ret; 511 return ret;
503} 512}
504static CLASS_DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR, bonding_show_xmit_hash, bonding_store_xmit_hash); 513static DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR, bonding_show_xmit_hash, bonding_store_xmit_hash);
505 514
506/* 515/*
507 * Show and set arp_validate. 516 * Show and set arp_validate.
508 */ 517 */
509static ssize_t bonding_show_arp_validate(struct class_device *cd, char *buf) 518static ssize_t bonding_show_arp_validate(struct device *d,
519 struct device_attribute *attr,
520 char *buf)
510{ 521{
511 struct bonding *bond = to_bond(cd); 522 struct bonding *bond = to_bond(d);
512 523
513 return sprintf(buf, "%s %d\n", 524 return sprintf(buf, "%s %d\n",
514 arp_validate_tbl[bond->params.arp_validate].modename, 525 arp_validate_tbl[bond->params.arp_validate].modename,
515 bond->params.arp_validate) + 1; 526 bond->params.arp_validate) + 1;
516} 527}
517 528
518static ssize_t bonding_store_arp_validate(struct class_device *cd, const char *buf, size_t count) 529static ssize_t bonding_store_arp_validate(struct device *d,
530 struct device_attribute *attr,
531 const char *buf, size_t count)
519{ 532{
520 int new_value; 533 int new_value;
521 struct bonding *bond = to_bond(cd); 534 struct bonding *bond = to_bond(d);
522 535
523 new_value = bond_parse_parm((char *)buf, arp_validate_tbl); 536 new_value = bond_parse_parm((char *)buf, arp_validate_tbl);
524 if (new_value < 0) { 537 if (new_value < 0) {
@@ -548,7 +561,7 @@ static ssize_t bonding_store_arp_validate(struct class_device *cd, const char *b
548 return count; 561 return count;
549} 562}
550 563
551static CLASS_DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate, bonding_store_arp_validate); 564static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate, bonding_store_arp_validate);
552 565
553/* 566/*
554 * Show and set the arp timer interval. There are two tricky bits 567 * Show and set the arp timer interval. There are two tricky bits
@@ -556,17 +569,21 @@ static CLASS_DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_valid
556 * MII monitoring. Second, if the ARP timer isn't running, we must 569 * MII monitoring. Second, if the ARP timer isn't running, we must
557 * start it. 570 * start it.
558 */ 571 */
559static ssize_t bonding_show_arp_interval(struct class_device *cd, char *buf) 572static ssize_t bonding_show_arp_interval(struct device *d,
573 struct device_attribute *attr,
574 char *buf)
560{ 575{
561 struct bonding *bond = to_bond(cd); 576 struct bonding *bond = to_bond(d);
562 577
563 return sprintf(buf, "%d\n", bond->params.arp_interval) + 1; 578 return sprintf(buf, "%d\n", bond->params.arp_interval) + 1;
564} 579}
565 580
566static ssize_t bonding_store_arp_interval(struct class_device *cd, const char *buf, size_t count) 581static ssize_t bonding_store_arp_interval(struct device *d,
582 struct device_attribute *attr,
583 const char *buf, size_t count)
567{ 584{
568 int new_value, ret = count; 585 int new_value, ret = count;
569 struct bonding *bond = to_bond(cd); 586 struct bonding *bond = to_bond(d);
570 587
571 if (sscanf(buf, "%d", &new_value) != 1) { 588 if (sscanf(buf, "%d", &new_value) != 1) {
572 printk(KERN_ERR DRV_NAME 589 printk(KERN_ERR DRV_NAME
@@ -638,15 +655,17 @@ static ssize_t bonding_store_arp_interval(struct class_device *cd, const char *b
638out: 655out:
639 return ret; 656 return ret;
640} 657}
641static CLASS_DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR , bonding_show_arp_interval, bonding_store_arp_interval); 658static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR , bonding_show_arp_interval, bonding_store_arp_interval);
642 659
643/* 660/*
644 * Show and set the arp targets. 661 * Show and set the arp targets.
645 */ 662 */
646static ssize_t bonding_show_arp_targets(struct class_device *cd, char *buf) 663static ssize_t bonding_show_arp_targets(struct device *d,
664 struct device_attribute *attr,
665 char *buf)
647{ 666{
648 int i, res = 0; 667 int i, res = 0;
649 struct bonding *bond = to_bond(cd); 668 struct bonding *bond = to_bond(d);
650 669
651 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) { 670 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
652 if (bond->params.arp_targets[i]) 671 if (bond->params.arp_targets[i])
@@ -660,11 +679,13 @@ static ssize_t bonding_show_arp_targets(struct class_device *cd, char *buf)
660 return res; 679 return res;
661} 680}
662 681
663static ssize_t bonding_store_arp_targets(struct class_device *cd, const char *buf, size_t count) 682static ssize_t bonding_store_arp_targets(struct device *d,
683 struct device_attribute *attr,
684 const char *buf, size_t count)
664{ 685{
665 u32 newtarget; 686 u32 newtarget;
666 int i = 0, done = 0, ret = count; 687 int i = 0, done = 0, ret = count;
667 struct bonding *bond = to_bond(cd); 688 struct bonding *bond = to_bond(d);
668 u32 *targets; 689 u32 *targets;
669 690
670 targets = bond->params.arp_targets; 691 targets = bond->params.arp_targets;
@@ -742,24 +763,28 @@ static ssize_t bonding_store_arp_targets(struct class_device *cd, const char *bu
742out: 763out:
743 return ret; 764 return ret;
744} 765}
745static CLASS_DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets); 766static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets);
746 767
747/* 768/*
748 * Show and set the up and down delays. These must be multiples of the 769 * Show and set the up and down delays. These must be multiples of the
749 * MII monitoring value, and are stored internally as the multiplier. 770 * MII monitoring value, and are stored internally as the multiplier.
750 * Thus, we must translate to MS for the real world. 771 * Thus, we must translate to MS for the real world.
751 */ 772 */
752static ssize_t bonding_show_downdelay(struct class_device *cd, char *buf) 773static ssize_t bonding_show_downdelay(struct device *d,
774 struct device_attribute *attr,
775 char *buf)
753{ 776{
754 struct bonding *bond = to_bond(cd); 777 struct bonding *bond = to_bond(d);
755 778
756 return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon) + 1; 779 return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon) + 1;
757} 780}
758 781
759static ssize_t bonding_store_downdelay(struct class_device *cd, const char *buf, size_t count) 782static ssize_t bonding_store_downdelay(struct device *d,
783 struct device_attribute *attr,
784 const char *buf, size_t count)
760{ 785{
761 int new_value, ret = count; 786 int new_value, ret = count;
762 struct bonding *bond = to_bond(cd); 787 struct bonding *bond = to_bond(d);
763 788
764 if (!(bond->params.miimon)) { 789 if (!(bond->params.miimon)) {
765 printk(KERN_ERR DRV_NAME 790 printk(KERN_ERR DRV_NAME
@@ -800,20 +825,24 @@ static ssize_t bonding_store_downdelay(struct class_device *cd, const char *buf,
800out: 825out:
801 return ret; 826 return ret;
802} 827}
803static CLASS_DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR , bonding_show_downdelay, bonding_store_downdelay); 828static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR , bonding_show_downdelay, bonding_store_downdelay);
804 829
805static ssize_t bonding_show_updelay(struct class_device *cd, char *buf) 830static ssize_t bonding_show_updelay(struct device *d,
831 struct device_attribute *attr,
832 char *buf)
806{ 833{
807 struct bonding *bond = to_bond(cd); 834 struct bonding *bond = to_bond(d);
808 835
809 return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon) + 1; 836 return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon) + 1;
810 837
811} 838}
812 839
813static ssize_t bonding_store_updelay(struct class_device *cd, const char *buf, size_t count) 840static ssize_t bonding_store_updelay(struct device *d,
841 struct device_attribute *attr,
842 const char *buf, size_t count)
814{ 843{
815 int new_value, ret = count; 844 int new_value, ret = count;
816 struct bonding *bond = to_bond(cd); 845 struct bonding *bond = to_bond(d);
817 846
818 if (!(bond->params.miimon)) { 847 if (!(bond->params.miimon)) {
819 printk(KERN_ERR DRV_NAME 848 printk(KERN_ERR DRV_NAME
@@ -854,25 +883,29 @@ static ssize_t bonding_store_updelay(struct class_device *cd, const char *buf, s
854out: 883out:
855 return ret; 884 return ret;
856} 885}
857static CLASS_DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR , bonding_show_updelay, bonding_store_updelay); 886static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR , bonding_show_updelay, bonding_store_updelay);
858 887
859/* 888/*
860 * Show and set the LACP interval. Interface must be down, and the mode 889 * Show and set the LACP interval. Interface must be down, and the mode
861 * must be set to 802.3ad mode. 890 * must be set to 802.3ad mode.
862 */ 891 */
863static ssize_t bonding_show_lacp(struct class_device *cd, char *buf) 892static ssize_t bonding_show_lacp(struct device *d,
893 struct device_attribute *attr,
894 char *buf)
864{ 895{
865 struct bonding *bond = to_bond(cd); 896 struct bonding *bond = to_bond(d);
866 897
867 return sprintf(buf, "%s %d\n", 898 return sprintf(buf, "%s %d\n",
868 bond_lacp_tbl[bond->params.lacp_fast].modename, 899 bond_lacp_tbl[bond->params.lacp_fast].modename,
869 bond->params.lacp_fast) + 1; 900 bond->params.lacp_fast) + 1;
870} 901}
871 902
872static ssize_t bonding_store_lacp(struct class_device *cd, const char *buf, size_t count) 903static ssize_t bonding_store_lacp(struct device *d,
904 struct device_attribute *attr,
905 const char *buf, size_t count)
873{ 906{
874 int new_value, ret = count; 907 int new_value, ret = count;
875 struct bonding *bond = to_bond(cd); 908 struct bonding *bond = to_bond(d);
876 909
877 if (bond->dev->flags & IFF_UP) { 910 if (bond->dev->flags & IFF_UP) {
878 printk(KERN_ERR DRV_NAME 911 printk(KERN_ERR DRV_NAME
@@ -906,7 +939,7 @@ static ssize_t bonding_store_lacp(struct class_device *cd, const char *buf, size
906out: 939out:
907 return ret; 940 return ret;
908} 941}
909static CLASS_DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR, bonding_show_lacp, bonding_store_lacp); 942static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR, bonding_show_lacp, bonding_store_lacp);
910 943
911/* 944/*
912 * Show and set the MII monitor interval. There are two tricky bits 945 * Show and set the MII monitor interval. There are two tricky bits
@@ -914,17 +947,21 @@ static CLASS_DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR, bonding_show_lacp, bondin
914 * ARP monitoring. Second, if the timer isn't running, we must 947 * ARP monitoring. Second, if the timer isn't running, we must
915 * start it. 948 * start it.
916 */ 949 */
917static ssize_t bonding_show_miimon(struct class_device *cd, char *buf) 950static ssize_t bonding_show_miimon(struct device *d,
951 struct device_attribute *attr,
952 char *buf)
918{ 953{
919 struct bonding *bond = to_bond(cd); 954 struct bonding *bond = to_bond(d);
920 955
921 return sprintf(buf, "%d\n", bond->params.miimon) + 1; 956 return sprintf(buf, "%d\n", bond->params.miimon) + 1;
922} 957}
923 958
924static ssize_t bonding_store_miimon(struct class_device *cd, const char *buf, size_t count) 959static ssize_t bonding_store_miimon(struct device *d,
960 struct device_attribute *attr,
961 const char *buf, size_t count)
925{ 962{
926 int new_value, ret = count; 963 int new_value, ret = count;
927 struct bonding *bond = to_bond(cd); 964 struct bonding *bond = to_bond(d);
928 965
929 if (sscanf(buf, "%d", &new_value) != 1) { 966 if (sscanf(buf, "%d", &new_value) != 1) {
930 printk(KERN_ERR DRV_NAME 967 printk(KERN_ERR DRV_NAME
@@ -1000,7 +1037,7 @@ static ssize_t bonding_store_miimon(struct class_device *cd, const char *buf, si
1000out: 1037out:
1001 return ret; 1038 return ret;
1002} 1039}
1003static CLASS_DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR, bonding_show_miimon, bonding_store_miimon); 1040static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR, bonding_show_miimon, bonding_store_miimon);
1004 1041
1005/* 1042/*
1006 * Show and set the primary slave. The store function is much 1043 * Show and set the primary slave. The store function is much
@@ -1009,10 +1046,12 @@ static CLASS_DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR, bonding_show_miimon, bonding
1009 * The bond must be a mode that supports a primary for this be 1046 * The bond must be a mode that supports a primary for this be
1010 * set. 1047 * set.
1011 */ 1048 */
1012static ssize_t bonding_show_primary(struct class_device *cd, char *buf) 1049static ssize_t bonding_show_primary(struct device *d,
1050 struct device_attribute *attr,
1051 char *buf)
1013{ 1052{
1014 int count = 0; 1053 int count = 0;
1015 struct bonding *bond = to_bond(cd); 1054 struct bonding *bond = to_bond(d);
1016 1055
1017 if (bond->primary_slave) 1056 if (bond->primary_slave)
1018 count = sprintf(buf, "%s\n", bond->primary_slave->dev->name) + 1; 1057 count = sprintf(buf, "%s\n", bond->primary_slave->dev->name) + 1;
@@ -1022,11 +1061,13 @@ static ssize_t bonding_show_primary(struct class_device *cd, char *buf)
1022 return count; 1061 return count;
1023} 1062}
1024 1063
1025static ssize_t bonding_store_primary(struct class_device *cd, const char *buf, size_t count) 1064static ssize_t bonding_store_primary(struct device *d,
1065 struct device_attribute *attr,
1066 const char *buf, size_t count)
1026{ 1067{
1027 int i; 1068 int i;
1028 struct slave *slave; 1069 struct slave *slave;
1029 struct bonding *bond = to_bond(cd); 1070 struct bonding *bond = to_bond(d);
1030 1071
1031 write_lock_bh(&bond->lock); 1072 write_lock_bh(&bond->lock);
1032 if (!USES_PRIMARY(bond->params.mode)) { 1073 if (!USES_PRIMARY(bond->params.mode)) {
@@ -1065,22 +1106,26 @@ out:
1065 write_unlock_bh(&bond->lock); 1106 write_unlock_bh(&bond->lock);
1066 return count; 1107 return count;
1067} 1108}
1068static CLASS_DEVICE_ATTR(primary, S_IRUGO | S_IWUSR, bonding_show_primary, bonding_store_primary); 1109static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR, bonding_show_primary, bonding_store_primary);
1069 1110
1070/* 1111/*
1071 * Show and set the use_carrier flag. 1112 * Show and set the use_carrier flag.
1072 */ 1113 */
1073static ssize_t bonding_show_carrier(struct class_device *cd, char *buf) 1114static ssize_t bonding_show_carrier(struct device *d,
1115 struct device_attribute *attr,
1116 char *buf)
1074{ 1117{
1075 struct bonding *bond = to_bond(cd); 1118 struct bonding *bond = to_bond(d);
1076 1119
1077 return sprintf(buf, "%d\n", bond->params.use_carrier) + 1; 1120 return sprintf(buf, "%d\n", bond->params.use_carrier) + 1;
1078} 1121}
1079 1122
1080static ssize_t bonding_store_carrier(struct class_device *cd, const char *buf, size_t count) 1123static ssize_t bonding_store_carrier(struct device *d,
1124 struct device_attribute *attr,
1125 const char *buf, size_t count)
1081{ 1126{
1082 int new_value, ret = count; 1127 int new_value, ret = count;
1083 struct bonding *bond = to_bond(cd); 1128 struct bonding *bond = to_bond(d);
1084 1129
1085 1130
1086 if (sscanf(buf, "%d", &new_value) != 1) { 1131 if (sscanf(buf, "%d", &new_value) != 1) {
@@ -1102,16 +1147,18 @@ static ssize_t bonding_store_carrier(struct class_device *cd, const char *buf, s
1102out: 1147out:
1103 return count; 1148 return count;
1104} 1149}
1105static CLASS_DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR, bonding_show_carrier, bonding_store_carrier); 1150static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR, bonding_show_carrier, bonding_store_carrier);
1106 1151
1107 1152
1108/* 1153/*
1109 * Show and set currently active_slave. 1154 * Show and set currently active_slave.
1110 */ 1155 */
1111static ssize_t bonding_show_active_slave(struct class_device *cd, char *buf) 1156static ssize_t bonding_show_active_slave(struct device *d,
1157 struct device_attribute *attr,
1158 char *buf)
1112{ 1159{
1113 struct slave *curr; 1160 struct slave *curr;
1114 struct bonding *bond = to_bond(cd); 1161 struct bonding *bond = to_bond(d);
1115 int count; 1162 int count;
1116 1163
1117 1164
@@ -1126,13 +1173,15 @@ static ssize_t bonding_show_active_slave(struct class_device *cd, char *buf)
1126 return count; 1173 return count;
1127} 1174}
1128 1175
1129static ssize_t bonding_store_active_slave(struct class_device *cd, const char *buf, size_t count) 1176static ssize_t bonding_store_active_slave(struct device *d,
1177 struct device_attribute *attr,
1178 const char *buf, size_t count)
1130{ 1179{
1131 int i; 1180 int i;
1132 struct slave *slave; 1181 struct slave *slave;
1133 struct slave *old_active = NULL; 1182 struct slave *old_active = NULL;
1134 struct slave *new_active = NULL; 1183 struct slave *new_active = NULL;
1135 struct bonding *bond = to_bond(cd); 1184 struct bonding *bond = to_bond(d);
1136 1185
1137 write_lock_bh(&bond->lock); 1186 write_lock_bh(&bond->lock);
1138 if (!USES_PRIMARY(bond->params.mode)) { 1187 if (!USES_PRIMARY(bond->params.mode)) {
@@ -1194,16 +1243,18 @@ out:
1194 return count; 1243 return count;
1195 1244
1196} 1245}
1197static CLASS_DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR, bonding_show_active_slave, bonding_store_active_slave); 1246static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR, bonding_show_active_slave, bonding_store_active_slave);
1198 1247
1199 1248
1200/* 1249/*
1201 * Show link status of the bond interface. 1250 * Show link status of the bond interface.
1202 */ 1251 */
1203static ssize_t bonding_show_mii_status(struct class_device *cd, char *buf) 1252static ssize_t bonding_show_mii_status(struct device *d,
1253 struct device_attribute *attr,
1254 char *buf)
1204{ 1255{
1205 struct slave *curr; 1256 struct slave *curr;
1206 struct bonding *bond = to_bond(cd); 1257 struct bonding *bond = to_bond(d);
1207 1258
1208 read_lock(&bond->curr_slave_lock); 1259 read_lock(&bond->curr_slave_lock);
1209 curr = bond->curr_active_slave; 1260 curr = bond->curr_active_slave;
@@ -1211,16 +1262,18 @@ static ssize_t bonding_show_mii_status(struct class_device *cd, char *buf)
1211 1262
1212 return sprintf(buf, "%s\n", (curr) ? "up" : "down") + 1; 1263 return sprintf(buf, "%s\n", (curr) ? "up" : "down") + 1;
1213} 1264}
1214static CLASS_DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL); 1265static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
1215 1266
1216 1267
1217/* 1268/*
1218 * Show current 802.3ad aggregator ID. 1269 * Show current 802.3ad aggregator ID.
1219 */ 1270 */
1220static ssize_t bonding_show_ad_aggregator(struct class_device *cd, char *buf) 1271static ssize_t bonding_show_ad_aggregator(struct device *d,
1272 struct device_attribute *attr,
1273 char *buf)
1221{ 1274{
1222 int count = 0; 1275 int count = 0;
1223 struct bonding *bond = to_bond(cd); 1276 struct bonding *bond = to_bond(d);
1224 1277
1225 if (bond->params.mode == BOND_MODE_8023AD) { 1278 if (bond->params.mode == BOND_MODE_8023AD) {
1226 struct ad_info ad_info; 1279 struct ad_info ad_info;
@@ -1231,16 +1284,18 @@ static ssize_t bonding_show_ad_aggregator(struct class_device *cd, char *buf)
1231 1284
1232 return count; 1285 return count;
1233} 1286}
1234static CLASS_DEVICE_ATTR(ad_aggregator, S_IRUGO, bonding_show_ad_aggregator, NULL); 1287static DEVICE_ATTR(ad_aggregator, S_IRUGO, bonding_show_ad_aggregator, NULL);
1235 1288
1236 1289
1237/* 1290/*
1238 * Show number of active 802.3ad ports. 1291 * Show number of active 802.3ad ports.
1239 */ 1292 */
1240static ssize_t bonding_show_ad_num_ports(struct class_device *cd, char *buf) 1293static ssize_t bonding_show_ad_num_ports(struct device *d,
1294 struct device_attribute *attr,
1295 char *buf)
1241{ 1296{
1242 int count = 0; 1297 int count = 0;
1243 struct bonding *bond = to_bond(cd); 1298 struct bonding *bond = to_bond(d);
1244 1299
1245 if (bond->params.mode == BOND_MODE_8023AD) { 1300 if (bond->params.mode == BOND_MODE_8023AD) {
1246 struct ad_info ad_info; 1301 struct ad_info ad_info;
@@ -1251,16 +1306,18 @@ static ssize_t bonding_show_ad_num_ports(struct class_device *cd, char *buf)
1251 1306
1252 return count; 1307 return count;
1253} 1308}
1254static CLASS_DEVICE_ATTR(ad_num_ports, S_IRUGO, bonding_show_ad_num_ports, NULL); 1309static DEVICE_ATTR(ad_num_ports, S_IRUGO, bonding_show_ad_num_ports, NULL);
1255 1310
1256 1311
1257/* 1312/*
1258 * Show current 802.3ad actor key. 1313 * Show current 802.3ad actor key.
1259 */ 1314 */
1260static ssize_t bonding_show_ad_actor_key(struct class_device *cd, char *buf) 1315static ssize_t bonding_show_ad_actor_key(struct device *d,
1316 struct device_attribute *attr,
1317 char *buf)
1261{ 1318{
1262 int count = 0; 1319 int count = 0;
1263 struct bonding *bond = to_bond(cd); 1320 struct bonding *bond = to_bond(d);
1264 1321
1265 if (bond->params.mode == BOND_MODE_8023AD) { 1322 if (bond->params.mode == BOND_MODE_8023AD) {
1266 struct ad_info ad_info; 1323 struct ad_info ad_info;
@@ -1271,16 +1328,18 @@ static ssize_t bonding_show_ad_actor_key(struct class_device *cd, char *buf)
1271 1328
1272 return count; 1329 return count;
1273} 1330}
1274static CLASS_DEVICE_ATTR(ad_actor_key, S_IRUGO, bonding_show_ad_actor_key, NULL); 1331static DEVICE_ATTR(ad_actor_key, S_IRUGO, bonding_show_ad_actor_key, NULL);
1275 1332
1276 1333
1277/* 1334/*
1278 * Show current 802.3ad partner key. 1335 * Show current 802.3ad partner key.
1279 */ 1336 */
1280static ssize_t bonding_show_ad_partner_key(struct class_device *cd, char *buf) 1337static ssize_t bonding_show_ad_partner_key(struct device *d,
1338 struct device_attribute *attr,
1339 char *buf)
1281{ 1340{
1282 int count = 0; 1341 int count = 0;
1283 struct bonding *bond = to_bond(cd); 1342 struct bonding *bond = to_bond(d);
1284 1343
1285 if (bond->params.mode == BOND_MODE_8023AD) { 1344 if (bond->params.mode == BOND_MODE_8023AD) {
1286 struct ad_info ad_info; 1345 struct ad_info ad_info;
@@ -1291,16 +1350,18 @@ static ssize_t bonding_show_ad_partner_key(struct class_device *cd, char *buf)
1291 1350
1292 return count; 1351 return count;
1293} 1352}
1294static CLASS_DEVICE_ATTR(ad_partner_key, S_IRUGO, bonding_show_ad_partner_key, NULL); 1353static DEVICE_ATTR(ad_partner_key, S_IRUGO, bonding_show_ad_partner_key, NULL);
1295 1354
1296 1355
1297/* 1356/*
1298 * Show current 802.3ad partner mac. 1357 * Show current 802.3ad partner mac.
1299 */ 1358 */
1300static ssize_t bonding_show_ad_partner_mac(struct class_device *cd, char *buf) 1359static ssize_t bonding_show_ad_partner_mac(struct device *d,
1360 struct device_attribute *attr,
1361 char *buf)
1301{ 1362{
1302 int count = 0; 1363 int count = 0;
1303 struct bonding *bond = to_bond(cd); 1364 struct bonding *bond = to_bond(d);
1304 1365
1305 if (bond->params.mode == BOND_MODE_8023AD) { 1366 if (bond->params.mode == BOND_MODE_8023AD) {
1306 struct ad_info ad_info; 1367 struct ad_info ad_info;
@@ -1319,30 +1380,30 @@ static ssize_t bonding_show_ad_partner_mac(struct class_device *cd, char *buf)
1319 1380
1320 return count; 1381 return count;
1321} 1382}
1322static CLASS_DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL); 1383static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL);
1323 1384
1324 1385
1325 1386
1326static struct attribute *per_bond_attrs[] = { 1387static struct attribute *per_bond_attrs[] = {
1327 &class_device_attr_slaves.attr, 1388 &dev_attr_slaves.attr,
1328 &class_device_attr_mode.attr, 1389 &dev_attr_mode.attr,
1329 &class_device_attr_arp_validate.attr, 1390 &dev_attr_arp_validate.attr,
1330 &class_device_attr_arp_interval.attr, 1391 &dev_attr_arp_interval.attr,
1331 &class_device_attr_arp_ip_target.attr, 1392 &dev_attr_arp_ip_target.attr,
1332 &class_device_attr_downdelay.attr, 1393 &dev_attr_downdelay.attr,
1333 &class_device_attr_updelay.attr, 1394 &dev_attr_updelay.attr,
1334 &class_device_attr_lacp_rate.attr, 1395 &dev_attr_lacp_rate.attr,
1335 &class_device_attr_xmit_hash_policy.attr, 1396 &dev_attr_xmit_hash_policy.attr,
1336 &class_device_attr_miimon.attr, 1397 &dev_attr_miimon.attr,
1337 &class_device_attr_primary.attr, 1398 &dev_attr_primary.attr,
1338 &class_device_attr_use_carrier.attr, 1399 &dev_attr_use_carrier.attr,
1339 &class_device_attr_active_slave.attr, 1400 &dev_attr_active_slave.attr,
1340 &class_device_attr_mii_status.attr, 1401 &dev_attr_mii_status.attr,
1341 &class_device_attr_ad_aggregator.attr, 1402 &dev_attr_ad_aggregator.attr,
1342 &class_device_attr_ad_num_ports.attr, 1403 &dev_attr_ad_num_ports.attr,
1343 &class_device_attr_ad_actor_key.attr, 1404 &dev_attr_ad_actor_key.attr,
1344 &class_device_attr_ad_partner_key.attr, 1405 &dev_attr_ad_partner_key.attr,
1345 &class_device_attr_ad_partner_mac.attr, 1406 &dev_attr_ad_partner_mac.attr,
1346 NULL, 1407 NULL,
1347}; 1408};
1348 1409
@@ -1367,11 +1428,26 @@ int bond_create_sysfs(void)
1367 if (!firstbond) 1428 if (!firstbond)
1368 return -ENODEV; 1429 return -ENODEV;
1369 1430
1370 netdev_class = firstbond->dev->class_dev.class; 1431 netdev_class = firstbond->dev->dev.class;
1371 if (!netdev_class) 1432 if (!netdev_class)
1372 return -ENODEV; 1433 return -ENODEV;
1373 1434
1374 ret = class_create_file(netdev_class, &class_attr_bonding_masters); 1435 ret = class_create_file(netdev_class, &class_attr_bonding_masters);
1436 /*
1437 * Permit multiple loads of the module by ignoring failures to
1438 * create the bonding_masters sysfs file. Bonding devices
1439 * created by second or subsequent loads of the module will
1440 * not be listed in, or controllable by, bonding_masters, but
1441 * will have the usual "bonding" sysfs directory.
1442 *
1443 * This is done to preserve backwards compatibility for
1444 * initscripts/sysconfig, which load bonding multiple times to
1445 * configure multiple bonding devices.
1446 */
1447 if (ret == -EEXIST) {
1448 netdev_class = NULL;
1449 return 0;
1450 }
1375 1451
1376 return ret; 1452 return ret;
1377 1453
@@ -1395,13 +1471,13 @@ int bond_create_sysfs_entry(struct bonding *bond)
1395 struct net_device *dev = bond->dev; 1471 struct net_device *dev = bond->dev;
1396 int err; 1472 int err;
1397 1473
1398 err = sysfs_create_group(&(dev->class_dev.kobj), &bonding_group); 1474 err = sysfs_create_group(&(dev->dev.kobj), &bonding_group);
1399 if (err) { 1475 if (err) {
1400 printk(KERN_EMERG "eek! didn't create group!\n"); 1476 printk(KERN_EMERG "eek! didn't create group!\n");
1401 } 1477 }
1402 1478
1403 if (expected_refcount < 1) 1479 if (expected_refcount < 1)
1404 expected_refcount = atomic_read(&bond->dev->class_dev.kobj.kref.refcount); 1480 expected_refcount = atomic_read(&bond->dev->dev.kobj.kref.refcount);
1405 1481
1406 return err; 1482 return err;
1407} 1483}
@@ -1412,6 +1488,6 @@ void bond_destroy_sysfs_entry(struct bonding *bond)
1412{ 1488{
1413 struct net_device *dev = bond->dev; 1489 struct net_device *dev = bond->dev;
1414 1490
1415 sysfs_remove_group(&(dev->class_dev.kobj), &bonding_group); 1491 sysfs_remove_group(&(dev->dev.kobj), &bonding_group);
1416} 1492}
1417 1493
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 0978c9ac6d2b..41aa78bf1f78 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -22,8 +22,8 @@
22#include "bond_3ad.h" 22#include "bond_3ad.h"
23#include "bond_alb.h" 23#include "bond_alb.h"
24 24
25#define DRV_VERSION "3.1.1" 25#define DRV_VERSION "3.1.2"
26#define DRV_RELDATE "September 26, 2006" 26#define DRV_RELDATE "January 20, 2007"
27#define DRV_NAME "bonding" 27#define DRV_NAME "bonding"
28#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" 28#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
29 29
@@ -237,12 +237,13 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
237#define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \ 237#define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \
238 BOND_ARP_VALIDATE_BACKUP) 238 BOND_ARP_VALIDATE_BACKUP)
239 239
240extern inline int slave_do_arp_validate(struct bonding *bond, struct slave *slave) 240static inline int slave_do_arp_validate(struct bonding *bond,
241 struct slave *slave)
241{ 242{
242 return bond->params.arp_validate & (1 << slave->state); 243 return bond->params.arp_validate & (1 << slave->state);
243} 244}
244 245
245extern inline unsigned long slave_last_rx(struct bonding *bond, 246static inline unsigned long slave_last_rx(struct bonding *bond,
246 struct slave *slave) 247 struct slave *slave)
247{ 248{
248 if (slave_do_arp_validate(bond, slave)) 249 if (slave_do_arp_validate(bond, slave))
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index 74758d2c7af8..787f2f2820fe 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -324,7 +324,7 @@ struct board_info {
324 unsigned char mdio_phybaseaddr; 324 unsigned char mdio_phybaseaddr;
325 struct gmac *gmac; 325 struct gmac *gmac;
326 struct gphy *gphy; 326 struct gphy *gphy;
327 struct mdio_ops *mdio_ops; 327 struct mdio_ops *mdio_ops;
328 const char *desc; 328 const char *desc;
329}; 329};
330 330
diff --git a/drivers/net/chelsio/cpl5_cmd.h b/drivers/net/chelsio/cpl5_cmd.h
index 35f565be4fd3..e36d45b78cc7 100644
--- a/drivers/net/chelsio/cpl5_cmd.h
+++ b/drivers/net/chelsio/cpl5_cmd.h
@@ -103,7 +103,7 @@ enum CPL_opcode {
103 CPL_MIGRATE_C2T_RPL = 0xDD, 103 CPL_MIGRATE_C2T_RPL = 0xDD,
104 CPL_ERROR = 0xD7, 104 CPL_ERROR = 0xD7,
105 105
106 /* internal: driver -> TOM */ 106 /* internal: driver -> TOM */
107 CPL_MSS_CHANGE = 0xE1 107 CPL_MSS_CHANGE = 0xE1
108}; 108};
109 109
@@ -159,8 +159,8 @@ enum { // TX_PKT_LSO ethernet types
159}; 159};
160 160
161union opcode_tid { 161union opcode_tid {
162 u32 opcode_tid; 162 u32 opcode_tid;
163 u8 opcode; 163 u8 opcode;
164}; 164};
165 165
166#define S_OPCODE 24 166#define S_OPCODE 24
@@ -234,7 +234,7 @@ struct cpl_pass_accept_req {
234 u32 local_ip; 234 u32 local_ip;
235 u32 peer_ip; 235 u32 peer_ip;
236 u32 tos_tid; 236 u32 tos_tid;
237 struct tcp_options tcp_options; 237 struct tcp_options tcp_options;
238 u8 dst_mac[6]; 238 u8 dst_mac[6];
239 u16 vlan_tag; 239 u16 vlan_tag;
240 u8 src_mac[6]; 240 u8 src_mac[6];
@@ -250,12 +250,12 @@ struct cpl_pass_accept_rpl {
250 u32 peer_ip; 250 u32 peer_ip;
251 u32 opt0h; 251 u32 opt0h;
252 union { 252 union {
253 u32 opt0l; 253 u32 opt0l;
254 struct { 254 struct {
255 u8 rsvd[3]; 255 u8 rsvd[3];
256 u8 status; 256 u8 status;
257 };
257 }; 258 };
258 };
259}; 259};
260 260
261struct cpl_act_open_req { 261struct cpl_act_open_req {
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index fd5d821f3f2a..7d0f24f69777 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -69,14 +69,14 @@ static inline void cancel_mac_stats_update(struct adapter *ap)
69 cancel_delayed_work(&ap->stats_update_task); 69 cancel_delayed_work(&ap->stats_update_task);
70} 70}
71 71
72#define MAX_CMDQ_ENTRIES 16384 72#define MAX_CMDQ_ENTRIES 16384
73#define MAX_CMDQ1_ENTRIES 1024 73#define MAX_CMDQ1_ENTRIES 1024
74#define MAX_RX_BUFFERS 16384 74#define MAX_RX_BUFFERS 16384
75#define MAX_RX_JUMBO_BUFFERS 16384 75#define MAX_RX_JUMBO_BUFFERS 16384
76#define MAX_TX_BUFFERS_HIGH 16384U 76#define MAX_TX_BUFFERS_HIGH 16384U
77#define MAX_TX_BUFFERS_LOW 1536U 77#define MAX_TX_BUFFERS_LOW 1536U
78#define MAX_TX_BUFFERS 1460U 78#define MAX_TX_BUFFERS 1460U
79#define MIN_FL_ENTRIES 32 79#define MIN_FL_ENTRIES 32
80 80
81#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ 81#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
82 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ 82 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
@@ -143,7 +143,7 @@ static void link_report(struct port_info *p)
143 case SPEED_100: s = "100Mbps"; break; 143 case SPEED_100: s = "100Mbps"; break;
144 } 144 }
145 145
146 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", 146 printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
147 p->dev->name, s, 147 p->dev->name, s,
148 p->link_config.duplex == DUPLEX_FULL ? "full" : "half"); 148 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
149 } 149 }
@@ -233,7 +233,7 @@ static int cxgb_up(struct adapter *adapter)
233 233
234 t1_sge_start(adapter->sge); 234 t1_sge_start(adapter->sge);
235 t1_interrupts_enable(adapter); 235 t1_interrupts_enable(adapter);
236 out_err: 236out_err:
237 return err; 237 return err;
238} 238}
239 239
@@ -454,51 +454,21 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
454 const struct cmac_statistics *s; 454 const struct cmac_statistics *s;
455 const struct sge_intr_counts *t; 455 const struct sge_intr_counts *t;
456 struct sge_port_stats ss; 456 struct sge_port_stats ss;
457 unsigned int len;
457 458
458 s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL); 459 s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
459 460
460 *data++ = s->TxOctetsOK; 461 len = sizeof(u64)*(&s->TxFCSErrors + 1 - &s->TxOctetsOK);
461 *data++ = s->TxOctetsBad; 462 memcpy(data, &s->TxOctetsOK, len);
462 *data++ = s->TxUnicastFramesOK; 463 data += len;
463 *data++ = s->TxMulticastFramesOK; 464
464 *data++ = s->TxBroadcastFramesOK; 465 len = sizeof(u64)*(&s->RxFrameTooLongErrors + 1 - &s->RxOctetsOK);
465 *data++ = s->TxPauseFrames; 466 memcpy(data, &s->RxOctetsOK, len);
466 *data++ = s->TxFramesWithDeferredXmissions; 467 data += len;
467 *data++ = s->TxLateCollisions;
468 *data++ = s->TxTotalCollisions;
469 *data++ = s->TxFramesAbortedDueToXSCollisions;
470 *data++ = s->TxUnderrun;
471 *data++ = s->TxLengthErrors;
472 *data++ = s->TxInternalMACXmitError;
473 *data++ = s->TxFramesWithExcessiveDeferral;
474 *data++ = s->TxFCSErrors;
475
476 *data++ = s->RxOctetsOK;
477 *data++ = s->RxOctetsBad;
478 *data++ = s->RxUnicastFramesOK;
479 *data++ = s->RxMulticastFramesOK;
480 *data++ = s->RxBroadcastFramesOK;
481 *data++ = s->RxPauseFrames;
482 *data++ = s->RxFCSErrors;
483 *data++ = s->RxAlignErrors;
484 *data++ = s->RxSymbolErrors;
485 *data++ = s->RxDataErrors;
486 *data++ = s->RxSequenceErrors;
487 *data++ = s->RxRuntErrors;
488 *data++ = s->RxJabberErrors;
489 *data++ = s->RxInternalMACRcvError;
490 *data++ = s->RxInRangeLengthErrors;
491 *data++ = s->RxOutOfRangeLengthField;
492 *data++ = s->RxFrameTooLongErrors;
493 468
494 t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss); 469 t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
495 *data++ = ss.rx_packets; 470 memcpy(data, &ss, sizeof(ss));
496 *data++ = ss.rx_cso_good; 471 data += sizeof(ss);
497 *data++ = ss.tx_packets;
498 *data++ = ss.tx_cso;
499 *data++ = ss.tx_tso;
500 *data++ = ss.vlan_xtract;
501 *data++ = ss.vlan_insert;
502 472
503 t = t1_sge_get_intr_counts(adapter->sge); 473 t = t1_sge_get_intr_counts(adapter->sge);
504 *data++ = t->rx_drops; 474 *data++ = t->rx_drops;
@@ -749,7 +719,7 @@ static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
749 return -EINVAL; 719 return -EINVAL;
750 720
751 if (adapter->flags & FULL_INIT_DONE) 721 if (adapter->flags & FULL_INIT_DONE)
752 return -EBUSY; 722 return -EBUSY;
753 723
754 adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending; 724 adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
755 adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending; 725 adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
@@ -764,7 +734,7 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
764 struct adapter *adapter = dev->priv; 734 struct adapter *adapter = dev->priv;
765 735
766 adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs; 736 adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
767 adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce; 737 adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
768 adapter->params.sge.sample_interval_usecs = c->rate_sample_interval; 738 adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
769 t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge); 739 t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
770 return 0; 740 return 0;
@@ -782,9 +752,9 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
782 752
783static int get_eeprom_len(struct net_device *dev) 753static int get_eeprom_len(struct net_device *dev)
784{ 754{
785 struct adapter *adapter = dev->priv; 755 struct adapter *adapter = dev->priv;
786 756
787 return t1_is_asic(adapter) ? EEPROM_SIZE : 0; 757 return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
788} 758}
789 759
790#define EEPROM_MAGIC(ap) \ 760#define EEPROM_MAGIC(ap) \
@@ -848,7 +818,7 @@ static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
848 u32 val; 818 u32 val;
849 819
850 if (!phy->mdio_read) 820 if (!phy->mdio_read)
851 return -EOPNOTSUPP; 821 return -EOPNOTSUPP;
852 phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f, 822 phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
853 &val); 823 &val);
854 data->val_out = val; 824 data->val_out = val;
@@ -860,7 +830,7 @@ static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
860 if (!capable(CAP_NET_ADMIN)) 830 if (!capable(CAP_NET_ADMIN))
861 return -EPERM; 831 return -EPERM;
862 if (!phy->mdio_write) 832 if (!phy->mdio_write)
863 return -EOPNOTSUPP; 833 return -EOPNOTSUPP;
864 phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f, 834 phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
865 data->val_in); 835 data->val_in);
866 break; 836 break;
@@ -879,9 +849,9 @@ static int t1_change_mtu(struct net_device *dev, int new_mtu)
879 struct cmac *mac = adapter->port[dev->if_port].mac; 849 struct cmac *mac = adapter->port[dev->if_port].mac;
880 850
881 if (!mac->ops->set_mtu) 851 if (!mac->ops->set_mtu)
882 return -EOPNOTSUPP; 852 return -EOPNOTSUPP;
883 if (new_mtu < 68) 853 if (new_mtu < 68)
884 return -EINVAL; 854 return -EINVAL;
885 if ((ret = mac->ops->set_mtu(mac, new_mtu))) 855 if ((ret = mac->ops->set_mtu(mac, new_mtu)))
886 return ret; 856 return ret;
887 dev->mtu = new_mtu; 857 dev->mtu = new_mtu;
@@ -1211,9 +1181,9 @@ static int __devinit init_one(struct pci_dev *pdev,
1211 1181
1212 return 0; 1182 return 0;
1213 1183
1214 out_release_adapter_res: 1184out_release_adapter_res:
1215 t1_free_sw_modules(adapter); 1185 t1_free_sw_modules(adapter);
1216 out_free_dev: 1186out_free_dev:
1217 if (adapter) { 1187 if (adapter) {
1218 if (adapter->regs) 1188 if (adapter->regs)
1219 iounmap(adapter->regs); 1189 iounmap(adapter->regs);
@@ -1222,7 +1192,7 @@ static int __devinit init_one(struct pci_dev *pdev,
1222 free_netdev(adapter->port[i].dev); 1192 free_netdev(adapter->port[i].dev);
1223 } 1193 }
1224 pci_release_regions(pdev); 1194 pci_release_regions(pdev);
1225 out_disable_pdev: 1195out_disable_pdev:
1226 pci_disable_device(pdev); 1196 pci_disable_device(pdev);
1227 pci_set_drvdata(pdev, NULL); 1197 pci_set_drvdata(pdev, NULL);
1228 return err; 1198 return err;
@@ -1273,28 +1243,27 @@ static int t1_clock(struct adapter *adapter, int mode)
1273 int M_MEM_VAL; 1243 int M_MEM_VAL;
1274 1244
1275 enum { 1245 enum {
1276 M_CORE_BITS = 9, 1246 M_CORE_BITS = 9,
1277 T_CORE_VAL = 0, 1247 T_CORE_VAL = 0,
1278 T_CORE_BITS = 2, 1248 T_CORE_BITS = 2,
1279 N_CORE_VAL = 0, 1249 N_CORE_VAL = 0,
1280 N_CORE_BITS = 2, 1250 N_CORE_BITS = 2,
1281 M_MEM_BITS = 9, 1251 M_MEM_BITS = 9,
1282 T_MEM_VAL = 0, 1252 T_MEM_VAL = 0,
1283 T_MEM_BITS = 2, 1253 T_MEM_BITS = 2,
1284 N_MEM_VAL = 0, 1254 N_MEM_VAL = 0,
1285 N_MEM_BITS = 2, 1255 N_MEM_BITS = 2,
1286 NP_LOAD = 1 << 17, 1256 NP_LOAD = 1 << 17,
1287 S_LOAD_MEM = 1 << 5, 1257 S_LOAD_MEM = 1 << 5,
1288 S_LOAD_CORE = 1 << 6, 1258 S_LOAD_CORE = 1 << 6,
1289 S_CLOCK = 1 << 3 1259 S_CLOCK = 1 << 3
1290 }; 1260 };
1291 1261
1292 if (!t1_is_T1B(adapter)) 1262 if (!t1_is_T1B(adapter))
1293 return -ENODEV; /* Can't re-clock this chip. */ 1263 return -ENODEV; /* Can't re-clock this chip. */
1294 1264
1295 if (mode & 2) { 1265 if (mode & 2)
1296 return 0; /* show current mode. */ 1266 return 0; /* show current mode. */
1297 }
1298 1267
1299 if ((adapter->t1powersave & 1) == (mode & 1)) 1268 if ((adapter->t1powersave & 1) == (mode & 1))
1300 return -EALREADY; /* ASIC already running in mode. */ 1269 return -EALREADY; /* ASIC already running in mode. */
@@ -1386,26 +1355,26 @@ static inline void t1_sw_reset(struct pci_dev *pdev)
1386static void __devexit remove_one(struct pci_dev *pdev) 1355static void __devexit remove_one(struct pci_dev *pdev)
1387{ 1356{
1388 struct net_device *dev = pci_get_drvdata(pdev); 1357 struct net_device *dev = pci_get_drvdata(pdev);
1358 struct adapter *adapter = dev->priv;
1359 int i;
1389 1360
1390 if (dev) { 1361 for_each_port(adapter, i) {
1391 int i; 1362 if (test_bit(i, &adapter->registered_device_map))
1392 struct adapter *adapter = dev->priv; 1363 unregister_netdev(adapter->port[i].dev);
1393 1364 }
1394 for_each_port(adapter, i)
1395 if (test_bit(i, &adapter->registered_device_map))
1396 unregister_netdev(adapter->port[i].dev);
1397 1365
1398 t1_free_sw_modules(adapter); 1366 t1_free_sw_modules(adapter);
1399 iounmap(adapter->regs); 1367 iounmap(adapter->regs);
1400 while (--i >= 0)
1401 if (adapter->port[i].dev)
1402 free_netdev(adapter->port[i].dev);
1403 1368
1404 pci_release_regions(pdev); 1369 while (--i >= 0) {
1405 pci_disable_device(pdev); 1370 if (adapter->port[i].dev)
1406 pci_set_drvdata(pdev, NULL); 1371 free_netdev(adapter->port[i].dev);
1407 t1_sw_reset(pdev);
1408 } 1372 }
1373
1374 pci_release_regions(pdev);
1375 pci_disable_device(pdev);
1376 pci_set_drvdata(pdev, NULL);
1377 t1_sw_reset(pdev);
1409} 1378}
1410 1379
1411static struct pci_driver driver = { 1380static struct pci_driver driver = {
diff --git a/drivers/net/chelsio/elmer0.h b/drivers/net/chelsio/elmer0.h
index 9ebecaa97d31..eef655c827d9 100644
--- a/drivers/net/chelsio/elmer0.h
+++ b/drivers/net/chelsio/elmer0.h
@@ -46,14 +46,14 @@ enum {
46}; 46};
47 47
48/* ELMER0 registers */ 48/* ELMER0 registers */
49#define A_ELMER0_VERSION 0x100000 49#define A_ELMER0_VERSION 0x100000
50#define A_ELMER0_PHY_CFG 0x100004 50#define A_ELMER0_PHY_CFG 0x100004
51#define A_ELMER0_INT_ENABLE 0x100008 51#define A_ELMER0_INT_ENABLE 0x100008
52#define A_ELMER0_INT_CAUSE 0x10000c 52#define A_ELMER0_INT_CAUSE 0x10000c
53#define A_ELMER0_GPI_CFG 0x100010 53#define A_ELMER0_GPI_CFG 0x100010
54#define A_ELMER0_GPI_STAT 0x100014 54#define A_ELMER0_GPI_STAT 0x100014
55#define A_ELMER0_GPO 0x100018 55#define A_ELMER0_GPO 0x100018
56#define A_ELMER0_PORT0_MI1_CFG 0x400000 56#define A_ELMER0_PORT0_MI1_CFG 0x400000
57 57
58#define S_MI1_MDI_ENABLE 0 58#define S_MI1_MDI_ENABLE 0
59#define V_MI1_MDI_ENABLE(x) ((x) << S_MI1_MDI_ENABLE) 59#define V_MI1_MDI_ENABLE(x) ((x) << S_MI1_MDI_ENABLE)
@@ -111,18 +111,18 @@ enum {
111#define V_MI1_OP_BUSY(x) ((x) << S_MI1_OP_BUSY) 111#define V_MI1_OP_BUSY(x) ((x) << S_MI1_OP_BUSY)
112#define F_MI1_OP_BUSY V_MI1_OP_BUSY(1U) 112#define F_MI1_OP_BUSY V_MI1_OP_BUSY(1U)
113 113
114#define A_ELMER0_PORT1_MI1_CFG 0x500000 114#define A_ELMER0_PORT1_MI1_CFG 0x500000
115#define A_ELMER0_PORT1_MI1_ADDR 0x500004 115#define A_ELMER0_PORT1_MI1_ADDR 0x500004
116#define A_ELMER0_PORT1_MI1_DATA 0x500008 116#define A_ELMER0_PORT1_MI1_DATA 0x500008
117#define A_ELMER0_PORT1_MI1_OP 0x50000c 117#define A_ELMER0_PORT1_MI1_OP 0x50000c
118#define A_ELMER0_PORT2_MI1_CFG 0x600000 118#define A_ELMER0_PORT2_MI1_CFG 0x600000
119#define A_ELMER0_PORT2_MI1_ADDR 0x600004 119#define A_ELMER0_PORT2_MI1_ADDR 0x600004
120#define A_ELMER0_PORT2_MI1_DATA 0x600008 120#define A_ELMER0_PORT2_MI1_DATA 0x600008
121#define A_ELMER0_PORT2_MI1_OP 0x60000c 121#define A_ELMER0_PORT2_MI1_OP 0x60000c
122#define A_ELMER0_PORT3_MI1_CFG 0x700000 122#define A_ELMER0_PORT3_MI1_CFG 0x700000
123#define A_ELMER0_PORT3_MI1_ADDR 0x700004 123#define A_ELMER0_PORT3_MI1_ADDR 0x700004
124#define A_ELMER0_PORT3_MI1_DATA 0x700008 124#define A_ELMER0_PORT3_MI1_DATA 0x700008
125#define A_ELMER0_PORT3_MI1_OP 0x70000c 125#define A_ELMER0_PORT3_MI1_OP 0x70000c
126 126
127/* Simple bit definition for GPI and GP0 registers. */ 127/* Simple bit definition for GPI and GP0 registers. */
128#define ELMER0_GP_BIT0 0x0001 128#define ELMER0_GP_BIT0 0x0001
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c
index 4192f0f5b3ee..d7c5406a6c3f 100644
--- a/drivers/net/chelsio/espi.c
+++ b/drivers/net/chelsio/espi.c
@@ -202,9 +202,9 @@ static void espi_setup_for_pm3393(adapter_t *adapter)
202 202
203static void espi_setup_for_vsc7321(adapter_t *adapter) 203static void espi_setup_for_vsc7321(adapter_t *adapter)
204{ 204{
205 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0); 205 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0);
206 writel(0x1f401f4, adapter->regs + A_ESPI_SCH_TOKEN1); 206 writel(0x1f401f4, adapter->regs + A_ESPI_SCH_TOKEN1);
207 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2); 207 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2);
208 writel(0xa00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); 208 writel(0xa00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
209 writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); 209 writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
210 writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH); 210 writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH);
@@ -247,10 +247,10 @@ int t1_espi_init(struct peespi *espi, int mac_type, int nports)
247 writel(V_OUT_OF_SYNC_COUNT(4) | 247 writel(V_OUT_OF_SYNC_COUNT(4) |
248 V_DIP2_PARITY_ERR_THRES(3) | 248 V_DIP2_PARITY_ERR_THRES(3) |
249 V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL); 249 V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL);
250 writel(nports == 4 ? 0x200040 : 0x1000080, 250 writel(nports == 4 ? 0x200040 : 0x1000080,
251 adapter->regs + A_ESPI_MAXBURST1_MAXBURST2); 251 adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
252 } else 252 } else
253 writel(0x800100, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2); 253 writel(0x800100, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
254 254
255 if (mac_type == CHBT_MAC_PM3393) 255 if (mac_type == CHBT_MAC_PM3393)
256 espi_setup_for_pm3393(adapter); 256 espi_setup_for_pm3393(adapter);
@@ -301,7 +301,8 @@ void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val)
301{ 301{
302 struct peespi *espi = adapter->espi; 302 struct peespi *espi = adapter->espi;
303 303
304 if (!is_T2(adapter)) return; 304 if (!is_T2(adapter))
305 return;
305 spin_lock(&espi->lock); 306 spin_lock(&espi->lock);
306 espi->misc_ctrl = (val & ~MON_MASK) | 307 espi->misc_ctrl = (val & ~MON_MASK) |
307 (espi->misc_ctrl & MON_MASK); 308 (espi->misc_ctrl & MON_MASK);
@@ -340,32 +341,31 @@ u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait)
340 * compare with t1_espi_get_mon(), it reads espiInTxSop[0 ~ 3] in 341 * compare with t1_espi_get_mon(), it reads espiInTxSop[0 ~ 3] in
341 * one shot, since there is no per port counter on the out side. 342 * one shot, since there is no per port counter on the out side.
342 */ 343 */
343int 344int t1_espi_get_mon_t204(adapter_t *adapter, u32 *valp, u8 wait)
344t1_espi_get_mon_t204(adapter_t *adapter, u32 *valp, u8 wait)
345{ 345{
346 struct peespi *espi = adapter->espi; 346 struct peespi *espi = adapter->espi;
347 u8 i, nport = (u8)adapter->params.nports; 347 u8 i, nport = (u8)adapter->params.nports;
348 348
349 if (!wait) { 349 if (!wait) {
350 if (!spin_trylock(&espi->lock)) 350 if (!spin_trylock(&espi->lock))
351 return -1; 351 return -1;
352 } else 352 } else
353 spin_lock(&espi->lock); 353 spin_lock(&espi->lock);
354 354
355 if ( (espi->misc_ctrl & MON_MASK) != F_MONITORED_DIRECTION ) { 355 if ((espi->misc_ctrl & MON_MASK) != F_MONITORED_DIRECTION) {
356 espi->misc_ctrl = (espi->misc_ctrl & ~MON_MASK) | 356 espi->misc_ctrl = (espi->misc_ctrl & ~MON_MASK) |
357 F_MONITORED_DIRECTION; 357 F_MONITORED_DIRECTION;
358 writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); 358 writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
359 } 359 }
360 for (i = 0 ; i < nport; i++, valp++) { 360 for (i = 0 ; i < nport; i++, valp++) {
361 if (i) { 361 if (i) {
362 writel(espi->misc_ctrl | V_MONITORED_PORT_NUM(i), 362 writel(espi->misc_ctrl | V_MONITORED_PORT_NUM(i),
363 adapter->regs + A_ESPI_MISC_CONTROL); 363 adapter->regs + A_ESPI_MISC_CONTROL);
364 } 364 }
365 *valp = readl(adapter->regs + A_ESPI_SCH_TOKEN3); 365 *valp = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
366 } 366 }
367 367
368 writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); 368 writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
369 spin_unlock(&espi->lock); 369 spin_unlock(&espi->lock);
370 return 0; 370 return 0;
371} 371}
diff --git a/drivers/net/chelsio/fpga_defs.h b/drivers/net/chelsio/fpga_defs.h
index 17a3c2ba36a3..ccdb2bc9ae98 100644
--- a/drivers/net/chelsio/fpga_defs.h
+++ b/drivers/net/chelsio/fpga_defs.h
@@ -98,9 +98,9 @@
98#define A_MI0_DATA_INT 0xb10 98#define A_MI0_DATA_INT 0xb10
99 99
100/* GMAC registers */ 100/* GMAC registers */
101#define A_GMAC_MACID_LO 0x28 101#define A_GMAC_MACID_LO 0x28
102#define A_GMAC_MACID_HI 0x2c 102#define A_GMAC_MACID_HI 0x2c
103#define A_GMAC_CSR 0x30 103#define A_GMAC_CSR 0x30
104 104
105#define S_INTERFACE 0 105#define S_INTERFACE 0
106#define M_INTERFACE 0x3 106#define M_INTERFACE 0x3
diff --git a/drivers/net/chelsio/gmac.h b/drivers/net/chelsio/gmac.h
index a2b8ad9b5535..006a2eb2d362 100644
--- a/drivers/net/chelsio/gmac.h
+++ b/drivers/net/chelsio/gmac.h
@@ -42,8 +42,15 @@
42 42
43#include "common.h" 43#include "common.h"
44 44
45enum { MAC_STATS_UPDATE_FAST, MAC_STATS_UPDATE_FULL }; 45enum {
46enum { MAC_DIRECTION_RX = 1, MAC_DIRECTION_TX = 2 }; 46 MAC_STATS_UPDATE_FAST,
47 MAC_STATS_UPDATE_FULL
48};
49
50enum {
51 MAC_DIRECTION_RX = 1,
52 MAC_DIRECTION_TX = 2
53};
47 54
48struct cmac_statistics { 55struct cmac_statistics {
49 /* Transmit */ 56 /* Transmit */
diff --git a/drivers/net/chelsio/ixf1010.c b/drivers/net/chelsio/ixf1010.c
index 5b8f144e83d4..10b2a9a19006 100644
--- a/drivers/net/chelsio/ixf1010.c
+++ b/drivers/net/chelsio/ixf1010.c
@@ -145,48 +145,61 @@ static void disable_port(struct cmac *mac)
145 t1_tpi_write(mac->adapter, REG_PORT_ENABLE, val); 145 t1_tpi_write(mac->adapter, REG_PORT_ENABLE, val);
146} 146}
147 147
148#define RMON_UPDATE(mac, name, stat_name) \
149 t1_tpi_read((mac)->adapter, MACREG(mac, REG_##name), &val); \
150 (mac)->stats.stat_name += val;
151
152/* 148/*
153 * Read the current values of the RMON counters and add them to the cumulative 149 * Read the current values of the RMON counters and add them to the cumulative
154 * port statistics. The HW RMON counters are cleared by this operation. 150 * port statistics. The HW RMON counters are cleared by this operation.
155 */ 151 */
156static void port_stats_update(struct cmac *mac) 152static void port_stats_update(struct cmac *mac)
157{ 153{
158 u32 val; 154 static struct {
155 unsigned int reg;
156 unsigned int offset;
157 } hw_stats[] = {
158
159#define HW_STAT(name, stat_name) \
160 { REG_##name, \
161 (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
162
163 /* Rx stats */
164 HW_STAT(RxOctetsTotalOK, RxOctetsOK),
165 HW_STAT(RxOctetsBad, RxOctetsBad),
166 HW_STAT(RxUCPkts, RxUnicastFramesOK),
167 HW_STAT(RxMCPkts, RxMulticastFramesOK),
168 HW_STAT(RxBCPkts, RxBroadcastFramesOK),
169 HW_STAT(RxJumboPkts, RxJumboFramesOK),
170 HW_STAT(RxFCSErrors, RxFCSErrors),
171 HW_STAT(RxAlignErrors, RxAlignErrors),
172 HW_STAT(RxLongErrors, RxFrameTooLongErrors),
173 HW_STAT(RxVeryLongErrors, RxFrameTooLongErrors),
174 HW_STAT(RxPauseMacControlCounter, RxPauseFrames),
175 HW_STAT(RxDataErrors, RxDataErrors),
176 HW_STAT(RxJabberErrors, RxJabberErrors),
177 HW_STAT(RxRuntErrors, RxRuntErrors),
178 HW_STAT(RxShortErrors, RxRuntErrors),
179 HW_STAT(RxSequenceErrors, RxSequenceErrors),
180 HW_STAT(RxSymbolErrors, RxSymbolErrors),
181
182 /* Tx stats (skip collision stats as we are full-duplex only) */
183 HW_STAT(TxOctetsTotalOK, TxOctetsOK),
184 HW_STAT(TxOctetsBad, TxOctetsBad),
185 HW_STAT(TxUCPkts, TxUnicastFramesOK),
186 HW_STAT(TxMCPkts, TxMulticastFramesOK),
187 HW_STAT(TxBCPkts, TxBroadcastFramesOK),
188 HW_STAT(TxJumboPkts, TxJumboFramesOK),
189 HW_STAT(TxPauseFrames, TxPauseFrames),
190 HW_STAT(TxExcessiveLengthDrop, TxLengthErrors),
191 HW_STAT(TxUnderrun, TxUnderrun),
192 HW_STAT(TxCRCErrors, TxFCSErrors)
193 }, *p = hw_stats;
194 u64 *stats = (u64 *) &mac->stats;
195 unsigned int i;
196
197 for (i = 0; i < ARRAY_SIZE(hw_stats); i++) {
198 u32 val;
159 199
160 /* Rx stats */ 200 t1_tpi_read(mac->adapter, MACREG(mac, p->reg), &val);
161 RMON_UPDATE(mac, RxOctetsTotalOK, RxOctetsOK); 201 stats[p->offset] += val;
162 RMON_UPDATE(mac, RxOctetsBad, RxOctetsBad); 202 }
163 RMON_UPDATE(mac, RxUCPkts, RxUnicastFramesOK);
164 RMON_UPDATE(mac, RxMCPkts, RxMulticastFramesOK);
165 RMON_UPDATE(mac, RxBCPkts, RxBroadcastFramesOK);
166 RMON_UPDATE(mac, RxJumboPkts, RxJumboFramesOK);
167 RMON_UPDATE(mac, RxFCSErrors, RxFCSErrors);
168 RMON_UPDATE(mac, RxAlignErrors, RxAlignErrors);
169 RMON_UPDATE(mac, RxLongErrors, RxFrameTooLongErrors);
170 RMON_UPDATE(mac, RxVeryLongErrors, RxFrameTooLongErrors);
171 RMON_UPDATE(mac, RxPauseMacControlCounter, RxPauseFrames);
172 RMON_UPDATE(mac, RxDataErrors, RxDataErrors);
173 RMON_UPDATE(mac, RxJabberErrors, RxJabberErrors);
174 RMON_UPDATE(mac, RxRuntErrors, RxRuntErrors);
175 RMON_UPDATE(mac, RxShortErrors, RxRuntErrors);
176 RMON_UPDATE(mac, RxSequenceErrors, RxSequenceErrors);
177 RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
178
179 /* Tx stats (skip collision stats as we are full-duplex only) */
180 RMON_UPDATE(mac, TxOctetsTotalOK, TxOctetsOK);
181 RMON_UPDATE(mac, TxOctetsBad, TxOctetsBad);
182 RMON_UPDATE(mac, TxUCPkts, TxUnicastFramesOK);
183 RMON_UPDATE(mac, TxMCPkts, TxMulticastFramesOK);
184 RMON_UPDATE(mac, TxBCPkts, TxBroadcastFramesOK);
185 RMON_UPDATE(mac, TxJumboPkts, TxJumboFramesOK);
186 RMON_UPDATE(mac, TxPauseFrames, TxPauseFrames);
187 RMON_UPDATE(mac, TxExcessiveLengthDrop, TxLengthErrors);
188 RMON_UPDATE(mac, TxUnderrun, TxUnderrun);
189 RMON_UPDATE(mac, TxCRCErrors, TxFCSErrors);
190} 203}
191 204
192/* No-op interrupt operation as this MAC does not support interrupts */ 205/* No-op interrupt operation as this MAC does not support interrupts */
@@ -273,7 +286,8 @@ static int mac_set_rx_mode(struct cmac *mac, struct t1_rx_mode *rm)
273static int mac_set_mtu(struct cmac *mac, int mtu) 286static int mac_set_mtu(struct cmac *mac, int mtu)
274{ 287{
275 /* MAX_FRAME_SIZE inludes header + FCS, mtu doesn't */ 288 /* MAX_FRAME_SIZE inludes header + FCS, mtu doesn't */
276 if (mtu > (MAX_FRAME_SIZE - 14 - 4)) return -EINVAL; 289 if (mtu > (MAX_FRAME_SIZE - 14 - 4))
290 return -EINVAL;
277 t1_tpi_write(mac->adapter, MACREG(mac, REG_MAX_FRAME_SIZE), 291 t1_tpi_write(mac->adapter, MACREG(mac, REG_MAX_FRAME_SIZE),
278 mtu + 14 + 4); 292 mtu + 14 + 4);
279 return 0; 293 return 0;
@@ -357,8 +371,8 @@ static void enable_port(struct cmac *mac)
357 val |= (1 << index); 371 val |= (1 << index);
358 t1_tpi_write(adapter, REG_PORT_ENABLE, val); 372 t1_tpi_write(adapter, REG_PORT_ENABLE, val);
359 373
360 index <<= 2; 374 index <<= 2;
361 if (is_T2(adapter)) { 375 if (is_T2(adapter)) {
362 /* T204: set the Fifo water level & threshold */ 376 /* T204: set the Fifo water level & threshold */
363 t1_tpi_write(adapter, RX_FIFO_HIGH_WATERMARK_BASE + index, 0x740); 377 t1_tpi_write(adapter, RX_FIFO_HIGH_WATERMARK_BASE + index, 0x740);
364 t1_tpi_write(adapter, RX_FIFO_LOW_WATERMARK_BASE + index, 0x730); 378 t1_tpi_write(adapter, RX_FIFO_LOW_WATERMARK_BASE + index, 0x730);
@@ -389,6 +403,10 @@ static int mac_disable(struct cmac *mac, int which)
389 return 0; 403 return 0;
390} 404}
391 405
406#define RMON_UPDATE(mac, name, stat_name) \
407 t1_tpi_read((mac)->adapter, MACREG(mac, REG_##name), &val); \
408 (mac)->stats.stat_name += val;
409
392/* 410/*
393 * This function is called periodically to accumulate the current values of the 411 * This function is called periodically to accumulate the current values of the
394 * RMON counters into the port statistics. Since the counters are only 32 bits 412 * RMON counters into the port statistics. Since the counters are only 32 bits
@@ -460,10 +478,12 @@ static struct cmac *ixf1010_mac_create(adapter_t *adapter, int index)
460 struct cmac *mac; 478 struct cmac *mac;
461 u32 val; 479 u32 val;
462 480
463 if (index > 9) return NULL; 481 if (index > 9)
482 return NULL;
464 483
465 mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL); 484 mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL);
466 if (!mac) return NULL; 485 if (!mac)
486 return NULL;
467 487
468 mac->ops = &ixf1010_ops; 488 mac->ops = &ixf1010_ops;
469 mac->instance = (cmac_instance *)(mac + 1); 489 mac->instance = (cmac_instance *)(mac + 1);
diff --git a/drivers/net/chelsio/mv88e1xxx.c b/drivers/net/chelsio/mv88e1xxx.c
index 28ac93ff7c4f..5867e3b0a887 100644
--- a/drivers/net/chelsio/mv88e1xxx.c
+++ b/drivers/net/chelsio/mv88e1xxx.c
@@ -73,9 +73,8 @@ static int mv88e1xxx_interrupt_enable(struct cphy *cphy)
73 73
74 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); 74 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
75 elmer |= ELMER0_GP_BIT1; 75 elmer |= ELMER0_GP_BIT1;
76 if (is_T2(cphy->adapter)) { 76 if (is_T2(cphy->adapter))
77 elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4; 77 elmer |= ELMER0_GP_BIT2 | ELMER0_GP_BIT3 | ELMER0_GP_BIT4;
78 }
79 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); 78 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
80 } 79 }
81 return 0; 80 return 0;
@@ -92,9 +91,8 @@ static int mv88e1xxx_interrupt_disable(struct cphy *cphy)
92 91
93 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); 92 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
94 elmer &= ~ELMER0_GP_BIT1; 93 elmer &= ~ELMER0_GP_BIT1;
95 if (is_T2(cphy->adapter)) { 94 if (is_T2(cphy->adapter))
96 elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4); 95 elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4);
97 }
98 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); 96 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
99 } 97 }
100 return 0; 98 return 0;
@@ -112,9 +110,8 @@ static int mv88e1xxx_interrupt_clear(struct cphy *cphy)
112 if (t1_is_asic(cphy->adapter)) { 110 if (t1_is_asic(cphy->adapter)) {
113 t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer); 111 t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
114 elmer |= ELMER0_GP_BIT1; 112 elmer |= ELMER0_GP_BIT1;
115 if (is_T2(cphy->adapter)) { 113 if (is_T2(cphy->adapter))
116 elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4; 114 elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
117 }
118 t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer); 115 t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
119 } 116 }
120 return 0; 117 return 0;
@@ -300,7 +297,7 @@ static int mv88e1xxx_interrupt_handler(struct cphy *cphy)
300 297
301 /* 298 /*
302 * Loop until cause reads zero. Need to handle bouncing interrupts. 299 * Loop until cause reads zero. Need to handle bouncing interrupts.
303 */ 300 */
304 while (1) { 301 while (1) {
305 u32 cause; 302 u32 cause;
306 303
@@ -308,15 +305,16 @@ static int mv88e1xxx_interrupt_handler(struct cphy *cphy)
308 MV88E1XXX_INTERRUPT_STATUS_REGISTER, 305 MV88E1XXX_INTERRUPT_STATUS_REGISTER,
309 &cause); 306 &cause);
310 cause &= INTR_ENABLE_MASK; 307 cause &= INTR_ENABLE_MASK;
311 if (!cause) break; 308 if (!cause)
309 break;
312 310
313 if (cause & MV88E1XXX_INTR_LINK_CHNG) { 311 if (cause & MV88E1XXX_INTR_LINK_CHNG) {
314 (void) simple_mdio_read(cphy, 312 (void) simple_mdio_read(cphy,
315 MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status); 313 MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status);
316 314
317 if (status & MV88E1XXX_INTR_LINK_CHNG) { 315 if (status & MV88E1XXX_INTR_LINK_CHNG)
318 cphy->state |= PHY_LINK_UP; 316 cphy->state |= PHY_LINK_UP;
319 } else { 317 else {
320 cphy->state &= ~PHY_LINK_UP; 318 cphy->state &= ~PHY_LINK_UP;
321 if (cphy->state & PHY_AUTONEG_EN) 319 if (cphy->state & PHY_AUTONEG_EN)
322 cphy->state &= ~PHY_AUTONEG_RDY; 320 cphy->state &= ~PHY_AUTONEG_RDY;
@@ -360,7 +358,8 @@ static struct cphy *mv88e1xxx_phy_create(adapter_t *adapter, int phy_addr,
360{ 358{
361 struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL); 359 struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);
362 360
363 if (!cphy) return NULL; 361 if (!cphy)
362 return NULL;
364 363
365 cphy_init(cphy, adapter, phy_addr, &mv88e1xxx_ops, mdio_ops); 364 cphy_init(cphy, adapter, phy_addr, &mv88e1xxx_ops, mdio_ops);
366 365
@@ -377,11 +376,11 @@ static struct cphy *mv88e1xxx_phy_create(adapter_t *adapter, int phy_addr,
377 } 376 }
378 (void) mv88e1xxx_downshift_set(cphy, 1); /* Enable downshift */ 377 (void) mv88e1xxx_downshift_set(cphy, 1); /* Enable downshift */
379 378
380 /* LED */ 379 /* LED */
381 if (is_T2(adapter)) { 380 if (is_T2(adapter)) {
382 (void) simple_mdio_write(cphy, 381 (void) simple_mdio_write(cphy,
383 MV88E1XXX_LED_CONTROL_REGISTER, 0x1); 382 MV88E1XXX_LED_CONTROL_REGISTER, 0x1);
384 } 383 }
385 384
386 return cphy; 385 return cphy;
387} 386}
diff --git a/drivers/net/chelsio/my3126.c b/drivers/net/chelsio/my3126.c
index 82fed1dd5005..87dde3e60046 100644
--- a/drivers/net/chelsio/my3126.c
+++ b/drivers/net/chelsio/my3126.c
@@ -10,25 +10,25 @@ static int my3126_reset(struct cphy *cphy, int wait)
10 * This can be done through registers. It is not required since 10 * This can be done through registers. It is not required since
11 * a full chip reset is used. 11 * a full chip reset is used.
12 */ 12 */
13 return (0); 13 return 0;
14} 14}
15 15
16static int my3126_interrupt_enable(struct cphy *cphy) 16static int my3126_interrupt_enable(struct cphy *cphy)
17{ 17{
18 schedule_delayed_work(&cphy->phy_update, HZ/30); 18 schedule_delayed_work(&cphy->phy_update, HZ/30);
19 t1_tpi_read(cphy->adapter, A_ELMER0_GPO, &cphy->elmer_gpo); 19 t1_tpi_read(cphy->adapter, A_ELMER0_GPO, &cphy->elmer_gpo);
20 return (0); 20 return 0;
21} 21}
22 22
23static int my3126_interrupt_disable(struct cphy *cphy) 23static int my3126_interrupt_disable(struct cphy *cphy)
24{ 24{
25 cancel_rearming_delayed_work(&cphy->phy_update); 25 cancel_rearming_delayed_work(&cphy->phy_update);
26 return (0); 26 return 0;
27} 27}
28 28
29static int my3126_interrupt_clear(struct cphy *cphy) 29static int my3126_interrupt_clear(struct cphy *cphy)
30{ 30{
31 return (0); 31 return 0;
32} 32}
33 33
34#define OFFSET(REG_ADDR) (REG_ADDR << 2) 34#define OFFSET(REG_ADDR) (REG_ADDR << 2)
@@ -102,7 +102,7 @@ static void my3216_poll(struct work_struct *work)
102 102
103static int my3126_set_loopback(struct cphy *cphy, int on) 103static int my3126_set_loopback(struct cphy *cphy, int on)
104{ 104{
105 return (0); 105 return 0;
106} 106}
107 107
108/* To check the activity LED */ 108/* To check the activity LED */
@@ -146,7 +146,7 @@ static int my3126_get_link_status(struct cphy *cphy,
146 if (fc) 146 if (fc)
147 *fc = PAUSE_RX | PAUSE_TX; 147 *fc = PAUSE_RX | PAUSE_TX;
148 148
149 return (0); 149 return 0;
150} 150}
151 151
152static void my3126_destroy(struct cphy *cphy) 152static void my3126_destroy(struct cphy *cphy)
@@ -177,7 +177,7 @@ static struct cphy *my3126_phy_create(adapter_t *adapter,
177 INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll); 177 INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll);
178 cphy->bmsr = 0; 178 cphy->bmsr = 0;
179 179
180 return (cphy); 180 return cphy;
181} 181}
182 182
183/* Chip Reset */ 183/* Chip Reset */
@@ -198,7 +198,7 @@ static int my3126_phy_reset(adapter_t * adapter)
198 val |= 0x8000; 198 val |= 0x8000;
199 t1_tpi_write(adapter, A_ELMER0_GPO, val); 199 t1_tpi_write(adapter, A_ELMER0_GPO, val);
200 udelay(100); 200 udelay(100);
201 return (0); 201 return 0;
202} 202}
203 203
204struct gphy t1_my3126_ops = { 204struct gphy t1_my3126_ops = {
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
index 63cabeb98afe..69129edeefd6 100644
--- a/drivers/net/chelsio/pm3393.c
+++ b/drivers/net/chelsio/pm3393.c
@@ -446,17 +446,51 @@ static void pm3393_rmon_update(struct adapter *adapter, u32 offs, u64 *val,
446 *val += 1ull << 40; 446 *val += 1ull << 40;
447} 447}
448 448
449#define RMON_UPDATE(mac, name, stat_name) \
450 pm3393_rmon_update((mac)->adapter, OFFSET(name), \
451 &(mac)->stats.stat_name, \
452 (ro &((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2)))
453
454
455static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac, 449static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
456 int flag) 450 int flag)
457{ 451{
458 u64 ro; 452 static struct {
459 u32 val0, val1, val2, val3; 453 unsigned int reg;
454 unsigned int offset;
455 } hw_stats [] = {
456
457#define HW_STAT(name, stat_name) \
458 { name, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
459
460 /* Rx stats */
461 HW_STAT(RxOctetsReceivedOK, RxOctetsOK),
462 HW_STAT(RxUnicastFramesReceivedOK, RxUnicastFramesOK),
463 HW_STAT(RxMulticastFramesReceivedOK, RxMulticastFramesOK),
464 HW_STAT(RxBroadcastFramesReceivedOK, RxBroadcastFramesOK),
465 HW_STAT(RxPAUSEMACCtrlFramesReceived, RxPauseFrames),
466 HW_STAT(RxFrameCheckSequenceErrors, RxFCSErrors),
467 HW_STAT(RxFramesLostDueToInternalMACErrors,
468 RxInternalMACRcvError),
469 HW_STAT(RxSymbolErrors, RxSymbolErrors),
470 HW_STAT(RxInRangeLengthErrors, RxInRangeLengthErrors),
471 HW_STAT(RxFramesTooLongErrors , RxFrameTooLongErrors),
472 HW_STAT(RxJabbers, RxJabberErrors),
473 HW_STAT(RxFragments, RxRuntErrors),
474 HW_STAT(RxUndersizedFrames, RxRuntErrors),
475 HW_STAT(RxJumboFramesReceivedOK, RxJumboFramesOK),
476 HW_STAT(RxJumboOctetsReceivedOK, RxJumboOctetsOK),
477
478 /* Tx stats */
479 HW_STAT(TxOctetsTransmittedOK, TxOctetsOK),
480 HW_STAT(TxFramesLostDueToInternalMACTransmissionError,
481 TxInternalMACXmitError),
482 HW_STAT(TxTransmitSystemError, TxFCSErrors),
483 HW_STAT(TxUnicastFramesTransmittedOK, TxUnicastFramesOK),
484 HW_STAT(TxMulticastFramesTransmittedOK, TxMulticastFramesOK),
485 HW_STAT(TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK),
486 HW_STAT(TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames),
487 HW_STAT(TxJumboFramesReceivedOK, TxJumboFramesOK),
488 HW_STAT(TxJumboOctetsReceivedOK, TxJumboOctetsOK)
489 }, *p = hw_stats;
490 u64 ro;
491 u32 val0, val1, val2, val3;
492 u64 *stats = (u64 *) &mac->stats;
493 unsigned int i;
460 494
461 /* Snap the counters */ 495 /* Snap the counters */
462 pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL, 496 pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
@@ -470,35 +504,14 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
470 ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) | 504 ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) |
471 (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48); 505 (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48);
472 506
473 /* Rx stats */ 507 for (i = 0; i < ARRAY_SIZE(hw_stats); i++) {
474 RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK); 508 unsigned reg = p->reg - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW;
475 RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK); 509
476 RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK); 510 pm3393_rmon_update((mac)->adapter, OFFSET(p->reg),
477 RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK); 511 stats + p->offset, ro & (reg >> 2));
478 RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames); 512 }
479 RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors); 513
480 RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors, 514
481 RxInternalMACRcvError);
482 RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
483 RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors);
484 RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors);
485 RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
486 RMON_UPDATE(mac, RxFragments, RxRuntErrors);
487 RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
488 RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK);
489 RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK);
490
491 /* Tx stats */
492 RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
493 RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError,
494 TxInternalMACXmitError);
495 RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors);
496 RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK);
497 RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
498 RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
499 RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
500 RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK);
501 RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK);
502 515
503 return &mac->stats; 516 return &mac->stats;
504} 517}
@@ -534,9 +547,9 @@ static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
534 /* Store local copy */ 547 /* Store local copy */
535 memcpy(cmac->instance->mac_addr, ma, 6); 548 memcpy(cmac->instance->mac_addr, ma, 6);
536 549
537 lo = ((u32) ma[1] << 8) | (u32) ma[0]; 550 lo = ((u32) ma[1] << 8) | (u32) ma[0];
538 mid = ((u32) ma[3] << 8) | (u32) ma[2]; 551 mid = ((u32) ma[3] << 8) | (u32) ma[2];
539 hi = ((u32) ma[5] << 8) | (u32) ma[4]; 552 hi = ((u32) ma[5] << 8) | (u32) ma[4];
540 553
541 /* Disable Rx/Tx MAC before configuring it. */ 554 /* Disable Rx/Tx MAC before configuring it. */
542 if (enabled) 555 if (enabled)
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 659cb2252e44..89a682702fa9 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -71,12 +71,9 @@
71#define SGE_FREEL_REFILL_THRESH 16 71#define SGE_FREEL_REFILL_THRESH 16
72#define SGE_RESPQ_E_N 1024 72#define SGE_RESPQ_E_N 1024
73#define SGE_INTRTIMER_NRES 1000 73#define SGE_INTRTIMER_NRES 1000
74#define SGE_RX_COPY_THRES 256
75#define SGE_RX_SM_BUF_SIZE 1536 74#define SGE_RX_SM_BUF_SIZE 1536
76#define SGE_TX_DESC_MAX_PLEN 16384 75#define SGE_TX_DESC_MAX_PLEN 16384
77 76
78# define SGE_RX_DROP_THRES 2
79
80#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4) 77#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
81 78
82/* 79/*
@@ -85,10 +82,6 @@
85 */ 82 */
86#define TX_RECLAIM_PERIOD (HZ / 4) 83#define TX_RECLAIM_PERIOD (HZ / 4)
87 84
88#ifndef NET_IP_ALIGN
89# define NET_IP_ALIGN 2
90#endif
91
92#define M_CMD_LEN 0x7fffffff 85#define M_CMD_LEN 0x7fffffff
93#define V_CMD_LEN(v) (v) 86#define V_CMD_LEN(v) (v)
94#define G_CMD_LEN(v) ((v) & M_CMD_LEN) 87#define G_CMD_LEN(v) ((v) & M_CMD_LEN)
@@ -195,7 +188,7 @@ struct cmdQ {
195 struct cmdQ_e *entries; /* HW command descriptor Q */ 188 struct cmdQ_e *entries; /* HW command descriptor Q */
196 struct cmdQ_ce *centries; /* SW command context descriptor Q */ 189 struct cmdQ_ce *centries; /* SW command context descriptor Q */
197 dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */ 190 dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */
198 spinlock_t lock; /* Lock to protect cmdQ enqueuing */ 191 spinlock_t lock; /* Lock to protect cmdQ enqueuing */
199}; 192};
200 193
201struct freelQ { 194struct freelQ {
@@ -241,9 +234,9 @@ struct sched_port {
241/* Per T204 device */ 234/* Per T204 device */
242struct sched { 235struct sched {
243 ktime_t last_updated; /* last time quotas were computed */ 236 ktime_t last_updated; /* last time quotas were computed */
244 unsigned int max_avail; /* max bits to be sent to any port */ 237 unsigned int max_avail; /* max bits to be sent to any port */
245 unsigned int port; /* port index (round robin ports) */ 238 unsigned int port; /* port index (round robin ports) */
246 unsigned int num; /* num skbs in per port queues */ 239 unsigned int num; /* num skbs in per port queues */
247 struct sched_port p[MAX_NPORTS]; 240 struct sched_port p[MAX_NPORTS];
248 struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */ 241 struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
249}; 242};
@@ -259,10 +252,10 @@ static void restart_sched(unsigned long);
259 * contention. 252 * contention.
260 */ 253 */
261struct sge { 254struct sge {
262 struct adapter *adapter; /* adapter backpointer */ 255 struct adapter *adapter; /* adapter backpointer */
263 struct net_device *netdev; /* netdevice backpointer */ 256 struct net_device *netdev; /* netdevice backpointer */
264 struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */ 257 struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
265 struct respQ respQ; /* response Q */ 258 struct respQ respQ; /* response Q */
266 unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */ 259 unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */
267 unsigned int rx_pkt_pad; /* RX padding for L2 packets */ 260 unsigned int rx_pkt_pad; /* RX padding for L2 packets */
268 unsigned int jumbo_fl; /* jumbo freelist Q index */ 261 unsigned int jumbo_fl; /* jumbo freelist Q index */
@@ -460,7 +453,7 @@ static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
460 if (credits < MAX_SKB_FRAGS + 1) 453 if (credits < MAX_SKB_FRAGS + 1)
461 goto out; 454 goto out;
462 455
463 again: 456again:
464 for (i = 0; i < MAX_NPORTS; i++) { 457 for (i = 0; i < MAX_NPORTS; i++) {
465 s->port = ++s->port & (MAX_NPORTS - 1); 458 s->port = ++s->port & (MAX_NPORTS - 1);
466 skbq = &s->p[s->port].skbq; 459 skbq = &s->p[s->port].skbq;
@@ -483,8 +476,8 @@ static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
483 if (update-- && sched_update_avail(sge)) 476 if (update-- && sched_update_avail(sge))
484 goto again; 477 goto again;
485 478
486 out: 479out:
487 /* If there are more pending skbs, we use the hardware to schedule us 480 /* If there are more pending skbs, we use the hardware to schedule us
488 * again. 481 * again.
489 */ 482 */
490 if (s->num && !skb) { 483 if (s->num && !skb) {
@@ -575,11 +568,10 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
575 q->size = p->freelQ_size[i]; 568 q->size = p->freelQ_size[i];
576 q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN; 569 q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
577 size = sizeof(struct freelQ_e) * q->size; 570 size = sizeof(struct freelQ_e) * q->size;
578 q->entries = (struct freelQ_e *) 571 q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
579 pci_alloc_consistent(pdev, size, &q->dma_addr);
580 if (!q->entries) 572 if (!q->entries)
581 goto err_no_mem; 573 goto err_no_mem;
582 memset(q->entries, 0, size); 574
583 size = sizeof(struct freelQ_ce) * q->size; 575 size = sizeof(struct freelQ_ce) * q->size;
584 q->centries = kzalloc(size, GFP_KERNEL); 576 q->centries = kzalloc(size, GFP_KERNEL);
585 if (!q->centries) 577 if (!q->centries)
@@ -613,11 +605,10 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
613 sge->respQ.size = SGE_RESPQ_E_N; 605 sge->respQ.size = SGE_RESPQ_E_N;
614 sge->respQ.credits = 0; 606 sge->respQ.credits = 0;
615 size = sizeof(struct respQ_e) * sge->respQ.size; 607 size = sizeof(struct respQ_e) * sge->respQ.size;
616 sge->respQ.entries = (struct respQ_e *) 608 sge->respQ.entries =
617 pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr); 609 pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
618 if (!sge->respQ.entries) 610 if (!sge->respQ.entries)
619 goto err_no_mem; 611 goto err_no_mem;
620 memset(sge->respQ.entries, 0, size);
621 return 0; 612 return 0;
622 613
623err_no_mem: 614err_no_mem:
@@ -637,20 +628,12 @@ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
637 q->in_use -= n; 628 q->in_use -= n;
638 ce = &q->centries[cidx]; 629 ce = &q->centries[cidx];
639 while (n--) { 630 while (n--) {
640 if (q->sop) { 631 if (likely(pci_unmap_len(ce, dma_len))) {
641 if (likely(pci_unmap_len(ce, dma_len))) { 632 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
642 pci_unmap_single(pdev, 633 pci_unmap_len(ce, dma_len),
643 pci_unmap_addr(ce, dma_addr), 634 PCI_DMA_TODEVICE);
644 pci_unmap_len(ce, dma_len), 635 if (q->sop)
645 PCI_DMA_TODEVICE);
646 q->sop = 0; 636 q->sop = 0;
647 }
648 } else {
649 if (likely(pci_unmap_len(ce, dma_len))) {
650 pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
651 pci_unmap_len(ce, dma_len),
652 PCI_DMA_TODEVICE);
653 }
654 } 637 }
655 if (ce->skb) { 638 if (ce->skb) {
656 dev_kfree_skb_any(ce->skb); 639 dev_kfree_skb_any(ce->skb);
@@ -711,11 +694,10 @@ static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
711 q->stop_thres = 0; 694 q->stop_thres = 0;
712 spin_lock_init(&q->lock); 695 spin_lock_init(&q->lock);
713 size = sizeof(struct cmdQ_e) * q->size; 696 size = sizeof(struct cmdQ_e) * q->size;
714 q->entries = (struct cmdQ_e *) 697 q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
715 pci_alloc_consistent(pdev, size, &q->dma_addr);
716 if (!q->entries) 698 if (!q->entries)
717 goto err_no_mem; 699 goto err_no_mem;
718 memset(q->entries, 0, size); 700
719 size = sizeof(struct cmdQ_ce) * q->size; 701 size = sizeof(struct cmdQ_ce) * q->size;
720 q->centries = kzalloc(size, GFP_KERNEL); 702 q->centries = kzalloc(size, GFP_KERNEL);
721 if (!q->centries) 703 if (!q->centries)
@@ -770,7 +752,7 @@ void t1_set_vlan_accel(struct adapter *adapter, int on_off)
770static void configure_sge(struct sge *sge, struct sge_params *p) 752static void configure_sge(struct sge *sge, struct sge_params *p)
771{ 753{
772 struct adapter *ap = sge->adapter; 754 struct adapter *ap = sge->adapter;
773 755
774 writel(0, ap->regs + A_SG_CONTROL); 756 writel(0, ap->regs + A_SG_CONTROL);
775 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size, 757 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
776 A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE); 758 A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
@@ -850,7 +832,6 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
850 struct freelQ_e *e = &q->entries[q->pidx]; 832 struct freelQ_e *e = &q->entries[q->pidx];
851 unsigned int dma_len = q->rx_buffer_size - q->dma_offset; 833 unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
852 834
853
854 while (q->credits < q->size) { 835 while (q->credits < q->size) {
855 struct sk_buff *skb; 836 struct sk_buff *skb;
856 dma_addr_t mapping; 837 dma_addr_t mapping;
@@ -862,6 +843,8 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
862 skb_reserve(skb, q->dma_offset); 843 skb_reserve(skb, q->dma_offset);
863 mapping = pci_map_single(pdev, skb->data, dma_len, 844 mapping = pci_map_single(pdev, skb->data, dma_len,
864 PCI_DMA_FROMDEVICE); 845 PCI_DMA_FROMDEVICE);
846 skb_reserve(skb, sge->rx_pkt_pad);
847
865 ce->skb = skb; 848 ce->skb = skb;
866 pci_unmap_addr_set(ce, dma_addr, mapping); 849 pci_unmap_addr_set(ce, dma_addr, mapping);
867 pci_unmap_len_set(ce, dma_len, dma_len); 850 pci_unmap_len_set(ce, dma_len, dma_len);
@@ -881,7 +864,6 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
881 } 864 }
882 q->credits++; 865 q->credits++;
883 } 866 }
884
885} 867}
886 868
887/* 869/*
@@ -1041,6 +1023,10 @@ static void recycle_fl_buf(struct freelQ *fl, int idx)
1041 } 1023 }
1042} 1024}
1043 1025
1026static int copybreak __read_mostly = 256;
1027module_param(copybreak, int, 0);
1028MODULE_PARM_DESC(copybreak, "Receive copy threshold");
1029
1044/** 1030/**
1045 * get_packet - return the next ingress packet buffer 1031 * get_packet - return the next ingress packet buffer
1046 * @pdev: the PCI device that received the packet 1032 * @pdev: the PCI device that received the packet
@@ -1060,45 +1046,42 @@ static void recycle_fl_buf(struct freelQ *fl, int idx)
1060 * be copied but there is no memory for the copy. 1046 * be copied but there is no memory for the copy.
1061 */ 1047 */
1062static inline struct sk_buff *get_packet(struct pci_dev *pdev, 1048static inline struct sk_buff *get_packet(struct pci_dev *pdev,
1063 struct freelQ *fl, unsigned int len, 1049 struct freelQ *fl, unsigned int len)
1064 int dma_pad, int skb_pad,
1065 unsigned int copy_thres,
1066 unsigned int drop_thres)
1067{ 1050{
1068 struct sk_buff *skb; 1051 struct sk_buff *skb;
1069 struct freelQ_ce *ce = &fl->centries[fl->cidx]; 1052 const struct freelQ_ce *ce = &fl->centries[fl->cidx];
1070 1053
1071 if (len < copy_thres) { 1054 if (len < copybreak) {
1072 skb = alloc_skb(len + skb_pad, GFP_ATOMIC); 1055 skb = alloc_skb(len + 2, GFP_ATOMIC);
1073 if (likely(skb != NULL)) { 1056 if (!skb)
1074 skb_reserve(skb, skb_pad);
1075 skb_put(skb, len);
1076 pci_dma_sync_single_for_cpu(pdev,
1077 pci_unmap_addr(ce, dma_addr),
1078 pci_unmap_len(ce, dma_len),
1079 PCI_DMA_FROMDEVICE);
1080 memcpy(skb->data, ce->skb->data + dma_pad, len);
1081 pci_dma_sync_single_for_device(pdev,
1082 pci_unmap_addr(ce, dma_addr),
1083 pci_unmap_len(ce, dma_len),
1084 PCI_DMA_FROMDEVICE);
1085 } else if (!drop_thres)
1086 goto use_orig_buf; 1057 goto use_orig_buf;
1087 1058
1059 skb_reserve(skb, 2); /* align IP header */
1060 skb_put(skb, len);
1061 pci_dma_sync_single_for_cpu(pdev,
1062 pci_unmap_addr(ce, dma_addr),
1063 pci_unmap_len(ce, dma_len),
1064 PCI_DMA_FROMDEVICE);
1065 memcpy(skb->data, ce->skb->data, len);
1066 pci_dma_sync_single_for_device(pdev,
1067 pci_unmap_addr(ce, dma_addr),
1068 pci_unmap_len(ce, dma_len),
1069 PCI_DMA_FROMDEVICE);
1088 recycle_fl_buf(fl, fl->cidx); 1070 recycle_fl_buf(fl, fl->cidx);
1089 return skb; 1071 return skb;
1090 } 1072 }
1091 1073
1092 if (fl->credits < drop_thres) { 1074use_orig_buf:
1075 if (fl->credits < 2) {
1093 recycle_fl_buf(fl, fl->cidx); 1076 recycle_fl_buf(fl, fl->cidx);
1094 return NULL; 1077 return NULL;
1095 } 1078 }
1096 1079
1097use_orig_buf:
1098 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr), 1080 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
1099 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); 1081 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
1100 skb = ce->skb; 1082 skb = ce->skb;
1101 skb_reserve(skb, dma_pad); 1083 prefetch(skb->data);
1084
1102 skb_put(skb, len); 1085 skb_put(skb, len);
1103 return skb; 1086 return skb;
1104} 1087}
@@ -1137,6 +1120,7 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
1137static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb) 1120static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
1138{ 1121{
1139 unsigned int count = 0; 1122 unsigned int count = 0;
1123
1140 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) { 1124 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
1141 unsigned int nfrags = skb_shinfo(skb)->nr_frags; 1125 unsigned int nfrags = skb_shinfo(skb)->nr_frags;
1142 unsigned int i, len = skb->len - skb->data_len; 1126 unsigned int i, len = skb->len - skb->data_len;
@@ -1343,7 +1327,7 @@ static void restart_sched(unsigned long arg)
1343 while ((skb = sched_skb(sge, NULL, credits)) != NULL) { 1327 while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
1344 unsigned int genbit, pidx, count; 1328 unsigned int genbit, pidx, count;
1345 count = 1 + skb_shinfo(skb)->nr_frags; 1329 count = 1 + skb_shinfo(skb)->nr_frags;
1346 count += compute_large_page_tx_descs(skb); 1330 count += compute_large_page_tx_descs(skb);
1347 q->in_use += count; 1331 q->in_use += count;
1348 genbit = q->genbit; 1332 genbit = q->genbit;
1349 pidx = q->pidx; 1333 pidx = q->pidx;
@@ -1375,27 +1359,25 @@ static void restart_sched(unsigned long arg)
1375 * 1359 *
1376 * Process an ingress ethernet pakcet and deliver it to the stack. 1360 * Process an ingress ethernet pakcet and deliver it to the stack.
1377 */ 1361 */
1378static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) 1362static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
1379{ 1363{
1380 struct sk_buff *skb; 1364 struct sk_buff *skb;
1381 struct cpl_rx_pkt *p; 1365 const struct cpl_rx_pkt *p;
1382 struct adapter *adapter = sge->adapter; 1366 struct adapter *adapter = sge->adapter;
1383 struct sge_port_stats *st; 1367 struct sge_port_stats *st;
1384 1368
1385 skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad, 1369 skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
1386 sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES,
1387 SGE_RX_DROP_THRES);
1388 if (unlikely(!skb)) { 1370 if (unlikely(!skb)) {
1389 sge->stats.rx_drops++; 1371 sge->stats.rx_drops++;
1390 return 0; 1372 return;
1391 } 1373 }
1392 1374
1393 p = (struct cpl_rx_pkt *)skb->data; 1375 p = (const struct cpl_rx_pkt *) skb->data;
1394 skb_pull(skb, sizeof(*p));
1395 if (p->iff >= adapter->params.nports) { 1376 if (p->iff >= adapter->params.nports) {
1396 kfree_skb(skb); 1377 kfree_skb(skb);
1397 return 0; 1378 return;
1398 } 1379 }
1380 __skb_pull(skb, sizeof(*p));
1399 1381
1400 skb->dev = adapter->port[p->iff].dev; 1382 skb->dev = adapter->port[p->iff].dev;
1401 skb->dev->last_rx = jiffies; 1383 skb->dev->last_rx = jiffies;
@@ -1427,7 +1409,6 @@ static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
1427 netif_rx(skb); 1409 netif_rx(skb);
1428#endif 1410#endif
1429 } 1411 }
1430 return 0;
1431} 1412}
1432 1413
1433/* 1414/*
@@ -1448,29 +1429,28 @@ static inline int enough_free_Tx_descs(const struct cmdQ *q)
1448static void restart_tx_queues(struct sge *sge) 1429static void restart_tx_queues(struct sge *sge)
1449{ 1430{
1450 struct adapter *adap = sge->adapter; 1431 struct adapter *adap = sge->adapter;
1432 int i;
1451 1433
1452 if (enough_free_Tx_descs(&sge->cmdQ[0])) { 1434 if (!enough_free_Tx_descs(&sge->cmdQ[0]))
1453 int i; 1435 return;
1454 1436
1455 for_each_port(adap, i) { 1437 for_each_port(adap, i) {
1456 struct net_device *nd = adap->port[i].dev; 1438 struct net_device *nd = adap->port[i].dev;
1457 1439
1458 if (test_and_clear_bit(nd->if_port, 1440 if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
1459 &sge->stopped_tx_queues) && 1441 netif_running(nd)) {
1460 netif_running(nd)) { 1442 sge->stats.cmdQ_restarted[2]++;
1461 sge->stats.cmdQ_restarted[2]++; 1443 netif_wake_queue(nd);
1462 netif_wake_queue(nd);
1463 }
1464 } 1444 }
1465 } 1445 }
1466} 1446}
1467 1447
1468/* 1448/*
1469 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0 1449 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
1470 * information. 1450 * information.
1471 */ 1451 */
1472static unsigned int update_tx_info(struct adapter *adapter, 1452static unsigned int update_tx_info(struct adapter *adapter,
1473 unsigned int flags, 1453 unsigned int flags,
1474 unsigned int pr0) 1454 unsigned int pr0)
1475{ 1455{
1476 struct sge *sge = adapter->sge; 1456 struct sge *sge = adapter->sge;
@@ -1510,29 +1490,30 @@ static int process_responses(struct adapter *adapter, int budget)
1510 struct sge *sge = adapter->sge; 1490 struct sge *sge = adapter->sge;
1511 struct respQ *q = &sge->respQ; 1491 struct respQ *q = &sge->respQ;
1512 struct respQ_e *e = &q->entries[q->cidx]; 1492 struct respQ_e *e = &q->entries[q->cidx];
1513 int budget_left = budget; 1493 int done = 0;
1514 unsigned int flags = 0; 1494 unsigned int flags = 0;
1515 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; 1495 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1516
1517 1496
1518 while (likely(budget_left && e->GenerationBit == q->genbit)) { 1497 while (done < budget && e->GenerationBit == q->genbit) {
1519 flags |= e->Qsleeping; 1498 flags |= e->Qsleeping;
1520 1499
1521 cmdq_processed[0] += e->Cmdq0CreditReturn; 1500 cmdq_processed[0] += e->Cmdq0CreditReturn;
1522 cmdq_processed[1] += e->Cmdq1CreditReturn; 1501 cmdq_processed[1] += e->Cmdq1CreditReturn;
1523 1502
1524 /* We batch updates to the TX side to avoid cacheline 1503 /* We batch updates to the TX side to avoid cacheline
1525 * ping-pong of TX state information on MP where the sender 1504 * ping-pong of TX state information on MP where the sender
1526 * might run on a different CPU than this function... 1505 * might run on a different CPU than this function...
1527 */ 1506 */
1528 if (unlikely(flags & F_CMDQ0_ENABLE || cmdq_processed[0] > 64)) { 1507 if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
1529 flags = update_tx_info(adapter, flags, cmdq_processed[0]); 1508 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1530 cmdq_processed[0] = 0; 1509 cmdq_processed[0] = 0;
1531 } 1510 }
1511
1532 if (unlikely(cmdq_processed[1] > 16)) { 1512 if (unlikely(cmdq_processed[1] > 16)) {
1533 sge->cmdQ[1].processed += cmdq_processed[1]; 1513 sge->cmdQ[1].processed += cmdq_processed[1];
1534 cmdq_processed[1] = 0; 1514 cmdq_processed[1] = 0;
1535 } 1515 }
1516
1536 if (likely(e->DataValid)) { 1517 if (likely(e->DataValid)) {
1537 struct freelQ *fl = &sge->freelQ[e->FreelistQid]; 1518 struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1538 1519
@@ -1542,12 +1523,16 @@ static int process_responses(struct adapter *adapter, int budget)
1542 else 1523 else
1543 sge_rx(sge, fl, e->BufferLength); 1524 sge_rx(sge, fl, e->BufferLength);
1544 1525
1526 ++done;
1527
1545 /* 1528 /*
1546 * Note: this depends on each packet consuming a 1529 * Note: this depends on each packet consuming a
1547 * single free-list buffer; cf. the BUG above. 1530 * single free-list buffer; cf. the BUG above.
1548 */ 1531 */
1549 if (++fl->cidx == fl->size) 1532 if (++fl->cidx == fl->size)
1550 fl->cidx = 0; 1533 fl->cidx = 0;
1534 prefetch(fl->centries[fl->cidx].skb);
1535
1551 if (unlikely(--fl->credits < 1536 if (unlikely(--fl->credits <
1552 fl->size - SGE_FREEL_REFILL_THRESH)) 1537 fl->size - SGE_FREEL_REFILL_THRESH))
1553 refill_free_list(sge, fl); 1538 refill_free_list(sge, fl);
@@ -1566,14 +1551,20 @@ static int process_responses(struct adapter *adapter, int budget)
1566 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT); 1551 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1567 q->credits = 0; 1552 q->credits = 0;
1568 } 1553 }
1569 --budget_left;
1570 } 1554 }
1571 1555
1572 flags = update_tx_info(adapter, flags, cmdq_processed[0]); 1556 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1573 sge->cmdQ[1].processed += cmdq_processed[1]; 1557 sge->cmdQ[1].processed += cmdq_processed[1];
1574 1558
1575 budget -= budget_left; 1559 return done;
1576 return budget; 1560}
1561
1562static inline int responses_pending(const struct adapter *adapter)
1563{
1564 const struct respQ *Q = &adapter->sge->respQ;
1565 const struct respQ_e *e = &Q->entries[Q->cidx];
1566
1567 return (e->GenerationBit == Q->genbit);
1577} 1568}
1578 1569
1579#ifdef CONFIG_CHELSIO_T1_NAPI 1570#ifdef CONFIG_CHELSIO_T1_NAPI
@@ -1585,19 +1576,25 @@ static int process_responses(struct adapter *adapter, int budget)
1585 * which the caller must ensure is a valid pure response. Returns 1 if it 1576 * which the caller must ensure is a valid pure response. Returns 1 if it
1586 * encounters a valid data-carrying response, 0 otherwise. 1577 * encounters a valid data-carrying response, 0 otherwise.
1587 */ 1578 */
1588static int process_pure_responses(struct adapter *adapter, struct respQ_e *e) 1579static int process_pure_responses(struct adapter *adapter)
1589{ 1580{
1590 struct sge *sge = adapter->sge; 1581 struct sge *sge = adapter->sge;
1591 struct respQ *q = &sge->respQ; 1582 struct respQ *q = &sge->respQ;
1583 struct respQ_e *e = &q->entries[q->cidx];
1584 const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1592 unsigned int flags = 0; 1585 unsigned int flags = 0;
1593 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; 1586 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1594 1587
1588 prefetch(fl->centries[fl->cidx].skb);
1589 if (e->DataValid)
1590 return 1;
1591
1595 do { 1592 do {
1596 flags |= e->Qsleeping; 1593 flags |= e->Qsleeping;
1597 1594
1598 cmdq_processed[0] += e->Cmdq0CreditReturn; 1595 cmdq_processed[0] += e->Cmdq0CreditReturn;
1599 cmdq_processed[1] += e->Cmdq1CreditReturn; 1596 cmdq_processed[1] += e->Cmdq1CreditReturn;
1600 1597
1601 e++; 1598 e++;
1602 if (unlikely(++q->cidx == q->size)) { 1599 if (unlikely(++q->cidx == q->size)) {
1603 q->cidx = 0; 1600 q->cidx = 0;
@@ -1613,7 +1610,7 @@ static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
1613 sge->stats.pure_rsps++; 1610 sge->stats.pure_rsps++;
1614 } while (e->GenerationBit == q->genbit && !e->DataValid); 1611 } while (e->GenerationBit == q->genbit && !e->DataValid);
1615 1612
1616 flags = update_tx_info(adapter, flags, cmdq_processed[0]); 1613 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1617 sge->cmdQ[1].processed += cmdq_processed[1]; 1614 sge->cmdQ[1].processed += cmdq_processed[1];
1618 1615
1619 return e->GenerationBit == q->genbit; 1616 return e->GenerationBit == q->genbit;
@@ -1627,23 +1624,20 @@ static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
1627int t1_poll(struct net_device *dev, int *budget) 1624int t1_poll(struct net_device *dev, int *budget)
1628{ 1625{
1629 struct adapter *adapter = dev->priv; 1626 struct adapter *adapter = dev->priv;
1630 int effective_budget = min(*budget, dev->quota); 1627 int work_done;
1631 int work_done = process_responses(adapter, effective_budget);
1632 1628
1629 work_done = process_responses(adapter, min(*budget, dev->quota));
1633 *budget -= work_done; 1630 *budget -= work_done;
1634 dev->quota -= work_done; 1631 dev->quota -= work_done;
1635 1632
1636 if (work_done >= effective_budget) 1633 if (unlikely(responses_pending(adapter)))
1637 return 1; 1634 return 1;
1638 1635
1639 spin_lock_irq(&adapter->async_lock); 1636 netif_rx_complete(dev);
1640 __netif_rx_complete(dev);
1641 writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); 1637 writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
1642 writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
1643 adapter->regs + A_PL_ENABLE);
1644 spin_unlock_irq(&adapter->async_lock);
1645 1638
1646 return 0; 1639 return 0;
1640
1647} 1641}
1648 1642
1649/* 1643/*
@@ -1652,44 +1646,33 @@ int t1_poll(struct net_device *dev, int *budget)
1652irqreturn_t t1_interrupt(int irq, void *data) 1646irqreturn_t t1_interrupt(int irq, void *data)
1653{ 1647{
1654 struct adapter *adapter = data; 1648 struct adapter *adapter = data;
1655 struct net_device *dev = adapter->sge->netdev;
1656 struct sge *sge = adapter->sge; 1649 struct sge *sge = adapter->sge;
1657 u32 cause; 1650 int handled;
1658 int handled = 0;
1659 1651
1660 cause = readl(adapter->regs + A_PL_CAUSE); 1652 if (likely(responses_pending(adapter))) {
1661 if (cause == 0 || cause == ~0) 1653 struct net_device *dev = sge->netdev;
1662 return IRQ_NONE;
1663 1654
1664 spin_lock(&adapter->async_lock); 1655 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1665 if (cause & F_PL_INTR_SGE_DATA) {
1666 struct respQ *q = &adapter->sge->respQ;
1667 struct respQ_e *e = &q->entries[q->cidx];
1668
1669 handled = 1;
1670 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1671
1672 if (e->GenerationBit == q->genbit &&
1673 __netif_rx_schedule_prep(dev)) {
1674 if (e->DataValid || process_pure_responses(adapter, e)) {
1675 /* mask off data IRQ */
1676 writel(adapter->slow_intr_mask,
1677 adapter->regs + A_PL_ENABLE);
1678 __netif_rx_schedule(sge->netdev);
1679 goto unlock;
1680 }
1681 /* no data, no NAPI needed */
1682 netif_poll_enable(dev);
1683 1656
1657 if (__netif_rx_schedule_prep(dev)) {
1658 if (process_pure_responses(adapter))
1659 __netif_rx_schedule(dev);
1660 else {
1661 /* no data, no NAPI needed */
1662 writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
1663 netif_poll_enable(dev); /* undo schedule_prep */
1664 }
1684 } 1665 }
1685 writel(q->cidx, adapter->regs + A_SG_SLEEPING); 1666 return IRQ_HANDLED;
1686 } else 1667 }
1687 handled = t1_slow_intr_handler(adapter); 1668
1669 spin_lock(&adapter->async_lock);
1670 handled = t1_slow_intr_handler(adapter);
1671 spin_unlock(&adapter->async_lock);
1688 1672
1689 if (!handled) 1673 if (!handled)
1690 sge->stats.unhandled_irqs++; 1674 sge->stats.unhandled_irqs++;
1691unlock: 1675
1692 spin_unlock(&adapter->async_lock);
1693 return IRQ_RETVAL(handled != 0); 1676 return IRQ_RETVAL(handled != 0);
1694} 1677}
1695 1678
@@ -1712,17 +1695,13 @@ unlock:
1712irqreturn_t t1_interrupt(int irq, void *cookie) 1695irqreturn_t t1_interrupt(int irq, void *cookie)
1713{ 1696{
1714 int work_done; 1697 int work_done;
1715 struct respQ_e *e;
1716 struct adapter *adapter = cookie; 1698 struct adapter *adapter = cookie;
1717 struct respQ *Q = &adapter->sge->respQ;
1718 1699
1719 spin_lock(&adapter->async_lock); 1700 spin_lock(&adapter->async_lock);
1720 e = &Q->entries[Q->cidx];
1721 prefetch(e);
1722 1701
1723 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); 1702 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1724 1703
1725 if (likely(e->GenerationBit == Q->genbit)) 1704 if (likely(responses_pending(adapter)))
1726 work_done = process_responses(adapter, -1); 1705 work_done = process_responses(adapter, -1);
1727 else 1706 else
1728 work_done = t1_slow_intr_handler(adapter); 1707 work_done = t1_slow_intr_handler(adapter);
@@ -1796,7 +1775,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
1796 * through the scheduler. 1775 * through the scheduler.
1797 */ 1776 */
1798 if (sge->tx_sched && !qid && skb->dev) { 1777 if (sge->tx_sched && !qid && skb->dev) {
1799 use_sched: 1778use_sched:
1800 use_sched_skb = 1; 1779 use_sched_skb = 1;
1801 /* Note that the scheduler might return a different skb than 1780 /* Note that the scheduler might return a different skb than
1802 * the one passed in. 1781 * the one passed in.
@@ -1900,7 +1879,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1900 cpl = (struct cpl_tx_pkt *)hdr; 1879 cpl = (struct cpl_tx_pkt *)hdr;
1901 } else { 1880 } else {
1902 /* 1881 /*
1903 * Packets shorter than ETH_HLEN can break the MAC, drop them 1882 * Packets shorter than ETH_HLEN can break the MAC, drop them
1904 * early. Also, we may get oversized packets because some 1883 * early. Also, we may get oversized packets because some
1905 * parts of the kernel don't handle our unusual hard_header_len 1884 * parts of the kernel don't handle our unusual hard_header_len
1906 * right, drop those too. 1885 * right, drop those too.
@@ -1984,9 +1963,9 @@ send:
1984 * then silently discard to avoid leak. 1963 * then silently discard to avoid leak.
1985 */ 1964 */
1986 if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) { 1965 if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
1987 dev_kfree_skb_any(skb); 1966 dev_kfree_skb_any(skb);
1988 ret = NETDEV_TX_OK; 1967 ret = NETDEV_TX_OK;
1989 } 1968 }
1990 return ret; 1969 return ret;
1991} 1970}
1992 1971
@@ -2099,31 +2078,35 @@ static void espibug_workaround_t204(unsigned long data)
2099 2078
2100 if (adapter->open_device_map & PORT_MASK) { 2079 if (adapter->open_device_map & PORT_MASK) {
2101 int i; 2080 int i;
2102 if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0) { 2081
2082 if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
2103 return; 2083 return;
2104 } 2084
2105 for (i = 0; i < nports; i++) { 2085 for (i = 0; i < nports; i++) {
2106 struct sk_buff *skb = sge->espibug_skb[i]; 2086 struct sk_buff *skb = sge->espibug_skb[i];
2107 if ( (netif_running(adapter->port[i].dev)) && 2087
2108 !(netif_queue_stopped(adapter->port[i].dev)) && 2088 if (!netif_running(adapter->port[i].dev) ||
2109 (seop[i] && ((seop[i] & 0xfff) == 0)) && 2089 netif_queue_stopped(adapter->port[i].dev) ||
2110 skb ) { 2090 !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
2111 if (!skb->cb[0]) { 2091 continue;
2112 u8 ch_mac_addr[ETH_ALEN] = 2092
2113 {0x0, 0x7, 0x43, 0x0, 0x0, 0x0}; 2093 if (!skb->cb[0]) {
2114 memcpy(skb->data + sizeof(struct cpl_tx_pkt), 2094 u8 ch_mac_addr[ETH_ALEN] = {
2115 ch_mac_addr, ETH_ALEN); 2095 0x0, 0x7, 0x43, 0x0, 0x0, 0x0
2116 memcpy(skb->data + skb->len - 10, 2096 };
2117 ch_mac_addr, ETH_ALEN); 2097
2118 skb->cb[0] = 0xff; 2098 memcpy(skb->data + sizeof(struct cpl_tx_pkt),
2119 } 2099 ch_mac_addr, ETH_ALEN);
2120 2100 memcpy(skb->data + skb->len - 10,
2121 /* bump the reference count to avoid freeing of 2101 ch_mac_addr, ETH_ALEN);
2122 * the skb once the DMA has completed. 2102 skb->cb[0] = 0xff;
2123 */
2124 skb = skb_get(skb);
2125 t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
2126 } 2103 }
2104
2105 /* bump the reference count to avoid freeing of
2106 * the skb once the DMA has completed.
2107 */
2108 skb = skb_get(skb);
2109 t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
2127 } 2110 }
2128 } 2111 }
2129 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); 2112 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
@@ -2192,9 +2175,8 @@ struct sge * __devinit t1_sge_create(struct adapter *adapter,
2192 if (adapter->params.nports > 1) { 2175 if (adapter->params.nports > 1) {
2193 tx_sched_init(sge); 2176 tx_sched_init(sge);
2194 sge->espibug_timer.function = espibug_workaround_t204; 2177 sge->espibug_timer.function = espibug_workaround_t204;
2195 } else { 2178 } else
2196 sge->espibug_timer.function = espibug_workaround; 2179 sge->espibug_timer.function = espibug_workaround;
2197 }
2198 sge->espibug_timer.data = (unsigned long)sge->adapter; 2180 sge->espibug_timer.data = (unsigned long)sge->adapter;
2199 2181
2200 sge->espibug_timeout = 1; 2182 sge->espibug_timeout = 1;
@@ -2202,7 +2184,7 @@ struct sge * __devinit t1_sge_create(struct adapter *adapter,
2202 if (adapter->params.nports > 1) 2184 if (adapter->params.nports > 1)
2203 sge->espibug_timeout = HZ/100; 2185 sge->espibug_timeout = HZ/100;
2204 } 2186 }
2205 2187
2206 2188
2207 p->cmdQ_size[0] = SGE_CMDQ0_E_N; 2189 p->cmdQ_size[0] = SGE_CMDQ0_E_N;
2208 p->cmdQ_size[1] = SGE_CMDQ1_E_N; 2190 p->cmdQ_size[1] = SGE_CMDQ1_E_N;
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 22ed9a383c08..c2522cdfab37 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -223,13 +223,13 @@ static int fpga_slow_intr(adapter_t *adapter)
223 t1_sge_intr_error_handler(adapter->sge); 223 t1_sge_intr_error_handler(adapter->sge);
224 224
225 if (cause & FPGA_PCIX_INTERRUPT_GMAC) 225 if (cause & FPGA_PCIX_INTERRUPT_GMAC)
226 fpga_phy_intr_handler(adapter); 226 fpga_phy_intr_handler(adapter);
227 227
228 if (cause & FPGA_PCIX_INTERRUPT_TP) { 228 if (cause & FPGA_PCIX_INTERRUPT_TP) {
229 /* 229 /*
230 * FPGA doesn't support MC4 interrupts and it requires 230 * FPGA doesn't support MC4 interrupts and it requires
231 * this odd layer of indirection for MC5. 231 * this odd layer of indirection for MC5.
232 */ 232 */
233 u32 tp_cause = readl(adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE); 233 u32 tp_cause = readl(adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE);
234 234
235 /* Clear TP interrupt */ 235 /* Clear TP interrupt */
@@ -262,8 +262,7 @@ static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg)
262 udelay(10); 262 udelay(10);
263 } while (busy && --attempts); 263 } while (busy && --attempts);
264 if (busy) 264 if (busy)
265 CH_ALERT("%s: MDIO operation timed out\n", 265 CH_ALERT("%s: MDIO operation timed out\n", adapter->name);
266 adapter->name);
267 return busy; 266 return busy;
268} 267}
269 268
@@ -605,22 +604,23 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
605 604
606 switch (board_info(adapter)->board) { 605 switch (board_info(adapter)->board) {
607#ifdef CONFIG_CHELSIO_T1_1G 606#ifdef CONFIG_CHELSIO_T1_1G
608 case CHBT_BOARD_CHT204: 607 case CHBT_BOARD_CHT204:
609 case CHBT_BOARD_CHT204E: 608 case CHBT_BOARD_CHT204E:
610 case CHBT_BOARD_CHN204: 609 case CHBT_BOARD_CHN204:
611 case CHBT_BOARD_CHT204V: { 610 case CHBT_BOARD_CHT204V: {
612 int i, port_bit; 611 int i, port_bit;
613 for_each_port(adapter, i) { 612 for_each_port(adapter, i) {
614 port_bit = i + 1; 613 port_bit = i + 1;
615 if (!(cause & (1 << port_bit))) continue; 614 if (!(cause & (1 << port_bit)))
615 continue;
616 616
617 phy = adapter->port[i].phy; 617 phy = adapter->port[i].phy;
618 phy_cause = phy->ops->interrupt_handler(phy); 618 phy_cause = phy->ops->interrupt_handler(phy);
619 if (phy_cause & cphy_cause_link_change) 619 if (phy_cause & cphy_cause_link_change)
620 t1_link_changed(adapter, i); 620 t1_link_changed(adapter, i);
621 } 621 }
622 break; 622 break;
623 } 623 }
624 case CHBT_BOARD_CHT101: 624 case CHBT_BOARD_CHT101:
625 if (cause & ELMER0_GP_BIT1) { /* Marvell 88E1111 interrupt */ 625 if (cause & ELMER0_GP_BIT1) { /* Marvell 88E1111 interrupt */
626 phy = adapter->port[0].phy; 626 phy = adapter->port[0].phy;
@@ -631,13 +631,13 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
631 break; 631 break;
632 case CHBT_BOARD_7500: { 632 case CHBT_BOARD_7500: {
633 int p; 633 int p;
634 /* 634 /*
635 * Elmer0's interrupt cause isn't useful here because there is 635 * Elmer0's interrupt cause isn't useful here because there is
636 * only one bit that can be set for all 4 ports. This means 636 * only one bit that can be set for all 4 ports. This means
637 * we are forced to check every PHY's interrupt status 637 * we are forced to check every PHY's interrupt status
638 * register to see who initiated the interrupt. 638 * register to see who initiated the interrupt.
639 */ 639 */
640 for_each_port(adapter, p) { 640 for_each_port(adapter, p) {
641 phy = adapter->port[p].phy; 641 phy = adapter->port[p].phy;
642 phy_cause = phy->ops->interrupt_handler(phy); 642 phy_cause = phy->ops->interrupt_handler(phy);
643 if (phy_cause & cphy_cause_link_change) 643 if (phy_cause & cphy_cause_link_change)
@@ -658,7 +658,7 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
658 break; 658 break;
659 case CHBT_BOARD_8000: 659 case CHBT_BOARD_8000:
660 case CHBT_BOARD_CHT110: 660 case CHBT_BOARD_CHT110:
661 CH_DBG(adapter, INTR, "External interrupt cause 0x%x\n", 661 CH_DBG(adapter, INTR, "External interrupt cause 0x%x\n",
662 cause); 662 cause);
663 if (cause & ELMER0_GP_BIT1) { /* PMC3393 INTB */ 663 if (cause & ELMER0_GP_BIT1) { /* PMC3393 INTB */
664 struct cmac *mac = adapter->port[0].mac; 664 struct cmac *mac = adapter->port[0].mac;
@@ -670,9 +670,9 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
670 670
671 t1_tpi_read(adapter, 671 t1_tpi_read(adapter,
672 A_ELMER0_GPI_STAT, &mod_detect); 672 A_ELMER0_GPI_STAT, &mod_detect);
673 CH_MSG(adapter, INFO, LINK, "XPAK %s\n", 673 CH_MSG(adapter, INFO, LINK, "XPAK %s\n",
674 mod_detect ? "removed" : "inserted"); 674 mod_detect ? "removed" : "inserted");
675 } 675 }
676 break; 676 break;
677#ifdef CONFIG_CHELSIO_T1_COUGAR 677#ifdef CONFIG_CHELSIO_T1_COUGAR
678 case CHBT_BOARD_COUGAR: 678 case CHBT_BOARD_COUGAR:
@@ -688,7 +688,8 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
688 688
689 for_each_port(adapter, i) { 689 for_each_port(adapter, i) {
690 port_bit = i ? i + 1 : 0; 690 port_bit = i ? i + 1 : 0;
691 if (!(cause & (1 << port_bit))) continue; 691 if (!(cause & (1 << port_bit)))
692 continue;
692 693
693 phy = adapter->port[i].phy; 694 phy = adapter->port[i].phy;
694 phy_cause = phy->ops->interrupt_handler(phy); 695 phy_cause = phy->ops->interrupt_handler(phy);
@@ -755,7 +756,7 @@ void t1_interrupts_disable(adapter_t* adapter)
755 756
756 /* Disable PCIX & external chip interrupts. */ 757 /* Disable PCIX & external chip interrupts. */
757 if (t1_is_asic(adapter)) 758 if (t1_is_asic(adapter))
758 writel(0, adapter->regs + A_PL_ENABLE); 759 writel(0, adapter->regs + A_PL_ENABLE);
759 760
760 /* PCI-X interrupts */ 761 /* PCI-X interrupts */
761 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0); 762 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0);
@@ -830,11 +831,11 @@ int t1_slow_intr_handler(adapter_t *adapter)
830/* Power sequencing is a work-around for Intel's XPAKs. */ 831/* Power sequencing is a work-around for Intel's XPAKs. */
831static void power_sequence_xpak(adapter_t* adapter) 832static void power_sequence_xpak(adapter_t* adapter)
832{ 833{
833 u32 mod_detect; 834 u32 mod_detect;
834 u32 gpo; 835 u32 gpo;
835 836
836 /* Check for XPAK */ 837 /* Check for XPAK */
837 t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect); 838 t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect);
838 if (!(ELMER0_GP_BIT5 & mod_detect)) { 839 if (!(ELMER0_GP_BIT5 & mod_detect)) {
839 /* XPAK is present */ 840 /* XPAK is present */
840 t1_tpi_read(adapter, A_ELMER0_GPO, &gpo); 841 t1_tpi_read(adapter, A_ELMER0_GPO, &gpo);
@@ -877,31 +878,31 @@ static int board_init(adapter_t *adapter, const struct board_info *bi)
877 case CHBT_BOARD_N210: 878 case CHBT_BOARD_N210:
878 case CHBT_BOARD_CHT210: 879 case CHBT_BOARD_CHT210:
879 case CHBT_BOARD_COUGAR: 880 case CHBT_BOARD_COUGAR:
880 t1_tpi_par(adapter, 0xf); 881 t1_tpi_par(adapter, 0xf);
881 t1_tpi_write(adapter, A_ELMER0_GPO, 0x800); 882 t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
882 break; 883 break;
883 case CHBT_BOARD_CHT110: 884 case CHBT_BOARD_CHT110:
884 t1_tpi_par(adapter, 0xf); 885 t1_tpi_par(adapter, 0xf);
885 t1_tpi_write(adapter, A_ELMER0_GPO, 0x1800); 886 t1_tpi_write(adapter, A_ELMER0_GPO, 0x1800);
886 887
887 /* TBD XXX Might not need. This fixes a problem 888 /* TBD XXX Might not need. This fixes a problem
888 * described in the Intel SR XPAK errata. 889 * described in the Intel SR XPAK errata.
889 */ 890 */
890 power_sequence_xpak(adapter); 891 power_sequence_xpak(adapter);
891 break; 892 break;
892#ifdef CONFIG_CHELSIO_T1_1G 893#ifdef CONFIG_CHELSIO_T1_1G
893 case CHBT_BOARD_CHT204E: 894 case CHBT_BOARD_CHT204E:
894 /* add config space write here */ 895 /* add config space write here */
895 case CHBT_BOARD_CHT204: 896 case CHBT_BOARD_CHT204:
896 case CHBT_BOARD_CHT204V: 897 case CHBT_BOARD_CHT204V:
897 case CHBT_BOARD_CHN204: 898 case CHBT_BOARD_CHN204:
898 t1_tpi_par(adapter, 0xf); 899 t1_tpi_par(adapter, 0xf);
899 t1_tpi_write(adapter, A_ELMER0_GPO, 0x804); 900 t1_tpi_write(adapter, A_ELMER0_GPO, 0x804);
900 break; 901 break;
901 case CHBT_BOARD_CHT101: 902 case CHBT_BOARD_CHT101:
902 case CHBT_BOARD_7500: 903 case CHBT_BOARD_7500:
903 t1_tpi_par(adapter, 0xf); 904 t1_tpi_par(adapter, 0xf);
904 t1_tpi_write(adapter, A_ELMER0_GPO, 0x1804); 905 t1_tpi_write(adapter, A_ELMER0_GPO, 0x1804);
905 break; 906 break;
906#endif 907#endif
907 } 908 }
@@ -941,7 +942,7 @@ int t1_init_hw_modules(adapter_t *adapter)
941 goto out_err; 942 goto out_err;
942 943
943 err = 0; 944 err = 0;
944 out_err: 945out_err:
945 return err; 946 return err;
946} 947}
947 948
@@ -983,7 +984,7 @@ void t1_free_sw_modules(adapter_t *adapter)
983 if (adapter->espi) 984 if (adapter->espi)
984 t1_espi_destroy(adapter->espi); 985 t1_espi_destroy(adapter->espi);
985#ifdef CONFIG_CHELSIO_T1_COUGAR 986#ifdef CONFIG_CHELSIO_T1_COUGAR
986 if (adapter->cspi) 987 if (adapter->cspi)
987 t1_cspi_destroy(adapter->cspi); 988 t1_cspi_destroy(adapter->cspi);
988#endif 989#endif
989} 990}
@@ -1010,7 +1011,7 @@ static void __devinit init_link_config(struct link_config *lc,
1010 CH_ERR("%s: CSPI initialization failed\n", 1011 CH_ERR("%s: CSPI initialization failed\n",
1011 adapter->name); 1012 adapter->name);
1012 goto error; 1013 goto error;
1013 } 1014 }
1014#endif 1015#endif
1015 1016
1016/* 1017/*
diff --git a/drivers/net/chelsio/tp.c b/drivers/net/chelsio/tp.c
index 0ca0b6e19e43..6222d585e447 100644
--- a/drivers/net/chelsio/tp.c
+++ b/drivers/net/chelsio/tp.c
@@ -17,39 +17,36 @@ struct petp {
17static void tp_init(adapter_t * ap, const struct tp_params *p, 17static void tp_init(adapter_t * ap, const struct tp_params *p,
18 unsigned int tp_clk) 18 unsigned int tp_clk)
19{ 19{
20 if (t1_is_asic(ap)) { 20 u32 val;
21 u32 val;
22
23 val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
24 F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
25 if (!p->pm_size)
26 val |= F_OFFLOAD_DISABLE;
27 else
28 val |= F_TP_IN_ESPI_CHECK_IP_CSUM |
29 F_TP_IN_ESPI_CHECK_TCP_CSUM;
30 writel(val, ap->regs + A_TP_IN_CONFIG);
31 writel(F_TP_OUT_CSPI_CPL |
32 F_TP_OUT_ESPI_ETHERNET |
33 F_TP_OUT_ESPI_GENERATE_IP_CSUM |
34 F_TP_OUT_ESPI_GENERATE_TCP_CSUM,
35 ap->regs + A_TP_OUT_CONFIG);
36 writel(V_IP_TTL(64) |
37 F_PATH_MTU /* IP DF bit */ |
38 V_5TUPLE_LOOKUP(p->use_5tuple_mode) |
39 V_SYN_COOKIE_PARAMETER(29),
40 ap->regs + A_TP_GLOBAL_CONFIG);
41 /*
42 * Enable pause frame deadlock prevention.
43 */
44 if (is_T2(ap) && ap->params.nports > 1) {
45 u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
46
47 writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
48 V_DROP_TICKS_CNT(drop_ticks) |
49 V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
50 ap->regs + A_TP_TX_DROP_CONFIG);
51 }
52 21
22 if (!t1_is_asic(ap))
23 return;
24
25 val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
26 F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
27 if (!p->pm_size)
28 val |= F_OFFLOAD_DISABLE;
29 else
30 val |= F_TP_IN_ESPI_CHECK_IP_CSUM | F_TP_IN_ESPI_CHECK_TCP_CSUM;
31 writel(val, ap->regs + A_TP_IN_CONFIG);
32 writel(F_TP_OUT_CSPI_CPL |
33 F_TP_OUT_ESPI_ETHERNET |
34 F_TP_OUT_ESPI_GENERATE_IP_CSUM |
35 F_TP_OUT_ESPI_GENERATE_TCP_CSUM, ap->regs + A_TP_OUT_CONFIG);
36 writel(V_IP_TTL(64) |
37 F_PATH_MTU /* IP DF bit */ |
38 V_5TUPLE_LOOKUP(p->use_5tuple_mode) |
39 V_SYN_COOKIE_PARAMETER(29), ap->regs + A_TP_GLOBAL_CONFIG);
40 /*
41 * Enable pause frame deadlock prevention.
42 */
43 if (is_T2(ap) && ap->params.nports > 1) {
44 u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
45
46 writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
47 V_DROP_TICKS_CNT(drop_ticks) |
48 V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
49 ap->regs + A_TP_TX_DROP_CONFIG);
53 } 50 }
54} 51}
55 52
@@ -61,6 +58,7 @@ void t1_tp_destroy(struct petp *tp)
61struct petp *__devinit t1_tp_create(adapter_t * adapter, struct tp_params *p) 58struct petp *__devinit t1_tp_create(adapter_t * adapter, struct tp_params *p)
62{ 59{
63 struct petp *tp = kzalloc(sizeof(*tp), GFP_KERNEL); 60 struct petp *tp = kzalloc(sizeof(*tp), GFP_KERNEL);
61
64 if (!tp) 62 if (!tp)
65 return NULL; 63 return NULL;
66 64
diff --git a/drivers/net/chelsio/vsc7326.c b/drivers/net/chelsio/vsc7326.c
index 85dc3b1dc309..534ffa0f616e 100644
--- a/drivers/net/chelsio/vsc7326.c
+++ b/drivers/net/chelsio/vsc7326.c
@@ -226,22 +226,21 @@ static void run_table(adapter_t *adapter, struct init_table *ib, int len)
226 if (ib[i].addr == INITBLOCK_SLEEP) { 226 if (ib[i].addr == INITBLOCK_SLEEP) {
227 udelay( ib[i].data ); 227 udelay( ib[i].data );
228 CH_ERR("sleep %d us\n",ib[i].data); 228 CH_ERR("sleep %d us\n",ib[i].data);
229 } else { 229 } else
230 vsc_write( adapter, ib[i].addr, ib[i].data ); 230 vsc_write( adapter, ib[i].addr, ib[i].data );
231 }
232 } 231 }
233} 232}
234 233
235static int bist_rd(adapter_t *adapter, int moduleid, int address) 234static int bist_rd(adapter_t *adapter, int moduleid, int address)
236{ 235{
237 int data=0; 236 int data = 0;
238 u32 result=0; 237 u32 result = 0;
239 238
240 if( (address != 0x0) && 239 if ((address != 0x0) &&
241 (address != 0x1) && 240 (address != 0x1) &&
242 (address != 0x2) && 241 (address != 0x2) &&
243 (address != 0xd) && 242 (address != 0xd) &&
244 (address != 0xe)) 243 (address != 0xe))
245 CH_ERR("No bist address: 0x%x\n", address); 244 CH_ERR("No bist address: 0x%x\n", address);
246 245
247 data = ((0x00 << 24) | ((address & 0xff) << 16) | (0x00 << 8) | 246 data = ((0x00 << 24) | ((address & 0xff) << 16) | (0x00 << 8) |
@@ -251,27 +250,27 @@ static int bist_rd(adapter_t *adapter, int moduleid, int address)
251 udelay(10); 250 udelay(10);
252 251
253 vsc_read(adapter, REG_RAM_BIST_RESULT, &result); 252 vsc_read(adapter, REG_RAM_BIST_RESULT, &result);
254 if((result & (1<<9)) != 0x0) 253 if ((result & (1 << 9)) != 0x0)
255 CH_ERR("Still in bist read: 0x%x\n", result); 254 CH_ERR("Still in bist read: 0x%x\n", result);
256 else if((result & (1<<8)) != 0x0) 255 else if ((result & (1 << 8)) != 0x0)
257 CH_ERR("bist read error: 0x%x\n", result); 256 CH_ERR("bist read error: 0x%x\n", result);
258 257
259 return(result & 0xff); 258 return (result & 0xff);
260} 259}
261 260
262static int bist_wr(adapter_t *adapter, int moduleid, int address, int value) 261static int bist_wr(adapter_t *adapter, int moduleid, int address, int value)
263{ 262{
264 int data=0; 263 int data = 0;
265 u32 result=0; 264 u32 result = 0;
266 265
267 if( (address != 0x0) && 266 if ((address != 0x0) &&
268 (address != 0x1) && 267 (address != 0x1) &&
269 (address != 0x2) && 268 (address != 0x2) &&
270 (address != 0xd) && 269 (address != 0xd) &&
271 (address != 0xe)) 270 (address != 0xe))
272 CH_ERR("No bist address: 0x%x\n", address); 271 CH_ERR("No bist address: 0x%x\n", address);
273 272
274 if( value>255 ) 273 if (value > 255)
275 CH_ERR("Suspicious write out of range value: 0x%x\n", value); 274 CH_ERR("Suspicious write out of range value: 0x%x\n", value);
276 275
277 data = ((0x01 << 24) | ((address & 0xff) << 16) | (value << 8) | 276 data = ((0x01 << 24) | ((address & 0xff) << 16) | (value << 8) |
@@ -281,12 +280,12 @@ static int bist_wr(adapter_t *adapter, int moduleid, int address, int value)
281 udelay(5); 280 udelay(5);
282 281
283 vsc_read(adapter, REG_RAM_BIST_CMD, &result); 282 vsc_read(adapter, REG_RAM_BIST_CMD, &result);
284 if((result & (1<<27)) != 0x0) 283 if ((result & (1 << 27)) != 0x0)
285 CH_ERR("Still in bist write: 0x%x\n", result); 284 CH_ERR("Still in bist write: 0x%x\n", result);
286 else if((result & (1<<26)) != 0x0) 285 else if ((result & (1 << 26)) != 0x0)
287 CH_ERR("bist write error: 0x%x\n", result); 286 CH_ERR("bist write error: 0x%x\n", result);
288 287
289 return(0); 288 return 0;
290} 289}
291 290
292static int run_bist(adapter_t *adapter, int moduleid) 291static int run_bist(adapter_t *adapter, int moduleid)
@@ -295,7 +294,7 @@ static int run_bist(adapter_t *adapter, int moduleid)
295 (void) bist_wr(adapter,moduleid, 0x00, 0x02); 294 (void) bist_wr(adapter,moduleid, 0x00, 0x02);
296 (void) bist_wr(adapter,moduleid, 0x01, 0x01); 295 (void) bist_wr(adapter,moduleid, 0x01, 0x01);
297 296
298 return(0); 297 return 0;
299} 298}
300 299
301static int check_bist(adapter_t *adapter, int moduleid) 300static int check_bist(adapter_t *adapter, int moduleid)
@@ -309,27 +308,26 @@ static int check_bist(adapter_t *adapter, int moduleid)
309 if ((result & 3) != 0x3) 308 if ((result & 3) != 0x3)
310 CH_ERR("Result: 0x%x BIST error in ram %d, column: 0x%04x\n", 309 CH_ERR("Result: 0x%x BIST error in ram %d, column: 0x%04x\n",
311 result, moduleid, column); 310 result, moduleid, column);
312 return(0); 311 return 0;
313} 312}
314 313
315static int enable_mem(adapter_t *adapter, int moduleid) 314static int enable_mem(adapter_t *adapter, int moduleid)
316{ 315{
317 /*enable mem*/ 316 /*enable mem*/
318 (void) bist_wr(adapter,moduleid, 0x00, 0x00); 317 (void) bist_wr(adapter,moduleid, 0x00, 0x00);
319 return(0); 318 return 0;
320} 319}
321 320
322static int run_bist_all(adapter_t *adapter) 321static int run_bist_all(adapter_t *adapter)
323{ 322{
324 int port=0; 323 int port = 0;
325 u32 val=0; 324 u32 val = 0;
326 325
327 vsc_write(adapter, REG_MEM_BIST, 0x5); 326 vsc_write(adapter, REG_MEM_BIST, 0x5);
328 vsc_read(adapter, REG_MEM_BIST, &val); 327 vsc_read(adapter, REG_MEM_BIST, &val);
329 328
330 for(port=0; port<12; port++){ 329 for (port = 0; port < 12; port++)
331 vsc_write(adapter, REG_DEV_SETUP(port), 0x0); 330 vsc_write(adapter, REG_DEV_SETUP(port), 0x0);
332 }
333 331
334 udelay(300); 332 udelay(300);
335 vsc_write(adapter, REG_SPI4_MISC, 0x00040409); 333 vsc_write(adapter, REG_SPI4_MISC, 0x00040409);
@@ -352,13 +350,13 @@ static int run_bist_all(adapter_t *adapter)
352 udelay(300); 350 udelay(300);
353 vsc_write(adapter, REG_SPI4_MISC, 0x60040400); 351 vsc_write(adapter, REG_SPI4_MISC, 0x60040400);
354 udelay(300); 352 udelay(300);
355 for(port=0; port<12; port++){ 353 for (port = 0; port < 12; port++)
356 vsc_write(adapter, REG_DEV_SETUP(port), 0x1); 354 vsc_write(adapter, REG_DEV_SETUP(port), 0x1);
357 } 355
358 udelay(300); 356 udelay(300);
359 vsc_write(adapter, REG_MEM_BIST, 0x0); 357 vsc_write(adapter, REG_MEM_BIST, 0x0);
360 mdelay(10); 358 mdelay(10);
361 return(0); 359 return 0;
362} 360}
363 361
364static int mac_intr_handler(struct cmac *mac) 362static int mac_intr_handler(struct cmac *mac)
@@ -591,40 +589,46 @@ static void rmon_update(struct cmac *mac, unsigned int addr, u64 *stat)
591 589
592static void port_stats_update(struct cmac *mac) 590static void port_stats_update(struct cmac *mac)
593{ 591{
594 int port = mac->instance->index; 592 struct {
593 unsigned int reg;
594 unsigned int offset;
595 } hw_stats[] = {
596
597#define HW_STAT(reg, stat_name) \
598 { reg, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
599
600 /* Rx stats */
601 HW_STAT(RxUnicast, RxUnicastFramesOK),
602 HW_STAT(RxMulticast, RxMulticastFramesOK),
603 HW_STAT(RxBroadcast, RxBroadcastFramesOK),
604 HW_STAT(Crc, RxFCSErrors),
605 HW_STAT(RxAlignment, RxAlignErrors),
606 HW_STAT(RxOversize, RxFrameTooLongErrors),
607 HW_STAT(RxPause, RxPauseFrames),
608 HW_STAT(RxJabbers, RxJabberErrors),
609 HW_STAT(RxFragments, RxRuntErrors),
610 HW_STAT(RxUndersize, RxRuntErrors),
611 HW_STAT(RxSymbolCarrier, RxSymbolErrors),
612 HW_STAT(RxSize1519ToMax, RxJumboFramesOK),
613
614 /* Tx stats (skip collision stats as we are full-duplex only) */
615 HW_STAT(TxUnicast, TxUnicastFramesOK),
616 HW_STAT(TxMulticast, TxMulticastFramesOK),
617 HW_STAT(TxBroadcast, TxBroadcastFramesOK),
618 HW_STAT(TxPause, TxPauseFrames),
619 HW_STAT(TxUnderrun, TxUnderrun),
620 HW_STAT(TxSize1519ToMax, TxJumboFramesOK),
621 }, *p = hw_stats;
622 unsigned int port = mac->instance->index;
623 u64 *stats = (u64 *)&mac->stats;
624 unsigned int i;
625
626 for (i = 0; i < ARRAY_SIZE(hw_stats); i++)
627 rmon_update(mac, CRA(0x4, port, p->reg), stats + p->offset);
595 628
596 /* Rx stats */ 629 rmon_update(mac, REG_TX_OK_BYTES(port), &mac->stats.TxOctetsOK);
597 rmon_update(mac, REG_RX_OK_BYTES(port), &mac->stats.RxOctetsOK); 630 rmon_update(mac, REG_RX_OK_BYTES(port), &mac->stats.RxOctetsOK);
598 rmon_update(mac, REG_RX_BAD_BYTES(port), &mac->stats.RxOctetsBad); 631 rmon_update(mac, REG_RX_BAD_BYTES(port), &mac->stats.RxOctetsBad);
599 rmon_update(mac, REG_RX_UNICAST(port), &mac->stats.RxUnicastFramesOK);
600 rmon_update(mac, REG_RX_MULTICAST(port),
601 &mac->stats.RxMulticastFramesOK);
602 rmon_update(mac, REG_RX_BROADCAST(port),
603 &mac->stats.RxBroadcastFramesOK);
604 rmon_update(mac, REG_CRC(port), &mac->stats.RxFCSErrors);
605 rmon_update(mac, REG_RX_ALIGNMENT(port), &mac->stats.RxAlignErrors);
606 rmon_update(mac, REG_RX_OVERSIZE(port),
607 &mac->stats.RxFrameTooLongErrors);
608 rmon_update(mac, REG_RX_PAUSE(port), &mac->stats.RxPauseFrames);
609 rmon_update(mac, REG_RX_JABBERS(port), &mac->stats.RxJabberErrors);
610 rmon_update(mac, REG_RX_FRAGMENTS(port), &mac->stats.RxRuntErrors);
611 rmon_update(mac, REG_RX_UNDERSIZE(port), &mac->stats.RxRuntErrors);
612 rmon_update(mac, REG_RX_SYMBOL_CARRIER(port),
613 &mac->stats.RxSymbolErrors);
614 rmon_update(mac, REG_RX_SIZE_1519_TO_MAX(port),
615 &mac->stats.RxJumboFramesOK);
616
617 /* Tx stats (skip collision stats as we are full-duplex only) */
618 rmon_update(mac, REG_TX_OK_BYTES(port), &mac->stats.TxOctetsOK);
619 rmon_update(mac, REG_TX_UNICAST(port), &mac->stats.TxUnicastFramesOK);
620 rmon_update(mac, REG_TX_MULTICAST(port),
621 &mac->stats.TxMulticastFramesOK);
622 rmon_update(mac, REG_TX_BROADCAST(port),
623 &mac->stats.TxBroadcastFramesOK);
624 rmon_update(mac, REG_TX_PAUSE(port), &mac->stats.TxPauseFrames);
625 rmon_update(mac, REG_TX_UNDERRUN(port), &mac->stats.TxUnderrun);
626 rmon_update(mac, REG_TX_SIZE_1519_TO_MAX(port),
627 &mac->stats.TxJumboFramesOK);
628} 632}
629 633
630/* 634/*
@@ -686,7 +690,8 @@ static struct cmac *vsc7326_mac_create(adapter_t *adapter, int index)
686 int i; 690 int i;
687 691
688 mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL); 692 mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL);
689 if (!mac) return NULL; 693 if (!mac)
694 return NULL;
690 695
691 mac->ops = &vsc7326_ops; 696 mac->ops = &vsc7326_ops;
692 mac->instance = (cmac_instance *)(mac + 1); 697 mac->instance = (cmac_instance *)(mac + 1);
diff --git a/drivers/net/chelsio/vsc7326_reg.h b/drivers/net/chelsio/vsc7326_reg.h
index 491bcf75c4fb..479edbcabe68 100644
--- a/drivers/net/chelsio/vsc7326_reg.h
+++ b/drivers/net/chelsio/vsc7326_reg.h
@@ -192,73 +192,84 @@
192#define REG_HDX(pn) CRA(0x1,pn,0x19) /* Half-duplex config */ 192#define REG_HDX(pn) CRA(0x1,pn,0x19) /* Half-duplex config */
193 193
194/* Statistics */ 194/* Statistics */
195/* CRA(0x4,pn,reg) */
196/* reg below */
195/* pn = port number, 0-a, a = 10GbE */ 197/* pn = port number, 0-a, a = 10GbE */
196#define REG_RX_IN_BYTES(pn) CRA(0x4,pn,0x00) /* # Rx in octets */
197#define REG_RX_SYMBOL_CARRIER(pn) CRA(0x4,pn,0x01) /* Frames w/ symbol errors */
198#define REG_RX_PAUSE(pn) CRA(0x4,pn,0x02) /* # pause frames received */
199#define REG_RX_UNSUP_OPCODE(pn) CRA(0x4,pn,0x03) /* # control frames with unsupported opcode */
200#define REG_RX_OK_BYTES(pn) CRA(0x4,pn,0x04) /* # octets in good frames */
201#define REG_RX_BAD_BYTES(pn) CRA(0x4,pn,0x05) /* # octets in bad frames */
202#define REG_RX_UNICAST(pn) CRA(0x4,pn,0x06) /* # good unicast frames */
203#define REG_RX_MULTICAST(pn) CRA(0x4,pn,0x07) /* # good multicast frames */
204#define REG_RX_BROADCAST(pn) CRA(0x4,pn,0x08) /* # good broadcast frames */
205#define REG_CRC(pn) CRA(0x4,pn,0x09) /* # frames w/ bad CRC only */
206#define REG_RX_ALIGNMENT(pn) CRA(0x4,pn,0x0a) /* # frames w/ alignment err */
207#define REG_RX_UNDERSIZE(pn) CRA(0x4,pn,0x0b) /* # frames undersize */
208#define REG_RX_FRAGMENTS(pn) CRA(0x4,pn,0x0c) /* # frames undersize w/ crc err */
209#define REG_RX_IN_RANGE_LENGTH_ERROR(pn) CRA(0x4,pn,0x0d) /* # frames with length error */
210#define REG_RX_OUT_OF_RANGE_ERROR(pn) CRA(0x4,pn,0x0e) /* # frames with illegal length field */
211#define REG_RX_OVERSIZE(pn) CRA(0x4,pn,0x0f) /* # frames oversize */
212#define REG_RX_JABBERS(pn) CRA(0x4,pn,0x10) /* # frames oversize w/ crc err */
213#define REG_RX_SIZE_64(pn) CRA(0x4,pn,0x11) /* # frames 64 octets long */
214#define REG_RX_SIZE_65_TO_127(pn) CRA(0x4,pn,0x12) /* # frames 65-127 octets */
215#define REG_RX_SIZE_128_TO_255(pn) CRA(0x4,pn,0x13) /* # frames 128-255 */
216#define REG_RX_SIZE_256_TO_511(pn) CRA(0x4,pn,0x14) /* # frames 256-511 */
217#define REG_RX_SIZE_512_TO_1023(pn) CRA(0x4,pn,0x15) /* # frames 512-1023 */
218#define REG_RX_SIZE_1024_TO_1518(pn) CRA(0x4,pn,0x16) /* # frames 1024-1518 */
219#define REG_RX_SIZE_1519_TO_MAX(pn) CRA(0x4,pn,0x17) /* # frames 1519-max */
220 198
221#define REG_TX_OUT_BYTES(pn) CRA(0x4,pn,0x18) /* # octets tx */ 199enum {
222#define REG_TX_PAUSE(pn) CRA(0x4,pn,0x19) /* # pause frames sent */ 200 RxInBytes = 0x00, // # Rx in octets
223#define REG_TX_OK_BYTES(pn) CRA(0x4,pn,0x1a) /* # octets tx OK */ 201 RxSymbolCarrier = 0x01, // Frames w/ symbol errors
224#define REG_TX_UNICAST(pn) CRA(0x4,pn,0x1b) /* # frames unicast */ 202 RxPause = 0x02, // # pause frames received
225#define REG_TX_MULTICAST(pn) CRA(0x4,pn,0x1c) /* # frames multicast */ 203 RxUnsupOpcode = 0x03, // # control frames with unsupported opcode
226#define REG_TX_BROADCAST(pn) CRA(0x4,pn,0x1d) /* # frames broadcast */ 204 RxOkBytes = 0x04, // # octets in good frames
227#define REG_TX_MULTIPLE_COLL(pn) CRA(0x4,pn,0x1e) /* # frames tx after multiple collisions */ 205 RxBadBytes = 0x05, // # octets in bad frames
228#define REG_TX_LATE_COLL(pn) CRA(0x4,pn,0x1f) /* # late collisions detected */ 206 RxUnicast = 0x06, // # good unicast frames
229#define REG_TX_XCOLL(pn) CRA(0x4,pn,0x20) /* # frames lost, excessive collisions */ 207 RxMulticast = 0x07, // # good multicast frames
230#define REG_TX_DEFER(pn) CRA(0x4,pn,0x21) /* # frames deferred on first tx attempt */ 208 RxBroadcast = 0x08, // # good broadcast frames
231#define REG_TX_XDEFER(pn) CRA(0x4,pn,0x22) /* # frames excessively deferred */ 209 Crc = 0x09, // # frames w/ bad CRC only
232#define REG_TX_CSENSE(pn) CRA(0x4,pn,0x23) /* carrier sense errors at frame end */ 210 RxAlignment = 0x0a, // # frames w/ alignment err
233#define REG_TX_SIZE_64(pn) CRA(0x4,pn,0x24) /* # frames 64 octets long */ 211 RxUndersize = 0x0b, // # frames undersize
234#define REG_TX_SIZE_65_TO_127(pn) CRA(0x4,pn,0x25) /* # frames 65-127 octets */ 212 RxFragments = 0x0c, // # frames undersize w/ crc err
235#define REG_TX_SIZE_128_TO_255(pn) CRA(0x4,pn,0x26) /* # frames 128-255 */ 213 RxInRangeLengthError = 0x0d, // # frames with length error
236#define REG_TX_SIZE_256_TO_511(pn) CRA(0x4,pn,0x27) /* # frames 256-511 */ 214 RxOutOfRangeError = 0x0e, // # frames with illegal length field
237#define REG_TX_SIZE_512_TO_1023(pn) CRA(0x4,pn,0x28) /* # frames 512-1023 */ 215 RxOversize = 0x0f, // # frames oversize
238#define REG_TX_SIZE_1024_TO_1518(pn) CRA(0x4,pn,0x29) /* # frames 1024-1518 */ 216 RxJabbers = 0x10, // # frames oversize w/ crc err
239#define REG_TX_SIZE_1519_TO_MAX(pn) CRA(0x4,pn,0x2a) /* # frames 1519-max */ 217 RxSize64 = 0x11, // # frames 64 octets long
240#define REG_TX_SINGLE_COLL(pn) CRA(0x4,pn,0x2b) /* # frames tx after single collision */ 218 RxSize65To127 = 0x12, // # frames 65-127 octets
241#define REG_TX_BACKOFF2(pn) CRA(0x4,pn,0x2c) /* # frames tx ok after 2 backoffs/collisions */ 219 RxSize128To255 = 0x13, // # frames 128-255
242#define REG_TX_BACKOFF3(pn) CRA(0x4,pn,0x2d) /* after 3 backoffs/collisions */ 220 RxSize256To511 = 0x14, // # frames 256-511
243#define REG_TX_BACKOFF4(pn) CRA(0x4,pn,0x2e) /* after 4 */ 221 RxSize512To1023 = 0x15, // # frames 512-1023
244#define REG_TX_BACKOFF5(pn) CRA(0x4,pn,0x2f) /* after 5 */ 222 RxSize1024To1518 = 0x16, // # frames 1024-1518
245#define REG_TX_BACKOFF6(pn) CRA(0x4,pn,0x30) /* after 6 */ 223 RxSize1519ToMax = 0x17, // # frames 1519-max
246#define REG_TX_BACKOFF7(pn) CRA(0x4,pn,0x31) /* after 7 */
247#define REG_TX_BACKOFF8(pn) CRA(0x4,pn,0x32) /* after 8 */
248#define REG_TX_BACKOFF9(pn) CRA(0x4,pn,0x33) /* after 9 */
249#define REG_TX_BACKOFF10(pn) CRA(0x4,pn,0x34) /* after 10 */
250#define REG_TX_BACKOFF11(pn) CRA(0x4,pn,0x35) /* after 11 */
251#define REG_TX_BACKOFF12(pn) CRA(0x4,pn,0x36) /* after 12 */
252#define REG_TX_BACKOFF13(pn) CRA(0x4,pn,0x37) /* after 13 */
253#define REG_TX_BACKOFF14(pn) CRA(0x4,pn,0x38) /* after 14 */
254#define REG_TX_BACKOFF15(pn) CRA(0x4,pn,0x39) /* after 15 */
255#define REG_TX_UNDERRUN(pn) CRA(0x4,pn,0x3a) /* # frames dropped from underrun */
256#define REG_RX_XGMII_PROT_ERR CRA(0x4,0xa,0x3b) /* # protocol errors detected on XGMII interface */
257#define REG_RX_IPG_SHRINK(pn) CRA(0x4,pn,0x3c) /* # of IPG shrinks detected */
258 224
259#define REG_STAT_STICKY1G(pn) CRA(0x4,pn,0x3e) /* tri-speed sticky bits */ 225 TxOutBytes = 0x18, // # octets tx
260#define REG_STAT_STICKY10G CRA(0x4,0xa,0x3e) /* 10GbE sticky bits */ 226 TxPause = 0x19, // # pause frames sent
261#define REG_STAT_INIT(pn) CRA(0x4,pn,0x3f) /* Clear all statistics */ 227 TxOkBytes = 0x1a, // # octets tx OK
228 TxUnicast = 0x1b, // # frames unicast
229 TxMulticast = 0x1c, // # frames multicast
230 TxBroadcast = 0x1d, // # frames broadcast
231 TxMultipleColl = 0x1e, // # frames tx after multiple collisions
232 TxLateColl = 0x1f, // # late collisions detected
233 TxXcoll = 0x20, // # frames lost, excessive collisions
234 TxDefer = 0x21, // # frames deferred on first tx attempt
235 TxXdefer = 0x22, // # frames excessively deferred
236 TxCsense = 0x23, // carrier sense errors at frame end
237 TxSize64 = 0x24, // # frames 64 octets long
238 TxSize65To127 = 0x25, // # frames 65-127 octets
239 TxSize128To255 = 0x26, // # frames 128-255
240 TxSize256To511 = 0x27, // # frames 256-511
241 TxSize512To1023 = 0x28, // # frames 512-1023
242 TxSize1024To1518 = 0x29, // # frames 1024-1518
243 TxSize1519ToMax = 0x2a, // # frames 1519-max
244 TxSingleColl = 0x2b, // # frames tx after single collision
245 TxBackoff2 = 0x2c, // # frames tx ok after 2 backoffs/collisions
246 TxBackoff3 = 0x2d, // after 3 backoffs/collisions
247 TxBackoff4 = 0x2e, // after 4
248 TxBackoff5 = 0x2f, // after 5
249 TxBackoff6 = 0x30, // after 6
250 TxBackoff7 = 0x31, // after 7
251 TxBackoff8 = 0x32, // after 8
252 TxBackoff9 = 0x33, // after 9
253 TxBackoff10 = 0x34, // after 10
254 TxBackoff11 = 0x35, // after 11
255 TxBackoff12 = 0x36, // after 12
256 TxBackoff13 = 0x37, // after 13
257 TxBackoff14 = 0x38, // after 14
258 TxBackoff15 = 0x39, // after 15
259 TxUnderrun = 0x3a, // # frames dropped from underrun
260 // Hole. See REG_RX_XGMII_PROT_ERR below.
261 RxIpgShrink = 0x3c, // # of IPG shrinks detected
262 // Duplicate. See REG_STAT_STICKY10G below.
263 StatSticky1G = 0x3e, // tri-speed sticky bits
264 StatInit = 0x3f // Clear all statistics
265};
266
267#define REG_RX_XGMII_PROT_ERR CRA(0x4,0xa,0x3b) /* # protocol errors detected on XGMII interface */
268#define REG_STAT_STICKY10G CRA(0x4,0xa,StatSticky1G) /* 10GbE sticky bits */
269
270#define REG_RX_OK_BYTES(pn) CRA(0x4,pn,RxOkBytes)
271#define REG_RX_BAD_BYTES(pn) CRA(0x4,pn,RxBadBytes)
272#define REG_TX_OK_BYTES(pn) CRA(0x4,pn,TxOkBytes)
262 273
263/* MII-Management Block registers */ 274/* MII-Management Block registers */
264/* These are for MII-M interface 0, which is the bidirectional LVTTL one. If 275/* These are for MII-M interface 0, which is the bidirectional LVTTL one. If
diff --git a/drivers/net/chelsio/vsc8244.c b/drivers/net/chelsio/vsc8244.c
index c493e783d459..251d4859c91d 100644
--- a/drivers/net/chelsio/vsc8244.c
+++ b/drivers/net/chelsio/vsc8244.c
@@ -54,7 +54,7 @@ enum {
54}; 54};
55 55
56#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \ 56#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \
57 VSC_INTR_NEG_DONE) 57 VSC_INTR_NEG_DONE)
58#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \ 58#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \
59 VSC_INTR_ENABLE) 59 VSC_INTR_ENABLE)
60 60
@@ -94,19 +94,18 @@ static int vsc8244_intr_enable(struct cphy *cphy)
94{ 94{
95 simple_mdio_write(cphy, VSC8244_INTR_ENABLE, INTR_MASK); 95 simple_mdio_write(cphy, VSC8244_INTR_ENABLE, INTR_MASK);
96 96
97 /* Enable interrupts through Elmer */ 97 /* Enable interrupts through Elmer */
98 if (t1_is_asic(cphy->adapter)) { 98 if (t1_is_asic(cphy->adapter)) {
99 u32 elmer; 99 u32 elmer;
100 100
101 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); 101 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
102 elmer |= ELMER0_GP_BIT1; 102 elmer |= ELMER0_GP_BIT1;
103 if (is_T2(cphy->adapter)) { 103 if (is_T2(cphy->adapter))
104 elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4; 104 elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
105 }
106 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); 105 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
107 } 106 }
108 107
109 return 0; 108 return 0;
110} 109}
111 110
112static int vsc8244_intr_disable(struct cphy *cphy) 111static int vsc8244_intr_disable(struct cphy *cphy)
@@ -118,19 +117,18 @@ static int vsc8244_intr_disable(struct cphy *cphy)
118 117
119 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); 118 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
120 elmer &= ~ELMER0_GP_BIT1; 119 elmer &= ~ELMER0_GP_BIT1;
121 if (is_T2(cphy->adapter)) { 120 if (is_T2(cphy->adapter))
122 elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4); 121 elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4);
123 }
124 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); 122 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
125 } 123 }
126 124
127 return 0; 125 return 0;
128} 126}
129 127
130static int vsc8244_intr_clear(struct cphy *cphy) 128static int vsc8244_intr_clear(struct cphy *cphy)
131{ 129{
132 u32 val; 130 u32 val;
133 u32 elmer; 131 u32 elmer;
134 132
135 /* Clear PHY interrupts by reading the register. */ 133 /* Clear PHY interrupts by reading the register. */
136 simple_mdio_read(cphy, VSC8244_INTR_ENABLE, &val); 134 simple_mdio_read(cphy, VSC8244_INTR_ENABLE, &val);
@@ -138,13 +136,12 @@ static int vsc8244_intr_clear(struct cphy *cphy)
138 if (t1_is_asic(cphy->adapter)) { 136 if (t1_is_asic(cphy->adapter)) {
139 t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer); 137 t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
140 elmer |= ELMER0_GP_BIT1; 138 elmer |= ELMER0_GP_BIT1;
141 if (is_T2(cphy->adapter)) { 139 if (is_T2(cphy->adapter))
142 elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4; 140 elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
143 }
144 t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer); 141 t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
145 } 142 }
146 143
147 return 0; 144 return 0;
148} 145}
149 146
150/* 147/*
@@ -179,13 +176,13 @@ static int vsc8244_set_speed_duplex(struct cphy *phy, int speed, int duplex)
179 176
180int t1_mdio_set_bits(struct cphy *phy, int mmd, int reg, unsigned int bits) 177int t1_mdio_set_bits(struct cphy *phy, int mmd, int reg, unsigned int bits)
181{ 178{
182 int ret; 179 int ret;
183 unsigned int val; 180 unsigned int val;
184 181
185 ret = mdio_read(phy, mmd, reg, &val); 182 ret = mdio_read(phy, mmd, reg, &val);
186 if (!ret) 183 if (!ret)
187 ret = mdio_write(phy, mmd, reg, val | bits); 184 ret = mdio_write(phy, mmd, reg, val | bits);
188 return ret; 185 return ret;
189} 186}
190 187
191static int vsc8244_autoneg_enable(struct cphy *cphy) 188static int vsc8244_autoneg_enable(struct cphy *cphy)
@@ -235,7 +232,7 @@ static int vsc8244_advertise(struct cphy *phy, unsigned int advertise_map)
235} 232}
236 233
237static int vsc8244_get_link_status(struct cphy *cphy, int *link_ok, 234static int vsc8244_get_link_status(struct cphy *cphy, int *link_ok,
238 int *speed, int *duplex, int *fc) 235 int *speed, int *duplex, int *fc)
239{ 236{
240 unsigned int bmcr, status, lpa, adv; 237 unsigned int bmcr, status, lpa, adv;
241 int err, sp = -1, dplx = -1, pause = 0; 238 int err, sp = -1, dplx = -1, pause = 0;
@@ -343,11 +340,13 @@ static struct cphy_ops vsc8244_ops = {
343 .get_link_status = vsc8244_get_link_status 340 .get_link_status = vsc8244_get_link_status
344}; 341};
345 342
346static struct cphy* vsc8244_phy_create(adapter_t *adapter, int phy_addr, struct mdio_ops *mdio_ops) 343static struct cphy* vsc8244_phy_create(adapter_t *adapter, int phy_addr,
344 struct mdio_ops *mdio_ops)
347{ 345{
348 struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL); 346 struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);
349 347
350 if (!cphy) return NULL; 348 if (!cphy)
349 return NULL;
351 350
352 cphy_init(cphy, adapter, phy_addr, &vsc8244_ops, mdio_ops); 351 cphy_init(cphy, adapter, phy_addr, &vsc8244_ops, mdio_ops);
353 352
diff --git a/drivers/net/cxgb3/Makefile b/drivers/net/cxgb3/Makefile
new file mode 100644
index 000000000000..343467985321
--- /dev/null
+++ b/drivers/net/cxgb3/Makefile
@@ -0,0 +1,8 @@
1#
2# Chelsio T3 driver
3#
4
5obj-$(CONFIG_CHELSIO_T3) += cxgb3.o
6
7cxgb3-objs := cxgb3_main.o ael1002.o vsc8211.o t3_hw.o mc5.o \
8 xgmac.o sge.o l2t.o cxgb3_offload.o
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
new file mode 100644
index 000000000000..5c97a64451ce
--- /dev/null
+++ b/drivers/net/cxgb3/adapter.h
@@ -0,0 +1,279 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/* This file should not be included directly. Include common.h instead. */
34
35#ifndef __T3_ADAPTER_H__
36#define __T3_ADAPTER_H__
37
38#include <linux/pci.h>
39#include <linux/spinlock.h>
40#include <linux/interrupt.h>
41#include <linux/timer.h>
42#include <linux/cache.h>
43#include <linux/mutex.h>
44#include "t3cdev.h"
45#include <asm/semaphore.h>
46#include <asm/bitops.h>
47#include <asm/io.h>
48
49typedef irqreturn_t(*intr_handler_t) (int, void *);
50
51struct vlan_group;
52
53struct port_info {
54 struct vlan_group *vlan_grp;
55 const struct port_type_info *port_type;
56 u8 port_id;
57 u8 rx_csum_offload;
58 u8 nqsets;
59 u8 first_qset;
60 struct cphy phy;
61 struct cmac mac;
62 struct link_config link_config;
63 struct net_device_stats netstats;
64 int activity;
65};
66
67enum { /* adapter flags */
68 FULL_INIT_DONE = (1 << 0),
69 USING_MSI = (1 << 1),
70 USING_MSIX = (1 << 2),
71 QUEUES_BOUND = (1 << 3),
72};
73
74struct rx_desc;
75struct rx_sw_desc;
76
77struct sge_fl { /* SGE per free-buffer list state */
78 unsigned int buf_size; /* size of each Rx buffer */
79 unsigned int credits; /* # of available Rx buffers */
80 unsigned int size; /* capacity of free list */
81 unsigned int cidx; /* consumer index */
82 unsigned int pidx; /* producer index */
83 unsigned int gen; /* free list generation */
84 struct rx_desc *desc; /* address of HW Rx descriptor ring */
85 struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
86 dma_addr_t phys_addr; /* physical address of HW ring start */
87 unsigned int cntxt_id; /* SGE context id for the free list */
88 unsigned long empty; /* # of times queue ran out of buffers */
89};
90
91/*
92 * Bundle size for grouping offload RX packets for delivery to the stack.
93 * Don't make this too big as we do prefetch on each packet in a bundle.
94 */
95# define RX_BUNDLE_SIZE 8
96
97struct rsp_desc;
98
99struct sge_rspq { /* state for an SGE response queue */
100 unsigned int credits; /* # of pending response credits */
101 unsigned int size; /* capacity of response queue */
102 unsigned int cidx; /* consumer index */
103 unsigned int gen; /* current generation bit */
104 unsigned int polling; /* is the queue serviced through NAPI? */
105 unsigned int holdoff_tmr; /* interrupt holdoff timer in 100ns */
106 unsigned int next_holdoff; /* holdoff time for next interrupt */
107 struct rsp_desc *desc; /* address of HW response ring */
108 dma_addr_t phys_addr; /* physical address of the ring */
109 unsigned int cntxt_id; /* SGE context id for the response q */
110 spinlock_t lock; /* guards response processing */
111 struct sk_buff *rx_head; /* offload packet receive queue head */
112 struct sk_buff *rx_tail; /* offload packet receive queue tail */
113
114 unsigned long offload_pkts;
115 unsigned long offload_bundles;
116 unsigned long eth_pkts; /* # of ethernet packets */
117 unsigned long pure_rsps; /* # of pure (non-data) responses */
118 unsigned long imm_data; /* responses with immediate data */
119 unsigned long rx_drops; /* # of packets dropped due to no mem */
120 unsigned long async_notif; /* # of asynchronous notification events */
121 unsigned long empty; /* # of times queue ran out of credits */
122 unsigned long nomem; /* # of responses deferred due to no mem */
123 unsigned long unhandled_irqs; /* # of spurious intrs */
124};
125
126struct tx_desc;
127struct tx_sw_desc;
128
129struct sge_txq { /* state for an SGE Tx queue */
130 unsigned long flags; /* HW DMA fetch status */
131 unsigned int in_use; /* # of in-use Tx descriptors */
132 unsigned int size; /* # of descriptors */
133 unsigned int processed; /* total # of descs HW has processed */
134 unsigned int cleaned; /* total # of descs SW has reclaimed */
135 unsigned int stop_thres; /* SW TX queue suspend threshold */
136 unsigned int cidx; /* consumer index */
137 unsigned int pidx; /* producer index */
138 unsigned int gen; /* current value of generation bit */
139 unsigned int unacked; /* Tx descriptors used since last COMPL */
140 struct tx_desc *desc; /* address of HW Tx descriptor ring */
141 struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */
142 spinlock_t lock; /* guards enqueueing of new packets */
143 unsigned int token; /* WR token */
144 dma_addr_t phys_addr; /* physical address of the ring */
145 struct sk_buff_head sendq; /* List of backpressured offload packets */
146 struct tasklet_struct qresume_tsk; /* restarts the queue */
147 unsigned int cntxt_id; /* SGE context id for the Tx q */
148 unsigned long stops; /* # of times q has been stopped */
149 unsigned long restarts; /* # of queue restarts */
150};
151
152enum { /* per port SGE statistics */
153 SGE_PSTAT_TSO, /* # of TSO requests */
154 SGE_PSTAT_RX_CSUM_GOOD, /* # of successful RX csum offloads */
155 SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */
156 SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */
157 SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */
158
159 SGE_PSTAT_MAX /* must be last */
160};
161
162struct sge_qset { /* an SGE queue set */
163 struct sge_rspq rspq;
164 struct sge_fl fl[SGE_RXQ_PER_SET];
165 struct sge_txq txq[SGE_TXQ_PER_SET];
166 struct net_device *netdev; /* associated net device */
167 unsigned long txq_stopped; /* which Tx queues are stopped */
168 struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
169 unsigned long port_stats[SGE_PSTAT_MAX];
170} ____cacheline_aligned;
171
172struct sge {
173 struct sge_qset qs[SGE_QSETS];
174 spinlock_t reg_lock; /* guards non-atomic SGE registers (eg context) */
175};
176
177struct adapter {
178 struct t3cdev tdev;
179 struct list_head adapter_list;
180 void __iomem *regs;
181 struct pci_dev *pdev;
182 unsigned long registered_device_map;
183 unsigned long open_device_map;
184 unsigned long flags;
185
186 const char *name;
187 int msg_enable;
188 unsigned int mmio_len;
189
190 struct adapter_params params;
191 unsigned int slow_intr_mask;
192 unsigned long irq_stats[IRQ_NUM_STATS];
193
194 struct {
195 unsigned short vec;
196 char desc[22];
197 } msix_info[SGE_QSETS + 1];
198
199 /* T3 modules */
200 struct sge sge;
201 struct mc7 pmrx;
202 struct mc7 pmtx;
203 struct mc7 cm;
204 struct mc5 mc5;
205
206 struct net_device *port[MAX_NPORTS];
207 unsigned int check_task_cnt;
208 struct delayed_work adap_check_task;
209 struct work_struct ext_intr_handler_task;
210
211 /*
212 * Dummy netdevices are needed when using multiple receive queues with
213 * NAPI as each netdevice can service only one queue.
214 */
215 struct net_device *dummy_netdev[SGE_QSETS - 1];
216
217 struct dentry *debugfs_root;
218
219 struct mutex mdio_lock;
220 spinlock_t stats_lock;
221 spinlock_t work_lock;
222};
223
224static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr)
225{
226 u32 val = readl(adapter->regs + reg_addr);
227
228 CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr, val);
229 return val;
230}
231
232static inline void t3_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
233{
234 CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr, val);
235 writel(val, adapter->regs + reg_addr);
236}
237
238static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
239{
240 return netdev_priv(adap->port[idx]);
241}
242
243/*
244 * We use the spare atalk_ptr to map a net device to its SGE queue set.
245 * This is a macro so it can be used as l-value.
246 */
247#define dev2qset(netdev) ((netdev)->atalk_ptr)
248
249#define OFFLOAD_DEVMAP_BIT 15
250
251#define tdev2adap(d) container_of(d, struct adapter, tdev)
252
253static inline int offload_running(struct adapter *adapter)
254{
255 return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
256}
257
258int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb);
259
260void t3_os_ext_intr_handler(struct adapter *adapter);
261void t3_os_link_changed(struct adapter *adapter, int port_id, int link_status,
262 int speed, int duplex, int fc);
263
264void t3_sge_start(struct adapter *adap);
265void t3_sge_stop(struct adapter *adap);
266void t3_free_sge_resources(struct adapter *adap);
267void t3_sge_err_intr_handler(struct adapter *adapter);
268intr_handler_t t3_intr_handler(struct adapter *adap, int polling);
269int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev);
270int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
271void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
272int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
273 int irq_vec_idx, const struct qset_params *p,
274 int ntxq, struct net_device *netdev);
275int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
276 unsigned char *data);
277irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
278
279#endif /* __T3_ADAPTER_H__ */
diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/cxgb3/ael1002.c
new file mode 100644
index 000000000000..73a41e6a5bfc
--- /dev/null
+++ b/drivers/net/cxgb3/ael1002.c
@@ -0,0 +1,251 @@
1/*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33#include "regs.h"
34
35enum {
36 AEL100X_TX_DISABLE = 9,
37 AEL100X_TX_CONFIG1 = 0xc002,
38 AEL1002_PWR_DOWN_HI = 0xc011,
39 AEL1002_PWR_DOWN_LO = 0xc012,
40 AEL1002_XFI_EQL = 0xc015,
41 AEL1002_LB_EN = 0xc017,
42
43 LASI_CTRL = 0x9002,
44 LASI_STAT = 0x9005
45};
46
47static void ael100x_txon(struct cphy *phy)
48{
49 int tx_on_gpio = phy->addr == 0 ? F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL;
50
51 msleep(100);
52 t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN, 0, tx_on_gpio);
53 msleep(30);
54}
55
56static int ael1002_power_down(struct cphy *phy, int enable)
57{
58 int err;
59
60 err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL100X_TX_DISABLE, !!enable);
61 if (!err)
62 err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
63 BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
64 return err;
65}
66
67static int ael1002_reset(struct cphy *phy, int wait)
68{
69 int err;
70
71 if ((err = ael1002_power_down(phy, 0)) ||
72 (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL100X_TX_CONFIG1, 1)) ||
73 (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_PWR_DOWN_HI, 0)) ||
74 (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_PWR_DOWN_LO, 0)) ||
75 (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_XFI_EQL, 0x18)) ||
76 (err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, AEL1002_LB_EN,
77 0, 1 << 5)))
78 return err;
79 return 0;
80}
81
82static int ael1002_intr_noop(struct cphy *phy)
83{
84 return 0;
85}
86
87static int ael100x_get_link_status(struct cphy *phy, int *link_ok,
88 int *speed, int *duplex, int *fc)
89{
90 if (link_ok) {
91 unsigned int status;
92 int err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &status);
93
94 /*
95 * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
96 * once more to get the current link state.
97 */
98 if (!err && !(status & BMSR_LSTATUS))
99 err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR,
100 &status);
101 if (err)
102 return err;
103 *link_ok = !!(status & BMSR_LSTATUS);
104 }
105 if (speed)
106 *speed = SPEED_10000;
107 if (duplex)
108 *duplex = DUPLEX_FULL;
109 return 0;
110}
111
112static struct cphy_ops ael1002_ops = {
113 .reset = ael1002_reset,
114 .intr_enable = ael1002_intr_noop,
115 .intr_disable = ael1002_intr_noop,
116 .intr_clear = ael1002_intr_noop,
117 .intr_handler = ael1002_intr_noop,
118 .get_link_status = ael100x_get_link_status,
119 .power_down = ael1002_power_down,
120};
121
122void t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
123 int phy_addr, const struct mdio_ops *mdio_ops)
124{
125 cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops);
126 ael100x_txon(phy);
127}
128
129static int ael1006_reset(struct cphy *phy, int wait)
130{
131 return t3_phy_reset(phy, MDIO_DEV_PMA_PMD, wait);
132}
133
134static int ael1006_intr_enable(struct cphy *phy)
135{
136 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
137}
138
139static int ael1006_intr_disable(struct cphy *phy)
140{
141 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
142}
143
144static int ael1006_intr_clear(struct cphy *phy)
145{
146 u32 val;
147
148 return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
149}
150
151static int ael1006_intr_handler(struct cphy *phy)
152{
153 unsigned int status;
154 int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
155
156 if (err)
157 return err;
158 return (status & 1) ? cphy_cause_link_change : 0;
159}
160
161static int ael1006_power_down(struct cphy *phy, int enable)
162{
163 return t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
164 BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
165}
166
167static struct cphy_ops ael1006_ops = {
168 .reset = ael1006_reset,
169 .intr_enable = ael1006_intr_enable,
170 .intr_disable = ael1006_intr_disable,
171 .intr_clear = ael1006_intr_clear,
172 .intr_handler = ael1006_intr_handler,
173 .get_link_status = ael100x_get_link_status,
174 .power_down = ael1006_power_down,
175};
176
177void t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
178 int phy_addr, const struct mdio_ops *mdio_ops)
179{
180 cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops);
181 ael100x_txon(phy);
182}
183
184static struct cphy_ops qt2045_ops = {
185 .reset = ael1006_reset,
186 .intr_enable = ael1006_intr_enable,
187 .intr_disable = ael1006_intr_disable,
188 .intr_clear = ael1006_intr_clear,
189 .intr_handler = ael1006_intr_handler,
190 .get_link_status = ael100x_get_link_status,
191 .power_down = ael1006_power_down,
192};
193
194void t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter,
195 int phy_addr, const struct mdio_ops *mdio_ops)
196{
197 unsigned int stat;
198
199 cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops);
200
201 /*
202 * Some cards where the PHY is supposed to be at address 0 actually
203 * have it at 1.
204 */
205 if (!phy_addr && !mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &stat) &&
206 stat == 0xffff)
207 phy->addr = 1;
208}
209
210static int xaui_direct_reset(struct cphy *phy, int wait)
211{
212 return 0;
213}
214
215static int xaui_direct_get_link_status(struct cphy *phy, int *link_ok,
216 int *speed, int *duplex, int *fc)
217{
218 if (link_ok) {
219 unsigned int status;
220
221 status = t3_read_reg(phy->adapter,
222 XGM_REG(A_XGM_SERDES_STAT0, phy->addr));
223 *link_ok = !(status & F_LOWSIG0);
224 }
225 if (speed)
226 *speed = SPEED_10000;
227 if (duplex)
228 *duplex = DUPLEX_FULL;
229 return 0;
230}
231
232static int xaui_direct_power_down(struct cphy *phy, int enable)
233{
234 return 0;
235}
236
237static struct cphy_ops xaui_direct_ops = {
238 .reset = xaui_direct_reset,
239 .intr_enable = ael1002_intr_noop,
240 .intr_disable = ael1002_intr_noop,
241 .intr_clear = ael1002_intr_noop,
242 .intr_handler = ael1002_intr_noop,
243 .get_link_status = xaui_direct_get_link_status,
244 .power_down = xaui_direct_power_down,
245};
246
247void t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
248 int phy_addr, const struct mdio_ops *mdio_ops)
249{
250 cphy_init(phy, adapter, 1, &xaui_direct_ops, mdio_ops);
251}
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
new file mode 100644
index 000000000000..e23deeb7d06d
--- /dev/null
+++ b/drivers/net/cxgb3/common.h
@@ -0,0 +1,729 @@
1/*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef __CHELSIO_COMMON_H
33#define __CHELSIO_COMMON_H
34
35#include <linux/kernel.h>
36#include <linux/types.h>
37#include <linux/ctype.h>
38#include <linux/delay.h>
39#include <linux/init.h>
40#include <linux/netdevice.h>
41#include <linux/ethtool.h>
42#include <linux/mii.h>
43#include "version.h"
44
45#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__)
46#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ## __VA_ARGS__)
47#define CH_ALERT(adap, fmt, ...) \
48 dev_printk(KERN_ALERT, &adap->pdev->dev, fmt, ## __VA_ARGS__)
49
50/*
51 * More powerful macro that selectively prints messages based on msg_enable.
52 * For info and debugging messages.
53 */
54#define CH_MSG(adapter, level, category, fmt, ...) do { \
55 if ((adapter)->msg_enable & NETIF_MSG_##category) \
56 dev_printk(KERN_##level, &adapter->pdev->dev, fmt, \
57 ## __VA_ARGS__); \
58} while (0)
59
60#ifdef DEBUG
61# define CH_DBG(adapter, category, fmt, ...) \
62 CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
63#else
64# define CH_DBG(adapter, category, fmt, ...)
65#endif
66
67/* Additional NETIF_MSG_* categories */
68#define NETIF_MSG_MMIO 0x8000000
69
70struct t3_rx_mode {
71 struct net_device *dev;
72 struct dev_mc_list *mclist;
73 unsigned int idx;
74};
75
76static inline void init_rx_mode(struct t3_rx_mode *p, struct net_device *dev,
77 struct dev_mc_list *mclist)
78{
79 p->dev = dev;
80 p->mclist = mclist;
81 p->idx = 0;
82}
83
84static inline u8 *t3_get_next_mcaddr(struct t3_rx_mode *rm)
85{
86 u8 *addr = NULL;
87
88 if (rm->mclist && rm->idx < rm->dev->mc_count) {
89 addr = rm->mclist->dmi_addr;
90 rm->mclist = rm->mclist->next;
91 rm->idx++;
92 }
93 return addr;
94}
95
96enum {
97 MAX_NPORTS = 2, /* max # of ports */
98 MAX_FRAME_SIZE = 10240, /* max MAC frame size, including header + FCS */
99 EEPROMSIZE = 8192, /* Serial EEPROM size */
100 RSS_TABLE_SIZE = 64, /* size of RSS lookup and mapping tables */
101 TCB_SIZE = 128, /* TCB size */
102 NMTUS = 16, /* size of MTU table */
103 NCCTRL_WIN = 32, /* # of congestion control windows */
104};
105
106#define MAX_RX_COALESCING_LEN 16224U
107
108enum {
109 PAUSE_RX = 1 << 0,
110 PAUSE_TX = 1 << 1,
111 PAUSE_AUTONEG = 1 << 2
112};
113
114enum {
115 SUPPORTED_OFFLOAD = 1 << 24,
116 SUPPORTED_IRQ = 1 << 25
117};
118
119enum { /* adapter interrupt-maintained statistics */
120 STAT_ULP_CH0_PBL_OOB,
121 STAT_ULP_CH1_PBL_OOB,
122 STAT_PCI_CORR_ECC,
123
124 IRQ_NUM_STATS /* keep last */
125};
126
127enum {
128 SGE_QSETS = 8, /* # of SGE Tx/Rx/RspQ sets */
129 SGE_RXQ_PER_SET = 2, /* # of Rx queues per set */
130 SGE_TXQ_PER_SET = 3 /* # of Tx queues per set */
131};
132
133enum sge_context_type { /* SGE egress context types */
134 SGE_CNTXT_RDMA = 0,
135 SGE_CNTXT_ETH = 2,
136 SGE_CNTXT_OFLD = 4,
137 SGE_CNTXT_CTRL = 5
138};
139
140enum {
141 AN_PKT_SIZE = 32, /* async notification packet size */
142 IMMED_PKT_SIZE = 48 /* packet size for immediate data */
143};
144
145struct sg_ent { /* SGE scatter/gather entry */
146 u32 len[2];
147 u64 addr[2];
148};
149
150#ifndef SGE_NUM_GENBITS
151/* Must be 1 or 2 */
152# define SGE_NUM_GENBITS 2
153#endif
154
155#define TX_DESC_FLITS 16U
156#define WR_FLITS (TX_DESC_FLITS + 1 - SGE_NUM_GENBITS)
157
158struct cphy;
159struct adapter;
160
161struct mdio_ops {
162 int (*read)(struct adapter *adapter, int phy_addr, int mmd_addr,
163 int reg_addr, unsigned int *val);
164 int (*write)(struct adapter *adapter, int phy_addr, int mmd_addr,
165 int reg_addr, unsigned int val);
166};
167
168struct adapter_info {
169 unsigned char nports; /* # of ports */
170 unsigned char phy_base_addr; /* MDIO PHY base address */
171 unsigned char mdien;
172 unsigned char mdiinv;
173 unsigned int gpio_out; /* GPIO output settings */
174 unsigned int gpio_intr; /* GPIO IRQ enable mask */
175 unsigned long caps; /* adapter capabilities */
176 const struct mdio_ops *mdio_ops; /* MDIO operations */
177 const char *desc; /* product description */
178};
179
180struct port_type_info {
181 void (*phy_prep)(struct cphy *phy, struct adapter *adapter,
182 int phy_addr, const struct mdio_ops *ops);
183 unsigned int caps;
184 const char *desc;
185};
186
187struct mc5_stats {
188 unsigned long parity_err;
189 unsigned long active_rgn_full;
190 unsigned long nfa_srch_err;
191 unsigned long unknown_cmd;
192 unsigned long reqq_parity_err;
193 unsigned long dispq_parity_err;
194 unsigned long del_act_empty;
195};
196
197struct mc7_stats {
198 unsigned long corr_err;
199 unsigned long uncorr_err;
200 unsigned long parity_err;
201 unsigned long addr_err;
202};
203
204struct mac_stats {
205 u64 tx_octets; /* total # of octets in good frames */
206 u64 tx_octets_bad; /* total # of octets in error frames */
207 u64 tx_frames; /* all good frames */
208 u64 tx_mcast_frames; /* good multicast frames */
209 u64 tx_bcast_frames; /* good broadcast frames */
210 u64 tx_pause; /* # of transmitted pause frames */
211 u64 tx_deferred; /* frames with deferred transmissions */
212 u64 tx_late_collisions; /* # of late collisions */
213 u64 tx_total_collisions; /* # of total collisions */
214 u64 tx_excess_collisions; /* frame errors from excessive collissions */
215 u64 tx_underrun; /* # of Tx FIFO underruns */
216 u64 tx_len_errs; /* # of Tx length errors */
217 u64 tx_mac_internal_errs; /* # of internal MAC errors on Tx */
218 u64 tx_excess_deferral; /* # of frames with excessive deferral */
219 u64 tx_fcs_errs; /* # of frames with bad FCS */
220
221 u64 tx_frames_64; /* # of Tx frames in a particular range */
222 u64 tx_frames_65_127;
223 u64 tx_frames_128_255;
224 u64 tx_frames_256_511;
225 u64 tx_frames_512_1023;
226 u64 tx_frames_1024_1518;
227 u64 tx_frames_1519_max;
228
229 u64 rx_octets; /* total # of octets in good frames */
230 u64 rx_octets_bad; /* total # of octets in error frames */
231 u64 rx_frames; /* all good frames */
232 u64 rx_mcast_frames; /* good multicast frames */
233 u64 rx_bcast_frames; /* good broadcast frames */
234 u64 rx_pause; /* # of received pause frames */
235 u64 rx_fcs_errs; /* # of received frames with bad FCS */
236 u64 rx_align_errs; /* alignment errors */
237 u64 rx_symbol_errs; /* symbol errors */
238 u64 rx_data_errs; /* data errors */
239 u64 rx_sequence_errs; /* sequence errors */
240 u64 rx_runt; /* # of runt frames */
241 u64 rx_jabber; /* # of jabber frames */
242 u64 rx_short; /* # of short frames */
243 u64 rx_too_long; /* # of oversized frames */
244 u64 rx_mac_internal_errs; /* # of internal MAC errors on Rx */
245
246 u64 rx_frames_64; /* # of Rx frames in a particular range */
247 u64 rx_frames_65_127;
248 u64 rx_frames_128_255;
249 u64 rx_frames_256_511;
250 u64 rx_frames_512_1023;
251 u64 rx_frames_1024_1518;
252 u64 rx_frames_1519_max;
253
254 u64 rx_cong_drops; /* # of Rx drops due to SGE congestion */
255
256 unsigned long tx_fifo_parity_err;
257 unsigned long rx_fifo_parity_err;
258 unsigned long tx_fifo_urun;
259 unsigned long rx_fifo_ovfl;
260 unsigned long serdes_signal_loss;
261 unsigned long xaui_pcs_ctc_err;
262 unsigned long xaui_pcs_align_change;
263};
264
265struct tp_mib_stats {
266 u32 ipInReceive_hi;
267 u32 ipInReceive_lo;
268 u32 ipInHdrErrors_hi;
269 u32 ipInHdrErrors_lo;
270 u32 ipInAddrErrors_hi;
271 u32 ipInAddrErrors_lo;
272 u32 ipInUnknownProtos_hi;
273 u32 ipInUnknownProtos_lo;
274 u32 ipInDiscards_hi;
275 u32 ipInDiscards_lo;
276 u32 ipInDelivers_hi;
277 u32 ipInDelivers_lo;
278 u32 ipOutRequests_hi;
279 u32 ipOutRequests_lo;
280 u32 ipOutDiscards_hi;
281 u32 ipOutDiscards_lo;
282 u32 ipOutNoRoutes_hi;
283 u32 ipOutNoRoutes_lo;
284 u32 ipReasmTimeout;
285 u32 ipReasmReqds;
286 u32 ipReasmOKs;
287 u32 ipReasmFails;
288
289 u32 reserved[8];
290
291 u32 tcpActiveOpens;
292 u32 tcpPassiveOpens;
293 u32 tcpAttemptFails;
294 u32 tcpEstabResets;
295 u32 tcpOutRsts;
296 u32 tcpCurrEstab;
297 u32 tcpInSegs_hi;
298 u32 tcpInSegs_lo;
299 u32 tcpOutSegs_hi;
300 u32 tcpOutSegs_lo;
301 u32 tcpRetransSeg_hi;
302 u32 tcpRetransSeg_lo;
303 u32 tcpInErrs_hi;
304 u32 tcpInErrs_lo;
305 u32 tcpRtoMin;
306 u32 tcpRtoMax;
307};
308
309struct tp_params {
310 unsigned int nchan; /* # of channels */
311 unsigned int pmrx_size; /* total PMRX capacity */
312 unsigned int pmtx_size; /* total PMTX capacity */
313 unsigned int cm_size; /* total CM capacity */
314 unsigned int chan_rx_size; /* per channel Rx size */
315 unsigned int chan_tx_size; /* per channel Tx size */
316 unsigned int rx_pg_size; /* Rx page size */
317 unsigned int tx_pg_size; /* Tx page size */
318 unsigned int rx_num_pgs; /* # of Rx pages */
319 unsigned int tx_num_pgs; /* # of Tx pages */
320 unsigned int ntimer_qs; /* # of timer queues */
321};
322
323struct qset_params { /* SGE queue set parameters */
324 unsigned int polling; /* polling/interrupt service for rspq */
325 unsigned int coalesce_usecs; /* irq coalescing timer */
326 unsigned int rspq_size; /* # of entries in response queue */
327 unsigned int fl_size; /* # of entries in regular free list */
328 unsigned int jumbo_size; /* # of entries in jumbo free list */
329 unsigned int txq_size[SGE_TXQ_PER_SET]; /* Tx queue sizes */
330 unsigned int cong_thres; /* FL congestion threshold */
331};
332
333struct sge_params {
334 unsigned int max_pkt_size; /* max offload pkt size */
335 struct qset_params qset[SGE_QSETS];
336};
337
338struct mc5_params {
339 unsigned int mode; /* selects MC5 width */
340 unsigned int nservers; /* size of server region */
341 unsigned int nfilters; /* size of filter region */
342 unsigned int nroutes; /* size of routing region */
343};
344
345/* Default MC5 region sizes */
346enum {
347 DEFAULT_NSERVERS = 512,
348 DEFAULT_NFILTERS = 128
349};
350
351/* MC5 modes, these must be non-0 */
352enum {
353 MC5_MODE_144_BIT = 1,
354 MC5_MODE_72_BIT = 2
355};
356
357struct vpd_params {
358 unsigned int cclk;
359 unsigned int mclk;
360 unsigned int uclk;
361 unsigned int mdc;
362 unsigned int mem_timing;
363 u8 eth_base[6];
364 u8 port_type[MAX_NPORTS];
365 unsigned short xauicfg[2];
366};
367
368struct pci_params {
369 unsigned int vpd_cap_addr;
370 unsigned int pcie_cap_addr;
371 unsigned short speed;
372 unsigned char width;
373 unsigned char variant;
374};
375
376enum {
377 PCI_VARIANT_PCI,
378 PCI_VARIANT_PCIX_MODE1_PARITY,
379 PCI_VARIANT_PCIX_MODE1_ECC,
380 PCI_VARIANT_PCIX_266_MODE2,
381 PCI_VARIANT_PCIE
382};
383
384struct adapter_params {
385 struct sge_params sge;
386 struct mc5_params mc5;
387 struct tp_params tp;
388 struct vpd_params vpd;
389 struct pci_params pci;
390
391 const struct adapter_info *info;
392
393 unsigned short mtus[NMTUS];
394 unsigned short a_wnd[NCCTRL_WIN];
395 unsigned short b_wnd[NCCTRL_WIN];
396
397 unsigned int nports; /* # of ethernet ports */
398 unsigned int stats_update_period; /* MAC stats accumulation period */
399 unsigned int linkpoll_period; /* link poll period in 0.1s */
400 unsigned int rev; /* chip revision */
401};
402
403struct trace_params {
404 u32 sip;
405 u32 sip_mask;
406 u32 dip;
407 u32 dip_mask;
408 u16 sport;
409 u16 sport_mask;
410 u16 dport;
411 u16 dport_mask;
412 u32 vlan:12;
413 u32 vlan_mask:12;
414 u32 intf:4;
415 u32 intf_mask:4;
416 u8 proto;
417 u8 proto_mask;
418};
419
420struct link_config {
421 unsigned int supported; /* link capabilities */
422 unsigned int advertising; /* advertised capabilities */
423 unsigned short requested_speed; /* speed user has requested */
424 unsigned short speed; /* actual link speed */
425 unsigned char requested_duplex; /* duplex user has requested */
426 unsigned char duplex; /* actual link duplex */
427 unsigned char requested_fc; /* flow control user has requested */
428 unsigned char fc; /* actual link flow control */
429 unsigned char autoneg; /* autonegotiating? */
430 unsigned int link_ok; /* link up? */
431};
432
433#define SPEED_INVALID 0xffff
434#define DUPLEX_INVALID 0xff
435
436struct mc5 {
437 struct adapter *adapter;
438 unsigned int tcam_size;
439 unsigned char part_type;
440 unsigned char parity_enabled;
441 unsigned char mode;
442 struct mc5_stats stats;
443};
444
445static inline unsigned int t3_mc5_size(const struct mc5 *p)
446{
447 return p->tcam_size;
448}
449
450struct mc7 {
451 struct adapter *adapter; /* backpointer to adapter */
452 unsigned int size; /* memory size in bytes */
453 unsigned int width; /* MC7 interface width */
454 unsigned int offset; /* register address offset for MC7 instance */
455 const char *name; /* name of MC7 instance */
456 struct mc7_stats stats; /* MC7 statistics */
457};
458
/* Return the size in bytes of an MC7 instance's memory */
static inline unsigned int t3_mc7_size(const struct mc7 *p)
{
	return p->size;
}
463
/* State of one MAC instance */
struct cmac {
	struct adapter *adapter;	/* backpointer to owning adapter */
	unsigned int offset;	/* register address offset of this MAC instance */
	unsigned int nucast;	/* # of address filters for unicast MACs */
	struct mac_stats stats;	/* MAC statistics */
};
470
/* MAC direction flags (used with t3_mac_enable()/t3_mac_disable()) and sizes */
enum {
	MAC_DIRECTION_RX = 1,	/* receive path */
	MAC_DIRECTION_TX = 2,	/* transmit path */
	MAC_RXFIFO_SIZE = 32768	/* MAC RX FIFO size in bytes */
};
476
/* IEEE 802.3ae specified MDIO devices (clause 45 MMD addresses) */
enum {
	MDIO_DEV_PMA_PMD = 1,	/* PMA/PMD */
	MDIO_DEV_WIS = 2,	/* WAN interface sublayer */
	MDIO_DEV_PCS = 3,	/* physical coding sublayer */
	MDIO_DEV_XGXS = 4	/* XGMII extender sublayer */
};

/* PHY loopback direction */
enum {
	PHY_LOOPBACK_TX = 1,
	PHY_LOOPBACK_RX = 2
};

/* PHY interrupt types (returned by the PHY intr_handler op) */
enum {
	cphy_cause_link_change = 1,
	cphy_cause_fifo_error = 2
};
496
/* PHY operations */
struct cphy_ops {
	/* construction/teardown */
	void (*destroy)(struct cphy *phy);
	int (*reset)(struct cphy *phy, int wait);

	/* interrupt management */
	int (*intr_enable)(struct cphy *phy);
	int (*intr_disable)(struct cphy *phy);
	int (*intr_clear)(struct cphy *phy);
	int (*intr_handler)(struct cphy *phy);

	/* autonegotiation control */
	int (*autoneg_enable)(struct cphy *phy);
	int (*autoneg_restart)(struct cphy *phy);

	/* link parameter control and status retrieval */
	int (*advertise)(struct cphy *phy, unsigned int advertise_map);
	int (*set_loopback)(struct cphy *phy, int mmd, int dir, int enable);
	int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
	int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
			       int *duplex, int *fc);
	int (*power_down)(struct cphy *phy, int enable);
};
517
/* A PHY instance */
struct cphy {
	int addr;			/* PHY address */
	struct adapter *adapter;	/* associated adapter */
	unsigned long fifo_errors;	/* FIFO over/under-flows */
	const struct cphy_ops *ops;	/* PHY operations */
	/* MDIO accessors, installed from struct mdio_ops by cphy_init() */
	int (*mdio_read)(struct adapter *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int *val);
	int (*mdio_write)(struct adapter *adapter, int phy_addr, int mmd_addr,
			  int reg_addr, unsigned int val);
};
529
530/* Convenience MDIO read/write wrappers */
531static inline int mdio_read(struct cphy *phy, int mmd, int reg,
532 unsigned int *valp)
533{
534 return phy->mdio_read(phy->adapter, phy->addr, mmd, reg, valp);
535}
536
537static inline int mdio_write(struct cphy *phy, int mmd, int reg,
538 unsigned int val)
539{
540 return phy->mdio_write(phy->adapter, phy->addr, mmd, reg, val);
541}
542
543/* Convenience initializer */
544static inline void cphy_init(struct cphy *phy, struct adapter *adapter,
545 int phy_addr, struct cphy_ops *phy_ops,
546 const struct mdio_ops *mdio_ops)
547{
548 phy->adapter = adapter;
549 phy->addr = phy_addr;
550 phy->ops = phy_ops;
551 if (mdio_ops) {
552 phy->mdio_read = mdio_ops->read;
553 phy->mdio_write = mdio_ops->write;
554 }
555}
556
557/* Accumulate MAC statistics every 180 seconds. For 1G we multiply by 10. */
558#define MAC_STATS_ACCUM_SECS 180
559
560#define XGM_REG(reg_addr, idx) \
561 ((reg_addr) + (idx) * (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR))
562
563struct addr_val_pair {
564 unsigned int reg_addr;
565 unsigned int val;
566};
567
568#include "adapter.h"
569
570#ifndef PCI_VENDOR_ID_CHELSIO
571# define PCI_VENDOR_ID_CHELSIO 0x1425
572#endif
573
574#define for_each_port(adapter, iter) \
575 for (iter = 0; iter < (adapter)->params.nports; ++iter)
576
577#define adapter_info(adap) ((adap)->params.info)
578
/* Nonzero iff the adapter's ports attach over XAUI (SUPPORTED_AUI capability) */
static inline int uses_xaui(const struct adapter *adap)
{
	return adapter_info(adap)->caps & SUPPORTED_AUI;
}
583
/* Nonzero iff the adapter supports 10G full-duplex operation */
static inline int is_10G(const struct adapter *adap)
{
	return adapter_info(adap)->caps & SUPPORTED_10000baseT_Full;
}
588
/* Nonzero iff the adapter is offload-capable (SUPPORTED_OFFLOAD capability) */
static inline int is_offload(const struct adapter *adap)
{
	return adapter_info(adap)->caps & SUPPORTED_OFFLOAD;
}
593
/*
 * Core clock ticks per microsecond.
 * NOTE(review): the /1000 implies vpd.cclk is in kHz — confirm against
 * the VPD parsing code.
 */
static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
	return adap->params.vpd.cclk / 1000;
}
598
/* Nonzero iff the adapter is attached via PCI Express */
static inline unsigned int is_pcie(const struct adapter *adap)
{
	return adap->params.pci.variant == PCI_VARIANT_PCIE;
}
603
604void t3_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
605 u32 val);
606void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
607 int n, unsigned int offset);
608int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
609 int polarity, int attempts, int delay, u32 *valp);
/*
 * Wait for the masked bits of a register to reach the given polarity,
 * discarding the final register value (thin wrapper around
 * t3_wait_op_done_val() with valp == NULL).
 */
static inline int t3_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
616int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
617 unsigned int set);
618int t3_phy_reset(struct cphy *phy, int mmd, int wait);
619int t3_phy_advertise(struct cphy *phy, unsigned int advert);
620int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex);
621
622void t3_intr_enable(struct adapter *adapter);
623void t3_intr_disable(struct adapter *adapter);
624void t3_intr_clear(struct adapter *adapter);
625void t3_port_intr_enable(struct adapter *adapter, int idx);
626void t3_port_intr_disable(struct adapter *adapter, int idx);
627void t3_port_intr_clear(struct adapter *adapter, int idx);
628int t3_slow_intr_handler(struct adapter *adapter);
629int t3_phy_intr_handler(struct adapter *adapter);
630
631void t3_link_changed(struct adapter *adapter, int port_id);
632int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
633const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
634int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
635int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
636int t3_seeprom_wp(struct adapter *adapter, int enable);
637int t3_read_flash(struct adapter *adapter, unsigned int addr,
638 unsigned int nwords, u32 *data, int byte_oriented);
639int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size);
640int t3_get_fw_version(struct adapter *adapter, u32 *vers);
641int t3_check_fw_version(struct adapter *adapter);
642int t3_init_hw(struct adapter *adapter, u32 fw_params);
643void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
644void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
645int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
646 int reset);
647void t3_led_ready(struct adapter *adapter);
648void t3_fatal_err(struct adapter *adapter);
649void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
650void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
651 const u8 * cpus, const u16 *rspq);
652int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map);
653int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask);
654int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
655 unsigned int n, unsigned int *valp);
656int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
657 u64 *buf);
658
659int t3_mac_reset(struct cmac *mac);
660void t3b_pcs_reset(struct cmac *mac);
661int t3_mac_enable(struct cmac *mac, int which);
662int t3_mac_disable(struct cmac *mac, int which);
663int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
664int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm);
665int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
666int t3_mac_set_num_ucast(struct cmac *mac, int n);
667const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
668int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
669
670void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode);
671int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
672 unsigned int nroutes);
673void t3_mc5_intr_handler(struct mc5 *mc5);
674int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start, unsigned int n,
675 u32 *buf);
676
677int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh);
678void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size);
679void t3_tp_set_offload_mode(struct adapter *adap, int enable);
680void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps);
681void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
682 unsigned short alpha[NCCTRL_WIN],
683 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap);
684void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS]);
685void t3_get_cong_cntl_tab(struct adapter *adap,
686 unsigned short incr[NMTUS][NCCTRL_WIN]);
687void t3_config_trace_filter(struct adapter *adapter,
688 const struct trace_params *tp, int filter_index,
689 int invert, int enable);
690int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched);
691
692void t3_sge_prep(struct adapter *adap, struct sge_params *p);
693void t3_sge_init(struct adapter *adap, struct sge_params *p);
694int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
695 enum sge_context_type type, int respq, u64 base_addr,
696 unsigned int size, unsigned int token, int gen,
697 unsigned int cidx);
698int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
699 int gts_enable, u64 base_addr, unsigned int size,
700 unsigned int esize, unsigned int cong_thres, int gen,
701 unsigned int cidx);
702int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
703 int irq_vec_idx, u64 base_addr, unsigned int size,
704 unsigned int fl_thres, int gen, unsigned int cidx);
705int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
706 unsigned int size, int rspq, int ovfl_mode,
707 unsigned int credits, unsigned int credit_thres);
708int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable);
709int t3_sge_disable_fl(struct adapter *adapter, unsigned int id);
710int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id);
711int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id);
712int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4]);
713int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4]);
714int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4]);
715int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4]);
716int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
717 unsigned int credits);
718
719void t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
720 int phy_addr, const struct mdio_ops *mdio_ops);
721void t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
722 int phy_addr, const struct mdio_ops *mdio_ops);
723void t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
724 int phy_addr, const struct mdio_ops *mdio_ops);
725void t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
726 const struct mdio_ops *mdio_ops);
727void t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
728 int phy_addr, const struct mdio_ops *mdio_ops);
729#endif /* __CHELSIO_COMMON_H */
diff --git a/drivers/net/cxgb3/cxgb3_ctl_defs.h b/drivers/net/cxgb3/cxgb3_ctl_defs.h
new file mode 100644
index 000000000000..2095ddacff78
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_ctl_defs.h
@@ -0,0 +1,164 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef _CXGB3_OFFLOAD_CTL_DEFS_H
33#define _CXGB3_OFFLOAD_CTL_DEFS_H
34
/*
 * Operation codes for the offload control interface: GET_* query
 * driver parameters and resources, the remainder drive the iSCSI and
 * RDMA upper-layer protocols.
 */
enum {
	GET_MAX_OUTSTANDING_WR,
	GET_TX_MAX_CHUNK,
	GET_TID_RANGE,
	GET_STID_RANGE,
	GET_RTBL_RANGE,
	GET_L2T_CAPACITY,
	GET_MTUS,
	GET_WR_LEN,
	GET_IFF_FROM_MAC,
	GET_DDP_PARAMS,
	GET_PORTS,

	/* iSCSI ULP operations */
	ULP_ISCSI_GET_PARAMS,
	ULP_ISCSI_SET_PARAMS,

	/* RDMA ULP operations */
	RDMA_GET_PARAMS,
	RDMA_CQ_OP,
	RDMA_CQ_SETUP,
	RDMA_CQ_DISABLE,
	RDMA_CTRL_QP_SETUP,
	RDMA_GET_MEM,
};
58
59/*
60 * Structure used to describe a TID range. Valid TIDs are [base, base+num).
61 */
62struct tid_range {
63 unsigned int base; /* first TID */
64 unsigned int num; /* number of TIDs in range */
65};
66
67/*
68 * Structure used to request the size and contents of the MTU table.
69 */
70struct mtutab {
71 unsigned int size; /* # of entries in the MTU table */
72 const unsigned short *mtus; /* the MTU table values */
73};
74
75struct net_device;
76
77/*
78 * Structure used to request the adapter net_device owning a given MAC address.
79 */
80struct iff_mac {
81 struct net_device *dev; /* the net_device */
82 const unsigned char *mac_addr; /* MAC address to lookup */
83 u16 vlan_tag;
84};
85
86struct pci_dev;
87
88/*
89 * Structure used to request the TCP DDP parameters.
90 */
91struct ddp_params {
92 unsigned int llimit; /* TDDP region start address */
93 unsigned int ulimit; /* TDDP region end address */
94 unsigned int tag_mask; /* TDDP tag mask */
95 struct pci_dev *pdev;
96};
97
/* Structure used to return the adapter's ports (GET_PORTS operation). */
struct adap_ports {
	unsigned int nports;	/* number of ports on this adapter */
	/* net devices of the ports; presumably only the first nports
	 * entries are valid — TODO confirm against the GET_PORTS handler */
	struct net_device *lldevs[2];
};
102
/*
 * Structure used to return information to the iscsi layer.
 */
struct ulp_iscsi_info {
	unsigned int offset;
	unsigned int llimit;	/* region start — NOTE(review): presumably
				 * mirrors ddp_params.llimit; confirm */
	unsigned int ulimit;	/* region end */
	unsigned int tagmask;	/* tag mask */
	unsigned int pgsz3;	/* page sizes 3..0 — semantics defined by the
				 * iSCSI ULP; TODO confirm */
	unsigned int pgsz2;
	unsigned int pgsz1;
	unsigned int pgsz0;
	unsigned int max_rxsz;	/* max receive size */
	unsigned int max_txsz;	/* max transmit size */
	struct pci_dev *pdev;	/* associated PCI device */
};
119
120/*
121 * Structure used to return information to the RDMA layer.
122 */
123struct rdma_info {
124 unsigned int tpt_base; /* TPT base address */
125 unsigned int tpt_top; /* TPT last entry address */
126 unsigned int pbl_base; /* PBL base address */
127 unsigned int pbl_top; /* PBL last entry address */
128 unsigned int rqt_base; /* RQT base address */
129 unsigned int rqt_top; /* RQT last entry address */
130 unsigned int udbell_len; /* user doorbell region length */
131 unsigned long udbell_physbase; /* user doorbell physical start addr */
132 void __iomem *kdb_addr; /* kernel doorbell register address */
133 struct pci_dev *pdev; /* associated PCI device */
134};
135
136/*
137 * Structure used to request an operation on an RDMA completion queue.
138 */
139struct rdma_cq_op {
140 unsigned int id;
141 unsigned int op;
142 unsigned int credits;
143};
144
145/*
146 * Structure used to setup RDMA completion queues.
147 */
148struct rdma_cq_setup {
149 unsigned int id;
150 unsigned long long base_addr;
151 unsigned int size;
152 unsigned int credits;
153 unsigned int credit_thres;
154 unsigned int ovfl_mode;
155};
156
157/*
158 * Structure used to setup the RDMA control egress context.
159 */
160struct rdma_ctrlqp_setup {
161 unsigned long long base_addr;
162 unsigned int size;
163};
164#endif /* _CXGB3_OFFLOAD_CTL_DEFS_H */
diff --git a/drivers/net/cxgb3/cxgb3_defs.h b/drivers/net/cxgb3/cxgb3_defs.h
new file mode 100644
index 000000000000..16e004990c59
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_defs.h
@@ -0,0 +1,99 @@
1/*
2 * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _CHELSIO_DEFS_H
34#define _CHELSIO_DEFS_H
35
36#include <linux/skbuff.h>
37#include <net/tcp.h>
38
39#include "t3cdev.h"
40
41#include "cxgb3_offload.h"
42
43#define VALIDATE_TID 1
44
45void *cxgb_alloc_mem(unsigned long size);
46void cxgb_free_mem(void *addr);
47void cxgb_neigh_update(struct neighbour *neigh);
48void cxgb_redirect(struct dst_entry *old, struct dst_entry *new);
49
50/*
51 * Map an ATID or STID to their entries in the corresponding TID tables.
52 */
53static inline union active_open_entry *atid2entry(const struct tid_info *t,
54 unsigned int atid)
55{
56 return &t->atid_tab[atid - t->atid_base];
57}
58
59static inline union listen_entry *stid2entry(const struct tid_info *t,
60 unsigned int stid)
61{
62 return &t->stid_tab[stid - t->stid_base];
63}
64
65/*
66 * Find the connection corresponding to a TID.
67 */
68static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t,
69 unsigned int tid)
70{
71 return tid < t->ntids ? &(t->tid_tab[tid]) : NULL;
72}
73
74/*
75 * Find the connection corresponding to a server TID.
76 */
77static inline struct t3c_tid_entry *lookup_stid(const struct tid_info *t,
78 unsigned int tid)
79{
80 if (tid < t->stid_base || tid >= t->stid_base + t->nstids)
81 return NULL;
82 return &(stid2entry(t, tid)->t3c_tid);
83}
84
85/*
86 * Find the connection corresponding to an active-open TID.
87 */
88static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t,
89 unsigned int tid)
90{
91 if (tid < t->atid_base || tid >= t->atid_base + t->natids)
92 return NULL;
93 return &(atid2entry(t, tid)->t3c_tid);
94}
95
96int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n);
97int attach_t3cdev(struct t3cdev *dev);
98void detach_t3cdev(struct t3cdev *dev);
99#endif
diff --git a/drivers/net/cxgb3/cxgb3_ioctl.h b/drivers/net/cxgb3/cxgb3_ioctl.h
new file mode 100644
index 000000000000..a94281861a66
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_ioctl.h
@@ -0,0 +1,185 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef __CHIOCTL_H__
33#define __CHIOCTL_H__
34
35/*
36 * Ioctl commands specific to this driver.
37 */
38enum {
39 CHELSIO_SETREG = 1024,
40 CHELSIO_GETREG,
41 CHELSIO_SETTPI,
42 CHELSIO_GETTPI,
43 CHELSIO_GETMTUTAB,
44 CHELSIO_SETMTUTAB,
45 CHELSIO_GETMTU,
46 CHELSIO_SET_PM,
47 CHELSIO_GET_PM,
48 CHELSIO_GET_TCAM,
49 CHELSIO_SET_TCAM,
50 CHELSIO_GET_TCB,
51 CHELSIO_GET_MEM,
52 CHELSIO_LOAD_FW,
53 CHELSIO_GET_PROTO,
54 CHELSIO_SET_PROTO,
55 CHELSIO_SET_TRACE_FILTER,
56 CHELSIO_SET_QSET_PARAMS,
57 CHELSIO_GET_QSET_PARAMS,
58 CHELSIO_SET_QSET_NUM,
59 CHELSIO_GET_QSET_NUM,
60 CHELSIO_SET_PKTSCHED,
61};
62
63struct ch_reg {
64 uint32_t cmd;
65 uint32_t addr;
66 uint32_t val;
67};
68
69struct ch_cntxt {
70 uint32_t cmd;
71 uint32_t cntxt_type;
72 uint32_t cntxt_id;
73 uint32_t data[4];
74};
75
76/* context types */
77enum { CNTXT_TYPE_EGRESS, CNTXT_TYPE_FL, CNTXT_TYPE_RSP, CNTXT_TYPE_CQ };
78
79struct ch_desc {
80 uint32_t cmd;
81 uint32_t queue_num;
82 uint32_t idx;
83 uint32_t size;
84 uint8_t data[128];
85};
86
/*
 * Argument for the memory access ioctls (e.g. CHELSIO_GET_MEM): covers
 * len bytes of the memory identified by mem_id starting at addr, with
 * the payload carried in the trailing buf.
 */
struct ch_mem_range {
	uint32_t cmd;		/* ioctl command */
	uint32_t mem_id;	/* MEM_CM, MEM_PMRX or MEM_PMTX */
	uint32_t addr;		/* start address within the memory */
	uint32_t len;		/* # of bytes covered */
	uint32_t version;
	/* Flexible array member (C99); the original used the GNU
	 * zero-length-array extension `buf[0]`.  Layout is identical
	 * but this form is standard C and safe to index. */
	uint8_t buf[];
};
95
96struct ch_qset_params {
97 uint32_t cmd;
98 uint32_t qset_idx;
99 int32_t txq_size[3];
100 int32_t rspq_size;
101 int32_t fl_size[2];
102 int32_t intr_lat;
103 int32_t polling;
104 int32_t cong_thres;
105};
106
107struct ch_pktsched_params {
108 uint32_t cmd;
109 uint8_t sched;
110 uint8_t idx;
111 uint8_t min;
112 uint8_t max;
113 uint8_t binding;
114};
115
116#ifndef TCB_SIZE
117# define TCB_SIZE 128
118#endif
119
120/* TCB size in 32-bit words */
121#define TCB_WORDS (TCB_SIZE / 4)
122
123enum { MEM_CM, MEM_PMRX, MEM_PMTX }; /* ch_mem_range.mem_id values */
124
125struct ch_mtus {
126 uint32_t cmd;
127 uint32_t nmtus;
128 uint16_t mtus[NMTUS];
129};
130
131struct ch_pm {
132 uint32_t cmd;
133 uint32_t tx_pg_sz;
134 uint32_t tx_num_pg;
135 uint32_t rx_pg_sz;
136 uint32_t rx_num_pg;
137 uint32_t pm_total;
138};
139
140struct ch_tcam {
141 uint32_t cmd;
142 uint32_t tcam_size;
143 uint32_t nservers;
144 uint32_t nroutes;
145 uint32_t nfilters;
146};
147
148struct ch_tcb {
149 uint32_t cmd;
150 uint32_t tcb_index;
151 uint32_t tcb_data[TCB_WORDS];
152};
153
154struct ch_tcam_word {
155 uint32_t cmd;
156 uint32_t addr;
157 uint32_t buf[3];
158};
159
/* Argument for CHELSIO_SET_TRACE_FILTER: packet fields plus per-field masks */
struct ch_trace {
	uint32_t cmd;		/* ioctl command */
	uint32_t sip;		/* source IP address */
	uint32_t sip_mask;	/* source IP mask */
	uint32_t dip;		/* destination IP address */
	uint32_t dip_mask;	/* destination IP mask */
	uint16_t sport;		/* source port */
	uint16_t sport_mask;	/* source port mask */
	uint16_t dport;		/* destination port */
	uint16_t dport_mask;	/* destination port mask */
	uint32_t vlan:12;	/* 12-bit VLAN ID */
	uint32_t vlan_mask:12;	/* VLAN ID mask */
	uint32_t intf:4;	/* interface number */
	uint32_t intf_mask:4;	/* interface number mask */
	uint8_t proto;		/* IP protocol */
	uint8_t proto_mask;	/* IP protocol mask */
	uint8_t invert_match:1;	/* invert the sense of the match */
	uint8_t config_tx:1;	/* configure the TX filter */
	uint8_t config_rx:1;	/* configure the RX filter */
	uint8_t trace_tx:1;	/* enable TX tracing */
	uint8_t trace_rx:1;	/* enable RX tracing */
};
182
183#define SIOCCHIOCTL SIOCDEVPRIVATE
184
185#endif
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
new file mode 100644
index 000000000000..dfa035a1ad45
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -0,0 +1,2515 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/dma-mapping.h>
37#include <linux/netdevice.h>
38#include <linux/etherdevice.h>
39#include <linux/if_vlan.h>
40#include <linux/mii.h>
41#include <linux/sockios.h>
42#include <linux/workqueue.h>
43#include <linux/proc_fs.h>
44#include <linux/rtnetlink.h>
45#include <asm/uaccess.h>
46
47#include "common.h"
48#include "cxgb3_ioctl.h"
49#include "regs.h"
50#include "cxgb3_offload.h"
51#include "version.h"
52
53#include "cxgb3_ctl_defs.h"
54#include "t3_cpl.h"
55#include "firmware_exports.h"
56
57enum {
58 MAX_TXQ_ENTRIES = 16384,
59 MAX_CTRL_TXQ_ENTRIES = 1024,
60 MAX_RSPQ_ENTRIES = 16384,
61 MAX_RX_BUFFERS = 16384,
62 MAX_RX_JUMBO_BUFFERS = 16384,
63 MIN_TXQ_ENTRIES = 4,
64 MIN_CTRL_TXQ_ENTRIES = 4,
65 MIN_RSPQ_ENTRIES = 32,
66 MIN_FL_ENTRIES = 32
67};
68
69#define PORT_MASK ((1 << MAX_NPORTS) - 1)
70
71#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
72 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
73 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
74
75#define EEPROM_MAGIC 0x38E2F10C
76
77#define to_net_dev(class) container_of(class, struct net_device, class_dev)
78
79#define CH_DEVICE(devid, ssid, idx) \
80 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
81
82static const struct pci_device_id cxgb3_pci_tbl[] = {
83 CH_DEVICE(0x20, 1, 0), /* PE9000 */
84 CH_DEVICE(0x21, 1, 1), /* T302E */
85 CH_DEVICE(0x22, 1, 2), /* T310E */
86 CH_DEVICE(0x23, 1, 3), /* T320X */
87 CH_DEVICE(0x24, 1, 1), /* T302X */
88 CH_DEVICE(0x25, 1, 3), /* T320E */
89 CH_DEVICE(0x26, 1, 2), /* T310X */
90 CH_DEVICE(0x30, 1, 2), /* T3B10 */
91 CH_DEVICE(0x31, 1, 3), /* T3B20 */
92 CH_DEVICE(0x32, 1, 1), /* T3B02 */
93 {0,}
94};
95
96MODULE_DESCRIPTION(DRV_DESC);
97MODULE_AUTHOR("Chelsio Communications");
98MODULE_LICENSE("Dual BSD/GPL");
99MODULE_VERSION(DRV_VERSION);
100MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
101
102static int dflt_msg_enable = DFLT_MSG_ENABLE;
103
104module_param(dflt_msg_enable, int, 0644);
105MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
106
107/*
108 * The driver uses the best interrupt scheme available on a platform in the
109 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
110 * of these schemes the driver may consider as follows:
111 *
112 * msi = 2: choose from among all three options
113 * msi = 1: only consider MSI and pin interrupts
114 * msi = 0: force pin interrupts
115 */
116static int msi = 2;
117
118module_param(msi, int, 0644);
119MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
120
121/*
122 * The driver enables offload as a default.
123 * To disable it, use ofld_disable = 1.
124 */
125
126static int ofld_disable = 0;
127
128module_param(ofld_disable, int, 0644);
129MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
130
131/*
132 * We have work elements that we need to cancel when an interface is taken
133 * down. Normally the work elements would be executed by keventd but that
134 * can deadlock because of linkwatch. If our close method takes the rtnl
135 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
136 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
137 * for our work to complete. Get our own work queue to solve this.
138 */
139static struct workqueue_struct *cxgb3_wq;
140
/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the net device whose port's link settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		/* default shown when speed matches none of the cases below */
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}
171
/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	/* Only toggle the carrier and log when the state really changed. */
	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);
		link_report(dev);
	}
}
202
/*
 * Push the net device's current RX mode (built from the device and its
 * multicast list) down to the port's MAC.
 */
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}
211
/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port: resets
 *	the MAC, programs its MTU, primary address and RX mode, starts the
 *	link, and finally enables both MAC directions.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_reset(mac);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	/* enable RX and TX only after everything else is programmed */
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
232
/*
 * Disable whichever of MSI-X/MSI the adapter is currently using and
 * clear the corresponding flag; no-op when on legacy pin interrupts.
 */
static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
243
/*
 * Interrupt handler for asynchronous events used with MSI-X.
 * @cookie is the adapter and is handed straight to
 * t3_slow_intr_handler().
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}
252
253/*
254 * Name the MSI-X interrupts.
255 */
256static void name_msix_vecs(struct adapter *adap)
257{
258 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
259
260 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
261 adap->msix_info[0].desc[n] = 0;
262
263 for_each_port(adap, j) {
264 struct net_device *d = adap->port[j];
265 const struct port_info *pi = netdev_priv(d);
266
267 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
268 snprintf(adap->msix_info[msi_idx].desc, n,
269 "%s (queue %d)", d->name, i);
270 adap->msix_info[msi_idx].desc[n] = 0;
271 }
272 }
273}
274
/*
 * Request one MSI-X interrupt per SGE queue set, using the vectors
 * starting at index 1 (vector 0 is reserved for slow-path events).
 * On failure all interrupts acquired so far are released again.
 */
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			/* handler choice depends on the queue's NAPI
			 * polling mode */
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				/* unwind: free the IRQs already requested */
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}
300
301/**
302 * setup_rss - configure RSS
303 * @adap: the adapter
304 *
305 * Sets up RSS to distribute packets to multiple receive queues. We
306 * configure the RSS CPU lookup table to distribute to the number of HW
307 * receive queues, and the response queue lookup table to narrow that
308 * down to the response queues actually configured for each port.
309 * We always configure the RSS mapping for two ports since the mapping
310 * table has plenty of entries.
311 */
312static void setup_rss(struct adapter *adap)
313{
314 int i;
315 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
316 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
317 u8 cpus[SGE_QSETS + 1];
318 u16 rspq_map[RSS_TABLE_SIZE];
319
320 for (i = 0; i < SGE_QSETS; ++i)
321 cpus[i] = i;
322 cpus[SGE_QSETS] = 0xff; /* terminator */
323
324 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
325 rspq_map[i] = i % nq0;
326 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
327 }
328
329 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
330 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
331 V_RRCPLCPUSIZE(6), cpus, rspq_map);
332}
333
334/*
335 * If we have multiple receive queues per port serviced by NAPI we need one
336 * netdevice per queue as NAPI operates on netdevices. We already have one
337 * netdevice, namely the one associated with the interface, so we use dummy
338 * ones for any additional queues. Note that these netdevices exist purely
339 * so that NAPI has something to work with, they do not represent network
340 * ports and are not registered.
341 */
342static int init_dummy_netdevs(struct adapter *adap)
343{
344 int i, j, dummy_idx = 0;
345 struct net_device *nd;
346
347 for_each_port(adap, i) {
348 struct net_device *dev = adap->port[i];
349 const struct port_info *pi = netdev_priv(dev);
350
351 for (j = 0; j < pi->nqsets - 1; j++) {
352 if (!adap->dummy_netdev[dummy_idx]) {
353 nd = alloc_netdev(0, "", ether_setup);
354 if (!nd)
355 goto free_all;
356
357 nd->priv = adap;
358 nd->weight = 64;
359 set_bit(__LINK_STATE_START, &nd->state);
360 adap->dummy_netdev[dummy_idx] = nd;
361 }
362 strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
363 dummy_idx++;
364 }
365 }
366 return 0;
367
368free_all:
369 while (--dummy_idx >= 0) {
370 free_netdev(adap->dummy_netdev[dummy_idx]);
371 adap->dummy_netdev[dummy_idx] = NULL;
372 }
373 return -ENOMEM;
374}
375
376/*
377 * Wait until all NAPI handlers are descheduled. This includes the handlers of
378 * both netdevices representing interfaces and the dummy ones for the extra
379 * queues.
380 */
381static void quiesce_rx(struct adapter *adap)
382{
383 int i;
384 struct net_device *dev;
385
386 for_each_port(adap, i) {
387 dev = adap->port[i];
388 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
389 msleep(1);
390 }
391
392 for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
393 dev = adap->dummy_netdev[i];
394 if (dev)
395 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
396 msleep(1);
397 }
398}
399
400/**
401 * setup_sge_qsets - configure SGE Tx/Rx/response queues
402 * @adap: the adapter
403 *
404 * Determines how many sets of SGE queues to use and initializes them.
405 * We support multiple queue sets per port if we have MSI-X, otherwise
406 * just one queue set per port.
407 */
408static int setup_sge_qsets(struct adapter *adap)
409{
410 int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
411 unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;
412
413 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
414 irq_idx = -1;
415
416 for_each_port(adap, i) {
417 struct net_device *dev = adap->port[i];
418 const struct port_info *pi = netdev_priv(dev);
419
420 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
421 err = t3_sge_alloc_qset(adap, qset_idx, 1,
422 (adap->flags & USING_MSIX) ? qset_idx + 1 :
423 irq_idx,
424 &adap->params.sge.qset[qset_idx], ntxq,
425 j == 0 ? dev :
426 adap-> dummy_netdev[dummy_dev_idx++]);
427 if (err) {
428 t3_free_sge_resources(adap);
429 return err;
430 }
431 }
432 }
433
434 return 0;
435}
436
437static ssize_t attr_show(struct class_device *cd, char *buf,
438 ssize_t(*format) (struct adapter *, char *))
439{
440 ssize_t len;
441 struct adapter *adap = to_net_dev(cd)->priv;
442
443 /* Synchronize with ioctls that may shut down the device */
444 rtnl_lock();
445 len = (*format) (adap, buf);
446 rtnl_unlock();
447 return len;
448}
449
450static ssize_t attr_store(struct class_device *cd, const char *buf, size_t len,
451 ssize_t(*set) (struct adapter *, unsigned int),
452 unsigned int min_val, unsigned int max_val)
453{
454 char *endp;
455 ssize_t ret;
456 unsigned int val;
457 struct adapter *adap = to_net_dev(cd)->priv;
458
459 if (!capable(CAP_NET_ADMIN))
460 return -EPERM;
461
462 val = simple_strtoul(buf, &endp, 0);
463 if (endp == buf || val < min_val || val > max_val)
464 return -EINVAL;
465
466 rtnl_lock();
467 ret = (*set) (adap, val);
468 if (!ret)
469 ret = len;
470 rtnl_unlock();
471 return ret;
472}
473
/*
 * Generate the pair of functions backing a read-only sysfs attribute:
 * a formatter that prints @val_expr for an adapter, and a show method
 * that wraps it with attr_show() for locking.
 */
#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct adapter *adap, char *buf) \
{ \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct class_device *cd, char *buf) \
{ \
	return attr_show(cd, buf, format_##name); \
}
483
484static ssize_t set_nfilters(struct adapter *adap, unsigned int val)
485{
486 if (adap->flags & FULL_INIT_DONE)
487 return -EBUSY;
488 if (val && adap->params.rev == 0)
489 return -EINVAL;
490 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers)
491 return -EINVAL;
492 adap->params.mc5.nfilters = val;
493 return 0;
494}
495
/* sysfs store method; range checking is done inside set_nfilters(). */
static ssize_t store_nfilters(struct class_device *cd, const char *buf,
			      size_t len)
{
	return attr_store(cd, buf, len, set_nfilters, 0, ~0);
}
501
502static ssize_t set_nservers(struct adapter *adap, unsigned int val)
503{
504 if (adap->flags & FULL_INIT_DONE)
505 return -EBUSY;
506 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
507 return -EINVAL;
508 adap->params.mc5.nservers = val;
509 return 0;
510}
511
/* sysfs store method; range checking is done inside set_nservers(). */
static ssize_t store_nservers(struct class_device *cd, const char *buf,
			      size_t len)
{
	return attr_store(cd, buf, len, set_nservers, 0, ~0);
}
517
/* Read-only attribute: show method only. */
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

/* Read-write attribute: show method plus the given store method. */
#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

/* Per-adapter attributes: MC5 size and filter/server partitioning. */
CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&class_device_attr_cam_size.attr,
	&class_device_attr_nfilters.attr,
	&class_device_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
538
/*
 * Show the rate of one HW Tx scheduler.  Two schedulers share each
 * 32-bit TM PIO word; odd-numbered schedulers occupy the upper half.
 * A zero cpt field means the scheduler is disabled.
 */
static ssize_t tm_attr_show(struct class_device *cd, char *buf, int sched)
{
	ssize_t len;
	unsigned int v, addr, bpt, cpt;
	struct adapter *adap = to_net_dev(cd)->priv;

	/* each register holds the parameters of two schedulers */
	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;	/* odd schedulers use the upper 16 bits */
	/* presumably bytes-per-tick and clocks-per-tick -- see TP TM docs */
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		/* convert core-clock ticks into a Kbps rate */
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}
562
563static ssize_t tm_attr_store(struct class_device *cd, const char *buf,
564 size_t len, int sched)
565{
566 char *endp;
567 ssize_t ret;
568 unsigned int val;
569 struct adapter *adap = to_net_dev(cd)->priv;
570
571 if (!capable(CAP_NET_ADMIN))
572 return -EPERM;
573
574 val = simple_strtoul(buf, &endp, 0);
575 if (endp == buf || val > 10000000)
576 return -EINVAL;
577
578 rtnl_lock();
579 ret = t3_config_sched(adap, val, sched);
580 if (!ret)
581 ret = len;
582 rtnl_unlock();
583 return ret;
584}
585
/*
 * Generate show/store methods and the class_device attribute for one
 * Tx scheduler, delegating to tm_attr_show()/tm_attr_store().
 */
#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct class_device *cd, char *buf) \
{ \
	return tm_attr_show(cd, buf, sched); \
} \
static ssize_t store_##name(struct class_device *cd, const char *buf, size_t len) \
{ \
	return tm_attr_store(cd, buf, len, sched); \
} \
static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

/* one attribute per hardware Tx scheduler */
TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

/* attributes installed only while offload mode is active */
static struct attribute *offload_attrs[] = {
	&class_device_attr_sched0.attr,
	&class_device_attr_sched1.attr,
	&class_device_attr_sched2.attr,
	&class_device_attr_sched3.attr,
	&class_device_attr_sched4.attr,
	&class_device_attr_sched5.attr,
	&class_device_attr_sched6.attr,
	&class_device_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
619
620/*
621 * Sends an sk_buff to an offload queue driver
622 * after dealing with any active network taps.
623 */
624static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
625{
626 int ret;
627
628 local_bh_disable();
629 ret = t3_offload_tx(tdev, skb);
630 local_bh_enable();
631 return ret;
632}
633
/*
 * Update the source MAC table entry for a port by sending a
 * CPL_SMT_WRITE_REQ over the offload path.  Returns 0 on success or
 * -ENOMEM if the request skb cannot be allocated.
 */
static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	skb->priority = 1;	/* NOTE(review): queue selection? confirm */
	offload_tx(&adapter->tdev, skb);
	return 0;
}
653
/* Write a source MAC table entry for every port; always returns 0. */
static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}
662
/*
 * Program the per-port MTU register: port 0's MTU in the low 16 bits,
 * port 1's (if present) in the high 16 bits.
 */
static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
671
/*
 * Send a management work request configuring one packet scheduler.
 * The allocation uses __GFP_NOFAIL, so skb is never NULL here; the
 * return value of t3_mgmt_tx() is not checked.
 */
static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			      int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	t3_mgmt_tx(adap, skb);
}
689
690static void bind_qsets(struct adapter *adap)
691{
692 int i, j;
693
694 for_each_port(adap, i) {
695 const struct port_info *pi = adap2pinfo(adap, i);
696
697 for (j = 0; j < pi->nqsets; ++j)
698 send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
699 -1, i);
700 }
701}
702
703/**
704 * cxgb_up - enable the adapter
705 * @adapter: adapter being enabled
706 *
707 * Called when the first port is enabled, this function performs the
708 * actions necessary to make an adapter operational, such as completing
709 * the initialization of HW modules, and enabling interrupts.
710 *
711 * Must be called with the rtnl lock held.
712 */
713static int cxgb_up(struct adapter *adap)
714{
715 int err = 0;
716
717 if (!(adap->flags & FULL_INIT_DONE)) {
718 err = t3_check_fw_version(adap);
719 if (err)
720 goto out;
721
722 err = init_dummy_netdevs(adap);
723 if (err)
724 goto out;
725
726 err = t3_init_hw(adap, 0);
727 if (err)
728 goto out;
729
730 err = setup_sge_qsets(adap);
731 if (err)
732 goto out;
733
734 setup_rss(adap);
735 adap->flags |= FULL_INIT_DONE;
736 }
737
738 t3_intr_clear(adap);
739
740 if (adap->flags & USING_MSIX) {
741 name_msix_vecs(adap);
742 err = request_irq(adap->msix_info[0].vec,
743 t3_async_intr_handler, 0,
744 adap->msix_info[0].desc, adap);
745 if (err)
746 goto irq_err;
747
748 if (request_msix_data_irqs(adap)) {
749 free_irq(adap->msix_info[0].vec, adap);
750 goto irq_err;
751 }
752 } else if ((err = request_irq(adap->pdev->irq,
753 t3_intr_handler(adap,
754 adap->sge.qs[0].rspq.
755 polling),
756 (adap->flags & USING_MSI) ? 0 : SA_SHIRQ,
757 adap->name, adap)))
758 goto irq_err;
759
760 t3_sge_start(adap);
761 t3_intr_enable(adap);
762
763 if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
764 bind_qsets(adap);
765 adap->flags |= QUEUES_BOUND;
766
767out:
768 return err;
769irq_err:
770 CH_ERR(adap, "request_irq failed, err %d\n", err);
771 goto out;
772}
773
774/*
775 * Release resources when all the ports and offloading have been stopped.
776 */
777static void cxgb_down(struct adapter *adapter)
778{
779 t3_sge_stop(adapter);
780 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
781 t3_intr_disable(adapter);
782 spin_unlock_irq(&adapter->work_lock);
783
784 if (adapter->flags & USING_MSIX) {
785 int i, n = 0;
786
787 free_irq(adapter->msix_info[0].vec, adapter);
788 for_each_port(adapter, i)
789 n += adap2pinfo(adapter, i)->nqsets;
790
791 for (i = 0; i < n; ++i)
792 free_irq(adapter->msix_info[i + 1].vec,
793 &adapter->sge.qs[i]);
794 } else
795 free_irq(adapter->pdev->irq, adapter);
796
797 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
798 quiesce_rx(adapter);
799}
800
801static void schedule_chk_task(struct adapter *adap)
802{
803 unsigned int timeo;
804
805 timeo = adap->params.linkpoll_period ?
806 (HZ * adap->params.linkpoll_period) / 10 :
807 adap->params.stats_update_period * HZ;
808 if (timeo)
809 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
810}
811
/*
 * Enable offload operation (first offload opener only).  Brings the
 * adapter up if no ports are open yet, switches the TP into offload
 * mode, activates the offload module and notifies registered clients.
 * On failure the offload state is rolled back.
 */
static int offload_open(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct t3cdev *tdev = T3CDEV(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err = 0;

	/* only the first opener does the work below */
	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		return err;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	/* Never mind if the next step fails */
	sysfs_create_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}
853
/*
 * Shut down offload operation: notify clients, remove the scheduler
 * sysfs attributes, restore dummy ops and leave offload mode.  Brings
 * the whole adapter down if this was the last open device.
 */
static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;	/* offload was never opened */

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}
877
/*
 * net_device open handler.  Brings the adapter up if this is the first
 * open port, optionally enables offload support (failure there is not
 * fatal), then starts the port's MAC/PHY and Tx queue.
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	int other_ports = adapter->open_device_map & PORT_MASK;

	/* the first opened port brings the whole adapter up */
	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (!ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_start_queue(dev);
	if (!other_ports)
		schedule_chk_task(adapter);	/* start periodic checks */

	return 0;
}
904
/*
 * net_device stop handler.  Quiesces the port (interrupts, Tx queue,
 * PHY, MAC), removes it from the open-device map, stops the periodic
 * check task when no ports remain, and brings the adapter down when it
 * was the last open device.
 */
static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = netdev_priv(dev);

	t3_port_intr_disable(adapter, p->port_id);
	netif_stop_queue(dev);
	p->phy.ops->power_down(&p->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock(&adapter->work_lock);	/* sync with update task */
	clear_bit(p->port_id, &adapter->open_device_map);
	spin_unlock(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_rearming_delayed_workqueue(cxgb3_wq,
						  &adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	return 0;
}
929
930static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
931{
932 struct adapter *adapter = dev->priv;
933 struct port_info *p = netdev_priv(dev);
934 struct net_device_stats *ns = &p->netstats;
935 const struct mac_stats *pstats;
936
937 spin_lock(&adapter->stats_lock);
938 pstats = t3_mac_update_stats(&p->mac);
939 spin_unlock(&adapter->stats_lock);
940
941 ns->tx_bytes = pstats->tx_octets;
942 ns->tx_packets = pstats->tx_frames;
943 ns->rx_bytes = pstats->rx_octets;
944 ns->rx_packets = pstats->rx_frames;
945 ns->multicast = pstats->rx_mcast_frames;
946
947 ns->tx_errors = pstats->tx_underrun;
948 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
949 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
950 pstats->rx_fifo_ovfl;
951
952 /* detailed rx_errors */
953 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
954 ns->rx_over_errors = 0;
955 ns->rx_crc_errors = pstats->rx_fcs_errs;
956 ns->rx_frame_errors = pstats->rx_symbol_errs;
957 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
958 ns->rx_missed_errors = pstats->rx_cong_drops;
959
960 /* detailed tx_errors */
961 ns->tx_aborted_errors = 0;
962 ns->tx_carrier_errors = 0;
963 ns->tx_fifo_errors = pstats->tx_underrun;
964 ns->tx_heartbeat_errors = 0;
965 ns->tx_window_errors = 0;
966 return ns;
967}
968
969static u32 get_msglevel(struct net_device *dev)
970{
971 struct adapter *adapter = dev->priv;
972
973 return adapter->msg_enable;
974}
975
976static void set_msglevel(struct net_device *dev, u32 val)
977{
978 struct adapter *adapter = dev->priv;
979
980 adapter->msg_enable = val;
981}
982
/*
 * Names of the ethtool statistics.  The order here must match exactly
 * the order in which get_stats() emits its values.
 */
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK ",
	"TxFramesOK ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames ",
	"TxUnderrun ",
	"TxExtUnderrun ",

	"TxFrames64 ",
	"TxFrames65To127 ",
	"TxFrames128To255 ",
	"TxFrames256To511 ",
	"TxFrames512To1023 ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax ",

	"RxOctetsOK ",
	"RxFramesOK ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames ",
	"RxFCSErrors ",
	"RxSymbolErrors ",
	"RxShortErrors ",
	"RxJabberErrors ",
	"RxLengthErrors ",
	"RxFIFOoverflow ",

	"RxFrames64 ",
	"RxFrames65To127 ",
	"RxFrames128To255 ",
	"RxFrames256To511 ",
	"RxFrames512To1023 ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax ",

	"PhyFIFOErrors ",
	"TSO ",
	"VLANextractions ",
	"VLANinsertions ",
	"TxCsumOffload ",
	"RxCsumGood ",
	"RxDrops "
};
1028
/* ethtool: one counter per entry in stats_strings[]. */
static int get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(stats_strings);
}
1033
/* Size in bytes of the register dump produced by get_regs(). */
#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}
1040
/* ethtool: size of the serial EEPROM image. */
static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}
1045
1046static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1047{
1048 u32 fw_vers = 0;
1049 struct adapter *adapter = dev->priv;
1050
1051 t3_get_fw_version(adapter, &fw_vers);
1052
1053 strcpy(info->driver, DRV_NAME);
1054 strcpy(info->version, DRV_VERSION);
1055 strcpy(info->bus_info, pci_name(adapter->pdev));
1056 if (!fw_vers)
1057 strcpy(info->fw_version, "N/A");
1058 else {
1059 snprintf(info->fw_version, sizeof(info->fw_version),
1060 "%s %u.%u.%u",
1061 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1062 G_FW_VERSION_MAJOR(fw_vers),
1063 G_FW_VERSION_MINOR(fw_vers),
1064 G_FW_VERSION_MICRO(fw_vers));
1065 }
1066}
1067
1068static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1069{
1070 if (stringset == ETH_SS_STATS)
1071 memcpy(data, stats_strings, sizeof(stats_strings));
1072}
1073
1074static unsigned long collect_sge_port_stats(struct adapter *adapter,
1075 struct port_info *p, int idx)
1076{
1077 int i;
1078 unsigned long tot = 0;
1079
1080 for (i = 0; i < p->nqsets; ++i)
1081 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
1082 return tot;
1083}
1084
/*
 * ethtool get_ethtool_stats handler.  Values are emitted in exactly the
 * order of stats_strings[] above.  MAC counters are refreshed under
 * stats_lock; SGE counters are summed over the port's queue sets.
 */
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	/* Tx MAC counters */
	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	/* Rx MAC counters */
	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	/* SGE per-port counters */
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = s->rx_cong_drops;
}
1141
1142static inline void reg_block_dump(struct adapter *ap, void *buf,
1143 unsigned int start, unsigned int end)
1144{
1145 u32 *p = buf + start;
1146
1147 for (; start <= end; start += sizeof(u32))
1148 *p++ = t3_read_reg(ap, start);
1149}
1150
/* ethtool get_regs handler: dump selected register ranges into buf. */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->priv;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation. Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	/* each call below copies one contiguous register range into buf */
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
1180
1181static int restart_autoneg(struct net_device *dev)
1182{
1183 struct port_info *p = netdev_priv(dev);
1184
1185 if (!netif_running(dev))
1186 return -EAGAIN;
1187 if (p->link_config.autoneg != AUTONEG_ENABLE)
1188 return -EINVAL;
1189 p->phy.ops->autoneg_restart(&p->phy);
1190 return 0;
1191}
1192
1193static int cxgb3_phys_id(struct net_device *dev, u32 data)
1194{
1195 int i;
1196 struct adapter *adapter = dev->priv;
1197
1198 if (data == 0)
1199 data = 2;
1200
1201 for (i = 0; i < data * 2; i++) {
1202 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1203 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1204 if (msleep_interruptible(500))
1205 break;
1206 }
1207 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1208 F_GPIO0_OUT_VAL);
1209 return 0;
1210}
1211
1212static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1213{
1214 struct port_info *p = netdev_priv(dev);
1215
1216 cmd->supported = p->link_config.supported;
1217 cmd->advertising = p->link_config.advertising;
1218
1219 if (netif_carrier_ok(dev)) {
1220 cmd->speed = p->link_config.speed;
1221 cmd->duplex = p->link_config.duplex;
1222 } else {
1223 cmd->speed = -1;
1224 cmd->duplex = -1;
1225 }
1226
1227 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1228 cmd->phy_address = p->phy.addr;
1229 cmd->transceiver = XCVR_EXTERNAL;
1230 cmd->autoneg = p->link_config.autoneg;
1231 cmd->maxtxpkt = 0;
1232 cmd->maxrxpkt = 0;
1233 return 0;
1234}
1235
1236static int speed_duplex_to_caps(int speed, int duplex)
1237{
1238 int cap = 0;
1239
1240 switch (speed) {
1241 case SPEED_10:
1242 if (duplex == DUPLEX_FULL)
1243 cap = SUPPORTED_10baseT_Full;
1244 else
1245 cap = SUPPORTED_10baseT_Half;
1246 break;
1247 case SPEED_100:
1248 if (duplex == DUPLEX_FULL)
1249 cap = SUPPORTED_100baseT_Full;
1250 else
1251 cap = SUPPORTED_100baseT_Half;
1252 break;
1253 case SPEED_1000:
1254 if (duplex == DUPLEX_FULL)
1255 cap = SUPPORTED_1000baseT_Full;
1256 else
1257 cap = SUPPORTED_1000baseT_Half;
1258 break;
1259 case SPEED_10000:
1260 if (duplex == DUPLEX_FULL)
1261 cap = SUPPORTED_10000baseT_Full;
1262 }
1263 return cap;
1264}
1265
/* All link-mode advertisement bits this driver handles. */
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)
1270
/*
 * ethtool set_settings handler.  With autoneg disabled only a supported
 * fixed speed/duplex is accepted (forcing 1000 Mbps is explicitly
 * rejected); with autoneg enabled the advertised mask is clipped to the
 * supported capabilities.  The new configuration is applied immediately
 * if the interface is running.
 */
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;	/* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;	/* nothing left to advertise */
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}
1301
1302static void get_pauseparam(struct net_device *dev,
1303 struct ethtool_pauseparam *epause)
1304{
1305 struct port_info *p = netdev_priv(dev);
1306
1307 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1308 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1309 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1310}
1311
/*
 * ethtool set_pauseparam handler: record the requested flow-control
 * settings.  With autoneg they are renegotiated; otherwise they are
 * applied to the MAC directly when the interface is running.
 */
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;	/* autoneg pause not supported here */

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}
1339
1340static u32 get_rx_csum(struct net_device *dev)
1341{
1342 struct port_info *p = netdev_priv(dev);
1343
1344 return p->rx_csum_offload;
1345}
1346
1347static int set_rx_csum(struct net_device *dev, u32 data)
1348{
1349 struct port_info *p = netdev_priv(dev);
1350
1351 p->rx_csum_offload = data;
1352 return 0;
1353}
1354
1355static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1356{
1357 struct adapter *adapter = dev->priv;
1358
1359 e->rx_max_pending = MAX_RX_BUFFERS;
1360 e->rx_mini_max_pending = 0;
1361 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1362 e->tx_max_pending = MAX_TXQ_ENTRIES;
1363
1364 e->rx_pending = adapter->params.sge.qset[0].fl_size;
1365 e->rx_mini_pending = adapter->params.sge.qset[0].rspq_size;
1366 e->rx_jumbo_pending = adapter->params.sge.qset[0].jumbo_size;
1367 e->tx_pending = adapter->params.sge.qset[0].txq_size[0];
1368}
1369
/*
 * ethtool set_ringparam handler: validate the requested ring sizes and
 * apply them to every queue set.  Rejected with -EBUSY once the HW has
 * been fully initialized, since the rings cannot be resized then.
 */
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	int i;
	struct adapter *adapter = dev->priv;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;	/* rings already created */

	/* rx_mini maps to the response queue; all three Tx queues of a
	 * set share the requested Tx size */
	for (i = 0; i < SGE_QSETS; ++i) {
		struct qset_params *q = &adapter->params.sge.qset[i];

		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}
1400
1401static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1402{
1403 struct adapter *adapter = dev->priv;
1404 struct qset_params *qsp = &adapter->params.sge.qset[0];
1405 struct sge_qset *qs = &adapter->sge.qs[0];
1406
1407 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1408 return -EINVAL;
1409
1410 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1411 t3_update_qset_coalesce(qs, qsp);
1412 return 0;
1413}
1414
1415static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1416{
1417 struct adapter *adapter = dev->priv;
1418 struct qset_params *q = adapter->params.sge.qset;
1419
1420 c->rx_coalesce_usecs = q->coalesce_usecs;
1421 return 0;
1422}
1423
1424static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1425 u8 * data)
1426{
1427 int i, err = 0;
1428 struct adapter *adapter = dev->priv;
1429
1430 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1431 if (!buf)
1432 return -ENOMEM;
1433
1434 e->magic = EEPROM_MAGIC;
1435 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1436 err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);
1437
1438 if (!err)
1439 memcpy(data, buf + e->offset, e->len);
1440 kfree(buf);
1441 return err;
1442}
1443
/*
 * Write a range of the serial EEPROM.  Device writes are 4-byte aligned,
 * so an unaligned request is first merged with the words already in the
 * EEPROM via a bounce buffer.  Write protection is lifted for the
 * duration of the update and restored on success.
 */
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 * data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = dev->priv;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	/* Round the byte range out to 4-byte boundaries. */
	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/*
		 * Unaligned request: read back the first and last words of
		 * the aligned span so bytes outside the user's range are
		 * preserved, then overlay the user data at its offset.
		 */
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (u32 *) & buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;	/* already aligned: write the caller's buffer directly */

	/* Disable write protection while updating. */
	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	/* Write the aligned span one 32-bit word at a time. */
	for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	/* Re-arm write protection only on success, keeping the first error. */
	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
1489
1490static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1491{
1492 wol->supported = 0;
1493 wol->wolopts = 0;
1494 memset(&wol->sopass, 0, sizeof(wol->sopass));
1495}
1496
/* ethtool method table shared by all cxgb3 net devices. */
static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_stats_count = get_stats_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.get_tso = ethtool_op_get_tso,
	.set_tso = ethtool_op_set_tso,
	.get_perm_addr = ethtool_op_get_perm_addr
};
1531
/*
 * Range check for extension-ioctl parameters.  Negative values mean
 * "leave this setting unchanged" and are therefore always accepted;
 * otherwise the value must lie within [lo, hi].
 */
static int in_range(int val, int lo, int hi)
{
	if (val < 0)
		return 1;
	return lo <= val && val <= hi;
}
1536
/*
 * Driver-private ioctl interface (reached via SIOCCHIOCTL).  The first
 * u32 at @useraddr selects the sub-command; the remainder of the user
 * buffer holds a command-specific structure.  Mutating commands require
 * CAP_NET_ADMIN; several also require the adapter to be quiesced
 * (!FULL_INIT_DONE) or offload mode to be active.
 */
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	int ret;
	u32 cmd;
	struct adapter *adapter = dev->priv;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case CHELSIO_SETREG:{
			/* Privileged raw register write within the BAR0 window. */
			struct ch_reg edata;

			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			if (copy_from_user(&edata, useraddr, sizeof(edata)))
				return -EFAULT;
			/* Register offset must be word aligned and in range. */
			if ((edata.addr & 3) != 0
			    || edata.addr >= adapter->mmio_len)
				return -EINVAL;
			writel(edata.val, adapter->regs + edata.addr);
			break;
		}
	case CHELSIO_GETREG:{
			/* Raw register read; no capability required. */
			struct ch_reg edata;

			if (copy_from_user(&edata, useraddr, sizeof(edata)))
				return -EFAULT;
			if ((edata.addr & 3) != 0
			    || edata.addr >= adapter->mmio_len)
				return -EINVAL;
			edata.val = readl(adapter->regs + edata.addr);
			if (copy_to_user(useraddr, &edata, sizeof(edata)))
				return -EFAULT;
			break;
		}
	case CHELSIO_SET_QSET_PARAMS:{
			/*
			 * Update one queue set's parameters.  Negative fields
			 * mean "leave unchanged" (see in_range); size changes
			 * are refused once the queues are up.
			 */
			int i;
			struct qset_params *q;
			struct ch_qset_params t;

			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			if (copy_from_user(&t, useraddr, sizeof(t)))
				return -EFAULT;
			if (t.qset_idx >= SGE_QSETS)
				return -EINVAL;
			if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
			    !in_range(t.cong_thres, 0, 255) ||
			    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
				      MAX_TXQ_ENTRIES) ||
			    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
				      MAX_TXQ_ENTRIES) ||
			    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
				      MAX_CTRL_TXQ_ENTRIES) ||
			    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
				      MAX_RX_BUFFERS)
			    || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
					 MAX_RX_JUMBO_BUFFERS)
			    || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
					 MAX_RSPQ_ENTRIES))
				return -EINVAL;
			/* Once initialized, only intr_lat/polling may change. */
			if ((adapter->flags & FULL_INIT_DONE) &&
			    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
			     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
			     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
			     t.polling >= 0 || t.cong_thres >= 0))
				return -EBUSY;

			q = &adapter->params.sge.qset[t.qset_idx];

			if (t.rspq_size >= 0)
				q->rspq_size = t.rspq_size;
			if (t.fl_size[0] >= 0)
				q->fl_size = t.fl_size[0];
			if (t.fl_size[1] >= 0)
				q->jumbo_size = t.fl_size[1];
			if (t.txq_size[0] >= 0)
				q->txq_size[0] = t.txq_size[0];
			if (t.txq_size[1] >= 0)
				q->txq_size[1] = t.txq_size[1];
			if (t.txq_size[2] >= 0)
				q->txq_size[2] = t.txq_size[2];
			if (t.cong_thres >= 0)
				q->cong_thres = t.cong_thres;
			if (t.intr_lat >= 0) {
				struct sge_qset *qs =
				    &adapter->sge.qs[t.qset_idx];

				q->coalesce_usecs = t.intr_lat;
				t3_update_qset_coalesce(qs, q);
			}
			if (t.polling >= 0) {
				if (adapter->flags & USING_MSIX)
					q->polling = t.polling;
				else {
					/* No polling with INTx for T3A */
					if (adapter->params.rev == 0 &&
					    !(adapter->flags & USING_MSI))
						t.polling = 0;

					/* With one vector, polling is global. */
					for (i = 0; i < SGE_QSETS; i++) {
						q = &adapter->params.sge.
						    qset[i];
						q->polling = t.polling;
					}
				}
			}
			break;
		}
	case CHELSIO_GET_QSET_PARAMS:{
			/* Return the current parameters of one queue set. */
			struct qset_params *q;
			struct ch_qset_params t;

			if (copy_from_user(&t, useraddr, sizeof(t)))
				return -EFAULT;
			if (t.qset_idx >= SGE_QSETS)
				return -EINVAL;

			q = &adapter->params.sge.qset[t.qset_idx];
			t.rspq_size = q->rspq_size;
			t.txq_size[0] = q->txq_size[0];
			t.txq_size[1] = q->txq_size[1];
			t.txq_size[2] = q->txq_size[2];
			t.fl_size[0] = q->fl_size;
			t.fl_size[1] = q->jumbo_size;
			t.polling = q->polling;
			t.intr_lat = q->coalesce_usecs;
			t.cong_thres = q->cong_thres;

			if (copy_to_user(useraddr, &t, sizeof(t)))
				return -EFAULT;
			break;
		}
	case CHELSIO_SET_QSET_NUM:{
			/*
			 * Change how many queue sets this port owns; the other
			 * ports' allocations cap how many are available.
			 */
			struct ch_reg edata;
			struct port_info *pi = netdev_priv(dev);
			unsigned int i, first_qset = 0, other_qsets = 0;

			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			if (adapter->flags & FULL_INIT_DONE)
				return -EBUSY;
			if (copy_from_user(&edata, useraddr, sizeof(edata)))
				return -EFAULT;
			/* More than one queue set requires MSI-X. */
			if (edata.val < 1 ||
			    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
				return -EINVAL;

			for_each_port(adapter, i)
				if (adapter->port[i] && adapter->port[i] != dev)
					other_qsets += adap2pinfo(adapter, i)->nqsets;

			if (edata.val + other_qsets > SGE_QSETS)
				return -EINVAL;

			pi->nqsets = edata.val;

			/* Recompute each port's first queue set index. */
			for_each_port(adapter, i)
				if (adapter->port[i]) {
					pi = adap2pinfo(adapter, i);
					pi->first_qset = first_qset;
					first_qset += pi->nqsets;
				}
			break;
		}
	case CHELSIO_GET_QSET_NUM:{
			struct ch_reg edata;
			struct port_info *pi = netdev_priv(dev);

			edata.cmd = CHELSIO_GET_QSET_NUM;
			edata.val = pi->nqsets;
			if (copy_to_user(useraddr, &edata, sizeof(edata)))
				return -EFAULT;
			break;
		}
	case CHELSIO_LOAD_FW:{
			/*
			 * Load a firmware image supplied inline after the
			 * ch_mem_range header.
			 * NOTE(review): t.len comes from userspace and is
			 * passed to kmalloc unvalidated — confirm an upper
			 * bound is enforced elsewhere (t3_load_fw?).
			 */
			u8 *fw_data;
			struct ch_mem_range t;

			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			if (copy_from_user(&t, useraddr, sizeof(t)))
				return -EFAULT;

			fw_data = kmalloc(t.len, GFP_KERNEL);
			if (!fw_data)
				return -ENOMEM;

			if (copy_from_user
			    (fw_data, useraddr + sizeof(t), t.len)) {
				kfree(fw_data);
				return -EFAULT;
			}

			ret = t3_load_fw(adapter, fw_data, t.len);
			kfree(fw_data);
			if (ret)
				return ret;
			break;
		}
	case CHELSIO_SETMTUTAB:{
			/* Replace the offload MTU table (offload idle only). */
			struct ch_mtus m;
			int i;

			if (!is_offload(adapter))
				return -EOPNOTSUPP;
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			if (offload_running(adapter))
				return -EBUSY;
			if (copy_from_user(&m, useraddr, sizeof(m)))
				return -EFAULT;
			if (m.nmtus != NMTUS)
				return -EINVAL;
			if (m.mtus[0] < 81)	/* accommodate SACK */
				return -EINVAL;

			/* MTUs must be in ascending order */
			for (i = 1; i < NMTUS; ++i)
				if (m.mtus[i] < m.mtus[i - 1])
					return -EINVAL;

			memcpy(adapter->params.mtus, m.mtus,
			       sizeof(adapter->params.mtus));
			break;
		}
	case CHELSIO_GET_PM:{
			/* Report payload-memory (PM) page configuration. */
			struct tp_params *p = &adapter->params.tp;
			struct ch_pm m = {.cmd = CHELSIO_GET_PM };

			if (!is_offload(adapter))
				return -EOPNOTSUPP;
			m.tx_pg_sz = p->tx_pg_size;
			m.tx_num_pg = p->tx_num_pgs;
			m.rx_pg_sz = p->rx_pg_size;
			m.rx_num_pg = p->rx_num_pgs;
			m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
			if (copy_to_user(useraddr, &m, sizeof(m)))
				return -EFAULT;
			break;
		}
	case CHELSIO_SET_PM:{
			/* Reconfigure PM page sizes/counts before init. */
			struct ch_pm m;
			struct tp_params *p = &adapter->params.tp;

			if (!is_offload(adapter))
				return -EOPNOTSUPP;
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			if (adapter->flags & FULL_INIT_DONE)
				return -EBUSY;
			if (copy_from_user(&m, useraddr, sizeof(m)))
				return -EFAULT;
			if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
			    !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
				return -EINVAL;	/* not power of 2 */
			if (!(m.rx_pg_sz & 0x14000))
				return -EINVAL;	/* not 16KB or 64KB */
			if (!(m.tx_pg_sz & 0x1554000))
				return -EINVAL;
			/* -1 means "keep the current count". */
			if (m.tx_num_pg == -1)
				m.tx_num_pg = p->tx_num_pgs;
			if (m.rx_num_pg == -1)
				m.rx_num_pg = p->rx_num_pgs;
			if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
				return -EINVAL;
			if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
			    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
				return -EINVAL;
			p->rx_pg_size = m.rx_pg_sz;
			p->tx_pg_size = m.tx_pg_sz;
			p->rx_num_pgs = m.rx_num_pg;
			p->tx_num_pgs = m.tx_num_pg;
			break;
		}
	case CHELSIO_GET_MEM:{
			/* Stream MC7 memory contents back to userspace. */
			struct ch_mem_range t;
			struct mc7 *mem;
			u64 buf[32];

			if (!is_offload(adapter))
				return -EOPNOTSUPP;
			if (!(adapter->flags & FULL_INIT_DONE))
				return -EIO;	/* need the memory controllers */
			if (copy_from_user(&t, useraddr, sizeof(t)))
				return -EFAULT;
			/* MC7 is accessed in 8-byte units. */
			if ((t.addr & 7) || (t.len & 7))
				return -EINVAL;
			if (t.mem_id == MEM_CM)
				mem = &adapter->cm;
			else if (t.mem_id == MEM_PMRX)
				mem = &adapter->pmrx;
			else if (t.mem_id == MEM_PMTX)
				mem = &adapter->pmtx;
			else
				return -EINVAL;

			/*
			 * Version scheme:
			 * bits 0..9: chip version
			 * bits 10..15: chip revision
			 */
			t.version = 3 | (adapter->params.rev << 10);
			if (copy_to_user(useraddr, &t, sizeof(t)))
				return -EFAULT;

			/*
			 * Read 256 bytes at a time as len can be large and we don't
			 * want to use huge intermediate buffers.
			 */
			useraddr += sizeof(t);	/* advance to start of buffer */
			while (t.len) {
				unsigned int chunk =
				    min_t(unsigned int, t.len, sizeof(buf));

				ret =
				    t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
						   buf);
				if (ret)
					return ret;
				if (copy_to_user(useraddr, buf, chunk))
					return -EFAULT;
				useraddr += chunk;
				t.addr += chunk;
				t.len -= chunk;
			}
			break;
		}
	case CHELSIO_SET_TRACE_FILTER:{
			/* Program the Tx/Rx packet trace filters. */
			struct ch_trace t;
			const struct trace_params *tp;

			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			if (!offload_running(adapter))
				return -EAGAIN;
			if (copy_from_user(&t, useraddr, sizeof(t)))
				return -EFAULT;

			/*
			 * The leading fields of ch_trace (from sip on) are
			 * laid out to match struct trace_params.
			 */
			tp = (const struct trace_params *)&t.sip;
			if (t.config_tx)
				t3_config_trace_filter(adapter, tp, 0,
						       t.invert_match,
						       t.trace_tx);
			if (t.config_rx)
				t3_config_trace_filter(adapter, tp, 1,
						       t.invert_match,
						       t.trace_rx);
			break;
		}
	case CHELSIO_SET_PKTSCHED:{
			/* Configure a Tx packet scheduler via the uP. */
			struct ch_pktsched_params p;

			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			if (!adapter->open_device_map)
				return -EAGAIN;	/* uP and SGE must be running */
			if (copy_from_user(&p, useraddr, sizeof(p)))
				return -EFAULT;
			send_pktsched_cmd(adapter, p.sched, p.idx, p.min, p.max,
					  p.binding);
			break;

		}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
1907
/*
 * Net-device ioctl handler: standard MII register access
 * (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG) plus the driver-private
 * SIOCCHIOCTL extension interface.  For 10G PHYs the MDIO device (MMD)
 * is carried in bits 8+ of phy_id, defaulting to the PCS device.
 */
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	int ret, mmd;
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(req);

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = pi->phy.addr;
		/* FALLTHRU */
	case SIOCGMIIREG:{
			u32 val;
			struct cphy *phy = &pi->phy;

			if (!phy->mdio_read)
				return -EOPNOTSUPP;
			if (is_10G(adapter)) {
				/* MMD selector in the upper bits of phy_id. */
				mmd = data->phy_id >> 8;
				if (!mmd)
					mmd = MDIO_DEV_PCS;
				else if (mmd > MDIO_DEV_XGXS)
					return -EINVAL;

				ret =
				    phy->mdio_read(adapter, data->phy_id & 0x1f,
						   mmd, data->reg_num, &val);
			} else
				/* 1G PHYs: 5-bit register space, MMD 0. */
				ret =
				    phy->mdio_read(adapter, data->phy_id & 0x1f,
						   0, data->reg_num & 0x1f,
						   &val);
			if (!ret)
				data->val_out = val;
			break;
		}
	case SIOCSMIIREG:{
			struct cphy *phy = &pi->phy;

			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			if (!phy->mdio_write)
				return -EOPNOTSUPP;
			if (is_10G(adapter)) {
				mmd = data->phy_id >> 8;
				if (!mmd)
					mmd = MDIO_DEV_PCS;
				else if (mmd > MDIO_DEV_XGXS)
					return -EINVAL;

				ret =
				    phy->mdio_write(adapter,
						    data->phy_id & 0x1f, mmd,
						    data->reg_num,
						    data->val_in);
			} else
				ret =
				    phy->mdio_write(adapter,
						    data->phy_id & 0x1f, 0,
						    data->reg_num & 0x1f,
						    data->val_in);
			break;
		}
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
1978
1979static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
1980{
1981 int ret;
1982 struct adapter *adapter = dev->priv;
1983 struct port_info *pi = netdev_priv(dev);
1984
1985 if (new_mtu < 81) /* accommodate SACK */
1986 return -EINVAL;
1987 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
1988 return ret;
1989 dev->mtu = new_mtu;
1990 init_port_mtus(adapter);
1991 if (adapter->params.rev == 0 && offload_running(adapter))
1992 t3_load_mtus(adapter, adapter->params.mtus,
1993 adapter->params.a_wnd, adapter->params.b_wnd,
1994 adapter->port[0]->mtu);
1995 return 0;
1996}
1997
1998static int cxgb_set_mac_addr(struct net_device *dev, void *p)
1999{
2000 struct adapter *adapter = dev->priv;
2001 struct port_info *pi = netdev_priv(dev);
2002 struct sockaddr *addr = p;
2003
2004 if (!is_valid_ether_addr(addr->sa_data))
2005 return -EINVAL;
2006
2007 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2008 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2009 if (offload_running(adapter))
2010 write_smt_entry(adapter, pi->port_id);
2011 return 0;
2012}
2013
2014/**
2015 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2016 * @adap: the adapter
2017 * @p: the port
2018 *
2019 * Ensures that current Rx processing on any of the queues associated with
2020 * the given port completes before returning. We do this by acquiring and
2021 * releasing the locks of the response queues associated with the port.
2022 */
2023static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2024{
2025 int i;
2026
2027 for (i = 0; i < p->nqsets; i++) {
2028 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2029
2030 spin_lock_irq(&q->lock);
2031 spin_unlock_irq(&q->lock);
2032 }
2033}
2034
2035static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2036{
2037 struct adapter *adapter = dev->priv;
2038 struct port_info *pi = netdev_priv(dev);
2039
2040 pi->vlan_grp = grp;
2041 if (adapter->params.rev > 0)
2042 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2043 else {
2044 /* single control for all ports */
2045 unsigned int i, have_vlans = 0;
2046 for_each_port(adapter, i)
2047 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2048
2049 t3_set_vlan_accel(adapter, 1, have_vlans);
2050 }
2051 t3_synchronize_rx(adapter, pi);
2052}
2053
static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	/*
	 * Nothing to do: vlan_rx_register keeps no per-VID state in this
	 * driver, so there is nothing to tear down for a single VID.
	 */
}
2058
2059#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * netpoll entry point: run the interrupt handler appropriate to the
 * queue set's current mode (polling vs. interrupt) synchronously.
 */
static void cxgb_netpoll(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct sge_qset *qs = dev2qset(dev);

	t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
						    adapter);
}
2068#endif
2069
2070/*
2071 * Periodic accumulation of MAC statistics.
2072 */
2073static void mac_stats_update(struct adapter *adapter)
2074{
2075 int i;
2076
2077 for_each_port(adapter, i) {
2078 struct net_device *dev = adapter->port[i];
2079 struct port_info *p = netdev_priv(dev);
2080
2081 if (netif_running(dev)) {
2082 spin_lock(&adapter->stats_lock);
2083 t3_mac_update_stats(&p->mac);
2084 spin_unlock(&adapter->stats_lock);
2085 }
2086 }
2087}
2088
2089static void check_link_status(struct adapter *adapter)
2090{
2091 int i;
2092
2093 for_each_port(adapter, i) {
2094 struct net_device *dev = adapter->port[i];
2095 struct port_info *p = netdev_priv(dev);
2096
2097 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2098 t3_link_changed(adapter, i);
2099 }
2100}
2101
/*
 * Periodic maintenance work: poll link state for PHYs without
 * interrupts, accumulate MAC statistics at stats_update_period
 * intervals, and reschedule itself while any port is up.
 */
static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;

	adapter->check_task_cnt++;

	/* Check link status for PHYs without interrupts */
	if (p->linkpoll_period)
		check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	/* Schedule the next check update if any port is active. */
	spin_lock(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock(&adapter->work_lock);
}
2128
/*
 * Processes external (PHY) interrupts in process context.
 *
 * Queued by t3_os_ext_intr_handler, which masked F_T3DBG off; once the
 * PHY work is done the bit is restored under work_lock, with the cause
 * cleared first so a stale edge is not re-delivered.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);

	t3_phy_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}
2149
/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers. We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		/* Mask F_T3DBG until ext_intr_task restores it. */
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}
2170
2171void t3_fatal_err(struct adapter *adapter)
2172{
2173 unsigned int fw_status[4];
2174
2175 if (adapter->flags & FULL_INIT_DONE) {
2176 t3_sge_stop(adapter);
2177 t3_intr_disable(adapter);
2178 }
2179 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2180 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2181 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2182 fw_status[0], fw_status[1],
2183 fw_status[2], fw_status[3]);
2184
2185}
2186
/*
 * Try to enable MSI-X with one vector per SGE queue set plus one extra.
 * On success, record each assigned vector.  A positive return from
 * pci_enable_msix means fewer vectors were available than requested;
 * the driver logs it and falls back (caller treats non-zero as failure).
 */
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int i, err;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
	if (!err) {
		/* Save the vector numbers the PCI core handed out. */
		for (i = 0; i < ARRAY_SIZE(entries); ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(&adap->pdev->dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}
2204
/*
 * Print a one-line banner for each registered port (adapter description,
 * port type, PCI bus info, interrupt mode) and, once per adapter, the
 * MC7 memory sizes.
 */
static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	/* Format the bus description once; it is the same for all ports. */
	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		/* Skip ports whose net device failed to register. */
		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->port_type->desc,
		       adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		/*
		 * adap->name points at the first registered port's name
		 * (set in init_one), so this prints exactly once.
		 */
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20);
	}
}
2242
/*
 * PCI probe: set up PCI/DMA state, allocate the adapter and one net
 * device per port, prepare the hardware, register the net devices, and
 * choose the interrupt mode (MSI-X > MSI > INTx).  Errors unwind through
 * the labels at the bottom in reverse order of acquisition.
 */
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	/* Create the shared workqueue on first probe; freed at module exit. */
	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	/* Prefer 64-bit DMA; fall back to 32-bit. */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_device;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	/* Allocate and initialize one net device per physical port. */
	for (i = 0; i < ai->nports; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev(sizeof(struct port_info));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_MODULE_OWNER(netdev);
		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->rx_csum_offload = 1;
		pi->nqsets = 1;	/* one queue set per port by default */
		pi->first_qset = i;
		pi->activity = 0;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->priv = adapter;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_LLTX;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->vlan_rx_register = vlan_rx_register;
		netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;

		netdev->open = cxgb_open;
		netdev->stop = cxgb_close;
		netdev->hard_start_xmit = t3_eth_xmit;
		netdev->get_stats = cxgb_get_stats;
		netdev->set_multicast_list = cxgb_set_rxmode;
		netdev->do_ioctl = cxgb_ioctl;
		netdev->change_mtu = cxgb_change_mtu;
		netdev->set_mac_address = cxgb_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
		netdev->poll_controller = cxgb_netpoll;
#endif
		netdev->weight = 64;

		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter->port[0]);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		/* err still holds the last register_netdev failure code. */
		goto out_free_dev;
	}

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	/*
	 * NOTE(review): the sysfs_create_group return value is assigned
	 * but never checked or propagated — confirm whether a failure here
	 * is intentionally ignored.
	 */
	err = sysfs_create_group(&adapter->port[0]->class_dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_disable_device:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
2445
/*
 * PCI remove: undo init_one in reverse order — stop the SGE, remove
 * sysfs attributes, unregister net devices, shut down offload, free SGE
 * resources and interrupt vectors, then release memory and PCI state.
 */
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		int i;
		struct adapter *adapter = dev->priv;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->class_dev.kobj,
				   &cxgb3_attr_group);

		/* Only ports that registered successfully are unregistered. */
		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
			if (adapter->dummy_netdev[i]) {
				free_netdev(adapter->dummy_netdev[i]);
				adapter->dummy_netdev[i] = NULL;
			}

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
2489
/* PCI driver descriptor; probe/remove are defined above in this file. */
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
};
2496
2497static int __init cxgb3_init_module(void)
2498{
2499 int ret;
2500
2501 cxgb3_offload_init();
2502
2503 ret = pci_register_driver(&driver);
2504 return ret;
2505}
2506
2507static void __exit cxgb3_cleanup_module(void)
2508{
2509 pci_unregister_driver(&driver);
2510 if (cxgb3_wq)
2511 destroy_workqueue(cxgb3_wq);
2512}
2513
2514module_init(cxgb3_init_module);
2515module_exit(cxgb3_cleanup_module);
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
new file mode 100644
index 000000000000..c3a02d613382
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -0,0 +1,1222 @@
1/*
2 * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/list.h>
35#include <net/neighbour.h>
36#include <linux/notifier.h>
37#include <asm/atomic.h>
38#include <linux/proc_fs.h>
39#include <linux/if_vlan.h>
40#include <net/netevent.h>
41#include <linux/highmem.h>
42#include <linux/vmalloc.h>
43
44#include "common.h"
45#include "regs.h"
46#include "cxgb3_ioctl.h"
47#include "cxgb3_ctl_defs.h"
48#include "cxgb3_defs.h"
49#include "l2t.h"
50#include "firmware_exports.h"
51#include "cxgb3_offload.h"
52
/* Registered upper-layer clients and activated offload devices; both lists
 * are protected by cxgb3_db_lock. */
static LIST_HEAD(client_list);
static LIST_HEAD(ofld_dev_list);
static DEFINE_MUTEX(cxgb3_db_lock);

/* All probed adapters; readers may run in softirq context, hence rwlock. */
static DEFINE_RWLOCK(adapter_list_lock);
static LIST_HEAD(adapter_list);

/* Active-open TID pool: at most 64K atids, numbered from a high base so
 * they cannot collide with hardware TID values. */
static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x100000;

/* True once offload has been opened on this device (OFFLOAD_DEVMAP_BIT). */
static inline int offload_activated(struct t3cdev *tdev)
{
	const struct adapter *adapter = tdev2adap(tdev);

	return (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map));
}
69
70/**
71 * cxgb3_register_client - register an offload client
72 * @client: the client
73 *
74 * Add the client to the client list,
75 * and call backs the client for each activated offload device
76 */
/**
 * cxgb3_register_client - register an offload client
 * @client: the client
 *
 * Add the client to the client list and invoke its add() callback for
 * every offload device that is already activated.
 */
void cxgb3_register_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_add_tail(&client->client_list, &client_list);

	if (client->add) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->add(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_register_client);
94
95/**
96 * cxgb3_unregister_client - unregister an offload client
97 * @client: the client
98 *
99 * Remove the client to the client list,
100 * and call backs the client for each activated offload device.
101 */
/**
 * cxgb3_unregister_client - unregister an offload client
 * @client: the client
 *
 * Remove the client from the client list and invoke its remove() callback
 * for every activated offload device.
 */
void cxgb3_unregister_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_del(&client->client_list);

	if (client->remove) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->remove(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_unregister_client);
119
120/**
121 * cxgb3_add_clients - activate registered clients for an offload device
122 * @tdev: the offload device
123 *
124 * Call backs all registered clients once a offload device is activated
125 */
/**
 * cxgb3_add_clients - activate registered clients for an offload device
 * @tdev: the offload device
 *
 * Invokes the add() callback of every registered client once an offload
 * device is activated.
 */
void cxgb3_add_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->add)
			client->add(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}
137
138/**
139 * cxgb3_remove_clients - deactivates registered clients
140 * for an offload device
141 * @tdev: the offload device
142 *
143 * Call backs all registered clients once a offload device is deactivated
144 */
/**
 * cxgb3_remove_clients - deactivate registered clients for an offload device
 * @tdev: the offload device
 *
 * Invokes the remove() callback of every registered client once an offload
 * device is deactivated.
 */
void cxgb3_remove_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->remove)
			client->remove(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}
156
/*
 * Find the net_device owning a given MAC address on this adapter.
 * If @vlan is a valid VLAN id the matching VLAN device is returned
 * (or NULL if no VLAN group is configured); otherwise the topmost
 * bonding/master device above the port is returned.  Returns NULL if no
 * port matches the MAC address.
 */
static struct net_device *get_iff_from_mac(struct adapter *adapter,
					   const unsigned char *mac,
					   unsigned int vlan)
{
	int i;

	for_each_port(adapter, i) {
		const struct vlan_group *grp;
		struct net_device *dev = adapter->port[i];
		const struct port_info *p = netdev_priv(dev);

		if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
			if (vlan && vlan != VLAN_VID_MASK) {
				grp = p->vlan_grp;
				dev = grp ? grp->vlan_devices[vlan] : NULL;
			} else
				/* walk up to the outermost master device */
				while (dev->master)
					dev = dev->master;
			return dev;
		}
	}
	return NULL;
}
180
/*
 * Handle iSCSI ULP control operations: report the adapter's iSCSI memory
 * window and PDU size limits, or program the DDP tag mask.
 * Returns 0 on success, -EOPNOTSUPP for unknown requests.
 */
static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
			      void *data)
{
	int ret = 0;
	struct ulp_iscsi_info *uiip = data;

	switch (req) {
	case ULP_ISCSI_GET_PARAMS:
		uiip->pdev = adapter->pdev;
		uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
		uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
		uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
		/*
		 * On tx, the iscsi pdu has to be <= tx page size and has to
		 * fit into the Tx PM FIFO.
		 */
		uiip->max_txsz = min(adapter->params.tp.tx_pg_size,
				     t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
		/* on rx, the iscsi pdu has to be < rx page size and the
		   whole pdu + cpl headers has to fit into one sge buffer */
		uiip->max_rxsz = min_t(unsigned int,
				       adapter->params.tp.rx_pg_size,
				       (adapter->sge.qs[0].fl[1].buf_size -
					sizeof(struct cpl_rx_data) * 2 -
					sizeof(struct cpl_rx_data_ddp)));
		break;
	case ULP_ISCSI_SET_PARAMS:
		t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
		break;
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}
215
216/* Response queue used for RDMA events. */
217#define ASYNC_NOTIF_RSPQ 0
218
219static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
220{
221 int ret = 0;
222
223 switch (req) {
224 case RDMA_GET_PARAMS:{
225 struct rdma_info *req = data;
226 struct pci_dev *pdev = adapter->pdev;
227
228 req->udbell_physbase = pci_resource_start(pdev, 2);
229 req->udbell_len = pci_resource_len(pdev, 2);
230 req->tpt_base =
231 t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
232 req->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
233 req->pbl_base =
234 t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
235 req->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
236 req->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
237 req->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
238 req->kdb_addr = adapter->regs + A_SG_KDOORBELL;
239 req->pdev = pdev;
240 break;
241 }
242 case RDMA_CQ_OP:{
243 unsigned long flags;
244 struct rdma_cq_op *req = data;
245
246 /* may be called in any context */
247 spin_lock_irqsave(&adapter->sge.reg_lock, flags);
248 ret = t3_sge_cqcntxt_op(adapter, req->id, req->op,
249 req->credits);
250 spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
251 break;
252 }
253 case RDMA_GET_MEM:{
254 struct ch_mem_range *t = data;
255 struct mc7 *mem;
256
257 if ((t->addr & 7) || (t->len & 7))
258 return -EINVAL;
259 if (t->mem_id == MEM_CM)
260 mem = &adapter->cm;
261 else if (t->mem_id == MEM_PMRX)
262 mem = &adapter->pmrx;
263 else if (t->mem_id == MEM_PMTX)
264 mem = &adapter->pmtx;
265 else
266 return -EINVAL;
267
268 ret =
269 t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
270 (u64 *) t->buf);
271 if (ret)
272 return ret;
273 break;
274 }
275 case RDMA_CQ_SETUP:{
276 struct rdma_cq_setup *req = data;
277
278 spin_lock_irq(&adapter->sge.reg_lock);
279 ret =
280 t3_sge_init_cqcntxt(adapter, req->id,
281 req->base_addr, req->size,
282 ASYNC_NOTIF_RSPQ,
283 req->ovfl_mode, req->credits,
284 req->credit_thres);
285 spin_unlock_irq(&adapter->sge.reg_lock);
286 break;
287 }
288 case RDMA_CQ_DISABLE:
289 spin_lock_irq(&adapter->sge.reg_lock);
290 ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
291 spin_unlock_irq(&adapter->sge.reg_lock);
292 break;
293 case RDMA_CTRL_QP_SETUP:{
294 struct rdma_ctrlqp_setup *req = data;
295
296 spin_lock_irq(&adapter->sge.reg_lock);
297 ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
298 SGE_CNTXT_RDMA,
299 ASYNC_NOTIF_RSPQ,
300 req->base_addr, req->size,
301 FW_RI_TID_START, 1, 0);
302 spin_unlock_irq(&adapter->sge.reg_lock);
303 break;
304 }
305 default:
306 ret = -EOPNOTSUPP;
307 }
308 return ret;
309}
310
/*
 * t3cdev control entry point.  Answers capability/configuration queries
 * directly and delegates iSCSI and RDMA requests to their sub-handlers
 * (which require offload to be running).  Returns 0 on success, -EAGAIN
 * if offload is not yet up, -EOPNOTSUPP for unknown requests.
 */
static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct tid_range *tid;
	struct mtutab *mtup;
	struct iff_mac *iffmacp;
	struct ddp_params *ddpp;
	struct adap_ports *ports;
	int i;

	switch (req) {
	case GET_MAX_OUTSTANDING_WR:
		*(unsigned int *)data = FW_WR_NUM;
		break;
	case GET_WR_LEN:
		*(unsigned int *)data = WR_FLITS;
		break;
	case GET_TX_MAX_CHUNK:
		*(unsigned int *)data = 1 << 20;	/* 1MB */
		break;
	case GET_TID_RANGE:
		/* TIDs occupy the MC5 space left after routes/filters/servers */
		tid = data;
		tid->num = t3_mc5_size(&adapter->mc5) -
		    adapter->params.mc5.nroutes -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
		tid->base = 0;
		break;
	case GET_STID_RANGE:
		tid = data;
		tid->num = adapter->params.mc5.nservers;
		tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
		break;
	case GET_L2T_CAPACITY:
		*(unsigned int *)data = 2048;
		break;
	case GET_MTUS:
		mtup = data;
		mtup->size = NMTUS;
		mtup->mtus = adapter->params.mtus;
		break;
	case GET_IFF_FROM_MAC:
		iffmacp = data;
		iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
						iffmacp->vlan_tag &
						VLAN_VID_MASK);
		break;
	case GET_DDP_PARAMS:
		ddpp = data;
		ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
		ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
		ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
		break;
	case GET_PORTS:
		ports = data;
		ports->nports = adapter->params.nports;
		for_each_port(adapter, i)
			ports->lldevs[i] = adapter->port[i];
		break;
	case ULP_ISCSI_GET_PARAMS:
	case ULP_ISCSI_SET_PARAMS:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_ulp_iscsi_ctl(adapter, req, data);
	case RDMA_GET_PARAMS:
	case RDMA_CQ_OP:
	case RDMA_CQ_SETUP:
	case RDMA_CQ_DISABLE:
	case RDMA_CTRL_QP_SETUP:
	case RDMA_GET_MEM:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_rdma_ctl(adapter, req, data);
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
389
390/*
391 * Dummy handler for Rx offload packets in case we get an offload packet before
392 * proper processing is setup. This complains and drops the packet as it isn't
393 * normal to get offload packets at this stage.
394 */
/*
 * Dummy handler for Rx offload packets in case we get an offload packet
 * before proper processing is setup.  Complains and drops the packets, as
 * offload traffic is not expected at this stage.
 */
static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
				int n)
{
	CH_ERR(tdev2adap(dev), "%d unexpected offload packets, first data %u\n",
	       n, ntohl(*(u32 *)skbs[0]->data));
	while (n--)
		dev_kfree_skb_any(skbs[n]);
	return 0;
}

/* No-op neighbour-update callback used until offload is activated. */
static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
{
}

/* Install the inert receive/neigh_update callbacks on a t3cdev. */
void cxgb3_set_dummy_ops(struct t3cdev *dev)
{
	dev->recv = rx_offload_blackhole;
	dev->neigh_update = dummy_neigh_update;
}
414
415/*
416 * Free an active-open TID.
417 */
/*
 * Free an active-open TID, returning it to the atid free list.
 * Returns the client context that was associated with the atid.
 */
void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union active_open_entry *p = atid2entry(t, atid);
	void *ctx = p->t3c_tid.ctx;	/* capture before entry is recycled */

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);

	return ctx;
}

EXPORT_SYMBOL(cxgb3_free_atid);
434
435/*
436 * Free a server TID and return it to the free pool.
437 */
/*
 * Free a server TID and return it to the free pool.
 */
void cxgb3_free_stid(struct t3cdev *tdev, int stid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union listen_entry *p = stid2entry(t, stid);

	spin_lock_bh(&t->stid_lock);
	p->next = t->sfree;
	t->sfree = p;
	t->stids_in_use--;
	spin_unlock_bh(&t->stid_lock);
}

EXPORT_SYMBOL(cxgb3_free_stid);
451
/*
 * Associate a hardware TID with a client and its per-connection context.
 * Caller must guarantee @tid is within range; no locking is done here.
 */
void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
		      void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	t->tid_tab[tid].client = client;
	t->tid_tab[tid].ctx = ctx;
	atomic_inc(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_insert_tid);
463
/*
 * Populate a TID_RELEASE WR.  The skb must already be properly sized.
 */
static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
{
	struct cpl_tid_release *req;

	skb->priority = CPL_PRIORITY_SETUP;
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
476
/*
 * Work handler that drains the deferred TID-release list, sending a
 * TID_RELEASE message for each queued entry.  The list lock is dropped
 * around the allocation/send since alloc_skb may sleep (__GFP_NOFAIL
 * guarantees the allocation eventually succeeds, so skb is never NULL).
 * Entries use ->ctx as the "next" link; it is cleared once released.
 */
static void t3_process_tid_release_list(struct work_struct *work)
{
	struct t3c_data *td = container_of(work, struct t3c_data,
					   tid_release_task);
	struct sk_buff *skb;
	struct t3cdev *tdev = td->dev;


	spin_lock_bh(&td->tid_release_lock);
	while (td->tid_release_list) {
		struct t3c_tid_entry *p = td->tid_release_list;

		td->tid_release_list = (struct t3c_tid_entry *)p->ctx;
		spin_unlock_bh(&td->tid_release_lock);

		skb = alloc_skb(sizeof(struct cpl_tid_release),
				GFP_KERNEL | __GFP_NOFAIL);
		/* entry index within tid_tab is the hardware TID */
		mk_tid_release(skb, p - td->tid_maps.tid_tab);
		cxgb3_ofld_send(tdev, skb);
		p->ctx = NULL;
		spin_lock_bh(&td->tid_release_lock);
	}
	spin_unlock_bh(&td->tid_release_lock);
}
501
/* use ctx as a next pointer in the tid release list */
void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
{
	struct t3c_data *td = T3C_DATA(tdev);
	struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];

	spin_lock_bh(&td->tid_release_lock);
	p->ctx = (void *)td->tid_release_list;
	td->tid_release_list = p;
	/* schedule the worker only on the empty->non-empty transition,
	 * i.e. when the old list head (now in p->ctx) was NULL */
	if (!p->ctx)
		schedule_work(&td->tid_release_task);
	spin_unlock_bh(&td->tid_release_lock);
}

EXPORT_SYMBOL(cxgb3_queue_tid_release);
517
/*
 * Remove a tid from the TID table.  A client may defer processing its last
 * CPL message if it is locked at the time it arrives, and while the message
 * sits in the client's backlog the TID may be reused for another connection.
 * To handle this we atomically switch the TID association if it still points
 * to the original client context.
 */
void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	BUG_ON(tid >= t->ntids);
	if (tdev->type == T3A)
		/* T3A: only clear the slot if it still belongs to @ctx */
		(void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
	else {
		struct sk_buff *skb;

		/* later chips want an explicit TID_RELEASE message;
		 * defer via the release list if the atomic alloc fails */
		skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
		if (likely(skb)) {
			mk_tid_release(skb, tid);
			cxgb3_ofld_send(tdev, skb);
			t->tid_tab[tid].ctx = NULL;
		} else
			cxgb3_queue_tid_release(tdev, tid);
	}
	atomic_dec(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_remove_tid);
547
/*
 * Allocate an active-open TID from the free list and bind it to a client
 * and context.  Returns the atid (offset by atid_base) or -1 if the pool
 * is exhausted.
 */
int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int atid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union active_open_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}

EXPORT_SYMBOL(cxgb3_alloc_atid);
569
/*
 * Allocate a server TID from the free list and bind it to a client and
 * context.  Returns the stid (offset by stid_base) or -1 if the pool is
 * exhausted.
 */
int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int stid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->stid_lock);
	if (t->sfree) {
		union listen_entry *p = t->sfree;

		stid = (p - t->stid_tab) + t->stid_base;
		t->sfree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}

EXPORT_SYMBOL(cxgb3_alloc_stid);
591
/* Log unexpected SMT write completion errors; always consumes the buffer. */
static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_smt_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

/* Log unexpected L2T write completion errors; always consumes the buffer. */
static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_l2t_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}
615
/*
 * Dispatch an active-open reply to the client that owns the atid.
 * If no client handler is installed the message is dropped and flagged
 * as bad.  Note this handler checks t3c_tid->client for NULL before
 * dereferencing it.
 */
static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = G_TID(ntohl(rpl->atid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
	if (t3c_tid->ctx && t3c_tid->client && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
		return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
								    t3c_tid->
								    ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_OPEN_RPL);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
634
635static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
636{
637 union opcode_tid *p = cplhdr(skb);
638 unsigned int stid = G_TID(ntohl(p->opcode_tid));
639 struct t3c_tid_entry *t3c_tid;
640
641 t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
642 if (t3c_tid->ctx && t3c_tid->client->handlers &&
643 t3c_tid->client->handlers[p->opcode]) {
644 return t3c_tid->client->handlers[p->opcode] (dev, skb,
645 t3c_tid->ctx);
646 } else {
647 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
648 dev->name, p->opcode);
649 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
650 }
651}
652
653static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
654{
655 union opcode_tid *p = cplhdr(skb);
656 unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
657 struct t3c_tid_entry *t3c_tid;
658
659 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
660 if (t3c_tid->ctx && t3c_tid->client->handlers &&
661 t3c_tid->client->handlers[p->opcode]) {
662 return t3c_tid->client->handlers[p->opcode]
663 (dev, skb, t3c_tid->ctx);
664 } else {
665 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
666 dev->name, p->opcode);
667 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
668 }
669}
670
671static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
672{
673 struct cpl_pass_accept_req *req = cplhdr(skb);
674 unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
675 struct t3c_tid_entry *t3c_tid;
676
677 t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
678 if (t3c_tid->ctx && t3c_tid->client->handlers &&
679 t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
680 return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
681 (dev, skb, t3c_tid->ctx);
682 } else {
683 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
684 dev->name, CPL_PASS_ACCEPT_REQ);
685 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
686 }
687}
688
689static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
690{
691 union opcode_tid *p = cplhdr(skb);
692 unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
693 struct t3c_tid_entry *t3c_tid;
694
695 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
696 if (t3c_tid->ctx && t3c_tid->client->handlers &&
697 t3c_tid->client->handlers[p->opcode]) {
698 return t3c_tid->client->handlers[p->opcode]
699 (dev, skb, t3c_tid->ctx);
700 } else {
701 struct cpl_abort_req_rss *req = cplhdr(skb);
702 struct cpl_abort_rpl *rpl;
703
704 struct sk_buff *skb =
705 alloc_skb(sizeof(struct cpl_abort_rpl), GFP_ATOMIC);
706 if (!skb) {
707 printk("do_abort_req_rss: couldn't get skb!\n");
708 goto out;
709 }
710 skb->priority = CPL_PRIORITY_DATA;
711 __skb_put(skb, sizeof(struct cpl_abort_rpl));
712 rpl = cplhdr(skb);
713 rpl->wr.wr_hi =
714 htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
715 rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req)));
716 OPCODE_TID(rpl) =
717 htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req)));
718 rpl->cmd = req->status;
719 cxgb3_ofld_send(dev, skb);
720out:
721 return CPL_RET_BUF_DONE;
722 }
723}
724
725static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
726{
727 struct cpl_act_establish *req = cplhdr(skb);
728 unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
729 struct t3c_tid_entry *t3c_tid;
730
731 t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
732 if (t3c_tid->ctx && t3c_tid->client->handlers &&
733 t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
734 return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
735 (dev, skb, t3c_tid->ctx);
736 } else {
737 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
738 dev->name, CPL_PASS_ACCEPT_REQ);
739 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
740 }
741}
742
/* Log unexpected SET_TCB_FIELD completion errors; always consumes the
 * buffer. */
static int do_set_tcb_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected SET_TCB_RPL status %u for tid %u\n",
		       rpl->status, GET_TID(rpl));
	return CPL_RET_BUF_DONE;
}
753
/*
 * Deliver a hardware trace packet up the regular network stack after
 * stripping the CPL trace header.  The 0xffff protocol marks it as an
 * unclassifiable frame for sniffers.
 */
static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_trace_pkt *p = cplhdr(skb);

	skb->protocol = 0xffff;
	skb->dev = dev->lldev;
	skb_pull(skb, sizeof(*p));
	skb->mac.raw = skb->data;
	netif_receive_skb(skb);
	return 0;
}
765
766static int do_term(struct t3cdev *dev, struct sk_buff *skb)
767{
768 unsigned int hwtid = ntohl(skb->priority) >> 8 & 0xfffff;
769 unsigned int opcode = G_OPCODE(ntohl(skb->csum));
770 struct t3c_tid_entry *t3c_tid;
771
772 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
773 if (t3c_tid->ctx && t3c_tid->client->handlers &&
774 t3c_tid->client->handlers[opcode]) {
775 return t3c_tid->client->handlers[opcode] (dev, skb,
776 t3c_tid->ctx);
777 } else {
778 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
779 dev->name, opcode);
780 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
781 }
782}
783
/*
 * Netevent notifier: propagate neighbour updates and route redirects into
 * the L2T and interested clients.  PMTU updates are intentionally ignored.
 */
static int nb_callback(struct notifier_block *self, unsigned long event,
		       void *ctx)
{
	switch (event) {
	case (NETEVENT_NEIGH_UPDATE):{
		cxgb_neigh_update((struct neighbour *)ctx);
		break;
	}
	case (NETEVENT_PMTU_UPDATE):
		break;
	case (NETEVENT_REDIRECT):{
		struct netevent_redirect *nr = ctx;
		cxgb_redirect(nr->old, nr->new);
		cxgb_neigh_update(nr->new->neighbour);
		break;
	}
	default:
		break;
	}
	return 0;
}

/* Registered while at least one adapter has offload active. */
static struct notifier_block nb = {
	.notifier_call = nb_callback
};
809
/*
 * Process a received packet with an unknown/unexpected CPL opcode.
 */
static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name,
	       *skb->data);
	return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
819
/*
 * Handlers for each CPL opcode
 */
static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];

/*
 * Add a new handler to the CPL dispatch table.  A NULL handler may be
 * supplied to unregister an existing handler (reverting the slot to
 * do_bad_cpl).
 */
void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
{
	if (opcode < NUM_CPL_CMDS)
		cpl_handlers[opcode] = h ? h : do_bad_cpl;
	else
		printk(KERN_ERR "T3C: handler registration for "
		       "opcode %x failed\n", opcode);
}

EXPORT_SYMBOL(t3_register_cpl_handler);
839
/*
 * T3CDEV's receive method: dispatch each skb to the handler registered
 * for its CPL opcode (stashed in skb->csum by SGE processing) and free
 * buffers the handler marks as consumed.
 */
int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
{
	while (n--) {
		struct sk_buff *skb = *skbs++;
		unsigned int opcode = G_OPCODE(ntohl(skb->csum));
		int ret = cpl_handlers[opcode] (dev, skb);

#if VALIDATE_TID
		if (ret & CPL_RET_UNKNOWN_TID) {
			union opcode_tid *p = cplhdr(skb);

			printk(KERN_ERR "%s: CPL message (opcode %u) had "
			       "unknown TID %u\n", dev->name, opcode,
			       G_TID(ntohl(p->opcode_tid)));
		}
#endif
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);
	}
	return 0;
}
864
/*
 * Sends an sk_buff to a T3C driver after dealing with any active network
 * taps.  BHs are disabled so the send path runs atomically with respect
 * to softirq processing.
 */
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
{
	int r;

	local_bh_disable();
	r = dev->send(dev, skb);
	local_bh_enable();
	return r;
}

EXPORT_SYMBOL(cxgb3_ofld_send);
879
/* Return 1 if @dev is a port of any registered cxgb3 adapter, else 0. */
static int is_offloading(struct net_device *dev)
{
	struct adapter *adapter;
	int i;

	read_lock_bh(&adapter_list_lock);
	list_for_each_entry(adapter, &adapter_list, adapter_list) {
		for_each_port(adapter, i) {
			if (dev == adapter->port[i]) {
				read_unlock_bh(&adapter_list_lock);
				return 1;
			}
		}
	}
	read_unlock_bh(&adapter_list_lock);
	return 0;
}
897
/* Push a neighbour-table update into the L2T if the neighbour's device
 * belongs to one of our adapters. */
void cxgb_neigh_update(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	if (dev && (is_offloading(dev))) {
		struct t3cdev *tdev = T3CDEV(dev);

		BUG_ON(!tdev);
		t3_l2t_update(tdev, neigh);
	}
}
909
/*
 * Send a SET_TCB_FIELD to repoint connection @tid's TCB at L2T entry @e.
 * Best-effort: if the atomic skb allocation fails the update is simply
 * logged and dropped.
 */
static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: cannot allocate skb!\n", __FUNCTION__);
		return;
	}
	skb->priority = CPL_PRIORITY_CONTROL;
	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = 0;		/* no completion wanted */
	req->cpu_idx = 0;
	req->word = htons(W_TCB_L2T_IX);
	req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
	req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
	tdev->send(tdev, skb);
}
931
/*
 * Handle a routing redirect from @old to @new.  Only supported when both
 * destinations resolve to the same offload device: allocate an L2T entry
 * for the new neighbour, then walk the TID table letting each client
 * decide (via its redirect callback) whether its connection's TCB should
 * be repointed at the new entry.
 */
void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
{
	struct net_device *olddev, *newdev;
	struct tid_info *ti;
	struct t3cdev *tdev;
	u32 tid;
	int update_tcb;
	struct l2t_entry *e;
	struct t3c_tid_entry *te;

	olddev = old->neighbour->dev;
	newdev = new->neighbour->dev;
	if (!is_offloading(olddev))
		return;
	if (!is_offloading(newdev)) {
		printk(KERN_WARNING "%s: Redirect to non-offload"
		       "device ignored.\n", __FUNCTION__);
		return;
	}
	tdev = T3CDEV(olddev);
	BUG_ON(!tdev);
	if (tdev != T3CDEV(newdev)) {
		printk(KERN_WARNING "%s: Redirect to different "
		       "offload device ignored.\n", __FUNCTION__);
		return;
	}

	/* Add new L2T entry */
	e = t3_l2t_get(tdev, new->neighbour, newdev);
	if (!e) {
		printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
		       __FUNCTION__);
		return;
	}

	/* Walk tid table and notify clients of dst change. */
	ti = &(T3C_DATA(tdev))->tid_maps;
	for (tid = 0; tid < ti->ntids; tid++) {
		te = lookup_tid(ti, tid);
		BUG_ON(!te);
		if (te->ctx && te->client && te->client->redirect) {
			update_tcb = te->client->redirect(te->ctx, old, new, e);
			if (update_tcb) {
				/* extra hold per connection now using e */
				l2t_hold(L2DATA(tdev), e);
				set_l2t_ix(tdev, tid, e);
			}
		}
	}
	/* drop the reference taken by t3_l2t_get */
	l2t_release(L2DATA(tdev), e);
}
982
/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.  Returns NULL if both allocators fail.
 */
void *cxgb_alloc_mem(unsigned long size)
{
	void *p = kmalloc(size, GFP_KERNEL);

	if (!p)
		p = vmalloc(size);
	if (p)
		memset(p, 0, size);
	return p;
}
997
/*
 * Free memory allocated through cxgb_alloc_mem().  The address range test
 * distinguishes vmalloc'ed from kmalloc'ed memory.
 */
void cxgb_free_mem(void *addr)
{
	unsigned long p = (unsigned long)addr;

	if (p >= VMALLOC_START && p < VMALLOC_END)
		vfree(addr);
	else
		kfree(addr);
}
1010
/*
 * Allocate and initialize the TID tables.  Returns 0 on success, -ENOMEM
 * on allocation failure.  All three tables (tid, stid, atid) live in one
 * contiguous allocation, carved up in that order; the stid/atid tables
 * are threaded into singly-linked free lists.
 */
static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
			 unsigned int natids, unsigned int nstids,
			 unsigned int atid_base, unsigned int stid_base)
{
	unsigned long size = ntids * sizeof(*t->tid_tab) +
	    natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);

	t->tid_tab = cxgb_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
	t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
	t->ntids = ntids;
	t->nstids = nstids;
	t->stid_base = stid_base;
	t->sfree = NULL;
	t->natids = natids;
	t->atid_base = atid_base;
	t->afree = NULL;
	t->stids_in_use = t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	/*
	 * Setup the free lists for stid_tab and atid_tab.
	 */
	if (nstids) {
		while (--nstids)
			t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
		t->sfree = t->stid_tab;
	}
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	return 0;
}
1054
/* Release the single allocation backing all three TID tables. */
static void free_tid_maps(struct tid_info *t)
{
	cxgb_free_mem(t->tid_tab);
}
1059
/* Add an adapter to the global adapter list (BH-safe write lock). */
static inline void add_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_add_tail(&adap->adapter_list, &adapter_list);
	write_unlock_bh(&adapter_list_lock);
}

/* Remove an adapter from the global adapter list (BH-safe write lock). */
static inline void remove_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_del(&adap->adapter_list);
	write_unlock_bh(&adapter_list_lock);
}
1073
1074int cxgb3_offload_activate(struct adapter *adapter)
1075{
1076 struct t3cdev *dev = &adapter->tdev;
1077 int natids, err;
1078 struct t3c_data *t;
1079 struct tid_range stid_range, tid_range;
1080 struct mtutab mtutab;
1081 unsigned int l2t_capacity;
1082
1083 t = kcalloc(1, sizeof(*t), GFP_KERNEL);
1084 if (!t)
1085 return -ENOMEM;
1086
1087 err = -EOPNOTSUPP;
1088 if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
1089 dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
1090 dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
1091 dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
1092 dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
1093 dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
1094 goto out_free;
1095
1096 err = -ENOMEM;
1097 L2DATA(dev) = t3_init_l2t(l2t_capacity);
1098 if (!L2DATA(dev))
1099 goto out_free;
1100
1101 natids = min(tid_range.num / 2, MAX_ATIDS);
1102 err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
1103 stid_range.num, ATID_BASE, stid_range.base);
1104 if (err)
1105 goto out_free_l2t;
1106
1107 t->mtus = mtutab.mtus;
1108 t->nmtus = mtutab.size;
1109
1110 INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
1111 spin_lock_init(&t->tid_release_lock);
1112 INIT_LIST_HEAD(&t->list_node);
1113 t->dev = dev;
1114
1115 T3C_DATA(dev) = t;
1116 dev->recv = process_rx;
1117 dev->neigh_update = t3_l2t_update;
1118
1119 /* Register netevent handler once */
1120 if (list_empty(&adapter_list))
1121 register_netevent_notifier(&nb);
1122
1123 add_adapter(adapter);
1124 return 0;
1125
1126out_free_l2t:
1127 t3_free_l2t(L2DATA(dev));
1128 L2DATA(dev) = NULL;
1129out_free:
1130 kfree(t);
1131 return err;
1132}
1133
/*
 * Undo cxgb3_offload_activate(): drop the adapter from the global list,
 * unregister the netevent notifier when the last adapter goes away, and
 * free the TID maps, L2 table and per-device offload state.
 */
void cxgb3_offload_deactivate(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;
	struct t3c_data *t = T3C_DATA(tdev);

	remove_adapter(adapter);
	if (list_empty(&adapter_list))
		unregister_netevent_notifier(&nb);

	free_tid_maps(&t->tid_maps);
	T3C_DATA(tdev) = NULL;
	t3_free_l2t(L2DATA(tdev));
	L2DATA(tdev) = NULL;
	kfree(t);
}
1149
/* Name the t3cdev and add it to the global offload-device list.  The
 * static unit counter is protected by cxgb3_db_lock. */
static inline void register_tdev(struct t3cdev *tdev)
{
	static int unit;

	mutex_lock(&cxgb3_db_lock);
	snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
	list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

/* Remove the t3cdev from the global offload-device list. */
static inline void unregister_tdev(struct t3cdev *tdev)
{
	mutex_lock(&cxgb3_db_lock);
	list_del(&tdev->ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}
1166
/*
 * Probe-time offload setup: install inert callbacks (real ones arrive
 * when offload is activated), wire up the send/ctl entry points, record
 * the chip revision, and register the device on the offload list.
 */
void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	INIT_LIST_HEAD(&tdev->ofld_dev_list);

	cxgb3_set_dummy_ops(tdev);
	tdev->send = t3_offload_tx;
	tdev->ctl = cxgb_offload_ctl;
	tdev->type = adapter->params.rev == 0 ? T3A : T3B;

	register_tdev(tdev);
}

/*
 * Remove-time offload teardown: clear the callbacks and take the device
 * off the offload list.
 */
void __devexit cxgb3_adapter_unofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	tdev->recv = NULL;
	tdev->neigh_update = NULL;

	unregister_tdev(tdev);
}
1190
/*
 * One-time module init: point every CPL opcode at do_bad_cpl, then
 * install the specific handlers for the opcodes this layer understands.
 */
void __init cxgb3_offload_init(void)
{
	int i;

	for (i = 0; i < NUM_CPL_CMDS; ++i)
		cpl_handlers[i] = do_bad_cpl;

	t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
	t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
	t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
	t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
	t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
	t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
	t3_register_cpl_handler(CPL_SET_TCB_RPL, do_set_tcb_rpl);
	t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
	t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
	t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
}
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
new file mode 100644
index 000000000000..0e6beb69ba17
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -0,0 +1,193 @@
1/*
2 * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _CXGB3_OFFLOAD_H
34#define _CXGB3_OFFLOAD_H
35
36#include <linux/list.h>
37#include <linux/skbuff.h>
38
39#include "l2t.h"
40
41#include "t3cdev.h"
42#include "t3_cpl.h"
43
44struct adapter;
45
46void cxgb3_offload_init(void);
47
48void cxgb3_adapter_ofld(struct adapter *adapter);
49void cxgb3_adapter_unofld(struct adapter *adapter);
50int cxgb3_offload_activate(struct adapter *adapter);
51void cxgb3_offload_deactivate(struct adapter *adapter);
52
53void cxgb3_set_dummy_ops(struct t3cdev *dev);
54
55/*
56 * Client registration. Users of T3 driver must register themselves.
57 * The T3 driver will call the add function of every client for each T3
58 * adapter activated, passing up the t3cdev ptr. Each client fills out an
59 * array of callback functions to process CPL messages.
60 */
61
62void cxgb3_register_client(struct cxgb3_client *client);
63void cxgb3_unregister_client(struct cxgb3_client *client);
64void cxgb3_add_clients(struct t3cdev *tdev);
65void cxgb3_remove_clients(struct t3cdev *tdev);
66
67typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev,
68 struct sk_buff *skb, void *ctx);
69
/*
 * Registration descriptor filled in by an upper-layer driver.  The T3
 * driver invokes add/remove for each offload device (see comment above),
 * and dispatches CPL messages through the client's handler table.
 */
70struct cxgb3_client {
	/* client name, used for identification */
71 char *name;
	/* called for each T3 offload device that becomes available */
72 void (*add) (struct t3cdev *);
	/* called for each T3 offload device that goes away */
73 void (*remove) (struct t3cdev *);
	/* per-CPL-opcode callbacks for messages belonging to this client */
74 cxgb3_cpl_handler_func *handlers;
	/* NOTE(review): presumably lets the client migrate state from dst
	 * 'old' to 'new' using the supplied L2T entry -- confirm with callers */
75 int (*redirect)(void *ctx, struct dst_entry *old,
76 struct dst_entry *new, struct l2t_entry *l2t);
	/* linkage on the driver's global list of registered clients */
77 struct list_head client_list;
78};
79
80/*
81 * TID allocation services.
82 */
83int cxgb3_alloc_atid(struct t3cdev *dev, struct cxgb3_client *client,
84 void *ctx);
85int cxgb3_alloc_stid(struct t3cdev *dev, struct cxgb3_client *client,
86 void *ctx);
87void *cxgb3_free_atid(struct t3cdev *dev, int atid);
88void cxgb3_free_stid(struct t3cdev *dev, int stid);
89void cxgb3_insert_tid(struct t3cdev *dev, struct cxgb3_client *client,
90 void *ctx, unsigned int tid);
91void cxgb3_queue_tid_release(struct t3cdev *dev, unsigned int tid);
92void cxgb3_remove_tid(struct t3cdev *dev, void *ctx, unsigned int tid);
93
/*
 * Per-TID bookkeeping: the client that owns the TID and the opaque context
 * the client registered for it (the same ctx passed to its CPL handlers).
 */
94struct t3c_tid_entry {
95 struct cxgb3_client *client;
96 void *ctx;
97};
98
99/* CPL message priority levels */
/*
 * Stored in skb->priority when a CPL message is queued for transmit (e.g.
 * l2t.c sets CPL_PRIORITY_CONTROL on L2T write requests).  Only two
 * distinct levels (0 and 1) exist; several names intentionally share a
 * value.
 */
100enum {
101 CPL_PRIORITY_DATA = 0, /* data messages */
102 CPL_PRIORITY_SETUP = 1, /* connection setup messages */
103 CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */
104 CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */
105 CPL_PRIORITY_ACK = 1, /* RX ACK messages */
106 CPL_PRIORITY_CONTROL = 1 /* offload control messages */
107};
108
109/* Flags for return value of CPL message handlers */
/*
 * Distinct bit values -- NOTE(review): presumably OR-able by handlers;
 * confirm in the dispatch code.
 */
110enum {
111 CPL_RET_BUF_DONE = 1, /* buffer processing done, buffer may be freed */
112 CPL_RET_BAD_MSG = 2, /* bad CPL message (e.g., unknown opcode) */
113 CPL_RET_UNKNOWN_TID = 4 /* unexpected unknown TID */
114};
115
116typedef int (*cpl_handler_func)(struct t3cdev *dev, struct sk_buff *skb);
117
118/*
119 * Returns a pointer to the first byte of the CPL header in an sk_buff that
120 * contains a CPL message.
121 */
122static inline void *cplhdr(struct sk_buff *skb)
123{
124 return skb->data;
125}
126
127void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h);
128
/*
 * A server (listen) TID slot: while allocated it holds the owning client's
 * t3c_tid_entry; while free it is a node on the stid free list (see
 * tid_info.sfree), overlaying the same storage.
 */
129union listen_entry {
130 struct t3c_tid_entry t3c_tid;
131 union listen_entry *next;
132};
133
/*
 * An active-open TID slot, same allocated/free-list overlay as
 * listen_entry (free list anchored at tid_info.afree).
 */
134union active_open_entry {
135 struct t3c_tid_entry t3c_tid;
136 union active_open_entry *next;
137};
138
139/*
140 * Holds the size, base address, free list start, etc of the TID, server TID,
141 * and active-open TID tables for an offload device.
142 * The tables themselves are allocated dynamically.
143 */
144struct tid_info {
	/* connection TID table */
145 struct t3c_tid_entry *tid_tab;
146 unsigned int ntids;
147 atomic_t tids_in_use;
148
	/* server (listen) TID table */
149 union listen_entry *stid_tab;
150 unsigned int nstids;
151 unsigned int stid_base;
152
	/* active-open TID table */
153 union active_open_entry *atid_tab;
154 unsigned int natids;
155 unsigned int atid_base;
156
157 /*
158 * The following members are accessed R/W so we put them in their own
159 * cache lines.
160 *
161 * XXX We could combine the atid fields above with the lock here since
162 * atids are used once (unlike other tids). OTOH the above fields are
163 * usually in cache due to tid_tab.
164 */
165 spinlock_t atid_lock ____cacheline_aligned_in_smp;
166 union active_open_entry *afree;
167 unsigned int atids_in_use;
168
169 spinlock_t stid_lock ____cacheline_aligned;
170 union listen_entry *sfree;
171 unsigned int stids_in_use;
172};
173
/*
 * Per-offload-device driver state, reached from a t3cdev via T3C_DATA().
 */
174struct t3c_data {
	/* node on the driver's global list of offload devices */
175 struct list_head list_node;
176 struct t3cdev *dev;
177 unsigned int tx_max_chunk; /* max payload for TX_DATA */
178 unsigned int max_wrs; /* max in-flight WRs per connection */
	/* MTU table: mtus[0..nmtus-1] */
179 unsigned int nmtus;
180 const unsigned short *mtus;
181 struct tid_info tid_maps;
182
	/* deferred TID release machinery */
183 struct t3c_tid_entry *tid_release_list;
184 spinlock_t tid_release_lock;
185 struct work_struct tid_release_task;
186};
187
188/*
189 * t3cdev -> t3c_data accessor
190 */
191#define T3C_DATA(dev) (*(struct t3c_data **)&(dev)->l4opt)
192
193#endif
diff --git a/drivers/net/cxgb3/firmware_exports.h b/drivers/net/cxgb3/firmware_exports.h
new file mode 100644
index 000000000000..6a835f6a262a
--- /dev/null
+++ b/drivers/net/cxgb3/firmware_exports.h
@@ -0,0 +1,177 @@
1/*
2 * Copyright (c) 2004-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef _FIRMWARE_EXPORTS_H_
33#define _FIRMWARE_EXPORTS_H_
34
35/* WR OPCODES supported by the firmware.
36 */
37#define FW_WROPCODE_FORWARD 0x01
38#define FW_WROPCODE_BYPASS 0x05
39
40#define FW_WROPCODE_TUNNEL_TX_PKT 0x03
41
/*
 * ULP_TX work request opcodes.
 * NOTE(review): "WROPOCDE" in FW_WROPOCDE_ULPTX_DATA_SGL is a typo for
 * "WROPCODE"; retained as-is because renaming a public macro would break
 * users of this header.
 */
42#define FW_WROPOCDE_ULPTX_DATA_SGL 0x00
43#define FW_WROPCODE_ULPTX_MEM_READ 0x02
44#define FW_WROPCODE_ULPTX_PKT 0x04
45#define FW_WROPCODE_ULPTX_INVALIDATE 0x06
46
47#define FW_WROPCODE_TUNNEL_RX_PKT 0x07
48
49#define FW_WROPCODE_OFLD_GETTCB_RPL 0x08
50#define FW_WROPCODE_OFLD_CLOSE_CON 0x09
51#define FW_WROPCODE_OFLD_TP_ABORT_CON_REQ 0x0A
52#define FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL 0x0F
53#define FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ 0x0B
54#define FW_WROPCODE_OFLD_TP_ABORT_CON_RPL 0x0C
55#define FW_WROPCODE_OFLD_TX_DATA 0x0D
56#define FW_WROPCODE_OFLD_TX_DATA_ACK 0x0E
57
58#define FW_WROPCODE_RI_RDMA_INIT 0x10
59#define FW_WROPCODE_RI_RDMA_WRITE 0x11
60#define FW_WROPCODE_RI_RDMA_READ_REQ 0x12
61#define FW_WROPCODE_RI_RDMA_READ_RESP 0x13
62#define FW_WROPCODE_RI_SEND 0x14
63#define FW_WROPCODE_RI_TERMINATE 0x15
64#define FW_WROPCODE_RI_RDMA_READ 0x16
65#define FW_WROPCODE_RI_RECEIVE 0x17
66#define FW_WROPCODE_RI_BIND_MW 0x18
67#define FW_WROPCODE_RI_FASTREGISTER_MR 0x19
68#define FW_WROPCODE_RI_LOCAL_INV 0x1A
69#define FW_WROPCODE_RI_MODIFY_QP 0x1B
70#define FW_WROPCODE_RI_BYPASS 0x1C
71
/* NOTE(review): "WROPOCDE" is a typo for "WROPCODE"; kept for source compat. */
72#define FW_WROPOCDE_RSVD 0x1E
73
74#define FW_WROPCODE_SGE_EGRESSCONTEXT_RR 0x1F
75
76#define FW_WROPCODE_MNGT 0x1D
77#define FW_MNGTOPCODE_PKTSCHED_SET 0x00
78
79/* Maximum size of a WR sent from the host, limited by the SGE.
80 *
81 * Note: WR coming from ULP or TP are only limited by CIM.
82 */
83#define FW_WR_SIZE 128
84
85/* Maximum number of outstanding WRs sent from the host. Value must be
86 * programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by
87 * offload modules to limit the number of WRs per connection.
88 */
89#define FW_T3_WR_NUM 16
90#define FW_N3_WR_NUM 7
91
92#ifndef N3
93# define FW_WR_NUM FW_T3_WR_NUM
94#else
95# define FW_WR_NUM FW_N3_WR_NUM
96#endif
97
98/* FW_TUNNEL_NUM corresponds to the number of supported TUNNEL Queues. These
99 * queues must start at SGE Egress Context FW_TUNNEL_SGEEC_START and must
100 * start at 'TID' (or 'uP Token') FW_TUNNEL_TID_START.
101 *
102 * Ingress Traffic (e.g. DMA completion credit) for TUNNEL Queue[i] is sent
103 * to RESP Queue[i].
104 */
105#define FW_TUNNEL_NUM 8
106#define FW_TUNNEL_SGEEC_START 8
107#define FW_TUNNEL_TID_START 65544
108
109/* FW_CTRL_NUM corresponds to the number of supported CTRL Queues. These queues
110 * must start at SGE Egress Context FW_CTRL_SGEEC_START and must start at 'TID'
111 * (or 'uP Token') FW_CTRL_TID_START.
112 *
113 * Ingress Traffic for CTRL Queue[i] is sent to RESP Queue[i].
114 */
115#define FW_CTRL_NUM 8
116#define FW_CTRL_SGEEC_START 65528
117#define FW_CTRL_TID_START 65536
118
119/* FW_OFLD_NUM corresponds to the number of supported OFFLOAD Queues. These
120 * queues must start at SGE Egress Context FW_OFLD_SGEEC_START.
121 *
122 * Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for
123 * OFFLOAD Queues, as the host is responsible for providing the correct TID in
124 * every WR.
125 *
126 * Ingress Traffic for OFFLOAD Queue[i] is sent to RESP Queue[i].
127 */
128#define FW_OFLD_NUM 8
129#define FW_OFLD_SGEEC_START 0
130
/*
 * RI queue parameters -- NOTE(review): "RI" presumably means the RDMA
 * Interface, inferred from the FW_WROPCODE_RI_* opcode names above;
 * confirm against firmware documentation.
 */
134#define FW_RI_NUM 1
135#define FW_RI_SGEEC_START 65527
136#define FW_RI_TID_START 65552
137
138/*
139 * The RX_PKT_TID
140 */
141#define FW_RX_PKT_NUM 1
142#define FW_RX_PKT_TID_START 65553
143
144/* FW_WRC_NUM corresponds to the number of Work Request Context that supported
145 * by the firmware.
146 */
147#define FW_WRC_NUM \
148 (65536 + FW_TUNNEL_NUM + FW_CTRL_NUM + FW_RI_NUM + FW_RX_PKT_NUM)
149
150/*
151 * FW type and version.
152 */
153#define S_FW_VERSION_TYPE 28
154#define M_FW_VERSION_TYPE 0xF
155#define V_FW_VERSION_TYPE(x) ((x) << S_FW_VERSION_TYPE)
156#define G_FW_VERSION_TYPE(x) \
157 (((x) >> S_FW_VERSION_TYPE) & M_FW_VERSION_TYPE)
158
159#define S_FW_VERSION_MAJOR 16
160#define M_FW_VERSION_MAJOR 0xFFF
161#define V_FW_VERSION_MAJOR(x) ((x) << S_FW_VERSION_MAJOR)
162#define G_FW_VERSION_MAJOR(x) \
163 (((x) >> S_FW_VERSION_MAJOR) & M_FW_VERSION_MAJOR)
164
165#define S_FW_VERSION_MINOR 8
166#define M_FW_VERSION_MINOR 0xFF
167#define V_FW_VERSION_MINOR(x) ((x) << S_FW_VERSION_MINOR)
168#define G_FW_VERSION_MINOR(x) \
169 (((x) >> S_FW_VERSION_MINOR) & M_FW_VERSION_MINOR)
170
171#define S_FW_VERSION_MICRO 0
172#define M_FW_VERSION_MICRO 0xFF
173#define V_FW_VERSION_MICRO(x) ((x) << S_FW_VERSION_MICRO)
174#define G_FW_VERSION_MICRO(x) \
175 (((x) >> S_FW_VERSION_MICRO) & M_FW_VERSION_MICRO)
176
177#endif /* _FIRMWARE_EXPORTS_H_ */
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
new file mode 100644
index 000000000000..3c0cb8557058
--- /dev/null
+++ b/drivers/net/cxgb3/l2t.c
@@ -0,0 +1,450 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <linux/if.h>
36#include <linux/if_vlan.h>
37#include <linux/jhash.h>
38#include <net/neighbour.h>
39#include "common.h"
40#include "t3cdev.h"
41#include "cxgb3_defs.h"
42#include "l2t.h"
43#include "t3_cpl.h"
44#include "firmware_exports.h"
45
46#define VLAN_NONE 0xfff
47
48/*
49 * Module locking notes: There is a RW lock protecting the L2 table as a
50 * whole plus a spinlock per L2T entry. Entry lookups and allocations happen
51 * under the protection of the table lock, individual entry changes happen
52 * while holding that entry's spinlock. The table lock nests outside the
53 * entry locks. Allocations of new entries take the table lock as writers so
54 * no other lookups can happen while allocating new entries. Entry updates
55 * take the table lock as readers so multiple entries can be updated in
56 * parallel. An L2T entry can be dropped by decrementing its reference count
57 * and therefore can happen in parallel with entry allocation but no entry
58 * can change state or increment its ref count during allocation as both of
59 * these perform lookups.
60 */
61
62static inline unsigned int vlan_prio(const struct l2t_entry *e)
63{
64 return e->vlan >> 13;
65}
66
67static inline unsigned int arp_hash(u32 key, int ifindex,
68 const struct l2t_data *d)
69{
70 return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
71}
72
73static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)
74{
75 neigh_hold(n);
76 if (e->neigh)
77 neigh_release(e->neigh);
78 e->neigh = n;
79}
80
81/*
82 * Set up an L2T entry and send any packets waiting in the arp queue. The
83 * supplied skb is used for the CPL_L2T_WRITE_REQ. Must be called with the
84 * entry locked.
 *
 * Builds a CPL_L2T_WRITE_REQ that programs HW L2 table slot e->idx with the
 * neighbour's MAC, VLAN id and priority, sends it at control priority, then
 * drains e's arp queue to the device and marks the entry VALID.  Returns 0
 * on success, or -ENOMEM if no skb was supplied and none could be allocated.
85 */
86static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
87 struct l2t_entry *e)
88{
89 struct cpl_l2t_write_req *req;
90
	/* caller may pass NULL; allocate the request skb ourselves then */
91 if (!skb) {
92 skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
93 if (!skb)
94 return -ENOMEM;
95 }
96
97 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
98 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
99 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
100 req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
101 V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
102 V_L2T_W_PRIO(vlan_prio(e)));
	/* snapshot the neighbour's MAC into the entry, then into the request */
103 memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
104 memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
105 skb->priority = CPL_PRIORITY_CONTROL;
106 cxgb3_ofld_send(dev, skb);
	/* the HW slot is being written: flush everything that was queued
	 * waiting for address resolution */
107 while (e->arpq_head) {
108 skb = e->arpq_head;
109 e->arpq_head = skb->next;
110 skb->next = NULL;
111 cxgb3_ofld_send(dev, skb);
112 }
113 e->arpq_tail = NULL;
114 e->state = L2T_STATE_VALID;
115
116 return 0;
117}
118
119/*
120 * Add a packet to the an L2T entry's queue of packets awaiting resolution.
121 * Must be called with the entry's lock held.
122 */
123static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
124{
125 skb->next = NULL;
126 if (e->arpq_head)
127 e->arpq_tail->next = skb;
128 else
129 e->arpq_head = skb;
130 e->arpq_tail = skb;
131}
132
/*
 * Slow-path transmit for a packet whose L2T entry is not known-valid.
 * Revalidates STALE entries, queues packets on RESOLVING entries and kicks
 * neighbour resolution.  Returns cxgb3_ofld_send()'s result when the packet
 * is sent immediately, otherwise 0 (packet queued, or allocation failed and
 * resolution will be retried by the next packet).
 */
133int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
134 struct l2t_entry *e)
135{
136again:
137 switch (e->state) {
138 case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
139 neigh_event_send(e->neigh, NULL);
140 spin_lock_bh(&e->lock);
141 if (e->state == L2T_STATE_STALE)
142 e->state = L2T_STATE_VALID;
143 spin_unlock_bh(&e->lock);
		/* fall through: a stale entry may still be used to send */
144 case L2T_STATE_VALID: /* fast-path, send the packet on */
145 return cxgb3_ofld_send(dev, skb);
146 case L2T_STATE_RESOLVING:
147 spin_lock_bh(&e->lock);
148 if (e->state != L2T_STATE_RESOLVING) {
149 /* ARP already completed */
150 spin_unlock_bh(&e->lock);
151 goto again;
152 }
153 arpq_enqueue(e, skb);
154 spin_unlock_bh(&e->lock);
155
156 /*
157 * Only the first packet added to the arpq should kick off
158 * resolution. However, because the alloc_skb below can fail,
159 * we allow each packet added to the arpq to retry resolution
160 * as a way of recovering from transient memory exhaustion.
161 * A better way would be to use a work request to retry L2T
162 * entries when there's no memory.
163 */
164 if (!neigh_event_send(e->neigh, NULL)) {
			/* neighbour already resolved: program the HW now */
165 skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
166 GFP_ATOMIC);
167 if (!skb)
168 break;
169
170 spin_lock_bh(&e->lock);
171 if (e->arpq_head)
172 setup_l2e_send_pending(dev, skb, e);
173 else /* we lost the race */
174 __kfree_skb(skb);
175 spin_unlock_bh(&e->lock);
176 }
177 }
178 return 0;
179}
180
181EXPORT_SYMBOL(t3_l2t_send_slow);
182
/*
 * Like t3_l2t_send_slow() but with no packet to transmit: just nudge the
 * entry's state (revalidate a STALE entry, kick resolution if RESOLVING).
 */
183void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
184{
185again:
186 switch (e->state) {
187 case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
188 neigh_event_send(e->neigh, NULL);
189 spin_lock_bh(&e->lock);
190 if (e->state == L2T_STATE_STALE) {
191 e->state = L2T_STATE_VALID;
192 }
193 spin_unlock_bh(&e->lock);
194 return;
195 case L2T_STATE_VALID: /* already up to date, nothing to do */
196 return;
197 case L2T_STATE_RESOLVING:
198 spin_lock_bh(&e->lock);
199 if (e->state != L2T_STATE_RESOLVING) {
200 /* ARP already completed */
201 spin_unlock_bh(&e->lock);
202 goto again;
203 }
204 spin_unlock_bh(&e->lock);
205
		/*
		 * Nothing is queued here, so simply (re-)kick neighbour
		 * resolution; t3_l2t_update() will program the HW entry when
		 * resolution completes.
		 */
214 neigh_event_send(e->neigh, NULL);
215 }
216 return;
217}
218
219EXPORT_SYMBOL(t3_l2t_send_event);
220
221/*
222 * Allocate a free L2T entry. Must be called with l2t_data.lock held.
 * Scans from d->rover for an entry with refcount 0; nfree guarantees one
 * exists.  If the reclaimed entry was previously in use it is unhooked
 * from its old hash chain before being returned.
223 */
224static struct l2t_entry *alloc_l2e(struct l2t_data *d)
225{
226 struct l2t_entry *end, *e, **p;
227
228 if (!atomic_read(&d->nfree))
229 return NULL;
230
231 /* there's definitely a free entry */
232 for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
233 if (atomic_read(&e->refcnt) == 0)
234 goto found;
235
	/* wrap around: restart from l2tab[1]; slot 0 is never used (see
	 * t3_init_l2t), so this unguarded scan must find a free entry */
236 for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e) ;
237found:
238 d->rover = e + 1;
239 atomic_dec(&d->nfree);
240
241 /*
242 * The entry we found may be an inactive entry that is
243 * presently in the hash table. We need to remove it.
244 */
245 if (e->state != L2T_STATE_UNUSED) {
246 int hash = arp_hash(e->addr, e->ifindex, d);
247
248 for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
249 if (*p == e) {
250 *p = e->next;
251 break;
252 }
253 e->state = L2T_STATE_UNUSED;
254 }
255 return e;
256}
257
258/*
259 * Called when an L2T entry has no more users. The entry is left in the hash
260 * table since it is likely to be reused but we also bump nfree to indicate
261 * that the entry can be reallocated for a different neighbor. We also drop
262 * the existing neighbor reference in case the neighbor is going away and is
263 * waiting on our reference.
264 *
265 * Because entries can be reallocated to other neighbors once their ref count
266 * drops to 0 we need to take the entry's lock to avoid races with a new
267 * incarnation.
 *
 * Note that nfree is incremented outside the entry lock; it is only a hint
 * consumed under the table lock in alloc_l2e().
268 */
269void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
270{
271 spin_lock_bh(&e->lock);
272 if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
273 if (e->neigh) {
274 neigh_release(e->neigh);
275 e->neigh = NULL;
276 }
277 }
278 spin_unlock_bh(&e->lock);
279 atomic_inc(&d->nfree);
280}
281
282EXPORT_SYMBOL(t3_l2e_free);
283
284/*
285 * Update an L2T entry that was previously used for the same next hop as neigh.
286 * Must be called with softirqs disabled.
 * Re-derives the entry state from the neighbour's current NUD state and MAC.
287 */
288static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
289{
290 unsigned int nud_state;
291
292 spin_lock(&e->lock); /* avoid race with t3_l2e_free */
293
294 if (neigh != e->neigh)
295 neigh_replace(e, neigh);
296 nud_state = neigh->nud_state;
	/* MAC changed or neighbour not valid: must re-resolve before use */
297 if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
298 !(nud_state & NUD_VALID))
299 e->state = L2T_STATE_RESOLVING;
300 else if (nud_state & NUD_CONNECTED)
301 e->state = L2T_STATE_VALID;
302 else
303 e->state = L2T_STATE_STALE;
304 spin_unlock(&e->lock);
305}
306
/*
 * Look up or allocate an L2T entry for (neighbour, egress port).  Takes a
 * reference on the returned entry; drop it with l2t_release().  Returns
 * NULL when no free entry is available.  Runs under the table write lock.
 */
307struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
308 struct net_device *dev)
309{
310 struct l2t_entry *e;
311 struct l2t_data *d = L2DATA(cdev);
312 u32 addr = *(u32 *) neigh->primary_key;
313 int ifidx = neigh->dev->ifindex;
314 int hash = arp_hash(addr, ifidx, d);
315 struct port_info *p = netdev_priv(dev);
316 int smt_idx = p->port_id;
317
318 write_lock_bh(&d->lock);
319 for (e = d->l2tab[hash].first; e; e = e->next)
320 if (e->addr == addr && e->ifindex == ifidx &&
321 e->smt_idx == smt_idx) {
322 l2t_hold(d, e);
			/* first user after refcnt hit 0: resync with neigh */
323 if (atomic_read(&e->refcnt) == 1)
324 reuse_entry(e, neigh);
325 goto done;
326 }
327
328 /* Need to allocate a new entry */
329 e = alloc_l2e(d);
330 if (e) {
331 spin_lock(&e->lock); /* avoid race with t3_l2e_free */
332 e->next = d->l2tab[hash].first;
333 d->l2tab[hash].first = e;
334 e->state = L2T_STATE_RESOLVING;
335 e->addr = addr;
336 e->ifindex = ifidx;
337 e->smt_idx = smt_idx;
338 atomic_set(&e->refcnt, 1);
339 neigh_replace(e, neigh);
340 if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
341 e->vlan = VLAN_DEV_INFO(neigh->dev)->vlan_id;
342 else
343 e->vlan = VLAN_NONE;
344 spin_unlock(&e->lock);
345 }
346done:
347 write_unlock_bh(&d->lock);
348 return e;
349}
350
351EXPORT_SYMBOL(t3_l2t_get);
352
353/*
354 * Called when address resolution fails for an L2T entry to handle packets
355 * on the arpq head. If a packet specifies a failure handler it is invoked,
356 * otherwise the packets is sent to the offload device.
357 *
358 * XXX: maybe we should abandon the latter behavior and just require a failure
359 * handler.
360 */
361static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff *arpq)
362{
363 while (arpq) {
364 struct sk_buff *skb = arpq;
365 struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
366
367 arpq = skb->next;
368 skb->next = NULL;
369 if (cb->arp_failure_handler)
370 cb->arp_failure_handler(dev, skb);
371 else
372 cxgb3_ofld_send(dev, skb);
373 }
374}
375
376/*
377 * Called when the host's ARP layer makes a change to some entry that is
378 * loaded into the HW L2 table.
 *
 * Locking: the table is scanned under read_lock_bh().  When a match is
 * found, the entry's spinlock is taken *before* the table lock is dropped
 * with a plain read_unlock() -- bottom halves stay disabled until the
 * final spin_unlock_bh() on the entry.
379 */
380void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
381{
382 struct l2t_entry *e;
383 struct sk_buff *arpq = NULL;
384 struct l2t_data *d = L2DATA(dev);
385 u32 addr = *(u32 *) neigh->primary_key;
386 int ifidx = neigh->dev->ifindex;
387 int hash = arp_hash(addr, ifidx, d);
388
389 read_lock_bh(&d->lock);
390 for (e = d->l2tab[hash].first; e; e = e->next)
391 if (e->addr == addr && e->ifindex == ifidx) {
392 spin_lock(&e->lock);
393 goto found;
394 }
395 read_unlock_bh(&d->lock);
396 return;
397
398found:
	/* plain unlock: BH must remain disabled while e->lock is held */
399 read_unlock(&d->lock);
400 if (atomic_read(&e->refcnt)) {
401 if (neigh != e->neigh)
402 neigh_replace(e, neigh);
403
404 if (e->state == L2T_STATE_RESOLVING) {
405 if (neigh->nud_state & NUD_FAILED) {
				/* resolution failed: detach the arpq and
				 * process it after dropping the lock */
406 arpq = e->arpq_head;
407 e->arpq_head = e->arpq_tail = NULL;
408 } else if (neigh_is_connected(neigh))
409 setup_l2e_send_pending(dev, NULL, e);
410 } else {
411 e->state = neigh_is_connected(neigh) ?
412 L2T_STATE_VALID : L2T_STATE_STALE;
413 if (memcmp(e->dmac, neigh->ha, 6))
414 setup_l2e_send_pending(dev, NULL, e);
415 }
416 }
417 spin_unlock_bh(&e->lock);
418
419 if (arpq)
420 handle_failed_resolution(dev, arpq);
421}
422
/*
 * Allocate and initialize an L2 table with l2t_capacity entries; entry 0
 * is reserved and never allocated.  Returns NULL on allocation failure.
 * NOTE(review): the hash-chain ('first'/'next') and arpq pointers are
 * never explicitly cleared here, so this relies on cxgb_alloc_mem()
 * returning zeroed memory -- confirm.
 */
423struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
424{
425 struct l2t_data *d;
426 int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);
427
428 d = cxgb_alloc_mem(size);
429 if (!d)
430 return NULL;
431
432 d->nentries = l2t_capacity;
433 d->rover = &d->l2tab[1]; /* entry 0 is not used */
434 atomic_set(&d->nfree, l2t_capacity - 1);
435 rwlock_init(&d->lock);
436
437 for (i = 0; i < l2t_capacity; ++i) {
438 d->l2tab[i].idx = i;
439 d->l2tab[i].state = L2T_STATE_UNUSED;
440 spin_lock_init(&d->l2tab[i].lock);
441 atomic_set(&d->l2tab[i].refcnt, 0);
442 }
443 return d;
444}
445
/*
 * Release an L2 table previously allocated by t3_init_l2t().
 */
void t3_free_l2t(struct l2t_data *d)
{
	cxgb_free_mem(d);
}
450
diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
new file mode 100644
index 000000000000..ba5d2cbd7241
--- /dev/null
+++ b/drivers/net/cxgb3/l2t.h
@@ -0,0 +1,143 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _CHELSIO_L2T_H
34#define _CHELSIO_L2T_H
35
36#include <linux/spinlock.h>
37#include "t3cdev.h"
38#include <asm/atomic.h>
39
/* Lifecycle states of an L2T entry (see l2t.c for the transitions). */
40enum {
41 L2T_STATE_VALID, /* entry is up to date */
42 L2T_STATE_STALE, /* entry may be used but needs revalidation */
43 L2T_STATE_RESOLVING, /* entry needs address resolution */
44 L2T_STATE_UNUSED /* entry not in use */
45};
46
47struct neighbour;
48struct sk_buff;
49
50/*
51 * Each L2T entry plays multiple roles. First of all, it keeps state for the
52 * corresponding entry of the HW L2 table and maintains a queue of offload
53 * packets awaiting address resolution. Second, it is a node of a hash table
54 * chain, where the nodes of the chain are linked together through their next
55 * pointer. Finally, each node is a bucket of a hash table, pointing to the
56 * first element in its chain through its first pointer.
57 */
58struct l2t_entry {
59 u16 state; /* entry state */
60 u16 idx; /* entry index */
61 u32 addr; /* dest IP address */
62 int ifindex; /* neighbor's net_device's ifindex */
63 u16 smt_idx; /* SMT index */
64 u16 vlan; /* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
65 struct neighbour *neigh; /* associated neighbour */
66 struct l2t_entry *first; /* start of hash chain */
67 struct l2t_entry *next; /* next l2t_entry on chain */
68 struct sk_buff *arpq_head; /* queue of packets awaiting resolution */
69 struct sk_buff *arpq_tail; /* tail of the arp queue */
70 spinlock_t lock; /* protects state, neigh and the arpq */
71 atomic_t refcnt; /* entry reference count */
72 u8 dmac[6]; /* neighbour's MAC address */
73};
74
/*
 * The L2 table proper: nentries l2t_entry slots allocated inline after
 * this header (slot 0 is reserved/unused; see t3_init_l2t()).
 */
75struct l2t_data {
76 unsigned int nentries; /* number of entries */
77 struct l2t_entry *rover; /* starting point for next allocation */
78 atomic_t nfree; /* number of free entries */
79 rwlock_t lock; /* protects lookups and hash chains */
80 struct l2t_entry l2tab[0];
81};
82
83typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
84 struct sk_buff * skb);
85
86/*
87 * Callback stored in an skb to handle address resolution failure.
88 */
89struct l2t_skb_cb {
90 arp_failure_handler_func arp_failure_handler;
91};
92
93#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
94
95static inline void set_arp_failure_handler(struct sk_buff *skb,
96 arp_failure_handler_func hnd)
97{
98 L2T_SKB_CB(skb)->arp_failure_handler = hnd;
99}
100
101/*
102 * Getting to the L2 data from an offload device.
103 */
104#define L2DATA(dev) ((dev)->l2opt)
105
106#define W_TCB_L2T_IX 0
107#define S_TCB_L2T_IX 7
108#define M_TCB_L2T_IX 0x7ffULL
109#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
110
111void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
112void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
113struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
114 struct net_device *dev);
115int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
116 struct l2t_entry *e);
117void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
118struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
119void t3_free_l2t(struct l2t_data *d);
120
121int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
122
123static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
124 struct l2t_entry *e)
125{
126 if (likely(e->state == L2T_STATE_VALID))
127 return cxgb3_ofld_send(dev, skb);
128 return t3_l2t_send_slow(dev, skb, e);
129}
130
131static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e)
132{
133 if (atomic_dec_and_test(&e->refcnt))
134 t3_l2e_free(d, e);
135}
136
137static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
138{
139 if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
140 atomic_dec(&d->nfree);
141}
142
143#endif
diff --git a/drivers/net/cxgb3/mc5.c b/drivers/net/cxgb3/mc5.c
new file mode 100644
index 000000000000..644d62ea86a6
--- /dev/null
+++ b/drivers/net/cxgb3/mc5.c
@@ -0,0 +1,473 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33#include "regs.h"
34
/* TCAM part codes, as assembled from the MC5 configuration register */
enum {
	IDT75P52100 = 4,
	IDT75N43102 = 5
};

/* DBGI command mode */
enum {
	DBGI_MODE_MBUS = 0,
	DBGI_MODE_IDT52100 = 5
};

/* IDT 75P52100 commands */
#define IDT_CMD_READ 0
#define IDT_CMD_WRITE 1
#define IDT_CMD_SEARCH 2
#define IDT_CMD_LEARN 3

/* IDT LAR register address and value for 144-bit mode (low 32 bits) */
#define IDT_LAR_ADR0 0x180006
#define IDT_LAR_MODE144 0xffff0000

/* IDT SCR and SSR addresses (low 32 bits) */
#define IDT_SCR_ADR0 0x180000
#define IDT_SSR0_ADR0 0x180002
#define IDT_SSR1_ADR0 0x180004

/* IDT GMR base address (low 32 bits) */
#define IDT_GMR_BASE_ADR0 0x180020

/* IDT data and mask array base addresses (low 32 bits) */
#define IDT_DATARY_BASE_ADR0 0
#define IDT_MSKARY_BASE_ADR0 0x80000

/* IDT 75N43102 commands */
#define IDT4_CMD_SEARCH144 3
#define IDT4_CMD_WRITE 4
#define IDT4_CMD_READ 5

/* IDT 75N43102 SCR address (low 32 bits) */
#define IDT4_SCR_ADR0 0x3

/* IDT 75N43102 GMR base addresses (low 32 bits) */
#define IDT4_GMR_BASE0 0x10
#define IDT4_GMR_BASE1 0x20
#define IDT4_GMR_BASE2 0x30

/* IDT 75N43102 data and mask array base addresses (low 32 bits) */
#define IDT4_DATARY_BASE_ADR0 0x1000000
#define IDT4_MSKARY_BASE_ADR0 0x2000000

/* Polls of the DBGI response-valid bit before a write is declared failed */
#define MAX_WRITE_ATTEMPTS 5

/* Upper bound on the routing region size accepted by t3_mc5_init() */
#define MAX_ROUTES 2048

88
/*
 * Issue a command to the TCAM and wait for its completion.  The address and
 * any data required by the command must have been setup by the caller.
 * Returns 0 on success, non-zero if F_DBGIRSPVALID does not assert within
 * MAX_WRITE_ATTEMPTS polls.
 */
static int mc5_cmd_write(struct adapter *adapter, u32 cmd)
{
	t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_CMD, cmd);
	return t3_wait_op_done(adapter, A_MC5_DB_DBGI_RSP_STATUS,
			       F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1);
}
99
/* Load the three DBGI request address registers with @v1, @v2, @v3. */
static inline void dbgi_wr_addr3(struct adapter *adapter, u32 v1, u32 v2,
				 u32 v3)
{
	t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, v1);
	t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR1, v2);
	t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR2, v3);
}
107
/* Load the three DBGI request data registers with @v1, @v2, @v3. */
static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2,
				 u32 v3)
{
	t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA0, v1);
	t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA1, v2);
	t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3);
}
115
/* Read the three DBGI response data registers into *@v1, *@v2, *@v3. */
static inline void dbgi_rd_rsp3(struct adapter *adapter, u32 *v1, u32 *v2,
				u32 *v3)
{
	*v1 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA0);
	*v2 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA1);
	*v3 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA2);
}
123
124/*
125 * Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM
126 * command cmd. The data to be written must have been set up by the caller.
127 * Returns -1 on failure, 0 on success.
128 */
129static int mc5_write(struct adapter *adapter, u32 addr_lo, u32 cmd)
130{
131 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, addr_lo);
132 if (mc5_cmd_write(adapter, cmd) == 0)
133 return 0;
134 CH_ERR(adapter, "MC5 timeout writing to TCAM address 0x%x\n",
135 addr_lo);
136 return -1;
137}
138
/*
 * Clear the TCAM data array and program the mask array.  Entries at or
 * above the server region start get a different DATA0 mask depending on
 * the operating mode.  @addr_shift converts a 72-bit entry index into the
 * part's addressing granularity.  Returns 0 on success, -1 if any TCAM
 * write times out.
 */
static int init_mask_data_array(struct mc5 *mc5, u32 mask_array_base,
				u32 data_array_base, u32 write_cmd,
				int addr_shift)
{
	unsigned int i;
	struct adapter *adap = mc5->adapter;

	/*
	 * We need the size of the TCAM data and mask arrays in terms of
	 * 72-bit entries.
	 */
	unsigned int size72 = mc5->tcam_size;
	unsigned int server_base = t3_read_reg(adap, A_MC5_DB_SERVER_INDEX);

	if (mc5->mode == MC5_MODE_144_BIT) {
		size72 *= 2;	/* 1 144-bit entry is 2 72-bit entries */
		server_base *= 2;
	}

	/* Clear the data array */
	dbgi_wr_data3(adap, 0, 0, 0);
	for (i = 0; i < size72; i++)
		if (mc5_write(adap, data_array_base + (i << addr_shift),
			      write_cmd))
			return -1;

	/* Initialize the mask array. */
	dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
	for (i = 0; i < size72; i++) {
		if (i == server_base)	/* entering server or routing region */
			t3_write_reg(adap, A_MC5_DB_DBGI_REQ_DATA0,
				     mc5->mode == MC5_MODE_144_BIT ?
				     0xfffffff9 : 0xfffffffd);
		if (mc5_write(adap, mask_array_base + (i << addr_shift),
			      write_cmd))
			return -1;
	}
	return 0;
}
178
/*
 * Program an IDT 75P52100 TCAM: response latencies, the TCAM command used
 * for each MC5 operation, the LAR (selecting 144-bit mode), the SSRs, the
 * 32 GMRs, and the SCR, then initialize the data and mask arrays.
 * Returns 0 on success, -EIO if any TCAM write times out.
 */
static int init_idt52100(struct mc5 *mc5)
{
	int i;
	struct adapter *adap = mc5->adapter;

	t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
		     V_RDLAT(0x15) | V_LRNLAT(0x15) | V_SRCHLAT(0x15));
	t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 2);

	/*
	 * Use GMRs 14-15 for ELOOKUP, GMRs 12-13 for SYN lookups, and
	 * GMRs 8-9 for ACK- and AOPEN searches.
	 */
	t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT_CMD_WRITE);
	t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT_CMD_WRITE);
	t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD, IDT_CMD_SEARCH);
	t3_write_reg(adap, A_MC5_DB_AOPEN_LRN_CMD, IDT_CMD_LEARN);
	t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT_CMD_SEARCH | 0x6000);
	t3_write_reg(adap, A_MC5_DB_SYN_LRN_CMD, IDT_CMD_LEARN);
	t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT_CMD_SEARCH);
	t3_write_reg(adap, A_MC5_DB_ACK_LRN_CMD, IDT_CMD_LEARN);
	t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT_CMD_SEARCH);
	t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT_CMD_SEARCH | 0x7000);
	t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT_CMD_WRITE);
	t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT_CMD_READ);

	/* Set DBGI command mode for IDT TCAM. */
	t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);

	/* Set up LAR */
	dbgi_wr_data3(adap, IDT_LAR_MODE144, 0, 0);
	if (mc5_write(adap, IDT_LAR_ADR0, IDT_CMD_WRITE))
		goto err;

	/* Set up SSRs */
	dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0);
	if (mc5_write(adap, IDT_SSR0_ADR0, IDT_CMD_WRITE) ||
	    mc5_write(adap, IDT_SSR1_ADR0, IDT_CMD_WRITE))
		goto err;

	/* Set up GMRs: GMRs 12-15 get special masks, the rest are all-ones */
	for (i = 0; i < 32; ++i) {
		if (i >= 12 && i < 15)
			dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
		else if (i == 15)
			dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
		else
			dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);

		if (mc5_write(adap, IDT_GMR_BASE_ADR0 + i, IDT_CMD_WRITE))
			goto err;
	}

	/* Set up SCR */
	dbgi_wr_data3(adap, 1, 0, 0);
	if (mc5_write(adap, IDT_SCR_ADR0, IDT_CMD_WRITE))
		goto err;

	return init_mask_data_array(mc5, IDT_MSKARY_BASE_ADR0,
				    IDT_DATARY_BASE_ADR0, IDT_CMD_WRITE, 0);
err:
	return -EIO;
}
242
/*
 * Program an IDT 75N43102 TCAM: response latencies (chip-revision
 * dependent), the TCAM command used for each MC5 operation, the GMRs, and
 * the SCR, then initialize the data and mask arrays.  Returns 0 on
 * success, -EIO if any TCAM write times out.
 */
static int init_idt43102(struct mc5 *mc5)
{
	int i;
	struct adapter *adap = mc5->adapter;

	t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
		     adap->params.rev == 0 ? V_RDLAT(0xd) | V_SRCHLAT(0x11) :
		     V_RDLAT(0xd) | V_SRCHLAT(0x12));

	/*
	 * Use GMRs 24-25 for ELOOKUP, GMRs 20-21 for SYN lookups, and no mask
	 * for ACK- and AOPEN searches.
	 */
	t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT4_CMD_WRITE);
	t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT4_CMD_WRITE);
	t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD,
		     IDT4_CMD_SEARCH144 | 0x3800);
	t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT4_CMD_SEARCH144);
	t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT4_CMD_SEARCH144 | 0x3800);
	t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x3800);
	t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x800);
	t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT4_CMD_WRITE);
	t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT4_CMD_READ);

	t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 3);

	/* Set DBGI command mode for IDT TCAM. */
	t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);

	/* Set up GMRs: most get an all-ones mask... */
	dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
	for (i = 0; i < 7; ++i)
		if (mc5_write(adap, IDT4_GMR_BASE0 + i, IDT4_CMD_WRITE))
			goto err;

	for (i = 0; i < 4; ++i)
		if (mc5_write(adap, IDT4_GMR_BASE2 + i, IDT4_CMD_WRITE))
			goto err;

	/* ...while selected GMRs in the BASE1 bank get special masks. */
	dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
	if (mc5_write(adap, IDT4_GMR_BASE1, IDT4_CMD_WRITE) ||
	    mc5_write(adap, IDT4_GMR_BASE1 + 1, IDT4_CMD_WRITE) ||
	    mc5_write(adap, IDT4_GMR_BASE1 + 4, IDT4_CMD_WRITE))
		goto err;

	dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
	if (mc5_write(adap, IDT4_GMR_BASE1 + 5, IDT4_CMD_WRITE))
		goto err;

	/* Set up SCR */
	dbgi_wr_data3(adap, 0xf0000000, 0, 0);
	if (mc5_write(adap, IDT4_SCR_ADR0, IDT4_CMD_WRITE))
		goto err;

	return init_mask_data_array(mc5, IDT4_MSKARY_BASE_ADR0,
				    IDT4_DATARY_BASE_ADR0, IDT4_CMD_WRITE, 1);
err:
	return -EIO;
}
302
/* Put MC5 in DBGI mode (direct debug access to the TCAM). */
static inline void mc5_dbgi_mode_enable(const struct mc5 *mc5)
{
	t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
		     V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_DBGIEN);
}
309
/*
 * Put MC5 back in M-Bus (normal operation) mode, restoring the mode,
 * compression, and parity settings.
 */
static void mc5_dbgi_mode_disable(const struct mc5 *mc5)
{
	t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
		     V_TMMODE(mc5->mode == MC5_MODE_72_BIT) |
		     V_COMPEN(mc5->mode == MC5_MODE_72_BIT) |
		     V_PRTYEN(mc5->parity_enabled) | F_MBUSEN);
}
318
/*
 * Initialization that requires the OS and protocol layers to already
 * be initialized goes here.
 *
 * Resets the TCAM, partitions it from the top of the address space down
 * into routing, filter, and server regions, and runs the part-specific
 * initialization.  Returns 0 on success, -EINVAL for bad region sizes or
 * an unsupported part, -1 if the TCAM reset times out, or the
 * part-specific initializer's error code.
 */
int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
		unsigned int nroutes)
{
	u32 cfg;
	int err;
	unsigned int tcam_size = mc5->tcam_size;
	struct adapter *adap = mc5->adapter;

	/* The three regions together must fit in the TCAM. */
	if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size)
		return -EINVAL;

	/* Reset the TCAM */
	cfg = t3_read_reg(adap, A_MC5_DB_CONFIG) & ~F_TMMODE;
	cfg |= V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_TMRST;
	t3_write_reg(adap, A_MC5_DB_CONFIG, cfg);
	if (t3_wait_op_done(adap, A_MC5_DB_CONFIG, F_TMRDY, 1, 500, 0)) {
		CH_ERR(adap, "TCAM reset timed out\n");
		return -1;
	}

	/* Partition top-down: routes, then filters, then servers. */
	t3_write_reg(adap, A_MC5_DB_ROUTING_TABLE_INDEX, tcam_size - nroutes);
	t3_write_reg(adap, A_MC5_DB_FILTER_TABLE,
		     tcam_size - nroutes - nfilters);
	t3_write_reg(adap, A_MC5_DB_SERVER_INDEX,
		     tcam_size - nroutes - nfilters - nservers);

	mc5->parity_enabled = 1;

	/* All the TCAM addresses we access have only the low 32 bits non 0 */
	t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR1, 0);
	t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR2, 0);

	mc5_dbgi_mode_enable(mc5);

	/* Dispatch to the initializer for the detected TCAM part. */
	switch (mc5->part_type) {
	case IDT75P52100:
		err = init_idt52100(mc5);
		break;
	case IDT75N43102:
		err = init_idt43102(mc5);
		break;
	default:
		CH_ERR(adap, "Unsupported TCAM type %d\n", mc5->part_type);
		err = -EINVAL;
		break;
	}

	mc5_dbgi_mode_disable(mc5);
	return err;
}
373
374/*
375 * read_mc5_range - dump a part of the memory managed by MC5
376 * @mc5: the MC5 handle
377 * @start: the start address for the dump
378 * @n: number of 72-bit words to read
379 * @buf: result buffer
380 *
381 * Read n 72-bit words from MC5 memory from the given start location.
382 */
383int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start,
384 unsigned int n, u32 *buf)
385{
386 u32 read_cmd;
387 int err = 0;
388 struct adapter *adap = mc5->adapter;
389
390 if (mc5->part_type == IDT75P52100)
391 read_cmd = IDT_CMD_READ;
392 else if (mc5->part_type == IDT75N43102)
393 read_cmd = IDT4_CMD_READ;
394 else
395 return -EINVAL;
396
397 mc5_dbgi_mode_enable(mc5);
398
399 while (n--) {
400 t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR0, start++);
401 if (mc5_cmd_write(adap, read_cmd)) {
402 err = -EIO;
403 break;
404 }
405 dbgi_rd_rsp3(adap, buf + 2, buf + 1, buf);
406 buf += 3;
407 }
408
409 mc5_dbgi_mode_disable(mc5);
410 return 0;
411}
412
/* Interrupt causes treated as unrecoverable adapter errors */
#define MC5_INT_FATAL (F_PARITYERR | F_REQQPARERR | F_DISPQPARERR)

/*
 * MC5 interrupt handler
 *
 * Reads the interrupt cause register, logs and counts each asserted
 * cause, escalates fatal causes via t3_fatal_err(), and finally writes
 * the cause value back to acknowledge the handled interrupts.
 */
void t3_mc5_intr_handler(struct mc5 *mc5)
{
	struct adapter *adap = mc5->adapter;
	u32 cause = t3_read_reg(adap, A_MC5_DB_INT_CAUSE);

	/* Parity errors are only reported once parity has been enabled. */
	if ((cause & F_PARITYERR) && mc5->parity_enabled) {
		CH_ALERT(adap, "MC5 parity error\n");
		mc5->stats.parity_err++;
	}

	if (cause & F_REQQPARERR) {
		CH_ALERT(adap, "MC5 request queue parity error\n");
		mc5->stats.reqq_parity_err++;
	}

	if (cause & F_DISPQPARERR) {
		CH_ALERT(adap, "MC5 dispatch queue parity error\n");
		mc5->stats.dispq_parity_err++;
	}

	/* Non-fatal causes are only accounted in the statistics. */
	if (cause & F_ACTRGNFULL)
		mc5->stats.active_rgn_full++;
	if (cause & F_NFASRCHFAIL)
		mc5->stats.nfa_srch_err++;
	if (cause & F_UNKNOWNCMD)
		mc5->stats.unknown_cmd++;
	if (cause & F_DELACTEMPTY)
		mc5->stats.del_act_empty++;
	if (cause & MC5_INT_FATAL)
		t3_fatal_err(adap);

	/* Acknowledge everything we observed. */
	t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause);
}
451
452void __devinit t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode)
453{
454#define K * 1024
455
456 static unsigned int tcam_part_size[] = { /* in K 72-bit entries */
457 64 K, 128 K, 256 K, 32 K
458 };
459
460#undef K
461
462 u32 cfg = t3_read_reg(adapter, A_MC5_DB_CONFIG);
463
464 mc5->adapter = adapter;
465 mc5->mode = (unsigned char)mode;
466 mc5->part_type = (unsigned char)G_TMTYPE(cfg);
467 if (cfg & F_TMTYPEHI)
468 mc5->part_type |= 4;
469
470 mc5->tcam_size = tcam_part_size[G_TMPARTSIZE(cfg)];
471 if (mode == MC5_MODE_144_BIT)
472 mc5->tcam_size /= 2;
473}
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
new file mode 100644
index 000000000000..b56c5f52bcdc
--- /dev/null
+++ b/drivers/net/cxgb3/regs.h
@@ -0,0 +1,2195 @@
/*
 * T3 register definitions: A_* are register addresses; for each field,
 * S_* is the bit offset, M_* the (unshifted) mask, V_*(x) shifts a value
 * into place, F_* is a single-bit flag, and G_*(x) extracts a field.
 */

/* Scatter-gather (SG) engine registers */
#define A_SG_CONTROL 0x0

#define S_DROPPKT 20
#define V_DROPPKT(x) ((x) << S_DROPPKT)
#define F_DROPPKT V_DROPPKT(1U)

#define S_EGRGENCTRL 19
#define V_EGRGENCTRL(x) ((x) << S_EGRGENCTRL)
#define F_EGRGENCTRL V_EGRGENCTRL(1U)

#define S_USERSPACESIZE 14
#define M_USERSPACESIZE 0x1f
#define V_USERSPACESIZE(x) ((x) << S_USERSPACESIZE)

#define S_HOSTPAGESIZE 11
#define M_HOSTPAGESIZE 0x7
#define V_HOSTPAGESIZE(x) ((x) << S_HOSTPAGESIZE)

#define S_FLMODE 9
#define V_FLMODE(x) ((x) << S_FLMODE)
#define F_FLMODE V_FLMODE(1U)

#define S_PKTSHIFT 6
#define M_PKTSHIFT 0x7
#define V_PKTSHIFT(x) ((x) << S_PKTSHIFT)

#define S_ONEINTMULTQ 5
#define V_ONEINTMULTQ(x) ((x) << S_ONEINTMULTQ)
#define F_ONEINTMULTQ V_ONEINTMULTQ(1U)

#define S_BIGENDIANINGRESS 2
#define V_BIGENDIANINGRESS(x) ((x) << S_BIGENDIANINGRESS)
#define F_BIGENDIANINGRESS V_BIGENDIANINGRESS(1U)

#define S_ISCSICOALESCING 1
#define V_ISCSICOALESCING(x) ((x) << S_ISCSICOALESCING)
#define F_ISCSICOALESCING V_ISCSICOALESCING(1U)

#define S_GLOBALENABLE 0
#define V_GLOBALENABLE(x) ((x) << S_GLOBALENABLE)
#define F_GLOBALENABLE V_GLOBALENABLE(1U)

#define S_AVOIDCQOVFL 24
#define V_AVOIDCQOVFL(x) ((x) << S_AVOIDCQOVFL)
#define F_AVOIDCQOVFL V_AVOIDCQOVFL(1U)

#define S_OPTONEINTMULTQ 23
#define V_OPTONEINTMULTQ(x) ((x) << S_OPTONEINTMULTQ)
#define F_OPTONEINTMULTQ V_OPTONEINTMULTQ(1U)

#define S_CQCRDTCTRL 22
#define V_CQCRDTCTRL(x) ((x) << S_CQCRDTCTRL)
#define F_CQCRDTCTRL V_CQCRDTCTRL(1U)

#define A_SG_KDOORBELL 0x4

#define S_SELEGRCNTX 31
#define V_SELEGRCNTX(x) ((x) << S_SELEGRCNTX)
#define F_SELEGRCNTX V_SELEGRCNTX(1U)

#define S_EGRCNTX 0
#define M_EGRCNTX 0xffff
#define V_EGRCNTX(x) ((x) << S_EGRCNTX)

#define A_SG_GTS 0x8

#define S_RSPQ 29
#define M_RSPQ 0x7
#define V_RSPQ(x) ((x) << S_RSPQ)
#define G_RSPQ(x) (((x) >> S_RSPQ) & M_RSPQ)

#define S_NEWTIMER 16
#define M_NEWTIMER 0x1fff
#define V_NEWTIMER(x) ((x) << S_NEWTIMER)

#define S_NEWINDEX 0
#define M_NEWINDEX 0xffff
#define V_NEWINDEX(x) ((x) << S_NEWINDEX)

#define A_SG_CONTEXT_CMD 0xc

#define S_CONTEXT_CMD_OPCODE 28
#define M_CONTEXT_CMD_OPCODE 0xf
#define V_CONTEXT_CMD_OPCODE(x) ((x) << S_CONTEXT_CMD_OPCODE)

#define S_CONTEXT_CMD_BUSY 27
#define V_CONTEXT_CMD_BUSY(x) ((x) << S_CONTEXT_CMD_BUSY)
#define F_CONTEXT_CMD_BUSY V_CONTEXT_CMD_BUSY(1U)

#define S_CQ_CREDIT 20

#define M_CQ_CREDIT 0x7f

#define V_CQ_CREDIT(x) ((x) << S_CQ_CREDIT)

#define G_CQ_CREDIT(x) (((x) >> S_CQ_CREDIT) & M_CQ_CREDIT)

#define S_CQ 19

#define V_CQ(x) ((x) << S_CQ)
#define F_CQ V_CQ(1U)

#define S_RESPONSEQ 18
#define V_RESPONSEQ(x) ((x) << S_RESPONSEQ)
#define F_RESPONSEQ V_RESPONSEQ(1U)

#define S_EGRESS 17
#define V_EGRESS(x) ((x) << S_EGRESS)
#define F_EGRESS V_EGRESS(1U)

#define S_FREELIST 16
#define V_FREELIST(x) ((x) << S_FREELIST)
#define F_FREELIST V_FREELIST(1U)

#define S_CONTEXT 0
#define M_CONTEXT 0xffff
#define V_CONTEXT(x) ((x) << S_CONTEXT)

#define G_CONTEXT(x) (((x) >> S_CONTEXT) & M_CONTEXT)

#define A_SG_CONTEXT_DATA0 0x10

#define A_SG_CONTEXT_DATA1 0x14

#define A_SG_CONTEXT_DATA2 0x18

#define A_SG_CONTEXT_DATA3 0x1c

#define A_SG_CONTEXT_MASK0 0x20

#define A_SG_CONTEXT_MASK1 0x24

#define A_SG_CONTEXT_MASK2 0x28

#define A_SG_CONTEXT_MASK3 0x2c

#define A_SG_RSPQ_CREDIT_RETURN 0x30

#define S_CREDITS 0
#define M_CREDITS 0xffff
#define V_CREDITS(x) ((x) << S_CREDITS)

#define A_SG_DATA_INTR 0x34

#define S_ERRINTR 31
#define V_ERRINTR(x) ((x) << S_ERRINTR)
#define F_ERRINTR V_ERRINTR(1U)

#define A_SG_HI_DRB_HI_THRSH 0x38

#define A_SG_HI_DRB_LO_THRSH 0x3c

#define A_SG_LO_DRB_HI_THRSH 0x40

#define A_SG_LO_DRB_LO_THRSH 0x44

#define A_SG_RSPQ_FL_STATUS 0x4c

#define S_RSPQ0DISABLED 8

#define A_SG_EGR_RCQ_DRB_THRSH 0x54

#define S_HIRCQDRBTHRSH 16
#define M_HIRCQDRBTHRSH 0x7ff
#define V_HIRCQDRBTHRSH(x) ((x) << S_HIRCQDRBTHRSH)

#define S_LORCQDRBTHRSH 0
#define M_LORCQDRBTHRSH 0x7ff
#define V_LORCQDRBTHRSH(x) ((x) << S_LORCQDRBTHRSH)

#define A_SG_EGR_CNTX_BADDR 0x58

#define A_SG_INT_CAUSE 0x5c

#define S_RSPQDISABLED 3
#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
#define F_RSPQDISABLED V_RSPQDISABLED(1U)

#define S_RSPQCREDITOVERFOW 2
#define V_RSPQCREDITOVERFOW(x) ((x) << S_RSPQCREDITOVERFOW)
#define F_RSPQCREDITOVERFOW V_RSPQCREDITOVERFOW(1U)

#define A_SG_INT_ENABLE 0x60

#define A_SG_CMDQ_CREDIT_TH 0x64

#define S_TIMEOUT 8
#define M_TIMEOUT 0xffffff
#define V_TIMEOUT(x) ((x) << S_TIMEOUT)

#define S_THRESHOLD 0
#define M_THRESHOLD 0xff
#define V_THRESHOLD(x) ((x) << S_THRESHOLD)

#define A_SG_TIMER_TICK 0x68

#define A_SG_CQ_CONTEXT_BADDR 0x6c

#define A_SG_OCO_BASE 0x70

#define S_BASE1 16
#define M_BASE1 0xffff
#define V_BASE1(x) ((x) << S_BASE1)

#define A_SG_DRB_PRI_THRESH 0x74
206
/* PCI-X interface registers */
#define A_PCIX_INT_ENABLE 0x80

#define S_MSIXPARERR 22
#define M_MSIXPARERR 0x7

#define V_MSIXPARERR(x) ((x) << S_MSIXPARERR)

#define S_CFPARERR 18
#define M_CFPARERR 0xf

#define V_CFPARERR(x) ((x) << S_CFPARERR)

#define S_RFPARERR 14
#define M_RFPARERR 0xf

#define V_RFPARERR(x) ((x) << S_RFPARERR)

#define S_WFPARERR 12
#define M_WFPARERR 0x3

#define V_WFPARERR(x) ((x) << S_WFPARERR)

#define S_PIOPARERR 11
#define V_PIOPARERR(x) ((x) << S_PIOPARERR)
#define F_PIOPARERR V_PIOPARERR(1U)

#define S_DETUNCECCERR 10
#define V_DETUNCECCERR(x) ((x) << S_DETUNCECCERR)
#define F_DETUNCECCERR V_DETUNCECCERR(1U)

#define S_DETCORECCERR 9
#define V_DETCORECCERR(x) ((x) << S_DETCORECCERR)
#define F_DETCORECCERR V_DETCORECCERR(1U)

#define S_RCVSPLCMPERR 8
#define V_RCVSPLCMPERR(x) ((x) << S_RCVSPLCMPERR)
#define F_RCVSPLCMPERR V_RCVSPLCMPERR(1U)

#define S_UNXSPLCMP 7
#define V_UNXSPLCMP(x) ((x) << S_UNXSPLCMP)
#define F_UNXSPLCMP V_UNXSPLCMP(1U)

#define S_SPLCMPDIS 6
#define V_SPLCMPDIS(x) ((x) << S_SPLCMPDIS)
#define F_SPLCMPDIS V_SPLCMPDIS(1U)

#define S_DETPARERR 5
#define V_DETPARERR(x) ((x) << S_DETPARERR)
#define F_DETPARERR V_DETPARERR(1U)

#define S_SIGSYSERR 4
#define V_SIGSYSERR(x) ((x) << S_SIGSYSERR)
#define F_SIGSYSERR V_SIGSYSERR(1U)

#define S_RCVMSTABT 3
#define V_RCVMSTABT(x) ((x) << S_RCVMSTABT)
#define F_RCVMSTABT V_RCVMSTABT(1U)

#define S_RCVTARABT 2
#define V_RCVTARABT(x) ((x) << S_RCVTARABT)
#define F_RCVTARABT V_RCVTARABT(1U)

#define S_SIGTARABT 1
#define V_SIGTARABT(x) ((x) << S_SIGTARABT)
#define F_SIGTARABT V_SIGTARABT(1U)

#define S_MSTDETPARERR 0
#define V_MSTDETPARERR(x) ((x) << S_MSTDETPARERR)
#define F_MSTDETPARERR V_MSTDETPARERR(1U)

#define A_PCIX_INT_CAUSE 0x84

#define A_PCIX_CFG 0x88

#define S_CLIDECEN 18
#define V_CLIDECEN(x) ((x) << S_CLIDECEN)
#define F_CLIDECEN V_CLIDECEN(1U)

#define A_PCIX_MODE 0x8c

#define S_PCLKRANGE 6
#define M_PCLKRANGE 0x3
#define V_PCLKRANGE(x) ((x) << S_PCLKRANGE)
#define G_PCLKRANGE(x) (((x) >> S_PCLKRANGE) & M_PCLKRANGE)

#define S_PCIXINITPAT 2
#define M_PCIXINITPAT 0xf
#define V_PCIXINITPAT(x) ((x) << S_PCIXINITPAT)
#define G_PCIXINITPAT(x) (((x) >> S_PCIXINITPAT) & M_PCIXINITPAT)

#define S_64BIT 0
#define V_64BIT(x) ((x) << S_64BIT)
#define F_64BIT V_64BIT(1U)

/*
 * PCI Express interface registers.  These share the PCI-X address range;
 * an adapter carries one core or the other.
 */
#define A_PCIE_INT_ENABLE 0x80

#define S_BISTERR 15
#define M_BISTERR 0xff

#define V_BISTERR(x) ((x) << S_BISTERR)

#define S_PCIE_MSIXPARERR 12
#define M_PCIE_MSIXPARERR 0x7

#define V_PCIE_MSIXPARERR(x) ((x) << S_PCIE_MSIXPARERR)

#define S_PCIE_CFPARERR 11
#define V_PCIE_CFPARERR(x) ((x) << S_PCIE_CFPARERR)
#define F_PCIE_CFPARERR V_PCIE_CFPARERR(1U)

#define S_PCIE_RFPARERR 10
#define V_PCIE_RFPARERR(x) ((x) << S_PCIE_RFPARERR)
#define F_PCIE_RFPARERR V_PCIE_RFPARERR(1U)

#define S_PCIE_WFPARERR 9
#define V_PCIE_WFPARERR(x) ((x) << S_PCIE_WFPARERR)
#define F_PCIE_WFPARERR V_PCIE_WFPARERR(1U)

#define S_PCIE_PIOPARERR 8
#define V_PCIE_PIOPARERR(x) ((x) << S_PCIE_PIOPARERR)
#define F_PCIE_PIOPARERR V_PCIE_PIOPARERR(1U)

#define S_UNXSPLCPLERRC 7
#define V_UNXSPLCPLERRC(x) ((x) << S_UNXSPLCPLERRC)
#define F_UNXSPLCPLERRC V_UNXSPLCPLERRC(1U)

#define S_UNXSPLCPLERRR 6
#define V_UNXSPLCPLERRR(x) ((x) << S_UNXSPLCPLERRR)
#define F_UNXSPLCPLERRR V_UNXSPLCPLERRR(1U)

#define S_PEXERR 0
#define V_PEXERR(x) ((x) << S_PEXERR)
#define F_PEXERR V_PEXERR(1U)

#define A_PCIE_INT_CAUSE 0x84

#define A_PCIE_CFG 0x88

#define S_PCIE_CLIDECEN 16
#define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
#define F_PCIE_CLIDECEN V_PCIE_CLIDECEN(1U)

#define S_CRSTWRMMODE 0
#define V_CRSTWRMMODE(x) ((x) << S_CRSTWRMMODE)
#define F_CRSTWRMMODE V_CRSTWRMMODE(1U)

#define A_PCIE_MODE 0x8c

#define S_NUMFSTTRNSEQRX 10
#define M_NUMFSTTRNSEQRX 0xff
#define V_NUMFSTTRNSEQRX(x) ((x) << S_NUMFSTTRNSEQRX)
#define G_NUMFSTTRNSEQRX(x) (((x) >> S_NUMFSTTRNSEQRX) & M_NUMFSTTRNSEQRX)

#define A_PCIE_PEX_CTRL0 0x98

#define S_NUMFSTTRNSEQ 22
#define M_NUMFSTTRNSEQ 0xff
#define V_NUMFSTTRNSEQ(x) ((x) << S_NUMFSTTRNSEQ)
#define G_NUMFSTTRNSEQ(x) (((x) >> S_NUMFSTTRNSEQ) & M_NUMFSTTRNSEQ)

#define S_REPLAYLMT 2
#define M_REPLAYLMT 0xfffff

#define V_REPLAYLMT(x) ((x) << S_REPLAYLMT)

#define A_PCIE_PEX_CTRL1 0x9c

/* ACKLAT field: the T3A revision uses a narrower mask than later chips */
#define S_T3A_ACKLAT 0
#define M_T3A_ACKLAT 0x7ff

#define V_T3A_ACKLAT(x) ((x) << S_T3A_ACKLAT)

#define S_ACKLAT 0
#define M_ACKLAT 0x1fff

#define V_ACKLAT(x) ((x) << S_ACKLAT)

#define A_PCIE_PEX_ERR 0xa4
385
386#define A_T3DBG_GPIO_EN 0xd0
387
388#define S_GPIO11_OEN 27
389#define V_GPIO11_OEN(x) ((x) << S_GPIO11_OEN)
390#define F_GPIO11_OEN V_GPIO11_OEN(1U)
391
392#define S_GPIO10_OEN 26
393#define V_GPIO10_OEN(x) ((x) << S_GPIO10_OEN)
394#define F_GPIO10_OEN V_GPIO10_OEN(1U)
395
396#define S_GPIO7_OEN 23
397#define V_GPIO7_OEN(x) ((x) << S_GPIO7_OEN)
398#define F_GPIO7_OEN V_GPIO7_OEN(1U)
399
400#define S_GPIO6_OEN 22
401#define V_GPIO6_OEN(x) ((x) << S_GPIO6_OEN)
402#define F_GPIO6_OEN V_GPIO6_OEN(1U)
403
404#define S_GPIO5_OEN 21
405#define V_GPIO5_OEN(x) ((x) << S_GPIO5_OEN)
406#define F_GPIO5_OEN V_GPIO5_OEN(1U)
407
408#define S_GPIO4_OEN 20
409#define V_GPIO4_OEN(x) ((x) << S_GPIO4_OEN)
410#define F_GPIO4_OEN V_GPIO4_OEN(1U)
411
412#define S_GPIO2_OEN 18
413#define V_GPIO2_OEN(x) ((x) << S_GPIO2_OEN)
414#define F_GPIO2_OEN V_GPIO2_OEN(1U)
415
416#define S_GPIO1_OEN 17
417#define V_GPIO1_OEN(x) ((x) << S_GPIO1_OEN)
418#define F_GPIO1_OEN V_GPIO1_OEN(1U)
419
420#define S_GPIO0_OEN 16
421#define V_GPIO0_OEN(x) ((x) << S_GPIO0_OEN)
422#define F_GPIO0_OEN V_GPIO0_OEN(1U)
423
424#define S_GPIO10_OUT_VAL 10
425#define V_GPIO10_OUT_VAL(x) ((x) << S_GPIO10_OUT_VAL)
426#define F_GPIO10_OUT_VAL V_GPIO10_OUT_VAL(1U)
427
428#define S_GPIO7_OUT_VAL 7
429#define V_GPIO7_OUT_VAL(x) ((x) << S_GPIO7_OUT_VAL)
430#define F_GPIO7_OUT_VAL V_GPIO7_OUT_VAL(1U)
431
432#define S_GPIO6_OUT_VAL 6
433#define V_GPIO6_OUT_VAL(x) ((x) << S_GPIO6_OUT_VAL)
434#define F_GPIO6_OUT_VAL V_GPIO6_OUT_VAL(1U)
435
436#define S_GPIO5_OUT_VAL 5
437#define V_GPIO5_OUT_VAL(x) ((x) << S_GPIO5_OUT_VAL)
438#define F_GPIO5_OUT_VAL V_GPIO5_OUT_VAL(1U)
439
440#define S_GPIO4_OUT_VAL 4
441#define V_GPIO4_OUT_VAL(x) ((x) << S_GPIO4_OUT_VAL)
442#define F_GPIO4_OUT_VAL V_GPIO4_OUT_VAL(1U)
443
444#define S_GPIO2_OUT_VAL 2
445#define V_GPIO2_OUT_VAL(x) ((x) << S_GPIO2_OUT_VAL)
446#define F_GPIO2_OUT_VAL V_GPIO2_OUT_VAL(1U)
447
448#define S_GPIO1_OUT_VAL 1
449#define V_GPIO1_OUT_VAL(x) ((x) << S_GPIO1_OUT_VAL)
450#define F_GPIO1_OUT_VAL V_GPIO1_OUT_VAL(1U)
451
452#define S_GPIO0_OUT_VAL 0
453#define V_GPIO0_OUT_VAL(x) ((x) << S_GPIO0_OUT_VAL)
454#define F_GPIO0_OUT_VAL V_GPIO0_OUT_VAL(1U)
455
456#define A_T3DBG_INT_ENABLE 0xd8
457
458#define S_GPIO11 11
459#define V_GPIO11(x) ((x) << S_GPIO11)
460#define F_GPIO11 V_GPIO11(1U)
461
462#define S_GPIO10 10
463#define V_GPIO10(x) ((x) << S_GPIO10)
464#define F_GPIO10 V_GPIO10(1U)
465
466#define S_GPIO7 7
467#define V_GPIO7(x) ((x) << S_GPIO7)
468#define F_GPIO7 V_GPIO7(1U)
469
470#define S_GPIO6 6
471#define V_GPIO6(x) ((x) << S_GPIO6)
472#define F_GPIO6 V_GPIO6(1U)
473
474#define S_GPIO5 5
475#define V_GPIO5(x) ((x) << S_GPIO5)
476#define F_GPIO5 V_GPIO5(1U)
477
478#define S_GPIO4 4
479#define V_GPIO4(x) ((x) << S_GPIO4)
480#define F_GPIO4 V_GPIO4(1U)
481
482#define S_GPIO3 3
483#define V_GPIO3(x) ((x) << S_GPIO3)
484#define F_GPIO3 V_GPIO3(1U)
485
486#define S_GPIO2 2
487#define V_GPIO2(x) ((x) << S_GPIO2)
488#define F_GPIO2 V_GPIO2(1U)
489
490#define S_GPIO1 1
491#define V_GPIO1(x) ((x) << S_GPIO1)
492#define F_GPIO1 V_GPIO1(1U)
493
494#define S_GPIO0 0
495#define V_GPIO0(x) ((x) << S_GPIO0)
496#define F_GPIO0 V_GPIO0(1U)
497
498#define A_T3DBG_INT_CAUSE 0xdc
499
500#define A_T3DBG_GPIO_ACT_LOW 0xf0
501
502#define MC7_PMRX_BASE_ADDR 0x100
503
504#define A_MC7_CFG 0x100
505
506#define S_IFEN 13
507#define V_IFEN(x) ((x) << S_IFEN)
508#define F_IFEN V_IFEN(1U)
509
510#define S_TERM150 11
511#define V_TERM150(x) ((x) << S_TERM150)
512#define F_TERM150 V_TERM150(1U)
513
514#define S_SLOW 10
515#define V_SLOW(x) ((x) << S_SLOW)
516#define F_SLOW V_SLOW(1U)
517
518#define S_WIDTH 8
519#define M_WIDTH 0x3
520#define V_WIDTH(x) ((x) << S_WIDTH)
521#define G_WIDTH(x) (((x) >> S_WIDTH) & M_WIDTH)
522
523#define S_BKS 6
524#define V_BKS(x) ((x) << S_BKS)
525#define F_BKS V_BKS(1U)
526
527#define S_ORG 5
528#define V_ORG(x) ((x) << S_ORG)
529#define F_ORG V_ORG(1U)
530
531#define S_DEN 2
532#define M_DEN 0x7
533#define V_DEN(x) ((x) << S_DEN)
534#define G_DEN(x) (((x) >> S_DEN) & M_DEN)
535
536#define S_RDY 1
537#define V_RDY(x) ((x) << S_RDY)
538#define F_RDY V_RDY(1U)
539
540#define S_CLKEN 0
541#define V_CLKEN(x) ((x) << S_CLKEN)
542#define F_CLKEN V_CLKEN(1U)
543
544#define A_MC7_MODE 0x104
545
546#define S_BUSY 31
547#define V_BUSY(x) ((x) << S_BUSY)
548#define F_BUSY V_BUSY(1U)
549
550#define S_BUSY 31
551#define V_BUSY(x) ((x) << S_BUSY)
552#define F_BUSY V_BUSY(1U)
553
554#define A_MC7_EXT_MODE1 0x108
555
556#define A_MC7_EXT_MODE2 0x10c
557
558#define A_MC7_EXT_MODE3 0x110
559
560#define A_MC7_PRE 0x114
561
562#define A_MC7_REF 0x118
563
564#define S_PREREFDIV 1
565#define M_PREREFDIV 0x3fff
566#define V_PREREFDIV(x) ((x) << S_PREREFDIV)
567
568#define S_PERREFEN 0
569#define V_PERREFEN(x) ((x) << S_PERREFEN)
570#define F_PERREFEN V_PERREFEN(1U)
571
572#define A_MC7_DLL 0x11c
573
574#define S_DLLENB 1
575#define V_DLLENB(x) ((x) << S_DLLENB)
576#define F_DLLENB V_DLLENB(1U)
577
578#define S_DLLRST 0
579#define V_DLLRST(x) ((x) << S_DLLRST)
580#define F_DLLRST V_DLLRST(1U)
581
582#define A_MC7_PARM 0x120
583
584#define S_ACTTOPREDLY 26
585#define M_ACTTOPREDLY 0xf
586#define V_ACTTOPREDLY(x) ((x) << S_ACTTOPREDLY)
587
588#define S_ACTTORDWRDLY 23
589#define M_ACTTORDWRDLY 0x7
590#define V_ACTTORDWRDLY(x) ((x) << S_ACTTORDWRDLY)
591
592#define S_PRECYC 20
593#define M_PRECYC 0x7
594#define V_PRECYC(x) ((x) << S_PRECYC)
595
596#define S_REFCYC 13
597#define M_REFCYC 0x7f
598#define V_REFCYC(x) ((x) << S_REFCYC)
599
600#define S_BKCYC 8
601#define M_BKCYC 0x1f
602#define V_BKCYC(x) ((x) << S_BKCYC)
603
604#define S_WRTORDDLY 4
605#define M_WRTORDDLY 0xf
606#define V_WRTORDDLY(x) ((x) << S_WRTORDDLY)
607
608#define S_RDTOWRDLY 0
609#define M_RDTOWRDLY 0xf
610#define V_RDTOWRDLY(x) ((x) << S_RDTOWRDLY)
611
612#define A_MC7_CAL 0x128
613
614#define S_BUSY 31
615#define V_BUSY(x) ((x) << S_BUSY)
616#define F_BUSY V_BUSY(1U)
617
618#define S_BUSY 31
619#define V_BUSY(x) ((x) << S_BUSY)
620#define F_BUSY V_BUSY(1U)
621
622#define S_CAL_FAULT 30
623#define V_CAL_FAULT(x) ((x) << S_CAL_FAULT)
624#define F_CAL_FAULT V_CAL_FAULT(1U)
625
626#define S_SGL_CAL_EN 20
627#define V_SGL_CAL_EN(x) ((x) << S_SGL_CAL_EN)
628#define F_SGL_CAL_EN V_SGL_CAL_EN(1U)
629
630#define A_MC7_ERR_ADDR 0x12c
631
632#define A_MC7_ECC 0x130
633
634#define S_ECCCHKEN 1
635#define V_ECCCHKEN(x) ((x) << S_ECCCHKEN)
636#define F_ECCCHKEN V_ECCCHKEN(1U)
637
638#define S_ECCGENEN 0
639#define V_ECCGENEN(x) ((x) << S_ECCGENEN)
640#define F_ECCGENEN V_ECCGENEN(1U)
641
642#define A_MC7_CE_ADDR 0x134
643
644#define A_MC7_CE_DATA0 0x138
645
646#define A_MC7_CE_DATA1 0x13c
647
648#define A_MC7_CE_DATA2 0x140
649
650#define S_DATA 0
651#define M_DATA 0xff
652
653#define G_DATA(x) (((x) >> S_DATA) & M_DATA)
654
655#define A_MC7_UE_ADDR 0x144
656
657#define A_MC7_UE_DATA0 0x148
658
659#define A_MC7_UE_DATA1 0x14c
660
661#define A_MC7_UE_DATA2 0x150
662
663#define A_MC7_BD_ADDR 0x154
664
665#define S_ADDR 3
666
667#define M_ADDR 0x1fffffff
668
669#define A_MC7_BD_DATA0 0x158
670
671#define A_MC7_BD_DATA1 0x15c
672
673#define A_MC7_BD_OP 0x164
674
/* MC7 backdoor operation field (A_MC7_BD_OP bit 0).
 * Duplicate F_OP and a misplaced A_SF_OP (serial-flash op, belongs with the
 * 0x6dc SF register group defined later) removed.
 */
#define S_OP 0
#define V_OP(x) ((x) << S_OP)
#define F_OP V_OP(1U)
682
683#define A_MC7_BIST_ADDR_BEG 0x168
684
685#define A_MC7_BIST_ADDR_END 0x16c
686
687#define A_MC7_BIST_DATA 0x170
688
689#define A_MC7_BIST_OP 0x174
690
/* BIST continuous-mode enable (A_MC7_BIST_OP bit 3); duplicate F_CONT removed. */
#define S_CONT 3
#define V_CONT(x) ((x) << S_CONT)
#define F_CONT V_CONT(1U)

697#define A_MC7_INT_ENABLE 0x178
698
699#define S_AE 17
700#define V_AE(x) ((x) << S_AE)
701#define F_AE V_AE(1U)
702
703#define S_PE 2
704#define M_PE 0x7fff
705
706#define V_PE(x) ((x) << S_PE)
707
708#define G_PE(x) (((x) >> S_PE) & M_PE)
709
710#define S_UE 1
711#define V_UE(x) ((x) << S_UE)
712#define F_UE V_UE(1U)
713
714#define S_CE 0
715#define V_CE(x) ((x) << S_CE)
716#define F_CE V_CE(1U)
717
718#define A_MC7_INT_CAUSE 0x17c
719
720#define MC7_PMTX_BASE_ADDR 0x180
721
722#define MC7_CM_BASE_ADDR 0x200
723
724#define A_CIM_BOOT_CFG 0x280
725
726#define S_BOOTADDR 2
727#define M_BOOTADDR 0x3fffffff
728#define V_BOOTADDR(x) ((x) << S_BOOTADDR)
729
730#define A_CIM_SDRAM_BASE_ADDR 0x28c
731
732#define A_CIM_SDRAM_ADDR_SIZE 0x290
733
734#define A_CIM_HOST_INT_ENABLE 0x298
735
736#define A_CIM_HOST_INT_CAUSE 0x29c
737
738#define S_BLKWRPLINT 12
739#define V_BLKWRPLINT(x) ((x) << S_BLKWRPLINT)
740#define F_BLKWRPLINT V_BLKWRPLINT(1U)
741
742#define S_BLKRDPLINT 11
743#define V_BLKRDPLINT(x) ((x) << S_BLKRDPLINT)
744#define F_BLKRDPLINT V_BLKRDPLINT(1U)
745
746#define S_BLKWRCTLINT 10
747#define V_BLKWRCTLINT(x) ((x) << S_BLKWRCTLINT)
748#define F_BLKWRCTLINT V_BLKWRCTLINT(1U)
749
750#define S_BLKRDCTLINT 9
751#define V_BLKRDCTLINT(x) ((x) << S_BLKRDCTLINT)
752#define F_BLKRDCTLINT V_BLKRDCTLINT(1U)
753
754#define S_BLKWRFLASHINT 8
755#define V_BLKWRFLASHINT(x) ((x) << S_BLKWRFLASHINT)
756#define F_BLKWRFLASHINT V_BLKWRFLASHINT(1U)
757
758#define S_BLKRDFLASHINT 7
759#define V_BLKRDFLASHINT(x) ((x) << S_BLKRDFLASHINT)
760#define F_BLKRDFLASHINT V_BLKRDFLASHINT(1U)
761
762#define S_SGLWRFLASHINT 6
763#define V_SGLWRFLASHINT(x) ((x) << S_SGLWRFLASHINT)
764#define F_SGLWRFLASHINT V_SGLWRFLASHINT(1U)
765
766#define S_WRBLKFLASHINT 5
767#define V_WRBLKFLASHINT(x) ((x) << S_WRBLKFLASHINT)
768#define F_WRBLKFLASHINT V_WRBLKFLASHINT(1U)
769
770#define S_BLKWRBOOTINT 4
771#define V_BLKWRBOOTINT(x) ((x) << S_BLKWRBOOTINT)
772#define F_BLKWRBOOTINT V_BLKWRBOOTINT(1U)
773
774#define S_FLASHRANGEINT 2
775#define V_FLASHRANGEINT(x) ((x) << S_FLASHRANGEINT)
776#define F_FLASHRANGEINT V_FLASHRANGEINT(1U)
777
778#define S_SDRAMRANGEINT 1
779#define V_SDRAMRANGEINT(x) ((x) << S_SDRAMRANGEINT)
780#define F_SDRAMRANGEINT V_SDRAMRANGEINT(1U)
781
782#define S_RSVDSPACEINT 0
783#define V_RSVDSPACEINT(x) ((x) << S_RSVDSPACEINT)
784#define F_RSVDSPACEINT V_RSVDSPACEINT(1U)
785
786#define A_CIM_HOST_ACC_CTRL 0x2b0
787
788#define S_HOSTBUSY 17
789#define V_HOSTBUSY(x) ((x) << S_HOSTBUSY)
790#define F_HOSTBUSY V_HOSTBUSY(1U)
791
792#define A_CIM_HOST_ACC_DATA 0x2b4
793
794#define A_TP_IN_CONFIG 0x300
795
/* TP input config: NIC-only mode select (A_TP_IN_CONFIG bit 14);
 * duplicate F_NICMODE removed. */
#define S_NICMODE 14
#define V_NICMODE(x) ((x) << S_NICMODE)
#define F_NICMODE V_NICMODE(1U)

802#define S_IPV6ENABLE 15
803#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE)
804#define F_IPV6ENABLE V_IPV6ENABLE(1U)
805
806#define A_TP_OUT_CONFIG 0x304
807
808#define S_VLANEXTRACTIONENABLE 12
809
810#define A_TP_GLOBAL_CONFIG 0x308
811
812#define S_TXPACINGENABLE 24
813#define V_TXPACINGENABLE(x) ((x) << S_TXPACINGENABLE)
814#define F_TXPACINGENABLE V_TXPACINGENABLE(1U)
815
816#define S_PATHMTU 15
817#define V_PATHMTU(x) ((x) << S_PATHMTU)
818#define F_PATHMTU V_PATHMTU(1U)
819
820#define S_IPCHECKSUMOFFLOAD 13
821#define V_IPCHECKSUMOFFLOAD(x) ((x) << S_IPCHECKSUMOFFLOAD)
822#define F_IPCHECKSUMOFFLOAD V_IPCHECKSUMOFFLOAD(1U)
823
824#define S_UDPCHECKSUMOFFLOAD 12
825#define V_UDPCHECKSUMOFFLOAD(x) ((x) << S_UDPCHECKSUMOFFLOAD)
826#define F_UDPCHECKSUMOFFLOAD V_UDPCHECKSUMOFFLOAD(1U)
827
828#define S_TCPCHECKSUMOFFLOAD 11
829#define V_TCPCHECKSUMOFFLOAD(x) ((x) << S_TCPCHECKSUMOFFLOAD)
830#define F_TCPCHECKSUMOFFLOAD V_TCPCHECKSUMOFFLOAD(1U)
831
832#define S_IPTTL 0
833#define M_IPTTL 0xff
834#define V_IPTTL(x) ((x) << S_IPTTL)
835
836#define A_TP_CMM_MM_BASE 0x314
837
838#define A_TP_CMM_TIMER_BASE 0x318
839
840#define S_CMTIMERMAXNUM 28
841#define M_CMTIMERMAXNUM 0x3
842#define V_CMTIMERMAXNUM(x) ((x) << S_CMTIMERMAXNUM)
843
844#define A_TP_PMM_SIZE 0x31c
845
846#define A_TP_PMM_TX_BASE 0x320
847
848#define A_TP_PMM_RX_BASE 0x328
849
850#define A_TP_PMM_RX_PAGE_SIZE 0x32c
851
852#define A_TP_PMM_RX_MAX_PAGE 0x330
853
854#define A_TP_PMM_TX_PAGE_SIZE 0x334
855
856#define A_TP_PMM_TX_MAX_PAGE 0x338
857
858#define A_TP_TCP_OPTIONS 0x340
859
860#define S_MTUDEFAULT 16
861#define M_MTUDEFAULT 0xffff
862#define V_MTUDEFAULT(x) ((x) << S_MTUDEFAULT)
863
864#define S_MTUENABLE 10
865#define V_MTUENABLE(x) ((x) << S_MTUENABLE)
866#define F_MTUENABLE V_MTUENABLE(1U)
867
868#define S_SACKRX 8
869#define V_SACKRX(x) ((x) << S_SACKRX)
870#define F_SACKRX V_SACKRX(1U)
871
872#define S_SACKMODE 4
873
874#define M_SACKMODE 0x3
875
876#define V_SACKMODE(x) ((x) << S_SACKMODE)
877
878#define S_WINDOWSCALEMODE 2
879#define M_WINDOWSCALEMODE 0x3
880#define V_WINDOWSCALEMODE(x) ((x) << S_WINDOWSCALEMODE)
881
882#define S_TIMESTAMPSMODE 0
883
884#define M_TIMESTAMPSMODE 0x3
885
886#define V_TIMESTAMPSMODE(x) ((x) << S_TIMESTAMPSMODE)
887
888#define A_TP_DACK_CONFIG 0x344
889
890#define S_AUTOSTATE3 30
891#define M_AUTOSTATE3 0x3
892#define V_AUTOSTATE3(x) ((x) << S_AUTOSTATE3)
893
894#define S_AUTOSTATE2 28
895#define M_AUTOSTATE2 0x3
896#define V_AUTOSTATE2(x) ((x) << S_AUTOSTATE2)
897
898#define S_AUTOSTATE1 26
899#define M_AUTOSTATE1 0x3
900#define V_AUTOSTATE1(x) ((x) << S_AUTOSTATE1)
901
902#define S_BYTETHRESHOLD 5
903#define M_BYTETHRESHOLD 0xfffff
904#define V_BYTETHRESHOLD(x) ((x) << S_BYTETHRESHOLD)
905
906#define S_MSSTHRESHOLD 3
907#define M_MSSTHRESHOLD 0x3
908#define V_MSSTHRESHOLD(x) ((x) << S_MSSTHRESHOLD)
909
910#define S_AUTOCAREFUL 2
911#define V_AUTOCAREFUL(x) ((x) << S_AUTOCAREFUL)
912#define F_AUTOCAREFUL V_AUTOCAREFUL(1U)
913
914#define S_AUTOENABLE 1
915#define V_AUTOENABLE(x) ((x) << S_AUTOENABLE)
916#define F_AUTOENABLE V_AUTOENABLE(1U)
917
918#define S_DACK_MODE 0
919#define V_DACK_MODE(x) ((x) << S_DACK_MODE)
920#define F_DACK_MODE V_DACK_MODE(1U)
921
922#define A_TP_PC_CONFIG 0x348
923
924#define S_TXTOSQUEUEMAPMODE 26
925#define V_TXTOSQUEUEMAPMODE(x) ((x) << S_TXTOSQUEUEMAPMODE)
926#define F_TXTOSQUEUEMAPMODE V_TXTOSQUEUEMAPMODE(1U)
927
928#define S_ENABLEEPCMDAFULL 23
929#define V_ENABLEEPCMDAFULL(x) ((x) << S_ENABLEEPCMDAFULL)
930#define F_ENABLEEPCMDAFULL V_ENABLEEPCMDAFULL(1U)
931
932#define S_MODULATEUNIONMODE 22
933#define V_MODULATEUNIONMODE(x) ((x) << S_MODULATEUNIONMODE)
934#define F_MODULATEUNIONMODE V_MODULATEUNIONMODE(1U)
935
936#define S_TXDEFERENABLE 20
937#define V_TXDEFERENABLE(x) ((x) << S_TXDEFERENABLE)
938#define F_TXDEFERENABLE V_TXDEFERENABLE(1U)
939
940#define S_RXCONGESTIONMODE 19
941#define V_RXCONGESTIONMODE(x) ((x) << S_RXCONGESTIONMODE)
942#define F_RXCONGESTIONMODE V_RXCONGESTIONMODE(1U)
943
944#define S_HEARBEATDACK 16
945#define V_HEARBEATDACK(x) ((x) << S_HEARBEATDACK)
946#define F_HEARBEATDACK V_HEARBEATDACK(1U)
947
948#define S_TXCONGESTIONMODE 15
949#define V_TXCONGESTIONMODE(x) ((x) << S_TXCONGESTIONMODE)
950#define F_TXCONGESTIONMODE V_TXCONGESTIONMODE(1U)
951
952#define S_ENABLEOCSPIFULL 30
953#define V_ENABLEOCSPIFULL(x) ((x) << S_ENABLEOCSPIFULL)
954#define F_ENABLEOCSPIFULL V_ENABLEOCSPIFULL(1U)
955
956#define S_LOCKTID 28
957#define V_LOCKTID(x) ((x) << S_LOCKTID)
958#define F_LOCKTID V_LOCKTID(1U)
959
960#define A_TP_PC_CONFIG2 0x34c
961
962#define S_CHDRAFULL 4
963#define V_CHDRAFULL(x) ((x) << S_CHDRAFULL)
964#define F_CHDRAFULL V_CHDRAFULL(1U)
965
966#define A_TP_TCP_BACKOFF_REG0 0x350
967
968#define A_TP_TCP_BACKOFF_REG1 0x354
969
970#define A_TP_TCP_BACKOFF_REG2 0x358
971
972#define A_TP_TCP_BACKOFF_REG3 0x35c
973
974#define A_TP_PARA_REG2 0x368
975
976#define S_MAXRXDATA 16
977#define M_MAXRXDATA 0xffff
978#define V_MAXRXDATA(x) ((x) << S_MAXRXDATA)
979
980#define S_RXCOALESCESIZE 0
981#define M_RXCOALESCESIZE 0xffff
982#define V_RXCOALESCESIZE(x) ((x) << S_RXCOALESCESIZE)
983
984#define A_TP_PARA_REG3 0x36c
985
986#define S_TXDATAACKIDX 16
987#define M_TXDATAACKIDX 0xf
988
989#define V_TXDATAACKIDX(x) ((x) << S_TXDATAACKIDX)
990
991#define S_TXPACEAUTOSTRICT 10
992#define V_TXPACEAUTOSTRICT(x) ((x) << S_TXPACEAUTOSTRICT)
993#define F_TXPACEAUTOSTRICT V_TXPACEAUTOSTRICT(1U)
994
995#define S_TXPACEFIXED 9
996#define V_TXPACEFIXED(x) ((x) << S_TXPACEFIXED)
997#define F_TXPACEFIXED V_TXPACEFIXED(1U)
998
999#define S_TXPACEAUTO 8
1000#define V_TXPACEAUTO(x) ((x) << S_TXPACEAUTO)
1001#define F_TXPACEAUTO V_TXPACEAUTO(1U)
1002
1003#define S_RXCOALESCEENABLE 1
1004#define V_RXCOALESCEENABLE(x) ((x) << S_RXCOALESCEENABLE)
1005#define F_RXCOALESCEENABLE V_RXCOALESCEENABLE(1U)
1006
1007#define S_RXCOALESCEPSHEN 0
1008#define V_RXCOALESCEPSHEN(x) ((x) << S_RXCOALESCEPSHEN)
1009#define F_RXCOALESCEPSHEN V_RXCOALESCEPSHEN(1U)
1010
1011#define A_TP_PARA_REG4 0x370
1012
1013#define A_TP_PARA_REG6 0x378
1014
1015#define S_T3A_ENABLEESND 13
1016#define V_T3A_ENABLEESND(x) ((x) << S_T3A_ENABLEESND)
1017#define F_T3A_ENABLEESND V_T3A_ENABLEESND(1U)
1018
1019#define S_ENABLEESND 11
1020#define V_ENABLEESND(x) ((x) << S_ENABLEESND)
1021#define F_ENABLEESND V_ENABLEESND(1U)
1022
1023#define A_TP_PARA_REG7 0x37c
1024
1025#define S_PMMAXXFERLEN1 16
1026#define M_PMMAXXFERLEN1 0xffff
1027#define V_PMMAXXFERLEN1(x) ((x) << S_PMMAXXFERLEN1)
1028
1029#define S_PMMAXXFERLEN0 0
1030#define M_PMMAXXFERLEN0 0xffff
1031#define V_PMMAXXFERLEN0(x) ((x) << S_PMMAXXFERLEN0)
1032
1033#define A_TP_TIMER_RESOLUTION 0x390
1034
1035#define S_TIMERRESOLUTION 16
1036#define M_TIMERRESOLUTION 0xff
1037#define V_TIMERRESOLUTION(x) ((x) << S_TIMERRESOLUTION)
1038
1039#define S_TIMESTAMPRESOLUTION 8
1040#define M_TIMESTAMPRESOLUTION 0xff
1041#define V_TIMESTAMPRESOLUTION(x) ((x) << S_TIMESTAMPRESOLUTION)
1042
1043#define S_DELAYEDACKRESOLUTION 0
1044#define M_DELAYEDACKRESOLUTION 0xff
1045#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION)
1046
1047#define A_TP_MSL 0x394
1048
1049#define A_TP_RXT_MIN 0x398
1050
1051#define A_TP_RXT_MAX 0x39c
1052
1053#define A_TP_PERS_MIN 0x3a0
1054
1055#define A_TP_PERS_MAX 0x3a4
1056
1057#define A_TP_KEEP_IDLE 0x3a8
1058
1059#define A_TP_KEEP_INTVL 0x3ac
1060
1061#define A_TP_INIT_SRTT 0x3b0
1062
1063#define A_TP_DACK_TIMER 0x3b4
1064
1065#define A_TP_FINWAIT2_TIMER 0x3b8
1066
1067#define A_TP_SHIFT_CNT 0x3c0
1068
1069#define S_SYNSHIFTMAX 24
1070
1071#define M_SYNSHIFTMAX 0xff
1072
1073#define V_SYNSHIFTMAX(x) ((x) << S_SYNSHIFTMAX)
1074
1075#define S_RXTSHIFTMAXR1 20
1076
1077#define M_RXTSHIFTMAXR1 0xf
1078
1079#define V_RXTSHIFTMAXR1(x) ((x) << S_RXTSHIFTMAXR1)
1080
1081#define S_RXTSHIFTMAXR2 16
1082
1083#define M_RXTSHIFTMAXR2 0xf
1084
1085#define V_RXTSHIFTMAXR2(x) ((x) << S_RXTSHIFTMAXR2)
1086
1087#define S_PERSHIFTBACKOFFMAX 12
1088#define M_PERSHIFTBACKOFFMAX 0xf
1089#define V_PERSHIFTBACKOFFMAX(x) ((x) << S_PERSHIFTBACKOFFMAX)
1090
1091#define S_PERSHIFTMAX 8
1092#define M_PERSHIFTMAX 0xf
1093#define V_PERSHIFTMAX(x) ((x) << S_PERSHIFTMAX)
1094
1095#define S_KEEPALIVEMAX 0
1096
1097#define M_KEEPALIVEMAX 0xff
1098
1099#define V_KEEPALIVEMAX(x) ((x) << S_KEEPALIVEMAX)
1100
1101#define A_TP_MTU_PORT_TABLE 0x3d0
1102
1103#define A_TP_CCTRL_TABLE 0x3dc
1104
1105#define A_TP_MTU_TABLE 0x3e4
1106
1107#define A_TP_RSS_MAP_TABLE 0x3e8
1108
1109#define A_TP_RSS_LKP_TABLE 0x3ec
1110
1111#define A_TP_RSS_CONFIG 0x3f0
1112
1113#define S_TNL4TUPEN 29
1114#define V_TNL4TUPEN(x) ((x) << S_TNL4TUPEN)
1115#define F_TNL4TUPEN V_TNL4TUPEN(1U)
1116
1117#define S_TNL2TUPEN 28
1118#define V_TNL2TUPEN(x) ((x) << S_TNL2TUPEN)
1119#define F_TNL2TUPEN V_TNL2TUPEN(1U)
1120
1121#define S_TNLPRTEN 26
1122#define V_TNLPRTEN(x) ((x) << S_TNLPRTEN)
1123#define F_TNLPRTEN V_TNLPRTEN(1U)
1124
1125#define S_TNLMAPEN 25
1126#define V_TNLMAPEN(x) ((x) << S_TNLMAPEN)
1127#define F_TNLMAPEN V_TNLMAPEN(1U)
1128
1129#define S_TNLLKPEN 24
1130#define V_TNLLKPEN(x) ((x) << S_TNLLKPEN)
1131#define F_TNLLKPEN V_TNLLKPEN(1U)
1132
1133#define S_RRCPLCPUSIZE 4
1134#define M_RRCPLCPUSIZE 0x7
1135#define V_RRCPLCPUSIZE(x) ((x) << S_RRCPLCPUSIZE)
1136
1137#define S_RQFEEDBACKENABLE 3
1138#define V_RQFEEDBACKENABLE(x) ((x) << S_RQFEEDBACKENABLE)
1139#define F_RQFEEDBACKENABLE V_RQFEEDBACKENABLE(1U)
1140
1141#define S_DISABLE 0
1142
1143#define A_TP_TM_PIO_ADDR 0x418
1144
1145#define A_TP_TM_PIO_DATA 0x41c
1146
1147#define A_TP_TX_MOD_QUE_TABLE 0x420
1148
1149#define A_TP_TX_RESOURCE_LIMIT 0x424
1150
1151#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x428
1152
1153#define S_TX_MOD_QUEUE_REQ_MAP 0
1154#define M_TX_MOD_QUEUE_REQ_MAP 0xff
1155#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP)
1156
1157#define A_TP_TX_MOD_QUEUE_WEIGHT1 0x42c
1158
1159#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x430
1160
1161#define A_TP_MOD_CHANNEL_WEIGHT 0x434
1162
1163#define A_TP_PIO_ADDR 0x440
1164
1165#define A_TP_PIO_DATA 0x444
1166
1167#define A_TP_RESET 0x44c
1168
1169#define S_FLSTINITENABLE 1
1170#define V_FLSTINITENABLE(x) ((x) << S_FLSTINITENABLE)
1171#define F_FLSTINITENABLE V_FLSTINITENABLE(1U)
1172
1173#define S_TPRESET 0
1174#define V_TPRESET(x) ((x) << S_TPRESET)
1175#define F_TPRESET V_TPRESET(1U)
1176
1177#define A_TP_CMM_MM_RX_FLST_BASE 0x460
1178
1179#define A_TP_CMM_MM_TX_FLST_BASE 0x464
1180
1181#define A_TP_CMM_MM_PS_FLST_BASE 0x468
1182
1183#define A_TP_MIB_INDEX 0x450
1184
1185#define A_TP_MIB_RDATA 0x454
1186
1187#define A_TP_CMM_MM_MAX_PSTRUCT 0x46c
1188
1189#define A_TP_INT_ENABLE 0x470
1190
1191#define A_TP_INT_CAUSE 0x474
1192
1193#define A_TP_TX_MOD_Q1_Q0_RATE_LIMIT 0x8
1194
1195#define A_TP_TX_DROP_CFG_CH0 0x12b
1196
1197#define A_TP_TX_DROP_MODE 0x12f
1198
1199#define A_TP_EGRESS_CONFIG 0x145
1200
1201#define S_REWRITEFORCETOSIZE 0
1202#define V_REWRITEFORCETOSIZE(x) ((x) << S_REWRITEFORCETOSIZE)
1203#define F_REWRITEFORCETOSIZE V_REWRITEFORCETOSIZE(1U)
1204
1205#define A_TP_TX_TRC_KEY0 0x20
1206
1207#define A_TP_RX_TRC_KEY0 0x120
1208
1209#define A_ULPRX_CTL 0x500
1210
1211#define S_ROUND_ROBIN 4
1212#define V_ROUND_ROBIN(x) ((x) << S_ROUND_ROBIN)
1213#define F_ROUND_ROBIN V_ROUND_ROBIN(1U)
1214
1215#define A_ULPRX_INT_ENABLE 0x504
1216
1217#define S_PARERR 0
1218#define V_PARERR(x) ((x) << S_PARERR)
1219#define F_PARERR V_PARERR(1U)
1220
1221#define A_ULPRX_INT_CAUSE 0x508
1222
1223#define A_ULPRX_ISCSI_LLIMIT 0x50c
1224
1225#define A_ULPRX_ISCSI_ULIMIT 0x510
1226
1227#define A_ULPRX_ISCSI_TAGMASK 0x514
1228
1229#define A_ULPRX_TDDP_LLIMIT 0x51c
1230
1231#define A_ULPRX_TDDP_ULIMIT 0x520
1232
1233#define A_ULPRX_STAG_LLIMIT 0x52c
1234
1235#define A_ULPRX_STAG_ULIMIT 0x530
1236
/* ULP RX memory-region limit registers.
 * Deduplicated: A_ULPRX_RQ_LLIMIT, A_ULPRX_RQ_ULIMIT and A_ULPRX_PBL_ULIMIT
 * were each defined four times with identical values; collapsed to one
 * definition each, ordered by register address.
 */
#define A_ULPRX_TDDP_TAGMASK 0x524

#define A_ULPRX_RQ_LLIMIT 0x534

#define A_ULPRX_RQ_ULIMIT 0x538

#define A_ULPRX_PBL_LLIMIT 0x53c

#define A_ULPRX_PBL_ULIMIT 0x540

1259#define A_ULPTX_CONFIG 0x580
1260
1261#define S_CFG_RR_ARB 0
1262#define V_CFG_RR_ARB(x) ((x) << S_CFG_RR_ARB)
1263#define F_CFG_RR_ARB V_CFG_RR_ARB(1U)
1264
1265#define A_ULPTX_INT_ENABLE 0x584
1266
1267#define S_PBL_BOUND_ERR_CH1 1
1268#define V_PBL_BOUND_ERR_CH1(x) ((x) << S_PBL_BOUND_ERR_CH1)
1269#define F_PBL_BOUND_ERR_CH1 V_PBL_BOUND_ERR_CH1(1U)
1270
1271#define S_PBL_BOUND_ERR_CH0 0
1272#define V_PBL_BOUND_ERR_CH0(x) ((x) << S_PBL_BOUND_ERR_CH0)
1273#define F_PBL_BOUND_ERR_CH0 V_PBL_BOUND_ERR_CH0(1U)
1274
1275#define A_ULPTX_INT_CAUSE 0x588
1276
1277#define A_ULPTX_TPT_LLIMIT 0x58c
1278
1279#define A_ULPTX_TPT_ULIMIT 0x590
1280
1281#define A_ULPTX_PBL_LLIMIT 0x594
1282
1283#define A_ULPTX_PBL_ULIMIT 0x598
1284
1285#define A_ULPTX_DMA_WEIGHT 0x5ac
1286
1287#define S_D1_WEIGHT 16
1288#define M_D1_WEIGHT 0xffff
1289#define V_D1_WEIGHT(x) ((x) << S_D1_WEIGHT)
1290
1291#define S_D0_WEIGHT 0
1292#define M_D0_WEIGHT 0xffff
1293#define V_D0_WEIGHT(x) ((x) << S_D0_WEIGHT)
1294
1295#define A_PM1_RX_CFG 0x5c0
1296
1297#define A_PM1_RX_INT_ENABLE 0x5d8
1298
1299#define S_ZERO_E_CMD_ERROR 18
1300#define V_ZERO_E_CMD_ERROR(x) ((x) << S_ZERO_E_CMD_ERROR)
1301#define F_ZERO_E_CMD_ERROR V_ZERO_E_CMD_ERROR(1U)
1302
1303#define S_IESPI0_FIFO2X_RX_FRAMING_ERROR 17
1304#define V_IESPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_FIFO2X_RX_FRAMING_ERROR)
1305#define F_IESPI0_FIFO2X_RX_FRAMING_ERROR V_IESPI0_FIFO2X_RX_FRAMING_ERROR(1U)
1306
1307#define S_IESPI1_FIFO2X_RX_FRAMING_ERROR 16
1308#define V_IESPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_FIFO2X_RX_FRAMING_ERROR)
1309#define F_IESPI1_FIFO2X_RX_FRAMING_ERROR V_IESPI1_FIFO2X_RX_FRAMING_ERROR(1U)
1310
1311#define S_IESPI0_RX_FRAMING_ERROR 15
1312#define V_IESPI0_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_RX_FRAMING_ERROR)
1313#define F_IESPI0_RX_FRAMING_ERROR V_IESPI0_RX_FRAMING_ERROR(1U)
1314
1315#define S_IESPI1_RX_FRAMING_ERROR 14
1316#define V_IESPI1_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_RX_FRAMING_ERROR)
1317#define F_IESPI1_RX_FRAMING_ERROR V_IESPI1_RX_FRAMING_ERROR(1U)
1318
1319#define S_IESPI0_TX_FRAMING_ERROR 13
1320#define V_IESPI0_TX_FRAMING_ERROR(x) ((x) << S_IESPI0_TX_FRAMING_ERROR)
1321#define F_IESPI0_TX_FRAMING_ERROR V_IESPI0_TX_FRAMING_ERROR(1U)
1322
1323#define S_IESPI1_TX_FRAMING_ERROR 12
1324#define V_IESPI1_TX_FRAMING_ERROR(x) ((x) << S_IESPI1_TX_FRAMING_ERROR)
1325#define F_IESPI1_TX_FRAMING_ERROR V_IESPI1_TX_FRAMING_ERROR(1U)
1326
1327#define S_OCSPI0_RX_FRAMING_ERROR 11
1328#define V_OCSPI0_RX_FRAMING_ERROR(x) ((x) << S_OCSPI0_RX_FRAMING_ERROR)
1329#define F_OCSPI0_RX_FRAMING_ERROR V_OCSPI0_RX_FRAMING_ERROR(1U)
1330
1331#define S_OCSPI1_RX_FRAMING_ERROR 10
1332#define V_OCSPI1_RX_FRAMING_ERROR(x) ((x) << S_OCSPI1_RX_FRAMING_ERROR)
1333#define F_OCSPI1_RX_FRAMING_ERROR V_OCSPI1_RX_FRAMING_ERROR(1U)
1334
1335#define S_OCSPI0_TX_FRAMING_ERROR 9
1336#define V_OCSPI0_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_TX_FRAMING_ERROR)
1337#define F_OCSPI0_TX_FRAMING_ERROR V_OCSPI0_TX_FRAMING_ERROR(1U)
1338
1339#define S_OCSPI1_TX_FRAMING_ERROR 8
1340#define V_OCSPI1_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_TX_FRAMING_ERROR)
1341#define F_OCSPI1_TX_FRAMING_ERROR V_OCSPI1_TX_FRAMING_ERROR(1U)
1342
1343#define S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR 7
1344#define V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR)
1345#define F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
1346
1347#define S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR 6
1348#define V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1349#define F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
1350
1351#define S_IESPI_PAR_ERROR 3
1352#define M_IESPI_PAR_ERROR 0x7
1353
1354#define V_IESPI_PAR_ERROR(x) ((x) << S_IESPI_PAR_ERROR)
1355
1356#define S_OCSPI_PAR_ERROR 0
1357#define M_OCSPI_PAR_ERROR 0x7
1358
1359#define V_OCSPI_PAR_ERROR(x) ((x) << S_OCSPI_PAR_ERROR)
1360
1361#define A_PM1_RX_INT_CAUSE 0x5dc
1362
1363#define A_PM1_TX_CFG 0x5e0
1364
1365#define A_PM1_TX_INT_ENABLE 0x5f8
1366
1367#define S_ZERO_C_CMD_ERROR 18
1368#define V_ZERO_C_CMD_ERROR(x) ((x) << S_ZERO_C_CMD_ERROR)
1369#define F_ZERO_C_CMD_ERROR V_ZERO_C_CMD_ERROR(1U)
1370
1371#define S_ICSPI0_FIFO2X_RX_FRAMING_ERROR 17
1372#define V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_FIFO2X_RX_FRAMING_ERROR)
1373#define F_ICSPI0_FIFO2X_RX_FRAMING_ERROR V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(1U)
1374
1375#define S_ICSPI1_FIFO2X_RX_FRAMING_ERROR 16
1376#define V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_FIFO2X_RX_FRAMING_ERROR)
1377#define F_ICSPI1_FIFO2X_RX_FRAMING_ERROR V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(1U)
1378
1379#define S_ICSPI0_RX_FRAMING_ERROR 15
1380#define V_ICSPI0_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_RX_FRAMING_ERROR)
1381#define F_ICSPI0_RX_FRAMING_ERROR V_ICSPI0_RX_FRAMING_ERROR(1U)
1382
1383#define S_ICSPI1_RX_FRAMING_ERROR 14
1384#define V_ICSPI1_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_RX_FRAMING_ERROR)
1385#define F_ICSPI1_RX_FRAMING_ERROR V_ICSPI1_RX_FRAMING_ERROR(1U)
1386
1387#define S_ICSPI0_TX_FRAMING_ERROR 13
1388#define V_ICSPI0_TX_FRAMING_ERROR(x) ((x) << S_ICSPI0_TX_FRAMING_ERROR)
1389#define F_ICSPI0_TX_FRAMING_ERROR V_ICSPI0_TX_FRAMING_ERROR(1U)
1390
1391#define S_ICSPI1_TX_FRAMING_ERROR 12
1392#define V_ICSPI1_TX_FRAMING_ERROR(x) ((x) << S_ICSPI1_TX_FRAMING_ERROR)
1393#define F_ICSPI1_TX_FRAMING_ERROR V_ICSPI1_TX_FRAMING_ERROR(1U)
1394
1395#define S_OESPI0_RX_FRAMING_ERROR 11
1396#define V_OESPI0_RX_FRAMING_ERROR(x) ((x) << S_OESPI0_RX_FRAMING_ERROR)
1397#define F_OESPI0_RX_FRAMING_ERROR V_OESPI0_RX_FRAMING_ERROR(1U)
1398
1399#define S_OESPI1_RX_FRAMING_ERROR 10
1400#define V_OESPI1_RX_FRAMING_ERROR(x) ((x) << S_OESPI1_RX_FRAMING_ERROR)
1401#define F_OESPI1_RX_FRAMING_ERROR V_OESPI1_RX_FRAMING_ERROR(1U)
1402
1403#define S_OESPI0_TX_FRAMING_ERROR 9
1404#define V_OESPI0_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_TX_FRAMING_ERROR)
1405#define F_OESPI0_TX_FRAMING_ERROR V_OESPI0_TX_FRAMING_ERROR(1U)
1406
1407#define S_OESPI1_TX_FRAMING_ERROR 8
1408#define V_OESPI1_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_TX_FRAMING_ERROR)
1409#define F_OESPI1_TX_FRAMING_ERROR V_OESPI1_TX_FRAMING_ERROR(1U)
1410
1411#define S_OESPI0_OFIFO2X_TX_FRAMING_ERROR 7
1412#define V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_OFIFO2X_TX_FRAMING_ERROR)
1413#define F_OESPI0_OFIFO2X_TX_FRAMING_ERROR V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
1414
1415#define S_OESPI1_OFIFO2X_TX_FRAMING_ERROR 6
1416#define V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1417#define F_OESPI1_OFIFO2X_TX_FRAMING_ERROR V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
1418
1419#define S_ICSPI_PAR_ERROR 3
1420#define M_ICSPI_PAR_ERROR 0x7
1421
1422#define V_ICSPI_PAR_ERROR(x) ((x) << S_ICSPI_PAR_ERROR)
1423
1424#define S_OESPI_PAR_ERROR 0
1425#define M_OESPI_PAR_ERROR 0x7
1426
1427#define V_OESPI_PAR_ERROR(x) ((x) << S_OESPI_PAR_ERROR)
1428
1429#define A_PM1_TX_INT_CAUSE 0x5fc
1430
1431#define A_MPS_CFG 0x600
1432
1433#define S_TPRXPORTEN 4
1434#define V_TPRXPORTEN(x) ((x) << S_TPRXPORTEN)
1435#define F_TPRXPORTEN V_TPRXPORTEN(1U)
1436
1437#define S_TPTXPORT1EN 3
1438#define V_TPTXPORT1EN(x) ((x) << S_TPTXPORT1EN)
1439#define F_TPTXPORT1EN V_TPTXPORT1EN(1U)
1440
1441#define S_TPTXPORT0EN 2
1442#define V_TPTXPORT0EN(x) ((x) << S_TPTXPORT0EN)
1443#define F_TPTXPORT0EN V_TPTXPORT0EN(1U)
1444
1445#define S_PORT1ACTIVE 1
1446#define V_PORT1ACTIVE(x) ((x) << S_PORT1ACTIVE)
1447#define F_PORT1ACTIVE V_PORT1ACTIVE(1U)
1448
1449#define S_PORT0ACTIVE 0
1450#define V_PORT0ACTIVE(x) ((x) << S_PORT0ACTIVE)
1451#define F_PORT0ACTIVE V_PORT0ACTIVE(1U)
1452
1453#define S_ENFORCEPKT 11
1454#define V_ENFORCEPKT(x) ((x) << S_ENFORCEPKT)
1455#define F_ENFORCEPKT V_ENFORCEPKT(1U)
1456
1457#define A_MPS_INT_ENABLE 0x61c
1458
1459#define S_MCAPARERRENB 6
1460#define M_MCAPARERRENB 0x7
1461
1462#define V_MCAPARERRENB(x) ((x) << S_MCAPARERRENB)
1463
1464#define S_RXTPPARERRENB 4
1465#define M_RXTPPARERRENB 0x3
1466
1467#define V_RXTPPARERRENB(x) ((x) << S_RXTPPARERRENB)
1468
1469#define S_TX1TPPARERRENB 2
1470#define M_TX1TPPARERRENB 0x3
1471
1472#define V_TX1TPPARERRENB(x) ((x) << S_TX1TPPARERRENB)
1473
1474#define S_TX0TPPARERRENB 0
1475#define M_TX0TPPARERRENB 0x3
1476
1477#define V_TX0TPPARERRENB(x) ((x) << S_TX0TPPARERRENB)
1478
1479#define A_MPS_INT_CAUSE 0x620
1480
1481#define S_MCAPARERR 6
1482#define M_MCAPARERR 0x7
1483
1484#define V_MCAPARERR(x) ((x) << S_MCAPARERR)
1485
1486#define S_RXTPPARERR 4
1487#define M_RXTPPARERR 0x3
1488
1489#define V_RXTPPARERR(x) ((x) << S_RXTPPARERR)
1490
1491#define S_TX1TPPARERR 2
1492#define M_TX1TPPARERR 0x3
1493
1494#define V_TX1TPPARERR(x) ((x) << S_TX1TPPARERR)
1495
1496#define S_TX0TPPARERR 0
1497#define M_TX0TPPARERR 0x3
1498
1499#define V_TX0TPPARERR(x) ((x) << S_TX0TPPARERR)
1500
1501#define A_CPL_SWITCH_CNTRL 0x640
1502
1503#define A_CPL_INTR_ENABLE 0x650
1504
1505#define S_CIM_OVFL_ERROR 4
1506#define V_CIM_OVFL_ERROR(x) ((x) << S_CIM_OVFL_ERROR)
1507#define F_CIM_OVFL_ERROR V_CIM_OVFL_ERROR(1U)
1508
1509#define S_TP_FRAMING_ERROR 3
1510#define V_TP_FRAMING_ERROR(x) ((x) << S_TP_FRAMING_ERROR)
1511#define F_TP_FRAMING_ERROR V_TP_FRAMING_ERROR(1U)
1512
1513#define S_SGE_FRAMING_ERROR 2
1514#define V_SGE_FRAMING_ERROR(x) ((x) << S_SGE_FRAMING_ERROR)
1515#define F_SGE_FRAMING_ERROR V_SGE_FRAMING_ERROR(1U)
1516
1517#define S_CIM_FRAMING_ERROR 1
1518#define V_CIM_FRAMING_ERROR(x) ((x) << S_CIM_FRAMING_ERROR)
1519#define F_CIM_FRAMING_ERROR V_CIM_FRAMING_ERROR(1U)
1520
1521#define S_ZERO_SWITCH_ERROR 0
1522#define V_ZERO_SWITCH_ERROR(x) ((x) << S_ZERO_SWITCH_ERROR)
1523#define F_ZERO_SWITCH_ERROR V_ZERO_SWITCH_ERROR(1U)
1524
1525#define A_CPL_INTR_CAUSE 0x654
1526
1527#define A_CPL_MAP_TBL_DATA 0x65c
1528
1529#define A_SMB_GLOBAL_TIME_CFG 0x660
1530
1531#define A_I2C_CFG 0x6a0
1532
1533#define S_I2C_CLKDIV 0
1534#define M_I2C_CLKDIV 0xfff
1535#define V_I2C_CLKDIV(x) ((x) << S_I2C_CLKDIV)
1536
1537#define A_MI1_CFG 0x6b0
1538
1539#define S_CLKDIV 5
1540#define M_CLKDIV 0xff
1541#define V_CLKDIV(x) ((x) << S_CLKDIV)
1542
1543#define S_ST 3
1544
1545#define M_ST 0x3
1546
1547#define V_ST(x) ((x) << S_ST)
1548
1549#define G_ST(x) (((x) >> S_ST) & M_ST)
1550
1551#define S_PREEN 2
1552#define V_PREEN(x) ((x) << S_PREEN)
1553#define F_PREEN V_PREEN(1U)
1554
1555#define S_MDIINV 1
1556#define V_MDIINV(x) ((x) << S_MDIINV)
1557#define F_MDIINV V_MDIINV(1U)
1558
1559#define S_MDIEN 0
1560#define V_MDIEN(x) ((x) << S_MDIEN)
1561#define F_MDIEN V_MDIEN(1U)
1562
1563#define A_MI1_ADDR 0x6b4
1564
1565#define S_PHYADDR 5
1566#define M_PHYADDR 0x1f
1567#define V_PHYADDR(x) ((x) << S_PHYADDR)
1568
1569#define S_REGADDR 0
1570#define M_REGADDR 0x1f
1571#define V_REGADDR(x) ((x) << S_REGADDR)
1572
1573#define A_MI1_DATA 0x6b8
1574
1575#define A_MI1_OP 0x6bc
1576
1577#define S_MDI_OP 0
1578#define M_MDI_OP 0x3
1579#define V_MDI_OP(x) ((x) << S_MDI_OP)
1580
1581#define A_SF_DATA 0x6d8
1582
1583#define A_SF_OP 0x6dc
1584
1585#define S_BYTECNT 1
1586#define M_BYTECNT 0x3
1587#define V_BYTECNT(x) ((x) << S_BYTECNT)
1588
1589#define A_PL_INT_ENABLE0 0x6e0
1590
1591#define S_T3DBG 23
1592#define V_T3DBG(x) ((x) << S_T3DBG)
1593#define F_T3DBG V_T3DBG(1U)
1594
1595#define S_XGMAC0_1 20
1596#define V_XGMAC0_1(x) ((x) << S_XGMAC0_1)
1597#define F_XGMAC0_1 V_XGMAC0_1(1U)
1598
1599#define S_XGMAC0_0 19
1600#define V_XGMAC0_0(x) ((x) << S_XGMAC0_0)
1601#define F_XGMAC0_0 V_XGMAC0_0(1U)
1602
1603#define S_MC5A 18
1604#define V_MC5A(x) ((x) << S_MC5A)
1605#define F_MC5A V_MC5A(1U)
1606
1607#define S_CPL_SWITCH 12
1608#define V_CPL_SWITCH(x) ((x) << S_CPL_SWITCH)
1609#define F_CPL_SWITCH V_CPL_SWITCH(1U)
1610
1611#define S_MPS0 11
1612#define V_MPS0(x) ((x) << S_MPS0)
1613#define F_MPS0 V_MPS0(1U)
1614
1615#define S_PM1_TX 10
1616#define V_PM1_TX(x) ((x) << S_PM1_TX)
1617#define F_PM1_TX V_PM1_TX(1U)
1618
1619#define S_PM1_RX 9
1620#define V_PM1_RX(x) ((x) << S_PM1_RX)
1621#define F_PM1_RX V_PM1_RX(1U)
1622
1623#define S_ULP2_TX 8
1624#define V_ULP2_TX(x) ((x) << S_ULP2_TX)
1625#define F_ULP2_TX V_ULP2_TX(1U)
1626
1627#define S_ULP2_RX 7
1628#define V_ULP2_RX(x) ((x) << S_ULP2_RX)
1629#define F_ULP2_RX V_ULP2_RX(1U)
1630
1631#define S_TP1 6
1632#define V_TP1(x) ((x) << S_TP1)
1633#define F_TP1 V_TP1(1U)
1634
1635#define S_CIM 5
1636#define V_CIM(x) ((x) << S_CIM)
1637#define F_CIM V_CIM(1U)
1638
1639#define S_MC7_CM 4
1640#define V_MC7_CM(x) ((x) << S_MC7_CM)
1641#define F_MC7_CM V_MC7_CM(1U)
1642
1643#define S_MC7_PMTX 3
1644#define V_MC7_PMTX(x) ((x) << S_MC7_PMTX)
1645#define F_MC7_PMTX V_MC7_PMTX(1U)
1646
1647#define S_MC7_PMRX 2
1648#define V_MC7_PMRX(x) ((x) << S_MC7_PMRX)
1649#define F_MC7_PMRX V_MC7_PMRX(1U)
1650
1651#define S_PCIM0 1
1652#define V_PCIM0(x) ((x) << S_PCIM0)
1653#define F_PCIM0 V_PCIM0(1U)
1654
1655#define S_SGE3 0
1656#define V_SGE3(x) ((x) << S_SGE3)
1657#define F_SGE3 V_SGE3(1U)
1658
1659#define A_PL_INT_CAUSE0 0x6e4
1660
1661#define A_PL_RST 0x6f0
1662
1663#define S_CRSTWRM 1
1664#define V_CRSTWRM(x) ((x) << S_CRSTWRM)
1665#define F_CRSTWRM V_CRSTWRM(1U)
1666
1667#define A_PL_REV 0x6f4
1668
1669#define A_PL_CLI 0x6f8
1670
1671#define A_MC5_DB_CONFIG 0x704
1672
1673#define S_TMTYPEHI 30
1674#define V_TMTYPEHI(x) ((x) << S_TMTYPEHI)
1675#define F_TMTYPEHI V_TMTYPEHI(1U)
1676
1677#define S_TMPARTSIZE 28
1678#define M_TMPARTSIZE 0x3
1679#define V_TMPARTSIZE(x) ((x) << S_TMPARTSIZE)
1680#define G_TMPARTSIZE(x) (((x) >> S_TMPARTSIZE) & M_TMPARTSIZE)
1681
1682#define S_TMTYPE 26
1683#define M_TMTYPE 0x3
1684#define V_TMTYPE(x) ((x) << S_TMTYPE)
1685#define G_TMTYPE(x) (((x) >> S_TMTYPE) & M_TMTYPE)
1686
1687#define S_COMPEN 17
1688#define V_COMPEN(x) ((x) << S_COMPEN)
1689#define F_COMPEN V_COMPEN(1U)
1690
1691#define S_PRTYEN 6
1692#define V_PRTYEN(x) ((x) << S_PRTYEN)
1693#define F_PRTYEN V_PRTYEN(1U)
1694
1695#define S_MBUSEN 5
1696#define V_MBUSEN(x) ((x) << S_MBUSEN)
1697#define F_MBUSEN V_MBUSEN(1U)
1698
1699#define S_DBGIEN 4
1700#define V_DBGIEN(x) ((x) << S_DBGIEN)
1701#define F_DBGIEN V_DBGIEN(1U)
1702
1703#define S_TMRDY 2
1704#define V_TMRDY(x) ((x) << S_TMRDY)
1705#define F_TMRDY V_TMRDY(1U)
1706
1707#define S_TMRST 1
1708#define V_TMRST(x) ((x) << S_TMRST)
1709#define F_TMRST V_TMRST(1U)
1710
/* MC5 TCAM mode select (A_MC5_DB_CONFIG bit 0); duplicate F_TMMODE removed. */
#define S_TMMODE 0
#define V_TMMODE(x) ((x) << S_TMMODE)
#define F_TMMODE V_TMMODE(1U)

1717#define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c
1718
1719#define A_MC5_DB_FILTER_TABLE 0x710
1720
1721#define A_MC5_DB_SERVER_INDEX 0x714
1722
1723#define A_MC5_DB_RSP_LATENCY 0x720
1724
1725#define S_RDLAT 16
1726#define M_RDLAT 0x1f
1727#define V_RDLAT(x) ((x) << S_RDLAT)
1728
1729#define S_LRNLAT 8
1730#define M_LRNLAT 0x1f
1731#define V_LRNLAT(x) ((x) << S_LRNLAT)
1732
1733#define S_SRCHLAT 0
1734#define M_SRCHLAT 0x1f
1735#define V_SRCHLAT(x) ((x) << S_SRCHLAT)
1736
1737#define A_MC5_DB_PART_ID_INDEX 0x72c
1738
1739#define A_MC5_DB_INT_ENABLE 0x740
1740
1741#define S_DELACTEMPTY 18
1742#define V_DELACTEMPTY(x) ((x) << S_DELACTEMPTY)
1743#define F_DELACTEMPTY V_DELACTEMPTY(1U)
1744
1745#define S_DISPQPARERR 17
1746#define V_DISPQPARERR(x) ((x) << S_DISPQPARERR)
1747#define F_DISPQPARERR V_DISPQPARERR(1U)
1748
1749#define S_REQQPARERR 16
1750#define V_REQQPARERR(x) ((x) << S_REQQPARERR)
1751#define F_REQQPARERR V_REQQPARERR(1U)
1752
1753#define S_UNKNOWNCMD 15
1754#define V_UNKNOWNCMD(x) ((x) << S_UNKNOWNCMD)
1755#define F_UNKNOWNCMD V_UNKNOWNCMD(1U)
1756
1757#define S_NFASRCHFAIL 8
1758#define V_NFASRCHFAIL(x) ((x) << S_NFASRCHFAIL)
1759#define F_NFASRCHFAIL V_NFASRCHFAIL(1U)
1760
1761#define S_ACTRGNFULL 7
1762#define V_ACTRGNFULL(x) ((x) << S_ACTRGNFULL)
1763#define F_ACTRGNFULL V_ACTRGNFULL(1U)
1764
1765#define S_PARITYERR 6
1766#define V_PARITYERR(x) ((x) << S_PARITYERR)
1767#define F_PARITYERR V_PARITYERR(1U)
1768
1769#define A_MC5_DB_INT_CAUSE 0x744
1770
1771#define A_MC5_DB_DBGI_CONFIG 0x774
1772
1773#define A_MC5_DB_DBGI_REQ_CMD 0x778
1774
1775#define A_MC5_DB_DBGI_REQ_ADDR0 0x77c
1776
1777#define A_MC5_DB_DBGI_REQ_ADDR1 0x780
1778
1779#define A_MC5_DB_DBGI_REQ_ADDR2 0x784
1780
1781#define A_MC5_DB_DBGI_REQ_DATA0 0x788
1782
1783#define A_MC5_DB_DBGI_REQ_DATA1 0x78c
1784
1785#define A_MC5_DB_DBGI_REQ_DATA2 0x790
1786
1787#define A_MC5_DB_DBGI_RSP_STATUS 0x7b0
1788
1789#define S_DBGIRSPVALID 0
1790#define V_DBGIRSPVALID(x) ((x) << S_DBGIRSPVALID)
1791#define F_DBGIRSPVALID V_DBGIRSPVALID(1U)
1792
1793#define A_MC5_DB_DBGI_RSP_DATA0 0x7b4
1794
1795#define A_MC5_DB_DBGI_RSP_DATA1 0x7b8
1796
1797#define A_MC5_DB_DBGI_RSP_DATA2 0x7bc
1798
1799#define A_MC5_DB_POPEN_DATA_WR_CMD 0x7cc
1800
1801#define A_MC5_DB_POPEN_MASK_WR_CMD 0x7d0
1802
1803#define A_MC5_DB_AOPEN_SRCH_CMD 0x7d4
1804
1805#define A_MC5_DB_AOPEN_LRN_CMD 0x7d8
1806
1807#define A_MC5_DB_SYN_SRCH_CMD 0x7dc
1808
1809#define A_MC5_DB_SYN_LRN_CMD 0x7e0
1810
1811#define A_MC5_DB_ACK_SRCH_CMD 0x7e4
1812
1813#define A_MC5_DB_ACK_LRN_CMD 0x7e8
1814
1815#define A_MC5_DB_ILOOKUP_CMD 0x7ec
1816
1817#define A_MC5_DB_ELOOKUP_CMD 0x7f0
1818
1819#define A_MC5_DB_DATA_WRITE_CMD 0x7f4
1820
1821#define A_MC5_DB_DATA_READ_CMD 0x7f8
1822
1823#define XGMAC0_0_BASE_ADDR 0x800
1824
1825#define A_XGM_TX_CTRL 0x800
1826
1827#define S_TXEN 0
1828#define V_TXEN(x) ((x) << S_TXEN)
1829#define F_TXEN V_TXEN(1U)
1830
1831#define A_XGM_TX_CFG 0x804
1832
1833#define S_TXPAUSEEN 0
1834#define V_TXPAUSEEN(x) ((x) << S_TXPAUSEEN)
1835#define F_TXPAUSEEN V_TXPAUSEEN(1U)
1836
1837#define A_XGM_RX_CTRL 0x80c
1838
1839#define S_RXEN 0
1840#define V_RXEN(x) ((x) << S_RXEN)
1841#define F_RXEN V_RXEN(1U)
1842
1843#define A_XGM_RX_CFG 0x810
1844
1845#define S_DISPAUSEFRAMES 9
1846#define V_DISPAUSEFRAMES(x) ((x) << S_DISPAUSEFRAMES)
1847#define F_DISPAUSEFRAMES V_DISPAUSEFRAMES(1U)
1848
1849#define S_EN1536BFRAMES 8
1850#define V_EN1536BFRAMES(x) ((x) << S_EN1536BFRAMES)
1851#define F_EN1536BFRAMES V_EN1536BFRAMES(1U)
1852
1853#define S_ENJUMBO 7
1854#define V_ENJUMBO(x) ((x) << S_ENJUMBO)
1855#define F_ENJUMBO V_ENJUMBO(1U)
1856
1857#define S_RMFCS 6
1858#define V_RMFCS(x) ((x) << S_RMFCS)
1859#define F_RMFCS V_RMFCS(1U)
1860
1861#define S_ENHASHMCAST 2
1862#define V_ENHASHMCAST(x) ((x) << S_ENHASHMCAST)
1863#define F_ENHASHMCAST V_ENHASHMCAST(1U)
1864
1865#define S_COPYALLFRAMES 0
1866#define V_COPYALLFRAMES(x) ((x) << S_COPYALLFRAMES)
1867#define F_COPYALLFRAMES V_COPYALLFRAMES(1U)
1868
1869#define A_XGM_RX_HASH_LOW 0x814
1870
1871#define A_XGM_RX_HASH_HIGH 0x818
1872
1873#define A_XGM_RX_EXACT_MATCH_LOW_1 0x81c
1874
1875#define A_XGM_RX_EXACT_MATCH_HIGH_1 0x820
1876
1877#define A_XGM_RX_EXACT_MATCH_LOW_2 0x824
1878
1879#define A_XGM_RX_EXACT_MATCH_LOW_3 0x82c
1880
1881#define A_XGM_RX_EXACT_MATCH_LOW_4 0x834
1882
1883#define A_XGM_RX_EXACT_MATCH_LOW_5 0x83c
1884
1885#define A_XGM_RX_EXACT_MATCH_LOW_6 0x844
1886
1887#define A_XGM_RX_EXACT_MATCH_LOW_7 0x84c
1888
1889#define A_XGM_RX_EXACT_MATCH_LOW_8 0x854
1890
1891#define A_XGM_STAT_CTRL 0x880
1892
1893#define S_CLRSTATS 2
1894#define V_CLRSTATS(x) ((x) << S_CLRSTATS)
1895#define F_CLRSTATS V_CLRSTATS(1U)
1896
1897#define A_XGM_RXFIFO_CFG 0x884
1898
1899#define S_RXFIFOPAUSEHWM 17
1900#define M_RXFIFOPAUSEHWM 0xfff
1901
1902#define V_RXFIFOPAUSEHWM(x) ((x) << S_RXFIFOPAUSEHWM)
1903
1904#define G_RXFIFOPAUSEHWM(x) (((x) >> S_RXFIFOPAUSEHWM) & M_RXFIFOPAUSEHWM)
1905
1906#define S_RXFIFOPAUSELWM 5
1907#define M_RXFIFOPAUSELWM 0xfff
1908
1909#define V_RXFIFOPAUSELWM(x) ((x) << S_RXFIFOPAUSELWM)
1910
1911#define G_RXFIFOPAUSELWM(x) (((x) >> S_RXFIFOPAUSELWM) & M_RXFIFOPAUSELWM)
1912
1913#define S_RXSTRFRWRD 1
1914#define V_RXSTRFRWRD(x) ((x) << S_RXSTRFRWRD)
1915#define F_RXSTRFRWRD V_RXSTRFRWRD(1U)
1916
1917#define S_DISERRFRAMES 0
1918#define V_DISERRFRAMES(x) ((x) << S_DISERRFRAMES)
1919#define F_DISERRFRAMES V_DISERRFRAMES(1U)
1920
1921#define A_XGM_TXFIFO_CFG 0x888
1922
1923#define S_TXFIFOTHRESH 4
1924#define M_TXFIFOTHRESH 0x1ff
1925
1926#define V_TXFIFOTHRESH(x) ((x) << S_TXFIFOTHRESH)
1927
1928#define A_XGM_SERDES_CTRL 0x890
1929#define A_XGM_SERDES_CTRL0 0x8e0
1930
1931#define S_SERDESRESET_ 24
1932#define V_SERDESRESET_(x) ((x) << S_SERDESRESET_)
1933#define F_SERDESRESET_ V_SERDESRESET_(1U)
1934
1935#define S_RXENABLE 4
1936#define V_RXENABLE(x) ((x) << S_RXENABLE)
1937#define F_RXENABLE V_RXENABLE(1U)
1938
1939#define S_TXENABLE 3
1940#define V_TXENABLE(x) ((x) << S_TXENABLE)
1941#define F_TXENABLE V_TXENABLE(1U)
1942
1943#define A_XGM_PAUSE_TIMER 0x890
1944
1945#define A_XGM_RGMII_IMP 0x89c
1946
1947#define S_XGM_IMPSETUPDATE 6
1948#define V_XGM_IMPSETUPDATE(x) ((x) << S_XGM_IMPSETUPDATE)
1949#define F_XGM_IMPSETUPDATE V_XGM_IMPSETUPDATE(1U)
1950
1951#define S_RGMIIIMPPD 3
1952#define M_RGMIIIMPPD 0x7
1953#define V_RGMIIIMPPD(x) ((x) << S_RGMIIIMPPD)
1954
1955#define S_RGMIIIMPPU 0
1956#define M_RGMIIIMPPU 0x7
1957#define V_RGMIIIMPPU(x) ((x) << S_RGMIIIMPPU)
1958
1959#define S_CALRESET 8
1960#define V_CALRESET(x) ((x) << S_CALRESET)
1961#define F_CALRESET V_CALRESET(1U)
1962
1963#define S_CALUPDATE 7
1964#define V_CALUPDATE(x) ((x) << S_CALUPDATE)
1965#define F_CALUPDATE V_CALUPDATE(1U)
1966
1967#define A_XGM_XAUI_IMP 0x8a0
1968
1969#define S_CALBUSY 31
1970#define V_CALBUSY(x) ((x) << S_CALBUSY)
1971#define F_CALBUSY V_CALBUSY(1U)
1972
1973#define S_XGM_CALFAULT 29
1974#define V_XGM_CALFAULT(x) ((x) << S_XGM_CALFAULT)
1975#define F_XGM_CALFAULT V_XGM_CALFAULT(1U)
1976
1977#define S_CALIMP 24
1978#define M_CALIMP 0x1f
1979#define V_CALIMP(x) ((x) << S_CALIMP)
1980#define G_CALIMP(x) (((x) >> S_CALIMP) & M_CALIMP)
1981
1982#define S_XAUIIMP 0
1983#define M_XAUIIMP 0x7
1984#define V_XAUIIMP(x) ((x) << S_XAUIIMP)
1985
1986#define A_XGM_RX_MAX_PKT_SIZE 0x8a8
1987#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4
1988
1989#define A_XGM_RESET_CTRL 0x8ac
1990
1991#define S_XG2G_RESET_ 3
1992#define V_XG2G_RESET_(x) ((x) << S_XG2G_RESET_)
1993#define F_XG2G_RESET_ V_XG2G_RESET_(1U)
1994
1995#define S_RGMII_RESET_ 2
1996#define V_RGMII_RESET_(x) ((x) << S_RGMII_RESET_)
1997#define F_RGMII_RESET_ V_RGMII_RESET_(1U)
1998
1999#define S_PCS_RESET_ 1
2000#define V_PCS_RESET_(x) ((x) << S_PCS_RESET_)
2001#define F_PCS_RESET_ V_PCS_RESET_(1U)
2002
2003#define S_MAC_RESET_ 0
2004#define V_MAC_RESET_(x) ((x) << S_MAC_RESET_)
2005#define F_MAC_RESET_ V_MAC_RESET_(1U)
2006
2007#define A_XGM_PORT_CFG 0x8b8
2008
2009#define S_CLKDIVRESET_ 3
2010#define V_CLKDIVRESET_(x) ((x) << S_CLKDIVRESET_)
2011#define F_CLKDIVRESET_ V_CLKDIVRESET_(1U)
2012
2013#define S_PORTSPEED 1
2014#define M_PORTSPEED 0x3
2015
2016#define V_PORTSPEED(x) ((x) << S_PORTSPEED)
2017
2018#define S_ENRGMII 0
2019#define V_ENRGMII(x) ((x) << S_ENRGMII)
2020#define F_ENRGMII V_ENRGMII(1U)
2021
2022#define A_XGM_INT_ENABLE 0x8d4
2023
2024#define S_TXFIFO_PRTY_ERR 17
2025#define M_TXFIFO_PRTY_ERR 0x7
2026
2027#define V_TXFIFO_PRTY_ERR(x) ((x) << S_TXFIFO_PRTY_ERR)
2028
2029#define S_RXFIFO_PRTY_ERR 14
2030#define M_RXFIFO_PRTY_ERR 0x7
2031
2032#define V_RXFIFO_PRTY_ERR(x) ((x) << S_RXFIFO_PRTY_ERR)
2033
2034#define S_TXFIFO_UNDERRUN 13
2035#define V_TXFIFO_UNDERRUN(x) ((x) << S_TXFIFO_UNDERRUN)
2036#define F_TXFIFO_UNDERRUN V_TXFIFO_UNDERRUN(1U)
2037
2038#define S_RXFIFO_OVERFLOW 12
2039#define V_RXFIFO_OVERFLOW(x) ((x) << S_RXFIFO_OVERFLOW)
2040#define F_RXFIFO_OVERFLOW V_RXFIFO_OVERFLOW(1U)
2041
2042#define S_SERDES_LOS 4
2043#define M_SERDES_LOS 0xf
2044
2045#define V_SERDES_LOS(x) ((x) << S_SERDES_LOS)
2046
2047#define S_XAUIPCSCTCERR 3
2048#define V_XAUIPCSCTCERR(x) ((x) << S_XAUIPCSCTCERR)
2049#define F_XAUIPCSCTCERR V_XAUIPCSCTCERR(1U)
2050
2051#define S_XAUIPCSALIGNCHANGE 2
2052#define V_XAUIPCSALIGNCHANGE(x) ((x) << S_XAUIPCSALIGNCHANGE)
2053#define F_XAUIPCSALIGNCHANGE V_XAUIPCSALIGNCHANGE(1U)
2054
2055#define A_XGM_INT_CAUSE 0x8d8
2056
2057#define A_XGM_XAUI_ACT_CTRL 0x8dc
2058
2059#define S_TXACTENABLE 1
2060#define V_TXACTENABLE(x) ((x) << S_TXACTENABLE)
2061#define F_TXACTENABLE V_TXACTENABLE(1U)
2062
2063#define A_XGM_SERDES_CTRL0 0x8e0
2064
2065#define S_RESET3 23
2066#define V_RESET3(x) ((x) << S_RESET3)
2067#define F_RESET3 V_RESET3(1U)
2068
2069#define S_RESET2 22
2070#define V_RESET2(x) ((x) << S_RESET2)
2071#define F_RESET2 V_RESET2(1U)
2072
2073#define S_RESET1 21
2074#define V_RESET1(x) ((x) << S_RESET1)
2075#define F_RESET1 V_RESET1(1U)
2076
2077#define S_RESET0 20
2078#define V_RESET0(x) ((x) << S_RESET0)
2079#define F_RESET0 V_RESET0(1U)
2080
2081#define S_PWRDN3 19
2082#define V_PWRDN3(x) ((x) << S_PWRDN3)
2083#define F_PWRDN3 V_PWRDN3(1U)
2084
2085#define S_PWRDN2 18
2086#define V_PWRDN2(x) ((x) << S_PWRDN2)
2087#define F_PWRDN2 V_PWRDN2(1U)
2088
2089#define S_PWRDN1 17
2090#define V_PWRDN1(x) ((x) << S_PWRDN1)
2091#define F_PWRDN1 V_PWRDN1(1U)
2092
2093#define S_PWRDN0 16
2094#define V_PWRDN0(x) ((x) << S_PWRDN0)
2095#define F_PWRDN0 V_PWRDN0(1U)
2096
2097#define S_RESETPLL23 15
2098#define V_RESETPLL23(x) ((x) << S_RESETPLL23)
2099#define F_RESETPLL23 V_RESETPLL23(1U)
2100
2101#define S_RESETPLL01 14
2102#define V_RESETPLL01(x) ((x) << S_RESETPLL01)
2103#define F_RESETPLL01 V_RESETPLL01(1U)
2104
2105#define A_XGM_SERDES_STAT0 0x8f0
2106
2107#define S_LOWSIG0 0
2108#define V_LOWSIG0(x) ((x) << S_LOWSIG0)
2109#define F_LOWSIG0 V_LOWSIG0(1U)
2110
2111#define A_XGM_SERDES_STAT3 0x8fc
2112
2113#define A_XGM_STAT_TX_BYTE_LOW 0x900
2114
2115#define A_XGM_STAT_TX_BYTE_HIGH 0x904
2116
2117#define A_XGM_STAT_TX_FRAME_LOW 0x908
2118
2119#define A_XGM_STAT_TX_FRAME_HIGH 0x90c
2120
2121#define A_XGM_STAT_TX_BCAST 0x910
2122
2123#define A_XGM_STAT_TX_MCAST 0x914
2124
2125#define A_XGM_STAT_TX_PAUSE 0x918
2126
2127#define A_XGM_STAT_TX_64B_FRAMES 0x91c
2128
2129#define A_XGM_STAT_TX_65_127B_FRAMES 0x920
2130
2131#define A_XGM_STAT_TX_128_255B_FRAMES 0x924
2132
2133#define A_XGM_STAT_TX_256_511B_FRAMES 0x928
2134
2135#define A_XGM_STAT_TX_512_1023B_FRAMES 0x92c
2136
2137#define A_XGM_STAT_TX_1024_1518B_FRAMES 0x930
2138
2139#define A_XGM_STAT_TX_1519_MAXB_FRAMES 0x934
2140
2141#define A_XGM_STAT_TX_ERR_FRAMES 0x938
2142
2143#define A_XGM_STAT_RX_BYTES_LOW 0x93c
2144
2145#define A_XGM_STAT_RX_BYTES_HIGH 0x940
2146
2147#define A_XGM_STAT_RX_FRAMES_LOW 0x944
2148
2149#define A_XGM_STAT_RX_FRAMES_HIGH 0x948
2150
2151#define A_XGM_STAT_RX_BCAST_FRAMES 0x94c
2152
2153#define A_XGM_STAT_RX_MCAST_FRAMES 0x950
2154
2155#define A_XGM_STAT_RX_PAUSE_FRAMES 0x954
2156
2157#define A_XGM_STAT_RX_64B_FRAMES 0x958
2158
2159#define A_XGM_STAT_RX_65_127B_FRAMES 0x95c
2160
2161#define A_XGM_STAT_RX_128_255B_FRAMES 0x960
2162
2163#define A_XGM_STAT_RX_256_511B_FRAMES 0x964
2164
2165#define A_XGM_STAT_RX_512_1023B_FRAMES 0x968
2166
2167#define A_XGM_STAT_RX_1024_1518B_FRAMES 0x96c
2168
2169#define A_XGM_STAT_RX_1519_MAXB_FRAMES 0x970
2170
2171#define A_XGM_STAT_RX_SHORT_FRAMES 0x974
2172
2173#define A_XGM_STAT_RX_OVERSIZE_FRAMES 0x978
2174
2175#define A_XGM_STAT_RX_JABBER_FRAMES 0x97c
2176
2177#define A_XGM_STAT_RX_CRC_ERR_FRAMES 0x980
2178
2179#define A_XGM_STAT_RX_LENGTH_ERR_FRAMES 0x984
2180
2181#define A_XGM_STAT_RX_SYM_CODE_ERR_FRAMES 0x988
2182
2183#define A_XGM_SERDES_STATUS0 0x98c
2184
2185#define A_XGM_SERDES_STATUS1 0x990
2186
2187#define S_CMULOCK 31
2188#define V_CMULOCK(x) ((x) << S_CMULOCK)
2189#define F_CMULOCK V_CMULOCK(1U)
2190
2191#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4
2192
2193#define A_XGM_RX_SPI4_SOP_EOP_CNT 0x9ac
2194
2195#define XGMAC0_1_BASE_ADDR 0xa00
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
new file mode 100644
index 000000000000..3f2cf8a07c61
--- /dev/null
+++ b/drivers/net/cxgb3/sge.c
@@ -0,0 +1,2681 @@
1/*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/skbuff.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/dma-mapping.h>
39#include "common.h"
40#include "regs.h"
41#include "sge_defs.h"
42#include "t3_cpl.h"
43#include "firmware_exports.h"
44
#define USE_GTS 0	/* build-time switch for the GTS-based Tx doorbell-avoidance path */

#define SGE_RX_SM_BUF_SIZE 1536	/* small Rx buffer size; fits a standard Ethernet frame */
#define SGE_RX_COPY_THRES 256	/* Rx packets up to this size are copied (see get_packet) */

# define SGE_RX_DROP_THRES 16	/* free-list low-water mark below which Rx packets may be dropped */

/*
 * Period of the Tx buffer reclaim timer.  This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)

/* WR size in bytes */
#define WR_LEN (WR_FLITS * 8)

/*
 * Types of Tx queues in each queue set.  Order here matters, do not change.
 * These are the indices into sge_qset.txq[].
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };	/* Ethernet, offload, control */

/* Values for sge_txq.flags */
enum {
	TXQ_RUNNING = 1 << 0,	/* fetch engine is running */
	TXQ_LAST_PKT_DB = 1 << 1,	/* last packet rang the doorbell */
};
71
struct tx_desc {	/* HW Tx descriptor: an opaque array of 64-bit flits */
	u64 flit[TX_DESC_FLITS];
};

struct rx_desc {	/* HW free-list (Rx buffer) descriptor */
	__be32 addr_lo;	/* low 32 bits of the buffer's bus address */
	__be32 len_gen;	/* length/generation word (written in add_one_rx_buf) */
	__be32 gen2;	/* second generation word */
	__be32 addr_hi;	/* high 32 bits of the buffer's bus address */
};

struct tx_sw_desc {	/* SW state per Tx descriptor */
	struct sk_buff *skb;
};

struct rx_sw_desc {	/* SW state per Rx descriptor */
	struct sk_buff *skb;	/* the posted receive buffer */
	DECLARE_PCI_UNMAP_ADDR(dma_addr);	/* DMA address saved for later unmap */
};

struct rsp_desc {	/* response queue descriptor */
	struct rss_header rss_hdr;
	__be32 flags;
	__be32 len_cq;
	u8 imm_data[47];	/* immediate packet data (copied out by get_imm_packet) */
	u8 intr_gen;
};

struct unmap_info {	/* packet unmapping info, overlays skb->cb */
	int sflit;	/* start flit of first SGL entry in Tx descriptor */
	u16 fragidx;	/* first page fragment in current Tx descriptor */
	u16 addr_idx;	/* buffer index of first SGL entry in descriptor */
	u32 len;	/* mapped length of skb main body */
};
106
107/*
108 * Maps a number of flits to the number of Tx descriptors that can hold them.
109 * The formula is
110 *
111 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
112 *
113 * HW allows up to 4 descriptors to be combined into a WR.
114 */
/* Index 0 is unused: a WR always needs at least one flit. */
static u8 flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};
131
/* Map an SGE free list back to the queue set that embeds it at fl[qidx]. */
static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

/* Map an SGE response queue back to its enclosing queue set. */
static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

/* Map an SGE Tx queue back to the queue set that embeds it at txq[qidx]. */
static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}
146
/**
 * refill_rspq - replenish an SGE response queue
 * @adapter: the adapter
 * @q: the response queue to replenish
 * @credits: how many new responses to make available
 *
 * Replenishes a response queue by making the supplied number of responses
 * available to HW.
 */
static inline void refill_rspq(struct adapter *adapter,
			       const struct sge_rspq *q, unsigned int credits)
{
	/* single register write: return @credits to this queue's HW context */
	t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}
162
/**
 * need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 * Returns true if the platform needs sk_buff unmapping.  The result is a
 * compile-time constant, so the compiler optimizes away the unmapping
 * code entirely on platforms that do not need it.
 */
static inline int need_skb_unmap(void)
{
	/*
	 * This structure is used to tell if the platform needs buffer
	 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
	 */
	struct dummy {
		DECLARE_PCI_UNMAP_ADDR(addr);
	};

	return sizeof(struct dummy) != 0;
}
181
/**
 * unmap_skb - unmap a packet main body and its page fragments
 * @skb: the packet
 * @q: the Tx queue containing Tx descriptors for the packet
 * @cidx: index of Tx descriptor
 * @pdev: the PCI device
 *
 * Unmap the main body of an sk_buff and its page fragments, if any.
 * Because of the fairly complicated structure of our SGLs and the desire
 * to conserve space for metadata, we keep the information necessary to
 * unmap an sk_buff partly in the sk_buff itself (in its cb), and partly
 * in the Tx descriptors (the physical addresses of the various data
 * buffers).  The send functions initialize the state in skb->cb so we
 * can unmap the buffers held in the first Tx descriptor here, and we
 * have enough information at this point to update the state for the next
 * Tx descriptor.
 */
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
			     unsigned int cidx, struct pci_dev *pdev)
{
	const struct sg_ent *sgp;
	struct unmap_info *ui = (struct unmap_info *)skb->cb;
	int nfrags, frag_idx, curflit, j = ui->addr_idx;

	/* locate the first SGL entry within this descriptor's flits */
	sgp = (struct sg_ent *)&q->desc[cidx].flit[ui->sflit];

	if (ui->len) {	/* the skb main body has not been unmapped yet */
		pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), ui->len,
				 PCI_DMA_TODEVICE);
		ui->len = 0;	/* so we know for next descriptor for this skb */
		j = 1;
	}

	frag_idx = ui->fragidx;
	curflit = ui->sflit + 1 + j;
	nfrags = skb_shinfo(skb)->nr_frags;

	/* walk the fragments that fit within this descriptor's WR_FLITS */
	while (frag_idx < nfrags && curflit < WR_FLITS) {
		pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
			       skb_shinfo(skb)->frags[frag_idx].size,
			       PCI_DMA_TODEVICE);
		j ^= 1;	/* each sg_ent carries two addr/len pairs */
		if (j == 0) {
			sgp++;
			curflit++;
		}
		curflit++;
		frag_idx++;
	}

	if (frag_idx < nfrags) {	/* SGL continues into next Tx descriptor */
		ui->fragidx = frag_idx;
		ui->addr_idx = j;
		ui->sflit = curflit - WR_FLITS - j;	/* sflit can be -1 */
	}
}
238
/**
 * free_tx_desc - reclaims Tx descriptors and their buffers
 * @adapter: the adapter
 * @q: the Tx queue to reclaim descriptors from
 * @n: the number of descriptors to reclaim
 *
 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 * Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
			 unsigned int n)
{
	struct tx_sw_desc *d;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int cidx = q->cidx;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {	/* an SGL is present */
			if (need_skb_unmap())
				unmap_skb(d->skb, q, cidx, pdev);
			/*
			 * NOTE(review): skb->priority appears to hold the
			 * index of the skb's last descriptor (set by the
			 * send path, not visible here), so the skb is freed
			 * exactly once — when that descriptor is reclaimed.
			 */
			if (d->skb->priority == cidx)
				kfree_skb(d->skb);
		}
		++d;
		if (++cidx == q->size) {	/* wrap around the ring */
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}
271
272/**
273 * reclaim_completed_tx - reclaims completed Tx descriptors
274 * @adapter: the adapter
275 * @q: the Tx queue to reclaim completed descriptors from
276 *
277 * Reclaims Tx descriptors that the SGE has indicated it has processed,
278 * and frees the associated buffers if possible. Called with the Tx
279 * queue's lock held.
280 */
281static inline void reclaim_completed_tx(struct adapter *adapter,
282 struct sge_txq *q)
283{
284 unsigned int reclaim = q->processed - q->cleaned;
285
286 if (reclaim) {
287 free_tx_desc(adapter, q, reclaim);
288 q->cleaned += reclaim;
289 q->in_use -= reclaim;
290 }
291}
292
293/**
294 * should_restart_tx - are there enough resources to restart a Tx queue?
295 * @q: the Tx queue
296 *
297 * Checks if there are enough descriptors to restart a suspended Tx queue.
298 */
299static inline int should_restart_tx(const struct sge_txq *q)
300{
301 unsigned int r = q->processed - q->cleaned;
302
303 return q->in_use - r < (q->size >> 1);
304}
305
/**
 * free_rx_bufs - free the Rx buffers on an SGE free list
 * @pdev: the PCI device associated with the adapter
 * @q: the SGE free list to clean up
 *
 * Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
 * this queue should be stopped before calling this function.
 */
static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
{
	unsigned int cidx = q->cidx;

	/*
	 * One iteration per posted buffer.  The post-decrement leaves
	 * q->credits underflowed on exit; the caller (t3_free_qset) clears
	 * the whole queue set afterwards.
	 */
	while (q->credits--) {
		struct rx_sw_desc *d = &q->sdesc[cidx];

		pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
				 q->buf_size, PCI_DMA_FROMDEVICE);
		kfree_skb(d->skb);
		d->skb = NULL;
		if (++cidx == q->size)	/* wrap around the circular list */
			cidx = 0;
	}
}
329
/**
 * add_one_rx_buf - add a packet buffer to a free-buffer list
 * @skb: the buffer to add
 * @len: the buffer length
 * @d: the HW Rx descriptor to write
 * @sd: the SW Rx descriptor to write
 * @gen: the generation bit value
 * @pdev: the PCI device associated with the adapter
 *
 * Add a buffer of the given length to the supplied HW and SW Rx
 * descriptors.
 */
static inline void add_one_rx_buf(struct sk_buff *skb, unsigned int len,
				  struct rx_desc *d, struct rx_sw_desc *sd,
				  unsigned int gen, struct pci_dev *pdev)
{
	dma_addr_t mapping;

	sd->skb = skb;
	mapping = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(sd, dma_addr, mapping);	/* saved for later unmap */

	/* the bus address is split across two 32-bit big-endian fields */
	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	/* address must be visible before the generation words that
	 * (presumably) hand the descriptor to HW — keep this ordering */
	wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
}
358
/**
 * refill_fl - refill an SGE free-buffer list
 * @adap: the adapter
 * @q: the free-list to refill
 * @n: the number of new buffers to allocate
 * @gfp: the gfp flags for allocating new buffers
 *
 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
 * allocated with the supplied gfp flags.  The caller must assure that
 * @n does not exceed the queue's capacity.
 */
static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	struct rx_desc *d = &q->desc[q->pidx];

	while (n--) {
		struct sk_buff *skb = alloc_skb(q->buf_size, gfp);

		if (!skb)
			break;	/* out of memory: post what we have so far */

		add_one_rx_buf(skb, q->buf_size, d, sd, q->gen, adap->pdev);
		d++;
		sd++;
		if (++q->pidx == q->size) {	/* wrap: flip generation bit */
			q->pidx = 0;
			q->gen ^= 1;
			sd = q->sdesc;
			d = q->desc;
		}
		q->credits++;
	}

	/* NOTE(review): the doorbell is rung even when no buffer was added */
	t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}
395
396static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
397{
398 refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
399}
400
/**
 * recycle_rx_buf - recycle a receive buffer
 * @adap: the adapter
 * @q: the SGE free list
 * @idx: index of buffer to recycle
 *
 * Recycles the specified buffer on the given free list by adding it at
 * the next available slot on the list.
 */
static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
			   unsigned int idx)
{
	struct rx_desc *from = &q->desc[idx];
	struct rx_desc *to = &q->desc[q->pidx];

	q->sdesc[q->pidx] = q->sdesc[idx];	/* carry over skb + DMA state */
	to->addr_lo = from->addr_lo;	/* already big endian */
	to->addr_hi = from->addr_hi;	/* likewise */
	wmb();	/* address before generation words — same ordering as add_one_rx_buf */
	to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
	to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
	q->credits++;

	if (++q->pidx == q->size) {	/* wrap: flip generation bit */
		q->pidx = 0;
		q->gen ^= 1;
	}
	t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));	/* notify HW */
}
430
431/**
432 * alloc_ring - allocate resources for an SGE descriptor ring
433 * @pdev: the PCI device
434 * @nelem: the number of descriptors
435 * @elem_size: the size of each descriptor
436 * @sw_size: the size of the SW state associated with each ring element
437 * @phys: the physical address of the allocated ring
438 * @metadata: address of the array holding the SW state for the ring
439 *
440 * Allocates resources for an SGE descriptor ring, such as Tx queues,
441 * free buffer lists, or response queues. Each SGE ring requires
442 * space for its HW descriptors plus, optionally, space for the SW state
443 * associated with each HW entry (the metadata). The function returns
444 * three values: the virtual address for the HW ring (the return value
445 * of the function), the physical address of the HW ring, and the address
446 * of the SW ring.
447 */
448static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
449 size_t sw_size, dma_addr_t *phys, void *metadata)
450{
451 size_t len = nelem * elem_size;
452 void *s = NULL;
453 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
454
455 if (!p)
456 return NULL;
457 if (sw_size) {
458 s = kcalloc(nelem, sw_size, GFP_KERNEL);
459
460 if (!s) {
461 dma_free_coherent(&pdev->dev, len, p, *phys);
462 return NULL;
463 }
464 }
465 if (metadata)
466 *(void **)metadata = s;
467 memset(p, 0, len);
468 return p;
469}
470
/**
 * t3_free_qset - free the resources of an SGE queue set
 * @adapter: the adapter owning the queue set
 * @q: the queue set
 *
 * Release the HW and SW resources associated with an SGE queue set, such
 * as HW contexts, packet buffers, and descriptor rings.  Traffic to the
 * queue set must be quiesced prior to calling this.
 */
void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
{
	int i;
	struct pci_dev *pdev = adapter->pdev;

	/* stop deferred Tx reclaim before tearing anything down */
	if (q->tx_reclaim_timer.function)
		del_timer_sync(&q->tx_reclaim_timer);

	/* free lists: disable HW context, then release buffers and ring */
	for (i = 0; i < SGE_RXQ_PER_SET; ++i)
		if (q->fl[i].desc) {
			spin_lock(&adapter->sge.reg_lock);
			t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
			spin_unlock(&adapter->sge.reg_lock);
			free_rx_bufs(pdev, &q->fl[i]);
			kfree(q->fl[i].sdesc);
			dma_free_coherent(&pdev->dev,
					  q->fl[i].size *
					  sizeof(struct rx_desc), q->fl[i].desc,
					  q->fl[i].phys_addr);
		}

	/* Tx queues: disable egress context, reclaim any in-flight skbs */
	for (i = 0; i < SGE_TXQ_PER_SET; ++i)
		if (q->txq[i].desc) {
			spin_lock(&adapter->sge.reg_lock);
			t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
			spin_unlock(&adapter->sge.reg_lock);
			if (q->txq[i].sdesc) {
				free_tx_desc(adapter, &q->txq[i],
					     q->txq[i].in_use);
				kfree(q->txq[i].sdesc);
			}
			dma_free_coherent(&pdev->dev,
					  q->txq[i].size *
					  sizeof(struct tx_desc),
					  q->txq[i].desc, q->txq[i].phys_addr);
			__skb_queue_purge(&q->txq[i].sendq);
		}

	if (q->rspq.desc) {
		spin_lock(&adapter->sge.reg_lock);
		t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
		spin_unlock(&adapter->sge.reg_lock);
		dma_free_coherent(&pdev->dev,
				  q->rspq.size * sizeof(struct rsp_desc),
				  q->rspq.desc, q->rspq.phys_addr);
	}

	/*
	 * NOTE(review): the driver appears to stash a per-device pointer in
	 * netdev->atalk_ptr (cleared here); confirm against the setup path.
	 */
	if (q->netdev)
		q->netdev->atalk_ptr = NULL;

	memset(q, 0, sizeof(*q));	/* mark the whole queue set as unused */
}
532
/**
 * init_qset_cntxt - initialize an SGE queue set context info
 * @qs: the queue set
 * @id: the queue set id
 *
 * Initializes the TIDs and context ids for the queues of a queue set.
 */
static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
{
	qs->rspq.cntxt_id = id;
	/* each queue set owns two free lists with consecutive context ids */
	qs->fl[0].cntxt_id = 2 * id;
	qs->fl[1].cntxt_id = 2 * id + 1;
	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
	/* no token is assigned to the offload Tx queue here */
	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
}
551
/**
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 *
 * Returns the number of flits occupied by a scatter/gather list with @n
 * entries: every full pair of entries packs into 3 flits, and a leftover
 * odd entry needs 2 more.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	return 3 * (n / 2) + 2 * (n & 1);
}
564
/**
 * flits_to_desc - returns the num of Tx descriptors for the given flits
 * @n: the number of flits
 *
 * Calculates the number of Tx descriptors needed for the supplied number
 * of flits by a table lookup (see flit_desc_map above).
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n >= ARRAY_SIZE(flit_desc_map));	/* table covers all legal sizes */
	return flit_desc_map[n];
}
577
/**
 * get_packet - return the next ingress packet buffer from a free list
 * @adap: the adapter that received the packet
 * @fl: the SGE free list holding the packet
 * @len: the packet length including any SGE padding
 * @drop_thres: # of remaining buffers before we start dropping packets
 *
 * Get the next packet from a free list and complete setup of the
 * sk_buff.  If the packet is small we make a copy and recycle the
 * original buffer, otherwise we use the original buffer itself.  If a
 * positive drop threshold is supplied packets are dropped and their
 * buffers recycled if (a) the number of remaining buffers is under the
 * threshold and the packet is too big to copy, or (b) the packet should
 * be copied but there is no memory for the copy.
 */
static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
				  unsigned int len, unsigned int drop_thres)
{
	struct sk_buff *skb = NULL;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	prefetch(sd->skb->data);

	if (len <= SGE_RX_COPY_THRES) {
		/* small packet: copy into a fresh skb, recycle the buffer */
		skb = alloc_skb(len, GFP_ATOMIC);
		if (likely(skb != NULL)) {
			__skb_put(skb, len);
			/* sync so the CPU sees the DMA'ed data, copy, then
			 * give the buffer back to the device */
			pci_dma_sync_single_for_cpu(adap->pdev,
						    pci_unmap_addr(sd,
								   dma_addr),
						    len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data, sd->skb->data, len);
			pci_dma_sync_single_for_device(adap->pdev,
						       pci_unmap_addr(sd,
								      dma_addr),
						       len, PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			goto use_orig_buf;	/* no memory, dropping not allowed */
recycle:
		recycle_rx_buf(adap, fl, fl->cidx);
		return skb;	/* NULL here means the packet was dropped */
	}

	if (unlikely(fl->credits < drop_thres))
		goto recycle;	/* free list too low: drop rather than consume */

use_orig_buf:
	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
			 fl->buf_size, PCI_DMA_FROMDEVICE);
	skb = sd->skb;
	skb_put(skb, len);
	__refill_fl(adap, fl);	/* replace the buffer we just consumed */
	return skb;
}
632
633/**
634 * get_imm_packet - return the next ingress packet buffer from a response
635 * @resp: the response descriptor containing the packet data
636 *
637 * Return a packet containing the immediate data of the given response.
638 */
639static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
640{
641 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
642
643 if (skb) {
644 __skb_put(skb, IMMED_PKT_SIZE);
645 memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE);
646 }
647 return skb;
648}
649
/**
 * calc_tx_descs - calculate the number of Tx descriptors for a packet
 * @skb: the packet
 *
 * Returns the number of Tx descriptors needed for the given Ethernet
 * packet.  Ethernet packets require addition of WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	unsigned int flits;

	/* small packets fit, with the CPL header, inside a single WR */
	if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
		return 1;

	/* 2 header flits plus an SGL covering the main body and fragments */
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
	if (skb_shinfo(skb)->gso_size)	/* one extra flit for TSO — verify */
		flits++;
	return flits_to_desc(flits);
}
669
670/**
671 * make_sgl - populate a scatter/gather list for a packet
672 * @skb: the packet
673 * @sgp: the SGL to populate
674 * @start: start address of skb main body data to include in the SGL
675 * @len: length of skb main body data to include in the SGL
676 * @pdev: the PCI device
677 *
678 * Generates a scatter/gather list for the buffers that make up a packet
679 * and returns the SGL size in 8-byte words. The caller must size the SGL
680 * appropriately.
681 */
682static inline unsigned int make_sgl(const struct sk_buff *skb,
683 struct sg_ent *sgp, unsigned char *start,
684 unsigned int len, struct pci_dev *pdev)
685{
686 dma_addr_t mapping;
687 unsigned int i, j = 0, nfrags;
688
689 if (len) {
690 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
691 sgp->len[0] = cpu_to_be32(len);
692 sgp->addr[0] = cpu_to_be64(mapping);
693 j = 1;
694 }
695
696 nfrags = skb_shinfo(skb)->nr_frags;
697 for (i = 0; i < nfrags; i++) {
698 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
699
700 mapping = pci_map_page(pdev, frag->page, frag->page_offset,
701 frag->size, PCI_DMA_TODEVICE);
702 sgp->len[j] = cpu_to_be32(frag->size);
703 sgp->addr[j] = cpu_to_be64(mapping);
704 j ^= 1;
705 if (j == 0)
706 ++sgp;
707 }
708 if (j)
709 sgp->len[j] = 0;
710 return ((nfrags + (len != 0)) * 3) / 2 + j;
711}
712
713/**
714 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
715 * @adap: the adapter
716 * @q: the Tx queue
717 *
718 * Ring the doorbel if a Tx queue is asleep. There is a natural race,
719 * where the HW is going to sleep just after we checked, however,
720 * then the interrupt handler will detect the outstanding TX packet
721 * and ring the doorbell for us.
722 *
723 * When GTS is disabled we unconditionally ring the doorbell.
724 */
725static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
726{
727#if USE_GTS
728 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
729 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
730 set_bit(TXQ_LAST_PKT_DB, &q->flags);
731 t3_write_reg(adap, A_SG_KDOORBELL,
732 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
733 }
734#else
735 wmb(); /* write descriptors before telling HW */
736 t3_write_reg(adap, A_SG_KDOORBELL,
737 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
738#endif
739}
740
/*
 * Write the generation bit into the last flit of a Tx descriptor.  Only
 * needed when descriptors carry two generation bits (SGE_NUM_GENBITS == 2);
 * otherwise this compiles away to nothing.
 */
static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
{
#if SGE_NUM_GENBITS == 2
	d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
#endif
}
747
748/**
749 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
750 * @ndesc: number of Tx descriptors spanned by the SGL
751 * @skb: the packet corresponding to the WR
752 * @d: first Tx descriptor to be written
753 * @pidx: index of above descriptors
754 * @q: the SGE Tx queue
755 * @sgl: the SGL
756 * @flits: number of flits to the start of the SGL in the first descriptor
757 * @sgl_flits: the SGL size in flits
758 * @gen: the Tx descriptor generation
759 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
760 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
761 *
762 * Write a work request header and an associated SGL. If the SGL is
763 * small enough to fit into one Tx descriptor it has already been written
764 * and we just need to write the WR header. Otherwise we distribute the
765 * SGL across the number of descriptors it spans.
766 */
767static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
768 struct tx_desc *d, unsigned int pidx,
769 const struct sge_txq *q,
770 const struct sg_ent *sgl,
771 unsigned int flits, unsigned int sgl_flits,
772 unsigned int gen, unsigned int wr_hi,
773 unsigned int wr_lo)
774{
775 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
776 struct tx_sw_desc *sd = &q->sdesc[pidx];
777
778 sd->skb = skb;
779 if (need_skb_unmap()) {
780 struct unmap_info *ui = (struct unmap_info *)skb->cb;
781
782 ui->fragidx = 0;
783 ui->addr_idx = 0;
784 ui->sflit = flits;
785 }
786
787 if (likely(ndesc == 1)) {
788 skb->priority = pidx;
789 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
790 V_WR_SGLSFLT(flits)) | wr_hi;
791 wmb();
792 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
793 V_WR_GEN(gen)) | wr_lo;
794 wr_gen2(d, gen);
795 } else {
796 unsigned int ogen = gen;
797 const u64 *fp = (const u64 *)sgl;
798 struct work_request_hdr *wp = wrp;
799
800 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
801 V_WR_SGLSFLT(flits)) | wr_hi;
802
803 while (sgl_flits) {
804 unsigned int avail = WR_FLITS - flits;
805
806 if (avail > sgl_flits)
807 avail = sgl_flits;
808 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
809 sgl_flits -= avail;
810 ndesc--;
811 if (!sgl_flits)
812 break;
813
814 fp += avail;
815 d++;
816 sd++;
817 if (++pidx == q->size) {
818 pidx = 0;
819 gen ^= 1;
820 d = q->desc;
821 sd = q->sdesc;
822 }
823
824 sd->skb = skb;
825 wrp = (struct work_request_hdr *)d;
826 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
827 V_WR_SGLSFLT(1)) | wr_hi;
828 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
829 sgl_flits + 1)) |
830 V_WR_GEN(gen)) | wr_lo;
831 wr_gen2(d, gen);
832 flits = 1;
833 }
834 skb->priority = pidx;
835 wrp->wr_hi |= htonl(F_WR_EOP);
836 wmb();
837 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
838 wr_gen2((struct tx_desc *)wp, ogen);
839 WARN_ON(ndesc != 0);
840 }
841}
842
843/**
844 * write_tx_pkt_wr - write a TX_PKT work request
845 * @adap: the adapter
846 * @skb: the packet to send
847 * @pi: the egress interface
848 * @pidx: index of the first Tx descriptor to write
849 * @gen: the generation value to use
850 * @q: the Tx queue
851 * @ndesc: number of descriptors the packet will occupy
852 * @compl: the value of the COMPL bit to use
853 *
854 * Generate a TX_PKT work request to send the supplied packet.
855 */
856static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
857 const struct port_info *pi,
858 unsigned int pidx, unsigned int gen,
859 struct sge_txq *q, unsigned int ndesc,
860 unsigned int compl)
861{
862 unsigned int flits, sgl_flits, cntrl, tso_info;
863 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
864 struct tx_desc *d = &q->desc[pidx];
865 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
866
867 cpl->len = htonl(skb->len | 0x80000000);
868 cntrl = V_TXPKT_INTF(pi->port_id);
869
870 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
871 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
872
873 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
874 if (tso_info) {
875 int eth_type;
876 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
877
878 d->flit[2] = 0;
879 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
880 hdr->cntrl = htonl(cntrl);
881 eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
882 CPL_ETH_II : CPL_ETH_II_VLAN;
883 tso_info |= V_LSO_ETH_TYPE(eth_type) |
884 V_LSO_IPHDR_WORDS(skb->nh.iph->ihl) |
885 V_LSO_TCPHDR_WORDS(skb->h.th->doff);
886 hdr->lso_info = htonl(tso_info);
887 flits = 3;
888 } else {
889 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
890 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
891 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
892 cpl->cntrl = htonl(cntrl);
893
894 if (skb->len <= WR_LEN - sizeof(*cpl)) {
895 q->sdesc[pidx].skb = NULL;
896 if (!skb->data_len)
897 memcpy(&d->flit[2], skb->data, skb->len);
898 else
899 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
900
901 flits = (skb->len + 7) / 8 + 2;
902 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
903 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
904 | F_WR_SOP | F_WR_EOP | compl);
905 wmb();
906 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
907 V_WR_TID(q->token));
908 wr_gen2(d, gen);
909 kfree_skb(skb);
910 return;
911 }
912
913 flits = 2;
914 }
915
916 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
917 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
918 if (need_skb_unmap())
919 ((struct unmap_info *)skb->cb)->len = skb_headlen(skb);
920
921 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
922 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
923 htonl(V_WR_TID(q->token)));
924}
925
926/**
927 * eth_xmit - add a packet to the Ethernet Tx queue
928 * @skb: the packet
929 * @dev: the egress net device
930 *
931 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
932 */
933int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
934{
935 unsigned int ndesc, pidx, credits, gen, compl;
936 const struct port_info *pi = netdev_priv(dev);
937 struct adapter *adap = dev->priv;
938 struct sge_qset *qs = dev2qset(dev);
939 struct sge_txq *q = &qs->txq[TXQ_ETH];
940
941 /*
942 * The chip min packet length is 9 octets but play safe and reject
943 * anything shorter than an Ethernet header.
944 */
945 if (unlikely(skb->len < ETH_HLEN)) {
946 dev_kfree_skb(skb);
947 return NETDEV_TX_OK;
948 }
949
950 spin_lock(&q->lock);
951 reclaim_completed_tx(adap, q);
952
953 credits = q->size - q->in_use;
954 ndesc = calc_tx_descs(skb);
955
956 if (unlikely(credits < ndesc)) {
957 if (!netif_queue_stopped(dev)) {
958 netif_stop_queue(dev);
959 set_bit(TXQ_ETH, &qs->txq_stopped);
960 q->stops++;
961 dev_err(&adap->pdev->dev,
962 "%s: Tx ring %u full while queue awake!\n",
963 dev->name, q->cntxt_id & 7);
964 }
965 spin_unlock(&q->lock);
966 return NETDEV_TX_BUSY;
967 }
968
969 q->in_use += ndesc;
970 if (unlikely(credits - ndesc < q->stop_thres)) {
971 q->stops++;
972 netif_stop_queue(dev);
973 set_bit(TXQ_ETH, &qs->txq_stopped);
974#if !USE_GTS
975 if (should_restart_tx(q) &&
976 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
977 q->restarts++;
978 netif_wake_queue(dev);
979 }
980#endif
981 }
982
983 gen = q->gen;
984 q->unacked += ndesc;
985 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
986 q->unacked &= 7;
987 pidx = q->pidx;
988 q->pidx += ndesc;
989 if (q->pidx >= q->size) {
990 q->pidx -= q->size;
991 q->gen ^= 1;
992 }
993
994 /* update port statistics */
995 if (skb->ip_summed == CHECKSUM_COMPLETE)
996 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
997 if (skb_shinfo(skb)->gso_size)
998 qs->port_stats[SGE_PSTAT_TSO]++;
999 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1000 qs->port_stats[SGE_PSTAT_VLANINS]++;
1001
1002 dev->trans_start = jiffies;
1003 spin_unlock(&q->lock);
1004
1005 /*
1006 * We do not use Tx completion interrupts to free DMAd Tx packets.
1007 * This is good for performamce but means that we rely on new Tx
1008 * packets arriving to run the destructors of completed packets,
1009 * which open up space in their sockets' send queues. Sometimes
1010 * we do not get such new packets causing Tx to stall. A single
1011 * UDP transmitter is a good example of this situation. We have
1012 * a clean up timer that periodically reclaims completed packets
1013 * but it doesn't run often enough (nor do we want it to) to prevent
1014 * lengthy stalls. A solution to this problem is to run the
1015 * destructor early, after the packet is queued but before it's DMAd.
1016 * A cons is that we lie to socket memory accounting, but the amount
1017 * of extra memory is reasonable (limited by the number of Tx
1018 * descriptors), the packets do actually get freed quickly by new
1019 * packets almost always, and for protocols like TCP that wait for
1020 * acks to really free up the data the extra memory is even less.
1021 * On the positive side we run the destructors on the sending CPU
1022 * rather than on a potentially different completing CPU, usually a
1023 * good thing. We also run them without holding our Tx queue lock,
1024 * unlike what reclaim_completed_tx() would otherwise do.
1025 *
1026 * Run the destructor before telling the DMA engine about the packet
1027 * to make sure it doesn't complete and get freed prematurely.
1028 */
1029 if (likely(!skb_shared(skb)))
1030 skb_orphan(skb);
1031
1032 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1033 check_ring_tx_db(adap, q);
1034 return NETDEV_TX_OK;
1035}
1036
1037/**
1038 * write_imm - write a packet into a Tx descriptor as immediate data
1039 * @d: the Tx descriptor to write
1040 * @skb: the packet
1041 * @len: the length of packet data to write as immediate data
1042 * @gen: the generation bit value to write
1043 *
1044 * Writes a packet as immediate data into a Tx descriptor. The packet
1045 * contains a work request at its beginning. We must write the packet
1046 * carefully so the SGE doesn't read accidentally before it's written in
1047 * its entirety.
1048 */
1049static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1050 unsigned int len, unsigned int gen)
1051{
1052 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1053 struct work_request_hdr *to = (struct work_request_hdr *)d;
1054
1055 memcpy(&to[1], &from[1], len - sizeof(*from));
1056 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1057 V_WR_BCNTLFLT(len & 7));
1058 wmb();
1059 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1060 V_WR_LEN((len + 7) / 8));
1061 wr_gen2(d, gen);
1062 kfree_skb(skb);
1063}
1064
1065/**
1066 * check_desc_avail - check descriptor availability on a send queue
1067 * @adap: the adapter
1068 * @q: the send queue
1069 * @skb: the packet needing the descriptors
1070 * @ndesc: the number of Tx descriptors needed
1071 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1072 *
1073 * Checks if the requested number of Tx descriptors is available on an
1074 * SGE send queue. If the queue is already suspended or not enough
1075 * descriptors are available the packet is queued for later transmission.
1076 * Must be called with the Tx queue locked.
1077 *
1078 * Returns 0 if enough descriptors are available, 1 if there aren't
1079 * enough descriptors and the packet has been queued, and 2 if the caller
1080 * needs to retry because there weren't enough descriptors at the
1081 * beginning of the call but some freed up in the mean time.
1082 */
1083static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1084 struct sk_buff *skb, unsigned int ndesc,
1085 unsigned int qid)
1086{
1087 if (unlikely(!skb_queue_empty(&q->sendq))) {
1088 addq_exit:__skb_queue_tail(&q->sendq, skb);
1089 return 1;
1090 }
1091 if (unlikely(q->size - q->in_use < ndesc)) {
1092 struct sge_qset *qs = txq_to_qset(q, qid);
1093
1094 set_bit(qid, &qs->txq_stopped);
1095 smp_mb__after_clear_bit();
1096
1097 if (should_restart_tx(q) &&
1098 test_and_clear_bit(qid, &qs->txq_stopped))
1099 return 2;
1100
1101 q->stops++;
1102 goto addq_exit;
1103 }
1104 return 0;
1105}
1106
1107/**
1108 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1109 * @q: the SGE control Tx queue
1110 *
1111 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1112 * that send only immediate data (presently just the control queues) and
1113 * thus do not have any sk_buffs to release.
1114 */
1115static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1116{
1117 unsigned int reclaim = q->processed - q->cleaned;
1118
1119 q->in_use -= reclaim;
1120 q->cleaned += reclaim;
1121}
1122
1123static inline int immediate(const struct sk_buff *skb)
1124{
1125 return skb->len <= WR_LEN && !skb->data_len;
1126}
1127
1128/**
1129 * ctrl_xmit - send a packet through an SGE control Tx queue
1130 * @adap: the adapter
1131 * @q: the control queue
1132 * @skb: the packet
1133 *
1134 * Send a packet through an SGE control Tx queue. Packets sent through
1135 * a control queue must fit entirely as immediate data in a single Tx
1136 * descriptor and have no page fragments.
1137 */
1138static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1139 struct sk_buff *skb)
1140{
1141 int ret;
1142 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1143
1144 if (unlikely(!immediate(skb))) {
1145 WARN_ON(1);
1146 dev_kfree_skb(skb);
1147 return NET_XMIT_SUCCESS;
1148 }
1149
1150 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1151 wrp->wr_lo = htonl(V_WR_TID(q->token));
1152
1153 spin_lock(&q->lock);
1154 again:reclaim_completed_tx_imm(q);
1155
1156 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1157 if (unlikely(ret)) {
1158 if (ret == 1) {
1159 spin_unlock(&q->lock);
1160 return NET_XMIT_CN;
1161 }
1162 goto again;
1163 }
1164
1165 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1166
1167 q->in_use++;
1168 if (++q->pidx >= q->size) {
1169 q->pidx = 0;
1170 q->gen ^= 1;
1171 }
1172 spin_unlock(&q->lock);
1173 wmb();
1174 t3_write_reg(adap, A_SG_KDOORBELL,
1175 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1176 return NET_XMIT_SUCCESS;
1177}
1178
1179/**
1180 * restart_ctrlq - restart a suspended control queue
1181 * @qs: the queue set cotaining the control queue
1182 *
1183 * Resumes transmission on a suspended Tx control queue.
1184 */
1185static void restart_ctrlq(unsigned long data)
1186{
1187 struct sk_buff *skb;
1188 struct sge_qset *qs = (struct sge_qset *)data;
1189 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1190 struct adapter *adap = qs->netdev->priv;
1191
1192 spin_lock(&q->lock);
1193 again:reclaim_completed_tx_imm(q);
1194
1195 while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) {
1196
1197 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1198
1199 if (++q->pidx >= q->size) {
1200 q->pidx = 0;
1201 q->gen ^= 1;
1202 }
1203 q->in_use++;
1204 }
1205
1206 if (!skb_queue_empty(&q->sendq)) {
1207 set_bit(TXQ_CTRL, &qs->txq_stopped);
1208 smp_mb__after_clear_bit();
1209
1210 if (should_restart_tx(q) &&
1211 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1212 goto again;
1213 q->stops++;
1214 }
1215
1216 spin_unlock(&q->lock);
1217 t3_write_reg(adap, A_SG_KDOORBELL,
1218 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1219}
1220
1221/*
1222 * Send a management message through control queue 0
1223 */
1224int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1225{
1226 return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1227}
1228
1229/**
1230 * write_ofld_wr - write an offload work request
1231 * @adap: the adapter
1232 * @skb: the packet to send
1233 * @q: the Tx queue
1234 * @pidx: index of the first Tx descriptor to write
1235 * @gen: the generation value to use
1236 * @ndesc: number of descriptors the packet will occupy
1237 *
1238 * Write an offload work request to send the supplied packet. The packet
1239 * data already carry the work request with most fields populated.
1240 */
1241static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1242 struct sge_txq *q, unsigned int pidx,
1243 unsigned int gen, unsigned int ndesc)
1244{
1245 unsigned int sgl_flits, flits;
1246 struct work_request_hdr *from;
1247 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1248 struct tx_desc *d = &q->desc[pidx];
1249
1250 if (immediate(skb)) {
1251 q->sdesc[pidx].skb = NULL;
1252 write_imm(d, skb, skb->len, gen);
1253 return;
1254 }
1255
1256 /* Only TX_DATA builds SGLs */
1257
1258 from = (struct work_request_hdr *)skb->data;
1259 memcpy(&d->flit[1], &from[1], skb->h.raw - skb->data - sizeof(*from));
1260
1261 flits = (skb->h.raw - skb->data) / 8;
1262 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1263 sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
1264 adap->pdev);
1265 if (need_skb_unmap())
1266 ((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
1267
1268 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1269 gen, from->wr_hi, from->wr_lo);
1270}
1271
1272/**
1273 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1274 * @skb: the packet
1275 *
1276 * Returns the number of Tx descriptors needed for the given offload
1277 * packet. These packets are already fully constructed.
1278 */
1279static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1280{
1281 unsigned int flits, cnt = skb_shinfo(skb)->nr_frags;
1282
1283 if (skb->len <= WR_LEN && cnt == 0)
1284 return 1; /* packet fits as immediate data */
1285
1286 flits = (skb->h.raw - skb->data) / 8; /* headers */
1287 if (skb->tail != skb->h.raw)
1288 cnt++;
1289 return flits_to_desc(flits + sgl_len(cnt));
1290}
1291
1292/**
1293 * ofld_xmit - send a packet through an offload queue
1294 * @adap: the adapter
1295 * @q: the Tx offload queue
1296 * @skb: the packet
1297 *
1298 * Send an offload packet through an SGE offload queue.
1299 */
1300static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1301 struct sk_buff *skb)
1302{
1303 int ret;
1304 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1305
1306 spin_lock(&q->lock);
1307 again:reclaim_completed_tx(adap, q);
1308
1309 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1310 if (unlikely(ret)) {
1311 if (ret == 1) {
1312 skb->priority = ndesc; /* save for restart */
1313 spin_unlock(&q->lock);
1314 return NET_XMIT_CN;
1315 }
1316 goto again;
1317 }
1318
1319 gen = q->gen;
1320 q->in_use += ndesc;
1321 pidx = q->pidx;
1322 q->pidx += ndesc;
1323 if (q->pidx >= q->size) {
1324 q->pidx -= q->size;
1325 q->gen ^= 1;
1326 }
1327 spin_unlock(&q->lock);
1328
1329 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1330 check_ring_tx_db(adap, q);
1331 return NET_XMIT_SUCCESS;
1332}
1333
1334/**
1335 * restart_offloadq - restart a suspended offload queue
1336 * @qs: the queue set cotaining the offload queue
1337 *
1338 * Resumes transmission on a suspended Tx offload queue.
1339 */
1340static void restart_offloadq(unsigned long data)
1341{
1342 struct sk_buff *skb;
1343 struct sge_qset *qs = (struct sge_qset *)data;
1344 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1345 struct adapter *adap = qs->netdev->priv;
1346
1347 spin_lock(&q->lock);
1348 again:reclaim_completed_tx(adap, q);
1349
1350 while ((skb = skb_peek(&q->sendq)) != NULL) {
1351 unsigned int gen, pidx;
1352 unsigned int ndesc = skb->priority;
1353
1354 if (unlikely(q->size - q->in_use < ndesc)) {
1355 set_bit(TXQ_OFLD, &qs->txq_stopped);
1356 smp_mb__after_clear_bit();
1357
1358 if (should_restart_tx(q) &&
1359 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1360 goto again;
1361 q->stops++;
1362 break;
1363 }
1364
1365 gen = q->gen;
1366 q->in_use += ndesc;
1367 pidx = q->pidx;
1368 q->pidx += ndesc;
1369 if (q->pidx >= q->size) {
1370 q->pidx -= q->size;
1371 q->gen ^= 1;
1372 }
1373 __skb_unlink(skb, &q->sendq);
1374 spin_unlock(&q->lock);
1375
1376 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1377 spin_lock(&q->lock);
1378 }
1379 spin_unlock(&q->lock);
1380
1381#if USE_GTS
1382 set_bit(TXQ_RUNNING, &q->flags);
1383 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1384#endif
1385 t3_write_reg(adap, A_SG_KDOORBELL,
1386 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1387}
1388
1389/**
1390 * queue_set - return the queue set a packet should use
1391 * @skb: the packet
1392 *
1393 * Maps a packet to the SGE queue set it should use. The desired queue
1394 * set is carried in bits 1-3 in the packet's priority.
1395 */
1396static inline int queue_set(const struct sk_buff *skb)
1397{
1398 return skb->priority >> 1;
1399}
1400
1401/**
1402 * is_ctrl_pkt - return whether an offload packet is a control packet
1403 * @skb: the packet
1404 *
1405 * Determines whether an offload packet should use an OFLD or a CTRL
1406 * Tx queue. This is indicated by bit 0 in the packet's priority.
1407 */
1408static inline int is_ctrl_pkt(const struct sk_buff *skb)
1409{
1410 return skb->priority & 1;
1411}
1412
1413/**
1414 * t3_offload_tx - send an offload packet
1415 * @tdev: the offload device to send to
1416 * @skb: the packet
1417 *
1418 * Sends an offload packet. We use the packet priority to select the
1419 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1420 * should be sent as regular or control, bits 1-3 select the queue set.
1421 */
1422int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1423{
1424 struct adapter *adap = tdev2adap(tdev);
1425 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1426
1427 if (unlikely(is_ctrl_pkt(skb)))
1428 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1429
1430 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1431}
1432
1433/**
1434 * offload_enqueue - add an offload packet to an SGE offload receive queue
1435 * @q: the SGE response queue
1436 * @skb: the packet
1437 *
1438 * Add a new offload packet to an SGE response queue's offload packet
1439 * queue. If the packet is the first on the queue it schedules the RX
1440 * softirq to process the queue.
1441 */
1442static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1443{
1444 skb->next = skb->prev = NULL;
1445 if (q->rx_tail)
1446 q->rx_tail->next = skb;
1447 else {
1448 struct sge_qset *qs = rspq_to_qset(q);
1449
1450 if (__netif_rx_schedule_prep(qs->netdev))
1451 __netif_rx_schedule(qs->netdev);
1452 q->rx_head = skb;
1453 }
1454 q->rx_tail = skb;
1455}
1456
1457/**
1458 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1459 * @tdev: the offload device that will be receiving the packets
1460 * @q: the SGE response queue that assembled the bundle
1461 * @skbs: the partial bundle
1462 * @n: the number of packets in the bundle
1463 *
1464 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1465 */
1466static inline void deliver_partial_bundle(struct t3cdev *tdev,
1467 struct sge_rspq *q,
1468 struct sk_buff *skbs[], int n)
1469{
1470 if (n) {
1471 q->offload_bundles++;
1472 tdev->recv(tdev, skbs, n);
1473 }
1474}
1475
1476/**
1477 * ofld_poll - NAPI handler for offload packets in interrupt mode
1478 * @dev: the network device doing the polling
1479 * @budget: polling budget
1480 *
1481 * The NAPI handler for offload packets when a response queue is serviced
1482 * by the hard interrupt handler, i.e., when it's operating in non-polling
1483 * mode. Creates small packet batches and sends them through the offload
1484 * receive handler. Batches need to be of modest size as we do prefetches
1485 * on the packets in each.
1486 */
1487static int ofld_poll(struct net_device *dev, int *budget)
1488{
1489 struct adapter *adapter = dev->priv;
1490 struct sge_qset *qs = dev2qset(dev);
1491 struct sge_rspq *q = &qs->rspq;
1492 int work_done, limit = min(*budget, dev->quota), avail = limit;
1493
1494 while (avail) {
1495 struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
1496 int ngathered;
1497
1498 spin_lock_irq(&q->lock);
1499 head = q->rx_head;
1500 if (!head) {
1501 work_done = limit - avail;
1502 *budget -= work_done;
1503 dev->quota -= work_done;
1504 __netif_rx_complete(dev);
1505 spin_unlock_irq(&q->lock);
1506 return 0;
1507 }
1508
1509 tail = q->rx_tail;
1510 q->rx_head = q->rx_tail = NULL;
1511 spin_unlock_irq(&q->lock);
1512
1513 for (ngathered = 0; avail && head; avail--) {
1514 prefetch(head->data);
1515 skbs[ngathered] = head;
1516 head = head->next;
1517 skbs[ngathered]->next = NULL;
1518 if (++ngathered == RX_BUNDLE_SIZE) {
1519 q->offload_bundles++;
1520 adapter->tdev.recv(&adapter->tdev, skbs,
1521 ngathered);
1522 ngathered = 0;
1523 }
1524 }
1525 if (head) { /* splice remaining packets back onto Rx queue */
1526 spin_lock_irq(&q->lock);
1527 tail->next = q->rx_head;
1528 if (!q->rx_head)
1529 q->rx_tail = tail;
1530 q->rx_head = head;
1531 spin_unlock_irq(&q->lock);
1532 }
1533 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1534 }
1535 work_done = limit - avail;
1536 *budget -= work_done;
1537 dev->quota -= work_done;
1538 return 1;
1539}
1540
1541/**
1542 * rx_offload - process a received offload packet
1543 * @tdev: the offload device receiving the packet
1544 * @rq: the response queue that received the packet
1545 * @skb: the packet
1546 * @rx_gather: a gather list of packets if we are building a bundle
1547 * @gather_idx: index of the next available slot in the bundle
1548 *
1549 * Process an ingress offload pakcet and add it to the offload ingress
1550 * queue. Returns the index of the next available slot in the bundle.
1551 */
1552static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1553 struct sk_buff *skb, struct sk_buff *rx_gather[],
1554 unsigned int gather_idx)
1555{
1556 rq->offload_pkts++;
1557 skb->mac.raw = skb->nh.raw = skb->h.raw = skb->data;
1558
1559 if (rq->polling) {
1560 rx_gather[gather_idx++] = skb;
1561 if (gather_idx == RX_BUNDLE_SIZE) {
1562 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1563 gather_idx = 0;
1564 rq->offload_bundles++;
1565 }
1566 } else
1567 offload_enqueue(rq, skb);
1568
1569 return gather_idx;
1570}
1571
1572/**
1573 * restart_tx - check whether to restart suspended Tx queues
1574 * @qs: the queue set to resume
1575 *
1576 * Restarts suspended Tx queues of an SGE queue set if they have enough
1577 * free resources to resume operation.
1578 */
1579static void restart_tx(struct sge_qset *qs)
1580{
1581 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1582 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1583 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1584 qs->txq[TXQ_ETH].restarts++;
1585 if (netif_running(qs->netdev))
1586 netif_wake_queue(qs->netdev);
1587 }
1588
1589 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1590 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1591 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1592 qs->txq[TXQ_OFLD].restarts++;
1593 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1594 }
1595 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1596 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1597 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1598 qs->txq[TXQ_CTRL].restarts++;
1599 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1600 }
1601}
1602
1603/**
1604 * rx_eth - process an ingress ethernet packet
1605 * @adap: the adapter
1606 * @rq: the response queue that received the packet
1607 * @skb: the packet
1608 * @pad: amount of padding at the start of the buffer
1609 *
1610 * Process an ingress ethernet pakcet and deliver it to the stack.
1611 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1612 * if it was immediate data in a response.
1613 */
1614static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1615 struct sk_buff *skb, int pad)
1616{
1617 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
1618 struct port_info *pi;
1619
1620 rq->eth_pkts++;
1621 skb_pull(skb, sizeof(*p) + pad);
1622 skb->dev = adap->port[p->iff];
1623 skb->dev->last_rx = jiffies;
1624 skb->protocol = eth_type_trans(skb, skb->dev);
1625 pi = netdev_priv(skb->dev);
1626 if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
1627 !p->fragment) {
1628 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1629 skb->ip_summed = CHECKSUM_UNNECESSARY;
1630 } else
1631 skb->ip_summed = CHECKSUM_NONE;
1632
1633 if (unlikely(p->vlan_valid)) {
1634 struct vlan_group *grp = pi->vlan_grp;
1635
1636 rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
1637 if (likely(grp))
1638 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1639 rq->polling);
1640 else
1641 dev_kfree_skb_any(skb);
1642 } else if (rq->polling)
1643 netif_receive_skb(skb);
1644 else
1645 netif_rx(skb);
1646}
1647
1648/**
1649 * handle_rsp_cntrl_info - handles control information in a response
1650 * @qs: the queue set corresponding to the response
1651 * @flags: the response control flags
1652 *
1653 * Handles the control information of an SGE response, such as GTS
1654 * indications and completion credits for the queue set's Tx queues.
1655 * HW coalesces credits, we don't do any extra SW coalescing.
1656 */
1657static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
1658{
1659 unsigned int credits;
1660
1661#if USE_GTS
1662 if (flags & F_RSPD_TXQ0_GTS)
1663 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
1664#endif
1665
1666 credits = G_RSPD_TXQ0_CR(flags);
1667 if (credits)
1668 qs->txq[TXQ_ETH].processed += credits;
1669
1670 credits = G_RSPD_TXQ2_CR(flags);
1671 if (credits)
1672 qs->txq[TXQ_CTRL].processed += credits;
1673
1674# if USE_GTS
1675 if (flags & F_RSPD_TXQ1_GTS)
1676 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
1677# endif
1678 credits = G_RSPD_TXQ1_CR(flags);
1679 if (credits)
1680 qs->txq[TXQ_OFLD].processed += credits;
1681}
1682
1683/**
1684 * check_ring_db - check if we need to ring any doorbells
1685 * @adapter: the adapter
1686 * @qs: the queue set whose Tx queues are to be examined
1687 * @sleeping: indicates which Tx queue sent GTS
1688 *
1689 * Checks if some of a queue set's Tx queues need to ring their doorbells
1690 * to resume transmission after idling while they still have unprocessed
1691 * descriptors.
1692 */
1693static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
1694 unsigned int sleeping)
1695{
1696 if (sleeping & F_RSPD_TXQ0_GTS) {
1697 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1698
1699 if (txq->cleaned + txq->in_use != txq->processed &&
1700 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1701 set_bit(TXQ_RUNNING, &txq->flags);
1702 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1703 V_EGRCNTX(txq->cntxt_id));
1704 }
1705 }
1706
1707 if (sleeping & F_RSPD_TXQ1_GTS) {
1708 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
1709
1710 if (txq->cleaned + txq->in_use != txq->processed &&
1711 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1712 set_bit(TXQ_RUNNING, &txq->flags);
1713 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1714 V_EGRCNTX(txq->cntxt_id));
1715 }
1716 }
1717}
1718
1719/**
1720 * is_new_response - check if a response is newly written
1721 * @r: the response descriptor
1722 * @q: the response queue
1723 *
1724 * Returns true if a response descriptor contains a yet unprocessed
1725 * response.
1726 */
1727static inline int is_new_response(const struct rsp_desc *r,
1728 const struct sge_rspq *q)
1729{
1730 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
1731}
1732
/* GTS indications for the Ethernet and offload Tx queues */
#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
/* All control info in a response: GTS indications plus Tx credit returns */
#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
			V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
			V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
			V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))

/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
#define NOMEM_INTR_DELAY 2500
1741
/**
 *	process_responses - process responses from an SGE response queue
 *	@adap: the adapter
 *	@qs: the queue set to which the response queue belongs
 *	@budget: how many responses can be processed in this round
 *
 *	Process responses from an SGE response queue up to the supplied budget.
 *	Responses include received packets as well as credits and other events
 *	for the queues that belong to the response queue's queue set.
 *	A negative budget is effectively unlimited.
 *
 *	Additionally choose the interrupt holdoff time for the next interrupt
 *	on this queue.  If the system is under memory shortage use a fairly
 *	long delay to help recovery.
 *
 *	Returns the number of responses consumed (budget - budget_left).
 */
static int process_responses(struct adapter *adap, struct sge_qset *qs,
			     int budget)
{
	struct sge_rspq *q = &qs->rspq;
	struct rsp_desc *r = &q->desc[q->cidx];
	int budget_left = budget;
	unsigned int sleeping = 0;	/* accumulated GTS indications */
	struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
	int ngathered = 0;		/* offload skbs bundled so far */

	q->next_holdoff = q->holdoff_tmr;

	while (likely(budget_left && is_new_response(r, q))) {
		int eth, ethpad = 0;
		struct sk_buff *skb = NULL;
		u32 len, flags = ntohl(r->flags);
		u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;

		eth = r->rss_hdr.opcode == CPL_RX_PKT;

		if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
			/* async notification: synthesize an skb carrying a
			 * CPL_ASYNC_NOTIF message from the raw response */
			skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
			if (!skb)
				goto no_mem;

			memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
			skb->data[0] = CPL_ASYNC_NOTIF;
			rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
			q->async_notif++;
		} else if (flags & F_RSPD_IMM_DATA_VALID) {
			/* small packet delivered inline in the response */
			skb = get_imm_packet(r);
			if (unlikely(!skb)) {
			      no_mem:
				/* out of memory: delay the next interrupt to
				 * give the system a chance to recover */
				q->next_holdoff = NOMEM_INTR_DELAY;
				q->nomem++;
				/* consume one credit since we tried */
				budget_left--;
				break;
			}
			q->imm_data++;
		} else if ((len = ntohl(r->len_cq)) != 0) {
			/* packet data is in a free-list buffer */
			struct sge_fl *fl;

			fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
			fl->credits--;
			skb = get_packet(adap, fl, G_RSPD_LEN(len),
					 eth ? SGE_RX_DROP_THRES : 0);
			if (!skb)
				q->rx_drops++;
			else if (r->rss_hdr.opcode == CPL_TRACE_PKT)
				__skb_pull(skb, 2);
			ethpad = 2;
			if (++fl->cidx == fl->size)
				fl->cidx = 0;
		} else
			q->pure_rsps++;	/* control-only response, no data */

		if (flags & RSPD_CTRL_MASK) {
			sleeping |= flags & RSPD_GTS_MASK;
			handle_rsp_cntrl_info(qs, flags);
		}

		/* advance; wrap the ring and flip the generation bit at the
		 * end so is_new_response() keeps working */
		r++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->gen ^= 1;
			r = q->desc;
		}
		prefetch(r);

		/* return credits to HW once a quarter of the ring is consumed */
		if (++q->credits >= (q->size / 4)) {
			refill_rspq(adap, q, q->credits);
			q->credits = 0;
		}

		if (likely(skb != NULL)) {
			if (eth)
				rx_eth(adap, q, skb, ethpad);
			else {
				/* Preserve the RSS info in csum & priority */
				skb->csum = rss_hi;
				skb->priority = rss_lo;
				ngathered = rx_offload(&adap->tdev, q, skb,
						       offload_skbs, ngathered);
			}
		}

		--budget_left;
	}

	/* flush any offload skbs still waiting in the bundle */
	deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
	if (sleeping)
		check_ring_db(adap, qs, sleeping);

	smp_mb();		/* commit Tx queue .processed updates */
	if (unlikely(qs->txq_stopped != 0))
		restart_tx(qs);

	budget -= budget_left;
	return budget;
}
1858
1859static inline int is_pure_response(const struct rsp_desc *r)
1860{
1861 u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
1862
1863 return (n | r->len_cq) == 0;
1864}
1865
/**
 *	napi_rx_handler - the NAPI handler for Rx processing
 *	@dev: the net device
 *	@budget: how many packets we can process in this round
 *
 *	Handler for new data events when using NAPI.  Returns 1 if more work
 *	remains (NAPI stays scheduled), 0 once the queue has been drained and
 *	interrupts re-armed via the GTS write.
 */
static int napi_rx_handler(struct net_device *dev, int *budget)
{
	struct adapter *adap = dev->priv;
	struct sge_qset *qs = dev2qset(dev);
	/* honor both the global budget and this device's quota */
	int effective_budget = min(*budget, dev->quota);

	int work_done = process_responses(adap, qs, effective_budget);
	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done >= effective_budget)
		return 1;	/* budget exhausted, poll again */

	netif_rx_complete(dev);

	/*
	 * Because we don't atomically flush the following write it is
	 * possible that in very rare cases it can reach the device in a way
	 * that races with a new response being written plus an error interrupt
	 * causing the NAPI interrupt handler below to return unhandled status
	 * to the OS.  To protect against this would require flushing the write
	 * and doing both the write and the flush with interrupts off.  Way too
	 * expensive and unjustifiable given the rarity of the race.
	 *
	 * The race cannot happen at all with MSI-X.
	 */
	t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
		     V_NEWTIMER(qs->rspq.next_holdoff) |
		     V_NEWINDEX(qs->rspq.cidx));
	return 0;
}
1904
1905/*
1906 * Returns true if the device is already scheduled for polling.
1907 */
1908static inline int napi_is_scheduled(struct net_device *dev)
1909{
1910 return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
1911}
1912
/**
 *	process_pure_responses - process pure responses from a response queue
 *	@adap: the adapter
 *	@qs: the queue set owning the response queue
 *	@r: the first pure response to process
 *
 *	A simpler version of process_responses() that handles only pure (i.e.,
 *	non data-carrying) responses.  Such respones are too light-weight to
 *	justify calling a softirq under NAPI, so we handle them specially in
 *	the interrupt handler.  The function is called with a pointer to a
 *	response, which the caller must ensure is a valid pure response.
 *
 *	Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
 */
static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
				  struct rsp_desc *r)
{
	struct sge_rspq *q = &qs->rspq;
	unsigned int sleeping = 0;	/* accumulated GTS indications */

	do {
		u32 flags = ntohl(r->flags);

		/* advance the ring, flipping the generation bit on wrap */
		r++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->gen ^= 1;
			r = q->desc;
		}
		prefetch(r);

		if (flags & RSPD_CTRL_MASK) {
			sleeping |= flags & RSPD_GTS_MASK;
			handle_rsp_cntrl_info(qs, flags);
		}

		q->pure_rsps++;
		/* return credits to HW once a quarter of the ring is used */
		if (++q->credits >= (q->size / 4)) {
			refill_rspq(adap, q, q->credits);
			q->credits = 0;
		}
	} while (is_new_response(r, q) && is_pure_response(r));

	if (sleeping)
		check_ring_db(adap, qs, sleeping);

	smp_mb();		/* commit Tx queue .processed updates */
	if (unlikely(qs->txq_stopped != 0))
		restart_tx(qs);

	/* 1 if we stopped on a data-carrying response, 0 if queue drained */
	return is_new_response(r, q);
}
1965
/**
 *	handle_responses - decide what to do with new responses in NAPI mode
 *	@adap: the adapter
 *	@q: the response queue
 *
 *	This is used by the NAPI interrupt handlers to decide what to do with
 *	new SGE responses.  If there are no new responses it returns -1.  If
 *	there are new responses and they are pure (i.e., non-data carrying)
 *	it handles them straight in hard interrupt context as they are very
 *	cheap and don't deliver any packets.  Finally, if there are any data
 *	signaling responses it schedules the NAPI handler.  Returns 1 if it
 *	schedules NAPI, 0 if all new responses were pure.
 *
 *	The caller must ascertain NAPI is not already running.
 */
static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
{
	struct sge_qset *qs = rspq_to_qset(q);
	struct rsp_desc *r = &q->desc[q->cidx];

	if (!is_new_response(r, q))
		return -1;	/* nothing new to process */
	if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
		/* drained the queue in hard-IRQ context; re-arm interrupts */
		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
		return 0;
	}
	/* data-carrying responses present: hand off to NAPI */
	if (likely(__netif_rx_schedule_prep(qs->netdev)))
		__netif_rx_schedule(qs->netdev);
	return 1;
}
1997
1998/*
1999 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2000 * (i.e., response queue serviced in hard interrupt).
2001 */
2002irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2003{
2004 struct sge_qset *qs = cookie;
2005 struct adapter *adap = qs->netdev->priv;
2006 struct sge_rspq *q = &qs->rspq;
2007
2008 spin_lock(&q->lock);
2009 if (process_responses(adap, qs, -1) == 0)
2010 q->unhandled_irqs++;
2011 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2012 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2013 spin_unlock(&q->lock);
2014 return IRQ_HANDLED;
2015}
2016
2017/*
2018 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2019 * (i.e., response queue serviced by NAPI polling).
2020 */
2021irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2022{
2023 struct sge_qset *qs = cookie;
2024 struct adapter *adap = qs->netdev->priv;
2025 struct sge_rspq *q = &qs->rspq;
2026
2027 spin_lock(&q->lock);
2028 BUG_ON(napi_is_scheduled(qs->netdev));
2029
2030 if (handle_responses(adap, q) < 0)
2031 q->unhandled_irqs++;
2032 spin_unlock(&q->lock);
2033 return IRQ_HANDLED;
2034}
2035
/*
 * The non-NAPI MSI interrupt handler.  This needs to handle data events from
 * SGE response queues as well as error and other async events as they all use
 * the same MSI vector.  We use one SGE response queue per port in this mode
 * and protect all response queues with queue 0's lock.
 */
static irqreturn_t t3_intr_msi(int irq, void *cookie)
{
	int new_packets = 0;
	struct adapter *adap = cookie;
	struct sge_rspq *q = &adap->sge.qs[0].rspq;

	spin_lock(&q->lock);

	/* service queue 0, then re-arm it via GTS */
	if (process_responses(adap, &adap->sge.qs[0], -1)) {
		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
			     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
		new_packets = 1;
	}

	/* second port's queue, if present */
	if (adap->params.nports == 2 &&
	    process_responses(adap, &adap->sge.qs[1], -1)) {
		struct sge_rspq *q1 = &adap->sge.qs[1].rspq;

		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
			     V_NEWTIMER(q1->next_holdoff) |
			     V_NEWINDEX(q1->cidx));
		new_packets = 1;
	}

	/* no data events: must have been an error/async event (or spurious) */
	if (!new_packets && t3_slow_intr_handler(adap) == 0)
		q->unhandled_irqs++;

	spin_unlock(&q->lock);
	return IRQ_HANDLED;
}
2072
2073static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q)
2074{
2075 if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) {
2076 if (likely(__netif_rx_schedule_prep(dev)))
2077 __netif_rx_schedule(dev);
2078 return 1;
2079 }
2080 return 0;
2081}
2082
/*
 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
 * by NAPI polling).  Handles data events from SGE response queues as well as
 * error and other async events as they all use the same MSI vector.  We use
 * one SGE response queue per port in this mode and protect all response
 * queues with queue 0's lock.
 */
irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
{
	int new_packets;
	struct adapter *adap = cookie;
	struct sge_rspq *q = &adap->sge.qs[0].rspq;

	spin_lock(&q->lock);

	/* schedule NAPI on any queue with new responses */
	new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q);
	if (adap->params.nports == 2)
		new_packets += rspq_check_napi(adap->sge.qs[1].netdev,
					       &adap->sge.qs[1].rspq);
	/* no data events: must have been an error/async event (or spurious) */
	if (!new_packets && t3_slow_intr_handler(adap) == 0)
		q->unhandled_irqs++;

	spin_unlock(&q->lock);
	return IRQ_HANDLED;
}
2108
2109/*
2110 * A helper function that processes responses and issues GTS.
2111 */
2112static inline int process_responses_gts(struct adapter *adap,
2113 struct sge_rspq *rq)
2114{
2115 int work;
2116
2117 work = process_responses(adap, rspq_to_qset(rq), -1);
2118 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2119 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2120 return work;
2121}
2122
/*
 * The legacy INTx interrupt handler.  This needs to handle data events from
 * SGE response queues as well as error and other async events as they all use
 * the same interrupt pin.  We use one SGE response queue per port in this mode
 * and protect all response queues with queue 0's lock.
 */
static irqreturn_t t3_intr(int irq, void *cookie)
{
	int work_done, w0, w1;
	struct adapter *adap = cookie;
	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
	struct sge_rspq *q1 = &adap->sge.qs[1].rspq;

	spin_lock(&q0->lock);

	/* check both queues for new responses before touching HW */
	w0 = is_new_response(&q0->desc[q0->cidx], q0);
	w1 = adap->params.nports == 2 &&
	    is_new_response(&q1->desc[q1->cidx], q1);

	if (likely(w0 | w1)) {
		t3_write_reg(adap, A_PL_CLI, 0);
		t3_read_reg(adap, A_PL_CLI);	/* flush */

		if (likely(w0))
			process_responses_gts(adap, q0);

		if (w1)
			process_responses_gts(adap, q1);

		work_done = w0 | w1;
	} else
		/* no data events; handle error/async events instead */
		work_done = t3_slow_intr_handler(adap);

	spin_unlock(&q0->lock);
	/* IRQ_NONE if nothing was for us (interrupt may be shared) */
	return IRQ_RETVAL(work_done != 0);
}
2159
/*
 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
 * Handles data events from SGE response queues as well as error and other
 * async events as they all use the same interrupt pin.  We use one SGE
 * response queue per port in this mode and protect all response queues with
 * queue 0's lock.
 */
static irqreturn_t t3b_intr(int irq, void *cookie)
{
	u32 map;
	struct adapter *adap = cookie;
	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;

	t3_write_reg(adap, A_PL_CLI, 0);
	/* SG_DATA_INTR tells us which queues (bits 0/1) and errors fired */
	map = t3_read_reg(adap, A_SG_DATA_INTR);

	if (unlikely(!map))	/* shared interrupt, most likely */
		return IRQ_NONE;

	spin_lock(&q0->lock);

	if (unlikely(map & F_ERRINTR))
		t3_slow_intr_handler(adap);

	if (likely(map & 1))	/* queue 0 has responses */
		process_responses_gts(adap, q0);

	if (map & 2)		/* queue 1 has responses */
		process_responses_gts(adap, &adap->sge.qs[1].rspq);

	spin_unlock(&q0->lock);
	return IRQ_HANDLED;
}
2193
/*
 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
 * Handles data events from SGE response queues as well as error and other
 * async events as they all use the same interrupt pin.  We use one SGE
 * response queue per port in this mode and protect all response queues with
 * queue 0's lock.
 */
static irqreturn_t t3b_intr_napi(int irq, void *cookie)
{
	u32 map;
	struct net_device *dev;
	struct adapter *adap = cookie;
	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;

	t3_write_reg(adap, A_PL_CLI, 0);
	/* SG_DATA_INTR tells us which queues (bits 0/1) and errors fired */
	map = t3_read_reg(adap, A_SG_DATA_INTR);

	if (unlikely(!map))	/* shared interrupt, most likely */
		return IRQ_NONE;

	spin_lock(&q0->lock);

	if (unlikely(map & F_ERRINTR))
		t3_slow_intr_handler(adap);

	/* hand data events off to NAPI rather than processing here */
	if (likely(map & 1)) {
		dev = adap->sge.qs[0].netdev;

		if (likely(__netif_rx_schedule_prep(dev)))
			__netif_rx_schedule(dev);
	}
	if (map & 2) {
		dev = adap->sge.qs[1].netdev;

		if (likely(__netif_rx_schedule_prep(dev)))
			__netif_rx_schedule(dev);
	}

	spin_unlock(&q0->lock);
	return IRQ_HANDLED;
}
2235
2236/**
2237 * t3_intr_handler - select the top-level interrupt handler
2238 * @adap: the adapter
2239 * @polling: whether using NAPI to service response queues
2240 *
2241 * Selects the top-level interrupt handler based on the type of interrupts
2242 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2243 * response queues.
2244 */
2245intr_handler_t t3_intr_handler(struct adapter *adap, int polling)
2246{
2247 if (adap->flags & USING_MSIX)
2248 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2249 if (adap->flags & USING_MSI)
2250 return polling ? t3_intr_msi_napi : t3_intr_msi;
2251 if (adap->params.rev > 0)
2252 return polling ? t3b_intr_napi : t3b_intr;
2253 return t3_intr;
2254}
2255
/**
 *	t3_sge_err_intr_handler - SGE async event interrupt handler
 *	@adapter: the adapter
 *
 *	Interrupt handler for SGE asynchronous (non-data) events.  Logs the
 *	cause, acknowledges it, and treats response queue credit overflow or
 *	a disabled response queue as fatal.
 */
void t3_sge_err_intr_handler(struct adapter *adapter)
{
	unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);

	if (status & F_RSPQCREDITOVERFOW)
		CH_ALERT(adapter, "SGE response queue credit overflow\n");

	if (status & F_RSPQDISABLED) {
		/* FL_STATUS identifies which response queue was disabled */
		v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);

		CH_ALERT(adapter,
			 "packet delivered to disabled response queue "
			 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
	}

	/* acknowledge the interrupt cause */
	t3_write_reg(adapter, A_SG_INT_CAUSE, status);
	if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
		t3_fatal_err(adapter);
}
2281
/**
 *	sge_timer_cb - perform periodic maintenance of an SGE qset
 *	@data: the SGE queue set to maintain
 *
 *	Runs periodically from a timer to perform maintenance of an SGE queue
 *	set.  It performs two tasks:
 *
 *	a) Cleans up any completed Tx descriptors that may still be pending.
 *	Normal descriptor cleanup happens when new packets are added to a Tx
 *	queue so this timer is relatively infrequent and does any cleanup only
 *	if the Tx queue has not seen any new packets in a while.  We make a
 *	best effort attempt to reclaim descriptors, in that we don't wait
 *	around if we cannot get a queue's lock (which most likely is because
 *	someone else is queueing new packets and so will also handle the clean
 *	up).  Since control queues use immediate data exclusively we don't
 *	bother cleaning them up here.
 *
 *	b) Replenishes Rx queues that have run out due to memory shortage.
 *	Normally new Rx buffers are added when existing ones are consumed but
 *	when out of memory a queue can become empty.  We try to add only a few
 *	buffers here, the queue will be replenished fully as these new buffers
 *	are used up if memory shortage has subsided.
 */
static void sge_timer_cb(unsigned long data)
{
	spinlock_t *lock;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct adapter *adap = qs->netdev->priv;

	/* best-effort Tx reclaim: skip a queue if its lock is contended */
	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
		spin_unlock(&qs->txq[TXQ_ETH].lock);
	}
	if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
		reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
		spin_unlock(&qs->txq[TXQ_OFLD].lock);
	}
	/* with MSI-X each qset has its own rspq lock, otherwise all qsets
	 * are protected by queue 0's lock */
	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
	    &adap->sge.qs[0].rspq.lock;
	if (spin_trylock_irq(lock)) {
		/* don't race with the NAPI handler refilling the same FLs */
		if (!napi_is_scheduled(qs->netdev)) {
			if (qs->fl[0].credits < qs->fl[0].size)
				__refill_fl(adap, &qs->fl[0]);
			if (qs->fl[1].credits < qs->fl[1].size)
				__refill_fl(adap, &qs->fl[1]);
		}
		spin_unlock_irq(lock);
	}
	mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
}
2332
2333/**
2334 * t3_update_qset_coalesce - update coalescing settings for a queue set
2335 * @qs: the SGE queue set
2336 * @p: new queue set parameters
2337 *
2338 * Update the coalescing settings for an SGE queue set. Nothing is done
2339 * if the queue set is not initialized yet.
2340 */
2341void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2342{
2343 if (!qs->netdev)
2344 return;
2345
2346 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
2347 qs->rspq.polling = p->polling;
2348 qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll;
2349}
2350
/**
 *	t3_sge_alloc_qset - initialize an SGE queue set
 *	@adapter: the adapter
 *	@id: the queue set id
 *	@nports: how many Ethernet ports will be using this queue set
 *	@irq_vec_idx: the IRQ vector index for response queue interrupts
 *	@p: configuration parameters for this queue set
 *	@ntxq: number of Tx queues for the queue set
 *	@netdev: net device associated with this queue set
 *
 *	Allocate resources and initialize an SGE queue set.  A queue set
 *	comprises a response queue, two Rx free-buffer queues, and up to 3
 *	Tx queues.  The Tx queues are assigned roles in the order Ethernet
 *	queue, offload queue, and control queue.
 *
 *	On failure all partially allocated resources are released via
 *	t3_free_qset() and a negative errno is returned.
 */
int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
		      int irq_vec_idx, const struct qset_params *p,
		      int ntxq, struct net_device *netdev)
{
	int i, ret = -ENOMEM;
	struct sge_qset *q = &adapter->sge.qs[id];

	init_qset_cntxt(q, id);
	init_timer(&q->tx_reclaim_timer);
	q->tx_reclaim_timer.data = (unsigned long)q;
	q->tx_reclaim_timer.function = sge_timer_cb;

	/* free list 0: regular-sized Rx buffers */
	q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
				   sizeof(struct rx_desc),
				   sizeof(struct rx_sw_desc),
				   &q->fl[0].phys_addr, &q->fl[0].sdesc);
	if (!q->fl[0].desc)
		goto err;

	/* free list 1: jumbo Rx buffers */
	q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
				   sizeof(struct rx_desc),
				   sizeof(struct rx_sw_desc),
				   &q->fl[1].phys_addr, &q->fl[1].sdesc);
	if (!q->fl[1].desc)
		goto err;

	/* response queue has no SW descriptors */
	q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
				  sizeof(struct rsp_desc), 0,
				  &q->rspq.phys_addr, NULL);
	if (!q->rspq.desc)
		goto err;

	for (i = 0; i < ntxq; ++i) {
		/*
		 * The control queue always uses immediate data so does not
		 * need to keep track of any sk_buffs.
		 */
		size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);

		q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
					    sizeof(struct tx_desc), sz,
					    &q->txq[i].phys_addr,
					    &q->txq[i].sdesc);
		if (!q->txq[i].desc)
			goto err;

		q->txq[i].gen = 1;
		q->txq[i].size = p->txq_size[i];
		spin_lock_init(&q->txq[i].lock);
		skb_queue_head_init(&q->txq[i].sendq);
	}

	/* tasklets that restart the offload and control queues when they
	 * run out of resources */
	tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
		     (unsigned long)q);
	tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
		     (unsigned long)q);

	q->fl[0].gen = q->fl[1].gen = 1;
	q->fl[0].size = p->fl_size;
	q->fl[1].size = p->jumbo_size;

	q->rspq.gen = 1;
	q->rspq.size = p->rspq_size;
	spin_lock_init(&q->rspq.lock);

	/* stop the Eth Tx queue when fewer descriptors than needed for a
	 * maximally fragmented skb remain */
	q->txq[TXQ_ETH].stop_thres = nports *
	    flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);

	if (ntxq == 1) {
		q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
		    sizeof(struct cpl_rx_pkt);
		q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
		    sizeof(struct cpl_rx_pkt);
	} else {
		q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
		    sizeof(struct cpl_rx_data);
		q->fl[1].buf_size = (16 * 1024) -
		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}

	/* program the HW contexts; reg_lock serializes context operations */
	spin_lock(&adapter->sge.reg_lock);

	/* FL threshold comparison uses < */
	ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
				   q->rspq.phys_addr, q->rspq.size,
				   q->fl[0].buf_size, 1, 0);
	if (ret)
		goto err_unlock;

	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
		ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
					  q->fl[i].phys_addr, q->fl[i].size,
					  q->fl[i].buf_size, p->cong_thres, 1,
					  0);
		if (ret)
			goto err_unlock;
	}

	ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
				 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
				 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
				 1, 0);
	if (ret)
		goto err_unlock;

	if (ntxq > 1) {
		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
					 USE_GTS, SGE_CNTXT_OFLD, id,
					 q->txq[TXQ_OFLD].phys_addr,
					 q->txq[TXQ_OFLD].size, 0, 1, 0);
		if (ret)
			goto err_unlock;
	}

	if (ntxq > 2) {
		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
					 SGE_CNTXT_CTRL, id,
					 q->txq[TXQ_CTRL].phys_addr,
					 q->txq[TXQ_CTRL].size,
					 q->txq[TXQ_CTRL].token, 1, 0);
		if (ret)
			goto err_unlock;
	}

	spin_unlock(&adapter->sge.reg_lock);
	q->netdev = netdev;
	t3_update_qset_coalesce(q, p);

	/*
	 * We use atalk_ptr as a backpointer to a qset.  In case a device is
	 * associated with multiple queue sets only the first one sets
	 * atalk_ptr.
	 */
	if (netdev->atalk_ptr == NULL)
		netdev->atalk_ptr = q;

	/* prime the free lists and response queue with buffers/credits */
	refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
	refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
	refill_rspq(adapter, &q->rspq, q->rspq.size - 1);

	/* arm the response queue's interrupt */
	t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
		     V_NEWTIMER(q->rspq.holdoff_tmr));

	mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
	return 0;

      err_unlock:
	spin_unlock(&adapter->sge.reg_lock);
      err:
	t3_free_qset(adapter, q);
	return ret;
}
2518
2519/**
2520 * t3_free_sge_resources - free SGE resources
2521 * @adap: the adapter
2522 *
2523 * Frees resources used by the SGE queue sets.
2524 */
2525void t3_free_sge_resources(struct adapter *adap)
2526{
2527 int i;
2528
2529 for (i = 0; i < SGE_QSETS; ++i)
2530 t3_free_qset(adap, &adap->sge.qs[i]);
2531}
2532
/**
 *	t3_sge_start - enable SGE
 *	@adap: the adapter
 *
 *	Enables the SGE for DMAs by setting the global-enable bit in
 *	SG_CONTROL.  This is the last step in starting packet transfers.
 */
void t3_sge_start(struct adapter *adap)
{
	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
}
2544
/**
 *	t3_sge_stop - disable SGE operation
 *	@adap: the adapter
 *
 *	Disables the DMA engine.  This can be called in emeregencies (e.g.,
 *	from error interrupts) or from normal process context.  In the latter
 *	case it also disables any pending queue restart tasklets.  Note that
 *	if it is called in interrupt context it cannot disable the restart
 *	tasklets as it cannot wait, however the tasklets will have no effect
 *	since the doorbells are disabled and the driver will call this again
 *	later from process context, at which time the tasklets will be stopped
 *	if they are still running.
 */
void t3_sge_stop(struct adapter *adap)
{
	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
	/* tasklet_kill() sleeps, so only do it from process context */
	if (!in_interrupt()) {
		int i;

		for (i = 0; i < SGE_QSETS; ++i) {
			struct sge_qset *qs = &adap->sge.qs[i];

			tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
			tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
		}
	}
}
2572
/**
 *	t3_sge_init - initialize SGE
 *	@adap: the adapter
 *	@p: the SGE parameters
 *
 *	Performs SGE initialization needed every time after a chip reset.
 *	We do not initialize any of the queue sets here, instead the driver
 *	top-level must request those individually.  We also do not enable DMA
 *	here, that should be done after the queues have been set up.
 */
void t3_sge_init(struct adapter *adap, struct sge_params *p)
{
	/* user-space doorbell region size, derived from BAR2's length */
	unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);

	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
	    F_CQCRDTCTRL |
	    V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
	    V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
#if SGE_NUM_GENBITS == 1
	ctrl |= F_EGRGENCTRL;
#endif
	if (adap->params.rev > 0) {
		/* later revs can collapse to one interrupt when not using
		 * MSI/MSI-X */
		if (!(adap->flags & (USING_MSIX | USING_MSI)))
			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
		ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
	}
	t3_write_reg(adap, A_SG_CONTROL, ctrl);
	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
		     V_LORCQDRBTHRSH(512));
	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
}
2612
2613/**
2614 * t3_sge_prep - one-time SGE initialization
2615 * @adap: the associated adapter
2616 * @p: SGE parameters
2617 *
2618 * Performs one-time initialization of SGE SW state. Includes determining
2619 * defaults for the assorted SGE parameters, which admins can change until
2620 * they are used to initialize the SGE.
2621 */
2622void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
2623{
2624 int i;
2625
2626 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
2627 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2628
2629 for (i = 0; i < SGE_QSETS; ++i) {
2630 struct qset_params *q = p->qset + i;
2631
2632 q->polling = adap->params.rev > 0;
2633 q->coalesce_usecs = 5;
2634 q->rspq_size = 1024;
2635 q->fl_size = 4096;
2636 q->jumbo_size = 512;
2637 q->txq_size[TXQ_ETH] = 1024;
2638 q->txq_size[TXQ_OFLD] = 1024;
2639 q->txq_size[TXQ_CTRL] = 256;
2640 q->cong_thres = 0;
2641 }
2642
2643 spin_lock_init(&adap->sge.reg_lock);
2644}
2645
2646/**
2647 * t3_get_desc - dump an SGE descriptor for debugging purposes
2648 * @qs: the queue set
2649 * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)
2650 * @idx: the descriptor index in the queue
2651 * @data: where to dump the descriptor contents
2652 *
2653 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
2654 * size of the descriptor.
2655 */
2656int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
2657 unsigned char *data)
2658{
2659 if (qnum >= 6)
2660 return -EINVAL;
2661
2662 if (qnum < 3) {
2663 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
2664 return -EINVAL;
2665 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
2666 return sizeof(struct tx_desc);
2667 }
2668
2669 if (qnum == 3) {
2670 if (!qs->rspq.desc || idx >= qs->rspq.size)
2671 return -EINVAL;
2672 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
2673 return sizeof(struct rsp_desc);
2674 }
2675
2676 qnum -= 4;
2677 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
2678 return -EINVAL;
2679 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
2680 return sizeof(struct rx_desc);
2681}
diff --git a/drivers/net/cxgb3/sge_defs.h b/drivers/net/cxgb3/sge_defs.h
new file mode 100644
index 000000000000..514869e26a76
--- /dev/null
+++ b/drivers/net/cxgb3/sge_defs.h
@@ -0,0 +1,251 @@
/*
 * This file is automatically generated --- any changes will be lost.
 */

#ifndef _SGE_DEFS_H
#define _SGE_DEFS_H

/*
 * NOTE(review): generated accessors for SGE hardware context/descriptor
 * fields.  For each field FOO the generator emits:
 *   S_FOO    - bit offset of the field within its 32-bit word
 *   M_FOO    - unshifted field mask (omitted for 1-bit fields)
 *   V_FOO(x) - shift a value into field position
 *   G_FOO(x) - extract the field value from a word
 *   F_FOO    - the "set" form of a 1-bit flag (V_FOO(1U))
 * Field meanings below are taken from the names; confirm against the
 * SGE users in sge.c / t3_hw.c.
 */

/* EC_*: egress context fields */
#define S_EC_CREDITS 0
#define M_EC_CREDITS 0x7FFF
#define V_EC_CREDITS(x) ((x) << S_EC_CREDITS)
#define G_EC_CREDITS(x) (((x) >> S_EC_CREDITS) & M_EC_CREDITS)

#define S_EC_GTS 15
#define V_EC_GTS(x) ((x) << S_EC_GTS)
#define F_EC_GTS V_EC_GTS(1U)

#define S_EC_INDEX 16
#define M_EC_INDEX 0xFFFF
#define V_EC_INDEX(x) ((x) << S_EC_INDEX)
#define G_EC_INDEX(x) (((x) >> S_EC_INDEX) & M_EC_INDEX)

#define S_EC_SIZE 0
#define M_EC_SIZE 0xFFFF
#define V_EC_SIZE(x) ((x) << S_EC_SIZE)
#define G_EC_SIZE(x) (((x) >> S_EC_SIZE) & M_EC_SIZE)

#define S_EC_BASE_LO 16
#define M_EC_BASE_LO 0xFFFF
#define V_EC_BASE_LO(x) ((x) << S_EC_BASE_LO)
#define G_EC_BASE_LO(x) (((x) >> S_EC_BASE_LO) & M_EC_BASE_LO)

#define S_EC_BASE_HI 0
#define M_EC_BASE_HI 0xF
#define V_EC_BASE_HI(x) ((x) << S_EC_BASE_HI)
#define G_EC_BASE_HI(x) (((x) >> S_EC_BASE_HI) & M_EC_BASE_HI)

#define S_EC_RESPQ 4
#define M_EC_RESPQ 0x7
#define V_EC_RESPQ(x) ((x) << S_EC_RESPQ)
#define G_EC_RESPQ(x) (((x) >> S_EC_RESPQ) & M_EC_RESPQ)

#define S_EC_TYPE 7
#define M_EC_TYPE 0x7
#define V_EC_TYPE(x) ((x) << S_EC_TYPE)
#define G_EC_TYPE(x) (((x) >> S_EC_TYPE) & M_EC_TYPE)

#define S_EC_GEN 10
#define V_EC_GEN(x) ((x) << S_EC_GEN)
#define F_EC_GEN V_EC_GEN(1U)

#define S_EC_UP_TOKEN 11
#define M_EC_UP_TOKEN 0xFFFFF
#define V_EC_UP_TOKEN(x) ((x) << S_EC_UP_TOKEN)
#define G_EC_UP_TOKEN(x) (((x) >> S_EC_UP_TOKEN) & M_EC_UP_TOKEN)

#define S_EC_VALID 31
#define V_EC_VALID(x) ((x) << S_EC_VALID)
#define F_EC_VALID V_EC_VALID(1U)

/* RQ_*: response queue context fields */
#define S_RQ_MSI_VEC 20
#define M_RQ_MSI_VEC 0x3F
#define V_RQ_MSI_VEC(x) ((x) << S_RQ_MSI_VEC)
#define G_RQ_MSI_VEC(x) (((x) >> S_RQ_MSI_VEC) & M_RQ_MSI_VEC)

#define S_RQ_INTR_EN 26
#define V_RQ_INTR_EN(x) ((x) << S_RQ_INTR_EN)
#define F_RQ_INTR_EN V_RQ_INTR_EN(1U)

#define S_RQ_GEN 28
#define V_RQ_GEN(x) ((x) << S_RQ_GEN)
#define F_RQ_GEN V_RQ_GEN(1U)

/* CQ_*: completion queue context fields */
#define S_CQ_INDEX 0
#define M_CQ_INDEX 0xFFFF
#define V_CQ_INDEX(x) ((x) << S_CQ_INDEX)
#define G_CQ_INDEX(x) (((x) >> S_CQ_INDEX) & M_CQ_INDEX)

#define S_CQ_SIZE 16
#define M_CQ_SIZE 0xFFFF
#define V_CQ_SIZE(x) ((x) << S_CQ_SIZE)
#define G_CQ_SIZE(x) (((x) >> S_CQ_SIZE) & M_CQ_SIZE)

#define S_CQ_BASE_HI 0
#define M_CQ_BASE_HI 0xFFFFF
#define V_CQ_BASE_HI(x) ((x) << S_CQ_BASE_HI)
#define G_CQ_BASE_HI(x) (((x) >> S_CQ_BASE_HI) & M_CQ_BASE_HI)

#define S_CQ_RSPQ 20
#define M_CQ_RSPQ 0x3F
#define V_CQ_RSPQ(x) ((x) << S_CQ_RSPQ)
#define G_CQ_RSPQ(x) (((x) >> S_CQ_RSPQ) & M_CQ_RSPQ)

#define S_CQ_ASYNC_NOTIF 26
#define V_CQ_ASYNC_NOTIF(x) ((x) << S_CQ_ASYNC_NOTIF)
#define F_CQ_ASYNC_NOTIF V_CQ_ASYNC_NOTIF(1U)

#define S_CQ_ARMED 27
#define V_CQ_ARMED(x) ((x) << S_CQ_ARMED)
#define F_CQ_ARMED V_CQ_ARMED(1U)

#define S_CQ_ASYNC_NOTIF_SOL 28
#define V_CQ_ASYNC_NOTIF_SOL(x) ((x) << S_CQ_ASYNC_NOTIF_SOL)
#define F_CQ_ASYNC_NOTIF_SOL V_CQ_ASYNC_NOTIF_SOL(1U)

#define S_CQ_GEN 29
#define V_CQ_GEN(x) ((x) << S_CQ_GEN)
#define F_CQ_GEN V_CQ_GEN(1U)

#define S_CQ_OVERFLOW_MODE 31
#define V_CQ_OVERFLOW_MODE(x) ((x) << S_CQ_OVERFLOW_MODE)
#define F_CQ_OVERFLOW_MODE V_CQ_OVERFLOW_MODE(1U)

#define S_CQ_CREDITS 0
#define M_CQ_CREDITS 0xFFFF
#define V_CQ_CREDITS(x) ((x) << S_CQ_CREDITS)
#define G_CQ_CREDITS(x) (((x) >> S_CQ_CREDITS) & M_CQ_CREDITS)

#define S_CQ_CREDIT_THRES 16
#define M_CQ_CREDIT_THRES 0x1FFF
#define V_CQ_CREDIT_THRES(x) ((x) << S_CQ_CREDIT_THRES)
#define G_CQ_CREDIT_THRES(x) (((x) >> S_CQ_CREDIT_THRES) & M_CQ_CREDIT_THRES)

/* FL_*: free-list context fields */
#define S_FL_BASE_HI 0
#define M_FL_BASE_HI 0xFFFFF
#define V_FL_BASE_HI(x) ((x) << S_FL_BASE_HI)
#define G_FL_BASE_HI(x) (((x) >> S_FL_BASE_HI) & M_FL_BASE_HI)

#define S_FL_INDEX_LO 20
#define M_FL_INDEX_LO 0xFFF
#define V_FL_INDEX_LO(x) ((x) << S_FL_INDEX_LO)
#define G_FL_INDEX_LO(x) (((x) >> S_FL_INDEX_LO) & M_FL_INDEX_LO)

#define S_FL_INDEX_HI 0
#define M_FL_INDEX_HI 0xF
#define V_FL_INDEX_HI(x) ((x) << S_FL_INDEX_HI)
#define G_FL_INDEX_HI(x) (((x) >> S_FL_INDEX_HI) & M_FL_INDEX_HI)

#define S_FL_SIZE 4
#define M_FL_SIZE 0xFFFF
#define V_FL_SIZE(x) ((x) << S_FL_SIZE)
#define G_FL_SIZE(x) (((x) >> S_FL_SIZE) & M_FL_SIZE)

#define S_FL_GEN 20
#define V_FL_GEN(x) ((x) << S_FL_GEN)
#define F_FL_GEN V_FL_GEN(1U)

#define S_FL_ENTRY_SIZE_LO 21
#define M_FL_ENTRY_SIZE_LO 0x7FF
#define V_FL_ENTRY_SIZE_LO(x) ((x) << S_FL_ENTRY_SIZE_LO)
#define G_FL_ENTRY_SIZE_LO(x) (((x) >> S_FL_ENTRY_SIZE_LO) & M_FL_ENTRY_SIZE_LO)

#define S_FL_ENTRY_SIZE_HI 0
#define M_FL_ENTRY_SIZE_HI 0x1FFFFF
#define V_FL_ENTRY_SIZE_HI(x) ((x) << S_FL_ENTRY_SIZE_HI)
#define G_FL_ENTRY_SIZE_HI(x) (((x) >> S_FL_ENTRY_SIZE_HI) & M_FL_ENTRY_SIZE_HI)

#define S_FL_CONG_THRES 21
#define M_FL_CONG_THRES 0x3FF
#define V_FL_CONG_THRES(x) ((x) << S_FL_CONG_THRES)
#define G_FL_CONG_THRES(x) (((x) >> S_FL_CONG_THRES) & M_FL_CONG_THRES)

#define S_FL_GTS 31
#define V_FL_GTS(x) ((x) << S_FL_GTS)
#define F_FL_GTS V_FL_GTS(1U)

/* FLD_*: free-list descriptor generation bits */
#define S_FLD_GEN1 31
#define V_FLD_GEN1(x) ((x) << S_FLD_GEN1)
#define F_FLD_GEN1 V_FLD_GEN1(1U)

#define S_FLD_GEN2 0
#define V_FLD_GEN2(x) ((x) << S_FLD_GEN2)
#define F_FLD_GEN2 V_FLD_GEN2(1U)

/* RSPD_*: response descriptor fields */
#define S_RSPD_TXQ1_CR 0
#define M_RSPD_TXQ1_CR 0x7F
#define V_RSPD_TXQ1_CR(x) ((x) << S_RSPD_TXQ1_CR)
#define G_RSPD_TXQ1_CR(x) (((x) >> S_RSPD_TXQ1_CR) & M_RSPD_TXQ1_CR)

#define S_RSPD_TXQ1_GTS 7
#define V_RSPD_TXQ1_GTS(x) ((x) << S_RSPD_TXQ1_GTS)
#define F_RSPD_TXQ1_GTS V_RSPD_TXQ1_GTS(1U)

#define S_RSPD_TXQ2_CR 8
#define M_RSPD_TXQ2_CR 0x7F
#define V_RSPD_TXQ2_CR(x) ((x) << S_RSPD_TXQ2_CR)
#define G_RSPD_TXQ2_CR(x) (((x) >> S_RSPD_TXQ2_CR) & M_RSPD_TXQ2_CR)

#define S_RSPD_TXQ2_GTS 15
#define V_RSPD_TXQ2_GTS(x) ((x) << S_RSPD_TXQ2_GTS)
#define F_RSPD_TXQ2_GTS V_RSPD_TXQ2_GTS(1U)

#define S_RSPD_TXQ0_CR 16
#define M_RSPD_TXQ0_CR 0x7F
#define V_RSPD_TXQ0_CR(x) ((x) << S_RSPD_TXQ0_CR)
#define G_RSPD_TXQ0_CR(x) (((x) >> S_RSPD_TXQ0_CR) & M_RSPD_TXQ0_CR)

#define S_RSPD_TXQ0_GTS 23
#define V_RSPD_TXQ0_GTS(x) ((x) << S_RSPD_TXQ0_GTS)
#define F_RSPD_TXQ0_GTS V_RSPD_TXQ0_GTS(1U)

#define S_RSPD_EOP 24
#define V_RSPD_EOP(x) ((x) << S_RSPD_EOP)
#define F_RSPD_EOP V_RSPD_EOP(1U)

#define S_RSPD_SOP 25
#define V_RSPD_SOP(x) ((x) << S_RSPD_SOP)
#define F_RSPD_SOP V_RSPD_SOP(1U)

#define S_RSPD_ASYNC_NOTIF 26
#define V_RSPD_ASYNC_NOTIF(x) ((x) << S_RSPD_ASYNC_NOTIF)
#define F_RSPD_ASYNC_NOTIF V_RSPD_ASYNC_NOTIF(1U)

#define S_RSPD_FL0_GTS 27
#define V_RSPD_FL0_GTS(x) ((x) << S_RSPD_FL0_GTS)
#define F_RSPD_FL0_GTS V_RSPD_FL0_GTS(1U)

#define S_RSPD_FL1_GTS 28
#define V_RSPD_FL1_GTS(x) ((x) << S_RSPD_FL1_GTS)
#define F_RSPD_FL1_GTS V_RSPD_FL1_GTS(1U)

#define S_RSPD_IMM_DATA_VALID 29
#define V_RSPD_IMM_DATA_VALID(x) ((x) << S_RSPD_IMM_DATA_VALID)
#define F_RSPD_IMM_DATA_VALID V_RSPD_IMM_DATA_VALID(1U)

#define S_RSPD_OFFLOAD 30
#define V_RSPD_OFFLOAD(x) ((x) << S_RSPD_OFFLOAD)
#define F_RSPD_OFFLOAD V_RSPD_OFFLOAD(1U)

#define S_RSPD_GEN1 31
#define V_RSPD_GEN1(x) ((x) << S_RSPD_GEN1)
#define F_RSPD_GEN1 V_RSPD_GEN1(1U)

#define S_RSPD_LEN 0
#define M_RSPD_LEN 0x7FFFFFFF
#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN)
#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN)

#define S_RSPD_FLQ 31
#define V_RSPD_FLQ(x) ((x) << S_RSPD_FLQ)
#define F_RSPD_FLQ V_RSPD_FLQ(1U)

#define S_RSPD_GEN2 0
#define V_RSPD_GEN2(x) ((x) << S_RSPD_GEN2)
#define F_RSPD_GEN2 V_RSPD_GEN2(1U)

#define S_RSPD_INR_VEC 1
#define M_RSPD_INR_VEC 0x7F
#define V_RSPD_INR_VEC(x) ((x) << S_RSPD_INR_VEC)
#define G_RSPD_INR_VEC(x) (((x) >> S_RSPD_INR_VEC) & M_RSPD_INR_VEC)

#endif				/* _SGE_DEFS_H */
diff --git a/drivers/net/cxgb3/t3_cpl.h b/drivers/net/cxgb3/t3_cpl.h
new file mode 100644
index 000000000000..b7a1a310dfd4
--- /dev/null
+++ b/drivers/net/cxgb3/t3_cpl.h
@@ -0,0 +1,1444 @@
1/*
2 * Copyright (c) 2004-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef T3_CPL_H
33#define T3_CPL_H
34
35#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD)
36# include <asm/byteorder.h>
37#endif
38
/*
 * CPL message opcodes exchanged with the T3 adapter.  Per the note on the
 * terminating entry, the list must stay sorted and NUM_CPL_CMDS must be
 * last.  Gaps are intentional — the values are fixed by the hardware/
 * firmware interface and must not be renumbered.
 */
enum CPL_opcode {
	CPL_PASS_OPEN_REQ = 0x1,
	CPL_PASS_ACCEPT_RPL = 0x2,
	CPL_ACT_OPEN_REQ = 0x3,
	CPL_SET_TCB = 0x4,
	CPL_SET_TCB_FIELD = 0x5,
	CPL_GET_TCB = 0x6,
	CPL_PCMD = 0x7,
	CPL_CLOSE_CON_REQ = 0x8,
	CPL_CLOSE_LISTSRV_REQ = 0x9,
	CPL_ABORT_REQ = 0xA,
	CPL_ABORT_RPL = 0xB,
	CPL_TX_DATA = 0xC,
	CPL_RX_DATA_ACK = 0xD,
	CPL_TX_PKT = 0xE,
	CPL_RTE_DELETE_REQ = 0xF,
	CPL_RTE_WRITE_REQ = 0x10,
	CPL_RTE_READ_REQ = 0x11,
	CPL_L2T_WRITE_REQ = 0x12,
	CPL_L2T_READ_REQ = 0x13,
	CPL_SMT_WRITE_REQ = 0x14,
	CPL_SMT_READ_REQ = 0x15,
	CPL_TX_PKT_LSO = 0x16,
	CPL_PCMD_READ = 0x17,
	CPL_BARRIER = 0x18,
	CPL_TID_RELEASE = 0x1A,

	CPL_CLOSE_LISTSRV_RPL = 0x20,
	CPL_ERROR = 0x21,
	CPL_GET_TCB_RPL = 0x22,
	CPL_L2T_WRITE_RPL = 0x23,
	CPL_PCMD_READ_RPL = 0x24,
	CPL_PCMD_RPL = 0x25,
	CPL_PEER_CLOSE = 0x26,
	CPL_RTE_DELETE_RPL = 0x27,
	CPL_RTE_WRITE_RPL = 0x28,
	CPL_RX_DDP_COMPLETE = 0x29,
	CPL_RX_PHYS_ADDR = 0x2A,
	CPL_RX_PKT = 0x2B,
	CPL_RX_URG_NOTIFY = 0x2C,
	CPL_SET_TCB_RPL = 0x2D,
	CPL_SMT_WRITE_RPL = 0x2E,
	CPL_TX_DATA_ACK = 0x2F,

	CPL_ABORT_REQ_RSS = 0x30,
	CPL_ABORT_RPL_RSS = 0x31,
	CPL_CLOSE_CON_RPL = 0x32,
	CPL_ISCSI_HDR = 0x33,
	CPL_L2T_READ_RPL = 0x34,
	CPL_RDMA_CQE = 0x35,
	CPL_RDMA_CQE_READ_RSP = 0x36,
	CPL_RDMA_CQE_ERR = 0x37,
	CPL_RTE_READ_RPL = 0x38,
	CPL_RX_DATA = 0x39,

	CPL_ACT_OPEN_RPL = 0x40,
	CPL_PASS_OPEN_RPL = 0x41,
	CPL_RX_DATA_DDP = 0x42,
	CPL_SMT_READ_RPL = 0x43,

	CPL_ACT_ESTABLISH = 0x50,
	CPL_PASS_ESTABLISH = 0x51,

	CPL_PASS_ACCEPT_REQ = 0x70,

	CPL_ASYNC_NOTIF = 0x80,	/* fake opcode for async notifications */

	CPL_TX_DMA_ACK = 0xA0,
	CPL_RDMA_READ_REQ = 0xA1,
	CPL_RDMA_TERMINATE = 0xA2,
	CPL_TRACE_PKT = 0xA3,
	CPL_RDMA_EC_STATUS = 0xA5,

	NUM_CPL_CMDS		/* must be last and previous entries must be sorted */
};
114
/*
 * Status codes carried in CPL reply/notification messages.  Values are
 * defined by the hardware/firmware interface; do not renumber.
 */
enum CPL_error {
	CPL_ERR_NONE = 0,
	CPL_ERR_TCAM_PARITY = 1,
	CPL_ERR_TCAM_FULL = 3,
	CPL_ERR_CONN_RESET = 20,
	CPL_ERR_CONN_EXIST = 22,
	CPL_ERR_ARP_MISS = 23,
	CPL_ERR_BAD_SYN = 24,
	CPL_ERR_CONN_TIMEDOUT = 30,
	CPL_ERR_XMIT_TIMEDOUT = 31,
	CPL_ERR_PERSIST_TIMEDOUT = 32,
	CPL_ERR_FINWAIT2_TIMEDOUT = 33,
	CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
	CPL_ERR_RTX_NEG_ADVICE = 35,
	CPL_ERR_PERSIST_NEG_ADVICE = 36,
	CPL_ERR_ABORT_FAILED = 42,
	CPL_ERR_GENERAL = 99
};
133
/* CPL_CONN_POLICY_*: connection handling policy values */
enum {
	CPL_CONN_POLICY_AUTO = 0,
	CPL_CONN_POLICY_ASK = 1,
	CPL_CONN_POLICY_DENY = 3
};

/* ULP_MODE_*: upper-layer protocol offload modes */
enum {
	ULP_MODE_NONE = 0,
	ULP_MODE_ISCSI = 2,
	ULP_MODE_RDMA = 4,
	ULP_MODE_TCPDDP = 5
};

/* ULP_CRC_*: bit flags selecting what the ULP CRC covers */
enum {
	ULP_CRC_HEADER = 1 << 0,
	ULP_CRC_DATA = 1 << 1
};

/* accept/reject decision for a passive open */
enum {
	CPL_PASS_OPEN_ACCEPT,
	CPL_PASS_OPEN_REJECT
};

/* CPL_ABORT_*: abort command variants (see cpl_abort_req.cmd) */
enum {
	CPL_ABORT_SEND_RST = 0,
	CPL_ABORT_NO_RST,
	CPL_ABORT_POST_CLOSE_REQ = 2
};

enum {				/* TX_PKT_LSO ethernet types */
	CPL_ETH_II,
	CPL_ETH_II_VLAN,
	CPL_ETH_802_3,
	CPL_ETH_802_3_VLAN
};

enum {				/* TCP congestion control algorithms */
	CONG_ALG_RENO,
	CONG_ALG_TAHOE,
	CONG_ALG_NEWRENO,
	CONG_ALG_HIGHSPEED
};
176
/*
 * First word of every CPL message: the 8-bit opcode occupies the top byte
 * of the big-endian 32-bit word (see S_OPCODE below), with a 24-bit TID
 * in the remainder.  The union lets code read the opcode byte directly,
 * since on the wire it is the first byte of the word.
 */
union opcode_tid {
	__be32 opcode_tid;
	__u8 opcode;
};

#define S_OPCODE 24
#define V_OPCODE(x) ((x) << S_OPCODE)
#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
#define G_TID(x) ((x) & 0xFFFFFF)

/* tid is assumed to be 24-bits */
#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))

/* access the combined opcode/TID word of a CPL command */
#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)

/* extract the TID from a CPL command */
#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
194
/*
 * TCP options reported by hardware for an incoming connection (embedded
 * in cpl_pass_accept_req).  Bitfield order flips with host endianness so
 * the in-memory layout matches the wire format.
 */
struct tcp_options {
	__be16 mss;
	__u8 wsf;		/* presumably the window scale factor -- confirm */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8:5;
	__u8 ecn:1;
	__u8 sack:1;
	__u8 tstamp:1;
#else
	__u8 tstamp:1;
	__u8 sack:1;
	__u8 ecn:1;
	__u8:5;
#endif
};
210
/*
 * RSS steering information prepended to ingress CPL messages (see the
 * RSS_HDR macro below in this file).
 */
struct rss_header {
	__u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8 cpu_idx:6;
	__u8 hash_type:2;
#else
	__u8 hash_type:2;
	__u8 cpu_idx:6;
#endif
	__be16 cq_idx;
	__be32 rss_hash_val;
};
223
/*
 * Host-side builds (CHELSIO_FW not defined) prepend a work request header
 * to CPLs they send and get no RSS header on CPLs they receive; firmware
 * builds are the opposite.  The WR_HDR / RSS_HDR macros at the end of this
 * conditional let the struct definitions below cover both cases.
 */
#ifndef CHELSIO_FW
struct work_request_hdr {
	__be32 wr_hi;
	__be32 wr_lo;
};

/* wr_hi fields */
#define S_WR_SGE_CREDITS 0
#define M_WR_SGE_CREDITS 0xFF
#define V_WR_SGE_CREDITS(x) ((x) << S_WR_SGE_CREDITS)
#define G_WR_SGE_CREDITS(x) (((x) >> S_WR_SGE_CREDITS) & M_WR_SGE_CREDITS)

#define S_WR_SGLSFLT 8
#define M_WR_SGLSFLT 0xFF
#define V_WR_SGLSFLT(x) ((x) << S_WR_SGLSFLT)
#define G_WR_SGLSFLT(x) (((x) >> S_WR_SGLSFLT) & M_WR_SGLSFLT)

#define S_WR_BCNTLFLT 16
#define M_WR_BCNTLFLT 0xF
#define V_WR_BCNTLFLT(x) ((x) << S_WR_BCNTLFLT)
#define G_WR_BCNTLFLT(x) (((x) >> S_WR_BCNTLFLT) & M_WR_BCNTLFLT)

#define S_WR_DATATYPE 20
#define V_WR_DATATYPE(x) ((x) << S_WR_DATATYPE)
#define F_WR_DATATYPE V_WR_DATATYPE(1U)

#define S_WR_COMPL 21
#define V_WR_COMPL(x) ((x) << S_WR_COMPL)
#define F_WR_COMPL V_WR_COMPL(1U)

#define S_WR_EOP 22
#define V_WR_EOP(x) ((x) << S_WR_EOP)
#define F_WR_EOP V_WR_EOP(1U)

#define S_WR_SOP 23
#define V_WR_SOP(x) ((x) << S_WR_SOP)
#define F_WR_SOP V_WR_SOP(1U)

#define S_WR_OP 24
#define M_WR_OP 0xFF
#define V_WR_OP(x) ((x) << S_WR_OP)
#define G_WR_OP(x) (((x) >> S_WR_OP) & M_WR_OP)

/* wr_lo fields */
#define S_WR_LEN 0
#define M_WR_LEN 0xFF
#define V_WR_LEN(x) ((x) << S_WR_LEN)
#define G_WR_LEN(x) (((x) >> S_WR_LEN) & M_WR_LEN)

#define S_WR_TID 8
#define M_WR_TID 0xFFFFF
#define V_WR_TID(x) ((x) << S_WR_TID)
#define G_WR_TID(x) (((x) >> S_WR_TID) & M_WR_TID)

#define S_WR_CR_FLUSH 30
#define V_WR_CR_FLUSH(x) ((x) << S_WR_CR_FLUSH)
#define F_WR_CR_FLUSH V_WR_CR_FLUSH(1U)

#define S_WR_GEN 31
#define V_WR_GEN(x) ((x) << S_WR_GEN)
#define F_WR_GEN V_WR_GEN(1U)

# define WR_HDR struct work_request_hdr wr
# define RSS_HDR
#else
# define WR_HDR
# define RSS_HDR struct rss_header rss_hdr;
#endif
292
/*
 * Fields of the option 0 (lower/upper halves), option 1 and option 2
 * words carried by the connection open/accept CPLs defined below.
 */

/* option 0 lower-half fields */
#define S_CPL_STATUS 0
#define M_CPL_STATUS 0xFF
#define V_CPL_STATUS(x) ((x) << S_CPL_STATUS)
#define G_CPL_STATUS(x) (((x) >> S_CPL_STATUS) & M_CPL_STATUS)

#define S_INJECT_TIMER 6
#define V_INJECT_TIMER(x) ((x) << S_INJECT_TIMER)
#define F_INJECT_TIMER V_INJECT_TIMER(1U)

#define S_NO_OFFLOAD 7
#define V_NO_OFFLOAD(x) ((x) << S_NO_OFFLOAD)
#define F_NO_OFFLOAD V_NO_OFFLOAD(1U)

#define S_ULP_MODE 8
#define M_ULP_MODE 0xF
#define V_ULP_MODE(x) ((x) << S_ULP_MODE)
#define G_ULP_MODE(x) (((x) >> S_ULP_MODE) & M_ULP_MODE)

#define S_RCV_BUFSIZ 12
#define M_RCV_BUFSIZ 0x3FFF
#define V_RCV_BUFSIZ(x) ((x) << S_RCV_BUFSIZ)
#define G_RCV_BUFSIZ(x) (((x) >> S_RCV_BUFSIZ) & M_RCV_BUFSIZ)

#define S_TOS 26
#define M_TOS 0x3F
#define V_TOS(x) ((x) << S_TOS)
#define G_TOS(x) (((x) >> S_TOS) & M_TOS)

/* option 0 upper-half fields */
#define S_DELACK 0
#define V_DELACK(x) ((x) << S_DELACK)
#define F_DELACK V_DELACK(1U)

#define S_NO_CONG 1
#define V_NO_CONG(x) ((x) << S_NO_CONG)
#define F_NO_CONG V_NO_CONG(1U)

#define S_SRC_MAC_SEL 2
#define M_SRC_MAC_SEL 0x3
#define V_SRC_MAC_SEL(x) ((x) << S_SRC_MAC_SEL)
#define G_SRC_MAC_SEL(x) (((x) >> S_SRC_MAC_SEL) & M_SRC_MAC_SEL)

#define S_L2T_IDX 4
#define M_L2T_IDX 0x7FF
#define V_L2T_IDX(x) ((x) << S_L2T_IDX)
#define G_L2T_IDX(x) (((x) >> S_L2T_IDX) & M_L2T_IDX)

#define S_TX_CHANNEL 15
#define V_TX_CHANNEL(x) ((x) << S_TX_CHANNEL)
#define F_TX_CHANNEL V_TX_CHANNEL(1U)

#define S_TCAM_BYPASS 16
#define V_TCAM_BYPASS(x) ((x) << S_TCAM_BYPASS)
#define F_TCAM_BYPASS V_TCAM_BYPASS(1U)

#define S_NAGLE 17
#define V_NAGLE(x) ((x) << S_NAGLE)
#define F_NAGLE V_NAGLE(1U)

#define S_WND_SCALE 18
#define M_WND_SCALE 0xF
#define V_WND_SCALE(x) ((x) << S_WND_SCALE)
#define G_WND_SCALE(x) (((x) >> S_WND_SCALE) & M_WND_SCALE)

#define S_KEEP_ALIVE 22
#define V_KEEP_ALIVE(x) ((x) << S_KEEP_ALIVE)
#define F_KEEP_ALIVE V_KEEP_ALIVE(1U)

#define S_MAX_RETRANS 23
#define M_MAX_RETRANS 0xF
#define V_MAX_RETRANS(x) ((x) << S_MAX_RETRANS)
#define G_MAX_RETRANS(x) (((x) >> S_MAX_RETRANS) & M_MAX_RETRANS)

#define S_MAX_RETRANS_OVERRIDE 27
#define V_MAX_RETRANS_OVERRIDE(x) ((x) << S_MAX_RETRANS_OVERRIDE)
#define F_MAX_RETRANS_OVERRIDE V_MAX_RETRANS_OVERRIDE(1U)

#define S_MSS_IDX 28
#define M_MSS_IDX 0xF
#define V_MSS_IDX(x) ((x) << S_MSS_IDX)
#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX)

/* option 1 fields */
#define S_RSS_ENABLE 0
#define V_RSS_ENABLE(x) ((x) << S_RSS_ENABLE)
#define F_RSS_ENABLE V_RSS_ENABLE(1U)

#define S_RSS_MASK_LEN 1
#define M_RSS_MASK_LEN 0x7
#define V_RSS_MASK_LEN(x) ((x) << S_RSS_MASK_LEN)
#define G_RSS_MASK_LEN(x) (((x) >> S_RSS_MASK_LEN) & M_RSS_MASK_LEN)

#define S_CPU_IDX 4
#define M_CPU_IDX 0x3F
#define V_CPU_IDX(x) ((x) << S_CPU_IDX)
#define G_CPU_IDX(x) (((x) >> S_CPU_IDX) & M_CPU_IDX)

#define S_MAC_MATCH_VALID 18
#define V_MAC_MATCH_VALID(x) ((x) << S_MAC_MATCH_VALID)
#define F_MAC_MATCH_VALID V_MAC_MATCH_VALID(1U)

#define S_CONN_POLICY 19
#define M_CONN_POLICY 0x3
#define V_CONN_POLICY(x) ((x) << S_CONN_POLICY)
#define G_CONN_POLICY(x) (((x) >> S_CONN_POLICY) & M_CONN_POLICY)

#define S_SYN_DEFENSE 21
#define V_SYN_DEFENSE(x) ((x) << S_SYN_DEFENSE)
#define F_SYN_DEFENSE V_SYN_DEFENSE(1U)

#define S_VLAN_PRI 22
#define M_VLAN_PRI 0x3
#define V_VLAN_PRI(x) ((x) << S_VLAN_PRI)
#define G_VLAN_PRI(x) (((x) >> S_VLAN_PRI) & M_VLAN_PRI)

#define S_VLAN_PRI_VALID 24
#define V_VLAN_PRI_VALID(x) ((x) << S_VLAN_PRI_VALID)
#define F_VLAN_PRI_VALID V_VLAN_PRI_VALID(1U)

#define S_PKT_TYPE 25
#define M_PKT_TYPE 0x3
#define V_PKT_TYPE(x) ((x) << S_PKT_TYPE)
#define G_PKT_TYPE(x) (((x) >> S_PKT_TYPE) & M_PKT_TYPE)

#define S_MAC_MATCH 27
#define M_MAC_MATCH 0x1F
#define V_MAC_MATCH(x) ((x) << S_MAC_MATCH)
#define G_MAC_MATCH(x) (((x) >> S_MAC_MATCH) & M_MAC_MATCH)

/* option 2 fields */
#define S_CPU_INDEX 0
#define M_CPU_INDEX 0x7F
#define V_CPU_INDEX(x) ((x) << S_CPU_INDEX)
#define G_CPU_INDEX(x) (((x) >> S_CPU_INDEX) & M_CPU_INDEX)

#define S_CPU_INDEX_VALID 7
#define V_CPU_INDEX_VALID(x) ((x) << S_CPU_INDEX_VALID)
#define F_CPU_INDEX_VALID V_CPU_INDEX_VALID(1U)

#define S_RX_COALESCE 8
#define M_RX_COALESCE 0x3
#define V_RX_COALESCE(x) ((x) << S_RX_COALESCE)
#define G_RX_COALESCE(x) (((x) >> S_RX_COALESCE) & M_RX_COALESCE)

#define S_RX_COALESCE_VALID 10
#define V_RX_COALESCE_VALID(x) ((x) << S_RX_COALESCE_VALID)
#define F_RX_COALESCE_VALID V_RX_COALESCE_VALID(1U)

#define S_CONG_CONTROL_FLAVOR 11
#define M_CONG_CONTROL_FLAVOR 0x3
#define V_CONG_CONTROL_FLAVOR(x) ((x) << S_CONG_CONTROL_FLAVOR)
#define G_CONG_CONTROL_FLAVOR(x) (((x) >> S_CONG_CONTROL_FLAVOR) & M_CONG_CONTROL_FLAVOR)

#define S_PACING_FLAVOR 13
#define M_PACING_FLAVOR 0x3
#define V_PACING_FLAVOR(x) ((x) << S_PACING_FLAVOR)
#define G_PACING_FLAVOR(x) (((x) >> S_PACING_FLAVOR) & M_PACING_FLAVOR)

#define S_FLAVORS_VALID 15
#define V_FLAVORS_VALID(x) ((x) << S_FLAVORS_VALID)
#define F_FLAVORS_VALID V_FLAVORS_VALID(1U)

#define S_RX_FC_DISABLE 16
#define V_RX_FC_DISABLE(x) ((x) << S_RX_FC_DISABLE)
#define F_RX_FC_DISABLE V_RX_FC_DISABLE(1U)

#define S_RX_FC_VALID 17
#define V_RX_FC_VALID(x) ((x) << S_RX_FC_VALID)
#define F_RX_FC_VALID V_RX_FC_VALID(1U)
463
/* request to open a passive (listening) connection/server */
struct cpl_pass_open_req {
	WR_HDR;
	union opcode_tid ot;
	__be16 local_port;
	__be16 peer_port;
	__be32 local_ip;
	__be32 peer_ip;
	__be32 opt0h;
	__be32 opt0l;
	__be32 peer_netmask;
	__be32 opt1;
};

/* reply to a passive open request */
struct cpl_pass_open_rpl {
	RSS_HDR union opcode_tid ot;
	__be16 local_port;
	__be16 peer_port;
	__be32 local_ip;
	__be32 peer_ip;
	__u8 resvd[7];
	__u8 status;
};

/* notification that a passively opened connection is established */
struct cpl_pass_establish {
	RSS_HDR union opcode_tid ot;
	__be16 local_port;
	__be16 peer_port;
	__be32 local_ip;
	__be32 peer_ip;
	__be32 tos_tid;
	__be16 l2t_idx;
	__be16 tcp_opt;
	__be32 snd_isn;
	__be32 rcv_isn;
};

/* cpl_pass_establish.tos_tid fields */
#define S_PASS_OPEN_TID 0
#define M_PASS_OPEN_TID 0xFFFFFF
#define V_PASS_OPEN_TID(x) ((x) << S_PASS_OPEN_TID)
#define G_PASS_OPEN_TID(x) (((x) >> S_PASS_OPEN_TID) & M_PASS_OPEN_TID)

#define S_PASS_OPEN_TOS 24
#define M_PASS_OPEN_TOS 0xFF
#define V_PASS_OPEN_TOS(x) ((x) << S_PASS_OPEN_TOS)
#define G_PASS_OPEN_TOS(x) (((x) >> S_PASS_OPEN_TOS) & M_PASS_OPEN_TOS)

/* cpl_pass_establish.l2t_idx fields */
#define S_L2T_IDX16 5
#define M_L2T_IDX16 0x7FF
#define V_L2T_IDX16(x) ((x) << S_L2T_IDX16)
#define G_L2T_IDX16(x) (((x) >> S_L2T_IDX16) & M_L2T_IDX16)

/* cpl_pass_establish.tcp_opt fields (also applies act_open_establish) */
#define G_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1)
#define G_TCPOPT_SACK(x) (((x) >> 6) & 1)
#define G_TCPOPT_TSTAMP(x) (((x) >> 7) & 1)
#define G_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
#define G_TCPOPT_MSS(x) (((x) >> 12) & 0xf)

/* incoming SYN on a listening server; host decides accept/reject */
struct cpl_pass_accept_req {
	RSS_HDR union opcode_tid ot;
	__be16 local_port;
	__be16 peer_port;
	__be32 local_ip;
	__be32 peer_ip;
	__be32 tos_tid;
	struct tcp_options tcp_options;
	__u8 dst_mac[6];
	__be16 vlan_tag;
	__u8 src_mac[6];
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8:3;
	__u8 addr_idx:3;
	__u8 port_idx:1;
	__u8 exact_match:1;
#else
	__u8 exact_match:1;
	__u8 port_idx:1;
	__u8 addr_idx:3;
	__u8:3;
#endif
	__u8 rsvd;
	__be32 rcv_isn;
	__be32 rsvd2;
};
550
/* host's accept/reject reply to cpl_pass_accept_req */
struct cpl_pass_accept_rpl {
	WR_HDR;
	union opcode_tid ot;
	__be32 opt2;
	__be32 rsvd;
	__be32 peer_ip;
	__be32 opt0h;
	__be32 opt0l_status;
};

/* request to open an active (outgoing) connection */
struct cpl_act_open_req {
	WR_HDR;
	union opcode_tid ot;
	__be16 local_port;
	__be16 peer_port;
	__be32 local_ip;
	__be32 peer_ip;
	__be32 opt0h;
	__be32 opt0l;
	__be32 params;
	__be32 opt2;
};

/* cpl_act_open_req.params fields */
#define S_AOPEN_VLAN_PRI 9
#define M_AOPEN_VLAN_PRI 0x3
#define V_AOPEN_VLAN_PRI(x) ((x) << S_AOPEN_VLAN_PRI)
#define G_AOPEN_VLAN_PRI(x) (((x) >> S_AOPEN_VLAN_PRI) & M_AOPEN_VLAN_PRI)

#define S_AOPEN_VLAN_PRI_VALID 11
#define V_AOPEN_VLAN_PRI_VALID(x) ((x) << S_AOPEN_VLAN_PRI_VALID)
#define F_AOPEN_VLAN_PRI_VALID V_AOPEN_VLAN_PRI_VALID(1U)

#define S_AOPEN_PKT_TYPE 12
#define M_AOPEN_PKT_TYPE 0x3
#define V_AOPEN_PKT_TYPE(x) ((x) << S_AOPEN_PKT_TYPE)
#define G_AOPEN_PKT_TYPE(x) (((x) >> S_AOPEN_PKT_TYPE) & M_AOPEN_PKT_TYPE)

#define S_AOPEN_MAC_MATCH 14
#define M_AOPEN_MAC_MATCH 0x1F
#define V_AOPEN_MAC_MATCH(x) ((x) << S_AOPEN_MAC_MATCH)
#define G_AOPEN_MAC_MATCH(x) (((x) >> S_AOPEN_MAC_MATCH) & M_AOPEN_MAC_MATCH)

#define S_AOPEN_MAC_MATCH_VALID 19
#define V_AOPEN_MAC_MATCH_VALID(x) ((x) << S_AOPEN_MAC_MATCH_VALID)
#define F_AOPEN_MAC_MATCH_VALID V_AOPEN_MAC_MATCH_VALID(1U)

#define S_AOPEN_IFF_VLAN 20
#define M_AOPEN_IFF_VLAN 0xFFF
#define V_AOPEN_IFF_VLAN(x) ((x) << S_AOPEN_IFF_VLAN)
#define G_AOPEN_IFF_VLAN(x) (((x) >> S_AOPEN_IFF_VLAN) & M_AOPEN_IFF_VLAN)

/* reply to an active open request */
struct cpl_act_open_rpl {
	RSS_HDR union opcode_tid ot;
	__be16 local_port;
	__be16 peer_port;
	__be32 local_ip;
	__be32 peer_ip;
	__be32 atid;
	__u8 rsvd[3];
	__u8 status;
};

/* notification that an actively opened connection is established */
struct cpl_act_establish {
	RSS_HDR union opcode_tid ot;
	__be16 local_port;
	__be16 peer_port;
	__be32 local_ip;
	__be32 peer_ip;
	__be32 tos_tid;
	__be16 l2t_idx;
	__be16 tcp_opt;
	__be32 snd_isn;
	__be32 rcv_isn;
};
626
/* request to read a connection's TCB (TCP control block) */
struct cpl_get_tcb {
	WR_HDR;
	union opcode_tid ot;
	__be16 cpuno;
	__be16 rsvd;
};

/* reply carrying TCB read status and length */
struct cpl_get_tcb_rpl {
	RSS_HDR union opcode_tid ot;
	__u8 rsvd;
	__u8 status;
	__be16 len;
};

/* request to write a connection's TCB */
struct cpl_set_tcb {
	WR_HDR;
	union opcode_tid ot;
	__u8 reply;
	__u8 cpu_idx;
	__be16 len;
};

/* cpl_set_tcb.reply fields */
#define S_NO_REPLY 7
#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
#define F_NO_REPLY V_NO_REPLY(1U)

/* masked read-modify-write of a single TCB word */
struct cpl_set_tcb_field {
	WR_HDR;
	union opcode_tid ot;
	__u8 reply;
	__u8 cpu_idx;
	__be16 word;
	__be64 mask;
	__be64 val;
};

/* reply to a TCB write (unless F_NO_REPLY was set) */
struct cpl_set_tcb_rpl {
	RSS_HDR union opcode_tid ot;
	__u8 rsvd[3];
	__u8 status;
};

/* protocol engine command */
struct cpl_pcmd {
	WR_HDR;
	union opcode_tid ot;
	__u8 rsvd[3];
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8 src:1;
	__u8 bundle:1;
	__u8 channel:1;
	__u8:5;
#else
	__u8:5;
	__u8 channel:1;
	__u8 bundle:1;
	__u8 src:1;
#endif
	__be32 pcmd_parm[2];
};

/* reply to a protocol engine command */
struct cpl_pcmd_reply {
	RSS_HDR union opcode_tid ot;
	__u8 status;
	__u8 rsvd;
	__be16 len;
};

/* request to close a connection */
struct cpl_close_con_req {
	WR_HDR;
	union opcode_tid ot;
	__be32 rsvd;
};

/* connection close completion, with final sequence numbers */
struct cpl_close_con_rpl {
	RSS_HDR union opcode_tid ot;
	__u8 rsvd[3];
	__u8 status;
	__be32 snd_nxt;
	__be32 rcv_nxt;
};

/* request to close a listening server */
struct cpl_close_listserv_req {
	WR_HDR;
	union opcode_tid ot;
	__u8 rsvd0;
	__u8 cpu_idx;
	__be16 rsvd1;
};

/* reply to a listening-server close */
struct cpl_close_listserv_rpl {
	RSS_HDR union opcode_tid ot;
	__u8 rsvd[3];
	__u8 status;
};
722
/* RSS-delivered abort request from the peer/hardware */
struct cpl_abort_req_rss {
	RSS_HDR union opcode_tid ot;
	__be32 rsvd0;
	__u8 rsvd1;
	__u8 status;
	__u8 rsvd2[6];
};

/* host-initiated abort; cmd is one of CPL_ABORT_* */
struct cpl_abort_req {
	WR_HDR;
	union opcode_tid ot;
	__be32 rsvd0;
	__u8 rsvd1;
	__u8 cmd;
	__u8 rsvd2[6];
};

/* RSS-delivered abort reply */
struct cpl_abort_rpl_rss {
	RSS_HDR union opcode_tid ot;
	__be32 rsvd0;
	__u8 rsvd1;
	__u8 status;
	__u8 rsvd2[6];
};

/* host's reply to an abort request */
struct cpl_abort_rpl {
	WR_HDR;
	union opcode_tid ot;
	__be32 rsvd0;
	__u8 rsvd1;
	__u8 cmd;
	__u8 rsvd2[6];
};

/* notification that the peer half-closed (FIN received) */
struct cpl_peer_close {
	RSS_HDR union opcode_tid ot;
	__be32 rcv_nxt;
};

/* work request framing a TX data submission */
struct tx_data_wr {
	__be32 wr_hi;
	__be32 wr_lo;
	__be32 len;
	__be32 flags;
	__be32 sndseq;
	__be32 param;
};
770
/* tx_data_wr.param fields */
#define S_TX_PORT 0
#define M_TX_PORT 0x7
#define V_TX_PORT(x) ((x) << S_TX_PORT)
#define G_TX_PORT(x) (((x) >> S_TX_PORT) & M_TX_PORT)

#define S_TX_MSS 4
#define M_TX_MSS 0xF
#define V_TX_MSS(x) ((x) << S_TX_MSS)
#define G_TX_MSS(x) (((x) >> S_TX_MSS) & M_TX_MSS)

#define S_TX_QOS 8
#define M_TX_QOS 0xFF
#define V_TX_QOS(x) ((x) << S_TX_QOS)
#define G_TX_QOS(x) (((x) >> S_TX_QOS) & M_TX_QOS)

#define S_TX_SNDBUF 16
#define M_TX_SNDBUF 0xFFFF
#define V_TX_SNDBUF(x) ((x) << S_TX_SNDBUF)
#define G_TX_SNDBUF(x) (((x) >> S_TX_SNDBUF) & M_TX_SNDBUF)

/* TX payload submission for an offloaded connection */
struct cpl_tx_data {
	union opcode_tid ot;
	__be32 len;
	__be32 rsvd;
	__be16 urg;
	__be16 flags;
};

/* cpl_tx_data.flags fields */
#define S_TX_ULP_SUBMODE 6
#define M_TX_ULP_SUBMODE 0xF
#define V_TX_ULP_SUBMODE(x) ((x) << S_TX_ULP_SUBMODE)
#define G_TX_ULP_SUBMODE(x) (((x) >> S_TX_ULP_SUBMODE) & M_TX_ULP_SUBMODE)

#define S_TX_ULP_MODE 10
#define M_TX_ULP_MODE 0xF
#define V_TX_ULP_MODE(x) ((x) << S_TX_ULP_MODE)
#define G_TX_ULP_MODE(x) (((x) >> S_TX_ULP_MODE) & M_TX_ULP_MODE)

#define S_TX_SHOVE 14
#define V_TX_SHOVE(x) ((x) << S_TX_SHOVE)
#define F_TX_SHOVE V_TX_SHOVE(1U)

#define S_TX_MORE 15
#define V_TX_MORE(x) ((x) << S_TX_MORE)
#define F_TX_MORE V_TX_MORE(1U)

/* additional tx_data_wr.flags fields */
#define S_TX_CPU_IDX 0
#define M_TX_CPU_IDX 0x3F
#define V_TX_CPU_IDX(x) ((x) << S_TX_CPU_IDX)
#define G_TX_CPU_IDX(x) (((x) >> S_TX_CPU_IDX) & M_TX_CPU_IDX)

#define S_TX_URG 16
#define V_TX_URG(x) ((x) << S_TX_URG)
#define F_TX_URG V_TX_URG(1U)

#define S_TX_CLOSE 17
#define V_TX_CLOSE(x) ((x) << S_TX_CLOSE)
#define F_TX_CLOSE V_TX_CLOSE(1U)

#define S_TX_INIT 18
#define V_TX_INIT(x) ((x) << S_TX_INIT)
#define F_TX_INIT V_TX_INIT(1U)

#define S_TX_IMM_ACK 19
#define V_TX_IMM_ACK(x) ((x) << S_TX_IMM_ACK)
#define F_TX_IMM_ACK V_TX_IMM_ACK(1U)

#define S_TX_IMM_DMA 20
#define V_TX_IMM_DMA(x) ((x) << S_TX_IMM_DMA)
#define F_TX_IMM_DMA V_TX_IMM_DMA(1U)

/* acknowledgment of transmitted data */
struct cpl_tx_data_ack {
	RSS_HDR union opcode_tid ot;
	__be32 ack_seq;
};

/* work request completion: returned credits plus send sequence state */
struct cpl_wr_ack {
	RSS_HDR union opcode_tid ot;
	__be16 credits;
	__be16 rsvd;
	__be32 snd_nxt;
	__be32 snd_una;
};

/* RDMA error channel status notification */
struct cpl_rdma_ec_status {
	RSS_HDR union opcode_tid ot;
	__u8 rsvd[3];
	__u8 status;
};

/* management work request configuring packet scheduler bindings */
struct mngt_pktsched_wr {
	__be32 wr_hi;
	__be32 wr_lo;
	__u8 mngt_opcode;
	__u8 rsvd[7];
	__u8 sched;
	__u8 idx;
	__u8 min;
	__u8 max;
	__u8 binding;
	__u8 rsvd1[3];
};
876
877struct cpl_iscsi_hdr {
878 RSS_HDR union opcode_tid ot;
879 __be16 pdu_len_ddp;
880 __be16 len;
881 __be32 seq;
882 __be16 urg;
883 __u8 rsvd;
884 __u8 status;
885};
886
887/* cpl_iscsi_hdr.pdu_len_ddp fields */
888#define S_ISCSI_PDU_LEN 0
889#define M_ISCSI_PDU_LEN 0x7FFF
890#define V_ISCSI_PDU_LEN(x) ((x) << S_ISCSI_PDU_LEN)
891#define G_ISCSI_PDU_LEN(x) (((x) >> S_ISCSI_PDU_LEN) & M_ISCSI_PDU_LEN)
892
893#define S_ISCSI_DDP 15
894#define V_ISCSI_DDP(x) ((x) << S_ISCSI_DDP)
895#define F_ISCSI_DDP V_ISCSI_DDP(1U)
896
/* Received TCP payload notification.  Bitfields are declared in both
 * byte orders so the in-memory layout matches the wire format on either
 * endianness. */
struct cpl_rx_data {
	RSS_HDR union opcode_tid ot;
	__be16 rsvd;
	__be16 len;
	__be32 seq;
	__be16 urg;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8 dack_mode:2;
	__u8 psh:1;
	__u8 heartbeat:1;
	__u8:4;
#else
	__u8:4;
	__u8 heartbeat:1;
	__u8 psh:1;
	__u8 dack_mode:2;
#endif
	__u8 status;
};

/* Host -> card: return RX credits and configure delayed-ACK behaviour. */
struct cpl_rx_data_ack {
	WR_HDR;
	union opcode_tid ot;
	__be32 credit_dack;
};

/* cpl_rx_data_ack.credit_dack fields */
#define S_RX_CREDITS 0
#define M_RX_CREDITS 0x7FFFFFF
#define V_RX_CREDITS(x) ((x) << S_RX_CREDITS)
#define G_RX_CREDITS(x) (((x) >> S_RX_CREDITS) & M_RX_CREDITS)

#define S_RX_MODULATE 27
#define V_RX_MODULATE(x) ((x) << S_RX_MODULATE)
#define F_RX_MODULATE V_RX_MODULATE(1U)

#define S_RX_FORCE_ACK 28
#define V_RX_FORCE_ACK(x) ((x) << S_RX_FORCE_ACK)
#define F_RX_FORCE_ACK V_RX_FORCE_ACK(1U)

#define S_RX_DACK_MODE 29
#define M_RX_DACK_MODE 0x3
#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)

#define S_RX_DACK_CHANGE 31
#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
945
/* Notification that urgent data was received at TCP sequence @seq. */
struct cpl_rx_urg_notify {
	RSS_HDR union opcode_tid ot;
	__be32 seq;
};

/* DDP (direct data placement) buffer completion report. */
struct cpl_rx_ddp_complete {
	RSS_HDR union opcode_tid ot;
	__be32 ddp_report;
};
955
/* RX payload delivered via DDP.  @ddpvld_status packs an 8-bit status
 * with per-error valid bits (accessors below); the anonymous union holds
 * either the next expected sequence or a DDP buffer report. */
struct cpl_rx_data_ddp {
	RSS_HDR union opcode_tid ot;
	__be16 urg;
	__be16 len;
	__be32 seq;
	union {
		__be32 nxt_seq;
		__be32 ddp_report;
	};
	__be32 ulp_crc;
	__be32 ddpvld_status;
};

/* cpl_rx_data_ddp.ddpvld_status fields */
#define S_DDP_STATUS 0
#define M_DDP_STATUS 0xFF
#define V_DDP_STATUS(x) ((x) << S_DDP_STATUS)
#define G_DDP_STATUS(x) (((x) >> S_DDP_STATUS) & M_DDP_STATUS)

/* DDP_VALID is a 17-bit field (bits 15-31) that overlays the individual
 * error/event flags defined below, starting at DDP_PPOD_MISMATCH. */
#define S_DDP_VALID 15
#define M_DDP_VALID 0x1FFFF
#define V_DDP_VALID(x) ((x) << S_DDP_VALID)
#define G_DDP_VALID(x) (((x) >> S_DDP_VALID) & M_DDP_VALID)

#define S_DDP_PPOD_MISMATCH 15
#define V_DDP_PPOD_MISMATCH(x) ((x) << S_DDP_PPOD_MISMATCH)
#define F_DDP_PPOD_MISMATCH V_DDP_PPOD_MISMATCH(1U)

#define S_DDP_PDU 16
#define V_DDP_PDU(x) ((x) << S_DDP_PDU)
#define F_DDP_PDU V_DDP_PDU(1U)

#define S_DDP_LLIMIT_ERR 17
#define V_DDP_LLIMIT_ERR(x) ((x) << S_DDP_LLIMIT_ERR)
#define F_DDP_LLIMIT_ERR V_DDP_LLIMIT_ERR(1U)

#define S_DDP_PPOD_PARITY_ERR 18
#define V_DDP_PPOD_PARITY_ERR(x) ((x) << S_DDP_PPOD_PARITY_ERR)
#define F_DDP_PPOD_PARITY_ERR V_DDP_PPOD_PARITY_ERR(1U)

#define S_DDP_PADDING_ERR 19
#define V_DDP_PADDING_ERR(x) ((x) << S_DDP_PADDING_ERR)
#define F_DDP_PADDING_ERR V_DDP_PADDING_ERR(1U)

#define S_DDP_HDRCRC_ERR 20
#define V_DDP_HDRCRC_ERR(x) ((x) << S_DDP_HDRCRC_ERR)
#define F_DDP_HDRCRC_ERR V_DDP_HDRCRC_ERR(1U)

#define S_DDP_DATACRC_ERR 21
#define V_DDP_DATACRC_ERR(x) ((x) << S_DDP_DATACRC_ERR)
#define F_DDP_DATACRC_ERR V_DDP_DATACRC_ERR(1U)

#define S_DDP_INVALID_TAG 22
#define V_DDP_INVALID_TAG(x) ((x) << S_DDP_INVALID_TAG)
#define F_DDP_INVALID_TAG V_DDP_INVALID_TAG(1U)

#define S_DDP_ULIMIT_ERR 23
#define V_DDP_ULIMIT_ERR(x) ((x) << S_DDP_ULIMIT_ERR)
#define F_DDP_ULIMIT_ERR V_DDP_ULIMIT_ERR(1U)

#define S_DDP_OFFSET_ERR 24
#define V_DDP_OFFSET_ERR(x) ((x) << S_DDP_OFFSET_ERR)
#define F_DDP_OFFSET_ERR V_DDP_OFFSET_ERR(1U)

#define S_DDP_COLOR_ERR 25
#define V_DDP_COLOR_ERR(x) ((x) << S_DDP_COLOR_ERR)
#define F_DDP_COLOR_ERR V_DDP_COLOR_ERR(1U)

#define S_DDP_TID_MISMATCH 26
#define V_DDP_TID_MISMATCH(x) ((x) << S_DDP_TID_MISMATCH)
#define F_DDP_TID_MISMATCH V_DDP_TID_MISMATCH(1U)

#define S_DDP_INVALID_PPOD 27
#define V_DDP_INVALID_PPOD(x) ((x) << S_DDP_INVALID_PPOD)
#define F_DDP_INVALID_PPOD V_DDP_INVALID_PPOD(1U)

#define S_DDP_ULP_MODE 28
#define M_DDP_ULP_MODE 0xF
#define V_DDP_ULP_MODE(x) ((x) << S_DDP_ULP_MODE)
#define G_DDP_ULP_MODE(x) (((x) >> S_DDP_ULP_MODE) & M_DDP_ULP_MODE)

/* cpl_rx_data_ddp.ddp_report fields */
#define S_DDP_OFFSET 0
#define M_DDP_OFFSET 0x3FFFFF
#define V_DDP_OFFSET(x) ((x) << S_DDP_OFFSET)
#define G_DDP_OFFSET(x) (((x) >> S_DDP_OFFSET) & M_DDP_OFFSET)

#define S_DDP_URG 24
#define V_DDP_URG(x) ((x) << S_DDP_URG)
#define F_DDP_URG V_DDP_URG(1U)

#define S_DDP_PSH 25
#define V_DDP_PSH(x) ((x) << S_DDP_PSH)
#define F_DDP_PSH V_DDP_PSH(1U)

#define S_DDP_BUF_COMPLETE 26
#define V_DDP_BUF_COMPLETE(x) ((x) << S_DDP_BUF_COMPLETE)
#define F_DDP_BUF_COMPLETE V_DDP_BUF_COMPLETE(1U)

#define S_DDP_BUF_TIMED_OUT 27
#define V_DDP_BUF_TIMED_OUT(x) ((x) << S_DDP_BUF_TIMED_OUT)
#define F_DDP_BUF_TIMED_OUT V_DDP_BUF_TIMED_OUT(1U)

#define S_DDP_BUF_IDX 28
#define V_DDP_BUF_IDX(x) ((x) << S_DDP_BUF_IDX)
#define F_DDP_BUF_IDX V_DDP_BUF_IDX(1U)
1062
/* Transmit an Ethernet packet; @cntrl carries VLAN/interface/checksum
 * options (accessors below). */
struct cpl_tx_pkt {
	WR_HDR;
	__be32 cntrl;
	__be32 len;
};

/* Transmit with large-send offload; extends cpl_tx_pkt with the LSO
 * parameters packed into @lso_info. */
struct cpl_tx_pkt_lso {
	WR_HDR;
	__be32 cntrl;
	__be32 len;

	__be32 rsvd;
	__be32 lso_info;
};

/* cpl_tx_pkt*.cntrl fields */
#define S_TXPKT_VLAN 0
#define M_TXPKT_VLAN 0xFFFF
#define V_TXPKT_VLAN(x) ((x) << S_TXPKT_VLAN)
#define G_TXPKT_VLAN(x) (((x) >> S_TXPKT_VLAN) & M_TXPKT_VLAN)

#define S_TXPKT_INTF 16
#define M_TXPKT_INTF 0xF
#define V_TXPKT_INTF(x) ((x) << S_TXPKT_INTF)
#define G_TXPKT_INTF(x) (((x) >> S_TXPKT_INTF) & M_TXPKT_INTF)

#define S_TXPKT_IPCSUM_DIS 20
#define V_TXPKT_IPCSUM_DIS(x) ((x) << S_TXPKT_IPCSUM_DIS)
#define F_TXPKT_IPCSUM_DIS V_TXPKT_IPCSUM_DIS(1U)

#define S_TXPKT_L4CSUM_DIS 21
#define V_TXPKT_L4CSUM_DIS(x) ((x) << S_TXPKT_L4CSUM_DIS)
#define F_TXPKT_L4CSUM_DIS V_TXPKT_L4CSUM_DIS(1U)

#define S_TXPKT_VLAN_VLD 22
#define V_TXPKT_VLAN_VLD(x) ((x) << S_TXPKT_VLAN_VLD)
#define F_TXPKT_VLAN_VLD V_TXPKT_VLAN_VLD(1U)

#define S_TXPKT_LOOPBACK 23
#define V_TXPKT_LOOPBACK(x) ((x) << S_TXPKT_LOOPBACK)
#define F_TXPKT_LOOPBACK V_TXPKT_LOOPBACK(1U)

#define S_TXPKT_OPCODE 24
#define M_TXPKT_OPCODE 0xFF
#define V_TXPKT_OPCODE(x) ((x) << S_TXPKT_OPCODE)
#define G_TXPKT_OPCODE(x) (((x) >> S_TXPKT_OPCODE) & M_TXPKT_OPCODE)

/* cpl_tx_pkt_lso.lso_info fields */
#define S_LSO_MSS 0
#define M_LSO_MSS 0x3FFF
#define V_LSO_MSS(x) ((x) << S_LSO_MSS)
#define G_LSO_MSS(x) (((x) >> S_LSO_MSS) & M_LSO_MSS)

#define S_LSO_ETH_TYPE 14
#define M_LSO_ETH_TYPE 0x3
#define V_LSO_ETH_TYPE(x) ((x) << S_LSO_ETH_TYPE)
#define G_LSO_ETH_TYPE(x) (((x) >> S_LSO_ETH_TYPE) & M_LSO_ETH_TYPE)

#define S_LSO_TCPHDR_WORDS 16
#define M_LSO_TCPHDR_WORDS 0xF
#define V_LSO_TCPHDR_WORDS(x) ((x) << S_LSO_TCPHDR_WORDS)
#define G_LSO_TCPHDR_WORDS(x) (((x) >> S_LSO_TCPHDR_WORDS) & M_LSO_TCPHDR_WORDS)

#define S_LSO_IPHDR_WORDS 20
#define M_LSO_IPHDR_WORDS 0xF
#define V_LSO_IPHDR_WORDS(x) ((x) << S_LSO_IPHDR_WORDS)
#define G_LSO_IPHDR_WORDS(x) (((x) >> S_LSO_IPHDR_WORDS) & M_LSO_IPHDR_WORDS)

#define S_LSO_IPV6 24
#define V_LSO_IPV6(x) ((x) << S_LSO_IPV6)
#define F_LSO_IPV6 V_LSO_IPV6(1U)
1134
/* Trace-captured packet header; the CHELSIO_FW variant carries extra
 * firmware trace metadata (error flag, queue id, timestamp). */
struct cpl_trace_pkt {
#ifdef CHELSIO_FW
	__u8 rss_opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8 err:1;
	__u8:7;
#else
	__u8:7;
	__u8 err:1;
#endif
	__u8 rsvd0;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8 qid:4;
	__u8:4;
#else
	__u8:4;
	__u8 qid:4;
#endif
	__be32 tstamp;
#endif				/* CHELSIO_FW */

	__u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8 iff:4;
	__u8:4;
#else
	__u8:4;
	__u8 iff:4;
#endif
	__u8 rsvd[4];
	__be16 len;
};

/* Received Ethernet packet notification: ingress interface, checksum /
 * VLAN validity flags, and the packet length. */
struct cpl_rx_pkt {
	RSS_HDR __u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8 iff:4;
	__u8 csum_valid:1;
	__u8 ipmi_pkt:1;
	__u8 vlan_valid:1;
	__u8 fragment:1;
#else
	__u8 fragment:1;
	__u8 vlan_valid:1;
	__u8 ipmi_pkt:1;
	__u8 csum_valid:1;
	__u8 iff:4;
#endif
	__be16 csum;
	__be16 vlan;
	__be16 len;
};
1187
/* Write one L2 (MAC) table entry: index/VLAN/interface/priority packed
 * into @params (accessors below) plus the destination MAC. */
struct cpl_l2t_write_req {
	WR_HDR;
	union opcode_tid ot;
	__be32 params;
	__u8 rsvd[2];
	__u8 dst_mac[6];
};

/* cpl_l2t_write_req.params fields */
#define S_L2T_W_IDX 0
#define M_L2T_W_IDX 0x7FF
#define V_L2T_W_IDX(x) ((x) << S_L2T_W_IDX)
#define G_L2T_W_IDX(x) (((x) >> S_L2T_W_IDX) & M_L2T_W_IDX)

#define S_L2T_W_VLAN 11
#define M_L2T_W_VLAN 0xFFF
#define V_L2T_W_VLAN(x) ((x) << S_L2T_W_VLAN)
#define G_L2T_W_VLAN(x) (((x) >> S_L2T_W_VLAN) & M_L2T_W_VLAN)

#define S_L2T_W_IFF 23
#define M_L2T_W_IFF 0xF
#define V_L2T_W_IFF(x) ((x) << S_L2T_W_IFF)
#define G_L2T_W_IFF(x) (((x) >> S_L2T_W_IFF) & M_L2T_W_IFF)

#define S_L2T_W_PRIO 27
#define M_L2T_W_PRIO 0x7
#define V_L2T_W_PRIO(x) ((x) << S_L2T_W_PRIO)
#define G_L2T_W_PRIO(x) (((x) >> S_L2T_W_PRIO) & M_L2T_W_PRIO)

/* Status reply for an L2T write. */
struct cpl_l2t_write_rpl {
	RSS_HDR union opcode_tid ot;
	__u8 status;
	__u8 rsvd[3];
};

/* Read back one L2 table entry by index. */
struct cpl_l2t_read_req {
	WR_HDR;
	union opcode_tid ot;
	__be16 rsvd;
	__be16 l2t_idx;
};

/* Reply to an L2T read; layout of @params differs from the write
 * request (accessors below). */
struct cpl_l2t_read_rpl {
	RSS_HDR union opcode_tid ot;
	__be32 params;
	__u8 rsvd[2];
	__u8 dst_mac[6];
};

/* cpl_l2t_read_rpl.params fields */
#define S_L2T_R_PRIO 0
#define M_L2T_R_PRIO 0x7
#define V_L2T_R_PRIO(x) ((x) << S_L2T_R_PRIO)
#define G_L2T_R_PRIO(x) (((x) >> S_L2T_R_PRIO) & M_L2T_R_PRIO)

#define S_L2T_R_VLAN 8
#define M_L2T_R_VLAN 0xFFF
#define V_L2T_R_VLAN(x) ((x) << S_L2T_R_VLAN)
#define G_L2T_R_VLAN(x) (((x) >> S_L2T_R_VLAN) & M_L2T_R_VLAN)

#define S_L2T_R_IFF 20
#define M_L2T_R_IFF 0xF
#define V_L2T_R_IFF(x) ((x) << S_L2T_R_IFF)
#define G_L2T_R_IFF(x) (((x) >> S_L2T_R_IFF) & M_L2T_R_IFF)

#define S_L2T_STATUS 24
#define M_L2T_STATUS 0xFF
#define V_L2T_STATUS(x) ((x) << S_L2T_STATUS)
#define G_L2T_STATUS(x) (((x) >> S_L2T_STATUS) & M_L2T_STATUS)
1257
/* Write a source-MAC table (SMT) entry: two source MACs plus MTU index
 * and interface. */
struct cpl_smt_write_req {
	WR_HDR;
	union opcode_tid ot;
	__u8 rsvd0;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8 mtu_idx:4;
	__u8 iff:4;
#else
	__u8 iff:4;
	__u8 mtu_idx:4;
#endif
	__be16 rsvd2;
	__be16 rsvd3;
	__u8 src_mac1[6];
	__be16 rsvd4;
	__u8 src_mac0[6];
};

/* Status reply for an SMT write. */
struct cpl_smt_write_rpl {
	RSS_HDR union opcode_tid ot;
	__u8 status;
	__u8 rsvd[3];
};

/* Read back an SMT entry for the given interface. */
struct cpl_smt_read_req {
	WR_HDR;
	union opcode_tid ot;
	__u8 rsvd0;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8:4;
	__u8 iff:4;
#else
	__u8 iff:4;
	__u8:4;
#endif
	__be16 rsvd2;
};

/* Reply to an SMT read; mirrors the write-request payload. */
struct cpl_smt_read_rpl {
	RSS_HDR union opcode_tid ot;
	__u8 status;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8 mtu_idx:4;
	__u8:4;
#else
	__u8:4;
	__u8 mtu_idx:4;
#endif
	__be16 rsvd2;
	__be16 rsvd3;
	__u8 src_mac1[6];
	__be16 rsvd4;
	__u8 src_mac0[6];
};
1312
/* Delete a routing-table (RTE) entry identified by @params. */
struct cpl_rte_delete_req {
	WR_HDR;
	union opcode_tid ot;
	__be32 params;
};

/* { cpl_rte_delete_req, cpl_rte_read_req }.params fields */
#define S_RTE_REQ_LUT_IX 8
#define M_RTE_REQ_LUT_IX 0x7FF
#define V_RTE_REQ_LUT_IX(x) ((x) << S_RTE_REQ_LUT_IX)
#define G_RTE_REQ_LUT_IX(x) (((x) >> S_RTE_REQ_LUT_IX) & M_RTE_REQ_LUT_IX)

#define S_RTE_REQ_LUT_BASE 19
#define M_RTE_REQ_LUT_BASE 0x7FF
#define V_RTE_REQ_LUT_BASE(x) ((x) << S_RTE_REQ_LUT_BASE)
#define G_RTE_REQ_LUT_BASE(x) (((x) >> S_RTE_REQ_LUT_BASE) & M_RTE_REQ_LUT_BASE)

#define S_RTE_READ_REQ_SELECT 31
#define V_RTE_READ_REQ_SELECT(x) ((x) << S_RTE_READ_REQ_SELECT)
#define F_RTE_READ_REQ_SELECT V_RTE_READ_REQ_SELECT(1U)

/* Status reply for an RTE delete. */
struct cpl_rte_delete_rpl {
	RSS_HDR union opcode_tid ot;
	__u8 status;
	__u8 rsvd[3];
};

/* Write an RTE entry; flags select whether the TCAM and/or the L2T LUT
 * portion is updated. */
struct cpl_rte_write_req {
	WR_HDR;
	union opcode_tid ot;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8:6;
	__u8 write_tcam:1;
	__u8 write_l2t_lut:1;
#else
	__u8 write_l2t_lut:1;
	__u8 write_tcam:1;
	__u8:6;
#endif
	__u8 rsvd[3];
	__be32 lut_params;
	__be16 rsvd2;
	__be16 l2t_idx;
	__be32 netmask;
	__be32 faddr;
};

/* cpl_rte_write_req.lut_params fields */
#define S_RTE_WRITE_REQ_LUT_IX 10
#define M_RTE_WRITE_REQ_LUT_IX 0x7FF
#define V_RTE_WRITE_REQ_LUT_IX(x) ((x) << S_RTE_WRITE_REQ_LUT_IX)
#define G_RTE_WRITE_REQ_LUT_IX(x) (((x) >> S_RTE_WRITE_REQ_LUT_IX) & M_RTE_WRITE_REQ_LUT_IX)

#define S_RTE_WRITE_REQ_LUT_BASE 21
#define M_RTE_WRITE_REQ_LUT_BASE 0x7FF
#define V_RTE_WRITE_REQ_LUT_BASE(x) ((x) << S_RTE_WRITE_REQ_LUT_BASE)
#define G_RTE_WRITE_REQ_LUT_BASE(x) (((x) >> S_RTE_WRITE_REQ_LUT_BASE) & M_RTE_WRITE_REQ_LUT_BASE)

/* Status reply for an RTE write. */
struct cpl_rte_write_rpl {
	RSS_HDR union opcode_tid ot;
	__u8 status;
	__u8 rsvd[3];
};

/* Read an RTE entry (same params layout as the delete request). */
struct cpl_rte_read_req {
	WR_HDR;
	union opcode_tid ot;
	__be32 params;
};

/* Reply to an RTE read. */
struct cpl_rte_read_rpl {
	RSS_HDR union opcode_tid ot;
	__u8 status;
	__u8 rsvd0;
	__be16 l2t_idx;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8:7;
	__u8 select:1;
#else
	__u8 select:1;
	__u8:7;
#endif
	__u8 rsvd2[3];
	__be32 addr;
};
1398
/* Release a TID back to the hardware's free pool. */
struct cpl_tid_release {
	WR_HDR;
	union opcode_tid ot;
	__be32 rsvd;
};

/* Pipeline barrier work request. */
struct cpl_barrier {
	WR_HDR;
	__u8 opcode;
	__u8 rsvd[7];
};

/* RDMA read request (payload defined by firmware). */
struct cpl_rdma_read_req {
	__u8 opcode;
	__u8 rsvd[15];
};

/* RDMA TERMINATE message; the CHELSIO_FW variant prepends routing
 * metadata.  @data is a trailing variable-length payload (GNU
 * zero-length array; a C99 flexible array member would be the modern
 * spelling, but changing it would touch every user of sizeof). */
struct cpl_rdma_terminate {
#ifdef CHELSIO_FW
	__u8 opcode;
	__u8 rsvd[2];
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8 rspq:3;
	__u8:5;
#else
	__u8:5;
	__u8 rspq:3;
#endif
	__be32 tid_len;
#endif
	__be32 msn;
	__be32 mo;
	__u8 data[0];
};

/* cpl_rdma_terminate.tid_len fields */
#define S_FLIT_CNT 0
#define M_FLIT_CNT 0xFF
#define V_FLIT_CNT(x) ((x) << S_FLIT_CNT)
#define G_FLIT_CNT(x) (((x) >> S_FLIT_CNT) & M_FLIT_CNT)

#define S_TERM_TID 8
#define M_TERM_TID 0xFFFFF
#define V_TERM_TID(x) ((x) << S_TERM_TID)
#define G_TERM_TID(x) (((x) >> S_TERM_TID) & M_TERM_TID)
1444#endif /* T3_CPL_H */
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
new file mode 100644
index 000000000000..365a7f5b1f94
--- /dev/null
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -0,0 +1,3375 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33#include "regs.h"
34#include "sge_defs.h"
35#include "firmware_exports.h"
36
37/**
38 * t3_wait_op_done_val - wait until an operation is completed
39 * @adapter: the adapter performing the operation
40 * @reg: the register to check for completion
41 * @mask: a single-bit field within @reg that indicates completion
42 * @polarity: the value of the field when the operation is completed
43 * @attempts: number of check iterations
44 * @delay: delay in usecs between iterations
45 * @valp: where to store the value of the register at completion time
46 *
47 * Wait until an operation is completed by checking a bit in a register
48 * up to @attempts times. If @valp is not NULL the value of the register
49 * at the time it indicated completion is stored there. Returns 0 if the
50 * operation completes and -EAGAIN otherwise.
51 */
52
53int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
54 int polarity, int attempts, int delay, u32 *valp)
55{
56 while (1) {
57 u32 val = t3_read_reg(adapter, reg);
58
59 if (!!(val & mask) == polarity) {
60 if (valp)
61 *valp = val;
62 return 0;
63 }
64 if (--attempts == 0)
65 return -EAGAIN;
66 if (delay)
67 udelay(delay);
68 }
69}
70
71/**
72 * t3_write_regs - write a bunch of registers
73 * @adapter: the adapter to program
74 * @p: an array of register address/register value pairs
75 * @n: the number of address/value pairs
76 * @offset: register address offset
77 *
78 * Takes an array of register address/register value pairs and writes each
79 * value to the corresponding register. Register addresses are adjusted
80 * by the supplied offset.
81 */
82void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
83 int n, unsigned int offset)
84{
85 while (n--) {
86 t3_write_reg(adapter, p->reg_addr + offset, p->val);
87 p++;
88 }
89}
90
91/**
92 * t3_set_reg_field - set a register field to a value
93 * @adapter: the adapter to program
94 * @addr: the register address
95 * @mask: specifies the portion of the register to modify
96 * @val: the new value for the register field
97 *
98 * Sets a register field specified by the supplied mask to the
99 * given value.
100 */
101void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
102 u32 val)
103{
104 u32 v = t3_read_reg(adapter, addr) & ~mask;
105
106 t3_write_reg(adapter, addr, v | val);
107 t3_read_reg(adapter, addr); /* flush */
108}
109
110/**
111 * t3_read_indirect - read indirectly addressed registers
112 * @adap: the adapter
113 * @addr_reg: register holding the indirect address
114 * @data_reg: register holding the value of the indirect register
115 * @vals: where the read register values are stored
116 * @start_idx: index of first indirect register to read
117 * @nregs: how many indirect registers to read
118 *
119 * Reads registers that are accessed indirectly through an address/data
120 * register pair.
121 */
122void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
123 unsigned int data_reg, u32 *vals, unsigned int nregs,
124 unsigned int start_idx)
125{
126 while (nregs--) {
127 t3_write_reg(adap, addr_reg, start_idx);
128 *vals++ = t3_read_reg(adap, data_reg);
129 start_idx++;
130 }
131}
132
/**
 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
 *	@mc7: identifies MC7 to read from
 *	@start: index of first 64-bit word to read
 *	@n: number of 64-bit words to read
 *	@buf: where to store the read result
 *
 *	Read n 64-bit words from MC7 starting at word start, using backdoor
 *	accesses.  Returns 0 on success, -EINVAL for an out-of-range
 *	request, -EIO if a backdoor operation never leaves the busy state.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	/* Per-width assembly tables indexed by mc7->width: how far each
	 * 32-bit backdoor read is shifted down and how many result bits
	 * each sub-read contributes.  NOTE(review): values are
	 * hardware-defined — confirm against the MC7 documentation. */
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	/* reject reads that start or run past the end of the memory */
	if (start >= size64 || start + n > size64)
		return -EINVAL;

	/* convert the word index into a byte-ish backdoor address scaled
	 * by the memory width */
	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		/* issue (1 << width) backdoor reads per 64-bit word,
		 * highest sub-read first */
		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			/* start the backdoor operation and poll for done */
			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				/* full-width memory: DATA0/DATA1 hold the
				 * low/high 32 bits of the word */
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				/* narrow memory: take the significant part
				 * of DATA1 and slot it into position i */
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
190
/*
 * Initialize MI1 (the MDIO management interface): program the clock
 * divider derived from the core clock and desired MDC rate, and the
 * board's MDIO polarity/enable settings.
 */
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
	    V_CLKDIV(clkdiv);

	/* NOTE(review): ST is set only for non-10G boards — presumably it
	 * selects the clause-22 start-of-frame pattern; confirm against
	 * the MI1_CFG register documentation. */
	if (!(ai->caps & SUPPORTED_10000baseT_Full))
		val |= V_ST(1);
	t3_write_reg(adap, A_MI1_CFG, val);
}
204
/* Maximum polls of the MI1_OP busy bit before an MDIO op is failed */
#define MDIO_ATTEMPTS 10

/*
 * MI1 read/write operations for direct-addressed PHYs.  Returns 0 on
 * success or a negative errno; -EINVAL if a (clause 45 style) mmd_addr
 * is supplied, which direct addressing does not support.
 */
static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
		    int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	/* serialize access to the shared MI1 address/data registers */
	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));	/* op 2: read */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret)
		*valp = t3_read_reg(adapter, A_MI1_DATA);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
228
/*
 * Direct-addressed MDIO write counterpart of mi1_read(); rejects a
 * non-zero mmd_addr for the same reason.
 */
static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
		     int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	/* serialize access to the shared MI1 address/data registers */
	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));	/* op 1: write */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

/* MDIO ops vector for direct-addressed (clause 22 style) PHYs */
static const struct mdio_ops mi1_mdio_ops = {
	mi1_read,
	mi1_write
};
251
/*
 * MI1 read/write operations for indirect-addressed PHYs.  Each access is
 * two MI1 operations: an address cycle (op 0) that latches @reg_addr,
 * then the data cycle (op 3 = read-with-address-cycle-done).
 */
static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	/* serialize access to the shared MI1 address/data registers */
	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));	/* address cycle */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));	/* read */
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 20);
		if (!ret)
			*valp = t3_read_reg(adapter, A_MI1_DATA);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
276
/*
 * Indirect-addressed MDIO write: address cycle (op 0) to latch
 * @reg_addr, then a data write cycle (op 1).
 */
static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	/* serialize access to the shared MI1 address/data registers */
	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));	/* address cycle */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));	/* write */
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 20);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

/* MDIO ops vector for indirect-addressed (clause 45 style) PHYs */
static const struct mdio_ops mi1_mdio_ext_ops = {
	mi1_ext_read,
	mi1_ext_write
};
302
/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Read-modify-write on a PHY register: clears the bits in @clear,
 *	ORs in @set, and writes the result back.  Returns 0 or the error
 *	from the underlying MDIO operation.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	unsigned int val;
	int ret = mdio_read(phy, mmd, reg, &val);

	if (ret)
		return ret;
	return mdio_write(phy, mmd, reg, (val & ~clear) | set);
}
327
/**
 *	t3_phy_reset - reset a PHY block
 *	@phy: the PHY to operate on
 *	@mmd: the device address of the PHY block to reset
 *	@wait: how long to wait for the reset to complete in 1ms increments
 *
 *	Resets a PHY block and optionally waits for the reset to complete.
 *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 *	for 10G PHYs.  Returns 0 on success, a negative errno on MDIO
 *	failure, or -1 if the reset did not complete within @wait ms.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	/* assert RESET while also clearing power-down */
	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
	if (err || !wait)
		return err;

	/* poll BMCR until the self-clearing RESET bit drops, 1 ms/try */
	do {
		err = mdio_read(phy, mmd, MII_BMCR, &ctl);
		if (err)
			return err;
		ctl &= BMCR_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}
358
/**
 *	t3_phy_advertise - set the PHY advertisement registers for autoneg
 *	@phy: the PHY to operate on
 *	@advert: bitmap of capabilities the PHY should advertise
 *
 *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
 *	requested capabilities.  Returns 0 or the first MDIO error.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	/* GigE abilities live in MII_CTRL1000; read-modify-write them */
	err = mdio_read(phy, 0, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = mdio_write(phy, 0, MII_CTRL1000, val);
	if (err)
		return err;

	/* 10/100 + pause abilities are rebuilt from scratch in
	 * MII_ADVERTISE.  NOTE(review): the initial 1 is presumably the
	 * 802.3 selector field (ADVERTISE_CSMA) — confirm against mii.h. */
	val = 1;
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}
401
402/**
403 * t3_set_phy_speed_duplex - force PHY speed and duplex
404 * @phy: the PHY to operate on
405 * @speed: requested PHY speed
406 * @duplex: requested PHY duplex
407 *
408 * Force a 10/100/1000 PHY's speed and duplex. This also disables
409 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
410 */
411int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
412{
413 int err;
414 unsigned int ctl;
415
416 err = mdio_read(phy, 0, MII_BMCR, &ctl);
417 if (err)
418 return err;
419
420 if (speed >= 0) {
421 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
422 if (speed == SPEED_100)
423 ctl |= BMCR_SPEED100;
424 else if (speed == SPEED_1000)
425 ctl |= BMCR_SPEED1000;
426 }
427 if (duplex >= 0) {
428 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
429 if (duplex == DUPLEX_FULL)
430 ctl |= BMCR_FULLDPLX;
431 }
432 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
433 ctl |= BMCR_ANENABLE;
434 return mdio_write(phy, 0, MII_BMCR, ctl);
435}
436
/* Static per-board configuration table, indexed by board id.
 * NOTE(review): positional initializers — field meanings (port count,
 * GPIO output-enable/output-value masks, interrupt GPIOs, link caps,
 * MDIO ops, product name) inferred from the values; confirm against
 * struct adapter_info in common.h. */
static const struct adapter_info t3_adap_info[] = {
	{2, 0, 0, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	 SUPPORTED_OFFLOAD,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{2, 0, 0, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	 SUPPORTED_OFFLOAD,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
	 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{2, 0, 0, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
	 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
};
460
461/*
462 * Return the adapter_info structure with a given index. Out-of-range indices
463 * return NULL.
464 */
465const struct adapter_info *t3_get_adapter_info(unsigned int id)
466{
467 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
468}
469
/* Shorthand capability masks used only to build port_types[] below */
#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
		 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)

/* Per-port-type PHY preparation hook, capabilities, and description;
 * indexed by the port type read from VPD.  A NULL prep entry means the
 * port type is unsupported (or needs no PHY setup — confirm callers). */
static const struct port_type_info port_types[] = {
	{NULL},
	{t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	 "10GBASE-XR"},
	{t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	 "10/100/1000BASE-T"},
	{NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	 "10/100/1000BASE-T"},
	{t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
	{NULL, CAPS_10G, "10GBASE-KX4"},
	{t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
	{t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	 "10GBASE-SR"},
	{NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
};

#undef CAPS_1G
#undef CAPS_10G
492
/* Expands to one PCI VPD-R entry: 2-byte keyword, 1-byte length, and the
 * data bytes */
#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, 16);	/* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};

/* max polls of PCI_VPD_ADDR_F before an EEPROM access is failed */
#define EEPROM_MAX_POLL 4
/* pseudo-address used to reach the EEPROM status/write-protect word */
#define EEPROM_STAT_ADDR 0x4000
/* EEPROM offset of the VPD structure above */
#define VPD_BASE 0xc00
528
/**
 *	t3_seeprom_read - read a VPD EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM address (must be 4-byte aligned and in range)
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.  A zero is written to the flag bit when the
 *	address is written to the control register.  The hardware device will
 *	set the flag to 1 when 4 bytes have been read into the data register.
 *	Returns 0 on success, -EINVAL for a bad address, -EIO on timeout.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	/* writing the address with flag clear starts the read */
	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
	do {
		udelay(10);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, data);
	/* EEPROM data is little-endian; convert to host order in place */
	*data = le32_to_cpu(*data);
	return 0;
}
563
564/**
565 * t3_seeprom_write - write a VPD EEPROM location
566 * @adapter: adapter to write
567 * @addr: EEPROM address
568 * @data: value to write
569 *
570 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
571 * VPD ROM capability.
572 */
573int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
574{
575 u16 val;
576 int attempts = EEPROM_MAX_POLL;
577 unsigned int base = adapter->params.pci.vpd_cap_addr;
578
579 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
580 return -EINVAL;
581
582 pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
583 cpu_to_le32(data));
584 pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
585 addr | PCI_VPD_ADDR_F);
586 do {
587 msleep(1);
588 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
589 } while ((val & PCI_VPD_ADDR_F) && --attempts);
590
591 if (val & PCI_VPD_ADDR_F) {
592 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
593 return -EIO;
594 }
595 return 0;
596}
597
598/**
599 * t3_seeprom_wp - enable/disable EEPROM write protection
600 * @adapter: the adapter
601 * @enable: 1 to enable write protection, 0 to disable it
602 *
603 * Enables or disables write protection on the serial EEPROM.
604 */
605int t3_seeprom_wp(struct adapter *adapter, int enable)
606{
607 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
608}
609
610/*
611 * Convert a character holding a hex digit to a number.
612 */
613static unsigned int hex2int(unsigned char c)
614{
615 return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
616}
617
618/**
619 * get_vpd_params - read VPD parameters from VPD EEPROM
620 * @adapter: adapter to read
621 * @p: where to store the parameters
622 *
623 * Reads card parameters stored in VPD EEPROM.
624 */
625static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
626{
627 int i, addr, ret;
628 struct t3_vpd vpd;
629
630 /*
631 * Card information is normally at VPD_BASE but some early cards had
632 * it at 0.
633 */
634 ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
635 if (ret)
636 return ret;
637 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
638
639 for (i = 0; i < sizeof(vpd); i += 4) {
640 ret = t3_seeprom_read(adapter, addr + i,
641 (u32 *)((u8 *)&vpd + i));
642 if (ret)
643 return ret;
644 }
645
646 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
647 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
648 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
649 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
650 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
651
652 /* Old eeproms didn't have port information */
653 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
654 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
655 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
656 } else {
657 p->port_type[0] = hex2int(vpd.port0_data[0]);
658 p->port_type[1] = hex2int(vpd.port1_data[0]);
659 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
660 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
661 }
662
663 for (i = 0; i < 6; i++)
664 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
665 hex2int(vpd.na_data[2 * i + 1]);
666 return 0;
667}
668
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes (values match the common SPI NOR set) */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x77ffc	/* flash address holding FW version */
};
686
687/**
688 * sf1_read - read data from the serial flash
689 * @adapter: the adapter
690 * @byte_cnt: number of bytes to read
691 * @cont: whether another operation will be chained
692 * @valp: where to store the read data
693 *
694 * Reads up to 4 bytes of data from the serial flash. The location of
695 * the read needs to be specified prior to calling this by issuing the
696 * appropriate commands to the serial flash.
697 */
698static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
699 u32 *valp)
700{
701 int ret;
702
703 if (!byte_cnt || byte_cnt > 4)
704 return -EINVAL;
705 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
706 return -EBUSY;
707 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
708 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
709 if (!ret)
710 *valp = t3_read_reg(adapter, A_SF_DATA);
711 return ret;
712}
713
714/**
715 * sf1_write - write data to the serial flash
716 * @adapter: the adapter
717 * @byte_cnt: number of bytes to write
718 * @cont: whether another operation will be chained
719 * @val: value to write
720 *
721 * Writes up to 4 bytes of data to the serial flash. The location of
722 * the write needs to be specified prior to calling this by issuing the
723 * appropriate commands to the serial flash.
724 */
725static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
726 u32 val)
727{
728 if (!byte_cnt || byte_cnt > 4)
729 return -EINVAL;
730 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
731 return -EBUSY;
732 t3_write_reg(adapter, A_SF_DATA, val);
733 t3_write_reg(adapter, A_SF_OP,
734 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
735 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
736}
737
738/**
739 * flash_wait_op - wait for a flash operation to complete
740 * @adapter: the adapter
741 * @attempts: max number of polls of the status register
742 * @delay: delay between polls in ms
743 *
744 * Wait for a flash operation to complete by polling the status register.
745 */
746static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
747{
748 int ret;
749 u32 status;
750
751 while (1) {
752 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
753 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
754 return ret;
755 if (!(status & 1))
756 return 0;
757 if (--attempts == 0)
758 return -EAGAIN;
759 if (delay)
760 msleep(delay);
761 }
762}
763
764/**
765 * t3_read_flash - read words from serial flash
766 * @adapter: the adapter
767 * @addr: the start address for the read
768 * @nwords: how many 32-bit words to read
769 * @data: where to store the read data
770 * @byte_oriented: whether to store data as bytes or as words
771 *
772 * Read the specified number of 32-bit words from the serial flash.
773 * If @byte_oriented is set the read data is stored as a byte array
774 * (i.e., big-endian), otherwise as 32-bit words in the platform's
775 * natural endianess.
776 */
777int t3_read_flash(struct adapter *adapter, unsigned int addr,
778 unsigned int nwords, u32 *data, int byte_oriented)
779{
780 int ret;
781
782 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
783 return -EINVAL;
784
785 addr = swab32(addr) | SF_RD_DATA_FAST;
786
787 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
788 (ret = sf1_read(adapter, 1, 1, data)) != 0)
789 return ret;
790
791 for (; nwords; nwords--, data++) {
792 ret = sf1_read(adapter, 4, nwords > 1, data);
793 if (ret)
794 return ret;
795 if (byte_oriented)
796 *data = htonl(*data);
797 }
798 return 0;
799}
800
801/**
802 * t3_write_flash - write up to a page of data to the serial flash
803 * @adapter: the adapter
804 * @addr: the start address to write
805 * @n: length of data to write
806 * @data: the data to write
807 *
808 * Writes up to a page of data (256 bytes) to the serial flash starting
809 * at the given address.
810 */
811static int t3_write_flash(struct adapter *adapter, unsigned int addr,
812 unsigned int n, const u8 *data)
813{
814 int ret;
815 u32 buf[64];
816 unsigned int i, c, left, val, offset = addr & 0xff;
817
818 if (addr + n > SF_SIZE || offset + n > 256)
819 return -EINVAL;
820
821 val = swab32(addr) | SF_PROG_PAGE;
822
823 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
824 (ret = sf1_write(adapter, 4, 1, val)) != 0)
825 return ret;
826
827 for (left = n; left; left -= c) {
828 c = min(left, 4U);
829 for (val = 0, i = 0; i < c; ++i)
830 val = (val << 8) + *data++;
831
832 ret = sf1_write(adapter, c, c != left, val);
833 if (ret)
834 return ret;
835 }
836 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
837 return ret;
838
839 /* Read the page to verify the write succeeded */
840 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
841 if (ret)
842 return ret;
843
844 if (memcmp(data - n, (u8 *) buf + offset, n))
845 return -EIO;
846 return 0;
847}
848
/* Board flavors encoded in the type field of the FW version word. */
enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};
853
854/**
855 * t3_get_fw_version - read the firmware version
856 * @adapter: the adapter
857 * @vers: where to place the version
858 *
859 * Reads the FW version from flash.
860 */
861int t3_get_fw_version(struct adapter *adapter, u32 *vers)
862{
863 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
864}
865
866/**
867 * t3_check_fw_version - check if the FW is compatible with this driver
868 * @adapter: the adapter
869 *
870 * Checks if an adapter's FW is compatible with the driver. Returns 0
871 * if the versions are compatible, a negative error otherwise.
872 */
873int t3_check_fw_version(struct adapter *adapter)
874{
875 int ret;
876 u32 vers;
877 unsigned int type, major, minor;
878
879 ret = t3_get_fw_version(adapter, &vers);
880 if (ret)
881 return ret;
882
883 type = G_FW_VERSION_TYPE(vers);
884 major = G_FW_VERSION_MAJOR(vers);
885 minor = G_FW_VERSION_MINOR(vers);
886
887 if (type == FW_VERSION_T3 && major == 3 && minor == 1)
888 return 0;
889
890 CH_ERR(adapter, "found wrong FW version(%u.%u), "
891 "driver needs version 3.1\n", major, minor);
892 return -EINVAL;
893}
894
895/**
896 * t3_flash_erase_sectors - erase a range of flash sectors
897 * @adapter: the adapter
898 * @start: the first sector to erase
899 * @end: the last sector to erase
900 *
901 * Erases the sectors in the given range.
902 */
903static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
904{
905 while (start <= end) {
906 int ret;
907
908 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
909 (ret = sf1_write(adapter, 4, 0,
910 SF_ERASE_SECTOR | (start << 8))) != 0 ||
911 (ret = flash_wait_op(adapter, 5, 500)) != 0)
912 return ret;
913 start++;
914 }
915 return 0;
916}
917
918/*
919 * t3_load_fw - download firmware
920 * @adapter: the adapter
921 * @fw_data: the firrware image to write
922 * @size: image size
923 *
924 * Write the supplied firmware image to the card's serial flash.
925 * The FW image has the following sections: @size - 8 bytes of code and
926 * data, followed by 4 bytes of FW version, followed by the 32-bit
927 * 1's complement checksum of the whole image.
928 */
929int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
930{
931 u32 csum;
932 unsigned int i;
933 const u32 *p = (const u32 *)fw_data;
934 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
935
936 if (size & 3)
937 return -EINVAL;
938 if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
939 return -EFBIG;
940
941 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
942 csum += ntohl(p[i]);
943 if (csum != 0xffffffff) {
944 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
945 csum);
946 return -EINVAL;
947 }
948
949 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
950 if (ret)
951 goto out;
952
953 size -= 8; /* trim off version and checksum */
954 for (addr = FW_FLASH_BOOT_ADDR; size;) {
955 unsigned int chunk_size = min(size, 256U);
956
957 ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
958 if (ret)
959 goto out;
960
961 addr += chunk_size;
962 fw_data += chunk_size;
963 size -= chunk_size;
964 }
965
966 ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
967out:
968 if (ret)
969 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
970 return ret;
971}
972
973#define CIM_CTL_BASE 0x2000
974
975/**
976 * t3_cim_ctl_blk_read - read a block from CIM control region
977 *
978 * @adap: the adapter
979 * @addr: the start address within the CIM control region
980 * @n: number of words to read
981 * @valp: where to store the result
982 *
983 * Reads a block of 4-byte words from the CIM control region.
984 */
985int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
986 unsigned int n, unsigned int *valp)
987{
988 int ret = 0;
989
990 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
991 return -EBUSY;
992
993 for ( ; !ret && n--; addr += 4) {
994 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
995 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
996 0, 5, 2);
997 if (!ret)
998 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
999 }
1000 return ret;
1001}
1002
1003
1004/**
1005 * t3_link_changed - handle interface link changes
1006 * @adapter: the adapter
1007 * @port_id: the port index that changed link state
1008 *
1009 * Called when a port's link settings change to propagate the new values
1010 * to the associated PHY and MAC. After performing the common tasks it
1011 * invokes an OS-specific handler.
1012 */
1013void t3_link_changed(struct adapter *adapter, int port_id)
1014{
1015 int link_ok, speed, duplex, fc;
1016 struct port_info *pi = adap2pinfo(adapter, port_id);
1017 struct cphy *phy = &pi->phy;
1018 struct cmac *mac = &pi->mac;
1019 struct link_config *lc = &pi->link_config;
1020
1021 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1022
1023 if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1024 uses_xaui(adapter)) {
1025 if (link_ok)
1026 t3b_pcs_reset(mac);
1027 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1028 link_ok ? F_TXACTENABLE | F_RXEN : 0);
1029 }
1030 lc->link_ok = link_ok;
1031 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1032 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1033 if (lc->requested_fc & PAUSE_AUTONEG)
1034 fc &= lc->requested_fc;
1035 else
1036 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1037
1038 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1039 /* Set MAC speed, duplex, and flow control to match PHY. */
1040 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1041 lc->fc = fc;
1042 }
1043
1044 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1045}
1046
1047/**
1048 * t3_link_start - apply link configuration to MAC/PHY
1049 * @phy: the PHY to setup
1050 * @mac: the MAC to setup
1051 * @lc: the requested link configuration
1052 *
1053 * Set up a port's MAC and PHY according to a desired link configuration.
1054 * - If the PHY can auto-negotiate first decide what to advertise, then
1055 * enable/disable auto-negotiation as desired, and reset.
1056 * - If the PHY does not auto-negotiate just reset it.
1057 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1058 * otherwise do it later based on the outcome of auto-negotiation.
1059 */
1060int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1061{
1062 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1063
1064 lc->link_ok = 0;
1065 if (lc->supported & SUPPORTED_Autoneg) {
1066 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1067 if (fc) {
1068 lc->advertising |= ADVERTISED_Asym_Pause;
1069 if (fc & PAUSE_RX)
1070 lc->advertising |= ADVERTISED_Pause;
1071 }
1072 phy->ops->advertise(phy, lc->advertising);
1073
1074 if (lc->autoneg == AUTONEG_DISABLE) {
1075 lc->speed = lc->requested_speed;
1076 lc->duplex = lc->requested_duplex;
1077 lc->fc = (unsigned char)fc;
1078 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1079 fc);
1080 /* Also disables autoneg */
1081 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1082 phy->ops->reset(phy, 0);
1083 } else
1084 phy->ops->autoneg_enable(phy);
1085 } else {
1086 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1087 lc->fc = (unsigned char)fc;
1088 phy->ops->reset(phy, 0);
1089 }
1090 return 0;
1091}
1092
1093/**
1094 * t3_set_vlan_accel - control HW VLAN extraction
1095 * @adapter: the adapter
1096 * @ports: bitmap of adapter ports to operate on
1097 * @on: enable (1) or disable (0) HW VLAN extraction
1098 *
1099 * Enables or disables HW extraction of VLAN tags for the given port.
1100 */
1101void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1102{
1103 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1104 ports << S_VLANEXTRACTIONENABLE,
1105 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1106}
1107
/*
 * One entry in the tables consumed by the table-driven interrupt handler
 * t3_handle_intr_status().  Tables are terminated by an entry with mask 0.
 */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal:1;	/* whether the condition reported is fatal */
};
1114
1115/**
1116 * t3_handle_intr_status - table driven interrupt handler
1117 * @adapter: the adapter that generated the interrupt
1118 * @reg: the interrupt status register to process
1119 * @mask: a mask to apply to the interrupt status
1120 * @acts: table of interrupt actions
1121 * @stats: statistics counters tracking interrupt occurences
1122 *
1123 * A table driven interrupt handler that applies a set of masks to an
1124 * interrupt status word and performs the corresponding actions if the
1125 * interrupts described by the mask have occured. The actions include
1126 * optionally printing a warning or alert message, and optionally
1127 * incrementing a stat counter. The table is terminated by an entry
1128 * specifying mask 0. Returns the number of fatal interrupt conditions.
1129 */
1130static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1131 unsigned int mask,
1132 const struct intr_info *acts,
1133 unsigned long *stats)
1134{
1135 int fatal = 0;
1136 unsigned int status = t3_read_reg(adapter, reg) & mask;
1137
1138 for (; acts->mask; ++acts) {
1139 if (!(status & acts->mask))
1140 continue;
1141 if (acts->fatal) {
1142 fatal++;
1143 CH_ALERT(adapter, "%s (0x%x)\n",
1144 acts->msg, status & acts->mask);
1145 } else if (acts->msg)
1146 CH_WARN(adapter, "%s (0x%x)\n",
1147 acts->msg, status & acts->mask);
1148 if (acts->stat_idx >= 0)
1149 stats[acts->stat_idx]++;
1150 }
1151 if (status) /* clear processed interrupts */
1152 t3_write_reg(adapter, reg, status);
1153 return fatal;
1154}
1155
1156#define SGE_INTR_MASK (F_RSPQDISABLED)
1157#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1158 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1159 F_NFASRCHFAIL)
1160#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1161#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1162 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1163 F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
1164#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1165 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1166 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1167 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1168 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1169 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1170#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1171 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1172 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1173 V_BISTERR(M_BISTERR) | F_PEXERR)
1174#define ULPRX_INTR_MASK F_PARERR
1175#define ULPTX_INTR_MASK 0
1176#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
1177 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1178 F_ZERO_SWITCH_ERROR)
1179#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1180 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1181 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1182 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
1183#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1184 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1185 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1186#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1187 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1188 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1189#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1190 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1191 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1192 V_MCAPARERRENB(M_MCAPARERRENB))
1193#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1194 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1195 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1196 F_MPS0 | F_CPL_SWITCH)
1197
1198/*
1199 * Interrupt handler for the PCIX1 module.
1200 */
1201static void pci_intr_handler(struct adapter *adapter)
1202{
1203 static const struct intr_info pcix1_intr_info[] = {
1204 {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1205 {F_SIGTARABT, "PCI signaled target abort", -1, 1},
1206 {F_RCVTARABT, "PCI received target abort", -1, 1},
1207 {F_RCVMSTABT, "PCI received master abort", -1, 1},
1208 {F_SIGSYSERR, "PCI signaled system error", -1, 1},
1209 {F_DETPARERR, "PCI detected parity error", -1, 1},
1210 {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1211 {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1212 {F_RCVSPLCMPERR, "PCI received split completion error", -1,
1213 1},
1214 {F_DETCORECCERR, "PCI correctable ECC error",
1215 STAT_PCI_CORR_ECC, 0},
1216 {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1217 {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1218 {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1219 1},
1220 {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1221 1},
1222 {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1223 1},
1224 {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1225 "error", -1, 1},
1226 {0}
1227 };
1228
1229 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1230 pcix1_intr_info, adapter->irq_stats))
1231 t3_fatal_err(adapter);
1232}
1233
1234/*
1235 * Interrupt handler for the PCIE module.
1236 */
1237static void pcie_intr_handler(struct adapter *adapter)
1238{
1239 static const struct intr_info pcie_intr_info[] = {
1240 {F_PEXERR, "PCI PEX error", -1, 1},
1241 {F_UNXSPLCPLERRR,
1242 "PCI unexpected split completion DMA read error", -1, 1},
1243 {F_UNXSPLCPLERRC,
1244 "PCI unexpected split completion DMA command error", -1, 1},
1245 {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1246 {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1247 {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1248 {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1249 {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1250 "PCI MSI-X table/PBA parity error", -1, 1},
1251 {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
1252 {0}
1253 };
1254
1255 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1256 pcie_intr_info, adapter->irq_stats))
1257 t3_fatal_err(adapter);
1258}
1259
1260/*
1261 * TP interrupt handler.
1262 */
1263static void tp_intr_handler(struct adapter *adapter)
1264{
1265 static const struct intr_info tp_intr_info[] = {
1266 {0xffffff, "TP parity error", -1, 1},
1267 {0x1000000, "TP out of Rx pages", -1, 1},
1268 {0x2000000, "TP out of Tx pages", -1, 1},
1269 {0}
1270 };
1271
1272 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1273 tp_intr_info, NULL))
1274 t3_fatal_err(adapter);
1275}
1276
1277/*
1278 * CIM interrupt handler.
1279 */
1280static void cim_intr_handler(struct adapter *adapter)
1281{
1282 static const struct intr_info cim_intr_info[] = {
1283 {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1284 {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1285 {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1286 {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1287 {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1288 {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1289 {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1290 {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1291 {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1292 {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1293 {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1294 {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
1295 {0}
1296 };
1297
1298 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1299 cim_intr_info, NULL))
1300 t3_fatal_err(adapter);
1301}
1302
1303/*
1304 * ULP RX interrupt handler.
1305 */
1306static void ulprx_intr_handler(struct adapter *adapter)
1307{
1308 static const struct intr_info ulprx_intr_info[] = {
1309 {F_PARERR, "ULP RX parity error", -1, 1},
1310 {0}
1311 };
1312
1313 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1314 ulprx_intr_info, NULL))
1315 t3_fatal_err(adapter);
1316}
1317
1318/*
1319 * ULP TX interrupt handler.
1320 */
1321static void ulptx_intr_handler(struct adapter *adapter)
1322{
1323 static const struct intr_info ulptx_intr_info[] = {
1324 {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1325 STAT_ULP_CH0_PBL_OOB, 0},
1326 {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1327 STAT_ULP_CH1_PBL_OOB, 0},
1328 {0}
1329 };
1330
1331 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1332 ulptx_intr_info, adapter->irq_stats))
1333 t3_fatal_err(adapter);
1334}
1335
1336#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1337 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1338 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1339 F_ICSPI1_TX_FRAMING_ERROR)
1340#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1341 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1342 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1343 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1344
1345/*
1346 * PM TX interrupt handler.
1347 */
1348static void pmtx_intr_handler(struct adapter *adapter)
1349{
1350 static const struct intr_info pmtx_intr_info[] = {
1351 {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1352 {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1353 {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1354 {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1355 "PMTX ispi parity error", -1, 1},
1356 {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1357 "PMTX ospi parity error", -1, 1},
1358 {0}
1359 };
1360
1361 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1362 pmtx_intr_info, NULL))
1363 t3_fatal_err(adapter);
1364}
1365
1366#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1367 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1368 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1369 F_IESPI1_TX_FRAMING_ERROR)
1370#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1371 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1372 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1373 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1374
1375/*
1376 * PM RX interrupt handler.
1377 */
1378static void pmrx_intr_handler(struct adapter *adapter)
1379{
1380 static const struct intr_info pmrx_intr_info[] = {
1381 {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1382 {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1383 {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1384 {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1385 "PMRX ispi parity error", -1, 1},
1386 {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1387 "PMRX ospi parity error", -1, 1},
1388 {0}
1389 };
1390
1391 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1392 pmrx_intr_info, NULL))
1393 t3_fatal_err(adapter);
1394}
1395
1396/*
1397 * CPL switch interrupt handler.
1398 */
1399static void cplsw_intr_handler(struct adapter *adapter)
1400{
1401 static const struct intr_info cplsw_intr_info[] = {
1402/* { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 }, */
1403 {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1404 {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1405 {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1406 {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1407 {0}
1408 };
1409
1410 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1411 cplsw_intr_info, NULL))
1412 t3_fatal_err(adapter);
1413}
1414
1415/*
1416 * MPS interrupt handler.
1417 */
1418static void mps_intr_handler(struct adapter *adapter)
1419{
1420 static const struct intr_info mps_intr_info[] = {
1421 {0x1ff, "MPS parity error", -1, 1},
1422 {0}
1423 };
1424
1425 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1426 mps_intr_info, NULL))
1427 t3_fatal_err(adapter);
1428}
1429
1430#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1431
1432/*
1433 * MC7 interrupt handler.
1434 */
1435static void mc7_intr_handler(struct mc7 *mc7)
1436{
1437 struct adapter *adapter = mc7->adapter;
1438 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1439
1440 if (cause & F_CE) {
1441 mc7->stats.corr_err++;
1442 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1443 "data 0x%x 0x%x 0x%x\n", mc7->name,
1444 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1445 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1446 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1447 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1448 }
1449
1450 if (cause & F_UE) {
1451 mc7->stats.uncorr_err++;
1452 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1453 "data 0x%x 0x%x 0x%x\n", mc7->name,
1454 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1455 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1456 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1457 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1458 }
1459
1460 if (G_PE(cause)) {
1461 mc7->stats.parity_err++;
1462 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1463 mc7->name, G_PE(cause));
1464 }
1465
1466 if (cause & F_AE) {
1467 u32 addr = 0;
1468
1469 if (adapter->params.rev > 0)
1470 addr = t3_read_reg(adapter,
1471 mc7->offset + A_MC7_ERR_ADDR);
1472 mc7->stats.addr_err++;
1473 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1474 mc7->name, addr);
1475 }
1476
1477 if (cause & MC7_INTR_FATAL)
1478 t3_fatal_err(adapter);
1479
1480 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1481}
1482
1483#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1484 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1485/*
1486 * XGMAC interrupt handler.
1487 */
1488static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1489{
1490 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1491 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
1492
1493 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1494 mac->stats.tx_fifo_parity_err++;
1495 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1496 }
1497 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1498 mac->stats.rx_fifo_parity_err++;
1499 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1500 }
1501 if (cause & F_TXFIFO_UNDERRUN)
1502 mac->stats.tx_fifo_urun++;
1503 if (cause & F_RXFIFO_OVERFLOW)
1504 mac->stats.rx_fifo_ovfl++;
1505 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1506 mac->stats.serdes_signal_loss++;
1507 if (cause & F_XAUIPCSCTCERR)
1508 mac->stats.xaui_pcs_ctc_err++;
1509 if (cause & F_XAUIPCSALIGNCHANGE)
1510 mac->stats.xaui_pcs_align_change++;
1511
1512 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1513 if (cause & XGM_INTR_FATAL)
1514 t3_fatal_err(adap);
1515 return cause != 0;
1516}
1517
1518/*
1519 * Interrupt handler for PHY events.
1520 */
1521int t3_phy_intr_handler(struct adapter *adapter)
1522{
1523 static const int intr_gpio_bits[] = { 8, 0x20 };
1524
1525 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1526
1527 for_each_port(adapter, i) {
1528 if (cause & intr_gpio_bits[i]) {
1529 struct cphy *phy = &adap2pinfo(adapter, i)->phy;
1530 int phy_cause = phy->ops->intr_handler(phy);
1531
1532 if (phy_cause & cphy_cause_link_change)
1533 t3_link_changed(adapter, i);
1534 if (phy_cause & cphy_cause_fifo_error)
1535 phy->fifo_errors++;
1536 }
1537 }
1538
1539 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1540 return 0;
1541}
1542
1543/*
1544 * T3 slow path (non-data) interrupt handler.
1545 */
1546int t3_slow_intr_handler(struct adapter *adapter)
1547{
1548 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1549
1550 cause &= adapter->slow_intr_mask;
1551 if (!cause)
1552 return 0;
1553 if (cause & F_PCIM0) {
1554 if (is_pcie(adapter))
1555 pcie_intr_handler(adapter);
1556 else
1557 pci_intr_handler(adapter);
1558 }
1559 if (cause & F_SGE3)
1560 t3_sge_err_intr_handler(adapter);
1561 if (cause & F_MC7_PMRX)
1562 mc7_intr_handler(&adapter->pmrx);
1563 if (cause & F_MC7_PMTX)
1564 mc7_intr_handler(&adapter->pmtx);
1565 if (cause & F_MC7_CM)
1566 mc7_intr_handler(&adapter->cm);
1567 if (cause & F_CIM)
1568 cim_intr_handler(adapter);
1569 if (cause & F_TP1)
1570 tp_intr_handler(adapter);
1571 if (cause & F_ULP2_RX)
1572 ulprx_intr_handler(adapter);
1573 if (cause & F_ULP2_TX)
1574 ulptx_intr_handler(adapter);
1575 if (cause & F_PM1_RX)
1576 pmrx_intr_handler(adapter);
1577 if (cause & F_PM1_TX)
1578 pmtx_intr_handler(adapter);
1579 if (cause & F_CPL_SWITCH)
1580 cplsw_intr_handler(adapter);
1581 if (cause & F_MPS0)
1582 mps_intr_handler(adapter);
1583 if (cause & F_MC5A)
1584 t3_mc5_intr_handler(&adapter->mc5);
1585 if (cause & F_XGMAC0_0)
1586 mac_intr_handler(adapter, 0);
1587 if (cause & F_XGMAC0_1)
1588 mac_intr_handler(adapter, 1);
1589 if (cause & F_T3DBG)
1590 t3_os_ext_intr_handler(adapter);
1591
1592 /* Clear the interrupts just processed. */
1593 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1594 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1595 return 1;
1596}
1597
1598/**
1599 * t3_intr_enable - enable interrupts
1600 * @adapter: the adapter whose interrupts should be enabled
1601 *
1602 * Enable interrupts by setting the interrupt enable registers of the
1603 * various HW modules and then enabling the top-level interrupt
1604 * concentrator.
1605 */
1606void t3_intr_enable(struct adapter *adapter)
1607{
1608 static const struct addr_val_pair intr_en_avp[] = {
1609 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1610 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1611 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1612 MC7_INTR_MASK},
1613 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1614 MC7_INTR_MASK},
1615 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1616 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1617 {A_TP_INT_ENABLE, 0x3bfffff},
1618 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1619 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1620 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1621 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1622 };
1623
1624 adapter->slow_intr_mask = PL_INTR_MASK;
1625
1626 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1627
1628 if (adapter->params.rev > 0) {
1629 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1630 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1631 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1632 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1633 F_PBL_BOUND_ERR_CH1);
1634 } else {
1635 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1636 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1637 }
1638
1639 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
1640 adapter_info(adapter)->gpio_intr);
1641 t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
1642 adapter_info(adapter)->gpio_intr);
1643 if (is_pcie(adapter))
1644 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1645 else
1646 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1647 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1648 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1649}
1650
1651/**
1652 * t3_intr_disable - disable a card's interrupts
1653 * @adapter: the adapter whose interrupts should be disabled
1654 *
1655 * Disable interrupts. We only disable the top-level interrupt
1656 * concentrator and the SGE data interrupts.
1657 */
1658void t3_intr_disable(struct adapter *adapter)
1659{
1660 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1661 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1662 adapter->slow_intr_mask = 0;
1663}
1664
1665/**
1666 * t3_intr_clear - clear all interrupts
1667 * @adapter: the adapter whose interrupts should be cleared
1668 *
1669 * Clears all interrupts.
1670 */
1671void t3_intr_clear(struct adapter *adapter)
1672{
1673 static const unsigned int cause_reg_addr[] = {
1674 A_SG_INT_CAUSE,
1675 A_SG_RSPQ_FL_STATUS,
1676 A_PCIX_INT_CAUSE,
1677 A_MC7_INT_CAUSE,
1678 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1679 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1680 A_CIM_HOST_INT_CAUSE,
1681 A_TP_INT_CAUSE,
1682 A_MC5_DB_INT_CAUSE,
1683 A_ULPRX_INT_CAUSE,
1684 A_ULPTX_INT_CAUSE,
1685 A_CPL_INTR_CAUSE,
1686 A_PM1_TX_INT_CAUSE,
1687 A_PM1_RX_INT_CAUSE,
1688 A_MPS_INT_CAUSE,
1689 A_T3DBG_INT_CAUSE,
1690 };
1691 unsigned int i;
1692
1693 /* Clear PHY and MAC interrupts for each port. */
1694 for_each_port(adapter, i)
1695 t3_port_intr_clear(adapter, i);
1696
1697 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1698 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
1699
1700 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1701 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1702}
1703
1704/**
1705 * t3_port_intr_enable - enable port-specific interrupts
1706 * @adapter: associated adapter
1707 * @idx: index of port whose interrupts should be enabled
1708 *
1709 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1710 * adapter port.
1711 */
1712void t3_port_intr_enable(struct adapter *adapter, int idx)
1713{
1714 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1715
1716 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1717 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1718 phy->ops->intr_enable(phy);
1719}
1720
1721/**
1722 * t3_port_intr_disable - disable port-specific interrupts
1723 * @adapter: associated adapter
1724 * @idx: index of port whose interrupts should be disabled
1725 *
1726 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1727 * adapter port.
1728 */
1729void t3_port_intr_disable(struct adapter *adapter, int idx)
1730{
1731 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1732
1733 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1734 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1735 phy->ops->intr_disable(phy);
1736}
1737
1738/**
1739 * t3_port_intr_clear - clear port-specific interrupts
1740 * @adapter: associated adapter
1741 * @idx: index of port whose interrupts to clear
1742 *
1743 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1744 * adapter port.
1745 */
1746void t3_port_intr_clear(struct adapter *adapter, int idx)
1747{
1748 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1749
1750 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1751 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
1752 phy->ops->intr_clear(phy);
1753}
1754
1755/**
1756 * t3_sge_write_context - write an SGE context
1757 * @adapter: the adapter
1758 * @id: the context id
1759 * @type: the context type
1760 *
1761 * Program an SGE context with the values already loaded in the
1762 * CONTEXT_DATA? registers.
1763 */
1764static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
1765 unsigned int type)
1766{
1767 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1768 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1769 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1770 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
1771 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1772 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
1773 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1774 0, 5, 1);
1775}
1776
1777/**
1778 * t3_sge_init_ecntxt - initialize an SGE egress context
1779 * @adapter: the adapter to configure
1780 * @id: the context id
1781 * @gts_enable: whether to enable GTS for the context
1782 * @type: the egress context type
1783 * @respq: associated response queue
1784 * @base_addr: base address of queue
1785 * @size: number of queue entries
1786 * @token: uP token
1787 * @gen: initial generation value for the context
1788 * @cidx: consumer pointer
1789 *
1790 * Initialize an SGE egress context and make it ready for use. If the
1791 * platform allows concurrent context operations, the caller is
1792 * responsible for appropriate locking.
1793 */
1794int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
1795 enum sge_context_type type, int respq, u64 base_addr,
1796 unsigned int size, unsigned int token, int gen,
1797 unsigned int cidx)
1798{
1799 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
1800
1801 if (base_addr & 0xfff) /* must be 4K aligned */
1802 return -EINVAL;
1803 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1804 return -EBUSY;
1805
1806 base_addr >>= 12;
1807 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
1808 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
1809 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
1810 V_EC_BASE_LO(base_addr & 0xffff));
1811 base_addr >>= 16;
1812 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
1813 base_addr >>= 32;
1814 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1815 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
1816 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
1817 F_EC_VALID);
1818 return t3_sge_write_context(adapter, id, F_EGRESS);
1819}
1820
1821/**
1822 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
1823 * @adapter: the adapter to configure
1824 * @id: the context id
1825 * @gts_enable: whether to enable GTS for the context
1826 * @base_addr: base address of queue
1827 * @size: number of queue entries
1828 * @bsize: size of each buffer for this queue
1829 * @cong_thres: threshold to signal congestion to upstream producers
1830 * @gen: initial generation value for the context
1831 * @cidx: consumer pointer
1832 *
1833 * Initialize an SGE free list context and make it ready for use. The
1834 * caller is responsible for ensuring only one context operation occurs
1835 * at a time.
1836 */
1837int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
1838 int gts_enable, u64 base_addr, unsigned int size,
1839 unsigned int bsize, unsigned int cong_thres, int gen,
1840 unsigned int cidx)
1841{
1842 if (base_addr & 0xfff) /* must be 4K aligned */
1843 return -EINVAL;
1844 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1845 return -EBUSY;
1846
1847 base_addr >>= 12;
1848 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
1849 base_addr >>= 32;
1850 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
1851 V_FL_BASE_HI((u32) base_addr) |
1852 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
1853 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
1854 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
1855 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
1856 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1857 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
1858 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
1859 return t3_sge_write_context(adapter, id, F_FREELIST);
1860}
1861
1862/**
1863 * t3_sge_init_rspcntxt - initialize an SGE response queue context
1864 * @adapter: the adapter to configure
1865 * @id: the context id
1866 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
1867 * @base_addr: base address of queue
1868 * @size: number of queue entries
1869 * @fl_thres: threshold for selecting the normal or jumbo free list
1870 * @gen: initial generation value for the context
1871 * @cidx: consumer pointer
1872 *
1873 * Initialize an SGE response queue context and make it ready for use.
1874 * The caller is responsible for ensuring only one context operation
1875 * occurs at a time.
1876 */
1877int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
1878 int irq_vec_idx, u64 base_addr, unsigned int size,
1879 unsigned int fl_thres, int gen, unsigned int cidx)
1880{
1881 unsigned int intr = 0;
1882
1883 if (base_addr & 0xfff) /* must be 4K aligned */
1884 return -EINVAL;
1885 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1886 return -EBUSY;
1887
1888 base_addr >>= 12;
1889 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
1890 V_CQ_INDEX(cidx));
1891 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
1892 base_addr >>= 32;
1893 if (irq_vec_idx >= 0)
1894 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
1895 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1896 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
1897 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
1898 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
1899}
1900
1901/**
1902 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
1903 * @adapter: the adapter to configure
1904 * @id: the context id
1905 * @base_addr: base address of queue
1906 * @size: number of queue entries
1907 * @rspq: response queue for async notifications
1908 * @ovfl_mode: CQ overflow mode
1909 * @credits: completion queue credits
1910 * @credit_thres: the credit threshold
1911 *
1912 * Initialize an SGE completion queue context and make it ready for use.
1913 * The caller is responsible for ensuring only one context operation
1914 * occurs at a time.
1915 */
1916int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
1917 unsigned int size, int rspq, int ovfl_mode,
1918 unsigned int credits, unsigned int credit_thres)
1919{
1920 if (base_addr & 0xfff) /* must be 4K aligned */
1921 return -EINVAL;
1922 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1923 return -EBUSY;
1924
1925 base_addr >>= 12;
1926 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
1927 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
1928 base_addr >>= 32;
1929 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1930 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
1931 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode));
1932 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
1933 V_CQ_CREDIT_THRES(credit_thres));
1934 return t3_sge_write_context(adapter, id, F_CQ);
1935}
1936
1937/**
1938 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
1939 * @adapter: the adapter
1940 * @id: the egress context id
1941 * @enable: enable (1) or disable (0) the context
1942 *
1943 * Enable or disable an SGE egress context. The caller is responsible for
1944 * ensuring only one context operation occurs at a time.
1945 */
1946int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
1947{
1948 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1949 return -EBUSY;
1950
1951 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1952 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1953 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
1954 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
1955 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
1956 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1957 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
1958 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1959 0, 5, 1);
1960}
1961
1962/**
1963 * t3_sge_disable_fl - disable an SGE free-buffer list
1964 * @adapter: the adapter
1965 * @id: the free list context id
1966 *
1967 * Disable an SGE free-buffer list. The caller is responsible for
1968 * ensuring only one context operation occurs at a time.
1969 */
1970int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
1971{
1972 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1973 return -EBUSY;
1974
1975 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1976 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1977 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
1978 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
1979 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
1980 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1981 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
1982 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1983 0, 5, 1);
1984}
1985
1986/**
1987 * t3_sge_disable_rspcntxt - disable an SGE response queue
1988 * @adapter: the adapter
1989 * @id: the response queue context id
1990 *
1991 * Disable an SGE response queue. The caller is responsible for
1992 * ensuring only one context operation occurs at a time.
1993 */
1994int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
1995{
1996 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1997 return -EBUSY;
1998
1999 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2000 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2001 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2002 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2003 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2004 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2005 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2006 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2007 0, 5, 1);
2008}
2009
2010/**
2011 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2012 * @adapter: the adapter
2013 * @id: the completion queue context id
2014 *
2015 * Disable an SGE completion queue. The caller is responsible for
2016 * ensuring only one context operation occurs at a time.
2017 */
2018int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2019{
2020 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2021 return -EBUSY;
2022
2023 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2024 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2025 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2026 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2027 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2028 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2029 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2030 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2031 0, 5, 1);
2032}
2033
2034/**
2035 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2036 * @adapter: the adapter
2037 * @id: the context id
2038 * @op: the operation to perform
2039 *
2040 * Perform the selected operation on an SGE completion queue context.
2041 * The caller is responsible for ensuring only one context operation
2042 * occurs at a time.
2043 */
2044int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2045 unsigned int credits)
2046{
2047 u32 val;
2048
2049 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2050 return -EBUSY;
2051
2052 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2053 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2054 V_CONTEXT(id) | F_CQ);
2055 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2056 0, 5, 1, &val))
2057 return -EIO;
2058
2059 if (op >= 2 && op < 7) {
2060 if (adapter->params.rev > 0)
2061 return G_CQ_INDEX(val);
2062
2063 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2064 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2065 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2066 F_CONTEXT_CMD_BUSY, 0, 5, 1))
2067 return -EIO;
2068 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2069 }
2070 return 0;
2071}
2072
2073/**
2074 * t3_sge_read_context - read an SGE context
2075 * @type: the context type
2076 * @adapter: the adapter
2077 * @id: the context id
2078 * @data: holds the retrieved context
2079 *
2080 * Read an SGE egress context. The caller is responsible for ensuring
2081 * only one context operation occurs at a time.
2082 */
2083static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2084 unsigned int id, u32 data[4])
2085{
2086 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2087 return -EBUSY;
2088
2089 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2090 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2091 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2092 5, 1))
2093 return -EIO;
2094 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2095 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2096 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2097 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2098 return 0;
2099}
2100
2101/**
2102 * t3_sge_read_ecntxt - read an SGE egress context
2103 * @adapter: the adapter
2104 * @id: the context id
2105 * @data: holds the retrieved context
2106 *
2107 * Read an SGE egress context. The caller is responsible for ensuring
2108 * only one context operation occurs at a time.
2109 */
2110int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2111{
2112 if (id >= 65536)
2113 return -EINVAL;
2114 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2115}
2116
2117/**
2118 * t3_sge_read_cq - read an SGE CQ context
2119 * @adapter: the adapter
2120 * @id: the context id
2121 * @data: holds the retrieved context
2122 *
2123 * Read an SGE CQ context. The caller is responsible for ensuring
2124 * only one context operation occurs at a time.
2125 */
2126int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2127{
2128 if (id >= 65536)
2129 return -EINVAL;
2130 return t3_sge_read_context(F_CQ, adapter, id, data);
2131}
2132
2133/**
2134 * t3_sge_read_fl - read an SGE free-list context
2135 * @adapter: the adapter
2136 * @id: the context id
2137 * @data: holds the retrieved context
2138 *
2139 * Read an SGE free-list context. The caller is responsible for ensuring
2140 * only one context operation occurs at a time.
2141 */
2142int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2143{
2144 if (id >= SGE_QSETS * 2)
2145 return -EINVAL;
2146 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2147}
2148
2149/**
2150 * t3_sge_read_rspq - read an SGE response queue context
2151 * @adapter: the adapter
2152 * @id: the context id
2153 * @data: holds the retrieved context
2154 *
2155 * Read an SGE response queue context. The caller is responsible for
2156 * ensuring only one context operation occurs at a time.
2157 */
2158int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2159{
2160 if (id >= SGE_QSETS)
2161 return -EINVAL;
2162 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2163}
2164
2165/**
2166 * t3_config_rss - configure Rx packet steering
2167 * @adapter: the adapter
2168 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2169 * @cpus: values for the CPU lookup table (0xff terminated)
2170 * @rspq: values for the response queue lookup table (0xffff terminated)
2171 *
2172 * Programs the receive packet steering logic. @cpus and @rspq provide
2173 * the values for the CPU and response queue lookup tables. If they
2174 * provide fewer values than the size of the tables the supplied values
2175 * are used repeatedly until the tables are fully populated.
2176 */
2177void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2178 const u8 * cpus, const u16 *rspq)
2179{
2180 int i, j, cpu_idx = 0, q_idx = 0;
2181
2182 if (cpus)
2183 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2184 u32 val = i << 16;
2185
2186 for (j = 0; j < 2; ++j) {
2187 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2188 if (cpus[cpu_idx] == 0xff)
2189 cpu_idx = 0;
2190 }
2191 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2192 }
2193
2194 if (rspq)
2195 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2196 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2197 (i << 16) | rspq[q_idx++]);
2198 if (rspq[q_idx] == 0xffff)
2199 q_idx = 0;
2200 }
2201
2202 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2203}
2204
2205/**
2206 * t3_read_rss - read the contents of the RSS tables
2207 * @adapter: the adapter
2208 * @lkup: holds the contents of the RSS lookup table
2209 * @map: holds the contents of the RSS map table
2210 *
2211 * Reads the contents of the receive packet steering tables.
2212 */
2213int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
2214{
2215 int i;
2216 u32 val;
2217
2218 if (lkup)
2219 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2220 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2221 0xffff0000 | i);
2222 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2223 if (!(val & 0x80000000))
2224 return -EAGAIN;
2225 *lkup++ = val;
2226 *lkup++ = (val >> 8);
2227 }
2228
2229 if (map)
2230 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2231 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2232 0xffff0000 | i);
2233 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2234 if (!(val & 0x80000000))
2235 return -EAGAIN;
2236 *map++ = val;
2237 }
2238 return 0;
2239}
2240
2241/**
2242 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2243 * @adap: the adapter
2244 * @enable: 1 to select offload mode, 0 for regular NIC
2245 *
2246 * Switches TP to NIC/offload mode.
2247 */
2248void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2249{
2250 if (is_offload(adap) || !enable)
2251 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2252 V_NICMODE(!enable));
2253}
2254
2255/**
2256 * pm_num_pages - calculate the number of pages of the payload memory
2257 * @mem_size: the size of the payload memory
2258 * @pg_size: the size of each payload memory page
2259 *
2260 * Calculate the number of pages, each of the given size, that fit in a
2261 * memory of the specified size, respecting the HW requirement that the
2262 * number of pages must be a multiple of 24.
2263 */
2264static inline unsigned int pm_num_pages(unsigned int mem_size,
2265 unsigned int pg_size)
2266{
2267 unsigned int n = mem_size / pg_size;
2268
2269 return n - n % 24;
2270}
2271
/*
 * Assign @size bytes starting at @start to the base-address register
 * A_<reg>, then advance @start past the region.  Wrapped in do { } while (0)
 * so the two statements stay a single unit if the macro ever appears in an
 * unbraced if/else; arguments are parenthesized against operator-precedence
 * surprises.  Current call sites (statement context) are unaffected.
 */
#define mem_region(adap, start, size, reg) do { \
	t3_write_reg((adap), A_ ## reg, (start)); \
	(start) += (size); \
} while (0)
2275
2276/*
2277 * partition_mem - partition memory and configure TP memory settings
2278 * @adap: the adapter
2279 * @p: the TP parameters
2280 *
2281 * Partitions context and payload memory and configures TP's memory
2282 * registers.
2283 */
2284static void partition_mem(struct adapter *adap, const struct tp_params *p)
2285{
2286 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2287 unsigned int timers = 0, timers_shift = 22;
2288
2289 if (adap->params.rev > 0) {
2290 if (tids <= 16 * 1024) {
2291 timers = 1;
2292 timers_shift = 16;
2293 } else if (tids <= 64 * 1024) {
2294 timers = 2;
2295 timers_shift = 18;
2296 } else if (tids <= 256 * 1024) {
2297 timers = 3;
2298 timers_shift = 20;
2299 }
2300 }
2301
2302 t3_write_reg(adap, A_TP_PMM_SIZE,
2303 p->chan_rx_size | (p->chan_tx_size >> 16));
2304
2305 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2306 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2307 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2308 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2309 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2310
2311 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2312 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2313 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2314
2315 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2316 /* Add a bit of headroom and make multiple of 24 */
2317 pstructs += 48;
2318 pstructs -= pstructs % 24;
2319 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2320
2321 m = tids * TCB_SIZE;
2322 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2323 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2324 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2325 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2326 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2327 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2328 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2329 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2330
2331 m = (m + 4095) & ~0xfff;
2332 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2333 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2334
2335 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2336 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2337 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2338 if (tids < m)
2339 adap->params.mc5.nservers += m - tids;
2340}
2341
/*
 * Write @val to the indirect TP register at @addr: latch the address in
 * A_TP_PIO_ADDR, then push the data through A_TP_PIO_DATA.  The two MMIO
 * writes must occur in exactly this order.
 */
static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
				  u32 val)
{
	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
	t3_write_reg(adap, A_TP_PIO_DATA, val);
}
2348
/*
 * One-time TP (transport processor) configuration: global options, TCP
 * defaults (MTU, window scaling, timestamps, SACK), delayed-ACK policy,
 * and revision-dependent pacing/egress settings.
 * NOTE(review): several raw register values below (0x18141814, 0x5050105,
 * 0x12121212, 0x1212) have no named constants — verify against the TP
 * register spec before changing.
 */
static void tp_config(struct adapter *adap, const struct tp_params *p)
{
	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
		     V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
			 F_IPV6ENABLE | F_NICMODE);
	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
	/* ESND uses a different bit position on rev-0 (T3A) silicon. */
	t3_set_reg_field(adap, A_TP_PARA_REG6,
			 adap->params.rev > 0 ? F_ENABLEESND : F_T3A_ENABLEESND,
			 0);

	t3_set_reg_field(adap, A_TP_PC_CONFIG,
			 F_ENABLEEPCMDAFULL | F_ENABLEOCSPIFULL,
			 F_TXDEFERENABLE | F_HEARBEATDACK | F_TXCONGESTIONMODE |
			 F_RXCONGESTIONMODE);
	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);

	/* Rev > 0 gains auto TX pacing and locked TIDs. */
	if (adap->params.rev > 0) {
		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
				 F_TXPACEAUTO);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
	} else
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);

	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0x12121212);
	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0x12121212);
	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0x1212);
}
2388
/* Desired TP timer resolution in usec */
#define TP_TMR_RES 50

/* TCP timer values in ms */
#define TP_DACK_TIMER 50
#define TP_RTO_MIN 250

/**
 *	tp_set_timers - set TP timing parameters
 *	@adap: the adapter to set
 *	@core_clk: the core clock frequency in Hz
 *
 *	Set TP's timing parameters, such as the various timer resolutions and
 *	the TCP timer values.  Resolutions are expressed as log2 divisors of
 *	the core clock; absolute timer values are programmed in units of the
 *	derived tick rate.
 */
static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
{
	/* log2 of the clock divisor giving ~TP_TMR_RES us per tick */
	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
	unsigned int dack_re = fls(core_clk / 5000) - 1;	/* 200us */
	unsigned int tstamp_re = fls(core_clk / 1000);	/* 1ms, at least */
	unsigned int tps = core_clk >> tre;	/* timer ticks per second */

	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
		     V_DELAYEDACKRESOLUTION(dack_re) |
		     V_TIMESTAMPRESOLUTION(tstamp_re));
	t3_write_reg(adap, A_TP_DACK_TIMER,
		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
	/* Exponential backoff shift table, four 8-bit entries per register. */
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
		     V_KEEPALIVEMAX(9));

	/* Readability macro: "N SECONDS" expands to N * tps timer ticks. */
#define SECONDS * tps

	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);

#undef SECONDS
}
2439
2440/**
2441 * t3_tp_set_coalescing_size - set receive coalescing size
2442 * @adap: the adapter
2443 * @size: the receive coalescing size
2444 * @psh: whether a set PSH bit should deliver coalesced data
2445 *
2446 * Set the receive coalescing size and PSH bit handling.
2447 */
2448int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2449{
2450 u32 val;
2451
2452 if (size > MAX_RX_COALESCING_LEN)
2453 return -EINVAL;
2454
2455 val = t3_read_reg(adap, A_TP_PARA_REG3);
2456 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2457
2458 if (size) {
2459 val |= F_RXCOALESCEENABLE;
2460 if (psh)
2461 val |= F_RXCOALESCEPSHEN;
2462 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2463 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2464 }
2465 t3_write_reg(adap, A_TP_PARA_REG3, val);
2466 return 0;
2467}
2468
/**
 * t3_tp_set_max_rxsize - set the max receive size
 * @adap: the adapter
 * @size: the max receive size
 *
 * Set TP's max receive size.  This is the limit that applies when
 * receive coalescing is disabled.
 */
void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
{
	/* the same limit is applied to both PM transfer-length fields */
	t3_write_reg(adap, A_TP_PARA_REG7,
		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
}
2482
2483static void __devinit init_mtus(unsigned short mtus[])
2484{
2485 /*
2486 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2487 * it can accomodate max size TCP/IP headers when SACK and timestamps
2488 * are enabled and still have at least 8 bytes of payload.
2489 */
2490 mtus[0] = 88;
2491 mtus[1] = 256;
2492 mtus[2] = 512;
2493 mtus[3] = 576;
2494 mtus[4] = 808;
2495 mtus[5] = 1024;
2496 mtus[6] = 1280;
2497 mtus[7] = 1492;
2498 mtus[8] = 1500;
2499 mtus[9] = 2002;
2500 mtus[10] = 2048;
2501 mtus[11] = 4096;
2502 mtus[12] = 4352;
2503 mtus[13] = 8192;
2504 mtus[14] = 9000;
2505 mtus[15] = 9600;
2506}
2507
2508/*
2509 * Initial congestion control parameters.
2510 */
2511static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2512{
2513 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2514 a[9] = 2;
2515 a[10] = 3;
2516 a[11] = 4;
2517 a[12] = 5;
2518 a[13] = 6;
2519 a[14] = 7;
2520 a[15] = 8;
2521 a[16] = 9;
2522 a[17] = 10;
2523 a[18] = 14;
2524 a[19] = 17;
2525 a[20] = 21;
2526 a[21] = 25;
2527 a[22] = 30;
2528 a[23] = 35;
2529 a[24] = 45;
2530 a[25] = 60;
2531 a[26] = 80;
2532 a[27] = 100;
2533 a[28] = 200;
2534 a[29] = 300;
2535 a[30] = 400;
2536 a[31] = 500;
2537
2538 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2539 b[9] = b[10] = 1;
2540 b[11] = b[12] = 2;
2541 b[13] = b[14] = b[15] = b[16] = 3;
2542 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2543 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2544 b[28] = b[29] = 6;
2545 b[30] = b[31] = 7;
2546}
2547
2548/* The minimum additive increment value for the congestion control table */
2549#define CC_MIN_INCR 2U
2550
/**
 * t3_load_mtus - write the MTU and congestion control HW tables
 * @adap: the adapter
 * @mtus: the unrestricted values for the MTU table
 * @alpha: the values for the congestion control alpha parameter
 * @beta: the values for the congestion control beta parameter
 * @mtu_cap: the maximum permitted effective MTU
 *
 * Write the MTU table with the supplied MTUs capping each at &mtu_cap.
 * Update the high-speed congestion control table with the supplied alpha,
 * beta, and MTUs.
 */
void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
		  unsigned short alpha[NCCTRL_WIN],
		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
{
	/* expected average packets per window, indexed by window size */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = min(mtus[i], mtu_cap);
		unsigned int log2 = fls(mtu);

		/* round log2 to the nearest power of 2 instead of
		 * always truncating */
		if (!(mtu & ((1 << log2) >> 2)))
			log2--;
		t3_write_reg(adap, A_TP_MTU_TABLE,
			     (i << 24) | (log2 << 16) | mtu);

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* additive increment scaled by alpha, clamped below
			 * at CC_MIN_INCR; mtu - 40 presumably subtracts the
			 * TCP/IP header size — TODO confirm */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
2595
2596/**
2597 * t3_read_hw_mtus - returns the values in the HW MTU table
2598 * @adap: the adapter
2599 * @mtus: where to store the HW MTU values
2600 *
2601 * Reads the HW MTU table.
2602 */
2603void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2604{
2605 int i;
2606
2607 for (i = 0; i < NMTUS; ++i) {
2608 unsigned int val;
2609
2610 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2611 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2612 mtus[i] = val & 0x3fff;
2613 }
2614}
2615
/**
 * t3_get_cong_cntl_tab - reads the congestion control table
 * @adap: the adapter
 * @incr: where to store the alpha values
 *
 * Reads the additive increments programmed into the HW congestion
 * control table.
 */
void t3_get_cong_cntl_tab(struct adapter *adap,
			  unsigned short incr[NMTUS][NCCTRL_WIN])
{
	unsigned int mtu, w;

	for (mtu = 0; mtu < NMTUS; ++mtu)
		for (w = 0; w < NCCTRL_WIN; ++w) {
			/* select the (mtu, window) entry, then read it back;
			 * the increment occupies the low 13 bits */
			t3_write_reg(adap, A_TP_CCTRL_TABLE,
				     0xffff0000 | (mtu << 5) | w);
			incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
				       0x1fff;
		}
}
2637
/**
 * t3_tp_get_mib_stats - read TP's MIB counters
 * @adap: the adapter
 * @tps: holds the returned counter values
 *
 * Returns the values of TP's MIB counters.
 */
void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
{
	/* dump the whole counter block as consecutive 32-bit words */
	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
			 sizeof(*tps) / sizeof(u32), 0);
}
2650
/*
 * Program a ULP RX memory region: write the region's inclusive
 * [start, start + len - 1] bounds and advance @start past the region.
 * Wrapped in do/while(0) so the multi-statement expansion stays safe
 * inside unbraced if/else bodies.
 */
#define ulp_region(adap, name, start, len) \
	do { \
		t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
		t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
			     (start) + (len) - 1); \
		(start) += (len); \
	} while (0)

/* Program a ULP TX memory region; unlike ulp_region(), @start is not
 * advanced, so the following region will overlap this one. */
#define ulptx_region(adap, name, start, len) \
	do { \
		t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
		t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
			     (start) + (len) - 1); \
	} while (0)
2661
/*
 * Carve the per-channel Rx memory into the ULP regions.  Note that
 * ulp_region() advances m past each region while ulptx_region() does not,
 * so each ULP TX region deliberately shares its address range with the
 * ULP RX region programmed right after it.
 */
static void ulp_config(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m = p->chan_rx_size;

	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
	/* match all tag bits when looking up TDDP tags */
	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
}
2675
/*
 * t3_config_trace_filter - configure one of the tracing filters
 * @adapter: the adapter
 * @tp: the trace filter key/mask values
 * @filter_index: nonzero selects the Rx trace filter, zero the Tx one
 * @invert: invert the sense of the filter match
 * @enable: enable or disable the filter
 *
 * Packs the filter fields into four key words and four interleaved mask
 * words and writes them to the selected filter's register block.
 */
void t3_config_trace_filter(struct adapter *adapter,
			    const struct trace_params *tp, int filter_index,
			    int invert, int enable)
{
	u32 addr, key[4], mask[4];

	key[0] = tp->sport | (tp->sip << 16);
	key[1] = (tp->sip >> 16) | (tp->dport << 16);
	key[2] = tp->dip;
	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);

	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
	mask[2] = tp->dip_mask;
	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);

	/* the invert and enable flags live in the top key word */
	if (invert)
		key[3] |= (1 << 29);
	if (enable)
		key[3] |= (1 << 28);

	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
	tp_wr_indirect(adapter, addr++, key[0]);
	tp_wr_indirect(adapter, addr++, mask[0]);
	tp_wr_indirect(adapter, addr++, key[1]);
	tp_wr_indirect(adapter, addr++, mask[1]);
	tp_wr_indirect(adapter, addr++, key[2]);
	tp_wr_indirect(adapter, addr++, mask[2]);
	tp_wr_indirect(adapter, addr++, key[3]);
	tp_wr_indirect(adapter, addr, mask[3]);
	t3_read_reg(adapter, A_TP_PIO_DATA);	/* flush */
}
2708
/**
 * t3_config_sched - configure a HW traffic scheduler
 * @adap: the adapter
 * @kbps: target rate in Kbps
 * @sched: the scheduler index
 *
 * Configure a HW scheduler for the target rate.
 */
int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
{
	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
	unsigned int clk = adap->params.vpd.cclk * 1000;	/* core clk, Hz */
	unsigned int selected_cpt = 0, selected_bpt = 0;

	if (kbps > 0) {
		kbps *= 125;	/* Kbits/s -> bytes/s */
		/*
		 * Search all (clocks-per-tick, bytes-per-tick) pairs for the
		 * one whose effective rate is closest to the target rate.
		 */
		for (cpt = 1; cpt <= 255; cpt++) {
			tps = clk / cpt;	/* scheduler ticks per second */
			bpt = (kbps + tps / 2) / tps;
			if (bpt > 0 && bpt <= 255) {
				v = bpt * tps;	/* achievable rate */
				delta = v >= kbps ? v - kbps : kbps - v;
				if (delta <= mindelta) {
					mindelta = delta;
					selected_cpt = cpt;
					selected_bpt = bpt;
				}
			} else if (selected_cpt)
				break;	/* bpt only grows past this point */
		}
		if (!selected_cpt)
			return -EINVAL;
	}
	/* each PIO word holds two schedulers; sched's parity picks the half */
	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
	else
		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
	return 0;
}
2752
/*
 * One-time TP initialization: basic TP config plus, for offload-capable
 * adapters, TCP timer setup and FLST initialization.  Returns nonzero if
 * initialization timed out (in which case TP is not reset).
 */
static int tp_init(struct adapter *adap, const struct tp_params *p)
{
	int busy = 0;

	tp_config(adap, p);
	t3_set_vlan_accel(adap, 3, 0);

	if (is_offload(adap)) {
		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
		/* kick off FLST init and wait for the enable bit to clear */
		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
				       0, 1000, 5);
		if (busy)
			CH_ERR(adap, "TP initialization timed out\n");
	}

	if (!busy)
		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
	return busy;
}
2773
2774int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
2775{
2776 if (port_mask & ~((1 << adap->params.nports) - 1))
2777 return -EINVAL;
2778 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
2779 port_mask << S_PORT0ACTIVE);
2780 return 0;
2781}
2782
/*
 * Perform the bits of HW initialization that are dependent on the number
 * of available ports.
 */
static void init_hw_for_avail_ports(struct adapter *adap, int nports)
{
	int i;

	if (nports == 1) {
		/* single port: disable ULP round-robin arbitration and
		 * activate port 0 only */
		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
			     F_PORT0ACTIVE | F_ENFORCEPKT);
		t3_write_reg(adap, A_PM1_TX_CFG, 0xc000c000);
	} else {
		/* two ports: round-robin ULP arbitration with equal DMA
		 * weights, both ports active */
		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
			     F_ENFORCEPKT);
		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
		/* fill all 16 Tx modulation queue table entries */
		for (i = 0; i < 16; i++)
			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
				     (i << 16) | 0x1010);
	}
}
2814
/*
 * Run impedance calibration for the MAC interface.  XAUI interfaces are
 * calibrated by the HW, retrying up to 5 times; RGMII interfaces get fixed
 * pull-up/pull-down values.  Returns 0 on success, -1 if XAUI calibration
 * fails.
 */
static int calibrate_xgm(struct adapter *adapter)
{
	if (uses_xaui(adapter)) {
		unsigned int v, i;

		for (i = 0; i < 5; ++i) {
			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
			t3_read_reg(adapter, A_XGM_XAUI_IMP);	/* flush */
			msleep(1);
			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
				/* latch the calibrated impedance value */
				t3_write_reg(adapter, A_XGM_XAUI_IMP,
					     V_XAUIIMP(G_CALIMP(v) >> 2));
				return 0;
			}
		}
		CH_ERR(adapter, "MAC calibration failed\n");
		return -1;
	} else {
		t3_write_reg(adapter, A_XGM_RGMII_IMP,
			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 F_XGM_IMPSETUPDATE);
	}
	return 0;
}
2841
/*
 * T3B variant of RGMII impedance calibration: reset the calibration
 * engine, then pulse the update bits to latch the new values.  Nothing is
 * done here for XAUI interfaces.
 */
static void calibrate_xgm_t3b(struct adapter *adapter)
{
	if (!uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
				 F_XGM_IMPSETUPDATE);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
	}
}
2856
/* MC7 memory controller timing parameters; used by mc7_init() to program
 * A_MC7_PARM.  Units are presumably memory clock cycles — TODO confirm. */
struct mc7_timing_params {
	unsigned char ActToPreDly;	/* active to precharge delay */
	unsigned char ActToRdWrDly;	/* active to read/write delay */
	unsigned char PreCyc;		/* precharge cycles */
	unsigned char RefCyc[5];	/* refresh cycles, indexed by density */
	unsigned char BkCyc;		/* bank cycles */
	unsigned char WrToRdDly;	/* write to read delay */
	unsigned char RdToWrDly;	/* read to write delay */
};
2866
/*
 * Write a value to an MC7 register and check that the write completed.
 * These writes normally complete in a cycle or two, so one read should
 * suffice.  The very first read exists to flush the posted write to the
 * device.  Returns 0 on success, -EIO if the device still reports busy.
 */
static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
{
	t3_write_reg(adapter, addr, val);
	t3_read_reg(adapter, addr);	/* flush */
	if (!(t3_read_reg(adapter, addr) & F_BUSY))
		return 0;
	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
	return -EIO;
}
2881
/*
 * Bring up one MC7 memory controller and the memory behind it.
 * @mc7_clock is the memory clock in KHz; @mem_type indexes the mode and
 * timing tables below.  Returns 0 on success, -1 on any failure.
 */
static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
{
	/* memory mode-register values, indexed by mem_type */
	static const unsigned int mc7_mode[] = {
		0x632, 0x642, 0x652, 0x432, 0x442
	};
	/* controller timing parameters, indexed by mem_type */
	static const struct mc7_timing_params mc7_timings[] = {
		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
	};

	u32 val;
	unsigned int width, density, slow, attempts;
	struct adapter *adapter = mc7->adapter;
	const struct mc7_timing_params *p = &mc7_timings[mem_type];

	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	slow = val & F_SLOW;
	width = G_WIDTH(val);
	density = G_DEN(val);

	/* enable the memory interface */
	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
	msleep(1);

	if (!slow) {
		/* single-shot calibration; busy or fault bits still set
		 * after 1ms means it failed */
		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);	/* flush */
		msleep(1);
		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
			CH_ERR(adapter, "%s MC7 calibration timed out\n",
			       mc7->name);
			goto out_fail;
		}
	}

	/* program the timing parameters for this memory type and density */
	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
		     V_ACTTOPREDLY(p->ActToPreDly) |
		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
		     val | F_CLKEN | F_TERM150);
	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */

	if (!slow)
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
				 F_DLLENB);
	udelay(1);

	/* memory initialization sequence: precharge, extended mode
	 * registers, then the mode register itself */
	val = slow ? 3 : 6;
	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
		udelay(5);
	}

	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
		       mc7_mode[mem_type]) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	/* clock value is in KHz */
	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;	/* ns */
	mc7_clock /= 1000000;	/* KHz->MHz, ns->us */

	/* enable periodic refresh with the computed divider */
	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
		     F_PERREFEN | V_PREREFDIV(mc7_clock));
	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */

	/* enable ECC and run a BIST operation over the entire memory
	 * (presumably a write pass to initialize data and ECC — confirm) */
	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
		     (mc7->size << width) - 1);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */

	/* allow up to 50 * 250ms = 12.5s for the BIST pass to finish */
	attempts = 50;
	do {
		msleep(250);
		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
	} while ((val & F_BUSY) && --attempts);
	if (val & F_BUSY) {
		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
		goto out_fail;
	}

	/* Enable normal memory accesses. */
	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
	return 0;

out_fail:
	return -1;
}
2991
/*
 * Configure the PCIe ACK latency and replay timer limits from the
 * negotiated link width, the max payload size, and the fast-training
 * sequence counts, then clear PEX error status.
 */
static void config_pcie(struct adapter *adap)
{
	/* ACK latency values, indexed by [log2(link width)][payload code] */
	static const u16 ack_lat[4][6] = {
		{237, 416, 559, 1071, 2095, 4143},
		{128, 217, 289, 545, 1057, 2081},
		{73, 118, 154, 282, 538, 1050},
		{67, 107, 86, 150, 278, 534}
	};
	/* replay timer limits, same indexing */
	static const u16 rpl_tmr[4][6] = {
		{711, 1248, 1677, 3213, 6285, 12429},
		{384, 651, 867, 1635, 3171, 6243},
		{219, 354, 462, 846, 1614, 3150},
		{201, 321, 258, 450, 834, 1602}
	};

	u16 val;
	unsigned int log2_width, pldsize;
	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;

	/* max payload size, as a 3-bit code from Device Control */
	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
			     &val);
	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
			     &val);

	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
	/* rev 0 has no separate Rx count; reuse the Tx value */
	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
	log2_width = fls(adap->params.pci.width) - 1;
	acklat = ack_lat[log2_width][pldsize];
	if (val & 1)		/* check LOsEnable */
		acklat += fst_trn_tx * 4;
	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

	/* rev 0 uses a different ACK latency field layout */
	if (adap->params.rev == 0)
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
				 V_T3A_ACKLAT(M_T3A_ACKLAT),
				 V_T3A_ACKLAT(acklat));
	else
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
				 V_ACKLAT(acklat));

	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
			 V_REPLAYLMT(rpllmt));

	/* clear any outstanding PEX errors */
	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
	t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
}
3042
/*
 * Initialize and configure T3 HW modules.  This performs the
 * initialization steps that need to be done once after a card is reset.
 * MAC and PHY initialization is handled separately whenever a port is
 * enabled.
 *
 * fw_params are passed to FW and their value is platform dependent.  Only the
 * top 8 bits are available for use, the rest must be 0.
 *
 * Returns 0 on success, -EIO (or a calibration/init error) on failure.
 */
int t3_init_hw(struct adapter *adapter, u32 fw_params)
{
	int err = -EIO, attempts = 100;
	const struct vpd_params *vpd = &adapter->params.vpd;

	/* rev 0 and later revisions need different MAC calibration */
	if (adapter->params.rev > 0)
		calibrate_xgm_t3b(adapter);
	else if (calibrate_xgm(adapter))
		goto out_err;

	if (vpd->mclk) {
		/* card has memory: partition it and bring up the memory
		 * controllers and the MC5 lookup engine */
		partition_mem(adapter, &adapter->params.tp);

		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
				adapter->params.mc5.nfilters,
				adapter->params.mc5.nroutes))
			goto out_err;
	}

	if (tp_init(adapter, &adapter->params.tp))
		goto out_err;

	t3_tp_set_coalescing_size(adapter,
				  min(adapter->params.sge.max_pkt_size,
				      MAX_RX_COALESCING_LEN), 1);
	t3_tp_set_max_rxsize(adapter,
			     min(adapter->params.sge.max_pkt_size, 16384U));
	ulp_config(adapter, &adapter->params.tp);

	if (is_pcie(adapter))
		config_pcie(adapter);
	else
		t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);

	t3_write_reg(adapter, A_PM1_RX_CFG, 0xf000f000);
	init_hw_for_avail_ports(adapter, adapter->params.nports);
	t3_sge_init(adapter, &adapter->params.sge);

	/* hand the boot parameters to the uP and point it at the FW image */
	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
	t3_write_reg(adapter, A_CIM_BOOT_CFG,
		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */

	do {			/* wait for uP to initialize */
		msleep(20);
	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
	if (!attempts)
		goto out_err;

	err = 0;
out_err:
	return err;
}
3107
/**
 * get_pci_mode - determine a card's PCI mode
 * @adapter: the adapter
 * @p: where to store the PCI settings
 *
 * Determines a card's PCI mode and associated parameters, such as speed
 * and width.
 */
static void __devinit get_pci_mode(struct adapter *adapter,
				   struct pci_params *p)
{
	static unsigned short speed_map[] = { 33, 66, 100, 133 };
	u32 pci_mode, pcie_cap;

	pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		u16 val;

		p->variant = PCI_VARIANT_PCIE;
		p->pcie_cap_addr = pcie_cap;
		/* negotiated link width is in bits 9:4 of Link Status */
		pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
				     &val);
		p->width = (val >> 4) & 0x3f;
		return;
	}

	/* not PCIe: classify the PCI/PCI-X mode from the HW mode register */
	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
	p->width = (pci_mode & F_64BIT) ? 64 : 32;
	pci_mode = G_PCIXINITPAT(pci_mode);
	if (pci_mode == 0)
		p->variant = PCI_VARIANT_PCI;
	else if (pci_mode < 4)
		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
	else if (pci_mode < 8)
		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
	else
		p->variant = PCI_VARIANT_PCIX_266_MODE2;
}
3147
3148/**
3149 * init_link_config - initialize a link's SW state
3150 * @lc: structure holding the link state
3151 * @ai: information about the current card
3152 *
3153 * Initializes the SW state maintained for each link, including the link's
3154 * capabilities and default speed/duplex/flow-control/autonegotiation
3155 * settings.
3156 */
3157static void __devinit init_link_config(struct link_config *lc,
3158 unsigned int caps)
3159{
3160 lc->supported = caps;
3161 lc->requested_speed = lc->speed = SPEED_INVALID;
3162 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3163 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3164 if (lc->supported & SUPPORTED_Autoneg) {
3165 lc->advertising = lc->supported;
3166 lc->autoneg = AUTONEG_ENABLE;
3167 lc->requested_fc |= PAUSE_AUTONEG;
3168 } else {
3169 lc->advertising = 0;
3170 lc->autoneg = AUTONEG_DISABLE;
3171 }
3172}
3173
3174/**
3175 * mc7_calc_size - calculate MC7 memory size
3176 * @cfg: the MC7 configuration
3177 *
3178 * Calculates the size of an MC7 memory in bytes from the value of its
3179 * configuration register.
3180 */
3181static unsigned int __devinit mc7_calc_size(u32 cfg)
3182{
3183 unsigned int width = G_WIDTH(cfg);
3184 unsigned int banks = !!(cfg & F_BKS) + 1;
3185 unsigned int org = !!(cfg & F_ORG) + 1;
3186 unsigned int density = G_DEN(cfg);
3187 unsigned int MBs = ((256 << density) * banks) / (org << width);
3188
3189 return MBs << 20;
3190}
3191
/* Initialize the SW state of one MC7 memory controller instance and size
 * its memory from the HW configuration register. */
static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
			       unsigned int base_addr, const char *name)
{
	u32 cfg;

	mc7->adapter = adapter;
	mc7->name = name;
	/* register offsets are stored relative to the PMRX instance */
	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	mc7->size = mc7_calc_size(cfg);
	mc7->width = G_WIDTH(cfg);
}
3204
/* Initialize the SW state of one MAC; on rev 0 XAUI adapters also program
 * the SERDES control register and disable RGMII for this MAC. */
void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
{
	mac->adapter = adapter;
	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
	mac->nucast = 1;	/* one unicast address in use initially */

	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
				 F_ENRGMII, 0);
	}
}
3218
/* Early, port-independent HW setup: MDIO, I2C clock, GPIOs, and XGMAC port
 * configuration including a clock-divider reset on both MACs. */
void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
{
	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);

	mi1_init(adapter, ai);
	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80KHz */
		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);

	if (adapter->params.rev == 0 || !uses_xaui(adapter))
		val |= F_ENRGMII;

	/* Enable MAC clocks so we can access the registers */
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);	/* flush */

	/* apply the clock-divider reset to both MACs */
	val |= F_CLKDIVRESET_;
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);	/* flush */
	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);	/* flush */
}
3242
/*
 * Reset the adapter.  PCIe cards lose their config space during reset, PCI-X
 * ones don't.  Returns 0 on success, -1 if the device does not come back.
 */
int t3_reset_adapter(struct adapter *adapter)
{
	int i;
	uint16_t devid = 0;

	if (is_pcie(adapter))
		pci_save_state(adapter->pdev);
	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);

	/*
	 * Delay. Give Some time to device to reset fully.
	 * XXX The delay time should be modified.
	 */
	for (i = 0; i < 10; i++) {
		msleep(50);
		/* poll the vendor ID (config offset 0); 0x1425 is Chelsio's
		 * PCI vendor ID, so a match means config space is back */
		pci_read_config_word(adapter->pdev, 0x00, &devid);
		if (devid == 0x1425)
			break;
	}

	if (devid != 0x1425)
		return -1;

	if (is_pcie(adapter))
		pci_restore_state(adapter->pdev);
	return 0;
}
3274
/*
 * Initialize adapter SW state for the various HW modules, set initial values
 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
 * interface.
 */
int __devinit t3_prep_adapter(struct adapter *adapter,
			      const struct adapter_info *ai, int reset)
{
	int ret;
	unsigned int i, j = 0;

	get_pci_mode(adapter, &adapter->params.pci);

	adapter->params.info = ai;
	adapter->params.nports = ai->nports;
	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
	adapter->params.linkpoll_period = 0;
	adapter->params.stats_update_period = is_10G(adapter) ?
	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
	adapter->params.pci.vpd_cap_addr =
	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	if (reset && t3_reset_adapter(adapter))
		return -1;

	t3_sge_prep(adapter, &adapter->params.sge);

	if (adapter->params.vpd.mclk) {
		/* card has memory: size the memory controllers and derive
		 * the TP and MC5 parameters from their capacities */
		struct tp_params *p = &adapter->params.tp;

		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");

		p->nchan = ai->nports;
		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
		p->cm_size = t3_mc7_size(&adapter->cm);
		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
		p->chan_tx_size = p->pmtx_size / p->nchan;
		p->rx_pg_size = 64 * 1024;
		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
		p->ntimer_qs = p->cm_size >= (128 << 20) ||
		    adapter->params.rev > 0 ? 12 : 6;

		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
		/* rev 0 parts get no filters */
		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
		    DEFAULT_NFILTERS : 0;
		adapter->params.mc5.nroutes = 0;
		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);

		init_mtus(adapter->params.mtus);
		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
	}

	early_hw_init(adapter, ai);

	for_each_port(adapter, i) {
		u8 hw_addr[6];
		struct port_info *p = adap2pinfo(adapter, i);

		/* skip unpopulated VPD port slots */
		while (!adapter->params.vpd.port_type[j])
			++j;

		p->port_type = &port_types[adapter->params.vpd.port_type[j]];
		p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
				       ai->mdio_ops);
		mac_prep(&p->mac, adapter, j);
		++j;

		/*
		 * The VPD EEPROM stores the base Ethernet address for the
		 * card.  A port's address is derived from the base by adding
		 * the port's index to the base's low octet.
		 */
		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;

		memcpy(adapter->port[i]->dev_addr, hw_addr,
		       ETH_ALEN);
		memcpy(adapter->port[i]->perm_addr, hw_addr,
		       ETH_ALEN);
		init_link_config(&p->link_config, p->port_type->caps);
		/* keep the PHY powered down until the port is enabled */
		p->phy.ops->power_down(&p->phy, 1);
		/* a port without link interrupts forces link polling */
		if (!(p->port_type->caps & SUPPORTED_IRQ))
			adapter->params.linkpoll_period = 10;
	}

	return 0;
}
3370
/* Assert the GPIO0 output value — presumably drives the "driver ready"
 * LED; see the GPIO0 setup in early_hw_init(). */
void t3_led_ready(struct adapter *adapter)
{
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
}
diff --git a/drivers/net/cxgb3/t3cdev.h b/drivers/net/cxgb3/t3cdev.h
new file mode 100644
index 000000000000..9af3bcd64b3b
--- /dev/null
+++ b/drivers/net/cxgb3/t3cdev.h
@@ -0,0 +1,73 @@
1/*
2 * Copyright (C) 2006-2007 Chelsio Communications. All rights reserved.
3 * Copyright (C) 2006-2007 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _T3CDEV_H_
34#define _T3CDEV_H_
35
36#include <linux/list.h>
37#include <asm/atomic.h>
38#include <asm/semaphore.h>
39#include <linux/netdevice.h>
40#include <linux/proc_fs.h>
41#include <linux/skbuff.h>
42#include <net/neighbour.h>
43
44#define T3CNAMSIZ 16
45
46/* Get the t3cdev associated with a net_device */
47#define T3CDEV(netdev) (struct t3cdev *)(netdev->priv)
48
49struct cxgb3_client;
50
51enum t3ctype {
52 T3A = 0,
53 T3B
54};
55
56struct t3cdev {
57 char name[T3CNAMSIZ]; /* T3C device name */
58 enum t3ctype type;
59 struct list_head ofld_dev_list; /* for list linking */
60 struct net_device *lldev; /* LL dev associated with T3C messages */
61 struct proc_dir_entry *proc_dir; /* root of proc dir for this T3C */
62 int (*send)(struct t3cdev *dev, struct sk_buff *skb);
63 int (*recv)(struct t3cdev *dev, struct sk_buff **skb, int n);
64 int (*ctl)(struct t3cdev *dev, unsigned int req, void *data);
65 void (*neigh_update)(struct t3cdev *dev, struct neighbour *neigh);
66 void *priv; /* driver private data */
67 void *l2opt; /* optional layer 2 data */
68 void *l3opt; /* optional layer 3 data */
69 void *l4opt; /* optional layer 4 data */
70 void *ulp; /* ulp stuff */
71};
72
73#endif /* _T3CDEV_H_ */
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
new file mode 100644
index 000000000000..2b67dd523cc1
--- /dev/null
+++ b/drivers/net/cxgb3/version.h
@@ -0,0 +1,39 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32/* $Date: 2006/10/31 18:57:51 $ $RCSfile: version.h,v $ $Revision: 1.3 $ */
#ifndef __CHELSIO_VERSION_H
#define __CHELSIO_VERSION_H
/* Human-readable description used in module/driver banners */
#define DRV_DESC "Chelsio T3 Network Driver"
/* Canonical driver name (module name, interface prefix) */
#define DRV_NAME "cxgb3"
/* Driver version */
#define DRV_VERSION "1.0"
#endif				/* __CHELSIO_VERSION_H */
diff --git a/drivers/net/cxgb3/vsc8211.c b/drivers/net/cxgb3/vsc8211.c
new file mode 100644
index 000000000000..eee4285b31be
--- /dev/null
+++ b/drivers/net/cxgb3/vsc8211.c
@@ -0,0 +1,228 @@
1/*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33
/* VSC8211 PHY specific registers. */
enum {
	VSC8211_INTR_ENABLE = 25,	/* interrupt mask register */
	VSC8211_INTR_STATUS = 26,	/* interrupt status; cleared by reading */
	VSC8211_AUX_CTRL_STAT = 28,	/* auxiliary control & status */
};

/* Bits of the interrupt enable/status registers */
enum {
	VSC_INTR_RX_ERR = 1 << 0,	/* Rx error */
	VSC_INTR_MS_ERR = 1 << 1,	/* master/slave resolution error */
	VSC_INTR_CABLE = 1 << 2,	/* cable impairment */
	VSC_INTR_FALSE_CARR = 1 << 3,	/* false carrier */
	VSC_INTR_MEDIA_CHG = 1 << 4,	/* AMS media change */
	VSC_INTR_RX_FIFO = 1 << 5,	/* Rx FIFO over/underflow */
	VSC_INTR_TX_FIFO = 1 << 6,	/* Tx FIFO over/underflow */
	VSC_INTR_DESCRAMBL = 1 << 7,	/* descrambler lock-lost */
	VSC_INTR_SYMBOL_ERR = 1 << 8,	/* symbol error */
	VSC_INTR_NEG_DONE = 1 << 10,	/* autoneg done */
	VSC_INTR_NEG_ERR = 1 << 11,	/* autoneg error */
	VSC_INTR_LINK_CHG = 1 << 13,	/* link change */
	VSC_INTR_ENABLE = 1 << 15,	/* interrupt enable */
};

/* Interrupts that may indicate a link configuration change */
#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \
			   VSC_INTR_NEG_DONE)
/* Full set of interrupts the driver unmasks */
#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \
		   VSC_INTR_ENABLE)

/* PHY specific auxiliary control & status register fields */
#define S_ACSR_ACTIPHY_TMR 0
#define M_ACSR_ACTIPHY_TMR 0x3
#define V_ACSR_ACTIPHY_TMR(x) ((x) << S_ACSR_ACTIPHY_TMR)

/* 2-bit resolved speed field: 0 = 10M, 1 = 100M, else 1000M (see
 * vsc8211_get_link_status) */
#define S_ACSR_SPEED 3
#define M_ACSR_SPEED 0x3
#define G_ACSR_SPEED(x) (((x) >> S_ACSR_SPEED) & M_ACSR_SPEED)

/* resolved duplex: set = full duplex */
#define S_ACSR_DUPLEX 5
#define F_ACSR_DUPLEX (1 << S_ACSR_DUPLEX)

#define S_ACSR_ACTIPHY 6
#define F_ACSR_ACTIPHY (1 << S_ACSR_ACTIPHY)
76
/*
 * Reset the PHY.  The VSC8211 completes its reset immediately, so no
 * polling is needed and the 'wait' argument is ignored.
 */
static int vsc8211_reset(struct cphy *cphy, int wait)
{
	return t3_phy_reset(cphy, 0, 0);
}
84
85static int vsc8211_intr_enable(struct cphy *cphy)
86{
87 return mdio_write(cphy, 0, VSC8211_INTR_ENABLE, INTR_MASK);
88}
89
90static int vsc8211_intr_disable(struct cphy *cphy)
91{
92 return mdio_write(cphy, 0, VSC8211_INTR_ENABLE, 0);
93}
94
95static int vsc8211_intr_clear(struct cphy *cphy)
96{
97 u32 val;
98
99 /* Clear PHY interrupts by reading the register. */
100 return mdio_read(cphy, 0, VSC8211_INTR_STATUS, &val);
101}
102
103static int vsc8211_autoneg_enable(struct cphy *cphy)
104{
105 return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
106 BMCR_ANENABLE | BMCR_ANRESTART);
107}
108
109static int vsc8211_autoneg_restart(struct cphy *cphy)
110{
111 return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
112 BMCR_ANRESTART);
113}
114
/*
 * vsc8211_get_link_status - query link state, speed, duplex and pause
 * @cphy: the PHY
 * @link_ok: if non-NULL, set to 1 iff the link is up
 * @speed: if non-NULL, set to the resolved speed (SPEED_*) or -1 if unknown
 * @duplex: if non-NULL, set to DUPLEX_FULL/DUPLEX_HALF or -1 if unknown
 * @fc: if non-NULL, set to the negotiated pause mask (PAUSE_RX/PAUSE_TX)
 *
 * Reads BMCR/BMSR and, when autoneg has completed, the PHY's auxiliary
 * status register to obtain the resolved link parameters.  Any output
 * pointer may be NULL to skip that item.  Returns 0 on success or the
 * MDIO error code.
 */
static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok,
				   int *speed, int *duplex, int *fc)
{
	unsigned int bmcr, status, lpa, adv;
	int err, sp = -1, dplx = -1, pause = 0;

	err = mdio_read(cphy, 0, MII_BMCR, &bmcr);
	if (!err)
		err = mdio_read(cphy, 0, MII_BMSR, &status);
	if (err)
		return err;

	if (link_ok) {
		/*
		 * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
		 * once more to get the current link state.
		 */
		if (!(status & BMSR_LSTATUS))
			err = mdio_read(cphy, 0, MII_BMSR, &status);
		if (err)
			return err;
		*link_ok = (status & BMSR_LSTATUS) != 0;
	}
	if (!(bmcr & BMCR_ANENABLE)) {
		/* autoneg off: speed/duplex are whatever BMCR forces */
		dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
		if (bmcr & BMCR_SPEED1000)
			sp = SPEED_1000;
		else if (bmcr & BMCR_SPEED100)
			sp = SPEED_100;
		else
			sp = SPEED_10;
	} else if (status & BMSR_ANEGCOMPLETE) {
		/* autoneg done: read the PHY's resolved results */
		err = mdio_read(cphy, 0, VSC8211_AUX_CTRL_STAT, &status);
		if (err)
			return err;

		dplx = (status & F_ACSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
		sp = G_ACSR_SPEED(status);
		if (sp == 0)
			sp = SPEED_10;
		else if (sp == 1)
			sp = SPEED_100;
		else
			sp = SPEED_1000;

		/* derive pause from both sides' advertisements (802.3 28B) */
		if (fc && dplx == DUPLEX_FULL) {
			err = mdio_read(cphy, 0, MII_LPA, &lpa);
			if (!err)
				err = mdio_read(cphy, 0, MII_ADVERTISE, &adv);
			if (err)
				return err;

			if (lpa & adv & ADVERTISE_PAUSE_CAP)
				pause = PAUSE_RX | PAUSE_TX;
			else if ((lpa & ADVERTISE_PAUSE_CAP) &&
				 (lpa & ADVERTISE_PAUSE_ASYM) &&
				 (adv & ADVERTISE_PAUSE_ASYM))
				pause = PAUSE_TX;
			else if ((lpa & ADVERTISE_PAUSE_ASYM) &&
				 (adv & ADVERTISE_PAUSE_CAP))
				pause = PAUSE_RX;
		}
	}
	if (speed)
		*speed = sp;
	if (duplex)
		*duplex = dplx;
	if (fc)
		*fc = pause;
	return 0;
}
186
187static int vsc8211_power_down(struct cphy *cphy, int enable)
188{
189 return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN,
190 enable ? BMCR_PDOWN : 0);
191}
192
193static int vsc8211_intr_handler(struct cphy *cphy)
194{
195 unsigned int cause;
196 int err, cphy_cause = 0;
197
198 err = mdio_read(cphy, 0, VSC8211_INTR_STATUS, &cause);
199 if (err)
200 return err;
201
202 cause &= INTR_MASK;
203 if (cause & CFG_CHG_INTR_MASK)
204 cphy_cause |= cphy_cause_link_change;
205 if (cause & (VSC_INTR_RX_FIFO | VSC_INTR_TX_FIFO))
206 cphy_cause |= cphy_cause_fifo_error;
207 return cphy_cause;
208}
209
/* Operations vector plugged into the generic cphy layer for the VSC8211. */
static struct cphy_ops vsc8211_ops = {
	.reset = vsc8211_reset,
	.intr_enable = vsc8211_intr_enable,
	.intr_disable = vsc8211_intr_disable,
	.intr_clear = vsc8211_intr_clear,
	.intr_handler = vsc8211_intr_handler,
	.autoneg_enable = vsc8211_autoneg_enable,
	.autoneg_restart = vsc8211_autoneg_restart,
	.advertise = t3_phy_advertise,	/* generic MII helpers suffice */
	.set_speed_duplex = t3_set_phy_speed_duplex,
	.get_link_status = vsc8211_get_link_status,
	.power_down = vsc8211_power_down,
};
223
/*
 * t3_vsc8211_phy_prep - initialize a cphy for a VSC8211 PHY
 * @phy: the cphy to initialize
 * @adapter: the adapter the PHY belongs to
 * @phy_addr: the PHY's MDIO address
 * @mdio_ops: MDIO access routines to use
 *
 * Binds the VSC8211 operations vector to the given cphy.
 */
void t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
			 int phy_addr, const struct mdio_ops *mdio_ops)
{
	cphy_init(phy, adapter, phy_addr, &vsc8211_ops, mdio_ops);
}
diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c
new file mode 100644
index 000000000000..907a272ae32d
--- /dev/null
+++ b/drivers/net/cxgb3/xgmac.c
@@ -0,0 +1,409 @@
1/*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33#include "regs.h"
34
35/*
36 * # of exact address filters. The first one is used for the station address,
37 * the rest are available for multicast addresses.
38 */
39#define EXACT_ADDR_FILTERS 8
40
41static inline int macidx(const struct cmac *mac)
42{
43 return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR);
44}
45
/*
 * xaui_serdes_reset - reset a MAC's XAUI SERDES lanes
 * @mac: the MAC whose SERDES to reset
 *
 * Asserts every reset, power-down and PLL-reset bit together with the
 * VPD-provided XAUI configuration, then releases the bits in stages with
 * a 15 us settle delay between each stage.  The order of the 'clear'
 * table entries is the required release sequence — do not reorder.
 */
static void xaui_serdes_reset(struct cmac *mac)
{
	/* release order: power-up, PLL out of reset, lanes out of reset */
	static const unsigned int clear[] = {
		F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
		F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
	};

	int i;
	struct adapter *adap = mac->adapter;
	u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset;

	t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] |
		     F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 |
		     F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 |
		     F_RESETPLL23 | F_RESETPLL01);
	t3_read_reg(adap, ctrl);	/* flush the write before delaying */
	udelay(15);

	for (i = 0; i < ARRAY_SIZE(clear); i++) {
		t3_set_reg_field(adap, ctrl, clear[i], 0);
		udelay(15);
	}
}
69
/*
 * t3b_pcs_reset - pulse the PCS reset for a MAC (T3B parts)
 * @mac: the MAC to reset
 *
 * Asserts PCS reset (active-low bit cleared), waits 20 us, then
 * deasserts it.
 */
void t3b_pcs_reset(struct cmac *mac)
{
	t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
			 F_PCS_RESET_, 0);
	udelay(20);
	t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0,
			 F_PCS_RESET_);
}
78
/*
 * t3_mac_reset - full reset and reinitialization of a MAC
 * @mac: the MAC to reset
 *
 * Puts the MAC in reset, programs a baseline register configuration
 * (Tx/Rx disabled, Rx filters cleared, stats cleared), handles the
 * XAUI SERDES bring-up, then releases the appropriate reset bits for
 * the port type.  Also zeroes the software statistics mirror.
 * Returns 0 on success, -1 if the rev-0 XAUI SERDES CMU fails to lock.
 */
int t3_mac_reset(struct cmac *mac)
{
	/* baseline register values applied after asserting MAC reset */
	static const struct addr_val_pair mac_reset_avp[] = {
		{A_XGM_TX_CTRL, 0},
		{A_XGM_RX_CTRL, 0},
		{A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
		 F_RMFCS | F_ENJUMBO | F_ENHASHMCAST},
		{A_XGM_RX_HASH_LOW, 0},
		{A_XGM_RX_HASH_HIGH, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_1, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_2, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_3, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_4, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_5, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_6, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_7, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_8, 0},
		{A_XGM_STAT_CTRL, F_CLRSTATS}
	};
	u32 val;
	struct adapter *adap = mac->adapter;
	unsigned int oft = mac->offset;

	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */

	t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
	/* store-and-forward only on non-XAUI (RGMII) ports */
	t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
			 F_RXSTRFRWRD | F_DISERRFRAMES,
			 uses_xaui(adap) ? 0 : F_RXSTRFRWRD);

	if (uses_xaui(adap)) {
		if (adap->params.rev == 0) {
			/* rev 0: enable lanes and wait for the CMU to lock */
			t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
					 F_RXENABLE | F_TXENABLE);
			if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
					    F_CMULOCK, 1, 5, 2)) {
				CH_ERR(adap,
				       "MAC %d XAUI SERDES CMU lock failed\n",
				       macidx(mac));
				return -1;
			}
			t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
					 F_SERDESRESET_);
		} else
			xaui_serdes_reset(mac);
	}

	if (adap->params.rev > 0)
		t3_write_reg(adap, A_XGM_PAUSE_TIMER + oft, 0xf000);

	/* release MAC reset plus the blocks appropriate for the port type */
	val = F_MAC_RESET_;
	if (is_10G(adap))
		val |= F_PCS_RESET_;
	else if (uses_xaui(adap))
		val |= F_PCS_RESET_ | F_XG2G_RESET_;
	else
		val |= F_RGMII_RESET_ | F_XG2G_RESET_;
	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */
	if ((val & F_PCS_RESET_) && adap->params.rev) {
		msleep(1);
		t3b_pcs_reset(mac);
	}

	memset(&mac->stats, 0, sizeof(mac->stats));
	return 0;
}
147
148/*
149 * Set the exact match register 'idx' to recognize the given Ethernet address.
150 */
151static void set_addr_filter(struct cmac *mac, int idx, const u8 * addr)
152{
153 u32 addr_lo, addr_hi;
154 unsigned int oft = mac->offset + idx * 8;
155
156 addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
157 addr_hi = (addr[5] << 8) | addr[4];
158
159 t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo);
160 t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
161}
162
163/* Set one of the station's unicast MAC addresses. */
164int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
165{
166 if (idx >= mac->nucast)
167 return -EINVAL;
168 set_addr_filter(mac, idx, addr);
169 return 0;
170}
171
172/*
173 * Specify the number of exact address filters that should be reserved for
174 * unicast addresses. Caller should reload the unicast and multicast addresses
175 * after calling this.
176 */
177int t3_mac_set_num_ucast(struct cmac *mac, int n)
178{
179 if (n > EXACT_ADDR_FILTERS)
180 return -EINVAL;
181 mac->nucast = n;
182 return 0;
183}
184
185/* Calculate the RX hash filter index of an Ethernet address */
186static int hash_hw_addr(const u8 * addr)
187{
188 int hash = 0, octet, bit, i = 0, c;
189
190 for (octet = 0; octet < 6; ++octet)
191 for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
192 hash ^= (c & 1) << i;
193 if (++i == 6)
194 i = 0;
195 }
196 return hash;
197}
198
/*
 * t3_mac_set_rx_mode - program a MAC's Rx filtering from an interface's flags
 * @mac: the MAC to configure
 * @rm: Rx-mode descriptor wrapping the net_device and its multicast list
 *
 * Sets promiscuous mode from IFF_PROMISC, then programs multicast
 * filtering: with IFF_ALLMULTI the hash registers accept everything;
 * otherwise multicast addresses fill the remaining exact-match slots
 * first and overflow into the 64-bit hash filter.  Returns 0.
 */
int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm)
{
	u32 val, hash_lo, hash_hi;
	struct adapter *adap = mac->adapter;
	unsigned int oft = mac->offset;

	val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
	if (rm->dev->flags & IFF_PROMISC)
		val |= F_COPYALLFRAMES;
	t3_write_reg(adap, A_XGM_RX_CFG + oft, val);

	if (rm->dev->flags & IFF_ALLMULTI)
		hash_lo = hash_hi = 0xffffffff;
	else {
		u8 *addr;
		/* exact-match slots below nucast are reserved for unicast */
		int exact_addr_idx = mac->nucast;

		hash_lo = hash_hi = 0;
		while ((addr = t3_get_next_mcaddr(rm)))
			if (exact_addr_idx < EXACT_ADDR_FILTERS)
				set_addr_filter(mac, exact_addr_idx++, addr);
			else {
				int hash = hash_hw_addr(addr);

				if (hash < 32)
					hash_lo |= (1 << hash);
				else
					hash_hi |= (1 << (hash - 32));
			}
	}

	t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo);
	t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi);
	return 0;
}
234
/*
 * t3_mac_set_mtu - set a MAC's maximum frame size and related thresholds
 * @mac: the MAC to configure
 * @mtu: the requested MTU (payload size, excluding Ethernet header and FCS)
 *
 * Programs the HW maximum packet size, recomputes the Rx FIFO PAUSE
 * watermarks, and scales the Tx FIFO threshold to the new frame size.
 * Returns 0 on success or -EINVAL if the MTU exceeds what the HW allows.
 */
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
{
	int hwm, lwm;
	unsigned int thres, v;
	struct adapter *adap = mac->adapter;

	/*
	 * MAX_FRAME_SIZE includes header + FCS, mtu doesn't.  The HW max
	 * packet size register includes header, but not FCS.
	 */
	mtu += 14;
	if (mtu > MAX_FRAME_SIZE - 4)
		return -EINVAL;
	t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);

	/*
	 * Adjust the PAUSE frame watermarks.  We always set the LWM, and the
	 * HWM only if flow-control is enabled.
	 */
	hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, MAC_RXFIFO_SIZE / 2U);
	hwm = min(hwm, 3 * MAC_RXFIFO_SIZE / 4 + 1024);
	lwm = hwm - 1024;
	v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
	v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
	v |= V_RXFIFOPAUSELWM(lwm / 8);
	/* only touch the HWM if one is currently programmed (FC enabled) */
	if (G_RXFIFOPAUSEHWM(v))
		v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
		    V_RXFIFOPAUSEHWM(hwm / 8);
	t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);

	/* Adjust the TX FIFO threshold based on the MTU */
	thres = (adap->params.vpd.cclk * 1000) / 15625;
	thres = (thres * mtu) / 1000;
	if (is_10G(adap))
		thres /= 10;
	thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
	thres = max(thres, 8U);	/* need at least 8 */
	t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
			 V_TXFIFOTHRESH(M_TXFIFOTHRESH), V_TXFIFOTHRESH(thres));
	return 0;
}
276
/*
 * t3_mac_set_speed_duplex_fc - set a MAC's speed, duplex and flow control
 * @mac: the MAC to configure
 * @speed: SPEED_10/100/1000/10000, or negative to leave unchanged
 * @duplex: DUPLEX_FULL, or negative to leave unchanged; half duplex is
 *	not supported by this MAC
 * @fc: PAUSE_RX/PAUSE_TX mask of flow-control directions to enable
 *
 * Returns 0 on success or -EINVAL for an unsupported speed or duplex.
 * Note the register encoding: PAUSE_TX enables the Rx FIFO HWM (we emit
 * PAUSE frames), while PAUSE_RX enables honoring received PAUSE frames.
 */
int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
{
	u32 val;
	struct adapter *adap = mac->adapter;
	unsigned int oft = mac->offset;

	if (duplex >= 0 && duplex != DUPLEX_FULL)
		return -EINVAL;
	if (speed >= 0) {
		if (speed == SPEED_10)
			val = V_PORTSPEED(0);
		else if (speed == SPEED_100)
			val = V_PORTSPEED(1);
		else if (speed == SPEED_1000)
			val = V_PORTSPEED(2);
		else if (speed == SPEED_10000)
			val = V_PORTSPEED(3);
		else
			return -EINVAL;

		t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
				 V_PORTSPEED(M_PORTSPEED), val);
	}

	val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
	val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
	if (fc & PAUSE_TX)
		val |= V_RXFIFOPAUSEHWM(G_RXFIFOPAUSELWM(val) + 128); /* +1KB */
	t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);

	t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
			 (fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
	return 0;
}
311
/*
 * t3_mac_enable - enable a MAC's Tx and/or Rx paths
 * @mac: the MAC to enable
 * @which: bitmap of MAC_DIRECTION_TX / MAC_DIRECTION_RX
 *
 * For Tx, also programs the TP per-channel drop configuration via the
 * indirect PIO ADDR/DATA register pair and enables drop mode for this
 * channel.  Returns 0.
 */
int t3_mac_enable(struct cmac *mac, int which)
{
	int idx = macidx(mac);
	struct adapter *adap = mac->adapter;
	unsigned int oft = mac->offset;

	if (which & MAC_DIRECTION_TX) {
		t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
		t3_write_reg(adap, A_TP_PIO_DATA, 0xbf000001);
		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
		t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);
	}
	if (which & MAC_DIRECTION_RX)
		t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
	return 0;
}
329
/*
 * t3_mac_disable - disable a MAC's Tx and/or Rx paths
 * @mac: the MAC to disable
 * @which: bitmap of MAC_DIRECTION_TX / MAC_DIRECTION_RX
 *
 * Mirror of t3_mac_enable: clears the Tx/Rx enables and, for Tx, restores
 * the TP drop configuration and clears drop mode for this channel.
 * Returns 0.
 */
int t3_mac_disable(struct cmac *mac, int which)
{
	int idx = macidx(mac);
	struct adapter *adap = mac->adapter;

	if (which & MAC_DIRECTION_TX) {
		t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
		t3_write_reg(adap, A_TP_PIO_DATA, 0xc000001f);
		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
		t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 0);
	}
	if (which & MAC_DIRECTION_RX)
		t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
	return 0;
}
346
/*
 * This function is called periodically to accumulate the current values of the
 * RMON counters into the port statistics.  Since the packet counters are only
 * 32 bits they can overflow in ~286 secs at 10G, so the function should be
 * called more frequently than that.  The byte counters are 45-bit wide, they
 * would overflow in ~7.8 hours.
 */
const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
{
/* read one RMON register of this MAC */
#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset)
/* accumulate a 32-bit counter into the 64-bit software mirror */
#define RMON_UPDATE(mac, name, reg) \
	(mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg)
/* accumulate a counter split across low/high registers */
#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \
	(mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \
			     ((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32)

	u32 v, lo;

	RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH);
	RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH);
	RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES);
	RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES);
	RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES);
	RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES);
	RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES);
	RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES);
	RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);

	RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);
	mac->stats.rx_too_long += RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);

	RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
	RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
	RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES);
	RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES);
	RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES);
	RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES);
	RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES);

	RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH);
	RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH);
	RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST);
	RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST);
	RMON_UPDATE(mac, tx_pause, TX_PAUSE);
	/* This counts error frames in general (bad FCS, underrun, etc). */
	RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES);

	RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES);
	RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES);
	RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES);
	RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES);
	RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES);
	RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES);
	RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES);

	/* The next stat isn't clear-on-read. */
	t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50);
	v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
	/* accumulate only the delta since the last read into the u64 mirror */
	lo = (u32) mac->stats.rx_cong_drops;
	mac->stats.rx_cong_drops += (u64) (v - lo);

	return &mac->stats;
}
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index 4ae0fed7122e..9f7e1db8ce62 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * adopted from sunlance.c by Richard van den Berg 6 * adopted from sunlance.c by Richard van den Berg
7 * 7 *
8 * Copyright (C) 2002, 2003, 2005 Maciej W. Rozycki 8 * Copyright (C) 2002, 2003, 2005, 2006 Maciej W. Rozycki
9 * 9 *
10 * additional sources: 10 * additional sources:
11 * - PMAD-AA TURBOchannel Ethernet Module Functional Specification, 11 * - PMAD-AA TURBOchannel Ethernet Module Functional Specification,
@@ -44,6 +44,8 @@
44 * v0.010: Fixes for the PMAD mapping of the LANCE buffer and for the 44 * v0.010: Fixes for the PMAD mapping of the LANCE buffer and for the
45 * PMAX requirement to only use halfword accesses to the 45 * PMAX requirement to only use halfword accesses to the
46 * buffer. macro 46 * buffer. macro
47 *
48 * v0.011: Converted the PMAD to the driver model. macro
47 */ 49 */
48 50
49#include <linux/crc32.h> 51#include <linux/crc32.h>
@@ -58,6 +60,7 @@
58#include <linux/spinlock.h> 60#include <linux/spinlock.h>
59#include <linux/stddef.h> 61#include <linux/stddef.h>
60#include <linux/string.h> 62#include <linux/string.h>
63#include <linux/tc.h>
61#include <linux/types.h> 64#include <linux/types.h>
62 65
63#include <asm/addrspace.h> 66#include <asm/addrspace.h>
@@ -69,15 +72,16 @@
69#include <asm/dec/kn01.h> 72#include <asm/dec/kn01.h>
70#include <asm/dec/machtype.h> 73#include <asm/dec/machtype.h>
71#include <asm/dec/system.h> 74#include <asm/dec/system.h>
72#include <asm/dec/tc.h>
73 75
74static char version[] __devinitdata = 76static char version[] __devinitdata =
75"declance.c: v0.010 by Linux MIPS DECstation task force\n"; 77"declance.c: v0.011 by Linux MIPS DECstation task force\n";
76 78
77MODULE_AUTHOR("Linux MIPS DECstation task force"); 79MODULE_AUTHOR("Linux MIPS DECstation task force");
78MODULE_DESCRIPTION("DEC LANCE (DECstation onboard, PMAD-xx) driver"); 80MODULE_DESCRIPTION("DEC LANCE (DECstation onboard, PMAD-xx) driver");
79MODULE_LICENSE("GPL"); 81MODULE_LICENSE("GPL");
80 82
83#define __unused __attribute__ ((unused))
84
81/* 85/*
82 * card types 86 * card types
83 */ 87 */
@@ -246,7 +250,6 @@ struct lance_init_block {
246struct lance_private { 250struct lance_private {
247 struct net_device *next; 251 struct net_device *next;
248 int type; 252 int type;
249 int slot;
250 int dma_irq; 253 int dma_irq;
251 volatile struct lance_regs *ll; 254 volatile struct lance_regs *ll;
252 255
@@ -288,6 +291,7 @@ struct lance_regs {
288 291
289int dec_lance_debug = 2; 292int dec_lance_debug = 2;
290 293
294static struct tc_driver dec_lance_tc_driver;
291static struct net_device *root_lance_dev; 295static struct net_device *root_lance_dev;
292 296
293static inline void writereg(volatile unsigned short *regptr, short value) 297static inline void writereg(volatile unsigned short *regptr, short value)
@@ -1023,7 +1027,7 @@ static void lance_set_multicast_retry(unsigned long _opaque)
1023 lance_set_multicast(dev); 1027 lance_set_multicast(dev);
1024} 1028}
1025 1029
1026static int __init dec_lance_init(const int type, const int slot) 1030static int __init dec_lance_probe(struct device *bdev, const int type)
1027{ 1031{
1028 static unsigned version_printed; 1032 static unsigned version_printed;
1029 static const char fmt[] = "declance%d"; 1033 static const char fmt[] = "declance%d";
@@ -1031,6 +1035,7 @@ static int __init dec_lance_init(const int type, const int slot)
1031 struct net_device *dev; 1035 struct net_device *dev;
1032 struct lance_private *lp; 1036 struct lance_private *lp;
1033 volatile struct lance_regs *ll; 1037 volatile struct lance_regs *ll;
1038 resource_size_t start = 0, len = 0;
1034 int i, ret; 1039 int i, ret;
1035 unsigned long esar_base; 1040 unsigned long esar_base;
1036 unsigned char *esar; 1041 unsigned char *esar;
@@ -1038,14 +1043,18 @@ static int __init dec_lance_init(const int type, const int slot)
1038 if (dec_lance_debug && version_printed++ == 0) 1043 if (dec_lance_debug && version_printed++ == 0)
1039 printk(version); 1044 printk(version);
1040 1045
1041 i = 0; 1046 if (bdev)
1042 dev = root_lance_dev; 1047 snprintf(name, sizeof(name), "%s", bdev->bus_id);
1043 while (dev) { 1048 else {
1044 i++; 1049 i = 0;
1045 lp = (struct lance_private *)dev->priv; 1050 dev = root_lance_dev;
1046 dev = lp->next; 1051 while (dev) {
1052 i++;
1053 lp = (struct lance_private *)dev->priv;
1054 dev = lp->next;
1055 }
1056 snprintf(name, sizeof(name), fmt, i);
1047 } 1057 }
1048 snprintf(name, sizeof(name), fmt, i);
1049 1058
1050 dev = alloc_etherdev(sizeof(struct lance_private)); 1059 dev = alloc_etherdev(sizeof(struct lance_private));
1051 if (!dev) { 1060 if (!dev) {
@@ -1063,7 +1072,6 @@ static int __init dec_lance_init(const int type, const int slot)
1063 spin_lock_init(&lp->lock); 1072 spin_lock_init(&lp->lock);
1064 1073
1065 lp->type = type; 1074 lp->type = type;
1066 lp->slot = slot;
1067 switch (type) { 1075 switch (type) {
1068 case ASIC_LANCE: 1076 case ASIC_LANCE:
1069 dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE); 1077 dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE);
@@ -1110,12 +1118,22 @@ static int __init dec_lance_init(const int type, const int slot)
1110 break; 1118 break;
1111#ifdef CONFIG_TC 1119#ifdef CONFIG_TC
1112 case PMAD_LANCE: 1120 case PMAD_LANCE:
1113 claim_tc_card(slot); 1121 dev_set_drvdata(bdev, dev);
1122
1123 start = to_tc_dev(bdev)->resource.start;
1124 len = to_tc_dev(bdev)->resource.end - start + 1;
1125 if (!request_mem_region(start, len, bdev->bus_id)) {
1126 printk(KERN_ERR
1127 "%s: Unable to reserve MMIO resource\n",
1128 bdev->bus_id);
1129 ret = -EBUSY;
1130 goto err_out_dev;
1131 }
1114 1132
1115 dev->mem_start = CKSEG1ADDR(get_tc_base_addr(slot)); 1133 dev->mem_start = CKSEG1ADDR(start);
1116 dev->mem_end = dev->mem_start + 0x100000; 1134 dev->mem_end = dev->mem_start + 0x100000;
1117 dev->base_addr = dev->mem_start + 0x100000; 1135 dev->base_addr = dev->mem_start + 0x100000;
1118 dev->irq = get_tc_irq_nr(slot); 1136 dev->irq = to_tc_dev(bdev)->interrupt;
1119 esar_base = dev->mem_start + 0x1c0002; 1137 esar_base = dev->mem_start + 0x1c0002;
1120 lp->dma_irq = -1; 1138 lp->dma_irq = -1;
1121 1139
@@ -1174,7 +1192,7 @@ static int __init dec_lance_init(const int type, const int slot)
1174 printk(KERN_ERR "%s: declance_init called with unknown type\n", 1192 printk(KERN_ERR "%s: declance_init called with unknown type\n",
1175 name); 1193 name);
1176 ret = -ENODEV; 1194 ret = -ENODEV;
1177 goto err_out_free_dev; 1195 goto err_out_dev;
1178 } 1196 }
1179 1197
1180 ll = (struct lance_regs *) dev->base_addr; 1198 ll = (struct lance_regs *) dev->base_addr;
@@ -1188,7 +1206,7 @@ static int __init dec_lance_init(const int type, const int slot)
1188 "%s: Ethernet station address prom not found!\n", 1206 "%s: Ethernet station address prom not found!\n",
1189 name); 1207 name);
1190 ret = -ENODEV; 1208 ret = -ENODEV;
1191 goto err_out_free_dev; 1209 goto err_out_resource;
1192 } 1210 }
1193 /* Check the prom contents */ 1211 /* Check the prom contents */
1194 for (i = 0; i < 8; i++) { 1212 for (i = 0; i < 8; i++) {
@@ -1198,7 +1216,7 @@ static int __init dec_lance_init(const int type, const int slot)
1198 printk(KERN_ERR "%s: Something is wrong with the " 1216 printk(KERN_ERR "%s: Something is wrong with the "
1199 "ethernet station address prom!\n", name); 1217 "ethernet station address prom!\n", name);
1200 ret = -ENODEV; 1218 ret = -ENODEV;
1201 goto err_out_free_dev; 1219 goto err_out_resource;
1202 } 1220 }
1203 } 1221 }
1204 1222
@@ -1255,48 +1273,51 @@ static int __init dec_lance_init(const int type, const int slot)
1255 if (ret) { 1273 if (ret) {
1256 printk(KERN_ERR 1274 printk(KERN_ERR
1257 "%s: Unable to register netdev, aborting.\n", name); 1275 "%s: Unable to register netdev, aborting.\n", name);
1258 goto err_out_free_dev; 1276 goto err_out_resource;
1259 } 1277 }
1260 1278
1261 lp->next = root_lance_dev; 1279 if (!bdev) {
1262 root_lance_dev = dev; 1280 lp->next = root_lance_dev;
1281 root_lance_dev = dev;
1282 }
1263 1283
1264 printk("%s: registered as %s.\n", name, dev->name); 1284 printk("%s: registered as %s.\n", name, dev->name);
1265 return 0; 1285 return 0;
1266 1286
1267err_out_free_dev: 1287err_out_resource:
1288 if (bdev)
1289 release_mem_region(start, len);
1290
1291err_out_dev:
1268 free_netdev(dev); 1292 free_netdev(dev);
1269 1293
1270err_out: 1294err_out:
1271 return ret; 1295 return ret;
1272} 1296}
1273 1297
1298static void __exit dec_lance_remove(struct device *bdev)
1299{
1300 struct net_device *dev = dev_get_drvdata(bdev);
1301 resource_size_t start, len;
1302
1303 unregister_netdev(dev);
1304 start = to_tc_dev(bdev)->resource.start;
1305 len = to_tc_dev(bdev)->resource.end - start + 1;
1306 release_mem_region(start, len);
1307 free_netdev(dev);
1308}
1274 1309
1275/* Find all the lance cards on the system and initialize them */ 1310/* Find all the lance cards on the system and initialize them */
1276static int __init dec_lance_probe(void) 1311static int __init dec_lance_platform_probe(void)
1277{ 1312{
1278 int count = 0; 1313 int count = 0;
1279 1314
1280 /* Scan slots for PMAD-AA cards first. */
1281#ifdef CONFIG_TC
1282 if (TURBOCHANNEL) {
1283 int slot;
1284
1285 while ((slot = search_tc_card("PMAD-AA")) >= 0) {
1286 if (dec_lance_init(PMAD_LANCE, slot) < 0)
1287 break;
1288 count++;
1289 }
1290 }
1291#endif
1292
1293 /* Then handle onboard devices. */
1294 if (dec_interrupt[DEC_IRQ_LANCE] >= 0) { 1315 if (dec_interrupt[DEC_IRQ_LANCE] >= 0) {
1295 if (dec_interrupt[DEC_IRQ_LANCE_MERR] >= 0) { 1316 if (dec_interrupt[DEC_IRQ_LANCE_MERR] >= 0) {
1296 if (dec_lance_init(ASIC_LANCE, -1) >= 0) 1317 if (dec_lance_probe(NULL, ASIC_LANCE) >= 0)
1297 count++; 1318 count++;
1298 } else if (!TURBOCHANNEL) { 1319 } else if (!TURBOCHANNEL) {
1299 if (dec_lance_init(PMAX_LANCE, -1) >= 0) 1320 if (dec_lance_probe(NULL, PMAX_LANCE) >= 0)
1300 count++; 1321 count++;
1301 } 1322 }
1302 } 1323 }
@@ -1304,21 +1325,70 @@ static int __init dec_lance_probe(void)
1304 return (count > 0) ? 0 : -ENODEV; 1325 return (count > 0) ? 0 : -ENODEV;
1305} 1326}
1306 1327
1307static void __exit dec_lance_cleanup(void) 1328static void __exit dec_lance_platform_remove(void)
1308{ 1329{
1309 while (root_lance_dev) { 1330 while (root_lance_dev) {
1310 struct net_device *dev = root_lance_dev; 1331 struct net_device *dev = root_lance_dev;
1311 struct lance_private *lp = netdev_priv(dev); 1332 struct lance_private *lp = netdev_priv(dev);
1312 1333
1313 unregister_netdev(dev); 1334 unregister_netdev(dev);
1314#ifdef CONFIG_TC
1315 if (lp->slot >= 0)
1316 release_tc_card(lp->slot);
1317#endif
1318 root_lance_dev = lp->next; 1335 root_lance_dev = lp->next;
1319 free_netdev(dev); 1336 free_netdev(dev);
1320 } 1337 }
1321} 1338}
1322 1339
1323module_init(dec_lance_probe); 1340#ifdef CONFIG_TC
1324module_exit(dec_lance_cleanup); 1341static int __init dec_lance_tc_probe(struct device *dev);
1342static int __exit dec_lance_tc_remove(struct device *dev);
1343
1344static const struct tc_device_id dec_lance_tc_table[] = {
1345 { "DEC ", "PMAD-AA " },
1346 { }
1347};
1348MODULE_DEVICE_TABLE(tc, dec_lance_tc_table);
1349
1350static struct tc_driver dec_lance_tc_driver = {
1351 .id_table = dec_lance_tc_table,
1352 .driver = {
1353 .name = "declance",
1354 .bus = &tc_bus_type,
1355 .probe = dec_lance_tc_probe,
1356 .remove = __exit_p(dec_lance_tc_remove),
1357 },
1358};
1359
1360static int __init dec_lance_tc_probe(struct device *dev)
1361{
1362 int status = dec_lance_probe(dev, PMAD_LANCE);
1363 if (!status)
1364 get_device(dev);
1365 return status;
1366}
1367
1368static int __exit dec_lance_tc_remove(struct device *dev)
1369{
1370 put_device(dev);
1371 dec_lance_remove(dev);
1372 return 0;
1373}
1374#endif
1375
1376static int __init dec_lance_init(void)
1377{
1378 int status;
1379
1380 status = tc_register_driver(&dec_lance_tc_driver);
1381 if (!status)
1382 dec_lance_platform_probe();
1383 return status;
1384}
1385
1386static void __exit dec_lance_exit(void)
1387{
1388 dec_lance_platform_remove();
1389 tc_unregister_driver(&dec_lance_tc_driver);
1390}
1391
1392
1393module_init(dec_lance_init);
1394module_exit(dec_lance_exit);
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index f091042b146e..689f158a469e 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -59,17 +59,13 @@
59#include <linux/capability.h> 59#include <linux/capability.h>
60#include <linux/in.h> 60#include <linux/in.h>
61#include <linux/ip.h> 61#include <linux/ip.h>
62#ifdef NETIF_F_TSO6
63#include <linux/ipv6.h> 62#include <linux/ipv6.h>
64#endif
65#include <linux/tcp.h> 63#include <linux/tcp.h>
66#include <linux/udp.h> 64#include <linux/udp.h>
67#include <net/pkt_sched.h> 65#include <net/pkt_sched.h>
68#include <linux/list.h> 66#include <linux/list.h>
69#include <linux/reboot.h> 67#include <linux/reboot.h>
70#ifdef NETIF_F_TSO
71#include <net/checksum.h> 68#include <net/checksum.h>
72#endif
73#include <linux/mii.h> 69#include <linux/mii.h>
74#include <linux/ethtool.h> 70#include <linux/ethtool.h>
75#include <linux/if_vlan.h> 71#include <linux/if_vlan.h>
@@ -257,7 +253,6 @@ struct e1000_adapter {
257 spinlock_t tx_queue_lock; 253 spinlock_t tx_queue_lock;
258#endif 254#endif
259 atomic_t irq_sem; 255 atomic_t irq_sem;
260 unsigned int detect_link;
261 unsigned int total_tx_bytes; 256 unsigned int total_tx_bytes;
262 unsigned int total_tx_packets; 257 unsigned int total_tx_packets;
263 unsigned int total_rx_bytes; 258 unsigned int total_rx_bytes;
@@ -348,9 +343,7 @@ struct e1000_adapter {
348 boolean_t have_msi; 343 boolean_t have_msi;
349#endif 344#endif
350 /* to not mess up cache alignment, always add to the bottom */ 345 /* to not mess up cache alignment, always add to the bottom */
351#ifdef NETIF_F_TSO
352 boolean_t tso_force; 346 boolean_t tso_force;
353#endif
354 boolean_t smart_power_down; /* phy smart power down */ 347 boolean_t smart_power_down; /* phy smart power down */
355 boolean_t quad_port_a; 348 boolean_t quad_port_a;
356 unsigned long flags; 349 unsigned long flags;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index fb96c87f9e56..44ebc72962dc 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -338,7 +338,6 @@ e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
338 return 0; 338 return 0;
339} 339}
340 340
341#ifdef NETIF_F_TSO
342static int 341static int
343e1000_set_tso(struct net_device *netdev, uint32_t data) 342e1000_set_tso(struct net_device *netdev, uint32_t data)
344{ 343{
@@ -352,18 +351,15 @@ e1000_set_tso(struct net_device *netdev, uint32_t data)
352 else 351 else
353 netdev->features &= ~NETIF_F_TSO; 352 netdev->features &= ~NETIF_F_TSO;
354 353
355#ifdef NETIF_F_TSO6
356 if (data) 354 if (data)
357 netdev->features |= NETIF_F_TSO6; 355 netdev->features |= NETIF_F_TSO6;
358 else 356 else
359 netdev->features &= ~NETIF_F_TSO6; 357 netdev->features &= ~NETIF_F_TSO6;
360#endif
361 358
362 DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled"); 359 DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
363 adapter->tso_force = TRUE; 360 adapter->tso_force = TRUE;
364 return 0; 361 return 0;
365} 362}
366#endif /* NETIF_F_TSO */
367 363
368static uint32_t 364static uint32_t
369e1000_get_msglevel(struct net_device *netdev) 365e1000_get_msglevel(struct net_device *netdev)
@@ -1971,10 +1967,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
1971 .set_tx_csum = e1000_set_tx_csum, 1967 .set_tx_csum = e1000_set_tx_csum,
1972 .get_sg = ethtool_op_get_sg, 1968 .get_sg = ethtool_op_get_sg,
1973 .set_sg = ethtool_op_set_sg, 1969 .set_sg = ethtool_op_set_sg,
1974#ifdef NETIF_F_TSO
1975 .get_tso = ethtool_op_get_tso, 1970 .get_tso = ethtool_op_get_tso,
1976 .set_tso = e1000_set_tso, 1971 .set_tso = e1000_set_tso,
1977#endif
1978 .self_test_count = e1000_diag_test_count, 1972 .self_test_count = e1000_diag_test_count,
1979 .self_test = e1000_diag_test, 1973 .self_test = e1000_diag_test,
1980 .get_strings = e1000_get_strings, 1974 .get_strings = e1000_get_strings,
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index c6259c7127f6..619c89218b4b 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -36,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
36#else 36#else
37#define DRIVERNAPI "-NAPI" 37#define DRIVERNAPI "-NAPI"
38#endif 38#endif
39#define DRV_VERSION "7.3.15-k2"DRIVERNAPI 39#define DRV_VERSION "7.3.20-k2"DRIVERNAPI
40char e1000_driver_version[] = DRV_VERSION; 40char e1000_driver_version[] = DRV_VERSION;
41static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; 41static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
42 42
@@ -990,16 +990,12 @@ e1000_probe(struct pci_dev *pdev,
990 netdev->features &= ~NETIF_F_HW_VLAN_FILTER; 990 netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
991 } 991 }
992 992
993#ifdef NETIF_F_TSO
994 if ((adapter->hw.mac_type >= e1000_82544) && 993 if ((adapter->hw.mac_type >= e1000_82544) &&
995 (adapter->hw.mac_type != e1000_82547)) 994 (adapter->hw.mac_type != e1000_82547))
996 netdev->features |= NETIF_F_TSO; 995 netdev->features |= NETIF_F_TSO;
997 996
998#ifdef NETIF_F_TSO6
999 if (adapter->hw.mac_type > e1000_82547_rev_2) 997 if (adapter->hw.mac_type > e1000_82547_rev_2)
1000 netdev->features |= NETIF_F_TSO6; 998 netdev->features |= NETIF_F_TSO6;
1001#endif
1002#endif
1003 if (pci_using_dac) 999 if (pci_using_dac)
1004 netdev->features |= NETIF_F_HIGHDMA; 1000 netdev->features |= NETIF_F_HIGHDMA;
1005 1001
@@ -2583,15 +2579,22 @@ e1000_watchdog(unsigned long data)
2583 2579
2584 if (link) { 2580 if (link) {
2585 if (!netif_carrier_ok(netdev)) { 2581 if (!netif_carrier_ok(netdev)) {
2582 uint32_t ctrl;
2586 boolean_t txb2b = 1; 2583 boolean_t txb2b = 1;
2587 e1000_get_speed_and_duplex(&adapter->hw, 2584 e1000_get_speed_and_duplex(&adapter->hw,
2588 &adapter->link_speed, 2585 &adapter->link_speed,
2589 &adapter->link_duplex); 2586 &adapter->link_duplex);
2590 2587
2591 DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s\n", 2588 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2592 adapter->link_speed, 2589 DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, "
2593 adapter->link_duplex == FULL_DUPLEX ? 2590 "Flow Control: %s\n",
2594 "Full Duplex" : "Half Duplex"); 2591 adapter->link_speed,
2592 adapter->link_duplex == FULL_DUPLEX ?
2593 "Full Duplex" : "Half Duplex",
2594 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2595 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2596 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2597 E1000_CTRL_TFCE) ? "TX" : "None" )));
2595 2598
2596 /* tweak tx_queue_len according to speed/duplex 2599 /* tweak tx_queue_len according to speed/duplex
2597 * and adjust the timeout factor */ 2600 * and adjust the timeout factor */
@@ -2619,7 +2622,6 @@ e1000_watchdog(unsigned long data)
2619 E1000_WRITE_REG(&adapter->hw, TARC0, tarc0); 2622 E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
2620 } 2623 }
2621 2624
2622#ifdef NETIF_F_TSO
2623 /* disable TSO for pcie and 10/100 speeds, to avoid 2625 /* disable TSO for pcie and 10/100 speeds, to avoid
2624 * some hardware issues */ 2626 * some hardware issues */
2625 if (!adapter->tso_force && 2627 if (!adapter->tso_force &&
@@ -2630,22 +2632,17 @@ e1000_watchdog(unsigned long data)
2630 DPRINTK(PROBE,INFO, 2632 DPRINTK(PROBE,INFO,
2631 "10/100 speed: disabling TSO\n"); 2633 "10/100 speed: disabling TSO\n");
2632 netdev->features &= ~NETIF_F_TSO; 2634 netdev->features &= ~NETIF_F_TSO;
2633#ifdef NETIF_F_TSO6
2634 netdev->features &= ~NETIF_F_TSO6; 2635 netdev->features &= ~NETIF_F_TSO6;
2635#endif
2636 break; 2636 break;
2637 case SPEED_1000: 2637 case SPEED_1000:
2638 netdev->features |= NETIF_F_TSO; 2638 netdev->features |= NETIF_F_TSO;
2639#ifdef NETIF_F_TSO6
2640 netdev->features |= NETIF_F_TSO6; 2639 netdev->features |= NETIF_F_TSO6;
2641#endif
2642 break; 2640 break;
2643 default: 2641 default:
2644 /* oops */ 2642 /* oops */
2645 break; 2643 break;
2646 } 2644 }
2647 } 2645 }
2648#endif
2649 2646
2650 /* enable transmits in the hardware, need to do this 2647 /* enable transmits in the hardware, need to do this
2651 * after setting TARC0 */ 2648 * after setting TARC0 */
@@ -2875,7 +2872,6 @@ static int
2875e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, 2872e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2876 struct sk_buff *skb) 2873 struct sk_buff *skb)
2877{ 2874{
2878#ifdef NETIF_F_TSO
2879 struct e1000_context_desc *context_desc; 2875 struct e1000_context_desc *context_desc;
2880 struct e1000_buffer *buffer_info; 2876 struct e1000_buffer *buffer_info;
2881 unsigned int i; 2877 unsigned int i;
@@ -2904,7 +2900,6 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2904 0); 2900 0);
2905 cmd_length = E1000_TXD_CMD_IP; 2901 cmd_length = E1000_TXD_CMD_IP;
2906 ipcse = skb->h.raw - skb->data - 1; 2902 ipcse = skb->h.raw - skb->data - 1;
2907#ifdef NETIF_F_TSO6
2908 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2903 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2909 skb->nh.ipv6h->payload_len = 0; 2904 skb->nh.ipv6h->payload_len = 0;
2910 skb->h.th->check = 2905 skb->h.th->check =
@@ -2914,7 +2909,6 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2914 IPPROTO_TCP, 2909 IPPROTO_TCP,
2915 0); 2910 0);
2916 ipcse = 0; 2911 ipcse = 0;
2917#endif
2918 } 2912 }
2919 ipcss = skb->nh.raw - skb->data; 2913 ipcss = skb->nh.raw - skb->data;
2920 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data; 2914 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
@@ -2947,8 +2941,6 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2947 2941
2948 return TRUE; 2942 return TRUE;
2949 } 2943 }
2950#endif
2951
2952 return FALSE; 2944 return FALSE;
2953} 2945}
2954 2946
@@ -2968,8 +2960,9 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2968 buffer_info = &tx_ring->buffer_info[i]; 2960 buffer_info = &tx_ring->buffer_info[i];
2969 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2961 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2970 2962
2963 context_desc->lower_setup.ip_config = 0;
2971 context_desc->upper_setup.tcp_fields.tucss = css; 2964 context_desc->upper_setup.tcp_fields.tucss = css;
2972 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset; 2965 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
2973 context_desc->upper_setup.tcp_fields.tucse = 0; 2966 context_desc->upper_setup.tcp_fields.tucse = 0;
2974 context_desc->tcp_seg_setup.data = 0; 2967 context_desc->tcp_seg_setup.data = 0;
2975 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); 2968 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
@@ -3005,7 +2998,6 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
3005 while (len) { 2998 while (len) {
3006 buffer_info = &tx_ring->buffer_info[i]; 2999 buffer_info = &tx_ring->buffer_info[i];
3007 size = min(len, max_per_txd); 3000 size = min(len, max_per_txd);
3008#ifdef NETIF_F_TSO
3009 /* Workaround for Controller erratum -- 3001 /* Workaround for Controller erratum --
3010 * descriptor for non-tso packet in a linear SKB that follows a 3002 * descriptor for non-tso packet in a linear SKB that follows a
3011 * tso gets written back prematurely before the data is fully 3003 * tso gets written back prematurely before the data is fully
@@ -3020,7 +3012,6 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
3020 * in TSO mode. Append 4-byte sentinel desc */ 3012 * in TSO mode. Append 4-byte sentinel desc */
3021 if (unlikely(mss && !nr_frags && size == len && size > 8)) 3013 if (unlikely(mss && !nr_frags && size == len && size > 8))
3022 size -= 4; 3014 size -= 4;
3023#endif
3024 /* work-around for errata 10 and it applies 3015 /* work-around for errata 10 and it applies
3025 * to all controllers in PCI-X mode 3016 * to all controllers in PCI-X mode
3026 * The fix is to make sure that the first descriptor of a 3017 * The fix is to make sure that the first descriptor of a
@@ -3062,12 +3053,10 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
3062 while (len) { 3053 while (len) {
3063 buffer_info = &tx_ring->buffer_info[i]; 3054 buffer_info = &tx_ring->buffer_info[i];
3064 size = min(len, max_per_txd); 3055 size = min(len, max_per_txd);
3065#ifdef NETIF_F_TSO
3066 /* Workaround for premature desc write-backs 3056 /* Workaround for premature desc write-backs
3067 * in TSO mode. Append 4-byte sentinel desc */ 3057 * in TSO mode. Append 4-byte sentinel desc */
3068 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) 3058 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
3069 size -= 4; 3059 size -= 4;
3070#endif
3071 /* Workaround for potential 82544 hang in PCI-X. 3060 /* Workaround for potential 82544 hang in PCI-X.
3072 * Avoid terminating buffers within evenly-aligned 3061 * Avoid terminating buffers within evenly-aligned
3073 * dwords. */ 3062 * dwords. */
@@ -3292,7 +3281,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3292 if (adapter->hw.mac_type >= e1000_82571) 3281 if (adapter->hw.mac_type >= e1000_82571)
3293 max_per_txd = 8192; 3282 max_per_txd = 8192;
3294 3283
3295#ifdef NETIF_F_TSO
3296 mss = skb_shinfo(skb)->gso_size; 3284 mss = skb_shinfo(skb)->gso_size;
3297 /* The controller does a simple calculation to 3285 /* The controller does a simple calculation to
3298 * make sure there is enough room in the FIFO before 3286 * make sure there is enough room in the FIFO before
@@ -3346,16 +3334,10 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3346 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) 3334 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3347 count++; 3335 count++;
3348 count++; 3336 count++;
3349#else
3350 if (skb->ip_summed == CHECKSUM_PARTIAL)
3351 count++;
3352#endif
3353 3337
3354#ifdef NETIF_F_TSO
3355 /* Controller Erratum workaround */ 3338 /* Controller Erratum workaround */
3356 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) 3339 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3357 count++; 3340 count++;
3358#endif
3359 3341
3360 count += TXD_USE_COUNT(len, max_txd_pwr); 3342 count += TXD_USE_COUNT(len, max_txd_pwr);
3361 3343
@@ -3602,7 +3584,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
3602 */ 3584 */
3603 if (adapter->link_speed == 0) 3585 if (adapter->link_speed == 0)
3604 return; 3586 return;
3605 if (pdev->error_state && pdev->error_state != pci_channel_io_normal) 3587 if (pci_channel_offline(pdev))
3606 return; 3588 return;
3607 3589
3608 spin_lock_irqsave(&adapter->stats_lock, flags); 3590 spin_lock_irqsave(&adapter->stats_lock, flags);
@@ -3765,8 +3747,8 @@ e1000_update_stats(struct e1000_adapter *adapter)
3765 * @data: pointer to a network interface device structure 3747 * @data: pointer to a network interface device structure
3766 **/ 3748 **/
3767 3749
3768static 3750static irqreturn_t
3769irqreturn_t e1000_intr_msi(int irq, void *data) 3751e1000_intr_msi(int irq, void *data)
3770{ 3752{
3771 struct net_device *netdev = data; 3753 struct net_device *netdev = data;
3772 struct e1000_adapter *adapter = netdev_priv(netdev); 3754 struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3774,49 +3756,27 @@ irqreturn_t e1000_intr_msi(int irq, void *data)
3774#ifndef CONFIG_E1000_NAPI 3756#ifndef CONFIG_E1000_NAPI
3775 int i; 3757 int i;
3776#endif 3758#endif
3759 uint32_t icr = E1000_READ_REG(hw, ICR);
3777 3760
3778 /* this code avoids the read of ICR but has to get 1000 interrupts
3779 * at every link change event before it will notice the change */
3780 if (++adapter->detect_link >= 1000) {
3781 uint32_t icr = E1000_READ_REG(hw, ICR);
3782#ifdef CONFIG_E1000_NAPI 3761#ifdef CONFIG_E1000_NAPI
3783 /* read ICR disables interrupts using IAM, so keep up with our 3762 /* read ICR disables interrupts using IAM, so keep up with our
3784 * enable/disable accounting */ 3763 * enable/disable accounting */
3785 atomic_inc(&adapter->irq_sem); 3764 atomic_inc(&adapter->irq_sem);
3786#endif 3765#endif
3787 adapter->detect_link = 0; 3766 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3788 if ((icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) && 3767 hw->get_link_status = 1;
3789 (icr & E1000_ICR_INT_ASSERTED)) { 3768 /* 80003ES2LAN workaround-- For packet buffer work-around on
3790 hw->get_link_status = 1; 3769 * link down event; disable receives here in the ISR and reset
3791 /* 80003ES2LAN workaround-- 3770 * adapter in watchdog */
3792 * For packet buffer work-around on link down event; 3771 if (netif_carrier_ok(netdev) &&
3793 * disable receives here in the ISR and 3772 (adapter->hw.mac_type == e1000_80003es2lan)) {
3794 * reset adapter in watchdog 3773 /* disable receives */
3795 */ 3774 uint32_t rctl = E1000_READ_REG(hw, RCTL);
3796 if (netif_carrier_ok(netdev) && 3775 E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
3797 (adapter->hw.mac_type == e1000_80003es2lan)) {
3798 /* disable receives */
3799 uint32_t rctl = E1000_READ_REG(hw, RCTL);
3800 E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
3801 }
3802 /* guard against interrupt when we're going down */
3803 if (!test_bit(__E1000_DOWN, &adapter->flags))
3804 mod_timer(&adapter->watchdog_timer,
3805 jiffies + 1);
3806 } 3776 }
3807 } else { 3777 /* guard against interrupt when we're going down */
3808 E1000_WRITE_REG(hw, ICR, (0xffffffff & ~(E1000_ICR_RXSEQ | 3778 if (!test_bit(__E1000_DOWN, &adapter->flags))
3809 E1000_ICR_LSC))); 3779 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3810 /* bummer we have to flush here, but things break otherwise as
3811 * some event appears to be lost or delayed and throughput
3812 * drops. In almost all tests this flush is un-necessary */
3813 E1000_WRITE_FLUSH(hw);
3814#ifdef CONFIG_E1000_NAPI
3815 /* Interrupt Auto-Mask (IAM)...upon writing ICR, interrupts are
3816 * masked. No need for the IMC write, but it does mean we
3817 * should account for it ASAP. */
3818 atomic_inc(&adapter->irq_sem);
3819#endif
3820 } 3780 }
3821 3781
3822#ifdef CONFIG_E1000_NAPI 3782#ifdef CONFIG_E1000_NAPI
@@ -3836,7 +3796,7 @@ irqreturn_t e1000_intr_msi(int irq, void *data)
3836 3796
3837 for (i = 0; i < E1000_MAX_INTR; i++) 3797 for (i = 0; i < E1000_MAX_INTR; i++)
3838 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & 3798 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
3839 !e1000_clean_tx_irq(adapter, adapter->tx_ring))) 3799 e1000_clean_tx_irq(adapter, adapter->tx_ring)))
3840 break; 3800 break;
3841 3801
3842 if (likely(adapter->itr_setting & 3)) 3802 if (likely(adapter->itr_setting & 3))
@@ -3939,7 +3899,7 @@ e1000_intr(int irq, void *data)
3939 3899
3940 for (i = 0; i < E1000_MAX_INTR; i++) 3900 for (i = 0; i < E1000_MAX_INTR; i++)
3941 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & 3901 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
3942 !e1000_clean_tx_irq(adapter, adapter->tx_ring))) 3902 e1000_clean_tx_irq(adapter, adapter->tx_ring)))
3943 break; 3903 break;
3944 3904
3945 if (likely(adapter->itr_setting & 3)) 3905 if (likely(adapter->itr_setting & 3))
@@ -3989,7 +3949,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
3989 poll_dev->quota -= work_done; 3949 poll_dev->quota -= work_done;
3990 3950
3991 /* If no Tx and not enough Rx work done, exit the polling mode */ 3951 /* If no Tx and not enough Rx work done, exit the polling mode */
3992 if ((!tx_cleaned && (work_done == 0)) || 3952 if ((tx_cleaned && (work_done < work_to_do)) ||
3993 !netif_running(poll_dev)) { 3953 !netif_running(poll_dev)) {
3994quit_polling: 3954quit_polling:
3995 if (likely(adapter->itr_setting & 3)) 3955 if (likely(adapter->itr_setting & 3))
@@ -4019,7 +3979,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
4019#ifdef CONFIG_E1000_NAPI 3979#ifdef CONFIG_E1000_NAPI
4020 unsigned int count = 0; 3980 unsigned int count = 0;
4021#endif 3981#endif
4022 boolean_t cleaned = FALSE; 3982 boolean_t cleaned = TRUE;
4023 unsigned int total_tx_bytes=0, total_tx_packets=0; 3983 unsigned int total_tx_bytes=0, total_tx_packets=0;
4024 3984
4025 i = tx_ring->next_to_clean; 3985 i = tx_ring->next_to_clean;
@@ -4034,10 +3994,13 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
4034 3994
4035 if (cleaned) { 3995 if (cleaned) {
4036 struct sk_buff *skb = buffer_info->skb; 3996 struct sk_buff *skb = buffer_info->skb;
4037 unsigned int segs = skb_shinfo(skb)->gso_segs; 3997 unsigned int segs, bytecount;
3998 segs = skb_shinfo(skb)->gso_segs ?: 1;
3999 /* multiply data chunks by size of headers */
4000 bytecount = ((segs - 1) * skb_headlen(skb)) +
4001 skb->len;
4038 total_tx_packets += segs; 4002 total_tx_packets += segs;
4039 total_tx_packets++; 4003 total_tx_bytes += bytecount;
4040 total_tx_bytes += skb->len;
4041 } 4004 }
4042 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 4005 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
4043 tx_desc->upper.data = 0; 4006 tx_desc->upper.data = 0;
@@ -4050,7 +4013,10 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
4050#ifdef CONFIG_E1000_NAPI 4013#ifdef CONFIG_E1000_NAPI
4051#define E1000_TX_WEIGHT 64 4014#define E1000_TX_WEIGHT 64
4052 /* weight of a sort for tx, to avoid endless transmit cleanup */ 4015 /* weight of a sort for tx, to avoid endless transmit cleanup */
4053 if (count++ == E1000_TX_WEIGHT) break; 4016 if (count++ == E1000_TX_WEIGHT) {
4017 cleaned = FALSE;
4018 break;
4019 }
4054#endif 4020#endif
4055 } 4021 }
4056 4022
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index 18afc0c25dac..10af742d8a20 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -48,8 +48,6 @@ typedef enum {
48 TRUE = 1 48 TRUE = 1
49} boolean_t; 49} boolean_t;
50 50
51#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)
52
53#ifdef DBG 51#ifdef DBG
54#define DEBUGOUT(S) printk(KERN_DEBUG S "\n") 52#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
55#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A) 53#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
@@ -58,7 +56,7 @@ typedef enum {
58#define DEBUGOUT1(S, A...) 56#define DEBUGOUT1(S, A...)
59#endif 57#endif
60 58
61#define DEBUGFUNC(F) DEBUGOUT(F) 59#define DEBUGFUNC(F) DEBUGOUT(F "\n")
62#define DEBUGOUT2 DEBUGOUT1 60#define DEBUGOUT2 DEBUGOUT1
63#define DEBUGOUT3 DEBUGOUT2 61#define DEBUGOUT3 DEBUGOUT2
64#define DEBUGOUT7 DEBUGOUT3 62#define DEBUGOUT7 DEBUGOUT3
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index cf2a279307e1..f8862e203ac9 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -760,22 +760,13 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
760 case SPEED_1000: 760 case SPEED_1000:
761 DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without " 761 DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without "
762 "Duplex\n"); 762 "Duplex\n");
763 DPRINTK(PROBE, INFO, 763 goto full_duplex_only;
764 "Using Autonegotiation at 1000 Mbps "
765 "Full Duplex only\n");
766 adapter->hw.autoneg = adapter->fc_autoneg = 1;
767 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
768 break;
769 case SPEED_1000 + HALF_DUPLEX: 764 case SPEED_1000 + HALF_DUPLEX:
770 DPRINTK(PROBE, INFO, 765 DPRINTK(PROBE, INFO,
771 "Half Duplex is not supported at 1000 Mbps\n"); 766 "Half Duplex is not supported at 1000 Mbps\n");
772 DPRINTK(PROBE, INFO, 767 /* fall through */
773 "Using Autonegotiation at 1000 Mbps "
774 "Full Duplex only\n");
775 adapter->hw.autoneg = adapter->fc_autoneg = 1;
776 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
777 break;
778 case SPEED_1000 + FULL_DUPLEX: 768 case SPEED_1000 + FULL_DUPLEX:
769full_duplex_only:
779 DPRINTK(PROBE, INFO, 770 DPRINTK(PROBE, INFO,
780 "Using Autonegotiation at 1000 Mbps Full Duplex only\n"); 771 "Using Autonegotiation at 1000 Mbps Full Duplex only\n");
781 adapter->hw.autoneg = adapter->fc_autoneg = 1; 772 adapter->hw.autoneg = adapter->fc_autoneg = 1;
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 93f2b7a22160..a363148d0198 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -111,6 +111,7 @@
111 * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections. 111 * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
112 * 0.58: 30 Oct 2006: Added support for sideband management unit. 112 * 0.58: 30 Oct 2006: Added support for sideband management unit.
113 * 0.59: 30 Oct 2006: Added support for recoverable error. 113 * 0.59: 30 Oct 2006: Added support for recoverable error.
114 * 0.60: 20 Jan 2007: Code optimizations for rings, rx & tx data paths, and stats.
114 * 115 *
115 * Known bugs: 116 * Known bugs:
116 * We suspect that on some hardware no TX done interrupts are generated. 117 * We suspect that on some hardware no TX done interrupts are generated.
@@ -127,7 +128,7 @@
127#else 128#else
128#define DRIVERNAPI 129#define DRIVERNAPI
129#endif 130#endif
130#define FORCEDETH_VERSION "0.59" 131#define FORCEDETH_VERSION "0.60"
131#define DRV_NAME "forcedeth" 132#define DRV_NAME "forcedeth"
132 133
133#include <linux/module.h> 134#include <linux/module.h>
@@ -173,9 +174,10 @@
173#define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */ 174#define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */
174#define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */ 175#define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */
175#define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */ 176#define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */
176#define DEV_HAS_STATISTICS 0x0400 /* device supports hw statistics */ 177#define DEV_HAS_STATISTICS_V1 0x0400 /* device supports hw statistics version 1 */
177#define DEV_HAS_TEST_EXTENDED 0x0800 /* device supports extended diagnostic test */ 178#define DEV_HAS_STATISTICS_V2 0x0800 /* device supports hw statistics version 2 */
178#define DEV_HAS_MGMT_UNIT 0x1000 /* device supports management unit */ 179#define DEV_HAS_TEST_EXTENDED 0x1000 /* device supports extended diagnostic test */
180#define DEV_HAS_MGMT_UNIT 0x2000 /* device supports management unit */
179 181
180enum { 182enum {
181 NvRegIrqStatus = 0x000, 183 NvRegIrqStatus = 0x000,
@@ -210,7 +212,7 @@ enum {
210 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms 212 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
211 */ 213 */
212 NvRegPollingInterval = 0x00c, 214 NvRegPollingInterval = 0x00c,
213#define NVREG_POLL_DEFAULT_THROUGHPUT 970 215#define NVREG_POLL_DEFAULT_THROUGHPUT 970 /* backup tx cleanup if loop max reached */
214#define NVREG_POLL_DEFAULT_CPU 13 216#define NVREG_POLL_DEFAULT_CPU 13
215 NvRegMSIMap0 = 0x020, 217 NvRegMSIMap0 = 0x020,
216 NvRegMSIMap1 = 0x024, 218 NvRegMSIMap1 = 0x024,
@@ -304,8 +306,8 @@ enum {
304#define NVREG_TXRXCTL_RESET 0x0010 306#define NVREG_TXRXCTL_RESET 0x0010
305#define NVREG_TXRXCTL_RXCHECK 0x0400 307#define NVREG_TXRXCTL_RXCHECK 0x0400
306#define NVREG_TXRXCTL_DESC_1 0 308#define NVREG_TXRXCTL_DESC_1 0
307#define NVREG_TXRXCTL_DESC_2 0x02100 309#define NVREG_TXRXCTL_DESC_2 0x002100
308#define NVREG_TXRXCTL_DESC_3 0x02200 310#define NVREG_TXRXCTL_DESC_3 0xc02200
309#define NVREG_TXRXCTL_VLANSTRIP 0x00040 311#define NVREG_TXRXCTL_VLANSTRIP 0x00040
310#define NVREG_TXRXCTL_VLANINS 0x00080 312#define NVREG_TXRXCTL_VLANINS 0x00080
311 NvRegTxRingPhysAddrHigh = 0x148, 313 NvRegTxRingPhysAddrHigh = 0x148,
@@ -487,7 +489,8 @@ union ring_type {
487 489
488/* Miscelaneous hardware related defines: */ 490/* Miscelaneous hardware related defines: */
489#define NV_PCI_REGSZ_VER1 0x270 491#define NV_PCI_REGSZ_VER1 0x270
490#define NV_PCI_REGSZ_VER2 0x604 492#define NV_PCI_REGSZ_VER2 0x2d4
493#define NV_PCI_REGSZ_VER3 0x604
491 494
492/* various timeout delays: all in usec */ 495/* various timeout delays: all in usec */
493#define NV_TXRX_RESET_DELAY 4 496#define NV_TXRX_RESET_DELAY 4
@@ -518,12 +521,6 @@ union ring_type {
518#define TX_RING_MIN 64 521#define TX_RING_MIN 64
519#define RING_MAX_DESC_VER_1 1024 522#define RING_MAX_DESC_VER_1 1024
520#define RING_MAX_DESC_VER_2_3 16384 523#define RING_MAX_DESC_VER_2_3 16384
521/*
522 * Difference between the get and put pointers for the tx ring.
523 * This is used to throttle the amount of data outstanding in the
524 * tx ring.
525 */
526#define TX_LIMIT_DIFFERENCE 1
527 524
528/* rx/tx mac addr + type + vlan + align + slack*/ 525/* rx/tx mac addr + type + vlan + align + slack*/
529#define NV_RX_HEADERS (64) 526#define NV_RX_HEADERS (64)
@@ -611,9 +608,6 @@ static const struct nv_ethtool_str nv_estats_str[] = {
611 { "tx_carrier_errors" }, 608 { "tx_carrier_errors" },
612 { "tx_excess_deferral" }, 609 { "tx_excess_deferral" },
613 { "tx_retry_error" }, 610 { "tx_retry_error" },
614 { "tx_deferral" },
615 { "tx_packets" },
616 { "tx_pause" },
617 { "rx_frame_error" }, 611 { "rx_frame_error" },
618 { "rx_extra_byte" }, 612 { "rx_extra_byte" },
619 { "rx_late_collision" }, 613 { "rx_late_collision" },
@@ -626,11 +620,17 @@ static const struct nv_ethtool_str nv_estats_str[] = {
626 { "rx_unicast" }, 620 { "rx_unicast" },
627 { "rx_multicast" }, 621 { "rx_multicast" },
628 { "rx_broadcast" }, 622 { "rx_broadcast" },
623 { "rx_packets" },
624 { "rx_errors_total" },
625 { "tx_errors_total" },
626
627 /* version 2 stats */
628 { "tx_deferral" },
629 { "tx_packets" },
629 { "rx_bytes" }, 630 { "rx_bytes" },
631 { "tx_pause" },
630 { "rx_pause" }, 632 { "rx_pause" },
631 { "rx_drop_frame" }, 633 { "rx_drop_frame" }
632 { "rx_packets" },
633 { "rx_errors_total" }
634}; 634};
635 635
636struct nv_ethtool_stats { 636struct nv_ethtool_stats {
@@ -643,9 +643,6 @@ struct nv_ethtool_stats {
643 u64 tx_carrier_errors; 643 u64 tx_carrier_errors;
644 u64 tx_excess_deferral; 644 u64 tx_excess_deferral;
645 u64 tx_retry_error; 645 u64 tx_retry_error;
646 u64 tx_deferral;
647 u64 tx_packets;
648 u64 tx_pause;
649 u64 rx_frame_error; 646 u64 rx_frame_error;
650 u64 rx_extra_byte; 647 u64 rx_extra_byte;
651 u64 rx_late_collision; 648 u64 rx_late_collision;
@@ -658,13 +655,22 @@ struct nv_ethtool_stats {
658 u64 rx_unicast; 655 u64 rx_unicast;
659 u64 rx_multicast; 656 u64 rx_multicast;
660 u64 rx_broadcast; 657 u64 rx_broadcast;
658 u64 rx_packets;
659 u64 rx_errors_total;
660 u64 tx_errors_total;
661
662 /* version 2 stats */
663 u64 tx_deferral;
664 u64 tx_packets;
661 u64 rx_bytes; 665 u64 rx_bytes;
666 u64 tx_pause;
662 u64 rx_pause; 667 u64 rx_pause;
663 u64 rx_drop_frame; 668 u64 rx_drop_frame;
664 u64 rx_packets;
665 u64 rx_errors_total;
666}; 669};
667 670
671#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
672#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
673
668/* diagnostics */ 674/* diagnostics */
669#define NV_TEST_COUNT_BASE 3 675#define NV_TEST_COUNT_BASE 3
670#define NV_TEST_COUNT_EXTENDED 4 676#define NV_TEST_COUNT_EXTENDED 4
@@ -691,6 +697,12 @@ static const struct register_test nv_registers_test[] = {
691 { 0,0 } 697 { 0,0 }
692}; 698};
693 699
700struct nv_skb_map {
701 struct sk_buff *skb;
702 dma_addr_t dma;
703 unsigned int dma_len;
704};
705
694/* 706/*
695 * SMP locking: 707 * SMP locking:
696 * All hardware access under dev->priv->lock, except the performance 708 * All hardware access under dev->priv->lock, except the performance
@@ -741,10 +753,12 @@ struct fe_priv {
741 /* rx specific fields. 753 /* rx specific fields.
742 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); 754 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
743 */ 755 */
756 union ring_type get_rx, put_rx, first_rx, last_rx;
757 struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
758 struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
759 struct nv_skb_map *rx_skb;
760
744 union ring_type rx_ring; 761 union ring_type rx_ring;
745 unsigned int cur_rx, refill_rx;
746 struct sk_buff **rx_skbuff;
747 dma_addr_t *rx_dma;
748 unsigned int rx_buf_sz; 762 unsigned int rx_buf_sz;
749 unsigned int pkt_limit; 763 unsigned int pkt_limit;
750 struct timer_list oom_kick; 764 struct timer_list oom_kick;
@@ -761,15 +775,15 @@ struct fe_priv {
761 /* 775 /*
762 * tx specific fields. 776 * tx specific fields.
763 */ 777 */
778 union ring_type get_tx, put_tx, first_tx, last_tx;
779 struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
780 struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
781 struct nv_skb_map *tx_skb;
782
764 union ring_type tx_ring; 783 union ring_type tx_ring;
765 unsigned int next_tx, nic_tx;
766 struct sk_buff **tx_skbuff;
767 dma_addr_t *tx_dma;
768 unsigned int *tx_dma_len;
769 u32 tx_flags; 784 u32 tx_flags;
770 int tx_ring_size; 785 int tx_ring_size;
771 int tx_limit_start; 786 int tx_stop;
772 int tx_limit_stop;
773 787
774 /* vlan fields */ 788 /* vlan fields */
775 struct vlan_group *vlangrp; 789 struct vlan_group *vlangrp;
@@ -921,16 +935,10 @@ static void free_rings(struct net_device *dev)
921 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), 935 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
922 np->rx_ring.ex, np->ring_addr); 936 np->rx_ring.ex, np->ring_addr);
923 } 937 }
924 if (np->rx_skbuff) 938 if (np->rx_skb)
925 kfree(np->rx_skbuff); 939 kfree(np->rx_skb);
926 if (np->rx_dma) 940 if (np->tx_skb)
927 kfree(np->rx_dma); 941 kfree(np->tx_skb);
928 if (np->tx_skbuff)
929 kfree(np->tx_skbuff);
930 if (np->tx_dma)
931 kfree(np->tx_dma);
932 if (np->tx_dma_len)
933 kfree(np->tx_dma_len);
934} 942}
935 943
936static int using_multi_irqs(struct net_device *dev) 944static int using_multi_irqs(struct net_device *dev)
@@ -1279,6 +1287,61 @@ static void nv_mac_reset(struct net_device *dev)
1279 pci_push(base); 1287 pci_push(base);
1280} 1288}
1281 1289
1290static void nv_get_hw_stats(struct net_device *dev)
1291{
1292 struct fe_priv *np = netdev_priv(dev);
1293 u8 __iomem *base = get_hwbase(dev);
1294
1295 np->estats.tx_bytes += readl(base + NvRegTxCnt);
1296 np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
1297 np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
1298 np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
1299 np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
1300 np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
1301 np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
1302 np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
1303 np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
1304 np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
1305 np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
1306 np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
1307 np->estats.rx_runt += readl(base + NvRegRxRunt);
1308 np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
1309 np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
1310 np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
1311 np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
1312 np->estats.rx_length_error += readl(base + NvRegRxLenErr);
1313 np->estats.rx_unicast += readl(base + NvRegRxUnicast);
1314 np->estats.rx_multicast += readl(base + NvRegRxMulticast);
1315 np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
1316 np->estats.rx_packets =
1317 np->estats.rx_unicast +
1318 np->estats.rx_multicast +
1319 np->estats.rx_broadcast;
1320 np->estats.rx_errors_total =
1321 np->estats.rx_crc_errors +
1322 np->estats.rx_over_errors +
1323 np->estats.rx_frame_error +
1324 (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
1325 np->estats.rx_late_collision +
1326 np->estats.rx_runt +
1327 np->estats.rx_frame_too_long;
1328 np->estats.tx_errors_total =
1329 np->estats.tx_late_collision +
1330 np->estats.tx_fifo_errors +
1331 np->estats.tx_carrier_errors +
1332 np->estats.tx_excess_deferral +
1333 np->estats.tx_retry_error;
1334
1335 if (np->driver_data & DEV_HAS_STATISTICS_V2) {
1336 np->estats.tx_deferral += readl(base + NvRegTxDef);
1337 np->estats.tx_packets += readl(base + NvRegTxFrame);
1338 np->estats.rx_bytes += readl(base + NvRegRxCnt);
1339 np->estats.tx_pause += readl(base + NvRegTxPause);
1340 np->estats.rx_pause += readl(base + NvRegRxPause);
1341 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
1342 }
1343}
1344
1282/* 1345/*
1283 * nv_get_stats: dev->get_stats function 1346 * nv_get_stats: dev->get_stats function
1284 * Get latest stats value from the nic. 1347 * Get latest stats value from the nic.
@@ -1289,10 +1352,19 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
1289{ 1352{
1290 struct fe_priv *np = netdev_priv(dev); 1353 struct fe_priv *np = netdev_priv(dev);
1291 1354
1292 /* It seems that the nic always generates interrupts and doesn't 1355 /* If the nic supports hw counters then retrieve latest values */
1293 * accumulate errors internally. Thus the current values in np->stats 1356 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) {
1294 * are already up to date. 1357 nv_get_hw_stats(dev);
1295 */ 1358
1359 /* copy to net_device stats */
1360 np->stats.tx_bytes = np->estats.tx_bytes;
1361 np->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
1362 np->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
1363 np->stats.rx_crc_errors = np->estats.rx_crc_errors;
1364 np->stats.rx_over_errors = np->estats.rx_over_errors;
1365 np->stats.rx_errors = np->estats.rx_errors_total;
1366 np->stats.tx_errors = np->estats.tx_errors_total;
1367 }
1296 return &np->stats; 1368 return &np->stats;
1297} 1369}
1298 1370
@@ -1304,43 +1376,63 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
1304static int nv_alloc_rx(struct net_device *dev) 1376static int nv_alloc_rx(struct net_device *dev)
1305{ 1377{
1306 struct fe_priv *np = netdev_priv(dev); 1378 struct fe_priv *np = netdev_priv(dev);
1307 unsigned int refill_rx = np->refill_rx; 1379 struct ring_desc* less_rx;
1308 int nr;
1309 1380
1310 while (np->cur_rx != refill_rx) { 1381 less_rx = np->get_rx.orig;
1311 struct sk_buff *skb; 1382 if (less_rx-- == np->first_rx.orig)
1312 1383 less_rx = np->last_rx.orig;
1313 nr = refill_rx % np->rx_ring_size;
1314 if (np->rx_skbuff[nr] == NULL) {
1315
1316 skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1317 if (!skb)
1318 break;
1319 1384
1385 while (np->put_rx.orig != less_rx) {
1386 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1387 if (skb) {
1320 skb->dev = dev; 1388 skb->dev = dev;
1321 np->rx_skbuff[nr] = skb; 1389 np->put_rx_ctx->skb = skb;
1390 np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
1391 skb->end-skb->data, PCI_DMA_FROMDEVICE);
1392 np->put_rx_ctx->dma_len = skb->end-skb->data;
1393 np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
1394 wmb();
1395 np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
1396 if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
1397 np->put_rx.orig = np->first_rx.orig;
1398 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1399 np->put_rx_ctx = np->first_rx_ctx;
1322 } else { 1400 } else {
1323 skb = np->rx_skbuff[nr]; 1401 return 1;
1324 } 1402 }
1325 np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, 1403 }
1326 skb->end-skb->data, PCI_DMA_FROMDEVICE); 1404 return 0;
1327 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1405}
1328 np->rx_ring.orig[nr].buf = cpu_to_le32(np->rx_dma[nr]); 1406
1407static int nv_alloc_rx_optimized(struct net_device *dev)
1408{
1409 struct fe_priv *np = netdev_priv(dev);
1410 struct ring_desc_ex* less_rx;
1411
1412 less_rx = np->get_rx.ex;
1413 if (less_rx-- == np->first_rx.ex)
1414 less_rx = np->last_rx.ex;
1415
1416 while (np->put_rx.ex != less_rx) {
1417 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1418 if (skb) {
1419 skb->dev = dev;
1420 np->put_rx_ctx->skb = skb;
1421 np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
1422 skb->end-skb->data, PCI_DMA_FROMDEVICE);
1423 np->put_rx_ctx->dma_len = skb->end-skb->data;
1424 np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
1425 np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
1329 wmb(); 1426 wmb();
1330 np->rx_ring.orig[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); 1427 np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1428 if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
1429 np->put_rx.ex = np->first_rx.ex;
1430 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1431 np->put_rx_ctx = np->first_rx_ctx;
1331 } else { 1432 } else {
1332 np->rx_ring.ex[nr].bufhigh = cpu_to_le64(np->rx_dma[nr]) >> 32; 1433 return 1;
1333 np->rx_ring.ex[nr].buflow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
1334 wmb();
1335 np->rx_ring.ex[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1336 } 1434 }
1337 dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
1338 dev->name, refill_rx);
1339 refill_rx++;
1340 } 1435 }
1341 np->refill_rx = refill_rx;
1342 if (np->cur_rx - refill_rx == np->rx_ring_size)
1343 return 1;
1344 return 0; 1436 return 0;
1345} 1437}
1346 1438
@@ -1358,6 +1450,7 @@ static void nv_do_rx_refill(unsigned long data)
1358{ 1450{
1359 struct net_device *dev = (struct net_device *) data; 1451 struct net_device *dev = (struct net_device *) data;
1360 struct fe_priv *np = netdev_priv(dev); 1452 struct fe_priv *np = netdev_priv(dev);
1453 int retcode;
1361 1454
1362 if (!using_multi_irqs(dev)) { 1455 if (!using_multi_irqs(dev)) {
1363 if (np->msi_flags & NV_MSI_X_ENABLED) 1456 if (np->msi_flags & NV_MSI_X_ENABLED)
@@ -1367,7 +1460,11 @@ static void nv_do_rx_refill(unsigned long data)
1367 } else { 1460 } else {
1368 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 1461 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1369 } 1462 }
1370 if (nv_alloc_rx(dev)) { 1463 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1464 retcode = nv_alloc_rx(dev);
1465 else
1466 retcode = nv_alloc_rx_optimized(dev);
1467 if (retcode) {
1371 spin_lock_irq(&np->lock); 1468 spin_lock_irq(&np->lock);
1372 if (!np->in_shutdown) 1469 if (!np->in_shutdown)
1373 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 1470 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
@@ -1388,56 +1485,81 @@ static void nv_init_rx(struct net_device *dev)
1388{ 1485{
1389 struct fe_priv *np = netdev_priv(dev); 1486 struct fe_priv *np = netdev_priv(dev);
1390 int i; 1487 int i;
1488 np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
1489 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1490 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
1491 else
1492 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
1493 np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
1494 np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
1391 1495
1392 np->cur_rx = np->rx_ring_size; 1496 for (i = 0; i < np->rx_ring_size; i++) {
1393 np->refill_rx = 0; 1497 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1394 for (i = 0; i < np->rx_ring_size; i++)
1395 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1396 np->rx_ring.orig[i].flaglen = 0; 1498 np->rx_ring.orig[i].flaglen = 0;
1397 else 1499 np->rx_ring.orig[i].buf = 0;
1500 } else {
1398 np->rx_ring.ex[i].flaglen = 0; 1501 np->rx_ring.ex[i].flaglen = 0;
1502 np->rx_ring.ex[i].txvlan = 0;
1503 np->rx_ring.ex[i].bufhigh = 0;
1504 np->rx_ring.ex[i].buflow = 0;
1505 }
1506 np->rx_skb[i].skb = NULL;
1507 np->rx_skb[i].dma = 0;
1508 }
1399} 1509}
1400 1510
1401static void nv_init_tx(struct net_device *dev) 1511static void nv_init_tx(struct net_device *dev)
1402{ 1512{
1403 struct fe_priv *np = netdev_priv(dev); 1513 struct fe_priv *np = netdev_priv(dev);
1404 int i; 1514 int i;
1515 np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
1516 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1517 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
1518 else
1519 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
1520 np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
1521 np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
1405 1522
1406 np->next_tx = np->nic_tx = 0;
1407 for (i = 0; i < np->tx_ring_size; i++) { 1523 for (i = 0; i < np->tx_ring_size; i++) {
1408 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1524 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1409 np->tx_ring.orig[i].flaglen = 0; 1525 np->tx_ring.orig[i].flaglen = 0;
1410 else 1526 np->tx_ring.orig[i].buf = 0;
1527 } else {
1411 np->tx_ring.ex[i].flaglen = 0; 1528 np->tx_ring.ex[i].flaglen = 0;
1412 np->tx_skbuff[i] = NULL; 1529 np->tx_ring.ex[i].txvlan = 0;
1413 np->tx_dma[i] = 0; 1530 np->tx_ring.ex[i].bufhigh = 0;
1531 np->tx_ring.ex[i].buflow = 0;
1532 }
1533 np->tx_skb[i].skb = NULL;
1534 np->tx_skb[i].dma = 0;
1414 } 1535 }
1415} 1536}
1416 1537
1417static int nv_init_ring(struct net_device *dev) 1538static int nv_init_ring(struct net_device *dev)
1418{ 1539{
1540 struct fe_priv *np = netdev_priv(dev);
1541
1419 nv_init_tx(dev); 1542 nv_init_tx(dev);
1420 nv_init_rx(dev); 1543 nv_init_rx(dev);
1421 return nv_alloc_rx(dev); 1544 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1545 return nv_alloc_rx(dev);
1546 else
1547 return nv_alloc_rx_optimized(dev);
1422} 1548}
1423 1549
1424static int nv_release_txskb(struct net_device *dev, unsigned int skbnr) 1550static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb)
1425{ 1551{
1426 struct fe_priv *np = netdev_priv(dev); 1552 struct fe_priv *np = netdev_priv(dev);
1427 1553
1428 dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n", 1554 if (tx_skb->dma) {
1429 dev->name, skbnr); 1555 pci_unmap_page(np->pci_dev, tx_skb->dma,
1430 1556 tx_skb->dma_len,
1431 if (np->tx_dma[skbnr]) {
1432 pci_unmap_page(np->pci_dev, np->tx_dma[skbnr],
1433 np->tx_dma_len[skbnr],
1434 PCI_DMA_TODEVICE); 1557 PCI_DMA_TODEVICE);
1435 np->tx_dma[skbnr] = 0; 1558 tx_skb->dma = 0;
1436 } 1559 }
1437 1560 if (tx_skb->skb) {
1438 if (np->tx_skbuff[skbnr]) { 1561 dev_kfree_skb_any(tx_skb->skb);
1439 dev_kfree_skb_any(np->tx_skbuff[skbnr]); 1562 tx_skb->skb = NULL;
1440 np->tx_skbuff[skbnr] = NULL;
1441 return 1; 1563 return 1;
1442 } else { 1564 } else {
1443 return 0; 1565 return 0;
@@ -1450,11 +1572,16 @@ static void nv_drain_tx(struct net_device *dev)
1450 unsigned int i; 1572 unsigned int i;
1451 1573
1452 for (i = 0; i < np->tx_ring_size; i++) { 1574 for (i = 0; i < np->tx_ring_size; i++) {
1453 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1575 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1454 np->tx_ring.orig[i].flaglen = 0; 1576 np->tx_ring.orig[i].flaglen = 0;
1455 else 1577 np->tx_ring.orig[i].buf = 0;
1578 } else {
1456 np->tx_ring.ex[i].flaglen = 0; 1579 np->tx_ring.ex[i].flaglen = 0;
1457 if (nv_release_txskb(dev, i)) 1580 np->tx_ring.ex[i].txvlan = 0;
1581 np->tx_ring.ex[i].bufhigh = 0;
1582 np->tx_ring.ex[i].buflow = 0;
1583 }
1584 if (nv_release_txskb(dev, &np->tx_skb[i]))
1458 np->stats.tx_dropped++; 1585 np->stats.tx_dropped++;
1459 } 1586 }
1460} 1587}
@@ -1463,18 +1590,24 @@ static void nv_drain_rx(struct net_device *dev)
1463{ 1590{
1464 struct fe_priv *np = netdev_priv(dev); 1591 struct fe_priv *np = netdev_priv(dev);
1465 int i; 1592 int i;
1593
1466 for (i = 0; i < np->rx_ring_size; i++) { 1594 for (i = 0; i < np->rx_ring_size; i++) {
1467 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1595 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1468 np->rx_ring.orig[i].flaglen = 0; 1596 np->rx_ring.orig[i].flaglen = 0;
1469 else 1597 np->rx_ring.orig[i].buf = 0;
1598 } else {
1470 np->rx_ring.ex[i].flaglen = 0; 1599 np->rx_ring.ex[i].flaglen = 0;
1600 np->rx_ring.ex[i].txvlan = 0;
1601 np->rx_ring.ex[i].bufhigh = 0;
1602 np->rx_ring.ex[i].buflow = 0;
1603 }
1471 wmb(); 1604 wmb();
1472 if (np->rx_skbuff[i]) { 1605 if (np->rx_skb[i].skb) {
1473 pci_unmap_single(np->pci_dev, np->rx_dma[i], 1606 pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
1474 np->rx_skbuff[i]->end-np->rx_skbuff[i]->data, 1607 np->rx_skb[i].skb->end-np->rx_skb[i].skb->data,
1475 PCI_DMA_FROMDEVICE); 1608 PCI_DMA_FROMDEVICE);
1476 dev_kfree_skb(np->rx_skbuff[i]); 1609 dev_kfree_skb(np->rx_skb[i].skb);
1477 np->rx_skbuff[i] = NULL; 1610 np->rx_skb[i].skb = NULL;
1478 } 1611 }
1479 } 1612 }
1480} 1613}
@@ -1485,6 +1618,11 @@ static void drain_ring(struct net_device *dev)
1485 nv_drain_rx(dev); 1618 nv_drain_rx(dev);
1486} 1619}
1487 1620
1621static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
1622{
1623 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
1624}
1625
1488/* 1626/*
1489 * nv_start_xmit: dev->hard_start_xmit function 1627 * nv_start_xmit: dev->hard_start_xmit function
1490 * Called with netif_tx_lock held. 1628 * Called with netif_tx_lock held.
@@ -1495,14 +1633,16 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1495 u32 tx_flags = 0; 1633 u32 tx_flags = 0;
1496 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 1634 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
1497 unsigned int fragments = skb_shinfo(skb)->nr_frags; 1635 unsigned int fragments = skb_shinfo(skb)->nr_frags;
1498 unsigned int nr = (np->next_tx - 1) % np->tx_ring_size;
1499 unsigned int start_nr = np->next_tx % np->tx_ring_size;
1500 unsigned int i; 1636 unsigned int i;
1501 u32 offset = 0; 1637 u32 offset = 0;
1502 u32 bcnt; 1638 u32 bcnt;
1503 u32 size = skb->len-skb->data_len; 1639 u32 size = skb->len-skb->data_len;
1504 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 1640 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1505 u32 tx_flags_vlan = 0; 1641 u32 empty_slots;
1642 struct ring_desc* put_tx;
1643 struct ring_desc* start_tx;
1644 struct ring_desc* prev_tx;
1645 struct nv_skb_map* prev_tx_ctx;
1506 1646
1507 /* add fragments to entries count */ 1647 /* add fragments to entries count */
1508 for (i = 0; i < fragments; i++) { 1648 for (i = 0; i < fragments; i++) {
@@ -1510,34 +1650,35 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1510 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 1650 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1511 } 1651 }
1512 1652
1513 spin_lock_irq(&np->lock); 1653 empty_slots = nv_get_empty_tx_slots(np);
1514 1654 if (unlikely(empty_slots <= entries)) {
1515 if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop) { 1655 spin_lock_irq(&np->lock);
1516 spin_unlock_irq(&np->lock);
1517 netif_stop_queue(dev); 1656 netif_stop_queue(dev);
1657 np->tx_stop = 1;
1658 spin_unlock_irq(&np->lock);
1518 return NETDEV_TX_BUSY; 1659 return NETDEV_TX_BUSY;
1519 } 1660 }
1520 1661
1662 start_tx = put_tx = np->put_tx.orig;
1663
1521 /* setup the header buffer */ 1664 /* setup the header buffer */
1522 do { 1665 do {
1666 prev_tx = put_tx;
1667 prev_tx_ctx = np->put_tx_ctx;
1523 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 1668 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1524 nr = (nr + 1) % np->tx_ring_size; 1669 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
1525
1526 np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
1527 PCI_DMA_TODEVICE); 1670 PCI_DMA_TODEVICE);
1528 np->tx_dma_len[nr] = bcnt; 1671 np->put_tx_ctx->dma_len = bcnt;
1672 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
1673 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1529 1674
1530 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1531 np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
1532 np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1533 } else {
1534 np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
1535 np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
1536 np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1537 }
1538 tx_flags = np->tx_flags; 1675 tx_flags = np->tx_flags;
1539 offset += bcnt; 1676 offset += bcnt;
1540 size -= bcnt; 1677 size -= bcnt;
1678 if (unlikely(put_tx++ == np->last_tx.orig))
1679 put_tx = np->first_tx.orig;
1680 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1681 np->put_tx_ctx = np->first_tx_ctx;
1541 } while (size); 1682 } while (size);
1542 1683
1543 /* setup the fragments */ 1684 /* setup the fragments */
@@ -1547,58 +1688,174 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1547 offset = 0; 1688 offset = 0;
1548 1689
1549 do { 1690 do {
1691 prev_tx = put_tx;
1692 prev_tx_ctx = np->put_tx_ctx;
1550 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 1693 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1551 nr = (nr + 1) % np->tx_ring_size; 1694 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
1552 1695 PCI_DMA_TODEVICE);
1553 np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, 1696 np->put_tx_ctx->dma_len = bcnt;
1554 PCI_DMA_TODEVICE); 1697 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
1555 np->tx_dma_len[nr] = bcnt; 1698 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1556 1699
1557 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1558 np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
1559 np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1560 } else {
1561 np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
1562 np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
1563 np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1564 }
1565 offset += bcnt; 1700 offset += bcnt;
1566 size -= bcnt; 1701 size -= bcnt;
1702 if (unlikely(put_tx++ == np->last_tx.orig))
1703 put_tx = np->first_tx.orig;
1704 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1705 np->put_tx_ctx = np->first_tx_ctx;
1567 } while (size); 1706 } while (size);
1568 } 1707 }
1569 1708
1570 /* set last fragment flag */ 1709 /* set last fragment flag */
1571 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1710 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
1572 np->tx_ring.orig[nr].flaglen |= cpu_to_le32(tx_flags_extra); 1711
1573 } else { 1712 /* save skb in this slot's context area */
1574 np->tx_ring.ex[nr].flaglen |= cpu_to_le32(tx_flags_extra); 1713 prev_tx_ctx->skb = skb;
1714
1715 if (skb_is_gso(skb))
1716 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
1717 else
1718 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
1719 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
1720
1721 spin_lock_irq(&np->lock);
1722
1723 /* set tx flags */
1724 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
1725 np->put_tx.orig = put_tx;
1726
1727 spin_unlock_irq(&np->lock);
1728
1729 dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
1730 dev->name, entries, tx_flags_extra);
1731 {
1732 int j;
1733 for (j=0; j<64; j++) {
1734 if ((j%16) == 0)
1735 dprintk("\n%03x:", j);
1736 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
1737 }
1738 dprintk("\n");
1739 }
1740
1741 dev->trans_start = jiffies;
1742 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
1743 return NETDEV_TX_OK;
1744}
1745
1746static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
1747{
1748 struct fe_priv *np = netdev_priv(dev);
1749 u32 tx_flags = 0;
1750 u32 tx_flags_extra;
1751 unsigned int fragments = skb_shinfo(skb)->nr_frags;
1752 unsigned int i;
1753 u32 offset = 0;
1754 u32 bcnt;
1755 u32 size = skb->len-skb->data_len;
1756 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1757 u32 empty_slots;
1758 struct ring_desc_ex* put_tx;
1759 struct ring_desc_ex* start_tx;
1760 struct ring_desc_ex* prev_tx;
1761 struct nv_skb_map* prev_tx_ctx;
1762
1763 /* add fragments to entries count */
1764 for (i = 0; i < fragments; i++) {
1765 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
1766 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1767 }
1768
1769 empty_slots = nv_get_empty_tx_slots(np);
1770 if (unlikely(empty_slots <= entries)) {
1771 spin_lock_irq(&np->lock);
1772 netif_stop_queue(dev);
1773 np->tx_stop = 1;
1774 spin_unlock_irq(&np->lock);
1775 return NETDEV_TX_BUSY;
1776 }
1777
1778 start_tx = put_tx = np->put_tx.ex;
1779
1780 /* setup the header buffer */
1781 do {
1782 prev_tx = put_tx;
1783 prev_tx_ctx = np->put_tx_ctx;
1784 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1785 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
1786 PCI_DMA_TODEVICE);
1787 np->put_tx_ctx->dma_len = bcnt;
1788 put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
1789 put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
1790 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1791
1792 tx_flags = NV_TX2_VALID;
1793 offset += bcnt;
1794 size -= bcnt;
1795 if (unlikely(put_tx++ == np->last_tx.ex))
1796 put_tx = np->first_tx.ex;
1797 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1798 np->put_tx_ctx = np->first_tx_ctx;
1799 } while (size);
1800
1801 /* setup the fragments */
1802 for (i = 0; i < fragments; i++) {
1803 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1804 u32 size = frag->size;
1805 offset = 0;
1806
1807 do {
1808 prev_tx = put_tx;
1809 prev_tx_ctx = np->put_tx_ctx;
1810 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1811 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
1812 PCI_DMA_TODEVICE);
1813 np->put_tx_ctx->dma_len = bcnt;
1814 put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
1815 put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
1816 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1817
1818 offset += bcnt;
1819 size -= bcnt;
1820 if (unlikely(put_tx++ == np->last_tx.ex))
1821 put_tx = np->first_tx.ex;
1822 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1823 np->put_tx_ctx = np->first_tx_ctx;
1824 } while (size);
1575 } 1825 }
1576 1826
1577 np->tx_skbuff[nr] = skb; 1827 /* set last fragment flag */
1828 prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
1829
1830 /* save skb in this slot's context area */
1831 prev_tx_ctx->skb = skb;
1578 1832
1579#ifdef NETIF_F_TSO
1580 if (skb_is_gso(skb)) 1833 if (skb_is_gso(skb))
1581 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 1834 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
1582 else 1835 else
1583#endif 1836 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
1584 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
1585 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; 1837 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
1586 1838
1587 /* vlan tag */ 1839 /* vlan tag */
1588 if (np->vlangrp && vlan_tx_tag_present(skb)) { 1840 if (likely(!np->vlangrp)) {
1589 tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb); 1841 start_tx->txvlan = 0;
1842 } else {
1843 if (vlan_tx_tag_present(skb))
1844 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
1845 else
1846 start_tx->txvlan = 0;
1590 } 1847 }
1591 1848
1849 spin_lock_irq(&np->lock);
1850
1592 /* set tx flags */ 1851 /* set tx flags */
1593 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1852 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
1594 np->tx_ring.orig[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); 1853 np->put_tx.ex = put_tx;
1595 } else { 1854
1596 np->tx_ring.ex[start_nr].txvlan = cpu_to_le32(tx_flags_vlan); 1855 spin_unlock_irq(&np->lock);
1597 np->tx_ring.ex[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
1598 }
1599 1856
1600 dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n", 1857 dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
1601 dev->name, np->next_tx, entries, tx_flags_extra); 1858 dev->name, entries, tx_flags_extra);
1602 { 1859 {
1603 int j; 1860 int j;
1604 for (j=0; j<64; j++) { 1861 for (j=0; j<64; j++) {
@@ -1609,12 +1866,8 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1609 dprintk("\n"); 1866 dprintk("\n");
1610 } 1867 }
1611 1868
1612 np->next_tx += entries;
1613
1614 dev->trans_start = jiffies; 1869 dev->trans_start = jiffies;
1615 spin_unlock_irq(&np->lock);
1616 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 1870 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
1617 pci_push(get_hwbase(dev));
1618 return NETDEV_TX_OK; 1871 return NETDEV_TX_OK;
1619} 1872}
1620 1873
@@ -1627,26 +1880,22 @@ static void nv_tx_done(struct net_device *dev)
1627{ 1880{
1628 struct fe_priv *np = netdev_priv(dev); 1881 struct fe_priv *np = netdev_priv(dev);
1629 u32 flags; 1882 u32 flags;
1630 unsigned int i; 1883 struct ring_desc* orig_get_tx = np->get_tx.orig;
1631 struct sk_buff *skb;
1632 1884
1633 while (np->nic_tx != np->next_tx) { 1885 while ((np->get_tx.orig != np->put_tx.orig) &&
1634 i = np->nic_tx % np->tx_ring_size; 1886 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) {
1635 1887
1636 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1888 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
1637 flags = le32_to_cpu(np->tx_ring.orig[i].flaglen); 1889 dev->name, flags);
1638 else 1890
1639 flags = le32_to_cpu(np->tx_ring.ex[i].flaglen); 1891 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
1892 np->get_tx_ctx->dma_len,
1893 PCI_DMA_TODEVICE);
1894 np->get_tx_ctx->dma = 0;
1640 1895
1641 dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, flags 0x%x.\n",
1642 dev->name, np->nic_tx, flags);
1643 if (flags & NV_TX_VALID)
1644 break;
1645 if (np->desc_ver == DESC_VER_1) { 1896 if (np->desc_ver == DESC_VER_1) {
1646 if (flags & NV_TX_LASTPACKET) { 1897 if (flags & NV_TX_LASTPACKET) {
1647 skb = np->tx_skbuff[i]; 1898 if (flags & NV_TX_ERROR) {
1648 if (flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
1649 NV_TX_UNDERFLOW|NV_TX_ERROR)) {
1650 if (flags & NV_TX_UNDERFLOW) 1899 if (flags & NV_TX_UNDERFLOW)
1651 np->stats.tx_fifo_errors++; 1900 np->stats.tx_fifo_errors++;
1652 if (flags & NV_TX_CARRIERLOST) 1901 if (flags & NV_TX_CARRIERLOST)
@@ -1654,14 +1903,14 @@ static void nv_tx_done(struct net_device *dev)
1654 np->stats.tx_errors++; 1903 np->stats.tx_errors++;
1655 } else { 1904 } else {
1656 np->stats.tx_packets++; 1905 np->stats.tx_packets++;
1657 np->stats.tx_bytes += skb->len; 1906 np->stats.tx_bytes += np->get_tx_ctx->skb->len;
1658 } 1907 }
1908 dev_kfree_skb_any(np->get_tx_ctx->skb);
1909 np->get_tx_ctx->skb = NULL;
1659 } 1910 }
1660 } else { 1911 } else {
1661 if (flags & NV_TX2_LASTPACKET) { 1912 if (flags & NV_TX2_LASTPACKET) {
1662 skb = np->tx_skbuff[i]; 1913 if (flags & NV_TX2_ERROR) {
1663 if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
1664 NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
1665 if (flags & NV_TX2_UNDERFLOW) 1914 if (flags & NV_TX2_UNDERFLOW)
1666 np->stats.tx_fifo_errors++; 1915 np->stats.tx_fifo_errors++;
1667 if (flags & NV_TX2_CARRIERLOST) 1916 if (flags & NV_TX2_CARRIERLOST)
@@ -1669,15 +1918,56 @@ static void nv_tx_done(struct net_device *dev)
1669 np->stats.tx_errors++; 1918 np->stats.tx_errors++;
1670 } else { 1919 } else {
1671 np->stats.tx_packets++; 1920 np->stats.tx_packets++;
1672 np->stats.tx_bytes += skb->len; 1921 np->stats.tx_bytes += np->get_tx_ctx->skb->len;
1673 } 1922 }
1923 dev_kfree_skb_any(np->get_tx_ctx->skb);
1924 np->get_tx_ctx->skb = NULL;
1674 } 1925 }
1675 } 1926 }
1676 nv_release_txskb(dev, i); 1927 if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
1677 np->nic_tx++; 1928 np->get_tx.orig = np->first_tx.orig;
1929 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
1930 np->get_tx_ctx = np->first_tx_ctx;
1678 } 1931 }
1679 if (np->next_tx - np->nic_tx < np->tx_limit_start) 1932 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
1933 np->tx_stop = 0;
1680 netif_wake_queue(dev); 1934 netif_wake_queue(dev);
1935 }
1936}
1937
1938static void nv_tx_done_optimized(struct net_device *dev, int limit)
1939{
1940 struct fe_priv *np = netdev_priv(dev);
1941 u32 flags;
1942 struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
1943
1944 while ((np->get_tx.ex != np->put_tx.ex) &&
1945 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
1946 (limit-- > 0)) {
1947
1948 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
1949 dev->name, flags);
1950
1951 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
1952 np->get_tx_ctx->dma_len,
1953 PCI_DMA_TODEVICE);
1954 np->get_tx_ctx->dma = 0;
1955
1956 if (flags & NV_TX2_LASTPACKET) {
1957 if (!(flags & NV_TX2_ERROR))
1958 np->stats.tx_packets++;
1959 dev_kfree_skb_any(np->get_tx_ctx->skb);
1960 np->get_tx_ctx->skb = NULL;
1961 }
1962 if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
1963 np->get_tx.ex = np->first_tx.ex;
1964 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
1965 np->get_tx_ctx = np->first_tx_ctx;
1966 }
1967 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
1968 np->tx_stop = 0;
1969 netif_wake_queue(dev);
1970 }
1681} 1971}
1682 1972
1683/* 1973/*
@@ -1700,9 +1990,8 @@ static void nv_tx_timeout(struct net_device *dev)
1700 { 1990 {
1701 int i; 1991 int i;
1702 1992
1703 printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n", 1993 printk(KERN_INFO "%s: Ring at %lx\n",
1704 dev->name, (unsigned long)np->ring_addr, 1994 dev->name, (unsigned long)np->ring_addr);
1705 np->next_tx, np->nic_tx);
1706 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name); 1995 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
1707 for (i=0;i<=np->register_size;i+= 32) { 1996 for (i=0;i<=np->register_size;i+= 32) {
1708 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", 1997 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
@@ -1750,13 +2039,16 @@ static void nv_tx_timeout(struct net_device *dev)
1750 nv_stop_tx(dev); 2039 nv_stop_tx(dev);
1751 2040
1752 /* 2) check that the packets were not sent already: */ 2041 /* 2) check that the packets were not sent already: */
1753 nv_tx_done(dev); 2042 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2043 nv_tx_done(dev);
2044 else
2045 nv_tx_done_optimized(dev, np->tx_ring_size);
1754 2046
1755 /* 3) if there are dead entries: clear everything */ 2047 /* 3) if there are dead entries: clear everything */
1756 if (np->next_tx != np->nic_tx) { 2048 if (np->get_tx_ctx != np->put_tx_ctx) {
1757 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); 2049 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
1758 nv_drain_tx(dev); 2050 nv_drain_tx(dev);
1759 np->next_tx = np->nic_tx = 0; 2051 nv_init_tx(dev);
1760 setup_hw_rings(dev, NV_SETUP_TX_RING); 2052 setup_hw_rings(dev, NV_SETUP_TX_RING);
1761 netif_wake_queue(dev); 2053 netif_wake_queue(dev);
1762 } 2054 }
@@ -1823,40 +2115,27 @@ static int nv_rx_process(struct net_device *dev, int limit)
1823{ 2115{
1824 struct fe_priv *np = netdev_priv(dev); 2116 struct fe_priv *np = netdev_priv(dev);
1825 u32 flags; 2117 u32 flags;
1826 u32 vlanflags = 0; 2118 u32 rx_processed_cnt = 0;
1827 int count; 2119 struct sk_buff *skb;
1828 2120 int len;
1829 for (count = 0; count < limit; ++count) {
1830 struct sk_buff *skb;
1831 int len;
1832 int i;
1833 if (np->cur_rx - np->refill_rx >= np->rx_ring_size)
1834 break; /* we scanned the whole ring - do not continue */
1835
1836 i = np->cur_rx % np->rx_ring_size;
1837 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1838 flags = le32_to_cpu(np->rx_ring.orig[i].flaglen);
1839 len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
1840 } else {
1841 flags = le32_to_cpu(np->rx_ring.ex[i].flaglen);
1842 len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
1843 vlanflags = le32_to_cpu(np->rx_ring.ex[i].buflow);
1844 }
1845 2121
1846 dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, flags 0x%x.\n", 2122 while((np->get_rx.orig != np->put_rx.orig) &&
1847 dev->name, np->cur_rx, flags); 2123 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2124 (rx_processed_cnt++ < limit)) {
1848 2125
1849 if (flags & NV_RX_AVAIL) 2126 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
1850 break; /* still owned by hardware, */ 2127 dev->name, flags);
1851 2128
1852 /* 2129 /*
1853 * the packet is for us - immediately tear down the pci mapping. 2130 * the packet is for us - immediately tear down the pci mapping.
1854 * TODO: check if a prefetch of the first cacheline improves 2131 * TODO: check if a prefetch of the first cacheline improves
1855 * the performance. 2132 * the performance.
1856 */ 2133 */
1857 pci_unmap_single(np->pci_dev, np->rx_dma[i], 2134 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
1858 np->rx_skbuff[i]->end-np->rx_skbuff[i]->data, 2135 np->get_rx_ctx->dma_len,
1859 PCI_DMA_FROMDEVICE); 2136 PCI_DMA_FROMDEVICE);
2137 skb = np->get_rx_ctx->skb;
2138 np->get_rx_ctx->skb = NULL;
1860 2139
1861 { 2140 {
1862 int j; 2141 int j;
@@ -1864,123 +2143,228 @@ static int nv_rx_process(struct net_device *dev, int limit)
1864 for (j=0; j<64; j++) { 2143 for (j=0; j<64; j++) {
1865 if ((j%16) == 0) 2144 if ((j%16) == 0)
1866 dprintk("\n%03x:", j); 2145 dprintk("\n%03x:", j);
1867 dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]); 2146 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
1868 } 2147 }
1869 dprintk("\n"); 2148 dprintk("\n");
1870 } 2149 }
1871 /* look at what we actually got: */ 2150 /* look at what we actually got: */
1872 if (np->desc_ver == DESC_VER_1) { 2151 if (np->desc_ver == DESC_VER_1) {
1873 if (!(flags & NV_RX_DESCRIPTORVALID)) 2152 if (likely(flags & NV_RX_DESCRIPTORVALID)) {
1874 goto next_pkt; 2153 len = flags & LEN_MASK_V1;
1875 2154 if (unlikely(flags & NV_RX_ERROR)) {
1876 if (flags & NV_RX_ERROR) { 2155 if (flags & NV_RX_ERROR4) {
1877 if (flags & NV_RX_MISSEDFRAME) { 2156 len = nv_getlen(dev, skb->data, len);
1878 np->stats.rx_missed_errors++; 2157 if (len < 0) {
1879 np->stats.rx_errors++; 2158 np->stats.rx_errors++;
1880 goto next_pkt; 2159 dev_kfree_skb(skb);
1881 } 2160 goto next_pkt;
1882 if (flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) { 2161 }
1883 np->stats.rx_errors++; 2162 }
1884 goto next_pkt; 2163 /* framing errors are soft errors */
1885 } 2164 else if (flags & NV_RX_FRAMINGERR) {
1886 if (flags & NV_RX_CRCERR) { 2165 if (flags & NV_RX_SUBSTRACT1) {
1887 np->stats.rx_crc_errors++; 2166 len--;
1888 np->stats.rx_errors++; 2167 }
1889 goto next_pkt; 2168 }
1890 } 2169 /* the rest are hard errors */
1891 if (flags & NV_RX_OVERFLOW) { 2170 else {
1892 np->stats.rx_over_errors++; 2171 if (flags & NV_RX_MISSEDFRAME)
1893 np->stats.rx_errors++; 2172 np->stats.rx_missed_errors++;
1894 goto next_pkt; 2173 if (flags & NV_RX_CRCERR)
2174 np->stats.rx_crc_errors++;
2175 if (flags & NV_RX_OVERFLOW)
2176 np->stats.rx_over_errors++;
2177 np->stats.rx_errors++;
2178 dev_kfree_skb(skb);
2179 goto next_pkt;
2180 }
1895 } 2181 }
1896 if (flags & NV_RX_ERROR4) { 2182 } else {
1897 len = nv_getlen(dev, np->rx_skbuff[i]->data, len); 2183 dev_kfree_skb(skb);
1898 if (len < 0) { 2184 goto next_pkt;
2185 }
2186 } else {
2187 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2188 len = flags & LEN_MASK_V2;
2189 if (unlikely(flags & NV_RX2_ERROR)) {
2190 if (flags & NV_RX2_ERROR4) {
2191 len = nv_getlen(dev, skb->data, len);
2192 if (len < 0) {
2193 np->stats.rx_errors++;
2194 dev_kfree_skb(skb);
2195 goto next_pkt;
2196 }
2197 }
2198 /* framing errors are soft errors */
2199 else if (flags & NV_RX2_FRAMINGERR) {
2200 if (flags & NV_RX2_SUBSTRACT1) {
2201 len--;
2202 }
2203 }
2204 /* the rest are hard errors */
2205 else {
2206 if (flags & NV_RX2_CRCERR)
2207 np->stats.rx_crc_errors++;
2208 if (flags & NV_RX2_OVERFLOW)
2209 np->stats.rx_over_errors++;
1899 np->stats.rx_errors++; 2210 np->stats.rx_errors++;
2211 dev_kfree_skb(skb);
1900 goto next_pkt; 2212 goto next_pkt;
1901 } 2213 }
1902 } 2214 }
1903 /* framing errors are soft errors. */ 2215 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ {
1904 if (flags & NV_RX_FRAMINGERR) { 2216 skb->ip_summed = CHECKSUM_UNNECESSARY;
1905 if (flags & NV_RX_SUBSTRACT1) { 2217 } else {
1906 len--; 2218 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
2219 (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
2220 skb->ip_summed = CHECKSUM_UNNECESSARY;
1907 } 2221 }
1908 } 2222 }
1909 } 2223 } else {
1910 } else { 2224 dev_kfree_skb(skb);
1911 if (!(flags & NV_RX2_DESCRIPTORVALID))
1912 goto next_pkt; 2225 goto next_pkt;
2226 }
2227 }
2228 /* got a valid packet - forward it to the network core */
2229 skb_put(skb, len);
2230 skb->protocol = eth_type_trans(skb, dev);
2231 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
2232 dev->name, len, skb->protocol);
2233#ifdef CONFIG_FORCEDETH_NAPI
2234 netif_receive_skb(skb);
2235#else
2236 netif_rx(skb);
2237#endif
2238 dev->last_rx = jiffies;
2239 np->stats.rx_packets++;
2240 np->stats.rx_bytes += len;
2241next_pkt:
2242 if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2243 np->get_rx.orig = np->first_rx.orig;
2244 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2245 np->get_rx_ctx = np->first_rx_ctx;
2246 }
1913 2247
1914 if (flags & NV_RX2_ERROR) { 2248 return rx_processed_cnt;
1915 if (flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) { 2249}
1916 np->stats.rx_errors++; 2250
1917 goto next_pkt; 2251static int nv_rx_process_optimized(struct net_device *dev, int limit)
1918 } 2252{
1919 if (flags & NV_RX2_CRCERR) { 2253 struct fe_priv *np = netdev_priv(dev);
1920 np->stats.rx_crc_errors++; 2254 u32 flags;
1921 np->stats.rx_errors++; 2255 u32 vlanflags = 0;
1922 goto next_pkt; 2256 u32 rx_processed_cnt = 0;
1923 } 2257 struct sk_buff *skb;
1924 if (flags & NV_RX2_OVERFLOW) { 2258 int len;
1925 np->stats.rx_over_errors++; 2259
1926 np->stats.rx_errors++; 2260 while((np->get_rx.ex != np->put_rx.ex) &&
1927 goto next_pkt; 2261 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
1928 } 2262 (rx_processed_cnt++ < limit)) {
2263
2264 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
2265 dev->name, flags);
2266
2267 /*
2268 * the packet is for us - immediately tear down the pci mapping.
2269 * TODO: check if a prefetch of the first cacheline improves
2270 * the performance.
2271 */
2272 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2273 np->get_rx_ctx->dma_len,
2274 PCI_DMA_FROMDEVICE);
2275 skb = np->get_rx_ctx->skb;
2276 np->get_rx_ctx->skb = NULL;
2277
2278 {
2279 int j;
2280 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
2281 for (j=0; j<64; j++) {
2282 if ((j%16) == 0)
2283 dprintk("\n%03x:", j);
2284 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2285 }
2286 dprintk("\n");
2287 }
2288 /* look at what we actually got: */
2289 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2290 len = flags & LEN_MASK_V2;
2291 if (unlikely(flags & NV_RX2_ERROR)) {
1929 if (flags & NV_RX2_ERROR4) { 2292 if (flags & NV_RX2_ERROR4) {
1930 len = nv_getlen(dev, np->rx_skbuff[i]->data, len); 2293 len = nv_getlen(dev, skb->data, len);
1931 if (len < 0) { 2294 if (len < 0) {
1932 np->stats.rx_errors++; 2295 dev_kfree_skb(skb);
1933 goto next_pkt; 2296 goto next_pkt;
1934 } 2297 }
1935 } 2298 }
1936 /* framing errors are soft errors */ 2299 /* framing errors are soft errors */
1937 if (flags & NV_RX2_FRAMINGERR) { 2300 else if (flags & NV_RX2_FRAMINGERR) {
1938 if (flags & NV_RX2_SUBSTRACT1) { 2301 if (flags & NV_RX2_SUBSTRACT1) {
1939 len--; 2302 len--;
1940 } 2303 }
1941 } 2304 }
2305 /* the rest are hard errors */
2306 else {
2307 dev_kfree_skb(skb);
2308 goto next_pkt;
2309 }
1942 } 2310 }
1943 if (np->rx_csum) { 2311
1944 flags &= NV_RX2_CHECKSUMMASK; 2312 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ {
1945 if (flags == NV_RX2_CHECKSUMOK1 || 2313 skb->ip_summed = CHECKSUM_UNNECESSARY;
1946 flags == NV_RX2_CHECKSUMOK2 || 2314 } else {
1947 flags == NV_RX2_CHECKSUMOK3) { 2315 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
1948 dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name); 2316 (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
1949 np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY; 2317 skb->ip_summed = CHECKSUM_UNNECESSARY;
1950 } else {
1951 dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
1952 } 2318 }
1953 } 2319 }
1954 }
1955 /* got a valid packet - forward it to the network core */
1956 skb = np->rx_skbuff[i];
1957 np->rx_skbuff[i] = NULL;
1958 2320
1959 skb_put(skb, len); 2321 /* got a valid packet - forward it to the network core */
1960 skb->protocol = eth_type_trans(skb, dev); 2322 skb_put(skb, len);
1961 dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n", 2323 skb->protocol = eth_type_trans(skb, dev);
1962 dev->name, np->cur_rx, len, skb->protocol); 2324 prefetch(skb->data);
2325
2326 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
2327 dev->name, len, skb->protocol);
2328
2329 if (likely(!np->vlangrp)) {
1963#ifdef CONFIG_FORCEDETH_NAPI 2330#ifdef CONFIG_FORCEDETH_NAPI
1964 if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) 2331 netif_receive_skb(skb);
1965 vlan_hwaccel_receive_skb(skb, np->vlangrp,
1966 vlanflags & NV_RX3_VLAN_TAG_MASK);
1967 else
1968 netif_receive_skb(skb);
1969#else 2332#else
1970 if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) 2333 netif_rx(skb);
1971 vlan_hwaccel_rx(skb, np->vlangrp,
1972 vlanflags & NV_RX3_VLAN_TAG_MASK);
1973 else
1974 netif_rx(skb);
1975#endif 2334#endif
1976 dev->last_rx = jiffies; 2335 } else {
1977 np->stats.rx_packets++; 2336 vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
1978 np->stats.rx_bytes += len; 2337 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
2338#ifdef CONFIG_FORCEDETH_NAPI
2339 vlan_hwaccel_receive_skb(skb, np->vlangrp,
2340 vlanflags & NV_RX3_VLAN_TAG_MASK);
2341#else
2342 vlan_hwaccel_rx(skb, np->vlangrp,
2343 vlanflags & NV_RX3_VLAN_TAG_MASK);
2344#endif
2345 } else {
2346#ifdef CONFIG_FORCEDETH_NAPI
2347 netif_receive_skb(skb);
2348#else
2349 netif_rx(skb);
2350#endif
2351 }
2352 }
2353
2354 dev->last_rx = jiffies;
2355 np->stats.rx_packets++;
2356 np->stats.rx_bytes += len;
2357 } else {
2358 dev_kfree_skb(skb);
2359 }
1979next_pkt: 2360next_pkt:
1980 np->cur_rx++; 2361 if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
2362 np->get_rx.ex = np->first_rx.ex;
2363 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2364 np->get_rx_ctx = np->first_rx_ctx;
1981 } 2365 }
1982 2366
1983 return count; 2367 return rx_processed_cnt;
1984} 2368}
1985 2369
1986static void set_bufsize(struct net_device *dev) 2370static void set_bufsize(struct net_device *dev)
@@ -2456,7 +2840,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
2456 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 2840 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2457 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 2841 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
2458 } 2842 }
2459 pci_push(base);
2460 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 2843 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
2461 if (!(events & np->irqmask)) 2844 if (!(events & np->irqmask))
2462 break; 2845 break;
@@ -2465,22 +2848,46 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
2465 nv_tx_done(dev); 2848 nv_tx_done(dev);
2466 spin_unlock(&np->lock); 2849 spin_unlock(&np->lock);
2467 2850
2468 if (events & NVREG_IRQ_LINK) { 2851#ifdef CONFIG_FORCEDETH_NAPI
2852 if (events & NVREG_IRQ_RX_ALL) {
2853 netif_rx_schedule(dev);
2854
2855 /* Disable furthur receive irq's */
2856 spin_lock(&np->lock);
2857 np->irqmask &= ~NVREG_IRQ_RX_ALL;
2858
2859 if (np->msi_flags & NV_MSI_X_ENABLED)
2860 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
2861 else
2862 writel(np->irqmask, base + NvRegIrqMask);
2863 spin_unlock(&np->lock);
2864 }
2865#else
2866 if (nv_rx_process(dev, dev->weight)) {
2867 if (unlikely(nv_alloc_rx(dev))) {
2868 spin_lock(&np->lock);
2869 if (!np->in_shutdown)
2870 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2871 spin_unlock(&np->lock);
2872 }
2873 }
2874#endif
2875 if (unlikely(events & NVREG_IRQ_LINK)) {
2469 spin_lock(&np->lock); 2876 spin_lock(&np->lock);
2470 nv_link_irq(dev); 2877 nv_link_irq(dev);
2471 spin_unlock(&np->lock); 2878 spin_unlock(&np->lock);
2472 } 2879 }
2473 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { 2880 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
2474 spin_lock(&np->lock); 2881 spin_lock(&np->lock);
2475 nv_linkchange(dev); 2882 nv_linkchange(dev);
2476 spin_unlock(&np->lock); 2883 spin_unlock(&np->lock);
2477 np->link_timeout = jiffies + LINK_TIMEOUT; 2884 np->link_timeout = jiffies + LINK_TIMEOUT;
2478 } 2885 }
2479 if (events & (NVREG_IRQ_TX_ERR)) { 2886 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
2480 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", 2887 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
2481 dev->name, events); 2888 dev->name, events);
2482 } 2889 }
2483 if (events & (NVREG_IRQ_UNKNOWN)) { 2890 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
2484 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", 2891 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
2485 dev->name, events); 2892 dev->name, events);
2486 } 2893 }
@@ -2501,6 +2908,63 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
2501 spin_unlock(&np->lock); 2908 spin_unlock(&np->lock);
2502 break; 2909 break;
2503 } 2910 }
2911 if (unlikely(i > max_interrupt_work)) {
2912 spin_lock(&np->lock);
2913 /* disable interrupts on the nic */
2914 if (!(np->msi_flags & NV_MSI_X_ENABLED))
2915 writel(0, base + NvRegIrqMask);
2916 else
2917 writel(np->irqmask, base + NvRegIrqMask);
2918 pci_push(base);
2919
2920 if (!np->in_shutdown) {
2921 np->nic_poll_irq = np->irqmask;
2922 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
2923 }
2924 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
2925 spin_unlock(&np->lock);
2926 break;
2927 }
2928
2929 }
2930 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
2931
2932 return IRQ_RETVAL(i);
2933}
2934
2935#define TX_WORK_PER_LOOP 64
2936#define RX_WORK_PER_LOOP 64
2937/**
2938 * All _optimized functions are used to help increase performance
2939 * (reduce CPU and increase throughput). They use descripter version 3,
2940 * compiler directives, and reduce memory accesses.
2941 */
2942static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
2943{
2944 struct net_device *dev = (struct net_device *) data;
2945 struct fe_priv *np = netdev_priv(dev);
2946 u8 __iomem *base = get_hwbase(dev);
2947 u32 events;
2948 int i;
2949
2950 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
2951
2952 for (i=0; ; i++) {
2953 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
2954 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2955 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
2956 } else {
2957 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2958 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
2959 }
2960 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
2961 if (!(events & np->irqmask))
2962 break;
2963
2964 spin_lock(&np->lock);
2965 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
2966 spin_unlock(&np->lock);
2967
2504#ifdef CONFIG_FORCEDETH_NAPI 2968#ifdef CONFIG_FORCEDETH_NAPI
2505 if (events & NVREG_IRQ_RX_ALL) { 2969 if (events & NVREG_IRQ_RX_ALL) {
2506 netif_rx_schedule(dev); 2970 netif_rx_schedule(dev);
@@ -2516,15 +2980,53 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
2516 spin_unlock(&np->lock); 2980 spin_unlock(&np->lock);
2517 } 2981 }
2518#else 2982#else
2519 nv_rx_process(dev, dev->weight); 2983 if (nv_rx_process_optimized(dev, dev->weight)) {
2520 if (nv_alloc_rx(dev)) { 2984 if (unlikely(nv_alloc_rx_optimized(dev))) {
2985 spin_lock(&np->lock);
2986 if (!np->in_shutdown)
2987 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2988 spin_unlock(&np->lock);
2989 }
2990 }
2991#endif
2992 if (unlikely(events & NVREG_IRQ_LINK)) {
2521 spin_lock(&np->lock); 2993 spin_lock(&np->lock);
2522 if (!np->in_shutdown) 2994 nv_link_irq(dev);
2523 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2524 spin_unlock(&np->lock); 2995 spin_unlock(&np->lock);
2525 } 2996 }
2526#endif 2997 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
2527 if (i > max_interrupt_work) { 2998 spin_lock(&np->lock);
2999 nv_linkchange(dev);
3000 spin_unlock(&np->lock);
3001 np->link_timeout = jiffies + LINK_TIMEOUT;
3002 }
3003 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
3004 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3005 dev->name, events);
3006 }
3007 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
3008 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3009 dev->name, events);
3010 }
3011 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
3012 spin_lock(&np->lock);
3013 /* disable interrupts on the nic */
3014 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3015 writel(0, base + NvRegIrqMask);
3016 else
3017 writel(np->irqmask, base + NvRegIrqMask);
3018 pci_push(base);
3019
3020 if (!np->in_shutdown) {
3021 np->nic_poll_irq = np->irqmask;
3022 np->recover_error = 1;
3023 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3024 }
3025 spin_unlock(&np->lock);
3026 break;
3027 }
3028
3029 if (unlikely(i > max_interrupt_work)) {
2528 spin_lock(&np->lock); 3030 spin_lock(&np->lock);
2529 /* disable interrupts on the nic */ 3031 /* disable interrupts on the nic */
2530 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3032 if (!(np->msi_flags & NV_MSI_X_ENABLED))
@@ -2543,7 +3045,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
2543 } 3045 }
2544 3046
2545 } 3047 }
2546 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name); 3048 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
2547 3049
2548 return IRQ_RETVAL(i); 3050 return IRQ_RETVAL(i);
2549} 3051}
@@ -2562,20 +3064,19 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
2562 for (i=0; ; i++) { 3064 for (i=0; ; i++) {
2563 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; 3065 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
2564 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); 3066 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
2565 pci_push(base);
2566 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); 3067 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
2567 if (!(events & np->irqmask)) 3068 if (!(events & np->irqmask))
2568 break; 3069 break;
2569 3070
2570 spin_lock_irqsave(&np->lock, flags); 3071 spin_lock_irqsave(&np->lock, flags);
2571 nv_tx_done(dev); 3072 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
2572 spin_unlock_irqrestore(&np->lock, flags); 3073 spin_unlock_irqrestore(&np->lock, flags);
2573 3074
2574 if (events & (NVREG_IRQ_TX_ERR)) { 3075 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
2575 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", 3076 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
2576 dev->name, events); 3077 dev->name, events);
2577 } 3078 }
2578 if (i > max_interrupt_work) { 3079 if (unlikely(i > max_interrupt_work)) {
2579 spin_lock_irqsave(&np->lock, flags); 3080 spin_lock_irqsave(&np->lock, flags);
2580 /* disable interrupts on the nic */ 3081 /* disable interrupts on the nic */
2581 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); 3082 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
@@ -2604,7 +3105,10 @@ static int nv_napi_poll(struct net_device *dev, int *budget)
2604 u8 __iomem *base = get_hwbase(dev); 3105 u8 __iomem *base = get_hwbase(dev);
2605 unsigned long flags; 3106 unsigned long flags;
2606 3107
2607 pkts = nv_rx_process(dev, limit); 3108 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
3109 pkts = nv_rx_process(dev, limit);
3110 else
3111 pkts = nv_rx_process_optimized(dev, limit);
2608 3112
2609 if (nv_alloc_rx(dev)) { 3113 if (nv_alloc_rx(dev)) {
2610 spin_lock_irqsave(&np->lock, flags); 3114 spin_lock_irqsave(&np->lock, flags);
@@ -2670,20 +3174,20 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
2670 for (i=0; ; i++) { 3174 for (i=0; ; i++) {
2671 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3175 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
2672 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3176 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
2673 pci_push(base);
2674 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); 3177 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
2675 if (!(events & np->irqmask)) 3178 if (!(events & np->irqmask))
2676 break; 3179 break;
2677 3180
2678 nv_rx_process(dev, dev->weight); 3181 if (nv_rx_process_optimized(dev, dev->weight)) {
2679 if (nv_alloc_rx(dev)) { 3182 if (unlikely(nv_alloc_rx_optimized(dev))) {
2680 spin_lock_irqsave(&np->lock, flags); 3183 spin_lock_irqsave(&np->lock, flags);
2681 if (!np->in_shutdown) 3184 if (!np->in_shutdown)
2682 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3185 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2683 spin_unlock_irqrestore(&np->lock, flags); 3186 spin_unlock_irqrestore(&np->lock, flags);
3187 }
2684 } 3188 }
2685 3189
2686 if (i > max_interrupt_work) { 3190 if (unlikely(i > max_interrupt_work)) {
2687 spin_lock_irqsave(&np->lock, flags); 3191 spin_lock_irqsave(&np->lock, flags);
2688 /* disable interrupts on the nic */ 3192 /* disable interrupts on the nic */
2689 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3193 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
@@ -2718,11 +3222,15 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
2718 for (i=0; ; i++) { 3222 for (i=0; ; i++) {
2719 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; 3223 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
2720 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); 3224 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
2721 pci_push(base);
2722 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3225 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
2723 if (!(events & np->irqmask)) 3226 if (!(events & np->irqmask))
2724 break; 3227 break;
2725 3228
3229 /* check tx in case we reached max loop limit in tx isr */
3230 spin_lock_irqsave(&np->lock, flags);
3231 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3232 spin_unlock_irqrestore(&np->lock, flags);
3233
2726 if (events & NVREG_IRQ_LINK) { 3234 if (events & NVREG_IRQ_LINK) {
2727 spin_lock_irqsave(&np->lock, flags); 3235 spin_lock_irqsave(&np->lock, flags);
2728 nv_link_irq(dev); 3236 nv_link_irq(dev);
@@ -2752,7 +3260,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
2752 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", 3260 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
2753 dev->name, events); 3261 dev->name, events);
2754 } 3262 }
2755 if (i > max_interrupt_work) { 3263 if (unlikely(i > max_interrupt_work)) {
2756 spin_lock_irqsave(&np->lock, flags); 3264 spin_lock_irqsave(&np->lock, flags);
2757 /* disable interrupts on the nic */ 3265 /* disable interrupts on the nic */
2758 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 3266 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
@@ -2835,6 +3343,16 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
2835 u8 __iomem *base = get_hwbase(dev); 3343 u8 __iomem *base = get_hwbase(dev);
2836 int ret = 1; 3344 int ret = 1;
2837 int i; 3345 int i;
3346 irqreturn_t (*handler)(int foo, void *data);
3347
3348 if (intr_test) {
3349 handler = nv_nic_irq_test;
3350 } else {
3351 if (np->desc_ver == DESC_VER_3)
3352 handler = nv_nic_irq_optimized;
3353 else
3354 handler = nv_nic_irq;
3355 }
2838 3356
2839 if (np->msi_flags & NV_MSI_X_CAPABLE) { 3357 if (np->msi_flags & NV_MSI_X_CAPABLE) {
2840 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3358 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
@@ -2872,10 +3390,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
2872 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); 3390 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
2873 } else { 3391 } else {
2874 /* Request irq for all interrupts */ 3392 /* Request irq for all interrupts */
2875 if ((!intr_test && 3393 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
2876 request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) ||
2877 (intr_test &&
2878 request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) {
2879 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 3394 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
2880 pci_disable_msix(np->pci_dev); 3395 pci_disable_msix(np->pci_dev);
2881 np->msi_flags &= ~NV_MSI_X_ENABLED; 3396 np->msi_flags &= ~NV_MSI_X_ENABLED;
@@ -2891,8 +3406,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
2891 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { 3406 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
2892 if ((ret = pci_enable_msi(np->pci_dev)) == 0) { 3407 if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
2893 np->msi_flags |= NV_MSI_ENABLED; 3408 np->msi_flags |= NV_MSI_ENABLED;
2894 if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) || 3409 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
2895 (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) {
2896 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 3410 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
2897 pci_disable_msi(np->pci_dev); 3411 pci_disable_msi(np->pci_dev);
2898 np->msi_flags &= ~NV_MSI_ENABLED; 3412 np->msi_flags &= ~NV_MSI_ENABLED;
@@ -2907,8 +3421,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
2907 } 3421 }
2908 } 3422 }
2909 if (ret != 0) { 3423 if (ret != 0) {
2910 if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) || 3424 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
2911 (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0))
2912 goto out_err; 3425 goto out_err;
2913 3426
2914 } 3427 }
@@ -3051,47 +3564,8 @@ static void nv_do_stats_poll(unsigned long data)
3051{ 3564{
3052 struct net_device *dev = (struct net_device *) data; 3565 struct net_device *dev = (struct net_device *) data;
3053 struct fe_priv *np = netdev_priv(dev); 3566 struct fe_priv *np = netdev_priv(dev);
3054 u8 __iomem *base = get_hwbase(dev);
3055 3567
3056 np->estats.tx_bytes += readl(base + NvRegTxCnt); 3568 nv_get_hw_stats(dev);
3057 np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
3058 np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
3059 np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
3060 np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
3061 np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
3062 np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
3063 np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
3064 np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
3065 np->estats.tx_deferral += readl(base + NvRegTxDef);
3066 np->estats.tx_packets += readl(base + NvRegTxFrame);
3067 np->estats.tx_pause += readl(base + NvRegTxPause);
3068 np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
3069 np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
3070 np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
3071 np->estats.rx_runt += readl(base + NvRegRxRunt);
3072 np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
3073 np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
3074 np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
3075 np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
3076 np->estats.rx_length_error += readl(base + NvRegRxLenErr);
3077 np->estats.rx_unicast += readl(base + NvRegRxUnicast);
3078 np->estats.rx_multicast += readl(base + NvRegRxMulticast);
3079 np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
3080 np->estats.rx_bytes += readl(base + NvRegRxCnt);
3081 np->estats.rx_pause += readl(base + NvRegRxPause);
3082 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
3083 np->estats.rx_packets =
3084 np->estats.rx_unicast +
3085 np->estats.rx_multicast +
3086 np->estats.rx_broadcast;
3087 np->estats.rx_errors_total =
3088 np->estats.rx_crc_errors +
3089 np->estats.rx_over_errors +
3090 np->estats.rx_frame_error +
3091 (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
3092 np->estats.rx_late_collision +
3093 np->estats.rx_runt +
3094 np->estats.rx_frame_too_long;
3095 3569
3096 if (!np->in_shutdown) 3570 if (!np->in_shutdown)
3097 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); 3571 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
@@ -3465,7 +3939,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
3465{ 3939{
3466 struct fe_priv *np = netdev_priv(dev); 3940 struct fe_priv *np = netdev_priv(dev);
3467 u8 __iomem *base = get_hwbase(dev); 3941 u8 __iomem *base = get_hwbase(dev);
3468 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff, *rx_dma, *tx_dma, *tx_dma_len; 3942 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
3469 dma_addr_t ring_addr; 3943 dma_addr_t ring_addr;
3470 3944
3471 if (ring->rx_pending < RX_RING_MIN || 3945 if (ring->rx_pending < RX_RING_MIN ||
@@ -3491,12 +3965,9 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
3491 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 3965 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
3492 &ring_addr); 3966 &ring_addr);
3493 } 3967 }
3494 rx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->rx_pending, GFP_KERNEL); 3968 rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
3495 rx_dma = kmalloc(sizeof(dma_addr_t) * ring->rx_pending, GFP_KERNEL); 3969 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
3496 tx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->tx_pending, GFP_KERNEL); 3970 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
3497 tx_dma = kmalloc(sizeof(dma_addr_t) * ring->tx_pending, GFP_KERNEL);
3498 tx_dma_len = kmalloc(sizeof(unsigned int) * ring->tx_pending, GFP_KERNEL);
3499 if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) {
3500 /* fall back to old rings */ 3971 /* fall back to old rings */
3501 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 3972 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
3502 if (rxtx_ring) 3973 if (rxtx_ring)
@@ -3509,14 +3980,8 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
3509 } 3980 }
3510 if (rx_skbuff) 3981 if (rx_skbuff)
3511 kfree(rx_skbuff); 3982 kfree(rx_skbuff);
3512 if (rx_dma)
3513 kfree(rx_dma);
3514 if (tx_skbuff) 3983 if (tx_skbuff)
3515 kfree(tx_skbuff); 3984 kfree(tx_skbuff);
3516 if (tx_dma)
3517 kfree(tx_dma);
3518 if (tx_dma_len)
3519 kfree(tx_dma_len);
3520 goto exit; 3985 goto exit;
3521 } 3986 }
3522 3987
@@ -3538,8 +4003,6 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
3538 /* set new values */ 4003 /* set new values */
3539 np->rx_ring_size = ring->rx_pending; 4004 np->rx_ring_size = ring->rx_pending;
3540 np->tx_ring_size = ring->tx_pending; 4005 np->tx_ring_size = ring->tx_pending;
3541 np->tx_limit_stop = ring->tx_pending - TX_LIMIT_DIFFERENCE;
3542 np->tx_limit_start = ring->tx_pending - TX_LIMIT_DIFFERENCE - 1;
3543 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4006 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
3544 np->rx_ring.orig = (struct ring_desc*)rxtx_ring; 4007 np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
3545 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 4008 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
@@ -3547,18 +4010,12 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
3547 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; 4010 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
3548 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 4011 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
3549 } 4012 }
3550 np->rx_skbuff = (struct sk_buff**)rx_skbuff; 4013 np->rx_skb = (struct nv_skb_map*)rx_skbuff;
3551 np->rx_dma = (dma_addr_t*)rx_dma; 4014 np->tx_skb = (struct nv_skb_map*)tx_skbuff;
3552 np->tx_skbuff = (struct sk_buff**)tx_skbuff;
3553 np->tx_dma = (dma_addr_t*)tx_dma;
3554 np->tx_dma_len = (unsigned int*)tx_dma_len;
3555 np->ring_addr = ring_addr; 4015 np->ring_addr = ring_addr;
3556 4016
3557 memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size); 4017 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
3558 memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size); 4018 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
3559 memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
3560 memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
3561 memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
3562 4019
3563 if (netif_running(dev)) { 4020 if (netif_running(dev)) {
3564 /* reinit driver view of the queues */ 4021 /* reinit driver view of the queues */
@@ -3727,8 +4184,10 @@ static int nv_get_stats_count(struct net_device *dev)
3727{ 4184{
3728 struct fe_priv *np = netdev_priv(dev); 4185 struct fe_priv *np = netdev_priv(dev);
3729 4186
3730 if (np->driver_data & DEV_HAS_STATISTICS) 4187 if (np->driver_data & DEV_HAS_STATISTICS_V1)
3731 return sizeof(struct nv_ethtool_stats)/sizeof(u64); 4188 return NV_DEV_STATISTICS_V1_COUNT;
4189 else if (np->driver_data & DEV_HAS_STATISTICS_V2)
4190 return NV_DEV_STATISTICS_V2_COUNT;
3732 else 4191 else
3733 return 0; 4192 return 0;
3734} 4193}
@@ -3955,7 +4414,7 @@ static int nv_loopback_test(struct net_device *dev)
3955 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n", 4414 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
3956 dev->name, len, pkt_len); 4415 dev->name, len, pkt_len);
3957 } else { 4416 } else {
3958 rx_skb = np->rx_skbuff[0]; 4417 rx_skb = np->rx_skb[0].skb;
3959 for (i = 0; i < pkt_len; i++) { 4418 for (i = 0; i < pkt_len; i++) {
3960 if (rx_skb->data[i] != (u8)(i & 0xff)) { 4419 if (rx_skb->data[i] != (u8)(i & 0xff)) {
3961 ret = 0; 4420 ret = 0;
@@ -4315,7 +4774,7 @@ static int nv_open(struct net_device *dev)
4315 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4774 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4316 4775
4317 /* start statistics timer */ 4776 /* start statistics timer */
4318 if (np->driver_data & DEV_HAS_STATISTICS) 4777 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2))
4319 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); 4778 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
4320 4779
4321 spin_unlock_irq(&np->lock); 4780 spin_unlock_irq(&np->lock);
@@ -4412,7 +4871,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4412 if (err < 0) 4871 if (err < 0)
4413 goto out_disable; 4872 goto out_disable;
4414 4873
4415 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS)) 4874 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2))
4875 np->register_size = NV_PCI_REGSZ_VER3;
4876 else if (id->driver_data & DEV_HAS_STATISTICS_V1)
4416 np->register_size = NV_PCI_REGSZ_VER2; 4877 np->register_size = NV_PCI_REGSZ_VER2;
4417 else 4878 else
4418 np->register_size = NV_PCI_REGSZ_VER1; 4879 np->register_size = NV_PCI_REGSZ_VER1;
@@ -4475,10 +4936,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4475 np->rx_csum = 1; 4936 np->rx_csum = 1;
4476 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 4937 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4477 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 4938 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
4478#ifdef NETIF_F_TSO
4479 dev->features |= NETIF_F_TSO; 4939 dev->features |= NETIF_F_TSO;
4480#endif 4940 }
4481 }
4482 4941
4483 np->vlanctl_bits = 0; 4942 np->vlanctl_bits = 0;
4484 if (id->driver_data & DEV_HAS_VLAN) { 4943 if (id->driver_data & DEV_HAS_VLAN) {
@@ -4512,8 +4971,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4512 4971
4513 np->rx_ring_size = RX_RING_DEFAULT; 4972 np->rx_ring_size = RX_RING_DEFAULT;
4514 np->tx_ring_size = TX_RING_DEFAULT; 4973 np->tx_ring_size = TX_RING_DEFAULT;
4515 np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE;
4516 np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1;
4517 4974
4518 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4975 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
4519 np->rx_ring.orig = pci_alloc_consistent(pci_dev, 4976 np->rx_ring.orig = pci_alloc_consistent(pci_dev,
@@ -4530,22 +4987,19 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4530 goto out_unmap; 4987 goto out_unmap;
4531 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 4988 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4532 } 4989 }
4533 np->rx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->rx_ring_size, GFP_KERNEL); 4990 np->rx_skb = kmalloc(sizeof(struct nv_skb_map) * np->rx_ring_size, GFP_KERNEL);
4534 np->rx_dma = kmalloc(sizeof(dma_addr_t) * np->rx_ring_size, GFP_KERNEL); 4991 np->tx_skb = kmalloc(sizeof(struct nv_skb_map) * np->tx_ring_size, GFP_KERNEL);
4535 np->tx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->tx_ring_size, GFP_KERNEL); 4992 if (!np->rx_skb || !np->tx_skb)
4536 np->tx_dma = kmalloc(sizeof(dma_addr_t) * np->tx_ring_size, GFP_KERNEL);
4537 np->tx_dma_len = kmalloc(sizeof(unsigned int) * np->tx_ring_size, GFP_KERNEL);
4538 if (!np->rx_skbuff || !np->rx_dma || !np->tx_skbuff || !np->tx_dma || !np->tx_dma_len)
4539 goto out_freering; 4993 goto out_freering;
4540 memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size); 4994 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4541 memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size); 4995 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
4542 memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
4543 memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
4544 memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
4545 4996
4546 dev->open = nv_open; 4997 dev->open = nv_open;
4547 dev->stop = nv_close; 4998 dev->stop = nv_close;
4548 dev->hard_start_xmit = nv_start_xmit; 4999 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
5000 dev->hard_start_xmit = nv_start_xmit;
5001 else
5002 dev->hard_start_xmit = nv_start_xmit_optimized;
4549 dev->get_stats = nv_get_stats; 5003 dev->get_stats = nv_get_stats;
4550 dev->change_mtu = nv_change_mtu; 5004 dev->change_mtu = nv_change_mtu;
4551 dev->set_mac_address = nv_set_mac_address; 5005 dev->set_mac_address = nv_set_mac_address;
@@ -4553,7 +5007,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4553#ifdef CONFIG_NET_POLL_CONTROLLER 5007#ifdef CONFIG_NET_POLL_CONTROLLER
4554 dev->poll_controller = nv_poll_controller; 5008 dev->poll_controller = nv_poll_controller;
4555#endif 5009#endif
4556 dev->weight = 64; 5010 dev->weight = RX_WORK_PER_LOOP;
4557#ifdef CONFIG_FORCEDETH_NAPI 5011#ifdef CONFIG_FORCEDETH_NAPI
4558 dev->poll = nv_napi_poll; 5012 dev->poll = nv_napi_poll;
4559#endif 5013#endif
@@ -4868,83 +5322,83 @@ static struct pci_device_id pci_tbl[] = {
4868 }, 5322 },
4869 { /* CK804 Ethernet Controller */ 5323 { /* CK804 Ethernet Controller */
4870 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8), 5324 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
4871 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, 5325 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
4872 }, 5326 },
4873 { /* CK804 Ethernet Controller */ 5327 { /* CK804 Ethernet Controller */
4874 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9), 5328 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
4875 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, 5329 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
4876 }, 5330 },
4877 { /* MCP04 Ethernet Controller */ 5331 { /* MCP04 Ethernet Controller */
4878 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10), 5332 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
4879 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, 5333 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
4880 }, 5334 },
4881 { /* MCP04 Ethernet Controller */ 5335 { /* MCP04 Ethernet Controller */
4882 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11), 5336 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
4883 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, 5337 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
4884 }, 5338 },
4885 { /* MCP51 Ethernet Controller */ 5339 { /* MCP51 Ethernet Controller */
4886 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12), 5340 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
4887 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL, 5341 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
4888 }, 5342 },
4889 { /* MCP51 Ethernet Controller */ 5343 { /* MCP51 Ethernet Controller */
4890 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13), 5344 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
4891 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL, 5345 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
4892 }, 5346 },
4893 { /* MCP55 Ethernet Controller */ 5347 { /* MCP55 Ethernet Controller */
4894 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), 5348 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
4895 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5349 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4896 }, 5350 },
4897 { /* MCP55 Ethernet Controller */ 5351 { /* MCP55 Ethernet Controller */
4898 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), 5352 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
4899 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5353 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4900 }, 5354 },
4901 { /* MCP61 Ethernet Controller */ 5355 { /* MCP61 Ethernet Controller */
4902 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16), 5356 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
4903 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5357 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4904 }, 5358 },
4905 { /* MCP61 Ethernet Controller */ 5359 { /* MCP61 Ethernet Controller */
4906 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17), 5360 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
4907 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5361 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4908 }, 5362 },
4909 { /* MCP61 Ethernet Controller */ 5363 { /* MCP61 Ethernet Controller */
4910 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18), 5364 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
4911 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5365 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4912 }, 5366 },
4913 { /* MCP61 Ethernet Controller */ 5367 { /* MCP61 Ethernet Controller */
4914 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19), 5368 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
4915 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5369 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4916 }, 5370 },
4917 { /* MCP65 Ethernet Controller */ 5371 { /* MCP65 Ethernet Controller */
4918 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20), 5372 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
4919 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5373 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4920 }, 5374 },
4921 { /* MCP65 Ethernet Controller */ 5375 { /* MCP65 Ethernet Controller */
4922 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21), 5376 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
4923 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5377 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4924 }, 5378 },
4925 { /* MCP65 Ethernet Controller */ 5379 { /* MCP65 Ethernet Controller */
4926 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22), 5380 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
4927 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5381 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4928 }, 5382 },
4929 { /* MCP65 Ethernet Controller */ 5383 { /* MCP65 Ethernet Controller */
4930 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23), 5384 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
4931 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5385 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4932 }, 5386 },
4933 { /* MCP67 Ethernet Controller */ 5387 { /* MCP67 Ethernet Controller */
4934 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24), 5388 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
4935 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5389 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4936 }, 5390 },
4937 { /* MCP67 Ethernet Controller */ 5391 { /* MCP67 Ethernet Controller */
4938 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25), 5392 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
4939 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5393 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4940 }, 5394 },
4941 { /* MCP67 Ethernet Controller */ 5395 { /* MCP67 Ethernet Controller */
4942 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26), 5396 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
4943 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5397 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4944 }, 5398 },
4945 { /* MCP67 Ethernet Controller */ 5399 { /* MCP67 Ethernet Controller */
4946 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27), 5400 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
4947 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5401 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4948 }, 5402 },
4949 {0,}, 5403 {0,},
4950}; 5404};
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 844c136e9920..7dc5185aa2c0 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -3034,7 +3034,7 @@ static int __init hp100_module_init(void)
3034 goto out2; 3034 goto out2;
3035#endif 3035#endif
3036#ifdef CONFIG_PCI 3036#ifdef CONFIG_PCI
3037 err = pci_module_init(&hp100_pci_driver); 3037 err = pci_register_driver(&hp100_pci_driver);
3038 if (err && err != -ENODEV) 3038 if (err && err != -ENODEV)
3039 goto out3; 3039 goto out3;
3040#endif 3040#endif
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 2194b567239f..0e9ba3c3faf7 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -1102,7 +1102,7 @@ static struct net_device * __init veth_probe_one(int vlan,
1102 } 1102 }
1103 1103
1104 kobject_init(&port->kobject); 1104 kobject_init(&port->kobject);
1105 port->kobject.parent = &dev->class_dev.kobj; 1105 port->kobject.parent = &dev->dev.kobj;
1106 port->kobject.ktype = &veth_port_ktype; 1106 port->kobject.ktype = &veth_port_ktype;
1107 kobject_set_name(&port->kobject, "veth_port"); 1107 kobject_set_name(&port->kobject, "veth_port");
1108 if (0 != kobject_add(&port->kobject)) 1108 if (0 != kobject_add(&port->kobject))
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index f4aba4355b19..cf30a1059ce0 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -61,9 +61,7 @@
61#include <net/pkt_sched.h> 61#include <net/pkt_sched.h>
62#include <linux/list.h> 62#include <linux/list.h>
63#include <linux/reboot.h> 63#include <linux/reboot.h>
64#ifdef NETIF_F_TSO
65#include <net/checksum.h> 64#include <net/checksum.h>
66#endif
67 65
68#include <linux/ethtool.h> 66#include <linux/ethtool.h>
69#include <linux/if_vlan.h> 67#include <linux/if_vlan.h>
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index 82c044d6e08a..d6628bd9590a 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -82,10 +82,8 @@ static struct ixgb_stats ixgb_gstrings_stats[] = {
82 {"tx_restart_queue", IXGB_STAT(restart_queue) }, 82 {"tx_restart_queue", IXGB_STAT(restart_queue) },
83 {"rx_long_length_errors", IXGB_STAT(stats.roc)}, 83 {"rx_long_length_errors", IXGB_STAT(stats.roc)},
84 {"rx_short_length_errors", IXGB_STAT(stats.ruc)}, 84 {"rx_short_length_errors", IXGB_STAT(stats.ruc)},
85#ifdef NETIF_F_TSO
86 {"tx_tcp_seg_good", IXGB_STAT(stats.tsctc)}, 85 {"tx_tcp_seg_good", IXGB_STAT(stats.tsctc)},
87 {"tx_tcp_seg_failed", IXGB_STAT(stats.tsctfc)}, 86 {"tx_tcp_seg_failed", IXGB_STAT(stats.tsctfc)},
88#endif
89 {"rx_flow_control_xon", IXGB_STAT(stats.xonrxc)}, 87 {"rx_flow_control_xon", IXGB_STAT(stats.xonrxc)},
90 {"rx_flow_control_xoff", IXGB_STAT(stats.xoffrxc)}, 88 {"rx_flow_control_xoff", IXGB_STAT(stats.xoffrxc)},
91 {"tx_flow_control_xon", IXGB_STAT(stats.xontxc)}, 89 {"tx_flow_control_xon", IXGB_STAT(stats.xontxc)},
@@ -240,7 +238,6 @@ ixgb_set_tx_csum(struct net_device *netdev, uint32_t data)
240 return 0; 238 return 0;
241} 239}
242 240
243#ifdef NETIF_F_TSO
244static int 241static int
245ixgb_set_tso(struct net_device *netdev, uint32_t data) 242ixgb_set_tso(struct net_device *netdev, uint32_t data)
246{ 243{
@@ -250,7 +247,6 @@ ixgb_set_tso(struct net_device *netdev, uint32_t data)
250 netdev->features &= ~NETIF_F_TSO; 247 netdev->features &= ~NETIF_F_TSO;
251 return 0; 248 return 0;
252} 249}
253#endif /* NETIF_F_TSO */
254 250
255static uint32_t 251static uint32_t
256ixgb_get_msglevel(struct net_device *netdev) 252ixgb_get_msglevel(struct net_device *netdev)
@@ -722,10 +718,8 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
722 .set_sg = ethtool_op_set_sg, 718 .set_sg = ethtool_op_set_sg,
723 .get_msglevel = ixgb_get_msglevel, 719 .get_msglevel = ixgb_get_msglevel,
724 .set_msglevel = ixgb_set_msglevel, 720 .set_msglevel = ixgb_set_msglevel,
725#ifdef NETIF_F_TSO
726 .get_tso = ethtool_op_get_tso, 721 .get_tso = ethtool_op_get_tso,
727 .set_tso = ixgb_set_tso, 722 .set_tso = ixgb_set_tso,
728#endif
729 .get_strings = ixgb_get_strings, 723 .get_strings = ixgb_get_strings,
730 .phys_id = ixgb_phys_id, 724 .phys_id = ixgb_phys_id,
731 .get_stats_count = ixgb_get_stats_count, 725 .get_stats_count = ixgb_get_stats_count,
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index a083a9189230..0c3682889344 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -456,9 +456,7 @@ ixgb_probe(struct pci_dev *pdev,
456 NETIF_F_HW_VLAN_TX | 456 NETIF_F_HW_VLAN_TX |
457 NETIF_F_HW_VLAN_RX | 457 NETIF_F_HW_VLAN_RX |
458 NETIF_F_HW_VLAN_FILTER; 458 NETIF_F_HW_VLAN_FILTER;
459#ifdef NETIF_F_TSO
460 netdev->features |= NETIF_F_TSO; 459 netdev->features |= NETIF_F_TSO;
461#endif
462#ifdef NETIF_F_LLTX 460#ifdef NETIF_F_LLTX
463 netdev->features |= NETIF_F_LLTX; 461 netdev->features |= NETIF_F_LLTX;
464#endif 462#endif
@@ -1176,7 +1174,6 @@ ixgb_watchdog(unsigned long data)
1176static int 1174static int
1177ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) 1175ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1178{ 1176{
1179#ifdef NETIF_F_TSO
1180 struct ixgb_context_desc *context_desc; 1177 struct ixgb_context_desc *context_desc;
1181 unsigned int i; 1178 unsigned int i;
1182 uint8_t ipcss, ipcso, tucss, tucso, hdr_len; 1179 uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
@@ -1233,7 +1230,6 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1233 1230
1234 return 1; 1231 return 1;
1235 } 1232 }
1236#endif
1237 1233
1238 return 0; 1234 return 0;
1239} 1235}
@@ -1609,7 +1605,7 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
1609 struct pci_dev *pdev = adapter->pdev; 1605 struct pci_dev *pdev = adapter->pdev;
1610 1606
1611 /* Prevent stats update while adapter is being reset */ 1607 /* Prevent stats update while adapter is being reset */
1612 if (pdev->error_state && pdev->error_state != pci_channel_io_normal) 1608 if (pci_channel_offline(pdev))
1613 return; 1609 return;
1614 1610
1615 if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) || 1611 if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 25b559b5d5ed..e67361e2bf5d 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -27,8 +27,6 @@
27 27
28#include "macb.h" 28#include "macb.h"
29 29
30#define to_net_dev(class) container_of(class, struct net_device, class_dev)
31
32#define RX_BUFFER_SIZE 128 30#define RX_BUFFER_SIZE 128
33#define RX_RING_SIZE 512 31#define RX_RING_SIZE 512
34#define RX_RING_BYTES (sizeof(struct dma_desc) * RX_RING_SIZE) 32#define RX_RING_BYTES (sizeof(struct dma_desc) * RX_RING_SIZE)
@@ -945,10 +943,10 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
945 return ret; 943 return ret;
946} 944}
947 945
948static ssize_t macb_mii_show(const struct class_device *cd, char *buf, 946static ssize_t macb_mii_show(const struct device *_dev, char *buf,
949 unsigned long addr) 947 unsigned long addr)
950{ 948{
951 struct net_device *dev = to_net_dev(cd); 949 struct net_device *dev = to_net_dev(_dev);
952 struct macb *bp = netdev_priv(dev); 950 struct macb *bp = netdev_priv(dev);
953 ssize_t ret = -EINVAL; 951 ssize_t ret = -EINVAL;
954 952
@@ -962,11 +960,13 @@ static ssize_t macb_mii_show(const struct class_device *cd, char *buf,
962} 960}
963 961
964#define MII_ENTRY(name, addr) \ 962#define MII_ENTRY(name, addr) \
965static ssize_t show_##name(struct class_device *cd, char *buf) \ 963static ssize_t show_##name(struct device *_dev, \
964 struct device_attribute *attr, \
965 char *buf) \
966{ \ 966{ \
967 return macb_mii_show(cd, buf, addr); \ 967 return macb_mii_show(_dev, buf, addr); \
968} \ 968} \
969static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) 969static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
970 970
971MII_ENTRY(bmcr, MII_BMCR); 971MII_ENTRY(bmcr, MII_BMCR);
972MII_ENTRY(bmsr, MII_BMSR); 972MII_ENTRY(bmsr, MII_BMSR);
@@ -977,13 +977,13 @@ MII_ENTRY(lpa, MII_LPA);
977MII_ENTRY(expansion, MII_EXPANSION); 977MII_ENTRY(expansion, MII_EXPANSION);
978 978
979static struct attribute *macb_mii_attrs[] = { 979static struct attribute *macb_mii_attrs[] = {
980 &class_device_attr_bmcr.attr, 980 &dev_attr_bmcr.attr,
981 &class_device_attr_bmsr.attr, 981 &dev_attr_bmsr.attr,
982 &class_device_attr_physid1.attr, 982 &dev_attr_physid1.attr,
983 &class_device_attr_physid2.attr, 983 &dev_attr_physid2.attr,
984 &class_device_attr_advertise.attr, 984 &dev_attr_advertise.attr,
985 &class_device_attr_lpa.attr, 985 &dev_attr_lpa.attr,
986 &class_device_attr_expansion.attr, 986 &dev_attr_expansion.attr,
987 NULL, 987 NULL,
988}; 988};
989 989
@@ -994,17 +994,17 @@ static struct attribute_group macb_mii_group = {
994 994
995static void macb_unregister_sysfs(struct net_device *net) 995static void macb_unregister_sysfs(struct net_device *net)
996{ 996{
997 struct class_device *class_dev = &net->class_dev; 997 struct device *_dev = &net->dev;
998 998
999 sysfs_remove_group(&class_dev->kobj, &macb_mii_group); 999 sysfs_remove_group(&_dev->kobj, &macb_mii_group);
1000} 1000}
1001 1001
1002static int macb_register_sysfs(struct net_device *net) 1002static int macb_register_sysfs(struct net_device *net)
1003{ 1003{
1004 struct class_device *class_dev = &net->class_dev; 1004 struct device *_dev = &net->dev;
1005 int ret; 1005 int ret;
1006 1006
1007 ret = sysfs_create_group(&class_dev->kobj, &macb_mii_group); 1007 ret = sysfs_create_group(&_dev->kobj, &macb_mii_group);
1008 if (ret) 1008 if (ret)
1009 printk(KERN_WARNING 1009 printk(KERN_WARNING
1010 "%s: sysfs mii attribute registration failed: %d\n", 1010 "%s: sysfs mii attribute registration failed: %d\n",
@@ -1046,6 +1046,14 @@ static int __devinit macb_probe(struct platform_device *pdev)
1046 1046
1047 spin_lock_init(&bp->lock); 1047 spin_lock_init(&bp->lock);
1048 1048
1049#if defined(CONFIG_ARCH_AT91)
1050 bp->pclk = clk_get(&pdev->dev, "macb_clk");
1051 if (IS_ERR(bp->pclk)) {
1052 dev_err(&pdev->dev, "failed to get macb_clk\n");
1053 goto err_out_free_dev;
1054 }
1055 clk_enable(bp->pclk);
1056#else
1049 bp->pclk = clk_get(&pdev->dev, "pclk"); 1057 bp->pclk = clk_get(&pdev->dev, "pclk");
1050 if (IS_ERR(bp->pclk)) { 1058 if (IS_ERR(bp->pclk)) {
1051 dev_err(&pdev->dev, "failed to get pclk\n"); 1059 dev_err(&pdev->dev, "failed to get pclk\n");
@@ -1059,6 +1067,7 @@ static int __devinit macb_probe(struct platform_device *pdev)
1059 1067
1060 clk_enable(bp->pclk); 1068 clk_enable(bp->pclk);
1061 clk_enable(bp->hclk); 1069 clk_enable(bp->hclk);
1070#endif
1062 1071
1063 bp->regs = ioremap(regs->start, regs->end - regs->start + 1); 1072 bp->regs = ioremap(regs->start, regs->end - regs->start + 1);
1064 if (!bp->regs) { 1073 if (!bp->regs) {
@@ -1119,9 +1128,17 @@ static int __devinit macb_probe(struct platform_device *pdev)
1119 1128
1120 pdata = pdev->dev.platform_data; 1129 pdata = pdev->dev.platform_data;
1121 if (pdata && pdata->is_rmii) 1130 if (pdata && pdata->is_rmii)
1131#if defined(CONFIG_ARCH_AT91)
1132 macb_writel(bp, USRIO, (MACB_BIT(RMII) | MACB_BIT(CLKEN)) );
1133#else
1122 macb_writel(bp, USRIO, 0); 1134 macb_writel(bp, USRIO, 0);
1135#endif
1123 else 1136 else
1137#if defined(CONFIG_ARCH_AT91)
1138 macb_writel(bp, USRIO, MACB_BIT(CLKEN));
1139#else
1124 macb_writel(bp, USRIO, MACB_BIT(MII)); 1140 macb_writel(bp, USRIO, MACB_BIT(MII));
1141#endif
1125 1142
1126 bp->tx_pending = DEF_TX_RING_PENDING; 1143 bp->tx_pending = DEF_TX_RING_PENDING;
1127 1144
@@ -1148,9 +1165,11 @@ err_out_free_irq:
1148err_out_iounmap: 1165err_out_iounmap:
1149 iounmap(bp->regs); 1166 iounmap(bp->regs);
1150err_out_disable_clocks: 1167err_out_disable_clocks:
1168#ifndef CONFIG_ARCH_AT91
1151 clk_disable(bp->hclk); 1169 clk_disable(bp->hclk);
1152 clk_disable(bp->pclk);
1153 clk_put(bp->hclk); 1170 clk_put(bp->hclk);
1171#endif
1172 clk_disable(bp->pclk);
1154err_out_put_pclk: 1173err_out_put_pclk:
1155 clk_put(bp->pclk); 1174 clk_put(bp->pclk);
1156err_out_free_dev: 1175err_out_free_dev:
@@ -1173,9 +1192,11 @@ static int __devexit macb_remove(struct platform_device *pdev)
1173 unregister_netdev(dev); 1192 unregister_netdev(dev);
1174 free_irq(dev->irq, dev); 1193 free_irq(dev->irq, dev);
1175 iounmap(bp->regs); 1194 iounmap(bp->regs);
1195#ifndef CONFIG_ARCH_AT91
1176 clk_disable(bp->hclk); 1196 clk_disable(bp->hclk);
1177 clk_disable(bp->pclk);
1178 clk_put(bp->hclk); 1197 clk_put(bp->hclk);
1198#endif
1199 clk_disable(bp->pclk);
1179 clk_put(bp->pclk); 1200 clk_put(bp->pclk);
1180 free_netdev(dev); 1201 free_netdev(dev);
1181 platform_set_drvdata(pdev, NULL); 1202 platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/macb.h b/drivers/net/macb.h
index 27bf0ae0f0bb..b3bb2182edd1 100644
--- a/drivers/net/macb.h
+++ b/drivers/net/macb.h
@@ -200,7 +200,7 @@
200#define MACB_SOF_OFFSET 30 200#define MACB_SOF_OFFSET 30
201#define MACB_SOF_SIZE 2 201#define MACB_SOF_SIZE 2
202 202
203/* Bitfields in USRIO */ 203/* Bitfields in USRIO (AVR32) */
204#define MACB_MII_OFFSET 0 204#define MACB_MII_OFFSET 0
205#define MACB_MII_SIZE 1 205#define MACB_MII_SIZE 1
206#define MACB_EAM_OFFSET 1 206#define MACB_EAM_OFFSET 1
@@ -210,6 +210,12 @@
210#define MACB_TX_PAUSE_ZERO_OFFSET 3 210#define MACB_TX_PAUSE_ZERO_OFFSET 3
211#define MACB_TX_PAUSE_ZERO_SIZE 1 211#define MACB_TX_PAUSE_ZERO_SIZE 1
212 212
213/* Bitfields in USRIO (AT91) */
214#define MACB_RMII_OFFSET 0
215#define MACB_RMII_SIZE 1
216#define MACB_CLKEN_OFFSET 1
217#define MACB_CLKEN_SIZE 1
218
213/* Bitfields in WOL */ 219/* Bitfields in WOL */
214#define MACB_IP_OFFSET 0 220#define MACB_IP_OFFSET 0
215#define MACB_IP_SIZE 16 221#define MACB_IP_SIZE 16
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index 2907cfb12ada..9ec24f0d5d68 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -15,6 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/crc32.h> 16#include <linux/crc32.h>
17#include <linux/spinlock.h> 17#include <linux/spinlock.h>
18#include <linux/bitrev.h>
18#include <asm/prom.h> 19#include <asm/prom.h>
19#include <asm/dbdma.h> 20#include <asm/dbdma.h>
20#include <asm/io.h> 21#include <asm/io.h>
@@ -74,7 +75,6 @@ struct mace_data {
74#define PRIV_BYTES (sizeof(struct mace_data) \ 75#define PRIV_BYTES (sizeof(struct mace_data) \
75 + (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd)) 76 + (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))
76 77
77static int bitrev(int);
78static int mace_open(struct net_device *dev); 78static int mace_open(struct net_device *dev);
79static int mace_close(struct net_device *dev); 79static int mace_close(struct net_device *dev);
80static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); 80static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
@@ -96,18 +96,6 @@ static void __mace_set_address(struct net_device *dev, void *addr);
96 */ 96 */
97static unsigned char *dummy_buf; 97static unsigned char *dummy_buf;
98 98
99/* Bit-reverse one byte of an ethernet hardware address. */
100static inline int
101bitrev(int b)
102{
103 int d = 0, i;
104
105 for (i = 0; i < 8; ++i, b >>= 1)
106 d = (d << 1) | (b & 1);
107 return d;
108}
109
110
111static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_id *match) 99static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
112{ 100{
113 struct device_node *mace = macio_get_of_node(mdev); 101 struct device_node *mace = macio_get_of_node(mdev);
@@ -173,7 +161,7 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i
173 161
174 rev = addr[0] == 0 && addr[1] == 0xA0; 162 rev = addr[0] == 0 && addr[1] == 0xA0;
175 for (j = 0; j < 6; ++j) { 163 for (j = 0; j < 6; ++j) {
176 dev->dev_addr[j] = rev? bitrev(addr[j]): addr[j]; 164 dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
177 } 165 }
178 mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) | 166 mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
179 in_8(&mp->mace->chipid_lo); 167 in_8(&mp->mace->chipid_lo);
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c
index 464e4a6f3d5f..5d541e873041 100644
--- a/drivers/net/macmace.c
+++ b/drivers/net/macmace.c
@@ -22,6 +22,7 @@
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/string.h> 23#include <linux/string.h>
24#include <linux/crc32.h> 24#include <linux/crc32.h>
25#include <linux/bitrev.h>
25#include <asm/io.h> 26#include <asm/io.h>
26#include <asm/pgtable.h> 27#include <asm/pgtable.h>
27#include <asm/irq.h> 28#include <asm/irq.h>
@@ -81,19 +82,6 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id);
81static irqreturn_t mace_dma_intr(int irq, void *dev_id); 82static irqreturn_t mace_dma_intr(int irq, void *dev_id);
82static void mace_tx_timeout(struct net_device *dev); 83static void mace_tx_timeout(struct net_device *dev);
83 84
84/* Bit-reverse one byte of an ethernet hardware address. */
85
86static int bitrev(int b)
87{
88 int d = 0, i;
89
90 for (i = 0; i < 8; ++i, b >>= 1) {
91 d = (d << 1) | (b & 1);
92 }
93
94 return d;
95}
96
97/* 85/*
98 * Load a receive DMA channel with a base address and ring length 86 * Load a receive DMA channel with a base address and ring length
99 */ 87 */
@@ -219,12 +207,12 @@ struct net_device *mace_probe(int unit)
219 addr = (void *)MACE_PROM; 207 addr = (void *)MACE_PROM;
220 208
221 for (j = 0; j < 6; ++j) { 209 for (j = 0; j < 6; ++j) {
222 u8 v=bitrev(addr[j<<4]); 210 u8 v = bitrev8(addr[j<<4]);
223 checksum ^= v; 211 checksum ^= v;
224 dev->dev_addr[j] = v; 212 dev->dev_addr[j] = v;
225 } 213 }
226 for (; j < 8; ++j) { 214 for (; j < 8; ++j) {
227 checksum ^= bitrev(addr[j<<4]); 215 checksum ^= bitrev8(addr[j<<4]);
228 } 216 }
229 217
230 if (checksum != 0xFF) { 218 if (checksum != 0xFF) {
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c
index 393d995f1919..24f6050fbf33 100644
--- a/drivers/net/macsonic.c
+++ b/drivers/net/macsonic.c
@@ -121,16 +121,12 @@ enum macsonic_type {
121 * For reversing the PROM address 121 * For reversing the PROM address
122 */ 122 */
123 123
124static unsigned char nibbletab[] = {0, 8, 4, 12, 2, 10, 6, 14,
125 1, 9, 5, 13, 3, 11, 7, 15};
126
127static inline void bit_reverse_addr(unsigned char addr[6]) 124static inline void bit_reverse_addr(unsigned char addr[6])
128{ 125{
129 int i; 126 int i;
130 127
131 for(i = 0; i < 6; i++) 128 for(i = 0; i < 6; i++)
132 addr[i] = ((nibbletab[addr[i] & 0xf] << 4) | 129 addr[i] = bitrev8(addr[i]);
133 nibbletab[(addr[i] >> 4) &0xf]);
134} 130}
135 131
136int __init macsonic_init(struct net_device* dev) 132int __init macsonic_init(struct net_device* dev)
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 61cbd4a60446..030924fb1ab3 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1412,10 +1412,8 @@ static const struct ethtool_ops myri10ge_ethtool_ops = {
1412 .set_tx_csum = ethtool_op_set_tx_hw_csum, 1412 .set_tx_csum = ethtool_op_set_tx_hw_csum,
1413 .get_sg = ethtool_op_get_sg, 1413 .get_sg = ethtool_op_get_sg,
1414 .set_sg = ethtool_op_set_sg, 1414 .set_sg = ethtool_op_set_sg,
1415#ifdef NETIF_F_TSO
1416 .get_tso = ethtool_op_get_tso, 1415 .get_tso = ethtool_op_get_tso,
1417 .set_tso = ethtool_op_set_tso, 1416 .set_tso = ethtool_op_set_tso,
1418#endif
1419 .get_strings = myri10ge_get_strings, 1417 .get_strings = myri10ge_get_strings,
1420 .get_stats_count = myri10ge_get_stats_count, 1418 .get_stats_count = myri10ge_get_stats_count,
1421 .get_ethtool_stats = myri10ge_get_ethtool_stats, 1419 .get_ethtool_stats = myri10ge_get_ethtool_stats,
@@ -1975,13 +1973,11 @@ again:
1975 mss = 0; 1973 mss = 0;
1976 max_segments = MXGEFW_MAX_SEND_DESC; 1974 max_segments = MXGEFW_MAX_SEND_DESC;
1977 1975
1978#ifdef NETIF_F_TSO
1979 if (skb->len > (dev->mtu + ETH_HLEN)) { 1976 if (skb->len > (dev->mtu + ETH_HLEN)) {
1980 mss = skb_shinfo(skb)->gso_size; 1977 mss = skb_shinfo(skb)->gso_size;
1981 if (mss != 0) 1978 if (mss != 0)
1982 max_segments = MYRI10GE_MAX_SEND_DESC_TSO; 1979 max_segments = MYRI10GE_MAX_SEND_DESC_TSO;
1983 } 1980 }
1984#endif /*NETIF_F_TSO */
1985 1981
1986 if ((unlikely(avail < max_segments))) { 1982 if ((unlikely(avail < max_segments))) {
1987 /* we are out of transmit resources */ 1983 /* we are out of transmit resources */
@@ -2013,7 +2009,6 @@ again:
2013 2009
2014 cum_len = 0; 2010 cum_len = 0;
2015 2011
2016#ifdef NETIF_F_TSO
2017 if (mss) { /* TSO */ 2012 if (mss) { /* TSO */
2018 /* this removes any CKSUM flag from before */ 2013 /* this removes any CKSUM flag from before */
2019 flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST); 2014 flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST);
@@ -2029,7 +2024,6 @@ again:
2029 * the checksum by parsing the header. */ 2024 * the checksum by parsing the header. */
2030 pseudo_hdr_offset = mss; 2025 pseudo_hdr_offset = mss;
2031 } else 2026 } else
2032#endif /*NETIF_F_TSO */
2033 /* Mark small packets, and pad out tiny packets */ 2027 /* Mark small packets, and pad out tiny packets */
2034 if (skb->len <= MXGEFW_SEND_SMALL_SIZE) { 2028 if (skb->len <= MXGEFW_SEND_SMALL_SIZE) {
2035 flags |= MXGEFW_FLAGS_SMALL; 2029 flags |= MXGEFW_FLAGS_SMALL;
@@ -2097,7 +2091,6 @@ again:
2097 seglen = len; 2091 seglen = len;
2098 flags_next = flags & ~MXGEFW_FLAGS_FIRST; 2092 flags_next = flags & ~MXGEFW_FLAGS_FIRST;
2099 cum_len_next = cum_len + seglen; 2093 cum_len_next = cum_len + seglen;
2100#ifdef NETIF_F_TSO
2101 if (mss) { /* TSO */ 2094 if (mss) { /* TSO */
2102 (req - rdma_count)->rdma_count = rdma_count + 1; 2095 (req - rdma_count)->rdma_count = rdma_count + 1;
2103 2096
@@ -2124,7 +2117,6 @@ again:
2124 (small * MXGEFW_FLAGS_SMALL); 2117 (small * MXGEFW_FLAGS_SMALL);
2125 } 2118 }
2126 } 2119 }
2127#endif /* NETIF_F_TSO */
2128 req->addr_high = high_swapped; 2120 req->addr_high = high_swapped;
2129 req->addr_low = htonl(low); 2121 req->addr_low = htonl(low);
2130 req->pseudo_hdr_offset = htons(pseudo_hdr_offset); 2122 req->pseudo_hdr_offset = htons(pseudo_hdr_offset);
@@ -2161,14 +2153,12 @@ again:
2161 } 2153 }
2162 2154
2163 (req - rdma_count)->rdma_count = rdma_count; 2155 (req - rdma_count)->rdma_count = rdma_count;
2164#ifdef NETIF_F_TSO
2165 if (mss) 2156 if (mss)
2166 do { 2157 do {
2167 req--; 2158 req--;
2168 req->flags |= MXGEFW_FLAGS_TSO_LAST; 2159 req->flags |= MXGEFW_FLAGS_TSO_LAST;
2169 } while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP | 2160 } while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP |
2170 MXGEFW_FLAGS_FIRST))); 2161 MXGEFW_FLAGS_FIRST)));
2171#endif
2172 idx = ((count - 1) + tx->req) & tx->mask; 2162 idx = ((count - 1) + tx->req) & tx->mask;
2173 tx->info[idx].last = 1; 2163 tx->info[idx].last = 1;
2174 if (tx->wc_fifo == NULL) 2164 if (tx->wc_fifo == NULL)
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index e8598b809228..3f3896e98879 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -63,11 +63,14 @@
63 63
64#include "netxen_nic_hw.h" 64#include "netxen_nic_hw.h"
65 65
66#define NETXEN_NIC_BUILD_NO "2"
67#define _NETXEN_NIC_LINUX_MAJOR 3 66#define _NETXEN_NIC_LINUX_MAJOR 3
68#define _NETXEN_NIC_LINUX_MINOR 3 67#define _NETXEN_NIC_LINUX_MINOR 3
69#define _NETXEN_NIC_LINUX_SUBVERSION 3 68#define _NETXEN_NIC_LINUX_SUBVERSION 3
70#define NETXEN_NIC_LINUX_VERSIONID "3.3.3" "-" NETXEN_NIC_BUILD_NO 69#define NETXEN_NIC_LINUX_VERSIONID "3.3.3"
70
71#define NUM_FLASH_SECTORS (64)
72#define FLASH_SECTOR_SIZE (64 * 1024)
73#define FLASH_TOTAL_SIZE (NUM_FLASH_SECTORS * FLASH_SECTOR_SIZE)
71 74
72#define RCV_DESC_RINGSIZE \ 75#define RCV_DESC_RINGSIZE \
73 (sizeof(struct rcv_desc) * adapter->max_rx_desc_count) 76 (sizeof(struct rcv_desc) * adapter->max_rx_desc_count)
@@ -85,6 +88,7 @@
85#define NETXEN_RCV_PRODUCER_OFFSET 0 88#define NETXEN_RCV_PRODUCER_OFFSET 0
86#define NETXEN_RCV_PEG_DB_ID 2 89#define NETXEN_RCV_PEG_DB_ID 2
87#define NETXEN_HOST_DUMMY_DMA_SIZE 1024 90#define NETXEN_HOST_DUMMY_DMA_SIZE 1024
91#define FLASH_SUCCESS 0
88 92
89#define ADDR_IN_WINDOW1(off) \ 93#define ADDR_IN_WINDOW1(off) \
90 ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0 94 ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0
@@ -1028,6 +1032,15 @@ void netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val);
1028void netxen_load_firmware(struct netxen_adapter *adapter); 1032void netxen_load_firmware(struct netxen_adapter *adapter);
1029int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose); 1033int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose);
1030int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp); 1034int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
1035int netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
1036 u8 *bytes, size_t size);
1037int netxen_rom_fast_write_words(struct netxen_adapter *adapter, int addr,
1038 u8 *bytes, size_t size);
1039int netxen_flash_unlock(struct netxen_adapter *adapter);
1040int netxen_backup_crbinit(struct netxen_adapter *adapter);
1041int netxen_flash_erase_secondary(struct netxen_adapter *adapter);
1042int netxen_flash_erase_primary(struct netxen_adapter *adapter);
1043
1031int netxen_rom_fast_write(struct netxen_adapter *adapter, int addr, int data); 1044int netxen_rom_fast_write(struct netxen_adapter *adapter, int addr, int data);
1032int netxen_rom_se(struct netxen_adapter *adapter, int addr); 1045int netxen_rom_se(struct netxen_adapter *adapter, int addr);
1033int netxen_do_rom_se(struct netxen_adapter *adapter, int addr); 1046int netxen_do_rom_se(struct netxen_adapter *adapter, int addr);
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index c381d77a7336..cc0efe213e01 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -32,6 +32,7 @@
32 */ 32 */
33 33
34#include <linux/types.h> 34#include <linux/types.h>
35#include <linux/delay.h>
35#include <asm/uaccess.h> 36#include <asm/uaccess.h>
36#include <linux/pci.h> 37#include <linux/pci.h>
37#include <asm/io.h> 38#include <asm/io.h>
@@ -94,17 +95,7 @@ static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = {
94 95
95static int netxen_nic_get_eeprom_len(struct net_device *dev) 96static int netxen_nic_get_eeprom_len(struct net_device *dev)
96{ 97{
97 struct netxen_port *port = netdev_priv(dev); 98 return FLASH_TOTAL_SIZE;
98 struct netxen_adapter *adapter = port->adapter;
99 int n;
100
101 if ((netxen_rom_fast_read(adapter, 0, &n) == 0)
102 && (n & NETXEN_ROM_ROUNDUP)) {
103 n &= ~NETXEN_ROM_ROUNDUP;
104 if (n < NETXEN_MAX_EEPROM_LEN)
105 return n;
106 }
107 return 0;
108} 99}
109 100
110static void 101static void
@@ -440,18 +431,92 @@ netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
440 struct netxen_port *port = netdev_priv(dev); 431 struct netxen_port *port = netdev_priv(dev);
441 struct netxen_adapter *adapter = port->adapter; 432 struct netxen_adapter *adapter = port->adapter;
442 int offset; 433 int offset;
434 int ret;
443 435
444 if (eeprom->len == 0) 436 if (eeprom->len == 0)
445 return -EINVAL; 437 return -EINVAL;
446 438
447 eeprom->magic = (port->pdev)->vendor | ((port->pdev)->device << 16); 439 eeprom->magic = (port->pdev)->vendor | ((port->pdev)->device << 16);
448 for (offset = 0; offset < eeprom->len; offset++) 440 offset = eeprom->offset;
449 if (netxen_rom_fast_read 441
450 (adapter, (8 * offset) + 8, (int *)eeprom->data) == -1) 442 ret = netxen_rom_fast_read_words(adapter, offset, bytes,
451 return -EIO; 443 eeprom->len);
444 if (ret < 0)
445 return ret;
446
452 return 0; 447 return 0;
453} 448}
454 449
450static int
451netxen_nic_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
452 u8 * bytes)
453{
454 struct netxen_port *port = netdev_priv(dev);
455 struct netxen_adapter *adapter = port->adapter;
456 int offset = eeprom->offset;
457 static int flash_start;
458 static int ready_to_flash;
459 int ret;
460
461 if (flash_start == 0) {
462 ret = netxen_flash_unlock(adapter);
463 if (ret < 0) {
464 printk(KERN_ERR "%s: Flash unlock failed.\n",
465 netxen_nic_driver_name);
466 return ret;
467 }
468 printk(KERN_INFO "%s: flash unlocked. \n",
469 netxen_nic_driver_name);
470 ret = netxen_flash_erase_secondary(adapter);
471 if (ret != FLASH_SUCCESS) {
472 printk(KERN_ERR "%s: Flash erase failed.\n",
473 netxen_nic_driver_name);
474 return ret;
475 }
476 printk(KERN_INFO "%s: secondary flash erased successfully.\n",
477 netxen_nic_driver_name);
478 flash_start = 1;
479 return 0;
480 }
481
482 if (offset == BOOTLD_START) {
483 ret = netxen_flash_erase_primary(adapter);
484 if (ret != FLASH_SUCCESS) {
485 printk(KERN_ERR "%s: Flash erase failed.\n",
486 netxen_nic_driver_name);
487 return ret;
488 }
489
490 ret = netxen_rom_se(adapter, USER_START);
491 if (ret != FLASH_SUCCESS)
492 return ret;
493 ret = netxen_rom_se(adapter, FIXED_START);
494 if (ret != FLASH_SUCCESS)
495 return ret;
496
497 printk(KERN_INFO "%s: primary flash erased successfully\n",
498 netxen_nic_driver_name);
499
500 ret = netxen_backup_crbinit(adapter);
501 if (ret != FLASH_SUCCESS) {
502 printk(KERN_ERR "%s: CRBinit backup failed.\n",
503 netxen_nic_driver_name);
504 return ret;
505 }
506 printk(KERN_INFO "%s: CRBinit backup done.\n",
507 netxen_nic_driver_name);
508 ready_to_flash = 1;
509 }
510
511 if (!ready_to_flash) {
512 printk(KERN_ERR "%s: Invalid write sequence, returning...\n",
513 netxen_nic_driver_name);
514 return -EINVAL;
515 }
516
517 return netxen_rom_fast_write_words(adapter, offset, bytes, eeprom->len);
518}
519
455static void 520static void
456netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) 521netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
457{ 522{
@@ -721,6 +786,7 @@ struct ethtool_ops netxen_nic_ethtool_ops = {
721 .get_link = netxen_nic_get_link, 786 .get_link = netxen_nic_get_link,
722 .get_eeprom_len = netxen_nic_get_eeprom_len, 787 .get_eeprom_len = netxen_nic_get_eeprom_len,
723 .get_eeprom = netxen_nic_get_eeprom, 788 .get_eeprom = netxen_nic_get_eeprom,
789 .set_eeprom = netxen_nic_set_eeprom,
724 .get_ringparam = netxen_nic_get_ringparam, 790 .get_ringparam = netxen_nic_get_ringparam,
725 .get_pauseparam = netxen_nic_get_pauseparam, 791 .get_pauseparam = netxen_nic_get_pauseparam,
726 .set_pauseparam = netxen_nic_set_pauseparam, 792 .set_pauseparam = netxen_nic_set_pauseparam,
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 973af96337a9..f7bb8c90537c 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -110,6 +110,7 @@ static void crb_addr_transform_setup(void)
110 crb_addr_transform(CAM); 110 crb_addr_transform(CAM);
111 crb_addr_transform(C2C1); 111 crb_addr_transform(C2C1);
112 crb_addr_transform(C2C0); 112 crb_addr_transform(C2C0);
113 crb_addr_transform(SMB);
113} 114}
114 115
115int netxen_init_firmware(struct netxen_adapter *adapter) 116int netxen_init_firmware(struct netxen_adapter *adapter)
@@ -276,6 +277,7 @@ unsigned long netxen_decode_crb_addr(unsigned long addr)
276 277
277static long rom_max_timeout = 10000; 278static long rom_max_timeout = 10000;
278static long rom_lock_timeout = 1000000; 279static long rom_lock_timeout = 1000000;
280static long rom_write_timeout = 700;
279 281
280static inline int rom_lock(struct netxen_adapter *adapter) 282static inline int rom_lock(struct netxen_adapter *adapter)
281{ 283{
@@ -404,7 +406,7 @@ do_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
404{ 406{
405 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr); 407 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
406 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3); 408 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
407 udelay(100); /* prevent bursting on CRB */ 409 udelay(70); /* prevent bursting on CRB */
408 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); 410 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
409 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb); 411 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
410 if (netxen_wait_rom_done(adapter)) { 412 if (netxen_wait_rom_done(adapter)) {
@@ -413,13 +415,46 @@ do_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
413 } 415 }
414 /* reset abyte_cnt and dummy_byte_cnt */ 416 /* reset abyte_cnt and dummy_byte_cnt */
415 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0); 417 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
416 udelay(100); /* prevent bursting on CRB */ 418 udelay(70); /* prevent bursting on CRB */
417 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); 419 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
418 420
419 *valp = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_ROM_RDATA); 421 *valp = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_ROM_RDATA);
420 return 0; 422 return 0;
421} 423}
422 424
425static inline int
426do_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
427 u8 *bytes, size_t size)
428{
429 int addridx;
430 int ret = 0;
431
432 for (addridx = addr; addridx < (addr + size); addridx += 4) {
433 ret = do_rom_fast_read(adapter, addridx, (int *)bytes);
434 if (ret != 0)
435 break;
436 bytes += 4;
437 }
438
439 return ret;
440}
441
442int
443netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
444 u8 *bytes, size_t size)
445{
446 int ret;
447
448 ret = rom_lock(adapter);
449 if (ret < 0)
450 return ret;
451
452 ret = do_rom_fast_read_words(adapter, addr, bytes, size);
453
454 netxen_rom_unlock(adapter);
455 return ret;
456}
457
423int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp) 458int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
424{ 459{
425 int ret; 460 int ret;
@@ -443,6 +478,152 @@ int netxen_rom_fast_write(struct netxen_adapter *adapter, int addr, int data)
443 netxen_rom_unlock(adapter); 478 netxen_rom_unlock(adapter);
444 return ret; 479 return ret;
445} 480}
481
482static inline int do_rom_fast_write_words(struct netxen_adapter *adapter,
483 int addr, u8 *bytes, size_t size)
484{
485 int addridx = addr;
486 int ret = 0;
487
488 while (addridx < (addr + size)) {
489 int last_attempt = 0;
490 int timeout = 0;
491 int data;
492
493 data = *(u32*)bytes;
494
495 ret = do_rom_fast_write(adapter, addridx, data);
496 if (ret < 0)
497 return ret;
498
499 while(1) {
500 int data1;
501
502 do_rom_fast_read(adapter, addridx, &data1);
503 if (data1 == data)
504 break;
505
506 if (timeout++ >= rom_write_timeout) {
507 if (last_attempt++ < 4) {
508 ret = do_rom_fast_write(adapter,
509 addridx, data);
510 if (ret < 0)
511 return ret;
512 }
513 else {
514 printk(KERN_INFO "Data write did not "
515 "succeed at address 0x%x\n", addridx);
516 break;
517 }
518 }
519 }
520
521 bytes += 4;
522 addridx += 4;
523 }
524
525 return ret;
526}
527
528int netxen_rom_fast_write_words(struct netxen_adapter *adapter, int addr,
529 u8 *bytes, size_t size)
530{
531 int ret = 0;
532
533 ret = rom_lock(adapter);
534 if (ret < 0)
535 return ret;
536
537 ret = do_rom_fast_write_words(adapter, addr, bytes, size);
538 netxen_rom_unlock(adapter);
539
540 return ret;
541}
542
543int netxen_rom_wrsr(struct netxen_adapter *adapter, int data)
544{
545 int ret;
546
547 ret = netxen_rom_wren(adapter);
548 if (ret < 0)
549 return ret;
550
551 netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_ROM_WDATA, data);
552 netxen_crb_writelit_adapter(adapter,
553 NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0x1);
554
555 ret = netxen_wait_rom_done(adapter);
556 if (ret < 0)
557 return ret;
558
559 return netxen_rom_wip_poll(adapter);
560}
561
562int netxen_rom_rdsr(struct netxen_adapter *adapter)
563{
564 int ret;
565
566 ret = rom_lock(adapter);
567 if (ret < 0)
568 return ret;
569
570 ret = netxen_do_rom_rdsr(adapter);
571 netxen_rom_unlock(adapter);
572 return ret;
573}
574
575int netxen_backup_crbinit(struct netxen_adapter *adapter)
576{
577 int ret = FLASH_SUCCESS;
578 int val;
579 char *buffer = kmalloc(FLASH_SECTOR_SIZE, GFP_KERNEL);
580
581 if (!buffer)
582 return -ENOMEM;
583 /* unlock sector 63 */
584 val = netxen_rom_rdsr(adapter);
585 val = val & 0xe3;
586 ret = netxen_rom_wrsr(adapter, val);
587 if (ret != FLASH_SUCCESS)
588 goto out_kfree;
589
590 ret = netxen_rom_wip_poll(adapter);
591 if (ret != FLASH_SUCCESS)
592 goto out_kfree;
593
594 /* copy sector 0 to sector 63 */
595 ret = netxen_rom_fast_read_words(adapter, CRBINIT_START,
596 buffer, FLASH_SECTOR_SIZE);
597 if (ret != FLASH_SUCCESS)
598 goto out_kfree;
599
600 ret = netxen_rom_fast_write_words(adapter, FIXED_START,
601 buffer, FLASH_SECTOR_SIZE);
602 if (ret != FLASH_SUCCESS)
603 goto out_kfree;
604
605 /* lock sector 63 */
606 val = netxen_rom_rdsr(adapter);
607 if (!(val & 0x8)) {
608 val |= (0x1 << 2);
609 /* lock sector 63 */
610 if (netxen_rom_wrsr(adapter, val) == 0) {
611 ret = netxen_rom_wip_poll(adapter);
612 if (ret != FLASH_SUCCESS)
613 goto out_kfree;
614
615 /* lock SR writes */
616 ret = netxen_rom_wip_poll(adapter);
617 if (ret != FLASH_SUCCESS)
618 goto out_kfree;
619 }
620 }
621
622out_kfree:
623 kfree(buffer);
624 return ret;
625}
626
446int netxen_do_rom_se(struct netxen_adapter *adapter, int addr) 627int netxen_do_rom_se(struct netxen_adapter *adapter, int addr)
447{ 628{
448 netxen_rom_wren(adapter); 629 netxen_rom_wren(adapter);
@@ -457,6 +638,27 @@ int netxen_do_rom_se(struct netxen_adapter *adapter, int addr)
457 return netxen_rom_wip_poll(adapter); 638 return netxen_rom_wip_poll(adapter);
458} 639}
459 640
641void check_erased_flash(struct netxen_adapter *adapter, int addr)
642{
643 int i;
644 int val;
645 int count = 0, erased_errors = 0;
646 int range;
647
648 range = (addr == USER_START) ? FIXED_START : addr + FLASH_SECTOR_SIZE;
649
650 for (i = addr; i < range; i += 4) {
651 netxen_rom_fast_read(adapter, i, &val);
652 if (val != 0xffffffff)
653 erased_errors++;
654 count++;
655 }
656
657 if (erased_errors)
658 printk(KERN_INFO "0x%x out of 0x%x words fail to be erased "
659 "for sector address: %x\n", erased_errors, count, addr);
660}
661
460int netxen_rom_se(struct netxen_adapter *adapter, int addr) 662int netxen_rom_se(struct netxen_adapter *adapter, int addr)
461{ 663{
462 int ret = 0; 664 int ret = 0;
@@ -465,6 +667,68 @@ int netxen_rom_se(struct netxen_adapter *adapter, int addr)
465 } 667 }
466 ret = netxen_do_rom_se(adapter, addr); 668 ret = netxen_do_rom_se(adapter, addr);
467 netxen_rom_unlock(adapter); 669 netxen_rom_unlock(adapter);
670 msleep(30);
671 check_erased_flash(adapter, addr);
672
673 return ret;
674}
675
676int
677netxen_flash_erase_sections(struct netxen_adapter *adapter, int start, int end)
678{
679 int ret = FLASH_SUCCESS;
680 int i;
681
682 for (i = start; i < end; i++) {
683 ret = netxen_rom_se(adapter, i * FLASH_SECTOR_SIZE);
684 if (ret)
685 break;
686 ret = netxen_rom_wip_poll(adapter);
687 if (ret < 0)
688 return ret;
689 }
690
691 return ret;
692}
693
694int
695netxen_flash_erase_secondary(struct netxen_adapter *adapter)
696{
697 int ret = FLASH_SUCCESS;
698 int start, end;
699
700 start = SECONDARY_START / FLASH_SECTOR_SIZE;
701 end = USER_START / FLASH_SECTOR_SIZE;
702 ret = netxen_flash_erase_sections(adapter, start, end);
703
704 return ret;
705}
706
707int
708netxen_flash_erase_primary(struct netxen_adapter *adapter)
709{
710 int ret = FLASH_SUCCESS;
711 int start, end;
712
713 start = PRIMARY_START / FLASH_SECTOR_SIZE;
714 end = SECONDARY_START / FLASH_SECTOR_SIZE;
715 ret = netxen_flash_erase_sections(adapter, start, end);
716
717 return ret;
718}
719
720int netxen_flash_unlock(struct netxen_adapter *adapter)
721{
722 int ret = 0;
723
724 ret = netxen_rom_wrsr(adapter, 0);
725 if (ret < 0)
726 return ret;
727
728 ret = netxen_rom_wren(adapter);
729 if (ret < 0)
730 return ret;
731
468 return ret; 732 return ret;
469} 733}
470 734
@@ -543,9 +807,13 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
543 } 807 }
544 for (i = 0; i < n; i++) { 808 for (i = 0; i < n; i++) {
545 809
546 off = 810 off = netxen_decode_crb_addr((unsigned long)buf[i].addr);
547 netxen_decode_crb_addr((unsigned long)buf[i].addr) + 811 if (off == NETXEN_ADDR_ERROR) {
548 NETXEN_PCI_CRBSPACE; 812 printk(KERN_ERR"CRB init value out of range %lx\n",
813 buf[i].addr);
814 continue;
815 }
816 off += NETXEN_PCI_CRBSPACE;
549 /* skipping cold reboot MAGIC */ 817 /* skipping cold reboot MAGIC */
550 if (off == NETXEN_CAM_RAM(0x1fc)) 818 if (off == NETXEN_CAM_RAM(0x1fc))
551 continue; 819 continue;
@@ -662,6 +930,7 @@ void netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
662 int loops = 0; 930 int loops = 0;
663 931
664 if (!pegtune_val) { 932 if (!pegtune_val) {
933 val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
665 while (val != PHAN_INITIALIZE_COMPLETE && loops < 200000) { 934 while (val != PHAN_INITIALIZE_COMPLETE && loops < 200000) {
666 udelay(100); 935 udelay(100);
667 schedule(); 936 schedule();
diff --git a/drivers/net/oaknet.c b/drivers/net/oaknet.c
deleted file mode 100644
index 702e3e95612a..000000000000
--- a/drivers/net/oaknet.c
+++ /dev/null
@@ -1,666 +0,0 @@
1/*
2 *
3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
4 *
5 * Module name: oaknet.c
6 *
7 * Description:
8 * Driver for the National Semiconductor DP83902AV Ethernet controller
9 * on-board the IBM PowerPC "Oak" evaluation board. Adapted from the
10 * various other 8390 drivers written by Donald Becker and Paul Gortmaker.
11 *
12 * Additional inspiration from the "tcd8390.c" driver from TiVo, Inc.
13 * and "enetLib.c" from IBM.
14 *
15 */
16
17#include <linux/module.h>
18#include <linux/errno.h>
19#include <linux/delay.h>
20#include <linux/netdevice.h>
21#include <linux/etherdevice.h>
22#include <linux/init.h>
23#include <linux/jiffies.h>
24
25#include <asm/board.h>
26#include <asm/io.h>
27
28#include "8390.h"
29
30
31/* Preprocessor Defines */
32
33#if !defined(TRUE) || TRUE != 1
34#define TRUE 1
35#endif
36
37#if !defined(FALSE) || FALSE != 0
38#define FALSE 0
39#endif
40
41#define OAKNET_START_PG 0x20 /* First page of TX buffer */
42#define OAKNET_STOP_PG 0x40 /* Last pagge +1 of RX ring */
43
44#define OAKNET_WAIT (2 * HZ / 100) /* 20 ms */
45
46/* Experimenting with some fixes for a broken driver... */
47
48#define OAKNET_DISINT
49#define OAKNET_HEADCHECK
50#define OAKNET_RWFIX
51
52
53/* Global Variables */
54
55static const char *name = "National DP83902AV";
56
57static struct net_device *oaknet_devs;
58
59
60/* Function Prototypes */
61
62static int oaknet_open(struct net_device *dev);
63static int oaknet_close(struct net_device *dev);
64
65static void oaknet_reset_8390(struct net_device *dev);
66static void oaknet_get_8390_hdr(struct net_device *dev,
67 struct e8390_pkt_hdr *hdr, int ring_page);
68static void oaknet_block_input(struct net_device *dev, int count,
69 struct sk_buff *skb, int ring_offset);
70static void oaknet_block_output(struct net_device *dev, int count,
71 const unsigned char *buf, int start_page);
72
73static void oaknet_dma_error(struct net_device *dev, const char *name);
74
75
76/*
77 * int oaknet_init()
78 *
79 * Description:
80 * This routine performs all the necessary platform-specific initiali-
81 * zation and set-up for the IBM "Oak" evaluation board's National
82 * Semiconductor DP83902AV "ST-NIC" Ethernet controller.
83 *
84 * Input(s):
85 * N/A
86 *
87 * Output(s):
88 * N/A
89 *
90 * Returns:
91 * 0 if OK, otherwise system error number on error.
92 *
93 */
94static int __init oaknet_init(void)
95{
96 register int i;
97 int reg0, regd;
98 int ret = -ENOMEM;
99 struct net_device *dev;
100#if 0
101 unsigned long ioaddr = OAKNET_IO_BASE;
102#else
103 unsigned long ioaddr = ioremap(OAKNET_IO_BASE, OAKNET_IO_SIZE);
104#endif
105 bd_t *bip = (bd_t *)__res;
106
107 if (!ioaddr)
108 return -ENOMEM;
109
110 dev = alloc_ei_netdev();
111 if (!dev)
112 goto out_unmap;
113
114 ret = -EBUSY;
115 if (!request_region(OAKNET_IO_BASE, OAKNET_IO_SIZE, name))
116 goto out_dev;
117
118 /* Quick register check to see if the device is really there. */
119
120 ret = -ENODEV;
121 if ((reg0 = ei_ibp(ioaddr)) == 0xFF)
122 goto out_region;
123
124 /*
125 * That worked. Now a more thorough check, using the multicast
126 * address registers, that the device is definitely out there
127 * and semi-functional.
128 */
129
130 ei_obp(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD);
131 regd = ei_ibp(ioaddr + 0x0D);
132 ei_obp(0xFF, ioaddr + 0x0D);
133 ei_obp(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD);
134 ei_ibp(ioaddr + EN0_COUNTER0);
135
136 /* It's no good. Fix things back up and leave. */
137
138 ret = -ENODEV;
139 if (ei_ibp(ioaddr + EN0_COUNTER0) != 0) {
140 ei_obp(reg0, ioaddr);
141 ei_obp(regd, ioaddr + 0x0D);
142 goto out_region;
143 }
144
145 SET_MODULE_OWNER(dev);
146
147 /*
148 * This controller is on an embedded board, so the base address
149 * and interrupt assignments are pre-assigned and unchageable.
150 */
151
152 dev->base_addr = ioaddr;
153 dev->irq = OAKNET_INT;
154
155 /*
156 * Disable all chip interrupts for now and ACK all pending
157 * interrupts.
158 */
159
160 ei_obp(0x0, ioaddr + EN0_IMR);
161 ei_obp(0xFF, ioaddr + EN0_ISR);
162
163 /* Attempt to get the interrupt line */
164
165 ret = -EAGAIN;
166 if (request_irq(dev->irq, ei_interrupt, 0, name, dev)) {
167 printk("%s: unable to request interrupt %d.\n",
168 name, dev->irq);
169 goto out_region;
170 }
171
172 /* Tell the world about what and where we've found. */
173
174 printk("%s: %s at", dev->name, name);
175 for (i = 0; i < ETHER_ADDR_LEN; ++i) {
176 dev->dev_addr[i] = bip->bi_enetaddr[i];
177 printk("%c%.2x", (i ? ':' : ' '), dev->dev_addr[i]);
178 }
179 printk(", found at %#lx, using IRQ %d.\n", dev->base_addr, dev->irq);
180
181 /* Set up some required driver fields and then we're done. */
182
183 ei_status.name = name;
184 ei_status.word16 = FALSE;
185 ei_status.tx_start_page = OAKNET_START_PG;
186 ei_status.rx_start_page = OAKNET_START_PG + TX_PAGES;
187 ei_status.stop_page = OAKNET_STOP_PG;
188
189 ei_status.reset_8390 = &oaknet_reset_8390;
190 ei_status.block_input = &oaknet_block_input;
191 ei_status.block_output = &oaknet_block_output;
192 ei_status.get_8390_hdr = &oaknet_get_8390_hdr;
193
194 dev->open = oaknet_open;
195 dev->stop = oaknet_close;
196#ifdef CONFIG_NET_POLL_CONTROLLER
197 dev->poll_controller = ei_poll;
198#endif
199
200 NS8390_init(dev, FALSE);
201 ret = register_netdev(dev);
202 if (ret)
203 goto out_irq;
204
205 oaknet_devs = dev;
206 return 0;
207
208out_irq;
209 free_irq(dev->irq, dev);
210out_region:
211 release_region(OAKNET_IO_BASE, OAKNET_IO_SIZE);
212out_dev:
213 free_netdev(dev);
214out_unmap:
215 iounmap(ioaddr);
216 return ret;
217}
218
219/*
220 * static int oaknet_open()
221 *
222 * Description:
223 * This routine is a modest wrapper around ei_open, the 8390-generic,
224 * driver open routine. This just increments the module usage count
225 * and passes along the status from ei_open.
226 *
227 * Input(s):
228 * *dev - Pointer to the device structure for this driver.
229 *
230 * Output(s):
231 * *dev - Pointer to the device structure for this driver, potentially
232 * modified by ei_open.
233 *
234 * Returns:
235 * 0 if OK, otherwise < 0 on error.
236 *
237 */
238static int
239oaknet_open(struct net_device *dev)
240{
241 int status = ei_open(dev);
242 return (status);
243}
244
245/*
246 * static int oaknet_close()
247 *
248 * Description:
249 * This routine is a modest wrapper around ei_close, the 8390-generic,
250 * driver close routine. This just decrements the module usage count
251 * and passes along the status from ei_close.
252 *
253 * Input(s):
254 * *dev - Pointer to the device structure for this driver.
255 *
256 * Output(s):
257 * *dev - Pointer to the device structure for this driver, potentially
258 * modified by ei_close.
259 *
260 * Returns:
261 * 0 if OK, otherwise < 0 on error.
262 *
263 */
264static int
265oaknet_close(struct net_device *dev)
266{
267 int status = ei_close(dev);
268 return (status);
269}
270
271/*
272 * static void oaknet_reset_8390()
273 *
274 * Description:
275 * This routine resets the DP83902 chip.
276 *
277 * Input(s):
278 * *dev - Pointer to the device structure for this driver.
279 *
280 * Output(s):
281 * N/A
282 *
283 * Returns:
284 * N/A
285 *
286 */
287static void
288oaknet_reset_8390(struct net_device *dev)
289{
290 int base = E8390_BASE;
291
292 /*
293 * We have no provision of reseting the controller as is done
294 * in other drivers, such as "ne.c". However, the following
295 * seems to work well enough in the TiVo driver.
296 */
297
298 printk("Resetting %s...\n", dev->name);
299 ei_obp(E8390_STOP | E8390_NODMA | E8390_PAGE0, base + E8390_CMD);
300 ei_status.txing = 0;
301 ei_status.dmaing = 0;
302}
303
304/*
305 * static void oaknet_get_8390_hdr()
306 *
307 * Description:
308 * This routine grabs the 8390-specific header. It's similar to the
309 * block input routine, but we don't need to be concerned with ring wrap
310 * as the header will be at the start of a page, so we optimize accordingly.
311 *
312 * Input(s):
313 * *dev - Pointer to the device structure for this driver.
314 * *hdr - Pointer to storage for the 8390-specific packet header.
315 * ring_page - ?
316 *
317 * Output(s):
318 * *hdr - Pointer to the 8390-specific packet header for the just-
319 * received frame.
320 *
321 * Returns:
322 * N/A
323 *
324 */
325static void
326oaknet_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
327 int ring_page)
328{
329 int base = dev->base_addr;
330
331 /*
332 * This should NOT happen. If it does, it is the LAST thing you'll
333 * see.
334 */
335
336 if (ei_status.dmaing) {
337 oaknet_dma_error(dev, "oaknet_get_8390_hdr");
338 return;
339 }
340
341 ei_status.dmaing |= 0x01;
342 outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, base + OAKNET_CMD);
343 outb_p(sizeof(struct e8390_pkt_hdr), base + EN0_RCNTLO);
344 outb_p(0, base + EN0_RCNTHI);
345 outb_p(0, base + EN0_RSARLO); /* On page boundary */
346 outb_p(ring_page, base + EN0_RSARHI);
347 outb_p(E8390_RREAD + E8390_START, base + OAKNET_CMD);
348
349 if (ei_status.word16)
350 insw(base + OAKNET_DATA, hdr,
351 sizeof(struct e8390_pkt_hdr) >> 1);
352 else
353 insb(base + OAKNET_DATA, hdr,
354 sizeof(struct e8390_pkt_hdr));
355
356 /* Byte-swap the packet byte count */
357
358 hdr->count = le16_to_cpu(hdr->count);
359
360 outb_p(ENISR_RDC, base + EN0_ISR); /* ACK Remote DMA interrupt */
361 ei_status.dmaing &= ~0x01;
362}
363
364/*
365 * XXX - Document me.
366 */
367static void
368oaknet_block_input(struct net_device *dev, int count, struct sk_buff *skb,
369 int ring_offset)
370{
371 int base = OAKNET_BASE;
372 char *buf = skb->data;
373
374 /*
375 * This should NOT happen. If it does, it is the LAST thing you'll
376 * see.
377 */
378
379 if (ei_status.dmaing) {
380 oaknet_dma_error(dev, "oaknet_block_input");
381 return;
382 }
383
384#ifdef OAKNET_DISINT
385 save_flags(flags);
386 cli();
387#endif
388
389 ei_status.dmaing |= 0x01;
390 ei_obp(E8390_NODMA + E8390_PAGE0 + E8390_START, base + E8390_CMD);
391 ei_obp(count & 0xff, base + EN0_RCNTLO);
392 ei_obp(count >> 8, base + EN0_RCNTHI);
393 ei_obp(ring_offset & 0xff, base + EN0_RSARLO);
394 ei_obp(ring_offset >> 8, base + EN0_RSARHI);
395 ei_obp(E8390_RREAD + E8390_START, base + E8390_CMD);
396 if (ei_status.word16) {
397 ei_isw(base + E8390_DATA, buf, count >> 1);
398 if (count & 0x01) {
399 buf[count - 1] = ei_ib(base + E8390_DATA);
400#ifdef OAKNET_HEADCHECK
401 bytes++;
402#endif
403 }
404 } else {
405 ei_isb(base + E8390_DATA, buf, count);
406 }
407#ifdef OAKNET_HEADCHECK
408 /*
409 * This was for the ALPHA version only, but enough people have
410 * been encountering problems so it is still here. If you see
411 * this message you either 1) have a slightly incompatible clone
412 * or 2) have noise/speed problems with your bus.
413 */
414
415 /* DMA termination address check... */
416 {
417 int addr, tries = 20;
418 do {
419 /* DON'T check for 'ei_ibp(EN0_ISR) & ENISR_RDC' here
420 -- it's broken for Rx on some cards! */
421 int high = ei_ibp(base + EN0_RSARHI);
422 int low = ei_ibp(base + EN0_RSARLO);
423 addr = (high << 8) + low;
424 if (((ring_offset + bytes) & 0xff) == low)
425 break;
426 } while (--tries > 0);
427 if (tries <= 0)
428 printk("%s: RX transfer address mismatch,"
429 "%#4.4x (expected) vs. %#4.4x (actual).\n",
430 dev->name, ring_offset + bytes, addr);
431 }
432#endif
433 ei_obp(ENISR_RDC, base + EN0_ISR); /* ACK Remote DMA interrupt */
434 ei_status.dmaing &= ~0x01;
435
436#ifdef OAKNET_DISINT
437 restore_flags(flags);
438#endif
439}
440
441/*
442 * static void oaknet_block_output()
443 *
444 * Description:
445 * This routine...
446 *
447 * Input(s):
448 * *dev - Pointer to the device structure for this driver.
449 * count - Number of bytes to be transferred.
450 * *buf -
451 * start_page -
452 *
453 * Output(s):
454 * N/A
455 *
456 * Returns:
457 * N/A
458 *
459 */
460static void
461oaknet_block_output(struct net_device *dev, int count,
462 const unsigned char *buf, int start_page)
463{
464 int base = E8390_BASE;
465#if 0
466 int bug;
467#endif
468 unsigned long start;
469#ifdef OAKNET_DISINT
470 unsigned long flags;
471#endif
472#ifdef OAKNET_HEADCHECK
473 int retries = 0;
474#endif
475
476 /* Round the count up for word writes. */
477
478 if (ei_status.word16 && (count & 0x1))
479 count++;
480
481 /*
482 * This should NOT happen. If it does, it is the LAST thing you'll
483 * see.
484 */
485
486 if (ei_status.dmaing) {
487 oaknet_dma_error(dev, "oaknet_block_output");
488 return;
489 }
490
491#ifdef OAKNET_DISINT
492 save_flags(flags);
493 cli();
494#endif
495
496 ei_status.dmaing |= 0x01;
497
498 /* Make sure we are in page 0. */
499
500 ei_obp(E8390_PAGE0 + E8390_START + E8390_NODMA, base + E8390_CMD);
501
502#ifdef OAKNET_HEADCHECK
503retry:
504#endif
505
506#if 0
507 /*
508 * The 83902 documentation states that the processor needs to
509 * do a "dummy read" before doing the remote write to work
510 * around a chip bug they don't feel like fixing.
511 */
512
513 bug = 0;
514 while (1) {
515 unsigned int rdhi;
516 unsigned int rdlo;
517
518 /* Now the normal output. */
519 ei_obp(ENISR_RDC, base + EN0_ISR);
520 ei_obp(count & 0xff, base + EN0_RCNTLO);
521 ei_obp(count >> 8, base + EN0_RCNTHI);
522 ei_obp(0x00, base + EN0_RSARLO);
523 ei_obp(start_page, base + EN0_RSARHI);
524
525 if (bug++)
526 break;
527
528 /* Perform the dummy read */
529 rdhi = ei_ibp(base + EN0_CRDAHI);
530 rdlo = ei_ibp(base + EN0_CRDALO);
531 ei_obp(E8390_RREAD + E8390_START, base + E8390_CMD);
532
533 while (1) {
534 unsigned int nrdhi;
535 unsigned int nrdlo;
536 nrdhi = ei_ibp(base + EN0_CRDAHI);
537 nrdlo = ei_ibp(base + EN0_CRDALO);
538 if ((rdhi != nrdhi) || (rdlo != nrdlo))
539 break;
540 }
541 }
542#else
543#ifdef OAKNET_RWFIX
544 /*
545 * Handle the read-before-write bug the same way as the
546 * Crynwr packet driver -- the Nat'l Semi. method doesn't work.
547 * Actually this doesn't always work either, but if you have
548 * problems with your 83902 this is better than nothing!
549 */
550
551 ei_obp(0x42, base + EN0_RCNTLO);
552 ei_obp(0x00, base + EN0_RCNTHI);
553 ei_obp(0x42, base + EN0_RSARLO);
554 ei_obp(0x00, base + EN0_RSARHI);
555 ei_obp(E8390_RREAD + E8390_START, base + E8390_CMD);
556 /* Make certain that the dummy read has occurred. */
557 udelay(6);
558#endif
559
560 ei_obp(ENISR_RDC, base + EN0_ISR);
561
562 /* Now the normal output. */
563 ei_obp(count & 0xff, base + EN0_RCNTLO);
564 ei_obp(count >> 8, base + EN0_RCNTHI);
565 ei_obp(0x00, base + EN0_RSARLO);
566 ei_obp(start_page, base + EN0_RSARHI);
567#endif /* 0/1 */
568
569 ei_obp(E8390_RWRITE + E8390_START, base + E8390_CMD);
570 if (ei_status.word16) {
571 ei_osw(E8390_BASE + E8390_DATA, buf, count >> 1);
572 } else {
573 ei_osb(E8390_BASE + E8390_DATA, buf, count);
574 }
575
576#ifdef OAKNET_DISINT
577 restore_flags(flags);
578#endif
579
580 start = jiffies;
581
582#ifdef OAKNET_HEADCHECK
583 /*
584 * This was for the ALPHA version only, but enough people have
585 * been encountering problems so it is still here.
586 */
587
588 {
589 /* DMA termination address check... */
590 int addr, tries = 20;
591 do {
592 int high = ei_ibp(base + EN0_RSARHI);
593 int low = ei_ibp(base + EN0_RSARLO);
594 addr = (high << 8) + low;
595 if ((start_page << 8) + count == addr)
596 break;
597 } while (--tries > 0);
598
599 if (tries <= 0) {
600 printk("%s: Tx packet transfer address mismatch,"
601 "%#4.4x (expected) vs. %#4.4x (actual).\n",
602 dev->name, (start_page << 8) + count, addr);
603 if (retries++ == 0)
604 goto retry;
605 }
606 }
607#endif
608
609 while ((ei_ibp(base + EN0_ISR) & ENISR_RDC) == 0) {
610 if (time_after(jiffies, start + OAKNET_WAIT)) {
611 printk("%s: timeout waiting for Tx RDC.\n", dev->name);
612 oaknet_reset_8390(dev);
613 NS8390_init(dev, TRUE);
614 break;
615 }
616 }
617
618 ei_obp(ENISR_RDC, base + EN0_ISR); /* Ack intr. */
619 ei_status.dmaing &= ~0x01;
620}
621
622/*
623 * static void oaknet_dma_error()
624 *
625 * Description:
626 * This routine prints out a last-ditch informative message to the console
627 * indicating that a DMA error occurred. If you see this, it's the last
628 * thing you'll see.
629 *
630 * Input(s):
631 * *dev - Pointer to the device structure for this driver.
632 * *name - Informative text (e.g. function name) indicating where the
633 * DMA error occurred.
634 *
635 * Output(s):
636 * N/A
637 *
638 * Returns:
639 * N/A
640 *
641 */
642static void
643oaknet_dma_error(struct net_device *dev, const char *name)
644{
645 printk(KERN_EMERG "%s: DMAing conflict in %s."
646 "[DMAstat:%d][irqlock:%d][intr:%ld]\n",
647 dev->name, name, ei_status.dmaing, ei_status.irqlock,
648 dev->interrupt);
649}
650
651/*
652 * Oak Ethernet module unload interface.
653 */
654static void __exit oaknet_cleanup_module (void)
655{
656 /* Convert to loop once driver supports multiple devices. */
657 unregister_netdev(oaknet_dev);
658 free_irq(oaknet_devs->irq, oaknet_devs);
659 release_region(oaknet_devs->base_addr, OAKNET_IO_SIZE);
660 iounmap(ioaddr);
661 free_netdev(oaknet_devs);
662}
663
664module_init(oaknet_init);
665module_exit(oaknet_cleanup_module);
666MODULE_LICENSE("GPL");
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
new file mode 100644
index 000000000000..d670ac74824f
--- /dev/null
+++ b/drivers/net/pasemi_mac.c
@@ -0,0 +1,1019 @@
1/*
2 * Copyright (C) 2006-2007 PA Semi, Inc
3 *
4 * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/init.h>
21#include <linux/module.h>
22#include <linux/pci.h>
23#include <linux/interrupt.h>
24#include <linux/dmaengine.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <asm/dma-mapping.h>
29#include <linux/in.h>
30#include <linux/skbuff.h>
31
32#include <linux/ip.h>
33#include <linux/tcp.h>
34#include <net/checksum.h>
35
36#include "pasemi_mac.h"
37
38
39/* TODO list
40 *
41 * - Get rid of pci_{read,write}_config(), map registers with ioremap
42 * for performance
43 * - PHY support
44 * - Multicast support
45 * - Large MTU support
46 * - Other performance improvements
47 */
48
49
50/* Must be a power of two */
51#define RX_RING_SIZE 512
52#define TX_RING_SIZE 512
53
54#define TX_DESC(mac, num) ((mac)->tx->desc[(num) & (TX_RING_SIZE-1)])
55#define TX_DESC_INFO(mac, num) ((mac)->tx->desc_info[(num) & (TX_RING_SIZE-1)])
56#define RX_DESC(mac, num) ((mac)->rx->desc[(num) & (RX_RING_SIZE-1)])
57#define RX_DESC_INFO(mac, num) ((mac)->rx->desc_info[(num) & (RX_RING_SIZE-1)])
58#define RX_BUFF(mac, num) ((mac)->rx->buffers[(num) & (RX_RING_SIZE-1)])
59
60#define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
61
62/* XXXOJN these should come out of the device tree some day */
63#define PAS_DMA_CAP_BASE 0xe00d0040
64#define PAS_DMA_CAP_SIZE 0x100
65#define PAS_DMA_COM_BASE 0xe00d0100
66#define PAS_DMA_COM_SIZE 0x100
67
68static struct pasdma_status *dma_status;
69
70static int pasemi_get_mac_addr(struct pasemi_mac *mac)
71{
72 struct pci_dev *pdev = mac->pdev;
73 struct device_node *dn = pci_device_to_OF_node(pdev);
74 const u8 *maddr;
75 u8 addr[6];
76
77 if (!dn) {
78 dev_dbg(&pdev->dev,
79 "No device node for mac, not configuring\n");
80 return -ENOENT;
81 }
82
83 maddr = get_property(dn, "mac-address", NULL);
84 if (maddr == NULL) {
85 dev_warn(&pdev->dev,
86 "no mac address in device tree, not configuring\n");
87 return -ENOENT;
88 }
89
90 if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
91 &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
92 dev_warn(&pdev->dev,
93 "can't parse mac address, not configuring\n");
94 return -EINVAL;
95 }
96
97 memcpy(mac->mac_addr, addr, sizeof(addr));
98 return 0;
99}
100
101static int pasemi_mac_setup_rx_resources(struct net_device *dev)
102{
103 struct pasemi_mac_rxring *ring;
104 struct pasemi_mac *mac = netdev_priv(dev);
105 int chan_id = mac->dma_rxch;
106
107 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
108
109 if (!ring)
110 goto out_ring;
111
112 spin_lock_init(&ring->lock);
113
114 ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
115 RX_RING_SIZE, GFP_KERNEL);
116
117 if (!ring->desc_info)
118 goto out_desc_info;
119
120 /* Allocate descriptors */
121 ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev,
122 RX_RING_SIZE *
123 sizeof(struct pas_dma_xct_descr),
124 &ring->dma, GFP_KERNEL);
125
126 if (!ring->desc)
127 goto out_desc;
128
129 memset(ring->desc, 0, RX_RING_SIZE * sizeof(struct pas_dma_xct_descr));
130
131 ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
132 RX_RING_SIZE * sizeof(u64),
133 &ring->buf_dma, GFP_KERNEL);
134 if (!ring->buffers)
135 goto out_buffers;
136
137 memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));
138
139 pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEL(chan_id),
140 PAS_DMA_RXCHAN_BASEL_BRBL(ring->dma));
141
142 pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEU(chan_id),
143 PAS_DMA_RXCHAN_BASEU_BRBH(ring->dma >> 32) |
144 PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 2));
145
146 pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_CFG(chan_id),
147 PAS_DMA_RXCHAN_CFG_HBU(1));
148
149 pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEL(mac->dma_if),
150 PAS_DMA_RXINT_BASEL_BRBL(__pa(ring->buffers)));
151
152 pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEU(mac->dma_if),
153 PAS_DMA_RXINT_BASEU_BRBH(__pa(ring->buffers) >> 32) |
154 PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3));
155
156 ring->next_to_fill = 0;
157 ring->next_to_clean = 0;
158
159 snprintf(ring->irq_name, sizeof(ring->irq_name),
160 "%s rx", dev->name);
161 mac->rx = ring;
162
163 return 0;
164
165out_buffers:
166 dma_free_coherent(&mac->dma_pdev->dev,
167 RX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
168 mac->rx->desc, mac->rx->dma);
169out_desc:
170 kfree(ring->desc_info);
171out_desc_info:
172 kfree(ring);
173out_ring:
174 return -ENOMEM;
175}
176
177
178static int pasemi_mac_setup_tx_resources(struct net_device *dev)
179{
180 struct pasemi_mac *mac = netdev_priv(dev);
181 u32 val;
182 int chan_id = mac->dma_txch;
183 struct pasemi_mac_txring *ring;
184
185 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
186 if (!ring)
187 goto out_ring;
188
189 spin_lock_init(&ring->lock);
190
191 ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
192 TX_RING_SIZE, GFP_KERNEL);
193 if (!ring->desc_info)
194 goto out_desc_info;
195
196 /* Allocate descriptors */
197 ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev,
198 TX_RING_SIZE *
199 sizeof(struct pas_dma_xct_descr),
200 &ring->dma, GFP_KERNEL);
201 if (!ring->desc)
202 goto out_desc;
203
204 memset(ring->desc, 0, TX_RING_SIZE * sizeof(struct pas_dma_xct_descr));
205
206 pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEL(chan_id),
207 PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
208 val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
209 val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2);
210
211 pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEU(chan_id), val);
212
213 pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_CFG(chan_id),
214 PAS_DMA_TXCHAN_CFG_TY_IFACE |
215 PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
216 PAS_DMA_TXCHAN_CFG_UP |
217 PAS_DMA_TXCHAN_CFG_WT(2));
218
219 ring->next_to_use = 0;
220 ring->next_to_clean = 0;
221
222 snprintf(ring->irq_name, sizeof(ring->irq_name),
223 "%s tx", dev->name);
224 mac->tx = ring;
225
226 return 0;
227
228out_desc:
229 kfree(ring->desc_info);
230out_desc_info:
231 kfree(ring);
232out_ring:
233 return -ENOMEM;
234}
235
236static void pasemi_mac_free_tx_resources(struct net_device *dev)
237{
238 struct pasemi_mac *mac = netdev_priv(dev);
239 unsigned int i;
240 struct pasemi_mac_buffer *info;
241 struct pas_dma_xct_descr *dp;
242
243 for (i = 0; i < TX_RING_SIZE; i++) {
244 info = &TX_DESC_INFO(mac, i);
245 dp = &TX_DESC(mac, i);
246 if (info->dma) {
247 if (info->skb) {
248 pci_unmap_single(mac->dma_pdev,
249 info->dma,
250 info->skb->len,
251 PCI_DMA_TODEVICE);
252 dev_kfree_skb_any(info->skb);
253 }
254 info->dma = 0;
255 info->skb = NULL;
256 dp->mactx = 0;
257 dp->ptr = 0;
258 }
259 }
260
261 dma_free_coherent(&mac->dma_pdev->dev,
262 TX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
263 mac->tx->desc, mac->tx->dma);
264
265 kfree(mac->tx->desc_info);
266 kfree(mac->tx);
267 mac->tx = NULL;
268}
269
270static void pasemi_mac_free_rx_resources(struct net_device *dev)
271{
272 struct pasemi_mac *mac = netdev_priv(dev);
273 unsigned int i;
274 struct pasemi_mac_buffer *info;
275 struct pas_dma_xct_descr *dp;
276
277 for (i = 0; i < RX_RING_SIZE; i++) {
278 info = &RX_DESC_INFO(mac, i);
279 dp = &RX_DESC(mac, i);
280 if (info->dma) {
281 if (info->skb) {
282 pci_unmap_single(mac->dma_pdev,
283 info->dma,
284 info->skb->len,
285 PCI_DMA_FROMDEVICE);
286 dev_kfree_skb_any(info->skb);
287 }
288 info->dma = 0;
289 info->skb = NULL;
290 dp->macrx = 0;
291 dp->ptr = 0;
292 }
293 }
294
295 dma_free_coherent(&mac->dma_pdev->dev,
296 RX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
297 mac->rx->desc, mac->rx->dma);
298
299 dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
300 mac->rx->buffers, mac->rx->buf_dma);
301
302 kfree(mac->rx->desc_info);
303 kfree(mac->rx);
304 mac->rx = NULL;
305}
306
307static void pasemi_mac_replenish_rx_ring(struct net_device *dev)
308{
309 struct pasemi_mac *mac = netdev_priv(dev);
310 unsigned int i;
311 int start = mac->rx->next_to_fill;
312 unsigned int count;
313
314 count = (mac->rx->next_to_clean + RX_RING_SIZE -
315 mac->rx->next_to_fill) & (RX_RING_SIZE - 1);
316
317 /* Check to see if we're doing first-time setup */
318 if (unlikely(mac->rx->next_to_clean == 0 && mac->rx->next_to_fill == 0))
319 count = RX_RING_SIZE;
320
321 if (count <= 0)
322 return;
323
324 for (i = start; i < start + count; i++) {
325 struct pasemi_mac_buffer *info = &RX_DESC_INFO(mac, i);
326 u64 *buff = &RX_BUFF(mac, i);
327 struct sk_buff *skb;
328 dma_addr_t dma;
329
330 skb = dev_alloc_skb(BUF_SIZE);
331
332 if (!skb) {
333 count = i - start;
334 break;
335 }
336
337 skb->dev = dev;
338
339 dma = pci_map_single(mac->dma_pdev, skb->data, skb->len,
340 PCI_DMA_FROMDEVICE);
341
342 if (dma_mapping_error(dma)) {
343 dev_kfree_skb_irq(info->skb);
344 count = i - start;
345 break;
346 }
347
348 info->skb = skb;
349 info->dma = dma;
350 *buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
351 }
352
353 wmb();
354
355 pci_write_config_dword(mac->dma_pdev,
356 PAS_DMA_RXCHAN_INCR(mac->dma_rxch),
357 count);
358 pci_write_config_dword(mac->dma_pdev,
359 PAS_DMA_RXINT_INCR(mac->dma_if),
360 count);
361
362 mac->rx->next_to_fill += count;
363}
364
/* RX cleanup: harvest completed receive descriptors, hand the packets to
 * the network stack, and refill the ring.  Returns the number of packets
 * processed (at most @limit).  Called from the NAPI poll path; serialized
 * against the refill path by rx->lock.
 */
static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
{
	unsigned int i;
	int start, count;

	spin_lock(&mac->rx->lock);

	start = mac->rx->next_to_clean;
	count = 0;

	for (i = start; i < (start + RX_RING_SIZE) && count < limit; i++) {
		struct pas_dma_xct_descr *dp;
		struct pasemi_mac_buffer *info;
		struct sk_buff *skb;
		unsigned int j, len;
		dma_addr_t dma;

		/* Order the descriptor read after the hardware's writes
		 * before testing the ownership bit below. */
		rmb();

		dp = &RX_DESC(mac, i);

		/* XCT_MACRX_O clear means hardware still owns this slot:
		 * nothing more to clean. */
		if (!(dp->macrx & XCT_MACRX_O))
			break;

		count++;

		info = NULL;

		/* We have to scan for our skb since there's no way
		 * to back-map them from the descriptor, and if we
		 * have several receive channels then they might not
		 * show up in the same order as they were put on the
		 * interface ring.
		 */

		dma = (dp->ptr & XCT_PTR_ADDR_M);
		for (j = start; j < (start + RX_RING_SIZE); j++) {
			info = &RX_DESC_INFO(mac, j);
			if (info->dma == dma)
				break;
		}

		/* NOTE(review): info is always non-NULL here (the loop body
		 * assigns it unconditionally), so only the dma mismatch
		 * check can actually fire. */
		BUG_ON(!info);
		BUG_ON(info->dma != dma);

		pci_unmap_single(mac->dma_pdev, info->dma, info->skb->len,
				 PCI_DMA_FROMDEVICE);

		skb = info->skb;

		/* Frame length as reported by the MAC in the descriptor. */
		len = (dp->macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;

		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, mac->netdev);

		/* If the MAC validated an IPv4 frame, pass its checksum up;
		 * otherwise let the stack verify the packet itself. */
		if ((dp->macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (dp->macrx & XCT_MACRX_CSUM_M) >>
				XCT_MACRX_CSUM_S;
		} else
			skb->ip_summed = CHECKSUM_NONE;

		mac->stats.rx_bytes += len;
		mac->stats.rx_packets++;

		netif_receive_skb(skb);

		/* Clear the slot so the refill path can reuse it. */
		info->dma = 0;
		info->skb = NULL;
		dp->ptr = 0;
		dp->macrx = 0;
	}

	mac->rx->next_to_clean += count;
	pasemi_mac_replenish_rx_ring(mac->netdev);

	spin_unlock(&mac->rx->lock);

	return count;
}
446
447static int pasemi_mac_clean_tx(struct pasemi_mac *mac)
448{
449 int i;
450 struct pasemi_mac_buffer *info;
451 struct pas_dma_xct_descr *dp;
452 int start, count;
453 int flags;
454
455 spin_lock_irqsave(&mac->tx->lock, flags);
456
457 start = mac->tx->next_to_clean;
458 count = 0;
459
460 for (i = start; i < mac->tx->next_to_use; i++) {
461 dp = &TX_DESC(mac, i);
462 if (!dp || (dp->mactx & XCT_MACTX_O))
463 break;
464
465 count++;
466
467 info = &TX_DESC_INFO(mac, i);
468
469 pci_unmap_single(mac->dma_pdev, info->dma,
470 info->skb->len, PCI_DMA_TODEVICE);
471 dev_kfree_skb_irq(info->skb);
472
473 info->skb = NULL;
474 info->dma = 0;
475 dp->mactx = 0;
476 dp->ptr = 0;
477 }
478 mac->tx->next_to_clean += count;
479 spin_unlock_irqrestore(&mac->tx->lock, flags);
480
481 return count;
482}
483
484
/* RX interrupt handler: schedule the NAPI poll loop and acknowledge the
 * interrupt sources in the I/O bridge.  The line may be shared with
 * other channels, so return IRQ_NONE when our status bit is clear. */
static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
{
	struct net_device *dev = data;
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int reg;

	if (!(*mac->rx_status & PAS_STATUS_INT))
		return IRQ_NONE;

	netif_rx_schedule(dev);
	/* Disable the DMA timeout counter while polling; the poll loop
	 * re-arms it (TCNT(1000000)) once the ring is drained. */
	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
			       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0));

	/* Ack packet-count, soft, and descriptor interrupts; ack the timer
	 * interrupt too if it fired. */
	reg = PAS_IOB_DMA_RXCH_RESET_PINTC | PAS_IOB_DMA_RXCH_RESET_SINTC |
	      PAS_IOB_DMA_RXCH_RESET_DINTC;
	if (*mac->rx_status & PAS_STATUS_TIMER)
		reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;

	pci_write_config_dword(mac->iob_pdev,
			       PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg);


	return IRQ_HANDLED;
}
509
/* TX interrupt handler: reclaim finished descriptors and wake the queue
 * if it had been stopped because the ring was full. */
static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
{
	struct net_device *dev = data;
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int reg;
	int was_full;

	/* Sample fullness before cleaning so we know whether the queue
	 * needs waking afterwards. */
	was_full = mac->tx->next_to_clean - mac->tx->next_to_use == TX_RING_SIZE;

	if (!(*mac->tx_status & PAS_STATUS_INT))
		return IRQ_NONE;

	pasemi_mac_clean_tx(mac);

	/* Ack packet-count and soft interrupts; ack the timer interrupt
	 * too if it fired. */
	reg = PAS_IOB_DMA_TXCH_RESET_PINTC | PAS_IOB_DMA_TXCH_RESET_SINTC;
	if (*mac->tx_status & PAS_STATUS_TIMER)
		reg |= PAS_IOB_DMA_TXCH_RESET_TINTC;

	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch),
			       reg);

	if (was_full)
		netif_wake_queue(dev);

	return IRQ_HANDLED;
}
536
/* net_device open(): enable the shared DMA sections, program MAC/IOB
 * configuration, allocate the rings, enable the per-interface rx/tx
 * channels, and hook up the two per-channel interrupt lines.
 * Returns 0 on success or a negative errno, with everything acquired so
 * far unwound via the goto chain at the bottom. */
static int pasemi_mac_open(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int flags;
	int ret;

	/* enable rx section */
	pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_RXCMD,
			       PAS_DMA_COM_RXCMD_EN);

	/* enable tx section */
	pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_TXCMD,
			       PAS_DMA_COM_TXCMD_EN);

	/* Transmit parameters: flow control enable, pause counts, and
	 * inter-frame timing.  Values are magic -- presumably from the
	 * PWRficient databook; confirm before changing. */
	flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
		PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
		PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);

	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_TXP, flags);

	flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE |
		PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;

	/* NOTE(review): speed is hardwired to 1G here even for the XAUI
	 * (10G) MAC type -- verify whether that is intentional. */
	flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;

	/* Interrupt once 30 packets have accumulated on the channel. */
	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch),
			       PAS_IOB_DMA_RXCH_CFG_CNTTH(30));

	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
			       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(1000000));

	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);

	ret = pasemi_mac_setup_rx_resources(dev);
	if (ret)
		goto out_rx_resources;

	ret = pasemi_mac_setup_tx_resources(dev);
	if (ret)
		goto out_tx_resources;

	/* Route this MAC's traffic to its DMA channel. */
	pci_write_config_dword(mac->pdev, PAS_MAC_IPC_CHNL,
			       PAS_MAC_IPC_CHNL_DCHNO(mac->dma_rxch) |
			       PAS_MAC_IPC_CHNL_BCH(mac->dma_rxch));

	/* enable rx if */
	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
			       PAS_DMA_RXINT_RCMDSTA_EN);

	/* enable rx channel */
	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
			       PAS_DMA_RXCHAN_CCMDSTA_EN |
			       PAS_DMA_RXCHAN_CCMDSTA_DU);

	/* enable tx channel */
	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
			       PAS_DMA_TXCHAN_TCMDSTA_EN);

	pasemi_mac_replenish_rx_ring(dev);

	netif_start_queue(dev);
	netif_poll_enable(dev);

	/* Interrupt vector layout: dma irq base + channel for tx,
	 * base + 20 + channel for rx. */
	ret = request_irq(mac->dma_pdev->irq + mac->dma_txch,
			  &pasemi_mac_tx_intr, IRQF_DISABLED,
			  mac->tx->irq_name, dev);
	if (ret) {
		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
			mac->dma_pdev->irq + mac->dma_txch, ret);
		goto out_tx_int;
	}

	ret = request_irq(mac->dma_pdev->irq + 20 + mac->dma_rxch,
			  &pasemi_mac_rx_intr, IRQF_DISABLED,
			  mac->rx->irq_name, dev);
	if (ret) {
		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
			mac->dma_pdev->irq + 20 + mac->dma_rxch, ret);
		goto out_rx_int;
	}

	return 0;

	/* Unwind in reverse order.  The label names refer to the step
	 * that failed, not to what they free: e.g. out_tx_resources frees
	 * the RX resources that were set up before the TX setup failed. */
out_rx_int:
	free_irq(mac->dma_pdev->irq + mac->dma_txch, dev);
out_tx_int:
	netif_poll_disable(dev);
	netif_stop_queue(dev);
	pasemi_mac_free_tx_resources(dev);
out_tx_resources:
	pasemi_mac_free_rx_resources(dev);
out_rx_resources:

	return ret;
}
635
636#define MAX_RETRIES 5000
637
638static int pasemi_mac_close(struct net_device *dev)
639{
640 struct pasemi_mac *mac = netdev_priv(dev);
641 unsigned int stat;
642 int retries;
643
644 netif_stop_queue(dev);
645
646 /* Clean out any pending buffers */
647 pasemi_mac_clean_tx(mac);
648 pasemi_mac_clean_rx(mac, RX_RING_SIZE);
649
650 /* Disable interface */
651 pci_write_config_dword(mac->dma_pdev,
652 PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
653 PAS_DMA_TXCHAN_TCMDSTA_ST);
654 pci_write_config_dword(mac->dma_pdev,
655 PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
656 PAS_DMA_RXINT_RCMDSTA_ST);
657 pci_write_config_dword(mac->dma_pdev,
658 PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
659 PAS_DMA_RXCHAN_CCMDSTA_ST);
660
661 for (retries = 0; retries < MAX_RETRIES; retries++) {
662 pci_read_config_dword(mac->dma_pdev,
663 PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
664 &stat);
665 if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)
666 break;
667 cond_resched();
668 }
669
670 if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)) {
671 dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n");
672 }
673
674 for (retries = 0; retries < MAX_RETRIES; retries++) {
675 pci_read_config_dword(mac->dma_pdev,
676 PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
677 &stat);
678 if (stat & PAS_DMA_RXCHAN_CCMDSTA_ACT)
679 break;
680 cond_resched();
681 }
682
683 if (!(stat & PAS_DMA_RXCHAN_CCMDSTA_ACT)) {
684 dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n");
685 }
686
687 for (retries = 0; retries < MAX_RETRIES; retries++) {
688 pci_read_config_dword(mac->dma_pdev,
689 PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
690 &stat);
691 if (stat & PAS_DMA_RXINT_RCMDSTA_ACT)
692 break;
693 cond_resched();
694 }
695
696 if (!(stat & PAS_DMA_RXINT_RCMDSTA_ACT)) {
697 dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n");
698 }
699
700 /* Then, disable the channel. This must be done separately from
701 * stopping, since you can't disable when active.
702 */
703
704 pci_write_config_dword(mac->dma_pdev,
705 PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), 0);
706 pci_write_config_dword(mac->dma_pdev,
707 PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), 0);
708 pci_write_config_dword(mac->dma_pdev,
709 PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);
710
711 free_irq(mac->dma_pdev->irq + mac->dma_txch, dev);
712 free_irq(mac->dma_pdev->irq + 20 + mac->dma_rxch, dev);
713
714 /* Free resources */
715 pasemi_mac_free_rx_resources(dev);
716 pasemi_mac_free_tx_resources(dev);
717
718 return 0;
719}
720
721static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
722{
723 struct pasemi_mac *mac = netdev_priv(dev);
724 struct pasemi_mac_txring *txring;
725 struct pasemi_mac_buffer *info;
726 struct pas_dma_xct_descr *dp;
727 u64 dflags;
728 dma_addr_t map;
729 int flags;
730
731 dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_SS | XCT_MACTX_CRC_PAD;
732
733 if (skb->ip_summed == CHECKSUM_PARTIAL) {
734 switch (skb->nh.iph->protocol) {
735 case IPPROTO_TCP:
736 dflags |= XCT_MACTX_CSUM_TCP;
737 dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2);
738 dflags |= XCT_MACTX_IPO(skb->nh.raw - skb->data);
739 break;
740 case IPPROTO_UDP:
741 dflags |= XCT_MACTX_CSUM_UDP;
742 dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2);
743 dflags |= XCT_MACTX_IPO(skb->nh.raw - skb->data);
744 break;
745 }
746 }
747
748 map = pci_map_single(mac->dma_pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
749
750 if (dma_mapping_error(map))
751 return NETDEV_TX_BUSY;
752
753 txring = mac->tx;
754
755 spin_lock_irqsave(&txring->lock, flags);
756
757 if (txring->next_to_clean - txring->next_to_use == TX_RING_SIZE) {
758 spin_unlock_irqrestore(&txring->lock, flags);
759 pasemi_mac_clean_tx(mac);
760 spin_lock_irqsave(&txring->lock, flags);
761
762 if (txring->next_to_clean - txring->next_to_use ==
763 TX_RING_SIZE) {
764 /* Still no room -- stop the queue and wait for tx
765 * intr when there's room.
766 */
767 netif_stop_queue(dev);
768 goto out_err;
769 }
770 }
771
772
773 dp = &TX_DESC(mac, txring->next_to_use);
774 info = &TX_DESC_INFO(mac, txring->next_to_use);
775
776 dp->mactx = dflags | XCT_MACTX_LLEN(skb->len);
777 dp->ptr = XCT_PTR_LEN(skb->len) | XCT_PTR_ADDR(map);
778 info->dma = map;
779 info->skb = skb;
780
781 txring->next_to_use++;
782 mac->stats.tx_packets++;
783 mac->stats.tx_bytes += skb->len;
784
785 spin_unlock_irqrestore(&txring->lock, flags);
786
787 pci_write_config_dword(mac->dma_pdev,
788 PAS_DMA_TXCHAN_INCR(mac->dma_txch), 1);
789
790 return NETDEV_TX_OK;
791
792out_err:
793 spin_unlock_irqrestore(&txring->lock, flags);
794 pci_unmap_single(mac->dma_pdev, map, skb->len, PCI_DMA_TODEVICE);
795 return NETDEV_TX_BUSY;
796}
797
798static struct net_device_stats *pasemi_mac_get_stats(struct net_device *dev)
799{
800 struct pasemi_mac *mac = netdev_priv(dev);
801
802 return &mac->stats;
803}
804
805static void pasemi_mac_set_rx_mode(struct net_device *dev)
806{
807 struct pasemi_mac *mac = netdev_priv(dev);
808 unsigned int flags;
809
810 pci_read_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, &flags);
811
812 /* Set promiscuous */
813 if (dev->flags & IFF_PROMISC)
814 flags |= PAS_MAC_CFG_PCFG_PR;
815 else
816 flags &= ~PAS_MAC_CFG_PCFG_PR;
817
818 pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);
819}
820
821
822static int pasemi_mac_poll(struct net_device *dev, int *budget)
823{
824 int pkts, limit = min(*budget, dev->quota);
825 struct pasemi_mac *mac = netdev_priv(dev);
826
827 pkts = pasemi_mac_clean_rx(mac, limit);
828
829 if (pkts < limit) {
830 /* all done, no more packets present */
831 netif_rx_complete(dev);
832
833 /* re-enable receive interrupts */
834 pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
835 PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(1000000));
836 return 0;
837 } else {
838 /* used up our quantum, so reschedule */
839 dev->quota -= pkts;
840 *budget -= pkts;
841 return 1;
842 }
843}
844
/* PCI probe: allocate the netdev, take references on the shared DMA
 * engine and I/O bridge functions, assign DMA channels/interface by
 * probe order, read the MAC address from the device tree, and register
 * the interface. */
static int __devinit
pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int index = 0;	/* counts probed MACs; drives channel assignment */
	struct net_device *dev;
	struct pasemi_mac *mac;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	dev = alloc_etherdev(sizeof(struct pasemi_mac));
	if (dev == NULL) {
		dev_err(&pdev->dev,
			"pasemi_mac: Could not allocate ethernet device.\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	SET_MODULE_OWNER(dev);
	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	mac = netdev_priv(dev);

	mac->pdev = pdev;
	mac->netdev = dev;
	/* The DMA engine (device 0xa007) and I/O bridge (0xa001) are
	 * separate PCI functions shared by all MACs. */
	mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);

	if (!mac->dma_pdev) {
		dev_err(&pdev->dev, "Can't find DMA Controller\n");
		err = -ENODEV;
		goto out_free_netdev;
	}

	mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);

	if (!mac->iob_pdev) {
		dev_err(&pdev->dev, "Can't find I/O Bridge\n");
		err = -ENODEV;
		goto out_put_dma_pdev;
	}

	/* These should come out of the device tree eventually */
	mac->dma_txch = index;
	mac->dma_rxch = index;

	/* We probe GMAC before XAUI, but the DMA interfaces are
	 * in XAUI, GMAC order.
	 */
	if (index < 4)
		mac->dma_if = index + 2;
	else
		mac->dma_if = index - 4;
	index++;

	switch (pdev->device) {
	case 0xa005:
		mac->type = MAC_TYPE_GMAC;
		break;
	case 0xa006:
		mac->type = MAC_TYPE_XAUI;
		break;
	default:
		err = -ENODEV;
		goto out;
	}

	/* get mac addr from device tree */
	if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) {
		err = -ENODEV;
		goto out;
	}
	memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));

	dev->open = pasemi_mac_open;
	dev->stop = pasemi_mac_close;
	dev->hard_start_xmit = pasemi_mac_start_tx;
	dev->get_stats = pasemi_mac_get_stats;
	dev->set_multicast_list = pasemi_mac_set_rx_mode;
	dev->weight = 64;
	dev->poll = pasemi_mac_poll;
	dev->features = NETIF_F_HW_CSUM;

	/* The dma status structure is located in the I/O bridge, and
	 * is cache coherent.  Mapped once, shared by all MAC instances,
	 * and only unmapped at module exit.
	 */
	if (!dma_status)
		/* XXXOJN This should come from the device tree */
		dma_status = __ioremap(0xfd800000, 0x1000, 0);

	mac->rx_status = &dma_status->rx_sta[mac->dma_rxch];
	mac->tx_status = &dma_status->tx_sta[mac->dma_txch];

	err = register_netdev(dev);

	if (err) {
		dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",
			err);
		goto out;
	} else
		printk(KERN_INFO "%s: PA Semi %s: intf %d, txch %d, rxch %d, "
		       "hw addr %02x:%02x:%02x:%02x:%02x:%02x\n",
		       dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
		       mac->dma_if, mac->dma_txch, mac->dma_rxch,
		       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
		       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);

	return err;

	/* Error unwind: release in reverse order of acquisition. */
out:
	pci_dev_put(mac->iob_pdev);
out_put_dma_pdev:
	pci_dev_put(mac->dma_pdev);
out_free_netdev:
	free_netdev(dev);
out_disable_device:
	pci_disable_device(pdev);
	return err;

}
967
/* PCI remove: unregister the netdev and drop the device references taken
 * in probe.  The shared dma_status mapping is deliberately left alone
 * here (other MACs may still use it); it is released at module exit. */
static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pasemi_mac *mac;

	if (!netdev)
		return;

	mac = netdev_priv(netdev);

	/* unregister_netdev brings the interface down first if needed */
	unregister_netdev(netdev);

	pci_disable_device(pdev);
	pci_dev_put(mac->dma_pdev);
	pci_dev_put(mac->iob_pdev);

	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
}
987
988static struct pci_device_id pasemi_mac_pci_tbl[] = {
989 { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
990 { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
991};
992
993MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);
994
/* PCI driver glue; matched against pasemi_mac_pci_tbl. */
static struct pci_driver pasemi_mac_driver = {
	.name		= "pasemi_mac",
	.id_table	= pasemi_mac_pci_tbl,
	.probe		= pasemi_mac_probe,
	.remove		= __devexit_p(pasemi_mac_remove),
};
1001
1002static void __exit pasemi_mac_cleanup_module(void)
1003{
1004 pci_unregister_driver(&pasemi_mac_driver);
1005 __iounmap(dma_status);
1006 dma_status = NULL;
1007}
1008
/* Module entry point: register the PCI driver.
 * NOTE(review): not static -- presumably intentional, but no external
 * callers are visible from this file; verify before adding static. */
int pasemi_mac_init_module(void)
{
	return pci_register_driver(&pasemi_mac_driver);
}
1013
1014MODULE_LICENSE("GPL");
1015MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>");
1016MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");
1017
1018module_init(pasemi_mac_init_module);
1019module_exit(pasemi_mac_cleanup_module);
diff --git a/drivers/net/pasemi_mac.h b/drivers/net/pasemi_mac.h
new file mode 100644
index 000000000000..c3e37e46a18a
--- /dev/null
+++ b/drivers/net/pasemi_mac.h
@@ -0,0 +1,460 @@
1/*
2 * Copyright (C) 2006 PA Semi, Inc
3 *
4 * Driver for the PA6T-1682M onchip 1G/10G Ethernet MACs, soft state and
5 * hardware register layouts.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef PASEMI_MAC_H
22#define PASEMI_MAC_H
23
24#include <linux/ethtool.h>
25#include <linux/netdevice.h>
26#include <linux/spinlock.h>
27
/* Per-channel TX ring soft state.  desc and desc_info are parallel
 * arrays; next_to_use is the producer index, next_to_clean the consumer
 * index (both monotonically increasing, masked into the ring by the
 * TX_DESC macros in the .c file). */
struct pasemi_mac_txring {
	spinlock_t lock;			/* guards the indices and ring contents */
	struct pas_dma_xct_descr *desc;
	dma_addr_t dma;				/* bus address of the descriptor ring */
	unsigned int size;
	unsigned int next_to_use;
	unsigned int next_to_clean;
	struct pasemi_mac_buffer *desc_info;	/* skb/dma bookkeeping per slot */
	char irq_name[10];			/* "eth%d tx" */
};
38
/* Per-channel RX ring soft state.  In addition to the descriptor ring
 * there is a separate buffer ring handed to the RX interface. */
struct pasemi_mac_rxring {
	spinlock_t lock;			/* guards the indices and ring contents */
	struct pas_dma_xct_descr *desc;		/* RX channel descriptor ring */
	dma_addr_t dma;				/* bus address of the descriptor ring */
	u64 *buffers;				/* RX interface buffer ring */
	dma_addr_t buf_dma;			/* bus address of the buffer ring */
	unsigned int size;
	unsigned int next_to_fill;
	unsigned int next_to_clean;
	struct pasemi_mac_buffer *desc_info;	/* skb/dma bookkeeping per slot */
	char irq_name[10];			/* "eth%d rx" */
};
51
/* Per-interface driver-private state, stored in the netdev's priv area. */
struct pasemi_mac {
	struct net_device *netdev;
	struct pci_dev *pdev;		/* this MAC's own PCI function */
	struct pci_dev *dma_pdev;	/* shared DMA engine function (0xa007) */
	struct pci_dev *iob_pdev;	/* shared I/O bridge function (0xa001) */
	struct net_device_stats stats;

	/* Pointer to the cacheable per-channel status registers */
	u64 *rx_status;
	u64 *tx_status;

	u8 type;
#define MAC_TYPE_GMAC	1
#define MAC_TYPE_XAUI	2
	u32 dma_txch;	/* TX DMA channel number */
	u32 dma_if;	/* RX DMA interface number */
	u32 dma_rxch;	/* RX DMA channel number */

	u8 mac_addr[6];

	/* NOTE(review): rxtimer is never referenced in the visible .c
	 * code -- possibly reserved for a planned rx timeout path. */
	struct timer_list rxtimer;

	struct pasemi_mac_txring *tx;
	struct pasemi_mac_rxring *rx;
};
77
/* Software status descriptor (desc_info): the skb attached to one ring
 * slot and the DMA mapping of its data buffer. */
struct pasemi_mac_buffer {
	struct sk_buff *skb;
	dma_addr_t dma;
};
83
84
/* Status register layout in the IOB region.
 * NOTE(review): this comment originally said 0xfb800000, but the driver
 * maps 0xfd800000 in pasemi_mac_probe -- one of the two is stale;
 * verify against the hardware documentation. */
struct pasdma_status {
	u64 rx_sta[64];	/* one status word per RX channel */
	u64 tx_sta[20];	/* one status word per TX channel */
};
90
/* Hardware descriptor: two 64-bit words.  The first word is interpreted
 * as mactx (transmit) or macrx (receive); the second holds the buffer
 * pointer/length (ptr) or rx buffer word (rxb). */
struct pas_dma_xct_descr {
	union {
		u64 mactx;
		u64 macrx;
	};
	union {
		u64 ptr;
		u64 rxb;
	};
};
102
/* MAC CFG register offsets (accessed through the MAC's PCI config
 * space with pci_read/write_config_dword on mac->pdev) */
enum {
	PAS_MAC_CFG_PCFG = 0x80,	/* port configuration */
	PAS_MAC_CFG_TXP = 0x98,		/* transmit parameters */
	PAS_MAC_IPC_CHNL = 0x208,	/* MAC-to-DMA channel routing */
};
110
111/* MAC CFG register fields */
112#define PAS_MAC_CFG_PCFG_PE 0x80000000
113#define PAS_MAC_CFG_PCFG_CE 0x40000000
114#define PAS_MAC_CFG_PCFG_BU 0x20000000
115#define PAS_MAC_CFG_PCFG_TT 0x10000000
116#define PAS_MAC_CFG_PCFG_TSR_M 0x0c000000
117#define PAS_MAC_CFG_PCFG_TSR_10M 0x00000000
118#define PAS_MAC_CFG_PCFG_TSR_100M 0x04000000
119#define PAS_MAC_CFG_PCFG_TSR_1G 0x08000000
120#define PAS_MAC_CFG_PCFG_TSR_10G 0x0c000000
121#define PAS_MAC_CFG_PCFG_T24 0x02000000
122#define PAS_MAC_CFG_PCFG_PR 0x01000000
123#define PAS_MAC_CFG_PCFG_CRO_M 0x00ff0000
124#define PAS_MAC_CFG_PCFG_CRO_S 16
125#define PAS_MAC_CFG_PCFG_IPO_M 0x0000ff00
126#define PAS_MAC_CFG_PCFG_IPO_S 8
127#define PAS_MAC_CFG_PCFG_S1 0x00000080
128#define PAS_MAC_CFG_PCFG_IO_M 0x00000060
129#define PAS_MAC_CFG_PCFG_IO_MAC 0x00000000
130#define PAS_MAC_CFG_PCFG_IO_OFF 0x00000020
131#define PAS_MAC_CFG_PCFG_IO_IND_ETH 0x00000040
132#define PAS_MAC_CFG_PCFG_IO_IND_IP 0x00000060
133#define PAS_MAC_CFG_PCFG_LP 0x00000010
134#define PAS_MAC_CFG_PCFG_TS 0x00000008
135#define PAS_MAC_CFG_PCFG_HD 0x00000004
136#define PAS_MAC_CFG_PCFG_SPD_M 0x00000003
137#define PAS_MAC_CFG_PCFG_SPD_10M 0x00000000
138#define PAS_MAC_CFG_PCFG_SPD_100M 0x00000001
139#define PAS_MAC_CFG_PCFG_SPD_1G 0x00000002
140#define PAS_MAC_CFG_PCFG_SPD_10G 0x00000003
141#define PAS_MAC_CFG_TXP_FCF 0x01000000
142#define PAS_MAC_CFG_TXP_FCE 0x00800000
143#define PAS_MAC_CFG_TXP_FC 0x00400000
144#define PAS_MAC_CFG_TXP_FPC_M 0x00300000
145#define PAS_MAC_CFG_TXP_FPC_S 20
146#define PAS_MAC_CFG_TXP_FPC(x) (((x) << PAS_MAC_CFG_TXP_FPC_S) & \
147 PAS_MAC_CFG_TXP_FPC_M)
148#define PAS_MAC_CFG_TXP_RT 0x00080000
149#define PAS_MAC_CFG_TXP_BL 0x00040000
150#define PAS_MAC_CFG_TXP_SL_M 0x00030000
151#define PAS_MAC_CFG_TXP_SL_S 16
152#define PAS_MAC_CFG_TXP_SL(x) (((x) << PAS_MAC_CFG_TXP_SL_S) & \
153 PAS_MAC_CFG_TXP_SL_M)
154#define PAS_MAC_CFG_TXP_COB_M 0x0000f000
155#define PAS_MAC_CFG_TXP_COB_S 12
156#define PAS_MAC_CFG_TXP_COB(x) (((x) << PAS_MAC_CFG_TXP_COB_S) & \
157 PAS_MAC_CFG_TXP_COB_M)
158#define PAS_MAC_CFG_TXP_TIFT_M 0x00000f00
159#define PAS_MAC_CFG_TXP_TIFT_S 8
160#define PAS_MAC_CFG_TXP_TIFT(x) (((x) << PAS_MAC_CFG_TXP_TIFT_S) & \
161 PAS_MAC_CFG_TXP_TIFT_M)
162#define PAS_MAC_CFG_TXP_TIFG_M 0x000000ff
163#define PAS_MAC_CFG_TXP_TIFG_S 0
164#define PAS_MAC_CFG_TXP_TIFG(x) (((x) << PAS_MAC_CFG_TXP_TIFG_S) & \
165 PAS_MAC_CFG_TXP_TIFG_M)
166
167#define PAS_MAC_IPC_CHNL_DCHNO_M 0x003f0000
168#define PAS_MAC_IPC_CHNL_DCHNO_S 16
169#define PAS_MAC_IPC_CHNL_DCHNO(x) (((x) << PAS_MAC_IPC_CHNL_DCHNO_S) & \
170 PAS_MAC_IPC_CHNL_DCHNO_M)
171#define PAS_MAC_IPC_CHNL_BCH_M 0x0000003f
172#define PAS_MAC_IPC_CHNL_BCH_S 0
173#define PAS_MAC_IPC_CHNL_BCH(x) (((x) << PAS_MAC_IPC_CHNL_BCH_S) & \
174 PAS_MAC_IPC_CHNL_BCH_M)
175
176/* All these registers live in the PCI configuration space for the DMA PCI
177 * device. Use the normal PCI config access functions for them.
178 */
179enum {
180 PAS_DMA_COM_TXCMD = 0x100, /* Transmit Command Register */
181 PAS_DMA_COM_TXSTA = 0x104, /* Transmit Status Register */
182 PAS_DMA_COM_RXCMD = 0x108, /* Receive Command Register */
183 PAS_DMA_COM_RXSTA = 0x10c, /* Receive Status Register */
184};
185#define PAS_DMA_COM_TXCMD_EN 0x00000001 /* enable */
186#define PAS_DMA_COM_TXSTA_ACT 0x00000001 /* active */
187#define PAS_DMA_COM_RXCMD_EN 0x00000001 /* enable */
188#define PAS_DMA_COM_RXSTA_ACT 0x00000001 /* active */
189
190
191/* Per-interface and per-channel registers */
192#define _PAS_DMA_RXINT_STRIDE 0x20
193#define PAS_DMA_RXINT_RCMDSTA(i) (0x200+(i)*_PAS_DMA_RXINT_STRIDE)
194#define PAS_DMA_RXINT_RCMDSTA_EN 0x00000001
195#define PAS_DMA_RXINT_RCMDSTA_ST 0x00000002
196#define PAS_DMA_RXINT_RCMDSTA_OO 0x00000100
197#define PAS_DMA_RXINT_RCMDSTA_BP 0x00000200
198#define PAS_DMA_RXINT_RCMDSTA_DR 0x00000400
199#define PAS_DMA_RXINT_RCMDSTA_BT 0x00000800
200#define PAS_DMA_RXINT_RCMDSTA_TB 0x00001000
201#define PAS_DMA_RXINT_RCMDSTA_ACT 0x00010000
202#define PAS_DMA_RXINT_RCMDSTA_DROPS_M 0xfffe0000
203#define PAS_DMA_RXINT_RCMDSTA_DROPS_S 17
204#define PAS_DMA_RXINT_INCR(i) (0x210+(i)*_PAS_DMA_RXINT_STRIDE)
205#define PAS_DMA_RXINT_INCR_INCR_M 0x0000ffff
206#define PAS_DMA_RXINT_INCR_INCR_S 0
207#define PAS_DMA_RXINT_INCR_INCR(x) ((x) & 0x0000ffff)
208#define PAS_DMA_RXINT_BASEL(i) (0x218+(i)*_PAS_DMA_RXINT_STRIDE)
209#define PAS_DMA_RXINT_BASEL_BRBL(x) ((x) & ~0x3f)
210#define PAS_DMA_RXINT_BASEU(i) (0x21c+(i)*_PAS_DMA_RXINT_STRIDE)
211#define PAS_DMA_RXINT_BASEU_BRBH(x) ((x) & 0xfff)
212#define PAS_DMA_RXINT_BASEU_SIZ_M 0x3fff0000 /* # of cache lines worth of buffer ring */
213#define PAS_DMA_RXINT_BASEU_SIZ_S 16 /* 0 = 16K */
214#define PAS_DMA_RXINT_BASEU_SIZ(x) (((x) << PAS_DMA_RXINT_BASEU_SIZ_S) & \
215 PAS_DMA_RXINT_BASEU_SIZ_M)
216
217
218#define _PAS_DMA_TXCHAN_STRIDE 0x20 /* Size per channel */
219#define _PAS_DMA_TXCHAN_TCMDSTA 0x300 /* Command / Status */
220#define _PAS_DMA_TXCHAN_CFG 0x304 /* Configuration */
221#define _PAS_DMA_TXCHAN_DSCRBU 0x308 /* Descriptor BU Allocation */
222#define _PAS_DMA_TXCHAN_INCR 0x310 /* Descriptor increment */
223#define _PAS_DMA_TXCHAN_CNT 0x314 /* Descriptor count/offset */
224#define _PAS_DMA_TXCHAN_BASEL 0x318 /* Descriptor ring base (low) */
225#define _PAS_DMA_TXCHAN_BASEU 0x31c /* (high) */
226#define PAS_DMA_TXCHAN_TCMDSTA(c) (0x300+(c)*_PAS_DMA_TXCHAN_STRIDE)
227#define PAS_DMA_TXCHAN_TCMDSTA_EN 0x00000001 /* Enabled */
228#define PAS_DMA_TXCHAN_TCMDSTA_ST 0x00000002 /* Stop interface */
229#define PAS_DMA_TXCHAN_TCMDSTA_ACT 0x00010000 /* Active */
230#define PAS_DMA_TXCHAN_CFG(c) (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE)
231#define PAS_DMA_TXCHAN_CFG_TY_IFACE 0x00000000 /* Type = interface */
232#define PAS_DMA_TXCHAN_CFG_TATTR_M 0x0000003c
233#define PAS_DMA_TXCHAN_CFG_TATTR_S 2
234#define PAS_DMA_TXCHAN_CFG_TATTR(x) (((x) << PAS_DMA_TXCHAN_CFG_TATTR_S) & \
235 PAS_DMA_TXCHAN_CFG_TATTR_M)
236#define PAS_DMA_TXCHAN_CFG_WT_M 0x000001c0
237#define PAS_DMA_TXCHAN_CFG_WT_S 6
238#define PAS_DMA_TXCHAN_CFG_WT(x) (((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \
239 PAS_DMA_TXCHAN_CFG_WT_M)
240#define PAS_DMA_TXCHAN_CFG_CF 0x00001000 /* Clean first line */
241#define PAS_DMA_TXCHAN_CFG_CL 0x00002000 /* Clean last line */
242#define PAS_DMA_TXCHAN_CFG_UP 0x00004000 /* update tx descr when sent */
243#define PAS_DMA_TXCHAN_INCR(c) (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE)
244#define PAS_DMA_TXCHAN_BASEL(c) (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE)
245#define PAS_DMA_TXCHAN_BASEL_BRBL_M 0xffffffc0
246#define PAS_DMA_TXCHAN_BASEL_BRBL_S 0
247#define PAS_DMA_TXCHAN_BASEL_BRBL(x) (((x) << PAS_DMA_TXCHAN_BASEL_BRBL_S) & \
248 PAS_DMA_TXCHAN_BASEL_BRBL_M)
249#define PAS_DMA_TXCHAN_BASEU(c) (0x31c+(c)*_PAS_DMA_TXCHAN_STRIDE)
250#define PAS_DMA_TXCHAN_BASEU_BRBH_M 0x00000fff
251#define PAS_DMA_TXCHAN_BASEU_BRBH_S 0
252#define PAS_DMA_TXCHAN_BASEU_BRBH(x) (((x) << PAS_DMA_TXCHAN_BASEU_BRBH_S) & \
253 PAS_DMA_TXCHAN_BASEU_BRBH_M)
254/* # of cache lines worth of buffer ring */
255#define PAS_DMA_TXCHAN_BASEU_SIZ_M 0x3fff0000
256#define PAS_DMA_TXCHAN_BASEU_SIZ_S 16 /* 0 = 16K */
257#define PAS_DMA_TXCHAN_BASEU_SIZ(x) (((x) << PAS_DMA_TXCHAN_BASEU_SIZ_S) & \
258 PAS_DMA_TXCHAN_BASEU_SIZ_M)
259
260#define _PAS_DMA_RXCHAN_STRIDE 0x20 /* Size per channel */
261#define _PAS_DMA_RXCHAN_CCMDSTA 0x800 /* Command / Status */
262#define _PAS_DMA_RXCHAN_CFG 0x804 /* Configuration */
263#define _PAS_DMA_RXCHAN_INCR 0x810 /* Descriptor increment */
264#define _PAS_DMA_RXCHAN_CNT 0x814 /* Descriptor count/offset */
265#define _PAS_DMA_RXCHAN_BASEL 0x818 /* Descriptor ring base (low) */
266#define _PAS_DMA_RXCHAN_BASEU 0x81c /* (high) */
267#define PAS_DMA_RXCHAN_CCMDSTA(c) (0x800+(c)*_PAS_DMA_RXCHAN_STRIDE)
268#define PAS_DMA_RXCHAN_CCMDSTA_EN 0x00000001 /* Enabled */
269#define PAS_DMA_RXCHAN_CCMDSTA_ST 0x00000002 /* Stop interface */
270#define PAS_DMA_RXCHAN_CCMDSTA_ACT 0x00010000 /* Active */
271#define PAS_DMA_RXCHAN_CCMDSTA_DU 0x00020000
272#define PAS_DMA_RXCHAN_CFG(c) (0x804+(c)*_PAS_DMA_RXCHAN_STRIDE)
273#define PAS_DMA_RXCHAN_CFG_HBU_M 0x00000380
274#define PAS_DMA_RXCHAN_CFG_HBU_S 7
275#define PAS_DMA_RXCHAN_CFG_HBU(x) (((x) << PAS_DMA_RXCHAN_CFG_HBU_S) & \
276 PAS_DMA_RXCHAN_CFG_HBU_M)
277#define PAS_DMA_RXCHAN_INCR(c) (0x810+(c)*_PAS_DMA_RXCHAN_STRIDE)
278#define PAS_DMA_RXCHAN_BASEL(c) (0x818+(c)*_PAS_DMA_RXCHAN_STRIDE)
279#define PAS_DMA_RXCHAN_BASEL_BRBL_M 0xffffffc0
280#define PAS_DMA_RXCHAN_BASEL_BRBL_S 0
281#define PAS_DMA_RXCHAN_BASEL_BRBL(x) (((x) << PAS_DMA_RXCHAN_BASEL_BRBL_S) & \
282 PAS_DMA_RXCHAN_BASEL_BRBL_M)
283#define PAS_DMA_RXCHAN_BASEU(c) (0x81c+(c)*_PAS_DMA_RXCHAN_STRIDE)
284#define PAS_DMA_RXCHAN_BASEU_BRBH_M 0x00000fff
285#define PAS_DMA_RXCHAN_BASEU_BRBH_S 0
286#define PAS_DMA_RXCHAN_BASEU_BRBH(x) (((x) << PAS_DMA_RXCHAN_BASEU_BRBH_S) & \
287 PAS_DMA_RXCHAN_BASEU_BRBH_M)
288/* # of cache lines worth of buffer ring */
289#define PAS_DMA_RXCHAN_BASEU_SIZ_M 0x3fff0000
290#define PAS_DMA_RXCHAN_BASEU_SIZ_S 16 /* 0 = 16K */
291#define PAS_DMA_RXCHAN_BASEU_SIZ(x) (((x) << PAS_DMA_RXCHAN_BASEU_SIZ_S) & \
292 PAS_DMA_RXCHAN_BASEU_SIZ_M)
293
294#define PAS_STATUS_PCNT_M 0x000000000000ffffull
295#define PAS_STATUS_PCNT_S 0
296#define PAS_STATUS_DCNT_M 0x00000000ffff0000ull
297#define PAS_STATUS_DCNT_S 16
298#define PAS_STATUS_BPCNT_M 0x0000ffff00000000ull
299#define PAS_STATUS_BPCNT_S 32
300#define PAS_STATUS_TIMER 0x1000000000000000ull
301#define PAS_STATUS_ERROR 0x2000000000000000ull
302#define PAS_STATUS_SOFT 0x4000000000000000ull
303#define PAS_STATUS_INT 0x8000000000000000ull
304
305#define PAS_IOB_DMA_RXCH_CFG(i) (0x1100 + (i)*4)
306#define PAS_IOB_DMA_RXCH_CFG_CNTTH_M 0x00000fff
307#define PAS_IOB_DMA_RXCH_CFG_CNTTH_S 0
308#define PAS_IOB_DMA_RXCH_CFG_CNTTH(x) (((x) << PAS_IOB_DMA_RXCH_CFG_CNTTH_S) & \
309 PAS_IOB_DMA_RXCH_CFG_CNTTH_M)
310#define PAS_IOB_DMA_TXCH_CFG(i) (0x1200 + (i)*4)
311#define PAS_IOB_DMA_TXCH_CFG_CNTTH_M 0x00000fff
312#define PAS_IOB_DMA_TXCH_CFG_CNTTH_S 0
313#define PAS_IOB_DMA_TXCH_CFG_CNTTH(x) (((x) << PAS_IOB_DMA_TXCH_CFG_CNTTH_S) & \
314 PAS_IOB_DMA_TXCH_CFG_CNTTH_M)
315#define PAS_IOB_DMA_RXCH_STAT(i) (0x1300 + (i)*4)
316#define PAS_IOB_DMA_RXCH_STAT_INTGEN 0x00001000
317#define PAS_IOB_DMA_RXCH_STAT_CNTDEL_M 0x00000fff
318#define PAS_IOB_DMA_RXCH_STAT_CNTDEL_S 0
319#define PAS_IOB_DMA_RXCH_STAT_CNTDEL(x) (((x) << PAS_IOB_DMA_RXCH_STAT_CNTDEL_S) &\
320 PAS_IOB_DMA_RXCH_STAT_CNTDEL_M)
321#define PAS_IOB_DMA_TXCH_STAT(i) (0x1400 + (i)*4)
322#define PAS_IOB_DMA_TXCH_STAT_INTGEN 0x00001000
323#define PAS_IOB_DMA_TXCH_STAT_CNTDEL_M 0x00000fff
324#define PAS_IOB_DMA_TXCH_STAT_CNTDEL_S 0
325#define PAS_IOB_DMA_TXCH_STAT_CNTDEL(x) (((x) << PAS_IOB_DMA_TXCH_STAT_CNTDEL_S) &\
326 PAS_IOB_DMA_TXCH_STAT_CNTDEL_M)
/* Per-channel interrupt reset/ack registers in the I/O bridge. */
#define PAS_IOB_DMA_RXCH_RESET(i)	(0x1500 + (i)*4)
#define PAS_IOB_DMA_RXCH_RESET_PCNT_M	0xffff0000
/* NOTE(review): a shift of 0 with a mask of 0xffff0000 looks wrong --
 * a 16-bit count shifted by 0 can never land in the masked field.
 * Presumably the shift should be 16; confirm against the databook.
 * (The PCNT macros are unused in the visible .c code.) */
#define PAS_IOB_DMA_RXCH_RESET_PCNT_S	0
#define PAS_IOB_DMA_RXCH_RESET_PCNT(x)	(((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \
					 PAS_IOB_DMA_RXCH_RESET_PCNT_M)
#define PAS_IOB_DMA_RXCH_RESET_PCNTRST	0x00000020
#define PAS_IOB_DMA_RXCH_RESET_DCNTRST	0x00000010
#define PAS_IOB_DMA_RXCH_RESET_TINTC	0x00000008
#define PAS_IOB_DMA_RXCH_RESET_DINTC	0x00000004
#define PAS_IOB_DMA_RXCH_RESET_SINTC	0x00000002
#define PAS_IOB_DMA_RXCH_RESET_PINTC	0x00000001
#define PAS_IOB_DMA_TXCH_RESET(i)	(0x1600 + (i)*4)
#define PAS_IOB_DMA_TXCH_RESET_PCNT_M	0xffff0000
/* NOTE(review): same suspected shift-vs-mask mismatch as the RX
 * variant above -- verify. */
#define PAS_IOB_DMA_TXCH_RESET_PCNT_S	0
#define PAS_IOB_DMA_TXCH_RESET_PCNT(x)	(((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \
					 PAS_IOB_DMA_TXCH_RESET_PCNT_M)
#define PAS_IOB_DMA_TXCH_RESET_PCNTRST	0x00000020
#define PAS_IOB_DMA_TXCH_RESET_DCNTRST	0x00000010
#define PAS_IOB_DMA_TXCH_RESET_TINTC	0x00000008
#define PAS_IOB_DMA_TXCH_RESET_DINTC	0x00000004
#define PAS_IOB_DMA_TXCH_RESET_SINTC	0x00000002
#define PAS_IOB_DMA_TXCH_RESET_PINTC	0x00000001
349
350#define PAS_IOB_DMA_COM_TIMEOUTCFG 0x1700
351#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M 0x00ffffff
352#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S 0
353#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(x) (((x) << PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S) & \
354 PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M)
355
356/* Transmit descriptor fields */
357#define XCT_MACTX_T 0x8000000000000000ull
358#define XCT_MACTX_ST 0x4000000000000000ull
359#define XCT_MACTX_NORES 0x0000000000000000ull
360#define XCT_MACTX_8BRES 0x1000000000000000ull
361#define XCT_MACTX_24BRES 0x2000000000000000ull
362#define XCT_MACTX_40BRES 0x3000000000000000ull
363#define XCT_MACTX_I 0x0800000000000000ull
364#define XCT_MACTX_O 0x0400000000000000ull
365#define XCT_MACTX_E 0x0200000000000000ull
366#define XCT_MACTX_VLAN_M 0x0180000000000000ull
367#define XCT_MACTX_VLAN_NOP 0x0000000000000000ull
368#define XCT_MACTX_VLAN_REMOVE 0x0080000000000000ull
369#define XCT_MACTX_VLAN_INSERT 0x0100000000000000ull
370#define XCT_MACTX_VLAN_REPLACE 0x0180000000000000ull
371#define XCT_MACTX_CRC_M 0x0060000000000000ull
372#define XCT_MACTX_CRC_NOP 0x0000000000000000ull
373#define XCT_MACTX_CRC_INSERT 0x0020000000000000ull
374#define XCT_MACTX_CRC_PAD 0x0040000000000000ull
375#define XCT_MACTX_CRC_REPLACE 0x0060000000000000ull
376#define XCT_MACTX_SS 0x0010000000000000ull
377#define XCT_MACTX_LLEN_M 0x00007fff00000000ull
378#define XCT_MACTX_LLEN_S 32ull
379#define XCT_MACTX_LLEN(x) ((((long)(x)) << XCT_MACTX_LLEN_S) & \
380 XCT_MACTX_LLEN_M)
381#define XCT_MACTX_IPH_M 0x00000000f8000000ull
382#define XCT_MACTX_IPH_S 27ull
383#define XCT_MACTX_IPH(x) ((((long)(x)) << XCT_MACTX_IPH_S) & \
384 XCT_MACTX_IPH_M)
385#define XCT_MACTX_IPO_M 0x0000000007c00000ull
386#define XCT_MACTX_IPO_S 22ull
387#define XCT_MACTX_IPO(x) ((((long)(x)) << XCT_MACTX_IPO_S) & \
388 XCT_MACTX_IPO_M)
389#define XCT_MACTX_CSUM_M 0x0000000000000060ull
390#define XCT_MACTX_CSUM_NOP 0x0000000000000000ull
391#define XCT_MACTX_CSUM_TCP 0x0000000000000040ull
392#define XCT_MACTX_CSUM_UDP 0x0000000000000060ull
393#define XCT_MACTX_V6 0x0000000000000010ull
394#define XCT_MACTX_C 0x0000000000000004ull
395#define XCT_MACTX_AL2 0x0000000000000002ull
396
397/* Receive descriptor fields */
398#define XCT_MACRX_T 0x8000000000000000ull
399#define XCT_MACRX_ST 0x4000000000000000ull
400#define XCT_MACRX_NORES 0x0000000000000000ull
401#define XCT_MACRX_8BRES 0x1000000000000000ull
402#define XCT_MACRX_24BRES 0x2000000000000000ull
403#define XCT_MACRX_40BRES 0x3000000000000000ull
404#define XCT_MACRX_O 0x0400000000000000ull
405#define XCT_MACRX_E 0x0200000000000000ull
406#define XCT_MACRX_FF 0x0100000000000000ull
407#define XCT_MACRX_PF 0x0080000000000000ull
408#define XCT_MACRX_OB 0x0040000000000000ull
409#define XCT_MACRX_OD 0x0020000000000000ull
410#define XCT_MACRX_FS 0x0010000000000000ull
411#define XCT_MACRX_NB_M 0x000fc00000000000ull
412#define XCT_MACRX_NB_S 46ULL
413#define XCT_MACRX_NB(x) ((((long)(x)) << XCT_MACRX_NB_S) & \
414 XCT_MACRX_NB_M)
415#define XCT_MACRX_LLEN_M 0x00003fff00000000ull
416#define XCT_MACRX_LLEN_S 32ULL
417#define XCT_MACRX_LLEN(x) ((((long)(x)) << XCT_MACRX_LLEN_S) & \
418 XCT_MACRX_LLEN_M)
419#define XCT_MACRX_CRC 0x0000000080000000ull
420#define XCT_MACRX_LEN_M 0x0000000060000000ull
421#define XCT_MACRX_LEN_TOOSHORT 0x0000000020000000ull
422#define XCT_MACRX_LEN_BELOWMIN 0x0000000040000000ull
423#define XCT_MACRX_LEN_TRUNC 0x0000000060000000ull
424#define XCT_MACRX_CAST_M 0x0000000018000000ull
425#define XCT_MACRX_CAST_UNI 0x0000000000000000ull
426#define XCT_MACRX_CAST_MULTI 0x0000000008000000ull
427#define XCT_MACRX_CAST_BROAD 0x0000000010000000ull
428#define XCT_MACRX_CAST_PAUSE 0x0000000018000000ull
429#define XCT_MACRX_VLC_M 0x0000000006000000ull
430#define XCT_MACRX_FM 0x0000000001000000ull
431#define XCT_MACRX_HTY_M 0x0000000000c00000ull
432#define XCT_MACRX_HTY_IPV4_OK 0x0000000000000000ull
433#define XCT_MACRX_HTY_IPV6 0x0000000000400000ull
434#define XCT_MACRX_HTY_IPV4_BAD 0x0000000000800000ull
435#define XCT_MACRX_HTY_NONIP 0x0000000000c00000ull
436#define XCT_MACRX_IPP_M 0x00000000003f0000ull
437#define XCT_MACRX_IPP_S 16
438#define XCT_MACRX_CSUM_M 0x000000000000ffffull
439#define XCT_MACRX_CSUM_S 0
440
441#define XCT_PTR_T 0x8000000000000000ull
442#define XCT_PTR_LEN_M 0x7ffff00000000000ull
443#define XCT_PTR_LEN_S 44
444#define XCT_PTR_LEN(x) ((((long)(x)) << XCT_PTR_LEN_S) & \
445 XCT_PTR_LEN_M)
446#define XCT_PTR_ADDR_M 0x00000fffffffffffull
447#define XCT_PTR_ADDR_S 0
448#define XCT_PTR_ADDR(x) ((((long)(x)) << XCT_PTR_ADDR_S) & \
449 XCT_PTR_ADDR_M)
450
451/* Receive interface buffer fields */
452#define XCT_RXB_LEN_M 0x0ffff00000000000ull
453#define XCT_RXB_LEN_S 44
454#define XCT_RXB_LEN(x) ((((long)(x)) << XCT_PTR_LEN_S) & XCT_PTR_LEN_M)
455#define XCT_RXB_ADDR_M 0x00000fffffffffffull
456#define XCT_RXB_ADDR_S 0
457#define XCT_RXB_ADDR(x) ((((long)(x)) << XCT_PTR_ADDR_S) & XCT_PTR_ADDR_M)
458
459
460#endif /* PASEMI_MAC_H */
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 8844c20eac2d..2429b274f0b0 100644..100755
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -22,6 +22,7 @@
22#include <linux/errno.h> 22#include <linux/errno.h>
23#include <linux/ioport.h> 23#include <linux/ioport.h>
24#include <linux/ip.h> 24#include <linux/ip.h>
25#include <linux/in.h>
25#include <linux/if_arp.h> 26#include <linux/if_arp.h>
26#include <linux/if_ether.h> 27#include <linux/if_ether.h>
27#include <linux/netdevice.h> 28#include <linux/netdevice.h>
@@ -63,6 +64,7 @@ MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
63 64
64static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = { 65static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
65 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)}, 66 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
67 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
66 /* required last entry */ 68 /* required last entry */
67 {0,} 69 {0,}
68}; 70};
@@ -1475,6 +1477,10 @@ static int ql_mii_setup(struct ql3_adapter *qdev)
1475 2) << 7)) 1477 2) << 7))
1476 return -1; 1478 return -1;
1477 1479
1480 if (qdev->device_id == QL3032_DEVICE_ID)
1481 ql_write_page0_reg(qdev,
1482 &port_regs->macMIIMgmtControlReg, 0x0f00000);
1483
1478 /* Divide 125MHz clock by 28 to meet PHY timing requirements */ 1484 /* Divide 125MHz clock by 28 to meet PHY timing requirements */
1479 reg = MAC_MII_CONTROL_CLK_SEL_DIV28; 1485 reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
1480 1486
@@ -1706,18 +1712,42 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1706 struct ob_mac_iocb_rsp *mac_rsp) 1712 struct ob_mac_iocb_rsp *mac_rsp)
1707{ 1713{
1708 struct ql_tx_buf_cb *tx_cb; 1714 struct ql_tx_buf_cb *tx_cb;
1715 int i;
1709 1716
1710 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; 1717 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
1711 pci_unmap_single(qdev->pdev, 1718 pci_unmap_single(qdev->pdev,
1712 pci_unmap_addr(tx_cb, mapaddr), 1719 pci_unmap_addr(&tx_cb->map[0], mapaddr),
1713 pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE); 1720 pci_unmap_len(&tx_cb->map[0], maplen),
1714 dev_kfree_skb_irq(tx_cb->skb); 1721 PCI_DMA_TODEVICE);
1722 tx_cb->seg_count--;
1723 if (tx_cb->seg_count) {
1724 for (i = 1; i < tx_cb->seg_count; i++) {
1725 pci_unmap_page(qdev->pdev,
1726 pci_unmap_addr(&tx_cb->map[i],
1727 mapaddr),
1728 pci_unmap_len(&tx_cb->map[i], maplen),
1729 PCI_DMA_TODEVICE);
1730 }
1731 }
1715 qdev->stats.tx_packets++; 1732 qdev->stats.tx_packets++;
1716 qdev->stats.tx_bytes += tx_cb->skb->len; 1733 qdev->stats.tx_bytes += tx_cb->skb->len;
1734 dev_kfree_skb_irq(tx_cb->skb);
1717 tx_cb->skb = NULL; 1735 tx_cb->skb = NULL;
1718 atomic_inc(&qdev->tx_count); 1736 atomic_inc(&qdev->tx_count);
1719} 1737}
1720 1738
1739/*
1740 * The difference between 3022 and 3032 for inbound completions:
1741 * 3022 uses two buffers per completion. The first buffer contains
1742 * (some) header info, the second the remainder of the headers plus
1743 * the data. For this chip we reserve some space at the top of the
1744 * receive buffer so that the header info in buffer one can be
1745 * prepended to the buffer two. Buffer two is the sent up while
1746 * buffer one is returned to the hardware to be reused.
1747 * 3032 receives all of it's data and headers in one buffer for a
1748 * simpler process. 3032 also supports checksum verification as
1749 * can be seen in ql_process_macip_rx_intr().
1750 */
1721static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, 1751static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
1722 struct ib_mac_iocb_rsp *ib_mac_rsp_ptr) 1752 struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
1723{ 1753{
@@ -1740,14 +1770,17 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
1740 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset; 1770 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
1741 qdev->small_buf_release_cnt++; 1771 qdev->small_buf_release_cnt++;
1742 1772
1743 /* start of first buffer */ 1773 if (qdev->device_id == QL3022_DEVICE_ID) {
1744 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); 1774 /* start of first buffer (3022 only) */
1745 lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index]; 1775 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
1746 qdev->lrg_buf_release_cnt++; 1776 lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
1747 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) 1777 qdev->lrg_buf_release_cnt++;
1748 qdev->lrg_buf_index = 0; 1778 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) {
1749 curr_ial_ptr++; /* 64-bit pointers require two incs. */ 1779 qdev->lrg_buf_index = 0;
1750 curr_ial_ptr++; 1780 }
1781 curr_ial_ptr++; /* 64-bit pointers require two incs. */
1782 curr_ial_ptr++;
1783 }
1751 1784
1752 /* start of second buffer */ 1785 /* start of second buffer */
1753 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); 1786 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
@@ -1778,7 +1811,8 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
1778 qdev->ndev->last_rx = jiffies; 1811 qdev->ndev->last_rx = jiffies;
1779 lrg_buf_cb2->skb = NULL; 1812 lrg_buf_cb2->skb = NULL;
1780 1813
1781 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); 1814 if (qdev->device_id == QL3022_DEVICE_ID)
1815 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
1782 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); 1816 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
1783} 1817}
1784 1818
@@ -1790,7 +1824,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
1790 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; 1824 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
1791 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; 1825 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
1792 u32 *curr_ial_ptr; 1826 u32 *curr_ial_ptr;
1793 struct sk_buff *skb1, *skb2; 1827 struct sk_buff *skb1 = NULL, *skb2;
1794 struct net_device *ndev = qdev->ndev; 1828 struct net_device *ndev = qdev->ndev;
1795 u16 length = le16_to_cpu(ib_ip_rsp_ptr->length); 1829 u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
1796 u16 size = 0; 1830 u16 size = 0;
@@ -1806,16 +1840,20 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
1806 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset; 1840 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
1807 qdev->small_buf_release_cnt++; 1841 qdev->small_buf_release_cnt++;
1808 1842
1809 /* start of first buffer */ 1843 if (qdev->device_id == QL3022_DEVICE_ID) {
1810 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); 1844 /* start of first buffer on 3022 */
1811 lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index]; 1845 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
1812 1846 lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
1813 qdev->lrg_buf_release_cnt++; 1847 qdev->lrg_buf_release_cnt++;
1814 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) 1848 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1815 qdev->lrg_buf_index = 0; 1849 qdev->lrg_buf_index = 0;
1816 skb1 = lrg_buf_cb1->skb; 1850 skb1 = lrg_buf_cb1->skb;
1817 curr_ial_ptr++; /* 64-bit pointers require two incs. */ 1851 curr_ial_ptr++; /* 64-bit pointers require two incs. */
1818 curr_ial_ptr++; 1852 curr_ial_ptr++;
1853 size = ETH_HLEN;
1854 if (*((u16 *) skb1->data) != 0xFFFF)
1855 size += VLAN_ETH_HLEN - ETH_HLEN;
1856 }
1819 1857
1820 /* start of second buffer */ 1858 /* start of second buffer */
1821 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); 1859 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
@@ -1825,18 +1863,6 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
1825 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) 1863 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1826 qdev->lrg_buf_index = 0; 1864 qdev->lrg_buf_index = 0;
1827 1865
1828 qdev->stats.rx_packets++;
1829 qdev->stats.rx_bytes += length;
1830
1831 /*
1832 * Copy the ethhdr from first buffer to second. This
1833 * is necessary for IP completions.
1834 */
1835 if (*((u16 *) skb1->data) != 0xFFFF)
1836 size = VLAN_ETH_HLEN;
1837 else
1838 size = ETH_HLEN;
1839
1840 skb_put(skb2, length); /* Just the second buffer length here. */ 1866 skb_put(skb2, length); /* Just the second buffer length here. */
1841 pci_unmap_single(qdev->pdev, 1867 pci_unmap_single(qdev->pdev,
1842 pci_unmap_addr(lrg_buf_cb2, mapaddr), 1868 pci_unmap_addr(lrg_buf_cb2, mapaddr),
@@ -1844,16 +1870,40 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
1844 PCI_DMA_FROMDEVICE); 1870 PCI_DMA_FROMDEVICE);
1845 prefetch(skb2->data); 1871 prefetch(skb2->data);
1846 1872
1847 memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
1848 skb2->dev = qdev->ndev;
1849 skb2->ip_summed = CHECKSUM_NONE; 1873 skb2->ip_summed = CHECKSUM_NONE;
1874 if (qdev->device_id == QL3022_DEVICE_ID) {
1875 /*
1876 * Copy the ethhdr from first buffer to second. This
1877 * is necessary for 3022 IP completions.
1878 */
1879 memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
1880 } else {
1881 u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
1882 if (checksum &
1883 (IB_IP_IOCB_RSP_3032_ICE |
1884 IB_IP_IOCB_RSP_3032_CE |
1885 IB_IP_IOCB_RSP_3032_NUC)) {
1886 printk(KERN_ERR
1887 "%s: Bad checksum for this %s packet, checksum = %x.\n",
1888 __func__,
1889 ((checksum &
1890 IB_IP_IOCB_RSP_3032_TCP) ? "TCP" :
1891 "UDP"),checksum);
1892 } else if (checksum & IB_IP_IOCB_RSP_3032_TCP) {
1893 skb2->ip_summed = CHECKSUM_UNNECESSARY;
1894 }
1895 }
1896 skb2->dev = qdev->ndev;
1850 skb2->protocol = eth_type_trans(skb2, qdev->ndev); 1897 skb2->protocol = eth_type_trans(skb2, qdev->ndev);
1851 1898
1852 netif_receive_skb(skb2); 1899 netif_receive_skb(skb2);
1900 qdev->stats.rx_packets++;
1901 qdev->stats.rx_bytes += length;
1853 ndev->last_rx = jiffies; 1902 ndev->last_rx = jiffies;
1854 lrg_buf_cb2->skb = NULL; 1903 lrg_buf_cb2->skb = NULL;
1855 1904
1856 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); 1905 if (qdev->device_id == QL3022_DEVICE_ID)
1906 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
1857 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); 1907 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
1858} 1908}
1859 1909
@@ -1880,12 +1930,14 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
1880 break; 1930 break;
1881 1931
1882 case OPCODE_IB_MAC_IOCB: 1932 case OPCODE_IB_MAC_IOCB:
1933 case OPCODE_IB_3032_MAC_IOCB:
1883 ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) 1934 ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
1884 net_rsp); 1935 net_rsp);
1885 (*rx_cleaned)++; 1936 (*rx_cleaned)++;
1886 break; 1937 break;
1887 1938
1888 case OPCODE_IB_IP_IOCB: 1939 case OPCODE_IB_IP_IOCB:
1940 case OPCODE_IB_3032_IP_IOCB:
1889 ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) 1941 ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
1890 net_rsp); 1942 net_rsp);
1891 (*rx_cleaned)++; 1943 (*rx_cleaned)++;
@@ -2032,13 +2084,96 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2032 return IRQ_RETVAL(handled); 2084 return IRQ_RETVAL(handled);
2033} 2085}
2034 2086
2087/*
2088 * Get the total number of segments needed for the
2089 * given number of fragments. This is necessary because
2090 * outbound address lists (OAL) will be used when more than
2091 * two frags are given. Each address list has 5 addr/len
2092 * pairs. The 5th pair in each AOL is used to point to
2093 * the next AOL if more frags are coming.
2094 * That is why the frags:segment count ratio is not linear.
2095 */
2096static int ql_get_seg_count(unsigned short frags)
2097{
2098 switch(frags) {
2099 case 0: return 1; /* just the skb->data seg */
2100 case 1: return 2; /* skb->data + 1 frag */
2101 case 2: return 3; /* skb->data + 2 frags */
2102 case 3: return 5; /* skb->data + 1 frag + 1 AOL containting 2 frags */
2103 case 4: return 6;
2104 case 5: return 7;
2105 case 6: return 8;
2106 case 7: return 10;
2107 case 8: return 11;
2108 case 9: return 12;
2109 case 10: return 13;
2110 case 11: return 15;
2111 case 12: return 16;
2112 case 13: return 17;
2113 case 14: return 18;
2114 case 15: return 20;
2115 case 16: return 21;
2116 case 17: return 22;
2117 case 18: return 23;
2118 }
2119 return -1;
2120}
2121
2122static void ql_hw_csum_setup(struct sk_buff *skb,
2123 struct ob_mac_iocb_req *mac_iocb_ptr)
2124{
2125 struct ethhdr *eth;
2126 struct iphdr *ip = NULL;
2127 u8 offset = ETH_HLEN;
2128
2129 eth = (struct ethhdr *)(skb->data);
2130
2131 if (eth->h_proto == __constant_htons(ETH_P_IP)) {
2132 ip = (struct iphdr *)&skb->data[ETH_HLEN];
2133 } else if (eth->h_proto == htons(ETH_P_8021Q) &&
2134 ((struct vlan_ethhdr *)skb->data)->
2135 h_vlan_encapsulated_proto == __constant_htons(ETH_P_IP)) {
2136 ip = (struct iphdr *)&skb->data[VLAN_ETH_HLEN];
2137 offset = VLAN_ETH_HLEN;
2138 }
2139
2140 if (ip) {
2141 if (ip->protocol == IPPROTO_TCP) {
2142 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC;
2143 mac_iocb_ptr->ip_hdr_off = offset;
2144 mac_iocb_ptr->ip_hdr_len = ip->ihl;
2145 } else if (ip->protocol == IPPROTO_UDP) {
2146 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC;
2147 mac_iocb_ptr->ip_hdr_off = offset;
2148 mac_iocb_ptr->ip_hdr_len = ip->ihl;
2149 }
2150 }
2151}
2152
2153/*
2154 * The difference between 3022 and 3032 sends:
2155 * 3022 only supports a simple single segment transmission.
2156 * 3032 supports checksumming and scatter/gather lists (fragments).
2157 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
2158 * in the IOCB plus a chain of outbound address lists (OAL) that
2159 * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
2160 * will used to point to an OAL when more ALP entries are required.
2161 * The IOCB is always the top of the chain followed by one or more
2162 * OALs (when necessary).
2163 */
2035static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev) 2164static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2036{ 2165{
2037 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 2166 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
2038 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 2167 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2039 struct ql_tx_buf_cb *tx_cb; 2168 struct ql_tx_buf_cb *tx_cb;
2169 u32 tot_len = skb->len;
2170 struct oal *oal;
2171 struct oal_entry *oal_entry;
2172 int len;
2040 struct ob_mac_iocb_req *mac_iocb_ptr; 2173 struct ob_mac_iocb_req *mac_iocb_ptr;
2041 u64 map; 2174 u64 map;
2175 int seg_cnt, seg = 0;
2176 int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
2042 2177
2043 if (unlikely(atomic_read(&qdev->tx_count) < 2)) { 2178 if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
2044 if (!netif_queue_stopped(ndev)) 2179 if (!netif_queue_stopped(ndev))
@@ -2046,21 +2181,79 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2046 return NETDEV_TX_BUSY; 2181 return NETDEV_TX_BUSY;
2047 } 2182 }
2048 tx_cb = &qdev->tx_buf[qdev->req_producer_index] ; 2183 tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
2184 seg_cnt = tx_cb->seg_count = ql_get_seg_count((skb_shinfo(skb)->nr_frags));
2185 if(seg_cnt == -1) {
2186 printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
2187 return NETDEV_TX_OK;
2188
2189 }
2049 mac_iocb_ptr = tx_cb->queue_entry; 2190 mac_iocb_ptr = tx_cb->queue_entry;
2050 memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); 2191 memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
2051 mac_iocb_ptr->opcode = qdev->mac_ob_opcode; 2192 mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
2052 mac_iocb_ptr->flags |= qdev->mb_bit_mask; 2193 mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2053 mac_iocb_ptr->transaction_id = qdev->req_producer_index; 2194 mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2054 mac_iocb_ptr->data_len = cpu_to_le16((u16) skb->len); 2195 mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
2055 tx_cb->skb = skb; 2196 tx_cb->skb = skb;
2056 map = pci_map_single(qdev->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); 2197 if (skb->ip_summed == CHECKSUM_PARTIAL)
2057 mac_iocb_ptr->buf_addr0_low = cpu_to_le32(LS_64BITS(map)); 2198 ql_hw_csum_setup(skb, mac_iocb_ptr);
2058 mac_iocb_ptr->buf_addr0_high = cpu_to_le32(MS_64BITS(map)); 2199 len = skb_headlen(skb);
2059 mac_iocb_ptr->buf_0_len = cpu_to_le32(skb->len | OB_MAC_IOCB_REQ_E); 2200 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
2060 pci_unmap_addr_set(tx_cb, mapaddr, map); 2201 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2061 pci_unmap_len_set(tx_cb, maplen, skb->len); 2202 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2062 atomic_dec(&qdev->tx_count); 2203 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2204 oal_entry->len = cpu_to_le32(len);
2205 pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2206 pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
2207 seg++;
2208
2209 if (!skb_shinfo(skb)->nr_frags) {
2210 /* Terminate the last segment. */
2211 oal_entry->len =
2212 cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
2213 } else {
2214 int i;
2215 oal = tx_cb->oal;
2216 for (i=0; i<frag_cnt; i++,seg++) {
2217 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2218 oal_entry++;
2219 if ((seg == 2 && seg_cnt > 3) || /* Check for continuation */
2220 (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */
2221 (seg == 12 && seg_cnt > 13) || /* but necessary. */
2222 (seg == 17 && seg_cnt > 18)) {
2223 /* Continuation entry points to outbound address list. */
2224 map = pci_map_single(qdev->pdev, oal,
2225 sizeof(struct oal),
2226 PCI_DMA_TODEVICE);
2227 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2228 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2229 oal_entry->len =
2230 cpu_to_le32(sizeof(struct oal) |
2231 OAL_CONT_ENTRY);
2232 pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
2233 map);
2234 pci_unmap_len_set(&tx_cb->map[seg], maplen,
2235 len);
2236 oal_entry = (struct oal_entry *)oal;
2237 oal++;
2238 seg++;
2239 }
2063 2240
2241 map =
2242 pci_map_page(qdev->pdev, frag->page,
2243 frag->page_offset, frag->size,
2244 PCI_DMA_TODEVICE);
2245 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2246 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2247 oal_entry->len = cpu_to_le32(frag->size);
2248 pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2249 pci_unmap_len_set(&tx_cb->map[seg], maplen,
2250 frag->size);
2251 }
2252 /* Terminate the last segment. */
2253 oal_entry->len =
2254 cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
2255 }
2256 wmb();
2064 qdev->req_producer_index++; 2257 qdev->req_producer_index++;
2065 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) 2258 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
2066 qdev->req_producer_index = 0; 2259 qdev->req_producer_index = 0;
@@ -2074,8 +2267,10 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2074 printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n", 2267 printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
2075 ndev->name, qdev->req_producer_index, skb->len); 2268 ndev->name, qdev->req_producer_index, skb->len);
2076 2269
2270 atomic_dec(&qdev->tx_count);
2077 return NETDEV_TX_OK; 2271 return NETDEV_TX_OK;
2078} 2272}
2273
2079static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) 2274static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2080{ 2275{
2081 qdev->req_q_size = 2276 qdev->req_q_size =
@@ -2359,7 +2554,22 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2359 return 0; 2554 return 0;
2360} 2555}
2361 2556
2362static void ql_create_send_free_list(struct ql3_adapter *qdev) 2557static void ql_free_send_free_list(struct ql3_adapter *qdev)
2558{
2559 struct ql_tx_buf_cb *tx_cb;
2560 int i;
2561
2562 tx_cb = &qdev->tx_buf[0];
2563 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2564 if (tx_cb->oal) {
2565 kfree(tx_cb->oal);
2566 tx_cb->oal = NULL;
2567 }
2568 tx_cb++;
2569 }
2570}
2571
2572static int ql_create_send_free_list(struct ql3_adapter *qdev)
2363{ 2573{
2364 struct ql_tx_buf_cb *tx_cb; 2574 struct ql_tx_buf_cb *tx_cb;
2365 int i; 2575 int i;
@@ -2368,11 +2578,16 @@ static void ql_create_send_free_list(struct ql3_adapter *qdev)
2368 2578
2369 /* Create free list of transmit buffers */ 2579 /* Create free list of transmit buffers */
2370 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 2580 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2581
2371 tx_cb = &qdev->tx_buf[i]; 2582 tx_cb = &qdev->tx_buf[i];
2372 tx_cb->skb = NULL; 2583 tx_cb->skb = NULL;
2373 tx_cb->queue_entry = req_q_curr; 2584 tx_cb->queue_entry = req_q_curr;
2374 req_q_curr++; 2585 req_q_curr++;
2586 tx_cb->oal = kmalloc(512, GFP_KERNEL);
2587 if (tx_cb->oal == NULL)
2588 return -1;
2375 } 2589 }
2590 return 0;
2376} 2591}
2377 2592
2378static int ql_alloc_mem_resources(struct ql3_adapter *qdev) 2593static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
@@ -2447,12 +2662,14 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2447 2662
2448 /* Initialize the large buffer queue. */ 2663 /* Initialize the large buffer queue. */
2449 ql_init_large_buffers(qdev); 2664 ql_init_large_buffers(qdev);
2450 ql_create_send_free_list(qdev); 2665 if (ql_create_send_free_list(qdev))
2666 goto err_free_list;
2451 2667
2452 qdev->rsp_current = qdev->rsp_q_virt_addr; 2668 qdev->rsp_current = qdev->rsp_q_virt_addr;
2453 2669
2454 return 0; 2670 return 0;
2455 2671err_free_list:
2672 ql_free_send_free_list(qdev);
2456err_small_buffers: 2673err_small_buffers:
2457 ql_free_buffer_queues(qdev); 2674 ql_free_buffer_queues(qdev);
2458err_buffer_queues: 2675err_buffer_queues:
@@ -2468,6 +2685,7 @@ err_req_rsp:
2468 2685
2469static void ql_free_mem_resources(struct ql3_adapter *qdev) 2686static void ql_free_mem_resources(struct ql3_adapter *qdev)
2470{ 2687{
2688 ql_free_send_free_list(qdev);
2471 ql_free_large_buffers(qdev); 2689 ql_free_large_buffers(qdev);
2472 ql_free_small_buffers(qdev); 2690 ql_free_small_buffers(qdev);
2473 ql_free_buffer_queues(qdev); 2691 ql_free_buffer_queues(qdev);
@@ -2766,11 +2984,20 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
2766 } 2984 }
2767 2985
2768 /* Enable Ethernet Function */ 2986 /* Enable Ethernet Function */
2769 value = 2987 if (qdev->device_id == QL3032_DEVICE_ID) {
2770 (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI | 2988 value =
2771 PORT_CONTROL_HH); 2989 (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
2772 ql_write_page0_reg(qdev, &port_regs->portControl, 2990 QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4);
2773 ((value << 16) | value)); 2991 ql_write_page0_reg(qdev, &port_regs->functionControl,
2992 ((value << 16) | value));
2993 } else {
2994 value =
2995 (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
2996 PORT_CONTROL_HH);
2997 ql_write_page0_reg(qdev, &port_regs->portControl,
2998 ((value << 16) | value));
2999 }
3000
2774 3001
2775out: 3002out:
2776 return status; 3003 return status;
@@ -2917,8 +3144,10 @@ static void ql_display_dev_info(struct net_device *ndev)
2917 struct pci_dev *pdev = qdev->pdev; 3144 struct pci_dev *pdev = qdev->pdev;
2918 3145
2919 printk(KERN_INFO PFX 3146 printk(KERN_INFO PFX
2920 "\n%s Adapter %d RevisionID %d found on PCI slot %d.\n", 3147 "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n",
2921 DRV_NAME, qdev->index, qdev->chip_rev_id, qdev->pci_slot); 3148 DRV_NAME, qdev->index, qdev->chip_rev_id,
3149 (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
3150 qdev->pci_slot);
2922 printk(KERN_INFO PFX 3151 printk(KERN_INFO PFX
2923 "%s Interface.\n", 3152 "%s Interface.\n",
2924 test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER"); 3153 test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER");
@@ -3212,15 +3441,22 @@ static void ql_reset_work(struct work_struct *work)
3212 * Loop through the active list and return the skb. 3441 * Loop through the active list and return the skb.
3213 */ 3442 */
3214 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 3443 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
3444 int j;
3215 tx_cb = &qdev->tx_buf[i]; 3445 tx_cb = &qdev->tx_buf[i];
3216 if (tx_cb->skb) { 3446 if (tx_cb->skb) {
3217
3218 printk(KERN_DEBUG PFX 3447 printk(KERN_DEBUG PFX
3219 "%s: Freeing lost SKB.\n", 3448 "%s: Freeing lost SKB.\n",
3220 qdev->ndev->name); 3449 qdev->ndev->name);
3221 pci_unmap_single(qdev->pdev, 3450 pci_unmap_single(qdev->pdev,
3222 pci_unmap_addr(tx_cb, mapaddr), 3451 pci_unmap_addr(&tx_cb->map[0], mapaddr),
3223 pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE); 3452 pci_unmap_len(&tx_cb->map[0], maplen),
3453 PCI_DMA_TODEVICE);
3454 for(j=1;j<tx_cb->seg_count;j++) {
3455 pci_unmap_page(qdev->pdev,
3456 pci_unmap_addr(&tx_cb->map[j],mapaddr),
3457 pci_unmap_len(&tx_cb->map[j],maplen),
3458 PCI_DMA_TODEVICE);
3459 }
3224 dev_kfree_skb(tx_cb->skb); 3460 dev_kfree_skb(tx_cb->skb);
3225 tx_cb->skb = NULL; 3461 tx_cb->skb = NULL;
3226 } 3462 }
@@ -3379,21 +3615,24 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3379 SET_MODULE_OWNER(ndev); 3615 SET_MODULE_OWNER(ndev);
3380 SET_NETDEV_DEV(ndev, &pdev->dev); 3616 SET_NETDEV_DEV(ndev, &pdev->dev);
3381 3617
3382 if (pci_using_dac)
3383 ndev->features |= NETIF_F_HIGHDMA;
3384
3385 pci_set_drvdata(pdev, ndev); 3618 pci_set_drvdata(pdev, ndev);
3386 3619
3387 qdev = netdev_priv(ndev); 3620 qdev = netdev_priv(ndev);
3388 qdev->index = cards_found; 3621 qdev->index = cards_found;
3389 qdev->ndev = ndev; 3622 qdev->ndev = ndev;
3390 qdev->pdev = pdev; 3623 qdev->pdev = pdev;
3624 qdev->device_id = pci_entry->device;
3391 qdev->port_link_state = LS_DOWN; 3625 qdev->port_link_state = LS_DOWN;
3392 if (msi) 3626 if (msi)
3393 qdev->msi = 1; 3627 qdev->msi = 1;
3394 3628
3395 qdev->msg_enable = netif_msg_init(debug, default_msg); 3629 qdev->msg_enable = netif_msg_init(debug, default_msg);
3396 3630
3631 if (pci_using_dac)
3632 ndev->features |= NETIF_F_HIGHDMA;
3633 if (qdev->device_id == QL3032_DEVICE_ID)
3634 ndev->features |= (NETIF_F_HW_CSUM | NETIF_F_SG);
3635
3397 qdev->mem_map_registers = 3636 qdev->mem_map_registers =
3398 ioremap_nocache(pci_resource_start(pdev, 1), 3637 ioremap_nocache(pci_resource_start(pdev, 1),
3399 pci_resource_len(qdev->pdev, 1)); 3638 pci_resource_len(qdev->pdev, 1));
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
index ea94de7fd071..b2d76ea68827 100644..100755
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -21,7 +21,9 @@
21 21
22#define OPCODE_UPDATE_NCB_IOCB 0xF0 22#define OPCODE_UPDATE_NCB_IOCB 0xF0
23#define OPCODE_IB_MAC_IOCB 0xF9 23#define OPCODE_IB_MAC_IOCB 0xF9
24#define OPCODE_IB_3032_MAC_IOCB 0x09
24#define OPCODE_IB_IP_IOCB 0xFA 25#define OPCODE_IB_IP_IOCB 0xFA
26#define OPCODE_IB_3032_IP_IOCB 0x0A
25#define OPCODE_IB_TCP_IOCB 0xFB 27#define OPCODE_IB_TCP_IOCB 0xFB
26#define OPCODE_DUMP_PROTO_IOCB 0xFE 28#define OPCODE_DUMP_PROTO_IOCB 0xFE
27#define OPCODE_BUFFER_ALERT_IOCB 0xFB 29#define OPCODE_BUFFER_ALERT_IOCB 0xFB
@@ -37,18 +39,23 @@
37struct ob_mac_iocb_req { 39struct ob_mac_iocb_req {
38 u8 opcode; 40 u8 opcode;
39 u8 flags; 41 u8 flags;
40#define OB_MAC_IOCB_REQ_MA 0xC0 42#define OB_MAC_IOCB_REQ_MA 0xe0
41#define OB_MAC_IOCB_REQ_F 0x20 43#define OB_MAC_IOCB_REQ_F 0x10
42#define OB_MAC_IOCB_REQ_X 0x10 44#define OB_MAC_IOCB_REQ_X 0x08
43#define OB_MAC_IOCB_REQ_D 0x02 45#define OB_MAC_IOCB_REQ_D 0x02
44#define OB_MAC_IOCB_REQ_I 0x01 46#define OB_MAC_IOCB_REQ_I 0x01
45 __le16 reserved0; 47 u8 flags1;
48#define OB_3032MAC_IOCB_REQ_IC 0x04
49#define OB_3032MAC_IOCB_REQ_TC 0x02
50#define OB_3032MAC_IOCB_REQ_UC 0x01
51 u8 reserved0;
46 52
47 __le32 transaction_id; 53 __le32 transaction_id;
48 __le16 data_len; 54 __le16 data_len;
49 __le16 reserved1; 55 u8 ip_hdr_off;
56 u8 ip_hdr_len;
57 __le32 reserved1;
50 __le32 reserved2; 58 __le32 reserved2;
51 __le32 reserved3;
52 __le32 buf_addr0_low; 59 __le32 buf_addr0_low;
53 __le32 buf_addr0_high; 60 __le32 buf_addr0_high;
54 __le32 buf_0_len; 61 __le32 buf_0_len;
@@ -58,8 +65,8 @@ struct ob_mac_iocb_req {
58 __le32 buf_addr2_low; 65 __le32 buf_addr2_low;
59 __le32 buf_addr2_high; 66 __le32 buf_addr2_high;
60 __le32 buf_2_len; 67 __le32 buf_2_len;
68 __le32 reserved3;
61 __le32 reserved4; 69 __le32 reserved4;
62 __le32 reserved5;
63}; 70};
64/* 71/*
65 * The following constants define control bits for buffer 72 * The following constants define control bits for buffer
@@ -74,6 +81,7 @@ struct ob_mac_iocb_rsp {
74 u8 opcode; 81 u8 opcode;
75 u8 flags; 82 u8 flags;
76#define OB_MAC_IOCB_RSP_P 0x08 83#define OB_MAC_IOCB_RSP_P 0x08
84#define OB_MAC_IOCB_RSP_L 0x04
77#define OB_MAC_IOCB_RSP_S 0x02 85#define OB_MAC_IOCB_RSP_S 0x02
78#define OB_MAC_IOCB_RSP_I 0x01 86#define OB_MAC_IOCB_RSP_I 0x01
79 87
@@ -85,6 +93,7 @@ struct ob_mac_iocb_rsp {
85 93
86struct ib_mac_iocb_rsp { 94struct ib_mac_iocb_rsp {
87 u8 opcode; 95 u8 opcode;
96#define IB_MAC_IOCB_RSP_V 0x80
88 u8 flags; 97 u8 flags;
89#define IB_MAC_IOCB_RSP_S 0x80 98#define IB_MAC_IOCB_RSP_S 0x80
90#define IB_MAC_IOCB_RSP_H1 0x40 99#define IB_MAC_IOCB_RSP_H1 0x40
@@ -138,6 +147,7 @@ struct ob_ip_iocb_req {
138struct ob_ip_iocb_rsp { 147struct ob_ip_iocb_rsp {
139 u8 opcode; 148 u8 opcode;
140 u8 flags; 149 u8 flags;
150#define OB_MAC_IOCB_RSP_H 0x10
141#define OB_MAC_IOCB_RSP_E 0x08 151#define OB_MAC_IOCB_RSP_E 0x08
142#define OB_MAC_IOCB_RSP_L 0x04 152#define OB_MAC_IOCB_RSP_L 0x04
143#define OB_MAC_IOCB_RSP_S 0x02 153#define OB_MAC_IOCB_RSP_S 0x02
@@ -220,6 +230,10 @@ struct ob_tcp_iocb_rsp {
220 230
221struct ib_ip_iocb_rsp { 231struct ib_ip_iocb_rsp {
222 u8 opcode; 232 u8 opcode;
233#define IB_IP_IOCB_RSP_3032_V 0x80
234#define IB_IP_IOCB_RSP_3032_O 0x40
235#define IB_IP_IOCB_RSP_3032_I 0x20
236#define IB_IP_IOCB_RSP_3032_R 0x10
223 u8 flags; 237 u8 flags;
224#define IB_IP_IOCB_RSP_S 0x80 238#define IB_IP_IOCB_RSP_S 0x80
225#define IB_IP_IOCB_RSP_H1 0x40 239#define IB_IP_IOCB_RSP_H1 0x40
@@ -230,6 +244,12 @@ struct ib_ip_iocb_rsp {
230 244
231 __le16 length; 245 __le16 length;
232 __le16 checksum; 246 __le16 checksum;
247#define IB_IP_IOCB_RSP_3032_ICE 0x01
248#define IB_IP_IOCB_RSP_3032_CE 0x02
249#define IB_IP_IOCB_RSP_3032_NUC 0x04
250#define IB_IP_IOCB_RSP_3032_UDP 0x08
251#define IB_IP_IOCB_RSP_3032_TCP 0x10
252#define IB_IP_IOCB_RSP_3032_IPE 0x20
233 __le16 reserved; 253 __le16 reserved;
234#define IB_IP_IOCB_RSP_R 0x01 254#define IB_IP_IOCB_RSP_R 0x01
235 __le32 ial_low; 255 __le32 ial_low;
@@ -524,6 +544,21 @@ enum {
524 IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005, 544 IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005,
525 IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006, 545 IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006,
526 IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007, 546 IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007,
547 IP_ADDR_INDEX_REG_6 = 0x0008,
548 IP_ADDR_INDEX_REG_OFFSET_MASK = 0x0030,
549 IP_ADDR_INDEX_REG_E = 0x0040,
550};
551enum {
552 QL3032_PORT_CONTROL_DS = 0x0001,
553 QL3032_PORT_CONTROL_HH = 0x0002,
554 QL3032_PORT_CONTROL_EIv6 = 0x0004,
555 QL3032_PORT_CONTROL_EIv4 = 0x0008,
556 QL3032_PORT_CONTROL_ET = 0x0010,
557 QL3032_PORT_CONTROL_EF = 0x0020,
558 QL3032_PORT_CONTROL_DRM = 0x0040,
559 QL3032_PORT_CONTROL_RLB = 0x0080,
560 QL3032_PORT_CONTROL_RCB = 0x0100,
561 QL3032_PORT_CONTROL_KIE = 0x0200,
527}; 562};
528 563
529enum { 564enum {
@@ -657,7 +692,8 @@ struct ql3xxx_port_registers {
657 u32 internalRamWDataReg; 692 u32 internalRamWDataReg;
658 u32 reclaimedBufferAddrRegLow; 693 u32 reclaimedBufferAddrRegLow;
659 u32 reclaimedBufferAddrRegHigh; 694 u32 reclaimedBufferAddrRegHigh;
660 u32 reserved[2]; 695 u32 tcpConfiguration;
696 u32 functionControl;
661 u32 fpgaRevID; 697 u32 fpgaRevID;
662 u32 localRamAddr; 698 u32 localRamAddr;
663 u32 localRamDataAutoIncr; 699 u32 localRamDataAutoIncr;
@@ -963,6 +999,7 @@ struct eeprom_data {
963 999
964#define QL3XXX_VENDOR_ID 0x1077 1000#define QL3XXX_VENDOR_ID 0x1077
965#define QL3022_DEVICE_ID 0x3022 1001#define QL3022_DEVICE_ID 0x3022
1002#define QL3032_DEVICE_ID 0x3032
966 1003
967/* MTU & Frame Size stuff */ 1004/* MTU & Frame Size stuff */
968#define NORMAL_MTU_SIZE ETH_DATA_LEN 1005#define NORMAL_MTU_SIZE ETH_DATA_LEN
@@ -1038,11 +1075,41 @@ struct ql_rcv_buf_cb {
1038 int index; 1075 int index;
1039}; 1076};
1040 1077
1078/*
1079 * Original IOCB has 3 sg entries:
1080 * first points to skb-data area
1081 * second points to first frag
1082 * third points to next oal.
1083 * OAL has 5 entries:
1084 * 1 thru 4 point to frags
1085 * fifth points to next oal.
1086 */
1087#define MAX_OAL_CNT ((MAX_SKB_FRAGS-1)/4 + 1)
1088
1089struct oal_entry {
1090 u32 dma_lo;
1091 u32 dma_hi;
1092 u32 len;
1093#define OAL_LAST_ENTRY 0x80000000 /* Last valid buffer in list. */
1094#define OAL_CONT_ENTRY 0x40000000 /* points to an OAL. (continuation) */
1095 u32 reserved;
1096};
1097
1098struct oal {
1099 struct oal_entry oal_entry[5];
1100};
1101
1102struct map_list {
1103 DECLARE_PCI_UNMAP_ADDR(mapaddr);
1104 DECLARE_PCI_UNMAP_LEN(maplen);
1105};
1106
1041struct ql_tx_buf_cb { 1107struct ql_tx_buf_cb {
1042 struct sk_buff *skb; 1108 struct sk_buff *skb;
1043 struct ob_mac_iocb_req *queue_entry ; 1109 struct ob_mac_iocb_req *queue_entry ;
1044 DECLARE_PCI_UNMAP_ADDR(mapaddr); 1110 int seg_count;
1045 DECLARE_PCI_UNMAP_LEN(maplen); 1111 struct oal *oal;
1112 struct map_list map[MAX_SKB_FRAGS+1];
1046}; 1113};
1047 1114
1048/* definitions for type field */ 1115/* definitions for type field */
@@ -1189,6 +1256,7 @@ struct ql3_adapter {
1189 struct delayed_work reset_work; 1256 struct delayed_work reset_work;
1190 struct delayed_work tx_timeout_work; 1257 struct delayed_work tx_timeout_work;
1191 u32 max_frame_size; 1258 u32 max_frame_size;
1259 u32 device_id;
1192}; 1260};
1193 1261
1194#endif /* _QLA3XXX_H_ */ 1262#endif /* _QLA3XXX_H_ */
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h
index a914fef44309..0e345cbc2bf9 100644
--- a/drivers/net/s2io-regs.h
+++ b/drivers/net/s2io-regs.h
@@ -15,7 +15,7 @@
15 15
16#define TBD 0 16#define TBD 0
17 17
18typedef struct _XENA_dev_config { 18struct XENA_dev_config {
19/* Convention: mHAL_XXX is mask, vHAL_XXX is value */ 19/* Convention: mHAL_XXX is mask, vHAL_XXX is value */
20 20
21/* General Control-Status Registers */ 21/* General Control-Status Registers */
@@ -300,6 +300,7 @@ typedef struct _XENA_dev_config {
300 u64 gpio_control; 300 u64 gpio_control;
301#define GPIO_CTRL_GPIO_0 BIT(8) 301#define GPIO_CTRL_GPIO_0 BIT(8)
302 u64 misc_control; 302 u64 misc_control;
303#define FAULT_BEHAVIOUR BIT(0)
303#define EXT_REQ_EN BIT(1) 304#define EXT_REQ_EN BIT(1)
304#define MISC_LINK_STABILITY_PRD(val) vBIT(val,29,3) 305#define MISC_LINK_STABILITY_PRD(val) vBIT(val,29,3)
305 306
@@ -851,9 +852,9 @@ typedef struct _XENA_dev_config {
851#define SPI_CONTROL_DONE BIT(6) 852#define SPI_CONTROL_DONE BIT(6)
852 u64 spi_data; 853 u64 spi_data;
853#define SPI_DATA_WRITE(data,len) vBIT(data,0,len) 854#define SPI_DATA_WRITE(data,len) vBIT(data,0,len)
854} XENA_dev_config_t; 855};
855 856
856#define XENA_REG_SPACE sizeof(XENA_dev_config_t) 857#define XENA_REG_SPACE sizeof(struct XENA_dev_config)
857#define XENA_EEPROM_SPACE (0x01 << 11) 858#define XENA_EEPROM_SPACE (0x01 << 11)
858 859
859#endif /* _REGS_H */ 860#endif /* _REGS_H */
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 1dd66b8ea0fa..639fbc0f16f3 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -77,7 +77,7 @@
77#include "s2io.h" 77#include "s2io.h"
78#include "s2io-regs.h" 78#include "s2io-regs.h"
79 79
80#define DRV_VERSION "2.0.15.2" 80#define DRV_VERSION "2.0.16.1"
81 81
82/* S2io Driver name & version. */ 82/* S2io Driver name & version. */
83static char s2io_driver_name[] = "Neterion"; 83static char s2io_driver_name[] = "Neterion";
@@ -86,7 +86,7 @@ static char s2io_driver_version[] = DRV_VERSION;
86static int rxd_size[4] = {32,48,48,64}; 86static int rxd_size[4] = {32,48,48,64};
87static int rxd_count[4] = {127,85,85,63}; 87static int rxd_count[4] = {127,85,85,63};
88 88
89static inline int RXD_IS_UP2DT(RxD_t *rxdp) 89static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
90{ 90{
91 int ret; 91 int ret;
92 92
@@ -111,9 +111,9 @@ static inline int RXD_IS_UP2DT(RxD_t *rxdp)
111#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status)) 111#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
112#define PANIC 1 112#define PANIC 1
113#define LOW 2 113#define LOW 2
114static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring) 114static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
115{ 115{
116 mac_info_t *mac_control; 116 struct mac_info *mac_control;
117 117
118 mac_control = &sp->mac_control; 118 mac_control = &sp->mac_control;
119 if (rxb_size <= rxd_count[sp->rxd_mode]) 119 if (rxb_size <= rxd_count[sp->rxd_mode])
@@ -286,7 +286,7 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
286static void s2io_vlan_rx_register(struct net_device *dev, 286static void s2io_vlan_rx_register(struct net_device *dev,
287 struct vlan_group *grp) 287 struct vlan_group *grp)
288{ 288{
289 nic_t *nic = dev->priv; 289 struct s2io_nic *nic = dev->priv;
290 unsigned long flags; 290 unsigned long flags;
291 291
292 spin_lock_irqsave(&nic->tx_lock, flags); 292 spin_lock_irqsave(&nic->tx_lock, flags);
@@ -297,7 +297,7 @@ static void s2io_vlan_rx_register(struct net_device *dev,
297/* Unregister the vlan */ 297/* Unregister the vlan */
298static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid) 298static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
299{ 299{
300 nic_t *nic = dev->priv; 300 struct s2io_nic *nic = dev->priv;
301 unsigned long flags; 301 unsigned long flags;
302 302
303 spin_lock_irqsave(&nic->tx_lock, flags); 303 spin_lock_irqsave(&nic->tx_lock, flags);
@@ -401,9 +401,10 @@ S2IO_PARM_INT(lro, 0);
401 * aggregation happens until we hit max IP pkt size(64K) 401 * aggregation happens until we hit max IP pkt size(64K)
402 */ 402 */
403S2IO_PARM_INT(lro_max_pkts, 0xFFFF); 403S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
404#ifndef CONFIG_S2IO_NAPI
405S2IO_PARM_INT(indicate_max_pkts, 0); 404S2IO_PARM_INT(indicate_max_pkts, 0);
406#endif 405
406S2IO_PARM_INT(napi, 1);
407S2IO_PARM_INT(ufo, 0);
407 408
408static unsigned int tx_fifo_len[MAX_TX_FIFOS] = 409static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
409 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN}; 410 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
@@ -457,14 +458,14 @@ static int init_shared_mem(struct s2io_nic *nic)
457 u32 size; 458 u32 size;
458 void *tmp_v_addr, *tmp_v_addr_next; 459 void *tmp_v_addr, *tmp_v_addr_next;
459 dma_addr_t tmp_p_addr, tmp_p_addr_next; 460 dma_addr_t tmp_p_addr, tmp_p_addr_next;
460 RxD_block_t *pre_rxd_blk = NULL; 461 struct RxD_block *pre_rxd_blk = NULL;
461 int i, j, blk_cnt, rx_sz, tx_sz; 462 int i, j, blk_cnt;
462 int lst_size, lst_per_page; 463 int lst_size, lst_per_page;
463 struct net_device *dev = nic->dev; 464 struct net_device *dev = nic->dev;
464 unsigned long tmp; 465 unsigned long tmp;
465 buffAdd_t *ba; 466 struct buffAdd *ba;
466 467
467 mac_info_t *mac_control; 468 struct mac_info *mac_control;
468 struct config_param *config; 469 struct config_param *config;
469 470
470 mac_control = &nic->mac_control; 471 mac_control = &nic->mac_control;
@@ -482,13 +483,12 @@ static int init_shared_mem(struct s2io_nic *nic)
482 return -EINVAL; 483 return -EINVAL;
483 } 484 }
484 485
485 lst_size = (sizeof(TxD_t) * config->max_txds); 486 lst_size = (sizeof(struct TxD) * config->max_txds);
486 tx_sz = lst_size * size;
487 lst_per_page = PAGE_SIZE / lst_size; 487 lst_per_page = PAGE_SIZE / lst_size;
488 488
489 for (i = 0; i < config->tx_fifo_num; i++) { 489 for (i = 0; i < config->tx_fifo_num; i++) {
490 int fifo_len = config->tx_cfg[i].fifo_len; 490 int fifo_len = config->tx_cfg[i].fifo_len;
491 int list_holder_size = fifo_len * sizeof(list_info_hold_t); 491 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
492 mac_control->fifos[i].list_info = kmalloc(list_holder_size, 492 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
493 GFP_KERNEL); 493 GFP_KERNEL);
494 if (!mac_control->fifos[i].list_info) { 494 if (!mac_control->fifos[i].list_info) {
@@ -579,10 +579,9 @@ static int init_shared_mem(struct s2io_nic *nic)
579 mac_control->rings[i].block_count; 579 mac_control->rings[i].block_count;
580 } 580 }
581 if (nic->rxd_mode == RXD_MODE_1) 581 if (nic->rxd_mode == RXD_MODE_1)
582 size = (size * (sizeof(RxD1_t))); 582 size = (size * (sizeof(struct RxD1)));
583 else 583 else
584 size = (size * (sizeof(RxD3_t))); 584 size = (size * (sizeof(struct RxD3)));
585 rx_sz = size;
586 585
587 for (i = 0; i < config->rx_ring_num; i++) { 586 for (i = 0; i < config->rx_ring_num; i++) {
588 mac_control->rings[i].rx_curr_get_info.block_index = 0; 587 mac_control->rings[i].rx_curr_get_info.block_index = 0;
@@ -600,7 +599,7 @@ static int init_shared_mem(struct s2io_nic *nic)
600 (rxd_count[nic->rxd_mode] + 1); 599 (rxd_count[nic->rxd_mode] + 1);
601 /* Allocating all the Rx blocks */ 600 /* Allocating all the Rx blocks */
602 for (j = 0; j < blk_cnt; j++) { 601 for (j = 0; j < blk_cnt; j++) {
603 rx_block_info_t *rx_blocks; 602 struct rx_block_info *rx_blocks;
604 int l; 603 int l;
605 604
606 rx_blocks = &mac_control->rings[i].rx_blocks[j]; 605 rx_blocks = &mac_control->rings[i].rx_blocks[j];
@@ -620,9 +619,11 @@ static int init_shared_mem(struct s2io_nic *nic)
620 memset(tmp_v_addr, 0, size); 619 memset(tmp_v_addr, 0, size);
621 rx_blocks->block_virt_addr = tmp_v_addr; 620 rx_blocks->block_virt_addr = tmp_v_addr;
622 rx_blocks->block_dma_addr = tmp_p_addr; 621 rx_blocks->block_dma_addr = tmp_p_addr;
623 rx_blocks->rxds = kmalloc(sizeof(rxd_info_t)* 622 rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
624 rxd_count[nic->rxd_mode], 623 rxd_count[nic->rxd_mode],
625 GFP_KERNEL); 624 GFP_KERNEL);
625 if (!rx_blocks->rxds)
626 return -ENOMEM;
626 for (l=0; l<rxd_count[nic->rxd_mode];l++) { 627 for (l=0; l<rxd_count[nic->rxd_mode];l++) {
627 rx_blocks->rxds[l].virt_addr = 628 rx_blocks->rxds[l].virt_addr =
628 rx_blocks->block_virt_addr + 629 rx_blocks->block_virt_addr +
@@ -645,7 +646,7 @@ static int init_shared_mem(struct s2io_nic *nic)
645 mac_control->rings[i].rx_blocks[(j + 1) % 646 mac_control->rings[i].rx_blocks[(j + 1) %
646 blk_cnt].block_dma_addr; 647 blk_cnt].block_dma_addr;
647 648
648 pre_rxd_blk = (RxD_block_t *) tmp_v_addr; 649 pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
649 pre_rxd_blk->reserved_2_pNext_RxD_block = 650 pre_rxd_blk->reserved_2_pNext_RxD_block =
650 (unsigned long) tmp_v_addr_next; 651 (unsigned long) tmp_v_addr_next;
651 pre_rxd_blk->pNext_RxD_Blk_physical = 652 pre_rxd_blk->pNext_RxD_Blk_physical =
@@ -661,14 +662,14 @@ static int init_shared_mem(struct s2io_nic *nic)
661 blk_cnt = config->rx_cfg[i].num_rxd / 662 blk_cnt = config->rx_cfg[i].num_rxd /
662 (rxd_count[nic->rxd_mode]+ 1); 663 (rxd_count[nic->rxd_mode]+ 1);
663 mac_control->rings[i].ba = 664 mac_control->rings[i].ba =
664 kmalloc((sizeof(buffAdd_t *) * blk_cnt), 665 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
665 GFP_KERNEL); 666 GFP_KERNEL);
666 if (!mac_control->rings[i].ba) 667 if (!mac_control->rings[i].ba)
667 return -ENOMEM; 668 return -ENOMEM;
668 for (j = 0; j < blk_cnt; j++) { 669 for (j = 0; j < blk_cnt; j++) {
669 int k = 0; 670 int k = 0;
670 mac_control->rings[i].ba[j] = 671 mac_control->rings[i].ba[j] =
671 kmalloc((sizeof(buffAdd_t) * 672 kmalloc((sizeof(struct buffAdd) *
672 (rxd_count[nic->rxd_mode] + 1)), 673 (rxd_count[nic->rxd_mode] + 1)),
673 GFP_KERNEL); 674 GFP_KERNEL);
674 if (!mac_control->rings[i].ba[j]) 675 if (!mac_control->rings[i].ba[j])
@@ -700,7 +701,7 @@ static int init_shared_mem(struct s2io_nic *nic)
700 } 701 }
701 702
702 /* Allocation and initialization of Statistics block */ 703 /* Allocation and initialization of Statistics block */
703 size = sizeof(StatInfo_t); 704 size = sizeof(struct stat_block);
704 mac_control->stats_mem = pci_alloc_consistent 705 mac_control->stats_mem = pci_alloc_consistent
705 (nic->pdev, size, &mac_control->stats_mem_phy); 706 (nic->pdev, size, &mac_control->stats_mem_phy);
706 707
@@ -715,7 +716,7 @@ static int init_shared_mem(struct s2io_nic *nic)
715 mac_control->stats_mem_sz = size; 716 mac_control->stats_mem_sz = size;
716 717
717 tmp_v_addr = mac_control->stats_mem; 718 tmp_v_addr = mac_control->stats_mem;
718 mac_control->stats_info = (StatInfo_t *) tmp_v_addr; 719 mac_control->stats_info = (struct stat_block *) tmp_v_addr;
719 memset(tmp_v_addr, 0, size); 720 memset(tmp_v_addr, 0, size);
720 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name, 721 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
721 (unsigned long long) tmp_p_addr); 722 (unsigned long long) tmp_p_addr);
@@ -735,7 +736,7 @@ static void free_shared_mem(struct s2io_nic *nic)
735 int i, j, blk_cnt, size; 736 int i, j, blk_cnt, size;
736 void *tmp_v_addr; 737 void *tmp_v_addr;
737 dma_addr_t tmp_p_addr; 738 dma_addr_t tmp_p_addr;
738 mac_info_t *mac_control; 739 struct mac_info *mac_control;
739 struct config_param *config; 740 struct config_param *config;
740 int lst_size, lst_per_page; 741 int lst_size, lst_per_page;
741 struct net_device *dev = nic->dev; 742 struct net_device *dev = nic->dev;
@@ -746,7 +747,7 @@ static void free_shared_mem(struct s2io_nic *nic)
746 mac_control = &nic->mac_control; 747 mac_control = &nic->mac_control;
747 config = &nic->config; 748 config = &nic->config;
748 749
749 lst_size = (sizeof(TxD_t) * config->max_txds); 750 lst_size = (sizeof(struct TxD) * config->max_txds);
750 lst_per_page = PAGE_SIZE / lst_size; 751 lst_per_page = PAGE_SIZE / lst_size;
751 752
752 for (i = 0; i < config->tx_fifo_num; i++) { 753 for (i = 0; i < config->tx_fifo_num; i++) {
@@ -809,7 +810,7 @@ static void free_shared_mem(struct s2io_nic *nic)
809 if (!mac_control->rings[i].ba[j]) 810 if (!mac_control->rings[i].ba[j])
810 continue; 811 continue;
811 while (k != rxd_count[nic->rxd_mode]) { 812 while (k != rxd_count[nic->rxd_mode]) {
812 buffAdd_t *ba = 813 struct buffAdd *ba =
813 &mac_control->rings[i].ba[j][k]; 814 &mac_control->rings[i].ba[j][k];
814 kfree(ba->ba_0_org); 815 kfree(ba->ba_0_org);
815 kfree(ba->ba_1_org); 816 kfree(ba->ba_1_org);
@@ -835,9 +836,9 @@ static void free_shared_mem(struct s2io_nic *nic)
835 * s2io_verify_pci_mode - 836 * s2io_verify_pci_mode -
836 */ 837 */
837 838
838static int s2io_verify_pci_mode(nic_t *nic) 839static int s2io_verify_pci_mode(struct s2io_nic *nic)
839{ 840{
840 XENA_dev_config_t __iomem *bar0 = nic->bar0; 841 struct XENA_dev_config __iomem *bar0 = nic->bar0;
841 register u64 val64 = 0; 842 register u64 val64 = 0;
842 int mode; 843 int mode;
843 844
@@ -868,9 +869,9 @@ static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
868/** 869/**
869 * s2io_print_pci_mode - 870 * s2io_print_pci_mode -
870 */ 871 */
871static int s2io_print_pci_mode(nic_t *nic) 872static int s2io_print_pci_mode(struct s2io_nic *nic)
872{ 873{
873 XENA_dev_config_t __iomem *bar0 = nic->bar0; 874 struct XENA_dev_config __iomem *bar0 = nic->bar0;
874 register u64 val64 = 0; 875 register u64 val64 = 0;
875 int mode; 876 int mode;
876 struct config_param *config = &nic->config; 877 struct config_param *config = &nic->config;
@@ -938,13 +939,13 @@ static int s2io_print_pci_mode(nic_t *nic)
938 939
939static int init_nic(struct s2io_nic *nic) 940static int init_nic(struct s2io_nic *nic)
940{ 941{
941 XENA_dev_config_t __iomem *bar0 = nic->bar0; 942 struct XENA_dev_config __iomem *bar0 = nic->bar0;
942 struct net_device *dev = nic->dev; 943 struct net_device *dev = nic->dev;
943 register u64 val64 = 0; 944 register u64 val64 = 0;
944 void __iomem *add; 945 void __iomem *add;
945 u32 time; 946 u32 time;
946 int i, j; 947 int i, j;
947 mac_info_t *mac_control; 948 struct mac_info *mac_control;
948 struct config_param *config; 949 struct config_param *config;
949 int dtx_cnt = 0; 950 int dtx_cnt = 0;
950 unsigned long long mem_share; 951 unsigned long long mem_share;
@@ -1414,7 +1415,7 @@ static int init_nic(struct s2io_nic *nic)
1414 1415
1415 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | 1416 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1416 TTI_DATA2_MEM_TX_UFC_B(0x20) | 1417 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1417 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80); 1418 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1418 writeq(val64, &bar0->tti_data2_mem); 1419 writeq(val64, &bar0->tti_data2_mem);
1419 1420
1420 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD; 1421 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
@@ -1610,7 +1611,8 @@ static int init_nic(struct s2io_nic *nic)
1610 * that does not start on an ADB to reduce disconnects. 1611 * that does not start on an ADB to reduce disconnects.
1611 */ 1612 */
1612 if (nic->device_type == XFRAME_II_DEVICE) { 1613 if (nic->device_type == XFRAME_II_DEVICE) {
1613 val64 = EXT_REQ_EN | MISC_LINK_STABILITY_PRD(3); 1614 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1615 MISC_LINK_STABILITY_PRD(3);
1614 writeq(val64, &bar0->misc_control); 1616 writeq(val64, &bar0->misc_control);
1615 val64 = readq(&bar0->pic_control2); 1617 val64 = readq(&bar0->pic_control2);
1616 val64 &= ~(BIT(13)|BIT(14)|BIT(15)); 1618 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
@@ -1626,7 +1628,7 @@ static int init_nic(struct s2io_nic *nic)
1626#define LINK_UP_DOWN_INTERRUPT 1 1628#define LINK_UP_DOWN_INTERRUPT 1
1627#define MAC_RMAC_ERR_TIMER 2 1629#define MAC_RMAC_ERR_TIMER 2
1628 1630
1629static int s2io_link_fault_indication(nic_t *nic) 1631static int s2io_link_fault_indication(struct s2io_nic *nic)
1630{ 1632{
1631 if (nic->intr_type != INTA) 1633 if (nic->intr_type != INTA)
1632 return MAC_RMAC_ERR_TIMER; 1634 return MAC_RMAC_ERR_TIMER;
@@ -1649,14 +1651,14 @@ static int s2io_link_fault_indication(nic_t *nic)
1649 1651
1650static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag) 1652static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1651{ 1653{
1652 XENA_dev_config_t __iomem *bar0 = nic->bar0; 1654 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1653 register u64 val64 = 0, temp64 = 0; 1655 register u64 val64 = 0, temp64 = 0;
1654 1656
1655 /* Top level interrupt classification */ 1657 /* Top level interrupt classification */
1656 /* PIC Interrupts */ 1658 /* PIC Interrupts */
1657 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) { 1659 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1658 /* Enable PIC Intrs in the general intr mask register */ 1660 /* Enable PIC Intrs in the general intr mask register */
1659 val64 = TXPIC_INT_M | PIC_RX_INT_M; 1661 val64 = TXPIC_INT_M;
1660 if (flag == ENABLE_INTRS) { 1662 if (flag == ENABLE_INTRS) {
1661 temp64 = readq(&bar0->general_int_mask); 1663 temp64 = readq(&bar0->general_int_mask);
1662 temp64 &= ~((u64) val64); 1664 temp64 &= ~((u64) val64);
@@ -1694,70 +1696,6 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1694 } 1696 }
1695 } 1697 }
1696 1698
1697 /* DMA Interrupts */
1698 /* Enabling/Disabling Tx DMA interrupts */
1699 if (mask & TX_DMA_INTR) {
1700 /* Enable TxDMA Intrs in the general intr mask register */
1701 val64 = TXDMA_INT_M;
1702 if (flag == ENABLE_INTRS) {
1703 temp64 = readq(&bar0->general_int_mask);
1704 temp64 &= ~((u64) val64);
1705 writeq(temp64, &bar0->general_int_mask);
1706 /*
1707 * Keep all interrupts other than PFC interrupt
1708 * and PCC interrupt disabled in DMA level.
1709 */
1710 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1711 TXDMA_PCC_INT_M);
1712 writeq(val64, &bar0->txdma_int_mask);
1713 /*
1714 * Enable only the MISC error 1 interrupt in PFC block
1715 */
1716 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1717 writeq(val64, &bar0->pfc_err_mask);
1718 /*
1719 * Enable only the FB_ECC error interrupt in PCC block
1720 */
1721 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1722 writeq(val64, &bar0->pcc_err_mask);
1723 } else if (flag == DISABLE_INTRS) {
1724 /*
1725 * Disable TxDMA Intrs in the general intr mask
1726 * register
1727 */
1728 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1729 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1730 temp64 = readq(&bar0->general_int_mask);
1731 val64 |= temp64;
1732 writeq(val64, &bar0->general_int_mask);
1733 }
1734 }
1735
1736 /* Enabling/Disabling Rx DMA interrupts */
1737 if (mask & RX_DMA_INTR) {
1738 /* Enable RxDMA Intrs in the general intr mask register */
1739 val64 = RXDMA_INT_M;
1740 if (flag == ENABLE_INTRS) {
1741 temp64 = readq(&bar0->general_int_mask);
1742 temp64 &= ~((u64) val64);
1743 writeq(temp64, &bar0->general_int_mask);
1744 /*
1745 * All RxDMA block interrupts are disabled for now
1746 * TODO
1747 */
1748 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1749 } else if (flag == DISABLE_INTRS) {
1750 /*
1751 * Disable RxDMA Intrs in the general intr mask
1752 * register
1753 */
1754 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1755 temp64 = readq(&bar0->general_int_mask);
1756 val64 |= temp64;
1757 writeq(val64, &bar0->general_int_mask);
1758 }
1759 }
1760
1761 /* MAC Interrupts */ 1699 /* MAC Interrupts */
1762 /* Enabling/Disabling MAC interrupts */ 1700 /* Enabling/Disabling MAC interrupts */
1763 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) { 1701 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
@@ -1784,53 +1722,6 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1784 } 1722 }
1785 } 1723 }
1786 1724
1787 /* XGXS Interrupts */
1788 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1789 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1790 if (flag == ENABLE_INTRS) {
1791 temp64 = readq(&bar0->general_int_mask);
1792 temp64 &= ~((u64) val64);
1793 writeq(temp64, &bar0->general_int_mask);
1794 /*
1795 * All XGXS block error interrupts are disabled for now
1796 * TODO
1797 */
1798 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1799 } else if (flag == DISABLE_INTRS) {
1800 /*
1801 * Disable MC Intrs in the general intr mask register
1802 */
1803 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1804 temp64 = readq(&bar0->general_int_mask);
1805 val64 |= temp64;
1806 writeq(val64, &bar0->general_int_mask);
1807 }
1808 }
1809
1810 /* Memory Controller(MC) interrupts */
1811 if (mask & MC_INTR) {
1812 val64 = MC_INT_M;
1813 if (flag == ENABLE_INTRS) {
1814 temp64 = readq(&bar0->general_int_mask);
1815 temp64 &= ~((u64) val64);
1816 writeq(temp64, &bar0->general_int_mask);
1817 /*
1818 * Enable all MC Intrs.
1819 */
1820 writeq(0x0, &bar0->mc_int_mask);
1821 writeq(0x0, &bar0->mc_err_mask);
1822 } else if (flag == DISABLE_INTRS) {
1823 /*
1824 * Disable MC Intrs in the general intr mask register
1825 */
1826 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1827 temp64 = readq(&bar0->general_int_mask);
1828 val64 |= temp64;
1829 writeq(val64, &bar0->general_int_mask);
1830 }
1831 }
1832
1833
1834 /* Tx traffic interrupts */ 1725 /* Tx traffic interrupts */
1835 if (mask & TX_TRAFFIC_INTR) { 1726 if (mask & TX_TRAFFIC_INTR) {
1836 val64 = TXTRAFFIC_INT_M; 1727 val64 = TXTRAFFIC_INT_M;
@@ -1877,41 +1768,36 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1877 } 1768 }
1878} 1769}
1879 1770
1880static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc) 1771/**
1772 * verify_pcc_quiescent- Checks for PCC quiescent state
1773 * Return: 1 If PCC is quiescence
1774 * 0 If PCC is not quiescence
1775 */
1776static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1881{ 1777{
1882 int ret = 0; 1778 int ret = 0, herc;
1779 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1780 u64 val64 = readq(&bar0->adapter_status);
1781
1782 herc = (sp->device_type == XFRAME_II_DEVICE);
1883 1783
1884 if (flag == FALSE) { 1784 if (flag == FALSE) {
1885 if ((!herc && (rev_id >= 4)) || herc) { 1785 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1886 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) && 1786 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
1887 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1888 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1889 ret = 1; 1787 ret = 1;
1890 } 1788 } else {
1891 }else { 1789 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1892 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1893 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1894 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1895 ret = 1; 1790 ret = 1;
1896 }
1897 } 1791 }
1898 } else { 1792 } else {
1899 if ((!herc && (rev_id >= 4)) || herc) { 1793 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1900 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) == 1794 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1901 ADAPTER_STATUS_RMAC_PCC_IDLE) && 1795 ADAPTER_STATUS_RMAC_PCC_IDLE))
1902 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1903 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1904 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1905 ret = 1; 1796 ret = 1;
1906 }
1907 } else { 1797 } else {
1908 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) == 1798 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1909 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) && 1799 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1910 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1911 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1912 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1913 ret = 1; 1800 ret = 1;
1914 }
1915 } 1801 }
1916 } 1802 }
1917 1803
@@ -1919,9 +1805,6 @@ static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1919} 1805}
1920/** 1806/**
1921 * verify_xena_quiescence - Checks whether the H/W is ready 1807 * verify_xena_quiescence - Checks whether the H/W is ready
1922 * @val64 : Value read from adapter status register.
1923 * @flag : indicates if the adapter enable bit was ever written once
1924 * before.
1925 * Description: Returns whether the H/W is ready to go or not. Depending 1808 * Description: Returns whether the H/W is ready to go or not. Depending
1926 * on whether adapter enable bit was written or not the comparison 1809 * on whether adapter enable bit was written or not the comparison
1927 * differs and the calling function passes the input argument flag to 1810 * differs and the calling function passes the input argument flag to
@@ -1930,24 +1813,63 @@ static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1930 * 0 If Xena is not quiescence 1813 * 0 If Xena is not quiescence
1931 */ 1814 */
1932 1815
1933static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag) 1816static int verify_xena_quiescence(struct s2io_nic *sp)
1934{ 1817{
1935 int ret = 0, herc; 1818 int mode;
1936 u64 tmp64 = ~((u64) val64); 1819 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1937 int rev_id = get_xena_rev_id(sp->pdev); 1820 u64 val64 = readq(&bar0->adapter_status);
1821 mode = s2io_verify_pci_mode(sp);
1938 1822
1939 herc = (sp->device_type == XFRAME_II_DEVICE); 1823 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
1940 if (! 1824 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
1941 (tmp64 & 1825 return 0;
1942 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY | 1826 }
1943 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY | 1827 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
1944 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY | 1828 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
1945 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK | 1829 return 0;
1946 ADAPTER_STATUS_P_PLL_LOCK))) { 1830 }
1947 ret = check_prc_pcc_state(val64, flag, rev_id, herc); 1831 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
1832 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
1833 return 0;
1834 }
1835 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
1836 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
1837 return 0;
1838 }
1839 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
1840 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
1841 return 0;
1842 }
1843 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
1844 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
1845 return 0;
1846 }
1847 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
1848 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
1849 return 0;
1850 }
1851 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
1852 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
1853 return 0;
1948 } 1854 }
1949 1855
1950 return ret; 1856 /*
1857 * In PCI 33 mode, the P_PLL is not used, and therefore,
1858 * the the P_PLL_LOCK bit in the adapter_status register will
1859 * not be asserted.
1860 */
1861 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
1862 sp->device_type == XFRAME_II_DEVICE && mode !=
1863 PCI_MODE_PCI_33) {
1864 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
1865 return 0;
1866 }
1867 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1868 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1869 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
1870 return 0;
1871 }
1872 return 1;
1951} 1873}
1952 1874
1953/** 1875/**
@@ -1958,9 +1880,9 @@ static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1958 * 1880 *
1959 */ 1881 */
1960 1882
1961static void fix_mac_address(nic_t * sp) 1883static void fix_mac_address(struct s2io_nic * sp)
1962{ 1884{
1963 XENA_dev_config_t __iomem *bar0 = sp->bar0; 1885 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1964 u64 val64; 1886 u64 val64;
1965 int i = 0; 1887 int i = 0;
1966 1888
@@ -1986,11 +1908,11 @@ static void fix_mac_address(nic_t * sp)
1986 1908
1987static int start_nic(struct s2io_nic *nic) 1909static int start_nic(struct s2io_nic *nic)
1988{ 1910{
1989 XENA_dev_config_t __iomem *bar0 = nic->bar0; 1911 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1990 struct net_device *dev = nic->dev; 1912 struct net_device *dev = nic->dev;
1991 register u64 val64 = 0; 1913 register u64 val64 = 0;
1992 u16 subid, i; 1914 u16 subid, i;
1993 mac_info_t *mac_control; 1915 struct mac_info *mac_control;
1994 struct config_param *config; 1916 struct config_param *config;
1995 1917
1996 mac_control = &nic->mac_control; 1918 mac_control = &nic->mac_control;
@@ -2052,7 +1974,7 @@ static int start_nic(struct s2io_nic *nic)
2052 * it. 1974 * it.
2053 */ 1975 */
2054 val64 = readq(&bar0->adapter_status); 1976 val64 = readq(&bar0->adapter_status);
2055 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) { 1977 if (!verify_xena_quiescence(nic)) {
2056 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name); 1978 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2057 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n", 1979 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2058 (unsigned long long) val64); 1980 (unsigned long long) val64);
@@ -2095,11 +2017,12 @@ static int start_nic(struct s2io_nic *nic)
2095/** 2017/**
2096 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb 2018 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2097 */ 2019 */
2098static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, int get_off) 2020static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2021 TxD *txdlp, int get_off)
2099{ 2022{
2100 nic_t *nic = fifo_data->nic; 2023 struct s2io_nic *nic = fifo_data->nic;
2101 struct sk_buff *skb; 2024 struct sk_buff *skb;
2102 TxD_t *txds; 2025 struct TxD *txds;
2103 u16 j, frg_cnt; 2026 u16 j, frg_cnt;
2104 2027
2105 txds = txdlp; 2028 txds = txdlp;
@@ -2113,7 +2036,7 @@ static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, in
2113 skb = (struct sk_buff *) ((unsigned long) 2036 skb = (struct sk_buff *) ((unsigned long)
2114 txds->Host_Control); 2037 txds->Host_Control);
2115 if (!skb) { 2038 if (!skb) {
2116 memset(txdlp, 0, (sizeof(TxD_t) * fifo_data->max_txds)); 2039 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2117 return NULL; 2040 return NULL;
2118 } 2041 }
2119 pci_unmap_single(nic->pdev, (dma_addr_t) 2042 pci_unmap_single(nic->pdev, (dma_addr_t)
@@ -2132,7 +2055,7 @@ static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, in
2132 frag->size, PCI_DMA_TODEVICE); 2055 frag->size, PCI_DMA_TODEVICE);
2133 } 2056 }
2134 } 2057 }
2135 memset(txdlp,0, (sizeof(TxD_t) * fifo_data->max_txds)); 2058 memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2136 return(skb); 2059 return(skb);
2137} 2060}
2138 2061
@@ -2148,9 +2071,9 @@ static void free_tx_buffers(struct s2io_nic *nic)
2148{ 2071{
2149 struct net_device *dev = nic->dev; 2072 struct net_device *dev = nic->dev;
2150 struct sk_buff *skb; 2073 struct sk_buff *skb;
2151 TxD_t *txdp; 2074 struct TxD *txdp;
2152 int i, j; 2075 int i, j;
2153 mac_info_t *mac_control; 2076 struct mac_info *mac_control;
2154 struct config_param *config; 2077 struct config_param *config;
2155 int cnt = 0; 2078 int cnt = 0;
2156 2079
@@ -2159,7 +2082,7 @@ static void free_tx_buffers(struct s2io_nic *nic)
2159 2082
2160 for (i = 0; i < config->tx_fifo_num; i++) { 2083 for (i = 0; i < config->tx_fifo_num; i++) {
2161 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) { 2084 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2162 txdp = (TxD_t *) mac_control->fifos[i].list_info[j]. 2085 txdp = (struct TxD *) mac_control->fifos[i].list_info[j].
2163 list_virt_addr; 2086 list_virt_addr;
2164 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j); 2087 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2165 if (skb) { 2088 if (skb) {
@@ -2187,10 +2110,10 @@ static void free_tx_buffers(struct s2io_nic *nic)
2187 2110
2188static void stop_nic(struct s2io_nic *nic) 2111static void stop_nic(struct s2io_nic *nic)
2189{ 2112{
2190 XENA_dev_config_t __iomem *bar0 = nic->bar0; 2113 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2191 register u64 val64 = 0; 2114 register u64 val64 = 0;
2192 u16 interruptible; 2115 u16 interruptible;
2193 mac_info_t *mac_control; 2116 struct mac_info *mac_control;
2194 struct config_param *config; 2117 struct config_param *config;
2195 2118
2196 mac_control = &nic->mac_control; 2119 mac_control = &nic->mac_control;
@@ -2208,14 +2131,15 @@ static void stop_nic(struct s2io_nic *nic)
2208 writeq(val64, &bar0->adapter_control); 2131 writeq(val64, &bar0->adapter_control);
2209} 2132}
2210 2133
2211static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb) 2134static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \
2135 sk_buff *skb)
2212{ 2136{
2213 struct net_device *dev = nic->dev; 2137 struct net_device *dev = nic->dev;
2214 struct sk_buff *frag_list; 2138 struct sk_buff *frag_list;
2215 void *tmp; 2139 void *tmp;
2216 2140
2217 /* Buffer-1 receives L3/L4 headers */ 2141 /* Buffer-1 receives L3/L4 headers */
2218 ((RxD3_t*)rxdp)->Buffer1_ptr = pci_map_single 2142 ((struct RxD3*)rxdp)->Buffer1_ptr = pci_map_single
2219 (nic->pdev, skb->data, l3l4hdr_size + 4, 2143 (nic->pdev, skb->data, l3l4hdr_size + 4,
2220 PCI_DMA_FROMDEVICE); 2144 PCI_DMA_FROMDEVICE);
2221 2145
@@ -2226,13 +2150,14 @@ static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
2226 return -ENOMEM ; 2150 return -ENOMEM ;
2227 } 2151 }
2228 frag_list = skb_shinfo(skb)->frag_list; 2152 frag_list = skb_shinfo(skb)->frag_list;
2153 skb->truesize += frag_list->truesize;
2229 frag_list->next = NULL; 2154 frag_list->next = NULL;
2230 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1); 2155 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
2231 frag_list->data = tmp; 2156 frag_list->data = tmp;
2232 frag_list->tail = tmp; 2157 frag_list->tail = tmp;
2233 2158
2234 /* Buffer-2 receives L4 data payload */ 2159 /* Buffer-2 receives L4 data payload */
2235 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev, 2160 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
2236 frag_list->data, dev->mtu, 2161 frag_list->data, dev->mtu,
2237 PCI_DMA_FROMDEVICE); 2162 PCI_DMA_FROMDEVICE);
2238 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4); 2163 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
@@ -2266,18 +2191,16 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2266{ 2191{
2267 struct net_device *dev = nic->dev; 2192 struct net_device *dev = nic->dev;
2268 struct sk_buff *skb; 2193 struct sk_buff *skb;
2269 RxD_t *rxdp; 2194 struct RxD_t *rxdp;
2270 int off, off1, size, block_no, block_no1; 2195 int off, off1, size, block_no, block_no1;
2271 u32 alloc_tab = 0; 2196 u32 alloc_tab = 0;
2272 u32 alloc_cnt; 2197 u32 alloc_cnt;
2273 mac_info_t *mac_control; 2198 struct mac_info *mac_control;
2274 struct config_param *config; 2199 struct config_param *config;
2275 u64 tmp; 2200 u64 tmp;
2276 buffAdd_t *ba; 2201 struct buffAdd *ba;
2277#ifndef CONFIG_S2IO_NAPI
2278 unsigned long flags; 2202 unsigned long flags;
2279#endif 2203 struct RxD_t *first_rxdp = NULL;
2280 RxD_t *first_rxdp = NULL;
2281 2204
2282 mac_control = &nic->mac_control; 2205 mac_control = &nic->mac_control;
2283 config = &nic->config; 2206 config = &nic->config;
@@ -2320,12 +2243,15 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2320 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n", 2243 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2321 dev->name, rxdp); 2244 dev->name, rxdp);
2322 } 2245 }
2323#ifndef CONFIG_S2IO_NAPI 2246 if(!napi) {
2324 spin_lock_irqsave(&nic->put_lock, flags); 2247 spin_lock_irqsave(&nic->put_lock, flags);
2325 mac_control->rings[ring_no].put_pos = 2248 mac_control->rings[ring_no].put_pos =
2326 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off; 2249 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2327 spin_unlock_irqrestore(&nic->put_lock, flags); 2250 spin_unlock_irqrestore(&nic->put_lock, flags);
2328#endif 2251 } else {
2252 mac_control->rings[ring_no].put_pos =
2253 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2254 }
2329 if ((rxdp->Control_1 & RXD_OWN_XENA) && 2255 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2330 ((nic->rxd_mode >= RXD_MODE_3A) && 2256 ((nic->rxd_mode >= RXD_MODE_3A) &&
2331 (rxdp->Control_2 & BIT(0)))) { 2257 (rxdp->Control_2 & BIT(0)))) {
@@ -2356,9 +2282,9 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2356 } 2282 }
2357 if (nic->rxd_mode == RXD_MODE_1) { 2283 if (nic->rxd_mode == RXD_MODE_1) {
2358 /* 1 buffer mode - normal operation mode */ 2284 /* 1 buffer mode - normal operation mode */
2359 memset(rxdp, 0, sizeof(RxD1_t)); 2285 memset(rxdp, 0, sizeof(struct RxD1));
2360 skb_reserve(skb, NET_IP_ALIGN); 2286 skb_reserve(skb, NET_IP_ALIGN);
2361 ((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single 2287 ((struct RxD1*)rxdp)->Buffer0_ptr = pci_map_single
2362 (nic->pdev, skb->data, size - NET_IP_ALIGN, 2288 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2363 PCI_DMA_FROMDEVICE); 2289 PCI_DMA_FROMDEVICE);
2364 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); 2290 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
@@ -2375,7 +2301,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2375 * payload 2301 * payload
2376 */ 2302 */
2377 2303
2378 memset(rxdp, 0, sizeof(RxD3_t)); 2304 memset(rxdp, 0, sizeof(struct RxD3));
2379 ba = &mac_control->rings[ring_no].ba[block_no][off]; 2305 ba = &mac_control->rings[ring_no].ba[block_no][off];
2380 skb_reserve(skb, BUF0_LEN); 2306 skb_reserve(skb, BUF0_LEN);
2381 tmp = (u64)(unsigned long) skb->data; 2307 tmp = (u64)(unsigned long) skb->data;
@@ -2384,13 +2310,13 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2384 skb->data = (void *) (unsigned long)tmp; 2310 skb->data = (void *) (unsigned long)tmp;
2385 skb->tail = (void *) (unsigned long)tmp; 2311 skb->tail = (void *) (unsigned long)tmp;
2386 2312
2387 if (!(((RxD3_t*)rxdp)->Buffer0_ptr)) 2313 if (!(((struct RxD3*)rxdp)->Buffer0_ptr))
2388 ((RxD3_t*)rxdp)->Buffer0_ptr = 2314 ((struct RxD3*)rxdp)->Buffer0_ptr =
2389 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, 2315 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2390 PCI_DMA_FROMDEVICE); 2316 PCI_DMA_FROMDEVICE);
2391 else 2317 else
2392 pci_dma_sync_single_for_device(nic->pdev, 2318 pci_dma_sync_single_for_device(nic->pdev,
2393 (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr, 2319 (dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr,
2394 BUF0_LEN, PCI_DMA_FROMDEVICE); 2320 BUF0_LEN, PCI_DMA_FROMDEVICE);
2395 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); 2321 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2396 if (nic->rxd_mode == RXD_MODE_3B) { 2322 if (nic->rxd_mode == RXD_MODE_3B) {
@@ -2400,13 +2326,13 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2400 * Buffer2 will have L3/L4 header plus 2326 * Buffer2 will have L3/L4 header plus
2401 * L4 payload 2327 * L4 payload
2402 */ 2328 */
2403 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single 2329 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single
2404 (nic->pdev, skb->data, dev->mtu + 4, 2330 (nic->pdev, skb->data, dev->mtu + 4,
2405 PCI_DMA_FROMDEVICE); 2331 PCI_DMA_FROMDEVICE);
2406 2332
2407 /* Buffer-1 will be dummy buffer. Not used */ 2333 /* Buffer-1 will be dummy buffer. Not used */
2408 if (!(((RxD3_t*)rxdp)->Buffer1_ptr)) { 2334 if (!(((struct RxD3*)rxdp)->Buffer1_ptr)) {
2409 ((RxD3_t*)rxdp)->Buffer1_ptr = 2335 ((struct RxD3*)rxdp)->Buffer1_ptr =
2410 pci_map_single(nic->pdev, 2336 pci_map_single(nic->pdev,
2411 ba->ba_1, BUF1_LEN, 2337 ba->ba_1, BUF1_LEN,
2412 PCI_DMA_FROMDEVICE); 2338 PCI_DMA_FROMDEVICE);
@@ -2466,9 +2392,9 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2466 struct net_device *dev = sp->dev; 2392 struct net_device *dev = sp->dev;
2467 int j; 2393 int j;
2468 struct sk_buff *skb; 2394 struct sk_buff *skb;
2469 RxD_t *rxdp; 2395 struct RxD_t *rxdp;
2470 mac_info_t *mac_control; 2396 struct mac_info *mac_control;
2471 buffAdd_t *ba; 2397 struct buffAdd *ba;
2472 2398
2473 mac_control = &sp->mac_control; 2399 mac_control = &sp->mac_control;
2474 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) { 2400 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
@@ -2481,41 +2407,41 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2481 } 2407 }
2482 if (sp->rxd_mode == RXD_MODE_1) { 2408 if (sp->rxd_mode == RXD_MODE_1) {
2483 pci_unmap_single(sp->pdev, (dma_addr_t) 2409 pci_unmap_single(sp->pdev, (dma_addr_t)
2484 ((RxD1_t*)rxdp)->Buffer0_ptr, 2410 ((struct RxD1*)rxdp)->Buffer0_ptr,
2485 dev->mtu + 2411 dev->mtu +
2486 HEADER_ETHERNET_II_802_3_SIZE 2412 HEADER_ETHERNET_II_802_3_SIZE
2487 + HEADER_802_2_SIZE + 2413 + HEADER_802_2_SIZE +
2488 HEADER_SNAP_SIZE, 2414 HEADER_SNAP_SIZE,
2489 PCI_DMA_FROMDEVICE); 2415 PCI_DMA_FROMDEVICE);
2490 memset(rxdp, 0, sizeof(RxD1_t)); 2416 memset(rxdp, 0, sizeof(struct RxD1));
2491 } else if(sp->rxd_mode == RXD_MODE_3B) { 2417 } else if(sp->rxd_mode == RXD_MODE_3B) {
2492 ba = &mac_control->rings[ring_no]. 2418 ba = &mac_control->rings[ring_no].
2493 ba[blk][j]; 2419 ba[blk][j];
2494 pci_unmap_single(sp->pdev, (dma_addr_t) 2420 pci_unmap_single(sp->pdev, (dma_addr_t)
2495 ((RxD3_t*)rxdp)->Buffer0_ptr, 2421 ((struct RxD3*)rxdp)->Buffer0_ptr,
2496 BUF0_LEN, 2422 BUF0_LEN,
2497 PCI_DMA_FROMDEVICE); 2423 PCI_DMA_FROMDEVICE);
2498 pci_unmap_single(sp->pdev, (dma_addr_t) 2424 pci_unmap_single(sp->pdev, (dma_addr_t)
2499 ((RxD3_t*)rxdp)->Buffer1_ptr, 2425 ((struct RxD3*)rxdp)->Buffer1_ptr,
2500 BUF1_LEN, 2426 BUF1_LEN,
2501 PCI_DMA_FROMDEVICE); 2427 PCI_DMA_FROMDEVICE);
2502 pci_unmap_single(sp->pdev, (dma_addr_t) 2428 pci_unmap_single(sp->pdev, (dma_addr_t)
2503 ((RxD3_t*)rxdp)->Buffer2_ptr, 2429 ((struct RxD3*)rxdp)->Buffer2_ptr,
2504 dev->mtu + 4, 2430 dev->mtu + 4,
2505 PCI_DMA_FROMDEVICE); 2431 PCI_DMA_FROMDEVICE);
2506 memset(rxdp, 0, sizeof(RxD3_t)); 2432 memset(rxdp, 0, sizeof(struct RxD3));
2507 } else { 2433 } else {
2508 pci_unmap_single(sp->pdev, (dma_addr_t) 2434 pci_unmap_single(sp->pdev, (dma_addr_t)
2509 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN, 2435 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2510 PCI_DMA_FROMDEVICE); 2436 PCI_DMA_FROMDEVICE);
2511 pci_unmap_single(sp->pdev, (dma_addr_t) 2437 pci_unmap_single(sp->pdev, (dma_addr_t)
2512 ((RxD3_t*)rxdp)->Buffer1_ptr, 2438 ((struct RxD3*)rxdp)->Buffer1_ptr,
2513 l3l4hdr_size + 4, 2439 l3l4hdr_size + 4,
2514 PCI_DMA_FROMDEVICE); 2440 PCI_DMA_FROMDEVICE);
2515 pci_unmap_single(sp->pdev, (dma_addr_t) 2441 pci_unmap_single(sp->pdev, (dma_addr_t)
2516 ((RxD3_t*)rxdp)->Buffer2_ptr, dev->mtu, 2442 ((struct RxD3*)rxdp)->Buffer2_ptr, dev->mtu,
2517 PCI_DMA_FROMDEVICE); 2443 PCI_DMA_FROMDEVICE);
2518 memset(rxdp, 0, sizeof(RxD3_t)); 2444 memset(rxdp, 0, sizeof(struct RxD3));
2519 } 2445 }
2520 dev_kfree_skb(skb); 2446 dev_kfree_skb(skb);
2521 atomic_dec(&sp->rx_bufs_left[ring_no]); 2447 atomic_dec(&sp->rx_bufs_left[ring_no]);
@@ -2535,7 +2461,7 @@ static void free_rx_buffers(struct s2io_nic *sp)
2535{ 2461{
2536 struct net_device *dev = sp->dev; 2462 struct net_device *dev = sp->dev;
2537 int i, blk = 0, buf_cnt = 0; 2463 int i, blk = 0, buf_cnt = 0;
2538 mac_info_t *mac_control; 2464 struct mac_info *mac_control;
2539 struct config_param *config; 2465 struct config_param *config;
2540 2466
2541 mac_control = &sp->mac_control; 2467 mac_control = &sp->mac_control;
@@ -2568,15 +2494,13 @@ static void free_rx_buffers(struct s2io_nic *sp)
2568 * 0 on success and 1 if there are No Rx packets to be processed. 2494 * 0 on success and 1 if there are No Rx packets to be processed.
2569 */ 2495 */
2570 2496
2571#if defined(CONFIG_S2IO_NAPI)
2572static int s2io_poll(struct net_device *dev, int *budget) 2497static int s2io_poll(struct net_device *dev, int *budget)
2573{ 2498{
2574 nic_t *nic = dev->priv; 2499 struct s2io_nic *nic = dev->priv;
2575 int pkt_cnt = 0, org_pkts_to_process; 2500 int pkt_cnt = 0, org_pkts_to_process;
2576 mac_info_t *mac_control; 2501 struct mac_info *mac_control;
2577 struct config_param *config; 2502 struct config_param *config;
2578 XENA_dev_config_t __iomem *bar0 = nic->bar0; 2503 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2579 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2580 int i; 2504 int i;
2581 2505
2582 atomic_inc(&nic->isr_cnt); 2506 atomic_inc(&nic->isr_cnt);
@@ -2588,8 +2512,8 @@ static int s2io_poll(struct net_device *dev, int *budget)
2588 nic->pkts_to_process = dev->quota; 2512 nic->pkts_to_process = dev->quota;
2589 org_pkts_to_process = nic->pkts_to_process; 2513 org_pkts_to_process = nic->pkts_to_process;
2590 2514
2591 writeq(val64, &bar0->rx_traffic_int); 2515 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2592 val64 = readl(&bar0->rx_traffic_int); 2516 readl(&bar0->rx_traffic_int);
2593 2517
2594 for (i = 0; i < config->rx_ring_num; i++) { 2518 for (i = 0; i < config->rx_ring_num; i++) {
2595 rx_intr_handler(&mac_control->rings[i]); 2519 rx_intr_handler(&mac_control->rings[i]);
@@ -2615,7 +2539,7 @@ static int s2io_poll(struct net_device *dev, int *budget)
2615 } 2539 }
2616 /* Re enable the Rx interrupts. */ 2540 /* Re enable the Rx interrupts. */
2617 writeq(0x0, &bar0->rx_traffic_mask); 2541 writeq(0x0, &bar0->rx_traffic_mask);
2618 val64 = readl(&bar0->rx_traffic_mask); 2542 readl(&bar0->rx_traffic_mask);
2619 atomic_dec(&nic->isr_cnt); 2543 atomic_dec(&nic->isr_cnt);
2620 return 0; 2544 return 0;
2621 2545
@@ -2633,7 +2557,6 @@ no_rx:
2633 atomic_dec(&nic->isr_cnt); 2557 atomic_dec(&nic->isr_cnt);
2634 return 1; 2558 return 1;
2635} 2559}
2636#endif
2637 2560
2638#ifdef CONFIG_NET_POLL_CONTROLLER 2561#ifdef CONFIG_NET_POLL_CONTROLLER
2639/** 2562/**
@@ -2647,10 +2570,10 @@ no_rx:
2647 */ 2570 */
2648static void s2io_netpoll(struct net_device *dev) 2571static void s2io_netpoll(struct net_device *dev)
2649{ 2572{
2650 nic_t *nic = dev->priv; 2573 struct s2io_nic *nic = dev->priv;
2651 mac_info_t *mac_control; 2574 struct mac_info *mac_control;
2652 struct config_param *config; 2575 struct config_param *config;
2653 XENA_dev_config_t __iomem *bar0 = nic->bar0; 2576 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2654 u64 val64 = 0xFFFFFFFFFFFFFFFFULL; 2577 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2655 int i; 2578 int i;
2656 2579
@@ -2699,17 +2622,15 @@ static void s2io_netpoll(struct net_device *dev)
2699 * Return Value: 2622 * Return Value:
2700 * NONE. 2623 * NONE.
2701 */ 2624 */
2702static void rx_intr_handler(ring_info_t *ring_data) 2625static void rx_intr_handler(struct ring_info *ring_data)
2703{ 2626{
2704 nic_t *nic = ring_data->nic; 2627 struct s2io_nic *nic = ring_data->nic;
2705 struct net_device *dev = (struct net_device *) nic->dev; 2628 struct net_device *dev = (struct net_device *) nic->dev;
2706 int get_block, put_block, put_offset; 2629 int get_block, put_block, put_offset;
2707 rx_curr_get_info_t get_info, put_info; 2630 struct rx_curr_get_info get_info, put_info;
2708 RxD_t *rxdp; 2631 struct RxD_t *rxdp;
2709 struct sk_buff *skb; 2632 struct sk_buff *skb;
2710#ifndef CONFIG_S2IO_NAPI
2711 int pkt_cnt = 0; 2633 int pkt_cnt = 0;
2712#endif
2713 int i; 2634 int i;
2714 2635
2715 spin_lock(&nic->rx_lock); 2636 spin_lock(&nic->rx_lock);
@@ -2722,19 +2643,21 @@ static void rx_intr_handler(ring_info_t *ring_data)
2722 2643
2723 get_info = ring_data->rx_curr_get_info; 2644 get_info = ring_data->rx_curr_get_info;
2724 get_block = get_info.block_index; 2645 get_block = get_info.block_index;
2725 put_info = ring_data->rx_curr_put_info; 2646 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2726 put_block = put_info.block_index; 2647 put_block = put_info.block_index;
2727 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr; 2648 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2728#ifndef CONFIG_S2IO_NAPI 2649 if (!napi) {
2729 spin_lock(&nic->put_lock); 2650 spin_lock(&nic->put_lock);
2730 put_offset = ring_data->put_pos; 2651 put_offset = ring_data->put_pos;
2731 spin_unlock(&nic->put_lock); 2652 spin_unlock(&nic->put_lock);
2732#else 2653 } else
2733 put_offset = (put_block * (rxd_count[nic->rxd_mode] + 1)) + 2654 put_offset = ring_data->put_pos;
2734 put_info.offset; 2655
2735#endif
2736 while (RXD_IS_UP2DT(rxdp)) { 2656 while (RXD_IS_UP2DT(rxdp)) {
2737 /* If your are next to put index then it's FIFO full condition */ 2657 /*
2658 * If your are next to put index then it's
2659 * FIFO full condition
2660 */
2738 if ((get_block == put_block) && 2661 if ((get_block == put_block) &&
2739 (get_info.offset + 1) == put_info.offset) { 2662 (get_info.offset + 1) == put_info.offset) {
2740 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name); 2663 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
@@ -2750,7 +2673,7 @@ static void rx_intr_handler(ring_info_t *ring_data)
2750 } 2673 }
2751 if (nic->rxd_mode == RXD_MODE_1) { 2674 if (nic->rxd_mode == RXD_MODE_1) {
2752 pci_unmap_single(nic->pdev, (dma_addr_t) 2675 pci_unmap_single(nic->pdev, (dma_addr_t)
2753 ((RxD1_t*)rxdp)->Buffer0_ptr, 2676 ((struct RxD1*)rxdp)->Buffer0_ptr,
2754 dev->mtu + 2677 dev->mtu +
2755 HEADER_ETHERNET_II_802_3_SIZE + 2678 HEADER_ETHERNET_II_802_3_SIZE +
2756 HEADER_802_2_SIZE + 2679 HEADER_802_2_SIZE +
@@ -2758,22 +2681,22 @@ static void rx_intr_handler(ring_info_t *ring_data)
2758 PCI_DMA_FROMDEVICE); 2681 PCI_DMA_FROMDEVICE);
2759 } else if (nic->rxd_mode == RXD_MODE_3B) { 2682 } else if (nic->rxd_mode == RXD_MODE_3B) {
2760 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) 2683 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2761 ((RxD3_t*)rxdp)->Buffer0_ptr, 2684 ((struct RxD3*)rxdp)->Buffer0_ptr,
2762 BUF0_LEN, PCI_DMA_FROMDEVICE); 2685 BUF0_LEN, PCI_DMA_FROMDEVICE);
2763 pci_unmap_single(nic->pdev, (dma_addr_t) 2686 pci_unmap_single(nic->pdev, (dma_addr_t)
2764 ((RxD3_t*)rxdp)->Buffer2_ptr, 2687 ((struct RxD3*)rxdp)->Buffer2_ptr,
2765 dev->mtu + 4, 2688 dev->mtu + 4,
2766 PCI_DMA_FROMDEVICE); 2689 PCI_DMA_FROMDEVICE);
2767 } else { 2690 } else {
2768 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) 2691 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2769 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN, 2692 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2770 PCI_DMA_FROMDEVICE); 2693 PCI_DMA_FROMDEVICE);
2771 pci_unmap_single(nic->pdev, (dma_addr_t) 2694 pci_unmap_single(nic->pdev, (dma_addr_t)
2772 ((RxD3_t*)rxdp)->Buffer1_ptr, 2695 ((struct RxD3*)rxdp)->Buffer1_ptr,
2773 l3l4hdr_size + 4, 2696 l3l4hdr_size + 4,
2774 PCI_DMA_FROMDEVICE); 2697 PCI_DMA_FROMDEVICE);
2775 pci_unmap_single(nic->pdev, (dma_addr_t) 2698 pci_unmap_single(nic->pdev, (dma_addr_t)
2776 ((RxD3_t*)rxdp)->Buffer2_ptr, 2699 ((struct RxD3*)rxdp)->Buffer2_ptr,
2777 dev->mtu, PCI_DMA_FROMDEVICE); 2700 dev->mtu, PCI_DMA_FROMDEVICE);
2778 } 2701 }
2779 prefetch(skb->data); 2702 prefetch(skb->data);
@@ -2792,20 +2715,17 @@ static void rx_intr_handler(ring_info_t *ring_data)
2792 rxdp = ring_data->rx_blocks[get_block].block_virt_addr; 2715 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2793 } 2716 }
2794 2717
2795#ifdef CONFIG_S2IO_NAPI
2796 nic->pkts_to_process -= 1; 2718 nic->pkts_to_process -= 1;
2797 if (!nic->pkts_to_process) 2719 if ((napi) && (!nic->pkts_to_process))
2798 break; 2720 break;
2799#else
2800 pkt_cnt++; 2721 pkt_cnt++;
2801 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts)) 2722 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2802 break; 2723 break;
2803#endif
2804 } 2724 }
2805 if (nic->lro) { 2725 if (nic->lro) {
2806 /* Clear all LRO sessions before exiting */ 2726 /* Clear all LRO sessions before exiting */
2807 for (i=0; i<MAX_LRO_SESSIONS; i++) { 2727 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2808 lro_t *lro = &nic->lro0_n[i]; 2728 struct lro *lro = &nic->lro0_n[i];
2809 if (lro->in_use) { 2729 if (lro->in_use) {
2810 update_L3L4_header(nic, lro); 2730 update_L3L4_header(nic, lro);
2811 queue_rx_frame(lro->parent); 2731 queue_rx_frame(lro->parent);
@@ -2829,17 +2749,17 @@ static void rx_intr_handler(ring_info_t *ring_data)
2829 * NONE 2749 * NONE
2830 */ 2750 */
2831 2751
2832static void tx_intr_handler(fifo_info_t *fifo_data) 2752static void tx_intr_handler(struct fifo_info *fifo_data)
2833{ 2753{
2834 nic_t *nic = fifo_data->nic; 2754 struct s2io_nic *nic = fifo_data->nic;
2835 struct net_device *dev = (struct net_device *) nic->dev; 2755 struct net_device *dev = (struct net_device *) nic->dev;
2836 tx_curr_get_info_t get_info, put_info; 2756 struct tx_curr_get_info get_info, put_info;
2837 struct sk_buff *skb; 2757 struct sk_buff *skb;
2838 TxD_t *txdlp; 2758 struct TxD *txdlp;
2839 2759
2840 get_info = fifo_data->tx_curr_get_info; 2760 get_info = fifo_data->tx_curr_get_info;
2841 put_info = fifo_data->tx_curr_put_info; 2761 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
2842 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset]. 2762 txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
2843 list_virt_addr; 2763 list_virt_addr;
2844 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) && 2764 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2845 (get_info.offset != put_info.offset) && 2765 (get_info.offset != put_info.offset) &&
@@ -2854,11 +2774,10 @@ static void tx_intr_handler(fifo_info_t *fifo_data)
2854 } 2774 }
2855 if ((err >> 48) == 0xA) { 2775 if ((err >> 48) == 0xA) {
2856 DBG_PRINT(TX_DBG, "TxD returned due \ 2776 DBG_PRINT(TX_DBG, "TxD returned due \
2857to loss of link\n"); 2777 to loss of link\n");
2858 } 2778 }
2859 else { 2779 else {
2860 DBG_PRINT(ERR_DBG, "***TxD error \ 2780 DBG_PRINT(ERR_DBG, "***TxD error %llx\n", err);
2861%llx\n", err);
2862 } 2781 }
2863 } 2782 }
2864 2783
@@ -2877,7 +2796,7 @@ to loss of link\n");
2877 get_info.offset++; 2796 get_info.offset++;
2878 if (get_info.offset == get_info.fifo_len + 1) 2797 if (get_info.offset == get_info.fifo_len + 1)
2879 get_info.offset = 0; 2798 get_info.offset = 0;
2880 txdlp = (TxD_t *) fifo_data->list_info 2799 txdlp = (struct TxD *) fifo_data->list_info
2881 [get_info.offset].list_virt_addr; 2800 [get_info.offset].list_virt_addr;
2882 fifo_data->tx_curr_get_info.offset = 2801 fifo_data->tx_curr_get_info.offset =
2883 get_info.offset; 2802 get_info.offset;
@@ -2902,8 +2821,8 @@ to loss of link\n");
2902static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev) 2821static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
2903{ 2822{
2904 u64 val64 = 0x0; 2823 u64 val64 = 0x0;
2905 nic_t *sp = dev->priv; 2824 struct s2io_nic *sp = dev->priv;
2906 XENA_dev_config_t __iomem *bar0 = sp->bar0; 2825 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2907 2826
2908 //address transaction 2827 //address transaction
2909 val64 = val64 | MDIO_MMD_INDX_ADDR(addr) 2828 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
@@ -2951,8 +2870,8 @@ static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
2951{ 2870{
2952 u64 val64 = 0x0; 2871 u64 val64 = 0x0;
2953 u64 rval64 = 0x0; 2872 u64 rval64 = 0x0;
2954 nic_t *sp = dev->priv; 2873 struct s2io_nic *sp = dev->priv;
2955 XENA_dev_config_t __iomem *bar0 = sp->bar0; 2874 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2956 2875
2957 /* address transaction */ 2876 /* address transaction */
2958 val64 = val64 | MDIO_MMD_INDX_ADDR(addr) 2877 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
@@ -3055,8 +2974,8 @@ static void s2io_updt_xpak_counter(struct net_device *dev)
3055 u64 val64 = 0x0; 2974 u64 val64 = 0x0;
3056 u64 addr = 0x0; 2975 u64 addr = 0x0;
3057 2976
3058 nic_t *sp = dev->priv; 2977 struct s2io_nic *sp = dev->priv;
3059 StatInfo_t *stat_info = sp->mac_control.stats_info; 2978 struct stat_block *stat_info = sp->mac_control.stats_info;
3060 2979
3061 /* Check the communication with the MDIO slave */ 2980 /* Check the communication with the MDIO slave */
3062 addr = 0x0000; 2981 addr = 0x0000;
@@ -3154,10 +3073,12 @@ static void s2io_updt_xpak_counter(struct net_device *dev)
3154static void alarm_intr_handler(struct s2io_nic *nic) 3073static void alarm_intr_handler(struct s2io_nic *nic)
3155{ 3074{
3156 struct net_device *dev = (struct net_device *) nic->dev; 3075 struct net_device *dev = (struct net_device *) nic->dev;
3157 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3076 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3158 register u64 val64 = 0, err_reg = 0; 3077 register u64 val64 = 0, err_reg = 0;
3159 u64 cnt; 3078 u64 cnt;
3160 int i; 3079 int i;
3080 if (atomic_read(&nic->card_state) == CARD_DOWN)
3081 return;
3161 nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0; 3082 nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
3162 /* Handling the XPAK counters update */ 3083 /* Handling the XPAK counters update */
3163 if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) { 3084 if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
@@ -3297,6 +3218,25 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit)
3297 } 3218 }
3298 return ret; 3219 return ret;
3299} 3220}
3221/*
3222 * check_pci_device_id - Checks if the device id is supported
3223 * @id : device id
3224 * Description: Function to check if the pci device id is supported by driver.
3225 * Return value: Actual device id if supported else PCI_ANY_ID
3226 */
3227static u16 check_pci_device_id(u16 id)
3228{
3229 switch (id) {
3230 case PCI_DEVICE_ID_HERC_WIN:
3231 case PCI_DEVICE_ID_HERC_UNI:
3232 return XFRAME_II_DEVICE;
3233 case PCI_DEVICE_ID_S2IO_UNI:
3234 case PCI_DEVICE_ID_S2IO_WIN:
3235 return XFRAME_I_DEVICE;
3236 default:
3237 return PCI_ANY_ID;
3238 }
3239}
3300 3240
3301/** 3241/**
3302 * s2io_reset - Resets the card. 3242 * s2io_reset - Resets the card.
@@ -3308,42 +3248,57 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit)
3308 * void. 3248 * void.
3309 */ 3249 */
3310 3250
3311static void s2io_reset(nic_t * sp) 3251static void s2io_reset(struct s2io_nic * sp)
3312{ 3252{
3313 XENA_dev_config_t __iomem *bar0 = sp->bar0; 3253 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3314 u64 val64; 3254 u64 val64;
3315 u16 subid, pci_cmd; 3255 u16 subid, pci_cmd;
3256 int i;
3257 u16 val16;
3258 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3259 __FUNCTION__, sp->dev->name);
3316 3260
3317 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */ 3261 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3318 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd)); 3262 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3319 3263
3264 if (sp->device_type == XFRAME_II_DEVICE) {
3265 int ret;
3266 ret = pci_set_power_state(sp->pdev, 3);
3267 if (!ret)
3268 ret = pci_set_power_state(sp->pdev, 0);
3269 else {
3270 DBG_PRINT(ERR_DBG,"%s PME based SW_Reset failed!\n",
3271 __FUNCTION__);
3272 goto old_way;
3273 }
3274 msleep(20);
3275 goto new_way;
3276 }
3277old_way:
3320 val64 = SW_RESET_ALL; 3278 val64 = SW_RESET_ALL;
3321 writeq(val64, &bar0->sw_reset); 3279 writeq(val64, &bar0->sw_reset);
3322 3280new_way:
3323 /*
3324 * At this stage, if the PCI write is indeed completed, the
3325 * card is reset and so is the PCI Config space of the device.
3326 * So a read cannot be issued at this stage on any of the
3327 * registers to ensure the write into "sw_reset" register
3328 * has gone through.
3329 * Question: Is there any system call that will explicitly force
3330 * all the write commands still pending on the bus to be pushed
3331 * through?
3332 * As of now I'am just giving a 250ms delay and hoping that the
3333 * PCI write to sw_reset register is done by this time.
3334 */
3335 msleep(250);
3336 if (strstr(sp->product_name, "CX4")) { 3281 if (strstr(sp->product_name, "CX4")) {
3337 msleep(750); 3282 msleep(750);
3338 } 3283 }
3284 msleep(250);
3285 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3339 3286
3340 /* Restore the PCI state saved during initialization. */ 3287 /* Restore the PCI state saved during initialization. */
3341 pci_restore_state(sp->pdev); 3288 pci_restore_state(sp->pdev);
3342 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, 3289 pci_read_config_word(sp->pdev, 0x2, &val16);
3343 pci_cmd); 3290 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3344 s2io_init_pci(sp); 3291 break;
3292 msleep(200);
3293 }
3345 3294
3346 msleep(250); 3295 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3296 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3297 }
3298
3299 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3300
3301 s2io_init_pci(sp);
3347 3302
3348 /* Set swapper to enable I/O register access */ 3303 /* Set swapper to enable I/O register access */
3349 s2io_set_swapper(sp); 3304 s2io_set_swapper(sp);
@@ -3399,10 +3354,10 @@ static void s2io_reset(nic_t * sp)
3399 * SUCCESS on success and FAILURE on failure. 3354 * SUCCESS on success and FAILURE on failure.
3400 */ 3355 */
3401 3356
3402static int s2io_set_swapper(nic_t * sp) 3357static int s2io_set_swapper(struct s2io_nic * sp)
3403{ 3358{
3404 struct net_device *dev = sp->dev; 3359 struct net_device *dev = sp->dev;
3405 XENA_dev_config_t __iomem *bar0 = sp->bar0; 3360 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3406 u64 val64, valt, valr; 3361 u64 val64, valt, valr;
3407 3362
3408 /* 3363 /*
@@ -3527,9 +3482,9 @@ static int s2io_set_swapper(nic_t * sp)
3527 return SUCCESS; 3482 return SUCCESS;
3528} 3483}
3529 3484
3530static int wait_for_msix_trans(nic_t *nic, int i) 3485static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3531{ 3486{
3532 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3487 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3533 u64 val64; 3488 u64 val64;
3534 int ret = 0, cnt = 0; 3489 int ret = 0, cnt = 0;
3535 3490
@@ -3548,9 +3503,9 @@ static int wait_for_msix_trans(nic_t *nic, int i)
3548 return ret; 3503 return ret;
3549} 3504}
3550 3505
3551static void restore_xmsi_data(nic_t *nic) 3506static void restore_xmsi_data(struct s2io_nic *nic)
3552{ 3507{
3553 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3508 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3554 u64 val64; 3509 u64 val64;
3555 int i; 3510 int i;
3556 3511
@@ -3566,9 +3521,9 @@ static void restore_xmsi_data(nic_t *nic)
3566 } 3521 }
3567} 3522}
3568 3523
3569static void store_xmsi_data(nic_t *nic) 3524static void store_xmsi_data(struct s2io_nic *nic)
3570{ 3525{
3571 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3526 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3572 u64 val64, addr, data; 3527 u64 val64, addr, data;
3573 int i; 3528 int i;
3574 3529
@@ -3589,9 +3544,9 @@ static void store_xmsi_data(nic_t *nic)
3589 } 3544 }
3590} 3545}
3591 3546
3592int s2io_enable_msi(nic_t *nic) 3547int s2io_enable_msi(struct s2io_nic *nic)
3593{ 3548{
3594 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3549 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3595 u16 msi_ctrl, msg_val; 3550 u16 msi_ctrl, msg_val;
3596 struct config_param *config = &nic->config; 3551 struct config_param *config = &nic->config;
3597 struct net_device *dev = nic->dev; 3552 struct net_device *dev = nic->dev;
@@ -3639,9 +3594,9 @@ int s2io_enable_msi(nic_t *nic)
3639 return 0; 3594 return 0;
3640} 3595}
3641 3596
3642static int s2io_enable_msi_x(nic_t *nic) 3597static int s2io_enable_msi_x(struct s2io_nic *nic)
3643{ 3598{
3644 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3599 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3645 u64 tx_mat, rx_mat; 3600 u64 tx_mat, rx_mat;
3646 u16 msi_control; /* Temp variable */ 3601 u16 msi_control; /* Temp variable */
3647 int ret, i, j, msix_indx = 1; 3602 int ret, i, j, msix_indx = 1;
@@ -3749,7 +3704,7 @@ static int s2io_enable_msi_x(nic_t *nic)
3749 3704
3750static int s2io_open(struct net_device *dev) 3705static int s2io_open(struct net_device *dev)
3751{ 3706{
3752 nic_t *sp = dev->priv; 3707 struct s2io_nic *sp = dev->priv;
3753 int err = 0; 3708 int err = 0;
3754 3709
3755 /* 3710 /*
@@ -3802,7 +3757,7 @@ hw_init_failed:
3802 3757
3803static int s2io_close(struct net_device *dev) 3758static int s2io_close(struct net_device *dev)
3804{ 3759{
3805 nic_t *sp = dev->priv; 3760 struct s2io_nic *sp = dev->priv;
3806 3761
3807 flush_scheduled_work(); 3762 flush_scheduled_work();
3808 netif_stop_queue(dev); 3763 netif_stop_queue(dev);
@@ -3828,15 +3783,15 @@ static int s2io_close(struct net_device *dev)
3828 3783
3829static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) 3784static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3830{ 3785{
3831 nic_t *sp = dev->priv; 3786 struct s2io_nic *sp = dev->priv;
3832 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off; 3787 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3833 register u64 val64; 3788 register u64 val64;
3834 TxD_t *txdp; 3789 struct TxD *txdp;
3835 TxFIFO_element_t __iomem *tx_fifo; 3790 struct TxFIFO_element __iomem *tx_fifo;
3836 unsigned long flags; 3791 unsigned long flags;
3837 u16 vlan_tag = 0; 3792 u16 vlan_tag = 0;
3838 int vlan_priority = 0; 3793 int vlan_priority = 0;
3839 mac_info_t *mac_control; 3794 struct mac_info *mac_control;
3840 struct config_param *config; 3795 struct config_param *config;
3841 int offload_type; 3796 int offload_type;
3842 3797
@@ -3864,7 +3819,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3864 3819
3865 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset; 3820 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3866 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset; 3821 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3867 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off]. 3822 txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
3868 list_virt_addr; 3823 list_virt_addr;
3869 3824
3870 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1; 3825 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
@@ -3887,12 +3842,10 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3887 } 3842 }
3888 3843
3889 offload_type = s2io_offload_type(skb); 3844 offload_type = s2io_offload_type(skb);
3890#ifdef NETIF_F_TSO
3891 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { 3845 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3892 txdp->Control_1 |= TXD_TCP_LSO_EN; 3846 txdp->Control_1 |= TXD_TCP_LSO_EN;
3893 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb)); 3847 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
3894 } 3848 }
3895#endif
3896 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3849 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3897 txdp->Control_2 |= 3850 txdp->Control_2 |=
3898 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN | 3851 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
@@ -3993,13 +3946,13 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3993static void 3946static void
3994s2io_alarm_handle(unsigned long data) 3947s2io_alarm_handle(unsigned long data)
3995{ 3948{
3996 nic_t *sp = (nic_t *)data; 3949 struct s2io_nic *sp = (struct s2io_nic *)data;
3997 3950
3998 alarm_intr_handler(sp); 3951 alarm_intr_handler(sp);
3999 mod_timer(&sp->alarm_timer, jiffies + HZ / 2); 3952 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4000} 3953}
4001 3954
4002static int s2io_chk_rx_buffers(nic_t *sp, int rng_n) 3955static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
4003{ 3956{
4004 int rxb_size, level; 3957 int rxb_size, level;
4005 3958
@@ -4031,9 +3984,9 @@ static int s2io_chk_rx_buffers(nic_t *sp, int rng_n)
4031static irqreturn_t s2io_msi_handle(int irq, void *dev_id) 3984static irqreturn_t s2io_msi_handle(int irq, void *dev_id)
4032{ 3985{
4033 struct net_device *dev = (struct net_device *) dev_id; 3986 struct net_device *dev = (struct net_device *) dev_id;
4034 nic_t *sp = dev->priv; 3987 struct s2io_nic *sp = dev->priv;
4035 int i; 3988 int i;
4036 mac_info_t *mac_control; 3989 struct mac_info *mac_control;
4037 struct config_param *config; 3990 struct config_param *config;
4038 3991
4039 atomic_inc(&sp->isr_cnt); 3992 atomic_inc(&sp->isr_cnt);
@@ -4063,8 +4016,8 @@ static irqreturn_t s2io_msi_handle(int irq, void *dev_id)
4063 4016
4064static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) 4017static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4065{ 4018{
4066 ring_info_t *ring = (ring_info_t *)dev_id; 4019 struct ring_info *ring = (struct ring_info *)dev_id;
4067 nic_t *sp = ring->nic; 4020 struct s2io_nic *sp = ring->nic;
4068 4021
4069 atomic_inc(&sp->isr_cnt); 4022 atomic_inc(&sp->isr_cnt);
4070 4023
@@ -4077,17 +4030,17 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4077 4030
4078static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id) 4031static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4079{ 4032{
4080 fifo_info_t *fifo = (fifo_info_t *)dev_id; 4033 struct fifo_info *fifo = (struct fifo_info *)dev_id;
4081 nic_t *sp = fifo->nic; 4034 struct s2io_nic *sp = fifo->nic;
4082 4035
4083 atomic_inc(&sp->isr_cnt); 4036 atomic_inc(&sp->isr_cnt);
4084 tx_intr_handler(fifo); 4037 tx_intr_handler(fifo);
4085 atomic_dec(&sp->isr_cnt); 4038 atomic_dec(&sp->isr_cnt);
4086 return IRQ_HANDLED; 4039 return IRQ_HANDLED;
4087} 4040}
4088static void s2io_txpic_intr_handle(nic_t *sp) 4041static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4089{ 4042{
4090 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4043 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4091 u64 val64; 4044 u64 val64;
4092 4045
4093 val64 = readq(&bar0->pic_int_status); 4046 val64 = readq(&bar0->pic_int_status);
@@ -4109,39 +4062,33 @@ static void s2io_txpic_intr_handle(nic_t *sp)
4109 } 4062 }
4110 else if (val64 & GPIO_INT_REG_LINK_UP) { 4063 else if (val64 & GPIO_INT_REG_LINK_UP) {
4111 val64 = readq(&bar0->adapter_status); 4064 val64 = readq(&bar0->adapter_status);
4112 if (verify_xena_quiescence(sp, val64,
4113 sp->device_enabled_once)) {
4114 /* Enable Adapter */ 4065 /* Enable Adapter */
4115 val64 = readq(&bar0->adapter_control); 4066 val64 = readq(&bar0->adapter_control);
4116 val64 |= ADAPTER_CNTL_EN; 4067 val64 |= ADAPTER_CNTL_EN;
4117 writeq(val64, &bar0->adapter_control); 4068 writeq(val64, &bar0->adapter_control);
4118 val64 |= ADAPTER_LED_ON; 4069 val64 |= ADAPTER_LED_ON;
4119 writeq(val64, &bar0->adapter_control); 4070 writeq(val64, &bar0->adapter_control);
4120 if (!sp->device_enabled_once) 4071 if (!sp->device_enabled_once)
4121 sp->device_enabled_once = 1; 4072 sp->device_enabled_once = 1;
4122 4073
4123 s2io_link(sp, LINK_UP); 4074 s2io_link(sp, LINK_UP);
4124 /* 4075 /*
4125 * unmask link down interrupt and mask link-up 4076 * unmask link down interrupt and mask link-up
4126 * intr 4077 * intr
4127 */ 4078 */
4128 val64 = readq(&bar0->gpio_int_mask); 4079 val64 = readq(&bar0->gpio_int_mask);
4129 val64 &= ~GPIO_INT_MASK_LINK_DOWN; 4080 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4130 val64 |= GPIO_INT_MASK_LINK_UP; 4081 val64 |= GPIO_INT_MASK_LINK_UP;
4131 writeq(val64, &bar0->gpio_int_mask); 4082 writeq(val64, &bar0->gpio_int_mask);
4132 4083
4133 }
4134 }else if (val64 & GPIO_INT_REG_LINK_DOWN) { 4084 }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4135 val64 = readq(&bar0->adapter_status); 4085 val64 = readq(&bar0->adapter_status);
4136 if (verify_xena_quiescence(sp, val64, 4086 s2io_link(sp, LINK_DOWN);
4137 sp->device_enabled_once)) { 4087 /* Link is down so unmaks link up interrupt */
4138 s2io_link(sp, LINK_DOWN); 4088 val64 = readq(&bar0->gpio_int_mask);
4139 /* Link is down so unmaks link up interrupt */ 4089 val64 &= ~GPIO_INT_MASK_LINK_UP;
4140 val64 = readq(&bar0->gpio_int_mask); 4090 val64 |= GPIO_INT_MASK_LINK_DOWN;
4141 val64 &= ~GPIO_INT_MASK_LINK_UP; 4091 writeq(val64, &bar0->gpio_int_mask);
4142 val64 |= GPIO_INT_MASK_LINK_DOWN;
4143 writeq(val64, &bar0->gpio_int_mask);
4144 }
4145 } 4092 }
4146 } 4093 }
4147 val64 = readq(&bar0->gpio_int_mask); 4094 val64 = readq(&bar0->gpio_int_mask);
@@ -4163,11 +4110,11 @@ static void s2io_txpic_intr_handle(nic_t *sp)
4163static irqreturn_t s2io_isr(int irq, void *dev_id) 4110static irqreturn_t s2io_isr(int irq, void *dev_id)
4164{ 4111{
4165 struct net_device *dev = (struct net_device *) dev_id; 4112 struct net_device *dev = (struct net_device *) dev_id;
4166 nic_t *sp = dev->priv; 4113 struct s2io_nic *sp = dev->priv;
4167 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4114 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4168 int i; 4115 int i;
4169 u64 reason = 0, val64, org_mask; 4116 u64 reason = 0;
4170 mac_info_t *mac_control; 4117 struct mac_info *mac_control;
4171 struct config_param *config; 4118 struct config_param *config;
4172 4119
4173 atomic_inc(&sp->isr_cnt); 4120 atomic_inc(&sp->isr_cnt);
@@ -4185,43 +4132,48 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
4185 reason = readq(&bar0->general_int_status); 4132 reason = readq(&bar0->general_int_status);
4186 4133
4187 if (!reason) { 4134 if (!reason) {
4188 /* The interrupt was not raised by Xena. */ 4135 /* The interrupt was not raised by us. */
4136 atomic_dec(&sp->isr_cnt);
4137 return IRQ_NONE;
4138 }
4139 else if (unlikely(reason == S2IO_MINUS_ONE) ) {
4140 /* Disable device and get out */
4189 atomic_dec(&sp->isr_cnt); 4141 atomic_dec(&sp->isr_cnt);
4190 return IRQ_NONE; 4142 return IRQ_NONE;
4191 } 4143 }
4192 4144
4193 val64 = 0xFFFFFFFFFFFFFFFFULL; 4145 if (napi) {
4194 /* Store current mask before masking all interrupts */ 4146 if (reason & GEN_INTR_RXTRAFFIC) {
4195 org_mask = readq(&bar0->general_int_mask); 4147 if ( likely ( netif_rx_schedule_prep(dev)) ) {
4196 writeq(val64, &bar0->general_int_mask); 4148 __netif_rx_schedule(dev);
4149 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4150 }
4151 else
4152 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4153 }
4154 } else {
4155 /*
4156 * Rx handler is called by default, without checking for the
4157 * cause of interrupt.
4158 * rx_traffic_int reg is an R1 register, writing all 1's
4159 * will ensure that the actual interrupt causing bit get's
4160 * cleared and hence a read can be avoided.
4161 */
4162 if (reason & GEN_INTR_RXTRAFFIC)
4163 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4197 4164
4198#ifdef CONFIG_S2IO_NAPI 4165 for (i = 0; i < config->rx_ring_num; i++) {
4199 if (reason & GEN_INTR_RXTRAFFIC) { 4166 rx_intr_handler(&mac_control->rings[i]);
4200 if (netif_rx_schedule_prep(dev)) {
4201 writeq(val64, &bar0->rx_traffic_mask);
4202 __netif_rx_schedule(dev);
4203 } 4167 }
4204 } 4168 }
4205#else
4206 /*
4207 * Rx handler is called by default, without checking for the
4208 * cause of interrupt.
4209 * rx_traffic_int reg is an R1 register, writing all 1's
4210 * will ensure that the actual interrupt causing bit get's
4211 * cleared and hence a read can be avoided.
4212 */
4213 writeq(val64, &bar0->rx_traffic_int);
4214 for (i = 0; i < config->rx_ring_num; i++) {
4215 rx_intr_handler(&mac_control->rings[i]);
4216 }
4217#endif
4218 4169
4219 /* 4170 /*
4220 * tx_traffic_int reg is an R1 register, writing all 1's 4171 * tx_traffic_int reg is an R1 register, writing all 1's
4221 * will ensure that the actual interrupt causing bit get's 4172 * will ensure that the actual interrupt causing bit get's
4222 * cleared and hence a read can be avoided. 4173 * cleared and hence a read can be avoided.
4223 */ 4174 */
4224 writeq(val64, &bar0->tx_traffic_int); 4175 if (reason & GEN_INTR_TXTRAFFIC)
4176 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4225 4177
4226 for (i = 0; i < config->tx_fifo_num; i++) 4178 for (i = 0; i < config->tx_fifo_num; i++)
4227 tx_intr_handler(&mac_control->fifos[i]); 4179 tx_intr_handler(&mac_control->fifos[i]);
@@ -4233,11 +4185,14 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
4233 * reallocate the buffers from the interrupt handler itself, 4185 * reallocate the buffers from the interrupt handler itself,
4234 * else schedule a tasklet to reallocate the buffers. 4186 * else schedule a tasklet to reallocate the buffers.
4235 */ 4187 */
4236#ifndef CONFIG_S2IO_NAPI 4188 if (!napi) {
4237 for (i = 0; i < config->rx_ring_num; i++) 4189 for (i = 0; i < config->rx_ring_num; i++)
4238 s2io_chk_rx_buffers(sp, i); 4190 s2io_chk_rx_buffers(sp, i);
4239#endif 4191 }
4240 writeq(org_mask, &bar0->general_int_mask); 4192
4193 writeq(0, &bar0->general_int_mask);
4194 readl(&bar0->general_int_status);
4195
4241 atomic_dec(&sp->isr_cnt); 4196 atomic_dec(&sp->isr_cnt);
4242 return IRQ_HANDLED; 4197 return IRQ_HANDLED;
4243} 4198}
@@ -4245,9 +4200,9 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
4245/** 4200/**
4246 * s2io_updt_stats - 4201 * s2io_updt_stats -
4247 */ 4202 */
4248static void s2io_updt_stats(nic_t *sp) 4203static void s2io_updt_stats(struct s2io_nic *sp)
4249{ 4204{
4250 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4205 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4251 u64 val64; 4206 u64 val64;
4252 int cnt = 0; 4207 int cnt = 0;
4253 4208
@@ -4266,7 +4221,7 @@ static void s2io_updt_stats(nic_t *sp)
4266 break; /* Updt failed */ 4221 break; /* Updt failed */
4267 } while(1); 4222 } while(1);
4268 } else { 4223 } else {
4269 memset(sp->mac_control.stats_info, 0, sizeof(StatInfo_t)); 4224 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
4270 } 4225 }
4271} 4226}
4272 4227
@@ -4282,8 +4237,8 @@ static void s2io_updt_stats(nic_t *sp)
4282 4237
4283static struct net_device_stats *s2io_get_stats(struct net_device *dev) 4238static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4284{ 4239{
4285 nic_t *sp = dev->priv; 4240 struct s2io_nic *sp = dev->priv;
4286 mac_info_t *mac_control; 4241 struct mac_info *mac_control;
4287 struct config_param *config; 4242 struct config_param *config;
4288 4243
4289 4244
@@ -4324,8 +4279,8 @@ static void s2io_set_multicast(struct net_device *dev)
4324{ 4279{
4325 int i, j, prev_cnt; 4280 int i, j, prev_cnt;
4326 struct dev_mc_list *mclist; 4281 struct dev_mc_list *mclist;
4327 nic_t *sp = dev->priv; 4282 struct s2io_nic *sp = dev->priv;
4328 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4283 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4329 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask = 4284 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4330 0xfeffffffffffULL; 4285 0xfeffffffffffULL;
4331 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0; 4286 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
@@ -4478,8 +4433,8 @@ static void s2io_set_multicast(struct net_device *dev)
4478 4433
4479static int s2io_set_mac_addr(struct net_device *dev, u8 * addr) 4434static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4480{ 4435{
4481 nic_t *sp = dev->priv; 4436 struct s2io_nic *sp = dev->priv;
4482 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4437 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4483 register u64 val64, mac_addr = 0; 4438 register u64 val64, mac_addr = 0;
4484 int i; 4439 int i;
4485 4440
@@ -4525,7 +4480,7 @@ static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4525static int s2io_ethtool_sset(struct net_device *dev, 4480static int s2io_ethtool_sset(struct net_device *dev,
4526 struct ethtool_cmd *info) 4481 struct ethtool_cmd *info)
4527{ 4482{
4528 nic_t *sp = dev->priv; 4483 struct s2io_nic *sp = dev->priv;
4529 if ((info->autoneg == AUTONEG_ENABLE) || 4484 if ((info->autoneg == AUTONEG_ENABLE) ||
4530 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL)) 4485 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4531 return -EINVAL; 4486 return -EINVAL;
@@ -4551,7 +4506,7 @@ static int s2io_ethtool_sset(struct net_device *dev,
4551 4506
4552static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info) 4507static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4553{ 4508{
4554 nic_t *sp = dev->priv; 4509 struct s2io_nic *sp = dev->priv;
4555 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); 4510 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4556 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); 4511 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4557 info->port = PORT_FIBRE; 4512 info->port = PORT_FIBRE;
@@ -4584,7 +4539,7 @@ static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4584static void s2io_ethtool_gdrvinfo(struct net_device *dev, 4539static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4585 struct ethtool_drvinfo *info) 4540 struct ethtool_drvinfo *info)
4586{ 4541{
4587 nic_t *sp = dev->priv; 4542 struct s2io_nic *sp = dev->priv;
4588 4543
4589 strncpy(info->driver, s2io_driver_name, sizeof(info->driver)); 4544 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4590 strncpy(info->version, s2io_driver_version, sizeof(info->version)); 4545 strncpy(info->version, s2io_driver_version, sizeof(info->version));
@@ -4616,7 +4571,7 @@ static void s2io_ethtool_gregs(struct net_device *dev,
4616 int i; 4571 int i;
4617 u64 reg; 4572 u64 reg;
4618 u8 *reg_space = (u8 *) space; 4573 u8 *reg_space = (u8 *) space;
4619 nic_t *sp = dev->priv; 4574 struct s2io_nic *sp = dev->priv;
4620 4575
4621 regs->len = XENA_REG_SPACE; 4576 regs->len = XENA_REG_SPACE;
4622 regs->version = sp->pdev->subsystem_device; 4577 regs->version = sp->pdev->subsystem_device;
@@ -4638,8 +4593,8 @@ static void s2io_ethtool_gregs(struct net_device *dev,
4638*/ 4593*/
4639static void s2io_phy_id(unsigned long data) 4594static void s2io_phy_id(unsigned long data)
4640{ 4595{
4641 nic_t *sp = (nic_t *) data; 4596 struct s2io_nic *sp = (struct s2io_nic *) data;
4642 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4597 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4643 u64 val64 = 0; 4598 u64 val64 = 0;
4644 u16 subid; 4599 u16 subid;
4645 4600
@@ -4676,8 +4631,8 @@ static void s2io_phy_id(unsigned long data)
4676static int s2io_ethtool_idnic(struct net_device *dev, u32 data) 4631static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
4677{ 4632{
4678 u64 val64 = 0, last_gpio_ctrl_val; 4633 u64 val64 = 0, last_gpio_ctrl_val;
4679 nic_t *sp = dev->priv; 4634 struct s2io_nic *sp = dev->priv;
4680 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4635 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4681 u16 subid; 4636 u16 subid;
4682 4637
4683 subid = sp->pdev->subsystem_device; 4638 subid = sp->pdev->subsystem_device;
@@ -4725,8 +4680,8 @@ static void s2io_ethtool_getpause_data(struct net_device *dev,
4725 struct ethtool_pauseparam *ep) 4680 struct ethtool_pauseparam *ep)
4726{ 4681{
4727 u64 val64; 4682 u64 val64;
4728 nic_t *sp = dev->priv; 4683 struct s2io_nic *sp = dev->priv;
4729 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4684 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4730 4685
4731 val64 = readq(&bar0->rmac_pause_cfg); 4686 val64 = readq(&bar0->rmac_pause_cfg);
4732 if (val64 & RMAC_PAUSE_GEN_ENABLE) 4687 if (val64 & RMAC_PAUSE_GEN_ENABLE)
@@ -4752,8 +4707,8 @@ static int s2io_ethtool_setpause_data(struct net_device *dev,
4752 struct ethtool_pauseparam *ep) 4707 struct ethtool_pauseparam *ep)
4753{ 4708{
4754 u64 val64; 4709 u64 val64;
4755 nic_t *sp = dev->priv; 4710 struct s2io_nic *sp = dev->priv;
4756 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4711 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4757 4712
4758 val64 = readq(&bar0->rmac_pause_cfg); 4713 val64 = readq(&bar0->rmac_pause_cfg);
4759 if (ep->tx_pause) 4714 if (ep->tx_pause)
@@ -4785,12 +4740,12 @@ static int s2io_ethtool_setpause_data(struct net_device *dev,
4785 */ 4740 */
4786 4741
4787#define S2IO_DEV_ID 5 4742#define S2IO_DEV_ID 5
4788static int read_eeprom(nic_t * sp, int off, u64 * data) 4743static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
4789{ 4744{
4790 int ret = -1; 4745 int ret = -1;
4791 u32 exit_cnt = 0; 4746 u32 exit_cnt = 0;
4792 u64 val64; 4747 u64 val64;
4793 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4748 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4794 4749
4795 if (sp->device_type == XFRAME_I_DEVICE) { 4750 if (sp->device_type == XFRAME_I_DEVICE) {
4796 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) | 4751 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
@@ -4850,11 +4805,11 @@ static int read_eeprom(nic_t * sp, int off, u64 * data)
4850 * 0 on success, -1 on failure. 4805 * 0 on success, -1 on failure.
4851 */ 4806 */
4852 4807
4853static int write_eeprom(nic_t * sp, int off, u64 data, int cnt) 4808static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
4854{ 4809{
4855 int exit_cnt = 0, ret = -1; 4810 int exit_cnt = 0, ret = -1;
4856 u64 val64; 4811 u64 val64;
4857 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4812 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4858 4813
4859 if (sp->device_type == XFRAME_I_DEVICE) { 4814 if (sp->device_type == XFRAME_I_DEVICE) {
4860 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) | 4815 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
@@ -4899,7 +4854,7 @@ static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
4899 } 4854 }
4900 return ret; 4855 return ret;
4901} 4856}
4902static void s2io_vpd_read(nic_t *nic) 4857static void s2io_vpd_read(struct s2io_nic *nic)
4903{ 4858{
4904 u8 *vpd_data; 4859 u8 *vpd_data;
4905 u8 data; 4860 u8 data;
@@ -4914,6 +4869,7 @@ static void s2io_vpd_read(nic_t *nic)
4914 strcpy(nic->product_name, "Xframe I 10GbE network adapter"); 4869 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
4915 vpd_addr = 0x50; 4870 vpd_addr = 0x50;
4916 } 4871 }
4872 strcpy(nic->serial_num, "NOT AVAILABLE");
4917 4873
4918 vpd_data = kmalloc(256, GFP_KERNEL); 4874 vpd_data = kmalloc(256, GFP_KERNEL);
4919 if (!vpd_data) 4875 if (!vpd_data)
@@ -4937,7 +4893,22 @@ static void s2io_vpd_read(nic_t *nic)
4937 pci_read_config_dword(nic->pdev, (vpd_addr + 4), 4893 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
4938 (u32 *)&vpd_data[i]); 4894 (u32 *)&vpd_data[i]);
4939 } 4895 }
4940 if ((!fail) && (vpd_data[1] < VPD_PRODUCT_NAME_LEN)) { 4896
4897 if(!fail) {
4898 /* read serial number of adapter */
4899 for (cnt = 0; cnt < 256; cnt++) {
4900 if ((vpd_data[cnt] == 'S') &&
4901 (vpd_data[cnt+1] == 'N') &&
4902 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
4903 memset(nic->serial_num, 0, VPD_STRING_LEN);
4904 memcpy(nic->serial_num, &vpd_data[cnt + 3],
4905 vpd_data[cnt+2]);
4906 break;
4907 }
4908 }
4909 }
4910
4911 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
4941 memset(nic->product_name, 0, vpd_data[1]); 4912 memset(nic->product_name, 0, vpd_data[1]);
4942 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]); 4913 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
4943 } 4914 }
@@ -4962,7 +4933,7 @@ static int s2io_ethtool_geeprom(struct net_device *dev,
4962{ 4933{
4963 u32 i, valid; 4934 u32 i, valid;
4964 u64 data; 4935 u64 data;
4965 nic_t *sp = dev->priv; 4936 struct s2io_nic *sp = dev->priv;
4966 4937
4967 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16); 4938 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
4968 4939
@@ -5000,7 +4971,7 @@ static int s2io_ethtool_seeprom(struct net_device *dev,
5000{ 4971{
5001 int len = eeprom->len, cnt = 0; 4972 int len = eeprom->len, cnt = 0;
5002 u64 valid = 0, data; 4973 u64 valid = 0, data;
5003 nic_t *sp = dev->priv; 4974 struct s2io_nic *sp = dev->priv;
5004 4975
5005 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) { 4976 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5006 DBG_PRINT(ERR_DBG, 4977 DBG_PRINT(ERR_DBG,
@@ -5044,9 +5015,9 @@ static int s2io_ethtool_seeprom(struct net_device *dev,
5044 * 0 on success. 5015 * 0 on success.
5045 */ 5016 */
5046 5017
5047static int s2io_register_test(nic_t * sp, uint64_t * data) 5018static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5048{ 5019{
5049 XENA_dev_config_t __iomem *bar0 = sp->bar0; 5020 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5050 u64 val64 = 0, exp_val; 5021 u64 val64 = 0, exp_val;
5051 int fail = 0; 5022 int fail = 0;
5052 5023
@@ -5111,7 +5082,7 @@ static int s2io_register_test(nic_t * sp, uint64_t * data)
5111 * 0 on success. 5082 * 0 on success.
5112 */ 5083 */
5113 5084
5114static int s2io_eeprom_test(nic_t * sp, uint64_t * data) 5085static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5115{ 5086{
5116 int fail = 0; 5087 int fail = 0;
5117 u64 ret_data, org_4F0, org_7F0; 5088 u64 ret_data, org_4F0, org_7F0;
@@ -5213,7 +5184,7 @@ static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
5213 * 0 on success and -1 on failure. 5184 * 0 on success and -1 on failure.
5214 */ 5185 */
5215 5186
5216static int s2io_bist_test(nic_t * sp, uint64_t * data) 5187static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5217{ 5188{
5218 u8 bist = 0; 5189 u8 bist = 0;
5219 int cnt = 0, ret = -1; 5190 int cnt = 0, ret = -1;
@@ -5249,9 +5220,9 @@ static int s2io_bist_test(nic_t * sp, uint64_t * data)
5249 * 0 on success. 5220 * 0 on success.
5250 */ 5221 */
5251 5222
5252static int s2io_link_test(nic_t * sp, uint64_t * data) 5223static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5253{ 5224{
5254 XENA_dev_config_t __iomem *bar0 = sp->bar0; 5225 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5255 u64 val64; 5226 u64 val64;
5256 5227
5257 val64 = readq(&bar0->adapter_status); 5228 val64 = readq(&bar0->adapter_status);
@@ -5276,9 +5247,9 @@ static int s2io_link_test(nic_t * sp, uint64_t * data)
5276 * 0 on success. 5247 * 0 on success.
5277 */ 5248 */
5278 5249
5279static int s2io_rldram_test(nic_t * sp, uint64_t * data) 5250static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
5280{ 5251{
5281 XENA_dev_config_t __iomem *bar0 = sp->bar0; 5252 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5282 u64 val64; 5253 u64 val64;
5283 int cnt, iteration = 0, test_fail = 0; 5254 int cnt, iteration = 0, test_fail = 0;
5284 5255
@@ -5380,7 +5351,7 @@ static void s2io_ethtool_test(struct net_device *dev,
5380 struct ethtool_test *ethtest, 5351 struct ethtool_test *ethtest,
5381 uint64_t * data) 5352 uint64_t * data)
5382{ 5353{
5383 nic_t *sp = dev->priv; 5354 struct s2io_nic *sp = dev->priv;
5384 int orig_state = netif_running(sp->dev); 5355 int orig_state = netif_running(sp->dev);
5385 5356
5386 if (ethtest->flags == ETH_TEST_FL_OFFLINE) { 5357 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
@@ -5436,8 +5407,8 @@ static void s2io_get_ethtool_stats(struct net_device *dev,
5436 u64 * tmp_stats) 5407 u64 * tmp_stats)
5437{ 5408{
5438 int i = 0; 5409 int i = 0;
5439 nic_t *sp = dev->priv; 5410 struct s2io_nic *sp = dev->priv;
5440 StatInfo_t *stat_info = sp->mac_control.stats_info; 5411 struct stat_block *stat_info = sp->mac_control.stats_info;
5441 5412
5442 s2io_updt_stats(sp); 5413 s2io_updt_stats(sp);
5443 tmp_stats[i++] = 5414 tmp_stats[i++] =
@@ -5664,14 +5635,14 @@ static int s2io_ethtool_get_regs_len(struct net_device *dev)
5664 5635
5665static u32 s2io_ethtool_get_rx_csum(struct net_device * dev) 5636static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5666{ 5637{
5667 nic_t *sp = dev->priv; 5638 struct s2io_nic *sp = dev->priv;
5668 5639
5669 return (sp->rx_csum); 5640 return (sp->rx_csum);
5670} 5641}
5671 5642
5672static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data) 5643static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5673{ 5644{
5674 nic_t *sp = dev->priv; 5645 struct s2io_nic *sp = dev->priv;
5675 5646
5676 if (data) 5647 if (data)
5677 sp->rx_csum = 1; 5648 sp->rx_csum = 1;
@@ -5750,10 +5721,8 @@ static const struct ethtool_ops netdev_ethtool_ops = {
5750 .set_tx_csum = s2io_ethtool_op_set_tx_csum, 5721 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
5751 .get_sg = ethtool_op_get_sg, 5722 .get_sg = ethtool_op_get_sg,
5752 .set_sg = ethtool_op_set_sg, 5723 .set_sg = ethtool_op_set_sg,
5753#ifdef NETIF_F_TSO
5754 .get_tso = s2io_ethtool_op_get_tso, 5724 .get_tso = s2io_ethtool_op_get_tso,
5755 .set_tso = s2io_ethtool_op_set_tso, 5725 .set_tso = s2io_ethtool_op_set_tso,
5756#endif
5757 .get_ufo = ethtool_op_get_ufo, 5726 .get_ufo = ethtool_op_get_ufo,
5758 .set_ufo = ethtool_op_set_ufo, 5727 .set_ufo = ethtool_op_set_ufo,
5759 .self_test_count = s2io_ethtool_self_test_count, 5728 .self_test_count = s2io_ethtool_self_test_count,
@@ -5794,7 +5763,7 @@ static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5794 5763
5795static int s2io_change_mtu(struct net_device *dev, int new_mtu) 5764static int s2io_change_mtu(struct net_device *dev, int new_mtu)
5796{ 5765{
5797 nic_t *sp = dev->priv; 5766 struct s2io_nic *sp = dev->priv;
5798 5767
5799 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) { 5768 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
5800 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", 5769 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
@@ -5813,7 +5782,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
5813 if (netif_queue_stopped(dev)) 5782 if (netif_queue_stopped(dev))
5814 netif_wake_queue(dev); 5783 netif_wake_queue(dev);
5815 } else { /* Device is down */ 5784 } else { /* Device is down */
5816 XENA_dev_config_t __iomem *bar0 = sp->bar0; 5785 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5817 u64 val64 = new_mtu; 5786 u64 val64 = new_mtu;
5818 5787
5819 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len); 5788 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
@@ -5838,9 +5807,9 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
5838static void s2io_tasklet(unsigned long dev_addr) 5807static void s2io_tasklet(unsigned long dev_addr)
5839{ 5808{
5840 struct net_device *dev = (struct net_device *) dev_addr; 5809 struct net_device *dev = (struct net_device *) dev_addr;
5841 nic_t *sp = dev->priv; 5810 struct s2io_nic *sp = dev->priv;
5842 int i, ret; 5811 int i, ret;
5843 mac_info_t *mac_control; 5812 struct mac_info *mac_control;
5844 struct config_param *config; 5813 struct config_param *config;
5845 5814
5846 mac_control = &sp->mac_control; 5815 mac_control = &sp->mac_control;
@@ -5873,9 +5842,9 @@ static void s2io_tasklet(unsigned long dev_addr)
5873 5842
5874static void s2io_set_link(struct work_struct *work) 5843static void s2io_set_link(struct work_struct *work)
5875{ 5844{
5876 nic_t *nic = container_of(work, nic_t, set_link_task); 5845 struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
5877 struct net_device *dev = nic->dev; 5846 struct net_device *dev = nic->dev;
5878 XENA_dev_config_t __iomem *bar0 = nic->bar0; 5847 struct XENA_dev_config __iomem *bar0 = nic->bar0;
5879 register u64 val64; 5848 register u64 val64;
5880 u16 subid; 5849 u16 subid;
5881 5850
@@ -5894,57 +5863,53 @@ static void s2io_set_link(struct work_struct *work)
5894 } 5863 }
5895 5864
5896 val64 = readq(&bar0->adapter_status); 5865 val64 = readq(&bar0->adapter_status);
5897 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) { 5866 if (LINK_IS_UP(val64)) {
5898 if (LINK_IS_UP(val64)) { 5867 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
5899 val64 = readq(&bar0->adapter_control); 5868 if (verify_xena_quiescence(nic)) {
5900 val64 |= ADAPTER_CNTL_EN; 5869 val64 = readq(&bar0->adapter_control);
5901 writeq(val64, &bar0->adapter_control); 5870 val64 |= ADAPTER_CNTL_EN;
5902 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
5903 subid)) {
5904 val64 = readq(&bar0->gpio_control);
5905 val64 |= GPIO_CTRL_GPIO_0;
5906 writeq(val64, &bar0->gpio_control);
5907 val64 = readq(&bar0->gpio_control);
5908 } else {
5909 val64 |= ADAPTER_LED_ON;
5910 writeq(val64, &bar0->adapter_control); 5871 writeq(val64, &bar0->adapter_control);
5911 } 5872 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
5912 if (s2io_link_fault_indication(nic) == 5873 nic->device_type, subid)) {
5913 MAC_RMAC_ERR_TIMER) { 5874 val64 = readq(&bar0->gpio_control);
5914 val64 = readq(&bar0->adapter_status); 5875 val64 |= GPIO_CTRL_GPIO_0;
5915 if (!LINK_IS_UP(val64)) { 5876 writeq(val64, &bar0->gpio_control);
5916 DBG_PRINT(ERR_DBG, "%s:", dev->name); 5877 val64 = readq(&bar0->gpio_control);
5917 DBG_PRINT(ERR_DBG, " Link down"); 5878 } else {
5918 DBG_PRINT(ERR_DBG, "after "); 5879 val64 |= ADAPTER_LED_ON;
5919 DBG_PRINT(ERR_DBG, "enabling "); 5880 writeq(val64, &bar0->adapter_control);
5920 DBG_PRINT(ERR_DBG, "device \n");
5921 } 5881 }
5922 }
5923 if (nic->device_enabled_once == FALSE) {
5924 nic->device_enabled_once = TRUE; 5882 nic->device_enabled_once = TRUE;
5883 } else {
5884 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
5885 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
5886 netif_stop_queue(dev);
5925 } 5887 }
5888 }
5889 val64 = readq(&bar0->adapter_status);
5890 if (!LINK_IS_UP(val64)) {
5891 DBG_PRINT(ERR_DBG, "%s:", dev->name);
5892 DBG_PRINT(ERR_DBG, " Link down after enabling ");
5893 DBG_PRINT(ERR_DBG, "device \n");
5894 } else
5926 s2io_link(nic, LINK_UP); 5895 s2io_link(nic, LINK_UP);
5927 } else { 5896 } else {
5928 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type, 5897 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
5929 subid)) { 5898 subid)) {
5930 val64 = readq(&bar0->gpio_control); 5899 val64 = readq(&bar0->gpio_control);
5931 val64 &= ~GPIO_CTRL_GPIO_0; 5900 val64 &= ~GPIO_CTRL_GPIO_0;
5932 writeq(val64, &bar0->gpio_control); 5901 writeq(val64, &bar0->gpio_control);
5933 val64 = readq(&bar0->gpio_control); 5902 val64 = readq(&bar0->gpio_control);
5934 }
5935 s2io_link(nic, LINK_DOWN);
5936 } 5903 }
5937 } else { /* NIC is not Quiescent. */ 5904 s2io_link(nic, LINK_DOWN);
5938 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
5939 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
5940 netif_stop_queue(dev);
5941 } 5905 }
5942 clear_bit(0, &(nic->link_state)); 5906 clear_bit(0, &(nic->link_state));
5943} 5907}
5944 5908
5945static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba, 5909static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
5946 struct sk_buff **skb, u64 *temp0, u64 *temp1, 5910 struct buffAdd *ba,
5947 u64 *temp2, int size) 5911 struct sk_buff **skb, u64 *temp0, u64 *temp1,
5912 u64 *temp2, int size)
5948{ 5913{
5949 struct net_device *dev = sp->dev; 5914 struct net_device *dev = sp->dev;
5950 struct sk_buff *frag_list; 5915 struct sk_buff *frag_list;
@@ -5958,7 +5923,7 @@ static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
5958 * using same mapped address for the Rxd 5923 * using same mapped address for the Rxd
5959 * buffer pointer 5924 * buffer pointer
5960 */ 5925 */
5961 ((RxD1_t*)rxdp)->Buffer0_ptr = *temp0; 5926 ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0;
5962 } else { 5927 } else {
5963 *skb = dev_alloc_skb(size); 5928 *skb = dev_alloc_skb(size);
5964 if (!(*skb)) { 5929 if (!(*skb)) {
@@ -5970,7 +5935,7 @@ static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
5970 * such it will be used for next rxd whose 5935 * such it will be used for next rxd whose
5971 * Host Control is NULL 5936 * Host Control is NULL
5972 */ 5937 */
5973 ((RxD1_t*)rxdp)->Buffer0_ptr = *temp0 = 5938 ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0 =
5974 pci_map_single( sp->pdev, (*skb)->data, 5939 pci_map_single( sp->pdev, (*skb)->data,
5975 size - NET_IP_ALIGN, 5940 size - NET_IP_ALIGN,
5976 PCI_DMA_FROMDEVICE); 5941 PCI_DMA_FROMDEVICE);
@@ -5979,36 +5944,36 @@ static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
5979 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) { 5944 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
5980 /* Two buffer Mode */ 5945 /* Two buffer Mode */
5981 if (*skb) { 5946 if (*skb) {
5982 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2; 5947 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
5983 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0; 5948 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
5984 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1; 5949 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
5985 } else { 5950 } else {
5986 *skb = dev_alloc_skb(size); 5951 *skb = dev_alloc_skb(size);
5987 if (!(*skb)) { 5952 if (!(*skb)) {
5988 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n", 5953 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
5989 dev->name); 5954 dev->name);
5990 return -ENOMEM; 5955 return -ENOMEM;
5991 } 5956 }
5992 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 = 5957 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
5993 pci_map_single(sp->pdev, (*skb)->data, 5958 pci_map_single(sp->pdev, (*skb)->data,
5994 dev->mtu + 4, 5959 dev->mtu + 4,
5995 PCI_DMA_FROMDEVICE); 5960 PCI_DMA_FROMDEVICE);
5996 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 = 5961 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
5997 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN, 5962 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
5998 PCI_DMA_FROMDEVICE); 5963 PCI_DMA_FROMDEVICE);
5999 rxdp->Host_Control = (unsigned long) (*skb); 5964 rxdp->Host_Control = (unsigned long) (*skb);
6000 5965
6001 /* Buffer-1 will be dummy buffer not used */ 5966 /* Buffer-1 will be dummy buffer not used */
6002 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 = 5967 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
6003 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN, 5968 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6004 PCI_DMA_FROMDEVICE); 5969 PCI_DMA_FROMDEVICE);
6005 } 5970 }
6006 } else if ((rxdp->Host_Control == 0)) { 5971 } else if ((rxdp->Host_Control == 0)) {
6007 /* Three buffer mode */ 5972 /* Three buffer mode */
6008 if (*skb) { 5973 if (*skb) {
6009 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0; 5974 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
6010 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1; 5975 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
6011 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2; 5976 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
6012 } else { 5977 } else {
6013 *skb = dev_alloc_skb(size); 5978 *skb = dev_alloc_skb(size);
6014 if (!(*skb)) { 5979 if (!(*skb)) {
@@ -6016,11 +5981,11 @@ static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
6016 dev->name); 5981 dev->name);
6017 return -ENOMEM; 5982 return -ENOMEM;
6018 } 5983 }
6019 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 = 5984 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
6020 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN, 5985 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6021 PCI_DMA_FROMDEVICE); 5986 PCI_DMA_FROMDEVICE);
6022 /* Buffer-1 receives L3/L4 headers */ 5987 /* Buffer-1 receives L3/L4 headers */
6023 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 = 5988 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
6024 pci_map_single( sp->pdev, (*skb)->data, 5989 pci_map_single( sp->pdev, (*skb)->data,
6025 l3l4hdr_size + 4, 5990 l3l4hdr_size + 4,
6026 PCI_DMA_FROMDEVICE); 5991 PCI_DMA_FROMDEVICE);
@@ -6040,14 +6005,15 @@ static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
6040 /* 6005 /*
6041 * Buffer-2 receives L4 data payload 6006 * Buffer-2 receives L4 data payload
6042 */ 6007 */
6043 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 = 6008 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
6044 pci_map_single( sp->pdev, frag_list->data, 6009 pci_map_single( sp->pdev, frag_list->data,
6045 dev->mtu, PCI_DMA_FROMDEVICE); 6010 dev->mtu, PCI_DMA_FROMDEVICE);
6046 } 6011 }
6047 } 6012 }
6048 return 0; 6013 return 0;
6049} 6014}
6050static void set_rxd_buffer_size(nic_t *sp, RxD_t *rxdp, int size) 6015static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6016 int size)
6051{ 6017{
6052 struct net_device *dev = sp->dev; 6018 struct net_device *dev = sp->dev;
6053 if (sp->rxd_mode == RXD_MODE_1) { 6019 if (sp->rxd_mode == RXD_MODE_1) {
@@ -6063,15 +6029,15 @@ static void set_rxd_buffer_size(nic_t *sp, RxD_t *rxdp, int size)
6063 } 6029 }
6064} 6030}
6065 6031
6066static int rxd_owner_bit_reset(nic_t *sp) 6032static int rxd_owner_bit_reset(struct s2io_nic *sp)
6067{ 6033{
6068 int i, j, k, blk_cnt = 0, size; 6034 int i, j, k, blk_cnt = 0, size;
6069 mac_info_t * mac_control = &sp->mac_control; 6035 struct mac_info * mac_control = &sp->mac_control;
6070 struct config_param *config = &sp->config; 6036 struct config_param *config = &sp->config;
6071 struct net_device *dev = sp->dev; 6037 struct net_device *dev = sp->dev;
6072 RxD_t *rxdp = NULL; 6038 struct RxD_t *rxdp = NULL;
6073 struct sk_buff *skb = NULL; 6039 struct sk_buff *skb = NULL;
6074 buffAdd_t *ba = NULL; 6040 struct buffAdd *ba = NULL;
6075 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0; 6041 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6076 6042
6077 /* Calculate the size based on ring mode */ 6043 /* Calculate the size based on ring mode */
@@ -6110,7 +6076,7 @@ static int rxd_owner_bit_reset(nic_t *sp)
6110 6076
6111} 6077}
6112 6078
6113static int s2io_add_isr(nic_t * sp) 6079static int s2io_add_isr(struct s2io_nic * sp)
6114{ 6080{
6115 int ret = 0; 6081 int ret = 0;
6116 struct net_device *dev = sp->dev; 6082 struct net_device *dev = sp->dev;
@@ -6125,7 +6091,7 @@ static int s2io_add_isr(nic_t * sp)
6125 sp->intr_type = INTA; 6091 sp->intr_type = INTA;
6126 } 6092 }
6127 6093
6128 /* Store the values of the MSIX table in the nic_t structure */ 6094 /* Store the values of the MSIX table in the struct s2io_nic structure */
6129 store_xmsi_data(sp); 6095 store_xmsi_data(sp);
6130 6096
6131 /* After proper initialization of H/W, register ISR */ 6097 /* After proper initialization of H/W, register ISR */
@@ -6180,7 +6146,7 @@ static int s2io_add_isr(nic_t * sp)
6180 } 6146 }
6181 return 0; 6147 return 0;
6182} 6148}
6183static void s2io_rem_isr(nic_t * sp) 6149static void s2io_rem_isr(struct s2io_nic * sp)
6184{ 6150{
6185 int cnt = 0; 6151 int cnt = 0;
6186 struct net_device *dev = sp->dev; 6152 struct net_device *dev = sp->dev;
@@ -6222,10 +6188,10 @@ static void s2io_rem_isr(nic_t * sp)
6222 } while(cnt < 5); 6188 } while(cnt < 5);
6223} 6189}
6224 6190
6225static void s2io_card_down(nic_t * sp) 6191static void s2io_card_down(struct s2io_nic * sp)
6226{ 6192{
6227 int cnt = 0; 6193 int cnt = 0;
6228 XENA_dev_config_t __iomem *bar0 = sp->bar0; 6194 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6229 unsigned long flags; 6195 unsigned long flags;
6230 register u64 val64 = 0; 6196 register u64 val64 = 0;
6231 6197
@@ -6256,7 +6222,8 @@ static void s2io_card_down(nic_t * sp)
6256 rxd_owner_bit_reset(sp); 6222 rxd_owner_bit_reset(sp);
6257 6223
6258 val64 = readq(&bar0->adapter_status); 6224 val64 = readq(&bar0->adapter_status);
6259 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) { 6225 if (verify_xena_quiescence(sp)) {
6226 if(verify_pcc_quiescent(sp, sp->device_enabled_once))
6260 break; 6227 break;
6261 } 6228 }
6262 6229
@@ -6285,10 +6252,10 @@ static void s2io_card_down(nic_t * sp)
6285 clear_bit(0, &(sp->link_state)); 6252 clear_bit(0, &(sp->link_state));
6286} 6253}
6287 6254
6288static int s2io_card_up(nic_t * sp) 6255static int s2io_card_up(struct s2io_nic * sp)
6289{ 6256{
6290 int i, ret = 0; 6257 int i, ret = 0;
6291 mac_info_t *mac_control; 6258 struct mac_info *mac_control;
6292 struct config_param *config; 6259 struct config_param *config;
6293 struct net_device *dev = (struct net_device *) sp->dev; 6260 struct net_device *dev = (struct net_device *) sp->dev;
6294 u16 interruptible; 6261 u16 interruptible;
@@ -6319,6 +6286,13 @@ static int s2io_card_up(nic_t * sp)
6319 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i, 6286 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
6320 atomic_read(&sp->rx_bufs_left[i])); 6287 atomic_read(&sp->rx_bufs_left[i]));
6321 } 6288 }
6289 /* Maintain the state prior to the open */
6290 if (sp->promisc_flg)
6291 sp->promisc_flg = 0;
6292 if (sp->m_cast_flg) {
6293 sp->m_cast_flg = 0;
6294 sp->all_multi_pos= 0;
6295 }
6322 6296
6323 /* Setting its receive mode */ 6297 /* Setting its receive mode */
6324 s2io_set_multicast(dev); 6298 s2io_set_multicast(dev);
@@ -6380,7 +6354,7 @@ static int s2io_card_up(nic_t * sp)
6380 6354
6381static void s2io_restart_nic(struct work_struct *work) 6355static void s2io_restart_nic(struct work_struct *work)
6382{ 6356{
6383 nic_t *sp = container_of(work, nic_t, rst_timer_task); 6357 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
6384 struct net_device *dev = sp->dev; 6358 struct net_device *dev = sp->dev;
6385 6359
6386 s2io_card_down(sp); 6360 s2io_card_down(sp);
@@ -6409,7 +6383,7 @@ static void s2io_restart_nic(struct work_struct *work)
6409 6383
6410static void s2io_tx_watchdog(struct net_device *dev) 6384static void s2io_tx_watchdog(struct net_device *dev)
6411{ 6385{
6412 nic_t *sp = dev->priv; 6386 struct s2io_nic *sp = dev->priv;
6413 6387
6414 if (netif_carrier_ok(dev)) { 6388 if (netif_carrier_ok(dev)) {
6415 schedule_work(&sp->rst_timer_task); 6389 schedule_work(&sp->rst_timer_task);
@@ -6434,16 +6408,16 @@ static void s2io_tx_watchdog(struct net_device *dev)
6434 * Return value: 6408 * Return value:
6435 * SUCCESS on success and -1 on failure. 6409 * SUCCESS on success and -1 on failure.
6436 */ 6410 */
6437static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp) 6411static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
6438{ 6412{
6439 nic_t *sp = ring_data->nic; 6413 struct s2io_nic *sp = ring_data->nic;
6440 struct net_device *dev = (struct net_device *) sp->dev; 6414 struct net_device *dev = (struct net_device *) sp->dev;
6441 struct sk_buff *skb = (struct sk_buff *) 6415 struct sk_buff *skb = (struct sk_buff *)
6442 ((unsigned long) rxdp->Host_Control); 6416 ((unsigned long) rxdp->Host_Control);
6443 int ring_no = ring_data->ring_no; 6417 int ring_no = ring_data->ring_no;
6444 u16 l3_csum, l4_csum; 6418 u16 l3_csum, l4_csum;
6445 unsigned long long err = rxdp->Control_1 & RXD_T_CODE; 6419 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
6446 lro_t *lro; 6420 struct lro *lro;
6447 6421
6448 skb->dev = dev; 6422 skb->dev = dev;
6449 6423
@@ -6488,7 +6462,7 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
6488 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2); 6462 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
6489 unsigned char *buff = skb_push(skb, buf0_len); 6463 unsigned char *buff = skb_push(skb, buf0_len);
6490 6464
6491 buffAdd_t *ba = &ring_data->ba[get_block][get_off]; 6465 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
6492 sp->stats.rx_bytes += buf0_len + buf2_len; 6466 sp->stats.rx_bytes += buf0_len + buf2_len;
6493 memcpy(buff, ba->ba_0, buf0_len); 6467 memcpy(buff, ba->ba_0, buf0_len);
6494 6468
@@ -6498,7 +6472,6 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
6498 skb_put(skb, buf1_len); 6472 skb_put(skb, buf1_len);
6499 skb->len += buf2_len; 6473 skb->len += buf2_len;
6500 skb->data_len += buf2_len; 6474 skb->data_len += buf2_len;
6501 skb->truesize += buf2_len;
6502 skb_put(skb_shinfo(skb)->frag_list, buf2_len); 6475 skb_put(skb_shinfo(skb)->frag_list, buf2_len);
6503 sp->stats.rx_bytes += buf1_len; 6476 sp->stats.rx_bytes += buf1_len;
6504 6477
@@ -6582,23 +6555,20 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
6582 6555
6583 if (!sp->lro) { 6556 if (!sp->lro) {
6584 skb->protocol = eth_type_trans(skb, dev); 6557 skb->protocol = eth_type_trans(skb, dev);
6585#ifdef CONFIG_S2IO_NAPI
6586 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
6587 /* Queueing the vlan frame to the upper layer */
6588 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
6589 RXD_GET_VLAN_TAG(rxdp->Control_2));
6590 } else {
6591 netif_receive_skb(skb);
6592 }
6593#else
6594 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) { 6558 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
6595 /* Queueing the vlan frame to the upper layer */ 6559 /* Queueing the vlan frame to the upper layer */
6596 vlan_hwaccel_rx(skb, sp->vlgrp, 6560 if (napi)
6597 RXD_GET_VLAN_TAG(rxdp->Control_2)); 6561 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
6562 RXD_GET_VLAN_TAG(rxdp->Control_2));
6563 else
6564 vlan_hwaccel_rx(skb, sp->vlgrp,
6565 RXD_GET_VLAN_TAG(rxdp->Control_2));
6598 } else { 6566 } else {
6599 netif_rx(skb); 6567 if (napi)
6568 netif_receive_skb(skb);
6569 else
6570 netif_rx(skb);
6600 } 6571 }
6601#endif
6602 } else { 6572 } else {
6603send_up: 6573send_up:
6604 queue_rx_frame(skb); 6574 queue_rx_frame(skb);
@@ -6622,7 +6592,7 @@ aggregate:
6622 * void. 6592 * void.
6623 */ 6593 */
6624 6594
6625static void s2io_link(nic_t * sp, int link) 6595static void s2io_link(struct s2io_nic * sp, int link)
6626{ 6596{
6627 struct net_device *dev = (struct net_device *) sp->dev; 6597 struct net_device *dev = (struct net_device *) sp->dev;
6628 6598
@@ -6666,7 +6636,7 @@ static int get_xena_rev_id(struct pci_dev *pdev)
6666 * void 6636 * void
6667 */ 6637 */
6668 6638
6669static void s2io_init_pci(nic_t * sp) 6639static void s2io_init_pci(struct s2io_nic * sp)
6670{ 6640{
6671 u16 pci_cmd = 0, pcix_cmd = 0; 6641 u16 pci_cmd = 0, pcix_cmd = 0;
6672 6642
@@ -6699,13 +6669,9 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6699 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n"); 6669 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
6700 rx_ring_num = 8; 6670 rx_ring_num = 8;
6701 } 6671 }
6702#ifdef CONFIG_S2IO_NAPI 6672 if (*dev_intr_type != INTA)
6703 if (*dev_intr_type != INTA) { 6673 napi = 0;
6704 DBG_PRINT(ERR_DBG, "s2io: NAPI cannot be enabled when " 6674
6705 "MSI/MSI-X is enabled. Defaulting to INTA\n");
6706 *dev_intr_type = INTA;
6707 }
6708#endif
6709#ifndef CONFIG_PCI_MSI 6675#ifndef CONFIG_PCI_MSI
6710 if (*dev_intr_type != INTA) { 6676 if (*dev_intr_type != INTA) {
6711 DBG_PRINT(ERR_DBG, "s2io: This kernel does not support" 6677 DBG_PRINT(ERR_DBG, "s2io: This kernel does not support"
@@ -6726,6 +6692,8 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6726 "Defaulting to INTA\n"); 6692 "Defaulting to INTA\n");
6727 *dev_intr_type = INTA; 6693 *dev_intr_type = INTA;
6728 } 6694 }
6695 if ( (rx_ring_num > 1) && (*dev_intr_type != INTA) )
6696 napi = 0;
6729 if (rx_ring_mode > 3) { 6697 if (rx_ring_mode > 3) {
6730 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n"); 6698 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
6731 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n"); 6699 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
@@ -6751,15 +6719,15 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6751static int __devinit 6719static int __devinit
6752s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) 6720s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6753{ 6721{
6754 nic_t *sp; 6722 struct s2io_nic *sp;
6755 struct net_device *dev; 6723 struct net_device *dev;
6756 int i, j, ret; 6724 int i, j, ret;
6757 int dma_flag = FALSE; 6725 int dma_flag = FALSE;
6758 u32 mac_up, mac_down; 6726 u32 mac_up, mac_down;
6759 u64 val64 = 0, tmp64 = 0; 6727 u64 val64 = 0, tmp64 = 0;
6760 XENA_dev_config_t __iomem *bar0 = NULL; 6728 struct XENA_dev_config __iomem *bar0 = NULL;
6761 u16 subid; 6729 u16 subid;
6762 mac_info_t *mac_control; 6730 struct mac_info *mac_control;
6763 struct config_param *config; 6731 struct config_param *config;
6764 int mode; 6732 int mode;
6765 u8 dev_intr_type = intr_type; 6733 u8 dev_intr_type = intr_type;
@@ -6814,7 +6782,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6814 } 6782 }
6815 } 6783 }
6816 6784
6817 dev = alloc_etherdev(sizeof(nic_t)); 6785 dev = alloc_etherdev(sizeof(struct s2io_nic));
6818 if (dev == NULL) { 6786 if (dev == NULL) {
6819 DBG_PRINT(ERR_DBG, "Device allocation failed\n"); 6787 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
6820 pci_disable_device(pdev); 6788 pci_disable_device(pdev);
@@ -6829,7 +6797,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6829 6797
6830 /* Private member variable initialized to s2io NIC structure */ 6798 /* Private member variable initialized to s2io NIC structure */
6831 sp = dev->priv; 6799 sp = dev->priv;
6832 memset(sp, 0, sizeof(nic_t)); 6800 memset(sp, 0, sizeof(struct s2io_nic));
6833 sp->dev = dev; 6801 sp->dev = dev;
6834 sp->pdev = pdev; 6802 sp->pdev = pdev;
6835 sp->high_dma_flag = dma_flag; 6803 sp->high_dma_flag = dma_flag;
@@ -6925,7 +6893,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6925 sp->bar0 = ioremap(pci_resource_start(pdev, 0), 6893 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
6926 pci_resource_len(pdev, 0)); 6894 pci_resource_len(pdev, 0));
6927 if (!sp->bar0) { 6895 if (!sp->bar0) {
6928 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n", 6896 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
6929 dev->name); 6897 dev->name);
6930 ret = -ENOMEM; 6898 ret = -ENOMEM;
6931 goto bar0_remap_failed; 6899 goto bar0_remap_failed;
@@ -6934,7 +6902,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6934 sp->bar1 = ioremap(pci_resource_start(pdev, 2), 6902 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
6935 pci_resource_len(pdev, 2)); 6903 pci_resource_len(pdev, 2));
6936 if (!sp->bar1) { 6904 if (!sp->bar1) {
6937 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n", 6905 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
6938 dev->name); 6906 dev->name);
6939 ret = -ENOMEM; 6907 ret = -ENOMEM;
6940 goto bar1_remap_failed; 6908 goto bar1_remap_failed;
@@ -6945,7 +6913,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6945 6913
6946 /* Initializing the BAR1 address as the start of the FIFO pointer. */ 6914 /* Initializing the BAR1 address as the start of the FIFO pointer. */
6947 for (j = 0; j < MAX_TX_FIFOS; j++) { 6915 for (j = 0; j < MAX_TX_FIFOS; j++) {
6948 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *) 6916 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
6949 (sp->bar1 + (j * 0x00020000)); 6917 (sp->bar1 + (j * 0x00020000));
6950 } 6918 }
6951 6919
@@ -6966,10 +6934,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6966 * will use eth_mac_addr() for dev->set_mac_address 6934 * will use eth_mac_addr() for dev->set_mac_address
6967 * mac address will be set every time dev->open() is called 6935 * mac address will be set every time dev->open() is called
6968 */ 6936 */
6969#if defined(CONFIG_S2IO_NAPI)
6970 dev->poll = s2io_poll; 6937 dev->poll = s2io_poll;
6971 dev->weight = 32; 6938 dev->weight = 32;
6972#endif
6973 6939
6974#ifdef CONFIG_NET_POLL_CONTROLLER 6940#ifdef CONFIG_NET_POLL_CONTROLLER
6975 dev->poll_controller = s2io_netpoll; 6941 dev->poll_controller = s2io_netpoll;
@@ -6978,13 +6944,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6978 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 6944 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
6979 if (sp->high_dma_flag == TRUE) 6945 if (sp->high_dma_flag == TRUE)
6980 dev->features |= NETIF_F_HIGHDMA; 6946 dev->features |= NETIF_F_HIGHDMA;
6981#ifdef NETIF_F_TSO
6982 dev->features |= NETIF_F_TSO; 6947 dev->features |= NETIF_F_TSO;
6983#endif
6984#ifdef NETIF_F_TSO6
6985 dev->features |= NETIF_F_TSO6; 6948 dev->features |= NETIF_F_TSO6;
6986#endif 6949 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
6987 if (sp->device_type & XFRAME_II_DEVICE) {
6988 dev->features |= NETIF_F_UFO; 6950 dev->features |= NETIF_F_UFO;
6989 dev->features |= NETIF_F_HW_CSUM; 6951 dev->features |= NETIF_F_HW_CSUM;
6990 } 6952 }
@@ -7065,9 +7027,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7065 7027
7066 /* Initialize spinlocks */ 7028 /* Initialize spinlocks */
7067 spin_lock_init(&sp->tx_lock); 7029 spin_lock_init(&sp->tx_lock);
7068#ifndef CONFIG_S2IO_NAPI 7030
7069 spin_lock_init(&sp->put_lock); 7031 if (!napi)
7070#endif 7032 spin_lock_init(&sp->put_lock);
7071 spin_lock_init(&sp->rx_lock); 7033 spin_lock_init(&sp->rx_lock);
7072 7034
7073 /* 7035 /*
@@ -7098,13 +7060,14 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7098 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name, 7060 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7099 s2io_driver_version); 7061 s2io_driver_version);
7100 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: " 7062 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
7101 "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, 7063 "%02x:%02x:%02x:%02x:%02x:%02x", dev->name,
7102 sp->def_mac_addr[0].mac_addr[0], 7064 sp->def_mac_addr[0].mac_addr[0],
7103 sp->def_mac_addr[0].mac_addr[1], 7065 sp->def_mac_addr[0].mac_addr[1],
7104 sp->def_mac_addr[0].mac_addr[2], 7066 sp->def_mac_addr[0].mac_addr[2],
7105 sp->def_mac_addr[0].mac_addr[3], 7067 sp->def_mac_addr[0].mac_addr[3],
7106 sp->def_mac_addr[0].mac_addr[4], 7068 sp->def_mac_addr[0].mac_addr[4],
7107 sp->def_mac_addr[0].mac_addr[5]); 7069 sp->def_mac_addr[0].mac_addr[5]);
7070 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
7108 if (sp->device_type & XFRAME_II_DEVICE) { 7071 if (sp->device_type & XFRAME_II_DEVICE) {
7109 mode = s2io_print_pci_mode(sp); 7072 mode = s2io_print_pci_mode(sp);
7110 if (mode < 0) { 7073 if (mode < 0) {
@@ -7128,9 +7091,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7128 dev->name); 7091 dev->name);
7129 break; 7092 break;
7130 } 7093 }
7131#ifdef CONFIG_S2IO_NAPI 7094
7132 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name); 7095 if (napi)
7133#endif 7096 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
7134 switch(sp->intr_type) { 7097 switch(sp->intr_type) {
7135 case INTA: 7098 case INTA:
7136 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name); 7099 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
@@ -7145,7 +7108,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7145 if (sp->lro) 7108 if (sp->lro)
7146 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n", 7109 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7147 dev->name); 7110 dev->name);
7148 7111 if (ufo)
7112 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7113 " enabled\n", dev->name);
7149 /* Initialize device name */ 7114 /* Initialize device name */
7150 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name); 7115 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7151 7116
@@ -7202,7 +7167,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7202{ 7167{
7203 struct net_device *dev = 7168 struct net_device *dev =
7204 (struct net_device *) pci_get_drvdata(pdev); 7169 (struct net_device *) pci_get_drvdata(pdev);
7205 nic_t *sp; 7170 struct s2io_nic *sp;
7206 7171
7207 if (dev == NULL) { 7172 if (dev == NULL) {
7208 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n"); 7173 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
@@ -7215,7 +7180,6 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7215 free_shared_mem(sp); 7180 free_shared_mem(sp);
7216 iounmap(sp->bar0); 7181 iounmap(sp->bar0);
7217 iounmap(sp->bar1); 7182 iounmap(sp->bar1);
7218 pci_disable_device(pdev);
7219 if (sp->intr_type != MSI_X) 7183 if (sp->intr_type != MSI_X)
7220 pci_release_regions(pdev); 7184 pci_release_regions(pdev);
7221 else { 7185 else {
@@ -7226,6 +7190,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7226 } 7190 }
7227 pci_set_drvdata(pdev, NULL); 7191 pci_set_drvdata(pdev, NULL);
7228 free_netdev(dev); 7192 free_netdev(dev);
7193 pci_disable_device(pdev);
7229} 7194}
7230 7195
7231/** 7196/**
@@ -7244,7 +7209,7 @@ int __init s2io_starter(void)
7244 * Description: This function is the cleanup routine for the driver. It unregist * ers the driver. 7209 * Description: This function is the cleanup routine for the driver. It unregist * ers the driver.
7245 */ 7210 */
7246 7211
7247static void s2io_closer(void) 7212static __exit void s2io_closer(void)
7248{ 7213{
7249 pci_unregister_driver(&s2io_driver); 7214 pci_unregister_driver(&s2io_driver);
7250 DBG_PRINT(INIT_DBG, "cleanup done\n"); 7215 DBG_PRINT(INIT_DBG, "cleanup done\n");
@@ -7254,7 +7219,7 @@ module_init(s2io_starter);
7254module_exit(s2io_closer); 7219module_exit(s2io_closer);
7255 7220
7256static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip, 7221static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7257 struct tcphdr **tcp, RxD_t *rxdp) 7222 struct tcphdr **tcp, struct RxD_t *rxdp)
7258{ 7223{
7259 int ip_off; 7224 int ip_off;
7260 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len; 7225 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
@@ -7288,7 +7253,7 @@ static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7288 return 0; 7253 return 0;
7289} 7254}
7290 7255
7291static int check_for_socket_match(lro_t *lro, struct iphdr *ip, 7256static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7292 struct tcphdr *tcp) 7257 struct tcphdr *tcp)
7293{ 7258{
7294 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 7259 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
@@ -7303,7 +7268,7 @@ static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7303 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2)); 7268 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
7304} 7269}
7305 7270
7306static void initiate_new_session(lro_t *lro, u8 *l2h, 7271static void initiate_new_session(struct lro *lro, u8 *l2h,
7307 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len) 7272 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7308{ 7273{
7309 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 7274 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
@@ -7329,12 +7294,12 @@ static void initiate_new_session(lro_t *lro, u8 *l2h,
7329 lro->in_use = 1; 7294 lro->in_use = 1;
7330} 7295}
7331 7296
7332static void update_L3L4_header(nic_t *sp, lro_t *lro) 7297static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7333{ 7298{
7334 struct iphdr *ip = lro->iph; 7299 struct iphdr *ip = lro->iph;
7335 struct tcphdr *tcp = lro->tcph; 7300 struct tcphdr *tcp = lro->tcph;
7336 u16 nchk; 7301 u16 nchk;
7337 StatInfo_t *statinfo = sp->mac_control.stats_info; 7302 struct stat_block *statinfo = sp->mac_control.stats_info;
7338 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 7303 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7339 7304
7340 /* Update L3 header */ 7305 /* Update L3 header */
@@ -7360,7 +7325,7 @@ static void update_L3L4_header(nic_t *sp, lro_t *lro)
7360 statinfo->sw_stat.num_aggregations++; 7325 statinfo->sw_stat.num_aggregations++;
7361} 7326}
7362 7327
7363static void aggregate_new_rx(lro_t *lro, struct iphdr *ip, 7328static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7364 struct tcphdr *tcp, u32 l4_pyld) 7329 struct tcphdr *tcp, u32 l4_pyld)
7365{ 7330{
7366 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 7331 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
@@ -7382,7 +7347,7 @@ static void aggregate_new_rx(lro_t *lro, struct iphdr *ip,
7382 } 7347 }
7383} 7348}
7384 7349
7385static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip, 7350static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7386 struct tcphdr *tcp, u32 tcp_pyld_len) 7351 struct tcphdr *tcp, u32 tcp_pyld_len)
7387{ 7352{
7388 u8 *ptr; 7353 u8 *ptr;
@@ -7440,8 +7405,8 @@ static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip,
7440} 7405}
7441 7406
7442static int 7407static int
7443s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro, 7408s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
7444 RxD_t *rxdp, nic_t *sp) 7409 struct RxD_t *rxdp, struct s2io_nic *sp)
7445{ 7410{
7446 struct iphdr *ip; 7411 struct iphdr *ip;
7447 struct tcphdr *tcph; 7412 struct tcphdr *tcph;
@@ -7458,7 +7423,7 @@ s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro,
7458 tcph = (struct tcphdr *)*tcp; 7423 tcph = (struct tcphdr *)*tcp;
7459 *tcp_len = get_l4_pyld_length(ip, tcph); 7424 *tcp_len = get_l4_pyld_length(ip, tcph);
7460 for (i=0; i<MAX_LRO_SESSIONS; i++) { 7425 for (i=0; i<MAX_LRO_SESSIONS; i++) {
7461 lro_t *l_lro = &sp->lro0_n[i]; 7426 struct lro *l_lro = &sp->lro0_n[i];
7462 if (l_lro->in_use) { 7427 if (l_lro->in_use) {
7463 if (check_for_socket_match(l_lro, ip, tcph)) 7428 if (check_for_socket_match(l_lro, ip, tcph))
7464 continue; 7429 continue;
@@ -7496,7 +7461,7 @@ s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro,
7496 } 7461 }
7497 7462
7498 for (i=0; i<MAX_LRO_SESSIONS; i++) { 7463 for (i=0; i<MAX_LRO_SESSIONS; i++) {
7499 lro_t *l_lro = &sp->lro0_n[i]; 7464 struct lro *l_lro = &sp->lro0_n[i];
7500 if (!(l_lro->in_use)) { 7465 if (!(l_lro->in_use)) {
7501 *lro = l_lro; 7466 *lro = l_lro;
7502 ret = 3; /* Begin anew */ 7467 ret = 3; /* Begin anew */
@@ -7535,9 +7500,9 @@ s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro,
7535 return ret; 7500 return ret;
7536} 7501}
7537 7502
7538static void clear_lro_session(lro_t *lro) 7503static void clear_lro_session(struct lro *lro)
7539{ 7504{
7540 static u16 lro_struct_size = sizeof(lro_t); 7505 static u16 lro_struct_size = sizeof(struct lro);
7541 7506
7542 memset(lro, 0, lro_struct_size); 7507 memset(lro, 0, lro_struct_size);
7543} 7508}
@@ -7547,14 +7512,14 @@ static void queue_rx_frame(struct sk_buff *skb)
7547 struct net_device *dev = skb->dev; 7512 struct net_device *dev = skb->dev;
7548 7513
7549 skb->protocol = eth_type_trans(skb, dev); 7514 skb->protocol = eth_type_trans(skb, dev);
7550#ifdef CONFIG_S2IO_NAPI 7515 if (napi)
7551 netif_receive_skb(skb); 7516 netif_receive_skb(skb);
7552#else 7517 else
7553 netif_rx(skb); 7518 netif_rx(skb);
7554#endif
7555} 7519}
7556 7520
7557static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, 7521static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
7522 struct sk_buff *skb,
7558 u32 tcp_len) 7523 u32 tcp_len)
7559{ 7524{
7560 struct sk_buff *first = lro->parent; 7525 struct sk_buff *first = lro->parent;
@@ -7566,6 +7531,7 @@ static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb,
7566 lro->last_frag->next = skb; 7531 lro->last_frag->next = skb;
7567 else 7532 else
7568 skb_shinfo(first)->frag_list = skb; 7533 skb_shinfo(first)->frag_list = skb;
7534 first->truesize += skb->truesize;
7569 lro->last_frag = skb; 7535 lro->last_frag = skb;
7570 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++; 7536 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
7571 return; 7537 return;
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 3b0bafd273c8..a5e1a513deb5 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -30,6 +30,8 @@
30#undef SUCCESS 30#undef SUCCESS
31#define SUCCESS 0 31#define SUCCESS 0
32#define FAILURE -1 32#define FAILURE -1
33#define S2IO_MINUS_ONE 0xFFFFFFFFFFFFFFFFULL
34#define S2IO_MAX_PCI_CONFIG_SPACE_REINIT 100
33 35
34#define CHECKBIT(value, nbit) (value & (1 << nbit)) 36#define CHECKBIT(value, nbit) (value & (1 << nbit))
35 37
@@ -37,7 +39,7 @@
37#define MAX_FLICKER_TIME 60000 /* 60 Secs */ 39#define MAX_FLICKER_TIME 60000 /* 60 Secs */
38 40
39/* Maximum outstanding splits to be configured into xena. */ 41/* Maximum outstanding splits to be configured into xena. */
40typedef enum xena_max_outstanding_splits { 42enum {
41 XENA_ONE_SPLIT_TRANSACTION = 0, 43 XENA_ONE_SPLIT_TRANSACTION = 0,
42 XENA_TWO_SPLIT_TRANSACTION = 1, 44 XENA_TWO_SPLIT_TRANSACTION = 1,
43 XENA_THREE_SPLIT_TRANSACTION = 2, 45 XENA_THREE_SPLIT_TRANSACTION = 2,
@@ -46,7 +48,7 @@ typedef enum xena_max_outstanding_splits {
46 XENA_TWELVE_SPLIT_TRANSACTION = 5, 48 XENA_TWELVE_SPLIT_TRANSACTION = 5,
47 XENA_SIXTEEN_SPLIT_TRANSACTION = 6, 49 XENA_SIXTEEN_SPLIT_TRANSACTION = 6,
48 XENA_THIRTYTWO_SPLIT_TRANSACTION = 7 50 XENA_THIRTYTWO_SPLIT_TRANSACTION = 7
49} xena_max_outstanding_splits; 51};
50#define XENA_MAX_OUTSTANDING_SPLITS(n) (n << 4) 52#define XENA_MAX_OUTSTANDING_SPLITS(n) (n << 4)
51 53
52/* OS concerned variables and constants */ 54/* OS concerned variables and constants */
@@ -77,7 +79,7 @@ static int debug_level = ERR_DBG;
77#define S2IO_JUMBO_SIZE 9600 79#define S2IO_JUMBO_SIZE 9600
78 80
79/* Driver statistics maintained by driver */ 81/* Driver statistics maintained by driver */
80typedef struct { 82struct swStat {
81 unsigned long long single_ecc_errs; 83 unsigned long long single_ecc_errs;
82 unsigned long long double_ecc_errs; 84 unsigned long long double_ecc_errs;
83 unsigned long long parity_err_cnt; 85 unsigned long long parity_err_cnt;
@@ -92,10 +94,10 @@ typedef struct {
92 unsigned long long flush_max_pkts; 94 unsigned long long flush_max_pkts;
93 unsigned long long sum_avg_pkts_aggregated; 95 unsigned long long sum_avg_pkts_aggregated;
94 unsigned long long num_aggregations; 96 unsigned long long num_aggregations;
95} swStat_t; 97};
96 98
97/* Xpak releated alarm and warnings */ 99/* Xpak releated alarm and warnings */
98typedef struct { 100struct xpakStat {
99 u64 alarm_transceiver_temp_high; 101 u64 alarm_transceiver_temp_high;
100 u64 alarm_transceiver_temp_low; 102 u64 alarm_transceiver_temp_low;
101 u64 alarm_laser_bias_current_high; 103 u64 alarm_laser_bias_current_high;
@@ -110,11 +112,11 @@ typedef struct {
110 u64 warn_laser_output_power_low; 112 u64 warn_laser_output_power_low;
111 u64 xpak_regs_stat; 113 u64 xpak_regs_stat;
112 u32 xpak_timer_count; 114 u32 xpak_timer_count;
113} xpakStat_t; 115};
114 116
115 117
116/* The statistics block of Xena */ 118/* The statistics block of Xena */
117typedef struct stat_block { 119struct stat_block {
118/* Tx MAC statistics counters. */ 120/* Tx MAC statistics counters. */
119 __le32 tmac_data_octets; 121 __le32 tmac_data_octets;
120 __le32 tmac_frms; 122 __le32 tmac_frms;
@@ -290,9 +292,9 @@ typedef struct stat_block {
290 __le32 reserved_14; 292 __le32 reserved_14;
291 __le32 link_fault_cnt; 293 __le32 link_fault_cnt;
292 u8 buffer[20]; 294 u8 buffer[20];
293 swStat_t sw_stat; 295 struct swStat sw_stat;
294 xpakStat_t xpak_stat; 296 struct xpakStat xpak_stat;
295} StatInfo_t; 297};
296 298
297/* 299/*
298 * Structures representing different init time configuration 300 * Structures representing different init time configuration
@@ -315,7 +317,7 @@ static int fifo_map[][MAX_TX_FIFOS] = {
315}; 317};
316 318
317/* Maintains Per FIFO related information. */ 319/* Maintains Per FIFO related information. */
318typedef struct tx_fifo_config { 320struct tx_fifo_config {
319#define MAX_AVAILABLE_TXDS 8192 321#define MAX_AVAILABLE_TXDS 8192
320 u32 fifo_len; /* specifies len of FIFO upto 8192, ie no of TxDLs */ 322 u32 fifo_len; /* specifies len of FIFO upto 8192, ie no of TxDLs */
321/* Priority definition */ 323/* Priority definition */
@@ -332,11 +334,11 @@ typedef struct tx_fifo_config {
332 u8 f_no_snoop; 334 u8 f_no_snoop;
333#define NO_SNOOP_TXD 0x01 335#define NO_SNOOP_TXD 0x01
334#define NO_SNOOP_TXD_BUFFER 0x02 336#define NO_SNOOP_TXD_BUFFER 0x02
335} tx_fifo_config_t; 337};
336 338
337 339
338/* Maintains per Ring related information */ 340/* Maintains per Ring related information */
339typedef struct rx_ring_config { 341struct rx_ring_config {
340 u32 num_rxd; /*No of RxDs per Rx Ring */ 342 u32 num_rxd; /*No of RxDs per Rx Ring */
341#define RX_RING_PRI_0 0 /* highest */ 343#define RX_RING_PRI_0 0 /* highest */
342#define RX_RING_PRI_1 1 344#define RX_RING_PRI_1 1
@@ -357,7 +359,7 @@ typedef struct rx_ring_config {
357 u8 f_no_snoop; 359 u8 f_no_snoop;
358#define NO_SNOOP_RXD 0x01 360#define NO_SNOOP_RXD 0x01
359#define NO_SNOOP_RXD_BUFFER 0x02 361#define NO_SNOOP_RXD_BUFFER 0x02
360} rx_ring_config_t; 362};
361 363
362/* This structure provides contains values of the tunable parameters 364/* This structure provides contains values of the tunable parameters
363 * of the H/W 365 * of the H/W
@@ -367,7 +369,7 @@ struct config_param {
367 u32 tx_fifo_num; /*Number of Tx FIFOs */ 369 u32 tx_fifo_num; /*Number of Tx FIFOs */
368 370
369 u8 fifo_mapping[MAX_TX_FIFOS]; 371 u8 fifo_mapping[MAX_TX_FIFOS];
370 tx_fifo_config_t tx_cfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */ 372 struct tx_fifo_config tx_cfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */
371 u32 max_txds; /*Max no. of Tx buffer descriptor per TxDL */ 373 u32 max_txds; /*Max no. of Tx buffer descriptor per TxDL */
372 u64 tx_intr_type; 374 u64 tx_intr_type;
373 /* Specifies if Tx Intr is UTILZ or PER_LIST type. */ 375 /* Specifies if Tx Intr is UTILZ or PER_LIST type. */
@@ -376,7 +378,7 @@ struct config_param {
376 u32 rx_ring_num; /*Number of receive rings */ 378 u32 rx_ring_num; /*Number of receive rings */
377#define MAX_RX_BLOCKS_PER_RING 150 379#define MAX_RX_BLOCKS_PER_RING 150
378 380
379 rx_ring_config_t rx_cfg[MAX_RX_RINGS]; /*Per-Rx Ring config */ 381 struct rx_ring_config rx_cfg[MAX_RX_RINGS]; /*Per-Rx Ring config */
380 u8 bimodal; /*Flag for setting bimodal interrupts*/ 382 u8 bimodal; /*Flag for setting bimodal interrupts*/
381 383
382#define HEADER_ETHERNET_II_802_3_SIZE 14 384#define HEADER_ETHERNET_II_802_3_SIZE 14
@@ -395,14 +397,14 @@ struct config_param {
395}; 397};
396 398
397/* Structure representing MAC Addrs */ 399/* Structure representing MAC Addrs */
398typedef struct mac_addr { 400struct mac_addr {
399 u8 mac_addr[ETH_ALEN]; 401 u8 mac_addr[ETH_ALEN];
400} macaddr_t; 402};
401 403
402/* Structure that represent every FIFO element in the BAR1 404/* Structure that represent every FIFO element in the BAR1
403 * Address location. 405 * Address location.
404 */ 406 */
405typedef struct _TxFIFO_element { 407struct TxFIFO_element {
406 u64 TxDL_Pointer; 408 u64 TxDL_Pointer;
407 409
408 u64 List_Control; 410 u64 List_Control;
@@ -413,10 +415,10 @@ typedef struct _TxFIFO_element {
413#define TX_FIFO_SPECIAL_FUNC BIT(23) 415#define TX_FIFO_SPECIAL_FUNC BIT(23)
414#define TX_FIFO_DS_NO_SNOOP BIT(31) 416#define TX_FIFO_DS_NO_SNOOP BIT(31)
415#define TX_FIFO_BUFF_NO_SNOOP BIT(30) 417#define TX_FIFO_BUFF_NO_SNOOP BIT(30)
416} TxFIFO_element_t; 418};
417 419
418/* Tx descriptor structure */ 420/* Tx descriptor structure */
419typedef struct _TxD { 421struct TxD {
420 u64 Control_1; 422 u64 Control_1;
421/* bit mask */ 423/* bit mask */
422#define TXD_LIST_OWN_XENA BIT(7) 424#define TXD_LIST_OWN_XENA BIT(7)
@@ -447,16 +449,16 @@ typedef struct _TxD {
447 449
448 u64 Buffer_Pointer; 450 u64 Buffer_Pointer;
449 u64 Host_Control; /* reserved for host */ 451 u64 Host_Control; /* reserved for host */
450} TxD_t; 452};
451 453
452/* Structure to hold the phy and virt addr of every TxDL. */ 454/* Structure to hold the phy and virt addr of every TxDL. */
453typedef struct list_info_hold { 455struct list_info_hold {
454 dma_addr_t list_phy_addr; 456 dma_addr_t list_phy_addr;
455 void *list_virt_addr; 457 void *list_virt_addr;
456} list_info_hold_t; 458};
457 459
458/* Rx descriptor structure for 1 buffer mode */ 460/* Rx descriptor structure for 1 buffer mode */
459typedef struct _RxD_t { 461struct RxD_t {
460 u64 Host_Control; /* reserved for host */ 462 u64 Host_Control; /* reserved for host */
461 u64 Control_1; 463 u64 Control_1;
462#define RXD_OWN_XENA BIT(7) 464#define RXD_OWN_XENA BIT(7)
@@ -481,21 +483,21 @@ typedef struct _RxD_t {
481#define SET_NUM_TAG(val) vBIT(val,16,32) 483#define SET_NUM_TAG(val) vBIT(val,16,32)
482 484
483 485
484} RxD_t; 486};
485/* Rx descriptor structure for 1 buffer mode */ 487/* Rx descriptor structure for 1 buffer mode */
486typedef struct _RxD1_t { 488struct RxD1 {
487 struct _RxD_t h; 489 struct RxD_t h;
488 490
489#define MASK_BUFFER0_SIZE_1 vBIT(0x3FFF,2,14) 491#define MASK_BUFFER0_SIZE_1 vBIT(0x3FFF,2,14)
490#define SET_BUFFER0_SIZE_1(val) vBIT(val,2,14) 492#define SET_BUFFER0_SIZE_1(val) vBIT(val,2,14)
491#define RXD_GET_BUFFER0_SIZE_1(_Control_2) \ 493#define RXD_GET_BUFFER0_SIZE_1(_Control_2) \
492 (u16)((_Control_2 & MASK_BUFFER0_SIZE_1) >> 48) 494 (u16)((_Control_2 & MASK_BUFFER0_SIZE_1) >> 48)
493 u64 Buffer0_ptr; 495 u64 Buffer0_ptr;
494} RxD1_t; 496};
495/* Rx descriptor structure for 3 or 2 buffer mode */ 497/* Rx descriptor structure for 3 or 2 buffer mode */
496 498
497typedef struct _RxD3_t { 499struct RxD3 {
498 struct _RxD_t h; 500 struct RxD_t h;
499 501
500#define MASK_BUFFER0_SIZE_3 vBIT(0xFF,2,14) 502#define MASK_BUFFER0_SIZE_3 vBIT(0xFF,2,14)
501#define MASK_BUFFER1_SIZE_3 vBIT(0xFFFF,16,16) 503#define MASK_BUFFER1_SIZE_3 vBIT(0xFFFF,16,16)
@@ -515,15 +517,15 @@ typedef struct _RxD3_t {
515 u64 Buffer0_ptr; 517 u64 Buffer0_ptr;
516 u64 Buffer1_ptr; 518 u64 Buffer1_ptr;
517 u64 Buffer2_ptr; 519 u64 Buffer2_ptr;
518} RxD3_t; 520};
519 521
520 522
521/* Structure that represents the Rx descriptor block which contains 523/* Structure that represents the Rx descriptor block which contains
522 * 128 Rx descriptors. 524 * 128 Rx descriptors.
523 */ 525 */
524typedef struct _RxD_block { 526struct RxD_block {
525#define MAX_RXDS_PER_BLOCK_1 127 527#define MAX_RXDS_PER_BLOCK_1 127
526 RxD1_t rxd[MAX_RXDS_PER_BLOCK_1]; 528 struct RxD1 rxd[MAX_RXDS_PER_BLOCK_1];
527 529
528 u64 reserved_0; 530 u64 reserved_0;
529#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL 531#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
@@ -533,22 +535,22 @@ typedef struct _RxD_block {
533 u64 pNext_RxD_Blk_physical; /* Buff0_ptr.In a 32 bit arch 535 u64 pNext_RxD_Blk_physical; /* Buff0_ptr.In a 32 bit arch
534 * the upper 32 bits should 536 * the upper 32 bits should
535 * be 0 */ 537 * be 0 */
536} RxD_block_t; 538};
537 539
538#define SIZE_OF_BLOCK 4096 540#define SIZE_OF_BLOCK 4096
539 541
540#define RXD_MODE_1 0 542#define RXD_MODE_1 0 /* One Buffer mode */
541#define RXD_MODE_3A 1 543#define RXD_MODE_3A 1 /* Three Buffer mode */
542#define RXD_MODE_3B 2 544#define RXD_MODE_3B 2 /* Two Buffer mode */
543 545
544/* Structure to hold virtual addresses of Buf0 and Buf1 in 546/* Structure to hold virtual addresses of Buf0 and Buf1 in
545 * 2buf mode. */ 547 * 2buf mode. */
546typedef struct bufAdd { 548struct buffAdd {
547 void *ba_0_org; 549 void *ba_0_org;
548 void *ba_1_org; 550 void *ba_1_org;
549 void *ba_0; 551 void *ba_0;
550 void *ba_1; 552 void *ba_1;
551} buffAdd_t; 553};
552 554
553/* Structure which stores all the MAC control parameters */ 555/* Structure which stores all the MAC control parameters */
554 556
@@ -556,43 +558,46 @@ typedef struct bufAdd {
556 * from which the Rx Interrupt processor can start picking 558 * from which the Rx Interrupt processor can start picking
557 * up the RxDs for processing. 559 * up the RxDs for processing.
558 */ 560 */
559typedef struct _rx_curr_get_info_t { 561struct rx_curr_get_info {
560 u32 block_index; 562 u32 block_index;
561 u32 offset; 563 u32 offset;
562 u32 ring_len; 564 u32 ring_len;
563} rx_curr_get_info_t; 565};
564 566
565typedef rx_curr_get_info_t rx_curr_put_info_t; 567struct rx_curr_put_info {
568 u32 block_index;
569 u32 offset;
570 u32 ring_len;
571};
566 572
567/* This structure stores the offset of the TxDl in the FIFO 573/* This structure stores the offset of the TxDl in the FIFO
568 * from which the Tx Interrupt processor can start picking 574 * from which the Tx Interrupt processor can start picking
569 * up the TxDLs for send complete interrupt processing. 575 * up the TxDLs for send complete interrupt processing.
570 */ 576 */
571typedef struct { 577struct tx_curr_get_info {
572 u32 offset; 578 u32 offset;
573 u32 fifo_len; 579 u32 fifo_len;
574} tx_curr_get_info_t; 580};
575
576typedef tx_curr_get_info_t tx_curr_put_info_t;
577 581
582struct tx_curr_put_info {
583 u32 offset;
584 u32 fifo_len;
585};
578 586
579typedef struct rxd_info { 587struct rxd_info {
580 void *virt_addr; 588 void *virt_addr;
581 dma_addr_t dma_addr; 589 dma_addr_t dma_addr;
582}rxd_info_t; 590};
583 591
584/* Structure that holds the Phy and virt addresses of the Blocks */ 592/* Structure that holds the Phy and virt addresses of the Blocks */
585typedef struct rx_block_info { 593struct rx_block_info {
586 void *block_virt_addr; 594 void *block_virt_addr;
587 dma_addr_t block_dma_addr; 595 dma_addr_t block_dma_addr;
588 rxd_info_t *rxds; 596 struct rxd_info *rxds;
589} rx_block_info_t; 597};
590
591/* pre declaration of the nic structure */
592typedef struct s2io_nic nic_t;
593 598
594/* Ring specific structure */ 599/* Ring specific structure */
595typedef struct ring_info { 600struct ring_info {
596 /* The ring number */ 601 /* The ring number */
597 int ring_no; 602 int ring_no;
598 603
@@ -600,7 +605,7 @@ typedef struct ring_info {
600 * Place holders for the virtual and physical addresses of 605 * Place holders for the virtual and physical addresses of
601 * all the Rx Blocks 606 * all the Rx Blocks
602 */ 607 */
603 rx_block_info_t rx_blocks[MAX_RX_BLOCKS_PER_RING]; 608 struct rx_block_info rx_blocks[MAX_RX_BLOCKS_PER_RING];
604 int block_count; 609 int block_count;
605 int pkt_cnt; 610 int pkt_cnt;
606 611
@@ -608,26 +613,24 @@ typedef struct ring_info {
608 * Put pointer info which indictes which RxD has to be replenished 613 * Put pointer info which indictes which RxD has to be replenished
609 * with a new buffer. 614 * with a new buffer.
610 */ 615 */
611 rx_curr_put_info_t rx_curr_put_info; 616 struct rx_curr_put_info rx_curr_put_info;
612 617
613 /* 618 /*
614 * Get pointer info which indictes which is the last RxD that was 619 * Get pointer info which indictes which is the last RxD that was
615 * processed by the driver. 620 * processed by the driver.
616 */ 621 */
617 rx_curr_get_info_t rx_curr_get_info; 622 struct rx_curr_get_info rx_curr_get_info;
618 623
619#ifndef CONFIG_S2IO_NAPI
620 /* Index to the absolute position of the put pointer of Rx ring */ 624 /* Index to the absolute position of the put pointer of Rx ring */
621 int put_pos; 625 int put_pos;
622#endif
623 626
624 /* Buffer Address store. */ 627 /* Buffer Address store. */
625 buffAdd_t **ba; 628 struct buffAdd **ba;
626 nic_t *nic; 629 struct s2io_nic *nic;
627} ring_info_t; 630};
628 631
629/* Fifo specific structure */ 632/* Fifo specific structure */
630typedef struct fifo_info { 633struct fifo_info {
631 /* FIFO number */ 634 /* FIFO number */
632 int fifo_no; 635 int fifo_no;
633 636
@@ -635,40 +638,40 @@ typedef struct fifo_info {
635 int max_txds; 638 int max_txds;
636 639
637 /* Place holder of all the TX List's Phy and Virt addresses. */ 640 /* Place holder of all the TX List's Phy and Virt addresses. */
638 list_info_hold_t *list_info; 641 struct list_info_hold *list_info;
639 642
640 /* 643 /*
641 * Current offset within the tx FIFO where driver would write 644 * Current offset within the tx FIFO where driver would write
642 * new Tx frame 645 * new Tx frame
643 */ 646 */
644 tx_curr_put_info_t tx_curr_put_info; 647 struct tx_curr_put_info tx_curr_put_info;
645 648
646 /* 649 /*
647 * Current offset within tx FIFO from where the driver would start freeing 650 * Current offset within tx FIFO from where the driver would start freeing
648 * the buffers 651 * the buffers
649 */ 652 */
650 tx_curr_get_info_t tx_curr_get_info; 653 struct tx_curr_get_info tx_curr_get_info;
651 654
652 nic_t *nic; 655 struct s2io_nic *nic;
653}fifo_info_t; 656};
654 657
655/* Information related to the Tx and Rx FIFOs and Rings of Xena 658/* Information related to the Tx and Rx FIFOs and Rings of Xena
656 * is maintained in this structure. 659 * is maintained in this structure.
657 */ 660 */
658typedef struct mac_info { 661struct mac_info {
659/* tx side stuff */ 662/* tx side stuff */
660 /* logical pointer of start of each Tx FIFO */ 663 /* logical pointer of start of each Tx FIFO */
661 TxFIFO_element_t __iomem *tx_FIFO_start[MAX_TX_FIFOS]; 664 struct TxFIFO_element __iomem *tx_FIFO_start[MAX_TX_FIFOS];
662 665
663 /* Fifo specific structure */ 666 /* Fifo specific structure */
664 fifo_info_t fifos[MAX_TX_FIFOS]; 667 struct fifo_info fifos[MAX_TX_FIFOS];
665 668
666 /* Save virtual address of TxD page with zero DMA addr(if any) */ 669 /* Save virtual address of TxD page with zero DMA addr(if any) */
667 void *zerodma_virt_addr; 670 void *zerodma_virt_addr;
668 671
669/* rx side stuff */ 672/* rx side stuff */
670 /* Ring specific structure */ 673 /* Ring specific structure */
671 ring_info_t rings[MAX_RX_RINGS]; 674 struct ring_info rings[MAX_RX_RINGS];
672 675
673 u16 rmac_pause_time; 676 u16 rmac_pause_time;
674 u16 mc_pause_threshold_q0q3; 677 u16 mc_pause_threshold_q0q3;
@@ -677,14 +680,14 @@ typedef struct mac_info {
677 void *stats_mem; /* orignal pointer to allocated mem */ 680 void *stats_mem; /* orignal pointer to allocated mem */
678 dma_addr_t stats_mem_phy; /* Physical address of the stat block */ 681 dma_addr_t stats_mem_phy; /* Physical address of the stat block */
679 u32 stats_mem_sz; 682 u32 stats_mem_sz;
680 StatInfo_t *stats_info; /* Logical address of the stat block */ 683 struct stat_block *stats_info; /* Logical address of the stat block */
681} mac_info_t; 684};
682 685
683/* structure representing the user defined MAC addresses */ 686/* structure representing the user defined MAC addresses */
684typedef struct { 687struct usr_addr {
685 char addr[ETH_ALEN]; 688 char addr[ETH_ALEN];
686 int usage_cnt; 689 int usage_cnt;
687} usr_addr_t; 690};
688 691
689/* Default Tunable parameters of the NIC. */ 692/* Default Tunable parameters of the NIC. */
690#define DEFAULT_FIFO_0_LEN 4096 693#define DEFAULT_FIFO_0_LEN 4096
@@ -717,7 +720,7 @@ struct msix_info_st {
717}; 720};
718 721
719/* Data structure to represent a LRO session */ 722/* Data structure to represent a LRO session */
720typedef struct lro { 723struct lro {
721 struct sk_buff *parent; 724 struct sk_buff *parent;
722 struct sk_buff *last_frag; 725 struct sk_buff *last_frag;
723 u8 *l2h; 726 u8 *l2h;
@@ -733,20 +736,18 @@ typedef struct lro {
733 u32 cur_tsval; 736 u32 cur_tsval;
734 u32 cur_tsecr; 737 u32 cur_tsecr;
735 u8 saw_ts; 738 u8 saw_ts;
736}lro_t; 739};
737 740
738/* Structure representing one instance of the NIC */ 741/* Structure representing one instance of the NIC */
739struct s2io_nic { 742struct s2io_nic {
740 int rxd_mode; 743 int rxd_mode;
741#ifdef CONFIG_S2IO_NAPI
742 /* 744 /*
743 * Count of packets to be processed in a given iteration, it will be indicated 745 * Count of packets to be processed in a given iteration, it will be indicated
744 * by the quota field of the device structure when NAPI is enabled. 746 * by the quota field of the device structure when NAPI is enabled.
745 */ 747 */
746 int pkts_to_process; 748 int pkts_to_process;
747#endif
748 struct net_device *dev; 749 struct net_device *dev;
749 mac_info_t mac_control; 750 struct mac_info mac_control;
750 struct config_param config; 751 struct config_param config;
751 struct pci_dev *pdev; 752 struct pci_dev *pdev;
752 void __iomem *bar0; 753 void __iomem *bar0;
@@ -754,8 +755,8 @@ struct s2io_nic {
754#define MAX_MAC_SUPPORTED 16 755#define MAX_MAC_SUPPORTED 16
755#define MAX_SUPPORTED_MULTICASTS MAX_MAC_SUPPORTED 756#define MAX_SUPPORTED_MULTICASTS MAX_MAC_SUPPORTED
756 757
757 macaddr_t def_mac_addr[MAX_MAC_SUPPORTED]; 758 struct mac_addr def_mac_addr[MAX_MAC_SUPPORTED];
758 macaddr_t pre_mac_addr[MAX_MAC_SUPPORTED]; 759 struct mac_addr pre_mac_addr[MAX_MAC_SUPPORTED];
759 760
760 struct net_device_stats stats; 761 struct net_device_stats stats;
761 int high_dma_flag; 762 int high_dma_flag;
@@ -775,9 +776,7 @@ struct s2io_nic {
775 atomic_t rx_bufs_left[MAX_RX_RINGS]; 776 atomic_t rx_bufs_left[MAX_RX_RINGS];
776 777
777 spinlock_t tx_lock; 778 spinlock_t tx_lock;
778#ifndef CONFIG_S2IO_NAPI
779 spinlock_t put_lock; 779 spinlock_t put_lock;
780#endif
781 780
782#define PROMISC 1 781#define PROMISC 1
783#define ALL_MULTI 2 782#define ALL_MULTI 2
@@ -785,7 +784,7 @@ struct s2io_nic {
785#define MAX_ADDRS_SUPPORTED 64 784#define MAX_ADDRS_SUPPORTED 64
786 u16 usr_addr_count; 785 u16 usr_addr_count;
787 u16 mc_addr_count; 786 u16 mc_addr_count;
788 usr_addr_t usr_addrs[MAX_ADDRS_SUPPORTED]; 787 struct usr_addr usr_addrs[MAX_ADDRS_SUPPORTED];
789 788
790 u16 m_cast_flg; 789 u16 m_cast_flg;
791 u16 all_multi_pos; 790 u16 all_multi_pos;
@@ -841,7 +840,7 @@ struct s2io_nic {
841 u8 device_type; 840 u8 device_type;
842 841
843#define MAX_LRO_SESSIONS 32 842#define MAX_LRO_SESSIONS 32
844 lro_t lro0_n[MAX_LRO_SESSIONS]; 843 struct lro lro0_n[MAX_LRO_SESSIONS];
845 unsigned long clubbed_frms_cnt; 844 unsigned long clubbed_frms_cnt;
846 unsigned long sending_both; 845 unsigned long sending_both;
847 u8 lro; 846 u8 lro;
@@ -855,8 +854,9 @@ struct s2io_nic {
855 spinlock_t rx_lock; 854 spinlock_t rx_lock;
856 atomic_t isr_cnt; 855 atomic_t isr_cnt;
857 u64 *ufo_in_band_v; 856 u64 *ufo_in_band_v;
858#define VPD_PRODUCT_NAME_LEN 50 857#define VPD_STRING_LEN 80
859 u8 product_name[VPD_PRODUCT_NAME_LEN]; 858 u8 product_name[VPD_STRING_LEN];
859 u8 serial_num[VPD_STRING_LEN];
860}; 860};
861 861
862#define RESET_ERROR 1; 862#define RESET_ERROR 1;
@@ -975,43 +975,50 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev);
975static int init_shared_mem(struct s2io_nic *sp); 975static int init_shared_mem(struct s2io_nic *sp);
976static void free_shared_mem(struct s2io_nic *sp); 976static void free_shared_mem(struct s2io_nic *sp);
977static int init_nic(struct s2io_nic *nic); 977static int init_nic(struct s2io_nic *nic);
978static void rx_intr_handler(ring_info_t *ring_data); 978static void rx_intr_handler(struct ring_info *ring_data);
979static void tx_intr_handler(fifo_info_t *fifo_data); 979static void tx_intr_handler(struct fifo_info *fifo_data);
980static void alarm_intr_handler(struct s2io_nic *sp); 980static void alarm_intr_handler(struct s2io_nic *sp);
981 981
982static int s2io_starter(void); 982static int s2io_starter(void);
983static void s2io_closer(void);
983static void s2io_tx_watchdog(struct net_device *dev); 984static void s2io_tx_watchdog(struct net_device *dev);
984static void s2io_tasklet(unsigned long dev_addr); 985static void s2io_tasklet(unsigned long dev_addr);
985static void s2io_set_multicast(struct net_device *dev); 986static void s2io_set_multicast(struct net_device *dev);
986static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp); 987static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
987static void s2io_link(nic_t * sp, int link); 988static void s2io_link(struct s2io_nic * sp, int link);
988#if defined(CONFIG_S2IO_NAPI) 989static void s2io_reset(struct s2io_nic * sp);
989static int s2io_poll(struct net_device *dev, int *budget); 990static int s2io_poll(struct net_device *dev, int *budget);
990#endif 991static void s2io_init_pci(struct s2io_nic * sp);
991static void s2io_init_pci(nic_t * sp);
992static int s2io_set_mac_addr(struct net_device *dev, u8 * addr); 992static int s2io_set_mac_addr(struct net_device *dev, u8 * addr);
993static void s2io_alarm_handle(unsigned long data); 993static void s2io_alarm_handle(unsigned long data);
994static int s2io_enable_msi(nic_t *nic); 994static int s2io_enable_msi(struct s2io_nic *nic);
995static irqreturn_t s2io_msi_handle(int irq, void *dev_id); 995static irqreturn_t s2io_msi_handle(int irq, void *dev_id);
996static irqreturn_t 996static irqreturn_t
997s2io_msix_ring_handle(int irq, void *dev_id); 997s2io_msix_ring_handle(int irq, void *dev_id);
998static irqreturn_t 998static irqreturn_t
999s2io_msix_fifo_handle(int irq, void *dev_id); 999s2io_msix_fifo_handle(int irq, void *dev_id);
1000static irqreturn_t s2io_isr(int irq, void *dev_id); 1000static irqreturn_t s2io_isr(int irq, void *dev_id);
1001static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag); 1001static int verify_xena_quiescence(struct s2io_nic *sp);
1002static const struct ethtool_ops netdev_ethtool_ops; 1002static const struct ethtool_ops netdev_ethtool_ops;
1003static void s2io_set_link(struct work_struct *work); 1003static void s2io_set_link(struct work_struct *work);
1004static int s2io_set_swapper(nic_t * sp); 1004static int s2io_set_swapper(struct s2io_nic * sp);
1005static void s2io_card_down(nic_t *nic); 1005static void s2io_card_down(struct s2io_nic *nic);
1006static int s2io_card_up(nic_t *nic); 1006static int s2io_card_up(struct s2io_nic *nic);
1007static int get_xena_rev_id(struct pci_dev *pdev); 1007static int get_xena_rev_id(struct pci_dev *pdev);
1008static void restore_xmsi_data(nic_t *nic); 1008static int wait_for_cmd_complete(void *addr, u64 busy_bit);
1009static int s2io_add_isr(struct s2io_nic * sp);
1010static void s2io_rem_isr(struct s2io_nic * sp);
1011
1012static void restore_xmsi_data(struct s2io_nic *nic);
1009 1013
1010static int s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro, RxD_t *rxdp, nic_t *sp); 1014static int
1011static void clear_lro_session(lro_t *lro); 1015s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
1016 struct RxD_t *rxdp, struct s2io_nic *sp);
1017static void clear_lro_session(struct lro *lro);
1012static void queue_rx_frame(struct sk_buff *skb); 1018static void queue_rx_frame(struct sk_buff *skb);
1013static void update_L3L4_header(nic_t *sp, lro_t *lro); 1019static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro);
1014static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len); 1020static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
1021 struct sk_buff *skb, u32 tcp_len);
1015 1022
1016#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size 1023#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size
1017#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size 1024#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
new file mode 100644
index 000000000000..7f800feaa9a2
--- /dev/null
+++ b/drivers/net/sc92031.c
@@ -0,0 +1,1620 @@
1/* Silan SC92031 PCI Fast Ethernet Adapter driver
2 *
3 * Based on vendor drivers:
4 * Silan Fast Ethernet Netcard Driver:
5 * MODULE_AUTHOR ("gaoyonghong");
6 * MODULE_DESCRIPTION ("SILAN Fast Ethernet driver");
7 * MODULE_LICENSE("GPL");
8 * 8139D Fast Ethernet driver:
9 * (C) 2002 by gaoyonghong
10 * MODULE_AUTHOR ("gaoyonghong");
11 * MODULE_DESCRIPTION ("Rsltek 8139D PCI Fast Ethernet Adapter driver");
12 * MODULE_LICENSE("GPL");
13 * Both are almost identical and seem to be based on pci-skeleton.c
14 *
15 * Rewritten for 2.6 by Cesar Eduardo Barros
16 */
17
18/* Note about set_mac_address: I don't know how to change the hardware
19 * matching, so you need to enable IFF_PROMISC when using it.
20 */
21
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/delay.h>
25#include <linux/pci.h>
26#include <linux/dma-mapping.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/ethtool.h>
30#include <linux/crc32.h>
31
32#include <asm/irq.h>
33
/* PCI IDs: Silan is not in pci_ids.h at this point, so define them here. */
#define PCI_VENDOR_ID_SILAN 0x1904
#define PCI_DEVICE_ID_SILAN_SC92031 0x2031
#define PCI_DEVICE_ID_SILAN_8139D 0x8139

#define SC92031_NAME "sc92031"
#define SC92031_DESCRIPTION "Silan SC92031 PCI Fast Ethernet Adapter driver"
#define SC92031_VERSION "2.0c"

/* BAR 0 is MMIO, BAR 1 is PIO */
#ifndef SC92031_USE_BAR
#define SC92031_USE_BAR 0
#endif

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
static int multicast_filter_limit = 64;
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC(multicast_filter_limit,
	"Maximum number of filtered multicast addresses");

/* Forced media selection; 0 lets the PHY autonegotiate. */
static int media;
module_param(media, int, 0);
MODULE_PARM_DESC(media, "Media type (0x00 = autodetect,"
	" 0x01 = 10M half, 0x02 = 10M full,"
	" 0x04 = 100M half, 0x08 = 100M full)");

/* Size of the in-memory receive ring. */
#define  RX_BUF_LEN_IDX  3 /* 0==8K, 1==16K, 2==32K, 3==64K ,4==128K*/
#define  RX_BUF_LEN	(8192 << RX_BUF_LEN_IDX)

/* Number of Tx descriptor registers. */
#define  NUM_TX_DESC	   4

/* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/
#define  MAX_ETH_FRAME_SIZE	  1536

/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
#define  TX_BUF_SIZE       MAX_ETH_FRAME_SIZE
#define  TX_BUF_TOT_LEN    (TX_BUF_SIZE * NUM_TX_DESC)

/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
#define  RX_FIFO_THRESH    7     /* Rx buffer level before first PCI xfer.  */

/* Time in jiffies before concluding the transmitter is hung. */
#define  TX_TIMEOUT     (4*HZ)

#define  SILAN_STATS_NUM    2    /* number of ETHTOOL_GSTATS */

/* media options -- values of the 'media' module parameter above */
#define  AUTOSELECT    0x00
#define  M10_HALF      0x01
#define  M10_FULL      0x02
#define  M100_HALF     0x04
#define  M100_FULL     0x08
87
 /* Symbolic offsets to registers. */
enum  silan_registers {
	Config0    = 0x00,         // Config0
	Config1    = 0x04,         // Config1
	RxBufWPtr  = 0x08,         // Rx buffer writer pointer
	IntrStatus = 0x0C,         // Interrupt status
	IntrMask   = 0x10,         // Interrupt mask
	RxbufAddr  = 0x14,         // Rx buffer start address
	RxBufRPtr  = 0x18,         // Rx buffer read pointer
	Txstatusall = 0x1C,        // Transmit status of all descriptors
	TxStatus0  = 0x20,	   // Transmit status (Four 32bit registers).
	TxAddr0    = 0x30,         // Tx descriptors (also four 32bit).
	RxConfig   = 0x40,         // Rx configuration
	MAC0	   = 0x44,	   // Ethernet hardware address.
	MAR0	   = 0x4C,	   // Multicast filter.
	RxStatus0  = 0x54,	   // Rx status
	TxConfig   = 0x5C,	   // Tx configuration
	PhyCtrl    = 0x60,	   // physical control
	FlowCtrlConfig = 0x64,     // flow control
	Miicmd0    = 0x68,	   // Mii command0 register
	Miicmd1    = 0x6C,	   // Mii command1 register
	Miistatus  = 0x70,	   // Mii status register
	Timercnt   = 0x74,	   // Timer counter register
	TimerIntr  = 0x78,	   // Timer interrupt register
	PMConfig   = 0x7C,	   // Power Manager configuration
	CRC0	   = 0x80,	   // Power Manager CRC (Two 32bit registers)
	Wakeup0	   = 0x88,	   // Power Manager wakeup (Eight 64bit registers)
	LSBCRC0    = 0xC8,	   // Power Manager LSBCRC (Two 32bit registers)
	TestD0     = 0xD0,
	TestD4     = 0xD4,
	TestD8     = 0xD8,
};
120
/* PHY (MII) register numbers and bit definitions used by this driver.
 * NOTE(review): these shadow the generic names in <linux/mii.h>; the values
 * here match the standard MII register layout plus two vendor registers
 * (16 and 24). */
#define MII_BMCR            0        // Basic mode control register
#define MII_BMSR            1        // Basic mode status register
#define MII_JAB             16       // vendor-specific: jabber/port enable
#define MII_OutputStatus    24       // vendor-specific: resolved speed/duplex

#define BMCR_FULLDPLX       0x0100   // Full duplex
#define BMCR_ANRESTART      0x0200   // Auto negotiation restart
#define BMCR_ANENABLE       0x1000   // Enable auto negotiation
#define BMCR_SPEED100       0x2000   // Select 100Mbps
#define BMSR_LSTATUS        0x0004   // Link status
#define PHY_16_JAB_ENB      0x1000
#define PHY_16_PORT_ENB     0x1
133
/* Bits of the IntrStatus/IntrMask registers. */
enum IntrStatusBits {
	LinkFail = 0x80000000,
	LinkOK = 0x40000000,
	TimeOut = 0x20000000,
	RxOverflow = 0x0040,
	RxOK = 0x0020,
	TxOK = 0x0001,
	IntrBits = LinkFail|LinkOK|TimeOut|RxOverflow|RxOK|TxOK,
};

/* Bits of the per-descriptor TxStatus0..3 registers. */
enum TxStatusBits {
	TxCarrierLost = 0x20000000,
	TxAborted = 0x10000000,
	TxOutOfWindow = 0x08000000,
	TxNccShift = 22,	   // collision count field starts here
	EarlyTxThresShift = 16,	   // early-Tx threshold field starts here
	TxStatOK = 0x8000,
	TxUnderrun = 0x4000,
	TxOwn = 0x2000,
};

/* Bits of the per-packet status word written into the Rx ring. */
enum RxStatusBits {
	RxStatesOK = 0x80000,
	RxBadAlign = 0x40000,
	RxHugeFrame = 0x20000,
	RxSmallFrame = 0x10000,
	RxCRCOK = 0x8000,
	RxCrlFrame = 0x4000,
	Rx_Broadcast = 0x2000,
	Rx_Multicast = 0x1000,
	RxAddrMatch = 0x0800,
	MiiErr = 0x0400,
};

/* Bits of the RxConfig register. */
enum RxConfigBits {
	RxFullDx = 0x80000000,
	RxEnb = 0x40000000,
	RxSmall = 0x20000000,
	RxHuge = 0x10000000,
	RxErr = 0x08000000,
	RxAllphys = 0x04000000,
	RxMulticast = 0x02000000,
	RxBroadcast = 0x01000000,
	RxLoopBack = (1 << 23) | (1 << 22),
	LowThresholdShift = 12,
	HighThresholdShift = 2,
};

/* Bits of the TxConfig register. */
enum TxConfigBits {
	TxFullDx = 0x80000000,
	TxEnb = 0x40000000,
	TxEnbPad = 0x20000000,
	TxEnbHuge = 0x10000000,
	TxEnbFCS = 0x08000000,
	TxNoBackOff = 0x04000000,
	TxEnbPrem = 0x02000000,
	TxCareLostCrs = 0x1000000,
	TxExdCollNum = 0xf00000,
	TxDataRate = 0x80000,
};

/* Bits of the PhyCtrl register. */
enum PhyCtrlconfigbits {
	PhyCtrlAne = 0x80000000,
	PhyCtrlSpd100 = 0x40000000,
	PhyCtrlSpd10 = 0x20000000,
	PhyCtrlPhyBaseAddr = 0x1f000000,
	PhyCtrlDux = 0x800000,
	PhyCtrlReset = 0x400000,
};

/* Bits of the FlowCtrlConfig register. */
enum FlowCtrlConfigBits {
	FlowCtrlFullDX = 0x80000000,
	FlowCtrlEnb = 0x40000000,
};

/* Bits of the Config0 register. */
enum Config0Bits {
	Cfg0_Reset = 0x80000000,
	Cfg0_Anaoff = 0x40000000,
	Cfg0_LDPS = 0x20000000,
};

/* Bits of the Config1 register. */
enum Config1Bits {
	Cfg1_EarlyRx = 1 << 31,
	Cfg1_EarlyTx = 1 << 30,

	//rx buffer size
	Cfg1_Rcv8K = 0x0,
	Cfg1_Rcv16K = 0x1,
	Cfg1_Rcv32K = 0x3,
	Cfg1_Rcv64K = 0x7,
	Cfg1_Rcv128K = 0xf,
};

/* Bits of the Miicmd0 register. */
enum MiiCmd0Bits {
	Mii_Divider = 0x20000000,
	Mii_WRITE = 0x400000,
	Mii_READ = 0x200000,
	Mii_SCAN = 0x100000,
	Mii_Tamod = 0x80000,
	Mii_Drvmod = 0x40000,
	Mii_mdc = 0x20000,
	Mii_mdoen = 0x10000,
	Mii_mdo = 0x8000,
	Mii_mdi = 0x4000,
};

/* Bits of the Miistatus register. */
enum MiiStatusBits {
	Mii_StatusBusy = 0x80000000,
};

/* Bits of the PMConfig (power management) register. */
enum PMConfigBits {
	PM_Enable = 1 << 31,
	PM_LongWF = 1 << 30,
	PM_Magic = 1 << 29,
	PM_LANWake = 1 << 28,
	PM_LWPTN = (1 << 27 | 1<< 26),
	PM_LinkUp = 1 << 25,
	PM_WakeUp = 1 << 24,
};
253
254/* Locking rules:
255 * priv->lock protects most of the fields of priv and most of the
256 * hardware registers. It does not have to protect against softirqs
257 * between sc92031_disable_interrupts and sc92031_enable_interrupts;
258 * it also does not need to be used in ->open and ->stop while the
259 * device interrupts are off.
260 * Not having to protect against softirqs is very useful due to heavy
261 * use of mdelay() at _sc92031_reset.
262 * Functions prefixed with _sc92031_ must be called with the lock held;
263 * functions prefixed with sc92031_ must be called without the lock held.
264 * Use mmiowb() before unlocking if the hardware was written to.
265 */
266
267/* Locking rules for the interrupt:
268 * - the interrupt and the tasklet never run at the same time
269 * - neither run between sc92031_disable_interrupts and
270 * sc92031_enable_interrupt
271 */
272
/* Per-device private state, reached via netdev_priv(dev).
 * See the locking rules comment above the struct in the file: priv->lock
 * guards most fields and most hardware registers. */
struct sc92031_priv {
	spinlock_t		lock;
	/* iomap.h cookie */
	void __iomem		*port_base;
	/* pci device structure */
	struct pci_dev		*pdev;
	/* tasklet */
	struct tasklet_struct	tasklet;

	/* CPU address of rx ring */
	void			*rx_ring;
	/* PCI address of rx ring */
	dma_addr_t		rx_ring_dma_addr;
	/* PCI address of rx ring read pointer -- a bus address, not an
	 * index; compared directly against RxBufWPtr-derived addresses */
	dma_addr_t		rx_ring_tail;

	/* tx ring write index */
	unsigned		tx_head;
	/* tx ring read index */
	unsigned		tx_tail;
	/* CPU address of tx bounce buffer */
	void			*tx_bufs;
	/* PCI address of tx bounce buffer */
	dma_addr_t		tx_bufs_dma_addr;

	/* copies of some hardware registers */
	u32			intr_status;
	/* intr_mask is atomic so the interrupt handler and
	 * sc92031_disable_interrupts can race safely without the lock */
	atomic_t		intr_mask;
	u32			rx_config;
	u32			tx_config;
	u32			pm_config;

	/* copy of some flags from dev->flags */
	unsigned int		mc_flags;

	/* for ETHTOOL_GSTATS */
	u64			tx_timeouts;
	u64			rx_loss;

	/* for dev->get_stats */
	long			rx_value;
	struct net_device_stats	stats;
};
316
/* I don't know which registers can be safely read; however, I can guess
 * MAC0 is one of them. */
static inline void _sc92031_dummy_read(void __iomem *port_base)
{
	/* Read-back used as a PCI posting flush after register writes. */
	ioread32(port_base + MAC0);
}

/* Busy-wait until the MII management interface is idle; returns the
 * final Miistatus value (which also carries MII read data). */
static u32 _sc92031_mii_wait(void __iomem *port_base)
{
	u32 mii_status;

	do {
		udelay(10);
		mii_status = ioread32(port_base + Miistatus);
	} while (mii_status & Mii_StatusBusy);

	return mii_status;
}

/* Issue one MII management command (cmd0 = operation bits, cmd1 = register
 * address / write data) and wait for completion. Returns final Miistatus. */
static u32 _sc92031_mii_cmd(void __iomem *port_base, u32 cmd0, u32 cmd1)
{
	/* Cancel any in-progress scan before issuing the new command. */
	iowrite32(Mii_Divider, port_base + Miicmd0);

	_sc92031_mii_wait(port_base);

	iowrite32(cmd1, port_base + Miicmd1);
	iowrite32(Mii_Divider | cmd0, port_base + Miicmd0);

	return _sc92031_mii_wait(port_base);
}

/* Restart continuous hardware polling of the PHY (register 1, BMSR). */
static void _sc92031_mii_scan(void __iomem *port_base)
{
	_sc92031_mii_cmd(port_base, Mii_SCAN, 0x1 << 6);
}

/* Read a PHY register. The 16-bit data sits at bit 13 of Miistatus. */
static u16 _sc92031_mii_read(void __iomem *port_base, unsigned reg)
{
	return _sc92031_mii_cmd(port_base, Mii_READ, reg << 6) >> 13;
}

/* Write a PHY register; data is placed at bit 11 of Miicmd1. */
static void _sc92031_mii_write(void __iomem *port_base, unsigned reg, u16 val)
{
	_sc92031_mii_cmd(port_base, Mii_WRITE, (reg << 6) | ((u32)val << 11));
}
362
/* Mask the hardware interrupt and quiesce the irq handler and tasklet.
 * After this returns, neither the interrupt nor the tasklet will run
 * until sc92031_enable_interrupts. Called without priv->lock held. */
static void sc92031_disable_interrupts(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	/* tell the tasklet/interrupt not to enable interrupts */
	atomic_set(&priv->intr_mask, 0);
	wmb();

	/* stop interrupts */
	iowrite32(0, port_base + IntrMask);
	_sc92031_dummy_read(port_base);	/* flush posted write */
	mmiowb();

	/* wait for any concurrent interrupt/tasklet to finish */
	synchronize_irq(dev->irq);
	tasklet_disable(&priv->tasklet);
}
381
/* Re-enable the tasklet and unmask the hardware interrupt.
 * Reverses sc92031_disable_interrupts; called without priv->lock held. */
static void sc92031_enable_interrupts(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	tasklet_enable(&priv->tasklet);

	/* publish the new mask before writing it to hardware, so the
	 * irq/tasklet restore the correct value */
	atomic_set(&priv->intr_mask, IntrBits);
	wmb();

	iowrite32(IntrBits, port_base + IntrMask);
	mmiowb();
}
395
/* Clear the Tx/Rx enable bits in the shadowed configs and push them to
 * hardware. Caller must hold priv->lock. */
static void _sc92031_disable_tx_rx(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	priv->rx_config &= ~RxEnb;
	priv->tx_config &= ~TxEnb;
	iowrite32(priv->rx_config, port_base + RxConfig);
	iowrite32(priv->tx_config, port_base + TxConfig);
}

/* Set the Tx/Rx enable bits in the shadowed configs and push them to
 * hardware. Caller must hold priv->lock. */
static void _sc92031_enable_tx_rx(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	priv->rx_config |= RxEnb;
	priv->tx_config |= TxEnb;
	iowrite32(priv->rx_config, port_base + RxConfig);
	iowrite32(priv->tx_config, port_base + TxConfig);
}
417
/* Drop all not-yet-completed Tx descriptors (counting each as a dropped
 * packet) and reset the ring indices. Caller must hold priv->lock. */
static void _sc92031_tx_clear(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);

	while (priv->tx_head - priv->tx_tail > 0) {
		priv->tx_tail++;
		priv->stats.tx_dropped++;
	}
	priv->tx_head = priv->tx_tail = 0;
}
428
/* Program the 64-bit multicast hash filter (MAR0/MAR1) from the device's
 * multicast list. Promiscuous/allmulti/overflow opens the filter fully.
 * Caller must hold priv->lock. */
static void _sc92031_set_mar(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 mar0 = 0, mar1 = 0;

	if ((dev->flags & IFF_PROMISC)
			|| dev->mc_count > multicast_filter_limit
			|| (dev->flags & IFF_ALLMULTI))
		mar0 = mar1 = 0xffffffff;
	else if (dev->flags & IFF_MULTICAST) {
		struct dev_mc_list *mc_list;

		for (mc_list = dev->mc_list; mc_list; mc_list = mc_list->next) {
			u32 crc;
			unsigned bit = 0;

			/* Hash = top 8 bits of the inverted Ethernet CRC. */
			crc = ~ether_crc(ETH_ALEN, mc_list->dmi_addr);
			crc >>= 24;

			/* Bit-swizzle pairs of CRC bits into the filter bit
			 * index. NOTE(review): this mapping (and which word
			 * gets the high bits) is taken from the vendor
			 * driver; verify against hardware docs. */
			if (crc & 0x01)	bit |= 0x02;
			if (crc & 0x02)	bit |= 0x01;
			if (crc & 0x10)	bit |= 0x20;
			if (crc & 0x20)	bit |= 0x10;
			if (crc & 0x40)	bit |= 0x08;
			if (crc & 0x80)	bit |= 0x04;

			if (bit > 31)
				mar0 |= 0x1 << (bit - 32);
			else
				mar1 |= 0x1 << bit;
		}
	}

	iowrite32(mar0, port_base + MAR0);
	iowrite32(mar1, port_base + MAR0 + 4);
}
466
/* Recompute the receive-filter bits of the shadowed RxConfig from
 * dev->flags, writing to hardware only when the relevant flags actually
 * changed and the link is up. Caller must hold priv->lock. */
static void _sc92031_set_rx_config(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	unsigned int old_mc_flags;
	u32 rx_config_bits = 0;

	old_mc_flags = priv->mc_flags;

	if (dev->flags & IFF_PROMISC)
		rx_config_bits |= RxSmall | RxHuge | RxErr | RxBroadcast
				| RxMulticast | RxAllphys;

	if (dev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
		rx_config_bits |= RxMulticast;

	if (dev->flags & IFF_BROADCAST)
		rx_config_bits |= RxBroadcast;

	/* Replace only the filter-related bits; keep enable/duplex/
	 * threshold bits untouched. */
	priv->rx_config &= ~(RxSmall | RxHuge | RxErr | RxBroadcast
			| RxMulticast | RxAllphys);
	priv->rx_config |= rx_config_bits;

	priv->mc_flags = dev->flags & (IFF_PROMISC | IFF_ALLMULTI
			| IFF_MULTICAST | IFF_BROADCAST);

	if (netif_carrier_ok(dev) && priv->mc_flags != old_mc_flags)
		iowrite32(priv->rx_config, port_base + RxConfig);
}
496
/* Poll the PHY for link state and reconfigure the MAC accordingly.
 * Returns true if the link is up. Caller must hold priv->lock. */
static bool _sc92031_check_media(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u16 bmsr;

	bmsr = _sc92031_mii_read(port_base, MII_BMSR);
	rmb();
	if (bmsr & BMSR_LSTATUS) {
		bool speed_100, duplex_full;
		u32 flow_ctrl_config = 0;
		/* Vendor register 24 reports the resolved speed/duplex. */
		u16 output_status = _sc92031_mii_read(port_base,
				MII_OutputStatus);
		/* Reading stopped the hardware scan; restart it. */
		_sc92031_mii_scan(port_base);

		speed_100 = output_status & 0x2;
		duplex_full = output_status & 0x4;

		/* Initial Tx/Rx configuration (FIFO thresholds; 0x48800000 =
		 * TxEnb | TxEnbFCS plus an unnamed bit from the vendor
		 * driver -- TODO confirm against hardware docs) */
		priv->rx_config = (0x40 << LowThresholdShift) | (0x1c0 << HighThresholdShift);
		priv->tx_config = 0x48800000;

		/* NOTE: vendor driver had dead code here to enable tx padding */

		if (!speed_100)
			priv->tx_config |= 0x80000;	/* TxDataRate: 10 Mbps */

		// configure rx mode
		_sc92031_set_rx_config(dev);

		if (duplex_full) {
			priv->rx_config |= RxFullDx;
			priv->tx_config |= TxFullDx;
			flow_ctrl_config = FlowCtrlFullDX | FlowCtrlEnb;
		} else {
			priv->rx_config &= ~RxFullDx;
			priv->tx_config &= ~TxFullDx;
		}

		_sc92031_set_mar(dev);
		_sc92031_set_rx_config(dev);
		_sc92031_enable_tx_rx(dev);
		iowrite32(flow_ctrl_config, port_base + FlowCtrlConfig);

		netif_carrier_on(dev);

		if (printk_ratelimit())
			printk(KERN_INFO "%s: link up, %sMbps, %s-duplex\n",
				dev->name,
				speed_100 ? "100" : "10",
				duplex_full ? "full" : "half");
		return true;
	} else {
		_sc92031_mii_scan(port_base);

		netif_carrier_off(dev);

		_sc92031_disable_tx_rx(dev);

		if (printk_ratelimit())
			printk(KERN_INFO "%s: link down\n", dev->name);
		return false;
	}
}
561
/* Reset the PHY and program it for the media selected by the 'media'
 * module parameter (default: advertise everything and autonegotiate).
 * Leaves the carrier off and the queue stopped; the subsequent link
 * interrupt / _sc92031_check_media brings it back up.
 * Caller must hold priv->lock (uses mdelay -- see locking rules). */
static void _sc92031_phy_reset(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 phy_ctrl;

	phy_ctrl = ioread32(port_base + PhyCtrl);
	phy_ctrl &= ~(PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10);
	phy_ctrl |= PhyCtrlAne | PhyCtrlReset;

	switch (media) {
	default:
	case AUTOSELECT:
		phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
		break;
	case M10_HALF:
		phy_ctrl |= PhyCtrlSpd10;
		break;
	case M10_FULL:
		phy_ctrl |= PhyCtrlDux | PhyCtrlSpd10;
		break;
	case M100_HALF:
		phy_ctrl |= PhyCtrlSpd100;
		break;
	case M100_FULL:
		phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
		break;
	}

	/* Pulse the reset bit, then release it. */
	iowrite32(phy_ctrl, port_base + PhyCtrl);
	mdelay(10);

	phy_ctrl &= ~PhyCtrlReset;
	iowrite32(phy_ctrl, port_base + PhyCtrl);
	mdelay(1);

	_sc92031_mii_write(port_base, MII_JAB,
			PHY_16_JAB_ENB | PHY_16_PORT_ENB);
	_sc92031_mii_scan(port_base);

	netif_carrier_off(dev);
	netif_stop_queue(dev);
}
605
/* Full chip reset: soft-reset the hardware, reprogram ring addresses,
 * clear all shadowed register state, and reset the PHY.
 * Caller must hold priv->lock (uses mdelay -- see locking rules). */
static void _sc92031_reset(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	/* disable PM */
	iowrite32(0, port_base + PMConfig);

	/* soft reset the chip */
	iowrite32(Cfg0_Reset, port_base + Config0);
	mdelay(200);

	iowrite32(0, port_base + Config0);
	mdelay(10);

	/* disable interrupts */
	iowrite32(0, port_base + IntrMask);

	/* clear multicast address */
	iowrite32(0, port_base + MAR0);
	iowrite32(0, port_base + MAR0 + 4);

	/* init rx ring */
	iowrite32(priv->rx_ring_dma_addr, port_base + RxbufAddr);
	priv->rx_ring_tail = priv->rx_ring_dma_addr;

	/* init tx ring */
	_sc92031_tx_clear(dev);

	/* clear old register values */
	priv->intr_status = 0;
	atomic_set(&priv->intr_mask, 0);
	priv->rx_config = 0;
	priv->tx_config = 0;
	priv->mc_flags = 0;

	/* configure rx buffer size */
	/* NOTE: vendor driver had dead code here to enable early tx/rx */
	iowrite32(Cfg1_Rcv64K, port_base + Config1);

	_sc92031_phy_reset(dev);
	_sc92031_check_media(dev);

	/* calculate rx fifo overflow */
	priv->rx_value = 0;

	/* enable PM */
	iowrite32(priv->pm_config, port_base + PMConfig);

	/* clear intr register */
	ioread32(port_base + IntrStatus);
}
658
659static void _sc92031_tx_tasklet(struct net_device *dev)
660{
661 struct sc92031_priv *priv = netdev_priv(dev);
662 void __iomem *port_base = priv->port_base;
663
664 unsigned old_tx_tail;
665 unsigned entry;
666 u32 tx_status;
667
668 old_tx_tail = priv->tx_tail;
669 while (priv->tx_head - priv->tx_tail > 0) {
670 entry = priv->tx_tail % NUM_TX_DESC;
671 tx_status = ioread32(port_base + TxStatus0 + entry * 4);
672
673 if (!(tx_status & (TxStatOK | TxUnderrun | TxAborted)))
674 break;
675
676 priv->tx_tail++;
677
678 if (tx_status & TxStatOK) {
679 priv->stats.tx_bytes += tx_status & 0x1fff;
680 priv->stats.tx_packets++;
681 /* Note: TxCarrierLost is always asserted at 100mbps. */
682 priv->stats.collisions += (tx_status >> 22) & 0xf;
683 }
684
685 if (tx_status & (TxOutOfWindow | TxAborted)) {
686 priv->stats.tx_errors++;
687
688 if (tx_status & TxAborted)
689 priv->stats.tx_aborted_errors++;
690
691 if (tx_status & TxCarrierLost)
692 priv->stats.tx_carrier_errors++;
693
694 if (tx_status & TxOutOfWindow)
695 priv->stats.tx_window_errors++;
696 }
697
698 if (tx_status & TxUnderrun)
699 priv->stats.tx_fifo_errors++;
700 }
701
702 if (priv->tx_tail != old_tx_tail)
703 if (netif_queue_stopped(dev))
704 netif_wake_queue(dev);
705}
706
/* Account an errored receive status word in the statistics.
 * Called from _sc92031_rx_tasklet with priv->lock held. */
static void _sc92031_rx_tasklet_error(u32 rx_status,
		struct sc92031_priv *priv, unsigned rx_size)
{
	if(rx_size > (MAX_ETH_FRAME_SIZE + 4) || rx_size < 16) {
		priv->stats.rx_errors++;
		priv->stats.rx_length_errors++;
	}

	if (!(rx_status & RxStatesOK)) {
		priv->stats.rx_errors++;

		if (rx_status & (RxHugeFrame | RxSmallFrame))
			priv->stats.rx_length_errors++;

		if (rx_status & RxBadAlign)
			priv->stats.rx_frame_errors++;

		if (!(rx_status & RxCRCOK))
			priv->stats.rx_crc_errors++;
	} else
		/* Size out of range but status OK: count as ring loss. */
		priv->rx_loss++;
}
729
/* Drain the receive ring: walk from our read pointer (rx_ring_tail) up
 * to the hardware write pointer (RxBufWPtr), copying each frame into a
 * fresh skb and handing it to the stack, then advance the hardware read
 * pointer. Called from the tasklet with priv->lock held. */
static void _sc92031_rx_tasklet(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	dma_addr_t rx_ring_head;
	unsigned rx_len;
	unsigned rx_ring_offset;
	void *rx_ring = priv->rx_ring;

	rx_ring_head = ioread32(port_base + RxBufWPtr);
	rmb();

	/* rx_ring_head is only 17 bits in the RxBufWPtr register.
	 * we need to change it to 32 bits physical address
	 */
	rx_ring_head &= (dma_addr_t)(RX_BUF_LEN - 1);
	rx_ring_head |= priv->rx_ring_dma_addr & ~(dma_addr_t)(RX_BUF_LEN - 1);
	if (rx_ring_head < priv->rx_ring_dma_addr)
		rx_ring_head += RX_BUF_LEN;

	/* Bytes available, accounting for wrap-around in the ring. */
	if (rx_ring_head >= priv->rx_ring_tail)
		rx_len = rx_ring_head - priv->rx_ring_tail;
	else
		rx_len = RX_BUF_LEN - (priv->rx_ring_tail - rx_ring_head);

	if (!rx_len)
		return;

	if (unlikely(rx_len > RX_BUF_LEN)) {
		if (printk_ratelimit())
			printk(KERN_ERR "%s: rx packets length > rx buffer\n",
					dev->name);
		return;
	}

	rx_ring_offset = (priv->rx_ring_tail - priv->rx_ring_dma_addr) % RX_BUF_LEN;

	while (rx_len) {
		u32 rx_status;
		unsigned rx_size, rx_size_align, pkt_size;
		struct sk_buff *skb;

		/* Each frame is preceded by a little-endian status word. */
		rx_status = le32_to_cpup((__le32 *)(rx_ring + rx_ring_offset));
		rmb();

		rx_size = rx_status >> 20;
		rx_size_align = (rx_size + 3) & ~3;	// for 4 bytes aligned
		pkt_size = rx_size - 4;	// Omit the four octet CRC from the length.

		rx_ring_offset = (rx_ring_offset + 4) % RX_BUF_LEN;

		if (unlikely(rx_status == 0
				|| rx_size > (MAX_ETH_FRAME_SIZE + 4)
				|| rx_size < 16
				|| !(rx_status & RxStatesOK))) {
			/* Bad status word: account it and abandon the rest
			 * of this batch (ring may be corrupt past here). */
			_sc92031_rx_tasklet_error(rx_status, priv, rx_size);
			break;
		}

		if (unlikely(rx_size_align + 4 > rx_len)) {
			if (printk_ratelimit())
				printk(KERN_ERR "%s: rx_len is too small\n", dev->name);
			break;
		}

		rx_len -= rx_size_align + 4;

		skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
		if (unlikely(!skb)) {
			if (printk_ratelimit())
				printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n",
						dev->name, pkt_size);
			goto next;
		}

		skb_reserve(skb, NET_IP_ALIGN);

		/* Copy out of the ring, handling wrap at the buffer end. */
		if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) {
			memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset),
				rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset);
			memcpy(skb_put(skb, pkt_size - (RX_BUF_LEN - rx_ring_offset)),
				rx_ring, pkt_size - (RX_BUF_LEN - rx_ring_offset));
		} else {
			memcpy(skb_put(skb, pkt_size), rx_ring + rx_ring_offset, pkt_size);
		}

		skb->dev = dev;
		skb->protocol = eth_type_trans(skb, dev);
		dev->last_rx = jiffies;
		netif_rx(skb);

		priv->stats.rx_bytes += pkt_size;
		priv->stats.rx_packets++;

		if (rx_status & Rx_Multicast)
			priv->stats.multicast++;

	next:
		rx_ring_offset = (rx_ring_offset + rx_size_align) % RX_BUF_LEN;
	}
	mb();

	/* Tell the hardware how far we have consumed. */
	priv->rx_ring_tail = rx_ring_head;
	iowrite32(priv->rx_ring_tail, port_base + RxBufRPtr);
}
836
/* Handle a link-change interrupt: re-check media and start or stop the
 * transmit queue accordingly. Called from the tasklet with priv->lock
 * held. */
static void _sc92031_link_tasklet(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);

	if (_sc92031_check_media(dev))
		netif_wake_queue(dev);
	else {
		netif_stop_queue(dev);
		priv->stats.tx_carrier_errors++;
	}
}
848
/* Bottom half: dispatch the interrupt causes latched by
 * sc92031_interrupt in priv->intr_status, then re-enable interrupts by
 * restoring the saved mask (which is 0 if sc92031_disable_interrupts
 * ran meanwhile). */
static void sc92031_tasklet(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 intr_status, intr_mask;

	intr_status = priv->intr_status;

	spin_lock(&priv->lock);

	if (unlikely(!netif_running(dev)))
		goto out;

	if (intr_status & TxOK)
		_sc92031_tx_tasklet(dev);

	if (intr_status & RxOK)
		_sc92031_rx_tasklet(dev);

	if (intr_status & RxOverflow)
		priv->stats.rx_errors++;

	if (intr_status & TimeOut) {
		priv->stats.rx_errors++;
		priv->stats.rx_length_errors++;
	}

	if (intr_status & (LinkFail | LinkOK))
		_sc92031_link_tasklet(dev);

out:
	/* Restore the interrupt mask (0 while interrupts are disabled). */
	intr_mask = atomic_read(&priv->intr_mask);
	rmb();

	iowrite32(intr_mask, port_base + IntrMask);
	mmiowb();

	spin_unlock(&priv->lock);
}
889
/* Hard interrupt handler: mask interrupts, latch the cause bits into
 * priv->intr_status, and defer all work to the tasklet (which restores
 * the mask when done). Interrupts stay masked between here and the
 * tasklet, so the two never run concurrently. */
static irqreturn_t sc92031_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 intr_status, intr_mask;

	/* mask interrupts before clearing IntrStatus */
	iowrite32(0, port_base + IntrMask);
	_sc92031_dummy_read(port_base);

	/* Reading IntrStatus also acknowledges/clears it. */
	intr_status = ioread32(port_base + IntrStatus);
	if (unlikely(intr_status == 0xffffffff))
		return IRQ_NONE;	// hardware has gone missing

	intr_status &= IntrBits;
	if (!intr_status)
		goto out_none;

	priv->intr_status = intr_status;
	tasklet_schedule(&priv->tasklet);

	return IRQ_HANDLED;

out_none:
	/* Not ours: restore the mask we clobbered above. */
	intr_mask = atomic_read(&priv->intr_mask);
	rmb();

	iowrite32(intr_mask, port_base + IntrMask);
	mmiowb();

	return IRQ_NONE;
}
923
/* dev->get_stats hook: refresh the Rx FIFO error count from hardware
 * and return the accumulated statistics. */
static struct net_device_stats *sc92031_get_stats(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	// FIXME I do not understand what is this trying to do.
	if (netif_running(dev)) {
		int temp;

		spin_lock_bh(&priv->lock);

		/* Update the error count. */
		temp = (ioread32(port_base + RxStatus0) >> 16) & 0xffff;

		/* NOTE(review): looks like the 16-bit hardware counter is
		 * accumulated into rx_value when it saturates at 0xffff --
		 * verify against vendor documentation. */
		if (temp == 0xffff) {
			priv->rx_value += temp;
			priv->stats.rx_fifo_errors = priv->rx_value;
		} else {
			priv->stats.rx_fifo_errors = temp + priv->rx_value;
		}

		spin_unlock_bh(&priv->lock);
	}

	return &priv->stats;
}
950
/* dev->hard_start_xmit hook: copy the skb into the next Tx bounce
 * buffer, zero-pad short frames to ETH_ZLEN, and kick the descriptor.
 * The skb is always consumed (freed), even on error. */
static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int err = 0;
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	unsigned len;
	unsigned entry;
	u32 tx_status;

	if (unlikely(skb->len > TX_BUF_SIZE)) {
		err = -EMSGSIZE;
		priv->stats.tx_dropped++;
		goto out;
	}

	spin_lock_bh(&priv->lock);

	if (unlikely(!netif_carrier_ok(dev))) {
		err = -ENOLINK;
		priv->stats.tx_dropped++;
		goto out_unlock;
	}

	/* The stack must not call us with a full ring (queue is stopped
	 * below when it fills). */
	BUG_ON(priv->tx_head - priv->tx_tail >= NUM_TX_DESC);

	entry = priv->tx_head++ % NUM_TX_DESC;

	skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE);

	len = skb->len;
	if (unlikely(len < ETH_ZLEN)) {
		memset(priv->tx_bufs + entry * TX_BUF_SIZE + len,
				0, ETH_ZLEN - len);
		len = ETH_ZLEN;
	}

	wmb();

	/* Pick an early-Tx threshold (EarlyTxThresShift field) based on
	 * frame length; short frames start transmitting immediately --
	 * thresholds presumably tuned by the vendor, TODO confirm. */
	if (len < 100)
		tx_status = len;
	else if (len < 300)
		tx_status = 0x30000 | len;
	else
		tx_status = 0x50000 | len;

	iowrite32(priv->tx_bufs_dma_addr + entry * TX_BUF_SIZE,
			port_base + TxAddr0 + entry * 4);
	iowrite32(tx_status, port_base + TxStatus0 + entry * 4);
	mmiowb();

	dev->trans_start = jiffies;

	if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC)
		netif_stop_queue(dev);

out_unlock:
	spin_unlock_bh(&priv->lock);

out:
	dev_kfree_skb(skb);

	return err;
}
1015
1016static int sc92031_open(struct net_device *dev)
1017{
1018 int err;
1019 struct sc92031_priv *priv = netdev_priv(dev);
1020 struct pci_dev *pdev = priv->pdev;
1021
1022 priv->rx_ring = pci_alloc_consistent(pdev, RX_BUF_LEN,
1023 &priv->rx_ring_dma_addr);
1024 if (unlikely(!priv->rx_ring)) {
1025 err = -ENOMEM;
1026 goto out_alloc_rx_ring;
1027 }
1028
1029 priv->tx_bufs = pci_alloc_consistent(pdev, TX_BUF_TOT_LEN,
1030 &priv->tx_bufs_dma_addr);
1031 if (unlikely(!priv->tx_bufs)) {
1032 err = -ENOMEM;
1033 goto out_alloc_tx_bufs;
1034 }
1035 priv->tx_head = priv->tx_tail = 0;
1036
1037 err = request_irq(pdev->irq, sc92031_interrupt,
1038 SA_SHIRQ, dev->name, dev);
1039 if (unlikely(err < 0))
1040 goto out_request_irq;
1041
1042 priv->pm_config = 0;
1043
1044 /* Interrupts already disabled by sc92031_stop or sc92031_probe */
1045 spin_lock(&priv->lock);
1046
1047 _sc92031_reset(dev);
1048 mmiowb();
1049
1050 spin_unlock(&priv->lock);
1051 sc92031_enable_interrupts(dev);
1052
1053 if (netif_carrier_ok(dev))
1054 netif_start_queue(dev);
1055 else
1056 netif_tx_disable(dev);
1057
1058 return 0;
1059
1060out_request_irq:
1061 pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
1062 priv->tx_bufs_dma_addr);
1063out_alloc_tx_bufs:
1064 pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
1065 priv->rx_ring_dma_addr);
1066out_alloc_rx_ring:
1067 return err;
1068}
1069
/* dev->stop hook: quiesce interrupts, stop Tx/Rx, drop pending Tx
 * packets, and release the irq and DMA buffers allocated by
 * sc92031_open. Always returns 0. */
static int sc92031_stop(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	struct pci_dev *pdev = priv->pdev;

	netif_tx_disable(dev);

	/* Disable interrupts, stop Tx and Rx. */
	sc92031_disable_interrupts(dev);

	spin_lock(&priv->lock);

	_sc92031_disable_tx_rx(dev);
	_sc92031_tx_clear(dev);
	mmiowb();

	spin_unlock(&priv->lock);

	free_irq(pdev->irq, dev);
	pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
			priv->tx_bufs_dma_addr);
	pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
			priv->rx_ring_dma_addr);

	return 0;
}
1096
/* dev->set_multicast_list hook: reprogram the multicast hash filter and
 * the receive-filter config under the lock (bh-safe: may be called from
 * process context while the tasklet runs). */
static void sc92031_set_multicast_list(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->lock);

	_sc92031_set_mar(dev);
	_sc92031_set_rx_config(dev);
	mmiowb();

	spin_unlock_bh(&priv->lock);
}
1109
/* dev->tx_timeout hook: the transmitter appears hung, so count the
 * event and recover with a full chip reset. */
static void sc92031_tx_timeout(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);

	/* Disable interrupts by clearing the interrupt mask.*/
	sc92031_disable_interrupts(dev);

	spin_lock(&priv->lock);

	priv->tx_timeouts++;	/* exported via ETHTOOL_GSTATS */

	_sc92031_reset(dev);
	mmiowb();

	spin_unlock(&priv->lock);

	/* enable interrupts */
	sc92031_enable_interrupts(dev);

	if (netif_carrier_ok(dev))
		netif_wake_queue(dev);
}
1132
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: run the interrupt handler and, if it latched work,
 * the tasklet body synchronously with the irq line disabled. */
static void sc92031_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	if (sc92031_interrupt(dev->irq, dev) != IRQ_NONE)
		sc92031_tasklet((unsigned long)dev);
	enable_irq(dev->irq);
}
#endif
1142
/* ethtool get_settings hook: report link parameters derived from the
 * PhyCtrl register (advertised modes) and the PHY's vendor OutputStatus
 * register (resolved speed/duplex). Always returns 0. */
static int
sc92031_ethtool_get_settings(struct net_device *dev,
		struct ethtool_cmd *cmd)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u8 phy_address;
	u32 phy_ctrl;
	u16 output_status;

	spin_lock_bh(&priv->lock);

	phy_address = ioread32(port_base + Miicmd1) >> 27;
	phy_ctrl = ioread32(port_base + PhyCtrl);

	output_status = _sc92031_mii_read(port_base, MII_OutputStatus);
	/* The read stopped the hardware PHY scan; restart it. */
	_sc92031_mii_scan(port_base);
	mmiowb();

	spin_unlock_bh(&priv->lock);

	cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full
			| SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full
			| SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII;

	cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;

	/* All three mode bits set together means "autonegotiate". */
	if ((phy_ctrl & (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
			== (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
		cmd->advertising |= ADVERTISED_Autoneg;

	if ((phy_ctrl & PhyCtrlSpd10) == PhyCtrlSpd10)
		cmd->advertising |= ADVERTISED_10baseT_Half;

	if ((phy_ctrl & (PhyCtrlSpd10 | PhyCtrlDux))
			== (PhyCtrlSpd10 | PhyCtrlDux))
		cmd->advertising |= ADVERTISED_10baseT_Full;

	if ((phy_ctrl & PhyCtrlSpd100) == PhyCtrlSpd100)
		cmd->advertising |= ADVERTISED_100baseT_Half;

	if ((phy_ctrl & (PhyCtrlSpd100 | PhyCtrlDux))
			== (PhyCtrlSpd100 | PhyCtrlDux))
		cmd->advertising |= ADVERTISED_100baseT_Full;

	if (phy_ctrl & PhyCtrlAne)
		cmd->advertising |= ADVERTISED_Autoneg;

	cmd->speed = (output_status & 0x2) ? SPEED_100 : SPEED_10;
	cmd->duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = PORT_MII;
	cmd->phy_address = phy_address;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = (phy_ctrl & PhyCtrlAne) ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	return 0;
}
1199
/* ethtool set_settings: validate the requested link parameters and
 * program the duplex/speed/autoneg bits of PhyCtrl.  Port, PHY address
 * and transceiver are fixed by the hardware and must match exactly.
 * Returns 0 on success, -EINVAL on any unsupported combination. */
static int sc92031_ethtool_set_settings(struct net_device *dev,
		struct ethtool_cmd *cmd)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 phy_ctrl;
	u32 old_phy_ctrl;

	if (!(cmd->speed == SPEED_10 || cmd->speed == SPEED_100))
		return -EINVAL;
	if (!(cmd->duplex == DUPLEX_HALF || cmd->duplex == DUPLEX_FULL))
		return -EINVAL;
	if (!(cmd->port == PORT_MII))
		return -EINVAL;
	if (!(cmd->phy_address == 0x1f))
		return -EINVAL;
	if (!(cmd->transceiver == XCVR_INTERNAL))
		return -EINVAL;
	if (!(cmd->autoneg == AUTONEG_DISABLE || cmd->autoneg == AUTONEG_ENABLE))
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* at least one advertisable mode must be requested */
		if (!(cmd->advertising & (ADVERTISED_Autoneg
				| ADVERTISED_100baseT_Full
				| ADVERTISED_100baseT_Half
				| ADVERTISED_10baseT_Full
				| ADVERTISED_10baseT_Half)))
			return -EINVAL;

		phy_ctrl = PhyCtrlAne;

		// FIXME: I'm not sure what the original code was trying to do
		if (cmd->advertising & ADVERTISED_Autoneg)
			phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
		if (cmd->advertising & ADVERTISED_100baseT_Full)
			phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
		if (cmd->advertising & ADVERTISED_100baseT_Half)
			phy_ctrl |= PhyCtrlSpd100;
		if (cmd->advertising & ADVERTISED_10baseT_Full)
			phy_ctrl |= PhyCtrlSpd10 | PhyCtrlDux;
		if (cmd->advertising & ADVERTISED_10baseT_Half)
			phy_ctrl |= PhyCtrlSpd10;
	} else {
		// FIXME: Whole branch guessed
		phy_ctrl = 0;

		if (cmd->speed == SPEED_10)
			phy_ctrl |= PhyCtrlSpd10;
		else /* cmd->speed == SPEED_100 */
			phy_ctrl |= PhyCtrlSpd100;

		if (cmd->duplex == DUPLEX_FULL)
			phy_ctrl |= PhyCtrlDux;
	}

	spin_lock_bh(&priv->lock);

	/* preserve all PhyCtrl bits we are not managing here, and avoid a
	 * register write when nothing actually changed */
	old_phy_ctrl = ioread32(port_base + PhyCtrl);
	phy_ctrl |= old_phy_ctrl & ~(PhyCtrlAne | PhyCtrlDux
			| PhyCtrlSpd100 | PhyCtrlSpd10);
	if (phy_ctrl != old_phy_ctrl)
		iowrite32(phy_ctrl, port_base + PhyCtrl);

	spin_unlock_bh(&priv->lock);

	return 0;
}
1267
1268static void sc92031_ethtool_get_drvinfo(struct net_device *dev,
1269 struct ethtool_drvinfo *drvinfo)
1270{
1271 struct sc92031_priv *priv = netdev_priv(dev);
1272 struct pci_dev *pdev = priv->pdev;
1273
1274 strcpy(drvinfo->driver, SC92031_NAME);
1275 strcpy(drvinfo->version, SC92031_VERSION);
1276 strcpy(drvinfo->bus_info, pci_name(pdev));
1277}
1278
/* ethtool get_wol: translate PMConfig register bits into WAKE_* flags. */
static void sc92031_ethtool_get_wol(struct net_device *dev,
		struct ethtool_wolinfo *wolinfo)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 pm_config;

	spin_lock_bh(&priv->lock);
	pm_config = ioread32(port_base + PMConfig);
	spin_unlock_bh(&priv->lock);

	// FIXME: Guessed
	wolinfo->supported = WAKE_PHY | WAKE_MAGIC
			| WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
	wolinfo->wolopts = 0;

	if (pm_config & PM_LinkUp)
		wolinfo->wolopts |= WAKE_PHY;

	if (pm_config & PM_Magic)
		wolinfo->wolopts |= WAKE_MAGIC;

	/* a single hardware bit appears to cover all frame-based wakeups */
	if (pm_config & PM_WakeUp)
		// FIXME: Guessed
		wolinfo->wolopts |= WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
}
1305
/* ethtool set_wol: program PMConfig from the requested WAKE_* flags and
 * cache the value in priv->pm_config for later restore. */
static int sc92031_ethtool_set_wol(struct net_device *dev,
		struct ethtool_wolinfo *wolinfo)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 pm_config;

	spin_lock_bh(&priv->lock);

	/* clear only the bits we manage, keep the rest of the register */
	pm_config = ioread32(port_base + PMConfig)
			& ~(PM_LinkUp | PM_Magic | PM_WakeUp);

	if (wolinfo->wolopts & WAKE_PHY)
		pm_config |= PM_LinkUp;

	if (wolinfo->wolopts & WAKE_MAGIC)
		pm_config |= PM_Magic;

	// FIXME: Guessed
	if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST))
		pm_config |= PM_WakeUp;

	priv->pm_config = pm_config;
	iowrite32(pm_config, port_base + PMConfig);
	mmiowb();

	spin_unlock_bh(&priv->lock);

	return 0;
}
1336
/* ethtool nway_reset: restart PHY autonegotiation.  Fails with -EINVAL
 * when autoneg is not currently enabled in BMCR.  The MII scan is
 * restarted on every exit path. */
static int sc92031_ethtool_nway_reset(struct net_device *dev)
{
	int err = 0;
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u16 bmcr;

	spin_lock_bh(&priv->lock);

	bmcr = _sc92031_mii_read(port_base, MII_BMCR);
	if (!(bmcr & BMCR_ANENABLE)) {
		err = -EINVAL;
		goto out;
	}

	_sc92031_mii_write(port_base, MII_BMCR, bmcr | BMCR_ANRESTART);

out:
	_sc92031_mii_scan(port_base);
	mmiowb();

	spin_unlock_bh(&priv->lock);

	return err;
}
1362
/* Names of the u64 statistics exported by get_ethtool_stats(), in
 * order: data[0] = tx_timeouts, data[1] = rx_loss. */
static const char sc92031_ethtool_stats_strings[SILAN_STATS_NUM][ETH_GSTRING_LEN] = {
	"tx_timeout",
	"rx_loss",
};
1367
1368static void sc92031_ethtool_get_strings(struct net_device *dev,
1369 u32 stringset, u8 *data)
1370{
1371 if (stringset == ETH_SS_STATS)
1372 memcpy(data, sc92031_ethtool_stats_strings,
1373 SILAN_STATS_NUM * ETH_GSTRING_LEN);
1374}
1375
1376static int sc92031_ethtool_get_stats_count(struct net_device *dev)
1377{
1378 return SILAN_STATS_NUM;
1379}
1380
/* ethtool get_ethtool_stats: snapshot the driver counters under the
 * lock so the two values are mutually consistent.  Order must match
 * sc92031_ethtool_stats_strings. */
static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct sc92031_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->lock);
	data[0] = priv->tx_timeouts;
	data[1] = priv->rx_loss;
	spin_unlock_bh(&priv->lock);
}
1391
/* ethtool entry points.  The csum/sg/tso/ufo getters use the generic
 * helpers because this driver fakes checksum offload in software (see
 * dev->features setup in sc92031_probe). */
static struct ethtool_ops sc92031_ethtool_ops = {
	.get_settings		= sc92031_ethtool_get_settings,
	.set_settings		= sc92031_ethtool_set_settings,
	.get_drvinfo		= sc92031_ethtool_get_drvinfo,
	.get_wol		= sc92031_ethtool_get_wol,
	.set_wol		= sc92031_ethtool_set_wol,
	.nway_reset		= sc92031_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.get_tso		= ethtool_op_get_tso,
	.get_strings		= sc92031_ethtool_get_strings,
	.get_stats_count	= sc92031_ethtool_get_stats_count,
	.get_ethtool_stats	= sc92031_ethtool_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
	.get_ufo		= ethtool_op_get_ufo,
};
1409
/* PCI probe: enable the device, map its BAR, allocate and populate the
 * net_device, read the MAC address from chip registers, and register
 * the interface.  Error paths unwind in strict reverse order of
 * acquisition via the goto chain at the bottom. */
static int __devinit sc92031_probe(struct pci_dev *pdev,
		const struct pci_device_id *id)
{
	int err;
	void __iomem* port_base;
	struct net_device *dev;
	struct sc92031_priv *priv;
	u32 mac0, mac1;

	err = pci_enable_device(pdev);
	if (unlikely(err < 0))
		goto out_enable_device;

	pci_set_master(pdev);

	/* chip only does 32-bit DMA */
	err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (unlikely(err < 0))
		goto out_set_dma_mask;

	err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	if (unlikely(err < 0))
		goto out_set_dma_mask;

	err = pci_request_regions(pdev, SC92031_NAME);
	if (unlikely(err < 0))
		goto out_request_regions;

	port_base = pci_iomap(pdev, SC92031_USE_BAR, 0);
	if (unlikely(!port_base)) {
		err = -EIO;
		goto out_iomap;
	}

	dev = alloc_etherdev(sizeof(struct sc92031_priv));
	if (unlikely(!dev)) {
		err = -ENOMEM;
		goto out_alloc_etherdev;
	}

	pci_set_drvdata(pdev, dev);

	/* record the resource in the field matching the BAR type
	 * (BAR 0 = memory space, BAR 1 = I/O space) */
#if SC92031_USE_BAR == 0
	dev->mem_start = pci_resource_start(pdev, SC92031_USE_BAR);
	dev->mem_end = pci_resource_end(pdev, SC92031_USE_BAR);
#elif SC92031_USE_BAR == 1
	dev->base_addr = pci_resource_start(pdev, SC92031_USE_BAR);
#endif
	dev->irq = pdev->irq;

	/* faked with skb_copy_and_csum_dev */
	dev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;

	dev->get_stats		= sc92031_get_stats;
	dev->ethtool_ops	= &sc92031_ethtool_ops;
	dev->hard_start_xmit	= sc92031_start_xmit;
	dev->watchdog_timeo	= TX_TIMEOUT;
	dev->open		= sc92031_open;
	dev->stop		= sc92031_stop;
	dev->set_multicast_list	= sc92031_set_multicast_list;
	dev->tx_timeout		= sc92031_tx_timeout;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller	= sc92031_poll_controller;
#endif

	priv = netdev_priv(dev);
	spin_lock_init(&priv->lock);
	priv->port_base = port_base;
	priv->pdev = pdev;
	tasklet_init(&priv->tasklet, sc92031_tasklet, (unsigned long)dev);
	/* Fudge tasklet count so the call to sc92031_enable_interrupts at
	 * sc92031_open will work correctly */
	tasklet_disable_nosync(&priv->tasklet);

	/* PCI PM Wakeup */
	iowrite32((~PM_LongWF & ~PM_LWPTN) | PM_Enable, port_base + PMConfig);

	/* MAC address lives big-endian in two 32-bit registers at MAC0 */
	mac0 = ioread32(port_base + MAC0);
	mac1 = ioread32(port_base + MAC0 + 4);
	dev->dev_addr[0] = dev->perm_addr[0] = mac0 >> 24;
	dev->dev_addr[1] = dev->perm_addr[1] = mac0 >> 16;
	dev->dev_addr[2] = dev->perm_addr[2] = mac0 >> 8;
	dev->dev_addr[3] = dev->perm_addr[3] = mac0;
	dev->dev_addr[4] = dev->perm_addr[4] = mac1 >> 8;
	dev->dev_addr[5] = dev->perm_addr[5] = mac1;

	err = register_netdev(dev);
	if (err < 0)
		goto out_register_netdev;

	return 0;

out_register_netdev:
	free_netdev(dev);
out_alloc_etherdev:
	pci_iounmap(pdev, port_base);
out_iomap:
	pci_release_regions(pdev);
out_request_regions:
out_set_dma_mask:
	pci_disable_device(pdev);
out_enable_device:
	return err;
}
1513
/* PCI remove: tear down in exact reverse order of sc92031_probe. */
static void __devexit sc92031_remove(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem* port_base = priv->port_base;

	unregister_netdev(dev);
	free_netdev(dev);
	pci_iounmap(pdev, port_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
1526
/* PCI suspend: save config space, detach netif, quiesce Tx/Rx and
 * clear the Tx ring, then drop to the requested power state.  When the
 * interface is down only the power transition is needed. */
static int sc92031_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sc92031_priv *priv = netdev_priv(dev);

	pci_save_state(pdev);

	if (!netif_running(dev))
		goto out;

	netif_device_detach(dev);

	/* Disable interrupts, stop Tx and Rx. */
	sc92031_disable_interrupts(dev);

	/* plain spin_lock: chip interrupts were masked just above */
	spin_lock(&priv->lock);

	_sc92031_disable_tx_rx(dev);
	_sc92031_tx_clear(dev);
	mmiowb();

	spin_unlock(&priv->lock);

out:
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
1555
/* PCI resume: restore config space and power, re-reset the chip, then
 * reattach the interface and restart the queue according to carrier
 * state. */
static int sc92031_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sc92031_priv *priv = netdev_priv(dev);

	pci_restore_state(pdev);
	pci_set_power_state(pdev, PCI_D0);

	if (!netif_running(dev))
		goto out;

	/* Interrupts already disabled by sc92031_suspend */
	spin_lock(&priv->lock);

	_sc92031_reset(dev);
	mmiowb();

	spin_unlock(&priv->lock);
	sc92031_enable_interrupts(dev);

	netif_device_attach(dev);

	if (netif_carrier_ok(dev))
		netif_wake_queue(dev);
	else
		netif_tx_disable(dev);

out:
	return 0;
}
1586
/* PCI IDs handled by this driver: the SC92031 itself and Silan's
 * 8139D-compatible part. */
static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, PCI_DEVICE_ID_SILAN_SC92031) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, PCI_DEVICE_ID_SILAN_8139D) },
	{ 0, }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table);
1593
/* PCI driver glue binding the probe/remove/PM callbacks above. */
static struct pci_driver sc92031_pci_driver = {
	.name		= SC92031_NAME,
	.id_table	= sc92031_pci_device_id_table,
	.probe		= sc92031_probe,
	.remove		= __devexit_p(sc92031_remove),
	.suspend	= sc92031_suspend,
	.resume		= sc92031_resume,
};
1602
/* Module init: announce the driver and register with the PCI core. */
static int __init sc92031_init(void)
{
	printk(KERN_INFO SC92031_DESCRIPTION " " SC92031_VERSION "\n");
	return pci_register_driver(&sc92031_pci_driver);
}
1608
/* Module exit: unregister the PCI driver (removes all bound devices). */
static void __exit sc92031_exit(void)
{
	pci_unregister_driver(&sc92031_pci_driver);
}
1613
module_init(sc92031_init);
module_exit(sc92031_exit);

/* Module metadata */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>");
MODULE_DESCRIPTION(SC92031_DESCRIPTION);
MODULE_VERSION(SC92031_VERSION);
diff --git a/drivers/net/sk_mca.c b/drivers/net/sk_mca.c
deleted file mode 100644
index 96e06c51b75d..000000000000
--- a/drivers/net/sk_mca.c
+++ /dev/null
@@ -1,1216 +0,0 @@
1/*
2net-3-driver for the SKNET MCA-based cards
3
4This is an extension to the Linux operating system, and is covered by the
5same GNU General Public License that covers that work.
6
7Copyright 1999 by Alfred Arnold (alfred@ccac.rwth-aachen.de,
8 alfred.arnold@lancom.de)
9
10This driver is based both on the 3C523 driver and the SK_G16 driver.
11
12paper sources:
13 'PC Hardware: Aufbau, Funktionsweise, Programmierung' by
14 Hans-Peter Messmer for the basic Microchannel stuff
15
 'Linux Geraetetreiber' by Alessandro Rubini, Kalle Dalheimer
17 for help on Ethernet driver programming
18
19 'Ethernet/IEEE 802.3 Family 1992 World Network Data Book/Handbook' by AMD
20 for documentation on the AM7990 LANCE
21
22 'SKNET Personal Technisches Manual', Version 1.2 by Schneider&Koch
23 for documentation on the Junior board
24
 'SK-NET MC2+ Technical Manual', Version 1.1 by Schneider&Koch for
 documentation on the MC2 board
27
28 A big thank you to the S&K support for providing me so quickly with
29 documentation!
30
31 Also see http://www.syskonnect.com/
32
33 Missing things:
34
35 -> set debug level via ioctl instead of compile-time switches
36 -> I didn't follow the development of the 2.1.x kernels, so my
37 assumptions about which things changed with which kernel version
38 are probably nonsense
39
40History:
41 May 16th, 1999
42 startup
 May 22nd, 1999
 added private structure, methods
 begun building data structures in RAM
 May 23rd, 1999
47 can receive frames, send frames
48 May 24th, 1999
49 modularized initialization of LANCE
50 loadable as module
51 still Tx problem :-(
52 May 26th, 1999
53 MC2 works
54 support for multiple devices
55 display media type for MC2+
56 May 28th, 1999
57 fixed problem in GetLANCE leaving interrupts turned off
58 increase TX queue to 4 packets to improve send performance
59 May 29th, 1999
60 a few corrections in statistics, caught rcvr overruns
61 reinitialization of LANCE/board in critical situations
62 MCA info implemented
63 implemented LANCE multicast filter
64 Jun 6th, 1999
65 additions for Linux 2.2
66 Dec 25th, 1999
67 unfortunately there seem to be newer MC2+ boards that react
68 on IRQ 3/5/9/10 instead of 3/5/10/11, so we have to autoprobe
69 in questionable cases...
70 Dec 28th, 1999
71 integrated patches from David Weinehall & Bill Wendling for 2.3
72 kernels (isa_...functions). Things are defined in a way that
73 it still works with 2.0.x 8-)
74 Dec 30th, 1999
75 added handling of the remaining interrupt conditions. That
76 should cure the spurious hangs.
77 Jan 30th, 2000
78 newer kernels automatically probe more than one board, so the
79 'startslot' as a variable is also needed here
80 June 1st, 2000
81 added changes for recent 2.3 kernels
82
83 *************************************************************************/
84
85#include <linux/kernel.h>
86#include <linux/string.h>
87#include <linux/errno.h>
88#include <linux/ioport.h>
89#include <linux/slab.h>
90#include <linux/interrupt.h>
91#include <linux/delay.h>
92#include <linux/time.h>
93#include <linux/mca-legacy.h>
94#include <linux/init.h>
95#include <linux/module.h>
96#include <linux/netdevice.h>
97#include <linux/etherdevice.h>
98#include <linux/skbuff.h>
99#include <linux/bitops.h>
100
101#include <asm/processor.h>
102#include <asm/io.h>
103
104#define _SK_MCA_DRIVER_
105#include "sk_mca.h"
106
107/* ------------------------------------------------------------------------
108 * global static data - not more since we can handle multiple boards and
109 * have to pack all state info into the device struct!
110 * ------------------------------------------------------------------------ */
111
/* Human-readable names for the media type decoded from POS register 4
 * (indexed by skmca_medium). */
static char *MediaNames[Media_Count] =
    { "10Base2", "10BaseT", "10Base5", "Unknown" };

/* CRC polynomial stored one bit per byte, consumed by UpdateCRC() when
 * computing the LANCE multicast hash. */
static unsigned char poly[] =
    { 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0,
	1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0
};
119
120/* ------------------------------------------------------------------------
121 * private subfunctions
122 * ------------------------------------------------------------------------ */
123
124/* dump parts of shared memory - only needed during debugging */
125
126#ifdef DEBUG
/* Hex-dump `len` bytes of board shared memory starting at `start`,
 * 16 bytes per row (debug builds only). */
static void dumpmem(struct net_device *dev, u32 start, u32 len)
{
	skmca_priv *priv = netdev_priv(dev);
	int z;

	for (z = 0; z < len; z++) {
		if ((z & 15) == 0)
			printk("%04x:", z);
		printk(" %02x", readb(priv->base + start + z));
		if ((z & 15) == 15)
			printk("\n");
	}
}
140
141/* print exact time - ditto */
142
143static void PrTime(void)
144{
145 struct timeval tv;
146
147 do_gettimeofday(&tv);
148 printk("%9d:%06d: ", tv.tv_sec, tv.tv_usec);
149}
150#endif
151
152/* deduce resources out of POS registers */
153
/* Decode shared-memory base address, IRQ line and medium type from the
 * card's stored POS (Programmable Option Select) registers.  A negative
 * *irq value appears to flag an ambiguous line that the caller must
 * autoprobe (newer MC2+ boards use different IRQ mappings, per the
 * driver history notes) — TODO confirm at the call site. */
static void __init getaddrs(int slot, int junior, int *base, int *irq,
			    skmca_medium * medium)
{
	u_char pos0, pos1, pos2;

	if (junior) {
		pos0 = mca_read_stored_pos(slot, 2);
		*base = ((pos0 & 0x0e) << 13) + 0xc0000;
		*irq = ((pos0 & 0x10) >> 4) + 10;
		*medium = Media_Unknown;
	} else {
		/* reset POS 104 Bits 0+1 so the shared memory region goes to the
		   configured area between 640K and 1M. Afterwards, enable the MC2.
		   I really don't know what rode SK to do this... */

		mca_write_pos(slot, 4,
			      mca_read_stored_pos(slot, 4) & 0xfc);
		mca_write_pos(slot, 2,
			      mca_read_stored_pos(slot, 2) | 0x01);

		pos1 = mca_read_stored_pos(slot, 3);
		pos2 = mca_read_stored_pos(slot, 4);
		*base = ((pos1 & 0x07) << 14) + 0xc0000;
		/* POS bits 2-3 select the IRQ line */
		switch (pos2 & 0x0c) {
		case 0:
			*irq = 3;
			break;
		case 4:
			*irq = 5;
			break;
		case 8:
			*irq = -10;	/* ambiguous: 10 on old, 9 on new boards */
			break;
		case 12:
			*irq = -11;	/* ambiguous: 11 on old, 10 on new boards */
			break;
		}
		*medium = (pos2 >> 6) & 3;	/* top two bits: media type */
	}
}
194
195/* check for both cards:
196 When the MC2 is turned off, it was configured for more than 15MB RAM,
197 is disabled and won't get detected using the standard probe. We
198 therefore have to scan the slots manually :-( */
199
200static int __init dofind(int *junior, int firstslot)
201{
202 int slot;
203 unsigned int id;
204
205 for (slot = firstslot; slot < MCA_MAX_SLOT_NR; slot++) {
206 id = mca_read_stored_pos(slot, 0)
207 + (((unsigned int) mca_read_stored_pos(slot, 1)) << 8);
208
209 *junior = 0;
210 if (id == SKNET_MCA_ID)
211 return slot;
212 *junior = 1;
213 if (id == SKNET_JUNIOR_MCA_ID)
214 return slot;
215 }
216 return MCA_NOTFOUND;
217}
218
219/* reset the whole board */
220
/* Pulse the board's reset line: assert reset, hold 10us, deassert. */
static void ResetBoard(struct net_device *dev)
{
	skmca_priv *priv = netdev_priv(dev);

	writeb(CTRL_RESET_ON, priv->ctrladdr);
	udelay(10);
	writeb(CTRL_RESET_OFF, priv->ctrladdr);
}
229
230/* wait for LANCE interface to become not busy */
231
/* Busy-wait (up to ~1ms) for the LANCE I/O interface to go idle.
 * Returns 1 on success, 0 after printing a timeout message. */
static int WaitLANCE(struct net_device *dev)
{
	skmca_priv *priv = netdev_priv(dev);
	int t = 0;

	while ((readb(priv->ctrladdr) & STAT_IO_BUSY) ==
	       STAT_IO_BUSY) {
		udelay(1);
		if (++t > 1000) {
			printk("%s: LANCE access timeout", dev->name);
			return 0;
		}
	}

	return 1;
}
248
249/* set LANCE register - must be atomic */
250
/* Write `value` to LANCE register `addr`.  The two-phase RAP/data
 * sequence must not be interleaved with other register accesses, so
 * the whole transaction runs under the IRQ-safe lock. */
static void SetLANCE(struct net_device *dev, u16 addr, u16 value)
{
	skmca_priv *priv = netdev_priv(dev);
	unsigned long flags;

	/* disable interrupts */

	spin_lock_irqsave(&priv->lock, flags);

	/* wait until no transfer is pending */

	WaitLANCE(dev);

	/* transfer register address to RAP */

	writeb(CTRL_RESET_OFF | CTRL_RW_WRITE | CTRL_ADR_RAP, priv->ctrladdr);
	writew(addr, priv->ioregaddr);
	writeb(IOCMD_GO, priv->cmdaddr);
	udelay(1);
	WaitLANCE(dev);

	/* transfer data to register */

	writeb(CTRL_RESET_OFF | CTRL_RW_WRITE | CTRL_ADR_DATA, priv->ctrladdr);
	writew(value, priv->ioregaddr);
	writeb(IOCMD_GO, priv->cmdaddr);
	udelay(1);
	WaitLANCE(dev);

	/* reenable interrupts */

	spin_unlock_irqrestore(&priv->lock, flags);
}
284
285/* get LANCE register */
286
/* Read and return LANCE register `addr` using the same locked
 * RAP-then-data sequence as SetLANCE(). */
static u16 GetLANCE(struct net_device *dev, u16 addr)
{
	skmca_priv *priv = netdev_priv(dev);
	unsigned long flags;
	unsigned int res;

	/* disable interrupts */

	spin_lock_irqsave(&priv->lock, flags);

	/* wait until no transfer is pending */

	WaitLANCE(dev);

	/* transfer register address to RAP */

	writeb(CTRL_RESET_OFF | CTRL_RW_WRITE | CTRL_ADR_RAP, priv->ctrladdr);
	writew(addr, priv->ioregaddr);
	writeb(IOCMD_GO, priv->cmdaddr);
	udelay(1);
	WaitLANCE(dev);

	/* transfer data from register */

	writeb(CTRL_RESET_OFF | CTRL_RW_READ | CTRL_ADR_DATA, priv->ctrladdr);
	writeb(IOCMD_GO, priv->cmdaddr);
	udelay(1);
	WaitLANCE(dev);
	res = readw(priv->ioregaddr);

	/* reenable interrupts */

	spin_unlock_irqrestore(&priv->lock, flags);

	return res;
}
323
324/* build up descriptors in shared RAM */
325
/* Build the Tx and Rx descriptor rings plus their data buffers in the
 * board's shared RAM.  Rx descriptors start out owned by the LANCE;
 * Tx descriptors start out owned by the host. */
static void InitDscrs(struct net_device *dev)
{
	skmca_priv *priv = netdev_priv(dev);
	u32 bufaddr;

	/* Set up Tx descriptors. The board has only 16K RAM so bits 16..23
	   are always 0. */

	bufaddr = RAM_DATABASE;
	{
		LANCE_TxDescr descr;
		int z;

		for (z = 0; z < TXCOUNT; z++) {
			descr.LowAddr = bufaddr;
			descr.Flags = 0;
			descr.Len = 0xf000;	/* LANCE wants two's-complement length */
			descr.Status = 0;
			memcpy_toio(priv->base + RAM_TXBASE +
				    (z * sizeof(LANCE_TxDescr)), &descr,
				    sizeof(LANCE_TxDescr));
			memset_io(priv->base + bufaddr, 0, RAM_BUFSIZE);
			bufaddr += RAM_BUFSIZE;
		}
	}

	/* do the same for the Rx descriptors */

	{
		LANCE_RxDescr descr;
		int z;

		for (z = 0; z < RXCOUNT; z++) {
			descr.LowAddr = bufaddr;
			descr.Flags = RXDSCR_FLAGS_OWN;	/* LANCE may fill it */
			descr.MaxLen = -RAM_BUFSIZE;	/* two's-complement size */
			descr.Len = 0;
			memcpy_toio(priv->base + RAM_RXBASE +
				    (z * sizeof(LANCE_RxDescr)), &descr,
				    sizeof(LANCE_RxDescr));
			memset_io(priv->base + bufaddr, 0, RAM_BUFSIZE);
			bufaddr += RAM_BUFSIZE;
		}
	}
}
371
372/* calculate the hash bit position for a given multicast address
373 taken more or less directly from the AMD datasheet... */
374
375static void UpdateCRC(unsigned char *CRC, int bit)
376{
377 int j;
378
379 /* shift CRC one bit */
380
381 memmove(CRC + 1, CRC, 32 * sizeof(unsigned char));
382 CRC[0] = 0;
383
384 /* if bit XOR controlbit = 1, set CRC = CRC XOR polynomial */
385
386 if (bit ^ CRC[32])
387 for (j = 0; j < 32; j++)
388 CRC[j] ^= poly[j];
389}
390
/* Compute the LANCE multicast hash code for an Ethernet address:
 * run all 48 address bits (LSB first per octet) through the CRC and
 * return the 6-bit hash, or -1 if the address is not multicast. */
static unsigned int GetHash(char *address)
{
	unsigned char crcreg[33];
	int octet, bitpos, code;

	/* a multicast address has bit 0 of its first octet set */
	if (!(address[0] & 1))
		return -1;

	/* start with all CRC cells set */
	memset(crcreg, 1, sizeof(crcreg));

	/* feed every address bit through the CRC, LSB first */
	for (octet = 0; octet < 6; octet++)
		for (bitpos = 0; bitpos < 8; bitpos++)
			UpdateCRC(crcreg, (address[octet] >> bitpos) & 1);

	/* the hash is the 6 least significant bits of the CRC */
	code = 0;
	for (bitpos = 0; bitpos < 6; bitpos++)
		code = (code << 1) + crcreg[bitpos];

	return code;
}
418
419/* feed ready-built initialization block into LANCE */
420
/* Point the LANCE at the prebuilt initialization block and start its
 * init sequence; the queue stays stopped until the IDON interrupt
 * arrives (handled in irqstart_handler). */
static void InitLANCE(struct net_device *dev)
{
	skmca_priv *priv = netdev_priv(dev);

	/* build up descriptors. */

	InitDscrs(dev);

	/* next RX descriptor to be read is the first one. Since the LANCE
	   will start from the beginning after initialization, we have to
	   reset out pointers too. */

	priv->nextrx = 0;

	/* no TX descriptors active */

	priv->nexttxput = priv->nexttxdone = priv->txbusy = 0;

	/* set up the LANCE bus control register - constant for SKnet boards */

	SetLANCE(dev, LANCE_CSR3,
		 CSR3_BSWAP_OFF | CSR3_ALE_LOW | CSR3_BCON_HOLD);

	/* write address of initialization block into LANCE */

	SetLANCE(dev, LANCE_CSR1, RAM_INITBASE & 0xffff);
	SetLANCE(dev, LANCE_CSR2, (RAM_INITBASE >> 16) & 0xff);

	/* we don't get ready until the LANCE has read the init block */

	netif_stop_queue(dev);

	/* let LANCE read the initialization block. LANCE is ready
	   when we receive the corresponding interrupt. */

	SetLANCE(dev, LANCE_CSR0, CSR0_INEA | CSR0_INIT);
}
458
459/* stop the LANCE so we can reinitialize it */
460
/* Stop the LANCE (masking its interrupts) so it can be reinitialized. */
static void StopLANCE(struct net_device *dev)
{
	/* can't take frames any more */

	netif_stop_queue(dev);

	/* disable interrupts, stop it */

	SetLANCE(dev, LANCE_CSR0, CSR0_STOP);
}
471
472/* initialize card and LANCE for proper operation */
473
/* Write the LANCE initialization block (mode, MAC address, empty
 * multicast filter, ring pointers) into shared RAM, then kick off the
 * LANCE init sequence. */
static void InitBoard(struct net_device *dev)
{
	skmca_priv *priv = netdev_priv(dev);
	LANCE_InitBlock block;

	/* Lay out the shared RAM - first we create the init block for the LANCE.
	   We do not overwrite it later because we need it again when we switch
	   promiscous mode on/off. */

	block.Mode = 0;
	if (dev->flags & IFF_PROMISC)
		block.Mode |= LANCE_INIT_PROM;
	memcpy(block.PAdr, dev->dev_addr, 6);
	memset(block.LAdrF, 0, sizeof(block.LAdrF));
	/* ring pointers: 24-bit base address, ring size in the top 3 bits */
	block.RdrP = (RAM_RXBASE & 0xffffff) | (LRXCOUNT << 29);
	block.TdrP = (RAM_TXBASE & 0xffffff) | (LTXCOUNT << 29);

	memcpy_toio(priv->base + RAM_INITBASE, &block, sizeof(block));

	/* initialize LANCE. Implicitly sets up other structures in RAM. */

	InitLANCE(dev);
}
497
498/* deinitialize card and LANCE */
499
/* Shut the board down: stop the LANCE, then hard-reset the card. */
static void DeinitBoard(struct net_device *dev)
{
	/* stop LANCE */

	StopLANCE(dev);

	/* reset board */

	ResetBoard(dev);
}
510
511/* probe for device's irq */
512
/* Autoprobe the board's IRQ: arm the kernel IRQ prober, trigger the
 * LANCE "initialization done" interrupt, and see which line fired.
 * Returns the detected IRQ, or 0 if none was seen within ~1s. */
static int __init ProbeIRQ(struct net_device *dev)
{
	unsigned long imaskval, njiffies, irq;
	u16 csr0val;

	/* enable all interrupts */

	imaskval = probe_irq_on();

	/* initialize the board. Wait for interrupt 'Initialization done'. */

	ResetBoard(dev);
	InitBoard(dev);

	njiffies = jiffies + HZ;	/* one-second deadline */
	do {
		csr0val = GetLANCE(dev, LANCE_CSR0);
	}
	while (((csr0val & CSR0_IDON) == 0) && (jiffies != njiffies));

	/* turn off interrupts again */

	irq = probe_irq_off(imaskval);

	/* if we found something, ack the interrupt */

	if (irq)
		SetLANCE(dev, LANCE_CSR0, csr0val | CSR0_IDON);

	/* back to idle state */

	DeinitBoard(dev);

	return irq;
}
548
549/* ------------------------------------------------------------------------
550 * interrupt handler(s)
551 * ------------------------------------------------------------------------ */
552
553/* LANCE has read initialization block -> start it */
554
/* IDON interrupt: the LANCE has read the init block — ack IDON, set
 * STRT to start it, wake the Tx queue, and return the refreshed CSR0. */
static u16 irqstart_handler(struct net_device *dev, u16 oldcsr0)
{
	/* now we're ready to transmit */

	netif_wake_queue(dev);

	/* reset IDON bit, start LANCE */

	SetLANCE(dev, LANCE_CSR0, oldcsr0 | CSR0_IDON | CSR0_STRT);
	return GetLANCE(dev, LANCE_CSR0);
}
566
567/* did we lose blocks due to a FIFO overrun ? */
568
/* MISS interrupt: a frame was dropped because no Rx descriptor was
 * available — count it as an Rx FIFO error, ack MISS, return CSR0. */
static u16 irqmiss_handler(struct net_device *dev, u16 oldcsr0)
{
	skmca_priv *priv = netdev_priv(dev);

	/* update statistics */

	priv->stat.rx_fifo_errors++;

	/* reset MISS bit */

	SetLANCE(dev, LANCE_CSR0, oldcsr0 | CSR0_MISS);
	return GetLANCE(dev, LANCE_CSR0);
}
582
583/* receive interrupt */
584
585static u16 irqrx_handler(struct net_device *dev, u16 oldcsr0)
586{
587 skmca_priv *priv = netdev_priv(dev);
588 LANCE_RxDescr descr;
589 unsigned int descraddr;
590
591 /* run through queue until we reach a descriptor we do not own */
592
593 descraddr = RAM_RXBASE + (priv->nextrx * sizeof(LANCE_RxDescr));
594 while (1) {
595 /* read descriptor */
596 memcpy_fromio(&descr, priv->base + descraddr,
597 sizeof(LANCE_RxDescr));
598
599 /* if we reach a descriptor we do not own, we're done */
600 if ((descr.Flags & RXDSCR_FLAGS_OWN) != 0)
601 break;
602
603#ifdef DEBUG
604 PrTime();
605 printk("Receive packet on descr %d len %d\n", priv->nextrx,
606 descr.Len);
607#endif
608
609 /* erroneous packet ? */
610 if ((descr.Flags & RXDSCR_FLAGS_ERR) != 0) {
611 priv->stat.rx_errors++;
612 if ((descr.Flags & RXDSCR_FLAGS_CRC) != 0)
613 priv->stat.rx_crc_errors++;
614 else if ((descr.Flags & RXDSCR_FLAGS_CRC) != 0)
615 priv->stat.rx_frame_errors++;
616 else if ((descr.Flags & RXDSCR_FLAGS_OFLO) != 0)
617 priv->stat.rx_fifo_errors++;
618 }
619
620 /* good packet ? */
621 else {
622 struct sk_buff *skb;
623
624 skb = dev_alloc_skb(descr.Len + 2);
625 if (skb == NULL)
626 priv->stat.rx_dropped++;
627 else {
628 memcpy_fromio(skb_put(skb, descr.Len),
629 priv->base +
630 descr.LowAddr, descr.Len);
631 skb->dev = dev;
632 skb->protocol = eth_type_trans(skb, dev);
633 skb->ip_summed = CHECKSUM_NONE;
634 priv->stat.rx_packets++;
635 priv->stat.rx_bytes += descr.Len;
636 netif_rx(skb);
637 dev->last_rx = jiffies;
638 }
639 }
640
641 /* give descriptor back to LANCE */
642 descr.Len = 0;
643 descr.Flags |= RXDSCR_FLAGS_OWN;
644
645 /* update descriptor in shared RAM */
646 memcpy_toio(priv->base + descraddr, &descr,
647 sizeof(LANCE_RxDescr));
648
649 /* go to next descriptor */
650 priv->nextrx++;
651 descraddr += sizeof(LANCE_RxDescr);
652 if (priv->nextrx >= RXCOUNT) {
653 priv->nextrx = 0;
654 descraddr = RAM_RXBASE;
655 }
656 }
657
658 /* reset RINT bit */
659
660 SetLANCE(dev, LANCE_CSR0, oldcsr0 | CSR0_RINT);
661 return GetLANCE(dev, LANCE_CSR0);
662}
663
664/* transmit interrupt */
665
/* TINT interrupt: reap completed Tx descriptors starting at
 * priv->nexttxdone, update statistics, ack TINT, and wake the queue
 * since at least one ring slot is now free. */
static u16 irqtx_handler(struct net_device *dev, u16 oldcsr0)
{
	skmca_priv *priv = netdev_priv(dev);
	LANCE_TxDescr descr;
	unsigned int descraddr;

	/* check descriptors at most until no busy one is left */

	descraddr =
	    RAM_TXBASE + (priv->nexttxdone * sizeof(LANCE_TxDescr));
	while (priv->txbusy > 0) {
		/* read descriptor */
		memcpy_fromio(&descr, priv->base + descraddr,
			      sizeof(LANCE_TxDescr));

		/* if the LANCE still owns this one, we've worked out all sent packets */
		if ((descr.Flags & TXDSCR_FLAGS_OWN) != 0)
			break;

#ifdef DEBUG
		PrTime();
		printk("Send packet done on descr %d\n", priv->nexttxdone);
#endif

		/* update statistics */
		if ((descr.Flags & TXDSCR_FLAGS_ERR) == 0) {
			priv->stat.tx_packets++;
			/* NOTE(review): this increments tx_bytes by 1 per
			 * packet rather than by the frame length — looks
			 * like a bug; confirm intended byte accounting */
			priv->stat.tx_bytes++;
		} else {
			priv->stat.tx_errors++;
			if ((descr.Status & TXDSCR_STATUS_UFLO) != 0) {
				priv->stat.tx_fifo_errors++;
				/* underflow stops the transmitter: full reinit */
				InitLANCE(dev);
			}
			else
			    if ((descr.Status & TXDSCR_STATUS_LCOL) !=
				0) priv->stat.tx_window_errors++;
			else if ((descr.Status & TXDSCR_STATUS_LCAR) != 0)
				priv->stat.tx_carrier_errors++;
			else if ((descr.Status & TXDSCR_STATUS_RTRY) != 0)
				priv->stat.tx_aborted_errors++;
		}

		/* go to next descriptor */
		priv->nexttxdone++;
		descraddr += sizeof(LANCE_TxDescr);
		if (priv->nexttxdone >= TXCOUNT) {
			priv->nexttxdone = 0;
			descraddr = RAM_TXBASE;
		}
		priv->txbusy--;
	}

	/* reset TX interrupt bit */

	SetLANCE(dev, LANCE_CSR0, oldcsr0 | CSR0_TINT);
	oldcsr0 = GetLANCE(dev, LANCE_CSR0);

	/* at least one descriptor is freed. Therefore we can accept
	   a new one */
	/* inform upper layers we're in business again */

	netif_wake_queue(dev);

	return oldcsr0;
}
732
733/* general interrupt entry */
734
/* Top-level interrupt handler (the IRQ line may be shared). Reads
 * CSR0; when none of our causes are pending, returns IRQ_NONE so any
 * other handler on the line gets a chance. Otherwise dispatches to
 * the per-cause helpers until CSR0_INTR clears. Each helper returns
 * the CSR0 value it read back after acknowledging its bit, so each
 * test in the loop always sees fresh state.
 */
735static irqreturn_t irq_handler(int irq, void *device)
736{
737	struct net_device *dev = (struct net_device *) device;
738	u16 csr0val;
739
740	/* read CSR0 to get interrupt cause */
741
742	csr0val = GetLANCE(dev, LANCE_CSR0);
743
744	/* in case we're not meant... */
745
746	if ((csr0val & CSR0_INTR) == 0)
747		return IRQ_NONE;
748
749#if 0
750	set_bit(LINK_STATE_RXSEM, &dev->state);
751#endif
752
753	/* loop through the interrupt bits until everything is clear */
754
755	do {
756		if ((csr0val & CSR0_IDON) != 0)
757			csr0val = irqstart_handler(dev, csr0val);
758		if ((csr0val & CSR0_RINT) != 0)
759			csr0val = irqrx_handler(dev, csr0val);
760		if ((csr0val & CSR0_MISS) != 0)
761			csr0val = irqmiss_handler(dev, csr0val);
762		if ((csr0val & CSR0_TINT) != 0)
763			csr0val = irqtx_handler(dev, csr0val);
		/* memory and babble errors have no helper; just ack them
		   by writing the bit back and re-read CSR0 */
764		if ((csr0val & CSR0_MERR) != 0) {
765			SetLANCE(dev, LANCE_CSR0, csr0val | CSR0_MERR);
766			csr0val = GetLANCE(dev, LANCE_CSR0);
767		}
768		if ((csr0val & CSR0_BABL) != 0) {
769			SetLANCE(dev, LANCE_CSR0, csr0val | CSR0_BABL);
770			csr0val = GetLANCE(dev, LANCE_CSR0);
771		}
772	}
773	while ((csr0val & CSR0_INTR) != 0);
774
775#if 0
776	clear_bit(LINK_STATE_RXSEM, &dev->state);
777#endif
778	return IRQ_HANDLED;
779}
780
781/* ------------------------------------------------------------------------
782 * driver methods
783 * ------------------------------------------------------------------------ */
784
785/* MCA info */
786
787static int skmca_getinfo(char *buf, int slot, void *d)
788{
789 int len = 0, i;
790 struct net_device *dev = (struct net_device *) d;
791 skmca_priv *priv;
792
793 /* can't say anything about an uninitialized device... */
794
795 if (dev == NULL)
796 return len;
797 priv = netdev_priv(dev);
798
799 /* print info */
800
801 len += sprintf(buf + len, "IRQ: %d\n", priv->realirq);
802 len += sprintf(buf + len, "Memory: %#lx-%#lx\n", dev->mem_start,
803 dev->mem_end - 1);
804 len +=
805 sprintf(buf + len, "Transceiver: %s\n",
806 MediaNames[priv->medium]);
807 len += sprintf(buf + len, "Device: %s\n", dev->name);
808 len += sprintf(buf + len, "MAC address:");
809 for (i = 0; i < 6; i++)
810 len += sprintf(buf + len, " %02x", dev->dev_addr[i]);
811 buf[len++] = '\n';
812 buf[len] = 0;
813
814 return len;
815}
816
817/* open driver. Means also initialization and start of LANCE */
818
819static int skmca_open(struct net_device *dev)
820{
821 int result;
822 skmca_priv *priv = netdev_priv(dev);
823
824 /* register resources - only necessary for IRQ */
825 result =
826 request_irq(priv->realirq, irq_handler,
827 IRQF_SHARED | IRQF_SAMPLE_RANDOM, "sk_mca", dev);
828 if (result != 0) {
829 printk("%s: failed to register irq %d\n", dev->name,
830 dev->irq);
831 return result;
832 }
833 dev->irq = priv->realirq;
834
835 /* set up the card and LANCE */
836
837 InitBoard(dev);
838
839 /* set up flags */
840
841 netif_start_queue(dev);
842
843 return 0;
844}
845
846/* close driver. Shut down board and free allocated resources */
847
848static int skmca_close(struct net_device *dev)
849{
850 /* turn off board */
851 DeinitBoard(dev);
852
853 /* release resources */
854 if (dev->irq != 0)
855 free_irq(dev->irq, dev);
856 dev->irq = 0;
857
858 return 0;
859}
860
861/* transmit a block. */
862
863static int skmca_tx(struct sk_buff *skb, struct net_device *dev)
864{
865 skmca_priv *priv = netdev_priv(dev);
866 LANCE_TxDescr descr;
867 unsigned int address;
868 int tmplen, retval = 0;
869 unsigned long flags;
870
871 /* if we get called with a NULL descriptor, the Ethernet layer thinks
872 our card is stuck an we should reset it. We'll do this completely: */
873
874 if (skb == NULL) {
875 DeinitBoard(dev);
876 InitBoard(dev);
877 return 0; /* don't try to free the block here ;-) */
878 }
879
880 /* is there space in the Tx queue ? If no, the upper layer gave us a
881 packet in spite of us not being ready and is really in trouble.
882 We'll do the dropping for him: */
883 if (priv->txbusy >= TXCOUNT) {
884 priv->stat.tx_dropped++;
885 retval = -EIO;
886 goto tx_done;
887 }
888
889 /* get TX descriptor */
890 address = RAM_TXBASE + (priv->nexttxput * sizeof(LANCE_TxDescr));
891 memcpy_fromio(&descr, priv->base + address, sizeof(LANCE_TxDescr));
892
893 /* enter packet length as 2s complement - assure minimum length */
894 tmplen = skb->len;
895 if (tmplen < 60)
896 tmplen = 60;
897 descr.Len = 65536 - tmplen;
898
899 /* copy filler into RAM - in case we're filling up...
900 we're filling a bit more than necessary, but that doesn't harm
901 since the buffer is far larger... */
902 if (tmplen > skb->len) {
903 char *fill = "NetBSD is a nice OS too! ";
904 unsigned int destoffs = 0, l = strlen(fill);
905
906 while (destoffs < tmplen) {
907 memcpy_toio(priv->base + descr.LowAddr +
908 destoffs, fill, l);
909 destoffs += l;
910 }
911 }
912
913 /* do the real data copying */
914 memcpy_toio(priv->base + descr.LowAddr, skb->data, skb->len);
915
916 /* hand descriptor over to LANCE - this is the first and last chunk */
917 descr.Flags =
918 TXDSCR_FLAGS_OWN | TXDSCR_FLAGS_STP | TXDSCR_FLAGS_ENP;
919
920#ifdef DEBUG
921 PrTime();
922 printk("Send packet on descr %d len %d\n", priv->nexttxput,
923 skb->len);
924#endif
925
926 /* one more descriptor busy */
927
928 spin_lock_irqsave(&priv->lock, flags);
929
930 priv->nexttxput++;
931 if (priv->nexttxput >= TXCOUNT)
932 priv->nexttxput = 0;
933 priv->txbusy++;
934
935 /* are we saturated ? */
936
937 if (priv->txbusy >= TXCOUNT)
938 netif_stop_queue(dev);
939
940 /* write descriptor back to RAM */
941 memcpy_toio(priv->base + address, &descr, sizeof(LANCE_TxDescr));
942
943 /* if no descriptors were active, give the LANCE a hint to read it
944 immediately */
945
946 if (priv->txbusy == 0)
947 SetLANCE(dev, LANCE_CSR0, CSR0_INEA | CSR0_TDMD);
948
949 spin_unlock_irqrestore(&priv->lock, flags);
950
951 tx_done:
952
953 dev_kfree_skb(skb);
954
955 return retval;
956}
957
958/* return pointer to Ethernet statistics */
959
960static struct net_device_stats *skmca_stats(struct net_device *dev)
961{
962 skmca_priv *priv = netdev_priv(dev);
963
964 return &(priv->stat);
965}
966
967/* switch receiver mode. We use the LANCE's multicast filter to prefilter
968 multicast addresses. */
969
/* Install the current RX filter. The LANCE only re-reads its init
 * block on (re)initialization, so we stop it, rewrite the Mode and
 * logical-address-filter fields in the shared-RAM copy of the block
 * according to dev->flags (promiscuous / all-multi) and the multicast
 * list, and then re-initialize the chip.
 */
970static void skmca_set_multicast_list(struct net_device *dev)
971{
972	skmca_priv *priv = netdev_priv(dev);
973	LANCE_InitBlock block;
974
975	/* first stop the LANCE... */
976	StopLANCE(dev);
977
978	/* ...then modify the initialization block... */
979	memcpy_fromio(&block, priv->base + RAM_INITBASE, sizeof(block));
980	if (dev->flags & IFF_PROMISC)
981		block.Mode |= LANCE_INIT_PROM;
982	else
983		block.Mode &= ~LANCE_INIT_PROM;
984
985	if (dev->flags & IFF_ALLMULTI) {	/* get all multicasts */
986		memset(block.LAdrF, 0xff, sizeof(block.LAdrF));
987	} else {		/* get selected/no multicasts */
988
989		struct dev_mc_list *mptr;
990		int code;
991
		/* build the 64-bit hash filter: one bit per multicast
		   address, indexed by GetHash() */
992		memset(block.LAdrF, 0, sizeof(block.LAdrF));
993		for (mptr = dev->mc_list; mptr != NULL; mptr = mptr->next) {
994			code = GetHash(mptr->dmi_addr);
995			block.LAdrF[(code >> 3) & 7] |= 1 << (code & 7);
996		}
997	}
998
999	memcpy_toio(priv->base + RAM_INITBASE, &block, sizeof(block));
1000
1001	/* ...then reinit LANCE with the correct flags */
1002	InitLANCE(dev);
1003}
1004
1005/* ------------------------------------------------------------------------
1006 * hardware check
1007 * ------------------------------------------------------------------------ */
1008
1009static int startslot; /* counts through slots when probing multiple devices */
1010
1011static void cleanup_card(struct net_device *dev)
1012{
1013 skmca_priv *priv = netdev_priv(dev);
1014 DeinitBoard(dev);
1015 if (dev->irq != 0)
1016 free_irq(dev->irq, dev);
1017 iounmap(priv->base);
1018 mca_mark_as_unused(priv->slot);
1019 mca_set_adapter_procfn(priv->slot, NULL, NULL);
1020}
1021
1022struct net_device * __init skmca_probe(int unit)
1023{
1024 struct net_device *dev;
1025 int force_detect = 0;
1026 int junior, slot, i;
1027 int base = 0, irq = 0;
1028 skmca_priv *priv;
1029 skmca_medium medium;
1030 int err;
1031
1032 /* can't work without an MCA bus ;-) */
1033
1034 if (MCA_bus == 0)
1035 return ERR_PTR(-ENODEV);
1036
1037 dev = alloc_etherdev(sizeof(skmca_priv));
1038 if (!dev)
1039 return ERR_PTR(-ENOMEM);
1040
1041 if (unit >= 0) {
1042 sprintf(dev->name, "eth%d", unit);
1043 netdev_boot_setup_check(dev);
1044 }
1045
1046 SET_MODULE_OWNER(dev);
1047
1048 /* start address of 1 --> forced detection */
1049
1050 if (dev->mem_start == 1)
1051 force_detect = 1;
1052
1053 /* search through slots */
1054
1055 base = dev->mem_start;
1056 irq = dev->base_addr;
1057 for (slot = startslot; (slot = dofind(&junior, slot)) != -1; slot++) {
1058 /* deduce card addresses */
1059
1060 getaddrs(slot, junior, &base, &irq, &medium);
1061
1062 /* slot already in use ? */
1063
1064 if (mca_is_adapter_used(slot))
1065 continue;
1066
1067 /* were we looking for something different ? */
1068
1069 if (dev->irq && dev->irq != irq)
1070 continue;
1071 if (dev->mem_start && dev->mem_start != base)
1072 continue;
1073
1074 /* found something that matches */
1075
1076 break;
1077 }
1078
1079 /* nothing found ? */
1080
1081 if (slot == -1) {
1082 free_netdev(dev);
1083 return (base || irq) ? ERR_PTR(-ENXIO) : ERR_PTR(-ENODEV);
1084 }
1085
1086 /* make procfs entries */
1087
1088 if (junior)
1089 mca_set_adapter_name(slot,
1090 "SKNET junior MC2 Ethernet Adapter");
1091 else
1092 mca_set_adapter_name(slot, "SKNET MC2+ Ethernet Adapter");
1093 mca_set_adapter_procfn(slot, (MCA_ProcFn) skmca_getinfo, dev);
1094
1095 mca_mark_as_used(slot);
1096
1097 /* announce success */
1098 printk("%s: SKNet %s adapter found in slot %d\n", dev->name,
1099 junior ? "Junior MC2" : "MC2+", slot + 1);
1100
1101 priv = netdev_priv(dev);
1102 priv->base = ioremap(base, 0x4000);
1103 if (!priv->base) {
1104 mca_set_adapter_procfn(slot, NULL, NULL);
1105 mca_mark_as_unused(slot);
1106 free_netdev(dev);
1107 return ERR_PTR(-ENOMEM);
1108 }
1109
1110 priv->slot = slot;
1111 priv->macbase = priv->base + 0x3fc0;
1112 priv->ioregaddr = priv->base + 0x3ff0;
1113 priv->ctrladdr = priv->base + 0x3ff2;
1114 priv->cmdaddr = priv->base + 0x3ff3;
1115 priv->medium = medium;
1116 memset(&priv->stat, 0, sizeof(struct net_device_stats));
1117 spin_lock_init(&priv->lock);
1118
1119 /* set base + irq for this device (irq not allocated so far) */
1120 dev->irq = 0;
1121 dev->mem_start = base;
1122 dev->mem_end = base + 0x4000;
1123
1124 /* autoprobe ? */
1125 if (irq < 0) {
1126 int nirq;
1127
1128 printk
1129 ("%s: ambigous POS bit combination, must probe for IRQ...\n",
1130 dev->name);
1131 nirq = ProbeIRQ(dev);
1132 if (nirq <= 0)
1133 printk("%s: IRQ probe failed, assuming IRQ %d",
1134 dev->name, priv->realirq = -irq);
1135 else
1136 priv->realirq = nirq;
1137 } else
1138 priv->realirq = irq;
1139
1140 /* set methods */
1141 dev->open = skmca_open;
1142 dev->stop = skmca_close;
1143 dev->hard_start_xmit = skmca_tx;
1144 dev->do_ioctl = NULL;
1145 dev->get_stats = skmca_stats;
1146 dev->set_multicast_list = skmca_set_multicast_list;
1147 dev->flags |= IFF_MULTICAST;
1148
1149 /* copy out MAC address */
1150 for (i = 0; i < 6; i++)
1151 dev->dev_addr[i] = readb(priv->macbase + (i << 1));
1152
1153 /* print config */
1154 printk("%s: IRQ %d, memory %#lx-%#lx, "
1155 "MAC address %02x:%02x:%02x:%02x:%02x:%02x.\n",
1156 dev->name, priv->realirq, dev->mem_start, dev->mem_end - 1,
1157 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
1158 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
1159 printk("%s: %s medium\n", dev->name, MediaNames[priv->medium]);
1160
1161 /* reset board */
1162
1163 ResetBoard(dev);
1164
1165 startslot = slot + 1;
1166
1167 err = register_netdev(dev);
1168 if (err) {
1169 cleanup_card(dev);
1170 free_netdev(dev);
1171 dev = ERR_PTR(err);
1172 }
1173 return dev;
1174}
1175
1176/* ------------------------------------------------------------------------
1177 * modularization support
1178 * ------------------------------------------------------------------------ */
1179
1180#ifdef MODULE
1181MODULE_LICENSE("GPL");
1182
1183#define DEVMAX 5
1184
1185static struct net_device *moddevs[DEVMAX];
1186
1187int init_module(void)
1188{
1189 int z;
1190
1191 startslot = 0;
1192 for (z = 0; z < DEVMAX; z++) {
1193 struct net_device *dev = skmca_probe(-1);
1194 if (IS_ERR(dev))
1195 break;
1196 moddevs[z] = dev;
1197 }
1198 if (!z)
1199 return -EIO;
1200 return 0;
1201}
1202
1203void cleanup_module(void)
1204{
1205 int z;
1206
1207 for (z = 0; z < DEVMAX; z++) {
1208 struct net_device *dev = moddevs[z];
1209 if (dev) {
1210 unregister_netdev(dev);
1211 cleanup_card(dev);
1212 free_netdev(dev);
1213 }
1214 }
1215}
1216#endif /* MODULE */
diff --git a/drivers/net/sk_mca.h b/drivers/net/sk_mca.h
deleted file mode 100644
index 0dae056fed99..000000000000
--- a/drivers/net/sk_mca.h
+++ /dev/null
@@ -1,170 +0,0 @@
1#ifndef _SK_MCA_INCLUDE_
2#define _SK_MCA_INCLUDE_
3
4#ifdef _SK_MCA_DRIVER_
5
6/* Adapter ID's */
7#define SKNET_MCA_ID 0x6afd
8#define SKNET_JUNIOR_MCA_ID 0x6be9
9
10/* media enumeration - defined in a way that it fits onto the MC2+'s
11 POS registers... */
12
13typedef enum { Media_10Base2, Media_10BaseT,
14 Media_10Base5, Media_Unknown, Media_Count
15} skmca_medium;
16
17/* private structure (per-adapter state, stored via netdev_priv) */
18typedef struct {
19	unsigned int slot;	/* MCA-Slot-# */
20	void __iomem *base;	/* mapped 16K shared-RAM window */
21	void __iomem *macbase;	/* base address of MAC address PROM */
22	void __iomem *ioregaddr;/* address of I/O-register (Lo) */
23	void __iomem *ctrladdr;	/* address of control/stat register */
24	void __iomem *cmdaddr;	/* address of I/O-command register */
25	int nextrx;		/* index of next RX descriptor to
26				   be read */
27	int nexttxput;		/* index of next free TX descriptor */
28	int nexttxdone;		/* index of next TX descriptor to
29				   be finished */
30	int txbusy;		/* # of busy TX descriptors */
31	struct net_device_stats stat;	/* packet statistics */
32	int realirq;		/* memorizes actual IRQ, even when
33				   currently not allocated */
34	skmca_medium medium;	/* physical connector */
35	spinlock_t lock;	/* protects the TX ring bookkeeping */
36} skmca_priv;
37
38/* card registers: control/status register bits */
39
40#define CTRL_ADR_DATA 0 /* Bit 0 = 0 ->access data register */
41#define CTRL_ADR_RAP 1 /* Bit 0 = 1 ->access RAP register */
42#define CTRL_RW_WRITE 0 /* Bit 1 = 0 ->write register */
43#define CTRL_RW_READ 2 /* Bit 1 = 1 ->read register */
44#define CTRL_RESET_ON 0 /* Bit 3 = 0 ->reset board */
45#define CTRL_RESET_OFF 8 /* Bit 3 = 1 ->no reset of board */
46
47#define STAT_ADR_DATA 0 /* Bit 0 of ctrl register read back */
48#define STAT_ADR_RAP 1
49#define STAT_RW_WRITE 0 /* Bit 1 of ctrl register read back */
50#define STAT_RW_READ 2
51#define STAT_RESET_ON 0 /* Bit 3 of ctrl register read back */
52#define STAT_RESET_OFF 8
53#define STAT_IRQ_ACT 0 /* interrupt pending */
54#define STAT_IRQ_NOACT 16 /* no interrupt pending */
55#define STAT_IO_NOBUSY 0 /* no transfer busy */
56#define STAT_IO_BUSY 32 /* transfer busy */
57
58/* I/O command register bits */
59
60#define IOCMD_GO 128 /* Bit 7 = 1 -> start register xfer */
61
62/* LANCE registers */
63
64#define LANCE_CSR0 0 /* Status/Control */
65
66#define CSR0_ERR 0x8000 /* general error flag */
67#define CSR0_BABL 0x4000 /* transmitter timeout */
68#define CSR0_CERR 0x2000 /* collision error */
69#define CSR0_MISS 0x1000 /* lost Rx block */
70#define CSR0_MERR 0x0800 /* memory access error */
71#define CSR0_RINT 0x0400 /* receiver interrupt */
72#define CSR0_TINT 0x0200 /* transmitter interrupt */
73#define CSR0_IDON 0x0100 /* initialization done */
74#define CSR0_INTR 0x0080 /* general interrupt flag */
75#define CSR0_INEA 0x0040 /* interrupt enable */
76#define CSR0_RXON 0x0020 /* receiver enabled */
77#define CSR0_TXON 0x0010 /* transmitter enabled */
78#define CSR0_TDMD 0x0008 /* force transmission now */
79#define CSR0_STOP 0x0004 /* stop LANCE */
80#define CSR0_STRT 0x0002 /* start LANCE */
81#define CSR0_INIT 0x0001 /* read initialization block */
82
83#define LANCE_CSR1 1 /* addr bit 0..15 of initialization */
84#define LANCE_CSR2 2 /* 16..23 block */
85
86#define LANCE_CSR3 3 /* Bus control */
87#define CSR3_BCON_HOLD 0 /* Bit 0 = 0 -> BM1,BM0,HOLD */
88#define CSR3_BCON_BUSRQ 1 /* Bit 0 = 1 -> BUSAK0,BYTE,BUSRQ */
89#define CSR3_ALE_HIGH 0 /* Bit 1 = 0 -> ALE asserted high */
90#define CSR3_ALE_LOW 2 /* Bit 1 = 1 -> ALE asserted low */
91#define CSR3_BSWAP_OFF 0 /* Bit 2 = 0 -> no byte swap */
92#define CSR3_BSWAP_ON 4 /* Bit 2 = 1 -> byte swap */
93
94/* LANCE structures */
95
96typedef struct { /* LANCE initialization block */
97 u16 Mode; /* mode flags */
98 u8 PAdr[6]; /* MAC address */
99 u8 LAdrF[8]; /* Multicast filter */
100 u32 RdrP; /* Receive descriptor */
101 u32 TdrP; /* Transmit descriptor */
102} LANCE_InitBlock;
103
104/* Mode flags init block */
105
106#define LANCE_INIT_PROM 0x8000 /* enable promiscous mode */
107#define LANCE_INIT_INTL 0x0040 /* internal loopback */
108#define LANCE_INIT_DRTY 0x0020 /* disable retry */
109#define LANCE_INIT_COLL 0x0010 /* force collision */
110#define LANCE_INIT_DTCR 0x0008 /* disable transmit CRC */
111#define LANCE_INIT_LOOP 0x0004 /* loopback */
112#define LANCE_INIT_DTX 0x0002 /* disable transmitter */
113#define LANCE_INIT_DRX 0x0001 /* disable receiver */
114
115typedef struct { /* LANCE Tx descriptor */
116 u16 LowAddr; /* bit 0..15 of address */
117 u16 Flags; /* bit 16..23 of address + Flags */
118 u16 Len; /* 2s complement of packet length */
119 u16 Status; /* Result of transmission */
120} LANCE_TxDescr;
121
122#define TXDSCR_FLAGS_OWN 0x8000 /* LANCE owns descriptor */
123#define TXDSCR_FLAGS_ERR 0x4000 /* summary error flag */
124#define TXDSCR_FLAGS_MORE 0x1000 /* more than one retry needed? */
125#define TXDSCR_FLAGS_ONE 0x0800 /* one retry? */
126#define TXDSCR_FLAGS_DEF 0x0400 /* transmission deferred? */
127#define TXDSCR_FLAGS_STP 0x0200 /* first packet in chain? */
128#define TXDSCR_FLAGS_ENP 0x0100 /* last packet in chain? */
129
130#define TXDSCR_STATUS_BUFF 0x8000 /* buffer error? */
131#define TXDSCR_STATUS_UFLO 0x4000 /* silo underflow during transmit? */
132#define TXDSCR_STATUS_LCOL 0x1000 /* late collision? */
133#define TXDSCR_STATUS_LCAR 0x0800 /* loss of carrier? */
134#define TXDSCR_STATUS_RTRY 0x0400 /* retry error? */
135
136typedef struct { /* LANCE Rx descriptor */
137 u16 LowAddr; /* bit 0..15 of address */
138 u16 Flags; /* bit 16..23 of address + Flags */
139 u16 MaxLen; /* 2s complement of buffer length */
140 u16 Len; /* packet length */
141} LANCE_RxDescr;
142
143#define RXDSCR_FLAGS_OWN 0x8000 /* LANCE owns descriptor */
144#define RXDSCR_FLAGS_ERR 0x4000 /* summary error flag */
145#define RXDSCR_FLAGS_FRAM 0x2000 /* framing error flag */
146#define RXDSCR_FLAGS_OFLO 0x1000 /* FIFO overflow? */
147#define RXDSCR_FLAGS_CRC 0x0800 /* CRC error? */
148#define RXDSCR_FLAGS_BUFF 0x0400 /* buffer error? */
149#define RXDSCR_FLAGS_STP 0x0200 /* first packet in chain? */
150#define RXDCSR_FLAGS_ENP 0x0100 /* last packet in chain? */
151
152/* RAM layout */
153
154#define TXCOUNT 4 /* length of TX descriptor queue */
155#define LTXCOUNT 2 /* log2 of it */
156#define RXCOUNT 4 /* length of RX descriptor queue */
157#define LRXCOUNT 2 /* log2 of it */
158
159#define RAM_INITBASE 0 /* LANCE init block */
160#define RAM_TXBASE 24 /* Start of TX descriptor queue */
161#define RAM_RXBASE \
162(RAM_TXBASE + (TXCOUNT * 8)) /* Start of RX descriptor queue */
163#define RAM_DATABASE \
164(RAM_RXBASE + (RXCOUNT * 8)) /* Start of data area for frames */
165#define RAM_BUFSIZE 1580 /* max. frame size - should never be
166 reached */
167
168#endif /* _SK_MCA_DRIVER_ */
169
170#endif /* _SK_MCA_INCLUDE_ */
diff --git a/drivers/net/skfp/can.c b/drivers/net/skfp/can.c
deleted file mode 100644
index 8a49abce7961..000000000000
--- a/drivers/net/skfp/can.c
+++ /dev/null
@@ -1,83 +0,0 @@
1/******************************************************************************
2 *
3 * (C)Copyright 1998,1999 SysKonnect,
4 * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
5 *
6 * See the file "skfddi.c" for further information.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * The information in this file is provided "AS IS" without warranty.
14 *
15 ******************************************************************************/
16
17#ifndef lint
18static const char xID_sccs[] = "@(#)can.c 1.5 97/04/07 (C) SK " ;
19#endif
20
21/*
22 * canonical bit order
23 */
/* canonical[i] is i with its eight bits mirrored (MSB<->LSB); used to
   convert FDDI MAC addresses between wire order and canonical order.
   byte_reverse() below (under MAKE_TABLE) can regenerate this table. */
24const u_char canonical[256] = {
25	0x00,0x80,0x40,0xc0,0x20,0xa0,0x60,0xe0,
26	0x10,0x90,0x50,0xd0,0x30,0xb0,0x70,0xf0,
27	0x08,0x88,0x48,0xc8,0x28,0xa8,0x68,0xe8,
28	0x18,0x98,0x58,0xd8,0x38,0xb8,0x78,0xf8,
29	0x04,0x84,0x44,0xc4,0x24,0xa4,0x64,0xe4,
30	0x14,0x94,0x54,0xd4,0x34,0xb4,0x74,0xf4,
31	0x0c,0x8c,0x4c,0xcc,0x2c,0xac,0x6c,0xec,
32	0x1c,0x9c,0x5c,0xdc,0x3c,0xbc,0x7c,0xfc,
33	0x02,0x82,0x42,0xc2,0x22,0xa2,0x62,0xe2,
34	0x12,0x92,0x52,0xd2,0x32,0xb2,0x72,0xf2,
35	0x0a,0x8a,0x4a,0xca,0x2a,0xaa,0x6a,0xea,
36	0x1a,0x9a,0x5a,0xda,0x3a,0xba,0x7a,0xfa,
37	0x06,0x86,0x46,0xc6,0x26,0xa6,0x66,0xe6,
38	0x16,0x96,0x56,0xd6,0x36,0xb6,0x76,0xf6,
39	0x0e,0x8e,0x4e,0xce,0x2e,0xae,0x6e,0xee,
40	0x1e,0x9e,0x5e,0xde,0x3e,0xbe,0x7e,0xfe,
41	0x01,0x81,0x41,0xc1,0x21,0xa1,0x61,0xe1,
42	0x11,0x91,0x51,0xd1,0x31,0xb1,0x71,0xf1,
43	0x09,0x89,0x49,0xc9,0x29,0xa9,0x69,0xe9,
44	0x19,0x99,0x59,0xd9,0x39,0xb9,0x79,0xf9,
45	0x05,0x85,0x45,0xc5,0x25,0xa5,0x65,0xe5,
46	0x15,0x95,0x55,0xd5,0x35,0xb5,0x75,0xf5,
47	0x0d,0x8d,0x4d,0xcd,0x2d,0xad,0x6d,0xed,
48	0x1d,0x9d,0x5d,0xdd,0x3d,0xbd,0x7d,0xfd,
49	0x03,0x83,0x43,0xc3,0x23,0xa3,0x63,0xe3,
50	0x13,0x93,0x53,0xd3,0x33,0xb3,0x73,0xf3,
51	0x0b,0x8b,0x4b,0xcb,0x2b,0xab,0x6b,0xeb,
52	0x1b,0x9b,0x5b,0xdb,0x3b,0xbb,0x7b,0xfb,
53	0x07,0x87,0x47,0xc7,0x27,0xa7,0x67,0xe7,
54	0x17,0x97,0x57,0xd7,0x37,0xb7,0x77,0xf7,
55	0x0f,0x8f,0x4f,0xcf,0x2f,0xaf,0x6f,0xef,
56	0x1f,0x9f,0x5f,0xdf,0x3f,0xbf,0x7f,0xff
57} ;
58
59#ifdef MAKE_TABLE
/* Reverse the bit order of the low 8 bits of x (MSB<->LSB mirror);
 * used only to (re)generate the 'canonical' lookup table above.
 * Modernized from a K&R-style definition to an ANSI prototype and a
 * loop over the eight bit positions.
 * @x: value whose low byte is mirrored (bits above bit 7 are ignored)
 * Returns the mirrored byte value (0..255).
 */
int byte_reverse(int x)
{
	int y = 0;
	int bit;

	for (bit = 0; bit < 8; bit++)
		if (x & (1 << bit))
			y |= 0x80 >> bit;
	return y;
}
83#endif
diff --git a/drivers/net/skfp/drvfbi.c b/drivers/net/skfp/drvfbi.c
index 5b475833f645..4fe624b0dd25 100644
--- a/drivers/net/skfp/drvfbi.c
+++ b/drivers/net/skfp/drvfbi.c
@@ -23,6 +23,7 @@
23#include "h/smc.h" 23#include "h/smc.h"
24#include "h/supern_2.h" 24#include "h/supern_2.h"
25#include "h/skfbiinc.h" 25#include "h/skfbiinc.h"
26#include <linux/bitrev.h>
26 27
27#ifndef lint 28#ifndef lint
28static const char ID_sccs[] = "@(#)drvfbi.c 1.63 99/02/11 (C) SK " ; 29static const char ID_sccs[] = "@(#)drvfbi.c 1.63 99/02/11 (C) SK " ;
@@ -445,16 +446,14 @@ void read_address(struct s_smc *smc, u_char *mac_addr)
445 char PmdType ; 446 char PmdType ;
446 int i ; 447 int i ;
447 448
448 extern const u_char canonical[256] ;
449
450#if (defined(ISA) || defined(MCA)) 449#if (defined(ISA) || defined(MCA))
451 for (i = 0; i < 4 ;i++) { /* read mac address from board */ 450 for (i = 0; i < 4 ;i++) { /* read mac address from board */
452 smc->hw.fddi_phys_addr.a[i] = 451 smc->hw.fddi_phys_addr.a[i] =
453 canonical[(inpw(PR_A(i+SA_MAC))&0xff)] ; 452 bitrev8(inpw(PR_A(i+SA_MAC)));
454 } 453 }
455 for (i = 4; i < 6; i++) { 454 for (i = 4; i < 6; i++) {
456 smc->hw.fddi_phys_addr.a[i] = 455 smc->hw.fddi_phys_addr.a[i] =
457 canonical[(inpw(PR_A(i+SA_MAC+PRA_OFF))&0xff)] ; 456 bitrev8(inpw(PR_A(i+SA_MAC+PRA_OFF)));
458 } 457 }
459#endif 458#endif
460#ifdef EISA 459#ifdef EISA
@@ -464,17 +463,17 @@ void read_address(struct s_smc *smc, u_char *mac_addr)
464 */ 463 */
465 for (i = 0; i < 4 ;i++) { /* read mac address from board */ 464 for (i = 0; i < 4 ;i++) { /* read mac address from board */
466 smc->hw.fddi_phys_addr.a[i] = 465 smc->hw.fddi_phys_addr.a[i] =
467 canonical[inp(PR_A(i+SA_MAC))] ; 466 bitrev8(inp(PR_A(i+SA_MAC)));
468 } 467 }
469 for (i = 4; i < 6; i++) { 468 for (i = 4; i < 6; i++) {
470 smc->hw.fddi_phys_addr.a[i] = 469 smc->hw.fddi_phys_addr.a[i] =
471 canonical[inp(PR_A(i+SA_MAC+PRA_OFF))] ; 470 bitrev8(inp(PR_A(i+SA_MAC+PRA_OFF)));
472 } 471 }
473#endif 472#endif
474#ifdef PCI 473#ifdef PCI
475 for (i = 0; i < 6; i++) { /* read mac address from board */ 474 for (i = 0; i < 6; i++) { /* read mac address from board */
476 smc->hw.fddi_phys_addr.a[i] = 475 smc->hw.fddi_phys_addr.a[i] =
477 canonical[inp(ADDR(B2_MAC_0+i))] ; 476 bitrev8(inp(ADDR(B2_MAC_0+i)));
478 } 477 }
479#endif 478#endif
480#ifndef PCI 479#ifndef PCI
@@ -493,7 +492,7 @@ void read_address(struct s_smc *smc, u_char *mac_addr)
493 if (mac_addr) { 492 if (mac_addr) {
494 for (i = 0; i < 6 ;i++) { 493 for (i = 0; i < 6 ;i++) {
495 smc->hw.fddi_canon_addr.a[i] = mac_addr[i] ; 494 smc->hw.fddi_canon_addr.a[i] = mac_addr[i] ;
496 smc->hw.fddi_home_addr.a[i] = canonical[mac_addr[i]] ; 495 smc->hw.fddi_home_addr.a[i] = bitrev8(mac_addr[i]);
497 } 496 }
498 return ; 497 return ;
499 } 498 }
@@ -501,7 +500,7 @@ void read_address(struct s_smc *smc, u_char *mac_addr)
501 500
502 for (i = 0; i < 6 ;i++) { 501 for (i = 0; i < 6 ;i++) {
503 smc->hw.fddi_canon_addr.a[i] = 502 smc->hw.fddi_canon_addr.a[i] =
504 canonical[smc->hw.fddi_phys_addr.a[i]] ; 503 bitrev8(smc->hw.fddi_phys_addr.a[i]);
505 } 504 }
506} 505}
507 506
@@ -1269,11 +1268,8 @@ void driver_get_bia(struct s_smc *smc, struct fddi_addr *bia_addr)
1269{ 1268{
1270 int i ; 1269 int i ;
1271 1270
1272 extern const u_char canonical[256] ; 1271 for (i = 0 ; i < 6 ; i++)
1273 1272 bia_addr->a[i] = bitrev8(smc->hw.fddi_phys_addr.a[i]);
1274 for (i = 0 ; i < 6 ; i++) {
1275 bia_addr->a[i] = canonical[smc->hw.fddi_phys_addr.a[i]] ;
1276 }
1277} 1273}
1278 1274
1279void smt_start_watchdog(struct s_smc *smc) 1275void smt_start_watchdog(struct s_smc *smc)
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c
index 0784f558ca9a..a45205da8033 100644
--- a/drivers/net/skfp/fplustm.c
+++ b/drivers/net/skfp/fplustm.c
@@ -22,7 +22,7 @@
22#include "h/fddi.h" 22#include "h/fddi.h"
23#include "h/smc.h" 23#include "h/smc.h"
24#include "h/supern_2.h" 24#include "h/supern_2.h"
25#include "can.c" 25#include <linux/bitrev.h>
26 26
27#ifndef lint 27#ifndef lint
28static const char ID_sccs[] = "@(#)fplustm.c 1.32 99/02/23 (C) SK " ; 28static const char ID_sccs[] = "@(#)fplustm.c 1.32 99/02/23 (C) SK " ;
@@ -1073,7 +1073,7 @@ static struct s_fpmc* mac_get_mc_table(struct s_smc *smc,
1073 if (can) { 1073 if (can) {
1074 p = own->a ; 1074 p = own->a ;
1075 for (i = 0 ; i < 6 ; i++, p++) 1075 for (i = 0 ; i < 6 ; i++, p++)
1076 *p = canonical[*p] ; 1076 *p = bitrev8(*p);
1077 } 1077 }
1078 slot = NULL; 1078 slot = NULL;
1079 for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){ 1079 for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){
diff --git a/drivers/net/skfp/smt.c b/drivers/net/skfp/smt.c
index 99a776a51fb5..fe847800acdc 100644
--- a/drivers/net/skfp/smt.c
+++ b/drivers/net/skfp/smt.c
@@ -18,6 +18,7 @@
18#include "h/fddi.h" 18#include "h/fddi.h"
19#include "h/smc.h" 19#include "h/smc.h"
20#include "h/smt_p.h" 20#include "h/smt_p.h"
21#include <linux/bitrev.h>
21 22
22#define KERNEL 23#define KERNEL
23#include "h/smtstate.h" 24#include "h/smtstate.h"
@@ -26,8 +27,6 @@
26static const char ID_sccs[] = "@(#)smt.c 2.43 98/11/23 (C) SK " ; 27static const char ID_sccs[] = "@(#)smt.c 2.43 98/11/23 (C) SK " ;
27#endif 28#endif
28 29
29extern const u_char canonical[256] ;
30
31/* 30/*
32 * FC in SMbuf 31 * FC in SMbuf
33 */ 32 */
@@ -180,7 +179,7 @@ void smt_agent_init(struct s_smc *smc)
180 driver_get_bia(smc,&smc->mib.fddiSMTStationId.sid_node) ; 179 driver_get_bia(smc,&smc->mib.fddiSMTStationId.sid_node) ;
181 for (i = 0 ; i < 6 ; i ++) { 180 for (i = 0 ; i < 6 ; i ++) {
182 smc->mib.fddiSMTStationId.sid_node.a[i] = 181 smc->mib.fddiSMTStationId.sid_node.a[i] =
183 canonical[smc->mib.fddiSMTStationId.sid_node.a[i]] ; 182 bitrev8(smc->mib.fddiSMTStationId.sid_node.a[i]);
184 } 183 }
185 smc->mib.fddiSMTManufacturerData[0] = 184 smc->mib.fddiSMTManufacturerData[0] =
186 smc->mib.fddiSMTStationId.sid_node.a[0] ; 185 smc->mib.fddiSMTStationId.sid_node.a[0] ;
@@ -2049,9 +2048,8 @@ static void hwm_conv_can(struct s_smc *smc, char *data, int len)
2049 2048
2050 SK_UNUSED(smc) ; 2049 SK_UNUSED(smc) ;
2051 2050
2052 for (i = len; i ; i--, data++) { 2051 for (i = len; i ; i--, data++)
2053 *data = canonical[*(u_char *)data] ; 2052 *data = bitrev8(*data);
2054 }
2055} 2053}
2056#endif 2054#endif
2057 2055
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 45283f3f95e4..e482e7fcbb2b 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -42,7 +42,7 @@
42#include "skge.h" 42#include "skge.h"
43 43
44#define DRV_NAME "skge" 44#define DRV_NAME "skge"
45#define DRV_VERSION "1.9" 45#define DRV_VERSION "1.10"
46#define PFX DRV_NAME " " 46#define PFX DRV_NAME " "
47 47
48#define DEFAULT_TX_RING_SIZE 128 48#define DEFAULT_TX_RING_SIZE 128
@@ -132,18 +132,93 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
132} 132}
133 133
134/* Wake on Lan only supported on Yukon chips with rev 1 or above */ 134/* Wake on Lan only supported on Yukon chips with rev 1 or above */
135static int wol_supported(const struct skge_hw *hw) 135static u32 wol_supported(const struct skge_hw *hw)
136{ 136{
137 return !((hw->chip_id == CHIP_ID_GENESIS || 137 if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev != 0)
138 (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0))); 138 return WAKE_MAGIC | WAKE_PHY;
139 else
140 return 0;
141}
142
143static u32 pci_wake_enabled(struct pci_dev *dev)
144{
145 int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
146 u16 value;
147
148 /* If device doesn't support PM Capabilities, but request is to disable
149 * wake events, it's a nop; otherwise fail */
150 if (!pm)
151 return 0;
152
153 pci_read_config_word(dev, pm + PCI_PM_PMC, &value);
154
155 value &= PCI_PM_CAP_PME_MASK;
156 value >>= ffs(PCI_PM_CAP_PME_MASK) - 1; /* First bit of mask */
157
158 return value != 0;
159}
160
161static void skge_wol_init(struct skge_port *skge)
162{
163 struct skge_hw *hw = skge->hw;
164 int port = skge->port;
165 enum pause_control save_mode;
166 u32 ctrl;
167
168 /* Bring hardware out of reset */
169 skge_write16(hw, B0_CTST, CS_RST_CLR);
170 skge_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);
171
172 skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
173 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
174
175 /* Force to 10/100 skge_reset will re-enable on resume */
176 save_mode = skge->flow_control;
177 skge->flow_control = FLOW_MODE_SYMMETRIC;
178
179 ctrl = skge->advertising;
180 skge->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
181
182 skge_phy_reset(skge);
183
184 skge->flow_control = save_mode;
185 skge->advertising = ctrl;
186
187 /* Set GMAC to no flow control and auto update for speed/duplex */
188 gma_write16(hw, port, GM_GP_CTRL,
189 GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
190 GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);
191
192 /* Set WOL address */
193 memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
194 skge->netdev->dev_addr, ETH_ALEN);
195
196 /* Turn on appropriate WOL control bits */
197 skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
198 ctrl = 0;
199 if (skge->wol & WAKE_PHY)
200 ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
201 else
202 ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;
203
204 if (skge->wol & WAKE_MAGIC)
205 ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
206 else
207 ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;;
208
209 ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
210 skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
211
212 /* block receiver */
213 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
139} 214}
140 215
141static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 216static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
142{ 217{
143 struct skge_port *skge = netdev_priv(dev); 218 struct skge_port *skge = netdev_priv(dev);
144 219
145 wol->supported = wol_supported(skge->hw) ? WAKE_MAGIC : 0; 220 wol->supported = wol_supported(skge->hw);
146 wol->wolopts = skge->wol ? WAKE_MAGIC : 0; 221 wol->wolopts = skge->wol;
147} 222}
148 223
149static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 224static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -151,23 +226,12 @@ static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
151 struct skge_port *skge = netdev_priv(dev); 226 struct skge_port *skge = netdev_priv(dev);
152 struct skge_hw *hw = skge->hw; 227 struct skge_hw *hw = skge->hw;
153 228
154 if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0) 229 if (wol->wolopts & wol_supported(hw))
155 return -EOPNOTSUPP; 230 return -EOPNOTSUPP;
156 231
157 if (wol->wolopts == WAKE_MAGIC && !wol_supported(hw)) 232 skge->wol = wol->wolopts;
158 return -EOPNOTSUPP; 233 if (!netif_running(dev))
159 234 skge_wol_init(skge);
160 skge->wol = wol->wolopts == WAKE_MAGIC;
161
162 if (skge->wol) {
163 memcpy_toio(hw->regs + WOL_MAC_ADDR, dev->dev_addr, ETH_ALEN);
164
165 skge_write16(hw, WOL_CTRL_STAT,
166 WOL_CTL_ENA_PME_ON_MAGIC_PKT |
167 WOL_CTL_ENA_MAGIC_PKT_UNIT);
168 } else
169 skge_write16(hw, WOL_CTRL_STAT, WOL_CTL_DEFAULT);
170
171 return 0; 235 return 0;
172} 236}
173 237
@@ -2373,6 +2437,9 @@ static int skge_up(struct net_device *dev)
2373 size_t rx_size, tx_size; 2437 size_t rx_size, tx_size;
2374 int err; 2438 int err;
2375 2439
2440 if (!is_valid_ether_addr(dev->dev_addr))
2441 return -EINVAL;
2442
2376 if (netif_msg_ifup(skge)) 2443 if (netif_msg_ifup(skge))
2377 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); 2444 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
2378 2445
@@ -2392,7 +2459,7 @@ static int skge_up(struct net_device *dev)
2392 BUG_ON(skge->dma & 7); 2459 BUG_ON(skge->dma & 7);
2393 2460
2394 if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) { 2461 if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
2395 printk(KERN_ERR PFX "pci_alloc_consistent region crosses 4G boundary\n"); 2462 dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n");
2396 err = -EINVAL; 2463 err = -EINVAL;
2397 goto free_pci_mem; 2464 goto free_pci_mem;
2398 } 2465 }
@@ -3001,6 +3068,7 @@ static void skge_mac_intr(struct skge_hw *hw, int port)
3001/* Handle device specific framing and timeout interrupts */ 3068/* Handle device specific framing and timeout interrupts */
3002static void skge_error_irq(struct skge_hw *hw) 3069static void skge_error_irq(struct skge_hw *hw)
3003{ 3070{
3071 struct pci_dev *pdev = hw->pdev;
3004 u32 hwstatus = skge_read32(hw, B0_HWE_ISRC); 3072 u32 hwstatus = skge_read32(hw, B0_HWE_ISRC);
3005 3073
3006 if (hw->chip_id == CHIP_ID_GENESIS) { 3074 if (hw->chip_id == CHIP_ID_GENESIS) {
@@ -3016,12 +3084,12 @@ static void skge_error_irq(struct skge_hw *hw)
3016 } 3084 }
3017 3085
3018 if (hwstatus & IS_RAM_RD_PAR) { 3086 if (hwstatus & IS_RAM_RD_PAR) {
3019 printk(KERN_ERR PFX "Ram read data parity error\n"); 3087 dev_err(&pdev->dev, "Ram read data parity error\n");
3020 skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR); 3088 skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR);
3021 } 3089 }
3022 3090
3023 if (hwstatus & IS_RAM_WR_PAR) { 3091 if (hwstatus & IS_RAM_WR_PAR) {
3024 printk(KERN_ERR PFX "Ram write data parity error\n"); 3092 dev_err(&pdev->dev, "Ram write data parity error\n");
3025 skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR); 3093 skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR);
3026 } 3094 }
3027 3095
@@ -3032,38 +3100,38 @@ static void skge_error_irq(struct skge_hw *hw)
3032 skge_mac_parity(hw, 1); 3100 skge_mac_parity(hw, 1);
3033 3101
3034 if (hwstatus & IS_R1_PAR_ERR) { 3102 if (hwstatus & IS_R1_PAR_ERR) {
3035 printk(KERN_ERR PFX "%s: receive queue parity error\n", 3103 dev_err(&pdev->dev, "%s: receive queue parity error\n",
3036 hw->dev[0]->name); 3104 hw->dev[0]->name);
3037 skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P); 3105 skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
3038 } 3106 }
3039 3107
3040 if (hwstatus & IS_R2_PAR_ERR) { 3108 if (hwstatus & IS_R2_PAR_ERR) {
3041 printk(KERN_ERR PFX "%s: receive queue parity error\n", 3109 dev_err(&pdev->dev, "%s: receive queue parity error\n",
3042 hw->dev[1]->name); 3110 hw->dev[1]->name);
3043 skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P); 3111 skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
3044 } 3112 }
3045 3113
3046 if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) { 3114 if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
3047 u16 pci_status, pci_cmd; 3115 u16 pci_status, pci_cmd;
3048 3116
3049 pci_read_config_word(hw->pdev, PCI_COMMAND, &pci_cmd); 3117 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
3050 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status); 3118 pci_read_config_word(pdev, PCI_STATUS, &pci_status);
3051 3119
3052 printk(KERN_ERR PFX "%s: PCI error cmd=%#x status=%#x\n", 3120 dev_err(&pdev->dev, "PCI error cmd=%#x status=%#x\n",
3053 pci_name(hw->pdev), pci_cmd, pci_status); 3121 pci_cmd, pci_status);
3054 3122
3055 /* Write the error bits back to clear them. */ 3123 /* Write the error bits back to clear them. */
3056 pci_status &= PCI_STATUS_ERROR_BITS; 3124 pci_status &= PCI_STATUS_ERROR_BITS;
3057 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3125 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3058 pci_write_config_word(hw->pdev, PCI_COMMAND, 3126 pci_write_config_word(pdev, PCI_COMMAND,
3059 pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY); 3127 pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
3060 pci_write_config_word(hw->pdev, PCI_STATUS, pci_status); 3128 pci_write_config_word(pdev, PCI_STATUS, pci_status);
3061 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3129 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3062 3130
3063 /* if error still set then just ignore it */ 3131 /* if error still set then just ignore it */
3064 hwstatus = skge_read32(hw, B0_HWE_ISRC); 3132 hwstatus = skge_read32(hw, B0_HWE_ISRC);
3065 if (hwstatus & IS_IRQ_STAT) { 3133 if (hwstatus & IS_IRQ_STAT) {
3066 printk(KERN_INFO PFX "unable to clear error (so ignoring them)\n"); 3134 dev_warn(&hw->pdev->dev, "unable to clear error (so ignoring them)\n");
3067 hw->intr_mask &= ~IS_HW_ERR; 3135 hw->intr_mask &= ~IS_HW_ERR;
3068 } 3136 }
3069 } 3137 }
@@ -3277,8 +3345,8 @@ static int skge_reset(struct skge_hw *hw)
3277 hw->phy_addr = PHY_ADDR_BCOM; 3345 hw->phy_addr = PHY_ADDR_BCOM;
3278 break; 3346 break;
3279 default: 3347 default:
3280 printk(KERN_ERR PFX "%s: unsupported phy type 0x%x\n", 3348 dev_err(&hw->pdev->dev, "unsupported phy type 0x%x\n",
3281 pci_name(hw->pdev), hw->phy_type); 3349 hw->phy_type);
3282 return -EOPNOTSUPP; 3350 return -EOPNOTSUPP;
3283 } 3351 }
3284 break; 3352 break;
@@ -3293,8 +3361,8 @@ static int skge_reset(struct skge_hw *hw)
3293 break; 3361 break;
3294 3362
3295 default: 3363 default:
3296 printk(KERN_ERR PFX "%s: unsupported chip type 0x%x\n", 3364 dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
3297 pci_name(hw->pdev), hw->chip_id); 3365 hw->chip_id);
3298 return -EOPNOTSUPP; 3366 return -EOPNOTSUPP;
3299 } 3367 }
3300 3368
@@ -3334,7 +3402,7 @@ static int skge_reset(struct skge_hw *hw)
3334 /* avoid boards with stuck Hardware error bits */ 3402 /* avoid boards with stuck Hardware error bits */
3335 if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) && 3403 if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
3336 (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) { 3404 (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
3337 printk(KERN_WARNING PFX "stuck hardware sensor bit\n"); 3405 dev_warn(&hw->pdev->dev, "stuck hardware sensor bit\n");
3338 hw->intr_mask &= ~IS_HW_ERR; 3406 hw->intr_mask &= ~IS_HW_ERR;
3339 } 3407 }
3340 3408
@@ -3408,7 +3476,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3408 struct net_device *dev = alloc_etherdev(sizeof(*skge)); 3476 struct net_device *dev = alloc_etherdev(sizeof(*skge));
3409 3477
3410 if (!dev) { 3478 if (!dev) {
3411 printk(KERN_ERR "skge etherdev alloc failed"); 3479 dev_err(&hw->pdev->dev, "etherdev alloc failed\n");
3412 return NULL; 3480 return NULL;
3413 } 3481 }
3414 3482
@@ -3452,6 +3520,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3452 skge->duplex = -1; 3520 skge->duplex = -1;
3453 skge->speed = -1; 3521 skge->speed = -1;
3454 skge->advertising = skge_supported_modes(hw); 3522 skge->advertising = skge_supported_modes(hw);
3523 skge->wol = pci_wake_enabled(hw->pdev) ? wol_supported(hw) : 0;
3455 3524
3456 hw->dev[port] = dev; 3525 hw->dev[port] = dev;
3457 3526
@@ -3496,15 +3565,13 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3496 3565
3497 err = pci_enable_device(pdev); 3566 err = pci_enable_device(pdev);
3498 if (err) { 3567 if (err) {
3499 printk(KERN_ERR PFX "%s cannot enable PCI device\n", 3568 dev_err(&pdev->dev, "cannot enable PCI device\n");
3500 pci_name(pdev));
3501 goto err_out; 3569 goto err_out;
3502 } 3570 }
3503 3571
3504 err = pci_request_regions(pdev, DRV_NAME); 3572 err = pci_request_regions(pdev, DRV_NAME);
3505 if (err) { 3573 if (err) {
3506 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n", 3574 dev_err(&pdev->dev, "cannot obtain PCI resources\n");
3507 pci_name(pdev));
3508 goto err_out_disable_pdev; 3575 goto err_out_disable_pdev;
3509 } 3576 }
3510 3577
@@ -3519,8 +3586,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3519 } 3586 }
3520 3587
3521 if (err) { 3588 if (err) {
3522 printk(KERN_ERR PFX "%s no usable DMA configuration\n", 3589 dev_err(&pdev->dev, "no usable DMA configuration\n");
3523 pci_name(pdev));
3524 goto err_out_free_regions; 3590 goto err_out_free_regions;
3525 } 3591 }
3526 3592
@@ -3538,8 +3604,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3538 err = -ENOMEM; 3604 err = -ENOMEM;
3539 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 3605 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
3540 if (!hw) { 3606 if (!hw) {
3541 printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n", 3607 dev_err(&pdev->dev, "cannot allocate hardware struct\n");
3542 pci_name(pdev));
3543 goto err_out_free_regions; 3608 goto err_out_free_regions;
3544 } 3609 }
3545 3610
@@ -3550,8 +3615,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3550 3615
3551 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); 3616 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
3552 if (!hw->regs) { 3617 if (!hw->regs) {
3553 printk(KERN_ERR PFX "%s: cannot map device registers\n", 3618 dev_err(&pdev->dev, "cannot map device registers\n");
3554 pci_name(pdev));
3555 goto err_out_free_hw; 3619 goto err_out_free_hw;
3556 } 3620 }
3557 3621
@@ -3567,23 +3631,19 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3567 if (!dev) 3631 if (!dev)
3568 goto err_out_led_off; 3632 goto err_out_led_off;
3569 3633
3570 if (!is_valid_ether_addr(dev->dev_addr)) { 3634 /* Some motherboards are broken and has zero in ROM. */
3571 printk(KERN_ERR PFX "%s: bad (zero?) ethernet address in rom\n", 3635 if (!is_valid_ether_addr(dev->dev_addr))
3572 pci_name(pdev)); 3636 dev_warn(&pdev->dev, "bad (zero?) ethernet address in rom\n");
3573 err = -EIO;
3574 goto err_out_free_netdev;
3575 }
3576 3637
3577 err = register_netdev(dev); 3638 err = register_netdev(dev);
3578 if (err) { 3639 if (err) {
3579 printk(KERN_ERR PFX "%s: cannot register net device\n", 3640 dev_err(&pdev->dev, "cannot register net device\n");
3580 pci_name(pdev));
3581 goto err_out_free_netdev; 3641 goto err_out_free_netdev;
3582 } 3642 }
3583 3643
3584 err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw); 3644 err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw);
3585 if (err) { 3645 if (err) {
3586 printk(KERN_ERR PFX "%s: cannot assign irq %d\n", 3646 dev_err(&pdev->dev, "%s: cannot assign irq %d\n",
3587 dev->name, pdev->irq); 3647 dev->name, pdev->irq);
3588 goto err_out_unregister; 3648 goto err_out_unregister;
3589 } 3649 }
@@ -3594,7 +3654,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3594 skge_show_addr(dev1); 3654 skge_show_addr(dev1);
3595 else { 3655 else {
3596 /* Failure to register second port need not be fatal */ 3656 /* Failure to register second port need not be fatal */
3597 printk(KERN_WARNING PFX "register of second port failed\n"); 3657 dev_warn(&pdev->dev, "register of second port failed\n");
3598 hw->dev[1] = NULL; 3658 hw->dev[1] = NULL;
3599 free_netdev(dev1); 3659 free_netdev(dev1);
3600 } 3660 }
@@ -3659,28 +3719,46 @@ static void __devexit skge_remove(struct pci_dev *pdev)
3659} 3719}
3660 3720
3661#ifdef CONFIG_PM 3721#ifdef CONFIG_PM
3722static int vaux_avail(struct pci_dev *pdev)
3723{
3724 int pm_cap;
3725
3726 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
3727 if (pm_cap) {
3728 u16 ctl;
3729 pci_read_config_word(pdev, pm_cap + PCI_PM_PMC, &ctl);
3730 if (ctl & PCI_PM_CAP_AUX_POWER)
3731 return 1;
3732 }
3733 return 0;
3734}
3735
3736
3662static int skge_suspend(struct pci_dev *pdev, pm_message_t state) 3737static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
3663{ 3738{
3664 struct skge_hw *hw = pci_get_drvdata(pdev); 3739 struct skge_hw *hw = pci_get_drvdata(pdev);
3665 int i, wol = 0; 3740 int i, err, wol = 0;
3741
3742 err = pci_save_state(pdev);
3743 if (err)
3744 return err;
3666 3745
3667 pci_save_state(pdev);
3668 for (i = 0; i < hw->ports; i++) { 3746 for (i = 0; i < hw->ports; i++) {
3669 struct net_device *dev = hw->dev[i]; 3747 struct net_device *dev = hw->dev[i];
3748 struct skge_port *skge = netdev_priv(dev);
3670 3749
3671 if (netif_running(dev)) { 3750 if (netif_running(dev))
3672 struct skge_port *skge = netdev_priv(dev); 3751 skge_down(dev);
3752 if (skge->wol)
3753 skge_wol_init(skge);
3673 3754
3674 netif_carrier_off(dev); 3755 wol |= skge->wol;
3675 if (skge->wol)
3676 netif_stop_queue(dev);
3677 else
3678 skge_down(dev);
3679 wol |= skge->wol;
3680 }
3681 netif_device_detach(dev);
3682 } 3756 }
3683 3757
3758 if (wol && vaux_avail(pdev))
3759 skge_write8(hw, B0_POWER_CTRL,
3760 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
3761
3684 skge_write32(hw, B0_IMSK, 0); 3762 skge_write32(hw, B0_IMSK, 0);
3685 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol); 3763 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
3686 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 3764 pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -3693,8 +3771,14 @@ static int skge_resume(struct pci_dev *pdev)
3693 struct skge_hw *hw = pci_get_drvdata(pdev); 3771 struct skge_hw *hw = pci_get_drvdata(pdev);
3694 int i, err; 3772 int i, err;
3695 3773
3696 pci_set_power_state(pdev, PCI_D0); 3774 err = pci_set_power_state(pdev, PCI_D0);
3697 pci_restore_state(pdev); 3775 if (err)
3776 goto out;
3777
3778 err = pci_restore_state(pdev);
3779 if (err)
3780 goto out;
3781
3698 pci_enable_wake(pdev, PCI_D0, 0); 3782 pci_enable_wake(pdev, PCI_D0, 0);
3699 3783
3700 err = skge_reset(hw); 3784 err = skge_reset(hw);
@@ -3704,7 +3788,6 @@ static int skge_resume(struct pci_dev *pdev)
3704 for (i = 0; i < hw->ports; i++) { 3788 for (i = 0; i < hw->ports; i++) {
3705 struct net_device *dev = hw->dev[i]; 3789 struct net_device *dev = hw->dev[i];
3706 3790
3707 netif_device_attach(dev);
3708 if (netif_running(dev)) { 3791 if (netif_running(dev)) {
3709 err = skge_up(dev); 3792 err = skge_up(dev);
3710 3793
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index f6223c533c01..17b1b479dff5 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -876,11 +876,13 @@ enum {
876 WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */ 876 WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */
877 WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */ 877 WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */
878}; 878};
879#define WOL_REGS(port, x) (x + (port)*0x80)
879 880
880enum { 881enum {
881 WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */ 882 WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */
882 WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */ 883 WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */
883}; 884};
885#define WOL_PATT_RAM_BASE(port) (WOL_PATT_RAM_1 + (port)*0x400)
884 886
885enum { 887enum {
886 BASE_XMAC_1 = 0x2000,/* XMAC 1 registers */ 888 BASE_XMAC_1 = 0x2000,/* XMAC 1 registers */
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 822dd0b13133..f2ab3d56e565 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -49,7 +49,7 @@
49#include "sky2.h" 49#include "sky2.h"
50 50
51#define DRV_NAME "sky2" 51#define DRV_NAME "sky2"
52#define DRV_VERSION "1.10" 52#define DRV_VERSION "1.12"
53#define PFX DRV_NAME " " 53#define PFX DRV_NAME " "
54 54
55/* 55/*
@@ -105,6 +105,7 @@ static const struct pci_device_id sky2_id_table[] = {
105 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */ 105 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */
106 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */ 106 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */
107 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) }, /* DGE-560SX */ 107 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) }, /* DGE-560SX */
108 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B03) }, /* DGE-550T */
108 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */ 109 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */
109 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */ 110 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */
110 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */ 111 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */
@@ -126,6 +127,9 @@ static const struct pci_device_id sky2_id_table[] = {
126 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */ 127 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
127 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */ 128 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
128 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */ 129 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
130 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */
131 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */
132 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */
129 { 0 } 133 { 0 }
130}; 134};
131 135
@@ -140,7 +144,7 @@ static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };
140static const char *yukon2_name[] = { 144static const char *yukon2_name[] = {
141 "XL", /* 0xb3 */ 145 "XL", /* 0xb3 */
142 "EC Ultra", /* 0xb4 */ 146 "EC Ultra", /* 0xb4 */
143 "UNKNOWN", /* 0xb5 */ 147 "Extreme", /* 0xb5 */
144 "EC", /* 0xb6 */ 148 "EC", /* 0xb6 */
145 "FE", /* 0xb7 */ 149 "FE", /* 0xb7 */
146}; 150};
@@ -192,76 +196,52 @@ static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
192 return v; 196 return v;
193} 197}
194 198
195static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
196{
197 u16 power_control;
198 int vaux;
199
200 pr_debug("sky2_set_power_state %d\n", state);
201 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
202
203 power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_PMC);
204 vaux = (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL) &&
205 (power_control & PCI_PM_CAP_PME_D3cold);
206
207 power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_CTRL);
208
209 power_control |= PCI_PM_CTRL_PME_STATUS;
210 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
211 199
212 switch (state) { 200static void sky2_power_on(struct sky2_hw *hw)
213 case PCI_D0: 201{
214 /* switch power to VCC (WA for VAUX problem) */ 202 /* switch power to VCC (WA for VAUX problem) */
215 sky2_write8(hw, B0_POWER_CTRL, 203 sky2_write8(hw, B0_POWER_CTRL,
216 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); 204 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
217
218 /* disable Core Clock Division, */
219 sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
220
221 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
222 /* enable bits are inverted */
223 sky2_write8(hw, B2_Y2_CLK_GATE,
224 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
225 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
226 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
227 else
228 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
229 205
230 if (hw->chip_id == CHIP_ID_YUKON_EC_U) { 206 /* disable Core Clock Division, */
231 u32 reg1; 207 sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
232 208
233 sky2_pci_write32(hw, PCI_DEV_REG3, 0); 209 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
234 reg1 = sky2_pci_read32(hw, PCI_DEV_REG4); 210 /* enable bits are inverted */
235 reg1 &= P_ASPM_CONTROL_MSK; 211 sky2_write8(hw, B2_Y2_CLK_GATE,
236 sky2_pci_write32(hw, PCI_DEV_REG4, reg1); 212 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
237 sky2_pci_write32(hw, PCI_DEV_REG5, 0); 213 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
238 } 214 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
215 else
216 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
239 217
240 break; 218 if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) {
219 u32 reg1;
241 220
242 case PCI_D3hot: 221 sky2_pci_write32(hw, PCI_DEV_REG3, 0);
243 case PCI_D3cold: 222 reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
244 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) 223 reg1 &= P_ASPM_CONTROL_MSK;
245 sky2_write8(hw, B2_Y2_CLK_GATE, 0); 224 sky2_pci_write32(hw, PCI_DEV_REG4, reg1);
246 else 225 sky2_pci_write32(hw, PCI_DEV_REG5, 0);
247 /* enable bits are inverted */
248 sky2_write8(hw, B2_Y2_CLK_GATE,
249 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
250 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
251 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
252
253 /* switch power to VAUX */
254 if (vaux && state != PCI_D3cold)
255 sky2_write8(hw, B0_POWER_CTRL,
256 (PC_VAUX_ENA | PC_VCC_ENA |
257 PC_VAUX_ON | PC_VCC_OFF));
258 break;
259 default:
260 printk(KERN_ERR PFX "Unknown power state %d\n", state);
261 } 226 }
227}
262 228
263 sky2_pci_write16(hw, hw->pm_cap + PCI_PM_CTRL, power_control); 229static void sky2_power_aux(struct sky2_hw *hw)
264 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 230{
231 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
232 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
233 else
234 /* enable bits are inverted */
235 sky2_write8(hw, B2_Y2_CLK_GATE,
236 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
237 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
238 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
239
240 /* switch power to VAUX */
241 if (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL)
242 sky2_write8(hw, B0_POWER_CTRL,
243 (PC_VAUX_ENA | PC_VCC_ENA |
244 PC_VAUX_ON | PC_VCC_OFF));
265} 245}
266 246
267static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port) 247static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
@@ -313,8 +293,10 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
313 struct sky2_port *sky2 = netdev_priv(hw->dev[port]); 293 struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
314 u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg; 294 u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg;
315 295
316 if (sky2->autoneg == AUTONEG_ENABLE && 296 if (sky2->autoneg == AUTONEG_ENABLE
317 !(hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) { 297 && !(hw->chip_id == CHIP_ID_YUKON_XL
298 || hw->chip_id == CHIP_ID_YUKON_EC_U
299 || hw->chip_id == CHIP_ID_YUKON_EX)) {
318 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); 300 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
319 301
320 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | 302 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
@@ -341,8 +323,10 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
341 /* enable automatic crossover */ 323 /* enable automatic crossover */
342 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO); 324 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
343 325
344 if (sky2->autoneg == AUTONEG_ENABLE && 326 if (sky2->autoneg == AUTONEG_ENABLE
345 (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) { 327 && (hw->chip_id == CHIP_ID_YUKON_XL
328 || hw->chip_id == CHIP_ID_YUKON_EC_U
329 || hw->chip_id == CHIP_ID_YUKON_EX)) {
346 ctrl &= ~PHY_M_PC_DSC_MSK; 330 ctrl &= ~PHY_M_PC_DSC_MSK;
347 ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA; 331 ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
348 } 332 }
@@ -497,7 +481,9 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
497 /* restore page register */ 481 /* restore page register */
498 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); 482 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
499 break; 483 break;
484
500 case CHIP_ID_YUKON_EC_U: 485 case CHIP_ID_YUKON_EC_U:
486 case CHIP_ID_YUKON_EX:
501 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); 487 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
502 488
503 /* select page 3 to access LED control register */ 489 /* select page 3 to access LED control register */
@@ -539,7 +525,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
539 525
540 /* set page register to 0 */ 526 /* set page register to 0 */
541 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); 527 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
542 } else { 528 } else if (hw->chip_id != CHIP_ID_YUKON_EX) {
543 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); 529 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
544 530
545 if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) { 531 if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
@@ -591,6 +577,73 @@ static void sky2_phy_reinit(struct sky2_port *sky2)
591 spin_unlock_bh(&sky2->phy_lock); 577 spin_unlock_bh(&sky2->phy_lock);
592} 578}
593 579
580/* Put device in state to listen for Wake On Lan */
581static void sky2_wol_init(struct sky2_port *sky2)
582{
583 struct sky2_hw *hw = sky2->hw;
584 unsigned port = sky2->port;
585 enum flow_control save_mode;
586 u16 ctrl;
587 u32 reg1;
588
589 /* Bring hardware out of reset */
590 sky2_write16(hw, B0_CTST, CS_RST_CLR);
591 sky2_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);
592
593 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
594 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
595
596 /* Force to 10/100
597 * sky2_reset will re-enable on resume
598 */
599 save_mode = sky2->flow_mode;
600 ctrl = sky2->advertising;
601
602 sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
603 sky2->flow_mode = FC_NONE;
604 sky2_phy_power(hw, port, 1);
605 sky2_phy_reinit(sky2);
606
607 sky2->flow_mode = save_mode;
608 sky2->advertising = ctrl;
609
610 /* Set GMAC to no flow control and auto update for speed/duplex */
611 gma_write16(hw, port, GM_GP_CTRL,
612 GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
613 GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);
614
615 /* Set WOL address */
616 memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
617 sky2->netdev->dev_addr, ETH_ALEN);
618
619 /* Turn on appropriate WOL control bits */
620 sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
621 ctrl = 0;
622 if (sky2->wol & WAKE_PHY)
623 ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
624 else
625 ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;
626
627 if (sky2->wol & WAKE_MAGIC)
628 ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
629 else
630 ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;;
631
632 ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
633 sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
634
635 /* Turn on legacy PCI-Express PME mode */
636 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
637 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
638 reg1 |= PCI_Y2_PME_LEGACY;
639 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
640 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
641
642 /* block receiver */
643 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
644
645}
646
594static void sky2_mac_init(struct sky2_hw *hw, unsigned port) 647static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
595{ 648{
596 struct sky2_port *sky2 = netdev_priv(hw->dev[port]); 649 struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
@@ -684,7 +737,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
684 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); 737 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
685 sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); 738 sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
686 739
687 if (hw->chip_id == CHIP_ID_YUKON_EC_U) { 740 if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) {
688 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8); 741 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
689 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8); 742 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
690 if (hw->dev[port]->mtu > ETH_DATA_LEN) { 743 if (hw->dev[port]->mtu > ETH_DATA_LEN) {
@@ -1467,6 +1520,9 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1467 if (unlikely(netif_msg_tx_done(sky2))) 1520 if (unlikely(netif_msg_tx_done(sky2)))
1468 printk(KERN_DEBUG "%s: tx done %u\n", 1521 printk(KERN_DEBUG "%s: tx done %u\n",
1469 dev->name, idx); 1522 dev->name, idx);
1523 sky2->net_stats.tx_packets++;
1524 sky2->net_stats.tx_bytes += re->skb->len;
1525
1470 dev_kfree_skb_any(re->skb); 1526 dev_kfree_skb_any(re->skb);
1471 } 1527 }
1472 1528
@@ -1641,7 +1697,9 @@ static void sky2_link_up(struct sky2_port *sky2)
1641 sky2_write8(hw, SK_REG(port, LNK_LED_REG), 1697 sky2_write8(hw, SK_REG(port, LNK_LED_REG),
1642 LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF); 1698 LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
1643 1699
1644 if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U) { 1700 if (hw->chip_id == CHIP_ID_YUKON_XL
1701 || hw->chip_id == CHIP_ID_YUKON_EC_U
1702 || hw->chip_id == CHIP_ID_YUKON_EX) {
1645 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); 1703 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
1646 u16 led = PHY_M_LEDC_LOS_CTRL(1); /* link active */ 1704 u16 led = PHY_M_LEDC_LOS_CTRL(1); /* link active */
1647 1705
@@ -1734,14 +1792,16 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
1734 sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF; 1792 sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1735 1793
1736 /* Pause bits are offset (9..8) */ 1794 /* Pause bits are offset (9..8) */
1737 if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U) 1795 if (hw->chip_id == CHIP_ID_YUKON_XL
1796 || hw->chip_id == CHIP_ID_YUKON_EC_U
1797 || hw->chip_id == CHIP_ID_YUKON_EX)
1738 aux >>= 6; 1798 aux >>= 6;
1739 1799
1740 sky2->flow_status = sky2_flow(aux & PHY_M_PS_RX_P_EN, 1800 sky2->flow_status = sky2_flow(aux & PHY_M_PS_RX_P_EN,
1741 aux & PHY_M_PS_TX_P_EN); 1801 aux & PHY_M_PS_TX_P_EN);
1742 1802
1743 if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000 1803 if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000
1744 && hw->chip_id != CHIP_ID_YUKON_EC_U) 1804 && !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX))
1745 sky2->flow_status = FC_NONE; 1805 sky2->flow_status = FC_NONE;
1746 1806
1747 if (aux & PHY_M_PS_RX_P_EN) 1807 if (aux & PHY_M_PS_RX_P_EN)
@@ -1794,48 +1854,37 @@ out:
1794} 1854}
1795 1855
1796 1856
1797/* Transmit timeout is only called if we are running, carries is up 1857/* Transmit timeout is only called if we are running, carrier is up
1798 * and tx queue is full (stopped). 1858 * and tx queue is full (stopped).
1859 * Called with netif_tx_lock held.
1799 */ 1860 */
1800static void sky2_tx_timeout(struct net_device *dev) 1861static void sky2_tx_timeout(struct net_device *dev)
1801{ 1862{
1802 struct sky2_port *sky2 = netdev_priv(dev); 1863 struct sky2_port *sky2 = netdev_priv(dev);
1803 struct sky2_hw *hw = sky2->hw; 1864 struct sky2_hw *hw = sky2->hw;
1804 unsigned txq = txqaddr[sky2->port]; 1865 u32 imask;
1805 u16 report, done;
1806 1866
1807 if (netif_msg_timer(sky2)) 1867 if (netif_msg_timer(sky2))
1808 printk(KERN_ERR PFX "%s: tx timeout\n", dev->name); 1868 printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
1809 1869
1810 report = sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX);
1811 done = sky2_read16(hw, Q_ADDR(txq, Q_DONE));
1812
1813 printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n", 1870 printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n",
1814 dev->name, 1871 dev->name, sky2->tx_cons, sky2->tx_prod,
1815 sky2->tx_cons, sky2->tx_prod, report, done); 1872 sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
1873 sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE)));
1816 1874
1817 if (report != done) { 1875 imask = sky2_read32(hw, B0_IMSK); /* block IRQ in hw */
1818 printk(KERN_INFO PFX "status burst pending (irq moderation?)\n"); 1876 sky2_write32(hw, B0_IMSK, 0);
1819 1877 sky2_read32(hw, B0_IMSK);
1820 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
1821 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
1822 } else if (report != sky2->tx_cons) {
1823 printk(KERN_INFO PFX "status report lost?\n");
1824 1878
1825 netif_tx_lock_bh(dev); 1879 netif_poll_disable(hw->dev[0]); /* stop NAPI poll */
1826 sky2_tx_complete(sky2, report); 1880 synchronize_irq(hw->pdev->irq);
1827 netif_tx_unlock_bh(dev);
1828 } else {
1829 printk(KERN_INFO PFX "hardware hung? flushing\n");
1830 1881
1831 sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP); 1882 netif_start_queue(dev); /* don't wakeup during flush */
1832 sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); 1883 sky2_tx_complete(sky2, sky2->tx_prod); /* Flush transmit queue */
1833 1884
1834 sky2_tx_clean(dev); 1885 sky2_write32(hw, B0_IMSK, imask);
1835 1886
1836 sky2_qset(hw, txq); 1887 sky2_phy_reinit(sky2); /* this clears flow control etc */
1837 sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
1838 }
1839} 1888}
1840 1889
1841static int sky2_change_mtu(struct net_device *dev, int new_mtu) 1890static int sky2_change_mtu(struct net_device *dev, int new_mtu)
@@ -1849,8 +1898,9 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
1849 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) 1898 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
1850 return -EINVAL; 1899 return -EINVAL;
1851 1900
1901 /* TSO on Yukon Ultra and MTU > 1500 not supported */
1852 if (hw->chip_id == CHIP_ID_YUKON_EC_U && new_mtu > ETH_DATA_LEN) 1902 if (hw->chip_id == CHIP_ID_YUKON_EC_U && new_mtu > ETH_DATA_LEN)
1853 return -EINVAL; 1903 dev->features &= ~NETIF_F_TSO;
1854 1904
1855 if (!netif_running(dev)) { 1905 if (!netif_running(dev)) {
1856 dev->mtu = new_mtu; 1906 dev->mtu = new_mtu;
@@ -2089,6 +2139,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
2089 goto force_update; 2139 goto force_update;
2090 2140
2091 skb->protocol = eth_type_trans(skb, dev); 2141 skb->protocol = eth_type_trans(skb, dev);
2142 sky2->net_stats.rx_packets++;
2143 sky2->net_stats.rx_bytes += skb->len;
2092 dev->last_rx = jiffies; 2144 dev->last_rx = jiffies;
2093 2145
2094#ifdef SKY2_VLAN_TAG_USED 2146#ifdef SKY2_VLAN_TAG_USED
@@ -2218,8 +2270,8 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2218 2270
2219 pci_err = sky2_pci_read16(hw, PCI_STATUS); 2271 pci_err = sky2_pci_read16(hw, PCI_STATUS);
2220 if (net_ratelimit()) 2272 if (net_ratelimit())
2221 printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n", 2273 dev_err(&hw->pdev->dev, "PCI hardware error (0x%x)\n",
2222 pci_name(hw->pdev), pci_err); 2274 pci_err);
2223 2275
2224 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 2276 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2225 sky2_pci_write16(hw, PCI_STATUS, 2277 sky2_pci_write16(hw, PCI_STATUS,
@@ -2234,8 +2286,8 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2234 pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT); 2286 pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT);
2235 2287
2236 if (net_ratelimit()) 2288 if (net_ratelimit())
2237 printk(KERN_ERR PFX "%s: pci express error (0x%x)\n", 2289 dev_err(&hw->pdev->dev, "PCI Express error (0x%x)\n",
2238 pci_name(hw->pdev), pex_err); 2290 pex_err);
2239 2291
2240 /* clear the interrupt */ 2292 /* clear the interrupt */
2241 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 2293 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
@@ -2404,6 +2456,7 @@ static inline u32 sky2_mhz(const struct sky2_hw *hw)
2404 switch (hw->chip_id) { 2456 switch (hw->chip_id) {
2405 case CHIP_ID_YUKON_EC: 2457 case CHIP_ID_YUKON_EC:
2406 case CHIP_ID_YUKON_EC_U: 2458 case CHIP_ID_YUKON_EC_U:
2459 case CHIP_ID_YUKON_EX:
2407 return 125; /* 125 Mhz */ 2460 return 125; /* 125 Mhz */
2408 case CHIP_ID_YUKON_FE: 2461 case CHIP_ID_YUKON_FE:
2409 return 100; /* 100 Mhz */ 2462 return 100; /* 100 Mhz */
@@ -2423,34 +2476,62 @@ static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
2423} 2476}
2424 2477
2425 2478
2426static int sky2_reset(struct sky2_hw *hw) 2479static int __devinit sky2_init(struct sky2_hw *hw)
2427{ 2480{
2428 u16 status;
2429 u8 t8; 2481 u8 t8;
2430 int i;
2431 2482
2432 sky2_write8(hw, B0_CTST, CS_RST_CLR); 2483 sky2_write8(hw, B0_CTST, CS_RST_CLR);
2433 2484
2434 hw->chip_id = sky2_read8(hw, B2_CHIP_ID); 2485 hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
2435 if (hw->chip_id < CHIP_ID_YUKON_XL || hw->chip_id > CHIP_ID_YUKON_FE) { 2486 if (hw->chip_id < CHIP_ID_YUKON_XL || hw->chip_id > CHIP_ID_YUKON_FE) {
2436 printk(KERN_ERR PFX "%s: unsupported chip type 0x%x\n", 2487 dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
2437 pci_name(hw->pdev), hw->chip_id); 2488 hw->chip_id);
2438 return -EOPNOTSUPP; 2489 return -EOPNOTSUPP;
2439 } 2490 }
2440 2491
2492 if (hw->chip_id == CHIP_ID_YUKON_EX)
2493 dev_warn(&hw->pdev->dev, "this driver not yet tested on this chip type\n"
2494 "Please report success or failure to <netdev@vger.kernel.org>\n");
2495
2496 /* Make sure and enable all clocks */
2497 if (hw->chip_id == CHIP_ID_YUKON_EX || hw->chip_id == CHIP_ID_YUKON_EC_U)
2498 sky2_pci_write32(hw, PCI_DEV_REG3, 0);
2499
2441 hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4; 2500 hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
2442 2501
2443 /* This rev is really old, and requires untested workarounds */ 2502 /* This rev is really old, and requires untested workarounds */
2444 if (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == CHIP_REV_YU_EC_A1) { 2503 if (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == CHIP_REV_YU_EC_A1) {
2445 printk(KERN_ERR PFX "%s: unsupported revision Yukon-%s (0x%x) rev %d\n", 2504 dev_err(&hw->pdev->dev, "unsupported revision Yukon-%s (0x%x) rev %d\n",
2446 pci_name(hw->pdev), yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL], 2505 yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
2447 hw->chip_id, hw->chip_rev); 2506 hw->chip_id, hw->chip_rev);
2448 return -EOPNOTSUPP; 2507 return -EOPNOTSUPP;
2449 } 2508 }
2450 2509
2510 hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
2511 hw->ports = 1;
2512 t8 = sky2_read8(hw, B2_Y2_HW_RES);
2513 if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
2514 if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
2515 ++hw->ports;
2516 }
2517
2518 return 0;
2519}
2520
2521static void sky2_reset(struct sky2_hw *hw)
2522{
2523 u16 status;
2524 int i;
2525
2451 /* disable ASF */ 2526 /* disable ASF */
2452 if (hw->chip_id <= CHIP_ID_YUKON_EC) { 2527 if (hw->chip_id <= CHIP_ID_YUKON_EC) {
2453 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); 2528 if (hw->chip_id == CHIP_ID_YUKON_EX) {
2529 status = sky2_read16(hw, HCU_CCSR);
2530 status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
2531 HCU_CCSR_UC_STATE_MSK);
2532 sky2_write16(hw, HCU_CCSR, status);
2533 } else
2534 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
2454 sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE); 2535 sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
2455 } 2536 }
2456 2537
@@ -2472,15 +2553,7 @@ static int sky2_reset(struct sky2_hw *hw)
2472 sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL); 2553 sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);
2473 2554
2474 2555
2475 hw->pmd_type = sky2_read8(hw, B2_PMD_TYP); 2556 sky2_power_on(hw);
2476 hw->ports = 1;
2477 t8 = sky2_read8(hw, B2_Y2_HW_RES);
2478 if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
2479 if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
2480 ++hw->ports;
2481 }
2482
2483 sky2_set_power_state(hw, PCI_D0);
2484 2557
2485 for (i = 0; i < hw->ports; i++) { 2558 for (i = 0; i < hw->ports; i++) {
2486 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); 2559 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
@@ -2563,7 +2636,37 @@ static int sky2_reset(struct sky2_hw *hw)
2563 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); 2636 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
2564 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START); 2637 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
2565 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START); 2638 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
2639}
2640
2641static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
2642{
2643 return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
2644}
2645
2646static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2647{
2648 const struct sky2_port *sky2 = netdev_priv(dev);
2649
2650 wol->supported = sky2_wol_supported(sky2->hw);
2651 wol->wolopts = sky2->wol;
2652}
2653
2654static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2655{
2656 struct sky2_port *sky2 = netdev_priv(dev);
2657 struct sky2_hw *hw = sky2->hw;
2658
2659 if (wol->wolopts & ~sky2_wol_supported(sky2->hw))
2660 return -EOPNOTSUPP;
2661
2662 sky2->wol = wol->wolopts;
2663
2664 if (hw->chip_id == CHIP_ID_YUKON_EC_U)
2665 sky2_write32(hw, B0_CTST, sky2->wol
2666 ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
2566 2667
2668 if (!netif_running(dev))
2669 sky2_wol_init(sky2);
2567 return 0; 2670 return 0;
2568} 2671}
2569 2672
@@ -2814,25 +2917,9 @@ static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data)
2814 } 2917 }
2815} 2918}
2816 2919
2817/* Use hardware MIB variables for critical path statistics and
2818 * transmit feedback not reported at interrupt.
2819 * Other errors are accounted for in interrupt handler.
2820 */
2821static struct net_device_stats *sky2_get_stats(struct net_device *dev) 2920static struct net_device_stats *sky2_get_stats(struct net_device *dev)
2822{ 2921{
2823 struct sky2_port *sky2 = netdev_priv(dev); 2922 struct sky2_port *sky2 = netdev_priv(dev);
2824 u64 data[13];
2825
2826 sky2_phy_stats(sky2, data, ARRAY_SIZE(data));
2827
2828 sky2->net_stats.tx_bytes = data[0];
2829 sky2->net_stats.rx_bytes = data[1];
2830 sky2->net_stats.tx_packets = data[2] + data[4] + data[6];
2831 sky2->net_stats.rx_packets = data[3] + data[5] + data[7];
2832 sky2->net_stats.multicast = data[3] + data[5];
2833 sky2->net_stats.collisions = data[10];
2834 sky2->net_stats.tx_aborted_errors = data[12];
2835
2836 return &sky2->net_stats; 2923 return &sky2->net_stats;
2837} 2924}
2838 2925
@@ -3191,7 +3278,9 @@ static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
3191static const struct ethtool_ops sky2_ethtool_ops = { 3278static const struct ethtool_ops sky2_ethtool_ops = {
3192 .get_settings = sky2_get_settings, 3279 .get_settings = sky2_get_settings,
3193 .set_settings = sky2_set_settings, 3280 .set_settings = sky2_set_settings,
3194 .get_drvinfo = sky2_get_drvinfo, 3281 .get_drvinfo = sky2_get_drvinfo,
3282 .get_wol = sky2_get_wol,
3283 .set_wol = sky2_set_wol,
3195 .get_msglevel = sky2_get_msglevel, 3284 .get_msglevel = sky2_get_msglevel,
3196 .set_msglevel = sky2_set_msglevel, 3285 .set_msglevel = sky2_set_msglevel,
3197 .nway_reset = sky2_nway_reset, 3286 .nway_reset = sky2_nway_reset,
@@ -3221,13 +3310,14 @@ static const struct ethtool_ops sky2_ethtool_ops = {
3221 3310
3222/* Initialize network device */ 3311/* Initialize network device */
3223static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, 3312static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
3224 unsigned port, int highmem) 3313 unsigned port,
3314 int highmem, int wol)
3225{ 3315{
3226 struct sky2_port *sky2; 3316 struct sky2_port *sky2;
3227 struct net_device *dev = alloc_etherdev(sizeof(*sky2)); 3317 struct net_device *dev = alloc_etherdev(sizeof(*sky2));
3228 3318
3229 if (!dev) { 3319 if (!dev) {
3230 printk(KERN_ERR "sky2 etherdev alloc failed"); 3320 dev_err(&hw->pdev->dev, "etherdev alloc failed");
3231 return NULL; 3321 return NULL;
3232 } 3322 }
3233 3323
@@ -3269,6 +3359,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
3269 sky2->speed = -1; 3359 sky2->speed = -1;
3270 sky2->advertising = sky2_supported_modes(hw); 3360 sky2->advertising = sky2_supported_modes(hw);
3271 sky2->rx_csum = 1; 3361 sky2->rx_csum = 1;
3362 sky2->wol = wol;
3272 3363
3273 spin_lock_init(&sky2->phy_lock); 3364 spin_lock_init(&sky2->phy_lock);
3274 sky2->tx_pending = TX_DEF_PENDING; 3365 sky2->tx_pending = TX_DEF_PENDING;
@@ -3278,11 +3369,9 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
3278 3369
3279 sky2->port = port; 3370 sky2->port = port;
3280 3371
3281 if (hw->chip_id != CHIP_ID_YUKON_EC_U) 3372 dev->features |= NETIF_F_TSO | NETIF_F_IP_CSUM | NETIF_F_SG;
3282 dev->features |= NETIF_F_TSO;
3283 if (highmem) 3373 if (highmem)
3284 dev->features |= NETIF_F_HIGHDMA; 3374 dev->features |= NETIF_F_HIGHDMA;
3285 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
3286 3375
3287#ifdef SKY2_VLAN_TAG_USED 3376#ifdef SKY2_VLAN_TAG_USED
3288 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 3377 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
@@ -3343,8 +3432,7 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
3343 3432
3344 err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw); 3433 err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
3345 if (err) { 3434 if (err) {
3346 printk(KERN_ERR PFX "%s: cannot assign irq %d\n", 3435 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
3347 pci_name(pdev), pdev->irq);
3348 return err; 3436 return err;
3349 } 3437 }
3350 3438
@@ -3355,9 +3443,8 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
3355 3443
3356 if (!hw->msi) { 3444 if (!hw->msi) {
3357 /* MSI test failed, go back to INTx mode */ 3445 /* MSI test failed, go back to INTx mode */
3358 printk(KERN_INFO PFX "%s: No interrupt generated using MSI, " 3446 dev_info(&pdev->dev, "No interrupt generated using MSI, "
3359 "switching to INTx mode.\n", 3447 "switching to INTx mode.\n");
3360 pci_name(pdev));
3361 3448
3362 err = -EOPNOTSUPP; 3449 err = -EOPNOTSUPP;
3363 sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ); 3450 sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
@@ -3371,62 +3458,62 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
3371 return err; 3458 return err;
3372} 3459}
3373 3460
3461static int __devinit pci_wake_enabled(struct pci_dev *dev)
3462{
3463 int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3464 u16 value;
3465
3466 if (!pm)
3467 return 0;
3468 if (pci_read_config_word(dev, pm + PCI_PM_CTRL, &value))
3469 return 0;
3470 return value & PCI_PM_CTRL_PME_ENABLE;
3471}
3472
3374static int __devinit sky2_probe(struct pci_dev *pdev, 3473static int __devinit sky2_probe(struct pci_dev *pdev,
3375 const struct pci_device_id *ent) 3474 const struct pci_device_id *ent)
3376{ 3475{
3377 struct net_device *dev, *dev1 = NULL; 3476 struct net_device *dev;
3378 struct sky2_hw *hw; 3477 struct sky2_hw *hw;
3379 int err, pm_cap, using_dac = 0; 3478 int err, using_dac = 0, wol_default;
3380 3479
3381 err = pci_enable_device(pdev); 3480 err = pci_enable_device(pdev);
3382 if (err) { 3481 if (err) {
3383 printk(KERN_ERR PFX "%s cannot enable PCI device\n", 3482 dev_err(&pdev->dev, "cannot enable PCI device\n");
3384 pci_name(pdev));
3385 goto err_out; 3483 goto err_out;
3386 } 3484 }
3387 3485
3388 err = pci_request_regions(pdev, DRV_NAME); 3486 err = pci_request_regions(pdev, DRV_NAME);
3389 if (err) { 3487 if (err) {
3390 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n", 3488 dev_err(&pdev->dev, "cannot obtain PCI resources\n");
3391 pci_name(pdev));
3392 goto err_out; 3489 goto err_out;
3393 } 3490 }
3394 3491
3395 pci_set_master(pdev); 3492 pci_set_master(pdev);
3396 3493
3397 /* Find power-management capability. */
3398 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
3399 if (pm_cap == 0) {
3400 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
3401 "aborting.\n");
3402 err = -EIO;
3403 goto err_out_free_regions;
3404 }
3405
3406 if (sizeof(dma_addr_t) > sizeof(u32) && 3494 if (sizeof(dma_addr_t) > sizeof(u32) &&
3407 !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) { 3495 !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
3408 using_dac = 1; 3496 using_dac = 1;
3409 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 3497 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3410 if (err < 0) { 3498 if (err < 0) {
3411 printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA " 3499 dev_err(&pdev->dev, "unable to obtain 64 bit DMA "
3412 "for consistent allocations\n", pci_name(pdev)); 3500 "for consistent allocations\n");
3413 goto err_out_free_regions; 3501 goto err_out_free_regions;
3414 } 3502 }
3415
3416 } else { 3503 } else {
3417 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 3504 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3418 if (err) { 3505 if (err) {
3419 printk(KERN_ERR PFX "%s no usable DMA configuration\n", 3506 dev_err(&pdev->dev, "no usable DMA configuration\n");
3420 pci_name(pdev));
3421 goto err_out_free_regions; 3507 goto err_out_free_regions;
3422 } 3508 }
3423 } 3509 }
3424 3510
3511 wol_default = pci_wake_enabled(pdev) ? WAKE_MAGIC : 0;
3512
3425 err = -ENOMEM; 3513 err = -ENOMEM;
3426 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 3514 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
3427 if (!hw) { 3515 if (!hw) {
3428 printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n", 3516 dev_err(&pdev->dev, "cannot allocate hardware struct\n");
3429 pci_name(pdev));
3430 goto err_out_free_regions; 3517 goto err_out_free_regions;
3431 } 3518 }
3432 3519
@@ -3434,11 +3521,9 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3434 3521
3435 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); 3522 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
3436 if (!hw->regs) { 3523 if (!hw->regs) {
3437 printk(KERN_ERR PFX "%s: cannot map device registers\n", 3524 dev_err(&pdev->dev, "cannot map device registers\n");
3438 pci_name(pdev));
3439 goto err_out_free_hw; 3525 goto err_out_free_hw;
3440 } 3526 }
3441 hw->pm_cap = pm_cap;
3442 3527
3443#ifdef __BIG_ENDIAN 3528#ifdef __BIG_ENDIAN
3444 /* The sk98lin vendor driver uses hardware byte swapping but 3529 /* The sk98lin vendor driver uses hardware byte swapping but
@@ -3458,18 +3543,22 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3458 if (!hw->st_le) 3543 if (!hw->st_le)
3459 goto err_out_iounmap; 3544 goto err_out_iounmap;
3460 3545
3461 err = sky2_reset(hw); 3546 err = sky2_init(hw);
3462 if (err) 3547 if (err)
3463 goto err_out_iounmap; 3548 goto err_out_iounmap;
3464 3549
3465 printk(KERN_INFO PFX "v%s addr 0x%llx irq %d Yukon-%s (0x%x) rev %d\n", 3550 dev_info(&pdev->dev, "v%s addr 0x%llx irq %d Yukon-%s (0x%x) rev %d\n",
3466 DRV_VERSION, (unsigned long long)pci_resource_start(pdev, 0), 3551 DRV_VERSION, (unsigned long long)pci_resource_start(pdev, 0),
3467 pdev->irq, yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL], 3552 pdev->irq, yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
3468 hw->chip_id, hw->chip_rev); 3553 hw->chip_id, hw->chip_rev);
3469 3554
3470 dev = sky2_init_netdev(hw, 0, using_dac); 3555 sky2_reset(hw);
3471 if (!dev) 3556
3557 dev = sky2_init_netdev(hw, 0, using_dac, wol_default);
3558 if (!dev) {
3559 err = -ENOMEM;
3472 goto err_out_free_pci; 3560 goto err_out_free_pci;
3561 }
3473 3562
3474 if (!disable_msi && pci_enable_msi(pdev) == 0) { 3563 if (!disable_msi && pci_enable_msi(pdev) == 0) {
3475 err = sky2_test_msi(hw); 3564 err = sky2_test_msi(hw);
@@ -3481,32 +3570,33 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3481 3570
3482 err = register_netdev(dev); 3571 err = register_netdev(dev);
3483 if (err) { 3572 if (err) {
3484 printk(KERN_ERR PFX "%s: cannot register net device\n", 3573 dev_err(&pdev->dev, "cannot register net device\n");
3485 pci_name(pdev));
3486 goto err_out_free_netdev; 3574 goto err_out_free_netdev;
3487 } 3575 }
3488 3576
3489 err = request_irq(pdev->irq, sky2_intr, hw->msi ? 0 : IRQF_SHARED, 3577 err = request_irq(pdev->irq, sky2_intr, hw->msi ? 0 : IRQF_SHARED,
3490 dev->name, hw); 3578 dev->name, hw);
3491 if (err) { 3579 if (err) {
3492 printk(KERN_ERR PFX "%s: cannot assign irq %d\n", 3580 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
3493 pci_name(pdev), pdev->irq);
3494 goto err_out_unregister; 3581 goto err_out_unregister;
3495 } 3582 }
3496 sky2_write32(hw, B0_IMSK, Y2_IS_BASE); 3583 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
3497 3584
3498 sky2_show_addr(dev); 3585 sky2_show_addr(dev);
3499 3586
3500 if (hw->ports > 1 && (dev1 = sky2_init_netdev(hw, 1, using_dac))) { 3587 if (hw->ports > 1) {
3501 if (register_netdev(dev1) == 0) 3588 struct net_device *dev1;
3502 sky2_show_addr(dev1); 3589
3503 else { 3590 dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default);
3504 /* Failure to register second port need not be fatal */ 3591 if (!dev1)
3505 printk(KERN_WARNING PFX 3592 dev_warn(&pdev->dev, "allocation for second device failed\n");
3506 "register of second port failed\n"); 3593 else if ((err = register_netdev(dev1))) {
3594 dev_warn(&pdev->dev,
3595 "register of second port failed (%d)\n", err);
3507 hw->dev[1] = NULL; 3596 hw->dev[1] = NULL;
3508 free_netdev(dev1); 3597 free_netdev(dev1);
3509 } 3598 } else
3599 sky2_show_addr(dev1);
3510 } 3600 }
3511 3601
3512 setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw); 3602 setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw);
@@ -3555,7 +3645,8 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
3555 unregister_netdev(dev1); 3645 unregister_netdev(dev1);
3556 unregister_netdev(dev0); 3646 unregister_netdev(dev0);
3557 3647
3558 sky2_set_power_state(hw, PCI_D3hot); 3648 sky2_power_aux(hw);
3649
3559 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF); 3650 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
3560 sky2_write8(hw, B0_CTST, CS_RST_SET); 3651 sky2_write8(hw, B0_CTST, CS_RST_SET);
3561 sky2_read8(hw, B0_CTST); 3652 sky2_read8(hw, B0_CTST);
@@ -3580,27 +3671,31 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
3580static int sky2_suspend(struct pci_dev *pdev, pm_message_t state) 3671static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
3581{ 3672{
3582 struct sky2_hw *hw = pci_get_drvdata(pdev); 3673 struct sky2_hw *hw = pci_get_drvdata(pdev);
3583 int i; 3674 int i, wol = 0;
3584 pci_power_t pstate = pci_choose_state(pdev, state);
3585
3586 if (!(pstate == PCI_D3hot || pstate == PCI_D3cold))
3587 return -EINVAL;
3588 3675
3589 del_timer_sync(&hw->idle_timer); 3676 del_timer_sync(&hw->idle_timer);
3590 netif_poll_disable(hw->dev[0]); 3677 netif_poll_disable(hw->dev[0]);
3591 3678
3592 for (i = 0; i < hw->ports; i++) { 3679 for (i = 0; i < hw->ports; i++) {
3593 struct net_device *dev = hw->dev[i]; 3680 struct net_device *dev = hw->dev[i];
3681 struct sky2_port *sky2 = netdev_priv(dev);
3594 3682
3595 if (netif_running(dev)) { 3683 if (netif_running(dev))
3596 sky2_down(dev); 3684 sky2_down(dev);
3597 netif_device_detach(dev); 3685
3598 } 3686 if (sky2->wol)
3687 sky2_wol_init(sky2);
3688
3689 wol |= sky2->wol;
3599 } 3690 }
3600 3691
3601 sky2_write32(hw, B0_IMSK, 0); 3692 sky2_write32(hw, B0_IMSK, 0);
3693 sky2_power_aux(hw);
3694
3602 pci_save_state(pdev); 3695 pci_save_state(pdev);
3603 sky2_set_power_state(hw, pstate); 3696 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
3697 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3698
3604 return 0; 3699 return 0;
3605} 3700}
3606 3701
@@ -3609,21 +3704,22 @@ static int sky2_resume(struct pci_dev *pdev)
3609 struct sky2_hw *hw = pci_get_drvdata(pdev); 3704 struct sky2_hw *hw = pci_get_drvdata(pdev);
3610 int i, err; 3705 int i, err;
3611 3706
3612 pci_restore_state(pdev); 3707 err = pci_set_power_state(pdev, PCI_D0);
3613 pci_enable_wake(pdev, PCI_D0, 0); 3708 if (err)
3614 sky2_set_power_state(hw, PCI_D0); 3709 goto out;
3615 3710
3616 err = sky2_reset(hw); 3711 err = pci_restore_state(pdev);
3617 if (err) 3712 if (err)
3618 goto out; 3713 goto out;
3619 3714
3715 pci_enable_wake(pdev, PCI_D0, 0);
3716 sky2_reset(hw);
3717
3620 sky2_write32(hw, B0_IMSK, Y2_IS_BASE); 3718 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
3621 3719
3622 for (i = 0; i < hw->ports; i++) { 3720 for (i = 0; i < hw->ports; i++) {
3623 struct net_device *dev = hw->dev[i]; 3721 struct net_device *dev = hw->dev[i];
3624 if (netif_running(dev)) { 3722 if (netif_running(dev)) {
3625 netif_device_attach(dev);
3626
3627 err = sky2_up(dev); 3723 err = sky2_up(dev);
3628 if (err) { 3724 if (err) {
3629 printk(KERN_ERR PFX "%s: could not up: %d\n", 3725 printk(KERN_ERR PFX "%s: could not up: %d\n",
@@ -3636,11 +3732,43 @@ static int sky2_resume(struct pci_dev *pdev)
3636 3732
3637 netif_poll_enable(hw->dev[0]); 3733 netif_poll_enable(hw->dev[0]);
3638 sky2_idle_start(hw); 3734 sky2_idle_start(hw);
3735 return 0;
3639out: 3736out:
3737 dev_err(&pdev->dev, "resume failed (%d)\n", err);
3738 pci_disable_device(pdev);
3640 return err; 3739 return err;
3641} 3740}
3642#endif 3741#endif
3643 3742
3743static void sky2_shutdown(struct pci_dev *pdev)
3744{
3745 struct sky2_hw *hw = pci_get_drvdata(pdev);
3746 int i, wol = 0;
3747
3748 del_timer_sync(&hw->idle_timer);
3749 netif_poll_disable(hw->dev[0]);
3750
3751 for (i = 0; i < hw->ports; i++) {
3752 struct net_device *dev = hw->dev[i];
3753 struct sky2_port *sky2 = netdev_priv(dev);
3754
3755 if (sky2->wol) {
3756 wol = 1;
3757 sky2_wol_init(sky2);
3758 }
3759 }
3760
3761 if (wol)
3762 sky2_power_aux(hw);
3763
3764 pci_enable_wake(pdev, PCI_D3hot, wol);
3765 pci_enable_wake(pdev, PCI_D3cold, wol);
3766
3767 pci_disable_device(pdev);
3768 pci_set_power_state(pdev, PCI_D3hot);
3769
3770}
3771
3644static struct pci_driver sky2_driver = { 3772static struct pci_driver sky2_driver = {
3645 .name = DRV_NAME, 3773 .name = DRV_NAME,
3646 .id_table = sky2_id_table, 3774 .id_table = sky2_id_table,
@@ -3650,6 +3778,7 @@ static struct pci_driver sky2_driver = {
3650 .suspend = sky2_suspend, 3778 .suspend = sky2_suspend,
3651 .resume = sky2_resume, 3779 .resume = sky2_resume,
3652#endif 3780#endif
3781 .shutdown = sky2_shutdown,
3653}; 3782};
3654 3783
3655static int __init sky2_init_module(void) 3784static int __init sky2_init_module(void)
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 6ed1d47dbbd3..3b0189569d52 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -32,6 +32,7 @@ enum pci_dev_reg_1 {
32 PCI_Y2_PHY1_COMA = 1<<28, /* Set PHY 1 to Coma Mode (YUKON-2) */ 32 PCI_Y2_PHY1_COMA = 1<<28, /* Set PHY 1 to Coma Mode (YUKON-2) */
33 PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */ 33 PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */
34 PCI_Y2_PHY1_POWD = 1<<26, /* Set PHY 1 to Power Down (YUKON-2) */ 34 PCI_Y2_PHY1_POWD = 1<<26, /* Set PHY 1 to Power Down (YUKON-2) */
35 PCI_Y2_PME_LEGACY= 1<<15, /* PCI Express legacy power management mode */
35}; 36};
36 37
37enum pci_dev_reg_2 { 38enum pci_dev_reg_2 {
@@ -370,12 +371,9 @@ enum {
370 371
371/* B2_CHIP_ID 8 bit Chip Identification Number */ 372/* B2_CHIP_ID 8 bit Chip Identification Number */
372enum { 373enum {
373 CHIP_ID_GENESIS = 0x0a, /* Chip ID for GENESIS */
374 CHIP_ID_YUKON = 0xb0, /* Chip ID for YUKON */
375 CHIP_ID_YUKON_LITE = 0xb1, /* Chip ID for YUKON-Lite (Rev. A1-A3) */
376 CHIP_ID_YUKON_LP = 0xb2, /* Chip ID for YUKON-LP */
377 CHIP_ID_YUKON_XL = 0xb3, /* Chip ID for YUKON-2 XL */ 374 CHIP_ID_YUKON_XL = 0xb3, /* Chip ID for YUKON-2 XL */
378 CHIP_ID_YUKON_EC_U = 0xb4, /* Chip ID for YUKON-2 EC Ultra */ 375 CHIP_ID_YUKON_EC_U = 0xb4, /* Chip ID for YUKON-2 EC Ultra */
376 CHIP_ID_YUKON_EX = 0xb5, /* Chip ID for YUKON-2 Extreme */
379 CHIP_ID_YUKON_EC = 0xb6, /* Chip ID for YUKON-2 EC */ 377 CHIP_ID_YUKON_EC = 0xb6, /* Chip ID for YUKON-2 EC */
380 CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */ 378 CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */
381 379
@@ -767,6 +765,24 @@ enum {
767 POLL_LIST_ADDR_HI= 0x0e2c,/* 32 bit Poll. List Start Addr (high) */ 765 POLL_LIST_ADDR_HI= 0x0e2c,/* 32 bit Poll. List Start Addr (high) */
768}; 766};
769 767
768enum {
769 SMB_CFG = 0x0e40, /* 32 bit SMBus Config Register */
770 SMB_CSR = 0x0e44, /* 32 bit SMBus Control/Status Register */
771};
772
773enum {
774 CPU_WDOG = 0x0e48, /* 32 bit Watchdog Register */
775 CPU_CNTR = 0x0e4C, /* 32 bit Counter Register */
776 CPU_TIM = 0x0e50,/* 32 bit Timer Compare Register */
777 CPU_AHB_ADDR = 0x0e54, /* 32 bit CPU AHB Debug Register */
778 CPU_AHB_WDATA = 0x0e58, /* 32 bit CPU AHB Debug Register */
779 CPU_AHB_RDATA = 0x0e5C, /* 32 bit CPU AHB Debug Register */
780 HCU_MAP_BASE = 0x0e60, /* 32 bit Reset Mapping Base */
781 CPU_AHB_CTRL = 0x0e64, /* 32 bit CPU AHB Debug Register */
782 HCU_CCSR = 0x0e68, /* 32 bit CPU Control and Status Register */
783 HCU_HCSR = 0x0e6C, /* 32 bit Host Control and Status Register */
784};
785
770/* ASF Subsystem Registers (Yukon-2 only) */ 786/* ASF Subsystem Registers (Yukon-2 only) */
771enum { 787enum {
772 B28_Y2_SMB_CONFIG = 0x0e40,/* 32 bit ASF SMBus Config Register */ 788 B28_Y2_SMB_CONFIG = 0x0e40,/* 32 bit ASF SMBus Config Register */
@@ -837,33 +853,27 @@ enum {
837 GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */ 853 GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */
838 854
839/* Wake-up Frame Pattern Match Control Registers (YUKON only) */ 855/* Wake-up Frame Pattern Match Control Registers (YUKON only) */
840
841 WOL_REG_OFFS = 0x20,/* HW-Bug: Address is + 0x20 against spec. */
842
843 WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */ 856 WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */
844 WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */ 857 WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */
845 WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */ 858 WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */
846 WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */ 859 WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */
847 WOL_PATT_PME = 0x0f2a,/* 8 bit WOL PME Match Enable (Yukon-2) */
848 WOL_PATT_ASFM = 0x0f2b,/* 8 bit WOL ASF Match Enable (Yukon-2) */
849 WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */ 860 WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */
850 861
851/* WOL Pattern Length Registers (YUKON only) */ 862/* WOL Pattern Length Registers (YUKON only) */
852
853 WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */ 863 WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */
854 WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */ 864 WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */
855 865
856/* WOL Pattern Counter Registers (YUKON only) */ 866/* WOL Pattern Counter Registers (YUKON only) */
857
858
859 WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */ 867 WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */
860 WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */ 868 WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */
861}; 869};
870#define WOL_REGS(port, x) (x + (port)*0x80)
862 871
863enum { 872enum {
864 WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */ 873 WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */
865 WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */ 874 WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */
866}; 875};
876#define WOL_PATT_RAM_BASE(port) (WOL_PATT_RAM_1 + (port)*0x400)
867 877
868enum { 878enum {
869 BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */ 879 BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */
@@ -1654,6 +1664,39 @@ enum {
1654 Y2_ASF_CLR_ASFI = 1<<1, /* Clear host IRQ */ 1664 Y2_ASF_CLR_ASFI = 1<<1, /* Clear host IRQ */
1655 Y2_ASF_HOST_IRQ = 1<<0, /* Issue an IRQ to HOST system */ 1665 Y2_ASF_HOST_IRQ = 1<<0, /* Issue an IRQ to HOST system */
1656}; 1666};
1667/* HCU_CCSR CPU Control and Status Register */
1668enum {
1669 HCU_CCSR_SMBALERT_MONITOR= 1<<27, /* SMBALERT pin monitor */
1670 HCU_CCSR_CPU_SLEEP = 1<<26, /* CPU sleep status */
1671 /* Clock Stretching Timeout */
1672 HCU_CCSR_CS_TO = 1<<25,
1673 HCU_CCSR_WDOG = 1<<24, /* Watchdog Reset */
1674
1675 HCU_CCSR_CLR_IRQ_HOST = 1<<17, /* Clear IRQ_HOST */
1676 HCU_CCSR_SET_IRQ_HCU = 1<<16, /* Set IRQ_HCU */
1677
1678 HCU_CCSR_AHB_RST = 1<<9, /* Reset AHB bridge */
1679 HCU_CCSR_CPU_RST_MODE = 1<<8, /* CPU Reset Mode */
1680
1681 HCU_CCSR_SET_SYNC_CPU = 1<<5,
1682 HCU_CCSR_CPU_CLK_DIVIDE_MSK = 3<<3,/* CPU Clock Divide */
1683 HCU_CCSR_CPU_CLK_DIVIDE_BASE= 1<<3,
1684 HCU_CCSR_OS_PRSNT = 1<<2, /* ASF OS Present */
1685/* Microcontroller State */
1686 HCU_CCSR_UC_STATE_MSK = 3,
1687 HCU_CCSR_UC_STATE_BASE = 1<<0,
1688 HCU_CCSR_ASF_RESET = 0,
1689 HCU_CCSR_ASF_HALTED = 1<<1,
1690 HCU_CCSR_ASF_RUNNING = 1<<0,
1691};
1692
1693/* HCU_HCSR Host Control and Status Register */
1694enum {
1695 HCU_HCSR_SET_IRQ_CPU = 1<<16, /* Set IRQ_CPU */
1696
1697 HCU_HCSR_CLR_IRQ_HCU = 1<<1, /* Clear IRQ_HCU */
1698 HCU_HCSR_SET_IRQ_HOST = 1<<0, /* Set IRQ_HOST */
1699};
1657 1700
1658/* STAT_CTRL 32 bit Status BMU control register (Yukon-2 only) */ 1701/* STAT_CTRL 32 bit Status BMU control register (Yukon-2 only) */
1659enum { 1702enum {
@@ -1715,14 +1758,17 @@ enum {
1715 GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ 1758 GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
1716 1759
1717#define GMAC_DEF_MSK GM_IS_TX_FF_UR 1760#define GMAC_DEF_MSK GM_IS_TX_FF_UR
1761};
1718 1762
1719/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ 1763/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
1720 /* Bits 15.. 2: reserved */ 1764enum { /* Bits 15.. 2: reserved */
1721 GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */ 1765 GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */
1722 GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */ 1766 GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */
1767};
1723 1768
1724 1769
1725/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */ 1770/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */
1771enum {
1726 WOL_CTL_LINK_CHG_OCC = 1<<15, 1772 WOL_CTL_LINK_CHG_OCC = 1<<15,
1727 WOL_CTL_MAGIC_PKT_OCC = 1<<14, 1773 WOL_CTL_MAGIC_PKT_OCC = 1<<14,
1728 WOL_CTL_PATTERN_OCC = 1<<13, 1774 WOL_CTL_PATTERN_OCC = 1<<13,
@@ -1741,17 +1787,6 @@ enum {
1741 WOL_CTL_DIS_PATTERN_UNIT = 1<<0, 1787 WOL_CTL_DIS_PATTERN_UNIT = 1<<0,
1742}; 1788};
1743 1789
1744#define WOL_CTL_DEFAULT \
1745 (WOL_CTL_DIS_PME_ON_LINK_CHG | \
1746 WOL_CTL_DIS_PME_ON_PATTERN | \
1747 WOL_CTL_DIS_PME_ON_MAGIC_PKT | \
1748 WOL_CTL_DIS_LINK_CHG_UNIT | \
1749 WOL_CTL_DIS_PATTERN_UNIT | \
1750 WOL_CTL_DIS_MAGIC_PKT_UNIT)
1751
1752/* WOL_MATCH_CTL 8 bit WOL Match Control Reg */
1753#define WOL_CTL_PATT_ENA(x) (1 << (x))
1754
1755 1790
1756/* Control flags */ 1791/* Control flags */
1757enum { 1792enum {
@@ -1875,6 +1910,7 @@ struct sky2_port {
1875 u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */ 1910 u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */
1876 u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */ 1911 u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */
1877 u8 rx_csum; 1912 u8 rx_csum;
1913 u8 wol;
1878 enum flow_control flow_mode; 1914 enum flow_control flow_mode;
1879 enum flow_control flow_status; 1915 enum flow_control flow_status;
1880 1916
@@ -1887,7 +1923,6 @@ struct sky2_hw {
1887 struct pci_dev *pdev; 1923 struct pci_dev *pdev;
1888 struct net_device *dev[2]; 1924 struct net_device *dev[2];
1889 1925
1890 int pm_cap;
1891 u8 chip_id; 1926 u8 chip_id;
1892 u8 chip_rev; 1927 u8 chip_rev;
1893 u8 pmd_type; 1928 u8 pmd_type;
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 43af61438449..c95614131980 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -1659,7 +1659,7 @@ smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1659{ 1659{
1660 strncpy(info->driver, CARDNAME, sizeof(info->driver)); 1660 strncpy(info->driver, CARDNAME, sizeof(info->driver));
1661 strncpy(info->version, version, sizeof(info->version)); 1661 strncpy(info->version, version, sizeof(info->version));
1662 strncpy(info->bus_info, dev->class_dev.dev->bus_id, sizeof(info->bus_info)); 1662 strncpy(info->bus_info, dev->dev.parent->bus_id, sizeof(info->bus_info));
1663} 1663}
1664 1664
1665static int smc911x_ethtool_nwayreset(struct net_device *dev) 1665static int smc911x_ethtool_nwayreset(struct net_device *dev)
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index e62a9586fb95..49f4b7712ebf 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1712,7 +1712,7 @@ smc_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1712{ 1712{
1713 strncpy(info->driver, CARDNAME, sizeof(info->driver)); 1713 strncpy(info->driver, CARDNAME, sizeof(info->driver));
1714 strncpy(info->version, version, sizeof(info->version)); 1714 strncpy(info->version, version, sizeof(info->version));
1715 strncpy(info->bus_info, dev->class_dev.dev->bus_id, sizeof(info->bus_info)); 1715 strncpy(info->bus_info, dev->dev.parent->bus_id, sizeof(info->bus_info));
1716} 1716}
1717 1717
1718static int smc_ethtool_nwayreset(struct net_device *dev) 1718static int smc_ethtool_nwayreset(struct net_device *dev)
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 8ea2fc1b96cb..bf6ff39e02bb 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -280,72 +280,67 @@ spider_net_free_chain(struct spider_net_card *card,
280{ 280{
281 struct spider_net_descr *descr; 281 struct spider_net_descr *descr;
282 282
283 for (descr = chain->tail; !descr->bus_addr; descr = descr->next) { 283 descr = chain->ring;
284 pci_unmap_single(card->pdev, descr->bus_addr, 284 do {
285 SPIDER_NET_DESCR_SIZE, PCI_DMA_BIDIRECTIONAL);
286 descr->bus_addr = 0; 285 descr->bus_addr = 0;
287 } 286 descr->next_descr_addr = 0;
287 descr = descr->next;
288 } while (descr != chain->ring);
289
290 dma_free_coherent(&card->pdev->dev, chain->num_desc,
291 chain->ring, chain->dma_addr);
288} 292}
289 293
290/** 294/**
291 * spider_net_init_chain - links descriptor chain 295 * spider_net_init_chain - alloc and link descriptor chain
292 * @card: card structure 296 * @card: card structure
293 * @chain: address of chain 297 * @chain: address of chain
294 * @start_descr: address of descriptor array
295 * @no: number of descriptors
296 * 298 *
297 * we manage a circular list that mirrors the hardware structure, 299 * We manage a circular list that mirrors the hardware structure,
298 * except that the hardware uses bus addresses. 300 * except that the hardware uses bus addresses.
299 * 301 *
300 * returns 0 on success, <0 on failure 302 * Returns 0 on success, <0 on failure
301 */ 303 */
302static int 304static int
303spider_net_init_chain(struct spider_net_card *card, 305spider_net_init_chain(struct spider_net_card *card,
304 struct spider_net_descr_chain *chain, 306 struct spider_net_descr_chain *chain)
305 struct spider_net_descr *start_descr,
306 int no)
307{ 307{
308 int i; 308 int i;
309 struct spider_net_descr *descr; 309 struct spider_net_descr *descr;
310 dma_addr_t buf; 310 dma_addr_t buf;
311 size_t alloc_size;
311 312
312 descr = start_descr; 313 alloc_size = chain->num_desc * sizeof (struct spider_net_descr);
313 memset(descr, 0, sizeof(*descr) * no);
314 314
315 /* set up the hardware pointers in each descriptor */ 315 chain->ring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
316 for (i=0; i<no; i++, descr++) { 316 &chain->dma_addr, GFP_KERNEL);
317 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 317
318 if (!chain->ring)
319 return -ENOMEM;
318 320
319 buf = pci_map_single(card->pdev, descr, 321 descr = chain->ring;
320 SPIDER_NET_DESCR_SIZE, 322 memset(descr, 0, alloc_size);
321 PCI_DMA_BIDIRECTIONAL);
322 323
323 if (pci_dma_mapping_error(buf)) 324 /* Set up the hardware pointers in each descriptor */
324 goto iommu_error; 325 buf = chain->dma_addr;
326 for (i=0; i < chain->num_desc; i++, descr++) {
327 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
325 328
326 descr->bus_addr = buf; 329 descr->bus_addr = buf;
330 descr->next_descr_addr = 0;
327 descr->next = descr + 1; 331 descr->next = descr + 1;
328 descr->prev = descr - 1; 332 descr->prev = descr - 1;
329 333
334 buf += sizeof(struct spider_net_descr);
330 } 335 }
331 /* do actual circular list */ 336 /* do actual circular list */
332 (descr-1)->next = start_descr; 337 (descr-1)->next = chain->ring;
333 start_descr->prev = descr-1; 338 chain->ring->prev = descr-1;
334 339
335 spin_lock_init(&chain->lock); 340 spin_lock_init(&chain->lock);
336 chain->head = start_descr; 341 chain->head = chain->ring;
337 chain->tail = start_descr; 342 chain->tail = chain->ring;
338
339 return 0; 343 return 0;
340
341iommu_error:
342 descr = start_descr;
343 for (i=0; i < no; i++, descr++)
344 if (descr->bus_addr)
345 pci_unmap_single(card->pdev, descr->bus_addr,
346 SPIDER_NET_DESCR_SIZE,
347 PCI_DMA_BIDIRECTIONAL);
348 return -ENOMEM;
349} 344}
350 345
351/** 346/**
@@ -372,21 +367,20 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
372} 367}
373 368
374/** 369/**
375 * spider_net_prepare_rx_descr - reinitializes a rx descriptor 370 * spider_net_prepare_rx_descr - Reinitialize RX descriptor
376 * @card: card structure 371 * @card: card structure
377 * @descr: descriptor to re-init 372 * @descr: descriptor to re-init
378 * 373 *
379 * return 0 on succes, <0 on failure 374 * Return 0 on succes, <0 on failure.
380 * 375 *
381 * allocates a new rx skb, iommu-maps it and attaches it to the descriptor. 376 * Allocates a new rx skb, iommu-maps it and attaches it to the
382 * Activate the descriptor state-wise 377 * descriptor. Mark the descriptor as activated, ready-to-use.
383 */ 378 */
384static int 379static int
385spider_net_prepare_rx_descr(struct spider_net_card *card, 380spider_net_prepare_rx_descr(struct spider_net_card *card,
386 struct spider_net_descr *descr) 381 struct spider_net_descr *descr)
387{ 382{
388 dma_addr_t buf; 383 dma_addr_t buf;
389 int error = 0;
390 int offset; 384 int offset;
391 int bufsize; 385 int bufsize;
392 386
@@ -414,7 +408,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
414 (SPIDER_NET_RXBUF_ALIGN - 1); 408 (SPIDER_NET_RXBUF_ALIGN - 1);
415 if (offset) 409 if (offset)
416 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); 410 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
417 /* io-mmu-map the skb */ 411 /* iommu-map the skb */
418 buf = pci_map_single(card->pdev, descr->skb->data, 412 buf = pci_map_single(card->pdev, descr->skb->data,
419 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); 413 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
420 descr->buf_addr = buf; 414 descr->buf_addr = buf;
@@ -425,11 +419,16 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
425 card->spider_stats.rx_iommu_map_error++; 419 card->spider_stats.rx_iommu_map_error++;
426 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 420 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
427 } else { 421 } else {
422 descr->next_descr_addr = 0;
423 wmb();
428 descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED | 424 descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
429 SPIDER_NET_DMAC_NOINTR_COMPLETE; 425 SPIDER_NET_DMAC_NOINTR_COMPLETE;
426
427 wmb();
428 descr->prev->next_descr_addr = descr->bus_addr;
430 } 429 }
431 430
432 return error; 431 return 0;
433} 432}
434 433
435/** 434/**
@@ -493,10 +492,10 @@ spider_net_refill_rx_chain(struct spider_net_card *card)
493} 492}
494 493
495/** 494/**
496 * spider_net_alloc_rx_skbs - allocates rx skbs in rx descriptor chains 495 * spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains
497 * @card: card structure 496 * @card: card structure
498 * 497 *
499 * returns 0 on success, <0 on failure 498 * Returns 0 on success, <0 on failure.
500 */ 499 */
501static int 500static int
502spider_net_alloc_rx_skbs(struct spider_net_card *card) 501spider_net_alloc_rx_skbs(struct spider_net_card *card)
@@ -507,16 +506,16 @@ spider_net_alloc_rx_skbs(struct spider_net_card *card)
507 result = -ENOMEM; 506 result = -ENOMEM;
508 507
509 chain = &card->rx_chain; 508 chain = &card->rx_chain;
510 /* put at least one buffer into the chain. if this fails, 509 /* Put at least one buffer into the chain. if this fails,
511 * we've got a problem. if not, spider_net_refill_rx_chain 510 * we've got a problem. If not, spider_net_refill_rx_chain
512 * will do the rest at the end of this function */ 511 * will do the rest at the end of this function. */
513 if (spider_net_prepare_rx_descr(card, chain->head)) 512 if (spider_net_prepare_rx_descr(card, chain->head))
514 goto error; 513 goto error;
515 else 514 else
516 chain->head = chain->head->next; 515 chain->head = chain->head->next;
517 516
518 /* this will allocate the rest of the rx buffers; if not, it's 517 /* This will allocate the rest of the rx buffers;
519 * business as usual later on */ 518 * if not, it's business as usual later on. */
520 spider_net_refill_rx_chain(card); 519 spider_net_refill_rx_chain(card);
521 spider_net_enable_rxdmac(card); 520 spider_net_enable_rxdmac(card);
522 return 0; 521 return 0;
@@ -707,7 +706,7 @@ spider_net_set_low_watermark(struct spider_net_card *card)
707 } 706 }
708 707
709 /* If TX queue is short, don't even bother with interrupts */ 708 /* If TX queue is short, don't even bother with interrupts */
710 if (cnt < card->num_tx_desc/4) 709 if (cnt < card->tx_chain.num_desc/4)
711 return cnt; 710 return cnt;
712 711
713 /* Set low-watermark 3/4th's of the way into the queue. */ 712 /* Set low-watermark 3/4th's of the way into the queue. */
@@ -915,16 +914,13 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
915 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on 914 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
916 * @descr: descriptor to process 915 * @descr: descriptor to process
917 * @card: card structure 916 * @card: card structure
918 * @napi: whether caller is in NAPI context
919 *
920 * returns 1 on success, 0 if no packet was passed to the stack
921 * 917 *
922 * iommu-unmaps the skb, fills out skb structure and passes the data to the 918 * Fills out skb structure and passes the data to the stack.
923 * stack. The descriptor state is not changed. 919 * The descriptor state is not changed.
924 */ 920 */
925static int 921static void
926spider_net_pass_skb_up(struct spider_net_descr *descr, 922spider_net_pass_skb_up(struct spider_net_descr *descr,
927 struct spider_net_card *card, int napi) 923 struct spider_net_card *card)
928{ 924{
929 struct sk_buff *skb; 925 struct sk_buff *skb;
930 struct net_device *netdev; 926 struct net_device *netdev;
@@ -932,23 +928,8 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
932 928
933 data_status = descr->data_status; 929 data_status = descr->data_status;
934 data_error = descr->data_error; 930 data_error = descr->data_error;
935
936 netdev = card->netdev; 931 netdev = card->netdev;
937 932
938 /* unmap descriptor */
939 pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME,
940 PCI_DMA_FROMDEVICE);
941
942 /* the cases we'll throw away the packet immediately */
943 if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
944 if (netif_msg_rx_err(card))
945 pr_err("error in received descriptor found, "
946 "data_status=x%08x, data_error=x%08x\n",
947 data_status, data_error);
948 card->spider_stats.rx_desc_error++;
949 return 0;
950 }
951
952 skb = descr->skb; 933 skb = descr->skb;
953 skb->dev = netdev; 934 skb->dev = netdev;
954 skb_put(skb, descr->valid_size); 935 skb_put(skb, descr->valid_size);
@@ -977,57 +958,72 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
977 } 958 }
978 959
979 /* pass skb up to stack */ 960 /* pass skb up to stack */
980 if (napi) 961 netif_receive_skb(skb);
981 netif_receive_skb(skb);
982 else
983 netif_rx_ni(skb);
984 962
985 /* update netdevice statistics */ 963 /* update netdevice statistics */
986 card->netdev_stats.rx_packets++; 964 card->netdev_stats.rx_packets++;
987 card->netdev_stats.rx_bytes += skb->len; 965 card->netdev_stats.rx_bytes += skb->len;
966}
988 967
989 return 1; 968#ifdef DEBUG
969static void show_rx_chain(struct spider_net_card *card)
970{
971 struct spider_net_descr_chain *chain = &card->rx_chain;
972 struct spider_net_descr *start= chain->tail;
973 struct spider_net_descr *descr= start;
974 int status;
975
976 int cnt = 0;
977 int cstat = spider_net_get_descr_status(descr);
978 printk(KERN_INFO "RX chain tail at descr=%ld\n",
979 (start - card->descr) - card->tx_chain.num_desc);
980 status = cstat;
981 do
982 {
983 status = spider_net_get_descr_status(descr);
984 if (cstat != status) {
985 printk(KERN_INFO "Have %d descrs with stat=x%08x\n", cnt, cstat);
986 cstat = status;
987 cnt = 0;
988 }
989 cnt ++;
990 descr = descr->next;
991 } while (descr != start);
992 printk(KERN_INFO "Last %d descrs with stat=x%08x\n", cnt, cstat);
990} 993}
994#endif
991 995
992/** 996/**
993 * spider_net_decode_one_descr - processes an rx descriptor 997 * spider_net_decode_one_descr - processes an rx descriptor
994 * @card: card structure 998 * @card: card structure
995 * @napi: whether caller is in NAPI context
996 * 999 *
997 * returns 1 if a packet has been sent to the stack, otherwise 0 1000 * Returns 1 if a packet has been sent to the stack, otherwise 0
998 * 1001 *
999 * processes an rx descriptor by iommu-unmapping the data buffer and passing 1002 * Processes an rx descriptor by iommu-unmapping the data buffer and passing
1000 * the packet up to the stack. This function is called in softirq 1003 * the packet up to the stack. This function is called in softirq
1001 * context, e.g. either bottom half from interrupt or NAPI polling context 1004 * context, e.g. either bottom half from interrupt or NAPI polling context
1002 */ 1005 */
1003static int 1006static int
1004spider_net_decode_one_descr(struct spider_net_card *card, int napi) 1007spider_net_decode_one_descr(struct spider_net_card *card)
1005{ 1008{
1006 struct spider_net_descr_chain *chain = &card->rx_chain; 1009 struct spider_net_descr_chain *chain = &card->rx_chain;
1007 struct spider_net_descr *descr = chain->tail; 1010 struct spider_net_descr *descr = chain->tail;
1008 int status; 1011 int status;
1009 int result;
1010 1012
1011 status = spider_net_get_descr_status(descr); 1013 status = spider_net_get_descr_status(descr);
1012 1014
1013 if (status == SPIDER_NET_DESCR_CARDOWNED) { 1015 /* Nothing in the descriptor, or ring must be empty */
1014 /* nothing in the descriptor yet */ 1016 if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
1015 result=0; 1017 (status == SPIDER_NET_DESCR_NOT_IN_USE))
1016 goto out; 1018 return 0;
1017 }
1018
1019 if (status == SPIDER_NET_DESCR_NOT_IN_USE) {
1020 /* not initialized yet, the ring must be empty */
1021 spider_net_refill_rx_chain(card);
1022 spider_net_enable_rxdmac(card);
1023 result=0;
1024 goto out;
1025 }
1026 1019
1027 /* descriptor definitively used -- move on tail */ 1020 /* descriptor definitively used -- move on tail */
1028 chain->tail = descr->next; 1021 chain->tail = descr->next;
1029 1022
1030 result = 0; 1023 /* unmap descriptor */
1024 pci_unmap_single(card->pdev, descr->buf_addr,
1025 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
1026
1031 if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) || 1027 if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
1032 (status == SPIDER_NET_DESCR_PROTECTION_ERROR) || 1028 (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
1033 (status == SPIDER_NET_DESCR_FORCE_END) ) { 1029 (status == SPIDER_NET_DESCR_FORCE_END) ) {
@@ -1035,31 +1031,55 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi)
1035 pr_err("%s: dropping RX descriptor with state %d\n", 1031 pr_err("%s: dropping RX descriptor with state %d\n",
1036 card->netdev->name, status); 1032 card->netdev->name, status);
1037 card->netdev_stats.rx_dropped++; 1033 card->netdev_stats.rx_dropped++;
1038 pci_unmap_single(card->pdev, descr->buf_addr, 1034 goto bad_desc;
1039 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
1040 dev_kfree_skb_irq(descr->skb);
1041 goto refill;
1042 } 1035 }
1043 1036
1044 if ( (status != SPIDER_NET_DESCR_COMPLETE) && 1037 if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
1045 (status != SPIDER_NET_DESCR_FRAME_END) ) { 1038 (status != SPIDER_NET_DESCR_FRAME_END) ) {
1046 if (netif_msg_rx_err(card)) { 1039 if (netif_msg_rx_err(card))
1047 pr_err("%s: RX descriptor with state %d\n", 1040 pr_err("%s: RX descriptor with unkown state %d\n",
1048 card->netdev->name, status); 1041 card->netdev->name, status);
1049 card->spider_stats.rx_desc_unk_state++; 1042 card->spider_stats.rx_desc_unk_state++;
1050 } 1043 goto bad_desc;
1051 goto refill;
1052 } 1044 }
1053 1045
1054 /* ok, we've got a packet in descr */ 1046 /* The cases we'll throw away the packet immediately */
1055 result = spider_net_pass_skb_up(descr, card, napi); 1047 if (descr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
1056refill: 1048 if (netif_msg_rx_err(card))
1049 pr_err("%s: error in received descriptor found, "
1050 "data_status=x%08x, data_error=x%08x\n",
1051 card->netdev->name,
1052 descr->data_status, descr->data_error);
1053 goto bad_desc;
1054 }
1055
1056 if (descr->dmac_cmd_status & 0xfefe) {
1057 pr_err("%s: bad status, cmd_status=x%08x\n",
1058 card->netdev->name,
1059 descr->dmac_cmd_status);
1060 pr_err("buf_addr=x%08x\n", descr->buf_addr);
1061 pr_err("buf_size=x%08x\n", descr->buf_size);
1062 pr_err("next_descr_addr=x%08x\n", descr->next_descr_addr);
1063 pr_err("result_size=x%08x\n", descr->result_size);
1064 pr_err("valid_size=x%08x\n", descr->valid_size);
1065 pr_err("data_status=x%08x\n", descr->data_status);
1066 pr_err("data_error=x%08x\n", descr->data_error);
1067 pr_err("bus_addr=x%08x\n", descr->bus_addr);
1068 pr_err("which=%ld\n", descr - card->rx_chain.ring);
1069
1070 card->spider_stats.rx_desc_error++;
1071 goto bad_desc;
1072 }
1073
1074 /* Ok, we've got a packet in descr */
1075 spider_net_pass_skb_up(descr, card);
1057 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 1076 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
1058 /* change the descriptor state: */ 1077 return 1;
1059 if (!napi) 1078
1060 spider_net_refill_rx_chain(card); 1079bad_desc:
1061out: 1080 dev_kfree_skb_irq(descr->skb);
1062 return result; 1081 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
1082 return 0;
1063} 1083}
1064 1084
1065/** 1085/**
@@ -1085,7 +1105,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
1085 packets_to_do = min(*budget, netdev->quota); 1105 packets_to_do = min(*budget, netdev->quota);
1086 1106
1087 while (packets_to_do) { 1107 while (packets_to_do) {
1088 if (spider_net_decode_one_descr(card, 1)) { 1108 if (spider_net_decode_one_descr(card)) {
1089 packets_done++; 1109 packets_done++;
1090 packets_to_do--; 1110 packets_to_do--;
1091 } else { 1111 } else {
@@ -1098,6 +1118,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
1098 netdev->quota -= packets_done; 1118 netdev->quota -= packets_done;
1099 *budget -= packets_done; 1119 *budget -= packets_done;
1100 spider_net_refill_rx_chain(card); 1120 spider_net_refill_rx_chain(card);
1121 spider_net_enable_rxdmac(card);
1101 1122
1102 /* if all packets are in the stack, enable interrupts and return 0 */ 1123 /* if all packets are in the stack, enable interrupts and return 0 */
1103 /* if not, return 1 */ 1124 /* if not, return 1 */
@@ -1227,24 +1248,6 @@ spider_net_set_mac(struct net_device *netdev, void *p)
1227} 1248}
1228 1249
1229/** 1250/**
1230 * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt
1231 * @card: card structure
1232 *
1233 * spider_net_handle_rxram_full empties the RX ring so that spider can put
1234 * more packets in it and empty its RX RAM. This is called in bottom half
1235 * context
1236 */
1237static void
1238spider_net_handle_rxram_full(struct spider_net_card *card)
1239{
1240 while (spider_net_decode_one_descr(card, 0))
1241 ;
1242 spider_net_enable_rxchtails(card);
1243 spider_net_enable_rxdmac(card);
1244 netif_rx_schedule(card->netdev);
1245}
1246
1247/**
1248 * spider_net_handle_error_irq - handles errors raised by an interrupt 1251 * spider_net_handle_error_irq - handles errors raised by an interrupt
1249 * @card: card structure 1252 * @card: card structure
1250 * @status_reg: interrupt status register 0 (GHIINT0STS) 1253 * @status_reg: interrupt status register 0 (GHIINT0STS)
@@ -1366,10 +1369,10 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1366 case SPIDER_NET_GRFAFLLINT: /* fallthrough */ 1369 case SPIDER_NET_GRFAFLLINT: /* fallthrough */
1367 case SPIDER_NET_GRMFLLINT: 1370 case SPIDER_NET_GRMFLLINT:
1368 if (netif_msg_intr(card) && net_ratelimit()) 1371 if (netif_msg_intr(card) && net_ratelimit())
1369 pr_debug("Spider RX RAM full, incoming packets " 1372 pr_err("Spider RX RAM full, incoming packets "
1370 "might be discarded!\n"); 1373 "might be discarded!\n");
1371 spider_net_rx_irq_off(card); 1374 spider_net_rx_irq_off(card);
1372 tasklet_schedule(&card->rxram_full_tl); 1375 netif_rx_schedule(card->netdev);
1373 show_error = 0; 1376 show_error = 0;
1374 break; 1377 break;
1375 1378
@@ -1384,7 +1387,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1384 case SPIDER_NET_GDCDCEINT: /* fallthrough */ 1387 case SPIDER_NET_GDCDCEINT: /* fallthrough */
1385 case SPIDER_NET_GDBDCEINT: /* fallthrough */ 1388 case SPIDER_NET_GDBDCEINT: /* fallthrough */
1386 case SPIDER_NET_GDADCEINT: 1389 case SPIDER_NET_GDADCEINT:
1387 if (netif_msg_intr(card)) 1390 if (netif_msg_intr(card) && net_ratelimit())
1388 pr_err("got descriptor chain end interrupt, " 1391 pr_err("got descriptor chain end interrupt, "
1389 "restarting DMAC %c.\n", 1392 "restarting DMAC %c.\n",
1390 'D'-(i-SPIDER_NET_GDDDCEINT)/3); 1393 'D'-(i-SPIDER_NET_GDDDCEINT)/3);
@@ -1455,7 +1458,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1455 break; 1458 break;
1456 } 1459 }
1457 1460
1458 if ((show_error) && (netif_msg_intr(card))) 1461 if ((show_error) && (netif_msg_intr(card)) && net_ratelimit())
1459 pr_err("Got error interrupt on %s, GHIINT0STS = 0x%08x, " 1462 pr_err("Got error interrupt on %s, GHIINT0STS = 0x%08x, "
1460 "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n", 1463 "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
1461 card->netdev->name, 1464 card->netdev->name,
@@ -1651,27 +1654,18 @@ int
1651spider_net_open(struct net_device *netdev) 1654spider_net_open(struct net_device *netdev)
1652{ 1655{
1653 struct spider_net_card *card = netdev_priv(netdev); 1656 struct spider_net_card *card = netdev_priv(netdev);
1654 struct spider_net_descr *descr; 1657 int result;
1655 int i, result;
1656 1658
1657 result = -ENOMEM; 1659 result = spider_net_init_chain(card, &card->tx_chain);
1658 if (spider_net_init_chain(card, &card->tx_chain, card->descr, 1660 if (result)
1659 card->num_tx_desc))
1660 goto alloc_tx_failed; 1661 goto alloc_tx_failed;
1661
1662 card->low_watermark = NULL; 1662 card->low_watermark = NULL;
1663 1663
1664 /* rx_chain is after tx_chain, so offset is descr + tx_count */ 1664 result = spider_net_init_chain(card, &card->rx_chain);
1665 if (spider_net_init_chain(card, &card->rx_chain, 1665 if (result)
1666 card->descr + card->num_tx_desc,
1667 card->num_rx_desc))
1668 goto alloc_rx_failed; 1666 goto alloc_rx_failed;
1669 1667
1670 descr = card->rx_chain.head; 1668 /* Allocate rx skbs */
1671 for (i=0; i < card->num_rx_desc; i++, descr++)
1672 descr->next_descr_addr = descr->next->bus_addr;
1673
1674 /* allocate rx skbs */
1675 if (spider_net_alloc_rx_skbs(card)) 1669 if (spider_net_alloc_rx_skbs(card))
1676 goto alloc_skbs_failed; 1670 goto alloc_skbs_failed;
1677 1671
@@ -1902,7 +1896,6 @@ spider_net_stop(struct net_device *netdev)
1902{ 1896{
1903 struct spider_net_card *card = netdev_priv(netdev); 1897 struct spider_net_card *card = netdev_priv(netdev);
1904 1898
1905 tasklet_kill(&card->rxram_full_tl);
1906 netif_poll_disable(netdev); 1899 netif_poll_disable(netdev);
1907 netif_carrier_off(netdev); 1900 netif_carrier_off(netdev);
1908 netif_stop_queue(netdev); 1901 netif_stop_queue(netdev);
@@ -1924,6 +1917,7 @@ spider_net_stop(struct net_device *netdev)
1924 1917
1925 /* release chains */ 1918 /* release chains */
1926 spider_net_release_tx_chain(card, 1); 1919 spider_net_release_tx_chain(card, 1);
1920 spider_net_free_rx_chain_contents(card);
1927 1921
1928 spider_net_free_rx_chain_contents(card); 1922 spider_net_free_rx_chain_contents(card);
1929 1923
@@ -2046,9 +2040,6 @@ spider_net_setup_netdev(struct spider_net_card *card)
2046 2040
2047 pci_set_drvdata(card->pdev, netdev); 2041 pci_set_drvdata(card->pdev, netdev);
2048 2042
2049 card->rxram_full_tl.data = (unsigned long) card;
2050 card->rxram_full_tl.func =
2051 (void (*)(unsigned long)) spider_net_handle_rxram_full;
2052 init_timer(&card->tx_timer); 2043 init_timer(&card->tx_timer);
2053 card->tx_timer.function = 2044 card->tx_timer.function =
2054 (void (*)(unsigned long)) spider_net_cleanup_tx_ring; 2045 (void (*)(unsigned long)) spider_net_cleanup_tx_ring;
@@ -2057,8 +2048,8 @@ spider_net_setup_netdev(struct spider_net_card *card)
2057 2048
2058 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; 2049 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
2059 2050
2060 card->num_tx_desc = tx_descriptors; 2051 card->tx_chain.num_desc = tx_descriptors;
2061 card->num_rx_desc = rx_descriptors; 2052 card->rx_chain.num_desc = rx_descriptors;
2062 2053
2063 spider_net_setup_netdev_ops(netdev); 2054 spider_net_setup_netdev_ops(netdev);
2064 2055
@@ -2107,12 +2098,8 @@ spider_net_alloc_card(void)
2107{ 2098{
2108 struct net_device *netdev; 2099 struct net_device *netdev;
2109 struct spider_net_card *card; 2100 struct spider_net_card *card;
2110 size_t alloc_size;
2111 2101
2112 alloc_size = sizeof (*card) + 2102 netdev = alloc_etherdev(sizeof(struct spider_net_card));
2113 sizeof (struct spider_net_descr) * rx_descriptors +
2114 sizeof (struct spider_net_descr) * tx_descriptors;
2115 netdev = alloc_etherdev(alloc_size);
2116 if (!netdev) 2103 if (!netdev)
2117 return NULL; 2104 return NULL;
2118 2105
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index 3e196df29790..2fec5cf76926 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -24,7 +24,7 @@
24#ifndef _SPIDER_NET_H 24#ifndef _SPIDER_NET_H
25#define _SPIDER_NET_H 25#define _SPIDER_NET_H
26 26
27#define VERSION "1.6 A" 27#define VERSION "1.6 B"
28 28
29#include "sungem_phy.h" 29#include "sungem_phy.h"
30 30
@@ -378,6 +378,9 @@ struct spider_net_descr_chain {
378 spinlock_t lock; 378 spinlock_t lock;
379 struct spider_net_descr *head; 379 struct spider_net_descr *head;
380 struct spider_net_descr *tail; 380 struct spider_net_descr *tail;
381 struct spider_net_descr *ring;
382 int num_desc;
383 dma_addr_t dma_addr;
381}; 384};
382 385
383/* descriptor data_status bits */ 386/* descriptor data_status bits */
@@ -397,8 +400,6 @@ struct spider_net_descr_chain {
397 * 701b8000 would be correct, but every packets gets that flag */ 400 * 701b8000 would be correct, but every packets gets that flag */
398#define SPIDER_NET_DESTROY_RX_FLAGS 0x700b8000 401#define SPIDER_NET_DESTROY_RX_FLAGS 0x700b8000
399 402
400#define SPIDER_NET_DESCR_SIZE 32
401
402/* this will be bigger some time */ 403/* this will be bigger some time */
403struct spider_net_options { 404struct spider_net_options {
404 int rx_csum; /* for rx: if 0 ip_summed=NONE, 405 int rx_csum; /* for rx: if 0 ip_summed=NONE,
@@ -441,25 +442,16 @@ struct spider_net_card {
441 struct spider_net_descr_chain rx_chain; 442 struct spider_net_descr_chain rx_chain;
442 struct spider_net_descr *low_watermark; 443 struct spider_net_descr *low_watermark;
443 444
444 struct net_device_stats netdev_stats;
445
446 struct spider_net_options options;
447
448 spinlock_t intmask_lock;
449 struct tasklet_struct rxram_full_tl;
450 struct timer_list tx_timer; 445 struct timer_list tx_timer;
451
452 struct work_struct tx_timeout_task; 446 struct work_struct tx_timeout_task;
453 atomic_t tx_timeout_task_counter; 447 atomic_t tx_timeout_task_counter;
454 wait_queue_head_t waitq; 448 wait_queue_head_t waitq;
455 449
456 /* for ethtool */ 450 /* for ethtool */
457 int msg_enable; 451 int msg_enable;
458 int num_rx_desc; 452 struct net_device_stats netdev_stats;
459 int num_tx_desc;
460 struct spider_net_extra_stats spider_stats; 453 struct spider_net_extra_stats spider_stats;
461 454 struct spider_net_options options;
462 struct spider_net_descr descr[0];
463}; 455};
464 456
465#define pr_err(fmt,arg...) \ 457#define pr_err(fmt,arg...) \
diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c
index 91b995102915..6bcf03fc89be 100644
--- a/drivers/net/spider_net_ethtool.c
+++ b/drivers/net/spider_net_ethtool.c
@@ -158,9 +158,9 @@ spider_net_ethtool_get_ringparam(struct net_device *netdev,
158 struct spider_net_card *card = netdev->priv; 158 struct spider_net_card *card = netdev->priv;
159 159
160 ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX; 160 ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX;
161 ering->tx_pending = card->num_tx_desc; 161 ering->tx_pending = card->tx_chain.num_desc;
162 ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX; 162 ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX;
163 ering->rx_pending = card->num_rx_desc; 163 ering->rx_pending = card->rx_chain.num_desc;
164} 164}
165 165
166static int spider_net_get_stats_count(struct net_device *netdev) 166static int spider_net_get_stats_count(struct net_device *netdev)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f4bf62c2a7a5..135c0987deae 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -58,11 +58,7 @@
58#define TG3_VLAN_TAG_USED 0 58#define TG3_VLAN_TAG_USED 0
59#endif 59#endif
60 60
61#ifdef NETIF_F_TSO
62#define TG3_TSO_SUPPORT 1 61#define TG3_TSO_SUPPORT 1
63#else
64#define TG3_TSO_SUPPORT 0
65#endif
66 62
67#include "tg3.h" 63#include "tg3.h"
68 64
@@ -3873,7 +3869,6 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3873 3869
3874 entry = tp->tx_prod; 3870 entry = tp->tx_prod;
3875 base_flags = 0; 3871 base_flags = 0;
3876#if TG3_TSO_SUPPORT != 0
3877 mss = 0; 3872 mss = 0;
3878 if (skb->len > (tp->dev->mtu + ETH_HLEN) && 3873 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3879 (mss = skb_shinfo(skb)->gso_size) != 0) { 3874 (mss = skb_shinfo(skb)->gso_size) != 0) {
@@ -3906,11 +3901,6 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3906 } 3901 }
3907 else if (skb->ip_summed == CHECKSUM_PARTIAL) 3902 else if (skb->ip_summed == CHECKSUM_PARTIAL)
3908 base_flags |= TXD_FLAG_TCPUDP_CSUM; 3903 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3909#else
3910 mss = 0;
3911 if (skb->ip_summed == CHECKSUM_PARTIAL)
3912 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3913#endif
3914#if TG3_VLAN_TAG_USED 3904#if TG3_VLAN_TAG_USED
3915 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) 3905 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3916 base_flags |= (TXD_FLAG_VLAN | 3906 base_flags |= (TXD_FLAG_VLAN |
@@ -3970,7 +3960,6 @@ out_unlock:
3970 return NETDEV_TX_OK; 3960 return NETDEV_TX_OK;
3971} 3961}
3972 3962
3973#if TG3_TSO_SUPPORT != 0
3974static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *); 3963static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
3975 3964
3976/* Use GSO to workaround a rare TSO bug that may be triggered when the 3965/* Use GSO to workaround a rare TSO bug that may be triggered when the
@@ -4002,7 +3991,6 @@ tg3_tso_bug_end:
4002 3991
4003 return NETDEV_TX_OK; 3992 return NETDEV_TX_OK;
4004} 3993}
4005#endif
4006 3994
4007/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and 3995/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4008 * support TG3_FLG2_HW_TSO_1 or firmware TSO only. 3996 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
@@ -4036,7 +4024,6 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4036 base_flags = 0; 4024 base_flags = 0;
4037 if (skb->ip_summed == CHECKSUM_PARTIAL) 4025 if (skb->ip_summed == CHECKSUM_PARTIAL)
4038 base_flags |= TXD_FLAG_TCPUDP_CSUM; 4026 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4039#if TG3_TSO_SUPPORT != 0
4040 mss = 0; 4027 mss = 0;
4041 if (skb->len > (tp->dev->mtu + ETH_HLEN) && 4028 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
4042 (mss = skb_shinfo(skb)->gso_size) != 0) { 4029 (mss = skb_shinfo(skb)->gso_size) != 0) {
@@ -4091,9 +4078,6 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4091 } 4078 }
4092 } 4079 }
4093 } 4080 }
4094#else
4095 mss = 0;
4096#endif
4097#if TG3_VLAN_TAG_USED 4081#if TG3_VLAN_TAG_USED
4098 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) 4082 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4099 base_flags |= (TXD_FLAG_VLAN | 4083 base_flags |= (TXD_FLAG_VLAN |
@@ -5329,7 +5313,6 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5329 return 0; 5313 return 0;
5330} 5314}
5331 5315
5332#if TG3_TSO_SUPPORT != 0
5333 5316
5334#define TG3_TSO_FW_RELEASE_MAJOR 0x1 5317#define TG3_TSO_FW_RELEASE_MAJOR 0x1
5335#define TG3_TSO_FW_RELASE_MINOR 0x6 5318#define TG3_TSO_FW_RELASE_MINOR 0x6
@@ -5906,7 +5889,6 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
5906 return 0; 5889 return 0;
5907} 5890}
5908 5891
5909#endif /* TG3_TSO_SUPPORT != 0 */
5910 5892
5911/* tp->lock is held. */ 5893/* tp->lock is held. */
5912static void __tg3_set_mac_addr(struct tg3 *tp) 5894static void __tg3_set_mac_addr(struct tg3 *tp)
@@ -6120,7 +6102,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6120 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); 6102 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6121 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); 6103 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6122 } 6104 }
6123#if TG3_TSO_SUPPORT != 0
6124 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { 6105 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6125 int fw_len; 6106 int fw_len;
6126 6107
@@ -6135,7 +6116,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6135 tw32(BUFMGR_MB_POOL_SIZE, 6116 tw32(BUFMGR_MB_POOL_SIZE,
6136 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00); 6117 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6137 } 6118 }
6138#endif
6139 6119
6140 if (tp->dev->mtu <= ETH_DATA_LEN) { 6120 if (tp->dev->mtu <= ETH_DATA_LEN) {
6141 tw32(BUFMGR_MB_RDMA_LOW_WATER, 6121 tw32(BUFMGR_MB_RDMA_LOW_WATER,
@@ -6337,10 +6317,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6337 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) 6317 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6338 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 6318 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6339 6319
6340#if TG3_TSO_SUPPORT != 0
6341 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 6320 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6342 rdmac_mode |= (1 << 27); 6321 rdmac_mode |= (1 << 27);
6343#endif
6344 6322
6345 /* Receive/send statistics. */ 6323 /* Receive/send statistics. */
6346 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { 6324 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
@@ -6511,10 +6489,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6511 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); 6489 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6512 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ); 6490 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6513 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); 6491 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6514#if TG3_TSO_SUPPORT != 0
6515 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 6492 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6516 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); 6493 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6517#endif
6518 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE); 6494 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6519 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); 6495 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6520 6496
@@ -6524,13 +6500,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6524 return err; 6500 return err;
6525 } 6501 }
6526 6502
6527#if TG3_TSO_SUPPORT != 0
6528 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { 6503 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6529 err = tg3_load_tso_firmware(tp); 6504 err = tg3_load_tso_firmware(tp);
6530 if (err) 6505 if (err)
6531 return err; 6506 return err;
6532 } 6507 }
6533#endif
6534 6508
6535 tp->tx_mode = TX_MODE_ENABLE; 6509 tp->tx_mode = TX_MODE_ENABLE;
6536 tw32_f(MAC_TX_MODE, tp->tx_mode); 6510 tw32_f(MAC_TX_MODE, tp->tx_mode);
@@ -8062,7 +8036,6 @@ static void tg3_set_msglevel(struct net_device *dev, u32 value)
8062 tp->msg_enable = value; 8036 tp->msg_enable = value;
8063} 8037}
8064 8038
8065#if TG3_TSO_SUPPORT != 0
8066static int tg3_set_tso(struct net_device *dev, u32 value) 8039static int tg3_set_tso(struct net_device *dev, u32 value)
8067{ 8040{
8068 struct tg3 *tp = netdev_priv(dev); 8041 struct tg3 *tp = netdev_priv(dev);
@@ -8081,7 +8054,6 @@ static int tg3_set_tso(struct net_device *dev, u32 value)
8081 } 8054 }
8082 return ethtool_op_set_tso(dev, value); 8055 return ethtool_op_set_tso(dev, value);
8083} 8056}
8084#endif
8085 8057
8086static int tg3_nway_reset(struct net_device *dev) 8058static int tg3_nway_reset(struct net_device *dev)
8087{ 8059{
@@ -9212,10 +9184,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
9212 .set_tx_csum = tg3_set_tx_csum, 9184 .set_tx_csum = tg3_set_tx_csum,
9213 .get_sg = ethtool_op_get_sg, 9185 .get_sg = ethtool_op_get_sg,
9214 .set_sg = ethtool_op_set_sg, 9186 .set_sg = ethtool_op_set_sg,
9215#if TG3_TSO_SUPPORT != 0
9216 .get_tso = ethtool_op_get_tso, 9187 .get_tso = ethtool_op_get_tso,
9217 .set_tso = tg3_set_tso, 9188 .set_tso = tg3_set_tso,
9218#endif
9219 .self_test_count = tg3_get_test_count, 9189 .self_test_count = tg3_get_test_count,
9220 .self_test = tg3_self_test, 9190 .self_test = tg3_self_test,
9221 .get_strings = tg3_get_strings, 9191 .get_strings = tg3_get_strings,
@@ -11856,7 +11826,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
11856 11826
11857 tg3_init_bufmgr_config(tp); 11827 tg3_init_bufmgr_config(tp);
11858 11828
11859#if TG3_TSO_SUPPORT != 0
11860 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { 11829 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11861 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 11830 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11862 } 11831 }
@@ -11881,7 +11850,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
11881 dev->features |= NETIF_F_TSO6; 11850 dev->features |= NETIF_F_TSO6;
11882 } 11851 }
11883 11852
11884#endif
11885 11853
11886 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && 11854 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11887 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && 11855 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 7e4b23c7c1ba..abb8611c5a91 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2865,8 +2865,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2865 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4) 2865 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
2866 align = UCC_GETH_TX_BD_RING_ALIGNMENT; 2866 align = UCC_GETH_TX_BD_RING_ALIGNMENT;
2867 ugeth->tx_bd_ring_offset[j] = 2867 ugeth->tx_bd_ring_offset[j] =
2868 (u32) (kmalloc((u32) (length + align), 2868 kmalloc((u32) (length + align), GFP_KERNEL);
2869 GFP_KERNEL)); 2869
2870 if (ugeth->tx_bd_ring_offset[j] != 0) 2870 if (ugeth->tx_bd_ring_offset[j] != 0)
2871 ugeth->p_tx_bd_ring[j] = 2871 ugeth->p_tx_bd_ring[j] =
2872 (void*)((ugeth->tx_bd_ring_offset[j] + 2872 (void*)((ugeth->tx_bd_ring_offset[j] +
@@ -2901,7 +2901,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2901 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) 2901 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
2902 align = UCC_GETH_RX_BD_RING_ALIGNMENT; 2902 align = UCC_GETH_RX_BD_RING_ALIGNMENT;
2903 ugeth->rx_bd_ring_offset[j] = 2903 ugeth->rx_bd_ring_offset[j] =
2904 (u32) (kmalloc((u32) (length + align), GFP_KERNEL)); 2904 kmalloc((u32) (length + align), GFP_KERNEL);
2905 if (ugeth->rx_bd_ring_offset[j] != 0) 2905 if (ugeth->rx_bd_ring_offset[j] != 0)
2906 ugeth->p_rx_bd_ring[j] = 2906 ugeth->p_rx_bd_ring[j] =
2907 (void*)((ugeth->rx_bd_ring_offset[j] + 2907 (void*)((ugeth->rx_bd_ring_offset[j] +
@@ -2927,10 +2927,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2927 /* Init Tx bds */ 2927 /* Init Tx bds */
2928 for (j = 0; j < ug_info->numQueuesTx; j++) { 2928 for (j = 0; j < ug_info->numQueuesTx; j++) {
2929 /* Setup the skbuff rings */ 2929 /* Setup the skbuff rings */
2930 ugeth->tx_skbuff[j] = 2930 ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
2931 (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) * 2931 ugeth->ug_info->bdRingLenTx[j],
2932 ugeth->ug_info->bdRingLenTx[j], 2932 GFP_KERNEL);
2933 GFP_KERNEL);
2934 2933
2935 if (ugeth->tx_skbuff[j] == NULL) { 2934 if (ugeth->tx_skbuff[j] == NULL) {
2936 ugeth_err("%s: Could not allocate tx_skbuff", 2935 ugeth_err("%s: Could not allocate tx_skbuff",
@@ -2959,10 +2958,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2959 /* Init Rx bds */ 2958 /* Init Rx bds */
2960 for (j = 0; j < ug_info->numQueuesRx; j++) { 2959 for (j = 0; j < ug_info->numQueuesRx; j++) {
2961 /* Setup the skbuff rings */ 2960 /* Setup the skbuff rings */
2962 ugeth->rx_skbuff[j] = 2961 ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
2963 (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) * 2962 ugeth->ug_info->bdRingLenRx[j],
2964 ugeth->ug_info->bdRingLenRx[j], 2963 GFP_KERNEL);
2965 GFP_KERNEL);
2966 2964
2967 if (ugeth->rx_skbuff[j] == NULL) { 2965 if (ugeth->rx_skbuff[j] == NULL) {
2968 ugeth_err("%s: Could not allocate rx_skbuff", 2966 ugeth_err("%s: Could not allocate rx_skbuff",
@@ -3453,8 +3451,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3453 * allocated resources can be released when the channel is freed. 3451 * allocated resources can be released when the channel is freed.
3454 */ 3452 */
3455 if (!(ugeth->p_init_enet_param_shadow = 3453 if (!(ugeth->p_init_enet_param_shadow =
3456 (struct ucc_geth_init_pram *) kmalloc(sizeof(struct ucc_geth_init_pram), 3454 kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
3457 GFP_KERNEL))) {
3458 ugeth_err 3455 ugeth_err
3459 ("%s: Can not allocate memory for" 3456 ("%s: Can not allocate memory for"
3460 " p_UccInitEnetParamShadows.", __FUNCTION__); 3457 " p_UccInitEnetParamShadows.", __FUNCTION__);
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 21f76f51c95e..61708cf4c85d 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -235,6 +235,19 @@ comment "Cyclades-PC300 MLPPP support is disabled."
235comment "Refer to the file README.mlppp, provided by PC300 package." 235comment "Refer to the file README.mlppp, provided by PC300 package."
236 depends on WAN && HDLC && PC300 && (PPP=n || !PPP_MULTILINK || PPP_SYNC_TTY=n || !HDLC_PPP) 236 depends on WAN && HDLC && PC300 && (PPP=n || !PPP_MULTILINK || PPP_SYNC_TTY=n || !HDLC_PPP)
237 237
238config PC300TOO
239 tristate "Cyclades PC300 RSV/X21 alternative support"
240 depends on HDLC && PCI
241 help
242 Alternative driver for PC300 RSV/X21 PCI cards made by
243 Cyclades, Inc. If you have such a card, say Y here and see
244 <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
245
246 To compile this as a module, choose M here: the module
247 will be called pc300too.
248
249 If unsure, say N here.
250
238config N2 251config N2
239 tristate "SDL RISCom/N2 support" 252 tristate "SDL RISCom/N2 support"
240 depends on HDLC && ISA 253 depends on HDLC && ISA
@@ -344,17 +357,6 @@ config DLCI
344 To compile this driver as a module, choose M here: the 357 To compile this driver as a module, choose M here: the
345 module will be called dlci. 358 module will be called dlci.
346 359
347config DLCI_COUNT
348 int "Max open DLCI"
349 depends on DLCI
350 default "24"
351 help
352 Maximal number of logical point-to-point frame relay connections
353 (the identifiers of which are called DCLIs) that the driver can
354 handle.
355
356 The default is probably fine.
357
358config DLCI_MAX 360config DLCI_MAX
359 int "Max DLCI per device" 361 int "Max DLCI per device"
360 depends on DLCI 362 depends on DLCI
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 83ec2c87ba3f..d61fef36afc9 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_N2) += n2.o
41obj-$(CONFIG_C101) += c101.o 41obj-$(CONFIG_C101) += c101.o
42obj-$(CONFIG_WANXL) += wanxl.o 42obj-$(CONFIG_WANXL) += wanxl.o
43obj-$(CONFIG_PCI200SYN) += pci200syn.o 43obj-$(CONFIG_PCI200SYN) += pci200syn.o
44obj-$(CONFIG_PC300TOO) += pc300too.o
44 45
45clean-files := wanxlfw.inc 46clean-files := wanxlfw.inc
46$(obj)/wanxl.o: $(obj)/wanxlfw.inc 47$(obj)/wanxl.o: $(obj)/wanxlfw.inc
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index db354e0edbe5..9040d7cf651e 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -222,7 +222,7 @@ int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
222 return -EINVAL; 222 return -EINVAL;
223} 223}
224 224
225void hdlc_setup(struct net_device *dev) 225static void hdlc_setup(struct net_device *dev)
226{ 226{
227 hdlc_device *hdlc = dev_to_hdlc(dev); 227 hdlc_device *hdlc = dev_to_hdlc(dev);
228 228
@@ -325,7 +325,6 @@ MODULE_LICENSE("GPL v2");
325EXPORT_SYMBOL(hdlc_open); 325EXPORT_SYMBOL(hdlc_open);
326EXPORT_SYMBOL(hdlc_close); 326EXPORT_SYMBOL(hdlc_close);
327EXPORT_SYMBOL(hdlc_ioctl); 327EXPORT_SYMBOL(hdlc_ioctl);
328EXPORT_SYMBOL(hdlc_setup);
329EXPORT_SYMBOL(alloc_hdlcdev); 328EXPORT_SYMBOL(alloc_hdlcdev);
330EXPORT_SYMBOL(unregister_hdlc_device); 329EXPORT_SYMBOL(unregister_hdlc_device);
331EXPORT_SYMBOL(register_hdlc_protocol); 330EXPORT_SYMBOL(register_hdlc_protocol);
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
new file mode 100644
index 000000000000..79b2d5454d6b
--- /dev/null
+++ b/drivers/net/wan/pc300too.c
@@ -0,0 +1,565 @@
1/*
2 * Cyclades PC300 synchronous serial card driver for Linux
3 *
4 * Copyright (C) 2000-2007 Krzysztof Halasa <khc@pm.waw.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
9 *
10 * For information see <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
11 *
12 * Sources of information:
13 * Hitachi HD64572 SCA-II User's Manual
14 * Cyclades PC300 Linux driver
15 *
16 * This driver currently supports only PC300/RSV (V.24/V.35) and
17 * PC300/X21 cards.
18 */
19
20#include <linux/module.h>
21#include <linux/kernel.h>
22#include <linux/slab.h>
23#include <linux/sched.h>
24#include <linux/types.h>
25#include <linux/fcntl.h>
26#include <linux/in.h>
27#include <linux/string.h>
28#include <linux/errno.h>
29#include <linux/init.h>
30#include <linux/ioport.h>
31#include <linux/moduleparam.h>
32#include <linux/netdevice.h>
33#include <linux/hdlc.h>
34#include <linux/pci.h>
35#include <linux/delay.h>
36#include <asm/io.h>
37
38#include "hd64572.h"
39
40static const char* version = "Cyclades PC300 driver version: 1.17";
41static const char* devname = "PC300";
42
43#undef DEBUG_PKT
44#define DEBUG_RINGS
45
46#define PC300_PLX_SIZE 0x80 /* PLX control window size (128 B) */
47#define PC300_SCA_SIZE 0x400 /* SCA window size (1 KB) */
48#define ALL_PAGES_ALWAYS_MAPPED
49#define NEED_DETECT_RAM
50#define NEED_SCA_MSCI_INTR
51#define MAX_TX_BUFFERS 10
52
53static int pci_clock_freq = 33000000;
54static int use_crystal_clock = 0;
55static unsigned int CLOCK_BASE;
56
57/* Masks to access the init_ctrl PLX register */
58#define PC300_CLKSEL_MASK (0x00000004UL)
59#define PC300_CHMEDIA_MASK(port) (0x00000020UL << ((port) * 3))
60#define PC300_CTYPE_MASK (0x00000800UL)
61
62
63enum { PC300_RSV = 1, PC300_X21, PC300_TE }; /* card types */
64
65/*
66 * PLX PCI9050-1 local configuration and shared runtime registers.
67 * This structure can be used to access 9050 registers (memory mapped).
68 */
69typedef struct {
70 u32 loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */
71 u32 loc_rom_range; /* 10h : Local ROM Range */
72 u32 loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */
73 u32 loc_rom_base; /* 24h : Local ROM Base */
74 u32 loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */
75 u32 rom_bus_descr; /* 38h : ROM Bus Descriptor */
76 u32 cs_base[4]; /* 3C-48h : Chip Select Base Addrs */
77 u32 intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */
78 u32 init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */
79}plx9050;
80
81
82
83typedef struct port_s {
84 struct net_device *dev;
85 struct card_s *card;
86 spinlock_t lock; /* TX lock */
87 sync_serial_settings settings;
88 int rxpart; /* partial frame received, next frame invalid*/
89 unsigned short encoding;
90 unsigned short parity;
91 unsigned int iface;
92 u16 rxin; /* rx ring buffer 'in' pointer */
93 u16 txin; /* tx ring buffer 'in' and 'last' pointers */
94 u16 txlast;
95 u8 rxs, txs, tmc; /* SCA registers */
96 u8 phy_node; /* physical port # - 0 or 1 */
97}port_t;
98
99
100
101typedef struct card_s {
102 int type; /* RSV, X21, etc. */
103 int n_ports; /* 1 or 2 ports */
104 u8* __iomem rambase; /* buffer memory base (virtual) */
105 u8* __iomem scabase; /* SCA memory base (virtual) */
106 plx9050 __iomem *plxbase; /* PLX registers memory base (virtual) */
107 u32 init_ctrl_value; /* Saved value - 9050 bug workaround */
108 u16 rx_ring_buffers; /* number of buffers in a ring */
109 u16 tx_ring_buffers;
110 u16 buff_offset; /* offset of first buffer of first channel */
111 u8 irq; /* interrupt request level */
112
113 port_t ports[2];
114}card_t;
115
116
117#define sca_in(reg, card) readb(card->scabase + (reg))
118#define sca_out(value, reg, card) writeb(value, card->scabase + (reg))
119#define sca_inw(reg, card) readw(card->scabase + (reg))
120#define sca_outw(value, reg, card) writew(value, card->scabase + (reg))
121#define sca_inl(reg, card) readl(card->scabase + (reg))
122#define sca_outl(value, reg, card) writel(value, card->scabase + (reg))
123
124#define port_to_card(port) (port->card)
125#define log_node(port) (port->phy_node)
126#define phy_node(port) (port->phy_node)
127#define winbase(card) (card->rambase)
128#define get_port(card, port) ((port) < (card)->n_ports ? \
129 (&(card)->ports[port]) : (NULL))
130
131#include "hd6457x.c"
132
133
134static void pc300_set_iface(port_t *port)
135{
136 card_t *card = port->card;
137 u32* init_ctrl = &card->plxbase->init_ctrl;
138 u16 msci = get_msci(port);
139 u8 rxs = port->rxs & CLK_BRG_MASK;
140 u8 txs = port->txs & CLK_BRG_MASK;
141
142 sca_out(EXS_TES1, (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
143 port_to_card(port));
144 switch(port->settings.clock_type) {
145 case CLOCK_INT:
146 rxs |= CLK_BRG; /* BRG output */
147 txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
148 break;
149
150 case CLOCK_TXINT:
151 rxs |= CLK_LINE; /* RXC input */
152 txs |= CLK_PIN_OUT | CLK_BRG; /* BRG output */
153 break;
154
155 case CLOCK_TXFROMRX:
156 rxs |= CLK_LINE; /* RXC input */
157 txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
158 break;
159
160 default: /* EXTernal clock */
161 rxs |= CLK_LINE; /* RXC input */
162 txs |= CLK_PIN_OUT | CLK_LINE; /* TXC input */
163 break;
164 }
165
166 port->rxs = rxs;
167 port->txs = txs;
168 sca_out(rxs, msci + RXS, card);
169 sca_out(txs, msci + TXS, card);
170 sca_set_port(port);
171
172 if (port->card->type == PC300_RSV) {
173 if (port->iface == IF_IFACE_V35)
174 writel(card->init_ctrl_value |
175 PC300_CHMEDIA_MASK(port->phy_node), init_ctrl);
176 else
177 writel(card->init_ctrl_value &
178 ~PC300_CHMEDIA_MASK(port->phy_node), init_ctrl);
179 }
180}
181
182
183
184static int pc300_open(struct net_device *dev)
185{
186 port_t *port = dev_to_port(dev);
187
188 int result = hdlc_open(dev);
189 if (result)
190 return result;
191
192 sca_open(dev);
193 pc300_set_iface(port);
194 return 0;
195}
196
197
198
199static int pc300_close(struct net_device *dev)
200{
201 sca_close(dev);
202 hdlc_close(dev);
203 return 0;
204}
205
206
207
208static int pc300_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
209{
210 const size_t size = sizeof(sync_serial_settings);
211 sync_serial_settings new_line;
212 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
213 int new_type;
214 port_t *port = dev_to_port(dev);
215
216#ifdef DEBUG_RINGS
217 if (cmd == SIOCDEVPRIVATE) {
218 sca_dump_rings(dev);
219 return 0;
220 }
221#endif
222 if (cmd != SIOCWANDEV)
223 return hdlc_ioctl(dev, ifr, cmd);
224
225 if (ifr->ifr_settings.type == IF_GET_IFACE) {
226 ifr->ifr_settings.type = port->iface;
227 if (ifr->ifr_settings.size < size) {
228 ifr->ifr_settings.size = size; /* data size wanted */
229 return -ENOBUFS;
230 }
231 if (copy_to_user(line, &port->settings, size))
232 return -EFAULT;
233 return 0;
234
235 }
236
237 if (port->card->type == PC300_X21 &&
238 (ifr->ifr_settings.type == IF_IFACE_SYNC_SERIAL ||
239 ifr->ifr_settings.type == IF_IFACE_X21))
240 new_type = IF_IFACE_X21;
241
242 else if (port->card->type == PC300_RSV &&
243 (ifr->ifr_settings.type == IF_IFACE_SYNC_SERIAL ||
244 ifr->ifr_settings.type == IF_IFACE_V35))
245 new_type = IF_IFACE_V35;
246
247 else if (port->card->type == PC300_RSV &&
248 ifr->ifr_settings.type == IF_IFACE_V24)
249 new_type = IF_IFACE_V24;
250
251 else
252 return hdlc_ioctl(dev, ifr, cmd);
253
254 if (!capable(CAP_NET_ADMIN))
255 return -EPERM;
256
257 if (copy_from_user(&new_line, line, size))
258 return -EFAULT;
259
260 if (new_line.clock_type != CLOCK_EXT &&
261 new_line.clock_type != CLOCK_TXFROMRX &&
262 new_line.clock_type != CLOCK_INT &&
263 new_line.clock_type != CLOCK_TXINT)
264 return -EINVAL; /* No such clock setting */
265
266 if (new_line.loopback != 0 && new_line.loopback != 1)
267 return -EINVAL;
268
269 memcpy(&port->settings, &new_line, size); /* Update settings */
270 port->iface = new_type;
271 pc300_set_iface(port);
272 return 0;
273}
274
275
276
277static void pc300_pci_remove_one(struct pci_dev *pdev)
278{
279 int i;
280 card_t *card = pci_get_drvdata(pdev);
281
282 for (i = 0; i < 2; i++)
283 if (card->ports[i].card) {
284 struct net_device *dev = port_to_dev(&card->ports[i]);
285 unregister_hdlc_device(dev);
286 }
287
288 if (card->irq)
289 free_irq(card->irq, card);
290
291 if (card->rambase)
292 iounmap(card->rambase);
293 if (card->scabase)
294 iounmap(card->scabase);
295 if (card->plxbase)
296 iounmap(card->plxbase);
297
298 pci_release_regions(pdev);
299 pci_disable_device(pdev);
300 pci_set_drvdata(pdev, NULL);
301 if (card->ports[0].dev)
302 free_netdev(card->ports[0].dev);
303 if (card->ports[1].dev)
304 free_netdev(card->ports[1].dev);
305 kfree(card);
306}
307
308
309
310static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
311 const struct pci_device_id *ent)
312{
313 card_t *card;
314 u8 rev_id;
315 u32 __iomem *p;
316 int i;
317 u32 ramsize;
318 u32 ramphys; /* buffer memory base */
319 u32 scaphys; /* SCA memory base */
320 u32 plxphys; /* PLX registers memory base */
321
322#ifndef MODULE
323 static int printed_version;
324 if (!printed_version++)
325 printk(KERN_INFO "%s\n", version);
326#endif
327
328 i = pci_enable_device(pdev);
329 if (i)
330 return i;
331
332 i = pci_request_regions(pdev, "PC300");
333 if (i) {
334 pci_disable_device(pdev);
335 return i;
336 }
337
338 card = kmalloc(sizeof(card_t), GFP_KERNEL);
339 if (card == NULL) {
340 printk(KERN_ERR "pc300: unable to allocate memory\n");
341 pci_release_regions(pdev);
342 pci_disable_device(pdev);
343 return -ENOBUFS;
344 }
345 memset(card, 0, sizeof(card_t));
346 pci_set_drvdata(pdev, card);
347
348 if (pdev->device == PCI_DEVICE_ID_PC300_TE_1 ||
349 pdev->device == PCI_DEVICE_ID_PC300_TE_2)
350 card->type = PC300_TE; /* not fully supported */
351 else if (card->init_ctrl_value & PC300_CTYPE_MASK)
352 card->type = PC300_X21;
353 else
354 card->type = PC300_RSV;
355
356 if (pdev->device == PCI_DEVICE_ID_PC300_RX_1 ||
357 pdev->device == PCI_DEVICE_ID_PC300_TE_1)
358 card->n_ports = 1;
359 else
360 card->n_ports = 2;
361
362 for (i = 0; i < card->n_ports; i++)
363 if (!(card->ports[i].dev = alloc_hdlcdev(&card->ports[i]))) {
364 printk(KERN_ERR "pc300: unable to allocate memory\n");
365 pc300_pci_remove_one(pdev);
366 return -ENOMEM;
367 }
368
369 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
370 if (pci_resource_len(pdev, 0) != PC300_PLX_SIZE ||
371 pci_resource_len(pdev, 2) != PC300_SCA_SIZE ||
372 pci_resource_len(pdev, 3) < 16384) {
373 printk(KERN_ERR "pc300: invalid card EEPROM parameters\n");
374 pc300_pci_remove_one(pdev);
375 return -EFAULT;
376 }
377
378 plxphys = pci_resource_start(pdev,0) & PCI_BASE_ADDRESS_MEM_MASK;
379 card->plxbase = ioremap(plxphys, PC300_PLX_SIZE);
380
381 scaphys = pci_resource_start(pdev,2) & PCI_BASE_ADDRESS_MEM_MASK;
382 card->scabase = ioremap(scaphys, PC300_SCA_SIZE);
383
384 ramphys = pci_resource_start(pdev,3) & PCI_BASE_ADDRESS_MEM_MASK;
385 card->rambase = ioremap(ramphys, pci_resource_len(pdev,3));
386
387 if (card->plxbase == NULL ||
388 card->scabase == NULL ||
389 card->rambase == NULL) {
390 printk(KERN_ERR "pc300: ioremap() failed\n");
391 pc300_pci_remove_one(pdev);
392 }
393
394 /* PLX PCI 9050 workaround for local configuration register read bug */
395 pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, scaphys);
396 card->init_ctrl_value = readl(&((plx9050*)card->scabase)->init_ctrl);
397 pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, plxphys);
398
399 /* Reset PLX */
400 p = &card->plxbase->init_ctrl;
401 writel(card->init_ctrl_value | 0x40000000, p);
402 readl(p); /* Flush the write - do not use sca_flush */
403 udelay(1);
404
405 writel(card->init_ctrl_value, p);
406 readl(p); /* Flush the write - do not use sca_flush */
407 udelay(1);
408
409 /* Reload Config. Registers from EEPROM */
410 writel(card->init_ctrl_value | 0x20000000, p);
411 readl(p); /* Flush the write - do not use sca_flush */
412 udelay(1);
413
414 writel(card->init_ctrl_value, p);
415 readl(p); /* Flush the write - do not use sca_flush */
416 udelay(1);
417
418 ramsize = sca_detect_ram(card, card->rambase,
419 pci_resource_len(pdev, 3));
420
421 if (use_crystal_clock)
422 card->init_ctrl_value &= ~PC300_CLKSEL_MASK;
423 else
424 card->init_ctrl_value |= PC300_CLKSEL_MASK;
425
426 writel(card->init_ctrl_value, &card->plxbase->init_ctrl);
427 /* number of TX + RX buffers for one port */
428 i = ramsize / (card->n_ports * (sizeof(pkt_desc) + HDLC_MAX_MRU));
429 card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS);
430 card->rx_ring_buffers = i - card->tx_ring_buffers;
431
432 card->buff_offset = card->n_ports * sizeof(pkt_desc) *
433 (card->tx_ring_buffers + card->rx_ring_buffers);
434
435 printk(KERN_INFO "pc300: PC300/%s, %u KB RAM at 0x%x, IRQ%u, "
436 "using %u TX + %u RX packets rings\n",
437 card->type == PC300_X21 ? "X21" :
438 card->type == PC300_TE ? "TE" : "RSV",
439 ramsize / 1024, ramphys, pdev->irq,
440 card->tx_ring_buffers, card->rx_ring_buffers);
441
442 if (card->tx_ring_buffers < 1) {
443 printk(KERN_ERR "pc300: RAM test failed\n");
444 pc300_pci_remove_one(pdev);
445 return -EFAULT;
446 }
447
448 /* Enable interrupts on the PCI bridge, LINTi1 active low */
449 writew(0x0041, &card->plxbase->intr_ctrl_stat);
450
451 /* Allocate IRQ */
452 if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, devname, card)) {
453 printk(KERN_WARNING "pc300: could not allocate IRQ%d.\n",
454 pdev->irq);
455 pc300_pci_remove_one(pdev);
456 return -EBUSY;
457 }
458 card->irq = pdev->irq;
459
460 sca_init(card, 0);
461
462 // COTE not set - allows better TX DMA settings
463 // sca_out(sca_in(PCR, card) | PCR_COTE, PCR, card);
464
465 sca_out(0x10, BTCR, card);
466
467 for (i = 0; i < card->n_ports; i++) {
468 port_t *port = &card->ports[i];
469 struct net_device *dev = port_to_dev(port);
470 hdlc_device *hdlc = dev_to_hdlc(dev);
471 port->phy_node = i;
472
473 spin_lock_init(&port->lock);
474 SET_MODULE_OWNER(dev);
475 dev->irq = card->irq;
476 dev->mem_start = ramphys;
477 dev->mem_end = ramphys + ramsize - 1;
478 dev->tx_queue_len = 50;
479 dev->do_ioctl = pc300_ioctl;
480 dev->open = pc300_open;
481 dev->stop = pc300_close;
482 hdlc->attach = sca_attach;
483 hdlc->xmit = sca_xmit;
484 port->settings.clock_type = CLOCK_EXT;
485 port->card = card;
486 if (card->type == PC300_X21)
487 port->iface = IF_IFACE_X21;
488 else
489 port->iface = IF_IFACE_V35;
490
491 if (register_hdlc_device(dev)) {
492 printk(KERN_ERR "pc300: unable to register hdlc "
493 "device\n");
494 port->card = NULL;
495 pc300_pci_remove_one(pdev);
496 return -ENOBUFS;
497 }
498 sca_init_sync_port(port); /* Set up SCA memory */
499
500 printk(KERN_INFO "%s: PC300 node %d\n",
501 dev->name, port->phy_node);
502 }
503 return 0;
504}
505
506
507
508static struct pci_device_id pc300_pci_tbl[] __devinitdata = {
509 { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_1, PCI_ANY_ID,
510 PCI_ANY_ID, 0, 0, 0 },
511 { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_2, PCI_ANY_ID,
512 PCI_ANY_ID, 0, 0, 0 },
513 { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_TE_1, PCI_ANY_ID,
514 PCI_ANY_ID, 0, 0, 0 },
515 { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_TE_2, PCI_ANY_ID,
516 PCI_ANY_ID, 0, 0, 0 },
517 { 0, }
518};
519
520
521static struct pci_driver pc300_pci_driver = {
522 name: "PC300",
523 id_table: pc300_pci_tbl,
524 probe: pc300_pci_init_one,
525 remove: pc300_pci_remove_one,
526};
527
528
529static int __init pc300_init_module(void)
530{
531#ifdef MODULE
532 printk(KERN_INFO "%s\n", version);
533#endif
534 if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
535 printk(KERN_ERR "pc300: Invalid PCI clock frequency\n");
536 return -EINVAL;
537 }
538 if (use_crystal_clock != 0 && use_crystal_clock != 1) {
539 printk(KERN_ERR "pc300: Invalid 'use_crystal_clock' value\n");
540 return -EINVAL;
541 }
542
543 CLOCK_BASE = use_crystal_clock ? 24576000 : pci_clock_freq;
544
545 return pci_module_init(&pc300_pci_driver);
546}
547
548
549
550static void __exit pc300_cleanup_module(void)
551{
552 pci_unregister_driver(&pc300_pci_driver);
553}
554
555MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
556MODULE_DESCRIPTION("Cyclades PC300 serial port driver");
557MODULE_LICENSE("GPL v2");
558MODULE_DEVICE_TABLE(pci, pc300_pci_tbl);
559module_param(pci_clock_freq, int, 0444);
560MODULE_PARM_DESC(pci_clock_freq, "System PCI clock frequency in Hz");
561module_param(use_crystal_clock, int, 0444);
562MODULE_PARM_DESC(use_crystal_clock,
563 "Use 24.576 MHz clock instead of PCI clock");
564module_init(pc300_init_module);
565module_exit(pc300_cleanup_module);
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index 59ddd21c3958..8dbcf83bb5f3 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -331,8 +331,7 @@ static void z8530_rtsdtr(struct z8530_channel *c, int set)
331static void z8530_rx(struct z8530_channel *c) 331static void z8530_rx(struct z8530_channel *c)
332{ 332{
333 u8 ch,stat; 333 u8 ch,stat;
334 spin_lock(c->lock); 334
335
336 while(1) 335 while(1)
337 { 336 {
338 /* FIFO empty ? */ 337 /* FIFO empty ? */
@@ -390,7 +389,6 @@ static void z8530_rx(struct z8530_channel *c)
390 */ 389 */
391 write_zsctrl(c, ERR_RES); 390 write_zsctrl(c, ERR_RES);
392 write_zsctrl(c, RES_H_IUS); 391 write_zsctrl(c, RES_H_IUS);
393 spin_unlock(c->lock);
394} 392}
395 393
396 394
@@ -406,7 +404,6 @@ static void z8530_rx(struct z8530_channel *c)
406 404
407static void z8530_tx(struct z8530_channel *c) 405static void z8530_tx(struct z8530_channel *c)
408{ 406{
409 spin_lock(c->lock);
410 while(c->txcount) { 407 while(c->txcount) {
411 /* FIFO full ? */ 408 /* FIFO full ? */
412 if(!(read_zsreg(c, R0)&4)) 409 if(!(read_zsreg(c, R0)&4))
@@ -434,7 +431,6 @@ static void z8530_tx(struct z8530_channel *c)
434 431
435 z8530_tx_done(c); 432 z8530_tx_done(c);
436 write_zsctrl(c, RES_H_IUS); 433 write_zsctrl(c, RES_H_IUS);
437 spin_unlock(c->lock);
438} 434}
439 435
440/** 436/**
@@ -452,7 +448,6 @@ static void z8530_status(struct z8530_channel *chan)
452{ 448{
453 u8 status, altered; 449 u8 status, altered;
454 450
455 spin_lock(chan->lock);
456 status=read_zsreg(chan, R0); 451 status=read_zsreg(chan, R0);
457 altered=chan->status^status; 452 altered=chan->status^status;
458 453
@@ -487,7 +482,6 @@ static void z8530_status(struct z8530_channel *chan)
487 } 482 }
488 write_zsctrl(chan, RES_EXT_INT); 483 write_zsctrl(chan, RES_EXT_INT);
489 write_zsctrl(chan, RES_H_IUS); 484 write_zsctrl(chan, RES_H_IUS);
490 spin_unlock(chan->lock);
491} 485}
492 486
493struct z8530_irqhandler z8530_sync= 487struct z8530_irqhandler z8530_sync=
@@ -511,7 +505,6 @@ EXPORT_SYMBOL(z8530_sync);
511 505
512static void z8530_dma_rx(struct z8530_channel *chan) 506static void z8530_dma_rx(struct z8530_channel *chan)
513{ 507{
514 spin_lock(chan->lock);
515 if(chan->rxdma_on) 508 if(chan->rxdma_on)
516 { 509 {
517 /* Special condition check only */ 510 /* Special condition check only */
@@ -534,7 +527,6 @@ static void z8530_dma_rx(struct z8530_channel *chan)
534 /* DMA is off right now, drain the slow way */ 527 /* DMA is off right now, drain the slow way */
535 z8530_rx(chan); 528 z8530_rx(chan);
536 } 529 }
537 spin_unlock(chan->lock);
538} 530}
539 531
540/** 532/**
@@ -547,7 +539,6 @@ static void z8530_dma_rx(struct z8530_channel *chan)
547 539
548static void z8530_dma_tx(struct z8530_channel *chan) 540static void z8530_dma_tx(struct z8530_channel *chan)
549{ 541{
550 spin_lock(chan->lock);
551 if(!chan->dma_tx) 542 if(!chan->dma_tx)
552 { 543 {
553 printk(KERN_WARNING "Hey who turned the DMA off?\n"); 544 printk(KERN_WARNING "Hey who turned the DMA off?\n");
@@ -557,7 +548,6 @@ static void z8530_dma_tx(struct z8530_channel *chan)
557 /* This shouldnt occur in DMA mode */ 548 /* This shouldnt occur in DMA mode */
558 printk(KERN_ERR "DMA tx - bogus event!\n"); 549 printk(KERN_ERR "DMA tx - bogus event!\n");
559 z8530_tx(chan); 550 z8530_tx(chan);
560 spin_unlock(chan->lock);
561} 551}
562 552
563/** 553/**
@@ -596,7 +586,6 @@ static void z8530_dma_status(struct z8530_channel *chan)
596 } 586 }
597 } 587 }
598 588
599 spin_lock(chan->lock);
600 if(altered&chan->dcdcheck) 589 if(altered&chan->dcdcheck)
601 { 590 {
602 if(status&chan->dcdcheck) 591 if(status&chan->dcdcheck)
@@ -618,7 +607,6 @@ static void z8530_dma_status(struct z8530_channel *chan)
618 607
619 write_zsctrl(chan, RES_EXT_INT); 608 write_zsctrl(chan, RES_EXT_INT);
620 write_zsctrl(chan, RES_H_IUS); 609 write_zsctrl(chan, RES_H_IUS);
621 spin_unlock(chan->lock);
622} 610}
623 611
624struct z8530_irqhandler z8530_dma_sync= 612struct z8530_irqhandler z8530_dma_sync=
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h
index 8286678513b9..3a064def162e 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx.h
@@ -352,6 +352,10 @@
352#define BCM43xx_UCODEFLAG_UNKPACTRL 0x0040 352#define BCM43xx_UCODEFLAG_UNKPACTRL 0x0040
353#define BCM43xx_UCODEFLAG_JAPAN 0x0080 353#define BCM43xx_UCODEFLAG_JAPAN 0x0080
354 354
355/* Hardware Radio Enable masks */
356#define BCM43xx_MMIO_RADIO_HWENABLED_HI_MASK (1 << 16)
357#define BCM43xx_MMIO_RADIO_HWENABLED_LO_MASK (1 << 4)
358
355/* Generic-Interrupt reasons. */ 359/* Generic-Interrupt reasons. */
356#define BCM43xx_IRQ_READY (1 << 0) 360#define BCM43xx_IRQ_READY (1 << 0)
357#define BCM43xx_IRQ_BEACON (1 << 1) 361#define BCM43xx_IRQ_BEACON (1 << 1)
@@ -758,7 +762,8 @@ struct bcm43xx_private {
758 bad_frames_preempt:1, /* Use "Bad Frames Preemption" (default off) */ 762 bad_frames_preempt:1, /* Use "Bad Frames Preemption" (default off) */
759 reg124_set_0x4:1, /* Some variable to keep track of IRQ stuff. */ 763 reg124_set_0x4:1, /* Some variable to keep track of IRQ stuff. */
760 short_preamble:1, /* TRUE, if short preamble is enabled. */ 764 short_preamble:1, /* TRUE, if short preamble is enabled. */
761 firmware_norelease:1; /* Do not release the firmware. Used on suspend. */ 765 firmware_norelease:1, /* Do not release the firmware. Used on suspend. */
766 radio_hw_enable:1; /* TRUE if radio is hardware enabled */
762 767
763 struct bcm43xx_stats stats; 768 struct bcm43xx_stats stats;
764 769
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_leds.c b/drivers/net/wireless/bcm43xx/bcm43xx_leds.c
index 7d383a27b927..8f198befba39 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_leds.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_leds.c
@@ -26,6 +26,7 @@
26*/ 26*/
27 27
28#include "bcm43xx_leds.h" 28#include "bcm43xx_leds.h"
29#include "bcm43xx_radio.h"
29#include "bcm43xx.h" 30#include "bcm43xx.h"
30 31
31#include <asm/bitops.h> 32#include <asm/bitops.h>
@@ -108,6 +109,7 @@ static void bcm43xx_led_init_hardcoded(struct bcm43xx_private *bcm,
108 switch (led_index) { 109 switch (led_index) {
109 case 0: 110 case 0:
110 led->behaviour = BCM43xx_LED_ACTIVITY; 111 led->behaviour = BCM43xx_LED_ACTIVITY;
112 led->activelow = 1;
111 if (bcm->board_vendor == PCI_VENDOR_ID_COMPAQ) 113 if (bcm->board_vendor == PCI_VENDOR_ID_COMPAQ)
112 led->behaviour = BCM43xx_LED_RADIO_ALL; 114 led->behaviour = BCM43xx_LED_RADIO_ALL;
113 break; 115 break;
@@ -199,20 +201,21 @@ void bcm43xx_leds_update(struct bcm43xx_private *bcm, int activity)
199 turn_on = activity; 201 turn_on = activity;
200 break; 202 break;
201 case BCM43xx_LED_RADIO_ALL: 203 case BCM43xx_LED_RADIO_ALL:
202 turn_on = radio->enabled; 204 turn_on = radio->enabled && bcm43xx_is_hw_radio_enabled(bcm);
203 break; 205 break;
204 case BCM43xx_LED_RADIO_A: 206 case BCM43xx_LED_RADIO_A:
205 case BCM43xx_LED_BCM4303_2: 207 case BCM43xx_LED_BCM4303_2:
206 turn_on = (radio->enabled && phy->type == BCM43xx_PHYTYPE_A); 208 turn_on = (radio->enabled && bcm43xx_is_hw_radio_enabled(bcm) &&
209 phy->type == BCM43xx_PHYTYPE_A);
207 break; 210 break;
208 case BCM43xx_LED_RADIO_B: 211 case BCM43xx_LED_RADIO_B:
209 case BCM43xx_LED_BCM4303_1: 212 case BCM43xx_LED_BCM4303_1:
210 turn_on = (radio->enabled && 213 turn_on = (radio->enabled && bcm43xx_is_hw_radio_enabled(bcm) &&
211 (phy->type == BCM43xx_PHYTYPE_B || 214 (phy->type == BCM43xx_PHYTYPE_B ||
212 phy->type == BCM43xx_PHYTYPE_G)); 215 phy->type == BCM43xx_PHYTYPE_G));
213 break; 216 break;
214 case BCM43xx_LED_MODE_BG: 217 case BCM43xx_LED_MODE_BG:
215 if (phy->type == BCM43xx_PHYTYPE_G && 218 if (phy->type == BCM43xx_PHYTYPE_G && bcm43xx_is_hw_radio_enabled(bcm) &&
216 1/*FIXME: using G rates.*/) 219 1/*FIXME: using G rates.*/)
217 turn_on = 1; 220 turn_on = 1;
218 break; 221 break;
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 91b752e3d07e..23aaf1ed8541 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -2441,6 +2441,9 @@ static int bcm43xx_chip_init(struct bcm43xx_private *bcm)
2441 if (err) 2441 if (err)
2442 goto err_gpio_cleanup; 2442 goto err_gpio_cleanup;
2443 bcm43xx_radio_turn_on(bcm); 2443 bcm43xx_radio_turn_on(bcm);
2444 bcm->radio_hw_enable = bcm43xx_is_hw_radio_enabled(bcm);
2445 dprintk(KERN_INFO PFX "Radio %s by hardware\n",
2446 (bcm->radio_hw_enable == 0) ? "disabled" : "enabled");
2444 2447
2445 bcm43xx_write16(bcm, 0x03E6, 0x0000); 2448 bcm43xx_write16(bcm, 0x03E6, 0x0000);
2446 err = bcm43xx_phy_init(bcm); 2449 err = bcm43xx_phy_init(bcm);
@@ -3175,9 +3178,24 @@ static void bcm43xx_periodic_every30sec(struct bcm43xx_private *bcm)
3175 3178
3176static void bcm43xx_periodic_every15sec(struct bcm43xx_private *bcm) 3179static void bcm43xx_periodic_every15sec(struct bcm43xx_private *bcm)
3177{ 3180{
3181 bcm43xx_phy_xmitpower(bcm); //FIXME: unless scanning?
3182 //TODO for APHY (temperature?)
3183}
3184
3185static void bcm43xx_periodic_every1sec(struct bcm43xx_private *bcm)
3186{
3178 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm); 3187 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
3179 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm); 3188 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
3189 int radio_hw_enable;
3180 3190
3191 /* check if radio hardware enabled status changed */
3192 radio_hw_enable = bcm43xx_is_hw_radio_enabled(bcm);
3193 if (unlikely(bcm->radio_hw_enable != radio_hw_enable)) {
3194 bcm->radio_hw_enable = radio_hw_enable;
3195 dprintk(KERN_INFO PFX "Radio hardware status changed to %s\n",
3196 (radio_hw_enable == 0) ? "disabled" : "enabled");
3197 bcm43xx_leds_update(bcm, 0);
3198 }
3181 if (phy->type == BCM43xx_PHYTYPE_G) { 3199 if (phy->type == BCM43xx_PHYTYPE_G) {
3182 //TODO: update_aci_moving_average 3200 //TODO: update_aci_moving_average
3183 if (radio->aci_enable && radio->aci_wlan_automatic) { 3201 if (radio->aci_enable && radio->aci_wlan_automatic) {
@@ -3201,21 +3219,21 @@ static void bcm43xx_periodic_every15sec(struct bcm43xx_private *bcm)
3201 //TODO: implement rev1 workaround 3219 //TODO: implement rev1 workaround
3202 } 3220 }
3203 } 3221 }
3204 bcm43xx_phy_xmitpower(bcm); //FIXME: unless scanning?
3205 //TODO for APHY (temperature?)
3206} 3222}
3207 3223
3208static void do_periodic_work(struct bcm43xx_private *bcm) 3224static void do_periodic_work(struct bcm43xx_private *bcm)
3209{ 3225{
3210 if (bcm->periodic_state % 8 == 0) 3226 if (bcm->periodic_state % 120 == 0)
3211 bcm43xx_periodic_every120sec(bcm); 3227 bcm43xx_periodic_every120sec(bcm);
3212 if (bcm->periodic_state % 4 == 0) 3228 if (bcm->periodic_state % 60 == 0)
3213 bcm43xx_periodic_every60sec(bcm); 3229 bcm43xx_periodic_every60sec(bcm);
3214 if (bcm->periodic_state % 2 == 0) 3230 if (bcm->periodic_state % 30 == 0)
3215 bcm43xx_periodic_every30sec(bcm); 3231 bcm43xx_periodic_every30sec(bcm);
3216 bcm43xx_periodic_every15sec(bcm); 3232 if (bcm->periodic_state % 15 == 0)
3233 bcm43xx_periodic_every15sec(bcm);
3234 bcm43xx_periodic_every1sec(bcm);
3217 3235
3218 schedule_delayed_work(&bcm->periodic_work, HZ * 15); 3236 schedule_delayed_work(&bcm->periodic_work, HZ);
3219} 3237}
3220 3238
3221static void bcm43xx_periodic_work_handler(struct work_struct *work) 3239static void bcm43xx_periodic_work_handler(struct work_struct *work)
@@ -3228,7 +3246,7 @@ static void bcm43xx_periodic_work_handler(struct work_struct *work)
3228 unsigned long orig_trans_start = 0; 3246 unsigned long orig_trans_start = 0;
3229 3247
3230 mutex_lock(&bcm->mutex); 3248 mutex_lock(&bcm->mutex);
3231 if (unlikely(bcm->periodic_state % 4 == 0)) { 3249 if (unlikely(bcm->periodic_state % 60 == 0)) {
3232 /* Periodic work will take a long time, so we want it to 3250 /* Periodic work will take a long time, so we want it to
3233 * be preemtible. 3251 * be preemtible.
3234 */ 3252 */
@@ -3260,7 +3278,7 @@ static void bcm43xx_periodic_work_handler(struct work_struct *work)
3260 3278
3261 do_periodic_work(bcm); 3279 do_periodic_work(bcm);
3262 3280
3263 if (unlikely(bcm->periodic_state % 4 == 0)) { 3281 if (unlikely(bcm->periodic_state % 60 == 0)) {
3264 spin_lock_irqsave(&bcm->irq_lock, flags); 3282 spin_lock_irqsave(&bcm->irq_lock, flags);
3265 tasklet_enable(&bcm->isr_tasklet); 3283 tasklet_enable(&bcm->isr_tasklet);
3266 bcm43xx_interrupt_enable(bcm, savedirqs); 3284 bcm43xx_interrupt_enable(bcm, savedirqs);
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_radio.c b/drivers/net/wireless/bcm43xx/bcm43xx_radio.c
index bb9c484d7e19..af19a07032a3 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_radio.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_radio.c
@@ -1981,6 +1981,7 @@ void bcm43xx_radio_turn_on(struct bcm43xx_private *bcm)
1981 } 1981 }
1982 radio->enabled = 1; 1982 radio->enabled = 1;
1983 dprintk(KERN_INFO PFX "Radio turned on\n"); 1983 dprintk(KERN_INFO PFX "Radio turned on\n");
1984 bcm43xx_leds_update(bcm, 0);
1984} 1985}
1985 1986
1986void bcm43xx_radio_turn_off(struct bcm43xx_private *bcm) 1987void bcm43xx_radio_turn_off(struct bcm43xx_private *bcm)
@@ -2001,6 +2002,7 @@ void bcm43xx_radio_turn_off(struct bcm43xx_private *bcm)
2001 bcm43xx_phy_write(bcm, 0x0015, 0xAA00); 2002 bcm43xx_phy_write(bcm, 0x0015, 0xAA00);
2002 radio->enabled = 0; 2003 radio->enabled = 0;
2003 dprintk(KERN_INFO PFX "Radio turned off\n"); 2004 dprintk(KERN_INFO PFX "Radio turned off\n");
2005 bcm43xx_leds_update(bcm, 0);
2004} 2006}
2005 2007
2006void bcm43xx_radio_clear_tssi(struct bcm43xx_private *bcm) 2008void bcm43xx_radio_clear_tssi(struct bcm43xx_private *bcm)
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_radio.h b/drivers/net/wireless/bcm43xx/bcm43xx_radio.h
index 9ed18039fa3e..77a98a53a2e2 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_radio.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_radio.h
@@ -65,6 +65,22 @@ void bcm43xx_radio_init2060(struct bcm43xx_private *bcm);
65void bcm43xx_radio_turn_on(struct bcm43xx_private *bcm); 65void bcm43xx_radio_turn_on(struct bcm43xx_private *bcm);
66void bcm43xx_radio_turn_off(struct bcm43xx_private *bcm); 66void bcm43xx_radio_turn_off(struct bcm43xx_private *bcm);
67 67
68static inline
69int bcm43xx_is_hw_radio_enabled(struct bcm43xx_private *bcm)
70{
71 /* function to return state of hardware enable of radio
72 * returns 0 if radio disabled, 1 if radio enabled
73 */
74 if (bcm->current_core->rev >= 3)
75 return ((bcm43xx_read32(bcm, BCM43xx_MMIO_RADIO_HWENABLED_HI)
76 & BCM43xx_MMIO_RADIO_HWENABLED_HI_MASK)
77 == 0) ? 1 : 0;
78 else
79 return ((bcm43xx_read16(bcm, BCM43xx_MMIO_RADIO_HWENABLED_LO)
80 & BCM43xx_MMIO_RADIO_HWENABLED_LO_MASK)
81 == 0) ? 0 : 1;
82}
83
68int bcm43xx_radio_selectchannel(struct bcm43xx_private *bcm, u8 channel, 84int bcm43xx_radio_selectchannel(struct bcm43xx_private *bcm, u8 channel,
69 int synthetic_pu_workaround); 85 int synthetic_pu_workaround);
70 86
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 04c19cefa1da..9077e6edde34 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -84,7 +84,7 @@ struct net_device * hostap_add_interface(struct local_info *local,
84 if (strchr(dev->name, '%')) 84 if (strchr(dev->name, '%'))
85 ret = dev_alloc_name(dev, dev->name); 85 ret = dev_alloc_name(dev, dev->name);
86 86
87 SET_NETDEV_DEV(dev, mdev->class_dev.dev); 87 SET_NETDEV_DEV(dev, mdev->dev.parent);
88 if (ret >= 0) 88 if (ret >= 0)
89 ret = register_netdevice(dev); 89 ret = register_netdevice(dev);
90 90
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 22cb3fb7502e..c878a2f3239c 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -9166,7 +9166,7 @@ static int ipw_wx_set_rts(struct net_device *dev,
9166{ 9166{
9167 struct ipw_priv *priv = ieee80211_priv(dev); 9167 struct ipw_priv *priv = ieee80211_priv(dev);
9168 mutex_lock(&priv->mutex); 9168 mutex_lock(&priv->mutex);
9169 if (wrqu->rts.disabled) 9169 if (wrqu->rts.disabled || !wrqu->rts.fixed)
9170 priv->rts_threshold = DEFAULT_RTS_THRESHOLD; 9170 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9171 else { 9171 else {
9172 if (wrqu->rts.value < MIN_RTS_THRESHOLD || 9172 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
@@ -9255,7 +9255,7 @@ static int ipw_wx_set_frag(struct net_device *dev,
9255{ 9255{
9256 struct ipw_priv *priv = ieee80211_priv(dev); 9256 struct ipw_priv *priv = ieee80211_priv(dev);
9257 mutex_lock(&priv->mutex); 9257 mutex_lock(&priv->mutex);
9258 if (wrqu->frag.disabled) 9258 if (wrqu->frag.disabled || !wrqu->frag.fixed)
9259 priv->ieee->fts = DEFAULT_FTS; 9259 priv->ieee->fts = DEFAULT_FTS;
9260 else { 9260 else {
9261 if (wrqu->frag.value < MIN_FRAG_THRESHOLD || 9261 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 936c888e03e1..4e7f6cf51436 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -2059,7 +2059,7 @@ static int determine_firmware(struct net_device *dev)
2059 int err; 2059 int err;
2060 struct comp_id nic_id, sta_id; 2060 struct comp_id nic_id, sta_id;
2061 unsigned int firmver; 2061 unsigned int firmver;
2062 char tmp[SYMBOL_MAX_VER_LEN+1]; 2062 char tmp[SYMBOL_MAX_VER_LEN+1] __attribute__((aligned(2)));
2063 2063
2064 /* Get the hardware version */ 2064 /* Get the hardware version */
2065 err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_NICID, &nic_id); 2065 err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_NICID, &nic_id);
@@ -4293,8 +4293,8 @@ static void orinoco_get_drvinfo(struct net_device *dev,
4293 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver) - 1); 4293 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver) - 1);
4294 strncpy(info->version, DRIVER_VERSION, sizeof(info->version) - 1); 4294 strncpy(info->version, DRIVER_VERSION, sizeof(info->version) - 1);
4295 strncpy(info->fw_version, priv->fw_name, sizeof(info->fw_version) - 1); 4295 strncpy(info->fw_version, priv->fw_name, sizeof(info->fw_version) - 1);
4296 if (dev->class_dev.dev) 4296 if (dev->dev.parent)
4297 strncpy(info->bus_info, dev->class_dev.dev->bus_id, 4297 strncpy(info->bus_info, dev->dev.parent->bus_id,
4298 sizeof(info->bus_info) - 1); 4298 sizeof(info->bus_info) - 1);
4299 else 4299 else
4300 snprintf(info->bus_info, sizeof(info->bus_info) - 1, 4300 snprintf(info->bus_info, sizeof(info->bus_info) - 1,
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c
index d08ae8d2726c..d1e502236b2a 100644
--- a/drivers/net/wireless/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco_cs.c
@@ -332,7 +332,7 @@ orinoco_cs_config(struct pcmcia_device *link)
332 332
333 /* Finally, report what we've done */ 333 /* Finally, report what we've done */
334 printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s, irq %d, io " 334 printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s, irq %d, io "
335 "0x%04x-0x%04x\n", dev->name, dev->class_dev.dev->bus_id, 335 "0x%04x-0x%04x\n", dev->name, dev->dev.parent->bus_id,
336 link->irq.AssignedIRQ, link->io.BasePort1, 336 link->irq.AssignedIRQ, link->io.BasePort1,
337 link->io.BasePort1 + link->io.NumPorts1 - 1); 337 link->io.BasePort1 + link->io.NumPorts1 - 1);
338 338
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index f057fd9fcd79..a037b11dac9d 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -21,6 +21,7 @@
21#include <linux/module.h> 21#include <linux/module.h>
22 22
23#include <linux/netdevice.h> 23#include <linux/netdevice.h>
24#include <linux/ethtool.h>
24#include <linux/pci.h> 25#include <linux/pci.h>
25#include <linux/etherdevice.h> 26#include <linux/etherdevice.h>
26#include <linux/delay.h> 27#include <linux/delay.h>
@@ -787,6 +788,17 @@ islpci_set_multicast_list(struct net_device *dev)
787} 788}
788#endif 789#endif
789 790
791static void islpci_ethtool_get_drvinfo(struct net_device *dev,
792 struct ethtool_drvinfo *info)
793{
794 strcpy(info->driver, DRV_NAME);
795 strcpy(info->version, DRV_VERSION);
796}
797
798static struct ethtool_ops islpci_ethtool_ops = {
799 .get_drvinfo = islpci_ethtool_get_drvinfo,
800};
801
790struct net_device * 802struct net_device *
791islpci_setup(struct pci_dev *pdev) 803islpci_setup(struct pci_dev *pdev)
792{ 804{
@@ -813,6 +825,7 @@ islpci_setup(struct pci_dev *pdev)
813 ndev->do_ioctl = &prism54_ioctl; 825 ndev->do_ioctl = &prism54_ioctl;
814 ndev->wireless_handlers = 826 ndev->wireless_handlers =
815 (struct iw_handler_def *) &prism54_handler_def; 827 (struct iw_handler_def *) &prism54_handler_def;
828 ndev->ethtool_ops = &islpci_ethtool_ops;
816 829
817 ndev->hard_start_xmit = &islpci_eth_transmit; 830 ndev->hard_start_xmit = &islpci_eth_transmit;
818 /* ndev->set_multicast_list = &islpci_set_multicast_list; */ 831 /* ndev->set_multicast_list = &islpci_set_multicast_list; */
diff --git a/drivers/net/wireless/prism54/islpci_dev.h b/drivers/net/wireless/prism54/islpci_dev.h
index a9aa1662eaa4..736666da6c24 100644
--- a/drivers/net/wireless/prism54/islpci_dev.h
+++ b/drivers/net/wireless/prism54/islpci_dev.h
@@ -211,4 +211,8 @@ islpci_trigger(islpci_private *priv)
211 211
212int islpci_free_memory(islpci_private *); 212int islpci_free_memory(islpci_private *);
213struct net_device *islpci_setup(struct pci_dev *); 213struct net_device *islpci_setup(struct pci_dev *);
214
215#define DRV_NAME "prism54"
216#define DRV_VERSION "1.2"
217
214#endif /* _ISLPCI_DEV_H */ 218#endif /* _ISLPCI_DEV_H */
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index 58257b40c043..3dcb13bb7d57 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -28,9 +28,6 @@
28#include "islpci_mgt.h" /* for pc_debug */ 28#include "islpci_mgt.h" /* for pc_debug */
29#include "isl_oid.h" 29#include "isl_oid.h"
30 30
31#define DRV_NAME "prism54"
32#define DRV_VERSION "1.2"
33
34MODULE_AUTHOR("[Intersil] R.Bastings and W.Termorshuizen, The prism54.org Development Team <prism54-devel@prism54.org>"); 31MODULE_AUTHOR("[Intersil] R.Bastings and W.Termorshuizen, The prism54.org Development Team <prism54-devel@prism54.org>");
35MODULE_DESCRIPTION("The Prism54 802.11 Wireless LAN adapter"); 32MODULE_DESCRIPTION("The Prism54 802.11 Wireless LAN adapter");
36MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c
index cf2d1486b01d..af70460f008a 100644
--- a/drivers/net/wireless/spectrum_cs.c
+++ b/drivers/net/wireless/spectrum_cs.c
@@ -806,7 +806,7 @@ spectrum_cs_config(struct pcmcia_device *link)
806 806
807 /* Finally, report what we've done */ 807 /* Finally, report what we've done */
808 printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s, irq %d, io " 808 printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s, irq %d, io "
809 "0x%04x-0x%04x\n", dev->name, dev->class_dev.dev->bus_id, 809 "0x%04x-0x%04x\n", dev->name, dev->dev.parent->bus_id,
810 link->irq.AssignedIRQ, link->io.BasePort1, 810 link->irq.AssignedIRQ, link->io.BasePort1,
811 link->io.BasePort1 + link->io.NumPorts1 - 1); 811 link->io.BasePort1 + link->io.NumPorts1 - 1);
812 812
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 78ea72fb8f0c..12dfc0b6efe6 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -84,6 +84,18 @@ static void print_id(struct zd_chip *chip)
84 dev_info(zd_chip_dev(chip), "%s\n", buffer); 84 dev_info(zd_chip_dev(chip), "%s\n", buffer);
85} 85}
86 86
87static zd_addr_t inc_addr(zd_addr_t addr)
88{
89 u16 a = (u16)addr;
90 /* Control registers use byte addressing, but everything else uses word
91 * addressing. */
92 if ((a & 0xf000) == CR_START)
93 a += 2;
94 else
95 a += 1;
96 return (zd_addr_t)a;
97}
98
87/* Read a variable number of 32-bit values. Parameter count is not allowed to 99/* Read a variable number of 32-bit values. Parameter count is not allowed to
88 * exceed USB_MAX_IOREAD32_COUNT. 100 * exceed USB_MAX_IOREAD32_COUNT.
89 */ 101 */
@@ -114,7 +126,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
114 for (i = 0; i < count; i++) { 126 for (i = 0; i < count; i++) {
115 int j = 2*i; 127 int j = 2*i;
116 /* We read the high word always first. */ 128 /* We read the high word always first. */
117 a16[j] = zd_inc_word(addr[i]); 129 a16[j] = inc_addr(addr[i]);
118 a16[j+1] = addr[i]; 130 a16[j+1] = addr[i];
119 } 131 }
120 132
@@ -163,7 +175,7 @@ int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
163 j = 2*i; 175 j = 2*i;
164 /* We write the high word always first. */ 176 /* We write the high word always first. */
165 ioreqs16[j].value = ioreqs[i].value >> 16; 177 ioreqs16[j].value = ioreqs[i].value >> 16;
166 ioreqs16[j].addr = zd_inc_word(ioreqs[i].addr); 178 ioreqs16[j].addr = inc_addr(ioreqs[i].addr);
167 ioreqs16[j+1].value = ioreqs[i].value; 179 ioreqs16[j+1].value = ioreqs[i].value;
168 ioreqs16[j+1].addr = ioreqs[i].addr; 180 ioreqs16[j+1].addr = ioreqs[i].addr;
169 } 181 }
@@ -466,7 +478,8 @@ static int read_values(struct zd_chip *chip, u8 *values, size_t count,
466 478
467 ZD_ASSERT(mutex_is_locked(&chip->mutex)); 479 ZD_ASSERT(mutex_is_locked(&chip->mutex));
468 for (i = 0;;) { 480 for (i = 0;;) {
469 r = zd_ioread32_locked(chip, &v, e2p_addr+i/2); 481 r = zd_ioread32_locked(chip, &v,
482 (zd_addr_t)((u16)e2p_addr+i/2));
470 if (r) 483 if (r)
471 return r; 484 return r;
472 v -= guard; 485 v -= guard;
@@ -798,47 +811,18 @@ static int hw_reset_phy(struct zd_chip *chip)
798static int zd1211_hw_init_hmac(struct zd_chip *chip) 811static int zd1211_hw_init_hmac(struct zd_chip *chip)
799{ 812{
800 static const struct zd_ioreq32 ioreqs[] = { 813 static const struct zd_ioreq32 ioreqs[] = {
801 { CR_ACK_TIMEOUT_EXT, 0x20 },
802 { CR_ADDA_MBIAS_WARMTIME, 0x30000808 },
803 { CR_ZD1211_RETRY_MAX, 0x2 }, 814 { CR_ZD1211_RETRY_MAX, 0x2 },
804 { CR_SNIFFER_ON, 0 },
805 { CR_RX_FILTER, STA_RX_FILTER },
806 { CR_GROUP_HASH_P1, 0x00 },
807 { CR_GROUP_HASH_P2, 0x80000000 },
808 { CR_REG1, 0xa4 },
809 { CR_ADDA_PWR_DWN, 0x7f },
810 { CR_BCN_PLCP_CFG, 0x00f00401 },
811 { CR_PHY_DELAY, 0x00 },
812 { CR_ACK_TIMEOUT_EXT, 0x80 },
813 { CR_ADDA_PWR_DWN, 0x00 },
814 { CR_ACK_TIME_80211, 0x100 },
815 { CR_RX_PE_DELAY, 0x70 },
816 { CR_PS_CTRL, 0x10000000 },
817 { CR_RTS_CTS_RATE, 0x02030203 },
818 { CR_RX_THRESHOLD, 0x000c0640 }, 815 { CR_RX_THRESHOLD, 0x000c0640 },
819 { CR_AFTER_PNP, 0x1 },
820 { CR_WEP_PROTECT, 0x114 },
821 }; 816 };
822 817
823 int r;
824
825 dev_dbg_f(zd_chip_dev(chip), "\n"); 818 dev_dbg_f(zd_chip_dev(chip), "\n");
826 ZD_ASSERT(mutex_is_locked(&chip->mutex)); 819 ZD_ASSERT(mutex_is_locked(&chip->mutex));
827 r = zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); 820 return zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
828#ifdef DEBUG
829 if (r) {
830 dev_err(zd_chip_dev(chip),
831 "error in zd_iowrite32a_locked. Error number %d\n", r);
832 }
833#endif /* DEBUG */
834 return r;
835} 821}
836 822
837static int zd1211b_hw_init_hmac(struct zd_chip *chip) 823static int zd1211b_hw_init_hmac(struct zd_chip *chip)
838{ 824{
839 static const struct zd_ioreq32 ioreqs[] = { 825 static const struct zd_ioreq32 ioreqs[] = {
840 { CR_ACK_TIMEOUT_EXT, 0x20 },
841 { CR_ADDA_MBIAS_WARMTIME, 0x30000808 },
842 { CR_ZD1211B_RETRY_MAX, 0x02020202 }, 826 { CR_ZD1211B_RETRY_MAX, 0x02020202 },
843 { CR_ZD1211B_TX_PWR_CTL4, 0x007f003f }, 827 { CR_ZD1211B_TX_PWR_CTL4, 0x007f003f },
844 { CR_ZD1211B_TX_PWR_CTL3, 0x007f003f }, 828 { CR_ZD1211B_TX_PWR_CTL3, 0x007f003f },
@@ -847,6 +831,20 @@ static int zd1211b_hw_init_hmac(struct zd_chip *chip)
847 { CR_ZD1211B_AIFS_CTL1, 0x00280028 }, 831 { CR_ZD1211B_AIFS_CTL1, 0x00280028 },
848 { CR_ZD1211B_AIFS_CTL2, 0x008C003C }, 832 { CR_ZD1211B_AIFS_CTL2, 0x008C003C },
849 { CR_ZD1211B_TXOP, 0x01800824 }, 833 { CR_ZD1211B_TXOP, 0x01800824 },
834 { CR_RX_THRESHOLD, 0x000c0eff, },
835 };
836
837 dev_dbg_f(zd_chip_dev(chip), "\n");
838 ZD_ASSERT(mutex_is_locked(&chip->mutex));
839 return zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
840}
841
842static int hw_init_hmac(struct zd_chip *chip)
843{
844 int r;
845 static const struct zd_ioreq32 ioreqs[] = {
846 { CR_ACK_TIMEOUT_EXT, 0x20 },
847 { CR_ADDA_MBIAS_WARMTIME, 0x30000808 },
850 { CR_SNIFFER_ON, 0 }, 848 { CR_SNIFFER_ON, 0 },
851 { CR_RX_FILTER, STA_RX_FILTER }, 849 { CR_RX_FILTER, STA_RX_FILTER },
852 { CR_GROUP_HASH_P1, 0x00 }, 850 { CR_GROUP_HASH_P1, 0x00 },
@@ -861,25 +859,16 @@ static int zd1211b_hw_init_hmac(struct zd_chip *chip)
861 { CR_RX_PE_DELAY, 0x70 }, 859 { CR_RX_PE_DELAY, 0x70 },
862 { CR_PS_CTRL, 0x10000000 }, 860 { CR_PS_CTRL, 0x10000000 },
863 { CR_RTS_CTS_RATE, 0x02030203 }, 861 { CR_RTS_CTS_RATE, 0x02030203 },
864 { CR_RX_THRESHOLD, 0x000c0eff, },
865 { CR_AFTER_PNP, 0x1 }, 862 { CR_AFTER_PNP, 0x1 },
866 { CR_WEP_PROTECT, 0x114 }, 863 { CR_WEP_PROTECT, 0x114 },
864 { CR_IFS_VALUE, IFS_VALUE_DEFAULT },
867 }; 865 };
868 866
869 int r;
870
871 dev_dbg_f(zd_chip_dev(chip), "\n");
872 ZD_ASSERT(mutex_is_locked(&chip->mutex)); 867 ZD_ASSERT(mutex_is_locked(&chip->mutex));
873 r = zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); 868 r = zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
874 if (r) { 869 if (r)
875 dev_dbg_f(zd_chip_dev(chip), 870 return r;
876 "error in zd_iowrite32a_locked. Error number %d\n", r);
877 }
878 return r;
879}
880 871
881static int hw_init_hmac(struct zd_chip *chip)
882{
883 return chip->is_zd1211b ? 872 return chip->is_zd1211b ?
884 zd1211b_hw_init_hmac(chip) : zd1211_hw_init_hmac(chip); 873 zd1211b_hw_init_hmac(chip) : zd1211_hw_init_hmac(chip);
885} 874}
@@ -974,16 +963,14 @@ static int hw_init(struct zd_chip *chip)
974 if (r) 963 if (r)
975 return r; 964 return r;
976 965
977 /* Although the vendor driver defaults to a different value during
978 * init, it overwrites the IFS value with the following every time
979 * the channel changes. We should aim to be more intelligent... */
980 r = zd_iowrite32_locked(chip, IFS_VALUE_DEFAULT, CR_IFS_VALUE);
981 if (r)
982 return r;
983
984 return set_beacon_interval(chip, 100); 966 return set_beacon_interval(chip, 100);
985} 967}
986 968
969static zd_addr_t fw_reg_addr(struct zd_chip *chip, u16 offset)
970{
971 return (zd_addr_t)((u16)chip->fw_regs_base + offset);
972}
973
987#ifdef DEBUG 974#ifdef DEBUG
988static int dump_cr(struct zd_chip *chip, const zd_addr_t addr, 975static int dump_cr(struct zd_chip *chip, const zd_addr_t addr,
989 const char *addr_string) 976 const char *addr_string)
@@ -1018,9 +1005,11 @@ static int test_init(struct zd_chip *chip)
1018 1005
1019static void dump_fw_registers(struct zd_chip *chip) 1006static void dump_fw_registers(struct zd_chip *chip)
1020{ 1007{
1021 static const zd_addr_t addr[4] = { 1008 const zd_addr_t addr[4] = {
1022 FW_FIRMWARE_VER, FW_USB_SPEED, FW_FIX_TX_RATE, 1009 fw_reg_addr(chip, FW_REG_FIRMWARE_VER),
1023 FW_LINK_STATUS 1010 fw_reg_addr(chip, FW_REG_USB_SPEED),
1011 fw_reg_addr(chip, FW_REG_FIX_TX_RATE),
1012 fw_reg_addr(chip, FW_REG_LED_LINK_STATUS),
1024 }; 1013 };
1025 1014
1026 int r; 1015 int r;
@@ -1046,7 +1035,8 @@ static int print_fw_version(struct zd_chip *chip)
1046 int r; 1035 int r;
1047 u16 version; 1036 u16 version;
1048 1037
1049 r = zd_ioread16_locked(chip, &version, FW_FIRMWARE_VER); 1038 r = zd_ioread16_locked(chip, &version,
1039 fw_reg_addr(chip, FW_REG_FIRMWARE_VER));
1050 if (r) 1040 if (r)
1051 return r; 1041 return r;
1052 1042
@@ -1126,6 +1116,22 @@ int zd_chip_disable_hwint(struct zd_chip *chip)
1126 return r; 1116 return r;
1127} 1117}
1128 1118
1119static int read_fw_regs_offset(struct zd_chip *chip)
1120{
1121 int r;
1122
1123 ZD_ASSERT(mutex_is_locked(&chip->mutex));
1124 r = zd_ioread16_locked(chip, (u16*)&chip->fw_regs_base,
1125 FWRAW_REGS_ADDR);
1126 if (r)
1127 return r;
1128 dev_dbg_f(zd_chip_dev(chip), "fw_regs_base: %#06hx\n",
1129 (u16)chip->fw_regs_base);
1130
1131 return 0;
1132}
1133
1134
1129int zd_chip_init_hw(struct zd_chip *chip, u8 device_type) 1135int zd_chip_init_hw(struct zd_chip *chip, u8 device_type)
1130{ 1136{
1131 int r; 1137 int r;
@@ -1145,7 +1151,7 @@ int zd_chip_init_hw(struct zd_chip *chip, u8 device_type)
1145 if (r) 1151 if (r)
1146 goto out; 1152 goto out;
1147 1153
1148 r = zd_usb_init_hw(&chip->usb); 1154 r = read_fw_regs_offset(chip);
1149 if (r) 1155 if (r)
1150 goto out; 1156 goto out;
1151 1157
@@ -1325,15 +1331,15 @@ u8 zd_chip_get_channel(struct zd_chip *chip)
1325 1331
1326int zd_chip_control_leds(struct zd_chip *chip, enum led_status status) 1332int zd_chip_control_leds(struct zd_chip *chip, enum led_status status)
1327{ 1333{
1328 static const zd_addr_t a[] = { 1334 const zd_addr_t a[] = {
1329 FW_LINK_STATUS, 1335 fw_reg_addr(chip, FW_REG_LED_LINK_STATUS),
1330 CR_LED, 1336 CR_LED,
1331 }; 1337 };
1332 1338
1333 int r; 1339 int r;
1334 u16 v[ARRAY_SIZE(a)]; 1340 u16 v[ARRAY_SIZE(a)];
1335 struct zd_ioreq16 ioreqs[ARRAY_SIZE(a)] = { 1341 struct zd_ioreq16 ioreqs[ARRAY_SIZE(a)] = {
1336 [0] = { FW_LINK_STATUS }, 1342 [0] = { fw_reg_addr(chip, FW_REG_LED_LINK_STATUS) },
1337 [1] = { CR_LED }, 1343 [1] = { CR_LED },
1338 }; 1344 };
1339 u16 other_led; 1345 u16 other_led;
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index a4e3cee9b59d..b07569e391ee 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -18,7 +18,6 @@
18#ifndef _ZD_CHIP_H 18#ifndef _ZD_CHIP_H
19#define _ZD_CHIP_H 19#define _ZD_CHIP_H
20 20
21#include "zd_types.h"
22#include "zd_rf.h" 21#include "zd_rf.h"
23#include "zd_usb.h" 22#include "zd_usb.h"
24 23
@@ -27,6 +26,37 @@
27 * adds a processor for handling the USB protocol. 26 * adds a processor for handling the USB protocol.
28 */ 27 */
29 28
29/* Address space */
30enum {
31 /* CONTROL REGISTERS */
32 CR_START = 0x9000,
33
34
35 /* FIRMWARE */
36 FW_START = 0xee00,
37
38
39 /* EEPROM */
40 E2P_START = 0xf800,
41 E2P_LEN = 0x800,
42
43 /* EEPROM layout */
44 E2P_LOAD_CODE_LEN = 0xe, /* base 0xf800 */
45 E2P_LOAD_VECT_LEN = 0x9, /* base 0xf80e */
46 /* E2P_DATA indexes into this */
47 E2P_DATA_LEN = 0x7e, /* base 0xf817 */
48 E2P_BOOT_CODE_LEN = 0x760, /* base 0xf895 */
49 E2P_INTR_VECT_LEN = 0xb, /* base 0xfff5 */
50
51 /* Some precomputed offsets into the EEPROM */
52 E2P_DATA_OFFSET = E2P_LOAD_CODE_LEN + E2P_LOAD_VECT_LEN,
53 E2P_BOOT_CODE_OFFSET = E2P_DATA_OFFSET + E2P_DATA_LEN,
54};
55
56#define CTL_REG(offset) ((zd_addr_t)(CR_START + (offset)))
57#define E2P_DATA(offset) ((zd_addr_t)(E2P_START + E2P_DATA_OFFSET + (offset)))
58#define FWRAW_DATA(offset) ((zd_addr_t)(FW_START + (offset)))
59
30/* 8-bit hardware registers */ 60/* 8-bit hardware registers */
31#define CR0 CTL_REG(0x0000) 61#define CR0 CTL_REG(0x0000)
32#define CR1 CTL_REG(0x0004) 62#define CR1 CTL_REG(0x0004)
@@ -302,7 +332,7 @@
302 332
303#define CR_MAX_PHY_REG 255 333#define CR_MAX_PHY_REG 255
304 334
305/* Taken from the ZYDAS driver, not all of them are relevant for the ZSD1211 335/* Taken from the ZYDAS driver, not all of them are relevant for the ZD1211
306 * driver. 336 * driver.
307 */ 337 */
308 338
@@ -594,81 +624,71 @@
594/* 624/*
595 * Upper 16 bit contains the regulatory domain. 625 * Upper 16 bit contains the regulatory domain.
596 */ 626 */
597#define E2P_SUBID E2P_REG(0x00) 627#define E2P_SUBID E2P_DATA(0x00)
598#define E2P_POD E2P_REG(0x02) 628#define E2P_POD E2P_DATA(0x02)
599#define E2P_MAC_ADDR_P1 E2P_REG(0x04) 629#define E2P_MAC_ADDR_P1 E2P_DATA(0x04)
600#define E2P_MAC_ADDR_P2 E2P_REG(0x06) 630#define E2P_MAC_ADDR_P2 E2P_DATA(0x06)
601#define E2P_PWR_CAL_VALUE1 E2P_REG(0x08) 631#define E2P_PWR_CAL_VALUE1 E2P_DATA(0x08)
602#define E2P_PWR_CAL_VALUE2 E2P_REG(0x0a) 632#define E2P_PWR_CAL_VALUE2 E2P_DATA(0x0a)
603#define E2P_PWR_CAL_VALUE3 E2P_REG(0x0c) 633#define E2P_PWR_CAL_VALUE3 E2P_DATA(0x0c)
604#define E2P_PWR_CAL_VALUE4 E2P_REG(0x0e) 634#define E2P_PWR_CAL_VALUE4 E2P_DATA(0x0e)
605#define E2P_PWR_INT_VALUE1 E2P_REG(0x10) 635#define E2P_PWR_INT_VALUE1 E2P_DATA(0x10)
606#define E2P_PWR_INT_VALUE2 E2P_REG(0x12) 636#define E2P_PWR_INT_VALUE2 E2P_DATA(0x12)
607#define E2P_PWR_INT_VALUE3 E2P_REG(0x14) 637#define E2P_PWR_INT_VALUE3 E2P_DATA(0x14)
608#define E2P_PWR_INT_VALUE4 E2P_REG(0x16) 638#define E2P_PWR_INT_VALUE4 E2P_DATA(0x16)
609 639
610/* Contains a bit for each allowed channel. It gives for Europe (ETSI 0x30) 640/* Contains a bit for each allowed channel. It gives for Europe (ETSI 0x30)
611 * also only 11 channels. */ 641 * also only 11 channels. */
612#define E2P_ALLOWED_CHANNEL E2P_REG(0x18) 642#define E2P_ALLOWED_CHANNEL E2P_DATA(0x18)
613 643
614#define E2P_PHY_REG E2P_REG(0x1a) 644#define E2P_PHY_REG E2P_DATA(0x1a)
615#define E2P_DEVICE_VER E2P_REG(0x20) 645#define E2P_DEVICE_VER E2P_DATA(0x20)
616#define E2P_36M_CAL_VALUE1 E2P_REG(0x28) 646#define E2P_36M_CAL_VALUE1 E2P_DATA(0x28)
617#define E2P_36M_CAL_VALUE2 E2P_REG(0x2a) 647#define E2P_36M_CAL_VALUE2 E2P_DATA(0x2a)
618#define E2P_36M_CAL_VALUE3 E2P_REG(0x2c) 648#define E2P_36M_CAL_VALUE3 E2P_DATA(0x2c)
619#define E2P_36M_CAL_VALUE4 E2P_REG(0x2e) 649#define E2P_36M_CAL_VALUE4 E2P_DATA(0x2e)
620#define E2P_11A_INT_VALUE1 E2P_REG(0x30) 650#define E2P_11A_INT_VALUE1 E2P_DATA(0x30)
621#define E2P_11A_INT_VALUE2 E2P_REG(0x32) 651#define E2P_11A_INT_VALUE2 E2P_DATA(0x32)
622#define E2P_11A_INT_VALUE3 E2P_REG(0x34) 652#define E2P_11A_INT_VALUE3 E2P_DATA(0x34)
623#define E2P_11A_INT_VALUE4 E2P_REG(0x36) 653#define E2P_11A_INT_VALUE4 E2P_DATA(0x36)
624#define E2P_48M_CAL_VALUE1 E2P_REG(0x38) 654#define E2P_48M_CAL_VALUE1 E2P_DATA(0x38)
625#define E2P_48M_CAL_VALUE2 E2P_REG(0x3a) 655#define E2P_48M_CAL_VALUE2 E2P_DATA(0x3a)
626#define E2P_48M_CAL_VALUE3 E2P_REG(0x3c) 656#define E2P_48M_CAL_VALUE3 E2P_DATA(0x3c)
627#define E2P_48M_CAL_VALUE4 E2P_REG(0x3e) 657#define E2P_48M_CAL_VALUE4 E2P_DATA(0x3e)
628#define E2P_48M_INT_VALUE1 E2P_REG(0x40) 658#define E2P_48M_INT_VALUE1 E2P_DATA(0x40)
629#define E2P_48M_INT_VALUE2 E2P_REG(0x42) 659#define E2P_48M_INT_VALUE2 E2P_DATA(0x42)
630#define E2P_48M_INT_VALUE3 E2P_REG(0x44) 660#define E2P_48M_INT_VALUE3 E2P_DATA(0x44)
631#define E2P_48M_INT_VALUE4 E2P_REG(0x46) 661#define E2P_48M_INT_VALUE4 E2P_DATA(0x46)
632#define E2P_54M_CAL_VALUE1 E2P_REG(0x48) /* ??? */ 662#define E2P_54M_CAL_VALUE1 E2P_DATA(0x48) /* ??? */
633#define E2P_54M_CAL_VALUE2 E2P_REG(0x4a) 663#define E2P_54M_CAL_VALUE2 E2P_DATA(0x4a)
634#define E2P_54M_CAL_VALUE3 E2P_REG(0x4c) 664#define E2P_54M_CAL_VALUE3 E2P_DATA(0x4c)
635#define E2P_54M_CAL_VALUE4 E2P_REG(0x4e) 665#define E2P_54M_CAL_VALUE4 E2P_DATA(0x4e)
636#define E2P_54M_INT_VALUE1 E2P_REG(0x50) 666#define E2P_54M_INT_VALUE1 E2P_DATA(0x50)
637#define E2P_54M_INT_VALUE2 E2P_REG(0x52) 667#define E2P_54M_INT_VALUE2 E2P_DATA(0x52)
638#define E2P_54M_INT_VALUE3 E2P_REG(0x54) 668#define E2P_54M_INT_VALUE3 E2P_DATA(0x54)
639#define E2P_54M_INT_VALUE4 E2P_REG(0x56) 669#define E2P_54M_INT_VALUE4 E2P_DATA(0x56)
640 670
641/* All 16 bit values */ 671/* This word contains the base address of the FW_REG_ registers below */
642#define FW_FIRMWARE_VER FW_REG(0) 672#define FWRAW_REGS_ADDR FWRAW_DATA(0x1d)
643/* non-zero if USB high speed connection */ 673
644#define FW_USB_SPEED FW_REG(1) 674/* All 16 bit values, offset from the address in FWRAW_REGS_ADDR */
645#define FW_FIX_TX_RATE FW_REG(2) 675enum {
646/* Seems to be able to control LEDs over the firmware */ 676 FW_REG_FIRMWARE_VER = 0,
647#define FW_LINK_STATUS FW_REG(3) 677 /* non-zero if USB high speed connection */
648#define FW_SOFT_RESET FW_REG(4) 678 FW_REG_USB_SPEED = 1,
649#define FW_FLASH_CHK FW_REG(5) 679 FW_REG_FIX_TX_RATE = 2,
680 /* Seems to be able to control LEDs over the firmware */
681 FW_REG_LED_LINK_STATUS = 3,
682 FW_REG_SOFT_RESET = 4,
683 FW_REG_FLASH_CHK = 5,
684};
650 685
686/* Values for FW_LINK_STATUS */
651#define FW_LINK_OFF 0x0 687#define FW_LINK_OFF 0x0
652#define FW_LINK_TX 0x1 688#define FW_LINK_TX 0x1
653/* 0x2 - link led on? */ 689/* 0x2 - link led on? */
654 690
655enum { 691enum {
656 CR_BASE_OFFSET = 0x9000,
657 FW_START_OFFSET = 0xee00,
658 FW_BASE_ADDR_OFFSET = FW_START_OFFSET + 0x1d,
659 EEPROM_START_OFFSET = 0xf800,
660 EEPROM_SIZE = 0x800, /* words */
661 LOAD_CODE_SIZE = 0xe, /* words */
662 LOAD_VECT_SIZE = 0x10000 - 0xfff7, /* words */
663 EEPROM_REGS_OFFSET = LOAD_CODE_SIZE + LOAD_VECT_SIZE,
664 EEPROM_REGS_SIZE = 0x7e, /* words */
665 E2P_BASE_OFFSET = EEPROM_START_OFFSET +
666 EEPROM_REGS_OFFSET,
667};
668
669#define FW_REG_TABLE_ADDR USB_ADDR(FW_START_OFFSET + 0x1d)
670
671enum {
672 /* indices for ofdm_cal_values */ 692 /* indices for ofdm_cal_values */
673 OFDM_36M_INDEX = 0, 693 OFDM_36M_INDEX = 0,
674 OFDM_48M_INDEX = 1, 694 OFDM_48M_INDEX = 1,
@@ -679,6 +699,8 @@ struct zd_chip {
679 struct zd_usb usb; 699 struct zd_usb usb;
680 struct zd_rf rf; 700 struct zd_rf rf;
681 struct mutex mutex; 701 struct mutex mutex;
702 /* Base address of FW_REG_ registers */
703 zd_addr_t fw_regs_base;
682 u8 e2p_mac[ETH_ALEN]; 704 u8 e2p_mac[ETH_ALEN];
683 /* EepSetPoint in the vendor driver */ 705 /* EepSetPoint in the vendor driver */
684 u8 pwr_cal_values[E2P_CHANNEL_COUNT]; 706 u8 pwr_cal_values[E2P_CHANNEL_COUNT];
diff --git a/drivers/net/wireless/zd1211rw/zd_def.h b/drivers/net/wireless/zd1211rw/zd_def.h
index fb22f62cf1f3..deb99d1eaa77 100644
--- a/drivers/net/wireless/zd1211rw/zd_def.h
+++ b/drivers/net/wireless/zd1211rw/zd_def.h
@@ -23,6 +23,8 @@
23#include <linux/device.h> 23#include <linux/device.h>
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25 25
26typedef u16 __nocast zd_addr_t;
27
26#define dev_printk_f(level, dev, fmt, args...) \ 28#define dev_printk_f(level, dev, fmt, args...) \
27 dev_printk(level, dev, "%s() " fmt, __func__, ##args) 29 dev_printk(level, dev, "%s() " fmt, __func__, ##args)
28 30
diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.h b/drivers/net/wireless/zd1211rw/zd_ieee80211.h
index 26b8298dff8c..c4f36d39642b 100644
--- a/drivers/net/wireless/zd1211rw/zd_ieee80211.h
+++ b/drivers/net/wireless/zd1211rw/zd_ieee80211.h
@@ -2,7 +2,6 @@
2#define _ZD_IEEE80211_H 2#define _ZD_IEEE80211_H
3 3
4#include <net/ieee80211.h> 4#include <net/ieee80211.h>
5#include "zd_types.h"
6 5
7/* Additional definitions from the standards. 6/* Additional definitions from the standards.
8 */ 7 */
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.h b/drivers/net/wireless/zd1211rw/zd_rf.h
index 676b3734f1ed..a57732eb69e1 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf.h
+++ b/drivers/net/wireless/zd1211rw/zd_rf.h
@@ -18,8 +18,6 @@
18#ifndef _ZD_RF_H 18#ifndef _ZD_RF_H
19#define _ZD_RF_H 19#define _ZD_RF_H
20 20
21#include "zd_types.h"
22
23#define UW2451_RF 0x2 21#define UW2451_RF 0x2
24#define UCHIP_RF 0x3 22#define UCHIP_RF 0x3
25#define AL2230_RF 0x4 23#define AL2230_RF 0x4
diff --git a/drivers/net/wireless/zd1211rw/zd_types.h b/drivers/net/wireless/zd1211rw/zd_types.h
deleted file mode 100644
index 0155a1584ed3..000000000000
--- a/drivers/net/wireless/zd1211rw/zd_types.h
+++ /dev/null
@@ -1,71 +0,0 @@
1/* zd_types.h
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18#ifndef _ZD_TYPES_H
19#define _ZD_TYPES_H
20
21#include <linux/types.h>
22
23/* We have three register spaces mapped into the overall USB address space of
24 * 64K words (16-bit values). There is the control register space of
25 * double-word registers, the eeprom register space and the firmware register
26 * space. The control register space is byte mapped, the others are word
27 * mapped.
28 *
29 * For that reason, we are using byte offsets for control registers and word
30 * offsets for everything else.
31 */
32
33typedef u32 __nocast zd_addr_t;
34
35enum {
36 ADDR_BASE_MASK = 0xff000000,
37 ADDR_OFFSET_MASK = 0x0000ffff,
38 ADDR_ZERO_MASK = 0x00ff0000,
39 NULL_BASE = 0x00000000,
40 USB_BASE = 0x01000000,
41 CR_BASE = 0x02000000,
42 CR_MAX_OFFSET = 0x0b30,
43 E2P_BASE = 0x03000000,
44 E2P_MAX_OFFSET = 0x007e,
45 FW_BASE = 0x04000000,
46 FW_MAX_OFFSET = 0x0005,
47};
48
49#define ZD_ADDR_BASE(addr) ((u32)(addr) & ADDR_BASE_MASK)
50#define ZD_OFFSET(addr) ((u32)(addr) & ADDR_OFFSET_MASK)
51
52#define ZD_ADDR(base, offset) \
53 ((zd_addr_t)(((base) & ADDR_BASE_MASK) | ((offset) & ADDR_OFFSET_MASK)))
54
55#define ZD_NULL_ADDR ((zd_addr_t)0)
56#define USB_REG(offset) ZD_ADDR(USB_BASE, offset) /* word addressing */
57#define CTL_REG(offset) ZD_ADDR(CR_BASE, offset) /* byte addressing */
58#define E2P_REG(offset) ZD_ADDR(E2P_BASE, offset) /* word addressing */
59#define FW_REG(offset) ZD_ADDR(FW_BASE, offset) /* word addressing */
60
61static inline zd_addr_t zd_inc_word(zd_addr_t addr)
62{
63 u32 base = ZD_ADDR_BASE(addr);
64 u32 offset = ZD_OFFSET(addr);
65
66 offset += base == CR_BASE ? 2 : 1;
67
68 return base | offset;
69}
70
71#endif /* _ZD_TYPES_H */
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 605e96e74057..75ef55624d7f 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -58,6 +58,10 @@ static struct usb_device_id usb_ids[] = {
58 { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B }, 58 { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
59 { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B }, 59 { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
60 { USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B }, 60 { USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B },
61 { USB_DEVICE(0x083a, 0x4505), .driver_info = DEVICE_ZD1211B },
62 { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B },
63 { USB_DEVICE(0x13b1, 0x0024), .driver_info = DEVICE_ZD1211B },
64 { USB_DEVICE(0x0586, 0x340f), .driver_info = DEVICE_ZD1211B },
61 /* "Driverless" devices that need ejecting */ 65 /* "Driverless" devices that need ejecting */
62 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, 66 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
63 {} 67 {}
@@ -73,96 +77,6 @@ MODULE_DEVICE_TABLE(usb, usb_ids);
73#define FW_ZD1211_PREFIX "zd1211/zd1211_" 77#define FW_ZD1211_PREFIX "zd1211/zd1211_"
74#define FW_ZD1211B_PREFIX "zd1211/zd1211b_" 78#define FW_ZD1211B_PREFIX "zd1211/zd1211b_"
75 79
76/* register address handling */
77
78#ifdef DEBUG
79static int check_addr(struct zd_usb *usb, zd_addr_t addr)
80{
81 u32 base = ZD_ADDR_BASE(addr);
82 u32 offset = ZD_OFFSET(addr);
83
84 if ((u32)addr & ADDR_ZERO_MASK)
85 goto invalid_address;
86 switch (base) {
87 case USB_BASE:
88 break;
89 case CR_BASE:
90 if (offset > CR_MAX_OFFSET) {
91 dev_dbg(zd_usb_dev(usb),
92 "CR offset %#010x larger than"
93 " CR_MAX_OFFSET %#10x\n",
94 offset, CR_MAX_OFFSET);
95 goto invalid_address;
96 }
97 if (offset & 1) {
98 dev_dbg(zd_usb_dev(usb),
99 "CR offset %#010x is not a multiple of 2\n",
100 offset);
101 goto invalid_address;
102 }
103 break;
104 case E2P_BASE:
105 if (offset > E2P_MAX_OFFSET) {
106 dev_dbg(zd_usb_dev(usb),
107 "E2P offset %#010x larger than"
108 " E2P_MAX_OFFSET %#010x\n",
109 offset, E2P_MAX_OFFSET);
110 goto invalid_address;
111 }
112 break;
113 case FW_BASE:
114 if (!usb->fw_base_offset) {
115 dev_dbg(zd_usb_dev(usb),
116 "ERROR: fw base offset has not been set\n");
117 return -EAGAIN;
118 }
119 if (offset > FW_MAX_OFFSET) {
120 dev_dbg(zd_usb_dev(usb),
121 "FW offset %#10x is larger than"
122 " FW_MAX_OFFSET %#010x\n",
123 offset, FW_MAX_OFFSET);
124 goto invalid_address;
125 }
126 break;
127 default:
128 dev_dbg(zd_usb_dev(usb),
129 "address has unsupported base %#010x\n", addr);
130 goto invalid_address;
131 }
132
133 return 0;
134invalid_address:
135 dev_dbg(zd_usb_dev(usb),
136 "ERROR: invalid address: %#010x\n", addr);
137 return -EINVAL;
138}
139#endif /* DEBUG */
140
141static u16 usb_addr(struct zd_usb *usb, zd_addr_t addr)
142{
143 u32 base;
144 u16 offset;
145
146 base = ZD_ADDR_BASE(addr);
147 offset = ZD_OFFSET(addr);
148
149 ZD_ASSERT(check_addr(usb, addr) == 0);
150
151 switch (base) {
152 case CR_BASE:
153 offset += CR_BASE_OFFSET;
154 break;
155 case E2P_BASE:
156 offset += E2P_BASE_OFFSET;
157 break;
158 case FW_BASE:
159 offset += usb->fw_base_offset;
160 break;
161 }
162
163 return offset;
164}
165
166/* USB device initialization */ 80/* USB device initialization */
167 81
168static int request_fw_file( 82static int request_fw_file(
@@ -295,14 +209,13 @@ static int handle_version_mismatch(struct usb_device *udev, u8 device_type,
295 if (r) 209 if (r)
296 goto error; 210 goto error;
297 211
298 r = upload_code(udev, ur_fw->data, ur_fw->size, FW_START_OFFSET, 212 r = upload_code(udev, ur_fw->data, ur_fw->size, FW_START, REBOOT);
299 REBOOT);
300 if (r) 213 if (r)
301 goto error; 214 goto error;
302 215
303 offset = ((EEPROM_REGS_OFFSET + EEPROM_REGS_SIZE) * sizeof(u16)); 216 offset = (E2P_BOOT_CODE_OFFSET * sizeof(u16));
304 r = upload_code(udev, ub_fw->data + offset, ub_fw->size - offset, 217 r = upload_code(udev, ub_fw->data + offset, ub_fw->size - offset,
305 E2P_BASE_OFFSET + EEPROM_REGS_SIZE, REBOOT); 218 E2P_START + E2P_BOOT_CODE_OFFSET, REBOOT);
306 219
307 /* At this point, the vendor driver downloads the whole firmware 220 /* At this point, the vendor driver downloads the whole firmware
308 * image, hacks around with version IDs, and uploads it again, 221 * image, hacks around with version IDs, and uploads it again,
@@ -331,7 +244,7 @@ static int upload_firmware(struct usb_device *udev, u8 device_type)
331 if (r) 244 if (r)
332 goto error; 245 goto error;
333 246
334 fw_bcdDevice = get_word(ub_fw->data, EEPROM_REGS_OFFSET); 247 fw_bcdDevice = get_word(ub_fw->data, E2P_DATA_OFFSET);
335 248
336 if (fw_bcdDevice != bcdDevice) { 249 if (fw_bcdDevice != bcdDevice) {
337 dev_info(&udev->dev, 250 dev_info(&udev->dev,
@@ -357,8 +270,7 @@ static int upload_firmware(struct usb_device *udev, u8 device_type)
357 if (r) 270 if (r)
358 goto error; 271 goto error;
359 272
360 r = upload_code(udev, uph_fw->data, uph_fw->size, FW_START_OFFSET, 273 r = upload_code(udev, uph_fw->data, uph_fw->size, FW_START, REBOOT);
361 REBOOT);
362 if (r) { 274 if (r) {
363 dev_err(&udev->dev, 275 dev_err(&udev->dev,
364 "Could not upload firmware code uph. Error number %d\n", 276 "Could not upload firmware code uph. Error number %d\n",
@@ -858,7 +770,7 @@ static inline void init_usb_interrupt(struct zd_usb *usb)
858 spin_lock_init(&intr->lock); 770 spin_lock_init(&intr->lock);
859 intr->interval = int_urb_interval(zd_usb_to_usbdev(usb)); 771 intr->interval = int_urb_interval(zd_usb_to_usbdev(usb));
860 init_completion(&intr->read_regs.completion); 772 init_completion(&intr->read_regs.completion);
861 intr->read_regs.cr_int_addr = cpu_to_le16(usb_addr(usb, CR_INTERRUPT)); 773 intr->read_regs.cr_int_addr = cpu_to_le16((u16)CR_INTERRUPT);
862} 774}
863 775
864static inline void init_usb_rx(struct zd_usb *usb) 776static inline void init_usb_rx(struct zd_usb *usb)
@@ -890,22 +802,6 @@ void zd_usb_init(struct zd_usb *usb, struct net_device *netdev,
890 init_usb_rx(usb); 802 init_usb_rx(usb);
891} 803}
892 804
893int zd_usb_init_hw(struct zd_usb *usb)
894{
895 int r;
896 struct zd_chip *chip = zd_usb_to_chip(usb);
897
898 ZD_ASSERT(mutex_is_locked(&chip->mutex));
899 r = zd_ioread16_locked(chip, &usb->fw_base_offset,
900 USB_REG((u16)FW_BASE_ADDR_OFFSET));
901 if (r)
902 return r;
903 dev_dbg_f(zd_usb_dev(usb), "fw_base_offset: %#06hx\n",
904 usb->fw_base_offset);
905
906 return 0;
907}
908
909void zd_usb_clear(struct zd_usb *usb) 805void zd_usb_clear(struct zd_usb *usb)
910{ 806{
911 usb_set_intfdata(usb->intf, NULL); 807 usb_set_intfdata(usb->intf, NULL);
@@ -1253,7 +1149,7 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
1253 return -ENOMEM; 1149 return -ENOMEM;
1254 req->id = cpu_to_le16(USB_REQ_READ_REGS); 1150 req->id = cpu_to_le16(USB_REQ_READ_REGS);
1255 for (i = 0; i < count; i++) 1151 for (i = 0; i < count; i++)
1256 req->addr[i] = cpu_to_le16(usb_addr(usb, addresses[i])); 1152 req->addr[i] = cpu_to_le16((u16)addresses[i]);
1257 1153
1258 udev = zd_usb_to_usbdev(usb); 1154 udev = zd_usb_to_usbdev(usb);
1259 prepare_read_regs_int(usb); 1155 prepare_read_regs_int(usb);
@@ -1318,7 +1214,7 @@ int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
1318 req->id = cpu_to_le16(USB_REQ_WRITE_REGS); 1214 req->id = cpu_to_le16(USB_REQ_WRITE_REGS);
1319 for (i = 0; i < count; i++) { 1215 for (i = 0; i < count; i++) {
1320 struct reg_data *rw = &req->reg_writes[i]; 1216 struct reg_data *rw = &req->reg_writes[i];
1321 rw->addr = cpu_to_le16(usb_addr(usb, ioreqs[i].addr)); 1217 rw->addr = cpu_to_le16((u16)ioreqs[i].addr);
1322 rw->value = cpu_to_le16(ioreqs[i].value); 1218 rw->value = cpu_to_le16(ioreqs[i].value);
1323 } 1219 }
1324 1220
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index 317d37c36679..506ea6a74393 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -25,7 +25,6 @@
25#include <linux/usb.h> 25#include <linux/usb.h>
26 26
27#include "zd_def.h" 27#include "zd_def.h"
28#include "zd_types.h"
29 28
30enum devicetype { 29enum devicetype {
31 DEVICE_ZD1211 = 0, 30 DEVICE_ZD1211 = 0,
@@ -181,15 +180,14 @@ struct zd_usb_tx {
181 spinlock_t lock; 180 spinlock_t lock;
182}; 181};
183 182
184/* Contains the usb parts. The structure doesn't require a lock, because intf 183/* Contains the usb parts. The structure doesn't require a lock because intf
185 * and fw_base_offset, will not be changed after initialization. 184 * will not be changed after initialization.
186 */ 185 */
187struct zd_usb { 186struct zd_usb {
188 struct zd_usb_interrupt intr; 187 struct zd_usb_interrupt intr;
189 struct zd_usb_rx rx; 188 struct zd_usb_rx rx;
190 struct zd_usb_tx tx; 189 struct zd_usb_tx tx;
191 struct usb_interface *intf; 190 struct usb_interface *intf;
192 u16 fw_base_offset;
193}; 191};
194 192
195#define zd_usb_dev(usb) (&usb->intf->dev) 193#define zd_usb_dev(usb) (&usb->intf->dev)
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index adce4204d87d..be92695a7833 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -145,15 +145,6 @@ config HOTPLUG_PCI_SHPC
145 145
146 When in doubt, say N. 146 When in doubt, say N.
147 147
148config HOTPLUG_PCI_SHPC_POLL_EVENT_MODE
149 bool "Use polling mechanism for hot-plug events (for testing purpose)"
150 depends on HOTPLUG_PCI_SHPC
151 help
152 Say Y here if you want to use the polling mechanism for hot-plug
153 events for early platform testing.
154
155 When in doubt, say N.
156
157config HOTPLUG_PCI_RPA 148config HOTPLUG_PCI_RPA
158 tristate "RPA PCI Hotplug driver" 149 tristate "RPA PCI Hotplug driver"
159 depends on HOTPLUG_PCI && PPC_PSERIES && PPC64 && !HOTPLUG_PCI_FAKE 150 depends on HOTPLUG_PCI && PPC_PSERIES && PPC64 && !HOTPLUG_PCI_FAKE
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index bd1faebf61a0..fca978fb158e 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -773,13 +773,13 @@ static int get_gsi_base(acpi_handle handle, u32 *gsi_base)
773 goto out; 773 goto out;
774 774
775 table = obj->buffer.pointer; 775 table = obj->buffer.pointer;
776 switch (((acpi_table_entry_header *)table)->type) { 776 switch (((struct acpi_subtable_header *)table)->type) {
777 case ACPI_MADT_IOSAPIC: 777 case ACPI_MADT_TYPE_IO_SAPIC:
778 *gsi_base = ((struct acpi_table_iosapic *)table)->global_irq_base; 778 *gsi_base = ((struct acpi_madt_io_sapic *)table)->global_irq_base;
779 result = 0; 779 result = 0;
780 break; 780 break;
781 case ACPI_MADT_IOAPIC: 781 case ACPI_MADT_TYPE_IO_APIC:
782 *gsi_base = ((struct acpi_table_ioapic *)table)->global_irq_base; 782 *gsi_base = ((struct acpi_madt_io_apic *)table)->global_irq_base;
783 result = 0; 783 result = 0;
784 break; 784 break;
785 default: 785 default:
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 4fb12fcda563..d19fcae8a7c0 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -44,15 +44,20 @@ extern int pciehp_poll_time;
44extern int pciehp_debug; 44extern int pciehp_debug;
45extern int pciehp_force; 45extern int pciehp_force;
46 46
47/*#define dbg(format, arg...) do { if (pciehp_debug) printk(KERN_DEBUG "%s: " format, MY_NAME , ## arg); } while (0)*/ 47#define dbg(format, arg...) \
48#define dbg(format, arg...) do { if (pciehp_debug) printk("%s: " format, MY_NAME , ## arg); } while (0) 48 do { \
49#define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg) 49 if (pciehp_debug) \
50#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg) 50 printk("%s: " format, MY_NAME , ## arg); \
51#define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg) 51 } while (0)
52 52#define err(format, arg...) \
53 printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
54#define info(format, arg...) \
55 printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
56#define warn(format, arg...) \
57 printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
53 58
59#define SLOT_NAME_SIZE 10
54struct slot { 60struct slot {
55 struct slot *next;
56 u8 bus; 61 u8 bus;
57 u8 device; 62 u8 device;
58 u32 number; 63 u32 number;
@@ -63,6 +68,8 @@ struct slot {
63 struct hpc_ops *hpc_ops; 68 struct hpc_ops *hpc_ops;
64 struct hotplug_slot *hotplug_slot; 69 struct hotplug_slot *hotplug_slot;
65 struct list_head slot_list; 70 struct list_head slot_list;
71 char name[SLOT_NAME_SIZE];
72 unsigned long last_emi_toggle;
66}; 73};
67 74
68struct event_info { 75struct event_info {
@@ -70,34 +77,15 @@ struct event_info {
70 u8 hp_slot; 77 u8 hp_slot;
71}; 78};
72 79
73typedef u8(*php_intr_callback_t) (u8 hp_slot, void *instance_id);
74
75struct php_ctlr_state_s {
76 struct php_ctlr_state_s *pnext;
77 struct pci_dev *pci_dev;
78 unsigned int irq;
79 unsigned long flags; /* spinlock's */
80 u32 slot_device_offset;
81 u32 num_slots;
82 struct timer_list int_poll_timer; /* Added for poll event */
83 php_intr_callback_t attention_button_callback;
84 php_intr_callback_t switch_change_callback;
85 php_intr_callback_t presence_change_callback;
86 php_intr_callback_t power_fault_callback;
87 void *callback_instance_id;
88 struct ctrl_reg *creg; /* Ptr to controller register space */
89};
90
91#define MAX_EVENTS 10 80#define MAX_EVENTS 10
92struct controller { 81struct controller {
93 struct controller *next; 82 struct controller *next;
94 struct mutex crit_sect; /* critical section mutex */ 83 struct mutex crit_sect; /* critical section mutex */
95 struct mutex ctrl_lock; /* controller lock */ 84 struct mutex ctrl_lock; /* controller lock */
96 struct php_ctlr_state_s *hpc_ctlr_handle; /* HPC controller handle */
97 int num_slots; /* Number of slots on ctlr */ 85 int num_slots; /* Number of slots on ctlr */
98 int slot_num_inc; /* 1 or -1 */ 86 int slot_num_inc; /* 1 or -1 */
99 struct pci_dev *pci_dev; 87 struct pci_dev *pci_dev;
100 struct pci_bus *pci_bus; 88 struct list_head slot_list;
101 struct event_info event_queue[MAX_EVENTS]; 89 struct event_info event_queue[MAX_EVENTS];
102 struct slot *slot; 90 struct slot *slot;
103 struct hpc_ops *hpc_ops; 91 struct hpc_ops *hpc_ops;
@@ -112,6 +100,8 @@ struct controller {
112 u8 ctrlcap; 100 u8 ctrlcap;
113 u16 vendor_id; 101 u16 vendor_id;
114 u8 cap_base; 102 u8 cap_base;
103 struct timer_list poll_timer;
104 volatile int cmd_busy;
115}; 105};
116 106
117#define INT_BUTTON_IGNORE 0 107#define INT_BUTTON_IGNORE 0
@@ -131,8 +121,6 @@ struct controller {
131#define POWERON_STATE 3 121#define POWERON_STATE 3
132#define POWEROFF_STATE 4 122#define POWEROFF_STATE 4
133 123
134#define PCI_TO_PCI_BRIDGE_CLASS 0x00060400
135
136/* Error messages */ 124/* Error messages */
137#define INTERLOCK_OPEN 0x00000002 125#define INTERLOCK_OPEN 0x00000002
138#define ADD_NOT_SUPPORTED 0x00000003 126#define ADD_NOT_SUPPORTED 0x00000003
@@ -144,10 +132,6 @@ struct controller {
144#define WRONG_BUS_FREQUENCY 0x0000000D 132#define WRONG_BUS_FREQUENCY 0x0000000D
145#define POWER_FAILURE 0x0000000E 133#define POWER_FAILURE 0x0000000E
146 134
147#define REMOVE_NOT_SUPPORTED 0x00000003
148
149#define DISABLE_CARD 1
150
151/* Field definitions in Slot Capabilities Register */ 135/* Field definitions in Slot Capabilities Register */
152#define ATTN_BUTTN_PRSN 0x00000001 136#define ATTN_BUTTN_PRSN 0x00000001
153#define PWR_CTRL_PRSN 0x00000002 137#define PWR_CTRL_PRSN 0x00000002
@@ -155,6 +139,7 @@ struct controller {
155#define ATTN_LED_PRSN 0x00000008 139#define ATTN_LED_PRSN 0x00000008
156#define PWR_LED_PRSN 0x00000010 140#define PWR_LED_PRSN 0x00000010
157#define HP_SUPR_RM_SUP 0x00000020 141#define HP_SUPR_RM_SUP 0x00000020
142#define EMI_PRSN 0x00020000
158 143
159#define ATTN_BUTTN(cap) (cap & ATTN_BUTTN_PRSN) 144#define ATTN_BUTTN(cap) (cap & ATTN_BUTTN_PRSN)
160#define POWER_CTRL(cap) (cap & PWR_CTRL_PRSN) 145#define POWER_CTRL(cap) (cap & PWR_CTRL_PRSN)
@@ -162,130 +147,65 @@ struct controller {
162#define ATTN_LED(cap) (cap & ATTN_LED_PRSN) 147#define ATTN_LED(cap) (cap & ATTN_LED_PRSN)
163#define PWR_LED(cap) (cap & PWR_LED_PRSN) 148#define PWR_LED(cap) (cap & PWR_LED_PRSN)
164#define HP_SUPR_RM(cap) (cap & HP_SUPR_RM_SUP) 149#define HP_SUPR_RM(cap) (cap & HP_SUPR_RM_SUP)
165 150#define EMI(cap) (cap & EMI_PRSN)
166/* 151
167 * error Messages 152extern int pciehp_event_start_thread(void);
168 */ 153extern void pciehp_event_stop_thread(void);
169#define msg_initialization_err "Initialization failure, error=%d\n" 154extern int pciehp_enable_slot(struct slot *slot);
170#define msg_button_on "PCI slot #%s - powering on due to button press.\n" 155extern int pciehp_disable_slot(struct slot *slot);
171#define msg_button_off "PCI slot #%s - powering off due to button press.\n" 156extern u8 pciehp_handle_attention_button(u8 hp_slot, struct controller *ctrl);
172#define msg_button_cancel "PCI slot #%s - action canceled due to button press.\n" 157extern u8 pciehp_handle_switch_change(u8 hp_slot, struct controller *ctrl);
173#define msg_button_ignore "PCI slot #%s - button press ignored. (action in progress...)\n" 158extern u8 pciehp_handle_presence_change(u8 hp_slot, struct controller *ctrl);
174 159extern u8 pciehp_handle_power_fault(u8 hp_slot, struct controller *ctrl);
175/* controller functions */ 160extern int pciehp_configure_device(struct slot *p_slot);
176extern int pciehp_event_start_thread (void); 161extern int pciehp_unconfigure_device(struct slot *p_slot);
177extern void pciehp_event_stop_thread (void); 162int pcie_init(struct controller *ctrl, struct pcie_device *dev);
178extern int pciehp_enable_slot (struct slot *slot);
179extern int pciehp_disable_slot (struct slot *slot);
180
181extern u8 pciehp_handle_attention_button (u8 hp_slot, void *inst_id);
182extern u8 pciehp_handle_switch_change (u8 hp_slot, void *inst_id);
183extern u8 pciehp_handle_presence_change (u8 hp_slot, void *inst_id);
184extern u8 pciehp_handle_power_fault (u8 hp_slot, void *inst_id);
185/* extern void long_delay (int delay); */
186
187/* pci functions */
188extern int pciehp_configure_device (struct slot *p_slot);
189extern int pciehp_unconfigure_device (struct slot *p_slot);
190
191
192 163
193/* Global variables */ 164/* Global variables */
194extern struct controller *pciehp_ctrl_list; 165extern struct controller *pciehp_ctrl_list;
195 166
196/* Inline functions */
197
198static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device) 167static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device)
199{ 168{
200 struct slot *p_slot, *tmp_slot = NULL; 169 struct slot *slot;
201
202 p_slot = ctrl->slot;
203 170
204 while (p_slot && (p_slot->device != device)) { 171 list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
205 tmp_slot = p_slot; 172 if (slot->device == device)
206 p_slot = p_slot->next; 173 return slot;
207 } 174 }
208 if (p_slot == NULL) {
209 err("ERROR: pciehp_find_slot device=0x%x\n", device);
210 p_slot = tmp_slot;
211 }
212
213 return p_slot;
214}
215
216static inline int wait_for_ctrl_irq(struct controller *ctrl)
217{
218 int retval = 0;
219
220 DECLARE_WAITQUEUE(wait, current);
221
222 add_wait_queue(&ctrl->queue, &wait);
223 if (!pciehp_poll_mode)
224 /* Sleep for up to 1 second */
225 msleep_interruptible(1000);
226 else
227 msleep_interruptible(2500);
228
229 remove_wait_queue(&ctrl->queue, &wait);
230 if (signal_pending(current))
231 retval = -EINTR;
232
233 return retval;
234}
235
236#define SLOT_NAME_SIZE 10
237 175
238static inline void make_slot_name(char *buffer, int buffer_size, struct slot *slot) 176 err("%s: slot (device=0x%x) not found\n", __FUNCTION__, device);
239{ 177 return NULL;
240 snprintf(buffer, buffer_size, "%04d_%04d", slot->bus, slot->number);
241} 178}
242 179
243enum php_ctlr_type {
244 PCI,
245 ISA,
246 ACPI
247};
248
249int pcie_init(struct controller *ctrl, struct pcie_device *dev);
250
251/* This has no meaning for PCI Express, as there is only 1 slot per port */
252int pcie_get_ctlr_slot_config(struct controller *ctrl,
253 int *num_ctlr_slots,
254 int *first_device_num,
255 int *physical_slot_num,
256 u8 *ctrlcap);
257
258struct hpc_ops { 180struct hpc_ops {
259 int (*power_on_slot) (struct slot *slot); 181 int (*power_on_slot)(struct slot *slot);
260 int (*power_off_slot) (struct slot *slot); 182 int (*power_off_slot)(struct slot *slot);
261 int (*get_power_status) (struct slot *slot, u8 *status); 183 int (*get_power_status)(struct slot *slot, u8 *status);
262 int (*get_attention_status) (struct slot *slot, u8 *status); 184 int (*get_attention_status)(struct slot *slot, u8 *status);
263 int (*set_attention_status) (struct slot *slot, u8 status); 185 int (*set_attention_status)(struct slot *slot, u8 status);
264 int (*get_latch_status) (struct slot *slot, u8 *status); 186 int (*get_latch_status)(struct slot *slot, u8 *status);
265 int (*get_adapter_status) (struct slot *slot, u8 *status); 187 int (*get_adapter_status)(struct slot *slot, u8 *status);
266 188 int (*get_emi_status)(struct slot *slot, u8 *status);
267 int (*get_max_bus_speed) (struct slot *slot, enum pci_bus_speed *speed); 189 int (*toggle_emi)(struct slot *slot);
268 int (*get_cur_bus_speed) (struct slot *slot, enum pci_bus_speed *speed); 190 int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
269 191 int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
270 int (*get_max_lnk_width) (struct slot *slot, enum pcie_link_width *value); 192 int (*get_max_lnk_width)(struct slot *slot, enum pcie_link_width *val);
271 int (*get_cur_lnk_width) (struct slot *slot, enum pcie_link_width *value); 193 int (*get_cur_lnk_width)(struct slot *slot, enum pcie_link_width *val);
272 194 int (*query_power_fault)(struct slot *slot);
273 int (*query_power_fault) (struct slot *slot); 195 void (*green_led_on)(struct slot *slot);
274 void (*green_led_on) (struct slot *slot); 196 void (*green_led_off)(struct slot *slot);
275 void (*green_led_off) (struct slot *slot); 197 void (*green_led_blink)(struct slot *slot);
276 void (*green_led_blink) (struct slot *slot); 198 void (*release_ctlr)(struct controller *ctrl);
277 void (*release_ctlr) (struct controller *ctrl); 199 int (*check_lnk_status)(struct controller *ctrl);
278 int (*check_lnk_status) (struct controller *ctrl);
279}; 200};
280 201
281
282#ifdef CONFIG_ACPI 202#ifdef CONFIG_ACPI
283#include <acpi/acpi.h> 203#include <acpi/acpi.h>
284#include <acpi/acpi_bus.h> 204#include <acpi/acpi_bus.h>
285#include <acpi/actypes.h> 205#include <acpi/actypes.h>
286#include <linux/pci-acpi.h> 206#include <linux/pci-acpi.h>
287 207
288#define pciehp_get_hp_hw_control_from_firmware(dev) \ 208#define pciehp_get_hp_hw_control_from_firmware(dev) \
289 pciehp_acpi_get_hp_hw_control_from_firmware(dev) 209 pciehp_acpi_get_hp_hw_control_from_firmware(dev)
290static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev, 210static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev,
291 struct hotplug_params *hpp) 211 struct hotplug_params *hpp)
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index f13f31323e85..a92eda6e02f6 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -34,6 +34,7 @@
34#include <linux/pci.h> 34#include <linux/pci.h>
35#include "pciehp.h" 35#include "pciehp.h"
36#include <linux/interrupt.h> 36#include <linux/interrupt.h>
37#include <linux/time.h>
37 38
38/* Global variables */ 39/* Global variables */
39int pciehp_debug; 40int pciehp_debug;
@@ -87,6 +88,95 @@ static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
87 .get_cur_bus_speed = get_cur_bus_speed, 88 .get_cur_bus_speed = get_cur_bus_speed,
88}; 89};
89 90
91/*
92 * Check the status of the Electro Mechanical Interlock (EMI)
93 */
94static int get_lock_status(struct hotplug_slot *hotplug_slot, u8 *value)
95{
96 struct slot *slot = hotplug_slot->private;
97 return (slot->hpc_ops->get_emi_status(slot, value));
98}
99
100/*
101 * sysfs interface for the Electro Mechanical Interlock (EMI)
102 * 1 == locked, 0 == unlocked
103 */
104static ssize_t lock_read_file(struct hotplug_slot *slot, char *buf)
105{
106 int retval;
107 u8 value;
108
109 retval = get_lock_status(slot, &value);
110 if (retval)
111 goto lock_read_exit;
112 retval = sprintf (buf, "%d\n", value);
113
114lock_read_exit:
115 return retval;
116}
117
118/*
119 * Change the status of the Electro Mechanical Interlock (EMI)
120 * This is a toggle - in addition there must be at least 1 second
121 * in between toggles.
122 */
123static int set_lock_status(struct hotplug_slot *hotplug_slot, u8 status)
124{
125 struct slot *slot = hotplug_slot->private;
126 int retval;
127 u8 value;
128
129 mutex_lock(&slot->ctrl->crit_sect);
130
131 /* has it been >1 sec since our last toggle? */
132 if ((get_seconds() - slot->last_emi_toggle) < 1)
133 return -EINVAL;
134
135 /* see what our current state is */
136 retval = get_lock_status(hotplug_slot, &value);
137 if (retval || (value == status))
138 goto set_lock_exit;
139
140 slot->hpc_ops->toggle_emi(slot);
141set_lock_exit:
142 mutex_unlock(&slot->ctrl->crit_sect);
143 return 0;
144}
145
146/*
147 * sysfs interface which allows the user to toggle the Electro Mechanical
148 * Interlock. Valid values are either 0 or 1. 0 == unlock, 1 == lock
149 */
150static ssize_t lock_write_file(struct hotplug_slot *slot, const char *buf,
151 size_t count)
152{
153 unsigned long llock;
154 u8 lock;
155 int retval = 0;
156
157 llock = simple_strtoul(buf, NULL, 10);
158 lock = (u8)(llock & 0xff);
159
160 switch (lock) {
161 case 0:
162 case 1:
163 retval = set_lock_status(slot, lock);
164 break;
165 default:
166 err ("%d is an invalid lock value\n", lock);
167 retval = -EINVAL;
168 }
169 if (retval)
170 return retval;
171 return count;
172}
173
174static struct hotplug_slot_attribute hotplug_slot_attr_lock = {
175 .attr = {.name = "lock", .mode = S_IFREG | S_IRUGO | S_IWUSR},
176 .show = lock_read_file,
177 .store = lock_write_file
178};
179
90/** 180/**
91 * release_slot - free up the memory used by a slot 181 * release_slot - free up the memory used by a slot
92 * @hotplug_slot: slot to free 182 * @hotplug_slot: slot to free
@@ -98,148 +188,108 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
98 dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); 188 dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name);
99 189
100 kfree(slot->hotplug_slot->info); 190 kfree(slot->hotplug_slot->info);
101 kfree(slot->hotplug_slot->name);
102 kfree(slot->hotplug_slot); 191 kfree(slot->hotplug_slot);
103 kfree(slot); 192 kfree(slot);
104} 193}
105 194
195static void make_slot_name(struct slot *slot)
196{
197 snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%04d_%04d",
198 slot->bus, slot->number);
199}
200
106static int init_slots(struct controller *ctrl) 201static int init_slots(struct controller *ctrl)
107{ 202{
108 struct slot *slot; 203 struct slot *slot;
109 struct hpc_ops *hpc_ops;
110 struct hotplug_slot *hotplug_slot; 204 struct hotplug_slot *hotplug_slot;
111 struct hotplug_slot_info *hotplug_slot_info; 205 struct hotplug_slot_info *info;
112 u8 number_of_slots; 206 int retval = -ENOMEM;
113 u8 slot_device; 207 int i;
114 u32 slot_number;
115 int result = -ENOMEM;
116 208
117 number_of_slots = ctrl->num_slots; 209 for (i = 0; i < ctrl->num_slots; i++) {
118 slot_device = ctrl->slot_device_offset;
119 slot_number = ctrl->first_slot;
120
121 while (number_of_slots) {
122 slot = kzalloc(sizeof(*slot), GFP_KERNEL); 210 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
123 if (!slot) 211 if (!slot)
124 goto error; 212 goto error;
125 213
126 slot->hotplug_slot = 214 hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
127 kzalloc(sizeof(*(slot->hotplug_slot)), 215 if (!hotplug_slot)
128 GFP_KERNEL);
129 if (!slot->hotplug_slot)
130 goto error_slot; 216 goto error_slot;
131 hotplug_slot = slot->hotplug_slot; 217 slot->hotplug_slot = hotplug_slot;
132 218
133 hotplug_slot->info = 219 info = kzalloc(sizeof(*info), GFP_KERNEL);
134 kzalloc(sizeof(*(hotplug_slot->info)), 220 if (!info)
135 GFP_KERNEL);
136 if (!hotplug_slot->info)
137 goto error_hpslot; 221 goto error_hpslot;
138 hotplug_slot_info = hotplug_slot->info; 222 hotplug_slot->info = info;
139 hotplug_slot->name = kmalloc(SLOT_NAME_SIZE, GFP_KERNEL);
140 if (!hotplug_slot->name)
141 goto error_info;
142 223
143 slot->ctrl = ctrl; 224 hotplug_slot->name = slot->name;
144 slot->bus = ctrl->slot_bus;
145 slot->device = slot_device;
146 slot->hpc_ops = hpc_ops = ctrl->hpc_ops;
147 225
226 slot->hp_slot = i;
227 slot->ctrl = ctrl;
228 slot->bus = ctrl->pci_dev->subordinate->number;
229 slot->device = ctrl->slot_device_offset + i;
230 slot->hpc_ops = ctrl->hpc_ops;
148 slot->number = ctrl->first_slot; 231 slot->number = ctrl->first_slot;
149 slot->hp_slot = slot_device - ctrl->slot_device_offset;
150 232
151 /* register this slot with the hotplug pci core */ 233 /* register this slot with the hotplug pci core */
152 hotplug_slot->private = slot; 234 hotplug_slot->private = slot;
153 hotplug_slot->release = &release_slot; 235 hotplug_slot->release = &release_slot;
154 make_slot_name(hotplug_slot->name, SLOT_NAME_SIZE, slot); 236 make_slot_name(slot);
155 hotplug_slot->ops = &pciehp_hotplug_slot_ops; 237 hotplug_slot->ops = &pciehp_hotplug_slot_ops;
156 238
157 hpc_ops->get_power_status(slot, 239 get_power_status(hotplug_slot, &info->power_status);
158 &(hotplug_slot_info->power_status)); 240 get_attention_status(hotplug_slot, &info->attention_status);
159 hpc_ops->get_attention_status(slot, 241 get_latch_status(hotplug_slot, &info->latch_status);
160 &(hotplug_slot_info->attention_status)); 242 get_adapter_status(hotplug_slot, &info->adapter_status);
161 hpc_ops->get_latch_status(slot,
162 &(hotplug_slot_info->latch_status));
163 hpc_ops->get_adapter_status(slot,
164 &(hotplug_slot_info->adapter_status));
165 243
166 dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " 244 dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x "
167 "slot_device_offset=%x\n", 245 "slot_device_offset=%x\n", slot->bus, slot->device,
168 slot->bus, slot->device, slot->hp_slot, slot->number, 246 slot->hp_slot, slot->number, ctrl->slot_device_offset);
169 ctrl->slot_device_offset); 247 retval = pci_hp_register(hotplug_slot);
170 result = pci_hp_register(hotplug_slot); 248 if (retval) {
171 if (result) { 249 err ("pci_hp_register failed with error %d\n", retval);
172 err ("pci_hp_register failed with error %d\n", result); 250 goto error_info;
173 goto error_name; 251 }
252 /* create additional sysfs entries */
253 if (EMI(ctrl->ctrlcap)) {
254 retval = sysfs_create_file(&hotplug_slot->kobj,
255 &hotplug_slot_attr_lock.attr);
256 if (retval) {
257 pci_hp_deregister(hotplug_slot);
258 err("cannot create additional sysfs entries\n");
259 goto error_info;
260 }
174 } 261 }
175 262
176 slot->next = ctrl->slot; 263 list_add(&slot->slot_list, &ctrl->slot_list);
177 ctrl->slot = slot;
178
179 number_of_slots--;
180 slot_device++;
181 slot_number += ctrl->slot_num_inc;
182 } 264 }
183 265
184 return 0; 266 return 0;
185
186error_name:
187 kfree(hotplug_slot->name);
188error_info: 267error_info:
189 kfree(hotplug_slot_info); 268 kfree(info);
190error_hpslot: 269error_hpslot:
191 kfree(hotplug_slot); 270 kfree(hotplug_slot);
192error_slot: 271error_slot:
193 kfree(slot); 272 kfree(slot);
194error: 273error:
195 return result; 274 return retval;
196}
197
198
199static int cleanup_slots (struct controller * ctrl)
200{
201 struct slot *old_slot, *next_slot;
202
203 old_slot = ctrl->slot;
204 ctrl->slot = NULL;
205
206 while (old_slot) {
207 next_slot = old_slot->next;
208 pci_hp_deregister (old_slot->hotplug_slot);
209 old_slot = next_slot;
210 }
211
212
213 return(0);
214} 275}
215 276
216static int get_ctlr_slot_config(struct controller *ctrl) 277static void cleanup_slots(struct controller *ctrl)
217{ 278{
218 int num_ctlr_slots; /* Not needed; PCI Express has 1 slot per port*/ 279 struct list_head *tmp;
219 int first_device_num; /* Not needed */ 280 struct list_head *next;
220 int physical_slot_num; 281 struct slot *slot;
221 u8 ctrlcap;
222 int rc;
223 282
224 rc = pcie_get_ctlr_slot_config(ctrl, &num_ctlr_slots, &first_device_num, &physical_slot_num, &ctrlcap); 283 list_for_each_safe(tmp, next, &ctrl->slot_list) {
225 if (rc) { 284 slot = list_entry(tmp, struct slot, slot_list);
226 err("%s: get_ctlr_slot_config fail for b:d (%x:%x)\n", __FUNCTION__, ctrl->bus, ctrl->device); 285 list_del(&slot->slot_list);
227 return (-1); 286 if (EMI(ctrl->ctrlcap))
287 sysfs_remove_file(&slot->hotplug_slot->kobj,
288 &hotplug_slot_attr_lock.attr);
289 pci_hp_deregister(slot->hotplug_slot);
228 } 290 }
229
230 ctrl->num_slots = num_ctlr_slots; /* PCI Express has 1 slot per port */
231 ctrl->slot_device_offset = first_device_num;
232 ctrl->first_slot = physical_slot_num;
233 ctrl->ctrlcap = ctrlcap;
234
235 dbg("%s: bus(0x%x) num_slot(0x%x) 1st_dev(0x%x) psn(0x%x) ctrlcap(%x) for b:d (%x:%x)\n",
236 __FUNCTION__, ctrl->slot_bus, num_ctlr_slots, first_device_num, physical_slot_num, ctrlcap,
237 ctrl->bus, ctrl->device);
238
239 return (0);
240} 291}
241 292
242
243/* 293/*
244 * set_attention_status - Turns the Amber LED for a slot on, off or blink 294 * set_attention_status - Turns the Amber LED for a slot on, off or blink
245 */ 295 */
@@ -378,8 +428,6 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
378 int rc; 428 int rc;
379 struct controller *ctrl; 429 struct controller *ctrl;
380 struct slot *t_slot; 430 struct slot *t_slot;
381 int first_device_num = 0 ; /* first PCI device number supported by this PCIE */
382 int num_ctlr_slots; /* number of slots supported by this HPC */
383 u8 value; 431 u8 value;
384 struct pci_dev *pdev; 432 struct pci_dev *pdev;
385 433
@@ -388,6 +436,7 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
388 err("%s : out of memory\n", __FUNCTION__); 436 err("%s : out of memory\n", __FUNCTION__);
389 goto err_out_none; 437 goto err_out_none;
390 } 438 }
439 INIT_LIST_HEAD(&ctrl->slot_list);
391 440
392 pdev = dev->port; 441 pdev = dev->port;
393 ctrl->pci_dev = pdev; 442 ctrl->pci_dev = pdev;
@@ -400,13 +449,6 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
400 449
401 pci_set_drvdata(pdev, ctrl); 450 pci_set_drvdata(pdev, ctrl);
402 451
403 ctrl->pci_bus = kmalloc(sizeof(*ctrl->pci_bus), GFP_KERNEL);
404 if (!ctrl->pci_bus) {
405 err("%s: out of memory\n", __FUNCTION__);
406 rc = -ENOMEM;
407 goto err_out_unmap_mmio_region;
408 }
409 memcpy (ctrl->pci_bus, pdev->bus, sizeof (*ctrl->pci_bus));
410 ctrl->bus = pdev->bus->number; /* ctrl bus */ 452 ctrl->bus = pdev->bus->number; /* ctrl bus */
411 ctrl->slot_bus = pdev->subordinate->number; /* bus controlled by this HPC */ 453 ctrl->slot_bus = pdev->subordinate->number; /* bus controlled by this HPC */
412 454
@@ -415,26 +457,14 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
415 dbg("%s: ctrl bus=0x%x, device=%x, function=%x, irq=%x\n", __FUNCTION__, 457 dbg("%s: ctrl bus=0x%x, device=%x, function=%x, irq=%x\n", __FUNCTION__,
416 ctrl->bus, ctrl->device, ctrl->function, pdev->irq); 458 ctrl->bus, ctrl->device, ctrl->function, pdev->irq);
417 459
418 /*
419 * Save configuration headers for this and subordinate PCI buses
420 */
421
422 rc = get_ctlr_slot_config(ctrl);
423 if (rc) {
424 err(msg_initialization_err, rc);
425 goto err_out_free_ctrl_bus;
426 }
427 first_device_num = ctrl->slot_device_offset;
428 num_ctlr_slots = ctrl->num_slots;
429
430 /* Setup the slot information structures */ 460 /* Setup the slot information structures */
431 rc = init_slots(ctrl); 461 rc = init_slots(ctrl);
432 if (rc) { 462 if (rc) {
433 err(msg_initialization_err, 6); 463 err("%s: slot initialization failed\n", PCIE_MODULE_NAME);
434 goto err_out_free_ctrl_slot; 464 goto err_out_release_ctlr;
435 } 465 }
436 466
437 t_slot = pciehp_find_slot(ctrl, first_device_num); 467 t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
438 468
439 /* Finish setting up the hot plug ctrl device */ 469 /* Finish setting up the hot plug ctrl device */
440 ctrl->next_event = 0; 470 ctrl->next_event = 0;
@@ -447,32 +477,18 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
447 pciehp_ctrl_list = ctrl; 477 pciehp_ctrl_list = ctrl;
448 } 478 }
449 479
450 /* Wait for exclusive access to hardware */
451 mutex_lock(&ctrl->ctrl_lock);
452
453 t_slot->hpc_ops->get_adapter_status(t_slot, &value); /* Check if slot is occupied */ 480 t_slot->hpc_ops->get_adapter_status(t_slot, &value); /* Check if slot is occupied */
454
455 if ((POWER_CTRL(ctrl->ctrlcap)) && !value) { 481 if ((POWER_CTRL(ctrl->ctrlcap)) && !value) {
456 rc = t_slot->hpc_ops->power_off_slot(t_slot); /* Power off slot if not occupied*/ 482 rc = t_slot->hpc_ops->power_off_slot(t_slot); /* Power off slot if not occupied*/
457 if (rc) { 483 if (rc)
458 /* Done with exclusive hardware access */
459 mutex_unlock(&ctrl->ctrl_lock);
460 goto err_out_free_ctrl_slot; 484 goto err_out_free_ctrl_slot;
461 } else
462 /* Wait for the command to complete */
463 wait_for_ctrl_irq (ctrl);
464 } 485 }
465 486
466 /* Done with exclusive hardware access */
467 mutex_unlock(&ctrl->ctrl_lock);
468
469 return 0; 487 return 0;
470 488
471err_out_free_ctrl_slot: 489err_out_free_ctrl_slot:
472 cleanup_slots(ctrl); 490 cleanup_slots(ctrl);
473err_out_free_ctrl_bus: 491err_out_release_ctlr:
474 kfree(ctrl->pci_bus);
475err_out_unmap_mmio_region:
476 ctrl->hpc_ops->release_ctlr(ctrl); 492 ctrl->hpc_ops->release_ctlr(ctrl);
477err_out_free_ctrl: 493err_out_free_ctrl:
478 kfree(ctrl); 494 kfree(ctrl);
@@ -506,8 +522,6 @@ static void __exit unload_pciehpd(void)
506 while (ctrl) { 522 while (ctrl) {
507 cleanup_slots(ctrl); 523 cleanup_slots(ctrl);
508 524
509 kfree (ctrl->pci_bus);
510
511 ctrl->hpc_ops->release_ctlr(ctrl); 525 ctrl->hpc_ops->release_ctlr(ctrl);
512 526
513 tctrl = ctrl; 527 tctrl = ctrl;
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 372c63e35aa9..4283ef56dbd9 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -48,9 +48,8 @@ static inline char *slot_name(struct slot *p_slot)
48 return p_slot->hotplug_slot->name; 48 return p_slot->hotplug_slot->name;
49} 49}
50 50
51u8 pciehp_handle_attention_button(u8 hp_slot, void *inst_id) 51u8 pciehp_handle_attention_button(u8 hp_slot, struct controller *ctrl)
52{ 52{
53 struct controller *ctrl = (struct controller *) inst_id;
54 struct slot *p_slot; 53 struct slot *p_slot;
55 u8 rc = 0; 54 u8 rc = 0;
56 u8 getstatus; 55 u8 getstatus;
@@ -101,9 +100,8 @@ u8 pciehp_handle_attention_button(u8 hp_slot, void *inst_id)
101 100
102} 101}
103 102
104u8 pciehp_handle_switch_change(u8 hp_slot, void *inst_id) 103u8 pciehp_handle_switch_change(u8 hp_slot, struct controller *ctrl)
105{ 104{
106 struct controller *ctrl = (struct controller *) inst_id;
107 struct slot *p_slot; 105 struct slot *p_slot;
108 u8 rc = 0; 106 u8 rc = 0;
109 u8 getstatus; 107 u8 getstatus;
@@ -143,9 +141,8 @@ u8 pciehp_handle_switch_change(u8 hp_slot, void *inst_id)
143 return rc; 141 return rc;
144} 142}
145 143
146u8 pciehp_handle_presence_change(u8 hp_slot, void *inst_id) 144u8 pciehp_handle_presence_change(u8 hp_slot, struct controller *ctrl)
147{ 145{
148 struct controller *ctrl = (struct controller *) inst_id;
149 struct slot *p_slot; 146 struct slot *p_slot;
150 u8 presence_save, rc = 0; 147 u8 presence_save, rc = 0;
151 struct event_info *taskInfo; 148 struct event_info *taskInfo;
@@ -187,9 +184,8 @@ u8 pciehp_handle_presence_change(u8 hp_slot, void *inst_id)
187 return rc; 184 return rc;
188} 185}
189 186
190u8 pciehp_handle_power_fault(u8 hp_slot, void *inst_id) 187u8 pciehp_handle_power_fault(u8 hp_slot, struct controller *ctrl)
191{ 188{
192 struct controller *ctrl = (struct controller *) inst_id;
193 struct slot *p_slot; 189 struct slot *p_slot;
194 u8 rc = 0; 190 u8 rc = 0;
195 struct event_info *taskInfo; 191 struct event_info *taskInfo;
@@ -233,35 +229,25 @@ u8 pciehp_handle_power_fault(u8 hp_slot, void *inst_id)
233 229
234static void set_slot_off(struct controller *ctrl, struct slot * pslot) 230static void set_slot_off(struct controller *ctrl, struct slot * pslot)
235{ 231{
236 /* Wait for exclusive access to hardware */
237 mutex_lock(&ctrl->ctrl_lock);
238
239 /* turn off slot, turn on Amber LED, turn off Green LED if supported*/ 232 /* turn off slot, turn on Amber LED, turn off Green LED if supported*/
240 if (POWER_CTRL(ctrl->ctrlcap)) { 233 if (POWER_CTRL(ctrl->ctrlcap)) {
241 if (pslot->hpc_ops->power_off_slot(pslot)) { 234 if (pslot->hpc_ops->power_off_slot(pslot)) {
242 err("%s: Issue of Slot Power Off command failed\n", __FUNCTION__); 235 err("%s: Issue of Slot Power Off command failed\n",
243 mutex_unlock(&ctrl->ctrl_lock); 236 __FUNCTION__);
244 return; 237 return;
245 } 238 }
246 wait_for_ctrl_irq (ctrl);
247 } 239 }
248 240
249 if (PWR_LED(ctrl->ctrlcap)) { 241 if (PWR_LED(ctrl->ctrlcap))
250 pslot->hpc_ops->green_led_off(pslot); 242 pslot->hpc_ops->green_led_off(pslot);
251 wait_for_ctrl_irq (ctrl);
252 }
253 243
254 if (ATTN_LED(ctrl->ctrlcap)) { 244 if (ATTN_LED(ctrl->ctrlcap)) {
255 if (pslot->hpc_ops->set_attention_status(pslot, 1)) { 245 if (pslot->hpc_ops->set_attention_status(pslot, 1)) {
256 err("%s: Issue of Set Attention Led command failed\n", __FUNCTION__); 246 err("%s: Issue of Set Attention Led command failed\n",
257 mutex_unlock(&ctrl->ctrl_lock); 247 __FUNCTION__);
258 return; 248 return;
259 } 249 }
260 wait_for_ctrl_irq (ctrl);
261 } 250 }
262
263 /* Done with exclusive hardware access */
264 mutex_unlock(&ctrl->ctrl_lock);
265} 251}
266 252
267/** 253/**
@@ -274,7 +260,7 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
274static int board_added(struct slot *p_slot) 260static int board_added(struct slot *p_slot)
275{ 261{
276 u8 hp_slot; 262 u8 hp_slot;
277 int rc = 0; 263 int retval = 0;
278 struct controller *ctrl = p_slot->ctrl; 264 struct controller *ctrl = p_slot->ctrl;
279 265
280 hp_slot = p_slot->device - ctrl->slot_device_offset; 266 hp_slot = p_slot->device - ctrl->slot_device_offset;
@@ -283,53 +269,38 @@ static int board_added(struct slot *p_slot)
283 __FUNCTION__, p_slot->device, 269 __FUNCTION__, p_slot->device,
284 ctrl->slot_device_offset, hp_slot); 270 ctrl->slot_device_offset, hp_slot);
285 271
286 /* Wait for exclusive access to hardware */
287 mutex_lock(&ctrl->ctrl_lock);
288
289 if (POWER_CTRL(ctrl->ctrlcap)) { 272 if (POWER_CTRL(ctrl->ctrlcap)) {
290 /* Power on slot */ 273 /* Power on slot */
291 rc = p_slot->hpc_ops->power_on_slot(p_slot); 274 retval = p_slot->hpc_ops->power_on_slot(p_slot);
292 if (rc) { 275 if (retval)
293 mutex_unlock(&ctrl->ctrl_lock); 276 return retval;
294 return -1;
295 }
296
297 /* Wait for the command to complete */
298 wait_for_ctrl_irq (ctrl);
299 } 277 }
300 278
301 if (PWR_LED(ctrl->ctrlcap)) { 279 if (PWR_LED(ctrl->ctrlcap))
302 p_slot->hpc_ops->green_led_blink(p_slot); 280 p_slot->hpc_ops->green_led_blink(p_slot);
303
304 /* Wait for the command to complete */
305 wait_for_ctrl_irq (ctrl);
306 }
307
308 /* Done with exclusive hardware access */
309 mutex_unlock(&ctrl->ctrl_lock);
310 281
311 /* Wait for ~1 second */ 282 /* Wait for ~1 second */
312 wait_for_ctrl_irq (ctrl); 283 msleep(1000);
313 284
314 /* Check link training status */ 285 /* Check link training status */
315 rc = p_slot->hpc_ops->check_lnk_status(ctrl); 286 retval = p_slot->hpc_ops->check_lnk_status(ctrl);
316 if (rc) { 287 if (retval) {
317 err("%s: Failed to check link status\n", __FUNCTION__); 288 err("%s: Failed to check link status\n", __FUNCTION__);
318 set_slot_off(ctrl, p_slot); 289 set_slot_off(ctrl, p_slot);
319 return rc; 290 return retval;
320 } 291 }
321 292
322 /* Check for a power fault */ 293 /* Check for a power fault */
323 if (p_slot->hpc_ops->query_power_fault(p_slot)) { 294 if (p_slot->hpc_ops->query_power_fault(p_slot)) {
324 dbg("%s: power fault detected\n", __FUNCTION__); 295 dbg("%s: power fault detected\n", __FUNCTION__);
325 rc = POWER_FAILURE; 296 retval = POWER_FAILURE;
326 goto err_exit; 297 goto err_exit;
327 } 298 }
328 299
329 rc = pciehp_configure_device(p_slot); 300 retval = pciehp_configure_device(p_slot);
330 if (rc) { 301 if (retval) {
331 err("Cannot add device 0x%x:%x\n", p_slot->bus, 302 err("Cannot add device 0x%x:%x\n", p_slot->bus,
332 p_slot->device); 303 p_slot->device);
333 goto err_exit; 304 goto err_exit;
334 } 305 }
335 306
@@ -338,26 +309,16 @@ static int board_added(struct slot *p_slot)
338 */ 309 */
339 if (pcie_mch_quirk) 310 if (pcie_mch_quirk)
340 pci_fixup_device(pci_fixup_final, ctrl->pci_dev); 311 pci_fixup_device(pci_fixup_final, ctrl->pci_dev);
341 if (PWR_LED(ctrl->ctrlcap)) { 312 if (PWR_LED(ctrl->ctrlcap))
342 /* Wait for exclusive access to hardware */
343 mutex_lock(&ctrl->ctrl_lock);
344
345 p_slot->hpc_ops->green_led_on(p_slot); 313 p_slot->hpc_ops->green_led_on(p_slot);
346 314
347 /* Wait for the command to complete */
348 wait_for_ctrl_irq (ctrl);
349
350 /* Done with exclusive hardware access */
351 mutex_unlock(&ctrl->ctrl_lock);
352 }
353 return 0; 315 return 0;
354 316
355err_exit: 317err_exit:
356 set_slot_off(ctrl, p_slot); 318 set_slot_off(ctrl, p_slot);
357 return -1; 319 return retval;
358} 320}
359 321
360
361/** 322/**
362 * remove_board - Turns off slot and LED's 323 * remove_board - Turns off slot and LED's
363 * 324 *
@@ -366,44 +327,32 @@ static int remove_board(struct slot *p_slot)
366{ 327{
367 u8 device; 328 u8 device;
368 u8 hp_slot; 329 u8 hp_slot;
369 int rc; 330 int retval = 0;
370 struct controller *ctrl = p_slot->ctrl; 331 struct controller *ctrl = p_slot->ctrl;
371 332
372 if (pciehp_unconfigure_device(p_slot)) 333 retval = pciehp_unconfigure_device(p_slot);
373 return 1; 334 if (retval)
335 return retval;
374 336
375 device = p_slot->device; 337 device = p_slot->device;
376
377 hp_slot = p_slot->device - ctrl->slot_device_offset; 338 hp_slot = p_slot->device - ctrl->slot_device_offset;
378 p_slot = pciehp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); 339 p_slot = pciehp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
379 340
380 dbg("In %s, hp_slot = %d\n", __FUNCTION__, hp_slot); 341 dbg("In %s, hp_slot = %d\n", __FUNCTION__, hp_slot);
381 342
382 /* Wait for exclusive access to hardware */
383 mutex_lock(&ctrl->ctrl_lock);
384
385 if (POWER_CTRL(ctrl->ctrlcap)) { 343 if (POWER_CTRL(ctrl->ctrlcap)) {
386 /* power off slot */ 344 /* power off slot */
387 rc = p_slot->hpc_ops->power_off_slot(p_slot); 345 retval = p_slot->hpc_ops->power_off_slot(p_slot);
388 if (rc) { 346 if (retval) {
389 err("%s: Issue of Slot Disable command failed\n", __FUNCTION__); 347 err("%s: Issue of Slot Disable command failed\n",
390 mutex_unlock(&ctrl->ctrl_lock); 348 __FUNCTION__);
391 return rc; 349 return retval;
392 } 350 }
393 /* Wait for the command to complete */
394 wait_for_ctrl_irq (ctrl);
395 } 351 }
396 352
397 if (PWR_LED(ctrl->ctrlcap)) { 353 if (PWR_LED(ctrl->ctrlcap))
398 /* turn off Green LED */ 354 /* turn off Green LED */
399 p_slot->hpc_ops->green_led_off(p_slot); 355 p_slot->hpc_ops->green_led_off(p_slot);
400
401 /* Wait for the command to complete */
402 wait_for_ctrl_irq (ctrl);
403 }
404
405 /* Done with exclusive hardware access */
406 mutex_unlock(&ctrl->ctrl_lock);
407 356
408 return 0; 357 return 0;
409} 358}
@@ -448,18 +397,10 @@ static void pciehp_pushbutton_thread(unsigned long slot)
448 dbg("%s: adding bus:device(%x:%x)\n", __FUNCTION__, 397 dbg("%s: adding bus:device(%x:%x)\n", __FUNCTION__,
449 p_slot->bus, p_slot->device); 398 p_slot->bus, p_slot->device);
450 399
451 if (pciehp_enable_slot(p_slot) && PWR_LED(p_slot->ctrl->ctrlcap)) { 400 if (pciehp_enable_slot(p_slot) &&
452 /* Wait for exclusive access to hardware */ 401 PWR_LED(p_slot->ctrl->ctrlcap))
453 mutex_lock(&p_slot->ctrl->ctrl_lock);
454
455 p_slot->hpc_ops->green_led_off(p_slot); 402 p_slot->hpc_ops->green_led_off(p_slot);
456 403
457 /* Wait for the command to complete */
458 wait_for_ctrl_irq (p_slot->ctrl);
459
460 /* Done with exclusive hardware access */
461 mutex_unlock(&p_slot->ctrl->ctrl_lock);
462 }
463 p_slot->state = STATIC_STATE; 404 p_slot->state = STATIC_STATE;
464 } 405 }
465 406
@@ -498,18 +439,10 @@ static void pciehp_surprise_rm_thread(unsigned long slot)
498 dbg("%s: adding bus:device(%x:%x)\n", 439 dbg("%s: adding bus:device(%x:%x)\n",
499 __FUNCTION__, p_slot->bus, p_slot->device); 440 __FUNCTION__, p_slot->bus, p_slot->device);
500 441
501 if (pciehp_enable_slot(p_slot) && PWR_LED(p_slot->ctrl->ctrlcap)) { 442 if (pciehp_enable_slot(p_slot) &&
502 /* Wait for exclusive access to hardware */ 443 PWR_LED(p_slot->ctrl->ctrlcap))
503 mutex_lock(&p_slot->ctrl->ctrl_lock);
504
505 p_slot->hpc_ops->green_led_off(p_slot); 444 p_slot->hpc_ops->green_led_off(p_slot);
506 445
507 /* Wait for the command to complete */
508 wait_for_ctrl_irq (p_slot->ctrl);
509
510 /* Done with exclusive hardware access */
511 mutex_unlock(&p_slot->ctrl->ctrl_lock);
512 }
513 p_slot->state = STATIC_STATE; 446 p_slot->state = STATIC_STATE;
514 } 447 }
515 448
@@ -620,46 +553,24 @@ static void interrupt_event_handler(struct controller *ctrl)
620 553
621 switch (p_slot->state) { 554 switch (p_slot->state) {
622 case BLINKINGOFF_STATE: 555 case BLINKINGOFF_STATE:
623 /* Wait for exclusive access to hardware */ 556 if (PWR_LED(ctrl->ctrlcap))
624 mutex_lock(&ctrl->ctrl_lock);
625
626 if (PWR_LED(ctrl->ctrlcap)) {
627 p_slot->hpc_ops->green_led_on(p_slot); 557 p_slot->hpc_ops->green_led_on(p_slot);
628 /* Wait for the command to complete */
629 wait_for_ctrl_irq (ctrl);
630 }
631 if (ATTN_LED(ctrl->ctrlcap)) {
632 p_slot->hpc_ops->set_attention_status(p_slot, 0);
633 558
634 /* Wait for the command to complete */ 559 if (ATTN_LED(ctrl->ctrlcap))
635 wait_for_ctrl_irq (ctrl); 560 p_slot->hpc_ops->set_attention_status(p_slot, 0);
636 }
637 /* Done with exclusive hardware access */
638 mutex_unlock(&ctrl->ctrl_lock);
639 break; 561 break;
640 case BLINKINGON_STATE: 562 case BLINKINGON_STATE:
641 /* Wait for exclusive access to hardware */ 563 if (PWR_LED(ctrl->ctrlcap))
642 mutex_lock(&ctrl->ctrl_lock);
643
644 if (PWR_LED(ctrl->ctrlcap)) {
645 p_slot->hpc_ops->green_led_off(p_slot); 564 p_slot->hpc_ops->green_led_off(p_slot);
646 /* Wait for the command to complete */
647 wait_for_ctrl_irq (ctrl);
648 }
649 if (ATTN_LED(ctrl->ctrlcap)){
650 p_slot->hpc_ops->set_attention_status(p_slot, 0);
651 /* Wait for the command to complete */
652 wait_for_ctrl_irq (ctrl);
653 }
654 /* Done with exclusive hardware access */
655 mutex_unlock(&ctrl->ctrl_lock);
656 565
566 if (ATTN_LED(ctrl->ctrlcap))
567 p_slot->hpc_ops->set_attention_status(p_slot, 0);
657 break; 568 break;
658 default: 569 default:
659 warn("Not a valid state\n"); 570 warn("Not a valid state\n");
660 return; 571 return;
661 } 572 }
662 info(msg_button_cancel, slot_name(p_slot)); 573 info("PCI slot #%s - action canceled due to button press.\n", slot_name(p_slot));
663 p_slot->state = STATIC_STATE; 574 p_slot->state = STATIC_STATE;
664 } 575 }
665 /* ***********Button Pressed (No action on 1st press...) */ 576 /* ***********Button Pressed (No action on 1st press...) */
@@ -672,34 +583,21 @@ static void interrupt_event_handler(struct controller *ctrl)
672 /* slot is on */ 583 /* slot is on */
673 dbg("slot is on\n"); 584 dbg("slot is on\n");
674 p_slot->state = BLINKINGOFF_STATE; 585 p_slot->state = BLINKINGOFF_STATE;
675 info(msg_button_off, slot_name(p_slot)); 586 info("PCI slot #%s - powering off due to button press.\n", slot_name(p_slot));
676 } else { 587 } else {
677 /* slot is off */ 588 /* slot is off */
678 dbg("slot is off\n"); 589 dbg("slot is off\n");
679 p_slot->state = BLINKINGON_STATE; 590 p_slot->state = BLINKINGON_STATE;
680 info(msg_button_on, slot_name(p_slot)); 591 info("PCI slot #%s - powering on due to button press.\n", slot_name(p_slot));
681 } 592 }
682 593
683 /* Wait for exclusive access to hardware */
684 mutex_lock(&ctrl->ctrl_lock);
685
686 /* blink green LED and turn off amber */ 594 /* blink green LED and turn off amber */
687 if (PWR_LED(ctrl->ctrlcap)) { 595 if (PWR_LED(ctrl->ctrlcap))
688 p_slot->hpc_ops->green_led_blink(p_slot); 596 p_slot->hpc_ops->green_led_blink(p_slot);
689 /* Wait for the command to complete */
690 wait_for_ctrl_irq (ctrl);
691 }
692 597
693 if (ATTN_LED(ctrl->ctrlcap)) { 598 if (ATTN_LED(ctrl->ctrlcap))
694 p_slot->hpc_ops->set_attention_status(p_slot, 0); 599 p_slot->hpc_ops->set_attention_status(p_slot, 0);
695 600
696 /* Wait for the command to complete */
697 wait_for_ctrl_irq (ctrl);
698 }
699
700 /* Done with exclusive hardware access */
701 mutex_unlock(&ctrl->ctrl_lock);
702
703 init_timer(&p_slot->task_event); 601 init_timer(&p_slot->task_event);
704 p_slot->task_event.expires = jiffies + 5 * HZ; /* 5 second delay */ 602 p_slot->task_event.expires = jiffies + 5 * HZ; /* 5 second delay */
705 p_slot->task_event.function = (void (*)(unsigned long)) pushbutton_helper_thread; 603 p_slot->task_event.function = (void (*)(unsigned long)) pushbutton_helper_thread;
@@ -712,21 +610,11 @@ static void interrupt_event_handler(struct controller *ctrl)
712 else if (ctrl->event_queue[loop].event_type == INT_POWER_FAULT) { 610 else if (ctrl->event_queue[loop].event_type == INT_POWER_FAULT) {
713 if (POWER_CTRL(ctrl->ctrlcap)) { 611 if (POWER_CTRL(ctrl->ctrlcap)) {
714 dbg("power fault\n"); 612 dbg("power fault\n");
715 /* Wait for exclusive access to hardware */ 613 if (ATTN_LED(ctrl->ctrlcap))
716 mutex_lock(&ctrl->ctrl_lock);
717
718 if (ATTN_LED(ctrl->ctrlcap)) {
719 p_slot->hpc_ops->set_attention_status(p_slot, 1); 614 p_slot->hpc_ops->set_attention_status(p_slot, 1);
720 wait_for_ctrl_irq (ctrl);
721 }
722 615
723 if (PWR_LED(ctrl->ctrlcap)) { 616 if (PWR_LED(ctrl->ctrlcap))
724 p_slot->hpc_ops->green_led_off(p_slot); 617 p_slot->hpc_ops->green_led_off(p_slot);
725 wait_for_ctrl_irq (ctrl);
726 }
727
728 /* Done with exclusive hardware access */
729 mutex_unlock(&ctrl->ctrl_lock);
730 } 618 }
731 } 619 }
732 /***********SURPRISE REMOVAL********************/ 620 /***********SURPRISE REMOVAL********************/
@@ -754,7 +642,6 @@ static void interrupt_event_handler(struct controller *ctrl)
754 } 642 }
755} 643}
756 644
757
758int pciehp_enable_slot(struct slot *p_slot) 645int pciehp_enable_slot(struct slot *p_slot)
759{ 646{
760 u8 getstatus = 0; 647 u8 getstatus = 0;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 25d3aadfddbf..fbc64aa2dd68 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -35,6 +35,7 @@
35#include <linux/timer.h> 35#include <linux/timer.h>
36#include <linux/pci.h> 36#include <linux/pci.h>
37#include <linux/interrupt.h> 37#include <linux/interrupt.h>
38#include <linux/time.h>
38 39
39#include "../pci.h" 40#include "../pci.h"
40#include "pciehp.h" 41#include "pciehp.h"
@@ -105,34 +106,30 @@ enum ctrl_offsets {
105 ROOTCTRL = offsetof(struct ctrl_reg, root_ctrl), 106 ROOTCTRL = offsetof(struct ctrl_reg, root_ctrl),
106 ROOTSTATUS = offsetof(struct ctrl_reg, root_status), 107 ROOTSTATUS = offsetof(struct ctrl_reg, root_status),
107}; 108};
108static int pcie_cap_base = 0; /* Base of the PCI Express capability item structure */ 109
109 110static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value)
110#define PCIE_CAP_ID(cb) ( cb + PCIECAPID ) 111{
111#define NXT_CAP_PTR(cb) ( cb + NXTCAPPTR ) 112 struct pci_dev *dev = ctrl->pci_dev;
112#define CAP_REG(cb) ( cb + CAPREG ) 113 return pci_read_config_word(dev, ctrl->cap_base + reg, value);
113#define DEV_CAP(cb) ( cb + DEVCAP ) 114}
114#define DEV_CTRL(cb) ( cb + DEVCTRL ) 115
115#define DEV_STATUS(cb) ( cb + DEVSTATUS ) 116static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value)
116#define LNK_CAP(cb) ( cb + LNKCAP ) 117{
117#define LNK_CTRL(cb) ( cb + LNKCTRL ) 118 struct pci_dev *dev = ctrl->pci_dev;
118#define LNK_STATUS(cb) ( cb + LNKSTATUS ) 119 return pci_read_config_dword(dev, ctrl->cap_base + reg, value);
119#define SLOT_CAP(cb) ( cb + SLOTCAP ) 120}
120#define SLOT_CTRL(cb) ( cb + SLOTCTRL ) 121
121#define SLOT_STATUS(cb) ( cb + SLOTSTATUS ) 122static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value)
122#define ROOT_CTRL(cb) ( cb + ROOTCTRL ) 123{
123#define ROOT_STATUS(cb) ( cb + ROOTSTATUS ) 124 struct pci_dev *dev = ctrl->pci_dev;
124 125 return pci_write_config_word(dev, ctrl->cap_base + reg, value);
125#define hp_register_read_word(pdev, reg , value) \ 126}
126 pci_read_config_word(pdev, reg, &value) 127
127 128static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
128#define hp_register_read_dword(pdev, reg , value) \ 129{
129 pci_read_config_dword(pdev, reg, &value) 130 struct pci_dev *dev = ctrl->pci_dev;
130 131 return pci_write_config_dword(dev, ctrl->cap_base + reg, value);
131#define hp_register_write_word(pdev, reg , value) \ 132}
132 pci_write_config_word(pdev, reg, value)
133
134#define hp_register_dwrite_word(pdev, reg , value) \
135 pci_write_config_dword(pdev, reg, value)
136 133
137/* Field definitions in PCI Express Capabilities Register */ 134/* Field definitions in PCI Express Capabilities Register */
138#define CAP_VER 0x000F 135#define CAP_VER 0x000F
@@ -196,6 +193,7 @@ static int pcie_cap_base = 0; /* Base of the PCI Express capability item struct
196#define ATTN_LED_CTRL 0x00C0 193#define ATTN_LED_CTRL 0x00C0
197#define PWR_LED_CTRL 0x0300 194#define PWR_LED_CTRL 0x0300
198#define PWR_CTRL 0x0400 195#define PWR_CTRL 0x0400
196#define EMI_CTRL 0x0800
199 197
200/* Attention indicator and Power indicator states */ 198/* Attention indicator and Power indicator states */
201#define LED_ON 0x01 199#define LED_ON 0x01
@@ -206,6 +204,10 @@ static int pcie_cap_base = 0; /* Base of the PCI Express capability item struct
206#define POWER_ON 0 204#define POWER_ON 0
207#define POWER_OFF 0x0400 205#define POWER_OFF 0x0400
208 206
207/* EMI Status defines */
208#define EMI_DISENGAGED 0
209#define EMI_ENGAGED 1
210
209/* Field definitions in Slot Status Register */ 211/* Field definitions in Slot Status Register */
210#define ATTN_BUTTN_PRESSED 0x0001 212#define ATTN_BUTTN_PRESSED 0x0001
211#define PWR_FAULT_DETECTED 0x0002 213#define PWR_FAULT_DETECTED 0x0002
@@ -214,114 +216,117 @@ static int pcie_cap_base = 0; /* Base of the PCI Express capability item struct
214#define CMD_COMPLETED 0x0010 216#define CMD_COMPLETED 0x0010
215#define MRL_STATE 0x0020 217#define MRL_STATE 0x0020
216#define PRSN_STATE 0x0040 218#define PRSN_STATE 0x0040
219#define EMI_STATE 0x0080
220#define EMI_STATUS_BIT 7
217 221
218static spinlock_t hpc_event_lock; 222static spinlock_t hpc_event_lock;
219 223
220DEFINE_DBG_BUFFER /* Debug string buffer for entire HPC defined here */ 224DEFINE_DBG_BUFFER /* Debug string buffer for entire HPC defined here */
221static struct php_ctlr_state_s *php_ctlr_list_head; /* HPC state linked list */
222static int ctlr_seq_num = 0; /* Controller sequence # */ 225static int ctlr_seq_num = 0; /* Controller sequence # */
223static spinlock_t list_lock;
224
225static irqreturn_t pcie_isr(int IRQ, void *dev_id);
226 226
227static void start_int_poll_timer(struct php_ctlr_state_s *php_ctlr, int seconds); 227static irqreturn_t pcie_isr(int irq, void *dev_id);
228static void start_int_poll_timer(struct controller *ctrl, int sec);
228 229
229/* This is the interrupt polling timeout function. */ 230/* This is the interrupt polling timeout function. */
230static void int_poll_timeout(unsigned long lphp_ctlr) 231static void int_poll_timeout(unsigned long data)
231{ 232{
232 struct php_ctlr_state_s *php_ctlr = (struct php_ctlr_state_s *)lphp_ctlr; 233 struct controller *ctrl = (struct controller *)data;
233 234
234 DBG_ENTER_ROUTINE 235 DBG_ENTER_ROUTINE
235 236
236 if ( !php_ctlr ) {
237 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
238 return;
239 }
240
241 /* Poll for interrupt events. regs == NULL => polling */ 237 /* Poll for interrupt events. regs == NULL => polling */
242 pcie_isr( 0, (void *)php_ctlr ); 238 pcie_isr(0, ctrl);
243
244 init_timer(&php_ctlr->int_poll_timer);
245 239
240 init_timer(&ctrl->poll_timer);
246 if (!pciehp_poll_time) 241 if (!pciehp_poll_time)
247 pciehp_poll_time = 2; /* reset timer to poll in 2 secs if user doesn't specify at module installation*/ 242 pciehp_poll_time = 2; /* reset timer to poll in 2 secs if user doesn't specify at module installation*/
248 243
249 start_int_poll_timer(php_ctlr, pciehp_poll_time); 244 start_int_poll_timer(ctrl, pciehp_poll_time);
250
251 return;
252} 245}
253 246
254/* This function starts the interrupt polling timer. */ 247/* This function starts the interrupt polling timer. */
255static void start_int_poll_timer(struct php_ctlr_state_s *php_ctlr, int seconds) 248static void start_int_poll_timer(struct controller *ctrl, int sec)
256{ 249{
257 if (!php_ctlr) { 250 /* Clamp to sane value */
258 err("%s: Invalid HPC controller handle!\n", __FUNCTION__); 251 if ((sec <= 0) || (sec > 60))
259 return; 252 sec = 2;
260 } 253
254 ctrl->poll_timer.function = &int_poll_timeout;
255 ctrl->poll_timer.data = (unsigned long)ctrl;
256 ctrl->poll_timer.expires = jiffies + sec * HZ;
257 add_timer(&ctrl->poll_timer);
258}
261 259
262 if ( ( seconds <= 0 ) || ( seconds > 60 ) ) 260static inline int pcie_wait_cmd(struct controller *ctrl)
263 seconds = 2; /* Clamp to sane value */ 261{
262 int retval = 0;
263 unsigned int msecs = pciehp_poll_mode ? 2500 : 1000;
264 unsigned long timeout = msecs_to_jiffies(msecs);
265 int rc;
264 266
265 php_ctlr->int_poll_timer.function = &int_poll_timeout; 267 rc = wait_event_interruptible_timeout(ctrl->queue,
266 php_ctlr->int_poll_timer.data = (unsigned long)php_ctlr; /* Instance data */ 268 !ctrl->cmd_busy, timeout);
267 php_ctlr->int_poll_timer.expires = jiffies + seconds * HZ; 269 if (!rc)
268 add_timer(&php_ctlr->int_poll_timer); 270 dbg("Command not completed in 1000 msec\n");
271 else if (rc < 0) {
272 retval = -EINTR;
273 info("Command was interrupted by a signal\n");
274 }
269 275
270 return; 276 return retval;
271} 277}
272 278
273static int pcie_write_cmd(struct slot *slot, u16 cmd) 279static int pcie_write_cmd(struct slot *slot, u16 cmd)
274{ 280{
275 struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; 281 struct controller *ctrl = slot->ctrl;
276 int retval = 0; 282 int retval = 0;
277 u16 slot_status; 283 u16 slot_status;
278 284
279 DBG_ENTER_ROUTINE 285 DBG_ENTER_ROUTINE
280
281 if (!php_ctlr) {
282 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
283 return -1;
284 }
285 286
286 retval = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(slot->ctrl->cap_base), slot_status); 287 mutex_lock(&ctrl->ctrl_lock);
288
289 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
287 if (retval) { 290 if (retval) {
288 err("%s : hp_register_read_word SLOT_STATUS failed\n", __FUNCTION__); 291 err("%s: Cannot read SLOTSTATUS register\n", __FUNCTION__);
289 return retval; 292 goto out;
290 } 293 }
291 294
292 if ((slot_status & CMD_COMPLETED) == CMD_COMPLETED ) { 295 if ((slot_status & CMD_COMPLETED) == CMD_COMPLETED ) {
293 /* After 1 sec and CMD_COMPLETED still not set, just proceed forward to issue 296 /* After 1 sec and CMD_COMPLETED still not set, just
294 the next command according to spec. Just print out the error message */ 297 proceed forward to issue the next command according
295 dbg("%s : CMD_COMPLETED not clear after 1 sec.\n", __FUNCTION__); 298 to spec. Just print out the error message */
299 dbg("%s: CMD_COMPLETED not clear after 1 sec.\n",
300 __FUNCTION__);
296 } 301 }
297 302
298 retval = hp_register_write_word(php_ctlr->pci_dev, SLOT_CTRL(slot->ctrl->cap_base), cmd | CMD_CMPL_INTR_ENABLE); 303 ctrl->cmd_busy = 1;
304 retval = pciehp_writew(ctrl, SLOTCTRL, (cmd | CMD_CMPL_INTR_ENABLE));
299 if (retval) { 305 if (retval) {
300 err("%s : hp_register_write_word SLOT_CTRL failed\n", __FUNCTION__); 306 err("%s: Cannot write to SLOTCTRL register\n", __FUNCTION__);
301 return retval; 307 goto out;
302 } 308 }
303 309
310 /*
311 * Wait for command completion.
312 */
313 retval = pcie_wait_cmd(ctrl);
314 out:
315 mutex_unlock(&ctrl->ctrl_lock);
304 DBG_LEAVE_ROUTINE 316 DBG_LEAVE_ROUTINE
305 return retval; 317 return retval;
306} 318}
307 319
308static int hpc_check_lnk_status(struct controller *ctrl) 320static int hpc_check_lnk_status(struct controller *ctrl)
309{ 321{
310 struct php_ctlr_state_s *php_ctlr = ctrl->hpc_ctlr_handle;
311 u16 lnk_status; 322 u16 lnk_status;
312 int retval = 0; 323 int retval = 0;
313 324
314 DBG_ENTER_ROUTINE 325 DBG_ENTER_ROUTINE
315 326
316 if (!php_ctlr) { 327 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
317 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
318 return -1;
319 }
320
321 retval = hp_register_read_word(php_ctlr->pci_dev, LNK_STATUS(ctrl->cap_base), lnk_status);
322
323 if (retval) { 328 if (retval) {
324 err("%s : hp_register_read_word LNK_STATUS failed\n", __FUNCTION__); 329 err("%s: Cannot read LNKSTATUS register\n", __FUNCTION__);
325 return retval; 330 return retval;
326 } 331 }
327 332
@@ -340,26 +345,21 @@ static int hpc_check_lnk_status(struct controller *ctrl)
340 345
341static int hpc_get_attention_status(struct slot *slot, u8 *status) 346static int hpc_get_attention_status(struct slot *slot, u8 *status)
342{ 347{
343 struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; 348 struct controller *ctrl = slot->ctrl;
344 u16 slot_ctrl; 349 u16 slot_ctrl;
345 u8 atten_led_state; 350 u8 atten_led_state;
346 int retval = 0; 351 int retval = 0;
347 352
348 DBG_ENTER_ROUTINE 353 DBG_ENTER_ROUTINE
349 354
350 if (!php_ctlr) { 355 retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
351 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
352 return -1;
353 }
354
355 retval = hp_register_read_word(php_ctlr->pci_dev, SLOT_CTRL(slot->ctrl->cap_base), slot_ctrl);
356
357 if (retval) { 356 if (retval) {
358 err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__); 357 err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
359 return retval; 358 return retval;
360 } 359 }
361 360
362 dbg("%s: SLOT_CTRL %x, value read %x\n", __FUNCTION__,SLOT_CTRL(slot->ctrl->cap_base), slot_ctrl); 361 dbg("%s: SLOTCTRL %x, value read %x\n",
362 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_ctrl);
363 363
364 atten_led_state = (slot_ctrl & ATTN_LED_CTRL) >> 6; 364 atten_led_state = (slot_ctrl & ATTN_LED_CTRL) >> 6;
365 365
@@ -385,27 +385,22 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status)
385 return 0; 385 return 0;
386} 386}
387 387
388static int hpc_get_power_status(struct slot * slot, u8 *status) 388static int hpc_get_power_status(struct slot *slot, u8 *status)
389{ 389{
390 struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; 390 struct controller *ctrl = slot->ctrl;
391 u16 slot_ctrl; 391 u16 slot_ctrl;
392 u8 pwr_state; 392 u8 pwr_state;
393 int retval = 0; 393 int retval = 0;
394 394
395 DBG_ENTER_ROUTINE 395 DBG_ENTER_ROUTINE
396 396
397 if (!php_ctlr) { 397 retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
398 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
399 return -1;
400 }
401
402 retval = hp_register_read_word(php_ctlr->pci_dev, SLOT_CTRL(slot->ctrl->cap_base), slot_ctrl);
403
404 if (retval) { 398 if (retval) {
405 err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__); 399 err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
406 return retval; 400 return retval;
407 } 401 }
408 dbg("%s: SLOT_CTRL %x value read %x\n", __FUNCTION__, SLOT_CTRL(slot->ctrl->cap_base), slot_ctrl); 402 dbg("%s: SLOTCTRL %x value read %x\n",
403 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_ctrl);
409 404
410 pwr_state = (slot_ctrl & PWR_CTRL) >> 10; 405 pwr_state = (slot_ctrl & PWR_CTRL) >> 10;
411 406
@@ -428,21 +423,15 @@ static int hpc_get_power_status(struct slot * slot, u8 *status)
428 423
429static int hpc_get_latch_status(struct slot *slot, u8 *status) 424static int hpc_get_latch_status(struct slot *slot, u8 *status)
430{ 425{
431 struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; 426 struct controller *ctrl = slot->ctrl;
432 u16 slot_status; 427 u16 slot_status;
433 int retval = 0; 428 int retval = 0;
434 429
435 DBG_ENTER_ROUTINE 430 DBG_ENTER_ROUTINE
436 431
437 if (!php_ctlr) { 432 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
438 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
439 return -1;
440 }
441
442 retval = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(slot->ctrl->cap_base), slot_status);
443
444 if (retval) { 433 if (retval) {
445 err("%s : hp_register_read_word SLOT_STATUS failed\n", __FUNCTION__); 434 err("%s: Cannot read SLOTSTATUS register\n", __FUNCTION__);
446 return retval; 435 return retval;
447 } 436 }
448 437
@@ -454,22 +443,16 @@ static int hpc_get_latch_status(struct slot *slot, u8 *status)
454 443
455static int hpc_get_adapter_status(struct slot *slot, u8 *status) 444static int hpc_get_adapter_status(struct slot *slot, u8 *status)
456{ 445{
457 struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; 446 struct controller *ctrl = slot->ctrl;
458 u16 slot_status; 447 u16 slot_status;
459 u8 card_state; 448 u8 card_state;
460 int retval = 0; 449 int retval = 0;
461 450
462 DBG_ENTER_ROUTINE 451 DBG_ENTER_ROUTINE
463 452
464 if (!php_ctlr) { 453 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
465 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
466 return -1;
467 }
468
469 retval = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(slot->ctrl->cap_base), slot_status);
470
471 if (retval) { 454 if (retval) {
472 err("%s : hp_register_read_word SLOT_STATUS failed\n", __FUNCTION__); 455 err("%s: Cannot read SLOTSTATUS register\n", __FUNCTION__);
473 return retval; 456 return retval;
474 } 457 }
475 card_state = (u8)((slot_status & PRSN_STATE) >> 6); 458 card_state = (u8)((slot_status & PRSN_STATE) >> 6);
@@ -479,24 +462,18 @@ static int hpc_get_adapter_status(struct slot *slot, u8 *status)
479 return 0; 462 return 0;
480} 463}
481 464
482static int hpc_query_power_fault(struct slot * slot) 465static int hpc_query_power_fault(struct slot *slot)
483{ 466{
484 struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; 467 struct controller *ctrl = slot->ctrl;
485 u16 slot_status; 468 u16 slot_status;
486 u8 pwr_fault; 469 u8 pwr_fault;
487 int retval = 0; 470 int retval = 0;
488 471
489 DBG_ENTER_ROUTINE 472 DBG_ENTER_ROUTINE
490 473
491 if (!php_ctlr) { 474 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
492 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
493 return -1;
494 }
495
496 retval = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(slot->ctrl->cap_base), slot_status);
497
498 if (retval) { 475 if (retval) {
499 err("%s : Cannot check for power fault\n", __FUNCTION__); 476 err("%s: Cannot check for power fault\n", __FUNCTION__);
500 return retval; 477 return retval;
501 } 478 }
502 pwr_fault = (u8)((slot_status & PWR_FAULT_DETECTED) >> 1); 479 pwr_fault = (u8)((slot_status & PWR_FAULT_DETECTED) >> 1);
@@ -505,28 +482,63 @@ static int hpc_query_power_fault(struct slot * slot)
505 return pwr_fault; 482 return pwr_fault;
506} 483}
507 484
508static int hpc_set_attention_status(struct slot *slot, u8 value) 485static int hpc_get_emi_status(struct slot *slot, u8 *status)
509{ 486{
510 struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; 487 struct controller *ctrl = slot->ctrl;
488 u16 slot_status;
489 int retval = 0;
490
491 DBG_ENTER_ROUTINE
492
493 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
494 if (retval) {
495 err("%s : Cannot check EMI status\n", __FUNCTION__);
496 return retval;
497 }
498 *status = (slot_status & EMI_STATE) >> EMI_STATUS_BIT;
499
500 DBG_LEAVE_ROUTINE
501 return retval;
502}
503
504static int hpc_toggle_emi(struct slot *slot)
505{
506 struct controller *ctrl = slot->ctrl;
511 u16 slot_cmd = 0; 507 u16 slot_cmd = 0;
512 u16 slot_ctrl; 508 u16 slot_ctrl;
513 int rc = 0; 509 int rc = 0;
514 510
515 DBG_ENTER_ROUTINE 511 DBG_ENTER_ROUTINE
516 512
517 if (!php_ctlr) { 513 rc = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
518 err("%s: Invalid HPC controller handle!\n", __FUNCTION__); 514 if (rc) {
519 return -1; 515 err("%s : hp_register_read_word SLOT_CTRL failed\n",
516 __FUNCTION__);
517 return rc;
520 } 518 }
521 519
522 if (slot->hp_slot >= php_ctlr->num_slots) { 520 slot_cmd = (slot_ctrl | EMI_CTRL);
523 err("%s: Invalid HPC slot number!\n", __FUNCTION__); 521 if (!pciehp_poll_mode)
524 return -1; 522 slot_cmd = slot_cmd | HP_INTR_ENABLE;
525 }
526 rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_CTRL(slot->ctrl->cap_base), slot_ctrl);
527 523
524 pcie_write_cmd(slot, slot_cmd);
525 slot->last_emi_toggle = get_seconds();
526 DBG_LEAVE_ROUTINE
527 return rc;
528}
529
530static int hpc_set_attention_status(struct slot *slot, u8 value)
531{
532 struct controller *ctrl = slot->ctrl;
533 u16 slot_cmd = 0;
534 u16 slot_ctrl;
535 int rc = 0;
536
537 DBG_ENTER_ROUTINE
538
539 rc = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
528 if (rc) { 540 if (rc) {
529 err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__); 541 err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
530 return rc; 542 return rc;
531 } 543 }
532 544
@@ -547,7 +559,8 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
547 slot_cmd = slot_cmd | HP_INTR_ENABLE; 559 slot_cmd = slot_cmd | HP_INTR_ENABLE;
548 560
549 pcie_write_cmd(slot, slot_cmd); 561 pcie_write_cmd(slot, slot_cmd);
550 dbg("%s: SLOT_CTRL %x write cmd %x\n", __FUNCTION__, SLOT_CTRL(slot->ctrl->cap_base), slot_cmd); 562 dbg("%s: SLOTCTRL %x write cmd %x\n",
563 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd);
551 564
552 DBG_LEAVE_ROUTINE 565 DBG_LEAVE_ROUTINE
553 return rc; 566 return rc;
@@ -556,27 +569,16 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
556 569
557static void hpc_set_green_led_on(struct slot *slot) 570static void hpc_set_green_led_on(struct slot *slot)
558{ 571{
559 struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; 572 struct controller *ctrl = slot->ctrl;
560 u16 slot_cmd; 573 u16 slot_cmd;
561 u16 slot_ctrl; 574 u16 slot_ctrl;
562 int rc = 0; 575 int rc = 0;
563 576
564 DBG_ENTER_ROUTINE 577 DBG_ENTER_ROUTINE
565 578
566 if (!php_ctlr) { 579 rc = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
567 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
568 return ;
569 }
570
571 if (slot->hp_slot >= php_ctlr->num_slots) {
572 err("%s: Invalid HPC slot number!\n", __FUNCTION__);
573 return ;
574 }
575
576 rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_CTRL(slot->ctrl->cap_base), slot_ctrl);
577
578 if (rc) { 580 if (rc) {
579 err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__); 581 err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
580 return; 582 return;
581 } 583 }
582 slot_cmd = (slot_ctrl & ~PWR_LED_CTRL) | 0x0100; 584 slot_cmd = (slot_ctrl & ~PWR_LED_CTRL) | 0x0100;
@@ -585,34 +587,24 @@ static void hpc_set_green_led_on(struct slot *slot)
585 587
586 pcie_write_cmd(slot, slot_cmd); 588 pcie_write_cmd(slot, slot_cmd);
587 589
588 dbg("%s: SLOT_CTRL %x write cmd %x\n",__FUNCTION__, SLOT_CTRL(slot->ctrl->cap_base), slot_cmd); 590 dbg("%s: SLOTCTRL %x write cmd %x\n",
591 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd);
589 DBG_LEAVE_ROUTINE 592 DBG_LEAVE_ROUTINE
590 return; 593 return;
591} 594}
592 595
593static void hpc_set_green_led_off(struct slot *slot) 596static void hpc_set_green_led_off(struct slot *slot)
594{ 597{
595 struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; 598 struct controller *ctrl = slot->ctrl;
596 u16 slot_cmd; 599 u16 slot_cmd;
597 u16 slot_ctrl; 600 u16 slot_ctrl;
598 int rc = 0; 601 int rc = 0;
599 602
600 DBG_ENTER_ROUTINE 603 DBG_ENTER_ROUTINE
601 604
602 if (!php_ctlr) { 605 rc = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
603 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
604 return ;
605 }
606
607 if (slot->hp_slot >= php_ctlr->num_slots) {
608 err("%s: Invalid HPC slot number!\n", __FUNCTION__);
609 return ;
610 }
611
612 rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_CTRL(slot->ctrl->cap_base), slot_ctrl);
613
614 if (rc) { 606 if (rc) {
615 err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__); 607 err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
616 return; 608 return;
617 } 609 }
618 610
@@ -621,7 +613,8 @@ static void hpc_set_green_led_off(struct slot *slot)
621 if (!pciehp_poll_mode) 613 if (!pciehp_poll_mode)
622 slot_cmd = slot_cmd | HP_INTR_ENABLE; 614 slot_cmd = slot_cmd | HP_INTR_ENABLE;
623 pcie_write_cmd(slot, slot_cmd); 615 pcie_write_cmd(slot, slot_cmd);
624 dbg("%s: SLOT_CTRL %x write cmd %x\n", __FUNCTION__, SLOT_CTRL(slot->ctrl->cap_base), slot_cmd); 616 dbg("%s: SLOTCTRL %x write cmd %x\n",
617 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd);
625 618
626 DBG_LEAVE_ROUTINE 619 DBG_LEAVE_ROUTINE
627 return; 620 return;
@@ -629,27 +622,16 @@ static void hpc_set_green_led_off(struct slot *slot)
629 622
630static void hpc_set_green_led_blink(struct slot *slot) 623static void hpc_set_green_led_blink(struct slot *slot)
631{ 624{
632 struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; 625 struct controller *ctrl = slot->ctrl;
633 u16 slot_cmd; 626 u16 slot_cmd;
634 u16 slot_ctrl; 627 u16 slot_ctrl;
635 int rc = 0; 628 int rc = 0;
636 629
637 DBG_ENTER_ROUTINE 630 DBG_ENTER_ROUTINE
638 631
639 if (!php_ctlr) { 632 rc = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
640 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
641 return ;
642 }
643
644 if (slot->hp_slot >= php_ctlr->num_slots) {
645 err("%s: Invalid HPC slot number!\n", __FUNCTION__);
646 return ;
647 }
648
649 rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_CTRL(slot->ctrl->cap_base), slot_ctrl);
650
651 if (rc) { 633 if (rc) {
652 err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__); 634 err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
653 return; 635 return;
654 } 636 }
655 637
@@ -659,126 +641,54 @@ static void hpc_set_green_led_blink(struct slot *slot)
659 slot_cmd = slot_cmd | HP_INTR_ENABLE; 641 slot_cmd = slot_cmd | HP_INTR_ENABLE;
660 pcie_write_cmd(slot, slot_cmd); 642 pcie_write_cmd(slot, slot_cmd);
661 643
662 dbg("%s: SLOT_CTRL %x write cmd %x\n",__FUNCTION__, SLOT_CTRL(slot->ctrl->cap_base), slot_cmd); 644 dbg("%s: SLOTCTRL %x write cmd %x\n",
645 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd);
663 DBG_LEAVE_ROUTINE 646 DBG_LEAVE_ROUTINE
664 return; 647 return;
665} 648}
666 649
667int pcie_get_ctlr_slot_config(struct controller *ctrl,
668 int *num_ctlr_slots, /* number of slots in this HPC; only 1 in PCIE */
669 int *first_device_num, /* PCI dev num of the first slot in this PCIE */
670 int *physical_slot_num, /* phy slot num of the first slot in this PCIE */
671 u8 *ctrlcap)
672{
673 struct php_ctlr_state_s *php_ctlr = ctrl->hpc_ctlr_handle;
674 u32 slot_cap;
675 int rc = 0;
676
677 DBG_ENTER_ROUTINE
678
679 if (!php_ctlr) {
680 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
681 return -1;
682 }
683
684 *first_device_num = 0;
685 *num_ctlr_slots = 1;
686
687 rc = hp_register_read_dword(php_ctlr->pci_dev, SLOT_CAP(ctrl->cap_base), slot_cap);
688
689 if (rc) {
690 err("%s : hp_register_read_dword SLOT_CAP failed\n", __FUNCTION__);
691 return -1;
692 }
693
694 *physical_slot_num = slot_cap >> 19;
695 dbg("%s: PSN %d \n", __FUNCTION__, *physical_slot_num);
696
697 *ctrlcap = slot_cap & 0x0000007f;
698
699 DBG_LEAVE_ROUTINE
700 return 0;
701}
702
703static void hpc_release_ctlr(struct controller *ctrl) 650static void hpc_release_ctlr(struct controller *ctrl)
704{ 651{
705 struct php_ctlr_state_s *php_ctlr = ctrl->hpc_ctlr_handle;
706 struct php_ctlr_state_s *p, *p_prev;
707
708 DBG_ENTER_ROUTINE 652 DBG_ENTER_ROUTINE
709 653
710 if (!php_ctlr) { 654 if (pciehp_poll_mode)
711 err("%s: Invalid HPC controller handle!\n", __FUNCTION__); 655 del_timer(&ctrl->poll_timer);
712 return ; 656 else
713 } 657 free_irq(ctrl->pci_dev->irq, ctrl);
714
715 if (pciehp_poll_mode) {
716 del_timer(&php_ctlr->int_poll_timer);
717 } else {
718 if (php_ctlr->irq) {
719 free_irq(php_ctlr->irq, ctrl);
720 php_ctlr->irq = 0;
721 }
722 }
723 if (php_ctlr->pci_dev)
724 php_ctlr->pci_dev = NULL;
725
726 spin_lock(&list_lock);
727 p = php_ctlr_list_head;
728 p_prev = NULL;
729 while (p) {
730 if (p == php_ctlr) {
731 if (p_prev)
732 p_prev->pnext = p->pnext;
733 else
734 php_ctlr_list_head = p->pnext;
735 break;
736 } else {
737 p_prev = p;
738 p = p->pnext;
739 }
740 }
741 spin_unlock(&list_lock);
742
743 kfree(php_ctlr);
744 658
745 DBG_LEAVE_ROUTINE 659 DBG_LEAVE_ROUTINE
746
747} 660}
748 661
749static int hpc_power_on_slot(struct slot * slot) 662static int hpc_power_on_slot(struct slot * slot)
750{ 663{
751 struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; 664 struct controller *ctrl = slot->ctrl;
752 u16 slot_cmd; 665 u16 slot_cmd;
753 u16 slot_ctrl, slot_status; 666 u16 slot_ctrl, slot_status;
754
755 int retval = 0; 667 int retval = 0;
756 668
757 DBG_ENTER_ROUTINE 669 DBG_ENTER_ROUTINE
758 670
759 if (!php_ctlr) {
760 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
761 return -1;
762 }
763
764 dbg("%s: slot->hp_slot %x\n", __FUNCTION__, slot->hp_slot); 671 dbg("%s: slot->hp_slot %x\n", __FUNCTION__, slot->hp_slot);
765 if (slot->hp_slot >= php_ctlr->num_slots) {
766 err("%s: Invalid HPC slot number!\n", __FUNCTION__);
767 return -1;
768 }
769 672
770 /* Clear sticky power-fault bit from previous power failures */ 673 /* Clear sticky power-fault bit from previous power failures */
771 hp_register_read_word(php_ctlr->pci_dev, 674 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
772 SLOT_STATUS(slot->ctrl->cap_base), slot_status); 675 if (retval) {
676 err("%s: Cannot read SLOTSTATUS register\n", __FUNCTION__);
677 return retval;
678 }
773 slot_status &= PWR_FAULT_DETECTED; 679 slot_status &= PWR_FAULT_DETECTED;
774 if (slot_status) 680 if (slot_status) {
775 hp_register_write_word(php_ctlr->pci_dev, 681 retval = pciehp_writew(ctrl, SLOTSTATUS, slot_status);
776 SLOT_STATUS(slot->ctrl->cap_base), slot_status); 682 if (retval) {
777 683 err("%s: Cannot write to SLOTSTATUS register\n",
778 retval = hp_register_read_word(php_ctlr->pci_dev, SLOT_CTRL(slot->ctrl->cap_base), slot_ctrl); 684 __FUNCTION__);
685 return retval;
686 }
687 }
779 688
689 retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
780 if (retval) { 690 if (retval) {
781 err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__); 691 err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
782 return retval; 692 return retval;
783 } 693 }
784 694
@@ -798,7 +708,8 @@ static int hpc_power_on_slot(struct slot * slot)
798 err("%s: Write %x command failed!\n", __FUNCTION__, slot_cmd); 708 err("%s: Write %x command failed!\n", __FUNCTION__, slot_cmd);
799 return -1; 709 return -1;
800 } 710 }
801 dbg("%s: SLOT_CTRL %x write cmd %x\n",__FUNCTION__, SLOT_CTRL(slot->ctrl->cap_base), slot_cmd); 711 dbg("%s: SLOTCTRL %x write cmd %x\n",
712 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd);
802 713
803 DBG_LEAVE_ROUTINE 714 DBG_LEAVE_ROUTINE
804 715
@@ -807,29 +718,18 @@ static int hpc_power_on_slot(struct slot * slot)
807 718
808static int hpc_power_off_slot(struct slot * slot) 719static int hpc_power_off_slot(struct slot * slot)
809{ 720{
810 struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; 721 struct controller *ctrl = slot->ctrl;
811 u16 slot_cmd; 722 u16 slot_cmd;
812 u16 slot_ctrl; 723 u16 slot_ctrl;
813
814 int retval = 0; 724 int retval = 0;
815 725
816 DBG_ENTER_ROUTINE 726 DBG_ENTER_ROUTINE
817 727
818 if (!php_ctlr) {
819 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
820 return -1;
821 }
822
823 dbg("%s: slot->hp_slot %x\n", __FUNCTION__, slot->hp_slot); 728 dbg("%s: slot->hp_slot %x\n", __FUNCTION__, slot->hp_slot);
824 slot->hp_slot = 0;
825 if (slot->hp_slot >= php_ctlr->num_slots) {
826 err("%s: Invalid HPC slot number!\n", __FUNCTION__);
827 return -1;
828 }
829 retval = hp_register_read_word(php_ctlr->pci_dev, SLOT_CTRL(slot->ctrl->cap_base), slot_ctrl);
830 729
730 retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
831 if (retval) { 731 if (retval) {
832 err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__); 732 err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
833 return retval; 733 return retval;
834 } 734 }
835 735
@@ -854,47 +754,25 @@ static int hpc_power_off_slot(struct slot * slot)
854 err("%s: Write command failed!\n", __FUNCTION__); 754 err("%s: Write command failed!\n", __FUNCTION__);
855 return -1; 755 return -1;
856 } 756 }
857 dbg("%s: SLOT_CTRL %x write cmd %x\n",__FUNCTION__, SLOT_CTRL(slot->ctrl->cap_base), slot_cmd); 757 dbg("%s: SLOTCTRL %x write cmd %x\n",
758 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd);
858 759
859 DBG_LEAVE_ROUTINE 760 DBG_LEAVE_ROUTINE
860 761
861 return retval; 762 return retval;
862} 763}
863 764
864static irqreturn_t pcie_isr(int IRQ, void *dev_id) 765static irqreturn_t pcie_isr(int irq, void *dev_id)
865{ 766{
866 struct controller *ctrl = NULL; 767 struct controller *ctrl = (struct controller *)dev_id;
867 struct php_ctlr_state_s *php_ctlr;
868 u8 schedule_flag = 0;
869 u16 slot_status, intr_detect, intr_loc; 768 u16 slot_status, intr_detect, intr_loc;
870 u16 temp_word; 769 u16 temp_word;
871 int hp_slot = 0; /* only 1 slot per PCI Express port */ 770 int hp_slot = 0; /* only 1 slot per PCI Express port */
872 int rc = 0; 771 int rc = 0;
873 772
874 if (!dev_id) 773 rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
875 return IRQ_NONE;
876
877 if (!pciehp_poll_mode) {
878 ctrl = dev_id;
879 php_ctlr = ctrl->hpc_ctlr_handle;
880 } else {
881 php_ctlr = dev_id;
882 ctrl = (struct controller *)php_ctlr->callback_instance_id;
883 }
884
885 if (!ctrl) {
886 dbg("%s: dev_id %p ctlr == NULL\n", __FUNCTION__, (void*) dev_id);
887 return IRQ_NONE;
888 }
889
890 if (!php_ctlr) {
891 dbg("%s: php_ctlr == NULL\n", __FUNCTION__);
892 return IRQ_NONE;
893 }
894
895 rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), slot_status);
896 if (rc) { 774 if (rc) {
897 err("%s : hp_register_read_word SLOT_STATUS failed\n", __FUNCTION__); 775 err("%s: Cannot read SLOTSTATUS register\n", __FUNCTION__);
898 return IRQ_NONE; 776 return IRQ_NONE;
899 } 777 }
900 778
@@ -910,33 +788,38 @@ static irqreturn_t pcie_isr(int IRQ, void *dev_id)
910 dbg("%s: intr_loc %x\n", __FUNCTION__, intr_loc); 788 dbg("%s: intr_loc %x\n", __FUNCTION__, intr_loc);
911 /* Mask Hot-plug Interrupt Enable */ 789 /* Mask Hot-plug Interrupt Enable */
912 if (!pciehp_poll_mode) { 790 if (!pciehp_poll_mode) {
913 rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_CTRL(ctrl->cap_base), temp_word); 791 rc = pciehp_readw(ctrl, SLOTCTRL, &temp_word);
914 if (rc) { 792 if (rc) {
915 err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__); 793 err("%s: Cannot read SLOT_CTRL register\n",
794 __FUNCTION__);
916 return IRQ_NONE; 795 return IRQ_NONE;
917 } 796 }
918 797
919 dbg("%s: hp_register_read_word SLOT_CTRL with value %x\n", __FUNCTION__, temp_word); 798 dbg("%s: pciehp_readw(SLOTCTRL) with value %x\n",
799 __FUNCTION__, temp_word);
920 temp_word = (temp_word & ~HP_INTR_ENABLE & ~CMD_CMPL_INTR_ENABLE) | 0x00; 800 temp_word = (temp_word & ~HP_INTR_ENABLE & ~CMD_CMPL_INTR_ENABLE) | 0x00;
921 801 rc = pciehp_writew(ctrl, SLOTCTRL, temp_word);
922 rc = hp_register_write_word(php_ctlr->pci_dev, SLOT_CTRL(ctrl->cap_base), temp_word);
923 if (rc) { 802 if (rc) {
924 err("%s : hp_register_write_word SLOT_CTRL failed\n", __FUNCTION__); 803 err("%s: Cannot write to SLOTCTRL register\n",
804 __FUNCTION__);
925 return IRQ_NONE; 805 return IRQ_NONE;
926 } 806 }
927 807
928 rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), slot_status); 808 rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
929 if (rc) { 809 if (rc) {
930 err("%s : hp_register_read_word SLOT_STATUS failed\n", __FUNCTION__); 810 err("%s: Cannot read SLOT_STATUS register\n",
811 __FUNCTION__);
931 return IRQ_NONE; 812 return IRQ_NONE;
932 } 813 }
933 dbg("%s: hp_register_read_word SLOT_STATUS with value %x\n", __FUNCTION__, slot_status); 814 dbg("%s: pciehp_readw(SLOTSTATUS) with value %x\n",
815 __FUNCTION__, slot_status);
934 816
935 /* Clear command complete interrupt caused by this write */ 817 /* Clear command complete interrupt caused by this write */
936 temp_word = 0x1f; 818 temp_word = 0x1f;
937 rc = hp_register_write_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), temp_word); 819 rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word);
938 if (rc) { 820 if (rc) {
939 err("%s : hp_register_write_word SLOT_STATUS failed\n", __FUNCTION__); 821 err("%s: Cannot write to SLOTSTATUS register\n",
822 __FUNCTION__);
940 return IRQ_NONE; 823 return IRQ_NONE;
941 } 824 }
942 } 825 }
@@ -945,60 +828,65 @@ static irqreturn_t pcie_isr(int IRQ, void *dev_id)
945 /* 828 /*
946 * Command Complete Interrupt Pending 829 * Command Complete Interrupt Pending
947 */ 830 */
831 ctrl->cmd_busy = 0;
948 wake_up_interruptible(&ctrl->queue); 832 wake_up_interruptible(&ctrl->queue);
949 } 833 }
950 834
951 if ((php_ctlr->switch_change_callback) && (intr_loc & MRL_SENS_CHANGED)) 835 if (intr_loc & MRL_SENS_CHANGED)
952 schedule_flag += php_ctlr->switch_change_callback( 836 pciehp_handle_switch_change(hp_slot, ctrl);
953 hp_slot, php_ctlr->callback_instance_id); 837
954 if ((php_ctlr->attention_button_callback) && (intr_loc & ATTN_BUTTN_PRESSED)) 838 if (intr_loc & ATTN_BUTTN_PRESSED)
955 schedule_flag += php_ctlr->attention_button_callback( 839 pciehp_handle_attention_button(hp_slot, ctrl);
956 hp_slot, php_ctlr->callback_instance_id); 840
957 if ((php_ctlr->presence_change_callback) && (intr_loc & PRSN_DETECT_CHANGED)) 841 if (intr_loc & PRSN_DETECT_CHANGED)
958 schedule_flag += php_ctlr->presence_change_callback( 842 pciehp_handle_presence_change(hp_slot, ctrl);
959 hp_slot , php_ctlr->callback_instance_id); 843
960 if ((php_ctlr->power_fault_callback) && (intr_loc & PWR_FAULT_DETECTED)) 844 if (intr_loc & PWR_FAULT_DETECTED)
961 schedule_flag += php_ctlr->power_fault_callback( 845 pciehp_handle_power_fault(hp_slot, ctrl);
962 hp_slot, php_ctlr->callback_instance_id);
963 846
964 /* Clear all events after serving them */ 847 /* Clear all events after serving them */
965 temp_word = 0x1F; 848 temp_word = 0x1F;
966 rc = hp_register_write_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), temp_word); 849 rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word);
967 if (rc) { 850 if (rc) {
968 err("%s : hp_register_write_word SLOT_STATUS failed\n", __FUNCTION__); 851 err("%s: Cannot write to SLOTSTATUS register\n", __FUNCTION__);
969 return IRQ_NONE; 852 return IRQ_NONE;
970 } 853 }
971 /* Unmask Hot-plug Interrupt Enable */ 854 /* Unmask Hot-plug Interrupt Enable */
972 if (!pciehp_poll_mode) { 855 if (!pciehp_poll_mode) {
973 rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_CTRL(ctrl->cap_base), temp_word); 856 rc = pciehp_readw(ctrl, SLOTCTRL, &temp_word);
974 if (rc) { 857 if (rc) {
975 err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__); 858 err("%s: Cannot read SLOTCTRL register\n",
859 __FUNCTION__);
976 return IRQ_NONE; 860 return IRQ_NONE;
977 } 861 }
978 862
979 dbg("%s: Unmask Hot-plug Interrupt Enable\n", __FUNCTION__); 863 dbg("%s: Unmask Hot-plug Interrupt Enable\n", __FUNCTION__);
980 temp_word = (temp_word & ~HP_INTR_ENABLE) | HP_INTR_ENABLE; 864 temp_word = (temp_word & ~HP_INTR_ENABLE) | HP_INTR_ENABLE;
981 865
982 rc = hp_register_write_word(php_ctlr->pci_dev, SLOT_CTRL(ctrl->cap_base), temp_word); 866 rc = pciehp_writew(ctrl, SLOTCTRL, temp_word);
983 if (rc) { 867 if (rc) {
984 err("%s : hp_register_write_word SLOT_CTRL failed\n", __FUNCTION__); 868 err("%s: Cannot write to SLOTCTRL register\n",
869 __FUNCTION__);
985 return IRQ_NONE; 870 return IRQ_NONE;
986 } 871 }
987 872
988 rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), slot_status); 873 rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
989 if (rc) { 874 if (rc) {
990 err("%s : hp_register_read_word SLOT_STATUS failed\n", __FUNCTION__); 875 err("%s: Cannot read SLOT_STATUS register\n",
876 __FUNCTION__);
991 return IRQ_NONE; 877 return IRQ_NONE;
992 } 878 }
993 879
994 /* Clear command complete interrupt caused by this write */ 880 /* Clear command complete interrupt caused by this write */
995 temp_word = 0x1F; 881 temp_word = 0x1F;
996 rc = hp_register_write_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), temp_word); 882 rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word);
997 if (rc) { 883 if (rc) {
998 err("%s : hp_register_write_word SLOT_STATUS failed\n", __FUNCTION__); 884 err("%s: Cannot write to SLOTSTATUS failed\n",
885 __FUNCTION__);
999 return IRQ_NONE; 886 return IRQ_NONE;
1000 } 887 }
1001 dbg("%s: hp_register_write_word SLOT_STATUS with value %x\n", __FUNCTION__, temp_word); 888 dbg("%s: pciehp_writew(SLOTSTATUS) with value %x\n",
889 __FUNCTION__, temp_word);
1002 } 890 }
1003 891
1004 return IRQ_HANDLED; 892 return IRQ_HANDLED;
@@ -1006,27 +894,16 @@ static irqreturn_t pcie_isr(int IRQ, void *dev_id)
1006 894
1007static int hpc_get_max_lnk_speed (struct slot *slot, enum pci_bus_speed *value) 895static int hpc_get_max_lnk_speed (struct slot *slot, enum pci_bus_speed *value)
1008{ 896{
1009 struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; 897 struct controller *ctrl = slot->ctrl;
1010 enum pcie_link_speed lnk_speed; 898 enum pcie_link_speed lnk_speed;
1011 u32 lnk_cap; 899 u32 lnk_cap;
1012 int retval = 0; 900 int retval = 0;
1013 901
1014 DBG_ENTER_ROUTINE 902 DBG_ENTER_ROUTINE
1015 903
1016 if (!php_ctlr) { 904 retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap);
1017 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
1018 return -1;
1019 }
1020
1021 if (slot->hp_slot >= php_ctlr->num_slots) {
1022 err("%s: Invalid HPC slot number!\n", __FUNCTION__);
1023 return -1;
1024 }
1025
1026 retval = hp_register_read_dword(php_ctlr->pci_dev, LNK_CAP(slot->ctrl->cap_base), lnk_cap);
1027
1028 if (retval) { 905 if (retval) {
1029 err("%s : hp_register_read_dword LNK_CAP failed\n", __FUNCTION__); 906 err("%s: Cannot read LNKCAP register\n", __FUNCTION__);
1030 return retval; 907 return retval;
1031 } 908 }
1032 909
@@ -1047,27 +924,16 @@ static int hpc_get_max_lnk_speed (struct slot *slot, enum pci_bus_speed *value)
1047 924
1048static int hpc_get_max_lnk_width (struct slot *slot, enum pcie_link_width *value) 925static int hpc_get_max_lnk_width (struct slot *slot, enum pcie_link_width *value)
1049{ 926{
1050 struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; 927 struct controller *ctrl = slot->ctrl;
1051 enum pcie_link_width lnk_wdth; 928 enum pcie_link_width lnk_wdth;
1052 u32 lnk_cap; 929 u32 lnk_cap;
1053 int retval = 0; 930 int retval = 0;
1054 931
1055 DBG_ENTER_ROUTINE 932 DBG_ENTER_ROUTINE
1056 933
1057 if (!php_ctlr) { 934 retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap);
1058 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
1059 return -1;
1060 }
1061
1062 if (slot->hp_slot >= php_ctlr->num_slots) {
1063 err("%s: Invalid HPC slot number!\n", __FUNCTION__);
1064 return -1;
1065 }
1066
1067 retval = hp_register_read_dword(php_ctlr->pci_dev, LNK_CAP(slot->ctrl->cap_base), lnk_cap);
1068
1069 if (retval) { 935 if (retval) {
1070 err("%s : hp_register_read_dword LNK_CAP failed\n", __FUNCTION__); 936 err("%s: Cannot read LNKCAP register\n", __FUNCTION__);
1071 return retval; 937 return retval;
1072 } 938 }
1073 939
@@ -1109,27 +975,16 @@ static int hpc_get_max_lnk_width (struct slot *slot, enum pcie_link_width *value
1109 975
1110static int hpc_get_cur_lnk_speed (struct slot *slot, enum pci_bus_speed *value) 976static int hpc_get_cur_lnk_speed (struct slot *slot, enum pci_bus_speed *value)
1111{ 977{
1112 struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; 978 struct controller *ctrl = slot->ctrl;
1113 enum pcie_link_speed lnk_speed = PCI_SPEED_UNKNOWN; 979 enum pcie_link_speed lnk_speed = PCI_SPEED_UNKNOWN;
1114 int retval = 0; 980 int retval = 0;
1115 u16 lnk_status; 981 u16 lnk_status;
1116 982
1117 DBG_ENTER_ROUTINE 983 DBG_ENTER_ROUTINE
1118 984
1119 if (!php_ctlr) { 985 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
1120 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
1121 return -1;
1122 }
1123
1124 if (slot->hp_slot >= php_ctlr->num_slots) {
1125 err("%s: Invalid HPC slot number!\n", __FUNCTION__);
1126 return -1;
1127 }
1128
1129 retval = hp_register_read_word(php_ctlr->pci_dev, LNK_STATUS(slot->ctrl->cap_base), lnk_status);
1130
1131 if (retval) { 986 if (retval) {
1132 err("%s : hp_register_read_word LNK_STATUS failed\n", __FUNCTION__); 987 err("%s: Cannot read LNKSTATUS register\n", __FUNCTION__);
1133 return retval; 988 return retval;
1134 } 989 }
1135 990
@@ -1150,27 +1005,16 @@ static int hpc_get_cur_lnk_speed (struct slot *slot, enum pci_bus_speed *value)
1150 1005
1151static int hpc_get_cur_lnk_width (struct slot *slot, enum pcie_link_width *value) 1006static int hpc_get_cur_lnk_width (struct slot *slot, enum pcie_link_width *value)
1152{ 1007{
1153 struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; 1008 struct controller *ctrl = slot->ctrl;
1154 enum pcie_link_width lnk_wdth = PCIE_LNK_WIDTH_UNKNOWN; 1009 enum pcie_link_width lnk_wdth = PCIE_LNK_WIDTH_UNKNOWN;
1155 int retval = 0; 1010 int retval = 0;
1156 u16 lnk_status; 1011 u16 lnk_status;
1157 1012
1158 DBG_ENTER_ROUTINE 1013 DBG_ENTER_ROUTINE
1159 1014
1160 if (!php_ctlr) { 1015 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
1161 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
1162 return -1;
1163 }
1164
1165 if (slot->hp_slot >= php_ctlr->num_slots) {
1166 err("%s: Invalid HPC slot number!\n", __FUNCTION__);
1167 return -1;
1168 }
1169
1170 retval = hp_register_read_word(php_ctlr->pci_dev, LNK_STATUS(slot->ctrl->cap_base), lnk_status);
1171
1172 if (retval) { 1016 if (retval) {
1173 err("%s : hp_register_read_word LNK_STATUS failed\n", __FUNCTION__); 1017 err("%s: Cannot read LNKSTATUS register\n", __FUNCTION__);
1174 return retval; 1018 return retval;
1175 } 1019 }
1176 1020
@@ -1218,6 +1062,8 @@ static struct hpc_ops pciehp_hpc_ops = {
1218 .get_attention_status = hpc_get_attention_status, 1062 .get_attention_status = hpc_get_attention_status,
1219 .get_latch_status = hpc_get_latch_status, 1063 .get_latch_status = hpc_get_latch_status,
1220 .get_adapter_status = hpc_get_adapter_status, 1064 .get_adapter_status = hpc_get_adapter_status,
1065 .get_emi_status = hpc_get_emi_status,
1066 .toggle_emi = hpc_toggle_emi,
1221 1067
1222 .get_max_bus_speed = hpc_get_max_lnk_speed, 1068 .get_max_bus_speed = hpc_get_max_lnk_speed,
1223 .get_cur_bus_speed = hpc_get_cur_lnk_speed, 1069 .get_cur_bus_speed = hpc_get_cur_lnk_speed,
@@ -1305,38 +1151,24 @@ int pciehp_acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev)
1305 1151
1306int pcie_init(struct controller * ctrl, struct pcie_device *dev) 1152int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1307{ 1153{
1308 struct php_ctlr_state_s *php_ctlr, *p;
1309 void *instance_id = ctrl;
1310 int rc; 1154 int rc;
1311 static int first = 1; 1155 static int first = 1;
1312 u16 temp_word; 1156 u16 temp_word;
1313 u16 cap_reg; 1157 u16 cap_reg;
1314 u16 intr_enable = 0; 1158 u16 intr_enable = 0;
1315 u32 slot_cap; 1159 u32 slot_cap;
1316 int cap_base, saved_cap_base; 1160 int cap_base;
1317 u16 slot_status, slot_ctrl; 1161 u16 slot_status, slot_ctrl;
1318 struct pci_dev *pdev; 1162 struct pci_dev *pdev;
1319 1163
1320 DBG_ENTER_ROUTINE 1164 DBG_ENTER_ROUTINE
1321 1165
1322 spin_lock_init(&list_lock);
1323 php_ctlr = kmalloc(sizeof(struct php_ctlr_state_s), GFP_KERNEL);
1324
1325 if (!php_ctlr) { /* allocate controller state data */
1326 err("%s: HPC controller memory allocation error!\n", __FUNCTION__);
1327 goto abort;
1328 }
1329
1330 memset(php_ctlr, 0, sizeof(struct php_ctlr_state_s));
1331
1332 pdev = dev->port; 1166 pdev = dev->port;
1333 php_ctlr->pci_dev = pdev; /* save pci_dev in context */ 1167 ctrl->pci_dev = pdev; /* save pci_dev in context */
1334 1168
1335 dbg("%s: hotplug controller vendor id 0x%x device id 0x%x\n", 1169 dbg("%s: hotplug controller vendor id 0x%x device id 0x%x\n",
1336 __FUNCTION__, pdev->vendor, pdev->device); 1170 __FUNCTION__, pdev->vendor, pdev->device);
1337 1171
1338 saved_cap_base = pcie_cap_base;
1339
1340 if ((cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP)) == 0) { 1172 if ((cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP)) == 0) {
1341 dbg("%s: Can't find PCI_CAP_ID_EXP (0x10)\n", __FUNCTION__); 1173 dbg("%s: Can't find PCI_CAP_ID_EXP (0x10)\n", __FUNCTION__);
1342 goto abort_free_ctlr; 1174 goto abort_free_ctlr;
@@ -1344,14 +1176,15 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1344 1176
1345 ctrl->cap_base = cap_base; 1177 ctrl->cap_base = cap_base;
1346 1178
1347 dbg("%s: pcie_cap_base %x\n", __FUNCTION__, pcie_cap_base); 1179 dbg("%s: pcie_cap_base %x\n", __FUNCTION__, cap_base);
1348 1180
1349 rc = hp_register_read_word(pdev, CAP_REG(ctrl->cap_base), cap_reg); 1181 rc = pciehp_readw(ctrl, CAPREG, &cap_reg);
1350 if (rc) { 1182 if (rc) {
1351 err("%s : hp_register_read_word CAP_REG failed\n", __FUNCTION__); 1183 err("%s: Cannot read CAPREG register\n", __FUNCTION__);
1352 goto abort_free_ctlr; 1184 goto abort_free_ctlr;
1353 } 1185 }
1354 dbg("%s: CAP_REG offset %x cap_reg %x\n", __FUNCTION__, CAP_REG(ctrl->cap_base), cap_reg); 1186 dbg("%s: CAPREG offset %x cap_reg %x\n",
1187 __FUNCTION__, ctrl->cap_base + CAPREG, cap_reg);
1355 1188
1356 if (((cap_reg & SLOT_IMPL) == 0) || (((cap_reg & DEV_PORT_TYPE) != 0x0040) 1189 if (((cap_reg & SLOT_IMPL) == 0) || (((cap_reg & DEV_PORT_TYPE) != 0x0040)
1357 && ((cap_reg & DEV_PORT_TYPE) != 0x0060))) { 1190 && ((cap_reg & DEV_PORT_TYPE) != 0x0060))) {
@@ -1359,31 +1192,34 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1359 goto abort_free_ctlr; 1192 goto abort_free_ctlr;
1360 } 1193 }
1361 1194
1362 rc = hp_register_read_dword(php_ctlr->pci_dev, SLOT_CAP(ctrl->cap_base), slot_cap); 1195 rc = pciehp_readl(ctrl, SLOTCAP, &slot_cap);
1363 if (rc) { 1196 if (rc) {
1364 err("%s : hp_register_read_word CAP_REG failed\n", __FUNCTION__); 1197 err("%s: Cannot read SLOTCAP register\n", __FUNCTION__);
1365 goto abort_free_ctlr; 1198 goto abort_free_ctlr;
1366 } 1199 }
1367 dbg("%s: SLOT_CAP offset %x slot_cap %x\n", __FUNCTION__, SLOT_CAP(ctrl->cap_base), slot_cap); 1200 dbg("%s: SLOTCAP offset %x slot_cap %x\n",
1201 __FUNCTION__, ctrl->cap_base + SLOTCAP, slot_cap);
1368 1202
1369 if (!(slot_cap & HP_CAP)) { 1203 if (!(slot_cap & HP_CAP)) {
1370 dbg("%s : This slot is not hot-plug capable\n", __FUNCTION__); 1204 dbg("%s : This slot is not hot-plug capable\n", __FUNCTION__);
1371 goto abort_free_ctlr; 1205 goto abort_free_ctlr;
1372 } 1206 }
1373 /* For debugging purpose */ 1207 /* For debugging purpose */
1374 rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), slot_status); 1208 rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
1375 if (rc) { 1209 if (rc) {
1376 err("%s : hp_register_read_word SLOT_STATUS failed\n", __FUNCTION__); 1210 err("%s: Cannot read SLOTSTATUS register\n", __FUNCTION__);
1377 goto abort_free_ctlr; 1211 goto abort_free_ctlr;
1378 } 1212 }
1379 dbg("%s: SLOT_STATUS offset %x slot_status %x\n", __FUNCTION__, SLOT_STATUS(ctrl->cap_base), slot_status); 1213 dbg("%s: SLOTSTATUS offset %x slot_status %x\n",
1214 __FUNCTION__, ctrl->cap_base + SLOTSTATUS, slot_status);
1380 1215
1381 rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_CTRL(ctrl->cap_base), slot_ctrl); 1216 rc = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
1382 if (rc) { 1217 if (rc) {
1383 err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__); 1218 err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
1384 goto abort_free_ctlr; 1219 goto abort_free_ctlr;
1385 } 1220 }
1386 dbg("%s: SLOT_CTRL offset %x slot_ctrl %x\n", __FUNCTION__, SLOT_CTRL(ctrl->cap_base), slot_ctrl); 1221 dbg("%s: SLOTCTRL offset %x slot_ctrl %x\n",
1222 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_ctrl);
1387 1223
1388 if (first) { 1224 if (first) {
1389 spin_lock_init(&hpc_event_lock); 1225 spin_lock_init(&hpc_event_lock);
@@ -1405,69 +1241,64 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1405 /* setup wait queue */ 1241 /* setup wait queue */
1406 init_waitqueue_head(&ctrl->queue); 1242 init_waitqueue_head(&ctrl->queue);
1407 1243
1408 /* find the IRQ */
1409 php_ctlr->irq = dev->irq;
1410
1411 /* Save interrupt callback info */
1412 php_ctlr->attention_button_callback = pciehp_handle_attention_button;
1413 php_ctlr->switch_change_callback = pciehp_handle_switch_change;
1414 php_ctlr->presence_change_callback = pciehp_handle_presence_change;
1415 php_ctlr->power_fault_callback = pciehp_handle_power_fault;
1416 php_ctlr->callback_instance_id = instance_id;
1417
1418 /* return PCI Controller Info */ 1244 /* return PCI Controller Info */
1419 php_ctlr->slot_device_offset = 0; 1245 ctrl->slot_device_offset = 0;
1420 php_ctlr->num_slots = 1; 1246 ctrl->num_slots = 1;
1247 ctrl->first_slot = slot_cap >> 19;
1248 ctrl->ctrlcap = slot_cap & 0x0000007f;
1421 1249
1422 /* Mask Hot-plug Interrupt Enable */ 1250 /* Mask Hot-plug Interrupt Enable */
1423 rc = hp_register_read_word(pdev, SLOT_CTRL(ctrl->cap_base), temp_word); 1251 rc = pciehp_readw(ctrl, SLOTCTRL, &temp_word);
1424 if (rc) { 1252 if (rc) {
1425 err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__); 1253 err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
1426 goto abort_free_ctlr; 1254 goto abort_free_ctlr;
1427 } 1255 }
1428 1256
1429 dbg("%s: SLOT_CTRL %x value read %x\n", __FUNCTION__, SLOT_CTRL(ctrl->cap_base), temp_word); 1257 dbg("%s: SLOTCTRL %x value read %x\n",
1258 __FUNCTION__, ctrl->cap_base + SLOTCTRL, temp_word);
1430 temp_word = (temp_word & ~HP_INTR_ENABLE & ~CMD_CMPL_INTR_ENABLE) | 0x00; 1259 temp_word = (temp_word & ~HP_INTR_ENABLE & ~CMD_CMPL_INTR_ENABLE) | 0x00;
1431 1260
1432 rc = hp_register_write_word(pdev, SLOT_CTRL(ctrl->cap_base), temp_word); 1261 rc = pciehp_writew(ctrl, SLOTCTRL, temp_word);
1433 if (rc) { 1262 if (rc) {
1434 err("%s : hp_register_write_word SLOT_CTRL failed\n", __FUNCTION__); 1263 err("%s: Cannot write to SLOTCTRL register\n", __FUNCTION__);
1435 goto abort_free_ctlr; 1264 goto abort_free_ctlr;
1436 } 1265 }
1437 1266
1438 rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), slot_status); 1267 rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
1439 if (rc) { 1268 if (rc) {
1440 err("%s : hp_register_read_word SLOT_STATUS failed\n", __FUNCTION__); 1269 err("%s: Cannot read SLOTSTATUS register\n", __FUNCTION__);
1441 goto abort_free_ctlr; 1270 goto abort_free_ctlr;
1442 } 1271 }
1443 1272
1444 temp_word = 0x1F; /* Clear all events */ 1273 temp_word = 0x1F; /* Clear all events */
1445 rc = hp_register_write_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), temp_word); 1274 rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word);
1446 if (rc) { 1275 if (rc) {
1447 err("%s : hp_register_write_word SLOT_STATUS failed\n", __FUNCTION__); 1276 err("%s: Cannot write to SLOTSTATUS register\n", __FUNCTION__);
1448 goto abort_free_ctlr; 1277 goto abort_free_ctlr;
1449 } 1278 }
1450 1279
1451 if (pciehp_poll_mode) {/* Install interrupt polling code */ 1280 if (pciehp_poll_mode) {
1452 /* Install and start the interrupt polling timer */ 1281 /* Install interrupt polling timer. Start with 10 sec delay */
1453 init_timer(&php_ctlr->int_poll_timer); 1282 init_timer(&ctrl->poll_timer);
1454 start_int_poll_timer( php_ctlr, 10 ); /* start with 10 second delay */ 1283 start_int_poll_timer(ctrl, 10);
1455 } else { 1284 } else {
1456 /* Installs the interrupt handler */ 1285 /* Installs the interrupt handler */
1457 rc = request_irq(php_ctlr->irq, pcie_isr, IRQF_SHARED, MY_NAME, (void *) ctrl); 1286 rc = request_irq(ctrl->pci_dev->irq, pcie_isr, IRQF_SHARED,
1458 dbg("%s: request_irq %d for hpc%d (returns %d)\n", __FUNCTION__, php_ctlr->irq, ctlr_seq_num, rc); 1287 MY_NAME, (void *)ctrl);
1288 dbg("%s: request_irq %d for hpc%d (returns %d)\n",
1289 __FUNCTION__, ctrl->pci_dev->irq, ctlr_seq_num, rc);
1459 if (rc) { 1290 if (rc) {
1460 err("Can't get irq %d for the hotplug controller\n", php_ctlr->irq); 1291 err("Can't get irq %d for the hotplug controller\n",
1292 ctrl->pci_dev->irq);
1461 goto abort_free_ctlr; 1293 goto abort_free_ctlr;
1462 } 1294 }
1463 } 1295 }
1464
1465 dbg("pciehp ctrl b:d:f:irq=0x%x:%x:%x:%x\n", pdev->bus->number, 1296 dbg("pciehp ctrl b:d:f:irq=0x%x:%x:%x:%x\n", pdev->bus->number,
1466 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), dev->irq); 1297 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), dev->irq);
1467 1298
1468 rc = hp_register_read_word(pdev, SLOT_CTRL(ctrl->cap_base), temp_word); 1299 rc = pciehp_readw(ctrl, SLOTCTRL, &temp_word);
1469 if (rc) { 1300 if (rc) {
1470 err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__); 1301 err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
1471 goto abort_free_irq; 1302 goto abort_free_irq;
1472 } 1303 }
1473 1304
@@ -1491,21 +1322,21 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1491 } 1322 }
1492 1323
1493 /* Unmask Hot-plug Interrupt Enable for the interrupt notification mechanism case */ 1324 /* Unmask Hot-plug Interrupt Enable for the interrupt notification mechanism case */
1494 rc = hp_register_write_word(pdev, SLOT_CTRL(ctrl->cap_base), temp_word); 1325 rc = pciehp_writew(ctrl, SLOTCTRL, temp_word);
1495 if (rc) { 1326 if (rc) {
1496 err("%s : hp_register_write_word SLOT_CTRL failed\n", __FUNCTION__); 1327 err("%s: Cannot write to SLOTCTRL register\n", __FUNCTION__);
1497 goto abort_free_irq; 1328 goto abort_free_irq;
1498 } 1329 }
1499 rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), slot_status); 1330 rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
1500 if (rc) { 1331 if (rc) {
1501 err("%s : hp_register_read_word SLOT_STATUS failed\n", __FUNCTION__); 1332 err("%s: Cannot read SLOTSTATUS register\n", __FUNCTION__);
1502 goto abort_disable_intr; 1333 goto abort_disable_intr;
1503 } 1334 }
1504 1335
1505 temp_word = 0x1F; /* Clear all events */ 1336 temp_word = 0x1F; /* Clear all events */
1506 rc = hp_register_write_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), temp_word); 1337 rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word);
1507 if (rc) { 1338 if (rc) {
1508 err("%s : hp_register_write_word SLOT_STATUS failed\n", __FUNCTION__); 1339 err("%s: Cannot write to SLOTSTATUS register\n", __FUNCTION__);
1509 goto abort_disable_intr; 1340 goto abort_disable_intr;
1510 } 1341 }
1511 1342
@@ -1518,24 +1349,7 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1518 goto abort_disable_intr; 1349 goto abort_disable_intr;
1519 } 1350 }
1520 1351
1521 /* Add this HPC instance into the HPC list */
1522 spin_lock(&list_lock);
1523 if (php_ctlr_list_head == 0) {
1524 php_ctlr_list_head = php_ctlr;
1525 p = php_ctlr_list_head;
1526 p->pnext = NULL;
1527 } else {
1528 p = php_ctlr_list_head;
1529
1530 while (p->pnext)
1531 p = p->pnext;
1532
1533 p->pnext = php_ctlr;
1534 }
1535 spin_unlock(&list_lock);
1536
1537 ctlr_seq_num++; 1352 ctlr_seq_num++;
1538 ctrl->hpc_ctlr_handle = php_ctlr;
1539 ctrl->hpc_ops = &pciehp_hpc_ops; 1353 ctrl->hpc_ops = &pciehp_hpc_ops;
1540 1354
1541 DBG_LEAVE_ROUTINE 1355 DBG_LEAVE_ROUTINE
@@ -1543,24 +1357,21 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1543 1357
1544 /* We end up here for the many possible ways to fail this API. */ 1358 /* We end up here for the many possible ways to fail this API. */
1545abort_disable_intr: 1359abort_disable_intr:
1546 rc = hp_register_read_word(pdev, SLOT_CTRL(ctrl->cap_base), temp_word); 1360 rc = pciehp_readw(ctrl, SLOTCTRL, &temp_word);
1547 if (!rc) { 1361 if (!rc) {
1548 temp_word &= ~(intr_enable | HP_INTR_ENABLE); 1362 temp_word &= ~(intr_enable | HP_INTR_ENABLE);
1549 rc = hp_register_write_word(pdev, SLOT_CTRL(ctrl->cap_base), temp_word); 1363 rc = pciehp_writew(ctrl, SLOTCTRL, temp_word);
1550 } 1364 }
1551 if (rc) 1365 if (rc)
1552 err("%s : disabling interrupts failed\n", __FUNCTION__); 1366 err("%s : disabling interrupts failed\n", __FUNCTION__);
1553 1367
1554abort_free_irq: 1368abort_free_irq:
1555 if (pciehp_poll_mode) 1369 if (pciehp_poll_mode)
1556 del_timer_sync(&php_ctlr->int_poll_timer); 1370 del_timer_sync(&ctrl->poll_timer);
1557 else 1371 else
1558 free_irq(php_ctlr->irq, ctrl); 1372 free_irq(ctrl->pci_dev->irq, ctrl);
1559 1373
1560abort_free_ctlr: 1374abort_free_ctlr:
1561 pcie_cap_base = saved_cap_base;
1562 kfree(php_ctlr);
1563abort:
1564 DBG_LEAVE_ROUTINE 1375 DBG_LEAVE_ROUTINE
1565 return -1; 1376 return -1;
1566} 1377}
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 5d188c558386..78cf0711d1fa 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -28,6 +28,8 @@
28#include <asm/sn/sn_feature_sets.h> 28#include <asm/sn/sn_feature_sets.h>
29#include <asm/sn/sn_sal.h> 29#include <asm/sn/sn_sal.h>
30#include <asm/sn/types.h> 30#include <asm/sn/types.h>
31#include <linux/acpi.h>
32#include <asm/sn/acpi.h>
31 33
32#include "../pci.h" 34#include "../pci.h"
33 35
@@ -35,14 +37,17 @@ MODULE_LICENSE("GPL");
35MODULE_AUTHOR("SGI (prarit@sgi.com, dickie@sgi.com, habeck@sgi.com)"); 37MODULE_AUTHOR("SGI (prarit@sgi.com, dickie@sgi.com, habeck@sgi.com)");
36MODULE_DESCRIPTION("SGI Altix Hot Plug PCI Controller Driver"); 38MODULE_DESCRIPTION("SGI Altix Hot Plug PCI Controller Driver");
37 39
38#define PCIIO_ASIC_TYPE_TIOCA 4 40
41/* SAL call error codes. Keep in sync with prom header io/include/pcibr.h */
39#define PCI_SLOT_ALREADY_UP 2 /* slot already up */ 42#define PCI_SLOT_ALREADY_UP 2 /* slot already up */
40#define PCI_SLOT_ALREADY_DOWN 3 /* slot already down */ 43#define PCI_SLOT_ALREADY_DOWN 3 /* slot already down */
41#define PCI_L1_ERR 7 /* L1 console command error */ 44#define PCI_L1_ERR 7 /* L1 console command error */
42#define PCI_EMPTY_33MHZ 15 /* empty 33 MHz bus */ 45#define PCI_EMPTY_33MHZ 15 /* empty 33 MHz bus */
46
47
48#define PCIIO_ASIC_TYPE_TIOCA 4
43#define PCI_L1_QSIZE 128 /* our L1 message buffer size */ 49#define PCI_L1_QSIZE 128 /* our L1 message buffer size */
44#define SN_MAX_HP_SLOTS 32 /* max hotplug slots */ 50#define SN_MAX_HP_SLOTS 32 /* max hotplug slots */
45#define SGI_HOTPLUG_PROM_REV 0x0430 /* Min. required PROM version */
46#define SN_SLOT_NAME_SIZE 33 /* size of name string */ 51#define SN_SLOT_NAME_SIZE 33 /* size of name string */
47 52
48/* internal list head */ 53/* internal list head */
@@ -227,7 +232,7 @@ static void sn_bus_free_data(struct pci_dev *dev)
227} 232}
228 233
229static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot, 234static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot,
230 int device_num) 235 int device_num, char **ssdt)
231{ 236{
232 struct slot *slot = bss_hotplug_slot->private; 237 struct slot *slot = bss_hotplug_slot->private;
233 struct pcibus_info *pcibus_info; 238 struct pcibus_info *pcibus_info;
@@ -240,7 +245,8 @@ static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot,
240 * Power-on and initialize the slot in the SN 245 * Power-on and initialize the slot in the SN
241 * PCI infrastructure. 246 * PCI infrastructure.
242 */ 247 */
243 rc = sal_pcibr_slot_enable(pcibus_info, device_num, &resp); 248 rc = sal_pcibr_slot_enable(pcibus_info, device_num, &resp, ssdt);
249
244 250
245 if (rc == PCI_SLOT_ALREADY_UP) { 251 if (rc == PCI_SLOT_ALREADY_UP) {
246 dev_dbg(slot->pci_bus->self, "is already active\n"); 252 dev_dbg(slot->pci_bus->self, "is already active\n");
@@ -335,6 +341,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
335 int func, num_funcs; 341 int func, num_funcs;
336 int new_ppb = 0; 342 int new_ppb = 0;
337 int rc; 343 int rc;
344 char *ssdt = NULL;
338 void pcibios_fixup_device_resources(struct pci_dev *); 345 void pcibios_fixup_device_resources(struct pci_dev *);
339 346
340 /* Serialize the Linux PCI infrastructure */ 347 /* Serialize the Linux PCI infrastructure */
@@ -342,14 +349,29 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
342 349
343 /* 350 /*
344 * Power-on and initialize the slot in the SN 351 * Power-on and initialize the slot in the SN
345 * PCI infrastructure. 352 * PCI infrastructure. Also, retrieve the ACPI SSDT
353 * table for the slot (if ACPI capable PROM).
346 */ 354 */
347 rc = sn_slot_enable(bss_hotplug_slot, slot->device_num); 355 rc = sn_slot_enable(bss_hotplug_slot, slot->device_num, &ssdt);
348 if (rc) { 356 if (rc) {
349 mutex_unlock(&sn_hotplug_mutex); 357 mutex_unlock(&sn_hotplug_mutex);
350 return rc; 358 return rc;
351 } 359 }
352 360
361 if (ssdt)
362 ssdt = __va(ssdt);
363 /* Add the new SSDT for the slot to the ACPI namespace */
364 if (SN_ACPI_BASE_SUPPORT() && ssdt) {
365 acpi_status ret;
366
367 ret = acpi_load_table((struct acpi_table_header *)ssdt);
368 if (ACPI_FAILURE(ret)) {
369 printk(KERN_ERR "%s: acpi_load_table failed (0x%x)\n",
370 __FUNCTION__, ret);
371 /* try to continue on */
372 }
373 }
374
353 num_funcs = pci_scan_slot(slot->pci_bus, 375 num_funcs = pci_scan_slot(slot->pci_bus,
354 PCI_DEVFN(slot->device_num + 1, 0)); 376 PCI_DEVFN(slot->device_num + 1, 0));
355 if (!num_funcs) { 377 if (!num_funcs) {
@@ -374,7 +396,10 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
374 * pdi_host_pcidev_info). 396 * pdi_host_pcidev_info).
375 */ 397 */
376 pcibios_fixup_device_resources(dev); 398 pcibios_fixup_device_resources(dev);
377 sn_pci_fixup_slot(dev); 399 if (SN_ACPI_BASE_SUPPORT())
400 sn_acpi_slot_fixup(dev);
401 else
402 sn_io_slot_fixup(dev);
378 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { 403 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
379 unsigned char sec_bus; 404 unsigned char sec_bus;
380 pci_read_config_byte(dev, PCI_SECONDARY_BUS, 405 pci_read_config_byte(dev, PCI_SECONDARY_BUS,
@@ -388,6 +413,63 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
388 } 413 }
389 } 414 }
390 415
416 /*
417 * Add the slot's devices to the ACPI infrastructure */
418 if (SN_ACPI_BASE_SUPPORT() && ssdt) {
419 unsigned long adr;
420 struct acpi_device *pdevice;
421 struct acpi_device *device;
422 acpi_handle phandle;
423 acpi_handle chandle = NULL;
424 acpi_handle rethandle;
425 acpi_status ret;
426
427 phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle;
428
429 if (acpi_bus_get_device(phandle, &pdevice)) {
430 dev_dbg(slot->pci_bus->self,
431 "no parent device, assuming NULL\n");
432 pdevice = NULL;
433 }
434
435 /*
436 * Walk the rootbus node's immediate children looking for
437 * the slot's device node(s). There can be more than
438 * one for multifunction devices.
439 */
440 for (;;) {
441 rethandle = NULL;
442 ret = acpi_get_next_object(ACPI_TYPE_DEVICE,
443 phandle, chandle,
444 &rethandle);
445
446 if (ret == AE_NOT_FOUND || rethandle == NULL)
447 break;
448
449 chandle = rethandle;
450
451 ret = acpi_evaluate_integer(chandle, METHOD_NAME__ADR,
452 NULL, &adr);
453
454 if (ACPI_SUCCESS(ret) &&
455 (adr>>16) == (slot->device_num + 1)) {
456
457 ret = acpi_bus_add(&device, pdevice, chandle,
458 ACPI_BUS_TYPE_DEVICE);
459 if (ACPI_FAILURE(ret)) {
460 printk(KERN_ERR "%s: acpi_bus_add "
461 "failed (0x%x) for slot %d "
462 "func %d\n", __FUNCTION__,
463 ret, (int)(adr>>16),
464 (int)(adr&0xffff));
465 /* try to continue on */
466 } else {
467 acpi_bus_start(device);
468 }
469 }
470 }
471 }
472
391 /* Call the driver for the new device */ 473 /* Call the driver for the new device */
392 pci_bus_add_devices(slot->pci_bus); 474 pci_bus_add_devices(slot->pci_bus);
393 /* Call the drivers for the new devices subordinate to PPB */ 475 /* Call the drivers for the new devices subordinate to PPB */
@@ -412,6 +494,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
412 struct pci_dev *dev; 494 struct pci_dev *dev;
413 int func; 495 int func;
414 int rc; 496 int rc;
497 acpi_owner_id ssdt_id = 0;
415 498
416 /* Acquire update access to the bus */ 499 /* Acquire update access to the bus */
417 mutex_lock(&sn_hotplug_mutex); 500 mutex_lock(&sn_hotplug_mutex);
@@ -422,6 +505,52 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
422 if (rc) 505 if (rc)
423 goto leaving; 506 goto leaving;
424 507
508 /* free the ACPI resources for the slot */
509 if (SN_ACPI_BASE_SUPPORT() &&
510 PCI_CONTROLLER(slot->pci_bus)->acpi_handle) {
511 unsigned long adr;
512 struct acpi_device *device;
513 acpi_handle phandle;
514 acpi_handle chandle = NULL;
515 acpi_handle rethandle;
516 acpi_status ret;
517
518 /* Get the rootbus node pointer */
519 phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle;
520
521 /*
522 * Walk the rootbus node's immediate children looking for
523 * the slot's device node(s). There can be more than
524 * one for multifunction devices.
525 */
526 for (;;) {
527 rethandle = NULL;
528 ret = acpi_get_next_object(ACPI_TYPE_DEVICE,
529 phandle, chandle,
530 &rethandle);
531
532 if (ret == AE_NOT_FOUND || rethandle == NULL)
533 break;
534
535 chandle = rethandle;
536
537 ret = acpi_evaluate_integer(chandle,
538 METHOD_NAME__ADR,
539 NULL, &adr);
540 if (ACPI_SUCCESS(ret) &&
541 (adr>>16) == (slot->device_num + 1)) {
542 /* retain the owner id */
543 acpi_get_id(chandle, &ssdt_id);
544
545 ret = acpi_bus_get_device(chandle,
546 &device);
547 if (ACPI_SUCCESS(ret))
548 acpi_bus_trim(device, 1);
549 }
550 }
551
552 }
553
425 /* Free the SN resources assigned to the Linux device.*/ 554 /* Free the SN resources assigned to the Linux device.*/
426 for (func = 0; func < 8; func++) { 555 for (func = 0; func < 8; func++) {
427 dev = pci_get_slot(slot->pci_bus, 556 dev = pci_get_slot(slot->pci_bus,
@@ -434,6 +563,18 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
434 } 563 }
435 } 564 }
436 565
566 /* Remove the SSDT for the slot from the ACPI namespace */
567 if (SN_ACPI_BASE_SUPPORT() && ssdt_id) {
568 acpi_status ret;
569 ret = acpi_unload_table_id(ssdt_id);
570 if (ACPI_FAILURE(ret)) {
571 printk(KERN_ERR "%s: acpi_unload_table_id "
572 "failed (0x%x) for id %d\n",
573 __FUNCTION__, ret, ssdt_id);
574 /* try to continue on */
575 }
576 }
577
437 /* free the collected sysdata pointers */ 578 /* free the collected sysdata pointers */
438 sn_bus_free_sysdata(); 579 sn_bus_free_sysdata();
439 580
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 3ca6a4f574b3..01d31a1f697c 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -106,7 +106,7 @@ struct controller {
106}; 106};
107 107
108/* Define AMD SHPC ID */ 108/* Define AMD SHPC ID */
109#define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450 109#define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450
110#define PCI_DEVICE_ID_AMD_POGO_7458 0x7458 110#define PCI_DEVICE_ID_AMD_POGO_7458 0x7458
111 111
112/* AMD PCIX bridge registers */ 112/* AMD PCIX bridge registers */
@@ -221,7 +221,7 @@ enum ctrl_offsets {
221}; 221};
222 222
223static inline struct slot *get_slot(struct hotplug_slot *hotplug_slot) 223static inline struct slot *get_slot(struct hotplug_slot *hotplug_slot)
224{ 224{
225 return hotplug_slot->private; 225 return hotplug_slot->private;
226} 226}
227 227
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 590cd3cbe010..5f4bc08a633a 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -401,10 +401,6 @@ static int __init shpcd_init(void)
401{ 401{
402 int retval = 0; 402 int retval = 0;
403 403
404#ifdef CONFIG_HOTPLUG_PCI_SHPC_POLL_EVENT_MODE
405 shpchp_poll_mode = 1;
406#endif
407
408 retval = pci_register_driver(&shpc_driver); 404 retval = pci_register_driver(&shpc_driver);
409 dbg("%s: pci_register_driver = %d\n", __FUNCTION__, retval); 405 dbg("%s: pci_register_driver = %d\n", __FUNCTION__, retval);
410 info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); 406 info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index 6bb84734cd6c..b746bd265bc6 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -64,7 +64,7 @@ u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl)
64 64
65 /* Attention Button Change */ 65 /* Attention Button Change */
66 dbg("shpchp: Attention button interrupt received.\n"); 66 dbg("shpchp: Attention button interrupt received.\n");
67 67
68 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); 68 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
69 p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); 69 p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
70 70
@@ -128,7 +128,7 @@ u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl)
128 128
129 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); 129 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
130 130
131 /* 131 /*
132 * Save the presence state 132 * Save the presence state
133 */ 133 */
134 p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); 134 p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
@@ -184,12 +184,12 @@ u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl)
184 return 1; 184 return 1;
185} 185}
186 186
187/* The following routines constitute the bulk of the 187/* The following routines constitute the bulk of the
188 hotplug controller logic 188 hotplug controller logic
189 */ 189 */
190static int change_bus_speed(struct controller *ctrl, struct slot *p_slot, 190static int change_bus_speed(struct controller *ctrl, struct slot *p_slot,
191 enum pci_bus_speed speed) 191 enum pci_bus_speed speed)
192{ 192{
193 int rc = 0; 193 int rc = 0;
194 194
195 dbg("%s: change to speed %d\n", __FUNCTION__, speed); 195 dbg("%s: change to speed %d\n", __FUNCTION__, speed);
@@ -204,7 +204,7 @@ static int change_bus_speed(struct controller *ctrl, struct slot *p_slot,
204static int fix_bus_speed(struct controller *ctrl, struct slot *pslot, 204static int fix_bus_speed(struct controller *ctrl, struct slot *pslot,
205 u8 flag, enum pci_bus_speed asp, enum pci_bus_speed bsp, 205 u8 flag, enum pci_bus_speed asp, enum pci_bus_speed bsp,
206 enum pci_bus_speed msp) 206 enum pci_bus_speed msp)
207{ 207{
208 int rc = 0; 208 int rc = 0;
209 209
210 /* 210 /*
@@ -257,23 +257,23 @@ static int board_added(struct slot *p_slot)
257 err("%s: Failed to power on slot\n", __FUNCTION__); 257 err("%s: Failed to power on slot\n", __FUNCTION__);
258 return -1; 258 return -1;
259 } 259 }
260 260
261 if ((ctrl->pci_dev->vendor == 0x8086) && (ctrl->pci_dev->device == 0x0332)) { 261 if ((ctrl->pci_dev->vendor == 0x8086) && (ctrl->pci_dev->device == 0x0332)) {
262 if (slots_not_empty) 262 if (slots_not_empty)
263 return WRONG_BUS_FREQUENCY; 263 return WRONG_BUS_FREQUENCY;
264 264
265 if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz))) { 265 if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz))) {
266 err("%s: Issue of set bus speed mode command failed\n", __FUNCTION__); 266 err("%s: Issue of set bus speed mode command failed\n", __FUNCTION__);
267 return WRONG_BUS_FREQUENCY; 267 return WRONG_BUS_FREQUENCY;
268 } 268 }
269 269
270 /* turn on board, blink green LED, turn off Amber LED */ 270 /* turn on board, blink green LED, turn off Amber LED */
271 if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) { 271 if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) {
272 err("%s: Issue of Slot Enable command failed\n", __FUNCTION__); 272 err("%s: Issue of Slot Enable command failed\n", __FUNCTION__);
273 return rc; 273 return rc;
274 } 274 }
275 } 275 }
276 276
277 rc = p_slot->hpc_ops->get_adapter_speed(p_slot, &asp); 277 rc = p_slot->hpc_ops->get_adapter_speed(p_slot, &asp);
278 if (rc) { 278 if (rc) {
279 err("%s: Can't get adapter speed or bus mode mismatch\n", 279 err("%s: Can't get adapter speed or bus mode mismatch\n",
@@ -378,7 +378,7 @@ static int remove_board(struct slot *p_slot)
378 err("%s: Issue of Slot Disable command failed\n", __FUNCTION__); 378 err("%s: Issue of Slot Disable command failed\n", __FUNCTION__);
379 return rc; 379 return rc;
380 } 380 }
381 381
382 rc = p_slot->hpc_ops->set_attention_status(p_slot, 0); 382 rc = p_slot->hpc_ops->set_attention_status(p_slot, 0);
383 if (rc) { 383 if (rc) {
384 err("%s: Issue of Set Attention command failed\n", __FUNCTION__); 384 err("%s: Issue of Set Attention command failed\n", __FUNCTION__);
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index b7bede4b7c27..5183a45d45b5 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -35,38 +35,6 @@
35 35
36#include "shpchp.h" 36#include "shpchp.h"
37 37
38#ifdef DEBUG
39#define DBG_K_TRACE_ENTRY ((unsigned int)0x00000001) /* On function entry */
40#define DBG_K_TRACE_EXIT ((unsigned int)0x00000002) /* On function exit */
41#define DBG_K_INFO ((unsigned int)0x00000004) /* Info messages */
42#define DBG_K_ERROR ((unsigned int)0x00000008) /* Error messages */
43#define DBG_K_TRACE (DBG_K_TRACE_ENTRY|DBG_K_TRACE_EXIT)
44#define DBG_K_STANDARD (DBG_K_INFO|DBG_K_ERROR|DBG_K_TRACE)
45/* Redefine this flagword to set debug level */
46#define DEBUG_LEVEL DBG_K_STANDARD
47
48#define DEFINE_DBG_BUFFER char __dbg_str_buf[256];
49
50#define DBG_PRINT( dbg_flags, args... ) \
51 do { \
52 if ( DEBUG_LEVEL & ( dbg_flags ) ) \
53 { \
54 int len; \
55 len = sprintf( __dbg_str_buf, "%s:%d: %s: ", \
56 __FILE__, __LINE__, __FUNCTION__ ); \
57 sprintf( __dbg_str_buf + len, args ); \
58 printk( KERN_NOTICE "%s\n", __dbg_str_buf ); \
59 } \
60 } while (0)
61
62#define DBG_ENTER_ROUTINE DBG_PRINT (DBG_K_TRACE_ENTRY, "%s", "[Entry]");
63#define DBG_LEAVE_ROUTINE DBG_PRINT (DBG_K_TRACE_EXIT, "%s", "[Exit]");
64#else
65#define DEFINE_DBG_BUFFER
66#define DBG_ENTER_ROUTINE
67#define DBG_LEAVE_ROUTINE
68#endif /* DEBUG */
69
70/* Slot Available Register I field definition */ 38/* Slot Available Register I field definition */
71#define SLOT_33MHZ 0x0000001f 39#define SLOT_33MHZ 0x0000001f
72#define SLOT_66MHZ_PCIX 0x00001f00 40#define SLOT_66MHZ_PCIX 0x00001f00
@@ -211,7 +179,6 @@
211#define SLOT_EVENT_LATCH 0x2 179#define SLOT_EVENT_LATCH 0x2
212#define SLOT_SERR_INT_MASK 0x3 180#define SLOT_SERR_INT_MASK 0x3
213 181
214DEFINE_DBG_BUFFER /* Debug string buffer for entire HPC defined here */
215static atomic_t shpchp_num_controllers = ATOMIC_INIT(0); 182static atomic_t shpchp_num_controllers = ATOMIC_INIT(0);
216 183
217static irqreturn_t shpc_isr(int irq, void *dev_id); 184static irqreturn_t shpc_isr(int irq, void *dev_id);
@@ -268,8 +235,6 @@ static void int_poll_timeout(unsigned long data)
268{ 235{
269 struct controller *ctrl = (struct controller *)data; 236 struct controller *ctrl = (struct controller *)data;
270 237
271 DBG_ENTER_ROUTINE
272
273 /* Poll for interrupt events. regs == NULL => polling */ 238 /* Poll for interrupt events. regs == NULL => polling */
274 shpc_isr(0, ctrl); 239 shpc_isr(0, ctrl);
275 240
@@ -278,8 +243,6 @@ static void int_poll_timeout(unsigned long data)
278 shpchp_poll_time = 2; /* default polling interval is 2 sec */ 243 shpchp_poll_time = 2; /* default polling interval is 2 sec */
279 244
280 start_int_poll_timer(ctrl, shpchp_poll_time); 245 start_int_poll_timer(ctrl, shpchp_poll_time);
281
282 DBG_LEAVE_ROUTINE
283} 246}
284 247
285/* 248/*
@@ -353,8 +316,6 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
353 int retval = 0; 316 int retval = 0;
354 u16 temp_word; 317 u16 temp_word;
355 318
356 DBG_ENTER_ROUTINE
357
358 mutex_lock(&slot->ctrl->cmd_lock); 319 mutex_lock(&slot->ctrl->cmd_lock);
359 320
360 if (!shpc_poll_ctrl_busy(ctrl)) { 321 if (!shpc_poll_ctrl_busy(ctrl)) {
@@ -368,9 +329,9 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
368 ++t_slot; 329 ++t_slot;
369 temp_word = (t_slot << 8) | (cmd & 0xFF); 330 temp_word = (t_slot << 8) | (cmd & 0xFF);
370 dbg("%s: t_slot %x cmd %x\n", __FUNCTION__, t_slot, cmd); 331 dbg("%s: t_slot %x cmd %x\n", __FUNCTION__, t_slot, cmd);
371 332
372 /* To make sure the Controller Busy bit is 0 before we send out the 333 /* To make sure the Controller Busy bit is 0 before we send out the
373 * command. 334 * command.
374 */ 335 */
375 shpc_writew(ctrl, CMD, temp_word); 336 shpc_writew(ctrl, CMD, temp_word);
376 337
@@ -389,20 +350,14 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
389 } 350 }
390 out: 351 out:
391 mutex_unlock(&slot->ctrl->cmd_lock); 352 mutex_unlock(&slot->ctrl->cmd_lock);
392
393 DBG_LEAVE_ROUTINE
394 return retval; 353 return retval;
395} 354}
396 355
397static int hpc_check_cmd_status(struct controller *ctrl) 356static int hpc_check_cmd_status(struct controller *ctrl)
398{ 357{
399 u16 cmd_status;
400 int retval = 0; 358 int retval = 0;
359 u16 cmd_status = shpc_readw(ctrl, CMD_STATUS) & 0x000F;
401 360
402 DBG_ENTER_ROUTINE
403
404 cmd_status = shpc_readw(ctrl, CMD_STATUS) & 0x000F;
405
406 switch (cmd_status >> 1) { 361 switch (cmd_status >> 1) {
407 case 0: 362 case 0:
408 retval = 0; 363 retval = 0;
@@ -423,7 +378,6 @@ static int hpc_check_cmd_status(struct controller *ctrl)
423 retval = cmd_status; 378 retval = cmd_status;
424 } 379 }
425 380
426 DBG_LEAVE_ROUTINE
427 return retval; 381 return retval;
428} 382}
429 383
@@ -431,13 +385,8 @@ static int hpc_check_cmd_status(struct controller *ctrl)
431static int hpc_get_attention_status(struct slot *slot, u8 *status) 385static int hpc_get_attention_status(struct slot *slot, u8 *status)
432{ 386{
433 struct controller *ctrl = slot->ctrl; 387 struct controller *ctrl = slot->ctrl;
434 u32 slot_reg; 388 u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
435 u8 state; 389 u8 state = (slot_reg & ATN_LED_STATE_MASK) >> ATN_LED_STATE_SHIFT;
436
437 DBG_ENTER_ROUTINE
438
439 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
440 state = (slot_reg & ATN_LED_STATE_MASK) >> ATN_LED_STATE_SHIFT;
441 390
442 switch (state) { 391 switch (state) {
443 case ATN_LED_STATE_ON: 392 case ATN_LED_STATE_ON:
@@ -454,20 +403,14 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status)
454 break; 403 break;
455 } 404 }
456 405
457 DBG_LEAVE_ROUTINE
458 return 0; 406 return 0;
459} 407}
460 408
461static int hpc_get_power_status(struct slot * slot, u8 *status) 409static int hpc_get_power_status(struct slot * slot, u8 *status)
462{ 410{
463 struct controller *ctrl = slot->ctrl; 411 struct controller *ctrl = slot->ctrl;
464 u32 slot_reg; 412 u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
465 u8 state; 413 u8 state = (slot_reg & SLOT_STATE_MASK) >> SLOT_STATE_SHIFT;
466
467 DBG_ENTER_ROUTINE
468
469 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
470 state = (slot_reg & SLOT_STATE_MASK) >> SLOT_STATE_SHIFT;
471 414
472 switch (state) { 415 switch (state) {
473 case SLOT_STATE_PWRONLY: 416 case SLOT_STATE_PWRONLY:
@@ -484,7 +427,6 @@ static int hpc_get_power_status(struct slot * slot, u8 *status)
484 break; 427 break;
485 } 428 }
486 429
487 DBG_LEAVE_ROUTINE
488 return 0; 430 return 0;
489} 431}
490 432
@@ -492,30 +434,21 @@ static int hpc_get_power_status(struct slot * slot, u8 *status)
492static int hpc_get_latch_status(struct slot *slot, u8 *status) 434static int hpc_get_latch_status(struct slot *slot, u8 *status)
493{ 435{
494 struct controller *ctrl = slot->ctrl; 436 struct controller *ctrl = slot->ctrl;
495 u32 slot_reg; 437 u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
496
497 DBG_ENTER_ROUTINE
498 438
499 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
500 *status = !!(slot_reg & MRL_SENSOR); /* 0 -> close; 1 -> open */ 439 *status = !!(slot_reg & MRL_SENSOR); /* 0 -> close; 1 -> open */
501 440
502 DBG_LEAVE_ROUTINE
503 return 0; 441 return 0;
504} 442}
505 443
506static int hpc_get_adapter_status(struct slot *slot, u8 *status) 444static int hpc_get_adapter_status(struct slot *slot, u8 *status)
507{ 445{
508 struct controller *ctrl = slot->ctrl; 446 struct controller *ctrl = slot->ctrl;
509 u32 slot_reg; 447 u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
510 u8 state; 448 u8 state = (slot_reg & PRSNT_MASK) >> PRSNT_SHIFT;
511
512 DBG_ENTER_ROUTINE
513 449
514 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
515 state = (slot_reg & PRSNT_MASK) >> PRSNT_SHIFT;
516 *status = (state != 0x3) ? 1 : 0; 450 *status = (state != 0x3) ? 1 : 0;
517 451
518 DBG_LEAVE_ROUTINE
519 return 0; 452 return 0;
520} 453}
521 454
@@ -523,11 +456,8 @@ static int hpc_get_prog_int(struct slot *slot, u8 *prog_int)
523{ 456{
524 struct controller *ctrl = slot->ctrl; 457 struct controller *ctrl = slot->ctrl;
525 458
526 DBG_ENTER_ROUTINE
527
528 *prog_int = shpc_readb(ctrl, PROG_INTERFACE); 459 *prog_int = shpc_readb(ctrl, PROG_INTERFACE);
529 460
530 DBG_LEAVE_ROUTINE
531 return 0; 461 return 0;
532} 462}
533 463
@@ -539,8 +469,6 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
539 u8 m66_cap = !!(slot_reg & MHZ66_CAP); 469 u8 m66_cap = !!(slot_reg & MHZ66_CAP);
540 u8 pi, pcix_cap; 470 u8 pi, pcix_cap;
541 471
542 DBG_ENTER_ROUTINE
543
544 if ((retval = hpc_get_prog_int(slot, &pi))) 472 if ((retval = hpc_get_prog_int(slot, &pi)))
545 return retval; 473 return retval;
546 474
@@ -582,21 +510,15 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
582 } 510 }
583 511
584 dbg("Adapter speed = %d\n", *value); 512 dbg("Adapter speed = %d\n", *value);
585 DBG_LEAVE_ROUTINE
586 return retval; 513 return retval;
587} 514}
588 515
589static int hpc_get_mode1_ECC_cap(struct slot *slot, u8 *mode) 516static int hpc_get_mode1_ECC_cap(struct slot *slot, u8 *mode)
590{ 517{
591 struct controller *ctrl = slot->ctrl;
592 u16 sec_bus_status;
593 u8 pi;
594 int retval = 0; 518 int retval = 0;
595 519 struct controller *ctrl = slot->ctrl;
596 DBG_ENTER_ROUTINE 520 u16 sec_bus_status = shpc_readw(ctrl, SEC_BUS_CONFIG);
597 521 u8 pi = shpc_readb(ctrl, PROG_INTERFACE);
598 pi = shpc_readb(ctrl, PROG_INTERFACE);
599 sec_bus_status = shpc_readw(ctrl, SEC_BUS_CONFIG);
600 522
601 if (pi == 2) { 523 if (pi == 2) {
602 *mode = (sec_bus_status & 0x0100) >> 8; 524 *mode = (sec_bus_status & 0x0100) >> 8;
@@ -605,21 +527,14 @@ static int hpc_get_mode1_ECC_cap(struct slot *slot, u8 *mode)
605 } 527 }
606 528
607 dbg("Mode 1 ECC cap = %d\n", *mode); 529 dbg("Mode 1 ECC cap = %d\n", *mode);
608
609 DBG_LEAVE_ROUTINE
610 return retval; 530 return retval;
611} 531}
612 532
613static int hpc_query_power_fault(struct slot * slot) 533static int hpc_query_power_fault(struct slot * slot)
614{ 534{
615 struct controller *ctrl = slot->ctrl; 535 struct controller *ctrl = slot->ctrl;
616 u32 slot_reg; 536 u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
617
618 DBG_ENTER_ROUTINE
619
620 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
621 537
622 DBG_LEAVE_ROUTINE
623 /* Note: Logic 0 => fault */ 538 /* Note: Logic 0 => fault */
624 return !(slot_reg & POWER_FAULT); 539 return !(slot_reg & POWER_FAULT);
625} 540}
@@ -629,7 +544,7 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
629 u8 slot_cmd = 0; 544 u8 slot_cmd = 0;
630 545
631 switch (value) { 546 switch (value) {
632 case 0 : 547 case 0 :
633 slot_cmd = SET_ATTN_OFF; /* OFF */ 548 slot_cmd = SET_ATTN_OFF; /* OFF */
634 break; 549 break;
635 case 1: 550 case 1:
@@ -666,8 +581,6 @@ static void hpc_release_ctlr(struct controller *ctrl)
666 int i; 581 int i;
667 u32 slot_reg, serr_int; 582 u32 slot_reg, serr_int;
668 583
669 DBG_ENTER_ROUTINE
670
671 /* 584 /*
672 * Mask event interrupts and SERRs of all slots 585 * Mask event interrupts and SERRs of all slots
673 */ 586 */
@@ -708,61 +621,43 @@ static void hpc_release_ctlr(struct controller *ctrl)
708 */ 621 */
709 if (atomic_dec_and_test(&shpchp_num_controllers)) 622 if (atomic_dec_and_test(&shpchp_num_controllers))
710 destroy_workqueue(shpchp_wq); 623 destroy_workqueue(shpchp_wq);
711
712 DBG_LEAVE_ROUTINE
713} 624}
714 625
715static int hpc_power_on_slot(struct slot * slot) 626static int hpc_power_on_slot(struct slot * slot)
716{ 627{
717 int retval; 628 int retval;
718 629
719 DBG_ENTER_ROUTINE
720
721 retval = shpc_write_cmd(slot, slot->hp_slot, SET_SLOT_PWR); 630 retval = shpc_write_cmd(slot, slot->hp_slot, SET_SLOT_PWR);
722 if (retval) { 631 if (retval)
723 err("%s: Write command failed!\n", __FUNCTION__); 632 err("%s: Write command failed!\n", __FUNCTION__);
724 return retval;
725 }
726
727 DBG_LEAVE_ROUTINE
728 633
729 return 0; 634 return retval;
730} 635}
731 636
732static int hpc_slot_enable(struct slot * slot) 637static int hpc_slot_enable(struct slot * slot)
733{ 638{
734 int retval; 639 int retval;
735 640
736 DBG_ENTER_ROUTINE
737
738 /* Slot - Enable, Power Indicator - Blink, Attention Indicator - Off */ 641 /* Slot - Enable, Power Indicator - Blink, Attention Indicator - Off */
739 retval = shpc_write_cmd(slot, slot->hp_slot, 642 retval = shpc_write_cmd(slot, slot->hp_slot,
740 SET_SLOT_ENABLE | SET_PWR_BLINK | SET_ATTN_OFF); 643 SET_SLOT_ENABLE | SET_PWR_BLINK | SET_ATTN_OFF);
741 if (retval) { 644 if (retval)
742 err("%s: Write command failed!\n", __FUNCTION__); 645 err("%s: Write command failed!\n", __FUNCTION__);
743 return retval;
744 }
745 646
746 DBG_LEAVE_ROUTINE 647 return retval;
747 return 0;
748} 648}
749 649
750static int hpc_slot_disable(struct slot * slot) 650static int hpc_slot_disable(struct slot * slot)
751{ 651{
752 int retval; 652 int retval;
753 653
754 DBG_ENTER_ROUTINE
755
756 /* Slot - Disable, Power Indicator - Off, Attention Indicator - On */ 654 /* Slot - Disable, Power Indicator - Off, Attention Indicator - On */
757 retval = shpc_write_cmd(slot, slot->hp_slot, 655 retval = shpc_write_cmd(slot, slot->hp_slot,
758 SET_SLOT_DISABLE | SET_PWR_OFF | SET_ATTN_ON); 656 SET_SLOT_DISABLE | SET_PWR_OFF | SET_ATTN_ON);
759 if (retval) { 657 if (retval)
760 err("%s: Write command failed!\n", __FUNCTION__); 658 err("%s: Write command failed!\n", __FUNCTION__);
761 return retval;
762 }
763 659
764 DBG_LEAVE_ROUTINE 660 return retval;
765 return 0;
766} 661}
767 662
768static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value) 663static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value)
@@ -771,8 +666,6 @@ static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value)
771 struct controller *ctrl = slot->ctrl; 666 struct controller *ctrl = slot->ctrl;
772 u8 pi, cmd; 667 u8 pi, cmd;
773 668
774 DBG_ENTER_ROUTINE
775
776 pi = shpc_readb(ctrl, PROG_INTERFACE); 669 pi = shpc_readb(ctrl, PROG_INTERFACE);
777 if ((pi == 1) && (value > PCI_SPEED_133MHz_PCIX)) 670 if ((pi == 1) && (value > PCI_SPEED_133MHz_PCIX))
778 return -EINVAL; 671 return -EINVAL;
@@ -828,7 +721,6 @@ static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value)
828 if (retval) 721 if (retval)
829 err("%s: Write command failed!\n", __FUNCTION__); 722 err("%s: Write command failed!\n", __FUNCTION__);
830 723
831 DBG_LEAVE_ROUTINE
832 return retval; 724 return retval;
833} 725}
834 726
@@ -843,7 +735,7 @@ static irqreturn_t shpc_isr(int irq, void *dev_id)
843 if (!intr_loc) 735 if (!intr_loc)
844 return IRQ_NONE; 736 return IRQ_NONE;
845 737
846 dbg("%s: intr_loc = %x\n",__FUNCTION__, intr_loc); 738 dbg("%s: intr_loc = %x\n",__FUNCTION__, intr_loc);
847 739
848 if(!shpchp_poll_mode) { 740 if(!shpchp_poll_mode) {
849 /* 741 /*
@@ -856,12 +748,12 @@ static irqreturn_t shpc_isr(int irq, void *dev_id)
856 shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int); 748 shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int);
857 749
858 intr_loc2 = shpc_readl(ctrl, INTR_LOC); 750 intr_loc2 = shpc_readl(ctrl, INTR_LOC);
859 dbg("%s: intr_loc2 = %x\n",__FUNCTION__, intr_loc2); 751 dbg("%s: intr_loc2 = %x\n",__FUNCTION__, intr_loc2);
860 } 752 }
861 753
862 if (intr_loc & CMD_INTR_PENDING) { 754 if (intr_loc & CMD_INTR_PENDING) {
863 /* 755 /*
864 * Command Complete Interrupt Pending 756 * Command Complete Interrupt Pending
865 * RO only - clear by writing 1 to the Command Completion 757 * RO only - clear by writing 1 to the Command Completion
866 * Detect bit in Controller SERR-INT register 758 * Detect bit in Controller SERR-INT register
867 */ 759 */
@@ -875,7 +767,7 @@ static irqreturn_t shpc_isr(int irq, void *dev_id)
875 if (!(intr_loc & ~CMD_INTR_PENDING)) 767 if (!(intr_loc & ~CMD_INTR_PENDING))
876 goto out; 768 goto out;
877 769
878 for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) { 770 for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) {
879 /* To find out which slot has interrupt pending */ 771 /* To find out which slot has interrupt pending */
880 if (!(intr_loc & SLOT_INTR_PENDING(hp_slot))) 772 if (!(intr_loc & SLOT_INTR_PENDING(hp_slot)))
881 continue; 773 continue;
@@ -907,7 +799,7 @@ static irqreturn_t shpc_isr(int irq, void *dev_id)
907 serr_int &= ~(GLOBAL_INTR_MASK | SERR_INTR_RSVDZ_MASK); 799 serr_int &= ~(GLOBAL_INTR_MASK | SERR_INTR_RSVDZ_MASK);
908 shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int); 800 shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int);
909 } 801 }
910 802
911 return IRQ_HANDLED; 803 return IRQ_HANDLED;
912} 804}
913 805
@@ -920,8 +812,6 @@ static int hpc_get_max_bus_speed (struct slot *slot, enum pci_bus_speed *value)
920 u32 slot_avail1 = shpc_readl(ctrl, SLOT_AVAIL1); 812 u32 slot_avail1 = shpc_readl(ctrl, SLOT_AVAIL1);
921 u32 slot_avail2 = shpc_readl(ctrl, SLOT_AVAIL2); 813 u32 slot_avail2 = shpc_readl(ctrl, SLOT_AVAIL2);
922 814
923 DBG_ENTER_ROUTINE
924
925 if (pi == 2) { 815 if (pi == 2) {
926 if (slot_avail2 & SLOT_133MHZ_PCIX_533) 816 if (slot_avail2 & SLOT_133MHZ_PCIX_533)
927 bus_speed = PCI_SPEED_133MHz_PCIX_533; 817 bus_speed = PCI_SPEED_133MHz_PCIX_533;
@@ -954,7 +844,7 @@ static int hpc_get_max_bus_speed (struct slot *slot, enum pci_bus_speed *value)
954 844
955 *value = bus_speed; 845 *value = bus_speed;
956 dbg("Max bus speed = %d\n", bus_speed); 846 dbg("Max bus speed = %d\n", bus_speed);
957 DBG_LEAVE_ROUTINE 847
958 return retval; 848 return retval;
959} 849}
960 850
@@ -967,8 +857,6 @@ static int hpc_get_cur_bus_speed (struct slot *slot, enum pci_bus_speed *value)
967 u8 pi = shpc_readb(ctrl, PROG_INTERFACE); 857 u8 pi = shpc_readb(ctrl, PROG_INTERFACE);
968 u8 speed_mode = (pi == 2) ? (sec_bus_reg & 0xF) : (sec_bus_reg & 0x7); 858 u8 speed_mode = (pi == 2) ? (sec_bus_reg & 0xF) : (sec_bus_reg & 0x7);
969 859
970 DBG_ENTER_ROUTINE
971
972 if ((pi == 1) && (speed_mode > 4)) { 860 if ((pi == 1) && (speed_mode > 4)) {
973 *value = PCI_SPEED_UNKNOWN; 861 *value = PCI_SPEED_UNKNOWN;
974 return -ENODEV; 862 return -ENODEV;
@@ -1024,7 +912,6 @@ static int hpc_get_cur_bus_speed (struct slot *slot, enum pci_bus_speed *value)
1024 } 912 }
1025 913
1026 dbg("Current bus speed = %d\n", bus_speed); 914 dbg("Current bus speed = %d\n", bus_speed);
1027 DBG_LEAVE_ROUTINE
1028 return retval; 915 return retval;
1029} 916}
1030 917
@@ -1032,7 +919,7 @@ static struct hpc_ops shpchp_hpc_ops = {
1032 .power_on_slot = hpc_power_on_slot, 919 .power_on_slot = hpc_power_on_slot,
1033 .slot_enable = hpc_slot_enable, 920 .slot_enable = hpc_slot_enable,
1034 .slot_disable = hpc_slot_disable, 921 .slot_disable = hpc_slot_disable,
1035 .set_bus_speed_mode = hpc_set_bus_speed_mode, 922 .set_bus_speed_mode = hpc_set_bus_speed_mode,
1036 .set_attention_status = hpc_set_attention_status, 923 .set_attention_status = hpc_set_attention_status,
1037 .get_power_status = hpc_get_power_status, 924 .get_power_status = hpc_get_power_status,
1038 .get_attention_status = hpc_get_attention_status, 925 .get_attention_status = hpc_get_attention_status,
@@ -1049,7 +936,7 @@ static struct hpc_ops shpchp_hpc_ops = {
1049 .green_led_on = hpc_set_green_led_on, 936 .green_led_on = hpc_set_green_led_on,
1050 .green_led_off = hpc_set_green_led_off, 937 .green_led_off = hpc_set_green_led_off,
1051 .green_led_blink = hpc_set_green_led_blink, 938 .green_led_blink = hpc_set_green_led_blink,
1052 939
1053 .release_ctlr = hpc_release_ctlr, 940 .release_ctlr = hpc_release_ctlr,
1054}; 941};
1055 942
@@ -1061,8 +948,6 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
1061 u32 tempdword, slot_reg, slot_config; 948 u32 tempdword, slot_reg, slot_config;
1062 u8 i; 949 u8 i;
1063 950
1064 DBG_ENTER_ROUTINE
1065
1066 ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */ 951 ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */
1067 952
1068 if ((pdev->vendor == PCI_VENDOR_ID_AMD) || (pdev->device == 953 if ((pdev->vendor == PCI_VENDOR_ID_AMD) || (pdev->device ==
@@ -1108,9 +993,9 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
1108 ctrl->mmio_size = 0x24 + 0x4 * num_slots; 993 ctrl->mmio_size = 0x24 + 0x4 * num_slots;
1109 } 994 }
1110 995
1111 info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", pdev->vendor, pdev->device, pdev->subsystem_vendor, 996 info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", pdev->vendor, pdev->device, pdev->subsystem_vendor,
1112 pdev->subsystem_device); 997 pdev->subsystem_device);
1113 998
1114 rc = pci_enable_device(pdev); 999 rc = pci_enable_device(pdev);
1115 if (rc) { 1000 if (rc) {
1116 err("%s: pci_enable_device failed\n", __FUNCTION__); 1001 err("%s: pci_enable_device failed\n", __FUNCTION__);
@@ -1172,7 +1057,7 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
1172 slot_reg &= ~SLOT_REG_RSVDZ_MASK; 1057 slot_reg &= ~SLOT_REG_RSVDZ_MASK;
1173 shpc_writel(ctrl, SLOT_REG(hp_slot), slot_reg); 1058 shpc_writel(ctrl, SLOT_REG(hp_slot), slot_reg);
1174 } 1059 }
1175 1060
1176 if (shpchp_poll_mode) { 1061 if (shpchp_poll_mode) {
1177 /* Install interrupt polling timer. Start with 10 sec delay */ 1062 /* Install interrupt polling timer. Start with 10 sec delay */
1178 init_timer(&ctrl->poll_timer); 1063 init_timer(&ctrl->poll_timer);
@@ -1184,7 +1069,7 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
1184 info("Can't get msi for the hotplug controller\n"); 1069 info("Can't get msi for the hotplug controller\n");
1185 info("Use INTx for the hotplug controller\n"); 1070 info("Use INTx for the hotplug controller\n");
1186 } 1071 }
1187 1072
1188 rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED, 1073 rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED,
1189 MY_NAME, (void *)ctrl); 1074 MY_NAME, (void *)ctrl);
1190 dbg("%s: request_irq %d for hpc%d (returns %d)\n", 1075 dbg("%s: request_irq %d for hpc%d (returns %d)\n",
@@ -1235,13 +1120,11 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
1235 dbg("%s: SERR_INTR_ENABLE = %x\n", __FUNCTION__, tempdword); 1120 dbg("%s: SERR_INTR_ENABLE = %x\n", __FUNCTION__, tempdword);
1236 } 1121 }
1237 1122
1238 DBG_LEAVE_ROUTINE
1239 return 0; 1123 return 0;
1240 1124
1241 /* We end up here for the many possible ways to fail this API. */ 1125 /* We end up here for the many possible ways to fail this API. */
1242abort_iounmap: 1126abort_iounmap:
1243 iounmap(ctrl->creg); 1127 iounmap(ctrl->creg);
1244abort: 1128abort:
1245 DBG_LEAVE_ROUTINE
1246 return rc; 1129 return rc;
1247} 1130}
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index ed3f7e1a563c..68555c11f556 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -24,8 +24,6 @@
24#include "pci.h" 24#include "pci.h"
25#include "msi.h" 25#include "msi.h"
26 26
27static DEFINE_SPINLOCK(msi_lock);
28static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
29static struct kmem_cache* msi_cachep; 27static struct kmem_cache* msi_cachep;
30 28
31static int pci_msi_enable = 1; 29static int pci_msi_enable = 1;
@@ -44,13 +42,13 @@ static void msi_set_mask_bit(unsigned int irq, int flag)
44{ 42{
45 struct msi_desc *entry; 43 struct msi_desc *entry;
46 44
47 entry = msi_desc[irq]; 45 entry = get_irq_msi(irq);
48 BUG_ON(!entry || !entry->dev); 46 BUG_ON(!entry || !entry->dev);
49 switch (entry->msi_attrib.type) { 47 switch (entry->msi_attrib.type) {
50 case PCI_CAP_ID_MSI: 48 case PCI_CAP_ID_MSI:
51 if (entry->msi_attrib.maskbit) { 49 if (entry->msi_attrib.maskbit) {
52 int pos; 50 int pos;
53 u32 mask_bits; 51 u32 mask_bits;
54 52
55 pos = (long)entry->mask_base; 53 pos = (long)entry->mask_base;
56 pci_read_config_dword(entry->dev, pos, &mask_bits); 54 pci_read_config_dword(entry->dev, pos, &mask_bits);
@@ -74,7 +72,7 @@ static void msi_set_mask_bit(unsigned int irq, int flag)
74 72
75void read_msi_msg(unsigned int irq, struct msi_msg *msg) 73void read_msi_msg(unsigned int irq, struct msi_msg *msg)
76{ 74{
77 struct msi_desc *entry = get_irq_data(irq); 75 struct msi_desc *entry = get_irq_msi(irq);
78 switch(entry->msi_attrib.type) { 76 switch(entry->msi_attrib.type) {
79 case PCI_CAP_ID_MSI: 77 case PCI_CAP_ID_MSI:
80 { 78 {
@@ -113,7 +111,7 @@ void read_msi_msg(unsigned int irq, struct msi_msg *msg)
113 111
114void write_msi_msg(unsigned int irq, struct msi_msg *msg) 112void write_msi_msg(unsigned int irq, struct msi_msg *msg)
115{ 113{
116 struct msi_desc *entry = get_irq_data(irq); 114 struct msi_desc *entry = get_irq_msi(irq);
117 switch (entry->msi_attrib.type) { 115 switch (entry->msi_attrib.type) {
118 case PCI_CAP_ID_MSI: 116 case PCI_CAP_ID_MSI:
119 { 117 {
@@ -162,6 +160,7 @@ void unmask_msi_irq(unsigned int irq)
162} 160}
163 161
164static int msi_free_irq(struct pci_dev* dev, int irq); 162static int msi_free_irq(struct pci_dev* dev, int irq);
163
165static int msi_init(void) 164static int msi_init(void)
166{ 165{
167 static int status = -ENOMEM; 166 static int status = -ENOMEM;
@@ -169,13 +168,6 @@ static int msi_init(void)
169 if (!status) 168 if (!status)
170 return status; 169 return status;
171 170
172 if (pci_msi_quirk) {
173 pci_msi_enable = 0;
174 printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
175 status = -EINVAL;
176 return status;
177 }
178
179 status = msi_cache_init(); 171 status = msi_cache_init();
180 if (status < 0) { 172 if (status < 0) {
181 pci_msi_enable = 0; 173 pci_msi_enable = 0;
@@ -200,46 +192,6 @@ static struct msi_desc* alloc_msi_entry(void)
200 return entry; 192 return entry;
201} 193}
202 194
203static void attach_msi_entry(struct msi_desc *entry, int irq)
204{
205 unsigned long flags;
206
207 spin_lock_irqsave(&msi_lock, flags);
208 msi_desc[irq] = entry;
209 spin_unlock_irqrestore(&msi_lock, flags);
210}
211
212static int create_msi_irq(void)
213{
214 struct msi_desc *entry;
215 int irq;
216
217 entry = alloc_msi_entry();
218 if (!entry)
219 return -ENOMEM;
220
221 irq = create_irq();
222 if (irq < 0) {
223 kmem_cache_free(msi_cachep, entry);
224 return -EBUSY;
225 }
226
227 set_irq_data(irq, entry);
228
229 return irq;
230}
231
232static void destroy_msi_irq(unsigned int irq)
233{
234 struct msi_desc *entry;
235
236 entry = get_irq_data(irq);
237 set_irq_chip(irq, NULL);
238 set_irq_data(irq, NULL);
239 destroy_irq(irq);
240 kmem_cache_free(msi_cachep, entry);
241}
242
243static void enable_msi_mode(struct pci_dev *dev, int pos, int type) 195static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
244{ 196{
245 u16 control; 197 u16 control;
@@ -278,36 +230,8 @@ void disable_msi_mode(struct pci_dev *dev, int pos, int type)
278 pci_intx(dev, 1); /* enable intx */ 230 pci_intx(dev, 1); /* enable intx */
279} 231}
280 232
281static int msi_lookup_irq(struct pci_dev *dev, int type)
282{
283 int irq;
284 unsigned long flags;
285
286 spin_lock_irqsave(&msi_lock, flags);
287 for (irq = 0; irq < NR_IRQS; irq++) {
288 if (!msi_desc[irq] || msi_desc[irq]->dev != dev ||
289 msi_desc[irq]->msi_attrib.type != type ||
290 msi_desc[irq]->msi_attrib.default_irq != dev->irq)
291 continue;
292 spin_unlock_irqrestore(&msi_lock, flags);
293 /* This pre-assigned MSI irq for this device
294 already exits. Override dev->irq with this irq */
295 dev->irq = irq;
296 return 0;
297 }
298 spin_unlock_irqrestore(&msi_lock, flags);
299
300 return -EACCES;
301}
302
303void pci_scan_msi_device(struct pci_dev *dev)
304{
305 if (!dev)
306 return;
307}
308
309#ifdef CONFIG_PM 233#ifdef CONFIG_PM
310int pci_save_msi_state(struct pci_dev *dev) 234static int __pci_save_msi_state(struct pci_dev *dev)
311{ 235{
312 int pos, i = 0; 236 int pos, i = 0;
313 u16 control; 237 u16 control;
@@ -345,7 +269,7 @@ int pci_save_msi_state(struct pci_dev *dev)
345 return 0; 269 return 0;
346} 270}
347 271
348void pci_restore_msi_state(struct pci_dev *dev) 272static void __pci_restore_msi_state(struct pci_dev *dev)
349{ 273{
350 int i = 0, pos; 274 int i = 0, pos;
351 u16 control; 275 u16 control;
@@ -373,14 +297,16 @@ void pci_restore_msi_state(struct pci_dev *dev)
373 kfree(save_state); 297 kfree(save_state);
374} 298}
375 299
376int pci_save_msix_state(struct pci_dev *dev) 300static int __pci_save_msix_state(struct pci_dev *dev)
377{ 301{
378 int pos; 302 int pos;
379 int temp;
380 int irq, head, tail = 0; 303 int irq, head, tail = 0;
381 u16 control; 304 u16 control;
382 struct pci_cap_saved_state *save_state; 305 struct pci_cap_saved_state *save_state;
383 306
307 if (!dev->msix_enabled)
308 return 0;
309
384 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 310 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
385 if (pos <= 0 || dev->no_msi) 311 if (pos <= 0 || dev->no_msi)
386 return 0; 312 return 0;
@@ -398,38 +324,46 @@ int pci_save_msix_state(struct pci_dev *dev)
398 *((u16 *)&save_state->data[0]) = control; 324 *((u16 *)&save_state->data[0]) = control;
399 325
400 /* save the table */ 326 /* save the table */
401 temp = dev->irq; 327 irq = head = dev->first_msi_irq;
402 if (msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) {
403 kfree(save_state);
404 return -EINVAL;
405 }
406
407 irq = head = dev->irq;
408 while (head != tail) { 328 while (head != tail) {
409 struct msi_desc *entry; 329 struct msi_desc *entry;
410 330
411 entry = msi_desc[irq]; 331 entry = get_irq_msi(irq);
412 read_msi_msg(irq, &entry->msg_save); 332 read_msi_msg(irq, &entry->msg_save);
413 333
414 tail = msi_desc[irq]->link.tail; 334 tail = entry->link.tail;
415 irq = tail; 335 irq = tail;
416 } 336 }
417 dev->irq = temp;
418 337
419 save_state->cap_nr = PCI_CAP_ID_MSIX; 338 save_state->cap_nr = PCI_CAP_ID_MSIX;
420 pci_add_saved_cap(dev, save_state); 339 pci_add_saved_cap(dev, save_state);
421 return 0; 340 return 0;
422} 341}
423 342
424void pci_restore_msix_state(struct pci_dev *dev) 343int pci_save_msi_state(struct pci_dev *dev)
344{
345 int rc;
346
347 rc = __pci_save_msi_state(dev);
348 if (rc)
349 return rc;
350
351 rc = __pci_save_msix_state(dev);
352
353 return rc;
354}
355
356static void __pci_restore_msix_state(struct pci_dev *dev)
425{ 357{
426 u16 save; 358 u16 save;
427 int pos; 359 int pos;
428 int irq, head, tail = 0; 360 int irq, head, tail = 0;
429 struct msi_desc *entry; 361 struct msi_desc *entry;
430 int temp;
431 struct pci_cap_saved_state *save_state; 362 struct pci_cap_saved_state *save_state;
432 363
364 if (!dev->msix_enabled)
365 return;
366
433 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX); 367 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
434 if (!save_state) 368 if (!save_state)
435 return; 369 return;
@@ -442,23 +376,25 @@ void pci_restore_msix_state(struct pci_dev *dev)
442 return; 376 return;
443 377
444 /* route the table */ 378 /* route the table */
445 temp = dev->irq; 379 irq = head = dev->first_msi_irq;
446 if (msi_lookup_irq(dev, PCI_CAP_ID_MSIX))
447 return;
448 irq = head = dev->irq;
449 while (head != tail) { 380 while (head != tail) {
450 entry = msi_desc[irq]; 381 entry = get_irq_msi(irq);
451 write_msi_msg(irq, &entry->msg_save); 382 write_msi_msg(irq, &entry->msg_save);
452 383
453 tail = msi_desc[irq]->link.tail; 384 tail = entry->link.tail;
454 irq = tail; 385 irq = tail;
455 } 386 }
456 dev->irq = temp;
457 387
458 pci_write_config_word(dev, msi_control_reg(pos), save); 388 pci_write_config_word(dev, msi_control_reg(pos), save);
459 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX); 389 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
460} 390}
461#endif 391
392void pci_restore_msi_state(struct pci_dev *dev)
393{
394 __pci_restore_msi_state(dev);
395 __pci_restore_msix_state(dev);
396}
397#endif /* CONFIG_PM */
462 398
463/** 399/**
464 * msi_capability_init - configure device's MSI capability structure 400 * msi_capability_init - configure device's MSI capability structure
@@ -471,7 +407,6 @@ void pci_restore_msix_state(struct pci_dev *dev)
471 **/ 407 **/
472static int msi_capability_init(struct pci_dev *dev) 408static int msi_capability_init(struct pci_dev *dev)
473{ 409{
474 int status;
475 struct msi_desc *entry; 410 struct msi_desc *entry;
476 int pos, irq; 411 int pos, irq;
477 u16 control; 412 u16 control;
@@ -479,13 +414,10 @@ static int msi_capability_init(struct pci_dev *dev)
479 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 414 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
480 pci_read_config_word(dev, msi_control_reg(pos), &control); 415 pci_read_config_word(dev, msi_control_reg(pos), &control);
481 /* MSI Entry Initialization */ 416 /* MSI Entry Initialization */
482 irq = create_msi_irq(); 417 entry = alloc_msi_entry();
483 if (irq < 0) 418 if (!entry)
484 return irq; 419 return -ENOMEM;
485 420
486 entry = get_irq_data(irq);
487 entry->link.head = irq;
488 entry->link.tail = irq;
489 entry->msi_attrib.type = PCI_CAP_ID_MSI; 421 entry->msi_attrib.type = PCI_CAP_ID_MSI;
490 entry->msi_attrib.is_64 = is_64bit_address(control); 422 entry->msi_attrib.is_64 = is_64bit_address(control);
491 entry->msi_attrib.entry_nr = 0; 423 entry->msi_attrib.entry_nr = 0;
@@ -511,13 +443,16 @@ static int msi_capability_init(struct pci_dev *dev)
511 maskbits); 443 maskbits);
512 } 444 }
513 /* Configure MSI capability structure */ 445 /* Configure MSI capability structure */
514 status = arch_setup_msi_irq(irq, dev); 446 irq = arch_setup_msi_irq(dev, entry);
515 if (status < 0) { 447 if (irq < 0) {
516 destroy_msi_irq(irq); 448 kmem_cache_free(msi_cachep, entry);
517 return status; 449 return irq;
518 } 450 }
451 entry->link.head = irq;
452 entry->link.tail = irq;
453 dev->first_msi_irq = irq;
454 set_irq_msi(irq, entry);
519 455
520 attach_msi_entry(entry, irq);
521 /* Set MSI enabled bits */ 456 /* Set MSI enabled bits */
522 enable_msi_mode(dev, pos, PCI_CAP_ID_MSI); 457 enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
523 458
@@ -539,7 +474,6 @@ static int msix_capability_init(struct pci_dev *dev,
539 struct msix_entry *entries, int nvec) 474 struct msix_entry *entries, int nvec)
540{ 475{
541 struct msi_desc *head = NULL, *tail = NULL, *entry = NULL; 476 struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
542 int status;
543 int irq, pos, i, j, nr_entries, temp = 0; 477 int irq, pos, i, j, nr_entries, temp = 0;
544 unsigned long phys_addr; 478 unsigned long phys_addr;
545 u32 table_offset; 479 u32 table_offset;
@@ -562,13 +496,11 @@ static int msix_capability_init(struct pci_dev *dev,
562 496
563 /* MSI-X Table Initialization */ 497 /* MSI-X Table Initialization */
564 for (i = 0; i < nvec; i++) { 498 for (i = 0; i < nvec; i++) {
565 irq = create_msi_irq(); 499 entry = alloc_msi_entry();
566 if (irq < 0) 500 if (!entry)
567 break; 501 break;
568 502
569 entry = get_irq_data(irq);
570 j = entries[i].entry; 503 j = entries[i].entry;
571 entries[i].vector = irq;
572 entry->msi_attrib.type = PCI_CAP_ID_MSIX; 504 entry->msi_attrib.type = PCI_CAP_ID_MSIX;
573 entry->msi_attrib.is_64 = 1; 505 entry->msi_attrib.is_64 = 1;
574 entry->msi_attrib.entry_nr = j; 506 entry->msi_attrib.entry_nr = j;
@@ -577,6 +509,14 @@ static int msix_capability_init(struct pci_dev *dev,
577 entry->msi_attrib.pos = pos; 509 entry->msi_attrib.pos = pos;
578 entry->dev = dev; 510 entry->dev = dev;
579 entry->mask_base = base; 511 entry->mask_base = base;
512
513 /* Configure MSI-X capability structure */
514 irq = arch_setup_msi_irq(dev, entry);
515 if (irq < 0) {
516 kmem_cache_free(msi_cachep, entry);
517 break;
518 }
519 entries[i].vector = irq;
580 if (!head) { 520 if (!head) {
581 entry->link.head = irq; 521 entry->link.head = irq;
582 entry->link.tail = irq; 522 entry->link.tail = irq;
@@ -589,14 +529,8 @@ static int msix_capability_init(struct pci_dev *dev,
589 } 529 }
590 temp = irq; 530 temp = irq;
591 tail = entry; 531 tail = entry;
592 /* Configure MSI-X capability structure */
593 status = arch_setup_msi_irq(irq, dev);
594 if (status < 0) {
595 destroy_msi_irq(irq);
596 break;
597 }
598 532
599 attach_msi_entry(entry, irq); 533 set_irq_msi(irq, entry);
600 } 534 }
601 if (i != nvec) { 535 if (i != nvec) {
602 int avail = i - 1; 536 int avail = i - 1;
@@ -613,6 +547,7 @@ static int msix_capability_init(struct pci_dev *dev,
613 avail = -EBUSY; 547 avail = -EBUSY;
614 return avail; 548 return avail;
615 } 549 }
550 dev->first_msi_irq = entries[0].vector;
616 /* Set MSI-X enabled bits */ 551 /* Set MSI-X enabled bits */
617 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX); 552 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
618 553
@@ -660,13 +595,11 @@ int pci_msi_supported(struct pci_dev * dev)
660 **/ 595 **/
661int pci_enable_msi(struct pci_dev* dev) 596int pci_enable_msi(struct pci_dev* dev)
662{ 597{
663 int pos, temp, status; 598 int pos, status;
664 599
665 if (pci_msi_supported(dev) < 0) 600 if (pci_msi_supported(dev) < 0)
666 return -EINVAL; 601 return -EINVAL;
667 602
668 temp = dev->irq;
669
670 status = msi_init(); 603 status = msi_init();
671 if (status < 0) 604 if (status < 0)
672 return status; 605 return status;
@@ -675,15 +608,14 @@ int pci_enable_msi(struct pci_dev* dev)
675 if (!pos) 608 if (!pos)
676 return -EINVAL; 609 return -EINVAL;
677 610
678 WARN_ON(!msi_lookup_irq(dev, PCI_CAP_ID_MSI)); 611 WARN_ON(!!dev->msi_enabled);
679 612
680 /* Check whether driver already requested for MSI-X irqs */ 613 /* Check whether driver already requested for MSI-X irqs */
681 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 614 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
682 if (pos > 0 && !msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) { 615 if (pos > 0 && dev->msix_enabled) {
683 printk(KERN_INFO "PCI: %s: Can't enable MSI. " 616 printk(KERN_INFO "PCI: %s: Can't enable MSI. "
684 "Device already has MSI-X irq assigned\n", 617 "Device already has MSI-X enabled\n",
685 pci_name(dev)); 618 pci_name(dev));
686 dev->irq = temp;
687 return -EINVAL; 619 return -EINVAL;
688 } 620 }
689 status = msi_capability_init(dev); 621 status = msi_capability_init(dev);
@@ -695,13 +627,15 @@ void pci_disable_msi(struct pci_dev* dev)
695 struct msi_desc *entry; 627 struct msi_desc *entry;
696 int pos, default_irq; 628 int pos, default_irq;
697 u16 control; 629 u16 control;
698 unsigned long flags;
699 630
700 if (!pci_msi_enable) 631 if (!pci_msi_enable)
701 return; 632 return;
702 if (!dev) 633 if (!dev)
703 return; 634 return;
704 635
636 if (!dev->msi_enabled)
637 return;
638
705 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 639 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
706 if (!pos) 640 if (!pos)
707 return; 641 return;
@@ -710,28 +644,26 @@ void pci_disable_msi(struct pci_dev* dev)
710 if (!(control & PCI_MSI_FLAGS_ENABLE)) 644 if (!(control & PCI_MSI_FLAGS_ENABLE))
711 return; 645 return;
712 646
647
713 disable_msi_mode(dev, pos, PCI_CAP_ID_MSI); 648 disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
714 649
715 spin_lock_irqsave(&msi_lock, flags); 650 entry = get_irq_msi(dev->first_msi_irq);
716 entry = msi_desc[dev->irq];
717 if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) { 651 if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
718 spin_unlock_irqrestore(&msi_lock, flags);
719 return; 652 return;
720 } 653 }
721 if (irq_has_action(dev->irq)) { 654 if (irq_has_action(dev->first_msi_irq)) {
722 spin_unlock_irqrestore(&msi_lock, flags);
723 printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without " 655 printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
724 "free_irq() on MSI irq %d\n", 656 "free_irq() on MSI irq %d\n",
725 pci_name(dev), dev->irq); 657 pci_name(dev), dev->first_msi_irq);
726 BUG_ON(irq_has_action(dev->irq)); 658 BUG_ON(irq_has_action(dev->first_msi_irq));
727 } else { 659 } else {
728 default_irq = entry->msi_attrib.default_irq; 660 default_irq = entry->msi_attrib.default_irq;
729 spin_unlock_irqrestore(&msi_lock, flags); 661 msi_free_irq(dev, dev->first_msi_irq);
730 msi_free_irq(dev, dev->irq);
731 662
732 /* Restore dev->irq to its default pin-assertion irq */ 663 /* Restore dev->irq to its default pin-assertion irq */
733 dev->irq = default_irq; 664 dev->irq = default_irq;
734 } 665 }
666 dev->first_msi_irq = 0;
735} 667}
736 668
737static int msi_free_irq(struct pci_dev* dev, int irq) 669static int msi_free_irq(struct pci_dev* dev, int irq)
@@ -739,27 +671,20 @@ static int msi_free_irq(struct pci_dev* dev, int irq)
739 struct msi_desc *entry; 671 struct msi_desc *entry;
740 int head, entry_nr, type; 672 int head, entry_nr, type;
741 void __iomem *base; 673 void __iomem *base;
742 unsigned long flags;
743 674
744 arch_teardown_msi_irq(irq); 675 entry = get_irq_msi(irq);
745
746 spin_lock_irqsave(&msi_lock, flags);
747 entry = msi_desc[irq];
748 if (!entry || entry->dev != dev) { 676 if (!entry || entry->dev != dev) {
749 spin_unlock_irqrestore(&msi_lock, flags);
750 return -EINVAL; 677 return -EINVAL;
751 } 678 }
752 type = entry->msi_attrib.type; 679 type = entry->msi_attrib.type;
753 entry_nr = entry->msi_attrib.entry_nr; 680 entry_nr = entry->msi_attrib.entry_nr;
754 head = entry->link.head; 681 head = entry->link.head;
755 base = entry->mask_base; 682 base = entry->mask_base;
756 msi_desc[entry->link.head]->link.tail = entry->link.tail; 683 get_irq_msi(entry->link.head)->link.tail = entry->link.tail;
757 msi_desc[entry->link.tail]->link.head = entry->link.head; 684 get_irq_msi(entry->link.tail)->link.head = entry->link.head;
758 entry->dev = NULL;
759 msi_desc[irq] = NULL;
760 spin_unlock_irqrestore(&msi_lock, flags);
761 685
762 destroy_msi_irq(irq); 686 arch_teardown_msi_irq(irq);
687 kmem_cache_free(msi_cachep, entry);
763 688
764 if (type == PCI_CAP_ID_MSIX) { 689 if (type == PCI_CAP_ID_MSIX) {
765 writel(1, base + entry_nr * PCI_MSIX_ENTRY_SIZE + 690 writel(1, base + entry_nr * PCI_MSIX_ENTRY_SIZE +
@@ -790,7 +715,7 @@ static int msi_free_irq(struct pci_dev* dev, int irq)
790int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) 715int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
791{ 716{
792 int status, pos, nr_entries; 717 int status, pos, nr_entries;
793 int i, j, temp; 718 int i, j;
794 u16 control; 719 u16 control;
795 720
796 if (!entries || pci_msi_supported(dev) < 0) 721 if (!entries || pci_msi_supported(dev) < 0)
@@ -818,16 +743,14 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
818 return -EINVAL; /* duplicate entry */ 743 return -EINVAL; /* duplicate entry */
819 } 744 }
820 } 745 }
821 temp = dev->irq; 746 WARN_ON(!!dev->msix_enabled);
822 WARN_ON(!msi_lookup_irq(dev, PCI_CAP_ID_MSIX));
823 747
824 /* Check whether driver already requested for MSI irq */ 748 /* Check whether driver already requested for MSI irq */
825 if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 && 749 if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
826 !msi_lookup_irq(dev, PCI_CAP_ID_MSI)) { 750 dev->msi_enabled) {
827 printk(KERN_INFO "PCI: %s: Can't enable MSI-X. " 751 printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
828 "Device already has an MSI irq assigned\n", 752 "Device already has an MSI irq assigned\n",
829 pci_name(dev)); 753 pci_name(dev));
830 dev->irq = temp;
831 return -EINVAL; 754 return -EINVAL;
832 } 755 }
833 status = msix_capability_init(dev, entries, nvec); 756 status = msix_capability_init(dev, entries, nvec);
@@ -836,7 +759,8 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
836 759
837void pci_disable_msix(struct pci_dev* dev) 760void pci_disable_msix(struct pci_dev* dev)
838{ 761{
839 int pos, temp; 762 int irq, head, tail = 0, warning = 0;
763 int pos;
840 u16 control; 764 u16 control;
841 765
842 if (!pci_msi_enable) 766 if (!pci_msi_enable)
@@ -844,6 +768,9 @@ void pci_disable_msix(struct pci_dev* dev)
844 if (!dev) 768 if (!dev)
845 return; 769 return;
846 770
771 if (!dev->msix_enabled)
772 return;
773
847 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 774 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
848 if (!pos) 775 if (!pos)
849 return; 776 return;
@@ -854,31 +781,23 @@ void pci_disable_msix(struct pci_dev* dev)
854 781
855 disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX); 782 disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
856 783
857 temp = dev->irq; 784 irq = head = dev->first_msi_irq;
858 if (!msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) { 785 while (head != tail) {
859 int irq, head, tail = 0, warning = 0; 786 tail = get_irq_msi(irq)->link.tail;
860 unsigned long flags; 787 if (irq_has_action(irq))
861 788 warning = 1;
862 irq = head = dev->irq; 789 else if (irq != head) /* Release MSI-X irq */
863 dev->irq = temp; /* Restore pin IRQ */ 790 msi_free_irq(dev, irq);
864 while (head != tail) { 791 irq = tail;
865 spin_lock_irqsave(&msi_lock, flags); 792 }
866 tail = msi_desc[irq]->link.tail; 793 msi_free_irq(dev, irq);
867 spin_unlock_irqrestore(&msi_lock, flags); 794 if (warning) {
868 if (irq_has_action(irq)) 795 printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
869 warning = 1; 796 "free_irq() on all MSI-X irqs\n",
870 else if (irq != head) /* Release MSI-X irq */ 797 pci_name(dev));
871 msi_free_irq(dev, irq); 798 BUG_ON(warning > 0);
872 irq = tail;
873 }
874 msi_free_irq(dev, irq);
875 if (warning) {
876 printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
877 "free_irq() on all MSI-X irqs\n",
878 pci_name(dev));
879 BUG_ON(warning > 0);
880 }
881 } 799 }
800 dev->first_msi_irq = 0;
882} 801}
883 802
884/** 803/**
@@ -892,35 +811,26 @@ void pci_disable_msix(struct pci_dev* dev)
892 **/ 811 **/
893void msi_remove_pci_irq_vectors(struct pci_dev* dev) 812void msi_remove_pci_irq_vectors(struct pci_dev* dev)
894{ 813{
895 int pos, temp;
896 unsigned long flags;
897
898 if (!pci_msi_enable || !dev) 814 if (!pci_msi_enable || !dev)
899 return; 815 return;
900 816
901 temp = dev->irq; /* Save IOAPIC IRQ */ 817 if (dev->msi_enabled) {
902 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 818 if (irq_has_action(dev->first_msi_irq)) {
903 if (pos > 0 && !msi_lookup_irq(dev, PCI_CAP_ID_MSI)) {
904 if (irq_has_action(dev->irq)) {
905 printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() " 819 printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
906 "called without free_irq() on MSI irq %d\n", 820 "called without free_irq() on MSI irq %d\n",
907 pci_name(dev), dev->irq); 821 pci_name(dev), dev->first_msi_irq);
908 BUG_ON(irq_has_action(dev->irq)); 822 BUG_ON(irq_has_action(dev->first_msi_irq));
909 } else /* Release MSI irq assigned to this device */ 823 } else /* Release MSI irq assigned to this device */
910 msi_free_irq(dev, dev->irq); 824 msi_free_irq(dev, dev->first_msi_irq);
911 dev->irq = temp; /* Restore IOAPIC IRQ */
912 } 825 }
913 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 826 if (dev->msix_enabled) {
914 if (pos > 0 && !msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) {
915 int irq, head, tail = 0, warning = 0; 827 int irq, head, tail = 0, warning = 0;
916 void __iomem *base = NULL; 828 void __iomem *base = NULL;
917 829
918 irq = head = dev->irq; 830 irq = head = dev->first_msi_irq;
919 while (head != tail) { 831 while (head != tail) {
920 spin_lock_irqsave(&msi_lock, flags); 832 tail = get_irq_msi(irq)->link.tail;
921 tail = msi_desc[irq]->link.tail; 833 base = get_irq_msi(irq)->mask_base;
922 base = msi_desc[irq]->mask_base;
923 spin_unlock_irqrestore(&msi_lock, flags);
924 if (irq_has_action(irq)) 834 if (irq_has_action(irq))
925 warning = 1; 835 warning = 1;
926 else if (irq != head) /* Release MSI-X irq */ 836 else if (irq != head) /* Release MSI-X irq */
@@ -935,7 +845,6 @@ void msi_remove_pci_irq_vectors(struct pci_dev* dev)
935 pci_name(dev)); 845 pci_name(dev));
936 BUG_ON(warning > 0); 846 BUG_ON(warning > 0);
937 } 847 }
938 dev->irq = temp; /* Restore IOAPIC IRQ */
939 } 848 }
940} 849}
941 850
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 92d5e8db0de7..4438ae1ede4f 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -324,8 +324,7 @@ static int pci_default_resume(struct pci_dev *pci_dev)
324 /* restore the PCI config space */ 324 /* restore the PCI config space */
325 pci_restore_state(pci_dev); 325 pci_restore_state(pci_dev);
326 /* if the device was enabled before suspend, reenable */ 326 /* if the device was enabled before suspend, reenable */
327 if (atomic_read(&pci_dev->enable_cnt)) 327 retval = __pci_reenable_device(pci_dev);
328 retval = __pci_enable_device(pci_dev);
329 /* if the device was busmaster before the suspend, make it busmaster again */ 328 /* if the device was busmaster before the suspend, make it busmaster again */
330 if (pci_dev->is_busmaster) 329 if (pci_dev->is_busmaster)
331 pci_set_master(pci_dev); 330 pci_set_master(pci_dev);
@@ -422,7 +421,8 @@ static struct kobj_type pci_driver_kobj_type = {
422 * If no error occurred, the driver remains registered even if 421 * If no error occurred, the driver remains registered even if
423 * no device was claimed during registration. 422 * no device was claimed during registration.
424 */ 423 */
425int __pci_register_driver(struct pci_driver *drv, struct module *owner) 424int __pci_register_driver(struct pci_driver *drv, struct module *owner,
425 const char *mod_name)
426{ 426{
427 int error; 427 int error;
428 428
@@ -430,6 +430,7 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner)
430 drv->driver.name = drv->name; 430 drv->driver.name = drv->name;
431 drv->driver.bus = &pci_bus_type; 431 drv->driver.bus = &pci_bus_type;
432 drv->driver.owner = owner; 432 drv->driver.owner = owner;
433 drv->driver.mod_name = mod_name;
433 drv->driver.kobj.ktype = &pci_driver_kobj_type; 434 drv->driver.kobj.ktype = &pci_driver_kobj_type;
434 435
435 if (pci_multithread_probe) 436 if (pci_multithread_probe)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 206c834d263a..84c757ba0664 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -392,6 +392,14 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
392 if (state > PCI_D3hot) 392 if (state > PCI_D3hot)
393 state = PCI_D3hot; 393 state = PCI_D3hot;
394 394
395 /*
396 * If the device or the parent bridge can't support PCI PM, ignore
397 * the request if we're doing anything besides putting it into D0
398 * (which would only happen on boot).
399 */
400 if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
401 return 0;
402
395 /* Validate current state: 403 /* Validate current state:
396 * Can enter D0 from any state, but if we can only go deeper 404 * Can enter D0 from any state, but if we can only go deeper
397 * to sleep if we're already in a low power state 405 * to sleep if we're already in a low power state
@@ -403,13 +411,6 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
403 } else if (dev->current_state == state) 411 } else if (dev->current_state == state)
404 return 0; /* we're already there */ 412 return 0; /* we're already there */
405 413
406 /*
407 * If the device or the parent bridge can't support PCI PM, ignore
408 * the request if we're doing anything besides putting it into D0
409 * (which would only happen on boot).
410 */
411 if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
412 return 0;
413 414
414 /* find PCI PM capability in list */ 415 /* find PCI PM capability in list */
415 pm = pci_find_capability(dev, PCI_CAP_ID_PM); 416 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
@@ -633,8 +634,6 @@ pci_save_state(struct pci_dev *dev)
633 pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]); 634 pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]);
634 if ((i = pci_save_msi_state(dev)) != 0) 635 if ((i = pci_save_msi_state(dev)) != 0)
635 return i; 636 return i;
636 if ((i = pci_save_msix_state(dev)) != 0)
637 return i;
638 if ((i = pci_save_pcie_state(dev)) != 0) 637 if ((i = pci_save_pcie_state(dev)) != 0)
639 return i; 638 return i;
640 if ((i = pci_save_pcix_state(dev)) != 0) 639 if ((i = pci_save_pcix_state(dev)) != 0)
@@ -672,22 +671,11 @@ pci_restore_state(struct pci_dev *dev)
672 } 671 }
673 pci_restore_pcix_state(dev); 672 pci_restore_pcix_state(dev);
674 pci_restore_msi_state(dev); 673 pci_restore_msi_state(dev);
675 pci_restore_msix_state(dev); 674
676 return 0; 675 return 0;
677} 676}
678 677
679/** 678static int do_pci_enable_device(struct pci_dev *dev, int bars)
680 * pci_enable_device_bars - Initialize some of a device for use
681 * @dev: PCI device to be initialized
682 * @bars: bitmask of BAR's that must be configured
683 *
684 * Initialize device before it's used by a driver. Ask low-level code
685 * to enable selected I/O and memory resources. Wake up the device if it
686 * was suspended. Beware, this function can fail.
687 */
688
689int
690pci_enable_device_bars(struct pci_dev *dev, int bars)
691{ 679{
692 int err; 680 int err;
693 681
@@ -697,30 +685,47 @@ pci_enable_device_bars(struct pci_dev *dev, int bars)
697 err = pcibios_enable_device(dev, bars); 685 err = pcibios_enable_device(dev, bars);
698 if (err < 0) 686 if (err < 0)
699 return err; 687 return err;
688 pci_fixup_device(pci_fixup_enable, dev);
689
700 return 0; 690 return 0;
701} 691}
702 692
703/** 693/**
704 * __pci_enable_device - Initialize device before it's used by a driver. 694 * __pci_reenable_device - Resume abandoned device
695 * @dev: PCI device to be resumed
696 *
697 * Note this function is a backend of pci_default_resume and is not supposed
698 * to be called by normal code, write proper resume handler and use it instead.
699 */
700int
701__pci_reenable_device(struct pci_dev *dev)
702{
703 if (atomic_read(&dev->enable_cnt))
704 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
705 return 0;
706}
707
708/**
709 * pci_enable_device_bars - Initialize some of a device for use
705 * @dev: PCI device to be initialized 710 * @dev: PCI device to be initialized
711 * @bars: bitmask of BAR's that must be configured
706 * 712 *
707 * Initialize device before it's used by a driver. Ask low-level code 713 * Initialize device before it's used by a driver. Ask low-level code
708 * to enable I/O and memory. Wake up the device if it was suspended. 714 * to enable selected I/O and memory resources. Wake up the device if it
709 * Beware, this function can fail. 715 * was suspended. Beware, this function can fail.
710 *
711 * Note this function is a backend and is not supposed to be called by
712 * normal code, use pci_enable_device() instead.
713 */ 716 */
714int 717int
715__pci_enable_device(struct pci_dev *dev) 718pci_enable_device_bars(struct pci_dev *dev, int bars)
716{ 719{
717 int err; 720 int err;
718 721
719 err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1); 722 if (atomic_add_return(1, &dev->enable_cnt) > 1)
720 if (err) 723 return 0; /* already enabled */
721 return err; 724
722 pci_fixup_device(pci_fixup_enable, dev); 725 err = do_pci_enable_device(dev, bars);
723 return 0; 726 if (err < 0)
727 atomic_dec(&dev->enable_cnt);
728 return err;
724} 729}
725 730
726/** 731/**
@@ -736,13 +741,7 @@ __pci_enable_device(struct pci_dev *dev)
736 */ 741 */
737int pci_enable_device(struct pci_dev *dev) 742int pci_enable_device(struct pci_dev *dev)
738{ 743{
739 int result; 744 return pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1);
740 if (atomic_add_return(1, &dev->enable_cnt) > 1)
741 return 0; /* already enabled */
742 result = __pci_enable_device(dev);
743 if (result < 0)
744 atomic_dec(&dev->enable_cnt);
745 return result;
746} 745}
747 746
748/** 747/**
@@ -921,6 +920,47 @@ err_out:
921 return -EBUSY; 920 return -EBUSY;
922} 921}
923 922
923/**
924 * pci_release_selected_regions - Release selected PCI I/O and memory resources
925 * @pdev: PCI device whose resources were previously reserved
926 * @bars: Bitmask of BARs to be released
927 *
928 * Release selected PCI I/O and memory resources previously reserved.
929 * Call this function only after all use of the PCI regions has ceased.
930 */
931void pci_release_selected_regions(struct pci_dev *pdev, int bars)
932{
933 int i;
934
935 for (i = 0; i < 6; i++)
936 if (bars & (1 << i))
937 pci_release_region(pdev, i);
938}
939
940/**
941 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
942 * @pdev: PCI device whose resources are to be reserved
943 * @bars: Bitmask of BARs to be requested
944 * @res_name: Name to be associated with resource
945 */
946int pci_request_selected_regions(struct pci_dev *pdev, int bars,
947 const char *res_name)
948{
949 int i;
950
951 for (i = 0; i < 6; i++)
952 if (bars & (1 << i))
953 if(pci_request_region(pdev, i, res_name))
954 goto err_out;
955 return 0;
956
957err_out:
958 while(--i >= 0)
959 if (bars & (1 << i))
960 pci_release_region(pdev, i);
961
962 return -EBUSY;
963}
924 964
925/** 965/**
926 * pci_release_regions - Release reserved PCI I/O and memory resources 966 * pci_release_regions - Release reserved PCI I/O and memory resources
@@ -933,10 +973,7 @@ err_out:
933 973
934void pci_release_regions(struct pci_dev *pdev) 974void pci_release_regions(struct pci_dev *pdev)
935{ 975{
936 int i; 976 pci_release_selected_regions(pdev, (1 << 6) - 1);
937
938 for (i = 0; i < 6; i++)
939 pci_release_region(pdev, i);
940} 977}
941 978
942/** 979/**
@@ -954,18 +991,7 @@ void pci_release_regions(struct pci_dev *pdev)
954 */ 991 */
955int pci_request_regions(struct pci_dev *pdev, const char *res_name) 992int pci_request_regions(struct pci_dev *pdev, const char *res_name)
956{ 993{
957 int i; 994 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
958
959 for (i = 0; i < 6; i++)
960 if(pci_request_region(pdev, i, res_name))
961 goto err_out;
962 return 0;
963
964err_out:
965 while(--i >= 0)
966 pci_release_region(pdev, i);
967
968 return -EBUSY;
969} 995}
970 996
971/** 997/**
@@ -1148,7 +1174,23 @@ pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
1148 return 0; 1174 return 0;
1149} 1175}
1150#endif 1176#endif
1151 1177
1178/**
1179 * pci_select_bars - Make BAR mask from the type of resource
1180 * @pdev: the PCI device for which BAR mask is made
1181 * @flags: resource type mask to be selected
1182 *
1183 * This helper routine makes bar mask from the type of resource.
1184 */
1185int pci_select_bars(struct pci_dev *dev, unsigned long flags)
1186{
1187 int i, bars = 0;
1188 for (i = 0; i < PCI_NUM_RESOURCES; i++)
1189 if (pci_resource_flags(dev, i) & flags)
1190 bars |= (1 << i);
1191 return bars;
1192}
1193
1152static int __devinit pci_init(void) 1194static int __devinit pci_init(void)
1153{ 1195{
1154 struct pci_dev *dev = NULL; 1196 struct pci_dev *dev = NULL;
@@ -1181,12 +1223,6 @@ early_param("pci", pci_setup);
1181 1223
1182device_initcall(pci_init); 1224device_initcall(pci_init);
1183 1225
1184#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
1185/* FIXME: Some boxes have multiple ISA bridges! */
1186struct pci_dev *isa_bridge;
1187EXPORT_SYMBOL(isa_bridge);
1188#endif
1189
1190EXPORT_SYMBOL_GPL(pci_restore_bars); 1226EXPORT_SYMBOL_GPL(pci_restore_bars);
1191EXPORT_SYMBOL(pci_enable_device_bars); 1227EXPORT_SYMBOL(pci_enable_device_bars);
1192EXPORT_SYMBOL(pci_enable_device); 1228EXPORT_SYMBOL(pci_enable_device);
@@ -1197,6 +1233,8 @@ EXPORT_SYMBOL(pci_release_regions);
1197EXPORT_SYMBOL(pci_request_regions); 1233EXPORT_SYMBOL(pci_request_regions);
1198EXPORT_SYMBOL(pci_release_region); 1234EXPORT_SYMBOL(pci_release_region);
1199EXPORT_SYMBOL(pci_request_region); 1235EXPORT_SYMBOL(pci_request_region);
1236EXPORT_SYMBOL(pci_release_selected_regions);
1237EXPORT_SYMBOL(pci_request_selected_regions);
1200EXPORT_SYMBOL(pci_set_master); 1238EXPORT_SYMBOL(pci_set_master);
1201EXPORT_SYMBOL(pci_set_mwi); 1239EXPORT_SYMBOL(pci_set_mwi);
1202EXPORT_SYMBOL(pci_clear_mwi); 1240EXPORT_SYMBOL(pci_clear_mwi);
@@ -1205,13 +1243,10 @@ EXPORT_SYMBOL(pci_set_dma_mask);
1205EXPORT_SYMBOL(pci_set_consistent_dma_mask); 1243EXPORT_SYMBOL(pci_set_consistent_dma_mask);
1206EXPORT_SYMBOL(pci_assign_resource); 1244EXPORT_SYMBOL(pci_assign_resource);
1207EXPORT_SYMBOL(pci_find_parent_resource); 1245EXPORT_SYMBOL(pci_find_parent_resource);
1246EXPORT_SYMBOL(pci_select_bars);
1208 1247
1209EXPORT_SYMBOL(pci_set_power_state); 1248EXPORT_SYMBOL(pci_set_power_state);
1210EXPORT_SYMBOL(pci_save_state); 1249EXPORT_SYMBOL(pci_save_state);
1211EXPORT_SYMBOL(pci_restore_state); 1250EXPORT_SYMBOL(pci_restore_state);
1212EXPORT_SYMBOL(pci_enable_wake); 1251EXPORT_SYMBOL(pci_enable_wake);
1213 1252
1214/* Quirk info */
1215
1216EXPORT_SYMBOL(isa_dma_bridge_buggy);
1217EXPORT_SYMBOL(pci_pci_problems);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 398852f526a6..a4f2d580625e 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -1,6 +1,6 @@
1/* Functions internal to the PCI core code */ 1/* Functions internal to the PCI core code */
2 2
3extern int __must_check __pci_enable_device(struct pci_dev *); 3extern int __must_check __pci_reenable_device(struct pci_dev *);
4extern int pci_uevent(struct device *dev, char **envp, int num_envp, 4extern int pci_uevent(struct device *dev, char **envp, int num_envp,
5 char *buffer, int buffer_size); 5 char *buffer, int buffer_size);
6extern int pci_create_sysfs_dev_files(struct pci_dev *pdev); 6extern int pci_create_sysfs_dev_files(struct pci_dev *pdev);
@@ -43,12 +43,8 @@ extern void pci_remove_legacy_files(struct pci_bus *bus);
43/* Lock for read/write access to pci device and bus lists */ 43/* Lock for read/write access to pci device and bus lists */
44extern struct rw_semaphore pci_bus_sem; 44extern struct rw_semaphore pci_bus_sem;
45 45
46#ifdef CONFIG_PCI_MSI
47extern int pci_msi_quirk;
48#else
49#define pci_msi_quirk 0
50#endif
51extern unsigned int pci_pm_d3_delay; 46extern unsigned int pci_pm_d3_delay;
47
52#ifdef CONFIG_PCI_MSI 48#ifdef CONFIG_PCI_MSI
53void disable_msi_mode(struct pci_dev *dev, int pos, int type); 49void disable_msi_mode(struct pci_dev *dev, int pos, int type);
54void pci_no_msi(void); 50void pci_no_msi(void);
@@ -56,17 +52,15 @@ void pci_no_msi(void);
56static inline void disable_msi_mode(struct pci_dev *dev, int pos, int type) { } 52static inline void disable_msi_mode(struct pci_dev *dev, int pos, int type) { }
57static inline void pci_no_msi(void) { } 53static inline void pci_no_msi(void) { }
58#endif 54#endif
55
59#if defined(CONFIG_PCI_MSI) && defined(CONFIG_PM) 56#if defined(CONFIG_PCI_MSI) && defined(CONFIG_PM)
60int pci_save_msi_state(struct pci_dev *dev); 57int pci_save_msi_state(struct pci_dev *dev);
61int pci_save_msix_state(struct pci_dev *dev);
62void pci_restore_msi_state(struct pci_dev *dev); 58void pci_restore_msi_state(struct pci_dev *dev);
63void pci_restore_msix_state(struct pci_dev *dev);
64#else 59#else
65static inline int pci_save_msi_state(struct pci_dev *dev) { return 0; } 60static inline int pci_save_msi_state(struct pci_dev *dev) { return 0; }
66static inline int pci_save_msix_state(struct pci_dev *dev) { return 0; }
67static inline void pci_restore_msi_state(struct pci_dev *dev) {} 61static inline void pci_restore_msi_state(struct pci_dev *dev) {}
68static inline void pci_restore_msix_state(struct pci_dev *dev) {}
69#endif 62#endif
63
70static inline int pci_no_d1d2(struct pci_dev *dev) 64static inline int pci_no_d1d2(struct pci_dev *dev)
71{ 65{
72 unsigned int parent_dstates = 0; 66 unsigned int parent_dstates = 0;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 0e0401dd02cb..2fe1d690eb13 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -144,6 +144,32 @@ static u32 pci_size(u32 base, u32 maxbase, u32 mask)
144 return size; 144 return size;
145} 145}
146 146
147static u64 pci_size64(u64 base, u64 maxbase, u64 mask)
148{
149 u64 size = mask & maxbase; /* Find the significant bits */
150 if (!size)
151 return 0;
152
153 /* Get the lowest of them to find the decode size, and
154 from that the extent. */
155 size = (size & ~(size-1)) - 1;
156
157 /* base == maxbase can be valid only if the BAR has
158 already been programmed with all 1s. */
159 if (base == maxbase && ((base | size) & mask) != mask)
160 return 0;
161
162 return size;
163}
164
165static inline int is_64bit_memory(u32 mask)
166{
167 if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
168 (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64))
169 return 1;
170 return 0;
171}
172
147static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) 173static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
148{ 174{
149 unsigned int pos, reg, next; 175 unsigned int pos, reg, next;
@@ -151,6 +177,10 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
151 struct resource *res; 177 struct resource *res;
152 178
153 for(pos=0; pos<howmany; pos = next) { 179 for(pos=0; pos<howmany; pos = next) {
180 u64 l64;
181 u64 sz64;
182 u32 raw_sz;
183
154 next = pos+1; 184 next = pos+1;
155 res = &dev->resource[pos]; 185 res = &dev->resource[pos];
156 res->name = pci_name(dev); 186 res->name = pci_name(dev);
@@ -163,9 +193,16 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
163 continue; 193 continue;
164 if (l == 0xffffffff) 194 if (l == 0xffffffff)
165 l = 0; 195 l = 0;
166 if ((l & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_MEMORY) { 196 raw_sz = sz;
197 if ((l & PCI_BASE_ADDRESS_SPACE) ==
198 PCI_BASE_ADDRESS_SPACE_MEMORY) {
167 sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK); 199 sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK);
168 if (!sz) 200 /*
201 * For 64bit prefetchable memory sz could be 0, if the
202 * real size is bigger than 4G, so we need to check
203 * szhi for that.
204 */
205 if (!is_64bit_memory(l) && !sz)
169 continue; 206 continue;
170 res->start = l & PCI_BASE_ADDRESS_MEM_MASK; 207 res->start = l & PCI_BASE_ADDRESS_MEM_MASK;
171 res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK; 208 res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK;
@@ -178,30 +215,36 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
178 } 215 }
179 res->end = res->start + (unsigned long) sz; 216 res->end = res->start + (unsigned long) sz;
180 res->flags |= pci_calc_resource_flags(l); 217 res->flags |= pci_calc_resource_flags(l);
181 if ((l & (PCI_BASE_ADDRESS_SPACE | PCI_BASE_ADDRESS_MEM_TYPE_MASK)) 218 if (is_64bit_memory(l)) {
182 == (PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64)) {
183 u32 szhi, lhi; 219 u32 szhi, lhi;
220
184 pci_read_config_dword(dev, reg+4, &lhi); 221 pci_read_config_dword(dev, reg+4, &lhi);
185 pci_write_config_dword(dev, reg+4, ~0); 222 pci_write_config_dword(dev, reg+4, ~0);
186 pci_read_config_dword(dev, reg+4, &szhi); 223 pci_read_config_dword(dev, reg+4, &szhi);
187 pci_write_config_dword(dev, reg+4, lhi); 224 pci_write_config_dword(dev, reg+4, lhi);
188 szhi = pci_size(lhi, szhi, 0xffffffff); 225 sz64 = ((u64)szhi << 32) | raw_sz;
226 l64 = ((u64)lhi << 32) | l;
227 sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK);
189 next++; 228 next++;
190#if BITS_PER_LONG == 64 229#if BITS_PER_LONG == 64
191 res->start |= ((unsigned long) lhi) << 32; 230 if (!sz64) {
192 res->end = res->start + sz; 231 res->start = 0;
193 if (szhi) { 232 res->end = 0;
194 /* This BAR needs > 4GB? Wow. */ 233 res->flags = 0;
195 res->end |= (unsigned long)szhi<<32; 234 continue;
196 } 235 }
236 res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK;
237 res->end = res->start + sz64;
197#else 238#else
198 if (szhi) { 239 if (sz64 > 0x100000000ULL) {
199 printk(KERN_ERR "PCI: Unable to handle 64-bit BAR for device %s\n", pci_name(dev)); 240 printk(KERN_ERR "PCI: Unable to handle 64-bit "
241 "BAR for device %s\n", pci_name(dev));
200 res->start = 0; 242 res->start = 0;
201 res->flags = 0; 243 res->flags = 0;
202 } else if (lhi) { 244 } else if (lhi) {
203 /* 64-bit wide address, treat as disabled */ 245 /* 64-bit wide address, treat as disabled */
204 pci_write_config_dword(dev, reg, l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK); 246 pci_write_config_dword(dev, reg,
247 l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK);
205 pci_write_config_dword(dev, reg+4, 0); 248 pci_write_config_dword(dev, reg+4, 0);
206 res->start = 0; 249 res->start = 0;
207 res->end = sz; 250 res->end = sz;
@@ -902,7 +945,6 @@ pci_scan_single_device(struct pci_bus *bus, int devfn)
902 return NULL; 945 return NULL;
903 946
904 pci_device_add(dev, bus); 947 pci_device_add(dev, bus);
905 pci_scan_msi_device(dev);
906 948
907 return dev; 949 return dev;
908} 950}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index c913ea4e545c..11217bda4b9e 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -61,7 +61,8 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_p
61 61
62 This appears to be BIOS not version dependent. So presumably there is a 62 This appears to be BIOS not version dependent. So presumably there is a
63 chipset level fix */ 63 chipset level fix */
64int isa_dma_bridge_buggy; /* Exported */ 64int isa_dma_bridge_buggy;
65EXPORT_SYMBOL(isa_dma_bridge_buggy);
65 66
66static void __devinit quirk_isa_dma_hangs(struct pci_dev *dev) 67static void __devinit quirk_isa_dma_hangs(struct pci_dev *dev)
67{ 68{
@@ -83,6 +84,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_d
83DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs ); 84DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs );
84 85
85int pci_pci_problems; 86int pci_pci_problems;
87EXPORT_SYMBOL(pci_pci_problems);
86 88
87/* 89/*
88 * Chipsets where PCI->PCI transfers vanish or hang 90 * Chipsets where PCI->PCI transfers vanish or hang
@@ -94,6 +96,8 @@ static void __devinit quirk_nopcipci(struct pci_dev *dev)
94 pci_pci_problems |= PCIPCI_FAIL; 96 pci_pci_problems |= PCIPCI_FAIL;
95 } 97 }
96} 98}
99DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, quirk_nopcipci );
100DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496, quirk_nopcipci );
97 101
98static void __devinit quirk_nopciamd(struct pci_dev *dev) 102static void __devinit quirk_nopciamd(struct pci_dev *dev)
99{ 103{
@@ -105,9 +109,6 @@ static void __devinit quirk_nopciamd(struct pci_dev *dev)
105 pci_pci_problems |= PCIAGP_FAIL; 109 pci_pci_problems |= PCIAGP_FAIL;
106 } 110 }
107} 111}
108
109DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, quirk_nopcipci );
110DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496, quirk_nopcipci );
111DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopciamd ); 112DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopciamd );
112 113
113/* 114/*
@@ -976,52 +977,51 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
976 case 0x1626: /* L3C notebook */ 977 case 0x1626: /* L3C notebook */
977 asus_hides_smbus = 1; 978 asus_hides_smbus = 1;
978 } 979 }
979 if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) 980 else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB)
980 switch(dev->subsystem_device) { 981 switch(dev->subsystem_device) {
981 case 0x80b1: /* P4GE-V */ 982 case 0x80b1: /* P4GE-V */
982 case 0x80b2: /* P4PE */ 983 case 0x80b2: /* P4PE */
983 case 0x8093: /* P4B533-V */ 984 case 0x8093: /* P4B533-V */
984 asus_hides_smbus = 1; 985 asus_hides_smbus = 1;
985 } 986 }
986 if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB) 987 else if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB)
987 switch(dev->subsystem_device) { 988 switch(dev->subsystem_device) {
988 case 0x8030: /* P4T533 */ 989 case 0x8030: /* P4T533 */
989 asus_hides_smbus = 1; 990 asus_hides_smbus = 1;
990 } 991 }
991 if (dev->device == PCI_DEVICE_ID_INTEL_7205_0) 992 else if (dev->device == PCI_DEVICE_ID_INTEL_7205_0)
992 switch (dev->subsystem_device) { 993 switch (dev->subsystem_device) {
993 case 0x8070: /* P4G8X Deluxe */ 994 case 0x8070: /* P4G8X Deluxe */
994 asus_hides_smbus = 1; 995 asus_hides_smbus = 1;
995 } 996 }
996 if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH) 997 else if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH)
997 switch (dev->subsystem_device) { 998 switch (dev->subsystem_device) {
998 case 0x80c9: /* PU-DLS */ 999 case 0x80c9: /* PU-DLS */
999 asus_hides_smbus = 1; 1000 asus_hides_smbus = 1;
1000 } 1001 }
1001 if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB) 1002 else if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
1002 switch (dev->subsystem_device) { 1003 switch (dev->subsystem_device) {
1003 case 0x1751: /* M2N notebook */ 1004 case 0x1751: /* M2N notebook */
1004 case 0x1821: /* M5N notebook */ 1005 case 0x1821: /* M5N notebook */
1005 asus_hides_smbus = 1; 1006 asus_hides_smbus = 1;
1006 } 1007 }
1007 if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) 1008 else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
1008 switch (dev->subsystem_device) { 1009 switch (dev->subsystem_device) {
1009 case 0x184b: /* W1N notebook */ 1010 case 0x184b: /* W1N notebook */
1010 case 0x186a: /* M6Ne notebook */ 1011 case 0x186a: /* M6Ne notebook */
1011 asus_hides_smbus = 1; 1012 asus_hides_smbus = 1;
1012 } 1013 }
1013 if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB) 1014 else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
1014 switch (dev->subsystem_device) { 1015 switch (dev->subsystem_device) {
1015 case 0x80f2: /* P4P800-X */ 1016 case 0x80f2: /* P4P800-X */
1016 asus_hides_smbus = 1; 1017 asus_hides_smbus = 1;
1017 } 1018 }
1018 if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB) { 1019 else if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
1019 switch (dev->subsystem_device) { 1020 switch (dev->subsystem_device) {
1020 case 0x1882: /* M6V notebook */ 1021 case 0x1882: /* M6V notebook */
1021 case 0x1977: /* A6VA notebook */ 1022 case 0x1977: /* A6VA notebook */
1022 asus_hides_smbus = 1; 1023 asus_hides_smbus = 1;
1023 } 1024 }
1024 }
1025 } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) { 1025 } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
1026 if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) 1026 if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
1027 switch(dev->subsystem_device) { 1027 switch(dev->subsystem_device) {
@@ -1029,25 +1029,24 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
1029 case 0x0890: /* HP Compaq nc6000 */ 1029 case 0x0890: /* HP Compaq nc6000 */
1030 asus_hides_smbus = 1; 1030 asus_hides_smbus = 1;
1031 } 1031 }
1032 if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB) 1032 else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
1033 switch (dev->subsystem_device) { 1033 switch (dev->subsystem_device) {
1034 case 0x12bc: /* HP D330L */ 1034 case 0x12bc: /* HP D330L */
1035 case 0x12bd: /* HP D530 */ 1035 case 0x12bd: /* HP D530 */
1036 asus_hides_smbus = 1; 1036 asus_hides_smbus = 1;
1037 } 1037 }
1038 if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB) { 1038 else if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
1039 switch (dev->subsystem_device) { 1039 switch (dev->subsystem_device) {
1040 case 0x099c: /* HP Compaq nx6110 */ 1040 case 0x099c: /* HP Compaq nx6110 */
1041 asus_hides_smbus = 1; 1041 asus_hides_smbus = 1;
1042 } 1042 }
1043 }
1044 } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_TOSHIBA)) { 1043 } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_TOSHIBA)) {
1045 if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB) 1044 if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
1046 switch(dev->subsystem_device) { 1045 switch(dev->subsystem_device) {
1047 case 0x0001: /* Toshiba Satellite A40 */ 1046 case 0x0001: /* Toshiba Satellite A40 */
1048 asus_hides_smbus = 1; 1047 asus_hides_smbus = 1;
1049 } 1048 }
1050 if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) 1049 else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
1051 switch(dev->subsystem_device) { 1050 switch(dev->subsystem_device) {
1052 case 0x0001: /* Toshiba Tecra M2 */ 1051 case 0x0001: /* Toshiba Tecra M2 */
1053 asus_hides_smbus = 1; 1052 asus_hides_smbus = 1;
@@ -1136,6 +1135,14 @@ static void quirk_sis_96x_smbus(struct pci_dev *dev)
1136 pci_write_config_byte(dev, 0x77, val & ~0x10); 1135 pci_write_config_byte(dev, 0x77, val & ~0x10);
1137 } 1136 }
1138} 1137}
1138DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus );
1139DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus );
1140DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus );
1141DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus );
1142DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus );
1143DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus );
1144DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus );
1145DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus );
1139 1146
1140/* 1147/*
1141 * ... This is further complicated by the fact that some SiS96x south 1148 * ... This is further complicated by the fact that some SiS96x south
@@ -1145,8 +1152,6 @@ static void quirk_sis_96x_smbus(struct pci_dev *dev)
1145 * 1152 *
1146 * We can also enable the sis96x bit in the discovery register.. 1153 * We can also enable the sis96x bit in the discovery register..
1147 */ 1154 */
1148static int __devinitdata sis_96x_compatible = 0;
1149
1150#define SIS_DETECT_REGISTER 0x40 1155#define SIS_DETECT_REGISTER 0x40
1151 1156
1152static void quirk_sis_503(struct pci_dev *dev) 1157static void quirk_sis_503(struct pci_dev *dev)
@@ -1162,9 +1167,6 @@ static void quirk_sis_503(struct pci_dev *dev)
1162 return; 1167 return;
1163 } 1168 }
1164 1169
1165 /* Make people aware that we changed the config.. */
1166 printk(KERN_WARNING "Uncovering SIS%x that hid as a SIS503 (compatible=%d)\n", devid, sis_96x_compatible);
1167
1168 /* 1170 /*
1169 * Ok, it now shows up as a 96x.. run the 96x quirk by 1171 * Ok, it now shows up as a 96x.. run the 96x quirk by
1170 * hand in case it has already been processed. 1172 * hand in case it has already been processed.
@@ -1173,20 +1175,10 @@ static void quirk_sis_503(struct pci_dev *dev)
1173 dev->device = devid; 1175 dev->device = devid;
1174 quirk_sis_96x_smbus(dev); 1176 quirk_sis_96x_smbus(dev);
1175} 1177}
1176
1177static void __init quirk_sis_96x_compatible(struct pci_dev *dev)
1178{
1179 sis_96x_compatible = 1;
1180}
1181DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_645, quirk_sis_96x_compatible );
1182DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_646, quirk_sis_96x_compatible );
1183DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_648, quirk_sis_96x_compatible );
1184DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_650, quirk_sis_96x_compatible );
1185DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_651, quirk_sis_96x_compatible );
1186DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_735, quirk_sis_96x_compatible );
1187
1188DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503 ); 1178DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503 );
1189DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503 ); 1179DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503 );
1180
1181
1190/* 1182/*
1191 * On ASUS A8V and A8V Deluxe boards, the onboard AC97 audio controller 1183 * On ASUS A8V and A8V Deluxe boards, the onboard AC97 audio controller
1192 * and MC97 modem controller are disabled when a second PCI soundcard is 1184 * and MC97 modem controller are disabled when a second PCI soundcard is
@@ -1217,21 +1209,8 @@ static void asus_hides_ac97_lpc(struct pci_dev *dev)
1217 } 1209 }
1218} 1210}
1219DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc ); 1211DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc );
1220
1221
1222DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus );
1223DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus );
1224DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus );
1225DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus );
1226
1227DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc ); 1212DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc );
1228 1213
1229
1230DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus );
1231DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus );
1232DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus );
1233DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus );
1234
1235#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE) 1214#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)
1236 1215
1237/* 1216/*
@@ -1276,7 +1255,6 @@ static void quirk_jmicron_dualfn(struct pci_dev *pdev)
1276 break; 1255 break;
1277 } 1256 }
1278} 1257}
1279
1280DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, quirk_jmicron_dualfn); 1258DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, quirk_jmicron_dualfn);
1281DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, quirk_jmicron_dualfn); 1259DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, quirk_jmicron_dualfn);
1282 1260
@@ -1420,6 +1398,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_intel_ide_co
1420 1398
1421 1399
1422int pcie_mch_quirk; 1400int pcie_mch_quirk;
1401EXPORT_SYMBOL(pcie_mch_quirk);
1423 1402
1424static void __devinit quirk_pcie_mch(struct pci_dev *pdev) 1403static void __devinit quirk_pcie_mch(struct pci_dev *pdev)
1425{ 1404{
@@ -1481,6 +1460,24 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm);
1481DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm); 1460DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm);
1482DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); 1461DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
1483 1462
1463/*
1464 * Toshiba TC86C001 IDE controller reports the standard 8-byte BAR0 size
1465 * but the PIO transfers won't work if BAR0 falls at the odd 8 bytes.
1466 * Re-allocate the region if needed...
1467 */
1468static void __init quirk_tc86c001_ide(struct pci_dev *dev)
1469{
1470 struct resource *r = &dev->resource[0];
1471
1472 if (r->start & 0x8) {
1473 r->start = 0;
1474 r->end = 0xf;
1475 }
1476}
1477DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
1478 PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE,
1479 quirk_tc86c001_ide);
1480
1484static void __devinit quirk_netmos(struct pci_dev *dev) 1481static void __devinit quirk_netmos(struct pci_dev *dev)
1485{ 1482{
1486 unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4; 1483 unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4;
@@ -1646,6 +1643,7 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
1646 } 1643 }
1647 pci_do_fixups(dev, start, end); 1644 pci_do_fixups(dev, start, end);
1648} 1645}
1646EXPORT_SYMBOL(pci_fixup_device);
1649 1647
1650/* Enable 1k I/O space granularity on the Intel P64H2 */ 1648/* Enable 1k I/O space granularity on the Intel P64H2 */
1651static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev) 1649static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev)
@@ -1673,6 +1671,31 @@ static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev)
1673} 1671}
1674DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io); 1672DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
1675 1673
1674/* Fix the IOBL_ADR for 1k I/O space granularity on the Intel P64H2
1675 * The IOBL_ADR gets re-written to 4k boundaries in pci_setup_bridge()
1676 * in drivers/pci/setup-bus.c
1677 */
1678static void __devinit quirk_p64h2_1k_io_fix_iobl(struct pci_dev *dev)
1679{
1680 u16 en1k, iobl_adr, iobl_adr_1k;
1681 struct resource *res = dev->resource + PCI_BRIDGE_RESOURCES;
1682
1683 pci_read_config_word(dev, 0x40, &en1k);
1684
1685 if (en1k & 0x200) {
1686 pci_read_config_word(dev, PCI_IO_BASE, &iobl_adr);
1687
1688 iobl_adr_1k = iobl_adr | (res->start >> 8) | (res->end & 0xfc00);
1689
1690 if (iobl_adr != iobl_adr_1k) {
1691 printk(KERN_INFO "PCI: Fixing P64H2 IOBL_ADR from 0x%x to 0x%x for 1 KB Granularity\n",
1692 iobl_adr,iobl_adr_1k);
1693 pci_write_config_word(dev, PCI_IO_BASE, iobl_adr_1k);
1694 }
1695 }
1696}
1697DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io_fix_iobl);
1698
1676/* Under some circumstances, AER is not linked with extended capabilities. 1699/* Under some circumstances, AER is not linked with extended capabilities.
1677 * Force it to be linked by setting the corresponding control bit in the 1700 * Force it to be linked by setting the corresponding control bit in the
1678 * config space. 1701 * config space.
@@ -1695,9 +1718,6 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
1695 quirk_nvidia_ck804_pcie_aer_ext_cap); 1718 quirk_nvidia_ck804_pcie_aer_ext_cap);
1696 1719
1697#ifdef CONFIG_PCI_MSI 1720#ifdef CONFIG_PCI_MSI
1698/* To disable MSI globally */
1699int pci_msi_quirk;
1700
1701/* The Serverworks PCI-X chipset does not support MSI. We cannot easily rely 1721/* The Serverworks PCI-X chipset does not support MSI. We cannot easily rely
1702 * on setting PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually 1722 * on setting PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
1703 * some other busses controlled by the chipset even if Linux is not aware of it. 1723 * some other busses controlled by the chipset even if Linux is not aware of it.
@@ -1706,8 +1726,8 @@ int pci_msi_quirk;
1706 */ 1726 */
1707static void __init quirk_svw_msi(struct pci_dev *dev) 1727static void __init quirk_svw_msi(struct pci_dev *dev)
1708{ 1728{
1709 pci_msi_quirk = 1; 1729 pci_no_msi();
1710 printk(KERN_WARNING "PCI: MSI quirk detected. pci_msi_quirk set.\n"); 1730 printk(KERN_WARNING "PCI: MSI quirk detected. MSI deactivated.\n");
1711} 1731}
1712DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_svw_msi); 1732DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_svw_msi);
1713 1733
@@ -1788,8 +1808,3 @@ static void __devinit quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
1788DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, 1808DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
1789 quirk_nvidia_ck804_msi_ht_cap); 1809 quirk_nvidia_ck804_msi_ht_cap);
1790#endif /* CONFIG_PCI_MSI */ 1810#endif /* CONFIG_PCI_MSI */
1791
1792EXPORT_SYMBOL(pcie_mch_quirk);
1793#ifdef CONFIG_HOTPLUG
1794EXPORT_SYMBOL(pci_fixup_device);
1795#endif
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index b2653c4afe9e..ff98eaddaa73 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -358,43 +358,6 @@ exit:
358} 358}
359 359
360/** 360/**
361 * pci_find_device_reverse - begin or continue searching for a PCI device by vendor/device id
362 * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
363 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
364 * @from: Previous PCI device found in search, or %NULL for new search.
365 *
366 * Iterates through the list of known PCI devices in the reverse order of
367 * pci_find_device().
368 * If a PCI device is found with a matching @vendor and @device, a pointer to
369 * its device structure is returned. Otherwise, %NULL is returned.
370 * A new search is initiated by passing %NULL as the @from argument.
371 * Otherwise if @from is not %NULL, searches continue from previous device
372 * on the global list.
373 */
374struct pci_dev *
375pci_find_device_reverse(unsigned int vendor, unsigned int device, const struct pci_dev *from)
376{
377 struct list_head *n;
378 struct pci_dev *dev;
379
380 WARN_ON(in_interrupt());
381 down_read(&pci_bus_sem);
382 n = from ? from->global_list.prev : pci_devices.prev;
383
384 while (n && (n != &pci_devices)) {
385 dev = pci_dev_g(n);
386 if ((vendor == PCI_ANY_ID || dev->vendor == vendor) &&
387 (device == PCI_ANY_ID || dev->device == device))
388 goto exit;
389 n = n->prev;
390 }
391 dev = NULL;
392exit:
393 up_read(&pci_bus_sem);
394 return dev;
395}
396
397/**
398 * pci_get_class - begin or continue searching for a PCI device by class 361 * pci_get_class - begin or continue searching for a PCI device by class
399 * @class: search for a PCI device with this class designation 362 * @class: search for a PCI device with this class designation
400 * @from: Previous PCI device found in search, or %NULL for new search. 363 * @from: Previous PCI device found in search, or %NULL for new search.
@@ -469,7 +432,6 @@ EXPORT_SYMBOL(pci_dev_present);
469EXPORT_SYMBOL(pci_find_present); 432EXPORT_SYMBOL(pci_find_present);
470 433
471EXPORT_SYMBOL(pci_find_device); 434EXPORT_SYMBOL(pci_find_device);
472EXPORT_SYMBOL(pci_find_device_reverse);
473EXPORT_SYMBOL(pci_find_slot); 435EXPORT_SYMBOL(pci_find_slot);
474/* For boot time work */ 436/* For boot time work */
475EXPORT_SYMBOL(pci_find_bus); 437EXPORT_SYMBOL(pci_find_bus);
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 606a46740338..ac004248324a 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -110,7 +110,7 @@ int pcmcia_socket_dev_suspend(struct device *dev, pm_message_t state)
110 110
111 down_read(&pcmcia_socket_list_rwsem); 111 down_read(&pcmcia_socket_list_rwsem);
112 list_for_each_entry(socket, &pcmcia_socket_list, socket_list) { 112 list_for_each_entry(socket, &pcmcia_socket_list, socket_list) {
113 if (socket->dev.dev != dev) 113 if (socket->dev.parent != dev)
114 continue; 114 continue;
115 mutex_lock(&socket->skt_mutex); 115 mutex_lock(&socket->skt_mutex);
116 socket_suspend(socket); 116 socket_suspend(socket);
@@ -128,7 +128,7 @@ int pcmcia_socket_dev_resume(struct device *dev)
128 128
129 down_read(&pcmcia_socket_list_rwsem); 129 down_read(&pcmcia_socket_list_rwsem);
130 list_for_each_entry(socket, &pcmcia_socket_list, socket_list) { 130 list_for_each_entry(socket, &pcmcia_socket_list, socket_list) {
131 if (socket->dev.dev != dev) 131 if (socket->dev.parent != dev)
132 continue; 132 continue;
133 mutex_lock(&socket->skt_mutex); 133 mutex_lock(&socket->skt_mutex);
134 socket_resume(socket); 134 socket_resume(socket);
@@ -143,12 +143,12 @@ EXPORT_SYMBOL(pcmcia_socket_dev_resume);
143 143
144struct pcmcia_socket * pcmcia_get_socket(struct pcmcia_socket *skt) 144struct pcmcia_socket * pcmcia_get_socket(struct pcmcia_socket *skt)
145{ 145{
146 struct class_device *cl_dev = class_device_get(&skt->dev); 146 struct device *dev = get_device(&skt->dev);
147 if (!cl_dev) 147 if (!dev)
148 return NULL; 148 return NULL;
149 skt = class_get_devdata(cl_dev); 149 skt = dev_get_drvdata(dev);
150 if (!try_module_get(skt->owner)) { 150 if (!try_module_get(skt->owner)) {
151 class_device_put(&skt->dev); 151 put_device(&skt->dev);
152 return NULL; 152 return NULL;
153 } 153 }
154 return (skt); 154 return (skt);
@@ -159,14 +159,14 @@ EXPORT_SYMBOL(pcmcia_get_socket);
159void pcmcia_put_socket(struct pcmcia_socket *skt) 159void pcmcia_put_socket(struct pcmcia_socket *skt)
160{ 160{
161 module_put(skt->owner); 161 module_put(skt->owner);
162 class_device_put(&skt->dev); 162 put_device(&skt->dev);
163} 163}
164EXPORT_SYMBOL(pcmcia_put_socket); 164EXPORT_SYMBOL(pcmcia_put_socket);
165 165
166 166
167static void pcmcia_release_socket(struct class_device *class_dev) 167static void pcmcia_release_socket(struct device *dev)
168{ 168{
169 struct pcmcia_socket *socket = class_get_devdata(class_dev); 169 struct pcmcia_socket *socket = dev_get_drvdata(dev);
170 170
171 complete(&socket->socket_released); 171 complete(&socket->socket_released);
172} 172}
@@ -181,7 +181,7 @@ int pcmcia_register_socket(struct pcmcia_socket *socket)
181 struct task_struct *tsk; 181 struct task_struct *tsk;
182 int ret; 182 int ret;
183 183
184 if (!socket || !socket->ops || !socket->dev.dev || !socket->resource_ops) 184 if (!socket || !socket->ops || !socket->dev.parent || !socket->resource_ops)
185 return -EINVAL; 185 return -EINVAL;
186 186
187 cs_dbg(socket, 0, "pcmcia_register_socket(0x%p)\n", socket->ops); 187 cs_dbg(socket, 0, "pcmcia_register_socket(0x%p)\n", socket->ops);
@@ -226,9 +226,9 @@ int pcmcia_register_socket(struct pcmcia_socket *socket)
226#endif 226#endif
227 227
228 /* set proper values in socket->dev */ 228 /* set proper values in socket->dev */
229 socket->dev.class_data = socket; 229 dev_set_drvdata(&socket->dev, socket);
230 socket->dev.class = &pcmcia_socket_class; 230 socket->dev.class = &pcmcia_socket_class;
231 snprintf(socket->dev.class_id, BUS_ID_SIZE, "pcmcia_socket%u", socket->sock); 231 snprintf(socket->dev.bus_id, BUS_ID_SIZE, "pcmcia_socket%u", socket->sock);
232 232
233 /* base address = 0, map = 0 */ 233 /* base address = 0, map = 0 */
234 socket->cis_mem.flags = 0; 234 socket->cis_mem.flags = 0;
@@ -640,7 +640,7 @@ static int pccardd(void *__skt)
640 skt->ops->set_socket(skt, &skt->socket); 640 skt->ops->set_socket(skt, &skt->socket);
641 641
642 /* register with the device core */ 642 /* register with the device core */
643 ret = class_device_register(&skt->dev); 643 ret = device_register(&skt->dev);
644 if (ret) { 644 if (ret) {
645 printk(KERN_WARNING "PCMCIA: unable to register socket 0x%p\n", 645 printk(KERN_WARNING "PCMCIA: unable to register socket 0x%p\n",
646 skt); 646 skt);
@@ -689,7 +689,7 @@ static int pccardd(void *__skt)
689 remove_wait_queue(&skt->thread_wait, &wait); 689 remove_wait_queue(&skt->thread_wait, &wait);
690 690
691 /* remove from the device core */ 691 /* remove from the device core */
692 class_device_unregister(&skt->dev); 692 device_unregister(&skt->dev);
693 693
694 return 0; 694 return 0;
695} 695}
@@ -904,7 +904,7 @@ int pcmcia_insert_card(struct pcmcia_socket *skt)
904EXPORT_SYMBOL(pcmcia_insert_card); 904EXPORT_SYMBOL(pcmcia_insert_card);
905 905
906 906
907static int pcmcia_socket_uevent(struct class_device *dev, char **envp, 907static int pcmcia_socket_uevent(struct device *dev, char **envp,
908 int num_envp, char *buffer, int buffer_size) 908 int num_envp, char *buffer, int buffer_size)
909{ 909{
910 struct pcmcia_socket *s = container_of(dev, struct pcmcia_socket, dev); 910 struct pcmcia_socket *s = container_of(dev, struct pcmcia_socket, dev);
@@ -930,8 +930,8 @@ static void pcmcia_release_socket_class(struct class *data)
930 930
931struct class pcmcia_socket_class = { 931struct class pcmcia_socket_class = {
932 .name = "pcmcia_socket", 932 .name = "pcmcia_socket",
933 .uevent = pcmcia_socket_uevent, 933 .dev_uevent = pcmcia_socket_uevent,
934 .release = pcmcia_release_socket, 934 .dev_release = pcmcia_release_socket,
935 .class_release = pcmcia_release_socket_class, 935 .class_release = pcmcia_release_socket_class,
936}; 936};
937EXPORT_SYMBOL(pcmcia_socket_class); 937EXPORT_SYMBOL(pcmcia_socket_class);
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index f573ea04db6f..9fa207e3c7b3 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -142,7 +142,7 @@ struct pcmcia_callback{
142 142
143int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c); 143int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c);
144 144
145#define cs_socket_name(skt) ((skt)->dev.class_id) 145#define cs_socket_name(skt) ((skt)->dev.bus_id)
146 146
147#ifdef DEBUG 147#ifdef DEBUG
148extern int cs_debug_level(int); 148extern int cs_debug_level(int);
@@ -158,6 +158,6 @@ extern int cs_debug_level(int);
158#endif 158#endif
159 159
160#define cs_err(skt, fmt, arg...) \ 160#define cs_err(skt, fmt, arg...) \
161 printk(KERN_ERR "cs: %s: " fmt, (skt)->dev.class_id , ## arg) 161 printk(KERN_ERR "cs: %s: " fmt, (skt)->dev.bus_id , ## arg)
162 162
163#endif /* _LINUX_CS_INTERNAL_H */ 163#endif /* _LINUX_CS_INTERNAL_H */
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 7355eb455a88..18e111e12339 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -572,7 +572,7 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
572 p_dev->func = function; 572 p_dev->func = function;
573 573
574 p_dev->dev.bus = &pcmcia_bus_type; 574 p_dev->dev.bus = &pcmcia_bus_type;
575 p_dev->dev.parent = s->dev.dev; 575 p_dev->dev.parent = s->dev.parent;
576 p_dev->dev.release = pcmcia_release_dev; 576 p_dev->dev.release = pcmcia_release_dev;
577 bus_id_len = sprintf (p_dev->dev.bus_id, "%d.%d", p_dev->socket->sock, p_dev->device_no); 577 bus_id_len = sprintf (p_dev->dev.bus_id, "%d.%d", p_dev->socket->sock, p_dev->device_no);
578 578
@@ -1328,10 +1328,10 @@ static struct pcmcia_callback pcmcia_bus_callback = {
1328 .resume = pcmcia_bus_resume, 1328 .resume = pcmcia_bus_resume,
1329}; 1329};
1330 1330
1331static int __devinit pcmcia_bus_add_socket(struct class_device *class_dev, 1331static int __devinit pcmcia_bus_add_socket(struct device *dev,
1332 struct class_interface *class_intf) 1332 struct class_interface *class_intf)
1333{ 1333{
1334 struct pcmcia_socket *socket = class_get_devdata(class_dev); 1334 struct pcmcia_socket *socket = dev_get_drvdata(dev);
1335 int ret; 1335 int ret;
1336 1336
1337 socket = pcmcia_get_socket(socket); 1337 socket = pcmcia_get_socket(socket);
@@ -1364,10 +1364,10 @@ static int __devinit pcmcia_bus_add_socket(struct class_device *class_dev,
1364 return 0; 1364 return 0;
1365} 1365}
1366 1366
1367static void pcmcia_bus_remove_socket(struct class_device *class_dev, 1367static void pcmcia_bus_remove_socket(struct device *dev,
1368 struct class_interface *class_intf) 1368 struct class_interface *class_intf)
1369{ 1369{
1370 struct pcmcia_socket *socket = class_get_devdata(class_dev); 1370 struct pcmcia_socket *socket = dev_get_drvdata(dev);
1371 1371
1372 if (!socket) 1372 if (!socket)
1373 return; 1373 return;
@@ -1389,8 +1389,8 @@ static void pcmcia_bus_remove_socket(struct class_device *class_dev,
1389/* the pcmcia_bus_interface is used to handle pcmcia socket devices */ 1389/* the pcmcia_bus_interface is used to handle pcmcia socket devices */
1390static struct class_interface pcmcia_bus_interface = { 1390static struct class_interface pcmcia_bus_interface = {
1391 .class = &pcmcia_socket_class, 1391 .class = &pcmcia_socket_class,
1392 .add = &pcmcia_bus_add_socket, 1392 .add_dev = &pcmcia_bus_add_socket,
1393 .remove = &pcmcia_bus_remove_socket, 1393 .remove_dev = &pcmcia_bus_remove_socket,
1394}; 1394};
1395 1395
1396 1396
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index c2ea07aa7a12..df21e2d16f87 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -161,7 +161,7 @@ static int __devinit i82092aa_pci_probe(struct pci_dev *dev, const struct pci_de
161 pci_set_drvdata(dev, &sockets[i].socket); 161 pci_set_drvdata(dev, &sockets[i].socket);
162 162
163 for (i = 0; i<socket_count; i++) { 163 for (i = 0; i<socket_count; i++) {
164 sockets[i].socket.dev.dev = &dev->dev; 164 sockets[i].socket.dev.parent = &dev->dev;
165 sockets[i].socket.ops = &i82092aa_operations; 165 sockets[i].socket.ops = &i82092aa_operations;
166 sockets[i].socket.resource_ops = &pccard_nonstatic_ops; 166 sockets[i].socket.resource_ops = &pccard_nonstatic_ops;
167 ret = pcmcia_register_socket(&sockets[i].socket); 167 ret = pcmcia_register_socket(&sockets[i].socket);
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c
index ea74f98a7350..72ff2f615b33 100644
--- a/drivers/pcmcia/i82365.c
+++ b/drivers/pcmcia/i82365.c
@@ -1298,7 +1298,7 @@ static int __init init_i82365(void)
1298 1298
1299 /* register sockets with the pcmcia core */ 1299 /* register sockets with the pcmcia core */
1300 for (i = 0; i < sockets; i++) { 1300 for (i = 0; i < sockets; i++) {
1301 socket[i].socket.dev.dev = &i82365_device->dev; 1301 socket[i].socket.dev.parent = &i82365_device->dev;
1302 socket[i].socket.ops = &pcic_operations; 1302 socket[i].socket.ops = &pcic_operations;
1303 socket[i].socket.resource_ops = &pccard_nonstatic_ops; 1303 socket[i].socket.resource_ops = &pccard_nonstatic_ops;
1304 socket[i].socket.owner = THIS_MODULE; 1304 socket[i].socket.owner = THIS_MODULE;
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index 327372b7a54e..88494149e910 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -59,7 +59,6 @@ typedef struct user_info_t {
59 59
60#ifdef DEBUG 60#ifdef DEBUG
61extern int ds_pc_debug; 61extern int ds_pc_debug;
62#define cs_socket_name(skt) ((skt)->dev.class_id)
63 62
64#define ds_dbg(lvl, fmt, arg...) do { \ 63#define ds_dbg(lvl, fmt, arg...) do { \
65 if (ds_pc_debug >= lvl) \ 64 if (ds_pc_debug >= lvl) \
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index b9201c2ec38b..0ce39de834c4 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -48,7 +48,6 @@ static u8 pcmcia_used_irq[NR_IRQS];
48 48
49#ifdef DEBUG 49#ifdef DEBUG
50extern int ds_pc_debug; 50extern int ds_pc_debug;
51#define cs_socket_name(skt) ((skt)->dev.class_id)
52 51
53#define ds_dbg(skt, lvl, fmt, arg...) do { \ 52#define ds_dbg(skt, lvl, fmt, arg...) do { \
54 if (ds_pc_debug >= lvl) \ 53 if (ds_pc_debug >= lvl) \
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index 360c24896548..dd0ddf19ee57 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -682,7 +682,7 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
682 682
683 socket[i].socket.ops = &pd6729_operations; 683 socket[i].socket.ops = &pd6729_operations;
684 socket[i].socket.resource_ops = &pccard_nonstatic_ops; 684 socket[i].socket.resource_ops = &pccard_nonstatic_ops;
685 socket[i].socket.dev.dev = &dev->dev; 685 socket[i].socket.dev.parent = &dev->dev;
686 socket[i].socket.driver_data = &socket[i]; 686 socket[i].socket.driver_data = &socket[i];
687 } 687 }
688 688
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index c3176b16b7be..bfcaad6021cf 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -616,7 +616,7 @@ static int nonstatic_adjust_io_region(struct resource *res, unsigned long r_star
616static struct resource *nonstatic_find_io_region(unsigned long base, int num, 616static struct resource *nonstatic_find_io_region(unsigned long base, int num,
617 unsigned long align, struct pcmcia_socket *s) 617 unsigned long align, struct pcmcia_socket *s)
618{ 618{
619 struct resource *res = make_resource(0, num, IORESOURCE_IO, s->dev.class_id); 619 struct resource *res = make_resource(0, num, IORESOURCE_IO, s->dev.bus_id);
620 struct socket_data *s_data = s->resource_data; 620 struct socket_data *s_data = s->resource_data;
621 struct pcmcia_align_data data; 621 struct pcmcia_align_data data;
622 unsigned long min = base; 622 unsigned long min = base;
@@ -650,7 +650,7 @@ static struct resource *nonstatic_find_io_region(unsigned long base, int num,
650static struct resource * nonstatic_find_mem_region(u_long base, u_long num, 650static struct resource * nonstatic_find_mem_region(u_long base, u_long num,
651 u_long align, int low, struct pcmcia_socket *s) 651 u_long align, int low, struct pcmcia_socket *s)
652{ 652{
653 struct resource *res = make_resource(0, num, IORESOURCE_MEM, s->dev.class_id); 653 struct resource *res = make_resource(0, num, IORESOURCE_MEM, s->dev.bus_id);
654 struct socket_data *s_data = s->resource_data; 654 struct socket_data *s_data = s->resource_data;
655 struct pcmcia_align_data data; 655 struct pcmcia_align_data data;
656 unsigned long min, max; 656 unsigned long min, max;
@@ -897,9 +897,10 @@ EXPORT_SYMBOL(pccard_nonstatic_ops);
897 897
898/* sysfs interface to the resource database */ 898/* sysfs interface to the resource database */
899 899
900static ssize_t show_io_db(struct class_device *class_dev, char *buf) 900static ssize_t show_io_db(struct device *dev,
901 struct device_attribute *attr, char *buf)
901{ 902{
902 struct pcmcia_socket *s = class_get_devdata(class_dev); 903 struct pcmcia_socket *s = dev_get_drvdata(dev);
903 struct socket_data *data; 904 struct socket_data *data;
904 struct resource_map *p; 905 struct resource_map *p;
905 ssize_t ret = 0; 906 ssize_t ret = 0;
@@ -920,9 +921,11 @@ static ssize_t show_io_db(struct class_device *class_dev, char *buf)
920 return (ret); 921 return (ret);
921} 922}
922 923
923static ssize_t store_io_db(struct class_device *class_dev, const char *buf, size_t count) 924static ssize_t store_io_db(struct device *dev,
925 struct device_attribute *attr,
926 const char *buf, size_t count)
924{ 927{
925 struct pcmcia_socket *s = class_get_devdata(class_dev); 928 struct pcmcia_socket *s = dev_get_drvdata(dev);
926 unsigned long start_addr, end_addr; 929 unsigned long start_addr, end_addr;
927 unsigned int add = ADD_MANAGED_RESOURCE; 930 unsigned int add = ADD_MANAGED_RESOURCE;
928 ssize_t ret = 0; 931 ssize_t ret = 0;
@@ -947,11 +950,12 @@ static ssize_t store_io_db(struct class_device *class_dev, const char *buf, size
947 950
948 return ret ? ret : count; 951 return ret ? ret : count;
949} 952}
950static CLASS_DEVICE_ATTR(available_resources_io, 0600, show_io_db, store_io_db); 953static DEVICE_ATTR(available_resources_io, 0600, show_io_db, store_io_db);
951 954
952static ssize_t show_mem_db(struct class_device *class_dev, char *buf) 955static ssize_t show_mem_db(struct device *dev,
956 struct device_attribute *attr, char *buf)
953{ 957{
954 struct pcmcia_socket *s = class_get_devdata(class_dev); 958 struct pcmcia_socket *s = dev_get_drvdata(dev);
955 struct socket_data *data; 959 struct socket_data *data;
956 struct resource_map *p; 960 struct resource_map *p;
957 ssize_t ret = 0; 961 ssize_t ret = 0;
@@ -972,9 +976,11 @@ static ssize_t show_mem_db(struct class_device *class_dev, char *buf)
972 return (ret); 976 return (ret);
973} 977}
974 978
975static ssize_t store_mem_db(struct class_device *class_dev, const char *buf, size_t count) 979static ssize_t store_mem_db(struct device *dev,
980 struct device_attribute *attr,
981 const char *buf, size_t count)
976{ 982{
977 struct pcmcia_socket *s = class_get_devdata(class_dev); 983 struct pcmcia_socket *s = dev_get_drvdata(dev);
978 unsigned long start_addr, end_addr; 984 unsigned long start_addr, end_addr;
979 unsigned int add = ADD_MANAGED_RESOURCE; 985 unsigned int add = ADD_MANAGED_RESOURCE;
980 ssize_t ret = 0; 986 ssize_t ret = 0;
@@ -999,25 +1005,25 @@ static ssize_t store_mem_db(struct class_device *class_dev, const char *buf, siz
999 1005
1000 return ret ? ret : count; 1006 return ret ? ret : count;
1001} 1007}
1002static CLASS_DEVICE_ATTR(available_resources_mem, 0600, show_mem_db, store_mem_db); 1008static DEVICE_ATTR(available_resources_mem, 0600, show_mem_db, store_mem_db);
1003 1009
1004static struct class_device_attribute *pccard_rsrc_attributes[] = { 1010static struct device_attribute *pccard_rsrc_attributes[] = {
1005 &class_device_attr_available_resources_io, 1011 &dev_attr_available_resources_io,
1006 &class_device_attr_available_resources_mem, 1012 &dev_attr_available_resources_mem,
1007 NULL, 1013 NULL,
1008}; 1014};
1009 1015
1010static int __devinit pccard_sysfs_add_rsrc(struct class_device *class_dev, 1016static int __devinit pccard_sysfs_add_rsrc(struct device *dev,
1011 struct class_interface *class_intf) 1017 struct class_interface *class_intf)
1012{ 1018{
1013 struct pcmcia_socket *s = class_get_devdata(class_dev); 1019 struct pcmcia_socket *s = dev_get_drvdata(dev);
1014 struct class_device_attribute **attr; 1020 struct device_attribute **attr;
1015 int ret = 0; 1021 int ret = 0;
1016 if (s->resource_ops != &pccard_nonstatic_ops) 1022 if (s->resource_ops != &pccard_nonstatic_ops)
1017 return 0; 1023 return 0;
1018 1024
1019 for (attr = pccard_rsrc_attributes; *attr; attr++) { 1025 for (attr = pccard_rsrc_attributes; *attr; attr++) {
1020 ret = class_device_create_file(class_dev, *attr); 1026 ret = device_create_file(dev, *attr);
1021 if (ret) 1027 if (ret)
1022 break; 1028 break;
1023 } 1029 }
@@ -1025,23 +1031,23 @@ static int __devinit pccard_sysfs_add_rsrc(struct class_device *class_dev,
1025 return ret; 1031 return ret;
1026} 1032}
1027 1033
1028static void __devexit pccard_sysfs_remove_rsrc(struct class_device *class_dev, 1034static void __devexit pccard_sysfs_remove_rsrc(struct device *dev,
1029 struct class_interface *class_intf) 1035 struct class_interface *class_intf)
1030{ 1036{
1031 struct pcmcia_socket *s = class_get_devdata(class_dev); 1037 struct pcmcia_socket *s = dev_get_drvdata(dev);
1032 struct class_device_attribute **attr; 1038 struct device_attribute **attr;
1033 1039
1034 if (s->resource_ops != &pccard_nonstatic_ops) 1040 if (s->resource_ops != &pccard_nonstatic_ops)
1035 return; 1041 return;
1036 1042
1037 for (attr = pccard_rsrc_attributes; *attr; attr++) 1043 for (attr = pccard_rsrc_attributes; *attr; attr++)
1038 class_device_remove_file(class_dev, *attr); 1044 device_remove_file(dev, *attr);
1039} 1045}
1040 1046
1041static struct class_interface pccard_rsrc_interface = { 1047static struct class_interface pccard_rsrc_interface = {
1042 .class = &pcmcia_socket_class, 1048 .class = &pcmcia_socket_class,
1043 .add = &pccard_sysfs_add_rsrc, 1049 .add_dev = &pccard_sysfs_add_rsrc,
1044 .remove = __devexit_p(&pccard_sysfs_remove_rsrc), 1050 .remove_dev = __devexit_p(&pccard_sysfs_remove_rsrc),
1045}; 1051};
1046 1052
1047static int __init nonstatic_sysfs_init(void) 1053static int __init nonstatic_sysfs_init(void)
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index e433704e026a..d2a3bea55de2 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -478,10 +478,10 @@ dump_bits(char **p, const char *prefix, unsigned int val, struct bittbl *bits, i
478 * 478 *
479 * Returns: the number of characters added to the buffer 479 * Returns: the number of characters added to the buffer
480 */ 480 */
481static ssize_t show_status(struct class_device *class_dev, char *buf) 481static ssize_t show_status(struct device *dev, char *buf)
482{ 482{
483 struct soc_pcmcia_socket *skt = 483 struct soc_pcmcia_socket *skt =
484 container_of(class_dev, struct soc_pcmcia_socket, socket.dev); 484 container_of(dev, struct soc_pcmcia_socket, socket.dev);
485 char *p = buf; 485 char *p = buf;
486 486
487 p+=sprintf(p, "slot : %d\n", skt->nr); 487 p+=sprintf(p, "slot : %d\n", skt->nr);
@@ -747,7 +747,7 @@ int soc_common_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops
747 747
748 add_timer(&skt->poll_timer); 748 add_timer(&skt->poll_timer);
749 749
750 class_device_create_file(&skt->socket.dev, &class_device_attr_status); 750 device_create_file(&skt->socket.dev, &device_attr_status);
751 } 751 }
752 752
753 dev_set_drvdata(dev, sinfo); 753 dev_set_drvdata(dev, sinfo);
diff --git a/drivers/pcmcia/socket_sysfs.c b/drivers/pcmcia/socket_sysfs.c
index b005602d6b53..ea5765c3bdc0 100644
--- a/drivers/pcmcia/socket_sysfs.c
+++ b/drivers/pcmcia/socket_sysfs.c
@@ -40,7 +40,8 @@
40 40
41#define to_socket(_dev) container_of(_dev, struct pcmcia_socket, dev) 41#define to_socket(_dev) container_of(_dev, struct pcmcia_socket, dev)
42 42
43static ssize_t pccard_show_type(struct class_device *dev, char *buf) 43static ssize_t pccard_show_type(struct device *dev, struct device_attribute *attr,
44 char *buf)
44{ 45{
45 struct pcmcia_socket *s = to_socket(dev); 46 struct pcmcia_socket *s = to_socket(dev);
46 47
@@ -50,9 +51,10 @@ static ssize_t pccard_show_type(struct class_device *dev, char *buf)
50 return sprintf(buf, "32-bit\n"); 51 return sprintf(buf, "32-bit\n");
51 return sprintf(buf, "16-bit\n"); 52 return sprintf(buf, "16-bit\n");
52} 53}
53static CLASS_DEVICE_ATTR(card_type, 0444, pccard_show_type, NULL); 54static DEVICE_ATTR(card_type, 0444, pccard_show_type, NULL);
54 55
55static ssize_t pccard_show_voltage(struct class_device *dev, char *buf) 56static ssize_t pccard_show_voltage(struct device *dev, struct device_attribute *attr,
57 char *buf)
56{ 58{
57 struct pcmcia_socket *s = to_socket(dev); 59 struct pcmcia_socket *s = to_socket(dev);
58 60
@@ -63,28 +65,31 @@ static ssize_t pccard_show_voltage(struct class_device *dev, char *buf)
63 s->socket.Vcc % 10); 65 s->socket.Vcc % 10);
64 return sprintf(buf, "X.XV\n"); 66 return sprintf(buf, "X.XV\n");
65} 67}
66static CLASS_DEVICE_ATTR(card_voltage, 0444, pccard_show_voltage, NULL); 68static DEVICE_ATTR(card_voltage, 0444, pccard_show_voltage, NULL);
67 69
68static ssize_t pccard_show_vpp(struct class_device *dev, char *buf) 70static ssize_t pccard_show_vpp(struct device *dev, struct device_attribute *attr,
71 char *buf)
69{ 72{
70 struct pcmcia_socket *s = to_socket(dev); 73 struct pcmcia_socket *s = to_socket(dev);
71 if (!(s->state & SOCKET_PRESENT)) 74 if (!(s->state & SOCKET_PRESENT))
72 return -ENODEV; 75 return -ENODEV;
73 return sprintf(buf, "%d.%dV\n", s->socket.Vpp / 10, s->socket.Vpp % 10); 76 return sprintf(buf, "%d.%dV\n", s->socket.Vpp / 10, s->socket.Vpp % 10);
74} 77}
75static CLASS_DEVICE_ATTR(card_vpp, 0444, pccard_show_vpp, NULL); 78static DEVICE_ATTR(card_vpp, 0444, pccard_show_vpp, NULL);
76 79
77static ssize_t pccard_show_vcc(struct class_device *dev, char *buf) 80static ssize_t pccard_show_vcc(struct device *dev, struct device_attribute *attr,
81 char *buf)
78{ 82{
79 struct pcmcia_socket *s = to_socket(dev); 83 struct pcmcia_socket *s = to_socket(dev);
80 if (!(s->state & SOCKET_PRESENT)) 84 if (!(s->state & SOCKET_PRESENT))
81 return -ENODEV; 85 return -ENODEV;
82 return sprintf(buf, "%d.%dV\n", s->socket.Vcc / 10, s->socket.Vcc % 10); 86 return sprintf(buf, "%d.%dV\n", s->socket.Vcc / 10, s->socket.Vcc % 10);
83} 87}
84static CLASS_DEVICE_ATTR(card_vcc, 0444, pccard_show_vcc, NULL); 88static DEVICE_ATTR(card_vcc, 0444, pccard_show_vcc, NULL);
85 89
86 90
87static ssize_t pccard_store_insert(struct class_device *dev, const char *buf, size_t count) 91static ssize_t pccard_store_insert(struct device *dev, struct device_attribute *attr,
92 const char *buf, size_t count)
88{ 93{
89 ssize_t ret; 94 ssize_t ret;
90 struct pcmcia_socket *s = to_socket(dev); 95 struct pcmcia_socket *s = to_socket(dev);
@@ -96,16 +101,20 @@ static ssize_t pccard_store_insert(struct class_device *dev, const char *buf, si
96 101
97 return ret ? ret : count; 102 return ret ? ret : count;
98} 103}
99static CLASS_DEVICE_ATTR(card_insert, 0200, NULL, pccard_store_insert); 104static DEVICE_ATTR(card_insert, 0200, NULL, pccard_store_insert);
100 105
101 106
102static ssize_t pccard_show_card_pm_state(struct class_device *dev, char *buf) 107static ssize_t pccard_show_card_pm_state(struct device *dev,
108 struct device_attribute *attr,
109 char *buf)
103{ 110{
104 struct pcmcia_socket *s = to_socket(dev); 111 struct pcmcia_socket *s = to_socket(dev);
105 return sprintf(buf, "%s\n", s->state & SOCKET_SUSPEND ? "off" : "on"); 112 return sprintf(buf, "%s\n", s->state & SOCKET_SUSPEND ? "off" : "on");
106} 113}
107 114
108static ssize_t pccard_store_card_pm_state(struct class_device *dev, const char *buf, size_t count) 115static ssize_t pccard_store_card_pm_state(struct device *dev,
116 struct device_attribute *attr,
117 const char *buf, size_t count)
109{ 118{
110 ssize_t ret = -EINVAL; 119 ssize_t ret = -EINVAL;
111 struct pcmcia_socket *s = to_socket(dev); 120 struct pcmcia_socket *s = to_socket(dev);
@@ -120,9 +129,11 @@ static ssize_t pccard_store_card_pm_state(struct class_device *dev, const char *
120 129
121 return ret ? -ENODEV : count; 130 return ret ? -ENODEV : count;
122} 131}
123static CLASS_DEVICE_ATTR(card_pm_state, 0644, pccard_show_card_pm_state, pccard_store_card_pm_state); 132static DEVICE_ATTR(card_pm_state, 0644, pccard_show_card_pm_state, pccard_store_card_pm_state);
124 133
125static ssize_t pccard_store_eject(struct class_device *dev, const char *buf, size_t count) 134static ssize_t pccard_store_eject(struct device *dev,
135 struct device_attribute *attr,
136 const char *buf, size_t count)
126{ 137{
127 ssize_t ret; 138 ssize_t ret;
128 struct pcmcia_socket *s = to_socket(dev); 139 struct pcmcia_socket *s = to_socket(dev);
@@ -134,16 +145,20 @@ static ssize_t pccard_store_eject(struct class_device *dev, const char *buf, siz
134 145
135 return ret ? ret : count; 146 return ret ? ret : count;
136} 147}
137static CLASS_DEVICE_ATTR(card_eject, 0200, NULL, pccard_store_eject); 148static DEVICE_ATTR(card_eject, 0200, NULL, pccard_store_eject);
138 149
139 150
140static ssize_t pccard_show_irq_mask(struct class_device *dev, char *buf) 151static ssize_t pccard_show_irq_mask(struct device *dev,
152 struct device_attribute *attr,
153 char *buf)
141{ 154{
142 struct pcmcia_socket *s = to_socket(dev); 155 struct pcmcia_socket *s = to_socket(dev);
143 return sprintf(buf, "0x%04x\n", s->irq_mask); 156 return sprintf(buf, "0x%04x\n", s->irq_mask);
144} 157}
145 158
146static ssize_t pccard_store_irq_mask(struct class_device *dev, const char *buf, size_t count) 159static ssize_t pccard_store_irq_mask(struct device *dev,
160 struct device_attribute *attr,
161 const char *buf, size_t count)
147{ 162{
148 ssize_t ret; 163 ssize_t ret;
149 struct pcmcia_socket *s = to_socket(dev); 164 struct pcmcia_socket *s = to_socket(dev);
@@ -161,16 +176,19 @@ static ssize_t pccard_store_irq_mask(struct class_device *dev, const char *buf,
161 176
162 return ret ? ret : count; 177 return ret ? ret : count;
163} 178}
164static CLASS_DEVICE_ATTR(card_irq_mask, 0600, pccard_show_irq_mask, pccard_store_irq_mask); 179static DEVICE_ATTR(card_irq_mask, 0600, pccard_show_irq_mask, pccard_store_irq_mask);
165 180
166 181
167static ssize_t pccard_show_resource(struct class_device *dev, char *buf) 182static ssize_t pccard_show_resource(struct device *dev,
183 struct device_attribute *attr, char *buf)
168{ 184{
169 struct pcmcia_socket *s = to_socket(dev); 185 struct pcmcia_socket *s = to_socket(dev);
170 return sprintf(buf, "%s\n", s->resource_setup_done ? "yes" : "no"); 186 return sprintf(buf, "%s\n", s->resource_setup_done ? "yes" : "no");
171} 187}
172 188
173static ssize_t pccard_store_resource(struct class_device *dev, const char *buf, size_t count) 189static ssize_t pccard_store_resource(struct device *dev,
190 struct device_attribute *attr,
191 const char *buf, size_t count)
174{ 192{
175 unsigned long flags; 193 unsigned long flags;
176 struct pcmcia_socket *s = to_socket(dev); 194 struct pcmcia_socket *s = to_socket(dev);
@@ -196,7 +214,7 @@ static ssize_t pccard_store_resource(struct class_device *dev, const char *buf,
196 214
197 return count; 215 return count;
198} 216}
199static CLASS_DEVICE_ATTR(available_resources_setup_done, 0600, pccard_show_resource, pccard_store_resource); 217static DEVICE_ATTR(available_resources_setup_done, 0600, pccard_show_resource, pccard_store_resource);
200 218
201 219
202static ssize_t pccard_extract_cis(struct pcmcia_socket *s, char *buf, loff_t off, size_t count) 220static ssize_t pccard_extract_cis(struct pcmcia_socket *s, char *buf, loff_t off, size_t count)
@@ -279,7 +297,7 @@ static ssize_t pccard_show_cis(struct kobject *kobj, char *buf, loff_t off, size
279 if (off + count > size) 297 if (off + count > size)
280 count = size - off; 298 count = size - off;
281 299
282 s = to_socket(container_of(kobj, struct class_device, kobj)); 300 s = to_socket(container_of(kobj, struct device, kobj));
283 301
284 if (!(s->state & SOCKET_PRESENT)) 302 if (!(s->state & SOCKET_PRESENT))
285 return -ENODEV; 303 return -ENODEV;
@@ -296,7 +314,7 @@ static ssize_t pccard_show_cis(struct kobject *kobj, char *buf, loff_t off, size
296 314
297static ssize_t pccard_store_cis(struct kobject *kobj, char *buf, loff_t off, size_t count) 315static ssize_t pccard_store_cis(struct kobject *kobj, char *buf, loff_t off, size_t count)
298{ 316{
299 struct pcmcia_socket *s = to_socket(container_of(kobj, struct class_device, kobj)); 317 struct pcmcia_socket *s = to_socket(container_of(kobj, struct device, kobj));
300 cisdump_t *cis; 318 cisdump_t *cis;
301 int error; 319 int error;
302 320
@@ -335,16 +353,16 @@ static ssize_t pccard_store_cis(struct kobject *kobj, char *buf, loff_t off, siz
335} 353}
336 354
337 355
338static struct class_device_attribute *pccard_socket_attributes[] = { 356static struct device_attribute *pccard_socket_attributes[] = {
339 &class_device_attr_card_type, 357 &dev_attr_card_type,
340 &class_device_attr_card_voltage, 358 &dev_attr_card_voltage,
341 &class_device_attr_card_vpp, 359 &dev_attr_card_vpp,
342 &class_device_attr_card_vcc, 360 &dev_attr_card_vcc,
343 &class_device_attr_card_insert, 361 &dev_attr_card_insert,
344 &class_device_attr_card_pm_state, 362 &dev_attr_card_pm_state,
345 &class_device_attr_card_eject, 363 &dev_attr_card_eject,
346 &class_device_attr_card_irq_mask, 364 &dev_attr_card_irq_mask,
347 &class_device_attr_available_resources_setup_done, 365 &dev_attr_available_resources_setup_done,
348 NULL, 366 NULL,
349}; 367};
350 368
@@ -355,35 +373,35 @@ static struct bin_attribute pccard_cis_attr = {
355 .write = pccard_store_cis, 373 .write = pccard_store_cis,
356}; 374};
357 375
358static int __devinit pccard_sysfs_add_socket(struct class_device *class_dev, 376static int __devinit pccard_sysfs_add_socket(struct device *dev,
359 struct class_interface *class_intf) 377 struct class_interface *class_intf)
360{ 378{
361 struct class_device_attribute **attr; 379 struct device_attribute **attr;
362 int ret = 0; 380 int ret = 0;
363 381
364 for (attr = pccard_socket_attributes; *attr; attr++) { 382 for (attr = pccard_socket_attributes; *attr; attr++) {
365 ret = class_device_create_file(class_dev, *attr); 383 ret = device_create_file(dev, *attr);
366 if (ret) 384 if (ret)
367 break; 385 break;
368 } 386 }
369 if (!ret) 387 if (!ret)
370 ret = sysfs_create_bin_file(&class_dev->kobj, &pccard_cis_attr); 388 ret = sysfs_create_bin_file(&dev->kobj, &pccard_cis_attr);
371 389
372 return ret; 390 return ret;
373} 391}
374 392
375static void __devexit pccard_sysfs_remove_socket(struct class_device *class_dev, 393static void __devexit pccard_sysfs_remove_socket(struct device *dev,
376 struct class_interface *class_intf) 394 struct class_interface *class_intf)
377{ 395{
378 struct class_device_attribute **attr; 396 struct device_attribute **attr;
379 397
380 sysfs_remove_bin_file(&class_dev->kobj, &pccard_cis_attr); 398 sysfs_remove_bin_file(&dev->kobj, &pccard_cis_attr);
381 for (attr = pccard_socket_attributes; *attr; attr++) 399 for (attr = pccard_socket_attributes; *attr; attr++)
382 class_device_remove_file(class_dev, *attr); 400 device_remove_file(dev, *attr);
383} 401}
384 402
385struct class_interface pccard_sysfs_interface = { 403struct class_interface pccard_sysfs_interface = {
386 .class = &pcmcia_socket_class, 404 .class = &pcmcia_socket_class,
387 .add = &pccard_sysfs_add_socket, 405 .add_dev = &pccard_sysfs_add_socket,
388 .remove = __devexit_p(&pccard_sysfs_remove_socket), 406 .remove_dev = __devexit_p(&pccard_sysfs_remove_socket),
389}; 407};
diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c
index 2d2f415f80a8..c158cf38b9dd 100644
--- a/drivers/pcmcia/tcic.c
+++ b/drivers/pcmcia/tcic.c
@@ -512,7 +512,7 @@ static int __init init_tcic(void)
512 for (i = 0; i < sockets; i++) { 512 for (i = 0; i < sockets; i++) {
513 socket_table[i].socket.ops = &tcic_operations; 513 socket_table[i].socket.ops = &tcic_operations;
514 socket_table[i].socket.resource_ops = &pccard_nonstatic_ops; 514 socket_table[i].socket.resource_ops = &pccard_nonstatic_ops;
515 socket_table[i].socket.dev.dev = &tcic_device.dev; 515 socket_table[i].socket.dev.parent = &tcic_device.dev;
516 ret = pcmcia_register_socket(&socket_table[i].socket); 516 ret = pcmcia_register_socket(&socket_table[i].socket);
517 if (ret && i) 517 if (ret && i)
518 pcmcia_unregister_socket(&socket_table[0].socket); 518 pcmcia_unregister_socket(&socket_table[0].socket);
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index da471bddc972..a61d768f6e0e 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1104,7 +1104,7 @@ static int __devinit yenta_probe (struct pci_dev *dev, const struct pci_device_i
1104 /* prepare pcmcia_socket */ 1104 /* prepare pcmcia_socket */
1105 socket->socket.ops = &yenta_socket_operations; 1105 socket->socket.ops = &yenta_socket_operations;
1106 socket->socket.resource_ops = &pccard_nonstatic_ops; 1106 socket->socket.resource_ops = &pccard_nonstatic_ops;
1107 socket->socket.dev.dev = &dev->dev; 1107 socket->socket.dev.parent = &dev->dev;
1108 socket->socket.driver_data = socket; 1108 socket->socket.driver_data = socket;
1109 socket->socket.owner = THIS_MODULE; 1109 socket->socket.owner = THIS_MODULE;
1110 socket->socket.features = SS_CAP_PAGE_REGS | SS_CAP_PCCARD; 1110 socket->socket.features = SS_CAP_PAGE_REGS | SS_CAP_PCCARD;
diff --git a/drivers/pnp/pnpacpi/Kconfig b/drivers/pnp/pnpacpi/Kconfig
index b1854171b963..ad27e5e0101f 100644
--- a/drivers/pnp/pnpacpi/Kconfig
+++ b/drivers/pnp/pnpacpi/Kconfig
@@ -2,8 +2,8 @@
2# Plug and Play ACPI configuration 2# Plug and Play ACPI configuration
3# 3#
4config PNPACPI 4config PNPACPI
5 bool "Plug and Play ACPI support (EXPERIMENTAL)" 5 bool "Plug and Play ACPI support"
6 depends on PNP && ACPI && EXPERIMENTAL 6 depends on PNP && ACPI
7 default y 7 default y
8 ---help--- 8 ---help---
9 Linux uses the PNPACPI to autodetect built-in 9 Linux uses the PNPACPI to autodetect built-in
diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c
index d42015c382af..2065e74bb63f 100644
--- a/drivers/pnp/system.c
+++ b/drivers/pnp/system.c
@@ -3,7 +3,8 @@
3 * 3 *
4 * Some code is based on pnpbios_core.c 4 * Some code is based on pnpbios_core.c
5 * Copyright 2002 Adam Belay <ambx1@neo.rr.com> 5 * Copyright 2002 Adam Belay <ambx1@neo.rr.com>
6 * 6 * (c) Copyright 2007 Hewlett-Packard Development Company, L.P.
7 * Bjorn Helgaas <bjorn.helgaas@hp.com>
7 */ 8 */
8 9
9#include <linux/pnp.h> 10#include <linux/pnp.h>
@@ -21,18 +22,21 @@ static const struct pnp_device_id pnp_dev_table[] = {
21 { "", 0 } 22 { "", 0 }
22}; 23};
23 24
24static void reserve_ioport_range(char *pnpid, int start, int end) 25static void reserve_range(char *pnpid, int start, int end, int port)
25{ 26{
26 struct resource *res; 27 struct resource *res;
27 char *regionid; 28 char *regionid;
28 29
29 regionid = kmalloc(16, GFP_KERNEL); 30 regionid = kmalloc(16, GFP_KERNEL);
30 if ( regionid == NULL ) 31 if (regionid == NULL)
31 return; 32 return;
32 snprintf(regionid, 16, "pnp %s", pnpid); 33 snprintf(regionid, 16, "pnp %s", pnpid);
33 res = request_region(start,end-start+1,regionid); 34 if (port)
34 if ( res == NULL ) 35 res = request_region(start,end-start+1,regionid);
35 kfree( regionid ); 36 else
37 res = request_mem_region(start,end-start+1,regionid);
38 if (res == NULL)
39 kfree(regionid);
36 else 40 else
37 res->flags &= ~IORESOURCE_BUSY; 41 res->flags &= ~IORESOURCE_BUSY;
38 /* 42 /*
@@ -41,26 +45,20 @@ static void reserve_ioport_range(char *pnpid, int start, int end)
41 * have double reservations. 45 * have double reservations.
42 */ 46 */
43 printk(KERN_INFO 47 printk(KERN_INFO
44 "pnp: %s: ioport range 0x%x-0x%x %s reserved\n", 48 "pnp: %s: %s range 0x%x-0x%x %s reserved\n",
45 pnpid, start, end, 49 pnpid, port ? "ioport" : "iomem", start, end,
46 NULL != res ? "has been" : "could not be" 50 NULL != res ? "has been" : "could not be");
47 );
48
49 return;
50} 51}
51 52
52static void reserve_resources_of_dev( struct pnp_dev *dev ) 53static void reserve_resources_of_dev(struct pnp_dev *dev)
53{ 54{
54 int i; 55 int i;
55 56
56 for (i=0;i<PNP_MAX_PORT;i++) { 57 for (i = 0; i < PNP_MAX_PORT; i++) {
57 if (!pnp_port_valid(dev, i)) 58 if (!pnp_port_valid(dev, i))
58 /* end of resources */
59 continue; 59 continue;
60 if (pnp_port_start(dev, i) == 0) 60 if (pnp_port_start(dev, i) == 0)
61 /* disabled */ 61 continue; /* disabled */
62 /* Do nothing */
63 continue;
64 if (pnp_port_start(dev, i) < 0x100) 62 if (pnp_port_start(dev, i) < 0x100)
65 /* 63 /*
66 * Below 0x100 is only standard PC hardware 64 * Below 0x100 is only standard PC hardware
@@ -72,14 +70,18 @@ static void reserve_resources_of_dev( struct pnp_dev *dev )
72 */ 70 */
73 continue; 71 continue;
74 if (pnp_port_end(dev, i) < pnp_port_start(dev, i)) 72 if (pnp_port_end(dev, i) < pnp_port_start(dev, i))
75 /* invalid endpoint */ 73 continue; /* invalid */
76 /* Do nothing */ 74
75 reserve_range(dev->dev.bus_id, pnp_port_start(dev, i),
76 pnp_port_end(dev, i), 1);
77 }
78
79 for (i = 0; i < PNP_MAX_MEM; i++) {
80 if (!pnp_mem_valid(dev, i))
77 continue; 81 continue;
78 reserve_ioport_range( 82
79 dev->dev.bus_id, 83 reserve_range(dev->dev.bus_id, pnp_mem_start(dev, i),
80 pnp_port_start(dev, i), 84 pnp_mem_end(dev, i), 0);
81 pnp_port_end(dev, i)
82 );
83 } 85 }
84 86
85 return; 87 return;
diff --git a/drivers/s390/Kconfig b/drivers/s390/Kconfig
index ae89b9b88743..165af398fdea 100644
--- a/drivers/s390/Kconfig
+++ b/drivers/s390/Kconfig
@@ -103,14 +103,8 @@ config CCW_CONSOLE
103 depends on TN3215_CONSOLE || TN3270_CONSOLE 103 depends on TN3215_CONSOLE || TN3270_CONSOLE
104 default y 104 default y
105 105
106config SCLP
107 bool "Support for SCLP"
108 help
109 Include support for the SCLP interface to the service element.
110
111config SCLP_TTY 106config SCLP_TTY
112 bool "Support for SCLP line mode terminal" 107 bool "Support for SCLP line mode terminal"
113 depends on SCLP
114 help 108 help
115 Include support for IBM SCLP line-mode terminals. 109 Include support for IBM SCLP line-mode terminals.
116 110
@@ -123,7 +117,6 @@ config SCLP_CONSOLE
123 117
124config SCLP_VT220_TTY 118config SCLP_VT220_TTY
125 bool "Support for SCLP VT220-compatible terminal" 119 bool "Support for SCLP VT220-compatible terminal"
126 depends on SCLP
127 help 120 help
128 Include support for an IBM SCLP VT220-compatible terminal. 121 Include support for an IBM SCLP VT220-compatible terminal.
129 122
@@ -136,7 +129,6 @@ config SCLP_VT220_CONSOLE
136 129
137config SCLP_CPI 130config SCLP_CPI
138 tristate "Control-Program Identification" 131 tristate "Control-Program Identification"
139 depends on SCLP
140 help 132 help
141 This option enables the hardware console interface for system 133 This option enables the hardware console interface for system
142 identification. This is commonly used for workload management and 134 identification. This is commonly used for workload management and
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index 9803c9352d78..5a888704a8d0 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -2,6 +2,8 @@
2# Makefile for the S/390 specific device drivers 2# Makefile for the S/390 specific device drivers
3# 3#
4 4
5CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
6
5obj-y += s390mach.o sysinfo.o s390_rdev.o 7obj-y += s390mach.o sysinfo.o s390_rdev.o
6obj-y += cio/ block/ char/ crypto/ net/ scsi/ 8obj-y += cio/ block/ char/ crypto/ net/ scsi/
7 9
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 492b68bcd7cc..eb5dc62f0d9c 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -37,6 +37,7 @@
37 */ 37 */
38debug_info_t *dasd_debug_area; 38debug_info_t *dasd_debug_area;
39struct dasd_discipline *dasd_diag_discipline_pointer; 39struct dasd_discipline *dasd_diag_discipline_pointer;
40void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
40 41
41MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>"); 42MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
42MODULE_DESCRIPTION("Linux on S/390 DASD device driver," 43MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
@@ -51,7 +52,6 @@ static int dasd_alloc_queue(struct dasd_device * device);
51static void dasd_setup_queue(struct dasd_device * device); 52static void dasd_setup_queue(struct dasd_device * device);
52static void dasd_free_queue(struct dasd_device * device); 53static void dasd_free_queue(struct dasd_device * device);
53static void dasd_flush_request_queue(struct dasd_device *); 54static void dasd_flush_request_queue(struct dasd_device *);
54static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
55static int dasd_flush_ccw_queue(struct dasd_device *, int); 55static int dasd_flush_ccw_queue(struct dasd_device *, int);
56static void dasd_tasklet(struct dasd_device *); 56static void dasd_tasklet(struct dasd_device *);
57static void do_kick_device(struct work_struct *); 57static void do_kick_device(struct work_struct *);
@@ -483,7 +483,7 @@ unsigned int dasd_profile_level = DASD_PROFILE_OFF;
483/* 483/*
484 * Add profiling information for cqr before execution. 484 * Add profiling information for cqr before execution.
485 */ 485 */
486static inline void 486static void
487dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr, 487dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
488 struct request *req) 488 struct request *req)
489{ 489{
@@ -505,7 +505,7 @@ dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
505/* 505/*
506 * Add profiling information for cqr after execution. 506 * Add profiling information for cqr after execution.
507 */ 507 */
508static inline void 508static void
509dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr, 509dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
510 struct request *req) 510 struct request *req)
511{ 511{
@@ -1022,8 +1022,6 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1022 irb->scsw.cstat == 0 && 1022 irb->scsw.cstat == 0 &&
1023 !irb->esw.esw0.erw.cons) 1023 !irb->esw.esw0.erw.cons)
1024 era = dasd_era_none; 1024 era = dasd_era_none;
1025 else if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags))
1026 era = dasd_era_fatal; /* don't recover this request */
1027 else if (irb->esw.esw0.erw.cons) 1025 else if (irb->esw.esw0.erw.cons)
1028 era = device->discipline->examine_error(cqr, irb); 1026 era = device->discipline->examine_error(cqr, irb);
1029 else 1027 else
@@ -1104,7 +1102,7 @@ __dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr)
1104/* 1102/*
1105 * Process ccw request queue. 1103 * Process ccw request queue.
1106 */ 1104 */
1107static inline void 1105static void
1108__dasd_process_ccw_queue(struct dasd_device * device, 1106__dasd_process_ccw_queue(struct dasd_device * device,
1109 struct list_head *final_queue) 1107 struct list_head *final_queue)
1110{ 1108{
@@ -1127,7 +1125,9 @@ restart:
1127 cqr->status = DASD_CQR_FAILED; 1125 cqr->status = DASD_CQR_FAILED;
1128 cqr->stopclk = get_clock(); 1126 cqr->stopclk = get_clock();
1129 } else { 1127 } else {
1130 if (cqr->irb.esw.esw0.erw.cons) { 1128 if (cqr->irb.esw.esw0.erw.cons &&
1129 test_bit(DASD_CQR_FLAGS_USE_ERP,
1130 &cqr->flags)) {
1131 erp_fn = device->discipline-> 1131 erp_fn = device->discipline->
1132 erp_action(cqr); 1132 erp_action(cqr);
1133 erp_fn(cqr); 1133 erp_fn(cqr);
@@ -1181,7 +1181,7 @@ dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
1181/* 1181/*
1182 * Fetch requests from the block device queue. 1182 * Fetch requests from the block device queue.
1183 */ 1183 */
1184static inline void 1184static void
1185__dasd_process_blk_queue(struct dasd_device * device) 1185__dasd_process_blk_queue(struct dasd_device * device)
1186{ 1186{
1187 request_queue_t *queue; 1187 request_queue_t *queue;
@@ -1232,6 +1232,19 @@ __dasd_process_blk_queue(struct dasd_device * device)
1232 if (IS_ERR(cqr)) { 1232 if (IS_ERR(cqr)) {
1233 if (PTR_ERR(cqr) == -ENOMEM) 1233 if (PTR_ERR(cqr) == -ENOMEM)
1234 break; /* terminate request queue loop */ 1234 break; /* terminate request queue loop */
1235 if (PTR_ERR(cqr) == -EAGAIN) {
1236 /*
1237 * The current request cannot be build right
1238 * now, we have to try later. If this request
1239 * is the head-of-queue we stop the device
1240 * for 1/2 second.
1241 */
1242 if (!list_empty(&device->ccw_queue))
1243 break;
1244 device->stopped |= DASD_STOPPED_PENDING;
1245 dasd_set_timer(device, HZ/2);
1246 break;
1247 }
1235 DBF_DEV_EVENT(DBF_ERR, device, 1248 DBF_DEV_EVENT(DBF_ERR, device,
1236 "CCW creation failed (rc=%ld) " 1249 "CCW creation failed (rc=%ld) "
1237 "on request %p", 1250 "on request %p",
@@ -1254,7 +1267,7 @@ __dasd_process_blk_queue(struct dasd_device * device)
1254 * Take a look at the first request on the ccw queue and check 1267 * Take a look at the first request on the ccw queue and check
1255 * if it reached its expire time. If so, terminate the IO. 1268 * if it reached its expire time. If so, terminate the IO.
1256 */ 1269 */
1257static inline void 1270static void
1258__dasd_check_expire(struct dasd_device * device) 1271__dasd_check_expire(struct dasd_device * device)
1259{ 1272{
1260 struct dasd_ccw_req *cqr; 1273 struct dasd_ccw_req *cqr;
@@ -1285,7 +1298,7 @@ __dasd_check_expire(struct dasd_device * device)
1285 * Take a look at the first request on the ccw queue and check 1298 * Take a look at the first request on the ccw queue and check
1286 * if it needs to be started. 1299 * if it needs to be started.
1287 */ 1300 */
1288static inline void 1301static void
1289__dasd_start_head(struct dasd_device * device) 1302__dasd_start_head(struct dasd_device * device)
1290{ 1303{
1291 struct dasd_ccw_req *cqr; 1304 struct dasd_ccw_req *cqr;
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 4d01040c2c63..8b9d68f6e016 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -170,7 +170,6 @@ dasd_3990_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
170 /* log the erp chain if fatal error occurred */ 170 /* log the erp chain if fatal error occurred */
171 if ((era == dasd_era_fatal) && (device->state >= DASD_STATE_READY)) { 171 if ((era == dasd_era_fatal) && (device->state >= DASD_STATE_READY)) {
172 dasd_log_sense(cqr, irb); 172 dasd_log_sense(cqr, irb);
173 dasd_log_ccw(cqr, 0, irb->scsw.cpa);
174 } 173 }
175 174
176 return era; 175 return era;
@@ -2640,7 +2639,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
2640 2639
2641 struct dasd_ccw_req *erp = NULL; 2640 struct dasd_ccw_req *erp = NULL;
2642 struct dasd_device *device = cqr->device; 2641 struct dasd_device *device = cqr->device;
2643 __u32 cpa = cqr->irb.scsw.cpa;
2644 struct dasd_ccw_req *temp_erp = NULL; 2642 struct dasd_ccw_req *temp_erp = NULL;
2645 2643
2646 if (device->features & DASD_FEATURE_ERPLOG) { 2644 if (device->features & DASD_FEATURE_ERPLOG) {
@@ -2706,9 +2704,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
2706 } 2704 }
2707 } 2705 }
2708 2706
2709 if (erp->status == DASD_CQR_FAILED)
2710 dasd_log_ccw(erp, 1, cpa);
2711
2712 /* enqueue added ERP request */ 2707 /* enqueue added ERP request */
2713 if (erp->status == DASD_CQR_FILLED) { 2708 if (erp->status == DASD_CQR_FILLED) {
2714 erp->status = DASD_CQR_QUEUED; 2709 erp->status = DASD_CQR_QUEUED;
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 5943266152f5..ed70852cc915 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -136,7 +136,7 @@ __setup ("dasd=", dasd_call_setup);
136/* 136/*
137 * Read a device busid/devno from a string. 137 * Read a device busid/devno from a string.
138 */ 138 */
139static inline int 139static int
140dasd_busid(char **str, int *id0, int *id1, int *devno) 140dasd_busid(char **str, int *id0, int *id1, int *devno)
141{ 141{
142 int val, old_style; 142 int val, old_style;
@@ -182,7 +182,7 @@ dasd_busid(char **str, int *id0, int *id1, int *devno)
182 * only one: "ro" for read-only devices. The default feature set 182 * only one: "ro" for read-only devices. The default feature set
183 * is empty (value 0). 183 * is empty (value 0).
184 */ 184 */
185static inline int 185static int
186dasd_feature_list(char *str, char **endp) 186dasd_feature_list(char *str, char **endp)
187{ 187{
188 int features, len, rc; 188 int features, len, rc;
@@ -341,7 +341,7 @@ dasd_parse_range( char *parsestring ) {
341 return ERR_PTR(-EINVAL); 341 return ERR_PTR(-EINVAL);
342} 342}
343 343
344static inline char * 344static char *
345dasd_parse_next_element( char *parsestring ) { 345dasd_parse_next_element( char *parsestring ) {
346 char * residual_str; 346 char * residual_str;
347 residual_str = dasd_parse_keyword(parsestring); 347 residual_str = dasd_parse_keyword(parsestring);
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 53db58a68617..ab782bb46ac1 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -43,7 +43,7 @@ MODULE_LICENSE("GPL");
43#define DIAG_MAX_RETRIES 32 43#define DIAG_MAX_RETRIES 32
44#define DIAG_TIMEOUT 50 * HZ 44#define DIAG_TIMEOUT 50 * HZ
45 45
46struct dasd_discipline dasd_diag_discipline; 46static struct dasd_discipline dasd_diag_discipline;
47 47
48struct dasd_diag_private { 48struct dasd_diag_private {
49 struct dasd_diag_characteristics rdc_data; 49 struct dasd_diag_characteristics rdc_data;
@@ -90,7 +90,7 @@ static inline int dia250(void *iob, int cmd)
90 * block offset. On success, return zero and set end_block to contain the 90 * block offset. On success, return zero and set end_block to contain the
91 * number of blocks on the device minus the specified offset. Return non-zero 91 * number of blocks on the device minus the specified offset. Return non-zero
92 * otherwise. */ 92 * otherwise. */
93static __inline__ int 93static inline int
94mdsk_init_io(struct dasd_device *device, unsigned int blocksize, 94mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
95 blocknum_t offset, blocknum_t *end_block) 95 blocknum_t offset, blocknum_t *end_block)
96{ 96{
@@ -117,7 +117,7 @@ mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
117 117
118/* Remove block I/O environment for device. Return zero on success, non-zero 118/* Remove block I/O environment for device. Return zero on success, non-zero
119 * otherwise. */ 119 * otherwise. */
120static __inline__ int 120static inline int
121mdsk_term_io(struct dasd_device * device) 121mdsk_term_io(struct dasd_device * device)
122{ 122{
123 struct dasd_diag_private *private; 123 struct dasd_diag_private *private;
@@ -576,7 +576,7 @@ dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
576 "dump sense not available for DIAG data"); 576 "dump sense not available for DIAG data");
577} 577}
578 578
579struct dasd_discipline dasd_diag_discipline = { 579static struct dasd_discipline dasd_diag_discipline = {
580 .owner = THIS_MODULE, 580 .owner = THIS_MODULE,
581 .name = "DIAG", 581 .name = "DIAG",
582 .ebcname = "DIAG", 582 .ebcname = "DIAG",
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index fdaa471e845f..cecab2274a6e 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -134,44 +134,7 @@ ceil_quot(unsigned int d1, unsigned int d2)
134 return (d1 + (d2 - 1)) / d2; 134 return (d1 + (d2 - 1)) / d2;
135} 135}
136 136
137static inline int 137static unsigned int
138bytes_per_record(struct dasd_eckd_characteristics *rdc, int kl, int dl)
139{
140 unsigned int fl1, fl2, int1, int2;
141 int bpr;
142
143 switch (rdc->formula) {
144 case 0x01:
145 fl1 = round_up_multiple(ECKD_F2(rdc) + dl, ECKD_F1(rdc));
146 fl2 = round_up_multiple(kl ? ECKD_F2(rdc) + kl : 0,
147 ECKD_F1(rdc));
148 bpr = fl1 + fl2;
149 break;
150 case 0x02:
151 int1 = ceil_quot(dl + ECKD_F6(rdc), ECKD_F5(rdc) << 1);
152 int2 = ceil_quot(kl + ECKD_F6(rdc), ECKD_F5(rdc) << 1);
153 fl1 = round_up_multiple(ECKD_F1(rdc) * ECKD_F2(rdc) + dl +
154 ECKD_F6(rdc) + ECKD_F4(rdc) * int1,
155 ECKD_F1(rdc));
156 fl2 = round_up_multiple(ECKD_F1(rdc) * ECKD_F3(rdc) + kl +
157 ECKD_F6(rdc) + ECKD_F4(rdc) * int2,
158 ECKD_F1(rdc));
159 bpr = fl1 + fl2;
160 break;
161 default:
162 bpr = 0;
163 break;
164 }
165 return bpr;
166}
167
168static inline unsigned int
169bytes_per_track(struct dasd_eckd_characteristics *rdc)
170{
171 return *(unsigned int *) (rdc->byte_per_track) >> 8;
172}
173
174static inline unsigned int
175recs_per_track(struct dasd_eckd_characteristics * rdc, 138recs_per_track(struct dasd_eckd_characteristics * rdc,
176 unsigned int kl, unsigned int dl) 139 unsigned int kl, unsigned int dl)
177{ 140{
@@ -204,37 +167,39 @@ recs_per_track(struct dasd_eckd_characteristics * rdc,
204 return 0; 167 return 0;
205} 168}
206 169
207static inline void 170static int
208check_XRC (struct ccw1 *de_ccw, 171check_XRC (struct ccw1 *de_ccw,
209 struct DE_eckd_data *data, 172 struct DE_eckd_data *data,
210 struct dasd_device *device) 173 struct dasd_device *device)
211{ 174{
212 struct dasd_eckd_private *private; 175 struct dasd_eckd_private *private;
176 int rc;
213 177
214 private = (struct dasd_eckd_private *) device->private; 178 private = (struct dasd_eckd_private *) device->private;
179 if (!private->rdc_data.facilities.XRC_supported)
180 return 0;
215 181
216 /* switch on System Time Stamp - needed for XRC Support */ 182 /* switch on System Time Stamp - needed for XRC Support */
217 if (private->rdc_data.facilities.XRC_supported) { 183 data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
218 184 data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
219 data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
220 data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
221
222 data->ep_sys_time = get_clock ();
223
224 de_ccw->count = sizeof (struct DE_eckd_data);
225 de_ccw->flags |= CCW_FLAG_SLI;
226 }
227 185
228 return; 186 rc = get_sync_clock(&data->ep_sys_time);
187 /* Ignore return code if sync clock is switched off. */
188 if (rc == -ENOSYS || rc == -EACCES)
189 rc = 0;
229 190
230} /* end check_XRC */ 191 de_ccw->count = sizeof (struct DE_eckd_data);
192 de_ccw->flags |= CCW_FLAG_SLI;
193 return rc;
194}
231 195
232static inline void 196static int
233define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, 197define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
234 int totrk, int cmd, struct dasd_device * device) 198 int totrk, int cmd, struct dasd_device * device)
235{ 199{
236 struct dasd_eckd_private *private; 200 struct dasd_eckd_private *private;
237 struct ch_t geo, beg, end; 201 struct ch_t geo, beg, end;
202 int rc = 0;
238 203
239 private = (struct dasd_eckd_private *) device->private; 204 private = (struct dasd_eckd_private *) device->private;
240 205
@@ -263,12 +228,12 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
263 case DASD_ECKD_CCW_WRITE_KD_MT: 228 case DASD_ECKD_CCW_WRITE_KD_MT:
264 data->mask.perm = 0x02; 229 data->mask.perm = 0x02;
265 data->attributes.operation = private->attrib.operation; 230 data->attributes.operation = private->attrib.operation;
266 check_XRC (ccw, data, device); 231 rc = check_XRC (ccw, data, device);
267 break; 232 break;
268 case DASD_ECKD_CCW_WRITE_CKD: 233 case DASD_ECKD_CCW_WRITE_CKD:
269 case DASD_ECKD_CCW_WRITE_CKD_MT: 234 case DASD_ECKD_CCW_WRITE_CKD_MT:
270 data->attributes.operation = DASD_BYPASS_CACHE; 235 data->attributes.operation = DASD_BYPASS_CACHE;
271 check_XRC (ccw, data, device); 236 rc = check_XRC (ccw, data, device);
272 break; 237 break;
273 case DASD_ECKD_CCW_ERASE: 238 case DASD_ECKD_CCW_ERASE:
274 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS: 239 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
@@ -276,7 +241,7 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
276 data->mask.perm = 0x3; 241 data->mask.perm = 0x3;
277 data->mask.auth = 0x1; 242 data->mask.auth = 0x1;
278 data->attributes.operation = DASD_BYPASS_CACHE; 243 data->attributes.operation = DASD_BYPASS_CACHE;
279 check_XRC (ccw, data, device); 244 rc = check_XRC (ccw, data, device);
280 break; 245 break;
281 default: 246 default:
282 DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd); 247 DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd);
@@ -312,9 +277,10 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
312 data->beg_ext.head = beg.head; 277 data->beg_ext.head = beg.head;
313 data->end_ext.cyl = end.cyl; 278 data->end_ext.cyl = end.cyl;
314 data->end_ext.head = end.head; 279 data->end_ext.head = end.head;
280 return rc;
315} 281}
316 282
317static inline void 283static void
318locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk, 284locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk,
319 int rec_on_trk, int no_rec, int cmd, 285 int rec_on_trk, int no_rec, int cmd,
320 struct dasd_device * device, int reclen) 286 struct dasd_device * device, int reclen)
@@ -548,7 +514,7 @@ dasd_eckd_read_conf(struct dasd_device *device)
548/* 514/*
549 * Build CP for Perform Subsystem Function - SSC. 515 * Build CP for Perform Subsystem Function - SSC.
550 */ 516 */
551struct dasd_ccw_req * 517static struct dasd_ccw_req *
552dasd_eckd_build_psf_ssc(struct dasd_device *device) 518dasd_eckd_build_psf_ssc(struct dasd_device *device)
553{ 519{
554 struct dasd_ccw_req *cqr; 520 struct dasd_ccw_req *cqr;
@@ -1200,7 +1166,12 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1200 return cqr; 1166 return cqr;
1201 ccw = cqr->cpaddr; 1167 ccw = cqr->cpaddr;
1202 /* First ccw is define extent. */ 1168 /* First ccw is define extent. */
1203 define_extent(ccw++, cqr->data, first_trk, last_trk, cmd, device); 1169 if (define_extent(ccw++, cqr->data, first_trk,
1170 last_trk, cmd, device) == -EAGAIN) {
1171 /* Clock not in sync and XRC is enabled. Try again later. */
1172 dasd_sfree_request(cqr, device);
1173 return ERR_PTR(-EAGAIN);
1174 }
1204 /* Build locate_record+read/write/ccws. */ 1175 /* Build locate_record+read/write/ccws. */
1205 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_eckd_data)); 1176 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_eckd_data));
1206 LO_data = (struct LO_eckd_data *) (idaws + cidaw); 1177 LO_data = (struct LO_eckd_data *) (idaws + cidaw);
@@ -1380,7 +1351,7 @@ dasd_eckd_release(struct dasd_device *device)
1380 cqr->device = device; 1351 cqr->device = device;
1381 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1352 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1382 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 1353 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1383 cqr->retries = 0; 1354 cqr->retries = 2; /* set retry counter to enable basic ERP */
1384 cqr->expires = 2 * HZ; 1355 cqr->expires = 2 * HZ;
1385 cqr->buildclk = get_clock(); 1356 cqr->buildclk = get_clock();
1386 cqr->status = DASD_CQR_FILLED; 1357 cqr->status = DASD_CQR_FILLED;
@@ -1420,7 +1391,7 @@ dasd_eckd_reserve(struct dasd_device *device)
1420 cqr->device = device; 1391 cqr->device = device;
1421 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1392 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1422 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 1393 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1423 cqr->retries = 0; 1394 cqr->retries = 2; /* set retry counter to enable basic ERP */
1424 cqr->expires = 2 * HZ; 1395 cqr->expires = 2 * HZ;
1425 cqr->buildclk = get_clock(); 1396 cqr->buildclk = get_clock();
1426 cqr->status = DASD_CQR_FILLED; 1397 cqr->status = DASD_CQR_FILLED;
@@ -1459,7 +1430,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
1459 cqr->device = device; 1430 cqr->device = device;
1460 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1431 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1461 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 1432 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1462 cqr->retries = 0; 1433 cqr->retries = 2; /* set retry counter to enable basic ERP */
1463 cqr->expires = 2 * HZ; 1434 cqr->expires = 2 * HZ;
1464 cqr->buildclk = get_clock(); 1435 cqr->buildclk = get_clock();
1465 cqr->status = DASD_CQR_FILLED; 1436 cqr->status = DASD_CQR_FILLED;
@@ -1609,7 +1580,7 @@ dasd_eckd_ioctl(struct dasd_device *device, unsigned int cmd, void __user *argp)
1609 * Dump the range of CCWs into 'page' buffer 1580 * Dump the range of CCWs into 'page' buffer
1610 * and return number of printed chars. 1581 * and return number of printed chars.
1611 */ 1582 */
1612static inline int 1583static int
1613dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) 1584dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
1614{ 1585{
1615 int len, count; 1586 int len, count;
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index e0bf30ebb215..6cedc914077e 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -658,18 +658,24 @@ static struct file_operations dasd_eer_fops = {
658 .owner = THIS_MODULE, 658 .owner = THIS_MODULE,
659}; 659};
660 660
661static struct miscdevice dasd_eer_dev = { 661static struct miscdevice *dasd_eer_dev = NULL;
662 .minor = MISC_DYNAMIC_MINOR,
663 .name = "dasd_eer",
664 .fops = &dasd_eer_fops,
665};
666 662
667int __init dasd_eer_init(void) 663int __init dasd_eer_init(void)
668{ 664{
669 int rc; 665 int rc;
670 666
671 rc = misc_register(&dasd_eer_dev); 667 dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL);
668 if (!dasd_eer_dev)
669 return -ENOMEM;
670
671 dasd_eer_dev->minor = MISC_DYNAMIC_MINOR;
672 dasd_eer_dev->name = "dasd_eer";
673 dasd_eer_dev->fops = &dasd_eer_fops;
674
675 rc = misc_register(dasd_eer_dev);
672 if (rc) { 676 if (rc) {
677 kfree(dasd_eer_dev);
678 dasd_eer_dev = NULL;
673 MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not " 679 MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not "
674 "register misc device"); 680 "register misc device");
675 return rc; 681 return rc;
@@ -680,5 +686,9 @@ int __init dasd_eer_init(void)
680 686
681void dasd_eer_exit(void) 687void dasd_eer_exit(void)
682{ 688{
683 WARN_ON(misc_deregister(&dasd_eer_dev) != 0); 689 if (dasd_eer_dev) {
690 WARN_ON(misc_deregister(dasd_eer_dev) != 0);
691 kfree(dasd_eer_dev);
692 dasd_eer_dev = NULL;
693 }
684} 694}
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index 58a65097922b..caa5d91420f8 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -152,25 +152,6 @@ dasd_default_erp_postaction(struct dasd_ccw_req * cqr)
152 152
153} /* end default_erp_postaction */ 153} /* end default_erp_postaction */
154 154
155/*
156 * Print the hex dump of the memory used by a request. This includes
157 * all error recovery ccws that have been chained in from of the
158 * real request.
159 */
160static inline void
161hex_dump_memory(struct dasd_device *device, void *data, int len)
162{
163 int *pint;
164
165 pint = (int *) data;
166 while (len > 0) {
167 DEV_MESSAGE(KERN_ERR, device, "%p: %08x %08x %08x %08x",
168 pint, pint[0], pint[1], pint[2], pint[3]);
169 pint += 4;
170 len -= 16;
171 }
172}
173
174void 155void
175dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb) 156dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
176{ 157{
@@ -182,69 +163,8 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
182 device->discipline->dump_sense(device, cqr, irb); 163 device->discipline->dump_sense(device, cqr, irb);
183} 164}
184 165
185void
186dasd_log_ccw(struct dasd_ccw_req * cqr, int caller, __u32 cpa)
187{
188 struct dasd_device *device;
189 struct dasd_ccw_req *lcqr;
190 struct ccw1 *ccw;
191 int cplength;
192
193 device = cqr->device;
194 /* log the channel program */
195 for (lcqr = cqr; lcqr != NULL; lcqr = lcqr->refers) {
196 DEV_MESSAGE(KERN_ERR, device,
197 "(%s) ERP chain report for req: %p",
198 caller == 0 ? "EXAMINE" : "ACTION", lcqr);
199 hex_dump_memory(device, lcqr, sizeof(struct dasd_ccw_req));
200
201 cplength = 1;
202 ccw = lcqr->cpaddr;
203 while (ccw++->flags & (CCW_FLAG_DC | CCW_FLAG_CC))
204 cplength++;
205
206 if (cplength > 40) { /* log only parts of the CP */
207 DEV_MESSAGE(KERN_ERR, device, "%s",
208 "Start of channel program:");
209 hex_dump_memory(device, lcqr->cpaddr,
210 40*sizeof(struct ccw1));
211
212 DEV_MESSAGE(KERN_ERR, device, "%s",
213 "End of channel program:");
214 hex_dump_memory(device, lcqr->cpaddr + cplength - 10,
215 10*sizeof(struct ccw1));
216 } else { /* log the whole CP */
217 DEV_MESSAGE(KERN_ERR, device, "%s",
218 "Channel program (complete):");
219 hex_dump_memory(device, lcqr->cpaddr,
220 cplength*sizeof(struct ccw1));
221 }
222
223 if (lcqr != cqr)
224 continue;
225
226 /*
227 * Log bytes arround failed CCW but only if we did
228 * not log the whole CP of the CCW is outside the
229 * logged CP.
230 */
231 if (cplength > 40 ||
232 ((addr_t) cpa < (addr_t) lcqr->cpaddr &&
233 (addr_t) cpa > (addr_t) (lcqr->cpaddr + cplength + 4))) {
234
235 DEV_MESSAGE(KERN_ERR, device,
236 "Failed CCW (%p) (area):",
237 (void *) (long) cpa);
238 hex_dump_memory(device, cqr->cpaddr - 10,
239 20*sizeof(struct ccw1));
240 }
241 }
242
243} /* end log_erp_chain */
244
245EXPORT_SYMBOL(dasd_default_erp_action); 166EXPORT_SYMBOL(dasd_default_erp_action);
246EXPORT_SYMBOL(dasd_default_erp_postaction); 167EXPORT_SYMBOL(dasd_default_erp_postaction);
247EXPORT_SYMBOL(dasd_alloc_erp_request); 168EXPORT_SYMBOL(dasd_alloc_erp_request);
248EXPORT_SYMBOL(dasd_free_erp_request); 169EXPORT_SYMBOL(dasd_free_erp_request);
249EXPORT_SYMBOL(dasd_log_sense); 170EXPORT_SYMBOL(dasd_log_sense);
250EXPORT_SYMBOL(dasd_log_ccw);
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index b857fd5893fd..be0909e39226 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -75,7 +75,7 @@ static struct ccw_driver dasd_fba_driver = {
75 .notify = dasd_generic_notify, 75 .notify = dasd_generic_notify,
76}; 76};
77 77
78static inline void 78static void
79define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw, 79define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
80 int blksize, int beg, int nr) 80 int blksize, int beg, int nr)
81{ 81{
@@ -95,7 +95,7 @@ define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
95 data->ext_end = nr - 1; 95 data->ext_end = nr - 1;
96} 96}
97 97
98static inline void 98static void
99locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw, 99locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
100 int block_nr, int block_ct) 100 int block_nr, int block_ct)
101{ 101{
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index d163632101d2..47ba4462708d 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -147,7 +147,7 @@ dasd_destroy_partitions(struct dasd_device * device)
147 */ 147 */
148 memset(&bpart, 0, sizeof(struct blkpg_partition)); 148 memset(&bpart, 0, sizeof(struct blkpg_partition));
149 memset(&barg, 0, sizeof(struct blkpg_ioctl_arg)); 149 memset(&barg, 0, sizeof(struct blkpg_ioctl_arg));
150 barg.data = (void __user *) &bpart; 150 barg.data = (void __force __user *) &bpart;
151 barg.op = BLKPG_DEL_PARTITION; 151 barg.op = BLKPG_DEL_PARTITION;
152 for (bpart.pno = device->gdp->minors - 1; bpart.pno > 0; bpart.pno--) 152 for (bpart.pno = device->gdp->minors - 1; bpart.pno > 0; bpart.pno--)
153 ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg); 153 ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg);
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index fb725e3b08fe..a2cc69e11410 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -559,7 +559,6 @@ struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int,
559 struct dasd_device *); 559 struct dasd_device *);
560void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *); 560void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *);
561void dasd_log_sense(struct dasd_ccw_req *, struct irb *); 561void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
562void dasd_log_ccw(struct dasd_ccw_req *, int, __u32);
563 562
564/* externals in dasd_3370_erp.c */ 563/* externals in dasd_3370_erp.c */
565dasd_era_t dasd_3370_erp_examine(struct dasd_ccw_req *, struct irb *); 564dasd_era_t dasd_3370_erp_examine(struct dasd_ccw_req *, struct irb *);
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index bfa010f6dab2..8b7e11815d70 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -28,7 +28,7 @@ static struct proc_dir_entry *dasd_proc_root_entry = NULL;
28static struct proc_dir_entry *dasd_devices_entry = NULL; 28static struct proc_dir_entry *dasd_devices_entry = NULL;
29static struct proc_dir_entry *dasd_statistics_entry = NULL; 29static struct proc_dir_entry *dasd_statistics_entry = NULL;
30 30
31static inline char * 31static char *
32dasd_get_user_string(const char __user *user_buf, size_t user_len) 32dasd_get_user_string(const char __user *user_buf, size_t user_len)
33{ 33{
34 char *buffer; 34 char *buffer;
@@ -154,7 +154,7 @@ static struct file_operations dasd_devices_file_ops = {
154 .release = seq_release, 154 .release = seq_release,
155}; 155};
156 156
157static inline int 157static int
158dasd_calc_metrics(char *page, char **start, off_t off, 158dasd_calc_metrics(char *page, char **start, off_t off,
159 int count, int *eof, int len) 159 int count, int *eof, int len)
160{ 160{
@@ -167,8 +167,8 @@ dasd_calc_metrics(char *page, char **start, off_t off,
167 return len; 167 return len;
168} 168}
169 169
170static inline char * 170static char *
171dasd_statistics_array(char *str, int *array, int shift) 171dasd_statistics_array(char *str, unsigned int *array, int shift)
172{ 172{
173 int i; 173 int i;
174 174
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index be9b05347b4f..1340451ea408 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -102,7 +102,7 @@ dcssblk_release_segment(struct device *dev)
102 * device needs to be enqueued before the semaphore is 102 * device needs to be enqueued before the semaphore is
103 * freed. 103 * freed.
104 */ 104 */
105static inline int 105static int
106dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info) 106dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
107{ 107{
108 int minor, found; 108 int minor, found;
@@ -230,7 +230,7 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
230 SEGMENT_SHARED); 230 SEGMENT_SHARED);
231 if (rc < 0) { 231 if (rc < 0) {
232 BUG_ON(rc == -EINVAL); 232 BUG_ON(rc == -EINVAL);
233 if (rc == -EIO || rc == -ENOENT) 233 if (rc != -EAGAIN)
234 goto removeseg; 234 goto removeseg;
235 } else { 235 } else {
236 dev_info->is_shared = 1; 236 dev_info->is_shared = 1;
@@ -253,7 +253,7 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
253 SEGMENT_EXCLUSIVE); 253 SEGMENT_EXCLUSIVE);
254 if (rc < 0) { 254 if (rc < 0) {
255 BUG_ON(rc == -EINVAL); 255 BUG_ON(rc == -EINVAL);
256 if (rc == -EIO || rc == -ENOENT) 256 if (rc != -EAGAIN)
257 goto removeseg; 257 goto removeseg;
258 } else { 258 } else {
259 dev_info->is_shared = 0; 259 dev_info->is_shared = 0;
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index c3e97b4fc186..293e667b50f2 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -2,7 +2,8 @@
2# S/390 character devices 2# S/390 character devices
3# 3#
4 4
5obj-y += ctrlchar.o keyboard.o defkeymap.o 5obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
6 sclp_info.o
6 7
7obj-$(CONFIG_TN3270) += raw3270.o 8obj-$(CONFIG_TN3270) += raw3270.o
8obj-$(CONFIG_TN3270_CONSOLE) += con3270.o 9obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
@@ -11,7 +12,6 @@ obj-$(CONFIG_TN3270_FS) += fs3270.o
11 12
12obj-$(CONFIG_TN3215) += con3215.o 13obj-$(CONFIG_TN3215) += con3215.o
13 14
14obj-$(CONFIG_SCLP) += sclp.o sclp_rw.o sclp_quiesce.o
15obj-$(CONFIG_SCLP_TTY) += sclp_tty.o 15obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
16obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o 16obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
17obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o 17obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 25b5d7a66417..9a328f14a641 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -1121,7 +1121,7 @@ static const struct tty_operations tty3215_ops = {
1121 * 3215 tty registration code called from tty_init(). 1121 * 3215 tty registration code called from tty_init().
1122 * Most kernel services (incl. kmalloc) are available at this poimt. 1122 * Most kernel services (incl. kmalloc) are available at this poimt.
1123 */ 1123 */
1124int __init 1124static int __init
1125tty3215_init(void) 1125tty3215_init(void)
1126{ 1126{
1127 struct tty_driver *driver; 1127 struct tty_driver *driver;
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 7566be890688..8e7f2d7633d6 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -69,8 +69,7 @@ static void con3270_update(struct con3270 *);
69/* 69/*
70 * Setup timeout for a device. On timeout trigger an update. 70 * Setup timeout for a device. On timeout trigger an update.
71 */ 71 */
72void 72static void con3270_set_timer(struct con3270 *cp, int expires)
73con3270_set_timer(struct con3270 *cp, int expires)
74{ 73{
75 if (expires == 0) { 74 if (expires == 0) {
76 if (timer_pending(&cp->timer)) 75 if (timer_pending(&cp->timer))
diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c
index 17027d918cf7..564baca01b7c 100644
--- a/drivers/s390/char/defkeymap.c
+++ b/drivers/s390/char/defkeymap.c
@@ -5,6 +5,8 @@
5#include <linux/types.h> 5#include <linux/types.h>
6#include <linux/keyboard.h> 6#include <linux/keyboard.h>
7#include <linux/kd.h> 7#include <linux/kd.h>
8#include <linux/kbd_kern.h>
9#include <linux/kbd_diacr.h>
8 10
9u_short plain_map[NR_KEYS] = { 11u_short plain_map[NR_KEYS] = {
10 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 12 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 0893d306ae80..e1a746269c4c 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -23,7 +23,7 @@
23#include "raw3270.h" 23#include "raw3270.h"
24#include "ctrlchar.h" 24#include "ctrlchar.h"
25 25
26struct raw3270_fn fs3270_fn; 26static struct raw3270_fn fs3270_fn;
27 27
28struct fs3270 { 28struct fs3270 {
29 struct raw3270_view view; 29 struct raw3270_view view;
@@ -401,7 +401,7 @@ fs3270_release(struct raw3270_view *view)
401} 401}
402 402
403/* View to a 3270 device. Can be console, tty or fullscreen. */ 403/* View to a 3270 device. Can be console, tty or fullscreen. */
404struct raw3270_fn fs3270_fn = { 404static struct raw3270_fn fs3270_fn = {
405 .activate = fs3270_activate, 405 .activate = fs3270_activate,
406 .deactivate = fs3270_deactivate, 406 .deactivate = fs3270_deactivate,
407 .intv = (void *) fs3270_irq, 407 .intv = (void *) fs3270_irq,
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 3e86fd1756e5..f62f9a4e8950 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -148,6 +148,7 @@ kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc)
148 } 148 }
149} 149}
150 150
151#if 0
151/* 152/*
152 * Generate ebcdic -> ascii translation table from kbd_data. 153 * Generate ebcdic -> ascii translation table from kbd_data.
153 */ 154 */
@@ -173,6 +174,7 @@ kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc)
173 } 174 }
174 } 175 }
175} 176}
177#endif
176 178
177/* 179/*
178 * We have a combining character DIACR here, followed by the character CH. 180 * We have a combining character DIACR here, followed by the character CH.
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index cdb24f528112..9e451acc6491 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -67,8 +67,8 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
67 return -EINVAL; 67 return -EINVAL;
68} 68}
69 69
70static inline struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv, 70static struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv,
71 struct monwrite_hdr *monhdr) 71 struct monwrite_hdr *monhdr)
72{ 72{
73 struct mon_buf *entry, *next; 73 struct mon_buf *entry, *next;
74 74
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 7a84014f2037..8facd14adb7c 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -29,7 +29,7 @@
29#include <linux/device.h> 29#include <linux/device.h>
30#include <linux/mutex.h> 30#include <linux/mutex.h>
31 31
32struct class *class3270; 32static struct class *class3270;
33 33
34/* The main 3270 data structure. */ 34/* The main 3270 data structure. */
35struct raw3270 { 35struct raw3270 {
@@ -86,7 +86,7 @@ DECLARE_WAIT_QUEUE_HEAD(raw3270_wait_queue);
86/* 86/*
87 * Encode array for 12 bit 3270 addresses. 87 * Encode array for 12 bit 3270 addresses.
88 */ 88 */
89unsigned char raw3270_ebcgraf[64] = { 89static unsigned char raw3270_ebcgraf[64] = {
90 0x40, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 90 0x40, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
91 0xc8, 0xc9, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 91 0xc8, 0xc9, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
92 0x50, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 92 0x50, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 8a056df09d6b..f171de3b0b11 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -59,7 +59,8 @@ static volatile enum sclp_init_state_t {
59/* Internal state: is a request active at the sclp? */ 59/* Internal state: is a request active at the sclp? */
60static volatile enum sclp_running_state_t { 60static volatile enum sclp_running_state_t {
61 sclp_running_state_idle, 61 sclp_running_state_idle,
62 sclp_running_state_running 62 sclp_running_state_running,
63 sclp_running_state_reset_pending
63} sclp_running_state = sclp_running_state_idle; 64} sclp_running_state = sclp_running_state_idle;
64 65
65/* Internal state: is a read request pending? */ 66/* Internal state: is a read request pending? */
@@ -88,15 +89,15 @@ static volatile enum sclp_mask_state_t {
88 89
89/* Timeout intervals in seconds.*/ 90/* Timeout intervals in seconds.*/
90#define SCLP_BUSY_INTERVAL 10 91#define SCLP_BUSY_INTERVAL 10
91#define SCLP_RETRY_INTERVAL 15 92#define SCLP_RETRY_INTERVAL 30
92 93
93static void sclp_process_queue(void); 94static void sclp_process_queue(void);
94static int sclp_init_mask(int calculate); 95static int sclp_init_mask(int calculate);
95static int sclp_init(void); 96static int sclp_init(void);
96 97
97/* Perform service call. Return 0 on success, non-zero otherwise. */ 98/* Perform service call. Return 0 on success, non-zero otherwise. */
98static int 99int
99service_call(sclp_cmdw_t command, void *sccb) 100sclp_service_call(sclp_cmdw_t command, void *sccb)
100{ 101{
101 int cc; 102 int cc;
102 103
@@ -113,19 +114,17 @@ service_call(sclp_cmdw_t command, void *sccb)
113 return 0; 114 return 0;
114} 115}
115 116
116/* Request timeout handler. Restart the request queue. If DATA is non-zero, 117static inline void __sclp_make_read_req(void);
117 * force restart of running request. */ 118
118static void 119static void
119sclp_request_timeout(unsigned long data) 120__sclp_queue_read_req(void)
120{ 121{
121 unsigned long flags; 122 if (sclp_reading_state == sclp_reading_state_idle) {
122 123 sclp_reading_state = sclp_reading_state_reading;
123 if (data) { 124 __sclp_make_read_req();
124 spin_lock_irqsave(&sclp_lock, flags); 125 /* Add request to head of queue */
125 sclp_running_state = sclp_running_state_idle; 126 list_add(&sclp_read_req.list, &sclp_req_queue);
126 spin_unlock_irqrestore(&sclp_lock, flags);
127 } 127 }
128 sclp_process_queue();
129} 128}
130 129
131/* Set up request retry timer. Called while sclp_lock is locked. */ 130/* Set up request retry timer. Called while sclp_lock is locked. */
@@ -140,6 +139,29 @@ __sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
140 add_timer(&sclp_request_timer); 139 add_timer(&sclp_request_timer);
141} 140}
142 141
142/* Request timeout handler. Restart the request queue. If DATA is non-zero,
143 * force restart of running request. */
144static void
145sclp_request_timeout(unsigned long data)
146{
147 unsigned long flags;
148
149 spin_lock_irqsave(&sclp_lock, flags);
150 if (data) {
151 if (sclp_running_state == sclp_running_state_running) {
152 /* Break running state and queue NOP read event request
153 * to get a defined interface state. */
154 __sclp_queue_read_req();
155 sclp_running_state = sclp_running_state_idle;
156 }
157 } else {
158 __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
159 sclp_request_timeout, 0);
160 }
161 spin_unlock_irqrestore(&sclp_lock, flags);
162 sclp_process_queue();
163}
164
143/* Try to start a request. Return zero if the request was successfully 165/* Try to start a request. Return zero if the request was successfully
144 * started or if it will be started at a later time. Return non-zero otherwise. 166 * started or if it will be started at a later time. Return non-zero otherwise.
145 * Called while sclp_lock is locked. */ 167 * Called while sclp_lock is locked. */
@@ -151,7 +173,7 @@ __sclp_start_request(struct sclp_req *req)
151 if (sclp_running_state != sclp_running_state_idle) 173 if (sclp_running_state != sclp_running_state_idle)
152 return 0; 174 return 0;
153 del_timer(&sclp_request_timer); 175 del_timer(&sclp_request_timer);
154 rc = service_call(req->command, req->sccb); 176 rc = sclp_service_call(req->command, req->sccb);
155 req->start_count++; 177 req->start_count++;
156 178
157 if (rc == 0) { 179 if (rc == 0) {
@@ -191,7 +213,15 @@ sclp_process_queue(void)
191 rc = __sclp_start_request(req); 213 rc = __sclp_start_request(req);
192 if (rc == 0) 214 if (rc == 0)
193 break; 215 break;
194 /* Request failed. */ 216 /* Request failed */
217 if (req->start_count > 1) {
218 /* Cannot abort already submitted request - could still
219 * be active at the SCLP */
220 __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
221 sclp_request_timeout, 0);
222 break;
223 }
224 /* Post-processing for aborted request */
195 list_del(&req->list); 225 list_del(&req->list);
196 if (req->callback) { 226 if (req->callback) {
197 spin_unlock_irqrestore(&sclp_lock, flags); 227 spin_unlock_irqrestore(&sclp_lock, flags);
@@ -221,7 +251,8 @@ sclp_add_request(struct sclp_req *req)
221 list_add_tail(&req->list, &sclp_req_queue); 251 list_add_tail(&req->list, &sclp_req_queue);
222 rc = 0; 252 rc = 0;
223 /* Start if request is first in list */ 253 /* Start if request is first in list */
224 if (req->list.prev == &sclp_req_queue) { 254 if (sclp_running_state == sclp_running_state_idle &&
255 req->list.prev == &sclp_req_queue) {
225 rc = __sclp_start_request(req); 256 rc = __sclp_start_request(req);
226 if (rc) 257 if (rc)
227 list_del(&req->list); 258 list_del(&req->list);
@@ -294,7 +325,7 @@ __sclp_make_read_req(void)
294 sccb = (struct sccb_header *) sclp_read_sccb; 325 sccb = (struct sccb_header *) sclp_read_sccb;
295 clear_page(sccb); 326 clear_page(sccb);
296 memset(&sclp_read_req, 0, sizeof(struct sclp_req)); 327 memset(&sclp_read_req, 0, sizeof(struct sclp_req));
297 sclp_read_req.command = SCLP_CMDW_READDATA; 328 sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
298 sclp_read_req.status = SCLP_REQ_QUEUED; 329 sclp_read_req.status = SCLP_REQ_QUEUED;
299 sclp_read_req.start_count = 0; 330 sclp_read_req.start_count = 0;
300 sclp_read_req.callback = sclp_read_cb; 331 sclp_read_req.callback = sclp_read_cb;
@@ -334,6 +365,8 @@ sclp_interrupt_handler(__u16 code)
334 finished_sccb = S390_lowcore.ext_params & 0xfffffff8; 365 finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
335 evbuf_pending = S390_lowcore.ext_params & 0x3; 366 evbuf_pending = S390_lowcore.ext_params & 0x3;
336 if (finished_sccb) { 367 if (finished_sccb) {
368 del_timer(&sclp_request_timer);
369 sclp_running_state = sclp_running_state_reset_pending;
337 req = __sclp_find_req(finished_sccb); 370 req = __sclp_find_req(finished_sccb);
338 if (req) { 371 if (req) {
339 /* Request post-processing */ 372 /* Request post-processing */
@@ -348,13 +381,8 @@ sclp_interrupt_handler(__u16 code)
348 sclp_running_state = sclp_running_state_idle; 381 sclp_running_state = sclp_running_state_idle;
349 } 382 }
350 if (evbuf_pending && sclp_receive_mask != 0 && 383 if (evbuf_pending && sclp_receive_mask != 0 &&
351 sclp_reading_state == sclp_reading_state_idle && 384 sclp_activation_state == sclp_activation_state_active)
352 sclp_activation_state == sclp_activation_state_active ) { 385 __sclp_queue_read_req();
353 sclp_reading_state = sclp_reading_state_reading;
354 __sclp_make_read_req();
355 /* Add request to head of queue */
356 list_add(&sclp_read_req.list, &sclp_req_queue);
357 }
358 spin_unlock(&sclp_lock); 386 spin_unlock(&sclp_lock);
359 sclp_process_queue(); 387 sclp_process_queue();
360} 388}
@@ -374,6 +402,7 @@ sclp_sync_wait(void)
374 unsigned long flags; 402 unsigned long flags;
375 unsigned long cr0, cr0_sync; 403 unsigned long cr0, cr0_sync;
376 u64 timeout; 404 u64 timeout;
405 int irq_context;
377 406
378 /* We'll be disabling timer interrupts, so we need a custom timeout 407 /* We'll be disabling timer interrupts, so we need a custom timeout
379 * mechanism */ 408 * mechanism */
@@ -386,7 +415,9 @@ sclp_sync_wait(void)
386 } 415 }
387 local_irq_save(flags); 416 local_irq_save(flags);
388 /* Prevent bottom half from executing once we force interrupts open */ 417 /* Prevent bottom half from executing once we force interrupts open */
389 local_bh_disable(); 418 irq_context = in_interrupt();
419 if (!irq_context)
420 local_bh_disable();
390 /* Enable service-signal interruption, disable timer interrupts */ 421 /* Enable service-signal interruption, disable timer interrupts */
391 trace_hardirqs_on(); 422 trace_hardirqs_on();
392 __ctl_store(cr0, 0, 0); 423 __ctl_store(cr0, 0, 0);
@@ -402,19 +433,19 @@ sclp_sync_wait(void)
402 get_clock() > timeout && 433 get_clock() > timeout &&
403 del_timer(&sclp_request_timer)) 434 del_timer(&sclp_request_timer))
404 sclp_request_timer.function(sclp_request_timer.data); 435 sclp_request_timer.function(sclp_request_timer.data);
405 barrier();
406 cpu_relax(); 436 cpu_relax();
407 } 437 }
408 local_irq_disable(); 438 local_irq_disable();
409 __ctl_load(cr0, 0, 0); 439 __ctl_load(cr0, 0, 0);
410 _local_bh_enable(); 440 if (!irq_context)
441 _local_bh_enable();
411 local_irq_restore(flags); 442 local_irq_restore(flags);
412} 443}
413 444
414EXPORT_SYMBOL(sclp_sync_wait); 445EXPORT_SYMBOL(sclp_sync_wait);
415 446
416/* Dispatch changes in send and receive mask to registered listeners. */ 447/* Dispatch changes in send and receive mask to registered listeners. */
417static inline void 448static void
418sclp_dispatch_state_change(void) 449sclp_dispatch_state_change(void)
419{ 450{
420 struct list_head *l; 451 struct list_head *l;
@@ -597,7 +628,7 @@ __sclp_make_init_req(u32 receive_mask, u32 send_mask)
597 sccb = (struct init_sccb *) sclp_init_sccb; 628 sccb = (struct init_sccb *) sclp_init_sccb;
598 clear_page(sccb); 629 clear_page(sccb);
599 memset(&sclp_init_req, 0, sizeof(struct sclp_req)); 630 memset(&sclp_init_req, 0, sizeof(struct sclp_req));
600 sclp_init_req.command = SCLP_CMDW_WRITEMASK; 631 sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
601 sclp_init_req.status = SCLP_REQ_FILLED; 632 sclp_init_req.status = SCLP_REQ_FILLED;
602 sclp_init_req.start_count = 0; 633 sclp_init_req.start_count = 0;
603 sclp_init_req.callback = NULL; 634 sclp_init_req.callback = NULL;
@@ -800,7 +831,7 @@ sclp_check_interface(void)
800 for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) { 831 for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
801 __sclp_make_init_req(0, 0); 832 __sclp_make_init_req(0, 0);
802 sccb = (struct init_sccb *) sclp_init_req.sccb; 833 sccb = (struct init_sccb *) sclp_init_req.sccb;
803 rc = service_call(sclp_init_req.command, sccb); 834 rc = sclp_service_call(sclp_init_req.command, sccb);
804 if (rc == -EIO) 835 if (rc == -EIO)
805 break; 836 break;
806 sclp_init_req.status = SCLP_REQ_RUNNING; 837 sclp_init_req.status = SCLP_REQ_RUNNING;
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 2c71d6ee7b5b..7d29ab45a6ed 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -12,7 +12,7 @@
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/list.h> 14#include <linux/list.h>
15 15#include <asm/sclp.h>
16#include <asm/ebcdic.h> 16#include <asm/ebcdic.h>
17 17
18/* maximum number of pages concerning our own memory management */ 18/* maximum number of pages concerning our own memory management */
@@ -49,9 +49,11 @@
49 49
50typedef unsigned int sclp_cmdw_t; 50typedef unsigned int sclp_cmdw_t;
51 51
52#define SCLP_CMDW_READDATA 0x00770005 52#define SCLP_CMDW_READ_EVENT_DATA 0x00770005
53#define SCLP_CMDW_WRITEDATA 0x00760005 53#define SCLP_CMDW_WRITE_EVENT_DATA 0x00760005
54#define SCLP_CMDW_WRITEMASK 0x00780005 54#define SCLP_CMDW_WRITE_EVENT_MASK 0x00780005
55#define SCLP_CMDW_READ_SCP_INFO 0x00020001
56#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
55 57
56#define GDS_ID_MDSMU 0x1310 58#define GDS_ID_MDSMU 0x1310
57#define GDS_ID_MDSRouteInfo 0x1311 59#define GDS_ID_MDSRouteInfo 0x1311
@@ -66,13 +68,6 @@ typedef unsigned int sclp_cmdw_t;
66 68
67typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */ 69typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */
68 70
69struct sccb_header {
70 u16 length;
71 u8 function_code;
72 u8 control_mask[3];
73 u16 response_code;
74} __attribute__((packed));
75
76struct gds_subvector { 71struct gds_subvector {
77 u8 length; 72 u8 length;
78 u8 key; 73 u8 key;
@@ -131,6 +126,7 @@ void sclp_unregister(struct sclp_register *reg);
131int sclp_remove_processed(struct sccb_header *sccb); 126int sclp_remove_processed(struct sccb_header *sccb);
132int sclp_deactivate(void); 127int sclp_deactivate(void);
133int sclp_reactivate(void); 128int sclp_reactivate(void);
129int sclp_service_call(sclp_cmdw_t command, void *sccb);
134 130
135/* useful inlines */ 131/* useful inlines */
136 132
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index 86864f641716..ead1043d788e 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -66,7 +66,7 @@ sclp_conbuf_callback(struct sclp_buffer *buffer, int rc)
66 } while (buffer && sclp_emit_buffer(buffer, sclp_conbuf_callback)); 66 } while (buffer && sclp_emit_buffer(buffer, sclp_conbuf_callback));
67} 67}
68 68
69static inline void 69static void
70sclp_conbuf_emit(void) 70sclp_conbuf_emit(void)
71{ 71{
72 struct sclp_buffer* buffer; 72 struct sclp_buffer* buffer;
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c
index 4f873ae148b7..65aa2c85737f 100644
--- a/drivers/s390/char/sclp_cpi.c
+++ b/drivers/s390/char/sclp_cpi.c
@@ -169,7 +169,7 @@ cpi_prepare_req(void)
169 } 169 }
170 170
171 /* prepare request data structure presented to SCLP driver */ 171 /* prepare request data structure presented to SCLP driver */
172 req->command = SCLP_CMDW_WRITEDATA; 172 req->command = SCLP_CMDW_WRITE_EVENT_DATA;
173 req->sccb = sccb; 173 req->sccb = sccb;
174 req->status = SCLP_REQ_FILLED; 174 req->status = SCLP_REQ_FILLED;
175 req->callback = cpi_callback; 175 req->callback = cpi_callback;
diff --git a/drivers/s390/char/sclp_info.c b/drivers/s390/char/sclp_info.c
new file mode 100644
index 000000000000..7bcbe643b087
--- /dev/null
+++ b/drivers/s390/char/sclp_info.c
@@ -0,0 +1,57 @@
1/*
2 * drivers/s390/char/sclp_info.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 */
7
8#include <linux/init.h>
9#include <linux/errno.h>
10#include <linux/string.h>
11#include <asm/sclp.h>
12#include "sclp.h"
13
14struct sclp_readinfo_sccb s390_readinfo_sccb;
15
16void __init sclp_readinfo_early(void)
17{
18 sclp_cmdw_t command;
19 struct sccb_header *sccb;
20 int ret;
21
22 __ctl_set_bit(0, 9); /* enable service signal subclass mask */
23
24 sccb = &s390_readinfo_sccb.header;
25 command = SCLP_CMDW_READ_SCP_INFO_FORCED;
26 while (1) {
27 u16 response;
28
29 memset(&s390_readinfo_sccb, 0, sizeof(s390_readinfo_sccb));
30 sccb->length = sizeof(s390_readinfo_sccb);
31 sccb->control_mask[2] = 0x80;
32
33 ret = sclp_service_call(command, &s390_readinfo_sccb);
34
35 if (ret == -EIO)
36 goto out;
37 if (ret == -EBUSY)
38 continue;
39
40 __load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT |
41 PSW_MASK_WAIT | PSW_DEFAULT_KEY);
42 local_irq_disable();
43 barrier();
44
45 response = sccb->response_code;
46
47 if (response == 0x10)
48 break;
49
50 if (response != 0x1f0 || command == SCLP_CMDW_READ_SCP_INFO)
51 break;
52
53 command = SCLP_CMDW_READ_SCP_INFO;
54 }
55out:
56 __ctl_clear_bit(0, 9); /* disable service signal subclass mask */
57}
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index 0c92d3909cca..2486783ea58e 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -460,7 +460,7 @@ sclp_emit_buffer(struct sclp_buffer *buffer,
460 sccb->msg_buf.header.type = EvTyp_PMsgCmd; 460 sccb->msg_buf.header.type = EvTyp_PMsgCmd;
461 else 461 else
462 return -ENOSYS; 462 return -ENOSYS;
463 buffer->request.command = SCLP_CMDW_WRITEDATA; 463 buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
464 buffer->request.status = SCLP_REQ_FILLED; 464 buffer->request.status = SCLP_REQ_FILLED;
465 buffer->request.callback = sclp_writedata_callback; 465 buffer->request.callback = sclp_writedata_callback;
466 buffer->request.callback_data = buffer; 466 buffer->request.callback_data = buffer;
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 2d173e5c8a09..90536f60bf50 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -721,7 +721,7 @@ static const struct tty_operations sclp_ops = {
721 .ioctl = sclp_tty_ioctl, 721 .ioctl = sclp_tty_ioctl,
722}; 722};
723 723
724int __init 724static int __init
725sclp_tty_init(void) 725sclp_tty_init(void)
726{ 726{
727 struct tty_driver *driver; 727 struct tty_driver *driver;
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 723bf4191bfe..544f137d70d7 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -207,7 +207,7 @@ __sclp_vt220_emit(struct sclp_vt220_request *request)
207 request->sclp_req.status = SCLP_REQ_FAILED; 207 request->sclp_req.status = SCLP_REQ_FAILED;
208 return -EIO; 208 return -EIO;
209 } 209 }
210 request->sclp_req.command = SCLP_CMDW_WRITEDATA; 210 request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA;
211 request->sclp_req.status = SCLP_REQ_FILLED; 211 request->sclp_req.status = SCLP_REQ_FILLED;
212 request->sclp_req.callback = sclp_vt220_callback; 212 request->sclp_req.callback = sclp_vt220_callback;
213 request->sclp_req.callback_data = (void *) request; 213 request->sclp_req.callback_data = (void *) request;
@@ -669,7 +669,7 @@ static const struct tty_operations sclp_vt220_ops = {
669/* 669/*
670 * Register driver with SCLP and Linux and initialize internal tty structures. 670 * Register driver with SCLP and Linux and initialize internal tty structures.
671 */ 671 */
672int __init 672static int __init
673sclp_vt220_tty_init(void) 673sclp_vt220_tty_init(void)
674{ 674{
675 struct tty_driver *driver; 675 struct tty_driver *driver;
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index c9f1c4c8bb13..bb4ff537729d 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -3,7 +3,7 @@
3 * tape device driver for 3480/3490E/3590 tapes. 3 * tape device driver for 3480/3490E/3590 tapes.
4 * 4 *
5 * S390 and zSeries version 5 * S390 and zSeries version
6 * Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright IBM Corp. 2001,2006
7 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 8 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
9 * Martin Schwidefsky <schwidefsky@de.ibm.com> 9 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -99,7 +99,11 @@ enum tape_op {
99 TO_DIS, /* Tape display */ 99 TO_DIS, /* Tape display */
100 TO_ASSIGN, /* Assign tape to channel path */ 100 TO_ASSIGN, /* Assign tape to channel path */
101 TO_UNASSIGN, /* Unassign tape from channel path */ 101 TO_UNASSIGN, /* Unassign tape from channel path */
102 TO_SIZE /* #entries in tape_op_t */ 102 TO_CRYPT_ON, /* Enable encrpytion */
103 TO_CRYPT_OFF, /* Disable encrpytion */
104 TO_KEKL_SET, /* Set KEK label */
105 TO_KEKL_QUERY, /* Query KEK label */
106 TO_SIZE, /* #entries in tape_op_t */
103}; 107};
104 108
105/* Forward declaration */ 109/* Forward declaration */
@@ -112,6 +116,7 @@ enum tape_request_status {
112 TAPE_REQUEST_IN_IO, /* request is currently in IO */ 116 TAPE_REQUEST_IN_IO, /* request is currently in IO */
113 TAPE_REQUEST_DONE, /* request is completed. */ 117 TAPE_REQUEST_DONE, /* request is completed. */
114 TAPE_REQUEST_CANCEL, /* request should be canceled. */ 118 TAPE_REQUEST_CANCEL, /* request should be canceled. */
119 TAPE_REQUEST_LONG_BUSY, /* request has to be restarted after long busy */
115}; 120};
116 121
117/* Tape CCW request */ 122/* Tape CCW request */
@@ -164,10 +169,11 @@ struct tape_discipline {
164 * The discipline irq function either returns an error code (<0) which 169 * The discipline irq function either returns an error code (<0) which
165 * means that the request has failed with an error or one of the following: 170 * means that the request has failed with an error or one of the following:
166 */ 171 */
167#define TAPE_IO_SUCCESS 0 /* request successful */ 172#define TAPE_IO_SUCCESS 0 /* request successful */
168#define TAPE_IO_PENDING 1 /* request still running */ 173#define TAPE_IO_PENDING 1 /* request still running */
169#define TAPE_IO_RETRY 2 /* retry to current request */ 174#define TAPE_IO_RETRY 2 /* retry to current request */
170#define TAPE_IO_STOP 3 /* stop the running request */ 175#define TAPE_IO_STOP 3 /* stop the running request */
176#define TAPE_IO_LONG_BUSY 4 /* delay the running request */
171 177
172/* Char Frontend Data */ 178/* Char Frontend Data */
173struct tape_char_data { 179struct tape_char_data {
@@ -242,6 +248,10 @@ struct tape_device {
242 248
243 /* Function to start or stop the next request later. */ 249 /* Function to start or stop the next request later. */
244 struct delayed_work tape_dnr; 250 struct delayed_work tape_dnr;
251
252 /* Timer for long busy */
253 struct timer_list lb_timeout;
254
245}; 255};
246 256
247/* Externals from tape_core.c */ 257/* Externals from tape_core.c */
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 9df912f63188..50f5edab83d7 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -2,7 +2,7 @@
2 * drivers/s390/char/tape_3590.c 2 * drivers/s390/char/tape_3590.c
3 * tape device discipline for 3590 tapes. 3 * tape device discipline for 3590 tapes.
4 * 4 *
5 * Copyright (C) IBM Corp. 2001,2006 5 * Copyright IBM Corp. 2001,2006
6 * Author(s): Stefan Bader <shbader@de.ibm.com> 6 * Author(s): Stefan Bader <shbader@de.ibm.com>
7 * Michael Holzheu <holzheu@de.ibm.com> 7 * Michael Holzheu <holzheu@de.ibm.com>
8 * Martin Schwidefsky <schwidefsky@de.ibm.com> 8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -11,6 +11,7 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/bio.h> 13#include <linux/bio.h>
14#include <asm/ebcdic.h>
14 15
15#define TAPE_DBF_AREA tape_3590_dbf 16#define TAPE_DBF_AREA tape_3590_dbf
16 17
@@ -30,7 +31,7 @@ EXPORT_SYMBOL(TAPE_DBF_AREA);
30 * - Read Device (buffered) log: BRA 31 * - Read Device (buffered) log: BRA
31 * - Read Library log: BRA 32 * - Read Library log: BRA
32 * - Swap Devices: BRA 33 * - Swap Devices: BRA
33 * - Long Busy: BRA 34 * - Long Busy: implemented
34 * - Special Intercept: BRA 35 * - Special Intercept: BRA
35 * - Read Alternate: implemented 36 * - Read Alternate: implemented
36 *******************************************************************/ 37 *******************************************************************/
@@ -94,6 +95,332 @@ static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = {
94 [0xae] = "Subsystem environmental alert", 95 [0xae] = "Subsystem environmental alert",
95}; 96};
96 97
98static int crypt_supported(struct tape_device *device)
99{
100 return TAPE390_CRYPT_SUPPORTED(TAPE_3590_CRYPT_INFO(device));
101}
102
103static int crypt_enabled(struct tape_device *device)
104{
105 return TAPE390_CRYPT_ON(TAPE_3590_CRYPT_INFO(device));
106}
107
108static void ext_to_int_kekl(struct tape390_kekl *in,
109 struct tape3592_kekl *out)
110{
111 int i;
112
113 memset(out, 0, sizeof(*out));
114 if (in->type == TAPE390_KEKL_TYPE_HASH)
115 out->flags |= 0x40;
116 if (in->type_on_tape == TAPE390_KEKL_TYPE_HASH)
117 out->flags |= 0x80;
118 strncpy(out->label, in->label, 64);
119 for (i = strlen(in->label); i < sizeof(out->label); i++)
120 out->label[i] = ' ';
121 ASCEBC(out->label, sizeof(out->label));
122}
123
124static void int_to_ext_kekl(struct tape3592_kekl *in,
125 struct tape390_kekl *out)
126{
127 memset(out, 0, sizeof(*out));
128 if(in->flags & 0x40)
129 out->type = TAPE390_KEKL_TYPE_HASH;
130 else
131 out->type = TAPE390_KEKL_TYPE_LABEL;
132 if(in->flags & 0x80)
133 out->type_on_tape = TAPE390_KEKL_TYPE_HASH;
134 else
135 out->type_on_tape = TAPE390_KEKL_TYPE_LABEL;
136 memcpy(out->label, in->label, sizeof(in->label));
137 EBCASC(out->label, sizeof(in->label));
138 strstrip(out->label);
139}
140
141static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in,
142 struct tape390_kekl_pair *out)
143{
144 if (in->count == 0) {
145 out->kekl[0].type = TAPE390_KEKL_TYPE_NONE;
146 out->kekl[0].type_on_tape = TAPE390_KEKL_TYPE_NONE;
147 out->kekl[1].type = TAPE390_KEKL_TYPE_NONE;
148 out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE;
149 } else if (in->count == 1) {
150 int_to_ext_kekl(&in->kekl[0], &out->kekl[0]);
151 out->kekl[1].type = TAPE390_KEKL_TYPE_NONE;
152 out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE;
153 } else if (in->count == 2) {
154 int_to_ext_kekl(&in->kekl[0], &out->kekl[0]);
155 int_to_ext_kekl(&in->kekl[1], &out->kekl[1]);
156 } else {
157 printk("Invalid KEKL number: %d\n", in->count);
158 BUG();
159 }
160}
161
162static int check_ext_kekl(struct tape390_kekl *kekl)
163{
164 if (kekl->type == TAPE390_KEKL_TYPE_NONE)
165 goto invalid;
166 if (kekl->type > TAPE390_KEKL_TYPE_HASH)
167 goto invalid;
168 if (kekl->type_on_tape == TAPE390_KEKL_TYPE_NONE)
169 goto invalid;
170 if (kekl->type_on_tape > TAPE390_KEKL_TYPE_HASH)
171 goto invalid;
172 if ((kekl->type == TAPE390_KEKL_TYPE_HASH) &&
173 (kekl->type_on_tape == TAPE390_KEKL_TYPE_LABEL))
174 goto invalid;
175
176 return 0;
177invalid:
178 return -EINVAL;
179}
180
181static int check_ext_kekl_pair(struct tape390_kekl_pair *kekls)
182{
183 if (check_ext_kekl(&kekls->kekl[0]))
184 goto invalid;
185 if (check_ext_kekl(&kekls->kekl[1]))
186 goto invalid;
187
188 return 0;
189invalid:
190 return -EINVAL;
191}
192
193/*
194 * Query KEKLs
195 */
196static int tape_3592_kekl_query(struct tape_device *device,
197 struct tape390_kekl_pair *ext_kekls)
198{
199 struct tape_request *request;
200 struct tape3592_kekl_query_order *order;
201 struct tape3592_kekl_query_data *int_kekls;
202 int rc;
203
204 DBF_EVENT(6, "tape3592_kekl_query\n");
205 int_kekls = kmalloc(sizeof(*int_kekls), GFP_KERNEL|GFP_DMA);
206 if (!int_kekls)
207 return -ENOMEM;
208 request = tape_alloc_request(2, sizeof(*order));
209 if (IS_ERR(request)) {
210 rc = PTR_ERR(request);
211 goto fail_malloc;
212 }
213 order = request->cpdata;
214 memset(order,0,sizeof(*order));
215 order->code = 0xe2;
216 order->max_count = 2;
217 request->op = TO_KEKL_QUERY;
218 tape_ccw_cc(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);
219 tape_ccw_end(request->cpaddr + 1, READ_SS_DATA, sizeof(*int_kekls),
220 int_kekls);
221 rc = tape_do_io(device, request);
222 if (rc)
223 goto fail_request;
224 int_to_ext_kekl_pair(&int_kekls->kekls, ext_kekls);
225
226 rc = 0;
227fail_request:
228 tape_free_request(request);
229fail_malloc:
230 kfree(int_kekls);
231 return rc;
232}
233
234/*
235 * IOCTL: Query KEKLs
236 */
237static int tape_3592_ioctl_kekl_query(struct tape_device *device,
238 unsigned long arg)
239{
240 int rc;
241 struct tape390_kekl_pair *ext_kekls;
242
243 DBF_EVENT(6, "tape_3592_ioctl_kekl_query\n");
244 if (!crypt_supported(device))
245 return -ENOSYS;
246 if (!crypt_enabled(device))
247 return -EUNATCH;
248 ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL);
249 if (!ext_kekls)
250 return -ENOMEM;
251 rc = tape_3592_kekl_query(device, ext_kekls);
252 if (rc != 0)
253 goto fail;
254 if (copy_to_user((char __user *) arg, ext_kekls, sizeof(*ext_kekls))) {
255 rc = -EFAULT;
256 goto fail;
257 }
258 rc = 0;
259fail:
260 kfree(ext_kekls);
261 return rc;
262}
263
264static int tape_3590_mttell(struct tape_device *device, int mt_count);
265
266/*
267 * Set KEKLs
268 */
269static int tape_3592_kekl_set(struct tape_device *device,
270 struct tape390_kekl_pair *ext_kekls)
271{
272 struct tape_request *request;
273 struct tape3592_kekl_set_order *order;
274
275 DBF_EVENT(6, "tape3592_kekl_set\n");
276 if (check_ext_kekl_pair(ext_kekls)) {
277 DBF_EVENT(6, "invalid kekls\n");
278 return -EINVAL;
279 }
280 if (tape_3590_mttell(device, 0) != 0)
281 return -EBADSLT;
282 request = tape_alloc_request(1, sizeof(*order));
283 if (IS_ERR(request))
284 return PTR_ERR(request);
285 order = request->cpdata;
286 memset(order, 0, sizeof(*order));
287 order->code = 0xe3;
288 order->kekls.count = 2;
289 ext_to_int_kekl(&ext_kekls->kekl[0], &order->kekls.kekl[0]);
290 ext_to_int_kekl(&ext_kekls->kekl[1], &order->kekls.kekl[1]);
291 request->op = TO_KEKL_SET;
292 tape_ccw_end(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);
293
294 return tape_do_io_free(device, request);
295}
296
297/*
298 * IOCTL: Set KEKLs
299 */
300static int tape_3592_ioctl_kekl_set(struct tape_device *device,
301 unsigned long arg)
302{
303 int rc;
304 struct tape390_kekl_pair *ext_kekls;
305
306 DBF_EVENT(6, "tape_3592_ioctl_kekl_set\n");
307 if (!crypt_supported(device))
308 return -ENOSYS;
309 if (!crypt_enabled(device))
310 return -EUNATCH;
311 ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL);
312 if (!ext_kekls)
313 return -ENOMEM;
314 if (copy_from_user(ext_kekls, (char __user *)arg, sizeof(*ext_kekls))) {
315 rc = -EFAULT;
316 goto out;
317 }
318 rc = tape_3592_kekl_set(device, ext_kekls);
319out:
320 kfree(ext_kekls);
321 return rc;
322}
323
324/*
325 * Enable encryption
326 */
327static int tape_3592_enable_crypt(struct tape_device *device)
328{
329 struct tape_request *request;
330 char *data;
331
332 DBF_EVENT(6, "tape_3592_enable_crypt\n");
333 if (!crypt_supported(device))
334 return -ENOSYS;
335 request = tape_alloc_request(2, 72);
336 if (IS_ERR(request))
337 return PTR_ERR(request);
338 data = request->cpdata;
339 memset(data,0,72);
340
341 data[0] = 0x05;
342 data[36 + 0] = 0x03;
343 data[36 + 1] = 0x03;
344 data[36 + 4] = 0x40;
345 data[36 + 6] = 0x01;
346 data[36 + 14] = 0x2f;
347 data[36 + 18] = 0xc3;
348 data[36 + 35] = 0x72;
349 request->op = TO_CRYPT_ON;
350 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
351 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
352 return tape_do_io_free(device, request);
353}
354
355/*
356 * Disable encryption
357 */
358static int tape_3592_disable_crypt(struct tape_device *device)
359{
360 struct tape_request *request;
361 char *data;
362
363 DBF_EVENT(6, "tape_3592_disable_crypt\n");
364 if (!crypt_supported(device))
365 return -ENOSYS;
366 request = tape_alloc_request(2, 72);
367 if (IS_ERR(request))
368 return PTR_ERR(request);
369 data = request->cpdata;
370 memset(data,0,72);
371
372 data[0] = 0x05;
373 data[36 + 0] = 0x03;
374 data[36 + 1] = 0x03;
375 data[36 + 35] = 0x32;
376
377 request->op = TO_CRYPT_OFF;
378 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
379 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
380
381 return tape_do_io_free(device, request);
382}
383
384/*
385 * IOCTL: Set encryption status
386 */
387static int tape_3592_ioctl_crypt_set(struct tape_device *device,
388 unsigned long arg)
389{
390 struct tape390_crypt_info info;
391
392 DBF_EVENT(6, "tape_3592_ioctl_crypt_set\n");
393 if (!crypt_supported(device))
394 return -ENOSYS;
395 if (copy_from_user(&info, (char __user *)arg, sizeof(info)))
396 return -EFAULT;
397 if (info.status & ~TAPE390_CRYPT_ON_MASK)
398 return -EINVAL;
399 if (info.status & TAPE390_CRYPT_ON_MASK)
400 return tape_3592_enable_crypt(device);
401 else
402 return tape_3592_disable_crypt(device);
403}
404
405static int tape_3590_sense_medium(struct tape_device *device);
406
407/*
408 * IOCTL: Query enryption status
409 */
410static int tape_3592_ioctl_crypt_query(struct tape_device *device,
411 unsigned long arg)
412{
413 DBF_EVENT(6, "tape_3592_ioctl_crypt_query\n");
414 if (!crypt_supported(device))
415 return -ENOSYS;
416 tape_3590_sense_medium(device);
417 if (copy_to_user((char __user *) arg, &TAPE_3590_CRYPT_INFO(device),
418 sizeof(TAPE_3590_CRYPT_INFO(device))))
419 return -EFAULT;
420 else
421 return 0;
422}
423
97/* 424/*
98 * 3590 IOCTL Overload 425 * 3590 IOCTL Overload
99 */ 426 */
@@ -109,6 +436,14 @@ tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
109 436
110 return tape_std_display(device, &disp); 437 return tape_std_display(device, &disp);
111 } 438 }
439 case TAPE390_KEKL_SET:
440 return tape_3592_ioctl_kekl_set(device, arg);
441 case TAPE390_KEKL_QUERY:
442 return tape_3592_ioctl_kekl_query(device, arg);
443 case TAPE390_CRYPT_SET:
444 return tape_3592_ioctl_crypt_set(device, arg);
445 case TAPE390_CRYPT_QUERY:
446 return tape_3592_ioctl_crypt_query(device, arg);
112 default: 447 default:
113 return -EINVAL; /* no additional ioctls */ 448 return -EINVAL; /* no additional ioctls */
114 } 449 }
@@ -248,6 +583,12 @@ tape_3590_work_handler(struct work_struct *work)
248 case TO_READ_ATTMSG: 583 case TO_READ_ATTMSG:
249 tape_3590_read_attmsg(p->device); 584 tape_3590_read_attmsg(p->device);
250 break; 585 break;
586 case TO_CRYPT_ON:
587 tape_3592_enable_crypt(p->device);
588 break;
589 case TO_CRYPT_OFF:
590 tape_3592_disable_crypt(p->device);
591 break;
251 default: 592 default:
252 DBF_EVENT(3, "T3590: work handler undefined for " 593 DBF_EVENT(3, "T3590: work handler undefined for "
253 "operation 0x%02x\n", p->op); 594 "operation 0x%02x\n", p->op);
@@ -365,6 +706,33 @@ tape_3590_check_locate(struct tape_device *device, struct tape_request *request)
365} 706}
366#endif 707#endif
367 708
709static void tape_3590_med_state_set(struct tape_device *device,
710 struct tape_3590_med_sense *sense)
711{
712 struct tape390_crypt_info *c_info;
713
714 c_info = &TAPE_3590_CRYPT_INFO(device);
715
716 if (sense->masst == MSENSE_UNASSOCIATED) {
717 tape_med_state_set(device, MS_UNLOADED);
718 TAPE_3590_CRYPT_INFO(device).medium_status = 0;
719 return;
720 }
721 if (sense->masst != MSENSE_ASSOCIATED_MOUNT) {
722 PRINT_ERR("Unknown medium state: %x\n", sense->masst);
723 return;
724 }
725 tape_med_state_set(device, MS_LOADED);
726 c_info->medium_status |= TAPE390_MEDIUM_LOADED_MASK;
727 if (sense->flags & MSENSE_CRYPT_MASK) {
728 PRINT_INFO("Medium is encrypted (%04x)\n", sense->flags);
729 c_info->medium_status |= TAPE390_MEDIUM_ENCRYPTED_MASK;
730 } else {
731 DBF_EVENT(6, "Medium is not encrypted %04x\n", sense->flags);
732 c_info->medium_status &= ~TAPE390_MEDIUM_ENCRYPTED_MASK;
733 }
734}
735
368/* 736/*
369 * The done handler is called at device/channel end and wakes up the sleeping 737 * The done handler is called at device/channel end and wakes up the sleeping
370 * process 738 * process
@@ -372,9 +740,10 @@ tape_3590_check_locate(struct tape_device *device, struct tape_request *request)
372static int 740static int
373tape_3590_done(struct tape_device *device, struct tape_request *request) 741tape_3590_done(struct tape_device *device, struct tape_request *request)
374{ 742{
375 struct tape_3590_med_sense *sense; 743 struct tape_3590_disc_data *disc_data;
376 744
377 DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]); 745 DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]);
746 disc_data = device->discdata;
378 747
379 switch (request->op) { 748 switch (request->op) {
380 case TO_BSB: 749 case TO_BSB:
@@ -394,13 +763,20 @@ tape_3590_done(struct tape_device *device, struct tape_request *request)
394 break; 763 break;
395 case TO_RUN: 764 case TO_RUN:
396 tape_med_state_set(device, MS_UNLOADED); 765 tape_med_state_set(device, MS_UNLOADED);
766 tape_3590_schedule_work(device, TO_CRYPT_OFF);
397 break; 767 break;
398 case TO_MSEN: 768 case TO_MSEN:
399 sense = (struct tape_3590_med_sense *) request->cpdata; 769 tape_3590_med_state_set(device, request->cpdata);
400 if (sense->masst == MSENSE_UNASSOCIATED) 770 break;
401 tape_med_state_set(device, MS_UNLOADED); 771 case TO_CRYPT_ON:
402 if (sense->masst == MSENSE_ASSOCIATED_MOUNT) 772 TAPE_3590_CRYPT_INFO(device).status
403 tape_med_state_set(device, MS_LOADED); 773 |= TAPE390_CRYPT_ON_MASK;
774 *(device->modeset_byte) |= 0x03;
775 break;
776 case TO_CRYPT_OFF:
777 TAPE_3590_CRYPT_INFO(device).status
778 &= ~TAPE390_CRYPT_ON_MASK;
779 *(device->modeset_byte) &= ~0x03;
404 break; 780 break;
405 case TO_RBI: /* RBI seems to succeed even without medium loaded. */ 781 case TO_RBI: /* RBI seems to succeed even without medium loaded. */
406 case TO_NOP: /* Same to NOP. */ 782 case TO_NOP: /* Same to NOP. */
@@ -409,8 +785,9 @@ tape_3590_done(struct tape_device *device, struct tape_request *request)
409 case TO_DIS: 785 case TO_DIS:
410 case TO_ASSIGN: 786 case TO_ASSIGN:
411 case TO_UNASSIGN: 787 case TO_UNASSIGN:
412 break;
413 case TO_SIZE: 788 case TO_SIZE:
789 case TO_KEKL_SET:
790 case TO_KEKL_QUERY:
414 break; 791 break;
415 } 792 }
416 return TAPE_IO_SUCCESS; 793 return TAPE_IO_SUCCESS;
@@ -540,10 +917,8 @@ static int
540tape_3590_erp_long_busy(struct tape_device *device, 917tape_3590_erp_long_busy(struct tape_device *device,
541 struct tape_request *request, struct irb *irb) 918 struct tape_request *request, struct irb *irb)
542{ 919{
543 /* FIXME: how about WAITING for a minute ? */ 920 DBF_EVENT(6, "Device is busy\n");
544 PRINT_WARN("(%s): Device is busy! Please wait a minute!\n", 921 return TAPE_IO_LONG_BUSY;
545 device->cdev->dev.bus_id);
546 return tape_3590_erp_basic(device, request, irb, -EBUSY);
547} 922}
548 923
549/* 924/*
@@ -951,6 +1326,34 @@ tape_3590_print_era_msg(struct tape_device *device, struct irb *irb)
951 device->cdev->dev.bus_id, sense->mc); 1326 device->cdev->dev.bus_id, sense->mc);
952} 1327}
953 1328
1329static int tape_3590_crypt_error(struct tape_device *device,
1330 struct tape_request *request, struct irb *irb)
1331{
1332 u8 cu_rc, ekm_rc1;
1333 u16 ekm_rc2;
1334 u32 drv_rc;
1335 char *bus_id, *sense;
1336
1337 sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data;
1338 bus_id = device->cdev->dev.bus_id;
1339 cu_rc = sense[0];
1340 drv_rc = *((u32*) &sense[5]) & 0xffffff;
1341 ekm_rc1 = sense[9];
1342 ekm_rc2 = *((u16*) &sense[10]);
1343 if ((cu_rc == 0) && (ekm_rc2 == 0xee31))
1344 /* key not defined on EKM */
1345 return tape_3590_erp_basic(device, request, irb, -EKEYREJECTED);
1346 if ((cu_rc == 1) || (cu_rc == 2))
1347 /* No connection to EKM */
1348 return tape_3590_erp_basic(device, request, irb, -ENOTCONN);
1349
1350 PRINT_ERR("(%s): Unable to get encryption key from EKM\n", bus_id);
1351 PRINT_ERR("(%s): CU=%02X DRIVE=%06X EKM=%02X:%04X\n", bus_id, cu_rc,
1352 drv_rc, ekm_rc1, ekm_rc2);
1353
1354 return tape_3590_erp_basic(device, request, irb, -ENOKEY);
1355}
1356
954/* 1357/*
955 * 3590 error Recovery routine: 1358 * 3590 error Recovery routine:
956 * If possible, it tries to recover from the error. If this is not possible, 1359 * If possible, it tries to recover from the error. If this is not possible,
@@ -979,6 +1382,8 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
979 1382
980 sense = (struct tape_3590_sense *) irb->ecw; 1383 sense = (struct tape_3590_sense *) irb->ecw;
981 1384
1385 DBF_EVENT(6, "Unit Check: RQC = %x\n", sense->rc_rqc);
1386
982 /* 1387 /*
983 * First check all RC-QRCs where we want to do something special 1388 * First check all RC-QRCs where we want to do something special
984 * - "break": basic error recovery is done 1389 * - "break": basic error recovery is done
@@ -999,6 +1404,8 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
999 case 0x2231: 1404 case 0x2231:
1000 tape_3590_print_era_msg(device, irb); 1405 tape_3590_print_era_msg(device, irb);
1001 return tape_3590_erp_special_interrupt(device, request, irb); 1406 return tape_3590_erp_special_interrupt(device, request, irb);
1407 case 0x2240:
1408 return tape_3590_crypt_error(device, request, irb);
1002 1409
1003 case 0x3010: 1410 case 0x3010:
1004 DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n", 1411 DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n",
@@ -1020,6 +1427,7 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1020 DBF_EVENT(2, "(%08x): Rewind Unload complete\n", 1427 DBF_EVENT(2, "(%08x): Rewind Unload complete\n",
1021 device->cdev_id); 1428 device->cdev_id);
1022 tape_med_state_set(device, MS_UNLOADED); 1429 tape_med_state_set(device, MS_UNLOADED);
1430 tape_3590_schedule_work(device, TO_CRYPT_OFF);
1023 return tape_3590_erp_basic(device, request, irb, 0); 1431 return tape_3590_erp_basic(device, request, irb, 0);
1024 1432
1025 case 0x4010: 1433 case 0x4010:
@@ -1030,9 +1438,15 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1030 PRINT_WARN("(%s): Tape operation when medium not loaded\n", 1438 PRINT_WARN("(%s): Tape operation when medium not loaded\n",
1031 device->cdev->dev.bus_id); 1439 device->cdev->dev.bus_id);
1032 tape_med_state_set(device, MS_UNLOADED); 1440 tape_med_state_set(device, MS_UNLOADED);
1441 tape_3590_schedule_work(device, TO_CRYPT_OFF);
1033 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); 1442 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
1034 case 0x4012: /* Device Long Busy */ 1443 case 0x4012: /* Device Long Busy */
1444 /* XXX: Also use long busy handling here? */
1445 DBF_EVENT(6, "(%08x): LONG BUSY\n", device->cdev_id);
1035 tape_3590_print_era_msg(device, irb); 1446 tape_3590_print_era_msg(device, irb);
1447 return tape_3590_erp_basic(device, request, irb, -EBUSY);
1448 case 0x4014:
1449 DBF_EVENT(6, "(%08x): Crypto LONG BUSY\n", device->cdev_id);
1036 return tape_3590_erp_long_busy(device, request, irb); 1450 return tape_3590_erp_long_busy(device, request, irb);
1037 1451
1038 case 0x5010: 1452 case 0x5010:
@@ -1064,6 +1478,7 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1064 case 0x5120: 1478 case 0x5120:
1065 case 0x1120: 1479 case 0x1120:
1066 tape_med_state_set(device, MS_UNLOADED); 1480 tape_med_state_set(device, MS_UNLOADED);
1481 tape_3590_schedule_work(device, TO_CRYPT_OFF);
1067 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); 1482 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
1068 1483
1069 case 0x6020: 1484 case 0x6020:
@@ -1142,21 +1557,47 @@ tape_3590_setup_device(struct tape_device *device)
1142{ 1557{
1143 int rc; 1558 int rc;
1144 struct tape_3590_disc_data *data; 1559 struct tape_3590_disc_data *data;
1560 char *rdc_data;
1145 1561
1146 DBF_EVENT(6, "3590 device setup\n"); 1562 DBF_EVENT(6, "3590 device setup\n");
1147 data = kmalloc(sizeof(struct tape_3590_disc_data), 1563 data = kzalloc(sizeof(struct tape_3590_disc_data), GFP_KERNEL | GFP_DMA);
1148 GFP_KERNEL | GFP_DMA);
1149 if (data == NULL) 1564 if (data == NULL)
1150 return -ENOMEM; 1565 return -ENOMEM;
1151 data->read_back_op = READ_PREVIOUS; 1566 data->read_back_op = READ_PREVIOUS;
1152 device->discdata = data; 1567 device->discdata = data;
1153 1568
1154 if ((rc = tape_std_assign(device)) == 0) { 1569 rdc_data = kmalloc(64, GFP_KERNEL | GFP_DMA);
1155 /* Try to find out if medium is loaded */ 1570 if (!rdc_data) {
1156 if ((rc = tape_3590_sense_medium(device)) != 0) 1571 rc = -ENOMEM;
1157 DBF_LH(3, "3590 medium sense returned %d\n", rc); 1572 goto fail_kmalloc;
1573 }
1574 rc = read_dev_chars(device->cdev, (void**)&rdc_data, 64);
1575 if (rc) {
1576 DBF_LH(3, "Read device characteristics failed!\n");
1577 goto fail_kmalloc;
1578 }
1579 rc = tape_std_assign(device);
1580 if (rc)
1581 goto fail_rdc_data;
1582 if (rdc_data[31] == 0x13) {
1583 PRINT_INFO("Device has crypto support\n");
1584 data->crypt_info.capability |= TAPE390_CRYPT_SUPPORTED_MASK;
1585 tape_3592_disable_crypt(device);
1586 } else {
1587 DBF_EVENT(6, "Device has NO crypto support\n");
1158 } 1588 }
1589 /* Try to find out if medium is loaded */
1590 rc = tape_3590_sense_medium(device);
1591 if (rc) {
1592 DBF_LH(3, "3590 medium sense returned %d\n", rc);
1593 goto fail_rdc_data;
1594 }
1595 return 0;
1159 1596
1597fail_rdc_data:
1598 kfree(rdc_data);
1599fail_kmalloc:
1600 kfree(data);
1160 return rc; 1601 return rc;
1161} 1602}
1162 1603
diff --git a/drivers/s390/char/tape_3590.h b/drivers/s390/char/tape_3590.h
index cf274b9445a6..aa5138807af1 100644
--- a/drivers/s390/char/tape_3590.h
+++ b/drivers/s390/char/tape_3590.h
@@ -2,7 +2,7 @@
2 * drivers/s390/char/tape_3590.h 2 * drivers/s390/char/tape_3590.h
3 * tape device discipline for 3590 tapes. 3 * tape device discipline for 3590 tapes.
4 * 4 *
5 * Copyright (C) IBM Corp. 2001,2006 5 * Copyright IBM Corp. 2001,2006
6 * Author(s): Stefan Bader <shbader@de.ibm.com> 6 * Author(s): Stefan Bader <shbader@de.ibm.com>
7 * Michael Holzheu <holzheu@de.ibm.com> 7 * Michael Holzheu <holzheu@de.ibm.com>
8 * Martin Schwidefsky <schwidefsky@de.ibm.com> 8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -38,16 +38,22 @@
38#define MSENSE_UNASSOCIATED 0x00 38#define MSENSE_UNASSOCIATED 0x00
39#define MSENSE_ASSOCIATED_MOUNT 0x01 39#define MSENSE_ASSOCIATED_MOUNT 0x01
40#define MSENSE_ASSOCIATED_UMOUNT 0x02 40#define MSENSE_ASSOCIATED_UMOUNT 0x02
41#define MSENSE_CRYPT_MASK 0x00000010
41 42
42#define TAPE_3590_MAX_MSG 0xb0 43#define TAPE_3590_MAX_MSG 0xb0
43 44
44/* Datatypes */ 45/* Datatypes */
45 46
46struct tape_3590_disc_data { 47struct tape_3590_disc_data {
47 unsigned char modeset_byte; 48 struct tape390_crypt_info crypt_info;
48 int read_back_op; 49 int read_back_op;
49}; 50};
50 51
52#define TAPE_3590_CRYPT_INFO(device) \
53 ((struct tape_3590_disc_data*)(device->discdata))->crypt_info
54#define TAPE_3590_READ_BACK_OP(device) \
55 ((struct tape_3590_disc_data*)(device->discdata))->read_back_op
56
51struct tape_3590_sense { 57struct tape_3590_sense {
52 58
53 unsigned int command_rej:1; 59 unsigned int command_rej:1;
@@ -118,7 +124,48 @@ struct tape_3590_sense {
118struct tape_3590_med_sense { 124struct tape_3590_med_sense {
119 unsigned int macst:4; 125 unsigned int macst:4;
120 unsigned int masst:4; 126 unsigned int masst:4;
121 char pad[127]; 127 char pad1[7];
128 unsigned int flags;
129 char pad2[116];
130} __attribute__ ((packed));
131
132/* Datastructures for 3592 encryption support */
133
134struct tape3592_kekl {
135 __u8 flags;
136 char label[64];
137} __attribute__ ((packed));
138
139struct tape3592_kekl_pair {
140 __u8 count;
141 struct tape3592_kekl kekl[2];
142} __attribute__ ((packed));
143
144struct tape3592_kekl_query_data {
145 __u16 len;
146 __u8 fmt;
147 __u8 mc;
148 __u32 id;
149 __u8 flags;
150 struct tape3592_kekl_pair kekls;
151 char reserved[116];
152} __attribute__ ((packed));
153
154struct tape3592_kekl_query_order {
155 __u8 code;
156 __u8 flags;
157 char reserved1[2];
158 __u8 max_count;
159 char reserved2[35];
160} __attribute__ ((packed));
161
162struct tape3592_kekl_set_order {
163 __u8 code;
164 __u8 flags;
165 char reserved1[2];
166 __u8 op;
167 struct tape3592_kekl_pair kekls;
168 char reserved2[120];
122} __attribute__ ((packed)); 169} __attribute__ ((packed));
123 170
124#endif /* _TAPE_3590_H */ 171#endif /* _TAPE_3590_H */
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index c8a89b3b87d4..dd0ecaed592e 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -73,7 +73,7 @@ tapeblock_trigger_requeue(struct tape_device *device)
73/* 73/*
74 * Post finished request. 74 * Post finished request.
75 */ 75 */
76static inline void 76static void
77tapeblock_end_request(struct request *req, int uptodate) 77tapeblock_end_request(struct request *req, int uptodate)
78{ 78{
79 if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) 79 if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
@@ -108,7 +108,7 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data)
108/* 108/*
109 * Feed the tape device CCW queue with requests supplied in a list. 109 * Feed the tape device CCW queue with requests supplied in a list.
110 */ 110 */
111static inline int 111static int
112tapeblock_start_request(struct tape_device *device, struct request *req) 112tapeblock_start_request(struct tape_device *device, struct request *req)
113{ 113{
114 struct tape_request * ccw_req; 114 struct tape_request * ccw_req;
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 31198c8f2718..9faea04e11e9 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -3,7 +3,7 @@
3 * character device frontend for tape device driver 3 * character device frontend for tape device driver
4 * 4 *
5 * S390 and zSeries version 5 * S390 and zSeries version
6 * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright IBM Corp. 2001,2006
7 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Michael Holzheu <holzheu@de.ibm.com> 8 * Michael Holzheu <holzheu@de.ibm.com>
9 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
@@ -89,22 +89,7 @@ tapechar_cleanup_device(struct tape_device *device)
89 device->nt = NULL; 89 device->nt = NULL;
90} 90}
91 91
92/* 92static int
93 * Terminate write command (we write two TMs and skip backward over last)
94 * This ensures that the tape is always correctly terminated.
95 * When the user writes afterwards a new file, he will overwrite the
96 * second TM and therefore one TM will remain to separate the
97 * two files on the tape...
98 */
99static inline void
100tapechar_terminate_write(struct tape_device *device)
101{
102 if (tape_mtop(device, MTWEOF, 1) == 0 &&
103 tape_mtop(device, MTWEOF, 1) == 0)
104 tape_mtop(device, MTBSR, 1);
105}
106
107static inline int
108tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) 93tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
109{ 94{
110 struct idal_buffer *new; 95 struct idal_buffer *new;
@@ -137,7 +122,7 @@ tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
137/* 122/*
138 * Tape device read function 123 * Tape device read function
139 */ 124 */
140ssize_t 125static ssize_t
141tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) 126tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
142{ 127{
143 struct tape_device *device; 128 struct tape_device *device;
@@ -201,7 +186,7 @@ tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
201/* 186/*
202 * Tape device write function 187 * Tape device write function
203 */ 188 */
204ssize_t 189static ssize_t
205tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos) 190tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos)
206{ 191{
207 struct tape_device *device; 192 struct tape_device *device;
@@ -291,7 +276,7 @@ tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t
291/* 276/*
292 * Character frontend tape device open function. 277 * Character frontend tape device open function.
293 */ 278 */
294int 279static int
295tapechar_open (struct inode *inode, struct file *filp) 280tapechar_open (struct inode *inode, struct file *filp)
296{ 281{
297 struct tape_device *device; 282 struct tape_device *device;
@@ -326,7 +311,7 @@ tapechar_open (struct inode *inode, struct file *filp)
326 * Character frontend tape device release function. 311 * Character frontend tape device release function.
327 */ 312 */
328 313
329int 314static int
330tapechar_release(struct inode *inode, struct file *filp) 315tapechar_release(struct inode *inode, struct file *filp)
331{ 316{
332 struct tape_device *device; 317 struct tape_device *device;
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index c6c2e918b990..e2a8a1a04bab 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -3,7 +3,7 @@
3 * basic function of the tape device driver 3 * basic function of the tape device driver
4 * 4 *
5 * S390 and zSeries version 5 * S390 and zSeries version
6 * Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright IBM Corp. 2001,2006
7 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Michael Holzheu <holzheu@de.ibm.com> 8 * Michael Holzheu <holzheu@de.ibm.com>
9 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
@@ -26,9 +26,11 @@
26#include "tape_std.h" 26#include "tape_std.h"
27 27
28#define PRINTK_HEADER "TAPE_CORE: " 28#define PRINTK_HEADER "TAPE_CORE: "
29#define LONG_BUSY_TIMEOUT 180 /* seconds */
29 30
30static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *); 31static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
31static void tape_delayed_next_request(struct work_struct *); 32static void tape_delayed_next_request(struct work_struct *);
33static void tape_long_busy_timeout(unsigned long data);
32 34
33/* 35/*
34 * One list to contain all tape devices of all disciplines, so 36 * One list to contain all tape devices of all disciplines, so
@@ -69,10 +71,12 @@ const char *tape_op_verbose[TO_SIZE] =
69 [TO_LOAD] = "LOA", [TO_READ_CONFIG] = "RCF", 71 [TO_LOAD] = "LOA", [TO_READ_CONFIG] = "RCF",
70 [TO_READ_ATTMSG] = "RAT", 72 [TO_READ_ATTMSG] = "RAT",
71 [TO_DIS] = "DIS", [TO_ASSIGN] = "ASS", 73 [TO_DIS] = "DIS", [TO_ASSIGN] = "ASS",
72 [TO_UNASSIGN] = "UAS" 74 [TO_UNASSIGN] = "UAS", [TO_CRYPT_ON] = "CON",
75 [TO_CRYPT_OFF] = "COF", [TO_KEKL_SET] = "KLS",
76 [TO_KEKL_QUERY] = "KLQ",
73}; 77};
74 78
75static inline int 79static int
76busid_to_int(char *bus_id) 80busid_to_int(char *bus_id)
77{ 81{
78 int dec; 82 int dec;
@@ -252,7 +256,7 @@ tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
252/* 256/*
253 * Stop running ccw. Has to be called with the device lock held. 257 * Stop running ccw. Has to be called with the device lock held.
254 */ 258 */
255static inline int 259static int
256__tape_cancel_io(struct tape_device *device, struct tape_request *request) 260__tape_cancel_io(struct tape_device *device, struct tape_request *request)
257{ 261{
258 int retries; 262 int retries;
@@ -346,6 +350,9 @@ tape_generic_online(struct tape_device *device,
346 return -EINVAL; 350 return -EINVAL;
347 } 351 }
348 352
353 init_timer(&device->lb_timeout);
354 device->lb_timeout.function = tape_long_busy_timeout;
355
349 /* Let the discipline have a go at the device. */ 356 /* Let the discipline have a go at the device. */
350 device->discipline = discipline; 357 device->discipline = discipline;
351 if (!try_module_get(discipline->owner)) { 358 if (!try_module_get(discipline->owner)) {
@@ -385,7 +392,7 @@ out:
385 return rc; 392 return rc;
386} 393}
387 394
388static inline void 395static void
389tape_cleanup_device(struct tape_device *device) 396tape_cleanup_device(struct tape_device *device)
390{ 397{
391 tapeblock_cleanup_device(device); 398 tapeblock_cleanup_device(device);
@@ -563,7 +570,7 @@ tape_generic_probe(struct ccw_device *cdev)
563 return ret; 570 return ret;
564} 571}
565 572
566static inline void 573static void
567__tape_discard_requests(struct tape_device *device) 574__tape_discard_requests(struct tape_device *device)
568{ 575{
569 struct tape_request * request; 576 struct tape_request * request;
@@ -703,7 +710,7 @@ tape_free_request (struct tape_request * request)
703 kfree(request); 710 kfree(request);
704} 711}
705 712
706static inline int 713static int
707__tape_start_io(struct tape_device *device, struct tape_request *request) 714__tape_start_io(struct tape_device *device, struct tape_request *request)
708{ 715{
709 int rc; 716 int rc;
@@ -733,7 +740,7 @@ __tape_start_io(struct tape_device *device, struct tape_request *request)
733 return rc; 740 return rc;
734} 741}
735 742
736static inline void 743static void
737__tape_start_next_request(struct tape_device *device) 744__tape_start_next_request(struct tape_device *device)
738{ 745{
739 struct list_head *l, *n; 746 struct list_head *l, *n;
@@ -801,7 +808,23 @@ tape_delayed_next_request(struct work_struct *work)
801 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 808 spin_unlock_irq(get_ccwdev_lock(device->cdev));
802} 809}
803 810
804static inline void 811static void tape_long_busy_timeout(unsigned long data)
812{
813 struct tape_request *request;
814 struct tape_device *device;
815
816 device = (struct tape_device *) data;
817 spin_lock_irq(get_ccwdev_lock(device->cdev));
818 request = list_entry(device->req_queue.next, struct tape_request, list);
819 if (request->status != TAPE_REQUEST_LONG_BUSY)
820 BUG();
821 DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
822 __tape_start_next_request(device);
823 device->lb_timeout.data = (unsigned long) tape_put_device(device);
824 spin_unlock_irq(get_ccwdev_lock(device->cdev));
825}
826
827static void
805__tape_end_request( 828__tape_end_request(
806 struct tape_device * device, 829 struct tape_device * device,
807 struct tape_request * request, 830 struct tape_request * request,
@@ -878,7 +901,7 @@ tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
878 * and starts it if the tape is idle. Has to be called with 901 * and starts it if the tape is idle. Has to be called with
879 * the device lock held. 902 * the device lock held.
880 */ 903 */
881static inline int 904static int
882__tape_start_request(struct tape_device *device, struct tape_request *request) 905__tape_start_request(struct tape_device *device, struct tape_request *request)
883{ 906{
884 int rc; 907 int rc;
@@ -1094,7 +1117,22 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1094 /* May be an unsolicited irq */ 1117 /* May be an unsolicited irq */
1095 if(request != NULL) 1118 if(request != NULL)
1096 request->rescnt = irb->scsw.count; 1119 request->rescnt = irb->scsw.count;
1097 1120 else if ((irb->scsw.dstat == 0x85 || irb->scsw.dstat == 0x80) &&
1121 !list_empty(&device->req_queue)) {
1122 /* Not Ready to Ready after long busy ? */
1123 struct tape_request *req;
1124 req = list_entry(device->req_queue.next,
1125 struct tape_request, list);
1126 if (req->status == TAPE_REQUEST_LONG_BUSY) {
1127 DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
1128 if (del_timer(&device->lb_timeout)) {
1129 device->lb_timeout.data = (unsigned long)
1130 tape_put_device(device);
1131 __tape_start_next_request(device);
1132 }
1133 return;
1134 }
1135 }
1098 if (irb->scsw.dstat != 0x0c) { 1136 if (irb->scsw.dstat != 0x0c) {
1099 /* Set the 'ONLINE' flag depending on sense byte 1 */ 1137 /* Set the 'ONLINE' flag depending on sense byte 1 */
1100 if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE) 1138 if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
@@ -1142,6 +1180,15 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1142 break; 1180 break;
1143 case TAPE_IO_PENDING: 1181 case TAPE_IO_PENDING:
1144 break; 1182 break;
1183 case TAPE_IO_LONG_BUSY:
1184 device->lb_timeout.data =
1185 (unsigned long)tape_get_device_reference(device);
1186 device->lb_timeout.expires = jiffies +
1187 LONG_BUSY_TIMEOUT * HZ;
1188 DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
1189 add_timer(&device->lb_timeout);
1190 request->status = TAPE_REQUEST_LONG_BUSY;
1191 break;
1145 case TAPE_IO_RETRY: 1192 case TAPE_IO_RETRY:
1146 rc = __tape_start_io(device, request); 1193 rc = __tape_start_io(device, request);
1147 if (rc) 1194 if (rc)
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 09844621edc0..bc33068b9ce2 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -36,7 +36,7 @@
36struct tty_driver *tty3270_driver; 36struct tty_driver *tty3270_driver;
37static int tty3270_max_index; 37static int tty3270_max_index;
38 38
39struct raw3270_fn tty3270_fn; 39static struct raw3270_fn tty3270_fn;
40 40
41struct tty3270_cell { 41struct tty3270_cell {
42 unsigned char character; 42 unsigned char character;
@@ -119,8 +119,7 @@ static void tty3270_update(struct tty3270 *);
119/* 119/*
120 * Setup timeout for a device. On timeout trigger an update. 120 * Setup timeout for a device. On timeout trigger an update.
121 */ 121 */
122void 122static void tty3270_set_timer(struct tty3270 *tp, int expires)
123tty3270_set_timer(struct tty3270 *tp, int expires)
124{ 123{
125 if (expires == 0) { 124 if (expires == 0) {
126 if (timer_pending(&tp->timer) && del_timer(&tp->timer)) 125 if (timer_pending(&tp->timer) && del_timer(&tp->timer))
@@ -841,7 +840,7 @@ tty3270_del_views(void)
841 } 840 }
842} 841}
843 842
844struct raw3270_fn tty3270_fn = { 843static struct raw3270_fn tty3270_fn = {
845 .activate = tty3270_activate, 844 .activate = tty3270_activate,
846 .deactivate = tty3270_deactivate, 845 .deactivate = tty3270_deactivate,
847 .intv = (void *) tty3270_irq, 846 .intv = (void *) tty3270_irq,
@@ -1754,8 +1753,7 @@ static const struct tty_operations tty3270_ops = {
1754 .set_termios = tty3270_set_termios 1753 .set_termios = tty3270_set_termios
1755}; 1754};
1756 1755
1757void 1756static void tty3270_notifier(int index, int active)
1758tty3270_notifier(int index, int active)
1759{ 1757{
1760 if (active) 1758 if (active)
1761 tty_register_device(tty3270_driver, index, NULL); 1759 tty_register_device(tty3270_driver, index, NULL);
@@ -1767,8 +1765,7 @@ tty3270_notifier(int index, int active)
1767 * 3270 tty registration code called from tty_init(). 1765 * 3270 tty registration code called from tty_init().
1768 * Most kernel services (incl. kmalloc) are available at this poimt. 1766 * Most kernel services (incl. kmalloc) are available at this poimt.
1769 */ 1767 */
1770int __init 1768static int __init tty3270_init(void)
1771tty3270_init(void)
1772{ 1769{
1773 struct tty_driver *driver; 1770 struct tty_driver *driver;
1774 int ret; 1771 int ret;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 6cb23040954b..4f894dc2373b 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -128,9 +128,8 @@ static iucv_interrupt_ops_t vmlogrdr_iucvops = {
128 .MessagePending = vmlogrdr_iucv_MessagePending, 128 .MessagePending = vmlogrdr_iucv_MessagePending,
129}; 129};
130 130
131 131static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
132DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue); 132static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
133DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
134 133
135/* 134/*
136 * pointer to system service private structure 135 * pointer to system service private structure
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 12c2d6b746e6..aa65df4dfced 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -43,7 +43,7 @@ typedef enum {add, free} range_action;
43 * Function: blacklist_range 43 * Function: blacklist_range
44 * (Un-)blacklist the devices from-to 44 * (Un-)blacklist the devices from-to
45 */ 45 */
46static inline void 46static void
47blacklist_range (range_action action, unsigned int from, unsigned int to, 47blacklist_range (range_action action, unsigned int from, unsigned int to,
48 unsigned int ssid) 48 unsigned int ssid)
49{ 49{
@@ -69,7 +69,7 @@ blacklist_range (range_action action, unsigned int from, unsigned int to,
69 * Get devno/busid from given string. 69 * Get devno/busid from given string.
70 * Shamelessly grabbed from dasd_devmap.c. 70 * Shamelessly grabbed from dasd_devmap.c.
71 */ 71 */
72static inline int 72static int
73blacklist_busid(char **str, int *id0, int *ssid, int *devno) 73blacklist_busid(char **str, int *id0, int *ssid, int *devno)
74{ 74{
75 int val, old_style; 75 int val, old_style;
@@ -123,10 +123,10 @@ confused:
123 return 1; 123 return 1;
124} 124}
125 125
126static inline int 126static int
127blacklist_parse_parameters (char *str, range_action action) 127blacklist_parse_parameters (char *str, range_action action)
128{ 128{
129 unsigned int from, to, from_id0, to_id0, from_ssid, to_ssid; 129 int from, to, from_id0, to_id0, from_ssid, to_ssid;
130 130
131 while (*str != 0 && *str != '\n') { 131 while (*str != 0 && *str != '\n') {
132 range_action ra = action; 132 range_action ra = action;
@@ -227,7 +227,7 @@ is_blacklisted (int ssid, int devno)
227 * Function: blacklist_parse_proc_parameters 227 * Function: blacklist_parse_proc_parameters
228 * parse the stuff which is piped to /proc/cio_ignore 228 * parse the stuff which is piped to /proc/cio_ignore
229 */ 229 */
230static inline void 230static void
231blacklist_parse_proc_parameters (char *buf) 231blacklist_parse_proc_parameters (char *buf)
232{ 232{
233 if (strncmp (buf, "free ", 5) == 0) { 233 if (strncmp (buf, "free ", 5) == 0) {
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 38954f5cd14c..d48e3ca4752c 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -53,7 +53,7 @@ ccwgroup_uevent (struct device *dev, char **envp, int num_envp, char *buffer,
53 53
54static struct bus_type ccwgroup_bus_type; 54static struct bus_type ccwgroup_bus_type;
55 55
56static inline void 56static void
57__ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) 57__ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
58{ 58{
59 int i; 59 int i;
@@ -104,7 +104,7 @@ ccwgroup_release (struct device *dev)
104 kfree(gdev); 104 kfree(gdev);
105} 105}
106 106
107static inline int 107static int
108__ccwgroup_create_symlinks(struct ccwgroup_device *gdev) 108__ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
109{ 109{
110 char str[8]; 110 char str[8];
@@ -424,7 +424,7 @@ ccwgroup_probe_ccwdev(struct ccw_device *cdev)
424 return 0; 424 return 0;
425} 425}
426 426
427static inline struct ccwgroup_device * 427static struct ccwgroup_device *
428__ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev) 428__ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
429{ 429{
430 struct ccwgroup_device *gdev; 430 struct ccwgroup_device *gdev;
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index cbab8d2ce5cf..6f05a44e3817 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -93,7 +93,7 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
93 u16 sch; /* subchannel */ 93 u16 sch; /* subchannel */
94 u8 chpid[8]; /* chpids 0-7 */ 94 u8 chpid[8]; /* chpids 0-7 */
95 u16 fla[8]; /* full link addresses 0-7 */ 95 u16 fla[8]; /* full link addresses 0-7 */
96 } *ssd_area; 96 } __attribute__ ((packed)) *ssd_area;
97 97
98 ssd_area = page; 98 ssd_area = page;
99 99
@@ -277,7 +277,7 @@ out_unreg:
277 return 0; 277 return 0;
278} 278}
279 279
280static inline void 280static void
281s390_set_chpid_offline( __u8 chpid) 281s390_set_chpid_offline( __u8 chpid)
282{ 282{
283 char dbf_txt[15]; 283 char dbf_txt[15];
@@ -338,7 +338,7 @@ s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
338 return 0x80 >> chp; 338 return 0x80 >> chp;
339} 339}
340 340
341static inline int 341static int
342s390_process_res_acc_new_sch(struct subchannel_id schid) 342s390_process_res_acc_new_sch(struct subchannel_id schid)
343{ 343{
344 struct schib schib; 344 struct schib schib;
@@ -444,7 +444,7 @@ __get_chpid_from_lir(void *data)
444 u32 andesc[28]; 444 u32 andesc[28];
445 /* incident-specific information */ 445 /* incident-specific information */
446 u32 isinfo[28]; 446 u32 isinfo[28];
447 } *lir; 447 } __attribute__ ((packed)) *lir;
448 448
449 lir = data; 449 lir = data;
450 if (!(lir->iq&0x80)) 450 if (!(lir->iq&0x80))
@@ -461,154 +461,146 @@ __get_chpid_from_lir(void *data)
461 return (u16) (lir->indesc[0]&0x000000ff); 461 return (u16) (lir->indesc[0]&0x000000ff);
462} 462}
463 463
464int 464struct chsc_sei_area {
465chsc_process_crw(void) 465 struct chsc_header request;
466 u32 reserved1;
467 u32 reserved2;
468 u32 reserved3;
469 struct chsc_header response;
470 u32 reserved4;
471 u8 flags;
472 u8 vf; /* validity flags */
473 u8 rs; /* reporting source */
474 u8 cc; /* content code */
475 u16 fla; /* full link address */
476 u16 rsid; /* reporting source id */
477 u32 reserved5;
478 u32 reserved6;
479 u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
480 /* ccdf has to be big enough for a link-incident record */
481} __attribute__ ((packed));
482
483static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
484{
485 int chpid;
486
487 CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
488 sei_area->rs, sei_area->rsid);
489 if (sei_area->rs != 4)
490 return 0;
491 chpid = __get_chpid_from_lir(sei_area->ccdf);
492 if (chpid < 0)
493 CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
494 else
495 s390_set_chpid_offline(chpid);
496
497 return 0;
498}
499
500static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
466{ 501{
467 int chpid, ret;
468 struct res_acc_data res_data; 502 struct res_acc_data res_data;
469 struct { 503 struct device *dev;
470 struct chsc_header request; 504 int status;
471 u32 reserved1; 505 int rc;
472 u32 reserved2; 506
473 u32 reserved3; 507 CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
474 struct chsc_header response; 508 "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
475 u32 reserved4; 509 if (sei_area->rs != 4)
476 u8 flags; 510 return 0;
477 u8 vf; /* validity flags */ 511 /* allocate a new channel path structure, if needed */
478 u8 rs; /* reporting source */ 512 status = get_chp_status(sei_area->rsid);
479 u8 cc; /* content code */ 513 if (status < 0)
480 u16 fla; /* full link address */ 514 new_channel_path(sei_area->rsid);
481 u16 rsid; /* reporting source id */ 515 else if (!status)
482 u32 reserved5; 516 return 0;
483 u32 reserved6; 517 dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
484 u32 ccdf[96]; /* content-code dependent field */ 518 memset(&res_data, 0, sizeof(struct res_acc_data));
485 /* ccdf has to be big enough for a link-incident record */ 519 res_data.chp = to_channelpath(dev);
486 } *sei_area; 520 if ((sei_area->vf & 0xc0) != 0) {
521 res_data.fla = sei_area->fla;
522 if ((sei_area->vf & 0xc0) == 0xc0)
523 /* full link address */
524 res_data.fla_mask = 0xffff;
525 else
526 /* link address */
527 res_data.fla_mask = 0xff00;
528 }
529 rc = s390_process_res_acc(&res_data);
530 put_device(dev);
531
532 return rc;
533}
534
535static int chsc_process_sei(struct chsc_sei_area *sei_area)
536{
537 int rc;
538
539 /* Check if we might have lost some information. */
540 if (sei_area->flags & 0x40)
541 CIO_CRW_EVENT(2, "chsc: event overflow\n");
542 /* which kind of information was stored? */
543 rc = 0;
544 switch (sei_area->cc) {
545 case 1: /* link incident*/
546 rc = chsc_process_sei_link_incident(sei_area);
547 break;
548 case 2: /* i/o resource accessibiliy */
549 rc = chsc_process_sei_res_acc(sei_area);
550 break;
551 default: /* other stuff */
552 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
553 sei_area->cc);
554 break;
555 }
556
557 return rc;
558}
559
560int chsc_process_crw(void)
561{
562 struct chsc_sei_area *sei_area;
563 int ret;
564 int rc;
487 565
488 if (!sei_page) 566 if (!sei_page)
489 return 0; 567 return 0;
490 /* 568 /* Access to sei_page is serialized through machine check handler
491 * build the chsc request block for store event information 569 * thread, so no need for locking. */
492 * and do the call
493 * This function is only called by the machine check handler thread,
494 * so we don't need locking for the sei_page.
495 */
496 sei_area = sei_page; 570 sei_area = sei_page;
497 571
498 CIO_TRACE_EVENT( 2, "prcss"); 572 CIO_TRACE_EVENT( 2, "prcss");
499 ret = 0; 573 ret = 0;
500 do { 574 do {
501 int ccode, status;
502 struct device *dev;
503 memset(sei_area, 0, sizeof(*sei_area)); 575 memset(sei_area, 0, sizeof(*sei_area));
504 memset(&res_data, 0, sizeof(struct res_acc_data));
505 sei_area->request.length = 0x0010; 576 sei_area->request.length = 0x0010;
506 sei_area->request.code = 0x000e; 577 sei_area->request.code = 0x000e;
578 if (chsc(sei_area))
579 break;
507 580
508 ccode = chsc(sei_area); 581 if (sei_area->response.code == 0x0001) {
509 if (ccode > 0) 582 CIO_CRW_EVENT(4, "chsc: sei successful\n");
510 return 0; 583 rc = chsc_process_sei(sei_area);
511 584 if (rc)
512 switch (sei_area->response.code) { 585 ret = rc;
513 /* for debug purposes, check for problems */ 586 } else {
514 case 0x0001: 587 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
515 CIO_CRW_EVENT(4, "chsc_process_crw: event information "
516 "successfully stored\n");
517 break; /* everything ok */
518 case 0x0002:
519 CIO_CRW_EVENT(2,
520 "chsc_process_crw: invalid command!\n");
521 return 0;
522 case 0x0003:
523 CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
524 "request block!\n");
525 return 0;
526 case 0x0005:
527 CIO_CRW_EVENT(2, "chsc_process_crw: no event "
528 "information stored\n");
529 return 0;
530 default:
531 CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
532 sei_area->response.code); 588 sei_area->response.code);
533 return 0; 589 ret = 0;
534 }
535
536 /* Check if we might have lost some information. */
537 if (sei_area->flags & 0x40)
538 CIO_CRW_EVENT(2, "chsc_process_crw: Event information "
539 "has been lost due to overflow!\n");
540
541 if (sei_area->rs != 4) {
542 CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
543 "(%04X) isn't a chpid!\n",
544 sei_area->rsid);
545 continue;
546 }
547
548 /* which kind of information was stored? */
549 switch (sei_area->cc) {
550 case 1: /* link incident*/
551 CIO_CRW_EVENT(4, "chsc_process_crw: "
552 "channel subsystem reports link incident,"
553 " reporting source is chpid %x\n",
554 sei_area->rsid);
555 chpid = __get_chpid_from_lir(sei_area->ccdf);
556 if (chpid < 0)
557 CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n",
558 __FUNCTION__);
559 else
560 s390_set_chpid_offline(chpid);
561 break;
562
563 case 2: /* i/o resource accessibiliy */
564 CIO_CRW_EVENT(4, "chsc_process_crw: "
565 "channel subsystem reports some I/O "
566 "devices may have become accessible\n");
567 pr_debug("Data received after sei: \n");
568 pr_debug("Validity flags: %x\n", sei_area->vf);
569
570 /* allocate a new channel path structure, if needed */
571 status = get_chp_status(sei_area->rsid);
572 if (status < 0)
573 new_channel_path(sei_area->rsid);
574 else if (!status)
575 break;
576 dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
577 res_data.chp = to_channelpath(dev);
578 pr_debug("chpid: %x", sei_area->rsid);
579 if ((sei_area->vf & 0xc0) != 0) {
580 res_data.fla = sei_area->fla;
581 if ((sei_area->vf & 0xc0) == 0xc0) {
582 pr_debug(" full link addr: %x",
583 sei_area->fla);
584 res_data.fla_mask = 0xffff;
585 } else {
586 pr_debug(" link addr: %x",
587 sei_area->fla);
588 res_data.fla_mask = 0xff00;
589 }
590 }
591 ret = s390_process_res_acc(&res_data);
592 pr_debug("\n\n");
593 put_device(dev);
594 break;
595
596 default: /* other stuff */
597 CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
598 sei_area->cc);
599 break; 590 break;
600 } 591 }
601 } while (sei_area->flags & 0x80); 592 } while (sei_area->flags & 0x80);
593
602 return ret; 594 return ret;
603} 595}
604 596
605static inline int 597static int
606__chp_add_new_sch(struct subchannel_id schid) 598__chp_add_new_sch(struct subchannel_id schid)
607{ 599{
608 struct schib schib; 600 struct schib schib;
609 int ret; 601 int ret;
610 602
611 if (stsch(schid, &schib)) 603 if (stsch_err(schid, &schib))
612 /* We're through */ 604 /* We're through */
613 return need_rescan ? -EAGAIN : -ENXIO; 605 return need_rescan ? -EAGAIN : -ENXIO;
614 606
@@ -709,7 +701,7 @@ chp_process_crw(int chpid, int on)
709 return chp_add(chpid); 701 return chp_add(chpid);
710} 702}
711 703
712static inline int check_for_io_on_path(struct subchannel *sch, int index) 704static int check_for_io_on_path(struct subchannel *sch, int index)
713{ 705{
714 int cc; 706 int cc;
715 707
@@ -741,7 +733,7 @@ static void terminate_internal_io(struct subchannel *sch)
741 sch->driver->termination(&sch->dev); 733 sch->driver->termination(&sch->dev);
742} 734}
743 735
744static inline void 736static void
745__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) 737__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
746{ 738{
747 int chp, old_lpm; 739 int chp, old_lpm;
@@ -967,8 +959,8 @@ static struct bin_attribute chp_measurement_attr = {
967static void 959static void
968chsc_remove_chp_cmg_attr(struct channel_path *chp) 960chsc_remove_chp_cmg_attr(struct channel_path *chp)
969{ 961{
970 sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_chars_attr); 962 device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
971 sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_attr); 963 device_remove_bin_file(&chp->dev, &chp_measurement_attr);
972} 964}
973 965
974static int 966static int
@@ -976,14 +968,12 @@ chsc_add_chp_cmg_attr(struct channel_path *chp)
976{ 968{
977 int ret; 969 int ret;
978 970
979 ret = sysfs_create_bin_file(&chp->dev.kobj, 971 ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
980 &chp_measurement_chars_attr);
981 if (ret) 972 if (ret)
982 return ret; 973 return ret;
983 ret = sysfs_create_bin_file(&chp->dev.kobj, &chp_measurement_attr); 974 ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
984 if (ret) 975 if (ret)
985 sysfs_remove_bin_file(&chp->dev.kobj, 976 device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
986 &chp_measurement_chars_attr);
987 return ret; 977 return ret;
988} 978}
989 979
@@ -1042,7 +1032,7 @@ __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
1042 u32 : 4; 1032 u32 : 4;
1043 u32 fmt : 4; 1033 u32 fmt : 4;
1044 u32 : 16; 1034 u32 : 16;
1045 } *secm_area; 1035 } __attribute__ ((packed)) *secm_area;
1046 int ret, ccode; 1036 int ret, ccode;
1047 1037
1048 secm_area = page; 1038 secm_area = page;
@@ -1253,7 +1243,7 @@ chsc_determine_channel_path_description(int chpid,
1253 struct chsc_header response; 1243 struct chsc_header response;
1254 u32 zeroes2; 1244 u32 zeroes2;
1255 struct channel_path_desc desc; 1245 struct channel_path_desc desc;
1256 } *scpd_area; 1246 } __attribute__ ((packed)) *scpd_area;
1257 1247
1258 scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 1248 scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1259 if (!scpd_area) 1249 if (!scpd_area)
@@ -1350,7 +1340,7 @@ chsc_get_channel_measurement_chars(struct channel_path *chp)
1350 u32 cmg : 8; 1340 u32 cmg : 8;
1351 u32 zeroes3; 1341 u32 zeroes3;
1352 u32 data[NR_MEASUREMENT_CHARS]; 1342 u32 data[NR_MEASUREMENT_CHARS];
1353 } *scmc_area; 1343 } __attribute__ ((packed)) *scmc_area;
1354 1344
1355 scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 1345 scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1356 if (!scmc_area) 1346 if (!scmc_area)
@@ -1517,7 +1507,7 @@ chsc_enable_facility(int operation_code)
1517 u32 reserved5:4; 1507 u32 reserved5:4;
1518 u32 format2:4; 1508 u32 format2:4;
1519 u32 reserved6:24; 1509 u32 reserved6:24;
1520 } *sda_area; 1510 } __attribute__ ((packed)) *sda_area;
1521 1511
1522 sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); 1512 sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
1523 if (!sda_area) 1513 if (!sda_area)
@@ -1569,7 +1559,7 @@ chsc_determine_css_characteristics(void)
1569 u32 reserved4; 1559 u32 reserved4;
1570 u32 general_char[510]; 1560 u32 general_char[510];
1571 u32 chsc_char[518]; 1561 u32 chsc_char[518];
1572 } *scsc_area; 1562 } __attribute__ ((packed)) *scsc_area;
1573 1563
1574 scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 1564 scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1575 if (!scsc_area) { 1565 if (!scsc_area) {
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index a259245780ae..0fb2b024208f 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -10,17 +10,17 @@
10struct chsc_header { 10struct chsc_header {
11 u16 length; 11 u16 length;
12 u16 code; 12 u16 code;
13}; 13} __attribute__ ((packed));
14 14
15#define NR_MEASUREMENT_CHARS 5 15#define NR_MEASUREMENT_CHARS 5
16struct cmg_chars { 16struct cmg_chars {
17 u32 values[NR_MEASUREMENT_CHARS]; 17 u32 values[NR_MEASUREMENT_CHARS];
18}; 18} __attribute__ ((packed));
19 19
20#define NR_MEASUREMENT_ENTRIES 8 20#define NR_MEASUREMENT_ENTRIES 8
21struct cmg_entry { 21struct cmg_entry {
22 u32 values[NR_MEASUREMENT_ENTRIES]; 22 u32 values[NR_MEASUREMENT_ENTRIES];
23}; 23} __attribute__ ((packed));
24 24
25struct channel_path_desc { 25struct channel_path_desc {
26 u8 flags; 26 u8 flags;
@@ -31,7 +31,7 @@ struct channel_path_desc {
31 u8 zeroes; 31 u8 zeroes;
32 u8 chla; 32 u8 chla;
33 u8 chpp; 33 u8 chpp;
34}; 34} __attribute__ ((packed));
35 35
36struct channel_path { 36struct channel_path {
37 int id; 37 int id;
@@ -47,6 +47,9 @@ struct channel_path {
47extern void s390_process_css( void ); 47extern void s390_process_css( void );
48extern void chsc_validate_chpids(struct subchannel *); 48extern void chsc_validate_chpids(struct subchannel *);
49extern void chpid_is_actually_online(int); 49extern void chpid_is_actually_online(int);
50extern int css_get_ssd_info(struct subchannel *);
51extern int chsc_process_crw(void);
52extern int chp_process_crw(int, int);
50 53
51struct css_general_char { 54struct css_general_char {
52 u64 : 41; 55 u64 : 41;
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index ae1bf231d089..b3a56dc5f68a 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -122,7 +122,7 @@ cio_get_options (struct subchannel *sch)
122 * Use tpi to get a pending interrupt, call the interrupt handler and 122 * Use tpi to get a pending interrupt, call the interrupt handler and
123 * return a pointer to the subchannel structure. 123 * return a pointer to the subchannel structure.
124 */ 124 */
125static inline int 125static int
126cio_tpi(void) 126cio_tpi(void)
127{ 127{
128 struct tpi_info *tpi_info; 128 struct tpi_info *tpi_info;
@@ -152,7 +152,7 @@ cio_tpi(void)
152 return 1; 152 return 1;
153} 153}
154 154
155static inline int 155static int
156cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) 156cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
157{ 157{
158 char dbf_text[15]; 158 char dbf_text[15];
@@ -585,7 +585,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
585 * This device must not be known to Linux. So we simply 585 * This device must not be known to Linux. So we simply
586 * say that there is no device and return ENODEV. 586 * say that there is no device and return ENODEV.
587 */ 587 */
588 CIO_MSG_EVENT(0, "Blacklisted device detected " 588 CIO_MSG_EVENT(4, "Blacklisted device detected "
589 "at devno %04X, subchannel set %x\n", 589 "at devno %04X, subchannel set %x\n",
590 sch->schib.pmcw.dev, sch->schid.ssid); 590 sch->schib.pmcw.dev, sch->schid.ssid);
591 err = -ENODEV; 591 err = -ENODEV;
@@ -646,7 +646,7 @@ do_IRQ (struct pt_regs *regs)
646 * Make sure that the i/o interrupt did not "overtake" 646 * Make sure that the i/o interrupt did not "overtake"
647 * the last HZ timer interrupt. 647 * the last HZ timer interrupt.
648 */ 648 */
649 account_ticks(); 649 account_ticks(S390_lowcore.int_clock);
650 /* 650 /*
651 * Get interrupt information from lowcore 651 * Get interrupt information from lowcore
652 */ 652 */
@@ -832,7 +832,7 @@ cio_get_console_subchannel(void)
832} 832}
833 833
834#endif 834#endif
835static inline int 835static int
836__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) 836__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
837{ 837{
838 int retry, cc; 838 int retry, cc;
@@ -850,7 +850,20 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
850 return -EBUSY; /* uhm... */ 850 return -EBUSY; /* uhm... */
851} 851}
852 852
853static inline int 853/* we can't use the normal udelay here, since it enables external interrupts */
854
855static void udelay_reset(unsigned long usecs)
856{
857 uint64_t start_cc, end_cc;
858
859 asm volatile ("STCK %0" : "=m" (start_cc));
860 do {
861 cpu_relax();
862 asm volatile ("STCK %0" : "=m" (end_cc));
863 } while (((end_cc - start_cc)/4096) < usecs);
864}
865
866static int
854__clear_subchannel_easy(struct subchannel_id schid) 867__clear_subchannel_easy(struct subchannel_id schid)
855{ 868{
856 int retry; 869 int retry;
@@ -865,7 +878,7 @@ __clear_subchannel_easy(struct subchannel_id schid)
865 if (schid_equal(&ti.schid, &schid)) 878 if (schid_equal(&ti.schid, &schid))
866 return 0; 879 return 0;
867 } 880 }
868 udelay(100); 881 udelay_reset(100);
869 } 882 }
870 return -EBUSY; 883 return -EBUSY;
871} 884}
@@ -882,11 +895,11 @@ static int stsch_reset(struct subchannel_id schid, volatile struct schib *addr)
882 int rc; 895 int rc;
883 896
884 pgm_check_occured = 0; 897 pgm_check_occured = 0;
885 s390_reset_pgm_handler = cio_reset_pgm_check_handler; 898 s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
886 rc = stsch(schid, addr); 899 rc = stsch(schid, addr);
887 s390_reset_pgm_handler = NULL; 900 s390_base_pgm_handler_fn = NULL;
888 901
889 /* The program check handler could have changed pgm_check_occured */ 902 /* The program check handler could have changed pgm_check_occured. */
890 barrier(); 903 barrier();
891 904
892 if (pgm_check_occured) 905 if (pgm_check_occured)
@@ -944,7 +957,7 @@ static void css_reset(void)
944 /* Reset subchannels. */ 957 /* Reset subchannels. */
945 for_each_subchannel(__shutdown_subchannel_easy, NULL); 958 for_each_subchannel(__shutdown_subchannel_easy, NULL);
946 /* Reset channel paths. */ 959 /* Reset channel paths. */
947 s390_reset_mcck_handler = s390_reset_chpids_mcck_handler; 960 s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler;
948 /* Enable channel report machine checks. */ 961 /* Enable channel report machine checks. */
949 __ctl_set_bit(14, 28); 962 __ctl_set_bit(14, 28);
950 /* Temporarily reenable machine checks. */ 963 /* Temporarily reenable machine checks. */
@@ -969,7 +982,7 @@ static void css_reset(void)
969 local_mcck_disable(); 982 local_mcck_disable();
970 /* Disable channel report machine checks. */ 983 /* Disable channel report machine checks. */
971 __ctl_clear_bit(14, 28); 984 __ctl_clear_bit(14, 28);
972 s390_reset_mcck_handler = NULL; 985 s390_base_mcck_handler_fn = NULL;
973} 986}
974 987
975static struct reset_call css_reset_call = { 988static struct reset_call css_reset_call = {
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 828b2d334f0a..90b22faabbf7 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -519,8 +519,8 @@ struct cmb {
519/* insert a single device into the cmb_area list 519/* insert a single device into the cmb_area list
520 * called with cmb_area.lock held from alloc_cmb 520 * called with cmb_area.lock held from alloc_cmb
521 */ 521 */
522static inline int alloc_cmb_single (struct ccw_device *cdev, 522static int alloc_cmb_single(struct ccw_device *cdev,
523 struct cmb_data *cmb_data) 523 struct cmb_data *cmb_data)
524{ 524{
525 struct cmb *cmb; 525 struct cmb *cmb;
526 struct ccw_device_private *node; 526 struct ccw_device_private *node;
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 9d6c02446863..fe0ace7aece8 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -30,7 +30,7 @@ struct channel_subsystem *css[__MAX_CSSID + 1];
30 30
31int css_characteristics_avail = 0; 31int css_characteristics_avail = 0;
32 32
33inline int 33int
34for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) 34for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
35{ 35{
36 struct subchannel_id schid; 36 struct subchannel_id schid;
@@ -108,9 +108,6 @@ css_subchannel_release(struct device *dev)
108 } 108 }
109} 109}
110 110
111extern int css_get_ssd_info(struct subchannel *sch);
112
113
114int css_sch_device_register(struct subchannel *sch) 111int css_sch_device_register(struct subchannel *sch)
115{ 112{
116 int ret; 113 int ret;
@@ -187,7 +184,7 @@ get_subchannel_by_schid(struct subchannel_id schid)
187 return dev ? to_subchannel(dev) : NULL; 184 return dev ? to_subchannel(dev) : NULL;
188} 185}
189 186
190static inline int css_get_subchannel_status(struct subchannel *sch) 187static int css_get_subchannel_status(struct subchannel *sch)
191{ 188{
192 struct schib schib; 189 struct schib schib;
193 190
@@ -299,7 +296,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
299 /* Will be done on the slow path. */ 296 /* Will be done on the slow path. */
300 return -EAGAIN; 297 return -EAGAIN;
301 } 298 }
302 if (stsch(schid, &schib) || !schib.pmcw.dnv) { 299 if (stsch_err(schid, &schib) || !schib.pmcw.dnv) {
303 /* Unusable - ignore. */ 300 /* Unusable - ignore. */
304 return 0; 301 return 0;
305 } 302 }
@@ -417,7 +414,7 @@ static void reprobe_all(struct work_struct *unused)
417 need_reprobe); 414 need_reprobe);
418} 415}
419 416
420DECLARE_WORK(css_reprobe_work, reprobe_all); 417static DECLARE_WORK(css_reprobe_work, reprobe_all);
421 418
422/* Schedule reprobing of all unregistered subchannels. */ 419/* Schedule reprobing of all unregistered subchannels. */
423void css_schedule_reprobe(void) 420void css_schedule_reprobe(void)
@@ -578,7 +575,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
578 575
579static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store); 576static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
580 577
581static inline int __init setup_css(int nr) 578static int __init setup_css(int nr)
582{ 579{
583 u32 tod_high; 580 u32 tod_high;
584 int ret; 581 int ret;
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 3464c5b875c4..ca2bab932a8a 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -143,6 +143,8 @@ extern void css_sch_device_unregister(struct subchannel *);
143extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); 143extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
144extern int css_init_done; 144extern int css_init_done;
145extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); 145extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
146extern int css_process_crw(int, int);
147extern void css_reiterate_subchannels(void);
146 148
147#define __MAX_SUBCHANNEL 65535 149#define __MAX_SUBCHANNEL 65535
148#define __MAX_SSID 3 150#define __MAX_SSID 3
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 803579053c2f..e322111fb369 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -138,7 +138,6 @@ struct bus_type ccw_bus_type;
138 138
139static int io_subchannel_probe (struct subchannel *); 139static int io_subchannel_probe (struct subchannel *);
140static int io_subchannel_remove (struct subchannel *); 140static int io_subchannel_remove (struct subchannel *);
141void io_subchannel_irq (struct device *);
142static int io_subchannel_notify(struct device *, int); 141static int io_subchannel_notify(struct device *, int);
143static void io_subchannel_verify(struct device *); 142static void io_subchannel_verify(struct device *);
144static void io_subchannel_ioterm(struct device *); 143static void io_subchannel_ioterm(struct device *);
@@ -235,11 +234,8 @@ chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
235 ssize_t ret = 0; 234 ssize_t ret = 0;
236 int chp; 235 int chp;
237 236
238 if (ssd) 237 for (chp = 0; chp < 8; chp++)
239 for (chp = 0; chp < 8; chp++) 238 ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]);
240 ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]);
241 else
242 ret += sprintf (buf, "n/a");
243 ret += sprintf (buf+ret, "\n"); 239 ret += sprintf (buf+ret, "\n");
244 return min((ssize_t)PAGE_SIZE, ret); 240 return min((ssize_t)PAGE_SIZE, ret);
245} 241}
@@ -552,13 +548,13 @@ static struct attribute_group ccwdev_attr_group = {
552 .attrs = ccwdev_attrs, 548 .attrs = ccwdev_attrs,
553}; 549};
554 550
555static inline int 551static int
556device_add_files (struct device *dev) 552device_add_files (struct device *dev)
557{ 553{
558 return sysfs_create_group(&dev->kobj, &ccwdev_attr_group); 554 return sysfs_create_group(&dev->kobj, &ccwdev_attr_group);
559} 555}
560 556
561static inline void 557static void
562device_remove_files(struct device *dev) 558device_remove_files(struct device *dev)
563{ 559{
564 sysfs_remove_group(&dev->kobj, &ccwdev_attr_group); 560 sysfs_remove_group(&dev->kobj, &ccwdev_attr_group);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 29db6341d632..b66338b76579 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -74,6 +74,7 @@ extern struct workqueue_struct *ccw_device_notify_work;
74extern wait_queue_head_t ccw_device_init_wq; 74extern wait_queue_head_t ccw_device_init_wq;
75extern atomic_t ccw_device_init_count; 75extern atomic_t ccw_device_init_count;
76 76
77void io_subchannel_irq (struct device *pdev);
77void io_subchannel_recog_done(struct ccw_device *cdev); 78void io_subchannel_recog_done(struct ccw_device *cdev);
78 79
79int ccw_device_cancel_halt_clear(struct ccw_device *); 80int ccw_device_cancel_halt_clear(struct ccw_device *);
@@ -118,6 +119,7 @@ int ccw_device_stlck(struct ccw_device *);
118/* qdio needs this. */ 119/* qdio needs this. */
119void ccw_device_set_timeout(struct ccw_device *, int); 120void ccw_device_set_timeout(struct ccw_device *, int);
120extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); 121extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
122extern struct bus_type ccw_bus_type;
121 123
122/* Channel measurement facility related */ 124/* Channel measurement facility related */
123void retry_set_schib(struct ccw_device *cdev); 125void retry_set_schib(struct ccw_device *cdev);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index eed14572fc3b..51238e7555bb 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -206,7 +206,7 @@ ccw_device_handle_oper(struct ccw_device *cdev)
206 * been varied online on the SE so we have to find out by magic (i. e. driving 206 * been varied online on the SE so we have to find out by magic (i. e. driving
207 * the channel subsystem to device selection and updating our path masks). 207 * the channel subsystem to device selection and updating our path masks).
208 */ 208 */
209static inline void 209static void
210__recover_lost_chpids(struct subchannel *sch, int old_lpm) 210__recover_lost_chpids(struct subchannel *sch, int old_lpm)
211{ 211{
212 int mask, i; 212 int mask, i;
@@ -387,7 +387,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
387 put_device (&cdev->dev); 387 put_device (&cdev->dev);
388} 388}
389 389
390static inline int cmp_pgid(struct pgid *p1, struct pgid *p2) 390static int cmp_pgid(struct pgid *p1, struct pgid *p2)
391{ 391{
392 char *c1; 392 char *c1;
393 char *c2; 393 char *c2;
@@ -842,6 +842,8 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
842call_handler_unsol: 842call_handler_unsol:
843 if (cdev->handler) 843 if (cdev->handler)
844 cdev->handler (cdev, 0, irb); 844 cdev->handler (cdev, 0, irb);
845 if (cdev->private->flags.doverify)
846 ccw_device_online_verify(cdev, 0);
845 return; 847 return;
846 } 848 }
847 /* Accumulate status and find out if a basic sense is needed. */ 849 /* Accumulate status and find out if a basic sense is needed. */
@@ -892,7 +894,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
892/* 894/*
893 * Got an interrupt for a basic sense. 895 * Got an interrupt for a basic sense.
894 */ 896 */
895void 897static void
896ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) 898ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
897{ 899{
898 struct irb *irb; 900 struct irb *irb;
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index d269607336ec..d7b25b8f71d2 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -302,7 +302,7 @@ ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
302 wake_up(&cdev->private->wait_q); 302 wake_up(&cdev->private->wait_q);
303} 303}
304 304
305static inline int 305static int
306__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm) 306__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm)
307{ 307{
308 int ret; 308 int ret;
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index bdcf930f7beb..6b1caea622ea 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -25,7 +25,7 @@
25 * Check for any kind of channel or interface control check but don't 25 * Check for any kind of channel or interface control check but don't
26 * issue the message for the console device 26 * issue the message for the console device
27 */ 27 */
28static inline void 28static void
29ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) 29ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
30{ 30{
31 if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | 31 if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK |
@@ -72,7 +72,7 @@ ccw_device_path_notoper(struct ccw_device *cdev)
72/* 72/*
73 * Copy valid bits from the extended control word to device irb. 73 * Copy valid bits from the extended control word to device irb.
74 */ 74 */
75static inline void 75static void
76ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) 76ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
77{ 77{
78 /* 78 /*
@@ -94,7 +94,7 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
94/* 94/*
95 * Check if extended status word is valid. 95 * Check if extended status word is valid.
96 */ 96 */
97static inline int 97static int
98ccw_device_accumulate_esw_valid(struct irb *irb) 98ccw_device_accumulate_esw_valid(struct irb *irb)
99{ 99{
100 if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) 100 if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND)
@@ -109,7 +109,7 @@ ccw_device_accumulate_esw_valid(struct irb *irb)
109/* 109/*
110 * Copy valid bits from the extended status word to device irb. 110 * Copy valid bits from the extended status word to device irb.
111 */ 111 */
112static inline void 112static void
113ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) 113ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
114{ 114{
115 struct irb *cdev_irb; 115 struct irb *cdev_irb;
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 6fd1940842eb..d726cd5777de 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -66,7 +66,6 @@ MODULE_LICENSE("GPL");
66/******************** HERE WE GO ***********************************/ 66/******************** HERE WE GO ***********************************/
67 67
68static const char version[] = "QDIO base support version 2"; 68static const char version[] = "QDIO base support version 2";
69extern struct bus_type ccw_bus_type;
70 69
71static int qdio_performance_stats = 0; 70static int qdio_performance_stats = 0;
72static int proc_perf_file_registration; 71static int proc_perf_file_registration;
@@ -138,7 +137,7 @@ qdio_release_q(struct qdio_q *q)
138} 137}
139 138
140/*check ccq */ 139/*check ccq */
141static inline int 140static int
142qdio_check_ccq(struct qdio_q *q, unsigned int ccq) 141qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
143{ 142{
144 char dbf_text[15]; 143 char dbf_text[15];
@@ -153,7 +152,7 @@ qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
153 return -EIO; 152 return -EIO;
154} 153}
155/* EQBS: extract buffer states */ 154/* EQBS: extract buffer states */
156static inline int 155static int
157qdio_do_eqbs(struct qdio_q *q, unsigned char *state, 156qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
158 unsigned int *start, unsigned int *cnt) 157 unsigned int *start, unsigned int *cnt)
159{ 158{
@@ -188,7 +187,7 @@ again:
188} 187}
189 188
190/* SQBS: set buffer states */ 189/* SQBS: set buffer states */
191static inline int 190static int
192qdio_do_sqbs(struct qdio_q *q, unsigned char state, 191qdio_do_sqbs(struct qdio_q *q, unsigned char state,
193 unsigned int *start, unsigned int *cnt) 192 unsigned int *start, unsigned int *cnt)
194{ 193{
@@ -315,7 +314,7 @@ __do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
315 * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns 314 * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns
316 * an access exception 315 * an access exception
317 */ 316 */
318static inline int 317static int
319qdio_siga_output(struct qdio_q *q) 318qdio_siga_output(struct qdio_q *q)
320{ 319{
321 int cc; 320 int cc;
@@ -349,7 +348,7 @@ qdio_siga_output(struct qdio_q *q)
349 return cc; 348 return cc;
350} 349}
351 350
352static inline int 351static int
353qdio_siga_input(struct qdio_q *q) 352qdio_siga_input(struct qdio_q *q)
354{ 353{
355 int cc; 354 int cc;
@@ -421,7 +420,7 @@ tiqdio_sched_tl(void)
421 tasklet_hi_schedule(&tiqdio_tasklet); 420 tasklet_hi_schedule(&tiqdio_tasklet);
422} 421}
423 422
424static inline void 423static void
425qdio_mark_tiq(struct qdio_q *q) 424qdio_mark_tiq(struct qdio_q *q)
426{ 425{
427 unsigned long flags; 426 unsigned long flags;
@@ -471,7 +470,7 @@ qdio_mark_q(struct qdio_q *q)
471 tasklet_schedule(&q->tasklet); 470 tasklet_schedule(&q->tasklet);
472} 471}
473 472
474static inline int 473static int
475qdio_stop_polling(struct qdio_q *q) 474qdio_stop_polling(struct qdio_q *q)
476{ 475{
477#ifdef QDIO_USE_PROCESSING_STATE 476#ifdef QDIO_USE_PROCESSING_STATE
@@ -525,7 +524,7 @@ qdio_stop_polling(struct qdio_q *q)
525 * sophisticated locking outside of unmark_q, so that we don't need to 524 * sophisticated locking outside of unmark_q, so that we don't need to
526 * disable the interrupts :-) 525 * disable the interrupts :-)
527*/ 526*/
528static inline void 527static void
529qdio_unmark_q(struct qdio_q *q) 528qdio_unmark_q(struct qdio_q *q)
530{ 529{
531 unsigned long flags; 530 unsigned long flags;
@@ -691,7 +690,7 @@ qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q)
691 return q->first_to_check; 690 return q->first_to_check;
692} 691}
693 692
694static inline int 693static int
695qdio_get_outbound_buffer_frontier(struct qdio_q *q) 694qdio_get_outbound_buffer_frontier(struct qdio_q *q)
696{ 695{
697 struct qdio_irq *irq; 696 struct qdio_irq *irq;
@@ -774,7 +773,7 @@ out:
774} 773}
775 774
776/* all buffers are processed */ 775/* all buffers are processed */
777static inline int 776static int
778qdio_is_outbound_q_done(struct qdio_q *q) 777qdio_is_outbound_q_done(struct qdio_q *q)
779{ 778{
780 int no_used; 779 int no_used;
@@ -796,7 +795,7 @@ qdio_is_outbound_q_done(struct qdio_q *q)
796 return (no_used==0); 795 return (no_used==0);
797} 796}
798 797
799static inline int 798static int
800qdio_has_outbound_q_moved(struct qdio_q *q) 799qdio_has_outbound_q_moved(struct qdio_q *q)
801{ 800{
802 int i; 801 int i;
@@ -816,7 +815,7 @@ qdio_has_outbound_q_moved(struct qdio_q *q)
816 } 815 }
817} 816}
818 817
819static inline void 818static void
820qdio_kick_outbound_q(struct qdio_q *q) 819qdio_kick_outbound_q(struct qdio_q *q)
821{ 820{
822 int result; 821 int result;
@@ -905,7 +904,7 @@ qdio_kick_outbound_q(struct qdio_q *q)
905 } 904 }
906} 905}
907 906
908static inline void 907static void
909qdio_kick_outbound_handler(struct qdio_q *q) 908qdio_kick_outbound_handler(struct qdio_q *q)
910{ 909{
911 int start, end, real_end, count; 910 int start, end, real_end, count;
@@ -942,7 +941,7 @@ qdio_kick_outbound_handler(struct qdio_q *q)
942 q->error_status_flags=0; 941 q->error_status_flags=0;
943} 942}
944 943
945static inline void 944static void
946__qdio_outbound_processing(struct qdio_q *q) 945__qdio_outbound_processing(struct qdio_q *q)
947{ 946{
948 int siga_attempts; 947 int siga_attempts;
@@ -1002,7 +1001,7 @@ qdio_outbound_processing(struct qdio_q *q)
1002/************************* INBOUND ROUTINES *******************************/ 1001/************************* INBOUND ROUTINES *******************************/
1003 1002
1004 1003
1005static inline int 1004static int
1006qdio_get_inbound_buffer_frontier(struct qdio_q *q) 1005qdio_get_inbound_buffer_frontier(struct qdio_q *q)
1007{ 1006{
1008 struct qdio_irq *irq; 1007 struct qdio_irq *irq;
@@ -1133,7 +1132,7 @@ out:
1133 return q->first_to_check; 1132 return q->first_to_check;
1134} 1133}
1135 1134
1136static inline int 1135static int
1137qdio_has_inbound_q_moved(struct qdio_q *q) 1136qdio_has_inbound_q_moved(struct qdio_q *q)
1138{ 1137{
1139 int i; 1138 int i;
@@ -1167,7 +1166,7 @@ qdio_has_inbound_q_moved(struct qdio_q *q)
1167} 1166}
1168 1167
1169/* means, no more buffers to be filled */ 1168/* means, no more buffers to be filled */
1170static inline int 1169static int
1171tiqdio_is_inbound_q_done(struct qdio_q *q) 1170tiqdio_is_inbound_q_done(struct qdio_q *q)
1172{ 1171{
1173 int no_used; 1172 int no_used;
@@ -1228,7 +1227,7 @@ tiqdio_is_inbound_q_done(struct qdio_q *q)
1228 return 0; 1227 return 0;
1229} 1228}
1230 1229
1231static inline int 1230static int
1232qdio_is_inbound_q_done(struct qdio_q *q) 1231qdio_is_inbound_q_done(struct qdio_q *q)
1233{ 1232{
1234 int no_used; 1233 int no_used;
@@ -1296,7 +1295,7 @@ qdio_is_inbound_q_done(struct qdio_q *q)
1296 } 1295 }
1297} 1296}
1298 1297
1299static inline void 1298static void
1300qdio_kick_inbound_handler(struct qdio_q *q) 1299qdio_kick_inbound_handler(struct qdio_q *q)
1301{ 1300{
1302 int count, start, end, real_end, i; 1301 int count, start, end, real_end, i;
@@ -1343,7 +1342,7 @@ qdio_kick_inbound_handler(struct qdio_q *q)
1343 } 1342 }
1344} 1343}
1345 1344
1346static inline void 1345static void
1347__tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set) 1346__tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
1348{ 1347{
1349 struct qdio_irq *irq_ptr; 1348 struct qdio_irq *irq_ptr;
@@ -1442,7 +1441,7 @@ tiqdio_inbound_processing(struct qdio_q *q)
1442 __tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount)); 1441 __tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount));
1443} 1442}
1444 1443
1445static inline void 1444static void
1446__qdio_inbound_processing(struct qdio_q *q) 1445__qdio_inbound_processing(struct qdio_q *q)
1447{ 1446{
1448 int q_laps=0; 1447 int q_laps=0;
@@ -1493,7 +1492,7 @@ qdio_inbound_processing(struct qdio_q *q)
1493/************************* MAIN ROUTINES *******************************/ 1492/************************* MAIN ROUTINES *******************************/
1494 1493
1495#ifdef QDIO_USE_PROCESSING_STATE 1494#ifdef QDIO_USE_PROCESSING_STATE
1496static inline int 1495static int
1497tiqdio_reset_processing_state(struct qdio_q *q, int q_laps) 1496tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
1498{ 1497{
1499 if (!q) { 1498 if (!q) {
@@ -1545,7 +1544,7 @@ tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
1545} 1544}
1546#endif /* QDIO_USE_PROCESSING_STATE */ 1545#endif /* QDIO_USE_PROCESSING_STATE */
1547 1546
1548static inline void 1547static void
1549tiqdio_inbound_checks(void) 1548tiqdio_inbound_checks(void)
1550{ 1549{
1551 struct qdio_q *q; 1550 struct qdio_q *q;
@@ -1949,7 +1948,7 @@ qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
1949 mb(); 1948 mb();
1950} 1949}
1951 1950
1952static inline void 1951static void
1953qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) 1952qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
1954{ 1953{
1955 char dbf_text[15]; 1954 char dbf_text[15];
@@ -1966,7 +1965,7 @@ qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
1966 1965
1967} 1966}
1968 1967
1969static inline void 1968static void
1970qdio_handle_pci(struct qdio_irq *irq_ptr) 1969qdio_handle_pci(struct qdio_irq *irq_ptr)
1971{ 1970{
1972 int i; 1971 int i;
@@ -2002,7 +2001,7 @@ qdio_handle_pci(struct qdio_irq *irq_ptr)
2002 2001
2003static void qdio_establish_handle_irq(struct ccw_device*, int, int); 2002static void qdio_establish_handle_irq(struct ccw_device*, int, int);
2004 2003
2005static inline void 2004static void
2006qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm, 2005qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm,
2007 int cstat, int dstat) 2006 int cstat, int dstat)
2008{ 2007{
@@ -2229,7 +2228,7 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
2229 return cc; 2228 return cc;
2230} 2229}
2231 2230
2232static inline void 2231static void
2233qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac, 2232qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac,
2234 unsigned long token) 2233 unsigned long token)
2235{ 2234{
@@ -2740,7 +2739,7 @@ qdio_free(struct ccw_device *cdev)
2740 return 0; 2739 return 0;
2741} 2740}
2742 2741
2743static inline void 2742static void
2744qdio_allocate_do_dbf(struct qdio_initialize *init_data) 2743qdio_allocate_do_dbf(struct qdio_initialize *init_data)
2745{ 2744{
2746 char dbf_text[20]; /* if a printf printed out more than 8 chars */ 2745 char dbf_text[20]; /* if a printf printed out more than 8 chars */
@@ -2773,7 +2772,7 @@ qdio_allocate_do_dbf(struct qdio_initialize *init_data)
2773 QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*)); 2772 QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*));
2774} 2773}
2775 2774
2776static inline void 2775static void
2777qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt) 2776qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt)
2778{ 2777{
2779 irq_ptr->input_qs[i]->is_iqdio_q = iqfmt; 2778 irq_ptr->input_qs[i]->is_iqdio_q = iqfmt;
@@ -2792,7 +2791,7 @@ qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt)
2792 irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY; 2791 irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY;
2793} 2792}
2794 2793
2795static inline void 2794static void
2796qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i, 2795qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i,
2797 int j, int iqfmt) 2796 int j, int iqfmt)
2798{ 2797{
@@ -2813,7 +2812,7 @@ qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i,
2813} 2812}
2814 2813
2815 2814
2816static inline void 2815static void
2817qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr) 2816qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr)
2818{ 2817{
2819 int i; 2818 int i;
@@ -2839,7 +2838,7 @@ qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr)
2839 } 2838 }
2840} 2839}
2841 2840
2842static inline void 2841static void
2843qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr) 2842qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr)
2844{ 2843{
2845 int i; 2844 int i;
@@ -2865,7 +2864,7 @@ qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr)
2865 } 2864 }
2866} 2865}
2867 2866
2868static inline int 2867static int
2869qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, 2868qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
2870 int dstat) 2869 int dstat)
2871{ 2870{
@@ -3014,7 +3013,7 @@ qdio_allocate(struct qdio_initialize *init_data)
3014 return 0; 3013 return 0;
3015} 3014}
3016 3015
3017int qdio_fill_irq(struct qdio_initialize *init_data) 3016static int qdio_fill_irq(struct qdio_initialize *init_data)
3018{ 3017{
3019 int i; 3018 int i;
3020 char dbf_text[15]; 3019 char dbf_text[15];
@@ -3367,7 +3366,7 @@ qdio_activate(struct ccw_device *cdev, int flags)
3367} 3366}
3368 3367
3369/* buffers filled forwards again to make Rick happy */ 3368/* buffers filled forwards again to make Rick happy */
3370static inline void 3369static void
3371qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx, 3370qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
3372 unsigned int count, struct qdio_buffer *buffers) 3371 unsigned int count, struct qdio_buffer *buffers)
3373{ 3372{
@@ -3386,7 +3385,7 @@ qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
3386 } 3385 }
3387} 3386}
3388 3387
3389static inline void 3388static void
3390qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx, 3389qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
3391 unsigned int count, struct qdio_buffer *buffers) 3390 unsigned int count, struct qdio_buffer *buffers)
3392{ 3391{
@@ -3407,7 +3406,7 @@ qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
3407 } 3406 }
3408} 3407}
3409 3408
3410static inline void 3409static void
3411do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags, 3410do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
3412 unsigned int qidx, unsigned int count, 3411 unsigned int qidx, unsigned int count,
3413 struct qdio_buffer *buffers) 3412 struct qdio_buffer *buffers)
@@ -3443,7 +3442,7 @@ do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
3443 qdio_mark_q(q); 3442 qdio_mark_q(q);
3444} 3443}
3445 3444
3446static inline void 3445static void
3447do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags, 3446do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
3448 unsigned int qidx, unsigned int count, 3447 unsigned int qidx, unsigned int count,
3449 struct qdio_buffer *buffers) 3448 struct qdio_buffer *buffers)
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 81b5899f4010..c7d1355237b6 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -465,7 +465,7 @@ static int ap_device_probe(struct device *dev)
465 * Flush all requests from the request/pending queue of an AP device. 465 * Flush all requests from the request/pending queue of an AP device.
466 * @ap_dev: pointer to the AP device. 466 * @ap_dev: pointer to the AP device.
467 */ 467 */
468static inline void __ap_flush_queue(struct ap_device *ap_dev) 468static void __ap_flush_queue(struct ap_device *ap_dev)
469{ 469{
470 struct ap_message *ap_msg, *next; 470 struct ap_message *ap_msg, *next;
471 471
@@ -587,7 +587,7 @@ static struct bus_attribute *const ap_bus_attrs[] = {
587/** 587/**
588 * Pick one of the 16 ap domains. 588 * Pick one of the 16 ap domains.
589 */ 589 */
590static inline int ap_select_domain(void) 590static int ap_select_domain(void)
591{ 591{
592 int queue_depth, device_type, count, max_count, best_domain; 592 int queue_depth, device_type, count, max_count, best_domain;
593 int rc, i, j; 593 int rc, i, j;
@@ -825,7 +825,7 @@ static inline void ap_schedule_poll_timer(void)
825 * required, bit 2^1 is set if the poll timer needs to get armed 825 * required, bit 2^1 is set if the poll timer needs to get armed
826 * Returns 0 if the device is still present, -ENODEV if not. 826 * Returns 0 if the device is still present, -ENODEV if not.
827 */ 827 */
828static inline int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) 828static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
829{ 829{
830 struct ap_queue_status status; 830 struct ap_queue_status status;
831 struct ap_message *ap_msg; 831 struct ap_message *ap_msg;
@@ -872,7 +872,7 @@ static inline int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
872 * required, bit 2^1 is set if the poll timer needs to get armed 872 * required, bit 2^1 is set if the poll timer needs to get armed
873 * Returns 0 if the device is still present, -ENODEV if not. 873 * Returns 0 if the device is still present, -ENODEV if not.
874 */ 874 */
875static inline int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags) 875static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
876{ 876{
877 struct ap_queue_status status; 877 struct ap_queue_status status;
878 struct ap_message *ap_msg; 878 struct ap_message *ap_msg;
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 1edc10a7a6f2..b9e59bc9435a 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -791,7 +791,7 @@ static long trans_xcRB32(struct file *filp, unsigned int cmd,
791 return rc; 791 return rc;
792} 792}
793 793
794long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, 794static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
795 unsigned long arg) 795 unsigned long arg)
796{ 796{
797 if (cmd == ICARSAMODEXPO) 797 if (cmd == ICARSAMODEXPO)
@@ -833,8 +833,8 @@ static struct miscdevice zcrypt_misc_device = {
833 */ 833 */
834static struct proc_dir_entry *zcrypt_entry; 834static struct proc_dir_entry *zcrypt_entry;
835 835
836static inline int sprintcl(unsigned char *outaddr, unsigned char *addr, 836static int sprintcl(unsigned char *outaddr, unsigned char *addr,
837 unsigned int len) 837 unsigned int len)
838{ 838{
839 int hl, i; 839 int hl, i;
840 840
@@ -845,8 +845,8 @@ static inline int sprintcl(unsigned char *outaddr, unsigned char *addr,
845 return hl; 845 return hl;
846} 846}
847 847
848static inline int sprintrw(unsigned char *outaddr, unsigned char *addr, 848static int sprintrw(unsigned char *outaddr, unsigned char *addr,
849 unsigned int len) 849 unsigned int len)
850{ 850{
851 int hl, inl, c, cx; 851 int hl, inl, c, cx;
852 852
@@ -865,8 +865,8 @@ static inline int sprintrw(unsigned char *outaddr, unsigned char *addr,
865 return hl; 865 return hl;
866} 866}
867 867
868static inline int sprinthx(unsigned char *title, unsigned char *outaddr, 868static int sprinthx(unsigned char *title, unsigned char *outaddr,
869 unsigned char *addr, unsigned int len) 869 unsigned char *addr, unsigned int len)
870{ 870{
871 int hl, inl, r, rx; 871 int hl, inl, r, rx;
872 872
@@ -885,8 +885,8 @@ static inline int sprinthx(unsigned char *title, unsigned char *outaddr,
885 return hl; 885 return hl;
886} 886}
887 887
888static inline int sprinthx4(unsigned char *title, unsigned char *outaddr, 888static int sprinthx4(unsigned char *title, unsigned char *outaddr,
889 unsigned int *array, unsigned int len) 889 unsigned int *array, unsigned int len)
890{ 890{
891 int hl, r; 891 int hl, r;
892 892
@@ -943,7 +943,7 @@ static int zcrypt_status_read(char *resp_buff, char **start, off_t offset,
943 zcrypt_qdepth_mask(workarea); 943 zcrypt_qdepth_mask(workarea);
944 len += sprinthx("Waiting work element counts", 944 len += sprinthx("Waiting work element counts",
945 resp_buff+len, workarea, AP_DEVICES); 945 resp_buff+len, workarea, AP_DEVICES);
946 zcrypt_perdev_reqcnt((unsigned int *) workarea); 946 zcrypt_perdev_reqcnt((int *) workarea);
947 len += sprinthx4("Per-device successfully completed request counts", 947 len += sprinthx4("Per-device successfully completed request counts",
948 resp_buff+len,(unsigned int *) workarea, AP_DEVICES); 948 resp_buff+len,(unsigned int *) workarea, AP_DEVICES);
949 *eof = 1; 949 *eof = 1;
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index 32e37014345c..818ffe05ac00 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -191,10 +191,10 @@ static int ICACRT_msg_to_type4CRT_msg(struct zcrypt_device *zdev,
191 * 191 *
192 * Returns 0 on success or -EFAULT. 192 * Returns 0 on success or -EFAULT.
193 */ 193 */
194static inline int convert_type84(struct zcrypt_device *zdev, 194static int convert_type84(struct zcrypt_device *zdev,
195 struct ap_message *reply, 195 struct ap_message *reply,
196 char __user *outputdata, 196 char __user *outputdata,
197 unsigned int outputdatalength) 197 unsigned int outputdatalength)
198{ 198{
199 struct type84_hdr *t84h = reply->message; 199 struct type84_hdr *t84h = reply->message;
200 char *data; 200 char *data;
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index b7153c1e15cd..252443b6bd1b 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -709,7 +709,8 @@ out_free:
709 * PCIXCC/CEX2C device to the request distributor 709 * PCIXCC/CEX2C device to the request distributor
710 * @xcRB: pointer to the send_cprb request buffer 710 * @xcRB: pointer to the send_cprb request buffer
711 */ 711 */
712long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev, struct ica_xcRB *xcRB) 712static long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev,
713 struct ica_xcRB *xcRB)
713{ 714{
714 struct ap_message ap_msg; 715 struct ap_message ap_msg;
715 struct response_type resp_type = { 716 struct response_type resp_type = {
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 95f4e105cb96..7809a79feec7 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -121,7 +121,7 @@ MODULE_LICENSE("GPL");
121#define DEBUG 121#define DEBUG
122#endif 122#endif
123 123
124 char debug_buffer[255]; 124static char debug_buffer[255];
125/** 125/**
126 * Debug Facility Stuff 126 * Debug Facility Stuff
127 */ 127 */
@@ -223,16 +223,14 @@ static void claw_timer ( struct chbk * p_ch );
223/* Functions */ 223/* Functions */
224static int add_claw_reads(struct net_device *dev, 224static int add_claw_reads(struct net_device *dev,
225 struct ccwbk* p_first, struct ccwbk* p_last); 225 struct ccwbk* p_first, struct ccwbk* p_last);
226static void inline ccw_check_return_code (struct ccw_device *cdev, 226static void ccw_check_return_code (struct ccw_device *cdev, int return_code);
227 int return_code); 227static void ccw_check_unit_check (struct chbk * p_ch, unsigned char sense );
228static void inline ccw_check_unit_check (struct chbk * p_ch,
229 unsigned char sense );
230static int find_link(struct net_device *dev, char *host_name, char *ws_name ); 228static int find_link(struct net_device *dev, char *host_name, char *ws_name );
231static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid); 229static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid);
232static int init_ccw_bk(struct net_device *dev); 230static int init_ccw_bk(struct net_device *dev);
233static void probe_error( struct ccwgroup_device *cgdev); 231static void probe_error( struct ccwgroup_device *cgdev);
234static struct net_device_stats *claw_stats(struct net_device *dev); 232static struct net_device_stats *claw_stats(struct net_device *dev);
235static int inline pages_to_order_of_mag(int num_of_pages); 233static int pages_to_order_of_mag(int num_of_pages);
236static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr); 234static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);
237#ifdef DEBUG 235#ifdef DEBUG
238static void dumpit (char *buf, int len); 236static void dumpit (char *buf, int len);
@@ -1310,7 +1308,7 @@ claw_timer ( struct chbk * p_ch )
1310* of magnitude get_free_pages() has an upper order of 9 * 1308* of magnitude get_free_pages() has an upper order of 9 *
1311*--------------------------------------------------------------------*/ 1309*--------------------------------------------------------------------*/
1312 1310
1313static int inline 1311static int
1314pages_to_order_of_mag(int num_of_pages) 1312pages_to_order_of_mag(int num_of_pages)
1315{ 1313{
1316 int order_of_mag=1; /* assume 2 pages */ 1314 int order_of_mag=1; /* assume 2 pages */
@@ -1482,7 +1480,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1482 * * 1480 * *
1483 *-------------------------------------------------------------------*/ 1481 *-------------------------------------------------------------------*/
1484 1482
1485static void inline 1483static void
1486ccw_check_return_code(struct ccw_device *cdev, int return_code) 1484ccw_check_return_code(struct ccw_device *cdev, int return_code)
1487{ 1485{
1488#ifdef FUNCTRACE 1486#ifdef FUNCTRACE
@@ -1529,7 +1527,7 @@ ccw_check_return_code(struct ccw_device *cdev, int return_code)
1529* ccw_check_unit_check * 1527* ccw_check_unit_check *
1530*--------------------------------------------------------------------*/ 1528*--------------------------------------------------------------------*/
1531 1529
1532static void inline 1530static void
1533ccw_check_unit_check(struct chbk * p_ch, unsigned char sense ) 1531ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
1534{ 1532{
1535 struct net_device *dev = p_ch->ndev; 1533 struct net_device *dev = p_ch->ndev;
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index 03cc263fe0da..5a84fbbc6611 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -369,7 +369,7 @@ ctc_dump_skb(struct sk_buff *skb, int offset)
369 * @param ch The channel where this skb has been received. 369 * @param ch The channel where this skb has been received.
370 * @param pskb The received skb. 370 * @param pskb The received skb.
371 */ 371 */
372static __inline__ void 372static void
373ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb) 373ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
374{ 374{
375 struct net_device *dev = ch->netdev; 375 struct net_device *dev = ch->netdev;
@@ -512,7 +512,7 @@ ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
512 * @param ch The channel, the error belongs to. 512 * @param ch The channel, the error belongs to.
513 * @param return_code The error code to inspect. 513 * @param return_code The error code to inspect.
514 */ 514 */
515static void inline 515static void
516ccw_check_return_code(struct channel *ch, int return_code, char *msg) 516ccw_check_return_code(struct channel *ch, int return_code, char *msg)
517{ 517{
518 DBF_TEXT(trace, 5, __FUNCTION__); 518 DBF_TEXT(trace, 5, __FUNCTION__);
@@ -547,7 +547,7 @@ ccw_check_return_code(struct channel *ch, int return_code, char *msg)
547 * @param ch The channel, the sense code belongs to. 547 * @param ch The channel, the sense code belongs to.
548 * @param sense The sense code to inspect. 548 * @param sense The sense code to inspect.
549 */ 549 */
550static void inline 550static void
551ccw_unit_check(struct channel *ch, unsigned char sense) 551ccw_unit_check(struct channel *ch, unsigned char sense)
552{ 552{
553 DBF_TEXT(trace, 5, __FUNCTION__); 553 DBF_TEXT(trace, 5, __FUNCTION__);
@@ -603,7 +603,7 @@ ctc_purge_skb_queue(struct sk_buff_head *q)
603 } 603 }
604} 604}
605 605
606static __inline__ int 606static int
607ctc_checkalloc_buffer(struct channel *ch, int warn) 607ctc_checkalloc_buffer(struct channel *ch, int warn)
608{ 608{
609 DBF_TEXT(trace, 5, __FUNCTION__); 609 DBF_TEXT(trace, 5, __FUNCTION__);
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c
index e965f03a7291..76728ae4b843 100644
--- a/drivers/s390/net/cu3088.c
+++ b/drivers/s390/net/cu3088.c
@@ -57,7 +57,7 @@ static struct ccw_device_id cu3088_ids[] = {
57 57
58static struct ccw_driver cu3088_driver; 58static struct ccw_driver cu3088_driver;
59 59
60struct device *cu3088_root_dev; 60static struct device *cu3088_root_dev;
61 61
62static ssize_t 62static ssize_t
63group_write(struct device_driver *drv, const char *buf, size_t count) 63group_write(struct device_driver *drv, const char *buf, size_t count)
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index e5665b6743a1..b97dd15bdb9a 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -828,7 +828,7 @@ lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd)
828/** 828/**
829 * Emit buffer of a lan comand. 829 * Emit buffer of a lan comand.
830 */ 830 */
831void 831static void
832lcs_lancmd_timeout(unsigned long data) 832lcs_lancmd_timeout(unsigned long data)
833{ 833{
834 struct lcs_reply *reply, *list_reply, *r; 834 struct lcs_reply *reply, *list_reply, *r;
@@ -1360,7 +1360,7 @@ lcs_get_problem(struct ccw_device *cdev, struct irb *irb)
1360 return 0; 1360 return 0;
1361} 1361}
1362 1362
1363void 1363static void
1364lcs_schedule_recovery(struct lcs_card *card) 1364lcs_schedule_recovery(struct lcs_card *card)
1365{ 1365{
1366 LCS_DBF_TEXT(2, trace, "startrec"); 1366 LCS_DBF_TEXT(2, trace, "startrec");
@@ -1990,7 +1990,7 @@ lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char
1990 1990
1991} 1991}
1992 1992
1993DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store); 1993static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store);
1994 1994
1995static ssize_t 1995static ssize_t
1996lcs_dev_recover_store(struct device *dev, struct device_attribute *attr, 1996lcs_dev_recover_store(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index d7d1cc0a5c8e..3346088f47e0 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -2053,7 +2053,7 @@ out_free_ndev:
2053 return ret; 2053 return ret;
2054} 2054}
2055 2055
2056DRIVER_ATTR(connection, 0200, NULL, conn_write); 2056static DRIVER_ATTR(connection, 0200, NULL, conn_write);
2057 2057
2058static ssize_t 2058static ssize_t
2059remove_write (struct device_driver *drv, const char *buf, size_t count) 2059remove_write (struct device_driver *drv, const char *buf, size_t count)
@@ -2112,7 +2112,7 @@ remove_write (struct device_driver *drv, const char *buf, size_t count)
2112 return -EINVAL; 2112 return -EINVAL;
2113} 2113}
2114 2114
2115DRIVER_ATTR(remove, 0200, NULL, remove_write); 2115static DRIVER_ATTR(remove, 0200, NULL, remove_write);
2116 2116
2117static void 2117static void
2118netiucv_banner(void) 2118netiucv_banner(void)
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c
index 6bb558a9a032..7c735e1fe063 100644
--- a/drivers/s390/net/qeth_eddp.c
+++ b/drivers/s390/net/qeth_eddp.c
@@ -49,7 +49,7 @@ qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
49 return buffers_needed; 49 return buffers_needed;
50} 50}
51 51
52static inline void 52static void
53qeth_eddp_free_context(struct qeth_eddp_context *ctx) 53qeth_eddp_free_context(struct qeth_eddp_context *ctx)
54{ 54{
55 int i; 55 int i;
@@ -91,7 +91,7 @@ qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
91 } 91 }
92} 92}
93 93
94static inline int 94static int
95qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf, 95qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
96 struct qeth_eddp_context *ctx) 96 struct qeth_eddp_context *ctx)
97{ 97{
@@ -196,7 +196,7 @@ out:
196 return flush_cnt; 196 return flush_cnt;
197} 197}
198 198
199static inline void 199static void
200qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx, 200qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
201 struct qeth_eddp_data *eddp, int data_len) 201 struct qeth_eddp_data *eddp, int data_len)
202{ 202{
@@ -256,7 +256,7 @@ qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
256 ctx->offset += eddp->thl; 256 ctx->offset += eddp->thl;
257} 257}
258 258
259static inline void 259static void
260qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, 260qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
261 __wsum *hcsum) 261 __wsum *hcsum)
262{ 262{
@@ -302,7 +302,7 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
302 } 302 }
303} 303}
304 304
305static inline void 305static void
306qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx, 306qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
307 struct qeth_eddp_data *eddp, int data_len, 307 struct qeth_eddp_data *eddp, int data_len,
308 __wsum hcsum) 308 __wsum hcsum)
@@ -349,7 +349,7 @@ qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
349 ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum); 349 ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
350} 350}
351 351
352static inline __wsum 352static __wsum
353qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len) 353qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
354{ 354{
355 __wsum phcsum; /* pseudo header checksum */ 355 __wsum phcsum; /* pseudo header checksum */
@@ -363,7 +363,7 @@ qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
363 return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum); 363 return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
364} 364}
365 365
366static inline __wsum 366static __wsum
367qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len) 367qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
368{ 368{
369 __be32 proto; 369 __be32 proto;
@@ -381,7 +381,7 @@ qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
381 return phcsum; 381 return phcsum;
382} 382}
383 383
384static inline struct qeth_eddp_data * 384static struct qeth_eddp_data *
385qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl) 385qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
386{ 386{
387 struct qeth_eddp_data *eddp; 387 struct qeth_eddp_data *eddp;
@@ -399,7 +399,7 @@ qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
399 return eddp; 399 return eddp;
400} 400}
401 401
402static inline void 402static void
403__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, 403__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
404 struct qeth_eddp_data *eddp) 404 struct qeth_eddp_data *eddp)
405{ 405{
@@ -464,7 +464,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
464 } 464 }
465} 465}
466 466
467static inline int 467static int
468qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, 468qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
469 struct sk_buff *skb, struct qeth_hdr *qhdr) 469 struct sk_buff *skb, struct qeth_hdr *qhdr)
470{ 470{
@@ -505,7 +505,7 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
505 return 0; 505 return 0;
506} 506}
507 507
508static inline void 508static void
509qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb, 509qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
510 int hdr_len) 510 int hdr_len)
511{ 511{
@@ -529,7 +529,7 @@ qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
529 (skb_shinfo(skb)->gso_segs + 1); 529 (skb_shinfo(skb)->gso_segs + 1);
530} 530}
531 531
532static inline struct qeth_eddp_context * 532static struct qeth_eddp_context *
533qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb, 533qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
534 int hdr_len) 534 int hdr_len)
535{ 535{
@@ -581,7 +581,7 @@ qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
581 return ctx; 581 return ctx;
582} 582}
583 583
584static inline struct qeth_eddp_context * 584static struct qeth_eddp_context *
585qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb, 585qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
586 struct qeth_hdr *qhdr) 586 struct qeth_hdr *qhdr)
587{ 587{
@@ -625,5 +625,3 @@ qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
625 } 625 }
626 return NULL; 626 return NULL;
627} 627}
628
629
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index d2efa5ff125d..2257e45594b3 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -651,7 +651,7 @@ __qeth_ref_ip_on_card(struct qeth_card *card, struct qeth_ipaddr *todo,
651 return 0; 651 return 0;
652} 652}
653 653
654static inline int 654static int
655__qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr, 655__qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr,
656 int same_type) 656 int same_type)
657{ 657{
@@ -795,7 +795,7 @@ qeth_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
795 return rc; 795 return rc;
796} 796}
797 797
798static inline void 798static void
799__qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags) 799__qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags)
800{ 800{
801 struct qeth_ipaddr *addr, *tmp; 801 struct qeth_ipaddr *addr, *tmp;
@@ -882,7 +882,7 @@ static void qeth_layer2_add_multicast(struct qeth_card *);
882static void qeth_add_multicast_ipv6(struct qeth_card *); 882static void qeth_add_multicast_ipv6(struct qeth_card *);
883#endif 883#endif
884 884
885static inline int 885static int
886qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread) 886qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread)
887{ 887{
888 unsigned long flags; 888 unsigned long flags;
@@ -920,7 +920,7 @@ qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
920 wake_up(&card->wait_q); 920 wake_up(&card->wait_q);
921} 921}
922 922
923static inline int 923static int
924__qeth_do_run_thread(struct qeth_card *card, unsigned long thread) 924__qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
925{ 925{
926 unsigned long flags; 926 unsigned long flags;
@@ -1764,9 +1764,9 @@ out:
1764 qeth_release_buffer(channel,iob); 1764 qeth_release_buffer(channel,iob);
1765} 1765}
1766 1766
1767static inline void 1767static void
1768qeth_prepare_control_data(struct qeth_card *card, int len, 1768qeth_prepare_control_data(struct qeth_card *card, int len,
1769struct qeth_cmd_buffer *iob) 1769 struct qeth_cmd_buffer *iob)
1770{ 1770{
1771 qeth_setup_ccw(&card->write,iob->data,len); 1771 qeth_setup_ccw(&card->write,iob->data,len);
1772 iob->callback = qeth_release_buffer; 1772 iob->callback = qeth_release_buffer;
@@ -2160,7 +2160,7 @@ qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
2160 return 0; 2160 return 0;
2161} 2161}
2162 2162
2163static inline struct sk_buff * 2163static struct sk_buff *
2164qeth_get_skb(unsigned int length, struct qeth_hdr *hdr) 2164qeth_get_skb(unsigned int length, struct qeth_hdr *hdr)
2165{ 2165{
2166 struct sk_buff* skb; 2166 struct sk_buff* skb;
@@ -2179,7 +2179,7 @@ qeth_get_skb(unsigned int length, struct qeth_hdr *hdr)
2179 return skb; 2179 return skb;
2180} 2180}
2181 2181
2182static inline struct sk_buff * 2182static struct sk_buff *
2183qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer, 2183qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
2184 struct qdio_buffer_element **__element, int *__offset, 2184 struct qdio_buffer_element **__element, int *__offset,
2185 struct qeth_hdr **hdr) 2185 struct qeth_hdr **hdr)
@@ -2264,7 +2264,7 @@ no_mem:
2264 return NULL; 2264 return NULL;
2265} 2265}
2266 2266
2267static inline __be16 2267static __be16
2268qeth_type_trans(struct sk_buff *skb, struct net_device *dev) 2268qeth_type_trans(struct sk_buff *skb, struct net_device *dev)
2269{ 2269{
2270 struct qeth_card *card; 2270 struct qeth_card *card;
@@ -2297,7 +2297,7 @@ qeth_type_trans(struct sk_buff *skb, struct net_device *dev)
2297 return htons(ETH_P_802_2); 2297 return htons(ETH_P_802_2);
2298} 2298}
2299 2299
2300static inline void 2300static void
2301qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb, 2301qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb,
2302 struct qeth_hdr *hdr) 2302 struct qeth_hdr *hdr)
2303{ 2303{
@@ -2351,7 +2351,7 @@ qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb,
2351 fake_llc->ethertype = ETH_P_IP; 2351 fake_llc->ethertype = ETH_P_IP;
2352} 2352}
2353 2353
2354static inline void 2354static void
2355qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb, 2355qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb,
2356 struct qeth_hdr *hdr) 2356 struct qeth_hdr *hdr)
2357{ 2357{
@@ -2420,7 +2420,7 @@ qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2420 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; 2420 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
2421} 2421}
2422 2422
2423static inline __u16 2423static __u16
2424qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, 2424qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2425 struct qeth_hdr *hdr) 2425 struct qeth_hdr *hdr)
2426{ 2426{
@@ -2476,7 +2476,7 @@ qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2476 return vlan_id; 2476 return vlan_id;
2477} 2477}
2478 2478
2479static inline void 2479static void
2480qeth_process_inbound_buffer(struct qeth_card *card, 2480qeth_process_inbound_buffer(struct qeth_card *card,
2481 struct qeth_qdio_buffer *buf, int index) 2481 struct qeth_qdio_buffer *buf, int index)
2482{ 2482{
@@ -2528,7 +2528,7 @@ qeth_process_inbound_buffer(struct qeth_card *card,
2528 } 2528 }
2529} 2529}
2530 2530
2531static inline struct qeth_buffer_pool_entry * 2531static struct qeth_buffer_pool_entry *
2532qeth_get_buffer_pool_entry(struct qeth_card *card) 2532qeth_get_buffer_pool_entry(struct qeth_card *card)
2533{ 2533{
2534 struct qeth_buffer_pool_entry *entry; 2534 struct qeth_buffer_pool_entry *entry;
@@ -2543,7 +2543,7 @@ qeth_get_buffer_pool_entry(struct qeth_card *card)
2543 return NULL; 2543 return NULL;
2544} 2544}
2545 2545
2546static inline void 2546static void
2547qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf) 2547qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
2548{ 2548{
2549 struct qeth_buffer_pool_entry *pool_entry; 2549 struct qeth_buffer_pool_entry *pool_entry;
@@ -2570,7 +2570,7 @@ qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
2570 buf->state = QETH_QDIO_BUF_EMPTY; 2570 buf->state = QETH_QDIO_BUF_EMPTY;
2571} 2571}
2572 2572
2573static inline void 2573static void
2574qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, 2574qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
2575 struct qeth_qdio_out_buffer *buf) 2575 struct qeth_qdio_out_buffer *buf)
2576{ 2576{
@@ -2595,7 +2595,7 @@ qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
2595 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); 2595 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
2596} 2596}
2597 2597
2598static inline void 2598static void
2599qeth_queue_input_buffer(struct qeth_card *card, int index) 2599qeth_queue_input_buffer(struct qeth_card *card, int index)
2600{ 2600{
2601 struct qeth_qdio_q *queue = card->qdio.in_q; 2601 struct qeth_qdio_q *queue = card->qdio.in_q;
@@ -2699,7 +2699,7 @@ qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status,
2699 card->perf_stats.inbound_start_time; 2699 card->perf_stats.inbound_start_time;
2700} 2700}
2701 2701
2702static inline int 2702static int
2703qeth_handle_send_error(struct qeth_card *card, 2703qeth_handle_send_error(struct qeth_card *card,
2704 struct qeth_qdio_out_buffer *buffer, 2704 struct qeth_qdio_out_buffer *buffer,
2705 unsigned int qdio_err, unsigned int siga_err) 2705 unsigned int qdio_err, unsigned int siga_err)
@@ -2821,7 +2821,7 @@ qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2821 * Switched to packing state if the number of used buffers on a queue 2821 * Switched to packing state if the number of used buffers on a queue
2822 * reaches a certain limit. 2822 * reaches a certain limit.
2823 */ 2823 */
2824static inline void 2824static void
2825qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) 2825qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
2826{ 2826{
2827 if (!queue->do_pack) { 2827 if (!queue->do_pack) {
@@ -2842,7 +2842,7 @@ qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
2842 * In that case 1 is returned to inform the caller. If no buffer 2842 * In that case 1 is returned to inform the caller. If no buffer
2843 * has to be flushed, zero is returned. 2843 * has to be flushed, zero is returned.
2844 */ 2844 */
2845static inline int 2845static int
2846qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) 2846qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
2847{ 2847{
2848 struct qeth_qdio_out_buffer *buffer; 2848 struct qeth_qdio_out_buffer *buffer;
@@ -2877,7 +2877,7 @@ qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
2877 * Checks if there is a packing buffer and prepares it to be flushed. 2877 * Checks if there is a packing buffer and prepares it to be flushed.
2878 * In that case returns 1, otherwise zero. 2878 * In that case returns 1, otherwise zero.
2879 */ 2879 */
2880static inline int 2880static int
2881qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue) 2881qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2882{ 2882{
2883 struct qeth_qdio_out_buffer *buffer; 2883 struct qeth_qdio_out_buffer *buffer;
@@ -2894,7 +2894,7 @@ qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2894 return 0; 2894 return 0;
2895} 2895}
2896 2896
2897static inline void 2897static void
2898qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) 2898qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
2899{ 2899{
2900 int index; 2900 int index;
@@ -3594,7 +3594,7 @@ qeth_fake_header(struct sk_buff *skb, struct net_device *dev,
3594 } 3594 }
3595} 3595}
3596 3596
3597static inline int 3597static int
3598qeth_send_packet(struct qeth_card *, struct sk_buff *); 3598qeth_send_packet(struct qeth_card *, struct sk_buff *);
3599 3599
3600static int 3600static int
@@ -3759,7 +3759,7 @@ qeth_stop(struct net_device *dev)
3759 return 0; 3759 return 0;
3760} 3760}
3761 3761
3762static inline int 3762static int
3763qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb) 3763qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
3764{ 3764{
3765 int cast_type = RTN_UNSPEC; 3765 int cast_type = RTN_UNSPEC;
@@ -3806,7 +3806,7 @@ qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
3806 return cast_type; 3806 return cast_type;
3807} 3807}
3808 3808
3809static inline int 3809static int
3810qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, 3810qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3811 int ipv, int cast_type) 3811 int ipv, int cast_type)
3812{ 3812{
@@ -3853,7 +3853,7 @@ qeth_get_ip_version(struct sk_buff *skb)
3853 } 3853 }
3854} 3854}
3855 3855
3856static inline struct qeth_hdr * 3856static struct qeth_hdr *
3857__qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv) 3857__qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv)
3858{ 3858{
3859#ifdef CONFIG_QETH_VLAN 3859#ifdef CONFIG_QETH_VLAN
@@ -3882,14 +3882,14 @@ __qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv)
3882 qeth_push_skb(card, skb, sizeof(struct qeth_hdr))); 3882 qeth_push_skb(card, skb, sizeof(struct qeth_hdr)));
3883} 3883}
3884 3884
3885static inline void 3885static void
3886__qeth_free_new_skb(struct sk_buff *orig_skb, struct sk_buff *new_skb) 3886__qeth_free_new_skb(struct sk_buff *orig_skb, struct sk_buff *new_skb)
3887{ 3887{
3888 if (orig_skb != new_skb) 3888 if (orig_skb != new_skb)
3889 dev_kfree_skb_any(new_skb); 3889 dev_kfree_skb_any(new_skb);
3890} 3890}
3891 3891
3892static inline struct sk_buff * 3892static struct sk_buff *
3893qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, 3893qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb,
3894 struct qeth_hdr **hdr, int ipv) 3894 struct qeth_hdr **hdr, int ipv)
3895{ 3895{
@@ -3940,7 +3940,7 @@ qeth_get_qeth_hdr_flags6(int cast_type)
3940 return ct | QETH_CAST_UNICAST; 3940 return ct | QETH_CAST_UNICAST;
3941} 3941}
3942 3942
3943static inline void 3943static void
3944qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr, 3944qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr,
3945 struct sk_buff *skb) 3945 struct sk_buff *skb)
3946{ 3946{
@@ -3977,7 +3977,7 @@ qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr,
3977 } 3977 }
3978} 3978}
3979 3979
3980static inline void 3980static void
3981qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, 3981qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
3982 struct sk_buff *skb, int cast_type) 3982 struct sk_buff *skb, int cast_type)
3983{ 3983{
@@ -4068,7 +4068,7 @@ qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
4068 } 4068 }
4069} 4069}
4070 4070
4071static inline void 4071static void
4072__qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer, 4072__qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
4073 int is_tso, int *next_element_to_fill) 4073 int is_tso, int *next_element_to_fill)
4074{ 4074{
@@ -4112,7 +4112,7 @@ __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
4112 *next_element_to_fill = element; 4112 *next_element_to_fill = element;
4113} 4113}
4114 4114
4115static inline int 4115static int
4116qeth_fill_buffer(struct qeth_qdio_out_q *queue, 4116qeth_fill_buffer(struct qeth_qdio_out_q *queue,
4117 struct qeth_qdio_out_buffer *buf, 4117 struct qeth_qdio_out_buffer *buf,
4118 struct sk_buff *skb) 4118 struct sk_buff *skb)
@@ -4171,7 +4171,7 @@ qeth_fill_buffer(struct qeth_qdio_out_q *queue,
4171 return flush_cnt; 4171 return flush_cnt;
4172} 4172}
4173 4173
4174static inline int 4174static int
4175qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue, 4175qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4176 struct sk_buff *skb, struct qeth_hdr *hdr, 4176 struct sk_buff *skb, struct qeth_hdr *hdr,
4177 int elements_needed, 4177 int elements_needed,
@@ -4222,7 +4222,7 @@ out:
4222 return -EBUSY; 4222 return -EBUSY;
4223} 4223}
4224 4224
4225static inline int 4225static int
4226qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, 4226qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4227 struct sk_buff *skb, struct qeth_hdr *hdr, 4227 struct sk_buff *skb, struct qeth_hdr *hdr,
4228 int elements_needed, struct qeth_eddp_context *ctx) 4228 int elements_needed, struct qeth_eddp_context *ctx)
@@ -4328,7 +4328,7 @@ out:
4328 return rc; 4328 return rc;
4329} 4329}
4330 4330
4331static inline int 4331static int
4332qeth_get_elements_no(struct qeth_card *card, void *hdr, 4332qeth_get_elements_no(struct qeth_card *card, void *hdr,
4333 struct sk_buff *skb, int elems) 4333 struct sk_buff *skb, int elems)
4334{ 4334{
@@ -4349,7 +4349,7 @@ qeth_get_elements_no(struct qeth_card *card, void *hdr,
4349} 4349}
4350 4350
4351 4351
4352static inline int 4352static int
4353qeth_send_packet(struct qeth_card *card, struct sk_buff *skb) 4353qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
4354{ 4354{
4355 int ipv = 0; 4355 int ipv = 0;
@@ -4536,7 +4536,7 @@ qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4536} 4536}
4537 4537
4538 4538
4539static inline const char * 4539static const char *
4540qeth_arp_get_error_cause(int *rc) 4540qeth_arp_get_error_cause(int *rc)
4541{ 4541{
4542 switch (*rc) { 4542 switch (*rc) {
@@ -4597,7 +4597,7 @@ qeth_arp_set_no_entries(struct qeth_card *card, int no_entries)
4597 return rc; 4597 return rc;
4598} 4598}
4599 4599
4600static inline void 4600static void
4601qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo, 4601qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo,
4602 struct qeth_arp_query_data *qdata, 4602 struct qeth_arp_query_data *qdata,
4603 int entry_size, int uentry_size) 4603 int entry_size, int uentry_size)
@@ -5214,7 +5214,7 @@ qeth_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
5214 spin_unlock_irqrestore(&card->vlanlock, flags); 5214 spin_unlock_irqrestore(&card->vlanlock, flags);
5215} 5215}
5216 5216
5217static inline void 5217static void
5218qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf, 5218qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf,
5219 unsigned short vid) 5219 unsigned short vid)
5220{ 5220{
@@ -5625,7 +5625,7 @@ qeth_delete_mc_addresses(struct qeth_card *card)
5625 spin_unlock_irqrestore(&card->ip_lock, flags); 5625 spin_unlock_irqrestore(&card->ip_lock, flags);
5626} 5626}
5627 5627
5628static inline void 5628static void
5629qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev) 5629qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev)
5630{ 5630{
5631 struct qeth_ipaddr *ipm; 5631 struct qeth_ipaddr *ipm;
@@ -5711,7 +5711,7 @@ qeth_layer2_add_multicast(struct qeth_card *card)
5711} 5711}
5712 5712
5713#ifdef CONFIG_QETH_IPV6 5713#ifdef CONFIG_QETH_IPV6
5714static inline void 5714static void
5715qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev) 5715qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
5716{ 5716{
5717 struct qeth_ipaddr *ipm; 5717 struct qeth_ipaddr *ipm;
@@ -6022,7 +6022,7 @@ qeth_send_setdelmc(struct qeth_card *card, struct qeth_ipaddr *addr, int ipacmd)
6022 6022
6023 return rc; 6023 return rc;
6024} 6024}
6025static inline void 6025static void
6026qeth_fill_netmask(u8 *netmask, unsigned int len) 6026qeth_fill_netmask(u8 *netmask, unsigned int len)
6027{ 6027{
6028 int i,j; 6028 int i,j;
@@ -6626,7 +6626,7 @@ qeth_send_setadp_mode(struct qeth_card *card, __u32 command, __u32 mode)
6626 return rc; 6626 return rc;
6627} 6627}
6628 6628
6629static inline int 6629static int
6630qeth_setadapter_hstr(struct qeth_card *card) 6630qeth_setadapter_hstr(struct qeth_card *card)
6631{ 6631{
6632 int rc; 6632 int rc;
@@ -6889,7 +6889,7 @@ qeth_send_simple_setassparms(struct qeth_card *card,
6889 return rc; 6889 return rc;
6890} 6890}
6891 6891
6892static inline int 6892static int
6893qeth_start_ipa_arp_processing(struct qeth_card *card) 6893qeth_start_ipa_arp_processing(struct qeth_card *card)
6894{ 6894{
6895 int rc; 6895 int rc;
@@ -7529,7 +7529,7 @@ qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
7529 wake_up(&card->wait_q); 7529 wake_up(&card->wait_q);
7530} 7530}
7531 7531
7532static inline int 7532static int
7533qeth_threads_running(struct qeth_card *card, unsigned long threads) 7533qeth_threads_running(struct qeth_card *card, unsigned long threads)
7534{ 7534{
7535 unsigned long flags; 7535 unsigned long flags;
@@ -8118,7 +8118,7 @@ qeth_del_ipato_entry(struct qeth_card *card, enum qeth_prot_versions proto,
8118 spin_unlock_irqrestore(&card->ip_lock, flags); 8118 spin_unlock_irqrestore(&card->ip_lock, flags);
8119} 8119}
8120 8120
8121static inline void 8121static void
8122qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len) 8122qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
8123{ 8123{
8124 int i, j; 8124 int i, j;
diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c
index 5836737ac58f..d518419cd0c6 100644
--- a/drivers/s390/net/qeth_sys.c
+++ b/drivers/s390/net/qeth_sys.c
@@ -328,7 +328,7 @@ qeth_dev_bufcnt_store(struct device *dev, struct device_attribute *attr, const c
328static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show, 328static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show,
329 qeth_dev_bufcnt_store); 329 qeth_dev_bufcnt_store);
330 330
331static inline ssize_t 331static ssize_t
332qeth_dev_route_show(struct qeth_card *card, struct qeth_routing_info *route, 332qeth_dev_route_show(struct qeth_card *card, struct qeth_routing_info *route,
333 char *buf) 333 char *buf)
334{ 334{
@@ -368,7 +368,7 @@ qeth_dev_route4_show(struct device *dev, struct device_attribute *attr, char *bu
368 return qeth_dev_route_show(card, &card->options.route4, buf); 368 return qeth_dev_route_show(card, &card->options.route4, buf);
369} 369}
370 370
371static inline ssize_t 371static ssize_t
372qeth_dev_route_store(struct qeth_card *card, struct qeth_routing_info *route, 372qeth_dev_route_store(struct qeth_card *card, struct qeth_routing_info *route,
373 enum qeth_prot_versions prot, const char *buf, size_t count) 373 enum qeth_prot_versions prot, const char *buf, size_t count)
374{ 374{
@@ -998,7 +998,7 @@ struct device_attribute dev_attr_##_id = { \
998 .store = _store, \ 998 .store = _store, \
999}; 999};
1000 1000
1001int 1001static int
1002qeth_check_layer2(struct qeth_card *card) 1002qeth_check_layer2(struct qeth_card *card)
1003{ 1003{
1004 if (card->options.layer2) 1004 if (card->options.layer2)
@@ -1100,7 +1100,7 @@ static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644,
1100 qeth_dev_ipato_invert4_show, 1100 qeth_dev_ipato_invert4_show,
1101 qeth_dev_ipato_invert4_store); 1101 qeth_dev_ipato_invert4_store);
1102 1102
1103static inline ssize_t 1103static ssize_t
1104qeth_dev_ipato_add_show(char *buf, struct qeth_card *card, 1104qeth_dev_ipato_add_show(char *buf, struct qeth_card *card,
1105 enum qeth_prot_versions proto) 1105 enum qeth_prot_versions proto)
1106{ 1106{
@@ -1146,7 +1146,7 @@ qeth_dev_ipato_add4_show(struct device *dev, struct device_attribute *attr, char
1146 return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV4); 1146 return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV4);
1147} 1147}
1148 1148
1149static inline int 1149static int
1150qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto, 1150qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto,
1151 u8 *addr, int *mask_bits) 1151 u8 *addr, int *mask_bits)
1152{ 1152{
@@ -1178,7 +1178,7 @@ qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto,
1178 return 0; 1178 return 0;
1179} 1179}
1180 1180
1181static inline ssize_t 1181static ssize_t
1182qeth_dev_ipato_add_store(const char *buf, size_t count, 1182qeth_dev_ipato_add_store(const char *buf, size_t count,
1183 struct qeth_card *card, enum qeth_prot_versions proto) 1183 struct qeth_card *card, enum qeth_prot_versions proto)
1184{ 1184{
@@ -1223,7 +1223,7 @@ static QETH_DEVICE_ATTR(ipato_add4, add4, 0644,
1223 qeth_dev_ipato_add4_show, 1223 qeth_dev_ipato_add4_show,
1224 qeth_dev_ipato_add4_store); 1224 qeth_dev_ipato_add4_store);
1225 1225
1226static inline ssize_t 1226static ssize_t
1227qeth_dev_ipato_del_store(const char *buf, size_t count, 1227qeth_dev_ipato_del_store(const char *buf, size_t count,
1228 struct qeth_card *card, enum qeth_prot_versions proto) 1228 struct qeth_card *card, enum qeth_prot_versions proto)
1229{ 1229{
@@ -1361,7 +1361,7 @@ static struct attribute_group qeth_device_ipato_group = {
1361 .attrs = (struct attribute **)qeth_ipato_device_attrs, 1361 .attrs = (struct attribute **)qeth_ipato_device_attrs,
1362}; 1362};
1363 1363
1364static inline ssize_t 1364static ssize_t
1365qeth_dev_vipa_add_show(char *buf, struct qeth_card *card, 1365qeth_dev_vipa_add_show(char *buf, struct qeth_card *card,
1366 enum qeth_prot_versions proto) 1366 enum qeth_prot_versions proto)
1367{ 1367{
@@ -1407,7 +1407,7 @@ qeth_dev_vipa_add4_show(struct device *dev, struct device_attribute *attr, char
1407 return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV4); 1407 return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV4);
1408} 1408}
1409 1409
1410static inline int 1410static int
1411qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto, 1411qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto,
1412 u8 *addr) 1412 u8 *addr)
1413{ 1413{
@@ -1418,7 +1418,7 @@ qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto,
1418 return 0; 1418 return 0;
1419} 1419}
1420 1420
1421static inline ssize_t 1421static ssize_t
1422qeth_dev_vipa_add_store(const char *buf, size_t count, 1422qeth_dev_vipa_add_store(const char *buf, size_t count,
1423 struct qeth_card *card, enum qeth_prot_versions proto) 1423 struct qeth_card *card, enum qeth_prot_versions proto)
1424{ 1424{
@@ -1451,7 +1451,7 @@ static QETH_DEVICE_ATTR(vipa_add4, add4, 0644,
1451 qeth_dev_vipa_add4_show, 1451 qeth_dev_vipa_add4_show,
1452 qeth_dev_vipa_add4_store); 1452 qeth_dev_vipa_add4_store);
1453 1453
1454static inline ssize_t 1454static ssize_t
1455qeth_dev_vipa_del_store(const char *buf, size_t count, 1455qeth_dev_vipa_del_store(const char *buf, size_t count,
1456 struct qeth_card *card, enum qeth_prot_versions proto) 1456 struct qeth_card *card, enum qeth_prot_versions proto)
1457{ 1457{
@@ -1542,7 +1542,7 @@ static struct attribute_group qeth_device_vipa_group = {
1542 .attrs = (struct attribute **)qeth_vipa_device_attrs, 1542 .attrs = (struct attribute **)qeth_vipa_device_attrs,
1543}; 1543};
1544 1544
1545static inline ssize_t 1545static ssize_t
1546qeth_dev_rxip_add_show(char *buf, struct qeth_card *card, 1546qeth_dev_rxip_add_show(char *buf, struct qeth_card *card,
1547 enum qeth_prot_versions proto) 1547 enum qeth_prot_versions proto)
1548{ 1548{
@@ -1588,7 +1588,7 @@ qeth_dev_rxip_add4_show(struct device *dev, struct device_attribute *attr, char
1588 return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV4); 1588 return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV4);
1589} 1589}
1590 1590
1591static inline int 1591static int
1592qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto, 1592qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto,
1593 u8 *addr) 1593 u8 *addr)
1594{ 1594{
@@ -1599,7 +1599,7 @@ qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto,
1599 return 0; 1599 return 0;
1600} 1600}
1601 1601
1602static inline ssize_t 1602static ssize_t
1603qeth_dev_rxip_add_store(const char *buf, size_t count, 1603qeth_dev_rxip_add_store(const char *buf, size_t count,
1604 struct qeth_card *card, enum qeth_prot_versions proto) 1604 struct qeth_card *card, enum qeth_prot_versions proto)
1605{ 1605{
@@ -1632,7 +1632,7 @@ static QETH_DEVICE_ATTR(rxip_add4, add4, 0644,
1632 qeth_dev_rxip_add4_show, 1632 qeth_dev_rxip_add4_show,
1633 qeth_dev_rxip_add4_store); 1633 qeth_dev_rxip_add4_store);
1634 1634
1635static inline ssize_t 1635static ssize_t
1636qeth_dev_rxip_del_store(const char *buf, size_t count, 1636qeth_dev_rxip_del_store(const char *buf, size_t count,
1637 struct qeth_card *card, enum qeth_prot_versions proto) 1637 struct qeth_card *card, enum qeth_prot_versions proto)
1638{ 1638{
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index e088b5e28711..806bb1a921eb 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -13,22 +13,18 @@
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/workqueue.h> 14#include <linux/workqueue.h>
15#include <linux/time.h> 15#include <linux/time.h>
16#include <linux/device.h>
16#include <linux/kthread.h> 17#include <linux/kthread.h>
17 18#include <asm/etr.h>
18#include <asm/lowcore.h> 19#include <asm/lowcore.h>
19 20#include <asm/cio.h>
21#include "cio/cio.h"
22#include "cio/chsc.h"
23#include "cio/css.h"
20#include "s390mach.h" 24#include "s390mach.h"
21 25
22static struct semaphore m_sem; 26static struct semaphore m_sem;
23 27
24extern int css_process_crw(int, int);
25extern int chsc_process_crw(void);
26extern int chp_process_crw(int, int);
27extern void css_reiterate_subchannels(void);
28
29extern struct workqueue_struct *slow_path_wq;
30extern struct work_struct slow_path_work;
31
32static NORET_TYPE void 28static NORET_TYPE void
33s390_handle_damage(char *msg) 29s390_handle_damage(char *msg)
34{ 30{
@@ -470,6 +466,19 @@ s390_do_machine_check(struct pt_regs *regs)
470 s390_handle_damage("unable to revalidate registers."); 466 s390_handle_damage("unable to revalidate registers.");
471 } 467 }
472 468
469 if (mci->cd) {
470 /* Timing facility damage */
471 s390_handle_damage("TOD clock damaged");
472 }
473
474 if (mci->ed && mci->ec) {
475 /* External damage */
476 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SYNC))
477 etr_sync_check();
478 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH))
479 etr_switch_to_local();
480 }
481
473 if (mci->se) 482 if (mci->se)
474 /* Storage error uncorrected */ 483 /* Storage error uncorrected */
475 s390_handle_damage("received storage error uncorrected " 484 s390_handle_damage("received storage error uncorrected "
@@ -508,7 +517,7 @@ static int
508machine_check_init(void) 517machine_check_init(void)
509{ 518{
510 init_MUTEX_LOCKED(&m_sem); 519 init_MUTEX_LOCKED(&m_sem);
511 ctl_clear_bit(14, 25); /* disable external damage MCH */ 520 ctl_set_bit(14, 25); /* enable external damage MCH */
512 ctl_set_bit(14, 27); /* enable system recovery MCH */ 521 ctl_set_bit(14, 27); /* enable system recovery MCH */
513#ifdef CONFIG_MACHCHK_WARNING 522#ifdef CONFIG_MACHCHK_WARNING
514 ctl_set_bit(14, 24); /* enable warning MCH */ 523 ctl_set_bit(14, 24); /* enable warning MCH */
@@ -529,7 +538,11 @@ arch_initcall(machine_check_init);
529static int __init 538static int __init
530machine_check_crw_init (void) 539machine_check_crw_init (void)
531{ 540{
532 kthread_run(s390_collect_crw_info, &m_sem, "kmcheck"); 541 struct task_struct *task;
542
543 task = kthread_run(s390_collect_crw_info, &m_sem, "kmcheck");
544 if (IS_ERR(task))
545 return PTR_ERR(task);
533 ctl_set_bit(14, 28); /* enable channel report MCH */ 546 ctl_set_bit(14, 28); /* enable channel report MCH */
534 return 0; 547 return 0;
535} 548}
diff --git a/drivers/s390/s390mach.h b/drivers/s390/s390mach.h
index 7abb42a09ae2..d3ca4281a494 100644
--- a/drivers/s390/s390mach.h
+++ b/drivers/s390/s390mach.h
@@ -102,4 +102,7 @@ static inline int stcrw(struct crw *pcrw )
102 return ccode; 102 return ccode;
103} 103}
104 104
105#define ED_ETR_SYNC 12 /* External damage ETR sync check */
106#define ED_ETR_SWITCH 13 /* External damage ETR switch to local */
107
105#endif /* __s390mach */ 108#endif /* __s390mach */
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 85093b71f9fa..39a885266790 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -47,13 +47,12 @@ static int __init zfcp_module_init(void);
47static void zfcp_ns_gid_pn_handler(unsigned long); 47static void zfcp_ns_gid_pn_handler(unsigned long);
48 48
49/* miscellaneous */ 49/* miscellaneous */
50static inline int zfcp_sg_list_alloc(struct zfcp_sg_list *, size_t); 50static int zfcp_sg_list_alloc(struct zfcp_sg_list *, size_t);
51static inline void zfcp_sg_list_free(struct zfcp_sg_list *); 51static void zfcp_sg_list_free(struct zfcp_sg_list *);
52static inline int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *, 52static int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *,
53 void __user *, size_t); 53 void __user *, size_t);
54static inline int zfcp_sg_list_copy_to_user(void __user *, 54static int zfcp_sg_list_copy_to_user(void __user *,
55 struct zfcp_sg_list *, size_t); 55 struct zfcp_sg_list *, size_t);
56
57static long zfcp_cfdc_dev_ioctl(struct file *, unsigned int, unsigned long); 56static long zfcp_cfdc_dev_ioctl(struct file *, unsigned int, unsigned long);
58 57
59#define ZFCP_CFDC_IOC_MAGIC 0xDD 58#define ZFCP_CFDC_IOC_MAGIC 0xDD
@@ -605,7 +604,7 @@ zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
605 * elements of the scatter-gather list. The maximum size of a single element 604 * elements of the scatter-gather list. The maximum size of a single element
606 * in the scatter-gather list is PAGE_SIZE. 605 * in the scatter-gather list is PAGE_SIZE.
607 */ 606 */
608static inline int 607static int
609zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size) 608zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size)
610{ 609{
611 struct scatterlist *sg; 610 struct scatterlist *sg;
@@ -652,7 +651,7 @@ zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size)
652 * Memory for each element in the scatter-gather list is freed. 651 * Memory for each element in the scatter-gather list is freed.
653 * Finally sg_list->sg is freed itself and sg_list->count is reset. 652 * Finally sg_list->sg is freed itself and sg_list->count is reset.
654 */ 653 */
655static inline void 654static void
656zfcp_sg_list_free(struct zfcp_sg_list *sg_list) 655zfcp_sg_list_free(struct zfcp_sg_list *sg_list)
657{ 656{
658 struct scatterlist *sg; 657 struct scatterlist *sg;
@@ -697,7 +696,7 @@ zfcp_sg_size(struct scatterlist *sg, unsigned int sg_count)
697 * @size: number of bytes to be copied 696 * @size: number of bytes to be copied
698 * Return: 0 on success, -EFAULT if copy_from_user fails. 697 * Return: 0 on success, -EFAULT if copy_from_user fails.
699 */ 698 */
700static inline int 699static int
701zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list, 700zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list,
702 void __user *user_buffer, 701 void __user *user_buffer,
703 size_t size) 702 size_t size)
@@ -735,7 +734,7 @@ zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list,
735 * @size: number of bytes to be copied 734 * @size: number of bytes to be copied
736 * Return: 0 on success, -EFAULT if copy_to_user fails 735 * Return: 0 on success, -EFAULT if copy_to_user fails
737 */ 736 */
738static inline int 737static int
739zfcp_sg_list_copy_to_user(void __user *user_buffer, 738zfcp_sg_list_copy_to_user(void __user *user_buffer,
740 struct zfcp_sg_list *sg_list, 739 struct zfcp_sg_list *sg_list,
741 size_t size) 740 size_t size)
@@ -1799,7 +1798,7 @@ static const struct zfcp_rc_entry zfcp_p_rjt_rc[] = {
1799 * @code: reason code 1798 * @code: reason code
1800 * @rc_table: table of reason codes and descriptions 1799 * @rc_table: table of reason codes and descriptions
1801 */ 1800 */
1802static inline const char * 1801static const char *
1803zfcp_rc_description(u8 code, const struct zfcp_rc_entry *rc_table) 1802zfcp_rc_description(u8 code, const struct zfcp_rc_entry *rc_table)
1804{ 1803{
1805 const char *descr = "unknown reason code"; 1804 const char *descr = "unknown reason code";
@@ -1847,7 +1846,7 @@ zfcp_check_ct_response(struct ct_hdr *rjt)
1847 * @rjt_par: reject parameter acc. to FC-PH/FC-FS 1846 * @rjt_par: reject parameter acc. to FC-PH/FC-FS
1848 * @rc_table: table of reason codes and descriptions 1847 * @rc_table: table of reason codes and descriptions
1849 */ 1848 */
1850static inline void 1849static void
1851zfcp_print_els_rjt(struct zfcp_ls_rjt_par *rjt_par, 1850zfcp_print_els_rjt(struct zfcp_ls_rjt_par *rjt_par,
1852 const struct zfcp_rc_entry *rc_table) 1851 const struct zfcp_rc_entry *rc_table)
1853{ 1852{
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 0aa3b1ac76af..d8191d115c14 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -31,7 +31,7 @@ MODULE_PARM_DESC(dbfsize,
31 31
32#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER 32#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
33 33
34static inline int 34static int
35zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck) 35zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck)
36{ 36{
37 unsigned long long sec; 37 unsigned long long sec;
@@ -106,7 +106,7 @@ zfcp_dbf_view_dump(char *out_buf, const char *label,
106 return len; 106 return len;
107} 107}
108 108
109static inline int 109static int
110zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area, 110zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area,
111 debug_entry_t * entry, char *out_buf) 111 debug_entry_t * entry, char *out_buf)
112{ 112{
@@ -130,7 +130,7 @@ zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area,
130 return len; 130 return len;
131} 131}
132 132
133inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) 133void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
134{ 134{
135 struct zfcp_adapter *adapter = fsf_req->adapter; 135 struct zfcp_adapter *adapter = fsf_req->adapter;
136 struct fsf_qtcb *qtcb = fsf_req->qtcb; 136 struct fsf_qtcb *qtcb = fsf_req->qtcb;
@@ -241,7 +241,7 @@ inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
241 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); 241 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
242} 242}
243 243
244inline void 244void
245zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, 245zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
246 struct fsf_status_read_buffer *status_buffer) 246 struct fsf_status_read_buffer *status_buffer)
247{ 247{
@@ -295,7 +295,7 @@ zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
295 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); 295 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
296} 296}
297 297
298inline void 298void
299zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, 299zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
300 unsigned int qdio_error, unsigned int siga_error, 300 unsigned int qdio_error, unsigned int siga_error,
301 int sbal_index, int sbal_count) 301 int sbal_index, int sbal_count)
@@ -316,7 +316,7 @@ zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
316 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); 316 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
317} 317}
318 318
319static inline int 319static int
320zfcp_hba_dbf_view_response(char *out_buf, 320zfcp_hba_dbf_view_response(char *out_buf,
321 struct zfcp_hba_dbf_record_response *rec) 321 struct zfcp_hba_dbf_record_response *rec)
322{ 322{
@@ -403,7 +403,7 @@ zfcp_hba_dbf_view_response(char *out_buf,
403 return len; 403 return len;
404} 404}
405 405
406static inline int 406static int
407zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec) 407zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec)
408{ 408{
409 int len = 0; 409 int len = 0;
@@ -424,7 +424,7 @@ zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec)
424 return len; 424 return len;
425} 425}
426 426
427static inline int 427static int
428zfcp_hba_dbf_view_qdio(char *out_buf, struct zfcp_hba_dbf_record_qdio *rec) 428zfcp_hba_dbf_view_qdio(char *out_buf, struct zfcp_hba_dbf_record_qdio *rec)
429{ 429{
430 int len = 0; 430 int len = 0;
@@ -469,7 +469,7 @@ zfcp_hba_dbf_view_format(debug_info_t * id, struct debug_view *view,
469 return len; 469 return len;
470} 470}
471 471
472struct debug_view zfcp_hba_dbf_view = { 472static struct debug_view zfcp_hba_dbf_view = {
473 "structured", 473 "structured",
474 NULL, 474 NULL,
475 &zfcp_dbf_view_header, 475 &zfcp_dbf_view_header,
@@ -478,7 +478,7 @@ struct debug_view zfcp_hba_dbf_view = {
478 NULL 478 NULL
479}; 479};
480 480
481inline void 481void
482_zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req, 482_zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req,
483 u32 s_id, u32 d_id, void *buffer, int buflen) 483 u32 s_id, u32 d_id, void *buffer, int buflen)
484{ 484{
@@ -519,7 +519,7 @@ _zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req,
519 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); 519 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
520} 520}
521 521
522inline void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) 522void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
523{ 523{
524 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; 524 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
525 struct zfcp_port *port = ct->port; 525 struct zfcp_port *port = ct->port;
@@ -531,7 +531,7 @@ inline void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
531 ct->req->length); 531 ct->req->length);
532} 532}
533 533
534inline void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) 534void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
535{ 535{
536 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; 536 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
537 struct zfcp_port *port = ct->port; 537 struct zfcp_port *port = ct->port;
@@ -543,7 +543,7 @@ inline void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
543 ct->resp->length); 543 ct->resp->length);
544} 544}
545 545
546static inline void 546static void
547_zfcp_san_dbf_event_common_els(const char *tag, int level, 547_zfcp_san_dbf_event_common_els(const char *tag, int level,
548 struct zfcp_fsf_req *fsf_req, u32 s_id, 548 struct zfcp_fsf_req *fsf_req, u32 s_id,
549 u32 d_id, u8 ls_code, void *buffer, int buflen) 549 u32 d_id, u8 ls_code, void *buffer, int buflen)
@@ -585,7 +585,7 @@ _zfcp_san_dbf_event_common_els(const char *tag, int level,
585 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); 585 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
586} 586}
587 587
588inline void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req) 588void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req)
589{ 589{
590 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; 590 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
591 591
@@ -597,7 +597,7 @@ inline void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req)
597 els->req->length); 597 els->req->length);
598} 598}
599 599
600inline void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req) 600void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req)
601{ 601{
602 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; 602 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
603 603
@@ -608,7 +608,7 @@ inline void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req)
608 els->resp->length); 608 els->resp->length);
609} 609}
610 610
611inline void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req) 611void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req)
612{ 612{
613 struct zfcp_adapter *adapter = fsf_req->adapter; 613 struct zfcp_adapter *adapter = fsf_req->adapter;
614 struct fsf_status_read_buffer *status_buffer = 614 struct fsf_status_read_buffer *status_buffer =
@@ -693,7 +693,7 @@ zfcp_san_dbf_view_format(debug_info_t * id, struct debug_view *view,
693 return len; 693 return len;
694} 694}
695 695
696struct debug_view zfcp_san_dbf_view = { 696static struct debug_view zfcp_san_dbf_view = {
697 "structured", 697 "structured",
698 NULL, 698 NULL,
699 &zfcp_dbf_view_header, 699 &zfcp_dbf_view_header,
@@ -702,7 +702,7 @@ struct debug_view zfcp_san_dbf_view = {
702 NULL 702 NULL
703}; 703};
704 704
705static inline void 705static void
706_zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, 706_zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
707 struct zfcp_adapter *adapter, 707 struct zfcp_adapter *adapter,
708 struct scsi_cmnd *scsi_cmnd, 708 struct scsi_cmnd *scsi_cmnd,
@@ -786,7 +786,7 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
786 spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags); 786 spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags);
787} 787}
788 788
789inline void 789void
790zfcp_scsi_dbf_event_result(const char *tag, int level, 790zfcp_scsi_dbf_event_result(const char *tag, int level,
791 struct zfcp_adapter *adapter, 791 struct zfcp_adapter *adapter,
792 struct scsi_cmnd *scsi_cmnd, 792 struct scsi_cmnd *scsi_cmnd,
@@ -796,7 +796,7 @@ zfcp_scsi_dbf_event_result(const char *tag, int level,
796 adapter, scsi_cmnd, fsf_req, 0); 796 adapter, scsi_cmnd, fsf_req, 0);
797} 797}
798 798
799inline void 799void
800zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, 800zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
801 struct scsi_cmnd *scsi_cmnd, 801 struct scsi_cmnd *scsi_cmnd,
802 struct zfcp_fsf_req *new_fsf_req, 802 struct zfcp_fsf_req *new_fsf_req,
@@ -806,7 +806,7 @@ zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
806 adapter, scsi_cmnd, new_fsf_req, old_req_id); 806 adapter, scsi_cmnd, new_fsf_req, old_req_id);
807} 807}
808 808
809inline void 809void
810zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, 810zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
811 struct scsi_cmnd *scsi_cmnd) 811 struct scsi_cmnd *scsi_cmnd)
812{ 812{
@@ -884,7 +884,7 @@ zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view,
884 return len; 884 return len;
885} 885}
886 886
887struct debug_view zfcp_scsi_dbf_view = { 887static struct debug_view zfcp_scsi_dbf_view = {
888 "structured", 888 "structured",
889 NULL, 889 NULL,
890 &zfcp_dbf_view_header, 890 &zfcp_dbf_view_header,
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index c88babce9bca..88642dec080c 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -200,7 +200,7 @@ void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout)
200 * returns: 0 - initiated action successfully 200 * returns: 0 - initiated action successfully
201 * <0 - failed to initiate action 201 * <0 - failed to initiate action
202 */ 202 */
203int 203static int
204zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask) 204zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask)
205{ 205{
206 int retval; 206 int retval;
@@ -295,7 +295,7 @@ zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear_mask)
295 * zfcp_erp_adisc - send ADISC ELS command 295 * zfcp_erp_adisc - send ADISC ELS command
296 * @port: port structure 296 * @port: port structure
297 */ 297 */
298int 298static int
299zfcp_erp_adisc(struct zfcp_port *port) 299zfcp_erp_adisc(struct zfcp_port *port)
300{ 300{
301 struct zfcp_adapter *adapter = port->adapter; 301 struct zfcp_adapter *adapter = port->adapter;
@@ -380,7 +380,7 @@ zfcp_erp_adisc(struct zfcp_port *port)
380 * 380 *
381 * If ADISC failed (LS_RJT or timed out) forced reopen of the port is triggered. 381 * If ADISC failed (LS_RJT or timed out) forced reopen of the port is triggered.
382 */ 382 */
383void 383static void
384zfcp_erp_adisc_handler(unsigned long data) 384zfcp_erp_adisc_handler(unsigned long data)
385{ 385{
386 struct zfcp_send_els *send_els; 386 struct zfcp_send_els *send_els;
@@ -3141,7 +3141,6 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
3141 break; 3141 break;
3142 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 3142 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
3143 if (result != ZFCP_ERP_SUCCEEDED) { 3143 if (result != ZFCP_ERP_SUCCEEDED) {
3144 struct zfcp_port *port;
3145 list_for_each_entry(port, &adapter->port_list_head, list) 3144 list_for_each_entry(port, &adapter->port_list_head, list)
3146 if (port->rport && 3145 if (port->rport &&
3147 !atomic_test_mask(ZFCP_STATUS_PORT_WKA, 3146 !atomic_test_mask(ZFCP_STATUS_PORT_WKA,
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index b8794d77285d..cda0cc095ad1 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -119,8 +119,8 @@ extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
119extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); 119extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
120extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t); 120extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t);
121extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *); 121extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *);
122extern void set_host_byte(u32 *, char); 122extern void set_host_byte(int *, char);
123extern void set_driver_byte(u32 *, char); 123extern void set_driver_byte(int *, char);
124extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *); 124extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
125extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *); 125extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *);
126 126
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 067f1519eb04..4b3ae3f22e78 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -4563,7 +4563,7 @@ zfcp_fsf_req_sbal_check(unsigned long *flags,
4563/* 4563/*
4564 * set qtcb pointer in fsf_req and initialize QTCB 4564 * set qtcb pointer in fsf_req and initialize QTCB
4565 */ 4565 */
4566static inline void 4566static void
4567zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) 4567zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req)
4568{ 4568{
4569 if (likely(fsf_req->qtcb != NULL)) { 4569 if (likely(fsf_req->qtcb != NULL)) {
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index dbd9f48e863e..1e12a78e8edd 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -21,22 +21,22 @@
21 21
22#include "zfcp_ext.h" 22#include "zfcp_ext.h"
23 23
24static inline void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int); 24static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int);
25static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get 25static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get
26 (struct zfcp_qdio_queue *, int, int); 26 (struct zfcp_qdio_queue *, int, int);
27static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp 27static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp
28 (struct zfcp_fsf_req *, int, int); 28 (struct zfcp_fsf_req *, int, int);
29static inline volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain 29static volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain
30 (struct zfcp_fsf_req *, unsigned long); 30 (struct zfcp_fsf_req *, unsigned long);
31static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_next 31static volatile struct qdio_buffer_element *zfcp_qdio_sbale_next
32 (struct zfcp_fsf_req *, unsigned long); 32 (struct zfcp_fsf_req *, unsigned long);
33static inline int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int); 33static int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int);
34static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *); 34static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *);
35static inline void zfcp_qdio_sbale_fill 35static void zfcp_qdio_sbale_fill
36 (struct zfcp_fsf_req *, unsigned long, void *, int); 36 (struct zfcp_fsf_req *, unsigned long, void *, int);
37static inline int zfcp_qdio_sbals_from_segment 37static int zfcp_qdio_sbals_from_segment
38 (struct zfcp_fsf_req *, unsigned long, void *, unsigned long); 38 (struct zfcp_fsf_req *, unsigned long, void *, unsigned long);
39static inline int zfcp_qdio_sbals_from_buffer 39static int zfcp_qdio_sbals_from_buffer
40 (struct zfcp_fsf_req *, unsigned long, void *, unsigned long, int); 40 (struct zfcp_fsf_req *, unsigned long, void *, unsigned long, int);
41 41
42static qdio_handler_t zfcp_qdio_request_handler; 42static qdio_handler_t zfcp_qdio_request_handler;
@@ -201,7 +201,7 @@ zfcp_qdio_allocate(struct zfcp_adapter *adapter)
201 * returns: error flag 201 * returns: error flag
202 * 202 *
203 */ 203 */
204static inline int 204static int
205zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status, 205zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status,
206 unsigned int qdio_error, unsigned int siga_error, 206 unsigned int qdio_error, unsigned int siga_error,
207 int first_element, int elements_processed) 207 int first_element, int elements_processed)
@@ -462,7 +462,7 @@ zfcp_qdio_sbale_get(struct zfcp_qdio_queue *queue, int sbal, int sbale)
462 * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for 462 * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for
463 * a struct zfcp_fsf_req 463 * a struct zfcp_fsf_req
464 */ 464 */
465inline volatile struct qdio_buffer_element * 465volatile struct qdio_buffer_element *
466zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale) 466zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale)
467{ 467{
468 return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue, 468 return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue,
@@ -484,7 +484,7 @@ zfcp_qdio_sbale_resp(struct zfcp_fsf_req *fsf_req, int sbal, int sbale)
484 * zfcp_qdio_sbale_curr - return current SBALE on request_queue for 484 * zfcp_qdio_sbale_curr - return current SBALE on request_queue for
485 * a struct zfcp_fsf_req 485 * a struct zfcp_fsf_req
486 */ 486 */
487inline volatile struct qdio_buffer_element * 487volatile struct qdio_buffer_element *
488zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req) 488zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req)
489{ 489{
490 return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 490 return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr,
@@ -499,7 +499,7 @@ zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req)
499 * 499 *
500 * Note: We can assume at least one free SBAL in the request_queue when called. 500 * Note: We can assume at least one free SBAL in the request_queue when called.
501 */ 501 */
502static inline void 502static void
503zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) 503zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
504{ 504{
505 int count = atomic_read(&fsf_req->adapter->request_queue.free_count); 505 int count = atomic_read(&fsf_req->adapter->request_queue.free_count);
@@ -517,7 +517,7 @@ zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
517 * 517 *
518 * This function changes sbal_curr, sbale_curr, sbal_number of fsf_req. 518 * This function changes sbal_curr, sbale_curr, sbal_number of fsf_req.
519 */ 519 */
520static inline volatile struct qdio_buffer_element * 520static volatile struct qdio_buffer_element *
521zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) 521zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
522{ 522{
523 volatile struct qdio_buffer_element *sbale; 523 volatile struct qdio_buffer_element *sbale;
@@ -554,7 +554,7 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
554/** 554/**
555 * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed 555 * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed
556 */ 556 */
557static inline volatile struct qdio_buffer_element * 557static volatile struct qdio_buffer_element *
558zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) 558zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
559{ 559{
560 if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) 560 if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
@@ -569,7 +569,7 @@ zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
569 * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue 569 * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue
570 * with zero from 570 * with zero from
571 */ 571 */
572static inline int 572static int
573zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last) 573zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last)
574{ 574{
575 struct qdio_buffer **buf = queue->buffer; 575 struct qdio_buffer **buf = queue->buffer;
@@ -603,7 +603,7 @@ zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *fsf_req)
603 * zfcp_qdio_sbale_fill - set address and lenght in current SBALE 603 * zfcp_qdio_sbale_fill - set address and lenght in current SBALE
604 * on request_queue 604 * on request_queue
605 */ 605 */
606static inline void 606static void
607zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 607zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
608 void *addr, int length) 608 void *addr, int length)
609{ 609{
@@ -624,7 +624,7 @@ zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
624 * Alignment and length of the segment determine how many SBALEs are needed 624 * Alignment and length of the segment determine how many SBALEs are needed
625 * for the memory segment. 625 * for the memory segment.
626 */ 626 */
627static inline int 627static int
628zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 628zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
629 void *start_addr, unsigned long total_length) 629 void *start_addr, unsigned long total_length)
630{ 630{
@@ -659,7 +659,7 @@ zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
659 * @sg_count: number of elements in scatter-gather list 659 * @sg_count: number of elements in scatter-gather list
660 * @max_sbals: upper bound for number of SBALs to be used 660 * @max_sbals: upper bound for number of SBALs to be used
661 */ 661 */
662inline int 662int
663zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 663zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
664 struct scatterlist *sg, int sg_count, int max_sbals) 664 struct scatterlist *sg, int sg_count, int max_sbals)
665{ 665{
@@ -707,7 +707,7 @@ out:
707 * @length: length of buffer 707 * @length: length of buffer
708 * @max_sbals: upper bound for number of SBALs to be used 708 * @max_sbals: upper bound for number of SBALs to be used
709 */ 709 */
710static inline int 710static int
711zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 711zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
712 void *buffer, unsigned long length, int max_sbals) 712 void *buffer, unsigned long length, int max_sbals)
713{ 713{
@@ -728,7 +728,7 @@ zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
728 * @scsi_cmnd: either scatter-gather list or buffer contained herein is used 728 * @scsi_cmnd: either scatter-gather list or buffer contained herein is used
729 * to fill SBALs 729 * to fill SBALs
730 */ 730 */
731inline int 731int
732zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req, 732zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req,
733 unsigned long sbtype, struct scsi_cmnd *scsi_cmnd) 733 unsigned long sbtype, struct scsi_cmnd *scsi_cmnd)
734{ 734{
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 452d96f92a14..99db02062c3b 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -90,7 +90,7 @@ zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
90 return fcp_sns_info_ptr; 90 return fcp_sns_info_ptr;
91} 91}
92 92
93fcp_dl_t * 93static fcp_dl_t *
94zfcp_get_fcp_dl_ptr(struct fcp_cmnd_iu * fcp_cmd) 94zfcp_get_fcp_dl_ptr(struct fcp_cmnd_iu * fcp_cmd)
95{ 95{
96 int additional_length = fcp_cmd->add_fcp_cdb_length << 2; 96 int additional_length = fcp_cmd->add_fcp_cdb_length << 2;
@@ -124,19 +124,19 @@ zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, fcp_dl_t fcp_dl)
124 * regarding the specified byte 124 * regarding the specified byte
125 */ 125 */
126static inline void 126static inline void
127set_byte(u32 * result, char status, char pos) 127set_byte(int *result, char status, char pos)
128{ 128{
129 *result |= status << (pos * 8); 129 *result |= status << (pos * 8);
130} 130}
131 131
132void 132void
133set_host_byte(u32 * result, char status) 133set_host_byte(int *result, char status)
134{ 134{
135 set_byte(result, status, 2); 135 set_byte(result, status, 2);
136} 136}
137 137
138void 138void
139set_driver_byte(u32 * result, char status) 139set_driver_byte(int *result, char status)
140{ 140{
141 set_byte(result, status, 3); 141 set_byte(result, status, 3);
142} 142}
@@ -280,7 +280,7 @@ out:
280 return retval; 280 return retval;
281} 281}
282 282
283void 283static void
284zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt) 284zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt)
285{ 285{
286 struct completion *wait = (struct completion *) scpnt->SCp.ptr; 286 struct completion *wait = (struct completion *) scpnt->SCp.ptr;
@@ -324,7 +324,7 @@ zfcp_scsi_command_sync(struct zfcp_unit *unit, struct scsi_cmnd *scpnt,
324 * returns: 0 - success, SCSI command enqueued 324 * returns: 0 - success, SCSI command enqueued
325 * !0 - failure 325 * !0 - failure
326 */ 326 */
327int 327static int
328zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, 328zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
329 void (*done) (struct scsi_cmnd *)) 329 void (*done) (struct scsi_cmnd *))
330{ 330{
@@ -380,7 +380,7 @@ zfcp_unit_lookup(struct zfcp_adapter *adapter, int channel, unsigned int id,
380 * will handle late commands. (Usually, the normal completion of late 380 * will handle late commands. (Usually, the normal completion of late
381 * commands is ignored with respect to the running abort operation.) 381 * commands is ignored with respect to the running abort operation.)
382 */ 382 */
383int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) 383static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
384{ 384{
385 struct Scsi_Host *scsi_host; 385 struct Scsi_Host *scsi_host;
386 struct zfcp_adapter *adapter; 386 struct zfcp_adapter *adapter;
@@ -445,7 +445,7 @@ int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
445 return retval; 445 return retval;
446} 446}
447 447
448int 448static int
449zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt) 449zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
450{ 450{
451 int retval; 451 int retval;
@@ -541,7 +541,7 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags,
541/** 541/**
542 * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset 542 * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset
543 */ 543 */
544int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) 544static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
545{ 545{
546 struct zfcp_unit *unit; 546 struct zfcp_unit *unit;
547 struct zfcp_adapter *adapter; 547 struct zfcp_adapter *adapter;
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
index 1e788e815ce7..090743d2f914 100644
--- a/drivers/s390/sysinfo.c
+++ b/drivers/s390/sysinfo.c
@@ -9,8 +9,14 @@
9#include <linux/mm.h> 9#include <linux/mm.h>
10#include <linux/proc_fs.h> 10#include <linux/proc_fs.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/delay.h>
12#include <asm/ebcdic.h> 13#include <asm/ebcdic.h>
13 14
15/* Sigh, math-emu. Don't ask. */
16#include <asm/sfp-util.h>
17#include <math-emu/soft-fp.h>
18#include <math-emu/single.h>
19
14struct sysinfo_1_1_1 { 20struct sysinfo_1_1_1 {
15 char reserved_0[32]; 21 char reserved_0[32];
16 char manufacturer[16]; 22 char manufacturer[16];
@@ -198,7 +204,7 @@ static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len)
198 * if the higher order 8 bits are not zero. Printing 204 * if the higher order 8 bits are not zero. Printing
199 * a floating point number in the kernel is a no-no, 205 * a floating point number in the kernel is a no-no,
200 * always print the number as 32 bit unsigned integer. 206 * always print the number as 32 bit unsigned integer.
201 * The user-space needs to know about the stange 207 * The user-space needs to know about the strange
202 * encoding of the alternate cpu capability. 208 * encoding of the alternate cpu capability.
203 */ 209 */
204 len += sprintf(page + len, "Capability: %u %u\n", 210 len += sprintf(page + len, "Capability: %u %u\n",
@@ -351,3 +357,58 @@ static __init int create_proc_sysinfo(void)
351 357
352__initcall(create_proc_sysinfo); 358__initcall(create_proc_sysinfo);
353 359
360/*
361 * CPU capability might have changed. Therefore recalculate loops_per_jiffy.
362 */
363void s390_adjust_jiffies(void)
364{
365 struct sysinfo_1_2_2 *info;
366 const unsigned int fmil = 0x4b189680; /* 1e7 as 32-bit float. */
367 FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
368 FP_DECL_EX;
369 unsigned int capability;
370
371 info = (void *) get_zeroed_page(GFP_KERNEL);
372 if (!info)
373 return;
374
375 if (stsi(info, 1, 2, 2) != -ENOSYS) {
376 /*
377 * Major sigh. The cpu capability encoding is "special".
378 * If the first 9 bits of info->capability are 0 then it
379 * is a 32 bit unsigned integer in the range 0 .. 2^23.
380 * If the first 9 bits are != 0 then it is a 32 bit float.
381 * In addition a lower value indicates a proportionally
382 * higher cpu capacity. Bogomips are the other way round.
383 * To get to a halfway suitable number we divide 1e7
384 * by the cpu capability number. Yes, that means a floating
385 * point division .. math-emu here we come :-)
386 */
387 FP_UNPACK_SP(SA, &fmil);
388 if ((info->capability >> 23) == 0)
389 FP_FROM_INT_S(SB, info->capability, 32, int);
390 else
391 FP_UNPACK_SP(SB, &info->capability);
392 FP_DIV_S(SR, SA, SB);
393 FP_TO_INT_S(capability, SR, 32, 0);
394 } else
395 /*
396 * Really old machine without stsi block for basic
397 * cpu information. Report 42.0 bogomips.
398 */
399 capability = 42;
400 loops_per_jiffy = capability * (500000/HZ);
401 free_page((unsigned long) info);
402}
403
404/*
405 * calibrate the delay loop
406 */
407void __init calibrate_delay(void)
408{
409 s390_adjust_jiffies();
410 /* Print the good old Bogomips line .. */
411 printk(KERN_DEBUG "Calibrating delay loop (skipped)... "
412 "%lu.%02lu BogoMIPS preset\n", loops_per_jiffy/(500000/HZ),
413 (loops_per_jiffy/(5000/HZ)) % 100);
414}
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index b318500785e5..821386c7b576 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -7558,9 +7558,6 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
7558 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 7558 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7559 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 7559 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C,
7560 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, 7560 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7561 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7562 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B8,
7563 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7564 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 7561 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7565 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 7562 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7,
7566 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, 7563 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 8ed6c75adf0f..638b8009b3bc 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -36,7 +36,7 @@
36#include <linux/stat.h> 36#include <linux/stat.h>
37#include <linux/timer.h> 37#include <linux/timer.h>
38#include <linux/types.h> 38#include <linux/types.h>
39#include <linux/usb_ch9.h> 39#include <linux/usb/ch9.h>
40#include <linux/workqueue.h> 40#include <linux/workqueue.h>
41 41
42#include "usbatm.h" 42#include "usbatm.h"
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 6377db1b446d..63e50a1f1396 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -398,6 +398,9 @@ static int usblp_open(struct inode *inode, struct file *file)
398 retval = 0; 398 retval = 0;
399#endif 399#endif
400 400
401 retval = usb_autopm_get_interface(intf);
402 if (retval < 0)
403 goto out;
401 usblp->used = 1; 404 usblp->used = 1;
402 file->private_data = usblp; 405 file->private_data = usblp;
403 406
@@ -442,6 +445,7 @@ static int usblp_release(struct inode *inode, struct file *file)
442 usblp->used = 0; 445 usblp->used = 0;
443 if (usblp->present) { 446 if (usblp->present) {
444 usblp_unlink_urbs(usblp); 447 usblp_unlink_urbs(usblp);
448 usb_autopm_put_interface(usblp->intf);
445 } else /* finish cleanup from disconnect */ 449 } else /* finish cleanup from disconnect */
446 usblp_cleanup (usblp); 450 usblp_cleanup (usblp);
447 mutex_unlock (&usblp_mutex); 451 mutex_unlock (&usblp_mutex);
@@ -1203,14 +1207,9 @@ static int usblp_suspend (struct usb_interface *intf, pm_message_t message)
1203{ 1207{
1204 struct usblp *usblp = usb_get_intfdata (intf); 1208 struct usblp *usblp = usb_get_intfdata (intf);
1205 1209
1206 /* this races against normal access and open */
1207 mutex_lock (&usblp_mutex);
1208 mutex_lock (&usblp->mut);
1209 /* we take no more IO */ 1210 /* we take no more IO */
1210 usblp->sleeping = 1; 1211 usblp->sleeping = 1;
1211 usblp_unlink_urbs(usblp); 1212 usblp_unlink_urbs(usblp);
1212 mutex_unlock (&usblp->mut);
1213 mutex_unlock (&usblp_mutex);
1214 1213
1215 return 0; 1214 return 0;
1216} 1215}
@@ -1220,15 +1219,9 @@ static int usblp_resume (struct usb_interface *intf)
1220 struct usblp *usblp = usb_get_intfdata (intf); 1219 struct usblp *usblp = usb_get_intfdata (intf);
1221 int r; 1220 int r;
1222 1221
1223 mutex_lock (&usblp_mutex);
1224 mutex_lock (&usblp->mut);
1225
1226 usblp->sleeping = 0; 1222 usblp->sleeping = 0;
1227 r = handle_bidir (usblp); 1223 r = handle_bidir (usblp);
1228 1224
1229 mutex_unlock (&usblp->mut);
1230 mutex_unlock (&usblp_mutex);
1231
1232 return r; 1225 return r;
1233} 1226}
1234 1227
@@ -1251,6 +1244,7 @@ static struct usb_driver usblp_driver = {
1251 .suspend = usblp_suspend, 1244 .suspend = usblp_suspend,
1252 .resume = usblp_resume, 1245 .resume = usblp_resume,
1253 .id_table = usblp_ids, 1246 .id_table = usblp_ids,
1247 .supports_autosuspend = 1,
1254}; 1248};
1255 1249
1256static int __init usblp_init(void) 1250static int __init usblp_init(void)
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index 3e66b2a9974a..2fc0f88a3d86 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -33,19 +33,6 @@ config USB_DEVICEFS
33 33
34 Most users want to say Y here. 34 Most users want to say Y here.
35 35
36config USB_BANDWIDTH
37 bool "Enforce USB bandwidth allocation (EXPERIMENTAL)"
38 depends on USB && EXPERIMENTAL
39 help
40 If you say Y here, the USB subsystem enforces USB bandwidth
41 allocation and will prevent some device opens from succeeding
42 if they would cause USB bandwidth usage to go above 90% of
43 the bus bandwidth.
44
45 If you say N here, these conditions will cause warning messages
46 about USB bandwidth usage to be logged and some devices or
47 drivers may not work correctly.
48
49config USB_DYNAMIC_MINORS 36config USB_DYNAMIC_MINORS
50 bool "Dynamic USB minor allocation (EXPERIMENTAL)" 37 bool "Dynamic USB minor allocation (EXPERIMENTAL)"
51 depends on USB && EXPERIMENTAL 38 depends on USB && EXPERIMENTAL
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index c3915dc28608..ead2475406b8 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -49,9 +49,9 @@ static const size_t pool_max [HCD_BUFFER_POOLS] = {
49 * 49 *
50 * Call hcd_buffer_destroy() to clean up after using those pools. 50 * Call hcd_buffer_destroy() to clean up after using those pools.
51 */ 51 */
52int hcd_buffer_create (struct usb_hcd *hcd) 52int hcd_buffer_create(struct usb_hcd *hcd)
53{ 53{
54 char name [16]; 54 char name[16];
55 int i, size; 55 int i, size;
56 56
57 if (!hcd->self.controller->dma_mask) 57 if (!hcd->self.controller->dma_mask)
@@ -60,11 +60,11 @@ int hcd_buffer_create (struct usb_hcd *hcd)
60 for (i = 0; i < HCD_BUFFER_POOLS; i++) { 60 for (i = 0; i < HCD_BUFFER_POOLS; i++) {
61 if (!(size = pool_max [i])) 61 if (!(size = pool_max [i]))
62 continue; 62 continue;
63 snprintf (name, sizeof name, "buffer-%d", size); 63 snprintf(name, sizeof name, "buffer-%d", size);
64 hcd->pool [i] = dma_pool_create (name, hcd->self.controller, 64 hcd->pool[i] = dma_pool_create(name, hcd->self.controller,
65 size, size, 0); 65 size, size, 0);
66 if (!hcd->pool [i]) { 66 if (!hcd->pool [i]) {
67 hcd_buffer_destroy (hcd); 67 hcd_buffer_destroy(hcd);
68 return -ENOMEM; 68 return -ENOMEM;
69 } 69 }
70 } 70 }
@@ -79,14 +79,14 @@ int hcd_buffer_create (struct usb_hcd *hcd)
79 * 79 *
80 * This frees the buffer pools created by hcd_buffer_create(). 80 * This frees the buffer pools created by hcd_buffer_create().
81 */ 81 */
82void hcd_buffer_destroy (struct usb_hcd *hcd) 82void hcd_buffer_destroy(struct usb_hcd *hcd)
83{ 83{
84 int i; 84 int i;
85 85
86 for (i = 0; i < HCD_BUFFER_POOLS; i++) { 86 for (i = 0; i < HCD_BUFFER_POOLS; i++) {
87 struct dma_pool *pool = hcd->pool [i]; 87 struct dma_pool *pool = hcd->pool[i];
88 if (pool) { 88 if (pool) {
89 dma_pool_destroy (pool); 89 dma_pool_destroy(pool);
90 hcd->pool[i] = NULL; 90 hcd->pool[i] = NULL;
91 } 91 }
92 } 92 }
@@ -97,8 +97,8 @@ void hcd_buffer_destroy (struct usb_hcd *hcd)
97 * better sharing and to leverage mm/slab.c intelligence. 97 * better sharing and to leverage mm/slab.c intelligence.
98 */ 98 */
99 99
100void *hcd_buffer_alloc ( 100void *hcd_buffer_alloc(
101 struct usb_bus *bus, 101 struct usb_bus *bus,
102 size_t size, 102 size_t size,
103 gfp_t mem_flags, 103 gfp_t mem_flags,
104 dma_addr_t *dma 104 dma_addr_t *dma
@@ -110,18 +110,18 @@ void *hcd_buffer_alloc (
110 /* some USB hosts just use PIO */ 110 /* some USB hosts just use PIO */
111 if (!bus->controller->dma_mask) { 111 if (!bus->controller->dma_mask) {
112 *dma = ~(dma_addr_t) 0; 112 *dma = ~(dma_addr_t) 0;
113 return kmalloc (size, mem_flags); 113 return kmalloc(size, mem_flags);
114 } 114 }
115 115
116 for (i = 0; i < HCD_BUFFER_POOLS; i++) { 116 for (i = 0; i < HCD_BUFFER_POOLS; i++) {
117 if (size <= pool_max [i]) 117 if (size <= pool_max [i])
118 return dma_pool_alloc (hcd->pool [i], mem_flags, dma); 118 return dma_pool_alloc(hcd->pool [i], mem_flags, dma);
119 } 119 }
120 return dma_alloc_coherent (hcd->self.controller, size, dma, 0); 120 return dma_alloc_coherent(hcd->self.controller, size, dma, 0);
121} 121}
122 122
123void hcd_buffer_free ( 123void hcd_buffer_free(
124 struct usb_bus *bus, 124 struct usb_bus *bus,
125 size_t size, 125 size_t size,
126 void *addr, 126 void *addr,
127 dma_addr_t dma 127 dma_addr_t dma
@@ -134,15 +134,15 @@ void hcd_buffer_free (
134 return; 134 return;
135 135
136 if (!bus->controller->dma_mask) { 136 if (!bus->controller->dma_mask) {
137 kfree (addr); 137 kfree(addr);
138 return; 138 return;
139 } 139 }
140 140
141 for (i = 0; i < HCD_BUFFER_POOLS; i++) { 141 for (i = 0; i < HCD_BUFFER_POOLS; i++) {
142 if (size <= pool_max [i]) { 142 if (size <= pool_max [i]) {
143 dma_pool_free (hcd->pool [i], addr, dma); 143 dma_pool_free(hcd->pool [i], addr, dma);
144 return; 144 return;
145 } 145 }
146 } 146 }
147 dma_free_coherent (hcd->self.controller, size, addr, dma); 147 dma_free_coherent(hcd->self.controller, size, addr, dma);
148} 148}
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index ea398e5d50af..a47c30b2d764 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -104,7 +104,7 @@ static const char *format_config =
104 104
105static const char *format_iface = 105static const char *format_iface =
106/* I: If#=dd Alt=dd #EPs=dd Cls=xx(sssss) Sub=xx Prot=xx Driver=xxxx*/ 106/* I: If#=dd Alt=dd #EPs=dd Cls=xx(sssss) Sub=xx Prot=xx Driver=xxxx*/
107 "I: If#=%2d Alt=%2d #EPs=%2d Cls=%02x(%-5s) Sub=%02x Prot=%02x Driver=%s\n"; 107 "I:%c If#=%2d Alt=%2d #EPs=%2d Cls=%02x(%-5s) Sub=%02x Prot=%02x Driver=%s\n";
108 108
109static const char *format_endpt = 109static const char *format_endpt =
110/* E: Ad=xx(s) Atr=xx(ssss) MxPS=dddd Ivl=D?s */ 110/* E: Ad=xx(s) Atr=xx(ssss) MxPS=dddd Ivl=D?s */
@@ -164,10 +164,10 @@ static const char *class_decode(const int class)
164 for (ix = 0; clas_info[ix].class != -1; ix++) 164 for (ix = 0; clas_info[ix].class != -1; ix++)
165 if (clas_info[ix].class == class) 165 if (clas_info[ix].class == class)
166 break; 166 break;
167 return (clas_info[ix].class_name); 167 return clas_info[ix].class_name;
168} 168}
169 169
170static char *usb_dump_endpoint_descriptor ( 170static char *usb_dump_endpoint_descriptor(
171 int speed, 171 int speed,
172 char *start, 172 char *start,
173 char *end, 173 char *end,
@@ -212,9 +212,9 @@ static char *usb_dump_endpoint_descriptor (
212 break; 212 break;
213 case USB_ENDPOINT_XFER_INT: 213 case USB_ENDPOINT_XFER_INT:
214 type = "Int."; 214 type = "Int.";
215 if (speed == USB_SPEED_HIGH) { 215 if (speed == USB_SPEED_HIGH)
216 interval = 1 << (desc->bInterval - 1); 216 interval = 1 << (desc->bInterval - 1);
217 } else 217 else
218 interval = desc->bInterval; 218 interval = desc->bInterval;
219 break; 219 break;
220 default: /* "can't happen" */ 220 default: /* "can't happen" */
@@ -242,15 +242,19 @@ static char *usb_dump_interface_descriptor(char *start, char *end,
242{ 242{
243 const struct usb_interface_descriptor *desc = &intfc->altsetting[setno].desc; 243 const struct usb_interface_descriptor *desc = &intfc->altsetting[setno].desc;
244 const char *driver_name = ""; 244 const char *driver_name = "";
245 int active = 0;
245 246
246 if (start > end) 247 if (start > end)
247 return start; 248 return start;
248 down_read(&usb_bus_type.subsys.rwsem); 249 down_read(&usb_bus_type.subsys.rwsem);
249 if (iface) 250 if (iface) {
250 driver_name = (iface->dev.driver 251 driver_name = (iface->dev.driver
251 ? iface->dev.driver->name 252 ? iface->dev.driver->name
252 : "(none)"); 253 : "(none)");
254 active = (desc == &iface->cur_altsetting->desc);
255 }
253 start += sprintf(start, format_iface, 256 start += sprintf(start, format_iface,
257 active ? '*' : ' ', /* mark active altsetting */
254 desc->bInterfaceNumber, 258 desc->bInterfaceNumber,
255 desc->bAlternateSetting, 259 desc->bAlternateSetting,
256 desc->bNumEndpoints, 260 desc->bNumEndpoints,
@@ -343,7 +347,7 @@ static char *usb_dump_device_descriptor(char *start, char *end, const struct usb
343 347
344 if (start > end) 348 if (start > end)
345 return start; 349 return start;
346 start += sprintf (start, format_device1, 350 start += sprintf(start, format_device1,
347 bcdUSB >> 8, bcdUSB & 0xff, 351 bcdUSB >> 8, bcdUSB & 0xff,
348 desc->bDeviceClass, 352 desc->bDeviceClass,
349 class_decode (desc->bDeviceClass), 353 class_decode (desc->bDeviceClass),
@@ -363,7 +367,7 @@ static char *usb_dump_device_descriptor(char *start, char *end, const struct usb
363/* 367/*
364 * Dump the different strings that this device holds. 368 * Dump the different strings that this device holds.
365 */ 369 */
366static char *usb_dump_device_strings (char *start, char *end, struct usb_device *dev) 370static char *usb_dump_device_strings(char *start, char *end, struct usb_device *dev)
367{ 371{
368 if (start > end) 372 if (start > end)
369 return start; 373 return start;
@@ -395,7 +399,7 @@ static char *usb_dump_desc(char *start, char *end, struct usb_device *dev)
395 if (start > end) 399 if (start > end)
396 return start; 400 return start;
397 401
398 start = usb_dump_device_strings (start, end, dev); 402 start = usb_dump_device_strings(start, end, dev);
399 403
400 for (i = 0; i < dev->descriptor.bNumConfigurations; i++) { 404 for (i = 0; i < dev->descriptor.bNumConfigurations; i++) {
401 if (start > end) 405 if (start > end)
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 4b3a6ab29bd3..2087766f9e88 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -522,19 +522,19 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype, unsig
522 522
523static struct usb_device *usbdev_lookup_minor(int minor) 523static struct usb_device *usbdev_lookup_minor(int minor)
524{ 524{
525 struct class_device *class_dev; 525 struct device *device;
526 struct usb_device *dev = NULL; 526 struct usb_device *udev = NULL;
527 527
528 down(&usb_device_class->sem); 528 down(&usb_device_class->sem);
529 list_for_each_entry(class_dev, &usb_device_class->children, node) { 529 list_for_each_entry(device, &usb_device_class->devices, node) {
530 if (class_dev->devt == MKDEV(USB_DEVICE_MAJOR, minor)) { 530 if (device->devt == MKDEV(USB_DEVICE_MAJOR, minor)) {
531 dev = class_dev->class_data; 531 udev = device->platform_data;
532 break; 532 break;
533 } 533 }
534 } 534 }
535 up(&usb_device_class->sem); 535 up(&usb_device_class->sem);
536 536
537 return dev; 537 return udev;
538}; 538};
539 539
540/* 540/*
@@ -570,6 +570,7 @@ static int usbdev_open(struct inode *inode, struct file *file)
570 ps->dev = dev; 570 ps->dev = dev;
571 ps->file = file; 571 ps->file = file;
572 spin_lock_init(&ps->lock); 572 spin_lock_init(&ps->lock);
573 INIT_LIST_HEAD(&ps->list);
573 INIT_LIST_HEAD(&ps->async_pending); 574 INIT_LIST_HEAD(&ps->async_pending);
574 INIT_LIST_HEAD(&ps->async_completed); 575 INIT_LIST_HEAD(&ps->async_completed);
575 init_waitqueue_head(&ps->wait); 576 init_waitqueue_head(&ps->wait);
@@ -1596,19 +1597,19 @@ static int usbdev_add(struct usb_device *dev)
1596{ 1597{
1597 int minor = ((dev->bus->busnum-1) * 128) + (dev->devnum-1); 1598 int minor = ((dev->bus->busnum-1) * 128) + (dev->devnum-1);
1598 1599
1599 dev->class_dev = class_device_create(usb_device_class, NULL, 1600 dev->usbfs_dev = device_create(usb_device_class, &dev->dev,
1600 MKDEV(USB_DEVICE_MAJOR, minor), &dev->dev, 1601 MKDEV(USB_DEVICE_MAJOR, minor),
1601 "usbdev%d.%d", dev->bus->busnum, dev->devnum); 1602 "usbdev%d.%d", dev->bus->busnum, dev->devnum);
1602 if (IS_ERR(dev->class_dev)) 1603 if (IS_ERR(dev->usbfs_dev))
1603 return PTR_ERR(dev->class_dev); 1604 return PTR_ERR(dev->usbfs_dev);
1604 1605
1605 dev->class_dev->class_data = dev; 1606 dev->usbfs_dev->platform_data = dev;
1606 return 0; 1607 return 0;
1607} 1608}
1608 1609
1609static void usbdev_remove(struct usb_device *dev) 1610static void usbdev_remove(struct usb_device *dev)
1610{ 1611{
1611 class_device_unregister(dev->class_dev); 1612 device_unregister(dev->usbfs_dev);
1612} 1613}
1613 1614
1614static int usbdev_notify(struct notifier_block *self, unsigned long action, 1615static int usbdev_notify(struct notifier_block *self, unsigned long action,
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index d6eb5ce1dd1d..600d1bc8272a 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -28,24 +28,16 @@
28#include "hcd.h" 28#include "hcd.h"
29#include "usb.h" 29#include "usb.h"
30 30
31static int usb_match_one_id(struct usb_interface *interface,
32 const struct usb_device_id *id);
33
34struct usb_dynid {
35 struct list_head node;
36 struct usb_device_id id;
37};
38
39#ifdef CONFIG_HOTPLUG 31#ifdef CONFIG_HOTPLUG
40 32
41/* 33/*
42 * Adds a new dynamic USBdevice ID to this driver, 34 * Adds a new dynamic USBdevice ID to this driver,
43 * and cause the driver to probe for all devices again. 35 * and cause the driver to probe for all devices again.
44 */ 36 */
45static ssize_t store_new_id(struct device_driver *driver, 37ssize_t usb_store_new_id(struct usb_dynids *dynids,
46 const char *buf, size_t count) 38 struct device_driver *driver,
39 const char *buf, size_t count)
47{ 40{
48 struct usb_driver *usb_drv = to_usb_driver(driver);
49 struct usb_dynid *dynid; 41 struct usb_dynid *dynid;
50 u32 idVendor = 0; 42 u32 idVendor = 0;
51 u32 idProduct = 0; 43 u32 idProduct = 0;
@@ -65,9 +57,9 @@ static ssize_t store_new_id(struct device_driver *driver,
65 dynid->id.idProduct = idProduct; 57 dynid->id.idProduct = idProduct;
66 dynid->id.match_flags = USB_DEVICE_ID_MATCH_DEVICE; 58 dynid->id.match_flags = USB_DEVICE_ID_MATCH_DEVICE;
67 59
68 spin_lock(&usb_drv->dynids.lock); 60 spin_lock(&dynids->lock);
69 list_add_tail(&usb_drv->dynids.list, &dynid->node); 61 list_add_tail(&dynids->list, &dynid->node);
70 spin_unlock(&usb_drv->dynids.lock); 62 spin_unlock(&dynids->lock);
71 63
72 if (get_driver(driver)) { 64 if (get_driver(driver)) {
73 retval = driver_attach(driver); 65 retval = driver_attach(driver);
@@ -78,6 +70,15 @@ static ssize_t store_new_id(struct device_driver *driver,
78 return retval; 70 return retval;
79 return count; 71 return count;
80} 72}
73EXPORT_SYMBOL_GPL(usb_store_new_id);
74
75static ssize_t store_new_id(struct device_driver *driver,
76 const char *buf, size_t count)
77{
78 struct usb_driver *usb_drv = to_usb_driver(driver);
79
80 return usb_store_new_id(&usb_drv->dynids, driver, buf, count);
81}
81static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id); 82static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);
82 83
83static int usb_create_newid_file(struct usb_driver *usb_drv) 84static int usb_create_newid_file(struct usb_driver *usb_drv)
@@ -365,8 +366,8 @@ void usb_driver_release_interface(struct usb_driver *driver,
365EXPORT_SYMBOL(usb_driver_release_interface); 366EXPORT_SYMBOL(usb_driver_release_interface);
366 367
367/* returns 0 if no match, 1 if match */ 368/* returns 0 if no match, 1 if match */
368static int usb_match_one_id(struct usb_interface *interface, 369int usb_match_one_id(struct usb_interface *interface,
369 const struct usb_device_id *id) 370 const struct usb_device_id *id)
370{ 371{
371 struct usb_host_interface *intf; 372 struct usb_host_interface *intf;
372 struct usb_device *dev; 373 struct usb_device *dev;
@@ -432,6 +433,8 @@ static int usb_match_one_id(struct usb_interface *interface,
432 433
433 return 1; 434 return 1;
434} 435}
436EXPORT_SYMBOL_GPL(usb_match_one_id);
437
435/** 438/**
436 * usb_match_id - find first usb_device_id matching device or interface 439 * usb_match_id - find first usb_device_id matching device or interface
437 * @interface: the interface of interest 440 * @interface: the interface of interest
@@ -750,7 +753,8 @@ EXPORT_SYMBOL_GPL(usb_deregister_device_driver);
750 * usb_register_dev() to enable that functionality. This function no longer 753 * usb_register_dev() to enable that functionality. This function no longer
751 * takes care of that. 754 * takes care of that.
752 */ 755 */
753int usb_register_driver(struct usb_driver *new_driver, struct module *owner) 756int usb_register_driver(struct usb_driver *new_driver, struct module *owner,
757 const char *mod_name)
754{ 758{
755 int retval = 0; 759 int retval = 0;
756 760
@@ -763,6 +767,7 @@ int usb_register_driver(struct usb_driver *new_driver, struct module *owner)
763 new_driver->drvwrap.driver.probe = usb_probe_interface; 767 new_driver->drvwrap.driver.probe = usb_probe_interface;
764 new_driver->drvwrap.driver.remove = usb_unbind_interface; 768 new_driver->drvwrap.driver.remove = usb_unbind_interface;
765 new_driver->drvwrap.driver.owner = owner; 769 new_driver->drvwrap.driver.owner = owner;
770 new_driver->drvwrap.driver.mod_name = mod_name;
766 spin_lock_init(&new_driver->dynids.lock); 771 spin_lock_init(&new_driver->dynids.lock);
767 INIT_LIST_HEAD(&new_driver->dynids.list); 772 INIT_LIST_HEAD(&new_driver->dynids.list);
768 773
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index f794f07cfb33..01c857ac27af 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -194,14 +194,13 @@ int usb_register_dev(struct usb_interface *intf,
194 ++temp; 194 ++temp;
195 else 195 else
196 temp = name; 196 temp = name;
197 intf->class_dev = class_device_create(usb_class->class, NULL, 197 intf->usb_dev = device_create(usb_class->class, &intf->dev,
198 MKDEV(USB_MAJOR, minor), 198 MKDEV(USB_MAJOR, minor), "%s", temp);
199 &intf->dev, "%s", temp); 199 if (IS_ERR(intf->usb_dev)) {
200 if (IS_ERR(intf->class_dev)) {
201 spin_lock (&minor_lock); 200 spin_lock (&minor_lock);
202 usb_minors[intf->minor] = NULL; 201 usb_minors[intf->minor] = NULL;
203 spin_unlock (&minor_lock); 202 spin_unlock (&minor_lock);
204 retval = PTR_ERR(intf->class_dev); 203 retval = PTR_ERR(intf->usb_dev);
205 } 204 }
206exit: 205exit:
207 return retval; 206 return retval;
@@ -242,8 +241,8 @@ void usb_deregister_dev(struct usb_interface *intf,
242 spin_unlock (&minor_lock); 241 spin_unlock (&minor_lock);
243 242
244 snprintf(name, BUS_ID_SIZE, class_driver->name, intf->minor - minor_base); 243 snprintf(name, BUS_ID_SIZE, class_driver->name, intf->minor - minor_base);
245 class_device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor)); 244 device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
246 intf->class_dev = NULL; 245 intf->usb_dev = NULL;
247 intf->minor = -1; 246 intf->minor = -1;
248 destroy_usb_class(); 247 destroy_usb_class();
249} 248}
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index ebb20ff7ac58..b531a4fd30c2 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -25,6 +25,20 @@ static inline const char *plural(int n)
25 return (n == 1 ? "" : "s"); 25 return (n == 1 ? "" : "s");
26} 26}
27 27
28static int is_rndis(struct usb_interface_descriptor *desc)
29{
30 return desc->bInterfaceClass == USB_CLASS_COMM
31 && desc->bInterfaceSubClass == 2
32 && desc->bInterfaceProtocol == 0xff;
33}
34
35static int is_activesync(struct usb_interface_descriptor *desc)
36{
37 return desc->bInterfaceClass == USB_CLASS_MISC
38 && desc->bInterfaceSubClass == 1
39 && desc->bInterfaceProtocol == 1;
40}
41
28static int choose_configuration(struct usb_device *udev) 42static int choose_configuration(struct usb_device *udev)
29{ 43{
30 int i; 44 int i;
@@ -87,14 +101,12 @@ static int choose_configuration(struct usb_device *udev)
87 continue; 101 continue;
88 } 102 }
89 103
90 /* If the first config's first interface is COMM/2/0xff 104 /* When the first config's first interface is one of Microsoft's
91 * (MSFT RNDIS), rule it out unless Linux has host-side 105 * pet nonstandard Ethernet-over-USB protocols, ignore it unless
92 * RNDIS support. */ 106 * this kernel has enabled the necessary host side driver.
93 if (i == 0 && desc 107 */
94 && desc->bInterfaceClass == USB_CLASS_COMM 108 if (i == 0 && desc && (is_rndis(desc) || is_activesync(desc))) {
95 && desc->bInterfaceSubClass == 2 109#if !defined(CONFIG_USB_NET_RNDIS_HOST) && !defined(CONFIG_USB_NET_RNDIS_HOST_MODULE)
96 && desc->bInterfaceProtocol == 0xff) {
97#ifndef CONFIG_USB_NET_RNDIS_HOST
98 continue; 110 continue;
99#else 111#else
100 best = c; 112 best = c;
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 10064af65d17..b26c19e8d19f 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -45,8 +45,6 @@
45#include "hub.h" 45#include "hub.h"
46 46
47 47
48// #define USB_BANDWIDTH_MESSAGES
49
50/*-------------------------------------------------------------------------*/ 48/*-------------------------------------------------------------------------*/
51 49
52/* 50/*
@@ -891,136 +889,6 @@ long usb_calc_bus_time (int speed, int is_input, int isoc, int bytecount)
891} 889}
892EXPORT_SYMBOL (usb_calc_bus_time); 890EXPORT_SYMBOL (usb_calc_bus_time);
893 891
894/*
895 * usb_check_bandwidth():
896 *
897 * old_alloc is from host_controller->bandwidth_allocated in microseconds;
898 * bustime is from calc_bus_time(), but converted to microseconds.
899 *
900 * returns <bustime in us> if successful,
901 * or -ENOSPC if bandwidth request fails.
902 *
903 * FIXME:
904 * This initial implementation does not use Endpoint.bInterval
905 * in managing bandwidth allocation.
906 * It probably needs to be expanded to use Endpoint.bInterval.
907 * This can be done as a later enhancement (correction).
908 *
909 * This will also probably require some kind of
910 * frame allocation tracking...meaning, for example,
911 * that if multiple drivers request interrupts every 10 USB frames,
912 * they don't all have to be allocated at
913 * frame numbers N, N+10, N+20, etc. Some of them could be at
914 * N+11, N+21, N+31, etc., and others at
915 * N+12, N+22, N+32, etc.
916 *
917 * Similarly for isochronous transfers...
918 *
919 * Individual HCDs can schedule more directly ... this logic
920 * is not correct for high speed transfers.
921 */
922int usb_check_bandwidth (struct usb_device *dev, struct urb *urb)
923{
924 unsigned int pipe = urb->pipe;
925 long bustime;
926 int is_in = usb_pipein (pipe);
927 int is_iso = usb_pipeisoc (pipe);
928 int old_alloc = dev->bus->bandwidth_allocated;
929 int new_alloc;
930
931
932 bustime = NS_TO_US (usb_calc_bus_time (dev->speed, is_in, is_iso,
933 usb_maxpacket (dev, pipe, !is_in)));
934 if (is_iso)
935 bustime /= urb->number_of_packets;
936
937 new_alloc = old_alloc + (int) bustime;
938 if (new_alloc > FRAME_TIME_MAX_USECS_ALLOC) {
939#ifdef DEBUG
940 char *mode =
941#ifdef CONFIG_USB_BANDWIDTH
942 "";
943#else
944 "would have ";
945#endif
946 dev_dbg (&dev->dev, "usb_check_bandwidth %sFAILED: %d + %ld = %d usec\n",
947 mode, old_alloc, bustime, new_alloc);
948#endif
949#ifdef CONFIG_USB_BANDWIDTH
950 bustime = -ENOSPC; /* report error */
951#endif
952 }
953
954 return bustime;
955}
956EXPORT_SYMBOL (usb_check_bandwidth);
957
958
959/**
960 * usb_claim_bandwidth - records bandwidth for a periodic transfer
961 * @dev: source/target of request
962 * @urb: request (urb->dev == dev)
963 * @bustime: bandwidth consumed, in (average) microseconds per frame
964 * @isoc: true iff the request is isochronous
965 *
966 * Bus bandwidth reservations are recorded purely for diagnostic purposes.
967 * HCDs are expected not to overcommit periodic bandwidth, and to record such
968 * reservations whenever endpoints are added to the periodic schedule.
969 *
970 * FIXME averaging per-frame is suboptimal. Better to sum over the HCD's
971 * entire periodic schedule ... 32 frames for OHCI, 1024 for UHCI, settable
972 * for EHCI (256/512/1024 frames, default 1024) and have the bus expose how
973 * large its periodic schedule is.
974 */
975void usb_claim_bandwidth (struct usb_device *dev, struct urb *urb, int bustime, int isoc)
976{
977 dev->bus->bandwidth_allocated += bustime;
978 if (isoc)
979 dev->bus->bandwidth_isoc_reqs++;
980 else
981 dev->bus->bandwidth_int_reqs++;
982 urb->bandwidth = bustime;
983
984#ifdef USB_BANDWIDTH_MESSAGES
985 dev_dbg (&dev->dev, "bandwidth alloc increased by %d (%s) to %d for %d requesters\n",
986 bustime,
987 isoc ? "ISOC" : "INTR",
988 dev->bus->bandwidth_allocated,
989 dev->bus->bandwidth_int_reqs + dev->bus->bandwidth_isoc_reqs);
990#endif
991}
992EXPORT_SYMBOL (usb_claim_bandwidth);
993
994
995/**
996 * usb_release_bandwidth - reverses effect of usb_claim_bandwidth()
997 * @dev: source/target of request
998 * @urb: request (urb->dev == dev)
999 * @isoc: true iff the request is isochronous
1000 *
1001 * This records that previously allocated bandwidth has been released.
1002 * Bandwidth is released when endpoints are removed from the host controller's
1003 * periodic schedule.
1004 */
1005void usb_release_bandwidth (struct usb_device *dev, struct urb *urb, int isoc)
1006{
1007 dev->bus->bandwidth_allocated -= urb->bandwidth;
1008 if (isoc)
1009 dev->bus->bandwidth_isoc_reqs--;
1010 else
1011 dev->bus->bandwidth_int_reqs--;
1012
1013#ifdef USB_BANDWIDTH_MESSAGES
1014 dev_dbg (&dev->dev, "bandwidth alloc reduced by %d (%s) to %d for %d requesters\n",
1015 urb->bandwidth,
1016 isoc ? "ISOC" : "INTR",
1017 dev->bus->bandwidth_allocated,
1018 dev->bus->bandwidth_int_reqs + dev->bus->bandwidth_isoc_reqs);
1019#endif
1020 urb->bandwidth = 0;
1021}
1022EXPORT_SYMBOL (usb_release_bandwidth);
1023
1024 892
1025/*-------------------------------------------------------------------------*/ 893/*-------------------------------------------------------------------------*/
1026 894
@@ -1034,11 +902,6 @@ static void urb_unlink (struct urb *urb)
1034{ 902{
1035 unsigned long flags; 903 unsigned long flags;
1036 904
1037 /* Release any periodic transfer bandwidth */
1038 if (urb->bandwidth)
1039 usb_release_bandwidth (urb->dev, urb,
1040 usb_pipeisoc (urb->pipe));
1041
1042 /* clear all state linking urb to this dev (and hcd) */ 905 /* clear all state linking urb to this dev (and hcd) */
1043 906
1044 spin_lock_irqsave (&hcd_data_lock, flags); 907 spin_lock_irqsave (&hcd_data_lock, flags);
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index 8f8df0d4382e..2a269ca20517 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -308,10 +308,6 @@ extern void usb_destroy_configuration(struct usb_device *dev);
308#define NS_TO_US(ns) ((ns + 500L) / 1000L) 308#define NS_TO_US(ns) ((ns + 500L) / 1000L)
309 /* convert & round nanoseconds to microseconds */ 309 /* convert & round nanoseconds to microseconds */
310 310
311extern void usb_claim_bandwidth (struct usb_device *dev, struct urb *urb,
312 int bustime, int isoc);
313extern void usb_release_bandwidth (struct usb_device *dev, struct urb *urb,
314 int isoc);
315 311
316/* 312/*
317 * Full/low speed bandwidth allocation constants/support. 313 * Full/low speed bandwidth allocation constants/support.
@@ -324,8 +320,6 @@ extern void usb_release_bandwidth (struct usb_device *dev, struct urb *urb,
324#define FRAME_TIME_MAX_BITS_ALLOC (90L * FRAME_TIME_BITS / 100L) 320#define FRAME_TIME_MAX_BITS_ALLOC (90L * FRAME_TIME_BITS / 100L)
325#define FRAME_TIME_MAX_USECS_ALLOC (90L * FRAME_TIME_USECS / 100L) 321#define FRAME_TIME_MAX_USECS_ALLOC (90L * FRAME_TIME_USECS / 100L)
326 322
327extern int usb_check_bandwidth (struct usb_device *dev, struct urb *urb);
328
329/* 323/*
330 * Ceiling [nano/micro]seconds (typical) for that many bytes at high speed 324 * Ceiling [nano/micro]seconds (typical) for that many bytes at high speed
331 * ISO is a bit less, no ACK ... from USB 2.0 spec, 5.11.3 (and needed 325 * ISO is a bit less, no ACK ... from USB 2.0 spec, 5.11.3 (and needed
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 1988224b362b..590ec82d0515 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -87,9 +87,6 @@ static DECLARE_WAIT_QUEUE_HEAD(khubd_wait);
87 87
88static struct task_struct *khubd_task; 88static struct task_struct *khubd_task;
89 89
90/* multithreaded probe logic */
91static int multithread_probe = 0;
92
93/* cycle leds on hubs that aren't blinking for attention */ 90/* cycle leds on hubs that aren't blinking for attention */
94static int blinkenlights = 0; 91static int blinkenlights = 0;
95module_param (blinkenlights, bool, S_IRUGO); 92module_param (blinkenlights, bool, S_IRUGO);
@@ -1256,9 +1253,28 @@ static inline void show_string(struct usb_device *udev, char *id, char *string)
1256static int __usb_port_suspend(struct usb_device *, int port1); 1253static int __usb_port_suspend(struct usb_device *, int port1);
1257#endif 1254#endif
1258 1255
1259static int __usb_new_device(void *void_data) 1256/**
1257 * usb_new_device - perform initial device setup (usbcore-internal)
1258 * @udev: newly addressed device (in ADDRESS state)
1259 *
1260 * This is called with devices which have been enumerated, but not yet
1261 * configured. The device descriptor is available, but not descriptors
1262 * for any device configuration. The caller must have locked either
1263 * the parent hub (if udev is a normal device) or else the
1264 * usb_bus_list_lock (if udev is a root hub). The parent's pointer to
1265 * udev has already been installed, but udev is not yet visible through
1266 * sysfs or other filesystem code.
1267 *
1268 * It will return if the device is configured properly or not. Zero if
1269 * the interface was registered with the driver core; else a negative
1270 * errno value.
1271 *
1272 * This call is synchronous, and may not be used in an interrupt context.
1273 *
1274 * Only the hub driver or root-hub registrar should ever call this.
1275 */
1276int usb_new_device(struct usb_device *udev)
1260{ 1277{
1261 struct usb_device *udev = void_data;
1262 int err; 1278 int err;
1263 1279
1264 /* Lock ourself into memory in order to keep a probe sequence 1280 /* Lock ourself into memory in order to keep a probe sequence
@@ -1375,44 +1391,6 @@ fail:
1375 goto exit; 1391 goto exit;
1376} 1392}
1377 1393
1378/**
1379 * usb_new_device - perform initial device setup (usbcore-internal)
1380 * @udev: newly addressed device (in ADDRESS state)
1381 *
1382 * This is called with devices which have been enumerated, but not yet
1383 * configured. The device descriptor is available, but not descriptors
1384 * for any device configuration. The caller must have locked either
1385 * the parent hub (if udev is a normal device) or else the
1386 * usb_bus_list_lock (if udev is a root hub). The parent's pointer to
1387 * udev has already been installed, but udev is not yet visible through
1388 * sysfs or other filesystem code.
1389 *
1390 * The return value for this function depends on if the
1391 * multithread_probe variable is set or not. If it's set, it will
1392 * return a if the probe thread was successfully created or not. If the
1393 * variable is not set, it will return if the device is configured
1394 * properly or not. interfaces, in sysfs); else a negative errno value.
1395 *
1396 * This call is synchronous, and may not be used in an interrupt context.
1397 *
1398 * Only the hub driver or root-hub registrar should ever call this.
1399 */
1400int usb_new_device(struct usb_device *udev)
1401{
1402 struct task_struct *probe_task;
1403 int ret = 0;
1404
1405 if (multithread_probe) {
1406 probe_task = kthread_run(__usb_new_device, udev,
1407 "usb-probe-%s", udev->devnum);
1408 if (IS_ERR(probe_task))
1409 ret = PTR_ERR(probe_task);
1410 } else
1411 ret = __usb_new_device(udev);
1412
1413 return ret;
1414}
1415
1416static int hub_port_status(struct usb_hub *hub, int port1, 1394static int hub_port_status(struct usb_hub *hub, int port1,
1417 u16 *status, u16 *change) 1395 u16 *status, u16 *change)
1418{ 1396{
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 149aa8bfb1fe..8aca3574c2b5 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1545,11 +1545,7 @@ int usb_driver_set_configuration(struct usb_device *udev, int config)
1545 INIT_WORK(&req->work, driver_set_config_work); 1545 INIT_WORK(&req->work, driver_set_config_work);
1546 1546
1547 usb_get_dev(udev); 1547 usb_get_dev(udev);
1548 if (!schedule_work(&req->work)) { 1548 schedule_work(&req->work);
1549 usb_put_dev(udev);
1550 kfree(req);
1551 return -EINVAL;
1552 }
1553 return 0; 1549 return 0;
1554} 1550}
1555EXPORT_SYMBOL_GPL(usb_driver_set_configuration); 1551EXPORT_SYMBOL_GPL(usb_driver_set_configuration);
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 55d8f575206d..4eaa0ee8e72f 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -16,16 +16,16 @@
16 16
17/* Active configuration fields */ 17/* Active configuration fields */
18#define usb_actconfig_show(field, multiplier, format_string) \ 18#define usb_actconfig_show(field, multiplier, format_string) \
19static ssize_t show_##field (struct device *dev, \ 19static ssize_t show_##field(struct device *dev, \
20 struct device_attribute *attr, char *buf) \ 20 struct device_attribute *attr, char *buf) \
21{ \ 21{ \
22 struct usb_device *udev; \ 22 struct usb_device *udev; \
23 struct usb_host_config *actconfig; \ 23 struct usb_host_config *actconfig; \
24 \ 24 \
25 udev = to_usb_device (dev); \ 25 udev = to_usb_device(dev); \
26 actconfig = udev->actconfig; \ 26 actconfig = udev->actconfig; \
27 if (actconfig) \ 27 if (actconfig) \
28 return sprintf (buf, format_string, \ 28 return sprintf(buf, format_string, \
29 actconfig->desc.field * multiplier); \ 29 actconfig->desc.field * multiplier); \
30 else \ 30 else \
31 return 0; \ 31 return 0; \
@@ -35,9 +35,9 @@ static ssize_t show_##field (struct device *dev, \
35usb_actconfig_show(field, multiplier, format_string) \ 35usb_actconfig_show(field, multiplier, format_string) \
36static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL); 36static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL);
37 37
38usb_actconfig_attr (bNumInterfaces, 1, "%2d\n") 38usb_actconfig_attr(bNumInterfaces, 1, "%2d\n")
39usb_actconfig_attr (bmAttributes, 1, "%2x\n") 39usb_actconfig_attr(bmAttributes, 1, "%2x\n")
40usb_actconfig_attr (bMaxPower, 2, "%3dmA\n") 40usb_actconfig_attr(bMaxPower, 2, "%3dmA\n")
41 41
42static ssize_t show_configuration_string(struct device *dev, 42static ssize_t show_configuration_string(struct device *dev,
43 struct device_attribute *attr, char *buf) 43 struct device_attribute *attr, char *buf)
@@ -45,7 +45,7 @@ static ssize_t show_configuration_string(struct device *dev,
45 struct usb_device *udev; 45 struct usb_device *udev;
46 struct usb_host_config *actconfig; 46 struct usb_host_config *actconfig;
47 47
48 udev = to_usb_device (dev); 48 udev = to_usb_device(dev);
49 actconfig = udev->actconfig; 49 actconfig = udev->actconfig;
50 if ((!actconfig) || (!actconfig->string)) 50 if ((!actconfig) || (!actconfig->string))
51 return 0; 51 return 0;
@@ -57,16 +57,16 @@ static DEVICE_ATTR(configuration, S_IRUGO, show_configuration_string, NULL);
57usb_actconfig_show(bConfigurationValue, 1, "%u\n"); 57usb_actconfig_show(bConfigurationValue, 1, "%u\n");
58 58
59static ssize_t 59static ssize_t
60set_bConfigurationValue (struct device *dev, struct device_attribute *attr, 60set_bConfigurationValue(struct device *dev, struct device_attribute *attr,
61 const char *buf, size_t count) 61 const char *buf, size_t count)
62{ 62{
63 struct usb_device *udev = to_usb_device (dev); 63 struct usb_device *udev = to_usb_device(dev);
64 int config, value; 64 int config, value;
65 65
66 if (sscanf (buf, "%u", &config) != 1 || config > 255) 66 if (sscanf(buf, "%u", &config) != 1 || config > 255)
67 return -EINVAL; 67 return -EINVAL;
68 usb_lock_device(udev); 68 usb_lock_device(udev);
69 value = usb_set_configuration (udev, config); 69 value = usb_set_configuration(udev, config);
70 usb_unlock_device(udev); 70 usb_unlock_device(udev);
71 return (value < 0) ? value : count; 71 return (value < 0) ? value : count;
72} 72}
@@ -81,7 +81,7 @@ static ssize_t show_##name(struct device *dev, \
81{ \ 81{ \
82 struct usb_device *udev; \ 82 struct usb_device *udev; \
83 \ 83 \
84 udev = to_usb_device (dev); \ 84 udev = to_usb_device(dev); \
85 return sprintf(buf, "%s\n", udev->name); \ 85 return sprintf(buf, "%s\n", udev->name); \
86} \ 86} \
87static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL); 87static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
@@ -91,12 +91,12 @@ usb_string_attr(manufacturer);
91usb_string_attr(serial); 91usb_string_attr(serial);
92 92
93static ssize_t 93static ssize_t
94show_speed (struct device *dev, struct device_attribute *attr, char *buf) 94show_speed(struct device *dev, struct device_attribute *attr, char *buf)
95{ 95{
96 struct usb_device *udev; 96 struct usb_device *udev;
97 char *speed; 97 char *speed;
98 98
99 udev = to_usb_device (dev); 99 udev = to_usb_device(dev);
100 100
101 switch (udev->speed) { 101 switch (udev->speed) {
102 case USB_SPEED_LOW: 102 case USB_SPEED_LOW:
@@ -112,22 +112,22 @@ show_speed (struct device *dev, struct device_attribute *attr, char *buf)
112 default: 112 default:
113 speed = "unknown"; 113 speed = "unknown";
114 } 114 }
115 return sprintf (buf, "%s\n", speed); 115 return sprintf(buf, "%s\n", speed);
116} 116}
117static DEVICE_ATTR(speed, S_IRUGO, show_speed, NULL); 117static DEVICE_ATTR(speed, S_IRUGO, show_speed, NULL);
118 118
119static ssize_t 119static ssize_t
120show_devnum (struct device *dev, struct device_attribute *attr, char *buf) 120show_devnum(struct device *dev, struct device_attribute *attr, char *buf)
121{ 121{
122 struct usb_device *udev; 122 struct usb_device *udev;
123 123
124 udev = to_usb_device (dev); 124 udev = to_usb_device(dev);
125 return sprintf (buf, "%d\n", udev->devnum); 125 return sprintf(buf, "%d\n", udev->devnum);
126} 126}
127static DEVICE_ATTR(devnum, S_IRUGO, show_devnum, NULL); 127static DEVICE_ATTR(devnum, S_IRUGO, show_devnum, NULL);
128 128
129static ssize_t 129static ssize_t
130show_version (struct device *dev, struct device_attribute *attr, char *buf) 130show_version(struct device *dev, struct device_attribute *attr, char *buf)
131{ 131{
132 struct usb_device *udev; 132 struct usb_device *udev;
133 u16 bcdUSB; 133 u16 bcdUSB;
@@ -139,25 +139,25 @@ show_version (struct device *dev, struct device_attribute *attr, char *buf)
139static DEVICE_ATTR(version, S_IRUGO, show_version, NULL); 139static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
140 140
141static ssize_t 141static ssize_t
142show_maxchild (struct device *dev, struct device_attribute *attr, char *buf) 142show_maxchild(struct device *dev, struct device_attribute *attr, char *buf)
143{ 143{
144 struct usb_device *udev; 144 struct usb_device *udev;
145 145
146 udev = to_usb_device (dev); 146 udev = to_usb_device(dev);
147 return sprintf (buf, "%d\n", udev->maxchild); 147 return sprintf(buf, "%d\n", udev->maxchild);
148} 148}
149static DEVICE_ATTR(maxchild, S_IRUGO, show_maxchild, NULL); 149static DEVICE_ATTR(maxchild, S_IRUGO, show_maxchild, NULL);
150 150
151/* Descriptor fields */ 151/* Descriptor fields */
152#define usb_descriptor_attr_le16(field, format_string) \ 152#define usb_descriptor_attr_le16(field, format_string) \
153static ssize_t \ 153static ssize_t \
154show_##field (struct device *dev, struct device_attribute *attr, \ 154show_##field(struct device *dev, struct device_attribute *attr, \
155 char *buf) \ 155 char *buf) \
156{ \ 156{ \
157 struct usb_device *udev; \ 157 struct usb_device *udev; \
158 \ 158 \
159 udev = to_usb_device (dev); \ 159 udev = to_usb_device(dev); \
160 return sprintf (buf, format_string, \ 160 return sprintf(buf, format_string, \
161 le16_to_cpu(udev->descriptor.field)); \ 161 le16_to_cpu(udev->descriptor.field)); \
162} \ 162} \
163static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL); 163static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL);
@@ -168,21 +168,21 @@ usb_descriptor_attr_le16(bcdDevice, "%04x\n")
168 168
169#define usb_descriptor_attr(field, format_string) \ 169#define usb_descriptor_attr(field, format_string) \
170static ssize_t \ 170static ssize_t \
171show_##field (struct device *dev, struct device_attribute *attr, \ 171show_##field(struct device *dev, struct device_attribute *attr, \
172 char *buf) \ 172 char *buf) \
173{ \ 173{ \
174 struct usb_device *udev; \ 174 struct usb_device *udev; \
175 \ 175 \
176 udev = to_usb_device (dev); \ 176 udev = to_usb_device(dev); \
177 return sprintf (buf, format_string, udev->descriptor.field); \ 177 return sprintf(buf, format_string, udev->descriptor.field); \
178} \ 178} \
179static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL); 179static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL);
180 180
181usb_descriptor_attr (bDeviceClass, "%02x\n") 181usb_descriptor_attr(bDeviceClass, "%02x\n")
182usb_descriptor_attr (bDeviceSubClass, "%02x\n") 182usb_descriptor_attr(bDeviceSubClass, "%02x\n")
183usb_descriptor_attr (bDeviceProtocol, "%02x\n") 183usb_descriptor_attr(bDeviceProtocol, "%02x\n")
184usb_descriptor_attr (bNumConfigurations, "%d\n") 184usb_descriptor_attr(bNumConfigurations, "%d\n")
185usb_descriptor_attr (bMaxPacketSize0, "%d\n") 185usb_descriptor_attr(bMaxPacketSize0, "%d\n")
186 186
187static struct attribute *dev_attrs[] = { 187static struct attribute *dev_attrs[] = {
188 /* current configuration's attributes */ 188 /* current configuration's attributes */
@@ -220,17 +220,17 @@ int usb_create_sysfs_dev_files(struct usb_device *udev)
220 return retval; 220 return retval;
221 221
222 if (udev->manufacturer) { 222 if (udev->manufacturer) {
223 retval = device_create_file (dev, &dev_attr_manufacturer); 223 retval = device_create_file(dev, &dev_attr_manufacturer);
224 if (retval) 224 if (retval)
225 goto error; 225 goto error;
226 } 226 }
227 if (udev->product) { 227 if (udev->product) {
228 retval = device_create_file (dev, &dev_attr_product); 228 retval = device_create_file(dev, &dev_attr_product);
229 if (retval) 229 if (retval)
230 goto error; 230 goto error;
231 } 231 }
232 if (udev->serial) { 232 if (udev->serial) {
233 retval = device_create_file (dev, &dev_attr_serial); 233 retval = device_create_file(dev, &dev_attr_serial);
234 if (retval) 234 if (retval)
235 goto error; 235 goto error;
236 } 236 }
@@ -246,7 +246,7 @@ error:
246 return retval; 246 return retval;
247} 247}
248 248
249void usb_remove_sysfs_dev_files (struct usb_device *udev) 249void usb_remove_sysfs_dev_files(struct usb_device *udev)
250{ 250{
251 struct device *dev = &udev->dev; 251 struct device *dev = &udev->dev;
252 252
@@ -264,22 +264,22 @@ void usb_remove_sysfs_dev_files (struct usb_device *udev)
264/* Interface fields */ 264/* Interface fields */
265#define usb_intf_attr(field, format_string) \ 265#define usb_intf_attr(field, format_string) \
266static ssize_t \ 266static ssize_t \
267show_##field (struct device *dev, struct device_attribute *attr, \ 267show_##field(struct device *dev, struct device_attribute *attr, \
268 char *buf) \ 268 char *buf) \
269{ \ 269{ \
270 struct usb_interface *intf = to_usb_interface (dev); \ 270 struct usb_interface *intf = to_usb_interface(dev); \
271 \ 271 \
272 return sprintf (buf, format_string, \ 272 return sprintf(buf, format_string, \
273 intf->cur_altsetting->desc.field); \ 273 intf->cur_altsetting->desc.field); \
274} \ 274} \
275static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL); 275static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL);
276 276
277usb_intf_attr (bInterfaceNumber, "%02x\n") 277usb_intf_attr(bInterfaceNumber, "%02x\n")
278usb_intf_attr (bAlternateSetting, "%2d\n") 278usb_intf_attr(bAlternateSetting, "%2d\n")
279usb_intf_attr (bNumEndpoints, "%02x\n") 279usb_intf_attr(bNumEndpoints, "%02x\n")
280usb_intf_attr (bInterfaceClass, "%02x\n") 280usb_intf_attr(bInterfaceClass, "%02x\n")
281usb_intf_attr (bInterfaceSubClass, "%02x\n") 281usb_intf_attr(bInterfaceSubClass, "%02x\n")
282usb_intf_attr (bInterfaceProtocol, "%02x\n") 282usb_intf_attr(bInterfaceProtocol, "%02x\n")
283 283
284static ssize_t show_interface_string(struct device *dev, 284static ssize_t show_interface_string(struct device *dev,
285 struct device_attribute *attr, char *buf) 285 struct device_attribute *attr, char *buf)
@@ -288,8 +288,8 @@ static ssize_t show_interface_string(struct device *dev,
288 struct usb_device *udev; 288 struct usb_device *udev;
289 int len; 289 int len;
290 290
291 intf = to_usb_interface (dev); 291 intf = to_usb_interface(dev);
292 udev = interface_to_usbdev (intf); 292 udev = interface_to_usbdev(intf);
293 len = snprintf(buf, 256, "%s", intf->cur_altsetting->string); 293 len = snprintf(buf, 256, "%s", intf->cur_altsetting->string);
294 if (len < 0) 294 if (len < 0)
295 return 0; 295 return 0;
@@ -384,7 +384,7 @@ error:
384 return retval; 384 return retval;
385} 385}
386 386
387void usb_remove_sysfs_intf_files (struct usb_interface *intf) 387void usb_remove_sysfs_intf_files(struct usb_interface *intf)
388{ 388{
389 usb_remove_intf_ep_files(intf); 389 usb_remove_intf_ep_files(intf);
390 sysfs_remove_group(&intf->dev.kobj, &intf_attr_grp); 390 sysfs_remove_group(&intf->dev.kobj, &intf_attr_grp);
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 9801d08edacf..94ea9727ff55 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -235,16 +235,15 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
235 235
236 urb->status = -EINPROGRESS; 236 urb->status = -EINPROGRESS;
237 urb->actual_length = 0; 237 urb->actual_length = 0;
238 urb->bandwidth = 0;
239 238
240 /* Lots of sanity checks, so HCDs can rely on clean data 239 /* Lots of sanity checks, so HCDs can rely on clean data
241 * and don't need to duplicate tests 240 * and don't need to duplicate tests
242 */ 241 */
243 pipe = urb->pipe; 242 pipe = urb->pipe;
244 temp = usb_pipetype (pipe); 243 temp = usb_pipetype(pipe);
245 is_out = usb_pipeout (pipe); 244 is_out = usb_pipeout(pipe);
246 245
247 if (!usb_pipecontrol (pipe) && dev->state < USB_STATE_CONFIGURED) 246 if (!usb_pipecontrol(pipe) && dev->state < USB_STATE_CONFIGURED)
248 return -ENODEV; 247 return -ENODEV;
249 248
250 /* FIXME there should be a sharable lock protecting us against 249 /* FIXME there should be a sharable lock protecting us against
@@ -253,11 +252,11 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
253 * checks get made.) 252 * checks get made.)
254 */ 253 */
255 254
256 max = usb_maxpacket (dev, pipe, is_out); 255 max = usb_maxpacket(dev, pipe, is_out);
257 if (max <= 0) { 256 if (max <= 0) {
258 dev_dbg(&dev->dev, 257 dev_dbg(&dev->dev,
259 "bogus endpoint ep%d%s in %s (bad maxpacket %d)\n", 258 "bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
260 usb_pipeendpoint (pipe), is_out ? "out" : "in", 259 usb_pipeendpoint(pipe), is_out ? "out" : "in",
261 __FUNCTION__, max); 260 __FUNCTION__, max);
262 return -EMSGSIZE; 261 return -EMSGSIZE;
263 } 262 }
@@ -279,11 +278,11 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
279 if (urb->number_of_packets <= 0) 278 if (urb->number_of_packets <= 0)
280 return -EINVAL; 279 return -EINVAL;
281 for (n = 0; n < urb->number_of_packets; n++) { 280 for (n = 0; n < urb->number_of_packets; n++) {
282 len = urb->iso_frame_desc [n].length; 281 len = urb->iso_frame_desc[n].length;
283 if (len < 0 || len > max) 282 if (len < 0 || len > max)
284 return -EMSGSIZE; 283 return -EMSGSIZE;
285 urb->iso_frame_desc [n].status = -EXDEV; 284 urb->iso_frame_desc[n].status = -EXDEV;
286 urb->iso_frame_desc [n].actual_length = 0; 285 urb->iso_frame_desc[n].actual_length = 0;
287 } 286 }
288 } 287 }
289 288
@@ -322,7 +321,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
322 321
323 /* fail if submitter gave bogus flags */ 322 /* fail if submitter gave bogus flags */
324 if (urb->transfer_flags != orig_flags) { 323 if (urb->transfer_flags != orig_flags) {
325 err ("BOGUS urb flags, %x --> %x", 324 err("BOGUS urb flags, %x --> %x",
326 orig_flags, urb->transfer_flags); 325 orig_flags, urb->transfer_flags);
327 return -EINVAL; 326 return -EINVAL;
328 } 327 }
@@ -373,7 +372,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
373 urb->interval = temp; 372 urb->interval = temp;
374 } 373 }
375 374
376 return usb_hcd_submit_urb (urb, mem_flags); 375 return usb_hcd_submit_urb(urb, mem_flags);
377} 376}
378 377
379/*-------------------------------------------------------------------*/ 378/*-------------------------------------------------------------------*/
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 02426d0b9a34..3db721cd557a 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -233,7 +233,7 @@ static void usb_autosuspend_work(struct work_struct *work)
233 * @parent: hub to which device is connected; null to allocate a root hub 233 * @parent: hub to which device is connected; null to allocate a root hub
234 * @bus: bus used to access the device 234 * @bus: bus used to access the device
235 * @port1: one-based index of port; ignored for root hubs 235 * @port1: one-based index of port; ignored for root hubs
236 * Context: !in_interrupt () 236 * Context: !in_interrupt()
237 * 237 *
238 * Only hub drivers (including virtual root hub drivers for host 238 * Only hub drivers (including virtual root hub drivers for host
239 * controllers) should ever call this. 239 * controllers) should ever call this.
@@ -277,22 +277,22 @@ usb_alloc_dev(struct usb_device *parent, struct usb_bus *bus, unsigned port1)
277 * as stable: bus->busnum changes easily from modprobe order, 277 * as stable: bus->busnum changes easily from modprobe order,
278 * cardbus or pci hotplugging, and so on. 278 * cardbus or pci hotplugging, and so on.
279 */ 279 */
280 if (unlikely (!parent)) { 280 if (unlikely(!parent)) {
281 dev->devpath [0] = '0'; 281 dev->devpath[0] = '0';
282 282
283 dev->dev.parent = bus->controller; 283 dev->dev.parent = bus->controller;
284 sprintf (&dev->dev.bus_id[0], "usb%d", bus->busnum); 284 sprintf(&dev->dev.bus_id[0], "usb%d", bus->busnum);
285 } else { 285 } else {
286 /* match any labeling on the hubs; it's one-based */ 286 /* match any labeling on the hubs; it's one-based */
287 if (parent->devpath [0] == '0') 287 if (parent->devpath[0] == '0')
288 snprintf (dev->devpath, sizeof dev->devpath, 288 snprintf(dev->devpath, sizeof dev->devpath,
289 "%d", port1); 289 "%d", port1);
290 else 290 else
291 snprintf (dev->devpath, sizeof dev->devpath, 291 snprintf(dev->devpath, sizeof dev->devpath,
292 "%s.%d", parent->devpath, port1); 292 "%s.%d", parent->devpath, port1);
293 293
294 dev->dev.parent = &parent->dev; 294 dev->dev.parent = &parent->dev;
295 sprintf (&dev->dev.bus_id[0], "%d-%s", 295 sprintf(&dev->dev.bus_id[0], "%d-%s",
296 bus->busnum, dev->devpath); 296 bus->busnum, dev->devpath);
297 297
298 /* hub driver sets up TT records */ 298 /* hub driver sets up TT records */
@@ -463,7 +463,7 @@ static struct usb_device *match_device(struct usb_device *dev,
463 /* see if this device matches */ 463 /* see if this device matches */
464 if ((vendor_id == le16_to_cpu(dev->descriptor.idVendor)) && 464 if ((vendor_id == le16_to_cpu(dev->descriptor.idVendor)) &&
465 (product_id == le16_to_cpu(dev->descriptor.idProduct))) { 465 (product_id == le16_to_cpu(dev->descriptor.idProduct))) {
466 dev_dbg (&dev->dev, "matched this device!\n"); 466 dev_dbg(&dev->dev, "matched this device!\n");
467 ret_dev = usb_get_dev(dev); 467 ret_dev = usb_get_dev(dev);
468 goto exit; 468 goto exit;
469 } 469 }
@@ -535,7 +535,7 @@ exit:
535 */ 535 */
536int usb_get_current_frame_number(struct usb_device *dev) 536int usb_get_current_frame_number(struct usb_device *dev)
537{ 537{
538 return usb_hcd_get_frame_number (dev); 538 return usb_hcd_get_frame_number(dev);
539} 539}
540 540
541/*-------------------------------------------------------------------*/ 541/*-------------------------------------------------------------------*/
@@ -593,7 +593,7 @@ int __usb_get_extra_descriptor(char *buffer, unsigned size,
593 * 593 *
594 * When the buffer is no longer used, free it with usb_buffer_free(). 594 * When the buffer is no longer used, free it with usb_buffer_free().
595 */ 595 */
596void *usb_buffer_alloc ( 596void *usb_buffer_alloc(
597 struct usb_device *dev, 597 struct usb_device *dev,
598 size_t size, 598 size_t size,
599 gfp_t mem_flags, 599 gfp_t mem_flags,
@@ -602,7 +602,7 @@ void *usb_buffer_alloc (
602{ 602{
603 if (!dev || !dev->bus) 603 if (!dev || !dev->bus)
604 return NULL; 604 return NULL;
605 return hcd_buffer_alloc (dev->bus, size, mem_flags, dma); 605 return hcd_buffer_alloc(dev->bus, size, mem_flags, dma);
606} 606}
607 607
608/** 608/**
@@ -616,7 +616,7 @@ void *usb_buffer_alloc (
616 * been allocated using usb_buffer_alloc(), and the parameters must match 616 * been allocated using usb_buffer_alloc(), and the parameters must match
617 * those provided in that allocation request. 617 * those provided in that allocation request.
618 */ 618 */
619void usb_buffer_free ( 619void usb_buffer_free(
620 struct usb_device *dev, 620 struct usb_device *dev,
621 size_t size, 621 size_t size,
622 void *addr, 622 void *addr,
@@ -627,7 +627,7 @@ void usb_buffer_free (
627 return; 627 return;
628 if (!addr) 628 if (!addr)
629 return; 629 return;
630 hcd_buffer_free (dev->bus, size, addr, dma); 630 hcd_buffer_free(dev->bus, size, addr, dma);
631} 631}
632 632
633/** 633/**
@@ -647,7 +647,7 @@ void usb_buffer_free (
647 * Reverse the effect of this call with usb_buffer_unmap(). 647 * Reverse the effect of this call with usb_buffer_unmap().
648 */ 648 */
649#if 0 649#if 0
650struct urb *usb_buffer_map (struct urb *urb) 650struct urb *usb_buffer_map(struct urb *urb)
651{ 651{
652 struct usb_bus *bus; 652 struct usb_bus *bus;
653 struct device *controller; 653 struct device *controller;
@@ -659,14 +659,14 @@ struct urb *usb_buffer_map (struct urb *urb)
659 return NULL; 659 return NULL;
660 660
661 if (controller->dma_mask) { 661 if (controller->dma_mask) {
662 urb->transfer_dma = dma_map_single (controller, 662 urb->transfer_dma = dma_map_single(controller,
663 urb->transfer_buffer, urb->transfer_buffer_length, 663 urb->transfer_buffer, urb->transfer_buffer_length,
664 usb_pipein (urb->pipe) 664 usb_pipein(urb->pipe)
665 ? DMA_FROM_DEVICE : DMA_TO_DEVICE); 665 ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
666 if (usb_pipecontrol (urb->pipe)) 666 if (usb_pipecontrol(urb->pipe))
667 urb->setup_dma = dma_map_single (controller, 667 urb->setup_dma = dma_map_single(controller,
668 urb->setup_packet, 668 urb->setup_packet,
669 sizeof (struct usb_ctrlrequest), 669 sizeof(struct usb_ctrlrequest),
670 DMA_TO_DEVICE); 670 DMA_TO_DEVICE);
671 // FIXME generic api broken like pci, can't report errors 671 // FIXME generic api broken like pci, can't report errors
672 // if (urb->transfer_dma == DMA_ADDR_INVALID) return 0; 672 // if (urb->transfer_dma == DMA_ADDR_INVALID) return 0;
@@ -689,7 +689,7 @@ struct urb *usb_buffer_map (struct urb *urb)
689 * usb_buffer_dmasync - synchronize DMA and CPU view of buffer(s) 689 * usb_buffer_dmasync - synchronize DMA and CPU view of buffer(s)
690 * @urb: urb whose transfer_buffer/setup_packet will be synchronized 690 * @urb: urb whose transfer_buffer/setup_packet will be synchronized
691 */ 691 */
692void usb_buffer_dmasync (struct urb *urb) 692void usb_buffer_dmasync(struct urb *urb)
693{ 693{
694 struct usb_bus *bus; 694 struct usb_bus *bus;
695 struct device *controller; 695 struct device *controller;
@@ -702,14 +702,14 @@ void usb_buffer_dmasync (struct urb *urb)
702 return; 702 return;
703 703
704 if (controller->dma_mask) { 704 if (controller->dma_mask) {
705 dma_sync_single (controller, 705 dma_sync_single(controller,
706 urb->transfer_dma, urb->transfer_buffer_length, 706 urb->transfer_dma, urb->transfer_buffer_length,
707 usb_pipein (urb->pipe) 707 usb_pipein(urb->pipe)
708 ? DMA_FROM_DEVICE : DMA_TO_DEVICE); 708 ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
709 if (usb_pipecontrol (urb->pipe)) 709 if (usb_pipecontrol(urb->pipe))
710 dma_sync_single (controller, 710 dma_sync_single(controller,
711 urb->setup_dma, 711 urb->setup_dma,
712 sizeof (struct usb_ctrlrequest), 712 sizeof(struct usb_ctrlrequest),
713 DMA_TO_DEVICE); 713 DMA_TO_DEVICE);
714 } 714 }
715} 715}
@@ -722,7 +722,7 @@ void usb_buffer_dmasync (struct urb *urb)
722 * Reverses the effect of usb_buffer_map(). 722 * Reverses the effect of usb_buffer_map().
723 */ 723 */
724#if 0 724#if 0
725void usb_buffer_unmap (struct urb *urb) 725void usb_buffer_unmap(struct urb *urb)
726{ 726{
727 struct usb_bus *bus; 727 struct usb_bus *bus;
728 struct device *controller; 728 struct device *controller;
@@ -735,14 +735,14 @@ void usb_buffer_unmap (struct urb *urb)
735 return; 735 return;
736 736
737 if (controller->dma_mask) { 737 if (controller->dma_mask) {
738 dma_unmap_single (controller, 738 dma_unmap_single(controller,
739 urb->transfer_dma, urb->transfer_buffer_length, 739 urb->transfer_dma, urb->transfer_buffer_length,
740 usb_pipein (urb->pipe) 740 usb_pipein(urb->pipe)
741 ? DMA_FROM_DEVICE : DMA_TO_DEVICE); 741 ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
742 if (usb_pipecontrol (urb->pipe)) 742 if (usb_pipecontrol(urb->pipe))
743 dma_unmap_single (controller, 743 dma_unmap_single(controller,
744 urb->setup_dma, 744 urb->setup_dma,
745 sizeof (struct usb_ctrlrequest), 745 sizeof(struct usb_ctrlrequest),
746 DMA_TO_DEVICE); 746 DMA_TO_DEVICE);
747 } 747 }
748 urb->transfer_flags &= ~(URB_NO_TRANSFER_DMA_MAP 748 urb->transfer_flags &= ~(URB_NO_TRANSFER_DMA_MAP
@@ -783,15 +783,15 @@ int usb_buffer_map_sg(const struct usb_device *dev, unsigned pipe,
783 struct device *controller; 783 struct device *controller;
784 784
785 if (!dev 785 if (!dev
786 || usb_pipecontrol (pipe) 786 || usb_pipecontrol(pipe)
787 || !(bus = dev->bus) 787 || !(bus = dev->bus)
788 || !(controller = bus->controller) 788 || !(controller = bus->controller)
789 || !controller->dma_mask) 789 || !controller->dma_mask)
790 return -1; 790 return -1;
791 791
792 // FIXME generic api broken like pci, can't report errors 792 // FIXME generic api broken like pci, can't report errors
793 return dma_map_sg (controller, sg, nents, 793 return dma_map_sg(controller, sg, nents,
794 usb_pipein (pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); 794 usb_pipein(pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
795} 795}
796 796
797/* XXX DISABLED, no users currently. If you wish to re-enable this 797/* XXX DISABLED, no users currently. If you wish to re-enable this
@@ -823,8 +823,8 @@ void usb_buffer_dmasync_sg(const struct usb_device *dev, unsigned pipe,
823 || !controller->dma_mask) 823 || !controller->dma_mask)
824 return; 824 return;
825 825
826 dma_sync_sg (controller, sg, n_hw_ents, 826 dma_sync_sg(controller, sg, n_hw_ents,
827 usb_pipein (pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); 827 usb_pipein(pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
828} 828}
829#endif 829#endif
830 830
@@ -849,8 +849,8 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, unsigned pipe,
849 || !controller->dma_mask) 849 || !controller->dma_mask)
850 return; 850 return;
851 851
852 dma_unmap_sg (controller, sg, n_hw_ents, 852 dma_unmap_sg(controller, sg, n_hw_ents,
853 usb_pipein (pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); 853 usb_pipein(pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
854} 854}
855 855
856/* format to disable USB on kernel command line is: nousb */ 856/* format to disable USB on kernel command line is: nousb */
@@ -871,7 +871,7 @@ static int __init usb_init(void)
871{ 871{
872 int retval; 872 int retval;
873 if (nousb) { 873 if (nousb) {
874 pr_info ("%s: USB support disabled\n", usbcore_name); 874 pr_info("%s: USB support disabled\n", usbcore_name);
875 return 0; 875 return 0;
876 } 876 }
877 877
@@ -971,19 +971,19 @@ EXPORT_SYMBOL(__usb_get_extra_descriptor);
971EXPORT_SYMBOL(usb_find_device); 971EXPORT_SYMBOL(usb_find_device);
972EXPORT_SYMBOL(usb_get_current_frame_number); 972EXPORT_SYMBOL(usb_get_current_frame_number);
973 973
974EXPORT_SYMBOL (usb_buffer_alloc); 974EXPORT_SYMBOL(usb_buffer_alloc);
975EXPORT_SYMBOL (usb_buffer_free); 975EXPORT_SYMBOL(usb_buffer_free);
976 976
977#if 0 977#if 0
978EXPORT_SYMBOL (usb_buffer_map); 978EXPORT_SYMBOL(usb_buffer_map);
979EXPORT_SYMBOL (usb_buffer_dmasync); 979EXPORT_SYMBOL(usb_buffer_dmasync);
980EXPORT_SYMBOL (usb_buffer_unmap); 980EXPORT_SYMBOL(usb_buffer_unmap);
981#endif 981#endif
982 982
983EXPORT_SYMBOL (usb_buffer_map_sg); 983EXPORT_SYMBOL(usb_buffer_map_sg);
984#if 0 984#if 0
985EXPORT_SYMBOL (usb_buffer_dmasync_sg); 985EXPORT_SYMBOL(usb_buffer_dmasync_sg);
986#endif 986#endif
987EXPORT_SYMBOL (usb_buffer_unmap_sg); 987EXPORT_SYMBOL(usb_buffer_unmap_sg);
988 988
989MODULE_LICENSE("GPL"); 989MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 812c733ba8ce..f39050145f1f 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -39,7 +39,7 @@
39#include <linux/interrupt.h> 39#include <linux/interrupt.h>
40#include <linux/proc_fs.h> 40#include <linux/proc_fs.h>
41#include <linux/clk.h> 41#include <linux/clk.h>
42#include <linux/usb_ch9.h> 42#include <linux/usb/ch9.h>
43#include <linux/usb_gadget.h> 43#include <linux/usb_gadget.h>
44 44
45#include <asm/byteorder.h> 45#include <asm/byteorder.h>
@@ -1807,16 +1807,13 @@ static int at91udc_suspend(struct platform_device *pdev, pm_message_t mesg)
1807 || !wake 1807 || !wake
1808 || at91_suspend_entering_slow_clock()) { 1808 || at91_suspend_entering_slow_clock()) {
1809 pullup(udc, 0); 1809 pullup(udc, 0);
1810 disable_irq_wake(udc->udp_irq); 1810 wake = 0;
1811 } else 1811 } else
1812 enable_irq_wake(udc->udp_irq); 1812 enable_irq_wake(udc->udp_irq);
1813 1813
1814 if (udc->board.vbus_pin > 0) { 1814 udc->active_suspend = wake;
1815 if (wake) 1815 if (udc->board.vbus_pin > 0 && wake)
1816 enable_irq_wake(udc->board.vbus_pin); 1816 enable_irq_wake(udc->board.vbus_pin);
1817 else
1818 disable_irq_wake(udc->board.vbus_pin);
1819 }
1820 return 0; 1817 return 0;
1821} 1818}
1822 1819
@@ -1824,8 +1821,14 @@ static int at91udc_resume(struct platform_device *pdev)
1824{ 1821{
1825 struct at91_udc *udc = platform_get_drvdata(pdev); 1822 struct at91_udc *udc = platform_get_drvdata(pdev);
1826 1823
1824 if (udc->board.vbus_pin > 0 && udc->active_suspend)
1825 disable_irq_wake(udc->board.vbus_pin);
1826
1827 /* maybe reconnect to host; if so, clocks on */ 1827 /* maybe reconnect to host; if so, clocks on */
1828 pullup(udc, 1); 1828 if (udc->active_suspend)
1829 disable_irq_wake(udc->udp_irq);
1830 else
1831 pullup(udc, 1);
1829 return 0; 1832 return 0;
1830} 1833}
1831#else 1834#else
diff --git a/drivers/usb/gadget/at91_udc.h b/drivers/usb/gadget/at91_udc.h
index 677089baa59d..7e34e2f864f9 100644
--- a/drivers/usb/gadget/at91_udc.h
+++ b/drivers/usb/gadget/at91_udc.h
@@ -136,6 +136,7 @@ struct at91_udc {
136 unsigned wait_for_addr_ack:1; 136 unsigned wait_for_addr_ack:1;
137 unsigned wait_for_config_ack:1; 137 unsigned wait_for_config_ack:1;
138 unsigned selfpowered:1; 138 unsigned selfpowered:1;
139 unsigned active_suspend:1;
139 u8 addr; 140 u8 addr;
140 struct at91_udc_data board; 141 struct at91_udc_data board;
141 struct clk *iclk, *fclk; 142 struct clk *iclk, *fclk;
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
index 83b4866df9af..d18901b92cda 100644
--- a/drivers/usb/gadget/config.c
+++ b/drivers/usb/gadget/config.c
@@ -24,7 +24,7 @@
24#include <linux/string.h> 24#include <linux/string.h>
25#include <linux/device.h> 25#include <linux/device.h>
26 26
27#include <linux/usb_ch9.h> 27#include <linux/usb/ch9.h>
28#include <linux/usb_gadget.h> 28#include <linux/usb_gadget.h>
29 29
30 30
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 53d584589c26..f28af06905a5 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -27,7 +27,7 @@
27#include <linux/ctype.h> 27#include <linux/ctype.h>
28#include <linux/string.h> 28#include <linux/string.h>
29 29
30#include <linux/usb_ch9.h> 30#include <linux/usb/ch9.h>
31#include <linux/usb_gadget.h> 31#include <linux/usb_gadget.h>
32 32
33#include "gadget_chips.h" 33#include "gadget_chips.h"
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index d15bf22b9a03..22e3c9443641 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -47,7 +47,7 @@
47#include <asm/uaccess.h> 47#include <asm/uaccess.h>
48#include <asm/unaligned.h> 48#include <asm/unaligned.h>
49 49
50#include <linux/usb_ch9.h> 50#include <linux/usb/ch9.h>
51#include <linux/usb/cdc.h> 51#include <linux/usb/cdc.h>
52#include <linux/usb_gadget.h> 52#include <linux/usb_gadget.h>
53 53
@@ -72,9 +72,18 @@
72 * 72 *
73 * There's some hardware that can't talk CDC. We make that hardware 73 * There's some hardware that can't talk CDC. We make that hardware
74 * implement a "minimalist" vendor-agnostic CDC core: same framing, but 74 * implement a "minimalist" vendor-agnostic CDC core: same framing, but
75 * link-level setup only requires activating the configuration. 75 * link-level setup only requires activating the configuration. Only the
76 * Linux supports it, but other host operating systems may not. 76 * endpoint descriptors, and product/vendor IDs, are relevant; no control
77 * (This is a subset of CDC Ethernet.) 77 * operations are available. Linux supports it, but other host operating
78 * systems may not. (This is a subset of CDC Ethernet.)
79 *
80 * It turns out that if you add a few descriptors to that "CDC Subset",
81 * (Windows) host side drivers from MCCI can treat it as one submode of
82 * a proprietary scheme called "SAFE" ... without needing to know about
83 * specific product/vendor IDs. So we do that, making it easier to use
84 * those MS-Windows drivers. Those added descriptors make it resemble a
85 * CDC MDLM device, but they don't change device behavior at all. (See
86 * MCCI Engineering report 950198 "SAFE Networking Functions".)
78 * 87 *
79 * A third option is also in use. Rather than CDC Ethernet, or something 88 * A third option is also in use. Rather than CDC Ethernet, or something
80 * simpler, Microsoft pushes their own approach: RNDIS. The published 89 * simpler, Microsoft pushes their own approach: RNDIS. The published
@@ -254,6 +263,10 @@ MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
254#define DEV_CONFIG_CDC 263#define DEV_CONFIG_CDC
255#endif 264#endif
256 265
266#ifdef CONFIG_USB_GADGET_S3C2410
267#define DEV_CONFIG_CDC
268#endif
269
257#ifdef CONFIG_USB_GADGET_AT91 270#ifdef CONFIG_USB_GADGET_AT91
258#define DEV_CONFIG_CDC 271#define DEV_CONFIG_CDC
259#endif 272#endif
@@ -266,6 +279,10 @@ MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
266#define DEV_CONFIG_CDC 279#define DEV_CONFIG_CDC
267#endif 280#endif
268 281
282#ifdef CONFIG_USB_GADGET_HUSB2DEV
283#define DEV_CONFIG_CDC
284#endif
285
269 286
270/* For CDC-incapable hardware, choose the simple cdc subset. 287/* For CDC-incapable hardware, choose the simple cdc subset.
271 * Anything that talks bulk (without notable bugs) can do this. 288 * Anything that talks bulk (without notable bugs) can do this.
@@ -283,9 +300,6 @@ MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
283#define DEV_CONFIG_SUBSET 300#define DEV_CONFIG_SUBSET
284#endif 301#endif
285 302
286#ifdef CONFIG_USB_GADGET_S3C2410
287#define DEV_CONFIG_CDC
288#endif
289 303
290/*-------------------------------------------------------------------------*/ 304/*-------------------------------------------------------------------------*/
291 305
@@ -487,8 +501,17 @@ rndis_config = {
487 * endpoint. Both have a "data" interface and two bulk endpoints. 501 * endpoint. Both have a "data" interface and two bulk endpoints.
488 * There are also differences in how control requests are handled. 502 * There are also differences in how control requests are handled.
489 * 503 *
490 * RNDIS shares a lot with CDC-Ethernet, since it's a variant of 504 * RNDIS shares a lot with CDC-Ethernet, since it's a variant of the
491 * the CDC-ACM (modem) spec. 505 * CDC-ACM (modem) spec. Unfortunately MSFT's RNDIS driver is buggy; it
506 * may hang or oops. Since bugfixes (or accurate specs, letting Linux
507 * work around those bugs) are unlikely to ever come from MSFT, you may
508 * wish to avoid using RNDIS.
509 *
510 * MCCI offers an alternative to RNDIS if you need to connect to Windows
511 * but have hardware that can't support CDC Ethernet. We add descriptors
512 * to present the CDC Subset as a (nonconformant) CDC MDLM variant called
513 * "SAFE". That borrows from both CDC Ethernet and CDC MDLM. You can
514 * get those drivers from MCCI, or bundled with various products.
492 */ 515 */
493 516
494#ifdef DEV_CONFIG_CDC 517#ifdef DEV_CONFIG_CDC
@@ -522,8 +545,6 @@ rndis_control_intf = {
522}; 545};
523#endif 546#endif
524 547
525#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
526
527static const struct usb_cdc_header_desc header_desc = { 548static const struct usb_cdc_header_desc header_desc = {
528 .bLength = sizeof header_desc, 549 .bLength = sizeof header_desc,
529 .bDescriptorType = USB_DT_CS_INTERFACE, 550 .bDescriptorType = USB_DT_CS_INTERFACE,
@@ -532,6 +553,8 @@ static const struct usb_cdc_header_desc header_desc = {
532 .bcdCDC = __constant_cpu_to_le16 (0x0110), 553 .bcdCDC = __constant_cpu_to_le16 (0x0110),
533}; 554};
534 555
556#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
557
535static const struct usb_cdc_union_desc union_desc = { 558static const struct usb_cdc_union_desc union_desc = {
536 .bLength = sizeof union_desc, 559 .bLength = sizeof union_desc,
537 .bDescriptorType = USB_DT_CS_INTERFACE, 560 .bDescriptorType = USB_DT_CS_INTERFACE,
@@ -564,7 +587,40 @@ static const struct usb_cdc_acm_descriptor acm_descriptor = {
564 587
565#endif 588#endif
566 589
567#ifdef DEV_CONFIG_CDC 590#ifndef DEV_CONFIG_CDC
591
592/* "SAFE" loosely follows CDC WMC MDLM, violating the spec in various
593 * ways: data endpoints live in the control interface, there's no data
594 * interface, and it's not used to talk to a cell phone radio.
595 */
596
597static const struct usb_cdc_mdlm_desc mdlm_desc = {
598 .bLength = sizeof mdlm_desc,
599 .bDescriptorType = USB_DT_CS_INTERFACE,
600 .bDescriptorSubType = USB_CDC_MDLM_TYPE,
601
602 .bcdVersion = __constant_cpu_to_le16(0x0100),
603 .bGUID = {
604 0x5d, 0x34, 0xcf, 0x66, 0x11, 0x18, 0x11, 0xd6,
605 0xa2, 0x1a, 0x00, 0x01, 0x02, 0xca, 0x9a, 0x7f,
606 },
607};
608
609/* since "usb_cdc_mdlm_detail_desc" is a variable length structure, we
610 * can't really use its struct. All we do here is say that we're using
611 * the submode of "SAFE" which directly matches the CDC Subset.
612 */
613static const u8 mdlm_detail_desc[] = {
614 6,
615 USB_DT_CS_INTERFACE,
616 USB_CDC_MDLM_DETAIL_TYPE,
617
618 0, /* "SAFE" */
619 0, /* network control capabilities (none) */
620 0, /* network data capabilities ("raw" encapsulation) */
621};
622
623#endif
568 624
569static const struct usb_cdc_ether_desc ether_desc = { 625static const struct usb_cdc_ether_desc ether_desc = {
570 .bLength = sizeof ether_desc, 626 .bLength = sizeof ether_desc,
@@ -579,7 +635,6 @@ static const struct usb_cdc_ether_desc ether_desc = {
579 .bNumberPowerFilters = 0, 635 .bNumberPowerFilters = 0,
580}; 636};
581 637
582#endif
583 638
584#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS) 639#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
585 640
@@ -672,6 +727,9 @@ rndis_data_intf = {
672/* 727/*
673 * "Simple" CDC-subset option is a simple vendor-neutral model that most 728 * "Simple" CDC-subset option is a simple vendor-neutral model that most
674 * full speed controllers can handle: one interface, two bulk endpoints. 729 * full speed controllers can handle: one interface, two bulk endpoints.
730 *
731 * To assist host side drivers, we fancy it up a bit, and add descriptors
732 * so some host side drivers will understand it as a "SAFE" variant.
675 */ 733 */
676 734
677static const struct usb_interface_descriptor 735static const struct usb_interface_descriptor
@@ -682,8 +740,8 @@ subset_data_intf = {
682 .bInterfaceNumber = 0, 740 .bInterfaceNumber = 0,
683 .bAlternateSetting = 0, 741 .bAlternateSetting = 0,
684 .bNumEndpoints = 2, 742 .bNumEndpoints = 2,
685 .bInterfaceClass = USB_CLASS_VENDOR_SPEC, 743 .bInterfaceClass = USB_CLASS_COMM,
686 .bInterfaceSubClass = 0, 744 .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM,
687 .bInterfaceProtocol = 0, 745 .bInterfaceProtocol = 0,
688 .iInterface = STRING_DATA, 746 .iInterface = STRING_DATA,
689}; 747};
@@ -731,10 +789,15 @@ static const struct usb_descriptor_header *fs_eth_function [11] = {
731static inline void __init fs_subset_descriptors(void) 789static inline void __init fs_subset_descriptors(void)
732{ 790{
733#ifdef DEV_CONFIG_SUBSET 791#ifdef DEV_CONFIG_SUBSET
792 /* behavior is "CDC Subset"; extra descriptors say "SAFE" */
734 fs_eth_function[1] = (struct usb_descriptor_header *) &subset_data_intf; 793 fs_eth_function[1] = (struct usb_descriptor_header *) &subset_data_intf;
735 fs_eth_function[2] = (struct usb_descriptor_header *) &fs_source_desc; 794 fs_eth_function[2] = (struct usb_descriptor_header *) &header_desc;
736 fs_eth_function[3] = (struct usb_descriptor_header *) &fs_sink_desc; 795 fs_eth_function[3] = (struct usb_descriptor_header *) &mdlm_desc;
737 fs_eth_function[4] = NULL; 796 fs_eth_function[4] = (struct usb_descriptor_header *) &mdlm_detail_desc;
797 fs_eth_function[5] = (struct usb_descriptor_header *) &ether_desc;
798 fs_eth_function[6] = (struct usb_descriptor_header *) &fs_source_desc;
799 fs_eth_function[7] = (struct usb_descriptor_header *) &fs_sink_desc;
800 fs_eth_function[8] = NULL;
738#else 801#else
739 fs_eth_function[1] = NULL; 802 fs_eth_function[1] = NULL;
740#endif 803#endif
@@ -828,10 +891,15 @@ static const struct usb_descriptor_header *hs_eth_function [11] = {
828static inline void __init hs_subset_descriptors(void) 891static inline void __init hs_subset_descriptors(void)
829{ 892{
830#ifdef DEV_CONFIG_SUBSET 893#ifdef DEV_CONFIG_SUBSET
894 /* behavior is "CDC Subset"; extra descriptors say "SAFE" */
831 hs_eth_function[1] = (struct usb_descriptor_header *) &subset_data_intf; 895 hs_eth_function[1] = (struct usb_descriptor_header *) &subset_data_intf;
832 hs_eth_function[2] = (struct usb_descriptor_header *) &fs_source_desc; 896 hs_eth_function[2] = (struct usb_descriptor_header *) &header_desc;
833 hs_eth_function[3] = (struct usb_descriptor_header *) &fs_sink_desc; 897 hs_eth_function[3] = (struct usb_descriptor_header *) &mdlm_desc;
834 hs_eth_function[4] = NULL; 898 hs_eth_function[4] = (struct usb_descriptor_header *) &mdlm_detail_desc;
899 hs_eth_function[5] = (struct usb_descriptor_header *) &ether_desc;
900 hs_eth_function[6] = (struct usb_descriptor_header *) &hs_source_desc;
901 hs_eth_function[7] = (struct usb_descriptor_header *) &hs_sink_desc;
902 hs_eth_function[8] = NULL;
835#else 903#else
836 hs_eth_function[1] = NULL; 904 hs_eth_function[1] = NULL;
837#endif 905#endif
@@ -878,10 +946,8 @@ static char manufacturer [50];
878static char product_desc [40] = DRIVER_DESC; 946static char product_desc [40] = DRIVER_DESC;
879static char serial_number [20]; 947static char serial_number [20];
880 948
881#ifdef DEV_CONFIG_CDC
882/* address that the host will use ... usually assigned at random */ 949/* address that the host will use ... usually assigned at random */
883static char ethaddr [2 * ETH_ALEN + 1]; 950static char ethaddr [2 * ETH_ALEN + 1];
884#endif
885 951
886/* static strings, in UTF-8 */ 952/* static strings, in UTF-8 */
887static struct usb_string strings [] = { 953static struct usb_string strings [] = {
@@ -889,9 +955,9 @@ static struct usb_string strings [] = {
889 { STRING_PRODUCT, product_desc, }, 955 { STRING_PRODUCT, product_desc, },
890 { STRING_SERIALNUMBER, serial_number, }, 956 { STRING_SERIALNUMBER, serial_number, },
891 { STRING_DATA, "Ethernet Data", }, 957 { STRING_DATA, "Ethernet Data", },
958 { STRING_ETHADDR, ethaddr, },
892#ifdef DEV_CONFIG_CDC 959#ifdef DEV_CONFIG_CDC
893 { STRING_CDC, "CDC Ethernet", }, 960 { STRING_CDC, "CDC Ethernet", },
894 { STRING_ETHADDR, ethaddr, },
895 { STRING_CONTROL, "CDC Communications Control", }, 961 { STRING_CONTROL, "CDC Communications Control", },
896#endif 962#endif
897#ifdef DEV_CONFIG_SUBSET 963#ifdef DEV_CONFIG_SUBSET
@@ -986,10 +1052,10 @@ set_ether_config (struct eth_dev *dev, gfp_t gfp_flags)
986 } 1052 }
987#endif 1053#endif
988 1054
989 dev->in = ep_desc (dev->gadget, &hs_source_desc, &fs_source_desc); 1055 dev->in = ep_desc(gadget, &hs_source_desc, &fs_source_desc);
990 dev->in_ep->driver_data = dev; 1056 dev->in_ep->driver_data = dev;
991 1057
992 dev->out = ep_desc (dev->gadget, &hs_sink_desc, &fs_sink_desc); 1058 dev->out = ep_desc(gadget, &hs_sink_desc, &fs_sink_desc);
993 dev->out_ep->driver_data = dev; 1059 dev->out_ep->driver_data = dev;
994 1060
995 /* With CDC, the host isn't allowed to use these two data 1061 /* With CDC, the host isn't allowed to use these two data
@@ -2278,10 +2344,10 @@ eth_bind (struct usb_gadget *gadget)
2278 "RNDIS/%s", driver_desc); 2344 "RNDIS/%s", driver_desc);
2279 2345
2280 /* CDC subset ... recognized by Linux since 2.4.10, but Windows 2346 /* CDC subset ... recognized by Linux since 2.4.10, but Windows
2281 * drivers aren't widely available. 2347 * drivers aren't widely available. (That may be improved by
2348 * supporting one submode of the "SAFE" variant of MDLM.)
2282 */ 2349 */
2283 } else if (!cdc) { 2350 } else if (!cdc) {
2284 device_desc.bDeviceClass = USB_CLASS_VENDOR_SPEC;
2285 device_desc.idVendor = 2351 device_desc.idVendor =
2286 __constant_cpu_to_le16(SIMPLE_VENDOR_NUM); 2352 __constant_cpu_to_le16(SIMPLE_VENDOR_NUM);
2287 device_desc.idProduct = 2353 device_desc.idProduct =
@@ -2352,6 +2418,10 @@ autoconf_fail:
2352 if (!cdc) { 2418 if (!cdc) {
2353 eth_config.bNumInterfaces = 1; 2419 eth_config.bNumInterfaces = 1;
2354 eth_config.iConfiguration = STRING_SUBSET; 2420 eth_config.iConfiguration = STRING_SUBSET;
2421
2422 /* use functions to set these up, in case we're built to work
2423 * with multiple controllers and must override CDC Ethernet.
2424 */
2355 fs_subset_descriptors(); 2425 fs_subset_descriptors();
2356 hs_subset_descriptors(); 2426 hs_subset_descriptors();
2357 } 2427 }
@@ -2415,22 +2485,20 @@ autoconf_fail:
2415 2485
2416 /* Module params for these addresses should come from ID proms. 2486 /* Module params for these addresses should come from ID proms.
2417 * The host side address is used with CDC and RNDIS, and commonly 2487 * The host side address is used with CDC and RNDIS, and commonly
2418 * ends up in a persistent config database. 2488 * ends up in a persistent config database. It's not clear if
2489 * host side code for the SAFE thing cares -- its original BLAN
2490 * thing didn't, Sharp never assigned those addresses on Zaurii.
2419 */ 2491 */
2420 if (get_ether_addr(dev_addr, net->dev_addr)) 2492 if (get_ether_addr(dev_addr, net->dev_addr))
2421 dev_warn(&gadget->dev, 2493 dev_warn(&gadget->dev,
2422 "using random %s ethernet address\n", "self"); 2494 "using random %s ethernet address\n", "self");
2423 if (cdc || rndis) { 2495 if (get_ether_addr(host_addr, dev->host_mac))
2424 if (get_ether_addr(host_addr, dev->host_mac)) 2496 dev_warn(&gadget->dev,
2425 dev_warn(&gadget->dev, 2497 "using random %s ethernet address\n", "host");
2426 "using random %s ethernet address\n", "host"); 2498 snprintf (ethaddr, sizeof ethaddr, "%02X%02X%02X%02X%02X%02X",
2427#ifdef DEV_CONFIG_CDC 2499 dev->host_mac [0], dev->host_mac [1],
2428 snprintf (ethaddr, sizeof ethaddr, "%02X%02X%02X%02X%02X%02X", 2500 dev->host_mac [2], dev->host_mac [3],
2429 dev->host_mac [0], dev->host_mac [1], 2501 dev->host_mac [4], dev->host_mac [5]);
2430 dev->host_mac [2], dev->host_mac [3],
2431 dev->host_mac [4], dev->host_mac [5]);
2432#endif
2433 }
2434 2502
2435 if (rndis) { 2503 if (rndis) {
2436 status = rndis_init(); 2504 status = rndis_init();
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 72f2ae96fbf3..f04a29a46646 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -253,7 +253,7 @@
253#include <linux/freezer.h> 253#include <linux/freezer.h>
254#include <linux/utsname.h> 254#include <linux/utsname.h>
255 255
256#include <linux/usb_ch9.h> 256#include <linux/usb/ch9.h>
257#include <linux/usb_gadget.h> 257#include <linux/usb_gadget.h>
258 258
259#include "gadget_chips.h" 259#include "gadget_chips.h"
@@ -1148,7 +1148,7 @@ static int ep0_queue(struct fsg_dev *fsg)
1148 1148
1149static void ep0_complete(struct usb_ep *ep, struct usb_request *req) 1149static void ep0_complete(struct usb_ep *ep, struct usb_request *req)
1150{ 1150{
1151 struct fsg_dev *fsg = (struct fsg_dev *) ep->driver_data; 1151 struct fsg_dev *fsg = ep->driver_data;
1152 1152
1153 if (req->actual > 0) 1153 if (req->actual > 0)
1154 dump_msg(fsg, fsg->ep0req_name, req->buf, req->actual); 1154 dump_msg(fsg, fsg->ep0req_name, req->buf, req->actual);
@@ -1170,8 +1170,8 @@ static void ep0_complete(struct usb_ep *ep, struct usb_request *req)
1170 1170
1171static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req) 1171static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
1172{ 1172{
1173 struct fsg_dev *fsg = (struct fsg_dev *) ep->driver_data; 1173 struct fsg_dev *fsg = ep->driver_data;
1174 struct fsg_buffhd *bh = (struct fsg_buffhd *) req->context; 1174 struct fsg_buffhd *bh = req->context;
1175 1175
1176 if (req->status || req->actual != req->length) 1176 if (req->status || req->actual != req->length)
1177 DBG(fsg, "%s --> %d, %u/%u\n", __FUNCTION__, 1177 DBG(fsg, "%s --> %d, %u/%u\n", __FUNCTION__,
@@ -1190,8 +1190,8 @@ static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
1190 1190
1191static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req) 1191static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
1192{ 1192{
1193 struct fsg_dev *fsg = (struct fsg_dev *) ep->driver_data; 1193 struct fsg_dev *fsg = ep->driver_data;
1194 struct fsg_buffhd *bh = (struct fsg_buffhd *) req->context; 1194 struct fsg_buffhd *bh = req->context;
1195 1195
1196 dump_msg(fsg, "bulk-out", req->buf, req->actual); 1196 dump_msg(fsg, "bulk-out", req->buf, req->actual);
1197 if (req->status || req->actual != bh->bulk_out_intended_length) 1197 if (req->status || req->actual != bh->bulk_out_intended_length)
@@ -1214,8 +1214,8 @@ static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
1214#ifdef CONFIG_USB_FILE_STORAGE_TEST 1214#ifdef CONFIG_USB_FILE_STORAGE_TEST
1215static void intr_in_complete(struct usb_ep *ep, struct usb_request *req) 1215static void intr_in_complete(struct usb_ep *ep, struct usb_request *req)
1216{ 1216{
1217 struct fsg_dev *fsg = (struct fsg_dev *) ep->driver_data; 1217 struct fsg_dev *fsg = ep->driver_data;
1218 struct fsg_buffhd *bh = (struct fsg_buffhd *) req->context; 1218 struct fsg_buffhd *bh = req->context;
1219 1219
1220 if (req->status || req->actual != req->length) 1220 if (req->status || req->actual != req->length)
1221 DBG(fsg, "%s --> %d, %u/%u\n", __FUNCTION__, 1221 DBG(fsg, "%s --> %d, %u/%u\n", __FUNCTION__,
@@ -2577,7 +2577,7 @@ static int send_status(struct fsg_dev *fsg)
2577 } 2577 }
2578 2578
2579 if (transport_is_bbb()) { 2579 if (transport_is_bbb()) {
2580 struct bulk_cs_wrap *csw = (struct bulk_cs_wrap *) bh->buf; 2580 struct bulk_cs_wrap *csw = bh->buf;
2581 2581
2582 /* Store and send the Bulk-only CSW */ 2582 /* Store and send the Bulk-only CSW */
2583 csw->Signature = __constant_cpu_to_le32(USB_BULK_CS_SIG); 2583 csw->Signature = __constant_cpu_to_le32(USB_BULK_CS_SIG);
@@ -2596,8 +2596,7 @@ static int send_status(struct fsg_dev *fsg)
2596 return 0; 2596 return 0;
2597 2597
2598 } else { // USB_PR_CBI 2598 } else { // USB_PR_CBI
2599 struct interrupt_data *buf = (struct interrupt_data *) 2599 struct interrupt_data *buf = bh->buf;
2600 bh->buf;
2601 2600
2602 /* Store and send the Interrupt data. UFI sends the ASC 2601 /* Store and send the Interrupt data. UFI sends the ASC
2603 * and ASCQ bytes. Everything else sends a Type (which 2602 * and ASCQ bytes. Everything else sends a Type (which
@@ -2982,7 +2981,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2982static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh) 2981static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2983{ 2982{
2984 struct usb_request *req = bh->outreq; 2983 struct usb_request *req = bh->outreq;
2985 struct bulk_cb_wrap *cbw = (struct bulk_cb_wrap *) req->buf; 2984 struct bulk_cb_wrap *cbw = req->buf;
2986 2985
2987 /* Was this a real packet? */ 2986 /* Was this a real packet? */
2988 if (req->status) 2987 if (req->status)
@@ -3428,7 +3427,7 @@ static void handle_exception(struct fsg_dev *fsg)
3428 3427
3429static int fsg_main_thread(void *fsg_) 3428static int fsg_main_thread(void *fsg_)
3430{ 3429{
3431 struct fsg_dev *fsg = (struct fsg_dev *) fsg_; 3430 struct fsg_dev *fsg = fsg_;
3432 3431
3433 /* Allow the thread to be killed by a signal, but set the signal mask 3432 /* Allow the thread to be killed by a signal, but set the signal mask
3434 * to block everything but INT, TERM, KILL, and USR1. */ 3433 * to block everything but INT, TERM, KILL, and USR1. */
@@ -3600,7 +3599,7 @@ static ssize_t show_ro(struct device *dev, struct device_attribute *attr, char *
3600static ssize_t show_file(struct device *dev, struct device_attribute *attr, char *buf) 3599static ssize_t show_file(struct device *dev, struct device_attribute *attr, char *buf)
3601{ 3600{
3602 struct lun *curlun = dev_to_lun(dev); 3601 struct lun *curlun = dev_to_lun(dev);
3603 struct fsg_dev *fsg = (struct fsg_dev *) dev_get_drvdata(dev); 3602 struct fsg_dev *fsg = dev_get_drvdata(dev);
3604 char *p; 3603 char *p;
3605 ssize_t rc; 3604 ssize_t rc;
3606 3605
@@ -3629,7 +3628,7 @@ static ssize_t store_ro(struct device *dev, struct device_attribute *attr, const
3629{ 3628{
3630 ssize_t rc = count; 3629 ssize_t rc = count;
3631 struct lun *curlun = dev_to_lun(dev); 3630 struct lun *curlun = dev_to_lun(dev);
3632 struct fsg_dev *fsg = (struct fsg_dev *) dev_get_drvdata(dev); 3631 struct fsg_dev *fsg = dev_get_drvdata(dev);
3633 int i; 3632 int i;
3634 3633
3635 if (sscanf(buf, "%d", &i) != 1) 3634 if (sscanf(buf, "%d", &i) != 1)
@@ -3652,7 +3651,7 @@ static ssize_t store_ro(struct device *dev, struct device_attribute *attr, const
3652static ssize_t store_file(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 3651static ssize_t store_file(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
3653{ 3652{
3654 struct lun *curlun = dev_to_lun(dev); 3653 struct lun *curlun = dev_to_lun(dev);
3655 struct fsg_dev *fsg = (struct fsg_dev *) dev_get_drvdata(dev); 3654 struct fsg_dev *fsg = dev_get_drvdata(dev);
3656 int rc = 0; 3655 int rc = 0;
3657 3656
3658 if (curlun->prevent_medium_removal && backing_file_is_open(curlun)) { 3657 if (curlun->prevent_medium_removal && backing_file_is_open(curlun)) {
@@ -3700,7 +3699,7 @@ static void fsg_release(struct kref *ref)
3700 3699
3701static void lun_release(struct device *dev) 3700static void lun_release(struct device *dev)
3702{ 3701{
3703 struct fsg_dev *fsg = (struct fsg_dev *) dev_get_drvdata(dev); 3702 struct fsg_dev *fsg = dev_get_drvdata(dev);
3704 3703
3705 kref_put(&fsg->ref, fsg_release); 3704 kref_put(&fsg->ref, fsg_release);
3706} 3705}
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index aa80f0910720..2e3d6620d216 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -75,6 +75,12 @@
75#define gadget_is_pxa27x(g) 0 75#define gadget_is_pxa27x(g) 0
76#endif 76#endif
77 77
78#ifdef CONFIG_USB_GADGET_HUSB2DEV
79#define gadget_is_husb2dev(g) !strcmp("husb2_udc", (g)->name)
80#else
81#define gadget_is_husb2dev(g) 0
82#endif
83
78#ifdef CONFIG_USB_GADGET_S3C2410 84#ifdef CONFIG_USB_GADGET_S3C2410
79#define gadget_is_s3c2410(g) !strcmp("s3c2410_udc", (g)->name) 85#define gadget_is_s3c2410(g) !strcmp("s3c2410_udc", (g)->name)
80#else 86#else
@@ -169,5 +175,7 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
169 return 0x16; 175 return 0x16;
170 else if (gadget_is_mpc8272(gadget)) 176 else if (gadget_is_mpc8272(gadget))
171 return 0x17; 177 return 0x17;
178 else if (gadget_is_husb2dev(gadget))
179 return 0x18;
172 return -ENOENT; 180 return -ENOENT;
173} 181}
diff --git a/drivers/usb/gadget/gmidi.c b/drivers/usb/gadget/gmidi.c
index f1a679656c96..d08a8d0e6427 100644
--- a/drivers/usb/gadget/gmidi.c
+++ b/drivers/usb/gadget/gmidi.c
@@ -35,7 +35,7 @@
35#include <sound/initval.h> 35#include <sound/initval.h>
36#include <sound/rawmidi.h> 36#include <sound/rawmidi.h>
37 37
38#include <linux/usb_ch9.h> 38#include <linux/usb/ch9.h>
39#include <linux/usb_gadget.h> 39#include <linux/usb_gadget.h>
40#include <linux/usb/audio.h> 40#include <linux/usb/audio.h>
41#include <linux/usb/midi.h> 41#include <linux/usb/midi.h>
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index d0ef1d6b3fac..e873cf488246 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -39,7 +39,7 @@
39#include <linux/interrupt.h> 39#include <linux/interrupt.h>
40#include <linux/proc_fs.h> 40#include <linux/proc_fs.h>
41#include <linux/device.h> 41#include <linux/device.h>
42#include <linux/usb_ch9.h> 42#include <linux/usb/ch9.h>
43#include <linux/usb_gadget.h> 43#include <linux/usb_gadget.h>
44 44
45#include <asm/byteorder.h> 45#include <asm/byteorder.h>
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 3fb1044a4db0..34296e79edcf 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -20,7 +20,7 @@
20 */ 20 */
21 21
22 22
23// #define DEBUG /* data to help fault diagnosis */ 23// #define DEBUG /* data to help fault diagnosis */
24// #define VERBOSE /* extra debug messages (success too) */ 24// #define VERBOSE /* extra debug messages (success too) */
25 25
26#include <linux/init.h> 26#include <linux/init.h>
@@ -59,11 +59,11 @@
59 * may serve as a source of device events, used to handle all control 59 * may serve as a source of device events, used to handle all control
60 * requests other than basic enumeration. 60 * requests other than basic enumeration.
61 * 61 *
62 * - Then either immediately, or after a SET_CONFIGURATION control request, 62 * - Then, after a SET_CONFIGURATION control request, ep_config() is
63 * ep_config() is called when each /dev/gadget/ep* file is configured 63 * called when each /dev/gadget/ep* file is configured (by writing
64 * (by writing endpoint descriptors). Afterwards these files are used 64 * endpoint descriptors). Afterwards these files are used to write()
65 * to write() IN data or to read() OUT data. To halt the endpoint, a 65 * IN data or to read() OUT data. To halt the endpoint, a "wrong
66 * "wrong direction" request is issued (like reading an IN endpoint). 66 * direction" request is issued (like reading an IN endpoint).
67 * 67 *
68 * Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe 68 * Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe
69 * not possible on all hardware. For example, precise fault handling with 69 * not possible on all hardware. For example, precise fault handling with
@@ -98,16 +98,16 @@ enum ep0_state {
98 * must always write descriptors to initialize the device, then 98 * must always write descriptors to initialize the device, then
99 * the device becomes UNCONNECTED until enumeration. 99 * the device becomes UNCONNECTED until enumeration.
100 */ 100 */
101 STATE_OPENED, 101 STATE_DEV_OPENED,
102 102
103 /* From then on, ep0 fd is in either of two basic modes: 103 /* From then on, ep0 fd is in either of two basic modes:
104 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it 104 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
105 * - SETUP: read/write will transfer control data and succeed; 105 * - SETUP: read/write will transfer control data and succeed;
106 * or if "wrong direction", performs protocol stall 106 * or if "wrong direction", performs protocol stall
107 */ 107 */
108 STATE_UNCONNECTED, 108 STATE_DEV_UNCONNECTED,
109 STATE_CONNECTED, 109 STATE_DEV_CONNECTED,
110 STATE_SETUP, 110 STATE_DEV_SETUP,
111 111
112 /* UNBOUND means the driver closed ep0, so the device won't be 112 /* UNBOUND means the driver closed ep0, so the device won't be
113 * accessible again (DEV_DISABLED) until all fds are closed. 113 * accessible again (DEV_DISABLED) until all fds are closed.
@@ -121,7 +121,7 @@ enum ep0_state {
121struct dev_data { 121struct dev_data {
122 spinlock_t lock; 122 spinlock_t lock;
123 atomic_t count; 123 atomic_t count;
124 enum ep0_state state; 124 enum ep0_state state; /* P: lock */
125 struct usb_gadgetfs_event event [N_EVENT]; 125 struct usb_gadgetfs_event event [N_EVENT];
126 unsigned ev_next; 126 unsigned ev_next;
127 struct fasync_struct *fasync; 127 struct fasync_struct *fasync;
@@ -188,7 +188,6 @@ static struct dev_data *dev_new (void)
188enum ep_state { 188enum ep_state {
189 STATE_EP_DISABLED = 0, 189 STATE_EP_DISABLED = 0,
190 STATE_EP_READY, 190 STATE_EP_READY,
191 STATE_EP_DEFER_ENABLE,
192 STATE_EP_ENABLED, 191 STATE_EP_ENABLED,
193 STATE_EP_UNBOUND, 192 STATE_EP_UNBOUND,
194}; 193};
@@ -313,18 +312,10 @@ nonblock:
313 312
314 if ((val = down_interruptible (&epdata->lock)) < 0) 313 if ((val = down_interruptible (&epdata->lock)) < 0)
315 return val; 314 return val;
316newstate: 315
317 switch (epdata->state) { 316 switch (epdata->state) {
318 case STATE_EP_ENABLED: 317 case STATE_EP_ENABLED:
319 break; 318 break;
320 case STATE_EP_DEFER_ENABLE:
321 DBG (epdata->dev, "%s wait for host\n", epdata->name);
322 if ((val = wait_event_interruptible (epdata->wait,
323 epdata->state != STATE_EP_DEFER_ENABLE
324 || epdata->dev->state == STATE_DEV_UNBOUND
325 )) < 0)
326 goto fail;
327 goto newstate;
328 // case STATE_EP_DISABLED: /* "can't happen" */ 319 // case STATE_EP_DISABLED: /* "can't happen" */
329 // case STATE_EP_READY: /* "can't happen" */ 320 // case STATE_EP_READY: /* "can't happen" */
330 default: /* error! */ 321 default: /* error! */
@@ -333,7 +324,6 @@ newstate:
333 // FALLTHROUGH 324 // FALLTHROUGH
334 case STATE_EP_UNBOUND: /* clean disconnect */ 325 case STATE_EP_UNBOUND: /* clean disconnect */
335 val = -ENODEV; 326 val = -ENODEV;
336fail:
337 up (&epdata->lock); 327 up (&epdata->lock);
338 } 328 }
339 return val; 329 return val;
@@ -565,29 +555,28 @@ static ssize_t ep_aio_read_retry(struct kiocb *iocb)
565 ssize_t len, total; 555 ssize_t len, total;
566 int i; 556 int i;
567 557
568 /* we "retry" to get the right mm context for this: */ 558 /* we "retry" to get the right mm context for this: */
569 559
570 /* copy stuff into user buffers */ 560 /* copy stuff into user buffers */
571 total = priv->actual; 561 total = priv->actual;
572 len = 0; 562 len = 0;
573 for (i=0; i < priv->nr_segs; i++) { 563 for (i=0; i < priv->nr_segs; i++) {
574 ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total); 564 ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total);
575 565
576 if (copy_to_user(priv->iv[i].iov_base, priv->buf, this)) { 566 if (copy_to_user(priv->iv[i].iov_base, priv->buf, this)) {
577 if (len == 0) 567 if (len == 0)
578 len = -EFAULT; 568 len = -EFAULT;
579 break; 569 break;
580 } 570 }
581 571
582 total -= this; 572 total -= this;
583 len += this; 573 len += this;
584 if (total == 0) 574 if (total == 0)
585 break; 575 break;
586 } 576 }
587 kfree(priv->buf); 577 kfree(priv->buf);
588 kfree(priv); 578 kfree(priv);
589 aio_put_req(iocb); 579 return len;
590 return len;
591} 580}
592 581
593static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) 582static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
@@ -600,18 +589,17 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
600 spin_lock(&epdata->dev->lock); 589 spin_lock(&epdata->dev->lock);
601 priv->req = NULL; 590 priv->req = NULL;
602 priv->epdata = NULL; 591 priv->epdata = NULL;
603 if (priv->iv == NULL 592
604 || unlikely(req->actual == 0) 593 /* if this was a write or a read returning no data then we
605 || unlikely(kiocbIsCancelled(iocb))) { 594 * don't need to copy anything to userspace, so we can
595 * complete the aio request immediately.
596 */
597 if (priv->iv == NULL || unlikely(req->actual == 0)) {
606 kfree(req->buf); 598 kfree(req->buf);
607 kfree(priv); 599 kfree(priv);
608 iocb->private = NULL; 600 iocb->private = NULL;
609 /* aio_complete() reports bytes-transferred _and_ faults */ 601 /* aio_complete() reports bytes-transferred _and_ faults */
610 if (unlikely(kiocbIsCancelled(iocb))) 602 aio_complete(iocb, req->actual ? req->actual : req->status,
611 aio_put_req(iocb);
612 else
613 aio_complete(iocb,
614 req->actual ? req->actual : req->status,
615 req->status); 603 req->status);
616 } else { 604 } else {
617 /* retry() won't report both; so we hide some faults */ 605 /* retry() won't report both; so we hide some faults */
@@ -636,7 +624,7 @@ ep_aio_rwtail(
636 size_t len, 624 size_t len,
637 struct ep_data *epdata, 625 struct ep_data *epdata,
638 const struct iovec *iv, 626 const struct iovec *iv,
639 unsigned long nr_segs 627 unsigned long nr_segs
640) 628)
641{ 629{
642 struct kiocb_priv *priv; 630 struct kiocb_priv *priv;
@@ -852,9 +840,9 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
852 break; 840 break;
853#endif 841#endif
854 default: 842 default:
855 DBG (data->dev, "unconnected, %s init deferred\n", 843 DBG(data->dev, "unconnected, %s init abandoned\n",
856 data->name); 844 data->name);
857 data->state = STATE_EP_DEFER_ENABLE; 845 value = -EINVAL;
858 } 846 }
859 if (value == 0) { 847 if (value == 0) {
860 fd->f_op = &ep_io_operations; 848 fd->f_op = &ep_io_operations;
@@ -943,22 +931,24 @@ static void clean_req (struct usb_ep *ep, struct usb_request *req)
943static void ep0_complete (struct usb_ep *ep, struct usb_request *req) 931static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
944{ 932{
945 struct dev_data *dev = ep->driver_data; 933 struct dev_data *dev = ep->driver_data;
934 unsigned long flags;
946 int free = 1; 935 int free = 1;
947 936
948 /* for control OUT, data must still get to userspace */ 937 /* for control OUT, data must still get to userspace */
938 spin_lock_irqsave(&dev->lock, flags);
949 if (!dev->setup_in) { 939 if (!dev->setup_in) {
950 dev->setup_out_error = (req->status != 0); 940 dev->setup_out_error = (req->status != 0);
951 if (!dev->setup_out_error) 941 if (!dev->setup_out_error)
952 free = 0; 942 free = 0;
953 dev->setup_out_ready = 1; 943 dev->setup_out_ready = 1;
954 ep0_readable (dev); 944 ep0_readable (dev);
955 } else if (dev->state == STATE_SETUP) 945 }
956 dev->state = STATE_CONNECTED;
957 946
958 /* clean up as appropriate */ 947 /* clean up as appropriate */
959 if (free && req->buf != &dev->rbuf) 948 if (free && req->buf != &dev->rbuf)
960 clean_req (ep, req); 949 clean_req (ep, req);
961 req->complete = epio_complete; 950 req->complete = epio_complete;
951 spin_unlock_irqrestore(&dev->lock, flags);
962} 952}
963 953
964static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len) 954static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
@@ -998,13 +988,13 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
998 } 988 }
999 989
1000 /* control DATA stage */ 990 /* control DATA stage */
1001 if ((state = dev->state) == STATE_SETUP) { 991 if ((state = dev->state) == STATE_DEV_SETUP) {
1002 992
1003 if (dev->setup_in) { /* stall IN */ 993 if (dev->setup_in) { /* stall IN */
1004 VDEBUG(dev, "ep0in stall\n"); 994 VDEBUG(dev, "ep0in stall\n");
1005 (void) usb_ep_set_halt (dev->gadget->ep0); 995 (void) usb_ep_set_halt (dev->gadget->ep0);
1006 retval = -EL2HLT; 996 retval = -EL2HLT;
1007 dev->state = STATE_CONNECTED; 997 dev->state = STATE_DEV_CONNECTED;
1008 998
1009 } else if (len == 0) { /* ack SET_CONFIGURATION etc */ 999 } else if (len == 0) { /* ack SET_CONFIGURATION etc */
1010 struct usb_ep *ep = dev->gadget->ep0; 1000 struct usb_ep *ep = dev->gadget->ep0;
@@ -1012,7 +1002,7 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
1012 1002
1013 if ((retval = setup_req (ep, req, 0)) == 0) 1003 if ((retval = setup_req (ep, req, 0)) == 0)
1014 retval = usb_ep_queue (ep, req, GFP_ATOMIC); 1004 retval = usb_ep_queue (ep, req, GFP_ATOMIC);
1015 dev->state = STATE_CONNECTED; 1005 dev->state = STATE_DEV_CONNECTED;
1016 1006
1017 /* assume that was SET_CONFIGURATION */ 1007 /* assume that was SET_CONFIGURATION */
1018 if (dev->current_config) { 1008 if (dev->current_config) {
@@ -1040,6 +1030,13 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
1040 spin_lock_irq (&dev->lock); 1030 spin_lock_irq (&dev->lock);
1041 if (retval) 1031 if (retval)
1042 goto done; 1032 goto done;
1033
1034 if (dev->state != STATE_DEV_SETUP) {
1035 retval = -ECANCELED;
1036 goto done;
1037 }
1038 dev->state = STATE_DEV_CONNECTED;
1039
1043 if (dev->setup_out_error) 1040 if (dev->setup_out_error)
1044 retval = -EIO; 1041 retval = -EIO;
1045 else { 1042 else {
@@ -1066,39 +1063,36 @@ scan:
1066 /* return queued events right away */ 1063 /* return queued events right away */
1067 if (dev->ev_next != 0) { 1064 if (dev->ev_next != 0) {
1068 unsigned i, n; 1065 unsigned i, n;
1069 int tmp = dev->ev_next;
1070 1066
1071 len = min (len, tmp * sizeof (struct usb_gadgetfs_event));
1072 n = len / sizeof (struct usb_gadgetfs_event); 1067 n = len / sizeof (struct usb_gadgetfs_event);
1068 if (dev->ev_next < n)
1069 n = dev->ev_next;
1073 1070
1074 /* ep0 can't deliver events when STATE_SETUP */ 1071 /* ep0 i/o has special semantics during STATE_DEV_SETUP */
1075 for (i = 0; i < n; i++) { 1072 for (i = 0; i < n; i++) {
1076 if (dev->event [i].type == GADGETFS_SETUP) { 1073 if (dev->event [i].type == GADGETFS_SETUP) {
1077 len = i + 1; 1074 dev->state = STATE_DEV_SETUP;
1078 len *= sizeof (struct usb_gadgetfs_event); 1075 n = i + 1;
1079 n = 0;
1080 break; 1076 break;
1081 } 1077 }
1082 } 1078 }
1083 spin_unlock_irq (&dev->lock); 1079 spin_unlock_irq (&dev->lock);
1080 len = n * sizeof (struct usb_gadgetfs_event);
1084 if (copy_to_user (buf, &dev->event, len)) 1081 if (copy_to_user (buf, &dev->event, len))
1085 retval = -EFAULT; 1082 retval = -EFAULT;
1086 else 1083 else
1087 retval = len; 1084 retval = len;
1088 if (len > 0) { 1085 if (len > 0) {
1089 len /= sizeof (struct usb_gadgetfs_event);
1090
1091 /* NOTE this doesn't guard against broken drivers; 1086 /* NOTE this doesn't guard against broken drivers;
1092 * concurrent ep0 readers may lose events. 1087 * concurrent ep0 readers may lose events.
1093 */ 1088 */
1094 spin_lock_irq (&dev->lock); 1089 spin_lock_irq (&dev->lock);
1095 dev->ev_next -= len; 1090 if (dev->ev_next > n) {
1096 if (dev->ev_next != 0) 1091 memmove(&dev->event[0], &dev->event[n],
1097 memmove (&dev->event, &dev->event [len],
1098 sizeof (struct usb_gadgetfs_event) 1092 sizeof (struct usb_gadgetfs_event)
1099 * (tmp - len)); 1093 * (dev->ev_next - n));
1100 if (n == 0) 1094 }
1101 dev->state = STATE_SETUP; 1095 dev->ev_next -= n;
1102 spin_unlock_irq (&dev->lock); 1096 spin_unlock_irq (&dev->lock);
1103 } 1097 }
1104 return retval; 1098 return retval;
@@ -1113,8 +1107,8 @@ scan:
1113 DBG (dev, "fail %s, state %d\n", __FUNCTION__, state); 1107 DBG (dev, "fail %s, state %d\n", __FUNCTION__, state);
1114 retval = -ESRCH; 1108 retval = -ESRCH;
1115 break; 1109 break;
1116 case STATE_UNCONNECTED: 1110 case STATE_DEV_UNCONNECTED:
1117 case STATE_CONNECTED: 1111 case STATE_DEV_CONNECTED:
1118 spin_unlock_irq (&dev->lock); 1112 spin_unlock_irq (&dev->lock);
1119 DBG (dev, "%s wait\n", __FUNCTION__); 1113 DBG (dev, "%s wait\n", __FUNCTION__);
1120 1114
@@ -1141,7 +1135,7 @@ next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
1141 switch (type) { 1135 switch (type) {
1142 /* these events purge the queue */ 1136 /* these events purge the queue */
1143 case GADGETFS_DISCONNECT: 1137 case GADGETFS_DISCONNECT:
1144 if (dev->state == STATE_SETUP) 1138 if (dev->state == STATE_DEV_SETUP)
1145 dev->setup_abort = 1; 1139 dev->setup_abort = 1;
1146 // FALL THROUGH 1140 // FALL THROUGH
1147 case GADGETFS_CONNECT: 1141 case GADGETFS_CONNECT:
@@ -1153,7 +1147,7 @@ next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
1153 for (i = 0; i != dev->ev_next; i++) { 1147 for (i = 0; i != dev->ev_next; i++) {
1154 if (dev->event [i].type != type) 1148 if (dev->event [i].type != type)
1155 continue; 1149 continue;
1156 DBG (dev, "discard old event %d\n", type); 1150 DBG(dev, "discard old event[%d] %d\n", i, type);
1157 dev->ev_next--; 1151 dev->ev_next--;
1158 if (i == dev->ev_next) 1152 if (i == dev->ev_next)
1159 break; 1153 break;
@@ -1166,9 +1160,9 @@ next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
1166 default: 1160 default:
1167 BUG (); 1161 BUG ();
1168 } 1162 }
1163 VDEBUG(dev, "event[%d] = %d\n", dev->ev_next, type);
1169 event = &dev->event [dev->ev_next++]; 1164 event = &dev->event [dev->ev_next++];
1170 BUG_ON (dev->ev_next > N_EVENT); 1165 BUG_ON (dev->ev_next > N_EVENT);
1171 VDEBUG (dev, "ev %d, next %d\n", type, dev->ev_next);
1172 memset (event, 0, sizeof *event); 1166 memset (event, 0, sizeof *event);
1173 event->type = type; 1167 event->type = type;
1174 return event; 1168 return event;
@@ -1188,12 +1182,13 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1188 retval = -EIDRM; 1182 retval = -EIDRM;
1189 1183
1190 /* data and/or status stage for control request */ 1184 /* data and/or status stage for control request */
1191 } else if (dev->state == STATE_SETUP) { 1185 } else if (dev->state == STATE_DEV_SETUP) {
1192 1186
1193 /* IN DATA+STATUS caller makes len <= wLength */ 1187 /* IN DATA+STATUS caller makes len <= wLength */
1194 if (dev->setup_in) { 1188 if (dev->setup_in) {
1195 retval = setup_req (dev->gadget->ep0, dev->req, len); 1189 retval = setup_req (dev->gadget->ep0, dev->req, len);
1196 if (retval == 0) { 1190 if (retval == 0) {
1191 dev->state = STATE_DEV_CONNECTED;
1197 spin_unlock_irq (&dev->lock); 1192 spin_unlock_irq (&dev->lock);
1198 if (copy_from_user (dev->req->buf, buf, len)) 1193 if (copy_from_user (dev->req->buf, buf, len))
1199 retval = -EFAULT; 1194 retval = -EFAULT;
@@ -1219,7 +1214,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1219 VDEBUG(dev, "ep0out stall\n"); 1214 VDEBUG(dev, "ep0out stall\n");
1220 (void) usb_ep_set_halt (dev->gadget->ep0); 1215 (void) usb_ep_set_halt (dev->gadget->ep0);
1221 retval = -EL2HLT; 1216 retval = -EL2HLT;
1222 dev->state = STATE_CONNECTED; 1217 dev->state = STATE_DEV_CONNECTED;
1223 } else { 1218 } else {
1224 DBG(dev, "bogus ep0out stall!\n"); 1219 DBG(dev, "bogus ep0out stall!\n");
1225 } 1220 }
@@ -1261,7 +1256,9 @@ dev_release (struct inode *inode, struct file *fd)
1261 put_dev (dev); 1256 put_dev (dev);
1262 1257
1263 /* other endpoints were all decoupled from this device */ 1258 /* other endpoints were all decoupled from this device */
1259 spin_lock_irq(&dev->lock);
1264 dev->state = STATE_DEV_DISABLED; 1260 dev->state = STATE_DEV_DISABLED;
1261 spin_unlock_irq(&dev->lock);
1265 return 0; 1262 return 0;
1266} 1263}
1267 1264
@@ -1282,7 +1279,7 @@ ep0_poll (struct file *fd, poll_table *wait)
1282 goto out; 1279 goto out;
1283 } 1280 }
1284 1281
1285 if (dev->state == STATE_SETUP) { 1282 if (dev->state == STATE_DEV_SETUP) {
1286 if (dev->setup_in || dev->setup_can_stall) 1283 if (dev->setup_in || dev->setup_can_stall)
1287 mask = POLLOUT; 1284 mask = POLLOUT;
1288 } else { 1285 } else {
@@ -1392,52 +1389,29 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1392 1389
1393 spin_lock (&dev->lock); 1390 spin_lock (&dev->lock);
1394 dev->setup_abort = 0; 1391 dev->setup_abort = 0;
1395 if (dev->state == STATE_UNCONNECTED) { 1392 if (dev->state == STATE_DEV_UNCONNECTED) {
1396 struct usb_ep *ep;
1397 struct ep_data *data;
1398
1399 dev->state = STATE_CONNECTED;
1400 dev->dev->bMaxPacketSize0 = gadget->ep0->maxpacket;
1401
1402#ifdef CONFIG_USB_GADGET_DUALSPEED 1393#ifdef CONFIG_USB_GADGET_DUALSPEED
1403 if (gadget->speed == USB_SPEED_HIGH && dev->hs_config == 0) { 1394 if (gadget->speed == USB_SPEED_HIGH && dev->hs_config == 0) {
1395 spin_unlock(&dev->lock);
1404 ERROR (dev, "no high speed config??\n"); 1396 ERROR (dev, "no high speed config??\n");
1405 return -EINVAL; 1397 return -EINVAL;
1406 } 1398 }
1407#endif /* CONFIG_USB_GADGET_DUALSPEED */ 1399#endif /* CONFIG_USB_GADGET_DUALSPEED */
1408 1400
1401 dev->state = STATE_DEV_CONNECTED;
1402 dev->dev->bMaxPacketSize0 = gadget->ep0->maxpacket;
1403
1409 INFO (dev, "connected\n"); 1404 INFO (dev, "connected\n");
1410 event = next_event (dev, GADGETFS_CONNECT); 1405 event = next_event (dev, GADGETFS_CONNECT);
1411 event->u.speed = gadget->speed; 1406 event->u.speed = gadget->speed;
1412 ep0_readable (dev); 1407 ep0_readable (dev);
1413 1408
1414 list_for_each_entry (ep, &gadget->ep_list, ep_list) {
1415 data = ep->driver_data;
1416 /* ... down_trylock (&data->lock) ... */
1417 if (data->state != STATE_EP_DEFER_ENABLE)
1418 continue;
1419#ifdef CONFIG_USB_GADGET_DUALSPEED
1420 if (gadget->speed == USB_SPEED_HIGH)
1421 value = usb_ep_enable (ep, &data->hs_desc);
1422 else
1423#endif /* CONFIG_USB_GADGET_DUALSPEED */
1424 value = usb_ep_enable (ep, &data->desc);
1425 if (value) {
1426 ERROR (dev, "deferred %s enable --> %d\n",
1427 data->name, value);
1428 continue;
1429 }
1430 data->state = STATE_EP_ENABLED;
1431 wake_up (&data->wait);
1432 DBG (dev, "woke up %s waiters\n", data->name);
1433 }
1434
1435 /* host may have given up waiting for response. we can miss control 1409 /* host may have given up waiting for response. we can miss control
1436 * requests handled lower down (device/endpoint status and features); 1410 * requests handled lower down (device/endpoint status and features);
1437 * then ep0_{read,write} will report the wrong status. controller 1411 * then ep0_{read,write} will report the wrong status. controller
1438 * driver will have aborted pending i/o. 1412 * driver will have aborted pending i/o.
1439 */ 1413 */
1440 } else if (dev->state == STATE_SETUP) 1414 } else if (dev->state == STATE_DEV_SETUP)
1441 dev->setup_abort = 1; 1415 dev->setup_abort = 1;
1442 1416
1443 req->buf = dev->rbuf; 1417 req->buf = dev->rbuf;
@@ -1583,7 +1557,7 @@ delegate:
1583 } 1557 }
1584 1558
1585 /* proceed with data transfer and status phases? */ 1559 /* proceed with data transfer and status phases? */
1586 if (value >= 0 && dev->state != STATE_SETUP) { 1560 if (value >= 0 && dev->state != STATE_DEV_SETUP) {
1587 req->length = value; 1561 req->length = value;
1588 req->zero = value < w_length; 1562 req->zero = value < w_length;
1589 value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC); 1563 value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
@@ -1747,7 +1721,9 @@ gadgetfs_bind (struct usb_gadget *gadget)
1747 goto enomem; 1721 goto enomem;
1748 1722
1749 INFO (dev, "bound to %s driver\n", gadget->name); 1723 INFO (dev, "bound to %s driver\n", gadget->name);
1750 dev->state = STATE_UNCONNECTED; 1724 spin_lock_irq(&dev->lock);
1725 dev->state = STATE_DEV_UNCONNECTED;
1726 spin_unlock_irq(&dev->lock);
1751 get_dev (dev); 1727 get_dev (dev);
1752 return 0; 1728 return 0;
1753 1729
@@ -1762,11 +1738,9 @@ gadgetfs_disconnect (struct usb_gadget *gadget)
1762 struct dev_data *dev = get_gadget_data (gadget); 1738 struct dev_data *dev = get_gadget_data (gadget);
1763 1739
1764 spin_lock (&dev->lock); 1740 spin_lock (&dev->lock);
1765 if (dev->state == STATE_UNCONNECTED) { 1741 if (dev->state == STATE_DEV_UNCONNECTED)
1766 DBG (dev, "already unconnected\n");
1767 goto exit; 1742 goto exit;
1768 } 1743 dev->state = STATE_DEV_UNCONNECTED;
1769 dev->state = STATE_UNCONNECTED;
1770 1744
1771 INFO (dev, "disconnected\n"); 1745 INFO (dev, "disconnected\n");
1772 next_event (dev, GADGETFS_DISCONNECT); 1746 next_event (dev, GADGETFS_DISCONNECT);
@@ -1783,9 +1757,9 @@ gadgetfs_suspend (struct usb_gadget *gadget)
1783 INFO (dev, "suspended from state %d\n", dev->state); 1757 INFO (dev, "suspended from state %d\n", dev->state);
1784 spin_lock (&dev->lock); 1758 spin_lock (&dev->lock);
1785 switch (dev->state) { 1759 switch (dev->state) {
1786 case STATE_SETUP: // VERY odd... host died?? 1760 case STATE_DEV_SETUP: // VERY odd... host died??
1787 case STATE_CONNECTED: 1761 case STATE_DEV_CONNECTED:
1788 case STATE_UNCONNECTED: 1762 case STATE_DEV_UNCONNECTED:
1789 next_event (dev, GADGETFS_SUSPEND); 1763 next_event (dev, GADGETFS_SUSPEND);
1790 ep0_readable (dev); 1764 ep0_readable (dev);
1791 /* FALLTHROUGH */ 1765 /* FALLTHROUGH */
@@ -1808,7 +1782,7 @@ static struct usb_gadget_driver gadgetfs_driver = {
1808 .disconnect = gadgetfs_disconnect, 1782 .disconnect = gadgetfs_disconnect,
1809 .suspend = gadgetfs_suspend, 1783 .suspend = gadgetfs_suspend,
1810 1784
1811 .driver = { 1785 .driver = {
1812 .name = (char *) shortname, 1786 .name = (char *) shortname,
1813 }, 1787 },
1814}; 1788};
@@ -1829,7 +1803,7 @@ static struct usb_gadget_driver probe_driver = {
1829 .unbind = gadgetfs_nop, 1803 .unbind = gadgetfs_nop,
1830 .setup = (void *)gadgetfs_nop, 1804 .setup = (void *)gadgetfs_nop,
1831 .disconnect = gadgetfs_nop, 1805 .disconnect = gadgetfs_nop,
1832 .driver = { 1806 .driver = {
1833 .name = "nop", 1807 .name = "nop",
1834 }, 1808 },
1835}; 1809};
@@ -1849,19 +1823,16 @@ static struct usb_gadget_driver probe_driver = {
1849 * . full/low speed config ... all wTotalLength bytes (with interface, 1823 * . full/low speed config ... all wTotalLength bytes (with interface,
1850 * class, altsetting, endpoint, and other descriptors) 1824 * class, altsetting, endpoint, and other descriptors)
1851 * . high speed config ... all descriptors, for high speed operation; 1825 * . high speed config ... all descriptors, for high speed operation;
1852 * this one's optional except for high-speed hardware 1826 * this one's optional except for high-speed hardware
1853 * . device descriptor 1827 * . device descriptor
1854 * 1828 *
1855 * Endpoints are not yet enabled. Drivers may want to immediately 1829 * Endpoints are not yet enabled. Drivers must wait until device
1856 * initialize them, using the /dev/gadget/ep* files that are available 1830 * configuration and interface altsetting changes create
1857 * as soon as the kernel sees the configuration, or they can wait
1858 * until device configuration and interface altsetting changes create
1859 * the need to configure (or unconfigure) them. 1831 * the need to configure (or unconfigure) them.
1860 * 1832 *
1861 * After initialization, the device stays active for as long as that 1833 * After initialization, the device stays active for as long as that
1862 * $CHIP file is open. Events may then be read from that descriptor, 1834 * $CHIP file is open. Events must then be read from that descriptor,
1863 * such as configuration notifications. More complex drivers will handle 1835 * such as configuration notifications.
1864 * some control requests in user space.
1865 */ 1836 */
1866 1837
1867static int is_valid_config (struct usb_config_descriptor *config) 1838static int is_valid_config (struct usb_config_descriptor *config)
@@ -1884,9 +1855,6 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1884 u32 tag; 1855 u32 tag;
1885 char *kbuf; 1856 char *kbuf;
1886 1857
1887 if (dev->state != STATE_OPENED)
1888 return -EEXIST;
1889
1890 if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) 1858 if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
1891 return -EINVAL; 1859 return -EINVAL;
1892 1860
@@ -1978,13 +1946,15 @@ dev_open (struct inode *inode, struct file *fd)
1978 struct dev_data *dev = inode->i_private; 1946 struct dev_data *dev = inode->i_private;
1979 int value = -EBUSY; 1947 int value = -EBUSY;
1980 1948
1949 spin_lock_irq(&dev->lock);
1981 if (dev->state == STATE_DEV_DISABLED) { 1950 if (dev->state == STATE_DEV_DISABLED) {
1982 dev->ev_next = 0; 1951 dev->ev_next = 0;
1983 dev->state = STATE_OPENED; 1952 dev->state = STATE_DEV_OPENED;
1984 fd->private_data = dev; 1953 fd->private_data = dev;
1985 get_dev (dev); 1954 get_dev (dev);
1986 value = 0; 1955 value = 0;
1987 } 1956 }
1957 spin_unlock_irq(&dev->lock);
1988 return value; 1958 return value;
1989} 1959}
1990 1960
diff --git a/drivers/usb/gadget/lh7a40x_udc.h b/drivers/usb/gadget/lh7a40x_udc.h
index e3bb78524c88..b3fe197e1eeb 100644
--- a/drivers/usb/gadget/lh7a40x_udc.h
+++ b/drivers/usb/gadget/lh7a40x_udc.h
@@ -49,7 +49,7 @@
49#include <asm/unaligned.h> 49#include <asm/unaligned.h>
50#include <asm/hardware.h> 50#include <asm/hardware.h>
51 51
52#include <linux/usb_ch9.h> 52#include <linux/usb/ch9.h>
53#include <linux/usb_gadget.h> 53#include <linux/usb_gadget.h>
54 54
55/* 55/*
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 569eb8ccf232..7617ff7bd5ac 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -63,7 +63,7 @@
63#include <linux/interrupt.h> 63#include <linux/interrupt.h>
64#include <linux/moduleparam.h> 64#include <linux/moduleparam.h>
65#include <linux/device.h> 65#include <linux/device.h>
66#include <linux/usb_ch9.h> 66#include <linux/usb/ch9.h>
67#include <linux/usb_gadget.h> 67#include <linux/usb_gadget.h>
68 68
69#include <asm/byteorder.h> 69#include <asm/byteorder.h>
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index cdcfd42843d4..140104341db4 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -38,7 +38,7 @@
38#include <linux/mm.h> 38#include <linux/mm.h>
39#include <linux/moduleparam.h> 39#include <linux/moduleparam.h>
40#include <linux/platform_device.h> 40#include <linux/platform_device.h>
41#include <linux/usb_ch9.h> 41#include <linux/usb/ch9.h>
42#include <linux/usb_gadget.h> 42#include <linux/usb_gadget.h>
43#include <linux/usb/otg.h> 43#include <linux/usb/otg.h>
44#include <linux/dma-mapping.h> 44#include <linux/dma-mapping.h>
diff --git a/drivers/usb/gadget/pxa2xx_udc.c b/drivers/usb/gadget/pxa2xx_udc.c
index b78de9694665..0d225369847d 100644
--- a/drivers/usb/gadget/pxa2xx_udc.c
+++ b/drivers/usb/gadget/pxa2xx_udc.c
@@ -56,7 +56,7 @@
56#include <asm/arch/pxa-regs.h> 56#include <asm/arch/pxa-regs.h>
57#endif 57#endif
58 58
59#include <linux/usb_ch9.h> 59#include <linux/usb/ch9.h>
60#include <linux/usb_gadget.h> 60#include <linux/usb_gadget.h>
61 61
62#include <asm/arch/udc.h> 62#include <asm/arch/udc.h>
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c
index f8a3ec64635d..6c742a909225 100644
--- a/drivers/usb/gadget/serial.c
+++ b/drivers/usb/gadget/serial.c
@@ -43,7 +43,7 @@
43#include <asm/unaligned.h> 43#include <asm/unaligned.h>
44#include <asm/uaccess.h> 44#include <asm/uaccess.h>
45 45
46#include <linux/usb_ch9.h> 46#include <linux/usb/ch9.h>
47#include <linux/usb/cdc.h> 47#include <linux/usb/cdc.h>
48#include <linux/usb_gadget.h> 48#include <linux/usb_gadget.h>
49 49
diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c
index b1735767660b..3459ea6c6c0b 100644
--- a/drivers/usb/gadget/usbstring.c
+++ b/drivers/usb/gadget/usbstring.c
@@ -14,7 +14,7 @@
14#include <linux/device.h> 14#include <linux/device.h>
15#include <linux/init.h> 15#include <linux/init.h>
16 16
17#include <linux/usb_ch9.h> 17#include <linux/usb/ch9.h>
18#include <linux/usb_gadget.h> 18#include <linux/usb_gadget.h>
19 19
20#include <asm/unaligned.h> 20#include <asm/unaligned.h>
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 40710ea1b490..ebe04e0d2879 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -84,7 +84,7 @@
84#include <asm/system.h> 84#include <asm/system.h>
85#include <asm/unaligned.h> 85#include <asm/unaligned.h>
86 86
87#include <linux/usb_ch9.h> 87#include <linux/usb/ch9.h>
88#include <linux/usb_gadget.h> 88#include <linux/usb_gadget.h>
89 89
90#include "gadget_chips.h" 90#include "gadget_chips.h"
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index cc60759083bf..62711870f8ee 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -67,6 +67,11 @@ config USB_EHCI_TT_NEWSCHED
67 67
68 If unsure, say N. 68 If unsure, say N.
69 69
70config USB_EHCI_BIG_ENDIAN_MMIO
71 bool
72 depends on USB_EHCI_HCD
73 default n
74
70config USB_ISP116X_HCD 75config USB_ISP116X_HCD
71 tristate "ISP116X HCD support" 76 tristate "ISP116X HCD support"
72 depends on USB 77 depends on USB
@@ -101,21 +106,48 @@ config USB_OHCI_HCD_PPC_SOC
101 bool "OHCI support for on-chip PPC USB controller" 106 bool "OHCI support for on-chip PPC USB controller"
102 depends on USB_OHCI_HCD && (STB03xxx || PPC_MPC52xx) 107 depends on USB_OHCI_HCD && (STB03xxx || PPC_MPC52xx)
103 default y 108 default y
104 select USB_OHCI_BIG_ENDIAN 109 select USB_OHCI_BIG_ENDIAN_DESC
110 select USB_OHCI_BIG_ENDIAN_MMIO
105 ---help--- 111 ---help---
106 Enables support for the USB controller on the MPC52xx or 112 Enables support for the USB controller on the MPC52xx or
107 STB03xxx processor chip. If unsure, say Y. 113 STB03xxx processor chip. If unsure, say Y.
108 114
115config USB_OHCI_HCD_PPC_OF
116 bool "OHCI support for PPC USB controller on OF platform bus"
117 depends on USB_OHCI_HCD && PPC_OF
118 default y
119 ---help---
120 Enables support for the USB controller PowerPC present on the
121 OpenFirmware platform bus.
122
123config USB_OHCI_HCD_PPC_OF_BE
124 bool "Support big endian HC"
125 depends on USB_OHCI_HCD_PPC_OF
126 default y
127 select USB_OHCI_BIG_ENDIAN_DESC
128 select USB_OHCI_BIG_ENDIAN_MMIO
129
130config USB_OHCI_HCD_PPC_OF_LE
131 bool "Support little endian HC"
132 depends on USB_OHCI_HCD_PPC_OF
133 default n
134 select USB_OHCI_LITTLE_ENDIAN
135
109config USB_OHCI_HCD_PCI 136config USB_OHCI_HCD_PCI
110 bool "OHCI support for PCI-bus USB controllers" 137 bool "OHCI support for PCI-bus USB controllers"
111 depends on USB_OHCI_HCD && PCI && (STB03xxx || PPC_MPC52xx) 138 depends on USB_OHCI_HCD && PCI && (STB03xxx || PPC_MPC52xx || USB_OHCI_HCD_PPC_OF)
112 default y 139 default y
113 select USB_OHCI_LITTLE_ENDIAN 140 select USB_OHCI_LITTLE_ENDIAN
114 ---help--- 141 ---help---
115 Enables support for PCI-bus plug-in USB controller cards. 142 Enables support for PCI-bus plug-in USB controller cards.
116 If unsure, say Y. 143 If unsure, say Y.
117 144
118config USB_OHCI_BIG_ENDIAN 145config USB_OHCI_BIG_ENDIAN_DESC
146 bool
147 depends on USB_OHCI_HCD
148 default n
149
150config USB_OHCI_BIG_ENDIAN_MMIO
119 bool 151 bool
120 depends on USB_OHCI_HCD 152 depends on USB_OHCI_HCD
121 default n 153 default n
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 56349d21e6ea..246afea9e83b 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -43,7 +43,7 @@
43 */ 43 */
44static void dbg_hcs_params (struct ehci_hcd *ehci, char *label) 44static void dbg_hcs_params (struct ehci_hcd *ehci, char *label)
45{ 45{
46 u32 params = readl (&ehci->caps->hcs_params); 46 u32 params = ehci_readl(ehci, &ehci->caps->hcs_params);
47 47
48 ehci_dbg (ehci, 48 ehci_dbg (ehci,
49 "%s hcs_params 0x%x dbg=%d%s cc=%d pcc=%d%s%s ports=%d\n", 49 "%s hcs_params 0x%x dbg=%d%s cc=%d pcc=%d%s%s ports=%d\n",
@@ -87,7 +87,7 @@ static inline void dbg_hcs_params (struct ehci_hcd *ehci, char *label) {}
87 * */ 87 * */
88static void dbg_hcc_params (struct ehci_hcd *ehci, char *label) 88static void dbg_hcc_params (struct ehci_hcd *ehci, char *label)
89{ 89{
90 u32 params = readl (&ehci->caps->hcc_params); 90 u32 params = ehci_readl(ehci, &ehci->caps->hcc_params);
91 91
92 if (HCC_ISOC_CACHE (params)) { 92 if (HCC_ISOC_CACHE (params)) {
93 ehci_dbg (ehci, 93 ehci_dbg (ehci,
@@ -653,7 +653,7 @@ show_registers (struct class_device *class_dev, char *buf)
653 } 653 }
654 654
655 /* Capability Registers */ 655 /* Capability Registers */
656 i = HC_VERSION(readl (&ehci->caps->hc_capbase)); 656 i = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase));
657 temp = scnprintf (next, size, 657 temp = scnprintf (next, size,
658 "bus %s, device %s (driver " DRIVER_VERSION ")\n" 658 "bus %s, device %s (driver " DRIVER_VERSION ")\n"
659 "%s\n" 659 "%s\n"
@@ -673,7 +673,7 @@ show_registers (struct class_device *class_dev, char *buf)
673 unsigned count = 256/4; 673 unsigned count = 256/4;
674 674
675 pdev = to_pci_dev(ehci_to_hcd(ehci)->self.controller); 675 pdev = to_pci_dev(ehci_to_hcd(ehci)->self.controller);
676 offset = HCC_EXT_CAPS (readl (&ehci->caps->hcc_params)); 676 offset = HCC_EXT_CAPS (ehci_readl(ehci, &ehci->caps->hcc_params));
677 while (offset && count--) { 677 while (offset && count--) {
678 pci_read_config_dword (pdev, offset, &cap); 678 pci_read_config_dword (pdev, offset, &cap);
679 switch (cap & 0xff) { 679 switch (cap & 0xff) {
@@ -704,50 +704,50 @@ show_registers (struct class_device *class_dev, char *buf)
704#endif 704#endif
705 705
706 // FIXME interpret both types of params 706 // FIXME interpret both types of params
707 i = readl (&ehci->caps->hcs_params); 707 i = ehci_readl(ehci, &ehci->caps->hcs_params);
708 temp = scnprintf (next, size, "structural params 0x%08x\n", i); 708 temp = scnprintf (next, size, "structural params 0x%08x\n", i);
709 size -= temp; 709 size -= temp;
710 next += temp; 710 next += temp;
711 711
712 i = readl (&ehci->caps->hcc_params); 712 i = ehci_readl(ehci, &ehci->caps->hcc_params);
713 temp = scnprintf (next, size, "capability params 0x%08x\n", i); 713 temp = scnprintf (next, size, "capability params 0x%08x\n", i);
714 size -= temp; 714 size -= temp;
715 next += temp; 715 next += temp;
716 716
717 /* Operational Registers */ 717 /* Operational Registers */
718 temp = dbg_status_buf (scratch, sizeof scratch, label, 718 temp = dbg_status_buf (scratch, sizeof scratch, label,
719 readl (&ehci->regs->status)); 719 ehci_readl(ehci, &ehci->regs->status));
720 temp = scnprintf (next, size, fmt, temp, scratch); 720 temp = scnprintf (next, size, fmt, temp, scratch);
721 size -= temp; 721 size -= temp;
722 next += temp; 722 next += temp;
723 723
724 temp = dbg_command_buf (scratch, sizeof scratch, label, 724 temp = dbg_command_buf (scratch, sizeof scratch, label,
725 readl (&ehci->regs->command)); 725 ehci_readl(ehci, &ehci->regs->command));
726 temp = scnprintf (next, size, fmt, temp, scratch); 726 temp = scnprintf (next, size, fmt, temp, scratch);
727 size -= temp; 727 size -= temp;
728 next += temp; 728 next += temp;
729 729
730 temp = dbg_intr_buf (scratch, sizeof scratch, label, 730 temp = dbg_intr_buf (scratch, sizeof scratch, label,
731 readl (&ehci->regs->intr_enable)); 731 ehci_readl(ehci, &ehci->regs->intr_enable));
732 temp = scnprintf (next, size, fmt, temp, scratch); 732 temp = scnprintf (next, size, fmt, temp, scratch);
733 size -= temp; 733 size -= temp;
734 next += temp; 734 next += temp;
735 735
736 temp = scnprintf (next, size, "uframe %04x\n", 736 temp = scnprintf (next, size, "uframe %04x\n",
737 readl (&ehci->regs->frame_index)); 737 ehci_readl(ehci, &ehci->regs->frame_index));
738 size -= temp; 738 size -= temp;
739 next += temp; 739 next += temp;
740 740
741 for (i = 1; i <= HCS_N_PORTS (ehci->hcs_params); i++) { 741 for (i = 1; i <= HCS_N_PORTS (ehci->hcs_params); i++) {
742 temp = dbg_port_buf (scratch, sizeof scratch, label, i, 742 temp = dbg_port_buf (scratch, sizeof scratch, label, i,
743 readl (&ehci->regs->port_status [i - 1])); 743 ehci_readl(ehci, &ehci->regs->port_status [i - 1]));
744 temp = scnprintf (next, size, fmt, temp, scratch); 744 temp = scnprintf (next, size, fmt, temp, scratch);
745 size -= temp; 745 size -= temp;
746 next += temp; 746 next += temp;
747 if (i == HCS_DEBUG_PORT(ehci->hcs_params) && ehci->debug) { 747 if (i == HCS_DEBUG_PORT(ehci->hcs_params) && ehci->debug) {
748 temp = scnprintf (next, size, 748 temp = scnprintf (next, size,
749 " debug control %08x\n", 749 " debug control %08x\n",
750 readl (&ehci->debug->control)); 750 ehci_readl(ehci, &ehci->debug->control));
751 size -= temp; 751 size -= temp;
752 next += temp; 752 next += temp;
753 } 753 }
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 1a915e982c1c..a52480505f78 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -177,7 +177,7 @@ static void mpc83xx_setup_phy(struct ehci_hcd *ehci,
177 case FSL_USB2_PHY_NONE: 177 case FSL_USB2_PHY_NONE:
178 break; 178 break;
179 } 179 }
180 writel(portsc, &ehci->regs->port_status[port_offset]); 180 ehci_writel(ehci, portsc, &ehci->regs->port_status[port_offset]);
181} 181}
182 182
183static void mpc83xx_usb_setup(struct usb_hcd *hcd) 183static void mpc83xx_usb_setup(struct usb_hcd *hcd)
@@ -214,7 +214,7 @@ static void mpc83xx_usb_setup(struct usb_hcd *hcd)
214 } 214 }
215 215
216 /* put controller in host mode. */ 216 /* put controller in host mode. */
217 writel(0x00000003, non_ehci + FSL_SOC_USB_USBMODE); 217 ehci_writel(ehci, 0x00000003, non_ehci + FSL_SOC_USB_USBMODE);
218 out_be32(non_ehci + FSL_SOC_USB_PRICTRL, 0x0000000c); 218 out_be32(non_ehci + FSL_SOC_USB_PRICTRL, 0x0000000c);
219 out_be32(non_ehci + FSL_SOC_USB_AGECNTTHRSH, 0x00000040); 219 out_be32(non_ehci + FSL_SOC_USB_AGECNTTHRSH, 0x00000040);
220 out_be32(non_ehci + FSL_SOC_USB_SICTRL, 0x00000001); 220 out_be32(non_ehci + FSL_SOC_USB_SICTRL, 0x00000001);
@@ -238,12 +238,12 @@ static int ehci_fsl_setup(struct usb_hcd *hcd)
238 /* EHCI registers start at offset 0x100 */ 238 /* EHCI registers start at offset 0x100 */
239 ehci->caps = hcd->regs + 0x100; 239 ehci->caps = hcd->regs + 0x100;
240 ehci->regs = hcd->regs + 0x100 + 240 ehci->regs = hcd->regs + 0x100 +
241 HC_LENGTH(readl(&ehci->caps->hc_capbase)); 241 HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
242 dbg_hcs_params(ehci, "reset"); 242 dbg_hcs_params(ehci, "reset");
243 dbg_hcc_params(ehci, "reset"); 243 dbg_hcc_params(ehci, "reset");
244 244
245 /* cache this readonly data; minimize chip reads */ 245 /* cache this readonly data; minimize chip reads */
246 ehci->hcs_params = readl(&ehci->caps->hcs_params); 246 ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
247 247
248 retval = ehci_halt(ehci); 248 retval = ehci_halt(ehci);
249 if (retval) 249 if (retval)
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 025d33313681..185721dba42b 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -157,12 +157,13 @@ MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
157 * before driver shutdown. But it also seems to be caused by bugs in cardbus 157 * before driver shutdown. But it also seems to be caused by bugs in cardbus
158 * bridge shutdown: shutting down the bridge before the devices using it. 158 * bridge shutdown: shutting down the bridge before the devices using it.
159 */ 159 */
160static int handshake (void __iomem *ptr, u32 mask, u32 done, int usec) 160static int handshake (struct ehci_hcd *ehci, void __iomem *ptr,
161 u32 mask, u32 done, int usec)
161{ 162{
162 u32 result; 163 u32 result;
163 164
164 do { 165 do {
165 result = readl (ptr); 166 result = ehci_readl(ehci, ptr);
166 if (result == ~(u32)0) /* card removed */ 167 if (result == ~(u32)0) /* card removed */
167 return -ENODEV; 168 return -ENODEV;
168 result &= mask; 169 result &= mask;
@@ -177,18 +178,19 @@ static int handshake (void __iomem *ptr, u32 mask, u32 done, int usec)
177/* force HC to halt state from unknown (EHCI spec section 2.3) */ 178/* force HC to halt state from unknown (EHCI spec section 2.3) */
178static int ehci_halt (struct ehci_hcd *ehci) 179static int ehci_halt (struct ehci_hcd *ehci)
179{ 180{
180 u32 temp = readl (&ehci->regs->status); 181 u32 temp = ehci_readl(ehci, &ehci->regs->status);
181 182
182 /* disable any irqs left enabled by previous code */ 183 /* disable any irqs left enabled by previous code */
183 writel (0, &ehci->regs->intr_enable); 184 ehci_writel(ehci, 0, &ehci->regs->intr_enable);
184 185
185 if ((temp & STS_HALT) != 0) 186 if ((temp & STS_HALT) != 0)
186 return 0; 187 return 0;
187 188
188 temp = readl (&ehci->regs->command); 189 temp = ehci_readl(ehci, &ehci->regs->command);
189 temp &= ~CMD_RUN; 190 temp &= ~CMD_RUN;
190 writel (temp, &ehci->regs->command); 191 ehci_writel(ehci, temp, &ehci->regs->command);
191 return handshake (&ehci->regs->status, STS_HALT, STS_HALT, 16 * 125); 192 return handshake (ehci, &ehci->regs->status,
193 STS_HALT, STS_HALT, 16 * 125);
192} 194}
193 195
194/* put TDI/ARC silicon into EHCI mode */ 196/* put TDI/ARC silicon into EHCI mode */
@@ -198,23 +200,24 @@ static void tdi_reset (struct ehci_hcd *ehci)
198 u32 tmp; 200 u32 tmp;
199 201
200 reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + 0x68); 202 reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + 0x68);
201 tmp = readl (reg_ptr); 203 tmp = ehci_readl(ehci, reg_ptr);
202 tmp |= 0x3; 204 tmp |= 0x3;
203 writel (tmp, reg_ptr); 205 ehci_writel(ehci, tmp, reg_ptr);
204} 206}
205 207
206/* reset a non-running (STS_HALT == 1) controller */ 208/* reset a non-running (STS_HALT == 1) controller */
207static int ehci_reset (struct ehci_hcd *ehci) 209static int ehci_reset (struct ehci_hcd *ehci)
208{ 210{
209 int retval; 211 int retval;
210 u32 command = readl (&ehci->regs->command); 212 u32 command = ehci_readl(ehci, &ehci->regs->command);
211 213
212 command |= CMD_RESET; 214 command |= CMD_RESET;
213 dbg_cmd (ehci, "reset", command); 215 dbg_cmd (ehci, "reset", command);
214 writel (command, &ehci->regs->command); 216 ehci_writel(ehci, command, &ehci->regs->command);
215 ehci_to_hcd(ehci)->state = HC_STATE_HALT; 217 ehci_to_hcd(ehci)->state = HC_STATE_HALT;
216 ehci->next_statechange = jiffies; 218 ehci->next_statechange = jiffies;
217 retval = handshake (&ehci->regs->command, CMD_RESET, 0, 250 * 1000); 219 retval = handshake (ehci, &ehci->regs->command,
220 CMD_RESET, 0, 250 * 1000);
218 221
219 if (retval) 222 if (retval)
220 return retval; 223 return retval;
@@ -236,21 +239,21 @@ static void ehci_quiesce (struct ehci_hcd *ehci)
236#endif 239#endif
237 240
238 /* wait for any schedule enables/disables to take effect */ 241 /* wait for any schedule enables/disables to take effect */
239 temp = readl (&ehci->regs->command) << 10; 242 temp = ehci_readl(ehci, &ehci->regs->command) << 10;
240 temp &= STS_ASS | STS_PSS; 243 temp &= STS_ASS | STS_PSS;
241 if (handshake (&ehci->regs->status, STS_ASS | STS_PSS, 244 if (handshake (ehci, &ehci->regs->status, STS_ASS | STS_PSS,
242 temp, 16 * 125) != 0) { 245 temp, 16 * 125) != 0) {
243 ehci_to_hcd(ehci)->state = HC_STATE_HALT; 246 ehci_to_hcd(ehci)->state = HC_STATE_HALT;
244 return; 247 return;
245 } 248 }
246 249
247 /* then disable anything that's still active */ 250 /* then disable anything that's still active */
248 temp = readl (&ehci->regs->command); 251 temp = ehci_readl(ehci, &ehci->regs->command);
249 temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE); 252 temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
250 writel (temp, &ehci->regs->command); 253 ehci_writel(ehci, temp, &ehci->regs->command);
251 254
252 /* hardware can take 16 microframes to turn off ... */ 255 /* hardware can take 16 microframes to turn off ... */
253 if (handshake (&ehci->regs->status, STS_ASS | STS_PSS, 256 if (handshake (ehci, &ehci->regs->status, STS_ASS | STS_PSS,
254 0, 16 * 125) != 0) { 257 0, 16 * 125) != 0) {
255 ehci_to_hcd(ehci)->state = HC_STATE_HALT; 258 ehci_to_hcd(ehci)->state = HC_STATE_HALT;
256 return; 259 return;
@@ -277,11 +280,11 @@ static void ehci_watchdog (unsigned long param)
277 280
278 /* lost IAA irqs wedge things badly; seen with a vt8235 */ 281 /* lost IAA irqs wedge things badly; seen with a vt8235 */
279 if (ehci->reclaim) { 282 if (ehci->reclaim) {
280 u32 status = readl (&ehci->regs->status); 283 u32 status = ehci_readl(ehci, &ehci->regs->status);
281 if (status & STS_IAA) { 284 if (status & STS_IAA) {
282 ehci_vdbg (ehci, "lost IAA\n"); 285 ehci_vdbg (ehci, "lost IAA\n");
283 COUNT (ehci->stats.lost_iaa); 286 COUNT (ehci->stats.lost_iaa);
284 writel (STS_IAA, &ehci->regs->status); 287 ehci_writel(ehci, STS_IAA, &ehci->regs->status);
285 ehci->reclaim_ready = 1; 288 ehci->reclaim_ready = 1;
286 } 289 }
287 } 290 }
@@ -309,7 +312,7 @@ ehci_shutdown (struct usb_hcd *hcd)
309 (void) ehci_halt (ehci); 312 (void) ehci_halt (ehci);
310 313
311 /* make BIOS/etc use companion controller during reboot */ 314 /* make BIOS/etc use companion controller during reboot */
312 writel (0, &ehci->regs->configured_flag); 315 ehci_writel(ehci, 0, &ehci->regs->configured_flag);
313} 316}
314 317
315static void ehci_port_power (struct ehci_hcd *ehci, int is_on) 318static void ehci_port_power (struct ehci_hcd *ehci, int is_on)
@@ -379,12 +382,13 @@ static void ehci_stop (struct usb_hcd *hcd)
379 ehci_quiesce (ehci); 382 ehci_quiesce (ehci);
380 383
381 ehci_reset (ehci); 384 ehci_reset (ehci);
382 writel (0, &ehci->regs->intr_enable); 385 ehci_writel(ehci, 0, &ehci->regs->intr_enable);
383 spin_unlock_irq(&ehci->lock); 386 spin_unlock_irq(&ehci->lock);
384 387
385 /* let companion controllers work when we aren't */ 388 /* let companion controllers work when we aren't */
386 writel (0, &ehci->regs->configured_flag); 389 ehci_writel(ehci, 0, &ehci->regs->configured_flag);
387 390
391 remove_companion_file(ehci);
388 remove_debug_files (ehci); 392 remove_debug_files (ehci);
389 393
390 /* root hub is shut down separately (first, when possible) */ 394 /* root hub is shut down separately (first, when possible) */
@@ -402,7 +406,8 @@ static void ehci_stop (struct usb_hcd *hcd)
402 ehci->stats.complete, ehci->stats.unlink); 406 ehci->stats.complete, ehci->stats.unlink);
403#endif 407#endif
404 408
405 dbg_status (ehci, "ehci_stop completed", readl (&ehci->regs->status)); 409 dbg_status (ehci, "ehci_stop completed",
410 ehci_readl(ehci, &ehci->regs->status));
406} 411}
407 412
408/* one-time init, only for memory state */ 413/* one-time init, only for memory state */
@@ -428,7 +433,7 @@ static int ehci_init(struct usb_hcd *hcd)
428 return retval; 433 return retval;
429 434
430 /* controllers may cache some of the periodic schedule ... */ 435 /* controllers may cache some of the periodic schedule ... */
431 hcc_params = readl(&ehci->caps->hcc_params); 436 hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
432 if (HCC_ISOC_CACHE(hcc_params)) // full frame cache 437 if (HCC_ISOC_CACHE(hcc_params)) // full frame cache
433 ehci->i_thresh = 8; 438 ehci->i_thresh = 8;
434 else // N microframes cached 439 else // N microframes cached
@@ -496,13 +501,16 @@ static int ehci_run (struct usb_hcd *hcd)
496 u32 temp; 501 u32 temp;
497 u32 hcc_params; 502 u32 hcc_params;
498 503
504 hcd->uses_new_polling = 1;
505 hcd->poll_rh = 0;
506
499 /* EHCI spec section 4.1 */ 507 /* EHCI spec section 4.1 */
500 if ((retval = ehci_reset(ehci)) != 0) { 508 if ((retval = ehci_reset(ehci)) != 0) {
501 ehci_mem_cleanup(ehci); 509 ehci_mem_cleanup(ehci);
502 return retval; 510 return retval;
503 } 511 }
504 writel(ehci->periodic_dma, &ehci->regs->frame_list); 512 ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
505 writel((u32)ehci->async->qh_dma, &ehci->regs->async_next); 513 ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
506 514
507 /* 515 /*
508 * hcc_params controls whether ehci->regs->segment must (!!!) 516 * hcc_params controls whether ehci->regs->segment must (!!!)
@@ -516,9 +524,9 @@ static int ehci_run (struct usb_hcd *hcd)
516 * Scsi_Host.highmem_io, and so forth. It's readonly to all 524 * Scsi_Host.highmem_io, and so forth. It's readonly to all
517 * host side drivers though. 525 * host side drivers though.
518 */ 526 */
519 hcc_params = readl(&ehci->caps->hcc_params); 527 hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
520 if (HCC_64BIT_ADDR(hcc_params)) { 528 if (HCC_64BIT_ADDR(hcc_params)) {
521 writel(0, &ehci->regs->segment); 529 ehci_writel(ehci, 0, &ehci->regs->segment);
522#if 0 530#if 0
523// this is deeply broken on almost all architectures 531// this is deeply broken on almost all architectures
524 if (!dma_set_mask(hcd->self.controller, DMA_64BIT_MASK)) 532 if (!dma_set_mask(hcd->self.controller, DMA_64BIT_MASK))
@@ -531,7 +539,7 @@ static int ehci_run (struct usb_hcd *hcd)
531 // root hub will detect new devices (why?); NEC doesn't 539 // root hub will detect new devices (why?); NEC doesn't
532 ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET); 540 ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
533 ehci->command |= CMD_RUN; 541 ehci->command |= CMD_RUN;
534 writel (ehci->command, &ehci->regs->command); 542 ehci_writel(ehci, ehci->command, &ehci->regs->command);
535 dbg_cmd (ehci, "init", ehci->command); 543 dbg_cmd (ehci, "init", ehci->command);
536 544
537 /* 545 /*
@@ -541,23 +549,25 @@ static int ehci_run (struct usb_hcd *hcd)
541 * and there's no companion controller unless maybe for USB OTG.) 549 * and there's no companion controller unless maybe for USB OTG.)
542 */ 550 */
543 hcd->state = HC_STATE_RUNNING; 551 hcd->state = HC_STATE_RUNNING;
544 writel (FLAG_CF, &ehci->regs->configured_flag); 552 ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
545 readl (&ehci->regs->command); /* unblock posted writes */ 553 ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
546 554
547 temp = HC_VERSION(readl (&ehci->caps->hc_capbase)); 555 temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase));
548 ehci_info (ehci, 556 ehci_info (ehci,
549 "USB %x.%x started, EHCI %x.%02x, driver %s%s\n", 557 "USB %x.%x started, EHCI %x.%02x, driver %s%s\n",
550 ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), 558 ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
551 temp >> 8, temp & 0xff, DRIVER_VERSION, 559 temp >> 8, temp & 0xff, DRIVER_VERSION,
552 ignore_oc ? ", overcurrent ignored" : ""); 560 ignore_oc ? ", overcurrent ignored" : "");
553 561
554 writel (INTR_MASK, &ehci->regs->intr_enable); /* Turn On Interrupts */ 562 ehci_writel(ehci, INTR_MASK,
563 &ehci->regs->intr_enable); /* Turn On Interrupts */
555 564
556 /* GRR this is run-once init(), being done every time the HC starts. 565 /* GRR this is run-once init(), being done every time the HC starts.
557 * So long as they're part of class devices, we can't do it init() 566 * So long as they're part of class devices, we can't do it init()
558 * since the class device isn't created that early. 567 * since the class device isn't created that early.
559 */ 568 */
560 create_debug_files(ehci); 569 create_debug_files(ehci);
570 create_companion_file(ehci);
561 571
562 return 0; 572 return 0;
563} 573}
@@ -567,12 +577,12 @@ static int ehci_run (struct usb_hcd *hcd)
567static irqreturn_t ehci_irq (struct usb_hcd *hcd) 577static irqreturn_t ehci_irq (struct usb_hcd *hcd)
568{ 578{
569 struct ehci_hcd *ehci = hcd_to_ehci (hcd); 579 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
570 u32 status; 580 u32 status, pcd_status = 0;
571 int bh; 581 int bh;
572 582
573 spin_lock (&ehci->lock); 583 spin_lock (&ehci->lock);
574 584
575 status = readl (&ehci->regs->status); 585 status = ehci_readl(ehci, &ehci->regs->status);
576 586
577 /* e.g. cardbus physical eject */ 587 /* e.g. cardbus physical eject */
578 if (status == ~(u32) 0) { 588 if (status == ~(u32) 0) {
@@ -587,8 +597,8 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
587 } 597 }
588 598
589 /* clear (just) interrupts */ 599 /* clear (just) interrupts */
590 writel (status, &ehci->regs->status); 600 ehci_writel(ehci, status, &ehci->regs->status);
591 readl (&ehci->regs->command); /* unblock posted write */ 601 ehci_readl(ehci, &ehci->regs->command); /* unblock posted write */
592 bh = 0; 602 bh = 0;
593 603
594#ifdef EHCI_VERBOSE_DEBUG 604#ifdef EHCI_VERBOSE_DEBUG
@@ -617,13 +627,15 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
617 /* remote wakeup [4.3.1] */ 627 /* remote wakeup [4.3.1] */
618 if (status & STS_PCD) { 628 if (status & STS_PCD) {
619 unsigned i = HCS_N_PORTS (ehci->hcs_params); 629 unsigned i = HCS_N_PORTS (ehci->hcs_params);
630 pcd_status = status;
620 631
621 /* resume root hub? */ 632 /* resume root hub? */
622 if (!(readl(&ehci->regs->command) & CMD_RUN)) 633 if (!(ehci_readl(ehci, &ehci->regs->command) & CMD_RUN))
623 usb_hcd_resume_root_hub(hcd); 634 usb_hcd_resume_root_hub(hcd);
624 635
625 while (i--) { 636 while (i--) {
626 int pstatus = readl (&ehci->regs->port_status [i]); 637 int pstatus = ehci_readl(ehci,
638 &ehci->regs->port_status [i]);
627 639
628 if (pstatus & PORT_OWNER) 640 if (pstatus & PORT_OWNER)
629 continue; 641 continue;
@@ -643,14 +655,15 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
643 /* PCI errors [4.15.2.4] */ 655 /* PCI errors [4.15.2.4] */
644 if (unlikely ((status & STS_FATAL) != 0)) { 656 if (unlikely ((status & STS_FATAL) != 0)) {
645 /* bogus "fatal" IRQs appear on some chips... why? */ 657 /* bogus "fatal" IRQs appear on some chips... why? */
646 status = readl (&ehci->regs->status); 658 status = ehci_readl(ehci, &ehci->regs->status);
647 dbg_cmd (ehci, "fatal", readl (&ehci->regs->command)); 659 dbg_cmd (ehci, "fatal", ehci_readl(ehci,
660 &ehci->regs->command));
648 dbg_status (ehci, "fatal", status); 661 dbg_status (ehci, "fatal", status);
649 if (status & STS_HALT) { 662 if (status & STS_HALT) {
650 ehci_err (ehci, "fatal error\n"); 663 ehci_err (ehci, "fatal error\n");
651dead: 664dead:
652 ehci_reset (ehci); 665 ehci_reset (ehci);
653 writel (0, &ehci->regs->configured_flag); 666 ehci_writel(ehci, 0, &ehci->regs->configured_flag);
654 /* generic layer kills/unlinks all urbs, then 667 /* generic layer kills/unlinks all urbs, then
655 * uses ehci_stop to clean up the rest 668 * uses ehci_stop to clean up the rest
656 */ 669 */
@@ -661,6 +674,8 @@ dead:
661 if (bh) 674 if (bh)
662 ehci_work (ehci); 675 ehci_work (ehci);
663 spin_unlock (&ehci->lock); 676 spin_unlock (&ehci->lock);
677 if (pcd_status & STS_PCD)
678 usb_hcd_poll_rh_status(hcd);
664 return IRQ_HANDLED; 679 return IRQ_HANDLED;
665} 680}
666 681
@@ -873,7 +888,8 @@ done:
873static int ehci_get_frame (struct usb_hcd *hcd) 888static int ehci_get_frame (struct usb_hcd *hcd)
874{ 889{
875 struct ehci_hcd *ehci = hcd_to_ehci (hcd); 890 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
876 return (readl (&ehci->regs->frame_index) >> 3) % ehci->periodic_size; 891 return (ehci_readl(ehci, &ehci->regs->frame_index) >> 3) %
892 ehci->periodic_size;
877} 893}
878 894
879/*-------------------------------------------------------------------------*/ 895/*-------------------------------------------------------------------------*/
@@ -899,7 +915,13 @@ MODULE_LICENSE ("GPL");
899#define PLATFORM_DRIVER ehci_hcd_au1xxx_driver 915#define PLATFORM_DRIVER ehci_hcd_au1xxx_driver
900#endif 916#endif
901 917
902#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) 918#ifdef CONFIG_PPC_PS3
919#include "ehci-ps3.c"
920#define PS3_SYSTEM_BUS_DRIVER ps3_ehci_sb_driver
921#endif
922
923#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
924 !defined(PS3_SYSTEM_BUS_DRIVER)
903#error "missing bus glue for ehci-hcd" 925#error "missing bus glue for ehci-hcd"
904#endif 926#endif
905 927
@@ -924,6 +946,20 @@ static int __init ehci_hcd_init(void)
924#ifdef PLATFORM_DRIVER 946#ifdef PLATFORM_DRIVER
925 platform_driver_unregister(&PLATFORM_DRIVER); 947 platform_driver_unregister(&PLATFORM_DRIVER);
926#endif 948#endif
949 return retval;
950 }
951#endif
952
953#ifdef PS3_SYSTEM_BUS_DRIVER
954 retval = ps3_system_bus_driver_register(&PS3_SYSTEM_BUS_DRIVER);
955 if (retval < 0) {
956#ifdef PLATFORM_DRIVER
957 platform_driver_unregister(&PLATFORM_DRIVER);
958#endif
959#ifdef PCI_DRIVER
960 pci_unregister_driver(&PCI_DRIVER);
961#endif
962 return retval;
927 } 963 }
928#endif 964#endif
929 965
@@ -939,6 +975,9 @@ static void __exit ehci_hcd_cleanup(void)
939#ifdef PCI_DRIVER 975#ifdef PCI_DRIVER
940 pci_unregister_driver(&PCI_DRIVER); 976 pci_unregister_driver(&PCI_DRIVER);
941#endif 977#endif
978#ifdef PS3_SYSTEM_BUS_DRIVER
979 ps3_system_bus_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
980#endif
942} 981}
943module_exit(ehci_hcd_cleanup); 982module_exit(ehci_hcd_cleanup);
944 983
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index bfe5f307cba6..0d83c6df1a3b 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -47,7 +47,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
47 ehci_quiesce (ehci); 47 ehci_quiesce (ehci);
48 hcd->state = HC_STATE_QUIESCING; 48 hcd->state = HC_STATE_QUIESCING;
49 } 49 }
50 ehci->command = readl (&ehci->regs->command); 50 ehci->command = ehci_readl(ehci, &ehci->regs->command);
51 if (ehci->reclaim) 51 if (ehci->reclaim)
52 ehci->reclaim_ready = 1; 52 ehci->reclaim_ready = 1;
53 ehci_work(ehci); 53 ehci_work(ehci);
@@ -60,7 +60,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
60 ehci->bus_suspended = 0; 60 ehci->bus_suspended = 0;
61 while (port--) { 61 while (port--) {
62 u32 __iomem *reg = &ehci->regs->port_status [port]; 62 u32 __iomem *reg = &ehci->regs->port_status [port];
63 u32 t1 = readl (reg) & ~PORT_RWC_BITS; 63 u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
64 u32 t2 = t1; 64 u32 t2 = t1;
65 65
66 /* keep track of which ports we suspend */ 66 /* keep track of which ports we suspend */
@@ -79,7 +79,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
79 if (t1 != t2) { 79 if (t1 != t2) {
80 ehci_vdbg (ehci, "port %d, %08x -> %08x\n", 80 ehci_vdbg (ehci, "port %d, %08x -> %08x\n",
81 port + 1, t1, t2); 81 port + 1, t1, t2);
82 writel (t2, reg); 82 ehci_writel(ehci, t2, reg);
83 } 83 }
84 } 84 }
85 85
@@ -92,8 +92,8 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
92 mask = INTR_MASK; 92 mask = INTR_MASK;
93 if (!device_may_wakeup(&hcd->self.root_hub->dev)) 93 if (!device_may_wakeup(&hcd->self.root_hub->dev))
94 mask &= ~STS_PCD; 94 mask &= ~STS_PCD;
95 writel(mask, &ehci->regs->intr_enable); 95 ehci_writel(ehci, mask, &ehci->regs->intr_enable);
96 readl(&ehci->regs->intr_enable); 96 ehci_readl(ehci, &ehci->regs->intr_enable);
97 97
98 ehci->next_statechange = jiffies + msecs_to_jiffies(10); 98 ehci->next_statechange = jiffies + msecs_to_jiffies(10);
99 spin_unlock_irq (&ehci->lock); 99 spin_unlock_irq (&ehci->lock);
@@ -118,26 +118,26 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
118 * the last user of the controller, not reset/pm hardware keeping 118 * the last user of the controller, not reset/pm hardware keeping
119 * state we gave to it. 119 * state we gave to it.
120 */ 120 */
121 temp = readl(&ehci->regs->intr_enable); 121 temp = ehci_readl(ehci, &ehci->regs->intr_enable);
122 ehci_dbg(ehci, "resume root hub%s\n", temp ? "" : " after power loss"); 122 ehci_dbg(ehci, "resume root hub%s\n", temp ? "" : " after power loss");
123 123
124 /* at least some APM implementations will try to deliver 124 /* at least some APM implementations will try to deliver
125 * IRQs right away, so delay them until we're ready. 125 * IRQs right away, so delay them until we're ready.
126 */ 126 */
127 writel(0, &ehci->regs->intr_enable); 127 ehci_writel(ehci, 0, &ehci->regs->intr_enable);
128 128
129 /* re-init operational registers */ 129 /* re-init operational registers */
130 writel(0, &ehci->regs->segment); 130 ehci_writel(ehci, 0, &ehci->regs->segment);
131 writel(ehci->periodic_dma, &ehci->regs->frame_list); 131 ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
132 writel((u32) ehci->async->qh_dma, &ehci->regs->async_next); 132 ehci_writel(ehci, (u32) ehci->async->qh_dma, &ehci->regs->async_next);
133 133
134 /* restore CMD_RUN, framelist size, and irq threshold */ 134 /* restore CMD_RUN, framelist size, and irq threshold */
135 writel (ehci->command, &ehci->regs->command); 135 ehci_writel(ehci, ehci->command, &ehci->regs->command);
136 136
137 /* manually resume the ports we suspended during bus_suspend() */ 137 /* manually resume the ports we suspended during bus_suspend() */
138 i = HCS_N_PORTS (ehci->hcs_params); 138 i = HCS_N_PORTS (ehci->hcs_params);
139 while (i--) { 139 while (i--) {
140 temp = readl (&ehci->regs->port_status [i]); 140 temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
141 temp &= ~(PORT_RWC_BITS 141 temp &= ~(PORT_RWC_BITS
142 | PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E); 142 | PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E);
143 if (test_bit(i, &ehci->bus_suspended) && 143 if (test_bit(i, &ehci->bus_suspended) &&
@@ -145,20 +145,20 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
145 ehci->reset_done [i] = jiffies + msecs_to_jiffies (20); 145 ehci->reset_done [i] = jiffies + msecs_to_jiffies (20);
146 temp |= PORT_RESUME; 146 temp |= PORT_RESUME;
147 } 147 }
148 writel (temp, &ehci->regs->port_status [i]); 148 ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
149 } 149 }
150 i = HCS_N_PORTS (ehci->hcs_params); 150 i = HCS_N_PORTS (ehci->hcs_params);
151 mdelay (20); 151 mdelay (20);
152 while (i--) { 152 while (i--) {
153 temp = readl (&ehci->regs->port_status [i]); 153 temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
154 if (test_bit(i, &ehci->bus_suspended) && 154 if (test_bit(i, &ehci->bus_suspended) &&
155 (temp & PORT_SUSPEND)) { 155 (temp & PORT_SUSPEND)) {
156 temp &= ~(PORT_RWC_BITS | PORT_RESUME); 156 temp &= ~(PORT_RWC_BITS | PORT_RESUME);
157 writel (temp, &ehci->regs->port_status [i]); 157 ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
158 ehci_vdbg (ehci, "resumed port %d\n", i + 1); 158 ehci_vdbg (ehci, "resumed port %d\n", i + 1);
159 } 159 }
160 } 160 }
161 (void) readl (&ehci->regs->command); 161 (void) ehci_readl(ehci, &ehci->regs->command);
162 162
163 /* maybe re-activate the schedule(s) */ 163 /* maybe re-activate the schedule(s) */
164 temp = 0; 164 temp = 0;
@@ -168,14 +168,14 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
168 temp |= CMD_PSE; 168 temp |= CMD_PSE;
169 if (temp) { 169 if (temp) {
170 ehci->command |= temp; 170 ehci->command |= temp;
171 writel (ehci->command, &ehci->regs->command); 171 ehci_writel(ehci, ehci->command, &ehci->regs->command);
172 } 172 }
173 173
174 ehci->next_statechange = jiffies + msecs_to_jiffies(5); 174 ehci->next_statechange = jiffies + msecs_to_jiffies(5);
175 hcd->state = HC_STATE_RUNNING; 175 hcd->state = HC_STATE_RUNNING;
176 176
177 /* Now we can safely re-enable irqs */ 177 /* Now we can safely re-enable irqs */
178 writel(INTR_MASK, &ehci->regs->intr_enable); 178 ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);
179 179
180 spin_unlock_irq (&ehci->lock); 180 spin_unlock_irq (&ehci->lock);
181 return 0; 181 return 0;
@@ -190,9 +190,107 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
190 190
191/*-------------------------------------------------------------------------*/ 191/*-------------------------------------------------------------------------*/
192 192
193/* Display the ports dedicated to the companion controller */
194static ssize_t show_companion(struct class_device *class_dev, char *buf)
195{
196 struct ehci_hcd *ehci;
197 int nports, index, n;
198 int count = PAGE_SIZE;
199 char *ptr = buf;
200
201 ehci = hcd_to_ehci(bus_to_hcd(class_get_devdata(class_dev)));
202 nports = HCS_N_PORTS(ehci->hcs_params);
203
204 for (index = 0; index < nports; ++index) {
205 if (test_bit(index, &ehci->companion_ports)) {
206 n = scnprintf(ptr, count, "%d\n", index + 1);
207 ptr += n;
208 count -= n;
209 }
210 }
211 return ptr - buf;
212}
213
214/*
215 * Dedicate or undedicate a port to the companion controller.
216 * Syntax is "[-]portnum", where a leading '-' sign means
217 * return control of the port to the EHCI controller.
218 */
219static ssize_t store_companion(struct class_device *class_dev,
220 const char *buf, size_t count)
221{
222 struct ehci_hcd *ehci;
223 int portnum, new_owner, try;
224 u32 __iomem *status_reg;
225 u32 port_status;
226
227 ehci = hcd_to_ehci(bus_to_hcd(class_get_devdata(class_dev)));
228 new_owner = PORT_OWNER; /* Owned by companion */
229 if (sscanf(buf, "%d", &portnum) != 1)
230 return -EINVAL;
231 if (portnum < 0) {
232 portnum = - portnum;
233 new_owner = 0; /* Owned by EHCI */
234 }
235 if (portnum <= 0 || portnum > HCS_N_PORTS(ehci->hcs_params))
236 return -ENOENT;
237 status_reg = &ehci->regs->port_status[--portnum];
238 if (new_owner)
239 set_bit(portnum, &ehci->companion_ports);
240 else
241 clear_bit(portnum, &ehci->companion_ports);
242
243 /*
244 * The controller won't set the OWNER bit if the port is
245 * enabled, so this loop will sometimes require at least two
246 * iterations: one to disable the port and one to set OWNER.
247 */
248
249 for (try = 4; try > 0; --try) {
250 spin_lock_irq(&ehci->lock);
251 port_status = ehci_readl(ehci, status_reg);
252 if ((port_status & PORT_OWNER) == new_owner
253 || (port_status & (PORT_OWNER | PORT_CONNECT))
254 == 0)
255 try = 0;
256 else {
257 port_status ^= PORT_OWNER;
258 port_status &= ~(PORT_PE | PORT_RWC_BITS);
259 ehci_writel(ehci, port_status, status_reg);
260 }
261 spin_unlock_irq(&ehci->lock);
262 if (try > 1)
263 msleep(5);
264 }
265 return count;
266}
267static CLASS_DEVICE_ATTR(companion, 0644, show_companion, store_companion);
268
269static inline void create_companion_file(struct ehci_hcd *ehci)
270{
271 int i;
272
273 /* with integrated TT there is no companion! */
274 if (!ehci_is_TDI(ehci))
275 i = class_device_create_file(ehci_to_hcd(ehci)->self.class_dev,
276 &class_device_attr_companion);
277}
278
279static inline void remove_companion_file(struct ehci_hcd *ehci)
280{
281 /* with integrated TT there is no companion! */
282 if (!ehci_is_TDI(ehci))
283 class_device_remove_file(ehci_to_hcd(ehci)->self.class_dev,
284 &class_device_attr_companion);
285}
286
287
288/*-------------------------------------------------------------------------*/
289
193static int check_reset_complete ( 290static int check_reset_complete (
194 struct ehci_hcd *ehci, 291 struct ehci_hcd *ehci,
195 int index, 292 int index,
293 u32 __iomem *status_reg,
196 int port_status 294 int port_status
197) { 295) {
198 if (!(port_status & PORT_CONNECT)) { 296 if (!(port_status & PORT_CONNECT)) {
@@ -217,7 +315,7 @@ static int check_reset_complete (
217 // what happens if HCS_N_CC(params) == 0 ? 315 // what happens if HCS_N_CC(params) == 0 ?
218 port_status |= PORT_OWNER; 316 port_status |= PORT_OWNER;
219 port_status &= ~PORT_RWC_BITS; 317 port_status &= ~PORT_RWC_BITS;
220 writel (port_status, &ehci->regs->port_status [index]); 318 ehci_writel(ehci, port_status, status_reg);
221 319
222 } else 320 } else
223 ehci_dbg (ehci, "port %d high speed\n", index + 1); 321 ehci_dbg (ehci, "port %d high speed\n", index + 1);
@@ -268,22 +366,21 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
268 /* port N changes (bit N)? */ 366 /* port N changes (bit N)? */
269 spin_lock_irqsave (&ehci->lock, flags); 367 spin_lock_irqsave (&ehci->lock, flags);
270 for (i = 0; i < ports; i++) { 368 for (i = 0; i < ports; i++) {
271 temp = readl (&ehci->regs->port_status [i]); 369 temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
272 if (temp & PORT_OWNER) { 370
273 /* don't report this in GetPortStatus */ 371 /*
274 if (temp & PORT_CSC) { 372 * Return status information even for ports with OWNER set.
275 temp &= ~PORT_RWC_BITS; 373 * Otherwise khubd wouldn't see the disconnect event when a
276 temp |= PORT_CSC; 374 * high-speed device is switched over to the companion
277 writel (temp, &ehci->regs->port_status [i]); 375 * controller by the user.
278 } 376 */
279 continue; 377
280 }
281 if (!(temp & PORT_CONNECT)) 378 if (!(temp & PORT_CONNECT))
282 ehci->reset_done [i] = 0; 379 ehci->reset_done [i] = 0;
283 if ((temp & mask) != 0 380 if ((temp & mask) != 0
284 || ((temp & PORT_RESUME) != 0 381 || ((temp & PORT_RESUME) != 0
285 && time_after (jiffies, 382 && time_after_eq(jiffies,
286 ehci->reset_done [i]))) { 383 ehci->reset_done[i]))) {
287 if (i < 7) 384 if (i < 7)
288 buf [0] |= 1 << (i + 1); 385 buf [0] |= 1 << (i + 1);
289 else 386 else
@@ -345,6 +442,7 @@ static int ehci_hub_control (
345) { 442) {
346 struct ehci_hcd *ehci = hcd_to_ehci (hcd); 443 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
347 int ports = HCS_N_PORTS (ehci->hcs_params); 444 int ports = HCS_N_PORTS (ehci->hcs_params);
445 u32 __iomem *status_reg = &ehci->regs->port_status[wIndex - 1];
348 u32 temp, status; 446 u32 temp, status;
349 unsigned long flags; 447 unsigned long flags;
350 int retval = 0; 448 int retval = 0;
@@ -373,18 +471,22 @@ static int ehci_hub_control (
373 if (!wIndex || wIndex > ports) 471 if (!wIndex || wIndex > ports)
374 goto error; 472 goto error;
375 wIndex--; 473 wIndex--;
376 temp = readl (&ehci->regs->port_status [wIndex]); 474 temp = ehci_readl(ehci, status_reg);
377 if (temp & PORT_OWNER) 475
378 break; 476 /*
477 * Even if OWNER is set, so the port is owned by the
478 * companion controller, khubd needs to be able to clear
479 * the port-change status bits (especially
480 * USB_PORT_FEAT_C_CONNECTION).
481 */
379 482
380 switch (wValue) { 483 switch (wValue) {
381 case USB_PORT_FEAT_ENABLE: 484 case USB_PORT_FEAT_ENABLE:
382 writel (temp & ~PORT_PE, 485 ehci_writel(ehci, temp & ~PORT_PE, status_reg);
383 &ehci->regs->port_status [wIndex]);
384 break; 486 break;
385 case USB_PORT_FEAT_C_ENABLE: 487 case USB_PORT_FEAT_C_ENABLE:
386 writel((temp & ~PORT_RWC_BITS) | PORT_PEC, 488 ehci_writel(ehci, (temp & ~PORT_RWC_BITS) | PORT_PEC,
387 &ehci->regs->port_status [wIndex]); 489 status_reg);
388 break; 490 break;
389 case USB_PORT_FEAT_SUSPEND: 491 case USB_PORT_FEAT_SUSPEND:
390 if (temp & PORT_RESET) 492 if (temp & PORT_RESET)
@@ -396,8 +498,8 @@ static int ehci_hub_control (
396 goto error; 498 goto error;
397 /* resume signaling for 20 msec */ 499 /* resume signaling for 20 msec */
398 temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS); 500 temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
399 writel (temp | PORT_RESUME, 501 ehci_writel(ehci, temp | PORT_RESUME,
400 &ehci->regs->port_status [wIndex]); 502 status_reg);
401 ehci->reset_done [wIndex] = jiffies 503 ehci->reset_done [wIndex] = jiffies
402 + msecs_to_jiffies (20); 504 + msecs_to_jiffies (20);
403 } 505 }
@@ -407,16 +509,17 @@ static int ehci_hub_control (
407 break; 509 break;
408 case USB_PORT_FEAT_POWER: 510 case USB_PORT_FEAT_POWER:
409 if (HCS_PPC (ehci->hcs_params)) 511 if (HCS_PPC (ehci->hcs_params))
410 writel (temp & ~(PORT_RWC_BITS | PORT_POWER), 512 ehci_writel(ehci,
411 &ehci->regs->port_status [wIndex]); 513 temp & ~(PORT_RWC_BITS | PORT_POWER),
514 status_reg);
412 break; 515 break;
413 case USB_PORT_FEAT_C_CONNECTION: 516 case USB_PORT_FEAT_C_CONNECTION:
414 writel((temp & ~PORT_RWC_BITS) | PORT_CSC, 517 ehci_writel(ehci, (temp & ~PORT_RWC_BITS) | PORT_CSC,
415 &ehci->regs->port_status [wIndex]); 518 status_reg);
416 break; 519 break;
417 case USB_PORT_FEAT_C_OVER_CURRENT: 520 case USB_PORT_FEAT_C_OVER_CURRENT:
418 writel((temp & ~PORT_RWC_BITS) | PORT_OCC, 521 ehci_writel(ehci, (temp & ~PORT_RWC_BITS) | PORT_OCC,
419 &ehci->regs->port_status [wIndex]); 522 status_reg);
420 break; 523 break;
421 case USB_PORT_FEAT_C_RESET: 524 case USB_PORT_FEAT_C_RESET:
422 /* GetPortStatus clears reset */ 525 /* GetPortStatus clears reset */
@@ -424,7 +527,7 @@ static int ehci_hub_control (
424 default: 527 default:
425 goto error; 528 goto error;
426 } 529 }
427 readl (&ehci->regs->command); /* unblock posted write */ 530 ehci_readl(ehci, &ehci->regs->command); /* unblock posted write */
428 break; 531 break;
429 case GetHubDescriptor: 532 case GetHubDescriptor:
430 ehci_hub_descriptor (ehci, (struct usb_hub_descriptor *) 533 ehci_hub_descriptor (ehci, (struct usb_hub_descriptor *)
@@ -440,7 +543,7 @@ static int ehci_hub_control (
440 goto error; 543 goto error;
441 wIndex--; 544 wIndex--;
442 status = 0; 545 status = 0;
443 temp = readl (&ehci->regs->port_status [wIndex]); 546 temp = ehci_readl(ehci, status_reg);
444 547
445 // wPortChange bits 548 // wPortChange bits
446 if (temp & PORT_CSC) 549 if (temp & PORT_CSC)
@@ -451,42 +554,55 @@ static int ehci_hub_control (
451 status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT; 554 status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT;
452 555
453 /* whoever resumes must GetPortStatus to complete it!! */ 556 /* whoever resumes must GetPortStatus to complete it!! */
454 if ((temp & PORT_RESUME) 557 if (temp & PORT_RESUME) {
455 && time_after (jiffies,
456 ehci->reset_done [wIndex])) {
457 status |= 1 << USB_PORT_FEAT_C_SUSPEND;
458 ehci->reset_done [wIndex] = 0;
459 558
460 /* stop resume signaling */ 559 /* Remote Wakeup received? */
461 temp = readl (&ehci->regs->port_status [wIndex]); 560 if (!ehci->reset_done[wIndex]) {
462 writel (temp & ~(PORT_RWC_BITS | PORT_RESUME), 561 /* resume signaling for 20 msec */
463 &ehci->regs->port_status [wIndex]); 562 ehci->reset_done[wIndex] = jiffies
464 retval = handshake ( 563 + msecs_to_jiffies(20);
465 &ehci->regs->port_status [wIndex], 564 /* check the port again */
466 PORT_RESUME, 0, 2000 /* 2msec */); 565 mod_timer(&ehci_to_hcd(ehci)->rh_timer,
467 if (retval != 0) { 566 ehci->reset_done[wIndex]);
468 ehci_err (ehci, "port %d resume error %d\n", 567 }
469 wIndex + 1, retval); 568
470 goto error; 569 /* resume completed? */
570 else if (time_after_eq(jiffies,
571 ehci->reset_done[wIndex])) {
572 status |= 1 << USB_PORT_FEAT_C_SUSPEND;
573 ehci->reset_done[wIndex] = 0;
574
575 /* stop resume signaling */
576 temp = ehci_readl(ehci, status_reg);
577 ehci_writel(ehci,
578 temp & ~(PORT_RWC_BITS | PORT_RESUME),
579 status_reg);
580 retval = handshake(ehci, status_reg,
581 PORT_RESUME, 0, 2000 /* 2msec */);
582 if (retval != 0) {
583 ehci_err(ehci,
584 "port %d resume error %d\n",
585 wIndex + 1, retval);
586 goto error;
587 }
588 temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
471 } 589 }
472 temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
473 } 590 }
474 591
475 /* whoever resets must GetPortStatus to complete it!! */ 592 /* whoever resets must GetPortStatus to complete it!! */
476 if ((temp & PORT_RESET) 593 if ((temp & PORT_RESET)
477 && time_after (jiffies, 594 && time_after_eq(jiffies,
478 ehci->reset_done [wIndex])) { 595 ehci->reset_done[wIndex])) {
479 status |= 1 << USB_PORT_FEAT_C_RESET; 596 status |= 1 << USB_PORT_FEAT_C_RESET;
480 ehci->reset_done [wIndex] = 0; 597 ehci->reset_done [wIndex] = 0;
481 598
482 /* force reset to complete */ 599 /* force reset to complete */
483 writel (temp & ~(PORT_RWC_BITS | PORT_RESET), 600 ehci_writel(ehci, temp & ~(PORT_RWC_BITS | PORT_RESET),
484 &ehci->regs->port_status [wIndex]); 601 status_reg);
485 /* REVISIT: some hardware needs 550+ usec to clear 602 /* REVISIT: some hardware needs 550+ usec to clear
486 * this bit; seems too long to spin routinely... 603 * this bit; seems too long to spin routinely...
487 */ 604 */
488 retval = handshake ( 605 retval = handshake(ehci, status_reg,
489 &ehci->regs->port_status [wIndex],
490 PORT_RESET, 0, 750); 606 PORT_RESET, 0, 750);
491 if (retval != 0) { 607 if (retval != 0) {
492 ehci_err (ehci, "port %d reset error %d\n", 608 ehci_err (ehci, "port %d reset error %d\n",
@@ -495,28 +611,41 @@ static int ehci_hub_control (
495 } 611 }
496 612
497 /* see what we found out */ 613 /* see what we found out */
498 temp = check_reset_complete (ehci, wIndex, 614 temp = check_reset_complete (ehci, wIndex, status_reg,
499 readl (&ehci->regs->port_status [wIndex])); 615 ehci_readl(ehci, status_reg));
500 } 616 }
501 617
502 // don't show wPortStatus if it's owned by a companion hc 618 /* transfer dedicated ports to the companion hc */
503 if (!(temp & PORT_OWNER)) { 619 if ((temp & PORT_CONNECT) &&
504 if (temp & PORT_CONNECT) { 620 test_bit(wIndex, &ehci->companion_ports)) {
505 status |= 1 << USB_PORT_FEAT_CONNECTION; 621 temp &= ~PORT_RWC_BITS;
506 // status may be from integrated TT 622 temp |= PORT_OWNER;
507 status |= ehci_port_speed(ehci, temp); 623 ehci_writel(ehci, temp, status_reg);
508 } 624 ehci_dbg(ehci, "port %d --> companion\n", wIndex + 1);
509 if (temp & PORT_PE) 625 temp = ehci_readl(ehci, status_reg);
510 status |= 1 << USB_PORT_FEAT_ENABLE; 626 }
511 if (temp & (PORT_SUSPEND|PORT_RESUME)) 627
512 status |= 1 << USB_PORT_FEAT_SUSPEND; 628 /*
513 if (temp & PORT_OC) 629 * Even if OWNER is set, there's no harm letting khubd
514 status |= 1 << USB_PORT_FEAT_OVER_CURRENT; 630 * see the wPortStatus values (they should all be 0 except
515 if (temp & PORT_RESET) 631 * for PORT_POWER anyway).
516 status |= 1 << USB_PORT_FEAT_RESET; 632 */
517 if (temp & PORT_POWER) 633
518 status |= 1 << USB_PORT_FEAT_POWER; 634 if (temp & PORT_CONNECT) {
635 status |= 1 << USB_PORT_FEAT_CONNECTION;
636 // status may be from integrated TT
637 status |= ehci_port_speed(ehci, temp);
519 } 638 }
639 if (temp & PORT_PE)
640 status |= 1 << USB_PORT_FEAT_ENABLE;
641 if (temp & (PORT_SUSPEND|PORT_RESUME))
642 status |= 1 << USB_PORT_FEAT_SUSPEND;
643 if (temp & PORT_OC)
644 status |= 1 << USB_PORT_FEAT_OVER_CURRENT;
645 if (temp & PORT_RESET)
646 status |= 1 << USB_PORT_FEAT_RESET;
647 if (temp & PORT_POWER)
648 status |= 1 << USB_PORT_FEAT_POWER;
520 649
521#ifndef EHCI_VERBOSE_DEBUG 650#ifndef EHCI_VERBOSE_DEBUG
522 if (status & ~0xffff) /* only if wPortChange is interesting */ 651 if (status & ~0xffff) /* only if wPortChange is interesting */
@@ -541,7 +670,7 @@ static int ehci_hub_control (
541 if (!wIndex || wIndex > ports) 670 if (!wIndex || wIndex > ports)
542 goto error; 671 goto error;
543 wIndex--; 672 wIndex--;
544 temp = readl (&ehci->regs->port_status [wIndex]); 673 temp = ehci_readl(ehci, status_reg);
545 if (temp & PORT_OWNER) 674 if (temp & PORT_OWNER)
546 break; 675 break;
547 676
@@ -555,13 +684,12 @@ static int ehci_hub_control (
555 goto error; 684 goto error;
556 if (device_may_wakeup(&hcd->self.root_hub->dev)) 685 if (device_may_wakeup(&hcd->self.root_hub->dev))
557 temp |= PORT_WAKE_BITS; 686 temp |= PORT_WAKE_BITS;
558 writel (temp | PORT_SUSPEND, 687 ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
559 &ehci->regs->port_status [wIndex]);
560 break; 688 break;
561 case USB_PORT_FEAT_POWER: 689 case USB_PORT_FEAT_POWER:
562 if (HCS_PPC (ehci->hcs_params)) 690 if (HCS_PPC (ehci->hcs_params))
563 writel (temp | PORT_POWER, 691 ehci_writel(ehci, temp | PORT_POWER,
564 &ehci->regs->port_status [wIndex]); 692 status_reg);
565 break; 693 break;
566 case USB_PORT_FEAT_RESET: 694 case USB_PORT_FEAT_RESET:
567 if (temp & PORT_RESUME) 695 if (temp & PORT_RESUME)
@@ -589,7 +717,7 @@ static int ehci_hub_control (
589 ehci->reset_done [wIndex] = jiffies 717 ehci->reset_done [wIndex] = jiffies
590 + msecs_to_jiffies (50); 718 + msecs_to_jiffies (50);
591 } 719 }
592 writel (temp, &ehci->regs->port_status [wIndex]); 720 ehci_writel(ehci, temp, status_reg);
593 break; 721 break;
594 722
595 /* For downstream facing ports (these): one hub port is put 723 /* For downstream facing ports (these): one hub port is put
@@ -604,13 +732,13 @@ static int ehci_hub_control (
604 ehci_quiesce(ehci); 732 ehci_quiesce(ehci);
605 ehci_halt(ehci); 733 ehci_halt(ehci);
606 temp |= selector << 16; 734 temp |= selector << 16;
607 writel (temp, &ehci->regs->port_status [wIndex]); 735 ehci_writel(ehci, temp, status_reg);
608 break; 736 break;
609 737
610 default: 738 default:
611 goto error; 739 goto error;
612 } 740 }
613 readl (&ehci->regs->command); /* unblock posted writes */ 741 ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
614 break; 742 break;
615 743
616 default: 744 default:
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 4bc7970ba3ef..12edc723ec73 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -38,7 +38,7 @@ static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
38 if ((temp & (3 << 13)) == (1 << 13)) { 38 if ((temp & (3 << 13)) == (1 << 13)) {
39 temp &= 0x1fff; 39 temp &= 0x1fff;
40 ehci->debug = ehci_to_hcd(ehci)->regs + temp; 40 ehci->debug = ehci_to_hcd(ehci)->regs + temp;
41 temp = readl(&ehci->debug->control); 41 temp = ehci_readl(ehci, &ehci->debug->control);
42 ehci_info(ehci, "debug port %d%s\n", 42 ehci_info(ehci, "debug port %d%s\n",
43 HCS_DEBUG_PORT(ehci->hcs_params), 43 HCS_DEBUG_PORT(ehci->hcs_params),
44 (temp & DBGP_ENABLED) 44 (temp & DBGP_ENABLED)
@@ -71,8 +71,24 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
71 u32 temp; 71 u32 temp;
72 int retval; 72 int retval;
73 73
74 switch (pdev->vendor) {
75 case PCI_VENDOR_ID_TOSHIBA_2:
76 /* celleb's companion chip */
77 if (pdev->device == 0x01b5) {
78#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
79 ehci->big_endian_mmio = 1;
80#else
81 ehci_warn(ehci,
82 "unsupported big endian Toshiba quirk\n");
83#endif
84 }
85 break;
86 }
87
74 ehci->caps = hcd->regs; 88 ehci->caps = hcd->regs;
75 ehci->regs = hcd->regs + HC_LENGTH(readl(&ehci->caps->hc_capbase)); 89 ehci->regs = hcd->regs +
90 HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
91
76 dbg_hcs_params(ehci, "reset"); 92 dbg_hcs_params(ehci, "reset");
77 dbg_hcc_params(ehci, "reset"); 93 dbg_hcc_params(ehci, "reset");
78 94
@@ -101,7 +117,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
101 } 117 }
102 118
103 /* cache this readonly data; minimize chip reads */ 119 /* cache this readonly data; minimize chip reads */
104 ehci->hcs_params = readl(&ehci->caps->hcs_params); 120 ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
105 121
106 retval = ehci_halt(ehci); 122 retval = ehci_halt(ehci);
107 if (retval) 123 if (retval)
@@ -235,8 +251,8 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
235 rc = -EINVAL; 251 rc = -EINVAL;
236 goto bail; 252 goto bail;
237 } 253 }
238 writel (0, &ehci->regs->intr_enable); 254 ehci_writel(ehci, 0, &ehci->regs->intr_enable);
239 (void)readl(&ehci->regs->intr_enable); 255 (void)ehci_readl(ehci, &ehci->regs->intr_enable);
240 256
241 /* make sure snapshot being resumed re-enumerates everything */ 257 /* make sure snapshot being resumed re-enumerates everything */
242 if (message.event == PM_EVENT_PRETHAW) { 258 if (message.event == PM_EVENT_PRETHAW) {
@@ -270,13 +286,13 @@ static int ehci_pci_resume(struct usb_hcd *hcd)
270 /* If CF is still set, we maintained PCI Vaux power. 286 /* If CF is still set, we maintained PCI Vaux power.
271 * Just undo the effect of ehci_pci_suspend(). 287 * Just undo the effect of ehci_pci_suspend().
272 */ 288 */
273 if (readl(&ehci->regs->configured_flag) == FLAG_CF) { 289 if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) {
274 int mask = INTR_MASK; 290 int mask = INTR_MASK;
275 291
276 if (!device_may_wakeup(&hcd->self.root_hub->dev)) 292 if (!device_may_wakeup(&hcd->self.root_hub->dev))
277 mask &= ~STS_PCD; 293 mask &= ~STS_PCD;
278 writel(mask, &ehci->regs->intr_enable); 294 ehci_writel(ehci, mask, &ehci->regs->intr_enable);
279 readl(&ehci->regs->intr_enable); 295 ehci_readl(ehci, &ehci->regs->intr_enable);
280 return 0; 296 return 0;
281 } 297 }
282 298
@@ -300,9 +316,9 @@ static int ehci_pci_resume(struct usb_hcd *hcd)
300 /* here we "know" root ports should always stay powered */ 316 /* here we "know" root ports should always stay powered */
301 ehci_port_power(ehci, 1); 317 ehci_port_power(ehci, 1);
302 318
303 writel(ehci->command, &ehci->regs->command); 319 ehci_writel(ehci, ehci->command, &ehci->regs->command);
304 writel(FLAG_CF, &ehci->regs->configured_flag); 320 ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
305 readl(&ehci->regs->command); /* unblock posted writes */ 321 ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
306 322
307 hcd->state = HC_STATE_SUSPENDED; 323 hcd->state = HC_STATE_SUSPENDED;
308 return 0; 324 return 0;
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
new file mode 100644
index 000000000000..371f194a9d39
--- /dev/null
+++ b/drivers/usb/host/ehci-ps3.c
@@ -0,0 +1,193 @@
1/*
2 * PS3 EHCI Host Controller driver
3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <asm/ps3.h>
22
23static int ps3_ehci_hc_reset(struct usb_hcd *hcd)
24{
25 int result;
26 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
27
28 ehci->big_endian_mmio = 1;
29
30 ehci->caps = hcd->regs;
31 ehci->regs = hcd->regs + HC_LENGTH(ehci_readl(ehci,
32 &ehci->caps->hc_capbase));
33
34 dbg_hcs_params(ehci, "reset");
35 dbg_hcc_params(ehci, "reset");
36
37 ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
38
39 result = ehci_halt(ehci);
40
41 if (result)
42 return result;
43
44 result = ehci_init(hcd);
45
46 if (result)
47 return result;
48
49 ehci_port_power(ehci, 0);
50
51 return result;
52}
53
54static const struct hc_driver ps3_ehci_hc_driver = {
55 .description = hcd_name,
56 .product_desc = "PS3 EHCI Host Controller",
57 .hcd_priv_size = sizeof(struct ehci_hcd),
58 .irq = ehci_irq,
59 .flags = HCD_MEMORY | HCD_USB2,
60 .reset = ps3_ehci_hc_reset,
61 .start = ehci_run,
62 .stop = ehci_stop,
63 .shutdown = ehci_shutdown,
64 .urb_enqueue = ehci_urb_enqueue,
65 .urb_dequeue = ehci_urb_dequeue,
66 .endpoint_disable = ehci_endpoint_disable,
67 .get_frame_number = ehci_get_frame,
68 .hub_status_data = ehci_hub_status_data,
69 .hub_control = ehci_hub_control,
70#if defined(CONFIG_PM)
71 .bus_suspend = ehci_bus_suspend,
72 .bus_resume = ehci_bus_resume,
73#endif
74};
75
76#if !defined(DEBUG)
77#undef dev_dbg
78static inline int __attribute__ ((format (printf, 2, 3))) dev_dbg(
79 const struct device *_dev, const char *fmt, ...) {return 0;}
80#endif
81
82
83static int ps3_ehci_sb_probe(struct ps3_system_bus_device *dev)
84{
85 int result;
86 struct usb_hcd *hcd;
87 unsigned int virq;
88 static u64 dummy_mask = DMA_32BIT_MASK;
89
90 if (usb_disabled()) {
91 result = -ENODEV;
92 goto fail_start;
93 }
94
95 result = ps3_mmio_region_create(dev->m_region);
96
97 if (result) {
98 dev_dbg(&dev->core, "%s:%d: ps3_map_mmio_region failed\n",
99 __func__, __LINE__);
100 result = -EPERM;
101 goto fail_mmio;
102 }
103
104 dev_dbg(&dev->core, "%s:%d: mmio mapped_addr %lxh\n", __func__,
105 __LINE__, dev->m_region->lpar_addr);
106
107 result = ps3_alloc_io_irq(dev->interrupt_id, &virq);
108
109 if (result) {
110 dev_dbg(&dev->core, "%s:%d: ps3_construct_io_irq(%d) failed.\n",
111 __func__, __LINE__, virq);
112 result = -EPERM;
113 goto fail_irq;
114 }
115
116 dev->core.power.power_state = PMSG_ON;
117 dev->core.dma_mask = &dummy_mask; /* FIXME: for improper usb code */
118
119 hcd = usb_create_hcd(&ps3_ehci_hc_driver, &dev->core, dev->core.bus_id);
120
121 if (!hcd) {
122 dev_dbg(&dev->core, "%s:%d: usb_create_hcd failed\n", __func__,
123 __LINE__);
124 result = -ENOMEM;
125 goto fail_create_hcd;
126 }
127
128 hcd->rsrc_start = dev->m_region->lpar_addr;
129 hcd->rsrc_len = dev->m_region->len;
130 hcd->regs = ioremap(dev->m_region->lpar_addr, dev->m_region->len);
131
132 if (!hcd->regs) {
133 dev_dbg(&dev->core, "%s:%d: ioremap failed\n", __func__,
134 __LINE__);
135 result = -EPERM;
136 goto fail_ioremap;
137 }
138
139 dev_dbg(&dev->core, "%s:%d: hcd->rsrc_start %lxh\n", __func__, __LINE__,
140 (unsigned long)hcd->rsrc_start);
141 dev_dbg(&dev->core, "%s:%d: hcd->rsrc_len %lxh\n", __func__, __LINE__,
142 (unsigned long)hcd->rsrc_len);
143 dev_dbg(&dev->core, "%s:%d: hcd->regs %lxh\n", __func__, __LINE__,
144 (unsigned long)hcd->regs);
145 dev_dbg(&dev->core, "%s:%d: virq %lu\n", __func__, __LINE__,
146 (unsigned long)virq);
147
148 ps3_system_bus_set_driver_data(dev, hcd);
149
150 result = usb_add_hcd(hcd, virq, IRQF_DISABLED);
151
152 if (result) {
153 dev_dbg(&dev->core, "%s:%d: usb_add_hcd failed (%d)\n",
154 __func__, __LINE__, result);
155 goto fail_add_hcd;
156 }
157
158 return result;
159
160fail_add_hcd:
161 iounmap(hcd->regs);
162fail_ioremap:
163 usb_put_hcd(hcd);
164fail_create_hcd:
165 ps3_free_io_irq(virq);
166fail_irq:
167 ps3_free_mmio_region(dev->m_region);
168fail_mmio:
169fail_start:
170 return result;
171}
172
173static int ps3_ehci_sb_remove(struct ps3_system_bus_device *dev)
174{
175 struct usb_hcd *hcd =
176 (struct usb_hcd *)ps3_system_bus_get_driver_data(dev);
177
178 usb_put_hcd(hcd);
179 ps3_system_bus_set_driver_data(dev, NULL);
180
181 return 0;
182}
183
184MODULE_ALIAS("ps3-ehci");
185
186static struct ps3_system_bus_driver ps3_ehci_sb_driver = {
187 .match_id = PS3_MATCH_ID_EHCI,
188 .core = {
189 .name = "ps3-ehci-driver",
190 },
191 .probe = ps3_ehci_sb_probe,
192 .remove = ps3_ehci_sb_remove,
193};
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 62e46dc60e86..e7fbbd00e7cd 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -789,13 +789,14 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
789 head = ehci->async; 789 head = ehci->async;
790 timer_action_done (ehci, TIMER_ASYNC_OFF); 790 timer_action_done (ehci, TIMER_ASYNC_OFF);
791 if (!head->qh_next.qh) { 791 if (!head->qh_next.qh) {
792 u32 cmd = readl (&ehci->regs->command); 792 u32 cmd = ehci_readl(ehci, &ehci->regs->command);
793 793
794 if (!(cmd & CMD_ASE)) { 794 if (!(cmd & CMD_ASE)) {
795 /* in case a clear of CMD_ASE didn't take yet */ 795 /* in case a clear of CMD_ASE didn't take yet */
796 (void) handshake (&ehci->regs->status, STS_ASS, 0, 150); 796 (void)handshake(ehci, &ehci->regs->status,
797 STS_ASS, 0, 150);
797 cmd |= CMD_ASE | CMD_RUN; 798 cmd |= CMD_ASE | CMD_RUN;
798 writel (cmd, &ehci->regs->command); 799 ehci_writel(ehci, cmd, &ehci->regs->command);
799 ehci_to_hcd(ehci)->state = HC_STATE_RUNNING; 800 ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;
800 /* posted write need not be known to HC yet ... */ 801 /* posted write need not be known to HC yet ... */
801 } 802 }
@@ -1007,7 +1008,7 @@ static void end_unlink_async (struct ehci_hcd *ehci)
1007 1008
1008static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh) 1009static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
1009{ 1010{
1010 int cmd = readl (&ehci->regs->command); 1011 int cmd = ehci_readl(ehci, &ehci->regs->command);
1011 struct ehci_qh *prev; 1012 struct ehci_qh *prev;
1012 1013
1013#ifdef DEBUG 1014#ifdef DEBUG
@@ -1025,7 +1026,8 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
1025 if (ehci_to_hcd(ehci)->state != HC_STATE_HALT 1026 if (ehci_to_hcd(ehci)->state != HC_STATE_HALT
1026 && !ehci->reclaim) { 1027 && !ehci->reclaim) {
1027 /* ... and CMD_IAAD clear */ 1028 /* ... and CMD_IAAD clear */
1028 writel (cmd & ~CMD_ASE, &ehci->regs->command); 1029 ehci_writel(ehci, cmd & ~CMD_ASE,
1030 &ehci->regs->command);
1029 wmb (); 1031 wmb ();
1030 // handshake later, if we need to 1032 // handshake later, if we need to
1031 timer_action_done (ehci, TIMER_ASYNC_OFF); 1033 timer_action_done (ehci, TIMER_ASYNC_OFF);
@@ -1054,8 +1056,8 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
1054 1056
1055 ehci->reclaim_ready = 0; 1057 ehci->reclaim_ready = 0;
1056 cmd |= CMD_IAAD; 1058 cmd |= CMD_IAAD;
1057 writel (cmd, &ehci->regs->command); 1059 ehci_writel(ehci, cmd, &ehci->regs->command);
1058 (void) readl (&ehci->regs->command); 1060 (void)ehci_readl(ehci, &ehci->regs->command);
1059 timer_action (ehci, TIMER_IAA_WATCHDOG); 1061 timer_action (ehci, TIMER_IAA_WATCHDOG);
1060} 1062}
1061 1063
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 65c402a0fa7a..7b5ae7111f23 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -433,20 +433,20 @@ static int enable_periodic (struct ehci_hcd *ehci)
433 /* did clearing PSE did take effect yet? 433 /* did clearing PSE did take effect yet?
434 * takes effect only at frame boundaries... 434 * takes effect only at frame boundaries...
435 */ 435 */
436 status = handshake (&ehci->regs->status, STS_PSS, 0, 9 * 125); 436 status = handshake(ehci, &ehci->regs->status, STS_PSS, 0, 9 * 125);
437 if (status != 0) { 437 if (status != 0) {
438 ehci_to_hcd(ehci)->state = HC_STATE_HALT; 438 ehci_to_hcd(ehci)->state = HC_STATE_HALT;
439 return status; 439 return status;
440 } 440 }
441 441
442 cmd = readl (&ehci->regs->command) | CMD_PSE; 442 cmd = ehci_readl(ehci, &ehci->regs->command) | CMD_PSE;
443 writel (cmd, &ehci->regs->command); 443 ehci_writel(ehci, cmd, &ehci->regs->command);
444 /* posted write ... PSS happens later */ 444 /* posted write ... PSS happens later */
445 ehci_to_hcd(ehci)->state = HC_STATE_RUNNING; 445 ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;
446 446
447 /* make sure ehci_work scans these */ 447 /* make sure ehci_work scans these */
448 ehci->next_uframe = readl (&ehci->regs->frame_index) 448 ehci->next_uframe = ehci_readl(ehci, &ehci->regs->frame_index)
449 % (ehci->periodic_size << 3); 449 % (ehci->periodic_size << 3);
450 return 0; 450 return 0;
451} 451}
452 452
@@ -458,14 +458,14 @@ static int disable_periodic (struct ehci_hcd *ehci)
458 /* did setting PSE not take effect yet? 458 /* did setting PSE not take effect yet?
459 * takes effect only at frame boundaries... 459 * takes effect only at frame boundaries...
460 */ 460 */
461 status = handshake (&ehci->regs->status, STS_PSS, STS_PSS, 9 * 125); 461 status = handshake(ehci, &ehci->regs->status, STS_PSS, STS_PSS, 9 * 125);
462 if (status != 0) { 462 if (status != 0) {
463 ehci_to_hcd(ehci)->state = HC_STATE_HALT; 463 ehci_to_hcd(ehci)->state = HC_STATE_HALT;
464 return status; 464 return status;
465 } 465 }
466 466
467 cmd = readl (&ehci->regs->command) & ~CMD_PSE; 467 cmd = ehci_readl(ehci, &ehci->regs->command) & ~CMD_PSE;
468 writel (cmd, &ehci->regs->command); 468 ehci_writel(ehci, cmd, &ehci->regs->command);
469 /* posted write ... */ 469 /* posted write ... */
470 470
471 ehci->next_uframe = -1; 471 ehci->next_uframe = -1;
@@ -1336,7 +1336,7 @@ iso_stream_schedule (
1336 goto fail; 1336 goto fail;
1337 } 1337 }
1338 1338
1339 now = readl (&ehci->regs->frame_index) % mod; 1339 now = ehci_readl(ehci, &ehci->regs->frame_index) % mod;
1340 1340
1341 /* when's the last uframe this urb could start? */ 1341 /* when's the last uframe this urb could start? */
1342 max = now + mod; 1342 max = now + mod;
@@ -2088,7 +2088,7 @@ scan_periodic (struct ehci_hcd *ehci)
2088 */ 2088 */
2089 now_uframe = ehci->next_uframe; 2089 now_uframe = ehci->next_uframe;
2090 if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state)) 2090 if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
2091 clock = readl (&ehci->regs->frame_index); 2091 clock = ehci_readl(ehci, &ehci->regs->frame_index);
2092 else 2092 else
2093 clock = now_uframe + mod - 1; 2093 clock = now_uframe + mod - 1;
2094 clock %= mod; 2094 clock %= mod;
@@ -2213,7 +2213,7 @@ restart:
2213 if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state)) 2213 if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
2214 break; 2214 break;
2215 ehci->next_uframe = now_uframe; 2215 ehci->next_uframe = now_uframe;
2216 now = readl (&ehci->regs->frame_index) % mod; 2216 now = ehci_readl(ehci, &ehci->regs->frame_index) % mod;
2217 if (now_uframe == now) 2217 if (now_uframe == now)
2218 break; 2218 break;
2219 2219
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 74dbc6c8228f..ec0da0343be4 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -74,7 +74,11 @@ struct ehci_hcd { /* one per controller */
74 74
75 /* per root hub port */ 75 /* per root hub port */
76 unsigned long reset_done [EHCI_MAX_ROOT_PORTS]; 76 unsigned long reset_done [EHCI_MAX_ROOT_PORTS];
77 unsigned long bus_suspended; 77 /* bit vectors (one bit per port) */
78 unsigned long bus_suspended; /* which ports were
79 already suspended at the start of a bus suspend */
80 unsigned long companion_ports; /* which ports are
81 dedicated to the companion controller */
78 82
79 /* per-HC memory pools (could be per-bus, but ...) */ 83 /* per-HC memory pools (could be per-bus, but ...) */
80 struct dma_pool *qh_pool; /* qh per active urb */ 84 struct dma_pool *qh_pool; /* qh per active urb */
@@ -92,6 +96,7 @@ struct ehci_hcd { /* one per controller */
92 unsigned is_tdi_rh_tt:1; /* TDI roothub with TT */ 96 unsigned is_tdi_rh_tt:1; /* TDI roothub with TT */
93 unsigned no_selective_suspend:1; 97 unsigned no_selective_suspend:1;
94 unsigned has_fsl_port_bug:1; /* FreeScale */ 98 unsigned has_fsl_port_bug:1; /* FreeScale */
99 unsigned big_endian_mmio:1;
95 100
96 u8 sbrn; /* packed release number */ 101 u8 sbrn; /* packed release number */
97 102
@@ -651,6 +656,45 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
651#define ehci_has_fsl_portno_bug(e) (0) 656#define ehci_has_fsl_portno_bug(e) (0)
652#endif 657#endif
653 658
659/*
660 * While most USB host controllers implement their registers in
661 * little-endian format, a minority (celleb companion chip) implement
662 * them in big endian format.
663 *
664 * This attempts to support either format at compile time without a
665 * runtime penalty, or both formats with the additional overhead
666 * of checking a flag bit.
667 */
668
669#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
670#define ehci_big_endian_mmio(e) ((e)->big_endian_mmio)
671#else
672#define ehci_big_endian_mmio(e) 0
673#endif
674
675static inline unsigned int ehci_readl (const struct ehci_hcd *ehci,
676 __u32 __iomem * regs)
677{
678#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
679 return ehci_big_endian_mmio(ehci) ?
680 readl_be((__force u32 *)regs) :
681 readl((__force u32 *)regs);
682#else
683 return readl((__force u32 *)regs);
684#endif
685}
686
687static inline void ehci_writel (const struct ehci_hcd *ehci,
688 const unsigned int val, __u32 __iomem *regs)
689{
690#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
691 ehci_big_endian_mmio(ehci) ?
692 writel_be(val, (__force u32 *)regs) :
693 writel(val, (__force u32 *)regs);
694#else
695 writel(val, (__force u32 *)regs);
696#endif
697}
654 698
655/*-------------------------------------------------------------------------*/ 699/*-------------------------------------------------------------------------*/
656 700
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index cc405512fa1c..930346487278 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -170,7 +170,6 @@ static int usb_hcd_at91_remove(struct usb_hcd *hcd,
170 at91_stop_hc(pdev); 170 at91_stop_hc(pdev);
171 iounmap(hcd->regs); 171 iounmap(hcd->regs);
172 release_mem_region(hcd->rsrc_start, hcd->rsrc_len); 172 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
173 disable_irq_wake(hcd->irq);
174 173
175 clk_put(fclk); 174 clk_put(fclk);
176 clk_put(iclk); 175 clk_put(iclk);
@@ -271,8 +270,6 @@ ohci_hcd_at91_drv_suspend(struct platform_device *pdev, pm_message_t mesg)
271 270
272 if (device_may_wakeup(&pdev->dev)) 271 if (device_may_wakeup(&pdev->dev))
273 enable_irq_wake(hcd->irq); 272 enable_irq_wake(hcd->irq);
274 else
275 disable_irq_wake(hcd->irq);
276 273
277 /* 274 /*
278 * The integrated transceivers seem unable to notice disconnect, 275 * The integrated transceivers seem unable to notice disconnect,
@@ -293,6 +290,11 @@ ohci_hcd_at91_drv_suspend(struct platform_device *pdev, pm_message_t mesg)
293 290
294static int ohci_hcd_at91_drv_resume(struct platform_device *pdev) 291static int ohci_hcd_at91_drv_resume(struct platform_device *pdev)
295{ 292{
293 struct usb_hcd *hcd = platform_get_drvdata(pdev);
294
295 if (device_may_wakeup(&pdev->dev))
296 disable_irq_wake(hcd->irq);
297
296 if (!clocked) { 298 if (!clocked) {
297 clk_enable(iclk); 299 clk_enable(iclk);
298 clk_enable(fclk); 300 clk_enable(fclk);
@@ -320,18 +322,3 @@ static struct platform_driver ohci_hcd_at91_driver = {
320 }, 322 },
321}; 323};
322 324
323static int __init ohci_hcd_at91_init (void)
324{
325 if (usb_disabled())
326 return -ENODEV;
327
328 return platform_driver_register(&ohci_hcd_at91_driver);
329}
330
331static void __exit ohci_hcd_at91_cleanup (void)
332{
333 platform_driver_unregister(&ohci_hcd_at91_driver);
334}
335
336module_init (ohci_hcd_at91_init);
337module_exit (ohci_hcd_at91_cleanup);
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c
index e70b2430e2a9..663a0600b6e7 100644
--- a/drivers/usb/host/ohci-au1xxx.c
+++ b/drivers/usb/host/ohci-au1xxx.c
@@ -345,19 +345,3 @@ static struct platform_driver ohci_hcd_au1xxx_driver = {
345 }, 345 },
346}; 346};
347 347
348static int __init ohci_hcd_au1xxx_init (void)
349{
350 pr_debug (DRIVER_INFO " (Au1xxx)");
351 pr_debug ("block sizes: ed %d td %d\n",
352 sizeof (struct ed), sizeof (struct td));
353
354 return platform_driver_register(&ohci_hcd_au1xxx_driver);
355}
356
357static void __exit ohci_hcd_au1xxx_cleanup (void)
358{
359 platform_driver_unregister(&ohci_hcd_au1xxx_driver);
360}
361
362module_init (ohci_hcd_au1xxx_init);
363module_exit (ohci_hcd_au1xxx_cleanup);
diff --git a/drivers/usb/host/ohci-ep93xx.c b/drivers/usb/host/ohci-ep93xx.c
index 3348b07f0fe5..44c60fba76e1 100644
--- a/drivers/usb/host/ohci-ep93xx.c
+++ b/drivers/usb/host/ohci-ep93xx.c
@@ -214,15 +214,3 @@ static struct platform_driver ohci_hcd_ep93xx_driver = {
214 }, 214 },
215}; 215};
216 216
217static int __init ohci_hcd_ep93xx_init(void)
218{
219 return platform_driver_register(&ohci_hcd_ep93xx_driver);
220}
221
222static void __exit ohci_hcd_ep93xx_cleanup(void)
223{
224 platform_driver_unregister(&ohci_hcd_ep93xx_driver);
225}
226
227module_init(ohci_hcd_ep93xx_init);
228module_exit(ohci_hcd_ep93xx_cleanup);
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index c1c1d871aba4..fa6a7ceaa0db 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -855,63 +855,167 @@ MODULE_LICENSE ("GPL");
855 855
856#ifdef CONFIG_PCI 856#ifdef CONFIG_PCI
857#include "ohci-pci.c" 857#include "ohci-pci.c"
858#define PCI_DRIVER ohci_pci_driver
858#endif 859#endif
859 860
860#ifdef CONFIG_SA1111 861#ifdef CONFIG_SA1111
861#include "ohci-sa1111.c" 862#include "ohci-sa1111.c"
863#define SA1111_DRIVER ohci_hcd_sa1111_driver
862#endif 864#endif
863 865
864#ifdef CONFIG_ARCH_S3C2410 866#ifdef CONFIG_ARCH_S3C2410
865#include "ohci-s3c2410.c" 867#include "ohci-s3c2410.c"
868#define PLATFORM_DRIVER ohci_hcd_s3c2410_driver
866#endif 869#endif
867 870
868#ifdef CONFIG_ARCH_OMAP 871#ifdef CONFIG_ARCH_OMAP
869#include "ohci-omap.c" 872#include "ohci-omap.c"
873#define PLATFORM_DRIVER ohci_hcd_omap_driver
870#endif 874#endif
871 875
872#ifdef CONFIG_ARCH_LH7A404 876#ifdef CONFIG_ARCH_LH7A404
873#include "ohci-lh7a404.c" 877#include "ohci-lh7a404.c"
878#define PLATFORM_DRIVER ohci_hcd_lh7a404_driver
874#endif 879#endif
875 880
876#ifdef CONFIG_PXA27x 881#ifdef CONFIG_PXA27x
877#include "ohci-pxa27x.c" 882#include "ohci-pxa27x.c"
883#define PLATFORM_DRIVER ohci_hcd_pxa27x_driver
878#endif 884#endif
879 885
880#ifdef CONFIG_ARCH_EP93XX 886#ifdef CONFIG_ARCH_EP93XX
881#include "ohci-ep93xx.c" 887#include "ohci-ep93xx.c"
888#define PLATFORM_DRIVER ohci_hcd_ep93xx_driver
882#endif 889#endif
883 890
884#ifdef CONFIG_SOC_AU1X00 891#ifdef CONFIG_SOC_AU1X00
885#include "ohci-au1xxx.c" 892#include "ohci-au1xxx.c"
893#define PLATFORM_DRIVER ohci_hcd_au1xxx_driver
886#endif 894#endif
887 895
888#ifdef CONFIG_PNX8550 896#ifdef CONFIG_PNX8550
889#include "ohci-pnx8550.c" 897#include "ohci-pnx8550.c"
898#define PLATFORM_DRIVER ohci_hcd_pnx8550_driver
890#endif 899#endif
891 900
892#ifdef CONFIG_USB_OHCI_HCD_PPC_SOC 901#ifdef CONFIG_USB_OHCI_HCD_PPC_SOC
893#include "ohci-ppc-soc.c" 902#include "ohci-ppc-soc.c"
903#define PLATFORM_DRIVER ohci_hcd_ppc_soc_driver
894#endif 904#endif
895 905
896#ifdef CONFIG_ARCH_AT91 906#ifdef CONFIG_ARCH_AT91
897#include "ohci-at91.c" 907#include "ohci-at91.c"
908#define PLATFORM_DRIVER ohci_hcd_at91_driver
898#endif 909#endif
899 910
900#ifdef CONFIG_ARCH_PNX4008 911#ifdef CONFIG_ARCH_PNX4008
901#include "ohci-pnx4008.c" 912#include "ohci-pnx4008.c"
913#define PLATFORM_DRIVER usb_hcd_pnx4008_driver
902#endif 914#endif
903 915
904#if !(defined(CONFIG_PCI) \ 916
905 || defined(CONFIG_SA1111) \ 917#ifdef CONFIG_USB_OHCI_HCD_PPC_OF
906 || defined(CONFIG_ARCH_S3C2410) \ 918#include "ohci-ppc-of.c"
907 || defined(CONFIG_ARCH_OMAP) \ 919#define OF_PLATFORM_DRIVER ohci_hcd_ppc_of_driver
908 || defined (CONFIG_ARCH_LH7A404) \ 920#endif
909 || defined (CONFIG_PXA27x) \ 921
910 || defined (CONFIG_ARCH_EP93XX) \ 922#ifdef CONFIG_PPC_PS3
911 || defined (CONFIG_SOC_AU1X00) \ 923#include "ohci-ps3.c"
912 || defined (CONFIG_USB_OHCI_HCD_PPC_SOC) \ 924#define PS3_SYSTEM_BUS_DRIVER ps3_ohci_sb_driver
913 || defined (CONFIG_ARCH_AT91) \ 925#endif
914 || defined (CONFIG_ARCH_PNX4008) \ 926
915 ) 927#if !defined(PCI_DRIVER) && \
928 !defined(PLATFORM_DRIVER) && \
929 !defined(OF_PLATFORM_DRIVER) && \
930 !defined(SA1111_DRIVER) && \
931 !defined(PS3_SYSTEM_BUS_DRIVER)
916#error "missing bus glue for ohci-hcd" 932#error "missing bus glue for ohci-hcd"
917#endif 933#endif
934
935static int __init ohci_hcd_mod_init(void)
936{
937 int retval = 0;
938
939 if (usb_disabled())
940 return -ENODEV;
941
942 printk (KERN_DEBUG "%s: " DRIVER_INFO "\n", hcd_name);
943 pr_debug ("%s: block sizes: ed %Zd td %Zd\n", hcd_name,
944 sizeof (struct ed), sizeof (struct td));
945
946#ifdef PS3_SYSTEM_BUS_DRIVER
947 retval = ps3_system_bus_driver_register(&PS3_SYSTEM_BUS_DRIVER);
948 if (retval < 0)
949 goto error_ps3;
950#endif
951
952#ifdef PLATFORM_DRIVER
953 retval = platform_driver_register(&PLATFORM_DRIVER);
954 if (retval < 0)
955 goto error_platform;
956#endif
957
958#ifdef OF_PLATFORM_DRIVER
959 retval = of_register_platform_driver(&OF_PLATFORM_DRIVER);
960 if (retval < 0)
961 goto error_of_platform;
962#endif
963
964#ifdef SA1111_DRIVER
965 retval = sa1111_driver_register(&SA1111_DRIVER);
966 if (retval < 0)
967 goto error_sa1111;
968#endif
969
970#ifdef PCI_DRIVER
971 retval = pci_register_driver(&PCI_DRIVER);
972 if (retval < 0)
973 goto error_pci;
974#endif
975
976 return retval;
977
978 /* Error path */
979#ifdef PCI_DRIVER
980 error_pci:
981#endif
982#ifdef SA1111_DRIVER
983 sa1111_driver_unregister(&SA1111_DRIVER);
984 error_sa1111:
985#endif
986#ifdef OF_PLATFORM_DRIVER
987 of_unregister_platform_driver(&OF_PLATFORM_DRIVER);
988 error_of_platform:
989#endif
990#ifdef PLATFORM_DRIVER
991 platform_driver_unregister(&PLATFORM_DRIVER);
992 error_platform:
993#endif
994#ifdef PS3_SYSTEM_BUS_DRIVER
995 ps3_system_bus_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
996 error_ps3:
997#endif
998 return retval;
999}
1000module_init(ohci_hcd_mod_init);
1001
1002static void __exit ohci_hcd_mod_exit(void)
1003{
1004#ifdef PCI_DRIVER
1005 pci_unregister_driver(&PCI_DRIVER);
1006#endif
1007#ifdef SA1111_DRIVER
1008 sa1111_driver_unregister(&SA1111_DRIVER);
1009#endif
1010#ifdef OF_PLATFORM_DRIVER
1011 of_unregister_platform_driver(&OF_PLATFORM_DRIVER);
1012#endif
1013#ifdef PLATFORM_DRIVER
1014 platform_driver_unregister(&PLATFORM_DRIVER);
1015#endif
1016#ifdef PS3_SYSTEM_BUS_DRIVER
1017 ps3_system_bus_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
1018#endif
1019}
1020module_exit(ohci_hcd_mod_exit);
1021
diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c
index e9807cf73a2f..4a043abd85ea 100644
--- a/drivers/usb/host/ohci-lh7a404.c
+++ b/drivers/usb/host/ohci-lh7a404.c
@@ -251,19 +251,3 @@ static struct platform_driver ohci_hcd_lh7a404_driver = {
251 }, 251 },
252}; 252};
253 253
254static int __init ohci_hcd_lh7a404_init (void)
255{
256 pr_debug (DRIVER_INFO " (LH7A404)");
257 pr_debug ("block sizes: ed %d td %d\n",
258 sizeof (struct ed), sizeof (struct td));
259
260 return platform_driver_register(&ohci_hcd_lh7a404_driver);
261}
262
263static void __exit ohci_hcd_lh7a404_cleanup (void)
264{
265 platform_driver_unregister(&ohci_hcd_lh7a404_driver);
266}
267
268module_init (ohci_hcd_lh7a404_init);
269module_exit (ohci_hcd_lh7a404_cleanup);
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 27be1f936885..5cfa3d1c4413 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -544,22 +544,3 @@ static struct platform_driver ohci_hcd_omap_driver = {
544 }, 544 },
545}; 545};
546 546
547static int __init ohci_hcd_omap_init (void)
548{
549 printk (KERN_DEBUG "%s: " DRIVER_INFO " (OMAP)\n", hcd_name);
550 if (usb_disabled())
551 return -ENODEV;
552
553 pr_debug("%s: block sizes: ed %Zd td %Zd\n", hcd_name,
554 sizeof (struct ed), sizeof (struct td));
555
556 return platform_driver_register(&ohci_hcd_omap_driver);
557}
558
559static void __exit ohci_hcd_omap_cleanup (void)
560{
561 platform_driver_unregister(&ohci_hcd_omap_driver);
562}
563
564module_init (ohci_hcd_omap_init);
565module_exit (ohci_hcd_omap_cleanup);
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 596e0b41e606..b331ac4d0d62 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -20,79 +20,154 @@
20 20
21/*-------------------------------------------------------------------------*/ 21/*-------------------------------------------------------------------------*/
22 22
23static int 23/* AMD 756, for most chips (early revs), corrupts register
24ohci_pci_reset (struct usb_hcd *hcd) 24 * values on read ... so enable the vendor workaround.
25 */
26static int __devinit ohci_quirk_amd756(struct usb_hcd *hcd)
25{ 27{
26 struct ohci_hcd *ohci = hcd_to_ohci (hcd); 28 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
27 29
28 ohci_hcd_init (ohci); 30 ohci->flags = OHCI_QUIRK_AMD756;
29 return ohci_init (ohci); 31 ohci_dbg (ohci, "AMD756 erratum 4 workaround\n");
32
33 /* also erratum 10 (suspend/resume issues) */
34 device_init_wakeup(&hcd->self.root_hub->dev, 0);
35
36 return 0;
30} 37}
31 38
32static int __devinit 39/* Apple's OHCI driver has a lot of bizarre workarounds
33ohci_pci_start (struct usb_hcd *hcd) 40 * for this chip. Evidently control and bulk lists
41 * can get confused. (B&W G3 models, and ...)
42 */
43static int __devinit ohci_quirk_opti(struct usb_hcd *hcd)
34{ 44{
35 struct ohci_hcd *ohci = hcd_to_ohci (hcd); 45 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
36 int ret;
37 46
38 /* REVISIT this whole block should move to reset(), which handles 47 ohci_dbg (ohci, "WARNING: OPTi workarounds unavailable\n");
39 * all the other one-time init. 48
49 return 0;
50}
51
52/* Check for NSC87560. We have to look at the bridge (fn1) to
53 * identify the USB (fn2). This quirk might apply to more or
54 * even all NSC stuff.
55 */
56static int __devinit ohci_quirk_ns(struct usb_hcd *hcd)
57{
58 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
59 struct pci_dev *b;
60
61 b = pci_get_slot (pdev->bus, PCI_DEVFN (PCI_SLOT (pdev->devfn), 1));
62 if (b && b->device == PCI_DEVICE_ID_NS_87560_LIO
63 && b->vendor == PCI_VENDOR_ID_NS) {
64 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
65
66 ohci->flags |= OHCI_QUIRK_SUPERIO;
67 ohci_dbg (ohci, "Using NSC SuperIO setup\n");
68 }
69 pci_dev_put(b);
70
71 return 0;
72}
73
74/* Check for Compaq's ZFMicro chipset, which needs short
75 * delays before control or bulk queues get re-activated
76 * in finish_unlinks()
77 */
78static int __devinit ohci_quirk_zfmicro(struct usb_hcd *hcd)
79{
80 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
81
82 ohci->flags |= OHCI_QUIRK_ZFMICRO;
83 ohci_dbg (ohci, "enabled Compaq ZFMicro chipset quirk\n");
84
85 return 0;
86}
87
88/* Check for Toshiba SCC OHCI which has big endian registers
89 * and little endian in memory data structures
90 */
91static int __devinit ohci_quirk_toshiba_scc(struct usb_hcd *hcd)
92{
93 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
94
95 /* That chip is only present in the southbridge of some
96 * cell based platforms which are supposed to select
97 * CONFIG_USB_OHCI_BIG_ENDIAN_MMIO. We verify here if
98 * that was the case though.
99 */
100#ifdef CONFIG_USB_OHCI_BIG_ENDIAN_MMIO
101 ohci->flags |= OHCI_QUIRK_BE_MMIO;
102 ohci_dbg (ohci, "enabled big endian Toshiba quirk\n");
103 return 0;
104#else
105 ohci_err (ohci, "unsupported big endian Toshiba quirk\n");
106 return -ENXIO;
107#endif
108}
109
110/* List of quirks for OHCI */
111static const struct pci_device_id ohci_pci_quirks[] = {
112 {
113 PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x740c),
114 .driver_data = (unsigned long)ohci_quirk_amd756,
115 },
116 {
117 PCI_DEVICE(PCI_VENDOR_ID_OPTI, 0xc861),
118 .driver_data = (unsigned long)ohci_quirk_opti,
119 },
120 {
121 PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_ANY_ID),
122 .driver_data = (unsigned long)ohci_quirk_ns,
123 },
124 {
125 PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xa0f8),
126 .driver_data = (unsigned long)ohci_quirk_zfmicro,
127 },
128 {
129 PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, 0x01b6),
130 .driver_data = (unsigned long)ohci_quirk_toshiba_scc,
131 },
132 /* FIXME for some of the early AMD 760 southbridges, OHCI
133 * won't work at all. blacklist them.
40 */ 134 */
135
136 {},
137};
138
139static int ohci_pci_reset (struct usb_hcd *hcd)
140{
141 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
142 int ret = 0;
143
41 if (hcd->self.controller) { 144 if (hcd->self.controller) {
42 struct pci_dev *pdev = to_pci_dev(hcd->self.controller); 145 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
146 const struct pci_device_id *quirk_id;
43 147
44 /* AMD 756, for most chips (early revs), corrupts register 148 quirk_id = pci_match_id(ohci_pci_quirks, pdev);
45 * values on read ... so enable the vendor workaround. 149 if (quirk_id != NULL) {
46 */ 150 int (*quirk)(struct usb_hcd *ohci);
47 if (pdev->vendor == PCI_VENDOR_ID_AMD 151 quirk = (void *)quirk_id->driver_data;
48 && pdev->device == 0x740c) { 152 ret = quirk(hcd);
49 ohci->flags = OHCI_QUIRK_AMD756;
50 ohci_dbg (ohci, "AMD756 erratum 4 workaround\n");
51 /* also erratum 10 (suspend/resume issues) */
52 device_init_wakeup(&hcd->self.root_hub->dev, 0);
53 } 153 }
154 }
155 if (ret == 0) {
156 ohci_hcd_init (ohci);
157 return ohci_init (ohci);
158 }
159 return ret;
160}
54 161
55 /* FIXME for some of the early AMD 760 southbridges, OHCI
56 * won't work at all. blacklist them.
57 */
58
59 /* Apple's OHCI driver has a lot of bizarre workarounds
60 * for this chip. Evidently control and bulk lists
61 * can get confused. (B&W G3 models, and ...)
62 */
63 else if (pdev->vendor == PCI_VENDOR_ID_OPTI
64 && pdev->device == 0xc861) {
65 ohci_dbg (ohci,
66 "WARNING: OPTi workarounds unavailable\n");
67 }
68 162
69 /* Check for NSC87560. We have to look at the bridge (fn1) to 163static int __devinit ohci_pci_start (struct usb_hcd *hcd)
70 * identify the USB (fn2). This quirk might apply to more or 164{
71 * even all NSC stuff. 165 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
72 */ 166 int ret;
73 else if (pdev->vendor == PCI_VENDOR_ID_NS) {
74 struct pci_dev *b;
75
76 b = pci_get_slot (pdev->bus,
77 PCI_DEVFN (PCI_SLOT (pdev->devfn), 1));
78 if (b && b->device == PCI_DEVICE_ID_NS_87560_LIO
79 && b->vendor == PCI_VENDOR_ID_NS) {
80 ohci->flags |= OHCI_QUIRK_SUPERIO;
81 ohci_dbg (ohci, "Using NSC SuperIO setup\n");
82 }
83 pci_dev_put(b);
84 }
85 167
86 /* Check for Compaq's ZFMicro chipset, which needs short 168#ifdef CONFIG_PM /* avoid warnings about unused pdev */
87 * delays before control or bulk queues get re-activated 169 if (hcd->self.controller) {
88 * in finish_unlinks() 170 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
89 */
90 else if (pdev->vendor == PCI_VENDOR_ID_COMPAQ
91 && pdev->device == 0xa0f8) {
92 ohci->flags |= OHCI_QUIRK_ZFMICRO;
93 ohci_dbg (ohci,
94 "enabled Compaq ZFMicro chipset quirk\n");
95 }
96 171
97 /* RWC may not be set for add-in PCI cards, since boot 172 /* RWC may not be set for add-in PCI cards, since boot
98 * firmware probably ignored them. This transfers PCI 173 * firmware probably ignored them. This transfers PCI
@@ -101,16 +176,14 @@ ohci_pci_start (struct usb_hcd *hcd)
101 if (device_may_wakeup(&pdev->dev)) 176 if (device_may_wakeup(&pdev->dev))
102 ohci->hc_control |= OHCI_CTRL_RWC; 177 ohci->hc_control |= OHCI_CTRL_RWC;
103 } 178 }
179#endif /* CONFIG_PM */
104 180
105 /* NOTE: there may have already been a first reset, to 181 ret = ohci_run (ohci);
106 * keep bios/smm irqs from making trouble 182 if (ret < 0) {
107 */
108 if ((ret = ohci_run (ohci)) < 0) {
109 ohci_err (ohci, "can't start\n"); 183 ohci_err (ohci, "can't start\n");
110 ohci_stop (hcd); 184 ohci_stop (hcd);
111 return ret;
112 } 185 }
113 return 0; 186 return ret;
114} 187}
115 188
116#ifdef CONFIG_PM 189#ifdef CONFIG_PM
@@ -238,23 +311,3 @@ static struct pci_driver ohci_pci_driver = {
238 .shutdown = usb_hcd_pci_shutdown, 311 .shutdown = usb_hcd_pci_shutdown,
239}; 312};
240 313
241
242static int __init ohci_hcd_pci_init (void)
243{
244 printk (KERN_DEBUG "%s: " DRIVER_INFO " (PCI)\n", hcd_name);
245 if (usb_disabled())
246 return -ENODEV;
247
248 pr_debug ("%s: block sizes: ed %Zd td %Zd\n", hcd_name,
249 sizeof (struct ed), sizeof (struct td));
250 return pci_register_driver (&ohci_pci_driver);
251}
252module_init (ohci_hcd_pci_init);
253
254/*-------------------------------------------------------------------------*/
255
256static void __exit ohci_hcd_pci_cleanup (void)
257{
258 pci_unregister_driver (&ohci_pci_driver);
259}
260module_exit (ohci_hcd_pci_cleanup);
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
index 3a8cbfb69054..893b172384da 100644
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -465,15 +465,3 @@ static struct platform_driver usb_hcd_pnx4008_driver = {
465 .remove = usb_hcd_pnx4008_remove, 465 .remove = usb_hcd_pnx4008_remove,
466}; 466};
467 467
468static int __init usb_hcd_pnx4008_init(void)
469{
470 return platform_driver_register(&usb_hcd_pnx4008_driver);
471}
472
473static void __exit usb_hcd_pnx4008_cleanup(void)
474{
475 return platform_driver_unregister(&usb_hcd_pnx4008_driver);
476}
477
478module_init(usb_hcd_pnx4008_init);
479module_exit(usb_hcd_pnx4008_cleanup);
diff --git a/drivers/usb/host/ohci-pnx8550.c b/drivers/usb/host/ohci-pnx8550.c
index 6922b91b1704..de45eb0051a7 100644
--- a/drivers/usb/host/ohci-pnx8550.c
+++ b/drivers/usb/host/ohci-pnx8550.c
@@ -240,19 +240,3 @@ static struct platform_driver ohci_hcd_pnx8550_driver = {
240 .remove = ohci_hcd_pnx8550_drv_remove, 240 .remove = ohci_hcd_pnx8550_drv_remove,
241}; 241};
242 242
243static int __init ohci_hcd_pnx8550_init (void)
244{
245 pr_debug (DRIVER_INFO " (pnx8550)");
246 pr_debug ("block sizes: ed %d td %d\n",
247 sizeof (struct ed), sizeof (struct td));
248
249 return platform_driver_register(&ohci_hcd_pnx8550_driver);
250}
251
252static void __exit ohci_hcd_pnx8550_cleanup (void)
253{
254 platform_driver_unregister(&ohci_hcd_pnx8550_driver);
255}
256
257module_init (ohci_hcd_pnx8550_init);
258module_exit (ohci_hcd_pnx8550_cleanup);
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
new file mode 100644
index 000000000000..08e237c7bc43
--- /dev/null
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -0,0 +1,232 @@
1/*
2 * OHCI HCD (Host Controller Driver) for USB.
3 *
4 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
5 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
6 * (C) Copyright 2002 Hewlett-Packard Company
7 * (C) Copyright 2006 Sylvain Munaut <tnt@246tNt.com>
8 *
9 * Bus glue for OHCI HC on the of_platform bus
10 *
11 * Modified for of_platform bus from ohci-sa1111.c
12 *
13 * This file is licenced under the GPL.
14 */
15
16#include <linux/signal.h>
17
18#include <asm/of_platform.h>
19#include <asm/prom.h>
20
21
22static int __devinit
23ohci_ppc_of_start(struct usb_hcd *hcd)
24{
25 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
26 int ret;
27
28 if ((ret = ohci_init(ohci)) < 0)
29 return ret;
30
31 if ((ret = ohci_run(ohci)) < 0) {
32 err("can't start %s", ohci_to_hcd(ohci)->self.bus_name);
33 ohci_stop(hcd);
34 return ret;
35 }
36
37 return 0;
38}
39
40static const struct hc_driver ohci_ppc_of_hc_driver = {
41 .description = hcd_name,
42 .product_desc = "OF OHCI",
43 .hcd_priv_size = sizeof(struct ohci_hcd),
44
45 /*
46 * generic hardware linkage
47 */
48 .irq = ohci_irq,
49 .flags = HCD_USB11 | HCD_MEMORY,
50
51 /*
52 * basic lifecycle operations
53 */
54 .start = ohci_ppc_of_start,
55 .stop = ohci_stop,
56 .shutdown = ohci_shutdown,
57
58 /*
59 * managing i/o requests and associated device resources
60 */
61 .urb_enqueue = ohci_urb_enqueue,
62 .urb_dequeue = ohci_urb_dequeue,
63 .endpoint_disable = ohci_endpoint_disable,
64
65 /*
66 * scheduling support
67 */
68 .get_frame_number = ohci_get_frame,
69
70 /*
71 * root hub support
72 */
73 .hub_status_data = ohci_hub_status_data,
74 .hub_control = ohci_hub_control,
75 .hub_irq_enable = ohci_rhsc_enable,
76#ifdef CONFIG_PM
77 .bus_suspend = ohci_bus_suspend,
78 .bus_resume = ohci_bus_resume,
79#endif
80 .start_port_reset = ohci_start_port_reset,
81};
82
83
84static int __devinit
85ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
86{
87 struct device_node *dn = op->node;
88 struct usb_hcd *hcd;
89 struct ohci_hcd *ohci;
90 struct resource res;
91 int irq;
92
93 int rv;
94 int is_bigendian;
95
96 if (usb_disabled())
97 return -ENODEV;
98
99 is_bigendian =
100 device_is_compatible(dn, "ohci-bigendian") ||
101 device_is_compatible(dn, "ohci-be");
102
103 dev_dbg(&op->dev, "initializing PPC-OF USB Controller\n");
104
105 rv = of_address_to_resource(dn, 0, &res);
106 if (rv)
107 return rv;
108
109 hcd = usb_create_hcd(&ohci_ppc_of_hc_driver, &op->dev, "PPC-OF USB");
110 if (!hcd)
111 return -ENOMEM;
112
113 hcd->rsrc_start = res.start;
114 hcd->rsrc_len = res.end - res.start + 1;
115
116 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
117 printk(KERN_ERR __FILE__ ": request_mem_region failed\n");
118 rv = -EBUSY;
119 goto err_rmr;
120 }
121
122 irq = irq_of_parse_and_map(dn, 0);
123 if (irq == NO_IRQ) {
124 printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n");
125 rv = -EBUSY;
126 goto err_irq;
127 }
128
129 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
130 if (!hcd->regs) {
131 printk(KERN_ERR __FILE__ ": ioremap failed\n");
132 rv = -ENOMEM;
133 goto err_ioremap;
134 }
135
136 ohci = hcd_to_ohci(hcd);
137 if (is_bigendian)
138 ohci->flags |= OHCI_QUIRK_BE_MMIO | OHCI_QUIRK_BE_DESC;
139
140 ohci_hcd_init(ohci);
141
142 rv = usb_add_hcd(hcd, irq, 0);
143 if (rv == 0)
144 return 0;
145
146 iounmap(hcd->regs);
147err_ioremap:
148 irq_dispose_mapping(irq);
149err_irq:
150 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
151err_rmr:
152 usb_put_hcd(hcd);
153
154 return rv;
155}
156
157static int ohci_hcd_ppc_of_remove(struct of_device *op)
158{
159 struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
160 dev_set_drvdata(&op->dev, NULL);
161
162 dev_dbg(&op->dev, "stopping PPC-OF USB Controller\n");
163
164 usb_remove_hcd(hcd);
165
166 iounmap(hcd->regs);
167 irq_dispose_mapping(hcd->irq);
168 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
169
170 usb_put_hcd(hcd);
171
172 return 0;
173}
174
175static int ohci_hcd_ppc_of_shutdown(struct of_device *op)
176{
177 struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
178
179 if (hcd->driver->shutdown)
180 hcd->driver->shutdown(hcd);
181
182 return 0;
183}
184
185
186static struct of_device_id ohci_hcd_ppc_of_match[] = {
187#ifdef CONFIG_USB_OHCI_HCD_PPC_OF_BE
188 {
189 .name = "usb",
190 .compatible = "ohci-bigendian",
191 },
192 {
193 .name = "usb",
194 .compatible = "ohci-be",
195 },
196#endif
197#ifdef CONFIG_USB_OHCI_HCD_PPC_OF_LE
198 {
199 .name = "usb",
200 .compatible = "ohci-littledian",
201 },
202 {
203 .name = "usb",
204 .compatible = "ohci-le",
205 },
206#endif
207 {},
208};
209MODULE_DEVICE_TABLE(of, ohci_hcd_ppc_of_match);
210
211#if !defined(CONFIG_USB_OHCI_HCD_PPC_OF_BE) && \
212 !defined(CONFIG_USB_OHCI_HCD_PPC_OF_LE)
213#error "No endianess selected for ppc-of-ohci"
214#endif
215
216
217static struct of_platform_driver ohci_hcd_ppc_of_driver = {
218 .name = "ppc-of-ohci",
219 .match_table = ohci_hcd_ppc_of_match,
220 .probe = ohci_hcd_ppc_of_probe,
221 .remove = ohci_hcd_ppc_of_remove,
222 .shutdown = ohci_hcd_ppc_of_shutdown,
223#ifdef CONFIG_PM
224 /*.suspend = ohci_hcd_ppc_soc_drv_suspend,*/
225 /*.resume = ohci_hcd_ppc_soc_drv_resume,*/
226#endif
227 .driver = {
228 .name = "ppc-of-ohci",
229 .owner = THIS_MODULE,
230 },
231};
232
diff --git a/drivers/usb/host/ohci-ppc-soc.c b/drivers/usb/host/ohci-ppc-soc.c
index e1a7eb817313..1a2e1777ca61 100644
--- a/drivers/usb/host/ohci-ppc-soc.c
+++ b/drivers/usb/host/ohci-ppc-soc.c
@@ -72,7 +72,7 @@ static int usb_hcd_ppc_soc_probe(const struct hc_driver *driver,
72 } 72 }
73 73
74 ohci = hcd_to_ohci(hcd); 74 ohci = hcd_to_ohci(hcd);
75 ohci->flags |= OHCI_BIG_ENDIAN; 75 ohci->flags |= OHCI_QUIRK_BE_MMIO | OHCI_QUIRK_BE_DESC;
76 ohci_hcd_init(ohci); 76 ohci_hcd_init(ohci);
77 77
78 retval = usb_add_hcd(hcd, irq, IRQF_DISABLED); 78 retval = usb_add_hcd(hcd, irq, IRQF_DISABLED);
@@ -208,19 +208,3 @@ static struct platform_driver ohci_hcd_ppc_soc_driver = {
208 }, 208 },
209}; 209};
210 210
211static int __init ohci_hcd_ppc_soc_init(void)
212{
213 pr_debug(DRIVER_INFO " (PPC SOC)\n");
214 pr_debug("block sizes: ed %d td %d\n", sizeof(struct ed),
215 sizeof(struct td));
216
217 return platform_driver_register(&ohci_hcd_ppc_soc_driver);
218}
219
220static void __exit ohci_hcd_ppc_soc_cleanup(void)
221{
222 platform_driver_unregister(&ohci_hcd_ppc_soc_driver);
223}
224
225module_init(ohci_hcd_ppc_soc_init);
226module_exit(ohci_hcd_ppc_soc_cleanup);
diff --git a/drivers/usb/host/ohci-ps3.c b/drivers/usb/host/ohci-ps3.c
new file mode 100644
index 000000000000..69d948b4a701
--- /dev/null
+++ b/drivers/usb/host/ohci-ps3.c
@@ -0,0 +1,196 @@
1/*
2 * PS3 OHCI Host Controller driver
3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <asm/ps3.h>
22
23static int ps3_ohci_hc_reset(struct usb_hcd *hcd)
24{
25 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
26
27 ohci->flags |= OHCI_QUIRK_BE_MMIO;
28 ohci_hcd_init(ohci);
29 return ohci_init(ohci);
30}
31
32static int __devinit ps3_ohci_hc_start(struct usb_hcd *hcd)
33{
34 int result;
35 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
36
37 /* Handle root hub init quirk in spider south bridge. */
38 /* Also set PwrOn2PwrGood to 0x7f (254ms). */
39
40 ohci_writel(ohci, 0x7f000000 | RH_A_PSM | RH_A_OCPM,
41 &ohci->regs->roothub.a);
42 ohci_writel(ohci, 0x00060000, &ohci->regs->roothub.b);
43
44 result = ohci_run(ohci);
45
46 if (result < 0) {
47 err("can't start %s", hcd->self.bus_name);
48 ohci_stop(hcd);
49 }
50
51 return result;
52}
53
54static const struct hc_driver ps3_ohci_hc_driver = {
55 .description = hcd_name,
56 .product_desc = "PS3 OHCI Host Controller",
57 .hcd_priv_size = sizeof(struct ohci_hcd),
58 .irq = ohci_irq,
59 .flags = HCD_MEMORY | HCD_USB11,
60 .reset = ps3_ohci_hc_reset,
61 .start = ps3_ohci_hc_start,
62 .stop = ohci_stop,
63 .shutdown = ohci_shutdown,
64 .urb_enqueue = ohci_urb_enqueue,
65 .urb_dequeue = ohci_urb_dequeue,
66 .endpoint_disable = ohci_endpoint_disable,
67 .get_frame_number = ohci_get_frame,
68 .hub_status_data = ohci_hub_status_data,
69 .hub_control = ohci_hub_control,
70 .hub_irq_enable = ohci_rhsc_enable,
71 .start_port_reset = ohci_start_port_reset,
72#if defined(CONFIG_PM)
73 .bus_suspend = ohci_bus_suspend,
74 .bus_resume = ohci_bus_resume,
75#endif
76};
77
78/* redefine dev_dbg to do a syntax check */
79
80#if !defined(DEBUG)
81#undef dev_dbg
82static inline int __attribute__ ((format (printf, 2, 3))) dev_dbg(
83 const struct device *_dev, const char *fmt, ...) {return 0;}
84#endif
85
86static int ps3_ohci_sb_probe(struct ps3_system_bus_device *dev)
87{
88 int result;
89 struct usb_hcd *hcd;
90 unsigned int virq;
91 static u64 dummy_mask = DMA_32BIT_MASK;
92
93 if (usb_disabled()) {
94 result = -ENODEV;
95 goto fail_start;
96 }
97
98 result = ps3_mmio_region_create(dev->m_region);
99
100 if (result) {
101 dev_dbg(&dev->core, "%s:%d: ps3_map_mmio_region failed\n",
102 __func__, __LINE__);
103 result = -EPERM;
104 goto fail_mmio;
105 }
106
107 dev_dbg(&dev->core, "%s:%d: mmio mapped_addr %lxh\n", __func__,
108 __LINE__, dev->m_region->lpar_addr);
109
110 result = ps3_alloc_io_irq(dev->interrupt_id, &virq);
111
112 if (result) {
113 dev_dbg(&dev->core, "%s:%d: ps3_construct_io_irq(%d) failed.\n",
114 __func__, __LINE__, virq);
115 result = -EPERM;
116 goto fail_irq;
117 }
118
119 dev->core.power.power_state = PMSG_ON;
120 dev->core.dma_mask = &dummy_mask; /* FIXME: for improper usb code */
121
122 hcd = usb_create_hcd(&ps3_ohci_hc_driver, &dev->core, dev->core.bus_id);
123
124 if (!hcd) {
125 dev_dbg(&dev->core, "%s:%d: usb_create_hcd failed\n", __func__,
126 __LINE__);
127 result = -ENOMEM;
128 goto fail_create_hcd;
129 }
130
131 hcd->rsrc_start = dev->m_region->lpar_addr;
132 hcd->rsrc_len = dev->m_region->len;
133 hcd->regs = ioremap(dev->m_region->lpar_addr, dev->m_region->len);
134
135 if (!hcd->regs) {
136 dev_dbg(&dev->core, "%s:%d: ioremap failed\n", __func__,
137 __LINE__);
138 result = -EPERM;
139 goto fail_ioremap;
140 }
141
142 dev_dbg(&dev->core, "%s:%d: hcd->rsrc_start %lxh\n", __func__, __LINE__,
143 (unsigned long)hcd->rsrc_start);
144 dev_dbg(&dev->core, "%s:%d: hcd->rsrc_len %lxh\n", __func__, __LINE__,
145 (unsigned long)hcd->rsrc_len);
146 dev_dbg(&dev->core, "%s:%d: hcd->regs %lxh\n", __func__, __LINE__,
147 (unsigned long)hcd->regs);
148 dev_dbg(&dev->core, "%s:%d: virq %lu\n", __func__, __LINE__,
149 (unsigned long)virq);
150
151 ps3_system_bus_set_driver_data(dev, hcd);
152
153 result = usb_add_hcd(hcd, virq, IRQF_DISABLED);
154
155 if (result) {
156 dev_dbg(&dev->core, "%s:%d: usb_add_hcd failed (%d)\n",
157 __func__, __LINE__, result);
158 goto fail_add_hcd;
159 }
160
161 return result;
162
163fail_add_hcd:
164 iounmap(hcd->regs);
165fail_ioremap:
166 usb_put_hcd(hcd);
167fail_create_hcd:
168 ps3_free_io_irq(virq);
169fail_irq:
170 ps3_free_mmio_region(dev->m_region);
171fail_mmio:
172fail_start:
173 return result;
174}
175
176static int ps3_ohci_sb_remove (struct ps3_system_bus_device *dev)
177{
178 struct usb_hcd *hcd =
179 (struct usb_hcd *)ps3_system_bus_get_driver_data(dev);
180
181 usb_put_hcd(hcd);
182 ps3_system_bus_set_driver_data(dev, NULL);
183
184 return 0;
185}
186
187MODULE_ALIAS("ps3-ohci");
188
189static struct ps3_system_bus_driver ps3_ohci_sb_driver = {
190 .match_id = PS3_MATCH_ID_OHCI,
191 .core = {
192 .name = "ps3-ohci-driver",
193 },
194 .probe = ps3_ohci_sb_probe,
195 .remove = ps3_ohci_sb_remove,
196};
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index 3bbea844a9e3..f1563dc319d3 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -369,19 +369,3 @@ static struct platform_driver ohci_hcd_pxa27x_driver = {
369 }, 369 },
370}; 370};
371 371
372static int __init ohci_hcd_pxa27x_init (void)
373{
374 pr_debug (DRIVER_INFO " (pxa27x)");
375 pr_debug ("block sizes: ed %d td %d\n",
376 sizeof (struct ed), sizeof (struct td));
377
378 return platform_driver_register(&ohci_hcd_pxa27x_driver);
379}
380
381static void __exit ohci_hcd_pxa27x_cleanup (void)
382{
383 platform_driver_unregister(&ohci_hcd_pxa27x_driver);
384}
385
386module_init (ohci_hcd_pxa27x_init);
387module_exit (ohci_hcd_pxa27x_cleanup);
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index b350d45033e7..6829814b7aaf 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -501,15 +501,3 @@ static struct platform_driver ohci_hcd_s3c2410_driver = {
501 }, 501 },
502}; 502};
503 503
504static int __init ohci_hcd_s3c2410_init (void)
505{
506 return platform_driver_register(&ohci_hcd_s3c2410_driver);
507}
508
509static void __exit ohci_hcd_s3c2410_cleanup (void)
510{
511 platform_driver_unregister(&ohci_hcd_s3c2410_driver);
512}
513
514module_init (ohci_hcd_s3c2410_init);
515module_exit (ohci_hcd_s3c2410_cleanup);
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index fe0090e33675..0f48f2d99226 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -269,19 +269,3 @@ static struct sa1111_driver ohci_hcd_sa1111_driver = {
269 .remove = ohci_hcd_sa1111_drv_remove, 269 .remove = ohci_hcd_sa1111_drv_remove,
270}; 270};
271 271
272static int __init ohci_hcd_sa1111_init (void)
273{
274 dbg (DRIVER_INFO " (SA-1111)");
275 dbg ("block sizes: ed %d td %d",
276 sizeof (struct ed), sizeof (struct td));
277
278 return sa1111_driver_register(&ohci_hcd_sa1111_driver);
279}
280
281static void __exit ohci_hcd_sa1111_cleanup (void)
282{
283 sa1111_driver_unregister(&ohci_hcd_sa1111_driver);
284}
285
286module_init (ohci_hcd_sa1111_init);
287module_exit (ohci_hcd_sa1111_cleanup);
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 405257f3e853..0dafcda37291 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -394,8 +394,9 @@ struct ohci_hcd {
394#define OHCI_QUIRK_AMD756 0x01 /* erratum #4 */ 394#define OHCI_QUIRK_AMD756 0x01 /* erratum #4 */
395#define OHCI_QUIRK_SUPERIO 0x02 /* natsemi */ 395#define OHCI_QUIRK_SUPERIO 0x02 /* natsemi */
396#define OHCI_QUIRK_INITRESET 0x04 /* SiS, OPTi, ... */ 396#define OHCI_QUIRK_INITRESET 0x04 /* SiS, OPTi, ... */
397#define OHCI_BIG_ENDIAN 0x08 /* big endian HC */ 397#define OHCI_QUIRK_BE_DESC 0x08 /* BE descriptors */
398#define OHCI_QUIRK_ZFMICRO 0x10 /* Compaq ZFMicro chipset*/ 398#define OHCI_QUIRK_BE_MMIO 0x10 /* BE registers */
399#define OHCI_QUIRK_ZFMICRO 0x20 /* Compaq ZFMicro chipset*/
399 // there are also chip quirks/bugs in init logic 400 // there are also chip quirks/bugs in init logic
400 401
401}; 402};
@@ -439,117 +440,164 @@ static inline struct usb_hcd *ohci_to_hcd (const struct ohci_hcd *ohci)
439 * a minority (notably the IBM STB04XXX and the Motorola MPC5200 440 * a minority (notably the IBM STB04XXX and the Motorola MPC5200
440 * processors) implement them in big endian format. 441 * processors) implement them in big endian format.
441 * 442 *
443 * In addition some more exotic implementations like the Toshiba
444 * Spider (aka SCC) cell southbridge are "mixed" endian, that is,
445 * they have a different endianness for registers vs. in-memory
446 * descriptors.
447 *
442 * This attempts to support either format at compile time without a 448 * This attempts to support either format at compile time without a
443 * runtime penalty, or both formats with the additional overhead 449 * runtime penalty, or both formats with the additional overhead
444 * of checking a flag bit. 450 * of checking a flag bit.
451 *
 452 * That leads to some tricky Kconfig rules however. There are
453 * different defaults based on some arch/ppc platforms, though
454 * the basic rules are:
455 *
456 * Controller type Kconfig options needed
457 * --------------- ----------------------
458 * little endian CONFIG_USB_OHCI_LITTLE_ENDIAN
459 *
460 * fully big endian CONFIG_USB_OHCI_BIG_ENDIAN_DESC _and_
461 * CONFIG_USB_OHCI_BIG_ENDIAN_MMIO
462 *
463 * mixed endian CONFIG_USB_OHCI_LITTLE_ENDIAN _and_
464 * CONFIG_USB_OHCI_BIG_ENDIAN_{MMIO,DESC}
465 *
466 * (If you have a mixed endian controller, you -must- also define
467 * CONFIG_USB_OHCI_LITTLE_ENDIAN or things will not work when building
468 * both your mixed endian and a fully big endian controller support in
469 * the same kernel image).
445 */ 470 */
446 471
447#ifdef CONFIG_USB_OHCI_BIG_ENDIAN 472#ifdef CONFIG_USB_OHCI_BIG_ENDIAN_DESC
473#ifdef CONFIG_USB_OHCI_LITTLE_ENDIAN
474#define big_endian_desc(ohci) (ohci->flags & OHCI_QUIRK_BE_DESC)
475#else
476#define big_endian_desc(ohci) 1 /* only big endian */
477#endif
478#else
479#define big_endian_desc(ohci) 0 /* only little endian */
480#endif
448 481
482#ifdef CONFIG_USB_OHCI_BIG_ENDIAN_MMIO
449#ifdef CONFIG_USB_OHCI_LITTLE_ENDIAN 483#ifdef CONFIG_USB_OHCI_LITTLE_ENDIAN
450#define big_endian(ohci) (ohci->flags & OHCI_BIG_ENDIAN) /* either */ 484#define big_endian_mmio(ohci) (ohci->flags & OHCI_QUIRK_BE_MMIO)
451#else 485#else
452#define big_endian(ohci) 1 /* only big endian */ 486#define big_endian_mmio(ohci) 1 /* only big endian */
487#endif
488#else
489#define big_endian_mmio(ohci) 0 /* only little endian */
453#endif 490#endif
454 491
455/* 492/*
456 * Big-endian read/write functions are arch-specific. 493 * Big-endian read/write functions are arch-specific.
457 * Other arches can be added if/when they're needed. 494 * Other arches can be added if/when they're needed.
495 *
496 * REVISIT: arch/powerpc now has readl/writel_be, so the
497 * definition below can die once the STB04xxx support is
498 * finally ported over.
458 */ 499 */
459#if defined(CONFIG_PPC) 500#if defined(CONFIG_PPC) && !defined(CONFIG_PPC_MERGE)
460#define readl_be(addr) in_be32((__force unsigned *)addr) 501#define readl_be(addr) in_be32((__force unsigned *)addr)
461#define writel_be(val, addr) out_be32((__force unsigned *)addr, val) 502#define writel_be(val, addr) out_be32((__force unsigned *)addr, val)
462#endif 503#endif
463 504
464static inline unsigned int ohci_readl (const struct ohci_hcd *ohci, 505static inline unsigned int _ohci_readl (const struct ohci_hcd *ohci,
465 __hc32 __iomem * regs) 506 __hc32 __iomem * regs)
466{ 507{
467 return big_endian(ohci) ? readl_be (regs) : readl ((__force u32 *)regs); 508#ifdef CONFIG_USB_OHCI_BIG_ENDIAN_MMIO
509 return big_endian_mmio(ohci) ?
510 readl_be ((__force u32 *)regs) :
511 readl ((__force u32 *)regs);
512#else
513 return readl ((__force u32 *)regs);
514#endif
468} 515}
469 516
470static inline void ohci_writel (const struct ohci_hcd *ohci, 517static inline void _ohci_writel (const struct ohci_hcd *ohci,
471 const unsigned int val, __hc32 __iomem *regs) 518 const unsigned int val, __hc32 __iomem *regs)
472{ 519{
473 big_endian(ohci) ? writel_be (val, regs) : 520#ifdef CONFIG_USB_OHCI_BIG_ENDIAN_MMIO
474 writel (val, (__force u32 *)regs); 521 big_endian_mmio(ohci) ?
522 writel_be (val, (__force u32 *)regs) :
523 writel (val, (__force u32 *)regs);
524#else
525 writel (val, (__force u32 *)regs);
526#endif
475} 527}
476 528
477#else /* !CONFIG_USB_OHCI_BIG_ENDIAN */
478
479#define big_endian(ohci) 0 /* only little endian */
480
481#ifdef CONFIG_ARCH_LH7A404 529#ifdef CONFIG_ARCH_LH7A404
482 /* Marc Singer: at the time this code was written, the LH7A404 530/* Marc Singer: at the time this code was written, the LH7A404
483 * had a problem reading the USB host registers. This 531 * had a problem reading the USB host registers. This
484 * implementation of the ohci_readl function performs the read 532 * implementation of the ohci_readl function performs the read
485 * twice as a work-around. 533 * twice as a work-around.
486 */ 534 */
487static inline unsigned int 535#define ohci_readl(o,r) (_ohci_readl(o,r),_ohci_readl(o,r))
488ohci_readl (const struct ohci_hcd *ohci, const __hc32 *regs) 536#define ohci_writel(o,v,r) _ohci_writel(o,v,r)
489{
490 *(volatile __force unsigned int*) regs;
491 return *(volatile __force unsigned int*) regs;
492}
493#else 537#else
494 /* Standard version of ohci_readl uses standard, platform 538#define ohci_readl(o,r) _ohci_readl(o,r)
495 * specific implementation. */ 539#define ohci_writel(o,v,r) _ohci_writel(o,v,r)
496static inline unsigned int
497ohci_readl (const struct ohci_hcd *ohci, __hc32 __iomem * regs)
498{
499 return readl(regs);
500}
501#endif 540#endif
502 541
503static inline void ohci_writel (const struct ohci_hcd *ohci,
504 const unsigned int val, __hc32 __iomem *regs)
505{
506 writel (val, regs);
507}
508
509#endif /* !CONFIG_USB_OHCI_BIG_ENDIAN */
510 542
511/*-------------------------------------------------------------------------*/ 543/*-------------------------------------------------------------------------*/
512 544
513/* cpu to ohci */ 545/* cpu to ohci */
514static inline __hc16 cpu_to_hc16 (const struct ohci_hcd *ohci, const u16 x) 546static inline __hc16 cpu_to_hc16 (const struct ohci_hcd *ohci, const u16 x)
515{ 547{
516 return big_endian(ohci) ? (__force __hc16)cpu_to_be16(x) : (__force __hc16)cpu_to_le16(x); 548 return big_endian_desc(ohci) ?
549 (__force __hc16)cpu_to_be16(x) :
550 (__force __hc16)cpu_to_le16(x);
517} 551}
518 552
519static inline __hc16 cpu_to_hc16p (const struct ohci_hcd *ohci, const u16 *x) 553static inline __hc16 cpu_to_hc16p (const struct ohci_hcd *ohci, const u16 *x)
520{ 554{
521 return big_endian(ohci) ? cpu_to_be16p(x) : cpu_to_le16p(x); 555 return big_endian_desc(ohci) ?
556 cpu_to_be16p(x) :
557 cpu_to_le16p(x);
522} 558}
523 559
524static inline __hc32 cpu_to_hc32 (const struct ohci_hcd *ohci, const u32 x) 560static inline __hc32 cpu_to_hc32 (const struct ohci_hcd *ohci, const u32 x)
525{ 561{
526 return big_endian(ohci) ? (__force __hc32)cpu_to_be32(x) : (__force __hc32)cpu_to_le32(x); 562 return big_endian_desc(ohci) ?
563 (__force __hc32)cpu_to_be32(x) :
564 (__force __hc32)cpu_to_le32(x);
527} 565}
528 566
529static inline __hc32 cpu_to_hc32p (const struct ohci_hcd *ohci, const u32 *x) 567static inline __hc32 cpu_to_hc32p (const struct ohci_hcd *ohci, const u32 *x)
530{ 568{
531 return big_endian(ohci) ? cpu_to_be32p(x) : cpu_to_le32p(x); 569 return big_endian_desc(ohci) ?
570 cpu_to_be32p(x) :
571 cpu_to_le32p(x);
532} 572}
533 573
534/* ohci to cpu */ 574/* ohci to cpu */
535static inline u16 hc16_to_cpu (const struct ohci_hcd *ohci, const __hc16 x) 575static inline u16 hc16_to_cpu (const struct ohci_hcd *ohci, const __hc16 x)
536{ 576{
537 return big_endian(ohci) ? be16_to_cpu((__force __be16)x) : le16_to_cpu((__force __le16)x); 577 return big_endian_desc(ohci) ?
578 be16_to_cpu((__force __be16)x) :
579 le16_to_cpu((__force __le16)x);
538} 580}
539 581
540static inline u16 hc16_to_cpup (const struct ohci_hcd *ohci, const __hc16 *x) 582static inline u16 hc16_to_cpup (const struct ohci_hcd *ohci, const __hc16 *x)
541{ 583{
542 return big_endian(ohci) ? be16_to_cpup((__force __be16 *)x) : le16_to_cpup((__force __le16 *)x); 584 return big_endian_desc(ohci) ?
585 be16_to_cpup((__force __be16 *)x) :
586 le16_to_cpup((__force __le16 *)x);
543} 587}
544 588
545static inline u32 hc32_to_cpu (const struct ohci_hcd *ohci, const __hc32 x) 589static inline u32 hc32_to_cpu (const struct ohci_hcd *ohci, const __hc32 x)
546{ 590{
547 return big_endian(ohci) ? be32_to_cpu((__force __be32)x) : le32_to_cpu((__force __le32)x); 591 return big_endian_desc(ohci) ?
592 be32_to_cpu((__force __be32)x) :
593 le32_to_cpu((__force __le32)x);
548} 594}
549 595
550static inline u32 hc32_to_cpup (const struct ohci_hcd *ohci, const __hc32 *x) 596static inline u32 hc32_to_cpup (const struct ohci_hcd *ohci, const __hc32 *x)
551{ 597{
552 return big_endian(ohci) ? be32_to_cpup((__force __be32 *)x) : le32_to_cpup((__force __le32 *)x); 598 return big_endian_desc(ohci) ?
599 be32_to_cpup((__force __be32 *)x) :
600 le32_to_cpup((__force __le32 *)x);
553} 601}
554 602
555/*-------------------------------------------------------------------------*/ 603/*-------------------------------------------------------------------------*/
@@ -557,6 +605,9 @@ static inline u32 hc32_to_cpup (const struct ohci_hcd *ohci, const __hc32 *x)
557/* HCCA frame number is 16 bits, but is accessed as 32 bits since not all 605/* HCCA frame number is 16 bits, but is accessed as 32 bits since not all
558 * hardware handles 16 bit reads. That creates a different confusion on 606 * hardware handles 16 bit reads. That creates a different confusion on
559 * some big-endian SOC implementations. Same thing happens with PSW access. 607 * some big-endian SOC implementations. Same thing happens with PSW access.
608 *
609 * FIXME: Deal with that as a runtime quirk when STB03xxx is ported over
610 * to arch/powerpc
560 */ 611 */
561 612
562#ifdef CONFIG_STB03xxx 613#ifdef CONFIG_STB03xxx
@@ -568,7 +619,7 @@ static inline u32 hc32_to_cpup (const struct ohci_hcd *ohci, const __hc32 *x)
568static inline u16 ohci_frame_no(const struct ohci_hcd *ohci) 619static inline u16 ohci_frame_no(const struct ohci_hcd *ohci)
569{ 620{
570 u32 tmp; 621 u32 tmp;
571 if (big_endian(ohci)) { 622 if (big_endian_desc(ohci)) {
572 tmp = be32_to_cpup((__force __be32 *)&ohci->hcca->frame_no); 623 tmp = be32_to_cpup((__force __be32 *)&ohci->hcca->frame_no);
573 tmp >>= OHCI_BE_FRAME_NO_SHIFT; 624 tmp >>= OHCI_BE_FRAME_NO_SHIFT;
574 } else 625 } else
@@ -580,7 +631,7 @@ static inline u16 ohci_frame_no(const struct ohci_hcd *ohci)
580static inline __hc16 *ohci_hwPSWp(const struct ohci_hcd *ohci, 631static inline __hc16 *ohci_hwPSWp(const struct ohci_hcd *ohci,
581 const struct td *td, int index) 632 const struct td *td, int index)
582{ 633{
583 return (__hc16 *)(big_endian(ohci) ? 634 return (__hc16 *)(big_endian_desc(ohci) ?
584 &td->hwPSW[index ^ 1] : &td->hwPSW[index]); 635 &td->hwPSW[index ^ 1] : &td->hwPSW[index]);
585} 636}
586 637
diff --git a/drivers/usb/host/uhci-debug.c b/drivers/usb/host/uhci-debug.c
index e345f15b7d87..5d6c06bc4524 100644
--- a/drivers/usb/host/uhci-debug.c
+++ b/drivers/usb/host/uhci-debug.c
@@ -168,9 +168,13 @@ static int uhci_show_qh(struct uhci_qh *qh, char *buf, int len, int space)
168 space, "", qh, qtype, 168 space, "", qh, qtype,
169 le32_to_cpu(qh->link), le32_to_cpu(element)); 169 le32_to_cpu(qh->link), le32_to_cpu(element));
170 if (qh->type == USB_ENDPOINT_XFER_ISOC) 170 if (qh->type == USB_ENDPOINT_XFER_ISOC)
171 out += sprintf(out, "%*s period %d frame %x desc [%p]\n", 171 out += sprintf(out, "%*s period %d phase %d load %d us, "
172 space, "", qh->period, qh->iso_frame, 172 "frame %x desc [%p]\n",
173 qh->iso_packet_desc); 173 space, "", qh->period, qh->phase, qh->load,
174 qh->iso_frame, qh->iso_packet_desc);
175 else if (qh->type == USB_ENDPOINT_XFER_INT)
176 out += sprintf(out, "%*s period %d phase %d load %d us\n",
177 space, "", qh->period, qh->phase, qh->load);
174 178
175 if (element & UHCI_PTR_QH) 179 if (element & UHCI_PTR_QH)
176 out += sprintf(out, "%*s Element points to QH (bug?)\n", space, ""); 180 out += sprintf(out, "%*s Element points to QH (bug?)\n", space, "");
@@ -208,7 +212,7 @@ static int uhci_show_qh(struct uhci_qh *qh, char *buf, int len, int space)
208 space, "", nurbs); 212 space, "", nurbs);
209 } 213 }
210 214
211 if (qh->udev) { 215 if (qh->dummy_td) {
212 out += sprintf(out, "%*s Dummy TD\n", space, ""); 216 out += sprintf(out, "%*s Dummy TD\n", space, "");
213 out += uhci_show_td(qh->dummy_td, out, len - (out - buf), 0); 217 out += uhci_show_td(qh->dummy_td, out, len - (out - buf), 0);
214 } 218 }
@@ -347,31 +351,80 @@ static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
347 struct uhci_qh *qh; 351 struct uhci_qh *qh;
348 struct uhci_td *td; 352 struct uhci_td *td;
349 struct list_head *tmp, *head; 353 struct list_head *tmp, *head;
354 int nframes, nerrs;
350 355
351 out += uhci_show_root_hub_state(uhci, out, len - (out - buf)); 356 out += uhci_show_root_hub_state(uhci, out, len - (out - buf));
352 out += sprintf(out, "HC status\n"); 357 out += sprintf(out, "HC status\n");
353 out += uhci_show_status(uhci, out, len - (out - buf)); 358 out += uhci_show_status(uhci, out, len - (out - buf));
359
360 out += sprintf(out, "Periodic load table\n");
361 for (i = 0; i < MAX_PHASE; ++i) {
362 out += sprintf(out, "\t%d", uhci->load[i]);
363 if (i % 8 == 7)
364 *out++ = '\n';
365 }
366 out += sprintf(out, "Total: %d, #INT: %d, #ISO: %d\n",
367 uhci->total_load,
368 uhci_to_hcd(uhci)->self.bandwidth_int_reqs,
369 uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs);
354 if (debug <= 1) 370 if (debug <= 1)
355 return out - buf; 371 return out - buf;
356 372
357 out += sprintf(out, "Frame List\n"); 373 out += sprintf(out, "Frame List\n");
374 nframes = 10;
375 nerrs = 0;
358 for (i = 0; i < UHCI_NUMFRAMES; ++i) { 376 for (i = 0; i < UHCI_NUMFRAMES; ++i) {
377 __le32 link, qh_dma;
378
379 j = 0;
359 td = uhci->frame_cpu[i]; 380 td = uhci->frame_cpu[i];
381 link = uhci->frame[i];
360 if (!td) 382 if (!td)
361 continue; 383 goto check_link;
362 384
363 out += sprintf(out, "- Frame %d\n", i); \ 385 if (nframes > 0) {
364 if (td->dma_handle != (dma_addr_t)uhci->frame[i]) 386 out += sprintf(out, "- Frame %d -> (%08x)\n",
365 out += sprintf(out, " frame list does not match td->dma_handle!\n"); 387 i, le32_to_cpu(link));
388 j = 1;
389 }
366 390
367 head = &td->fl_list; 391 head = &td->fl_list;
368 tmp = head; 392 tmp = head;
369 do { 393 do {
370 td = list_entry(tmp, struct uhci_td, fl_list); 394 td = list_entry(tmp, struct uhci_td, fl_list);
371 tmp = tmp->next; 395 tmp = tmp->next;
372 out += uhci_show_td(td, out, len - (out - buf), 4); 396 if (cpu_to_le32(td->dma_handle) != link) {
397 if (nframes > 0)
398 out += sprintf(out, " link does "
399 "not match list entry!\n");
400 else
401 ++nerrs;
402 }
403 if (nframes > 0)
404 out += uhci_show_td(td, out,
405 len - (out - buf), 4);
406 link = td->link;
373 } while (tmp != head); 407 } while (tmp != head);
408
409check_link:
410 qh_dma = uhci_frame_skel_link(uhci, i);
411 if (link != qh_dma) {
412 if (nframes > 0) {
413 if (!j) {
414 out += sprintf(out,
415 "- Frame %d -> (%08x)\n",
416 i, le32_to_cpu(link));
417 j = 1;
418 }
419 out += sprintf(out, " link does not match "
420 "QH (%08x)!\n", le32_to_cpu(qh_dma));
421 } else
422 ++nerrs;
423 }
424 nframes -= j;
374 } 425 }
426 if (nerrs > 0)
427 out += sprintf(out, "Skipped %d bad links\n", nerrs);
375 428
376 out += sprintf(out, "Skeleton QHs\n"); 429 out += sprintf(out, "Skeleton QHs\n");
377 430
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index e0d4c2358b39..49b9d390b95f 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -92,6 +92,34 @@ static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state);
92static void wakeup_rh(struct uhci_hcd *uhci); 92static void wakeup_rh(struct uhci_hcd *uhci);
93static void uhci_get_current_frame_number(struct uhci_hcd *uhci); 93static void uhci_get_current_frame_number(struct uhci_hcd *uhci);
94 94
95/*
96 * Calculate the link pointer DMA value for the first Skeleton QH in a frame.
97 */
98static __le32 uhci_frame_skel_link(struct uhci_hcd *uhci, int frame)
99{
100 int skelnum;
101
102 /*
103 * The interrupt queues will be interleaved as evenly as possible.
104 * There's not much to be done about period-1 interrupts; they have
105 * to occur in every frame. But we can schedule period-2 interrupts
106 * in odd-numbered frames, period-4 interrupts in frames congruent
107 * to 2 (mod 4), and so on. This way each frame only has two
108 * interrupt QHs, which will help spread out bandwidth utilization.
109 *
110 * ffs (Find First bit Set) does exactly what we need:
111 * 1,3,5,... => ffs = 0 => use skel_int2_qh = skelqh[8],
112 * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[7], etc.
113 * ffs >= 7 => not on any high-period queue, so use
114 * skel_int1_qh = skelqh[9].
 115 * Add in UHCI_NUMFRAMES to ensure at least one bit is set.
116 */
117 skelnum = 8 - (int) __ffs(frame | UHCI_NUMFRAMES);
118 if (skelnum <= 1)
119 skelnum = 9;
120 return UHCI_PTR_QH | cpu_to_le32(uhci->skelqh[skelnum]->dma_handle);
121}
122
95#include "uhci-debug.c" 123#include "uhci-debug.c"
96#include "uhci-q.c" 124#include "uhci-q.c"
97#include "uhci-hub.c" 125#include "uhci-hub.c"
@@ -631,32 +659,11 @@ static int uhci_start(struct usb_hcd *hcd)
631 /* 659 /*
632 * Fill the frame list: make all entries point to the proper 660 * Fill the frame list: make all entries point to the proper
633 * interrupt queue. 661 * interrupt queue.
634 *
635 * The interrupt queues will be interleaved as evenly as possible.
636 * There's not much to be done about period-1 interrupts; they have
637 * to occur in every frame. But we can schedule period-2 interrupts
638 * in odd-numbered frames, period-4 interrupts in frames congruent
639 * to 2 (mod 4), and so on. This way each frame only has two
640 * interrupt QHs, which will help spread out bandwidth utilization.
641 */ 662 */
642 for (i = 0; i < UHCI_NUMFRAMES; i++) { 663 for (i = 0; i < UHCI_NUMFRAMES; i++) {
643 int irq;
644
645 /*
646 * ffs (Find First bit Set) does exactly what we need:
647 * 1,3,5,... => ffs = 0 => use skel_int2_qh = skelqh[8],
648 * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[7], etc.
649 * ffs >= 7 => not on any high-period queue, so use
650 * skel_int1_qh = skelqh[9].
651 * Add UHCI_NUMFRAMES to insure at least one bit is set.
652 */
653 irq = 8 - (int) __ffs(i + UHCI_NUMFRAMES);
654 if (irq <= 1)
655 irq = 9;
656 664
657 /* Only place we don't use the frame list routines */ 665 /* Only place we don't use the frame list routines */
658 uhci->frame[i] = UHCI_PTR_QH | 666 uhci->frame[i] = uhci_frame_skel_link(uhci, i);
659 cpu_to_le32(uhci->skelqh[irq]->dma_handle);
660 } 667 }
661 668
662 /* 669 /*
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
index 108e3de2dc26..74469b5bcb61 100644
--- a/drivers/usb/host/uhci-hcd.h
+++ b/drivers/usb/host/uhci-hcd.h
@@ -83,6 +83,7 @@
83#define UHCI_MAX_SOF_NUMBER 2047 /* in an SOF packet */ 83#define UHCI_MAX_SOF_NUMBER 2047 /* in an SOF packet */
84#define CAN_SCHEDULE_FRAMES 1000 /* how far in the future frames 84#define CAN_SCHEDULE_FRAMES 1000 /* how far in the future frames
85 * can be scheduled */ 85 * can be scheduled */
86#define MAX_PHASE 32 /* Periodic scheduling length */
86 87
87/* When no queues need Full-Speed Bandwidth Reclamation, 88/* When no queues need Full-Speed Bandwidth Reclamation,
88 * delay this long before turning FSBR off */ 89 * delay this long before turning FSBR off */
@@ -141,6 +142,8 @@ struct uhci_qh {
141 unsigned long advance_jiffies; /* Time of last queue advance */ 142 unsigned long advance_jiffies; /* Time of last queue advance */
142 unsigned int unlink_frame; /* When the QH was unlinked */ 143 unsigned int unlink_frame; /* When the QH was unlinked */
143 unsigned int period; /* For Interrupt and Isochronous QHs */ 144 unsigned int period; /* For Interrupt and Isochronous QHs */
145 short phase; /* Between 0 and period-1 */
146 short load; /* Periodic time requirement, in us */
144 unsigned int iso_frame; /* Frame # for iso_packet_desc */ 147 unsigned int iso_frame; /* Frame # for iso_packet_desc */
145 int iso_status; /* Status for Isochronous URBs */ 148 int iso_status; /* Status for Isochronous URBs */
146 149
@@ -153,6 +156,8 @@ struct uhci_qh {
153 unsigned int needs_fixup:1; /* Must fix the TD toggle values */ 156 unsigned int needs_fixup:1; /* Must fix the TD toggle values */
154 unsigned int is_stopped:1; /* Queue was stopped by error/unlink */ 157 unsigned int is_stopped:1; /* Queue was stopped by error/unlink */
155 unsigned int wait_expired:1; /* QH_WAIT_TIMEOUT has expired */ 158 unsigned int wait_expired:1; /* QH_WAIT_TIMEOUT has expired */
159 unsigned int bandwidth_reserved:1; /* Periodic bandwidth has
160 * been allocated */
156} __attribute__((aligned(16))); 161} __attribute__((aligned(16)));
157 162
158/* 163/*
@@ -414,6 +419,9 @@ struct uhci_hcd {
414 419
415 wait_queue_head_t waitqh; /* endpoint_disable waiters */ 420 wait_queue_head_t waitqh; /* endpoint_disable waiters */
416 int num_waiting; /* Number of waiters */ 421 int num_waiting; /* Number of waiters */
422
423 int total_load; /* Sum of array values */
424 short load[MAX_PHASE]; /* Periodic allocations */
417}; 425};
418 426
419/* Convert between a usb_hcd pointer and the corresponding uhci_hcd */ 427/* Convert between a usb_hcd pointer and the corresponding uhci_hcd */
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 30b88459ac7d..2cbb239e63f8 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -248,16 +248,26 @@ static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
248 INIT_LIST_HEAD(&qh->node); 248 INIT_LIST_HEAD(&qh->node);
249 249
250 if (udev) { /* Normal QH */ 250 if (udev) { /* Normal QH */
251 qh->dummy_td = uhci_alloc_td(uhci); 251 qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
252 if (!qh->dummy_td) { 252 if (qh->type != USB_ENDPOINT_XFER_ISOC) {
253 dma_pool_free(uhci->qh_pool, qh, dma_handle); 253 qh->dummy_td = uhci_alloc_td(uhci);
254 return NULL; 254 if (!qh->dummy_td) {
255 dma_pool_free(uhci->qh_pool, qh, dma_handle);
256 return NULL;
257 }
255 } 258 }
256 qh->state = QH_STATE_IDLE; 259 qh->state = QH_STATE_IDLE;
257 qh->hep = hep; 260 qh->hep = hep;
258 qh->udev = udev; 261 qh->udev = udev;
259 hep->hcpriv = qh; 262 hep->hcpriv = qh;
260 qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; 263
264 if (qh->type == USB_ENDPOINT_XFER_INT ||
265 qh->type == USB_ENDPOINT_XFER_ISOC)
266 qh->load = usb_calc_bus_time(udev->speed,
267 usb_endpoint_dir_in(&hep->desc),
268 qh->type == USB_ENDPOINT_XFER_ISOC,
269 le16_to_cpu(hep->desc.wMaxPacketSize))
270 / 1000 + 1;
261 271
262 } else { /* Skeleton QH */ 272 } else { /* Skeleton QH */
263 qh->state = QH_STATE_ACTIVE; 273 qh->state = QH_STATE_ACTIVE;
@@ -275,7 +285,8 @@ static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
275 list_del(&qh->node); 285 list_del(&qh->node);
276 if (qh->udev) { 286 if (qh->udev) {
277 qh->hep->hcpriv = NULL; 287 qh->hep->hcpriv = NULL;
278 uhci_free_td(uhci, qh->dummy_td); 288 if (qh->dummy_td)
289 uhci_free_td(uhci, qh->dummy_td);
279 } 290 }
280 dma_pool_free(uhci->qh_pool, qh, qh->dma_handle); 291 dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
281} 292}
@@ -327,7 +338,7 @@ static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
327 goto done; 338 goto done;
328 qh->element = UHCI_PTR_TERM; 339 qh->element = UHCI_PTR_TERM;
329 340
330 /* Control pipes have to worry about toggles */ 341 /* Control pipes don't have to worry about toggles */
331 if (qh->type == USB_ENDPOINT_XFER_CONTROL) 342 if (qh->type == USB_ENDPOINT_XFER_CONTROL)
332 goto done; 343 goto done;
333 344
@@ -493,6 +504,121 @@ static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
493 wake_up_all(&uhci->waitqh); 504 wake_up_all(&uhci->waitqh);
494} 505}
495 506
507/*
508 * Find the highest existing bandwidth load for a given phase and period.
509 */
510static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
511{
512 int highest_load = uhci->load[phase];
513
514 for (phase += period; phase < MAX_PHASE; phase += period)
515 highest_load = max_t(int, highest_load, uhci->load[phase]);
516 return highest_load;
517}
518
519/*
520 * Set qh->phase to the optimal phase for a periodic transfer and
521 * check whether the bandwidth requirement is acceptable.
522 */
523static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
524{
525 int minimax_load;
526
527 /* Find the optimal phase (unless it is already set) and get
528 * its load value. */
529 if (qh->phase >= 0)
530 minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
531 else {
532 int phase, load;
533 int max_phase = min_t(int, MAX_PHASE, qh->period);
534
535 qh->phase = 0;
536 minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
537 for (phase = 1; phase < max_phase; ++phase) {
538 load = uhci_highest_load(uhci, phase, qh->period);
539 if (load < minimax_load) {
540 minimax_load = load;
541 qh->phase = phase;
542 }
543 }
544 }
545
546 /* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
547 if (minimax_load + qh->load > 900) {
548 dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
549 "period %d, phase %d, %d + %d us\n",
550 qh->period, qh->phase, minimax_load, qh->load);
551 return -ENOSPC;
552 }
553 return 0;
554}
555
556/*
557 * Reserve a periodic QH's bandwidth in the schedule
558 */
559static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
560{
561 int i;
562 int load = qh->load;
563 char *p = "??";
564
565 for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
566 uhci->load[i] += load;
567 uhci->total_load += load;
568 }
569 uhci_to_hcd(uhci)->self.bandwidth_allocated =
570 uhci->total_load / MAX_PHASE;
571 switch (qh->type) {
572 case USB_ENDPOINT_XFER_INT:
573 ++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
574 p = "INT";
575 break;
576 case USB_ENDPOINT_XFER_ISOC:
577 ++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
578 p = "ISO";
579 break;
580 }
581 qh->bandwidth_reserved = 1;
582 dev_dbg(uhci_dev(uhci),
583 "%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
584 "reserve", qh->udev->devnum,
585 qh->hep->desc.bEndpointAddress, p,
586 qh->period, qh->phase, load);
587}
588
589/*
590 * Release a periodic QH's bandwidth reservation
591 */
592static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
593{
594 int i;
595 int load = qh->load;
596 char *p = "??";
597
598 for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
599 uhci->load[i] -= load;
600 uhci->total_load -= load;
601 }
602 uhci_to_hcd(uhci)->self.bandwidth_allocated =
603 uhci->total_load / MAX_PHASE;
604 switch (qh->type) {
605 case USB_ENDPOINT_XFER_INT:
606 --uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
607 p = "INT";
608 break;
609 case USB_ENDPOINT_XFER_ISOC:
610 --uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
611 p = "ISO";
612 break;
613 }
614 qh->bandwidth_reserved = 0;
615 dev_dbg(uhci_dev(uhci),
616 "%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
617 "release", qh->udev->devnum,
618 qh->hep->desc.bEndpointAddress, p,
619 qh->period, qh->phase, load);
620}
621
496static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, 622static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
497 struct urb *urb) 623 struct urb *urb)
498{ 624{
@@ -796,7 +922,6 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
796 wmb(); 922 wmb();
797 qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE); 923 qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
798 qh->dummy_td = td; 924 qh->dummy_td = td;
799 qh->period = urb->interval;
800 925
801 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), 926 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
802 usb_pipeout(urb->pipe), toggle); 927 usb_pipeout(urb->pipe), toggle);
@@ -827,28 +952,42 @@ static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
827static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, 952static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
828 struct uhci_qh *qh) 953 struct uhci_qh *qh)
829{ 954{
830 int exponent; 955 int ret;
831 956
832 /* USB 1.1 interrupt transfers only involve one packet per interval. 957 /* USB 1.1 interrupt transfers only involve one packet per interval.
833 * Drivers can submit URBs of any length, but longer ones will need 958 * Drivers can submit URBs of any length, but longer ones will need
834 * multiple intervals to complete. 959 * multiple intervals to complete.
835 */ 960 */
836 961
837 /* Figure out which power-of-two queue to use */ 962 if (!qh->bandwidth_reserved) {
838 for (exponent = 7; exponent >= 0; --exponent) { 963 int exponent;
839 if ((1 << exponent) <= urb->interval)
840 break;
841 }
842 if (exponent < 0)
843 return -EINVAL;
844 urb->interval = 1 << exponent;
845 964
846 if (qh->period == 0) 965 /* Figure out which power-of-two queue to use */
966 for (exponent = 7; exponent >= 0; --exponent) {
967 if ((1 << exponent) <= urb->interval)
968 break;
969 }
970 if (exponent < 0)
971 return -EINVAL;
972 qh->period = 1 << exponent;
847 qh->skel = uhci->skelqh[UHCI_SKEL_INDEX(exponent)]; 973 qh->skel = uhci->skelqh[UHCI_SKEL_INDEX(exponent)];
848 else if (qh->period != urb->interval)
849 return -EINVAL; /* Can't change the period */
850 974
851 return uhci_submit_common(uhci, urb, qh); 975 /* For now, interrupt phase is fixed by the layout
976 * of the QH lists. */
977 qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
978 ret = uhci_check_bandwidth(uhci, qh);
979 if (ret)
980 return ret;
981 } else if (qh->period > urb->interval)
982 return -EINVAL; /* Can't decrease the period */
983
984 ret = uhci_submit_common(uhci, urb, qh);
985 if (ret == 0) {
986 urb->interval = qh->period;
987 if (!qh->bandwidth_reserved)
988 uhci_reserve_bandwidth(uhci, qh);
989 }
990 return ret;
852} 991}
853 992
854/* 993/*
@@ -995,15 +1134,32 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
995 return -EFBIG; 1134 return -EFBIG;
996 1135
997 /* Check the period and figure out the starting frame number */ 1136 /* Check the period and figure out the starting frame number */
998 if (qh->period == 0) { 1137 if (!qh->bandwidth_reserved) {
1138 qh->period = urb->interval;
999 if (urb->transfer_flags & URB_ISO_ASAP) { 1139 if (urb->transfer_flags & URB_ISO_ASAP) {
1140 qh->phase = -1; /* Find the best phase */
1141 i = uhci_check_bandwidth(uhci, qh);
1142 if (i)
1143 return i;
1144
1145 /* Allow a little time to allocate the TDs */
1000 uhci_get_current_frame_number(uhci); 1146 uhci_get_current_frame_number(uhci);
1001 urb->start_frame = uhci->frame_number + 10; 1147 frame = uhci->frame_number + 10;
1148
1149 /* Move forward to the first frame having the
1150 * correct phase */
1151 urb->start_frame = frame + ((qh->phase - frame) &
1152 (qh->period - 1));
1002 } else { 1153 } else {
1003 i = urb->start_frame - uhci->last_iso_frame; 1154 i = urb->start_frame - uhci->last_iso_frame;
1004 if (i <= 0 || i >= UHCI_NUMFRAMES) 1155 if (i <= 0 || i >= UHCI_NUMFRAMES)
1005 return -EINVAL; 1156 return -EINVAL;
1157 qh->phase = urb->start_frame & (qh->period - 1);
1158 i = uhci_check_bandwidth(uhci, qh);
1159 if (i)
1160 return i;
1006 } 1161 }
1162
1007 } else if (qh->period != urb->interval) { 1163 } else if (qh->period != urb->interval) {
1008 return -EINVAL; /* Can't change the period */ 1164 return -EINVAL; /* Can't change the period */
1009 1165
@@ -1049,9 +1205,6 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
1049 /* Set the interrupt-on-completion flag on the last packet. */ 1205 /* Set the interrupt-on-completion flag on the last packet. */
1050 td->status |= __constant_cpu_to_le32(TD_CTRL_IOC); 1206 td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);
1051 1207
1052 qh->skel = uhci->skel_iso_qh;
1053 qh->period = urb->interval;
1054
1055 /* Add the TDs to the frame list */ 1208 /* Add the TDs to the frame list */
1056 frame = urb->start_frame; 1209 frame = urb->start_frame;
1057 list_for_each_entry(td, &urbp->td_list, list) { 1210 list_for_each_entry(td, &urbp->td_list, list) {
@@ -1065,6 +1218,9 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
1065 qh->iso_status = 0; 1218 qh->iso_status = 0;
1066 } 1219 }
1067 1220
1221 qh->skel = uhci->skel_iso_qh;
1222 if (!qh->bandwidth_reserved)
1223 uhci_reserve_bandwidth(uhci, qh);
1068 return 0; 1224 return 0;
1069} 1225}
1070 1226
@@ -1119,7 +1275,6 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd,
1119 unsigned long flags; 1275 unsigned long flags;
1120 struct urb_priv *urbp; 1276 struct urb_priv *urbp;
1121 struct uhci_qh *qh; 1277 struct uhci_qh *qh;
1122 int bustime;
1123 1278
1124 spin_lock_irqsave(&uhci->lock, flags); 1279 spin_lock_irqsave(&uhci->lock, flags);
1125 1280
@@ -1149,35 +1304,11 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd,
1149 ret = uhci_submit_bulk(uhci, urb, qh); 1304 ret = uhci_submit_bulk(uhci, urb, qh);
1150 break; 1305 break;
1151 case USB_ENDPOINT_XFER_INT: 1306 case USB_ENDPOINT_XFER_INT:
1152 if (list_empty(&qh->queue)) { 1307 ret = uhci_submit_interrupt(uhci, urb, qh);
1153 bustime = usb_check_bandwidth(urb->dev, urb);
1154 if (bustime < 0)
1155 ret = bustime;
1156 else {
1157 ret = uhci_submit_interrupt(uhci, urb, qh);
1158 if (ret == 0)
1159 usb_claim_bandwidth(urb->dev, urb, bustime, 0);
1160 }
1161 } else { /* inherit from parent */
1162 struct urb_priv *eurbp;
1163
1164 eurbp = list_entry(qh->queue.prev, struct urb_priv,
1165 node);
1166 urb->bandwidth = eurbp->urb->bandwidth;
1167 ret = uhci_submit_interrupt(uhci, urb, qh);
1168 }
1169 break; 1308 break;
1170 case USB_ENDPOINT_XFER_ISOC: 1309 case USB_ENDPOINT_XFER_ISOC:
1171 urb->error_count = 0; 1310 urb->error_count = 0;
1172 bustime = usb_check_bandwidth(urb->dev, urb);
1173 if (bustime < 0) {
1174 ret = bustime;
1175 break;
1176 }
1177
1178 ret = uhci_submit_isochronous(uhci, urb, qh); 1311 ret = uhci_submit_isochronous(uhci, urb, qh);
1179 if (ret == 0)
1180 usb_claim_bandwidth(urb->dev, urb, bustime, 1);
1181 break; 1312 break;
1182 } 1313 }
1183 if (ret != 0) 1314 if (ret != 0)
@@ -1274,24 +1405,6 @@ __acquires(uhci->lock)
1274 1405
1275 uhci_free_urb_priv(uhci, urbp); 1406 uhci_free_urb_priv(uhci, urbp);
1276 1407
1277 switch (qh->type) {
1278 case USB_ENDPOINT_XFER_ISOC:
1279 /* Release bandwidth for Interrupt or Isoc. transfers */
1280 if (urb->bandwidth)
1281 usb_release_bandwidth(urb->dev, urb, 1);
1282 break;
1283 case USB_ENDPOINT_XFER_INT:
1284 /* Release bandwidth for Interrupt or Isoc. transfers */
1285 /* Make sure we don't release if we have a queued URB */
1286 if (list_empty(&qh->queue) && urb->bandwidth)
1287 usb_release_bandwidth(urb->dev, urb, 0);
1288 else
1289 /* bandwidth was passed on to queued URB, */
1290 /* so don't let usb_unlink_urb() release it */
1291 urb->bandwidth = 0;
1292 break;
1293 }
1294
1295 spin_unlock(&uhci->lock); 1408 spin_unlock(&uhci->lock);
1296 usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb); 1409 usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb);
1297 spin_lock(&uhci->lock); 1410 spin_lock(&uhci->lock);
@@ -1300,9 +1413,8 @@ __acquires(uhci->lock)
1300 * reserved bandwidth. */ 1413 * reserved bandwidth. */
1301 if (list_empty(&qh->queue)) { 1414 if (list_empty(&qh->queue)) {
1302 uhci_unlink_qh(uhci, qh); 1415 uhci_unlink_qh(uhci, qh);
1303 1416 if (qh->bandwidth_reserved)
1304 /* Bandwidth stuff not yet implemented */ 1417 uhci_release_bandwidth(uhci, qh);
1305 qh->period = 0;
1306 } 1418 }
1307} 1419}
1308 1420
diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c
index 63a84bbc310d..d308afd06935 100644
--- a/drivers/usb/image/mdc800.c
+++ b/drivers/usb/image/mdc800.c
@@ -565,11 +565,15 @@ static void mdc800_usb_disconnect (struct usb_interface *intf)
565 565
566 usb_deregister_dev(intf, &mdc800_class); 566 usb_deregister_dev(intf, &mdc800_class);
567 567
568 /* must be under lock to make sure no URB
569 is submitted after usb_kill_urb() */
570 mutex_lock(&mdc800->io_lock);
568 mdc800->state=NOT_CONNECTED; 571 mdc800->state=NOT_CONNECTED;
569 572
570 usb_kill_urb(mdc800->irq_urb); 573 usb_kill_urb(mdc800->irq_urb);
571 usb_kill_urb(mdc800->write_urb); 574 usb_kill_urb(mdc800->write_urb);
572 usb_kill_urb(mdc800->download_urb); 575 usb_kill_urb(mdc800->download_urb);
576 mutex_unlock(&mdc800->io_lock);
573 577
574 mdc800->dev = NULL; 578 mdc800->dev = NULL;
575 usb_set_intfdata(intf, NULL); 579 usb_set_intfdata(intf, NULL);
diff --git a/drivers/usb/input/Kconfig b/drivers/usb/input/Kconfig
index c7d887540d8d..2e71d3cca198 100644
--- a/drivers/usb/input/Kconfig
+++ b/drivers/usb/input/Kconfig
@@ -69,6 +69,14 @@ config LOGITECH_FF
69 Note: if you say N here, this device will still be supported, but without 69 Note: if you say N here, this device will still be supported, but without
70 force feedback. 70 force feedback.
71 71
72config PANTHERLORD_FF
73 bool "PantherLord USB/PS2 2in1 Adapter support"
74 depends on HID_FF
75 select INPUT_FF_MEMLESS if USB_HID
76 help
77 Say Y here if you have a PantherLord USB/PS2 2in1 Adapter and want
78 to enable force feedback support for it.
79
72config THRUSTMASTER_FF 80config THRUSTMASTER_FF
73 bool "ThrustMaster FireStorm Dual Power 2 support (EXPERIMENTAL)" 81 bool "ThrustMaster FireStorm Dual Power 2 support (EXPERIMENTAL)"
74 depends on HID_FF && EXPERIMENTAL 82 depends on HID_FF && EXPERIMENTAL
@@ -344,3 +352,15 @@ config USB_APPLETOUCH
344 352
345 To compile this driver as a module, choose M here: the 353 To compile this driver as a module, choose M here: the
346 module will be called appletouch. 354 module will be called appletouch.
355
356config USB_GTCO
357 tristate "GTCO CalComp/InterWrite USB Support"
358 depends on USB && INPUT
359 ---help---
360 Say Y here if you want to use the USB version of the GTCO
361 CalComp/InterWrite Tablet. Make sure to say Y to "Mouse support"
362 (CONFIG_INPUT_MOUSEDEV) and/or "Event interface support"
363 (CONFIG_INPUT_EVDEV) as well.
364
365 To compile this driver as a module, choose M here: the
366 module will be called gtco.
diff --git a/drivers/usb/input/Makefile b/drivers/usb/input/Makefile
index 1a24b5bfa05f..a9d206c945e9 100644
--- a/drivers/usb/input/Makefile
+++ b/drivers/usb/input/Makefile
@@ -17,6 +17,9 @@ endif
17ifeq ($(CONFIG_LOGITECH_FF),y) 17ifeq ($(CONFIG_LOGITECH_FF),y)
18 usbhid-objs += hid-lgff.o 18 usbhid-objs += hid-lgff.o
19endif 19endif
20ifeq ($(CONFIG_PANTHERLORD_FF),y)
21 usbhid-objs += hid-plff.o
22endif
20ifeq ($(CONFIG_THRUSTMASTER_FF),y) 23ifeq ($(CONFIG_THRUSTMASTER_FF),y)
21 usbhid-objs += hid-tmff.o 24 usbhid-objs += hid-tmff.o
22endif 25endif
@@ -45,6 +48,7 @@ obj-$(CONFIG_USB_ACECAD) += acecad.o
45obj-$(CONFIG_USB_YEALINK) += yealink.o 48obj-$(CONFIG_USB_YEALINK) += yealink.o
46obj-$(CONFIG_USB_XPAD) += xpad.o 49obj-$(CONFIG_USB_XPAD) += xpad.o
47obj-$(CONFIG_USB_APPLETOUCH) += appletouch.o 50obj-$(CONFIG_USB_APPLETOUCH) += appletouch.o
51obj-$(CONFIG_USB_GTCO) += gtco.o
48 52
49ifeq ($(CONFIG_USB_DEBUG),y) 53ifeq ($(CONFIG_USB_DEBUG),y)
50EXTRA_CFLAGS += -DDEBUG 54EXTRA_CFLAGS += -DDEBUG
diff --git a/drivers/usb/input/gtco.c b/drivers/usb/input/gtco.c
new file mode 100644
index 000000000000..203cdc1bbba4
--- /dev/null
+++ b/drivers/usb/input/gtco.c
@@ -0,0 +1,1104 @@
1/* -*- linux-c -*-
2
3GTCO digitizer USB driver
4
5Use the err(), dbg() and info() macros from usb.h for system logging
6
7TO CHECK: Is pressure done right on report 5?
8
9Copyright (C) 2006 GTCO CalComp
10
11This program is free software; you can redistribute it and/or
12modify it under the terms of the GNU General Public License
13as published by the Free Software Foundation; version 2
14of the License.
15
16This program is distributed in the hope that it will be useful,
17but WITHOUT ANY WARRANTY; without even the implied warranty of
18MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19GNU General Public License for more details.
20
21You should have received a copy of the GNU General Public License
22along with this program; if not, write to the Free Software
23Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
24
25Permission to use, copy, modify, distribute, and sell this software and its
26documentation for any purpose is hereby granted without fee, provided that
27the above copyright notice appear in all copies and that both that
28copyright notice and this permission notice appear in supporting
29documentation, and that the name of GTCO-CalComp not be used in advertising
30or publicity pertaining to distribution of the software without specific,
31written prior permission. GTCO-CalComp makes no representations about the
32suitability of this software for any purpose. It is provided "as is"
33without express or implied warranty.
34
35GTCO-CALCOMP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
36INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
37EVENT SHALL GTCO-CALCOMP BE LIABLE FOR ANY SPECIAL, INDIRECT OR
38CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
39DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
40TORTIOUS ACTIONS, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
41PERFORMANCE OF THIS SOFTWARE.
42
43GTCO CalComp, Inc.
447125 Riverwood Drive
45Columbia, MD 21046
46
47Jeremy Roberson jroberson@gtcocalcomp.com
48Scott Hill shill@gtcocalcomp.com
49*/
50
51
52
53/*#define DEBUG*/
54
55#include <linux/kernel.h>
56#include <linux/module.h>
57#include <linux/errno.h>
58#include <linux/init.h>
59#include <linux/slab.h>
60#include <linux/input.h>
61#include <linux/usb.h>
62#include <asm/uaccess.h>
63#include <asm/unaligned.h>
64#include <asm/byteorder.h>
65
66
67#include <linux/version.h>
68#include <linux/usb/input.h>
69
70/* Version with a Major number of 2 is for kernel inclusion only. */
71#define GTCO_VERSION "2.00.0006"
72
73
74/* MACROS */
75
76#define VENDOR_ID_GTCO 0x078C
77#define PID_400 0x400
78#define PID_401 0x401
79#define PID_1000 0x1000
80#define PID_1001 0x1001
81#define PID_1002 0x1002
82
83/* Max size of a single report */
84#define REPORT_MAX_SIZE 10
85
86
87/* Bitmask whether pen is in range */
88#define MASK_INRANGE 0x20
89#define MASK_BUTTON 0x01F
90
91#define PATHLENGTH 64
92
93/* DATA STRUCTURES */
94
95/* Device table */
96static struct usb_device_id gtco_usbid_table [] = {
97 { USB_DEVICE(VENDOR_ID_GTCO, PID_400) },
98 { USB_DEVICE(VENDOR_ID_GTCO, PID_401) },
99 { USB_DEVICE(VENDOR_ID_GTCO, PID_1000) },
100 { USB_DEVICE(VENDOR_ID_GTCO, PID_1001) },
101 { USB_DEVICE(VENDOR_ID_GTCO, PID_1002) },
102 { }
103};
104MODULE_DEVICE_TABLE (usb, gtco_usbid_table);
105
106
107/* Structure to hold all of our device specific stuff */
108struct gtco {
109
110 struct input_dev *inputdevice; /* input device struct pointer */
111 struct usb_device *usbdev; /* the usb device for this device */
112 struct urb *urbinfo; /* urb for incoming reports */
113 dma_addr_t buf_dma; /* dma addr of the data buffer*/
114 unsigned char * buffer; /* databuffer for reports */
115
116 char usbpath[PATHLENGTH];
117 int openCount;
118
119 /* Information pulled from Report Descriptor */
120 u32 usage;
121 u32 min_X;
122 u32 max_X;
123 u32 min_Y;
124 u32 max_Y;
125 s8 mintilt_X;
126 s8 maxtilt_X;
127 s8 mintilt_Y;
128 s8 maxtilt_Y;
129 u32 maxpressure;
130 u32 minpressure;
131};
132
133
134
135/* Code for parsing the HID REPORT DESCRIPTOR */
136
137/* From HID1.11 spec */
138struct hid_descriptor
139{
140 struct usb_descriptor_header header;
141 __le16 bcdHID;
142 u8 bCountryCode;
143 u8 bNumDescriptors;
144 u8 bDescriptorType;
145 __le16 wDescriptorLength;
146} __attribute__ ((packed));
147
148
149#define HID_DESCRIPTOR_SIZE 9
150#define HID_DEVICE_TYPE 33
151#define REPORT_DEVICE_TYPE 34
152
153
154#define PREF_TAG(x) ((x)>>4)
155#define PREF_TYPE(x) ((x>>2)&0x03)
156#define PREF_SIZE(x) ((x)&0x03)
157
158#define TYPE_MAIN 0
159#define TYPE_GLOBAL 1
160#define TYPE_LOCAL 2
161#define TYPE_RESERVED 3
162
163#define TAG_MAIN_INPUT 0x8
164#define TAG_MAIN_OUTPUT 0x9
165#define TAG_MAIN_FEATURE 0xB
166#define TAG_MAIN_COL_START 0xA
167#define TAG_MAIN_COL_END 0xC
168
169#define TAG_GLOB_USAGE 0
170#define TAG_GLOB_LOG_MIN 1
171#define TAG_GLOB_LOG_MAX 2
172#define TAG_GLOB_PHYS_MIN 3
173#define TAG_GLOB_PHYS_MAX 4
174#define TAG_GLOB_UNIT_EXP 5
175#define TAG_GLOB_UNIT 6
176#define TAG_GLOB_REPORT_SZ 7
177#define TAG_GLOB_REPORT_ID 8
178#define TAG_GLOB_REPORT_CNT 9
179#define TAG_GLOB_PUSH 10
180#define TAG_GLOB_POP 11
181
182#define TAG_GLOB_MAX 12
183
184#define DIGITIZER_USAGE_TIP_PRESSURE 0x30
185#define DIGITIZER_USAGE_TILT_X 0x3D
186#define DIGITIZER_USAGE_TILT_Y 0x3E
187
188
189/*
190 *
191 * This is an abbreviated parser for the HID Report Descriptor. We
192 * know what devices we are talking to, so this is by no means meant
193 * to be generic. We can make some safe assumptions:
194 *
195 * - We know there are no LONG tags, all short
196 * - We know that we have no MAIN Feature and MAIN Output items
197 * - We know what the IRQ reports are supposed to look like.
198 *
199 * The main purpose of this is to use the HID report desc to figure
200 * out the mins and maxs of the fields in the IRQ reports. The IRQ
201 * reports for 400/401 change slightly if the max X is bigger than 64K.
202 *
203 */
204static void parse_hid_report_descriptor(struct gtco *device, char * report,
205 int length)
206{
207 int x,i=0;
208
209 /* Tag primitive vars */
210 __u8 prefix;
211 __u8 size;
212 __u8 tag;
213 __u8 type;
214 __u8 data = 0;
215 __u16 data16 = 0;
216 __u32 data32 = 0;
217
218
219 /* For parsing logic */
220 int inputnum = 0;
221 __u32 usage = 0;
222
223 /* Global Values, indexed by TAG */
224 __u32 globalval[TAG_GLOB_MAX];
225 __u32 oldval[TAG_GLOB_MAX];
226
227 /* Debug stuff */
228 char maintype='x';
229 char globtype[12];
230 int indent=0;
231 char indentstr[10]="";
232
233
234
235 dbg("======>>>>>>PARSE<<<<<<======");
236
237 /* Walk this report and pull out the info we need */
238 while (i<length){
239 prefix=report[i];
240
241 /* Skip over prefix */
242 i++;
243
244 /* Determine data size and save the data in the proper variable */
245 size = PREF_SIZE(prefix);
246 switch(size){
247 case 1:
248 data = report[i];
249 break;
250 case 2:
251 data16 = le16_to_cpu(get_unaligned((__le16*)(&(report[i]))));
252 break;
253 case 3:
254 size = 4;
255 data32 = le32_to_cpu(get_unaligned((__le32*)(&(report[i]))));
256 }
257
258 /* Skip size of data */
259 i+=size;
260
261 /* What we do depends on the tag type */
262 tag = PREF_TAG(prefix);
263 type = PREF_TYPE(prefix);
264 switch(type){
265 case TYPE_MAIN:
266 strcpy(globtype,"");
267 switch(tag){
268
269 case TAG_MAIN_INPUT:
270 /*
271 * The INPUT MAIN tag signifies this is
272 * information from a report. We need to
273 * figure out what it is and store the
274 * min/max values
275 */
276
277 maintype='I';
278 if (data==2){
279 strcpy(globtype,"Variable");
280 }
281 if (data==3){
282 strcpy(globtype,"Var|Const");
283 }
284
285 dbg("::::: Saving Report: %d input #%d Max: 0x%X(%d) Min:0x%X(%d) of %d bits",
286 globalval[TAG_GLOB_REPORT_ID],inputnum,
287 globalval[TAG_GLOB_LOG_MAX],globalval[TAG_GLOB_LOG_MAX],
288 globalval[TAG_GLOB_LOG_MIN],globalval[TAG_GLOB_LOG_MIN],
289 (globalval[TAG_GLOB_REPORT_SZ] * globalval[TAG_GLOB_REPORT_CNT]));
290
291
292 /*
293 We can assume that the first two input items
294 are always the X and Y coordinates. After
295 that, we look for everything else by
296 local usage value
297 */
298 switch (inputnum){
299 case 0: /* X coord */
300 dbg("GER: X Usage: 0x%x",usage);
301 if (device->max_X == 0){
302 device->max_X = globalval[TAG_GLOB_LOG_MAX];
303 device->min_X = globalval[TAG_GLOB_LOG_MIN];
304 }
305
306 break;
307 case 1: /* Y coord */
308 dbg("GER: Y Usage: 0x%x",usage);
309 if (device->max_Y == 0){
310 device->max_Y = globalval[TAG_GLOB_LOG_MAX];
311 device->min_Y = globalval[TAG_GLOB_LOG_MIN];
312 }
313 break;
314 default:
315 /* Tilt X */
316 if (usage == DIGITIZER_USAGE_TILT_X){
317 if (device->maxtilt_X == 0){
318 device->maxtilt_X = globalval[TAG_GLOB_LOG_MAX];
319 device->mintilt_X = globalval[TAG_GLOB_LOG_MIN];
320 }
321 }
322
323 /* Tilt Y */
324 if (usage == DIGITIZER_USAGE_TILT_Y){
325 if (device->maxtilt_Y == 0){
326 device->maxtilt_Y = globalval[TAG_GLOB_LOG_MAX];
327 device->mintilt_Y = globalval[TAG_GLOB_LOG_MIN];
328 }
329 }
330
331
332 /* Pressure */
333 if (usage == DIGITIZER_USAGE_TIP_PRESSURE){
334 if (device->maxpressure == 0){
335 device->maxpressure = globalval[TAG_GLOB_LOG_MAX];
336 device->minpressure = globalval[TAG_GLOB_LOG_MIN];
337 }
338 }
339
340 break;
341 }
342
343 inputnum++;
344
345
346 break;
347 case TAG_MAIN_OUTPUT:
348 maintype='O';
349 break;
350 case TAG_MAIN_FEATURE:
351 maintype='F';
352 break;
353 case TAG_MAIN_COL_START:
354 maintype='S';
355
356 if (data==0){
357 dbg("======>>>>>> Physical");
358 strcpy(globtype,"Physical");
359 }else{
360 dbg("======>>>>>>");
361 }
362
363 /* Indent the debug output */
364 indent++;
365 for (x=0;x<indent;x++){
366 indentstr[x]='-';
367 }
368 indentstr[x]=0;
369
370 /* Save global tags */
371 for (x=0;x<TAG_GLOB_MAX;x++){
372 oldval[x] = globalval[x];
373 }
374
375 break;
376 case TAG_MAIN_COL_END:
377 dbg("<<<<<<======");
378 maintype='E';
379 indent--;
380 for (x=0;x<indent;x++){
381 indentstr[x]='-';
382 }
383 indentstr[x]=0;
384
385 /* Copy global tags back */
386 for (x=0;x<TAG_GLOB_MAX;x++){
387 globalval[x] = oldval[x];
388 }
389
390 break;
391 }
392
393 switch (size){
394 case 1:
395 dbg("%sMAINTAG:(%d) %c SIZE: %d Data: %s 0x%x",
396 indentstr,tag,maintype,size,globtype,data);
397 break;
398 case 2:
399 dbg("%sMAINTAG:(%d) %c SIZE: %d Data: %s 0x%x",
400 indentstr,tag,maintype,size,globtype, data16);
401 break;
402 case 4:
403 dbg("%sMAINTAG:(%d) %c SIZE: %d Data: %s 0x%x",
404 indentstr,tag,maintype,size,globtype,data32);
405 break;
406 }
407 break;
408 case TYPE_GLOBAL:
409 switch(tag){
410 case TAG_GLOB_USAGE:
411 /*
412 * First time we hit the global usage tag,
413 * it should tell us the type of device
414 */
415 if (device->usage == 0){
416 device->usage = data;
417 }
418 strcpy(globtype,"USAGE");
419 break;
420 case TAG_GLOB_LOG_MIN :
421 strcpy(globtype,"LOG_MIN");
422 break;
423 case TAG_GLOB_LOG_MAX :
424 strcpy(globtype,"LOG_MAX");
425 break;
426 case TAG_GLOB_PHYS_MIN :
427 strcpy(globtype,"PHYS_MIN");
428 break;
429 case TAG_GLOB_PHYS_MAX :
430 strcpy(globtype,"PHYS_MAX");
431 break;
432 case TAG_GLOB_UNIT_EXP :
433 strcpy(globtype,"EXP");
434 break;
435 case TAG_GLOB_UNIT :
436 strcpy(globtype,"UNIT");
437 break;
438 case TAG_GLOB_REPORT_SZ :
439 strcpy(globtype,"REPORT_SZ");
440 break;
441 case TAG_GLOB_REPORT_ID :
442 strcpy(globtype,"REPORT_ID");
443 /* New report, restart numbering */
444 inputnum=0;
445 break;
446 case TAG_GLOB_REPORT_CNT:
447 strcpy(globtype,"REPORT_CNT");
448 break;
449 case TAG_GLOB_PUSH :
450 strcpy(globtype,"PUSH");
451 break;
452 case TAG_GLOB_POP:
453 strcpy(globtype,"POP");
454 break;
455 }
456
457
458 /* Check to make sure we have a good tag number
459 so we don't overflow array */
460 if (tag < TAG_GLOB_MAX){
461 switch (size){
462 case 1:
463 dbg("%sGLOBALTAG:%s(%d) SIZE: %d Data: 0x%x",indentstr,globtype,tag,size,data);
464 globalval[tag]=data;
465 break;
466 case 2:
467 dbg("%sGLOBALTAG:%s(%d) SIZE: %d Data: 0x%x",indentstr,globtype,tag,size,data16);
468 globalval[tag]=data16;
469 break;
470 case 4:
471 dbg("%sGLOBALTAG:%s(%d) SIZE: %d Data: 0x%x",indentstr,globtype,tag,size,data32);
472 globalval[tag]=data32;
473 break;
474 }
475 }else{
476 dbg("%sGLOBALTAG: ILLEGAL TAG:%d SIZE: %d ",
477 indentstr,tag,size);
478 }
479
480
481 break;
482
483 case TYPE_LOCAL:
484 switch(tag){
485 case TAG_GLOB_USAGE:
486 strcpy(globtype,"USAGE");
487 /* Always 1 byte */
488 usage = data;
489 break;
490 case TAG_GLOB_LOG_MIN :
491 strcpy(globtype,"MIN");
492 break;
493 case TAG_GLOB_LOG_MAX :
494 strcpy(globtype,"MAX");
495 break;
496 default:
497 strcpy(globtype,"UNKNOWN");
498 }
499
500 switch (size){
501 case 1:
502 dbg("%sLOCALTAG:(%d) %s SIZE: %d Data: 0x%x",
503 indentstr,tag,globtype,size,data);
504 break;
505 case 2:
506 dbg("%sLOCALTAG:(%d) %s SIZE: %d Data: 0x%x",
507 indentstr,tag,globtype,size,data16);
508 break;
509 case 4:
510 dbg("%sLOCALTAG:(%d) %s SIZE: %d Data: 0x%x",
511 indentstr,tag,globtype,size,data32);
512 break;
513 }
514
515 break;
516 }
517
518 }
519
520}
521
522
523
524/* INPUT DRIVER Routines */
525
526
527/*
528 * Called when opening the input device. This will submit the URB to
529 * the usb system so we start getting reports
530 */
531static int gtco_input_open(struct input_dev *inputdev)
532{
533 struct gtco *device;
534 device = inputdev->private;
535
536 device->urbinfo->dev = device->usbdev;
537 if (usb_submit_urb(device->urbinfo, GFP_KERNEL)) {
538 return -EIO;
539 }
540 return 0;
541}
542
543/**
544 Called when closing the input device. This will unlink the URB
545*/
546static void gtco_input_close(struct input_dev *inputdev)
547{
548 struct gtco *device = inputdev->private;
549
550 usb_kill_urb(device->urbinfo);
551
552}
553
554
555/*
556 * Setup input device capabilities. Tell the input system what this
557 * device is capable of generating.
558 *
559 * This information is based on what is read from the HID report and
560 * placed in the struct gtco structure
561 *
562 */
563static void gtco_setup_caps(struct input_dev *inputdev)
564{
565 struct gtco *device = inputdev->private;
566
567
568 /* Which events */
569 inputdev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_MSC);
570
571
572 /* Misc event menu block */
573 inputdev->mscbit[0] = BIT(MSC_SCAN)|BIT(MSC_SERIAL)|BIT(MSC_RAW) ;
574
575
576 /* Absolute values based on HID report info */
577 input_set_abs_params(inputdev, ABS_X, device->min_X, device->max_X,
578 0, 0);
579 input_set_abs_params(inputdev, ABS_Y, device->min_Y, device->max_Y,
580 0, 0);
581
582 /* Proximity */
583 input_set_abs_params(inputdev, ABS_DISTANCE, 0, 1, 0, 0);
584
585 /* Tilt & pressure */
586 input_set_abs_params(inputdev, ABS_TILT_X, device->mintilt_X,
587 device->maxtilt_X, 0, 0);
588 input_set_abs_params(inputdev, ABS_TILT_Y, device->mintilt_Y,
589 device->maxtilt_Y, 0, 0);
590 input_set_abs_params(inputdev, ABS_PRESSURE, device->minpressure,
591 device->maxpressure, 0, 0);
592
593
594 /* Transducer */
595 input_set_abs_params(inputdev, ABS_MISC, 0,0xFF, 0, 0);
596
597}
598
599
600
601/* USB Routines */
602
603
604/*
605 * URB callback routine. Called when we get IRQ reports from the
606 * digitizer.
607 *
608 * This bridges the USB and input device worlds. It generates events
609 * on the input device based on the USB reports.
610 */
611static void gtco_urb_callback(struct urb *urbinfo)
612{
613
614
615 struct gtco *device = urbinfo->context;
616 struct input_dev *inputdev;
617 int rc;
618 u32 val = 0;
619 s8 valsigned = 0;
620 char le_buffer[2];
621
622 inputdev = device->inputdevice;
623
624
625 /* Was callback OK? */
626 if ((urbinfo->status == -ECONNRESET ) ||
627 (urbinfo->status == -ENOENT ) ||
628 (urbinfo->status == -ESHUTDOWN )){
629
630 /* Shutdown is occurring. Return and don't queue up any more */
631 return;
632 }
633
634 if (urbinfo->status != 0 ) {
635 /* Some unknown error. Hopefully temporary. Just go and */
636 /* requeue an URB */
637 goto resubmit;
638 }
639
640 /*
641 * Good URB, now process
642 */
643
644 /* PID dependent when we interpret the report */
645 if ((inputdev->id.product == PID_1000 )||
646 (inputdev->id.product == PID_1001 )||
647 (inputdev->id.product == PID_1002 ))
648 {
649
650 /*
651 * Switch on the report ID
652 * Conveniently, the reports have more information, the higher
653 * the report number. We can just fall through the case
654 * statements if we start with the highest number report
655 */
656 switch(device->buffer[0]){
657 case 5:
658 /* Pressure is 9 bits */
659 val = ((u16)(device->buffer[8]) << 1);
660 val |= (u16)(device->buffer[7] >> 7);
661 input_report_abs(inputdev, ABS_PRESSURE,
662 device->buffer[8]);
663
664 /* Mask out the Y tilt value used for pressure */
665 device->buffer[7] = (u8)((device->buffer[7]) & 0x7F);
666
667
668 /* Fall thru */
669 case 4:
670 /* Tilt */
671
672 /* Sign extend these 7 bit numbers. */
673 if (device->buffer[6] & 0x40)
674 device->buffer[6] |= 0x80;
675
676 if (device->buffer[7] & 0x40)
677 device->buffer[7] |= 0x80;
678
679
680 valsigned = (device->buffer[6]);
681 input_report_abs(inputdev, ABS_TILT_X, (s32)valsigned);
682
683 valsigned = (device->buffer[7]);
684 input_report_abs(inputdev, ABS_TILT_Y, (s32)valsigned);
685
686 /* Fall thru */
687
688 case 2:
689 case 3:
690 /* Convert buttons, only 5 bits possible */
691 val = (device->buffer[5])&MASK_BUTTON;
692
693 /* We don't apply any meaning to the bitmask,
694 just report */
695 input_event(inputdev, EV_MSC, MSC_SERIAL, val);
696
697 /* Fall thru */
698 case 1:
699
700 /* All reports have X and Y coords in the same place */
701 val = le16_to_cpu(get_unaligned((__le16 *) &(device->buffer[1])));
702 input_report_abs(inputdev, ABS_X, val);
703
704 val = le16_to_cpu(get_unaligned((__le16 *) &(device->buffer[3])));
705 input_report_abs(inputdev, ABS_Y, val);
706
707
708 /* Ditto for proximity bit */
709 if (device->buffer[5]& MASK_INRANGE){
710 val = 1;
711 }else{
712 val=0;
713 }
714 input_report_abs(inputdev, ABS_DISTANCE, val);
715
716
717 /* Report 1 is an exception to how we handle buttons */
718 /* Buttons are an index, not a bitmask */
719 if (device->buffer[0] == 1){
720
721 /* Convert buttons, 5 bit index */
722 /* Report value of index set as one,
723 the rest as 0 */
724 val = device->buffer[5]& MASK_BUTTON;
725 dbg("======>>>>>>REPORT 1: val 0x%X(%d)",
726 val,val);
727
728 /*
729 * We don't apply any meaning to the button
730 * index, just report it
731 */
732 input_event(inputdev, EV_MSC, MSC_SERIAL, val);
733
734
735 }
736
737 break;
738 case 7:
739 /* Menu blocks */
740 input_event(inputdev, EV_MSC, MSC_SCAN,
741 device->buffer[1]);
742
743
744 break;
745
746 }
747
748
749 }
750 /* Other pid class */
751 if ((inputdev->id.product == PID_400 )||
752 (inputdev->id.product == PID_401 ))
753 {
754
755 /* Report 2 */
756 if (device->buffer[0] == 2){
757 /* Menu blocks */
758 input_event(inputdev, EV_MSC, MSC_SCAN,
759 device->buffer[1]);
760 }
761
762 /* Report 1 */
763 if (device->buffer[0] == 1){
764 char buttonbyte;
765
766
767 /* IF X max > 64K, we still a bit from the y report */
768 if (device->max_X > 0x10000){
769
770 val = (u16)(((u16)(device->buffer[2]<<8))|((u8)(device->buffer[1])));
771 val |= (u32)(((u8)device->buffer[3]&0x1)<< 16);
772
773 input_report_abs(inputdev, ABS_X, val);
774
775 le_buffer[0] = (u8)((u8)(device->buffer[3])>>1);
776 le_buffer[0] |= (u8)((device->buffer[3]&0x1)<<7);
777
778 le_buffer[1] = (u8)(device->buffer[4]>>1);
779 le_buffer[1] |= (u8)((device->buffer[5]&0x1)<<7);
780
781 val = le16_to_cpu(get_unaligned((__le16 *)(le_buffer)));
782
783 input_report_abs(inputdev, ABS_Y, val);
784
785
786 /*
787 * Shift the button byte right by one to
788 * make it look like the standard report
789 */
790 buttonbyte = (device->buffer[5])>>1;
791 }else{
792
793 val = le16_to_cpu(get_unaligned((__le16 *) (&(device->buffer[1]))));
794 input_report_abs(inputdev, ABS_X, val);
795
796 val = le16_to_cpu(get_unaligned((__le16 *) (&(device->buffer[3]))));
797 input_report_abs(inputdev, ABS_Y, val);
798
799 buttonbyte = device->buffer[5];
800
801 }
802
803
804 /* BUTTONS and PROXIMITY */
805 if (buttonbyte& MASK_INRANGE){
806 val = 1;
807 }else{
808 val=0;
809 }
810 input_report_abs(inputdev, ABS_DISTANCE, val);
811
812 /* Convert buttons, only 4 bits possible */
813 val = buttonbyte&0x0F;
814#ifdef USE_BUTTONS
815 for ( i=0;i<5;i++){
816 input_report_key(inputdev, BTN_DIGI+i,val&(1<<i));
817 }
818#else
819 /* We don't apply any meaning to the bitmask, just report */
820 input_event(inputdev, EV_MSC, MSC_SERIAL, val);
821#endif
822 /* TRANSDUCER */
823 input_report_abs(inputdev, ABS_MISC, device->buffer[6]);
824
825 }
826 }
827
828 /* Everybody gets report ID's */
829 input_event(inputdev, EV_MSC, MSC_RAW, device->buffer[0]);
830
831 /* Sync it up */
832 input_sync(inputdev);
833
834 resubmit:
835 rc = usb_submit_urb(urbinfo, GFP_ATOMIC);
836 if (rc != 0) {
837 err("usb_submit_urb failed rc=0x%x",rc);
838 }
839
840}
841
842/*
843 * The probe routine. This is called when the kernel find the matching USB
844 * vendor/product. We do the following:
845 *
846 * - Allocate mem for a local structure to manage the device
847 * - Request a HID Report Descriptor from the device and parse it to
848 * find out the device parameters
849 * - Create an input device and assign it attributes
850 * - Allocate an URB so the device can talk to us when the input
851 * queue is open
852 */
853static int gtco_probe(struct usb_interface *usbinterface,
854 const struct usb_device_id *id)
855{
856
857 struct gtco *device = NULL;
858 char path[PATHLENGTH];
859 struct input_dev *inputdev;
860 struct hid_descriptor *hid_desc;
861 char *report;
862 int result=0, retry;
863 struct usb_endpoint_descriptor *endpoint;
864
865 /* Allocate memory for device structure */
866 device = kzalloc(sizeof(struct gtco), GFP_KERNEL);
867 if (device == NULL) {
868 err("No more memory");
869 return -ENOMEM;
870 }
871
872
873 device->inputdevice = input_allocate_device();
874 if (!device->inputdevice){
875 kfree(device);
876 err("No more memory");
877 return -ENOMEM;
878 }
879
880 /* Get pointer to the input device */
881 inputdev = device->inputdevice;
882
883 /* Save interface information */
884 device->usbdev = usb_get_dev(interface_to_usbdev(usbinterface));
885
886
887 /* Allocate some data for incoming reports */
888 device->buffer = usb_buffer_alloc(device->usbdev, REPORT_MAX_SIZE,
889 GFP_KERNEL, &(device->buf_dma));
890 if (!device->buffer){
891 input_free_device(device->inputdevice);
892 kfree(device);
893 err("No more memory");
894 return -ENOMEM;
895 }
896
897 /* Allocate URB for reports */
898 device->urbinfo = usb_alloc_urb(0, GFP_KERNEL);
899 if (!device->urbinfo) {
900 usb_buffer_free(device->usbdev, REPORT_MAX_SIZE,
901 device->buffer, device->buf_dma);
902 input_free_device(device->inputdevice);
903 kfree(device);
904 err("No more memory");
905 return -ENOMEM;
906 }
907
908
909 /*
910 * The endpoint is always altsetting 0, we know this since we know
911 * this device only has one interrupt endpoint
912 */
913 endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
914
915 /* Some debug */
916 dbg("gtco # interfaces: %d",usbinterface->num_altsetting);
917 dbg("num endpoints: %d",usbinterface->cur_altsetting->desc.bNumEndpoints);
918 dbg("interface class: %d",usbinterface->cur_altsetting->desc.bInterfaceClass);
919 dbg("endpoint: attribute:0x%x type:0x%x",endpoint->bmAttributes,endpoint->bDescriptorType);
920 if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT)
921 dbg("endpoint: we have interrupt endpoint\n");
922
923 dbg("endpoint extra len:%d ",usbinterface->altsetting[0].extralen);
924
925
926
927 /*
928 * Find the HID descriptor so we can find out the size of the
929 * HID report descriptor
930 */
931 if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
932 HID_DEVICE_TYPE,&hid_desc) != 0){
933 err("Can't retrieve exta USB descriptor to get hid report descriptor length");
934 usb_buffer_free(device->usbdev, REPORT_MAX_SIZE,
935 device->buffer, device->buf_dma);
936 input_free_device(device->inputdevice);
937 kfree(device);
938 return -EIO;
939 }
940
941 dbg("Extra descriptor success: type:%d len:%d",
942 hid_desc->bDescriptorType, hid_desc->wDescriptorLength);
943
944 if (!(report = kzalloc(hid_desc->wDescriptorLength, GFP_KERNEL))) {
945 usb_buffer_free(device->usbdev, REPORT_MAX_SIZE,
946 device->buffer, device->buf_dma);
947
948 input_free_device(device->inputdevice);
949 kfree(device);
950 err("No more memory");
951 return -ENOMEM;
952 }
953
954 /* Couple of tries to get reply */
955 for (retry=0;retry<3;retry++) {
956 result = usb_control_msg(device->usbdev,
957 usb_rcvctrlpipe(device->usbdev, 0),
958 USB_REQ_GET_DESCRIPTOR,
959 USB_RECIP_INTERFACE | USB_DIR_IN,
960 (REPORT_DEVICE_TYPE << 8),
961 0, /* interface */
962 report,
963 hid_desc->wDescriptorLength,
964 5000); /* 5 secs */
965
966 if (result == hid_desc->wDescriptorLength)
967 break;
968 }
969
970 /* If we didn't get the report, fail */
971 dbg("usb_control_msg result: :%d", result);
972 if (result != hid_desc->wDescriptorLength){
973 kfree(report);
974 usb_buffer_free(device->usbdev, REPORT_MAX_SIZE,
975 device->buffer, device->buf_dma);
976 input_free_device(device->inputdevice);
977 kfree(device);
978 err("Failed to get HID Report Descriptor of size: %d",
979 hid_desc->wDescriptorLength);
980 return -EIO;
981 }
982
983
984 /* Now we parse the report */
985 parse_hid_report_descriptor(device,report,result);
986
987 /* Now we delete it */
988 kfree(report);
989
990 /* Create a device file node */
991 usb_make_path(device->usbdev, path, PATHLENGTH);
992 sprintf(device->usbpath, "%s/input0", path);
993
994
995 /* Set Input device functions */
996 inputdev->open = gtco_input_open;
997 inputdev->close = gtco_input_close;
998
999 /* Set input device information */
1000 inputdev->name = "GTCO_CalComp";
1001 inputdev->phys = device->usbpath;
1002 inputdev->private = device;
1003
1004
1005 /* Now set up all the input device capabilities */
1006 gtco_setup_caps(inputdev);
1007
1008 /* Set input device required ID information */
1009 usb_to_input_id(device->usbdev, &device->inputdevice->id);
1010 inputdev->cdev.dev = &usbinterface->dev;
1011
1012 /* Setup the URB, it will be posted later on open of input device */
1013 endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
1014
1015 usb_fill_int_urb(device->urbinfo,
1016 device->usbdev,
1017 usb_rcvintpipe(device->usbdev,
1018 endpoint->bEndpointAddress),
1019 device->buffer,
1020 REPORT_MAX_SIZE,
1021 gtco_urb_callback,
1022 device,
1023 endpoint->bInterval);
1024
1025 device->urbinfo->transfer_dma = device->buf_dma;
1026 device->urbinfo->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1027
1028
1029 /* Save device pointer in USB interface device */
1030 usb_set_intfdata(usbinterface, device);
1031
1032 /* All done, now register the input device */
1033 input_register_device(inputdev);
1034
1035 info( "gtco driver created usb: %s\n", path);
1036 return 0;
1037
1038}
1039
1040/*
1041 * This function is a standard USB function called when the USB device
1042 * is disconnected. We will get rid of the URV, de-register the input
1043 * device, and free up allocated memory
1044 */
1045static void gtco_disconnect(struct usb_interface *interface)
1046{
1047
1048 /* Grab private device ptr */
1049 struct gtco *device = usb_get_intfdata (interface);
1050 struct input_dev *inputdev;
1051
1052 inputdev = device->inputdevice;
1053
1054 /* Now reverse all the registration stuff */
1055 if (device) {
1056 input_unregister_device(inputdev);
1057 usb_kill_urb(device->urbinfo);
1058 usb_free_urb(device->urbinfo);
1059 usb_buffer_free(device->usbdev, REPORT_MAX_SIZE,
1060 device->buffer, device->buf_dma);
1061 kfree(device);
1062 }
1063
1064 info("gtco driver disconnected");
1065}
1066
1067
/* STANDARD MODULE LOAD ROUTINES */

/*
 * USB driver registration table: ties the supported device IDs to the
 * probe and disconnect entry points above.
 */
static struct usb_driver gtco_driverinfo_table = {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16))
	/* .owner was removed from struct usb_driver in 2.6.16 */
	.owner = THIS_MODULE,
#endif
	.name = "gtco",
	.id_table = gtco_usbid_table,
	.probe = gtco_probe,
	.disconnect = gtco_disconnect,
};
1079/*
1080 * Register this module with the USB subsystem
1081 */
1082static int __init gtco_init(void)
1083{
1084 int rc;
1085 rc = usb_register(&gtco_driverinfo_table);
1086 if (rc) {
1087 err("usb_register() failed rc=0x%x", rc);
1088 }
1089 printk("GTCO usb driver version: %s",GTCO_VERSION);
1090 return rc;
1091}
1092
/*
 * Module exit: deregister this driver with the USB subsystem.  The USB
 * core unbinds any still-connected devices, invoking gtco_disconnect()
 * for each before this returns.
 */
static void __exit gtco_exit(void)
{
	usb_deregister(&gtco_driverinfo_table);
}
1100
/* Module entry/exit points; GPL license is required to link against
 * the USB and input core symbols used above. */
module_init (gtco_init);
module_exit (gtco_exit);

MODULE_LICENSE("GPL");
diff --git a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c
index c6c9e72e5fd9..84983d1b7164 100644
--- a/drivers/usb/input/hid-core.c
+++ b/drivers/usb/input/hid-core.c
@@ -35,6 +35,7 @@
35 35
36#include <linux/hid.h> 36#include <linux/hid.h>
37#include <linux/hiddev.h> 37#include <linux/hiddev.h>
38#include <linux/hid-debug.h>
38#include "usbhid.h" 39#include "usbhid.h"
39 40
40/* 41/*
@@ -220,23 +221,6 @@ static void hid_irq_in(struct urb *urb)
220 } 221 }
221} 222}
222 223
223/*
224 * Find a report field with a specified HID usage.
225 */
226#if 0
227struct hid_field *hid_find_field_by_usage(struct hid_device *hid, __u32 wanted_usage, int type)
228{
229 struct hid_report *report;
230 int i;
231
232 list_for_each_entry(report, &hid->report_enum[type].report_list, list)
233 for (i = 0; i < report->maxfield; i++)
234 if (report->field[i]->logical == wanted_usage)
235 return report->field[i];
236 return NULL;
237}
238#endif /* 0 */
239
240static int hid_submit_out(struct hid_device *hid) 224static int hid_submit_out(struct hid_device *hid)
241{ 225{
242 struct hid_report *report; 226 struct hid_report *report;
@@ -501,7 +485,7 @@ static int hid_get_class_descriptor(struct usb_device *dev, int ifnum,
501{ 485{
502 int result, retries = 4; 486 int result, retries = 4;
503 487
504 memset(buf,0,size); // Make sure we parse really received data 488 memset(buf, 0, size);
505 489
506 do { 490 do {
507 result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 491 result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
@@ -528,18 +512,6 @@ void usbhid_close(struct hid_device *hid)
528 usb_kill_urb(usbhid->urbin); 512 usb_kill_urb(usbhid->urbin);
529} 513}
530 514
531static int hidinput_open(struct input_dev *dev)
532{
533 struct hid_device *hid = dev->private;
534 return usbhid_open(hid);
535}
536
537static void hidinput_close(struct input_dev *dev)
538{
539 struct hid_device *hid = dev->private;
540 usbhid_close(hid);
541}
542
543#define USB_VENDOR_ID_PANJIT 0x134c 515#define USB_VENDOR_ID_PANJIT 0x134c
544 516
545#define USB_VENDOR_ID_TURBOX 0x062a 517#define USB_VENDOR_ID_TURBOX 0x062a
@@ -770,6 +742,7 @@ void usbhid_init_reports(struct hid_device *hid)
770#define USB_DEVICE_ID_APPLE_GEYSER4_JIS 0x021c 742#define USB_DEVICE_ID_APPLE_GEYSER4_JIS 0x021c
771#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a 743#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
772#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b 744#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
745#define USB_DEVICE_ID_APPLE_IR 0x8240
773 746
774#define USB_VENDOR_ID_CHERRY 0x046a 747#define USB_VENDOR_ID_CHERRY 0x046a
775#define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023 748#define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023
@@ -792,6 +765,12 @@ void usbhid_init_reports(struct hid_device *hid)
792#define USB_VENDOR_ID_IMATION 0x0718 765#define USB_VENDOR_ID_IMATION 0x0718
793#define USB_DEVICE_ID_DISC_STAKKA 0xd000 766#define USB_DEVICE_ID_DISC_STAKKA 0xd000
794 767
768#define USB_VENDOR_ID_PANTHERLORD 0x0810
769#define USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK 0x0001
770
771#define USB_VENDOR_ID_SONY 0x054c
772#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
773
795/* 774/*
796 * Alphabetically sorted blacklist by quirk type. 775 * Alphabetically sorted blacklist by quirk type.
797 */ 776 */
@@ -946,19 +925,21 @@ static const struct hid_blacklist {
946 925
947 { USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION, HID_QUIRK_CYMOTION }, 926 { USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION, HID_QUIRK_CYMOTION },
948 927
949 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI, HID_QUIRK_POWERBOOK_HAS_FN }, 928 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
950 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO, HID_QUIRK_POWERBOOK_HAS_FN }, 929 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
951 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI, HID_QUIRK_POWERBOOK_HAS_FN }, 930 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
952 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD}, 931 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
953 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS, HID_QUIRK_POWERBOOK_HAS_FN }, 932 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
954 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI, HID_QUIRK_POWERBOOK_HAS_FN }, 933 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
955 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD}, 934 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
956 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS, HID_QUIRK_POWERBOOK_HAS_FN }, 935 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
957 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI, HID_QUIRK_POWERBOOK_HAS_FN }, 936 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
958 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD}, 937 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
959 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS, HID_QUIRK_POWERBOOK_HAS_FN }, 938 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
960 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN }, 939 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
961 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN }, 940 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
941
942 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IR, HID_QUIRK_IGNORE },
962 943
963 { USB_VENDOR_ID_PANJIT, 0x0001, HID_QUIRK_IGNORE }, 944 { USB_VENDOR_ID_PANJIT, 0x0001, HID_QUIRK_IGNORE },
964 { USB_VENDOR_ID_PANJIT, 0x0002, HID_QUIRK_IGNORE }, 945 { USB_VENDOR_ID_PANJIT, 0x0002, HID_QUIRK_IGNORE },
@@ -969,6 +950,10 @@ static const struct hid_blacklist {
969 950
970 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_USB_RECEIVER, HID_QUIRK_BAD_RELATIVE_KEYS }, 951 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_USB_RECEIVER, HID_QUIRK_BAD_RELATIVE_KEYS },
971 952
953 { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
954
955 { USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER, HID_QUIRK_SONY_PS3_CONTROLLER },
956
972 { 0, 0 } 957 { 0, 0 }
973}; 958};
974 959
@@ -1033,6 +1018,32 @@ static void hid_fixup_cymotion_descriptor(char *rdesc, int rsize)
1033 } 1018 }
1034} 1019}
1035 1020
1021/*
1022 * Sending HID_REQ_GET_REPORT changes the operation mode of the ps3 controller
1023 * to "operational". Without this, the ps3 controller will not report any
1024 * events.
1025 */
1026static void hid_fixup_sony_ps3_controller(struct usb_device *dev, int ifnum)
1027{
1028 int result;
1029 char *buf = kmalloc(18, GFP_KERNEL);
1030
1031 if (!buf)
1032 return;
1033
1034 result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
1035 HID_REQ_GET_REPORT,
1036 USB_DIR_IN | USB_TYPE_CLASS |
1037 USB_RECIP_INTERFACE,
1038 (3 << 8) | 0xf2, ifnum, buf, 17,
1039 USB_CTRL_GET_TIMEOUT);
1040
1041 if (result < 0)
1042 err("%s failed: %d\n", __func__, result);
1043
1044 kfree(buf);
1045}
1046
1036static struct hid_device *usb_hid_configure(struct usb_interface *intf) 1047static struct hid_device *usb_hid_configure(struct usb_interface *intf)
1037{ 1048{
1038 struct usb_host_interface *interface = intf->cur_altsetting; 1049 struct usb_host_interface *interface = intf->cur_altsetting;
@@ -1064,6 +1075,11 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
1064 if (quirks & HID_QUIRK_IGNORE) 1075 if (quirks & HID_QUIRK_IGNORE)
1065 return NULL; 1076 return NULL;
1066 1077
1078 if ((quirks & HID_QUIRK_IGNORE_MOUSE) &&
1079 (interface->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_MOUSE))
1080 return NULL;
1081
1082
1067 if (usb_get_extra_descriptor(interface, HID_DT_HID, &hdesc) && 1083 if (usb_get_extra_descriptor(interface, HID_DT_HID, &hdesc) &&
1068 (!interface->desc.bNumEndpoints || 1084 (!interface->desc.bNumEndpoints ||
1069 usb_get_extra_descriptor(&interface->endpoint[0], HID_DT_HID, &hdesc))) { 1085 usb_get_extra_descriptor(&interface->endpoint[0], HID_DT_HID, &hdesc))) {
@@ -1235,8 +1251,8 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
1235 usbhid->urbctrl->transfer_dma = usbhid->ctrlbuf_dma; 1251 usbhid->urbctrl->transfer_dma = usbhid->ctrlbuf_dma;
1236 usbhid->urbctrl->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP); 1252 usbhid->urbctrl->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP);
1237 hid->hidinput_input_event = usb_hidinput_input_event; 1253 hid->hidinput_input_event = usb_hidinput_input_event;
1238 hid->hidinput_open = hidinput_open; 1254 hid->hid_open = usbhid_open;
1239 hid->hidinput_close = hidinput_close; 1255 hid->hid_close = usbhid_close;
1240#ifdef CONFIG_USB_HIDDEV 1256#ifdef CONFIG_USB_HIDDEV
1241 hid->hiddev_hid_event = hiddev_hid_event; 1257 hid->hiddev_hid_event = hiddev_hid_event;
1242 hid->hiddev_report_event = hiddev_report_event; 1258 hid->hiddev_report_event = hiddev_report_event;
@@ -1315,13 +1331,13 @@ static int hid_probe(struct usb_interface *intf, const struct usb_device_id *id)
1315 return -ENODEV; 1331 return -ENODEV;
1316 } 1332 }
1317 1333
1318 /* This only gets called when we are a single-input (most of the 1334 if ((hid->claimed & HID_CLAIMED_INPUT))
1319 * time). IOW, not a HID_QUIRK_MULTI_INPUT. The hid_ff_init() is
1320 * only useful in this case, and not for multi-input quirks. */
1321 if ((hid->claimed & HID_CLAIMED_INPUT) &&
1322 !(hid->quirks & HID_QUIRK_MULTI_INPUT))
1323 hid_ff_init(hid); 1335 hid_ff_init(hid);
1324 1336
1337 if (hid->quirks & HID_QUIRK_SONY_PS3_CONTROLLER)
1338 hid_fixup_sony_ps3_controller(interface_to_usbdev(intf),
1339 intf->cur_altsetting->desc.bInterfaceNumber);
1340
1325 printk(KERN_INFO); 1341 printk(KERN_INFO);
1326 1342
1327 if (hid->claimed & HID_CLAIMED_INPUT) 1343 if (hid->claimed & HID_CLAIMED_INPUT)
diff --git a/drivers/usb/input/hid-ff.c b/drivers/usb/input/hid-ff.c
index 59ed65e7a621..5d145058a5cb 100644
--- a/drivers/usb/input/hid-ff.c
+++ b/drivers/usb/input/hid-ff.c
@@ -58,6 +58,9 @@ static struct hid_ff_initializer inits[] = {
58 { 0x46d, 0xc295, hid_lgff_init }, /* Logitech MOMO force wheel */ 58 { 0x46d, 0xc295, hid_lgff_init }, /* Logitech MOMO force wheel */
59 { 0x46d, 0xc219, hid_lgff_init }, /* Logitech Cordless rumble pad 2 */ 59 { 0x46d, 0xc219, hid_lgff_init }, /* Logitech Cordless rumble pad 2 */
60#endif 60#endif
61#ifdef CONFIG_PANTHERLORD_FF
62 { 0x810, 0x0001, hid_plff_init },
63#endif
61#ifdef CONFIG_THRUSTMASTER_FF 64#ifdef CONFIG_THRUSTMASTER_FF
62 { 0x44f, 0xb304, hid_tmff_init }, 65 { 0x44f, 0xb304, hid_tmff_init },
63#endif 66#endif
diff --git a/drivers/usb/input/hid-lgff.c b/drivers/usb/input/hid-lgff.c
index e47466268565..4f4fc3be192e 100644
--- a/drivers/usb/input/hid-lgff.c
+++ b/drivers/usb/input/hid-lgff.c
@@ -32,7 +32,7 @@
32#include <linux/hid.h> 32#include <linux/hid.h>
33#include "usbhid.h" 33#include "usbhid.h"
34 34
35struct device_type { 35struct dev_type {
36 u16 idVendor; 36 u16 idVendor;
37 u16 idProduct; 37 u16 idProduct;
38 const signed short *ff; 38 const signed short *ff;
@@ -48,7 +48,7 @@ static const signed short ff_joystick[] = {
48 -1 48 -1
49}; 49};
50 50
51static const struct device_type devices[] = { 51static const struct dev_type devices[] = {
52 { 0x046d, 0xc211, ff_rumble }, 52 { 0x046d, 0xc211, ff_rumble },
53 { 0x046d, 0xc219, ff_rumble }, 53 { 0x046d, 0xc219, ff_rumble },
54 { 0x046d, 0xc283, ff_joystick }, 54 { 0x046d, 0xc283, ff_joystick },
diff --git a/drivers/usb/input/hid-plff.c b/drivers/usb/input/hid-plff.c
new file mode 100644
index 000000000000..76d2e6e14db4
--- /dev/null
+++ b/drivers/usb/input/hid-plff.c
@@ -0,0 +1,129 @@
1/*
2 * Force feedback support for PantherLord USB/PS2 2in1 Adapter devices
3 *
4 * Copyright (c) 2007 Anssi Hannula <anssi.hannula@gmail.com>
5 */
6
7/*
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23
24/* #define DEBUG */
25
26#define debug(format, arg...) pr_debug("hid-plff: " format "\n" , ## arg)
27
28#include <linux/input.h>
29#include <linux/usb.h>
30#include <linux/hid.h>
31#include "usbhid.h"
32
33struct plff_device {
34 struct hid_report *report;
35};
36
37static int hid_plff_play(struct input_dev *dev, void *data,
38 struct ff_effect *effect)
39{
40 struct hid_device *hid = dev->private;
41 struct plff_device *plff = data;
42 int left, right;
43
44 left = effect->u.rumble.strong_magnitude;
45 right = effect->u.rumble.weak_magnitude;
46 debug("called with 0x%04x 0x%04x", left, right);
47
48 left = left * 0x7f / 0xffff;
49 right = right * 0x7f / 0xffff;
50
51 plff->report->field[0]->value[2] = left;
52 plff->report->field[0]->value[3] = right;
53 debug("running with 0x%02x 0x%02x", left, right);
54 usbhid_submit_report(hid, plff->report, USB_DIR_OUT);
55
56 return 0;
57}
58
59int hid_plff_init(struct hid_device *hid)
60{
61 struct plff_device *plff;
62 struct hid_report *report;
63 struct hid_input *hidinput;
64 struct list_head *report_list =
65 &hid->report_enum[HID_OUTPUT_REPORT].report_list;
66 struct list_head *report_ptr = report_list;
67 struct input_dev *dev;
68 int error;
69
70 /* The device contains 2 output reports (one for each
71 HID_QUIRK_MULTI_INPUT device), both containing 1 field, which
72 contains 4 ff00.0002 usages and 4 16bit absolute values.
73
74 The 2 input reports also contain a field which contains
75 8 ff00.0001 usages and 8 boolean values. Their meaning is
76 currently unknown. */
77
78 if (list_empty(report_list)) {
79 printk(KERN_ERR "hid-plff: no output reports found\n");
80 return -ENODEV;
81 }
82
83 list_for_each_entry(hidinput, &hid->inputs, list) {
84
85 report_ptr = report_ptr->next;
86
87 if (report_ptr == report_list) {
88 printk(KERN_ERR "hid-plff: required output report is missing\n");
89 return -ENODEV;
90 }
91
92 report = list_entry(report_ptr, struct hid_report, list);
93 if (report->maxfield < 1) {
94 printk(KERN_ERR "hid-plff: no fields in the report\n");
95 return -ENODEV;
96 }
97
98 if (report->field[0]->report_count < 4) {
99 printk(KERN_ERR "hid-plff: not enough values in the field\n");
100 return -ENODEV;
101 }
102
103 plff = kzalloc(sizeof(struct plff_device), GFP_KERNEL);
104 if (!plff)
105 return -ENOMEM;
106
107 dev = hidinput->input;
108
109 set_bit(FF_RUMBLE, dev->ffbit);
110
111 error = input_ff_create_memless(dev, plff, hid_plff_play);
112 if (error) {
113 kfree(plff);
114 return error;
115 }
116
117 plff->report = report;
118 plff->report->field[0]->value[0] = 0x00;
119 plff->report->field[0]->value[1] = 0x00;
120 plff->report->field[0]->value[2] = 0x00;
121 plff->report->field[0]->value[3] = 0x00;
122 usbhid_submit_report(hid, plff->report, USB_DIR_OUT);
123 }
124
125 printk(KERN_INFO "hid-plff: Force feedback for PantherLord USB/PS2 "
126 "2in1 Adapters by Anssi Hannula <anssi.hannula@gmail.com>\n");
127
128 return 0;
129}
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index c9418535bef8..15c70bd048c4 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -269,7 +269,7 @@ static int idmouse_release(struct inode *inode, struct file *file)
269 /* prevent a race condition with open() */ 269 /* prevent a race condition with open() */
270 mutex_lock(&disconnect_mutex); 270 mutex_lock(&disconnect_mutex);
271 271
272 dev = (struct usb_idmouse *) file->private_data; 272 dev = file->private_data;
273 273
274 if (dev == NULL) { 274 if (dev == NULL) {
275 mutex_unlock(&disconnect_mutex); 275 mutex_unlock(&disconnect_mutex);
@@ -304,17 +304,15 @@ static int idmouse_release(struct inode *inode, struct file *file)
304static ssize_t idmouse_read(struct file *file, char __user *buffer, size_t count, 304static ssize_t idmouse_read(struct file *file, char __user *buffer, size_t count,
305 loff_t * ppos) 305 loff_t * ppos)
306{ 306{
307 struct usb_idmouse *dev; 307 struct usb_idmouse *dev = file->private_data;
308 int result; 308 int result;
309 309
310 dev = (struct usb_idmouse *) file->private_data;
311
312 /* lock this object */ 310 /* lock this object */
313 down (&dev->sem); 311 down(&dev->sem);
314 312
315 /* verify that the device wasn't unplugged */ 313 /* verify that the device wasn't unplugged */
316 if (!dev->present) { 314 if (!dev->present) {
317 up (&dev->sem); 315 up(&dev->sem);
318 return -ENODEV; 316 return -ENODEV;
319 } 317 }
320 318
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index 384fa3769805..fdf68479a166 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -69,7 +69,7 @@ struct rio_usb_data {
69 char *obuf, *ibuf; /* transfer buffers */ 69 char *obuf, *ibuf; /* transfer buffers */
70 char bulk_in_ep, bulk_out_ep; /* Endpoint assignments */ 70 char bulk_in_ep, bulk_out_ep; /* Endpoint assignments */
71 wait_queue_head_t wait_q; /* for timeouts */ 71 wait_queue_head_t wait_q; /* for timeouts */
72 struct semaphore lock; /* general race avoidance */ 72 struct mutex lock; /* general race avoidance */
73}; 73};
74 74
75static struct rio_usb_data rio_instance; 75static struct rio_usb_data rio_instance;
@@ -78,17 +78,17 @@ static int open_rio(struct inode *inode, struct file *file)
78{ 78{
79 struct rio_usb_data *rio = &rio_instance; 79 struct rio_usb_data *rio = &rio_instance;
80 80
81 down(&(rio->lock)); 81 mutex_lock(&(rio->lock));
82 82
83 if (rio->isopen || !rio->present) { 83 if (rio->isopen || !rio->present) {
84 up(&(rio->lock)); 84 mutex_unlock(&(rio->lock));
85 return -EBUSY; 85 return -EBUSY;
86 } 86 }
87 rio->isopen = 1; 87 rio->isopen = 1;
88 88
89 init_waitqueue_head(&rio->wait_q); 89 init_waitqueue_head(&rio->wait_q);
90 90
91 up(&(rio->lock)); 91 mutex_unlock(&(rio->lock));
92 92
93 info("Rio opened."); 93 info("Rio opened.");
94 94
@@ -117,7 +117,7 @@ ioctl_rio(struct inode *inode, struct file *file, unsigned int cmd,
117 int retries; 117 int retries;
118 int retval=0; 118 int retval=0;
119 119
120 down(&(rio->lock)); 120 mutex_lock(&(rio->lock));
121 /* Sanity check to make sure rio is connected, powered, etc */ 121 /* Sanity check to make sure rio is connected, powered, etc */
122 if ( rio == NULL || 122 if ( rio == NULL ||
123 rio->present == 0 || 123 rio->present == 0 ||
@@ -257,7 +257,7 @@ ioctl_rio(struct inode *inode, struct file *file, unsigned int cmd,
257 257
258 258
259err_out: 259err_out:
260 up(&(rio->lock)); 260 mutex_unlock(&(rio->lock));
261 return retval; 261 return retval;
262} 262}
263 263
@@ -275,14 +275,17 @@ write_rio(struct file *file, const char __user *buffer,
275 int result = 0; 275 int result = 0;
276 int maxretry; 276 int maxretry;
277 int errn = 0; 277 int errn = 0;
278 int intr;
278 279
279 down(&(rio->lock)); 280 intr = mutex_lock_interruptible(&(rio->lock));
281 if (intr)
282 return -EINTR;
280 /* Sanity check to make sure rio is connected, powered, etc */ 283 /* Sanity check to make sure rio is connected, powered, etc */
281 if ( rio == NULL || 284 if ( rio == NULL ||
282 rio->present == 0 || 285 rio->present == 0 ||
283 rio->rio_dev == NULL ) 286 rio->rio_dev == NULL )
284 { 287 {
285 up(&(rio->lock)); 288 mutex_unlock(&(rio->lock));
286 return -ENODEV; 289 return -ENODEV;
287 } 290 }
288 291
@@ -305,7 +308,7 @@ write_rio(struct file *file, const char __user *buffer,
305 goto error; 308 goto error;
306 } 309 }
307 if (signal_pending(current)) { 310 if (signal_pending(current)) {
308 up(&(rio->lock)); 311 mutex_unlock(&(rio->lock));
309 return bytes_written ? bytes_written : -EINTR; 312 return bytes_written ? bytes_written : -EINTR;
310 } 313 }
311 314
@@ -341,12 +344,12 @@ write_rio(struct file *file, const char __user *buffer,
341 buffer += copy_size; 344 buffer += copy_size;
342 } while (count > 0); 345 } while (count > 0);
343 346
344 up(&(rio->lock)); 347 mutex_unlock(&(rio->lock));
345 348
346 return bytes_written ? bytes_written : -EIO; 349 return bytes_written ? bytes_written : -EIO;
347 350
348error: 351error:
349 up(&(rio->lock)); 352 mutex_unlock(&(rio->lock));
350 return errn; 353 return errn;
351} 354}
352 355
@@ -361,14 +364,17 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
361 int result; 364 int result;
362 int maxretry = 10; 365 int maxretry = 10;
363 char *ibuf; 366 char *ibuf;
367 int intr;
364 368
365 down(&(rio->lock)); 369 intr = mutex_lock_interruptible(&(rio->lock));
370 if (intr)
371 return -EINTR;
366 /* Sanity check to make sure rio is connected, powered, etc */ 372 /* Sanity check to make sure rio is connected, powered, etc */
367 if ( rio == NULL || 373 if ( rio == NULL ||
368 rio->present == 0 || 374 rio->present == 0 ||
369 rio->rio_dev == NULL ) 375 rio->rio_dev == NULL )
370 { 376 {
371 up(&(rio->lock)); 377 mutex_unlock(&(rio->lock));
372 return -ENODEV; 378 return -ENODEV;
373 } 379 }
374 380
@@ -379,11 +385,11 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
379 385
380 while (count > 0) { 386 while (count > 0) {
381 if (signal_pending(current)) { 387 if (signal_pending(current)) {
382 up(&(rio->lock)); 388 mutex_unlock(&(rio->lock));
383 return read_count ? read_count : -EINTR; 389 return read_count ? read_count : -EINTR;
384 } 390 }
385 if (!rio->rio_dev) { 391 if (!rio->rio_dev) {
386 up(&(rio->lock)); 392 mutex_unlock(&(rio->lock));
387 return -ENODEV; 393 return -ENODEV;
388 } 394 }
389 this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count; 395 this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count;
@@ -400,7 +406,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
400 count = this_read = partial; 406 count = this_read = partial;
401 } else if (result == -ETIMEDOUT || result == 15) { /* FIXME: 15 ??? */ 407 } else if (result == -ETIMEDOUT || result == 15) { /* FIXME: 15 ??? */
402 if (!maxretry--) { 408 if (!maxretry--) {
403 up(&(rio->lock)); 409 mutex_unlock(&(rio->lock));
404 err("read_rio: maxretry timeout"); 410 err("read_rio: maxretry timeout");
405 return -ETIME; 411 return -ETIME;
406 } 412 }
@@ -409,18 +415,18 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
409 finish_wait(&rio->wait_q, &wait); 415 finish_wait(&rio->wait_q, &wait);
410 continue; 416 continue;
411 } else if (result != -EREMOTEIO) { 417 } else if (result != -EREMOTEIO) {
412 up(&(rio->lock)); 418 mutex_unlock(&(rio->lock));
413 err("Read Whoops - result:%u partial:%u this_read:%u", 419 err("Read Whoops - result:%u partial:%u this_read:%u",
414 result, partial, this_read); 420 result, partial, this_read);
415 return -EIO; 421 return -EIO;
416 } else { 422 } else {
417 up(&(rio->lock)); 423 mutex_unlock(&(rio->lock));
418 return (0); 424 return (0);
419 } 425 }
420 426
421 if (this_read) { 427 if (this_read) {
422 if (copy_to_user(buffer, ibuf, this_read)) { 428 if (copy_to_user(buffer, ibuf, this_read)) {
423 up(&(rio->lock)); 429 mutex_unlock(&(rio->lock));
424 return -EFAULT; 430 return -EFAULT;
425 } 431 }
426 count -= this_read; 432 count -= this_read;
@@ -428,7 +434,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
428 buffer += this_read; 434 buffer += this_read;
429 } 435 }
430 } 436 }
431 up(&(rio->lock)); 437 mutex_unlock(&(rio->lock));
432 return read_count; 438 return read_count;
433} 439}
434 440
@@ -480,7 +486,7 @@ static int probe_rio(struct usb_interface *intf,
480 } 486 }
481 dbg("probe_rio: ibuf address:%p", rio->ibuf); 487 dbg("probe_rio: ibuf address:%p", rio->ibuf);
482 488
483 init_MUTEX(&(rio->lock)); 489 mutex_init(&(rio->lock));
484 490
485 usb_set_intfdata (intf, rio); 491 usb_set_intfdata (intf, rio);
486 rio->present = 1; 492 rio->present = 1;
@@ -496,12 +502,12 @@ static void disconnect_rio(struct usb_interface *intf)
496 if (rio) { 502 if (rio) {
497 usb_deregister_dev(intf, &usb_rio_class); 503 usb_deregister_dev(intf, &usb_rio_class);
498 504
499 down(&(rio->lock)); 505 mutex_lock(&(rio->lock));
500 if (rio->isopen) { 506 if (rio->isopen) {
501 rio->isopen = 0; 507 rio->isopen = 0;
502 /* better let it finish - the release will do whats needed */ 508 /* better let it finish - the release will do whats needed */
503 rio->rio_dev = NULL; 509 rio->rio_dev = NULL;
504 up(&(rio->lock)); 510 mutex_unlock(&(rio->lock));
505 return; 511 return;
506 } 512 }
507 kfree(rio->ibuf); 513 kfree(rio->ibuf);
@@ -510,7 +516,7 @@ static void disconnect_rio(struct usb_interface *intf)
510 info("USB Rio disconnected."); 516 info("USB Rio disconnected.");
511 517
512 rio->present = 0; 518 rio->present = 0;
513 up(&(rio->lock)); 519 mutex_unlock(&(rio->lock));
514 } 520 }
515} 521}
516 522
diff --git a/drivers/usb/mon/Makefile b/drivers/usb/mon/Makefile
index 3cf3ea3a88ed..90c59535778d 100644
--- a/drivers/usb/mon/Makefile
+++ b/drivers/usb/mon/Makefile
@@ -2,7 +2,7 @@
2# Makefile for USB Core files and filesystem 2# Makefile for USB Core files and filesystem
3# 3#
4 4
5usbmon-objs := mon_main.o mon_stat.o mon_text.o mon_dma.o 5usbmon-objs := mon_main.o mon_stat.o mon_text.o mon_bin.o mon_dma.o
6 6
7# This does not use CONFIG_USB_MON because we want this to use a tristate. 7# This does not use CONFIG_USB_MON because we want this to use a tristate.
8obj-$(CONFIG_USB) += usbmon.o 8obj-$(CONFIG_USB) += usbmon.o
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
new file mode 100644
index 000000000000..c01dfe603672
--- /dev/null
+++ b/drivers/usb/mon/mon_bin.c
@@ -0,0 +1,1172 @@
1/*
2 * The USB Monitor, inspired by Dave Harding's USBMon.
3 *
4 * This is a binary format reader.
5 *
6 * Copyright (C) 2006 Paolo Abeni (paolo.abeni@email.it)
7 * Copyright (C) 2006 Pete Zaitcev (zaitcev@redhat.com)
8 */
9
10#include <linux/kernel.h>
11#include <linux/types.h>
12#include <linux/fs.h>
13#include <linux/cdev.h>
14#include <linux/usb.h>
15#include <linux/poll.h>
16#include <linux/compat.h>
17#include <linux/mm.h>
18
19#include <asm/uaccess.h>
20
21#include "usb_mon.h"
22
23/*
24 * Defined by USB 2.0 clause 9.3, table 9.2.
25 */
26#define SETUP_LEN 8
27
28/* ioctl macros */
29#define MON_IOC_MAGIC 0x92
30
31#define MON_IOCQ_URB_LEN _IO(MON_IOC_MAGIC, 1)
32/* #2 used to be MON_IOCX_URB, removed before it got into Linus tree */
33#define MON_IOCG_STATS _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats)
34#define MON_IOCT_RING_SIZE _IO(MON_IOC_MAGIC, 4)
35#define MON_IOCQ_RING_SIZE _IO(MON_IOC_MAGIC, 5)
36#define MON_IOCX_GET _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get)
37#define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch)
38#define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8)
39#ifdef CONFIG_COMPAT
40#define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32)
41#define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32)
42#endif
43
44/*
45 * Some architectures have enormous basic pages (16KB for ia64, 64KB for ppc).
46 * But it's all right. Just use a simple way to make sure the chunk is never
47 * smaller than a page.
48 *
49 * N.B. An application does not know our chunk size.
50 *
51 * Woops, get_zeroed_page() returns a single page. I guess we're stuck with
52 * page-sized chunks for the time being.
53 */
54#define CHUNK_SIZE PAGE_SIZE
55#define CHUNK_ALIGN(x) (((x)+CHUNK_SIZE-1) & ~(CHUNK_SIZE-1))
56
57/*
58 * The magic limit was calculated so that it allows the monitoring
59 * application to pick data once in two ticks. This way, another application,
60 * which presumably drives the bus, gets to hog CPU, yet we collect our data.
61 * If HZ is 100, a 480 mbit/s bus drives 614 KB every jiffy. USB has an
62 * enormous overhead built into the bus protocol, so we need about 1000 KB.
63 *
64 * This is still too much for most cases, where we just snoop a few
65 * descriptor fetches for enumeration. So, the default is a "reasonable"
66 * amount for systems with HZ=250 and incomplete bus saturation.
67 *
68 * XXX What about multi-megabyte URBs which take minutes to transfer?
69 */
70#define BUFF_MAX CHUNK_ALIGN(1200*1024)
71#define BUFF_DFL CHUNK_ALIGN(300*1024)
72#define BUFF_MIN CHUNK_ALIGN(8*1024)
73
74/*
75 * The per-event API header (2 per URB).
76 *
77 * This structure is seen in userland as defined by the documentation.
78 */
79struct mon_bin_hdr {
80 u64 id; /* URB ID - from submission to callback */
81 unsigned char type; /* Same as in text API; extensible. */
82 unsigned char xfer_type; /* ISO, Intr, Control, Bulk */
83 unsigned char epnum; /* Endpoint number and transfer direction */
84 unsigned char devnum; /* Device address */
85 unsigned short busnum; /* Bus number */
86 char flag_setup;
87 char flag_data;
88 s64 ts_sec; /* gettimeofday */
89 s32 ts_usec; /* gettimeofday */
90 int status;
91 unsigned int len_urb; /* Length of data (submitted or actual) */
92 unsigned int len_cap; /* Delivered length */
93 unsigned char setup[SETUP_LEN]; /* Only for Control S-type */
94};
95
96/* per file statistic */
97struct mon_bin_stats {
98 u32 queued;
99 u32 dropped;
100};
101
102struct mon_bin_get {
103 struct mon_bin_hdr __user *hdr; /* Only 48 bytes, not 64. */
104 void __user *data;
105 size_t alloc; /* Length of data (can be zero) */
106};
107
108struct mon_bin_mfetch {
109 u32 __user *offvec; /* Vector of events fetched */
110 u32 nfetch; /* Number of events to fetch (out: fetched) */
111 u32 nflush; /* Number of events to flush */
112};
113
114#ifdef CONFIG_COMPAT
115struct mon_bin_get32 {
116 u32 hdr32;
117 u32 data32;
118 u32 alloc32;
119};
120
121struct mon_bin_mfetch32 {
122 u32 offvec32;
123 u32 nfetch32;
124 u32 nflush32;
125};
126#endif
127
128/* Having these two values same prevents wrapping of the mon_bin_hdr */
129#define PKT_ALIGN 64
130#define PKT_SIZE 64
131
132/* max number of USB bus supported */
133#define MON_BIN_MAX_MINOR 128
134
135/*
136 * The buffer: map of used pages.
137 */
138struct mon_pgmap {
139 struct page *pg;
140 unsigned char *ptr; /* XXX just use page_to_virt everywhere? */
141};
142
143/*
144 * This gets associated with an open file struct.
145 */
146struct mon_reader_bin {
147 /* The buffer: one per open. */
148 spinlock_t b_lock; /* Protect b_cnt, b_in */
149 unsigned int b_size; /* Current size of the buffer - bytes */
150 unsigned int b_cnt; /* Bytes used */
151 unsigned int b_in, b_out; /* Offsets into buffer - bytes */
152 unsigned int b_read; /* Amount of read data in curr. pkt. */
153 struct mon_pgmap *b_vec; /* The map array */
154 wait_queue_head_t b_wait; /* Wait for data here */
155
156 struct mutex fetch_lock; /* Protect b_read, b_out */
157 int mmap_active;
158
159 /* A list of these is needed for "bus 0". Some time later. */
160 struct mon_reader r;
161
162 /* Stats */
163 unsigned int cnt_lost;
164};
165
166static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp,
167 unsigned int offset)
168{
169 return (struct mon_bin_hdr *)
170 (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
171}
172
173#define MON_RING_EMPTY(rp) ((rp)->b_cnt == 0)
174
175static dev_t mon_bin_dev0;
176static struct cdev mon_bin_cdev;
177
178static void mon_buff_area_fill(const struct mon_reader_bin *rp,
179 unsigned int offset, unsigned int size);
180static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp);
181static int mon_alloc_buff(struct mon_pgmap *map, int npages);
182static void mon_free_buff(struct mon_pgmap *map, int npages);
183
184/*
185 * This is a "chunked memcpy". It does not manipulate any counters.
186 * But it returns the new offset for repeated application.
187 */
188unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
189 unsigned int off, const unsigned char *from, unsigned int length)
190{
191 unsigned int step_len;
192 unsigned char *buf;
193 unsigned int in_page;
194
195 while (length) {
196 /*
197 * Determine step_len.
198 */
199 step_len = length;
200 in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
201 if (in_page < step_len)
202 step_len = in_page;
203
204 /*
205 * Copy data and advance pointers.
206 */
207 buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
208 memcpy(buf, from, step_len);
209 if ((off += step_len) >= this->b_size) off = 0;
210 from += step_len;
211 length -= step_len;
212 }
213 return off;
214}
215
216/*
217 * This is a little worse than the above because it's "chunked copy_to_user".
218 * The return value is an error code, not an offset.
219 */
220static int copy_from_buf(const struct mon_reader_bin *this, unsigned int off,
221 char __user *to, int length)
222{
223 unsigned int step_len;
224 unsigned char *buf;
225 unsigned int in_page;
226
227 while (length) {
228 /*
229 * Determine step_len.
230 */
231 step_len = length;
232 in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
233 if (in_page < step_len)
234 step_len = in_page;
235
236 /*
237 * Copy data and advance pointers.
238 */
239 buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
240 if (copy_to_user(to, buf, step_len))
241 return -EINVAL;
242 if ((off += step_len) >= this->b_size) off = 0;
243 to += step_len;
244 length -= step_len;
245 }
246 return 0;
247}
248
249/*
250 * Allocate an (aligned) area in the buffer.
251 * This is called under b_lock.
252 * Returns ~0 on failure.
253 */
254static unsigned int mon_buff_area_alloc(struct mon_reader_bin *rp,
255 unsigned int size)
256{
257 unsigned int offset;
258
259 size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
260 if (rp->b_cnt + size > rp->b_size)
261 return ~0;
262 offset = rp->b_in;
263 rp->b_cnt += size;
264 if ((rp->b_in += size) >= rp->b_size)
265 rp->b_in -= rp->b_size;
266 return offset;
267}
268
269/*
270 * This is the same thing as mon_buff_area_alloc, only it does not allow
271 * buffers to wrap. This is needed by applications which pass references
272 * into mmap-ed buffers up their stacks (libpcap can do that).
273 *
274 * Currently, we always have the header stuck with the data, although
275 * it is not strictly speaking necessary.
276 *
277 * When a buffer would wrap, we place a filler packet to mark the space.
278 */
279static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp,
280 unsigned int size)
281{
282 unsigned int offset;
283 unsigned int fill_size;
284
285 size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
286 if (rp->b_cnt + size > rp->b_size)
287 return ~0;
288 if (rp->b_in + size > rp->b_size) {
289 /*
290 * This would wrap. Find if we still have space after
291 * skipping to the end of the buffer. If we do, place
292 * a filler packet and allocate a new packet.
293 */
294 fill_size = rp->b_size - rp->b_in;
295 if (rp->b_cnt + size + fill_size > rp->b_size)
296 return ~0;
297 mon_buff_area_fill(rp, rp->b_in, fill_size);
298
299 offset = 0;
300 rp->b_in = size;
301 rp->b_cnt += size + fill_size;
302 } else if (rp->b_in + size == rp->b_size) {
303 offset = rp->b_in;
304 rp->b_in = 0;
305 rp->b_cnt += size;
306 } else {
307 offset = rp->b_in;
308 rp->b_in += size;
309 rp->b_cnt += size;
310 }
311 return offset;
312}
313
314/*
315 * Return a few (kilo-)bytes to the head of the buffer.
316 * This is used if a DMA fetch fails.
317 */
318static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size)
319{
320
321 size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
322 rp->b_cnt -= size;
323 if (rp->b_in < size)
324 rp->b_in += rp->b_size;
325 rp->b_in -= size;
326}
327
328/*
329 * This has to be called under both b_lock and fetch_lock, because
330 * it accesses both b_cnt and b_out.
331 */
332static void mon_buff_area_free(struct mon_reader_bin *rp, unsigned int size)
333{
334
335 size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
336 rp->b_cnt -= size;
337 if ((rp->b_out += size) >= rp->b_size)
338 rp->b_out -= rp->b_size;
339}
340
341static void mon_buff_area_fill(const struct mon_reader_bin *rp,
342 unsigned int offset, unsigned int size)
343{
344 struct mon_bin_hdr *ep;
345
346 ep = MON_OFF2HDR(rp, offset);
347 memset(ep, 0, PKT_SIZE);
348 ep->type = '@';
349 ep->len_cap = size - PKT_SIZE;
350}
351
352static inline char mon_bin_get_setup(unsigned char *setupb,
353 const struct urb *urb, char ev_type)
354{
355
356 if (!usb_pipecontrol(urb->pipe) || ev_type != 'S')
357 return '-';
358
359 if (urb->transfer_flags & URB_NO_SETUP_DMA_MAP)
360 return mon_dmapeek(setupb, urb->setup_dma, SETUP_LEN);
361 if (urb->setup_packet == NULL)
362 return 'Z';
363
364 memcpy(setupb, urb->setup_packet, SETUP_LEN);
365 return 0;
366}
367
368static char mon_bin_get_data(const struct mon_reader_bin *rp,
369 unsigned int offset, struct urb *urb, unsigned int length)
370{
371
372 if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) {
373 mon_dmapeek_vec(rp, offset, urb->transfer_dma, length);
374 return 0;
375 }
376
377 if (urb->transfer_buffer == NULL)
378 return 'Z';
379
380 mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
381 return 0;
382}
383
384static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
385 char ev_type)
386{
387 unsigned long flags;
388 struct timeval ts;
389 unsigned int urb_length;
390 unsigned int offset;
391 unsigned int length;
392 struct mon_bin_hdr *ep;
393 char data_tag = 0;
394
395 do_gettimeofday(&ts);
396
397 spin_lock_irqsave(&rp->b_lock, flags);
398
399 /*
400 * Find the maximum allowable length, then allocate space.
401 */
402 urb_length = (ev_type == 'S') ?
403 urb->transfer_buffer_length : urb->actual_length;
404 length = urb_length;
405
406 if (length >= rp->b_size/5)
407 length = rp->b_size/5;
408
409 if (usb_pipein(urb->pipe)) {
410 if (ev_type == 'S') {
411 length = 0;
412 data_tag = '<';
413 }
414 } else {
415 if (ev_type == 'C') {
416 length = 0;
417 data_tag = '>';
418 }
419 }
420
421 if (rp->mmap_active)
422 offset = mon_buff_area_alloc_contiguous(rp, length + PKT_SIZE);
423 else
424 offset = mon_buff_area_alloc(rp, length + PKT_SIZE);
425 if (offset == ~0) {
426 rp->cnt_lost++;
427 spin_unlock_irqrestore(&rp->b_lock, flags);
428 return;
429 }
430
431 ep = MON_OFF2HDR(rp, offset);
432 if ((offset += PKT_SIZE) >= rp->b_size) offset = 0;
433
434 /*
435 * Fill the allocated area.
436 */
437 memset(ep, 0, PKT_SIZE);
438 ep->type = ev_type;
439 ep->xfer_type = usb_pipetype(urb->pipe);
440 /* We use the fact that usb_pipein() returns 0x80 */
441 ep->epnum = usb_pipeendpoint(urb->pipe) | usb_pipein(urb->pipe);
442 ep->devnum = usb_pipedevice(urb->pipe);
443 ep->busnum = rp->r.m_bus->u_bus->busnum;
444 ep->id = (unsigned long) urb;
445 ep->ts_sec = ts.tv_sec;
446 ep->ts_usec = ts.tv_usec;
447 ep->status = urb->status;
448 ep->len_urb = urb_length;
449 ep->len_cap = length;
450
451 ep->flag_setup = mon_bin_get_setup(ep->setup, urb, ev_type);
452 if (length != 0) {
453 ep->flag_data = mon_bin_get_data(rp, offset, urb, length);
454 if (ep->flag_data != 0) { /* Yes, it's 0x00, not '0' */
455 ep->len_cap = 0;
456 mon_buff_area_shrink(rp, length);
457 }
458 } else {
459 ep->flag_data = data_tag;
460 }
461
462 spin_unlock_irqrestore(&rp->b_lock, flags);
463
464 wake_up(&rp->b_wait);
465}
466
467static void mon_bin_submit(void *data, struct urb *urb)
468{
469 struct mon_reader_bin *rp = data;
470 mon_bin_event(rp, urb, 'S');
471}
472
473static void mon_bin_complete(void *data, struct urb *urb)
474{
475 struct mon_reader_bin *rp = data;
476 mon_bin_event(rp, urb, 'C');
477}
478
479static void mon_bin_error(void *data, struct urb *urb, int error)
480{
481 struct mon_reader_bin *rp = data;
482 unsigned long flags;
483 unsigned int offset;
484 struct mon_bin_hdr *ep;
485
486 spin_lock_irqsave(&rp->b_lock, flags);
487
488 offset = mon_buff_area_alloc(rp, PKT_SIZE);
489 if (offset == ~0) {
490 /* Not incrementing cnt_lost. Just because. */
491 spin_unlock_irqrestore(&rp->b_lock, flags);
492 return;
493 }
494
495 ep = MON_OFF2HDR(rp, offset);
496
497 memset(ep, 0, PKT_SIZE);
498 ep->type = 'E';
499 ep->xfer_type = usb_pipetype(urb->pipe);
500 /* We use the fact that usb_pipein() returns 0x80 */
501 ep->epnum = usb_pipeendpoint(urb->pipe) | usb_pipein(urb->pipe);
502 ep->devnum = usb_pipedevice(urb->pipe);
503 ep->busnum = rp->r.m_bus->u_bus->busnum;
504 ep->id = (unsigned long) urb;
505 ep->status = error;
506
507 ep->flag_setup = '-';
508 ep->flag_data = 'E';
509
510 spin_unlock_irqrestore(&rp->b_lock, flags);
511
512 wake_up(&rp->b_wait);
513}
514
515static int mon_bin_open(struct inode *inode, struct file *file)
516{
517 struct mon_bus *mbus;
518 struct usb_bus *ubus;
519 struct mon_reader_bin *rp;
520 size_t size;
521 int rc;
522
523 mutex_lock(&mon_lock);
524 if ((mbus = mon_bus_lookup(iminor(inode))) == NULL) {
525 mutex_unlock(&mon_lock);
526 return -ENODEV;
527 }
528 if ((ubus = mbus->u_bus) == NULL) {
529 printk(KERN_ERR TAG ": consistency error on open\n");
530 mutex_unlock(&mon_lock);
531 return -ENODEV;
532 }
533
534 rp = kzalloc(sizeof(struct mon_reader_bin), GFP_KERNEL);
535 if (rp == NULL) {
536 rc = -ENOMEM;
537 goto err_alloc;
538 }
539 spin_lock_init(&rp->b_lock);
540 init_waitqueue_head(&rp->b_wait);
541 mutex_init(&rp->fetch_lock);
542
543 rp->b_size = BUFF_DFL;
544
545 size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE);
546 if ((rp->b_vec = kzalloc(size, GFP_KERNEL)) == NULL) {
547 rc = -ENOMEM;
548 goto err_allocvec;
549 }
550
551 if ((rc = mon_alloc_buff(rp->b_vec, rp->b_size/CHUNK_SIZE)) < 0)
552 goto err_allocbuff;
553
554 rp->r.m_bus = mbus;
555 rp->r.r_data = rp;
556 rp->r.rnf_submit = mon_bin_submit;
557 rp->r.rnf_error = mon_bin_error;
558 rp->r.rnf_complete = mon_bin_complete;
559
560 mon_reader_add(mbus, &rp->r);
561
562 file->private_data = rp;
563 mutex_unlock(&mon_lock);
564 return 0;
565
566err_allocbuff:
567 kfree(rp->b_vec);
568err_allocvec:
569 kfree(rp);
570err_alloc:
571 mutex_unlock(&mon_lock);
572 return rc;
573}
574
575/*
576 * Extract an event from buffer and copy it to user space.
577 * Wait if there is no event ready.
578 * Returns zero or error.
579 */
580static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp,
581 struct mon_bin_hdr __user *hdr, void __user *data, unsigned int nbytes)
582{
583 unsigned long flags;
584 struct mon_bin_hdr *ep;
585 size_t step_len;
586 unsigned int offset;
587 int rc;
588
589 mutex_lock(&rp->fetch_lock);
590
591 if ((rc = mon_bin_wait_event(file, rp)) < 0) {
592 mutex_unlock(&rp->fetch_lock);
593 return rc;
594 }
595
596 ep = MON_OFF2HDR(rp, rp->b_out);
597
598 if (copy_to_user(hdr, ep, sizeof(struct mon_bin_hdr))) {
599 mutex_unlock(&rp->fetch_lock);
600 return -EFAULT;
601 }
602
603 step_len = min(ep->len_cap, nbytes);
604 if ((offset = rp->b_out + PKT_SIZE) >= rp->b_size) offset = 0;
605
606 if (copy_from_buf(rp, offset, data, step_len)) {
607 mutex_unlock(&rp->fetch_lock);
608 return -EFAULT;
609 }
610
611 spin_lock_irqsave(&rp->b_lock, flags);
612 mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
613 spin_unlock_irqrestore(&rp->b_lock, flags);
614 rp->b_read = 0;
615
616 mutex_unlock(&rp->fetch_lock);
617 return 0;
618}
619
620static int mon_bin_release(struct inode *inode, struct file *file)
621{
622 struct mon_reader_bin *rp = file->private_data;
623 struct mon_bus* mbus = rp->r.m_bus;
624
625 mutex_lock(&mon_lock);
626
627 if (mbus->nreaders <= 0) {
628 printk(KERN_ERR TAG ": consistency error on close\n");
629 mutex_unlock(&mon_lock);
630 return 0;
631 }
632 mon_reader_del(mbus, &rp->r);
633
634 mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
635 kfree(rp->b_vec);
636 kfree(rp);
637
638 mutex_unlock(&mon_lock);
639 return 0;
640}
641
642static ssize_t mon_bin_read(struct file *file, char __user *buf,
643 size_t nbytes, loff_t *ppos)
644{
645 struct mon_reader_bin *rp = file->private_data;
646 unsigned long flags;
647 struct mon_bin_hdr *ep;
648 unsigned int offset;
649 size_t step_len;
650 char *ptr;
651 ssize_t done = 0;
652 int rc;
653
654 mutex_lock(&rp->fetch_lock);
655
656 if ((rc = mon_bin_wait_event(file, rp)) < 0) {
657 mutex_unlock(&rp->fetch_lock);
658 return rc;
659 }
660
661 ep = MON_OFF2HDR(rp, rp->b_out);
662
663 if (rp->b_read < sizeof(struct mon_bin_hdr)) {
664 step_len = min(nbytes, sizeof(struct mon_bin_hdr) - rp->b_read);
665 ptr = ((char *)ep) + rp->b_read;
666 if (step_len && copy_to_user(buf, ptr, step_len)) {
667 mutex_unlock(&rp->fetch_lock);
668 return -EFAULT;
669 }
670 nbytes -= step_len;
671 buf += step_len;
672 rp->b_read += step_len;
673 done += step_len;
674 }
675
676 if (rp->b_read >= sizeof(struct mon_bin_hdr)) {
677 step_len = min(nbytes, (size_t)ep->len_cap);
678 offset = rp->b_out + PKT_SIZE;
679 offset += rp->b_read - sizeof(struct mon_bin_hdr);
680 if (offset >= rp->b_size)
681 offset -= rp->b_size;
682 if (copy_from_buf(rp, offset, buf, step_len)) {
683 mutex_unlock(&rp->fetch_lock);
684 return -EFAULT;
685 }
686 nbytes -= step_len;
687 buf += step_len;
688 rp->b_read += step_len;
689 done += step_len;
690 }
691
692 /*
693 * Check if whole packet was read, and if so, jump to the next one.
694 */
695 if (rp->b_read >= sizeof(struct mon_bin_hdr) + ep->len_cap) {
696 spin_lock_irqsave(&rp->b_lock, flags);
697 mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
698 spin_unlock_irqrestore(&rp->b_lock, flags);
699 rp->b_read = 0;
700 }
701
702 mutex_unlock(&rp->fetch_lock);
703 return done;
704}
705
706/*
707 * Remove at most nevents from chunked buffer.
708 * Returns the number of removed events.
709 */
710static int mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents)
711{
712 unsigned long flags;
713 struct mon_bin_hdr *ep;
714 int i;
715
716 mutex_lock(&rp->fetch_lock);
717 spin_lock_irqsave(&rp->b_lock, flags);
718 for (i = 0; i < nevents; ++i) {
719 if (MON_RING_EMPTY(rp))
720 break;
721
722 ep = MON_OFF2HDR(rp, rp->b_out);
723 mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
724 }
725 spin_unlock_irqrestore(&rp->b_lock, flags);
726 rp->b_read = 0;
727 mutex_unlock(&rp->fetch_lock);
728 return i;
729}
730
731/*
732 * Fetch at most max event offsets into the buffer and put them into vec.
733 * The events are usually freed later with mon_bin_flush.
734 * Return the effective number of events fetched.
735 */
736static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp,
737 u32 __user *vec, unsigned int max)
738{
739 unsigned int cur_out;
740 unsigned int bytes, avail;
741 unsigned int size;
742 unsigned int nevents;
743 struct mon_bin_hdr *ep;
744 unsigned long flags;
745 int rc;
746
747 mutex_lock(&rp->fetch_lock);
748
749 if ((rc = mon_bin_wait_event(file, rp)) < 0) {
750 mutex_unlock(&rp->fetch_lock);
751 return rc;
752 }
753
754 spin_lock_irqsave(&rp->b_lock, flags);
755 avail = rp->b_cnt;
756 spin_unlock_irqrestore(&rp->b_lock, flags);
757
758 cur_out = rp->b_out;
759 nevents = 0;
760 bytes = 0;
761 while (bytes < avail) {
762 if (nevents >= max)
763 break;
764
765 ep = MON_OFF2HDR(rp, cur_out);
766 if (put_user(cur_out, &vec[nevents])) {
767 mutex_unlock(&rp->fetch_lock);
768 return -EFAULT;
769 }
770
771 nevents++;
772 size = ep->len_cap + PKT_SIZE;
773 size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
774 if ((cur_out += size) >= rp->b_size)
775 cur_out -= rp->b_size;
776 bytes += size;
777 }
778
779 mutex_unlock(&rp->fetch_lock);
780 return nevents;
781}
782
783/*
784 * Count events. This is almost the same as the above mon_bin_fetch,
785 * only we do not store offsets into user vector, and we have no limit.
786 */
787static int mon_bin_queued(struct mon_reader_bin *rp)
788{
789 unsigned int cur_out;
790 unsigned int bytes, avail;
791 unsigned int size;
792 unsigned int nevents;
793 struct mon_bin_hdr *ep;
794 unsigned long flags;
795
796 mutex_lock(&rp->fetch_lock);
797
798 spin_lock_irqsave(&rp->b_lock, flags);
799 avail = rp->b_cnt;
800 spin_unlock_irqrestore(&rp->b_lock, flags);
801
802 cur_out = rp->b_out;
803 nevents = 0;
804 bytes = 0;
805 while (bytes < avail) {
806 ep = MON_OFF2HDR(rp, cur_out);
807
808 nevents++;
809 size = ep->len_cap + PKT_SIZE;
810 size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
811 if ((cur_out += size) >= rp->b_size)
812 cur_out -= rp->b_size;
813 bytes += size;
814 }
815
816 mutex_unlock(&rp->fetch_lock);
817 return nevents;
818}
819
820/*
821 */
822static int mon_bin_ioctl(struct inode *inode, struct file *file,
823 unsigned int cmd, unsigned long arg)
824{
825 struct mon_reader_bin *rp = file->private_data;
826 // struct mon_bus* mbus = rp->r.m_bus;
827 int ret = 0;
828 struct mon_bin_hdr *ep;
829 unsigned long flags;
830
831 switch (cmd) {
832
833 case MON_IOCQ_URB_LEN:
834 /*
835 * N.B. This only returns the size of data, without the header.
836 */
837 spin_lock_irqsave(&rp->b_lock, flags);
838 if (!MON_RING_EMPTY(rp)) {
839 ep = MON_OFF2HDR(rp, rp->b_out);
840 ret = ep->len_cap;
841 }
842 spin_unlock_irqrestore(&rp->b_lock, flags);
843 break;
844
845 case MON_IOCQ_RING_SIZE:
846 ret = rp->b_size;
847 break;
848
849 case MON_IOCT_RING_SIZE:
850 /*
851 * Changing the buffer size will flush it's contents; the new
852 * buffer is allocated before releasing the old one to be sure
853 * the device will stay functional also in case of memory
854 * pressure.
855 */
856 {
857 int size;
858 struct mon_pgmap *vec;
859
860 if (arg < BUFF_MIN || arg > BUFF_MAX)
861 return -EINVAL;
862
863 size = CHUNK_ALIGN(arg);
864 if ((vec = kzalloc(sizeof(struct mon_pgmap) * (size/CHUNK_SIZE),
865 GFP_KERNEL)) == NULL) {
866 ret = -ENOMEM;
867 break;
868 }
869
870 ret = mon_alloc_buff(vec, size/CHUNK_SIZE);
871 if (ret < 0) {
872 kfree(vec);
873 break;
874 }
875
876 mutex_lock(&rp->fetch_lock);
877 spin_lock_irqsave(&rp->b_lock, flags);
878 mon_free_buff(rp->b_vec, size/CHUNK_SIZE);
879 kfree(rp->b_vec);
880 rp->b_vec = vec;
881 rp->b_size = size;
882 rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
883 rp->cnt_lost = 0;
884 spin_unlock_irqrestore(&rp->b_lock, flags);
885 mutex_unlock(&rp->fetch_lock);
886 }
887 break;
888
889 case MON_IOCH_MFLUSH:
890 ret = mon_bin_flush(rp, arg);
891 break;
892
893 case MON_IOCX_GET:
894 {
895 struct mon_bin_get getb;
896
897 if (copy_from_user(&getb, (void __user *)arg,
898 sizeof(struct mon_bin_get)))
899 return -EFAULT;
900
901 if (getb.alloc > 0x10000000) /* Want to cast to u32 */
902 return -EINVAL;
903 ret = mon_bin_get_event(file, rp,
904 getb.hdr, getb.data, (unsigned int)getb.alloc);
905 }
906 break;
907
908#ifdef CONFIG_COMPAT
909 case MON_IOCX_GET32: {
910 struct mon_bin_get32 getb;
911
912 if (copy_from_user(&getb, (void __user *)arg,
913 sizeof(struct mon_bin_get32)))
914 return -EFAULT;
915
916 ret = mon_bin_get_event(file, rp,
917 compat_ptr(getb.hdr32), compat_ptr(getb.data32),
918 getb.alloc32);
919 }
920 break;
921#endif
922
923 case MON_IOCX_MFETCH:
924 {
925 struct mon_bin_mfetch mfetch;
926 struct mon_bin_mfetch __user *uptr;
927
928 uptr = (struct mon_bin_mfetch __user *)arg;
929
930 if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
931 return -EFAULT;
932
933 if (mfetch.nflush) {
934 ret = mon_bin_flush(rp, mfetch.nflush);
935 if (ret < 0)
936 return ret;
937 if (put_user(ret, &uptr->nflush))
938 return -EFAULT;
939 }
940 ret = mon_bin_fetch(file, rp, mfetch.offvec, mfetch.nfetch);
941 if (ret < 0)
942 return ret;
943 if (put_user(ret, &uptr->nfetch))
944 return -EFAULT;
945 ret = 0;
946 }
947 break;
948
949#ifdef CONFIG_COMPAT
950 case MON_IOCX_MFETCH32:
951 {
952 struct mon_bin_mfetch32 mfetch;
953 struct mon_bin_mfetch32 __user *uptr;
954
955 uptr = (struct mon_bin_mfetch32 __user *) compat_ptr(arg);
956
957 if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
958 return -EFAULT;
959
960 if (mfetch.nflush32) {
961 ret = mon_bin_flush(rp, mfetch.nflush32);
962 if (ret < 0)
963 return ret;
964 if (put_user(ret, &uptr->nflush32))
965 return -EFAULT;
966 }
967 ret = mon_bin_fetch(file, rp, compat_ptr(mfetch.offvec32),
968 mfetch.nfetch32);
969 if (ret < 0)
970 return ret;
971 if (put_user(ret, &uptr->nfetch32))
972 return -EFAULT;
973 ret = 0;
974 }
975 break;
976#endif
977
978 case MON_IOCG_STATS: {
979 struct mon_bin_stats __user *sp;
980 unsigned int nevents;
981 unsigned int ndropped;
982
983 spin_lock_irqsave(&rp->b_lock, flags);
984 ndropped = rp->cnt_lost;
985 rp->cnt_lost = 0;
986 spin_unlock_irqrestore(&rp->b_lock, flags);
987 nevents = mon_bin_queued(rp);
988
989 sp = (struct mon_bin_stats __user *)arg;
990 if (put_user(rp->cnt_lost, &sp->dropped))
991 return -EFAULT;
992 if (put_user(nevents, &sp->queued))
993 return -EFAULT;
994
995 }
996 break;
997
998 default:
999 return -ENOTTY;
1000 }
1001
1002 return ret;
1003}
1004
1005static unsigned int
1006mon_bin_poll(struct file *file, struct poll_table_struct *wait)
1007{
1008 struct mon_reader_bin *rp = file->private_data;
1009 unsigned int mask = 0;
1010 unsigned long flags;
1011
1012 if (file->f_mode & FMODE_READ)
1013 poll_wait(file, &rp->b_wait, wait);
1014
1015 spin_lock_irqsave(&rp->b_lock, flags);
1016 if (!MON_RING_EMPTY(rp))
1017 mask |= POLLIN | POLLRDNORM; /* readable */
1018 spin_unlock_irqrestore(&rp->b_lock, flags);
1019 return mask;
1020}
1021
/*
 * open and close: just keep track of how many times the device is
 * mapped, to use the proper memory allocation function.
 */
static void mon_bin_vma_open(struct vm_area_struct *vma)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	/* NOTE(review): unlocked increment — presumably serialized by the VM
	 * (mmap_sem) for a given mm; confirm before relying on it across
	 * processes sharing the mapping. */
	rp->mmap_active++;
}
1031
/* Mirror of mon_bin_vma_open: drop one mapping reference. */
static void mon_bin_vma_close(struct vm_area_struct *vma)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	rp->mmap_active--;
}
1037
/*
 * Map ring pages to user space.
 * Demand-pages the mmap-ed ring: resolve a faulting address to the
 * backing page of the corresponding ring chunk.
 */
struct page *mon_bin_vma_nopage(struct vm_area_struct *vma,
    unsigned long address, int *type)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	unsigned long offset, chunk_idx;
	struct page *pageptr;

	/* Byte offset into the ring buffer for the faulting address. */
	offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
	if (offset >= rp->b_size)
		return NOPAGE_SIGBUS;	/* fault beyond the ring: SIGBUS */
	chunk_idx = offset / CHUNK_SIZE;
	pageptr = rp->b_vec[chunk_idx].pg;
	/* Take a reference; the VM drops it when the PTE is torn down. */
	get_page(pageptr);
	if (type)
		*type = VM_FAULT_MINOR;
	return pageptr;
}
1058
/* VMA callbacks for the mmap-ed ring: map-count bookkeeping + demand paging. */
struct vm_operations_struct mon_bin_vm_ops = {
	.open =     mon_bin_vma_open,
	.close =    mon_bin_vma_close,
	.nopage =   mon_bin_vma_nopage,
};
1064
/*
 * mmap entry point for the binary reader: wire up the VMA so faults are
 * served lazily from the ring's pages. No pages are mapped eagerly.
 */
int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* don't do anything here: "nopage" will set up page table entries */
	vma->vm_ops = &mon_bin_vm_ops;
	/* Keep the area out of core dumps and swap. */
	vma->vm_flags |= VM_RESERVED;
	vma->vm_private_data = filp->private_data;
	mon_bin_vma_open(vma);
	return 0;
}
1074
/* File operations for the usbmon binary character device. */
struct file_operations mon_fops_binary = {
	.owner =	THIS_MODULE,
	.open =		mon_bin_open,
	.llseek =	no_llseek,
	.read =		mon_bin_read,
	/* .write =	mon_text_write, */
	.poll =		mon_bin_poll,
	.ioctl =	mon_bin_ioctl,
	.release =	mon_bin_release,
};
1085
/*
 * Block until the ring contains at least one event.
 *
 * Returns 0 when an event is available, -EWOULDBLOCK if the file is in
 * non-blocking mode and the ring is empty, or -EINTR if the sleep was
 * interrupted by a signal.
 */
static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp)
{
	DECLARE_WAITQUEUE(waita, current);
	unsigned long flags;

	/* Enqueue and mark ourselves sleepy BEFORE testing the ring, so a
	 * wakeup between the test and schedule() is not lost. */
	add_wait_queue(&rp->b_wait, &waita);
	set_current_state(TASK_INTERRUPTIBLE);

	spin_lock_irqsave(&rp->b_lock, flags);
	while (MON_RING_EMPTY(rp)) {
		spin_unlock_irqrestore(&rp->b_lock, flags);

		if (file->f_flags & O_NONBLOCK) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&rp->b_wait, &waita);
			return -EWOULDBLOCK; /* Same as EAGAIN in Linux */
		}
		schedule();
		if (signal_pending(current)) {
			remove_wait_queue(&rp->b_wait, &waita);
			return -EINTR;
		}
		/* Re-arm before re-checking; the wakeup set us TASK_RUNNING. */
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&rp->b_lock, flags);
	}
	spin_unlock_irqrestore(&rp->b_lock, flags);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&rp->b_wait, &waita);
	return 0;
}
1118
1119static int mon_alloc_buff(struct mon_pgmap *map, int npages)
1120{
1121 int n;
1122 unsigned long vaddr;
1123
1124 for (n = 0; n < npages; n++) {
1125 vaddr = get_zeroed_page(GFP_KERNEL);
1126 if (vaddr == 0) {
1127 while (n-- != 0)
1128 free_page((unsigned long) map[n].ptr);
1129 return -ENOMEM;
1130 }
1131 map[n].ptr = (unsigned char *) vaddr;
1132 map[n].pg = virt_to_page(vaddr);
1133 }
1134 return 0;
1135}
1136
1137static void mon_free_buff(struct mon_pgmap *map, int npages)
1138{
1139 int n;
1140
1141 for (n = 0; n < npages; n++)
1142 free_page((unsigned long) map[n].ptr);
1143}
1144
1145int __init mon_bin_init(void)
1146{
1147 int rc;
1148
1149 rc = alloc_chrdev_region(&mon_bin_dev0, 0, MON_BIN_MAX_MINOR, "usbmon");
1150 if (rc < 0)
1151 goto err_dev;
1152
1153 cdev_init(&mon_bin_cdev, &mon_fops_binary);
1154 mon_bin_cdev.owner = THIS_MODULE;
1155
1156 rc = cdev_add(&mon_bin_cdev, mon_bin_dev0, MON_BIN_MAX_MINOR);
1157 if (rc < 0)
1158 goto err_add;
1159
1160 return 0;
1161
1162err_add:
1163 unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
1164err_dev:
1165 return rc;
1166}
1167
/*
 * Tear down the binary interface: remove the cdev and release the
 * reserved character device numbers.
 */
void __exit mon_bin_exit(void)
{
	cdev_del(&mon_bin_cdev);
	unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
}
diff --git a/drivers/usb/mon/mon_dma.c b/drivers/usb/mon/mon_dma.c
index ddcfc01e77a0..140cc80bd2b1 100644
--- a/drivers/usb/mon/mon_dma.c
+++ b/drivers/usb/mon/mon_dma.c
@@ -48,6 +48,36 @@ char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len)
48 local_irq_restore(flags); 48 local_irq_restore(flags);
49 return 0; 49 return 0;
50} 50}
51
52void mon_dmapeek_vec(const struct mon_reader_bin *rp,
53 unsigned int offset, dma_addr_t dma_addr, unsigned int length)
54{
55 unsigned long flags;
56 unsigned int step_len;
57 struct page *pg;
58 unsigned char *map;
59 unsigned long page_off, page_len;
60
61 local_irq_save(flags);
62 while (length) {
63 /* compute number of bytes we are going to copy in this page */
64 step_len = length;
65 page_off = dma_addr & (PAGE_SIZE-1);
66 page_len = PAGE_SIZE - page_off;
67 if (page_len < step_len)
68 step_len = page_len;
69
70 /* copy data and advance pointers */
71 pg = phys_to_page(dma_addr);
72 map = kmap_atomic(pg, KM_IRQ0);
73 offset = mon_copy_to_buff(rp, offset, map + page_off, step_len);
74 kunmap_atomic(map, KM_IRQ0);
75 dma_addr += step_len;
76 length -= step_len;
77 }
78 local_irq_restore(flags);
79}
80
51#endif /* __i386__ */ 81#endif /* __i386__ */
52 82
53#ifndef MON_HAS_UNMAP 83#ifndef MON_HAS_UNMAP
@@ -55,4 +85,11 @@ char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len)
55{ 85{
56 return 'D'; 86 return 'D';
57} 87}
58#endif 88
89void mon_dmapeek_vec(const struct mon_reader_bin *rp,
90 unsigned int offset, dma_addr_t dma_addr, unsigned int length)
91{
92 ;
93}
94
95#endif /* MON_HAS_UNMAP */
diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
index 394bbf2f68d4..c9739e7b35e5 100644
--- a/drivers/usb/mon/mon_main.c
+++ b/drivers/usb/mon/mon_main.c
@@ -9,7 +9,6 @@
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/usb.h> 11#include <linux/usb.h>
12#include <linux/debugfs.h>
13#include <linux/smp_lock.h> 12#include <linux/smp_lock.h>
14#include <linux/notifier.h> 13#include <linux/notifier.h>
15#include <linux/mutex.h> 14#include <linux/mutex.h>
@@ -22,11 +21,10 @@ static void mon_complete(struct usb_bus *ubus, struct urb *urb);
22static void mon_stop(struct mon_bus *mbus); 21static void mon_stop(struct mon_bus *mbus);
23static void mon_dissolve(struct mon_bus *mbus, struct usb_bus *ubus); 22static void mon_dissolve(struct mon_bus *mbus, struct usb_bus *ubus);
24static void mon_bus_drop(struct kref *r); 23static void mon_bus_drop(struct kref *r);
25static void mon_bus_init(struct dentry *mondir, struct usb_bus *ubus); 24static void mon_bus_init(struct usb_bus *ubus);
26 25
27DEFINE_MUTEX(mon_lock); 26DEFINE_MUTEX(mon_lock);
28 27
29static struct dentry *mon_dir; /* /dbg/usbmon */
30static LIST_HEAD(mon_buses); /* All buses we know: struct mon_bus */ 28static LIST_HEAD(mon_buses); /* All buses we know: struct mon_bus */
31 29
32/* 30/*
@@ -200,7 +198,7 @@ static void mon_stop(struct mon_bus *mbus)
200 */ 198 */
201static void mon_bus_add(struct usb_bus *ubus) 199static void mon_bus_add(struct usb_bus *ubus)
202{ 200{
203 mon_bus_init(mon_dir, ubus); 201 mon_bus_init(ubus);
204} 202}
205 203
206/* 204/*
@@ -212,8 +210,8 @@ static void mon_bus_remove(struct usb_bus *ubus)
212 210
213 mutex_lock(&mon_lock); 211 mutex_lock(&mon_lock);
214 list_del(&mbus->bus_link); 212 list_del(&mbus->bus_link);
215 debugfs_remove(mbus->dent_t); 213 if (mbus->text_inited)
216 debugfs_remove(mbus->dent_s); 214 mon_text_del(mbus);
217 215
218 mon_dissolve(mbus, ubus); 216 mon_dissolve(mbus, ubus);
219 kref_put(&mbus->ref, mon_bus_drop); 217 kref_put(&mbus->ref, mon_bus_drop);
@@ -281,13 +279,9 @@ static void mon_bus_drop(struct kref *r)
281 * - refcount USB bus struct 279 * - refcount USB bus struct
282 * - link 280 * - link
283 */ 281 */
284static void mon_bus_init(struct dentry *mondir, struct usb_bus *ubus) 282static void mon_bus_init(struct usb_bus *ubus)
285{ 283{
286 struct dentry *d;
287 struct mon_bus *mbus; 284 struct mon_bus *mbus;
288 enum { NAMESZ = 10 };
289 char name[NAMESZ];
290 int rc;
291 285
292 if ((mbus = kzalloc(sizeof(struct mon_bus), GFP_KERNEL)) == NULL) 286 if ((mbus = kzalloc(sizeof(struct mon_bus), GFP_KERNEL)) == NULL)
293 goto err_alloc; 287 goto err_alloc;
@@ -303,57 +297,54 @@ static void mon_bus_init(struct dentry *mondir, struct usb_bus *ubus)
303 ubus->mon_bus = mbus; 297 ubus->mon_bus = mbus;
304 mbus->uses_dma = ubus->uses_dma; 298 mbus->uses_dma = ubus->uses_dma;
305 299
306 rc = snprintf(name, NAMESZ, "%dt", ubus->busnum); 300 mbus->text_inited = mon_text_add(mbus, ubus);
307 if (rc <= 0 || rc >= NAMESZ) 301 // mon_bin_add(...)
308 goto err_print_t;
309 d = debugfs_create_file(name, 0600, mondir, mbus, &mon_fops_text);
310 if (d == NULL)
311 goto err_create_t;
312 mbus->dent_t = d;
313
314 rc = snprintf(name, NAMESZ, "%ds", ubus->busnum);
315 if (rc <= 0 || rc >= NAMESZ)
316 goto err_print_s;
317 d = debugfs_create_file(name, 0600, mondir, mbus, &mon_fops_stat);
318 if (d == NULL)
319 goto err_create_s;
320 mbus->dent_s = d;
321 302
322 mutex_lock(&mon_lock); 303 mutex_lock(&mon_lock);
323 list_add_tail(&mbus->bus_link, &mon_buses); 304 list_add_tail(&mbus->bus_link, &mon_buses);
324 mutex_unlock(&mon_lock); 305 mutex_unlock(&mon_lock);
325 return; 306 return;
326 307
327err_create_s:
328err_print_s:
329 debugfs_remove(mbus->dent_t);
330err_create_t:
331err_print_t:
332 kfree(mbus);
333err_alloc: 308err_alloc:
334 return; 309 return;
335} 310}
336 311
312/*
313 * Search a USB bus by number. Notice that USB bus numbers start from one,
314 * which we may later use to identify "all" with zero.
315 *
316 * This function must be called with mon_lock held.
317 *
318 * This is obviously inefficient and may be revised in the future.
319 */
320struct mon_bus *mon_bus_lookup(unsigned int num)
321{
322 struct list_head *p;
323 struct mon_bus *mbus;
324
325 list_for_each (p, &mon_buses) {
326 mbus = list_entry(p, struct mon_bus, bus_link);
327 if (mbus->u_bus->busnum == num) {
328 return mbus;
329 }
330 }
331 return NULL;
332}
333
337static int __init mon_init(void) 334static int __init mon_init(void)
338{ 335{
339 struct usb_bus *ubus; 336 struct usb_bus *ubus;
340 struct dentry *mondir; 337 int rc;
341 338
342 mondir = debugfs_create_dir("usbmon", NULL); 339 if ((rc = mon_text_init()) != 0)
343 if (IS_ERR(mondir)) { 340 goto err_text;
344 printk(KERN_NOTICE TAG ": debugfs is not available\n"); 341 if ((rc = mon_bin_init()) != 0)
345 return -ENODEV; 342 goto err_bin;
346 }
347 if (mondir == NULL) {
348 printk(KERN_NOTICE TAG ": unable to create usbmon directory\n");
349 return -ENODEV;
350 }
351 mon_dir = mondir;
352 343
353 if (usb_mon_register(&mon_ops_0) != 0) { 344 if (usb_mon_register(&mon_ops_0) != 0) {
354 printk(KERN_NOTICE TAG ": unable to register with the core\n"); 345 printk(KERN_NOTICE TAG ": unable to register with the core\n");
355 debugfs_remove(mondir); 346 rc = -ENODEV;
356 return -ENODEV; 347 goto err_reg;
357 } 348 }
358 // MOD_INC_USE_COUNT(which_module?); 349 // MOD_INC_USE_COUNT(which_module?);
359 350
@@ -361,10 +352,17 @@ static int __init mon_init(void)
361 352
362 mutex_lock(&usb_bus_list_lock); 353 mutex_lock(&usb_bus_list_lock);
363 list_for_each_entry (ubus, &usb_bus_list, bus_list) { 354 list_for_each_entry (ubus, &usb_bus_list, bus_list) {
364 mon_bus_init(mondir, ubus); 355 mon_bus_init(ubus);
365 } 356 }
366 mutex_unlock(&usb_bus_list_lock); 357 mutex_unlock(&usb_bus_list_lock);
367 return 0; 358 return 0;
359
360err_reg:
361 mon_bin_exit();
362err_bin:
363 mon_text_exit();
364err_text:
365 return rc;
368} 366}
369 367
370static void __exit mon_exit(void) 368static void __exit mon_exit(void)
@@ -381,8 +379,8 @@ static void __exit mon_exit(void)
381 mbus = list_entry(p, struct mon_bus, bus_link); 379 mbus = list_entry(p, struct mon_bus, bus_link);
382 list_del(p); 380 list_del(p);
383 381
384 debugfs_remove(mbus->dent_t); 382 if (mbus->text_inited)
385 debugfs_remove(mbus->dent_s); 383 mon_text_del(mbus);
386 384
387 /* 385 /*
388 * This never happens, because the open/close paths in 386 * This never happens, because the open/close paths in
@@ -401,7 +399,8 @@ static void __exit mon_exit(void)
401 } 399 }
402 mutex_unlock(&mon_lock); 400 mutex_unlock(&mon_lock);
403 401
404 debugfs_remove(mon_dir); 402 mon_text_exit();
403 mon_bin_exit();
405} 404}
406 405
407module_init(mon_init); 406module_init(mon_init);
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 05cf2c9a8f84..d38a1279d9d9 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -9,6 +9,7 @@
9#include <linux/usb.h> 9#include <linux/usb.h>
10#include <linux/time.h> 10#include <linux/time.h>
11#include <linux/mutex.h> 11#include <linux/mutex.h>
12#include <linux/debugfs.h>
12#include <asm/uaccess.h> 13#include <asm/uaccess.h>
13 14
14#include "usb_mon.h" 15#include "usb_mon.h"
@@ -63,6 +64,8 @@ struct mon_reader_text {
63 char slab_name[SLAB_NAME_SZ]; 64 char slab_name[SLAB_NAME_SZ];
64}; 65};
65 66
67static struct dentry *mon_dir; /* Usually /sys/kernel/debug/usbmon */
68
66static void mon_text_ctor(void *, struct kmem_cache *, unsigned long); 69static void mon_text_ctor(void *, struct kmem_cache *, unsigned long);
67 70
68/* 71/*
@@ -436,7 +439,7 @@ static int mon_text_release(struct inode *inode, struct file *file)
436 return 0; 439 return 0;
437} 440}
438 441
439const struct file_operations mon_fops_text = { 442static const struct file_operations mon_fops_text = {
440 .owner = THIS_MODULE, 443 .owner = THIS_MODULE,
441 .open = mon_text_open, 444 .open = mon_text_open,
442 .llseek = no_llseek, 445 .llseek = no_llseek,
@@ -447,6 +450,47 @@ const struct file_operations mon_fops_text = {
447 .release = mon_text_release, 450 .release = mon_text_release,
448}; 451};
449 452
453int mon_text_add(struct mon_bus *mbus, const struct usb_bus *ubus)
454{
455 struct dentry *d;
456 enum { NAMESZ = 10 };
457 char name[NAMESZ];
458 int rc;
459
460 rc = snprintf(name, NAMESZ, "%dt", ubus->busnum);
461 if (rc <= 0 || rc >= NAMESZ)
462 goto err_print_t;
463 d = debugfs_create_file(name, 0600, mon_dir, mbus, &mon_fops_text);
464 if (d == NULL)
465 goto err_create_t;
466 mbus->dent_t = d;
467
468 /* XXX The stats do not belong to here (text API), but oh well... */
469 rc = snprintf(name, NAMESZ, "%ds", ubus->busnum);
470 if (rc <= 0 || rc >= NAMESZ)
471 goto err_print_s;
472 d = debugfs_create_file(name, 0600, mon_dir, mbus, &mon_fops_stat);
473 if (d == NULL)
474 goto err_create_s;
475 mbus->dent_s = d;
476
477 return 1;
478
479err_create_s:
480err_print_s:
481 debugfs_remove(mbus->dent_t);
482 mbus->dent_t = NULL;
483err_create_t:
484err_print_t:
485 return 0;
486}
487
488void mon_text_del(struct mon_bus *mbus)
489{
490 debugfs_remove(mbus->dent_t);
491 debugfs_remove(mbus->dent_s);
492}
493
450/* 494/*
451 * Slab interface: constructor. 495 * Slab interface: constructor.
452 */ 496 */
@@ -459,3 +503,24 @@ static void mon_text_ctor(void *mem, struct kmem_cache *slab, unsigned long sfla
459 memset(mem, 0xe5, sizeof(struct mon_event_text)); 503 memset(mem, 0xe5, sizeof(struct mon_event_text));
460} 504}
461 505
506int __init mon_text_init(void)
507{
508 struct dentry *mondir;
509
510 mondir = debugfs_create_dir("usbmon", NULL);
511 if (IS_ERR(mondir)) {
512 printk(KERN_NOTICE TAG ": debugfs is not available\n");
513 return -ENODEV;
514 }
515 if (mondir == NULL) {
516 printk(KERN_NOTICE TAG ": unable to create usbmon directory\n");
517 return -ENODEV;
518 }
519 mon_dir = mondir;
520 return 0;
521}
522
523void __exit mon_text_exit(void)
524{
525 debugfs_remove(mon_dir);
526}
diff --git a/drivers/usb/mon/usb_mon.h b/drivers/usb/mon/usb_mon.h
index ab9d02d5df77..4f949ce8a7f3 100644
--- a/drivers/usb/mon/usb_mon.h
+++ b/drivers/usb/mon/usb_mon.h
@@ -17,9 +17,11 @@
17struct mon_bus { 17struct mon_bus {
18 struct list_head bus_link; 18 struct list_head bus_link;
19 spinlock_t lock; 19 spinlock_t lock;
20 struct usb_bus *u_bus;
21
22 int text_inited;
20 struct dentry *dent_s; /* Debugging file */ 23 struct dentry *dent_s; /* Debugging file */
21 struct dentry *dent_t; /* Text interface file */ 24 struct dentry *dent_t; /* Text interface file */
22 struct usb_bus *u_bus;
23 int uses_dma; 25 int uses_dma;
24 26
25 /* Ref */ 27 /* Ref */
@@ -48,13 +50,35 @@ struct mon_reader {
48void mon_reader_add(struct mon_bus *mbus, struct mon_reader *r); 50void mon_reader_add(struct mon_bus *mbus, struct mon_reader *r);
49void mon_reader_del(struct mon_bus *mbus, struct mon_reader *r); 51void mon_reader_del(struct mon_bus *mbus, struct mon_reader *r);
50 52
53struct mon_bus *mon_bus_lookup(unsigned int num);
54
55int /*bool*/ mon_text_add(struct mon_bus *mbus, const struct usb_bus *ubus);
56void mon_text_del(struct mon_bus *mbus);
57// void mon_bin_add(struct mon_bus *);
58
59int __init mon_text_init(void);
60void __exit mon_text_exit(void);
61int __init mon_bin_init(void);
62void __exit mon_bin_exit(void);
63
51/* 64/*
52 */ 65 * DMA interface.
66 *
67 * XXX The vectored side needs a serious re-thinking. Abstracting vectors,
68 * like in Paolo's original patch, produces a double pkmap. We need an idea.
69*/
53extern char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len); 70extern char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len);
54 71
72struct mon_reader_bin;
73extern void mon_dmapeek_vec(const struct mon_reader_bin *rp,
74 unsigned int offset, dma_addr_t dma_addr, unsigned int len);
75extern unsigned int mon_copy_to_buff(const struct mon_reader_bin *rp,
76 unsigned int offset, const unsigned char *from, unsigned int len);
77
78/*
79 */
55extern struct mutex mon_lock; 80extern struct mutex mon_lock;
56 81
57extern const struct file_operations mon_fops_text;
58extern const struct file_operations mon_fops_stat; 82extern const struct file_operations mon_fops_stat;
59 83
60#endif /* __USB_MON_H */ 84#endif /* __USB_MON_H */
diff --git a/drivers/usb/net/Kconfig b/drivers/usb/net/Kconfig
index e081836014ac..a2b94ef512bc 100644
--- a/drivers/usb/net/Kconfig
+++ b/drivers/usb/net/Kconfig
@@ -222,13 +222,15 @@ config USB_NET_MCS7830
222 adapters marketed under the DeLOCK brand. 222 adapters marketed under the DeLOCK brand.
223 223
224config USB_NET_RNDIS_HOST 224config USB_NET_RNDIS_HOST
225 tristate "Host for RNDIS devices (EXPERIMENTAL)" 225 tristate "Host for RNDIS and ActiveSync devices (EXPERIMENTAL)"
226 depends on USB_USBNET && EXPERIMENTAL 226 depends on USB_USBNET && EXPERIMENTAL
227 select USB_NET_CDCETHER 227 select USB_NET_CDCETHER
228 help 228 help
229 This option enables hosting "Remote NDIS" USB networking links, 229 This option enables hosting "Remote NDIS" USB networking links,
230 as encouraged by Microsoft (instead of CDC Ethernet!) for use in 230 as encouraged by Microsoft (instead of CDC Ethernet!) for use in
231 various devices that may only support this protocol. 231 various devices that may only support this protocol. A variant
232 of this protocol (with even less public documentation) seems to
233 be at the root of Microsoft's "ActiveSync" too.
232 234
233 Avoid using this protocol unless you have no better options. 235 Avoid using this protocol unless you have no better options.
234 The protocol specification is incomplete, and is controlled by 236 The protocol specification is incomplete, and is controlled by
diff --git a/drivers/usb/net/asix.c b/drivers/usb/net/asix.c
index 896449f0cf85..4206df2d61b7 100644
--- a/drivers/usb/net/asix.c
+++ b/drivers/usb/net/asix.c
@@ -1449,6 +1449,10 @@ static const struct usb_device_id products [] = {
1449 // Linksys USB1000 1449 // Linksys USB1000
1450 USB_DEVICE (0x1737, 0x0039), 1450 USB_DEVICE (0x1737, 0x0039),
1451 .driver_info = (unsigned long) &ax88178_info, 1451 .driver_info = (unsigned long) &ax88178_info,
1452}, {
1453 // IO-DATA ETG-US2
1454 USB_DEVICE (0x04bb, 0x0930),
1455 .driver_info = (unsigned long) &ax88178_info,
1452}, 1456},
1453 { }, // END 1457 { }, // END
1454}; 1458};
diff --git a/drivers/usb/net/cdc_ether.c b/drivers/usb/net/cdc_ether.c
index 44a91547146e..e5cdafa258dd 100644
--- a/drivers/usb/net/cdc_ether.c
+++ b/drivers/usb/net/cdc_ether.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * CDC Ethernet based networking peripherals 2 * CDC Ethernet based networking peripherals
3 * Copyright (C) 2003-2005 by David Brownell 3 * Copyright (C) 2003-2005 by David Brownell
4 * Copyright (C) 2006 by Ole Andre Vadla Ravnas (ActiveSync)
4 * 5 *
5 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -35,6 +36,29 @@
35#include "usbnet.h" 36#include "usbnet.h"
36 37
37 38
39#if defined(CONFIG_USB_NET_RNDIS_HOST) || defined(CONFIG_USB_NET_RNDIS_HOST_MODULE)
40
41static int is_rndis(struct usb_interface_descriptor *desc)
42{
43 return desc->bInterfaceClass == USB_CLASS_COMM
44 && desc->bInterfaceSubClass == 2
45 && desc->bInterfaceProtocol == 0xff;
46}
47
48static int is_activesync(struct usb_interface_descriptor *desc)
49{
50 return desc->bInterfaceClass == USB_CLASS_MISC
51 && desc->bInterfaceSubClass == 1
52 && desc->bInterfaceProtocol == 1;
53}
54
55#else
56
57#define is_rndis(desc) 0
58#define is_activesync(desc) 0
59
60#endif
61
38/* 62/*
39 * probes control interface, claims data interface, collects the bulk 63 * probes control interface, claims data interface, collects the bulk
40 * endpoints, activates data interface (if needed), maybe sets MTU. 64 * endpoints, activates data interface (if needed), maybe sets MTU.
@@ -71,7 +95,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
71 /* this assumes that if there's a non-RNDIS vendor variant 95 /* this assumes that if there's a non-RNDIS vendor variant
72 * of cdc-acm, it'll fail RNDIS requests cleanly. 96 * of cdc-acm, it'll fail RNDIS requests cleanly.
73 */ 97 */
74 rndis = (intf->cur_altsetting->desc.bInterfaceProtocol == 0xff); 98 rndis = is_rndis(&intf->cur_altsetting->desc)
99 || is_activesync(&intf->cur_altsetting->desc);
75 100
76 memset(info, 0, sizeof *info); 101 memset(info, 0, sizeof *info);
77 info->control = intf; 102 info->control = intf;
@@ -99,6 +124,23 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
99 goto bad_desc; 124 goto bad_desc;
100 } 125 }
101 break; 126 break;
127 case USB_CDC_ACM_TYPE:
128 /* paranoia: disambiguate a "real" vendor-specific
129 * modem interface from an RNDIS non-modem.
130 */
131 if (rndis) {
132 struct usb_cdc_acm_descriptor *d;
133
134 d = (void *) buf;
135 if (d->bmCapabilities) {
136 dev_dbg(&intf->dev,
137 "ACM capabilities %02x, "
138 "not really RNDIS?\n",
139 d->bmCapabilities);
140 goto bad_desc;
141 }
142 }
143 break;
102 case USB_CDC_UNION_TYPE: 144 case USB_CDC_UNION_TYPE:
103 if (info->u) { 145 if (info->u) {
104 dev_dbg(&intf->dev, "extra CDC union\n"); 146 dev_dbg(&intf->dev, "extra CDC union\n");
@@ -171,7 +213,21 @@ next_desc:
171 buf += buf [0]; 213 buf += buf [0];
172 } 214 }
173 215
174 if (!info->header || !info->u || (!rndis && !info->ether)) { 216 /* Microsoft ActiveSync based RNDIS devices lack the CDC descriptors,
217 * so we'll hard-wire the interfaces and not check for descriptors.
218 */
219 if (is_activesync(&intf->cur_altsetting->desc) && !info->u) {
220 info->control = usb_ifnum_to_if(dev->udev, 0);
221 info->data = usb_ifnum_to_if(dev->udev, 1);
222 if (!info->control || !info->data) {
223 dev_dbg(&intf->dev,
224 "activesync: master #0/%p slave #1/%p\n",
225 info->control,
226 info->data);
227 goto bad_desc;
228 }
229
230 } else if (!info->header || !info->u || (!rndis && !info->ether)) {
175 dev_dbg(&intf->dev, "missing cdc %s%s%sdescriptor\n", 231 dev_dbg(&intf->dev, "missing cdc %s%s%sdescriptor\n",
176 info->header ? "" : "header ", 232 info->header ? "" : "header ",
177 info->u ? "" : "union ", 233 info->u ? "" : "union ",
diff --git a/drivers/usb/net/kaweth.c b/drivers/usb/net/kaweth.c
index fa78326d0bf0..36a989160a68 100644
--- a/drivers/usb/net/kaweth.c
+++ b/drivers/usb/net/kaweth.c
@@ -179,6 +179,7 @@ static struct usb_driver kaweth_driver = {
179 .suspend = kaweth_suspend, 179 .suspend = kaweth_suspend,
180 .resume = kaweth_resume, 180 .resume = kaweth_resume,
181 .id_table = usb_klsi_table, 181 .id_table = usb_klsi_table,
182 .supports_autosuspend = 1,
182}; 183};
183 184
184typedef __u8 eth_addr_t[6]; 185typedef __u8 eth_addr_t[6];
@@ -225,6 +226,7 @@ struct kaweth_device
225 struct delayed_work lowmem_work; 226 struct delayed_work lowmem_work;
226 227
227 struct usb_device *dev; 228 struct usb_device *dev;
229 struct usb_interface *intf;
228 struct net_device *net; 230 struct net_device *net;
229 wait_queue_head_t term_wait; 231 wait_queue_head_t term_wait;
230 232
@@ -662,9 +664,14 @@ static int kaweth_open(struct net_device *net)
662 664
663 dbg("Opening network device."); 665 dbg("Opening network device.");
664 666
667 res = usb_autopm_get_interface(kaweth->intf);
668 if (res) {
669 err("Interface cannot be resumed.");
670 return -EIO;
671 }
665 res = kaweth_resubmit_rx_urb(kaweth, GFP_KERNEL); 672 res = kaweth_resubmit_rx_urb(kaweth, GFP_KERNEL);
666 if (res) 673 if (res)
667 return -EIO; 674 goto err_out;
668 675
669 usb_fill_int_urb( 676 usb_fill_int_urb(
670 kaweth->irq_urb, 677 kaweth->irq_urb,
@@ -681,7 +688,7 @@ static int kaweth_open(struct net_device *net)
681 res = usb_submit_urb(kaweth->irq_urb, GFP_KERNEL); 688 res = usb_submit_urb(kaweth->irq_urb, GFP_KERNEL);
682 if (res) { 689 if (res) {
683 usb_kill_urb(kaweth->rx_urb); 690 usb_kill_urb(kaweth->rx_urb);
684 return -EIO; 691 goto err_out;
685 } 692 }
686 kaweth->opened = 1; 693 kaweth->opened = 1;
687 694
@@ -689,10 +696,14 @@ static int kaweth_open(struct net_device *net)
689 696
690 kaweth_async_set_rx_mode(kaweth); 697 kaweth_async_set_rx_mode(kaweth);
691 return 0; 698 return 0;
699
700err_out:
701 usb_autopm_enable(kaweth->intf);
702 return -EIO;
692} 703}
693 704
694/**************************************************************** 705/****************************************************************
695 * kaweth_close 706 * kaweth_kill_urbs
696 ****************************************************************/ 707 ****************************************************************/
697static void kaweth_kill_urbs(struct kaweth_device *kaweth) 708static void kaweth_kill_urbs(struct kaweth_device *kaweth)
698{ 709{
@@ -724,17 +735,29 @@ static int kaweth_close(struct net_device *net)
724 735
725 kaweth->status &= ~KAWETH_STATUS_CLOSING; 736 kaweth->status &= ~KAWETH_STATUS_CLOSING;
726 737
738 usb_autopm_enable(kaweth->intf);
739
727 return 0; 740 return 0;
728} 741}
729 742
730static void kaweth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 743static void kaweth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
731{ 744{
745 struct kaweth_device *kaweth = netdev_priv(dev);
732 746
733 strlcpy(info->driver, driver_name, sizeof(info->driver)); 747 strlcpy(info->driver, driver_name, sizeof(info->driver));
748 usb_make_path(kaweth->dev, info->bus_info, sizeof (info->bus_info));
749}
750
751static u32 kaweth_get_link(struct net_device *dev)
752{
753 struct kaweth_device *kaweth = netdev_priv(dev);
754
755 return kaweth->linkstate;
734} 756}
735 757
736static struct ethtool_ops ops = { 758static struct ethtool_ops ops = {
737 .get_drvinfo = kaweth_get_drvinfo 759 .get_drvinfo = kaweth_get_drvinfo,
760 .get_link = kaweth_get_link
738}; 761};
739 762
740/**************************************************************** 763/****************************************************************
@@ -908,6 +931,7 @@ static int kaweth_suspend(struct usb_interface *intf, pm_message_t message)
908 struct kaweth_device *kaweth = usb_get_intfdata(intf); 931 struct kaweth_device *kaweth = usb_get_intfdata(intf);
909 unsigned long flags; 932 unsigned long flags;
910 933
934 dbg("Suspending device");
911 spin_lock_irqsave(&kaweth->device_lock, flags); 935 spin_lock_irqsave(&kaweth->device_lock, flags);
912 kaweth->status |= KAWETH_STATUS_SUSPENDING; 936 kaweth->status |= KAWETH_STATUS_SUSPENDING;
913 spin_unlock_irqrestore(&kaweth->device_lock, flags); 937 spin_unlock_irqrestore(&kaweth->device_lock, flags);
@@ -924,6 +948,7 @@ static int kaweth_resume(struct usb_interface *intf)
924 struct kaweth_device *kaweth = usb_get_intfdata(intf); 948 struct kaweth_device *kaweth = usb_get_intfdata(intf);
925 unsigned long flags; 949 unsigned long flags;
926 950
951 dbg("Resuming device");
927 spin_lock_irqsave(&kaweth->device_lock, flags); 952 spin_lock_irqsave(&kaweth->device_lock, flags);
928 kaweth->status &= ~KAWETH_STATUS_SUSPENDING; 953 kaweth->status &= ~KAWETH_STATUS_SUSPENDING;
929 spin_unlock_irqrestore(&kaweth->device_lock, flags); 954 spin_unlock_irqrestore(&kaweth->device_lock, flags);
@@ -1086,6 +1111,8 @@ err_fw:
1086 1111
1087 dbg("Initializing net device."); 1112 dbg("Initializing net device.");
1088 1113
1114 kaweth->intf = intf;
1115
1089 kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL); 1116 kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
1090 if (!kaweth->tx_urb) 1117 if (!kaweth->tx_urb)
1091 goto err_free_netdev; 1118 goto err_free_netdev;
@@ -1265,7 +1292,7 @@ static int kaweth_internal_control_msg(struct usb_device *usb_dev,
1265{ 1292{
1266 struct urb *urb; 1293 struct urb *urb;
1267 int retv; 1294 int retv;
1268 int length; 1295 int length = 0; /* shut up GCC */
1269 1296
1270 urb = usb_alloc_urb(0, GFP_NOIO); 1297 urb = usb_alloc_urb(0, GFP_NOIO);
1271 if (!urb) 1298 if (!urb)
diff --git a/drivers/usb/net/pegasus.h b/drivers/usb/net/pegasus.h
index 98f6898cae1f..c7467823cd1c 100644
--- a/drivers/usb/net/pegasus.h
+++ b/drivers/usb/net/pegasus.h
@@ -214,9 +214,9 @@ PEGASUS_DEV( "Billionton USBEL-100", VENDOR_BILLIONTON, 0x0988,
214 DEFAULT_GPIO_RESET ) 214 DEFAULT_GPIO_RESET )
215PEGASUS_DEV( "Billionton USBE-100", VENDOR_BILLIONTON, 0x8511, 215PEGASUS_DEV( "Billionton USBE-100", VENDOR_BILLIONTON, 0x8511,
216 DEFAULT_GPIO_RESET | PEGASUS_II ) 216 DEFAULT_GPIO_RESET | PEGASUS_II )
217PEGASUS_DEV( "Corega FEter USB-TX", VENDOR_COREGA, 0x0004, 217PEGASUS_DEV( "Corega FEther USB-TX", VENDOR_COREGA, 0x0004,
218 DEFAULT_GPIO_RESET ) 218 DEFAULT_GPIO_RESET )
219PEGASUS_DEV( "Corega FEter USB-TXS", VENDOR_COREGA, 0x000d, 219PEGASUS_DEV( "Corega FEther USB-TXS", VENDOR_COREGA, 0x000d,
220 DEFAULT_GPIO_RESET | PEGASUS_II ) 220 DEFAULT_GPIO_RESET | PEGASUS_II )
221PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x4001, 221PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x4001,
222 DEFAULT_GPIO_RESET ) 222 DEFAULT_GPIO_RESET )
diff --git a/drivers/usb/net/rndis_host.c b/drivers/usb/net/rndis_host.c
index a322a16d9cf8..be888d2d813c 100644
--- a/drivers/usb/net/rndis_host.c
+++ b/drivers/usb/net/rndis_host.c
@@ -49,6 +49,8 @@
49 * - In some cases, MS-Windows will emit undocumented requests; this 49 * - In some cases, MS-Windows will emit undocumented requests; this
50 * matters more to peripheral implementations than host ones. 50 * matters more to peripheral implementations than host ones.
51 * 51 *
52 * Moreover there's a no-open-specs variant of RNDIS called "ActiveSync".
53 *
52 * For these reasons and others, ** USE OF RNDIS IS STRONGLY DISCOURAGED ** in 54 * For these reasons and others, ** USE OF RNDIS IS STRONGLY DISCOURAGED ** in
53 * favor of such non-proprietary alternatives as CDC Ethernet or the newer (and 55 * favor of such non-proprietary alternatives as CDC Ethernet or the newer (and
54 * currently rare) "Ethernet Emulation Model" (EEM). 56 * currently rare) "Ethernet Emulation Model" (EEM).
@@ -61,6 +63,9 @@
61 * - control-in: GET_ENCAPSULATED 63 * - control-in: GET_ENCAPSULATED
62 * 64 *
63 * We'll try to ignore the RESPONSE_AVAILABLE notifications. 65 * We'll try to ignore the RESPONSE_AVAILABLE notifications.
66 *
67 * REVISIT some RNDIS implementations seem to have curious issues still
68 * to be resolved.
64 */ 69 */
65struct rndis_msg_hdr { 70struct rndis_msg_hdr {
66 __le32 msg_type; /* RNDIS_MSG_* */ 71 __le32 msg_type; /* RNDIS_MSG_* */
@@ -71,8 +76,14 @@ struct rndis_msg_hdr {
71 // ... and more 76 // ... and more
72} __attribute__ ((packed)); 77} __attribute__ ((packed));
73 78
74/* RNDIS defines this (absurdly huge) control timeout */ 79/* MS-Windows uses this strange size, but RNDIS spec says 1024 minimum */
75#define RNDIS_CONTROL_TIMEOUT_MS (10 * 1000) 80#define CONTROL_BUFFER_SIZE 1025
81
82/* RNDIS defines an (absurdly huge) 10 second control timeout,
83 * but ActiveSync seems to use a more usual 5 second timeout
84 * (which matches the USB 2.0 spec).
85 */
86#define RNDIS_CONTROL_TIMEOUT_MS (5 * 1000)
76 87
77 88
78#define ccpu2 __constant_cpu_to_le32 89#define ccpu2 __constant_cpu_to_le32
@@ -270,6 +281,7 @@ static void rndis_status(struct usbnet *dev, struct urb *urb)
270static int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf) 281static int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf)
271{ 282{
272 struct cdc_state *info = (void *) &dev->data; 283 struct cdc_state *info = (void *) &dev->data;
284 int master_ifnum;
273 int retval; 285 int retval;
274 unsigned count; 286 unsigned count;
275 __le32 rsp; 287 __le32 rsp;
@@ -279,7 +291,7 @@ static int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf)
279 * disconnect(): either serialize, or dispatch responses on xid 291 * disconnect(): either serialize, or dispatch responses on xid
280 */ 292 */
281 293
282 /* Issue the request; don't bother byteswapping our xid */ 294 /* Issue the request; xid is unique, don't bother byteswapping it */
283 if (likely(buf->msg_type != RNDIS_MSG_HALT 295 if (likely(buf->msg_type != RNDIS_MSG_HALT
284 && buf->msg_type != RNDIS_MSG_RESET)) { 296 && buf->msg_type != RNDIS_MSG_RESET)) {
285 xid = dev->xid++; 297 xid = dev->xid++;
@@ -287,11 +299,12 @@ static int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf)
287 xid = dev->xid++; 299 xid = dev->xid++;
288 buf->request_id = (__force __le32) xid; 300 buf->request_id = (__force __le32) xid;
289 } 301 }
302 master_ifnum = info->control->cur_altsetting->desc.bInterfaceNumber;
290 retval = usb_control_msg(dev->udev, 303 retval = usb_control_msg(dev->udev,
291 usb_sndctrlpipe(dev->udev, 0), 304 usb_sndctrlpipe(dev->udev, 0),
292 USB_CDC_SEND_ENCAPSULATED_COMMAND, 305 USB_CDC_SEND_ENCAPSULATED_COMMAND,
293 USB_TYPE_CLASS | USB_RECIP_INTERFACE, 306 USB_TYPE_CLASS | USB_RECIP_INTERFACE,
294 0, info->u->bMasterInterface0, 307 0, master_ifnum,
295 buf, le32_to_cpu(buf->msg_len), 308 buf, le32_to_cpu(buf->msg_len),
296 RNDIS_CONTROL_TIMEOUT_MS); 309 RNDIS_CONTROL_TIMEOUT_MS);
297 if (unlikely(retval < 0 || xid == 0)) 310 if (unlikely(retval < 0 || xid == 0))
@@ -306,13 +319,13 @@ static int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf)
306 */ 319 */
307 rsp = buf->msg_type | RNDIS_MSG_COMPLETION; 320 rsp = buf->msg_type | RNDIS_MSG_COMPLETION;
308 for (count = 0; count < 10; count++) { 321 for (count = 0; count < 10; count++) {
309 memset(buf, 0, 1024); 322 memset(buf, 0, CONTROL_BUFFER_SIZE);
310 retval = usb_control_msg(dev->udev, 323 retval = usb_control_msg(dev->udev,
311 usb_rcvctrlpipe(dev->udev, 0), 324 usb_rcvctrlpipe(dev->udev, 0),
312 USB_CDC_GET_ENCAPSULATED_RESPONSE, 325 USB_CDC_GET_ENCAPSULATED_RESPONSE,
313 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 326 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
314 0, info->u->bMasterInterface0, 327 0, master_ifnum,
315 buf, 1024, 328 buf, CONTROL_BUFFER_SIZE,
316 RNDIS_CONTROL_TIMEOUT_MS); 329 RNDIS_CONTROL_TIMEOUT_MS);
317 if (likely(retval >= 8)) { 330 if (likely(retval >= 8)) {
318 msg_len = le32_to_cpu(buf->msg_len); 331 msg_len = le32_to_cpu(buf->msg_len);
@@ -350,7 +363,7 @@ static int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf)
350 usb_sndctrlpipe(dev->udev, 0), 363 usb_sndctrlpipe(dev->udev, 0),
351 USB_CDC_SEND_ENCAPSULATED_COMMAND, 364 USB_CDC_SEND_ENCAPSULATED_COMMAND,
352 USB_TYPE_CLASS | USB_RECIP_INTERFACE, 365 USB_TYPE_CLASS | USB_RECIP_INTERFACE,
353 0, info->u->bMasterInterface0, 366 0, master_ifnum,
354 msg, sizeof *msg, 367 msg, sizeof *msg,
355 RNDIS_CONTROL_TIMEOUT_MS); 368 RNDIS_CONTROL_TIMEOUT_MS);
356 if (unlikely(retval < 0)) 369 if (unlikely(retval < 0))
@@ -393,38 +406,64 @@ static int rndis_bind(struct usbnet *dev, struct usb_interface *intf)
393 u32 tmp; 406 u32 tmp;
394 407
395 /* we can't rely on i/o from stack working, or stack allocation */ 408 /* we can't rely on i/o from stack working, or stack allocation */
396 u.buf = kmalloc(1024, GFP_KERNEL); 409 u.buf = kmalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL);
397 if (!u.buf) 410 if (!u.buf)
398 return -ENOMEM; 411 return -ENOMEM;
399 retval = usbnet_generic_cdc_bind(dev, intf); 412 retval = usbnet_generic_cdc_bind(dev, intf);
400 if (retval < 0) 413 if (retval < 0)
401 goto fail; 414 goto fail;
402 415
403 net->hard_header_len += sizeof (struct rndis_data_hdr);
404
405 /* initialize; max transfer is 16KB at full speed */
406 u.init->msg_type = RNDIS_MSG_INIT; 416 u.init->msg_type = RNDIS_MSG_INIT;
407 u.init->msg_len = ccpu2(sizeof *u.init); 417 u.init->msg_len = ccpu2(sizeof *u.init);
408 u.init->major_version = ccpu2(1); 418 u.init->major_version = ccpu2(1);
409 u.init->minor_version = ccpu2(0); 419 u.init->minor_version = ccpu2(0);
410 u.init->max_transfer_size = ccpu2(net->mtu + net->hard_header_len);
411 420
421 /* max transfer (in spec) is 0x4000 at full speed, but for
422 * TX we'll stick to one Ethernet packet plus RNDIS framing.
423 * For RX we handle drivers that zero-pad to end-of-packet.
424 * Don't let userspace change these settings.
425 */
426 net->hard_header_len += sizeof (struct rndis_data_hdr);
427 dev->hard_mtu = net->mtu + net->hard_header_len;
428
429 dev->rx_urb_size = dev->hard_mtu + (dev->maxpacket + 1);
430 dev->rx_urb_size &= ~(dev->maxpacket - 1);
431 u.init->max_transfer_size = cpu_to_le32(dev->rx_urb_size);
432
433 net->change_mtu = NULL;
412 retval = rndis_command(dev, u.header); 434 retval = rndis_command(dev, u.header);
413 if (unlikely(retval < 0)) { 435 if (unlikely(retval < 0)) {
414 /* it might not even be an RNDIS device!! */ 436 /* it might not even be an RNDIS device!! */
415 dev_err(&intf->dev, "RNDIS init failed, %d\n", retval); 437 dev_err(&intf->dev, "RNDIS init failed, %d\n", retval);
438 goto fail_and_release;
439 }
440 tmp = le32_to_cpu(u.init_c->max_transfer_size);
441 if (tmp < dev->hard_mtu) {
442 dev_err(&intf->dev,
443 "dev can't take %u byte packets (max %u)\n",
444 dev->hard_mtu, tmp);
416 goto fail_and_release; 445 goto fail_and_release;
417 } 446 }
418 dev->hard_mtu = le32_to_cpu(u.init_c->max_transfer_size); 447
419 /* REVISIT: peripheral "alignment" request is ignored ... */ 448 /* REVISIT: peripheral "alignment" request is ignored ... */
420 dev_dbg(&intf->dev, "hard mtu %u, align %d\n", dev->hard_mtu, 449 dev_dbg(&intf->dev,
450 "hard mtu %u (%u from dev), rx buflen %Zu, align %d\n",
451 dev->hard_mtu, tmp, dev->rx_urb_size,
421 1 << le32_to_cpu(u.init_c->packet_alignment)); 452 1 << le32_to_cpu(u.init_c->packet_alignment));
422 453
423 /* get designated host ethernet address */ 454 /* Get designated host ethernet address.
424 memset(u.get, 0, sizeof *u.get); 455 *
456 * Adding a payload exactly the same size as the expected response
457 * payload is an evident requirement MSFT added for ActiveSync.
458 * This undocumented (and nonsensical) issue was found by sniffing
459 * protocol requests from the ActiveSync 4.1 Windows driver.
460 */
461 memset(u.get, 0, sizeof *u.get + 48);
425 u.get->msg_type = RNDIS_MSG_QUERY; 462 u.get->msg_type = RNDIS_MSG_QUERY;
426 u.get->msg_len = ccpu2(sizeof *u.get); 463 u.get->msg_len = ccpu2(sizeof *u.get + 48);
427 u.get->oid = OID_802_3_PERMANENT_ADDRESS; 464 u.get->oid = OID_802_3_PERMANENT_ADDRESS;
465 u.get->len = ccpu2(48);
466 u.get->offset = ccpu2(20);
428 467
429 retval = rndis_command(dev, u.header); 468 retval = rndis_command(dev, u.header);
430 if (unlikely(retval < 0)) { 469 if (unlikely(retval < 0)) {
@@ -432,7 +471,7 @@ static int rndis_bind(struct usbnet *dev, struct usb_interface *intf)
432 goto fail_and_release; 471 goto fail_and_release;
433 } 472 }
434 tmp = le32_to_cpu(u.get_c->offset); 473 tmp = le32_to_cpu(u.get_c->offset);
435 if (unlikely((tmp + 8) > (1024 - ETH_ALEN) 474 if (unlikely((tmp + 8) > (CONTROL_BUFFER_SIZE - ETH_ALEN)
436 || u.get_c->len != ccpu2(ETH_ALEN))) { 475 || u.get_c->len != ccpu2(ETH_ALEN))) {
437 dev_err(&intf->dev, "rndis ethaddr off %d len %d ?\n", 476 dev_err(&intf->dev, "rndis ethaddr off %d len %d ?\n",
438 tmp, le32_to_cpu(u.get_c->len)); 477 tmp, le32_to_cpu(u.get_c->len));
@@ -598,6 +637,10 @@ static const struct usb_device_id products [] = {
598 /* RNDIS is MSFT's un-official variant of CDC ACM */ 637 /* RNDIS is MSFT's un-official variant of CDC ACM */
599 USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff), 638 USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
600 .driver_info = (unsigned long) &rndis_info, 639 .driver_info = (unsigned long) &rndis_info,
640}, {
641 /* "ActiveSync" is an undocumented variant of RNDIS, used in WM5 */
642 USB_INTERFACE_INFO(USB_CLASS_MISC, 1, 1),
643 .driver_info = (unsigned long) &rndis_info,
601}, 644},
602 { }, // END 645 { }, // END
603}; 646};
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index 86bcf63b6ba5..11dad42c3c60 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -572,8 +572,20 @@ static void aircable_unthrottle(struct usb_serial_port *port)
572 schedule_work(&priv->rx_work); 572 schedule_work(&priv->rx_work);
573} 573}
574 574
575static struct usb_driver aircable_driver = {
576 .name = "aircable",
577 .probe = usb_serial_probe,
578 .disconnect = usb_serial_disconnect,
579 .id_table = id_table,
580 .no_dynamic_id = 1,
581};
582
575static struct usb_serial_driver aircable_device = { 583static struct usb_serial_driver aircable_device = {
576 .description = "aircable", 584 .driver = {
585 .owner = THIS_MODULE,
586 .name = "aircable",
587 },
588 .usb_driver = &aircable_driver,
577 .id_table = id_table, 589 .id_table = id_table,
578 .num_ports = 1, 590 .num_ports = 1,
579 .attach = aircable_attach, 591 .attach = aircable_attach,
@@ -587,13 +599,6 @@ static struct usb_serial_driver aircable_device = {
587 .unthrottle = aircable_unthrottle, 599 .unthrottle = aircable_unthrottle,
588}; 600};
589 601
590static struct usb_driver aircable_driver = {
591 .name = "aircable",
592 .probe = usb_serial_probe,
593 .disconnect = usb_serial_disconnect,
594 .id_table = id_table,
595};
596
597static int __init aircable_init (void) 602static int __init aircable_init (void)
598{ 603{
599 int retval; 604 int retval;
diff --git a/drivers/usb/serial/airprime.c b/drivers/usb/serial/airprime.c
index f2ca76a9cbac..0af42e32fa0a 100644
--- a/drivers/usb/serial/airprime.c
+++ b/drivers/usb/serial/airprime.c
@@ -277,6 +277,7 @@ static struct usb_serial_driver airprime_device = {
277 .owner = THIS_MODULE, 277 .owner = THIS_MODULE,
278 .name = "airprime", 278 .name = "airprime",
279 }, 279 },
280 .usb_driver = &airprime_driver,
280 .id_table = id_table, 281 .id_table = id_table,
281 .num_interrupt_in = NUM_DONT_CARE, 282 .num_interrupt_in = NUM_DONT_CARE,
282 .num_bulk_in = NUM_DONT_CARE, 283 .num_bulk_in = NUM_DONT_CARE,
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 5261cd22ee6b..edd685791a6b 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -444,6 +444,7 @@ static struct usb_driver ark3116_driver = {
444 .probe = usb_serial_probe, 444 .probe = usb_serial_probe,
445 .disconnect = usb_serial_disconnect, 445 .disconnect = usb_serial_disconnect,
446 .id_table = id_table, 446 .id_table = id_table,
447 .no_dynamic_id = 1,
447}; 448};
448 449
449static struct usb_serial_driver ark3116_device = { 450static struct usb_serial_driver ark3116_device = {
@@ -452,6 +453,7 @@ static struct usb_serial_driver ark3116_device = {
452 .name = "ark3116", 453 .name = "ark3116",
453 }, 454 },
454 .id_table = id_table, 455 .id_table = id_table,
456 .usb_driver = &ark3116_driver,
455 .num_interrupt_in = 1, 457 .num_interrupt_in = 1,
456 .num_bulk_in = 1, 458 .num_bulk_in = 1,
457 .num_bulk_out = 1, 459 .num_bulk_out = 1,
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index 38b4dae319ee..3b800d277c4b 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -126,6 +126,7 @@ static struct usb_serial_driver belkin_device = {
126 .name = "belkin", 126 .name = "belkin",
127 }, 127 },
128 .description = "Belkin / Peracom / GoHubs USB Serial Adapter", 128 .description = "Belkin / Peracom / GoHubs USB Serial Adapter",
129 .usb_driver = &belkin_driver,
129 .id_table = id_table_combined, 130 .id_table = id_table_combined,
130 .num_interrupt_in = 1, 131 .num_interrupt_in = 1,
131 .num_bulk_in = 1, 132 .num_bulk_in = 1,
diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
index 6542f220468f..c08a38402b93 100644
--- a/drivers/usb/serial/bus.c
+++ b/drivers/usb/serial/bus.c
@@ -103,11 +103,52 @@ exit:
103 return retval; 103 return retval;
104} 104}
105 105
106#ifdef CONFIG_HOTPLUG
107static ssize_t store_new_id(struct device_driver *driver,
108 const char *buf, size_t count)
109{
110 struct usb_serial_driver *usb_drv = to_usb_serial_driver(driver);
111 ssize_t retval = usb_store_new_id(&usb_drv->dynids, driver, buf, count);
112
113 if (retval >= 0 && usb_drv->usb_driver != NULL)
114 retval = usb_store_new_id(&usb_drv->usb_driver->dynids,
115 &usb_drv->usb_driver->drvwrap.driver,
116 buf, count);
117 return retval;
118}
119
120static struct driver_attribute drv_attrs[] = {
121 __ATTR(new_id, S_IWUSR, NULL, store_new_id),
122 __ATTR_NULL,
123};
124
125static void free_dynids(struct usb_serial_driver *drv)
126{
127 struct usb_dynid *dynid, *n;
128
129 spin_lock(&drv->dynids.lock);
130 list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
131 list_del(&dynid->node);
132 kfree(dynid);
133 }
134 spin_unlock(&drv->dynids.lock);
135}
136
137#else
138static struct driver_attribute drv_attrs[] = {
139 __ATTR_NULL,
140};
141static inline void free_dynids(struct usb_driver *drv)
142{
143}
144#endif
145
106struct bus_type usb_serial_bus_type = { 146struct bus_type usb_serial_bus_type = {
107 .name = "usb-serial", 147 .name = "usb-serial",
108 .match = usb_serial_device_match, 148 .match = usb_serial_device_match,
109 .probe = usb_serial_device_probe, 149 .probe = usb_serial_device_probe,
110 .remove = usb_serial_device_remove, 150 .remove = usb_serial_device_remove,
151 .drv_attrs = drv_attrs,
111}; 152};
112 153
113int usb_serial_bus_register(struct usb_serial_driver *driver) 154int usb_serial_bus_register(struct usb_serial_driver *driver)
@@ -115,6 +156,9 @@ int usb_serial_bus_register(struct usb_serial_driver *driver)
115 int retval; 156 int retval;
116 157
117 driver->driver.bus = &usb_serial_bus_type; 158 driver->driver.bus = &usb_serial_bus_type;
159 spin_lock_init(&driver->dynids.lock);
160 INIT_LIST_HEAD(&driver->dynids.list);
161
118 retval = driver_register(&driver->driver); 162 retval = driver_register(&driver->driver);
119 163
120 return retval; 164 return retval;
@@ -122,6 +166,7 @@ int usb_serial_bus_register(struct usb_serial_driver *driver)
122 166
123void usb_serial_bus_deregister(struct usb_serial_driver *driver) 167void usb_serial_bus_deregister(struct usb_serial_driver *driver)
124{ 168{
169 free_dynids(driver);
125 driver_unregister(&driver->driver); 170 driver_unregister(&driver->driver);
126} 171}
127 172
diff --git a/drivers/usb/serial/cp2101.c b/drivers/usb/serial/cp2101.c
index 7ebaffd6ed86..06b4fffc189c 100644
--- a/drivers/usb/serial/cp2101.c
+++ b/drivers/usb/serial/cp2101.c
@@ -89,6 +89,7 @@ static struct usb_serial_driver cp2101_device = {
89 .owner = THIS_MODULE, 89 .owner = THIS_MODULE,
90 .name = "cp2101", 90 .name = "cp2101",
91 }, 91 },
92 .usb_driver = &cp2101_driver,
92 .id_table = id_table, 93 .id_table = id_table,
93 .num_interrupt_in = 0, 94 .num_interrupt_in = 0,
94 .num_bulk_in = 0, 95 .num_bulk_in = 0,
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index a63c3286caa0..4167753ed31f 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -88,6 +88,7 @@ static struct usb_serial_driver cyberjack_device = {
88 .name = "cyberjack", 88 .name = "cyberjack",
89 }, 89 },
90 .description = "Reiner SCT Cyberjack USB card reader", 90 .description = "Reiner SCT Cyberjack USB card reader",
91 .usb_driver = &cyberjack_driver,
91 .id_table = id_table, 92 .id_table = id_table,
92 .num_interrupt_in = 1, 93 .num_interrupt_in = 1,
93 .num_bulk_in = 1, 94 .num_bulk_in = 1,
@@ -98,7 +99,7 @@ static struct usb_serial_driver cyberjack_device = {
98 .open = cyberjack_open, 99 .open = cyberjack_open,
99 .close = cyberjack_close, 100 .close = cyberjack_close,
100 .write = cyberjack_write, 101 .write = cyberjack_write,
101 .write_room = cyberjack_write_room, 102 .write_room = cyberjack_write_room,
102 .read_int_callback = cyberjack_read_int_callback, 103 .read_int_callback = cyberjack_read_int_callback,
103 .read_bulk_callback = cyberjack_read_bulk_callback, 104 .read_bulk_callback = cyberjack_read_bulk_callback,
104 .write_bulk_callback = cyberjack_write_bulk_callback, 105 .write_bulk_callback = cyberjack_write_bulk_callback,
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 6bc1f404e186..57b8e27285fc 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -193,6 +193,7 @@ static struct usb_serial_driver cypress_earthmate_device = {
193 .name = "earthmate", 193 .name = "earthmate",
194 }, 194 },
195 .description = "DeLorme Earthmate USB", 195 .description = "DeLorme Earthmate USB",
196 .usb_driver = &cypress_driver,
196 .id_table = id_table_earthmate, 197 .id_table = id_table_earthmate,
197 .num_interrupt_in = 1, 198 .num_interrupt_in = 1,
198 .num_interrupt_out = 1, 199 .num_interrupt_out = 1,
@@ -222,6 +223,7 @@ static struct usb_serial_driver cypress_hidcom_device = {
222 .name = "cyphidcom", 223 .name = "cyphidcom",
223 }, 224 },
224 .description = "HID->COM RS232 Adapter", 225 .description = "HID->COM RS232 Adapter",
226 .usb_driver = &cypress_driver,
225 .id_table = id_table_cyphidcomrs232, 227 .id_table = id_table_cyphidcomrs232,
226 .num_interrupt_in = 1, 228 .num_interrupt_in = 1,
227 .num_interrupt_out = 1, 229 .num_interrupt_out = 1,
@@ -251,6 +253,7 @@ static struct usb_serial_driver cypress_ca42v2_device = {
251 .name = "nokiaca42v2", 253 .name = "nokiaca42v2",
252 }, 254 },
253 .description = "Nokia CA-42 V2 Adapter", 255 .description = "Nokia CA-42 V2 Adapter",
256 .usb_driver = &cypress_driver,
254 .id_table = id_table_nokiaca42v2, 257 .id_table = id_table_nokiaca42v2,
255 .num_interrupt_in = 1, 258 .num_interrupt_in = 1,
256 .num_interrupt_out = 1, 259 .num_interrupt_out = 1,
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index efd9ce3f931f..0b0fb51bad3e 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -509,6 +509,7 @@ static struct usb_serial_driver digi_acceleport_2_device = {
509 .name = "digi_2", 509 .name = "digi_2",
510 }, 510 },
511 .description = "Digi 2 port USB adapter", 511 .description = "Digi 2 port USB adapter",
512 .usb_driver = &digi_driver,
512 .id_table = id_table_2, 513 .id_table = id_table_2,
513 .num_interrupt_in = 0, 514 .num_interrupt_in = 0,
514 .num_bulk_in = 4, 515 .num_bulk_in = 4,
@@ -538,6 +539,7 @@ static struct usb_serial_driver digi_acceleport_4_device = {
538 .name = "digi_4", 539 .name = "digi_4",
539 }, 540 },
540 .description = "Digi 4 port USB adapter", 541 .description = "Digi 4 port USB adapter",
542 .usb_driver = &digi_driver,
541 .id_table = id_table_4, 543 .id_table = id_table_4,
542 .num_interrupt_in = 0, 544 .num_interrupt_in = 0,
543 .num_bulk_in = 5, 545 .num_bulk_in = 5,
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index 92beeb19795f..4703c8f85383 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -117,6 +117,7 @@ static struct usb_serial_driver empeg_device = {
117 .name = "empeg", 117 .name = "empeg",
118 }, 118 },
119 .id_table = id_table, 119 .id_table = id_table,
120 .usb_driver = &empeg_driver,
120 .num_interrupt_in = 0, 121 .num_interrupt_in = 0,
121 .num_bulk_in = 1, 122 .num_bulk_in = 1,
122 .num_bulk_out = 1, 123 .num_bulk_out = 1,
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 6986e756f7c0..4695952b6470 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -464,7 +464,6 @@ static struct usb_device_id id_table_combined [] = {
464 { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, 464 { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
465 { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) }, 465 { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) },
466 { USB_DEVICE(FTDI_VID, EVER_ECO_PRO_CDS) }, 466 { USB_DEVICE(FTDI_VID, EVER_ECO_PRO_CDS) },
467 { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_0_PID) },
468 { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID) }, 467 { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID) },
469 { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID) }, 468 { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID) },
470 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_0_PID) }, 469 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_0_PID) },
@@ -615,6 +614,7 @@ static struct usb_serial_driver ftdi_sio_device = {
615 .name = "ftdi_sio", 614 .name = "ftdi_sio",
616 }, 615 },
617 .description = "FTDI USB Serial Device", 616 .description = "FTDI USB Serial Device",
617 .usb_driver = &ftdi_driver ,
618 .id_table = id_table_combined, 618 .id_table = id_table_combined,
619 .num_interrupt_in = 0, 619 .num_interrupt_in = 0,
620 .num_bulk_in = 1, 620 .num_bulk_in = 1,
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 40dd394de58d..7eff1c03ba80 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -364,7 +364,6 @@
364 * USB-TTY activ, USB-TTY passiv. Some PIDs are used by several devices 364 * USB-TTY activ, USB-TTY passiv. Some PIDs are used by several devices
365 * and I'm not entirely sure which are used by which. 365 * and I'm not entirely sure which are used by which.
366 */ 366 */
367#define FTDI_4N_GALAXY_DE_0_PID 0x8372
368#define FTDI_4N_GALAXY_DE_1_PID 0xF3C0 367#define FTDI_4N_GALAXY_DE_1_PID 0xF3C0
369#define FTDI_4N_GALAXY_DE_2_PID 0xF3C1 368#define FTDI_4N_GALAXY_DE_2_PID 0xF3C1
370 369
diff --git a/drivers/usb/serial/funsoft.c b/drivers/usb/serial/funsoft.c
index 2bebd63d5ed1..4092f6dc9efd 100644
--- a/drivers/usb/serial/funsoft.c
+++ b/drivers/usb/serial/funsoft.c
@@ -58,6 +58,7 @@ static struct usb_serial_driver funsoft_device = {
58 .name = "funsoft", 58 .name = "funsoft",
59 }, 59 },
60 .id_table = id_table, 60 .id_table = id_table,
61 .usb_driver = &funsoft_driver,
61 .num_interrupt_in = NUM_DONT_CARE, 62 .num_interrupt_in = NUM_DONT_CARE,
62 .num_bulk_in = NUM_DONT_CARE, 63 .num_bulk_in = NUM_DONT_CARE,
63 .num_bulk_out = NUM_DONT_CARE, 64 .num_bulk_out = NUM_DONT_CARE,
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 6530d391ebed..74660a3aa670 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1566,6 +1566,7 @@ static struct usb_serial_driver garmin_device = {
1566 .name = "garmin_gps", 1566 .name = "garmin_gps",
1567 }, 1567 },
1568 .description = "Garmin GPS usb/tty", 1568 .description = "Garmin GPS usb/tty",
1569 .usb_driver = &garmin_driver,
1569 .id_table = id_table, 1570 .id_table = id_table,
1570 .num_interrupt_in = 1, 1571 .num_interrupt_in = 1,
1571 .num_bulk_in = 1, 1572 .num_bulk_in = 1,
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 36042937e77f..601e0648dec6 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -20,6 +20,10 @@
20#include <linux/usb/serial.h> 20#include <linux/usb/serial.h>
21#include <asm/uaccess.h> 21#include <asm/uaccess.h>
22 22
23static int generic_probe(struct usb_interface *interface,
24 const struct usb_device_id *id);
25
26
23static int debug; 27static int debug;
24 28
25#ifdef CONFIG_USB_SERIAL_GENERIC 29#ifdef CONFIG_USB_SERIAL_GENERIC
@@ -34,6 +38,21 @@ MODULE_PARM_DESC(product, "User specified USB idProduct");
34 38
35static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */ 39static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */
36 40
41/* we want to look at all devices, as the vendor/product id can change
42 * depending on the command line argument */
43static struct usb_device_id generic_serial_ids[] = {
44 {.driver_info = 42},
45 {}
46};
47
48static struct usb_driver generic_driver = {
49 .name = "usbserial_generic",
50 .probe = generic_probe,
51 .disconnect = usb_serial_disconnect,
52 .id_table = generic_serial_ids,
53 .no_dynamic_id = 1,
54};
55
37/* All of the device info needed for the Generic Serial Converter */ 56/* All of the device info needed for the Generic Serial Converter */
38struct usb_serial_driver usb_serial_generic_device = { 57struct usb_serial_driver usb_serial_generic_device = {
39 .driver = { 58 .driver = {
@@ -41,6 +60,7 @@ struct usb_serial_driver usb_serial_generic_device = {
41 .name = "generic", 60 .name = "generic",
42 }, 61 },
43 .id_table = generic_device_ids, 62 .id_table = generic_device_ids,
63 .usb_driver = &generic_driver,
44 .num_interrupt_in = NUM_DONT_CARE, 64 .num_interrupt_in = NUM_DONT_CARE,
45 .num_bulk_in = NUM_DONT_CARE, 65 .num_bulk_in = NUM_DONT_CARE,
46 .num_bulk_out = NUM_DONT_CARE, 66 .num_bulk_out = NUM_DONT_CARE,
@@ -48,13 +68,6 @@ struct usb_serial_driver usb_serial_generic_device = {
48 .shutdown = usb_serial_generic_shutdown, 68 .shutdown = usb_serial_generic_shutdown,
49}; 69};
50 70
51/* we want to look at all devices, as the vendor/product id can change
52 * depending on the command line argument */
53static struct usb_device_id generic_serial_ids[] = {
54 {.driver_info = 42},
55 {}
56};
57
58static int generic_probe(struct usb_interface *interface, 71static int generic_probe(struct usb_interface *interface,
59 const struct usb_device_id *id) 72 const struct usb_device_id *id)
60{ 73{
@@ -65,14 +78,6 @@ static int generic_probe(struct usb_interface *interface,
65 return usb_serial_probe(interface, id); 78 return usb_serial_probe(interface, id);
66 return -ENODEV; 79 return -ENODEV;
67} 80}
68
69static struct usb_driver generic_driver = {
70 .name = "usbserial_generic",
71 .probe = generic_probe,
72 .disconnect = usb_serial_disconnect,
73 .id_table = generic_serial_ids,
74 .no_dynamic_id = 1,
75};
76#endif 81#endif
77 82
78int usb_serial_generic_register (int _debug) 83int usb_serial_generic_register (int _debug)
diff --git a/drivers/usb/serial/hp4x.c b/drivers/usb/serial/hp4x.c
index ebcac701b069..6c6ebae741c9 100644
--- a/drivers/usb/serial/hp4x.c
+++ b/drivers/usb/serial/hp4x.c
@@ -49,6 +49,7 @@ static struct usb_serial_driver hp49gp_device = {
49 .name = "hp4X", 49 .name = "hp4X",
50 }, 50 },
51 .id_table = id_table, 51 .id_table = id_table,
52 .usb_driver = &hp49gp_driver,
52 .num_interrupt_in = NUM_DONT_CARE, 53 .num_interrupt_in = NUM_DONT_CARE,
53 .num_bulk_in = NUM_DONT_CARE, 54 .num_bulk_in = NUM_DONT_CARE,
54 .num_bulk_out = NUM_DONT_CARE, 55 .num_bulk_out = NUM_DONT_CARE,
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index f623d58370a4..6a26a2e683a6 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -146,6 +146,8 @@ struct edgeport_serial {
146 struct edge_manuf_descriptor manuf_descriptor; /* the manufacturer descriptor */ 146 struct edge_manuf_descriptor manuf_descriptor; /* the manufacturer descriptor */
147 struct edge_boot_descriptor boot_descriptor; /* the boot firmware descriptor */ 147 struct edge_boot_descriptor boot_descriptor; /* the boot firmware descriptor */
148 struct edgeport_product_info product_info; /* Product Info */ 148 struct edgeport_product_info product_info; /* Product Info */
149 struct edge_compatibility_descriptor epic_descriptor; /* Edgeport compatible descriptor */
150 int is_epic; /* flag if EPiC device or not */
149 151
150 __u8 interrupt_in_endpoint; /* the interrupt endpoint handle */ 152 __u8 interrupt_in_endpoint; /* the interrupt endpoint handle */
151 unsigned char * interrupt_in_buffer; /* the buffer we use for the interrupt endpoint */ 153 unsigned char * interrupt_in_buffer; /* the buffer we use for the interrupt endpoint */
@@ -240,14 +242,6 @@ static void edge_shutdown (struct usb_serial *serial);
240 242
241#include "io_tables.h" /* all of the devices that this driver supports */ 243#include "io_tables.h" /* all of the devices that this driver supports */
242 244
243static struct usb_driver io_driver = {
244 .name = "io_edgeport",
245 .probe = usb_serial_probe,
246 .disconnect = usb_serial_disconnect,
247 .id_table = id_table_combined,
248 .no_dynamic_id = 1,
249};
250
251/* function prototypes for all of our local functions */ 245/* function prototypes for all of our local functions */
252static void process_rcvd_data (struct edgeport_serial *edge_serial, unsigned char *buffer, __u16 bufferLength); 246static void process_rcvd_data (struct edgeport_serial *edge_serial, unsigned char *buffer, __u16 bufferLength);
253static void process_rcvd_status (struct edgeport_serial *edge_serial, __u8 byte2, __u8 byte3); 247static void process_rcvd_status (struct edgeport_serial *edge_serial, __u8 byte2, __u8 byte3);
@@ -397,6 +391,7 @@ static int get_string (struct usb_device *dev, int Id, char *string, int buflen)
397 unicode_to_ascii(string, buflen, pStringDesc->wData, pStringDesc->bLength/2); 391 unicode_to_ascii(string, buflen, pStringDesc->wData, pStringDesc->bLength/2);
398 392
399 kfree(pStringDesc); 393 kfree(pStringDesc);
394 dbg("%s - USB String %s", __FUNCTION__, string);
400 return strlen(string); 395 return strlen(string);
401} 396}
402 397
@@ -434,6 +429,34 @@ static int get_string_desc (struct usb_device *dev, int Id, struct usb_string_de
434} 429}
435#endif 430#endif
436 431
432static void dump_product_info(struct edgeport_product_info *product_info)
433{
434 // Dump Product Info structure
435 dbg("**Product Information:");
436 dbg(" ProductId %x", product_info->ProductId );
437 dbg(" NumPorts %d", product_info->NumPorts );
438 dbg(" ProdInfoVer %d", product_info->ProdInfoVer );
439 dbg(" IsServer %d", product_info->IsServer);
440 dbg(" IsRS232 %d", product_info->IsRS232 );
441 dbg(" IsRS422 %d", product_info->IsRS422 );
442 dbg(" IsRS485 %d", product_info->IsRS485 );
443 dbg(" RomSize %d", product_info->RomSize );
444 dbg(" RamSize %d", product_info->RamSize );
445 dbg(" CpuRev %x", product_info->CpuRev );
446 dbg(" BoardRev %x", product_info->BoardRev);
447 dbg(" BootMajorVersion %d.%d.%d", product_info->BootMajorVersion,
448 product_info->BootMinorVersion,
449 le16_to_cpu(product_info->BootBuildNumber));
450 dbg(" FirmwareMajorVersion %d.%d.%d", product_info->FirmwareMajorVersion,
451 product_info->FirmwareMinorVersion,
452 le16_to_cpu(product_info->FirmwareBuildNumber));
453 dbg(" ManufactureDescDate %d/%d/%d", product_info->ManufactureDescDate[0],
454 product_info->ManufactureDescDate[1],
455 product_info->ManufactureDescDate[2]+1900);
456 dbg(" iDownloadFile 0x%x", product_info->iDownloadFile);
457 dbg(" EpicVer %d", product_info->EpicVer);
458}
459
437static void get_product_info(struct edgeport_serial *edge_serial) 460static void get_product_info(struct edgeport_serial *edge_serial)
438{ 461{
439 struct edgeport_product_info *product_info = &edge_serial->product_info; 462 struct edgeport_product_info *product_info = &edge_serial->product_info;
@@ -495,30 +518,60 @@ static void get_product_info(struct edgeport_serial *edge_serial)
495 break; 518 break;
496 } 519 }
497 520
498 // Dump Product Info structure 521 dump_product_info(product_info);
499 dbg("**Product Information:"); 522}
500 dbg(" ProductId %x", product_info->ProductId );
501 dbg(" NumPorts %d", product_info->NumPorts );
502 dbg(" ProdInfoVer %d", product_info->ProdInfoVer );
503 dbg(" IsServer %d", product_info->IsServer);
504 dbg(" IsRS232 %d", product_info->IsRS232 );
505 dbg(" IsRS422 %d", product_info->IsRS422 );
506 dbg(" IsRS485 %d", product_info->IsRS485 );
507 dbg(" RomSize %d", product_info->RomSize );
508 dbg(" RamSize %d", product_info->RamSize );
509 dbg(" CpuRev %x", product_info->CpuRev );
510 dbg(" BoardRev %x", product_info->BoardRev);
511 dbg(" BootMajorVersion %d.%d.%d", product_info->BootMajorVersion,
512 product_info->BootMinorVersion,
513 le16_to_cpu(product_info->BootBuildNumber));
514 dbg(" FirmwareMajorVersion %d.%d.%d", product_info->FirmwareMajorVersion,
515 product_info->FirmwareMinorVersion,
516 le16_to_cpu(product_info->FirmwareBuildNumber));
517 dbg(" ManufactureDescDate %d/%d/%d", product_info->ManufactureDescDate[0],
518 product_info->ManufactureDescDate[1],
519 product_info->ManufactureDescDate[2]+1900);
520 dbg(" iDownloadFile 0x%x", product_info->iDownloadFile);
521 523
524static int get_epic_descriptor(struct edgeport_serial *ep)
525{
526 int result;
527 struct usb_serial *serial = ep->serial;
528 struct edgeport_product_info *product_info = &ep->product_info;
529 struct edge_compatibility_descriptor *epic = &ep->epic_descriptor;
530 struct edge_compatibility_bits *bits;
531
532 ep->is_epic = 0;
533 result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
534 USB_REQUEST_ION_GET_EPIC_DESC,
535 0xC0, 0x00, 0x00,
536 &ep->epic_descriptor,
537 sizeof(struct edge_compatibility_descriptor),
538 300);
539
540 dbg("%s result = %d", __FUNCTION__, result);
541
542 if (result > 0) {
543 ep->is_epic = 1;
544 memset(product_info, 0, sizeof(struct edgeport_product_info));
545
546 product_info->NumPorts = epic->NumPorts;
547 product_info->ProdInfoVer = 0;
548 product_info->FirmwareMajorVersion = epic->MajorVersion;
549 product_info->FirmwareMinorVersion = epic->MinorVersion;
550 product_info->FirmwareBuildNumber = epic->BuildNumber;
551 product_info->iDownloadFile = epic->iDownloadFile;
552 product_info->EpicVer = epic->EpicVer;
553 product_info->Epic = epic->Supports;
554 product_info->ProductId = ION_DEVICE_ID_EDGEPORT_COMPATIBLE;
555 dump_product_info(product_info);
556
557 bits = &ep->epic_descriptor.Supports;
558 dbg("**EPIC descriptor:");
559 dbg(" VendEnableSuspend: %s", bits->VendEnableSuspend ? "TRUE": "FALSE");
560 dbg(" IOSPOpen : %s", bits->IOSPOpen ? "TRUE": "FALSE" );
561 dbg(" IOSPClose : %s", bits->IOSPClose ? "TRUE": "FALSE" );
562 dbg(" IOSPChase : %s", bits->IOSPChase ? "TRUE": "FALSE" );
563 dbg(" IOSPSetRxFlow : %s", bits->IOSPSetRxFlow ? "TRUE": "FALSE" );
564 dbg(" IOSPSetTxFlow : %s", bits->IOSPSetTxFlow ? "TRUE": "FALSE" );
565 dbg(" IOSPSetXChar : %s", bits->IOSPSetXChar ? "TRUE": "FALSE" );
566 dbg(" IOSPRxCheck : %s", bits->IOSPRxCheck ? "TRUE": "FALSE" );
567 dbg(" IOSPSetClrBreak : %s", bits->IOSPSetClrBreak ? "TRUE": "FALSE" );
568 dbg(" IOSPWriteMCR : %s", bits->IOSPWriteMCR ? "TRUE": "FALSE" );
569 dbg(" IOSPWriteLCR : %s", bits->IOSPWriteLCR ? "TRUE": "FALSE" );
570 dbg(" IOSPSetBaudRate : %s", bits->IOSPSetBaudRate ? "TRUE": "FALSE" );
571 dbg(" TrueEdgeport : %s", bits->TrueEdgeport ? "TRUE": "FALSE" );
572 }
573
574 return result;
522} 575}
523 576
524 577
@@ -1017,21 +1070,29 @@ static void edge_close (struct usb_serial_port *port, struct file * filp)
1017 1070
1018 edge_port->closePending = TRUE; 1071 edge_port->closePending = TRUE;
1019 1072
1020 /* flush and chase */ 1073 if ((!edge_serial->is_epic) ||
1021 edge_port->chaseResponsePending = TRUE; 1074 ((edge_serial->is_epic) &&
1022 1075 (edge_serial->epic_descriptor.Supports.IOSPChase))) {
1023 dbg("%s - Sending IOSP_CMD_CHASE_PORT", __FUNCTION__); 1076 /* flush and chase */
1024 status = send_iosp_ext_cmd (edge_port, IOSP_CMD_CHASE_PORT, 0); 1077 edge_port->chaseResponsePending = TRUE;
1025 if (status == 0) { 1078
1026 // block until chase finished 1079 dbg("%s - Sending IOSP_CMD_CHASE_PORT", __FUNCTION__);
1027 block_until_chase_response(edge_port); 1080 status = send_iosp_ext_cmd (edge_port, IOSP_CMD_CHASE_PORT, 0);
1028 } else { 1081 if (status == 0) {
1029 edge_port->chaseResponsePending = FALSE; 1082 // block until chase finished
1083 block_until_chase_response(edge_port);
1084 } else {
1085 edge_port->chaseResponsePending = FALSE;
1086 }
1030 } 1087 }
1031 1088
1032 /* close the port */ 1089 if ((!edge_serial->is_epic) ||
1033 dbg("%s - Sending IOSP_CMD_CLOSE_PORT", __FUNCTION__); 1090 ((edge_serial->is_epic) &&
1034 send_iosp_ext_cmd (edge_port, IOSP_CMD_CLOSE_PORT, 0); 1091 (edge_serial->epic_descriptor.Supports.IOSPClose))) {
1092 /* close the port */
1093 dbg("%s - Sending IOSP_CMD_CLOSE_PORT", __FUNCTION__);
1094 send_iosp_ext_cmd (edge_port, IOSP_CMD_CLOSE_PORT, 0);
1095 }
1035 1096
1036 //port->close = TRUE; 1097 //port->close = TRUE;
1037 edge_port->closePending = FALSE; 1098 edge_port->closePending = FALSE;
@@ -1694,29 +1755,38 @@ static int edge_ioctl (struct usb_serial_port *port, struct file *file, unsigned
1694static void edge_break (struct usb_serial_port *port, int break_state) 1755static void edge_break (struct usb_serial_port *port, int break_state)
1695{ 1756{
1696 struct edgeport_port *edge_port = usb_get_serial_port_data(port); 1757 struct edgeport_port *edge_port = usb_get_serial_port_data(port);
1758 struct edgeport_serial *edge_serial = usb_get_serial_data(port->serial);
1697 int status; 1759 int status;
1698 1760
1699 /* flush and chase */ 1761 if ((!edge_serial->is_epic) ||
1700 edge_port->chaseResponsePending = TRUE; 1762 ((edge_serial->is_epic) &&
1701 1763 (edge_serial->epic_descriptor.Supports.IOSPChase))) {
1702 dbg("%s - Sending IOSP_CMD_CHASE_PORT", __FUNCTION__); 1764 /* flush and chase */
1703 status = send_iosp_ext_cmd (edge_port, IOSP_CMD_CHASE_PORT, 0); 1765 edge_port->chaseResponsePending = TRUE;
1704 if (status == 0) { 1766
1705 // block until chase finished 1767 dbg("%s - Sending IOSP_CMD_CHASE_PORT", __FUNCTION__);
1706 block_until_chase_response(edge_port); 1768 status = send_iosp_ext_cmd (edge_port, IOSP_CMD_CHASE_PORT, 0);
1707 } else { 1769 if (status == 0) {
1708 edge_port->chaseResponsePending = FALSE; 1770 // block until chase finished
1771 block_until_chase_response(edge_port);
1772 } else {
1773 edge_port->chaseResponsePending = FALSE;
1774 }
1709 } 1775 }
1710 1776
1711 if (break_state == -1) { 1777 if ((!edge_serial->is_epic) ||
1712 dbg("%s - Sending IOSP_CMD_SET_BREAK", __FUNCTION__); 1778 ((edge_serial->is_epic) &&
1713 status = send_iosp_ext_cmd (edge_port, IOSP_CMD_SET_BREAK, 0); 1779 (edge_serial->epic_descriptor.Supports.IOSPSetClrBreak))) {
1714 } else { 1780 if (break_state == -1) {
1715 dbg("%s - Sending IOSP_CMD_CLEAR_BREAK", __FUNCTION__); 1781 dbg("%s - Sending IOSP_CMD_SET_BREAK", __FUNCTION__);
1716 status = send_iosp_ext_cmd (edge_port, IOSP_CMD_CLEAR_BREAK, 0); 1782 status = send_iosp_ext_cmd (edge_port, IOSP_CMD_SET_BREAK, 0);
1717 } 1783 } else {
1718 if (status) { 1784 dbg("%s - Sending IOSP_CMD_CLEAR_BREAK", __FUNCTION__);
1719 dbg("%s - error sending break set/clear command.", __FUNCTION__); 1785 status = send_iosp_ext_cmd (edge_port, IOSP_CMD_CLEAR_BREAK, 0);
1786 }
1787 if (status) {
1788 dbg("%s - error sending break set/clear command.", __FUNCTION__);
1789 }
1720 } 1790 }
1721 1791
1722 return; 1792 return;
@@ -2288,6 +2358,7 @@ static int write_cmd_usb (struct edgeport_port *edge_port, unsigned char *buffer
2288 *****************************************************************************/ 2358 *****************************************************************************/
2289static int send_cmd_write_baud_rate (struct edgeport_port *edge_port, int baudRate) 2359static int send_cmd_write_baud_rate (struct edgeport_port *edge_port, int baudRate)
2290{ 2360{
2361 struct edgeport_serial *edge_serial = usb_get_serial_data(edge_port->port->serial);
2291 unsigned char *cmdBuffer; 2362 unsigned char *cmdBuffer;
2292 unsigned char *currCmd; 2363 unsigned char *currCmd;
2293 int cmdLen = 0; 2364 int cmdLen = 0;
@@ -2295,6 +2366,14 @@ static int send_cmd_write_baud_rate (struct edgeport_port *edge_port, int baudRa
2295 int status; 2366 int status;
2296 unsigned char number = edge_port->port->number - edge_port->port->serial->minor; 2367 unsigned char number = edge_port->port->number - edge_port->port->serial->minor;
2297 2368
2369 if ((!edge_serial->is_epic) ||
2370 ((edge_serial->is_epic) &&
2371 (!edge_serial->epic_descriptor.Supports.IOSPSetBaudRate))) {
2372 dbg("SendCmdWriteBaudRate - NOT Setting baud rate for port = %d, baud = %d",
2373 edge_port->port->number, baudRate);
2374 return 0;
2375 }
2376
2298 dbg("%s - port = %d, baud = %d", __FUNCTION__, edge_port->port->number, baudRate); 2377 dbg("%s - port = %d, baud = %d", __FUNCTION__, edge_port->port->number, baudRate);
2299 2378
2300 status = calc_baud_rate_divisor (baudRate, &divisor); 2379 status = calc_baud_rate_divisor (baudRate, &divisor);
@@ -2374,6 +2453,7 @@ static int calc_baud_rate_divisor (int baudrate, int *divisor)
2374 *****************************************************************************/ 2453 *****************************************************************************/
2375static int send_cmd_write_uart_register (struct edgeport_port *edge_port, __u8 regNum, __u8 regValue) 2454static int send_cmd_write_uart_register (struct edgeport_port *edge_port, __u8 regNum, __u8 regValue)
2376{ 2455{
2456 struct edgeport_serial *edge_serial = usb_get_serial_data(edge_port->port->serial);
2377 unsigned char *cmdBuffer; 2457 unsigned char *cmdBuffer;
2378 unsigned char *currCmd; 2458 unsigned char *currCmd;
2379 unsigned long cmdLen = 0; 2459 unsigned long cmdLen = 0;
@@ -2381,6 +2461,22 @@ static int send_cmd_write_uart_register (struct edgeport_port *edge_port, __u8 r
2381 2461
2382 dbg("%s - write to %s register 0x%02x", (regNum == MCR) ? "MCR" : "LCR", __FUNCTION__, regValue); 2462 dbg("%s - write to %s register 0x%02x", (regNum == MCR) ? "MCR" : "LCR", __FUNCTION__, regValue);
2383 2463
2464 if ((!edge_serial->is_epic) ||
2465 ((edge_serial->is_epic) &&
2466 (!edge_serial->epic_descriptor.Supports.IOSPWriteMCR) &&
2467 (regNum == MCR))) {
2468 dbg("SendCmdWriteUartReg - Not writting to MCR Register");
2469 return 0;
2470 }
2471
2472 if ((!edge_serial->is_epic) ||
2473 ((edge_serial->is_epic) &&
2474 (!edge_serial->epic_descriptor.Supports.IOSPWriteLCR) &&
2475 (regNum == LCR))) {
2476 dbg ("SendCmdWriteUartReg - Not writting to LCR Register");
2477 return 0;
2478 }
2479
2384 // Alloc memory for the string of commands. 2480 // Alloc memory for the string of commands.
2385 cmdBuffer = kmalloc (0x10, GFP_ATOMIC); 2481 cmdBuffer = kmalloc (0x10, GFP_ATOMIC);
2386 if (cmdBuffer == NULL ) { 2482 if (cmdBuffer == NULL ) {
@@ -2414,6 +2510,7 @@ static int send_cmd_write_uart_register (struct edgeport_port *edge_port, __u8 r
2414#endif 2510#endif
2415static void change_port_settings (struct edgeport_port *edge_port, struct ktermios *old_termios) 2511static void change_port_settings (struct edgeport_port *edge_port, struct ktermios *old_termios)
2416{ 2512{
2513 struct edgeport_serial *edge_serial = usb_get_serial_data(edge_port->port->serial);
2417 struct tty_struct *tty; 2514 struct tty_struct *tty;
2418 int baud; 2515 int baud;
2419 unsigned cflag; 2516 unsigned cflag;
@@ -2494,8 +2591,12 @@ static void change_port_settings (struct edgeport_port *edge_port, struct ktermi
2494 unsigned char stop_char = STOP_CHAR(tty); 2591 unsigned char stop_char = STOP_CHAR(tty);
2495 unsigned char start_char = START_CHAR(tty); 2592 unsigned char start_char = START_CHAR(tty);
2496 2593
2497 send_iosp_ext_cmd (edge_port, IOSP_CMD_SET_XON_CHAR, start_char); 2594 if ((!edge_serial->is_epic) ||
2498 send_iosp_ext_cmd (edge_port, IOSP_CMD_SET_XOFF_CHAR, stop_char); 2595 ((edge_serial->is_epic) &&
2596 (edge_serial->epic_descriptor.Supports.IOSPSetXChar))) {
2597 send_iosp_ext_cmd(edge_port, IOSP_CMD_SET_XON_CHAR, start_char);
2598 send_iosp_ext_cmd(edge_port, IOSP_CMD_SET_XOFF_CHAR, stop_char);
2599 }
2499 2600
2500 /* if we are implementing INBOUND XON/XOFF */ 2601 /* if we are implementing INBOUND XON/XOFF */
2501 if (I_IXOFF(tty)) { 2602 if (I_IXOFF(tty)) {
@@ -2515,8 +2616,14 @@ static void change_port_settings (struct edgeport_port *edge_port, struct ktermi
2515 } 2616 }
2516 2617
2517 /* Set flow control to the configured value */ 2618 /* Set flow control to the configured value */
2518 send_iosp_ext_cmd (edge_port, IOSP_CMD_SET_RX_FLOW, rxFlow); 2619 if ((!edge_serial->is_epic) ||
2519 send_iosp_ext_cmd (edge_port, IOSP_CMD_SET_TX_FLOW, txFlow); 2620 ((edge_serial->is_epic) &&
2621 (edge_serial->epic_descriptor.Supports.IOSPSetRxFlow)))
2622 send_iosp_ext_cmd(edge_port, IOSP_CMD_SET_RX_FLOW, rxFlow);
2623 if ((!edge_serial->is_epic) ||
2624 ((edge_serial->is_epic) &&
2625 (edge_serial->epic_descriptor.Supports.IOSPSetTxFlow)))
2626 send_iosp_ext_cmd(edge_port, IOSP_CMD_SET_TX_FLOW, txFlow);
2520 2627
2521 2628
2522 edge_port->shadowLCR &= ~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK); 2629 edge_port->shadowLCR &= ~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK);
@@ -2728,6 +2835,13 @@ static int edge_startup (struct usb_serial *serial)
2728 struct edgeport_port *edge_port; 2835 struct edgeport_port *edge_port;
2729 struct usb_device *dev; 2836 struct usb_device *dev;
2730 int i, j; 2837 int i, j;
2838 int response;
2839 int interrupt_in_found;
2840 int bulk_in_found;
2841 int bulk_out_found;
2842 static __u32 descriptor[3] = { EDGE_COMPATIBILITY_MASK0,
2843 EDGE_COMPATIBILITY_MASK1,
2844 EDGE_COMPATIBILITY_MASK2 };
2731 2845
2732 dev = serial->dev; 2846 dev = serial->dev;
2733 2847
@@ -2750,38 +2864,50 @@ static int edge_startup (struct usb_serial *serial)
2750 2864
2751 dev_info(&serial->dev->dev, "%s detected\n", edge_serial->name); 2865 dev_info(&serial->dev->dev, "%s detected\n", edge_serial->name);
2752 2866
2753 /* get the manufacturing descriptor for this device */ 2867 /* Read the epic descriptor */
2754 get_manufacturing_desc (edge_serial); 2868 if (get_epic_descriptor(edge_serial) <= 0) {
2869 /* memcpy descriptor to Supports structures */
2870 memcpy(&edge_serial->epic_descriptor.Supports, descriptor,
2871 sizeof(struct edge_compatibility_bits));
2872
2873 /* get the manufacturing descriptor for this device */
2874 get_manufacturing_desc (edge_serial);
2755 2875
2756 /* get the boot descriptor */ 2876 /* get the boot descriptor */
2757 get_boot_desc (edge_serial); 2877 get_boot_desc (edge_serial);
2758 2878
2759 get_product_info(edge_serial); 2879 get_product_info(edge_serial);
2880 }
2760 2881
2761 /* set the number of ports from the manufacturing description */ 2882 /* set the number of ports from the manufacturing description */
2762 /* serial->num_ports = serial->product_info.NumPorts; */ 2883 /* serial->num_ports = serial->product_info.NumPorts; */
2763 if (edge_serial->product_info.NumPorts != serial->num_ports) { 2884 if ((!edge_serial->is_epic) &&
2764 warn("%s - Device Reported %d serial ports vs core " 2885 (edge_serial->product_info.NumPorts != serial->num_ports)) {
2765 "thinking we have %d ports, email greg@kroah.com this info.", 2886 dev_warn(&serial->dev->dev, "Device Reported %d serial ports "
2766 __FUNCTION__, edge_serial->product_info.NumPorts, 2887 "vs. core thinking we have %d ports, email "
2767 serial->num_ports); 2888 "greg@kroah.com this information.",
2889 edge_serial->product_info.NumPorts,
2890 serial->num_ports);
2768 } 2891 }
2769 2892
2770 dbg("%s - time 1 %ld", __FUNCTION__, jiffies); 2893 dbg("%s - time 1 %ld", __FUNCTION__, jiffies);
2771 2894
2772 /* now load the application firmware into this device */ 2895 /* If not an EPiC device */
2773 load_application_firmware (edge_serial); 2896 if (!edge_serial->is_epic) {
2897 /* now load the application firmware into this device */
2898 load_application_firmware (edge_serial);
2774 2899
2775 dbg("%s - time 2 %ld", __FUNCTION__, jiffies); 2900 dbg("%s - time 2 %ld", __FUNCTION__, jiffies);
2776 2901
2777 /* Check current Edgeport EEPROM and update if necessary */ 2902 /* Check current Edgeport EEPROM and update if necessary */
2778 update_edgeport_E2PROM (edge_serial); 2903 update_edgeport_E2PROM (edge_serial);
2779
2780 dbg("%s - time 3 %ld", __FUNCTION__, jiffies);
2781 2904
2782 /* set the configuration to use #1 */ 2905 dbg("%s - time 3 %ld", __FUNCTION__, jiffies);
2783// dbg("set_configuration 1"); 2906
2784// usb_set_configuration (dev, 1); 2907 /* set the configuration to use #1 */
2908// dbg("set_configuration 1");
2909// usb_set_configuration (dev, 1);
2910 }
2785 2911
2786 /* we set up the pointers to the endpoints in the edge_open function, 2912 /* we set up the pointers to the endpoints in the edge_open function,
2787 * as the structures aren't created yet. */ 2913 * as the structures aren't created yet. */
@@ -2804,8 +2930,101 @@ static int edge_startup (struct usb_serial *serial)
2804 edge_port->port = serial->port[i]; 2930 edge_port->port = serial->port[i];
2805 usb_set_serial_port_data(serial->port[i], edge_port); 2931 usb_set_serial_port_data(serial->port[i], edge_port);
2806 } 2932 }
2807 2933
2808 return 0; 2934 response = 0;
2935
2936 if (edge_serial->is_epic) {
2937 /* EPIC thing, set up our interrupt polling now and our read urb, so
2938 * that the device knows it really is connected. */
2939 interrupt_in_found = bulk_in_found = bulk_out_found = FALSE;
2940 for (i = 0; i < serial->interface->altsetting[0].desc.bNumEndpoints; ++i) {
2941 struct usb_endpoint_descriptor *endpoint;
2942 int buffer_size;
2943
2944 endpoint = &serial->interface->altsetting[0].endpoint[i].desc;
2945 buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
2946 if ((!interrupt_in_found) &&
2947 (usb_endpoint_is_int_in(endpoint))) {
2948 /* we found a interrupt in endpoint */
2949 dbg("found interrupt in");
2950
2951 /* not set up yet, so do it now */
2952 edge_serial->interrupt_read_urb = usb_alloc_urb(0, GFP_KERNEL);
2953 if (!edge_serial->interrupt_read_urb) {
2954 err("out of memory");
2955 return -ENOMEM;
2956 }
2957 edge_serial->interrupt_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
2958 if (!edge_serial->interrupt_in_buffer) {
2959 err("out of memory");
2960 usb_free_urb(edge_serial->interrupt_read_urb);
2961 return -ENOMEM;
2962 }
2963 edge_serial->interrupt_in_endpoint = endpoint->bEndpointAddress;
2964
2965 /* set up our interrupt urb */
2966 usb_fill_int_urb(edge_serial->interrupt_read_urb,
2967 dev,
2968 usb_rcvintpipe(dev, endpoint->bEndpointAddress),
2969 edge_serial->interrupt_in_buffer,
2970 buffer_size,
2971 edge_interrupt_callback,
2972 edge_serial,
2973 endpoint->bInterval);
2974
2975 interrupt_in_found = TRUE;
2976 }
2977
2978 if ((!bulk_in_found) &&
2979 (usb_endpoint_is_bulk_in(endpoint))) {
2980 /* we found a bulk in endpoint */
2981 dbg("found bulk in");
2982
2983 /* not set up yet, so do it now */
2984 edge_serial->read_urb = usb_alloc_urb(0, GFP_KERNEL);
2985 if (!edge_serial->read_urb) {
2986 err("out of memory");
2987 return -ENOMEM;
2988 }
2989 edge_serial->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
2990 if (!edge_serial->bulk_in_buffer) {
2991 err ("out of memory");
2992 usb_free_urb(edge_serial->read_urb);
2993 return -ENOMEM;
2994 }
2995 edge_serial->bulk_in_endpoint = endpoint->bEndpointAddress;
2996
2997 /* set up our bulk in urb */
2998 usb_fill_bulk_urb(edge_serial->read_urb, dev,
2999 usb_rcvbulkpipe(dev, endpoint->bEndpointAddress),
3000 edge_serial->bulk_in_buffer,
3001 endpoint->wMaxPacketSize,
3002 edge_bulk_in_callback,
3003 edge_serial);
3004 bulk_in_found = TRUE;
3005 }
3006
3007 if ((!bulk_out_found) &&
3008 (usb_endpoint_is_bulk_out(endpoint))) {
3009 /* we found a bulk out endpoint */
3010 dbg("found bulk out");
3011 edge_serial->bulk_out_endpoint = endpoint->bEndpointAddress;
3012 bulk_out_found = TRUE;
3013 }
3014 }
3015
3016 if ((!interrupt_in_found) || (!bulk_in_found) || (!bulk_out_found)) {
3017 err ("Error - the proper endpoints were not found!");
3018 return -ENODEV;
3019 }
3020
3021 /* start interrupt read for this edgeport this interrupt will
3022 * continue as long as the edgeport is connected */
3023 response = usb_submit_urb(edge_serial->interrupt_read_urb, GFP_KERNEL);
3024 if (response)
3025 err("%s - Error %d submitting control urb", __FUNCTION__, response);
3026 }
3027 return response;
2809} 3028}
2810 3029
2811 3030
@@ -2815,6 +3034,7 @@ static int edge_startup (struct usb_serial *serial)
2815 ****************************************************************************/ 3034 ****************************************************************************/
2816static void edge_shutdown (struct usb_serial *serial) 3035static void edge_shutdown (struct usb_serial *serial)
2817{ 3036{
3037 struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
2818 int i; 3038 int i;
2819 3039
2820 dbg("%s", __FUNCTION__); 3040 dbg("%s", __FUNCTION__);
@@ -2824,7 +3044,18 @@ static void edge_shutdown (struct usb_serial *serial)
2824 kfree (usb_get_serial_port_data(serial->port[i])); 3044 kfree (usb_get_serial_port_data(serial->port[i]));
2825 usb_set_serial_port_data(serial->port[i], NULL); 3045 usb_set_serial_port_data(serial->port[i], NULL);
2826 } 3046 }
2827 kfree (usb_get_serial_data(serial)); 3047 /* free up our endpoint stuff */
3048 if (edge_serial->is_epic) {
3049 usb_unlink_urb(edge_serial->interrupt_read_urb);
3050 usb_free_urb(edge_serial->interrupt_read_urb);
3051 kfree(edge_serial->interrupt_in_buffer);
3052
3053 usb_unlink_urb(edge_serial->read_urb);
3054 usb_free_urb(edge_serial->read_urb);
3055 kfree(edge_serial->bulk_in_buffer);
3056 }
3057
3058 kfree(edge_serial);
2828 usb_set_serial_data(serial, NULL); 3059 usb_set_serial_data(serial, NULL);
2829} 3060}
2830 3061
@@ -2846,6 +3077,9 @@ static int __init edgeport_init(void)
2846 retval = usb_serial_register(&edgeport_8port_device); 3077 retval = usb_serial_register(&edgeport_8port_device);
2847 if (retval) 3078 if (retval)
2848 goto failed_8port_device_register; 3079 goto failed_8port_device_register;
3080 retval = usb_serial_register(&epic_device);
3081 if (retval)
3082 goto failed_epic_device_register;
2849 retval = usb_register(&io_driver); 3083 retval = usb_register(&io_driver);
2850 if (retval) 3084 if (retval)
2851 goto failed_usb_register; 3085 goto failed_usb_register;
@@ -2853,6 +3087,8 @@ static int __init edgeport_init(void)
2853 return 0; 3087 return 0;
2854 3088
2855failed_usb_register: 3089failed_usb_register:
3090 usb_serial_deregister(&epic_device);
3091failed_epic_device_register:
2856 usb_serial_deregister(&edgeport_8port_device); 3092 usb_serial_deregister(&edgeport_8port_device);
2857failed_8port_device_register: 3093failed_8port_device_register:
2858 usb_serial_deregister(&edgeport_4port_device); 3094 usb_serial_deregister(&edgeport_4port_device);
@@ -2873,6 +3109,7 @@ static void __exit edgeport_exit (void)
2873 usb_serial_deregister (&edgeport_2port_device); 3109 usb_serial_deregister (&edgeport_2port_device);
2874 usb_serial_deregister (&edgeport_4port_device); 3110 usb_serial_deregister (&edgeport_4port_device);
2875 usb_serial_deregister (&edgeport_8port_device); 3111 usb_serial_deregister (&edgeport_8port_device);
3112 usb_serial_deregister (&epic_device);
2876} 3113}
2877 3114
2878module_init(edgeport_init); 3115module_init(edgeport_init);
diff --git a/drivers/usb/serial/io_edgeport.h b/drivers/usb/serial/io_edgeport.h
index 123fa8a904e6..29a913a6daca 100644
--- a/drivers/usb/serial/io_edgeport.h
+++ b/drivers/usb/serial/io_edgeport.h
@@ -111,10 +111,12 @@ struct edgeport_product_info {
111 __le16 FirmwareBuildNumber; /* zzzz (LE format) */ 111 __le16 FirmwareBuildNumber; /* zzzz (LE format) */
112 112
113 __u8 ManufactureDescDate[3]; /* MM/DD/YY when descriptor template was compiled */ 113 __u8 ManufactureDescDate[3]; /* MM/DD/YY when descriptor template was compiled */
114 __u8 Unused1[1]; /* Available */ 114 __u8 HardwareType;
115 115
116 __u8 iDownloadFile; /* What to download to EPiC device */ 116 __u8 iDownloadFile; /* What to download to EPiC device */
117 __u8 Unused2[2]; /* Available */ 117 __u8 EpicVer; /* What version of EPiC spec this device supports */
118
119 struct edge_compatibility_bits Epic;
118}; 120};
119 121
120/* 122/*
diff --git a/drivers/usb/serial/io_tables.h b/drivers/usb/serial/io_tables.h
index fad561c04c76..6d3008772540 100644
--- a/drivers/usb/serial/io_tables.h
+++ b/drivers/usb/serial/io_tables.h
@@ -47,6 +47,18 @@ static struct usb_device_id edgeport_8port_id_table [] = {
47 { } 47 { }
48}; 48};
49 49
50static struct usb_device_id Epic_port_id_table [] = {
51 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0202) },
52 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0203) },
53 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0310) },
54 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0311) },
55 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0312) },
56 { USB_DEVICE(USB_VENDOR_ID_AXIOHM, AXIOHM_DEVICE_ID_EPIC_A758) },
57 { USB_DEVICE(USB_VENDOR_ID_AXIOHM, AXIOHM_DEVICE_ID_EPIC_A794) },
58 { USB_DEVICE(USB_VENDOR_ID_AXIOHM, AXIOHM_DEVICE_ID_EPIC_A225) },
59 { }
60};
61
50/* Devices that this driver supports */ 62/* Devices that this driver supports */
51static struct usb_device_id id_table_combined [] = { 63static struct usb_device_id id_table_combined [] = {
52 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4) }, 64 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4) },
@@ -70,17 +82,34 @@ static struct usb_device_id id_table_combined [] = {
70 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8R) }, 82 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8R) },
71 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8RR) }, 83 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8RR) },
72 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_412_8) }, 84 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_412_8) },
85 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0202) },
86 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0203) },
87 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0310) },
88 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0311) },
89 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0312) },
90 { USB_DEVICE(USB_VENDOR_ID_AXIOHM, AXIOHM_DEVICE_ID_EPIC_A758) },
91 { USB_DEVICE(USB_VENDOR_ID_AXIOHM, AXIOHM_DEVICE_ID_EPIC_A794) },
92 { USB_DEVICE(USB_VENDOR_ID_AXIOHM, AXIOHM_DEVICE_ID_EPIC_A225) },
73 { } /* Terminating entry */ 93 { } /* Terminating entry */
74}; 94};
75 95
76MODULE_DEVICE_TABLE (usb, id_table_combined); 96MODULE_DEVICE_TABLE (usb, id_table_combined);
77 97
98static struct usb_driver io_driver = {
99 .name = "io_edgeport",
100 .probe = usb_serial_probe,
101 .disconnect = usb_serial_disconnect,
102 .id_table = id_table_combined,
103 .no_dynamic_id = 1,
104};
105
78static struct usb_serial_driver edgeport_2port_device = { 106static struct usb_serial_driver edgeport_2port_device = {
79 .driver = { 107 .driver = {
80 .owner = THIS_MODULE, 108 .owner = THIS_MODULE,
81 .name = "edgeport_2", 109 .name = "edgeport_2",
82 }, 110 },
83 .description = "Edgeport 2 port adapter", 111 .description = "Edgeport 2 port adapter",
112 .usb_driver = &io_driver,
84 .id_table = edgeport_2port_id_table, 113 .id_table = edgeport_2port_id_table,
85 .num_interrupt_in = 1, 114 .num_interrupt_in = 1,
86 .num_bulk_in = 1, 115 .num_bulk_in = 1,
@@ -111,6 +140,7 @@ static struct usb_serial_driver edgeport_4port_device = {
111 .name = "edgeport_4", 140 .name = "edgeport_4",
112 }, 141 },
113 .description = "Edgeport 4 port adapter", 142 .description = "Edgeport 4 port adapter",
143 .usb_driver = &io_driver,
114 .id_table = edgeport_4port_id_table, 144 .id_table = edgeport_4port_id_table,
115 .num_interrupt_in = 1, 145 .num_interrupt_in = 1,
116 .num_bulk_in = 1, 146 .num_bulk_in = 1,
@@ -141,6 +171,7 @@ static struct usb_serial_driver edgeport_8port_device = {
141 .name = "edgeport_8", 171 .name = "edgeport_8",
142 }, 172 },
143 .description = "Edgeport 8 port adapter", 173 .description = "Edgeport 8 port adapter",
174 .usb_driver = &io_driver,
144 .id_table = edgeport_8port_id_table, 175 .id_table = edgeport_8port_id_table,
145 .num_interrupt_in = 1, 176 .num_interrupt_in = 1,
146 .num_bulk_in = 1, 177 .num_bulk_in = 1,
@@ -165,5 +196,35 @@ static struct usb_serial_driver edgeport_8port_device = {
165 .write_bulk_callback = edge_bulk_out_data_callback, 196 .write_bulk_callback = edge_bulk_out_data_callback,
166}; 197};
167 198
199static struct usb_serial_driver epic_device = {
200 .driver = {
201 .owner = THIS_MODULE,
202 .name = "epic",
203 },
204 .description = "EPiC device",
205 .id_table = Epic_port_id_table,
206 .num_interrupt_in = 1,
207 .num_bulk_in = 1,
208 .num_bulk_out = 1,
209 .num_ports = 1,
210 .open = edge_open,
211 .close = edge_close,
212 .throttle = edge_throttle,
213 .unthrottle = edge_unthrottle,
214 .attach = edge_startup,
215 .shutdown = edge_shutdown,
216 .ioctl = edge_ioctl,
217 .set_termios = edge_set_termios,
218 .tiocmget = edge_tiocmget,
219 .tiocmset = edge_tiocmset,
220 .write = edge_write,
221 .write_room = edge_write_room,
222 .chars_in_buffer = edge_chars_in_buffer,
223 .break_ctl = edge_break,
224 .read_int_callback = edge_interrupt_callback,
225 .read_bulk_callback = edge_bulk_in_callback,
226 .write_bulk_callback = edge_bulk_out_data_callback,
227};
228
168#endif 229#endif
169 230
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 980285c0233a..544098d2b775 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2979,6 +2979,7 @@ static struct usb_serial_driver edgeport_1port_device = {
2979 .name = "edgeport_ti_1", 2979 .name = "edgeport_ti_1",
2980 }, 2980 },
2981 .description = "Edgeport TI 1 port adapter", 2981 .description = "Edgeport TI 1 port adapter",
2982 .usb_driver = &io_driver,
2982 .id_table = edgeport_1port_id_table, 2983 .id_table = edgeport_1port_id_table,
2983 .num_interrupt_in = 1, 2984 .num_interrupt_in = 1,
2984 .num_bulk_in = 1, 2985 .num_bulk_in = 1,
@@ -3009,6 +3010,7 @@ static struct usb_serial_driver edgeport_2port_device = {
3009 .name = "edgeport_ti_2", 3010 .name = "edgeport_ti_2",
3010 }, 3011 },
3011 .description = "Edgeport TI 2 port adapter", 3012 .description = "Edgeport TI 2 port adapter",
3013 .usb_driver = &io_driver,
3012 .id_table = edgeport_2port_id_table, 3014 .id_table = edgeport_2port_id_table,
3013 .num_interrupt_in = 1, 3015 .num_interrupt_in = 1,
3014 .num_bulk_in = 2, 3016 .num_bulk_in = 2,
diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h
index f1804fd5a3dd..e57fa117e486 100644
--- a/drivers/usb/serial/io_usbvend.h
+++ b/drivers/usb/serial/io_usbvend.h
@@ -30,6 +30,7 @@
30 30
31#define USB_VENDOR_ID_ION 0x1608 // Our VID 31#define USB_VENDOR_ID_ION 0x1608 // Our VID
32#define USB_VENDOR_ID_TI 0x0451 // TI VID 32#define USB_VENDOR_ID_TI 0x0451 // TI VID
33#define USB_VENDOR_ID_AXIOHM 0x05D9 /* Axiohm VID */
33 34
34// 35//
35// Definitions of USB product IDs (PID) 36// Definitions of USB product IDs (PID)
@@ -334,6 +335,10 @@ struct edge_compatibility_bits
334 335
335}; 336};
336 337
338#define EDGE_COMPATIBILITY_MASK0 0x0001
339#define EDGE_COMPATIBILITY_MASK1 0x3FFF
340#define EDGE_COMPATIBILITY_MASK2 0x0001
341
337struct edge_compatibility_descriptor 342struct edge_compatibility_descriptor
338{ 343{
339 __u8 Length; // Descriptor Length (per USB spec) 344 __u8 Length; // Descriptor Length (per USB spec)
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index 42f757a5b876..a408184334ea 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -563,6 +563,7 @@ static struct usb_serial_driver ipaq_device = {
563 .name = "ipaq", 563 .name = "ipaq",
564 }, 564 },
565 .description = "PocketPC PDA", 565 .description = "PocketPC PDA",
566 .usb_driver = &ipaq_driver,
566 .id_table = ipaq_id_table, 567 .id_table = ipaq_id_table,
567 .num_interrupt_in = NUM_DONT_CARE, 568 .num_interrupt_in = NUM_DONT_CARE,
568 .num_bulk_in = 1, 569 .num_bulk_in = 1,
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index d3b9a351cef8..1bc586064c77 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -442,6 +442,7 @@ static struct usb_serial_driver ipw_device = {
442 .name = "ipw", 442 .name = "ipw",
443 }, 443 },
444 .description = "IPWireless converter", 444 .description = "IPWireless converter",
445 .usb_driver = &usb_ipw_driver,
445 .id_table = usb_ipw_ids, 446 .id_table = usb_ipw_ids,
446 .num_interrupt_in = NUM_DONT_CARE, 447 .num_interrupt_in = NUM_DONT_CARE,
447 .num_bulk_in = 1, 448 .num_bulk_in = 1,
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 8fdf486e3465..9d847f69291c 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -138,6 +138,7 @@ static struct usb_serial_driver ir_device = {
138 .name = "ir-usb", 138 .name = "ir-usb",
139 }, 139 },
140 .description = "IR Dongle", 140 .description = "IR Dongle",
141 .usb_driver = &ir_driver,
141 .id_table = id_table, 142 .id_table = id_table,
142 .num_interrupt_in = 1, 143 .num_interrupt_in = 1,
143 .num_bulk_in = 1, 144 .num_bulk_in = 1,
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 9d2fdfd6865f..e6966f12ed5a 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -1275,11 +1275,31 @@ static int keyspan_fake_startup (struct usb_serial *serial)
1275} 1275}
1276 1276
1277/* Helper functions used by keyspan_setup_urbs */ 1277/* Helper functions used by keyspan_setup_urbs */
1278static struct usb_endpoint_descriptor const *find_ep(struct usb_serial const *serial,
1279 int endpoint)
1280{
1281 struct usb_host_interface *iface_desc;
1282 struct usb_endpoint_descriptor *ep;
1283 int i;
1284
1285 iface_desc = serial->interface->cur_altsetting;
1286 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
1287 ep = &iface_desc->endpoint[i].desc;
1288 if (ep->bEndpointAddress == endpoint)
1289 return ep;
1290 }
1291 dev_warn(&serial->interface->dev, "found no endpoint descriptor for "
1292 "endpoint %x\n", endpoint);
1293 return NULL;
1294}
1295
1278static struct urb *keyspan_setup_urb (struct usb_serial *serial, int endpoint, 1296static struct urb *keyspan_setup_urb (struct usb_serial *serial, int endpoint,
1279 int dir, void *ctx, char *buf, int len, 1297 int dir, void *ctx, char *buf, int len,
1280 void (*callback)(struct urb *)) 1298 void (*callback)(struct urb *))
1281{ 1299{
1282 struct urb *urb; 1300 struct urb *urb;
1301 struct usb_endpoint_descriptor const *ep_desc;
1302 char const *ep_type_name;
1283 1303
1284 if (endpoint == -1) 1304 if (endpoint == -1)
1285 return NULL; /* endpoint not needed */ 1305 return NULL; /* endpoint not needed */
@@ -1291,11 +1311,32 @@ static struct urb *keyspan_setup_urb (struct usb_serial *serial, int endpoint,
1291 return NULL; 1311 return NULL;
1292 } 1312 }
1293 1313
1294 /* Fill URB using supplied data. */ 1314 ep_desc = find_ep(serial, endpoint);
1295 usb_fill_bulk_urb(urb, serial->dev, 1315 if (!ep_desc) {
1296 usb_sndbulkpipe(serial->dev, endpoint) | dir, 1316 /* leak the urb, something's wrong and the callers don't care */
1297 buf, len, callback, ctx); 1317 return urb;
1318 }
1319 if (usb_endpoint_xfer_int(ep_desc)) {
1320 ep_type_name = "INT";
1321 usb_fill_int_urb(urb, serial->dev,
1322 usb_sndintpipe(serial->dev, endpoint) | dir,
1323 buf, len, callback, ctx,
1324 ep_desc->bInterval);
1325 } else if (usb_endpoint_xfer_bulk(ep_desc)) {
1326 ep_type_name = "BULK";
1327 usb_fill_bulk_urb(urb, serial->dev,
1328 usb_sndbulkpipe(serial->dev, endpoint) | dir,
1329 buf, len, callback, ctx);
1330 } else {
1331 dev_warn(&serial->interface->dev,
1332 "unsupported endpoint type %x\n",
1333 ep_desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
1334 usb_free_urb(urb);
1335 return NULL;
1336 }
1298 1337
1338 dbg("%s - using urb %p for %s endpoint %x",
1339 __func__, urb, ep_type_name, endpoint);
1299 return urb; 1340 return urb;
1300} 1341}
1301 1342
diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h
index 6413d73c139c..c6830cbdc6df 100644
--- a/drivers/usb/serial/keyspan.h
+++ b/drivers/usb/serial/keyspan.h
@@ -229,7 +229,6 @@ struct ezusb_hex_record {
229#define keyspan_usa28_product_id 0x010f 229#define keyspan_usa28_product_id 0x010f
230#define keyspan_usa28x_product_id 0x0110 230#define keyspan_usa28x_product_id 0x0110
231#define keyspan_usa28xa_product_id 0x0115 231#define keyspan_usa28xa_product_id 0x0115
232#define keyspan_usa28xb_product_id 0x0110
233#define keyspan_usa49w_product_id 0x010a 232#define keyspan_usa49w_product_id 0x010a
234#define keyspan_usa49wlc_product_id 0x012a 233#define keyspan_usa49wlc_product_id 0x012a
235 234
@@ -511,7 +510,6 @@ static struct usb_device_id keyspan_ids_combined[] = {
511 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_product_id) }, 510 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_product_id) },
512 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_product_id) }, 511 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_product_id) },
513 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xa_product_id) }, 512 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xa_product_id) },
514 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xb_product_id) },
515 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49w_product_id)}, 513 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49w_product_id)},
516 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wlc_product_id)}, 514 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wlc_product_id)},
517 { } /* Terminating entry */ 515 { } /* Terminating entry */
@@ -559,7 +557,6 @@ static struct usb_device_id keyspan_2port_ids[] = {
559 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_product_id) }, 557 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_product_id) },
560 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_product_id) }, 558 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_product_id) },
561 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xa_product_id) }, 559 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xa_product_id) },
562 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xb_product_id) },
563 { } /* Terminating entry */ 560 { } /* Terminating entry */
564}; 561};
565 562
@@ -576,6 +573,7 @@ static struct usb_serial_driver keyspan_pre_device = {
576 .name = "keyspan_no_firm", 573 .name = "keyspan_no_firm",
577 }, 574 },
578 .description = "Keyspan - (without firmware)", 575 .description = "Keyspan - (without firmware)",
576 .usb_driver = &keyspan_driver,
579 .id_table = keyspan_pre_ids, 577 .id_table = keyspan_pre_ids,
580 .num_interrupt_in = NUM_DONT_CARE, 578 .num_interrupt_in = NUM_DONT_CARE,
581 .num_bulk_in = NUM_DONT_CARE, 579 .num_bulk_in = NUM_DONT_CARE,
@@ -590,6 +588,7 @@ static struct usb_serial_driver keyspan_1port_device = {
590 .name = "keyspan_1", 588 .name = "keyspan_1",
591 }, 589 },
592 .description = "Keyspan 1 port adapter", 590 .description = "Keyspan 1 port adapter",
591 .usb_driver = &keyspan_driver,
593 .id_table = keyspan_1port_ids, 592 .id_table = keyspan_1port_ids,
594 .num_interrupt_in = NUM_DONT_CARE, 593 .num_interrupt_in = NUM_DONT_CARE,
595 .num_bulk_in = NUM_DONT_CARE, 594 .num_bulk_in = NUM_DONT_CARE,
@@ -617,6 +616,7 @@ static struct usb_serial_driver keyspan_2port_device = {
617 .name = "keyspan_2", 616 .name = "keyspan_2",
618 }, 617 },
619 .description = "Keyspan 2 port adapter", 618 .description = "Keyspan 2 port adapter",
619 .usb_driver = &keyspan_driver,
620 .id_table = keyspan_2port_ids, 620 .id_table = keyspan_2port_ids,
621 .num_interrupt_in = NUM_DONT_CARE, 621 .num_interrupt_in = NUM_DONT_CARE,
622 .num_bulk_in = NUM_DONT_CARE, 622 .num_bulk_in = NUM_DONT_CARE,
@@ -644,6 +644,7 @@ static struct usb_serial_driver keyspan_4port_device = {
644 .name = "keyspan_4", 644 .name = "keyspan_4",
645 }, 645 },
646 .description = "Keyspan 4 port adapter", 646 .description = "Keyspan 4 port adapter",
647 .usb_driver = &keyspan_driver,
647 .id_table = keyspan_4port_ids, 648 .id_table = keyspan_4port_ids,
648 .num_interrupt_in = NUM_DONT_CARE, 649 .num_interrupt_in = NUM_DONT_CARE,
649 .num_bulk_in = 5, 650 .num_bulk_in = 5,
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 126b9703bbaf..da514cb785b3 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -793,6 +793,7 @@ static struct usb_serial_driver keyspan_pda_fake_device = {
793 .name = "keyspan_pda_pre", 793 .name = "keyspan_pda_pre",
794 }, 794 },
795 .description = "Keyspan PDA - (prerenumeration)", 795 .description = "Keyspan PDA - (prerenumeration)",
796 .usb_driver = &keyspan_pda_driver,
796 .id_table = id_table_fake, 797 .id_table = id_table_fake,
797 .num_interrupt_in = NUM_DONT_CARE, 798 .num_interrupt_in = NUM_DONT_CARE,
798 .num_bulk_in = NUM_DONT_CARE, 799 .num_bulk_in = NUM_DONT_CARE,
@@ -809,6 +810,7 @@ static struct usb_serial_driver xircom_pgs_fake_device = {
809 .name = "xircom_no_firm", 810 .name = "xircom_no_firm",
810 }, 811 },
811 .description = "Xircom / Entregra PGS - (prerenumeration)", 812 .description = "Xircom / Entregra PGS - (prerenumeration)",
813 .usb_driver = &keyspan_pda_driver,
812 .id_table = id_table_fake_xircom, 814 .id_table = id_table_fake_xircom,
813 .num_interrupt_in = NUM_DONT_CARE, 815 .num_interrupt_in = NUM_DONT_CARE,
814 .num_bulk_in = NUM_DONT_CARE, 816 .num_bulk_in = NUM_DONT_CARE,
@@ -824,6 +826,7 @@ static struct usb_serial_driver keyspan_pda_device = {
824 .name = "keyspan_pda", 826 .name = "keyspan_pda",
825 }, 827 },
826 .description = "Keyspan PDA", 828 .description = "Keyspan PDA",
829 .usb_driver = &keyspan_pda_driver,
827 .id_table = id_table_std, 830 .id_table = id_table_std,
828 .num_interrupt_in = 1, 831 .num_interrupt_in = 1,
829 .num_bulk_in = 0, 832 .num_bulk_in = 0,
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 5c4b06a99ac0..b2097c45a235 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -124,6 +124,7 @@ static struct usb_serial_driver kl5kusb105d_device = {
124 .name = "kl5kusb105d", 124 .name = "kl5kusb105d",
125 }, 125 },
126 .description = "KL5KUSB105D / PalmConnect", 126 .description = "KL5KUSB105D / PalmConnect",
127 .usb_driver = &kl5kusb105d_driver,
127 .id_table = id_table, 128 .id_table = id_table,
128 .num_interrupt_in = 1, 129 .num_interrupt_in = 1,
129 .num_bulk_in = 1, 130 .num_bulk_in = 1,
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 62bea0c923bd..0683b51f0932 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -110,6 +110,7 @@ static struct usb_serial_driver kobil_device = {
110 .name = "kobil", 110 .name = "kobil",
111 }, 111 },
112 .description = "KOBIL USB smart card terminal", 112 .description = "KOBIL USB smart card terminal",
113 .usb_driver = &kobil_driver,
113 .id_table = id_table, 114 .id_table = id_table,
114 .num_interrupt_in = NUM_DONT_CARE, 115 .num_interrupt_in = NUM_DONT_CARE,
115 .num_bulk_in = 0, 116 .num_bulk_in = 0,
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 38b1d17e06ef..4cd839b1407f 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -137,6 +137,7 @@ static struct usb_serial_driver mct_u232_device = {
137 .name = "mct_u232", 137 .name = "mct_u232",
138 }, 138 },
139 .description = "MCT U232", 139 .description = "MCT U232",
140 .usb_driver = &mct_u232_driver,
140 .id_table = id_table_combined, 141 .id_table = id_table_combined,
141 .num_interrupt_in = 2, 142 .num_interrupt_in = 2,
142 .num_bulk_in = 0, 143 .num_bulk_in = 0,
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index e55f4ed81d7b..6109c6704a73 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1605,12 +1605,21 @@ static void mos7720_shutdown(struct usb_serial *serial)
1605 usb_set_serial_data(serial, NULL); 1605 usb_set_serial_data(serial, NULL);
1606} 1606}
1607 1607
1608static struct usb_driver usb_driver = {
1609 .name = "moschip7720",
1610 .probe = usb_serial_probe,
1611 .disconnect = usb_serial_disconnect,
1612 .id_table = moschip_port_id_table,
1613 .no_dynamic_id = 1,
1614};
1615
1608static struct usb_serial_driver moschip7720_2port_driver = { 1616static struct usb_serial_driver moschip7720_2port_driver = {
1609 .driver = { 1617 .driver = {
1610 .owner = THIS_MODULE, 1618 .owner = THIS_MODULE,
1611 .name = "moschip7720", 1619 .name = "moschip7720",
1612 }, 1620 },
1613 .description = "Moschip 2 port adapter", 1621 .description = "Moschip 2 port adapter",
1622 .usb_driver = &usb_driver,
1614 .id_table = moschip_port_id_table, 1623 .id_table = moschip_port_id_table,
1615 .num_interrupt_in = 1, 1624 .num_interrupt_in = 1,
1616 .num_bulk_in = 2, 1625 .num_bulk_in = 2,
@@ -1631,13 +1640,6 @@ static struct usb_serial_driver moschip7720_2port_driver = {
1631 .read_bulk_callback = mos7720_bulk_in_callback, 1640 .read_bulk_callback = mos7720_bulk_in_callback,
1632}; 1641};
1633 1642
1634static struct usb_driver usb_driver = {
1635 .name = "moschip7720",
1636 .probe = usb_serial_probe,
1637 .disconnect = usb_serial_disconnect,
1638 .id_table = moschip_port_id_table,
1639};
1640
1641static int __init moschip7720_init(void) 1643static int __init moschip7720_init(void)
1642{ 1644{
1643 int retval; 1645 int retval;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 83f661403ba1..b2264a87617b 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -2834,12 +2834,21 @@ static void mos7840_shutdown(struct usb_serial *serial)
2834 2834
2835} 2835}
2836 2836
2837static struct usb_driver io_driver = {
2838 .name = "mos7840",
2839 .probe = usb_serial_probe,
2840 .disconnect = usb_serial_disconnect,
2841 .id_table = moschip_id_table_combined,
2842 .no_dynamic_id = 1,
2843};
2844
2837static struct usb_serial_driver moschip7840_4port_device = { 2845static struct usb_serial_driver moschip7840_4port_device = {
2838 .driver = { 2846 .driver = {
2839 .owner = THIS_MODULE, 2847 .owner = THIS_MODULE,
2840 .name = "mos7840", 2848 .name = "mos7840",
2841 }, 2849 },
2842 .description = DRIVER_DESC, 2850 .description = DRIVER_DESC,
2851 .usb_driver = &io_driver,
2843 .id_table = moschip_port_id_table, 2852 .id_table = moschip_port_id_table,
2844 .num_interrupt_in = 1, //NUM_DONT_CARE,//1, 2853 .num_interrupt_in = 1, //NUM_DONT_CARE,//1,
2845#ifdef check 2854#ifdef check
@@ -2869,13 +2878,6 @@ static struct usb_serial_driver moschip7840_4port_device = {
2869 .read_int_callback = mos7840_interrupt_callback, 2878 .read_int_callback = mos7840_interrupt_callback,
2870}; 2879};
2871 2880
2872static struct usb_driver io_driver = {
2873 .name = "mos7840",
2874 .probe = usb_serial_probe,
2875 .disconnect = usb_serial_disconnect,
2876 .id_table = moschip_id_table_combined,
2877};
2878
2879/**************************************************************************** 2881/****************************************************************************
2880 * moschip7840_init 2882 * moschip7840_init
2881 * This is called by the module subsystem, or on startup to initialize us 2883 * This is called by the module subsystem, or on startup to initialize us
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index 054abee81652..90701111d746 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -119,6 +119,7 @@ static struct usb_serial_driver navman_device = {
119 .name = "navman", 119 .name = "navman",
120 }, 120 },
121 .id_table = id_table, 121 .id_table = id_table,
122 .usb_driver = &navman_driver,
122 .num_interrupt_in = NUM_DONT_CARE, 123 .num_interrupt_in = NUM_DONT_CARE,
123 .num_bulk_in = NUM_DONT_CARE, 124 .num_bulk_in = NUM_DONT_CARE,
124 .num_bulk_out = NUM_DONT_CARE, 125 .num_bulk_out = NUM_DONT_CARE,
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index bc91d3b726fc..0216ac12a27d 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -93,6 +93,7 @@ static struct usb_serial_driver zyxel_omninet_device = {
93 .name = "omninet", 93 .name = "omninet",
94 }, 94 },
95 .description = "ZyXEL - omni.net lcd plus usb", 95 .description = "ZyXEL - omni.net lcd plus usb",
96 .usb_driver = &omninet_driver,
96 .id_table = id_table, 97 .id_table = id_table,
97 .num_interrupt_in = 1, 98 .num_interrupt_in = 1,
98 .num_bulk_in = 1, 99 .num_bulk_in = 1,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 0fed43a96871..ced9f32b29d9 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -135,6 +135,7 @@ static struct usb_serial_driver option_1port_device = {
135 .name = "option1", 135 .name = "option1",
136 }, 136 },
137 .description = "GSM modem (1-port)", 137 .description = "GSM modem (1-port)",
138 .usb_driver = &option_driver,
138 .id_table = option_ids1, 139 .id_table = option_ids1,
139 .num_interrupt_in = NUM_DONT_CARE, 140 .num_interrupt_in = NUM_DONT_CARE,
140 .num_bulk_in = NUM_DONT_CARE, 141 .num_bulk_in = NUM_DONT_CARE,
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 5dc2ac9afa90..6c083d4e2c9b 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -1118,6 +1118,7 @@ static struct usb_serial_driver pl2303_device = {
1118 .name = "pl2303", 1118 .name = "pl2303",
1119 }, 1119 },
1120 .id_table = id_table, 1120 .id_table = id_table,
1121 .usb_driver = &pl2303_driver,
1121 .num_interrupt_in = NUM_DONT_CARE, 1122 .num_interrupt_in = NUM_DONT_CARE,
1122 .num_bulk_in = 1, 1123 .num_bulk_in = 1,
1123 .num_bulk_out = 1, 1124 .num_bulk_out = 1,
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index 30b7ebc8d45d..5a03a3fc9386 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -402,6 +402,7 @@ static struct usb_serial_driver safe_device = {
402 .name = "safe_serial", 402 .name = "safe_serial",
403 }, 403 },
404 .id_table = id_table, 404 .id_table = id_table,
405 .usb_driver = &safe_driver,
405 .num_interrupt_in = NUM_DONT_CARE, 406 .num_interrupt_in = NUM_DONT_CARE,
406 .num_bulk_in = NUM_DONT_CARE, 407 .num_bulk_in = NUM_DONT_CARE,
407 .num_bulk_out = NUM_DONT_CARE, 408 .num_bulk_out = NUM_DONT_CARE,
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 6d8e91e00ecf..ecedd833818d 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -13,10 +13,9 @@
13 Portions based on the option driver by Matthias Urlichs <smurf@smurf.noris.de> 13 Portions based on the option driver by Matthias Urlichs <smurf@smurf.noris.de>
14 Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org> 14 Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org>
15 15
16 History:
17*/ 16*/
18 17
19#define DRIVER_VERSION "v.1.0.5" 18#define DRIVER_VERSION "v.1.0.6"
20#define DRIVER_AUTHOR "Kevin Lloyd <linux@sierrawireless.com>" 19#define DRIVER_AUTHOR "Kevin Lloyd <linux@sierrawireless.com>"
21#define DRIVER_DESC "USB Driver for Sierra Wireless USB modems" 20#define DRIVER_DESC "USB Driver for Sierra Wireless USB modems"
22 21
@@ -31,14 +30,15 @@
31 30
32 31
33static struct usb_device_id id_table [] = { 32static struct usb_device_id id_table [] = {
33 { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
34 { USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */ 34 { USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */
35 { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */
35 { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ 36 { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */
36 { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
37 { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */ 37 { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */
38 { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ 38 { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */
39 { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */ 39 { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */
40 { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */
40 { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */ 41 { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */
41 { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 for Europe */
42 { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 */ 42 { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 */
43 { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */ 43 { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */
44 44
@@ -55,14 +55,15 @@ static struct usb_device_id id_table_1port [] = {
55}; 55};
56 56
57static struct usb_device_id id_table_3port [] = { 57static struct usb_device_id id_table_3port [] = {
58 { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
58 { USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */ 59 { USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */
60 { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */
59 { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ 61 { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */
60 { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
61 { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */ 62 { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */
62 { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ 63 { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */
63 { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */ 64 { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */
65 { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */
64 { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */ 66 { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */
65 { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 for Europe */
66 { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 */ 67 { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 */
67 { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */ 68 { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */
68 { } 69 { }
@@ -81,7 +82,7 @@ static int debug;
81 82
82/* per port private data */ 83/* per port private data */
83#define N_IN_URB 4 84#define N_IN_URB 4
84#define N_OUT_URB 1 85#define N_OUT_URB 4
85#define IN_BUFLEN 4096 86#define IN_BUFLEN 4096
86#define OUT_BUFLEN 128 87#define OUT_BUFLEN 128
87 88
@@ -396,6 +397,8 @@ static int sierra_open(struct usb_serial_port *port, struct file *filp)
396 struct usb_serial *serial = port->serial; 397 struct usb_serial *serial = port->serial;
397 int i, err; 398 int i, err;
398 struct urb *urb; 399 struct urb *urb;
400 int result;
401 __u16 set_mode_dzero = 0x0000;
399 402
400 portdata = usb_get_serial_port_data(port); 403 portdata = usb_get_serial_port_data(port);
401 404
@@ -442,6 +445,12 @@ static int sierra_open(struct usb_serial_port *port, struct file *filp)
442 445
443 port->tty->low_latency = 1; 446 port->tty->low_latency = 1;
444 447
448 /* set mode to D0 */
449 result = usb_control_msg(serial->dev,
450 usb_rcvctrlpipe(serial->dev, 0),
451 0x00, 0x40, set_mode_dzero, 0, NULL,
452 0, USB_CTRL_SET_TIMEOUT);
453
445 sierra_send_setup(port); 454 sierra_send_setup(port);
446 455
447 return (0); 456 return (0);
@@ -614,6 +623,7 @@ static struct usb_serial_driver sierra_1port_device = {
614 }, 623 },
615 .description = "Sierra USB modem (1 port)", 624 .description = "Sierra USB modem (1 port)",
616 .id_table = id_table_1port, 625 .id_table = id_table_1port,
626 .usb_driver = &sierra_driver,
617 .num_interrupt_in = NUM_DONT_CARE, 627 .num_interrupt_in = NUM_DONT_CARE,
618 .num_bulk_in = 1, 628 .num_bulk_in = 1,
619 .num_bulk_out = 1, 629 .num_bulk_out = 1,
@@ -642,6 +652,7 @@ static struct usb_serial_driver sierra_3port_device = {
642 }, 652 },
643 .description = "Sierra USB modem (3 port)", 653 .description = "Sierra USB modem (3 port)",
644 .id_table = id_table_3port, 654 .id_table = id_table_3port,
655 .usb_driver = &sierra_driver,
645 .num_interrupt_in = NUM_DONT_CARE, 656 .num_interrupt_in = NUM_DONT_CARE,
646 .num_bulk_in = 3, 657 .num_bulk_in = 3,
647 .num_bulk_out = 3, 658 .num_bulk_out = 3,
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 83189005c6fb..4203e2b1a761 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -262,6 +262,7 @@ static struct usb_serial_driver ti_1port_device = {
262 .name = "ti_usb_3410_5052_1", 262 .name = "ti_usb_3410_5052_1",
263 }, 263 },
264 .description = "TI USB 3410 1 port adapter", 264 .description = "TI USB 3410 1 port adapter",
265 .usb_driver = &ti_usb_driver,
265 .id_table = ti_id_table_3410, 266 .id_table = ti_id_table_3410,
266 .num_interrupt_in = 1, 267 .num_interrupt_in = 1,
267 .num_bulk_in = 1, 268 .num_bulk_in = 1,
@@ -292,6 +293,7 @@ static struct usb_serial_driver ti_2port_device = {
292 .name = "ti_usb_3410_5052_2", 293 .name = "ti_usb_3410_5052_2",
293 }, 294 },
294 .description = "TI USB 5052 2 port adapter", 295 .description = "TI USB 5052 2 port adapter",
296 .usb_driver = &ti_usb_driver,
295 .id_table = ti_id_table_5052, 297 .id_table = ti_id_table_5052,
296 .num_interrupt_in = 1, 298 .num_interrupt_in = 1,
297 .num_bulk_in = 2, 299 .num_bulk_in = 2,
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 716f6806cc89..6bf22a28adb8 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -59,14 +59,19 @@ static struct usb_driver usb_serial_driver = {
59 59
60static int debug; 60static int debug;
61static struct usb_serial *serial_table[SERIAL_TTY_MINORS]; /* initially all NULL */ 61static struct usb_serial *serial_table[SERIAL_TTY_MINORS]; /* initially all NULL */
62static spinlock_t table_lock;
62static LIST_HEAD(usb_serial_driver_list); 63static LIST_HEAD(usb_serial_driver_list);
63 64
64struct usb_serial *usb_serial_get_by_index(unsigned index) 65struct usb_serial *usb_serial_get_by_index(unsigned index)
65{ 66{
66 struct usb_serial *serial = serial_table[index]; 67 struct usb_serial *serial;
68
69 spin_lock(&table_lock);
70 serial = serial_table[index];
67 71
68 if (serial) 72 if (serial)
69 kref_get(&serial->kref); 73 kref_get(&serial->kref);
74 spin_unlock(&table_lock);
70 return serial; 75 return serial;
71} 76}
72 77
@@ -78,6 +83,7 @@ static struct usb_serial *get_free_serial (struct usb_serial *serial, int num_po
78 dbg("%s %d", __FUNCTION__, num_ports); 83 dbg("%s %d", __FUNCTION__, num_ports);
79 84
80 *minor = 0; 85 *minor = 0;
86 spin_lock(&table_lock);
81 for (i = 0; i < SERIAL_TTY_MINORS; ++i) { 87 for (i = 0; i < SERIAL_TTY_MINORS; ++i) {
82 if (serial_table[i]) 88 if (serial_table[i])
83 continue; 89 continue;
@@ -96,8 +102,10 @@ static struct usb_serial *get_free_serial (struct usb_serial *serial, int num_po
96 dbg("%s - minor base = %d", __FUNCTION__, *minor); 102 dbg("%s - minor base = %d", __FUNCTION__, *minor);
97 for (i = *minor; (i < (*minor + num_ports)) && (i < SERIAL_TTY_MINORS); ++i) 103 for (i = *minor; (i < (*minor + num_ports)) && (i < SERIAL_TTY_MINORS); ++i)
98 serial_table[i] = serial; 104 serial_table[i] = serial;
105 spin_unlock(&table_lock);
99 return serial; 106 return serial;
100 } 107 }
108 spin_unlock(&table_lock);
101 return NULL; 109 return NULL;
102} 110}
103 111
@@ -110,9 +118,11 @@ static void return_serial(struct usb_serial *serial)
110 if (serial == NULL) 118 if (serial == NULL)
111 return; 119 return;
112 120
121 spin_lock(&table_lock);
113 for (i = 0; i < serial->num_ports; ++i) { 122 for (i = 0; i < serial->num_ports; ++i) {
114 serial_table[serial->minor + i] = NULL; 123 serial_table[serial->minor + i] = NULL;
115 } 124 }
125 spin_unlock(&table_lock);
116} 126}
117 127
118static void destroy_serial(struct kref *kref) 128static void destroy_serial(struct kref *kref)
@@ -271,7 +281,7 @@ static void serial_close(struct tty_struct *tty, struct file * filp)
271static int serial_write (struct tty_struct * tty, const unsigned char *buf, int count) 281static int serial_write (struct tty_struct * tty, const unsigned char *buf, int count)
272{ 282{
273 struct usb_serial_port *port = tty->driver_data; 283 struct usb_serial_port *port = tty->driver_data;
274 int retval = -EINVAL; 284 int retval = -ENODEV;
275 285
276 if (!port || port->serial->dev->state == USB_STATE_NOTATTACHED) 286 if (!port || port->serial->dev->state == USB_STATE_NOTATTACHED)
277 goto exit; 287 goto exit;
@@ -279,6 +289,7 @@ static int serial_write (struct tty_struct * tty, const unsigned char *buf, int
279 dbg("%s - port %d, %d byte(s)", __FUNCTION__, port->number, count); 289 dbg("%s - port %d, %d byte(s)", __FUNCTION__, port->number, count);
280 290
281 if (!port->open_count) { 291 if (!port->open_count) {
292 retval = -EINVAL;
282 dbg("%s - port not opened", __FUNCTION__); 293 dbg("%s - port not opened", __FUNCTION__);
283 goto exit; 294 goto exit;
284 } 295 }
@@ -559,15 +570,20 @@ static void port_release(struct device *dev)
559 port_free(port); 570 port_free(port);
560} 571}
561 572
562static void port_free(struct usb_serial_port *port) 573static void kill_traffic(struct usb_serial_port *port)
563{ 574{
564 usb_kill_urb(port->read_urb); 575 usb_kill_urb(port->read_urb);
565 usb_free_urb(port->read_urb);
566 usb_kill_urb(port->write_urb); 576 usb_kill_urb(port->write_urb);
567 usb_free_urb(port->write_urb);
568 usb_kill_urb(port->interrupt_in_urb); 577 usb_kill_urb(port->interrupt_in_urb);
569 usb_free_urb(port->interrupt_in_urb);
570 usb_kill_urb(port->interrupt_out_urb); 578 usb_kill_urb(port->interrupt_out_urb);
579}
580
581static void port_free(struct usb_serial_port *port)
582{
583 kill_traffic(port);
584 usb_free_urb(port->read_urb);
585 usb_free_urb(port->write_urb);
586 usb_free_urb(port->interrupt_in_urb);
571 usb_free_urb(port->interrupt_out_urb); 587 usb_free_urb(port->interrupt_out_urb);
572 kfree(port->bulk_in_buffer); 588 kfree(port->bulk_in_buffer);
573 kfree(port->bulk_out_buffer); 589 kfree(port->bulk_out_buffer);
@@ -596,6 +612,39 @@ static struct usb_serial * create_serial (struct usb_device *dev,
596 return serial; 612 return serial;
597} 613}
598 614
615static const struct usb_device_id *match_dynamic_id(struct usb_interface *intf,
616 struct usb_serial_driver *drv)
617{
618 struct usb_dynid *dynid;
619
620 spin_lock(&drv->dynids.lock);
621 list_for_each_entry(dynid, &drv->dynids.list, node) {
622 if (usb_match_one_id(intf, &dynid->id)) {
623 spin_unlock(&drv->dynids.lock);
624 return &dynid->id;
625 }
626 }
627 spin_unlock(&drv->dynids.lock);
628 return NULL;
629}
630
631static const struct usb_device_id *get_iface_id(struct usb_serial_driver *drv,
632 struct usb_interface *intf)
633{
634 const struct usb_device_id *id;
635
636 id = usb_match_id(intf, drv->id_table);
637 if (id) {
638 dbg("static descriptor matches");
639 goto exit;
640 }
641 id = match_dynamic_id(intf, drv);
642 if (id)
643 dbg("dynamic descriptor matches");
644exit:
645 return id;
646}
647
599static struct usb_serial_driver *search_serial_device(struct usb_interface *iface) 648static struct usb_serial_driver *search_serial_device(struct usb_interface *iface)
600{ 649{
601 struct list_head *p; 650 struct list_head *p;
@@ -605,11 +654,9 @@ static struct usb_serial_driver *search_serial_device(struct usb_interface *ifac
605 /* Check if the usb id matches a known device */ 654 /* Check if the usb id matches a known device */
606 list_for_each(p, &usb_serial_driver_list) { 655 list_for_each(p, &usb_serial_driver_list) {
607 t = list_entry(p, struct usb_serial_driver, driver_list); 656 t = list_entry(p, struct usb_serial_driver, driver_list);
608 id = usb_match_id(iface, t->id_table); 657 id = get_iface_id(t, iface);
609 if (id != NULL) { 658 if (id)
610 dbg("descriptor matches");
611 return t; 659 return t;
612 }
613 } 660 }
614 661
615 return NULL; 662 return NULL;
@@ -639,14 +686,17 @@ int usb_serial_probe(struct usb_interface *interface,
639 int num_ports = 0; 686 int num_ports = 0;
640 int max_endpoints; 687 int max_endpoints;
641 688
689 lock_kernel(); /* guard against unloading a serial driver module */
642 type = search_serial_device(interface); 690 type = search_serial_device(interface);
643 if (!type) { 691 if (!type) {
692 unlock_kernel();
644 dbg("none matched"); 693 dbg("none matched");
645 return -ENODEV; 694 return -ENODEV;
646 } 695 }
647 696
648 serial = create_serial (dev, interface, type); 697 serial = create_serial (dev, interface, type);
649 if (!serial) { 698 if (!serial) {
699 unlock_kernel();
650 dev_err(&interface->dev, "%s - out of memory\n", __FUNCTION__); 700 dev_err(&interface->dev, "%s - out of memory\n", __FUNCTION__);
651 return -ENOMEM; 701 return -ENOMEM;
652 } 702 }
@@ -656,16 +706,18 @@ int usb_serial_probe(struct usb_interface *interface,
656 const struct usb_device_id *id; 706 const struct usb_device_id *id;
657 707
658 if (!try_module_get(type->driver.owner)) { 708 if (!try_module_get(type->driver.owner)) {
709 unlock_kernel();
659 dev_err(&interface->dev, "module get failed, exiting\n"); 710 dev_err(&interface->dev, "module get failed, exiting\n");
660 kfree (serial); 711 kfree (serial);
661 return -EIO; 712 return -EIO;
662 } 713 }
663 714
664 id = usb_match_id(interface, type->id_table); 715 id = get_iface_id(type, interface);
665 retval = type->probe(serial, id); 716 retval = type->probe(serial, id);
666 module_put(type->driver.owner); 717 module_put(type->driver.owner);
667 718
668 if (retval) { 719 if (retval) {
720 unlock_kernel();
669 dbg ("sub driver rejected device"); 721 dbg ("sub driver rejected device");
670 kfree (serial); 722 kfree (serial);
671 return retval; 723 return retval;
@@ -735,6 +787,7 @@ int usb_serial_probe(struct usb_interface *interface,
735 * properly during a later invocation of usb_serial_probe 787 * properly during a later invocation of usb_serial_probe
736 */ 788 */
737 if (num_bulk_in == 0 || num_bulk_out == 0) { 789 if (num_bulk_in == 0 || num_bulk_out == 0) {
790 unlock_kernel();
738 dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n"); 791 dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n");
739 kfree (serial); 792 kfree (serial);
740 return -ENODEV; 793 return -ENODEV;
@@ -750,6 +803,7 @@ int usb_serial_probe(struct usb_interface *interface,
750 if (type == &usb_serial_generic_device) { 803 if (type == &usb_serial_generic_device) {
751 num_ports = num_bulk_out; 804 num_ports = num_bulk_out;
752 if (num_ports == 0) { 805 if (num_ports == 0) {
806 unlock_kernel();
753 dev_err(&interface->dev, "Generic device with no bulk out, not allowed.\n"); 807 dev_err(&interface->dev, "Generic device with no bulk out, not allowed.\n");
754 kfree (serial); 808 kfree (serial);
755 return -EIO; 809 return -EIO;
@@ -760,6 +814,7 @@ int usb_serial_probe(struct usb_interface *interface,
760 /* if this device type has a calc_num_ports function, call it */ 814 /* if this device type has a calc_num_ports function, call it */
761 if (type->calc_num_ports) { 815 if (type->calc_num_ports) {
762 if (!try_module_get(type->driver.owner)) { 816 if (!try_module_get(type->driver.owner)) {
817 unlock_kernel();
763 dev_err(&interface->dev, "module get failed, exiting\n"); 818 dev_err(&interface->dev, "module get failed, exiting\n");
764 kfree (serial); 819 kfree (serial);
765 return -EIO; 820 return -EIO;
@@ -771,12 +826,6 @@ int usb_serial_probe(struct usb_interface *interface,
771 num_ports = type->num_ports; 826 num_ports = type->num_ports;
772 } 827 }
773 828
774 if (get_free_serial (serial, num_ports, &minor) == NULL) {
775 dev_err(&interface->dev, "No more free serial devices\n");
776 kfree (serial);
777 return -ENOMEM;
778 }
779
780 serial->minor = minor; 829 serial->minor = minor;
781 serial->num_ports = num_ports; 830 serial->num_ports = num_ports;
782 serial->num_bulk_in = num_bulk_in; 831 serial->num_bulk_in = num_bulk_in;
@@ -791,6 +840,8 @@ int usb_serial_probe(struct usb_interface *interface,
791 max_endpoints = max(max_endpoints, num_interrupt_out); 840 max_endpoints = max(max_endpoints, num_interrupt_out);
792 max_endpoints = max(max_endpoints, (int)serial->num_ports); 841 max_endpoints = max(max_endpoints, (int)serial->num_ports);
793 serial->num_port_pointers = max_endpoints; 842 serial->num_port_pointers = max_endpoints;
843 unlock_kernel();
844
794 dbg("%s - setting up %d port structures for this device", __FUNCTION__, max_endpoints); 845 dbg("%s - setting up %d port structures for this device", __FUNCTION__, max_endpoints);
795 for (i = 0; i < max_endpoints; ++i) { 846 for (i = 0; i < max_endpoints; ++i) {
796 port = kzalloc(sizeof(struct usb_serial_port), GFP_KERNEL); 847 port = kzalloc(sizeof(struct usb_serial_port), GFP_KERNEL);
@@ -925,6 +976,11 @@ int usb_serial_probe(struct usb_interface *interface,
925 } 976 }
926 } 977 }
927 978
979 if (get_free_serial (serial, num_ports, &minor) == NULL) {
980 dev_err(&interface->dev, "No more free serial devices\n");
981 goto probe_error;
982 }
983
928 /* register all of the individual ports with the driver core */ 984 /* register all of the individual ports with the driver core */
929 for (i = 0; i < num_ports; ++i) { 985 for (i = 0; i < num_ports; ++i) {
930 port = serial->port[i]; 986 port = serial->port[i];
@@ -1002,8 +1058,11 @@ void usb_serial_disconnect(struct usb_interface *interface)
1002 if (serial) { 1058 if (serial) {
1003 for (i = 0; i < serial->num_ports; ++i) { 1059 for (i = 0; i < serial->num_ports; ++i) {
1004 port = serial->port[i]; 1060 port = serial->port[i];
1005 if (port && port->tty) 1061 if (port) {
1006 tty_hangup(port->tty); 1062 if (port->tty)
1063 tty_hangup(port->tty);
1064 kill_traffic(port);
1065 }
1007 } 1066 }
1008 /* let the last holder of this object 1067 /* let the last holder of this object
1009 * cause it to be cleaned up */ 1068 * cause it to be cleaned up */
@@ -1040,6 +1099,7 @@ static int __init usb_serial_init(void)
1040 return -ENOMEM; 1099 return -ENOMEM;
1041 1100
1042 /* Initialize our global data */ 1101 /* Initialize our global data */
1102 spin_lock_init(&table_lock);
1043 for (i = 0; i < SERIAL_TTY_MINORS; ++i) { 1103 for (i = 0; i < SERIAL_TTY_MINORS; ++i) {
1044 serial_table[i] = NULL; 1104 serial_table[i] = NULL;
1045 } 1105 }
@@ -1138,7 +1198,7 @@ static void fixup_generic(struct usb_serial_driver *device)
1138 set_to_generic_if_null(device, shutdown); 1198 set_to_generic_if_null(device, shutdown);
1139} 1199}
1140 1200
1141int usb_serial_register(struct usb_serial_driver *driver) 1201int usb_serial_register(struct usb_serial_driver *driver) /* must be called with BKL held */
1142{ 1202{
1143 int retval; 1203 int retval;
1144 1204
@@ -1162,7 +1222,7 @@ int usb_serial_register(struct usb_serial_driver *driver)
1162} 1222}
1163 1223
1164 1224
1165void usb_serial_deregister(struct usb_serial_driver *device) 1225void usb_serial_deregister(struct usb_serial_driver *device) /* must be called with BKL held */
1166{ 1226{
1167 info("USB Serial deregistering driver %s", device->description); 1227 info("USB Serial deregistering driver %s", device->description);
1168 list_del(&device->driver_list); 1228 list_del(&device->driver_list);
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index b09f06096056..2f59ff226e2c 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -90,8 +90,6 @@ static struct usb_device_id id_table [] = {
90 .driver_info = (kernel_ulong_t)&palm_os_4_probe }, 90 .driver_info = (kernel_ulong_t)&palm_os_4_probe },
91 { USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_Z_ID), 91 { USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_Z_ID),
92 .driver_info = (kernel_ulong_t)&palm_os_4_probe }, 92 .driver_info = (kernel_ulong_t)&palm_os_4_probe },
93 { USB_DEVICE(PALM_VENDOR_ID, PALM_ZIRE31_ID),
94 .driver_info = (kernel_ulong_t)&palm_os_4_probe },
95 { USB_DEVICE(PALM_VENDOR_ID, PALM_ZIRE_ID), 93 { USB_DEVICE(PALM_VENDOR_ID, PALM_ZIRE_ID),
96 .driver_info = (kernel_ulong_t)&palm_os_4_probe }, 94 .driver_info = (kernel_ulong_t)&palm_os_4_probe },
97 { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_0_ID), 95 { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_0_ID),
@@ -151,7 +149,6 @@ static struct usb_device_id id_table_combined [] = {
151 { USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_T_ID) }, 149 { USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_T_ID) },
152 { USB_DEVICE(PALM_VENDOR_ID, PALM_TREO_650) }, 150 { USB_DEVICE(PALM_VENDOR_ID, PALM_TREO_650) },
153 { USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_Z_ID) }, 151 { USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_Z_ID) },
154 { USB_DEVICE(PALM_VENDOR_ID, PALM_ZIRE31_ID) },
155 { USB_DEVICE(PALM_VENDOR_ID, PALM_ZIRE_ID) }, 152 { USB_DEVICE(PALM_VENDOR_ID, PALM_ZIRE_ID) },
156 { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_3_5_ID) }, 153 { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_3_5_ID) },
157 { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_0_ID) }, 154 { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_0_ID) },
@@ -189,6 +186,7 @@ static struct usb_serial_driver handspring_device = {
189 .name = "visor", 186 .name = "visor",
190 }, 187 },
191 .description = "Handspring Visor / Palm OS", 188 .description = "Handspring Visor / Palm OS",
189 .usb_driver = &visor_driver,
192 .id_table = id_table, 190 .id_table = id_table,
193 .num_interrupt_in = NUM_DONT_CARE, 191 .num_interrupt_in = NUM_DONT_CARE,
194 .num_bulk_in = 2, 192 .num_bulk_in = 2,
@@ -219,6 +217,7 @@ static struct usb_serial_driver clie_5_device = {
219 .name = "clie_5", 217 .name = "clie_5",
220 }, 218 },
221 .description = "Sony Clie 5.0", 219 .description = "Sony Clie 5.0",
220 .usb_driver = &visor_driver,
222 .id_table = clie_id_5_table, 221 .id_table = clie_id_5_table,
223 .num_interrupt_in = NUM_DONT_CARE, 222 .num_interrupt_in = NUM_DONT_CARE,
224 .num_bulk_in = 2, 223 .num_bulk_in = 2,
@@ -249,6 +248,7 @@ static struct usb_serial_driver clie_3_5_device = {
249 .name = "clie_3.5", 248 .name = "clie_3.5",
250 }, 249 },
251 .description = "Sony Clie 3.5", 250 .description = "Sony Clie 3.5",
251 .usb_driver = &visor_driver,
252 .id_table = clie_id_3_5_table, 252 .id_table = clie_id_3_5_table,
253 .num_interrupt_in = 0, 253 .num_interrupt_in = 0,
254 .num_bulk_in = 1, 254 .num_bulk_in = 1,
diff --git a/drivers/usb/serial/visor.h b/drivers/usb/serial/visor.h
index 765118d83fb6..4ce6f62a6f39 100644
--- a/drivers/usb/serial/visor.h
+++ b/drivers/usb/serial/visor.h
@@ -32,7 +32,6 @@
32#define PALM_TUNGSTEN_T_ID 0x0060 32#define PALM_TUNGSTEN_T_ID 0x0060
33#define PALM_TREO_650 0x0061 33#define PALM_TREO_650 0x0061
34#define PALM_TUNGSTEN_Z_ID 0x0031 34#define PALM_TUNGSTEN_Z_ID 0x0031
35#define PALM_ZIRE31_ID 0x0061
36#define PALM_ZIRE_ID 0x0070 35#define PALM_ZIRE_ID 0x0070
37#define PALM_M100_ID 0x0080 36#define PALM_M100_ID 0x0080
38 37
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 5483d8564c1b..bf16e9e1d84e 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -161,6 +161,7 @@ static struct usb_serial_driver whiteheat_fake_device = {
161 .name = "whiteheatnofirm", 161 .name = "whiteheatnofirm",
162 }, 162 },
163 .description = "Connect Tech - WhiteHEAT - (prerenumeration)", 163 .description = "Connect Tech - WhiteHEAT - (prerenumeration)",
164 .usb_driver = &whiteheat_driver,
164 .id_table = id_table_prerenumeration, 165 .id_table = id_table_prerenumeration,
165 .num_interrupt_in = NUM_DONT_CARE, 166 .num_interrupt_in = NUM_DONT_CARE,
166 .num_bulk_in = NUM_DONT_CARE, 167 .num_bulk_in = NUM_DONT_CARE,
@@ -176,6 +177,7 @@ static struct usb_serial_driver whiteheat_device = {
176 .name = "whiteheat", 177 .name = "whiteheat",
177 }, 178 },
178 .description = "Connect Tech - WhiteHEAT", 179 .description = "Connect Tech - WhiteHEAT",
180 .usb_driver = &whiteheat_driver,
179 .id_table = id_table_std, 181 .id_table = id_table_std,
180 .num_interrupt_in = NUM_DONT_CARE, 182 .num_interrupt_in = NUM_DONT_CARE,
181 .num_bulk_in = NUM_DONT_CARE, 183 .num_bulk_in = NUM_DONT_CARE,
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index e565d3d2ab29..6d3dad3d1dae 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -33,7 +33,6 @@
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/module.h> 35#include <linux/module.h>
36#include <linux/usb_ch9.h>
37#include <linux/usb/input.h> 36#include <linux/usb/input.h>
38#include "usb.h" 37#include "usb.h"
39#include "onetouch.h" 38#include "onetouch.h"
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index e1072d52d641..70234f5dbeeb 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -110,23 +110,6 @@ static int slave_configure(struct scsi_device *sdev)
110 * the end, scatter-gather buffers follow page boundaries. */ 110 * the end, scatter-gather buffers follow page boundaries. */
111 blk_queue_dma_alignment(sdev->request_queue, (512 - 1)); 111 blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
112 112
113 /* Set the SCSI level to at least 2. We'll leave it at 3 if that's
114 * what is originally reported. We need this to avoid confusing
115 * the SCSI layer with devices that report 0 or 1, but need 10-byte
116 * commands (ala ATAPI devices behind certain bridges, or devices
117 * which simply have broken INQUIRY data).
118 *
119 * NOTE: This means /dev/sg programs (ala cdrecord) will get the
120 * actual information. This seems to be the preference for
121 * programs like that.
122 *
123 * NOTE: This also means that /proc/scsi/scsi and sysfs may report
124 * the actual value or the modified one, depending on where the
125 * data comes from.
126 */
127 if (sdev->scsi_level < SCSI_2)
128 sdev->scsi_level = sdev->sdev_target->scsi_level = SCSI_2;
129
130 /* Many devices have trouble transfering more than 32KB at a time, 113 /* Many devices have trouble transfering more than 32KB at a time,
131 * while others have trouble with more than 64K. At this time we 114 * while others have trouble with more than 64K. At this time we
132 * are limiting both to 32K (64 sectores). 115 * are limiting both to 32K (64 sectores).
@@ -176,7 +159,9 @@ static int slave_configure(struct scsi_device *sdev)
176 * a Get-Max-LUN request, we won't lose much by setting the 159 * a Get-Max-LUN request, we won't lose much by setting the
177 * revision level down to 2. The only devices that would be 160 * revision level down to 2. The only devices that would be
178 * affected are those with sparse LUNs. */ 161 * affected are those with sparse LUNs. */
179 sdev->scsi_level = sdev->sdev_target->scsi_level = SCSI_2; 162 if (sdev->scsi_level > SCSI_2)
163 sdev->sdev_target->scsi_level =
164 sdev->scsi_level = SCSI_2;
180 165
181 /* USB-IDE bridges tend to report SK = 0x04 (Non-recoverable 166 /* USB-IDE bridges tend to report SK = 0x04 (Non-recoverable
182 * Hardware Error) when any low-level error occurs, 167 * Hardware Error) when any low-level error occurs,
@@ -194,6 +179,16 @@ static int slave_configure(struct scsi_device *sdev)
194 sdev->use_10_for_ms = 1; 179 sdev->use_10_for_ms = 1;
195 } 180 }
196 181
182 /* The CB and CBI transports have no way to pass LUN values
183 * other than the bits in the second byte of a CDB. But those
184 * bits don't get set to the LUN value if the device reports
185 * scsi_level == 0 (UNKNOWN). Hence such devices must necessarily
186 * be single-LUN.
187 */
188 if ((us->protocol == US_PR_CB || us->protocol == US_PR_CBI) &&
189 sdev->scsi_level == SCSI_UNKNOWN)
190 us->max_lun = 0;
191
197 /* Some devices choke when they receive a PREVENT-ALLOW MEDIUM 192 /* Some devices choke when they receive a PREVENT-ALLOW MEDIUM
198 * REMOVAL command, so suppress those commands. */ 193 * REMOVAL command, so suppress those commands. */
199 if (us->flags & US_FL_NOT_LOCKABLE) 194 if (us->flags & US_FL_NOT_LOCKABLE)
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index b49f2a78189e..f49a62fc32d2 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -573,7 +573,7 @@ UNUSUAL_DEV( 0x054c, 0x002b, 0x0100, 0x0110,
573#endif 573#endif
574 574
575/* Submitted by Olaf Hering, <olh@suse.de> SuSE Bugzilla #49049 */ 575/* Submitted by Olaf Hering, <olh@suse.de> SuSE Bugzilla #49049 */
576UNUSUAL_DEV( 0x054c, 0x002c, 0x0501, 0x0501, 576UNUSUAL_DEV( 0x054c, 0x002c, 0x0501, 0x2000,
577 "Sony", 577 "Sony",
578 "USB Floppy Drive", 578 "USB Floppy Drive",
579 US_SC_DEVICE, US_PR_DEVICE, NULL, 579 US_SC_DEVICE, US_PR_DEVICE, NULL,
@@ -1325,13 +1325,6 @@ UNUSUAL_DEV( 0x0fce, 0xe031, 0x0000, 0x0000,
1325 US_SC_DEVICE, US_PR_DEVICE, NULL, 1325 US_SC_DEVICE, US_PR_DEVICE, NULL,
1326 US_FL_FIX_CAPACITY ), 1326 US_FL_FIX_CAPACITY ),
1327 1327
1328/* Reported by Jan Mate <mate@fiit.stuba.sk> */
1329UNUSUAL_DEV( 0x0fce, 0xe030, 0x0000, 0x0000,
1330 "Sony Ericsson",
1331 "P990i",
1332 US_SC_DEVICE, US_PR_DEVICE, NULL,
1333 US_FL_FIX_CAPACITY ),
1334
1335/* Reported by Kevin Cernekee <kpc-usbdev@gelato.uiuc.edu> 1328/* Reported by Kevin Cernekee <kpc-usbdev@gelato.uiuc.edu>
1336 * Tested on hardware version 1.10. 1329 * Tested on hardware version 1.10.
1337 * Entry is needed only for the initializer function override. 1330 * Entry is needed only for the initializer function override.
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 70644506651f..7e7ec29782f1 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -731,26 +731,27 @@ static int get_pipes(struct us_data *us)
731 struct usb_endpoint_descriptor *ep_int = NULL; 731 struct usb_endpoint_descriptor *ep_int = NULL;
732 732
733 /* 733 /*
734 * Find the endpoints we need. 734 * Find the first endpoint of each type we need.
735 * We are expecting a minimum of 2 endpoints - in and out (bulk). 735 * We are expecting a minimum of 2 endpoints - in and out (bulk).
736 * An optional interrupt is OK (necessary for CBI protocol). 736 * An optional interrupt-in is OK (necessary for CBI protocol).
737 * We will ignore any others. 737 * We will ignore any others.
738 */ 738 */
739 for (i = 0; i < altsetting->desc.bNumEndpoints; i++) { 739 for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
740 ep = &altsetting->endpoint[i].desc; 740 ep = &altsetting->endpoint[i].desc;
741 741
742 /* Is it a BULK endpoint? */
743 if (usb_endpoint_xfer_bulk(ep)) { 742 if (usb_endpoint_xfer_bulk(ep)) {
744 /* BULK in or out? */ 743 if (usb_endpoint_dir_in(ep)) {
745 if (usb_endpoint_dir_in(ep)) 744 if (!ep_in)
746 ep_in = ep; 745 ep_in = ep;
747 else 746 } else {
748 ep_out = ep; 747 if (!ep_out)
748 ep_out = ep;
749 }
749 } 750 }
750 751
751 /* Is it an interrupt endpoint? */ 752 else if (usb_endpoint_is_int_in(ep)) {
752 else if (usb_endpoint_xfer_int(ep)) { 753 if (!ep_int)
753 ep_int = ep; 754 ep_int = ep;
754 } 755 }
755 } 756 }
756 757
diff --git a/drivers/video/output.c b/drivers/video/output.c
new file mode 100644
index 000000000000..1473f2c892d2
--- /dev/null
+++ b/drivers/video/output.c
@@ -0,0 +1,129 @@
1/*
2 * output.c - Display Output Switch driver
3 *
4 * Copyright (C) 2006 Luming Yu <luming.yu@intel.com>
5 *
6 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or (at
11 * your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
21 *
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 */
24#include <linux/module.h>
25#include <linux/video_output.h>
26#include <linux/err.h>
27#include <linux/ctype.h>
28
29
30MODULE_DESCRIPTION("Display Output Switcher Lowlevel Control Abstraction");
31MODULE_LICENSE("GPL");
32MODULE_AUTHOR("Luming Yu <luming.yu@intel.com>");
33
34static ssize_t video_output_show_state(struct class_device *dev,char *buf)
35{
36 ssize_t ret_size = 0;
37 struct output_device *od = to_output_device(dev);
38 if (od->props)
39 ret_size = sprintf(buf,"%.8x\n",od->props->get_status(od));
40 return ret_size;
41}
42
43static ssize_t video_output_store_state(struct class_device *dev,
44 const char *buf,size_t count)
45{
46 char *endp;
47 struct output_device *od = to_output_device(dev);
48 int request_state = simple_strtoul(buf,&endp,0);
49 size_t size = endp - buf;
50
51 if (*endp && isspace(*endp))
52 size++;
53 if (size != count)
54 return -EINVAL;
55
56 if (od->props) {
57 od->request_state = request_state;
58 od->props->set_state(od);
59 }
60 return count;
61}
62
63static void video_output_class_release(struct class_device *dev)
64{
65 struct output_device *od = to_output_device(dev);
66 kfree(od);
67}
68
69static struct class_device_attribute video_output_attributes[] = {
70 __ATTR(state, 0644, video_output_show_state, video_output_store_state),
71 __ATTR_NULL,
72};
73
74static struct class video_output_class = {
75 .name = "video_output",
76 .release = video_output_class_release,
77 .class_dev_attrs = video_output_attributes,
78};
79
80struct output_device *video_output_register(const char *name,
81 struct device *dev,
82 void *devdata,
83 struct output_properties *op)
84{
85 struct output_device *new_dev;
86 int ret_code = 0;
87
88 new_dev = kzalloc(sizeof(struct output_device),GFP_KERNEL);
89 if (!new_dev) {
90 ret_code = -ENOMEM;
91 goto error_return;
92 }
93 new_dev->props = op;
94 new_dev->class_dev.class = &video_output_class;
95 new_dev->class_dev.dev = dev;
96 strlcpy(new_dev->class_dev.class_id,name,KOBJ_NAME_LEN);
97 class_set_devdata(&new_dev->class_dev,devdata);
98 ret_code = class_device_register(&new_dev->class_dev);
99 if (ret_code) {
100 kfree(new_dev);
101 goto error_return;
102 }
103 return new_dev;
104
105error_return:
106 return ERR_PTR(ret_code);
107}
108EXPORT_SYMBOL(video_output_register);
109
110void video_output_unregister(struct output_device *dev)
111{
112 if (!dev)
113 return;
114 class_device_unregister(&dev->class_dev);
115}
116EXPORT_SYMBOL(video_output_unregister);
117
118static void __exit video_output_class_exit(void)
119{
120 class_unregister(&video_output_class);
121}
122
123static int __init video_output_class_init(void)
124{
125 return class_register(&video_output_class);
126}
127
128postcore_initcall(video_output_class_init);
129module_exit(video_output_class_exit);
diff --git a/fs/Kconfig b/fs/Kconfig
index 8cd2417a14db..5e8e9d9ccb33 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -426,7 +426,6 @@ config OCFS2_FS
426 select CONFIGFS_FS 426 select CONFIGFS_FS
427 select JBD 427 select JBD
428 select CRC32 428 select CRC32
429 select INET
430 help 429 help
431 OCFS2 is a general purpose extent based shared disk cluster file 430 OCFS2 is a general purpose extent based shared disk cluster file
432 system with many similarities to ext3. It supports 64 bit inode 431 system with many similarities to ext3. It supports 64 bit inode
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index d04d2f7448d9..85e3850bf2c9 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,6 +1,8 @@
1Version 1.47 1Version 1.47
2------------ 2------------
3Fix oops in list_del during mount caused by unaligned string. 3Fix oops in list_del during mount caused by unaligned string.
4Seek to SEEK_END forces check for update of file size for non-cached
5files.
4 6
5Version 1.46 7Version 1.46
6------------ 8------------
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 10c90294cd18..93ef09971d2f 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -511,7 +511,15 @@ static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
511{ 511{
512 /* origin == SEEK_END => we must revalidate the cached file length */ 512 /* origin == SEEK_END => we must revalidate the cached file length */
513 if (origin == SEEK_END) { 513 if (origin == SEEK_END) {
514 int retval = cifs_revalidate(file->f_path.dentry); 514 int retval;
515
516 /* some applications poll for the file length in this strange
517 way so we must seek to end on non-oplocked files by
518 setting the revalidate time to zero */
519 if(file->f_path.dentry->d_inode)
520 CIFS_I(file->f_path.dentry->d_inode)->time = 0;
521
522 retval = cifs_revalidate(file->f_path.dentry);
515 if (retval < 0) 523 if (retval < 0)
516 return (loff_t)retval; 524 return (loff_t)retval;
517 } 525 }
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 8a49b2e77d37..e9dcf5ee29a2 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1146,7 +1146,7 @@ static int cifs_writepages(struct address_space *mapping,
1146 pgoff_t end; 1146 pgoff_t end;
1147 pgoff_t index; 1147 pgoff_t index;
1148 int range_whole = 0; 1148 int range_whole = 0;
1149 struct kvec iov[32]; 1149 struct kvec * iov;
1150 int len; 1150 int len;
1151 int n_iov = 0; 1151 int n_iov = 0;
1152 pgoff_t next; 1152 pgoff_t next;
@@ -1171,15 +1171,21 @@ static int cifs_writepages(struct address_space *mapping,
1171 if((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server)) 1171 if((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1172 if(cifs_sb->tcon->ses->server->secMode & 1172 if(cifs_sb->tcon->ses->server->secMode &
1173 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) 1173 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1174 if(!experimEnabled) 1174 if(!experimEnabled)
1175 return generic_writepages(mapping, wbc); 1175 return generic_writepages(mapping, wbc);
1176 1176
1177 iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
1178 if(iov == NULL)
1179 return generic_writepages(mapping, wbc);
1180
1181
1177 /* 1182 /*
1178 * BB: Is this meaningful for a non-block-device file system? 1183 * BB: Is this meaningful for a non-block-device file system?
1179 * If it is, we should test it again after we do I/O 1184 * If it is, we should test it again after we do I/O
1180 */ 1185 */
1181 if (wbc->nonblocking && bdi_write_congested(bdi)) { 1186 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1182 wbc->encountered_congestion = 1; 1187 wbc->encountered_congestion = 1;
1188 kfree(iov);
1183 return 0; 1189 return 0;
1184 } 1190 }
1185 1191
@@ -1345,7 +1351,7 @@ retry:
1345 mapping->writeback_index = index; 1351 mapping->writeback_index = index;
1346 1352
1347 FreeXid(xid); 1353 FreeXid(xid);
1348 1354 kfree(iov);
1349 return rc; 1355 return rc;
1350} 1356}
1351 1357
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 99dfb5337e31..782940be550f 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -156,9 +156,9 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
156 tmp_inode->i_atime = cnvrtDosUnixTm( 156 tmp_inode->i_atime = cnvrtDosUnixTm(
157 le16_to_cpu(pfindData->LastAccessDate), 157 le16_to_cpu(pfindData->LastAccessDate),
158 le16_to_cpu(pfindData->LastAccessTime)); 158 le16_to_cpu(pfindData->LastAccessTime));
159 tmp_inode->i_ctime = cnvrtDosUnixTm( 159 tmp_inode->i_ctime = cnvrtDosUnixTm(
160 le16_to_cpu(pfindData->LastWriteDate), 160 le16_to_cpu(pfindData->LastWriteDate),
161 le16_to_cpu(pfindData->LastWriteTime)); 161 le16_to_cpu(pfindData->LastWriteTime));
162 AdjustForTZ(cifs_sb->tcon, tmp_inode); 162 AdjustForTZ(cifs_sb->tcon, tmp_inode);
163 attr = le16_to_cpu(pfindData->Attributes); 163 attr = le16_to_cpu(pfindData->Attributes);
164 allocation_size = le32_to_cpu(pfindData->AllocationSize); 164 allocation_size = le32_to_cpu(pfindData->AllocationSize);
diff --git a/fs/cifs/smbdes.c b/fs/cifs/smbdes.c
index 7a1b2b961ec8..1b1daf63f062 100644
--- a/fs/cifs/smbdes.c
+++ b/fs/cifs/smbdes.c
@@ -196,7 +196,7 @@ dohash(char *out, char *in, char *key, int forw)
196 char c[28]; 196 char c[28];
197 char d[28]; 197 char d[28];
198 char *cd; 198 char *cd;
199 char ki[16][48]; 199 char (*ki)[48];
200 char *pd1; 200 char *pd1;
201 char l[32], r[32]; 201 char l[32], r[32];
202 char *rl; 202 char *rl;
@@ -206,6 +206,12 @@ dohash(char *out, char *in, char *key, int forw)
206 if(pk1 == NULL) 206 if(pk1 == NULL)
207 return; 207 return;
208 208
209 ki = kmalloc(16*48, GFP_KERNEL);
210 if(ki == NULL) {
211 kfree(pk1);
212 return;
213 }
214
209 cd = pk1 + 56; 215 cd = pk1 + 56;
210 pd1= cd + 56; 216 pd1= cd + 56;
211 rl = pd1 + 64; 217 rl = pd1 + 64;
@@ -243,6 +249,7 @@ dohash(char *out, char *in, char *key, int forw)
243 er = kmalloc(48+48+32+32+32, GFP_KERNEL); 249 er = kmalloc(48+48+32+32+32, GFP_KERNEL);
244 if(er == NULL) { 250 if(er == NULL) {
245 kfree(pk1); 251 kfree(pk1);
252 kfree(ki);
246 return; 253 return;
247 } 254 }
248 erk = er+48; 255 erk = er+48;
@@ -290,6 +297,7 @@ dohash(char *out, char *in, char *key, int forw)
290 297
291 permute(out, rl, perm6, 64); 298 permute(out, rl, perm6, 64);
292 kfree(pk1); 299 kfree(pk1);
300 kfree(ki);
293} 301}
294 302
295static void 303static void
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index 2a7cb086e80c..d98be5e01328 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -162,14 +162,17 @@ fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size
162 int error; 162 int error;
163 163
164 if (!buffer->page) 164 if (!buffer->page)
165 buffer->page = (char *)get_zeroed_page(GFP_KERNEL); 165 buffer->page = (char *)__get_free_pages(GFP_KERNEL, 0);
166 if (!buffer->page) 166 if (!buffer->page)
167 return -ENOMEM; 167 return -ENOMEM;
168 168
169 if (count > PAGE_SIZE) 169 if (count >= PAGE_SIZE)
170 count = PAGE_SIZE; 170 count = PAGE_SIZE - 1;
171 error = copy_from_user(buffer->page,buf,count); 171 error = copy_from_user(buffer->page,buf,count);
172 buffer->needs_read_fill = 1; 172 buffer->needs_read_fill = 1;
173 /* if buf is assumed to contain a string, terminate it by \0,
174 * so e.g. sscanf() can scan the string easily */
175 buffer->page[count] = 0;
173 return error ? -EFAULT : count; 176 return error ? -EFAULT : count;
174} 177}
175 178
diff --git a/fs/dlm/Kconfig b/fs/dlm/Kconfig
index b5654a284fef..6fa7b0d5c043 100644
--- a/fs/dlm/Kconfig
+++ b/fs/dlm/Kconfig
@@ -3,21 +3,21 @@ menu "Distributed Lock Manager"
3 3
4config DLM 4config DLM
5 tristate "Distributed Lock Manager (DLM)" 5 tristate "Distributed Lock Manager (DLM)"
6 depends on IPV6 || IPV6=n 6 depends on SYSFS && (IPV6 || IPV6=n)
7 select CONFIGFS_FS 7 select CONFIGFS_FS
8 select IP_SCTP if DLM_SCTP 8 select IP_SCTP if DLM_SCTP
9 help 9 help
10 A general purpose distributed lock manager for kernel or userspace 10 A general purpose distributed lock manager for kernel or userspace
11 applications. 11 applications.
12 12
13choice 13choice
14 prompt "Select DLM communications protocol" 14 prompt "Select DLM communications protocol"
15 depends on DLM 15 depends on DLM
16 default DLM_TCP 16 default DLM_TCP
17 help 17 help
18 The DLM Can use TCP or SCTP for it's network communications. 18 The DLM Can use TCP or SCTP for it's network communications.
19 SCTP supports multi-homed operations whereas TCP doesn't. 19 SCTP supports multi-homed operations whereas TCP doesn't.
20 However, SCTP seems to have stability problems at the moment. 20 However, SCTP seems to have stability problems at the moment.
21 21
22config DLM_TCP 22config DLM_TCP
23 bool "TCP/IP" 23 bool "TCP/IP"
@@ -31,8 +31,8 @@ config DLM_DEBUG
31 bool "DLM debugging" 31 bool "DLM debugging"
32 depends on DLM 32 depends on DLM
33 help 33 help
34 Under the debugfs mount point, the name of each lockspace will 34 Under the debugfs mount point, the name of each lockspace will
35 appear as a file in the "dlm" directory. The output is the 35 appear as a file in the "dlm" directory. The output is the
36 list of resource and locks the local node knows about. 36 list of resource and locks the local node knows about.
37 37
38endmenu 38endmenu
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index 88553054bbfa..8665c88e5af2 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -54,6 +54,11 @@ static struct config_item *make_node(struct config_group *, const char *);
54static void drop_node(struct config_group *, struct config_item *); 54static void drop_node(struct config_group *, struct config_item *);
55static void release_node(struct config_item *); 55static void release_node(struct config_item *);
56 56
57static ssize_t show_cluster(struct config_item *i, struct configfs_attribute *a,
58 char *buf);
59static ssize_t store_cluster(struct config_item *i,
60 struct configfs_attribute *a,
61 const char *buf, size_t len);
57static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a, 62static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
58 char *buf); 63 char *buf);
59static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a, 64static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a,
@@ -73,6 +78,101 @@ static ssize_t node_nodeid_write(struct node *nd, const char *buf, size_t len);
73static ssize_t node_weight_read(struct node *nd, char *buf); 78static ssize_t node_weight_read(struct node *nd, char *buf);
74static ssize_t node_weight_write(struct node *nd, const char *buf, size_t len); 79static ssize_t node_weight_write(struct node *nd, const char *buf, size_t len);
75 80
81struct cluster {
82 struct config_group group;
83 unsigned int cl_tcp_port;
84 unsigned int cl_buffer_size;
85 unsigned int cl_rsbtbl_size;
86 unsigned int cl_lkbtbl_size;
87 unsigned int cl_dirtbl_size;
88 unsigned int cl_recover_timer;
89 unsigned int cl_toss_secs;
90 unsigned int cl_scan_secs;
91 unsigned int cl_log_debug;
92};
93
94enum {
95 CLUSTER_ATTR_TCP_PORT = 0,
96 CLUSTER_ATTR_BUFFER_SIZE,
97 CLUSTER_ATTR_RSBTBL_SIZE,
98 CLUSTER_ATTR_LKBTBL_SIZE,
99 CLUSTER_ATTR_DIRTBL_SIZE,
100 CLUSTER_ATTR_RECOVER_TIMER,
101 CLUSTER_ATTR_TOSS_SECS,
102 CLUSTER_ATTR_SCAN_SECS,
103 CLUSTER_ATTR_LOG_DEBUG,
104};
105
106struct cluster_attribute {
107 struct configfs_attribute attr;
108 ssize_t (*show)(struct cluster *, char *);
109 ssize_t (*store)(struct cluster *, const char *, size_t);
110};
111
112static ssize_t cluster_set(struct cluster *cl, unsigned int *cl_field,
113 unsigned int *info_field, int check_zero,
114 const char *buf, size_t len)
115{
116 unsigned int x;
117
118 if (!capable(CAP_SYS_ADMIN))
119 return -EACCES;
120
121 x = simple_strtoul(buf, NULL, 0);
122
123 if (check_zero && !x)
124 return -EINVAL;
125
126 *cl_field = x;
127 *info_field = x;
128
129 return len;
130}
131
132#define __CONFIGFS_ATTR(_name,_mode,_read,_write) { \
133 .attr = { .ca_name = __stringify(_name), \
134 .ca_mode = _mode, \
135 .ca_owner = THIS_MODULE }, \
136 .show = _read, \
137 .store = _write, \
138}
139
140#define CLUSTER_ATTR(name, check_zero) \
141static ssize_t name##_write(struct cluster *cl, const char *buf, size_t len) \
142{ \
143 return cluster_set(cl, &cl->cl_##name, &dlm_config.ci_##name, \
144 check_zero, buf, len); \
145} \
146static ssize_t name##_read(struct cluster *cl, char *buf) \
147{ \
148 return snprintf(buf, PAGE_SIZE, "%u\n", cl->cl_##name); \
149} \
150static struct cluster_attribute cluster_attr_##name = \
151__CONFIGFS_ATTR(name, 0644, name##_read, name##_write)
152
153CLUSTER_ATTR(tcp_port, 1);
154CLUSTER_ATTR(buffer_size, 1);
155CLUSTER_ATTR(rsbtbl_size, 1);
156CLUSTER_ATTR(lkbtbl_size, 1);
157CLUSTER_ATTR(dirtbl_size, 1);
158CLUSTER_ATTR(recover_timer, 1);
159CLUSTER_ATTR(toss_secs, 1);
160CLUSTER_ATTR(scan_secs, 1);
161CLUSTER_ATTR(log_debug, 0);
162
163static struct configfs_attribute *cluster_attrs[] = {
164 [CLUSTER_ATTR_TCP_PORT] = &cluster_attr_tcp_port.attr,
165 [CLUSTER_ATTR_BUFFER_SIZE] = &cluster_attr_buffer_size.attr,
166 [CLUSTER_ATTR_RSBTBL_SIZE] = &cluster_attr_rsbtbl_size.attr,
167 [CLUSTER_ATTR_LKBTBL_SIZE] = &cluster_attr_lkbtbl_size.attr,
168 [CLUSTER_ATTR_DIRTBL_SIZE] = &cluster_attr_dirtbl_size.attr,
169 [CLUSTER_ATTR_RECOVER_TIMER] = &cluster_attr_recover_timer.attr,
170 [CLUSTER_ATTR_TOSS_SECS] = &cluster_attr_toss_secs.attr,
171 [CLUSTER_ATTR_SCAN_SECS] = &cluster_attr_scan_secs.attr,
172 [CLUSTER_ATTR_LOG_DEBUG] = &cluster_attr_log_debug.attr,
173 NULL,
174};
175
76enum { 176enum {
77 COMM_ATTR_NODEID = 0, 177 COMM_ATTR_NODEID = 0,
78 COMM_ATTR_LOCAL, 178 COMM_ATTR_LOCAL,
@@ -152,10 +252,6 @@ struct clusters {
152 struct configfs_subsystem subsys; 252 struct configfs_subsystem subsys;
153}; 253};
154 254
155struct cluster {
156 struct config_group group;
157};
158
159struct spaces { 255struct spaces {
160 struct config_group ss_group; 256 struct config_group ss_group;
161}; 257};
@@ -197,6 +293,8 @@ static struct configfs_group_operations clusters_ops = {
197 293
198static struct configfs_item_operations cluster_ops = { 294static struct configfs_item_operations cluster_ops = {
199 .release = release_cluster, 295 .release = release_cluster,
296 .show_attribute = show_cluster,
297 .store_attribute = store_cluster,
200}; 298};
201 299
202static struct configfs_group_operations spaces_ops = { 300static struct configfs_group_operations spaces_ops = {
@@ -237,6 +335,7 @@ static struct config_item_type clusters_type = {
237 335
238static struct config_item_type cluster_type = { 336static struct config_item_type cluster_type = {
239 .ct_item_ops = &cluster_ops, 337 .ct_item_ops = &cluster_ops,
338 .ct_attrs = cluster_attrs,
240 .ct_owner = THIS_MODULE, 339 .ct_owner = THIS_MODULE,
241}; 340};
242 341
@@ -317,6 +416,16 @@ static struct config_group *make_cluster(struct config_group *g,
317 cl->group.default_groups[1] = &cms->cs_group; 416 cl->group.default_groups[1] = &cms->cs_group;
318 cl->group.default_groups[2] = NULL; 417 cl->group.default_groups[2] = NULL;
319 418
419 cl->cl_tcp_port = dlm_config.ci_tcp_port;
420 cl->cl_buffer_size = dlm_config.ci_buffer_size;
421 cl->cl_rsbtbl_size = dlm_config.ci_rsbtbl_size;
422 cl->cl_lkbtbl_size = dlm_config.ci_lkbtbl_size;
423 cl->cl_dirtbl_size = dlm_config.ci_dirtbl_size;
424 cl->cl_recover_timer = dlm_config.ci_recover_timer;
425 cl->cl_toss_secs = dlm_config.ci_toss_secs;
426 cl->cl_scan_secs = dlm_config.ci_scan_secs;
427 cl->cl_log_debug = dlm_config.ci_log_debug;
428
320 space_list = &sps->ss_group; 429 space_list = &sps->ss_group;
321 comm_list = &cms->cs_group; 430 comm_list = &cms->cs_group;
322 return &cl->group; 431 return &cl->group;
@@ -509,6 +618,25 @@ void dlm_config_exit(void)
509 * Functions for user space to read/write attributes 618 * Functions for user space to read/write attributes
510 */ 619 */
511 620
621static ssize_t show_cluster(struct config_item *i, struct configfs_attribute *a,
622 char *buf)
623{
624 struct cluster *cl = to_cluster(i);
625 struct cluster_attribute *cla =
626 container_of(a, struct cluster_attribute, attr);
627 return cla->show ? cla->show(cl, buf) : 0;
628}
629
630static ssize_t store_cluster(struct config_item *i,
631 struct configfs_attribute *a,
632 const char *buf, size_t len)
633{
634 struct cluster *cl = to_cluster(i);
635 struct cluster_attribute *cla =
636 container_of(a, struct cluster_attribute, attr);
637 return cla->store ? cla->store(cl, buf, len) : -EINVAL;
638}
639
512static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a, 640static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
513 char *buf) 641 char *buf)
514{ 642{
@@ -775,15 +903,17 @@ int dlm_our_addr(struct sockaddr_storage *addr, int num)
775#define DEFAULT_RECOVER_TIMER 5 903#define DEFAULT_RECOVER_TIMER 5
776#define DEFAULT_TOSS_SECS 10 904#define DEFAULT_TOSS_SECS 10
777#define DEFAULT_SCAN_SECS 5 905#define DEFAULT_SCAN_SECS 5
906#define DEFAULT_LOG_DEBUG 0
778 907
779struct dlm_config_info dlm_config = { 908struct dlm_config_info dlm_config = {
780 .tcp_port = DEFAULT_TCP_PORT, 909 .ci_tcp_port = DEFAULT_TCP_PORT,
781 .buffer_size = DEFAULT_BUFFER_SIZE, 910 .ci_buffer_size = DEFAULT_BUFFER_SIZE,
782 .rsbtbl_size = DEFAULT_RSBTBL_SIZE, 911 .ci_rsbtbl_size = DEFAULT_RSBTBL_SIZE,
783 .lkbtbl_size = DEFAULT_LKBTBL_SIZE, 912 .ci_lkbtbl_size = DEFAULT_LKBTBL_SIZE,
784 .dirtbl_size = DEFAULT_DIRTBL_SIZE, 913 .ci_dirtbl_size = DEFAULT_DIRTBL_SIZE,
785 .recover_timer = DEFAULT_RECOVER_TIMER, 914 .ci_recover_timer = DEFAULT_RECOVER_TIMER,
786 .toss_secs = DEFAULT_TOSS_SECS, 915 .ci_toss_secs = DEFAULT_TOSS_SECS,
787 .scan_secs = DEFAULT_SCAN_SECS 916 .ci_scan_secs = DEFAULT_SCAN_SECS,
917 .ci_log_debug = DEFAULT_LOG_DEBUG
788}; 918};
789 919
diff --git a/fs/dlm/config.h b/fs/dlm/config.h
index 9da7839958a9..1e978611a96e 100644
--- a/fs/dlm/config.h
+++ b/fs/dlm/config.h
@@ -17,14 +17,15 @@
17#define DLM_MAX_ADDR_COUNT 3 17#define DLM_MAX_ADDR_COUNT 3
18 18
19struct dlm_config_info { 19struct dlm_config_info {
20 int tcp_port; 20 int ci_tcp_port;
21 int buffer_size; 21 int ci_buffer_size;
22 int rsbtbl_size; 22 int ci_rsbtbl_size;
23 int lkbtbl_size; 23 int ci_lkbtbl_size;
24 int dirtbl_size; 24 int ci_dirtbl_size;
25 int recover_timer; 25 int ci_recover_timer;
26 int toss_secs; 26 int ci_toss_secs;
27 int scan_secs; 27 int ci_scan_secs;
28 int ci_log_debug;
28}; 29};
29 30
30extern struct dlm_config_info dlm_config; 31extern struct dlm_config_info dlm_config;
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 1ee8195e6fc0..61d93201e1b2 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -41,6 +41,7 @@
41#include <asm/uaccess.h> 41#include <asm/uaccess.h>
42 42
43#include <linux/dlm.h> 43#include <linux/dlm.h>
44#include "config.h"
44 45
45#define DLM_LOCKSPACE_LEN 64 46#define DLM_LOCKSPACE_LEN 64
46 47
@@ -69,12 +70,12 @@ struct dlm_mhandle;
69#define log_error(ls, fmt, args...) \ 70#define log_error(ls, fmt, args...) \
70 printk(KERN_ERR "dlm: %s: " fmt "\n", (ls)->ls_name , ##args) 71 printk(KERN_ERR "dlm: %s: " fmt "\n", (ls)->ls_name , ##args)
71 72
72#define DLM_LOG_DEBUG 73#define log_debug(ls, fmt, args...) \
73#ifdef DLM_LOG_DEBUG 74do { \
74#define log_debug(ls, fmt, args...) log_error(ls, fmt, ##args) 75 if (dlm_config.ci_log_debug) \
75#else 76 printk(KERN_DEBUG "dlm: %s: " fmt "\n", \
76#define log_debug(ls, fmt, args...) 77 (ls)->ls_name , ##args); \
77#endif 78} while (0)
78 79
79#define DLM_ASSERT(x, do) \ 80#define DLM_ASSERT(x, do) \
80{ \ 81{ \
@@ -309,8 +310,8 @@ static inline int rsb_flag(struct dlm_rsb *r, enum rsb_flags flag)
309 310
310/* dlm_header is first element of all structs sent between nodes */ 311/* dlm_header is first element of all structs sent between nodes */
311 312
312#define DLM_HEADER_MAJOR 0x00020000 313#define DLM_HEADER_MAJOR 0x00030000
313#define DLM_HEADER_MINOR 0x00000001 314#define DLM_HEADER_MINOR 0x00000000
314 315
315#define DLM_MSG 1 316#define DLM_MSG 1
316#define DLM_RCOM 2 317#define DLM_RCOM 2
@@ -386,6 +387,8 @@ struct dlm_rcom {
386 uint32_t rc_type; /* DLM_RCOM_ */ 387 uint32_t rc_type; /* DLM_RCOM_ */
387 int rc_result; /* multi-purpose */ 388 int rc_result; /* multi-purpose */
388 uint64_t rc_id; /* match reply with request */ 389 uint64_t rc_id; /* match reply with request */
390 uint64_t rc_seq; /* sender's ls_recover_seq */
391 uint64_t rc_seq_reply; /* remote ls_recover_seq */
389 char rc_buf[0]; 392 char rc_buf[0];
390}; 393};
391 394
@@ -523,6 +526,7 @@ struct dlm_user_proc {
523 spinlock_t asts_spin; 526 spinlock_t asts_spin;
524 struct list_head locks; 527 struct list_head locks;
525 spinlock_t locks_spin; 528 spinlock_t locks_spin;
529 struct list_head unlocking;
526 wait_queue_head_t wait; 530 wait_queue_head_t wait;
527}; 531};
528 532
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 30878defaeb6..e725005fafd0 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -754,6 +754,11 @@ static void add_to_waiters(struct dlm_lkb *lkb, int mstype)
754 mutex_unlock(&ls->ls_waiters_mutex); 754 mutex_unlock(&ls->ls_waiters_mutex);
755} 755}
756 756
757/* We clear the RESEND flag because we might be taking an lkb off the waiters
758 list as part of process_requestqueue (e.g. a lookup that has an optimized
759 request reply on the requestqueue) between dlm_recover_waiters_pre() which
760 set RESEND and dlm_recover_waiters_post() */
761
757static int _remove_from_waiters(struct dlm_lkb *lkb) 762static int _remove_from_waiters(struct dlm_lkb *lkb)
758{ 763{
759 int error = 0; 764 int error = 0;
@@ -764,6 +769,7 @@ static int _remove_from_waiters(struct dlm_lkb *lkb)
764 goto out; 769 goto out;
765 } 770 }
766 lkb->lkb_wait_type = 0; 771 lkb->lkb_wait_type = 0;
772 lkb->lkb_flags &= ~DLM_IFL_RESEND;
767 list_del(&lkb->lkb_wait_reply); 773 list_del(&lkb->lkb_wait_reply);
768 unhold_lkb(lkb); 774 unhold_lkb(lkb);
769 out: 775 out:
@@ -810,7 +816,7 @@ static int shrink_bucket(struct dlm_ls *ls, int b)
810 list_for_each_entry_reverse(r, &ls->ls_rsbtbl[b].toss, 816 list_for_each_entry_reverse(r, &ls->ls_rsbtbl[b].toss,
811 res_hashchain) { 817 res_hashchain) {
812 if (!time_after_eq(jiffies, r->res_toss_time + 818 if (!time_after_eq(jiffies, r->res_toss_time +
813 dlm_config.toss_secs * HZ)) 819 dlm_config.ci_toss_secs * HZ))
814 continue; 820 continue;
815 found = 1; 821 found = 1;
816 break; 822 break;
@@ -2144,12 +2150,24 @@ static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
2144 if (lkb->lkb_astaddr) 2150 if (lkb->lkb_astaddr)
2145 ms->m_asts |= AST_COMP; 2151 ms->m_asts |= AST_COMP;
2146 2152
2147 if (ms->m_type == DLM_MSG_REQUEST || ms->m_type == DLM_MSG_LOOKUP) 2153 /* compare with switch in create_message; send_remove() doesn't
2148 memcpy(ms->m_extra, r->res_name, r->res_length); 2154 use send_args() */
2149 2155
2150 else if (lkb->lkb_lvbptr) 2156 switch (ms->m_type) {
2157 case DLM_MSG_REQUEST:
2158 case DLM_MSG_LOOKUP:
2159 memcpy(ms->m_extra, r->res_name, r->res_length);
2160 break;
2161 case DLM_MSG_CONVERT:
2162 case DLM_MSG_UNLOCK:
2163 case DLM_MSG_REQUEST_REPLY:
2164 case DLM_MSG_CONVERT_REPLY:
2165 case DLM_MSG_GRANT:
2166 if (!lkb->lkb_lvbptr)
2167 break;
2151 memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen); 2168 memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
2152 2169 break;
2170 }
2153} 2171}
2154 2172
2155static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype) 2173static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
@@ -2418,8 +2436,12 @@ static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2418 2436
2419 DLM_ASSERT(is_master_copy(lkb), dlm_print_lkb(lkb);); 2437 DLM_ASSERT(is_master_copy(lkb), dlm_print_lkb(lkb););
2420 2438
2421 if (receive_lvb(ls, lkb, ms)) 2439 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
2422 return -ENOMEM; 2440 /* lkb was just created so there won't be an lvb yet */
2441 lkb->lkb_lvbptr = allocate_lvb(ls);
2442 if (!lkb->lkb_lvbptr)
2443 return -ENOMEM;
2444 }
2423 2445
2424 return 0; 2446 return 0;
2425} 2447}
@@ -3002,7 +3024,7 @@ int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery)
3002{ 3024{
3003 struct dlm_message *ms = (struct dlm_message *) hd; 3025 struct dlm_message *ms = (struct dlm_message *) hd;
3004 struct dlm_ls *ls; 3026 struct dlm_ls *ls;
3005 int error; 3027 int error = 0;
3006 3028
3007 if (!recovery) 3029 if (!recovery)
3008 dlm_message_in(ms); 3030 dlm_message_in(ms);
@@ -3119,7 +3141,7 @@ int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery)
3119 out: 3141 out:
3120 dlm_put_lockspace(ls); 3142 dlm_put_lockspace(ls);
3121 dlm_astd_wake(); 3143 dlm_astd_wake();
3122 return 0; 3144 return error;
3123} 3145}
3124 3146
3125 3147
@@ -3132,6 +3154,7 @@ static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb)
3132 if (middle_conversion(lkb)) { 3154 if (middle_conversion(lkb)) {
3133 hold_lkb(lkb); 3155 hold_lkb(lkb);
3134 ls->ls_stub_ms.m_result = -EINPROGRESS; 3156 ls->ls_stub_ms.m_result = -EINPROGRESS;
3157 ls->ls_stub_ms.m_flags = lkb->lkb_flags;
3135 _remove_from_waiters(lkb); 3158 _remove_from_waiters(lkb);
3136 _receive_convert_reply(lkb, &ls->ls_stub_ms); 3159 _receive_convert_reply(lkb, &ls->ls_stub_ms);
3137 3160
@@ -3205,6 +3228,7 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
3205 case DLM_MSG_UNLOCK: 3228 case DLM_MSG_UNLOCK:
3206 hold_lkb(lkb); 3229 hold_lkb(lkb);
3207 ls->ls_stub_ms.m_result = -DLM_EUNLOCK; 3230 ls->ls_stub_ms.m_result = -DLM_EUNLOCK;
3231 ls->ls_stub_ms.m_flags = lkb->lkb_flags;
3208 _remove_from_waiters(lkb); 3232 _remove_from_waiters(lkb);
3209 _receive_unlock_reply(lkb, &ls->ls_stub_ms); 3233 _receive_unlock_reply(lkb, &ls->ls_stub_ms);
3210 dlm_put_lkb(lkb); 3234 dlm_put_lkb(lkb);
@@ -3213,6 +3237,7 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
3213 case DLM_MSG_CANCEL: 3237 case DLM_MSG_CANCEL:
3214 hold_lkb(lkb); 3238 hold_lkb(lkb);
3215 ls->ls_stub_ms.m_result = -DLM_ECANCEL; 3239 ls->ls_stub_ms.m_result = -DLM_ECANCEL;
3240 ls->ls_stub_ms.m_flags = lkb->lkb_flags;
3216 _remove_from_waiters(lkb); 3241 _remove_from_waiters(lkb);
3217 _receive_cancel_reply(lkb, &ls->ls_stub_ms); 3242 _receive_cancel_reply(lkb, &ls->ls_stub_ms);
3218 dlm_put_lkb(lkb); 3243 dlm_put_lkb(lkb);
@@ -3571,6 +3596,14 @@ int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
3571 lock_rsb(r); 3596 lock_rsb(r);
3572 3597
3573 switch (error) { 3598 switch (error) {
3599 case -EBADR:
3600 /* There's a chance the new master received our lock before
3601 dlm_recover_master_reply(), this wouldn't happen if we did
3602 a barrier between recover_masters and recover_locks. */
3603 log_debug(ls, "master copy not ready %x r %lx %s", lkb->lkb_id,
3604 (unsigned long)r, r->res_name);
3605 dlm_send_rcom_lock(r, lkb);
3606 goto out;
3574 case -EEXIST: 3607 case -EEXIST:
3575 log_debug(ls, "master copy exists %x", lkb->lkb_id); 3608 log_debug(ls, "master copy exists %x", lkb->lkb_id);
3576 /* fall through */ 3609 /* fall through */
@@ -3585,7 +3618,7 @@ int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
3585 /* an ack for dlm_recover_locks() which waits for replies from 3618 /* an ack for dlm_recover_locks() which waits for replies from
3586 all the locks it sends to new masters */ 3619 all the locks it sends to new masters */
3587 dlm_recovered_lock(r); 3620 dlm_recovered_lock(r);
3588 3621 out:
3589 unlock_rsb(r); 3622 unlock_rsb(r);
3590 put_rsb(r); 3623 put_rsb(r);
3591 dlm_put_lkb(lkb); 3624 dlm_put_lkb(lkb);
@@ -3610,7 +3643,7 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
3610 } 3643 }
3611 3644
3612 if (flags & DLM_LKF_VALBLK) { 3645 if (flags & DLM_LKF_VALBLK) {
3613 ua->lksb.sb_lvbptr = kmalloc(DLM_USER_LVB_LEN, GFP_KERNEL); 3646 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_KERNEL);
3614 if (!ua->lksb.sb_lvbptr) { 3647 if (!ua->lksb.sb_lvbptr) {
3615 kfree(ua); 3648 kfree(ua);
3616 __put_lkb(ls, lkb); 3649 __put_lkb(ls, lkb);
@@ -3679,7 +3712,7 @@ int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
3679 ua = (struct dlm_user_args *)lkb->lkb_astparam; 3712 ua = (struct dlm_user_args *)lkb->lkb_astparam;
3680 3713
3681 if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) { 3714 if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
3682 ua->lksb.sb_lvbptr = kmalloc(DLM_USER_LVB_LEN, GFP_KERNEL); 3715 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_KERNEL);
3683 if (!ua->lksb.sb_lvbptr) { 3716 if (!ua->lksb.sb_lvbptr) {
3684 error = -ENOMEM; 3717 error = -ENOMEM;
3685 goto out_put; 3718 goto out_put;
@@ -3745,12 +3778,10 @@ int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
3745 goto out_put; 3778 goto out_put;
3746 3779
3747 spin_lock(&ua->proc->locks_spin); 3780 spin_lock(&ua->proc->locks_spin);
3748 list_del_init(&lkb->lkb_ownqueue); 3781 /* dlm_user_add_ast() may have already taken lkb off the proc list */
3782 if (!list_empty(&lkb->lkb_ownqueue))
3783 list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
3749 spin_unlock(&ua->proc->locks_spin); 3784 spin_unlock(&ua->proc->locks_spin);
3750
3751 /* this removes the reference for the proc->locks list added by
3752 dlm_user_request */
3753 unhold_lkb(lkb);
3754 out_put: 3785 out_put:
3755 dlm_put_lkb(lkb); 3786 dlm_put_lkb(lkb);
3756 out: 3787 out:
@@ -3790,9 +3821,8 @@ int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
3790 /* this lkb was removed from the WAITING queue */ 3821 /* this lkb was removed from the WAITING queue */
3791 if (lkb->lkb_grmode == DLM_LOCK_IV) { 3822 if (lkb->lkb_grmode == DLM_LOCK_IV) {
3792 spin_lock(&ua->proc->locks_spin); 3823 spin_lock(&ua->proc->locks_spin);
3793 list_del_init(&lkb->lkb_ownqueue); 3824 list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
3794 spin_unlock(&ua->proc->locks_spin); 3825 spin_unlock(&ua->proc->locks_spin);
3795 unhold_lkb(lkb);
3796 } 3826 }
3797 out_put: 3827 out_put:
3798 dlm_put_lkb(lkb); 3828 dlm_put_lkb(lkb);
@@ -3853,11 +3883,6 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
3853 mutex_lock(&ls->ls_clear_proc_locks); 3883 mutex_lock(&ls->ls_clear_proc_locks);
3854 3884
3855 list_for_each_entry_safe(lkb, safe, &proc->locks, lkb_ownqueue) { 3885 list_for_each_entry_safe(lkb, safe, &proc->locks, lkb_ownqueue) {
3856 if (lkb->lkb_ast_type) {
3857 list_del(&lkb->lkb_astqueue);
3858 unhold_lkb(lkb);
3859 }
3860
3861 list_del_init(&lkb->lkb_ownqueue); 3886 list_del_init(&lkb->lkb_ownqueue);
3862 3887
3863 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT) { 3888 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT) {
@@ -3874,6 +3899,20 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
3874 3899
3875 dlm_put_lkb(lkb); 3900 dlm_put_lkb(lkb);
3876 } 3901 }
3902
3903 /* in-progress unlocks */
3904 list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
3905 list_del_init(&lkb->lkb_ownqueue);
3906 lkb->lkb_flags |= DLM_IFL_DEAD;
3907 dlm_put_lkb(lkb);
3908 }
3909
3910 list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_astqueue) {
3911 list_del(&lkb->lkb_astqueue);
3912 dlm_put_lkb(lkb);
3913 }
3914
3877 mutex_unlock(&ls->ls_clear_proc_locks); 3915 mutex_unlock(&ls->ls_clear_proc_locks);
3878 unlock_recovery(ls); 3916 unlock_recovery(ls);
3879} 3917}
3918
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 59012b089e8d..f40817b53c6f 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -236,7 +236,7 @@ static int dlm_scand(void *data)
236 while (!kthread_should_stop()) { 236 while (!kthread_should_stop()) {
237 list_for_each_entry(ls, &lslist, ls_list) 237 list_for_each_entry(ls, &lslist, ls_list)
238 dlm_scan_rsbs(ls); 238 dlm_scan_rsbs(ls);
239 schedule_timeout_interruptible(dlm_config.scan_secs * HZ); 239 schedule_timeout_interruptible(dlm_config.ci_scan_secs * HZ);
240 } 240 }
241 return 0; 241 return 0;
242} 242}
@@ -422,7 +422,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
422 ls->ls_count = 0; 422 ls->ls_count = 0;
423 ls->ls_flags = 0; 423 ls->ls_flags = 0;
424 424
425 size = dlm_config.rsbtbl_size; 425 size = dlm_config.ci_rsbtbl_size;
426 ls->ls_rsbtbl_size = size; 426 ls->ls_rsbtbl_size = size;
427 427
428 ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_KERNEL); 428 ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_KERNEL);
@@ -434,7 +434,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
434 rwlock_init(&ls->ls_rsbtbl[i].lock); 434 rwlock_init(&ls->ls_rsbtbl[i].lock);
435 } 435 }
436 436
437 size = dlm_config.lkbtbl_size; 437 size = dlm_config.ci_lkbtbl_size;
438 ls->ls_lkbtbl_size = size; 438 ls->ls_lkbtbl_size = size;
439 439
440 ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_KERNEL); 440 ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_KERNEL);
@@ -446,7 +446,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
446 ls->ls_lkbtbl[i].counter = 1; 446 ls->ls_lkbtbl[i].counter = 1;
447 } 447 }
448 448
449 size = dlm_config.dirtbl_size; 449 size = dlm_config.ci_dirtbl_size;
450 ls->ls_dirtbl_size = size; 450 ls->ls_dirtbl_size = size;
451 451
452 ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_KERNEL); 452 ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_KERNEL);
@@ -489,7 +489,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
489 mutex_init(&ls->ls_requestqueue_mutex); 489 mutex_init(&ls->ls_requestqueue_mutex);
490 mutex_init(&ls->ls_clear_proc_locks); 490 mutex_init(&ls->ls_clear_proc_locks);
491 491
492 ls->ls_recover_buf = kmalloc(dlm_config.buffer_size, GFP_KERNEL); 492 ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_KERNEL);
493 if (!ls->ls_recover_buf) 493 if (!ls->ls_recover_buf)
494 goto out_dirfree; 494 goto out_dirfree;
495 495
diff --git a/fs/dlm/lowcomms-sctp.c b/fs/dlm/lowcomms-sctp.c
index fe158d7a9285..dc83a9d979b5 100644
--- a/fs/dlm/lowcomms-sctp.c
+++ b/fs/dlm/lowcomms-sctp.c
@@ -72,6 +72,8 @@ struct nodeinfo {
72 struct list_head writequeue; /* outgoing writequeue_entries */ 72 struct list_head writequeue; /* outgoing writequeue_entries */
73 spinlock_t writequeue_lock; 73 spinlock_t writequeue_lock;
74 int nodeid; 74 int nodeid;
75 struct work_struct swork; /* Send workqueue */
76 struct work_struct lwork; /* Locking workqueue */
75}; 77};
76 78
77static DEFINE_IDR(nodeinfo_idr); 79static DEFINE_IDR(nodeinfo_idr);
@@ -96,6 +98,7 @@ struct connection {
96 atomic_t waiting_requests; 98 atomic_t waiting_requests;
97 struct cbuf cb; 99 struct cbuf cb;
98 int eagain_flag; 100 int eagain_flag;
101 struct work_struct work; /* Send workqueue */
99}; 102};
100 103
101/* An entry waiting to be sent */ 104/* An entry waiting to be sent */
@@ -137,19 +140,23 @@ static void cbuf_eat(struct cbuf *cb, int n)
137static LIST_HEAD(write_nodes); 140static LIST_HEAD(write_nodes);
138static DEFINE_SPINLOCK(write_nodes_lock); 141static DEFINE_SPINLOCK(write_nodes_lock);
139 142
143
140/* Maximum number of incoming messages to process before 144/* Maximum number of incoming messages to process before
141 * doing a schedule() 145 * doing a schedule()
142 */ 146 */
143#define MAX_RX_MSG_COUNT 25 147#define MAX_RX_MSG_COUNT 25
144 148
145/* Manage daemons */ 149/* Work queues */
146static struct task_struct *recv_task; 150static struct workqueue_struct *recv_workqueue;
147static struct task_struct *send_task; 151static struct workqueue_struct *send_workqueue;
148static DECLARE_WAIT_QUEUE_HEAD(lowcomms_recv_wait); 152static struct workqueue_struct *lock_workqueue;
149 153
150/* The SCTP connection */ 154/* The SCTP connection */
151static struct connection sctp_con; 155static struct connection sctp_con;
152 156
157static void process_send_sockets(struct work_struct *work);
158static void process_recv_sockets(struct work_struct *work);
159static void process_lock_request(struct work_struct *work);
153 160
154static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr) 161static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr)
155{ 162{
@@ -222,6 +229,8 @@ static struct nodeinfo *nodeid2nodeinfo(int nodeid, gfp_t alloc)
222 spin_lock_init(&ni->lock); 229 spin_lock_init(&ni->lock);
223 INIT_LIST_HEAD(&ni->writequeue); 230 INIT_LIST_HEAD(&ni->writequeue);
224 spin_lock_init(&ni->writequeue_lock); 231 spin_lock_init(&ni->writequeue_lock);
232 INIT_WORK(&ni->lwork, process_lock_request);
233 INIT_WORK(&ni->swork, process_send_sockets);
225 ni->nodeid = nodeid; 234 ni->nodeid = nodeid;
226 235
227 if (nodeid > max_nodeid) 236 if (nodeid > max_nodeid)
@@ -249,11 +258,8 @@ static struct nodeinfo *assoc2nodeinfo(sctp_assoc_t assoc)
249/* Data or notification available on socket */ 258/* Data or notification available on socket */
250static void lowcomms_data_ready(struct sock *sk, int count_unused) 259static void lowcomms_data_ready(struct sock *sk, int count_unused)
251{ 260{
252 atomic_inc(&sctp_con.waiting_requests);
253 if (test_and_set_bit(CF_READ_PENDING, &sctp_con.flags)) 261 if (test_and_set_bit(CF_READ_PENDING, &sctp_con.flags))
254 return; 262 queue_work(recv_workqueue, &sctp_con.work);
255
256 wake_up_interruptible(&lowcomms_recv_wait);
257} 263}
258 264
259 265
@@ -361,10 +367,10 @@ static void init_failed(void)
361 spin_lock_bh(&write_nodes_lock); 367 spin_lock_bh(&write_nodes_lock);
362 list_add_tail(&ni->write_list, &write_nodes); 368 list_add_tail(&ni->write_list, &write_nodes);
363 spin_unlock_bh(&write_nodes_lock); 369 spin_unlock_bh(&write_nodes_lock);
370 queue_work(send_workqueue, &ni->swork);
364 } 371 }
365 } 372 }
366 } 373 }
367 wake_up_process(send_task);
368} 374}
369 375
370/* Something happened to an association */ 376/* Something happened to an association */
@@ -446,8 +452,8 @@ static void process_sctp_notification(struct msghdr *msg, char *buf)
446 spin_lock_bh(&write_nodes_lock); 452 spin_lock_bh(&write_nodes_lock);
447 list_add_tail(&ni->write_list, &write_nodes); 453 list_add_tail(&ni->write_list, &write_nodes);
448 spin_unlock_bh(&write_nodes_lock); 454 spin_unlock_bh(&write_nodes_lock);
455 queue_work(send_workqueue, &ni->swork);
449 } 456 }
450 wake_up_process(send_task);
451 } 457 }
452 break; 458 break;
453 459
@@ -580,8 +586,8 @@ static int receive_from_sock(void)
580 spin_lock_bh(&write_nodes_lock); 586 spin_lock_bh(&write_nodes_lock);
581 list_add_tail(&ni->write_list, &write_nodes); 587 list_add_tail(&ni->write_list, &write_nodes);
582 spin_unlock_bh(&write_nodes_lock); 588 spin_unlock_bh(&write_nodes_lock);
589 queue_work(send_workqueue, &ni->swork);
583 } 590 }
584 wake_up_process(send_task);
585 } 591 }
586 } 592 }
587 593
@@ -590,6 +596,7 @@ static int receive_from_sock(void)
590 return 0; 596 return 0;
591 597
592 cbuf_add(&sctp_con.cb, ret); 598 cbuf_add(&sctp_con.cb, ret);
599 // PJC: TODO: Add to node's workqueue....can we ??
593 ret = dlm_process_incoming_buffer(cpu_to_le32(sinfo->sinfo_ppid), 600 ret = dlm_process_incoming_buffer(cpu_to_le32(sinfo->sinfo_ppid),
594 page_address(sctp_con.rx_page), 601 page_address(sctp_con.rx_page),
595 sctp_con.cb.base, sctp_con.cb.len, 602 sctp_con.cb.base, sctp_con.cb.len,
@@ -635,7 +642,7 @@ static int add_bind_addr(struct sockaddr_storage *addr, int addr_len, int num)
635 642
636 if (result < 0) 643 if (result < 0)
637 log_print("Can't bind to port %d addr number %d", 644 log_print("Can't bind to port %d addr number %d",
638 dlm_config.tcp_port, num); 645 dlm_config.ci_tcp_port, num);
639 646
640 return result; 647 return result;
641} 648}
@@ -711,7 +718,7 @@ static int init_sock(void)
711 /* Bind to all interfaces. */ 718 /* Bind to all interfaces. */
712 for (i = 0; i < dlm_local_count; i++) { 719 for (i = 0; i < dlm_local_count; i++) {
713 memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr)); 720 memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
714 make_sockaddr(&localaddr, dlm_config.tcp_port, &addr_len); 721 make_sockaddr(&localaddr, dlm_config.ci_tcp_port, &addr_len);
715 722
716 result = add_bind_addr(&localaddr, addr_len, num); 723 result = add_bind_addr(&localaddr, addr_len, num);
717 if (result) 724 if (result)
@@ -820,7 +827,8 @@ void dlm_lowcomms_commit_buffer(void *arg)
820 spin_lock_bh(&write_nodes_lock); 827 spin_lock_bh(&write_nodes_lock);
821 list_add_tail(&ni->write_list, &write_nodes); 828 list_add_tail(&ni->write_list, &write_nodes);
822 spin_unlock_bh(&write_nodes_lock); 829 spin_unlock_bh(&write_nodes_lock);
823 wake_up_process(send_task); 830
831 queue_work(send_workqueue, &ni->swork);
824 } 832 }
825 return; 833 return;
826 834
@@ -863,7 +871,7 @@ static void initiate_association(int nodeid)
863 return; 871 return;
864 } 872 }
865 873
866 make_sockaddr(&rem_addr, dlm_config.tcp_port, &addrlen); 874 make_sockaddr(&rem_addr, dlm_config.ci_tcp_port, &addrlen);
867 875
868 outmessage.msg_name = &rem_addr; 876 outmessage.msg_name = &rem_addr;
869 outmessage.msg_namelen = addrlen; 877 outmessage.msg_namelen = addrlen;
@@ -1088,101 +1096,75 @@ int dlm_lowcomms_close(int nodeid)
1088 return 0; 1096 return 0;
1089} 1097}
1090 1098
1091static int write_list_empty(void) 1099// PJC: The work queue function for receiving.
1100static void process_recv_sockets(struct work_struct *work)
1092{ 1101{
1093 int status; 1102 if (test_and_clear_bit(CF_READ_PENDING, &sctp_con.flags)) {
1094 1103 int ret;
1095 spin_lock_bh(&write_nodes_lock);
1096 status = list_empty(&write_nodes);
1097 spin_unlock_bh(&write_nodes_lock);
1098
1099 return status;
1100}
1101
1102static int dlm_recvd(void *data)
1103{
1104 DECLARE_WAITQUEUE(wait, current);
1105
1106 while (!kthread_should_stop()) {
1107 int count = 0; 1104 int count = 0;
1108 1105
1109 set_current_state(TASK_INTERRUPTIBLE); 1106 do {
1110 add_wait_queue(&lowcomms_recv_wait, &wait); 1107 ret = receive_from_sock();
1111 if (!test_bit(CF_READ_PENDING, &sctp_con.flags))
1112 cond_resched();
1113 remove_wait_queue(&lowcomms_recv_wait, &wait);
1114 set_current_state(TASK_RUNNING);
1115
1116 if (test_and_clear_bit(CF_READ_PENDING, &sctp_con.flags)) {
1117 int ret;
1118
1119 do {
1120 ret = receive_from_sock();
1121 1108
1122 /* Don't starve out everyone else */ 1109 /* Don't starve out everyone else */
1123 if (++count >= MAX_RX_MSG_COUNT) { 1110 if (++count >= MAX_RX_MSG_COUNT) {
1124 cond_resched(); 1111 cond_resched();
1125 count = 0; 1112 count = 0;
1126 } 1113 }
1127 } while (!kthread_should_stop() && ret >=0); 1114 } while (!kthread_should_stop() && ret >=0);
1128 }
1129 cond_resched();
1130 } 1115 }
1131 1116 cond_resched();
1132 return 0;
1133} 1117}
1134 1118
1135static int dlm_sendd(void *data) 1119// PJC: the work queue function for sending
1120static void process_send_sockets(struct work_struct *work)
1136{ 1121{
1137 DECLARE_WAITQUEUE(wait, current); 1122 if (sctp_con.eagain_flag) {
1138 1123 sctp_con.eagain_flag = 0;
1139 add_wait_queue(sctp_con.sock->sk->sk_sleep, &wait); 1124 refill_write_queue();
1140
1141 while (!kthread_should_stop()) {
1142 set_current_state(TASK_INTERRUPTIBLE);
1143 if (write_list_empty())
1144 cond_resched();
1145 set_current_state(TASK_RUNNING);
1146
1147 if (sctp_con.eagain_flag) {
1148 sctp_con.eagain_flag = 0;
1149 refill_write_queue();
1150 }
1151 process_output_queue();
1152 } 1125 }
1126 process_output_queue();
1127}
1153 1128
1154 remove_wait_queue(sctp_con.sock->sk->sk_sleep, &wait); 1129// PJC: Process lock requests from a particular node.
1155 1130// TODO: can we optimise this out on UP ??
1156 return 0; 1131static void process_lock_request(struct work_struct *work)
1132{
1157} 1133}
1158 1134
1159static void daemons_stop(void) 1135static void daemons_stop(void)
1160{ 1136{
1161 kthread_stop(recv_task); 1137 destroy_workqueue(recv_workqueue);
1162 kthread_stop(send_task); 1138 destroy_workqueue(send_workqueue);
1139 destroy_workqueue(lock_workqueue);
1163} 1140}
1164 1141
1165static int daemons_start(void) 1142static int daemons_start(void)
1166{ 1143{
1167 struct task_struct *p;
1168 int error; 1144 int error;
1145 recv_workqueue = create_workqueue("dlm_recv");
1146 error = IS_ERR(recv_workqueue);
1147 if (error) {
1148 log_print("can't start dlm_recv %d", error);
1149 return error;
1150 }
1169 1151
1170 p = kthread_run(dlm_recvd, NULL, "dlm_recvd"); 1152 send_workqueue = create_singlethread_workqueue("dlm_send");
1171 error = IS_ERR(p); 1153 error = IS_ERR(send_workqueue);
1172 if (error) { 1154 if (error) {
1173 log_print("can't start dlm_recvd %d", error); 1155 log_print("can't start dlm_send %d", error);
1156 destroy_workqueue(recv_workqueue);
1174 return error; 1157 return error;
1175 } 1158 }
1176 recv_task = p;
1177 1159
1178 p = kthread_run(dlm_sendd, NULL, "dlm_sendd"); 1160 lock_workqueue = create_workqueue("dlm_rlock");
1179 error = IS_ERR(p); 1161 error = IS_ERR(lock_workqueue);
1180 if (error) { 1162 if (error) {
1181 log_print("can't start dlm_sendd %d", error); 1163 log_print("can't start dlm_rlock %d", error);
1182 kthread_stop(recv_task); 1164 destroy_workqueue(send_workqueue);
1165 destroy_workqueue(recv_workqueue);
1183 return error; 1166 return error;
1184 } 1167 }
1185 send_task = p;
1186 1168
1187 return 0; 1169 return 0;
1188} 1170}
@@ -1194,6 +1176,8 @@ int dlm_lowcomms_start(void)
1194{ 1176{
1195 int error; 1177 int error;
1196 1178
1179 INIT_WORK(&sctp_con.work, process_recv_sockets);
1180
1197 error = init_sock(); 1181 error = init_sock();
1198 if (error) 1182 if (error)
1199 goto fail_sock; 1183 goto fail_sock;
@@ -1224,4 +1208,3 @@ void dlm_lowcomms_stop(void)
1224 for (i = 0; i < dlm_local_count; i++) 1208 for (i = 0; i < dlm_local_count; i++)
1225 kfree(dlm_local_addr[i]); 1209 kfree(dlm_local_addr[i]);
1226} 1210}
1227
diff --git a/fs/dlm/lowcomms-tcp.c b/fs/dlm/lowcomms-tcp.c
index 9be3a440c42a..f1efd17b2614 100644
--- a/fs/dlm/lowcomms-tcp.c
+++ b/fs/dlm/lowcomms-tcp.c
@@ -2,7 +2,7 @@
2******************************************************************************* 2*******************************************************************************
3** 3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. 5** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
6** 6**
7** This copyrighted material is made available to anyone wishing to use, 7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions 8** modify, copy, or redistribute it subject to the terms and conditions
@@ -96,10 +96,7 @@ static bool cbuf_empty(struct cbuf *cb)
96struct connection { 96struct connection {
97 struct socket *sock; /* NULL if not connected */ 97 struct socket *sock; /* NULL if not connected */
98 uint32_t nodeid; /* So we know who we are in the list */ 98 uint32_t nodeid; /* So we know who we are in the list */
99 struct rw_semaphore sock_sem; /* Stop connect races */ 99 struct mutex sock_mutex;
100 struct list_head read_list; /* On this list when ready for reading */
101 struct list_head write_list; /* On this list when ready for writing */
102 struct list_head state_list; /* On this list when ready to connect */
103 unsigned long flags; /* bit 1,2 = We are on the read/write lists */ 100 unsigned long flags; /* bit 1,2 = We are on the read/write lists */
104#define CF_READ_PENDING 1 101#define CF_READ_PENDING 1
105#define CF_WRITE_PENDING 2 102#define CF_WRITE_PENDING 2
@@ -112,9 +109,10 @@ struct connection {
112 struct page *rx_page; 109 struct page *rx_page;
113 struct cbuf cb; 110 struct cbuf cb;
114 int retries; 111 int retries;
115 atomic_t waiting_requests;
116#define MAX_CONNECT_RETRIES 3 112#define MAX_CONNECT_RETRIES 3
117 struct connection *othercon; 113 struct connection *othercon;
114 struct work_struct rwork; /* Receive workqueue */
115 struct work_struct swork; /* Send workqueue */
118}; 116};
119#define sock2con(x) ((struct connection *)(x)->sk_user_data) 117#define sock2con(x) ((struct connection *)(x)->sk_user_data)
120 118
@@ -131,14 +129,9 @@ struct writequeue_entry {
131 129
132static struct sockaddr_storage dlm_local_addr; 130static struct sockaddr_storage dlm_local_addr;
133 131
134/* Manage daemons */ 132/* Work queues */
135static struct task_struct *recv_task; 133static struct workqueue_struct *recv_workqueue;
136static struct task_struct *send_task; 134static struct workqueue_struct *send_workqueue;
137
138static wait_queue_t lowcomms_send_waitq_head;
139static DECLARE_WAIT_QUEUE_HEAD(lowcomms_send_waitq);
140static wait_queue_t lowcomms_recv_waitq_head;
141static DECLARE_WAIT_QUEUE_HEAD(lowcomms_recv_waitq);
142 135
143/* An array of pointers to connections, indexed by NODEID */ 136/* An array of pointers to connections, indexed by NODEID */
144static struct connection **connections; 137static struct connection **connections;
@@ -146,17 +139,8 @@ static DECLARE_MUTEX(connections_lock);
146static struct kmem_cache *con_cache; 139static struct kmem_cache *con_cache;
147static int conn_array_size; 140static int conn_array_size;
148 141
149/* List of sockets that have reads pending */ 142static void process_recv_sockets(struct work_struct *work);
150static LIST_HEAD(read_sockets); 143static void process_send_sockets(struct work_struct *work);
151static DEFINE_SPINLOCK(read_sockets_lock);
152
153/* List of sockets which have writes pending */
154static LIST_HEAD(write_sockets);
155static DEFINE_SPINLOCK(write_sockets_lock);
156
157/* List of sockets which have connects pending */
158static LIST_HEAD(state_sockets);
159static DEFINE_SPINLOCK(state_sockets_lock);
160 144
161static struct connection *nodeid2con(int nodeid, gfp_t allocation) 145static struct connection *nodeid2con(int nodeid, gfp_t allocation)
162{ 146{
@@ -186,9 +170,11 @@ static struct connection *nodeid2con(int nodeid, gfp_t allocation)
186 goto finish; 170 goto finish;
187 171
188 con->nodeid = nodeid; 172 con->nodeid = nodeid;
189 init_rwsem(&con->sock_sem); 173 mutex_init(&con->sock_mutex);
190 INIT_LIST_HEAD(&con->writequeue); 174 INIT_LIST_HEAD(&con->writequeue);
191 spin_lock_init(&con->writequeue_lock); 175 spin_lock_init(&con->writequeue_lock);
176 INIT_WORK(&con->swork, process_send_sockets);
177 INIT_WORK(&con->rwork, process_recv_sockets);
192 178
193 connections[nodeid] = con; 179 connections[nodeid] = con;
194 } 180 }
@@ -203,41 +189,22 @@ static void lowcomms_data_ready(struct sock *sk, int count_unused)
203{ 189{
204 struct connection *con = sock2con(sk); 190 struct connection *con = sock2con(sk);
205 191
206 atomic_inc(&con->waiting_requests); 192 if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
207 if (test_and_set_bit(CF_READ_PENDING, &con->flags)) 193 queue_work(recv_workqueue, &con->rwork);
208 return;
209
210 spin_lock_bh(&read_sockets_lock);
211 list_add_tail(&con->read_list, &read_sockets);
212 spin_unlock_bh(&read_sockets_lock);
213
214 wake_up_interruptible(&lowcomms_recv_waitq);
215} 194}
216 195
217static void lowcomms_write_space(struct sock *sk) 196static void lowcomms_write_space(struct sock *sk)
218{ 197{
219 struct connection *con = sock2con(sk); 198 struct connection *con = sock2con(sk);
220 199
221 if (test_and_set_bit(CF_WRITE_PENDING, &con->flags)) 200 if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
222 return; 201 queue_work(send_workqueue, &con->swork);
223
224 spin_lock_bh(&write_sockets_lock);
225 list_add_tail(&con->write_list, &write_sockets);
226 spin_unlock_bh(&write_sockets_lock);
227
228 wake_up_interruptible(&lowcomms_send_waitq);
229} 202}
230 203
231static inline void lowcomms_connect_sock(struct connection *con) 204static inline void lowcomms_connect_sock(struct connection *con)
232{ 205{
233 if (test_and_set_bit(CF_CONNECT_PENDING, &con->flags)) 206 if (!test_and_set_bit(CF_CONNECT_PENDING, &con->flags))
234 return; 207 queue_work(send_workqueue, &con->swork);
235
236 spin_lock_bh(&state_sockets_lock);
237 list_add_tail(&con->state_list, &state_sockets);
238 spin_unlock_bh(&state_sockets_lock);
239
240 wake_up_interruptible(&lowcomms_send_waitq);
241} 208}
242 209
243static void lowcomms_state_change(struct sock *sk) 210static void lowcomms_state_change(struct sock *sk)
@@ -279,7 +246,7 @@ static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
279/* Close a remote connection and tidy up */ 246/* Close a remote connection and tidy up */
280static void close_connection(struct connection *con, bool and_other) 247static void close_connection(struct connection *con, bool and_other)
281{ 248{
282 down_write(&con->sock_sem); 249 mutex_lock(&con->sock_mutex);
283 250
284 if (con->sock) { 251 if (con->sock) {
285 sock_release(con->sock); 252 sock_release(con->sock);
@@ -294,7 +261,7 @@ static void close_connection(struct connection *con, bool and_other)
294 con->rx_page = NULL; 261 con->rx_page = NULL;
295 } 262 }
296 con->retries = 0; 263 con->retries = 0;
297 up_write(&con->sock_sem); 264 mutex_unlock(&con->sock_mutex);
298} 265}
299 266
300/* Data received from remote end */ 267/* Data received from remote end */
@@ -308,10 +275,13 @@ static int receive_from_sock(struct connection *con)
308 int r; 275 int r;
309 int call_again_soon = 0; 276 int call_again_soon = 0;
310 277
311 down_read(&con->sock_sem); 278 mutex_lock(&con->sock_mutex);
279
280 if (con->sock == NULL) {
281 ret = -EAGAIN;
282 goto out_close;
283 }
312 284
313 if (con->sock == NULL)
314 goto out;
315 if (con->rx_page == NULL) { 285 if (con->rx_page == NULL) {
316 /* 286 /*
317 * This doesn't need to be atomic, but I think it should 287 * This doesn't need to be atomic, but I think it should
@@ -359,6 +329,9 @@ static int receive_from_sock(struct connection *con)
359 329
360 if (ret <= 0) 330 if (ret <= 0)
361 goto out_close; 331 goto out_close;
332 if (ret == -EAGAIN)
333 goto out_resched;
334
362 if (ret == len) 335 if (ret == len)
363 call_again_soon = 1; 336 call_again_soon = 1;
364 cbuf_add(&con->cb, ret); 337 cbuf_add(&con->cb, ret);
@@ -381,24 +354,26 @@ static int receive_from_sock(struct connection *con)
381 con->rx_page = NULL; 354 con->rx_page = NULL;
382 } 355 }
383 356
384out:
385 if (call_again_soon) 357 if (call_again_soon)
386 goto out_resched; 358 goto out_resched;
387 up_read(&con->sock_sem); 359 mutex_unlock(&con->sock_mutex);
388 return 0; 360 return 0;
389 361
390out_resched: 362out_resched:
391 lowcomms_data_ready(con->sock->sk, 0); 363 if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
392 up_read(&con->sock_sem); 364 queue_work(recv_workqueue, &con->rwork);
393 cond_resched(); 365 mutex_unlock(&con->sock_mutex);
394 return 0; 366 return -EAGAIN;
395 367
396out_close: 368out_close:
397 up_read(&con->sock_sem); 369 mutex_unlock(&con->sock_mutex);
398 if (ret != -EAGAIN && !test_bit(CF_IS_OTHERCON, &con->flags)) { 370 if (ret != -EAGAIN && !test_bit(CF_IS_OTHERCON, &con->flags)) {
399 close_connection(con, false); 371 close_connection(con, false);
400 /* Reconnect when there is something to send */ 372 /* Reconnect when there is something to send */
401 } 373 }
374 /* Don't return success if we really got EOF */
375 if (ret == 0)
376 ret = -EAGAIN;
402 377
403 return ret; 378 return ret;
404} 379}
@@ -412,6 +387,7 @@ static int accept_from_sock(struct connection *con)
412 int len; 387 int len;
413 int nodeid; 388 int nodeid;
414 struct connection *newcon; 389 struct connection *newcon;
390 struct connection *addcon;
415 391
416 memset(&peeraddr, 0, sizeof(peeraddr)); 392 memset(&peeraddr, 0, sizeof(peeraddr));
417 result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM, 393 result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM,
@@ -419,7 +395,7 @@ static int accept_from_sock(struct connection *con)
419 if (result < 0) 395 if (result < 0)
420 return -ENOMEM; 396 return -ENOMEM;
421 397
422 down_read(&con->sock_sem); 398 mutex_lock_nested(&con->sock_mutex, 0);
423 399
424 result = -ENOTCONN; 400 result = -ENOTCONN;
425 if (con->sock == NULL) 401 if (con->sock == NULL)
@@ -445,7 +421,7 @@ static int accept_from_sock(struct connection *con)
445 if (dlm_addr_to_nodeid(&peeraddr, &nodeid)) { 421 if (dlm_addr_to_nodeid(&peeraddr, &nodeid)) {
446 printk("dlm: connect from non cluster node\n"); 422 printk("dlm: connect from non cluster node\n");
447 sock_release(newsock); 423 sock_release(newsock);
448 up_read(&con->sock_sem); 424 mutex_unlock(&con->sock_mutex);
449 return -1; 425 return -1;
450 } 426 }
451 427
@@ -462,7 +438,7 @@ static int accept_from_sock(struct connection *con)
462 result = -ENOMEM; 438 result = -ENOMEM;
463 goto accept_err; 439 goto accept_err;
464 } 440 }
465 down_write(&newcon->sock_sem); 441 mutex_lock_nested(&newcon->sock_mutex, 1);
466 if (newcon->sock) { 442 if (newcon->sock) {
467 struct connection *othercon = newcon->othercon; 443 struct connection *othercon = newcon->othercon;
468 444
@@ -470,41 +446,45 @@ static int accept_from_sock(struct connection *con)
470 othercon = kmem_cache_zalloc(con_cache, GFP_KERNEL); 446 othercon = kmem_cache_zalloc(con_cache, GFP_KERNEL);
471 if (!othercon) { 447 if (!othercon) {
472 printk("dlm: failed to allocate incoming socket\n"); 448 printk("dlm: failed to allocate incoming socket\n");
473 up_write(&newcon->sock_sem); 449 mutex_unlock(&newcon->sock_mutex);
474 result = -ENOMEM; 450 result = -ENOMEM;
475 goto accept_err; 451 goto accept_err;
476 } 452 }
477 othercon->nodeid = nodeid; 453 othercon->nodeid = nodeid;
478 othercon->rx_action = receive_from_sock; 454 othercon->rx_action = receive_from_sock;
479 init_rwsem(&othercon->sock_sem); 455 mutex_init(&othercon->sock_mutex);
456 INIT_WORK(&othercon->swork, process_send_sockets);
457 INIT_WORK(&othercon->rwork, process_recv_sockets);
480 set_bit(CF_IS_OTHERCON, &othercon->flags); 458 set_bit(CF_IS_OTHERCON, &othercon->flags);
481 newcon->othercon = othercon; 459 newcon->othercon = othercon;
482 } 460 }
483 othercon->sock = newsock; 461 othercon->sock = newsock;
484 newsock->sk->sk_user_data = othercon; 462 newsock->sk->sk_user_data = othercon;
485 add_sock(newsock, othercon); 463 add_sock(newsock, othercon);
464 addcon = othercon;
486 } 465 }
487 else { 466 else {
488 newsock->sk->sk_user_data = newcon; 467 newsock->sk->sk_user_data = newcon;
489 newcon->rx_action = receive_from_sock; 468 newcon->rx_action = receive_from_sock;
490 add_sock(newsock, newcon); 469 add_sock(newsock, newcon);
491 470 addcon = newcon;
492 } 471 }
493 472
494 up_write(&newcon->sock_sem); 473 mutex_unlock(&newcon->sock_mutex);
495 474
496 /* 475 /*
497 * Add it to the active queue in case we got data 476 * Add it to the active queue in case we got data
498 * beween processing the accept adding the socket 477 * beween processing the accept adding the socket
499 * to the read_sockets list 478 * to the read_sockets list
500 */ 479 */
501 lowcomms_data_ready(newsock->sk, 0); 480 if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
502 up_read(&con->sock_sem); 481 queue_work(recv_workqueue, &addcon->rwork);
482 mutex_unlock(&con->sock_mutex);
503 483
504 return 0; 484 return 0;
505 485
506accept_err: 486accept_err:
507 up_read(&con->sock_sem); 487 mutex_unlock(&con->sock_mutex);
508 sock_release(newsock); 488 sock_release(newsock);
509 489
510 if (result != -EAGAIN) 490 if (result != -EAGAIN)
@@ -525,7 +505,7 @@ static void connect_to_sock(struct connection *con)
525 return; 505 return;
526 } 506 }
527 507
528 down_write(&con->sock_sem); 508 mutex_lock(&con->sock_mutex);
529 if (con->retries++ > MAX_CONNECT_RETRIES) 509 if (con->retries++ > MAX_CONNECT_RETRIES)
530 goto out; 510 goto out;
531 511
@@ -548,7 +528,7 @@ static void connect_to_sock(struct connection *con)
548 sock->sk->sk_user_data = con; 528 sock->sk->sk_user_data = con;
549 con->rx_action = receive_from_sock; 529 con->rx_action = receive_from_sock;
550 530
551 make_sockaddr(&saddr, dlm_config.tcp_port, &addr_len); 531 make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);
552 532
553 add_sock(sock, con); 533 add_sock(sock, con);
554 534
@@ -577,7 +557,7 @@ out_err:
577 result = 0; 557 result = 0;
578 } 558 }
579out: 559out:
580 up_write(&con->sock_sem); 560 mutex_unlock(&con->sock_mutex);
581 return; 561 return;
582} 562}
583 563
@@ -616,10 +596,10 @@ static struct socket *create_listen_sock(struct connection *con,
616 con->sock = sock; 596 con->sock = sock;
617 597
618 /* Bind to our port */ 598 /* Bind to our port */
619 make_sockaddr(saddr, dlm_config.tcp_port, &addr_len); 599 make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len);
620 result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len); 600 result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
621 if (result < 0) { 601 if (result < 0) {
622 printk("dlm: Can't bind to port %d\n", dlm_config.tcp_port); 602 printk("dlm: Can't bind to port %d\n", dlm_config.ci_tcp_port);
623 sock_release(sock); 603 sock_release(sock);
624 sock = NULL; 604 sock = NULL;
625 con->sock = NULL; 605 con->sock = NULL;
@@ -638,7 +618,7 @@ static struct socket *create_listen_sock(struct connection *con,
638 618
639 result = sock->ops->listen(sock, 5); 619 result = sock->ops->listen(sock, 5);
640 if (result < 0) { 620 if (result < 0) {
641 printk("dlm: Can't listen on port %d\n", dlm_config.tcp_port); 621 printk("dlm: Can't listen on port %d\n", dlm_config.ci_tcp_port);
642 sock_release(sock); 622 sock_release(sock);
643 sock = NULL; 623 sock = NULL;
644 goto create_out; 624 goto create_out;
@@ -709,6 +689,7 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len,
709 if (!con) 689 if (!con)
710 return NULL; 690 return NULL;
711 691
692 spin_lock(&con->writequeue_lock);
712 e = list_entry(con->writequeue.prev, struct writequeue_entry, list); 693 e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
713 if ((&e->list == &con->writequeue) || 694 if ((&e->list == &con->writequeue) ||
714 (PAGE_CACHE_SIZE - e->end < len)) { 695 (PAGE_CACHE_SIZE - e->end < len)) {
@@ -747,6 +728,7 @@ void dlm_lowcomms_commit_buffer(void *mh)
747 struct connection *con = e->con; 728 struct connection *con = e->con;
748 int users; 729 int users;
749 730
731 spin_lock(&con->writequeue_lock);
750 users = --e->users; 732 users = --e->users;
751 if (users) 733 if (users)
752 goto out; 734 goto out;
@@ -754,12 +736,8 @@ void dlm_lowcomms_commit_buffer(void *mh)
754 kunmap(e->page); 736 kunmap(e->page);
755 spin_unlock(&con->writequeue_lock); 737 spin_unlock(&con->writequeue_lock);
756 738
757 if (test_and_set_bit(CF_WRITE_PENDING, &con->flags) == 0) { 739 if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) {
758 spin_lock_bh(&write_sockets_lock); 740 queue_work(send_workqueue, &con->swork);
759 list_add_tail(&con->write_list, &write_sockets);
760 spin_unlock_bh(&write_sockets_lock);
761
762 wake_up_interruptible(&lowcomms_send_waitq);
763 } 741 }
764 return; 742 return;
765 743
@@ -783,7 +761,7 @@ static void send_to_sock(struct connection *con)
783 struct writequeue_entry *e; 761 struct writequeue_entry *e;
784 int len, offset; 762 int len, offset;
785 763
786 down_read(&con->sock_sem); 764 mutex_lock(&con->sock_mutex);
787 if (con->sock == NULL) 765 if (con->sock == NULL)
788 goto out_connect; 766 goto out_connect;
789 767
@@ -800,6 +778,7 @@ static void send_to_sock(struct connection *con)
800 offset = e->offset; 778 offset = e->offset;
801 BUG_ON(len == 0 && e->users == 0); 779 BUG_ON(len == 0 && e->users == 0);
802 spin_unlock(&con->writequeue_lock); 780 spin_unlock(&con->writequeue_lock);
781 kmap(e->page);
803 782
804 ret = 0; 783 ret = 0;
805 if (len) { 784 if (len) {
@@ -828,18 +807,18 @@ static void send_to_sock(struct connection *con)
828 } 807 }
829 spin_unlock(&con->writequeue_lock); 808 spin_unlock(&con->writequeue_lock);
830out: 809out:
831 up_read(&con->sock_sem); 810 mutex_unlock(&con->sock_mutex);
832 return; 811 return;
833 812
834send_error: 813send_error:
835 up_read(&con->sock_sem); 814 mutex_unlock(&con->sock_mutex);
836 close_connection(con, false); 815 close_connection(con, false);
837 lowcomms_connect_sock(con); 816 lowcomms_connect_sock(con);
838 return; 817 return;
839 818
840out_connect: 819out_connect:
841 up_read(&con->sock_sem); 820 mutex_unlock(&con->sock_mutex);
842 lowcomms_connect_sock(con); 821 connect_to_sock(con);
843 return; 822 return;
844} 823}
845 824
@@ -872,7 +851,6 @@ int dlm_lowcomms_close(int nodeid)
872 if (con) { 851 if (con) {
873 clean_one_writequeue(con); 852 clean_one_writequeue(con);
874 close_connection(con, true); 853 close_connection(con, true);
875 atomic_set(&con->waiting_requests, 0);
876 } 854 }
877 return 0; 855 return 0;
878 856
@@ -880,102 +858,29 @@ out:
880 return -1; 858 return -1;
881} 859}
882 860
883/* API send message call, may queue the request */
884/* N.B. This is the old interface - use the new one for new calls */
885int lowcomms_send_message(int nodeid, char *buf, int len, gfp_t allocation)
886{
887 struct writequeue_entry *e;
888 char *b;
889
890 e = dlm_lowcomms_get_buffer(nodeid, len, allocation, &b);
891 if (e) {
892 memcpy(b, buf, len);
893 dlm_lowcomms_commit_buffer(e);
894 return 0;
895 }
896 return -ENOBUFS;
897}
898
899/* Look for activity on active sockets */ 861/* Look for activity on active sockets */
900static void process_sockets(void) 862static void process_recv_sockets(struct work_struct *work)
901{ 863{
902 struct list_head *list; 864 struct connection *con = container_of(work, struct connection, rwork);
903 struct list_head *temp; 865 int err;
904 int count = 0;
905
906 spin_lock_bh(&read_sockets_lock);
907 list_for_each_safe(list, temp, &read_sockets) {
908 866
909 struct connection *con = 867 clear_bit(CF_READ_PENDING, &con->flags);
910 list_entry(list, struct connection, read_list); 868 do {
911 list_del(&con->read_list); 869 err = con->rx_action(con);
912 clear_bit(CF_READ_PENDING, &con->flags); 870 } while (!err);
913
914 spin_unlock_bh(&read_sockets_lock);
915
916 /* This can reach zero if we are processing requests
917 * as they come in.
918 */
919 if (atomic_read(&con->waiting_requests) == 0) {
920 spin_lock_bh(&read_sockets_lock);
921 continue;
922 }
923
924 do {
925 con->rx_action(con);
926
927 /* Don't starve out everyone else */
928 if (++count >= MAX_RX_MSG_COUNT) {
929 cond_resched();
930 count = 0;
931 }
932
933 } while (!atomic_dec_and_test(&con->waiting_requests) &&
934 !kthread_should_stop());
935
936 spin_lock_bh(&read_sockets_lock);
937 }
938 spin_unlock_bh(&read_sockets_lock);
939} 871}
940 872
941/* Try to send any messages that are pending
942 */
943static void process_output_queue(void)
944{
945 struct list_head *list;
946 struct list_head *temp;
947
948 spin_lock_bh(&write_sockets_lock);
949 list_for_each_safe(list, temp, &write_sockets) {
950 struct connection *con =
951 list_entry(list, struct connection, write_list);
952 clear_bit(CF_WRITE_PENDING, &con->flags);
953 list_del(&con->write_list);
954
955 spin_unlock_bh(&write_sockets_lock);
956 send_to_sock(con);
957 spin_lock_bh(&write_sockets_lock);
958 }
959 spin_unlock_bh(&write_sockets_lock);
960}
961 873
962static void process_state_queue(void) 874static void process_send_sockets(struct work_struct *work)
963{ 875{
964 struct list_head *list; 876 struct connection *con = container_of(work, struct connection, swork);
965 struct list_head *temp;
966
967 spin_lock_bh(&state_sockets_lock);
968 list_for_each_safe(list, temp, &state_sockets) {
969 struct connection *con =
970 list_entry(list, struct connection, state_list);
971 list_del(&con->state_list);
972 clear_bit(CF_CONNECT_PENDING, &con->flags);
973 spin_unlock_bh(&state_sockets_lock);
974 877
878 if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
975 connect_to_sock(con); 879 connect_to_sock(con);
976 spin_lock_bh(&state_sockets_lock);
977 } 880 }
978 spin_unlock_bh(&state_sockets_lock); 881
882 clear_bit(CF_WRITE_PENDING, &con->flags);
883 send_to_sock(con);
979} 884}
980 885
981 886
@@ -992,109 +897,33 @@ static void clean_writequeues(void)
992 } 897 }
993} 898}
994 899
995static int read_list_empty(void) 900static void work_stop(void)
996{ 901{
997 int status; 902 destroy_workqueue(recv_workqueue);
998 903 destroy_workqueue(send_workqueue);
999 spin_lock_bh(&read_sockets_lock);
1000 status = list_empty(&read_sockets);
1001 spin_unlock_bh(&read_sockets_lock);
1002
1003 return status;
1004}
1005
1006/* DLM Transport comms receive daemon */
1007static int dlm_recvd(void *data)
1008{
1009 init_waitqueue_entry(&lowcomms_recv_waitq_head, current);
1010 add_wait_queue(&lowcomms_recv_waitq, &lowcomms_recv_waitq_head);
1011
1012 while (!kthread_should_stop()) {
1013 set_current_state(TASK_INTERRUPTIBLE);
1014 if (read_list_empty())
1015 cond_resched();
1016 set_current_state(TASK_RUNNING);
1017
1018 process_sockets();
1019 }
1020
1021 return 0;
1022} 904}
1023 905
1024static int write_and_state_lists_empty(void) 906static int work_start(void)
1025{ 907{
1026 int status;
1027
1028 spin_lock_bh(&write_sockets_lock);
1029 status = list_empty(&write_sockets);
1030 spin_unlock_bh(&write_sockets_lock);
1031
1032 spin_lock_bh(&state_sockets_lock);
1033 if (list_empty(&state_sockets) == 0)
1034 status = 0;
1035 spin_unlock_bh(&state_sockets_lock);
1036
1037 return status;
1038}
1039
1040/* DLM Transport send daemon */
1041static int dlm_sendd(void *data)
1042{
1043 init_waitqueue_entry(&lowcomms_send_waitq_head, current);
1044 add_wait_queue(&lowcomms_send_waitq, &lowcomms_send_waitq_head);
1045
1046 while (!kthread_should_stop()) {
1047 set_current_state(TASK_INTERRUPTIBLE);
1048 if (write_and_state_lists_empty())
1049 cond_resched();
1050 set_current_state(TASK_RUNNING);
1051
1052 process_state_queue();
1053 process_output_queue();
1054 }
1055
1056 return 0;
1057}
1058
1059static void daemons_stop(void)
1060{
1061 kthread_stop(recv_task);
1062 kthread_stop(send_task);
1063}
1064
1065static int daemons_start(void)
1066{
1067 struct task_struct *p;
1068 int error; 908 int error;
1069 909 recv_workqueue = create_workqueue("dlm_recv");
1070 p = kthread_run(dlm_recvd, NULL, "dlm_recvd"); 910 error = IS_ERR(recv_workqueue);
1071 error = IS_ERR(p);
1072 if (error) { 911 if (error) {
1073 log_print("can't start dlm_recvd %d", error); 912 log_print("can't start dlm_recv %d", error);
1074 return error; 913 return error;
1075 } 914 }
1076 recv_task = p;
1077 915
1078 p = kthread_run(dlm_sendd, NULL, "dlm_sendd"); 916 send_workqueue = create_singlethread_workqueue("dlm_send");
1079 error = IS_ERR(p); 917 error = IS_ERR(send_workqueue);
1080 if (error) { 918 if (error) {
1081 log_print("can't start dlm_sendd %d", error); 919 log_print("can't start dlm_send %d", error);
1082 kthread_stop(recv_task); 920 destroy_workqueue(recv_workqueue);
1083 return error; 921 return error;
1084 } 922 }
1085 send_task = p;
1086 923
1087 return 0; 924 return 0;
1088} 925}
1089 926
1090/*
1091 * Return the largest buffer size we can cope with.
1092 */
1093int lowcomms_max_buffer_size(void)
1094{
1095 return PAGE_CACHE_SIZE;
1096}
1097
1098void dlm_lowcomms_stop(void) 927void dlm_lowcomms_stop(void)
1099{ 928{
1100 int i; 929 int i;
@@ -1107,7 +936,7 @@ void dlm_lowcomms_stop(void)
1107 connections[i]->flags |= 0xFF; 936 connections[i]->flags |= 0xFF;
1108 } 937 }
1109 938
1110 daemons_stop(); 939 work_stop();
1111 clean_writequeues(); 940 clean_writequeues();
1112 941
1113 for (i = 0; i < conn_array_size; i++) { 942 for (i = 0; i < conn_array_size; i++) {
@@ -1159,7 +988,7 @@ int dlm_lowcomms_start(void)
1159 if (error) 988 if (error)
1160 goto fail_unlisten; 989 goto fail_unlisten;
1161 990
1162 error = daemons_start(); 991 error = work_start();
1163 if (error) 992 if (error)
1164 goto fail_unlisten; 993 goto fail_unlisten;
1165 994
diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
index c9b1c3d535f4..a5126e0c68a6 100644
--- a/fs/dlm/midcomms.c
+++ b/fs/dlm/midcomms.c
@@ -82,7 +82,7 @@ int dlm_process_incoming_buffer(int nodeid, const void *base,
82 if (msglen < sizeof(struct dlm_header)) 82 if (msglen < sizeof(struct dlm_header))
83 break; 83 break;
84 err = -E2BIG; 84 err = -E2BIG;
85 if (msglen > dlm_config.buffer_size) { 85 if (msglen > dlm_config.ci_buffer_size) {
86 log_print("message size %d from %d too big, buf len %d", 86 log_print("message size %d from %d too big, buf len %d",
87 msglen, nodeid, len); 87 msglen, nodeid, len);
88 break; 88 break;
@@ -103,7 +103,7 @@ int dlm_process_incoming_buffer(int nodeid, const void *base,
103 103
104 if (msglen > sizeof(__tmp) && 104 if (msglen > sizeof(__tmp) &&
105 msg == (struct dlm_header *) __tmp) { 105 msg == (struct dlm_header *) __tmp) {
106 msg = kmalloc(dlm_config.buffer_size, GFP_KERNEL); 106 msg = kmalloc(dlm_config.ci_buffer_size, GFP_KERNEL);
107 if (msg == NULL) 107 if (msg == NULL)
108 return ret; 108 return ret;
109 } 109 }
diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c
index 4cc31be9cd9d..6bfbd6153809 100644
--- a/fs/dlm/rcom.c
+++ b/fs/dlm/rcom.c
@@ -56,6 +56,10 @@ static int create_rcom(struct dlm_ls *ls, int to_nodeid, int type, int len,
56 56
57 rc->rc_type = type; 57 rc->rc_type = type;
58 58
59 spin_lock(&ls->ls_recover_lock);
60 rc->rc_seq = ls->ls_recover_seq;
61 spin_unlock(&ls->ls_recover_lock);
62
59 *mh_ret = mh; 63 *mh_ret = mh;
60 *rc_ret = rc; 64 *rc_ret = rc;
61 return 0; 65 return 0;
@@ -78,8 +82,17 @@ static void make_config(struct dlm_ls *ls, struct rcom_config *rf)
78 rf->rf_lsflags = ls->ls_exflags; 82 rf->rf_lsflags = ls->ls_exflags;
79} 83}
80 84
81static int check_config(struct dlm_ls *ls, struct rcom_config *rf, int nodeid) 85static int check_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
82{ 86{
87 struct rcom_config *rf = (struct rcom_config *) rc->rc_buf;
88
89 if ((rc->rc_header.h_version & 0xFFFF0000) != DLM_HEADER_MAJOR) {
90 log_error(ls, "version mismatch: %x nodeid %d: %x",
91 DLM_HEADER_MAJOR | DLM_HEADER_MINOR, nodeid,
92 rc->rc_header.h_version);
93 return -EINVAL;
94 }
95
83 if (rf->rf_lvblen != ls->ls_lvblen || 96 if (rf->rf_lvblen != ls->ls_lvblen ||
84 rf->rf_lsflags != ls->ls_exflags) { 97 rf->rf_lsflags != ls->ls_exflags) {
85 log_error(ls, "config mismatch: %d,%x nodeid %d: %d,%x", 98 log_error(ls, "config mismatch: %d,%x nodeid %d: %d,%x",
@@ -125,7 +138,7 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid)
125 goto out; 138 goto out;
126 139
127 allow_sync_reply(ls, &rc->rc_id); 140 allow_sync_reply(ls, &rc->rc_id);
128 memset(ls->ls_recover_buf, 0, dlm_config.buffer_size); 141 memset(ls->ls_recover_buf, 0, dlm_config.ci_buffer_size);
129 142
130 send_rcom(ls, mh, rc); 143 send_rcom(ls, mh, rc);
131 144
@@ -141,8 +154,7 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid)
141 log_debug(ls, "remote node %d not ready", nodeid); 154 log_debug(ls, "remote node %d not ready", nodeid);
142 rc->rc_result = 0; 155 rc->rc_result = 0;
143 } else 156 } else
144 error = check_config(ls, (struct rcom_config *) rc->rc_buf, 157 error = check_config(ls, rc, nodeid);
145 nodeid);
146 /* the caller looks at rc_result for the remote recovery status */ 158 /* the caller looks at rc_result for the remote recovery status */
147 out: 159 out:
148 return error; 160 return error;
@@ -159,6 +171,7 @@ static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in)
159 if (error) 171 if (error)
160 return; 172 return;
161 rc->rc_id = rc_in->rc_id; 173 rc->rc_id = rc_in->rc_id;
174 rc->rc_seq_reply = rc_in->rc_seq;
162 rc->rc_result = dlm_recover_status(ls); 175 rc->rc_result = dlm_recover_status(ls);
163 make_config(ls, (struct rcom_config *) rc->rc_buf); 176 make_config(ls, (struct rcom_config *) rc->rc_buf);
164 177
@@ -200,7 +213,7 @@ int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len)
200 if (nodeid == dlm_our_nodeid()) { 213 if (nodeid == dlm_our_nodeid()) {
201 dlm_copy_master_names(ls, last_name, last_len, 214 dlm_copy_master_names(ls, last_name, last_len,
202 ls->ls_recover_buf + len, 215 ls->ls_recover_buf + len,
203 dlm_config.buffer_size - len, nodeid); 216 dlm_config.ci_buffer_size - len, nodeid);
204 goto out; 217 goto out;
205 } 218 }
206 219
@@ -210,7 +223,7 @@ int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len)
210 memcpy(rc->rc_buf, last_name, last_len); 223 memcpy(rc->rc_buf, last_name, last_len);
211 224
212 allow_sync_reply(ls, &rc->rc_id); 225 allow_sync_reply(ls, &rc->rc_id);
213 memset(ls->ls_recover_buf, 0, dlm_config.buffer_size); 226 memset(ls->ls_recover_buf, 0, dlm_config.ci_buffer_size);
214 227
215 send_rcom(ls, mh, rc); 228 send_rcom(ls, mh, rc);
216 229
@@ -224,30 +237,17 @@ static void receive_rcom_names(struct dlm_ls *ls, struct dlm_rcom *rc_in)
224{ 237{
225 struct dlm_rcom *rc; 238 struct dlm_rcom *rc;
226 struct dlm_mhandle *mh; 239 struct dlm_mhandle *mh;
227 int error, inlen, outlen; 240 int error, inlen, outlen, nodeid;
228 int nodeid = rc_in->rc_header.h_nodeid;
229 uint32_t status = dlm_recover_status(ls);
230
231 /*
232 * We can't run dlm_dir_rebuild_send (which uses ls_nodes) while
233 * dlm_recoverd is running ls_nodes_reconfig (which changes ls_nodes).
234 * It could only happen in rare cases where we get a late NAMES
235 * message from a previous instance of recovery.
236 */
237
238 if (!(status & DLM_RS_NODES)) {
239 log_debug(ls, "ignoring RCOM_NAMES from %u", nodeid);
240 return;
241 }
242 241
243 nodeid = rc_in->rc_header.h_nodeid; 242 nodeid = rc_in->rc_header.h_nodeid;
244 inlen = rc_in->rc_header.h_length - sizeof(struct dlm_rcom); 243 inlen = rc_in->rc_header.h_length - sizeof(struct dlm_rcom);
245 outlen = dlm_config.buffer_size - sizeof(struct dlm_rcom); 244 outlen = dlm_config.ci_buffer_size - sizeof(struct dlm_rcom);
246 245
247 error = create_rcom(ls, nodeid, DLM_RCOM_NAMES_REPLY, outlen, &rc, &mh); 246 error = create_rcom(ls, nodeid, DLM_RCOM_NAMES_REPLY, outlen, &rc, &mh);
248 if (error) 247 if (error)
249 return; 248 return;
250 rc->rc_id = rc_in->rc_id; 249 rc->rc_id = rc_in->rc_id;
250 rc->rc_seq_reply = rc_in->rc_seq;
251 251
252 dlm_copy_master_names(ls, rc_in->rc_buf, inlen, rc->rc_buf, outlen, 252 dlm_copy_master_names(ls, rc_in->rc_buf, inlen, rc->rc_buf, outlen,
253 nodeid); 253 nodeid);
@@ -294,6 +294,7 @@ static void receive_rcom_lookup(struct dlm_ls *ls, struct dlm_rcom *rc_in)
294 ret_nodeid = error; 294 ret_nodeid = error;
295 rc->rc_result = ret_nodeid; 295 rc->rc_result = ret_nodeid;
296 rc->rc_id = rc_in->rc_id; 296 rc->rc_id = rc_in->rc_id;
297 rc->rc_seq_reply = rc_in->rc_seq;
297 298
298 send_rcom(ls, mh, rc); 299 send_rcom(ls, mh, rc);
299} 300}
@@ -375,20 +376,13 @@ static void receive_rcom_lock(struct dlm_ls *ls, struct dlm_rcom *rc_in)
375 376
376 memcpy(rc->rc_buf, rc_in->rc_buf, sizeof(struct rcom_lock)); 377 memcpy(rc->rc_buf, rc_in->rc_buf, sizeof(struct rcom_lock));
377 rc->rc_id = rc_in->rc_id; 378 rc->rc_id = rc_in->rc_id;
379 rc->rc_seq_reply = rc_in->rc_seq;
378 380
379 send_rcom(ls, mh, rc); 381 send_rcom(ls, mh, rc);
380} 382}
381 383
382static void receive_rcom_lock_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in) 384static void receive_rcom_lock_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
383{ 385{
384 uint32_t status = dlm_recover_status(ls);
385
386 if (!(status & DLM_RS_DIR)) {
387 log_debug(ls, "ignoring RCOM_LOCK_REPLY from %u",
388 rc_in->rc_header.h_nodeid);
389 return;
390 }
391
392 dlm_recover_process_copy(ls, rc_in); 386 dlm_recover_process_copy(ls, rc_in);
393} 387}
394 388
@@ -415,6 +409,7 @@ static int send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
415 409
416 rc->rc_type = DLM_RCOM_STATUS_REPLY; 410 rc->rc_type = DLM_RCOM_STATUS_REPLY;
417 rc->rc_id = rc_in->rc_id; 411 rc->rc_id = rc_in->rc_id;
412 rc->rc_seq_reply = rc_in->rc_seq;
418 rc->rc_result = -ESRCH; 413 rc->rc_result = -ESRCH;
419 414
420 rf = (struct rcom_config *) rc->rc_buf; 415 rf = (struct rcom_config *) rc->rc_buf;
@@ -426,6 +421,31 @@ static int send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
426 return 0; 421 return 0;
427} 422}
428 423
424static int is_old_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
425{
426 uint64_t seq;
427 int rv = 0;
428
429 switch (rc->rc_type) {
430 case DLM_RCOM_STATUS_REPLY:
431 case DLM_RCOM_NAMES_REPLY:
432 case DLM_RCOM_LOOKUP_REPLY:
433 case DLM_RCOM_LOCK_REPLY:
434 spin_lock(&ls->ls_recover_lock);
435 seq = ls->ls_recover_seq;
436 spin_unlock(&ls->ls_recover_lock);
437 if (rc->rc_seq_reply != seq) {
438 log_debug(ls, "ignoring old reply %x from %d "
439 "seq_reply %llx expect %llx",
440 rc->rc_type, rc->rc_header.h_nodeid,
441 (unsigned long long)rc->rc_seq_reply,
442 (unsigned long long)seq);
443 rv = 1;
444 }
445 }
446 return rv;
447}
448
429/* Called by dlm_recvd; corresponds to dlm_receive_message() but special 449/* Called by dlm_recvd; corresponds to dlm_receive_message() but special
430 recovery-only comms are sent through here. */ 450 recovery-only comms are sent through here. */
431 451
@@ -449,11 +469,14 @@ void dlm_receive_rcom(struct dlm_header *hd, int nodeid)
449 } 469 }
450 470
451 if (dlm_recovery_stopped(ls) && (rc->rc_type != DLM_RCOM_STATUS)) { 471 if (dlm_recovery_stopped(ls) && (rc->rc_type != DLM_RCOM_STATUS)) {
452 log_error(ls, "ignoring recovery message %x from %d", 472 log_debug(ls, "ignoring recovery message %x from %d",
453 rc->rc_type, nodeid); 473 rc->rc_type, nodeid);
454 goto out; 474 goto out;
455 } 475 }
456 476
477 if (is_old_reply(ls, rc))
478 goto out;
479
457 if (nodeid != rc->rc_header.h_nodeid) { 480 if (nodeid != rc->rc_header.h_nodeid) {
458 log_error(ls, "bad rcom nodeid %d from %d", 481 log_error(ls, "bad rcom nodeid %d from %d",
459 rc->rc_header.h_nodeid, nodeid); 482 rc->rc_header.h_nodeid, nodeid);
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index cf9f6831bab5..c2cc7694cd16 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -44,7 +44,7 @@
44static void dlm_wait_timer_fn(unsigned long data) 44static void dlm_wait_timer_fn(unsigned long data)
45{ 45{
46 struct dlm_ls *ls = (struct dlm_ls *) data; 46 struct dlm_ls *ls = (struct dlm_ls *) data;
47 mod_timer(&ls->ls_timer, jiffies + (dlm_config.recover_timer * HZ)); 47 mod_timer(&ls->ls_timer, jiffies + (dlm_config.ci_recover_timer * HZ));
48 wake_up(&ls->ls_wait_general); 48 wake_up(&ls->ls_wait_general);
49} 49}
50 50
@@ -55,7 +55,7 @@ int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
55 init_timer(&ls->ls_timer); 55 init_timer(&ls->ls_timer);
56 ls->ls_timer.function = dlm_wait_timer_fn; 56 ls->ls_timer.function = dlm_wait_timer_fn;
57 ls->ls_timer.data = (long) ls; 57 ls->ls_timer.data = (long) ls;
58 ls->ls_timer.expires = jiffies + (dlm_config.recover_timer * HZ); 58 ls->ls_timer.expires = jiffies + (dlm_config.ci_recover_timer * HZ);
59 add_timer(&ls->ls_timer); 59 add_timer(&ls->ls_timer);
60 60
61 wait_event(ls->ls_wait_general, testfn(ls) || dlm_recovery_stopped(ls)); 61 wait_event(ls->ls_wait_general, testfn(ls) || dlm_recovery_stopped(ls));
@@ -397,7 +397,9 @@ int dlm_recover_masters(struct dlm_ls *ls)
397 397
398 if (dlm_no_directory(ls)) 398 if (dlm_no_directory(ls))
399 count += recover_master_static(r); 399 count += recover_master_static(r);
400 else if (!is_master(r) && dlm_is_removed(ls, r->res_nodeid)) { 400 else if (!is_master(r) &&
401 (dlm_is_removed(ls, r->res_nodeid) ||
402 rsb_flag(r, RSB_NEW_MASTER))) {
401 recover_master(r); 403 recover_master(r);
402 count++; 404 count++;
403 } 405 }
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index 650536aa5139..3cb636d60249 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -77,7 +77,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
77 77
78 error = dlm_recover_members(ls, rv, &neg); 78 error = dlm_recover_members(ls, rv, &neg);
79 if (error) { 79 if (error) {
80 log_error(ls, "recover_members failed %d", error); 80 log_debug(ls, "recover_members failed %d", error);
81 goto fail; 81 goto fail;
82 } 82 }
83 start = jiffies; 83 start = jiffies;
@@ -89,7 +89,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
89 89
90 error = dlm_recover_directory(ls); 90 error = dlm_recover_directory(ls);
91 if (error) { 91 if (error) {
92 log_error(ls, "recover_directory failed %d", error); 92 log_debug(ls, "recover_directory failed %d", error);
93 goto fail; 93 goto fail;
94 } 94 }
95 95
@@ -99,7 +99,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
99 99
100 error = dlm_recover_directory_wait(ls); 100 error = dlm_recover_directory_wait(ls);
101 if (error) { 101 if (error) {
102 log_error(ls, "recover_directory_wait failed %d", error); 102 log_debug(ls, "recover_directory_wait failed %d", error);
103 goto fail; 103 goto fail;
104 } 104 }
105 105
@@ -129,7 +129,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
129 129
130 error = dlm_recover_masters(ls); 130 error = dlm_recover_masters(ls);
131 if (error) { 131 if (error) {
132 log_error(ls, "recover_masters failed %d", error); 132 log_debug(ls, "recover_masters failed %d", error);
133 goto fail; 133 goto fail;
134 } 134 }
135 135
@@ -139,13 +139,13 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
139 139
140 error = dlm_recover_locks(ls); 140 error = dlm_recover_locks(ls);
141 if (error) { 141 if (error) {
142 log_error(ls, "recover_locks failed %d", error); 142 log_debug(ls, "recover_locks failed %d", error);
143 goto fail; 143 goto fail;
144 } 144 }
145 145
146 error = dlm_recover_locks_wait(ls); 146 error = dlm_recover_locks_wait(ls);
147 if (error) { 147 if (error) {
148 log_error(ls, "recover_locks_wait failed %d", error); 148 log_debug(ls, "recover_locks_wait failed %d", error);
149 goto fail; 149 goto fail;
150 } 150 }
151 151
@@ -166,7 +166,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
166 166
167 error = dlm_recover_locks_wait(ls); 167 error = dlm_recover_locks_wait(ls);
168 if (error) { 168 if (error) {
169 log_error(ls, "recover_locks_wait failed %d", error); 169 log_debug(ls, "recover_locks_wait failed %d", error);
170 goto fail; 170 goto fail;
171 } 171 }
172 } 172 }
@@ -184,7 +184,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
184 dlm_set_recover_status(ls, DLM_RS_DONE); 184 dlm_set_recover_status(ls, DLM_RS_DONE);
185 error = dlm_recover_done_wait(ls); 185 error = dlm_recover_done_wait(ls);
186 if (error) { 186 if (error) {
187 log_error(ls, "recover_done_wait failed %d", error); 187 log_debug(ls, "recover_done_wait failed %d", error);
188 goto fail; 188 goto fail;
189 } 189 }
190 190
@@ -192,19 +192,19 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
192 192
193 error = enable_locking(ls, rv->seq); 193 error = enable_locking(ls, rv->seq);
194 if (error) { 194 if (error) {
195 log_error(ls, "enable_locking failed %d", error); 195 log_debug(ls, "enable_locking failed %d", error);
196 goto fail; 196 goto fail;
197 } 197 }
198 198
199 error = dlm_process_requestqueue(ls); 199 error = dlm_process_requestqueue(ls);
200 if (error) { 200 if (error) {
201 log_error(ls, "process_requestqueue failed %d", error); 201 log_debug(ls, "process_requestqueue failed %d", error);
202 goto fail; 202 goto fail;
203 } 203 }
204 204
205 error = dlm_recover_waiters_post(ls); 205 error = dlm_recover_waiters_post(ls);
206 if (error) { 206 if (error) {
207 log_error(ls, "recover_waiters_post failed %d", error); 207 log_debug(ls, "recover_waiters_post failed %d", error);
208 goto fail; 208 goto fail;
209 } 209 }
210 210
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index c37e93e4f2df..d378b7fe2a1e 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -180,6 +180,14 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, int type)
180 ua->lksb.sb_status == -EAGAIN && !list_empty(&lkb->lkb_ownqueue)) 180 ua->lksb.sb_status == -EAGAIN && !list_empty(&lkb->lkb_ownqueue))
181 remove_ownqueue = 1; 181 remove_ownqueue = 1;
182 182
183 /* unlocks or cancels of waiting requests need to be removed from the
184 proc's unlocking list, again there must be a better way... */
185
186 if (ua->lksb.sb_status == -DLM_EUNLOCK ||
187 (ua->lksb.sb_status == -DLM_ECANCEL &&
188 lkb->lkb_grmode == DLM_LOCK_IV))
189 remove_ownqueue = 1;
190
183 /* We want to copy the lvb to userspace when the completion 191 /* We want to copy the lvb to userspace when the completion
184 ast is read if the status is 0, the lock has an lvb and 192 ast is read if the status is 0, the lock has an lvb and
185 lvb_ops says we should. We could probably have set_lvb_lock() 193 lvb_ops says we should. We could probably have set_lvb_lock()
@@ -523,6 +531,7 @@ static int device_open(struct inode *inode, struct file *file)
523 proc->lockspace = ls->ls_local_handle; 531 proc->lockspace = ls->ls_local_handle;
524 INIT_LIST_HEAD(&proc->asts); 532 INIT_LIST_HEAD(&proc->asts);
525 INIT_LIST_HEAD(&proc->locks); 533 INIT_LIST_HEAD(&proc->locks);
534 INIT_LIST_HEAD(&proc->unlocking);
526 spin_lock_init(&proc->asts_spin); 535 spin_lock_init(&proc->asts_spin);
527 spin_lock_init(&proc->locks_spin); 536 spin_lock_init(&proc->locks_spin);
528 init_waitqueue_head(&proc->wait); 537 init_waitqueue_head(&proc->wait);
diff --git a/fs/dlm/util.c b/fs/dlm/util.c
index 767197db9944..963889cf6740 100644
--- a/fs/dlm/util.c
+++ b/fs/dlm/util.c
@@ -134,6 +134,8 @@ void dlm_rcom_out(struct dlm_rcom *rc)
134 rc->rc_type = cpu_to_le32(rc->rc_type); 134 rc->rc_type = cpu_to_le32(rc->rc_type);
135 rc->rc_result = cpu_to_le32(rc->rc_result); 135 rc->rc_result = cpu_to_le32(rc->rc_result);
136 rc->rc_id = cpu_to_le64(rc->rc_id); 136 rc->rc_id = cpu_to_le64(rc->rc_id);
137 rc->rc_seq = cpu_to_le64(rc->rc_seq);
138 rc->rc_seq_reply = cpu_to_le64(rc->rc_seq_reply);
137 139
138 if (type == DLM_RCOM_LOCK) 140 if (type == DLM_RCOM_LOCK)
139 rcom_lock_out((struct rcom_lock *) rc->rc_buf); 141 rcom_lock_out((struct rcom_lock *) rc->rc_buf);
@@ -151,6 +153,8 @@ void dlm_rcom_in(struct dlm_rcom *rc)
151 rc->rc_type = le32_to_cpu(rc->rc_type); 153 rc->rc_type = le32_to_cpu(rc->rc_type);
152 rc->rc_result = le32_to_cpu(rc->rc_result); 154 rc->rc_result = le32_to_cpu(rc->rc_result);
153 rc->rc_id = le64_to_cpu(rc->rc_id); 155 rc->rc_id = le64_to_cpu(rc->rc_id);
156 rc->rc_seq = le64_to_cpu(rc->rc_seq);
157 rc->rc_seq_reply = le64_to_cpu(rc->rc_seq_reply);
154 158
155 if (rc->rc_type == DLM_RCOM_LOCK) 159 if (rc->rc_type == DLM_RCOM_LOCK)
156 rcom_lock_in((struct rcom_lock *) rc->rc_buf); 160 rcom_lock_in((struct rcom_lock *) rc->rc_buf);
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index 6a2ffa2db14f..de8e64c03f73 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -4,44 +4,43 @@ config GFS2_FS
4 select FS_POSIX_ACL 4 select FS_POSIX_ACL
5 select CRC32 5 select CRC32
6 help 6 help
7 A cluster filesystem. 7 A cluster filesystem.
8 8
9 Allows a cluster of computers to simultaneously use a block device 9 Allows a cluster of computers to simultaneously use a block device
10 that is shared between them (with FC, iSCSI, NBD, etc...). GFS reads 10 that is shared between them (with FC, iSCSI, NBD, etc...). GFS reads
11 and writes to the block device like a local filesystem, but also uses 11 and writes to the block device like a local filesystem, but also uses
12 a lock module to allow the computers coordinate their I/O so 12 a lock module to allow the computers coordinate their I/O so
13 filesystem consistency is maintained. One of the nifty features of 13 filesystem consistency is maintained. One of the nifty features of
14 GFS is perfect consistency -- changes made to the filesystem on one 14 GFS is perfect consistency -- changes made to the filesystem on one
15 machine show up immediately on all other machines in the cluster. 15 machine show up immediately on all other machines in the cluster.
16 16
17 To use the GFS2 filesystem, you will need to enable one or more of 17 To use the GFS2 filesystem, you will need to enable one or more of
18 the below locking modules. Documentation and utilities for GFS2 can 18 the below locking modules. Documentation and utilities for GFS2 can
19 be found here: http://sources.redhat.com/cluster 19 be found here: http://sources.redhat.com/cluster
20 20
21config GFS2_FS_LOCKING_NOLOCK 21config GFS2_FS_LOCKING_NOLOCK
22 tristate "GFS2 \"nolock\" locking module" 22 tristate "GFS2 \"nolock\" locking module"
23 depends on GFS2_FS 23 depends on GFS2_FS
24 help 24 help
25 Single node locking module for GFS2. 25 Single node locking module for GFS2.
26 26
27 Use this module if you want to use GFS2 on a single node without 27 Use this module if you want to use GFS2 on a single node without
28 its clustering features. You can still take advantage of the 28 its clustering features. You can still take advantage of the
29 large file support, and upgrade to running a full cluster later on 29 large file support, and upgrade to running a full cluster later on
30 if required. 30 if required.
31 31
32 If you will only be using GFS2 in cluster mode, you do not need this 32 If you will only be using GFS2 in cluster mode, you do not need this
33 module. 33 module.
34 34
35config GFS2_FS_LOCKING_DLM 35config GFS2_FS_LOCKING_DLM
36 tristate "GFS2 DLM locking module" 36 tristate "GFS2 DLM locking module"
37 depends on GFS2_FS && NET && INET && (IPV6 || IPV6=n) 37 depends on GFS2_FS && SYSFS && NET && INET && (IPV6 || IPV6=n)
38 select IP_SCTP if DLM_SCTP 38 select IP_SCTP if DLM_SCTP
39 select CONFIGFS_FS 39 select CONFIGFS_FS
40 select DLM 40 select DLM
41 help 41 help
42 Multiple node locking module for GFS2 42 Multiple node locking module for GFS2
43
44 Most users of GFS2 will require this module. It provides the locking
45 interface between GFS2 and the DLM, which is required to use GFS2
46 in a cluster environment.
47 43
44 Most users of GFS2 will require this module. It provides the locking
45 interface between GFS2 and the DLM, which is required to use GFS2
46 in a cluster environment.
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 8240c1ff94f4..113f6c9110c7 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -773,7 +773,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
773 gfs2_free_data(ip, bstart, blen); 773 gfs2_free_data(ip, bstart, blen);
774 } 774 }
775 775
776 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); 776 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;
777 777
778 gfs2_dinode_out(ip, dibh->b_data); 778 gfs2_dinode_out(ip, dibh->b_data);
779 779
@@ -848,7 +848,7 @@ static int do_grow(struct gfs2_inode *ip, u64 size)
848 } 848 }
849 849
850 ip->i_di.di_size = size; 850 ip->i_di.di_size = size;
851 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); 851 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;
852 852
853 error = gfs2_meta_inode_buffer(ip, &dibh); 853 error = gfs2_meta_inode_buffer(ip, &dibh);
854 if (error) 854 if (error)
@@ -963,7 +963,7 @@ static int trunc_start(struct gfs2_inode *ip, u64 size)
963 963
964 if (gfs2_is_stuffed(ip)) { 964 if (gfs2_is_stuffed(ip)) {
965 ip->i_di.di_size = size; 965 ip->i_di.di_size = size;
966 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); 966 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;
967 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 967 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
968 gfs2_dinode_out(ip, dibh->b_data); 968 gfs2_dinode_out(ip, dibh->b_data);
969 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + size); 969 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + size);
@@ -975,7 +975,7 @@ static int trunc_start(struct gfs2_inode *ip, u64 size)
975 975
976 if (!error) { 976 if (!error) {
977 ip->i_di.di_size = size; 977 ip->i_di.di_size = size;
978 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); 978 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;
979 ip->i_di.di_flags |= GFS2_DIF_TRUNC_IN_PROG; 979 ip->i_di.di_flags |= GFS2_DIF_TRUNC_IN_PROG;
980 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 980 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
981 gfs2_dinode_out(ip, dibh->b_data); 981 gfs2_dinode_out(ip, dibh->b_data);
@@ -1048,7 +1048,7 @@ static int trunc_end(struct gfs2_inode *ip)
1048 ip->i_num.no_addr; 1048 ip->i_num.no_addr;
1049 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); 1049 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
1050 } 1050 }
1051 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); 1051 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;
1052 ip->i_di.di_flags &= ~GFS2_DIF_TRUNC_IN_PROG; 1052 ip->i_di.di_flags &= ~GFS2_DIF_TRUNC_IN_PROG;
1053 1053
1054 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1054 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 0fdcb7713cd9..c93ca8f361b5 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -131,7 +131,7 @@ static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf,
131 memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size); 131 memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
132 if (ip->i_di.di_size < offset + size) 132 if (ip->i_di.di_size < offset + size)
133 ip->i_di.di_size = offset + size; 133 ip->i_di.di_size = offset + size;
134 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); 134 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;
135 gfs2_dinode_out(ip, dibh->b_data); 135 gfs2_dinode_out(ip, dibh->b_data);
136 136
137 brelse(dibh); 137 brelse(dibh);
@@ -229,7 +229,7 @@ out:
229 229
230 if (ip->i_di.di_size < offset + copied) 230 if (ip->i_di.di_size < offset + copied)
231 ip->i_di.di_size = offset + copied; 231 ip->i_di.di_size = offset + copied;
232 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); 232 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;
233 233
234 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 234 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
235 gfs2_dinode_out(ip, dibh->b_data); 235 gfs2_dinode_out(ip, dibh->b_data);
@@ -1198,12 +1198,11 @@ static int compare_dents(const void *a, const void *b)
1198 */ 1198 */
1199 1199
1200static int do_filldir_main(struct gfs2_inode *dip, u64 *offset, 1200static int do_filldir_main(struct gfs2_inode *dip, u64 *offset,
1201 void *opaque, gfs2_filldir_t filldir, 1201 void *opaque, filldir_t filldir,
1202 const struct gfs2_dirent **darr, u32 entries, 1202 const struct gfs2_dirent **darr, u32 entries,
1203 int *copied) 1203 int *copied)
1204{ 1204{
1205 const struct gfs2_dirent *dent, *dent_next; 1205 const struct gfs2_dirent *dent, *dent_next;
1206 struct gfs2_inum_host inum;
1207 u64 off, off_next; 1206 u64 off, off_next;
1208 unsigned int x, y; 1207 unsigned int x, y;
1209 int run = 0; 1208 int run = 0;
@@ -1240,11 +1239,9 @@ static int do_filldir_main(struct gfs2_inode *dip, u64 *offset,
1240 *offset = off; 1239 *offset = off;
1241 } 1240 }
1242 1241
1243 gfs2_inum_in(&inum, (char *)&dent->de_inum);
1244
1245 error = filldir(opaque, (const char *)(dent + 1), 1242 error = filldir(opaque, (const char *)(dent + 1),
1246 be16_to_cpu(dent->de_name_len), 1243 be16_to_cpu(dent->de_name_len),
1247 off, &inum, 1244 off, be64_to_cpu(dent->de_inum.no_addr),
1248 be16_to_cpu(dent->de_type)); 1245 be16_to_cpu(dent->de_type));
1249 if (error) 1246 if (error)
1250 return 1; 1247 return 1;
@@ -1262,8 +1259,8 @@ static int do_filldir_main(struct gfs2_inode *dip, u64 *offset,
1262} 1259}
1263 1260
1264static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque, 1261static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
1265 gfs2_filldir_t filldir, int *copied, 1262 filldir_t filldir, int *copied, unsigned *depth,
1266 unsigned *depth, u64 leaf_no) 1263 u64 leaf_no)
1267{ 1264{
1268 struct gfs2_inode *ip = GFS2_I(inode); 1265 struct gfs2_inode *ip = GFS2_I(inode);
1269 struct buffer_head *bh; 1266 struct buffer_head *bh;
@@ -1343,7 +1340,7 @@ out:
1343 */ 1340 */
1344 1341
1345static int dir_e_read(struct inode *inode, u64 *offset, void *opaque, 1342static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
1346 gfs2_filldir_t filldir) 1343 filldir_t filldir)
1347{ 1344{
1348 struct gfs2_inode *dip = GFS2_I(inode); 1345 struct gfs2_inode *dip = GFS2_I(inode);
1349 struct gfs2_sbd *sdp = GFS2_SB(inode); 1346 struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -1402,7 +1399,7 @@ out:
1402} 1399}
1403 1400
1404int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque, 1401int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
1405 gfs2_filldir_t filldir) 1402 filldir_t filldir)
1406{ 1403{
1407 struct gfs2_inode *dip = GFS2_I(inode); 1404 struct gfs2_inode *dip = GFS2_I(inode);
1408 struct dirent_gather g; 1405 struct dirent_gather g;
@@ -1568,7 +1565,7 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name,
1568 break; 1565 break;
1569 gfs2_trans_add_bh(ip->i_gl, bh, 1); 1566 gfs2_trans_add_bh(ip->i_gl, bh, 1);
1570 ip->i_di.di_entries++; 1567 ip->i_di.di_entries++;
1571 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); 1568 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;
1572 gfs2_dinode_out(ip, bh->b_data); 1569 gfs2_dinode_out(ip, bh->b_data);
1573 brelse(bh); 1570 brelse(bh);
1574 error = 0; 1571 error = 0;
@@ -1654,7 +1651,7 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *name)
1654 gfs2_consist_inode(dip); 1651 gfs2_consist_inode(dip);
1655 gfs2_trans_add_bh(dip->i_gl, bh, 1); 1652 gfs2_trans_add_bh(dip->i_gl, bh, 1);
1656 dip->i_di.di_entries--; 1653 dip->i_di.di_entries--;
1657 dip->i_inode.i_mtime.tv_sec = dip->i_inode.i_ctime.tv_sec = get_seconds(); 1654 dip->i_inode.i_mtime = dip->i_inode.i_ctime = CURRENT_TIME_SEC;
1658 gfs2_dinode_out(dip, bh->b_data); 1655 gfs2_dinode_out(dip, bh->b_data);
1659 brelse(bh); 1656 brelse(bh);
1660 mark_inode_dirty(&dip->i_inode); 1657 mark_inode_dirty(&dip->i_inode);
@@ -1702,7 +1699,7 @@ int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
1702 gfs2_trans_add_bh(dip->i_gl, bh, 1); 1699 gfs2_trans_add_bh(dip->i_gl, bh, 1);
1703 } 1700 }
1704 1701
1705 dip->i_inode.i_mtime.tv_sec = dip->i_inode.i_ctime.tv_sec = get_seconds(); 1702 dip->i_inode.i_mtime = dip->i_inode.i_ctime = CURRENT_TIME_SEC;
1706 gfs2_dinode_out(dip, bh->b_data); 1703 gfs2_dinode_out(dip, bh->b_data);
1707 brelse(bh); 1704 brelse(bh);
1708 return 0; 1705 return 0;
diff --git a/fs/gfs2/dir.h b/fs/gfs2/dir.h
index b21b33668a5b..48fe89046bba 100644
--- a/fs/gfs2/dir.h
+++ b/fs/gfs2/dir.h
@@ -16,30 +16,13 @@ struct inode;
16struct gfs2_inode; 16struct gfs2_inode;
17struct gfs2_inum; 17struct gfs2_inum;
18 18
19/**
20 * gfs2_filldir_t - Report a directory entry to the caller of gfs2_dir_read()
21 * @opaque: opaque data used by the function
22 * @name: the name of the directory entry
23 * @length: the length of the name
24 * @offset: the entry's offset in the directory
25 * @inum: the inode number the entry points to
26 * @type: the type of inode the entry points to
27 *
28 * Returns: 0 on success, 1 if buffer full
29 */
30
31typedef int (*gfs2_filldir_t) (void *opaque,
32 const char *name, unsigned int length,
33 u64 offset,
34 struct gfs2_inum_host *inum, unsigned int type);
35
36int gfs2_dir_search(struct inode *dir, const struct qstr *filename, 19int gfs2_dir_search(struct inode *dir, const struct qstr *filename,
37 struct gfs2_inum_host *inum, unsigned int *type); 20 struct gfs2_inum_host *inum, unsigned int *type);
38int gfs2_dir_add(struct inode *inode, const struct qstr *filename, 21int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
39 const struct gfs2_inum_host *inum, unsigned int type); 22 const struct gfs2_inum_host *inum, unsigned int type);
40int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename); 23int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename);
41int gfs2_dir_read(struct inode *inode, u64 * offset, void *opaque, 24int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
42 gfs2_filldir_t filldir); 25 filldir_t filldir);
43int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename, 26int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
44 struct gfs2_inum_host *new_inum, unsigned int new_type); 27 struct gfs2_inum_host *new_inum, unsigned int new_type);
45 28
diff --git a/fs/gfs2/eattr.c b/fs/gfs2/eattr.c
index ebebbdcd7057..0c83c7f4dda8 100644
--- a/fs/gfs2/eattr.c
+++ b/fs/gfs2/eattr.c
@@ -301,7 +301,7 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
301 301
302 error = gfs2_meta_inode_buffer(ip, &dibh); 302 error = gfs2_meta_inode_buffer(ip, &dibh);
303 if (!error) { 303 if (!error) {
304 ip->i_inode.i_ctime.tv_sec = get_seconds(); 304 ip->i_inode.i_ctime = CURRENT_TIME_SEC;
305 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 305 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
306 gfs2_dinode_out(ip, dibh->b_data); 306 gfs2_dinode_out(ip, dibh->b_data);
307 brelse(dibh); 307 brelse(dibh);
@@ -718,7 +718,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
718 (er->er_mode & S_IFMT)); 718 (er->er_mode & S_IFMT));
719 ip->i_inode.i_mode = er->er_mode; 719 ip->i_inode.i_mode = er->er_mode;
720 } 720 }
721 ip->i_inode.i_ctime.tv_sec = get_seconds(); 721 ip->i_inode.i_ctime = CURRENT_TIME_SEC;
722 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 722 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
723 gfs2_dinode_out(ip, dibh->b_data); 723 gfs2_dinode_out(ip, dibh->b_data);
724 brelse(dibh); 724 brelse(dibh);
@@ -853,7 +853,7 @@ static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
853 (ip->i_inode.i_mode & S_IFMT) == (er->er_mode & S_IFMT)); 853 (ip->i_inode.i_mode & S_IFMT) == (er->er_mode & S_IFMT));
854 ip->i_inode.i_mode = er->er_mode; 854 ip->i_inode.i_mode = er->er_mode;
855 } 855 }
856 ip->i_inode.i_ctime.tv_sec = get_seconds(); 856 ip->i_inode.i_ctime = CURRENT_TIME_SEC;
857 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 857 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
858 gfs2_dinode_out(ip, dibh->b_data); 858 gfs2_dinode_out(ip, dibh->b_data);
859 brelse(dibh); 859 brelse(dibh);
@@ -1134,7 +1134,7 @@ static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
1134 1134
1135 error = gfs2_meta_inode_buffer(ip, &dibh); 1135 error = gfs2_meta_inode_buffer(ip, &dibh);
1136 if (!error) { 1136 if (!error) {
1137 ip->i_inode.i_ctime.tv_sec = get_seconds(); 1137 ip->i_inode.i_ctime = CURRENT_TIME_SEC;
1138 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1138 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1139 gfs2_dinode_out(ip, dibh->b_data); 1139 gfs2_dinode_out(ip, dibh->b_data);
1140 brelse(dibh); 1140 brelse(dibh);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 438146904b58..6618c1190252 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -19,6 +19,8 @@
19#include <linux/gfs2_ondisk.h> 19#include <linux/gfs2_ondisk.h>
20#include <linux/list.h> 20#include <linux/list.h>
21#include <linux/lm_interface.h> 21#include <linux/lm_interface.h>
22#include <linux/wait.h>
23#include <linux/rwsem.h>
22#include <asm/uaccess.h> 24#include <asm/uaccess.h>
23 25
24#include "gfs2.h" 26#include "gfs2.h"
@@ -33,11 +35,6 @@
33#include "super.h" 35#include "super.h"
34#include "util.h" 36#include "util.h"
35 37
36struct greedy {
37 struct gfs2_holder gr_gh;
38 struct delayed_work gr_work;
39};
40
41struct gfs2_gl_hash_bucket { 38struct gfs2_gl_hash_bucket {
42 struct hlist_head hb_list; 39 struct hlist_head hb_list;
43}; 40};
@@ -47,6 +44,9 @@ typedef void (*glock_examiner) (struct gfs2_glock * gl);
47static int gfs2_dump_lockstate(struct gfs2_sbd *sdp); 44static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
48static int dump_glock(struct gfs2_glock *gl); 45static int dump_glock(struct gfs2_glock *gl);
49static int dump_inode(struct gfs2_inode *ip); 46static int dump_inode(struct gfs2_inode *ip);
47static void gfs2_glock_xmote_th(struct gfs2_holder *gh);
48static void gfs2_glock_drop_th(struct gfs2_glock *gl);
49static DECLARE_RWSEM(gfs2_umount_flush_sem);
50 50
51#define GFS2_GL_HASH_SHIFT 15 51#define GFS2_GL_HASH_SHIFT 15
52#define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT) 52#define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT)
@@ -213,30 +213,6 @@ out:
213} 213}
214 214
215/** 215/**
216 * queue_empty - check to see if a glock's queue is empty
217 * @gl: the glock
218 * @head: the head of the queue to check
219 *
220 * This function protects the list in the event that a process already
221 * has a holder on the list and is adding a second holder for itself.
222 * The glmutex lock is what generally prevents processes from working
223 * on the same glock at once, but the special case of adding a second
224 * holder for yourself ("recursive" locking) doesn't involve locking
225 * glmutex, making the spin lock necessary.
226 *
227 * Returns: 1 if the queue is empty
228 */
229
230static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
231{
232 int empty;
233 spin_lock(&gl->gl_spin);
234 empty = list_empty(head);
235 spin_unlock(&gl->gl_spin);
236 return empty;
237}
238
239/**
240 * search_bucket() - Find struct gfs2_glock by lock number 216 * search_bucket() - Find struct gfs2_glock by lock number
241 * @bucket: the bucket to search 217 * @bucket: the bucket to search
242 * @name: The lock name 218 * @name: The lock name
@@ -395,11 +371,6 @@ void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
395 gh->gh_flags = flags; 371 gh->gh_flags = flags;
396 gh->gh_error = 0; 372 gh->gh_error = 0;
397 gh->gh_iflags = 0; 373 gh->gh_iflags = 0;
398 init_completion(&gh->gh_wait);
399
400 if (gh->gh_state == LM_ST_EXCLUSIVE)
401 gh->gh_flags |= GL_LOCAL_EXCL;
402
403 gfs2_glock_hold(gl); 374 gfs2_glock_hold(gl);
404} 375}
405 376
@@ -417,9 +388,6 @@ void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *
417{ 388{
418 gh->gh_state = state; 389 gh->gh_state = state;
419 gh->gh_flags = flags; 390 gh->gh_flags = flags;
420 if (gh->gh_state == LM_ST_EXCLUSIVE)
421 gh->gh_flags |= GL_LOCAL_EXCL;
422
423 gh->gh_iflags &= 1 << HIF_ALLOCED; 391 gh->gh_iflags &= 1 << HIF_ALLOCED;
424 gh->gh_ip = (unsigned long)__builtin_return_address(0); 392 gh->gh_ip = (unsigned long)__builtin_return_address(0);
425} 393}
@@ -479,6 +447,29 @@ static void gfs2_holder_put(struct gfs2_holder *gh)
479 kfree(gh); 447 kfree(gh);
480} 448}
481 449
450static void gfs2_holder_dispose_or_wake(struct gfs2_holder *gh)
451{
452 if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) {
453 gfs2_holder_put(gh);
454 return;
455 }
456 clear_bit(HIF_WAIT, &gh->gh_iflags);
457 smp_mb();
458 wake_up_bit(&gh->gh_iflags, HIF_WAIT);
459}
460
461static int holder_wait(void *word)
462{
463 schedule();
464 return 0;
465}
466
467static void wait_on_holder(struct gfs2_holder *gh)
468{
469 might_sleep();
470 wait_on_bit(&gh->gh_iflags, HIF_WAIT, holder_wait, TASK_UNINTERRUPTIBLE);
471}
472
482/** 473/**
483 * rq_mutex - process a mutex request in the queue 474 * rq_mutex - process a mutex request in the queue
484 * @gh: the glock holder 475 * @gh: the glock holder
@@ -493,7 +484,9 @@ static int rq_mutex(struct gfs2_holder *gh)
493 list_del_init(&gh->gh_list); 484 list_del_init(&gh->gh_list);
494 /* gh->gh_error never examined. */ 485 /* gh->gh_error never examined. */
495 set_bit(GLF_LOCK, &gl->gl_flags); 486 set_bit(GLF_LOCK, &gl->gl_flags);
496 complete(&gh->gh_wait); 487 clear_bit(HIF_WAIT, &gh->gh_iflags);
488 smp_mb();
489 wake_up_bit(&gh->gh_iflags, HIF_WAIT);
497 490
498 return 1; 491 return 1;
499} 492}
@@ -511,7 +504,6 @@ static int rq_promote(struct gfs2_holder *gh)
511{ 504{
512 struct gfs2_glock *gl = gh->gh_gl; 505 struct gfs2_glock *gl = gh->gh_gl;
513 struct gfs2_sbd *sdp = gl->gl_sbd; 506 struct gfs2_sbd *sdp = gl->gl_sbd;
514 const struct gfs2_glock_operations *glops = gl->gl_ops;
515 507
516 if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) { 508 if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
517 if (list_empty(&gl->gl_holders)) { 509 if (list_empty(&gl->gl_holders)) {
@@ -526,7 +518,7 @@ static int rq_promote(struct gfs2_holder *gh)
526 gfs2_reclaim_glock(sdp); 518 gfs2_reclaim_glock(sdp);
527 } 519 }
528 520
529 glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags); 521 gfs2_glock_xmote_th(gh);
530 spin_lock(&gl->gl_spin); 522 spin_lock(&gl->gl_spin);
531 } 523 }
532 return 1; 524 return 1;
@@ -537,11 +529,11 @@ static int rq_promote(struct gfs2_holder *gh)
537 set_bit(GLF_LOCK, &gl->gl_flags); 529 set_bit(GLF_LOCK, &gl->gl_flags);
538 } else { 530 } else {
539 struct gfs2_holder *next_gh; 531 struct gfs2_holder *next_gh;
540 if (gh->gh_flags & GL_LOCAL_EXCL) 532 if (gh->gh_state == LM_ST_EXCLUSIVE)
541 return 1; 533 return 1;
542 next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder, 534 next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
543 gh_list); 535 gh_list);
544 if (next_gh->gh_flags & GL_LOCAL_EXCL) 536 if (next_gh->gh_state == LM_ST_EXCLUSIVE)
545 return 1; 537 return 1;
546 } 538 }
547 539
@@ -549,7 +541,7 @@ static int rq_promote(struct gfs2_holder *gh)
549 gh->gh_error = 0; 541 gh->gh_error = 0;
550 set_bit(HIF_HOLDER, &gh->gh_iflags); 542 set_bit(HIF_HOLDER, &gh->gh_iflags);
551 543
552 complete(&gh->gh_wait); 544 gfs2_holder_dispose_or_wake(gh);
553 545
554 return 0; 546 return 0;
555} 547}
@@ -564,7 +556,6 @@ static int rq_promote(struct gfs2_holder *gh)
564static int rq_demote(struct gfs2_holder *gh) 556static int rq_demote(struct gfs2_holder *gh)
565{ 557{
566 struct gfs2_glock *gl = gh->gh_gl; 558 struct gfs2_glock *gl = gh->gh_gl;
567 const struct gfs2_glock_operations *glops = gl->gl_ops;
568 559
569 if (!list_empty(&gl->gl_holders)) 560 if (!list_empty(&gl->gl_holders))
570 return 1; 561 return 1;
@@ -573,10 +564,7 @@ static int rq_demote(struct gfs2_holder *gh)
573 list_del_init(&gh->gh_list); 564 list_del_init(&gh->gh_list);
574 gh->gh_error = 0; 565 gh->gh_error = 0;
575 spin_unlock(&gl->gl_spin); 566 spin_unlock(&gl->gl_spin);
576 if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) 567 gfs2_holder_dispose_or_wake(gh);
577 gfs2_holder_put(gh);
578 else
579 complete(&gh->gh_wait);
580 spin_lock(&gl->gl_spin); 568 spin_lock(&gl->gl_spin);
581 } else { 569 } else {
582 gl->gl_req_gh = gh; 570 gl->gl_req_gh = gh;
@@ -585,9 +573,9 @@ static int rq_demote(struct gfs2_holder *gh)
585 573
586 if (gh->gh_state == LM_ST_UNLOCKED || 574 if (gh->gh_state == LM_ST_UNLOCKED ||
587 gl->gl_state != LM_ST_EXCLUSIVE) 575 gl->gl_state != LM_ST_EXCLUSIVE)
588 glops->go_drop_th(gl); 576 gfs2_glock_drop_th(gl);
589 else 577 else
590 glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags); 578 gfs2_glock_xmote_th(gh);
591 579
592 spin_lock(&gl->gl_spin); 580 spin_lock(&gl->gl_spin);
593 } 581 }
@@ -596,30 +584,6 @@ static int rq_demote(struct gfs2_holder *gh)
596} 584}
597 585
598/** 586/**
599 * rq_greedy - process a queued request to drop greedy status
600 * @gh: the glock holder
601 *
602 * Returns: 1 if the queue is blocked
603 */
604
605static int rq_greedy(struct gfs2_holder *gh)
606{
607 struct gfs2_glock *gl = gh->gh_gl;
608
609 list_del_init(&gh->gh_list);
610 /* gh->gh_error never examined. */
611 clear_bit(GLF_GREEDY, &gl->gl_flags);
612 spin_unlock(&gl->gl_spin);
613
614 gfs2_holder_uninit(gh);
615 kfree(container_of(gh, struct greedy, gr_gh));
616
617 spin_lock(&gl->gl_spin);
618
619 return 0;
620}
621
622/**
623 * run_queue - process holder structures on a glock 587 * run_queue - process holder structures on a glock
624 * @gl: the glock 588 * @gl: the glock
625 * 589 *
@@ -649,8 +613,6 @@ static void run_queue(struct gfs2_glock *gl)
649 613
650 if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) 614 if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
651 blocked = rq_demote(gh); 615 blocked = rq_demote(gh);
652 else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
653 blocked = rq_greedy(gh);
654 else 616 else
655 gfs2_assert_warn(gl->gl_sbd, 0); 617 gfs2_assert_warn(gl->gl_sbd, 0);
656 618
@@ -684,6 +646,8 @@ static void gfs2_glmutex_lock(struct gfs2_glock *gl)
684 646
685 gfs2_holder_init(gl, 0, 0, &gh); 647 gfs2_holder_init(gl, 0, 0, &gh);
686 set_bit(HIF_MUTEX, &gh.gh_iflags); 648 set_bit(HIF_MUTEX, &gh.gh_iflags);
649 if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
650 BUG();
687 651
688 spin_lock(&gl->gl_spin); 652 spin_lock(&gl->gl_spin);
689 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { 653 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
@@ -691,11 +655,13 @@ static void gfs2_glmutex_lock(struct gfs2_glock *gl)
691 } else { 655 } else {
692 gl->gl_owner = current; 656 gl->gl_owner = current;
693 gl->gl_ip = (unsigned long)__builtin_return_address(0); 657 gl->gl_ip = (unsigned long)__builtin_return_address(0);
694 complete(&gh.gh_wait); 658 clear_bit(HIF_WAIT, &gh.gh_iflags);
659 smp_mb();
660 wake_up_bit(&gh.gh_iflags, HIF_WAIT);
695 } 661 }
696 spin_unlock(&gl->gl_spin); 662 spin_unlock(&gl->gl_spin);
697 663
698 wait_for_completion(&gh.gh_wait); 664 wait_on_holder(&gh);
699 gfs2_holder_uninit(&gh); 665 gfs2_holder_uninit(&gh);
700} 666}
701 667
@@ -774,6 +740,7 @@ restart:
774 return; 740 return;
775 set_bit(HIF_DEMOTE, &new_gh->gh_iflags); 741 set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
776 set_bit(HIF_DEALLOC, &new_gh->gh_iflags); 742 set_bit(HIF_DEALLOC, &new_gh->gh_iflags);
743 set_bit(HIF_WAIT, &new_gh->gh_iflags);
777 744
778 goto restart; 745 goto restart;
779 } 746 }
@@ -825,7 +792,7 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
825 int op_done = 1; 792 int op_done = 1;
826 793
827 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); 794 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
828 gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders)); 795 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
829 gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC)); 796 gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
830 797
831 state_change(gl, ret & LM_OUT_ST_MASK); 798 state_change(gl, ret & LM_OUT_ST_MASK);
@@ -908,12 +875,8 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
908 875
909 gfs2_glock_put(gl); 876 gfs2_glock_put(gl);
910 877
911 if (gh) { 878 if (gh)
912 if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) 879 gfs2_holder_dispose_or_wake(gh);
913 gfs2_holder_put(gh);
914 else
915 complete(&gh->gh_wait);
916 }
917} 880}
918 881
919/** 882/**
@@ -924,23 +887,26 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
924 * 887 *
925 */ 888 */
926 889
927void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags) 890void gfs2_glock_xmote_th(struct gfs2_holder *gh)
928{ 891{
892 struct gfs2_glock *gl = gh->gh_gl;
929 struct gfs2_sbd *sdp = gl->gl_sbd; 893 struct gfs2_sbd *sdp = gl->gl_sbd;
894 int flags = gh->gh_flags;
895 unsigned state = gh->gh_state;
930 const struct gfs2_glock_operations *glops = gl->gl_ops; 896 const struct gfs2_glock_operations *glops = gl->gl_ops;
931 int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB | 897 int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
932 LM_FLAG_NOEXP | LM_FLAG_ANY | 898 LM_FLAG_NOEXP | LM_FLAG_ANY |
933 LM_FLAG_PRIORITY); 899 LM_FLAG_PRIORITY);
934 unsigned int lck_ret; 900 unsigned int lck_ret;
935 901
902 if (glops->go_xmote_th)
903 glops->go_xmote_th(gl);
904
936 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); 905 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
937 gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders)); 906 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
938 gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED); 907 gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
939 gfs2_assert_warn(sdp, state != gl->gl_state); 908 gfs2_assert_warn(sdp, state != gl->gl_state);
940 909
941 if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
942 glops->go_sync(gl);
943
944 gfs2_glock_hold(gl); 910 gfs2_glock_hold(gl);
945 gl->gl_req_bh = xmote_bh; 911 gl->gl_req_bh = xmote_bh;
946 912
@@ -971,10 +937,8 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
971 const struct gfs2_glock_operations *glops = gl->gl_ops; 937 const struct gfs2_glock_operations *glops = gl->gl_ops;
972 struct gfs2_holder *gh = gl->gl_req_gh; 938 struct gfs2_holder *gh = gl->gl_req_gh;
973 939
974 clear_bit(GLF_PREFETCH, &gl->gl_flags);
975
976 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); 940 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
977 gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders)); 941 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
978 gfs2_assert_warn(sdp, !ret); 942 gfs2_assert_warn(sdp, !ret);
979 943
980 state_change(gl, LM_ST_UNLOCKED); 944 state_change(gl, LM_ST_UNLOCKED);
@@ -1001,12 +965,8 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
1001 965
1002 gfs2_glock_put(gl); 966 gfs2_glock_put(gl);
1003 967
1004 if (gh) { 968 if (gh)
1005 if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) 969 gfs2_holder_dispose_or_wake(gh);
1006 gfs2_holder_put(gh);
1007 else
1008 complete(&gh->gh_wait);
1009 }
1010} 970}
1011 971
1012/** 972/**
@@ -1015,19 +975,19 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
1015 * 975 *
1016 */ 976 */
1017 977
1018void gfs2_glock_drop_th(struct gfs2_glock *gl) 978static void gfs2_glock_drop_th(struct gfs2_glock *gl)
1019{ 979{
1020 struct gfs2_sbd *sdp = gl->gl_sbd; 980 struct gfs2_sbd *sdp = gl->gl_sbd;
1021 const struct gfs2_glock_operations *glops = gl->gl_ops; 981 const struct gfs2_glock_operations *glops = gl->gl_ops;
1022 unsigned int ret; 982 unsigned int ret;
1023 983
984 if (glops->go_drop_th)
985 glops->go_drop_th(gl);
986
1024 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); 987 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
1025 gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders)); 988 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
1026 gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED); 989 gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
1027 990
1028 if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
1029 glops->go_sync(gl);
1030
1031 gfs2_glock_hold(gl); 991 gfs2_glock_hold(gl);
1032 gl->gl_req_bh = drop_bh; 992 gl->gl_req_bh = drop_bh;
1033 993
@@ -1107,8 +1067,7 @@ static int glock_wait_internal(struct gfs2_holder *gh)
1107 if (gh->gh_flags & LM_FLAG_PRIORITY) 1067 if (gh->gh_flags & LM_FLAG_PRIORITY)
1108 do_cancels(gh); 1068 do_cancels(gh);
1109 1069
1110 wait_for_completion(&gh->gh_wait); 1070 wait_on_holder(gh);
1111
1112 if (gh->gh_error) 1071 if (gh->gh_error)
1113 return gh->gh_error; 1072 return gh->gh_error;
1114 1073
@@ -1164,6 +1123,8 @@ static void add_to_queue(struct gfs2_holder *gh)
1164 struct gfs2_holder *existing; 1123 struct gfs2_holder *existing;
1165 1124
1166 BUG_ON(!gh->gh_owner); 1125 BUG_ON(!gh->gh_owner);
1126 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
1127 BUG();
1167 1128
1168 existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner); 1129 existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
1169 if (existing) { 1130 if (existing) {
@@ -1227,8 +1188,6 @@ restart:
1227 } 1188 }
1228 } 1189 }
1229 1190
1230 clear_bit(GLF_PREFETCH, &gl->gl_flags);
1231
1232 return error; 1191 return error;
1233} 1192}
1234 1193
@@ -1321,98 +1280,6 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
1321} 1280}
1322 1281
1323/** 1282/**
1324 * gfs2_glock_prefetch - Try to prefetch a glock
1325 * @gl: the glock
1326 * @state: the state to prefetch in
1327 * @flags: flags passed to go_xmote_th()
1328 *
1329 */
1330
1331static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state,
1332 int flags)
1333{
1334 const struct gfs2_glock_operations *glops = gl->gl_ops;
1335
1336 spin_lock(&gl->gl_spin);
1337
1338 if (test_bit(GLF_LOCK, &gl->gl_flags) || !list_empty(&gl->gl_holders) ||
1339 !list_empty(&gl->gl_waiters1) || !list_empty(&gl->gl_waiters2) ||
1340 !list_empty(&gl->gl_waiters3) ||
1341 relaxed_state_ok(gl->gl_state, state, flags)) {
1342 spin_unlock(&gl->gl_spin);
1343 return;
1344 }
1345
1346 set_bit(GLF_PREFETCH, &gl->gl_flags);
1347 set_bit(GLF_LOCK, &gl->gl_flags);
1348 spin_unlock(&gl->gl_spin);
1349
1350 glops->go_xmote_th(gl, state, flags);
1351}
1352
1353static void greedy_work(struct work_struct *work)
1354{
1355 struct greedy *gr = container_of(work, struct greedy, gr_work.work);
1356 struct gfs2_holder *gh = &gr->gr_gh;
1357 struct gfs2_glock *gl = gh->gh_gl;
1358 const struct gfs2_glock_operations *glops = gl->gl_ops;
1359
1360 clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
1361
1362 if (glops->go_greedy)
1363 glops->go_greedy(gl);
1364
1365 spin_lock(&gl->gl_spin);
1366
1367 if (list_empty(&gl->gl_waiters2)) {
1368 clear_bit(GLF_GREEDY, &gl->gl_flags);
1369 spin_unlock(&gl->gl_spin);
1370 gfs2_holder_uninit(gh);
1371 kfree(gr);
1372 } else {
1373 gfs2_glock_hold(gl);
1374 list_add_tail(&gh->gh_list, &gl->gl_waiters2);
1375 run_queue(gl);
1376 spin_unlock(&gl->gl_spin);
1377 gfs2_glock_put(gl);
1378 }
1379}
1380
1381/**
1382 * gfs2_glock_be_greedy -
1383 * @gl:
1384 * @time:
1385 *
1386 * Returns: 0 if go_greedy will be called, 1 otherwise
1387 */
1388
1389int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
1390{
1391 struct greedy *gr;
1392 struct gfs2_holder *gh;
1393
1394 if (!time || gl->gl_sbd->sd_args.ar_localcaching ||
1395 test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
1396 return 1;
1397
1398 gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
1399 if (!gr) {
1400 clear_bit(GLF_GREEDY, &gl->gl_flags);
1401 return 1;
1402 }
1403 gh = &gr->gr_gh;
1404
1405 gfs2_holder_init(gl, 0, 0, gh);
1406 set_bit(HIF_GREEDY, &gh->gh_iflags);
1407 INIT_DELAYED_WORK(&gr->gr_work, greedy_work);
1408
1409 set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
1410 schedule_delayed_work(&gr->gr_work, time);
1411
1412 return 0;
1413}
1414
1415/**
1416 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it 1283 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
1417 * @gh: the holder structure 1284 * @gh: the holder structure
1418 * 1285 *
@@ -1470,10 +1337,7 @@ static int glock_compare(const void *arg_a, const void *arg_b)
1470 return 1; 1337 return 1;
1471 if (a->ln_number < b->ln_number) 1338 if (a->ln_number < b->ln_number)
1472 return -1; 1339 return -1;
1473 if (gh_a->gh_state == LM_ST_SHARED && gh_b->gh_state == LM_ST_EXCLUSIVE) 1340 BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1474 return 1;
1475 if (!(gh_a->gh_flags & GL_LOCAL_EXCL) && (gh_b->gh_flags & GL_LOCAL_EXCL))
1476 return 1;
1477 return 0; 1341 return 0;
1478} 1342}
1479 1343
@@ -1618,34 +1482,6 @@ void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1618} 1482}
1619 1483
1620/** 1484/**
1621 * gfs2_glock_prefetch_num - prefetch a glock based on lock number
1622 * @sdp: the filesystem
1623 * @number: the lock number
1624 * @glops: the glock operations for the type of glock
1625 * @state: the state to acquire the glock in
1626 * @flags: modifier flags for the aquisition
1627 *
1628 * Returns: errno
1629 */
1630
1631void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, u64 number,
1632 const struct gfs2_glock_operations *glops,
1633 unsigned int state, int flags)
1634{
1635 struct gfs2_glock *gl;
1636 int error;
1637
1638 if (atomic_read(&sdp->sd_reclaim_count) <
1639 gfs2_tune_get(sdp, gt_reclaim_limit)) {
1640 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1641 if (!error) {
1642 gfs2_glock_prefetch(gl, state, flags);
1643 gfs2_glock_put(gl);
1644 }
1645 }
1646}
1647
1648/**
1649 * gfs2_lvb_hold - attach a LVB from a glock 1485 * gfs2_lvb_hold - attach a LVB from a glock
1650 * @gl: The glock in question 1486 * @gl: The glock in question
1651 * 1487 *
@@ -1703,8 +1539,6 @@ static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
1703 if (!gl) 1539 if (!gl)
1704 return; 1540 return;
1705 1541
1706 if (gl->gl_ops->go_callback)
1707 gl->gl_ops->go_callback(gl, state);
1708 handle_callback(gl, state); 1542 handle_callback(gl, state);
1709 1543
1710 spin_lock(&gl->gl_spin); 1544 spin_lock(&gl->gl_spin);
@@ -1746,12 +1580,14 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
1746 struct lm_async_cb *async = data; 1580 struct lm_async_cb *async = data;
1747 struct gfs2_glock *gl; 1581 struct gfs2_glock *gl;
1748 1582
1583 down_read(&gfs2_umount_flush_sem);
1749 gl = gfs2_glock_find(sdp, &async->lc_name); 1584 gl = gfs2_glock_find(sdp, &async->lc_name);
1750 if (gfs2_assert_warn(sdp, gl)) 1585 if (gfs2_assert_warn(sdp, gl))
1751 return; 1586 return;
1752 if (!gfs2_assert_warn(sdp, gl->gl_req_bh)) 1587 if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
1753 gl->gl_req_bh(gl, async->lc_ret); 1588 gl->gl_req_bh(gl, async->lc_ret);
1754 gfs2_glock_put(gl); 1589 gfs2_glock_put(gl);
1590 up_read(&gfs2_umount_flush_sem);
1755 return; 1591 return;
1756 } 1592 }
1757 1593
@@ -1781,15 +1617,11 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
1781 1617
1782static int demote_ok(struct gfs2_glock *gl) 1618static int demote_ok(struct gfs2_glock *gl)
1783{ 1619{
1784 struct gfs2_sbd *sdp = gl->gl_sbd;
1785 const struct gfs2_glock_operations *glops = gl->gl_ops; 1620 const struct gfs2_glock_operations *glops = gl->gl_ops;
1786 int demote = 1; 1621 int demote = 1;
1787 1622
1788 if (test_bit(GLF_STICKY, &gl->gl_flags)) 1623 if (test_bit(GLF_STICKY, &gl->gl_flags))
1789 demote = 0; 1624 demote = 0;
1790 else if (test_bit(GLF_PREFETCH, &gl->gl_flags))
1791 demote = time_after_eq(jiffies, gl->gl_stamp +
1792 gfs2_tune_get(sdp, gt_prefetch_secs) * HZ);
1793 else if (glops->go_demote_ok) 1625 else if (glops->go_demote_ok)
1794 demote = glops->go_demote_ok(gl); 1626 demote = glops->go_demote_ok(gl);
1795 1627
@@ -1845,7 +1677,7 @@ void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
1845 atomic_inc(&sdp->sd_reclaimed); 1677 atomic_inc(&sdp->sd_reclaimed);
1846 1678
1847 if (gfs2_glmutex_trylock(gl)) { 1679 if (gfs2_glmutex_trylock(gl)) {
1848 if (queue_empty(gl, &gl->gl_holders) && 1680 if (list_empty(&gl->gl_holders) &&
1849 gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) 1681 gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1850 handle_callback(gl, LM_ST_UNLOCKED); 1682 handle_callback(gl, LM_ST_UNLOCKED);
1851 gfs2_glmutex_unlock(gl); 1683 gfs2_glmutex_unlock(gl);
@@ -1909,7 +1741,7 @@ static void scan_glock(struct gfs2_glock *gl)
1909 return; 1741 return;
1910 1742
1911 if (gfs2_glmutex_trylock(gl)) { 1743 if (gfs2_glmutex_trylock(gl)) {
1912 if (queue_empty(gl, &gl->gl_holders) && 1744 if (list_empty(&gl->gl_holders) &&
1913 gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) 1745 gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1914 goto out_schedule; 1746 goto out_schedule;
1915 gfs2_glmutex_unlock(gl); 1747 gfs2_glmutex_unlock(gl);
@@ -1958,7 +1790,7 @@ static void clear_glock(struct gfs2_glock *gl)
1958 } 1790 }
1959 1791
1960 if (gfs2_glmutex_trylock(gl)) { 1792 if (gfs2_glmutex_trylock(gl)) {
1961 if (queue_empty(gl, &gl->gl_holders) && 1793 if (list_empty(&gl->gl_holders) &&
1962 gl->gl_state != LM_ST_UNLOCKED) 1794 gl->gl_state != LM_ST_UNLOCKED)
1963 handle_callback(gl, LM_ST_UNLOCKED); 1795 handle_callback(gl, LM_ST_UNLOCKED);
1964 gfs2_glmutex_unlock(gl); 1796 gfs2_glmutex_unlock(gl);
@@ -2000,7 +1832,9 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
2000 t = jiffies; 1832 t = jiffies;
2001 } 1833 }
2002 1834
1835 down_write(&gfs2_umount_flush_sem);
2003 invalidate_inodes(sdp->sd_vfs); 1836 invalidate_inodes(sdp->sd_vfs);
1837 up_write(&gfs2_umount_flush_sem);
2004 msleep(10); 1838 msleep(10);
2005 } 1839 }
2006} 1840}
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index fb39108fc05c..f50e40ceca43 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -20,7 +20,6 @@
20#define LM_FLAG_ANY 0x00000008 20#define LM_FLAG_ANY 0x00000008
21#define LM_FLAG_PRIORITY 0x00000010 */ 21#define LM_FLAG_PRIORITY 0x00000010 */
22 22
23#define GL_LOCAL_EXCL 0x00000020
24#define GL_ASYNC 0x00000040 23#define GL_ASYNC 0x00000040
25#define GL_EXACT 0x00000080 24#define GL_EXACT 0x00000080
26#define GL_SKIP 0x00000100 25#define GL_SKIP 0x00000100
@@ -83,17 +82,11 @@ void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
83void gfs2_holder_reinit(unsigned int state, unsigned flags, 82void gfs2_holder_reinit(unsigned int state, unsigned flags,
84 struct gfs2_holder *gh); 83 struct gfs2_holder *gh);
85void gfs2_holder_uninit(struct gfs2_holder *gh); 84void gfs2_holder_uninit(struct gfs2_holder *gh);
86
87void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags);
88void gfs2_glock_drop_th(struct gfs2_glock *gl);
89
90int gfs2_glock_nq(struct gfs2_holder *gh); 85int gfs2_glock_nq(struct gfs2_holder *gh);
91int gfs2_glock_poll(struct gfs2_holder *gh); 86int gfs2_glock_poll(struct gfs2_holder *gh);
92int gfs2_glock_wait(struct gfs2_holder *gh); 87int gfs2_glock_wait(struct gfs2_holder *gh);
93void gfs2_glock_dq(struct gfs2_holder *gh); 88void gfs2_glock_dq(struct gfs2_holder *gh);
94 89
95int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time);
96
97void gfs2_glock_dq_uninit(struct gfs2_holder *gh); 90void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
98int gfs2_glock_nq_num(struct gfs2_sbd *sdp, 91int gfs2_glock_nq_num(struct gfs2_sbd *sdp,
99 u64 number, const struct gfs2_glock_operations *glops, 92 u64 number, const struct gfs2_glock_operations *glops,
@@ -103,10 +96,6 @@ int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
103void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs); 96void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
104void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs); 97void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);
105 98
106void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, u64 number,
107 const struct gfs2_glock_operations *glops,
108 unsigned int state, int flags);
109
110/** 99/**
111 * gfs2_glock_nq_init - intialize a holder and enqueue it on a glock 100 * gfs2_glock_nq_init - intialize a holder and enqueue it on a glock
112 * @gl: the glock 101 * @gl: the glock
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index b068d10bcb6e..c4b0391b7aa2 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -117,12 +117,14 @@ static void gfs2_pte_inval(struct gfs2_glock *gl)
117 117
118static void meta_go_sync(struct gfs2_glock *gl) 118static void meta_go_sync(struct gfs2_glock *gl)
119{ 119{
120 if (gl->gl_state != LM_ST_EXCLUSIVE)
121 return;
122
120 if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) { 123 if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
121 gfs2_log_flush(gl->gl_sbd, gl); 124 gfs2_log_flush(gl->gl_sbd, gl);
122 gfs2_meta_sync(gl); 125 gfs2_meta_sync(gl);
123 gfs2_ail_empty_gl(gl); 126 gfs2_ail_empty_gl(gl);
124 } 127 }
125
126} 128}
127 129
128/** 130/**
@@ -142,6 +144,37 @@ static void meta_go_inval(struct gfs2_glock *gl, int flags)
142} 144}
143 145
144/** 146/**
147 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
148 * @gl: the glock protecting the inode
149 *
150 */
151
152static void inode_go_sync(struct gfs2_glock *gl)
153{
154 struct gfs2_inode *ip = gl->gl_object;
155
156 if (ip && !S_ISREG(ip->i_inode.i_mode))
157 ip = NULL;
158
159 if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
160 gfs2_log_flush(gl->gl_sbd, gl);
161 if (ip)
162 filemap_fdatawrite(ip->i_inode.i_mapping);
163 gfs2_meta_sync(gl);
164 if (ip) {
165 struct address_space *mapping = ip->i_inode.i_mapping;
166 int error = filemap_fdatawait(mapping);
167 if (error == -ENOSPC)
168 set_bit(AS_ENOSPC, &mapping->flags);
169 else if (error)
170 set_bit(AS_EIO, &mapping->flags);
171 }
172 clear_bit(GLF_DIRTY, &gl->gl_flags);
173 gfs2_ail_empty_gl(gl);
174 }
175}
176
177/**
145 * inode_go_xmote_th - promote/demote a glock 178 * inode_go_xmote_th - promote/demote a glock
146 * @gl: the glock 179 * @gl: the glock
147 * @state: the requested state 180 * @state: the requested state
@@ -149,12 +182,12 @@ static void meta_go_inval(struct gfs2_glock *gl, int flags)
149 * 182 *
150 */ 183 */
151 184
152static void inode_go_xmote_th(struct gfs2_glock *gl, unsigned int state, 185static void inode_go_xmote_th(struct gfs2_glock *gl)
153 int flags)
154{ 186{
155 if (gl->gl_state != LM_ST_UNLOCKED) 187 if (gl->gl_state != LM_ST_UNLOCKED)
156 gfs2_pte_inval(gl); 188 gfs2_pte_inval(gl);
157 gfs2_glock_xmote_th(gl, state, flags); 189 if (gl->gl_state == LM_ST_EXCLUSIVE)
190 inode_go_sync(gl);
158} 191}
159 192
160/** 193/**
@@ -189,38 +222,8 @@ static void inode_go_xmote_bh(struct gfs2_glock *gl)
189static void inode_go_drop_th(struct gfs2_glock *gl) 222static void inode_go_drop_th(struct gfs2_glock *gl)
190{ 223{
191 gfs2_pte_inval(gl); 224 gfs2_pte_inval(gl);
192 gfs2_glock_drop_th(gl); 225 if (gl->gl_state == LM_ST_EXCLUSIVE)
193} 226 inode_go_sync(gl);
194
195/**
196 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
197 * @gl: the glock protecting the inode
198 *
199 */
200
201static void inode_go_sync(struct gfs2_glock *gl)
202{
203 struct gfs2_inode *ip = gl->gl_object;
204
205 if (ip && !S_ISREG(ip->i_inode.i_mode))
206 ip = NULL;
207
208 if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
209 gfs2_log_flush(gl->gl_sbd, gl);
210 if (ip)
211 filemap_fdatawrite(ip->i_inode.i_mapping);
212 gfs2_meta_sync(gl);
213 if (ip) {
214 struct address_space *mapping = ip->i_inode.i_mapping;
215 int error = filemap_fdatawait(mapping);
216 if (error == -ENOSPC)
217 set_bit(AS_ENOSPC, &mapping->flags);
218 else if (error)
219 set_bit(AS_EIO, &mapping->flags);
220 }
221 clear_bit(GLF_DIRTY, &gl->gl_flags);
222 gfs2_ail_empty_gl(gl);
223 }
224} 227}
225 228
226/** 229/**
@@ -295,7 +298,7 @@ static int inode_go_lock(struct gfs2_holder *gh)
295 298
296 if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) && 299 if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
297 (gl->gl_state == LM_ST_EXCLUSIVE) && 300 (gl->gl_state == LM_ST_EXCLUSIVE) &&
298 (gh->gh_flags & GL_LOCAL_EXCL)) 301 (gh->gh_state == LM_ST_EXCLUSIVE))
299 error = gfs2_truncatei_resume(ip); 302 error = gfs2_truncatei_resume(ip);
300 303
301 return error; 304 return error;
@@ -319,39 +322,6 @@ static void inode_go_unlock(struct gfs2_holder *gh)
319} 322}
320 323
321/** 324/**
322 * inode_greedy -
323 * @gl: the glock
324 *
325 */
326
327static void inode_greedy(struct gfs2_glock *gl)
328{
329 struct gfs2_sbd *sdp = gl->gl_sbd;
330 struct gfs2_inode *ip = gl->gl_object;
331 unsigned int quantum = gfs2_tune_get(sdp, gt_greedy_quantum);
332 unsigned int max = gfs2_tune_get(sdp, gt_greedy_max);
333 unsigned int new_time;
334
335 spin_lock(&ip->i_spin);
336
337 if (time_after(ip->i_last_pfault + quantum, jiffies)) {
338 new_time = ip->i_greedy + quantum;
339 if (new_time > max)
340 new_time = max;
341 } else {
342 new_time = ip->i_greedy - quantum;
343 if (!new_time || new_time > max)
344 new_time = 1;
345 }
346
347 ip->i_greedy = new_time;
348
349 spin_unlock(&ip->i_spin);
350
351 iput(&ip->i_inode);
352}
353
354/**
355 * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock 325 * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
356 * @gl: the glock 326 * @gl: the glock
357 * 327 *
@@ -398,8 +368,7 @@ static void rgrp_go_unlock(struct gfs2_holder *gh)
398 * 368 *
399 */ 369 */
400 370
401static void trans_go_xmote_th(struct gfs2_glock *gl, unsigned int state, 371static void trans_go_xmote_th(struct gfs2_glock *gl)
402 int flags)
403{ 372{
404 struct gfs2_sbd *sdp = gl->gl_sbd; 373 struct gfs2_sbd *sdp = gl->gl_sbd;
405 374
@@ -408,8 +377,6 @@ static void trans_go_xmote_th(struct gfs2_glock *gl, unsigned int state,
408 gfs2_meta_syncfs(sdp); 377 gfs2_meta_syncfs(sdp);
409 gfs2_log_shutdown(sdp); 378 gfs2_log_shutdown(sdp);
410 } 379 }
411
412 gfs2_glock_xmote_th(gl, state, flags);
413} 380}
414 381
415/** 382/**
@@ -461,8 +428,6 @@ static void trans_go_drop_th(struct gfs2_glock *gl)
461 gfs2_meta_syncfs(sdp); 428 gfs2_meta_syncfs(sdp);
462 gfs2_log_shutdown(sdp); 429 gfs2_log_shutdown(sdp);
463 } 430 }
464
465 gfs2_glock_drop_th(gl);
466} 431}
467 432
468/** 433/**
@@ -478,8 +443,8 @@ static int quota_go_demote_ok(struct gfs2_glock *gl)
478} 443}
479 444
480const struct gfs2_glock_operations gfs2_meta_glops = { 445const struct gfs2_glock_operations gfs2_meta_glops = {
481 .go_xmote_th = gfs2_glock_xmote_th, 446 .go_xmote_th = meta_go_sync,
482 .go_drop_th = gfs2_glock_drop_th, 447 .go_drop_th = meta_go_sync,
483 .go_type = LM_TYPE_META, 448 .go_type = LM_TYPE_META,
484}; 449};
485 450
@@ -487,19 +452,14 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
487 .go_xmote_th = inode_go_xmote_th, 452 .go_xmote_th = inode_go_xmote_th,
488 .go_xmote_bh = inode_go_xmote_bh, 453 .go_xmote_bh = inode_go_xmote_bh,
489 .go_drop_th = inode_go_drop_th, 454 .go_drop_th = inode_go_drop_th,
490 .go_sync = inode_go_sync,
491 .go_inval = inode_go_inval, 455 .go_inval = inode_go_inval,
492 .go_demote_ok = inode_go_demote_ok, 456 .go_demote_ok = inode_go_demote_ok,
493 .go_lock = inode_go_lock, 457 .go_lock = inode_go_lock,
494 .go_unlock = inode_go_unlock, 458 .go_unlock = inode_go_unlock,
495 .go_greedy = inode_greedy,
496 .go_type = LM_TYPE_INODE, 459 .go_type = LM_TYPE_INODE,
497}; 460};
498 461
499const struct gfs2_glock_operations gfs2_rgrp_glops = { 462const struct gfs2_glock_operations gfs2_rgrp_glops = {
500 .go_xmote_th = gfs2_glock_xmote_th,
501 .go_drop_th = gfs2_glock_drop_th,
502 .go_sync = meta_go_sync,
503 .go_inval = meta_go_inval, 463 .go_inval = meta_go_inval,
504 .go_demote_ok = rgrp_go_demote_ok, 464 .go_demote_ok = rgrp_go_demote_ok,
505 .go_lock = rgrp_go_lock, 465 .go_lock = rgrp_go_lock,
@@ -515,33 +475,23 @@ const struct gfs2_glock_operations gfs2_trans_glops = {
515}; 475};
516 476
517const struct gfs2_glock_operations gfs2_iopen_glops = { 477const struct gfs2_glock_operations gfs2_iopen_glops = {
518 .go_xmote_th = gfs2_glock_xmote_th,
519 .go_drop_th = gfs2_glock_drop_th,
520 .go_type = LM_TYPE_IOPEN, 478 .go_type = LM_TYPE_IOPEN,
521}; 479};
522 480
523const struct gfs2_glock_operations gfs2_flock_glops = { 481const struct gfs2_glock_operations gfs2_flock_glops = {
524 .go_xmote_th = gfs2_glock_xmote_th,
525 .go_drop_th = gfs2_glock_drop_th,
526 .go_type = LM_TYPE_FLOCK, 482 .go_type = LM_TYPE_FLOCK,
527}; 483};
528 484
529const struct gfs2_glock_operations gfs2_nondisk_glops = { 485const struct gfs2_glock_operations gfs2_nondisk_glops = {
530 .go_xmote_th = gfs2_glock_xmote_th,
531 .go_drop_th = gfs2_glock_drop_th,
532 .go_type = LM_TYPE_NONDISK, 486 .go_type = LM_TYPE_NONDISK,
533}; 487};
534 488
535const struct gfs2_glock_operations gfs2_quota_glops = { 489const struct gfs2_glock_operations gfs2_quota_glops = {
536 .go_xmote_th = gfs2_glock_xmote_th,
537 .go_drop_th = gfs2_glock_drop_th,
538 .go_demote_ok = quota_go_demote_ok, 490 .go_demote_ok = quota_go_demote_ok,
539 .go_type = LM_TYPE_QUOTA, 491 .go_type = LM_TYPE_QUOTA,
540}; 492};
541 493
542const struct gfs2_glock_operations gfs2_journal_glops = { 494const struct gfs2_glock_operations gfs2_journal_glops = {
543 .go_xmote_th = gfs2_glock_xmote_th,
544 .go_drop_th = gfs2_glock_drop_th,
545 .go_type = LM_TYPE_JOURNAL, 495 .go_type = LM_TYPE_JOURNAL,
546}; 496};
547 497
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 734421edae85..12c80fd28db5 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -101,17 +101,14 @@ struct gfs2_bufdata {
101}; 101};
102 102
103struct gfs2_glock_operations { 103struct gfs2_glock_operations {
104 void (*go_xmote_th) (struct gfs2_glock *gl, unsigned int state, int flags); 104 void (*go_xmote_th) (struct gfs2_glock *gl);
105 void (*go_xmote_bh) (struct gfs2_glock *gl); 105 void (*go_xmote_bh) (struct gfs2_glock *gl);
106 void (*go_drop_th) (struct gfs2_glock *gl); 106 void (*go_drop_th) (struct gfs2_glock *gl);
107 void (*go_drop_bh) (struct gfs2_glock *gl); 107 void (*go_drop_bh) (struct gfs2_glock *gl);
108 void (*go_sync) (struct gfs2_glock *gl);
109 void (*go_inval) (struct gfs2_glock *gl, int flags); 108 void (*go_inval) (struct gfs2_glock *gl, int flags);
110 int (*go_demote_ok) (struct gfs2_glock *gl); 109 int (*go_demote_ok) (struct gfs2_glock *gl);
111 int (*go_lock) (struct gfs2_holder *gh); 110 int (*go_lock) (struct gfs2_holder *gh);
112 void (*go_unlock) (struct gfs2_holder *gh); 111 void (*go_unlock) (struct gfs2_holder *gh);
113 void (*go_callback) (struct gfs2_glock *gl, unsigned int state);
114 void (*go_greedy) (struct gfs2_glock *gl);
115 const int go_type; 112 const int go_type;
116}; 113};
117 114
@@ -120,7 +117,6 @@ enum {
120 HIF_MUTEX = 0, 117 HIF_MUTEX = 0,
121 HIF_PROMOTE = 1, 118 HIF_PROMOTE = 1,
122 HIF_DEMOTE = 2, 119 HIF_DEMOTE = 2,
123 HIF_GREEDY = 3,
124 120
125 /* States */ 121 /* States */
126 HIF_ALLOCED = 4, 122 HIF_ALLOCED = 4,
@@ -128,6 +124,7 @@ enum {
128 HIF_HOLDER = 6, 124 HIF_HOLDER = 6,
129 HIF_FIRST = 7, 125 HIF_FIRST = 7,
130 HIF_ABORTED = 9, 126 HIF_ABORTED = 9,
127 HIF_WAIT = 10,
131}; 128};
132 129
133struct gfs2_holder { 130struct gfs2_holder {
@@ -140,17 +137,14 @@ struct gfs2_holder {
140 137
141 int gh_error; 138 int gh_error;
142 unsigned long gh_iflags; 139 unsigned long gh_iflags;
143 struct completion gh_wait;
144 unsigned long gh_ip; 140 unsigned long gh_ip;
145}; 141};
146 142
147enum { 143enum {
148 GLF_LOCK = 1, 144 GLF_LOCK = 1,
149 GLF_STICKY = 2, 145 GLF_STICKY = 2,
150 GLF_PREFETCH = 3,
151 GLF_DIRTY = 5, 146 GLF_DIRTY = 5,
152 GLF_SKIP_WAITERS2 = 6, 147 GLF_SKIP_WAITERS2 = 6,
153 GLF_GREEDY = 7,
154}; 148};
155 149
156struct gfs2_glock { 150struct gfs2_glock {
@@ -167,7 +161,7 @@ struct gfs2_glock {
167 unsigned long gl_ip; 161 unsigned long gl_ip;
168 struct list_head gl_holders; 162 struct list_head gl_holders;
169 struct list_head gl_waiters1; /* HIF_MUTEX */ 163 struct list_head gl_waiters1; /* HIF_MUTEX */
170 struct list_head gl_waiters2; /* HIF_DEMOTE, HIF_GREEDY */ 164 struct list_head gl_waiters2; /* HIF_DEMOTE */
171 struct list_head gl_waiters3; /* HIF_PROMOTE */ 165 struct list_head gl_waiters3; /* HIF_PROMOTE */
172 166
173 const struct gfs2_glock_operations *gl_ops; 167 const struct gfs2_glock_operations *gl_ops;
@@ -236,7 +230,6 @@ struct gfs2_inode {
236 230
237 spinlock_t i_spin; 231 spinlock_t i_spin;
238 struct rw_semaphore i_rw_mutex; 232 struct rw_semaphore i_rw_mutex;
239 unsigned int i_greedy;
240 unsigned long i_last_pfault; 233 unsigned long i_last_pfault;
241 234
242 struct buffer_head *i_cache[GFS2_MAX_META_HEIGHT]; 235 struct buffer_head *i_cache[GFS2_MAX_META_HEIGHT];
@@ -418,17 +411,12 @@ struct gfs2_tune {
418 unsigned int gt_atime_quantum; /* Min secs between atime updates */ 411 unsigned int gt_atime_quantum; /* Min secs between atime updates */
419 unsigned int gt_new_files_jdata; 412 unsigned int gt_new_files_jdata;
420 unsigned int gt_new_files_directio; 413 unsigned int gt_new_files_directio;
421 unsigned int gt_max_atomic_write; /* Split big writes into this size */
422 unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */ 414 unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */
423 unsigned int gt_lockdump_size; 415 unsigned int gt_lockdump_size;
424 unsigned int gt_stall_secs; /* Detects trouble! */ 416 unsigned int gt_stall_secs; /* Detects trouble! */
425 unsigned int gt_complain_secs; 417 unsigned int gt_complain_secs;
426 unsigned int gt_reclaim_limit; /* Max num of glocks in reclaim list */ 418 unsigned int gt_reclaim_limit; /* Max num of glocks in reclaim list */
427 unsigned int gt_entries_per_readdir; 419 unsigned int gt_entries_per_readdir;
428 unsigned int gt_prefetch_secs; /* Usage window for prefetched glocks */
429 unsigned int gt_greedy_default;
430 unsigned int gt_greedy_quantum;
431 unsigned int gt_greedy_max;
432 unsigned int gt_statfs_quantum; 420 unsigned int gt_statfs_quantum;
433 unsigned int gt_statfs_slow; 421 unsigned int gt_statfs_slow;
434}; 422};
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index d122074c45e1..0d6831a40565 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -287,10 +287,8 @@ out:
287 * 287 *
288 * Returns: errno 288 * Returns: errno
289 */ 289 */
290
291int gfs2_change_nlink(struct gfs2_inode *ip, int diff) 290int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
292{ 291{
293 struct gfs2_sbd *sdp = ip->i_inode.i_sb->s_fs_info;
294 struct buffer_head *dibh; 292 struct buffer_head *dibh;
295 u32 nlink; 293 u32 nlink;
296 int error; 294 int error;
@@ -315,42 +313,34 @@ int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
315 else 313 else
316 drop_nlink(&ip->i_inode); 314 drop_nlink(&ip->i_inode);
317 315
318 ip->i_inode.i_ctime.tv_sec = get_seconds(); 316 ip->i_inode.i_ctime = CURRENT_TIME_SEC;
319 317
320 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 318 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
321 gfs2_dinode_out(ip, dibh->b_data); 319 gfs2_dinode_out(ip, dibh->b_data);
322 brelse(dibh); 320 brelse(dibh);
323 mark_inode_dirty(&ip->i_inode); 321 mark_inode_dirty(&ip->i_inode);
324 322
325 if (ip->i_inode.i_nlink == 0) { 323 if (ip->i_inode.i_nlink == 0)
326 struct gfs2_rgrpd *rgd;
327 struct gfs2_holder ri_gh, rg_gh;
328
329 error = gfs2_rindex_hold(sdp, &ri_gh);
330 if (error)
331 goto out;
332 error = -EIO;
333 rgd = gfs2_blk2rgrpd(sdp, ip->i_num.no_addr);
334 if (!rgd)
335 goto out_norgrp;
336 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
337 if (error)
338 goto out_norgrp;
339
340 gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */ 324 gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */
341 gfs2_glock_dq_uninit(&rg_gh); 325
342out_norgrp:
343 gfs2_glock_dq_uninit(&ri_gh);
344 }
345out:
346 return error; 326 return error;
347} 327}
348 328
349struct inode *gfs2_lookup_simple(struct inode *dip, const char *name) 329struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
350{ 330{
351 struct qstr qstr; 331 struct qstr qstr;
332 struct inode *inode;
352 gfs2_str2qstr(&qstr, name); 333 gfs2_str2qstr(&qstr, name);
353 return gfs2_lookupi(dip, &qstr, 1, NULL); 334 inode = gfs2_lookupi(dip, &qstr, 1, NULL);
335 /* gfs2_lookupi has inconsistent callers: vfs
336 * related routines expect NULL for no entry found,
337 * gfs2_lookup_simple callers expect ENOENT
338 * and do not check for NULL.
339 */
340 if (inode == NULL)
341 return ERR_PTR(-ENOENT);
342 else
343 return inode;
354} 344}
355 345
356 346
@@ -361,8 +351,10 @@ struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
361 * @is_root: If 1, ignore the caller's permissions 351 * @is_root: If 1, ignore the caller's permissions
362 * @i_gh: An uninitialized holder for the new inode glock 352 * @i_gh: An uninitialized holder for the new inode glock
363 * 353 *
364 * There will always be a vnode (Linux VFS inode) for the d_gh inode unless 354 * This can be called via the VFS filldir function when NFS is doing
365 * @is_root is true. 355 * a readdirplus and the inode which its intending to stat isn't
356 * already in cache. In this case we must not take the directory glock
357 * again, since the readdir call will have already taken that lock.
366 * 358 *
367 * Returns: errno 359 * Returns: errno
368 */ 360 */
@@ -375,8 +367,9 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
375 struct gfs2_holder d_gh; 367 struct gfs2_holder d_gh;
376 struct gfs2_inum_host inum; 368 struct gfs2_inum_host inum;
377 unsigned int type; 369 unsigned int type;
378 int error = 0; 370 int error;
379 struct inode *inode = NULL; 371 struct inode *inode = NULL;
372 int unlock = 0;
380 373
381 if (!name->len || name->len > GFS2_FNAMESIZE) 374 if (!name->len || name->len > GFS2_FNAMESIZE)
382 return ERR_PTR(-ENAMETOOLONG); 375 return ERR_PTR(-ENAMETOOLONG);
@@ -388,9 +381,12 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
388 return dir; 381 return dir;
389 } 382 }
390 383
391 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh); 384 if (gfs2_glock_is_locked_by_me(dip->i_gl) == 0) {
392 if (error) 385 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
393 return ERR_PTR(error); 386 if (error)
387 return ERR_PTR(error);
388 unlock = 1;
389 }
394 390
395 if (!is_root) { 391 if (!is_root) {
396 error = permission(dir, MAY_EXEC, NULL); 392 error = permission(dir, MAY_EXEC, NULL);
@@ -405,10 +401,11 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
405 inode = gfs2_inode_lookup(sb, &inum, type); 401 inode = gfs2_inode_lookup(sb, &inum, type);
406 402
407out: 403out:
408 gfs2_glock_dq_uninit(&d_gh); 404 if (unlock)
405 gfs2_glock_dq_uninit(&d_gh);
409 if (error == -ENOENT) 406 if (error == -ENOENT)
410 return NULL; 407 return NULL;
411 return inode; 408 return inode ? inode : ERR_PTR(error);
412} 409}
413 410
414static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino) 411static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino)
diff --git a/fs/gfs2/lm.c b/fs/gfs2/lm.c
index effe4a337c1d..e30673dd37e0 100644
--- a/fs/gfs2/lm.c
+++ b/fs/gfs2/lm.c
@@ -104,15 +104,9 @@ int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...)
104 vprintk(fmt, args); 104 vprintk(fmt, args);
105 va_end(args); 105 va_end(args);
106 106
107 fs_err(sdp, "about to withdraw from the cluster\n"); 107 fs_err(sdp, "about to withdraw this file system\n");
108 BUG_ON(sdp->sd_args.ar_debug); 108 BUG_ON(sdp->sd_args.ar_debug);
109 109
110
111 fs_err(sdp, "waiting for outstanding I/O\n");
112
113 /* FIXME: suspend dm device so oustanding bio's complete
114 and all further io requests fail */
115
116 fs_err(sdp, "telling LM to withdraw\n"); 110 fs_err(sdp, "telling LM to withdraw\n");
117 gfs2_withdraw_lockproto(&sdp->sd_lockstruct); 111 gfs2_withdraw_lockproto(&sdp->sd_lockstruct);
118 fs_err(sdp, "withdrawn\n"); 112 fs_err(sdp, "withdrawn\n");
diff --git a/fs/gfs2/locking/dlm/lock_dlm.h b/fs/gfs2/locking/dlm/lock_dlm.h
index 33af707a4d3f..a87c7bf3c568 100644
--- a/fs/gfs2/locking/dlm/lock_dlm.h
+++ b/fs/gfs2/locking/dlm/lock_dlm.h
@@ -36,7 +36,7 @@
36 36
37#define GDLM_STRNAME_BYTES 24 37#define GDLM_STRNAME_BYTES 24
38#define GDLM_LVB_SIZE 32 38#define GDLM_LVB_SIZE 32
39#define GDLM_DROP_COUNT 50000 39#define GDLM_DROP_COUNT 200000
40#define GDLM_DROP_PERIOD 60 40#define GDLM_DROP_PERIOD 60
41#define GDLM_NAME_LEN 128 41#define GDLM_NAME_LEN 128
42 42
diff --git a/fs/gfs2/locking/dlm/main.c b/fs/gfs2/locking/dlm/main.c
index 2194b1d5b5ec..a0e7eda643ed 100644
--- a/fs/gfs2/locking/dlm/main.c
+++ b/fs/gfs2/locking/dlm/main.c
@@ -11,9 +11,6 @@
11 11
12#include "lock_dlm.h" 12#include "lock_dlm.h"
13 13
14extern int gdlm_drop_count;
15extern int gdlm_drop_period;
16
17extern struct lm_lockops gdlm_ops; 14extern struct lm_lockops gdlm_ops;
18 15
19static int __init init_lock_dlm(void) 16static int __init init_lock_dlm(void)
@@ -40,9 +37,6 @@ static int __init init_lock_dlm(void)
40 return error; 37 return error;
41 } 38 }
42 39
43 gdlm_drop_count = GDLM_DROP_COUNT;
44 gdlm_drop_period = GDLM_DROP_PERIOD;
45
46 printk(KERN_INFO 40 printk(KERN_INFO
47 "Lock_DLM (built %s %s) installed\n", __DATE__, __TIME__); 41 "Lock_DLM (built %s %s) installed\n", __DATE__, __TIME__);
48 return 0; 42 return 0;
diff --git a/fs/gfs2/locking/dlm/mount.c b/fs/gfs2/locking/dlm/mount.c
index cdd1694e889b..1d8faa3da8af 100644
--- a/fs/gfs2/locking/dlm/mount.c
+++ b/fs/gfs2/locking/dlm/mount.c
@@ -9,8 +9,6 @@
9 9
10#include "lock_dlm.h" 10#include "lock_dlm.h"
11 11
12int gdlm_drop_count;
13int gdlm_drop_period;
14const struct lm_lockops gdlm_ops; 12const struct lm_lockops gdlm_ops;
15 13
16 14
@@ -24,8 +22,8 @@ static struct gdlm_ls *init_gdlm(lm_callback_t cb, struct gfs2_sbd *sdp,
24 if (!ls) 22 if (!ls)
25 return NULL; 23 return NULL;
26 24
27 ls->drop_locks_count = gdlm_drop_count; 25 ls->drop_locks_count = GDLM_DROP_COUNT;
28 ls->drop_locks_period = gdlm_drop_period; 26 ls->drop_locks_period = GDLM_DROP_PERIOD;
29 ls->fscb = cb; 27 ls->fscb = cb;
30 ls->sdp = sdp; 28 ls->sdp = sdp;
31 ls->fsflags = flags; 29 ls->fsflags = flags;
diff --git a/fs/gfs2/locking/dlm/sysfs.c b/fs/gfs2/locking/dlm/sysfs.c
index 29ae06f94944..4746b884662d 100644
--- a/fs/gfs2/locking/dlm/sysfs.c
+++ b/fs/gfs2/locking/dlm/sysfs.c
@@ -116,6 +116,17 @@ static ssize_t recover_status_show(struct gdlm_ls *ls, char *buf)
116 return sprintf(buf, "%d\n", ls->recover_jid_status); 116 return sprintf(buf, "%d\n", ls->recover_jid_status);
117} 117}
118 118
119static ssize_t drop_count_show(struct gdlm_ls *ls, char *buf)
120{
121 return sprintf(buf, "%d\n", ls->drop_locks_count);
122}
123
124static ssize_t drop_count_store(struct gdlm_ls *ls, const char *buf, size_t len)
125{
126 ls->drop_locks_count = simple_strtol(buf, NULL, 0);
127 return len;
128}
129
119struct gdlm_attr { 130struct gdlm_attr {
120 struct attribute attr; 131 struct attribute attr;
121 ssize_t (*show)(struct gdlm_ls *, char *); 132 ssize_t (*show)(struct gdlm_ls *, char *);
@@ -135,6 +146,7 @@ GDLM_ATTR(first_done, 0444, first_done_show, NULL);
135GDLM_ATTR(recover, 0644, recover_show, recover_store); 146GDLM_ATTR(recover, 0644, recover_show, recover_store);
136GDLM_ATTR(recover_done, 0444, recover_done_show, NULL); 147GDLM_ATTR(recover_done, 0444, recover_done_show, NULL);
137GDLM_ATTR(recover_status, 0444, recover_status_show, NULL); 148GDLM_ATTR(recover_status, 0444, recover_status_show, NULL);
149GDLM_ATTR(drop_count, 0644, drop_count_show, drop_count_store);
138 150
139static struct attribute *gdlm_attrs[] = { 151static struct attribute *gdlm_attrs[] = {
140 &gdlm_attr_proto_name.attr, 152 &gdlm_attr_proto_name.attr,
@@ -147,6 +159,7 @@ static struct attribute *gdlm_attrs[] = {
147 &gdlm_attr_recover.attr, 159 &gdlm_attr_recover.attr,
148 &gdlm_attr_recover_done.attr, 160 &gdlm_attr_recover_done.attr,
149 &gdlm_attr_recover_status.attr, 161 &gdlm_attr_recover_status.attr,
162 &gdlm_attr_drop_count.attr,
150 NULL, 163 NULL,
151}; 164};
152 165
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 4d7f94d8c7bd..16bb4b4561ae 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -69,13 +69,16 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
69 struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le); 69 struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
70 struct gfs2_trans *tr; 70 struct gfs2_trans *tr;
71 71
72 if (!list_empty(&bd->bd_list_tr)) 72 gfs2_log_lock(sdp);
73 if (!list_empty(&bd->bd_list_tr)) {
74 gfs2_log_unlock(sdp);
73 return; 75 return;
74 76 }
75 tr = current->journal_info; 77 tr = current->journal_info;
76 tr->tr_touched = 1; 78 tr->tr_touched = 1;
77 tr->tr_num_buf++; 79 tr->tr_num_buf++;
78 list_add(&bd->bd_list_tr, &tr->tr_list_buf); 80 list_add(&bd->bd_list_tr, &tr->tr_list_buf);
81 gfs2_log_unlock(sdp);
79 82
80 if (!list_empty(&le->le_list)) 83 if (!list_empty(&le->le_list))
81 return; 84 return;
@@ -84,7 +87,6 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
84 87
85 gfs2_meta_check(sdp, bd->bd_bh); 88 gfs2_meta_check(sdp, bd->bd_bh);
86 gfs2_pin(sdp, bd->bd_bh); 89 gfs2_pin(sdp, bd->bd_bh);
87
88 gfs2_log_lock(sdp); 90 gfs2_log_lock(sdp);
89 sdp->sd_log_num_buf++; 91 sdp->sd_log_num_buf++;
90 list_add(&le->le_list, &sdp->sd_log_le_buf); 92 list_add(&le->le_list, &sdp->sd_log_le_buf);
@@ -98,11 +100,13 @@ static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
98 struct list_head *head = &tr->tr_list_buf; 100 struct list_head *head = &tr->tr_list_buf;
99 struct gfs2_bufdata *bd; 101 struct gfs2_bufdata *bd;
100 102
103 gfs2_log_lock(sdp);
101 while (!list_empty(head)) { 104 while (!list_empty(head)) {
102 bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr); 105 bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
103 list_del_init(&bd->bd_list_tr); 106 list_del_init(&bd->bd_list_tr);
104 tr->tr_num_buf--; 107 tr->tr_num_buf--;
105 } 108 }
109 gfs2_log_unlock(sdp);
106 gfs2_assert_warn(sdp, !tr->tr_num_buf); 110 gfs2_assert_warn(sdp, !tr->tr_num_buf);
107} 111}
108 112
@@ -462,13 +466,17 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
462 struct address_space *mapping = bd->bd_bh->b_page->mapping; 466 struct address_space *mapping = bd->bd_bh->b_page->mapping;
463 struct gfs2_inode *ip = GFS2_I(mapping->host); 467 struct gfs2_inode *ip = GFS2_I(mapping->host);
464 468
469 gfs2_log_lock(sdp);
465 tr->tr_touched = 1; 470 tr->tr_touched = 1;
466 if (list_empty(&bd->bd_list_tr) && 471 if (list_empty(&bd->bd_list_tr) &&
467 (ip->i_di.di_flags & GFS2_DIF_JDATA)) { 472 (ip->i_di.di_flags & GFS2_DIF_JDATA)) {
468 tr->tr_num_buf++; 473 tr->tr_num_buf++;
469 list_add(&bd->bd_list_tr, &tr->tr_list_buf); 474 list_add(&bd->bd_list_tr, &tr->tr_list_buf);
475 gfs2_log_unlock(sdp);
470 gfs2_pin(sdp, bd->bd_bh); 476 gfs2_pin(sdp, bd->bd_bh);
471 tr->tr_num_buf_new++; 477 tr->tr_num_buf_new++;
478 } else {
479 gfs2_log_unlock(sdp);
472 } 480 }
473 gfs2_trans_add_gl(bd->bd_gl); 481 gfs2_trans_add_gl(bd->bd_gl);
474 gfs2_log_lock(sdp); 482 gfs2_log_lock(sdp);
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index d8d69a72a10d..56e33590b656 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -16,6 +16,7 @@
16#include <linux/pagevec.h> 16#include <linux/pagevec.h>
17#include <linux/mpage.h> 17#include <linux/mpage.h>
18#include <linux/fs.h> 18#include <linux/fs.h>
19#include <linux/writeback.h>
19#include <linux/gfs2_ondisk.h> 20#include <linux/gfs2_ondisk.h>
20#include <linux/lm_interface.h> 21#include <linux/lm_interface.h>
21 22
@@ -157,6 +158,32 @@ out_ignore:
157} 158}
158 159
159/** 160/**
161 * gfs2_writepages - Write a bunch of dirty pages back to disk
162 * @mapping: The mapping to write
163 * @wbc: Write-back control
164 *
165 * For journaled files and/or ordered writes this just falls back to the
166 * kernel's default writepages path for now. We will probably want to change
167 * that eventually (i.e. when we look at allocate on flush).
168 *
169 * For the data=writeback case though we can already ignore buffer heads
170 * and write whole extents at once. This is a big reduction in the
171 * number of I/O requests we send and the bmap calls we make in this case.
172 */
173static int gfs2_writepages(struct address_space *mapping,
174 struct writeback_control *wbc)
175{
176 struct inode *inode = mapping->host;
177 struct gfs2_inode *ip = GFS2_I(inode);
178 struct gfs2_sbd *sdp = GFS2_SB(inode);
179
180 if (sdp->sd_args.ar_data == GFS2_DATA_WRITEBACK && !gfs2_is_jdata(ip))
181 return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
182
183 return generic_writepages(mapping, wbc);
184}
185
186/**
160 * stuffed_readpage - Fill in a Linux page with stuffed file data 187 * stuffed_readpage - Fill in a Linux page with stuffed file data
161 * @ip: the inode 188 * @ip: the inode
162 * @page: the page 189 * @page: the page
@@ -256,7 +283,7 @@ out_unlock:
256 * the page lock and the glock) and return having done no I/O. Its 283 * the page lock and the glock) and return having done no I/O. Its
257 * obviously not something we'd want to do on too regular a basis. 284 * obviously not something we'd want to do on too regular a basis.
258 * Any I/O we ignore at this time will be done via readpage later. 285 * Any I/O we ignore at this time will be done via readpage later.
259 * 2. We have to handle stuffed files here too. 286 * 2. We don't handle stuffed files here we let readpage do the honours.
260 * 3. mpage_readpages() does most of the heavy lifting in the common case. 287 * 3. mpage_readpages() does most of the heavy lifting in the common case.
261 * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places. 288 * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places.
262 * 5. We use LM_FLAG_TRY_1CB here, effectively we then have lock-ahead as 289 * 5. We use LM_FLAG_TRY_1CB here, effectively we then have lock-ahead as
@@ -269,8 +296,7 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
269 struct gfs2_inode *ip = GFS2_I(inode); 296 struct gfs2_inode *ip = GFS2_I(inode);
270 struct gfs2_sbd *sdp = GFS2_SB(inode); 297 struct gfs2_sbd *sdp = GFS2_SB(inode);
271 struct gfs2_holder gh; 298 struct gfs2_holder gh;
272 unsigned page_idx; 299 int ret = 0;
273 int ret;
274 int do_unlock = 0; 300 int do_unlock = 0;
275 301
276 if (likely(file != &gfs2_internal_file_sentinel)) { 302 if (likely(file != &gfs2_internal_file_sentinel)) {
@@ -289,29 +315,8 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
289 goto out_unlock; 315 goto out_unlock;
290 } 316 }
291skip_lock: 317skip_lock:
292 if (gfs2_is_stuffed(ip)) { 318 if (!gfs2_is_stuffed(ip))
293 struct pagevec lru_pvec;
294 pagevec_init(&lru_pvec, 0);
295 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
296 struct page *page = list_entry(pages->prev, struct page, lru);
297 prefetchw(&page->flags);
298 list_del(&page->lru);
299 if (!add_to_page_cache(page, mapping,
300 page->index, GFP_KERNEL)) {
301 ret = stuffed_readpage(ip, page);
302 unlock_page(page);
303 if (!pagevec_add(&lru_pvec, page))
304 __pagevec_lru_add(&lru_pvec);
305 } else {
306 page_cache_release(page);
307 }
308 }
309 pagevec_lru_add(&lru_pvec);
310 ret = 0;
311 } else {
312 /* What we really want to do .... */
313 ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block); 319 ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block);
314 }
315 320
316 if (do_unlock) { 321 if (do_unlock) {
317 gfs2_glock_dq_m(1, &gh); 322 gfs2_glock_dq_m(1, &gh);
@@ -356,8 +361,10 @@ static int gfs2_prepare_write(struct file *file, struct page *page,
356 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|LM_FLAG_TRY_1CB, &ip->i_gh); 361 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|LM_FLAG_TRY_1CB, &ip->i_gh);
357 error = gfs2_glock_nq_atime(&ip->i_gh); 362 error = gfs2_glock_nq_atime(&ip->i_gh);
358 if (unlikely(error)) { 363 if (unlikely(error)) {
359 if (error == GLR_TRYFAILED) 364 if (error == GLR_TRYFAILED) {
365 unlock_page(page);
360 error = AOP_TRUNCATED_PAGE; 366 error = AOP_TRUNCATED_PAGE;
367 }
361 goto out_uninit; 368 goto out_uninit;
362 } 369 }
363 370
@@ -594,6 +601,36 @@ static void gfs2_invalidatepage(struct page *page, unsigned long offset)
594 return; 601 return;
595} 602}
596 603
604/**
605 * gfs2_ok_for_dio - check that dio is valid on this file
606 * @ip: The inode
607 * @rw: READ or WRITE
608 * @offset: The offset at which we are reading or writing
609 *
610 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
611 * 1 (to accept the i/o request)
612 */
613static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
614{
615 /*
616 * Should we return an error here? I can't see that O_DIRECT for
617 * a journaled file makes any sense. For now we'll silently fall
618 * back to buffered I/O, likewise we do the same for stuffed
619 * files since they are (a) small and (b) unaligned.
620 */
621 if (gfs2_is_jdata(ip))
622 return 0;
623
624 if (gfs2_is_stuffed(ip))
625 return 0;
626
627 if (offset > i_size_read(&ip->i_inode))
628 return 0;
629 return 1;
630}
631
632
633
597static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb, 634static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
598 const struct iovec *iov, loff_t offset, 635 const struct iovec *iov, loff_t offset,
599 unsigned long nr_segs) 636 unsigned long nr_segs)
@@ -604,42 +641,28 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
604 struct gfs2_holder gh; 641 struct gfs2_holder gh;
605 int rv; 642 int rv;
606 643
607 if (rw == READ)
608 mutex_lock(&inode->i_mutex);
609 /* 644 /*
610 * Shared lock, even if its a write, since we do no allocation 645 * Deferred lock, even if its a write, since we do no allocation
611 * on this path. All we need change is atime. 646 * on this path. All we need change is atime, and this lock mode
647 * ensures that other nodes have flushed their buffered read caches
648 * (i.e. their page cache entries for this inode). We do not,
649 * unfortunately have the option of only flushing a range like
650 * the VFS does.
612 */ 651 */
613 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh); 652 gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, GL_ATIME, &gh);
614 rv = gfs2_glock_nq_atime(&gh); 653 rv = gfs2_glock_nq_atime(&gh);
615 if (rv) 654 if (rv)
616 goto out; 655 return rv;
617 656 rv = gfs2_ok_for_dio(ip, rw, offset);
618 if (offset > i_size_read(inode)) 657 if (rv != 1)
619 goto out; 658 goto out; /* dio not valid, fall back to buffered i/o */
620 659
621 /* 660 rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev,
622 * Should we return an error here? I can't see that O_DIRECT for 661 iov, offset, nr_segs,
623 * a journaled file makes any sense. For now we'll silently fall 662 gfs2_get_block_direct, NULL);
624 * back to buffered I/O, likewise we do the same for stuffed
625 * files since they are (a) small and (b) unaligned.
626 */
627 if (gfs2_is_jdata(ip))
628 goto out;
629
630 if (gfs2_is_stuffed(ip))
631 goto out;
632
633 rv = blockdev_direct_IO_own_locking(rw, iocb, inode,
634 inode->i_sb->s_bdev,
635 iov, offset, nr_segs,
636 gfs2_get_block_direct, NULL);
637out: 663out:
638 gfs2_glock_dq_m(1, &gh); 664 gfs2_glock_dq_m(1, &gh);
639 gfs2_holder_uninit(&gh); 665 gfs2_holder_uninit(&gh);
640 if (rw == READ)
641 mutex_unlock(&inode->i_mutex);
642
643 return rv; 666 return rv;
644} 667}
645 668
@@ -763,6 +786,7 @@ out:
763 786
764const struct address_space_operations gfs2_file_aops = { 787const struct address_space_operations gfs2_file_aops = {
765 .writepage = gfs2_writepage, 788 .writepage = gfs2_writepage,
789 .writepages = gfs2_writepages,
766 .readpage = gfs2_readpage, 790 .readpage = gfs2_readpage,
767 .readpages = gfs2_readpages, 791 .readpages = gfs2_readpages,
768 .sync_page = block_sync_page, 792 .sync_page = block_sync_page,
diff --git a/fs/gfs2/ops_dentry.c b/fs/gfs2/ops_dentry.c
index d355899585d8..9187eb174b43 100644
--- a/fs/gfs2/ops_dentry.c
+++ b/fs/gfs2/ops_dentry.c
@@ -46,6 +46,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
46 struct gfs2_inum_host inum; 46 struct gfs2_inum_host inum;
47 unsigned int type; 47 unsigned int type;
48 int error; 48 int error;
49 int had_lock=0;
49 50
50 if (inode && is_bad_inode(inode)) 51 if (inode && is_bad_inode(inode))
51 goto invalid; 52 goto invalid;
@@ -53,9 +54,12 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
53 if (sdp->sd_args.ar_localcaching) 54 if (sdp->sd_args.ar_localcaching)
54 goto valid; 55 goto valid;
55 56
56 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh); 57 had_lock = gfs2_glock_is_locked_by_me(dip->i_gl);
57 if (error) 58 if (!had_lock) {
58 goto fail; 59 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
60 if (error)
61 goto fail;
62 }
59 63
60 error = gfs2_dir_search(parent->d_inode, &dentry->d_name, &inum, &type); 64 error = gfs2_dir_search(parent->d_inode, &dentry->d_name, &inum, &type);
61 switch (error) { 65 switch (error) {
@@ -82,13 +86,15 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
82 } 86 }
83 87
84valid_gunlock: 88valid_gunlock:
85 gfs2_glock_dq_uninit(&d_gh); 89 if (!had_lock)
90 gfs2_glock_dq_uninit(&d_gh);
86valid: 91valid:
87 dput(parent); 92 dput(parent);
88 return 1; 93 return 1;
89 94
90invalid_gunlock: 95invalid_gunlock:
91 gfs2_glock_dq_uninit(&d_gh); 96 if (!had_lock)
97 gfs2_glock_dq_uninit(&d_gh);
92invalid: 98invalid:
93 if (inode && S_ISDIR(inode->i_mode)) { 99 if (inode && S_ISDIR(inode->i_mode)) {
94 if (have_submounts(dentry)) 100 if (have_submounts(dentry))
diff --git a/fs/gfs2/ops_export.c b/fs/gfs2/ops_export.c
index b4e7b8775315..4855e8cca622 100644
--- a/fs/gfs2/ops_export.c
+++ b/fs/gfs2/ops_export.c
@@ -22,6 +22,7 @@
22#include "glock.h" 22#include "glock.h"
23#include "glops.h" 23#include "glops.h"
24#include "inode.h" 24#include "inode.h"
25#include "ops_dentry.h"
25#include "ops_export.h" 26#include "ops_export.h"
26#include "rgrp.h" 27#include "rgrp.h"
27#include "util.h" 28#include "util.h"
@@ -112,13 +113,12 @@ struct get_name_filldir {
112 char *name; 113 char *name;
113}; 114};
114 115
115static int get_name_filldir(void *opaque, const char *name, unsigned int length, 116static int get_name_filldir(void *opaque, const char *name, int length,
116 u64 offset, struct gfs2_inum_host *inum, 117 loff_t offset, u64 inum, unsigned int type)
117 unsigned int type)
118{ 118{
119 struct get_name_filldir *gnfd = (struct get_name_filldir *)opaque; 119 struct get_name_filldir *gnfd = opaque;
120 120
121 if (!gfs2_inum_equal(inum, &gnfd->inum)) 121 if (inum != gnfd->inum.no_addr)
122 return 0; 122 return 0;
123 123
124 memcpy(gnfd->name, name, length); 124 memcpy(gnfd->name, name, length);
@@ -189,6 +189,7 @@ static struct dentry *gfs2_get_parent(struct dentry *child)
189 return ERR_PTR(-ENOMEM); 189 return ERR_PTR(-ENOMEM);
190 } 190 }
191 191
192 dentry->d_op = &gfs2_dops;
192 return dentry; 193 return dentry;
193} 194}
194 195
@@ -215,8 +216,7 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb, void *inum_obj)
215 } 216 }
216 217
217 error = gfs2_glock_nq_num(sdp, inum->no_addr, &gfs2_inode_glops, 218 error = gfs2_glock_nq_num(sdp, inum->no_addr, &gfs2_inode_glops,
218 LM_ST_SHARED, LM_FLAG_ANY | GL_LOCAL_EXCL, 219 LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
219 &i_gh);
220 if (error) 220 if (error)
221 return ERR_PTR(error); 221 return ERR_PTR(error);
222 222
@@ -269,6 +269,7 @@ out_inode:
269 return ERR_PTR(-ENOMEM); 269 return ERR_PTR(-ENOMEM);
270 } 270 }
271 271
272 dentry->d_op = &gfs2_dops;
272 return dentry; 273 return dentry;
273 274
274fail_rgd: 275fail_rgd:
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
index faa07e4b97d0..c996aa739a05 100644
--- a/fs/gfs2/ops_file.c
+++ b/fs/gfs2/ops_file.c
@@ -43,15 +43,6 @@
43#include "util.h" 43#include "util.h"
44#include "eaops.h" 44#include "eaops.h"
45 45
46/* For regular, non-NFS */
47struct filldir_reg {
48 struct gfs2_sbd *fdr_sbd;
49 int fdr_prefetch;
50
51 filldir_t fdr_filldir;
52 void *fdr_opaque;
53};
54
55/* 46/*
56 * Most fields left uninitialised to catch anybody who tries to 47 * Most fields left uninitialised to catch anybody who tries to
57 * use them. f_flags set to prevent file_accessed() from touching 48 * use them. f_flags set to prevent file_accessed() from touching
@@ -128,41 +119,6 @@ static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
128} 119}
129 120
130/** 121/**
131 * filldir_func - Report a directory entry to the caller of gfs2_dir_read()
132 * @opaque: opaque data used by the function
133 * @name: the name of the directory entry
134 * @length: the length of the name
135 * @offset: the entry's offset in the directory
136 * @inum: the inode number the entry points to
137 * @type: the type of inode the entry points to
138 *
139 * Returns: 0 on success, 1 if buffer full
140 */
141
142static int filldir_func(void *opaque, const char *name, unsigned int length,
143 u64 offset, struct gfs2_inum_host *inum,
144 unsigned int type)
145{
146 struct filldir_reg *fdr = (struct filldir_reg *)opaque;
147 struct gfs2_sbd *sdp = fdr->fdr_sbd;
148 int error;
149
150 error = fdr->fdr_filldir(fdr->fdr_opaque, name, length, offset,
151 inum->no_addr, type);
152 if (error)
153 return 1;
154
155 if (fdr->fdr_prefetch && !(length == 1 && *name == '.')) {
156 gfs2_glock_prefetch_num(sdp, inum->no_addr, &gfs2_inode_glops,
157 LM_ST_SHARED, LM_FLAG_TRY | LM_FLAG_ANY);
158 gfs2_glock_prefetch_num(sdp, inum->no_addr, &gfs2_iopen_glops,
159 LM_ST_SHARED, LM_FLAG_TRY);
160 }
161
162 return 0;
163}
164
165/**
166 * gfs2_readdir - Read directory entries from a directory 122 * gfs2_readdir - Read directory entries from a directory
167 * @file: The directory to read from 123 * @file: The directory to read from
168 * @dirent: Buffer for dirents 124 * @dirent: Buffer for dirents
@@ -175,16 +131,10 @@ static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
175{ 131{
176 struct inode *dir = file->f_mapping->host; 132 struct inode *dir = file->f_mapping->host;
177 struct gfs2_inode *dip = GFS2_I(dir); 133 struct gfs2_inode *dip = GFS2_I(dir);
178 struct filldir_reg fdr;
179 struct gfs2_holder d_gh; 134 struct gfs2_holder d_gh;
180 u64 offset = file->f_pos; 135 u64 offset = file->f_pos;
181 int error; 136 int error;
182 137
183 fdr.fdr_sbd = GFS2_SB(dir);
184 fdr.fdr_prefetch = 1;
185 fdr.fdr_filldir = filldir;
186 fdr.fdr_opaque = dirent;
187
188 gfs2_holder_init(dip->i_gl, LM_ST_SHARED, GL_ATIME, &d_gh); 138 gfs2_holder_init(dip->i_gl, LM_ST_SHARED, GL_ATIME, &d_gh);
189 error = gfs2_glock_nq_atime(&d_gh); 139 error = gfs2_glock_nq_atime(&d_gh);
190 if (error) { 140 if (error) {
@@ -192,7 +142,7 @@ static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
192 return error; 142 return error;
193 } 143 }
194 144
195 error = gfs2_dir_read(dir, &offset, &fdr, filldir_func); 145 error = gfs2_dir_read(dir, &offset, dirent, filldir);
196 146
197 gfs2_glock_dq_uninit(&d_gh); 147 gfs2_glock_dq_uninit(&d_gh);
198 148
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 636dda4c7d38..f40a84807d75 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -264,13 +264,23 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
264 struct gfs2_inode *dip = GFS2_I(dir); 264 struct gfs2_inode *dip = GFS2_I(dir);
265 struct gfs2_sbd *sdp = GFS2_SB(dir); 265 struct gfs2_sbd *sdp = GFS2_SB(dir);
266 struct gfs2_inode *ip = GFS2_I(dentry->d_inode); 266 struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
267 struct gfs2_holder ghs[2]; 267 struct gfs2_holder ghs[3];
268 struct gfs2_rgrpd *rgd;
269 struct gfs2_holder ri_gh;
268 int error; 270 int error;
269 271
272 error = gfs2_rindex_hold(sdp, &ri_gh);
273 if (error)
274 return error;
275
270 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); 276 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
271 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1); 277 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);
272 278
273 error = gfs2_glock_nq_m(2, ghs); 279 rgd = gfs2_blk2rgrpd(sdp, ip->i_num.no_addr);
280 gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2);
281
282
283 error = gfs2_glock_nq_m(3, ghs);
274 if (error) 284 if (error)
275 goto out; 285 goto out;
276 286
@@ -291,10 +301,12 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
291out_end_trans: 301out_end_trans:
292 gfs2_trans_end(sdp); 302 gfs2_trans_end(sdp);
293out_gunlock: 303out_gunlock:
294 gfs2_glock_dq_m(2, ghs); 304 gfs2_glock_dq_m(3, ghs);
295out: 305out:
296 gfs2_holder_uninit(ghs); 306 gfs2_holder_uninit(ghs);
297 gfs2_holder_uninit(ghs + 1); 307 gfs2_holder_uninit(ghs + 1);
308 gfs2_holder_uninit(ghs + 2);
309 gfs2_glock_dq_uninit(&ri_gh);
298 return error; 310 return error;
299} 311}
300 312
@@ -449,13 +461,22 @@ static int gfs2_rmdir(struct inode *dir, struct dentry *dentry)
449 struct gfs2_inode *dip = GFS2_I(dir); 461 struct gfs2_inode *dip = GFS2_I(dir);
450 struct gfs2_sbd *sdp = GFS2_SB(dir); 462 struct gfs2_sbd *sdp = GFS2_SB(dir);
451 struct gfs2_inode *ip = GFS2_I(dentry->d_inode); 463 struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
452 struct gfs2_holder ghs[2]; 464 struct gfs2_holder ghs[3];
465 struct gfs2_rgrpd *rgd;
466 struct gfs2_holder ri_gh;
453 int error; 467 int error;
454 468
469
470 error = gfs2_rindex_hold(sdp, &ri_gh);
471 if (error)
472 return error;
455 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); 473 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
456 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1); 474 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);
457 475
458 error = gfs2_glock_nq_m(2, ghs); 476 rgd = gfs2_blk2rgrpd(sdp, ip->i_num.no_addr);
477 gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2);
478
479 error = gfs2_glock_nq_m(3, ghs);
459 if (error) 480 if (error)
460 goto out; 481 goto out;
461 482
@@ -483,10 +504,12 @@ static int gfs2_rmdir(struct inode *dir, struct dentry *dentry)
483 gfs2_trans_end(sdp); 504 gfs2_trans_end(sdp);
484 505
485out_gunlock: 506out_gunlock:
486 gfs2_glock_dq_m(2, ghs); 507 gfs2_glock_dq_m(3, ghs);
487out: 508out:
488 gfs2_holder_uninit(ghs); 509 gfs2_holder_uninit(ghs);
489 gfs2_holder_uninit(ghs + 1); 510 gfs2_holder_uninit(ghs + 1);
511 gfs2_holder_uninit(ghs + 2);
512 gfs2_glock_dq_uninit(&ri_gh);
490 return error; 513 return error;
491} 514}
492 515
@@ -547,7 +570,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
547 struct gfs2_inode *ip = GFS2_I(odentry->d_inode); 570 struct gfs2_inode *ip = GFS2_I(odentry->d_inode);
548 struct gfs2_inode *nip = NULL; 571 struct gfs2_inode *nip = NULL;
549 struct gfs2_sbd *sdp = GFS2_SB(odir); 572 struct gfs2_sbd *sdp = GFS2_SB(odir);
550 struct gfs2_holder ghs[4], r_gh; 573 struct gfs2_holder ghs[5], r_gh;
574 struct gfs2_rgrpd *nrgd;
551 unsigned int num_gh; 575 unsigned int num_gh;
552 int dir_rename = 0; 576 int dir_rename = 0;
553 int alloc_required; 577 int alloc_required;
@@ -587,6 +611,13 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
587 if (nip) { 611 if (nip) {
588 gfs2_holder_init(nip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh); 612 gfs2_holder_init(nip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh);
589 num_gh++; 613 num_gh++;
614 /* grab the resource lock for unlink flag twiddling
615 * this is the case of the target file already existing
616 * so we unlink before doing the rename
617 */
618 nrgd = gfs2_blk2rgrpd(sdp, nip->i_num.no_addr);
619 if (nrgd)
620 gfs2_holder_init(nrgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh++);
590 } 621 }
591 622
592 error = gfs2_glock_nq_m(num_gh, ghs); 623 error = gfs2_glock_nq_m(num_gh, ghs);
@@ -684,12 +715,12 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
684 error = gfs2_trans_begin(sdp, sdp->sd_max_dirres + 715 error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
685 al->al_rgd->rd_ri.ri_length + 716 al->al_rgd->rd_ri.ri_length +
686 4 * RES_DINODE + 4 * RES_LEAF + 717 4 * RES_DINODE + 4 * RES_LEAF +
687 RES_STATFS + RES_QUOTA, 0); 718 RES_STATFS + RES_QUOTA + 4, 0);
688 if (error) 719 if (error)
689 goto out_ipreserv; 720 goto out_ipreserv;
690 } else { 721 } else {
691 error = gfs2_trans_begin(sdp, 4 * RES_DINODE + 722 error = gfs2_trans_begin(sdp, 4 * RES_DINODE +
692 5 * RES_LEAF, 0); 723 5 * RES_LEAF + 4, 0);
693 if (error) 724 if (error)
694 goto out_gunlock; 725 goto out_gunlock;
695 } 726 }
@@ -728,7 +759,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
728 error = gfs2_meta_inode_buffer(ip, &dibh); 759 error = gfs2_meta_inode_buffer(ip, &dibh);
729 if (error) 760 if (error)
730 goto out_end_trans; 761 goto out_end_trans;
731 ip->i_inode.i_ctime.tv_sec = get_seconds(); 762 ip->i_inode.i_ctime = CURRENT_TIME_SEC;
732 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 763 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
733 gfs2_dinode_out(ip, dibh->b_data); 764 gfs2_dinode_out(ip, dibh->b_data);
734 brelse(dibh); 765 brelse(dibh);
@@ -1018,7 +1049,7 @@ static int gfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
1018 } 1049 }
1019 1050
1020 generic_fillattr(inode, stat); 1051 generic_fillattr(inode, stat);
1021 if (unlock); 1052 if (unlock)
1022 gfs2_glock_dq_uninit(&gh); 1053 gfs2_glock_dq_uninit(&gh);
1023 1054
1024 return 0; 1055 return 0;
diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c
index 7685b46f934b..47369d011214 100644
--- a/fs/gfs2/ops_super.c
+++ b/fs/gfs2/ops_super.c
@@ -173,6 +173,9 @@ static void gfs2_write_super_lockfs(struct super_block *sb)
173 struct gfs2_sbd *sdp = sb->s_fs_info; 173 struct gfs2_sbd *sdp = sb->s_fs_info;
174 int error; 174 int error;
175 175
176 if (test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
177 return;
178
176 for (;;) { 179 for (;;) {
177 error = gfs2_freeze_fs(sdp); 180 error = gfs2_freeze_fs(sdp);
178 if (!error) 181 if (!error)
@@ -426,6 +429,12 @@ static void gfs2_delete_inode(struct inode *inode)
426 } 429 }
427 430
428 error = gfs2_dinode_dealloc(ip); 431 error = gfs2_dinode_dealloc(ip);
432 /*
433 * Must do this before unlock to avoid trying to write back
434 * potentially dirty data now that inode no longer exists
435 * on disk.
436 */
437 truncate_inode_pages(&inode->i_data, 0);
429 438
430out_unlock: 439out_unlock:
431 gfs2_glock_dq(&ip->i_iopen_gh); 440 gfs2_glock_dq(&ip->i_iopen_gh);
@@ -443,14 +452,12 @@ out:
443 452
444static struct inode *gfs2_alloc_inode(struct super_block *sb) 453static struct inode *gfs2_alloc_inode(struct super_block *sb)
445{ 454{
446 struct gfs2_sbd *sdp = sb->s_fs_info;
447 struct gfs2_inode *ip; 455 struct gfs2_inode *ip;
448 456
449 ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL); 457 ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
450 if (ip) { 458 if (ip) {
451 ip->i_flags = 0; 459 ip->i_flags = 0;
452 ip->i_gl = NULL; 460 ip->i_gl = NULL;
453 ip->i_greedy = gfs2_tune_get(sdp, gt_greedy_default);
454 ip->i_last_pfault = jiffies; 461 ip->i_last_pfault = jiffies;
455 } 462 }
456 return &ip->i_inode; 463 return &ip->i_inode;
diff --git a/fs/gfs2/ops_vm.c b/fs/gfs2/ops_vm.c
index 45a5f11fc39a..14b380fb0602 100644
--- a/fs/gfs2/ops_vm.c
+++ b/fs/gfs2/ops_vm.c
@@ -28,34 +28,13 @@
28#include "trans.h" 28#include "trans.h"
29#include "util.h" 29#include "util.h"
30 30
31static void pfault_be_greedy(struct gfs2_inode *ip)
32{
33 unsigned int time;
34
35 spin_lock(&ip->i_spin);
36 time = ip->i_greedy;
37 ip->i_last_pfault = jiffies;
38 spin_unlock(&ip->i_spin);
39
40 igrab(&ip->i_inode);
41 if (gfs2_glock_be_greedy(ip->i_gl, time))
42 iput(&ip->i_inode);
43}
44
45static struct page *gfs2_private_nopage(struct vm_area_struct *area, 31static struct page *gfs2_private_nopage(struct vm_area_struct *area,
46 unsigned long address, int *type) 32 unsigned long address, int *type)
47{ 33{
48 struct gfs2_inode *ip = GFS2_I(area->vm_file->f_mapping->host); 34 struct gfs2_inode *ip = GFS2_I(area->vm_file->f_mapping->host);
49 struct page *result;
50 35
51 set_bit(GIF_PAGED, &ip->i_flags); 36 set_bit(GIF_PAGED, &ip->i_flags);
52 37 return filemap_nopage(area, address, type);
53 result = filemap_nopage(area, address, type);
54
55 if (result && result != NOPAGE_OOM)
56 pfault_be_greedy(ip);
57
58 return result;
59} 38}
60 39
61static int alloc_page_backing(struct gfs2_inode *ip, struct page *page) 40static int alloc_page_backing(struct gfs2_inode *ip, struct page *page)
@@ -167,7 +146,6 @@ static struct page *gfs2_sharewrite_nopage(struct vm_area_struct *area,
167 set_page_dirty(result); 146 set_page_dirty(result);
168 } 147 }
169 148
170 pfault_be_greedy(ip);
171out: 149out:
172 gfs2_glock_dq_uninit(&i_gh); 150 gfs2_glock_dq_uninit(&i_gh);
173 151
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 43a24f2e5905..70f424fcf1cd 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -71,17 +71,12 @@ void gfs2_tune_init(struct gfs2_tune *gt)
71 gt->gt_atime_quantum = 3600; 71 gt->gt_atime_quantum = 3600;
72 gt->gt_new_files_jdata = 0; 72 gt->gt_new_files_jdata = 0;
73 gt->gt_new_files_directio = 0; 73 gt->gt_new_files_directio = 0;
74 gt->gt_max_atomic_write = 4 << 20;
75 gt->gt_max_readahead = 1 << 18; 74 gt->gt_max_readahead = 1 << 18;
76 gt->gt_lockdump_size = 131072; 75 gt->gt_lockdump_size = 131072;
77 gt->gt_stall_secs = 600; 76 gt->gt_stall_secs = 600;
78 gt->gt_complain_secs = 10; 77 gt->gt_complain_secs = 10;
79 gt->gt_reclaim_limit = 5000; 78 gt->gt_reclaim_limit = 5000;
80 gt->gt_entries_per_readdir = 32; 79 gt->gt_entries_per_readdir = 32;
81 gt->gt_prefetch_secs = 10;
82 gt->gt_greedy_default = HZ / 10;
83 gt->gt_greedy_quantum = HZ / 40;
84 gt->gt_greedy_max = HZ / 4;
85 gt->gt_statfs_quantum = 30; 80 gt->gt_statfs_quantum = 30;
86 gt->gt_statfs_slow = 0; 81 gt->gt_statfs_slow = 0;
87} 82}
@@ -359,8 +354,7 @@ int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
359 mutex_lock(&sdp->sd_jindex_mutex); 354 mutex_lock(&sdp->sd_jindex_mutex);
360 355
361 for (;;) { 356 for (;;) {
362 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 357 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);
363 GL_LOCAL_EXCL, ji_gh);
364 if (error) 358 if (error)
365 break; 359 break;
366 360
@@ -529,8 +523,7 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
529 struct gfs2_log_header_host head; 523 struct gfs2_log_header_host head;
530 int error; 524 int error;
531 525
532 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 526 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &t_gh);
533 GL_LOCAL_EXCL, &t_gh);
534 if (error) 527 if (error)
535 return error; 528 return error;
536 529
@@ -583,9 +576,8 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
583 gfs2_quota_sync(sdp); 576 gfs2_quota_sync(sdp);
584 gfs2_statfs_sync(sdp); 577 gfs2_statfs_sync(sdp);
585 578
586 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 579 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE,
587 GL_LOCAL_EXCL | GL_NOCACHE, 580 &t_gh);
588 &t_gh);
589 if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) 581 if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
590 return error; 582 return error;
591 583
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 983eaf1e06be..d01f9f0fda26 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -436,17 +436,12 @@ TUNE_ATTR(atime_quantum, 0);
436TUNE_ATTR(max_readahead, 0); 436TUNE_ATTR(max_readahead, 0);
437TUNE_ATTR(complain_secs, 0); 437TUNE_ATTR(complain_secs, 0);
438TUNE_ATTR(reclaim_limit, 0); 438TUNE_ATTR(reclaim_limit, 0);
439TUNE_ATTR(prefetch_secs, 0);
440TUNE_ATTR(statfs_slow, 0); 439TUNE_ATTR(statfs_slow, 0);
441TUNE_ATTR(new_files_jdata, 0); 440TUNE_ATTR(new_files_jdata, 0);
442TUNE_ATTR(new_files_directio, 0); 441TUNE_ATTR(new_files_directio, 0);
443TUNE_ATTR(quota_simul_sync, 1); 442TUNE_ATTR(quota_simul_sync, 1);
444TUNE_ATTR(quota_cache_secs, 1); 443TUNE_ATTR(quota_cache_secs, 1);
445TUNE_ATTR(max_atomic_write, 1);
446TUNE_ATTR(stall_secs, 1); 444TUNE_ATTR(stall_secs, 1);
447TUNE_ATTR(greedy_default, 1);
448TUNE_ATTR(greedy_quantum, 1);
449TUNE_ATTR(greedy_max, 1);
450TUNE_ATTR(statfs_quantum, 1); 445TUNE_ATTR(statfs_quantum, 1);
451TUNE_ATTR_DAEMON(scand_secs, scand_process); 446TUNE_ATTR_DAEMON(scand_secs, scand_process);
452TUNE_ATTR_DAEMON(recoverd_secs, recoverd_process); 447TUNE_ATTR_DAEMON(recoverd_secs, recoverd_process);
@@ -465,15 +460,10 @@ static struct attribute *tune_attrs[] = {
465 &tune_attr_max_readahead.attr, 460 &tune_attr_max_readahead.attr,
466 &tune_attr_complain_secs.attr, 461 &tune_attr_complain_secs.attr,
467 &tune_attr_reclaim_limit.attr, 462 &tune_attr_reclaim_limit.attr,
468 &tune_attr_prefetch_secs.attr,
469 &tune_attr_statfs_slow.attr, 463 &tune_attr_statfs_slow.attr,
470 &tune_attr_quota_simul_sync.attr, 464 &tune_attr_quota_simul_sync.attr,
471 &tune_attr_quota_cache_secs.attr, 465 &tune_attr_quota_cache_secs.attr,
472 &tune_attr_max_atomic_write.attr,
473 &tune_attr_stall_secs.attr, 466 &tune_attr_stall_secs.attr,
474 &tune_attr_greedy_default.attr,
475 &tune_attr_greedy_quantum.attr,
476 &tune_attr_greedy_max.attr,
477 &tune_attr_statfs_quantum.attr, 467 &tune_attr_statfs_quantum.attr,
478 &tune_attr_scand_secs.attr, 468 &tune_attr_scand_secs.attr,
479 &tune_attr_recoverd_secs.attr, 469 &tune_attr_recoverd_secs.attr,
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index f5719117edfe..e285022f006c 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -182,9 +182,9 @@ int jfs_get_block(struct inode *ip, sector_t lblock,
182 * Take appropriate lock on inode 182 * Take appropriate lock on inode
183 */ 183 */
184 if (create) 184 if (create)
185 IWRITE_LOCK(ip); 185 IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
186 else 186 else
187 IREAD_LOCK(ip); 187 IREAD_LOCK(ip, RDWRLOCK_NORMAL);
188 188
189 if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) && 189 if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) &&
190 (!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) && 190 (!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) &&
@@ -359,7 +359,7 @@ void jfs_truncate(struct inode *ip)
359 359
360 nobh_truncate_page(ip->i_mapping, ip->i_size); 360 nobh_truncate_page(ip->i_mapping, ip->i_size);
361 361
362 IWRITE_LOCK(ip); 362 IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
363 jfs_truncate_nolock(ip, ip->i_size); 363 jfs_truncate_nolock(ip, ip->i_size);
364 IWRITE_UNLOCK(ip); 364 IWRITE_UNLOCK(ip);
365} 365}
diff --git a/fs/jfs/jfs_debug.h b/fs/jfs/jfs_debug.h
index ddffbbd4d955..7378798f0b21 100644
--- a/fs/jfs/jfs_debug.h
+++ b/fs/jfs/jfs_debug.h
@@ -39,10 +39,6 @@ extern void jfs_proc_clean(void);
39/* 39/*
40 * assert with traditional printf/panic 40 * assert with traditional printf/panic
41 */ 41 */
42#ifdef CONFIG_KERNEL_ASSERTS
43/* kgdb stuff */
44#define assert(p) KERNEL_ASSERT(#p, p)
45#else
46#define assert(p) do { \ 42#define assert(p) do { \
47 if (!(p)) { \ 43 if (!(p)) { \
48 printk(KERN_CRIT "BUG at %s:%d assert(%s)\n", \ 44 printk(KERN_CRIT "BUG at %s:%d assert(%s)\n", \
@@ -50,7 +46,6 @@ extern void jfs_proc_clean(void);
50 BUG(); \ 46 BUG(); \
51 } \ 47 } \
52} while (0) 48} while (0)
53#endif
54 49
55/* 50/*
56 * debug ON 51 * debug ON
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index 23546c8fd48b..82b0544bd76d 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -337,7 +337,7 @@ int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
337 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap; 337 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
338 struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap; 338 struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
339 339
340 IREAD_LOCK(ipbmap); 340 IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
341 341
342 /* block to be freed better be within the mapsize. */ 342 /* block to be freed better be within the mapsize. */
343 if (unlikely((blkno == 0) || (blkno + nblocks > bmp->db_mapsize))) { 343 if (unlikely((blkno == 0) || (blkno + nblocks > bmp->db_mapsize))) {
@@ -733,7 +733,7 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
733 * allocation group size, try to allocate anywhere. 733 * allocation group size, try to allocate anywhere.
734 */ 734 */
735 if (l2nb > bmp->db_agl2size) { 735 if (l2nb > bmp->db_agl2size) {
736 IWRITE_LOCK(ipbmap); 736 IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
737 737
738 rc = dbAllocAny(bmp, nblocks, l2nb, results); 738 rc = dbAllocAny(bmp, nblocks, l2nb, results);
739 739
@@ -774,7 +774,7 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
774 * the hint using a tiered strategy. 774 * the hint using a tiered strategy.
775 */ 775 */
776 if (nblocks <= BPERDMAP) { 776 if (nblocks <= BPERDMAP) {
777 IREAD_LOCK(ipbmap); 777 IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
778 778
779 /* get the buffer for the dmap containing the hint. 779 /* get the buffer for the dmap containing the hint.
780 */ 780 */
@@ -844,7 +844,7 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
844 /* try to satisfy the allocation request with blocks within 844 /* try to satisfy the allocation request with blocks within
845 * the same allocation group as the hint. 845 * the same allocation group as the hint.
846 */ 846 */
847 IWRITE_LOCK(ipbmap); 847 IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
848 if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) != -ENOSPC) 848 if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) != -ENOSPC)
849 goto write_unlock; 849 goto write_unlock;
850 850
@@ -856,7 +856,7 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
856 * Let dbNextAG recommend a preferred allocation group 856 * Let dbNextAG recommend a preferred allocation group
857 */ 857 */
858 agno = dbNextAG(ipbmap); 858 agno = dbNextAG(ipbmap);
859 IWRITE_LOCK(ipbmap); 859 IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
860 860
861 /* Try to allocate within this allocation group. if that fails, try to 861 /* Try to allocate within this allocation group. if that fails, try to
862 * allocate anywhere in the map. 862 * allocate anywhere in the map.
@@ -900,7 +900,7 @@ int dbAllocExact(struct inode *ip, s64 blkno, int nblocks)
900 s64 lblkno; 900 s64 lblkno;
901 struct metapage *mp; 901 struct metapage *mp;
902 902
903 IREAD_LOCK(ipbmap); 903 IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
904 904
905 /* 905 /*
906 * validate extent request: 906 * validate extent request:
@@ -1050,7 +1050,7 @@ static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks)
1050 */ 1050 */
1051 extblkno = lastblkno + 1; 1051 extblkno = lastblkno + 1;
1052 1052
1053 IREAD_LOCK(ipbmap); 1053 IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
1054 1054
1055 /* better be within the file system */ 1055 /* better be within the file system */
1056 bmp = sbi->bmap; 1056 bmp = sbi->bmap;
@@ -3116,7 +3116,7 @@ int dbAllocBottomUp(struct inode *ip, s64 blkno, s64 nblocks)
3116 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap; 3116 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
3117 struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap; 3117 struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
3118 3118
3119 IREAD_LOCK(ipbmap); 3119 IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
3120 3120
3121 /* block to be allocated better be within the mapsize. */ 3121 /* block to be allocated better be within the mapsize. */
3122 ASSERT(nblocks <= bmp->db_mapsize - blkno); 3122 ASSERT(nblocks <= bmp->db_mapsize - blkno);
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index 53f63b47a6d3..aa5124b643b1 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -331,7 +331,7 @@ int diRead(struct inode *ip)
331 331
332 /* read the iag */ 332 /* read the iag */
333 imap = JFS_IP(ipimap)->i_imap; 333 imap = JFS_IP(ipimap)->i_imap;
334 IREAD_LOCK(ipimap); 334 IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
335 rc = diIAGRead(imap, iagno, &mp); 335 rc = diIAGRead(imap, iagno, &mp);
336 IREAD_UNLOCK(ipimap); 336 IREAD_UNLOCK(ipimap);
337 if (rc) { 337 if (rc) {
@@ -920,7 +920,7 @@ int diFree(struct inode *ip)
920 /* Obtain read lock in imap inode. Don't release it until we have 920 /* Obtain read lock in imap inode. Don't release it until we have
921 * read all of the IAG's that we are going to. 921 * read all of the IAG's that we are going to.
922 */ 922 */
923 IREAD_LOCK(ipimap); 923 IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
924 924
925 /* read the iag. 925 /* read the iag.
926 */ 926 */
@@ -1415,7 +1415,7 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
1415 AG_LOCK(imap, agno); 1415 AG_LOCK(imap, agno);
1416 1416
1417 /* Get read lock on imap inode */ 1417 /* Get read lock on imap inode */
1418 IREAD_LOCK(ipimap); 1418 IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
1419 1419
1420 /* get the iag number and read the iag */ 1420 /* get the iag number and read the iag */
1421 iagno = INOTOIAG(inum); 1421 iagno = INOTOIAG(inum);
@@ -1808,7 +1808,7 @@ static int diAllocIno(struct inomap * imap, int agno, struct inode *ip)
1808 return -ENOSPC; 1808 return -ENOSPC;
1809 1809
1810 /* obtain read lock on imap inode */ 1810 /* obtain read lock on imap inode */
1811 IREAD_LOCK(imap->im_ipimap); 1811 IREAD_LOCK(imap->im_ipimap, RDWRLOCK_IMAP);
1812 1812
1813 /* read the iag at the head of the list. 1813 /* read the iag at the head of the list.
1814 */ 1814 */
@@ -1946,7 +1946,7 @@ static int diAllocExt(struct inomap * imap, int agno, struct inode *ip)
1946 } else { 1946 } else {
1947 /* read the iag. 1947 /* read the iag.
1948 */ 1948 */
1949 IREAD_LOCK(imap->im_ipimap); 1949 IREAD_LOCK(imap->im_ipimap, RDWRLOCK_IMAP);
1950 if ((rc = diIAGRead(imap, iagno, &mp))) { 1950 if ((rc = diIAGRead(imap, iagno, &mp))) {
1951 IREAD_UNLOCK(imap->im_ipimap); 1951 IREAD_UNLOCK(imap->im_ipimap);
1952 jfs_error(ip->i_sb, "diAllocExt: error reading iag"); 1952 jfs_error(ip->i_sb, "diAllocExt: error reading iag");
@@ -2509,7 +2509,7 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
2509 */ 2509 */
2510 2510
2511 /* acquire inode map lock */ 2511 /* acquire inode map lock */
2512 IWRITE_LOCK(ipimap); 2512 IWRITE_LOCK(ipimap, RDWRLOCK_IMAP);
2513 2513
2514 if (ipimap->i_size >> L2PSIZE != imap->im_nextiag + 1) { 2514 if (ipimap->i_size >> L2PSIZE != imap->im_nextiag + 1) {
2515 IWRITE_UNLOCK(ipimap); 2515 IWRITE_UNLOCK(ipimap);
@@ -2648,7 +2648,7 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
2648 } 2648 }
2649 2649
2650 /* obtain read lock on map */ 2650 /* obtain read lock on map */
2651 IREAD_LOCK(ipimap); 2651 IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
2652 2652
2653 /* read the iag */ 2653 /* read the iag */
2654 if ((rc = diIAGRead(imap, iagno, &mp))) { 2654 if ((rc = diIAGRead(imap, iagno, &mp))) {
@@ -2779,7 +2779,7 @@ diUpdatePMap(struct inode *ipimap,
2779 return -EIO; 2779 return -EIO;
2780 } 2780 }
2781 /* read the iag */ 2781 /* read the iag */
2782 IREAD_LOCK(ipimap); 2782 IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
2783 rc = diIAGRead(imap, iagno, &mp); 2783 rc = diIAGRead(imap, iagno, &mp);
2784 IREAD_UNLOCK(ipimap); 2784 IREAD_UNLOCK(ipimap);
2785 if (rc) 2785 if (rc)
diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h
index 94005584445a..8f453eff3c83 100644
--- a/fs/jfs/jfs_incore.h
+++ b/fs/jfs/jfs_incore.h
@@ -109,9 +109,11 @@ struct jfs_inode_info {
109 109
110#define JFS_ACL_NOT_CACHED ((void *)-1) 110#define JFS_ACL_NOT_CACHED ((void *)-1)
111 111
112#define IREAD_LOCK(ip) down_read(&JFS_IP(ip)->rdwrlock) 112#define IREAD_LOCK(ip, subclass) \
113 down_read_nested(&JFS_IP(ip)->rdwrlock, subclass)
113#define IREAD_UNLOCK(ip) up_read(&JFS_IP(ip)->rdwrlock) 114#define IREAD_UNLOCK(ip) up_read(&JFS_IP(ip)->rdwrlock)
114#define IWRITE_LOCK(ip) down_write(&JFS_IP(ip)->rdwrlock) 115#define IWRITE_LOCK(ip, subclass) \
116 down_write_nested(&JFS_IP(ip)->rdwrlock, subclass)
115#define IWRITE_UNLOCK(ip) up_write(&JFS_IP(ip)->rdwrlock) 117#define IWRITE_UNLOCK(ip) up_write(&JFS_IP(ip)->rdwrlock)
116 118
117/* 119/*
@@ -127,6 +129,29 @@ enum cflags {
127 COMMIT_Synclist, /* metadata pages on group commit synclist */ 129 COMMIT_Synclist, /* metadata pages on group commit synclist */
128}; 130};
129 131
132/*
133 * commit_mutex nesting subclasses:
134 */
135enum commit_mutex_class
136{
137 COMMIT_MUTEX_PARENT,
138 COMMIT_MUTEX_CHILD,
139 COMMIT_MUTEX_SECOND_PARENT, /* Renaming */
140 COMMIT_MUTEX_VICTIM /* Inode being unlinked due to rename */
141};
142
143/*
144 * rdwrlock subclasses:
145 * The dmap inode may be locked while a normal inode or the imap inode are
146 * locked.
147 */
148enum rdwrlock_class
149{
150 RDWRLOCK_NORMAL,
151 RDWRLOCK_IMAP,
152 RDWRLOCK_DMAP
153};
154
130#define set_cflag(flag, ip) set_bit(flag, &(JFS_IP(ip)->cflag)) 155#define set_cflag(flag, ip) set_bit(flag, &(JFS_IP(ip)->cflag))
131#define clear_cflag(flag, ip) clear_bit(flag, &(JFS_IP(ip)->cflag)) 156#define clear_cflag(flag, ip) clear_bit(flag, &(JFS_IP(ip)->cflag))
132#define test_cflag(flag, ip) test_bit(flag, &(JFS_IP(ip)->cflag)) 157#define test_cflag(flag, ip) test_bit(flag, &(JFS_IP(ip)->cflag))
diff --git a/fs/jfs/jfs_lock.h b/fs/jfs/jfs_lock.h
index 7d78e83d7c40..df48ece4b7a3 100644
--- a/fs/jfs/jfs_lock.h
+++ b/fs/jfs/jfs_lock.h
@@ -42,7 +42,7 @@ do { \
42 if (cond) \ 42 if (cond) \
43 break; \ 43 break; \
44 unlock_cmd; \ 44 unlock_cmd; \
45 schedule(); \ 45 io_schedule(); \
46 lock_cmd; \ 46 lock_cmd; \
47 } \ 47 } \
48 current->state = TASK_RUNNING; \ 48 current->state = TASK_RUNNING; \
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index ceaf03b94935..58deae007507 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -56,7 +56,7 @@ static inline void __lock_metapage(struct metapage *mp)
56 set_current_state(TASK_UNINTERRUPTIBLE); 56 set_current_state(TASK_UNINTERRUPTIBLE);
57 if (metapage_locked(mp)) { 57 if (metapage_locked(mp)) {
58 unlock_page(mp->page); 58 unlock_page(mp->page);
59 schedule(); 59 io_schedule();
60 lock_page(mp->page); 60 lock_page(mp->page);
61 } 61 }
62 } while (trylock_metapage(mp)); 62 } while (trylock_metapage(mp));
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index d558e51b0df8..6988a1082f58 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -135,7 +135,7 @@ static inline void TXN_SLEEP_DROP_LOCK(wait_queue_head_t * event)
135 add_wait_queue(event, &wait); 135 add_wait_queue(event, &wait);
136 set_current_state(TASK_UNINTERRUPTIBLE); 136 set_current_state(TASK_UNINTERRUPTIBLE);
137 TXN_UNLOCK(); 137 TXN_UNLOCK();
138 schedule(); 138 io_schedule();
139 current->state = TASK_RUNNING; 139 current->state = TASK_RUNNING;
140 remove_wait_queue(event, &wait); 140 remove_wait_queue(event, &wait);
141} 141}
diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c
index e98eb03e5310..acc97c46d8a4 100644
--- a/fs/jfs/jfs_xtree.c
+++ b/fs/jfs/jfs_xtree.c
@@ -757,6 +757,11 @@ static int xtSearch(struct inode *ip, s64 xoff, s64 *nextp,
757 nsplit = 0; 757 nsplit = 0;
758 758
759 /* push (bn, index) of the parent page/entry */ 759 /* push (bn, index) of the parent page/entry */
760 if (BT_STACK_FULL(btstack)) {
761 jfs_error(ip->i_sb, "stack overrun in xtSearch!");
762 XT_PUTPAGE(mp);
763 return -EIO;
764 }
760 BT_PUSH(btstack, bn, index); 765 BT_PUSH(btstack, bn, index);
761 766
762 /* get the child page block number */ 767 /* get the child page block number */
@@ -3915,6 +3920,11 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
3915 */ 3920 */
3916 getChild: 3921 getChild:
3917 /* save current parent entry for the child page */ 3922 /* save current parent entry for the child page */
3923 if (BT_STACK_FULL(&btstack)) {
3924 jfs_error(ip->i_sb, "stack overrun in xtTruncate!");
3925 XT_PUTPAGE(mp);
3926 return -EIO;
3927 }
3918 BT_PUSH(&btstack, bn, index); 3928 BT_PUSH(&btstack, bn, index);
3919 3929
3920 /* get child page */ 3930 /* get child page */
@@ -4112,6 +4122,11 @@ s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size)
4112 */ 4122 */
4113 getChild: 4123 getChild:
4114 /* save current parent entry for the child page */ 4124 /* save current parent entry for the child page */
4125 if (BT_STACK_FULL(&btstack)) {
4126 jfs_error(ip->i_sb, "stack overrun in xtTruncate_pmap!");
4127 XT_PUTPAGE(mp);
4128 return -EIO;
4129 }
4115 BT_PUSH(&btstack, bn, index); 4130 BT_PUSH(&btstack, bn, index);
4116 4131
4117 /* get child page */ 4132 /* get child page */
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index a6a8c16c872c..7ab47561b68d 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -104,8 +104,8 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, int mode,
104 104
105 tid = txBegin(dip->i_sb, 0); 105 tid = txBegin(dip->i_sb, 0);
106 106
107 mutex_lock(&JFS_IP(dip)->commit_mutex); 107 mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT);
108 mutex_lock(&JFS_IP(ip)->commit_mutex); 108 mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
109 109
110 rc = jfs_init_acl(tid, ip, dip); 110 rc = jfs_init_acl(tid, ip, dip);
111 if (rc) 111 if (rc)
@@ -238,8 +238,8 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode)
238 238
239 tid = txBegin(dip->i_sb, 0); 239 tid = txBegin(dip->i_sb, 0);
240 240
241 mutex_lock(&JFS_IP(dip)->commit_mutex); 241 mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT);
242 mutex_lock(&JFS_IP(ip)->commit_mutex); 242 mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
243 243
244 rc = jfs_init_acl(tid, ip, dip); 244 rc = jfs_init_acl(tid, ip, dip);
245 if (rc) 245 if (rc)
@@ -365,8 +365,8 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
365 365
366 tid = txBegin(dip->i_sb, 0); 366 tid = txBegin(dip->i_sb, 0);
367 367
368 mutex_lock(&JFS_IP(dip)->commit_mutex); 368 mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT);
369 mutex_lock(&JFS_IP(ip)->commit_mutex); 369 mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
370 370
371 iplist[0] = dip; 371 iplist[0] = dip;
372 iplist[1] = ip; 372 iplist[1] = ip;
@@ -483,12 +483,12 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
483 if ((rc = get_UCSname(&dname, dentry))) 483 if ((rc = get_UCSname(&dname, dentry)))
484 goto out; 484 goto out;
485 485
486 IWRITE_LOCK(ip); 486 IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
487 487
488 tid = txBegin(dip->i_sb, 0); 488 tid = txBegin(dip->i_sb, 0);
489 489
490 mutex_lock(&JFS_IP(dip)->commit_mutex); 490 mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT);
491 mutex_lock(&JFS_IP(ip)->commit_mutex); 491 mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
492 492
493 iplist[0] = dip; 493 iplist[0] = dip;
494 iplist[1] = ip; 494 iplist[1] = ip;
@@ -802,8 +802,8 @@ static int jfs_link(struct dentry *old_dentry,
802 802
803 tid = txBegin(ip->i_sb, 0); 803 tid = txBegin(ip->i_sb, 0);
804 804
805 mutex_lock(&JFS_IP(dir)->commit_mutex); 805 mutex_lock_nested(&JFS_IP(dir)->commit_mutex, COMMIT_MUTEX_PARENT);
806 mutex_lock(&JFS_IP(ip)->commit_mutex); 806 mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
807 807
808 /* 808 /*
809 * scan parent directory for entry/freespace 809 * scan parent directory for entry/freespace
@@ -913,8 +913,8 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
913 913
914 tid = txBegin(dip->i_sb, 0); 914 tid = txBegin(dip->i_sb, 0);
915 915
916 mutex_lock(&JFS_IP(dip)->commit_mutex); 916 mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT);
917 mutex_lock(&JFS_IP(ip)->commit_mutex); 917 mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
918 918
919 rc = jfs_init_security(tid, ip, dip); 919 rc = jfs_init_security(tid, ip, dip);
920 if (rc) 920 if (rc)
@@ -1127,7 +1127,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1127 goto out3; 1127 goto out3;
1128 } 1128 }
1129 } else if (new_ip) { 1129 } else if (new_ip) {
1130 IWRITE_LOCK(new_ip); 1130 IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL);
1131 /* Init inode for quota operations. */ 1131 /* Init inode for quota operations. */
1132 DQUOT_INIT(new_ip); 1132 DQUOT_INIT(new_ip);
1133 } 1133 }
@@ -1137,13 +1137,21 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1137 */ 1137 */
1138 tid = txBegin(new_dir->i_sb, 0); 1138 tid = txBegin(new_dir->i_sb, 0);
1139 1139
1140 mutex_lock(&JFS_IP(new_dir)->commit_mutex); 1140 /*
1141 mutex_lock(&JFS_IP(old_ip)->commit_mutex); 1141 * How do we know the locking is safe from deadlocks?
1142 * The vfs does the hard part for us. Any time we are taking nested
1143 * commit_mutexes, the vfs already has i_mutex held on the parent.
1144 * Here, the vfs has already taken i_mutex on both old_dir and new_dir.
1145 */
1146 mutex_lock_nested(&JFS_IP(new_dir)->commit_mutex, COMMIT_MUTEX_PARENT);
1147 mutex_lock_nested(&JFS_IP(old_ip)->commit_mutex, COMMIT_MUTEX_CHILD);
1142 if (old_dir != new_dir) 1148 if (old_dir != new_dir)
1143 mutex_lock(&JFS_IP(old_dir)->commit_mutex); 1149 mutex_lock_nested(&JFS_IP(old_dir)->commit_mutex,
1150 COMMIT_MUTEX_SECOND_PARENT);
1144 1151
1145 if (new_ip) { 1152 if (new_ip) {
1146 mutex_lock(&JFS_IP(new_ip)->commit_mutex); 1153 mutex_lock_nested(&JFS_IP(new_ip)->commit_mutex,
1154 COMMIT_MUTEX_VICTIM);
1147 /* 1155 /*
1148 * Change existing directory entry to new inode number 1156 * Change existing directory entry to new inode number
1149 */ 1157 */
@@ -1357,8 +1365,8 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry,
1357 1365
1358 tid = txBegin(dir->i_sb, 0); 1366 tid = txBegin(dir->i_sb, 0);
1359 1367
1360 mutex_lock(&JFS_IP(dir)->commit_mutex); 1368 mutex_lock_nested(&JFS_IP(dir)->commit_mutex, COMMIT_MUTEX_PARENT);
1361 mutex_lock(&JFS_IP(ip)->commit_mutex); 1369 mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
1362 1370
1363 rc = jfs_init_acl(tid, ip, dir); 1371 rc = jfs_init_acl(tid, ip, dir);
1364 if (rc) 1372 if (rc)
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 277ca67a2ad6..5a9779bb9236 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -184,10 +184,9 @@ static void o2hb_disarm_write_timeout(struct o2hb_region *reg)
184 flush_scheduled_work(); 184 flush_scheduled_work();
185} 185}
186 186
187static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc, 187static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc)
188 unsigned int num_ios)
189{ 188{
190 atomic_set(&wc->wc_num_reqs, num_ios); 189 atomic_set(&wc->wc_num_reqs, 1);
191 init_completion(&wc->wc_io_complete); 190 init_completion(&wc->wc_io_complete);
192 wc->wc_error = 0; 191 wc->wc_error = 0;
193} 192}
@@ -212,6 +211,7 @@ static void o2hb_wait_on_io(struct o2hb_region *reg,
212 struct address_space *mapping = reg->hr_bdev->bd_inode->i_mapping; 211 struct address_space *mapping = reg->hr_bdev->bd_inode->i_mapping;
213 212
214 blk_run_address_space(mapping); 213 blk_run_address_space(mapping);
214 o2hb_bio_wait_dec(wc, 1);
215 215
216 wait_for_completion(&wc->wc_io_complete); 216 wait_for_completion(&wc->wc_io_complete);
217} 217}
@@ -231,6 +231,7 @@ static int o2hb_bio_end_io(struct bio *bio,
231 return 1; 231 return 1;
232 232
233 o2hb_bio_wait_dec(wc, 1); 233 o2hb_bio_wait_dec(wc, 1);
234 bio_put(bio);
234 return 0; 235 return 0;
235} 236}
236 237
@@ -238,23 +239,22 @@ static int o2hb_bio_end_io(struct bio *bio,
238 * start_slot. */ 239 * start_slot. */
239static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg, 240static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
240 struct o2hb_bio_wait_ctxt *wc, 241 struct o2hb_bio_wait_ctxt *wc,
241 unsigned int start_slot, 242 unsigned int *current_slot,
242 unsigned int num_slots) 243 unsigned int max_slots)
243{ 244{
244 int i, nr_vecs, len, first_page, last_page; 245 int len, current_page;
245 unsigned int vec_len, vec_start; 246 unsigned int vec_len, vec_start;
246 unsigned int bits = reg->hr_block_bits; 247 unsigned int bits = reg->hr_block_bits;
247 unsigned int spp = reg->hr_slots_per_page; 248 unsigned int spp = reg->hr_slots_per_page;
249 unsigned int cs = *current_slot;
248 struct bio *bio; 250 struct bio *bio;
249 struct page *page; 251 struct page *page;
250 252
251 nr_vecs = (num_slots + spp - 1) / spp;
252
253 /* Testing has shown this allocation to take long enough under 253 /* Testing has shown this allocation to take long enough under
254 * GFP_KERNEL that the local node can get fenced. It would be 254 * GFP_KERNEL that the local node can get fenced. It would be
255 * nicest if we could pre-allocate these bios and avoid this 255 * nicest if we could pre-allocate these bios and avoid this
256 * all together. */ 256 * all together. */
257 bio = bio_alloc(GFP_ATOMIC, nr_vecs); 257 bio = bio_alloc(GFP_ATOMIC, 16);
258 if (!bio) { 258 if (!bio) {
259 mlog(ML_ERROR, "Could not alloc slots BIO!\n"); 259 mlog(ML_ERROR, "Could not alloc slots BIO!\n");
260 bio = ERR_PTR(-ENOMEM); 260 bio = ERR_PTR(-ENOMEM);
@@ -262,137 +262,53 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
262 } 262 }
263 263
264 /* Must put everything in 512 byte sectors for the bio... */ 264 /* Must put everything in 512 byte sectors for the bio... */
265 bio->bi_sector = (reg->hr_start_block + start_slot) << (bits - 9); 265 bio->bi_sector = (reg->hr_start_block + cs) << (bits - 9);
266 bio->bi_bdev = reg->hr_bdev; 266 bio->bi_bdev = reg->hr_bdev;
267 bio->bi_private = wc; 267 bio->bi_private = wc;
268 bio->bi_end_io = o2hb_bio_end_io; 268 bio->bi_end_io = o2hb_bio_end_io;
269 269
270 first_page = start_slot / spp; 270 vec_start = (cs << bits) % PAGE_CACHE_SIZE;
271 last_page = first_page + nr_vecs; 271 while(cs < max_slots) {
272 vec_start = (start_slot << bits) % PAGE_CACHE_SIZE; 272 current_page = cs / spp;
273 for(i = first_page; i < last_page; i++) { 273 page = reg->hr_slot_data[current_page];
274 page = reg->hr_slot_data[i];
275 274
276 vec_len = PAGE_CACHE_SIZE; 275 vec_len = min(PAGE_CACHE_SIZE,
277 /* last page might be short */ 276 (max_slots-cs) * (PAGE_CACHE_SIZE/spp) );
278 if (((i + 1) * spp) > (start_slot + num_slots))
279 vec_len = ((num_slots + start_slot) % spp) << bits;
280 vec_len -= vec_start;
281 277
282 mlog(ML_HB_BIO, "page %d, vec_len = %u, vec_start = %u\n", 278 mlog(ML_HB_BIO, "page %d, vec_len = %u, vec_start = %u\n",
283 i, vec_len, vec_start); 279 current_page, vec_len, vec_start);
284 280
285 len = bio_add_page(bio, page, vec_len, vec_start); 281 len = bio_add_page(bio, page, vec_len, vec_start);
286 if (len != vec_len) { 282 if (len != vec_len) break;
287 bio_put(bio);
288 bio = ERR_PTR(-EIO);
289
290 mlog(ML_ERROR, "Error adding page to bio i = %d, "
291 "vec_len = %u, len = %d\n, start = %u\n",
292 i, vec_len, len, vec_start);
293 goto bail;
294 }
295 283
284 cs += vec_len / (PAGE_CACHE_SIZE/spp);
296 vec_start = 0; 285 vec_start = 0;
297 } 286 }
298 287
299bail: 288bail:
289 *current_slot = cs;
300 return bio; 290 return bio;
301} 291}
302 292
303/*
304 * Compute the maximum number of sectors the bdev can handle in one bio,
305 * as a power of two.
306 *
307 * Stolen from oracleasm, thanks Joel!
308 */
309static int compute_max_sectors(struct block_device *bdev)
310{
311 int max_pages, max_sectors, pow_two_sectors;
312
313 struct request_queue *q;
314
315 q = bdev_get_queue(bdev);
316 max_pages = q->max_sectors >> (PAGE_SHIFT - 9);
317 if (max_pages > BIO_MAX_PAGES)
318 max_pages = BIO_MAX_PAGES;
319 if (max_pages > q->max_phys_segments)
320 max_pages = q->max_phys_segments;
321 if (max_pages > q->max_hw_segments)
322 max_pages = q->max_hw_segments;
323 max_pages--; /* Handle I/Os that straddle a page */
324
325 if (max_pages) {
326 max_sectors = max_pages << (PAGE_SHIFT - 9);
327 } else {
328 /* If BIO contains 1 or less than 1 page. */
329 max_sectors = q->max_sectors;
330 }
331 /* Why is fls() 1-based???? */
332 pow_two_sectors = 1 << (fls(max_sectors) - 1);
333
334 return pow_two_sectors;
335}
336
337static inline void o2hb_compute_request_limits(struct o2hb_region *reg,
338 unsigned int num_slots,
339 unsigned int *num_bios,
340 unsigned int *slots_per_bio)
341{
342 unsigned int max_sectors, io_sectors;
343
344 max_sectors = compute_max_sectors(reg->hr_bdev);
345
346 io_sectors = num_slots << (reg->hr_block_bits - 9);
347
348 *num_bios = (io_sectors + max_sectors - 1) / max_sectors;
349 *slots_per_bio = max_sectors >> (reg->hr_block_bits - 9);
350
351 mlog(ML_HB_BIO, "My io size is %u sectors for %u slots. This "
352 "device can handle %u sectors of I/O\n", io_sectors, num_slots,
353 max_sectors);
354 mlog(ML_HB_BIO, "Will need %u bios holding %u slots each\n",
355 *num_bios, *slots_per_bio);
356}
357
358static int o2hb_read_slots(struct o2hb_region *reg, 293static int o2hb_read_slots(struct o2hb_region *reg,
359 unsigned int max_slots) 294 unsigned int max_slots)
360{ 295{
361 unsigned int num_bios, slots_per_bio, start_slot, num_slots; 296 unsigned int current_slot=0;
362 int i, status; 297 int status;
363 struct o2hb_bio_wait_ctxt wc; 298 struct o2hb_bio_wait_ctxt wc;
364 struct bio **bios;
365 struct bio *bio; 299 struct bio *bio;
366 300
367 o2hb_compute_request_limits(reg, max_slots, &num_bios, &slots_per_bio); 301 o2hb_bio_wait_init(&wc);
368 302
369 bios = kcalloc(num_bios, sizeof(struct bio *), GFP_KERNEL); 303 while(current_slot < max_slots) {
370 if (!bios) { 304 bio = o2hb_setup_one_bio(reg, &wc, &current_slot, max_slots);
371 status = -ENOMEM;
372 mlog_errno(status);
373 return status;
374 }
375
376 o2hb_bio_wait_init(&wc, num_bios);
377
378 num_slots = slots_per_bio;
379 for(i = 0; i < num_bios; i++) {
380 start_slot = i * slots_per_bio;
381
382 /* adjust num_slots at last bio */
383 if (max_slots < (start_slot + num_slots))
384 num_slots = max_slots - start_slot;
385
386 bio = o2hb_setup_one_bio(reg, &wc, start_slot, num_slots);
387 if (IS_ERR(bio)) { 305 if (IS_ERR(bio)) {
388 o2hb_bio_wait_dec(&wc, num_bios - i);
389
390 status = PTR_ERR(bio); 306 status = PTR_ERR(bio);
391 mlog_errno(status); 307 mlog_errno(status);
392 goto bail_and_wait; 308 goto bail_and_wait;
393 } 309 }
394 bios[i] = bio;
395 310
311 atomic_inc(&wc.wc_num_reqs);
396 submit_bio(READ, bio); 312 submit_bio(READ, bio);
397 } 313 }
398 314
@@ -403,38 +319,30 @@ bail_and_wait:
403 if (wc.wc_error && !status) 319 if (wc.wc_error && !status)
404 status = wc.wc_error; 320 status = wc.wc_error;
405 321
406 if (bios) {
407 for(i = 0; i < num_bios; i++)
408 if (bios[i])
409 bio_put(bios[i]);
410 kfree(bios);
411 }
412
413 return status; 322 return status;
414} 323}
415 324
416static int o2hb_issue_node_write(struct o2hb_region *reg, 325static int o2hb_issue_node_write(struct o2hb_region *reg,
417 struct bio **write_bio,
418 struct o2hb_bio_wait_ctxt *write_wc) 326 struct o2hb_bio_wait_ctxt *write_wc)
419{ 327{
420 int status; 328 int status;
421 unsigned int slot; 329 unsigned int slot;
422 struct bio *bio; 330 struct bio *bio;
423 331
424 o2hb_bio_wait_init(write_wc, 1); 332 o2hb_bio_wait_init(write_wc);
425 333
426 slot = o2nm_this_node(); 334 slot = o2nm_this_node();
427 335
428 bio = o2hb_setup_one_bio(reg, write_wc, slot, 1); 336 bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1);
429 if (IS_ERR(bio)) { 337 if (IS_ERR(bio)) {
430 status = PTR_ERR(bio); 338 status = PTR_ERR(bio);
431 mlog_errno(status); 339 mlog_errno(status);
432 goto bail; 340 goto bail;
433 } 341 }
434 342
343 atomic_inc(&write_wc->wc_num_reqs);
435 submit_bio(WRITE, bio); 344 submit_bio(WRITE, bio);
436 345
437 *write_bio = bio;
438 status = 0; 346 status = 0;
439bail: 347bail:
440 return status; 348 return status;
@@ -826,7 +734,6 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
826{ 734{
827 int i, ret, highest_node, change = 0; 735 int i, ret, highest_node, change = 0;
828 unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)]; 736 unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)];
829 struct bio *write_bio;
830 struct o2hb_bio_wait_ctxt write_wc; 737 struct o2hb_bio_wait_ctxt write_wc;
831 738
832 ret = o2nm_configured_node_map(configured_nodes, 739 ret = o2nm_configured_node_map(configured_nodes,
@@ -864,7 +771,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
864 771
865 /* And fire off the write. Note that we don't wait on this I/O 772 /* And fire off the write. Note that we don't wait on this I/O
866 * until later. */ 773 * until later. */
867 ret = o2hb_issue_node_write(reg, &write_bio, &write_wc); 774 ret = o2hb_issue_node_write(reg, &write_wc);
868 if (ret < 0) { 775 if (ret < 0) {
869 mlog_errno(ret); 776 mlog_errno(ret);
870 return ret; 777 return ret;
@@ -882,7 +789,6 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
882 * people we find in our steady state have seen us. 789 * people we find in our steady state have seen us.
883 */ 790 */
884 o2hb_wait_on_io(reg, &write_wc); 791 o2hb_wait_on_io(reg, &write_wc);
885 bio_put(write_bio);
886 if (write_wc.wc_error) { 792 if (write_wc.wc_error) {
887 /* Do not re-arm the write timeout on I/O error - we 793 /* Do not re-arm the write timeout on I/O error - we
888 * can't be sure that the new block ever made it to 794 * can't be sure that the new block ever made it to
@@ -943,7 +849,6 @@ static int o2hb_thread(void *data)
943{ 849{
944 int i, ret; 850 int i, ret;
945 struct o2hb_region *reg = data; 851 struct o2hb_region *reg = data;
946 struct bio *write_bio;
947 struct o2hb_bio_wait_ctxt write_wc; 852 struct o2hb_bio_wait_ctxt write_wc;
948 struct timeval before_hb, after_hb; 853 struct timeval before_hb, after_hb;
949 unsigned int elapsed_msec; 854 unsigned int elapsed_msec;
@@ -993,10 +898,9 @@ static int o2hb_thread(void *data)
993 * 898 *
994 * XXX: Should we skip this on unclean_stop? */ 899 * XXX: Should we skip this on unclean_stop? */
995 o2hb_prepare_block(reg, 0); 900 o2hb_prepare_block(reg, 0);
996 ret = o2hb_issue_node_write(reg, &write_bio, &write_wc); 901 ret = o2hb_issue_node_write(reg, &write_wc);
997 if (ret == 0) { 902 if (ret == 0) {
998 o2hb_wait_on_io(reg, &write_wc); 903 o2hb_wait_on_io(reg, &write_wc);
999 bio_put(write_bio);
1000 } else { 904 } else {
1001 mlog_errno(ret); 905 mlog_errno(ret);
1002 } 906 }
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index ae4ff4a6636b..1718215fc018 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -556,6 +556,8 @@ static void o2net_register_callbacks(struct sock *sk,
556 sk->sk_data_ready = o2net_data_ready; 556 sk->sk_data_ready = o2net_data_ready;
557 sk->sk_state_change = o2net_state_change; 557 sk->sk_state_change = o2net_state_change;
558 558
559 mutex_init(&sc->sc_send_lock);
560
559 write_unlock_bh(&sk->sk_callback_lock); 561 write_unlock_bh(&sk->sk_callback_lock);
560} 562}
561 563
@@ -688,6 +690,7 @@ static void o2net_handler_put(struct o2net_msg_handler *nmh)
688 * be given to the handler if their payload is longer than the max. */ 690 * be given to the handler if their payload is longer than the max. */
689int o2net_register_handler(u32 msg_type, u32 key, u32 max_len, 691int o2net_register_handler(u32 msg_type, u32 key, u32 max_len,
690 o2net_msg_handler_func *func, void *data, 692 o2net_msg_handler_func *func, void *data,
693 o2net_post_msg_handler_func *post_func,
691 struct list_head *unreg_list) 694 struct list_head *unreg_list)
692{ 695{
693 struct o2net_msg_handler *nmh = NULL; 696 struct o2net_msg_handler *nmh = NULL;
@@ -722,6 +725,7 @@ int o2net_register_handler(u32 msg_type, u32 key, u32 max_len,
722 725
723 nmh->nh_func = func; 726 nmh->nh_func = func;
724 nmh->nh_func_data = data; 727 nmh->nh_func_data = data;
728 nmh->nh_post_func = post_func;
725 nmh->nh_msg_type = msg_type; 729 nmh->nh_msg_type = msg_type;
726 nmh->nh_max_len = max_len; 730 nmh->nh_max_len = max_len;
727 nmh->nh_key = key; 731 nmh->nh_key = key;
@@ -856,10 +860,12 @@ static void o2net_sendpage(struct o2net_sock_container *sc,
856 ssize_t ret; 860 ssize_t ret;
857 861
858 862
863 mutex_lock(&sc->sc_send_lock);
859 ret = sc->sc_sock->ops->sendpage(sc->sc_sock, 864 ret = sc->sc_sock->ops->sendpage(sc->sc_sock,
860 virt_to_page(kmalloced_virt), 865 virt_to_page(kmalloced_virt),
861 (long)kmalloced_virt & ~PAGE_MASK, 866 (long)kmalloced_virt & ~PAGE_MASK,
862 size, MSG_DONTWAIT); 867 size, MSG_DONTWAIT);
868 mutex_unlock(&sc->sc_send_lock);
863 if (ret != size) { 869 if (ret != size) {
864 mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT 870 mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
865 " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret); 871 " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret);
@@ -974,8 +980,10 @@ int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
974 980
975 /* finally, convert the message header to network byte-order 981 /* finally, convert the message header to network byte-order
976 * and send */ 982 * and send */
983 mutex_lock(&sc->sc_send_lock);
977 ret = o2net_send_tcp_msg(sc->sc_sock, vec, veclen, 984 ret = o2net_send_tcp_msg(sc->sc_sock, vec, veclen,
978 sizeof(struct o2net_msg) + caller_bytes); 985 sizeof(struct o2net_msg) + caller_bytes);
986 mutex_unlock(&sc->sc_send_lock);
979 msglog(msg, "sending returned %d\n", ret); 987 msglog(msg, "sending returned %d\n", ret);
980 if (ret < 0) { 988 if (ret < 0) {
981 mlog(0, "error returned from o2net_send_tcp_msg=%d\n", ret); 989 mlog(0, "error returned from o2net_send_tcp_msg=%d\n", ret);
@@ -1049,6 +1057,7 @@ static int o2net_process_message(struct o2net_sock_container *sc,
1049 int ret = 0, handler_status; 1057 int ret = 0, handler_status;
1050 enum o2net_system_error syserr; 1058 enum o2net_system_error syserr;
1051 struct o2net_msg_handler *nmh = NULL; 1059 struct o2net_msg_handler *nmh = NULL;
1060 void *ret_data = NULL;
1052 1061
1053 msglog(hdr, "processing message\n"); 1062 msglog(hdr, "processing message\n");
1054 1063
@@ -1101,17 +1110,26 @@ static int o2net_process_message(struct o2net_sock_container *sc,
1101 sc->sc_msg_type = be16_to_cpu(hdr->msg_type); 1110 sc->sc_msg_type = be16_to_cpu(hdr->msg_type);
1102 handler_status = (nmh->nh_func)(hdr, sizeof(struct o2net_msg) + 1111 handler_status = (nmh->nh_func)(hdr, sizeof(struct o2net_msg) +
1103 be16_to_cpu(hdr->data_len), 1112 be16_to_cpu(hdr->data_len),
1104 nmh->nh_func_data); 1113 nmh->nh_func_data, &ret_data);
1105 do_gettimeofday(&sc->sc_tv_func_stop); 1114 do_gettimeofday(&sc->sc_tv_func_stop);
1106 1115
1107out_respond: 1116out_respond:
1108 /* this destroys the hdr, so don't use it after this */ 1117 /* this destroys the hdr, so don't use it after this */
1118 mutex_lock(&sc->sc_send_lock);
1109 ret = o2net_send_status_magic(sc->sc_sock, hdr, syserr, 1119 ret = o2net_send_status_magic(sc->sc_sock, hdr, syserr,
1110 handler_status); 1120 handler_status);
1121 mutex_unlock(&sc->sc_send_lock);
1111 hdr = NULL; 1122 hdr = NULL;
1112 mlog(0, "sending handler status %d, syserr %d returned %d\n", 1123 mlog(0, "sending handler status %d, syserr %d returned %d\n",
1113 handler_status, syserr, ret); 1124 handler_status, syserr, ret);
1114 1125
1126 if (nmh) {
1127 BUG_ON(ret_data != NULL && nmh->nh_post_func == NULL);
1128 if (nmh->nh_post_func)
1129 (nmh->nh_post_func)(handler_status, nmh->nh_func_data,
1130 ret_data);
1131 }
1132
1115out: 1133out:
1116 if (nmh) 1134 if (nmh)
1117 o2net_handler_put(nmh); 1135 o2net_handler_put(nmh);
@@ -1795,13 +1813,13 @@ out:
1795 ready(sk, bytes); 1813 ready(sk, bytes);
1796} 1814}
1797 1815
1798static int o2net_open_listening_sock(__be16 port) 1816static int o2net_open_listening_sock(__be32 addr, __be16 port)
1799{ 1817{
1800 struct socket *sock = NULL; 1818 struct socket *sock = NULL;
1801 int ret; 1819 int ret;
1802 struct sockaddr_in sin = { 1820 struct sockaddr_in sin = {
1803 .sin_family = PF_INET, 1821 .sin_family = PF_INET,
1804 .sin_addr = { .s_addr = (__force u32)htonl(INADDR_ANY) }, 1822 .sin_addr = { .s_addr = (__force u32)addr },
1805 .sin_port = (__force u16)port, 1823 .sin_port = (__force u16)port,
1806 }; 1824 };
1807 1825
@@ -1824,15 +1842,15 @@ static int o2net_open_listening_sock(__be16 port)
1824 sock->sk->sk_reuse = 1; 1842 sock->sk->sk_reuse = 1;
1825 ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); 1843 ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
1826 if (ret < 0) { 1844 if (ret < 0) {
1827 mlog(ML_ERROR, "unable to bind socket to port %d, ret=%d\n", 1845 mlog(ML_ERROR, "unable to bind socket at %u.%u.%u.%u:%u, "
1828 ntohs(port), ret); 1846 "ret=%d\n", NIPQUAD(addr), ntohs(port), ret);
1829 goto out; 1847 goto out;
1830 } 1848 }
1831 1849
1832 ret = sock->ops->listen(sock, 64); 1850 ret = sock->ops->listen(sock, 64);
1833 if (ret < 0) { 1851 if (ret < 0) {
1834 mlog(ML_ERROR, "unable to listen on port %d, ret=%d\n", 1852 mlog(ML_ERROR, "unable to listen on %u.%u.%u.%u:%u, ret=%d\n",
1835 ntohs(port), ret); 1853 NIPQUAD(addr), ntohs(port), ret);
1836 } 1854 }
1837 1855
1838out: 1856out:
@@ -1865,7 +1883,8 @@ int o2net_start_listening(struct o2nm_node *node)
1865 return -ENOMEM; /* ? */ 1883 return -ENOMEM; /* ? */
1866 } 1884 }
1867 1885
1868 ret = o2net_open_listening_sock(node->nd_ipv4_port); 1886 ret = o2net_open_listening_sock(node->nd_ipv4_address,
1887 node->nd_ipv4_port);
1869 if (ret) { 1888 if (ret) {
1870 destroy_workqueue(o2net_wq); 1889 destroy_workqueue(o2net_wq);
1871 o2net_wq = NULL; 1890 o2net_wq = NULL;
diff --git a/fs/ocfs2/cluster/tcp.h b/fs/ocfs2/cluster/tcp.h
index 21a4e43df836..da880fc215f0 100644
--- a/fs/ocfs2/cluster/tcp.h
+++ b/fs/ocfs2/cluster/tcp.h
@@ -50,7 +50,10 @@ struct o2net_msg
50 __u8 buf[0]; 50 __u8 buf[0];
51}; 51};
52 52
53typedef int (o2net_msg_handler_func)(struct o2net_msg *msg, u32 len, void *data); 53typedef int (o2net_msg_handler_func)(struct o2net_msg *msg, u32 len, void *data,
54 void **ret_data);
55typedef void (o2net_post_msg_handler_func)(int status, void *data,
56 void *ret_data);
54 57
55#define O2NET_MAX_PAYLOAD_BYTES (4096 - sizeof(struct o2net_msg)) 58#define O2NET_MAX_PAYLOAD_BYTES (4096 - sizeof(struct o2net_msg))
56 59
@@ -99,6 +102,7 @@ int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *vec,
99 102
100int o2net_register_handler(u32 msg_type, u32 key, u32 max_len, 103int o2net_register_handler(u32 msg_type, u32 key, u32 max_len,
101 o2net_msg_handler_func *func, void *data, 104 o2net_msg_handler_func *func, void *data,
105 o2net_post_msg_handler_func *post_func,
102 struct list_head *unreg_list); 106 struct list_head *unreg_list);
103void o2net_unregister_handler_list(struct list_head *list); 107void o2net_unregister_handler_list(struct list_head *list);
104 108
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index b700dc9624d1..4dae5df5e467 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -38,6 +38,12 @@
38 * locking semantics of the file system using the protocol. It should 38 * locking semantics of the file system using the protocol. It should
39 * be somewhere else, I'm sure, but right now it isn't. 39 * be somewhere else, I'm sure, but right now it isn't.
40 * 40 *
41 * New in version 7:
42 * - DLM join domain includes the live nodemap
43 *
44 * New in version 6:
45 * - DLM lockres remote refcount fixes.
46 *
41 * New in version 5: 47 * New in version 5:
42 * - Network timeout checking protocol 48 * - Network timeout checking protocol
43 * 49 *
@@ -51,7 +57,7 @@
51 * - full 64 bit i_size in the metadata lock lvbs 57 * - full 64 bit i_size in the metadata lock lvbs
52 * - introduction of "rw" lock and pushing meta/data locking down 58 * - introduction of "rw" lock and pushing meta/data locking down
53 */ 59 */
54#define O2NET_PROTOCOL_VERSION 5ULL 60#define O2NET_PROTOCOL_VERSION 7ULL
55struct o2net_handshake { 61struct o2net_handshake {
56 __be64 protocol_version; 62 __be64 protocol_version;
57 __be64 connector_id; 63 __be64 connector_id;
@@ -149,6 +155,8 @@ struct o2net_sock_container {
149 struct timeval sc_tv_func_stop; 155 struct timeval sc_tv_func_stop;
150 u32 sc_msg_key; 156 u32 sc_msg_key;
151 u16 sc_msg_type; 157 u16 sc_msg_type;
158
159 struct mutex sc_send_lock;
152}; 160};
153 161
154struct o2net_msg_handler { 162struct o2net_msg_handler {
@@ -158,6 +166,8 @@ struct o2net_msg_handler {
158 u32 nh_key; 166 u32 nh_key;
159 o2net_msg_handler_func *nh_func; 167 o2net_msg_handler_func *nh_func;
160 o2net_msg_handler_func *nh_func_data; 168 o2net_msg_handler_func *nh_func_data;
169 o2net_post_msg_handler_func
170 *nh_post_func;
161 struct kref nh_kref; 171 struct kref nh_kref;
162 struct list_head nh_unregister_item; 172 struct list_head nh_unregister_item;
163}; 173};
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index 681046d51393..241cad342a48 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -263,7 +263,8 @@ void dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
263 263
264 264
265 265
266int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data) 266int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
267 void **ret_data)
267{ 268{
268 int ret; 269 int ret;
269 unsigned int locklen; 270 unsigned int locklen;
@@ -311,8 +312,8 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data)
311 past->type != DLM_BAST) { 312 past->type != DLM_BAST) {
312 mlog(ML_ERROR, "Unknown ast type! %d, cookie=%u:%llu" 313 mlog(ML_ERROR, "Unknown ast type! %d, cookie=%u:%llu"
313 "name=%.*s\n", past->type, 314 "name=%.*s\n", past->type,
314 dlm_get_lock_cookie_node(cookie), 315 dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
315 dlm_get_lock_cookie_seq(cookie), 316 dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
316 locklen, name); 317 locklen, name);
317 ret = DLM_IVLOCKID; 318 ret = DLM_IVLOCKID;
318 goto leave; 319 goto leave;
@@ -323,8 +324,8 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data)
323 mlog(0, "got %sast for unknown lockres! " 324 mlog(0, "got %sast for unknown lockres! "
324 "cookie=%u:%llu, name=%.*s, namelen=%u\n", 325 "cookie=%u:%llu, name=%.*s, namelen=%u\n",
325 past->type == DLM_AST ? "" : "b", 326 past->type == DLM_AST ? "" : "b",
326 dlm_get_lock_cookie_node(cookie), 327 dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
327 dlm_get_lock_cookie_seq(cookie), 328 dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
328 locklen, name, locklen); 329 locklen, name, locklen);
329 ret = DLM_IVLOCKID; 330 ret = DLM_IVLOCKID;
330 goto leave; 331 goto leave;
@@ -369,7 +370,8 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data)
369 370
370 mlog(0, "got %sast for unknown lock! cookie=%u:%llu, " 371 mlog(0, "got %sast for unknown lock! cookie=%u:%llu, "
371 "name=%.*s, namelen=%u\n", past->type == DLM_AST ? "" : "b", 372 "name=%.*s, namelen=%u\n", past->type == DLM_AST ? "" : "b",
372 dlm_get_lock_cookie_node(cookie), dlm_get_lock_cookie_seq(cookie), 373 dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
374 dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
373 locklen, name, locklen); 375 locklen, name, locklen);
374 376
375 ret = DLM_NORMAL; 377 ret = DLM_NORMAL;
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 6b6ff76538c5..e90b92f9ece1 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -180,6 +180,11 @@ struct dlm_assert_master_priv
180 unsigned ignore_higher:1; 180 unsigned ignore_higher:1;
181}; 181};
182 182
183struct dlm_deref_lockres_priv
184{
185 struct dlm_lock_resource *deref_res;
186 u8 deref_node;
187};
183 188
184struct dlm_work_item 189struct dlm_work_item
185{ 190{
@@ -191,6 +196,7 @@ struct dlm_work_item
191 struct dlm_request_all_locks_priv ral; 196 struct dlm_request_all_locks_priv ral;
192 struct dlm_mig_lockres_priv ml; 197 struct dlm_mig_lockres_priv ml;
193 struct dlm_assert_master_priv am; 198 struct dlm_assert_master_priv am;
199 struct dlm_deref_lockres_priv dl;
194 } u; 200 } u;
195}; 201};
196 202
@@ -222,6 +228,9 @@ static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm,
222#define DLM_LOCK_RES_DIRTY 0x00000008 228#define DLM_LOCK_RES_DIRTY 0x00000008
223#define DLM_LOCK_RES_IN_PROGRESS 0x00000010 229#define DLM_LOCK_RES_IN_PROGRESS 0x00000010
224#define DLM_LOCK_RES_MIGRATING 0x00000020 230#define DLM_LOCK_RES_MIGRATING 0x00000020
231#define DLM_LOCK_RES_DROPPING_REF 0x00000040
232#define DLM_LOCK_RES_BLOCK_DIRTY 0x00001000
233#define DLM_LOCK_RES_SETREF_INPROG 0x00002000
225 234
226/* max milliseconds to wait to sync up a network failure with a node death */ 235/* max milliseconds to wait to sync up a network failure with a node death */
227#define DLM_NODE_DEATH_WAIT_MAX (5 * 1000) 236#define DLM_NODE_DEATH_WAIT_MAX (5 * 1000)
@@ -265,6 +274,8 @@ struct dlm_lock_resource
265 u8 owner; //node which owns the lock resource, or unknown 274 u8 owner; //node which owns the lock resource, or unknown
266 u16 state; 275 u16 state;
267 char lvb[DLM_LVB_LEN]; 276 char lvb[DLM_LVB_LEN];
277 unsigned int inflight_locks;
278 unsigned long refmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
268}; 279};
269 280
270struct dlm_migratable_lock 281struct dlm_migratable_lock
@@ -367,7 +378,7 @@ enum {
367 DLM_CONVERT_LOCK_MSG, /* 504 */ 378 DLM_CONVERT_LOCK_MSG, /* 504 */
368 DLM_PROXY_AST_MSG, /* 505 */ 379 DLM_PROXY_AST_MSG, /* 505 */
369 DLM_UNLOCK_LOCK_MSG, /* 506 */ 380 DLM_UNLOCK_LOCK_MSG, /* 506 */
370 DLM_UNUSED_MSG2, /* 507 */ 381 DLM_DEREF_LOCKRES_MSG, /* 507 */
371 DLM_MIGRATE_REQUEST_MSG, /* 508 */ 382 DLM_MIGRATE_REQUEST_MSG, /* 508 */
372 DLM_MIG_LOCKRES_MSG, /* 509 */ 383 DLM_MIG_LOCKRES_MSG, /* 509 */
373 DLM_QUERY_JOIN_MSG, /* 510 */ 384 DLM_QUERY_JOIN_MSG, /* 510 */
@@ -417,6 +428,9 @@ struct dlm_master_request
417 u8 name[O2NM_MAX_NAME_LEN]; 428 u8 name[O2NM_MAX_NAME_LEN];
418}; 429};
419 430
431#define DLM_ASSERT_RESPONSE_REASSERT 0x00000001
432#define DLM_ASSERT_RESPONSE_MASTERY_REF 0x00000002
433
420#define DLM_ASSERT_MASTER_MLE_CLEANUP 0x00000001 434#define DLM_ASSERT_MASTER_MLE_CLEANUP 0x00000001
421#define DLM_ASSERT_MASTER_REQUERY 0x00000002 435#define DLM_ASSERT_MASTER_REQUERY 0x00000002
422#define DLM_ASSERT_MASTER_FINISH_MIGRATION 0x00000004 436#define DLM_ASSERT_MASTER_FINISH_MIGRATION 0x00000004
@@ -430,6 +444,8 @@ struct dlm_assert_master
430 u8 name[O2NM_MAX_NAME_LEN]; 444 u8 name[O2NM_MAX_NAME_LEN];
431}; 445};
432 446
447#define DLM_MIGRATE_RESPONSE_MASTERY_REF 0x00000001
448
433struct dlm_migrate_request 449struct dlm_migrate_request
434{ 450{
435 u8 master; 451 u8 master;
@@ -609,12 +625,16 @@ struct dlm_begin_reco
609}; 625};
610 626
611 627
628#define BITS_PER_BYTE 8
629#define BITS_TO_BYTES(bits) (((bits)+BITS_PER_BYTE-1)/BITS_PER_BYTE)
630
612struct dlm_query_join_request 631struct dlm_query_join_request
613{ 632{
614 u8 node_idx; 633 u8 node_idx;
615 u8 pad1[2]; 634 u8 pad1[2];
616 u8 name_len; 635 u8 name_len;
617 u8 domain[O2NM_MAX_NAME_LEN]; 636 u8 domain[O2NM_MAX_NAME_LEN];
637 u8 node_map[BITS_TO_BYTES(O2NM_MAX_NODES)];
618}; 638};
619 639
620struct dlm_assert_joined 640struct dlm_assert_joined
@@ -648,6 +668,16 @@ struct dlm_finalize_reco
648 __be32 pad2; 668 __be32 pad2;
649}; 669};
650 670
671struct dlm_deref_lockres
672{
673 u32 pad1;
674 u16 pad2;
675 u8 node_idx;
676 u8 namelen;
677
678 u8 name[O2NM_MAX_NAME_LEN];
679};
680
651static inline enum dlm_status 681static inline enum dlm_status
652__dlm_lockres_state_to_status(struct dlm_lock_resource *res) 682__dlm_lockres_state_to_status(struct dlm_lock_resource *res)
653{ 683{
@@ -688,16 +718,20 @@ void dlm_lock_put(struct dlm_lock *lock);
688void dlm_lock_attach_lockres(struct dlm_lock *lock, 718void dlm_lock_attach_lockres(struct dlm_lock *lock,
689 struct dlm_lock_resource *res); 719 struct dlm_lock_resource *res);
690 720
691int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data); 721int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
692int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data); 722 void **ret_data);
693int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data); 723int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
724 void **ret_data);
725int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
726 void **ret_data);
694 727
695void dlm_revert_pending_convert(struct dlm_lock_resource *res, 728void dlm_revert_pending_convert(struct dlm_lock_resource *res,
696 struct dlm_lock *lock); 729 struct dlm_lock *lock);
697void dlm_revert_pending_lock(struct dlm_lock_resource *res, 730void dlm_revert_pending_lock(struct dlm_lock_resource *res,
698 struct dlm_lock *lock); 731 struct dlm_lock *lock);
699 732
700int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data); 733int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
734 void **ret_data);
701void dlm_commit_pending_cancel(struct dlm_lock_resource *res, 735void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
702 struct dlm_lock *lock); 736 struct dlm_lock *lock);
703void dlm_commit_pending_unlock(struct dlm_lock_resource *res, 737void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
@@ -721,8 +755,6 @@ void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
721 struct dlm_lock_resource *res); 755 struct dlm_lock_resource *res);
722void dlm_lockres_calc_usage(struct dlm_ctxt *dlm, 756void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
723 struct dlm_lock_resource *res); 757 struct dlm_lock_resource *res);
724void dlm_purge_lockres(struct dlm_ctxt *dlm,
725 struct dlm_lock_resource *lockres);
726static inline void dlm_lockres_get(struct dlm_lock_resource *res) 758static inline void dlm_lockres_get(struct dlm_lock_resource *res)
727{ 759{
728 /* This is called on every lookup, so it might be worth 760 /* This is called on every lookup, so it might be worth
@@ -733,6 +765,10 @@ void dlm_lockres_put(struct dlm_lock_resource *res);
733void __dlm_unhash_lockres(struct dlm_lock_resource *res); 765void __dlm_unhash_lockres(struct dlm_lock_resource *res);
734void __dlm_insert_lockres(struct dlm_ctxt *dlm, 766void __dlm_insert_lockres(struct dlm_ctxt *dlm,
735 struct dlm_lock_resource *res); 767 struct dlm_lock_resource *res);
768struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
769 const char *name,
770 unsigned int len,
771 unsigned int hash);
736struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm, 772struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
737 const char *name, 773 const char *name,
738 unsigned int len, 774 unsigned int len,
@@ -753,6 +789,47 @@ struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
753 const char *name, 789 const char *name,
754 unsigned int namelen); 790 unsigned int namelen);
755 791
792#define dlm_lockres_set_refmap_bit(bit,res) \
793 __dlm_lockres_set_refmap_bit(bit,res,__FILE__,__LINE__)
794#define dlm_lockres_clear_refmap_bit(bit,res) \
795 __dlm_lockres_clear_refmap_bit(bit,res,__FILE__,__LINE__)
796
797static inline void __dlm_lockres_set_refmap_bit(int bit,
798 struct dlm_lock_resource *res,
799 const char *file,
800 int line)
801{
802 //printk("%s:%d:%.*s: setting bit %d\n", file, line,
803 // res->lockname.len, res->lockname.name, bit);
804 set_bit(bit, res->refmap);
805}
806
807static inline void __dlm_lockres_clear_refmap_bit(int bit,
808 struct dlm_lock_resource *res,
809 const char *file,
810 int line)
811{
812 //printk("%s:%d:%.*s: clearing bit %d\n", file, line,
813 // res->lockname.len, res->lockname.name, bit);
814 clear_bit(bit, res->refmap);
815}
816
817void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
818 struct dlm_lock_resource *res,
819 const char *file,
820 int line);
821void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
822 struct dlm_lock_resource *res,
823 int new_lockres,
824 const char *file,
825 int line);
826#define dlm_lockres_drop_inflight_ref(d,r) \
827 __dlm_lockres_drop_inflight_ref(d,r,__FILE__,__LINE__)
828#define dlm_lockres_grab_inflight_ref(d,r) \
829 __dlm_lockres_grab_inflight_ref(d,r,0,__FILE__,__LINE__)
830#define dlm_lockres_grab_inflight_ref_new(d,r) \
831 __dlm_lockres_grab_inflight_ref(d,r,1,__FILE__,__LINE__)
832
756void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock); 833void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
757void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock); 834void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
758void dlm_do_local_ast(struct dlm_ctxt *dlm, 835void dlm_do_local_ast(struct dlm_ctxt *dlm,
@@ -801,10 +878,7 @@ int dlm_heartbeat_init(struct dlm_ctxt *dlm);
801void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data); 878void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data);
802void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data); 879void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data);
803 880
804int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, struct dlm_lock_resource *res); 881int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
805int dlm_migrate_lockres(struct dlm_ctxt *dlm,
806 struct dlm_lock_resource *res,
807 u8 target);
808int dlm_finish_migration(struct dlm_ctxt *dlm, 882int dlm_finish_migration(struct dlm_ctxt *dlm,
809 struct dlm_lock_resource *res, 883 struct dlm_lock_resource *res,
810 u8 old_master); 884 u8 old_master);
@@ -812,15 +886,27 @@ void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
812 struct dlm_lock_resource *res); 886 struct dlm_lock_resource *res);
813void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res); 887void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res);
814 888
815int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data); 889int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
816int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data); 890 void **ret_data);
817int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data); 891int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
818int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data); 892 void **ret_data);
819int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data); 893void dlm_assert_master_post_handler(int status, void *data, void *ret_data);
820int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data); 894int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
821int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data); 895 void **ret_data);
822int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data); 896int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
823int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data); 897 void **ret_data);
898int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
899 void **ret_data);
900int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
901 void **ret_data);
902int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
903 void **ret_data);
904int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
905 void **ret_data);
906int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
907 void **ret_data);
908int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
909 void **ret_data);
824int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 910int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
825 u8 nodenum, u8 *real_master); 911 u8 nodenum, u8 *real_master);
826 912
@@ -856,10 +942,12 @@ static inline void __dlm_wait_on_lockres(struct dlm_lock_resource *res)
856int dlm_init_mle_cache(void); 942int dlm_init_mle_cache(void);
857void dlm_destroy_mle_cache(void); 943void dlm_destroy_mle_cache(void);
858void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up); 944void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up);
945int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
946 struct dlm_lock_resource *res);
859void dlm_clean_master_list(struct dlm_ctxt *dlm, 947void dlm_clean_master_list(struct dlm_ctxt *dlm,
860 u8 dead_node); 948 u8 dead_node);
861int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock); 949int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
862 950int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
863int __dlm_lockres_unused(struct dlm_lock_resource *res); 951int __dlm_lockres_unused(struct dlm_lock_resource *res);
864 952
865static inline const char * dlm_lock_mode_name(int mode) 953static inline const char * dlm_lock_mode_name(int mode)
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index c764dc8e40a2..ecb4d997221e 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -286,8 +286,8 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
286 __dlm_print_one_lock_resource(res); 286 __dlm_print_one_lock_resource(res);
287 mlog(ML_ERROR, "converting a remote lock that is already " 287 mlog(ML_ERROR, "converting a remote lock that is already "
288 "converting! (cookie=%u:%llu, conv=%d)\n", 288 "converting! (cookie=%u:%llu, conv=%d)\n",
289 dlm_get_lock_cookie_node(lock->ml.cookie), 289 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
290 dlm_get_lock_cookie_seq(lock->ml.cookie), 290 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
291 lock->ml.convert_type); 291 lock->ml.convert_type);
292 status = DLM_DENIED; 292 status = DLM_DENIED;
293 goto bail; 293 goto bail;
@@ -418,7 +418,8 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
418 * returns: DLM_NORMAL, DLM_IVLOCKID, DLM_BADARGS, 418 * returns: DLM_NORMAL, DLM_IVLOCKID, DLM_BADARGS,
419 * status from __dlmconvert_master 419 * status from __dlmconvert_master
420 */ 420 */
421int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data) 421int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
422 void **ret_data)
422{ 423{
423 struct dlm_ctxt *dlm = data; 424 struct dlm_ctxt *dlm = data;
424 struct dlm_convert_lock *cnv = (struct dlm_convert_lock *)msg->buf; 425 struct dlm_convert_lock *cnv = (struct dlm_convert_lock *)msg->buf;
@@ -428,7 +429,7 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data)
428 struct dlm_lockstatus *lksb; 429 struct dlm_lockstatus *lksb;
429 enum dlm_status status = DLM_NORMAL; 430 enum dlm_status status = DLM_NORMAL;
430 u32 flags; 431 u32 flags;
431 int call_ast = 0, kick_thread = 0, ast_reserved = 0; 432 int call_ast = 0, kick_thread = 0, ast_reserved = 0, wake = 0;
432 433
433 if (!dlm_grab(dlm)) { 434 if (!dlm_grab(dlm)) {
434 dlm_error(DLM_REJECTED); 435 dlm_error(DLM_REJECTED);
@@ -479,25 +480,14 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data)
479 } 480 }
480 lock = NULL; 481 lock = NULL;
481 } 482 }
482 if (!lock) {
483 __dlm_print_one_lock_resource(res);
484 list_for_each(iter, &res->granted) {
485 lock = list_entry(iter, struct dlm_lock, list);
486 if (lock->ml.node == cnv->node_idx) {
487 mlog(ML_ERROR, "There is something here "
488 "for node %u, lock->ml.cookie=%llu, "
489 "cnv->cookie=%llu\n", cnv->node_idx,
490 (unsigned long long)lock->ml.cookie,
491 (unsigned long long)cnv->cookie);
492 break;
493 }
494 }
495 lock = NULL;
496 }
497 spin_unlock(&res->spinlock); 483 spin_unlock(&res->spinlock);
498 if (!lock) { 484 if (!lock) {
499 status = DLM_IVLOCKID; 485 status = DLM_IVLOCKID;
500 dlm_error(status); 486 mlog(ML_ERROR, "did not find lock to convert on grant queue! "
487 "cookie=%u:%llu\n",
488 dlm_get_lock_cookie_node(be64_to_cpu(cnv->cookie)),
489 dlm_get_lock_cookie_seq(be64_to_cpu(cnv->cookie)));
490 __dlm_print_one_lock_resource(res);
501 goto leave; 491 goto leave;
502 } 492 }
503 493
@@ -524,8 +514,11 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data)
524 cnv->requested_type, 514 cnv->requested_type,
525 &call_ast, &kick_thread); 515 &call_ast, &kick_thread);
526 res->state &= ~DLM_LOCK_RES_IN_PROGRESS; 516 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
517 wake = 1;
527 } 518 }
528 spin_unlock(&res->spinlock); 519 spin_unlock(&res->spinlock);
520 if (wake)
521 wake_up(&res->wq);
529 522
530 if (status != DLM_NORMAL) { 523 if (status != DLM_NORMAL) {
531 if (status != DLM_NOTQUEUED) 524 if (status != DLM_NOTQUEUED)
@@ -534,12 +527,7 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data)
534 } 527 }
535 528
536leave: 529leave:
537 if (!lock) 530 if (lock)
538 mlog(ML_ERROR, "did not find lock to convert on grant queue! "
539 "cookie=%u:%llu\n",
540 dlm_get_lock_cookie_node(cnv->cookie),
541 dlm_get_lock_cookie_seq(cnv->cookie));
542 else
543 dlm_lock_put(lock); 531 dlm_lock_put(lock);
544 532
545 /* either queue the ast or release it, if reserved */ 533 /* either queue the ast or release it, if reserved */
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 3f6c8d88f7af..64239b37e5d4 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -53,6 +53,23 @@ void dlm_print_one_lock_resource(struct dlm_lock_resource *res)
53 spin_unlock(&res->spinlock); 53 spin_unlock(&res->spinlock);
54} 54}
55 55
56static void dlm_print_lockres_refmap(struct dlm_lock_resource *res)
57{
58 int bit;
59 assert_spin_locked(&res->spinlock);
60
61 mlog(ML_NOTICE, " refmap nodes: [ ");
62 bit = 0;
63 while (1) {
64 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
65 if (bit >= O2NM_MAX_NODES)
66 break;
67 printk("%u ", bit);
68 bit++;
69 }
70 printk("], inflight=%u\n", res->inflight_locks);
71}
72
56void __dlm_print_one_lock_resource(struct dlm_lock_resource *res) 73void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
57{ 74{
58 struct list_head *iter2; 75 struct list_head *iter2;
@@ -65,6 +82,7 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
65 res->owner, res->state); 82 res->owner, res->state);
66 mlog(ML_NOTICE, " last used: %lu, on purge list: %s\n", 83 mlog(ML_NOTICE, " last used: %lu, on purge list: %s\n",
67 res->last_used, list_empty(&res->purge) ? "no" : "yes"); 84 res->last_used, list_empty(&res->purge) ? "no" : "yes");
85 dlm_print_lockres_refmap(res);
68 mlog(ML_NOTICE, " granted queue: \n"); 86 mlog(ML_NOTICE, " granted queue: \n");
69 list_for_each(iter2, &res->granted) { 87 list_for_each(iter2, &res->granted) {
70 lock = list_entry(iter2, struct dlm_lock, list); 88 lock = list_entry(iter2, struct dlm_lock, list);
@@ -72,8 +90,8 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
72 mlog(ML_NOTICE, " type=%d, conv=%d, node=%u, " 90 mlog(ML_NOTICE, " type=%d, conv=%d, node=%u, "
73 "cookie=%u:%llu, ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c)\n", 91 "cookie=%u:%llu, ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c)\n",
74 lock->ml.type, lock->ml.convert_type, lock->ml.node, 92 lock->ml.type, lock->ml.convert_type, lock->ml.node,
75 dlm_get_lock_cookie_node(lock->ml.cookie), 93 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
76 dlm_get_lock_cookie_seq(lock->ml.cookie), 94 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
77 list_empty(&lock->ast_list) ? 'y' : 'n', 95 list_empty(&lock->ast_list) ? 'y' : 'n',
78 lock->ast_pending ? 'y' : 'n', 96 lock->ast_pending ? 'y' : 'n',
79 list_empty(&lock->bast_list) ? 'y' : 'n', 97 list_empty(&lock->bast_list) ? 'y' : 'n',
@@ -87,8 +105,8 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
87 mlog(ML_NOTICE, " type=%d, conv=%d, node=%u, " 105 mlog(ML_NOTICE, " type=%d, conv=%d, node=%u, "
88 "cookie=%u:%llu, ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c)\n", 106 "cookie=%u:%llu, ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c)\n",
89 lock->ml.type, lock->ml.convert_type, lock->ml.node, 107 lock->ml.type, lock->ml.convert_type, lock->ml.node,
90 dlm_get_lock_cookie_node(lock->ml.cookie), 108 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
91 dlm_get_lock_cookie_seq(lock->ml.cookie), 109 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
92 list_empty(&lock->ast_list) ? 'y' : 'n', 110 list_empty(&lock->ast_list) ? 'y' : 'n',
93 lock->ast_pending ? 'y' : 'n', 111 lock->ast_pending ? 'y' : 'n',
94 list_empty(&lock->bast_list) ? 'y' : 'n', 112 list_empty(&lock->bast_list) ? 'y' : 'n',
@@ -102,8 +120,8 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
102 mlog(ML_NOTICE, " type=%d, conv=%d, node=%u, " 120 mlog(ML_NOTICE, " type=%d, conv=%d, node=%u, "
103 "cookie=%u:%llu, ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c)\n", 121 "cookie=%u:%llu, ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c)\n",
104 lock->ml.type, lock->ml.convert_type, lock->ml.node, 122 lock->ml.type, lock->ml.convert_type, lock->ml.node,
105 dlm_get_lock_cookie_node(lock->ml.cookie), 123 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
106 dlm_get_lock_cookie_seq(lock->ml.cookie), 124 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
107 list_empty(&lock->ast_list) ? 'y' : 'n', 125 list_empty(&lock->ast_list) ? 'y' : 'n',
108 lock->ast_pending ? 'y' : 'n', 126 lock->ast_pending ? 'y' : 'n',
109 list_empty(&lock->bast_list) ? 'y' : 'n', 127 list_empty(&lock->bast_list) ? 'y' : 'n',
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index f0b25f2dd205..6087c4749fee 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -48,6 +48,36 @@
48#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN) 48#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN)
49#include "cluster/masklog.h" 49#include "cluster/masklog.h"
50 50
51/*
52 * ocfs2 node maps are array of long int, which limits to send them freely
53 * across the wire due to endianness issues. To workaround this, we convert
54 * long ints to byte arrays. Following 3 routines are helper functions to
55 * set/test/copy bits within those array of bytes
56 */
57static inline void byte_set_bit(u8 nr, u8 map[])
58{
59 map[nr >> 3] |= (1UL << (nr & 7));
60}
61
62static inline int byte_test_bit(u8 nr, u8 map[])
63{
64 return ((1UL << (nr & 7)) & (map[nr >> 3])) != 0;
65}
66
67static inline void byte_copymap(u8 dmap[], unsigned long smap[],
68 unsigned int sz)
69{
70 unsigned int nn;
71
72 if (!sz)
73 return;
74
75 memset(dmap, 0, ((sz + 7) >> 3));
76 for (nn = 0 ; nn < sz; nn++)
77 if (test_bit(nn, smap))
78 byte_set_bit(nn, dmap);
79}
80
51static void dlm_free_pagevec(void **vec, int pages) 81static void dlm_free_pagevec(void **vec, int pages)
52{ 82{
53 while (pages--) 83 while (pages--)
@@ -95,10 +125,14 @@ static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events);
95 125
96#define DLM_DOMAIN_BACKOFF_MS 200 126#define DLM_DOMAIN_BACKOFF_MS 200
97 127
98static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data); 128static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
99static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data); 129 void **ret_data);
100static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data); 130static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
101static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data); 131 void **ret_data);
132static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data,
133 void **ret_data);
134static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
135 void **ret_data);
102 136
103static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm); 137static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm);
104 138
@@ -125,10 +159,10 @@ void __dlm_insert_lockres(struct dlm_ctxt *dlm,
125 hlist_add_head(&res->hash_node, bucket); 159 hlist_add_head(&res->hash_node, bucket);
126} 160}
127 161
128struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm, 162struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
129 const char *name, 163 const char *name,
130 unsigned int len, 164 unsigned int len,
131 unsigned int hash) 165 unsigned int hash)
132{ 166{
133 struct hlist_head *bucket; 167 struct hlist_head *bucket;
134 struct hlist_node *list; 168 struct hlist_node *list;
@@ -154,6 +188,37 @@ struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
154 return NULL; 188 return NULL;
155} 189}
156 190
191/* intended to be called by functions which do not care about lock
192 * resources which are being purged (most net _handler functions).
193 * this will return NULL for any lock resource which is found but
194 * currently in the process of dropping its mastery reference.
195 * use __dlm_lookup_lockres_full when you need the lock resource
196 * regardless (e.g. dlm_get_lock_resource) */
197struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
198 const char *name,
199 unsigned int len,
200 unsigned int hash)
201{
202 struct dlm_lock_resource *res = NULL;
203
204 mlog_entry("%.*s\n", len, name);
205
206 assert_spin_locked(&dlm->spinlock);
207
208 res = __dlm_lookup_lockres_full(dlm, name, len, hash);
209 if (res) {
210 spin_lock(&res->spinlock);
211 if (res->state & DLM_LOCK_RES_DROPPING_REF) {
212 spin_unlock(&res->spinlock);
213 dlm_lockres_put(res);
214 return NULL;
215 }
216 spin_unlock(&res->spinlock);
217 }
218
219 return res;
220}
221
157struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm, 222struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
158 const char *name, 223 const char *name,
159 unsigned int len) 224 unsigned int len)
@@ -330,43 +395,60 @@ static void dlm_complete_dlm_shutdown(struct dlm_ctxt *dlm)
330 wake_up(&dlm_domain_events); 395 wake_up(&dlm_domain_events);
331} 396}
332 397
333static void dlm_migrate_all_locks(struct dlm_ctxt *dlm) 398static int dlm_migrate_all_locks(struct dlm_ctxt *dlm)
334{ 399{
335 int i; 400 int i, num, n, ret = 0;
336 struct dlm_lock_resource *res; 401 struct dlm_lock_resource *res;
402 struct hlist_node *iter;
403 struct hlist_head *bucket;
404 int dropped;
337 405
338 mlog(0, "Migrating locks from domain %s\n", dlm->name); 406 mlog(0, "Migrating locks from domain %s\n", dlm->name);
339restart: 407
408 num = 0;
340 spin_lock(&dlm->spinlock); 409 spin_lock(&dlm->spinlock);
341 for (i = 0; i < DLM_HASH_BUCKETS; i++) { 410 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
342 while (!hlist_empty(dlm_lockres_hash(dlm, i))) { 411redo_bucket:
343 res = hlist_entry(dlm_lockres_hash(dlm, i)->first, 412 n = 0;
344 struct dlm_lock_resource, hash_node); 413 bucket = dlm_lockres_hash(dlm, i);
345 /* need reference when manually grabbing lockres */ 414 iter = bucket->first;
415 while (iter) {
416 n++;
417 res = hlist_entry(iter, struct dlm_lock_resource,
418 hash_node);
346 dlm_lockres_get(res); 419 dlm_lockres_get(res);
347 /* this should unhash the lockres 420 /* migrate, if necessary. this will drop the dlm
348 * and exit with dlm->spinlock */ 421 * spinlock and retake it if it does migration. */
349 mlog(0, "purging res=%p\n", res); 422 dropped = dlm_empty_lockres(dlm, res);
350 if (dlm_lockres_is_dirty(dlm, res)) { 423
351 /* HACK! this should absolutely go. 424 spin_lock(&res->spinlock);
352 * need to figure out why some empty 425 __dlm_lockres_calc_usage(dlm, res);
353 * lockreses are still marked dirty */ 426 iter = res->hash_node.next;
354 mlog(ML_ERROR, "lockres %.*s dirty!\n", 427 spin_unlock(&res->spinlock);
355 res->lockname.len, res->lockname.name); 428
356
357 spin_unlock(&dlm->spinlock);
358 dlm_kick_thread(dlm, res);
359 wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
360 dlm_lockres_put(res);
361 goto restart;
362 }
363 dlm_purge_lockres(dlm, res);
364 dlm_lockres_put(res); 429 dlm_lockres_put(res);
430
431 cond_resched_lock(&dlm->spinlock);
432
433 if (dropped)
434 goto redo_bucket;
365 } 435 }
436 num += n;
437 mlog(0, "%s: touched %d lockreses in bucket %d "
438 "(tot=%d)\n", dlm->name, n, i, num);
366 } 439 }
367 spin_unlock(&dlm->spinlock); 440 spin_unlock(&dlm->spinlock);
368 441 wake_up(&dlm->dlm_thread_wq);
442
443 /* let the dlm thread take care of purging, keep scanning until
444 * nothing remains in the hash */
445 if (num) {
446 mlog(0, "%s: %d lock resources in hash last pass\n",
447 dlm->name, num);
448 ret = -EAGAIN;
449 }
369 mlog(0, "DONE Migrating locks from domain %s\n", dlm->name); 450 mlog(0, "DONE Migrating locks from domain %s\n", dlm->name);
451 return ret;
370} 452}
371 453
372static int dlm_no_joining_node(struct dlm_ctxt *dlm) 454static int dlm_no_joining_node(struct dlm_ctxt *dlm)
@@ -418,7 +500,8 @@ static void __dlm_print_nodes(struct dlm_ctxt *dlm)
418 printk("\n"); 500 printk("\n");
419} 501}
420 502
421static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data) 503static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
504 void **ret_data)
422{ 505{
423 struct dlm_ctxt *dlm = data; 506 struct dlm_ctxt *dlm = data;
424 unsigned int node; 507 unsigned int node;
@@ -571,7 +654,9 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)
571 /* We changed dlm state, notify the thread */ 654 /* We changed dlm state, notify the thread */
572 dlm_kick_thread(dlm, NULL); 655 dlm_kick_thread(dlm, NULL);
573 656
574 dlm_migrate_all_locks(dlm); 657 while (dlm_migrate_all_locks(dlm)) {
658 mlog(0, "%s: more migration to do\n", dlm->name);
659 }
575 dlm_mark_domain_leaving(dlm); 660 dlm_mark_domain_leaving(dlm);
576 dlm_leave_domain(dlm); 661 dlm_leave_domain(dlm);
577 dlm_complete_dlm_shutdown(dlm); 662 dlm_complete_dlm_shutdown(dlm);
@@ -580,11 +665,13 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)
580} 665}
581EXPORT_SYMBOL_GPL(dlm_unregister_domain); 666EXPORT_SYMBOL_GPL(dlm_unregister_domain);
582 667
583static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data) 668static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
669 void **ret_data)
584{ 670{
585 struct dlm_query_join_request *query; 671 struct dlm_query_join_request *query;
586 enum dlm_query_join_response response; 672 enum dlm_query_join_response response;
587 struct dlm_ctxt *dlm = NULL; 673 struct dlm_ctxt *dlm = NULL;
674 u8 nodenum;
588 675
589 query = (struct dlm_query_join_request *) msg->buf; 676 query = (struct dlm_query_join_request *) msg->buf;
590 677
@@ -608,6 +695,28 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data)
608 695
609 spin_lock(&dlm_domain_lock); 696 spin_lock(&dlm_domain_lock);
610 dlm = __dlm_lookup_domain_full(query->domain, query->name_len); 697 dlm = __dlm_lookup_domain_full(query->domain, query->name_len);
698 if (!dlm)
699 goto unlock_respond;
700
701 /*
702 * There is a small window where the joining node may not see the
703 * node(s) that just left but still part of the cluster. DISALLOW
704 * join request if joining node has different node map.
705 */
706 nodenum=0;
707 while (nodenum < O2NM_MAX_NODES) {
708 if (test_bit(nodenum, dlm->domain_map)) {
709 if (!byte_test_bit(nodenum, query->node_map)) {
710 mlog(0, "disallow join as node %u does not "
711 "have node %u in its nodemap\n",
712 query->node_idx, nodenum);
713 response = JOIN_DISALLOW;
714 goto unlock_respond;
715 }
716 }
717 nodenum++;
718 }
719
611 /* Once the dlm ctxt is marked as leaving then we don't want 720 /* Once the dlm ctxt is marked as leaving then we don't want
612 * to be put in someone's domain map. 721 * to be put in someone's domain map.
613 * Also, explicitly disallow joining at certain troublesome 722 * Also, explicitly disallow joining at certain troublesome
@@ -626,15 +735,15 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data)
626 /* Disallow parallel joins. */ 735 /* Disallow parallel joins. */
627 response = JOIN_DISALLOW; 736 response = JOIN_DISALLOW;
628 } else if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) { 737 } else if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) {
629 mlog(ML_NOTICE, "node %u trying to join, but recovery " 738 mlog(0, "node %u trying to join, but recovery "
630 "is ongoing.\n", bit); 739 "is ongoing.\n", bit);
631 response = JOIN_DISALLOW; 740 response = JOIN_DISALLOW;
632 } else if (test_bit(bit, dlm->recovery_map)) { 741 } else if (test_bit(bit, dlm->recovery_map)) {
633 mlog(ML_NOTICE, "node %u trying to join, but it " 742 mlog(0, "node %u trying to join, but it "
634 "still needs recovery.\n", bit); 743 "still needs recovery.\n", bit);
635 response = JOIN_DISALLOW; 744 response = JOIN_DISALLOW;
636 } else if (test_bit(bit, dlm->domain_map)) { 745 } else if (test_bit(bit, dlm->domain_map)) {
637 mlog(ML_NOTICE, "node %u trying to join, but it " 746 mlog(0, "node %u trying to join, but it "
638 "is still in the domain! needs recovery?\n", 747 "is still in the domain! needs recovery?\n",
639 bit); 748 bit);
640 response = JOIN_DISALLOW; 749 response = JOIN_DISALLOW;
@@ -649,6 +758,7 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data)
649 758
650 spin_unlock(&dlm->spinlock); 759 spin_unlock(&dlm->spinlock);
651 } 760 }
761unlock_respond:
652 spin_unlock(&dlm_domain_lock); 762 spin_unlock(&dlm_domain_lock);
653 763
654respond: 764respond:
@@ -657,7 +767,8 @@ respond:
657 return response; 767 return response;
658} 768}
659 769
660static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data) 770static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
771 void **ret_data)
661{ 772{
662 struct dlm_assert_joined *assert; 773 struct dlm_assert_joined *assert;
663 struct dlm_ctxt *dlm = NULL; 774 struct dlm_ctxt *dlm = NULL;
@@ -694,7 +805,8 @@ static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data)
694 return 0; 805 return 0;
695} 806}
696 807
697static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data) 808static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data,
809 void **ret_data)
698{ 810{
699 struct dlm_cancel_join *cancel; 811 struct dlm_cancel_join *cancel;
700 struct dlm_ctxt *dlm = NULL; 812 struct dlm_ctxt *dlm = NULL;
@@ -796,6 +908,9 @@ static int dlm_request_join(struct dlm_ctxt *dlm,
796 join_msg.name_len = strlen(dlm->name); 908 join_msg.name_len = strlen(dlm->name);
797 memcpy(join_msg.domain, dlm->name, join_msg.name_len); 909 memcpy(join_msg.domain, dlm->name, join_msg.name_len);
798 910
911 /* copy live node map to join message */
912 byte_copymap(join_msg.node_map, dlm->live_nodes_map, O2NM_MAX_NODES);
913
799 status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg, 914 status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg,
800 sizeof(join_msg), node, &retval); 915 sizeof(join_msg), node, &retval);
801 if (status < 0 && status != -ENOPROTOOPT) { 916 if (status < 0 && status != -ENOPROTOOPT) {
@@ -1036,98 +1151,106 @@ static int dlm_register_domain_handlers(struct dlm_ctxt *dlm)
1036 status = o2net_register_handler(DLM_MASTER_REQUEST_MSG, dlm->key, 1151 status = o2net_register_handler(DLM_MASTER_REQUEST_MSG, dlm->key,
1037 sizeof(struct dlm_master_request), 1152 sizeof(struct dlm_master_request),
1038 dlm_master_request_handler, 1153 dlm_master_request_handler,
1039 dlm, &dlm->dlm_domain_handlers); 1154 dlm, NULL, &dlm->dlm_domain_handlers);
1040 if (status) 1155 if (status)
1041 goto bail; 1156 goto bail;
1042 1157
1043 status = o2net_register_handler(DLM_ASSERT_MASTER_MSG, dlm->key, 1158 status = o2net_register_handler(DLM_ASSERT_MASTER_MSG, dlm->key,
1044 sizeof(struct dlm_assert_master), 1159 sizeof(struct dlm_assert_master),
1045 dlm_assert_master_handler, 1160 dlm_assert_master_handler,
1046 dlm, &dlm->dlm_domain_handlers); 1161 dlm, dlm_assert_master_post_handler,
1162 &dlm->dlm_domain_handlers);
1047 if (status) 1163 if (status)
1048 goto bail; 1164 goto bail;
1049 1165
1050 status = o2net_register_handler(DLM_CREATE_LOCK_MSG, dlm->key, 1166 status = o2net_register_handler(DLM_CREATE_LOCK_MSG, dlm->key,
1051 sizeof(struct dlm_create_lock), 1167 sizeof(struct dlm_create_lock),
1052 dlm_create_lock_handler, 1168 dlm_create_lock_handler,
1053 dlm, &dlm->dlm_domain_handlers); 1169 dlm, NULL, &dlm->dlm_domain_handlers);
1054 if (status) 1170 if (status)
1055 goto bail; 1171 goto bail;
1056 1172
1057 status = o2net_register_handler(DLM_CONVERT_LOCK_MSG, dlm->key, 1173 status = o2net_register_handler(DLM_CONVERT_LOCK_MSG, dlm->key,
1058 DLM_CONVERT_LOCK_MAX_LEN, 1174 DLM_CONVERT_LOCK_MAX_LEN,
1059 dlm_convert_lock_handler, 1175 dlm_convert_lock_handler,
1060 dlm, &dlm->dlm_domain_handlers); 1176 dlm, NULL, &dlm->dlm_domain_handlers);
1061 if (status) 1177 if (status)
1062 goto bail; 1178 goto bail;
1063 1179
1064 status = o2net_register_handler(DLM_UNLOCK_LOCK_MSG, dlm->key, 1180 status = o2net_register_handler(DLM_UNLOCK_LOCK_MSG, dlm->key,
1065 DLM_UNLOCK_LOCK_MAX_LEN, 1181 DLM_UNLOCK_LOCK_MAX_LEN,
1066 dlm_unlock_lock_handler, 1182 dlm_unlock_lock_handler,
1067 dlm, &dlm->dlm_domain_handlers); 1183 dlm, NULL, &dlm->dlm_domain_handlers);
1068 if (status) 1184 if (status)
1069 goto bail; 1185 goto bail;
1070 1186
1071 status = o2net_register_handler(DLM_PROXY_AST_MSG, dlm->key, 1187 status = o2net_register_handler(DLM_PROXY_AST_MSG, dlm->key,
1072 DLM_PROXY_AST_MAX_LEN, 1188 DLM_PROXY_AST_MAX_LEN,
1073 dlm_proxy_ast_handler, 1189 dlm_proxy_ast_handler,
1074 dlm, &dlm->dlm_domain_handlers); 1190 dlm, NULL, &dlm->dlm_domain_handlers);
1075 if (status) 1191 if (status)
1076 goto bail; 1192 goto bail;
1077 1193
1078 status = o2net_register_handler(DLM_EXIT_DOMAIN_MSG, dlm->key, 1194 status = o2net_register_handler(DLM_EXIT_DOMAIN_MSG, dlm->key,
1079 sizeof(struct dlm_exit_domain), 1195 sizeof(struct dlm_exit_domain),
1080 dlm_exit_domain_handler, 1196 dlm_exit_domain_handler,
1081 dlm, &dlm->dlm_domain_handlers); 1197 dlm, NULL, &dlm->dlm_domain_handlers);
1198 if (status)
1199 goto bail;
1200
1201 status = o2net_register_handler(DLM_DEREF_LOCKRES_MSG, dlm->key,
1202 sizeof(struct dlm_deref_lockres),
1203 dlm_deref_lockres_handler,
1204 dlm, NULL, &dlm->dlm_domain_handlers);
1082 if (status) 1205 if (status)
1083 goto bail; 1206 goto bail;
1084 1207
1085 status = o2net_register_handler(DLM_MIGRATE_REQUEST_MSG, dlm->key, 1208 status = o2net_register_handler(DLM_MIGRATE_REQUEST_MSG, dlm->key,
1086 sizeof(struct dlm_migrate_request), 1209 sizeof(struct dlm_migrate_request),
1087 dlm_migrate_request_handler, 1210 dlm_migrate_request_handler,
1088 dlm, &dlm->dlm_domain_handlers); 1211 dlm, NULL, &dlm->dlm_domain_handlers);
1089 if (status) 1212 if (status)
1090 goto bail; 1213 goto bail;
1091 1214
1092 status = o2net_register_handler(DLM_MIG_LOCKRES_MSG, dlm->key, 1215 status = o2net_register_handler(DLM_MIG_LOCKRES_MSG, dlm->key,
1093 DLM_MIG_LOCKRES_MAX_LEN, 1216 DLM_MIG_LOCKRES_MAX_LEN,
1094 dlm_mig_lockres_handler, 1217 dlm_mig_lockres_handler,
1095 dlm, &dlm->dlm_domain_handlers); 1218 dlm, NULL, &dlm->dlm_domain_handlers);
1096 if (status) 1219 if (status)
1097 goto bail; 1220 goto bail;
1098 1221
1099 status = o2net_register_handler(DLM_MASTER_REQUERY_MSG, dlm->key, 1222 status = o2net_register_handler(DLM_MASTER_REQUERY_MSG, dlm->key,
1100 sizeof(struct dlm_master_requery), 1223 sizeof(struct dlm_master_requery),
1101 dlm_master_requery_handler, 1224 dlm_master_requery_handler,
1102 dlm, &dlm->dlm_domain_handlers); 1225 dlm, NULL, &dlm->dlm_domain_handlers);
1103 if (status) 1226 if (status)
1104 goto bail; 1227 goto bail;
1105 1228
1106 status = o2net_register_handler(DLM_LOCK_REQUEST_MSG, dlm->key, 1229 status = o2net_register_handler(DLM_LOCK_REQUEST_MSG, dlm->key,
1107 sizeof(struct dlm_lock_request), 1230 sizeof(struct dlm_lock_request),
1108 dlm_request_all_locks_handler, 1231 dlm_request_all_locks_handler,
1109 dlm, &dlm->dlm_domain_handlers); 1232 dlm, NULL, &dlm->dlm_domain_handlers);
1110 if (status) 1233 if (status)
1111 goto bail; 1234 goto bail;
1112 1235
1113 status = o2net_register_handler(DLM_RECO_DATA_DONE_MSG, dlm->key, 1236 status = o2net_register_handler(DLM_RECO_DATA_DONE_MSG, dlm->key,
1114 sizeof(struct dlm_reco_data_done), 1237 sizeof(struct dlm_reco_data_done),
1115 dlm_reco_data_done_handler, 1238 dlm_reco_data_done_handler,
1116 dlm, &dlm->dlm_domain_handlers); 1239 dlm, NULL, &dlm->dlm_domain_handlers);
1117 if (status) 1240 if (status)
1118 goto bail; 1241 goto bail;
1119 1242
1120 status = o2net_register_handler(DLM_BEGIN_RECO_MSG, dlm->key, 1243 status = o2net_register_handler(DLM_BEGIN_RECO_MSG, dlm->key,
1121 sizeof(struct dlm_begin_reco), 1244 sizeof(struct dlm_begin_reco),
1122 dlm_begin_reco_handler, 1245 dlm_begin_reco_handler,
1123 dlm, &dlm->dlm_domain_handlers); 1246 dlm, NULL, &dlm->dlm_domain_handlers);
1124 if (status) 1247 if (status)
1125 goto bail; 1248 goto bail;
1126 1249
1127 status = o2net_register_handler(DLM_FINALIZE_RECO_MSG, dlm->key, 1250 status = o2net_register_handler(DLM_FINALIZE_RECO_MSG, dlm->key,
1128 sizeof(struct dlm_finalize_reco), 1251 sizeof(struct dlm_finalize_reco),
1129 dlm_finalize_reco_handler, 1252 dlm_finalize_reco_handler,
1130 dlm, &dlm->dlm_domain_handlers); 1253 dlm, NULL, &dlm->dlm_domain_handlers);
1131 if (status) 1254 if (status)
1132 goto bail; 1255 goto bail;
1133 1256
@@ -1141,6 +1264,8 @@ bail:
1141static int dlm_join_domain(struct dlm_ctxt *dlm) 1264static int dlm_join_domain(struct dlm_ctxt *dlm)
1142{ 1265{
1143 int status; 1266 int status;
1267 unsigned int backoff;
1268 unsigned int total_backoff = 0;
1144 1269
1145 BUG_ON(!dlm); 1270 BUG_ON(!dlm);
1146 1271
@@ -1172,18 +1297,27 @@ static int dlm_join_domain(struct dlm_ctxt *dlm)
1172 } 1297 }
1173 1298
1174 do { 1299 do {
1175 unsigned int backoff;
1176 status = dlm_try_to_join_domain(dlm); 1300 status = dlm_try_to_join_domain(dlm);
1177 1301
1178 /* If we're racing another node to the join, then we 1302 /* If we're racing another node to the join, then we
1179 * need to back off temporarily and let them 1303 * need to back off temporarily and let them
1180 * complete. */ 1304 * complete. */
1305#define DLM_JOIN_TIMEOUT_MSECS 90000
1181 if (status == -EAGAIN) { 1306 if (status == -EAGAIN) {
1182 if (signal_pending(current)) { 1307 if (signal_pending(current)) {
1183 status = -ERESTARTSYS; 1308 status = -ERESTARTSYS;
1184 goto bail; 1309 goto bail;
1185 } 1310 }
1186 1311
1312 if (total_backoff >
1313 msecs_to_jiffies(DLM_JOIN_TIMEOUT_MSECS)) {
1314 status = -ERESTARTSYS;
1315 mlog(ML_NOTICE, "Timed out joining dlm domain "
1316 "%s after %u msecs\n", dlm->name,
1317 jiffies_to_msecs(total_backoff));
1318 goto bail;
1319 }
1320
1187 /* 1321 /*
1188 * <chip> After you! 1322 * <chip> After you!
1189 * <dale> No, after you! 1323 * <dale> No, after you!
@@ -1193,6 +1327,7 @@ static int dlm_join_domain(struct dlm_ctxt *dlm)
1193 */ 1327 */
1194 backoff = (unsigned int)(jiffies & 0x3); 1328 backoff = (unsigned int)(jiffies & 0x3);
1195 backoff *= DLM_DOMAIN_BACKOFF_MS; 1329 backoff *= DLM_DOMAIN_BACKOFF_MS;
1330 total_backoff += backoff;
1196 mlog(0, "backoff %d\n", backoff); 1331 mlog(0, "backoff %d\n", backoff);
1197 msleep(backoff); 1332 msleep(backoff);
1198 } 1333 }
@@ -1421,21 +1556,21 @@ static int dlm_register_net_handlers(void)
1421 status = o2net_register_handler(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, 1556 status = o2net_register_handler(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY,
1422 sizeof(struct dlm_query_join_request), 1557 sizeof(struct dlm_query_join_request),
1423 dlm_query_join_handler, 1558 dlm_query_join_handler,
1424 NULL, &dlm_join_handlers); 1559 NULL, NULL, &dlm_join_handlers);
1425 if (status) 1560 if (status)
1426 goto bail; 1561 goto bail;
1427 1562
1428 status = o2net_register_handler(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY, 1563 status = o2net_register_handler(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
1429 sizeof(struct dlm_assert_joined), 1564 sizeof(struct dlm_assert_joined),
1430 dlm_assert_joined_handler, 1565 dlm_assert_joined_handler,
1431 NULL, &dlm_join_handlers); 1566 NULL, NULL, &dlm_join_handlers);
1432 if (status) 1567 if (status)
1433 goto bail; 1568 goto bail;
1434 1569
1435 status = o2net_register_handler(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY, 1570 status = o2net_register_handler(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
1436 sizeof(struct dlm_cancel_join), 1571 sizeof(struct dlm_cancel_join),
1437 dlm_cancel_join_handler, 1572 dlm_cancel_join_handler,
1438 NULL, &dlm_join_handlers); 1573 NULL, NULL, &dlm_join_handlers);
1439 1574
1440bail: 1575bail:
1441 if (status < 0) 1576 if (status < 0)
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index e5ca3db197f6..52578d907d9a 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -163,6 +163,10 @@ static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
163 kick_thread = 1; 163 kick_thread = 1;
164 } 164 }
165 } 165 }
166 /* reduce the inflight count, this may result in the lockres
167 * being purged below during calc_usage */
168 if (lock->ml.node == dlm->node_num)
169 dlm_lockres_drop_inflight_ref(dlm, res);
166 170
167 spin_unlock(&res->spinlock); 171 spin_unlock(&res->spinlock);
168 wake_up(&res->wq); 172 wake_up(&res->wq);
@@ -437,7 +441,8 @@ struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
437 * held on exit: none 441 * held on exit: none
438 * returns: DLM_NORMAL, DLM_SYSERR, DLM_IVLOCKID, DLM_NOTQUEUED 442 * returns: DLM_NORMAL, DLM_SYSERR, DLM_IVLOCKID, DLM_NOTQUEUED
439 */ 443 */
440int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data) 444int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
445 void **ret_data)
441{ 446{
442 struct dlm_ctxt *dlm = data; 447 struct dlm_ctxt *dlm = data;
443 struct dlm_create_lock *create = (struct dlm_create_lock *)msg->buf; 448 struct dlm_create_lock *create = (struct dlm_create_lock *)msg->buf;
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 0ad872055cb3..77e4e6169a0d 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -99,9 +99,10 @@ static void dlm_mle_node_up(struct dlm_ctxt *dlm,
99 int idx); 99 int idx);
100 100
101static void dlm_assert_master_worker(struct dlm_work_item *item, void *data); 101static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
102static int dlm_do_assert_master(struct dlm_ctxt *dlm, const char *lockname, 102static int dlm_do_assert_master(struct dlm_ctxt *dlm,
103 unsigned int namelen, void *nodemap, 103 struct dlm_lock_resource *res,
104 u32 flags); 104 void *nodemap, u32 flags);
105static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);
105 106
106static inline int dlm_mle_equal(struct dlm_ctxt *dlm, 107static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
107 struct dlm_master_list_entry *mle, 108 struct dlm_master_list_entry *mle,
@@ -237,7 +238,8 @@ static int dlm_find_mle(struct dlm_ctxt *dlm,
237 struct dlm_master_list_entry **mle, 238 struct dlm_master_list_entry **mle,
238 char *name, unsigned int namelen); 239 char *name, unsigned int namelen);
239 240
240static int dlm_do_master_request(struct dlm_master_list_entry *mle, int to); 241static int dlm_do_master_request(struct dlm_lock_resource *res,
242 struct dlm_master_list_entry *mle, int to);
241 243
242 244
243static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm, 245static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
@@ -687,6 +689,7 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
687 INIT_LIST_HEAD(&res->purge); 689 INIT_LIST_HEAD(&res->purge);
688 atomic_set(&res->asts_reserved, 0); 690 atomic_set(&res->asts_reserved, 0);
689 res->migration_pending = 0; 691 res->migration_pending = 0;
692 res->inflight_locks = 0;
690 693
691 kref_init(&res->refs); 694 kref_init(&res->refs);
692 695
@@ -700,6 +703,7 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
700 res->last_used = 0; 703 res->last_used = 0;
701 704
702 memset(res->lvb, 0, DLM_LVB_LEN); 705 memset(res->lvb, 0, DLM_LVB_LEN);
706 memset(res->refmap, 0, sizeof(res->refmap));
703} 707}
704 708
705struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm, 709struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
@@ -722,6 +726,42 @@ struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
722 return res; 726 return res;
723} 727}
724 728
729void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
730 struct dlm_lock_resource *res,
731 int new_lockres,
732 const char *file,
733 int line)
734{
735 if (!new_lockres)
736 assert_spin_locked(&res->spinlock);
737
738 if (!test_bit(dlm->node_num, res->refmap)) {
739 BUG_ON(res->inflight_locks != 0);
740 dlm_lockres_set_refmap_bit(dlm->node_num, res);
741 }
742 res->inflight_locks++;
743 mlog(0, "%s:%.*s: inflight++: now %u\n",
744 dlm->name, res->lockname.len, res->lockname.name,
745 res->inflight_locks);
746}
747
748void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
749 struct dlm_lock_resource *res,
750 const char *file,
751 int line)
752{
753 assert_spin_locked(&res->spinlock);
754
755 BUG_ON(res->inflight_locks == 0);
756 res->inflight_locks--;
757 mlog(0, "%s:%.*s: inflight--: now %u\n",
758 dlm->name, res->lockname.len, res->lockname.name,
759 res->inflight_locks);
760 if (res->inflight_locks == 0)
761 dlm_lockres_clear_refmap_bit(dlm->node_num, res);
762 wake_up(&res->wq);
763}
764
725/* 765/*
726 * lookup a lock resource by name. 766 * lookup a lock resource by name.
727 * may already exist in the hashtable. 767 * may already exist in the hashtable.
@@ -752,6 +792,7 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
752 unsigned int hash; 792 unsigned int hash;
753 int tries = 0; 793 int tries = 0;
754 int bit, wait_on_recovery = 0; 794 int bit, wait_on_recovery = 0;
795 int drop_inflight_if_nonlocal = 0;
755 796
756 BUG_ON(!lockid); 797 BUG_ON(!lockid);
757 798
@@ -761,9 +802,30 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
761 802
762lookup: 803lookup:
763 spin_lock(&dlm->spinlock); 804 spin_lock(&dlm->spinlock);
764 tmpres = __dlm_lookup_lockres(dlm, lockid, namelen, hash); 805 tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
765 if (tmpres) { 806 if (tmpres) {
807 int dropping_ref = 0;
808
809 spin_lock(&tmpres->spinlock);
810 if (tmpres->owner == dlm->node_num) {
811 BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF);
812 dlm_lockres_grab_inflight_ref(dlm, tmpres);
813 } else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF)
814 dropping_ref = 1;
815 spin_unlock(&tmpres->spinlock);
766 spin_unlock(&dlm->spinlock); 816 spin_unlock(&dlm->spinlock);
817
818 /* wait until done messaging the master, drop our ref to allow
819 * the lockres to be purged, start over. */
820 if (dropping_ref) {
821 spin_lock(&tmpres->spinlock);
822 __dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF);
823 spin_unlock(&tmpres->spinlock);
824 dlm_lockres_put(tmpres);
825 tmpres = NULL;
826 goto lookup;
827 }
828
767 mlog(0, "found in hash!\n"); 829 mlog(0, "found in hash!\n");
768 if (res) 830 if (res)
769 dlm_lockres_put(res); 831 dlm_lockres_put(res);
@@ -793,6 +855,7 @@ lookup:
793 spin_lock(&res->spinlock); 855 spin_lock(&res->spinlock);
794 dlm_change_lockres_owner(dlm, res, dlm->node_num); 856 dlm_change_lockres_owner(dlm, res, dlm->node_num);
795 __dlm_insert_lockres(dlm, res); 857 __dlm_insert_lockres(dlm, res);
858 dlm_lockres_grab_inflight_ref(dlm, res);
796 spin_unlock(&res->spinlock); 859 spin_unlock(&res->spinlock);
797 spin_unlock(&dlm->spinlock); 860 spin_unlock(&dlm->spinlock);
798 /* lockres still marked IN_PROGRESS */ 861 /* lockres still marked IN_PROGRESS */
@@ -805,29 +868,40 @@ lookup:
805 /* if we found a block, wait for lock to be mastered by another node */ 868 /* if we found a block, wait for lock to be mastered by another node */
806 blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen); 869 blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
807 if (blocked) { 870 if (blocked) {
871 int mig;
808 if (mle->type == DLM_MLE_MASTER) { 872 if (mle->type == DLM_MLE_MASTER) {
809 mlog(ML_ERROR, "master entry for nonexistent lock!\n"); 873 mlog(ML_ERROR, "master entry for nonexistent lock!\n");
810 BUG(); 874 BUG();
811 } else if (mle->type == DLM_MLE_MIGRATION) { 875 }
812 /* migration is in progress! */ 876 mig = (mle->type == DLM_MLE_MIGRATION);
813 /* the good news is that we now know the 877 /* if there is a migration in progress, let the migration
814 * "current" master (mle->master). */ 878 * finish before continuing. we can wait for the absence
815 879 * of the MIGRATION mle: either the migrate finished or
880 * one of the nodes died and the mle was cleaned up.
881 * if there is a BLOCK here, but it already has a master
882 * set, we are too late. the master does not have a ref
883 * for us in the refmap. detach the mle and drop it.
884 * either way, go back to the top and start over. */
885 if (mig || mle->master != O2NM_MAX_NODES) {
886 BUG_ON(mig && mle->master == dlm->node_num);
887 /* we arrived too late. the master does not
888 * have a ref for us. retry. */
889 mlog(0, "%s:%.*s: late on %s\n",
890 dlm->name, namelen, lockid,
891 mig ? "MIGRATION" : "BLOCK");
816 spin_unlock(&dlm->master_lock); 892 spin_unlock(&dlm->master_lock);
817 assert_spin_locked(&dlm->spinlock);
818
819 /* set the lockres owner and hash it */
820 spin_lock(&res->spinlock);
821 dlm_set_lockres_owner(dlm, res, mle->master);
822 __dlm_insert_lockres(dlm, res);
823 spin_unlock(&res->spinlock);
824 spin_unlock(&dlm->spinlock); 893 spin_unlock(&dlm->spinlock);
825 894
826 /* master is known, detach */ 895 /* master is known, detach */
827 dlm_mle_detach_hb_events(dlm, mle); 896 if (!mig)
897 dlm_mle_detach_hb_events(dlm, mle);
828 dlm_put_mle(mle); 898 dlm_put_mle(mle);
829 mle = NULL; 899 mle = NULL;
830 goto wake_waiters; 900 /* this is lame, but we cant wait on either
901 * the mle or lockres waitqueue here */
902 if (mig)
903 msleep(100);
904 goto lookup;
831 } 905 }
832 } else { 906 } else {
833 /* go ahead and try to master lock on this node */ 907 /* go ahead and try to master lock on this node */
@@ -858,6 +932,13 @@ lookup:
858 932
859 /* finally add the lockres to its hash bucket */ 933 /* finally add the lockres to its hash bucket */
860 __dlm_insert_lockres(dlm, res); 934 __dlm_insert_lockres(dlm, res);
935 /* since this lockres is new it doesnt not require the spinlock */
936 dlm_lockres_grab_inflight_ref_new(dlm, res);
937
938 /* if this node does not become the master make sure to drop
939 * this inflight reference below */
940 drop_inflight_if_nonlocal = 1;
941
861 /* get an extra ref on the mle in case this is a BLOCK 942 /* get an extra ref on the mle in case this is a BLOCK
862 * if so, the creator of the BLOCK may try to put the last 943 * if so, the creator of the BLOCK may try to put the last
863 * ref at this time in the assert master handler, so we 944 * ref at this time in the assert master handler, so we
@@ -910,7 +991,7 @@ redo_request:
910 ret = -EINVAL; 991 ret = -EINVAL;
911 dlm_node_iter_init(mle->vote_map, &iter); 992 dlm_node_iter_init(mle->vote_map, &iter);
912 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { 993 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
913 ret = dlm_do_master_request(mle, nodenum); 994 ret = dlm_do_master_request(res, mle, nodenum);
914 if (ret < 0) 995 if (ret < 0)
915 mlog_errno(ret); 996 mlog_errno(ret);
916 if (mle->master != O2NM_MAX_NODES) { 997 if (mle->master != O2NM_MAX_NODES) {
@@ -960,6 +1041,8 @@ wait:
960 1041
961wake_waiters: 1042wake_waiters:
962 spin_lock(&res->spinlock); 1043 spin_lock(&res->spinlock);
1044 if (res->owner != dlm->node_num && drop_inflight_if_nonlocal)
1045 dlm_lockres_drop_inflight_ref(dlm, res);
963 res->state &= ~DLM_LOCK_RES_IN_PROGRESS; 1046 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
964 spin_unlock(&res->spinlock); 1047 spin_unlock(&res->spinlock);
965 wake_up(&res->wq); 1048 wake_up(&res->wq);
@@ -998,7 +1081,7 @@ recheck:
998 /* this will cause the master to re-assert across 1081 /* this will cause the master to re-assert across
999 * the whole cluster, freeing up mles */ 1082 * the whole cluster, freeing up mles */
1000 if (res->owner != dlm->node_num) { 1083 if (res->owner != dlm->node_num) {
1001 ret = dlm_do_master_request(mle, res->owner); 1084 ret = dlm_do_master_request(res, mle, res->owner);
1002 if (ret < 0) { 1085 if (ret < 0) {
1003 /* give recovery a chance to run */ 1086 /* give recovery a chance to run */
1004 mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret); 1087 mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
@@ -1062,6 +1145,8 @@ recheck:
1062 * now tell other nodes that I am 1145 * now tell other nodes that I am
1063 * mastering this. */ 1146 * mastering this. */
1064 mle->master = dlm->node_num; 1147 mle->master = dlm->node_num;
1148 /* ref was grabbed in get_lock_resource
1149 * will be dropped in dlmlock_master */
1065 assert = 1; 1150 assert = 1;
1066 sleep = 0; 1151 sleep = 0;
1067 } 1152 }
@@ -1087,7 +1172,8 @@ recheck:
1087 (atomic_read(&mle->woken) == 1), 1172 (atomic_read(&mle->woken) == 1),
1088 timeo); 1173 timeo);
1089 if (res->owner == O2NM_MAX_NODES) { 1174 if (res->owner == O2NM_MAX_NODES) {
1090 mlog(0, "waiting again\n"); 1175 mlog(0, "%s:%.*s: waiting again\n", dlm->name,
1176 res->lockname.len, res->lockname.name);
1091 goto recheck; 1177 goto recheck;
1092 } 1178 }
1093 mlog(0, "done waiting, master is %u\n", res->owner); 1179 mlog(0, "done waiting, master is %u\n", res->owner);
@@ -1100,8 +1186,7 @@ recheck:
1100 m = dlm->node_num; 1186 m = dlm->node_num;
1101 mlog(0, "about to master %.*s here, this=%u\n", 1187 mlog(0, "about to master %.*s here, this=%u\n",
1102 res->lockname.len, res->lockname.name, m); 1188 res->lockname.len, res->lockname.name, m);
1103 ret = dlm_do_assert_master(dlm, res->lockname.name, 1189 ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
1104 res->lockname.len, mle->vote_map, 0);
1105 if (ret) { 1190 if (ret) {
1106 /* This is a failure in the network path, 1191 /* This is a failure in the network path,
1107 * not in the response to the assert_master 1192 * not in the response to the assert_master
@@ -1117,6 +1202,8 @@ recheck:
1117 1202
1118 /* set the lockres owner */ 1203 /* set the lockres owner */
1119 spin_lock(&res->spinlock); 1204 spin_lock(&res->spinlock);
1205 /* mastery reference obtained either during
1206 * assert_master_handler or in get_lock_resource */
1120 dlm_change_lockres_owner(dlm, res, m); 1207 dlm_change_lockres_owner(dlm, res, m);
1121 spin_unlock(&res->spinlock); 1208 spin_unlock(&res->spinlock);
1122 1209
@@ -1283,7 +1370,8 @@ static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
1283 * 1370 *
1284 */ 1371 */
1285 1372
1286static int dlm_do_master_request(struct dlm_master_list_entry *mle, int to) 1373static int dlm_do_master_request(struct dlm_lock_resource *res,
1374 struct dlm_master_list_entry *mle, int to)
1287{ 1375{
1288 struct dlm_ctxt *dlm = mle->dlm; 1376 struct dlm_ctxt *dlm = mle->dlm;
1289 struct dlm_master_request request; 1377 struct dlm_master_request request;
@@ -1339,6 +1427,9 @@ again:
1339 case DLM_MASTER_RESP_YES: 1427 case DLM_MASTER_RESP_YES:
1340 set_bit(to, mle->response_map); 1428 set_bit(to, mle->response_map);
1341 mlog(0, "node %u is the master, response=YES\n", to); 1429 mlog(0, "node %u is the master, response=YES\n", to);
1430 mlog(0, "%s:%.*s: master node %u now knows I have a "
1431 "reference\n", dlm->name, res->lockname.len,
1432 res->lockname.name, to);
1342 mle->master = to; 1433 mle->master = to;
1343 break; 1434 break;
1344 case DLM_MASTER_RESP_NO: 1435 case DLM_MASTER_RESP_NO:
@@ -1379,7 +1470,8 @@ out:
1379 * 1470 *
1380 * if possible, TRIM THIS DOWN!!! 1471 * if possible, TRIM THIS DOWN!!!
1381 */ 1472 */
1382int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data) 1473int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
1474 void **ret_data)
1383{ 1475{
1384 u8 response = DLM_MASTER_RESP_MAYBE; 1476 u8 response = DLM_MASTER_RESP_MAYBE;
1385 struct dlm_ctxt *dlm = data; 1477 struct dlm_ctxt *dlm = data;
@@ -1417,10 +1509,11 @@ way_up_top:
1417 1509
1418 /* take care of the easy cases up front */ 1510 /* take care of the easy cases up front */
1419 spin_lock(&res->spinlock); 1511 spin_lock(&res->spinlock);
1420 if (res->state & DLM_LOCK_RES_RECOVERING) { 1512 if (res->state & (DLM_LOCK_RES_RECOVERING|
1513 DLM_LOCK_RES_MIGRATING)) {
1421 spin_unlock(&res->spinlock); 1514 spin_unlock(&res->spinlock);
1422 mlog(0, "returning DLM_MASTER_RESP_ERROR since res is " 1515 mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
1423 "being recovered\n"); 1516 "being recovered/migrated\n");
1424 response = DLM_MASTER_RESP_ERROR; 1517 response = DLM_MASTER_RESP_ERROR;
1425 if (mle) 1518 if (mle)
1426 kmem_cache_free(dlm_mle_cache, mle); 1519 kmem_cache_free(dlm_mle_cache, mle);
@@ -1428,8 +1521,10 @@ way_up_top:
1428 } 1521 }
1429 1522
1430 if (res->owner == dlm->node_num) { 1523 if (res->owner == dlm->node_num) {
1524 mlog(0, "%s:%.*s: setting bit %u in refmap\n",
1525 dlm->name, namelen, name, request->node_idx);
1526 dlm_lockres_set_refmap_bit(request->node_idx, res);
1431 spin_unlock(&res->spinlock); 1527 spin_unlock(&res->spinlock);
1432 // mlog(0, "this node is the master\n");
1433 response = DLM_MASTER_RESP_YES; 1528 response = DLM_MASTER_RESP_YES;
1434 if (mle) 1529 if (mle)
1435 kmem_cache_free(dlm_mle_cache, mle); 1530 kmem_cache_free(dlm_mle_cache, mle);
@@ -1477,7 +1572,6 @@ way_up_top:
1477 mlog(0, "node %u is master, but trying to migrate to " 1572 mlog(0, "node %u is master, but trying to migrate to "
1478 "node %u.\n", tmpmle->master, tmpmle->new_master); 1573 "node %u.\n", tmpmle->master, tmpmle->new_master);
1479 if (tmpmle->master == dlm->node_num) { 1574 if (tmpmle->master == dlm->node_num) {
1480 response = DLM_MASTER_RESP_YES;
1481 mlog(ML_ERROR, "no owner on lockres, but this " 1575 mlog(ML_ERROR, "no owner on lockres, but this "
1482 "node is trying to migrate it to %u?!\n", 1576 "node is trying to migrate it to %u?!\n",
1483 tmpmle->new_master); 1577 tmpmle->new_master);
@@ -1494,6 +1588,10 @@ way_up_top:
1494 * go back and clean the mles on any 1588 * go back and clean the mles on any
1495 * other nodes */ 1589 * other nodes */
1496 dispatch_assert = 1; 1590 dispatch_assert = 1;
1591 dlm_lockres_set_refmap_bit(request->node_idx, res);
1592 mlog(0, "%s:%.*s: setting bit %u in refmap\n",
1593 dlm->name, namelen, name,
1594 request->node_idx);
1497 } else 1595 } else
1498 response = DLM_MASTER_RESP_NO; 1596 response = DLM_MASTER_RESP_NO;
1499 } else { 1597 } else {
@@ -1607,17 +1705,24 @@ send_response:
1607 * can periodically run all locks owned by this node 1705 * can periodically run all locks owned by this node
1608 * and re-assert across the cluster... 1706 * and re-assert across the cluster...
1609 */ 1707 */
1610static int dlm_do_assert_master(struct dlm_ctxt *dlm, const char *lockname, 1708int dlm_do_assert_master(struct dlm_ctxt *dlm,
1611 unsigned int namelen, void *nodemap, 1709 struct dlm_lock_resource *res,
1612 u32 flags) 1710 void *nodemap, u32 flags)
1613{ 1711{
1614 struct dlm_assert_master assert; 1712 struct dlm_assert_master assert;
1615 int to, tmpret; 1713 int to, tmpret;
1616 struct dlm_node_iter iter; 1714 struct dlm_node_iter iter;
1617 int ret = 0; 1715 int ret = 0;
1618 int reassert; 1716 int reassert;
1717 const char *lockname = res->lockname.name;
1718 unsigned int namelen = res->lockname.len;
1619 1719
1620 BUG_ON(namelen > O2NM_MAX_NAME_LEN); 1720 BUG_ON(namelen > O2NM_MAX_NAME_LEN);
1721
1722 spin_lock(&res->spinlock);
1723 res->state |= DLM_LOCK_RES_SETREF_INPROG;
1724 spin_unlock(&res->spinlock);
1725
1621again: 1726again:
1622 reassert = 0; 1727 reassert = 0;
1623 1728
@@ -1647,6 +1752,7 @@ again:
1647 mlog(0, "link to %d went down!\n", to); 1752 mlog(0, "link to %d went down!\n", to);
1648 /* any nonzero status return will do */ 1753 /* any nonzero status return will do */
1649 ret = tmpret; 1754 ret = tmpret;
1755 r = 0;
1650 } else if (r < 0) { 1756 } else if (r < 0) {
1651 /* ok, something horribly messed. kill thyself. */ 1757 /* ok, something horribly messed. kill thyself. */
1652 mlog(ML_ERROR,"during assert master of %.*s to %u, " 1758 mlog(ML_ERROR,"during assert master of %.*s to %u, "
@@ -1661,17 +1767,39 @@ again:
1661 spin_unlock(&dlm->master_lock); 1767 spin_unlock(&dlm->master_lock);
1662 spin_unlock(&dlm->spinlock); 1768 spin_unlock(&dlm->spinlock);
1663 BUG(); 1769 BUG();
1664 } else if (r == EAGAIN) { 1770 }
1771
1772 if (r & DLM_ASSERT_RESPONSE_REASSERT &&
1773 !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
1774 mlog(ML_ERROR, "%.*s: very strange, "
1775 "master MLE but no lockres on %u\n",
1776 namelen, lockname, to);
1777 }
1778
1779 if (r & DLM_ASSERT_RESPONSE_REASSERT) {
1665 mlog(0, "%.*s: node %u create mles on other " 1780 mlog(0, "%.*s: node %u create mles on other "
1666 "nodes and requests a re-assert\n", 1781 "nodes and requests a re-assert\n",
1667 namelen, lockname, to); 1782 namelen, lockname, to);
1668 reassert = 1; 1783 reassert = 1;
1669 } 1784 }
1785 if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
1786 mlog(0, "%.*s: node %u has a reference to this "
1787 "lockres, set the bit in the refmap\n",
1788 namelen, lockname, to);
1789 spin_lock(&res->spinlock);
1790 dlm_lockres_set_refmap_bit(to, res);
1791 spin_unlock(&res->spinlock);
1792 }
1670 } 1793 }
1671 1794
1672 if (reassert) 1795 if (reassert)
1673 goto again; 1796 goto again;
1674 1797
1798 spin_lock(&res->spinlock);
1799 res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
1800 spin_unlock(&res->spinlock);
1801 wake_up(&res->wq);
1802
1675 return ret; 1803 return ret;
1676} 1804}
1677 1805
@@ -1684,7 +1812,8 @@ again:
1684 * 1812 *
1685 * if possible, TRIM THIS DOWN!!! 1813 * if possible, TRIM THIS DOWN!!!
1686 */ 1814 */
1687int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data) 1815int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
1816 void **ret_data)
1688{ 1817{
1689 struct dlm_ctxt *dlm = data; 1818 struct dlm_ctxt *dlm = data;
1690 struct dlm_master_list_entry *mle = NULL; 1819 struct dlm_master_list_entry *mle = NULL;
@@ -1693,7 +1822,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data)
1693 char *name; 1822 char *name;
1694 unsigned int namelen, hash; 1823 unsigned int namelen, hash;
1695 u32 flags; 1824 u32 flags;
1696 int master_request = 0; 1825 int master_request = 0, have_lockres_ref = 0;
1697 int ret = 0; 1826 int ret = 0;
1698 1827
1699 if (!dlm_grab(dlm)) 1828 if (!dlm_grab(dlm))
@@ -1851,6 +1980,7 @@ ok:
1851 spin_unlock(&mle->spinlock); 1980 spin_unlock(&mle->spinlock);
1852 1981
1853 if (res) { 1982 if (res) {
1983 int wake = 0;
1854 spin_lock(&res->spinlock); 1984 spin_lock(&res->spinlock);
1855 if (mle->type == DLM_MLE_MIGRATION) { 1985 if (mle->type == DLM_MLE_MIGRATION) {
1856 mlog(0, "finishing off migration of lockres %.*s, " 1986 mlog(0, "finishing off migration of lockres %.*s, "
@@ -1858,12 +1988,16 @@ ok:
1858 res->lockname.len, res->lockname.name, 1988 res->lockname.len, res->lockname.name,
1859 dlm->node_num, mle->new_master); 1989 dlm->node_num, mle->new_master);
1860 res->state &= ~DLM_LOCK_RES_MIGRATING; 1990 res->state &= ~DLM_LOCK_RES_MIGRATING;
1991 wake = 1;
1861 dlm_change_lockres_owner(dlm, res, mle->new_master); 1992 dlm_change_lockres_owner(dlm, res, mle->new_master);
1862 BUG_ON(res->state & DLM_LOCK_RES_DIRTY); 1993 BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
1863 } else { 1994 } else {
1864 dlm_change_lockres_owner(dlm, res, mle->master); 1995 dlm_change_lockres_owner(dlm, res, mle->master);
1865 } 1996 }
1866 spin_unlock(&res->spinlock); 1997 spin_unlock(&res->spinlock);
1998 have_lockres_ref = 1;
1999 if (wake)
2000 wake_up(&res->wq);
1867 } 2001 }
1868 2002
1869 /* master is known, detach if not already detached. 2003 /* master is known, detach if not already detached.
@@ -1913,12 +2047,28 @@ ok:
1913 2047
1914done: 2048done:
1915 ret = 0; 2049 ret = 0;
1916 if (res) 2050 if (res) {
1917 dlm_lockres_put(res); 2051 spin_lock(&res->spinlock);
2052 res->state |= DLM_LOCK_RES_SETREF_INPROG;
2053 spin_unlock(&res->spinlock);
2054 *ret_data = (void *)res;
2055 }
1918 dlm_put(dlm); 2056 dlm_put(dlm);
1919 if (master_request) { 2057 if (master_request) {
1920 mlog(0, "need to tell master to reassert\n"); 2058 mlog(0, "need to tell master to reassert\n");
1921 ret = EAGAIN; // positive. negative would shoot down the node. 2059 /* positive. negative would shoot down the node. */
2060 ret |= DLM_ASSERT_RESPONSE_REASSERT;
2061 if (!have_lockres_ref) {
2062 mlog(ML_ERROR, "strange, got assert from %u, MASTER "
2063 "mle present here for %s:%.*s, but no lockres!\n",
2064 assert->node_idx, dlm->name, namelen, name);
2065 }
2066 }
2067 if (have_lockres_ref) {
2068 /* let the master know we have a reference to the lockres */
2069 ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
2070 mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
2071 dlm->name, namelen, name, assert->node_idx);
1922 } 2072 }
1923 return ret; 2073 return ret;
1924 2074
@@ -1929,11 +2079,25 @@ kill:
1929 __dlm_print_one_lock_resource(res); 2079 __dlm_print_one_lock_resource(res);
1930 spin_unlock(&res->spinlock); 2080 spin_unlock(&res->spinlock);
1931 spin_unlock(&dlm->spinlock); 2081 spin_unlock(&dlm->spinlock);
1932 dlm_lockres_put(res); 2082 *ret_data = (void *)res;
1933 dlm_put(dlm); 2083 dlm_put(dlm);
1934 return -EINVAL; 2084 return -EINVAL;
1935} 2085}
1936 2086
2087void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
2088{
2089 struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;
2090
2091 if (ret_data) {
2092 spin_lock(&res->spinlock);
2093 res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
2094 spin_unlock(&res->spinlock);
2095 wake_up(&res->wq);
2096 dlm_lockres_put(res);
2097 }
2098 return;
2099}
2100
1937int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, 2101int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
1938 struct dlm_lock_resource *res, 2102 struct dlm_lock_resource *res,
1939 int ignore_higher, u8 request_from, u32 flags) 2103 int ignore_higher, u8 request_from, u32 flags)
@@ -2023,9 +2187,7 @@ static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
2023 * even if one or more nodes die */ 2187 * even if one or more nodes die */
2024 mlog(0, "worker about to master %.*s here, this=%u\n", 2188 mlog(0, "worker about to master %.*s here, this=%u\n",
2025 res->lockname.len, res->lockname.name, dlm->node_num); 2189 res->lockname.len, res->lockname.name, dlm->node_num);
2026 ret = dlm_do_assert_master(dlm, res->lockname.name, 2190 ret = dlm_do_assert_master(dlm, res, nodemap, flags);
2027 res->lockname.len,
2028 nodemap, flags);
2029 if (ret < 0) { 2191 if (ret < 0) {
2030 /* no need to restart, we are done */ 2192 /* no need to restart, we are done */
2031 if (!dlm_is_host_down(ret)) 2193 if (!dlm_is_host_down(ret))
@@ -2097,14 +2259,180 @@ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
2097 return ret; 2259 return ret;
2098} 2260}
2099 2261
2262/*
2263 * DLM_DEREF_LOCKRES_MSG
2264 */
2265
2266int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2267{
2268 struct dlm_deref_lockres deref;
2269 int ret = 0, r;
2270 const char *lockname;
2271 unsigned int namelen;
2272
2273 lockname = res->lockname.name;
2274 namelen = res->lockname.len;
2275 BUG_ON(namelen > O2NM_MAX_NAME_LEN);
2276
2277 mlog(0, "%s:%.*s: sending deref to %d\n",
2278 dlm->name, namelen, lockname, res->owner);
2279 memset(&deref, 0, sizeof(deref));
2280 deref.node_idx = dlm->node_num;
2281 deref.namelen = namelen;
2282 memcpy(deref.name, lockname, namelen);
2283
2284 ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
2285 &deref, sizeof(deref), res->owner, &r);
2286 if (ret < 0)
2287 mlog_errno(ret);
2288 else if (r < 0) {
2289 /* BAD. other node says I did not have a ref. */
2290 mlog(ML_ERROR,"while dropping ref on %s:%.*s "
2291 "(master=%u) got %d.\n", dlm->name, namelen,
2292 lockname, res->owner, r);
2293 dlm_print_one_lock_resource(res);
2294 BUG();
2295 }
2296 return ret;
2297}
2298
2299int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
2300 void **ret_data)
2301{
2302 struct dlm_ctxt *dlm = data;
2303 struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
2304 struct dlm_lock_resource *res = NULL;
2305 char *name;
2306 unsigned int namelen;
2307 int ret = -EINVAL;
2308 u8 node;
2309 unsigned int hash;
2310 struct dlm_work_item *item;
2311 int cleared = 0;
2312 int dispatch = 0;
2313
2314 if (!dlm_grab(dlm))
2315 return 0;
2316
2317 name = deref->name;
2318 namelen = deref->namelen;
2319 node = deref->node_idx;
2320
2321 if (namelen > DLM_LOCKID_NAME_MAX) {
2322 mlog(ML_ERROR, "Invalid name length!");
2323 goto done;
2324 }
2325 if (deref->node_idx >= O2NM_MAX_NODES) {
2326 mlog(ML_ERROR, "Invalid node number: %u\n", node);
2327 goto done;
2328 }
2329
2330 hash = dlm_lockid_hash(name, namelen);
2331
2332 spin_lock(&dlm->spinlock);
2333 res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2334 if (!res) {
2335 spin_unlock(&dlm->spinlock);
2336 mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
2337 dlm->name, namelen, name);
2338 goto done;
2339 }
2340 spin_unlock(&dlm->spinlock);
2341
2342 spin_lock(&res->spinlock);
2343 if (res->state & DLM_LOCK_RES_SETREF_INPROG)
2344 dispatch = 1;
2345 else {
2346 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2347 if (test_bit(node, res->refmap)) {
2348 dlm_lockres_clear_refmap_bit(node, res);
2349 cleared = 1;
2350 }
2351 }
2352 spin_unlock(&res->spinlock);
2353
2354 if (!dispatch) {
2355 if (cleared)
2356 dlm_lockres_calc_usage(dlm, res);
2357 else {
2358 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2359 "but it is already dropped!\n", dlm->name,
2360 res->lockname.len, res->lockname.name, node);
2361 __dlm_print_one_lock_resource(res);
2362 }
2363 ret = 0;
2364 goto done;
2365 }
2366
2367 item = kzalloc(sizeof(*item), GFP_NOFS);
2368 if (!item) {
2369 ret = -ENOMEM;
2370 mlog_errno(ret);
2371 goto done;
2372 }
2373
2374 dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
2375 item->u.dl.deref_res = res;
2376 item->u.dl.deref_node = node;
2377
2378 spin_lock(&dlm->work_lock);
2379 list_add_tail(&item->list, &dlm->work_list);
2380 spin_unlock(&dlm->work_lock);
2381
2382 queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2383 return 0;
2384
2385done:
2386 if (res)
2387 dlm_lockres_put(res);
2388 dlm_put(dlm);
2389
2390 return ret;
2391}
2392
2393static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
2394{
2395 struct dlm_ctxt *dlm;
2396 struct dlm_lock_resource *res;
2397 u8 node;
2398 u8 cleared = 0;
2399
2400 dlm = item->dlm;
2401 res = item->u.dl.deref_res;
2402 node = item->u.dl.deref_node;
2403
2404 spin_lock(&res->spinlock);
2405 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2406 if (test_bit(node, res->refmap)) {
2407 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
2408 dlm_lockres_clear_refmap_bit(node, res);
2409 cleared = 1;
2410 }
2411 spin_unlock(&res->spinlock);
2412
2413 if (cleared) {
2414 mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
2415 dlm->name, res->lockname.len, res->lockname.name, node);
2416 dlm_lockres_calc_usage(dlm, res);
2417 } else {
2418 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2419 "but it is already dropped!\n", dlm->name,
2420 res->lockname.len, res->lockname.name, node);
2421 __dlm_print_one_lock_resource(res);
2422 }
2423
2424 dlm_lockres_put(res);
2425}
2426
2100 2427
2101/* 2428/*
2102 * DLM_MIGRATE_LOCKRES 2429 * DLM_MIGRATE_LOCKRES
2103 */ 2430 */
2104 2431
2105 2432
2106int dlm_migrate_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 2433static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
2107 u8 target) 2434 struct dlm_lock_resource *res,
2435 u8 target)
2108{ 2436{
2109 struct dlm_master_list_entry *mle = NULL; 2437 struct dlm_master_list_entry *mle = NULL;
2110 struct dlm_master_list_entry *oldmle = NULL; 2438 struct dlm_master_list_entry *oldmle = NULL;
@@ -2116,7 +2444,7 @@ int dlm_migrate_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
2116 struct list_head *queue, *iter; 2444 struct list_head *queue, *iter;
2117 int i; 2445 int i;
2118 struct dlm_lock *lock; 2446 struct dlm_lock *lock;
2119 int empty = 1; 2447 int empty = 1, wake = 0;
2120 2448
2121 if (!dlm_grab(dlm)) 2449 if (!dlm_grab(dlm))
2122 return -EINVAL; 2450 return -EINVAL;
@@ -2241,6 +2569,7 @@ int dlm_migrate_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
2241 res->lockname.name, target); 2569 res->lockname.name, target);
2242 spin_lock(&res->spinlock); 2570 spin_lock(&res->spinlock);
2243 res->state &= ~DLM_LOCK_RES_MIGRATING; 2571 res->state &= ~DLM_LOCK_RES_MIGRATING;
2572 wake = 1;
2244 spin_unlock(&res->spinlock); 2573 spin_unlock(&res->spinlock);
2245 ret = -EINVAL; 2574 ret = -EINVAL;
2246 } 2575 }
@@ -2268,6 +2597,9 @@ fail:
2268 * the lockres 2597 * the lockres
2269 */ 2598 */
2270 2599
2600 /* now that remote nodes are spinning on the MIGRATING flag,
2601 * ensure that all assert_master work is flushed. */
2602 flush_workqueue(dlm->dlm_worker);
2271 2603
2272 /* get an extra reference on the mle. 2604 /* get an extra reference on the mle.
2273 * otherwise the assert_master from the new 2605 * otherwise the assert_master from the new
@@ -2296,6 +2628,7 @@ fail:
2296 dlm_put_mle_inuse(mle); 2628 dlm_put_mle_inuse(mle);
2297 spin_lock(&res->spinlock); 2629 spin_lock(&res->spinlock);
2298 res->state &= ~DLM_LOCK_RES_MIGRATING; 2630 res->state &= ~DLM_LOCK_RES_MIGRATING;
2631 wake = 1;
2299 spin_unlock(&res->spinlock); 2632 spin_unlock(&res->spinlock);
2300 goto leave; 2633 goto leave;
2301 } 2634 }
@@ -2322,7 +2655,8 @@ fail:
2322 res->owner == target) 2655 res->owner == target)
2323 break; 2656 break;
2324 2657
2325 mlog(0, "timed out during migration\n"); 2658 mlog(0, "%s:%.*s: timed out during migration\n",
2659 dlm->name, res->lockname.len, res->lockname.name);
2326 /* avoid hang during shutdown when migrating lockres 2660 /* avoid hang during shutdown when migrating lockres
2327 * to a node which also goes down */ 2661 * to a node which also goes down */
2328 if (dlm_is_node_dead(dlm, target)) { 2662 if (dlm_is_node_dead(dlm, target)) {
@@ -2330,20 +2664,20 @@ fail:
2330 "target %u is no longer up, restarting\n", 2664 "target %u is no longer up, restarting\n",
2331 dlm->name, res->lockname.len, 2665 dlm->name, res->lockname.len,
2332 res->lockname.name, target); 2666 res->lockname.name, target);
2333 ret = -ERESTARTSYS; 2667 ret = -EINVAL;
2668 /* migration failed, detach and clean up mle */
2669 dlm_mle_detach_hb_events(dlm, mle);
2670 dlm_put_mle(mle);
2671 dlm_put_mle_inuse(mle);
2672 spin_lock(&res->spinlock);
2673 res->state &= ~DLM_LOCK_RES_MIGRATING;
2674 wake = 1;
2675 spin_unlock(&res->spinlock);
2676 goto leave;
2334 } 2677 }
2335 } 2678 } else
2336 if (ret == -ERESTARTSYS) { 2679 mlog(0, "%s:%.*s: caught signal during migration\n",
2337 /* migration failed, detach and clean up mle */ 2680 dlm->name, res->lockname.len, res->lockname.name);
2338 dlm_mle_detach_hb_events(dlm, mle);
2339 dlm_put_mle(mle);
2340 dlm_put_mle_inuse(mle);
2341 spin_lock(&res->spinlock);
2342 res->state &= ~DLM_LOCK_RES_MIGRATING;
2343 spin_unlock(&res->spinlock);
2344 goto leave;
2345 }
2346 /* TODO: if node died: stop, clean up, return error */
2347 } 2681 }
2348 2682
2349 /* all done, set the owner, clear the flag */ 2683 /* all done, set the owner, clear the flag */
@@ -2366,6 +2700,11 @@ leave:
2366 if (ret < 0) 2700 if (ret < 0)
2367 dlm_kick_thread(dlm, res); 2701 dlm_kick_thread(dlm, res);
2368 2702
2703 /* wake up waiters if the MIGRATING flag got set
2704 * but migration failed */
2705 if (wake)
2706 wake_up(&res->wq);
2707
2369 /* TODO: cleanup */ 2708 /* TODO: cleanup */
2370 if (mres) 2709 if (mres)
2371 free_page((unsigned long)mres); 2710 free_page((unsigned long)mres);
@@ -2376,6 +2715,53 @@ leave:
2376 return ret; 2715 return ret;
2377} 2716}
2378 2717
2718#define DLM_MIGRATION_RETRY_MS 100
2719
2720/* Should be called only after beginning the domain leave process.
2721 * There should not be any remaining locks on nonlocal lock resources,
2722 * and there should be no local locks left on locally mastered resources.
2723 *
2724 * Called with the dlm spinlock held, may drop it to do migration, but
2725 * will re-acquire before exit.
2726 *
2727 * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped */
2728int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2729{
2730 int ret;
2731 int lock_dropped = 0;
2732
2733 if (res->owner != dlm->node_num) {
2734 if (!__dlm_lockres_unused(res)) {
2735 mlog(ML_ERROR, "%s:%.*s: this node is not master, "
2736 "trying to free this but locks remain\n",
2737 dlm->name, res->lockname.len, res->lockname.name);
2738 }
2739 goto leave;
2740 }
2741
2742 /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
2743 spin_unlock(&dlm->spinlock);
2744 lock_dropped = 1;
2745 while (1) {
2746 ret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES);
2747 if (ret >= 0)
2748 break;
2749 if (ret == -ENOTEMPTY) {
2750 mlog(ML_ERROR, "lockres %.*s still has local locks!\n",
2751 res->lockname.len, res->lockname.name);
2752 BUG();
2753 }
2754
2755 mlog(0, "lockres %.*s: migrate failed, "
2756 "retrying\n", res->lockname.len,
2757 res->lockname.name);
2758 msleep(DLM_MIGRATION_RETRY_MS);
2759 }
2760 spin_lock(&dlm->spinlock);
2761leave:
2762 return lock_dropped;
2763}
2764
2379int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock) 2765int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
2380{ 2766{
2381 int ret; 2767 int ret;
@@ -2405,7 +2791,8 @@ static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
2405 return can_proceed; 2791 return can_proceed;
2406} 2792}
2407 2793
2408int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) 2794static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
2795 struct dlm_lock_resource *res)
2409{ 2796{
2410 int ret; 2797 int ret;
2411 spin_lock(&res->spinlock); 2798 spin_lock(&res->spinlock);
@@ -2434,8 +2821,15 @@ static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
2434 __dlm_lockres_reserve_ast(res); 2821 __dlm_lockres_reserve_ast(res);
2435 spin_unlock(&res->spinlock); 2822 spin_unlock(&res->spinlock);
2436 2823
2437 /* now flush all the pending asts.. hang out for a bit */ 2824 /* now flush all the pending asts */
2438 dlm_kick_thread(dlm, res); 2825 dlm_kick_thread(dlm, res);
2826 /* before waiting on DIRTY, block processes which may
2827 * try to dirty the lockres before MIGRATING is set */
2828 spin_lock(&res->spinlock);
2829 BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
2830 res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
2831 spin_unlock(&res->spinlock);
2832 /* now wait on any pending asts and the DIRTY state */
2439 wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res)); 2833 wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
2440 dlm_lockres_release_ast(dlm, res); 2834 dlm_lockres_release_ast(dlm, res);
2441 2835
@@ -2461,6 +2855,13 @@ again:
2461 mlog(0, "trying again...\n"); 2855 mlog(0, "trying again...\n");
2462 goto again; 2856 goto again;
2463 } 2857 }
2858 /* now that we are sure the MIGRATING state is there, drop
2859 * the unneded state which blocked threads trying to DIRTY */
2860 spin_lock(&res->spinlock);
2861 BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
2862 BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
2863 res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
2864 spin_unlock(&res->spinlock);
2464 2865
2465 /* did the target go down or die? */ 2866 /* did the target go down or die? */
2466 spin_lock(&dlm->spinlock); 2867 spin_lock(&dlm->spinlock);
@@ -2490,7 +2891,7 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2490{ 2891{
2491 struct list_head *iter, *iter2; 2892 struct list_head *iter, *iter2;
2492 struct list_head *queue = &res->granted; 2893 struct list_head *queue = &res->granted;
2493 int i; 2894 int i, bit;
2494 struct dlm_lock *lock; 2895 struct dlm_lock *lock;
2495 2896
2496 assert_spin_locked(&res->spinlock); 2897 assert_spin_locked(&res->spinlock);
@@ -2508,12 +2909,28 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2508 BUG_ON(!list_empty(&lock->bast_list)); 2909 BUG_ON(!list_empty(&lock->bast_list));
2509 BUG_ON(lock->ast_pending); 2910 BUG_ON(lock->ast_pending);
2510 BUG_ON(lock->bast_pending); 2911 BUG_ON(lock->bast_pending);
2912 dlm_lockres_clear_refmap_bit(lock->ml.node, res);
2511 list_del_init(&lock->list); 2913 list_del_init(&lock->list);
2512 dlm_lock_put(lock); 2914 dlm_lock_put(lock);
2513 } 2915 }
2514 } 2916 }
2515 queue++; 2917 queue++;
2516 } 2918 }
2919 bit = 0;
2920 while (1) {
2921 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
2922 if (bit >= O2NM_MAX_NODES)
2923 break;
2924 /* do not clear the local node reference, if there is a
2925 * process holding this, let it drop the ref itself */
2926 if (bit != dlm->node_num) {
2927 mlog(0, "%s:%.*s: node %u had a ref to this "
2928 "migrating lockres, clearing\n", dlm->name,
2929 res->lockname.len, res->lockname.name, bit);
2930 dlm_lockres_clear_refmap_bit(bit, res);
2931 }
2932 bit++;
2933 }
2517} 2934}
2518 2935
2519/* for now this is not too intelligent. we will 2936/* for now this is not too intelligent. we will
@@ -2601,6 +3018,16 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
2601 mlog(0, "migrate request (node %u) returned %d!\n", 3018 mlog(0, "migrate request (node %u) returned %d!\n",
2602 nodenum, status); 3019 nodenum, status);
2603 ret = status; 3020 ret = status;
3021 } else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
3022 /* during the migration request we short-circuited
3023 * the mastery of the lockres. make sure we have
3024 * a mastery ref for nodenum */
3025 mlog(0, "%s:%.*s: need ref for node %u\n",
3026 dlm->name, res->lockname.len, res->lockname.name,
3027 nodenum);
3028 spin_lock(&res->spinlock);
3029 dlm_lockres_set_refmap_bit(nodenum, res);
3030 spin_unlock(&res->spinlock);
2604 } 3031 }
2605 } 3032 }
2606 3033
@@ -2619,7 +3046,8 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
2619 * we will have no mle in the list to start with. now we can add an mle for 3046 * we will have no mle in the list to start with. now we can add an mle for
2620 * the migration and this should be the only one found for those scanning the 3047 * the migration and this should be the only one found for those scanning the
2621 * list. */ 3048 * list. */
2622int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data) 3049int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
3050 void **ret_data)
2623{ 3051{
2624 struct dlm_ctxt *dlm = data; 3052 struct dlm_ctxt *dlm = data;
2625 struct dlm_lock_resource *res = NULL; 3053 struct dlm_lock_resource *res = NULL;
@@ -2745,7 +3173,13 @@ static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
2745 /* remove it from the list so that only one 3173 /* remove it from the list so that only one
2746 * mle will be found */ 3174 * mle will be found */
2747 list_del_init(&tmp->list); 3175 list_del_init(&tmp->list);
2748 __dlm_mle_detach_hb_events(dlm, mle); 3176 /* this was obviously WRONG. mle is uninited here. should be tmp. */
3177 __dlm_mle_detach_hb_events(dlm, tmp);
3178 ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
3179 mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
3180 "telling master to get ref for cleared out mle "
3181 "during migration\n", dlm->name, namelen, name,
3182 master, new_master);
2749 } 3183 }
2750 spin_unlock(&tmp->spinlock); 3184 spin_unlock(&tmp->spinlock);
2751 } 3185 }
@@ -2753,6 +3187,8 @@ static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
2753 /* now add a migration mle to the tail of the list */ 3187 /* now add a migration mle to the tail of the list */
2754 dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen); 3188 dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
2755 mle->new_master = new_master; 3189 mle->new_master = new_master;
3190 /* the new master will be sending an assert master for this.
3191 * at that point we will get the refmap reference */
2756 mle->master = master; 3192 mle->master = master;
2757 /* do this for consistency with other mle types */ 3193 /* do this for consistency with other mle types */
2758 set_bit(new_master, mle->maybe_map); 3194 set_bit(new_master, mle->maybe_map);
@@ -2902,6 +3338,13 @@ int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
2902 clear_bit(dlm->node_num, iter.node_map); 3338 clear_bit(dlm->node_num, iter.node_map);
2903 spin_unlock(&dlm->spinlock); 3339 spin_unlock(&dlm->spinlock);
2904 3340
3341 /* ownership of the lockres is changing. account for the
3342 * mastery reference here since old_master will briefly have
3343 * a reference after the migration completes */
3344 spin_lock(&res->spinlock);
3345 dlm_lockres_set_refmap_bit(old_master, res);
3346 spin_unlock(&res->spinlock);
3347
2905 mlog(0, "now time to do a migrate request to other nodes\n"); 3348 mlog(0, "now time to do a migrate request to other nodes\n");
2906 ret = dlm_do_migrate_request(dlm, res, old_master, 3349 ret = dlm_do_migrate_request(dlm, res, old_master,
2907 dlm->node_num, &iter); 3350 dlm->node_num, &iter);
@@ -2914,8 +3357,7 @@ int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
2914 res->lockname.len, res->lockname.name); 3357 res->lockname.len, res->lockname.name);
2915 /* this call now finishes out the nodemap 3358 /* this call now finishes out the nodemap
2916 * even if one or more nodes die */ 3359 * even if one or more nodes die */
2917 ret = dlm_do_assert_master(dlm, res->lockname.name, 3360 ret = dlm_do_assert_master(dlm, res, iter.node_map,
2918 res->lockname.len, iter.node_map,
2919 DLM_ASSERT_MASTER_FINISH_MIGRATION); 3361 DLM_ASSERT_MASTER_FINISH_MIGRATION);
2920 if (ret < 0) { 3362 if (ret < 0) {
2921 /* no longer need to retry. all living nodes contacted. */ 3363 /* no longer need to retry. all living nodes contacted. */
@@ -2927,8 +3369,7 @@ int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
2927 set_bit(old_master, iter.node_map); 3369 set_bit(old_master, iter.node_map);
2928 mlog(0, "doing assert master of %.*s back to %u\n", 3370 mlog(0, "doing assert master of %.*s back to %u\n",
2929 res->lockname.len, res->lockname.name, old_master); 3371 res->lockname.len, res->lockname.name, old_master);
2930 ret = dlm_do_assert_master(dlm, res->lockname.name, 3372 ret = dlm_do_assert_master(dlm, res, iter.node_map,
2931 res->lockname.len, iter.node_map,
2932 DLM_ASSERT_MASTER_FINISH_MIGRATION); 3373 DLM_ASSERT_MASTER_FINISH_MIGRATION);
2933 if (ret < 0) { 3374 if (ret < 0) {
2934 mlog(0, "assert master to original master failed " 3375 mlog(0, "assert master to original master failed "
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 367a11e9e2ed..6d4a83d50152 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -163,9 +163,6 @@ void dlm_dispatch_work(struct work_struct *work)
163 dlm_workfunc_t *workfunc; 163 dlm_workfunc_t *workfunc;
164 int tot=0; 164 int tot=0;
165 165
166 if (!dlm_joined(dlm))
167 return;
168
169 spin_lock(&dlm->work_lock); 166 spin_lock(&dlm->work_lock);
170 list_splice_init(&dlm->work_list, &tmp_list); 167 list_splice_init(&dlm->work_list, &tmp_list);
171 spin_unlock(&dlm->work_lock); 168 spin_unlock(&dlm->work_lock);
@@ -821,7 +818,8 @@ static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
821 818
822} 819}
823 820
824int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data) 821int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
822 void **ret_data)
825{ 823{
826 struct dlm_ctxt *dlm = data; 824 struct dlm_ctxt *dlm = data;
827 struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf; 825 struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
@@ -978,7 +976,8 @@ static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
978} 976}
979 977
980 978
981int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data) 979int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
980 void **ret_data)
982{ 981{
983 struct dlm_ctxt *dlm = data; 982 struct dlm_ctxt *dlm = data;
984 struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf; 983 struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
@@ -1129,6 +1128,11 @@ static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
1129 if (total_locks == mres_total_locks) 1128 if (total_locks == mres_total_locks)
1130 mres->flags |= DLM_MRES_ALL_DONE; 1129 mres->flags |= DLM_MRES_ALL_DONE;
1131 1130
1131 mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
1132 dlm->name, res->lockname.len, res->lockname.name,
1133 orig_flags & DLM_MRES_MIGRATION ? "migrate" : "recovery",
1134 send_to);
1135
1132 /* send it */ 1136 /* send it */
1133 ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres, 1137 ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
1134 sz, send_to, &status); 1138 sz, send_to, &status);
@@ -1213,6 +1217,34 @@ static int dlm_add_lock_to_array(struct dlm_lock *lock,
1213 return 0; 1217 return 0;
1214} 1218}
1215 1219
1220static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
1221 struct dlm_migratable_lockres *mres)
1222{
1223 struct dlm_lock dummy;
1224 memset(&dummy, 0, sizeof(dummy));
1225 dummy.ml.cookie = 0;
1226 dummy.ml.type = LKM_IVMODE;
1227 dummy.ml.convert_type = LKM_IVMODE;
1228 dummy.ml.highest_blocked = LKM_IVMODE;
1229 dummy.lksb = NULL;
1230 dummy.ml.node = dlm->node_num;
1231 dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
1232}
1233
1234static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
1235 struct dlm_migratable_lock *ml,
1236 u8 *nodenum)
1237{
1238 if (unlikely(ml->cookie == 0 &&
1239 ml->type == LKM_IVMODE &&
1240 ml->convert_type == LKM_IVMODE &&
1241 ml->highest_blocked == LKM_IVMODE &&
1242 ml->list == DLM_BLOCKED_LIST)) {
1243 *nodenum = ml->node;
1244 return 1;
1245 }
1246 return 0;
1247}
1216 1248
1217int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 1249int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1218 struct dlm_migratable_lockres *mres, 1250 struct dlm_migratable_lockres *mres,
@@ -1260,6 +1292,14 @@ int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1260 goto error; 1292 goto error;
1261 } 1293 }
1262 } 1294 }
1295 if (total_locks == 0) {
1296 /* send a dummy lock to indicate a mastery reference only */
1297 mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
1298 dlm->name, res->lockname.len, res->lockname.name,
1299 send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
1300 "migration");
1301 dlm_add_dummy_lock(dlm, mres);
1302 }
1263 /* flush any remaining locks */ 1303 /* flush any remaining locks */
1264 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks); 1304 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
1265 if (ret < 0) 1305 if (ret < 0)
@@ -1293,7 +1333,8 @@ error:
1293 * do we spin? returning an error only delays the problem really 1333 * do we spin? returning an error only delays the problem really
1294 */ 1334 */
1295 1335
1296int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data) 1336int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
1337 void **ret_data)
1297{ 1338{
1298 struct dlm_ctxt *dlm = data; 1339 struct dlm_ctxt *dlm = data;
1299 struct dlm_migratable_lockres *mres = 1340 struct dlm_migratable_lockres *mres =
@@ -1382,17 +1423,21 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data)
1382 spin_lock(&res->spinlock); 1423 spin_lock(&res->spinlock);
1383 res->state &= ~DLM_LOCK_RES_IN_PROGRESS; 1424 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
1384 spin_unlock(&res->spinlock); 1425 spin_unlock(&res->spinlock);
1426 wake_up(&res->wq);
1385 1427
1386 /* add an extra ref for just-allocated lockres 1428 /* add an extra ref for just-allocated lockres
1387 * otherwise the lockres will be purged immediately */ 1429 * otherwise the lockres will be purged immediately */
1388 dlm_lockres_get(res); 1430 dlm_lockres_get(res);
1389
1390 } 1431 }
1391 1432
1392 /* at this point we have allocated everything we need, 1433 /* at this point we have allocated everything we need,
1393 * and we have a hashed lockres with an extra ref and 1434 * and we have a hashed lockres with an extra ref and
1394 * the proper res->state flags. */ 1435 * the proper res->state flags. */
1395 ret = 0; 1436 ret = 0;
1437 spin_lock(&res->spinlock);
1438 /* drop this either when master requery finds a different master
1439 * or when a lock is added by the recovery worker */
1440 dlm_lockres_grab_inflight_ref(dlm, res);
1396 if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) { 1441 if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1397 /* migration cannot have an unknown master */ 1442 /* migration cannot have an unknown master */
1398 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY)); 1443 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
@@ -1400,10 +1445,11 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data)
1400 "unknown owner.. will need to requery: " 1445 "unknown owner.. will need to requery: "
1401 "%.*s\n", mres->lockname_len, mres->lockname); 1446 "%.*s\n", mres->lockname_len, mres->lockname);
1402 } else { 1447 } else {
1403 spin_lock(&res->spinlock); 1448 /* take a reference now to pin the lockres, drop it
1449 * when locks are added in the worker */
1404 dlm_change_lockres_owner(dlm, res, dlm->node_num); 1450 dlm_change_lockres_owner(dlm, res, dlm->node_num);
1405 spin_unlock(&res->spinlock);
1406 } 1451 }
1452 spin_unlock(&res->spinlock);
1407 1453
1408 /* queue up work for dlm_mig_lockres_worker */ 1454 /* queue up work for dlm_mig_lockres_worker */
1409 dlm_grab(dlm); /* get an extra ref for the work item */ 1455 dlm_grab(dlm); /* get an extra ref for the work item */
@@ -1459,6 +1505,9 @@ again:
1459 "this node will take it.\n", 1505 "this node will take it.\n",
1460 res->lockname.len, res->lockname.name); 1506 res->lockname.len, res->lockname.name);
1461 } else { 1507 } else {
1508 spin_lock(&res->spinlock);
1509 dlm_lockres_drop_inflight_ref(dlm, res);
1510 spin_unlock(&res->spinlock);
1462 mlog(0, "master needs to respond to sender " 1511 mlog(0, "master needs to respond to sender "
1463 "that node %u still owns %.*s\n", 1512 "that node %u still owns %.*s\n",
1464 real_master, res->lockname.len, 1513 real_master, res->lockname.len,
@@ -1578,7 +1627,8 @@ int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1578/* this function cannot error, so unless the sending 1627/* this function cannot error, so unless the sending
1579 * or receiving of the message failed, the owner can 1628 * or receiving of the message failed, the owner can
1580 * be trusted */ 1629 * be trusted */
1581int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data) 1630int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
1631 void **ret_data)
1582{ 1632{
1583 struct dlm_ctxt *dlm = data; 1633 struct dlm_ctxt *dlm = data;
1584 struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf; 1634 struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
@@ -1660,21 +1710,38 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1660{ 1710{
1661 struct dlm_migratable_lock *ml; 1711 struct dlm_migratable_lock *ml;
1662 struct list_head *queue; 1712 struct list_head *queue;
1713 struct list_head *tmpq = NULL;
1663 struct dlm_lock *newlock = NULL; 1714 struct dlm_lock *newlock = NULL;
1664 struct dlm_lockstatus *lksb = NULL; 1715 struct dlm_lockstatus *lksb = NULL;
1665 int ret = 0; 1716 int ret = 0;
1666 int i, bad; 1717 int i, j, bad;
1667 struct list_head *iter; 1718 struct list_head *iter;
1668 struct dlm_lock *lock = NULL; 1719 struct dlm_lock *lock = NULL;
1720 u8 from = O2NM_MAX_NODES;
1721 unsigned int added = 0;
1669 1722
1670 mlog(0, "running %d locks for this lockres\n", mres->num_locks); 1723 mlog(0, "running %d locks for this lockres\n", mres->num_locks);
1671 for (i=0; i<mres->num_locks; i++) { 1724 for (i=0; i<mres->num_locks; i++) {
1672 ml = &(mres->ml[i]); 1725 ml = &(mres->ml[i]);
1726
1727 if (dlm_is_dummy_lock(dlm, ml, &from)) {
1728 /* placeholder, just need to set the refmap bit */
1729 BUG_ON(mres->num_locks != 1);
1730 mlog(0, "%s:%.*s: dummy lock for %u\n",
1731 dlm->name, mres->lockname_len, mres->lockname,
1732 from);
1733 spin_lock(&res->spinlock);
1734 dlm_lockres_set_refmap_bit(from, res);
1735 spin_unlock(&res->spinlock);
1736 added++;
1737 break;
1738 }
1673 BUG_ON(ml->highest_blocked != LKM_IVMODE); 1739 BUG_ON(ml->highest_blocked != LKM_IVMODE);
1674 newlock = NULL; 1740 newlock = NULL;
1675 lksb = NULL; 1741 lksb = NULL;
1676 1742
1677 queue = dlm_list_num_to_pointer(res, ml->list); 1743 queue = dlm_list_num_to_pointer(res, ml->list);
1744 tmpq = NULL;
1678 1745
1679 /* if the lock is for the local node it needs to 1746 /* if the lock is for the local node it needs to
1680 * be moved to the proper location within the queue. 1747 * be moved to the proper location within the queue.
@@ -1684,11 +1751,16 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1684 BUG_ON(!(mres->flags & DLM_MRES_MIGRATION)); 1751 BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));
1685 1752
1686 spin_lock(&res->spinlock); 1753 spin_lock(&res->spinlock);
1687 list_for_each(iter, queue) { 1754 for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
1688 lock = list_entry (iter, struct dlm_lock, list); 1755 tmpq = dlm_list_idx_to_ptr(res, j);
1689 if (lock->ml.cookie != ml->cookie) 1756 list_for_each(iter, tmpq) {
1690 lock = NULL; 1757 lock = list_entry (iter, struct dlm_lock, list);
1691 else 1758 if (lock->ml.cookie != ml->cookie)
1759 lock = NULL;
1760 else
1761 break;
1762 }
1763 if (lock)
1692 break; 1764 break;
1693 } 1765 }
1694 1766
@@ -1698,12 +1770,20 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1698 u64 c = ml->cookie; 1770 u64 c = ml->cookie;
1699 mlog(ML_ERROR, "could not find local lock " 1771 mlog(ML_ERROR, "could not find local lock "
1700 "with cookie %u:%llu!\n", 1772 "with cookie %u:%llu!\n",
1701 dlm_get_lock_cookie_node(c), 1773 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1702 dlm_get_lock_cookie_seq(c)); 1774 dlm_get_lock_cookie_seq(be64_to_cpu(c)));
1775 __dlm_print_one_lock_resource(res);
1703 BUG(); 1776 BUG();
1704 } 1777 }
1705 BUG_ON(lock->ml.node != ml->node); 1778 BUG_ON(lock->ml.node != ml->node);
1706 1779
1780 if (tmpq != queue) {
1781 mlog(0, "lock was on %u instead of %u for %.*s\n",
1782 j, ml->list, res->lockname.len, res->lockname.name);
1783 spin_unlock(&res->spinlock);
1784 continue;
1785 }
1786
1707 /* see NOTE above about why we do not update 1787 /* see NOTE above about why we do not update
1708 * to match the master here */ 1788 * to match the master here */
1709 1789
@@ -1711,6 +1791,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1711 /* do not alter lock refcount. switching lists. */ 1791 /* do not alter lock refcount. switching lists. */
1712 list_move_tail(&lock->list, queue); 1792 list_move_tail(&lock->list, queue);
1713 spin_unlock(&res->spinlock); 1793 spin_unlock(&res->spinlock);
1794 added++;
1714 1795
1715 mlog(0, "just reordered a local lock!\n"); 1796 mlog(0, "just reordered a local lock!\n");
1716 continue; 1797 continue;
@@ -1799,14 +1880,14 @@ skip_lvb:
1799 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already " 1880 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
1800 "exists on this lockres!\n", dlm->name, 1881 "exists on this lockres!\n", dlm->name,
1801 res->lockname.len, res->lockname.name, 1882 res->lockname.len, res->lockname.name,
1802 dlm_get_lock_cookie_node(c), 1883 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1803 dlm_get_lock_cookie_seq(c)); 1884 dlm_get_lock_cookie_seq(be64_to_cpu(c)));
1804 1885
1805 mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, " 1886 mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
1806 "node=%u, cookie=%u:%llu, queue=%d\n", 1887 "node=%u, cookie=%u:%llu, queue=%d\n",
1807 ml->type, ml->convert_type, ml->node, 1888 ml->type, ml->convert_type, ml->node,
1808 dlm_get_lock_cookie_node(ml->cookie), 1889 dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)),
1809 dlm_get_lock_cookie_seq(ml->cookie), 1890 dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)),
1810 ml->list); 1891 ml->list);
1811 1892
1812 __dlm_print_one_lock_resource(res); 1893 __dlm_print_one_lock_resource(res);
@@ -1817,12 +1898,22 @@ skip_lvb:
1817 if (!bad) { 1898 if (!bad) {
1818 dlm_lock_get(newlock); 1899 dlm_lock_get(newlock);
1819 list_add_tail(&newlock->list, queue); 1900 list_add_tail(&newlock->list, queue);
1901 mlog(0, "%s:%.*s: added lock for node %u, "
1902 "setting refmap bit\n", dlm->name,
1903 res->lockname.len, res->lockname.name, ml->node);
1904 dlm_lockres_set_refmap_bit(ml->node, res);
1905 added++;
1820 } 1906 }
1821 spin_unlock(&res->spinlock); 1907 spin_unlock(&res->spinlock);
1822 } 1908 }
1823 mlog(0, "done running all the locks\n"); 1909 mlog(0, "done running all the locks\n");
1824 1910
1825leave: 1911leave:
1912 /* balance the ref taken when the work was queued */
1913 spin_lock(&res->spinlock);
1914 dlm_lockres_drop_inflight_ref(dlm, res);
1915 spin_unlock(&res->spinlock);
1916
1826 if (ret < 0) { 1917 if (ret < 0) {
1827 mlog_errno(ret); 1918 mlog_errno(ret);
1828 if (newlock) 1919 if (newlock)
@@ -1935,9 +2026,11 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
1935 if (res->owner == dead_node) { 2026 if (res->owner == dead_node) {
1936 list_del_init(&res->recovering); 2027 list_del_init(&res->recovering);
1937 spin_lock(&res->spinlock); 2028 spin_lock(&res->spinlock);
2029 /* new_master has our reference from
2030 * the lock state sent during recovery */
1938 dlm_change_lockres_owner(dlm, res, new_master); 2031 dlm_change_lockres_owner(dlm, res, new_master);
1939 res->state &= ~DLM_LOCK_RES_RECOVERING; 2032 res->state &= ~DLM_LOCK_RES_RECOVERING;
1940 if (!__dlm_lockres_unused(res)) 2033 if (__dlm_lockres_has_locks(res))
1941 __dlm_dirty_lockres(dlm, res); 2034 __dlm_dirty_lockres(dlm, res);
1942 spin_unlock(&res->spinlock); 2035 spin_unlock(&res->spinlock);
1943 wake_up(&res->wq); 2036 wake_up(&res->wq);
@@ -1977,9 +2070,11 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
1977 dlm_lockres_put(res); 2070 dlm_lockres_put(res);
1978 } 2071 }
1979 spin_lock(&res->spinlock); 2072 spin_lock(&res->spinlock);
2073 /* new_master has our reference from
2074 * the lock state sent during recovery */
1980 dlm_change_lockres_owner(dlm, res, new_master); 2075 dlm_change_lockres_owner(dlm, res, new_master);
1981 res->state &= ~DLM_LOCK_RES_RECOVERING; 2076 res->state &= ~DLM_LOCK_RES_RECOVERING;
1982 if (!__dlm_lockres_unused(res)) 2077 if (__dlm_lockres_has_locks(res))
1983 __dlm_dirty_lockres(dlm, res); 2078 __dlm_dirty_lockres(dlm, res);
1984 spin_unlock(&res->spinlock); 2079 spin_unlock(&res->spinlock);
1985 wake_up(&res->wq); 2080 wake_up(&res->wq);
@@ -2048,6 +2143,7 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2048{ 2143{
2049 struct list_head *iter, *tmpiter; 2144 struct list_head *iter, *tmpiter;
2050 struct dlm_lock *lock; 2145 struct dlm_lock *lock;
2146 unsigned int freed = 0;
2051 2147
2052 /* this node is the lockres master: 2148 /* this node is the lockres master:
2053 * 1) remove any stale locks for the dead node 2149 * 1) remove any stale locks for the dead node
@@ -2062,6 +2158,7 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2062 if (lock->ml.node == dead_node) { 2158 if (lock->ml.node == dead_node) {
2063 list_del_init(&lock->list); 2159 list_del_init(&lock->list);
2064 dlm_lock_put(lock); 2160 dlm_lock_put(lock);
2161 freed++;
2065 } 2162 }
2066 } 2163 }
2067 list_for_each_safe(iter, tmpiter, &res->converting) { 2164 list_for_each_safe(iter, tmpiter, &res->converting) {
@@ -2069,6 +2166,7 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2069 if (lock->ml.node == dead_node) { 2166 if (lock->ml.node == dead_node) {
2070 list_del_init(&lock->list); 2167 list_del_init(&lock->list);
2071 dlm_lock_put(lock); 2168 dlm_lock_put(lock);
2169 freed++;
2072 } 2170 }
2073 } 2171 }
2074 list_for_each_safe(iter, tmpiter, &res->blocked) { 2172 list_for_each_safe(iter, tmpiter, &res->blocked) {
@@ -2076,9 +2174,23 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2076 if (lock->ml.node == dead_node) { 2174 if (lock->ml.node == dead_node) {
2077 list_del_init(&lock->list); 2175 list_del_init(&lock->list);
2078 dlm_lock_put(lock); 2176 dlm_lock_put(lock);
2177 freed++;
2079 } 2178 }
2080 } 2179 }
2081 2180
2181 if (freed) {
2182 mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
2183 "dropping ref from lockres\n", dlm->name,
2184 res->lockname.len, res->lockname.name, freed, dead_node);
2185 BUG_ON(!test_bit(dead_node, res->refmap));
2186 dlm_lockres_clear_refmap_bit(dead_node, res);
2187 } else if (test_bit(dead_node, res->refmap)) {
2188 mlog(0, "%s:%.*s: dead node %u had a ref, but had "
2189 "no locks and had not purged before dying\n", dlm->name,
2190 res->lockname.len, res->lockname.name, dead_node);
2191 dlm_lockres_clear_refmap_bit(dead_node, res);
2192 }
2193
2082 /* do not kick thread yet */ 2194 /* do not kick thread yet */
2083 __dlm_dirty_lockres(dlm, res); 2195 __dlm_dirty_lockres(dlm, res);
2084} 2196}
@@ -2141,9 +2253,21 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
2141 spin_lock(&res->spinlock); 2253 spin_lock(&res->spinlock);
2142 /* zero the lvb if necessary */ 2254 /* zero the lvb if necessary */
2143 dlm_revalidate_lvb(dlm, res, dead_node); 2255 dlm_revalidate_lvb(dlm, res, dead_node);
2144 if (res->owner == dead_node) 2256 if (res->owner == dead_node) {
2257 if (res->state & DLM_LOCK_RES_DROPPING_REF)
2258 mlog(0, "%s:%.*s: owned by "
2259 "dead node %u, this node was "
2260 "dropping its ref when it died. "
2261 "continue, dropping the flag.\n",
2262 dlm->name, res->lockname.len,
2263 res->lockname.name, dead_node);
2264
2265 /* the wake_up for this will happen when the
2266 * RECOVERING flag is dropped later */
2267 res->state &= ~DLM_LOCK_RES_DROPPING_REF;
2268
2145 dlm_move_lockres_to_recovery_list(dlm, res); 2269 dlm_move_lockres_to_recovery_list(dlm, res);
2146 else if (res->owner == dlm->node_num) { 2270 } else if (res->owner == dlm->node_num) {
2147 dlm_free_dead_locks(dlm, res, dead_node); 2271 dlm_free_dead_locks(dlm, res, dead_node);
2148 __dlm_lockres_calc_usage(dlm, res); 2272 __dlm_lockres_calc_usage(dlm, res);
2149 } 2273 }
@@ -2480,7 +2604,8 @@ retry:
2480 return ret; 2604 return ret;
2481} 2605}
2482 2606
2483int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data) 2607int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
2608 void **ret_data)
2484{ 2609{
2485 struct dlm_ctxt *dlm = data; 2610 struct dlm_ctxt *dlm = data;
2486 struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf; 2611 struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;
@@ -2608,7 +2733,8 @@ stage2:
2608 return ret; 2733 return ret;
2609} 2734}
2610 2735
2611int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data) 2736int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
2737 void **ret_data)
2612{ 2738{
2613 struct dlm_ctxt *dlm = data; 2739 struct dlm_ctxt *dlm = data;
2614 struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf; 2740 struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 0c822f3ffb05..8ffa0916eb86 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -54,9 +54,6 @@
54#include "cluster/masklog.h" 54#include "cluster/masklog.h"
55 55
56static int dlm_thread(void *data); 56static int dlm_thread(void *data);
57static void dlm_purge_lockres_now(struct dlm_ctxt *dlm,
58 struct dlm_lock_resource *lockres);
59
60static void dlm_flush_asts(struct dlm_ctxt *dlm); 57static void dlm_flush_asts(struct dlm_ctxt *dlm);
61 58
62#define dlm_lock_is_remote(dlm, lock) ((lock)->ml.node != (dlm)->node_num) 59#define dlm_lock_is_remote(dlm, lock) ((lock)->ml.node != (dlm)->node_num)
@@ -82,14 +79,33 @@ repeat:
82 current->state = TASK_RUNNING; 79 current->state = TASK_RUNNING;
83} 80}
84 81
85 82int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
86int __dlm_lockres_unused(struct dlm_lock_resource *res)
87{ 83{
88 if (list_empty(&res->granted) && 84 if (list_empty(&res->granted) &&
89 list_empty(&res->converting) && 85 list_empty(&res->converting) &&
90 list_empty(&res->blocked) && 86 list_empty(&res->blocked))
91 list_empty(&res->dirty)) 87 return 0;
92 return 1; 88 return 1;
89}
90
91/* "unused": the lockres has no locks, is not on the dirty list,
92 * has no inflight locks (in the gap between mastery and acquiring
93 * the first lock), and has no bits in its refmap.
94 * truly ready to be freed. */
95int __dlm_lockres_unused(struct dlm_lock_resource *res)
96{
97 if (!__dlm_lockres_has_locks(res) &&
98 (list_empty(&res->dirty) && !(res->state & DLM_LOCK_RES_DIRTY))) {
99 /* try not to scan the bitmap unless the first two
100 * conditions are already true */
101 int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
102 if (bit >= O2NM_MAX_NODES) {
103 /* since the bit for dlm->node_num is not
104 * set, inflight_locks better be zero */
105 BUG_ON(res->inflight_locks != 0);
106 return 1;
107 }
108 }
93 return 0; 109 return 0;
94} 110}
95 111
@@ -106,46 +122,21 @@ void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
106 assert_spin_locked(&res->spinlock); 122 assert_spin_locked(&res->spinlock);
107 123
108 if (__dlm_lockres_unused(res)){ 124 if (__dlm_lockres_unused(res)){
109 /* For now, just keep any resource we master */
110 if (res->owner == dlm->node_num)
111 {
112 if (!list_empty(&res->purge)) {
113 mlog(0, "we master %s:%.*s, but it is on "
114 "the purge list. Removing\n",
115 dlm->name, res->lockname.len,
116 res->lockname.name);
117 list_del_init(&res->purge);
118 dlm->purge_count--;
119 }
120 return;
121 }
122
123 if (list_empty(&res->purge)) { 125 if (list_empty(&res->purge)) {
124 mlog(0, "putting lockres %.*s from purge list\n", 126 mlog(0, "putting lockres %.*s:%p onto purge list\n",
125 res->lockname.len, res->lockname.name); 127 res->lockname.len, res->lockname.name, res);
126 128
127 res->last_used = jiffies; 129 res->last_used = jiffies;
130 dlm_lockres_get(res);
128 list_add_tail(&res->purge, &dlm->purge_list); 131 list_add_tail(&res->purge, &dlm->purge_list);
129 dlm->purge_count++; 132 dlm->purge_count++;
130
131 /* if this node is not the owner, there is
132 * no way to keep track of who the owner could be.
133 * unhash it to avoid serious problems. */
134 if (res->owner != dlm->node_num) {
135 mlog(0, "%s:%.*s: doing immediate "
136 "purge of lockres owned by %u\n",
137 dlm->name, res->lockname.len,
138 res->lockname.name, res->owner);
139
140 dlm_purge_lockres_now(dlm, res);
141 }
142 } 133 }
143 } else if (!list_empty(&res->purge)) { 134 } else if (!list_empty(&res->purge)) {
144 mlog(0, "removing lockres %.*s from purge list, " 135 mlog(0, "removing lockres %.*s:%p from purge list, owner=%u\n",
145 "owner=%u\n", res->lockname.len, res->lockname.name, 136 res->lockname.len, res->lockname.name, res, res->owner);
146 res->owner);
147 137
148 list_del_init(&res->purge); 138 list_del_init(&res->purge);
139 dlm_lockres_put(res);
149 dlm->purge_count--; 140 dlm->purge_count--;
150 } 141 }
151} 142}
@@ -163,68 +154,65 @@ void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
163 spin_unlock(&dlm->spinlock); 154 spin_unlock(&dlm->spinlock);
164} 155}
165 156
166/* TODO: Eventual API: Called with the dlm spinlock held, may drop it 157static int dlm_purge_lockres(struct dlm_ctxt *dlm,
167 * to do migration, but will re-acquire before exit. */ 158 struct dlm_lock_resource *res)
168void dlm_purge_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *lockres)
169{ 159{
170 int master; 160 int master;
171 int ret; 161 int ret = 0;
172
173 spin_lock(&lockres->spinlock);
174 master = lockres->owner == dlm->node_num;
175 spin_unlock(&lockres->spinlock);
176 162
177 mlog(0, "purging lockres %.*s, master = %d\n", lockres->lockname.len, 163 spin_lock(&res->spinlock);
178 lockres->lockname.name, master); 164 if (!__dlm_lockres_unused(res)) {
179 165 spin_unlock(&res->spinlock);
180 /* Non master is the easy case -- no migration required, just 166 mlog(0, "%s:%.*s: tried to purge but not unused\n",
181 * quit. */ 167 dlm->name, res->lockname.len, res->lockname.name);
168 return -ENOTEMPTY;
169 }
170 master = (res->owner == dlm->node_num);
182 if (!master) 171 if (!master)
183 goto finish; 172 res->state |= DLM_LOCK_RES_DROPPING_REF;
184 173 spin_unlock(&res->spinlock);
185 /* Wheee! Migrate lockres here! */
186 spin_unlock(&dlm->spinlock);
187again:
188 174
189 ret = dlm_migrate_lockres(dlm, lockres, O2NM_MAX_NODES); 175 mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len,
190 if (ret == -ENOTEMPTY) { 176 res->lockname.name, master);
191 mlog(ML_ERROR, "lockres %.*s still has local locks!\n",
192 lockres->lockname.len, lockres->lockname.name);
193 177
194 BUG(); 178 if (!master) {
195 } else if (ret < 0) { 179 spin_lock(&res->spinlock);
196 mlog(ML_NOTICE, "lockres %.*s: migrate failed, retrying\n", 180 /* This ensures that clear refmap is sent after the set */
197 lockres->lockname.len, lockres->lockname.name); 181 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
198 msleep(100); 182 spin_unlock(&res->spinlock);
199 goto again; 183 /* drop spinlock to do messaging, retake below */
184 spin_unlock(&dlm->spinlock);
185 /* clear our bit from the master's refmap, ignore errors */
186 ret = dlm_drop_lockres_ref(dlm, res);
187 if (ret < 0) {
188 mlog_errno(ret);
189 if (!dlm_is_host_down(ret))
190 BUG();
191 }
192 mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n",
193 dlm->name, res->lockname.len, res->lockname.name, ret);
194 spin_lock(&dlm->spinlock);
200 } 195 }
201 196
202 spin_lock(&dlm->spinlock); 197 if (!list_empty(&res->purge)) {
203 198 mlog(0, "removing lockres %.*s:%p from purgelist, "
204finish: 199 "master = %d\n", res->lockname.len, res->lockname.name,
205 if (!list_empty(&lockres->purge)) { 200 res, master);
206 list_del_init(&lockres->purge); 201 list_del_init(&res->purge);
202 dlm_lockres_put(res);
207 dlm->purge_count--; 203 dlm->purge_count--;
208 } 204 }
209 __dlm_unhash_lockres(lockres); 205 __dlm_unhash_lockres(res);
210}
211
212/* make an unused lockres go away immediately.
213 * as soon as the dlm spinlock is dropped, this lockres
214 * will not be found. kfree still happens on last put. */
215static void dlm_purge_lockres_now(struct dlm_ctxt *dlm,
216 struct dlm_lock_resource *lockres)
217{
218 assert_spin_locked(&dlm->spinlock);
219 assert_spin_locked(&lockres->spinlock);
220 206
221 BUG_ON(!__dlm_lockres_unused(lockres)); 207 /* lockres is not in the hash now. drop the flag and wake up
222 208 * any processes waiting in dlm_get_lock_resource. */
223 if (!list_empty(&lockres->purge)) { 209 if (!master) {
224 list_del_init(&lockres->purge); 210 spin_lock(&res->spinlock);
225 dlm->purge_count--; 211 res->state &= ~DLM_LOCK_RES_DROPPING_REF;
212 spin_unlock(&res->spinlock);
213 wake_up(&res->wq);
226 } 214 }
227 __dlm_unhash_lockres(lockres); 215 return 0;
228} 216}
229 217
230static void dlm_run_purge_list(struct dlm_ctxt *dlm, 218static void dlm_run_purge_list(struct dlm_ctxt *dlm,
@@ -268,13 +256,17 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
268 break; 256 break;
269 } 257 }
270 258
259 mlog(0, "removing lockres %.*s:%p from purgelist\n",
260 lockres->lockname.len, lockres->lockname.name, lockres);
271 list_del_init(&lockres->purge); 261 list_del_init(&lockres->purge);
262 dlm_lockres_put(lockres);
272 dlm->purge_count--; 263 dlm->purge_count--;
273 264
274 /* This may drop and reacquire the dlm spinlock if it 265 /* This may drop and reacquire the dlm spinlock if it
275 * has to do migration. */ 266 * has to do migration. */
276 mlog(0, "calling dlm_purge_lockres!\n"); 267 mlog(0, "calling dlm_purge_lockres!\n");
277 dlm_purge_lockres(dlm, lockres); 268 if (dlm_purge_lockres(dlm, lockres))
269 BUG();
278 mlog(0, "DONE calling dlm_purge_lockres!\n"); 270 mlog(0, "DONE calling dlm_purge_lockres!\n");
279 271
280 /* Avoid adding any scheduling latencies */ 272 /* Avoid adding any scheduling latencies */
@@ -467,12 +459,17 @@ void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
467 assert_spin_locked(&res->spinlock); 459 assert_spin_locked(&res->spinlock);
468 460
469 /* don't shuffle secondary queues */ 461 /* don't shuffle secondary queues */
470 if ((res->owner == dlm->node_num) && 462 if ((res->owner == dlm->node_num)) {
471 !(res->state & DLM_LOCK_RES_DIRTY)) { 463 if (res->state & (DLM_LOCK_RES_MIGRATING |
472 /* ref for dirty_list */ 464 DLM_LOCK_RES_BLOCK_DIRTY))
473 dlm_lockres_get(res); 465 return;
474 list_add_tail(&res->dirty, &dlm->dirty_list); 466
475 res->state |= DLM_LOCK_RES_DIRTY; 467 if (list_empty(&res->dirty)) {
468 /* ref for dirty_list */
469 dlm_lockres_get(res);
470 list_add_tail(&res->dirty, &dlm->dirty_list);
471 res->state |= DLM_LOCK_RES_DIRTY;
472 }
476 } 473 }
477} 474}
478 475
@@ -651,7 +648,7 @@ static int dlm_thread(void *data)
651 dlm_lockres_get(res); 648 dlm_lockres_get(res);
652 649
653 spin_lock(&res->spinlock); 650 spin_lock(&res->spinlock);
654 res->state &= ~DLM_LOCK_RES_DIRTY; 651 /* We clear the DLM_LOCK_RES_DIRTY state once we shuffle lists below */
655 list_del_init(&res->dirty); 652 list_del_init(&res->dirty);
656 spin_unlock(&res->spinlock); 653 spin_unlock(&res->spinlock);
657 spin_unlock(&dlm->spinlock); 654 spin_unlock(&dlm->spinlock);
@@ -675,10 +672,11 @@ static int dlm_thread(void *data)
675 /* it is now ok to move lockreses in these states 672 /* it is now ok to move lockreses in these states
676 * to the dirty list, assuming that they will only be 673 * to the dirty list, assuming that they will only be
677 * dirty for a short while. */ 674 * dirty for a short while. */
675 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
678 if (res->state & (DLM_LOCK_RES_IN_PROGRESS | 676 if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
679 DLM_LOCK_RES_MIGRATING |
680 DLM_LOCK_RES_RECOVERING)) { 677 DLM_LOCK_RES_RECOVERING)) {
681 /* move it to the tail and keep going */ 678 /* move it to the tail and keep going */
679 res->state &= ~DLM_LOCK_RES_DIRTY;
682 spin_unlock(&res->spinlock); 680 spin_unlock(&res->spinlock);
683 mlog(0, "delaying list shuffling for in-" 681 mlog(0, "delaying list shuffling for in-"
684 "progress lockres %.*s, state=%d\n", 682 "progress lockres %.*s, state=%d\n",
@@ -699,6 +697,7 @@ static int dlm_thread(void *data)
699 697
700 /* called while holding lockres lock */ 698 /* called while holding lockres lock */
701 dlm_shuffle_lists(dlm, res); 699 dlm_shuffle_lists(dlm, res);
700 res->state &= ~DLM_LOCK_RES_DIRTY;
702 spin_unlock(&res->spinlock); 701 spin_unlock(&res->spinlock);
703 702
704 dlm_lockres_calc_usage(dlm, res); 703 dlm_lockres_calc_usage(dlm, res);
@@ -709,11 +708,8 @@ in_progress:
709 /* if the lock was in-progress, stick 708 /* if the lock was in-progress, stick
710 * it on the back of the list */ 709 * it on the back of the list */
711 if (delay) { 710 if (delay) {
712 /* ref for dirty_list */
713 dlm_lockres_get(res);
714 spin_lock(&res->spinlock); 711 spin_lock(&res->spinlock);
715 list_add_tail(&res->dirty, &dlm->dirty_list); 712 __dlm_dirty_lockres(dlm, res);
716 res->state |= DLM_LOCK_RES_DIRTY;
717 spin_unlock(&res->spinlock); 713 spin_unlock(&res->spinlock);
718 } 714 }
719 dlm_lockres_put(res); 715 dlm_lockres_put(res);
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 37be4b2e0d4a..86ca085ef324 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -147,6 +147,10 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
147 goto leave; 147 goto leave;
148 } 148 }
149 149
150 if (res->state & DLM_LOCK_RES_MIGRATING) {
151 status = DLM_MIGRATING;
152 goto leave;
153 }
150 154
151 /* see above for what the spec says about 155 /* see above for what the spec says about
152 * LKM_CANCEL and the lock queue state */ 156 * LKM_CANCEL and the lock queue state */
@@ -244,8 +248,8 @@ leave:
244 /* this should always be coupled with list removal */ 248 /* this should always be coupled with list removal */
245 BUG_ON(!(actions & DLM_UNLOCK_REMOVE_LOCK)); 249 BUG_ON(!(actions & DLM_UNLOCK_REMOVE_LOCK));
246 mlog(0, "lock %u:%llu should be gone now! refs=%d\n", 250 mlog(0, "lock %u:%llu should be gone now! refs=%d\n",
247 dlm_get_lock_cookie_node(lock->ml.cookie), 251 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
248 dlm_get_lock_cookie_seq(lock->ml.cookie), 252 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
249 atomic_read(&lock->lock_refs.refcount)-1); 253 atomic_read(&lock->lock_refs.refcount)-1);
250 dlm_lock_put(lock); 254 dlm_lock_put(lock);
251 } 255 }
@@ -379,7 +383,8 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
379 * returns: DLM_NORMAL, DLM_BADARGS, DLM_IVLOCKID, 383 * returns: DLM_NORMAL, DLM_BADARGS, DLM_IVLOCKID,
380 * return value from dlmunlock_master 384 * return value from dlmunlock_master
381 */ 385 */
382int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data) 386int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
387 void **ret_data)
383{ 388{
384 struct dlm_ctxt *dlm = data; 389 struct dlm_ctxt *dlm = data;
385 struct dlm_unlock_lock *unlock = (struct dlm_unlock_lock *)msg->buf; 390 struct dlm_unlock_lock *unlock = (struct dlm_unlock_lock *)msg->buf;
@@ -502,8 +507,8 @@ not_found:
502 if (!found) 507 if (!found)
503 mlog(ML_ERROR, "failed to find lock to unlock! " 508 mlog(ML_ERROR, "failed to find lock to unlock! "
504 "cookie=%u:%llu\n", 509 "cookie=%u:%llu\n",
505 dlm_get_lock_cookie_node(unlock->cookie), 510 dlm_get_lock_cookie_node(be64_to_cpu(unlock->cookie)),
506 dlm_get_lock_cookie_seq(unlock->cookie)); 511 dlm_get_lock_cookie_seq(be64_to_cpu(unlock->cookie)));
507 else 512 else
508 dlm_lock_put(lock); 513 dlm_lock_put(lock);
509 514
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index e1216364d191..d026b4f27757 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -306,8 +306,8 @@ int ocfs2_journal_dirty_data(handle_t *handle,
306 * for the dinode, one for the new block. */ 306 * for the dinode, one for the new block. */
307#define OCFS2_SIMPLE_DIR_EXTEND_CREDITS (2) 307#define OCFS2_SIMPLE_DIR_EXTEND_CREDITS (2)
308 308
309/* file update (nlink, etc) + dir entry block */ 309/* file update (nlink, etc) + directory mtime/ctime + dir entry block */
310#define OCFS2_LINK_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 1) 310#define OCFS2_LINK_CREDITS (2*OCFS2_INODE_UPDATE_CREDITS + 1)
311 311
312/* inode + dir inode (if we unlink a dir), + dir entry block + orphan 312/* inode + dir inode (if we unlink a dir), + dir entry block + orphan
313 * dir inode link */ 313 * dir inode link */
diff --git a/fs/ocfs2/vote.c b/fs/ocfs2/vote.c
index 0afd8b9af70f..f30e63b9910c 100644
--- a/fs/ocfs2/vote.c
+++ b/fs/ocfs2/vote.c
@@ -887,7 +887,7 @@ static inline int ocfs2_translate_response(int response)
887 887
888static int ocfs2_handle_response_message(struct o2net_msg *msg, 888static int ocfs2_handle_response_message(struct o2net_msg *msg,
889 u32 len, 889 u32 len,
890 void *data) 890 void *data, void **ret_data)
891{ 891{
892 unsigned int response_id, node_num; 892 unsigned int response_id, node_num;
893 int response_status; 893 int response_status;
@@ -943,7 +943,7 @@ bail:
943 943
944static int ocfs2_handle_vote_message(struct o2net_msg *msg, 944static int ocfs2_handle_vote_message(struct o2net_msg *msg,
945 u32 len, 945 u32 len,
946 void *data) 946 void *data, void **ret_data)
947{ 947{
948 int status; 948 int status;
949 struct ocfs2_super *osb = data; 949 struct ocfs2_super *osb = data;
@@ -1007,7 +1007,7 @@ int ocfs2_register_net_handlers(struct ocfs2_super *osb)
1007 osb->net_key, 1007 osb->net_key,
1008 sizeof(struct ocfs2_response_msg), 1008 sizeof(struct ocfs2_response_msg),
1009 ocfs2_handle_response_message, 1009 ocfs2_handle_response_message,
1010 osb, &osb->osb_net_handlers); 1010 osb, NULL, &osb->osb_net_handlers);
1011 if (status) { 1011 if (status) {
1012 mlog_errno(status); 1012 mlog_errno(status);
1013 goto bail; 1013 goto bail;
@@ -1017,7 +1017,7 @@ int ocfs2_register_net_handlers(struct ocfs2_super *osb)
1017 osb->net_key, 1017 osb->net_key,
1018 sizeof(struct ocfs2_vote_msg), 1018 sizeof(struct ocfs2_vote_msg),
1019 ocfs2_handle_vote_message, 1019 ocfs2_handle_vote_message,
1020 osb, &osb->osb_net_handlers); 1020 osb, NULL, &osb->osb_net_handlers);
1021 if (status) { 1021 if (status) {
1022 mlog_errno(status); 1022 mlog_errno(status);
1023 goto bail; 1023 goto bail;
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
index e8f540d38d48..d3b9f5f07db1 100644
--- a/fs/sysfs/bin.c
+++ b/fs/sysfs/bin.c
@@ -16,6 +16,7 @@
16#include <linux/slab.h> 16#include <linux/slab.h>
17 17
18#include <asm/uaccess.h> 18#include <asm/uaccess.h>
19#include <asm/semaphore.h>
19 20
20#include "sysfs.h" 21#include "sysfs.h"
21 22
@@ -146,7 +147,7 @@ static int open(struct inode * inode, struct file * file)
146 Error: 147 Error:
147 module_put(attr->attr.owner); 148 module_put(attr->attr.owner);
148 Done: 149 Done:
149 if (error && kobj) 150 if (error)
150 kobject_put(kobj); 151 kobject_put(kobj);
151 return error; 152 return error;
152} 153}
@@ -157,8 +158,7 @@ static int release(struct inode * inode, struct file * file)
157 struct bin_attribute * attr = to_bin_attr(file->f_path.dentry); 158 struct bin_attribute * attr = to_bin_attr(file->f_path.dentry);
158 u8 * buffer = file->private_data; 159 u8 * buffer = file->private_data;
159 160
160 if (kobj) 161 kobject_put(kobj);
161 kobject_put(kobj);
162 module_put(attr->attr.owner); 162 module_put(attr->attr.owner);
163 kfree(buffer); 163 kfree(buffer);
164 return 0; 164 return 0;
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 511edef8b321..9dcdf556c99c 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -9,6 +9,7 @@
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/kobject.h> 10#include <linux/kobject.h>
11#include <linux/namei.h> 11#include <linux/namei.h>
12#include <asm/semaphore.h>
12#include "sysfs.h" 13#include "sysfs.h"
13 14
14DECLARE_RWSEM(sysfs_rename_sem); 15DECLARE_RWSEM(sysfs_rename_sem);
@@ -32,8 +33,7 @@ static struct dentry_operations sysfs_dentry_ops = {
32/* 33/*
33 * Allocates a new sysfs_dirent and links it to the parent sysfs_dirent 34 * Allocates a new sysfs_dirent and links it to the parent sysfs_dirent
34 */ 35 */
35static struct sysfs_dirent * sysfs_new_dirent(struct sysfs_dirent * parent_sd, 36static struct sysfs_dirent * __sysfs_new_dirent(void * element)
36 void * element)
37{ 37{
38 struct sysfs_dirent * sd; 38 struct sysfs_dirent * sd;
39 39
@@ -45,12 +45,28 @@ static struct sysfs_dirent * sysfs_new_dirent(struct sysfs_dirent * parent_sd,
45 atomic_set(&sd->s_count, 1); 45 atomic_set(&sd->s_count, 1);
46 atomic_set(&sd->s_event, 1); 46 atomic_set(&sd->s_event, 1);
47 INIT_LIST_HEAD(&sd->s_children); 47 INIT_LIST_HEAD(&sd->s_children);
48 list_add(&sd->s_sibling, &parent_sd->s_children); 48 INIT_LIST_HEAD(&sd->s_sibling);
49 sd->s_element = element; 49 sd->s_element = element;
50 50
51 return sd; 51 return sd;
52} 52}
53 53
54static void __sysfs_list_dirent(struct sysfs_dirent *parent_sd,
55 struct sysfs_dirent *sd)
56{
57 if (sd)
58 list_add(&sd->s_sibling, &parent_sd->s_children);
59}
60
61static struct sysfs_dirent * sysfs_new_dirent(struct sysfs_dirent *parent_sd,
62 void * element)
63{
64 struct sysfs_dirent *sd;
65 sd = __sysfs_new_dirent(element);
66 __sysfs_list_dirent(parent_sd, sd);
67 return sd;
68}
69
54/* 70/*
55 * 71 *
56 * Return -EEXIST if there is already a sysfs element with the same name for 72 * Return -EEXIST if there is already a sysfs element with the same name for
@@ -77,14 +93,14 @@ int sysfs_dirent_exist(struct sysfs_dirent *parent_sd,
77} 93}
78 94
79 95
80int sysfs_make_dirent(struct sysfs_dirent * parent_sd, struct dentry * dentry, 96static struct sysfs_dirent *
81 void * element, umode_t mode, int type) 97__sysfs_make_dirent(struct dentry *dentry, void *element, mode_t mode, int type)
82{ 98{
83 struct sysfs_dirent * sd; 99 struct sysfs_dirent * sd;
84 100
85 sd = sysfs_new_dirent(parent_sd, element); 101 sd = __sysfs_new_dirent(element);
86 if (!sd) 102 if (!sd)
87 return -ENOMEM; 103 goto out;
88 104
89 sd->s_mode = mode; 105 sd->s_mode = mode;
90 sd->s_type = type; 106 sd->s_type = type;
@@ -94,7 +110,19 @@ int sysfs_make_dirent(struct sysfs_dirent * parent_sd, struct dentry * dentry,
94 dentry->d_op = &sysfs_dentry_ops; 110 dentry->d_op = &sysfs_dentry_ops;
95 } 111 }
96 112
97 return 0; 113out:
114 return sd;
115}
116
117int sysfs_make_dirent(struct sysfs_dirent * parent_sd, struct dentry * dentry,
118 void * element, umode_t mode, int type)
119{
120 struct sysfs_dirent *sd;
121
122 sd = __sysfs_make_dirent(dentry, element, mode, type);
123 __sysfs_list_dirent(parent_sd, sd);
124
125 return sd ? 0 : -ENOMEM;
98} 126}
99 127
100static int init_dir(struct inode * inode) 128static int init_dir(struct inode * inode)
@@ -165,11 +193,11 @@ int sysfs_create_subdir(struct kobject * k, const char * n, struct dentry ** d)
165 193
166/** 194/**
167 * sysfs_create_dir - create a directory for an object. 195 * sysfs_create_dir - create a directory for an object.
168 * @parent: parent parent object.
169 * @kobj: object we're creating directory for. 196 * @kobj: object we're creating directory for.
197 * @shadow_parent: parent parent object.
170 */ 198 */
171 199
172int sysfs_create_dir(struct kobject * kobj) 200int sysfs_create_dir(struct kobject * kobj, struct dentry *shadow_parent)
173{ 201{
174 struct dentry * dentry = NULL; 202 struct dentry * dentry = NULL;
175 struct dentry * parent; 203 struct dentry * parent;
@@ -177,7 +205,9 @@ int sysfs_create_dir(struct kobject * kobj)
177 205
178 BUG_ON(!kobj); 206 BUG_ON(!kobj);
179 207
180 if (kobj->parent) 208 if (shadow_parent)
209 parent = shadow_parent;
210 else if (kobj->parent)
181 parent = kobj->parent->dentry; 211 parent = kobj->parent->dentry;
182 else if (sysfs_mount && sysfs_mount->mnt_sb) 212 else if (sysfs_mount && sysfs_mount->mnt_sb)
183 parent = sysfs_mount->mnt_sb->s_root; 213 parent = sysfs_mount->mnt_sb->s_root;
@@ -298,21 +328,12 @@ void sysfs_remove_subdir(struct dentry * d)
298} 328}
299 329
300 330
301/** 331static void __sysfs_remove_dir(struct dentry *dentry)
302 * sysfs_remove_dir - remove an object's directory.
303 * @kobj: object.
304 *
305 * The only thing special about this is that we remove any files in
306 * the directory before we remove the directory, and we've inlined
307 * what used to be sysfs_rmdir() below, instead of calling separately.
308 */
309
310void sysfs_remove_dir(struct kobject * kobj)
311{ 332{
312 struct dentry * dentry = dget(kobj->dentry);
313 struct sysfs_dirent * parent_sd; 333 struct sysfs_dirent * parent_sd;
314 struct sysfs_dirent * sd, * tmp; 334 struct sysfs_dirent * sd, * tmp;
315 335
336 dget(dentry);
316 if (!dentry) 337 if (!dentry)
317 return; 338 return;
318 339
@@ -333,32 +354,60 @@ void sysfs_remove_dir(struct kobject * kobj)
333 * Drop reference from dget() on entrance. 354 * Drop reference from dget() on entrance.
334 */ 355 */
335 dput(dentry); 356 dput(dentry);
357}
358
359/**
360 * sysfs_remove_dir - remove an object's directory.
361 * @kobj: object.
362 *
363 * The only thing special about this is that we remove any files in
364 * the directory before we remove the directory, and we've inlined
365 * what used to be sysfs_rmdir() below, instead of calling separately.
366 */
367
368void sysfs_remove_dir(struct kobject * kobj)
369{
370 __sysfs_remove_dir(kobj->dentry);
336 kobj->dentry = NULL; 371 kobj->dentry = NULL;
337} 372}
338 373
339int sysfs_rename_dir(struct kobject * kobj, const char *new_name) 374int sysfs_rename_dir(struct kobject * kobj, struct dentry *new_parent,
375 const char *new_name)
340{ 376{
341 int error = 0; 377 int error = 0;
342 struct dentry * new_dentry, * parent; 378 struct dentry * new_dentry;
343
344 if (!strcmp(kobject_name(kobj), new_name))
345 return -EINVAL;
346 379
347 if (!kobj->parent) 380 if (!new_parent)
348 return -EINVAL; 381 return -EFAULT;
349 382
350 down_write(&sysfs_rename_sem); 383 down_write(&sysfs_rename_sem);
351 parent = kobj->parent->dentry; 384 mutex_lock(&new_parent->d_inode->i_mutex);
352
353 mutex_lock(&parent->d_inode->i_mutex);
354 385
355 new_dentry = lookup_one_len(new_name, parent, strlen(new_name)); 386 new_dentry = lookup_one_len(new_name, new_parent, strlen(new_name));
356 if (!IS_ERR(new_dentry)) { 387 if (!IS_ERR(new_dentry)) {
357 if (!new_dentry->d_inode) { 388 /* By allowing two different directories with the
389 * same d_parent we allow this routine to move
390 * between different shadows of the same directory
391 */
392 if (kobj->dentry->d_parent->d_inode != new_parent->d_inode)
393 return -EINVAL;
394 else if (new_dentry->d_parent->d_inode != new_parent->d_inode)
395 error = -EINVAL;
396 else if (new_dentry == kobj->dentry)
397 error = -EINVAL;
398 else if (!new_dentry->d_inode) {
358 error = kobject_set_name(kobj, "%s", new_name); 399 error = kobject_set_name(kobj, "%s", new_name);
359 if (!error) { 400 if (!error) {
401 struct sysfs_dirent *sd, *parent_sd;
402
360 d_add(new_dentry, NULL); 403 d_add(new_dentry, NULL);
361 d_move(kobj->dentry, new_dentry); 404 d_move(kobj->dentry, new_dentry);
405
406 sd = kobj->dentry->d_fsdata;
407 parent_sd = new_parent->d_fsdata;
408
409 list_del_init(&sd->s_sibling);
410 list_add(&sd->s_sibling, &parent_sd->s_children);
362 } 411 }
363 else 412 else
364 d_drop(new_dentry); 413 d_drop(new_dentry);
@@ -366,7 +415,7 @@ int sysfs_rename_dir(struct kobject * kobj, const char *new_name)
366 error = -EEXIST; 415 error = -EEXIST;
367 dput(new_dentry); 416 dput(new_dentry);
368 } 417 }
369 mutex_unlock(&parent->d_inode->i_mutex); 418 mutex_unlock(&new_parent->d_inode->i_mutex);
370 up_write(&sysfs_rename_sem); 419 up_write(&sysfs_rename_sem);
371 420
372 return error; 421 return error;
@@ -378,12 +427,10 @@ int sysfs_move_dir(struct kobject *kobj, struct kobject *new_parent)
378 struct sysfs_dirent *new_parent_sd, *sd; 427 struct sysfs_dirent *new_parent_sd, *sd;
379 int error; 428 int error;
380 429
381 if (!new_parent)
382 return -EINVAL;
383
384 old_parent_dentry = kobj->parent ? 430 old_parent_dentry = kobj->parent ?
385 kobj->parent->dentry : sysfs_mount->mnt_sb->s_root; 431 kobj->parent->dentry : sysfs_mount->mnt_sb->s_root;
386 new_parent_dentry = new_parent->dentry; 432 new_parent_dentry = new_parent ?
433 new_parent->dentry : sysfs_mount->mnt_sb->s_root;
387 434
388again: 435again:
389 mutex_lock(&old_parent_dentry->d_inode->i_mutex); 436 mutex_lock(&old_parent_dentry->d_inode->i_mutex);
@@ -547,6 +594,95 @@ static loff_t sysfs_dir_lseek(struct file * file, loff_t offset, int origin)
547 return offset; 594 return offset;
548} 595}
549 596
597
598/**
599 * sysfs_make_shadowed_dir - Setup so a directory can be shadowed
600 * @kobj: object we're creating shadow of.
601 */
602
603int sysfs_make_shadowed_dir(struct kobject *kobj,
604 void * (*follow_link)(struct dentry *, struct nameidata *))
605{
606 struct inode *inode;
607 struct inode_operations *i_op;
608
609 inode = kobj->dentry->d_inode;
610 if (inode->i_op != &sysfs_dir_inode_operations)
611 return -EINVAL;
612
613 i_op = kmalloc(sizeof(*i_op), GFP_KERNEL);
614 if (!i_op)
615 return -ENOMEM;
616
617 memcpy(i_op, &sysfs_dir_inode_operations, sizeof(*i_op));
618 i_op->follow_link = follow_link;
619
620 /* Locking of inode->i_op?
621 * Since setting i_op is a single word write and they
622 * are atomic we should be ok here.
623 */
624 inode->i_op = i_op;
625 return 0;
626}
627
628/**
629 * sysfs_create_shadow_dir - create a shadow directory for an object.
630 * @kobj: object we're creating directory for.
631 *
632 * sysfs_make_shadowed_dir must already have been called on this
633 * directory.
634 */
635
636struct dentry *sysfs_create_shadow_dir(struct kobject *kobj)
637{
638 struct sysfs_dirent *sd;
639 struct dentry *parent, *dir, *shadow;
640 struct inode *inode;
641
642 dir = kobj->dentry;
643 inode = dir->d_inode;
644 parent = dir->d_parent;
645 shadow = ERR_PTR(-EINVAL);
646 if (!sysfs_is_shadowed_inode(inode))
647 goto out;
648
649 shadow = d_alloc(parent, &dir->d_name);
650 if (!shadow)
651 goto nomem;
652
653 sd = __sysfs_make_dirent(shadow, kobj, inode->i_mode, SYSFS_DIR);
654 if (!sd)
655 goto nomem;
656
657 d_instantiate(shadow, igrab(inode));
658 inc_nlink(inode);
659 inc_nlink(parent->d_inode);
660 shadow->d_op = &sysfs_dentry_ops;
661
662 dget(shadow); /* Extra count - pin the dentry in core */
663
664out:
665 return shadow;
666nomem:
667 dput(shadow);
668 shadow = ERR_PTR(-ENOMEM);
669 goto out;
670}
671
672/**
673 * sysfs_remove_shadow_dir - remove an object's directory.
674 * @shadow: dentry of shadow directory
675 *
676 * The only thing special about this is that we remove any files in
677 * the directory before we remove the directory, and we've inlined
678 * what used to be sysfs_rmdir() below, instead of calling separately.
679 */
680
681void sysfs_remove_shadow_dir(struct dentry *shadow)
682{
683 __sysfs_remove_dir(shadow);
684}
685
550const struct file_operations sysfs_dir_operations = { 686const struct file_operations sysfs_dir_operations = {
551 .open = sysfs_dir_open, 687 .open = sysfs_dir_open,
552 .release = sysfs_dir_close, 688 .release = sysfs_dir_close,
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 9cfe53e1e00d..c0e117649a4d 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -7,6 +7,7 @@
7#include <linux/kobject.h> 7#include <linux/kobject.h>
8#include <linux/namei.h> 8#include <linux/namei.h>
9#include <linux/poll.h> 9#include <linux/poll.h>
10#include <linux/list.h>
10#include <asm/uaccess.h> 11#include <asm/uaccess.h>
11#include <asm/semaphore.h> 12#include <asm/semaphore.h>
12 13
@@ -50,17 +51,29 @@ static struct sysfs_ops subsys_sysfs_ops = {
50 .store = subsys_attr_store, 51 .store = subsys_attr_store,
51}; 52};
52 53
54/**
55 * add_to_collection - add buffer to a collection
56 * @buffer: buffer to be added
57 * @node inode of set to add to
58 */
53 59
54struct sysfs_buffer { 60static inline void
55 size_t count; 61add_to_collection(struct sysfs_buffer *buffer, struct inode *node)
56 loff_t pos; 62{
57 char * page; 63 struct sysfs_buffer_collection *set = node->i_private;
58 struct sysfs_ops * ops;
59 struct semaphore sem;
60 int needs_read_fill;
61 int event;
62};
63 64
65 mutex_lock(&node->i_mutex);
66 list_add(&buffer->associates, &set->associates);
67 mutex_unlock(&node->i_mutex);
68}
69
70static inline void
71remove_from_collection(struct sysfs_buffer *buffer, struct inode *node)
72{
73 mutex_lock(&node->i_mutex);
74 list_del(&buffer->associates);
75 mutex_unlock(&node->i_mutex);
76}
64 77
65/** 78/**
66 * fill_read_buffer - allocate and fill buffer from object. 79 * fill_read_buffer - allocate and fill buffer from object.
@@ -70,7 +83,8 @@ struct sysfs_buffer {
70 * Allocate @buffer->page, if it hasn't been already, then call the 83 * Allocate @buffer->page, if it hasn't been already, then call the
71 * kobject's show() method to fill the buffer with this attribute's 84 * kobject's show() method to fill the buffer with this attribute's
72 * data. 85 * data.
73 * This is called only once, on the file's first read. 86 * This is called only once, on the file's first read unless an error
87 * is returned.
74 */ 88 */
75static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer) 89static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer)
76{ 90{
@@ -88,12 +102,13 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
88 102
89 buffer->event = atomic_read(&sd->s_event); 103 buffer->event = atomic_read(&sd->s_event);
90 count = ops->show(kobj,attr,buffer->page); 104 count = ops->show(kobj,attr,buffer->page);
91 buffer->needs_read_fill = 0;
92 BUG_ON(count > (ssize_t)PAGE_SIZE); 105 BUG_ON(count > (ssize_t)PAGE_SIZE);
93 if (count >= 0) 106 if (count >= 0) {
107 buffer->needs_read_fill = 0;
94 buffer->count = count; 108 buffer->count = count;
95 else 109 } else {
96 ret = count; 110 ret = count;
111 }
97 return ret; 112 return ret;
98} 113}
99 114
@@ -153,6 +168,10 @@ sysfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
153 ssize_t retval = 0; 168 ssize_t retval = 0;
154 169
155 down(&buffer->sem); 170 down(&buffer->sem);
171 if (buffer->orphaned) {
172 retval = -ENODEV;
173 goto out;
174 }
156 if (buffer->needs_read_fill) { 175 if (buffer->needs_read_fill) {
157 if ((retval = fill_read_buffer(file->f_path.dentry,buffer))) 176 if ((retval = fill_read_buffer(file->f_path.dentry,buffer)))
158 goto out; 177 goto out;
@@ -165,7 +184,6 @@ out:
165 return retval; 184 return retval;
166} 185}
167 186
168
169/** 187/**
170 * fill_write_buffer - copy buffer from userspace. 188 * fill_write_buffer - copy buffer from userspace.
171 * @buffer: data buffer for file. 189 * @buffer: data buffer for file.
@@ -243,19 +261,25 @@ sysfs_write_file(struct file *file, const char __user *buf, size_t count, loff_t
243 ssize_t len; 261 ssize_t len;
244 262
245 down(&buffer->sem); 263 down(&buffer->sem);
264 if (buffer->orphaned) {
265 len = -ENODEV;
266 goto out;
267 }
246 len = fill_write_buffer(buffer, buf, count); 268 len = fill_write_buffer(buffer, buf, count);
247 if (len > 0) 269 if (len > 0)
248 len = flush_write_buffer(file->f_path.dentry, buffer, len); 270 len = flush_write_buffer(file->f_path.dentry, buffer, len);
249 if (len > 0) 271 if (len > 0)
250 *ppos += len; 272 *ppos += len;
273out:
251 up(&buffer->sem); 274 up(&buffer->sem);
252 return len; 275 return len;
253} 276}
254 277
255static int check_perm(struct inode * inode, struct file * file) 278static int sysfs_open_file(struct inode *inode, struct file *file)
256{ 279{
257 struct kobject *kobj = sysfs_get_kobject(file->f_path.dentry->d_parent); 280 struct kobject *kobj = sysfs_get_kobject(file->f_path.dentry->d_parent);
258 struct attribute * attr = to_attr(file->f_path.dentry); 281 struct attribute * attr = to_attr(file->f_path.dentry);
282 struct sysfs_buffer_collection *set;
259 struct sysfs_buffer * buffer; 283 struct sysfs_buffer * buffer;
260 struct sysfs_ops * ops = NULL; 284 struct sysfs_ops * ops = NULL;
261 int error = 0; 285 int error = 0;
@@ -285,6 +309,18 @@ static int check_perm(struct inode * inode, struct file * file)
285 if (!ops) 309 if (!ops)
286 goto Eaccess; 310 goto Eaccess;
287 311
312 /* make sure we have a collection to add our buffers to */
313 mutex_lock(&inode->i_mutex);
314 if (!(set = inode->i_private)) {
315 if (!(set = inode->i_private = kmalloc(sizeof(struct sysfs_buffer_collection), GFP_KERNEL))) {
316 error = -ENOMEM;
317 goto Done;
318 } else {
319 INIT_LIST_HEAD(&set->associates);
320 }
321 }
322 mutex_unlock(&inode->i_mutex);
323
288 /* File needs write support. 324 /* File needs write support.
289 * The inode's perms must say it's ok, 325 * The inode's perms must say it's ok,
290 * and we must have a store method. 326 * and we must have a store method.
@@ -310,9 +346,11 @@ static int check_perm(struct inode * inode, struct file * file)
310 */ 346 */
311 buffer = kzalloc(sizeof(struct sysfs_buffer), GFP_KERNEL); 347 buffer = kzalloc(sizeof(struct sysfs_buffer), GFP_KERNEL);
312 if (buffer) { 348 if (buffer) {
349 INIT_LIST_HEAD(&buffer->associates);
313 init_MUTEX(&buffer->sem); 350 init_MUTEX(&buffer->sem);
314 buffer->needs_read_fill = 1; 351 buffer->needs_read_fill = 1;
315 buffer->ops = ops; 352 buffer->ops = ops;
353 add_to_collection(buffer, inode);
316 file->private_data = buffer; 354 file->private_data = buffer;
317 } else 355 } else
318 error = -ENOMEM; 356 error = -ENOMEM;
@@ -325,16 +363,11 @@ static int check_perm(struct inode * inode, struct file * file)
325 error = -EACCES; 363 error = -EACCES;
326 module_put(attr->owner); 364 module_put(attr->owner);
327 Done: 365 Done:
328 if (error && kobj) 366 if (error)
329 kobject_put(kobj); 367 kobject_put(kobj);
330 return error; 368 return error;
331} 369}
332 370
333static int sysfs_open_file(struct inode * inode, struct file * filp)
334{
335 return check_perm(inode,filp);
336}
337
338static int sysfs_release(struct inode * inode, struct file * filp) 371static int sysfs_release(struct inode * inode, struct file * filp)
339{ 372{
340 struct kobject * kobj = to_kobj(filp->f_path.dentry->d_parent); 373 struct kobject * kobj = to_kobj(filp->f_path.dentry->d_parent);
@@ -342,8 +375,9 @@ static int sysfs_release(struct inode * inode, struct file * filp)
342 struct module * owner = attr->owner; 375 struct module * owner = attr->owner;
343 struct sysfs_buffer * buffer = filp->private_data; 376 struct sysfs_buffer * buffer = filp->private_data;
344 377
345 if (kobj) 378 if (buffer)
346 kobject_put(kobj); 379 remove_from_collection(buffer, inode);
380 kobject_put(kobj);
347 /* After this point, attr should not be accessed. */ 381 /* After this point, attr should not be accessed. */
348 module_put(owner); 382 module_put(owner);
349 383
@@ -548,7 +582,7 @@ EXPORT_SYMBOL_GPL(sysfs_chmod_file);
548 582
549void sysfs_remove_file(struct kobject * kobj, const struct attribute * attr) 583void sysfs_remove_file(struct kobject * kobj, const struct attribute * attr)
550{ 584{
551 sysfs_hash_and_remove(kobj->dentry,attr->name); 585 sysfs_hash_and_remove(kobj->dentry, attr->name);
552} 586}
553 587
554 588
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index 122145b0895c..b20951c93761 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -13,6 +13,8 @@
13#include <linux/dcache.h> 13#include <linux/dcache.h>
14#include <linux/namei.h> 14#include <linux/namei.h>
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/fs.h>
17#include <asm/semaphore.h>
16#include "sysfs.h" 18#include "sysfs.h"
17 19
18 20
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index e79e38d52c00..542d2bcc73df 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -13,6 +13,7 @@
13#include <linux/backing-dev.h> 13#include <linux/backing-dev.h>
14#include <linux/capability.h> 14#include <linux/capability.h>
15#include <linux/errno.h> 15#include <linux/errno.h>
16#include <asm/semaphore.h>
16#include "sysfs.h" 17#include "sysfs.h"
17 18
18extern struct super_block * sysfs_sb; 19extern struct super_block * sysfs_sb;
@@ -32,6 +33,16 @@ static struct inode_operations sysfs_inode_operations ={
32 .setattr = sysfs_setattr, 33 .setattr = sysfs_setattr,
33}; 34};
34 35
36void sysfs_delete_inode(struct inode *inode)
37{
38 /* Free the shadowed directory inode operations */
39 if (sysfs_is_shadowed_inode(inode)) {
40 kfree(inode->i_op);
41 inode->i_op = NULL;
42 }
43 return generic_delete_inode(inode);
44}
45
35int sysfs_setattr(struct dentry * dentry, struct iattr * iattr) 46int sysfs_setattr(struct dentry * dentry, struct iattr * iattr)
36{ 47{
37 struct inode * inode = dentry->d_inode; 48 struct inode * inode = dentry->d_inode;
@@ -209,6 +220,22 @@ const unsigned char * sysfs_get_name(struct sysfs_dirent *sd)
209 return NULL; 220 return NULL;
210} 221}
211 222
223static inline void orphan_all_buffers(struct inode *node)
224{
225 struct sysfs_buffer_collection *set = node->i_private;
226 struct sysfs_buffer *buf;
227
228 mutex_lock_nested(&node->i_mutex, I_MUTEX_CHILD);
229 if (node->i_private) {
230 list_for_each_entry(buf, &set->associates, associates) {
231 down(&buf->sem);
232 buf->orphaned = 1;
233 up(&buf->sem);
234 }
235 }
236 mutex_unlock(&node->i_mutex);
237}
238
212 239
213/* 240/*
214 * Unhashes the dentry corresponding to given sysfs_dirent 241 * Unhashes the dentry corresponding to given sysfs_dirent
@@ -217,16 +244,23 @@ const unsigned char * sysfs_get_name(struct sysfs_dirent *sd)
217void sysfs_drop_dentry(struct sysfs_dirent * sd, struct dentry * parent) 244void sysfs_drop_dentry(struct sysfs_dirent * sd, struct dentry * parent)
218{ 245{
219 struct dentry * dentry = sd->s_dentry; 246 struct dentry * dentry = sd->s_dentry;
247 struct inode *inode;
220 248
221 if (dentry) { 249 if (dentry) {
222 spin_lock(&dcache_lock); 250 spin_lock(&dcache_lock);
223 spin_lock(&dentry->d_lock); 251 spin_lock(&dentry->d_lock);
224 if (!(d_unhashed(dentry) && dentry->d_inode)) { 252 if (!(d_unhashed(dentry) && dentry->d_inode)) {
253 inode = dentry->d_inode;
254 spin_lock(&inode->i_lock);
255 __iget(inode);
256 spin_unlock(&inode->i_lock);
225 dget_locked(dentry); 257 dget_locked(dentry);
226 __d_drop(dentry); 258 __d_drop(dentry);
227 spin_unlock(&dentry->d_lock); 259 spin_unlock(&dentry->d_lock);
228 spin_unlock(&dcache_lock); 260 spin_unlock(&dcache_lock);
229 simple_unlink(parent->d_inode, dentry); 261 simple_unlink(parent->d_inode, dentry);
262 orphan_all_buffers(inode);
263 iput(inode);
230 } else { 264 } else {
231 spin_unlock(&dentry->d_lock); 265 spin_unlock(&dentry->d_lock);
232 spin_unlock(&dcache_lock); 266 spin_unlock(&dcache_lock);
@@ -248,7 +282,7 @@ int sysfs_hash_and_remove(struct dentry * dir, const char * name)
248 return -ENOENT; 282 return -ENOENT;
249 283
250 parent_sd = dir->d_fsdata; 284 parent_sd = dir->d_fsdata;
251 mutex_lock(&dir->d_inode->i_mutex); 285 mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
252 list_for_each_entry(sd, &parent_sd->s_children, s_sibling) { 286 list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
253 if (!sd->s_element) 287 if (!sd->s_element)
254 continue; 288 continue;
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index e503f858fba8..f6a87a824883 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -8,6 +8,7 @@
8#include <linux/mount.h> 8#include <linux/mount.h>
9#include <linux/pagemap.h> 9#include <linux/pagemap.h>
10#include <linux/init.h> 10#include <linux/init.h>
11#include <asm/semaphore.h>
11 12
12#include "sysfs.h" 13#include "sysfs.h"
13 14
@@ -18,9 +19,12 @@ struct vfsmount *sysfs_mount;
18struct super_block * sysfs_sb = NULL; 19struct super_block * sysfs_sb = NULL;
19struct kmem_cache *sysfs_dir_cachep; 20struct kmem_cache *sysfs_dir_cachep;
20 21
22static void sysfs_clear_inode(struct inode *inode);
23
21static struct super_operations sysfs_ops = { 24static struct super_operations sysfs_ops = {
22 .statfs = simple_statfs, 25 .statfs = simple_statfs,
23 .drop_inode = generic_delete_inode, 26 .drop_inode = sysfs_delete_inode,
27 .clear_inode = sysfs_clear_inode,
24}; 28};
25 29
26static struct sysfs_dirent sysfs_root = { 30static struct sysfs_dirent sysfs_root = {
@@ -31,6 +35,11 @@ static struct sysfs_dirent sysfs_root = {
31 .s_iattr = NULL, 35 .s_iattr = NULL,
32}; 36};
33 37
38static void sysfs_clear_inode(struct inode *inode)
39{
40 kfree(inode->i_private);
41}
42
34static int sysfs_fill_super(struct super_block *sb, void *data, int silent) 43static int sysfs_fill_super(struct super_block *sb, void *data, int silent)
35{ 44{
36 struct inode *inode; 45 struct inode *inode;
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index f50e3cc2ded8..4869f611192f 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -7,6 +7,7 @@
7#include <linux/module.h> 7#include <linux/module.h>
8#include <linux/kobject.h> 8#include <linux/kobject.h>
9#include <linux/namei.h> 9#include <linux/namei.h>
10#include <asm/semaphore.h>
10 11
11#include "sysfs.h" 12#include "sysfs.h"
12 13
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index bd7cec295dab..fe1cbfd208ed 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -2,6 +2,7 @@
2extern struct vfsmount * sysfs_mount; 2extern struct vfsmount * sysfs_mount;
3extern struct kmem_cache *sysfs_dir_cachep; 3extern struct kmem_cache *sysfs_dir_cachep;
4 4
5extern void sysfs_delete_inode(struct inode *inode);
5extern struct inode * sysfs_new_inode(mode_t mode, struct sysfs_dirent *); 6extern struct inode * sysfs_new_inode(mode_t mode, struct sysfs_dirent *);
6extern int sysfs_create(struct dentry *, int mode, int (*init)(struct inode *)); 7extern int sysfs_create(struct dentry *, int mode, int (*init)(struct inode *));
7 8
@@ -33,6 +34,22 @@ struct sysfs_symlink {
33 struct kobject * target_kobj; 34 struct kobject * target_kobj;
34}; 35};
35 36
37struct sysfs_buffer {
38 struct list_head associates;
39 size_t count;
40 loff_t pos;
41 char * page;
42 struct sysfs_ops * ops;
43 struct semaphore sem;
44 int orphaned;
45 int needs_read_fill;
46 int event;
47};
48
49struct sysfs_buffer_collection {
50 struct list_head associates;
51};
52
36static inline struct kobject * to_kobj(struct dentry * dentry) 53static inline struct kobject * to_kobj(struct dentry * dentry)
37{ 54{
38 struct sysfs_dirent * sd = dentry->d_fsdata; 55 struct sysfs_dirent * sd = dentry->d_fsdata;
@@ -96,3 +113,7 @@ static inline void sysfs_put(struct sysfs_dirent * sd)
96 release_sysfs_dirent(sd); 113 release_sysfs_dirent(sd);
97} 114}
98 115
116static inline int sysfs_is_shadowed_inode(struct inode *inode)
117{
118 return S_ISDIR(inode->i_mode) && inode->i_op->follow_link;
119}
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index ebc1f697615a..422f29c06c77 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -63,7 +63,7 @@
63 63
64/* Current ACPICA subsystem version in YYYYMMDD format */ 64/* Current ACPICA subsystem version in YYYYMMDD format */
65 65
66#define ACPI_CA_VERSION 0x20060707 66#define ACPI_CA_VERSION 0x20070126
67 67
68/* 68/*
69 * OS name, used for the _OS object. The _OS object is essentially obsolete, 69 * OS name, used for the _OS object. The _OS object is essentially obsolete,
@@ -115,6 +115,10 @@
115 115
116#define ACPI_NUM_OWNERID_MASKS 8 116#define ACPI_NUM_OWNERID_MASKS 8
117 117
118/* Size of the root table array is increased by this increment */
119
120#define ACPI_ROOT_TABLE_SIZE_INCREMENT 4
121
118/****************************************************************************** 122/******************************************************************************
119 * 123 *
120 * ACPI Specification constants (Do not change unless the specification changes) 124 * ACPI Specification constants (Do not change unless the specification changes)
@@ -152,6 +156,11 @@
152#define ACPI_PATH_SEGMENT_LENGTH 5 /* 4 chars for name + 1 char for separator */ 156#define ACPI_PATH_SEGMENT_LENGTH 5 /* 4 chars for name + 1 char for separator */
153#define ACPI_PATH_SEPARATOR '.' 157#define ACPI_PATH_SEPARATOR '.'
154 158
159/* Sizes for ACPI table headers */
160
161#define ACPI_OEM_ID_SIZE 6
162#define ACPI_OEM_TABLE_ID_SIZE 8
163
155/* Constants used in searching for the RSDP in low memory */ 164/* Constants used in searching for the RSDP in low memory */
156 165
157#define ACPI_EBDA_PTR_LOCATION 0x0000040E /* Physical Address */ 166#define ACPI_EBDA_PTR_LOCATION 0x0000040E /* Physical Address */
diff --git a/include/acpi/acdebug.h b/include/acpi/acdebug.h
index d8167095caf3..d626bb1d2973 100644
--- a/include/acpi/acdebug.h
+++ b/include/acpi/acdebug.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -159,6 +159,10 @@ void
159acpi_db_create_execution_threads(char *num_threads_arg, 159acpi_db_create_execution_threads(char *num_threads_arg,
160 char *num_loops_arg, char *method_name_arg); 160 char *num_loops_arg, char *method_name_arg);
161 161
162#ifdef ACPI_DBG_TRACK_ALLOCATIONS
163u32 acpi_db_get_cache_info(struct acpi_memory_list *cache);
164#endif
165
162/* 166/*
163 * dbfileio - Debugger file I/O commands 167 * dbfileio - Debugger file I/O commands
164 */ 168 */
@@ -214,4 +218,6 @@ void acpi_db_prep_namestring(char *name);
214 218
215struct acpi_namespace_node *acpi_db_local_ns_lookup(char *name); 219struct acpi_namespace_node *acpi_db_local_ns_lookup(char *name);
216 220
221void acpi_db_uint32_to_hex_string(u32 value, char *buffer);
222
217#endif /* __ACDEBUG_H__ */ 223#endif /* __ACDEBUG_H__ */
diff --git a/include/acpi/acdisasm.h b/include/acpi/acdisasm.h
index 9a7d6921f534..389d772c7d5b 100644
--- a/include/acpi/acdisasm.h
+++ b/include/acpi/acdisasm.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -97,9 +97,11 @@ typedef const struct acpi_dmtable_info {
97#define ACPI_DMT_CHKSUM 20 97#define ACPI_DMT_CHKSUM 20
98#define ACPI_DMT_SPACEID 21 98#define ACPI_DMT_SPACEID 21
99#define ACPI_DMT_GAS 22 99#define ACPI_DMT_GAS 22
100#define ACPI_DMT_MADT 23 100#define ACPI_DMT_DMAR 23
101#define ACPI_DMT_SRAT 24 101#define ACPI_DMT_MADT 24
102#define ACPI_DMT_EXIT 25 102#define ACPI_DMT_SRAT 25
103#define ACPI_DMT_EXIT 26
104#define ACPI_DMT_SIG 27
103 105
104typedef 106typedef
105void (*ACPI_TABLE_HANDLER) (struct acpi_table_header * table); 107void (*ACPI_TABLE_HANDLER) (struct acpi_table_header * table);
@@ -108,6 +110,7 @@ struct acpi_dmtable_data {
108 char *signature; 110 char *signature;
109 struct acpi_dmtable_info *table_info; 111 struct acpi_dmtable_info *table_info;
110 ACPI_TABLE_HANDLER table_handler; 112 ACPI_TABLE_HANDLER table_handler;
113 char *name;
111}; 114};
112 115
113struct acpi_op_walk_info { 116struct acpi_op_walk_info {
@@ -139,7 +142,9 @@ extern const char *acpi_gbl_match_ops[];
139 142
140extern struct acpi_dmtable_info acpi_dm_table_info_asf0[]; 143extern struct acpi_dmtable_info acpi_dm_table_info_asf0[];
141extern struct acpi_dmtable_info acpi_dm_table_info_asf1[]; 144extern struct acpi_dmtable_info acpi_dm_table_info_asf1[];
145extern struct acpi_dmtable_info acpi_dm_table_info_asf1a[];
142extern struct acpi_dmtable_info acpi_dm_table_info_asf2[]; 146extern struct acpi_dmtable_info acpi_dm_table_info_asf2[];
147extern struct acpi_dmtable_info acpi_dm_table_info_asf2a[];
143extern struct acpi_dmtable_info acpi_dm_table_info_asf3[]; 148extern struct acpi_dmtable_info acpi_dm_table_info_asf3[];
144extern struct acpi_dmtable_info acpi_dm_table_info_asf4[]; 149extern struct acpi_dmtable_info acpi_dm_table_info_asf4[];
145extern struct acpi_dmtable_info acpi_dm_table_info_asf_hdr[]; 150extern struct acpi_dmtable_info acpi_dm_table_info_asf_hdr[];
@@ -147,6 +152,11 @@ extern struct acpi_dmtable_info acpi_dm_table_info_boot[];
147extern struct acpi_dmtable_info acpi_dm_table_info_cpep[]; 152extern struct acpi_dmtable_info acpi_dm_table_info_cpep[];
148extern struct acpi_dmtable_info acpi_dm_table_info_cpep0[]; 153extern struct acpi_dmtable_info acpi_dm_table_info_cpep0[];
149extern struct acpi_dmtable_info acpi_dm_table_info_dbgp[]; 154extern struct acpi_dmtable_info acpi_dm_table_info_dbgp[];
155extern struct acpi_dmtable_info acpi_dm_table_info_dmar[];
156extern struct acpi_dmtable_info acpi_dm_table_info_dmar_hdr[];
157extern struct acpi_dmtable_info acpi_dm_table_info_dmar_scope[];
158extern struct acpi_dmtable_info acpi_dm_table_info_dmar0[];
159extern struct acpi_dmtable_info acpi_dm_table_info_dmar1[];
150extern struct acpi_dmtable_info acpi_dm_table_info_ecdt[]; 160extern struct acpi_dmtable_info acpi_dm_table_info_ecdt[];
151extern struct acpi_dmtable_info acpi_dm_table_info_facs[]; 161extern struct acpi_dmtable_info acpi_dm_table_info_facs[];
152extern struct acpi_dmtable_info acpi_dm_table_info_fadt1[]; 162extern struct acpi_dmtable_info acpi_dm_table_info_fadt1[];
@@ -201,6 +211,8 @@ void acpi_dm_dump_asf(struct acpi_table_header *table);
201 211
202void acpi_dm_dump_cpep(struct acpi_table_header *table); 212void acpi_dm_dump_cpep(struct acpi_table_header *table);
203 213
214void acpi_dm_dump_dmar(struct acpi_table_header *table);
215
204void acpi_dm_dump_fadt(struct acpi_table_header *table); 216void acpi_dm_dump_fadt(struct acpi_table_header *table);
205 217
206void acpi_dm_dump_srat(struct acpi_table_header *table); 218void acpi_dm_dump_srat(struct acpi_table_header *table);
@@ -314,7 +326,7 @@ acpi_dm_resource_template(struct acpi_op_walk_info *info,
314 union acpi_parse_object *op, 326 union acpi_parse_object *op,
315 u8 * byte_data, u32 byte_count); 327 u8 * byte_data, u32 byte_count);
316 328
317u8 acpi_dm_is_resource_template(union acpi_parse_object *op); 329acpi_status acpi_dm_is_resource_template(union acpi_parse_object *op);
318 330
319void acpi_dm_indent(u32 level); 331void acpi_dm_indent(u32 level);
320 332
diff --git a/include/acpi/acdispat.h b/include/acpi/acdispat.h
index a22fe9cf8493..cb8d2868c8ac 100644
--- a/include/acpi/acdispat.h
+++ b/include/acpi/acdispat.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -210,7 +210,7 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state);
210 * dsinit 210 * dsinit
211 */ 211 */
212acpi_status 212acpi_status
213acpi_ds_initialize_objects(struct acpi_table_desc *table_desc, 213acpi_ds_initialize_objects(acpi_native_uint table_index,
214 struct acpi_namespace_node *start_node); 214 struct acpi_namespace_node *start_node);
215 215
216/* 216/*
diff --git a/include/acpi/acevents.h b/include/acpi/acevents.h
index 234142828e1a..d23cdf326808 100644
--- a/include/acpi/acevents.h
+++ b/include/acpi/acevents.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 797ca1ea5214..b73f18a48785 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -178,8 +178,10 @@
178#define AE_CTRL_BREAK (acpi_status) (0x0009 | AE_CODE_CONTROL) 178#define AE_CTRL_BREAK (acpi_status) (0x0009 | AE_CODE_CONTROL)
179#define AE_CTRL_CONTINUE (acpi_status) (0x000A | AE_CODE_CONTROL) 179#define AE_CTRL_CONTINUE (acpi_status) (0x000A | AE_CODE_CONTROL)
180#define AE_CTRL_SKIP (acpi_status) (0x000B | AE_CODE_CONTROL) 180#define AE_CTRL_SKIP (acpi_status) (0x000B | AE_CODE_CONTROL)
181#define AE_CTRL_PARSE_CONTINUE (acpi_status) (0x000C | AE_CODE_CONTROL)
182#define AE_CTRL_PARSE_PENDING (acpi_status) (0x000D | AE_CODE_CONTROL)
181 183
182#define AE_CODE_CTRL_MAX 0x000B 184#define AE_CODE_CTRL_MAX 0x000D
183 185
184#ifdef DEFINE_ACPI_GLOBALS 186#ifdef DEFINE_ACPI_GLOBALS
185 187
@@ -291,7 +293,9 @@ char const *acpi_gbl_exception_names_ctrl[] = {
291 "AE_CTRL_TRANSFER", 293 "AE_CTRL_TRANSFER",
292 "AE_CTRL_BREAK", 294 "AE_CTRL_BREAK",
293 "AE_CTRL_CONTINUE", 295 "AE_CTRL_CONTINUE",
294 "AE_CTRL_SKIP" 296 "AE_CTRL_SKIP",
297 "AE_CTRL_PARSE_CONTINUE",
298 "AE_CTRL_PARSE_PENDING"
295}; 299};
296 300
297#endif /* ACPI GLOBALS */ 301#endif /* ACPI GLOBALS */
diff --git a/include/acpi/acglobal.h b/include/acpi/acglobal.h
index 06972e6637de..24c3f05ab367 100644
--- a/include/acpi/acglobal.h
+++ b/include/acpi/acglobal.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -58,37 +58,6 @@
58#define ACPI_INIT_GLOBAL(a,b) a 58#define ACPI_INIT_GLOBAL(a,b) a
59#endif 59#endif
60 60
61/*
62 * Keep local copies of these FADT-based registers. NOTE: These globals
63 * are first in this file for alignment reasons on 64-bit systems.
64 */
65ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_enable;
66ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_enable;
67
68/*****************************************************************************
69 *
70 * Debug support
71 *
72 ****************************************************************************/
73
74/* Runtime configuration of debug print levels */
75
76extern u32 acpi_dbg_level;
77extern u32 acpi_dbg_layer;
78
79/* Procedure nesting level for debug output */
80
81extern u32 acpi_gbl_nesting_level;
82
83/* Support for dynamic control method tracing mechanism */
84
85ACPI_EXTERN u32 acpi_gbl_original_dbg_level;
86ACPI_EXTERN u32 acpi_gbl_original_dbg_layer;
87ACPI_EXTERN acpi_name acpi_gbl_trace_method_name;
88ACPI_EXTERN u32 acpi_gbl_trace_dbg_level;
89ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
90ACPI_EXTERN u32 acpi_gbl_trace_flags;
91
92/***************************************************************************** 61/*****************************************************************************
93 * 62 *
94 * Runtime configuration (static defaults that can be overriden at runtime) 63 * Runtime configuration (static defaults that can be overriden at runtime)
@@ -135,52 +104,62 @@ ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_leave_wake_gpes_disabled, TRUE);
135 104
136/***************************************************************************** 105/*****************************************************************************
137 * 106 *
107 * Debug support
108 *
109 ****************************************************************************/
110
111/* Runtime configuration of debug print levels */
112
113extern u32 acpi_dbg_level;
114extern u32 acpi_dbg_layer;
115
116/* Procedure nesting level for debug output */
117
118extern u32 acpi_gbl_nesting_level;
119
120/* Event counters */
121
122ACPI_EXTERN u32 acpi_gpe_count;
123
124/* Support for dynamic control method tracing mechanism */
125
126ACPI_EXTERN u32 acpi_gbl_original_dbg_level;
127ACPI_EXTERN u32 acpi_gbl_original_dbg_layer;
128ACPI_EXTERN acpi_name acpi_gbl_trace_method_name;
129ACPI_EXTERN u32 acpi_gbl_trace_dbg_level;
130ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
131ACPI_EXTERN u32 acpi_gbl_trace_flags;
132
133/*****************************************************************************
134 *
138 * ACPI Table globals 135 * ACPI Table globals
139 * 136 *
140 ****************************************************************************/ 137 ****************************************************************************/
141 138
142/* 139/*
143 * Table pointers. 140 * acpi_gbl_root_table_list is the master list of ACPI tables found in the
144 * Although these pointers are somewhat redundant with the global acpi_table, 141 * RSDT/XSDT.
145 * they are convenient because they are typed pointers.
146 * 142 *
147 * These tables are single-table only; meaning that there can be at most one 143 * acpi_gbl_FADT is a local copy of the FADT, converted to a common format.
148 * of each in the system. Each global points to the actual table.
149 */
150ACPI_EXTERN u32 acpi_gbl_table_flags;
151ACPI_EXTERN u32 acpi_gbl_rsdt_table_count;
152ACPI_EXTERN struct rsdp_descriptor *acpi_gbl_RSDP;
153ACPI_EXTERN struct xsdt_descriptor *acpi_gbl_XSDT;
154ACPI_EXTERN struct fadt_descriptor *acpi_gbl_FADT;
155ACPI_EXTERN struct acpi_table_header *acpi_gbl_DSDT;
156ACPI_EXTERN struct facs_descriptor *acpi_gbl_FACS;
157ACPI_EXTERN struct acpi_common_facs acpi_gbl_common_fACS;
158/*
159 * Since there may be multiple SSDTs and PSDTs, a single pointer is not
160 * sufficient; Therefore, there isn't one!
161 */ 144 */
145ACPI_EXTERN struct acpi_internal_rsdt acpi_gbl_root_table_list;
146ACPI_EXTERN struct acpi_table_fadt acpi_gbl_FADT;
147extern acpi_native_uint acpi_gbl_permanent_mmap;
162 148
163/* The root table can be either an RSDT or an XSDT */ 149/* These addresses are calculated from FADT address values */
164 150
165ACPI_EXTERN u8 acpi_gbl_root_table_type; 151ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_enable;
166#define ACPI_TABLE_TYPE_RSDT 'R' 152ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_enable;
167#define ACPI_TABLE_TYPE_XSDT 'X'
168 153
169/* 154/*
170 * Handle both ACPI 1.0 and ACPI 2.0 Integer widths: 155 * Handle both ACPI 1.0 and ACPI 2.0 Integer widths. The integer width is
171 * If we are executing a method that exists in a 32-bit ACPI table, 156 * determined by the revision of the DSDT: If the DSDT revision is less than
172 * use only the lower 32 bits of the (internal) 64-bit Integer. 157 * 2, use only the lower 32 bits of the internal 64-bit Integer.
173 */ 158 */
174ACPI_EXTERN u8 acpi_gbl_integer_bit_width; 159ACPI_EXTERN u8 acpi_gbl_integer_bit_width;
175ACPI_EXTERN u8 acpi_gbl_integer_byte_width; 160ACPI_EXTERN u8 acpi_gbl_integer_byte_width;
176ACPI_EXTERN u8 acpi_gbl_integer_nybble_width; 161ACPI_EXTERN u8 acpi_gbl_integer_nybble_width;
177 162
178/*
179 * ACPI Table info arrays
180 */
181extern struct acpi_table_list acpi_gbl_table_lists[ACPI_TABLE_ID_MAX + 1];
182extern struct acpi_table_support acpi_gbl_table_data[ACPI_TABLE_ID_MAX + 1];
183
184/***************************************************************************** 163/*****************************************************************************
185 * 164 *
186 * Mutual exlusion within ACPICA subsystem 165 * Mutual exlusion within ACPICA subsystem
@@ -188,7 +167,7 @@ extern struct acpi_table_support acpi_gbl_table_data[ACPI_TABLE_ID_MAX + 1];
188 ****************************************************************************/ 167 ****************************************************************************/
189 168
190/* 169/*
191 * Predefined mutex objects. This array contains the 170 * Predefined mutex objects. This array contains the
192 * actual OS mutex handles, indexed by the local ACPI_MUTEX_HANDLEs. 171 * actual OS mutex handles, indexed by the local ACPI_MUTEX_HANDLEs.
193 * (The table maps local handles to the real OS handles) 172 * (The table maps local handles to the real OS handles)
194 */ 173 */
@@ -197,6 +176,7 @@ ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX];
197/* 176/*
198 * Global lock semaphore works in conjunction with the actual HW global lock 177 * Global lock semaphore works in conjunction with the actual HW global lock
199 */ 178 */
179ACPI_EXTERN acpi_mutex acpi_gbl_global_lock_mutex;
200ACPI_EXTERN acpi_semaphore acpi_gbl_global_lock_semaphore; 180ACPI_EXTERN acpi_semaphore acpi_gbl_global_lock_semaphore;
201 181
202/* 182/*
@@ -220,6 +200,7 @@ ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock; /* For ACPI H/W except GPE regis
220 200
221ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list; 201ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list;
222ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list; 202ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list;
203ACPI_EXTERN u8 acpi_gbl_display_final_mem_stats;
223#endif 204#endif
224 205
225/* Object caches */ 206/* Object caches */
@@ -240,7 +221,6 @@ ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk;
240 221
241/* Misc */ 222/* Misc */
242 223
243ACPI_EXTERN u32 acpi_gbl_global_lock_thread_count;
244ACPI_EXTERN u32 acpi_gbl_original_mode; 224ACPI_EXTERN u32 acpi_gbl_original_mode;
245ACPI_EXTERN u32 acpi_gbl_rsdp_original_location; 225ACPI_EXTERN u32 acpi_gbl_rsdp_original_location;
246ACPI_EXTERN u32 acpi_gbl_ns_lookup_count; 226ACPI_EXTERN u32 acpi_gbl_ns_lookup_count;
@@ -260,12 +240,19 @@ ACPI_EXTERN u8 acpi_gbl_system_awake_and_running;
260 240
261extern u8 acpi_gbl_shutdown; 241extern u8 acpi_gbl_shutdown;
262extern u32 acpi_gbl_startup_flags; 242extern u32 acpi_gbl_startup_flags;
263extern const u8 acpi_gbl_decode_to8bit[8];
264extern const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT]; 243extern const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT];
265extern const char *acpi_gbl_highest_dstate_names[4]; 244extern const char *acpi_gbl_highest_dstate_names[4];
266extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES]; 245extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
267extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS]; 246extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS];
268 247
248/* Exception codes */
249
250extern char const *acpi_gbl_exception_names_env[];
251extern char const *acpi_gbl_exception_names_pgm[];
252extern char const *acpi_gbl_exception_names_tbl[];
253extern char const *acpi_gbl_exception_names_aml[];
254extern char const *acpi_gbl_exception_names_ctrl[];
255
269/***************************************************************************** 256/*****************************************************************************
270 * 257 *
271 * Namespace globals 258 * Namespace globals
diff --git a/include/acpi/achware.h b/include/acpi/achware.h
index 29b60a8c0593..9df275cf7bc1 100644
--- a/include/acpi/achware.h
+++ b/include/acpi/achware.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -61,8 +61,6 @@
61/* 61/*
62 * hwacpi - high level functions 62 * hwacpi - high level functions
63 */ 63 */
64acpi_status acpi_hw_initialize(void);
65
66acpi_status acpi_hw_set_mode(u32 mode); 64acpi_status acpi_hw_set_mode(u32 mode);
67 65
68u32 acpi_hw_get_mode(void); 66u32 acpi_hw_get_mode(void);
@@ -84,7 +82,7 @@ acpi_hw_low_level_read(u32 width,
84acpi_status 82acpi_status
85acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address *reg); 83acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address *reg);
86 84
87acpi_status acpi_hw_clear_acpi_status(u32 flags); 85acpi_status acpi_hw_clear_acpi_status(void);
88 86
89/* 87/*
90 * hwgpe - GPE support 88 * hwgpe - GPE support
diff --git a/include/acpi/acinterp.h b/include/acpi/acinterp.h
index 91586d0d5bb5..ce7c9d653910 100644
--- a/include/acpi/acinterp.h
+++ b/include/acpi/acinterp.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -277,12 +277,6 @@ acpi_status acpi_ex_system_do_suspend(acpi_integer time);
277 277
278acpi_status acpi_ex_system_do_stall(u32 time); 278acpi_status acpi_ex_system_do_stall(u32 time);
279 279
280acpi_status
281acpi_ex_system_acquire_mutex(union acpi_operand_object *time,
282 union acpi_operand_object *obj_desc);
283
284acpi_status acpi_ex_system_release_mutex(union acpi_operand_object *obj_desc);
285
286acpi_status acpi_ex_system_signal_event(union acpi_operand_object *obj_desc); 280acpi_status acpi_ex_system_signal_event(union acpi_operand_object *obj_desc);
287 281
288acpi_status 282acpi_status
@@ -451,10 +445,14 @@ acpi_ex_copy_integer_to_buffer_field(union acpi_operand_object *source_desc,
451/* 445/*
452 * exutils - interpreter/scanner utilities 446 * exutils - interpreter/scanner utilities
453 */ 447 */
454acpi_status acpi_ex_enter_interpreter(void); 448void acpi_ex_enter_interpreter(void);
455 449
456void acpi_ex_exit_interpreter(void); 450void acpi_ex_exit_interpreter(void);
457 451
452void acpi_ex_reacquire_interpreter(void);
453
454void acpi_ex_relinquish_interpreter(void);
455
458void acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc); 456void acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc);
459 457
460u8 acpi_ex_acquire_global_lock(u32 rule); 458u8 acpi_ex_acquire_global_lock(u32 rule);
diff --git a/include/acpi/aclocal.h b/include/acpi/aclocal.h
index 063c4b54290f..6f83ddbed3af 100644
--- a/include/acpi/aclocal.h
+++ b/include/acpi/aclocal.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -80,8 +80,8 @@ union acpi_parse_object;
80 * table below also! 80 * table below also!
81 */ 81 */
82#define ACPI_MTX_INTERPRETER 0 /* AML Interpreter, main lock */ 82#define ACPI_MTX_INTERPRETER 0 /* AML Interpreter, main lock */
83#define ACPI_MTX_TABLES 1 /* Data for ACPI tables */ 83#define ACPI_MTX_NAMESPACE 1 /* ACPI Namespace */
84#define ACPI_MTX_NAMESPACE 2 /* ACPI Namespace */ 84#define ACPI_MTX_TABLES 2 /* Data for ACPI tables */
85#define ACPI_MTX_EVENTS 3 /* Data for ACPI events */ 85#define ACPI_MTX_EVENTS 3 /* Data for ACPI events */
86#define ACPI_MTX_CACHES 4 /* Internal caches, general purposes */ 86#define ACPI_MTX_CACHES 4 /* Internal caches, general purposes */
87#define ACPI_MTX_MEMORY 5 /* Debug memory tracking lists */ 87#define ACPI_MTX_MEMORY 5 /* Debug memory tracking lists */
@@ -162,7 +162,7 @@ struct acpi_mutex_info {
162typedef enum { 162typedef enum {
163 ACPI_IMODE_LOAD_PASS1 = 0x01, 163 ACPI_IMODE_LOAD_PASS1 = 0x01,
164 ACPI_IMODE_LOAD_PASS2 = 0x02, 164 ACPI_IMODE_LOAD_PASS2 = 0x02,
165 ACPI_IMODE_EXECUTE = 0x0E 165 ACPI_IMODE_EXECUTE = 0x03
166} acpi_interpreter_mode; 166} acpi_interpreter_mode;
167 167
168union acpi_name_union { 168union acpi_name_union {
@@ -204,7 +204,7 @@ struct acpi_namespace_node {
204/* Namespace Node flags */ 204/* Namespace Node flags */
205 205
206#define ANOBJ_END_OF_PEER_LIST 0x01 /* End-of-list, Peer field points to parent */ 206#define ANOBJ_END_OF_PEER_LIST 0x01 /* End-of-list, Peer field points to parent */
207#define ANOBJ_RESERVED 0x02 /* Available for future use */ 207#define ANOBJ_TEMPORARY 0x02 /* Node is create by a method and is temporary */
208#define ANOBJ_METHOD_ARG 0x04 /* Node is a method argument */ 208#define ANOBJ_METHOD_ARG 0x04 /* Node is a method argument */
209#define ANOBJ_METHOD_LOCAL 0x08 /* Node is a method local */ 209#define ANOBJ_METHOD_LOCAL 0x08 /* Node is a method local */
210#define ANOBJ_SUBTREE_HAS_INI 0x10 /* Used to optimize device initialization */ 210#define ANOBJ_SUBTREE_HAS_INI 0x10 /* Used to optimize device initialization */
@@ -219,25 +219,42 @@ struct acpi_namespace_node {
219 * ACPI Table Descriptor. One per ACPI table 219 * ACPI Table Descriptor. One per ACPI table
220 */ 220 */
221struct acpi_table_desc { 221struct acpi_table_desc {
222 struct acpi_table_desc *prev; 222 acpi_physical_address address;
223 struct acpi_table_desc *next;
224 struct acpi_table_desc *installed_desc;
225 struct acpi_table_header *pointer; 223 struct acpi_table_header *pointer;
226 u8 *aml_start; 224 u32 length; /* Length fixed at 32 bits */
227 u64 physical_address; 225 union acpi_name_union signature;
228 acpi_size length;
229 u32 aml_length;
230 acpi_owner_id owner_id; 226 acpi_owner_id owner_id;
231 u8 type; 227 u8 flags;
232 u8 allocation;
233 u8 loaded_into_namespace;
234}; 228};
235 229
236struct acpi_table_list { 230/* Flags for above */
237 struct acpi_table_desc *next; 231
232#define ACPI_TABLE_ORIGIN_UNKNOWN (0)
233#define ACPI_TABLE_ORIGIN_MAPPED (1)
234#define ACPI_TABLE_ORIGIN_ALLOCATED (2)
235#define ACPI_TABLE_ORIGIN_MASK (3)
236#define ACPI_TABLE_IS_LOADED (4)
237
238/* One internal RSDT for table management */
239
240struct acpi_internal_rsdt {
241 struct acpi_table_desc *tables;
238 u32 count; 242 u32 count;
243 u32 size;
244 u8 flags;
239}; 245};
240 246
247/* Flags for above */
248
249#define ACPI_ROOT_ORIGIN_UNKNOWN (0) /* ~ORIGIN_ALLOCATED */
250#define ACPI_ROOT_ORIGIN_ALLOCATED (1)
251#define ACPI_ROOT_ALLOW_RESIZE (2)
252
253/* Predefined (fixed) table indexes */
254
255#define ACPI_TABLE_INDEX_DSDT (0)
256#define ACPI_TABLE_INDEX_FACS (1)
257
241struct acpi_find_context { 258struct acpi_find_context {
242 char *search_for; 259 char *search_for;
243 acpi_handle *list; 260 acpi_handle *list;
@@ -350,7 +367,7 @@ struct acpi_gpe_event_info {
350 union acpi_gpe_dispatch_info dispatch; /* Either Method or Handler */ 367 union acpi_gpe_dispatch_info dispatch; /* Either Method or Handler */
351 struct acpi_gpe_register_info *register_info; /* Backpointer to register info */ 368 struct acpi_gpe_register_info *register_info; /* Backpointer to register info */
352 u8 flags; /* Misc info about this GPE */ 369 u8 flags; /* Misc info about this GPE */
353 u8 register_bit; /* This GPE bit within the register */ 370 u8 gpe_number; /* This GPE */
354}; 371};
355 372
356/* Information about a GPE register pair, one per each status/enable pair in an array */ 373/* Information about a GPE register pair, one per each status/enable pair in an array */
@@ -855,12 +872,30 @@ struct acpi_bit_register_info {
855 ****************************************************************************/ 872 ****************************************************************************/
856 873
857struct acpi_db_method_info { 874struct acpi_db_method_info {
858 acpi_handle thread_gate; 875 acpi_handle main_thread_gate;
876 acpi_handle thread_complete_gate;
877 u32 *threads;
878 u32 num_threads;
879 u32 num_created;
880 u32 num_completed;
881
859 char *name; 882 char *name;
860 char **args;
861 u32 flags; 883 u32 flags;
862 u32 num_loops; 884 u32 num_loops;
863 char pathname[128]; 885 char pathname[128];
886 char **args;
887
888 /*
889 * Arguments to be passed to method for the command
890 * Threads -
891 * the Number of threads, ID of current thread and
892 * Index of current thread inside all them created.
893 */
894 char init_args;
895 char *arguments[4];
896 char num_threads_str[11];
897 char id_of_thread_str[11];
898 char index_of_thread_str[11];
864}; 899};
865 900
866struct acpi_integrity_info { 901struct acpi_integrity_info {
@@ -919,6 +954,8 @@ struct acpi_memory_list {
919 954
920 u32 total_allocated; 955 u32 total_allocated;
921 u32 total_freed; 956 u32 total_freed;
957 u32 max_occupied;
958 u32 total_size;
922 u32 current_total_size; 959 u32 current_total_size;
923 u32 requests; 960 u32 requests;
924 u32 hits; 961 u32 hits;
diff --git a/include/acpi/acmacros.h b/include/acpi/acmacros.h
index 192fa095a515..8948a6461834 100644
--- a/include/acpi/acmacros.h
+++ b/include/acpi/acmacros.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -55,25 +55,12 @@
55#define ACPI_SET_BIT(target,bit) ((target) |= (bit)) 55#define ACPI_SET_BIT(target,bit) ((target) |= (bit))
56#define ACPI_CLEAR_BIT(target,bit) ((target) &= ~(bit)) 56#define ACPI_CLEAR_BIT(target,bit) ((target) &= ~(bit))
57#define ACPI_MIN(a,b) (((a)<(b))?(a):(b)) 57#define ACPI_MIN(a,b) (((a)<(b))?(a):(b))
58#define ACPI_MAX(a,b) (((a)>(b))?(a):(b))
58 59
59/* Size calculation */ 60/* Size calculation */
60 61
61#define ACPI_ARRAY_LENGTH(x) (sizeof(x) / sizeof((x)[0])) 62#define ACPI_ARRAY_LENGTH(x) (sizeof(x) / sizeof((x)[0]))
62 63
63#if ACPI_MACHINE_WIDTH == 16
64
65/*
66 * For 16-bit addresses, we have to assume that the upper 32 bits
67 * (out of 64) are zero.
68 */
69#define ACPI_LODWORD(l) ((u32)(l))
70#define ACPI_HIDWORD(l) ((u32)(0))
71
72#define ACPI_GET_ADDRESS(a) ((a).lo)
73#define ACPI_STORE_ADDRESS(a,b) {(a).hi=0;(a).lo=(u32)(b);}
74#define ACPI_VALID_ADDRESS(a) ((a).hi | (a).lo)
75
76#else
77#ifdef ACPI_NO_INTEGER64_SUPPORT 64#ifdef ACPI_NO_INTEGER64_SUPPORT
78/* 65/*
79 * acpi_integer is 32-bits, no 64-bit support on this platform 66 * acpi_integer is 32-bits, no 64-bit support on this platform
@@ -81,10 +68,6 @@
81#define ACPI_LODWORD(l) ((u32)(l)) 68#define ACPI_LODWORD(l) ((u32)(l))
82#define ACPI_HIDWORD(l) ((u32)(0)) 69#define ACPI_HIDWORD(l) ((u32)(0))
83 70
84#define ACPI_GET_ADDRESS(a) (a)
85#define ACPI_STORE_ADDRESS(a,b) ((a)=(b))
86#define ACPI_VALID_ADDRESS(a) (a)
87
88#else 71#else
89 72
90/* 73/*
@@ -92,11 +75,6 @@
92 */ 75 */
93#define ACPI_LODWORD(l) ((u32)(u64)(l)) 76#define ACPI_LODWORD(l) ((u32)(u64)(l))
94#define ACPI_HIDWORD(l) ((u32)(((*(struct uint64_struct *)(void *)(&l))).hi)) 77#define ACPI_HIDWORD(l) ((u32)(((*(struct uint64_struct *)(void *)(&l))).hi))
95
96#define ACPI_GET_ADDRESS(a) (a)
97#define ACPI_STORE_ADDRESS(a,b) ((a)=(acpi_physical_address)(b))
98#define ACPI_VALID_ADDRESS(a) (a)
99#endif
100#endif 78#endif
101 79
102/* 80/*
@@ -134,15 +112,8 @@
134#define ACPI_TO_POINTER(i) ACPI_ADD_PTR (void,(void *) NULL,(acpi_native_uint) i) 112#define ACPI_TO_POINTER(i) ACPI_ADD_PTR (void,(void *) NULL,(acpi_native_uint) i)
135#define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p,(void *) NULL) 113#define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p,(void *) NULL)
136#define ACPI_OFFSET(d,f) (acpi_size) ACPI_PTR_DIFF (&(((d *)0)->f),(void *) NULL) 114#define ACPI_OFFSET(d,f) (acpi_size) ACPI_PTR_DIFF (&(((d *)0)->f),(void *) NULL)
137
138#if ACPI_MACHINE_WIDTH == 16
139#define ACPI_STORE_POINTER(d,s) ACPI_MOVE_32_TO_32(d,s)
140#define ACPI_PHYSADDR_TO_PTR(i) (void *)(i)
141#define ACPI_PTR_TO_PHYSADDR(i) (u32) ACPI_CAST_PTR (u8,(i))
142#else
143#define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i) 115#define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i)
144#define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i) 116#define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i)
145#endif
146 117
147#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED 118#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED
148#define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32,(a)) == *ACPI_CAST_PTR (u32,(b))) 119#define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32,(a)) == *ACPI_CAST_PTR (u32,(b)))
@@ -223,28 +194,6 @@
223 194
224/* The hardware supports unaligned transfers, just do the little-endian move */ 195/* The hardware supports unaligned transfers, just do the little-endian move */
225 196
226#if ACPI_MACHINE_WIDTH == 16
227
228/* No 64-bit integers */
229/* 16-bit source, 16/32/64 destination */
230
231#define ACPI_MOVE_16_TO_16(d,s) *(u16 *)(void *)(d) = *(u16 *)(void *)(s)
232#define ACPI_MOVE_16_TO_32(d,s) *(u32 *)(void *)(d) = *(u16 *)(void *)(s)
233#define ACPI_MOVE_16_TO_64(d,s) ACPI_MOVE_16_TO_32(d,s)
234
235/* 32-bit source, 16/32/64 destination */
236
237#define ACPI_MOVE_32_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) /* Truncate to 16 */
238#define ACPI_MOVE_32_TO_32(d,s) *(u32 *)(void *)(d) = *(u32 *)(void *)(s)
239#define ACPI_MOVE_32_TO_64(d,s) ACPI_MOVE_32_TO_32(d,s)
240
241/* 64-bit source, 16/32/64 destination */
242
243#define ACPI_MOVE_64_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) /* Truncate to 16 */
244#define ACPI_MOVE_64_TO_32(d,s) ACPI_MOVE_32_TO_32(d,s) /* Truncate to 32 */
245#define ACPI_MOVE_64_TO_64(d,s) ACPI_MOVE_32_TO_32(d,s)
246
247#else
248/* 16-bit source, 16/32/64 destination */ 197/* 16-bit source, 16/32/64 destination */
249 198
250#define ACPI_MOVE_16_TO_16(d,s) *(u16 *)(void *)(d) = *(u16 *)(void *)(s) 199#define ACPI_MOVE_16_TO_16(d,s) *(u16 *)(void *)(d) = *(u16 *)(void *)(s)
@@ -262,7 +211,6 @@
262#define ACPI_MOVE_64_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) /* Truncate to 16 */ 211#define ACPI_MOVE_64_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) /* Truncate to 16 */
263#define ACPI_MOVE_64_TO_32(d,s) ACPI_MOVE_32_TO_32(d,s) /* Truncate to 32 */ 212#define ACPI_MOVE_64_TO_32(d,s) ACPI_MOVE_32_TO_32(d,s) /* Truncate to 32 */
264#define ACPI_MOVE_64_TO_64(d,s) *(u64 *)(void *)(d) = *(u64 *)(void *)(s) 213#define ACPI_MOVE_64_TO_64(d,s) *(u64 *)(void *)(d) = *(u64 *)(void *)(s)
265#endif
266 214
267#else 215#else
268/* 216/*
@@ -307,10 +255,7 @@
307 255
308/* Macros based on machine integer width */ 256/* Macros based on machine integer width */
309 257
310#if ACPI_MACHINE_WIDTH == 16 258#if ACPI_MACHINE_WIDTH == 32
311#define ACPI_MOVE_SIZE_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s)
312
313#elif ACPI_MACHINE_WIDTH == 32
314#define ACPI_MOVE_SIZE_TO_16(d,s) ACPI_MOVE_32_TO_16(d,s) 259#define ACPI_MOVE_SIZE_TO_16(d,s) ACPI_MOVE_32_TO_16(d,s)
315 260
316#elif ACPI_MACHINE_WIDTH == 64 261#elif ACPI_MACHINE_WIDTH == 64
@@ -695,16 +640,6 @@
695#define ACPI_DEBUGGER_EXEC(a) 640#define ACPI_DEBUGGER_EXEC(a)
696#endif 641#endif
697 642
698/*
699 * For 16-bit code, we want to shrink some things even though
700 * we are using ACPI_DEBUG_OUTPUT to get the debug output
701 */
702#if ACPI_MACHINE_WIDTH == 16
703#undef ACPI_DEBUG_ONLY_MEMBERS
704#undef _VERBOSE_STRUCTURES
705#define ACPI_DEBUG_ONLY_MEMBERS(a)
706#endif
707
708#ifdef ACPI_DEBUG_OUTPUT 643#ifdef ACPI_DEBUG_OUTPUT
709/* 644/*
710 * 1) Set name to blanks 645 * 1) Set name to blanks
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index b67da3636899..34bfae8a05f3 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acnamesp.h b/include/acpi/acnamesp.h
index 83b52f9f899a..535b7e1c41bc 100644
--- a/include/acpi/acnamesp.h
+++ b/include/acpi/acnamesp.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -65,9 +65,13 @@
65#define ACPI_NS_ERROR_IF_FOUND 0x08 65#define ACPI_NS_ERROR_IF_FOUND 0x08
66#define ACPI_NS_PREFIX_IS_SCOPE 0x10 66#define ACPI_NS_PREFIX_IS_SCOPE 0x10
67#define ACPI_NS_EXTERNAL 0x20 67#define ACPI_NS_EXTERNAL 0x20
68#define ACPI_NS_TEMPORARY 0x40
68 69
69#define ACPI_NS_WALK_UNLOCK TRUE 70/* Flags for acpi_ns_walk_namespace */
70#define ACPI_NS_WALK_NO_UNLOCK FALSE 71
72#define ACPI_NS_WALK_NO_UNLOCK 0
73#define ACPI_NS_WALK_UNLOCK 0x01
74#define ACPI_NS_WALK_TEMP_NODES 0x02
71 75
72/* 76/*
73 * nsinit - Namespace initialization 77 * nsinit - Namespace initialization
@@ -82,7 +86,7 @@ acpi_status acpi_ns_initialize_devices(void);
82acpi_status acpi_ns_load_namespace(void); 86acpi_status acpi_ns_load_namespace(void);
83 87
84acpi_status 88acpi_status
85acpi_ns_load_table(struct acpi_table_desc *table_desc, 89acpi_ns_load_table(acpi_native_uint table_index,
86 struct acpi_namespace_node *node); 90 struct acpi_namespace_node *node);
87 91
88/* 92/*
@@ -92,7 +96,7 @@ acpi_status
92acpi_ns_walk_namespace(acpi_object_type type, 96acpi_ns_walk_namespace(acpi_object_type type,
93 acpi_handle start_object, 97 acpi_handle start_object,
94 u32 max_depth, 98 u32 max_depth,
95 u8 unlock_before_callback, 99 u32 flags,
96 acpi_walk_callback user_function, 100 acpi_walk_callback user_function,
97 void *context, void **return_value); 101 void *context, void **return_value);
98 102
@@ -106,11 +110,12 @@ struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type,
106 * nsparse - table parsing 110 * nsparse - table parsing
107 */ 111 */
108acpi_status 112acpi_status
109acpi_ns_parse_table(struct acpi_table_desc *table_desc, 113acpi_ns_parse_table(acpi_native_uint table_index,
110 struct acpi_namespace_node *scope); 114 struct acpi_namespace_node *start_node);
111 115
112acpi_status 116acpi_status
113acpi_ns_one_complete_parse(u8 pass_number, struct acpi_table_desc *table_desc); 117acpi_ns_one_complete_parse(acpi_native_uint pass_number,
118 acpi_native_uint table_index);
114 119
115/* 120/*
116 * nsaccess - Top-level namespace access 121 * nsaccess - Top-level namespace access
diff --git a/include/acpi/acobject.h b/include/acpi/acobject.h
index 8fdee31119f3..04e9735a6742 100644
--- a/include/acpi/acobject.h
+++ b/include/acpi/acobject.h
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -52,7 +52,15 @@
52 * to the interpreter, and to keep track of the various handlers such as 52 * to the interpreter, and to keep track of the various handlers such as
53 * address space handlers and notify handlers. The object is a constant 53 * address space handlers and notify handlers. The object is a constant
54 * size in order to allow it to be cached and reused. 54 * size in order to allow it to be cached and reused.
55 *
56 * Note: The object is optimized to be aligned and will not work if it is
57 * byte-packed.
55 */ 58 */
59#if ACPI_MACHINE_WIDTH == 64
60#pragma pack(8)
61#else
62#pragma pack(4)
63#endif
56 64
57/******************************************************************************* 65/*******************************************************************************
58 * 66 *
@@ -101,7 +109,8 @@ struct acpi_object_common {
101ACPI_OBJECT_COMMON_HEADER}; 109ACPI_OBJECT_COMMON_HEADER};
102 110
103struct acpi_object_integer { 111struct acpi_object_integer {
104 ACPI_OBJECT_COMMON_HEADER acpi_integer value; 112 ACPI_OBJECT_COMMON_HEADER u8 fill[3]; /* Prevent warning on some compilers */
113 acpi_integer value;
105}; 114};
106 115
107/* 116/*
@@ -203,7 +212,9 @@ struct acpi_object_power_resource {
203}; 212};
204 213
205struct acpi_object_processor { 214struct acpi_object_processor {
206 ACPI_OBJECT_COMMON_HEADER u8 proc_id; 215 ACPI_OBJECT_COMMON_HEADER
216 /* The next two fields take advantage of the 3-byte space before NOTIFY_INFO */
217 u8 proc_id;
207 u8 length; 218 u8 length;
208 ACPI_COMMON_NOTIFY_INFO acpi_io_address address; 219 ACPI_COMMON_NOTIFY_INFO acpi_io_address address;
209}; 220};
@@ -406,4 +417,6 @@ union acpi_descriptor {
406 union acpi_parse_object op; 417 union acpi_parse_object op;
407}; 418};
408 419
420#pragma pack()
421
409#endif /* _ACOBJECT_H */ 422#endif /* _ACOBJECT_H */
diff --git a/include/acpi/acopcode.h b/include/acpi/acopcode.h
index 7659a46bc432..e6f76a280a94 100644
--- a/include/acpi/acopcode.h
+++ b/include/acpi/acopcode.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -257,7 +257,7 @@
257#define ARGI_LLESSEQUAL_OP ARGI_INVALID_OPCODE 257#define ARGI_LLESSEQUAL_OP ARGI_INVALID_OPCODE
258#define ARGI_LNOT_OP ARGI_LIST1 (ARGI_INTEGER) 258#define ARGI_LNOT_OP ARGI_LIST1 (ARGI_INTEGER)
259#define ARGI_LNOTEQUAL_OP ARGI_INVALID_OPCODE 259#define ARGI_LNOTEQUAL_OP ARGI_INVALID_OPCODE
260#define ARGI_LOAD_OP ARGI_LIST2 (ARGI_REGION_OR_FIELD,ARGI_TARGETREF) 260#define ARGI_LOAD_OP ARGI_LIST2 (ARGI_REGION_OR_BUFFER,ARGI_TARGETREF)
261#define ARGI_LOAD_TABLE_OP ARGI_LIST6 (ARGI_STRING, ARGI_STRING, ARGI_STRING, ARGI_STRING, ARGI_STRING, ARGI_ANYTYPE) 261#define ARGI_LOAD_TABLE_OP ARGI_LIST6 (ARGI_STRING, ARGI_STRING, ARGI_STRING, ARGI_STRING, ARGI_STRING, ARGI_ANYTYPE)
262#define ARGI_LOCAL0 ARG_NONE 262#define ARGI_LOCAL0 ARG_NONE
263#define ARGI_LOCAL1 ARG_NONE 263#define ARGI_LOCAL1 ARG_NONE
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 8d5039d0b430..7812267b577f 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acparser.h b/include/acpi/acparser.h
index 9d49d3c41cd9..85c358e21014 100644
--- a/include/acpi/acparser.h
+++ b/include/acpi/acparser.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h
index b9a39d1009bd..2e5f00d3ea0d 100644
--- a/include/acpi/acpi.h
+++ b/include/acpi/acpi.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index fdd10953b2b6..0d9f984a60a1 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -59,7 +59,6 @@ acpi_evaluate_reference(acpi_handle handle,
59 59
60#define ACPI_BUS_FILE_ROOT "acpi" 60#define ACPI_BUS_FILE_ROOT "acpi"
61extern struct proc_dir_entry *acpi_root_dir; 61extern struct proc_dir_entry *acpi_root_dir;
62extern struct fadt_descriptor acpi_fadt;
63 62
64enum acpi_bus_removal_type { 63enum acpi_bus_removal_type {
65 ACPI_BUS_REMOVAL_NORMAL = 0, 64 ACPI_BUS_REMOVAL_NORMAL = 0,
@@ -92,13 +91,12 @@ typedef int (*acpi_op_remove) (struct acpi_device * device, int type);
92typedef int (*acpi_op_lock) (struct acpi_device * device, int type); 91typedef int (*acpi_op_lock) (struct acpi_device * device, int type);
93typedef int (*acpi_op_start) (struct acpi_device * device); 92typedef int (*acpi_op_start) (struct acpi_device * device);
94typedef int (*acpi_op_stop) (struct acpi_device * device, int type); 93typedef int (*acpi_op_stop) (struct acpi_device * device, int type);
95typedef int (*acpi_op_suspend) (struct acpi_device * device, int state); 94typedef int (*acpi_op_suspend) (struct acpi_device * device, pm_message_t state);
96typedef int (*acpi_op_resume) (struct acpi_device * device, int state); 95typedef int (*acpi_op_resume) (struct acpi_device * device);
97typedef int (*acpi_op_scan) (struct acpi_device * device); 96typedef int (*acpi_op_scan) (struct acpi_device * device);
98typedef int (*acpi_op_bind) (struct acpi_device * device); 97typedef int (*acpi_op_bind) (struct acpi_device * device);
99typedef int (*acpi_op_unbind) (struct acpi_device * device); 98typedef int (*acpi_op_unbind) (struct acpi_device * device);
100typedef int (*acpi_op_match) (struct acpi_device * device, 99typedef int (*acpi_op_shutdown) (struct acpi_device * device);
101 struct acpi_driver * driver);
102 100
103struct acpi_bus_ops { 101struct acpi_bus_ops {
104 u32 acpi_op_add:1; 102 u32 acpi_op_add:1;
@@ -111,7 +109,7 @@ struct acpi_bus_ops {
111 u32 acpi_op_scan:1; 109 u32 acpi_op_scan:1;
112 u32 acpi_op_bind:1; 110 u32 acpi_op_bind:1;
113 u32 acpi_op_unbind:1; 111 u32 acpi_op_unbind:1;
114 u32 acpi_op_match:1; 112 u32 acpi_op_shutdown:1;
115 u32 reserved:21; 113 u32 reserved:21;
116}; 114};
117 115
@@ -126,16 +124,16 @@ struct acpi_device_ops {
126 acpi_op_scan scan; 124 acpi_op_scan scan;
127 acpi_op_bind bind; 125 acpi_op_bind bind;
128 acpi_op_unbind unbind; 126 acpi_op_unbind unbind;
129 acpi_op_match match; 127 acpi_op_shutdown shutdown;
130}; 128};
131 129
132struct acpi_driver { 130struct acpi_driver {
133 struct list_head node;
134 char name[80]; 131 char name[80];
135 char class[80]; 132 char class[80];
136 atomic_t references;
137 char *ids; /* Supported Hardware IDs */ 133 char *ids; /* Supported Hardware IDs */
138 struct acpi_device_ops ops; 134 struct acpi_device_ops ops;
135 struct device_driver drv;
136 struct module *owner;
139}; 137};
140 138
141/* 139/*
@@ -185,7 +183,7 @@ struct acpi_device_dir {
185 183
186typedef char acpi_bus_id[5]; 184typedef char acpi_bus_id[5];
187typedef unsigned long acpi_bus_address; 185typedef unsigned long acpi_bus_address;
188typedef char acpi_hardware_id[9]; 186typedef char acpi_hardware_id[15];
189typedef char acpi_unique_id[9]; 187typedef char acpi_unique_id[9];
190typedef char acpi_device_name[40]; 188typedef char acpi_device_name[40];
191typedef char acpi_device_class[20]; 189typedef char acpi_device_class[20];
@@ -296,11 +294,14 @@ struct acpi_device {
296 struct acpi_device_ops ops; 294 struct acpi_device_ops ops;
297 struct acpi_driver *driver; 295 struct acpi_driver *driver;
298 void *driver_data; 296 void *driver_data;
299 struct kobject kobj;
300 struct device dev; 297 struct device dev;
298 struct acpi_bus_ops bus_ops; /* workaround for different code path for hotplug */
299 enum acpi_bus_removal_type removal_type; /* indicate for different removal type */
301}; 300};
302 301
303#define acpi_driver_data(d) ((d)->driver_data) 302#define acpi_driver_data(d) ((d)->driver_data)
303#define to_acpi_device(d) container_of(d, struct acpi_device, dev)
304#define to_acpi_driver(d) container_of(d, struct acpi_driver, drv)
304 305
305/* 306/*
306 * Events 307 * Events
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 6a5bdcefec64..4dc8a5043ef0 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -36,13 +36,14 @@
36 36
37/* _HID definitions */ 37/* _HID definitions */
38 38
39#define ACPI_POWER_HID "ACPI_PWR" 39#define ACPI_POWER_HID "power_resource"
40#define ACPI_PROCESSOR_HID "ACPI_CPU" 40#define ACPI_PROCESSOR_HID "ACPI0007"
41#define ACPI_SYSTEM_HID "ACPI_SYS" 41#define ACPI_SYSTEM_HID "acpi_system"
42#define ACPI_THERMAL_HID "ACPI_THM" 42#define ACPI_THERMAL_HID "thermal"
43#define ACPI_BUTTON_HID_POWERF "ACPI_FPB" 43#define ACPI_BUTTON_HID_POWERF "button_power"
44#define ACPI_BUTTON_HID_SLEEPF "ACPI_FSB" 44#define ACPI_BUTTON_HID_SLEEPF "button_sleep"
45 45#define ACPI_VIDEO_HID "video"
46#define ACPI_BAY_HID "bay"
46/* -------------------------------------------------------------------------- 47/* --------------------------------------------------------------------------
47 PCI 48 PCI
48 -------------------------------------------------------------------------- */ 49 -------------------------------------------------------------------------- */
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 0cd63bce0ae4..781394b9efe0 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -8,7 +8,7 @@
8 *****************************************************************************/ 8 *****************************************************************************/
9 9
10/* 10/*
11 * Copyright (C) 2000 - 2006, R. Byron Moore 11 * Copyright (C) 2000 - 2007, R. Byron Moore
12 * All rights reserved. 12 * All rights reserved.
13 * 13 *
14 * Redistribution and use in source and binary forms, with or without 14 * Redistribution and use in source and binary forms, with or without
@@ -85,7 +85,7 @@ acpi_status acpi_os_terminate(void);
85/* 85/*
86 * ACPI Table interfaces 86 * ACPI Table interfaces
87 */ 87 */
88acpi_status acpi_os_get_root_pointer(u32 flags, struct acpi_pointer *address); 88acpi_physical_address acpi_os_get_root_pointer(void);
89 89
90acpi_status 90acpi_status
91acpi_os_predefined_override(const struct acpi_predefined_names *init_val, 91acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
@@ -143,9 +143,7 @@ void acpi_os_release_mutex(acpi_mutex handle);
143 */ 143 */
144void *acpi_os_allocate(acpi_size size); 144void *acpi_os_allocate(acpi_size size);
145 145
146acpi_status 146void __iomem *acpi_os_map_memory(acpi_physical_address where, acpi_native_uint length);
147acpi_os_map_memory(acpi_physical_address physical_address,
148 acpi_size size, void __iomem ** logical_address);
149 147
150void acpi_os_unmap_memory(void __iomem * logical_address, acpi_size size); 148void acpi_os_unmap_memory(void __iomem * logical_address, acpi_size size);
151 149
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 81458767a90e..e08f7df85a4f 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -51,6 +51,10 @@
51/* 51/*
52 * Global interfaces 52 * Global interfaces
53 */ 53 */
54acpi_status
55acpi_initialize_tables(struct acpi_table_desc *initial_storage,
56 u32 initial_table_count, u8 allow_resize);
57
54acpi_status acpi_initialize_subsystem(void); 58acpi_status acpi_initialize_subsystem(void);
55 59
56acpi_status acpi_enable_subsystem(u32 flags); 60acpi_status acpi_enable_subsystem(u32 flags);
@@ -92,30 +96,28 @@ void acpi_free(void *address);
92/* 96/*
93 * ACPI table manipulation interfaces 97 * ACPI table manipulation interfaces
94 */ 98 */
95acpi_status 99acpi_status acpi_reallocate_root_table(void);
96acpi_find_root_pointer(u32 flags, struct acpi_pointer *rsdp_address); 100
101acpi_status acpi_find_root_pointer(acpi_native_uint * rsdp_address);
97 102
98acpi_status acpi_load_tables(void); 103acpi_status acpi_load_tables(void);
99 104
100acpi_status acpi_load_table(struct acpi_table_header *table_ptr); 105acpi_status acpi_load_table(struct acpi_table_header *table_ptr);
101 106
102acpi_status acpi_unload_table_id(acpi_table_type table_type, acpi_owner_id id); 107acpi_status acpi_unload_table_id(acpi_owner_id id);
103 108
104#ifdef ACPI_FUTURE_USAGE
105acpi_status acpi_unload_table(acpi_table_type table_type);
106acpi_status 109acpi_status
107acpi_get_table_header(acpi_table_type table_type, 110acpi_get_table_header(acpi_string signature,
108 u32 instance, struct acpi_table_header *out_table_header); 111 acpi_native_uint instance,
109#endif /* ACPI_FUTURE_USAGE */ 112 struct acpi_table_header *out_table_header);
110 113
111acpi_status 114acpi_status
112acpi_get_table(acpi_table_type table_type, 115acpi_get_table(acpi_string signature,
113 u32 instance, struct acpi_buffer *ret_buffer); 116 acpi_native_uint instance, struct acpi_table_header **out_table);
114 117
115acpi_status 118acpi_status
116acpi_get_firmware_table(acpi_string signature, 119acpi_get_table_by_index(acpi_native_uint table_index,
117 u32 instance, 120 struct acpi_table_header **out_table);
118 u32 flags, struct acpi_table_header **table_pointer);
119 121
120/* 122/*
121 * Namespace and name interfaces 123 * Namespace and name interfaces
@@ -310,9 +312,9 @@ acpi_resource_to_address64(struct acpi_resource *resource,
310/* 312/*
311 * Hardware (ACPI device) interfaces 313 * Hardware (ACPI device) interfaces
312 */ 314 */
313acpi_status acpi_get_register(u32 register_id, u32 * return_value, u32 flags); 315acpi_status acpi_get_register(u32 register_id, u32 * return_value);
314 316
315acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags); 317acpi_status acpi_set_register(u32 register_id, u32 value);
316 318
317acpi_status 319acpi_status
318acpi_set_firmware_waking_vector(acpi_physical_address physical_address); 320acpi_set_firmware_waking_vector(acpi_physical_address physical_address);
diff --git a/include/acpi/acresrc.h b/include/acpi/acresrc.h
index 80a3b33571b4..9486ab266a5e 100644
--- a/include/acpi/acresrc.h
+++ b/include/acpi/acresrc.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acstruct.h b/include/acpi/acstruct.h
index 5e8095f0f78f..aeb4498e5e06 100644
--- a/include/acpi/acstruct.h
+++ b/include/acpi/acstruct.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -139,7 +139,8 @@ struct acpi_init_walk_info {
139 u16 buffer_init; 139 u16 buffer_init;
140 u16 package_init; 140 u16 package_init;
141 u16 object_count; 141 u16 object_count;
142 struct acpi_table_desc *table_desc; 142 acpi_owner_id owner_id;
143 acpi_native_uint table_index;
143}; 144};
144 145
145struct acpi_get_devices_info { 146struct acpi_get_devices_info {
diff --git a/include/acpi/actables.h b/include/acpi/actables.h
index 4dbaf02fe526..2b9f46f9da4d 100644
--- a/include/acpi/actables.h
+++ b/include/acpi/actables.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -44,105 +44,75 @@
44#ifndef __ACTABLES_H__ 44#ifndef __ACTABLES_H__
45#define __ACTABLES_H__ 45#define __ACTABLES_H__
46 46
47/* Used in acpi_tb_map_acpi_table for size parameter if table header is to be used */ 47acpi_status acpi_allocate_root_table(u32 initial_table_count);
48
49#define SIZE_IN_HEADER 0
50 48
51/* 49/*
52 * tbconvrt - Table conversion routines 50 * tbfadt - FADT parse/convert/validate
53 */ 51 */
54acpi_status acpi_tb_convert_to_xsdt(struct acpi_table_desc *table_info); 52void acpi_tb_parse_fadt(acpi_native_uint table_index, u8 flags);
55
56acpi_status acpi_tb_convert_table_fadt(void);
57 53
58acpi_status acpi_tb_build_common_facs(struct acpi_table_desc *table_info); 54void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length);
59
60u32
61acpi_tb_get_table_count(struct rsdp_descriptor *RSDP,
62 struct acpi_table_header *RSDT);
63 55
64/* 56/*
65 * tbget - Table "get" routines 57 * tbfind - find ACPI table
66 */ 58 */
67acpi_status 59acpi_status
68acpi_tb_get_table(struct acpi_pointer *address, 60acpi_tb_find_table(char *signature,
69 struct acpi_table_desc *table_info); 61 char *oem_id,
70 62 char *oem_table_id, acpi_native_uint * table_index);
71acpi_status
72acpi_tb_get_table_header(struct acpi_pointer *address,
73 struct acpi_table_header *return_header);
74
75acpi_status
76acpi_tb_get_table_body(struct acpi_pointer *address,
77 struct acpi_table_header *header,
78 struct acpi_table_desc *table_info);
79
80acpi_status
81acpi_tb_get_table_ptr(acpi_table_type table_type,
82 u32 instance, struct acpi_table_header **table_ptr_loc);
83
84acpi_status acpi_tb_verify_rsdp(struct acpi_pointer *address);
85
86void acpi_tb_get_rsdt_address(struct acpi_pointer *out_address);
87
88acpi_status acpi_tb_validate_rsdt(struct acpi_table_header *table_ptr);
89 63
90/* 64/*
91 * tbgetall - get multiple required tables 65 * tbinstal - Table removal and deletion
92 */ 66 */
93acpi_status acpi_tb_get_required_tables(void); 67acpi_status acpi_tb_resize_root_table_list(void);
94 68
95/* 69acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc);
96 * tbinstall - Table installation
97 */
98acpi_status acpi_tb_install_table(struct acpi_table_desc *table_info);
99 70
100acpi_status 71acpi_status
101acpi_tb_recognize_table(struct acpi_table_desc *table_info, u8 search_type); 72acpi_tb_add_table(struct acpi_table_desc *table_desc,
73 acpi_native_uint * table_index);
102 74
103acpi_status 75acpi_status
104acpi_tb_init_table_descriptor(acpi_table_type table_type, 76acpi_tb_store_table(acpi_physical_address address,
105 struct acpi_table_desc *table_info); 77 struct acpi_table_header *table,
78 u32 length, u8 flags, acpi_native_uint * table_index);
106 79
107/* 80void acpi_tb_delete_table(struct acpi_table_desc *table_desc);
108 * tbremove - Table removal and deletion
109 */
110void acpi_tb_delete_all_tables(void);
111 81
112void acpi_tb_delete_tables_by_type(acpi_table_type type); 82void acpi_tb_terminate(void);
113 83
114void acpi_tb_delete_single_table(struct acpi_table_desc *table_desc); 84void acpi_tb_delete_namespace_by_owner(acpi_native_uint table_index);
115 85
116struct acpi_table_desc *acpi_tb_uninstall_table(struct acpi_table_desc 86acpi_status acpi_tb_allocate_owner_id(acpi_native_uint table_index);
117 *table_desc); 87
88acpi_status acpi_tb_release_owner_id(acpi_native_uint table_index);
118 89
119/*
120 * tbxfroot - RSDP, RSDT utilities
121 */
122acpi_status 90acpi_status
123acpi_tb_find_table(char *signature, 91acpi_tb_get_owner_id(acpi_native_uint table_index, acpi_owner_id * owner_id);
124 char *oem_id,
125 char *oem_table_id, struct acpi_table_header **table_ptr);
126 92
127acpi_status acpi_tb_get_table_rsdt(void); 93u8 acpi_tb_is_table_loaded(acpi_native_uint table_index);
128 94
129acpi_status acpi_tb_validate_rsdp(struct rsdp_descriptor *rsdp); 95void acpi_tb_set_table_loaded_flag(acpi_native_uint table_index, u8 is_loaded);
130 96
131/* 97/*
132 * tbutils - common table utilities 98 * tbutils - table manager utilities
133 */ 99 */
134acpi_status acpi_tb_is_table_installed(struct acpi_table_desc *new_table_desc); 100u8 acpi_tb_tables_loaded(void);
135 101
136acpi_status 102void
137acpi_tb_verify_table_checksum(struct acpi_table_header *table_header); 103acpi_tb_print_table_header(acpi_physical_address address,
104 struct acpi_table_header *header);
138 105
139u8 acpi_tb_sum_table(void *buffer, u32 length); 106u8 acpi_tb_checksum(u8 * buffer, acpi_native_uint length);
140 107
141u8 acpi_tb_generate_checksum(struct acpi_table_header *table); 108acpi_status
109acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length);
142 110
143void acpi_tb_set_checksum(struct acpi_table_header *table); 111void
112acpi_tb_install_table(acpi_physical_address address,
113 u8 flags, char *signature, acpi_native_uint table_index);
144 114
145acpi_status 115acpi_status
146acpi_tb_validate_table_header(struct acpi_table_header *table_header); 116acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags);
147 117
148#endif /* __ACTABLES_H__ */ 118#endif /* __ACTABLES_H__ */
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index b125ceed9cb7..09469e7db6a5 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -48,15 +48,15 @@
48 * Values for description table header signatures. Useful because they make 48 * Values for description table header signatures. Useful because they make
49 * it more difficult to inadvertently type in the wrong signature. 49 * it more difficult to inadvertently type in the wrong signature.
50 */ 50 */
51#define DSDT_SIG "DSDT" /* Differentiated System Description Table */ 51#define ACPI_SIG_DSDT "DSDT" /* Differentiated System Description Table */
52#define FADT_SIG "FACP" /* Fixed ACPI Description Table */ 52#define ACPI_SIG_FADT "FACP" /* Fixed ACPI Description Table */
53#define FACS_SIG "FACS" /* Firmware ACPI Control Structure */ 53#define ACPI_SIG_FACS "FACS" /* Firmware ACPI Control Structure */
54#define PSDT_SIG "PSDT" /* Persistent System Description Table */ 54#define ACPI_SIG_PSDT "PSDT" /* Persistent System Description Table */
55#define RSDP_SIG "RSD PTR " /* Root System Description Pointer */ 55#define ACPI_SIG_RSDP "RSD PTR " /* Root System Description Pointer */
56#define RSDT_SIG "RSDT" /* Root System Description Table */ 56#define ACPI_SIG_RSDT "RSDT" /* Root System Description Table */
57#define XSDT_SIG "XSDT" /* Extended System Description Table */ 57#define ACPI_SIG_XSDT "XSDT" /* Extended System Description Table */
58#define SSDT_SIG "SSDT" /* Secondary System Description Table */ 58#define ACPI_SIG_SSDT "SSDT" /* Secondary System Description Table */
59#define RSDP_NAME "RSDP" 59#define ACPI_RSDP_NAME "RSDP" /* Short name for RSDP, not signature */
60 60
61/* 61/*
62 * All tables and structures must be byte-packed to match the ACPI 62 * All tables and structures must be byte-packed to match the ACPI
@@ -83,27 +83,29 @@
83 * 83 *
84 ******************************************************************************/ 84 ******************************************************************************/
85 85
86#define ACPI_TABLE_HEADER_DEF \
87 char signature[4]; /* ASCII table signature */\
88 u32 length; /* Length of table in bytes, including this header */\
89 u8 revision; /* ACPI Specification minor version # */\
90 u8 checksum; /* To make sum of entire table == 0 */\
91 char oem_id[6]; /* ASCII OEM identification */\
92 char oem_table_id[8]; /* ASCII OEM table identification */\
93 u32 oem_revision; /* OEM revision number */\
94 char asl_compiler_id[4]; /* ASCII ASL compiler vendor ID */\
95 u32 asl_compiler_revision; /* ASL compiler version */
96
97struct acpi_table_header { 86struct acpi_table_header {
98ACPI_TABLE_HEADER_DEF}; 87 char signature[ACPI_NAME_SIZE]; /* ASCII table signature */
88 u32 length; /* Length of table in bytes, including this header */
89 u8 revision; /* ACPI Specification minor version # */
90 u8 checksum; /* To make sum of entire table == 0 */
91 char oem_id[ACPI_OEM_ID_SIZE]; /* ASCII OEM identification */
92 char oem_table_id[ACPI_OEM_TABLE_ID_SIZE]; /* ASCII OEM table identification */
93 u32 oem_revision; /* OEM revision number */
94 char asl_compiler_id[ACPI_NAME_SIZE]; /* ASCII ASL compiler vendor ID */
95 u32 asl_compiler_revision; /* ASL compiler version */
96};
99 97
100/* 98/*
101 * GAS - Generic Address Structure (ACPI 2.0+) 99 * GAS - Generic Address Structure (ACPI 2.0+)
100 *
101 * Note: Since this structure is used in the ACPI tables, it is byte aligned.
102 * If misalignment is not supported, access to the Address field must be
103 * performed with care.
102 */ 104 */
103struct acpi_generic_address { 105struct acpi_generic_address {
104 u8 address_space_id; /* Address space where struct or register exists */ 106 u8 space_id; /* Address space where struct or register exists */
105 u8 register_bit_width; /* Size in bits of given register */ 107 u8 bit_width; /* Size in bits of given register */
106 u8 register_bit_offset; /* Bit offset within the register */ 108 u8 bit_offset; /* Bit offset within the register */
107 u8 access_width; /* Minimum Access size (ACPI 3.0) */ 109 u8 access_width; /* Minimum Access size (ACPI 3.0) */
108 u64 address; /* 64-bit address of struct or register */ 110 u64 address; /* 64-bit address of struct or register */
109}; 111};
@@ -114,10 +116,10 @@ struct acpi_generic_address {
114 * 116 *
115 ******************************************************************************/ 117 ******************************************************************************/
116 118
117struct rsdp_descriptor { 119struct acpi_table_rsdp {
118 char signature[8]; /* ACPI signature, contains "RSD PTR " */ 120 char signature[8]; /* ACPI signature, contains "RSD PTR " */
119 u8 checksum; /* ACPI 1.0 checksum */ 121 u8 checksum; /* ACPI 1.0 checksum */
120 char oem_id[6]; /* OEM identification */ 122 char oem_id[ACPI_OEM_ID_SIZE]; /* OEM identification */
121 u8 revision; /* Must be (0) for ACPI 1.0 or (2) for ACPI 2.0+ */ 123 u8 revision; /* Must be (0) for ACPI 1.0 or (2) for ACPI 2.0+ */
122 u32 rsdt_physical_address; /* 32-bit physical address of the RSDT */ 124 u32 rsdt_physical_address; /* 32-bit physical address of the RSDT */
123 u32 length; /* Table length in bytes, including header (ACPI 2.0+) */ 125 u32 length; /* Table length in bytes, including header (ACPI 2.0+) */
@@ -134,12 +136,14 @@ struct rsdp_descriptor {
134 * 136 *
135 ******************************************************************************/ 137 ******************************************************************************/
136 138
137struct rsdt_descriptor { 139struct acpi_table_rsdt {
138 ACPI_TABLE_HEADER_DEF u32 table_offset_entry[1]; /* Array of pointers to ACPI tables */ 140 struct acpi_table_header header; /* Common ACPI table header */
141 u32 table_offset_entry[1]; /* Array of pointers to ACPI tables */
139}; 142};
140 143
141struct xsdt_descriptor { 144struct acpi_table_xsdt {
142 ACPI_TABLE_HEADER_DEF u64 table_offset_entry[1]; /* Array of pointers to ACPI tables */ 145 struct acpi_table_header header; /* Common ACPI table header */
146 u64 table_offset_entry[1]; /* Array of pointers to ACPI tables */
143}; 147};
144 148
145/******************************************************************************* 149/*******************************************************************************
@@ -148,36 +152,27 @@ struct xsdt_descriptor {
148 * 152 *
149 ******************************************************************************/ 153 ******************************************************************************/
150 154
151struct facs_descriptor { 155struct acpi_table_facs {
152 char signature[4]; /* ASCII table signature */ 156 char signature[4]; /* ASCII table signature */
153 u32 length; /* Length of structure, in bytes */ 157 u32 length; /* Length of structure, in bytes */
154 u32 hardware_signature; /* Hardware configuration signature */ 158 u32 hardware_signature; /* Hardware configuration signature */
155 u32 firmware_waking_vector; /* 32-bit physical address of the Firmware Waking Vector */ 159 u32 firmware_waking_vector; /* 32-bit physical address of the Firmware Waking Vector */
156 u32 global_lock; /* Global Lock for shared hardware resources */ 160 u32 global_lock; /* Global Lock for shared hardware resources */
157 161 u32 flags;
158 /* Flags (32 bits) */
159
160 u8 S4bios_f:1; /* 00: S4BIOS support is present */
161 u8:7; /* 01-07: Reserved, must be zero */
162 u8 reserved1[3]; /* 08-31: Reserved, must be zero */
163
164 u64 xfirmware_waking_vector; /* 64-bit version of the Firmware Waking Vector (ACPI 2.0+) */ 162 u64 xfirmware_waking_vector; /* 64-bit version of the Firmware Waking Vector (ACPI 2.0+) */
165 u8 version; /* Version of this table (ACPI 2.0+) */ 163 u8 version; /* Version of this table (ACPI 2.0+) */
166 u8 reserved[31]; /* Reserved, must be zero */ 164 u8 reserved[31]; /* Reserved, must be zero */
167}; 165};
168 166
167/* Flag macros */
168
169#define ACPI_FACS_S4_BIOS_PRESENT (1) /* 00: S4BIOS support is present */
170
171/* Global lock flags */
172
169#define ACPI_GLOCK_PENDING 0x01 /* 00: Pending global lock ownership */ 173#define ACPI_GLOCK_PENDING 0x01 /* 00: Pending global lock ownership */
170#define ACPI_GLOCK_OWNED 0x02 /* 01: Global lock is owned */ 174#define ACPI_GLOCK_OWNED 0x02 /* 01: Global lock is owned */
171 175
172/*
173 * Common FACS - This is a version-independent FACS structure used for internal use only
174 */
175struct acpi_common_facs {
176 u32 *global_lock;
177 u64 *firmware_waking_vector;
178 u8 vector_width;
179};
180
181/******************************************************************************* 176/*******************************************************************************
182 * 177 *
183 * FADT - Fixed ACPI Description Table (Signature "FACP") 178 * FADT - Fixed ACPI Description Table (Signature "FACP")
@@ -186,121 +181,98 @@ struct acpi_common_facs {
186 181
187/* Fields common to all versions of the FADT */ 182/* Fields common to all versions of the FADT */
188 183
189#define ACPI_FADT_COMMON \ 184struct acpi_table_fadt {
190 ACPI_TABLE_HEADER_DEF \ 185 struct acpi_table_header header; /* Common ACPI table header */
191 u32 V1_firmware_ctrl; /* 32-bit physical address of FACS */ \ 186 u32 facs; /* 32-bit physical address of FACS */
192 u32 V1_dsdt; /* 32-bit physical address of DSDT */ \ 187 u32 dsdt; /* 32-bit physical address of DSDT */
193 u8 reserved1; /* System Interrupt Model isn't used in ACPI 2.0*/ \ 188 u8 model; /* System Interrupt Model (ACPI 1.0) - not used in ACPI 2.0+ */
194 u8 prefer_PM_profile; /* Conveys preferred power management profile to OSPM. */ \ 189 u8 preferred_profile; /* Conveys preferred power management profile to OSPM. */
195 u16 sci_int; /* System vector of SCI interrupt */ \ 190 u16 sci_interrupt; /* System vector of SCI interrupt */
196 u32 smi_cmd; /* Port address of SMI command port */ \ 191 u32 smi_command; /* 32-bit Port address of SMI command port */
197 u8 acpi_enable; /* Value to write to smi_cmd to enable ACPI */ \ 192 u8 acpi_enable; /* Value to write to smi_cmd to enable ACPI */
198 u8 acpi_disable; /* Value to write to smi_cmd to disable ACPI */ \ 193 u8 acpi_disable; /* Value to write to smi_cmd to disable ACPI */
199 u8 S4bios_req; /* Value to write to SMI CMD to enter S4BIOS state */ \ 194 u8 S4bios_request; /* Value to write to SMI CMD to enter S4BIOS state */
200 u8 pstate_cnt; /* Processor performance state control*/ \ 195 u8 pstate_control; /* Processor performance state control */
201 u32 V1_pm1a_evt_blk; /* Port address of Power Mgt 1a Event Reg Blk */ \ 196 u32 pm1a_event_block; /* 32-bit Port address of Power Mgt 1a Event Reg Blk */
202 u32 V1_pm1b_evt_blk; /* Port address of Power Mgt 1b Event Reg Blk */ \ 197 u32 pm1b_event_block; /* 32-bit Port address of Power Mgt 1b Event Reg Blk */
203 u32 V1_pm1a_cnt_blk; /* Port address of Power Mgt 1a Control Reg Blk */ \ 198 u32 pm1a_control_block; /* 32-bit Port address of Power Mgt 1a Control Reg Blk */
204 u32 V1_pm1b_cnt_blk; /* Port address of Power Mgt 1b Control Reg Blk */ \ 199 u32 pm1b_control_block; /* 32-bit Port address of Power Mgt 1b Control Reg Blk */
205 u32 V1_pm2_cnt_blk; /* Port address of Power Mgt 2 Control Reg Blk */ \ 200 u32 pm2_control_block; /* 32-bit Port address of Power Mgt 2 Control Reg Blk */
206 u32 V1_pm_tmr_blk; /* Port address of Power Mgt Timer Ctrl Reg Blk */ \ 201 u32 pm_timer_block; /* 32-bit Port address of Power Mgt Timer Ctrl Reg Blk */
207 u32 V1_gpe0_blk; /* Port addr of General Purpose acpi_event 0 Reg Blk */ \ 202 u32 gpe0_block; /* 32-bit Port address of General Purpose Event 0 Reg Blk */
208 u32 V1_gpe1_blk; /* Port addr of General Purpose acpi_event 1 Reg Blk */ \ 203 u32 gpe1_block; /* 32-bit Port address of General Purpose Event 1 Reg Blk */
209 u8 pm1_evt_len; /* Byte Length of ports at pm1_x_evt_blk */ \ 204 u8 pm1_event_length; /* Byte Length of ports at pm1x_event_block */
210 u8 pm1_cnt_len; /* Byte Length of ports at pm1_x_cnt_blk */ \ 205 u8 pm1_control_length; /* Byte Length of ports at pm1x_control_block */
211 u8 pm2_cnt_len; /* Byte Length of ports at pm2_cnt_blk */ \ 206 u8 pm2_control_length; /* Byte Length of ports at pm2_control_block */
212 u8 pm_tm_len; /* Byte Length of ports at pm_tm_blk */ \ 207 u8 pm_timer_length; /* Byte Length of ports at pm_timer_block */
213 u8 gpe0_blk_len; /* Byte Length of ports at gpe0_blk */ \ 208 u8 gpe0_block_length; /* Byte Length of ports at gpe0_block */
214 u8 gpe1_blk_len; /* Byte Length of ports at gpe1_blk */ \ 209 u8 gpe1_block_length; /* Byte Length of ports at gpe1_block */
215 u8 gpe1_base; /* Offset in gpe model where gpe1 events start */ \ 210 u8 gpe1_base; /* Offset in GPE number space where GPE1 events start */
216 u8 cst_cnt; /* Support for the _CST object and C States change notification.*/ \ 211 u8 cst_control; /* Support for the _CST object and C States change notification */
217 u16 plvl2_lat; /* Worst case HW latency to enter/exit C2 state */ \ 212 u16 C2latency; /* Worst case HW latency to enter/exit C2 state */
218 u16 plvl3_lat; /* Worst case HW latency to enter/exit C3 state */ \ 213 u16 C3latency; /* Worst case HW latency to enter/exit C3 state */
219 u16 flush_size; /* Processor's memory cache line width, in bytes */ \ 214 u16 flush_size; /* Processor's memory cache line width, in bytes */
220 u16 flush_stride; /* Number of flush strides that need to be read */ \ 215 u16 flush_stride; /* Number of flush strides that need to be read */
221 u8 duty_offset; /* Processor's duty cycle index in processor's P_CNT reg*/ \ 216 u8 duty_offset; /* Processor duty cycle index in processor's P_CNT reg */
222 u8 duty_width; /* Processor's duty cycle value bit width in P_CNT register.*/ \ 217 u8 duty_width; /* Processor duty cycle value bit width in P_CNT register. */
223 u8 day_alrm; /* Index to day-of-month alarm in RTC CMOS RAM */ \ 218 u8 day_alarm; /* Index to day-of-month alarm in RTC CMOS RAM */
224 u8 mon_alrm; /* Index to month-of-year alarm in RTC CMOS RAM */ \ 219 u8 month_alarm; /* Index to month-of-year alarm in RTC CMOS RAM */
225 u8 century; /* Index to century in RTC CMOS RAM */ \ 220 u8 century; /* Index to century in RTC CMOS RAM */
226 u16 iapc_boot_arch; /* IA-PC Boot Architecture Flags. See Table 5-10 for description*/ \ 221 u16 boot_flags; /* IA-PC Boot Architecture Flags. See Table 5-10 for description */
227 u8 reserved2; /* Reserved, must be zero */ 222 u8 reserved; /* Reserved, must be zero */
228 223 u32 flags; /* Miscellaneous flag bits (see below for individual flags) */
229/* 224 struct acpi_generic_address reset_register; /* 64-bit address of the Reset register */
230 * ACPI 2.0+ FADT
231 */
232struct fadt_descriptor {
233 ACPI_FADT_COMMON
234 /* Flags (32 bits) */
235 u8 wb_invd:1; /* 00: The wbinvd instruction works properly */
236 u8 wb_invd_flush:1; /* 01: The wbinvd flushes but does not invalidate */
237 u8 proc_c1:1; /* 02: All processors support C1 state */
238 u8 plvl2_up:1; /* 03: C2 state works on MP system */
239 u8 pwr_button:1; /* 04: Power button is handled as a generic feature */
240 u8 sleep_button:1; /* 05: Sleep button is handled as a generic feature, or not present */
241 u8 fixed_rTC:1; /* 06: RTC wakeup stat not in fixed register space */
242 u8 rtcs4:1; /* 07: RTC wakeup stat not possible from S4 */
243 u8 tmr_val_ext:1; /* 08: tmr_val is 32 bits 0=24-bits */
244 u8 dock_cap:1; /* 09: Docking supported */
245 u8 reset_reg_sup:1; /* 10: System reset via the FADT RESET_REG supported */
246 u8 sealed_case:1; /* 11: No internal expansion capabilities and case is sealed */
247 u8 headless:1; /* 12: No local video capabilities or local input devices */
248 u8 cpu_sw_sleep:1; /* 13: Must execute native instruction after writing SLP_TYPx register */
249
250 u8 pci_exp_wak:1; /* 14: System supports PCIEXP_WAKE (STS/EN) bits (ACPI 3.0) */
251 u8 use_platform_clock:1; /* 15: OSPM should use platform-provided timer (ACPI 3.0) */
252 u8 S4rtc_sts_valid:1; /* 16: Contents of RTC_STS valid after S4 wake (ACPI 3.0) */
253 u8 remote_power_on_capable:1; /* 17: System is compatible with remote power on (ACPI 3.0) */
254 u8 force_apic_cluster_model:1; /* 18: All local APICs must use cluster model (ACPI 3.0) */
255 u8 force_apic_physical_destination_mode:1; /* 19: All local x_aPICs must use physical dest mode (ACPI 3.0) */
256 u8:4; /* 20-23: Reserved, must be zero */
257 u8 reserved3; /* 24-31: Reserved, must be zero */
258
259 struct acpi_generic_address reset_register; /* Reset register address in GAS format */
260 u8 reset_value; /* Value to write to the reset_register port to reset the system */ 225 u8 reset_value; /* Value to write to the reset_register port to reset the system */
261 u8 reserved4[3]; /* These three bytes must be zero */ 226 u8 reserved4[3]; /* Reserved, must be zero */
262 u64 xfirmware_ctrl; /* 64-bit physical address of FACS */ 227 u64 Xfacs; /* 64-bit physical address of FACS */
263 u64 Xdsdt; /* 64-bit physical address of DSDT */ 228 u64 Xdsdt; /* 64-bit physical address of DSDT */
264 struct acpi_generic_address xpm1a_evt_blk; /* Extended Power Mgt 1a acpi_event Reg Blk address */ 229 struct acpi_generic_address xpm1a_event_block; /* 64-bit Extended Power Mgt 1a Event Reg Blk address */
265 struct acpi_generic_address xpm1b_evt_blk; /* Extended Power Mgt 1b acpi_event Reg Blk address */ 230 struct acpi_generic_address xpm1b_event_block; /* 64-bit Extended Power Mgt 1b Event Reg Blk address */
266 struct acpi_generic_address xpm1a_cnt_blk; /* Extended Power Mgt 1a Control Reg Blk address */ 231 struct acpi_generic_address xpm1a_control_block; /* 64-bit Extended Power Mgt 1a Control Reg Blk address */
267 struct acpi_generic_address xpm1b_cnt_blk; /* Extended Power Mgt 1b Control Reg Blk address */ 232 struct acpi_generic_address xpm1b_control_block; /* 64-bit Extended Power Mgt 1b Control Reg Blk address */
268 struct acpi_generic_address xpm2_cnt_blk; /* Extended Power Mgt 2 Control Reg Blk address */ 233 struct acpi_generic_address xpm2_control_block; /* 64-bit Extended Power Mgt 2 Control Reg Blk address */
269 struct acpi_generic_address xpm_tmr_blk; /* Extended Power Mgt Timer Ctrl Reg Blk address */ 234 struct acpi_generic_address xpm_timer_block; /* 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */
270 struct acpi_generic_address xgpe0_blk; /* Extended General Purpose acpi_event 0 Reg Blk address */ 235 struct acpi_generic_address xgpe0_block; /* 64-bit Extended General Purpose Event 0 Reg Blk address */
271 struct acpi_generic_address xgpe1_blk; /* Extended General Purpose acpi_event 1 Reg Blk address */ 236 struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */
272}; 237};
273 238
274/* 239/* FADT flags */
275 * "Down-revved" ACPI 2.0 FADT descriptor 240
276 * Defined here to allow compiler to generate the length of the struct 241#define ACPI_FADT_WBINVD (1) /* 00: The wbinvd instruction works properly */
277 */ 242#define ACPI_FADT_WBINVD_FLUSH (1<<1) /* 01: The wbinvd flushes but does not invalidate */
278struct fadt_descriptor_rev2_minus { 243#define ACPI_FADT_C1_SUPPORTED (1<<2) /* 02: All processors support C1 state */
279 ACPI_FADT_COMMON u32 flags; 244#define ACPI_FADT_C2_MP_SUPPORTED (1<<3) /* 03: C2 state works on MP system */
280 struct acpi_generic_address reset_register; /* Reset register address in GAS format */ 245#define ACPI_FADT_POWER_BUTTON (1<<4) /* 04: Power button is handled as a generic feature */
281 u8 reset_value; /* Value to write to the reset_register port to reset the system. */ 246#define ACPI_FADT_SLEEP_BUTTON (1<<5) /* 05: Sleep button is handled as a generic feature, or not present */
282 u8 reserved7[3]; /* Reserved, must be zero */ 247#define ACPI_FADT_FIXED_RTC (1<<6) /* 06: RTC wakeup stat not in fixed register space */
283}; 248#define ACPI_FADT_S4_RTC_WAKE (1<<7) /* 07: RTC wakeup stat not possible from S4 */
249#define ACPI_FADT_32BIT_TIMER (1<<8) /* 08: tmr_val is 32 bits 0=24-bits */
250#define ACPI_FADT_DOCKING_SUPPORTED (1<<9) /* 09: Docking supported */
251#define ACPI_FADT_RESET_REGISTER (1<<10) /* 10: System reset via the FADT RESET_REG supported */
252#define ACPI_FADT_SEALED_CASE (1<<11) /* 11: No internal expansion capabilities and case is sealed */
253#define ACPI_FADT_HEADLESS (1<<12) /* 12: No local video capabilities or local input devices */
254#define ACPI_FADT_SLEEP_TYPE (1<<13) /* 13: Must execute native instruction after writing SLP_TYPx register */
255#define ACPI_FADT_PCI_EXPRESS_WAKE (1<<14) /* 14: System supports PCIEXP_WAKE (STS/EN) bits (ACPI 3.0) */
256#define ACPI_FADT_PLATFORM_CLOCK (1<<15) /* 15: OSPM should use platform-provided timer (ACPI 3.0) */
257#define ACPI_FADT_S4_RTC_VALID (1<<16) /* 16: Contents of RTC_STS valid after S4 wake (ACPI 3.0) */
258#define ACPI_FADT_REMOTE_POWER_ON (1<<17) /* 17: System is compatible with remote power on (ACPI 3.0) */
259#define ACPI_FADT_APIC_CLUSTER (1<<18) /* 18: All local APICs must use cluster model (ACPI 3.0) */
260#define ACPI_FADT_APIC_PHYSICAL (1<<19) /* 19: All local x_aPICs must use physical dest mode (ACPI 3.0) */
284 261
285/* 262/*
286 * ACPI 1.0 FADT 263 * FADT Prefered Power Management Profiles
287 * Defined here to allow compiler to generate the length of the struct
288 */ 264 */
289struct fadt_descriptor_rev1 { 265enum acpi_prefered_pm_profiles {
290 ACPI_FADT_COMMON u32 flags; 266 PM_UNSPECIFIED = 0,
267 PM_DESKTOP = 1,
268 PM_MOBILE = 2,
269 PM_WORKSTATION = 3,
270 PM_ENTERPRISE_SERVER = 4,
271 PM_SOHO_SERVER = 5,
272 PM_APPLIANCE_PC = 6
291}; 273};
292 274
293/* FADT: Prefered Power Management Profiles */ 275/* FADT Boot Arch Flags */
294
295#define PM_UNSPECIFIED 0
296#define PM_DESKTOP 1
297#define PM_MOBILE 2
298#define PM_WORKSTATION 3
299#define PM_ENTERPRISE_SERVER 4
300#define PM_SOHO_SERVER 5
301#define PM_APPLIANCE_PC 6
302
303/* FADT: Boot Arch Flags */
304 276
305#define BAF_LEGACY_DEVICES 0x0001 277#define BAF_LEGACY_DEVICES 0x0001
306#define BAF_8042_KEYBOARD_CONTROLLER 0x0002 278#define BAF_8042_KEYBOARD_CONTROLLER 0x0002
@@ -312,59 +284,12 @@ struct fadt_descriptor_rev1 {
312 284
313#pragma pack() 285#pragma pack()
314 286
315/* 287#define ACPI_FADT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_fadt, f)
316 * This macro is temporary until the table bitfield flag definitions
317 * are removed and replaced by a Flags field.
318 */
319#define ACPI_FLAG_OFFSET(d,f,o) (u8) (ACPI_OFFSET (d,f) + \
320 sizeof(((d *)0)->f) + o)
321/*
322 * Get the remaining ACPI tables
323 */
324#include "actbl1.h"
325 288
326/* 289/*
327 * ACPI Table information. We save the table address, length, 290 * Get the remaining ACPI tables
328 * and type of memory allocation (mapped or allocated) for each
329 * table for 1) when we exit, and 2) if a new table is installed
330 */ 291 */
331#define ACPI_MEM_NOT_ALLOCATED 0
332#define ACPI_MEM_ALLOCATED 1
333#define ACPI_MEM_MAPPED 2
334
335/* Definitions for the Flags bitfield member of struct acpi_table_support */
336
337#define ACPI_TABLE_SINGLE 0x00
338#define ACPI_TABLE_MULTIPLE 0x01
339#define ACPI_TABLE_EXECUTABLE 0x02
340
341#define ACPI_TABLE_ROOT 0x00
342#define ACPI_TABLE_PRIMARY 0x10
343#define ACPI_TABLE_SECONDARY 0x20
344#define ACPI_TABLE_ALL 0x30
345#define ACPI_TABLE_TYPE_MASK 0x30
346
347/* Data about each known table type */
348
349struct acpi_table_support {
350 char *name;
351 char *signature;
352 void **global_ptr;
353 u8 sig_length;
354 u8 flags;
355};
356
357extern u8 acpi_fadt_is_v1; /* is set to 1 if FADT is revision 1,
358 * needed for certain workarounds */
359/* Macros used to generate offsets to specific table fields */
360
361#define ACPI_FACS_OFFSET(f) (u8) ACPI_OFFSET (struct facs_descriptor,f)
362#define ACPI_FADT_OFFSET(f) (u8) ACPI_OFFSET (struct fadt_descriptor, f)
363#define ACPI_GAS_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_generic_address,f)
364#define ACPI_HDR_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_header,f)
365#define ACPI_RSDP_OFFSET(f) (u8) ACPI_OFFSET (struct rsdp_descriptor,f)
366 292
367#define ACPI_FADT_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct fadt_descriptor,f,o) 293#include <acpi/actbl1.h>
368#define ACPI_FACS_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct facs_descriptor,f,o)
369 294
370#endif /* __ACTBL_H__ */ 295#endif /* __ACTBL_H__ */
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 745a6445a4f9..4e5d3ca53a8e 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -61,6 +61,7 @@
61#define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */ 61#define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */
62#define ACPI_SIG_CPEP "CPEP" /* Corrected Platform Error Polling table */ 62#define ACPI_SIG_CPEP "CPEP" /* Corrected Platform Error Polling table */
63#define ACPI_SIG_DBGP "DBGP" /* Debug Port table */ 63#define ACPI_SIG_DBGP "DBGP" /* Debug Port table */
64#define ACPI_SIG_DMAR "DMAR" /* DMA Remapping table */
64#define ACPI_SIG_ECDT "ECDT" /* Embedded Controller Boot Resources Table */ 65#define ACPI_SIG_ECDT "ECDT" /* Embedded Controller Boot Resources Table */
65#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */ 66#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */
66#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */ 67#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */
@@ -73,12 +74,6 @@
73#define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */ 74#define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */
74#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */ 75#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */
75 76
76/* Legacy names */
77
78#define APIC_SIG "APIC" /* Multiple APIC Description Table */
79#define BOOT_SIG "BOOT" /* Simple Boot Flag Table */
80#define SBST_SIG "SBST" /* Smart Battery Specification Table */
81
82/* 77/*
83 * All tables must be byte-packed to match the ACPI specification, since 78 * All tables must be byte-packed to match the ACPI specification, since
84 * the tables are provided by the system BIOS. 79 * the tables are provided by the system BIOS.
@@ -91,31 +86,43 @@
91 * portable, so do not use any other bitfield types. 86 * portable, so do not use any other bitfield types.
92 */ 87 */
93 88
89/* Common Sub-table header (used in MADT, SRAT, etc.) */
90
91struct acpi_subtable_header {
92 u8 type;
93 u8 length;
94};
95
94/******************************************************************************* 96/*******************************************************************************
95 * 97 *
96 * ASF - Alert Standard Format table (Signature "ASF!") 98 * ASF - Alert Standard Format table (Signature "ASF!")
97 * 99 *
100 * Conforms to the Alert Standard Format Specification V2.0, 23 April 2003
101 *
98 ******************************************************************************/ 102 ******************************************************************************/
99 103
100struct acpi_table_asf { 104struct acpi_table_asf {
101ACPI_TABLE_HEADER_DEF}; 105 struct acpi_table_header header; /* Common ACPI table header */
106};
102 107
103#define ACPI_ASF_HEADER_DEF \ 108/* ASF subtable header */
104 u8 type; \
105 u8 reserved; \
106 u16 length;
107 109
108struct acpi_asf_header { 110struct acpi_asf_header {
109ACPI_ASF_HEADER_DEF}; 111 u8 type;
112 u8 reserved;
113 u16 length;
114};
110 115
111/* Values for Type field */ 116/* Values for Type field above */
112 117
113#define ASF_INFO 0 118enum acpi_asf_type {
114#define ASF_ALERT 1 119 ACPI_ASF_TYPE_INFO = 0,
115#define ASF_CONTROL 2 120 ACPI_ASF_TYPE_ALERT = 1,
116#define ASF_BOOT 3 121 ACPI_ASF_TYPE_CONTROL = 2,
117#define ASF_ADDRESS 4 122 ACPI_ASF_TYPE_BOOT = 3,
118#define ASF_RESERVED 5 123 ACPI_ASF_TYPE_ADDRESS = 4,
124 ACPI_ASF_TYPE_RESERVED = 5
125};
119 126
120/* 127/*
121 * ASF subtables 128 * ASF subtables
@@ -124,7 +131,8 @@ ACPI_ASF_HEADER_DEF};
124/* 0: ASF Information */ 131/* 0: ASF Information */
125 132
126struct acpi_asf_info { 133struct acpi_asf_info {
127 ACPI_ASF_HEADER_DEF u8 min_reset_value; 134 struct acpi_asf_header header;
135 u8 min_reset_value;
128 u8 min_poll_interval; 136 u8 min_poll_interval;
129 u16 system_id; 137 u16 system_id;
130 u32 mfg_id; 138 u32 mfg_id;
@@ -135,26 +143,49 @@ struct acpi_asf_info {
135/* 1: ASF Alerts */ 143/* 1: ASF Alerts */
136 144
137struct acpi_asf_alert { 145struct acpi_asf_alert {
138 ACPI_ASF_HEADER_DEF u8 assert_mask; 146 struct acpi_asf_header header;
147 u8 assert_mask;
139 u8 deassert_mask; 148 u8 deassert_mask;
140 u8 alerts; 149 u8 alerts;
141 u8 data_length; 150 u8 data_length;
142 u8 array[1]; 151};
152
153struct acpi_asf_alert_data {
154 u8 address;
155 u8 command;
156 u8 mask;
157 u8 value;
158 u8 sensor_type;
159 u8 type;
160 u8 offset;
161 u8 source_type;
162 u8 severity;
163 u8 sensor_number;
164 u8 entity;
165 u8 instance;
143}; 166};
144 167
145/* 2: ASF Remote Control */ 168/* 2: ASF Remote Control */
146 169
147struct acpi_asf_remote { 170struct acpi_asf_remote {
148 ACPI_ASF_HEADER_DEF u8 controls; 171 struct acpi_asf_header header;
172 u8 controls;
149 u8 data_length; 173 u8 data_length;
150 u16 reserved2; 174 u16 reserved2;
151 u8 array[1]; 175};
176
177struct acpi_asf_control_data {
178 u8 function;
179 u8 address;
180 u8 command;
181 u8 value;
152}; 182};
153 183
154/* 3: ASF RMCP Boot Options */ 184/* 3: ASF RMCP Boot Options */
155 185
156struct acpi_asf_rmcp { 186struct acpi_asf_rmcp {
157 ACPI_ASF_HEADER_DEF u8 capabilities[7]; 187 struct acpi_asf_header header;
188 u8 capabilities[7];
158 u8 completion_code; 189 u8 completion_code;
159 u32 enterprise_id; 190 u32 enterprise_id;
160 u8 command; 191 u8 command;
@@ -166,9 +197,9 @@ struct acpi_asf_rmcp {
166/* 4: ASF Address */ 197/* 4: ASF Address */
167 198
168struct acpi_asf_address { 199struct acpi_asf_address {
169 ACPI_ASF_HEADER_DEF u8 eprom_address; 200 struct acpi_asf_header header;
201 u8 eprom_address;
170 u8 devices; 202 u8 devices;
171 u8 smbus_addresses[1];
172}; 203};
173 204
174/******************************************************************************* 205/*******************************************************************************
@@ -178,7 +209,8 @@ struct acpi_asf_address {
178 ******************************************************************************/ 209 ******************************************************************************/
179 210
180struct acpi_table_boot { 211struct acpi_table_boot {
181 ACPI_TABLE_HEADER_DEF u8 cmos_index; /* Index in CMOS RAM for the boot register */ 212 struct acpi_table_header header; /* Common ACPI table header */
213 u8 cmos_index; /* Index in CMOS RAM for the boot register */
182 u8 reserved[3]; 214 u8 reserved[3];
183}; 215};
184 216
@@ -189,7 +221,8 @@ struct acpi_table_boot {
189 ******************************************************************************/ 221 ******************************************************************************/
190 222
191struct acpi_table_cpep { 223struct acpi_table_cpep {
192 ACPI_TABLE_HEADER_DEF u64 reserved; 224 struct acpi_table_header header; /* Common ACPI table header */
225 u64 reserved;
193}; 226};
194 227
195/* Subtable */ 228/* Subtable */
@@ -197,9 +230,9 @@ struct acpi_table_cpep {
197struct acpi_cpep_polling { 230struct acpi_cpep_polling {
198 u8 type; 231 u8 type;
199 u8 length; 232 u8 length;
200 u8 processor_id; /* Processor ID */ 233 u8 id; /* Processor ID */
201 u8 processor_eid; /* Processor EID */ 234 u8 eid; /* Processor EID */
202 u32 polling_interval; /* Polling interval (msec) */ 235 u32 interval; /* Polling interval (msec) */
203}; 236};
204 237
205/******************************************************************************* 238/*******************************************************************************
@@ -209,196 +242,281 @@ struct acpi_cpep_polling {
209 ******************************************************************************/ 242 ******************************************************************************/
210 243
211struct acpi_table_dbgp { 244struct acpi_table_dbgp {
212 ACPI_TABLE_HEADER_DEF u8 interface_type; /* 0=full 16550, 1=subset of 16550 */ 245 struct acpi_table_header header; /* Common ACPI table header */
246 u8 type; /* 0=full 16550, 1=subset of 16550 */
213 u8 reserved[3]; 247 u8 reserved[3];
214 struct acpi_generic_address debug_port; 248 struct acpi_generic_address debug_port;
215}; 249};
216 250
217/******************************************************************************* 251/*******************************************************************************
218 * 252 *
219 * ECDT - Embedded Controller Boot Resources Table 253 * DMAR - DMA Remapping table
220 * 254 *
221 ******************************************************************************/ 255 ******************************************************************************/
222 256
223struct ec_boot_resources { 257struct acpi_table_dmar {
224 ACPI_TABLE_HEADER_DEF struct acpi_generic_address ec_control; /* Address of EC command/status register */ 258 struct acpi_table_header header; /* Common ACPI table header */
225 struct acpi_generic_address ec_data; /* Address of EC data register */ 259 u8 width; /* Host Address Width */
226 u32 uid; /* Unique ID - must be same as the EC _UID method */ 260 u8 reserved[11];
227 u8 gpe_bit; /* The GPE for the EC */ 261};
228 u8 ec_id[1]; /* Full namepath of the EC in the ACPI namespace */ 262
263/* DMAR subtable header */
264
265struct acpi_dmar_header {
266 u16 type;
267 u16 length;
268 u8 flags;
269 u8 reserved[3];
270};
271
272/* Values for subtable type in struct acpi_dmar_header */
273
274enum acpi_dmar_type {
275 ACPI_DMAR_TYPE_HARDWARE_UNIT = 0,
276 ACPI_DMAR_TYPE_RESERVED_MEMORY = 1,
277 ACPI_DMAR_TYPE_RESERVED = 2 /* 2 and greater are reserved */
278};
279
280struct acpi_dmar_device_scope {
281 u8 entry_type;
282 u8 length;
283 u8 segment;
284 u8 bus;
285};
286
287/* Values for entry_type in struct acpi_dmar_device_scope */
288
289enum acpi_dmar_scope_type {
290 ACPI_DMAR_SCOPE_TYPE_NOT_USED = 0,
291 ACPI_DMAR_SCOPE_TYPE_ENDPOINT = 1,
292 ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2,
293 ACPI_DMAR_SCOPE_TYPE_RESERVED = 3 /* 3 and greater are reserved */
294};
295
296/*
297 * DMAR Sub-tables, correspond to Type in struct acpi_dmar_header
298 */
299
300/* 0: Hardware Unit Definition */
301
302struct acpi_dmar_hardware_unit {
303 struct acpi_dmar_header header;
304 u64 address; /* Register Base Address */
305};
306
307/* Flags */
308
309#define ACPI_DMAR_INCLUDE_ALL (1)
310
311/* 1: Reserved Memory Defininition */
312
313struct acpi_dmar_reserved_memory {
314 struct acpi_dmar_header header;
315 u64 address; /* 4_k aligned base address */
316 u64 end_address; /* 4_k aligned limit address */
229}; 317};
230 318
319/* Flags */
320
321#define ACPI_DMAR_ALLOW_ALL (1)
322
231/******************************************************************************* 323/*******************************************************************************
232 * 324 *
233 * HPET - High Precision Event Timer table 325 * ECDT - Embedded Controller Boot Resources Table
234 * 326 *
235 ******************************************************************************/ 327 ******************************************************************************/
236 328
237struct acpi_hpet_table { 329struct acpi_table_ecdt {
238 ACPI_TABLE_HEADER_DEF u32 hardware_id; /* Hardware ID of event timer block */ 330 struct acpi_table_header header; /* Common ACPI table header */
239 struct acpi_generic_address base_address; /* Address of event timer block */ 331 struct acpi_generic_address control; /* Address of EC command/status register */
240 u8 hpet_number; /* HPET sequence number */ 332 struct acpi_generic_address data; /* Address of EC data register */
241 u16 clock_tick; /* Main counter min tick, periodic mode */ 333 u32 uid; /* Unique ID - must be same as the EC _UID method */
242 u8 attributes; 334 u8 gpe; /* The GPE for the EC */
335 u8 id[1]; /* Full namepath of the EC in the ACPI namespace */
243}; 336};
244 337
245#if 0 /* HPET flags to be converted to macros */
246struct { /* Flags (8 bits) */
247 u8 page_protect:1; /* 00: No page protection */
248 u8 page_protect4:1; /* 01: 4_kB page protected */
249 u8 page_protect64:1; /* 02: 64_kB page protected */
250 u8:5; /* 03-07: Reserved, must be zero */
251} flags;
252#endif
253
254/******************************************************************************* 338/*******************************************************************************
255 * 339 *
256 * MADT - Multiple APIC Description Table 340 * HPET - High Precision Event Timer table
257 * 341 *
258 ******************************************************************************/ 342 ******************************************************************************/
259 343
260struct multiple_apic_table { 344struct acpi_table_hpet {
261 ACPI_TABLE_HEADER_DEF u32 local_apic_address; /* Physical address of local APIC */ 345 struct acpi_table_header header; /* Common ACPI table header */
262 346 u32 id; /* Hardware ID of event timer block */
263 /* Flags (32 bits) */ 347 struct acpi_generic_address address; /* Address of event timer block */
264 348 u8 sequence; /* HPET sequence number */
265 u8 PCATcompat:1; /* 00: System also has dual 8259s */ 349 u16 minimum_tick; /* Main counter min tick, periodic mode */
266 u8:7; /* 01-07: Reserved, must be zero */ 350 u8 flags;
267 u8 reserved1[3]; /* 08-31: Reserved, must be zero */
268}; 351};
269 352
270/* Values for MADT PCATCompat */ 353/*! Flags */
271 354
272#define DUAL_PIC 0 355#define ACPI_HPET_PAGE_PROTECT (1) /* 00: No page protection */
273#define MULTIPLE_APIC 1 356#define ACPI_HPET_PAGE_PROTECT_4 (1<<1) /* 01: 4KB page protected */
357#define ACPI_HPET_PAGE_PROTECT_64 (1<<2) /* 02: 64KB page protected */
274 358
275/* Common MADT Sub-table header */ 359/*! [End] no source code translation !*/
276 360
277#define APIC_HEADER_DEF \ 361/*******************************************************************************
278 u8 type; \ 362 *
279 u8 length; 363 * MADT - Multiple APIC Description Table
280 364 *
281struct apic_header { 365 ******************************************************************************/
282APIC_HEADER_DEF};
283
284/* Values for Type in struct apic_header */
285 366
286#define APIC_PROCESSOR 0 367struct acpi_table_madt {
287#define APIC_IO 1 368 struct acpi_table_header header; /* Common ACPI table header */
288#define APIC_XRUPT_OVERRIDE 2 369 u32 address; /* Physical address of local APIC */
289#define APIC_NMI 3 370 u32 flags;
290#define APIC_LOCAL_NMI 4 371};
291#define APIC_ADDRESS_OVERRIDE 5
292#define APIC_IO_SAPIC 6
293#define APIC_LOCAL_SAPIC 7
294#define APIC_XRUPT_SOURCE 8
295#define APIC_RESERVED 9 /* 9 and greater are reserved */
296 372
297/* Flag definitions for MADT sub-tables */ 373/* Flags */
298 374
299#define ACPI_MADT_IFLAGS /* INTI flags (16 bits) */ \ 375#define ACPI_MADT_PCAT_COMPAT (1) /* 00: System also has dual 8259s */
300 u8 polarity : 2; /* 00-01: Polarity of APIC I/O input signals */\
301 u8 trigger_mode : 2; /* 02-03: Trigger mode of APIC input signals */\
302 u8 : 4; /* 04-07: Reserved, must be zero */\
303 u8 reserved1; /* 08-15: Reserved, must be zero */
304 376
305#define ACPI_MADT_LFLAGS /* Local Sapic flags (32 bits) */ \ 377/* Values for PCATCompat flag */
306 u8 processor_enabled: 1; /* 00: Processor is usable if set */\
307 u8 : 7; /* 01-07: Reserved, must be zero */\
308 u8 reserved2[3]; /* 08-31: Reserved, must be zero */
309 378
310/* Values for MPS INTI flags */ 379#define ACPI_MADT_DUAL_PIC 0
380#define ACPI_MADT_MULTIPLE_APIC 1
311 381
312#define POLARITY_CONFORMS 0 382/* Values for subtable type in struct acpi_subtable_header */
313#define POLARITY_ACTIVE_HIGH 1
314#define POLARITY_RESERVED 2
315#define POLARITY_ACTIVE_LOW 3
316 383
317#define TRIGGER_CONFORMS 0 384enum acpi_madt_type {
318#define TRIGGER_EDGE 1 385 ACPI_MADT_TYPE_LOCAL_APIC = 0,
319#define TRIGGER_RESERVED 2 386 ACPI_MADT_TYPE_IO_APIC = 1,
320#define TRIGGER_LEVEL 3 387 ACPI_MADT_TYPE_INTERRUPT_OVERRIDE = 2,
388 ACPI_MADT_TYPE_NMI_SOURCE = 3,
389 ACPI_MADT_TYPE_LOCAL_APIC_NMI = 4,
390 ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE = 5,
391 ACPI_MADT_TYPE_IO_SAPIC = 6,
392 ACPI_MADT_TYPE_LOCAL_SAPIC = 7,
393 ACPI_MADT_TYPE_INTERRUPT_SOURCE = 8,
394 ACPI_MADT_TYPE_RESERVED = 9 /* 9 and greater are reserved */
395};
321 396
322/* 397/*
323 * MADT Sub-tables, correspond to Type in struct apic_header 398 * MADT Sub-tables, correspond to Type in struct acpi_subtable_header
324 */ 399 */
325 400
326/* 0: processor APIC */ 401/* 0: Processor Local APIC */
327 402
328struct madt_processor_apic { 403struct acpi_madt_local_apic {
329 APIC_HEADER_DEF u8 processor_id; /* ACPI processor id */ 404 struct acpi_subtable_header header;
330 u8 local_apic_id; /* Processor's local APIC id */ 405 u8 processor_id; /* ACPI processor id */
331 ACPI_MADT_LFLAGS}; 406 u8 id; /* Processor's local APIC id */
407 u32 lapic_flags;
408};
332 409
333/* 1: IO APIC */ 410/* 1: IO APIC */
334 411
335struct madt_io_apic { 412struct acpi_madt_io_apic {
336 APIC_HEADER_DEF u8 io_apic_id; /* I/O APIC ID */ 413 struct acpi_subtable_header header;
414 u8 id; /* I/O APIC ID */
337 u8 reserved; /* Reserved - must be zero */ 415 u8 reserved; /* Reserved - must be zero */
338 u32 address; /* APIC physical address */ 416 u32 address; /* APIC physical address */
339 u32 interrupt; /* Global system interrupt where INTI lines start */ 417 u32 global_irq_base; /* Global system interrupt where INTI lines start */
340}; 418};
341 419
342/* 2: Interrupt Override */ 420/* 2: Interrupt Override */
343 421
344struct madt_interrupt_override { 422struct acpi_madt_interrupt_override {
345 APIC_HEADER_DEF u8 bus; /* 0 - ISA */ 423 struct acpi_subtable_header header;
346 u8 source; /* Interrupt source (IRQ) */ 424 u8 bus; /* 0 - ISA */
347 u32 interrupt; /* Global system interrupt */ 425 u8 source_irq; /* Interrupt source (IRQ) */
348 ACPI_MADT_IFLAGS}; 426 u32 global_irq; /* Global system interrupt */
427 u16 inti_flags;
428};
349 429
350/* 3: NMI Sources */ 430/* 3: NMI Source */
351 431
352struct madt_nmi_source { 432struct acpi_madt_nmi_source {
353 APIC_HEADER_DEF ACPI_MADT_IFLAGS u32 interrupt; /* Global system interrupt */ 433 struct acpi_subtable_header header;
434 u16 inti_flags;
435 u32 global_irq; /* Global system interrupt */
354}; 436};
355 437
356/* 4: Local APIC NMI */ 438/* 4: Local APIC NMI */
357 439
358struct madt_local_apic_nmi { 440struct acpi_madt_local_apic_nmi {
359 APIC_HEADER_DEF u8 processor_id; /* ACPI processor id */ 441 struct acpi_subtable_header header;
360 ACPI_MADT_IFLAGS u8 lint; /* LINTn to which NMI is connected */ 442 u8 processor_id; /* ACPI processor id */
443 u16 inti_flags;
444 u8 lint; /* LINTn to which NMI is connected */
361}; 445};
362 446
363/* 5: Address Override */ 447/* 5: Address Override */
364 448
365struct madt_address_override { 449struct acpi_madt_local_apic_override {
366 APIC_HEADER_DEF u16 reserved; /* Reserved, must be zero */ 450 struct acpi_subtable_header header;
451 u16 reserved; /* Reserved, must be zero */
367 u64 address; /* APIC physical address */ 452 u64 address; /* APIC physical address */
368}; 453};
369 454
370/* 6: I/O Sapic */ 455/* 6: I/O Sapic */
371 456
372struct madt_io_sapic { 457struct acpi_madt_io_sapic {
373 APIC_HEADER_DEF u8 io_sapic_id; /* I/O SAPIC ID */ 458 struct acpi_subtable_header header;
459 u8 id; /* I/O SAPIC ID */
374 u8 reserved; /* Reserved, must be zero */ 460 u8 reserved; /* Reserved, must be zero */
375 u32 interrupt_base; /* Glocal interrupt for SAPIC start */ 461 u32 global_irq_base; /* Global interrupt for SAPIC start */
376 u64 address; /* SAPIC physical address */ 462 u64 address; /* SAPIC physical address */
377}; 463};
378 464
379/* 7: Local Sapic */ 465/* 7: Local Sapic */
380 466
381struct madt_local_sapic { 467struct acpi_madt_local_sapic {
382 APIC_HEADER_DEF u8 processor_id; /* ACPI processor id */ 468 struct acpi_subtable_header header;
383 u8 local_sapic_id; /* SAPIC ID */ 469 u8 processor_id; /* ACPI processor id */
384 u8 local_sapic_eid; /* SAPIC EID */ 470 u8 id; /* SAPIC ID */
471 u8 eid; /* SAPIC EID */
385 u8 reserved[3]; /* Reserved, must be zero */ 472 u8 reserved[3]; /* Reserved, must be zero */
386 ACPI_MADT_LFLAGS u32 processor_uID; /* Numeric UID - ACPI 3.0 */ 473 u32 lapic_flags;
387 char processor_uIDstring[1]; /* String UID - ACPI 3.0 */ 474 u32 uid; /* Numeric UID - ACPI 3.0 */
475 char uid_string[1]; /* String UID - ACPI 3.0 */
388}; 476};
389 477
390/* 8: Platform Interrupt Source */ 478/* 8: Platform Interrupt Source */
391 479
392struct madt_interrupt_source { 480struct acpi_madt_interrupt_source {
393 APIC_HEADER_DEF ACPI_MADT_IFLAGS u8 interrupt_type; /* 1=PMI, 2=INIT, 3=corrected */ 481 struct acpi_subtable_header header;
394 u8 processor_id; /* Processor ID */ 482 u16 inti_flags;
395 u8 processor_eid; /* Processor EID */ 483 u8 type; /* 1=PMI, 2=INIT, 3=corrected */
484 u8 id; /* Processor ID */
485 u8 eid; /* Processor EID */
396 u8 io_sapic_vector; /* Vector value for PMI interrupts */ 486 u8 io_sapic_vector; /* Vector value for PMI interrupts */
397 u32 interrupt; /* Global system interrupt */ 487 u32 global_irq; /* Global system interrupt */
398 u32 flags; /* Interrupt Source Flags */ 488 u32 flags; /* Interrupt Source Flags */
399}; 489};
400 490
401#ifdef DUPLICATE_DEFINITION_WITH_LINUX_ACPI_H 491/* Flags field above */
492
493#define ACPI_MADT_CPEI_OVERRIDE (1)
494
495/*
496 * Common flags fields for MADT subtables
497 */
498
499/* MADT Local APIC flags (lapic_flags) */
500
501#define ACPI_MADT_ENABLED (1) /* 00: Processor is usable if set */
502
503/* MADT MPS INTI flags (inti_flags) */
504
505#define ACPI_MADT_POLARITY_MASK (3) /* 00-01: Polarity of APIC I/O input signals */
506#define ACPI_MADT_TRIGGER_MASK (3<<2) /* 02-03: Trigger mode of APIC input signals */
507
508/* Values for MPS INTI flags */
509
510#define ACPI_MADT_POLARITY_CONFORMS 0
511#define ACPI_MADT_POLARITY_ACTIVE_HIGH 1
512#define ACPI_MADT_POLARITY_RESERVED 2
513#define ACPI_MADT_POLARITY_ACTIVE_LOW 3
514
515#define ACPI_MADT_TRIGGER_CONFORMS (0)
516#define ACPI_MADT_TRIGGER_EDGE (1<<2)
517#define ACPI_MADT_TRIGGER_RESERVED (2<<2)
518#define ACPI_MADT_TRIGGER_LEVEL (3<<2)
519
402/******************************************************************************* 520/*******************************************************************************
403 * 521 *
404 * MCFG - PCI Memory Mapped Configuration table and sub-table 522 * MCFG - PCI Memory Mapped Configuration table and sub-table
@@ -406,17 +524,19 @@ struct madt_interrupt_source {
406 ******************************************************************************/ 524 ******************************************************************************/
407 525
408struct acpi_table_mcfg { 526struct acpi_table_mcfg {
409 ACPI_TABLE_HEADER_DEF u8 reserved[8]; 527 struct acpi_table_header header; /* Common ACPI table header */
528 u8 reserved[8];
410}; 529};
411 530
531/* Subtable */
532
412struct acpi_mcfg_allocation { 533struct acpi_mcfg_allocation {
413 u64 base_address; /* Base address, processor-relative */ 534 u64 address; /* Base address, processor-relative */
414 u16 pci_segment; /* PCI segment group number */ 535 u16 pci_segment; /* PCI segment group number */
415 u8 start_bus_number; /* Starting PCI Bus number */ 536 u8 start_bus_number; /* Starting PCI Bus number */
416 u8 end_bus_number; /* Final PCI Bus number */ 537 u8 end_bus_number; /* Final PCI Bus number */
417 u32 reserved; 538 u32 reserved;
418}; 539};
419#endif
420 540
421/******************************************************************************* 541/*******************************************************************************
422 * 542 *
@@ -424,8 +544,9 @@ struct acpi_mcfg_allocation {
424 * 544 *
425 ******************************************************************************/ 545 ******************************************************************************/
426 546
427struct smart_battery_table { 547struct acpi_table_sbst {
428 ACPI_TABLE_HEADER_DEF u32 warning_level; 548 struct acpi_table_header header; /* Common ACPI table header */
549 u32 warning_level;
429 u32 low_level; 550 u32 low_level;
430 u32 critical_level; 551 u32 critical_level;
431}; 552};
@@ -436,9 +557,10 @@ struct smart_battery_table {
436 * 557 *
437 ******************************************************************************/ 558 ******************************************************************************/
438 559
439struct system_locality_info { 560struct acpi_table_slit {
440 ACPI_TABLE_HEADER_DEF u64 locality_count; 561 struct acpi_table_header header; /* Common ACPI table header */
441 u8 entry[1][1]; 562 u64 locality_count;
563 u8 entry[1]; /* Real size = localities^2 */
442}; 564};
443 565
444/******************************************************************************* 566/*******************************************************************************
@@ -448,7 +570,8 @@ struct system_locality_info {
448 ******************************************************************************/ 570 ******************************************************************************/
449 571
450struct acpi_table_spcr { 572struct acpi_table_spcr {
451 ACPI_TABLE_HEADER_DEF u8 interface_type; /* 0=full 16550, 1=subset of 16550 */ 573 struct acpi_table_header header; /* Common ACPI table header */
574 u8 interface_type; /* 0=full 16550, 1=subset of 16550 */
452 u8 reserved[3]; 575 u8 reserved[3];
453 struct acpi_generic_address serial_port; 576 struct acpi_generic_address serial_port;
454 u8 interrupt_type; 577 u8 interrupt_type;
@@ -459,7 +582,7 @@ struct acpi_table_spcr {
459 u8 stop_bits; 582 u8 stop_bits;
460 u8 flow_control; 583 u8 flow_control;
461 u8 terminal_type; 584 u8 terminal_type;
462 u8 reserved2; 585 u8 reserved1;
463 u16 pci_device_id; 586 u16 pci_device_id;
464 u16 pci_vendor_id; 587 u16 pci_vendor_id;
465 u8 pci_bus; 588 u8 pci_bus;
@@ -467,7 +590,7 @@ struct acpi_table_spcr {
467 u8 pci_function; 590 u8 pci_function;
468 u32 pci_flags; 591 u32 pci_flags;
469 u8 pci_segment; 592 u8 pci_segment;
470 u32 reserved3; 593 u32 reserved2;
471}; 594};
472 595
473/******************************************************************************* 596/*******************************************************************************
@@ -477,12 +600,13 @@ struct acpi_table_spcr {
477 ******************************************************************************/ 600 ******************************************************************************/
478 601
479struct acpi_table_spmi { 602struct acpi_table_spmi {
480 ACPI_TABLE_HEADER_DEF u8 reserved; 603 struct acpi_table_header header; /* Common ACPI table header */
604 u8 reserved;
481 u8 interface_type; 605 u8 interface_type;
482 u16 spec_revision; /* Version of IPMI */ 606 u16 spec_revision; /* Version of IPMI */
483 u8 interrupt_type; 607 u8 interrupt_type;
484 u8 gpe_number; /* GPE assigned */ 608 u8 gpe_number; /* GPE assigned */
485 u8 reserved2; 609 u8 reserved1;
486 u8 pci_device_flag; 610 u8 pci_device_flag;
487 u32 interrupt; 611 u32 interrupt;
488 struct acpi_generic_address ipmi_register; 612 struct acpi_generic_address ipmi_register;
@@ -498,58 +622,53 @@ struct acpi_table_spmi {
498 * 622 *
499 ******************************************************************************/ 623 ******************************************************************************/
500 624
501struct system_resource_affinity { 625struct acpi_table_srat {
502 ACPI_TABLE_HEADER_DEF u32 reserved1; /* Must be value '1' */ 626 struct acpi_table_header header; /* Common ACPI table header */
503 u64 reserved2; /* Reserved, must be zero */ 627 u32 table_revision; /* Must be value '1' */
628 u64 reserved; /* Reserved, must be zero */
504}; 629};
505 630
506/* SRAT common sub-table header */ 631/* Values for subtable type in struct acpi_subtable_header */
507 632
508#define SRAT_SUBTABLE_HEADER \ 633enum acpi_srat_type {
509 u8 type; \ 634 ACPI_SRAT_TYPE_CPU_AFFINITY = 0,
510 u8 length; 635 ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1,
511 636 ACPI_SRAT_TYPE_RESERVED = 2
512/* Values for Type above */ 637};
513
514#define SRAT_CPU_AFFINITY 0
515#define SRAT_MEMORY_AFFINITY 1
516#define SRAT_RESERVED 2
517 638
518/* SRAT sub-tables */ 639/* SRAT sub-tables */
519 640
520struct static_resource_alloc { 641struct acpi_srat_cpu_affinity {
521 SRAT_SUBTABLE_HEADER u8 proximity_domain_lo; 642 struct acpi_subtable_header header;
643 u8 proximity_domain_lo;
522 u8 apic_id; 644 u8 apic_id;
523 645 u32 flags;
524 /* Flags (32 bits) */
525
526 u8 enabled:1; /* 00: Use affinity structure */
527 u8:7; /* 01-07: Reserved, must be zero */
528 u8 reserved3[3]; /* 08-31: Reserved, must be zero */
529
530 u8 local_sapic_eid; 646 u8 local_sapic_eid;
531 u8 proximity_domain_hi[3]; 647 u8 proximity_domain_hi[3];
532 u32 reserved4; /* Reserved, must be zero */ 648 u32 reserved; /* Reserved, must be zero */
533}; 649};
534 650
535struct memory_affinity { 651/* Flags */
536 SRAT_SUBTABLE_HEADER u32 proximity_domain;
537 u16 reserved3;
538 u64 base_address;
539 u64 address_length;
540 u32 reserved4;
541
542 /* Flags (32 bits) */
543 652
544 u8 enabled:1; /* 00: Use affinity structure */ 653#define ACPI_SRAT_CPU_ENABLED (1) /* 00: Use affinity structure */
545 u8 hot_pluggable:1; /* 01: Memory region is hot pluggable */
546 u8 non_volatile:1; /* 02: Memory is non-volatile */
547 u8:5; /* 03-07: Reserved, must be zero */
548 u8 reserved5[3]; /* 08-31: Reserved, must be zero */
549 654
550 u64 reserved6; /* Reserved, must be zero */ 655struct acpi_srat_mem_affinity {
656 struct acpi_subtable_header header;
657 u32 proximity_domain;
658 u16 reserved; /* Reserved, must be zero */
659 u64 base_address;
660 u64 length;
661 u32 memory_type; /* See acpi_address_range_id */
662 u32 flags;
663 u64 reserved1; /* Reserved, must be zero */
551}; 664};
552 665
666/* Flags */
667
668#define ACPI_SRAT_MEM_ENABLED (1) /* 00: Use affinity structure */
669#define ACPI_SRAT_MEM_HOT_PLUGGABLE (1<<1) /* 01: Memory region is hot pluggable */
670#define ACPI_SRAT_MEM_NON_VOLATILE (1<<2) /* 02: Memory region is non-volatile */
671
553/******************************************************************************* 672/*******************************************************************************
554 * 673 *
555 * TCPA - Trusted Computing Platform Alliance table 674 * TCPA - Trusted Computing Platform Alliance table
@@ -557,7 +676,8 @@ struct memory_affinity {
557 ******************************************************************************/ 676 ******************************************************************************/
558 677
559struct acpi_table_tcpa { 678struct acpi_table_tcpa {
560 ACPI_TABLE_HEADER_DEF u16 reserved; 679 struct acpi_table_header header; /* Common ACPI table header */
680 u16 reserved;
561 u32 max_log_length; /* Maximum length for the event log area */ 681 u32 max_log_length; /* Maximum length for the event log area */
562 u64 log_address; /* Address of the event log area */ 682 u64 log_address; /* Address of the event log area */
563}; 683};
@@ -569,7 +689,8 @@ struct acpi_table_tcpa {
569 ******************************************************************************/ 689 ******************************************************************************/
570 690
571struct acpi_table_wdrt { 691struct acpi_table_wdrt {
572 ACPI_TABLE_HEADER_DEF u32 header_length; /* Watchdog Header Length */ 692 struct acpi_table_header header; /* Common ACPI table header */
693 u32 header_length; /* Watchdog Header Length */
573 u8 pci_segment; /* PCI Segment number */ 694 u8 pci_segment; /* PCI Segment number */
574 u8 pci_bus; /* PCI Bus number */ 695 u8 pci_bus; /* PCI Bus number */
575 u8 pci_device; /* PCI Device number */ 696 u8 pci_device; /* PCI Device number */
@@ -582,58 +703,9 @@ struct acpi_table_wdrt {
582 u32 entries; /* Number of watchdog entries that follow */ 703 u32 entries; /* Number of watchdog entries that follow */
583}; 704};
584 705
585#if 0 /* Flags, will be converted to macros */ 706/* Flags */
586u8 enabled:1; /* 00: Timer enabled */ 707
587u8:6; /* 01-06: Reserved */ 708#define ACPI_WDRT_TIMER_ENABLED (1) /* 00: Timer enabled */
588u8 sleep_stop:1; /* 07: Timer stopped in sleep state */
589#endif
590
591/* Macros used to generate offsets to specific table fields */
592
593#define ACPI_ASF0_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_info,f)
594#define ACPI_ASF1_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_alert,f)
595#define ACPI_ASF2_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_remote,f)
596#define ACPI_ASF3_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_rmcp,f)
597#define ACPI_ASF4_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_address,f)
598#define ACPI_BOOT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_boot,f)
599#define ACPI_CPEP_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_cpep,f)
600#define ACPI_CPEP0_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_cpep_polling,f)
601#define ACPI_DBGP_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_dbgp,f)
602#define ACPI_ECDT_OFFSET(f) (u8) ACPI_OFFSET (struct ec_boot_resources,f)
603#define ACPI_HPET_OFFSET(f) (u8) ACPI_OFFSET (struct hpet_table,f)
604#define ACPI_MADT_OFFSET(f) (u8) ACPI_OFFSET (struct multiple_apic_table,f)
605#define ACPI_MADT0_OFFSET(f) (u8) ACPI_OFFSET (struct madt_processor_apic,f)
606#define ACPI_MADT1_OFFSET(f) (u8) ACPI_OFFSET (struct madt_io_apic,f)
607#define ACPI_MADT2_OFFSET(f) (u8) ACPI_OFFSET (struct madt_interrupt_override,f)
608#define ACPI_MADT3_OFFSET(f) (u8) ACPI_OFFSET (struct madt_nmi_source,f)
609#define ACPI_MADT4_OFFSET(f) (u8) ACPI_OFFSET (struct madt_local_apic_nmi,f)
610#define ACPI_MADT5_OFFSET(f) (u8) ACPI_OFFSET (struct madt_address_override,f)
611#define ACPI_MADT6_OFFSET(f) (u8) ACPI_OFFSET (struct madt_io_sapic,f)
612#define ACPI_MADT7_OFFSET(f) (u8) ACPI_OFFSET (struct madt_local_sapic,f)
613#define ACPI_MADT8_OFFSET(f) (u8) ACPI_OFFSET (struct madt_interrupt_source,f)
614#define ACPI_MADTH_OFFSET(f) (u8) ACPI_OFFSET (struct apic_header,f)
615#define ACPI_MCFG_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_mcfg,f)
616#define ACPI_MCFG0_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_mcfg_allocation,f)
617#define ACPI_SBST_OFFSET(f) (u8) ACPI_OFFSET (struct smart_battery_table,f)
618#define ACPI_SLIT_OFFSET(f) (u8) ACPI_OFFSET (struct system_locality_info,f)
619#define ACPI_SPCR_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_spcr,f)
620#define ACPI_SPMI_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_spmi,f)
621#define ACPI_SRAT_OFFSET(f) (u8) ACPI_OFFSET (struct system_resource_affinity,f)
622#define ACPI_SRAT0_OFFSET(f) (u8) ACPI_OFFSET (struct static_resource_alloc,f)
623#define ACPI_SRAT1_OFFSET(f) (u8) ACPI_OFFSET (struct memory_affinity,f)
624#define ACPI_TCPA_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_tcpa,f)
625#define ACPI_WDRT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_wdrt,f)
626
627#define ACPI_HPET_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct hpet_table,f,o)
628#define ACPI_SRAT0_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct static_resource_alloc,f,o)
629#define ACPI_SRAT1_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct memory_affinity,f,o)
630#define ACPI_MADT_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct multiple_apic_table,f,o)
631#define ACPI_MADT0_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_processor_apic,f,o)
632#define ACPI_MADT2_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_interrupt_override,f,o)
633#define ACPI_MADT3_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_nmi_source,f,o)
634#define ACPI_MADT4_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_local_apic_nmi,f,o)
635#define ACPI_MADT7_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_local_sapic,f,o)
636#define ACPI_MADT8_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_interrupt_source,f,o)
637 709
638/* Reset to default packing */ 710/* Reset to default packing */
639 711
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
deleted file mode 100644
index 67efe6cad27b..000000000000
--- a/include/acpi/actbl2.h
+++ /dev/null
@@ -1,49 +0,0 @@
1/******************************************************************************
2 *
3 * Name: actbl2.h - ACPI Specification Revision 2.0 Tables
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#ifndef __ACTBL2_H__
45#define __ACTBL2_H__
46
47/* Code moved to both actbl.h and actbl1.h */
48
49#endif /* __ACTBL2_H__ */
diff --git a/include/acpi/actbl71.h b/include/acpi/actbl71.h
deleted file mode 100644
index 10ac05bb36bc..000000000000
--- a/include/acpi/actbl71.h
+++ /dev/null
@@ -1,134 +0,0 @@
1/******************************************************************************
2 *
3 * Name: actbl71.h - IA-64 Extensions to the ACPI Spec Rev. 0.71
4 * This file includes tables specific to this
5 * specification revision.
6 *
7 *****************************************************************************/
8
9/*
10 * Copyright (C) 2000 - 2003, R. Byron Moore
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#ifndef __ACTBL71_H__
28#define __ACTBL71_H__
29
30/* 0.71 FADT address_space data item bitmasks defines */
31/* If the associated bit is zero then it is in memory space else in io space */
32
33#define SMI_CMD_ADDRESS_SPACE 0x01
34#define PM1_BLK_ADDRESS_SPACE 0x02
35#define PM2_CNT_BLK_ADDRESS_SPACE 0x04
36#define PM_TMR_BLK_ADDRESS_SPACE 0x08
37#define GPE0_BLK_ADDRESS_SPACE 0x10
38#define GPE1_BLK_ADDRESS_SPACE 0x20
39
40/* Only for clarity in declarations */
41
42typedef u64 IO_ADDRESS;
43
44#pragma pack(1)
45struct { /* Root System Descriptor Pointer */
46 NATIVE_CHAR signature[8]; /* contains "RSD PTR " */
47 u8 checksum; /* to make sum of struct == 0 */
48 NATIVE_CHAR oem_id[6]; /* OEM identification */
49 u8 reserved; /* Must be 0 for 1.0, 2 for 2.0 */
50 u64 rsdt_physical_address; /* 64-bit physical address of RSDT */
51};
52
53/*****************************************/
54/* IA64 Extensions to ACPI Spec Rev 0.71 */
55/* for the Root System Description Table */
56/*****************************************/
57struct {
58 struct acpi_table_header header; /* Table header */
59 u32 reserved_pad; /* IA64 alignment, must be 0 */
60 u64 table_offset_entry[1]; /* Array of pointers to other */
61 /* tables' headers */
62};
63
64/*******************************************/
65/* IA64 Extensions to ACPI Spec Rev 0.71 */
66/* for the Firmware ACPI Control Structure */
67/*******************************************/
68struct {
69 NATIVE_CHAR signature[4]; /* signature "FACS" */
70 u32 length; /* length of structure, in bytes */
71 u32 hardware_signature; /* hardware configuration signature */
72 u32 reserved4; /* must be 0 */
73 u64 firmware_waking_vector; /* ACPI OS waking vector */
74 u64 global_lock; /* Global Lock */
75 u32 S4bios_f:1; /* Indicates if S4BIOS support is present */
76 u32 reserved1:31; /* must be 0 */
77 u8 reserved3[28]; /* reserved - must be zero */
78};
79
80/******************************************/
81/* IA64 Extensions to ACPI Spec Rev 0.71 */
82/* for the Fixed ACPI Description Table */
83/******************************************/
84struct {
85 struct acpi_table_header header; /* table header */
86 u32 reserved_pad; /* IA64 alignment, must be 0 */
87 u64 firmware_ctrl; /* 64-bit Physical address of FACS */
88 u64 dsdt; /* 64-bit Physical address of DSDT */
89 u8 model; /* System Interrupt Model */
90 u8 address_space; /* Address Space Bitmask */
91 u16 sci_int; /* System vector of SCI interrupt */
92 u8 acpi_enable; /* value to write to smi_cmd to enable ACPI */
93 u8 acpi_disable; /* value to write to smi_cmd to disable ACPI */
94 u8 S4bios_req; /* Value to write to SMI CMD to enter S4BIOS state */
95 u8 reserved2; /* reserved - must be zero */
96 u64 smi_cmd; /* Port address of SMI command port */
97 u64 pm1a_evt_blk; /* Port address of Power Mgt 1a acpi_event Reg Blk */
98 u64 pm1b_evt_blk; /* Port address of Power Mgt 1b acpi_event Reg Blk */
99 u64 pm1a_cnt_blk; /* Port address of Power Mgt 1a Control Reg Blk */
100 u64 pm1b_cnt_blk; /* Port address of Power Mgt 1b Control Reg Blk */
101 u64 pm2_cnt_blk; /* Port address of Power Mgt 2 Control Reg Blk */
102 u64 pm_tmr_blk; /* Port address of Power Mgt Timer Ctrl Reg Blk */
103 u64 gpe0_blk; /* Port addr of General Purpose acpi_event 0 Reg Blk */
104 u64 gpe1_blk; /* Port addr of General Purpose acpi_event 1 Reg Blk */
105 u8 pm1_evt_len; /* Byte length of ports at pm1_x_evt_blk */
106 u8 pm1_cnt_len; /* Byte length of ports at pm1_x_cnt_blk */
107 u8 pm2_cnt_len; /* Byte Length of ports at pm2_cnt_blk */
108 u8 pm_tm_len; /* Byte Length of ports at pm_tm_blk */
109 u8 gpe0_blk_len; /* Byte Length of ports at gpe0_blk */
110 u8 gpe1_blk_len; /* Byte Length of ports at gpe1_blk */
111 u8 gpe1_base; /* offset in gpe model where gpe1 events start */
112 u8 reserved3; /* reserved */
113 u16 plvl2_lat; /* worst case HW latency to enter/exit C2 state */
114 u16 plvl3_lat; /* worst case HW latency to enter/exit C3 state */
115 u8 day_alrm; /* index to day-of-month alarm in RTC CMOS RAM */
116 u8 mon_alrm; /* index to month-of-year alarm in RTC CMOS RAM */
117 u8 century; /* index to century in RTC CMOS RAM */
118 u8 reserved4; /* reserved */
119 u32 flush_cash:1; /* PAL_FLUSH_CACHE is correctly supported */
120 u32 reserved5:1; /* reserved - must be zero */
121 u32 proc_c1:1; /* all processors support C1 state */
122 u32 plvl2_up:1; /* C2 state works on MP system */
123 u32 pwr_button:1; /* Power button is handled as a generic feature */
124 u32 sleep_button:1; /* Sleep button is handled as a generic feature, or not present */
125 u32 fixed_rTC:1; /* RTC wakeup stat not in fixed register space */
126 u32 rtcs4:1; /* RTC wakeup stat not possible from S4 */
127 u32 tmr_val_ext:1; /* tmr_val is 32 bits */
128 u32 dock_cap:1; /* Supports Docking */
129 u32 reserved6:22; /* reserved - must be zero */
130};
131
132#pragma pack()
133
134#endif /* __ACTBL71_H__ */
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 64b603cfe92e..72a6e2c3a536 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -48,7 +48,8 @@
48 48
49/* 49/*
50 * ACPI_MACHINE_WIDTH must be specified in an OS- or compiler-dependent header 50 * ACPI_MACHINE_WIDTH must be specified in an OS- or compiler-dependent header
51 * and must be either 16, 32, or 64 51 * and must be either 32 or 64. 16-bit ACPICA is no longer supported, as of
52 * 12/2006.
52 */ 53 */
53#ifndef ACPI_MACHINE_WIDTH 54#ifndef ACPI_MACHINE_WIDTH
54#error ACPI_MACHINE_WIDTH not defined 55#error ACPI_MACHINE_WIDTH not defined
@@ -149,7 +150,6 @@ typedef int INT32;
149typedef u64 acpi_native_uint; 150typedef u64 acpi_native_uint;
150typedef s64 acpi_native_int; 151typedef s64 acpi_native_int;
151 152
152typedef u64 acpi_table_ptr;
153typedef u64 acpi_io_address; 153typedef u64 acpi_io_address;
154typedef u64 acpi_physical_address; 154typedef u64 acpi_physical_address;
155 155
@@ -189,48 +189,15 @@ typedef int INT32;
189typedef u32 acpi_native_uint; 189typedef u32 acpi_native_uint;
190typedef s32 acpi_native_int; 190typedef s32 acpi_native_int;
191 191
192typedef u64 acpi_table_ptr;
193typedef u32 acpi_io_address; 192typedef u32 acpi_io_address;
194typedef u64 acpi_physical_address; 193typedef u32 acpi_physical_address;
195 194
196#define ACPI_MAX_PTR ACPI_UINT32_MAX 195#define ACPI_MAX_PTR ACPI_UINT32_MAX
197#define ACPI_SIZE_MAX ACPI_UINT32_MAX 196#define ACPI_SIZE_MAX ACPI_UINT32_MAX
198 197
199/*******************************************************************************
200 *
201 * Types specific to 16-bit targets
202 *
203 ******************************************************************************/
204
205#elif ACPI_MACHINE_WIDTH == 16
206
207/*! [Begin] no source code translation (keep the typedefs as-is) */
208
209typedef unsigned long UINT32;
210typedef short INT16;
211typedef long INT32;
212
213/*! [End] no source code translation !*/
214
215typedef u16 acpi_native_uint;
216typedef s16 acpi_native_int;
217
218typedef u32 acpi_table_ptr;
219typedef u32 acpi_io_address;
220typedef char *acpi_physical_address;
221
222#define ACPI_MAX_PTR ACPI_UINT16_MAX
223#define ACPI_SIZE_MAX ACPI_UINT16_MAX
224
225#define ACPI_USE_NATIVE_DIVIDE /* No 64-bit integers, ok to use native divide */
226
227/* 64-bit integers cannot be supported */
228
229#define ACPI_NO_INTEGER64_SUPPORT
230
231#else 198#else
232 199
233/* ACPI_MACHINE_WIDTH must be either 64, 32, or 16 */ 200/* ACPI_MACHINE_WIDTH must be either 64 or 32 */
234 201
235#error unknown ACPI_MACHINE_WIDTH 202#error unknown ACPI_MACHINE_WIDTH
236#endif 203#endif
@@ -311,36 +278,6 @@ typedef acpi_native_uint acpi_size;
311 * 278 *
312 ******************************************************************************/ 279 ******************************************************************************/
313 280
314/*
315 * Pointer overlays to avoid lots of typecasting for
316 * code that accepts both physical and logical pointers.
317 */
318union acpi_pointers {
319 acpi_physical_address physical;
320 void *logical;
321 acpi_table_ptr value;
322};
323
324struct acpi_pointer {
325 u32 pointer_type;
326 union acpi_pointers pointer;
327};
328
329/* pointer_types for above */
330
331#define ACPI_PHYSICAL_POINTER 0x01
332#define ACPI_LOGICAL_POINTER 0x02
333
334/* Processor mode */
335
336#define ACPI_PHYSICAL_ADDRESSING 0x04
337#define ACPI_LOGICAL_ADDRESSING 0x08
338#define ACPI_MEMORY_MODE 0x0C
339
340#define ACPI_PHYSMODE_PHYSPTR ACPI_PHYSICAL_ADDRESSING | ACPI_PHYSICAL_POINTER
341#define ACPI_LOGMODE_PHYSPTR ACPI_LOGICAL_ADDRESSING | ACPI_PHYSICAL_POINTER
342#define ACPI_LOGMODE_LOGPTR ACPI_LOGICAL_ADDRESSING | ACPI_LOGICAL_POINTER
343
344/* Logical defines and NULL */ 281/* Logical defines and NULL */
345 282
346#ifdef FALSE 283#ifdef FALSE
@@ -442,7 +379,8 @@ typedef u64 acpi_integer;
442/* 379/*
443 * Initialization state 380 * Initialization state
444 */ 381 */
445#define ACPI_INITIALIZED_OK 0x01 382#define ACPI_SUBSYSTEM_INITIALIZE 0x01
383#define ACPI_INITIALIZED_OK 0x02
446 384
447/* 385/*
448 * Power state values 386 * Power state values
@@ -491,21 +429,6 @@ typedef u64 acpi_integer;
491#define ACPI_NOTIFY_POWER_FAULT (u8) 7 429#define ACPI_NOTIFY_POWER_FAULT (u8) 7
492 430
493/* 431/*
494 * Table types. These values are passed to the table related APIs
495 */
496typedef u32 acpi_table_type;
497
498#define ACPI_TABLE_ID_RSDP (acpi_table_type) 0
499#define ACPI_TABLE_ID_DSDT (acpi_table_type) 1
500#define ACPI_TABLE_ID_FADT (acpi_table_type) 2
501#define ACPI_TABLE_ID_FACS (acpi_table_type) 3
502#define ACPI_TABLE_ID_PSDT (acpi_table_type) 4
503#define ACPI_TABLE_ID_SSDT (acpi_table_type) 5
504#define ACPI_TABLE_ID_XSDT (acpi_table_type) 6
505#define ACPI_TABLE_ID_MAX 6
506#define ACPI_NUM_TABLE_TYPES (ACPI_TABLE_ID_MAX+1)
507
508/*
509 * Types associated with ACPI names and objects. The first group of 432 * Types associated with ACPI names and objects. The first group of
510 * values (up to ACPI_TYPE_EXTERNAL_MAX) correspond to the definition 433 * values (up to ACPI_TYPE_EXTERNAL_MAX) correspond to the definition
511 * of the ACPI object_type() operator (See the ACPI Spec). Therefore, 434 * of the ACPI object_type() operator (See the ACPI Spec). Therefore,
@@ -637,7 +560,7 @@ typedef u32 acpi_event_status;
637 * | | | +--- Type of dispatch -- to method, handler, or none 560 * | | | +--- Type of dispatch -- to method, handler, or none
638 * | | +--- Enabled for runtime? 561 * | | +--- Enabled for runtime?
639 * | +--- Enabled for wake? 562 * | +--- Enabled for wake?
640 * +--- System state when GPE ocurred (running/waking) 563 * +--- Unused
641 */ 564 */
642#define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x01 565#define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x01
643#define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x01 566#define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x01
@@ -663,10 +586,6 @@ typedef u32 acpi_event_status;
663 586
664#define ACPI_GPE_ENABLE_MASK (u8) 0x60 /* Both run/wake */ 587#define ACPI_GPE_ENABLE_MASK (u8) 0x60 /* Both run/wake */
665 588
666#define ACPI_GPE_SYSTEM_MASK (u8) 0x80
667#define ACPI_GPE_SYSTEM_RUNNING (u8) 0x80
668#define ACPI_GPE_SYSTEM_WAKING (u8) 0x00
669
670/* 589/*
671 * Flags for GPE and Lock interfaces 590 * Flags for GPE and Lock interfaces
672 */ 591 */
@@ -816,13 +735,6 @@ struct acpi_buffer {
816#define ACPI_SYS_MODES_MASK 0x0003 735#define ACPI_SYS_MODES_MASK 0x0003
817 736
818/* 737/*
819 * ACPI Table Info. One per ACPI table _type_
820 */
821struct acpi_table_info {
822 u32 count;
823};
824
825/*
826 * System info returned by acpi_get_system_info() 738 * System info returned by acpi_get_system_info()
827 */ 739 */
828struct acpi_system_info { 740struct acpi_system_info {
@@ -833,8 +745,6 @@ struct acpi_system_info {
833 u32 reserved2; 745 u32 reserved2;
834 u32 debug_level; 746 u32 debug_level;
835 u32 debug_layer; 747 u32 debug_layer;
836 u32 num_table_types;
837 struct acpi_table_info table_info[ACPI_TABLE_ID_MAX + 1];
838}; 748};
839 749
840/* 750/*
diff --git a/include/acpi/acutils.h b/include/acpi/acutils.h
index ba039ea1a057..883ffe92148f 100644
--- a/include/acpi/acutils.h
+++ b/include/acpi/acutils.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -141,8 +141,6 @@ acpi_status acpi_ut_hardware_initialize(void);
141 141
142void acpi_ut_subsystem_shutdown(void); 142void acpi_ut_subsystem_shutdown(void);
143 143
144acpi_status acpi_ut_validate_fadt(void);
145
146/* 144/*
147 * utclib - Local implementations of C library functions 145 * utclib - Local implementations of C library functions
148 */ 146 */
@@ -453,6 +451,8 @@ acpi_ut_short_divide(acpi_integer in_dividend,
453/* 451/*
454 * utmisc 452 * utmisc
455 */ 453 */
454const char *acpi_ut_validate_exception(acpi_status status);
455
456u8 acpi_ut_is_aml_table(struct acpi_table_header *table); 456u8 acpi_ut_is_aml_table(struct acpi_table_header *table);
457 457
458acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id); 458acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id);
@@ -470,7 +470,7 @@ void acpi_ut_print_string(char *string, u8 max_length);
470 470
471u8 acpi_ut_valid_acpi_name(u32 name); 471u8 acpi_ut_valid_acpi_name(u32 name);
472 472
473acpi_name acpi_ut_repair_name(acpi_name name); 473acpi_name acpi_ut_repair_name(char *name);
474 474
475u8 acpi_ut_valid_acpi_char(char character, acpi_native_uint position); 475u8 acpi_ut_valid_acpi_char(char character, acpi_native_uint position);
476 476
diff --git a/include/acpi/amlcode.h b/include/acpi/amlcode.h
index cf18426a87b1..da53a4ef287a 100644
--- a/include/acpi/amlcode.h
+++ b/include/acpi/amlcode.h
@@ -7,7 +7,7 @@
7 *****************************************************************************/ 7 *****************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2006, R. Byron Moore 10 * Copyright (C) 2000 - 2007, R. Byron Moore
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
@@ -273,7 +273,7 @@
273#define ARGI_DATAOBJECT 0x12 /* Buffer, String, package or reference to a Node - Used only by size_of operator */ 273#define ARGI_DATAOBJECT 0x12 /* Buffer, String, package or reference to a Node - Used only by size_of operator */
274#define ARGI_COMPLEXOBJ 0x13 /* Buffer, String, or package (Used by INDEX op only) */ 274#define ARGI_COMPLEXOBJ 0x13 /* Buffer, String, or package (Used by INDEX op only) */
275#define ARGI_REF_OR_STRING 0x14 /* Reference or String (Used by DEREFOF op only) */ 275#define ARGI_REF_OR_STRING 0x14 /* Reference or String (Used by DEREFOF op only) */
276#define ARGI_REGION_OR_FIELD 0x15 /* Used by LOAD op only */ 276#define ARGI_REGION_OR_BUFFER 0x15 /* Used by LOAD op only */
277#define ARGI_DATAREFOBJ 0x16 277#define ARGI_DATAREFOBJ 0x16
278 278
279/* Note: types above can expand to 0x1F maximum */ 279/* Note: types above can expand to 0x1F maximum */
diff --git a/include/acpi/amlresrc.h b/include/acpi/amlresrc.h
index be03818af9d1..f7d541239da4 100644
--- a/include/acpi/amlresrc.h
+++ b/include/acpi/amlresrc.h
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 453a469fd397..dab2ec59a3b0 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index da80933963db..3bb50494a38a 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 7f1e92930b62..5f532d2ac180 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/asm-alpha/pci.h b/include/asm-alpha/pci.h
index 4e115f368d5f..85aa1127c903 100644
--- a/include/asm-alpha/pci.h
+++ b/include/asm-alpha/pci.h
@@ -293,4 +293,6 @@ struct pci_dev *alpha_gendev_to_pci(struct device *dev);
293#define IOBASE_ROOT_BUS 5 293#define IOBASE_ROOT_BUS 5
294#define IOBASE_FROM_HOSE 0x10000 294#define IOBASE_FROM_HOSE 0x10000
295 295
296extern struct pci_dev *isa_bridge;
297
296#endif /* __ALPHA_PCI_H */ 298#endif /* __ALPHA_PCI_H */
diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h
index 7cfad93edf10..5e657eb8946c 100644
--- a/include/asm-i386/acpi.h
+++ b/include/asm-i386/acpi.h
@@ -39,7 +39,7 @@
39 * Calling conventions: 39 * Calling conventions:
40 * 40 *
41 * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) 41 * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
42 * ACPI_EXTERNAL_XFACE - External ACPI interfaces 42 * ACPI_EXTERNAL_XFACE - External ACPI interfaces
43 * ACPI_INTERNAL_XFACE - Internal ACPI interfaces 43 * ACPI_INTERNAL_XFACE - Internal ACPI interfaces
44 * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces 44 * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
45 */ 45 */
@@ -59,11 +59,11 @@
59int __acpi_acquire_global_lock(unsigned int *lock); 59int __acpi_acquire_global_lock(unsigned int *lock);
60int __acpi_release_global_lock(unsigned int *lock); 60int __acpi_release_global_lock(unsigned int *lock);
61 61
62#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \ 62#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
63 ((Acq) = __acpi_acquire_global_lock((unsigned int *) GLptr)) 63 ((Acq) = __acpi_acquire_global_lock(&facs->global_lock))
64 64
65#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \ 65#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
66 ((Acq) = __acpi_release_global_lock((unsigned int *) GLptr)) 66 ((Acq) = __acpi_release_global_lock(&facs->global_lock))
67 67
68/* 68/*
69 * Math helper asm macros 69 * Math helper asm macros
@@ -87,7 +87,7 @@ extern void check_acpi_pci(void);
87static inline void check_acpi_pci(void) { } 87static inline void check_acpi_pci(void) { }
88#endif 88#endif
89 89
90#ifdef CONFIG_ACPI 90#ifdef CONFIG_ACPI
91extern int acpi_lapic; 91extern int acpi_lapic;
92extern int acpi_ioapic; 92extern int acpi_ioapic;
93extern int acpi_noirq; 93extern int acpi_noirq;
@@ -95,9 +95,9 @@ extern int acpi_strict;
95extern int acpi_disabled; 95extern int acpi_disabled;
96extern int acpi_ht; 96extern int acpi_ht;
97extern int acpi_pci_disabled; 97extern int acpi_pci_disabled;
98static inline void disable_acpi(void) 98static inline void disable_acpi(void)
99{ 99{
100 acpi_disabled = 1; 100 acpi_disabled = 1;
101 acpi_ht = 0; 101 acpi_ht = 0;
102 acpi_pci_disabled = 1; 102 acpi_pci_disabled = 1;
103 acpi_noirq = 1; 103 acpi_noirq = 1;
@@ -114,9 +114,9 @@ extern int acpi_use_timer_override;
114#endif 114#endif
115 115
116static inline void acpi_noirq_set(void) { acpi_noirq = 1; } 116static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
117static inline void acpi_disable_pci(void) 117static inline void acpi_disable_pci(void)
118{ 118{
119 acpi_pci_disabled = 1; 119 acpi_pci_disabled = 1;
120 acpi_noirq_set(); 120 acpi_noirq_set();
121} 121}
122extern int acpi_irq_balance_set(char *str); 122extern int acpi_irq_balance_set(char *str);
@@ -144,8 +144,6 @@ extern void acpi_reserve_bootmem(void);
144 144
145#endif /*CONFIG_ACPI_SLEEP*/ 145#endif /*CONFIG_ACPI_SLEEP*/
146 146
147extern u8 x86_acpiid_to_apicid[];
148
149#define ARCH_HAS_POWER_INIT 1 147#define ARCH_HAS_POWER_INIT 1
150 148
151#endif /*__KERNEL__*/ 149#endif /*__KERNEL__*/
diff --git a/include/asm-i386/mach-es7000/mach_mpparse.h b/include/asm-i386/mach-es7000/mach_mpparse.h
index 99f66be240be..24990e546da3 100644
--- a/include/asm-i386/mach-es7000/mach_mpparse.h
+++ b/include/asm-i386/mach-es7000/mach_mpparse.h
@@ -3,13 +3,13 @@
3 3
4#include <linux/acpi.h> 4#include <linux/acpi.h>
5 5
6static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, 6static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name,
7 struct mpc_config_translation *translation) 7 struct mpc_config_translation *translation)
8{ 8{
9 Dprintk("Bus #%d is %s\n", m->mpc_busid, name); 9 Dprintk("Bus #%d is %s\n", m->mpc_busid, name);
10} 10}
11 11
12static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, 12static inline void mpc_oem_pci_bus(struct mpc_config_bus *m,
13 struct mpc_config_translation *translation) 13 struct mpc_config_translation *translation)
14{ 14{
15} 15}
@@ -22,7 +22,7 @@ static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
22 char *productid) 22 char *productid)
23{ 23{
24 if (mpc->mpc_oemptr) { 24 if (mpc->mpc_oemptr) {
25 struct mp_config_oemtable *oem_table = 25 struct mp_config_oemtable *oem_table =
26 (struct mp_config_oemtable *)mpc->mpc_oemptr; 26 (struct mp_config_oemtable *)mpc->mpc_oemptr;
27 if (!strncmp(oem, "UNISYS", 6)) 27 if (!strncmp(oem, "UNISYS", 6))
28 return parse_unisys_oem((char *)oem_table); 28 return parse_unisys_oem((char *)oem_table);
@@ -31,12 +31,13 @@ static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
31} 31}
32 32
33#ifdef CONFIG_ACPI 33#ifdef CONFIG_ACPI
34
34static inline int es7000_check_dsdt(void) 35static inline int es7000_check_dsdt(void)
35{ 36{
36 struct acpi_table_header *header = NULL; 37 struct acpi_table_header header;
37 if(!acpi_get_table_header_early(ACPI_DSDT, &header)) 38 memcpy(&header, 0, sizeof(struct acpi_table_header));
38 acpi_table_print(header, 0); 39 acpi_get_table_header(ACPI_SIG_DSDT, 0, &header);
39 if (!strncmp(header->oem_id, "UNISYS", 6)) 40 if (!strncmp(header.oem_id, "UNISYS", 6))
40 return 1; 41 return 1;
41 return 0; 42 return 0;
42} 43}
@@ -44,7 +45,7 @@ static inline int es7000_check_dsdt(void)
44/* Hook from generic ACPI tables.c */ 45/* Hook from generic ACPI tables.c */
45static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) 46static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
46{ 47{
47 unsigned long oem_addr; 48 unsigned long oem_addr;
48 if (!find_unisys_acpi_oem_table(&oem_addr)) { 49 if (!find_unisys_acpi_oem_table(&oem_addr)) {
49 if (es7000_check_dsdt()) 50 if (es7000_check_dsdt())
50 return parse_unisys_oem((char *)oem_addr); 51 return parse_unisys_oem((char *)oem_addr);
diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h
index 09a5dd0e44a8..5d03792d4f65 100644
--- a/include/asm-ia64/acpi.h
+++ b/include/asm-ia64/acpi.h
@@ -82,11 +82,11 @@ ia64_acpi_release_global_lock (unsigned int *lock)
82 return old & 0x1; 82 return old & 0x1;
83} 83}
84 84
85#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \ 85#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
86 ((Acq) = ia64_acpi_acquire_global_lock((unsigned int *) GLptr)) 86 ((Acq) = ia64_acpi_acquire_global_lock(&facs->global_lock))
87 87
88#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \ 88#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
89 ((Acq) = ia64_acpi_release_global_lock((unsigned int *) GLptr)) 89 ((Acq) = ia64_acpi_release_global_lock(&facs->global_lock))
90 90
91#define acpi_disabled 0 /* ACPI always enabled on IA64 */ 91#define acpi_disabled 0 /* ACPI always enabled on IA64 */
92#define acpi_noirq 0 /* ACPI always enabled on IA64 */ 92#define acpi_noirq 0 /* ACPI always enabled on IA64 */
@@ -119,8 +119,6 @@ extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
119extern int __initdata nid_to_pxm_map[MAX_NUMNODES]; 119extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
120#endif 120#endif
121 121
122extern u16 ia64_acpiid_to_sapicid[];
123
124/* 122/*
125 * Refer Intel ACPI _PDC support document for bit definitions 123 * Refer Intel ACPI _PDC support document for bit definitions
126 */ 124 */
diff --git a/include/asm-ia64/dma.h b/include/asm-ia64/dma.h
index dad3a735df8b..4d97f60f1ef5 100644
--- a/include/asm-ia64/dma.h
+++ b/include/asm-ia64/dma.h
@@ -19,4 +19,6 @@ extern unsigned long MAX_DMA_ADDRESS;
19 19
20#define free_dma(x) 20#define free_dma(x)
21 21
22void dma_mark_clean(void *addr, size_t size);
23
22#endif /* _ASM_IA64_DMA_H */ 24#endif /* _ASM_IA64_DMA_H */
diff --git a/include/asm-ia64/esi.h b/include/asm-ia64/esi.h
index 84aac0e0b583..40991c6ba647 100644
--- a/include/asm-ia64/esi.h
+++ b/include/asm-ia64/esi.h
@@ -19,7 +19,6 @@ enum esi_proc_type {
19 ESI_PROC_REENTRANT /* MP-safe and reentrant */ 19 ESI_PROC_REENTRANT /* MP-safe and reentrant */
20}; 20};
21 21
22extern int ia64_esi_init (void);
23extern struct ia64_sal_retval esi_call_phys (void *, u64 *); 22extern struct ia64_sal_retval esi_call_phys (void *, u64 *);
24extern int ia64_esi_call(efi_guid_t, struct ia64_sal_retval *, 23extern int ia64_esi_call(efi_guid_t, struct ia64_sal_retval *,
25 enum esi_proc_type, 24 enum esi_proc_type,
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h
index a3891eb3f217..3c96ac19154e 100644
--- a/include/asm-ia64/machvec.h
+++ b/include/asm-ia64/machvec.h
@@ -21,6 +21,7 @@ struct mm_struct;
21struct pci_bus; 21struct pci_bus;
22struct task_struct; 22struct task_struct;
23struct pci_dev; 23struct pci_dev;
24struct msi_desc;
24 25
25typedef void ia64_mv_setup_t (char **); 26typedef void ia64_mv_setup_t (char **);
26typedef void ia64_mv_cpu_init_t (void); 27typedef void ia64_mv_cpu_init_t (void);
@@ -79,7 +80,7 @@ typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *);
79typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *); 80typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *);
80typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *); 81typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *);
81 82
82typedef int ia64_mv_setup_msi_irq_t (unsigned int irq, struct pci_dev *pdev); 83typedef int ia64_mv_setup_msi_irq_t (struct pci_dev *pdev, struct msi_desc *);
83typedef void ia64_mv_teardown_msi_irq_t (unsigned int irq); 84typedef void ia64_mv_teardown_msi_irq_t (unsigned int irq);
84 85
85static inline void 86static inline void
diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h
index c8df75901083..6dd476b652c6 100644
--- a/include/asm-ia64/meminit.h
+++ b/include/asm-ia64/meminit.h
@@ -51,12 +51,13 @@ extern void efi_memmap_init(unsigned long *, unsigned long *);
51 51
52#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */ 52#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
53 53
54extern int register_active_ranges(u64 start, u64 end, void *arg);
55
54#ifdef CONFIG_VIRTUAL_MEM_MAP 56#ifdef CONFIG_VIRTUAL_MEM_MAP
55# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */ 57# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */
56 extern unsigned long vmalloc_end; 58 extern unsigned long vmalloc_end;
57 extern struct page *vmem_map; 59 extern struct page *vmem_map;
58 extern int find_largest_hole (u64 start, u64 end, void *arg); 60 extern int find_largest_hole (u64 start, u64 end, void *arg);
59 extern int register_active_ranges (u64 start, u64 end, void *arg);
60 extern int create_mem_map_page_table (u64 start, u64 end, void *arg); 61 extern int create_mem_map_page_table (u64 start, u64 end, void *arg);
61 extern int vmemmap_find_next_valid_pfn(int, int); 62 extern int vmemmap_find_next_valid_pfn(int, int);
62#else 63#else
diff --git a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h
index 393e04c42a2c..560c287b1233 100644
--- a/include/asm-ia64/pgalloc.h
+++ b/include/asm-ia64/pgalloc.h
@@ -137,7 +137,8 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
137static inline struct page *pte_alloc_one(struct mm_struct *mm, 137static inline struct page *pte_alloc_one(struct mm_struct *mm,
138 unsigned long addr) 138 unsigned long addr)
139{ 139{
140 return virt_to_page(pgtable_quicklist_alloc()); 140 void *pg = pgtable_quicklist_alloc();
141 return pg ? virt_to_page(pg) : NULL;
141} 142}
142 143
143static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 144static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
diff --git a/include/asm-ia64/sn/acpi.h b/include/asm-ia64/sn/acpi.h
index 2850a7ef5e71..9ce2801cbd57 100644
--- a/include/asm-ia64/sn/acpi.h
+++ b/include/asm-ia64/sn/acpi.h
@@ -11,6 +11,7 @@
11 11
12#include "acpi/acglobal.h" 12#include "acpi/acglobal.h"
13 13
14#define SN_ACPI_BASE_SUPPORT() (acpi_gbl_DSDT->oem_revision >= 0x20101) 14extern int sn_acpi_rev;
15#define SN_ACPI_BASE_SUPPORT() (sn_acpi_rev >= 0x20101)
15 16
16#endif /* _ASM_IA64_SN_ACPI_H */ 17#endif /* _ASM_IA64_SN_ACPI_H */
diff --git a/include/asm-ia64/sn/pcibr_provider.h b/include/asm-ia64/sn/pcibr_provider.h
index da3eade0cae2..17cb6cc3f21a 100644
--- a/include/asm-ia64/sn/pcibr_provider.h
+++ b/include/asm-ia64/sn/pcibr_provider.h
@@ -142,7 +142,7 @@ extern int pcibr_ate_alloc(struct pcibus_info *, int);
142extern void pcibr_ate_free(struct pcibus_info *, int); 142extern void pcibr_ate_free(struct pcibus_info *, int);
143extern void ate_write(struct pcibus_info *, int, int, u64); 143extern void ate_write(struct pcibus_info *, int, int, u64);
144extern int sal_pcibr_slot_enable(struct pcibus_info *soft, int device, 144extern int sal_pcibr_slot_enable(struct pcibus_info *soft, int device,
145 void *resp); 145 void *resp, char **ssdt);
146extern int sal_pcibr_slot_disable(struct pcibus_info *soft, int device, 146extern int sal_pcibr_slot_disable(struct pcibus_info *soft, int device,
147 int action, void *resp); 147 int action, void *resp);
148extern u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus); 148extern u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus);
diff --git a/include/asm-ia64/sn/pcidev.h b/include/asm-ia64/sn/pcidev.h
index 9fe89a93d880..1c2382cea807 100644
--- a/include/asm-ia64/sn/pcidev.h
+++ b/include/asm-ia64/sn/pcidev.h
@@ -70,10 +70,16 @@ extern void sn_irq_fixup(struct pci_dev *pci_dev,
70 struct sn_irq_info *sn_irq_info); 70 struct sn_irq_info *sn_irq_info);
71extern void sn_irq_unfixup(struct pci_dev *pci_dev); 71extern void sn_irq_unfixup(struct pci_dev *pci_dev);
72extern struct pcidev_info * sn_pcidev_info_get(struct pci_dev *); 72extern struct pcidev_info * sn_pcidev_info_get(struct pci_dev *);
73extern void sn_bus_fixup(struct pci_bus *);
74extern void sn_acpi_bus_fixup(struct pci_bus *);
75extern void sn_common_bus_fixup(struct pci_bus *, struct pcibus_bussoft *);
73extern void sn_bus_store_sysdata(struct pci_dev *dev); 76extern void sn_bus_store_sysdata(struct pci_dev *dev);
74extern void sn_bus_free_sysdata(void); 77extern void sn_bus_free_sysdata(void);
75extern void sn_generate_path(struct pci_bus *pci_bus, char *address); 78extern void sn_generate_path(struct pci_bus *pci_bus, char *address);
76extern void sn_pci_fixup_slot(struct pci_dev *dev); 79extern void sn_io_slot_fixup(struct pci_dev *);
80extern void sn_acpi_slot_fixup(struct pci_dev *);
81extern void sn_pci_fixup_slot(struct pci_dev *dev, struct pcidev_info *,
82 struct sn_irq_info *);
77extern void sn_pci_unfixup_slot(struct pci_dev *dev); 83extern void sn_pci_unfixup_slot(struct pci_dev *dev);
78extern void sn_irq_lh_init(void); 84extern void sn_irq_lh_init(void);
79#endif /* _ASM_IA64_SN_PCI_PCIDEV_H */ 85#endif /* _ASM_IA64_SN_PCI_PCIDEV_H */
diff --git a/include/asm-ia64/swiotlb.h b/include/asm-ia64/swiotlb.h
new file mode 100644
index 000000000000..452c162dee4e
--- /dev/null
+++ b/include/asm-ia64/swiotlb.h
@@ -0,0 +1,9 @@
1#ifndef _ASM_SWIOTLB_H
2#define _ASM_SWIOTLB_H 1
3
4#include <asm/machvec.h>
5
6#define SWIOTLB_ARCH_NEED_LATE_INIT
7#define SWIOTLB_ARCH_NEED_ALLOC
8
9#endif /* _ASM_SWIOTLB_H */
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 9b505b25544f..91698599f918 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -84,6 +84,7 @@ struct thread_info {
84#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ 84#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
85#define TIF_SYSCALL_TRACE 3 /* syscall trace active */ 85#define TIF_SYSCALL_TRACE 3 /* syscall trace active */
86#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */ 86#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
87#define TIF_SINGLESTEP 5 /* restore singlestep on return to user mode */
87#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 88#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
88#define TIF_MEMDIE 17 89#define TIF_MEMDIE 17
89#define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */ 90#define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */
@@ -92,7 +93,8 @@ struct thread_info {
92 93
93#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 94#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
94#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 95#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
95#define _TIF_SYSCALL_TRACEAUDIT (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) 96#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
97#define _TIF_SYSCALL_TRACEAUDIT (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP)
96#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 98#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
97#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 99#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
98#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 100#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
index 53c5c0ee122c..a9e1fa4cac4d 100644
--- a/include/asm-ia64/unistd.h
+++ b/include/asm-ia64/unistd.h
@@ -291,11 +291,13 @@
291#define __NR_sync_file_range 1300 291#define __NR_sync_file_range 1300
292#define __NR_tee 1301 292#define __NR_tee 1301
293#define __NR_vmsplice 1302 293#define __NR_vmsplice 1302
294/* 1303 reserved for move_pages */
295#define __NR_getcpu 1304
294 296
295#ifdef __KERNEL__ 297#ifdef __KERNEL__
296 298
297 299
298#define NR_syscalls 279 /* length of syscall table */ 300#define NR_syscalls 281 /* length of syscall table */
299 301
300#define __ARCH_WANT_SYS_RT_SIGACTION 302#define __ARCH_WANT_SYS_RT_SIGACTION
301 303
diff --git a/include/asm-mips/bootinfo.h b/include/asm-mips/bootinfo.h
index 8e321f53a382..c7c945baf1ee 100644
--- a/include/asm-mips/bootinfo.h
+++ b/include/asm-mips/bootinfo.h
@@ -243,6 +243,10 @@ extern struct boot_mem_map boot_mem_map;
243extern void add_memory_region(phys_t start, phys_t size, long type); 243extern void add_memory_region(phys_t start, phys_t size, long type);
244 244
245extern void prom_init(void); 245extern void prom_init(void);
246extern void prom_free_prom_memory(void);
247
248extern void free_init_pages(const char *what,
249 unsigned long begin, unsigned long end);
246 250
247/* 251/*
248 * Initial kernel command line, usually setup by prom_init() 252 * Initial kernel command line, usually setup by prom_init()
diff --git a/include/asm-mips/ddb5xxx/ddb5477.h b/include/asm-mips/ddb5xxx/ddb5477.h
index c5af4b73fdd7..6cf177caf6d5 100644
--- a/include/asm-mips/ddb5xxx/ddb5477.h
+++ b/include/asm-mips/ddb5xxx/ddb5477.h
@@ -17,6 +17,7 @@
17#ifndef __ASM_DDB5XXX_DDB5477_H 17#ifndef __ASM_DDB5XXX_DDB5477_H
18#define __ASM_DDB5XXX_DDB5477_H 18#define __ASM_DDB5XXX_DDB5477_H
19 19
20#include <irq.h>
20 21
21/* 22/*
22 * This contains macros that are specific to DDB5477 or renamed from 23 * This contains macros that are specific to DDB5477 or renamed from
@@ -251,14 +252,10 @@ extern void ll_vrc5477_irq_disable(int vrc5477_irq);
251 */ 252 */
252 253
253#define NUM_CPU_IRQ 8 254#define NUM_CPU_IRQ 8
254#define NUM_I8259_IRQ 16
255#define NUM_VRC5477_IRQ 32 255#define NUM_VRC5477_IRQ 32
256 256
257#define DDB_IRQ_BASE 0 257#define CPU_IRQ_BASE MIPS_CPU_IRQ_BASE
258 258#define VRC5477_IRQ_BASE (CPU_IRQ_BASE + NUM_CPU_IRQ)
259#define I8259_IRQ_BASE DDB_IRQ_BASE
260#define VRC5477_IRQ_BASE (I8259_IRQ_BASE + NUM_I8259_IRQ)
261#define CPU_IRQ_BASE (VRC5477_IRQ_BASE + NUM_VRC5477_IRQ)
262 259
263/* 260/*
264 * vrc5477 irq defs 261 * vrc5477 irq defs
@@ -300,22 +297,22 @@ extern void ll_vrc5477_irq_disable(int vrc5477_irq);
300/* 297/*
301 * i2859 irq assignment 298 * i2859 irq assignment
302 */ 299 */
303#define I8259_IRQ_RESERVED_0 (0 + I8259_IRQ_BASE) 300#define I8259_IRQ_RESERVED_0 (0 + I8259A_IRQ_BASE)
304#define I8259_IRQ_KEYBOARD (1 + I8259_IRQ_BASE) /* M1543 default */ 301#define I8259_IRQ_KEYBOARD (1 + I8259A_IRQ_BASE) /* M1543 default */
305#define I8259_IRQ_CASCADE (2 + I8259_IRQ_BASE) 302#define I8259_IRQ_CASCADE (2 + I8259A_IRQ_BASE)
306#define I8259_IRQ_UART_B (3 + I8259_IRQ_BASE) /* M1543 default, may conflict with RTC according to schematic diagram */ 303#define I8259_IRQ_UART_B (3 + I8259A_IRQ_BASE) /* M1543 default, may conflict with RTC according to schematic diagram */
307#define I8259_IRQ_UART_A (4 + I8259_IRQ_BASE) /* M1543 default */ 304#define I8259_IRQ_UART_A (4 + I8259A_IRQ_BASE) /* M1543 default */
308#define I8259_IRQ_PARALLEL (5 + I8259_IRQ_BASE) /* M1543 default */ 305#define I8259_IRQ_PARALLEL (5 + I8259A_IRQ_BASE) /* M1543 default */
309#define I8259_IRQ_RESERVED_6 (6 + I8259_IRQ_BASE) 306#define I8259_IRQ_RESERVED_6 (6 + I8259A_IRQ_BASE)
310#define I8259_IRQ_RESERVED_7 (7 + I8259_IRQ_BASE) 307#define I8259_IRQ_RESERVED_7 (7 + I8259A_IRQ_BASE)
311#define I8259_IRQ_RTC (8 + I8259_IRQ_BASE) /* who set this? */ 308#define I8259_IRQ_RTC (8 + I8259A_IRQ_BASE) /* who set this? */
312#define I8259_IRQ_USB (9 + I8259_IRQ_BASE) /* ddb_setup */ 309#define I8259_IRQ_USB (9 + I8259A_IRQ_BASE) /* ddb_setup */
313#define I8259_IRQ_PMU (10 + I8259_IRQ_BASE) /* ddb_setup */ 310#define I8259_IRQ_PMU (10 + I8259A_IRQ_BASE) /* ddb_setup */
314#define I8259_IRQ_RESERVED_11 (11 + I8259_IRQ_BASE) 311#define I8259_IRQ_RESERVED_11 (11 + I8259A_IRQ_BASE)
315#define I8259_IRQ_RESERVED_12 (12 + I8259_IRQ_BASE) /* m1543_irq_setup */ 312#define I8259_IRQ_RESERVED_12 (12 + I8259A_IRQ_BASE) /* m1543_irq_setup */
316#define I8259_IRQ_RESERVED_13 (13 + I8259_IRQ_BASE) 313#define I8259_IRQ_RESERVED_13 (13 + I8259A_IRQ_BASE)
317#define I8259_IRQ_HDC1 (14 + I8259_IRQ_BASE) /* default and ddb_setup */ 314#define I8259_IRQ_HDC1 (14 + I8259A_IRQ_BASE) /* default and ddb_setup */
318#define I8259_IRQ_HDC2 (15 + I8259_IRQ_BASE) /* default */ 315#define I8259_IRQ_HDC2 (15 + I8259A_IRQ_BASE) /* default */
319 316
320 317
321/* 318/*
diff --git a/include/asm-mips/dec/interrupts.h b/include/asm-mips/dec/interrupts.h
index 273e4d65bfe6..e10d341067c8 100644
--- a/include/asm-mips/dec/interrupts.h
+++ b/include/asm-mips/dec/interrupts.h
@@ -14,6 +14,7 @@
14#ifndef __ASM_DEC_INTERRUPTS_H 14#ifndef __ASM_DEC_INTERRUPTS_H
15#define __ASM_DEC_INTERRUPTS_H 15#define __ASM_DEC_INTERRUPTS_H
16 16
17#include <irq.h>
17#include <asm/mipsregs.h> 18#include <asm/mipsregs.h>
18 19
19 20
@@ -87,7 +88,7 @@
87#define DEC_CPU_INR_SW1 1 /* software #1 */ 88#define DEC_CPU_INR_SW1 1 /* software #1 */
88#define DEC_CPU_INR_SW0 0 /* software #0 */ 89#define DEC_CPU_INR_SW0 0 /* software #0 */
89 90
90#define DEC_CPU_IRQ_BASE 0 /* first IRQ assigned to CPU */ 91#define DEC_CPU_IRQ_BASE MIPS_CPU_IRQ_BASE /* first IRQ assigned to CPU */
91 92
92#define DEC_CPU_IRQ_NR(n) ((n) + DEC_CPU_IRQ_BASE) 93#define DEC_CPU_IRQ_NR(n) ((n) + DEC_CPU_IRQ_BASE)
93#define DEC_CPU_IRQ_MASK(n) (1 << ((n) + CAUSEB_IP)) 94#define DEC_CPU_IRQ_MASK(n) (1 << ((n) + CAUSEB_IP))
diff --git a/include/asm-mips/dma.h b/include/asm-mips/dma.h
index 23f789c80845..e06ef0776d48 100644
--- a/include/asm-mips/dma.h
+++ b/include/asm-mips/dma.h
@@ -91,6 +91,7 @@
91#else 91#else
92#define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x01000000) 92#define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x01000000)
93#endif 93#endif
94#define MAX_DMA_PFN PFN_DOWN(virt_to_phys((void *)MAX_DMA_ADDRESS))
94 95
95/* 8237 DMA controllers */ 96/* 8237 DMA controllers */
96#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ 97#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
diff --git a/include/asm-mips/emma2rh/emma2rh.h b/include/asm-mips/emma2rh/emma2rh.h
index 4fb8df71caa9..6a1af0af51e3 100644
--- a/include/asm-mips/emma2rh/emma2rh.h
+++ b/include/asm-mips/emma2rh/emma2rh.h
@@ -24,6 +24,8 @@
24#ifndef __ASM_EMMA2RH_EMMA2RH_H 24#ifndef __ASM_EMMA2RH_EMMA2RH_H
25#define __ASM_EMMA2RH_EMMA2RH_H 25#define __ASM_EMMA2RH_EMMA2RH_H
26 26
27#include <irq.h>
28
27/* 29/*
28 * EMMA2RH registers 30 * EMMA2RH registers
29 */ 31 */
@@ -104,7 +106,8 @@
104#define NUM_EMMA2RH_IRQ 96 106#define NUM_EMMA2RH_IRQ 96
105 107
106#define CPU_EMMA2RH_CASCADE 2 108#define CPU_EMMA2RH_CASCADE 2
107#define EMMA2RH_IRQ_BASE 0 109#define CPU_IRQ_BASE MIPS_CPU_IRQ_BASE
110#define EMMA2RH_IRQ_BASE (CPU_IRQ_BASE + NUM_CPU_IRQ)
108 111
109/* 112/*
110 * emma2rh irq defs 113 * emma2rh irq defs
diff --git a/include/asm-mips/emma2rh/markeins.h b/include/asm-mips/emma2rh/markeins.h
index 8fa766795078..973b0628490d 100644
--- a/include/asm-mips/emma2rh/markeins.h
+++ b/include/asm-mips/emma2rh/markeins.h
@@ -33,7 +33,6 @@
33 33
34#define EMMA2RH_SW_IRQ_BASE (EMMA2RH_IRQ_BASE + NUM_EMMA2RH_IRQ) 34#define EMMA2RH_SW_IRQ_BASE (EMMA2RH_IRQ_BASE + NUM_EMMA2RH_IRQ)
35#define EMMA2RH_GPIO_IRQ_BASE (EMMA2RH_SW_IRQ_BASE + NUM_EMMA2RH_IRQ_SW) 35#define EMMA2RH_GPIO_IRQ_BASE (EMMA2RH_SW_IRQ_BASE + NUM_EMMA2RH_IRQ_SW)
36#define CPU_IRQ_BASE (EMMA2RH_GPIO_IRQ_BASE + NUM_EMMA2RH_IRQ_GPIO)
37 36
38#define EMMA2RH_SW_IRQ_INT0 (0+EMMA2RH_SW_IRQ_BASE) 37#define EMMA2RH_SW_IRQ_INT0 (0+EMMA2RH_SW_IRQ_BASE)
39#define EMMA2RH_SW_IRQ_INT1 (1+EMMA2RH_SW_IRQ_BASE) 38#define EMMA2RH_SW_IRQ_INT1 (1+EMMA2RH_SW_IRQ_BASE)
diff --git a/include/asm-mips/i8259.h b/include/asm-mips/i8259.h
index 4df8d8b118c0..e88a01607fea 100644
--- a/include/asm-mips/i8259.h
+++ b/include/asm-mips/i8259.h
@@ -18,6 +18,7 @@
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19 19
20#include <asm/io.h> 20#include <asm/io.h>
21#include <irq.h>
21 22
22/* i8259A PIC registers */ 23/* i8259A PIC registers */
23#define PIC_MASTER_CMD 0x20 24#define PIC_MASTER_CMD 0x20
@@ -42,8 +43,6 @@ extern void disable_8259A_irq(unsigned int irq);
42 43
43extern void init_i8259_irqs(void); 44extern void init_i8259_irqs(void);
44 45
45#define I8259A_IRQ_BASE 0
46
47/* 46/*
48 * Do the traditional i8259 interrupt polling thing. This is for the few 47 * Do the traditional i8259 interrupt polling thing. This is for the few
49 * cases where no better interrupt acknowledge method is available and we 48 * cases where no better interrupt acknowledge method is available and we
diff --git a/include/asm-mips/io.h b/include/asm-mips/io.h
index d77b657c09c7..67f081078904 100644
--- a/include/asm-mips/io.h
+++ b/include/asm-mips/io.h
@@ -115,7 +115,7 @@ static inline void set_io_port_base(unsigned long base)
115 */ 115 */
116static inline unsigned long virt_to_phys(volatile const void *address) 116static inline unsigned long virt_to_phys(volatile const void *address)
117{ 117{
118 return (unsigned long)address - PAGE_OFFSET; 118 return (unsigned long)address - PAGE_OFFSET + PHYS_OFFSET;
119} 119}
120 120
121/* 121/*
@@ -132,7 +132,7 @@ static inline unsigned long virt_to_phys(volatile const void *address)
132 */ 132 */
133static inline void * phys_to_virt(unsigned long address) 133static inline void * phys_to_virt(unsigned long address)
134{ 134{
135 return (void *)(address + PAGE_OFFSET); 135 return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
136} 136}
137 137
138/* 138/*
diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h
index 386da82e5774..91803ba30ff2 100644
--- a/include/asm-mips/irq.h
+++ b/include/asm-mips/irq.h
@@ -18,7 +18,7 @@
18#ifdef CONFIG_I8259 18#ifdef CONFIG_I8259
19static inline int irq_canonicalize(int irq) 19static inline int irq_canonicalize(int irq)
20{ 20{
21 return ((irq == 2) ? 9 : irq); 21 return ((irq == I8259A_IRQ_BASE + 2) ? I8259A_IRQ_BASE + 9 : irq);
22} 22}
23#else 23#else
24#define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */ 24#define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */
diff --git a/include/asm-mips/irq_cpu.h b/include/asm-mips/irq_cpu.h
index ed3d1e3d09ec..ef6a07cddb23 100644
--- a/include/asm-mips/irq_cpu.h
+++ b/include/asm-mips/irq_cpu.h
@@ -13,8 +13,8 @@
13#ifndef _ASM_IRQ_CPU_H 13#ifndef _ASM_IRQ_CPU_H
14#define _ASM_IRQ_CPU_H 14#define _ASM_IRQ_CPU_H
15 15
16extern void mips_cpu_irq_init(int irq_base); 16extern void mips_cpu_irq_init(void);
17extern void rm7k_cpu_irq_init(int irq_base); 17extern void rm7k_cpu_irq_init(void);
18extern void rm9k_cpu_irq_init(int irq_base); 18extern void rm9k_cpu_irq_init(void);
19 19
20#endif /* _ASM_IRQ_CPU_H */ 20#endif /* _ASM_IRQ_CPU_H */
diff --git a/include/asm-mips/mach-au1x00/au1000.h b/include/asm-mips/mach-au1x00/au1000.h
index 582acd8adb81..58fca8a5a9a6 100644
--- a/include/asm-mips/mach-au1x00/au1000.h
+++ b/include/asm-mips/mach-au1x00/au1000.h
@@ -39,6 +39,7 @@
39#ifndef _LANGUAGE_ASSEMBLY 39#ifndef _LANGUAGE_ASSEMBLY
40 40
41#include <linux/delay.h> 41#include <linux/delay.h>
42#include <linux/types.h>
42#include <asm/io.h> 43#include <asm/io.h>
43 44
44/* cpu pipeline flush */ 45/* cpu pipeline flush */
diff --git a/include/asm-mips/mach-cobalt/cobalt.h b/include/asm-mips/mach-cobalt/cobalt.h
index 00b0fc68d5cb..24a8d51a55a3 100644
--- a/include/asm-mips/mach-cobalt/cobalt.h
+++ b/include/asm-mips/mach-cobalt/cobalt.h
@@ -12,6 +12,8 @@
12#ifndef __ASM_COBALT_H 12#ifndef __ASM_COBALT_H
13#define __ASM_COBALT_H 13#define __ASM_COBALT_H
14 14
15#include <irq.h>
16
15/* 17/*
16 * i8259 legacy interrupts used on Cobalt: 18 * i8259 legacy interrupts used on Cobalt:
17 * 19 *
@@ -25,7 +27,7 @@
25/* 27/*
26 * CPU IRQs are 16 ... 23 28 * CPU IRQs are 16 ... 23
27 */ 29 */
28#define COBALT_CPU_IRQ 16 30#define COBALT_CPU_IRQ MIPS_CPU_IRQ_BASE
29 31
30#define COBALT_GALILEO_IRQ (COBALT_CPU_IRQ + 2) 32#define COBALT_GALILEO_IRQ (COBALT_CPU_IRQ + 2)
31#define COBALT_SCC_IRQ (COBALT_CPU_IRQ + 3) /* pre-production has 85C30 */ 33#define COBALT_SCC_IRQ (COBALT_CPU_IRQ + 3) /* pre-production has 85C30 */
diff --git a/include/asm-mips/mach-emma2rh/irq.h b/include/asm-mips/mach-emma2rh/irq.h
index bce64244b800..5439eb856461 100644
--- a/include/asm-mips/mach-emma2rh/irq.h
+++ b/include/asm-mips/mach-emma2rh/irq.h
@@ -10,4 +10,6 @@
10 10
11#define NR_IRQS 256 11#define NR_IRQS 256
12 12
13#include_next <irq.h>
14
13#endif /* __ASM_MACH_EMMA2RH_IRQ_H */ 15#endif /* __ASM_MACH_EMMA2RH_IRQ_H */
diff --git a/include/asm-mips/mach-generic/irq.h b/include/asm-mips/mach-generic/irq.h
index 500e10ff24de..70d9a25132c5 100644
--- a/include/asm-mips/mach-generic/irq.h
+++ b/include/asm-mips/mach-generic/irq.h
@@ -8,6 +8,38 @@
8#ifndef __ASM_MACH_GENERIC_IRQ_H 8#ifndef __ASM_MACH_GENERIC_IRQ_H
9#define __ASM_MACH_GENERIC_IRQ_H 9#define __ASM_MACH_GENERIC_IRQ_H
10 10
11#ifndef NR_IRQS
11#define NR_IRQS 128 12#define NR_IRQS 128
13#endif
14
15#ifdef CONFIG_I8259
16#ifndef I8259A_IRQ_BASE
17#define I8259A_IRQ_BASE 0
18#endif
19#endif
20
21#ifdef CONFIG_IRQ_CPU
22
23#ifndef MIPS_CPU_IRQ_BASE
24#ifdef CONFIG_I8259
25#define MIPS_CPU_IRQ_BASE 16
26#else
27#define MIPS_CPU_IRQ_BASE 0
28#endif /* CONFIG_I8259 */
29#endif
30
31#ifdef CONFIG_IRQ_CPU_RM7K
32#ifndef RM7K_CPU_IRQ_BASE
33#define RM7K_CPU_IRQ_BASE (MIPS_CPU_IRQ_BASE+8)
34#endif
35#endif
36
37#ifdef CONFIG_IRQ_CPU_RM9K
38#ifndef RM9K_CPU_IRQ_BASE
39#define RM9K_CPU_IRQ_BASE (MIPS_CPU_IRQ_BASE+12)
40#endif
41#endif
42
43#endif /* CONFIG_IRQ_CPU */
12 44
13#endif /* __ASM_MACH_GENERIC_IRQ_H */ 45#endif /* __ASM_MACH_GENERIC_IRQ_H */
diff --git a/include/asm-mips/mach-mips/irq.h b/include/asm-mips/mach-mips/irq.h
index e994b0c01227..9b9da26683c2 100644
--- a/include/asm-mips/mach-mips/irq.h
+++ b/include/asm-mips/mach-mips/irq.h
@@ -4,4 +4,6 @@
4 4
5#define NR_IRQS 256 5#define NR_IRQS 256
6 6
7#include_next <irq.h>
8
7#endif /* __ASM_MACH_MIPS_IRQ_H */ 9#endif /* __ASM_MACH_MIPS_IRQ_H */
diff --git a/include/asm-mips/mach-vr41xx/irq.h b/include/asm-mips/mach-vr41xx/irq.h
new file mode 100644
index 000000000000..848812296052
--- /dev/null
+++ b/include/asm-mips/mach-vr41xx/irq.h
@@ -0,0 +1,11 @@
1#ifndef __ASM_MACH_VR41XX_IRQ_H
2#define __ASM_MACH_VR41XX_IRQ_H
3
4#include <asm/vr41xx/irq.h> /* for MIPS_CPU_IRQ_BASE */
5#ifdef CONFIG_NEC_CMBVR4133
6#include <asm/vr41xx/cmbvr4133.h> /* for I8259A_IRQ_BASE */
7#endif
8
9#include_next <irq.h>
10
11#endif /* __ASM_MACH_VR41XX_IRQ_H */
diff --git a/include/asm-mips/mips-boards/atlasint.h b/include/asm-mips/mips-boards/atlasint.h
index b15e4ea0b091..76add42e486e 100644
--- a/include/asm-mips/mips-boards/atlasint.h
+++ b/include/asm-mips/mips-boards/atlasint.h
@@ -26,10 +26,12 @@
26#ifndef _MIPS_ATLASINT_H 26#ifndef _MIPS_ATLASINT_H
27#define _MIPS_ATLASINT_H 27#define _MIPS_ATLASINT_H
28 28
29#include <irq.h>
30
29/* 31/*
30 * Interrupts 0..7 are used for Atlas CPU interrupts (nonEIC mode) 32 * Interrupts 0..7 are used for Atlas CPU interrupts (nonEIC mode)
31 */ 33 */
32#define MIPSCPU_INT_BASE 0 34#define MIPSCPU_INT_BASE MIPS_CPU_IRQ_BASE
33 35
34/* CPU interrupt offsets */ 36/* CPU interrupt offsets */
35#define MIPSCPU_INT_SW0 0 37#define MIPSCPU_INT_SW0 0
diff --git a/include/asm-mips/mips-boards/maltaint.h b/include/asm-mips/mips-boards/maltaint.h
index da6cc2fbbc78..9180d6466113 100644
--- a/include/asm-mips/mips-boards/maltaint.h
+++ b/include/asm-mips/mips-boards/maltaint.h
@@ -25,6 +25,8 @@
25#ifndef _MIPS_MALTAINT_H 25#ifndef _MIPS_MALTAINT_H
26#define _MIPS_MALTAINT_H 26#define _MIPS_MALTAINT_H
27 27
28#include <irq.h>
29
28/* 30/*
29 * Interrupts 0..15 are used for Malta ISA compatible interrupts 31 * Interrupts 0..15 are used for Malta ISA compatible interrupts
30 */ 32 */
@@ -33,7 +35,7 @@
33/* 35/*
34 * Interrupts 16..23 are used for Malta CPU interrupts (nonEIC mode) 36 * Interrupts 16..23 are used for Malta CPU interrupts (nonEIC mode)
35 */ 37 */
36#define MIPSCPU_INT_BASE 16 38#define MIPSCPU_INT_BASE MIPS_CPU_IRQ_BASE
37 39
38/* CPU interrupt offsets */ 40/* CPU interrupt offsets */
39#define MIPSCPU_INT_SW0 0 41#define MIPSCPU_INT_SW0 0
diff --git a/include/asm-mips/mips-boards/prom.h b/include/asm-mips/mips-boards/prom.h
index 4168c7fcd43e..7bf6f5f6ab9c 100644
--- a/include/asm-mips/mips-boards/prom.h
+++ b/include/asm-mips/mips-boards/prom.h
@@ -33,7 +33,6 @@ extern void prom_printf(char *fmt, ...);
33extern void prom_init_cmdline(void); 33extern void prom_init_cmdline(void);
34extern void prom_meminit(void); 34extern void prom_meminit(void);
35extern void prom_fixup_mem_map(unsigned long start_mem, unsigned long end_mem); 35extern void prom_fixup_mem_map(unsigned long start_mem, unsigned long end_mem);
36extern unsigned long prom_free_prom_memory (void);
37extern void mips_display_message(const char *str); 36extern void mips_display_message(const char *str);
38extern void mips_display_word(unsigned int num); 37extern void mips_display_word(unsigned int num);
39extern int get_ethernet_addr(char *ethernet_addr); 38extern int get_ethernet_addr(char *ethernet_addr);
diff --git a/include/asm-mips/mips-boards/seadint.h b/include/asm-mips/mips-boards/seadint.h
index 365c2a3c64f5..4f6a3933699d 100644
--- a/include/asm-mips/mips-boards/seadint.h
+++ b/include/asm-mips/mips-boards/seadint.h
@@ -20,10 +20,12 @@
20#ifndef _MIPS_SEADINT_H 20#ifndef _MIPS_SEADINT_H
21#define _MIPS_SEADINT_H 21#define _MIPS_SEADINT_H
22 22
23#include <irq.h>
24
23/* 25/*
24 * Interrupts 0..7 are used for SEAD CPU interrupts 26 * Interrupts 0..7 are used for SEAD CPU interrupts
25 */ 27 */
26#define MIPSCPU_INT_BASE 0 28#define MIPSCPU_INT_BASE MIPS_CPU_IRQ_BASE
27 29
28#define MIPSCPU_INT_UART0 2 30#define MIPSCPU_INT_UART0 2
29#define MIPSCPU_INT_UART1 3 31#define MIPSCPU_INT_UART1 3
diff --git a/include/asm-mips/mips-boards/simint.h b/include/asm-mips/mips-boards/simint.h
index 4952e0b3bf11..54f2fe621d69 100644
--- a/include/asm-mips/mips-boards/simint.h
+++ b/include/asm-mips/mips-boards/simint.h
@@ -17,10 +17,11 @@
17#ifndef _MIPS_SIMINT_H 17#ifndef _MIPS_SIMINT_H
18#define _MIPS_SIMINT_H 18#define _MIPS_SIMINT_H
19 19
20#include <irq.h>
20 21
21#define SIM_INT_BASE 0 22#define SIM_INT_BASE 0
22#define MIPSCPU_INT_MB0 2 23#define MIPSCPU_INT_MB0 2
23#define MIPSCPU_INT_BASE 16 24#define MIPSCPU_INT_BASE MIPS_CPU_IRQ_BASE
24#define MIPS_CPU_TIMER_IRQ 7 25#define MIPS_CPU_TIMER_IRQ 7
25 26
26 27
diff --git a/include/asm-mips/mipsmtregs.h b/include/asm-mips/mipsmtregs.h
index 3e9468f424f4..294bca12cd3f 100644
--- a/include/asm-mips/mipsmtregs.h
+++ b/include/asm-mips/mipsmtregs.h
@@ -165,8 +165,6 @@
165 165
166#ifndef __ASSEMBLY__ 166#ifndef __ASSEMBLY__
167 167
168extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value);
169
170static inline unsigned int dvpe(void) 168static inline unsigned int dvpe(void)
171{ 169{
172 int res = 0; 170 int res = 0;
diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h
index 2f9e1a9ec51f..d3fbd83ff545 100644
--- a/include/asm-mips/page.h
+++ b/include/asm-mips/page.h
@@ -34,6 +34,20 @@
34 34
35#ifndef __ASSEMBLY__ 35#ifndef __ASSEMBLY__
36 36
37/*
38 * This gives the physical RAM offset.
39 */
40#ifndef PHYS_OFFSET
41#define PHYS_OFFSET 0UL
42#endif
43
44/*
45 * It's normally defined only for FLATMEM config but it's
46 * used in our early mem init code for all memory models.
47 * So always define it.
48 */
49#define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET)
50
37#include <linux/pfn.h> 51#include <linux/pfn.h>
38#include <asm/io.h> 52#include <asm/io.h>
39 53
@@ -132,20 +146,23 @@ typedef struct { unsigned long pgprot; } pgprot_t;
132/* to align the pointer to the (next) page boundary */ 146/* to align the pointer to the (next) page boundary */
133#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK) 147#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
134 148
149/*
150 * __pa()/__va() should be used only during mem init.
151 */
135#if defined(CONFIG_64BIT) && !defined(CONFIG_BUILD_ELF64) 152#if defined(CONFIG_64BIT) && !defined(CONFIG_BUILD_ELF64)
136#define __pa_page_offset(x) ((unsigned long)(x) < CKSEG0 ? PAGE_OFFSET : CKSEG0) 153#define __pa_page_offset(x) ((unsigned long)(x) < CKSEG0 ? PAGE_OFFSET : CKSEG0)
137#else 154#else
138#define __pa_page_offset(x) PAGE_OFFSET 155#define __pa_page_offset(x) PAGE_OFFSET
139#endif 156#endif
140#define __pa(x) ((unsigned long)(x) - __pa_page_offset(x)) 157#define __pa(x) ((unsigned long)(x) - __pa_page_offset(x) + PHYS_OFFSET)
141#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x),0)) 158#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
142#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET)) 159#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x),0))
143 160
144#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) 161#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
145 162
146#ifdef CONFIG_FLATMEM 163#ifdef CONFIG_FLATMEM
147 164
148#define pfn_valid(pfn) ((pfn) < max_mapnr) 165#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr)
149 166
150#elif defined(CONFIG_SPARSEMEM) 167#elif defined(CONFIG_SPARSEMEM)
151 168
diff --git a/include/asm-mips/rtlx.h b/include/asm-mips/rtlx.h
index 76cd51c6be39..59162f74a798 100644
--- a/include/asm-mips/rtlx.h
+++ b/include/asm-mips/rtlx.h
@@ -6,9 +6,10 @@
6#ifndef __ASM_RTLX_H 6#ifndef __ASM_RTLX_H
7#define __ASM_RTLX_H_ 7#define __ASM_RTLX_H_
8 8
9#include <irq.h>
10
9#define LX_NODE_BASE 10 11#define LX_NODE_BASE 10
10 12
11#define MIPSCPU_INT_BASE 16
12#define MIPS_CPU_RTLX_IRQ 0 13#define MIPS_CPU_RTLX_IRQ 0
13 14
14#define RTLX_VERSION 2 15#define RTLX_VERSION 2
diff --git a/include/asm-mips/sections.h b/include/asm-mips/sections.h
index f7016278b266..b7e37262c246 100644
--- a/include/asm-mips/sections.h
+++ b/include/asm-mips/sections.h
@@ -3,6 +3,4 @@
3 3
4#include <asm-generic/sections.h> 4#include <asm-generic/sections.h>
5 5
6extern char _fdata;
7
8#endif /* _ASM_SECTIONS_H */ 6#endif /* _ASM_SECTIONS_H */
diff --git a/include/asm-mips/sgi/ip22.h b/include/asm-mips/sgi/ip22.h
index bbfc05c3cab9..6592f3bd1999 100644
--- a/include/asm-mips/sgi/ip22.h
+++ b/include/asm-mips/sgi/ip22.h
@@ -21,15 +21,16 @@
21 * HAL2 driver). This will prevent many complications, trust me ;-) 21 * HAL2 driver). This will prevent many complications, trust me ;-)
22 */ 22 */
23 23
24#include <irq.h>
24#include <asm/sgi/ioc.h> 25#include <asm/sgi/ioc.h>
25 26
26#define SGINT_EISA 0 /* 16 EISA irq levels (Indigo2) */ 27#define SGINT_EISA 0 /* 16 EISA irq levels (Indigo2) */
27#define SGINT_CPU 16 /* MIPS CPU define 8 interrupt sources */ 28#define SGINT_CPU MIPS_CPU_IRQ_BASE /* MIPS CPU define 8 interrupt sources */
28#define SGINT_LOCAL0 24 /* 8 local0 irq levels */ 29#define SGINT_LOCAL0 (SGINT_CPU+8) /* 8 local0 irq levels */
29#define SGINT_LOCAL1 32 /* 8 local1 irq levels */ 30#define SGINT_LOCAL1 (SGINT_CPU+16) /* 8 local1 irq levels */
30#define SGINT_LOCAL2 40 /* 8 local2 vectored irq levels */ 31#define SGINT_LOCAL2 (SGINT_CPU+24) /* 8 local2 vectored irq levels */
31#define SGINT_LOCAL3 48 /* 8 local3 vectored irq levels */ 32#define SGINT_LOCAL3 (SGINT_CPU+32) /* 8 local3 vectored irq levels */
32#define SGINT_END 56 /* End of 'spaces' */ 33#define SGINT_END (SGINT_CPU+40) /* End of 'spaces' */
33 34
34/* 35/*
35 * Individual interrupt definitions for the Indy and Indigo2 36 * Individual interrupt definitions for the Indy and Indigo2
diff --git a/include/asm-mips/smtc_ipi.h b/include/asm-mips/smtc_ipi.h
index f22c3e2f993a..55f3419f6546 100644
--- a/include/asm-mips/smtc_ipi.h
+++ b/include/asm-mips/smtc_ipi.h
@@ -44,9 +44,6 @@ struct smtc_ipi_q {
44 int depth; 44 int depth;
45}; 45};
46 46
47extern struct smtc_ipi_q IPIQ[NR_CPUS];
48extern struct smtc_ipi_q freeIPIq;
49
50static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p) 47static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p)
51{ 48{
52 long flags; 49 long flags;
diff --git a/include/asm-mips/uaccess.h b/include/asm-mips/uaccess.h
index 1cdd4eeb2f73..c12ebc53ef31 100644
--- a/include/asm-mips/uaccess.h
+++ b/include/asm-mips/uaccess.h
@@ -488,7 +488,8 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
488}) 488})
489 489
490/* 490/*
491 * __copy_from_user: - Copy a block of data from user space, with less checking. * @to: Destination address, in kernel space. 491 * __copy_from_user: - Copy a block of data from user space, with less checking.
492 * @to: Destination address, in kernel space.
492 * @from: Source address, in user space. 493 * @from: Source address, in user space.
493 * @n: Number of bytes to copy. 494 * @n: Number of bytes to copy.
494 * 495 *
diff --git a/include/asm-mips/vr41xx/cmbvr4133.h b/include/asm-mips/vr41xx/cmbvr4133.h
index 9490ade58b46..42300037d593 100644
--- a/include/asm-mips/vr41xx/cmbvr4133.h
+++ b/include/asm-mips/vr41xx/cmbvr4133.h
@@ -35,8 +35,8 @@
35#define CMBVR41XX_INTD_IRQ GIU_IRQ(CMBVR41XX_INTD_PIN) 35#define CMBVR41XX_INTD_IRQ GIU_IRQ(CMBVR41XX_INTD_PIN)
36#define CMBVR41XX_INTE_IRQ GIU_IRQ(CMBVR41XX_INTE_PIN) 36#define CMBVR41XX_INTE_IRQ GIU_IRQ(CMBVR41XX_INTE_PIN)
37 37
38#define I8259_IRQ_BASE 72 38#define I8259A_IRQ_BASE 72
39#define I8259_IRQ(x) (I8259_IRQ_BASE + (x)) 39#define I8259_IRQ(x) (I8259A_IRQ_BASE + (x))
40#define TIMER_IRQ I8259_IRQ(0) 40#define TIMER_IRQ I8259_IRQ(0)
41#define KEYBOARD_IRQ I8259_IRQ(1) 41#define KEYBOARD_IRQ I8259_IRQ(1)
42#define I8259_SLAVE_IRQ I8259_IRQ(2) 42#define I8259_SLAVE_IRQ I8259_IRQ(2)
@@ -52,6 +52,5 @@
52#define AUX_IRQ I8259_IRQ(12) 52#define AUX_IRQ I8259_IRQ(12)
53#define IDE_PRIMARY_IRQ I8259_IRQ(14) 53#define IDE_PRIMARY_IRQ I8259_IRQ(14)
54#define IDE_SECONDARY_IRQ I8259_IRQ(15) 54#define IDE_SECONDARY_IRQ I8259_IRQ(15)
55#define I8259_IRQ_LAST IDE_SECONDARY_IRQ
56 55
57#endif /* __NEC_CMBVR4133_H */ 56#endif /* __NEC_CMBVR4133_H */
diff --git a/include/asm-s390/compat.h b/include/asm-s390/compat.h
index 356a0b183539..296f4f1a20e1 100644
--- a/include/asm-s390/compat.h
+++ b/include/asm-s390/compat.h
@@ -6,6 +6,34 @@
6#include <linux/types.h> 6#include <linux/types.h>
7#include <linux/sched.h> 7#include <linux/sched.h>
8 8
9#define PSW32_MASK_PER 0x40000000UL
10#define PSW32_MASK_DAT 0x04000000UL
11#define PSW32_MASK_IO 0x02000000UL
12#define PSW32_MASK_EXT 0x01000000UL
13#define PSW32_MASK_KEY 0x00F00000UL
14#define PSW32_MASK_MCHECK 0x00040000UL
15#define PSW32_MASK_WAIT 0x00020000UL
16#define PSW32_MASK_PSTATE 0x00010000UL
17#define PSW32_MASK_ASC 0x0000C000UL
18#define PSW32_MASK_CC 0x00003000UL
19#define PSW32_MASK_PM 0x00000f00UL
20
21#define PSW32_ADDR_AMODE31 0x80000000UL
22#define PSW32_ADDR_INSN 0x7FFFFFFFUL
23
24#define PSW32_BASE_BITS 0x00080000UL
25
26#define PSW32_ASC_PRIMARY 0x00000000UL
27#define PSW32_ASC_ACCREG 0x00004000UL
28#define PSW32_ASC_SECONDARY 0x00008000UL
29#define PSW32_ASC_HOME 0x0000C000UL
30
31#define PSW32_MASK_MERGE(CURRENT,NEW) \
32 (((CURRENT) & ~(PSW32_MASK_CC|PSW32_MASK_PM)) | \
33 ((NEW) & (PSW32_MASK_CC|PSW32_MASK_PM)))
34
35extern long psw32_user_bits;
36
9#define COMPAT_USER_HZ 100 37#define COMPAT_USER_HZ 100
10 38
11typedef u32 compat_size_t; 39typedef u32 compat_size_t;
diff --git a/include/asm-s390/etr.h b/include/asm-s390/etr.h
new file mode 100644
index 000000000000..b498f19bb9a7
--- /dev/null
+++ b/include/asm-s390/etr.h
@@ -0,0 +1,219 @@
1/*
2 * include/asm-s390/etr.h
3 *
4 * Copyright IBM Corp. 2006
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
6 */
7#ifndef __S390_ETR_H
8#define __S390_ETR_H
9
10/* ETR attachment control register */
11struct etr_eacr {
12 unsigned int e0 : 1; /* port 0 stepping control */
13 unsigned int e1 : 1; /* port 1 stepping control */
14 unsigned int _pad0 : 5; /* must be 00100 */
15 unsigned int dp : 1; /* data port control */
16 unsigned int p0 : 1; /* port 0 change recognition control */
17 unsigned int p1 : 1; /* port 1 change recognition control */
18 unsigned int _pad1 : 3; /* must be 000 */
19 unsigned int ea : 1; /* ETR alert control */
20 unsigned int es : 1; /* ETR sync check control */
21 unsigned int sl : 1; /* switch to local control */
22} __attribute__ ((packed));
23
24/* Port state returned by steai */
25enum etr_psc {
26 etr_psc_operational = 0,
27 etr_psc_semi_operational = 1,
28 etr_psc_protocol_error = 4,
29 etr_psc_no_symbols = 8,
30 etr_psc_no_signal = 12,
31 etr_psc_pps_mode = 13
32};
33
34/* Logical port state returned by stetr */
35enum etr_lpsc {
36 etr_lpsc_operational_step = 0,
37 etr_lpsc_operational_alt = 1,
38 etr_lpsc_semi_operational = 2,
39 etr_lpsc_protocol_error = 4,
40 etr_lpsc_no_symbol_sync = 8,
41 etr_lpsc_no_signal = 12,
42 etr_lpsc_pps_mode = 13
43};
44
45/* ETR status words */
46struct etr_esw {
47 struct etr_eacr eacr; /* attachment control register */
48 unsigned int y : 1; /* stepping mode */
49 unsigned int _pad0 : 5; /* must be 00000 */
50 unsigned int p : 1; /* stepping port number */
51 unsigned int q : 1; /* data port number */
52 unsigned int psc0 : 4; /* port 0 state code */
53 unsigned int psc1 : 4; /* port 1 state code */
54} __attribute__ ((packed));
55
56/* Second level data register status word */
57struct etr_slsw {
58 unsigned int vv1 : 1; /* copy of validity bit data frame 1 */
59 unsigned int vv2 : 1; /* copy of validity bit data frame 2 */
60 unsigned int vv3 : 1; /* copy of validity bit data frame 3 */
61 unsigned int vv4 : 1; /* copy of validity bit data frame 4 */
62 unsigned int _pad0 : 19; /* must by all zeroes */
63 unsigned int n : 1; /* EAF port number */
64 unsigned int v1 : 1; /* validity bit ETR data frame 1 */
65 unsigned int v2 : 1; /* validity bit ETR data frame 2 */
66 unsigned int v3 : 1; /* validity bit ETR data frame 3 */
67 unsigned int v4 : 1; /* validity bit ETR data frame 4 */
68 unsigned int _pad1 : 4; /* must be 0000 */
69} __attribute__ ((packed));
70
71/* ETR data frames */
72struct etr_edf1 {
73 unsigned int u : 1; /* untuned bit */
74 unsigned int _pad0 : 1; /* must be 0 */
75 unsigned int r : 1; /* service request bit */
76 unsigned int _pad1 : 4; /* must be 0000 */
77 unsigned int a : 1; /* time adjustment bit */
78 unsigned int net_id : 8; /* ETR network id */
79 unsigned int etr_id : 8; /* id of ETR which sends data frames */
80 unsigned int etr_pn : 8; /* port number of ETR output port */
81} __attribute__ ((packed));
82
83struct etr_edf2 {
84 unsigned int etv : 32; /* Upper 32 bits of TOD. */
85} __attribute__ ((packed));
86
87struct etr_edf3 {
88 unsigned int rc : 8; /* failure reason code */
89 unsigned int _pad0 : 3; /* must be 000 */
90 unsigned int c : 1; /* ETR coupled bit */
91 unsigned int tc : 4; /* ETR type code */
92 unsigned int blto : 8; /* biased local time offset */
93 /* (blto - 128) * 15 = minutes */
94 unsigned int buo : 8; /* biased utc offset */
95 /* (buo - 128) = leap seconds */
96} __attribute__ ((packed));
97
98struct etr_edf4 {
99 unsigned int ed : 8; /* ETS device dependent data */
100 unsigned int _pad0 : 1; /* must be 0 */
101 unsigned int buc : 5; /* biased ut1 correction */
102 /* (buc - 16) * 0.1 seconds */
103 unsigned int em : 6; /* ETS error magnitude */
104 unsigned int dc : 6; /* ETS drift code */
105 unsigned int sc : 6; /* ETS steering code */
106} __attribute__ ((packed));
107
108/*
109 * ETR attachment information block, two formats
110 * format 1 has 4 reserved words with a size of 64 bytes
111 * format 2 has 16 reserved words with a size of 96 bytes
112 */
113struct etr_aib {
114 struct etr_esw esw;
115 struct etr_slsw slsw;
116 unsigned long long tsp;
117 struct etr_edf1 edf1;
118 struct etr_edf2 edf2;
119 struct etr_edf3 edf3;
120 struct etr_edf4 edf4;
121 unsigned int reserved[16];
122} __attribute__ ((packed,aligned(8)));
123
124/* ETR interruption parameter */
125struct etr_interruption_parameter {
126 unsigned int _pad0 : 8;
127 unsigned int pc0 : 1; /* port 0 state change */
128 unsigned int pc1 : 1; /* port 1 state change */
129 unsigned int _pad1 : 3;
130 unsigned int eai : 1; /* ETR alert indication */
131 unsigned int _pad2 : 18;
132} __attribute__ ((packed));
133
134/* Query TOD offset result */
135struct etr_ptff_qto {
136 unsigned long long physical_clock;
137 unsigned long long tod_offset;
138 unsigned long long logical_tod_offset;
139 unsigned long long tod_epoch_difference;
140} __attribute__ ((packed));
141
142/* Inline assembly helper functions */
143static inline int etr_setr(struct etr_eacr *ctrl)
144{
145 int rc = -ENOSYS;
146
147 asm volatile(
148 " .insn s,0xb2160000,0(%2)\n"
149 "0: la %0,0\n"
150 "1:\n"
151 EX_TABLE(0b,1b)
152 : "+d" (rc) : "m" (*ctrl), "a" (ctrl));
153 return rc;
154}
155
156/* Stores a format 1 aib with 64 bytes */
157static inline int etr_stetr(struct etr_aib *aib)
158{
159 int rc = -ENOSYS;
160
161 asm volatile(
162 " .insn s,0xb2170000,0(%2)\n"
163 "0: la %0,0\n"
164 "1:\n"
165 EX_TABLE(0b,1b)
166 : "+d" (rc) : "m" (*aib), "a" (aib));
167 return rc;
168}
169
170/* Stores a format 2 aib with 96 bytes for specified port */
171static inline int etr_steai(struct etr_aib *aib, unsigned int func)
172{
173 register unsigned int reg0 asm("0") = func;
174 int rc = -ENOSYS;
175
176 asm volatile(
177 " .insn s,0xb2b30000,0(%2)\n"
178 "0: la %0,0\n"
179 "1:\n"
180 EX_TABLE(0b,1b)
181 : "+d" (rc) : "m" (*aib), "a" (aib), "d" (reg0));
182 return rc;
183}
184
185/* Function codes for the steai instruction. */
186#define ETR_STEAI_STEPPING_PORT 0x10
187#define ETR_STEAI_ALTERNATE_PORT 0x11
188#define ETR_STEAI_PORT_0 0x12
189#define ETR_STEAI_PORT_1 0x13
190
191static inline int etr_ptff(void *ptff_block, unsigned int func)
192{
193 register unsigned int reg0 asm("0") = func;
194 register unsigned long reg1 asm("1") = (unsigned long) ptff_block;
195 int rc = -ENOSYS;
196
197 asm volatile(
198 " .word 0x0104\n"
199 " ipm %0\n"
200 " srl %0,28\n"
201 : "=d" (rc), "=m" (ptff_block)
202 : "d" (reg0), "d" (reg1), "m" (ptff_block) : "cc");
203 return rc;
204}
205
206/* Function codes for the ptff instruction. */
207#define ETR_PTFF_QAF 0x00 /* query available functions */
208#define ETR_PTFF_QTO 0x01 /* query tod offset */
209#define ETR_PTFF_QSI 0x02 /* query steering information */
210#define ETR_PTFF_ATO 0x40 /* adjust tod offset */
211#define ETR_PTFF_STO 0x41 /* set tod offset */
212#define ETR_PTFF_SFS 0x42 /* set fine steering rate */
213#define ETR_PTFF_SGS 0x43 /* set gross steering rate */
214
215/* Functions needed by the machine check handler */
216extern void etr_switch_to_local(void);
217extern void etr_sync_check(void);
218
219#endif /* __S390_ETR_H */
diff --git a/include/asm-s390/hardirq.h b/include/asm-s390/hardirq.h
index c2f6a8782d31..31beb18cb3d1 100644
--- a/include/asm-s390/hardirq.h
+++ b/include/asm-s390/hardirq.h
@@ -32,6 +32,6 @@ typedef struct {
32 32
33#define HARDIRQ_BITS 8 33#define HARDIRQ_BITS 8
34 34
35extern void account_ticks(void); 35extern void account_ticks(u64 time);
36 36
37#endif /* __ASM_HARDIRQ_H */ 37#endif /* __ASM_HARDIRQ_H */
diff --git a/include/asm-s390/io.h b/include/asm-s390/io.h
index efb7de9c1c6b..a4c2d550dad4 100644
--- a/include/asm-s390/io.h
+++ b/include/asm-s390/io.h
@@ -28,11 +28,7 @@ static inline unsigned long virt_to_phys(volatile void * address)
28{ 28{
29 unsigned long real_address; 29 unsigned long real_address;
30 asm volatile( 30 asm volatile(
31#ifndef __s390x__
32 " lra %0,0(%1)\n" 31 " lra %0,0(%1)\n"
33#else /* __s390x__ */
34 " lrag %0,0(%1)\n"
35#endif /* __s390x__ */
36 " jz 0f\n" 32 " jz 0f\n"
37 " la %0,0\n" 33 " la %0,0\n"
38 "0:" 34 "0:"
diff --git a/include/asm-s390/kdebug.h b/include/asm-s390/kdebug.h
index 40cc68025e01..1b50f89819a4 100644
--- a/include/asm-s390/kdebug.h
+++ b/include/asm-s390/kdebug.h
@@ -26,7 +26,6 @@ extern int register_page_fault_notifier(struct notifier_block *);
26extern int unregister_page_fault_notifier(struct notifier_block *); 26extern int unregister_page_fault_notifier(struct notifier_block *);
27extern struct atomic_notifier_head s390die_chain; 27extern struct atomic_notifier_head s390die_chain;
28 28
29
30enum die_val { 29enum die_val {
31 DIE_OOPS = 1, 30 DIE_OOPS = 1,
32 DIE_BPT, 31 DIE_BPT,
@@ -56,4 +55,6 @@ static inline int notify_die(enum die_val val, const char *str,
56 return atomic_notifier_call_chain(&s390die_chain, val, &args); 55 return atomic_notifier_call_chain(&s390die_chain, val, &args);
57} 56}
58 57
58extern void die(const char *, struct pt_regs *, long);
59
59#endif 60#endif
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index 74f7389bd3ee..4a31d0a7ee83 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -220,7 +220,8 @@ struct _lowcore
220 __u32 kernel_asce; /* 0xc4c */ 220 __u32 kernel_asce; /* 0xc4c */
221 __u32 user_asce; /* 0xc50 */ 221 __u32 user_asce; /* 0xc50 */
222 __u32 panic_stack; /* 0xc54 */ 222 __u32 panic_stack; /* 0xc54 */
223 __u8 pad10[0xc60-0xc58]; /* 0xc58 */ 223 __u32 user_exec_asce; /* 0xc58 */
224 __u8 pad10[0xc60-0xc5c]; /* 0xc5c */
224 /* entry.S sensitive area start */ 225 /* entry.S sensitive area start */
225 struct cpuinfo_S390 cpu_data; /* 0xc60 */ 226 struct cpuinfo_S390 cpu_data; /* 0xc60 */
226 __u32 ipl_device; /* 0xc7c */ 227 __u32 ipl_device; /* 0xc7c */
@@ -310,7 +311,8 @@ struct _lowcore
310 __u64 kernel_asce; /* 0xd58 */ 311 __u64 kernel_asce; /* 0xd58 */
311 __u64 user_asce; /* 0xd60 */ 312 __u64 user_asce; /* 0xd60 */
312 __u64 panic_stack; /* 0xd68 */ 313 __u64 panic_stack; /* 0xd68 */
313 __u8 pad10[0xd80-0xd70]; /* 0xd70 */ 314 __u64 user_exec_asce; /* 0xd70 */
315 __u8 pad10[0xd80-0xd78]; /* 0xd78 */
314 /* entry.S sensitive area start */ 316 /* entry.S sensitive area start */
315 struct cpuinfo_S390 cpu_data; /* 0xd80 */ 317 struct cpuinfo_S390 cpu_data; /* 0xd80 */
316 __u32 ipl_device; /* 0xdb8 */ 318 __u32 ipl_device; /* 0xdb8 */
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index bcf24a873874..1d21da220d49 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -9,6 +9,7 @@
9#ifndef __S390_MMU_CONTEXT_H 9#ifndef __S390_MMU_CONTEXT_H
10#define __S390_MMU_CONTEXT_H 10#define __S390_MMU_CONTEXT_H
11 11
12#include <asm/pgalloc.h>
12/* 13/*
13 * get a new mmu context.. S390 don't know about contexts. 14 * get a new mmu context.. S390 don't know about contexts.
14 */ 15 */
@@ -16,29 +17,44 @@
16 17
17#define destroy_context(mm) do { } while (0) 18#define destroy_context(mm) do { } while (0)
18 19
20#ifndef __s390x__
21#define LCTL_OPCODE "lctl"
22#define PGTABLE_BITS (_SEGMENT_TABLE|USER_STD_MASK)
23#else
24#define LCTL_OPCODE "lctlg"
25#define PGTABLE_BITS (_REGION_TABLE|USER_STD_MASK)
26#endif
27
19static inline void enter_lazy_tlb(struct mm_struct *mm, 28static inline void enter_lazy_tlb(struct mm_struct *mm,
20 struct task_struct *tsk) 29 struct task_struct *tsk)
21{ 30{
22} 31}
23 32
24static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 33static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
25 struct task_struct *tsk) 34 struct task_struct *tsk)
26{ 35{
27 if (prev != next) { 36 pgd_t *shadow_pgd = get_shadow_pgd(next->pgd);
28#ifndef __s390x__ 37
29 S390_lowcore.user_asce = (__pa(next->pgd)&PAGE_MASK) | 38 if (prev != next) {
30 (_SEGMENT_TABLE|USER_STD_MASK); 39 S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) |
31 /* Load home space page table origin. */ 40 PGTABLE_BITS;
32 asm volatile("lctl 13,13,%0" 41 if (shadow_pgd) {
33 : : "m" (S390_lowcore.user_asce) ); 42 /* Load primary/secondary space page table origin. */
34#else /* __s390x__ */ 43 S390_lowcore.user_exec_asce =
35 S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) | 44 (__pa(shadow_pgd) & PAGE_MASK) | PGTABLE_BITS;
36 (_REGION_TABLE|USER_STD_MASK); 45 asm volatile(LCTL_OPCODE" 1,1,%0\n"
37 /* Load home space page table origin. */ 46 LCTL_OPCODE" 7,7,%1"
38 asm volatile("lctlg 13,13,%0" 47 : : "m" (S390_lowcore.user_exec_asce),
39 : : "m" (S390_lowcore.user_asce) ); 48 "m" (S390_lowcore.user_asce) );
40#endif /* __s390x__ */ 49 } else if (switch_amode) {
41 } 50 /* Load primary space page table origin. */
51 asm volatile(LCTL_OPCODE" 1,1,%0"
52 : : "m" (S390_lowcore.user_asce) );
53 } else
54 /* Load home space page table origin. */
55 asm volatile(LCTL_OPCODE" 13,13,%0"
56 : : "m" (S390_lowcore.user_asce) );
57 }
42 cpu_set(smp_processor_id(), next->cpu_vm_mask); 58 cpu_set(smp_processor_id(), next->cpu_vm_mask);
43} 59}
44 60
@@ -51,4 +67,4 @@ static inline void activate_mm(struct mm_struct *prev,
51 set_fs(current->thread.mm_segment); 67 set_fs(current->thread.mm_segment);
52} 68}
53 69
54#endif 70#endif /* __S390_MMU_CONTEXT_H */
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index 0707a7e2fc16..56c8a6c80e2e 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -47,6 +47,17 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
47 47
48 if (!pgd) 48 if (!pgd)
49 return NULL; 49 return NULL;
50 if (s390_noexec) {
51 pgd_t *shadow_pgd = (pgd_t *)
52 __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
53 struct page *page = virt_to_page(pgd);
54
55 if (!shadow_pgd) {
56 free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
57 return NULL;
58 }
59 page->lru.next = (void *) shadow_pgd;
60 }
50 for (i = 0; i < PTRS_PER_PGD; i++) 61 for (i = 0; i < PTRS_PER_PGD; i++)
51#ifndef __s390x__ 62#ifndef __s390x__
52 pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE)); 63 pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
@@ -58,6 +69,10 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
58 69
59static inline void pgd_free(pgd_t *pgd) 70static inline void pgd_free(pgd_t *pgd)
60{ 71{
72 pgd_t *shadow_pgd = get_shadow_pgd(pgd);
73
74 if (shadow_pgd)
75 free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER);
61 free_pages((unsigned long) pgd, PGD_ALLOC_ORDER); 76 free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
62} 77}
63 78
@@ -71,6 +86,7 @@ static inline void pgd_free(pgd_t *pgd)
71#define pmd_free(x) do { } while (0) 86#define pmd_free(x) do { } while (0)
72#define __pmd_free_tlb(tlb,x) do { } while (0) 87#define __pmd_free_tlb(tlb,x) do { } while (0)
73#define pgd_populate(mm, pmd, pte) BUG() 88#define pgd_populate(mm, pmd, pte) BUG()
89#define pgd_populate_kernel(mm, pmd, pte) BUG()
74#else /* __s390x__ */ 90#else /* __s390x__ */
75static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) 91static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
76{ 92{
@@ -79,6 +95,17 @@ static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
79 95
80 if (!pmd) 96 if (!pmd)
81 return NULL; 97 return NULL;
98 if (s390_noexec) {
99 pmd_t *shadow_pmd = (pmd_t *)
100 __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
101 struct page *page = virt_to_page(pmd);
102
103 if (!shadow_pmd) {
104 free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
105 return NULL;
106 }
107 page->lru.next = (void *) shadow_pmd;
108 }
82 for (i=0; i < PTRS_PER_PMD; i++) 109 for (i=0; i < PTRS_PER_PMD; i++)
83 pmd_clear(pmd + i); 110 pmd_clear(pmd + i);
84 return pmd; 111 return pmd;
@@ -86,6 +113,10 @@ static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
86 113
87static inline void pmd_free (pmd_t *pmd) 114static inline void pmd_free (pmd_t *pmd)
88{ 115{
116 pmd_t *shadow_pmd = get_shadow_pmd(pmd);
117
118 if (shadow_pmd)
119 free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER);
89 free_pages((unsigned long) pmd, PMD_ALLOC_ORDER); 120 free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
90} 121}
91 122
@@ -95,11 +126,22 @@ static inline void pmd_free (pmd_t *pmd)
95 pmd_free(pmd); \ 126 pmd_free(pmd); \
96 } while (0) 127 } while (0)
97 128
98static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) 129static inline void
130pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
99{ 131{
100 pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd); 132 pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
101} 133}
102 134
135static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
136{
137 pgd_t *shadow_pgd = get_shadow_pgd(pgd);
138 pmd_t *shadow_pmd = get_shadow_pmd(pmd);
139
140 if (shadow_pgd && shadow_pmd)
141 pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
142 pgd_populate_kernel(mm, pgd, pmd);
143}
144
103#endif /* __s390x__ */ 145#endif /* __s390x__ */
104 146
105static inline void 147static inline void
@@ -119,7 +161,13 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
119static inline void 161static inline void
120pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page) 162pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
121{ 163{
122 pmd_populate_kernel(mm, pmd, (pte_t *)page_to_phys(page)); 164 pte_t *pte = (pte_t *)page_to_phys(page);
165 pmd_t *shadow_pmd = get_shadow_pmd(pmd);
166 pte_t *shadow_pte = get_shadow_pte(pte);
167
168 pmd_populate_kernel(mm, pmd, pte);
169 if (shadow_pmd && shadow_pte)
170 pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
123} 171}
124 172
125/* 173/*
@@ -133,6 +181,17 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
133 181
134 if (!pte) 182 if (!pte)
135 return NULL; 183 return NULL;
184 if (s390_noexec) {
185 pte_t *shadow_pte = (pte_t *)
186 __get_free_page(GFP_KERNEL|__GFP_REPEAT);
187 struct page *page = virt_to_page(pte);
188
189 if (!shadow_pte) {
190 free_page((unsigned long) pte);
191 return NULL;
192 }
193 page->lru.next = (void *) shadow_pte;
194 }
136 for (i=0; i < PTRS_PER_PTE; i++) { 195 for (i=0; i < PTRS_PER_PTE; i++) {
137 pte_clear(mm, vmaddr, pte + i); 196 pte_clear(mm, vmaddr, pte + i);
138 vmaddr += PAGE_SIZE; 197 vmaddr += PAGE_SIZE;
@@ -151,14 +210,30 @@ pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
151 210
152static inline void pte_free_kernel(pte_t *pte) 211static inline void pte_free_kernel(pte_t *pte)
153{ 212{
154 free_page((unsigned long) pte); 213 pte_t *shadow_pte = get_shadow_pte(pte);
214
215 if (shadow_pte)
216 free_page((unsigned long) shadow_pte);
217 free_page((unsigned long) pte);
155} 218}
156 219
157static inline void pte_free(struct page *pte) 220static inline void pte_free(struct page *pte)
158{ 221{
159 __free_page(pte); 222 struct page *shadow_page = get_shadow_page(pte);
223
224 if (shadow_page)
225 __free_page(shadow_page);
226 __free_page(pte);
160} 227}
161 228
162#define __pte_free_tlb(tlb,pte) tlb_remove_page(tlb,pte) 229#define __pte_free_tlb(tlb, pte) \
230({ \
231 struct mmu_gather *__tlb = (tlb); \
232 struct page *__pte = (pte); \
233 struct page *shadow_page = get_shadow_page(__pte); \
234 if (shadow_page) \
235 tlb_remove_page(__tlb, shadow_page); \
236 tlb_remove_page(__tlb, __pte); \
237})
163 238
164#endif /* _S390_PGALLOC_H */ 239#endif /* _S390_PGALLOC_H */
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index ae61aca5d483..13c16546eff5 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -40,6 +40,7 @@ struct mm_struct;
40 40
41extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096))); 41extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
42extern void paging_init(void); 42extern void paging_init(void);
43extern void vmem_map_init(void);
43 44
44/* 45/*
45 * The S390 doesn't have any external MMU info: the kernel page 46 * The S390 doesn't have any external MMU info: the kernel page
@@ -223,6 +224,8 @@ extern unsigned long vmalloc_end;
223#define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */ 224#define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */
224#define _PAGE_TYPE_RO 0x200 225#define _PAGE_TYPE_RO 0x200
225#define _PAGE_TYPE_RW 0x000 226#define _PAGE_TYPE_RW 0x000
227#define _PAGE_TYPE_EX_RO 0x202
228#define _PAGE_TYPE_EX_RW 0x002
226 229
227/* 230/*
228 * PTE type bits are rather complicated. handle_pte_fault uses pte_present, 231 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
@@ -243,11 +246,13 @@ extern unsigned long vmalloc_end;
243 * _PAGE_TYPE_FILE 11?1 -> 11?1 246 * _PAGE_TYPE_FILE 11?1 -> 11?1
244 * _PAGE_TYPE_RO 0100 -> 1100 247 * _PAGE_TYPE_RO 0100 -> 1100
245 * _PAGE_TYPE_RW 0000 -> 1000 248 * _PAGE_TYPE_RW 0000 -> 1000
249 * _PAGE_TYPE_EX_RO 0110 -> 1110
250 * _PAGE_TYPE_EX_RW 0010 -> 1010
246 * 251 *
247 * pte_none is true for bits combinations 1000, 1100 252 * pte_none is true for bits combinations 1000, 1010, 1100, 1110
248 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001 253 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
249 * pte_file is true for bits combinations 1101, 1111 254 * pte_file is true for bits combinations 1101, 1111
250 * swap pte is 1011 and 0001, 0011, 0101, 0111, 1010 and 1110 are invalid. 255 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
251 */ 256 */
252 257
253#ifndef __s390x__ 258#ifndef __s390x__
@@ -312,33 +317,100 @@ extern unsigned long vmalloc_end;
312#define PAGE_NONE __pgprot(_PAGE_TYPE_NONE) 317#define PAGE_NONE __pgprot(_PAGE_TYPE_NONE)
313#define PAGE_RO __pgprot(_PAGE_TYPE_RO) 318#define PAGE_RO __pgprot(_PAGE_TYPE_RO)
314#define PAGE_RW __pgprot(_PAGE_TYPE_RW) 319#define PAGE_RW __pgprot(_PAGE_TYPE_RW)
320#define PAGE_EX_RO __pgprot(_PAGE_TYPE_EX_RO)
321#define PAGE_EX_RW __pgprot(_PAGE_TYPE_EX_RW)
315 322
316#define PAGE_KERNEL PAGE_RW 323#define PAGE_KERNEL PAGE_RW
317#define PAGE_COPY PAGE_RO 324#define PAGE_COPY PAGE_RO
318 325
319/* 326/*
320 * The S390 can't do page protection for execute, and considers that the 327 * Dependent on the EXEC_PROTECT option s390 can do execute protection.
321 * same are read. Also, write permissions imply read permissions. This is 328 * Write permission always implies read permission. In theory with a
322 * the closest we can get.. 329 * primary/secondary page table execute only can be implemented but
330 * it would cost an additional bit in the pte to distinguish all the
331 * different pte types. To avoid that execute permission currently
332 * implies read permission as well.
323 */ 333 */
324 /*xwr*/ 334 /*xwr*/
325#define __P000 PAGE_NONE 335#define __P000 PAGE_NONE
326#define __P001 PAGE_RO 336#define __P001 PAGE_RO
327#define __P010 PAGE_RO 337#define __P010 PAGE_RO
328#define __P011 PAGE_RO 338#define __P011 PAGE_RO
329#define __P100 PAGE_RO 339#define __P100 PAGE_EX_RO
330#define __P101 PAGE_RO 340#define __P101 PAGE_EX_RO
331#define __P110 PAGE_RO 341#define __P110 PAGE_EX_RO
332#define __P111 PAGE_RO 342#define __P111 PAGE_EX_RO
333 343
334#define __S000 PAGE_NONE 344#define __S000 PAGE_NONE
335#define __S001 PAGE_RO 345#define __S001 PAGE_RO
336#define __S010 PAGE_RW 346#define __S010 PAGE_RW
337#define __S011 PAGE_RW 347#define __S011 PAGE_RW
338#define __S100 PAGE_RO 348#define __S100 PAGE_EX_RO
339#define __S101 PAGE_RO 349#define __S101 PAGE_EX_RO
340#define __S110 PAGE_RW 350#define __S110 PAGE_EX_RW
341#define __S111 PAGE_RW 351#define __S111 PAGE_EX_RW
352
353#ifndef __s390x__
354# define PMD_SHADOW_SHIFT 1
355# define PGD_SHADOW_SHIFT 1
356#else /* __s390x__ */
357# define PMD_SHADOW_SHIFT 2
358# define PGD_SHADOW_SHIFT 2
359#endif /* __s390x__ */
360
361static inline struct page *get_shadow_page(struct page *page)
362{
363 if (s390_noexec && !list_empty(&page->lru))
364 return virt_to_page(page->lru.next);
365 return NULL;
366}
367
368static inline pte_t *get_shadow_pte(pte_t *ptep)
369{
370 unsigned long pteptr = (unsigned long) (ptep);
371
372 if (s390_noexec) {
373 unsigned long offset = pteptr & (PAGE_SIZE - 1);
374 void *addr = (void *) (pteptr ^ offset);
375 struct page *page = virt_to_page(addr);
376 if (!list_empty(&page->lru))
377 return (pte_t *) ((unsigned long) page->lru.next |
378 offset);
379 }
380 return NULL;
381}
382
383static inline pmd_t *get_shadow_pmd(pmd_t *pmdp)
384{
385 unsigned long pmdptr = (unsigned long) (pmdp);
386
387 if (s390_noexec) {
388 unsigned long offset = pmdptr &
389 ((PAGE_SIZE << PMD_SHADOW_SHIFT) - 1);
390 void *addr = (void *) (pmdptr ^ offset);
391 struct page *page = virt_to_page(addr);
392 if (!list_empty(&page->lru))
393 return (pmd_t *) ((unsigned long) page->lru.next |
394 offset);
395 }
396 return NULL;
397}
398
399static inline pgd_t *get_shadow_pgd(pgd_t *pgdp)
400{
401 unsigned long pgdptr = (unsigned long) (pgdp);
402
403 if (s390_noexec) {
404 unsigned long offset = pgdptr &
405 ((PAGE_SIZE << PGD_SHADOW_SHIFT) - 1);
406 void *addr = (void *) (pgdptr ^ offset);
407 struct page *page = virt_to_page(addr);
408 if (!list_empty(&page->lru))
409 return (pgd_t *) ((unsigned long) page->lru.next |
410 offset);
411 }
412 return NULL;
413}
342 414
343/* 415/*
344 * Certain architectures need to do special things when PTEs 416 * Certain architectures need to do special things when PTEs
@@ -347,7 +419,16 @@ extern unsigned long vmalloc_end;
347 */ 419 */
348static inline void set_pte(pte_t *pteptr, pte_t pteval) 420static inline void set_pte(pte_t *pteptr, pte_t pteval)
349{ 421{
422 pte_t *shadow_pte = get_shadow_pte(pteptr);
423
350 *pteptr = pteval; 424 *pteptr = pteval;
425 if (shadow_pte) {
426 if (!(pte_val(pteval) & _PAGE_INVALID) &&
427 (pte_val(pteval) & _PAGE_SWX))
428 pte_val(*shadow_pte) = pte_val(pteval) | _PAGE_RO;
429 else
430 pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
431 }
351} 432}
352#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) 433#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
353 434
@@ -465,7 +546,7 @@ static inline int pte_read(pte_t pte)
465 546
466static inline void pgd_clear(pgd_t * pgdp) { } 547static inline void pgd_clear(pgd_t * pgdp) { }
467 548
468static inline void pmd_clear(pmd_t * pmdp) 549static inline void pmd_clear_kernel(pmd_t * pmdp)
469{ 550{
470 pmd_val(pmdp[0]) = _PAGE_TABLE_INV; 551 pmd_val(pmdp[0]) = _PAGE_TABLE_INV;
471 pmd_val(pmdp[1]) = _PAGE_TABLE_INV; 552 pmd_val(pmdp[1]) = _PAGE_TABLE_INV;
@@ -473,24 +554,55 @@ static inline void pmd_clear(pmd_t * pmdp)
473 pmd_val(pmdp[3]) = _PAGE_TABLE_INV; 554 pmd_val(pmdp[3]) = _PAGE_TABLE_INV;
474} 555}
475 556
557static inline void pmd_clear(pmd_t * pmdp)
558{
559 pmd_t *shadow_pmd = get_shadow_pmd(pmdp);
560
561 pmd_clear_kernel(pmdp);
562 if (shadow_pmd)
563 pmd_clear_kernel(shadow_pmd);
564}
565
476#else /* __s390x__ */ 566#else /* __s390x__ */
477 567
478static inline void pgd_clear(pgd_t * pgdp) 568static inline void pgd_clear_kernel(pgd_t * pgdp)
479{ 569{
480 pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY; 570 pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY;
481} 571}
482 572
483static inline void pmd_clear(pmd_t * pmdp) 573static inline void pgd_clear(pgd_t * pgdp)
574{
575 pgd_t *shadow_pgd = get_shadow_pgd(pgdp);
576
577 pgd_clear_kernel(pgdp);
578 if (shadow_pgd)
579 pgd_clear_kernel(shadow_pgd);
580}
581
582static inline void pmd_clear_kernel(pmd_t * pmdp)
484{ 583{
485 pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY; 584 pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
486 pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY; 585 pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
487} 586}
488 587
588static inline void pmd_clear(pmd_t * pmdp)
589{
590 pmd_t *shadow_pmd = get_shadow_pmd(pmdp);
591
592 pmd_clear_kernel(pmdp);
593 if (shadow_pmd)
594 pmd_clear_kernel(shadow_pmd);
595}
596
489#endif /* __s390x__ */ 597#endif /* __s390x__ */
490 598
491static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 599static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
492{ 600{
601 pte_t *shadow_pte = get_shadow_pte(ptep);
602
493 pte_val(*ptep) = _PAGE_TYPE_EMPTY; 603 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
604 if (shadow_pte)
605 pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
494} 606}
495 607
496/* 608/*
@@ -608,8 +720,11 @@ ptep_clear_flush(struct vm_area_struct *vma,
608 unsigned long address, pte_t *ptep) 720 unsigned long address, pte_t *ptep)
609{ 721{
610 pte_t pte = *ptep; 722 pte_t pte = *ptep;
723 pte_t *shadow_pte = get_shadow_pte(ptep);
611 724
612 __ptep_ipte(address, ptep); 725 __ptep_ipte(address, ptep);
726 if (shadow_pte)
727 __ptep_ipte(address, shadow_pte);
613 return pte; 728 return pte;
614} 729}
615 730
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index cbbedc63ba25..4c1b73940351 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -50,6 +50,7 @@ struct cpuinfo_S390
50 unsigned long pgtable_cache_sz; 50 unsigned long pgtable_cache_sz;
51}; 51};
52 52
53extern void s390_adjust_jiffies(void);
53extern void print_cpu_info(struct cpuinfo_S390 *); 54extern void print_cpu_info(struct cpuinfo_S390 *);
54 55
55/* Lazy FPU handling on uni-processor */ 56/* Lazy FPU handling on uni-processor */
@@ -144,7 +145,8 @@ struct stack_frame {
144#ifndef __s390x__ 145#ifndef __s390x__
145 146
146#define start_thread(regs, new_psw, new_stackp) do { \ 147#define start_thread(regs, new_psw, new_stackp) do { \
147 regs->psw.mask = PSW_USER_BITS; \ 148 set_fs(USER_DS); \
149 regs->psw.mask = psw_user_bits; \
148 regs->psw.addr = new_psw | PSW_ADDR_AMODE; \ 150 regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
149 regs->gprs[15] = new_stackp ; \ 151 regs->gprs[15] = new_stackp ; \
150} while (0) 152} while (0)
@@ -152,13 +154,15 @@ struct stack_frame {
152#else /* __s390x__ */ 154#else /* __s390x__ */
153 155
154#define start_thread(regs, new_psw, new_stackp) do { \ 156#define start_thread(regs, new_psw, new_stackp) do { \
155 regs->psw.mask = PSW_USER_BITS; \ 157 set_fs(USER_DS); \
158 regs->psw.mask = psw_user_bits; \
156 regs->psw.addr = new_psw; \ 159 regs->psw.addr = new_psw; \
157 regs->gprs[15] = new_stackp; \ 160 regs->gprs[15] = new_stackp; \
158} while (0) 161} while (0)
159 162
160#define start_thread31(regs, new_psw, new_stackp) do { \ 163#define start_thread31(regs, new_psw, new_stackp) do { \
161 regs->psw.mask = PSW_USER32_BITS; \ 164 set_fs(USER_DS); \
165 regs->psw.mask = psw_user32_bits; \
162 regs->psw.addr = new_psw; \ 166 regs->psw.addr = new_psw; \
163 regs->gprs[15] = new_stackp; \ 167 regs->gprs[15] = new_stackp; \
164} while (0) 168} while (0)
@@ -201,9 +205,8 @@ unsigned long get_wchan(struct task_struct *p);
201static inline void cpu_relax(void) 205static inline void cpu_relax(void)
202{ 206{
203 if (MACHINE_HAS_DIAG44) 207 if (MACHINE_HAS_DIAG44)
204 asm volatile("diag 0,0,68" : : : "memory"); 208 asm volatile("diag 0,0,68");
205 else 209 barrier();
206 barrier();
207} 210}
208 211
209/* 212/*
@@ -328,6 +331,18 @@ static inline void disabled_wait(unsigned long code)
328} 331}
329 332
330/* 333/*
334 * Basic Machine Check/Program Check Handler.
335 */
336
337extern void s390_base_mcck_handler(void);
338extern void s390_base_pgm_handler(void);
339extern void s390_base_ext_handler(void);
340
341extern void (*s390_base_mcck_handler_fn)(void);
342extern void (*s390_base_pgm_handler_fn)(void);
343extern void (*s390_base_ext_handler_fn)(void);
344
345/*
331 * CPU idle notifier chain. 346 * CPU idle notifier chain.
332 */ 347 */
333#define CPU_IDLE 0 348#define CPU_IDLE 0
diff --git a/include/asm-s390/ptrace.h b/include/asm-s390/ptrace.h
index 7b768c5c68a8..fa6ca87080e8 100644
--- a/include/asm-s390/ptrace.h
+++ b/include/asm-s390/ptrace.h
@@ -266,17 +266,12 @@ typedef struct
266#define PSW_ASC_SECONDARY 0x0000800000000000UL 266#define PSW_ASC_SECONDARY 0x0000800000000000UL
267#define PSW_ASC_HOME 0x0000C00000000000UL 267#define PSW_ASC_HOME 0x0000C00000000000UL
268 268
269#define PSW_USER32_BITS (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \ 269extern long psw_user32_bits;
270 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \
271 PSW_MASK_PSTATE | PSW_DEFAULT_KEY)
272 270
273#endif /* __s390x__ */ 271#endif /* __s390x__ */
274 272
275#define PSW_KERNEL_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY | \ 273extern long psw_kernel_bits;
276 PSW_MASK_MCHECK | PSW_DEFAULT_KEY) 274extern long psw_user_bits;
277#define PSW_USER_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \
278 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \
279 PSW_MASK_PSTATE | PSW_DEFAULT_KEY)
280 275
281/* This macro merges a NEW PSW mask specified by the user into 276/* This macro merges a NEW PSW mask specified by the user into
282 the currently active PSW mask CURRENT, modifying only those 277 the currently active PSW mask CURRENT, modifying only those
diff --git a/include/asm-s390/reset.h b/include/asm-s390/reset.h
index 532e65a2aafc..f584f4a52581 100644
--- a/include/asm-s390/reset.h
+++ b/include/asm-s390/reset.h
@@ -18,7 +18,4 @@ struct reset_call {
18extern void register_reset_call(struct reset_call *reset); 18extern void register_reset_call(struct reset_call *reset);
19extern void unregister_reset_call(struct reset_call *reset); 19extern void unregister_reset_call(struct reset_call *reset);
20extern void s390_reset_system(void); 20extern void s390_reset_system(void);
21extern void (*s390_reset_mcck_handler)(void);
22extern void (*s390_reset_pgm_handler)(void);
23
24#endif /* _ASM_S390_RESET_H */ 21#endif /* _ASM_S390_RESET_H */
diff --git a/include/asm-s390/sclp.h b/include/asm-s390/sclp.h
new file mode 100644
index 000000000000..468b97018405
--- /dev/null
+++ b/include/asm-s390/sclp.h
@@ -0,0 +1,39 @@
1/*
2 * include/asm-s390/sclp.h
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 */
7
8#ifndef _ASM_S390_SCLP_H
9#define _ASM_S390_SCLP_H
10
11#include <linux/types.h>
12
13struct sccb_header {
14 u16 length;
15 u8 function_code;
16 u8 control_mask[3];
17 u16 response_code;
18} __attribute__((packed));
19
20#define LOADPARM_LEN 8
21
22struct sclp_readinfo_sccb {
23 struct sccb_header header; /* 0-7 */
24 u16 rnmax; /* 8-9 */
25 u8 rnsize; /* 10 */
26 u8 _reserved0[24 - 11]; /* 11-23 */
27 u8 loadparm[LOADPARM_LEN]; /* 24-31 */
28 u8 _reserved1[91 - 32]; /* 32-90 */
29 u8 flags; /* 91 */
30 u8 _reserved2[100 - 92]; /* 92-99 */
31 u32 rnsize2; /* 100-103 */
32 u64 rnmax2; /* 104-111 */
33 u8 _reserved3[4096 - 112]; /* 112-4095 */
34} __attribute__((packed, aligned(4096)));
35
36extern struct sclp_readinfo_sccb s390_readinfo_sccb;
37extern void sclp_readinfo_early(void);
38
39#endif /* _ASM_S390_SCLP_H */
diff --git a/include/asm-s390/sections.h b/include/asm-s390/sections.h
index 3a0b8ffeab7a..1c5a2c4ccdad 100644
--- a/include/asm-s390/sections.h
+++ b/include/asm-s390/sections.h
@@ -3,4 +3,6 @@
3 3
4#include <asm-generic/sections.h> 4#include <asm-generic/sections.h>
5 5
6extern char _eshared[];
7
6#endif 8#endif
diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h
index 9574fe80a046..3388bb52597c 100644
--- a/include/asm-s390/setup.h
+++ b/include/asm-s390/setup.h
@@ -42,6 +42,18 @@ struct mem_chunk {
42 42
43extern struct mem_chunk memory_chunk[]; 43extern struct mem_chunk memory_chunk[];
44 44
45#ifdef CONFIG_S390_SWITCH_AMODE
46extern unsigned int switch_amode;
47#else
48#define switch_amode (0)
49#endif
50
51#ifdef CONFIG_S390_EXEC_PROTECT
52extern unsigned int s390_noexec;
53#else
54#define s390_noexec (0)
55#endif
56
45/* 57/*
46 * Machine features detected in head.S 58 * Machine features detected in head.S
47 */ 59 */
@@ -74,6 +86,9 @@ extern unsigned int console_mode;
74extern unsigned int console_devno; 86extern unsigned int console_devno;
75extern unsigned int console_irq; 87extern unsigned int console_irq;
76 88
89extern char vmhalt_cmd[];
90extern char vmpoff_cmd[];
91
77#define CONSOLE_IS_UNDEFINED (console_mode == 0) 92#define CONSOLE_IS_UNDEFINED (console_mode == 0)
78#define CONSOLE_IS_SCLP (console_mode == 1) 93#define CONSOLE_IS_SCLP (console_mode == 1)
79#define CONSOLE_IS_3215 (console_mode == 2) 94#define CONSOLE_IS_3215 (console_mode == 2)
@@ -141,13 +156,19 @@ struct ipl_parameter_block {
141extern u32 ipl_flags; 156extern u32 ipl_flags;
142extern u16 ipl_devno; 157extern u16 ipl_devno;
143 158
144void do_reipl(void); 159extern void do_reipl(void);
160extern void ipl_save_parameters(void);
145 161
146enum { 162enum {
147 IPL_DEVNO_VALID = 1, 163 IPL_DEVNO_VALID = 1,
148 IPL_PARMBLOCK_VALID = 2, 164 IPL_PARMBLOCK_VALID = 2,
165 IPL_NSS_VALID = 4,
149}; 166};
150 167
168#define NSS_NAME_SIZE 8
169
170extern char kernel_nss_name[];
171
151#define IPL_PARMBLOCK_START ((struct ipl_parameter_block *) \ 172#define IPL_PARMBLOCK_START ((struct ipl_parameter_block *) \
152 IPL_PARMBLOCK_ORIGIN) 173 IPL_PARMBLOCK_ORIGIN)
153#define IPL_PARMBLOCK_SIZE (IPL_PARMBLOCK_START->hdr.len) 174#define IPL_PARMBLOCK_SIZE (IPL_PARMBLOCK_START->hdr.len)
diff --git a/arch/s390/math-emu/sfp-util.h b/include/asm-s390/sfp-util.h
index 5b6ca4570ea4..8cabcd23d976 100644
--- a/arch/s390/math-emu/sfp-util.h
+++ b/include/asm-s390/sfp-util.h
@@ -52,12 +52,12 @@
52}) 52})
53 53
54#define udiv_qrnnd(q, r, n1, n0, d) \ 54#define udiv_qrnnd(q, r, n1, n0, d) \
55 do { unsigned long __r; \ 55 do { unsigned int __r; \
56 (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \ 56 (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \
57 (r) = __r; \ 57 (r) = __r; \
58 } while (0) 58 } while (0)
59extern unsigned long __udiv_qrnnd (unsigned long *, unsigned long, 59extern unsigned long __udiv_qrnnd (unsigned int *, unsigned int,
60 unsigned long , unsigned long); 60 unsigned int , unsigned int);
61 61
62#define UDIV_NEEDS_NORMALIZATION 0 62#define UDIV_NEEDS_NORMALIZATION 0
63 63
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index 7097c96ed026..b957e4cda464 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -31,6 +31,10 @@ typedef struct
31 __u16 cpu; 31 __u16 cpu;
32} sigp_info; 32} sigp_info;
33 33
34extern void machine_restart_smp(char *);
35extern void machine_halt_smp(void);
36extern void machine_power_off_smp(void);
37
34extern void smp_setup_cpu_possible_map(void); 38extern void smp_setup_cpu_possible_map(void);
35extern int smp_call_function_on(void (*func) (void *info), void *info, 39extern int smp_call_function_on(void (*func) (void *info), void *info,
36 int nonatomic, int wait, int cpu); 40 int nonatomic, int wait, int cpu);
@@ -106,7 +110,7 @@ smp_call_function_on(void (*func) (void *info), void *info,
106static inline void smp_send_stop(void) 110static inline void smp_send_stop(void)
107{ 111{
108 /* Disable all interrupts/machine checks */ 112 /* Disable all interrupts/machine checks */
109 __load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK); 113 __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
110} 114}
111 115
112#define smp_cpu_not_running(cpu) 1 116#define smp_cpu_not_running(cpu) 1
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index bd0b05ae87d2..bbe137c3ed69 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -373,8 +373,8 @@ __set_psw_mask(unsigned long mask)
373 __load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8))); 373 __load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8)));
374} 374}
375 375
376#define local_mcck_enable() __set_psw_mask(PSW_KERNEL_BITS) 376#define local_mcck_enable() __set_psw_mask(psw_kernel_bits)
377#define local_mcck_disable() __set_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK) 377#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)
378 378
379#ifdef CONFIG_SMP 379#ifdef CONFIG_SMP
380 380
diff --git a/include/asm-s390/tape390.h b/include/asm-s390/tape390.h
index f1d66ba0deef..884fba48f1ff 100644
--- a/include/asm-s390/tape390.h
+++ b/include/asm-s390/tape390.h
@@ -1,11 +1,11 @@
1/************************************************************************* 1/*************************************************************************
2 * 2 *
3 * tape390.h 3 * tape390.h
4 * enables user programs to display messages on the tape device 4 * enables user programs to display messages and control encryption
5 * on s390 tape devices
5 * 6 *
6 * S390 and zSeries version 7 * Copyright IBM Corp. 2001,2006
7 * Copyright (C) 2001 IBM Corporation 8 * Author(s): Michael Holzheu <holzheu@de.ibm.com>
8 * Author(s): Despina Papadopoulou <despina_p@de.ibm.com>
9 * 9 *
10 *************************************************************************/ 10 *************************************************************************/
11 11
@@ -36,4 +36,68 @@ typedef struct display_struct {
36 char message2[8]; 36 char message2[8];
37} display_struct; 37} display_struct;
38 38
39/*
40 * Tape encryption support
41 */
42
43struct tape390_crypt_info {
44 char capability;
45 char status;
46 char medium_status;
47} __attribute__ ((packed));
48
49
50/* Macros for "capable" field */
51#define TAPE390_CRYPT_SUPPORTED_MASK 0x01
52#define TAPE390_CRYPT_SUPPORTED(x) \
53 ((x.capability & TAPE390_CRYPT_SUPPORTED_MASK))
54
55/* Macros for "status" field */
56#define TAPE390_CRYPT_ON_MASK 0x01
57#define TAPE390_CRYPT_ON(x) (((x.status) & TAPE390_CRYPT_ON_MASK))
58
59/* Macros for "medium status" field */
60#define TAPE390_MEDIUM_LOADED_MASK 0x01
61#define TAPE390_MEDIUM_ENCRYPTED_MASK 0x02
62#define TAPE390_MEDIUM_ENCRYPTED(x) \
63 (((x.medium_status) & TAPE390_MEDIUM_ENCRYPTED_MASK))
64#define TAPE390_MEDIUM_LOADED(x) \
65 (((x.medium_status) & TAPE390_MEDIUM_LOADED_MASK))
66
67/*
68 * The TAPE390_CRYPT_SET ioctl is used to switch on/off encryption.
69 * The "encryption_capable" and "tape_status" fields are ignored for this ioctl!
70 */
71#define TAPE390_CRYPT_SET _IOW('d', 2, struct tape390_crypt_info)
72
73/*
74 * The TAPE390_CRYPT_QUERY ioctl is used to query the encryption state.
75 */
76#define TAPE390_CRYPT_QUERY _IOR('d', 3, struct tape390_crypt_info)
77
78/* Values for "kekl1/2_type" and "kekl1/2_type_on_tape" fields */
79#define TAPE390_KEKL_TYPE_NONE 0
80#define TAPE390_KEKL_TYPE_LABEL 1
81#define TAPE390_KEKL_TYPE_HASH 2
82
83struct tape390_kekl {
84 unsigned char type;
85 unsigned char type_on_tape;
86 char label[65];
87} __attribute__ ((packed));
88
89struct tape390_kekl_pair {
90 struct tape390_kekl kekl[2];
91} __attribute__ ((packed));
92
93/*
94 * The TAPE390_KEKL_SET ioctl is used to set Key Encrypting Key labels.
95 */
96#define TAPE390_KEKL_SET _IOW('d', 4, struct tape390_kekl_pair)
97
98/*
99 * The TAPE390_KEKL_QUERY ioctl is used to query Key Encrypting Key labels.
100 */
101#define TAPE390_KEKL_QUERY _IOR('d', 5, struct tape390_kekl_pair)
102
39#endif 103#endif
diff --git a/include/asm-s390/timer.h b/include/asm-s390/timer.h
index 30e5cbe570f2..adb34860a543 100644
--- a/include/asm-s390/timer.h
+++ b/include/asm-s390/timer.h
@@ -45,6 +45,9 @@ extern void add_virt_timer_periodic(void *new);
45extern int mod_virt_timer(struct vtimer_list *timer, __u64 expires); 45extern int mod_virt_timer(struct vtimer_list *timer, __u64 expires);
46extern int del_virt_timer(struct vtimer_list *timer); 46extern int del_virt_timer(struct vtimer_list *timer);
47 47
48extern void init_cpu_vtimer(void);
49extern void vtime_init(void);
50
48#endif /* __KERNEL__ */ 51#endif /* __KERNEL__ */
49 52
50#endif /* _ASM_S390_TIMER_H */ 53#endif /* _ASM_S390_TIMER_H */
diff --git a/include/asm-s390/timex.h b/include/asm-s390/timex.h
index 4df4a41029a3..98229db24314 100644
--- a/include/asm-s390/timex.h
+++ b/include/asm-s390/timex.h
@@ -11,6 +11,41 @@
11#ifndef _ASM_S390_TIMEX_H 11#ifndef _ASM_S390_TIMEX_H
12#define _ASM_S390_TIMEX_H 12#define _ASM_S390_TIMEX_H
13 13
14/* Inline functions for clock register access. */
15static inline int set_clock(__u64 time)
16{
17 int cc;
18
19 asm volatile(
20 " sck 0(%2)\n"
21 " ipm %0\n"
22 " srl %0,28\n"
23 : "=d" (cc) : "m" (time), "a" (&time) : "cc");
24 return cc;
25}
26
27static inline int store_clock(__u64 *time)
28{
29 int cc;
30
31 asm volatile(
32 " stck 0(%2)\n"
33 " ipm %0\n"
34 " srl %0,28\n"
35 : "=d" (cc), "=m" (*time) : "a" (time) : "cc");
36 return cc;
37}
38
39static inline void set_clock_comparator(__u64 time)
40{
41 asm volatile("sckc 0(%1)" : : "m" (time), "a" (&time));
42}
43
44static inline void store_clock_comparator(__u64 *time)
45{
46 asm volatile("stckc 0(%1)" : "=m" (*time) : "a" (time));
47}
48
14#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ 49#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
15 50
16typedef unsigned long long cycles_t; 51typedef unsigned long long cycles_t;
@@ -27,9 +62,24 @@ static inline unsigned long long get_clock (void)
27 return clk; 62 return clk;
28} 63}
29 64
65static inline void get_clock_extended(void *dest)
66{
67 typedef struct { unsigned long long clk[2]; } __clock_t;
68
69#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
70 asm volatile("stcke %0" : "=Q" (*((__clock_t *)dest)) : : "cc");
71#else /* __GNUC__ */
72 asm volatile("stcke 0(%1)" : "=m" (*((__clock_t *)dest))
73 : "a" ((__clock_t *)dest) : "cc");
74#endif /* __GNUC__ */
75}
76
30static inline cycles_t get_cycles(void) 77static inline cycles_t get_cycles(void)
31{ 78{
32 return (cycles_t) get_clock() >> 2; 79 return (cycles_t) get_clock() >> 2;
33} 80}
34 81
82int get_sync_clock(unsigned long long *clock);
83void init_cpu_timer(void);
84
35#endif 85#endif
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index fa4dc916a9bf..66793f55c8b2 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/mm.h> 4#include <linux/mm.h>
5#include <asm/processor.h> 5#include <asm/processor.h>
6#include <asm/pgalloc.h>
6 7
7/* 8/*
8 * TLB flushing: 9 * TLB flushing:
@@ -102,6 +103,14 @@ static inline void __flush_tlb_mm(struct mm_struct * mm)
102 if (unlikely(cpus_empty(mm->cpu_vm_mask))) 103 if (unlikely(cpus_empty(mm->cpu_vm_mask)))
103 return; 104 return;
104 if (MACHINE_HAS_IDTE) { 105 if (MACHINE_HAS_IDTE) {
106 pgd_t *shadow_pgd = get_shadow_pgd(mm->pgd);
107
108 if (shadow_pgd) {
109 asm volatile(
110 " .insn rrf,0xb98e0000,0,%0,%1,0"
111 : : "a" (2048),
112 "a" (__pa(shadow_pgd) & PAGE_MASK) : "cc" );
113 }
105 asm volatile( 114 asm volatile(
106 " .insn rrf,0xb98e0000,0,%0,%1,0" 115 " .insn rrf,0xb98e0000,0,%0,%1,0"
107 : : "a" (2048), "a" (__pa(mm->pgd)&PAGE_MASK) : "cc"); 116 : : "a" (2048), "a" (__pa(mm->pgd)&PAGE_MASK) : "cc");
diff --git a/include/asm-s390/uaccess.h b/include/asm-s390/uaccess.h
index 73ac4e82217b..0235970278f0 100644
--- a/include/asm-s390/uaccess.h
+++ b/include/asm-s390/uaccess.h
@@ -90,6 +90,8 @@ struct uaccess_ops {
90extern struct uaccess_ops uaccess; 90extern struct uaccess_ops uaccess;
91extern struct uaccess_ops uaccess_std; 91extern struct uaccess_ops uaccess_std;
92extern struct uaccess_ops uaccess_mvcos; 92extern struct uaccess_ops uaccess_mvcos;
93extern struct uaccess_ops uaccess_mvcos_switch;
94extern struct uaccess_ops uaccess_pt;
93 95
94static inline int __put_user_fn(size_t size, void __user *ptr, void *x) 96static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
95{ 97{
diff --git a/include/asm-x86_64/acpi.h b/include/asm-x86_64/acpi.h
index 6b6fc6f8be7e..a29f05087a31 100644
--- a/include/asm-x86_64/acpi.h
+++ b/include/asm-x86_64/acpi.h
@@ -37,7 +37,7 @@
37 * Calling conventions: 37 * Calling conventions:
38 * 38 *
39 * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) 39 * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
40 * ACPI_EXTERNAL_XFACE - External ACPI interfaces 40 * ACPI_EXTERNAL_XFACE - External ACPI interfaces
41 * ACPI_INTERNAL_XFACE - Internal ACPI interfaces 41 * ACPI_INTERNAL_XFACE - Internal ACPI interfaces
42 * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces 42 * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
43 */ 43 */
@@ -57,11 +57,11 @@
57int __acpi_acquire_global_lock(unsigned int *lock); 57int __acpi_acquire_global_lock(unsigned int *lock);
58int __acpi_release_global_lock(unsigned int *lock); 58int __acpi_release_global_lock(unsigned int *lock);
59 59
60#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \ 60#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
61 ((Acq) = __acpi_acquire_global_lock((unsigned int *) GLptr)) 61 ((Acq) = __acpi_acquire_global_lock(&facs->global_lock))
62 62
63#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \ 63#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
64 ((Acq) = __acpi_release_global_lock((unsigned int *) GLptr)) 64 ((Acq) = __acpi_release_global_lock(&facs->global_lock))
65 65
66/* 66/*
67 * Math helper asm macros 67 * Math helper asm macros
@@ -87,10 +87,10 @@ extern int acpi_strict;
87extern int acpi_disabled; 87extern int acpi_disabled;
88extern int acpi_pci_disabled; 88extern int acpi_pci_disabled;
89extern int acpi_ht; 89extern int acpi_ht;
90static inline void disable_acpi(void) 90static inline void disable_acpi(void)
91{ 91{
92 acpi_disabled = 1; 92 acpi_disabled = 1;
93 acpi_ht = 0; 93 acpi_ht = 0;
94 acpi_pci_disabled = 1; 94 acpi_pci_disabled = 1;
95 acpi_noirq = 1; 95 acpi_noirq = 1;
96} 96}
@@ -100,9 +100,9 @@ static inline void disable_acpi(void)
100 100
101extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); 101extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
102static inline void acpi_noirq_set(void) { acpi_noirq = 1; } 102static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
103static inline void acpi_disable_pci(void) 103static inline void acpi_disable_pci(void)
104{ 104{
105 acpi_pci_disabled = 1; 105 acpi_pci_disabled = 1;
106 acpi_noirq_set(); 106 acpi_noirq_set();
107} 107}
108extern int acpi_irq_balance_set(char *str); 108extern int acpi_irq_balance_set(char *str);
@@ -136,8 +136,6 @@ extern void acpi_reserve_bootmem(void);
136extern int acpi_disabled; 136extern int acpi_disabled;
137extern int acpi_pci_disabled; 137extern int acpi_pci_disabled;
138 138
139extern u8 x86_acpiid_to_apicid[];
140
141#define ARCH_HAS_POWER_INIT 1 139#define ARCH_HAS_POWER_INIT 1
142 140
143extern int acpi_skip_timer_override; 141extern int acpi_skip_timer_override;
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h
index ba94ab3d2673..ab913ffcad56 100644
--- a/include/asm-x86_64/swiotlb.h
+++ b/include/asm-x86_64/swiotlb.h
@@ -1,6 +1,5 @@
1#ifndef _ASM_SWIOTLB_H 1#ifndef _ASM_SWIOTLB_H
2#define _ASM_SWTIOLB_H 1 2#define _ASM_SWIOTLB_H 1
3
4 3
5#include <asm/dma-mapping.h> 4#include <asm/dma-mapping.h>
6 5
@@ -45,6 +44,7 @@ extern void swiotlb_init(void);
45extern int swiotlb_force; 44extern int swiotlb_force;
46 45
47#ifdef CONFIG_SWIOTLB 46#ifdef CONFIG_SWIOTLB
47#define SWIOTLB_ARCH_NEED_ALLOC
48extern int swiotlb; 48extern int swiotlb;
49#else 49#else
50#define swiotlb 0 50#define swiotlb 0
@@ -52,4 +52,6 @@ extern int swiotlb;
52 52
53extern void pci_swiotlb_init(void); 53extern void pci_swiotlb_init(void);
54 54
55#endif /* _ASM_SWTIOLB_H */ 55static inline void dma_mark_clean(void *addr, size_t size) {}
56
57#endif /* _ASM_SWIOTLB_H */
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 157db77a7170..683513e310de 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -11,6 +11,7 @@ header-y += netfilter_arp/
11header-y += netfilter_bridge/ 11header-y += netfilter_bridge/
12header-y += netfilter_ipv4/ 12header-y += netfilter_ipv4/
13header-y += netfilter_ipv6/ 13header-y += netfilter_ipv6/
14header-y += usb/
14 15
15header-y += affs_hardblocks.h 16header-y += affs_hardblocks.h
16header-y += aio_abi.h 17header-y += aio_abi.h
@@ -326,7 +327,6 @@ unifdef-y += udp.h
326unifdef-y += uinput.h 327unifdef-y += uinput.h
327unifdef-y += uio.h 328unifdef-y += uio.h
328unifdef-y += unistd.h 329unifdef-y += unistd.h
329unifdef-y += usb_ch9.h
330unifdef-y += usbdevice_fs.h 330unifdef-y += usbdevice_fs.h
331unifdef-y += user.h 331unifdef-y += user.h
332unifdef-y += utsname.h 332unifdef-y += utsname.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 91f1f2363870..815f1fb4ce21 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -53,166 +53,6 @@ enum acpi_irq_model_id {
53 53
54extern enum acpi_irq_model_id acpi_irq_model; 54extern enum acpi_irq_model_id acpi_irq_model;
55 55
56
57/* Root System Description Pointer (RSDP) */
58
59struct acpi_table_rsdp {
60 char signature[8];
61 u8 checksum;
62 char oem_id[6];
63 u8 revision;
64 u32 rsdt_address;
65} __attribute__ ((packed));
66
67struct acpi20_table_rsdp {
68 char signature[8];
69 u8 checksum;
70 char oem_id[6];
71 u8 revision;
72 u32 rsdt_address;
73 u32 length;
74 u64 xsdt_address;
75 u8 ext_checksum;
76 u8 reserved[3];
77} __attribute__ ((packed));
78
79typedef struct {
80 u8 type;
81 u8 length;
82} __attribute__ ((packed)) acpi_table_entry_header;
83
84/* Root System Description Table (RSDT) */
85
86struct acpi_table_rsdt {
87 struct acpi_table_header header;
88 u32 entry[8];
89} __attribute__ ((packed));
90
91/* Extended System Description Table (XSDT) */
92
93struct acpi_table_xsdt {
94 struct acpi_table_header header;
95 u64 entry[1];
96} __attribute__ ((packed));
97
98/* Fixed ACPI Description Table (FADT) */
99
100struct acpi_table_fadt {
101 struct acpi_table_header header;
102 u32 facs_addr;
103 u32 dsdt_addr;
104 /* ... */
105} __attribute__ ((packed));
106
107/* Multiple APIC Description Table (MADT) */
108
109struct acpi_table_madt {
110 struct acpi_table_header header;
111 u32 lapic_address;
112 struct {
113 u32 pcat_compat:1;
114 u32 reserved:31;
115 } flags;
116} __attribute__ ((packed));
117
118enum acpi_madt_entry_id {
119 ACPI_MADT_LAPIC = 0,
120 ACPI_MADT_IOAPIC,
121 ACPI_MADT_INT_SRC_OVR,
122 ACPI_MADT_NMI_SRC,
123 ACPI_MADT_LAPIC_NMI,
124 ACPI_MADT_LAPIC_ADDR_OVR,
125 ACPI_MADT_IOSAPIC,
126 ACPI_MADT_LSAPIC,
127 ACPI_MADT_PLAT_INT_SRC,
128 ACPI_MADT_ENTRY_COUNT
129};
130
131typedef struct {
132 u16 polarity:2;
133 u16 trigger:2;
134 u16 reserved:12;
135} __attribute__ ((packed)) acpi_interrupt_flags;
136
137struct acpi_table_lapic {
138 acpi_table_entry_header header;
139 u8 acpi_id;
140 u8 id;
141 struct {
142 u32 enabled:1;
143 u32 reserved:31;
144 } flags;
145} __attribute__ ((packed));
146
147struct acpi_table_ioapic {
148 acpi_table_entry_header header;
149 u8 id;
150 u8 reserved;
151 u32 address;
152 u32 global_irq_base;
153} __attribute__ ((packed));
154
155struct acpi_table_int_src_ovr {
156 acpi_table_entry_header header;
157 u8 bus;
158 u8 bus_irq;
159 u32 global_irq;
160 acpi_interrupt_flags flags;
161} __attribute__ ((packed));
162
163struct acpi_table_nmi_src {
164 acpi_table_entry_header header;
165 acpi_interrupt_flags flags;
166 u32 global_irq;
167} __attribute__ ((packed));
168
169struct acpi_table_lapic_nmi {
170 acpi_table_entry_header header;
171 u8 acpi_id;
172 acpi_interrupt_flags flags;
173 u8 lint;
174} __attribute__ ((packed));
175
176struct acpi_table_lapic_addr_ovr {
177 acpi_table_entry_header header;
178 u8 reserved[2];
179 u64 address;
180} __attribute__ ((packed));
181
182struct acpi_table_iosapic {
183 acpi_table_entry_header header;
184 u8 id;
185 u8 reserved;
186 u32 global_irq_base;
187 u64 address;
188} __attribute__ ((packed));
189
190struct acpi_table_lsapic {
191 acpi_table_entry_header header;
192 u8 acpi_id;
193 u8 id;
194 u8 eid;
195 u8 reserved[3];
196 struct {
197 u32 enabled:1;
198 u32 reserved:31;
199 } flags;
200} __attribute__ ((packed));
201
202struct acpi_table_plat_int_src {
203 acpi_table_entry_header header;
204 acpi_interrupt_flags flags;
205 u8 type; /* See acpi_interrupt_type */
206 u8 id;
207 u8 eid;
208 u8 iosapic_vector;
209 u32 global_irq;
210 struct {
211 u32 cpei_override_flag:1;
212 u32 reserved:31;
213 } plint_flags;
214} __attribute__ ((packed));
215
216enum acpi_interrupt_id { 56enum acpi_interrupt_id {
217 ACPI_INTERRUPT_PMI = 1, 57 ACPI_INTERRUPT_PMI = 1,
218 ACPI_INTERRUPT_INIT, 58 ACPI_INTERRUPT_INIT,
@@ -222,89 +62,6 @@ enum acpi_interrupt_id {
222 62
223#define ACPI_SPACE_MEM 0 63#define ACPI_SPACE_MEM 0
224 64
225struct acpi_gen_regaddr {
226 u8 space_id;
227 u8 bit_width;
228 u8 bit_offset;
229 u8 resv;
230 u32 addrl;
231 u32 addrh;
232} __attribute__ ((packed));
233
234struct acpi_table_hpet {
235 struct acpi_table_header header;
236 u32 id;
237 struct acpi_gen_regaddr addr;
238 u8 number;
239 u16 min_tick;
240 u8 page_protect;
241} __attribute__ ((packed));
242
243/*
244 * Simple Boot Flags
245 * http://www.microsoft.com/whdc/hwdev/resources/specs/simp_bios.mspx
246 */
247struct acpi_table_sbf
248{
249 u8 sbf_signature[4];
250 u32 sbf_len;
251 u8 sbf_revision;
252 u8 sbf_csum;
253 u8 sbf_oemid[6];
254 u8 sbf_oemtable[8];
255 u8 sbf_revdata[4];
256 u8 sbf_creator[4];
257 u8 sbf_crearev[4];
258 u8 sbf_cmos;
259 u8 sbf_spare[3];
260} __attribute__ ((packed));
261
262/*
263 * System Resource Affinity Table (SRAT)
264 * http://www.microsoft.com/whdc/hwdev/platform/proc/SRAT.mspx
265 */
266
267struct acpi_table_srat {
268 struct acpi_table_header header;
269 u32 table_revision;
270 u64 reserved;
271} __attribute__ ((packed));
272
273enum acpi_srat_entry_id {
274 ACPI_SRAT_PROCESSOR_AFFINITY = 0,
275 ACPI_SRAT_MEMORY_AFFINITY,
276 ACPI_SRAT_ENTRY_COUNT
277};
278
279struct acpi_table_processor_affinity {
280 acpi_table_entry_header header;
281 u8 proximity_domain;
282 u8 apic_id;
283 struct {
284 u32 enabled:1;
285 u32 reserved:31;
286 } flags;
287 u8 lsapic_eid;
288 u8 reserved[7];
289} __attribute__ ((packed));
290
291struct acpi_table_memory_affinity {
292 acpi_table_entry_header header;
293 u8 proximity_domain;
294 u8 reserved1[5];
295 u32 base_addr_lo;
296 u32 base_addr_hi;
297 u32 length_lo;
298 u32 length_hi;
299 u32 memory_type; /* See acpi_address_range_id */
300 struct {
301 u32 enabled:1;
302 u32 hot_pluggable:1;
303 u32 reserved:30;
304 } flags;
305 u64 reserved2;
306} __attribute__ ((packed));
307
308enum acpi_address_range_id { 65enum acpi_address_range_id {
309 ACPI_ADDRESS_RANGE_MEMORY = 1, 66 ACPI_ADDRESS_RANGE_MEMORY = 1,
310 ACPI_ADDRESS_RANGE_RESERVED = 2, 67 ACPI_ADDRESS_RANGE_RESERVED = 2,
@@ -313,84 +70,12 @@ enum acpi_address_range_id {
313 ACPI_ADDRESS_RANGE_COUNT 70 ACPI_ADDRESS_RANGE_COUNT
314}; 71};
315 72
316/*
317 * System Locality Information Table (SLIT)
318 * see http://devresource.hp.com/devresource/docs/techpapers/ia64/slit.pdf
319 */
320
321struct acpi_table_slit {
322 struct acpi_table_header header;
323 u64 localities;
324 u8 entry[1]; /* real size = localities^2 */
325} __attribute__ ((packed));
326
327/* Smart Battery Description Table (SBST) */
328
329struct acpi_table_sbst {
330 struct acpi_table_header header;
331 u32 warning; /* Warn user */
332 u32 low; /* Critical sleep */
333 u32 critical; /* Critical shutdown */
334} __attribute__ ((packed));
335
336/* Embedded Controller Boot Resources Table (ECDT) */
337
338struct acpi_table_ecdt {
339 struct acpi_table_header header;
340 struct acpi_generic_address ec_control;
341 struct acpi_generic_address ec_data;
342 u32 uid;
343 u8 gpe_bit;
344 char ec_id[0];
345} __attribute__ ((packed));
346
347/* PCI MMCONFIG */
348
349/* Defined in PCI Firmware Specification 3.0 */
350struct acpi_table_mcfg_config {
351 u32 base_address;
352 u32 base_reserved;
353 u16 pci_segment_group_number;
354 u8 start_bus_number;
355 u8 end_bus_number;
356 u8 reserved[4];
357} __attribute__ ((packed));
358struct acpi_table_mcfg {
359 struct acpi_table_header header;
360 u8 reserved[8];
361 struct acpi_table_mcfg_config config[0];
362} __attribute__ ((packed));
363 73
364/* Table Handlers */ 74/* Table Handlers */
365 75
366enum acpi_table_id { 76typedef int (*acpi_table_handler) (struct acpi_table_header *table);
367 ACPI_TABLE_UNKNOWN = 0,
368 ACPI_APIC,
369 ACPI_BOOT,
370 ACPI_DBGP,
371 ACPI_DSDT,
372 ACPI_ECDT,
373 ACPI_ETDT,
374 ACPI_FADT,
375 ACPI_FACS,
376 ACPI_OEMX,
377 ACPI_PSDT,
378 ACPI_SBST,
379 ACPI_SLIT,
380 ACPI_SPCR,
381 ACPI_SRAT,
382 ACPI_SSDT,
383 ACPI_SPMI,
384 ACPI_HPET,
385 ACPI_MCFG,
386 ACPI_TABLE_COUNT
387};
388
389typedef int (*acpi_table_handler) (unsigned long phys_addr, unsigned long size);
390
391extern acpi_table_handler acpi_table_ops[ACPI_TABLE_COUNT];
392 77
393typedef int (*acpi_madt_entry_handler) (acpi_table_entry_header *header, const unsigned long end); 78typedef int (*acpi_madt_entry_handler) (struct acpi_subtable_header *header, const unsigned long end);
394 79
395char * __acpi_map_table (unsigned long phys_addr, unsigned long size); 80char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
396unsigned long acpi_find_rsdp (void); 81unsigned long acpi_find_rsdp (void);
@@ -399,14 +84,12 @@ int acpi_boot_table_init (void);
399int acpi_numa_init (void); 84int acpi_numa_init (void);
400 85
401int acpi_table_init (void); 86int acpi_table_init (void);
402int acpi_table_parse (enum acpi_table_id id, acpi_table_handler handler); 87int acpi_table_parse (char *id, acpi_table_handler handler);
403int acpi_get_table_header_early (enum acpi_table_id id, struct acpi_table_header **header); 88int acpi_table_parse_madt (enum acpi_madt_type id, acpi_madt_entry_handler handler, unsigned int max_entries);
404int acpi_table_parse_madt (enum acpi_madt_entry_id id, acpi_madt_entry_handler handler, unsigned int max_entries); 89int acpi_table_parse_srat (enum acpi_srat_type id, acpi_madt_entry_handler handler, unsigned int max_entries);
405int acpi_table_parse_srat (enum acpi_srat_entry_id id, acpi_madt_entry_handler handler, unsigned int max_entries); 90int acpi_parse_mcfg (struct acpi_table_header *header);
406int acpi_parse_mcfg (unsigned long phys_addr, unsigned long size); 91void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
407void acpi_table_print (struct acpi_table_header *header, unsigned long phys_addr); 92void acpi_table_print_srat_entry (struct acpi_subtable_header *srat);
408void acpi_table_print_madt_entry (acpi_table_entry_header *madt);
409void acpi_table_print_srat_entry (acpi_table_entry_header *srat);
410 93
411/* the following four functions are architecture-dependent */ 94/* the following four functions are architecture-dependent */
412#ifdef CONFIG_HAVE_ARCH_PARSE_SRAT 95#ifdef CONFIG_HAVE_ARCH_PARSE_SRAT
@@ -417,8 +100,8 @@ void acpi_table_print_srat_entry (acpi_table_entry_header *srat);
417#define acpi_numa_arch_fixup() do {} while (0) 100#define acpi_numa_arch_fixup() do {} while (0)
418#else 101#else
419void acpi_numa_slit_init (struct acpi_table_slit *slit); 102void acpi_numa_slit_init (struct acpi_table_slit *slit);
420void acpi_numa_processor_affinity_init (struct acpi_table_processor_affinity *pa); 103void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
421void acpi_numa_memory_affinity_init (struct acpi_table_memory_affinity *ma); 104void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
422void acpi_numa_arch_fixup(void); 105void acpi_numa_arch_fixup(void);
423#endif 106#endif
424 107
@@ -433,7 +116,7 @@ int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base);
433 116
434extern int acpi_mp_config; 117extern int acpi_mp_config;
435 118
436extern struct acpi_table_mcfg_config *pci_mmcfg_config; 119extern struct acpi_mcfg_allocation *pci_mmcfg_config;
437extern int pci_mmcfg_config_num; 120extern int pci_mmcfg_config_num;
438 121
439extern int sbf_port; 122extern int sbf_port;
diff --git a/include/linux/device.h b/include/linux/device.h
index f44247fe8135..5ca1cdba563a 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -126,6 +126,7 @@ struct device_driver {
126 struct klist_node knode_bus; 126 struct klist_node knode_bus;
127 127
128 struct module * owner; 128 struct module * owner;
129 const char * mod_name; /* used for built-in modules */
129 130
130 int (*probe) (struct device * dev); 131 int (*probe) (struct device * dev);
131 int (*remove) (struct device * dev); 132 int (*remove) (struct device * dev);
@@ -327,6 +328,13 @@ extern struct class_device *class_device_create(struct class *cls,
327 __attribute__((format(printf,5,6))); 328 __attribute__((format(printf,5,6)));
328extern void class_device_destroy(struct class *cls, dev_t devt); 329extern void class_device_destroy(struct class *cls, dev_t devt);
329 330
331struct device_type {
332 struct device_attribute *attrs;
333 int (*uevent)(struct device *dev, char **envp, int num_envp,
334 char *buffer, int buffer_size);
335 void (*release)(struct device *dev);
336};
337
330/* interface for exporting device attributes */ 338/* interface for exporting device attributes */
331struct device_attribute { 339struct device_attribute {
332 struct attribute attr; 340 struct attribute attr;
@@ -355,6 +363,7 @@ struct device {
355 363
356 struct kobject kobj; 364 struct kobject kobj;
357 char bus_id[BUS_ID_SIZE]; /* position on parent bus */ 365 char bus_id[BUS_ID_SIZE]; /* position on parent bus */
366 struct device_type *type;
358 unsigned is_registered:1; 367 unsigned is_registered:1;
359 struct device_attribute uevent_attr; 368 struct device_attribute uevent_attr;
360 struct device_attribute *devt_attr; 369 struct device_attribute *devt_attr;
@@ -390,9 +399,10 @@ struct device {
390 399
391 /* class_device migration path */ 400 /* class_device migration path */
392 struct list_head node; 401 struct list_head node;
393 struct class *class; /* optional*/ 402 struct class *class;
394 dev_t devt; /* dev_t, creates the sysfs "dev" */ 403 dev_t devt; /* dev_t, creates the sysfs "dev" */
395 struct attribute_group **groups; /* optional groups */ 404 struct attribute_group **groups; /* optional groups */
405 int uevent_suppress;
396 406
397 void (*release)(struct device * dev); 407 void (*release)(struct device * dev);
398}; 408};
diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h
index 8e4dbb51fc70..50d568ec178a 100644
--- a/include/linux/hid-debug.h
+++ b/include/linux/hid-debug.h
@@ -1,10 +1,8 @@
1#ifndef __HID_DEBUG_H
2#define __HID_DEBUG_H
3
1/* 4/*
2 * $Id: hid-debug.h,v 1.8 2001/09/25 09:37:57 vojtech Exp $ 5 * Copyright (c) 2007 Jiri Kosina
3 *
4 * (c) 1999 Andreas Gal <gal@cs.uni-magdeburg.de>
5 * (c) 2000-2001 Vojtech Pavlik <vojtech@ucw.cz>
6 *
7 * Some debug stuff for the HID parser.
8 */ 6 */
9 7
10/* 8/*
@@ -22,737 +20,26 @@
22 * along with this program; if not, write to the Free Software 20 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 * 22 *
25 * Should you need to contact me, the author, you can do so either by
26 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
27 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
28 */ 23 */
29 24
30#include <linux/input.h> 25#ifdef CONFIG_HID_DEBUG
31
32struct hid_usage_entry {
33 unsigned page;
34 unsigned usage;
35 char *description;
36};
37
38static const struct hid_usage_entry hid_usage_table[] = {
39 { 0, 0, "Undefined" },
40 { 1, 0, "GenericDesktop" },
41 {0, 0x01, "Pointer"},
42 {0, 0x02, "Mouse"},
43 {0, 0x04, "Joystick"},
44 {0, 0x05, "GamePad"},
45 {0, 0x06, "Keyboard"},
46 {0, 0x07, "Keypad"},
47 {0, 0x08, "MultiAxis"},
48 {0, 0x30, "X"},
49 {0, 0x31, "Y"},
50 {0, 0x32, "Z"},
51 {0, 0x33, "Rx"},
52 {0, 0x34, "Ry"},
53 {0, 0x35, "Rz"},
54 {0, 0x36, "Slider"},
55 {0, 0x37, "Dial"},
56 {0, 0x38, "Wheel"},
57 {0, 0x39, "HatSwitch"},
58 {0, 0x3a, "CountedBuffer"},
59 {0, 0x3b, "ByteCount"},
60 {0, 0x3c, "MotionWakeup"},
61 {0, 0x3d, "Start"},
62 {0, 0x3e, "Select"},
63 {0, 0x40, "Vx"},
64 {0, 0x41, "Vy"},
65 {0, 0x42, "Vz"},
66 {0, 0x43, "Vbrx"},
67 {0, 0x44, "Vbry"},
68 {0, 0x45, "Vbrz"},
69 {0, 0x46, "Vno"},
70 {0, 0x80, "SystemControl"},
71 {0, 0x81, "SystemPowerDown"},
72 {0, 0x82, "SystemSleep"},
73 {0, 0x83, "SystemWakeUp"},
74 {0, 0x84, "SystemContextMenu"},
75 {0, 0x85, "SystemMainMenu"},
76 {0, 0x86, "SystemAppMenu"},
77 {0, 0x87, "SystemMenuHelp"},
78 {0, 0x88, "SystemMenuExit"},
79 {0, 0x89, "SystemMenuSelect"},
80 {0, 0x8a, "SystemMenuRight"},
81 {0, 0x8b, "SystemMenuLeft"},
82 {0, 0x8c, "SystemMenuUp"},
83 {0, 0x8d, "SystemMenuDown"},
84 {0, 0x90, "D-PadUp"},
85 {0, 0x91, "D-PadDown"},
86 {0, 0x92, "D-PadRight"},
87 {0, 0x93, "D-PadLeft"},
88 { 2, 0, "Simulation" },
89 {0, 0xb0, "Aileron"},
90 {0, 0xb1, "AileronTrim"},
91 {0, 0xb2, "Anti-Torque"},
92 {0, 0xb3, "Autopilot"},
93 {0, 0xb4, "Chaff"},
94 {0, 0xb5, "Collective"},
95 {0, 0xb6, "DiveBrake"},
96 {0, 0xb7, "ElectronicCountermeasures"},
97 {0, 0xb8, "Elevator"},
98 {0, 0xb9, "ElevatorTrim"},
99 {0, 0xba, "Rudder"},
100 {0, 0xbb, "Throttle"},
101 {0, 0xbc, "FlightCommunications"},
102 {0, 0xbd, "FlareRelease"},
103 {0, 0xbe, "LandingGear"},
104 {0, 0xbf, "ToeBrake"},
105 { 7, 0, "Keyboard" },
106 { 8, 0, "LED" },
107 {0, 0x01, "NumLock"},
108 {0, 0x02, "CapsLock"},
109 {0, 0x03, "ScrollLock"},
110 {0, 0x04, "Compose"},
111 {0, 0x05, "Kana"},
112 {0, 0x4b, "GenericIndicator"},
113 { 9, 0, "Button" },
114 { 10, 0, "Ordinal" },
115 { 12, 0, "Consumer" },
116 {0, 0x238, "HorizontalWheel"},
117 { 13, 0, "Digitizers" },
118 {0, 0x01, "Digitizer"},
119 {0, 0x02, "Pen"},
120 {0, 0x03, "LightPen"},
121 {0, 0x04, "TouchScreen"},
122 {0, 0x05, "TouchPad"},
123 {0, 0x20, "Stylus"},
124 {0, 0x21, "Puck"},
125 {0, 0x22, "Finger"},
126 {0, 0x30, "TipPressure"},
127 {0, 0x31, "BarrelPressure"},
128 {0, 0x32, "InRange"},
129 {0, 0x33, "Touch"},
130 {0, 0x34, "UnTouch"},
131 {0, 0x35, "Tap"},
132 {0, 0x39, "TabletFunctionKey"},
133 {0, 0x3a, "ProgramChangeKey"},
134 {0, 0x3c, "Invert"},
135 {0, 0x42, "TipSwitch"},
136 {0, 0x43, "SecondaryTipSwitch"},
137 {0, 0x44, "BarrelSwitch"},
138 {0, 0x45, "Eraser"},
139 {0, 0x46, "TabletPick"},
140 { 15, 0, "PhysicalInterfaceDevice" },
141 {0, 0x00, "Undefined"},
142 {0, 0x01, "Physical_Interface_Device"},
143 {0, 0x20, "Normal"},
144 {0, 0x21, "Set_Effect_Report"},
145 {0, 0x22, "Effect_Block_Index"},
146 {0, 0x23, "Parameter_Block_Offset"},
147 {0, 0x24, "ROM_Flag"},
148 {0, 0x25, "Effect_Type"},
149 {0, 0x26, "ET_Constant_Force"},
150 {0, 0x27, "ET_Ramp"},
151 {0, 0x28, "ET_Custom_Force_Data"},
152 {0, 0x30, "ET_Square"},
153 {0, 0x31, "ET_Sine"},
154 {0, 0x32, "ET_Triangle"},
155 {0, 0x33, "ET_Sawtooth_Up"},
156 {0, 0x34, "ET_Sawtooth_Down"},
157 {0, 0x40, "ET_Spring"},
158 {0, 0x41, "ET_Damper"},
159 {0, 0x42, "ET_Inertia"},
160 {0, 0x43, "ET_Friction"},
161 {0, 0x50, "Duration"},
162 {0, 0x51, "Sample_Period"},
163 {0, 0x52, "Gain"},
164 {0, 0x53, "Trigger_Button"},
165 {0, 0x54, "Trigger_Repeat_Interval"},
166 {0, 0x55, "Axes_Enable"},
167 {0, 0x56, "Direction_Enable"},
168 {0, 0x57, "Direction"},
169 {0, 0x58, "Type_Specific_Block_Offset"},
170 {0, 0x59, "Block_Type"},
171 {0, 0x5A, "Set_Envelope_Report"},
172 {0, 0x5B, "Attack_Level"},
173 {0, 0x5C, "Attack_Time"},
174 {0, 0x5D, "Fade_Level"},
175 {0, 0x5E, "Fade_Time"},
176 {0, 0x5F, "Set_Condition_Report"},
177 {0, 0x60, "CP_Offset"},
178 {0, 0x61, "Positive_Coefficient"},
179 {0, 0x62, "Negative_Coefficient"},
180 {0, 0x63, "Positive_Saturation"},
181 {0, 0x64, "Negative_Saturation"},
182 {0, 0x65, "Dead_Band"},
183 {0, 0x66, "Download_Force_Sample"},
184 {0, 0x67, "Isoch_Custom_Force_Enable"},
185 {0, 0x68, "Custom_Force_Data_Report"},
186 {0, 0x69, "Custom_Force_Data"},
187 {0, 0x6A, "Custom_Force_Vendor_Defined_Data"},
188 {0, 0x6B, "Set_Custom_Force_Report"},
189 {0, 0x6C, "Custom_Force_Data_Offset"},
190 {0, 0x6D, "Sample_Count"},
191 {0, 0x6E, "Set_Periodic_Report"},
192 {0, 0x6F, "Offset"},
193 {0, 0x70, "Magnitude"},
194 {0, 0x71, "Phase"},
195 {0, 0x72, "Period"},
196 {0, 0x73, "Set_Constant_Force_Report"},
197 {0, 0x74, "Set_Ramp_Force_Report"},
198 {0, 0x75, "Ramp_Start"},
199 {0, 0x76, "Ramp_End"},
200 {0, 0x77, "Effect_Operation_Report"},
201 {0, 0x78, "Effect_Operation"},
202 {0, 0x79, "Op_Effect_Start"},
203 {0, 0x7A, "Op_Effect_Start_Solo"},
204 {0, 0x7B, "Op_Effect_Stop"},
205 {0, 0x7C, "Loop_Count"},
206 {0, 0x7D, "Device_Gain_Report"},
207 {0, 0x7E, "Device_Gain"},
208 {0, 0x7F, "PID_Pool_Report"},
209 {0, 0x80, "RAM_Pool_Size"},
210 {0, 0x81, "ROM_Pool_Size"},
211 {0, 0x82, "ROM_Effect_Block_Count"},
212 {0, 0x83, "Simultaneous_Effects_Max"},
213 {0, 0x84, "Pool_Alignment"},
214 {0, 0x85, "PID_Pool_Move_Report"},
215 {0, 0x86, "Move_Source"},
216 {0, 0x87, "Move_Destination"},
217 {0, 0x88, "Move_Length"},
218 {0, 0x89, "PID_Block_Load_Report"},
219 {0, 0x8B, "Block_Load_Status"},
220 {0, 0x8C, "Block_Load_Success"},
221 {0, 0x8D, "Block_Load_Full"},
222 {0, 0x8E, "Block_Load_Error"},
223 {0, 0x8F, "Block_Handle"},
224 {0, 0x90, "PID_Block_Free_Report"},
225 {0, 0x91, "Type_Specific_Block_Handle"},
226 {0, 0x92, "PID_State_Report"},
227 {0, 0x94, "Effect_Playing"},
228 {0, 0x95, "PID_Device_Control_Report"},
229 {0, 0x96, "PID_Device_Control"},
230 {0, 0x97, "DC_Enable_Actuators"},
231 {0, 0x98, "DC_Disable_Actuators"},
232 {0, 0x99, "DC_Stop_All_Effects"},
233 {0, 0x9A, "DC_Device_Reset"},
234 {0, 0x9B, "DC_Device_Pause"},
235 {0, 0x9C, "DC_Device_Continue"},
236 {0, 0x9F, "Device_Paused"},
237 {0, 0xA0, "Actuators_Enabled"},
238 {0, 0xA4, "Safety_Switch"},
239 {0, 0xA5, "Actuator_Override_Switch"},
240 {0, 0xA6, "Actuator_Power"},
241 {0, 0xA7, "Start_Delay"},
242 {0, 0xA8, "Parameter_Block_Size"},
243 {0, 0xA9, "Device_Managed_Pool"},
244 {0, 0xAA, "Shared_Parameter_Blocks"},
245 {0, 0xAB, "Create_New_Effect_Report"},
246 {0, 0xAC, "RAM_Pool_Available"},
247 { 0x84, 0, "Power Device" },
248 { 0x84, 0x02, "PresentStatus" },
249 { 0x84, 0x03, "ChangeStatus" },
250 { 0x84, 0x04, "UPS" },
251 { 0x84, 0x05, "PowerSupply" },
252 { 0x84, 0x10, "BatterySystem" },
253 { 0x84, 0x11, "BatterySystemID" },
254 { 0x84, 0x12, "Battery" },
255 { 0x84, 0x13, "BatteryID" },
256 { 0x84, 0x14, "Charger" },
257 { 0x84, 0x15, "ChargerID" },
258 { 0x84, 0x16, "PowerConverter" },
259 { 0x84, 0x17, "PowerConverterID" },
260 { 0x84, 0x18, "OutletSystem" },
261 { 0x84, 0x19, "OutletSystemID" },
262 { 0x84, 0x1a, "Input" },
263 { 0x84, 0x1b, "InputID" },
264 { 0x84, 0x1c, "Output" },
265 { 0x84, 0x1d, "OutputID" },
266 { 0x84, 0x1e, "Flow" },
267 { 0x84, 0x1f, "FlowID" },
268 { 0x84, 0x20, "Outlet" },
269 { 0x84, 0x21, "OutletID" },
270 { 0x84, 0x22, "Gang" },
271 { 0x84, 0x24, "PowerSummary" },
272 { 0x84, 0x25, "PowerSummaryID" },
273 { 0x84, 0x30, "Voltage" },
274 { 0x84, 0x31, "Current" },
275 { 0x84, 0x32, "Frequency" },
276 { 0x84, 0x33, "ApparentPower" },
277 { 0x84, 0x35, "PercentLoad" },
278 { 0x84, 0x40, "ConfigVoltage" },
279 { 0x84, 0x41, "ConfigCurrent" },
280 { 0x84, 0x43, "ConfigApparentPower" },
281 { 0x84, 0x53, "LowVoltageTransfer" },
282 { 0x84, 0x54, "HighVoltageTransfer" },
283 { 0x84, 0x56, "DelayBeforeStartup" },
284 { 0x84, 0x57, "DelayBeforeShutdown" },
285 { 0x84, 0x58, "Test" },
286 { 0x84, 0x5a, "AudibleAlarmControl" },
287 { 0x84, 0x60, "Present" },
288 { 0x84, 0x61, "Good" },
289 { 0x84, 0x62, "InternalFailure" },
290 { 0x84, 0x65, "Overload" },
291 { 0x84, 0x66, "OverCharged" },
292 { 0x84, 0x67, "OverTemperature" },
293 { 0x84, 0x68, "ShutdownRequested" },
294 { 0x84, 0x69, "ShutdownImminent" },
295 { 0x84, 0x6b, "SwitchOn/Off" },
296 { 0x84, 0x6c, "Switchable" },
297 { 0x84, 0x6d, "Used" },
298 { 0x84, 0x6e, "Boost" },
299 { 0x84, 0x73, "CommunicationLost" },
300 { 0x84, 0xfd, "iManufacturer" },
301 { 0x84, 0xfe, "iProduct" },
302 { 0x84, 0xff, "iSerialNumber" },
303 { 0x85, 0, "Battery System" },
304 { 0x85, 0x01, "SMBBatteryMode" },
305 { 0x85, 0x02, "SMBBatteryStatus" },
306 { 0x85, 0x03, "SMBAlarmWarning" },
307 { 0x85, 0x04, "SMBChargerMode" },
308 { 0x85, 0x05, "SMBChargerStatus" },
309 { 0x85, 0x06, "SMBChargerSpecInfo" },
310 { 0x85, 0x07, "SMBSelectorState" },
311 { 0x85, 0x08, "SMBSelectorPresets" },
312 { 0x85, 0x09, "SMBSelectorInfo" },
313 { 0x85, 0x29, "RemainingCapacityLimit" },
314 { 0x85, 0x2c, "CapacityMode" },
315 { 0x85, 0x42, "BelowRemainingCapacityLimit" },
316 { 0x85, 0x44, "Charging" },
317 { 0x85, 0x45, "Discharging" },
318 { 0x85, 0x4b, "NeedReplacement" },
319 { 0x85, 0x66, "RemainingCapacity" },
320 { 0x85, 0x68, "RunTimeToEmpty" },
321 { 0x85, 0x6a, "AverageTimeToFull" },
322 { 0x85, 0x83, "DesignCapacity" },
323 { 0x85, 0x85, "ManufacturerDate" },
324 { 0x85, 0x89, "iDeviceChemistry" },
325 { 0x85, 0x8b, "Rechargable" },
326 { 0x85, 0x8f, "iOEMInformation" },
327 { 0x85, 0x8d, "CapacityGranularity1" },
328 { 0x85, 0xd0, "ACPresent" },
329 /* pages 0xff00 to 0xffff are vendor-specific */
330 { 0xffff, 0, "Vendor-specific-FF" },
331 { 0, 0, NULL }
332};
333
334static void resolv_usage_page(unsigned page) {
335 const struct hid_usage_entry *p;
336
337 for (p = hid_usage_table; p->description; p++)
338 if (p->page == page) {
339 printk("%s", p->description);
340 return;
341 }
342 printk("%04x", page);
343}
344
345static void resolv_usage(unsigned usage) {
346 const struct hid_usage_entry *p;
347
348 resolv_usage_page(usage >> 16);
349 printk(".");
350 for (p = hid_usage_table; p->description; p++)
351 if (p->page == (usage >> 16)) {
352 for(++p; p->description && p->usage != 0; p++)
353 if (p->usage == (usage & 0xffff)) {
354 printk("%s", p->description);
355 return;
356 }
357 break;
358 }
359 printk("%04x", usage & 0xffff);
360}
361
362__inline__ static void tab(int n) {
363 while (n--) printk(" ");
364}
365
366static void hid_dump_field(struct hid_field *field, int n) {
367 int j;
368
369 if (field->physical) {
370 tab(n);
371 printk("Physical(");
372 resolv_usage(field->physical); printk(")\n");
373 }
374 if (field->logical) {
375 tab(n);
376 printk("Logical(");
377 resolv_usage(field->logical); printk(")\n");
378 }
379 tab(n); printk("Usage(%d)\n", field->maxusage);
380 for (j = 0; j < field->maxusage; j++) {
381 tab(n+2);resolv_usage(field->usage[j].hid); printk("\n");
382 }
383 if (field->logical_minimum != field->logical_maximum) {
384 tab(n); printk("Logical Minimum(%d)\n", field->logical_minimum);
385 tab(n); printk("Logical Maximum(%d)\n", field->logical_maximum);
386 }
387 if (field->physical_minimum != field->physical_maximum) {
388 tab(n); printk("Physical Minimum(%d)\n", field->physical_minimum);
389 tab(n); printk("Physical Maximum(%d)\n", field->physical_maximum);
390 }
391 if (field->unit_exponent) {
392 tab(n); printk("Unit Exponent(%d)\n", field->unit_exponent);
393 }
394 if (field->unit) {
395 char *systems[5] = { "None", "SI Linear", "SI Rotation", "English Linear", "English Rotation" };
396 char *units[5][8] = {
397 { "None", "None", "None", "None", "None", "None", "None", "None" },
398 { "None", "Centimeter", "Gram", "Seconds", "Kelvin", "Ampere", "Candela", "None" },
399 { "None", "Radians", "Gram", "Seconds", "Kelvin", "Ampere", "Candela", "None" },
400 { "None", "Inch", "Slug", "Seconds", "Fahrenheit", "Ampere", "Candela", "None" },
401 { "None", "Degrees", "Slug", "Seconds", "Fahrenheit", "Ampere", "Candela", "None" }
402 };
403
404 int i;
405 int sys;
406 __u32 data = field->unit;
407
408 /* First nibble tells us which system we're in. */
409 sys = data & 0xf;
410 data >>= 4;
411
412 if(sys > 4) {
413 tab(n); printk("Unit(Invalid)\n");
414 }
415 else {
416 int earlier_unit = 0;
417
418 tab(n); printk("Unit(%s : ", systems[sys]);
419
420 for (i=1 ; i<sizeof(__u32)*2 ; i++) {
421 char nibble = data & 0xf;
422 data >>= 4;
423 if (nibble != 0) {
424 if(earlier_unit++ > 0)
425 printk("*");
426 printk("%s", units[sys][i]);
427 if(nibble != 1) {
428 /* This is a _signed_ nibble(!) */
429
430 int val = nibble & 0x7;
431 if(nibble & 0x08)
432 val = -((0x7 & ~val) +1);
433 printk("^%d", val);
434 }
435 }
436 }
437 printk(")\n");
438 }
439 }
440 tab(n); printk("Report Size(%u)\n", field->report_size);
441 tab(n); printk("Report Count(%u)\n", field->report_count);
442 tab(n); printk("Report Offset(%u)\n", field->report_offset);
443
444 tab(n); printk("Flags( ");
445 j = field->flags;
446 printk("%s", HID_MAIN_ITEM_CONSTANT & j ? "Constant " : "");
447 printk("%s", HID_MAIN_ITEM_VARIABLE & j ? "Variable " : "Array ");
448 printk("%s", HID_MAIN_ITEM_RELATIVE & j ? "Relative " : "Absolute ");
449 printk("%s", HID_MAIN_ITEM_WRAP & j ? "Wrap " : "");
450 printk("%s", HID_MAIN_ITEM_NONLINEAR & j ? "NonLinear " : "");
451 printk("%s", HID_MAIN_ITEM_NO_PREFERRED & j ? "NoPrefferedState " : "");
452 printk("%s", HID_MAIN_ITEM_NULL_STATE & j ? "NullState " : "");
453 printk("%s", HID_MAIN_ITEM_VOLATILE & j ? "Volatile " : "");
454 printk("%s", HID_MAIN_ITEM_BUFFERED_BYTE & j ? "BufferedByte " : "");
455 printk(")\n");
456}
457
458static void __attribute__((unused)) hid_dump_device(struct hid_device *device) {
459 struct hid_report_enum *report_enum;
460 struct hid_report *report;
461 struct list_head *list;
462 unsigned i,k;
463 static char *table[] = {"INPUT", "OUTPUT", "FEATURE"};
464
465 for (i = 0; i < HID_REPORT_TYPES; i++) {
466 report_enum = device->report_enum + i;
467 list = report_enum->report_list.next;
468 while (list != &report_enum->report_list) {
469 report = (struct hid_report *) list;
470 tab(2);
471 printk("%s", table[i]);
472 if (report->id)
473 printk("(%d)", report->id);
474 printk("[%s]", table[report->type]);
475 printk("\n");
476 for (k = 0; k < report->maxfield; k++) {
477 tab(4);
478 printk("Field(%d)\n", k);
479 hid_dump_field(report->field[k], 6);
480 }
481 list = list->next;
482 }
483 }
484}
485
486static void __attribute__((unused)) hid_dump_input(struct hid_usage *usage, __s32 value) {
487 printk("hid-debug: input ");
488 resolv_usage(usage->hid);
489 printk(" = %d\n", value);
490}
491
492
493static char *events[EV_MAX + 1] = {
494 [EV_SYN] = "Sync", [EV_KEY] = "Key",
495 [EV_REL] = "Relative", [EV_ABS] = "Absolute",
496 [EV_MSC] = "Misc", [EV_LED] = "LED",
497 [EV_SND] = "Sound", [EV_REP] = "Repeat",
498 [EV_FF] = "ForceFeedback", [EV_PWR] = "Power",
499 [EV_FF_STATUS] = "ForceFeedbackStatus",
500};
501
502static char *syncs[2] = {
503 [SYN_REPORT] = "Report", [SYN_CONFIG] = "Config",
504};
505static char *keys[KEY_MAX + 1] = {
506 [KEY_RESERVED] = "Reserved", [KEY_ESC] = "Esc",
507 [KEY_1] = "1", [KEY_2] = "2",
508 [KEY_3] = "3", [KEY_4] = "4",
509 [KEY_5] = "5", [KEY_6] = "6",
510 [KEY_7] = "7", [KEY_8] = "8",
511 [KEY_9] = "9", [KEY_0] = "0",
512 [KEY_MINUS] = "Minus", [KEY_EQUAL] = "Equal",
513 [KEY_BACKSPACE] = "Backspace", [KEY_TAB] = "Tab",
514 [KEY_Q] = "Q", [KEY_W] = "W",
515 [KEY_E] = "E", [KEY_R] = "R",
516 [KEY_T] = "T", [KEY_Y] = "Y",
517 [KEY_U] = "U", [KEY_I] = "I",
518 [KEY_O] = "O", [KEY_P] = "P",
519 [KEY_LEFTBRACE] = "LeftBrace", [KEY_RIGHTBRACE] = "RightBrace",
520 [KEY_ENTER] = "Enter", [KEY_LEFTCTRL] = "LeftControl",
521 [KEY_A] = "A", [KEY_S] = "S",
522 [KEY_D] = "D", [KEY_F] = "F",
523 [KEY_G] = "G", [KEY_H] = "H",
524 [KEY_J] = "J", [KEY_K] = "K",
525 [KEY_L] = "L", [KEY_SEMICOLON] = "Semicolon",
526 [KEY_APOSTROPHE] = "Apostrophe", [KEY_GRAVE] = "Grave",
527 [KEY_LEFTSHIFT] = "LeftShift", [KEY_BACKSLASH] = "BackSlash",
528 [KEY_Z] = "Z", [KEY_X] = "X",
529 [KEY_C] = "C", [KEY_V] = "V",
530 [KEY_B] = "B", [KEY_N] = "N",
531 [KEY_M] = "M", [KEY_COMMA] = "Comma",
532 [KEY_DOT] = "Dot", [KEY_SLASH] = "Slash",
533 [KEY_RIGHTSHIFT] = "RightShift", [KEY_KPASTERISK] = "KPAsterisk",
534 [KEY_LEFTALT] = "LeftAlt", [KEY_SPACE] = "Space",
535 [KEY_CAPSLOCK] = "CapsLock", [KEY_F1] = "F1",
536 [KEY_F2] = "F2", [KEY_F3] = "F3",
537 [KEY_F4] = "F4", [KEY_F5] = "F5",
538 [KEY_F6] = "F6", [KEY_F7] = "F7",
539 [KEY_F8] = "F8", [KEY_F9] = "F9",
540 [KEY_F10] = "F10", [KEY_NUMLOCK] = "NumLock",
541 [KEY_SCROLLLOCK] = "ScrollLock", [KEY_KP7] = "KP7",
542 [KEY_KP8] = "KP8", [KEY_KP9] = "KP9",
543 [KEY_KPMINUS] = "KPMinus", [KEY_KP4] = "KP4",
544 [KEY_KP5] = "KP5", [KEY_KP6] = "KP6",
545 [KEY_KPPLUS] = "KPPlus", [KEY_KP1] = "KP1",
546 [KEY_KP2] = "KP2", [KEY_KP3] = "KP3",
547 [KEY_KP0] = "KP0", [KEY_KPDOT] = "KPDot",
548 [KEY_ZENKAKUHANKAKU] = "Zenkaku/Hankaku", [KEY_102ND] = "102nd",
549 [KEY_F11] = "F11", [KEY_F12] = "F12",
550 [KEY_RO] = "RO", [KEY_KATAKANA] = "Katakana",
551 [KEY_HIRAGANA] = "HIRAGANA", [KEY_HENKAN] = "Henkan",
552 [KEY_KATAKANAHIRAGANA] = "Katakana/Hiragana", [KEY_MUHENKAN] = "Muhenkan",
553 [KEY_KPJPCOMMA] = "KPJpComma", [KEY_KPENTER] = "KPEnter",
554 [KEY_RIGHTCTRL] = "RightCtrl", [KEY_KPSLASH] = "KPSlash",
555 [KEY_SYSRQ] = "SysRq", [KEY_RIGHTALT] = "RightAlt",
556 [KEY_LINEFEED] = "LineFeed", [KEY_HOME] = "Home",
557 [KEY_UP] = "Up", [KEY_PAGEUP] = "PageUp",
558 [KEY_LEFT] = "Left", [KEY_RIGHT] = "Right",
559 [KEY_END] = "End", [KEY_DOWN] = "Down",
560 [KEY_PAGEDOWN] = "PageDown", [KEY_INSERT] = "Insert",
561 [KEY_DELETE] = "Delete", [KEY_MACRO] = "Macro",
562 [KEY_MUTE] = "Mute", [KEY_VOLUMEDOWN] = "VolumeDown",
563 [KEY_VOLUMEUP] = "VolumeUp", [KEY_POWER] = "Power",
564 [KEY_KPEQUAL] = "KPEqual", [KEY_KPPLUSMINUS] = "KPPlusMinus",
565 [KEY_PAUSE] = "Pause", [KEY_KPCOMMA] = "KPComma",
566 [KEY_HANGUEL] = "Hangeul", [KEY_HANJA] = "Hanja",
567 [KEY_YEN] = "Yen", [KEY_LEFTMETA] = "LeftMeta",
568 [KEY_RIGHTMETA] = "RightMeta", [KEY_COMPOSE] = "Compose",
569 [KEY_STOP] = "Stop", [KEY_AGAIN] = "Again",
570 [KEY_PROPS] = "Props", [KEY_UNDO] = "Undo",
571 [KEY_FRONT] = "Front", [KEY_COPY] = "Copy",
572 [KEY_OPEN] = "Open", [KEY_PASTE] = "Paste",
573 [KEY_FIND] = "Find", [KEY_CUT] = "Cut",
574 [KEY_HELP] = "Help", [KEY_MENU] = "Menu",
575 [KEY_CALC] = "Calc", [KEY_SETUP] = "Setup",
576 [KEY_SLEEP] = "Sleep", [KEY_WAKEUP] = "WakeUp",
577 [KEY_FILE] = "File", [KEY_SENDFILE] = "SendFile",
578 [KEY_DELETEFILE] = "DeleteFile", [KEY_XFER] = "X-fer",
579 [KEY_PROG1] = "Prog1", [KEY_PROG2] = "Prog2",
580 [KEY_WWW] = "WWW", [KEY_MSDOS] = "MSDOS",
581 [KEY_COFFEE] = "Coffee", [KEY_DIRECTION] = "Direction",
582 [KEY_CYCLEWINDOWS] = "CycleWindows", [KEY_MAIL] = "Mail",
583 [KEY_BOOKMARKS] = "Bookmarks", [KEY_COMPUTER] = "Computer",
584 [KEY_BACK] = "Back", [KEY_FORWARD] = "Forward",
585 [KEY_CLOSECD] = "CloseCD", [KEY_EJECTCD] = "EjectCD",
586 [KEY_EJECTCLOSECD] = "EjectCloseCD", [KEY_NEXTSONG] = "NextSong",
587 [KEY_PLAYPAUSE] = "PlayPause", [KEY_PREVIOUSSONG] = "PreviousSong",
588 [KEY_STOPCD] = "StopCD", [KEY_RECORD] = "Record",
589 [KEY_REWIND] = "Rewind", [KEY_PHONE] = "Phone",
590 [KEY_ISO] = "ISOKey", [KEY_CONFIG] = "Config",
591 [KEY_HOMEPAGE] = "HomePage", [KEY_REFRESH] = "Refresh",
592 [KEY_EXIT] = "Exit", [KEY_MOVE] = "Move",
593 [KEY_EDIT] = "Edit", [KEY_SCROLLUP] = "ScrollUp",
594 [KEY_SCROLLDOWN] = "ScrollDown", [KEY_KPLEFTPAREN] = "KPLeftParenthesis",
595 [KEY_KPRIGHTPAREN] = "KPRightParenthesis", [KEY_NEW] = "New",
596 [KEY_REDO] = "Redo", [KEY_F13] = "F13",
597 [KEY_F14] = "F14", [KEY_F15] = "F15",
598 [KEY_F16] = "F16", [KEY_F17] = "F17",
599 [KEY_F18] = "F18", [KEY_F19] = "F19",
600 [KEY_F20] = "F20", [KEY_F21] = "F21",
601 [KEY_F22] = "F22", [KEY_F23] = "F23",
602 [KEY_F24] = "F24", [KEY_PLAYCD] = "PlayCD",
603 [KEY_PAUSECD] = "PauseCD", [KEY_PROG3] = "Prog3",
604 [KEY_PROG4] = "Prog4", [KEY_SUSPEND] = "Suspend",
605 [KEY_CLOSE] = "Close", [KEY_PLAY] = "Play",
606 [KEY_FASTFORWARD] = "FastForward", [KEY_BASSBOOST] = "BassBoost",
607 [KEY_PRINT] = "Print", [KEY_HP] = "HP",
608 [KEY_CAMERA] = "Camera", [KEY_SOUND] = "Sound",
609 [KEY_QUESTION] = "Question", [KEY_EMAIL] = "Email",
610 [KEY_CHAT] = "Chat", [KEY_SEARCH] = "Search",
611 [KEY_CONNECT] = "Connect", [KEY_FINANCE] = "Finance",
612 [KEY_SPORT] = "Sport", [KEY_SHOP] = "Shop",
613 [KEY_ALTERASE] = "AlternateErase", [KEY_CANCEL] = "Cancel",
614 [KEY_BRIGHTNESSDOWN] = "BrightnessDown", [KEY_BRIGHTNESSUP] = "BrightnessUp",
615 [KEY_MEDIA] = "Media", [KEY_UNKNOWN] = "Unknown",
616 [BTN_0] = "Btn0", [BTN_1] = "Btn1",
617 [BTN_2] = "Btn2", [BTN_3] = "Btn3",
618 [BTN_4] = "Btn4", [BTN_5] = "Btn5",
619 [BTN_6] = "Btn6", [BTN_7] = "Btn7",
620 [BTN_8] = "Btn8", [BTN_9] = "Btn9",
621 [BTN_LEFT] = "LeftBtn", [BTN_RIGHT] = "RightBtn",
622 [BTN_MIDDLE] = "MiddleBtn", [BTN_SIDE] = "SideBtn",
623 [BTN_EXTRA] = "ExtraBtn", [BTN_FORWARD] = "ForwardBtn",
624 [BTN_BACK] = "BackBtn", [BTN_TASK] = "TaskBtn",
625 [BTN_TRIGGER] = "Trigger", [BTN_THUMB] = "ThumbBtn",
626 [BTN_THUMB2] = "ThumbBtn2", [BTN_TOP] = "TopBtn",
627 [BTN_TOP2] = "TopBtn2", [BTN_PINKIE] = "PinkieBtn",
628 [BTN_BASE] = "BaseBtn", [BTN_BASE2] = "BaseBtn2",
629 [BTN_BASE3] = "BaseBtn3", [BTN_BASE4] = "BaseBtn4",
630 [BTN_BASE5] = "BaseBtn5", [BTN_BASE6] = "BaseBtn6",
631 [BTN_DEAD] = "BtnDead", [BTN_A] = "BtnA",
632 [BTN_B] = "BtnB", [BTN_C] = "BtnC",
633 [BTN_X] = "BtnX", [BTN_Y] = "BtnY",
634 [BTN_Z] = "BtnZ", [BTN_TL] = "BtnTL",
635 [BTN_TR] = "BtnTR", [BTN_TL2] = "BtnTL2",
636 [BTN_TR2] = "BtnTR2", [BTN_SELECT] = "BtnSelect",
637 [BTN_START] = "BtnStart", [BTN_MODE] = "BtnMode",
638 [BTN_THUMBL] = "BtnThumbL", [BTN_THUMBR] = "BtnThumbR",
639 [BTN_TOOL_PEN] = "ToolPen", [BTN_TOOL_RUBBER] = "ToolRubber",
640 [BTN_TOOL_BRUSH] = "ToolBrush", [BTN_TOOL_PENCIL] = "ToolPencil",
641 [BTN_TOOL_AIRBRUSH] = "ToolAirbrush", [BTN_TOOL_FINGER] = "ToolFinger",
642 [BTN_TOOL_MOUSE] = "ToolMouse", [BTN_TOOL_LENS] = "ToolLens",
643 [BTN_TOUCH] = "Touch", [BTN_STYLUS] = "Stylus",
644 [BTN_STYLUS2] = "Stylus2", [BTN_TOOL_DOUBLETAP] = "ToolDoubleTap",
645 [BTN_TOOL_TRIPLETAP] = "ToolTripleTap", [BTN_GEAR_DOWN] = "WheelBtn",
646 [BTN_GEAR_UP] = "Gear up", [KEY_OK] = "Ok",
647 [KEY_SELECT] = "Select", [KEY_GOTO] = "Goto",
648 [KEY_CLEAR] = "Clear", [KEY_POWER2] = "Power2",
649 [KEY_OPTION] = "Option", [KEY_INFO] = "Info",
650 [KEY_TIME] = "Time", [KEY_VENDOR] = "Vendor",
651 [KEY_ARCHIVE] = "Archive", [KEY_PROGRAM] = "Program",
652 [KEY_CHANNEL] = "Channel", [KEY_FAVORITES] = "Favorites",
653 [KEY_EPG] = "EPG", [KEY_PVR] = "PVR",
654 [KEY_MHP] = "MHP", [KEY_LANGUAGE] = "Language",
655 [KEY_TITLE] = "Title", [KEY_SUBTITLE] = "Subtitle",
656 [KEY_ANGLE] = "Angle", [KEY_ZOOM] = "Zoom",
657 [KEY_MODE] = "Mode", [KEY_KEYBOARD] = "Keyboard",
658 [KEY_SCREEN] = "Screen", [KEY_PC] = "PC",
659 [KEY_TV] = "TV", [KEY_TV2] = "TV2",
660 [KEY_VCR] = "VCR", [KEY_VCR2] = "VCR2",
661 [KEY_SAT] = "Sat", [KEY_SAT2] = "Sat2",
662 [KEY_CD] = "CD", [KEY_TAPE] = "Tape",
663 [KEY_RADIO] = "Radio", [KEY_TUNER] = "Tuner",
664 [KEY_PLAYER] = "Player", [KEY_TEXT] = "Text",
665 [KEY_DVD] = "DVD", [KEY_AUX] = "Aux",
666 [KEY_MP3] = "MP3", [KEY_AUDIO] = "Audio",
667 [KEY_VIDEO] = "Video", [KEY_DIRECTORY] = "Directory",
668 [KEY_LIST] = "List", [KEY_MEMO] = "Memo",
669 [KEY_CALENDAR] = "Calendar", [KEY_RED] = "Red",
670 [KEY_GREEN] = "Green", [KEY_YELLOW] = "Yellow",
671 [KEY_BLUE] = "Blue", [KEY_CHANNELUP] = "ChannelUp",
672 [KEY_CHANNELDOWN] = "ChannelDown", [KEY_FIRST] = "First",
673 [KEY_LAST] = "Last", [KEY_AB] = "AB",
674 [KEY_NEXT] = "Next", [KEY_RESTART] = "Restart",
675 [KEY_SLOW] = "Slow", [KEY_SHUFFLE] = "Shuffle",
676 [KEY_BREAK] = "Break", [KEY_PREVIOUS] = "Previous",
677 [KEY_DIGITS] = "Digits", [KEY_TEEN] = "TEEN",
678 [KEY_TWEN] = "TWEN", [KEY_DEL_EOL] = "DeleteEOL",
679 [KEY_DEL_EOS] = "DeleteEOS", [KEY_INS_LINE] = "InsertLine",
680 [KEY_DEL_LINE] = "DeleteLine",
681 [KEY_SEND] = "Send", [KEY_REPLY] = "Reply",
682 [KEY_FORWARDMAIL] = "ForwardMail", [KEY_SAVE] = "Save",
683 [KEY_DOCUMENTS] = "Documents",
684 [KEY_FN] = "Fn", [KEY_FN_ESC] = "Fn+ESC",
685 [KEY_FN_1] = "Fn+1", [KEY_FN_2] = "Fn+2",
686 [KEY_FN_B] = "Fn+B", [KEY_FN_D] = "Fn+D",
687 [KEY_FN_E] = "Fn+E", [KEY_FN_F] = "Fn+F",
688 [KEY_FN_S] = "Fn+S",
689 [KEY_FN_F1] = "Fn+F1", [KEY_FN_F2] = "Fn+F2",
690 [KEY_FN_F3] = "Fn+F3", [KEY_FN_F4] = "Fn+F4",
691 [KEY_FN_F5] = "Fn+F5", [KEY_FN_F6] = "Fn+F6",
692 [KEY_FN_F7] = "Fn+F7", [KEY_FN_F8] = "Fn+F8",
693 [KEY_FN_F9] = "Fn+F9", [KEY_FN_F10] = "Fn+F10",
694 [KEY_FN_F11] = "Fn+F11", [KEY_FN_F12] = "Fn+F12",
695 [KEY_KBDILLUMTOGGLE] = "KbdIlluminationToggle",
696 [KEY_KBDILLUMDOWN] = "KbdIlluminationDown",
697 [KEY_KBDILLUMUP] = "KbdIlluminationUp",
698 [KEY_SWITCHVIDEOMODE] = "SwitchVideoMode",
699};
700
701static char *relatives[REL_MAX + 1] = {
702 [REL_X] = "X", [REL_Y] = "Y",
703 [REL_Z] = "Z", [REL_RX] = "Rx",
704 [REL_RY] = "Ry", [REL_RZ] = "Rz",
705 [REL_HWHEEL] = "HWheel", [REL_DIAL] = "Dial",
706 [REL_WHEEL] = "Wheel", [REL_MISC] = "Misc",
707};
708
709static char *absolutes[ABS_MAX + 1] = {
710 [ABS_X] = "X", [ABS_Y] = "Y",
711 [ABS_Z] = "Z", [ABS_RX] = "Rx",
712 [ABS_RY] = "Ry", [ABS_RZ] = "Rz",
713 [ABS_THROTTLE] = "Throttle", [ABS_RUDDER] = "Rudder",
714 [ABS_WHEEL] = "Wheel", [ABS_GAS] = "Gas",
715 [ABS_BRAKE] = "Brake", [ABS_HAT0X] = "Hat0X",
716 [ABS_HAT0Y] = "Hat0Y", [ABS_HAT1X] = "Hat1X",
717 [ABS_HAT1Y] = "Hat1Y", [ABS_HAT2X] = "Hat2X",
718 [ABS_HAT2Y] = "Hat2Y", [ABS_HAT3X] = "Hat3X",
719 [ABS_HAT3Y] = "Hat 3Y", [ABS_PRESSURE] = "Pressure",
720 [ABS_DISTANCE] = "Distance", [ABS_TILT_X] = "XTilt",
721 [ABS_TILT_Y] = "YTilt", [ABS_TOOL_WIDTH] = "Tool Width",
722 [ABS_VOLUME] = "Volume", [ABS_MISC] = "Misc",
723};
724 26
725static char *misc[MSC_MAX + 1] = { 27void hid_dump_input(struct hid_usage *, __s32);
726 [MSC_SERIAL] = "Serial", [MSC_PULSELED] = "Pulseled", 28void hid_dump_device(struct hid_device *);
727 [MSC_GESTURE] = "Gesture", [MSC_RAW] = "RawData" 29void hid_dump_field(struct hid_field *, int);
728}; 30void hid_resolv_usage(unsigned);
31void hid_resolv_event(__u8, __u16);
729 32
730static char *leds[LED_MAX + 1] = { 33#else
731 [LED_NUML] = "NumLock", [LED_CAPSL] = "CapsLock",
732 [LED_SCROLLL] = "ScrollLock", [LED_COMPOSE] = "Compose",
733 [LED_KANA] = "Kana", [LED_SLEEP] = "Sleep",
734 [LED_SUSPEND] = "Suspend", [LED_MUTE] = "Mute",
735 [LED_MISC] = "Misc",
736};
737 34
738static char *repeats[REP_MAX + 1] = { 35#define hid_dump_input(a,b) do { } while (0)
739 [REP_DELAY] = "Delay", [REP_PERIOD] = "Period" 36#define hid_dump_device(c) do { } while (0)
740}; 37#define hid_dump_field(a,b) do { } while (0)
38#define hid_resolv_usage(a) do { } while (0)
39#define hid_resolv_event(a,b) do { } while (0)
741 40
742static char *sounds[SND_MAX + 1] = { 41#endif /* CONFIG_HID_DEBUG */
743 [SND_CLICK] = "Click", [SND_BELL] = "Bell",
744 [SND_TONE] = "Tone"
745};
746 42
747static char **names[EV_MAX + 1] = {
748 [EV_SYN] = syncs, [EV_KEY] = keys,
749 [EV_REL] = relatives, [EV_ABS] = absolutes,
750 [EV_MSC] = misc, [EV_LED] = leds,
751 [EV_SND] = sounds, [EV_REP] = repeats,
752};
753 43
754static void __attribute__((unused)) resolv_event(__u8 type, __u16 code) { 44#endif
755 45
756 printk("%s.%s", events[type] ? events[type] : "?",
757 names[type] ? (names[type][code] ? names[type][code] : "?") : "?");
758}
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 342b4e639acb..d26b08f461f2 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -264,6 +264,9 @@ struct hid_item {
264#define HID_QUIRK_INVERT_HWHEEL 0x00004000 264#define HID_QUIRK_INVERT_HWHEEL 0x00004000
265#define HID_QUIRK_POWERBOOK_ISO_KEYBOARD 0x00008000 265#define HID_QUIRK_POWERBOOK_ISO_KEYBOARD 0x00008000
266#define HID_QUIRK_BAD_RELATIVE_KEYS 0x00010000 266#define HID_QUIRK_BAD_RELATIVE_KEYS 0x00010000
267#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00020000
268#define HID_QUIRK_IGNORE_MOUSE 0x00040000
269#define HID_QUIRK_SONY_PS3_CONTROLLER 0x00080000
267 270
268/* 271/*
269 * This is the global environment of the parser. This information is 272 * This is the global environment of the parser. This information is
@@ -430,8 +433,8 @@ struct hid_device { /* device report descriptor */
430 433
431 /* device-specific function pointers */ 434 /* device-specific function pointers */
432 int (*hidinput_input_event) (struct input_dev *, unsigned int, unsigned int, int); 435 int (*hidinput_input_event) (struct input_dev *, unsigned int, unsigned int, int);
433 int (*hidinput_open) (struct input_dev *); 436 int (*hid_open) (struct hid_device *);
434 void (*hidinput_close) (struct input_dev *); 437 void (*hid_close) (struct hid_device *);
435 438
436 /* hiddev event handler */ 439 /* hiddev event handler */
437 void (*hiddev_hid_event) (struct hid_device *, struct hid_field *field, 440 void (*hiddev_hid_event) (struct hid_device *, struct hid_field *field,
@@ -471,16 +474,6 @@ struct hid_descriptor {
471 struct hid_class_descriptor desc[1]; 474 struct hid_class_descriptor desc[1];
472} __attribute__ ((packed)); 475} __attribute__ ((packed));
473 476
474#ifdef DEBUG
475#include "hid-debug.h"
476#else
477#define hid_dump_input(a,b) do { } while (0)
478#define hid_dump_device(c) do { } while (0)
479#define hid_dump_field(a,b) do { } while (0)
480#define resolv_usage(a) do { } while (0)
481#define resolv_event(a,b) do { } while (0)
482#endif
483
484/* Applications from HID Usage Tables 4/8/99 Version 1.1 */ 477/* Applications from HID Usage Tables 4/8/99 Version 1.1 */
485/* We ignore a few input applications that are not widely used */ 478/* We ignore a few input applications that are not widely used */
486#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001)) 479#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001))
@@ -503,6 +496,7 @@ struct hid_device *hid_parse_report(__u8 *start, unsigned size);
503int hid_ff_init(struct hid_device *hid); 496int hid_ff_init(struct hid_device *hid);
504 497
505int hid_lgff_init(struct hid_device *hid); 498int hid_lgff_init(struct hid_device *hid);
499int hid_plff_init(struct hid_device *hid);
506int hid_tmff_init(struct hid_device *hid); 500int hid_tmff_init(struct hid_device *hid);
507int hid_zpff_init(struct hid_device *hid); 501int hid_zpff_init(struct hid_device *hid);
508#ifdef CONFIG_HID_PID 502#ifdef CONFIG_HID_PID
diff --git a/include/linux/ide.h b/include/linux/ide.h
index e26a03981a94..04e0fa97ac99 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -18,6 +18,9 @@
18#include <linux/device.h> 18#include <linux/device.h>
19#include <linux/pci.h> 19#include <linux/pci.h>
20#include <linux/completion.h> 20#include <linux/completion.h>
21#ifdef CONFIG_BLK_DEV_IDEACPI
22#include <acpi/acpi.h>
23#endif
21#include <asm/byteorder.h> 24#include <asm/byteorder.h>
22#include <asm/system.h> 25#include <asm/system.h>
23#include <asm/io.h> 26#include <asm/io.h>
@@ -541,6 +544,11 @@ typedef enum {
541struct ide_driver_s; 544struct ide_driver_s;
542struct ide_settings_s; 545struct ide_settings_s;
543 546
547#ifdef CONFIG_BLK_DEV_IDEACPI
548struct ide_acpi_drive_link;
549struct ide_acpi_hwif_link;
550#endif
551
544typedef struct ide_drive_s { 552typedef struct ide_drive_s {
545 char name[4]; /* drive name, such as "hda" */ 553 char name[4]; /* drive name, such as "hda" */
546 char driver_req[10]; /* requests specific driver */ 554 char driver_req[10]; /* requests specific driver */
@@ -637,6 +645,9 @@ typedef struct ide_drive_s {
637 645
638 int lun; /* logical unit */ 646 int lun; /* logical unit */
639 int crc_count; /* crc counter to reduce drive speed */ 647 int crc_count; /* crc counter to reduce drive speed */
648#ifdef CONFIG_BLK_DEV_IDEACPI
649 struct ide_acpi_drive_link *acpidata;
650#endif
640 struct list_head list; 651 struct list_head list;
641 struct device gendev; 652 struct device gendev;
642 struct completion gendev_rel_comp; /* to deal with device release() */ 653 struct completion gendev_rel_comp; /* to deal with device release() */
@@ -804,6 +815,10 @@ typedef struct hwif_s {
804 void *hwif_data; /* extra hwif data */ 815 void *hwif_data; /* extra hwif data */
805 816
806 unsigned dma; 817 unsigned dma;
818
819#ifdef CONFIG_BLK_DEV_IDEACPI
820 struct ide_acpi_hwif_link *acpidata;
821#endif
807} ____cacheline_internodealigned_in_smp ide_hwif_t; 822} ____cacheline_internodealigned_in_smp ide_hwif_t;
808 823
809/* 824/*
@@ -1192,8 +1207,8 @@ void ide_init_disk(struct gendisk *, ide_drive_t *);
1192extern int ideprobe_init(void); 1207extern int ideprobe_init(void);
1193 1208
1194extern void ide_scan_pcibus(int scan_direction) __init; 1209extern void ide_scan_pcibus(int scan_direction) __init;
1195extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner); 1210extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name);
1196#define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE) 1211#define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME)
1197void ide_pci_setup_ports(struct pci_dev *, struct ide_pci_device_s *, int, ata_index_t *); 1212void ide_pci_setup_ports(struct pci_dev *, struct ide_pci_device_s *, int, ata_index_t *);
1198extern void ide_setup_pci_noise (struct pci_dev *dev, struct ide_pci_device_s *d); 1213extern void ide_setup_pci_noise (struct pci_dev *dev, struct ide_pci_device_s *d);
1199 1214
@@ -1298,6 +1313,18 @@ static inline void ide_dma_verbose(ide_drive_t *drive) { ; }
1298static inline void ide_release_dma(ide_hwif_t *drive) {;} 1313static inline void ide_release_dma(ide_hwif_t *drive) {;}
1299#endif 1314#endif
1300 1315
1316#ifdef CONFIG_BLK_DEV_IDEACPI
1317extern int ide_acpi_exec_tfs(ide_drive_t *drive);
1318extern void ide_acpi_get_timing(ide_hwif_t *hwif);
1319extern void ide_acpi_push_timing(ide_hwif_t *hwif);
1320extern void ide_acpi_init(ide_hwif_t *hwif);
1321#else
1322static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; }
1323static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; }
1324static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; }
1325static inline void ide_acpi_init(ide_hwif_t *hwif) { ; }
1326#endif
1327
1301extern int ide_hwif_request_regions(ide_hwif_t *hwif); 1328extern int ide_hwif_request_regions(ide_hwif_t *hwif);
1302extern void ide_hwif_release_regions(ide_hwif_t* hwif); 1329extern void ide_hwif_release_regions(ide_hwif_t* hwif);
1303extern void ide_unregister (unsigned int index); 1330extern void ide_unregister (unsigned int index);
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 52fc4052a0ae..5504b671357f 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -68,6 +68,7 @@ typedef void fastcall (*irq_flow_handler_t)(unsigned int irq,
68#define IRQ_MOVE_PENDING 0x40000000 /* need to re-target IRQ destination */ 68#define IRQ_MOVE_PENDING 0x40000000 /* need to re-target IRQ destination */
69 69
70struct proc_dir_entry; 70struct proc_dir_entry;
71struct msi_desc;
71 72
72/** 73/**
73 * struct irq_chip - hardware interrupt chip descriptor 74 * struct irq_chip - hardware interrupt chip descriptor
@@ -148,6 +149,7 @@ struct irq_chip {
148struct irq_desc { 149struct irq_desc {
149 irq_flow_handler_t handle_irq; 150 irq_flow_handler_t handle_irq;
150 struct irq_chip *chip; 151 struct irq_chip *chip;
152 struct msi_desc *msi_desc;
151 void *handler_data; 153 void *handler_data;
152 void *chip_data; 154 void *chip_data;
153 struct irqaction *action; /* IRQ action list */ 155 struct irqaction *action; /* IRQ action list */
@@ -373,10 +375,12 @@ extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
373extern int set_irq_data(unsigned int irq, void *data); 375extern int set_irq_data(unsigned int irq, void *data);
374extern int set_irq_chip_data(unsigned int irq, void *data); 376extern int set_irq_chip_data(unsigned int irq, void *data);
375extern int set_irq_type(unsigned int irq, unsigned int type); 377extern int set_irq_type(unsigned int irq, unsigned int type);
378extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
376 379
377#define get_irq_chip(irq) (irq_desc[irq].chip) 380#define get_irq_chip(irq) (irq_desc[irq].chip)
378#define get_irq_chip_data(irq) (irq_desc[irq].chip_data) 381#define get_irq_chip_data(irq) (irq_desc[irq].chip_data)
379#define get_irq_data(irq) (irq_desc[irq].handler_data) 382#define get_irq_data(irq) (irq_desc[irq].handler_data)
383#define get_irq_msi(irq) (irq_desc[irq].msi_desc)
380 384
381#endif /* CONFIG_GENERIC_HARDIRQS */ 385#endif /* CONFIG_GENERIC_HARDIRQS */
382 386
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 76538fcf2c4e..b850e0310538 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -74,9 +74,13 @@ extern void kobject_init(struct kobject *);
74extern void kobject_cleanup(struct kobject *); 74extern void kobject_cleanup(struct kobject *);
75 75
76extern int __must_check kobject_add(struct kobject *); 76extern int __must_check kobject_add(struct kobject *);
77extern int __must_check kobject_shadow_add(struct kobject *, struct dentry *);
77extern void kobject_del(struct kobject *); 78extern void kobject_del(struct kobject *);
78 79
79extern int __must_check kobject_rename(struct kobject *, const char *new_name); 80extern int __must_check kobject_rename(struct kobject *, const char *new_name);
81extern int __must_check kobject_shadow_rename(struct kobject *kobj,
82 struct dentry *new_parent,
83 const char *new_name);
80extern int __must_check kobject_move(struct kobject *, struct kobject *); 84extern int __must_check kobject_move(struct kobject *, struct kobject *);
81 85
82extern int __must_check kobject_register(struct kobject *); 86extern int __must_check kobject_register(struct kobject *);
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index d0e6a5497614..e45712acfac5 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -71,6 +71,7 @@ struct mmc_card {
71#define MMC_STATE_SDCARD (1<<3) /* is an SD card */ 71#define MMC_STATE_SDCARD (1<<3) /* is an SD card */
72#define MMC_STATE_READONLY (1<<4) /* card is read-only */ 72#define MMC_STATE_READONLY (1<<4) /* card is read-only */
73#define MMC_STATE_HIGHSPEED (1<<5) /* card is in high speed mode */ 73#define MMC_STATE_HIGHSPEED (1<<5) /* card is in high speed mode */
74#define MMC_STATE_BLOCKADDR (1<<6) /* card uses block-addressing */
74 u32 raw_cid[4]; /* raw card CID */ 75 u32 raw_cid[4]; /* raw card CID */
75 u32 raw_csd[4]; /* raw card CSD */ 76 u32 raw_csd[4]; /* raw card CSD */
76 u32 raw_scr[2]; /* raw card SCR */ 77 u32 raw_scr[2]; /* raw card SCR */
@@ -87,6 +88,7 @@ struct mmc_card {
87#define mmc_card_sd(c) ((c)->state & MMC_STATE_SDCARD) 88#define mmc_card_sd(c) ((c)->state & MMC_STATE_SDCARD)
88#define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY) 89#define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY)
89#define mmc_card_highspeed(c) ((c)->state & MMC_STATE_HIGHSPEED) 90#define mmc_card_highspeed(c) ((c)->state & MMC_STATE_HIGHSPEED)
91#define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR)
90 92
91#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT) 93#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT)
92#define mmc_card_set_dead(c) ((c)->state |= MMC_STATE_DEAD) 94#define mmc_card_set_dead(c) ((c)->state |= MMC_STATE_DEAD)
@@ -94,6 +96,7 @@ struct mmc_card {
94#define mmc_card_set_sd(c) ((c)->state |= MMC_STATE_SDCARD) 96#define mmc_card_set_sd(c) ((c)->state |= MMC_STATE_SDCARD)
95#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY) 97#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
96#define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED) 98#define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED)
99#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
97 100
98#define mmc_card_name(c) ((c)->cid.prod_name) 101#define mmc_card_name(c) ((c)->cid.prod_name)
99#define mmc_card_id(c) ((c)->dev.bus_id) 102#define mmc_card_id(c) ((c)->dev.bus_id)
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index c15ae1986b98..913e5752569f 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -92,8 +92,10 @@ struct mmc_host {
92 unsigned int max_seg_size; /* see blk_queue_max_segment_size */ 92 unsigned int max_seg_size; /* see blk_queue_max_segment_size */
93 unsigned short max_hw_segs; /* see blk_queue_max_hw_segments */ 93 unsigned short max_hw_segs; /* see blk_queue_max_hw_segments */
94 unsigned short max_phys_segs; /* see blk_queue_max_phys_segments */ 94 unsigned short max_phys_segs; /* see blk_queue_max_phys_segments */
95 unsigned short max_sectors; /* see blk_queue_max_sectors */
96 unsigned short unused; 95 unsigned short unused;
96 unsigned int max_req_size; /* maximum number of bytes in one req */
97 unsigned int max_blk_size; /* maximum size of one mmc block */
98 unsigned int max_blk_count; /* maximum number of blocks in one req */
97 99
98 /* private data */ 100 /* private data */
99 struct mmc_ios ios; /* current io bus settings */ 101 struct mmc_ios ios; /* current io bus settings */
@@ -106,8 +108,9 @@ struct mmc_host {
106 struct list_head cards; /* devices attached to this host */ 108 struct list_head cards; /* devices attached to this host */
107 109
108 wait_queue_head_t wq; 110 wait_queue_head_t wq;
109 spinlock_t lock; /* card_busy lock */ 111 spinlock_t lock; /* claimed lock */
110 struct mmc_card *card_busy; /* the MMC card claiming host */ 112 unsigned int claimed:1; /* host exclusively claimed */
113
111 struct mmc_card *card_selected; /* the selected MMC card */ 114 struct mmc_card *card_selected; /* the selected MMC card */
112 115
113 struct delayed_work detect; 116 struct delayed_work detect;
@@ -126,6 +129,7 @@ static inline void *mmc_priv(struct mmc_host *host)
126} 129}
127 130
128#define mmc_dev(x) ((x)->parent) 131#define mmc_dev(x) ((x)->parent)
132#define mmc_classdev(x) (&(x)->class_dev)
129#define mmc_hostname(x) ((x)->class_dev.bus_id) 133#define mmc_hostname(x) ((x)->class_dev.bus_id)
130 134
131extern int mmc_suspend_host(struct mmc_host *, pm_message_t); 135extern int mmc_suspend_host(struct mmc_host *, pm_message_t);
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index bcf24909d677..cdc54be804f1 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -43,6 +43,7 @@ struct mmc_command {
43#define MMC_RSP_R2 (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC) 43#define MMC_RSP_R2 (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC)
44#define MMC_RSP_R3 (MMC_RSP_PRESENT) 44#define MMC_RSP_R3 (MMC_RSP_PRESENT)
45#define MMC_RSP_R6 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) 45#define MMC_RSP_R6 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
46#define MMC_RSP_R7 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
46 47
47#define mmc_resp_type(cmd) ((cmd)->flags & (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC|MMC_RSP_BUSY|MMC_RSP_OPCODE)) 48#define mmc_resp_type(cmd) ((cmd)->flags & (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC|MMC_RSP_BUSY|MMC_RSP_OPCODE))
48 49
diff --git a/include/linux/mmc/protocol.h b/include/linux/mmc/protocol.h
index 2dce60c43f4b..c90b6768329d 100644
--- a/include/linux/mmc/protocol.h
+++ b/include/linux/mmc/protocol.h
@@ -79,9 +79,12 @@
79#define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1 */ 79#define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1 */
80 80
81/* SD commands type argument response */ 81/* SD commands type argument response */
82 /* class 8 */ 82 /* class 0 */
83/* This is basically the same command as for MMC with some quirks. */ 83/* This is basically the same command as for MMC with some quirks. */
84#define SD_SEND_RELATIVE_ADDR 3 /* bcr R6 */ 84#define SD_SEND_RELATIVE_ADDR 3 /* bcr R6 */
85#define SD_SEND_IF_COND 8 /* bcr [11:0] See below R7 */
86
87 /* class 10 */
85#define SD_SWITCH 6 /* adtc [31:0] See below R1 */ 88#define SD_SWITCH 6 /* adtc [31:0] See below R1 */
86 89
87 /* Application commands */ 90 /* Application commands */
@@ -115,6 +118,14 @@
115 */ 118 */
116 119
117/* 120/*
121 * SD_SEND_IF_COND argument format:
122 *
123 * [31:12] Reserved (0)
124 * [11:8] Host Voltage Supply Flags
125 * [7:0] Check Pattern (0xAA)
126 */
127
128/*
118 MMC status in R1 129 MMC status in R1
119 Type 130 Type
120 e : error bit 131 e : error bit
diff --git a/include/linux/module.h b/include/linux/module.h
index 10f771a49997..419d3ef293dd 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -58,6 +58,7 @@ struct module_kobject
58{ 58{
59 struct kobject kobj; 59 struct kobject kobj;
60 struct module *mod; 60 struct module *mod;
61 struct kobject *drivers_dir;
61}; 62};
62 63
63/* These are either module local, or the kernel's dummy ones. */ 64/* These are either module local, or the kernel's dummy ones. */
@@ -263,7 +264,7 @@ struct module
263 struct module_attribute *modinfo_attrs; 264 struct module_attribute *modinfo_attrs;
264 const char *version; 265 const char *version;
265 const char *srcversion; 266 const char *srcversion;
266 struct kobject *drivers_dir; 267 struct kobject *holders_dir;
267 268
268 /* Exported symbols */ 269 /* Exported symbols */
269 const struct kernel_symbol *syms; 270 const struct kernel_symbol *syms;
diff --git a/include/linux/msi.h b/include/linux/msi.h
index c7ef94343673..74c8a2ecc9dd 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -7,11 +7,10 @@ struct msi_msg {
7 u32 data; /* 16 bits of msi message data */ 7 u32 data; /* 16 bits of msi message data */
8}; 8};
9 9
10/* Heper functions */ 10/* Helper functions */
11extern void mask_msi_irq(unsigned int irq); 11extern void mask_msi_irq(unsigned int irq);
12extern void unmask_msi_irq(unsigned int irq); 12extern void unmask_msi_irq(unsigned int irq);
13extern void read_msi_msg(unsigned int irq, struct msi_msg *msg); 13extern void read_msi_msg(unsigned int irq, struct msi_msg *msg);
14
15extern void write_msi_msg(unsigned int irq, struct msi_msg *msg); 14extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
16 15
17struct msi_desc { 16struct msi_desc {
@@ -42,7 +41,7 @@ struct msi_desc {
42/* 41/*
43 * The arch hook for setup up msi irqs 42 * The arch hook for setup up msi irqs
44 */ 43 */
45int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev); 44int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
46void arch_teardown_msi_irq(unsigned int irq); 45void arch_teardown_msi_irq(unsigned int irq);
47 46
48 47
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index fea0d9db6846..2e37f5012788 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -529,10 +529,11 @@ struct net_device
529 struct net_bridge_port *br_port; 529 struct net_bridge_port *br_port;
530 530
531 /* class/net/name entry */ 531 /* class/net/name entry */
532 struct class_device class_dev; 532 struct device dev;
533 /* space for optional statistics and wireless sysfs groups */ 533 /* space for optional statistics and wireless sysfs groups */
534 struct attribute_group *sysfs_groups[3]; 534 struct attribute_group *sysfs_groups[3];
535}; 535};
536#define to_net_dev(d) container_of(d, struct net_device, dev)
536 537
537#define NETDEV_ALIGN 32 538#define NETDEV_ALIGN 32
538#define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1) 539#define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1)
@@ -548,7 +549,7 @@ static inline void *netdev_priv(struct net_device *dev)
548/* Set the sysfs physical device reference for the network logical device 549/* Set the sysfs physical device reference for the network logical device
549 * if set prior to registration will cause a symlink during initialization. 550 * if set prior to registration will cause a symlink during initialization.
550 */ 551 */
551#define SET_NETDEV_DEV(net, pdev) ((net)->class_dev.dev = (pdev)) 552#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
552 553
553struct packet_type { 554struct packet_type {
554 __be16 type; /* This is really htons(ether_type). */ 555 __be16 type; /* This is really htons(ether_type). */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index f3c617eabd8d..805412cc6875 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -174,6 +174,9 @@ struct pci_dev {
174 struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */ 174 struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
175 int rom_attr_enabled; /* has display of the rom attribute been enabled? */ 175 int rom_attr_enabled; /* has display of the rom attribute been enabled? */
176 struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ 176 struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
177#ifdef CONFIG_PCI_MSI
178 unsigned int first_msi_irq;
179#endif
177}; 180};
178 181
179#define pci_dev_g(n) list_entry(n, struct pci_dev, global_list) 182#define pci_dev_g(n) list_entry(n, struct pci_dev, global_list)
@@ -181,6 +184,11 @@ struct pci_dev {
181#define to_pci_dev(n) container_of(n, struct pci_dev, dev) 184#define to_pci_dev(n) container_of(n, struct pci_dev, dev)
182#define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL) 185#define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
183 186
187static inline int pci_channel_offline(struct pci_dev *pdev)
188{
189 return (pdev->error_state != pci_channel_io_normal);
190}
191
184static inline struct pci_cap_saved_state *pci_find_saved_cap( 192static inline struct pci_cap_saved_state *pci_find_saved_cap(
185 struct pci_dev *pci_dev,char cap) 193 struct pci_dev *pci_dev,char cap)
186{ 194{
@@ -463,8 +471,7 @@ extern void pci_sort_breadthfirst(void);
463 471
464/* Generic PCI functions exported to card drivers */ 472/* Generic PCI functions exported to card drivers */
465 473
466struct pci_dev *pci_find_device (unsigned int vendor, unsigned int device, const struct pci_dev *from); 474struct pci_dev __deprecated *pci_find_device (unsigned int vendor, unsigned int device, const struct pci_dev *from);
467struct pci_dev *pci_find_device_reverse (unsigned int vendor, unsigned int device, const struct pci_dev *from);
468struct pci_dev *pci_find_slot (unsigned int bus, unsigned int devfn); 475struct pci_dev *pci_find_slot (unsigned int bus, unsigned int devfn);
469int pci_find_capability (struct pci_dev *dev, int cap); 476int pci_find_capability (struct pci_dev *dev, int cap);
470int pci_find_next_capability (struct pci_dev *dev, u8 pos, int cap); 477int pci_find_next_capability (struct pci_dev *dev, u8 pos, int cap);
@@ -533,6 +540,7 @@ void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno);
533int __must_check pci_assign_resource(struct pci_dev *dev, int i); 540int __must_check pci_assign_resource(struct pci_dev *dev, int i);
534int __must_check pci_assign_resource_fixed(struct pci_dev *dev, int i); 541int __must_check pci_assign_resource_fixed(struct pci_dev *dev, int i);
535void pci_restore_bars(struct pci_dev *dev); 542void pci_restore_bars(struct pci_dev *dev);
543int pci_select_bars(struct pci_dev *dev, unsigned long flags);
536 544
537/* ROM control related routines */ 545/* ROM control related routines */
538void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); 546void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
@@ -561,6 +569,8 @@ int __must_check pci_request_regions(struct pci_dev *, const char *);
561void pci_release_regions(struct pci_dev *); 569void pci_release_regions(struct pci_dev *);
562int __must_check pci_request_region(struct pci_dev *, int, const char *); 570int __must_check pci_request_region(struct pci_dev *, int, const char *);
563void pci_release_region(struct pci_dev *, int); 571void pci_release_region(struct pci_dev *, int);
572int pci_request_selected_regions(struct pci_dev *, int, const char *);
573void pci_release_selected_regions(struct pci_dev *, int);
564 574
565/* drivers/pci/bus.c */ 575/* drivers/pci/bus.c */
566int __must_check pci_bus_alloc_resource(struct pci_bus *bus, 576int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
@@ -573,10 +583,11 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
573void pci_enable_bridges(struct pci_bus *bus); 583void pci_enable_bridges(struct pci_bus *bus);
574 584
575/* Proper probing supporting hot-pluggable devices */ 585/* Proper probing supporting hot-pluggable devices */
576int __must_check __pci_register_driver(struct pci_driver *, struct module *); 586int __must_check __pci_register_driver(struct pci_driver *, struct module *,
587 const char *mod_name);
577static inline int __must_check pci_register_driver(struct pci_driver *driver) 588static inline int __must_check pci_register_driver(struct pci_driver *driver)
578{ 589{
579 return __pci_register_driver(driver, THIS_MODULE); 590 return __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME);
580} 591}
581 592
582void pci_unregister_driver(struct pci_driver *); 593void pci_unregister_driver(struct pci_driver *);
@@ -611,10 +622,6 @@ enum pci_dma_burst_strategy {
611 strategy_parameter byte boundaries */ 622 strategy_parameter byte boundaries */
612}; 623};
613 624
614#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
615extern struct pci_dev *isa_bridge;
616#endif
617
618struct msix_entry { 625struct msix_entry {
619 u16 vector; /* kernel uses to write allocated vector */ 626 u16 vector; /* kernel uses to write allocated vector */
620 u16 entry; /* driver uses to specify entry, OS writes */ 627 u16 entry; /* driver uses to specify entry, OS writes */
@@ -622,7 +629,6 @@ struct msix_entry {
622 629
623 630
624#ifndef CONFIG_PCI_MSI 631#ifndef CONFIG_PCI_MSI
625static inline void pci_scan_msi_device(struct pci_dev *dev) {}
626static inline int pci_enable_msi(struct pci_dev *dev) {return -1;} 632static inline int pci_enable_msi(struct pci_dev *dev) {return -1;}
627static inline void pci_disable_msi(struct pci_dev *dev) {} 633static inline void pci_disable_msi(struct pci_dev *dev) {}
628static inline int pci_enable_msix(struct pci_dev* dev, 634static inline int pci_enable_msix(struct pci_dev* dev,
@@ -630,7 +636,6 @@ static inline int pci_enable_msix(struct pci_dev* dev,
630static inline void pci_disable_msix(struct pci_dev *dev) {} 636static inline void pci_disable_msix(struct pci_dev *dev) {}
631static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) {} 637static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) {}
632#else 638#else
633extern void pci_scan_msi_device(struct pci_dev *dev);
634extern int pci_enable_msi(struct pci_dev *dev); 639extern int pci_enable_msi(struct pci_dev *dev);
635extern void pci_disable_msi(struct pci_dev *dev); 640extern void pci_disable_msi(struct pci_dev *dev);
636extern int pci_enable_msix(struct pci_dev* dev, 641extern int pci_enable_msix(struct pci_dev* dev,
@@ -722,8 +727,6 @@ static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state) {
722static inline pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) { return PCI_D0; } 727static inline pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) { return PCI_D0; }
723static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) { return 0; } 728static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) { return 0; }
724 729
725#define isa_bridge ((struct pci_dev *)NULL)
726
727#define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0) 730#define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0)
728 731
729static inline void pci_block_user_cfg_access(struct pci_dev *dev) { } 732static inline void pci_block_user_cfg_access(struct pci_dev *dev) { }
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 3d1d21035dec..defdeed20641 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -735,9 +735,11 @@
735#define PCI_DEVICE_ID_TI_TVP4020 0x3d07 735#define PCI_DEVICE_ID_TI_TVP4020 0x3d07
736#define PCI_DEVICE_ID_TI_4450 0x8011 736#define PCI_DEVICE_ID_TI_4450 0x8011
737#define PCI_DEVICE_ID_TI_XX21_XX11 0x8031 737#define PCI_DEVICE_ID_TI_XX21_XX11 0x8031
738#define PCI_DEVICE_ID_TI_XX21_XX11_FM 0x8033
738#define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034 739#define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034
739#define PCI_DEVICE_ID_TI_X515 0x8036 740#define PCI_DEVICE_ID_TI_X515 0x8036
740#define PCI_DEVICE_ID_TI_XX12 0x8039 741#define PCI_DEVICE_ID_TI_XX12 0x8039
742#define PCI_DEVICE_ID_TI_XX12_FM 0x803b
741#define PCI_DEVICE_ID_TI_1130 0xac12 743#define PCI_DEVICE_ID_TI_1130 0xac12
742#define PCI_DEVICE_ID_TI_1031 0xac13 744#define PCI_DEVICE_ID_TI_1031 0xac13
743#define PCI_DEVICE_ID_TI_1131 0xac15 745#define PCI_DEVICE_ID_TI_1131 0xac15
@@ -765,6 +767,7 @@
765#define PCI_DEVICE_ID_TI_1510 0xac56 767#define PCI_DEVICE_ID_TI_1510 0xac56
766#define PCI_DEVICE_ID_TI_X620 0xac8d 768#define PCI_DEVICE_ID_TI_X620 0xac8d
767#define PCI_DEVICE_ID_TI_X420 0xac8e 769#define PCI_DEVICE_ID_TI_X420 0xac8e
770#define PCI_DEVICE_ID_TI_XX20_FM 0xac8f
768 771
769#define PCI_VENDOR_ID_SONY 0x104d 772#define PCI_VENDOR_ID_SONY 0x104d
770 773
@@ -1451,6 +1454,7 @@
1451 1454
1452#define PCI_VENDOR_ID_TOSHIBA_2 0x102f 1455#define PCI_VENDOR_ID_TOSHIBA_2 0x102f
1453#define PCI_DEVICE_ID_TOSHIBA_TC35815CF 0x0030 1456#define PCI_DEVICE_ID_TOSHIBA_TC35815CF 0x0030
1457#define PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE 0x0105
1454#define PCI_DEVICE_ID_TOSHIBA_TC86C001_MISC 0x0108 1458#define PCI_DEVICE_ID_TOSHIBA_TC86C001_MISC 0x0108
1455#define PCI_DEVICE_ID_TOSHIBA_SPIDER_NET 0x01b3 1459#define PCI_DEVICE_ID_TOSHIBA_SPIDER_NET 0x01b3
1456 1460
@@ -1971,6 +1975,7 @@
1971#define PCI_DEVICE_ID_TOPIC_TP560 0x0000 1975#define PCI_DEVICE_ID_TOPIC_TP560 0x0000
1972 1976
1973#define PCI_VENDOR_ID_ENE 0x1524 1977#define PCI_VENDOR_ID_ENE 0x1524
1978#define PCI_DEVICE_ID_ENE_CB712_SD 0x0550
1974#define PCI_DEVICE_ID_ENE_1211 0x1211 1979#define PCI_DEVICE_ID_ENE_1211 0x1211
1975#define PCI_DEVICE_ID_ENE_1225 0x1225 1980#define PCI_DEVICE_ID_ENE_1225 0x1225
1976#define PCI_DEVICE_ID_ENE_1410 0x1410 1981#define PCI_DEVICE_ID_ENE_1410 0x1410
@@ -2066,6 +2071,8 @@
2066#define PCI_VENDOR_ID_TDI 0x192E 2071#define PCI_VENDOR_ID_TDI 0x192E
2067#define PCI_DEVICE_ID_TDI_EHCI 0x0101 2072#define PCI_DEVICE_ID_TDI_EHCI 0x0101
2068 2073
2074#define PCI_VENDOR_ID_PASEMI 0x1959
2075
2069#define PCI_VENDOR_ID_JMICRON 0x197B 2076#define PCI_VENDOR_ID_JMICRON 0x197B
2070#define PCI_DEVICE_ID_JMICRON_JMB360 0x2360 2077#define PCI_DEVICE_ID_JMICRON_JMB360 0x2360
2071#define PCI_DEVICE_ID_JMICRON_JMB361 0x2361 2078#define PCI_DEVICE_ID_JMICRON_JMB361 0x2361
diff --git a/include/linux/serio.h b/include/linux/serio.h
index 0f478a8791a2..ac2c70e7f760 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -86,6 +86,11 @@ static inline void serio_register_port(struct serio *serio)
86void serio_unregister_port(struct serio *serio); 86void serio_unregister_port(struct serio *serio);
87void serio_unregister_child_port(struct serio *serio); 87void serio_unregister_child_port(struct serio *serio);
88 88
89int __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name);
90static inline int serio_register_driver(struct serio_driver *drv)
91{
92 return __serio_register_driver(drv, THIS_MODULE, KBUILD_MODNAME);
93}
89int serio_register_driver(struct serio_driver *drv); 94int serio_register_driver(struct serio_driver *drv);
90void serio_unregister_driver(struct serio_driver *drv); 95void serio_unregister_driver(struct serio_driver *drv);
91 96
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 2129d1b6c874..192de3afa96b 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -11,10 +11,12 @@
11#define _SYSFS_H_ 11#define _SYSFS_H_
12 12
13#include <linux/compiler.h> 13#include <linux/compiler.h>
14#include <linux/list.h>
14#include <asm/atomic.h> 15#include <asm/atomic.h>
15 16
16struct kobject; 17struct kobject;
17struct module; 18struct module;
19struct nameidata;
18 20
19struct attribute { 21struct attribute {
20 const char * name; 22 const char * name;
@@ -88,13 +90,13 @@ struct sysfs_dirent {
88#ifdef CONFIG_SYSFS 90#ifdef CONFIG_SYSFS
89 91
90extern int __must_check 92extern int __must_check
91sysfs_create_dir(struct kobject *); 93sysfs_create_dir(struct kobject *, struct dentry *);
92 94
93extern void 95extern void
94sysfs_remove_dir(struct kobject *); 96sysfs_remove_dir(struct kobject *);
95 97
96extern int __must_check 98extern int __must_check
97sysfs_rename_dir(struct kobject *, const char *new_name); 99sysfs_rename_dir(struct kobject *, struct dentry *, const char *new_name);
98 100
99extern int __must_check 101extern int __must_check
100sysfs_move_dir(struct kobject *, struct kobject *); 102sysfs_move_dir(struct kobject *, struct kobject *);
@@ -126,11 +128,17 @@ int __must_check sysfs_create_group(struct kobject *,
126void sysfs_remove_group(struct kobject *, const struct attribute_group *); 128void sysfs_remove_group(struct kobject *, const struct attribute_group *);
127void sysfs_notify(struct kobject * k, char *dir, char *attr); 129void sysfs_notify(struct kobject * k, char *dir, char *attr);
128 130
131
132extern int sysfs_make_shadowed_dir(struct kobject *kobj,
133 void * (*follow_link)(struct dentry *, struct nameidata *));
134extern struct dentry *sysfs_create_shadow_dir(struct kobject *kobj);
135extern void sysfs_remove_shadow_dir(struct dentry *dir);
136
129extern int __must_check sysfs_init(void); 137extern int __must_check sysfs_init(void);
130 138
131#else /* CONFIG_SYSFS */ 139#else /* CONFIG_SYSFS */
132 140
133static inline int sysfs_create_dir(struct kobject * k) 141static inline int sysfs_create_dir(struct kobject * k, struct dentry *shadow)
134{ 142{
135 return 0; 143 return 0;
136} 144}
@@ -140,7 +148,9 @@ static inline void sysfs_remove_dir(struct kobject * k)
140 ; 148 ;
141} 149}
142 150
143static inline int sysfs_rename_dir(struct kobject * k, const char *new_name) 151static inline int sysfs_rename_dir(struct kobject * k,
152 struct dentry *new_parent,
153 const char *new_name)
144{ 154{
145 return 0; 155 return 0;
146} 156}
@@ -204,6 +214,12 @@ static inline void sysfs_notify(struct kobject * k, char *dir, char *attr)
204{ 214{
205} 215}
206 216
217static inline int sysfs_make_shadowed_dir(struct kobject *kobj,
218 void * (*follow_link)(struct dentry *, struct nameidata *))
219{
220 return 0;
221}
222
207static inline int __must_check sysfs_init(void) 223static inline int __must_check sysfs_init(void)
208{ 224{
209 return 0; 225 return 0;
diff --git a/include/linux/tifm.h b/include/linux/tifm.h
index dfb8052eee5e..3deb0a6c1370 100644
--- a/include/linux/tifm.h
+++ b/include/linux/tifm.h
@@ -17,7 +17,7 @@
17#include <linux/wait.h> 17#include <linux/wait.h>
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <linux/pci.h> 19#include <linux/pci.h>
20#include <linux/scatterlist.h> 20#include <linux/kthread.h>
21 21
22/* Host registers (relative to pci base address): */ 22/* Host registers (relative to pci base address): */
23enum { 23enum {
@@ -62,11 +62,10 @@ enum {
62 62
63 63
64#define TIFM_IRQ_ENABLE 0x80000000 64#define TIFM_IRQ_ENABLE 0x80000000
65#define TIFM_IRQ_SOCKMASK 0x00000001 65#define TIFM_IRQ_SOCKMASK(x) (x)
66#define TIFM_IRQ_CARDMASK 0x00000100 66#define TIFM_IRQ_CARDMASK(x) ((x) << 8)
67#define TIFM_IRQ_FIFOMASK 0x00010000 67#define TIFM_IRQ_FIFOMASK(x) ((x) << 16)
68#define TIFM_IRQ_SETALL 0xffffffff 68#define TIFM_IRQ_SETALL 0xffffffff
69#define TIFM_IRQ_SETALLSOCK 0x0000000f
70 69
71#define TIFM_CTRL_LED 0x00000040 70#define TIFM_CTRL_LED 0x00000040
72#define TIFM_CTRL_FAST_CLK 0x00000100 71#define TIFM_CTRL_FAST_CLK 0x00000100
@@ -89,10 +88,9 @@ struct tifm_dev {
89 char __iomem *addr; 88 char __iomem *addr;
90 spinlock_t lock; 89 spinlock_t lock;
91 tifm_media_id media_id; 90 tifm_media_id media_id;
92 char wq_name[KOBJ_NAME_LEN]; 91 unsigned int socket_id;
93 struct workqueue_struct *wq;
94 92
95 unsigned int (*signal_irq)(struct tifm_dev *sock, 93 void (*signal_irq)(struct tifm_dev *sock,
96 unsigned int sock_irq_status); 94 unsigned int sock_irq_status);
97 95
98 struct tifm_driver *drv; 96 struct tifm_driver *drv;
@@ -103,24 +101,23 @@ struct tifm_driver {
103 tifm_media_id *id_table; 101 tifm_media_id *id_table;
104 int (*probe)(struct tifm_dev *dev); 102 int (*probe)(struct tifm_dev *dev);
105 void (*remove)(struct tifm_dev *dev); 103 void (*remove)(struct tifm_dev *dev);
104 int (*suspend)(struct tifm_dev *dev,
105 pm_message_t state);
106 int (*resume)(struct tifm_dev *dev);
106 107
107 struct device_driver driver; 108 struct device_driver driver;
108}; 109};
109 110
110struct tifm_adapter { 111struct tifm_adapter {
111 char __iomem *addr; 112 char __iomem *addr;
112 unsigned int irq_status;
113 unsigned int insert_mask;
114 unsigned int remove_mask;
115 spinlock_t lock; 113 spinlock_t lock;
114 unsigned int irq_status;
115 unsigned int socket_change_set;
116 wait_queue_head_t change_set_notify;
116 unsigned int id; 117 unsigned int id;
117 unsigned int max_sockets; 118 unsigned int num_sockets;
118 char wq_name[KOBJ_NAME_LEN];
119 unsigned int inhibit_new_cards;
120 struct workqueue_struct *wq;
121 struct work_struct media_inserter;
122 struct work_struct media_remover;
123 struct tifm_dev **sockets; 119 struct tifm_dev **sockets;
120 struct task_struct *media_switcher;
124 struct class_device cdev; 121 struct class_device cdev;
125 struct device *dev; 122 struct device *dev;
126 123
@@ -130,9 +127,9 @@ struct tifm_adapter {
130struct tifm_adapter *tifm_alloc_adapter(void); 127struct tifm_adapter *tifm_alloc_adapter(void);
131void tifm_free_device(struct device *dev); 128void tifm_free_device(struct device *dev);
132void tifm_free_adapter(struct tifm_adapter *fm); 129void tifm_free_adapter(struct tifm_adapter *fm);
133int tifm_add_adapter(struct tifm_adapter *fm); 130int tifm_add_adapter(struct tifm_adapter *fm, int (*mediathreadfn)(void *data));
134void tifm_remove_adapter(struct tifm_adapter *fm); 131void tifm_remove_adapter(struct tifm_adapter *fm);
135struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm, unsigned int id); 132struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm);
136int tifm_register_driver(struct tifm_driver *drv); 133int tifm_register_driver(struct tifm_driver *drv);
137void tifm_unregister_driver(struct tifm_driver *drv); 134void tifm_unregister_driver(struct tifm_driver *drv);
138void tifm_eject(struct tifm_dev *sock); 135void tifm_eject(struct tifm_dev *sock);
diff --git a/include/linux/usb.h b/include/linux/usb.h
index aab5b1b72021..b5c226a87ed8 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -2,7 +2,7 @@
2#define __LINUX_USB_H 2#define __LINUX_USB_H
3 3
4#include <linux/mod_devicetable.h> 4#include <linux/mod_devicetable.h>
5#include <linux/usb_ch9.h> 5#include <linux/usb/ch9.h>
6 6
7#define USB_MAJOR 180 7#define USB_MAJOR 180
8#define USB_DEVICE_MAJOR 189 8#define USB_DEVICE_MAJOR 189
@@ -107,7 +107,8 @@ enum usb_interface_condition {
107 * @needs_remote_wakeup: flag set when the driver requires remote-wakeup 107 * @needs_remote_wakeup: flag set when the driver requires remote-wakeup
108 * capability during autosuspend. 108 * capability during autosuspend.
109 * @dev: driver model's view of this device 109 * @dev: driver model's view of this device
110 * @class_dev: driver model's class view of this device. 110 * @usb_dev: if an interface is bound to the USB major, this will point
111 * to the sysfs representation for that device.
111 * @pm_usage_cnt: PM usage counter for this interface; autosuspend is not 112 * @pm_usage_cnt: PM usage counter for this interface; autosuspend is not
112 * allowed unless the counter is 0. 113 * allowed unless the counter is 0.
113 * 114 *
@@ -152,7 +153,7 @@ struct usb_interface {
152 unsigned needs_remote_wakeup:1; /* driver requires remote wakeup */ 153 unsigned needs_remote_wakeup:1; /* driver requires remote wakeup */
153 154
154 struct device dev; /* interface specific device info */ 155 struct device dev; /* interface specific device info */
155 struct class_device *class_dev; 156 struct device *usb_dev; /* pointer to the usb class's device, if any */
156 int pm_usage_cnt; /* usage counter for autosuspend */ 157 int pm_usage_cnt; /* usage counter for autosuspend */
157}; 158};
158#define to_usb_interface(d) container_of(d, struct usb_interface, dev) 159#define to_usb_interface(d) container_of(d, struct usb_interface, dev)
@@ -372,7 +373,7 @@ struct usb_device {
372 char *serial; /* iSerialNumber string, if present */ 373 char *serial; /* iSerialNumber string, if present */
373 374
374 struct list_head filelist; 375 struct list_head filelist;
375 struct class_device *class_dev; 376 struct device *usbfs_dev;
376 struct dentry *usbfs_dentry; /* usbfs dentry entry for the device */ 377 struct dentry *usbfs_dentry; /* usbfs dentry entry for the device */
377 378
378 /* 379 /*
@@ -475,6 +476,8 @@ extern void usb_driver_release_interface(struct usb_driver *driver,
475 struct usb_interface *iface); 476 struct usb_interface *iface);
476const struct usb_device_id *usb_match_id(struct usb_interface *interface, 477const struct usb_device_id *usb_match_id(struct usb_interface *interface,
477 const struct usb_device_id *id); 478 const struct usb_device_id *id);
479extern int usb_match_one_id(struct usb_interface *interface,
480 const struct usb_device_id *id);
478 481
479extern struct usb_interface *usb_find_interface(struct usb_driver *drv, 482extern struct usb_interface *usb_find_interface(struct usb_driver *drv,
480 int minor); 483 int minor);
@@ -554,6 +557,18 @@ static inline int usb_endpoint_xfer_bulk(const struct usb_endpoint_descriptor *e
554} 557}
555 558
556/** 559/**
560 * usb_endpoint_xfer_control - check if the endpoint has control transfer type
561 * @epd: endpoint to be checked
562 *
563 * Returns true if the endpoint is of type control, otherwise it returns false.
564 */
565static inline int usb_endpoint_xfer_control(const struct usb_endpoint_descriptor *epd)
566{
567 return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
568 USB_ENDPOINT_XFER_CONTROL);
569}
570
571/**
557 * usb_endpoint_xfer_int - check if the endpoint has interrupt transfer type 572 * usb_endpoint_xfer_int - check if the endpoint has interrupt transfer type
558 * @epd: endpoint to be checked 573 * @epd: endpoint to be checked
559 * 574 *
@@ -723,11 +738,21 @@ static inline int usb_endpoint_is_isoc_out(const struct usb_endpoint_descriptor
723 738
724/* ----------------------------------------------------------------------- */ 739/* ----------------------------------------------------------------------- */
725 740
741/* Stuff for dynamic usb ids */
726struct usb_dynids { 742struct usb_dynids {
727 spinlock_t lock; 743 spinlock_t lock;
728 struct list_head list; 744 struct list_head list;
729}; 745};
730 746
747struct usb_dynid {
748 struct list_head node;
749 struct usb_device_id id;
750};
751
752extern ssize_t usb_store_new_id(struct usb_dynids *dynids,
753 struct device_driver *driver,
754 const char *buf, size_t count);
755
731/** 756/**
732 * struct usbdrv_wrap - wrapper for driver-model structure 757 * struct usbdrv_wrap - wrapper for driver-model structure
733 * @driver: The driver-model core driver structure. 758 * @driver: The driver-model core driver structure.
@@ -868,10 +893,11 @@ struct usb_class_driver {
868 * use these in module_init()/module_exit() 893 * use these in module_init()/module_exit()
869 * and don't forget MODULE_DEVICE_TABLE(usb, ...) 894 * and don't forget MODULE_DEVICE_TABLE(usb, ...)
870 */ 895 */
871extern int usb_register_driver(struct usb_driver *, struct module *); 896extern int usb_register_driver(struct usb_driver *, struct module *,
897 const char *);
872static inline int usb_register(struct usb_driver *driver) 898static inline int usb_register(struct usb_driver *driver)
873{ 899{
874 return usb_register_driver(driver, THIS_MODULE); 900 return usb_register_driver(driver, THIS_MODULE, KBUILD_MODNAME);
875} 901}
876extern void usb_deregister(struct usb_driver *); 902extern void usb_deregister(struct usb_driver *);
877 903
@@ -1085,7 +1111,6 @@ struct urb
1085 struct kref kref; /* reference count of the URB */ 1111 struct kref kref; /* reference count of the URB */
1086 spinlock_t lock; /* lock for the URB */ 1112 spinlock_t lock; /* lock for the URB */
1087 void *hcpriv; /* private data for host controller */ 1113 void *hcpriv; /* private data for host controller */
1088 int bandwidth; /* bandwidth for INT/ISO request */
1089 atomic_t use_count; /* concurrent submissions counter */ 1114 atomic_t use_count; /* concurrent submissions counter */
1090 u8 reject; /* submissions will fail */ 1115 u8 reject; /* submissions will fail */
1091 1116
diff --git a/include/linux/usb/Kbuild b/include/linux/usb/Kbuild
new file mode 100644
index 000000000000..43f160cfe003
--- /dev/null
+++ b/include/linux/usb/Kbuild
@@ -0,0 +1,5 @@
1unifdef-y += audio.h
2unifdef-y += cdc.h
3unifdef-y += ch9.h
4unifdef-y += midi.h
5
diff --git a/include/linux/usb_ch9.h b/include/linux/usb/ch9.h
index c720d107ff29..ae7833749fa2 100644
--- a/include/linux/usb_ch9.h
+++ b/include/linux/usb/ch9.h
@@ -224,6 +224,7 @@ struct usb_device_descriptor {
224#define USB_CLASS_CONTENT_SEC 0x0d /* content security */ 224#define USB_CLASS_CONTENT_SEC 0x0d /* content security */
225#define USB_CLASS_VIDEO 0x0e 225#define USB_CLASS_VIDEO 0x0e
226#define USB_CLASS_WIRELESS_CONTROLLER 0xe0 226#define USB_CLASS_WIRELESS_CONTROLLER 0xe0
227#define USB_CLASS_MISC 0xef
227#define USB_CLASS_APP_SPEC 0xfe 228#define USB_CLASS_APP_SPEC 0xfe
228#define USB_CLASS_VENDOR_SPEC 0xff 229#define USB_CLASS_VENDOR_SPEC 0xff
229 230
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 10f99e5f1a97..33dcd8576696 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -179,6 +179,9 @@ static inline void usb_set_serial_data (struct usb_serial *serial, void *data)
179 * memory structure allocation at this point in time. 179 * memory structure allocation at this point in time.
180 * @shutdown: pointer to the driver's shutdown function. This will be 180 * @shutdown: pointer to the driver's shutdown function. This will be
181 * called when the device is removed from the system. 181 * called when the device is removed from the system.
182 * @usb_driver: pointer to the struct usb_driver that controls this
183 * device. This is necessary to allow dynamic ids to be added to
184 * the driver from sysfs.
182 * 185 *
183 * This structure is defines a USB Serial driver. It provides all of 186 * This structure is defines a USB Serial driver. It provides all of
184 * the information that the USB serial core code needs. If the function 187 * the information that the USB serial core code needs. If the function
@@ -202,6 +205,8 @@ struct usb_serial_driver {
202 205
203 struct list_head driver_list; 206 struct list_head driver_list;
204 struct device_driver driver; 207 struct device_driver driver;
208 struct usb_driver *usb_driver;
209 struct usb_dynids dynids;
205 210
206 int (*probe) (struct usb_serial *serial, const struct usb_device_id *id); 211 int (*probe) (struct usb_serial *serial, const struct usb_device_id *id);
207 int (*attach) (struct usb_serial *serial); 212 int (*attach) (struct usb_serial *serial);
diff --git a/include/linux/usb_gadgetfs.h b/include/linux/usb_gadgetfs.h
index b53d6ae8e55e..8086d5a9b94e 100644
--- a/include/linux/usb_gadgetfs.h
+++ b/include/linux/usb_gadgetfs.h
@@ -2,7 +2,7 @@
2#include <asm/types.h> 2#include <asm/types.h>
3#include <asm/ioctl.h> 3#include <asm/ioctl.h>
4 4
5#include <linux/usb_ch9.h> 5#include <linux/usb/ch9.h>
6 6
7/* 7/*
8 * Filesystem based user-mode API to USB Gadget controller hardware 8 * Filesystem based user-mode API to USB Gadget controller hardware
diff --git a/include/linux/video_output.h b/include/linux/video_output.h
new file mode 100644
index 000000000000..e63e0c03ee0d
--- /dev/null
+++ b/include/linux/video_output.h
@@ -0,0 +1,42 @@
1/*
2 *
3 * Copyright (C) 2006 Luming Yu <luming.yu@intel.com>
4 *
5 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or (at
10 * your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
20 *
21 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
22 */
23#ifndef _LINUX_VIDEO_OUTPUT_H
24#define _LINUX_VIDEO_OUTPUT_H
25#include <linux/device.h>
26struct output_device;
27struct output_properties {
28 int (*set_state)(struct output_device *);
29 int (*get_status)(struct output_device *);
30};
31struct output_device {
32 int request_state;
33 struct output_properties *props;
34 struct class_device class_dev;
35};
36#define to_output_device(obj) container_of(obj, struct output_device, class_dev)
37struct output_device *video_output_register(const char *name,
38 struct device *dev,
39 void *devdata,
40 struct output_properties *op);
41void video_output_unregister(struct output_device *dev);
42#endif
diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h
index 623a0fc0dae1..6e84258b94de 100644
--- a/include/pcmcia/ss.h
+++ b/include/pcmcia/ss.h
@@ -284,7 +284,7 @@ struct pcmcia_socket {
284#endif 284#endif
285 285
286 /* socket device */ 286 /* socket device */
287 struct class_device dev; 287 struct device dev;
288 void *driver_data; /* data internal to the socket driver */ 288 void *driver_data; /* data internal to the socket driver */
289 289
290}; 290};
diff --git a/include/rdma/ib_user_mad.h b/include/rdma/ib_user_mad.h
index 44537aa32e62..d66b15ea82c4 100644
--- a/include/rdma/ib_user_mad.h
+++ b/include/rdma/ib_user_mad.h
@@ -98,7 +98,7 @@ struct ib_user_mad_hdr {
98 */ 98 */
99struct ib_user_mad { 99struct ib_user_mad {
100 struct ib_user_mad_hdr hdr; 100 struct ib_user_mad_hdr hdr;
101 __u8 data[0]; 101 __u64 data[0];
102}; 102};
103 103
104/** 104/**
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 0bfa3328d686..765589f4d166 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -45,6 +45,7 @@
45#include <linux/device.h> 45#include <linux/device.h>
46#include <linux/mm.h> 46#include <linux/mm.h>
47#include <linux/dma-mapping.h> 47#include <linux/dma-mapping.h>
48#include <linux/kref.h>
48 49
49#include <asm/atomic.h> 50#include <asm/atomic.h>
50#include <asm/scatterlist.h> 51#include <asm/scatterlist.h>
@@ -419,8 +420,8 @@ struct ib_wc {
419 enum ib_wc_opcode opcode; 420 enum ib_wc_opcode opcode;
420 u32 vendor_err; 421 u32 vendor_err;
421 u32 byte_len; 422 u32 byte_len;
423 struct ib_qp *qp;
422 __be32 imm_data; 424 __be32 imm_data;
423 u32 qp_num;
424 u32 src_qp; 425 u32 src_qp;
425 int wc_flags; 426 int wc_flags;
426 u16 pkey_index; 427 u16 pkey_index;
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index d27b25855743..475e8a71bcdc 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -39,6 +39,7 @@ void dynamic_irq_init(unsigned int irq)
39 desc->chip = &no_irq_chip; 39 desc->chip = &no_irq_chip;
40 desc->handle_irq = handle_bad_irq; 40 desc->handle_irq = handle_bad_irq;
41 desc->depth = 1; 41 desc->depth = 1;
42 desc->msi_desc = NULL;
42 desc->handler_data = NULL; 43 desc->handler_data = NULL;
43 desc->chip_data = NULL; 44 desc->chip_data = NULL;
44 desc->action = NULL; 45 desc->action = NULL;
@@ -74,6 +75,9 @@ void dynamic_irq_cleanup(unsigned int irq)
74 WARN_ON(1); 75 WARN_ON(1);
75 return; 76 return;
76 } 77 }
78 desc->msi_desc = NULL;
79 desc->handler_data = NULL;
80 desc->chip_data = NULL;
77 desc->handle_irq = handle_bad_irq; 81 desc->handle_irq = handle_bad_irq;
78 desc->chip = &no_irq_chip; 82 desc->chip = &no_irq_chip;
79 spin_unlock_irqrestore(&desc->lock, flags); 83 spin_unlock_irqrestore(&desc->lock, flags);
@@ -162,6 +166,30 @@ int set_irq_data(unsigned int irq, void *data)
162EXPORT_SYMBOL(set_irq_data); 166EXPORT_SYMBOL(set_irq_data);
163 167
164/** 168/**
169 * set_irq_data - set irq type data for an irq
170 * @irq: Interrupt number
171 * @data: Pointer to interrupt specific data
172 *
173 * Set the hardware irq controller data for an irq
174 */
175int set_irq_msi(unsigned int irq, struct msi_desc *entry)
176{
177 struct irq_desc *desc;
178 unsigned long flags;
179
180 if (irq >= NR_IRQS) {
181 printk(KERN_ERR
182 "Trying to install msi data for IRQ%d\n", irq);
183 return -EINVAL;
184 }
185 desc = irq_desc + irq;
186 spin_lock_irqsave(&desc->lock, flags);
187 desc->msi_desc = entry;
188 spin_unlock_irqrestore(&desc->lock, flags);
189 return 0;
190}
191
192/**
165 * set_irq_chip_data - set irq chip data for an irq 193 * set_irq_chip_data - set irq chip data for an irq
166 * @irq: Interrupt number 194 * @irq: Interrupt number
167 * @data: Pointer to chip specific data 195 * @data: Pointer to chip specific data
diff --git a/kernel/module.c b/kernel/module.c
index d0f2260a0210..8a94e054230c 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -537,6 +537,8 @@ static int already_uses(struct module *a, struct module *b)
537static int use_module(struct module *a, struct module *b) 537static int use_module(struct module *a, struct module *b)
538{ 538{
539 struct module_use *use; 539 struct module_use *use;
540 int no_warn;
541
540 if (b == NULL || already_uses(a, b)) return 1; 542 if (b == NULL || already_uses(a, b)) return 1;
541 543
542 if (!strong_try_module_get(b)) 544 if (!strong_try_module_get(b))
@@ -552,6 +554,7 @@ static int use_module(struct module *a, struct module *b)
552 554
553 use->module_which_uses = a; 555 use->module_which_uses = a;
554 list_add(&use->list, &b->modules_which_use_me); 556 list_add(&use->list, &b->modules_which_use_me);
557 no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
555 return 1; 558 return 1;
556} 559}
557 560
@@ -569,6 +572,7 @@ static void module_unload_free(struct module *mod)
569 module_put(i); 572 module_put(i);
570 list_del(&use->list); 573 list_del(&use->list);
571 kfree(use); 574 kfree(use);
575 sysfs_remove_link(i->holders_dir, mod->name);
572 /* There can be at most one match. */ 576 /* There can be at most one match. */
573 break; 577 break;
574 } 578 }
@@ -1106,9 +1110,7 @@ static void module_remove_modinfo_attrs(struct module *mod)
1106 kfree(mod->modinfo_attrs); 1110 kfree(mod->modinfo_attrs);
1107} 1111}
1108 1112
1109static int mod_sysfs_setup(struct module *mod, 1113static int mod_sysfs_init(struct module *mod)
1110 struct kernel_param *kparam,
1111 unsigned int num_params)
1112{ 1114{
1113 int err; 1115 int err;
1114 1116
@@ -1125,21 +1127,30 @@ static int mod_sysfs_setup(struct module *mod,
1125 kobj_set_kset_s(&mod->mkobj, module_subsys); 1127 kobj_set_kset_s(&mod->mkobj, module_subsys);
1126 mod->mkobj.mod = mod; 1128 mod->mkobj.mod = mod;
1127 1129
1128 /* delay uevent until full sysfs population */
1129 kobject_init(&mod->mkobj.kobj); 1130 kobject_init(&mod->mkobj.kobj);
1131
1132out:
1133 return err;
1134}
1135
1136static int mod_sysfs_setup(struct module *mod,
1137 struct kernel_param *kparam,
1138 unsigned int num_params)
1139{
1140 int err;
1141
1142 /* delay uevent until full sysfs population */
1130 err = kobject_add(&mod->mkobj.kobj); 1143 err = kobject_add(&mod->mkobj.kobj);
1131 if (err) 1144 if (err)
1132 goto out; 1145 goto out;
1133 1146
1134 mod->drivers_dir = kobject_add_dir(&mod->mkobj.kobj, "drivers"); 1147 mod->holders_dir = kobject_add_dir(&mod->mkobj.kobj, "holders");
1135 if (!mod->drivers_dir) { 1148 if (!mod->holders_dir)
1136 err = -ENOMEM;
1137 goto out_unreg; 1149 goto out_unreg;
1138 }
1139 1150
1140 err = module_param_sysfs_setup(mod, kparam, num_params); 1151 err = module_param_sysfs_setup(mod, kparam, num_params);
1141 if (err) 1152 if (err)
1142 goto out_unreg_drivers; 1153 goto out_unreg_holders;
1143 1154
1144 err = module_add_modinfo_attrs(mod); 1155 err = module_add_modinfo_attrs(mod);
1145 if (err) 1156 if (err)
@@ -1150,8 +1161,8 @@ static int mod_sysfs_setup(struct module *mod,
1150 1161
1151out_unreg_param: 1162out_unreg_param:
1152 module_param_sysfs_remove(mod); 1163 module_param_sysfs_remove(mod);
1153out_unreg_drivers: 1164out_unreg_holders:
1154 kobject_unregister(mod->drivers_dir); 1165 kobject_unregister(mod->holders_dir);
1155out_unreg: 1166out_unreg:
1156 kobject_del(&mod->mkobj.kobj); 1167 kobject_del(&mod->mkobj.kobj);
1157 kobject_put(&mod->mkobj.kobj); 1168 kobject_put(&mod->mkobj.kobj);
@@ -1163,7 +1174,10 @@ static void mod_kobject_remove(struct module *mod)
1163{ 1174{
1164 module_remove_modinfo_attrs(mod); 1175 module_remove_modinfo_attrs(mod);
1165 module_param_sysfs_remove(mod); 1176 module_param_sysfs_remove(mod);
1166 kobject_unregister(mod->drivers_dir); 1177 if (mod->mkobj.drivers_dir)
1178 kobject_unregister(mod->mkobj.drivers_dir);
1179 if (mod->holders_dir)
1180 kobject_unregister(mod->holders_dir);
1167 1181
1168 kobject_unregister(&mod->mkobj.kobj); 1182 kobject_unregister(&mod->mkobj.kobj);
1169} 1183}
@@ -1768,6 +1782,10 @@ static struct module *load_module(void __user *umod,
1768 /* Now we've moved module, initialize linked lists, etc. */ 1782 /* Now we've moved module, initialize linked lists, etc. */
1769 module_unload_init(mod); 1783 module_unload_init(mod);
1770 1784
1785 /* Initialize kobject, so we can reference it. */
1786 if (mod_sysfs_init(mod) != 0)
1787 goto cleanup;
1788
1771 /* Set up license info based on the info section */ 1789 /* Set up license info based on the info section */
1772 set_license(mod, get_modinfo(sechdrs, infoindex, "license")); 1790 set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
1773 1791
@@ -2340,19 +2358,43 @@ static char *make_driver_name(struct device_driver *drv)
2340 return driver_name; 2358 return driver_name;
2341} 2359}
2342 2360
2361static void module_create_drivers_dir(struct module_kobject *mk)
2362{
2363 if (!mk || mk->drivers_dir)
2364 return;
2365
2366 mk->drivers_dir = kobject_add_dir(&mk->kobj, "drivers");
2367}
2368
2343void module_add_driver(struct module *mod, struct device_driver *drv) 2369void module_add_driver(struct module *mod, struct device_driver *drv)
2344{ 2370{
2345 char *driver_name; 2371 char *driver_name;
2346 int no_warn; 2372 int no_warn;
2373 struct module_kobject *mk = NULL;
2374
2375 if (!drv)
2376 return;
2377
2378 if (mod)
2379 mk = &mod->mkobj;
2380 else if (drv->mod_name) {
2381 struct kobject *mkobj;
2382
2383 /* Lookup built-in module entry in /sys/modules */
2384 mkobj = kset_find_obj(&module_subsys.kset, drv->mod_name);
2385 if (mkobj)
2386 mk = container_of(mkobj, struct module_kobject, kobj);
2387 }
2347 2388
2348 if (!mod || !drv) 2389 if (!mk)
2349 return; 2390 return;
2350 2391
2351 /* Don't check return codes; these calls are idempotent */ 2392 /* Don't check return codes; these calls are idempotent */
2352 no_warn = sysfs_create_link(&drv->kobj, &mod->mkobj.kobj, "module"); 2393 no_warn = sysfs_create_link(&drv->kobj, &mk->kobj, "module");
2353 driver_name = make_driver_name(drv); 2394 driver_name = make_driver_name(drv);
2354 if (driver_name) { 2395 if (driver_name) {
2355 no_warn = sysfs_create_link(mod->drivers_dir, &drv->kobj, 2396 module_create_drivers_dir(mk);
2397 no_warn = sysfs_create_link(mk->drivers_dir, &drv->kobj,
2356 driver_name); 2398 driver_name);
2357 kfree(driver_name); 2399 kfree(driver_name);
2358 } 2400 }
@@ -2367,10 +2409,10 @@ void module_remove_driver(struct device_driver *drv)
2367 return; 2409 return;
2368 2410
2369 sysfs_remove_link(&drv->kobj, "module"); 2411 sysfs_remove_link(&drv->kobj, "module");
2370 if (drv->owner && drv->owner->drivers_dir) { 2412 if (drv->owner && drv->owner->mkobj.drivers_dir) {
2371 driver_name = make_driver_name(drv); 2413 driver_name = make_driver_name(drv);
2372 if (driver_name) { 2414 if (driver_name) {
2373 sysfs_remove_link(drv->owner->drivers_dir, 2415 sysfs_remove_link(drv->owner->mkobj.drivers_dir,
2374 driver_name); 2416 driver_name);
2375 kfree(driver_name); 2417 kfree(driver_name);
2376 } 2418 }
diff --git a/kernel/params.c b/kernel/params.c
index 718945da8f58..553cf7d6a4be 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -30,6 +30,8 @@
30#define DEBUGP(fmt, a...) 30#define DEBUGP(fmt, a...)
31#endif 31#endif
32 32
33static struct kobj_type module_ktype;
34
33static inline char dash2underscore(char c) 35static inline char dash2underscore(char c)
34{ 36{
35 if (c == '-') 37 if (c == '-')
@@ -561,14 +563,11 @@ static void __init kernel_param_sysfs_setup(const char *name,
561 mk->mod = THIS_MODULE; 563 mk->mod = THIS_MODULE;
562 kobj_set_kset_s(mk, module_subsys); 564 kobj_set_kset_s(mk, module_subsys);
563 kobject_set_name(&mk->kobj, name); 565 kobject_set_name(&mk->kobj, name);
564 ret = kobject_register(&mk->kobj); 566 kobject_init(&mk->kobj);
567 ret = kobject_add(&mk->kobj);
565 BUG_ON(ret < 0); 568 BUG_ON(ret < 0);
566 569 param_sysfs_setup(mk, kparam, num_params, name_skip);
567 /* no need to keep the kobject if no parameter is exported */ 570 kobject_uevent(&mk->kobj, KOBJ_ADD);
568 if (!param_sysfs_setup(mk, kparam, num_params, name_skip)) {
569 kobject_unregister(&mk->kobj);
570 kfree(mk);
571 }
572} 571}
573 572
574/* 573/*
@@ -674,6 +673,19 @@ static struct sysfs_ops module_sysfs_ops = {
674 .store = module_attr_store, 673 .store = module_attr_store,
675}; 674};
676 675
676static int uevent_filter(struct kset *kset, struct kobject *kobj)
677{
678 struct kobj_type *ktype = get_ktype(kobj);
679
680 if (ktype == &module_ktype)
681 return 1;
682 return 0;
683}
684
685static struct kset_uevent_ops module_uevent_ops = {
686 .filter = uevent_filter,
687};
688
677#else 689#else
678static struct sysfs_ops module_sysfs_ops = { 690static struct sysfs_ops module_sysfs_ops = {
679 .show = NULL, 691 .show = NULL,
@@ -685,7 +697,7 @@ static struct kobj_type module_ktype = {
685 .sysfs_ops = &module_sysfs_ops, 697 .sysfs_ops = &module_sysfs_ops,
686}; 698};
687 699
688decl_subsys(module, &module_ktype, NULL); 700decl_subsys(module, &module_ktype, &module_uevent_ops);
689 701
690/* 702/*
691 * param_sysfs_init - wrapper for built-in params support 703 * param_sysfs_init - wrapper for built-in params support
diff --git a/lib/kobject.c b/lib/kobject.c
index 7ce6dc138e90..c2917ffe8bf1 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -44,11 +44,11 @@ static int populate_dir(struct kobject * kobj)
44 return error; 44 return error;
45} 45}
46 46
47static int create_dir(struct kobject * kobj) 47static int create_dir(struct kobject * kobj, struct dentry *shadow_parent)
48{ 48{
49 int error = 0; 49 int error = 0;
50 if (kobject_name(kobj)) { 50 if (kobject_name(kobj)) {
51 error = sysfs_create_dir(kobj); 51 error = sysfs_create_dir(kobj, shadow_parent);
52 if (!error) { 52 if (!error) {
53 if ((error = populate_dir(kobj))) 53 if ((error = populate_dir(kobj)))
54 sysfs_remove_dir(kobj); 54 sysfs_remove_dir(kobj);
@@ -126,6 +126,8 @@ EXPORT_SYMBOL_GPL(kobject_get_path);
126 */ 126 */
127void kobject_init(struct kobject * kobj) 127void kobject_init(struct kobject * kobj)
128{ 128{
129 if (!kobj)
130 return;
129 kref_init(&kobj->kref); 131 kref_init(&kobj->kref);
130 INIT_LIST_HEAD(&kobj->entry); 132 INIT_LIST_HEAD(&kobj->entry);
131 init_waitqueue_head(&kobj->poll); 133 init_waitqueue_head(&kobj->poll);
@@ -156,9 +158,10 @@ static void unlink(struct kobject * kobj)
156/** 158/**
157 * kobject_add - add an object to the hierarchy. 159 * kobject_add - add an object to the hierarchy.
158 * @kobj: object. 160 * @kobj: object.
161 * @shadow_parent: sysfs directory to add to.
159 */ 162 */
160 163
161int kobject_add(struct kobject * kobj) 164int kobject_shadow_add(struct kobject * kobj, struct dentry *shadow_parent)
162{ 165{
163 int error = 0; 166 int error = 0;
164 struct kobject * parent; 167 struct kobject * parent;
@@ -189,12 +192,11 @@ int kobject_add(struct kobject * kobj)
189 } 192 }
190 kobj->parent = parent; 193 kobj->parent = parent;
191 194
192 error = create_dir(kobj); 195 error = create_dir(kobj, shadow_parent);
193 if (error) { 196 if (error) {
194 /* unlink does the kobject_put() for us */ 197 /* unlink does the kobject_put() for us */
195 unlink(kobj); 198 unlink(kobj);
196 if (parent) 199 kobject_put(parent);
197 kobject_put(parent);
198 200
199 /* be noisy on error issues */ 201 /* be noisy on error issues */
200 if (error == -EEXIST) 202 if (error == -EEXIST)
@@ -211,6 +213,15 @@ int kobject_add(struct kobject * kobj)
211 return error; 213 return error;
212} 214}
213 215
216/**
217 * kobject_add - add an object to the hierarchy.
218 * @kobj: object.
219 */
220int kobject_add(struct kobject * kobj)
221{
222 return kobject_shadow_add(kobj, NULL);
223}
224
214 225
215/** 226/**
216 * kobject_register - initialize and add an object. 227 * kobject_register - initialize and add an object.
@@ -303,7 +314,29 @@ int kobject_rename(struct kobject * kobj, const char *new_name)
303 kobj = kobject_get(kobj); 314 kobj = kobject_get(kobj);
304 if (!kobj) 315 if (!kobj)
305 return -EINVAL; 316 return -EINVAL;
306 error = sysfs_rename_dir(kobj, new_name); 317 if (!kobj->parent)
318 return -EINVAL;
319 error = sysfs_rename_dir(kobj, kobj->parent->dentry, new_name);
320 kobject_put(kobj);
321
322 return error;
323}
324
325/**
326 * kobject_rename - change the name of an object
327 * @kobj: object in question.
328 * @new_name: object's new name
329 */
330
331int kobject_shadow_rename(struct kobject * kobj, struct dentry *new_parent,
332 const char *new_name)
333{
334 int error = 0;
335
336 kobj = kobject_get(kobj);
337 if (!kobj)
338 return -EINVAL;
339 error = sysfs_rename_dir(kobj, new_parent, new_name);
307 kobject_put(kobj); 340 kobject_put(kobj);
308 341
309 return error; 342 return error;
@@ -312,7 +345,7 @@ int kobject_rename(struct kobject * kobj, const char *new_name)
312/** 345/**
313 * kobject_move - move object to another parent 346 * kobject_move - move object to another parent
314 * @kobj: object in question. 347 * @kobj: object in question.
315 * @new_parent: object's new parent 348 * @new_parent: object's new parent (can be NULL)
316 */ 349 */
317 350
318int kobject_move(struct kobject *kobj, struct kobject *new_parent) 351int kobject_move(struct kobject *kobj, struct kobject *new_parent)
@@ -328,8 +361,8 @@ int kobject_move(struct kobject *kobj, struct kobject *new_parent)
328 return -EINVAL; 361 return -EINVAL;
329 new_parent = kobject_get(new_parent); 362 new_parent = kobject_get(new_parent);
330 if (!new_parent) { 363 if (!new_parent) {
331 error = -EINVAL; 364 if (kobj->kset)
332 goto out; 365 new_parent = kobject_get(&kobj->kset->kobj);
333 } 366 }
334 /* old object path */ 367 /* old object path */
335 devpath = kobject_get_path(kobj, GFP_KERNEL); 368 devpath = kobject_get_path(kobj, GFP_KERNEL);
@@ -366,6 +399,8 @@ out:
366 399
367void kobject_del(struct kobject * kobj) 400void kobject_del(struct kobject * kobj)
368{ 401{
402 if (!kobj)
403 return;
369 sysfs_remove_dir(kobj); 404 sysfs_remove_dir(kobj);
370 unlink(kobj); 405 unlink(kobj);
371} 406}
@@ -377,6 +412,8 @@ void kobject_del(struct kobject * kobj)
377 412
378void kobject_unregister(struct kobject * kobj) 413void kobject_unregister(struct kobject * kobj)
379{ 414{
415 if (!kobj)
416 return;
380 pr_debug("kobject %s: unregistering\n",kobject_name(kobj)); 417 pr_debug("kobject %s: unregistering\n",kobject_name(kobj));
381 kobject_uevent(kobj, KOBJ_REMOVE); 418 kobject_uevent(kobj, KOBJ_REMOVE);
382 kobject_del(kobj); 419 kobject_del(kobj);
@@ -414,8 +451,7 @@ void kobject_cleanup(struct kobject * kobj)
414 t->release(kobj); 451 t->release(kobj);
415 if (s) 452 if (s)
416 kset_put(s); 453 kset_put(s);
417 if (parent) 454 kobject_put(parent);
418 kobject_put(parent);
419} 455}
420 456
421static void kobject_release(struct kref *kref) 457static void kobject_release(struct kref *kref)
@@ -523,6 +559,8 @@ int kset_add(struct kset * k)
523 559
524int kset_register(struct kset * k) 560int kset_register(struct kset * k)
525{ 561{
562 if (!k)
563 return -EINVAL;
526 kset_init(k); 564 kset_init(k);
527 return kset_add(k); 565 return kset_add(k);
528} 566}
@@ -535,6 +573,8 @@ int kset_register(struct kset * k)
535 573
536void kset_unregister(struct kset * k) 574void kset_unregister(struct kset * k)
537{ 575{
576 if (!k)
577 return;
538 kobject_unregister(&k->kobj); 578 kobject_unregister(&k->kobj);
539} 579}
540 580
@@ -586,6 +626,9 @@ int subsystem_register(struct subsystem * s)
586{ 626{
587 int error; 627 int error;
588 628
629 if (!s)
630 return -EINVAL;
631
589 subsystem_init(s); 632 subsystem_init(s);
590 pr_debug("subsystem %s: registering\n",s->kset.kobj.name); 633 pr_debug("subsystem %s: registering\n",s->kset.kobj.name);
591 634
@@ -598,6 +641,8 @@ int subsystem_register(struct subsystem * s)
598 641
599void subsystem_unregister(struct subsystem * s) 642void subsystem_unregister(struct subsystem * s)
600{ 643{
644 if (!s)
645 return;
601 pr_debug("subsystem %s: unregistering\n",s->kset.kobj.name); 646 pr_debug("subsystem %s: unregistering\n",s->kset.kobj.name);
602 kset_unregister(&s->kset); 647 kset_unregister(&s->kset);
603} 648}
@@ -612,6 +657,10 @@ void subsystem_unregister(struct subsystem * s)
612int subsys_create_file(struct subsystem * s, struct subsys_attribute * a) 657int subsys_create_file(struct subsystem * s, struct subsys_attribute * a)
613{ 658{
614 int error = 0; 659 int error = 0;
660
661 if (!s || !a)
662 return -EINVAL;
663
615 if (subsys_get(s)) { 664 if (subsys_get(s)) {
616 error = sysfs_create_file(&s->kset.kobj,&a->attr); 665 error = sysfs_create_file(&s->kset.kobj,&a->attr);
617 subsys_put(s); 666 subsys_put(s);
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 10625785eefd..50a438010182 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Dynamic DMA mapping support. 2 * Dynamic DMA mapping support.
3 * 3 *
4 * This implementation is for IA-64 and EM64T platforms that do not support 4 * This implementation is a fallback for platforms that do not support
5 * I/O TLBs (aka DMA address translation hardware). 5 * I/O TLBs (aka DMA address translation hardware).
6 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com> 6 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
7 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com> 7 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
@@ -28,6 +28,7 @@
28#include <asm/io.h> 28#include <asm/io.h>
29#include <asm/dma.h> 29#include <asm/dma.h>
30#include <asm/scatterlist.h> 30#include <asm/scatterlist.h>
31#include <asm/swiotlb.h>
31 32
32#include <linux/init.h> 33#include <linux/init.h>
33#include <linux/bootmem.h> 34#include <linux/bootmem.h>
@@ -35,8 +36,10 @@
35#define OFFSET(val,align) ((unsigned long) \ 36#define OFFSET(val,align) ((unsigned long) \
36 ( (val) & ( (align) - 1))) 37 ( (val) & ( (align) - 1)))
37 38
39#ifndef SG_ENT_VIRT_ADDRESS
38#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset) 40#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
39#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG)) 41#define SG_ENT_PHYS_ADDRESS(sg) virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))
42#endif
40 43
41/* 44/*
42 * Maximum allowable number of contiguous slabs to map, 45 * Maximum allowable number of contiguous slabs to map,
@@ -101,13 +104,25 @@ static unsigned int io_tlb_index;
101 * We need to save away the original address corresponding to a mapped entry 104 * We need to save away the original address corresponding to a mapped entry
102 * for the sync operations. 105 * for the sync operations.
103 */ 106 */
104static unsigned char **io_tlb_orig_addr; 107#ifndef SWIOTLB_ARCH_HAS_IO_TLB_ADDR_T
108typedef char *io_tlb_addr_t;
109#define swiotlb_orig_addr_null(buffer) (!(buffer))
110#define ptr_to_io_tlb_addr(ptr) (ptr)
111#define page_to_io_tlb_addr(pg, off) (page_address(pg) + (off))
112#define sg_to_io_tlb_addr(sg) SG_ENT_VIRT_ADDRESS(sg)
113#endif
114static io_tlb_addr_t *io_tlb_orig_addr;
105 115
106/* 116/*
107 * Protect the above data structures in the map and unmap calls 117 * Protect the above data structures in the map and unmap calls
108 */ 118 */
109static DEFINE_SPINLOCK(io_tlb_lock); 119static DEFINE_SPINLOCK(io_tlb_lock);
110 120
121#ifdef SWIOTLB_EXTRA_VARIABLES
122SWIOTLB_EXTRA_VARIABLES;
123#endif
124
125#ifndef SWIOTLB_ARCH_HAS_SETUP_IO_TLB_NPAGES
111static int __init 126static int __init
112setup_io_tlb_npages(char *str) 127setup_io_tlb_npages(char *str)
113{ 128{
@@ -122,30 +137,50 @@ setup_io_tlb_npages(char *str)
122 swiotlb_force = 1; 137 swiotlb_force = 1;
123 return 1; 138 return 1;
124} 139}
140#endif
125__setup("swiotlb=", setup_io_tlb_npages); 141__setup("swiotlb=", setup_io_tlb_npages);
126/* make io_tlb_overflow tunable too? */ 142/* make io_tlb_overflow tunable too? */
127 143
144#ifndef swiotlb_adjust_size
145#define swiotlb_adjust_size(size) ((void)0)
146#endif
147
148#ifndef swiotlb_adjust_seg
149#define swiotlb_adjust_seg(start, size) ((void)0)
150#endif
151
152#ifndef swiotlb_print_info
153#define swiotlb_print_info(bytes) \
154 printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - " \
155 "0x%lx\n", bytes >> 20, \
156 virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end))
157#endif
158
128/* 159/*
129 * Statically reserve bounce buffer space and initialize bounce buffer data 160 * Statically reserve bounce buffer space and initialize bounce buffer data
130 * structures for the software IO TLB used to implement the DMA API. 161 * structures for the software IO TLB used to implement the DMA API.
131 */ 162 */
132void 163void __init
133swiotlb_init_with_default_size (size_t default_size) 164swiotlb_init_with_default_size(size_t default_size)
134{ 165{
135 unsigned long i; 166 unsigned long i, bytes;
136 167
137 if (!io_tlb_nslabs) { 168 if (!io_tlb_nslabs) {
138 io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); 169 io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
139 io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); 170 io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
140 } 171 }
172 swiotlb_adjust_size(io_tlb_nslabs);
173 swiotlb_adjust_size(io_tlb_overflow);
174
175 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
141 176
142 /* 177 /*
143 * Get IO TLB memory from the low pages 178 * Get IO TLB memory from the low pages
144 */ 179 */
145 io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT)); 180 io_tlb_start = alloc_bootmem_low_pages(bytes);
146 if (!io_tlb_start) 181 if (!io_tlb_start)
147 panic("Cannot allocate SWIOTLB buffer"); 182 panic("Cannot allocate SWIOTLB buffer");
148 io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT); 183 io_tlb_end = io_tlb_start + bytes;
149 184
150 /* 185 /*
151 * Allocate and initialize the free list array. This array is used 186 * Allocate and initialize the free list array. This array is used
@@ -153,34 +188,45 @@ swiotlb_init_with_default_size (size_t default_size)
153 * between io_tlb_start and io_tlb_end. 188 * between io_tlb_start and io_tlb_end.
154 */ 189 */
155 io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int)); 190 io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
156 for (i = 0; i < io_tlb_nslabs; i++) 191 for (i = 0; i < io_tlb_nslabs; i++) {
192 if ( !(i % IO_TLB_SEGSIZE) )
193 swiotlb_adjust_seg(io_tlb_start + (i << IO_TLB_SHIFT),
194 IO_TLB_SEGSIZE << IO_TLB_SHIFT);
157 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); 195 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
196 }
158 io_tlb_index = 0; 197 io_tlb_index = 0;
159 io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *)); 198 io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(io_tlb_addr_t));
160 199
161 /* 200 /*
162 * Get the overflow emergency buffer 201 * Get the overflow emergency buffer
163 */ 202 */
164 io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow); 203 io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
165 printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n", 204 if (!io_tlb_overflow_buffer)
166 virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end)); 205 panic("Cannot allocate SWIOTLB overflow buffer!\n");
206 swiotlb_adjust_seg(io_tlb_overflow_buffer, io_tlb_overflow);
207
208 swiotlb_print_info(bytes);
167} 209}
210#ifndef __swiotlb_init_with_default_size
211#define __swiotlb_init_with_default_size swiotlb_init_with_default_size
212#endif
168 213
169void 214void __init
170swiotlb_init (void) 215swiotlb_init(void)
171{ 216{
172 swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */ 217 __swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */
173} 218}
174 219
220#ifdef SWIOTLB_ARCH_NEED_LATE_INIT
175/* 221/*
176 * Systems with larger DMA zones (those that don't support ISA) can 222 * Systems with larger DMA zones (those that don't support ISA) can
177 * initialize the swiotlb later using the slab allocator if needed. 223 * initialize the swiotlb later using the slab allocator if needed.
178 * This should be just like above, but with some error catching. 224 * This should be just like above, but with some error catching.
179 */ 225 */
180int 226int
181swiotlb_late_init_with_default_size (size_t default_size) 227swiotlb_late_init_with_default_size(size_t default_size)
182{ 228{
183 unsigned long i, req_nslabs = io_tlb_nslabs; 229 unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
184 unsigned int order; 230 unsigned int order;
185 231
186 if (!io_tlb_nslabs) { 232 if (!io_tlb_nslabs) {
@@ -191,8 +237,9 @@ swiotlb_late_init_with_default_size (size_t default_size)
191 /* 237 /*
192 * Get IO TLB memory from the low pages 238 * Get IO TLB memory from the low pages
193 */ 239 */
194 order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT)); 240 order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
195 io_tlb_nslabs = SLABS_PER_PAGE << order; 241 io_tlb_nslabs = SLABS_PER_PAGE << order;
242 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
196 243
197 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { 244 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
198 io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN, 245 io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
@@ -205,13 +252,14 @@ swiotlb_late_init_with_default_size (size_t default_size)
205 if (!io_tlb_start) 252 if (!io_tlb_start)
206 goto cleanup1; 253 goto cleanup1;
207 254
208 if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) { 255 if (order != get_order(bytes)) {
209 printk(KERN_WARNING "Warning: only able to allocate %ld MB " 256 printk(KERN_WARNING "Warning: only able to allocate %ld MB "
210 "for software IO TLB\n", (PAGE_SIZE << order) >> 20); 257 "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
211 io_tlb_nslabs = SLABS_PER_PAGE << order; 258 io_tlb_nslabs = SLABS_PER_PAGE << order;
259 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
212 } 260 }
213 io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT); 261 io_tlb_end = io_tlb_start + bytes;
214 memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT)); 262 memset(io_tlb_start, 0, bytes);
215 263
216 /* 264 /*
217 * Allocate and initialize the free list array. This array is used 265 * Allocate and initialize the free list array. This array is used
@@ -227,12 +275,12 @@ swiotlb_late_init_with_default_size (size_t default_size)
227 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); 275 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
228 io_tlb_index = 0; 276 io_tlb_index = 0;
229 277
230 io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL, 278 io_tlb_orig_addr = (io_tlb_addr_t *)__get_free_pages(GFP_KERNEL,
231 get_order(io_tlb_nslabs * sizeof(char *))); 279 get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t)));
232 if (!io_tlb_orig_addr) 280 if (!io_tlb_orig_addr)
233 goto cleanup3; 281 goto cleanup3;
234 282
235 memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *)); 283 memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(io_tlb_addr_t));
236 284
237 /* 285 /*
238 * Get the overflow emergency buffer 286 * Get the overflow emergency buffer
@@ -242,29 +290,29 @@ swiotlb_late_init_with_default_size (size_t default_size)
242 if (!io_tlb_overflow_buffer) 290 if (!io_tlb_overflow_buffer)
243 goto cleanup4; 291 goto cleanup4;
244 292
245 printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - " 293 swiotlb_print_info(bytes);
246 "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
247 virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
248 294
249 return 0; 295 return 0;
250 296
251cleanup4: 297cleanup4:
252 free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs * 298 free_pages((unsigned long)io_tlb_orig_addr,
253 sizeof(char *))); 299 get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t)));
254 io_tlb_orig_addr = NULL; 300 io_tlb_orig_addr = NULL;
255cleanup3: 301cleanup3:
256 free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * 302 free_pages((unsigned long)io_tlb_list,
257 sizeof(int))); 303 get_order(io_tlb_nslabs * sizeof(int)));
258 io_tlb_list = NULL; 304 io_tlb_list = NULL;
259 io_tlb_end = NULL;
260cleanup2: 305cleanup2:
306 io_tlb_end = NULL;
261 free_pages((unsigned long)io_tlb_start, order); 307 free_pages((unsigned long)io_tlb_start, order);
262 io_tlb_start = NULL; 308 io_tlb_start = NULL;
263cleanup1: 309cleanup1:
264 io_tlb_nslabs = req_nslabs; 310 io_tlb_nslabs = req_nslabs;
265 return -ENOMEM; 311 return -ENOMEM;
266} 312}
313#endif
267 314
315#ifndef SWIOTLB_ARCH_HAS_NEEDS_MAPPING
268static inline int 316static inline int
269address_needs_mapping(struct device *hwdev, dma_addr_t addr) 317address_needs_mapping(struct device *hwdev, dma_addr_t addr)
270{ 318{
@@ -275,11 +323,35 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr)
275 return (addr & ~mask) != 0; 323 return (addr & ~mask) != 0;
276} 324}
277 325
326static inline int range_needs_mapping(const void *ptr, size_t size)
327{
328 return swiotlb_force;
329}
330
331static inline int order_needs_mapping(unsigned int order)
332{
333 return 0;
334}
335#endif
336
337static void
338__sync_single(io_tlb_addr_t buffer, char *dma_addr, size_t size, int dir)
339{
340#ifndef SWIOTLB_ARCH_HAS_SYNC_SINGLE
341 if (dir == DMA_TO_DEVICE)
342 memcpy(dma_addr, buffer, size);
343 else
344 memcpy(buffer, dma_addr, size);
345#else
346 __swiotlb_arch_sync_single(buffer, dma_addr, size, dir);
347#endif
348}
349
278/* 350/*
279 * Allocates bounce buffer and returns its kernel virtual address. 351 * Allocates bounce buffer and returns its kernel virtual address.
280 */ 352 */
281static void * 353static void *
282map_single(struct device *hwdev, char *buffer, size_t size, int dir) 354map_single(struct device *hwdev, io_tlb_addr_t buffer, size_t size, int dir)
283{ 355{
284 unsigned long flags; 356 unsigned long flags;
285 char *dma_addr; 357 char *dma_addr;
@@ -352,7 +424,7 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
352 */ 424 */
353 io_tlb_orig_addr[index] = buffer; 425 io_tlb_orig_addr[index] = buffer;
354 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) 426 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
355 memcpy(dma_addr, buffer, size); 427 __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
356 428
357 return dma_addr; 429 return dma_addr;
358} 430}
@@ -366,17 +438,18 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
366 unsigned long flags; 438 unsigned long flags;
367 int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; 439 int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
368 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; 440 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
369 char *buffer = io_tlb_orig_addr[index]; 441 io_tlb_addr_t buffer = io_tlb_orig_addr[index];
370 442
371 /* 443 /*
372 * First, sync the memory before unmapping the entry 444 * First, sync the memory before unmapping the entry
373 */ 445 */
374 if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))) 446 if (!swiotlb_orig_addr_null(buffer)
447 && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
375 /* 448 /*
376 * bounce... copy the data back into the original buffer * and 449 * bounce... copy the data back into the original buffer * and
377 * delete the bounce buffer. 450 * delete the bounce buffer.
378 */ 451 */
379 memcpy(buffer, dma_addr, size); 452 __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
380 453
381 /* 454 /*
382 * Return the buffer to the free list by setting the corresponding 455 * Return the buffer to the free list by setting the corresponding
@@ -409,18 +482,18 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
409 int dir, int target) 482 int dir, int target)
410{ 483{
411 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; 484 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
412 char *buffer = io_tlb_orig_addr[index]; 485 io_tlb_addr_t buffer = io_tlb_orig_addr[index];
413 486
414 switch (target) { 487 switch (target) {
415 case SYNC_FOR_CPU: 488 case SYNC_FOR_CPU:
416 if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) 489 if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
417 memcpy(buffer, dma_addr, size); 490 __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
418 else 491 else
419 BUG_ON(dir != DMA_TO_DEVICE); 492 BUG_ON(dir != DMA_TO_DEVICE);
420 break; 493 break;
421 case SYNC_FOR_DEVICE: 494 case SYNC_FOR_DEVICE:
422 if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) 495 if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
423 memcpy(dma_addr, buffer, size); 496 __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
424 else 497 else
425 BUG_ON(dir != DMA_FROM_DEVICE); 498 BUG_ON(dir != DMA_FROM_DEVICE);
426 break; 499 break;
@@ -429,11 +502,13 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
429 } 502 }
430} 503}
431 504
505#ifdef SWIOTLB_ARCH_NEED_ALLOC
506
432void * 507void *
433swiotlb_alloc_coherent(struct device *hwdev, size_t size, 508swiotlb_alloc_coherent(struct device *hwdev, size_t size,
434 dma_addr_t *dma_handle, gfp_t flags) 509 dma_addr_t *dma_handle, gfp_t flags)
435{ 510{
436 unsigned long dev_addr; 511 dma_addr_t dev_addr;
437 void *ret; 512 void *ret;
438 int order = get_order(size); 513 int order = get_order(size);
439 514
@@ -444,8 +519,11 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
444 */ 519 */
445 flags |= GFP_DMA; 520 flags |= GFP_DMA;
446 521
447 ret = (void *)__get_free_pages(flags, order); 522 if (!order_needs_mapping(order))
448 if (ret && address_needs_mapping(hwdev, virt_to_phys(ret))) { 523 ret = (void *)__get_free_pages(flags, order);
524 else
525 ret = NULL;
526 if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
449 /* 527 /*
450 * The allocated memory isn't reachable by the device. 528 * The allocated memory isn't reachable by the device.
451 * Fall back on swiotlb_map_single(). 529 * Fall back on swiotlb_map_single().
@@ -465,22 +543,24 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
465 if (swiotlb_dma_mapping_error(handle)) 543 if (swiotlb_dma_mapping_error(handle))
466 return NULL; 544 return NULL;
467 545
468 ret = phys_to_virt(handle); 546 ret = bus_to_virt(handle);
469 } 547 }
470 548
471 memset(ret, 0, size); 549 memset(ret, 0, size);
472 dev_addr = virt_to_phys(ret); 550 dev_addr = virt_to_bus(ret);
473 551
474 /* Confirm address can be DMA'd by device */ 552 /* Confirm address can be DMA'd by device */
475 if (address_needs_mapping(hwdev, dev_addr)) { 553 if (address_needs_mapping(hwdev, dev_addr)) {
476 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016lx\n", 554 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
477 (unsigned long long)*hwdev->dma_mask, dev_addr); 555 (unsigned long long)*hwdev->dma_mask,
556 (unsigned long long)dev_addr);
478 panic("swiotlb_alloc_coherent: allocated memory is out of " 557 panic("swiotlb_alloc_coherent: allocated memory is out of "
479 "range for device"); 558 "range for device");
480 } 559 }
481 *dma_handle = dev_addr; 560 *dma_handle = dev_addr;
482 return ret; 561 return ret;
483} 562}
563EXPORT_SYMBOL(swiotlb_alloc_coherent);
484 564
485void 565void
486swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, 566swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
@@ -493,6 +573,9 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
493 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ 573 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
494 swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE); 574 swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
495} 575}
576EXPORT_SYMBOL(swiotlb_free_coherent);
577
578#endif
496 579
497static void 580static void
498swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) 581swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
@@ -504,7 +587,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
504 * When the mapping is small enough return a static buffer to limit 587 * When the mapping is small enough return a static buffer to limit
505 * the damage, or panic when the transfer is too big. 588 * the damage, or panic when the transfer is too big.
506 */ 589 */
507 printk(KERN_ERR "DMA: Out of SW-IOMMU space for %lu bytes at " 590 printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
508 "device %s\n", size, dev ? dev->bus_id : "?"); 591 "device %s\n", size, dev ? dev->bus_id : "?");
509 592
510 if (size > io_tlb_overflow && do_panic) { 593 if (size > io_tlb_overflow && do_panic) {
@@ -525,7 +608,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
525dma_addr_t 608dma_addr_t
526swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) 609swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
527{ 610{
528 unsigned long dev_addr = virt_to_phys(ptr); 611 dma_addr_t dev_addr = virt_to_bus(ptr);
529 void *map; 612 void *map;
530 613
531 BUG_ON(dir == DMA_NONE); 614 BUG_ON(dir == DMA_NONE);
@@ -534,19 +617,20 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
534 * we can safely return the device addr and not worry about bounce 617 * we can safely return the device addr and not worry about bounce
535 * buffering it. 618 * buffering it.
536 */ 619 */
537 if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force) 620 if (!range_needs_mapping(ptr, size)
621 && !address_needs_mapping(hwdev, dev_addr))
538 return dev_addr; 622 return dev_addr;
539 623
540 /* 624 /*
541 * Oh well, have to allocate and map a bounce buffer. 625 * Oh well, have to allocate and map a bounce buffer.
542 */ 626 */
543 map = map_single(hwdev, ptr, size, dir); 627 map = map_single(hwdev, ptr_to_io_tlb_addr(ptr), size, dir);
544 if (!map) { 628 if (!map) {
545 swiotlb_full(hwdev, size, dir, 1); 629 swiotlb_full(hwdev, size, dir, 1);
546 map = io_tlb_overflow_buffer; 630 map = io_tlb_overflow_buffer;
547 } 631 }
548 632
549 dev_addr = virt_to_phys(map); 633 dev_addr = virt_to_bus(map);
550 634
551 /* 635 /*
552 * Ensure that the address returned is DMA'ble 636 * Ensure that the address returned is DMA'ble
@@ -558,25 +642,6 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
558} 642}
559 643
560/* 644/*
561 * Since DMA is i-cache coherent, any (complete) pages that were written via
562 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
563 * flush them when they get mapped into an executable vm-area.
564 */
565static void
566mark_clean(void *addr, size_t size)
567{
568 unsigned long pg_addr, end;
569
570 pg_addr = PAGE_ALIGN((unsigned long) addr);
571 end = (unsigned long) addr + size;
572 while (pg_addr + PAGE_SIZE <= end) {
573 struct page *page = virt_to_page(pg_addr);
574 set_bit(PG_arch_1, &page->flags);
575 pg_addr += PAGE_SIZE;
576 }
577}
578
579/*
580 * Unmap a single streaming mode DMA translation. The dma_addr and size must 645 * Unmap a single streaming mode DMA translation. The dma_addr and size must
581 * match what was provided for in a previous swiotlb_map_single call. All 646 * match what was provided for in a previous swiotlb_map_single call. All
582 * other usages are undefined. 647 * other usages are undefined.
@@ -588,13 +653,13 @@ void
588swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, 653swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
589 int dir) 654 int dir)
590{ 655{
591 char *dma_addr = phys_to_virt(dev_addr); 656 char *dma_addr = bus_to_virt(dev_addr);
592 657
593 BUG_ON(dir == DMA_NONE); 658 BUG_ON(dir == DMA_NONE);
594 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) 659 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
595 unmap_single(hwdev, dma_addr, size, dir); 660 unmap_single(hwdev, dma_addr, size, dir);
596 else if (dir == DMA_FROM_DEVICE) 661 else if (dir == DMA_FROM_DEVICE)
597 mark_clean(dma_addr, size); 662 dma_mark_clean(dma_addr, size);
598} 663}
599 664
600/* 665/*
@@ -611,13 +676,13 @@ static inline void
611swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, 676swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
612 size_t size, int dir, int target) 677 size_t size, int dir, int target)
613{ 678{
614 char *dma_addr = phys_to_virt(dev_addr); 679 char *dma_addr = bus_to_virt(dev_addr);
615 680
616 BUG_ON(dir == DMA_NONE); 681 BUG_ON(dir == DMA_NONE);
617 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) 682 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
618 sync_single(hwdev, dma_addr, size, dir, target); 683 sync_single(hwdev, dma_addr, size, dir, target);
619 else if (dir == DMA_FROM_DEVICE) 684 else if (dir == DMA_FROM_DEVICE)
620 mark_clean(dma_addr, size); 685 dma_mark_clean(dma_addr, size);
621} 686}
622 687
623void 688void
@@ -642,13 +707,13 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
642 unsigned long offset, size_t size, 707 unsigned long offset, size_t size,
643 int dir, int target) 708 int dir, int target)
644{ 709{
645 char *dma_addr = phys_to_virt(dev_addr) + offset; 710 char *dma_addr = bus_to_virt(dev_addr) + offset;
646 711
647 BUG_ON(dir == DMA_NONE); 712 BUG_ON(dir == DMA_NONE);
648 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) 713 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
649 sync_single(hwdev, dma_addr, size, dir, target); 714 sync_single(hwdev, dma_addr, size, dir, target);
650 else if (dir == DMA_FROM_DEVICE) 715 else if (dir == DMA_FROM_DEVICE)
651 mark_clean(dma_addr, size); 716 dma_mark_clean(dma_addr, size);
652} 717}
653 718
654void 719void
@@ -687,18 +752,16 @@ int
687swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems, 752swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
688 int dir) 753 int dir)
689{ 754{
690 void *addr; 755 dma_addr_t dev_addr;
691 unsigned long dev_addr;
692 int i; 756 int i;
693 757
694 BUG_ON(dir == DMA_NONE); 758 BUG_ON(dir == DMA_NONE);
695 759
696 for (i = 0; i < nelems; i++, sg++) { 760 for (i = 0; i < nelems; i++, sg++) {
697 addr = SG_ENT_VIRT_ADDRESS(sg); 761 dev_addr = SG_ENT_PHYS_ADDRESS(sg);
698 dev_addr = virt_to_phys(addr); 762 if (range_needs_mapping(SG_ENT_VIRT_ADDRESS(sg), sg->length)
699 if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) { 763 || address_needs_mapping(hwdev, dev_addr)) {
700 void *map = map_single(hwdev, addr, sg->length, dir); 764 void *map = map_single(hwdev, sg_to_io_tlb_addr(sg), sg->length, dir);
701 sg->dma_address = virt_to_bus(map);
702 if (!map) { 765 if (!map) {
703 /* Don't panic here, we expect map_sg users 766 /* Don't panic here, we expect map_sg users
704 to do proper error handling. */ 767 to do proper error handling. */
@@ -707,6 +770,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
707 sg[0].dma_length = 0; 770 sg[0].dma_length = 0;
708 return 0; 771 return 0;
709 } 772 }
773 sg->dma_address = virt_to_bus(map);
710 } else 774 } else
711 sg->dma_address = dev_addr; 775 sg->dma_address = dev_addr;
712 sg->dma_length = sg->length; 776 sg->dma_length = sg->length;
@@ -728,9 +792,10 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
728 792
729 for (i = 0; i < nelems; i++, sg++) 793 for (i = 0; i < nelems; i++, sg++)
730 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg)) 794 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
731 unmap_single(hwdev, (void *) phys_to_virt(sg->dma_address), sg->dma_length, dir); 795 unmap_single(hwdev, bus_to_virt(sg->dma_address),
796 sg->dma_length, dir);
732 else if (dir == DMA_FROM_DEVICE) 797 else if (dir == DMA_FROM_DEVICE)
733 mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length); 798 dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
734} 799}
735 800
736/* 801/*
@@ -750,8 +815,10 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
750 815
751 for (i = 0; i < nelems; i++, sg++) 816 for (i = 0; i < nelems; i++, sg++)
752 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg)) 817 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
753 sync_single(hwdev, (void *) sg->dma_address, 818 sync_single(hwdev, bus_to_virt(sg->dma_address),
754 sg->dma_length, dir, target); 819 sg->dma_length, dir, target);
820 else if (dir == DMA_FROM_DEVICE)
821 dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
755} 822}
756 823
757void 824void
@@ -768,10 +835,48 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
768 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); 835 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
769} 836}
770 837
838#ifdef SWIOTLB_ARCH_NEED_MAP_PAGE
839
840dma_addr_t
841swiotlb_map_page(struct device *hwdev, struct page *page,
842 unsigned long offset, size_t size,
843 enum dma_data_direction direction)
844{
845 dma_addr_t dev_addr;
846 char *map;
847
848 dev_addr = page_to_bus(page) + offset;
849 if (address_needs_mapping(hwdev, dev_addr)) {
850 map = map_single(hwdev, page_to_io_tlb_addr(page, offset), size, direction);
851 if (!map) {
852 swiotlb_full(hwdev, size, direction, 1);
853 map = io_tlb_overflow_buffer;
854 }
855 dev_addr = virt_to_bus(map);
856 }
857
858 return dev_addr;
859}
860
861void
862swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
863 size_t size, enum dma_data_direction direction)
864{
865 char *dma_addr = bus_to_virt(dev_addr);
866
867 BUG_ON(direction == DMA_NONE);
868 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
869 unmap_single(hwdev, dma_addr, size, direction);
870 else if (direction == DMA_FROM_DEVICE)
871 dma_mark_clean(dma_addr, size);
872}
873
874#endif
875
771int 876int
772swiotlb_dma_mapping_error(dma_addr_t dma_addr) 877swiotlb_dma_mapping_error(dma_addr_t dma_addr)
773{ 878{
774 return (dma_addr == virt_to_phys(io_tlb_overflow_buffer)); 879 return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
775} 880}
776 881
777/* 882/*
@@ -780,10 +885,13 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
780 * during bus mastering, then you would pass 0x00ffffff as the mask to 885 * during bus mastering, then you would pass 0x00ffffff as the mask to
781 * this function. 886 * this function.
782 */ 887 */
888#ifndef __swiotlb_dma_supported
889#define __swiotlb_dma_supported(hwdev, mask) (virt_to_bus(io_tlb_end - 1) <= (mask))
890#endif
783int 891int
784swiotlb_dma_supported (struct device *hwdev, u64 mask) 892swiotlb_dma_supported(struct device *hwdev, u64 mask)
785{ 893{
786 return (virt_to_phys (io_tlb_end) - 1) <= mask; 894 return __swiotlb_dma_supported(hwdev, mask);
787} 895}
788 896
789EXPORT_SYMBOL(swiotlb_init); 897EXPORT_SYMBOL(swiotlb_init);
@@ -798,6 +906,4 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
798EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu); 906EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
799EXPORT_SYMBOL(swiotlb_sync_sg_for_device); 907EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
800EXPORT_SYMBOL(swiotlb_dma_mapping_error); 908EXPORT_SYMBOL(swiotlb_dma_mapping_error);
801EXPORT_SYMBOL(swiotlb_alloc_coherent);
802EXPORT_SYMBOL(swiotlb_free_coherent);
803EXPORT_SYMBOL(swiotlb_dma_supported); 909EXPORT_SYMBOL(swiotlb_dma_supported);
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 55bb2634c088..2b7c2c7dad48 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -286,7 +286,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
286 kobject_init(&p->kobj); 286 kobject_init(&p->kobj);
287 kobject_set_name(&p->kobj, SYSFS_BRIDGE_PORT_ATTR); 287 kobject_set_name(&p->kobj, SYSFS_BRIDGE_PORT_ATTR);
288 p->kobj.ktype = &brport_ktype; 288 p->kobj.ktype = &brport_ktype;
289 p->kobj.parent = &(dev->class_dev.kobj); 289 p->kobj.parent = &(dev->dev.kobj);
290 p->kobj.kset = NULL; 290 p->kobj.kset = NULL;
291 291
292 return p; 292 return p;
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index de9d1a9473f2..ce10464716a7 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -21,18 +21,17 @@
21 21
22#include "br_private.h" 22#include "br_private.h"
23 23
24#define to_class_dev(obj) container_of(obj,struct class_device,kobj) 24#define to_dev(obj) container_of(obj, struct device, kobj)
25#define to_net_dev(class) container_of(class, struct net_device, class_dev)
26#define to_bridge(cd) ((struct net_bridge *)(to_net_dev(cd)->priv)) 25#define to_bridge(cd) ((struct net_bridge *)(to_net_dev(cd)->priv))
27 26
28/* 27/*
29 * Common code for storing bridge parameters. 28 * Common code for storing bridge parameters.
30 */ 29 */
31static ssize_t store_bridge_parm(struct class_device *cd, 30static ssize_t store_bridge_parm(struct device *d,
32 const char *buf, size_t len, 31 const char *buf, size_t len,
33 void (*set)(struct net_bridge *, unsigned long)) 32 void (*set)(struct net_bridge *, unsigned long))
34{ 33{
35 struct net_bridge *br = to_bridge(cd); 34 struct net_bridge *br = to_bridge(d);
36 char *endp; 35 char *endp;
37 unsigned long val; 36 unsigned long val;
38 37
@@ -50,9 +49,10 @@ static ssize_t store_bridge_parm(struct class_device *cd,
50} 49}
51 50
52 51
53static ssize_t show_forward_delay(struct class_device *cd, char *buf) 52static ssize_t show_forward_delay(struct device *d,
53 struct device_attribute *attr, char *buf)
54{ 54{
55 struct net_bridge *br = to_bridge(cd); 55 struct net_bridge *br = to_bridge(d);
56 return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay)); 56 return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay));
57} 57}
58 58
@@ -64,18 +64,20 @@ static void set_forward_delay(struct net_bridge *br, unsigned long val)
64 br->bridge_forward_delay = delay; 64 br->bridge_forward_delay = delay;
65} 65}
66 66
67static ssize_t store_forward_delay(struct class_device *cd, const char *buf, 67static ssize_t store_forward_delay(struct device *d,
68 size_t len) 68 struct device_attribute *attr,
69 const char *buf, size_t len)
69{ 70{
70 return store_bridge_parm(cd, buf, len, set_forward_delay); 71 return store_bridge_parm(d, buf, len, set_forward_delay);
71} 72}
72static CLASS_DEVICE_ATTR(forward_delay, S_IRUGO | S_IWUSR, 73static DEVICE_ATTR(forward_delay, S_IRUGO | S_IWUSR,
73 show_forward_delay, store_forward_delay); 74 show_forward_delay, store_forward_delay);
74 75
75static ssize_t show_hello_time(struct class_device *cd, char *buf) 76static ssize_t show_hello_time(struct device *d, struct device_attribute *attr,
77 char *buf)
76{ 78{
77 return sprintf(buf, "%lu\n", 79 return sprintf(buf, "%lu\n",
78 jiffies_to_clock_t(to_bridge(cd)->hello_time)); 80 jiffies_to_clock_t(to_bridge(d)->hello_time));
79} 81}
80 82
81static void set_hello_time(struct net_bridge *br, unsigned long val) 83static void set_hello_time(struct net_bridge *br, unsigned long val)
@@ -86,19 +88,20 @@ static void set_hello_time(struct net_bridge *br, unsigned long val)
86 br->bridge_hello_time = t; 88 br->bridge_hello_time = t;
87} 89}
88 90
89static ssize_t store_hello_time(struct class_device *cd, const char *buf, 91static ssize_t store_hello_time(struct device *d,
92 struct device_attribute *attr, const char *buf,
90 size_t len) 93 size_t len)
91{ 94{
92 return store_bridge_parm(cd, buf, len, set_hello_time); 95 return store_bridge_parm(d, buf, len, set_hello_time);
93} 96}
97static DEVICE_ATTR(hello_time, S_IRUGO | S_IWUSR, show_hello_time,
98 store_hello_time);
94 99
95static CLASS_DEVICE_ATTR(hello_time, S_IRUGO | S_IWUSR, show_hello_time, 100static ssize_t show_max_age(struct device *d, struct device_attribute *attr,
96 store_hello_time); 101 char *buf)
97
98static ssize_t show_max_age(struct class_device *cd, char *buf)
99{ 102{
100 return sprintf(buf, "%lu\n", 103 return sprintf(buf, "%lu\n",
101 jiffies_to_clock_t(to_bridge(cd)->max_age)); 104 jiffies_to_clock_t(to_bridge(d)->max_age));
102} 105}
103 106
104static void set_max_age(struct net_bridge *br, unsigned long val) 107static void set_max_age(struct net_bridge *br, unsigned long val)
@@ -109,18 +112,17 @@ static void set_max_age(struct net_bridge *br, unsigned long val)
109 br->bridge_max_age = t; 112 br->bridge_max_age = t;
110} 113}
111 114
112static ssize_t store_max_age(struct class_device *cd, const char *buf, 115static ssize_t store_max_age(struct device *d, struct device_attribute *attr,
113 size_t len) 116 const char *buf, size_t len)
114{ 117{
115 return store_bridge_parm(cd, buf, len, set_max_age); 118 return store_bridge_parm(d, buf, len, set_max_age);
116} 119}
120static DEVICE_ATTR(max_age, S_IRUGO | S_IWUSR, show_max_age, store_max_age);
117 121
118static CLASS_DEVICE_ATTR(max_age, S_IRUGO | S_IWUSR, show_max_age, 122static ssize_t show_ageing_time(struct device *d,
119 store_max_age); 123 struct device_attribute *attr, char *buf)
120
121static ssize_t show_ageing_time(struct class_device *cd, char *buf)
122{ 124{
123 struct net_bridge *br = to_bridge(cd); 125 struct net_bridge *br = to_bridge(d);
124 return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->ageing_time)); 126 return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->ageing_time));
125} 127}
126 128
@@ -129,17 +131,19 @@ static void set_ageing_time(struct net_bridge *br, unsigned long val)
129 br->ageing_time = clock_t_to_jiffies(val); 131 br->ageing_time = clock_t_to_jiffies(val);
130} 132}
131 133
132static ssize_t store_ageing_time(struct class_device *cd, const char *buf, 134static ssize_t store_ageing_time(struct device *d,
133 size_t len) 135 struct device_attribute *attr,
136 const char *buf, size_t len)
134{ 137{
135 return store_bridge_parm(cd, buf, len, set_ageing_time); 138 return store_bridge_parm(d, buf, len, set_ageing_time);
136} 139}
140static DEVICE_ATTR(ageing_time, S_IRUGO | S_IWUSR, show_ageing_time,
141 store_ageing_time);
137 142
138static CLASS_DEVICE_ATTR(ageing_time, S_IRUGO | S_IWUSR, show_ageing_time, 143static ssize_t show_stp_state(struct device *d,
139 store_ageing_time); 144 struct device_attribute *attr, char *buf)
140static ssize_t show_stp_state(struct class_device *cd, char *buf)
141{ 145{
142 struct net_bridge *br = to_bridge(cd); 146 struct net_bridge *br = to_bridge(d);
143 return sprintf(buf, "%d\n", br->stp_enabled); 147 return sprintf(buf, "%d\n", br->stp_enabled);
144} 148}
145 149
@@ -148,18 +152,19 @@ static void set_stp_state(struct net_bridge *br, unsigned long val)
148 br->stp_enabled = val; 152 br->stp_enabled = val;
149} 153}
150 154
151static ssize_t store_stp_state(struct class_device *cd, 155static ssize_t store_stp_state(struct device *d,
152 const char *buf, size_t len) 156 struct device_attribute *attr, const char *buf,
157 size_t len)
153{ 158{
154 return store_bridge_parm(cd, buf, len, set_stp_state); 159 return store_bridge_parm(d, buf, len, set_stp_state);
155} 160}
161static DEVICE_ATTR(stp_state, S_IRUGO | S_IWUSR, show_stp_state,
162 store_stp_state);
156 163
157static CLASS_DEVICE_ATTR(stp_state, S_IRUGO | S_IWUSR, show_stp_state, 164static ssize_t show_priority(struct device *d, struct device_attribute *attr,
158 store_stp_state); 165 char *buf)
159
160static ssize_t show_priority(struct class_device *cd, char *buf)
161{ 166{
162 struct net_bridge *br = to_bridge(cd); 167 struct net_bridge *br = to_bridge(d);
163 return sprintf(buf, "%d\n", 168 return sprintf(buf, "%d\n",
164 (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1]); 169 (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1]);
165} 170}
@@ -169,92 +174,107 @@ static void set_priority(struct net_bridge *br, unsigned long val)
169 br_stp_set_bridge_priority(br, (u16) val); 174 br_stp_set_bridge_priority(br, (u16) val);
170} 175}
171 176
172static ssize_t store_priority(struct class_device *cd, 177static ssize_t store_priority(struct device *d, struct device_attribute *attr,
173 const char *buf, size_t len) 178 const char *buf, size_t len)
174{ 179{
175 return store_bridge_parm(cd, buf, len, set_priority); 180 return store_bridge_parm(d, buf, len, set_priority);
176} 181}
177static CLASS_DEVICE_ATTR(priority, S_IRUGO | S_IWUSR, show_priority, 182static DEVICE_ATTR(priority, S_IRUGO | S_IWUSR, show_priority, store_priority);
178 store_priority);
179 183
180static ssize_t show_root_id(struct class_device *cd, char *buf) 184static ssize_t show_root_id(struct device *d, struct device_attribute *attr,
185 char *buf)
181{ 186{
182 return br_show_bridge_id(buf, &to_bridge(cd)->designated_root); 187 return br_show_bridge_id(buf, &to_bridge(d)->designated_root);
183} 188}
184static CLASS_DEVICE_ATTR(root_id, S_IRUGO, show_root_id, NULL); 189static DEVICE_ATTR(root_id, S_IRUGO, show_root_id, NULL);
185 190
186static ssize_t show_bridge_id(struct class_device *cd, char *buf) 191static ssize_t show_bridge_id(struct device *d, struct device_attribute *attr,
192 char *buf)
187{ 193{
188 return br_show_bridge_id(buf, &to_bridge(cd)->bridge_id); 194 return br_show_bridge_id(buf, &to_bridge(d)->bridge_id);
189} 195}
190static CLASS_DEVICE_ATTR(bridge_id, S_IRUGO, show_bridge_id, NULL); 196static DEVICE_ATTR(bridge_id, S_IRUGO, show_bridge_id, NULL);
191 197
192static ssize_t show_root_port(struct class_device *cd, char *buf) 198static ssize_t show_root_port(struct device *d, struct device_attribute *attr,
199 char *buf)
193{ 200{
194 return sprintf(buf, "%d\n", to_bridge(cd)->root_port); 201 return sprintf(buf, "%d\n", to_bridge(d)->root_port);
195} 202}
196static CLASS_DEVICE_ATTR(root_port, S_IRUGO, show_root_port, NULL); 203static DEVICE_ATTR(root_port, S_IRUGO, show_root_port, NULL);
197 204
198static ssize_t show_root_path_cost(struct class_device *cd, char *buf) 205static ssize_t show_root_path_cost(struct device *d,
206 struct device_attribute *attr, char *buf)
199{ 207{
200 return sprintf(buf, "%d\n", to_bridge(cd)->root_path_cost); 208 return sprintf(buf, "%d\n", to_bridge(d)->root_path_cost);
201} 209}
202static CLASS_DEVICE_ATTR(root_path_cost, S_IRUGO, show_root_path_cost, NULL); 210static DEVICE_ATTR(root_path_cost, S_IRUGO, show_root_path_cost, NULL);
203 211
204static ssize_t show_topology_change(struct class_device *cd, char *buf) 212static ssize_t show_topology_change(struct device *d,
213 struct device_attribute *attr, char *buf)
205{ 214{
206 return sprintf(buf, "%d\n", to_bridge(cd)->topology_change); 215 return sprintf(buf, "%d\n", to_bridge(d)->topology_change);
207} 216}
208static CLASS_DEVICE_ATTR(topology_change, S_IRUGO, show_topology_change, NULL); 217static DEVICE_ATTR(topology_change, S_IRUGO, show_topology_change, NULL);
209 218
210static ssize_t show_topology_change_detected(struct class_device *cd, char *buf) 219static ssize_t show_topology_change_detected(struct device *d,
220 struct device_attribute *attr,
221 char *buf)
211{ 222{
212 struct net_bridge *br = to_bridge(cd); 223 struct net_bridge *br = to_bridge(d);
213 return sprintf(buf, "%d\n", br->topology_change_detected); 224 return sprintf(buf, "%d\n", br->topology_change_detected);
214} 225}
215static CLASS_DEVICE_ATTR(topology_change_detected, S_IRUGO, show_topology_change_detected, NULL); 226static DEVICE_ATTR(topology_change_detected, S_IRUGO,
227 show_topology_change_detected, NULL);
216 228
217static ssize_t show_hello_timer(struct class_device *cd, char *buf) 229static ssize_t show_hello_timer(struct device *d,
230 struct device_attribute *attr, char *buf)
218{ 231{
219 struct net_bridge *br = to_bridge(cd); 232 struct net_bridge *br = to_bridge(d);
220 return sprintf(buf, "%ld\n", br_timer_value(&br->hello_timer)); 233 return sprintf(buf, "%ld\n", br_timer_value(&br->hello_timer));
221} 234}
222static CLASS_DEVICE_ATTR(hello_timer, S_IRUGO, show_hello_timer, NULL); 235static DEVICE_ATTR(hello_timer, S_IRUGO, show_hello_timer, NULL);
223 236
224static ssize_t show_tcn_timer(struct class_device *cd, char *buf) 237static ssize_t show_tcn_timer(struct device *d, struct device_attribute *attr,
238 char *buf)
225{ 239{
226 struct net_bridge *br = to_bridge(cd); 240 struct net_bridge *br = to_bridge(d);
227 return sprintf(buf, "%ld\n", br_timer_value(&br->tcn_timer)); 241 return sprintf(buf, "%ld\n", br_timer_value(&br->tcn_timer));
228} 242}
229static CLASS_DEVICE_ATTR(tcn_timer, S_IRUGO, show_tcn_timer, NULL); 243static DEVICE_ATTR(tcn_timer, S_IRUGO, show_tcn_timer, NULL);
230 244
231static ssize_t show_topology_change_timer(struct class_device *cd, char *buf) 245static ssize_t show_topology_change_timer(struct device *d,
246 struct device_attribute *attr,
247 char *buf)
232{ 248{
233 struct net_bridge *br = to_bridge(cd); 249 struct net_bridge *br = to_bridge(d);
234 return sprintf(buf, "%ld\n", br_timer_value(&br->topology_change_timer)); 250 return sprintf(buf, "%ld\n", br_timer_value(&br->topology_change_timer));
235} 251}
236static CLASS_DEVICE_ATTR(topology_change_timer, S_IRUGO, show_topology_change_timer, NULL); 252static DEVICE_ATTR(topology_change_timer, S_IRUGO, show_topology_change_timer,
253 NULL);
237 254
238static ssize_t show_gc_timer(struct class_device *cd, char *buf) 255static ssize_t show_gc_timer(struct device *d, struct device_attribute *attr,
256 char *buf)
239{ 257{
240 struct net_bridge *br = to_bridge(cd); 258 struct net_bridge *br = to_bridge(d);
241 return sprintf(buf, "%ld\n", br_timer_value(&br->gc_timer)); 259 return sprintf(buf, "%ld\n", br_timer_value(&br->gc_timer));
242} 260}
243static CLASS_DEVICE_ATTR(gc_timer, S_IRUGO, show_gc_timer, NULL); 261static DEVICE_ATTR(gc_timer, S_IRUGO, show_gc_timer, NULL);
244 262
245static ssize_t show_group_addr(struct class_device *cd, char *buf) 263static ssize_t show_group_addr(struct device *d,
264 struct device_attribute *attr, char *buf)
246{ 265{
247 struct net_bridge *br = to_bridge(cd); 266 struct net_bridge *br = to_bridge(d);
248 return sprintf(buf, "%x:%x:%x:%x:%x:%x\n", 267 return sprintf(buf, "%x:%x:%x:%x:%x:%x\n",
249 br->group_addr[0], br->group_addr[1], 268 br->group_addr[0], br->group_addr[1],
250 br->group_addr[2], br->group_addr[3], 269 br->group_addr[2], br->group_addr[3],
251 br->group_addr[4], br->group_addr[5]); 270 br->group_addr[4], br->group_addr[5]);
252} 271}
253 272
254static ssize_t store_group_addr(struct class_device *cd, const char *buf, 273static ssize_t store_group_addr(struct device *d,
255 size_t len) 274 struct device_attribute *attr,
275 const char *buf, size_t len)
256{ 276{
257 struct net_bridge *br = to_bridge(cd); 277 struct net_bridge *br = to_bridge(d);
258 unsigned new_addr[6]; 278 unsigned new_addr[6];
259 int i; 279 int i;
260 280
@@ -286,28 +306,28 @@ static ssize_t store_group_addr(struct class_device *cd, const char *buf,
286 return len; 306 return len;
287} 307}
288 308
289static CLASS_DEVICE_ATTR(group_addr, S_IRUGO | S_IWUSR, 309static DEVICE_ATTR(group_addr, S_IRUGO | S_IWUSR,
290 show_group_addr, store_group_addr); 310 show_group_addr, store_group_addr);
291 311
292 312
293static struct attribute *bridge_attrs[] = { 313static struct attribute *bridge_attrs[] = {
294 &class_device_attr_forward_delay.attr, 314 &dev_attr_forward_delay.attr,
295 &class_device_attr_hello_time.attr, 315 &dev_attr_hello_time.attr,
296 &class_device_attr_max_age.attr, 316 &dev_attr_max_age.attr,
297 &class_device_attr_ageing_time.attr, 317 &dev_attr_ageing_time.attr,
298 &class_device_attr_stp_state.attr, 318 &dev_attr_stp_state.attr,
299 &class_device_attr_priority.attr, 319 &dev_attr_priority.attr,
300 &class_device_attr_bridge_id.attr, 320 &dev_attr_bridge_id.attr,
301 &class_device_attr_root_id.attr, 321 &dev_attr_root_id.attr,
302 &class_device_attr_root_path_cost.attr, 322 &dev_attr_root_path_cost.attr,
303 &class_device_attr_root_port.attr, 323 &dev_attr_root_port.attr,
304 &class_device_attr_topology_change.attr, 324 &dev_attr_topology_change.attr,
305 &class_device_attr_topology_change_detected.attr, 325 &dev_attr_topology_change_detected.attr,
306 &class_device_attr_hello_timer.attr, 326 &dev_attr_hello_timer.attr,
307 &class_device_attr_tcn_timer.attr, 327 &dev_attr_tcn_timer.attr,
308 &class_device_attr_topology_change_timer.attr, 328 &dev_attr_topology_change_timer.attr,
309 &class_device_attr_gc_timer.attr, 329 &dev_attr_gc_timer.attr,
310 &class_device_attr_group_addr.attr, 330 &dev_attr_group_addr.attr,
311 NULL 331 NULL
312}; 332};
313 333
@@ -325,8 +345,8 @@ static struct attribute_group bridge_group = {
325static ssize_t brforward_read(struct kobject *kobj, char *buf, 345static ssize_t brforward_read(struct kobject *kobj, char *buf,
326 loff_t off, size_t count) 346 loff_t off, size_t count)
327{ 347{
328 struct class_device *cdev = to_class_dev(kobj); 348 struct device *dev = to_dev(kobj);
329 struct net_bridge *br = to_bridge(cdev); 349 struct net_bridge *br = to_bridge(dev);
330 int n; 350 int n;
331 351
332 /* must read whole records */ 352 /* must read whole records */
@@ -363,7 +383,7 @@ static struct bin_attribute bridge_forward = {
363 */ 383 */
364int br_sysfs_addbr(struct net_device *dev) 384int br_sysfs_addbr(struct net_device *dev)
365{ 385{
366 struct kobject *brobj = &dev->class_dev.kobj; 386 struct kobject *brobj = &dev->dev.kobj;
367 struct net_bridge *br = netdev_priv(dev); 387 struct net_bridge *br = netdev_priv(dev);
368 int err; 388 int err;
369 389
@@ -395,9 +415,9 @@ int br_sysfs_addbr(struct net_device *dev)
395 } 415 }
396 return 0; 416 return 0;
397 out3: 417 out3:
398 sysfs_remove_bin_file(&dev->class_dev.kobj, &bridge_forward); 418 sysfs_remove_bin_file(&dev->dev.kobj, &bridge_forward);
399 out2: 419 out2:
400 sysfs_remove_group(&dev->class_dev.kobj, &bridge_group); 420 sysfs_remove_group(&dev->dev.kobj, &bridge_group);
401 out1: 421 out1:
402 return err; 422 return err;
403 423
@@ -405,7 +425,7 @@ int br_sysfs_addbr(struct net_device *dev)
405 425
406void br_sysfs_delbr(struct net_device *dev) 426void br_sysfs_delbr(struct net_device *dev)
407{ 427{
408 struct kobject *kobj = &dev->class_dev.kobj; 428 struct kobject *kobj = &dev->dev.kobj;
409 struct net_bridge *br = netdev_priv(dev); 429 struct net_bridge *br = netdev_priv(dev);
410 430
411 kobject_unregister(&br->ifobj); 431 kobject_unregister(&br->ifobj);
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index c51c9e42aeb3..0bc2aef8f9f3 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -211,7 +211,7 @@ int br_sysfs_addif(struct net_bridge_port *p)
211 struct brport_attribute **a; 211 struct brport_attribute **a;
212 int err; 212 int err;
213 213
214 err = sysfs_create_link(&p->kobj, &br->dev->class_dev.kobj, 214 err = sysfs_create_link(&p->kobj, &br->dev->dev.kobj,
215 SYSFS_BRIDGE_PORT_LINK); 215 SYSFS_BRIDGE_PORT_LINK);
216 if (err) 216 if (err)
217 goto out2; 217 goto out2;
diff --git a/net/core/dev.c b/net/core/dev.c
index e660cb57e42a..455d589683e8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -751,7 +751,7 @@ int dev_change_name(struct net_device *dev, char *newname)
751 else 751 else
752 strlcpy(dev->name, newname, IFNAMSIZ); 752 strlcpy(dev->name, newname, IFNAMSIZ);
753 753
754 err = class_device_rename(&dev->class_dev, dev->name); 754 err = device_rename(&dev->dev, dev->name);
755 if (!err) { 755 if (!err) {
756 hlist_del(&dev->name_hlist); 756 hlist_del(&dev->name_hlist);
757 hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name)); 757 hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));
@@ -3221,8 +3221,8 @@ void free_netdev(struct net_device *dev)
3221 BUG_ON(dev->reg_state != NETREG_UNREGISTERED); 3221 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
3222 dev->reg_state = NETREG_RELEASED; 3222 dev->reg_state = NETREG_RELEASED;
3223 3223
3224 /* will free via class release */ 3224 /* will free via device release */
3225 class_device_put(&dev->class_dev); 3225 put_device(&dev->dev);
3226#else 3226#else
3227 kfree((char *)dev - dev->padded); 3227 kfree((char *)dev - dev->padded);
3228#endif 3228#endif
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index f47f319bb7dc..44db095a8f7e 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -18,9 +18,6 @@
18#include <linux/wireless.h> 18#include <linux/wireless.h>
19#include <net/iw_handler.h> 19#include <net/iw_handler.h>
20 20
21#define to_class_dev(obj) container_of(obj,struct class_device,kobj)
22#define to_net_dev(class) container_of(class, struct net_device, class_dev)
23
24static const char fmt_hex[] = "%#x\n"; 21static const char fmt_hex[] = "%#x\n";
25static const char fmt_long_hex[] = "%#lx\n"; 22static const char fmt_long_hex[] = "%#lx\n";
26static const char fmt_dec[] = "%d\n"; 23static const char fmt_dec[] = "%d\n";
@@ -32,10 +29,11 @@ static inline int dev_isalive(const struct net_device *dev)
32} 29}
33 30
34/* use same locking rules as GIF* ioctl's */ 31/* use same locking rules as GIF* ioctl's */
35static ssize_t netdev_show(const struct class_device *cd, char *buf, 32static ssize_t netdev_show(const struct device *dev,
33 struct device_attribute *attr, char *buf,
36 ssize_t (*format)(const struct net_device *, char *)) 34 ssize_t (*format)(const struct net_device *, char *))
37{ 35{
38 struct net_device *net = to_net_dev(cd); 36 struct net_device *net = to_net_dev(dev);
39 ssize_t ret = -EINVAL; 37 ssize_t ret = -EINVAL;
40 38
41 read_lock(&dev_base_lock); 39 read_lock(&dev_base_lock);
@@ -52,14 +50,15 @@ static ssize_t format_##field(const struct net_device *net, char *buf) \
52{ \ 50{ \
53 return sprintf(buf, format_string, net->field); \ 51 return sprintf(buf, format_string, net->field); \
54} \ 52} \
55static ssize_t show_##field(struct class_device *cd, char *buf) \ 53static ssize_t show_##field(struct device *dev, \
54 struct device_attribute *attr, char *buf) \
56{ \ 55{ \
57 return netdev_show(cd, buf, format_##field); \ 56 return netdev_show(dev, attr, buf, format_##field); \
58} 57}
59 58
60 59
61/* use same locking and permission rules as SIF* ioctl's */ 60/* use same locking and permission rules as SIF* ioctl's */
62static ssize_t netdev_store(struct class_device *dev, 61static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
63 const char *buf, size_t len, 62 const char *buf, size_t len,
64 int (*set)(struct net_device *, unsigned long)) 63 int (*set)(struct net_device *, unsigned long))
65{ 64{
@@ -104,7 +103,8 @@ static ssize_t format_addr(char *buf, const unsigned char *addr, int len)
104 return cp - buf; 103 return cp - buf;
105} 104}
106 105
107static ssize_t show_address(struct class_device *dev, char *buf) 106static ssize_t show_address(struct device *dev, struct device_attribute *attr,
107 char *buf)
108{ 108{
109 struct net_device *net = to_net_dev(dev); 109 struct net_device *net = to_net_dev(dev);
110 ssize_t ret = -EINVAL; 110 ssize_t ret = -EINVAL;
@@ -116,7 +116,8 @@ static ssize_t show_address(struct class_device *dev, char *buf)
116 return ret; 116 return ret;
117} 117}
118 118
119static ssize_t show_broadcast(struct class_device *dev, char *buf) 119static ssize_t show_broadcast(struct device *dev,
120 struct device_attribute *attr, char *buf)
120{ 121{
121 struct net_device *net = to_net_dev(dev); 122 struct net_device *net = to_net_dev(dev);
122 if (dev_isalive(net)) 123 if (dev_isalive(net))
@@ -124,7 +125,8 @@ static ssize_t show_broadcast(struct class_device *dev, char *buf)
124 return -EINVAL; 125 return -EINVAL;
125} 126}
126 127
127static ssize_t show_carrier(struct class_device *dev, char *buf) 128static ssize_t show_carrier(struct device *dev,
129 struct device_attribute *attr, char *buf)
128{ 130{
129 struct net_device *netdev = to_net_dev(dev); 131 struct net_device *netdev = to_net_dev(dev);
130 if (netif_running(netdev)) { 132 if (netif_running(netdev)) {
@@ -133,7 +135,8 @@ static ssize_t show_carrier(struct class_device *dev, char *buf)
133 return -EINVAL; 135 return -EINVAL;
134} 136}
135 137
136static ssize_t show_dormant(struct class_device *dev, char *buf) 138static ssize_t show_dormant(struct device *dev,
139 struct device_attribute *attr, char *buf)
137{ 140{
138 struct net_device *netdev = to_net_dev(dev); 141 struct net_device *netdev = to_net_dev(dev);
139 142
@@ -153,7 +156,8 @@ static const char *operstates[] = {
153 "up" 156 "up"
154}; 157};
155 158
156static ssize_t show_operstate(struct class_device *dev, char *buf) 159static ssize_t show_operstate(struct device *dev,
160 struct device_attribute *attr, char *buf)
157{ 161{
158 const struct net_device *netdev = to_net_dev(dev); 162 const struct net_device *netdev = to_net_dev(dev);
159 unsigned char operstate; 163 unsigned char operstate;
@@ -178,9 +182,10 @@ static int change_mtu(struct net_device *net, unsigned long new_mtu)
178 return dev_set_mtu(net, (int) new_mtu); 182 return dev_set_mtu(net, (int) new_mtu);
179} 183}
180 184
181static ssize_t store_mtu(struct class_device *dev, const char *buf, size_t len) 185static ssize_t store_mtu(struct device *dev, struct device_attribute *attr,
186 const char *buf, size_t len)
182{ 187{
183 return netdev_store(dev, buf, len, change_mtu); 188 return netdev_store(dev, attr, buf, len, change_mtu);
184} 189}
185 190
186NETDEVICE_SHOW(flags, fmt_hex); 191NETDEVICE_SHOW(flags, fmt_hex);
@@ -190,9 +195,10 @@ static int change_flags(struct net_device *net, unsigned long new_flags)
190 return dev_change_flags(net, (unsigned) new_flags); 195 return dev_change_flags(net, (unsigned) new_flags);
191} 196}
192 197
193static ssize_t store_flags(struct class_device *dev, const char *buf, size_t len) 198static ssize_t store_flags(struct device *dev, struct device_attribute *attr,
199 const char *buf, size_t len)
194{ 200{
195 return netdev_store(dev, buf, len, change_flags); 201 return netdev_store(dev, attr, buf, len, change_flags);
196} 202}
197 203
198NETDEVICE_SHOW(tx_queue_len, fmt_ulong); 204NETDEVICE_SHOW(tx_queue_len, fmt_ulong);
@@ -203,9 +209,11 @@ static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
203 return 0; 209 return 0;
204} 210}
205 211
206static ssize_t store_tx_queue_len(struct class_device *dev, const char *buf, size_t len) 212static ssize_t store_tx_queue_len(struct device *dev,
213 struct device_attribute *attr,
214 const char *buf, size_t len)
207{ 215{
208 return netdev_store(dev, buf, len, change_tx_queue_len); 216 return netdev_store(dev, attr, buf, len, change_tx_queue_len);
209} 217}
210 218
211NETDEVICE_SHOW(weight, fmt_dec); 219NETDEVICE_SHOW(weight, fmt_dec);
@@ -216,12 +224,13 @@ static int change_weight(struct net_device *net, unsigned long new_weight)
216 return 0; 224 return 0;
217} 225}
218 226
219static ssize_t store_weight(struct class_device *dev, const char *buf, size_t len) 227static ssize_t store_weight(struct device *dev, struct device_attribute *attr,
228 const char *buf, size_t len)
220{ 229{
221 return netdev_store(dev, buf, len, change_weight); 230 return netdev_store(dev, attr, buf, len, change_weight);
222} 231}
223 232
224static struct class_device_attribute net_class_attributes[] = { 233static struct device_attribute net_class_attributes[] = {
225 __ATTR(addr_len, S_IRUGO, show_addr_len, NULL), 234 __ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
226 __ATTR(iflink, S_IRUGO, show_iflink, NULL), 235 __ATTR(iflink, S_IRUGO, show_iflink, NULL),
227 __ATTR(ifindex, S_IRUGO, show_ifindex, NULL), 236 __ATTR(ifindex, S_IRUGO, show_ifindex, NULL),
@@ -242,10 +251,11 @@ static struct class_device_attribute net_class_attributes[] = {
242}; 251};
243 252
244/* Show a given an attribute in the statistics group */ 253/* Show a given an attribute in the statistics group */
245static ssize_t netstat_show(const struct class_device *cd, char *buf, 254static ssize_t netstat_show(const struct device *d,
255 struct device_attribute *attr, char *buf,
246 unsigned long offset) 256 unsigned long offset)
247{ 257{
248 struct net_device *dev = to_net_dev(cd); 258 struct net_device *dev = to_net_dev(d);
249 struct net_device_stats *stats; 259 struct net_device_stats *stats;
250 ssize_t ret = -EINVAL; 260 ssize_t ret = -EINVAL;
251 261
@@ -265,12 +275,13 @@ static ssize_t netstat_show(const struct class_device *cd, char *buf,
265 275
266/* generate a read-only statistics attribute */ 276/* generate a read-only statistics attribute */
267#define NETSTAT_ENTRY(name) \ 277#define NETSTAT_ENTRY(name) \
268static ssize_t show_##name(struct class_device *cd, char *buf) \ 278static ssize_t show_##name(struct device *d, \
279 struct device_attribute *attr, char *buf) \
269{ \ 280{ \
270 return netstat_show(cd, buf, \ 281 return netstat_show(d, attr, buf, \
271 offsetof(struct net_device_stats, name)); \ 282 offsetof(struct net_device_stats, name)); \
272} \ 283} \
273static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) 284static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
274 285
275NETSTAT_ENTRY(rx_packets); 286NETSTAT_ENTRY(rx_packets);
276NETSTAT_ENTRY(tx_packets); 287NETSTAT_ENTRY(tx_packets);
@@ -297,29 +308,29 @@ NETSTAT_ENTRY(rx_compressed);
297NETSTAT_ENTRY(tx_compressed); 308NETSTAT_ENTRY(tx_compressed);
298 309
299static struct attribute *netstat_attrs[] = { 310static struct attribute *netstat_attrs[] = {
300 &class_device_attr_rx_packets.attr, 311 &dev_attr_rx_packets.attr,
301 &class_device_attr_tx_packets.attr, 312 &dev_attr_tx_packets.attr,
302 &class_device_attr_rx_bytes.attr, 313 &dev_attr_rx_bytes.attr,
303 &class_device_attr_tx_bytes.attr, 314 &dev_attr_tx_bytes.attr,
304 &class_device_attr_rx_errors.attr, 315 &dev_attr_rx_errors.attr,
305 &class_device_attr_tx_errors.attr, 316 &dev_attr_tx_errors.attr,
306 &class_device_attr_rx_dropped.attr, 317 &dev_attr_rx_dropped.attr,
307 &class_device_attr_tx_dropped.attr, 318 &dev_attr_tx_dropped.attr,
308 &class_device_attr_multicast.attr, 319 &dev_attr_multicast.attr,
309 &class_device_attr_collisions.attr, 320 &dev_attr_collisions.attr,
310 &class_device_attr_rx_length_errors.attr, 321 &dev_attr_rx_length_errors.attr,
311 &class_device_attr_rx_over_errors.attr, 322 &dev_attr_rx_over_errors.attr,
312 &class_device_attr_rx_crc_errors.attr, 323 &dev_attr_rx_crc_errors.attr,
313 &class_device_attr_rx_frame_errors.attr, 324 &dev_attr_rx_frame_errors.attr,
314 &class_device_attr_rx_fifo_errors.attr, 325 &dev_attr_rx_fifo_errors.attr,
315 &class_device_attr_rx_missed_errors.attr, 326 &dev_attr_rx_missed_errors.attr,
316 &class_device_attr_tx_aborted_errors.attr, 327 &dev_attr_tx_aborted_errors.attr,
317 &class_device_attr_tx_carrier_errors.attr, 328 &dev_attr_tx_carrier_errors.attr,
318 &class_device_attr_tx_fifo_errors.attr, 329 &dev_attr_tx_fifo_errors.attr,
319 &class_device_attr_tx_heartbeat_errors.attr, 330 &dev_attr_tx_heartbeat_errors.attr,
320 &class_device_attr_tx_window_errors.attr, 331 &dev_attr_tx_window_errors.attr,
321 &class_device_attr_rx_compressed.attr, 332 &dev_attr_rx_compressed.attr,
322 &class_device_attr_tx_compressed.attr, 333 &dev_attr_tx_compressed.attr,
323 NULL 334 NULL
324}; 335};
325 336
@@ -331,11 +342,11 @@ static struct attribute_group netstat_group = {
331 342
332#ifdef WIRELESS_EXT 343#ifdef WIRELESS_EXT
333/* helper function that does all the locking etc for wireless stats */ 344/* helper function that does all the locking etc for wireless stats */
334static ssize_t wireless_show(struct class_device *cd, char *buf, 345static ssize_t wireless_show(struct device *d, char *buf,
335 ssize_t (*format)(const struct iw_statistics *, 346 ssize_t (*format)(const struct iw_statistics *,
336 char *)) 347 char *))
337{ 348{
338 struct net_device *dev = to_net_dev(cd); 349 struct net_device *dev = to_net_dev(d);
339 const struct iw_statistics *iw = NULL; 350 const struct iw_statistics *iw = NULL;
340 ssize_t ret = -EINVAL; 351 ssize_t ret = -EINVAL;
341 352
@@ -358,11 +369,12 @@ static ssize_t format_iw_##name(const struct iw_statistics *iw, char *buf) \
358{ \ 369{ \
359 return sprintf(buf, format_string, iw->field); \ 370 return sprintf(buf, format_string, iw->field); \
360} \ 371} \
361static ssize_t show_iw_##name(struct class_device *cd, char *buf) \ 372static ssize_t show_iw_##name(struct device *d, \
373 struct device_attribute *attr, char *buf) \
362{ \ 374{ \
363 return wireless_show(cd, buf, format_iw_##name); \ 375 return wireless_show(d, buf, format_iw_##name); \
364} \ 376} \
365static CLASS_DEVICE_ATTR(name, S_IRUGO, show_iw_##name, NULL) 377static DEVICE_ATTR(name, S_IRUGO, show_iw_##name, NULL)
366 378
367WIRELESS_SHOW(status, status, fmt_hex); 379WIRELESS_SHOW(status, status, fmt_hex);
368WIRELESS_SHOW(link, qual.qual, fmt_dec); 380WIRELESS_SHOW(link, qual.qual, fmt_dec);
@@ -376,16 +388,16 @@ WIRELESS_SHOW(retries, discard.retries, fmt_dec);
376WIRELESS_SHOW(beacon, miss.beacon, fmt_dec); 388WIRELESS_SHOW(beacon, miss.beacon, fmt_dec);
377 389
378static struct attribute *wireless_attrs[] = { 390static struct attribute *wireless_attrs[] = {
379 &class_device_attr_status.attr, 391 &dev_attr_status.attr,
380 &class_device_attr_link.attr, 392 &dev_attr_link.attr,
381 &class_device_attr_level.attr, 393 &dev_attr_level.attr,
382 &class_device_attr_noise.attr, 394 &dev_attr_noise.attr,
383 &class_device_attr_nwid.attr, 395 &dev_attr_nwid.attr,
384 &class_device_attr_crypt.attr, 396 &dev_attr_crypt.attr,
385 &class_device_attr_fragment.attr, 397 &dev_attr_fragment.attr,
386 &class_device_attr_retries.attr, 398 &dev_attr_retries.attr,
387 &class_device_attr_misc.attr, 399 &dev_attr_misc.attr,
388 &class_device_attr_beacon.attr, 400 &dev_attr_beacon.attr,
389 NULL 401 NULL
390}; 402};
391 403
@@ -396,10 +408,10 @@ static struct attribute_group wireless_group = {
396#endif 408#endif
397 409
398#ifdef CONFIG_HOTPLUG 410#ifdef CONFIG_HOTPLUG
399static int netdev_uevent(struct class_device *cd, char **envp, 411static int netdev_uevent(struct device *d, char **envp,
400 int num_envp, char *buf, int size) 412 int num_envp, char *buf, int size)
401{ 413{
402 struct net_device *dev = to_net_dev(cd); 414 struct net_device *dev = to_net_dev(d);
403 int i = 0; 415 int i = 0;
404 int n; 416 int n;
405 417
@@ -419,12 +431,11 @@ static int netdev_uevent(struct class_device *cd, char **envp,
419 431
420/* 432/*
421 * netdev_release -- destroy and free a dead device. 433 * netdev_release -- destroy and free a dead device.
422 * Called when last reference to class_device kobject is gone. 434 * Called when last reference to device kobject is gone.
423 */ 435 */
424static void netdev_release(struct class_device *cd) 436static void netdev_release(struct device *d)
425{ 437{
426 struct net_device *dev 438 struct net_device *dev = to_net_dev(d);
427 = container_of(cd, struct net_device, class_dev);
428 439
429 BUG_ON(dev->reg_state != NETREG_RELEASED); 440 BUG_ON(dev->reg_state != NETREG_RELEASED);
430 441
@@ -433,31 +444,31 @@ static void netdev_release(struct class_device *cd)
433 444
434static struct class net_class = { 445static struct class net_class = {
435 .name = "net", 446 .name = "net",
436 .release = netdev_release, 447 .dev_release = netdev_release,
437 .class_dev_attrs = net_class_attributes, 448 .dev_attrs = net_class_attributes,
438#ifdef CONFIG_HOTPLUG 449#ifdef CONFIG_HOTPLUG
439 .uevent = netdev_uevent, 450 .dev_uevent = netdev_uevent,
440#endif 451#endif
441}; 452};
442 453
443void netdev_unregister_sysfs(struct net_device * net) 454void netdev_unregister_sysfs(struct net_device * net)
444{ 455{
445 class_device_del(&(net->class_dev)); 456 device_del(&(net->dev));
446} 457}
447 458
448/* Create sysfs entries for network device. */ 459/* Create sysfs entries for network device. */
449int netdev_register_sysfs(struct net_device *net) 460int netdev_register_sysfs(struct net_device *net)
450{ 461{
451 struct class_device *class_dev = &(net->class_dev); 462 struct device *dev = &(net->dev);
452 struct attribute_group **groups = net->sysfs_groups; 463 struct attribute_group **groups = net->sysfs_groups;
453 464
454 class_device_initialize(class_dev); 465 device_initialize(dev);
455 class_dev->class = &net_class; 466 dev->class = &net_class;
456 class_dev->class_data = net; 467 dev->platform_data = net;
457 class_dev->groups = groups; 468 dev->groups = groups;
458 469
459 BUILD_BUG_ON(BUS_ID_SIZE < IFNAMSIZ); 470 BUILD_BUG_ON(BUS_ID_SIZE < IFNAMSIZ);
460 strlcpy(class_dev->class_id, net->name, BUS_ID_SIZE); 471 strlcpy(dev->bus_id, net->name, BUS_ID_SIZE);
461 472
462 if (net->get_stats) 473 if (net->get_stats)
463 *groups++ = &netstat_group; 474 *groups++ = &netstat_group;
@@ -467,7 +478,7 @@ int netdev_register_sysfs(struct net_device *net)
467 *groups++ = &wireless_group; 478 *groups++ = &wireless_group;
468#endif 479#endif
469 480
470 return class_device_add(class_dev); 481 return device_add(dev);
471} 482}
472 483
473int netdev_sysfs_init(void) 484int netdev_sysfs_init(void)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index de7801d589e7..f3404ae9f190 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -268,7 +268,7 @@ nodata:
268struct sk_buff *__netdev_alloc_skb(struct net_device *dev, 268struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
269 unsigned int length, gfp_t gfp_mask) 269 unsigned int length, gfp_t gfp_mask)
270{ 270{
271 int node = dev->class_dev.dev ? dev_to_node(dev->class_dev.dev) : -1; 271 int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
272 struct sk_buff *skb; 272 struct sk_buff *skb;
273 273
274 skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node); 274 skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index fa2f7da606a9..fb58e03b3fbd 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -265,6 +265,12 @@ ieee80211softmac_wx_get_rate(struct net_device *net_dev,
265 int err = -EINVAL; 265 int err = -EINVAL;
266 266
267 spin_lock_irqsave(&mac->lock, flags); 267 spin_lock_irqsave(&mac->lock, flags);
268
269 if (unlikely(!mac->running)) {
270 err = -ENODEV;
271 goto out_unlock;
272 }
273
268 switch (mac->txrates.default_rate) { 274 switch (mac->txrates.default_rate) {
269 case IEEE80211_CCK_RATE_1MB: 275 case IEEE80211_CCK_RATE_1MB:
270 data->bitrate.value = 1000000; 276 data->bitrate.value = 1000000;
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index f01f8c072852..a1880e854dce 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -1,7 +1,7 @@
1#### 1####
2# kbuild: Generic definitions 2# kbuild: Generic definitions
3 3
4# Convinient variables 4# Convenient constants
5comma := , 5comma := ,
6squote := ' 6squote := '
7empty := 7empty :=
@@ -56,76 +56,87 @@ endef
56# gcc support functions 56# gcc support functions
57# See documentation in Documentation/kbuild/makefiles.txt 57# See documentation in Documentation/kbuild/makefiles.txt
58 58
59# output directory for tests below 59# checker-shell
60TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/) 60# Usage: option = $(call checker-shell,$(CC)...-o $$OUT,option-ok,otherwise)
61# Exit code chooses option. $$OUT is safe location for needless output.
62define checker-shell
63$(shell set -e; \
64 DIR=$(KBUILD_EXTMOD); \
65 cd $${DIR:-$(objtree)}; \
66 OUT=$$PWD/.$$$$.null; \
67 if $(1) >/dev/null 2>&1; \
68 then echo "$(2)"; \
69 else echo "$(3)"; \
70 fi; \
71 rm -f $$OUT)
72endef
61 73
62# as-option 74# as-option
63# Usage: cflags-y += $(call as-option, -Wa$(comma)-isa=foo,) 75# Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,)
64 76as-option = $(call checker-shell,\
65as-option = $(shell if $(CC) $(CFLAGS) $(1) -Wa,-Z -c -o /dev/null \ 77 $(CC) $(CFLAGS) $(1) -c -xassembler /dev/null -o $$OUT,$(1),$(2))
66 -xassembler /dev/null > /dev/null 2>&1; then echo "$(1)"; \
67 else echo "$(2)"; fi ;)
68 78
69# as-instr 79# as-instr
70# Usage: cflags-y += $(call as-instr, instr, option1, option2) 80# Usage: cflags-y += $(call as-instr,instr,option1,option2)
71 81as-instr = $(call checker-shell,\
72as-instr = $(shell if echo -e "$(1)" | \ 82 printf "$(1)" | $(CC) $(AFLAGS) -c -xassembler -o $$OUT -,$(2),$(3))
73 $(CC) $(AFLAGS) -c -xassembler - \
74 -o $(TMPOUT)astest$$$$.out > /dev/null 2>&1; \
75 then rm $(TMPOUT)astest$$$$.out; echo "$(2)"; \
76 else echo "$(3)"; fi)
77 83
78# cc-option 84# cc-option
79# Usage: cflags-y += $(call cc-option, -march=winchip-c6, -march=i586) 85# Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
80 86cc-option = $(call checker-shell,\
81cc-option = $(shell if $(CC) $(CFLAGS) $(1) -S -o /dev/null -xc /dev/null \ 87 $(CC) $(CFLAGS) $(if $(3),$(3),$(1)) -S -xc /dev/null -o $$OUT,$(1),$(2))
82 > /dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi ;)
83 88
84# cc-option-yn 89# cc-option-yn
85# Usage: flag := $(call cc-option-yn, -march=winchip-c6) 90# Usage: flag := $(call cc-option-yn,-march=winchip-c6)
86cc-option-yn = $(shell if $(CC) $(CFLAGS) $(1) -S -o /dev/null -xc /dev/null \ 91cc-option-yn = $(call cc-option,"y","n",$(1))
87 > /dev/null 2>&1; then echo "y"; else echo "n"; fi;)
88 92
89# cc-option-align 93# cc-option-align
90# Prefix align with either -falign or -malign 94# Prefix align with either -falign or -malign
91cc-option-align = $(subst -functions=0,,\ 95cc-option-align = $(subst -functions=0,,\
92 $(call cc-option,-falign-functions=0,-malign-functions=0)) 96 $(call cc-option,-falign-functions=0,-malign-functions=0))
93 97
94# cc-version 98# cc-version
95# Usage gcc-ver := $(call cc-version, $(CC)) 99# Usage gcc-ver := $(call cc-version,$(CC))
96cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC)) 100cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
97 101
98# cc-ifversion 102# cc-ifversion
99# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) 103# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1)
100cc-ifversion = $(shell if [ $(call cc-version, $(CC)) $(1) $(2) ]; then \ 104cc-ifversion = $(shell [ $(call cc-version, $(CC)) $(1) $(2) ] && echo $(3))
101 echo $(3); fi;)
102 105
103# ld-option 106# ld-option
104# Usage: ldflags += $(call ld-option, -Wl$(comma)--hash-style=both) 107# Usage: ldflags += $(call ld-option, -Wl$(comma)--hash-style=both)
105ld-option = $(shell if $(CC) $(1) -nostdlib -xc /dev/null \ 108ld-option = $(call checker-shell,\
106 -o $(TMPOUT)ldtest$$$$.out > /dev/null 2>&1; \ 109 $(CC) $(1) -nostdlib -xc /dev/null -o $$OUT,$(1),$(2))
107 then rm $(TMPOUT)ldtest$$$$.out; echo "$(1)"; \ 110
108 else echo "$(2)"; fi) 111######
109 112
110###
111# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.build obj= 113# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.build obj=
112# Usage: 114# Usage:
113# $(Q)$(MAKE) $(build)=dir 115# $(Q)$(MAKE) $(build)=dir
114build := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.build obj 116build := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.build obj
115 117
116# Prefix -I with $(srctree) if it is not an absolute path 118# Prefix -I with $(srctree) if it is not an absolute path,
117addtree = $(if $(filter-out -I/%,$(1)),$(patsubst -I%,-I$(srctree)/%,$(1))) $(1) 119# add original to the end
120addtree = $(if \
121 $(filter-out -I/%,$(1)),$(patsubst -I%,-I$(srctree)/%,$(1))) $(1)
122
118# Find all -I options and call addtree 123# Find all -I options and call addtree
119flags = $(foreach o,$($(1)),$(if $(filter -I%,$(o)),$(call addtree,$(o)),$(o))) 124flags = $(foreach o,$($(1)),\
125 $(if $(filter -I%,$(o)),$(call addtree,$(o)),$(o)))
126
127# echo command.
128# Short version is used, if $(quiet) equals `quiet_', otherwise full one.
129echo-cmd = $(if $($(quiet)cmd_$(1)),\
130 echo ' $(call escsq,$($(quiet)cmd_$(1)))$(echo-why)';)
120 131
121# If quiet is set, only print short version of command 132# printing commands
122cmd = @$(echo-cmd) $(cmd_$(1)) 133cmd = @$(echo-cmd) $(cmd_$(1))
123 134
124# Add $(obj)/ for paths that is not absolute 135# Add $(obj)/ for paths that are not absolute
125objectify = $(foreach o,$(1),$(if $(filter /%,$(o)),$(o),$(obj)/$(o))) 136objectify = $(foreach o,$(1),$(if $(filter /%,$(o)),$(o),$(obj)/$(o)))
126 137
127### 138###
128# if_changed - execute command if any prerequisite is newer than 139# if_changed - execute command if any prerequisite is newer than
129# target, or command line has changed 140# target, or command line has changed
130# if_changed_dep - as if_changed, but uses fixdep to reveal dependencies 141# if_changed_dep - as if_changed, but uses fixdep to reveal dependencies
131# including used config symbols 142# including used config symbols
@@ -133,16 +144,12 @@ objectify = $(foreach o,$(1),$(if $(filter /%,$(o)),$(o),$(obj)/$(o)))
133# See Documentation/kbuild/makefiles.txt for more info 144# See Documentation/kbuild/makefiles.txt for more info
134 145
135ifneq ($(KBUILD_NOCMDDEP),1) 146ifneq ($(KBUILD_NOCMDDEP),1)
136# Check if both arguments has same arguments. Result in empty string if equal 147# Check if both arguments has same arguments. Result is empty string, if equal.
137# User may override this check using make KBUILD_NOCMDDEP=1 148# User may override this check using make KBUILD_NOCMDDEP=1
138arg-check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \ 149arg-check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \
139 $(filter-out $(cmd_$@), $(cmd_$(1))) ) 150 $(filter-out $(cmd_$@), $(cmd_$(1))) )
140endif 151endif
141 152
142# echo command. Short version is $(quiet) equals quiet, otherwise full command
143echo-cmd = $(if $($(quiet)cmd_$(1)), \
144 echo ' $(call escsq,$($(quiet)cmd_$(1)))$(echo-why)';)
145
146# >'< substitution is for echo to work, 153# >'< substitution is for echo to work,
147# >$< substitution to preserve $ when reloading .cmd file 154# >$< substitution to preserve $ when reloading .cmd file
148# note: when using inline perl scripts [perl -e '...$$t=1;...'] 155# note: when using inline perl scripts [perl -e '...$$t=1;...']
@@ -153,15 +160,15 @@ make-cmd = $(subst \#,\\\#,$(subst $$,$$$$,$(call escsq,$(cmd_$(1)))))
153# PHONY targets skipped in both cases. 160# PHONY targets skipped in both cases.
154any-prereq = $(filter-out $(PHONY),$?) $(filter-out $(PHONY) $(wildcard $^),$^) 161any-prereq = $(filter-out $(PHONY),$?) $(filter-out $(PHONY) $(wildcard $^),$^)
155 162
156# Execute command if command has changed or prerequisitei(s) are updated 163# Execute command if command has changed or prerequisite(s) are updated.
157# 164#
158if_changed = $(if $(strip $(any-prereq) $(arg-check)), \ 165if_changed = $(if $(strip $(any-prereq) $(arg-check)), \
159 @set -e; \ 166 @set -e; \
160 $(echo-cmd) $(cmd_$(1)); \ 167 $(echo-cmd) $(cmd_$(1)); \
161 echo 'cmd_$@ := $(make-cmd)' > $(dot-target).cmd) 168 echo 'cmd_$@ := $(make-cmd)' > $(dot-target).cmd)
162 169
163# execute the command and also postprocess generated .d dependencies 170# Execute the command and also postprocess generated .d dependencies file.
164# file 171#
165if_changed_dep = $(if $(strip $(any-prereq) $(arg-check) ), \ 172if_changed_dep = $(if $(strip $(any-prereq) $(arg-check) ), \
166 @set -e; \ 173 @set -e; \
167 $(echo-cmd) $(cmd_$(1)); \ 174 $(echo-cmd) $(cmd_$(1)); \
@@ -169,9 +176,10 @@ if_changed_dep = $(if $(strip $(any-prereq) $(arg-check) ), \
169 rm -f $(depfile); \ 176 rm -f $(depfile); \
170 mv -f $(dot-target).tmp $(dot-target).cmd) 177 mv -f $(dot-target).tmp $(dot-target).cmd)
171 178
179# Will check if $(cmd_foo) changed, or any of the prerequisites changed,
180# and if so will execute $(rule_foo).
172# Usage: $(call if_changed_rule,foo) 181# Usage: $(call if_changed_rule,foo)
173# will check if $(cmd_foo) changed, or any of the prequisites changed, 182#
174# and if so will execute $(rule_foo)
175if_changed_rule = $(if $(strip $(any-prereq) $(arg-check) ), \ 183if_changed_rule = $(if $(strip $(any-prereq) $(arg-check) ), \
176 @set -e; \ 184 @set -e; \
177 $(rule_$(1))) 185 $(rule_$(1)))
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh
index 4c723fd18648..43f75d6e4d96 100644
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -1,6 +1,6 @@
1#!/bin/bash 1#!/bin/bash
2# Copyright (C) Martin Schlemmer <azarah@nosferatu.za.org> 2# Copyright (C) Martin Schlemmer <azarah@nosferatu.za.org>
3# Copyright (c) 2006 Sam Ravnborg <sam@ravnborg.org> 3# Copyright (C) 2006 Sam Ravnborg <sam@ravnborg.org>
4# 4#
5# Released under the terms of the GNU GPL 5# Released under the terms of the GNU GPL
6# 6#
@@ -17,15 +17,15 @@ cat << EOF
17Usage: 17Usage:
18$0 [-o <file>] [-u <uid>] [-g <gid>] {-d | <cpio_source>} ... 18$0 [-o <file>] [-u <uid>] [-g <gid>] {-d | <cpio_source>} ...
19 -o <file> Create gzipped initramfs file named <file> using 19 -o <file> Create gzipped initramfs file named <file> using
20 gen_init_cpio and gzip 20 gen_init_cpio and gzip
21 -u <uid> User ID to map to user ID 0 (root). 21 -u <uid> User ID to map to user ID 0 (root).
22 <uid> is only meaningful if <cpio_source> 22 <uid> is only meaningful if <cpio_source>
23 is a directory. 23 is a directory.
24 -g <gid> Group ID to map to group ID 0 (root). 24 -g <gid> Group ID to map to group ID 0 (root).
25 <gid> is only meaningful if <cpio_source> 25 <gid> is only meaningful if <cpio_source>
26 is a directory. 26 is a directory.
27 <cpio_source> File list or directory for cpio archive. 27 <cpio_source> File list or directory for cpio archive.
28 If <cpio_source> is a .cpio file it will be used 28 If <cpio_source> is a .cpio file it will be used
29 as direct input to initramfs. 29 as direct input to initramfs.
30 -d Output the default cpio list. 30 -d Output the default cpio list.
31 31
@@ -36,6 +36,12 @@ to reset the root/group mapping.
36EOF 36EOF
37} 37}
38 38
39# awk style field access
40# $1 - field number; rest is argument string
41field() {
42 shift $1 ; echo $1
43}
44
39list_default_initramfs() { 45list_default_initramfs() {
40 # echo usr/kinit/kinit 46 # echo usr/kinit/kinit
41 : 47 :
@@ -119,22 +125,17 @@ parse() {
119 str="${ftype} ${name} ${location} ${str}" 125 str="${ftype} ${name} ${location} ${str}"
120 ;; 126 ;;
121 "nod") 127 "nod")
122 local dev_type= 128 local dev=`LC_ALL=C ls -l "${location}"`
123 local maj=$(LC_ALL=C ls -l "${location}" | \ 129 local maj=`field 5 ${dev}`
124 gawk '{sub(/,/, "", $5); print $5}') 130 local min=`field 6 ${dev}`
125 local min=$(LC_ALL=C ls -l "${location}" | \ 131 maj=${maj%,}
126 gawk '{print $6}') 132
127 133 [ -b "${location}" ] && dev="b" || dev="c"
128 if [ -b "${location}" ]; then 134
129 dev_type="b" 135 str="${ftype} ${name} ${str} ${dev} ${maj} ${min}"
130 else
131 dev_type="c"
132 fi
133 str="${ftype} ${name} ${str} ${dev_type} ${maj} ${min}"
134 ;; 136 ;;
135 "slink") 137 "slink")
136 local target=$(LC_ALL=C ls -l "${location}" | \ 138 local target=`field 11 $(LC_ALL=C ls -l "${location}")`
137 gawk '{print $11}')
138 str="${ftype} ${name} ${target} ${str}" 139 str="${ftype} ${name} ${target} ${str}"
139 ;; 140 ;;
140 *) 141 *)
diff --git a/scripts/makelst b/scripts/makelst
index 34bd72391238..4fc80f2b7e19 100755
--- a/scripts/makelst
+++ b/scripts/makelst
@@ -1,31 +1,31 @@
1#!/bin/bash 1#!/bin/sh
2# A script to dump mixed source code & assembly 2# A script to dump mixed source code & assembly
3# with correct relocations from System.map 3# with correct relocations from System.map
4# Requires the following lines in Rules.make. 4# Requires the following lines in makefile:
5# Author(s): DJ Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
6# William Stearns <wstearns@pobox.com>
7#%.lst: %.c 5#%.lst: %.c
8# $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(CFLAGS_$@) -g -c -o $*.o $< 6# $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(CFLAGS_$@) -g -c -o $*.o $<
9# $(TOPDIR)/scripts/makelst $*.o $(TOPDIR)/System.map $(OBJDUMP) 7# $(srctree)/scripts/makelst $*.o $(objtree)/System.map $(OBJDUMP)
10# 8#
11# Copyright (C) 2000 IBM Corporation 9# Copyright (C) 2000 IBM Corporation
12# Author(s): DJ Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) 10# Author(s): DJ Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
11# William Stearns <wstearns@pobox.com>
13# 12#
14 13
15t1=`$3 --syms $1 | grep .text | grep " F " | head -n 1` 14# awk style field access
15field() {
16 shift $1 ; echo $1
17}
18
19t1=`$3 --syms $1 | grep .text | grep -m1 " F "`
16if [ -n "$t1" ]; then 20if [ -n "$t1" ]; then
17 t2=`echo $t1 | gawk '{ print $6 }'` 21 t2=`field 6 $t1`
18 if [ ! -r $2 ]; then 22 if [ ! -r $2 ]; then
19 echo "No System.map" >&2 23 echo "No System.map" >&2
20 t7=0
21 else 24 else
22 t3=`grep $t2 $2` 25 t3=`grep $t2 $2`
23 t4=`echo $t3 | gawk '{ print $1 }'` 26 t4=`field 1 $t3`
24 t5=`echo $t1 | gawk '{ print $1 }'` 27 t5=`field 1 $t1`
25 t6=`echo $t4 - $t5 | tr a-f A-F` 28 t6=`printf "%lu" $((0x$t4 - 0x$t5))`
26 t7=`( echo ibase=16 ; echo $t6 ) | bc`
27 fi 29 fi
28else
29 t7=0
30fi 30fi
31$3 -r --source --adjust-vma=$t7 $1 31$3 -r --source --adjust-vma=${t6:-0} $1
diff --git a/security/keys/key.c b/security/keys/key.c
index ac9326c5f1da..700400d801dc 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -188,6 +188,7 @@ static inline void key_alloc_serial(struct key *key)
188 188
189 spin_lock(&key_serial_lock); 189 spin_lock(&key_serial_lock);
190 190
191attempt_insertion:
191 parent = NULL; 192 parent = NULL;
192 p = &key_serial_tree.rb_node; 193 p = &key_serial_tree.rb_node;
193 194
@@ -202,39 +203,33 @@ static inline void key_alloc_serial(struct key *key)
202 else 203 else
203 goto serial_exists; 204 goto serial_exists;
204 } 205 }
205 goto insert_here; 206
207 /* we've found a suitable hole - arrange for this key to occupy it */
208 rb_link_node(&key->serial_node, parent, p);
209 rb_insert_color(&key->serial_node, &key_serial_tree);
210
211 spin_unlock(&key_serial_lock);
212 return;
206 213
207 /* we found a key with the proposed serial number - walk the tree from 214 /* we found a key with the proposed serial number - walk the tree from
208 * that point looking for the next unused serial number */ 215 * that point looking for the next unused serial number */
209serial_exists: 216serial_exists:
210 for (;;) { 217 for (;;) {
211 key->serial++; 218 key->serial++;
212 if (key->serial < 2) 219 if (key->serial < 3) {
213 key->serial = 2; 220 key->serial = 3;
214 221 goto attempt_insertion;
215 if (!rb_parent(parent)) 222 }
216 p = &key_serial_tree.rb_node;
217 else if (rb_parent(parent)->rb_left == parent)
218 p = &(rb_parent(parent)->rb_left);
219 else
220 p = &(rb_parent(parent)->rb_right);
221 223
222 parent = rb_next(parent); 224 parent = rb_next(parent);
223 if (!parent) 225 if (!parent)
224 break; 226 goto attempt_insertion;
225 227
226 xkey = rb_entry(parent, struct key, serial_node); 228 xkey = rb_entry(parent, struct key, serial_node);
227 if (key->serial < xkey->serial) 229 if (key->serial < xkey->serial)
228 goto insert_here; 230 goto attempt_insertion;
229 } 231 }
230 232
231 /* we've found a suitable hole - arrange for this key to occupy it */
232insert_here:
233 rb_link_node(&key->serial_node, parent, p);
234 rb_insert_color(&key->serial_node, &key_serial_tree);
235
236 spin_unlock(&key_serial_lock);
237
238} /* end key_alloc_serial() */ 233} /* end key_alloc_serial() */
239 234
240/*****************************************************************************/ 235/*****************************************************************************/